#' Developer functions for handling missing values in new metrics #' #' `yardstick_remove_missing()` and `yardstick_any_missing()` are useful #' alongside the [metric-summarizers] functions for implementing new custom #' metrics. `yardstick_remove_missing()` removes any observations that contain #' missing values across `truth`, `estimate`, and `case_weights`. #' `yardstick_any_missing()` returns `TRUE` if there are any missing values in #' the inputs. #' #' @param truth,estimate Vectors of the same length. #' #' @param case_weights A vector of the same length as `truth` and `estimate`, or #' `NULL` if case weights are not being used. #' #' @seealso [metric-summarizers] #' #' @name yardstick_remove_missing NULL #' @rdname yardstick_remove_missing #' @export yardstick_remove_missing <- function(truth, estimate, case_weights) { complete_cases <- stats::complete.cases( truth, estimate, case_weights ) if (.is_surv(truth, fail = FALSE)) { Surv_type <- .extract_surv_type(truth) truth <- truth[complete_cases, ] attr(truth, "type") <- Surv_type attr(truth, "class") <- "Surv" } else { truth <- truth[complete_cases] } if (is.matrix(estimate)) { estimate <- estimate[complete_cases, , drop = FALSE] } else { estimate <- estimate[complete_cases] } case_weights <- case_weights[complete_cases] list( truth = truth, estimate = estimate, case_weights = case_weights ) } #' @rdname yardstick_remove_missing #' @export yardstick_any_missing <- function(truth, estimate, case_weights) { anyNA(truth) || anyNA(estimate) || (!is.null(case_weights) && anyNA(case_weights)) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/missings.R
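A minimal sketch of how these developer helpers slot into a new vector metric, mirroring the pattern used throughout the metric files below. The metric itself (a mean absolute log error) and its inputs are hypothetical; the sketch assumes yardstick is loaded so the exported helpers are available, and it omits the internal input checks the real metrics perform.

library(yardstick)

# Hypothetical custom metric following the yardstick `_vec()` pattern
male_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) {
  if (na_rm) {
    # Drop observations with missing truth, estimate, or case weights
    result <- yardstick_remove_missing(truth, estimate, case_weights)
    truth <- result$truth
    estimate <- result$estimate
    case_weights <- result$case_weights
  } else if (yardstick_any_missing(truth, estimate, case_weights)) {
    # Propagate missingness when na_rm = FALSE
    return(NA_real_)
  }
  mean(abs(log1p(truth) - log1p(estimate)))
}

male_vec(c(1, 2, NA), c(1.1, 1.9, 3))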
#' Concordance correlation coefficient #' #' Calculate the concordance correlation coefficient. #' #' [ccc()] is a metric of both consistency/correlation and accuracy, #' while metrics such as [rmse()] are strictly for accuracy and metrics #' such as [rsq()] are strictly for consistency/correlation #' #' @family numeric metrics #' @family consistency metrics #' @family accuracy metrics #' @templateVar fn ccc #' @template return #' #' @inheritParams rmse #' #' @param bias A `logical`; should the biased estimate of variance #' be used (as is Lin (1989))? #' #' @author Max Kuhn #' #' @references #' #' Lin, L. (1989). A concordance correlation #' coefficient to evaluate reproducibility. _Biometrics_, 45 (1), #' 255-268. #' #' Nickerson, C. (1997). A note on "A concordance correlation #' coefficient to evaluate reproducibility". _Biometrics_, 53(4), #' 1503-1507. #' #' #' @template examples-numeric #' #' @export #' ccc <- function(data, ...) { UseMethod("ccc") } ccc <- new_numeric_metric( ccc, direction = "maximize" ) #' @rdname ccc #' @export ccc.data.frame <- function(data, truth, estimate, bias = FALSE, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "ccc", fn = ccc_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights), # Extra argument for ccc_impl() fn_options = list(bias = bias) ) } #' @export #' @rdname ccc ccc_vec <- function(truth, estimate, bias = FALSE, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } ccc_impl(truth, estimate, bias, case_weights) } ccc_impl <- function(truth, estimate, bias, case_weights) { case_weights <- vec_cast(case_weights, to = double()) truth_mean <- yardstick_mean(truth, case_weights = case_weights) estimate_mean <- yardstick_mean(estimate, case_weights = case_weights) truth_variance <- yardstick_var(truth, case_weights = case_weights) estimate_variance <- yardstick_var(estimate, case_weights = case_weights) covariance <- yardstick_cov(truth, estimate, case_weights = case_weights) if (bias) { # Assumes `case_weights` are frequency weights so we can generate an # appropriate `n` if (is.null(case_weights)) { n <- length(estimate) } else { n <- sum(case_weights) } estimate_variance <- estimate_variance * (n - 1) / n truth_variance <- truth_variance * (n - 1) / n covariance <- covariance * (n - 1) / n } numerator <- 2 * covariance denominator <- truth_variance + estimate_variance + (truth_mean - estimate_mean)^2 numerator / denominator }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-ccc.R
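A quick, unweighted sanity check of the concordance formula used in ccc_impl(): with bias = FALSE the internal helpers reduce to base R's mean(), var(), and cov(), so the two computations below should agree. The toy vectors are made up; this is only a sketch assuming yardstick is loaded.

library(yardstick)

set.seed(1)
truth <- rnorm(20)
estimate <- truth + rnorm(20, sd = 0.5)

# 2 * covariance / (var_truth + var_estimate + squared mean difference)
manual <- 2 * cov(truth, estimate) /
  (var(truth) + var(estimate) + (mean(truth) - mean(estimate))^2)

manual
ccc_vec(truth, estimate)  # should agree with `manual`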
#' Huber loss #' #' Calculate the Huber loss, a loss function used in robust regression. This #' loss function is less sensitive to outliers than [rmse()]. This function is #' quadratic for small residual values and linear for large residual values. #' #' @family numeric metrics #' @family accuracy metrics #' @templateVar fn huber_loss #' @template return #' #' @inheritParams rmse #' #' @param delta A single `numeric` value. Defines the boundary where the loss function #' transitions from quadratic to linear. Defaults to 1. #' #' @author James Blair #' #' @references #' #' Huber, P. (1964). Robust Estimation of a Location Parameter. #' _Annals of Statistics_, 53 (1), 73-101. #' #' @template examples-numeric #' #' @export huber_loss <- function(data, ...) { UseMethod("huber_loss") } huber_loss <- new_numeric_metric( huber_loss, direction = "minimize" ) #' @rdname huber_loss #' @export huber_loss.data.frame <- function(data, truth, estimate, delta = 1, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "huber_loss", fn = huber_loss_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights), # Extra argument for huber_loss_impl() fn_options = list(delta = delta) ) } #' @export #' @rdname huber_loss huber_loss_vec <- function(truth, estimate, delta = 1, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } huber_loss_impl(truth, estimate, delta, case_weights) } huber_loss_impl <- function(truth, estimate, delta, case_weights, call = caller_env()) { # Weighted Huber Loss implementation confirmed against matlab: # https://www.mathworks.com/help/deeplearning/ref/dlarray.huber.html check_number_decimal(delta, min = 0, call = call) a <- truth - estimate abs_a <- abs(a) loss <- ifelse( abs_a <= delta, 0.5 * a^2, delta * (abs_a - 0.5 * delta) ) yardstick_mean(loss, case_weights = case_weights) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-huber_loss.R
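A small unweighted check of the piecewise definition in huber_loss_impl(): residuals within +/- delta are penalized quadratically, larger ones linearly. Toy numbers; a sketch assuming yardstick is loaded.

library(yardstick)

truth <- c(1, 2, 3, 10)
estimate <- c(1.2, 1.5, 3, 4)
delta <- 1

a <- truth - estimate
# Quadratic inside the delta boundary, linear outside it
manual <- mean(ifelse(abs(a) <= delta, 0.5 * a^2, delta * (abs(a) - 0.5 * delta)))

manual
huber_loss_vec(truth, estimate, delta = delta)  # should agree with `manual`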
#' Index of ideality of correlation #' #' @description #' #' Calculate the index of ideality of correlation. This metric has been #' studied in QSPR/QSAR models as a good criterion for the predictive #' potential of these models. It is highly dependent on the correlation #' coefficient as well as the mean absolute error. #' #' Note the application of IIC is useless under two conditions: #' #' * When the negative mean absolute error and positive mean absolute #' error are both zero. #' #' * When the outliers are symmetric. Since outliers are context #' dependent, please use your own checks to validate whether this #' restriction holds and whether the resulting IIC has #' interpretative value. #' #' The IIC is seen as an alternative to the traditional correlation #' coefficient and is in the same units as the original data. #' #' @family numeric metrics #' @family accuracy metrics #' @templateVar fn iic #' @template return #' #' @inheritParams rmse #' #' @references Toropova, A. and Toropov, A. (2017). "The index of ideality #' of correlation. A criterion of predictability of QSAR models for skin #' permeability?" _Science of the Total Environment_. 586: 466-472. #' #' @author Joyce Cahoon #' #' @template examples-numeric #' #' @export iic <- function(data, ...) { UseMethod("iic") } iic <- new_numeric_metric( iic, direction = "maximize" ) #' @rdname iic #' @export iic.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "iic", fn = iic_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname iic iic_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } iic_impl(truth, estimate, case_weights) } iic_impl <- function(truth, estimate, case_weights) { deltas <- truth - estimate neg <- deltas < 0 pos <- deltas >= 0 delta_neg <- deltas[neg] delta_pos <- deltas[pos] if (is.null(case_weights)) { case_weights_neg <- NULL case_weights_pos <- NULL } else { case_weights_neg <- case_weights[neg] case_weights_pos <- case_weights[pos] } # Using a best guess that weighted means are computed from sliced weights mae_neg <- yardstick_mean(abs(delta_neg), case_weights = case_weights_neg) mae_pos <- yardstick_mean(abs(delta_pos), case_weights = case_weights_pos) adjustment <- min(mae_neg, mae_pos) / max(mae_neg, mae_pos) correlation <- yardstick_cor(truth, estimate, case_weights = case_weights) correlation * adjustment }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-iic.R
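An unweighted sketch of the adjustment factor in iic_impl(): the correlation is shrunk by the ratio of the smaller to the larger of the negative-residual and positive-residual mean absolute errors. Toy values, assuming yardstick is loaded.

library(yardstick)

truth <- c(1, 2, 3, 4, 5)
estimate <- c(1.2, 1.9, 3.3, 3.8, 5.4)

deltas <- truth - estimate
mae_neg <- mean(abs(deltas[deltas < 0]))
mae_pos <- mean(abs(deltas[deltas >= 0]))

adjustment <- min(mae_neg, mae_pos) / max(mae_neg, mae_pos)
cor(truth, estimate) * adjustment
iic_vec(truth, estimate)  # should agree with the line above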
#' Mean absolute error #' #' Calculate the mean absolute error. This metric is in the same units as the #' original data. #' #' @family numeric metrics #' @family accuracy metrics #' @templateVar fn mae #' @template return #' #' @inheritParams rmse #' #' @author Max Kuhn #' #' @template examples-numeric #' #' @export mae <- function(data, ...) { UseMethod("mae") } mae <- new_numeric_metric( mae, direction = "minimize" ) #' @rdname mae #' @export mae.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "mae", fn = mae_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname mae mae_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } mae_impl(truth, estimate, case_weights) } mae_impl <- function(truth, estimate, case_weights) { errors <- abs(truth - estimate) yardstick_mean(errors, case_weights = case_weights) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-mae.R
#' Mean absolute percent error #' #' Calculate the mean absolute percentage error. This metric is in _relative #' units_. #' #' Note that a value of `Inf` is returned for `mape()` when any #' observed value is `0`. #' #' @family numeric metrics #' @family accuracy metrics #' @templateVar fn mape #' @template return #' #' @inheritParams rmse #' #' @author Max Kuhn #' #' @template examples-numeric #' #' @export #' mape <- function(data, ...) { UseMethod("mape") } mape <- new_numeric_metric( mape, direction = "minimize" ) #' @rdname mape #' @export mape.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "mape", fn = mape_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname mape mape_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } mape_impl(truth, estimate, case_weights) } mape_impl <- function(truth, estimate, case_weights) { errors <- abs((truth - estimate) / truth) out <- yardstick_mean(errors, case_weights = case_weights) out <- out * 100 out }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-mape.R
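A short unweighted illustration of the percentage formula in mape_impl(), and of the Inf result when an observed value is zero. Toy values; a sketch assuming yardstick is loaded.

library(yardstick)

truth <- c(10, 20, 40)
estimate <- c(12, 19, 50)

# Mean of |(truth - estimate) / truth|, reported on the percent scale
mean(abs((truth - estimate) / truth)) * 100
mape_vec(truth, estimate)  # should agree with the line above

# A zero in `truth` gives division by zero, so the metric returns Inf
mape_vec(c(0, 10), c(1, 11))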
#' Mean absolute scaled error #' #' Calculate the mean absolute scaled error. This metric is _scale independent_ #' and _symmetric_. It is generally used for comparing forecast error in #' time series settings. Due to the time series nature of this metric, it #' is necessary to order observations in ascending order by time. #' #' `mase()` is different from most numeric metrics. The original implementation #' of `mase()` calls for using the _in-sample_ naive mean absolute error to #' compute scaled errors with. It uses this instead of the out-of-sample error #' because there is a chance that the out-of-sample error cannot be computed #' when forecasting a very short horizon (i.e. the out of sample size is only #' 1 or 2). However, `yardstick` only knows about the out-of-sample `truth` and #' `estimate` values. Because of this, the out-of-sample error is used in the #' computation by default. If the in-sample naive mean absolute error is #' required and known, it can be passed through in the `mae_train` argument #' and it will be used instead. If the in-sample data is available, the #' naive mean absolute error can easily be computed with #' `mae(data, truth, lagged_truth)`. #' #' @family numeric metrics #' @family accuracy metrics #' @templateVar fn mase #' @template return #' #' @inheritParams rmse #' #' @param mae_train A numeric value which allows the user to provide the #' in-sample seasonal naive mean absolute error. If this value is not provided, #' then the out-of-sample seasonal naive mean absolute error will be calculated #' from `truth` and will be used instead. #' #' @param m An integer value of the number of lags used to calculate the #' in-sample seasonal naive error. The default is used for non-seasonal time #' series. If each observation was at the daily level and the data showed weekly #' seasonality, then `m = 7L` would be a reasonable choice for a 7-day seasonal #' naive calculation. #' #' @author Alex Hallam #' #' @references #' #' Rob J. Hyndman (2006). ANOTHER LOOK AT FORECAST-ACCURACY METRICS FOR #' INTERMITTENT DEMAND. _Foresight_, 4, 46. #' #' @template examples-numeric #' #' @export #' mase <- function(data, ...) { UseMethod("mase") } mase <- new_numeric_metric( mase, direction = "minimize" ) #' @rdname mase #' @export mase.data.frame <- function(data, truth, estimate, m = 1L, mae_train = NULL, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "mase", fn = mase_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights), # Extra argument for mase_impl() fn_options = list(mae_train = mae_train, m = m) ) } #' @export #' @rdname mase mase_vec <- function(truth, estimate, m = 1L, mae_train = NULL, na_rm = TRUE, case_weights = NULL, ...) 
{ check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } mase_impl( truth = truth, estimate = estimate, m = m, mae_train = mae_train, case_weights = case_weights ) } mase_impl <- function(truth, estimate, m = 1L, mae_train = NULL, case_weights = NULL, call = caller_env()) { check_number_whole(m, min = 0, call = call) check_number_decimal(mae_train, min = 0, allow_null = TRUE, call = call) if (is.null(mae_train)) { validate_truth_m(truth, m, call = call) } # Use out-of-sample snaive if mae_train is not provided if (is.null(mae_train)) { truth_lag <- dplyr::lag(truth, m) naive_error <- truth - truth_lag mae_denom <- mean(abs(naive_error), na.rm = TRUE) } else { mae_denom <- mae_train } error <- truth - estimate error <- error / mae_denom error <- abs(error) out <- yardstick_mean(error, case_weights = case_weights) out } validate_truth_m <- function(truth, m, call = caller_env()) { if (length(truth) <= m) { cli::cli_abort( "{.arg truth} ({length(truth)}) must have a length greater than \\ {.arg m} ({m}) to compute the out-of-sample naive mean absolute error.", call = call ) } }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-mase.R
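A sketch of the workflow the documentation describes for mase(): compute the in-sample (m = 1) naive mean absolute error from training data and pass it through `mae_train`; without it, the out-of-sample naive error is used. The time-ordered vectors here are hypothetical, and the example assumes yardstick and dplyr are loaded.

library(yardstick)
library(dplyr)

# Hypothetical, time-ordered training and test outcomes
train_truth <- c(10, 12, 11, 13, 14, 15, 16)
test_truth <- c(17, 18, 19)
test_estimate <- c(16.5, 18.4, 18.8)

# In-sample naive (m = 1) mean absolute error from the training series
m <- 1L
mae_train <- mean(abs(train_truth - dplyr::lag(train_truth, m)), na.rm = TRUE)

# Scaled by the in-sample naive error
mase_vec(test_truth, test_estimate, m = m, mae_train = mae_train)

# Default behaviour: scaled by the out-of-sample naive error instead
mase_vec(test_truth, test_estimate, m = m)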
#' Mean percentage error #' #' @description #' Calculate the mean percentage error. This metric is in _relative #' units_. It can be used as a measure of the `estimate`'s bias. #' #' Note that if _any_ `truth` values are `0`, a value of: #' `-Inf` (`estimate > 0`), `Inf` (`estimate < 0`), or `NaN` (`estimate == 0`) #' is returned for `mpe()`. #' #' @family numeric metrics #' @family accuracy metrics #' @templateVar fn mpe #' @template return #' #' @inheritParams rmse #' #' @author Thomas Bierhance #' #' @export #' @examples #' # `solubility_test$solubility` has zero values with corresponding #' # `$prediction` values that are negative. By definition, this causes `Inf` #' # to be returned from `mpe()`. #' solubility_test[solubility_test$solubility == 0, ] #' #' mpe(solubility_test, solubility, prediction) #' #' # We'll remove the zero values for demonstration #' solubility_test <- solubility_test[solubility_test$solubility != 0, ] #' #' # Supply truth and predictions as bare column names #' mpe(solubility_test, solubility, prediction) #' #' library(dplyr) #' #' set.seed(1234) #' size <- 100 #' times <- 10 #' #' # create 10 resamples #' solubility_resampled <- bind_rows( #' replicate( #' n = times, #' expr = sample_n(solubility_test, size, replace = TRUE), #' simplify = FALSE #' ), #' .id = "resample" #' ) #' #' # Compute the metric by group #' metric_results <- solubility_resampled %>% #' group_by(resample) %>% #' mpe(solubility, prediction) #' #' metric_results #' #' # Resampled mean estimate #' metric_results %>% #' summarise(avg_estimate = mean(.estimate)) mpe <- function(data, ...) { UseMethod("mpe") } mpe <- new_numeric_metric( mpe, direction = "zero" ) #' @rdname mpe #' @export mpe.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "mpe", fn = mpe_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname mpe mpe_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } mpe_impl(truth, estimate, case_weights) } mpe_impl <- function(truth, estimate, case_weights) { error <- (truth - estimate) / truth out <- yardstick_mean(error, case_weights = case_weights) out <- out * 100 out }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-mpe.R
#' Mean signed deviation #' #' @description #' Mean signed deviation (also known as mean signed difference, or mean signed #' error) computes the average differences between `truth` and `estimate`. A #' related metric is the mean absolute error ([mae()]). #' #' @details #' Mean signed deviation is rarely used, since positive and negative errors #' cancel each other out. For example, `msd_vec(c(100, -100), c(0, 0))` would #' return a seemingly "perfect" value of `0`, even though `estimate` is wildly #' different from `truth`. [mae()] attempts to remedy this by taking the #' absolute value of the differences before computing the mean. #' #' This metric is computed as `mean(truth - estimate)`, following the convention #' that an "error" is computed as `observed - predicted`. If you expected this #' metric to be computed as `mean(estimate - truth)`, reverse the sign of the #' result. #' #' @family numeric metrics #' @family accuracy metrics #' @templateVar fn msd #' @template return #' #' @inheritParams rmse #' #' @author Thomas Bierhance #' #' @template examples-numeric #' #' @export msd <- function(data, ...) { UseMethod("msd") } msd <- new_numeric_metric( msd, direction = "zero" ) #' @rdname msd #' @export msd.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "msd", fn = msd_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname msd msd_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } msd_impl(truth, estimate, case_weights) } msd_impl <- function(truth, estimate, case_weights) { yardstick_mean(truth - estimate, case_weights = case_weights) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-msd.R
#' Mean log loss for Poisson data #' #' Calculate the loss function for the Poisson distribution. #' #' @family numeric metrics #' @family accuracy metrics #' @templateVar fn poisson_log_loss #' @template return #' #' @inheritParams rmse #' #' @param truth The column identifier for the true counts (that is `integer`). #' This should be an unquoted column name although this argument is passed by #' expression and supports [quasiquotation][rlang::quasiquotation] (you can #' unquote column names). For `_vec()` functions, an `integer` vector. #' #' @author Max Kuhn #' #' @template examples-counts #' #' @export #' poisson_log_loss <- function(data, ...) { UseMethod("poisson_log_loss") } poisson_log_loss <- new_numeric_metric( poisson_log_loss, direction = "minimize" ) #' @rdname poisson_log_loss #' @export poisson_log_loss.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "poisson_log_loss", fn = poisson_log_loss_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname poisson_log_loss poisson_log_loss_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } poisson_log_loss_impl(truth, estimate, case_weights) } poisson_log_loss_impl <- function(truth, estimate, case_weights) { if (!is.integer(truth)) { truth <- as.integer(truth) } loss <- -stats::dpois(truth, estimate, log = TRUE) yardstick_mean(loss, case_weights = case_weights) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-poisson_log_loss.R
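An unweighted check of poisson_log_loss_impl(): the loss is the negative Poisson log-likelihood of each observed count given its predicted mean, averaged. Toy counts; a sketch assuming yardstick is loaded.

library(yardstick)

truth <- c(0L, 1L, 3L, 2L)
estimate <- c(0.5, 1.2, 2.5, 2.0)

# Average negative Poisson log-likelihood
mean(-stats::dpois(truth, lambda = estimate, log = TRUE))
poisson_log_loss_vec(truth, estimate)  # should agree with the line above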
#' Pseudo-Huber Loss #' #' Calculate the Pseudo-Huber Loss, a smooth approximation of [huber_loss()]. #' Like [huber_loss()], this is less sensitive to outliers than [rmse()]. #' #' @family numeric metrics #' @family accuracy metrics #' @templateVar fn huber_loss_pseudo #' @template return #' #' @inheritParams huber_loss #' #' @author James Blair #' #' @references #' #' Huber, P. (1964). Robust Estimation of a Location Parameter. #' _Annals of Statistics_, 53 (1), 73-101. #' #' Hartley, Richard (2004). Multiple View Geometry in Computer Vision. #' (Second Edition). Page 619. #' #' @template examples-numeric #' #' @export huber_loss_pseudo <- function(data, ...) { UseMethod("huber_loss_pseudo") } huber_loss_pseudo <- new_numeric_metric( huber_loss_pseudo, direction = "minimize" ) #' @rdname huber_loss_pseudo #' @export huber_loss_pseudo.data.frame <- function(data, truth, estimate, delta = 1, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "huber_loss_pseudo", fn = huber_loss_pseudo_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights), # Extra argument for huber_loss_pseudo_impl() fn_options = list(delta = delta) ) } #' @export #' @rdname huber_loss_pseudo huber_loss_pseudo_vec <- function(truth, estimate, delta = 1, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } huber_loss_pseudo_impl( truth = truth, estimate = estimate, delta = delta, case_weights = case_weights ) } huber_loss_pseudo_impl <- function(truth, estimate, delta, case_weights, call = caller_env()) { check_number_decimal(delta, min = 0, call = call) a <- truth - estimate loss <- delta^2 * (sqrt(1 + (a / delta)^2) - 1) yardstick_mean(loss, case_weights = case_weights) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-pseudo_huber_loss.R
#' Root mean squared error #' #' Calculate the root mean squared error. `rmse()` is a metric that is in #' the same units as the original data. #' #' @family numeric metrics #' @family accuracy metrics #' @templateVar fn rmse #' @template return #' #' @param data A `data.frame` containing the columns specified by the `truth` #' and `estimate` arguments. #' #' @param truth The column identifier for the true results #' (that is `numeric`). This should be an unquoted column name although #' this argument is passed by expression and supports #' [quasiquotation][rlang::quasiquotation] (you can unquote column #' names). For `_vec()` functions, a `numeric` vector. #' #' @param estimate The column identifier for the predicted #' results (that is also `numeric`). As with `truth` this can be #' specified different ways but the primary method is to use an #' unquoted variable name. For `_vec()` functions, a `numeric` vector. #' #' @param na_rm A `logical` value indicating whether `NA` #' values should be stripped before the computation proceeds. #' #' @param case_weights The optional column identifier for case weights. This #' should be an unquoted column name that evaluates to a numeric column in #' `data`. For `_vec()` functions, a numeric vector, #' [hardhat::importance_weights()], or [hardhat::frequency_weights()]. #' #' @param ... Not currently used. #' #' @author Max Kuhn #' #' @template examples-numeric #' #' @export #' rmse <- function(data, ...) { UseMethod("rmse") } rmse <- new_numeric_metric( rmse, direction = "minimize" ) #' @rdname rmse #' @export rmse.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "rmse", fn = rmse_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname rmse rmse_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } rmse_impl(truth, estimate, case_weights = case_weights) } rmse_impl <- function(truth, estimate, case_weights) { errors <- (truth - estimate)^2 sqrt(yardstick_mean(errors, case_weights = case_weights)) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-rmse.R
#' Ratio of performance to deviation #' #' These functions are appropriate for cases where the model outcome is a #' numeric. The ratio of performance to deviation #' ([rpd()]) and the ratio of performance to inter-quartile ([rpiq()]) #' are both measures of consistency/correlation between observed #' and predicted values (and not of accuracy). #' #' In the field of spectroscopy in particular, the ratio #' of performance to deviation (RPD) has been used as the standard #' way to report the quality of a model. It is the ratio between #' the standard deviation of a variable and the standard error of #' prediction of that variable by a given model. However, its #' systematic use has been criticized by several authors, since #' using the standard deviation to represent the spread of a #' variable can be misleading on skewed dataset. The ratio of #' performance to inter-quartile has been introduced by #' Bellon-Maurel et al. (2010) to address some of these issues, and #' generalise the RPD to non-normally distributed variables. #' #' @family numeric metrics #' @family consistency metrics #' @templateVar fn rpd #' @template return #' #' @inheritParams rmse #' #' @author Pierre Roudier #' #' @seealso #' #' The closely related inter-quartile metric: [rpiq()] #' #' @references #' #' Williams, P.C. (1987) Variables affecting near-infrared #' reflectance spectroscopic analysis. In: Near Infrared Technology #' in the Agriculture and Food Industries. 1st Ed. P.Williams and #' K.Norris, Eds. Am. Cereal Assoc. Cereal Chem., St. Paul, MN. #' #' Bellon-Maurel, V., Fernandez-Ahumada, E., Palagos, B., Roger, #' J.M. and McBratney, A., (2010). Critical review of chemometric #' indicators commonly used for assessing the quality of the #' prediction of soil attributes by NIR spectroscopy. TrAC Trends #' in Analytical Chemistry, 29(9), pp.1073-1081. #' #' @template examples-numeric #' #' @export rpd <- function(data, ...) { UseMethod("rpd") } rpd <- new_numeric_metric( rpd, direction = "maximize" ) #' @rdname rpd #' @export rpd.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "rpd", fn = rpd_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname rpd rpd_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } rpd_impl(truth, estimate, case_weights) } rpd_impl <- function(truth, estimate, case_weights) { sd <- yardstick_sd(truth, case_weights = case_weights) rmse <- rmse_vec(truth, estimate, case_weights = case_weights) sd / rmse }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-rpd.R
#' Ratio of performance to inter-quartile #' #' These functions are appropriate for cases where the model outcome is a #' numeric. The ratio of performance to deviation #' ([rpd()]) and the ratio of performance to inter-quartile ([rpiq()]) #' are both measures of consistency/correlation between observed #' and predicted values (and not of accuracy). #' #' @inherit rpd details #' @inherit rpd references #' #' @family numeric metrics #' @family consistency metrics #' @templateVar fn rpd #' @template return #' #' @inheritParams rmse #' #' @author Pierre Roudier #' #' @seealso #' #' The closely related deviation metric: [rpd()] #' #' @template examples-numeric #' #' @export rpiq <- function(data, ...) { UseMethod("rpiq") } rpiq <- new_numeric_metric( rpiq, direction = "maximize" ) #' @rdname rpiq #' @export rpiq.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "rpiq", fn = rpiq_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname rpiq rpiq_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } rpiq_impl(truth, estimate, case_weights) } rpiq_impl <- function(truth, estimate, case_weights) { quantiles <- yardstick_quantile( x = truth, probabilities = c(0.25, 0.75), case_weights = case_weights ) iqr <- quantiles[[2L]] - quantiles[[1L]] rmse <- rmse_vec(truth, estimate, case_weights = case_weights) iqr / rmse }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-rpiq.R
#' R squared #' #' Calculate the coefficient of determination using correlation. For the #' traditional measure of R squared, see [rsq_trad()]. #' #' The two estimates for the #' coefficient of determination, [rsq()] and [rsq_trad()], differ by #' their formula. The former guarantees a value on (0, 1) while the #' latter can generate inaccurate values when the model is #' non-informative (see the examples). Both are measures of #' consistency/correlation and not of accuracy. #' #' `rsq()` is simply the squared correlation between `truth` and `estimate`. #' #' Because `rsq()` internally computes a correlation, if either `truth` or #' `estimate` are constant it can result in a divide by zero error. In these #' cases, a warning is thrown and `NA` is returned. This can occur when a model #' predicts a single value for all samples. For example, a regularized model #' that eliminates all predictors except for the intercept would do this. #' Another example would be a CART model that contains no splits. #' #' @family numeric metrics #' @family consistency metrics #' @templateVar fn rsq #' @template return #' #' @inheritParams rmse #' #' @author Max Kuhn #' #' @references #' #' Kvalseth. Cautionary note about \eqn{R^2}. #' American Statistician (1985) vol. 39 (4) pp. 279-285. #' #' @template examples-numeric #' @examples #' # With uninformitive data, the traditional version of R^2 can return #' # negative values. #' set.seed(2291) #' solubility_test$randomized <- sample(solubility_test$prediction) #' rsq(solubility_test, solubility, randomized) #' rsq_trad(solubility_test, solubility, randomized) #' #' # A constant `truth` or `estimate` vector results in a warning from #' # a divide by zero error in the correlation calculation. #' # `NA` will be returned in these cases. #' truth <- c(1, 2) #' estimate <- c(1, 1) #' rsq_vec(truth, estimate) #' @export rsq <- function(data, ...) { UseMethod("rsq") } rsq <- new_numeric_metric( rsq, direction = "maximize" ) #' @rdname rsq #' @export rsq.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "rsq", fn = rsq_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname rsq rsq_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } rsq_impl(truth, estimate, case_weights) } rsq_impl <- function(truth, estimate, case_weights) { yardstick_cor(truth, estimate, case_weights = case_weights)^2 }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-rsq.R
#' R squared - traditional #' #' Calculate the coefficient of determination using the traditional definition #' of R squared using sum of squares. For a measure of R squared that is #' strictly between (0, 1), see [rsq()]. #' #' The two estimates for the #' coefficient of determination, [rsq()] and [rsq_trad()], differ by #' their formula. The former guarantees a value on (0, 1) while the #' latter can generate inaccurate values when the model is #' non-informative (see the examples). Both are measures of #' consistency/correlation and not of accuracy. #' #' #' @family numeric metrics #' @family consistency metrics #' @templateVar fn rsq_trad #' @template return #' #' @inheritParams rmse #' #' @author Max Kuhn #' #' @references #' #' Kvalseth. Cautionary note about \eqn{R^2}. #' American Statistician (1985) vol. 39 (4) pp. 279-285. #' #' @template examples-numeric #' @examples #' # With uninformitive data, the traditional version of R^2 can return #' # negative values. #' set.seed(2291) #' solubility_test$randomized <- sample(solubility_test$prediction) #' rsq(solubility_test, solubility, randomized) #' rsq_trad(solubility_test, solubility, randomized) #' #' @export #' rsq_trad <- function(data, ...) { UseMethod("rsq_trad") } rsq_trad <- new_numeric_metric( rsq_trad, direction = "maximize" ) #' @rdname rsq_trad #' @export rsq_trad.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "rsq_trad", fn = rsq_trad_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname rsq_trad rsq_trad_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } rsq_trad_impl(truth, estimate, case_weights) } rsq_trad_impl <- function(truth, estimate, case_weights) { # Weighted calculation from the following, basically computing `y_bar`, # `SS_res`, and `SS_tot` in weighted manners: # https://stats.stackexchange.com/questions/83826/is-a-weighted-r2-in-robust-linear-model-meaningful-for-goodness-of-fit-analys/375752#375752 # https://github.com/scikit-learn/scikit-learn/blob/582fa30a31ffd1d2afc6325ec3506418e35b88c2/sklearn/metrics/_regression.py#L805 truth_mean <- yardstick_mean(truth, case_weights = case_weights) SS_residuals <- yardstick_sum((truth - estimate)^2, case_weights = case_weights) SS_total <- yardstick_sum((truth - truth_mean)^2, case_weights = case_weights) 1 - (SS_residuals / SS_total) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-rsq_trad.R
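In the unweighted case rsq_trad_impl() reduces to 1 - SS_res / SS_tot, which can go negative when the model is worse than predicting the mean of `truth`. Toy values; a sketch assuming yardstick is loaded.

library(yardstick)

truth <- c(2, 4, 6, 8)
estimate <- c(9, 1, 7, 2)  # deliberately poor predictions

ss_res <- sum((truth - estimate)^2)
ss_tot <- sum((truth - mean(truth))^2)

1 - ss_res / ss_tot            # negative: worse than predicting mean(truth)
rsq_trad_vec(truth, estimate)  # should agree with the line above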
#' Symmetric mean absolute percentage error #' #' Calculate the symmetric mean absolute percentage error. This metric is in #' _relative units_. #' #' This implementation of `smape()` is the "usual definition" where the #' denominator is divided by two. #' #' @family numeric metrics #' @family accuracy metrics #' @templateVar fn smape #' @template return #' #' @inheritParams rmse #' #' @author Max Kuhn, Riaz Hedayati #' #' @template examples-numeric #' #' @export #' smape <- function(data, ...) { UseMethod("smape") } smape <- new_numeric_metric( smape, direction = "minimize" ) #' @rdname smape #' @export smape.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { numeric_metric_summarizer( name = "smape", fn = smape_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname smape smape_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_numeric_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } smape_impl(truth, estimate, case_weights) } smape_impl <- function(truth, estimate, case_weights) { numer <- abs(estimate - truth) denom <- (abs(truth) + abs(estimate)) / 2 error <- numer / denom out <- yardstick_mean(error, case_weights = case_weights) out <- out * 100 out }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/num-smape.R
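A short unweighted check of the "usual definition" in smape_impl(), where the denominator is the average of the absolute observed and predicted values. Toy values; a sketch assuming yardstick is loaded.

library(yardstick)

truth <- c(10, 20, 30)
estimate <- c(12, 18, 33)

# Denominator is (|truth| + |estimate|) / 2, reported on the percent scale
mean(abs(estimate - truth) / ((abs(truth) + abs(estimate)) / 2)) * 100
smape_vec(truth, estimate)  # should agree with the line above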
#' Area under the precision recall curve #' #' @description #' #' `average_precision()` is an alternative to `pr_auc()` that avoids any #' ambiguity about what the value of `precision` should be when `recall == 0` #' and there are not yet any false positive values (some say it should be `0`, #' others say `1`, others say undefined). #' #' It computes a weighted average of the precision values returned from #' [pr_curve()], where the weights are the increase in recall from the previous #' threshold. See [pr_curve()] for the full curve. #' #' @details #' #' The computation for average precision is a weighted average of the precision #' values. Assuming you have `n` rows returned from [pr_curve()], it is a sum #' from `2` to `n`, multiplying the precision value `p_i` by the increase in #' recall over the previous threshold, `r_i - r_(i-1)`. #' #' \deqn{AP = \sum (r_{i} - r_{i-1}) * p_i} #' #' By summing from `2` to `n`, the precision value `p_1` is never used. While #' [pr_curve()] returns a value for `p_1`, it is technically undefined as #' `tp / (tp + fp)` with `tp = 0` and `fp = 0`. A common convention is to use #' `1` for `p_1`, but this metric has the nice property of avoiding the #' ambiguity. On the other hand, `r_1` is well defined as long as there are #' some events (`p`), and it is `tp / p` with `tp = 0`, so `r_1 = 0`. #' #' When `p_1` is defined as `1`, the `average_precision()` and `roc_auc()` #' values are often very close to one another. #' #' @family class probability metrics #' @templateVar fn average_precision #' @template return #' @template multiclass-prob #' @template event_first #' #' @inheritParams pr_auc #' #' @seealso #' #' [pr_curve()] for computing the full precision recall curve. #' #' [pr_auc()] for computing the area under the precision recall curve using #' the trapezoidal rule. #' #' @template examples-binary-prob #' @template examples-multiclass-prob #' #' @export average_precision <- function(data, ...) { UseMethod("average_precision") } average_precision <- new_prob_metric( average_precision, direction = "maximize" ) #' @export #' @rdname average_precision average_precision.data.frame <- function(data, truth, ..., estimator = NULL, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL) { prob_metric_summarizer( name = "average_precision", fn = average_precision_vec, data = data, truth = {{ truth }}, ..., estimator = estimator, na_rm = na_rm, event_level = event_level, case_weights = {{ case_weights }} ) } #' @export #' @rdname average_precision average_precision_vec <- function(truth, estimate, estimator = NULL, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL, ...) 
{ abort_if_class_pred(truth) estimator <- finalize_estimator(truth, estimator, "average_precision") check_prob_metric(truth, estimate, case_weights, estimator) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } average_precision_estimator_impl( truth = truth, estimate = estimate, estimator = estimator, event_level = event_level, case_weights = case_weights ) } average_precision_estimator_impl <- function(truth, estimate, estimator, event_level, case_weights) { if (is_binary(estimator)) { average_precision_binary(truth, estimate, event_level, case_weights) } else { # weights for macro / macro_weighted are based on truth frequencies # (this is the usual definition) truth_table <- yardstick_truth_table(truth, case_weights = case_weights) w <- get_weights(truth_table, estimator) out_vec <- average_precision_multiclass(truth, estimate, case_weights) stats::weighted.mean(out_vec, w) } } average_precision_binary <- function(truth, estimate, event_level, case_weights) { # `na_rm` should already be done by `average_precision_vec()` curve <- pr_curve_vec( truth = truth, estimate = estimate, na_rm = FALSE, event_level = event_level, case_weights = case_weights ) recalls <- curve[["recall"]] precisions <- curve[["precision"]] sum(diff(recalls) * precisions[-1]) } average_precision_multiclass <- function(truth, estimate, case_weights) { results <- one_vs_all_impl( fn = average_precision_binary, truth = truth, estimate = estimate, case_weights = case_weights ) vapply(results, FUN.VALUE = numeric(1), function(x) x) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-average_precision.R
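The weighted-average definition can be reproduced directly from the pr_curve() output: each precision value is weighted by the increase in recall over the previous threshold, and the first precision value is never used. A sketch using the two_class_example data shipped with yardstick.

library(yardstick)
data("two_class_example")

curve <- pr_curve(two_class_example, truth, Class1)

# AP = sum over i >= 2 of (r_i - r_(i-1)) * p_i
sum(diff(curve$recall) * curve$precision[-1])

# Same value, returned as a one-row tibble
average_precision(two_class_example, truth, Class1)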
# For use with the `pr_curve()` and `roc_curve()`. # Returns a data frame with: # - Unique thresholds # - Number of true positives per threshold # - Number of false positives per threshold binary_threshold_curve <- function(truth, estimate, ..., event_level = yardstick_event_level(), case_weights = NULL) { check_dots_empty() if (is.null(case_weights)) { case_weights <- rep(1, times = length(truth)) } case_weights <- vec_cast(case_weights, to = double()) if (!is.factor(truth)) { cli::cli_abort( "{.arg truth} must be a factor, not {.obj_type_friendly {truth}}.", .internal = TRUE ) } if (length(levels(truth)) != 2L) { cli::cli_abort( "{.arg truth} must have two levels, not {length(levels(truth))}.", .internal = TRUE ) } if (!is.numeric(estimate)) { cli::cli_abort( "{.arg estimate} must be numeric, not {.obj_type_friendly {estimate}}.", .internal = TRUE ) } if (length(truth) != length(estimate)) { cli::cli_abort( "{.arg truth} ({length(truth)}) and \\ {.arg estimate} ({length(estimate)}) must be the same length.", .internal = TRUE ) } if (length(truth) != length(case_weights)) { cli::cli_abort( "{.arg truth} ({length(truth)}) and \\ {.arg case_weights} ({length(case_weights)}) must be the same length.", .internal = TRUE ) } truth <- unclass(truth) # Convert to `1 == event`, `0 == non-event` if (is_event_first(event_level)) { truth <- as.integer(truth == 1L) } else { truth <- as.integer(truth == 2L) } # Drop any `0` weights. # These shouldn't affect the result, but can result in divide by zero # issues if they are left in. detect_zero_weight <- case_weights == 0 if (any(detect_zero_weight)) { detect_non_zero_weight <- !detect_zero_weight truth <- truth[detect_non_zero_weight] estimate <- estimate[detect_non_zero_weight] case_weights <- case_weights[detect_non_zero_weight] } # Sort by decreasing `estimate` order <- order(estimate, decreasing = TRUE) truth <- truth[order] estimate <- estimate[order] case_weights <- case_weights[order] # Skip repeated probabilities. # We want the last duplicate to ensure that we capture all the events from the # `cumsum()`, so we use `fromLast`. loc_unique <- which(!duplicated(estimate, fromLast = TRUE)) thresholds <- estimate[loc_unique] case_weights_events <- truth * case_weights case_weights_non_events <- (1 - truth) * case_weights if (sum(case_weights_events) == 0L) { cli::cli_warn( "There are 0 event cases in {.arg truth}, results will be meaningless." ) } tp <- cumsum(case_weights_events) tp <- tp[loc_unique] fp <- cumsum(case_weights_non_events) fp <- fp[loc_unique] dplyr::tibble( threshold = thresholds, tp = tp, fp = fp ) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-binary-thresholds.R
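The core of this internal helper, in base R terms: sort by decreasing probability, keep the last index of each run of tied probabilities, and take (weighted) cumulative sums of events and non-events. A toy, unit-weight illustration of that logic (not the unexported function itself):

# 1 = event, 0 = non-event; estimates are toy class probabilities
truth <- c(1, 0, 1, 1, 0)
estimate <- c(0.9, 0.8, 0.8, 0.4, 0.2)

ord <- order(estimate, decreasing = TRUE)
truth <- truth[ord]
estimate <- estimate[ord]

# Keep the last index within each run of tied probabilities
keep <- which(!duplicated(estimate, fromLast = TRUE))

data.frame(
  threshold = estimate[keep],
  tp = cumsum(truth)[keep],
  fp = cumsum(1 - truth)[keep]
)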
#' Brier score for classification models #' #' Compute the Brier score for a classification model. #' #' @family class probability metrics #' @templateVar fn brier_class #' @template return #' @details #' #' The Brier score is analogous to the mean squared error in regression models. #' The difference between a binary indicator for a class and its corresponding #' class probability are squared and averaged. #' #' This function uses the convention in Kruppa _et al_ (2014) and divides the #' result by two. #' #' Smaller values of the score are associated with better model performance. #' #' @section Multiclass: #' Brier scores can be computed in the same way for any number of classes. #' Because of this, no averaging types are supported. #' #' @inheritParams pr_auc #' #' @author Max Kuhn #' #' @references Kruppa, J., Liu, Y., Diener, H.-C., Holste, T., Weimar, C., #' Koonig, I. R., and Ziegler, A. (2014) Probability estimation with machine #' learning methods for dichotomous and multicategory outcome: Applications. #' Biometrical Journal, 56 (4): 564-583. #' @examples #' # Two class #' data("two_class_example") #' brier_class(two_class_example, truth, Class1) #' #' # Multiclass #' library(dplyr) #' data(hpc_cv) #' #' # You can use the col1:colN tidyselect syntax #' hpc_cv %>% #' filter(Resample == "Fold01") %>% #' brier_class(obs, VF:L) #' #' # Groups are respected #' hpc_cv %>% #' group_by(Resample) %>% #' brier_class(obs, VF:L) #' #' @export brier_class <- function(data, ...) { UseMethod("brier_class") } brier_class <- new_prob_metric( brier_class, direction = "minimize" ) #' @export #' @rdname brier_class brier_class.data.frame <- function(data, truth, ..., na_rm = TRUE, case_weights = NULL) { case_weights_quo <- enquo(case_weights) prob_metric_summarizer( name = "brier_class", fn = brier_class_vec, data = data, truth = !!enquo(truth), ..., na_rm = na_rm, case_weights = !!case_weights_quo ) } #' @rdname brier_class #' @export brier_class_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) 
{ abort_if_class_pred(truth) estimator <- finalize_estimator(truth, metric_class = "brier_class") check_prob_metric(truth, estimate, case_weights, estimator) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } brier_class_estimator_impl( truth = truth, estimate = estimate, case_weights = case_weights ) } brier_class_estimator_impl <- function(truth, estimate, case_weights) { brier_factor( truth = truth, estimate = estimate, case_weights = case_weights ) } # If `truth` is already a vector or matrix of binary data brier_ind <- function(truth, estimate, case_weights = NULL) { if (is.vector(truth)) { truth <- matrix(truth, ncol = 1) } if (is.vector(estimate)) { estimate <- matrix(estimate, ncol = 1) } # In the binary case: if (ncol(estimate) == 1 && ncol(truth) == 2) { estimate <- unname(estimate) estimate <- vec_cbind(estimate, 1 - estimate, .name_repair = "unique_quiet") } resids <- (truth - estimate)^2 if (is.null(case_weights)) { case_weights <- rep(1, nrow(resids)) } not_missing <- !is.na(case_weights) resids <- resids[not_missing, , drop = FALSE] case_weights <- case_weights[not_missing] # Normalize weights (in case negative weights) case_weights <- exp(case_weights) / sum(exp(case_weights)) res <- sum(resids * case_weights) / (2 * sum(case_weights)) res } # When `truth` is a factor brier_factor <- function(truth, estimate, case_weights = NULL) { inds <- hardhat::fct_encode_one_hot(truth) case_weights <- vctrs::vec_cast(case_weights, to = double()) brier_ind(inds, estimate, case_weights) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-brier_class.R
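For a two-class problem with unit case weights, the score reduces to the mean squared difference between the event indicator and the event probability: the division by two in brier_ind() cancels the doubled residual coming from the two one-hot columns. A sketch using the two_class_example data shipped with yardstick.

library(yardstick)
data("two_class_example")

# Indicator for the first level (Class1) versus its predicted probability
y <- as.integer(two_class_example$truth == "Class1")
p <- two_class_example$Class1

mean((y - p)^2)
brier_class(two_class_example, truth, Class1)  # should agree with the line above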
#' Costs function for poor classification #' #' @description #' `classification_cost()` calculates the cost of a poor prediction based on #' user-defined costs. The costs are multiplied by the estimated class #' probabilities and the mean cost is returned. #' #' @details #' As an example, suppose that there are three classes: `"A"`, `"B"`, and `"C"`. #' Suppose there is a truly `"A"` observation with class probabilities `A = 0.3 #' / B = 0.3 / C = 0.4`. Suppose that, when the true result is class `"A"`, the #' costs for each class were `A = 0 / B = 5 / C = 10`, penalizing the #' probability of incorrectly predicting `"C"` more than predicting `"B"`. The #' cost for this prediction would be `0.3 * 0 + 0.3 * 5 + 0.4 * 10`. This #' calculation is done for each sample and the individual costs are averaged. #' #' @family class probability metrics #' @templateVar fn class_cost #' @template return #' #' @inheritParams pr_auc #' #' @param costs A data frame with columns `"truth"`, `"estimate"`, and `"cost"`. #' #' `"truth"` and `"estimate"` should be character columns containing unique #' combinations of the levels of the `truth` factor. #' #' `"costs"` should be a numeric column representing the cost that should #' be applied when the `"estimate"` is predicted, but the true result is #' `"truth"`. #' #' It is often the case that when `"truth" == "estimate"`, the cost is zero #' (no penalty for correct predictions). #' #' If any combinations of the levels of `truth` are missing, their costs are #' assumed to be zero. #' #' If `NULL`, equal costs are used, applying a cost of `0` to correct #' predictions, and a cost of `1` to incorrect predictions. #' #' @author Max Kuhn #' #' @export #' @examples #' library(dplyr) #' #' # --------------------------------------------------------------------------- #' # Two class example #' data(two_class_example) #' #' # Assuming `Class1` is our "event", this penalizes false positives heavily #' costs1 <- tribble( #' ~truth, ~estimate, ~cost, #' "Class1", "Class2", 1, #' "Class2", "Class1", 2 #' ) #' #' # Assuming `Class1` is our "event", this penalizes false negatives heavily #' costs2 <- tribble( #' ~truth, ~estimate, ~cost, #' "Class1", "Class2", 2, #' "Class2", "Class1", 1 #' ) #' #' classification_cost(two_class_example, truth, Class1, costs = costs1) #' #' classification_cost(two_class_example, truth, Class1, costs = costs2) #' #' # --------------------------------------------------------------------------- #' # Multiclass #' data(hpc_cv) #' #' # Define cost matrix from Kuhn and Johnson (2013) #' hpc_costs <- tribble( #' ~estimate, ~truth, ~cost, #' "VF", "VF", 0, #' "VF", "F", 1, #' "VF", "M", 5, #' "VF", "L", 10, #' "F", "VF", 1, #' "F", "F", 0, #' "F", "M", 5, #' "F", "L", 5, #' "M", "VF", 1, #' "M", "F", 1, #' "M", "M", 0, #' "M", "L", 1, #' "L", "VF", 1, #' "L", "F", 1, #' "L", "M", 1, #' "L", "L", 0 #' ) #' #' # You can use the col1:colN tidyselect syntax #' hpc_cv %>% #' filter(Resample == "Fold01") %>% #' classification_cost(obs, VF:L, costs = hpc_costs) #' #' # Groups are respected #' hpc_cv %>% #' group_by(Resample) %>% #' classification_cost(obs, VF:L, costs = hpc_costs) classification_cost <- function(data, ...) 
{ UseMethod("classification_cost") } classification_cost <- new_prob_metric( fn = classification_cost, direction = "minimize" ) #' @rdname classification_cost #' @export classification_cost.data.frame <- function(data, truth, ..., costs = NULL, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL) { prob_metric_summarizer( name = "classification_cost", fn = classification_cost_vec, data = data, truth = !!enquo(truth), ..., na_rm = na_rm, event_level = event_level, case_weights = !!enquo(case_weights), # Extra argument for classification_cost_impl() fn_options = list(costs = costs) ) } #' @rdname classification_cost #' @export classification_cost_vec <- function(truth, estimate, costs = NULL, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL, ...) { abort_if_class_pred(truth) estimator <- finalize_estimator(truth, metric_class = "classification_cost") check_prob_metric(truth, estimate, case_weights, estimator) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } classification_cost_estimator_impl( truth = truth, estimate = estimate, costs = costs, estimator = estimator, event_level = event_level, case_weights = case_weights ) } classification_cost_estimator_impl <- function(truth, estimate, costs, estimator, event_level, case_weights) { if (is_binary(estimator)) { classification_cost_binary(truth, estimate, costs, event_level, case_weights) } else { classification_cost_multiclass(truth, estimate, costs, case_weights) } } classification_cost_binary <- function(truth, estimate, costs, event_level, case_weights) { if (is_event_first(event_level)) { level1 <- estimate level2 <- 1 - estimate } else { level1 <- 1 - estimate level2 <- estimate } estimate <- c(level1, level2) estimate <- matrix(estimate, ncol = 2) classification_cost_multiclass(truth, estimate, costs, case_weights) } classification_cost_multiclass <- function(truth, estimate, costs, case_weights) { levels <- levels(truth) costs <- validate_costs(costs, levels) costs <- pivot_costs(costs, levels) costs <- recycle_costs(costs, truth) costs <- costs[levels] costs <- as.matrix(costs) out <- estimate * costs out <- rowSums(out) out <- yardstick_mean(out, case_weights = case_weights) out } validate_costs <- function(costs, levels) { if (is.null(costs)) { costs <- generate_equal_cost_grid(levels) return(costs) } check_data_frame(costs, allow_null = TRUE) columns <- names(costs) if (length(columns) != 3L) { cli::cli_abort( "{.arg costs} must be a data.frame with 3 columns, not {length(columns)}." ) } ok <- identical(sort(columns), sort(c("truth", "estimate", "cost"))) if (!ok) { cli::cli_abort( "{.arg costs} must have columns: \\ {.val truth}, {.val estimate}, and {.val cost}. Not {columns}." ) } if (is.factor(costs$truth)) { costs$truth <- as.character(costs$truth) } if (!is.character(costs$truth)) { cli::cli_abort( "{.code costs$truth} must be a character or factor column, \\ not {.obj_type_friendly {costs$truth}}." ) } if (is.factor(costs$estimate)) { costs$estimate <- as.character(costs$estimate) } if (!is.character(costs$estimate)) { cli::cli_abort( "{.code costs$estimate} must be a character or factor column, \\ not {.obj_type_friendly {costs$estimate}}." ) } if (!is.numeric(costs$cost)) { cli::cli_abort( "{.code costs$cost} must be a numeric column, \\ not {.obj_type_friendly {costs$cost}}." 
) } ok <- all(costs$truth %in% levels) if (is_false(ok)) { levels <- quote_and_collapse(levels) cli::cli_abort("{.code costs$truth} can only contain {levels}.") } ok <- all(costs$estimate %in% levels) if (is_false(ok)) { levels <- quote_and_collapse(levels) cli::cli_abort("{.code costs$estimate} can only contain {levels}.") } pairs <- costs[c("truth", "estimate")] cost <- costs$cost not_ok <- vec_duplicate_any(pairs) if (not_ok) { cli::cli_abort( "{.field costs} cannot have duplicate \\ {.field truth} / {.field estimate} combinations." ) } out <- generate_all_level_combinations(levels) # Detect user specified cost locations locs <- vec_match(pairs, out) # Default to no cost out$cost <- 0 # Update to user specified cost out$cost[locs] <- cost out } # - 0 cost for correct predictions # - 1 cost for incorrect predictions generate_equal_cost_grid <- function(levels) { costs <- generate_all_level_combinations(levels) costs$cost <- ifelse(costs$truth == costs$estimate, yes = 0, no = 1) costs } generate_all_level_combinations <- function(levels) { # `expand.grid()` expands first column fastest, # but we want first column slowest so we reverse the columns grid <- expand.grid(estimate = levels, truth = levels) grid <- dplyr::as_tibble(grid) grid <- grid[c("truth", "estimate")] grid } pivot_costs <- function(costs, levels) { # Must be a data frame, not a tibble, for `reshape()` to work costs <- as.data.frame(costs) # tidyr::pivot_wider(costs, truth, names_from = estimate, values_from = cost) costs <- stats::reshape( costs, v.names = "cost", idvar = "truth", timevar = "estimate", direction = "wide", sep = "." ) # Ensure column ordering matches `truth` level ordering columns <- paste0("cost.", levels) costs <- costs[c("truth", columns)] names(costs) <- c("truth", levels) costs <- dplyr::as_tibble(costs) costs } recycle_costs <- function(costs, truth) { levels <- levels(truth) # Expand `costs` to equal size of `truth`, matching the `truth` values needles <- truth haystack <- factor(costs$truth, levels = levels) locs <- vec_match(needles, haystack) costs <- vec_slice(costs, locs) costs }
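# Illustrative sketch (not part of the package): reproducing the worked
# example from the details section above. The objects `example_costs` and
# `example_data` are made-up names for demonstration only; unspecified
# truth/estimate combinations default to a cost of zero.
library(dplyr)
library(yardstick)

example_costs <- tribble(
  ~truth, ~estimate, ~cost,
  "A", "B", 5,
  "A", "C", 10
)

example_data <- tibble(
  truth = factor("A", levels = c("A", "B", "C")),
  A = 0.3, B = 0.3, C = 0.4
)

# Expected value: 0.3 * 0 + 0.3 * 5 + 0.4 * 10 = 5.5
classification_cost(example_data, truth, A, B, C, costs = example_costs)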
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-classification_cost.R
#' Gain capture #' #' `gain_capture()` is a measure of performance similar to an AUC calculation, #' but applied to a gain curve. #' #' `gain_capture()` calculates the area _under_ the gain curve, but _above_ #' the baseline, and then divides that by the area _under_ a perfect gain curve, #' but _above_ the baseline. It is meant to represent the amount of potential #' gain "captured" by the model. #' #' The `gain_capture()` metric is identical to the _accuracy ratio (AR)_, which #' is also sometimes called the _gini coefficient_. These two are generally #' calculated on a cumulative accuracy profile curve, but this is the same as #' a gain curve. See the Engelmann reference for more information. #' #' @family class probability metrics #' @templateVar fn gain_capture #' @template event_first #' @template return #' @template multiclass-prob #' #' @inheritParams pr_auc #' #' @author Max Kuhn #' #' @references #' #' Engelmann, Bernd & Hayden, Evelyn & Tasche, Dirk (2003). #' "Measuring the Discriminative Power of Rating Systems," #' Discussion Paper Series 2: Banking and Financial Studies 2003,01, #' Deutsche Bundesbank. #' #' @seealso #' #' [gain_curve()] to compute the full gain curve. #' #' @template examples-binary-prob #' @template examples-multiclass-prob #' @examples #' # --------------------------------------------------------------------------- #' # Visualize gain_capture() #' #' # Visually, this represents the area under the black curve, but above the #' # 45 degree line, divided by the area of the shaded triangle. #' library(ggplot2) #' autoplot(gain_curve(two_class_example, truth, Class1)) #' #' @export gain_capture <- function(data, ...) { UseMethod("gain_capture") } gain_capture <- new_prob_metric( gain_capture, direction = "maximize" ) #' @rdname gain_capture #' @export gain_capture.data.frame <- function(data, truth, ..., estimator = NULL, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL) { prob_metric_summarizer( name = "gain_capture", fn = gain_capture_vec, data = data, truth = !!enquo(truth), ..., estimator = estimator, na_rm = na_rm, event_level = event_level, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname gain_capture gain_capture_vec <- function(truth, estimate, estimator = NULL, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL, ...) 
{ abort_if_class_pred(truth) estimator <- finalize_estimator(truth, estimator, "gain_capture") check_prob_metric(truth, estimate, case_weights, estimator) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } gain_capture_estimator_impl( truth = truth, estimate = estimate, estimator = estimator, event_level = event_level, case_weights = case_weights ) } gain_capture_estimator_impl <- function(truth, estimate, estimator, event_level, case_weights) { if (is_binary(estimator)) { gain_capture_binary(truth, estimate, event_level, case_weights) } else { truth_table <- yardstick_truth_table(truth, case_weights = case_weights) w <- get_weights(truth_table, estimator) out_vec <- gain_capture_multiclass(truth, estimate, case_weights) stats::weighted.mean(out_vec, w) } } gain_capture_binary <- function(truth, estimate, event_level, case_weights) { # `na_rm` should already be done gain_list <- gain_curve_vec( truth, estimate, na_rm = FALSE, event_level = event_level, case_weights = case_weights ) gain_to_0_auc <- auc( x = gain_list[[".percent_tested"]], y = gain_list[[".percent_found"]] ) scaler <- 1 / (100^2) height <- 100 width <- 100 baseline <- 0.5 gain_to_0_auc <- gain_to_0_auc * scaler # perfect = value at the elbow of the perfect gain chart .n_events <- gain_list[[".n_events"]] .n <- gain_list[[".n"]] slope <- 1 / (max(.n_events) / dplyr::last(.n)) perfect <- height / slope # perfect triangle perf_triang <- (perfect * height) / 2 # perfect rectangle perf_rect <- (width - perfect) * height perf_auc <- (perf_rect + perf_triang) * scaler # calculate capture ratio = fraction of area captured # under the gain curve, but above the baseline, and relative to a # perfect capture score (gain_to_0_auc - baseline) / (perf_auc - baseline) } gain_capture_multiclass <- function(truth, estimate, case_weights) { res_lst <- one_vs_all_impl( fn = gain_capture_binary, truth = truth, estimate = estimate, case_weights = case_weights ) vapply(res_lst, FUN.VALUE = numeric(1), function(x) x) }
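# Illustrative usage sketch (assumes the yardstick example data): the value
# returned here corresponds to the area between the gain curve and the 45
# degree baseline, scaled by the area of the "perfect" curve shown by
# `autoplot(gain_curve(...))` above.
library(yardstick)
data(two_class_example)

gain_capture(two_class_example, truth, Class1)

# The same value from the vector interface:
gain_capture_vec(two_class_example$truth, two_class_example$Class1)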
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-gain_capture.R
#' Gain curve
#'
#' `gain_curve()` constructs the full gain curve and returns a tibble. See
#' [gain_capture()] for the relevant area under the gain curve. Also see
#' [lift_curve()] for a closely related concept.
#'
#' There is a [ggplot2::autoplot()] method for quickly visualizing the curve.
#' This works for binary and multiclass output, and also works with grouped data
#' (i.e. from resamples). See the examples.
#'
#' The greater the area between the gain curve and the baseline, the better the
#' model.
#'
#' Gain curves are identical to CAP curves (cumulative accuracy profile). See
#' the Engelmann reference for more information on CAP curves.
#'
#' @section Gain and Lift Curves:
#'
#' The motivation behind cumulative gain and lift charts is as a visual method to
#' determine the effectiveness of a model when compared to the results one
#' might expect without a model. As an example, without a model, if you were
#' to advertise to a random 10% of your customer base, then you might expect
#' to capture 10% of the total number of positive responses had you
#' advertised to your entire customer base. Given a model that predicts
#' which customers are more likely to respond, the hope is that you can more
#' accurately target 10% of your customer base and capture
#' `>`10% of the total number of positive responses.
#'
#' The calculation to construct gain curves is as follows:
#'
#' 1. `truth` and `estimate` are placed in descending order by the `estimate`
#' values (`estimate` here is a single column supplied in `...`).
#'
#' 2. The cumulative number of samples with true results relative to the
#' entire number of true results is found. This is the y-axis in a gain chart.
#'
#' @family curve metrics
#' @templateVar fn gain_curve
#' @template multiclass-curve
#' @template event_first
#'
#' @inheritParams pr_auc
#'
#' @return
#' A tibble with class `gain_df` or `grouped_gain_df` having columns:
#'
#' - `.n` The index of the current sample.
#'
#' - `.n_events` The index of the current _unique_ sample. Values with repeated
#' `estimate` values are given identical indices in this column.
#'
#' - `.percent_tested` The cumulative percentage of values tested.
#'
#' - `.percent_found` The cumulative percentage of true results relative to the
#' total number of true results.
#'
#' If using the `case_weights` argument, all of the above columns will be
#' weighted. This makes the most sense with frequency weights, which are integer
#' weights representing the number of times a particular observation should be
#' repeated.
#'
#' @references
#'
#' Engelmann, Bernd & Hayden, Evelyn & Tasche, Dirk (2003).
#' "Measuring the Discriminative Power of Rating Systems,"
#' Discussion Paper Series 2: Banking and Financial Studies 2003,01,
#' Deutsche Bundesbank.
#'
#' @seealso
#' Compute the relevant area under the gain curve with [gain_capture()].
#'
#' @author Max Kuhn
#'
#' @template examples-binary-prob
#' @examples
#' # ---------------------------------------------------------------------------
#' # `autoplot()`
#'
#' library(ggplot2)
#' library(dplyr)
#'
#' # Use autoplot to visualize
#' # The top left hand corner of the grey triangle is a "perfect" gain curve
#' autoplot(gain_curve(two_class_example, truth, Class1))
#'
#' # Multiclass one-vs-all approach
#' # One curve per level
#' hpc_cv %>%
#'   filter(Resample == "Fold01") %>%
#'   gain_curve(obs, VF:L) %>%
#'   autoplot()
#'
#' # Same as above, but with all of the resamples
#' # The resample with the minimum (farthest to the left) "perfect" value is
#' # used to draw the shaded region
#' hpc_cv %>%
#'   group_by(Resample) %>%
#'   gain_curve(obs, VF:L) %>%
#'   autoplot()
#'
#' @export
#'
gain_curve <- function(data, ...) {
  UseMethod("gain_curve")
}

#' @rdname gain_curve
#' @export
gain_curve.data.frame <- function(data, truth, ..., na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL) {
  result <- curve_metric_summarizer(
    name = "gain_curve",
    fn = gain_curve_vec,
    data = data,
    truth = !!enquo(truth),
    ...,
    na_rm = na_rm,
    event_level = event_level,
    case_weights = !!enquo(case_weights)
  )

  curve_finalize(result, data, "gain_df", "grouped_gain_df")
}

# don't export gain_curve_vec / lift_curve_vec
# not as meaningful to return a list of vectors
# maybe it could return the tibble?
gain_curve_vec <- function(truth, estimate, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL, ...) {
  abort_if_class_pred(truth)

  estimator <- finalize_estimator(truth, metric_class = "gain_curve")

  check_prob_metric(truth, estimate, case_weights, estimator)

  if (na_rm) {
    result <- yardstick_remove_missing(truth, estimate, case_weights)

    truth <- result$truth
    estimate <- result$estimate
    case_weights <- result$case_weights
  } else if (yardstick_any_missing(truth, estimate, case_weights)) {
    cli::cli_abort(c(
      x = "Missing values were detected and {.code na_rm = FALSE}.",
      i = "Not able to perform calculations."
    ))
  }

  gain_curve_estimator_impl(
    truth = truth,
    estimate = estimate,
    estimator = estimator,
    event_level = event_level,
    case_weights = case_weights
  )
}

gain_curve_estimator_impl <- function(truth, estimate, estimator, event_level, case_weights) {
  if (is_binary(estimator)) {
    gain_curve_binary(truth, estimate, event_level, case_weights)
  } else {
    gain_curve_multiclass(truth, estimate, case_weights)
  }
}

gain_curve_binary <- function(truth, estimate, event_level, case_weights) {
  gain_list <- gain_curve_binary_impl(truth, estimate, event_level, case_weights)
  dplyr::tibble(!!!gain_list)
}

gain_curve_multiclass <- function(truth, estimate, case_weights) {
  one_vs_all_with_level(
    fn = gain_curve_binary,
    truth = truth,
    estimate = estimate,
    case_weights = case_weights
  )
}

# Following the Example Problem 2 of:
# http://www2.cs.uregina.ca/~dbd/cs831/notes/lift_chart/lift_chart.html
gain_curve_binary_impl <- function(truth, estimate, event_level, case_weights) {
  truth <- unclass(truth)

  # Events are re-coded as 1, non-events are 0. Easier for cumulative calcs.
if (is_event_first(event_level)) { truth <- as.integer(truth == 1L) } else { truth <- as.integer(truth == 2L) } if (is.null(case_weights)) { case_weights <- rep(1, times = length(truth)) } case_weights <- vec_cast(case_weights, to = double()) # arrange in decreasing order by class probability score estimate_ord <- order(estimate, decreasing = TRUE) estimate <- estimate[estimate_ord] truth <- truth[estimate_ord] case_weights <- case_weights[estimate_ord] case_weights_event <- ifelse(truth, case_weights, 0) n_events <- sum(case_weights_event) n_predictions <- sum(case_weights) # uninformative model x and y axis # this is also the x axis in the gain / lift charts cumulative_tested <- cumsum(case_weights) cumulative_percent_tested <- (cumulative_tested / n_predictions) * 100 cumulative_found <- cumsum(case_weights_event) cumulative_percent_found <- (cumulative_found / n_events) * 100 # remove all but the last of any duplicated estimates # doing this after the fact allows us to still use cumsum() where_dups <- duplicated(estimate, fromLast = TRUE) cumulative_found <- cumulative_found[!where_dups] cumulative_tested <- cumulative_tested[!where_dups] cumulative_percent_tested <- cumulative_percent_tested[!where_dups] cumulative_percent_found <- cumulative_percent_found[!where_dups] # edge values cumulative_found <- c(0, cumulative_found) cumulative_tested <- c(0, cumulative_tested) cumulative_percent_tested <- c(0, cumulative_percent_tested) cumulative_percent_found <- c(0, cumulative_percent_found) list( .n = cumulative_tested, .n_events = cumulative_found, .percent_tested = cumulative_percent_tested, .percent_found = cumulative_percent_found ) } # autoplot --------------------------------------------------------------------- # dynamically exported in .onLoad() autoplot.gain_df <- function(object, ...) 
{ `%+%` <- ggplot2::`%+%` `%>%` <- dplyr::`%>%` # Base chart chart <- ggplot2::ggplot(data = object) # Grouped specific chart features if (dplyr::is_grouped_df(object)) { # Construct the color interaction group grps <- dplyr::groups(object) interact_expr <- list( color = expr(interaction(!!!grps, sep = "_")) ) # Add group legend label grps_chr <- paste0(dplyr::group_vars(object), collapse = "_") chart <- chart %+% ggplot2::labs(color = grps_chr) } else { interact_expr <- list() } # Generic enough to be used in the pipe chain # for multiclass and binary curves maybe_group_by_level <- function(object, with_old = TRUE) { if (with_old) { grps <- dplyr::groups(object) } else { grps <- list() } if (".level" %in% colnames(object)) { # .level should be the first group b/c of how summarise() # drops a group object <- dplyr::group_by(object, .level, !!!grps) } object } # Construct poly_data # If grouped (ie resamples), we take the min of all "perfect" values # to ensure we capture all lines in the polygon # If multiclass, we calculate each level separately poly_data <- object %>% maybe_group_by_level() %>% dplyr::summarise(slope = 1 / (max(.n_events) / dplyr::last(.n))) %>% dplyr::mutate(perfect = 100 / slope) %>% maybe_group_by_level(with_old = FALSE) %>% dplyr::summarise(perfect = min(perfect)) %>% maybe_group_by_level() %>% dplyr::do( dplyr::tibble( x = c(0, .$perfect, 100), y = c(0, 100, 100) ) ) # Avoid cran check for "globals" .percent_tested <- as.name(".percent_tested") .percent_found <- as.name(".percent_found") x <- as.name("x") y <- as.name("y") chart <- chart %+% # boundary poly ggplot2::geom_polygon( mapping = ggplot2::aes( x = !!x, y = !!y ), data = poly_data, # fill fill = "lightgrey", alpha = 0.4 ) %+% # gain curve ggplot2::geom_line( mapping = ggplot2::aes( x = !!.percent_tested, y = !!.percent_found, !!!interact_expr ), data = object ) %+% ggplot2::labs( x = "% Tested", y = "% Found" ) %+% ggplot2::theme_bw() # facet by .level if this was a multiclass computation if (".level" %in% colnames(object)) { chart <- chart %+% ggplot2::facet_wrap(~.level) } chart }
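# Minimal by-hand sketch of the cumulative bookkeeping used for gain curves
# (toy data, no case weights, no tied estimates). This mirrors, but is not,
# the internal implementation above.
truth    <- factor(c("yes", "no", "yes", "no"), levels = c("yes", "no"))
estimate <- c(0.9, 0.8, 0.4, 0.2)

ord   <- order(estimate, decreasing = TRUE)
event <- as.integer(truth[ord] == "yes")

percent_tested <- 100 * seq_along(event) / length(event)
percent_found  <- 100 * cumsum(event) / sum(event)

cbind(.percent_tested = percent_tested, .percent_found = percent_found)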
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-gain_curve.R
# AUC helper ------------------------------------------------------------------- # AUC by trapezoidal rule: # https://en.wikipedia.org/wiki/Trapezoidal_rule # assumes x is a partition and that x & y are the same length auc <- function(x, y, na_rm = TRUE) { if (na_rm) { comp <- stats::complete.cases(x, y) x <- x[comp] y <- y[comp] } if (is.unsorted(x, na.rm = TRUE, strictly = FALSE)) { cli::cli_abort( "{.arg x} must already be in weakly increasing order.", .internal = TRUE ) } # length x = length y n <- length(x) # dx dx <- x[-1] - x[-n] # mid height of y height <- (y[-n] + y[-1]) / 2 auc <- sum(height * dx) auc } # One vs all helper ------------------------------------------------------------ one_vs_all_impl <- function(fn, truth, estimate, case_weights, ...) { lvls <- levels(truth) other <- "..other" metric_lst <- new_list(n = length(lvls)) # one vs all for (i in seq_along(lvls)) { # Recode truth into 2 levels, relevant and other # Pull out estimate prob column corresponding to relevant # Pulls by order, so they have to be in the same order as the levels! # (cannot pull by name because they arent always the same name i.e. .pred_{level}) lvl <- lvls[i] truth_temp <- factor( x = ifelse(truth == lvl, lvl, other), levels = c(lvl, other) ) estimate_temp <- as.numeric(estimate[, i]) # `one_vs_all_impl()` always ignores the event level ordering when # computing each individual binary metric metric_lst[[i]] <- fn( truth_temp, estimate_temp, case_weights = case_weights, event_level = "first", ... ) } metric_lst } one_vs_all_with_level <- function(fn, truth, estimate, case_weights, ...) { res <- one_vs_all_impl( fn = fn, truth = truth, estimate = estimate, case_weights = case_weights, ... ) lvls <- levels(truth) with_level <- function(df, lvl) { df$.level <- lvl dplyr::select(df, .level, tidyselect::everything()) } res <- mapply( with_level, df = res, lvl = lvls, SIMPLIFY = FALSE, USE.NAMES = FALSE ) dplyr::bind_rows(res) }
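# Quick sketch of the trapezoidal rule used by the internal `auc()` helper
# above (illustrative only; the helper itself is not exported).
x <- c(0, 0.5, 1)
y <- c(0, 0.8, 1)

dx     <- diff(x)
height <- (y[-length(y)] + y[-1]) / 2
sum(height * dx) # 0.65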
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-helpers.R
#' Lift curve
#'
#' `lift_curve()` constructs the full lift curve and returns a tibble. See
#' [gain_curve()] for a closely related concept.
#'
#' There is a [ggplot2::autoplot()] method for quickly visualizing the curve.
#' This works for binary and multiclass output, and also works with grouped data
#' (i.e. from resamples). See the examples.
#'
#' @section Gain and Lift Curves:
#'
#' The motivation behind cumulative gain and lift charts is as a visual method
#' to determine the effectiveness of a model when compared to the results one
#' might expect without a model. As an example, without a model, if you were to
#' advertise to a random 10% of your customer base, then you might expect to
#' capture 10% of the total number of positive responses had you
#' advertised to your entire customer base. Given a model that predicts which
#' customers are more likely to respond, the hope is that you can more
#' accurately target 10% of your customer base and capture `>`10% of the total
#' number of positive responses.
#'
#' The calculation to construct lift curves is as follows:
#'
#' 1. `truth` and `estimate` are placed in descending order by the `estimate`
#' values (`estimate` here is a single column supplied in `...`).
#'
#' 2. The cumulative number of samples with true results relative to the
#' entire number of true results is found.
#'
#' 3. The cumulative `%` found is divided by the cumulative `%` tested
#' to construct the lift value. This ratio represents the factor of improvement
#' over an uninformed model. Values `>`1 represent a valuable model. This is the
#' y-axis of the lift chart.
#'
#' @family curve metrics
#' @templateVar fn lift_curve
#' @template multiclass-curve
#' @template event_first
#'
#' @inheritParams pr_auc
#'
#' @return
#' A tibble with class `lift_df` or `grouped_lift_df` having
#' columns:
#'
#' - `.n` The index of the current sample.
#'
#' - `.n_events` The index of the current _unique_ sample. Values with repeated
#' `estimate` values are given identical indices in this column.
#'
#' - `.percent_tested` The cumulative percentage of values tested.
#'
#' - `.lift` First calculate the cumulative percentage of true results relative
#' to the total number of true results. Then divide that by `.percent_tested`.
#'
#' If using the `case_weights` argument, all of the above columns will be
#' weighted. This makes the most sense with frequency weights, which are integer
#' weights representing the number of times a particular observation should be
#' repeated.
#'
#' @author Max Kuhn
#'
#' @template examples-binary-prob
#' @examples
#' # ---------------------------------------------------------------------------
#' # `autoplot()`
#'
#' library(ggplot2)
#' library(dplyr)
#'
#' # Use autoplot to visualize
#' autoplot(lift_curve(two_class_example, truth, Class1))
#'
#' # Multiclass one-vs-all approach
#' # One curve per level
#' hpc_cv %>%
#'   filter(Resample == "Fold01") %>%
#'   lift_curve(obs, VF:L) %>%
#'   autoplot()
#'
#' # Same as above, but with all of the resamples
#' hpc_cv %>%
#'   group_by(Resample) %>%
#'   lift_curve(obs, VF:L) %>%
#'   autoplot()
#'
#' @export
#'
lift_curve <- function(data, ...)
{ UseMethod("lift_curve") } #' @rdname lift_curve #' @export lift_curve.data.frame <- function(data, truth, ..., na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL) { result <- curve_metric_summarizer( name = "lift_curve", fn = lift_curve_vec, data = data, truth = !!enquo(truth), ..., na_rm = na_rm, event_level = event_level, case_weights = !!enquo(case_weights) ) curve_finalize(result, data, "lift_df", "grouped_lift_df") } lift_curve_vec <- function(truth, estimate, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL, ...) { # Doesn't validate inputs here since it is done in gain_curve_vec() # tibble result, possibly grouped res <- gain_curve_vec( truth = truth, estimate = estimate, na_rm = na_rm, event_level = event_level, case_weights = case_weights ) if (identical(res, NA_real_)) { return(res) } res <- dplyr::mutate(res, .lift = .percent_found / .percent_tested) res[[".percent_found"]] <- NULL res } # autoplot --------------------------------------------------------------------- # dynamically exported in .onLoad() autoplot.lift_df <- function(object, ...) { `%+%` <- ggplot2::`%+%` # Remove data before first event (is this okay?) object <- dplyr::filter(object, .n_events > 0) # Base chart chart <- ggplot2::ggplot(data = object) # Grouped specific chart features if (dplyr::is_grouped_df(object)) { # Construct the color interaction group grps <- dplyr::groups(object) interact_expr <- list( color = expr(interaction(!!!grps, sep = "_")) ) # Add group legend label grps_chr <- paste0(dplyr::group_vars(object), collapse = "_") chart <- chart %+% ggplot2::labs(color = grps_chr) } else { interact_expr <- list() } baseline <- data.frame( x = c(0, 100), y = c(1, 1) ) # Avoid cran check for "globals" .percent_tested <- as.name(".percent_tested") .lift <- as.name(".lift") x <- as.name("x") y <- as.name("y") chart <- chart %+% # gain curve ggplot2::geom_line( mapping = ggplot2::aes( x = !!.percent_tested, y = !!.lift, !!!interact_expr ), data = object ) %+% # baseline ggplot2::geom_line( mapping = ggplot2::aes( x = !!x, y = !!y ), data = baseline, colour = "grey60", linetype = 2 ) %+% ggplot2::labs( x = "% Tested", y = "Lift" ) %+% ggplot2::theme_bw() # facet by .level if this was a multiclass computation if (".level" %in% colnames(object)) { chart <- chart %+% ggplot2::facet_wrap(~.level) } chart }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-lift_curve.R
#' Mean log loss for multinomial data #' #' Compute the logarithmic loss of a classification model. #' #' Log loss is a measure of the performance of a classification model. A #' perfect model has a log loss of `0`. #' #' Compared with [accuracy()], log loss #' takes into account the uncertainty in the prediction and gives a more #' detailed view into the actual performance. For example, given two input #' probabilities of `.6` and `.9` where both are classified as predicting #' a positive value, say, `"Yes"`, the accuracy metric would interpret them #' as having the same value. If the true output is `"Yes"`, log loss penalizes #' `.6` because it is "less sure" of it's result compared to the probability #' of `.9`. #' #' @family class probability metrics #' @templateVar fn mn_log_loss #' @template return #' #' @section Multiclass: #' Log loss has a known multiclass extension, and is simply the sum of the #' log loss values for each class prediction. Because of this, no averaging #' types are supported. #' #' @inheritParams pr_auc #' #' @param sum A `logical`. Should the sum of the likelihood contributions be #' returned (instead of the mean value)? #' #' @author Max Kuhn #' #' @examples #' # Two class #' data("two_class_example") #' mn_log_loss(two_class_example, truth, Class1) #' #' # Multiclass #' library(dplyr) #' data(hpc_cv) #' #' # You can use the col1:colN tidyselect syntax #' hpc_cv %>% #' filter(Resample == "Fold01") %>% #' mn_log_loss(obs, VF:L) #' #' # Groups are respected #' hpc_cv %>% #' group_by(Resample) %>% #' mn_log_loss(obs, VF:L) #' #' #' # Vector version #' # Supply a matrix of class probabilities #' fold1 <- hpc_cv %>% #' filter(Resample == "Fold01") #' #' mn_log_loss_vec( #' truth = fold1$obs, #' matrix( #' c(fold1$VF, fold1$F, fold1$M, fold1$L), #' ncol = 4 #' ) #' ) #' #' # Supply `...` with quasiquotation #' prob_cols <- levels(two_class_example$truth) #' mn_log_loss(two_class_example, truth, Class1) #' mn_log_loss(two_class_example, truth, !!prob_cols[1]) #' #' @export mn_log_loss <- function(data, ...) { UseMethod("mn_log_loss") } mn_log_loss <- new_prob_metric( mn_log_loss, direction = "minimize" ) #' @export #' @rdname mn_log_loss mn_log_loss.data.frame <- function(data, truth, ..., na_rm = TRUE, sum = FALSE, event_level = yardstick_event_level(), case_weights = NULL) { prob_metric_summarizer( name = "mn_log_loss", fn = mn_log_loss_vec, data = data, truth = !!enquo(truth), ..., na_rm = na_rm, event_level = event_level, case_weights = !!enquo(case_weights), # Extra argument for mn_log_loss_impl() fn_options = list(sum = sum) ) } #' @rdname mn_log_loss #' @export mn_log_loss_vec <- function(truth, estimate, na_rm = TRUE, sum = FALSE, event_level = yardstick_event_level(), case_weights = NULL, ...) 
{ abort_if_class_pred(truth) estimator <- finalize_estimator(truth, metric_class = "mn_log_loss") check_prob_metric(truth, estimate, case_weights, estimator) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } mn_log_loss_estimator_impl( truth = truth, estimate = estimate, estimator = estimator, event_level = event_level, sum = sum, case_weights = case_weights ) } mn_log_loss_estimator_impl <- function(truth, estimate, estimator, event_level, sum, case_weights) { if (is_binary(estimator)) { mn_log_loss_binary( truth = truth, estimate = estimate, event_level = event_level, sum = sum, case_weights = case_weights ) } else { mn_log_loss_multiclass( truth = truth, estimate = estimate, sum = sum, case_weights = case_weights ) } } mn_log_loss_binary <- function(truth, estimate, event_level, sum, case_weights) { if (!is_event_first(event_level)) { lvls <- levels(truth) truth <- stats::relevel(truth, lvls[[2]]) } estimate <- matrix(c(estimate, 1 - estimate), ncol = 2) mn_log_loss_multiclass( truth = truth, estimate = estimate, sum = sum, case_weights = case_weights ) } # We apply the min/max rule to avoid undefined log() values (#103) # (Standard seems to be to use `eps = 1e-15`, but base R uses # .Machine$double.eps in many places when they do this, # and it should be more precise) # https://github.com/wch/r-source/blob/582d94805aeee0c91f9bd9bdd63e421dd60e441f/src/library/stats/R/family.R#L83 mn_log_loss_multiclass <- function(truth, estimate, sum, case_weights) { # Binarize factor y <- stats::model.matrix(~ truth - 1) eps <- .Machine$double.eps estimate <- pmax(pmin(estimate, 1 - eps), eps) loss <- y * log(estimate) loss <- rowSums(loss) loss <- -loss if (sum) { yardstick_sum(loss, case_weights = case_weights) } else { yardstick_mean(loss, case_weights = case_weights) } }
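# By-hand sketch of the point made in the details above (illustrative only):
# when the true class is the predicted one, the per-observation contribution
# is -log(p), so a confident correct prediction (0.9) is penalized less than
# a hesitant one (0.6).
-log(0.6) # ~0.51
-log(0.9) # ~0.11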
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-mn_log_loss.R
#' Area under the precision recall curve #' #' `pr_auc()` is a metric that computes the area under the precision #' recall curve. See [pr_curve()] for the full curve. #' #' #' @family class probability metrics #' @templateVar fn pr_auc #' @template return #' @template multiclass-prob #' @template event_first #' #' @inheritParams sens #' #' @param data A `data.frame` containing the columns specified by `truth` and #' `...`. #' #' @param estimate If `truth` is binary, a numeric vector of class probabilities #' corresponding to the "relevant" class. Otherwise, a matrix with as many #' columns as factor levels of `truth`. _It is assumed that these are in the #' same order as the levels of `truth`._ #' #' @param ... A set of unquoted column names or one or more #' `dplyr` selector functions to choose which variables contain the #' class probabilities. If `truth` is binary, only 1 column should be selected, #' and it should correspond to the value of `event_level`. Otherwise, there #' should be as many columns as factor levels of `truth` and the ordering of #' the columns should be the same as the factor levels of `truth`. #' #' @param estimator One of `"binary"`, `"macro"`, or `"macro_weighted"` to #' specify the type of averaging to be done. `"binary"` is only relevant for #' the two class case. The other two are general methods for calculating #' multiclass metrics. The default will automatically choose `"binary"` or #' `"macro"` based on `truth`. #' #' @seealso #' #' [pr_curve()] for computing the full precision recall curve. #' #' @author Max Kuhn #' #' @template examples-binary-prob #' @template examples-multiclass-prob #' #' @export pr_auc <- function(data, ...) { UseMethod("pr_auc") } pr_auc <- new_prob_metric( pr_auc, direction = "maximize" ) #' @export #' @rdname pr_auc pr_auc.data.frame <- function(data, truth, ..., estimator = NULL, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL) { prob_metric_summarizer( name = "pr_auc", fn = pr_auc_vec, data = data, truth = !!enquo(truth), ..., estimator = estimator, na_rm = na_rm, event_level = event_level, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname pr_auc pr_auc_vec <- function(truth, estimate, estimator = NULL, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL, ...) { abort_if_class_pred(truth) estimator <- finalize_estimator(truth, estimator, "pr_auc") check_prob_metric(truth, estimate, case_weights, estimator) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } pr_auc_estimator_impl( truth = truth, estimate = estimate, estimator = estimator, event_level = event_level, case_weights = case_weights ) } pr_auc_estimator_impl <- function(truth, estimate, estimator, event_level, case_weights) { if (is_binary(estimator)) { pr_auc_binary(truth, estimate, event_level, case_weights) } else { # weights for macro / macro_weighted are based on truth frequencies # (this is the usual definition) truth_table <- yardstick_truth_table(truth, case_weights = case_weights) w <- get_weights(truth_table, estimator) out_vec <- pr_auc_multiclass(truth, estimate, case_weights) stats::weighted.mean(out_vec, w) } } # Don't remove NA values so any errors propagate # (i.e. 
if `truth` has no "events")
pr_auc_binary <- function(truth, estimate, event_level, case_weights) {
  curve <- pr_curve_vec(
    truth = truth,
    estimate = estimate,
    na_rm = TRUE,
    event_level = event_level,
    case_weights = case_weights
  )

  auc(
    x = curve[["recall"]],
    y = curve[["precision"]],
    na_rm = FALSE
  )
}

pr_auc_multiclass <- function(truth, estimate, case_weights) {
  results <- one_vs_all_impl(
    fn = pr_auc_binary,
    truth = truth,
    estimate = estimate,
    case_weights = case_weights
  )

  vapply(results, FUN.VALUE = numeric(1), function(x) x)
}
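# Illustrative check (not package code): `pr_auc()` should match a manual
# trapezoidal integration of the curve returned by `pr_curve()`.
library(yardstick)
data(two_class_example)

curve <- pr_curve(two_class_example, truth, Class1)

dx     <- diff(curve$recall)
height <- (curve$precision[-nrow(curve)] + curve$precision[-1]) / 2
sum(height * dx)

pr_auc(two_class_example, truth, Class1)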
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-pr_auc.R
#' Precision recall curve
#'
#' `pr_curve()` constructs the full precision recall curve and returns a
#' tibble. See [pr_auc()] for the area under the precision recall curve.
#'
#' `pr_curve()` computes the precision at every unique value of the
#' probability column (in addition to infinity).
#'
#' There is a [ggplot2::autoplot()]
#' method for quickly visualizing the curve. This works for
#' binary and multiclass output, and also works with grouped data (i.e. from
#' resamples). See the examples.
#'
#' @family curve metrics
#' @templateVar fn pr_curve
#' @template multiclass-curve
#' @template event_first
#'
#' @inheritParams pr_auc
#'
#' @return
#' A tibble with class `pr_df` or `grouped_pr_df` having
#' columns `.threshold`, `recall`, and `precision`.
#'
#' @seealso
#' Compute the area under the precision recall curve with [pr_auc()].
#'
#' @author Max Kuhn
#' @template examples-binary-prob
#' @examples
#' # ---------------------------------------------------------------------------
#' # `autoplot()`
#'
#' # Visualize the curve using ggplot2 manually
#' library(ggplot2)
#' library(dplyr)
#' pr_curve(two_class_example, truth, Class1) %>%
#'   ggplot(aes(x = recall, y = precision)) +
#'   geom_path() +
#'   coord_equal() +
#'   theme_bw()
#'
#' # Or use autoplot
#' autoplot(pr_curve(two_class_example, truth, Class1))
#'
#' # Multiclass one-vs-all approach
#' # One curve per level
#' hpc_cv %>%
#'   filter(Resample == "Fold01") %>%
#'   pr_curve(obs, VF:L) %>%
#'   autoplot()
#'
#' # Same as above, but with all of the resamples
#' hpc_cv %>%
#'   group_by(Resample) %>%
#'   pr_curve(obs, VF:L) %>%
#'   autoplot()
#'
#' @export
#'
pr_curve <- function(data, ...) {
  UseMethod("pr_curve")
}

#' @export
#' @rdname pr_curve
pr_curve.data.frame <- function(data, truth, ..., na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL) {
  result <- curve_metric_summarizer(
    name = "pr_curve",
    fn = pr_curve_vec,
    data = data,
    truth = !!enquo(truth),
    ...,
    na_rm = na_rm,
    event_level = event_level,
    case_weights = !!enquo(case_weights)
  )

  curve_finalize(result, data, "pr_df", "grouped_pr_df")
}

# Undecided whether to export this or not
pr_curve_vec <- function(truth, estimate, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL, ...) {
  abort_if_class_pred(truth)

  estimator <- finalize_estimator(truth, metric_class = "pr_curve")

  check_prob_metric(truth, estimate, case_weights, estimator)

  if (na_rm) {
    result <- yardstick_remove_missing(truth, estimate, case_weights)

    truth <- result$truth
    estimate <- result$estimate
    case_weights <- result$case_weights
  } else if (yardstick_any_missing(truth, estimate, case_weights)) {
    cli::cli_abort(c(
      x = "Missing values were detected and {.code na_rm = FALSE}.",
      i = "Not able to perform calculations."
)) } pr_curve_estimator_impl( truth = truth, estimate = estimate, estimator = estimator, event_level = event_level, case_weights = case_weights ) } pr_curve_estimator_impl <- function(truth, estimate, estimator, event_level, case_weights) { if (is_binary(estimator)) { pr_curve_binary(truth, estimate, event_level, case_weights) } else { pr_curve_multiclass(truth, estimate, case_weights) } } pr_curve_binary <- function(truth, estimate, event_level, case_weights) { # Algorithm modified from page 866 of # http://people.inf.elte.hu/kiss/12dwhdm/roc.pdf # P = #positives (sum of case weights when truth == event) # N = #elements (sum of case weights) # # At the start of the curve (we force this): # threshold = infinity # recall = TP / P = 0, if P > 0 # precision = TP / (TP + FP) = undefined b/c we haven't seen any values yet # but we need to put 1 here so we can start the graph in the top # left corner and compute PR AUC correctly # # At the end of the curve: # threshold = last estimate # recall = TP / P = 1, P > 0 # precision = TP / (TP + FP) = P / N curve <- binary_threshold_curve( truth = truth, estimate = estimate, event_level = event_level, case_weights = case_weights ) threshold <- curve$threshold tp <- curve$tp fp <- curve$fp recall <- tp / tp[length(tp)] precision <- tp / (tp + fp) # First row always has `threshold = Inf`. # First recall is always `0`. # First precision is always `1`. threshold <- c(Inf, threshold) recall <- c(0, recall) precision <- c(1, precision) out <- list( .threshold = threshold, recall = recall, precision = precision ) dplyr::tibble(!!!out) } # One vs all approach pr_curve_multiclass <- function(truth, estimate, case_weights) { one_vs_all_with_level( fn = pr_curve_binary, truth = truth, estimate = estimate, case_weights = case_weights ) } # Dynamically exported autoplot.pr_df <- function(object, ...) { `%+%` <- ggplot2::`%+%` # Base chart pr_chart <- ggplot2::ggplot(data = object) # Add in group interactions if required if (inherits(object, "grouped_pr_df")) { grps <- dplyr::groups(object) grps_chr <- paste0(dplyr::group_vars(object), collapse = "_") interact_expr <- list( color = expr(interaction(!!!grps, sep = "_")) ) pr_chart <- pr_chart %+% ggplot2::labs(color = grps_chr) } else { interact_expr <- list() } # splice in the group interactions, or do nothing aes_spliced <- ggplot2::aes( x = recall, y = precision, !!!interact_expr ) # build the graph pr_chart <- pr_chart %+% ggplot2::geom_path(mapping = aes_spliced) %+% ggplot2::lims(x = c(0, 1), y = c(0, 1)) %+% ggplot2::coord_equal(ratio = 1) %+% ggplot2::theme_bw() # If we have .level, that means this was multiclass # and we want to show 1 vs all graphs if (".level" %in% colnames(object)) { pr_chart <- pr_chart %+% ggplot2::facet_wrap(~.level) } pr_chart }
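# Toy sketch of the threshold sweep described in the comments above (no
# ties, no case weights; illustrative only, not the internal implementation).
truth    <- factor(c("yes", "yes", "no", "no"), levels = c("yes", "no"))
estimate <- c(0.9, 0.7, 0.6, 0.2)

ord   <- order(estimate, decreasing = TRUE)
event <- truth[ord] == "yes"

tp <- cumsum(event)
fp <- cumsum(!event)

recall    <- tp / sum(truth == "yes")
precision <- tp / (tp + fp)

data.frame(.threshold = estimate[ord], recall, precision)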
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-pr_curve.R
#' Area under the receiver operator curve #' #' @description #' `roc_auc()` is a metric that computes the area under the ROC curve. See #' [roc_curve()] for the full curve. #' #' @details #' Generally, an ROC AUC value is between `0.5` and `1`, with `1` being a #' perfect prediction model. If your value is between `0` and `0.5`, then #' this implies that you have meaningful information in your model, but it #' is being applied incorrectly because doing the opposite of what the model #' predicts would result in an AUC `>0.5`. #' #' Note that you can't combine `estimator = "hand_till"` with `case_weights`. #' #' @family class probability metrics #' @templateVar fn roc_auc #' @template return #' @template event_first #' #' @section Multiclass: #' The default multiclass method for computing `roc_auc()` is to use the #' method from Hand, Till, (2001). Unlike macro-averaging, this method is #' insensitive to class distributions like the binary ROC AUC case. #' Additionally, while other multiclass techniques will return `NA` if any #' levels in `truth` occur zero times in the actual data, the Hand-Till method #' will simply ignore those levels in the averaging calculation, with a warning. #' #' Macro and macro-weighted averaging are still provided, even though they are #' not the default. In fact, macro-weighted averaging corresponds to the same #' definition of multiclass AUC given by Provost and Domingos (2001). #' #' @inheritParams pr_auc #' #' @param options `[deprecated]` #' #' No longer supported as of yardstick 1.0.0. If you pass something here it #' will be ignored with a warning. #' #' Previously, these were options passed on to `pROC::roc()`. If you need #' support for this, use the pROC package directly. #' #' @param estimator One of `"binary"`, `"hand_till"`, `"macro"`, or #' `"macro_weighted"` to specify the type of averaging to be done. `"binary"` #' is only relevant for the two class case. The others are general methods for #' calculating multiclass metrics. The default will automatically choose #' `"binary"` if `truth` is binary, `"hand_till"` if `truth` has >2 levels and #' `case_weights` isn't specified, or `"macro"` if `truth` has >2 levels and #' `case_weights` is specified (in which case `"hand_till"` isn't #' well-defined). #' #' @references #' Hand, Till (2001). "A Simple Generalisation of the Area Under the #' ROC Curve for Multiple Class Classification Problems". _Machine Learning_. #' Vol 45, Iss 2, pp 171-186. #' #' Fawcett (2005). "An introduction to ROC analysis". _Pattern Recognition #' Letters_. 27 (2006), pp 861-874. #' #' Provost, F., Domingos, P., 2001. "Well-trained PETs: Improving probability #' estimation trees", CeDER Working Paper #IS-00-04, Stern School of Business, #' New York University, NY, NY 10012. #' #' @seealso #' [roc_curve()] for computing the full ROC curve. #' #' @author Max Kuhn #' #' @template examples-binary-prob #' @template examples-multiclass-prob #' @export roc_auc <- function(data, ...) 
{ UseMethod("roc_auc") } roc_auc <- new_prob_metric( roc_auc, direction = "maximize" ) #' @export #' @rdname roc_auc roc_auc.data.frame <- function(data, truth, ..., estimator = NULL, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL, options = list()) { check_roc_options_deprecated("roc_auc", options) case_weights_quo <- enquo(case_weights) out <- prob_metric_summarizer( name = "roc_auc", fn = roc_auc_vec, data = data, truth = !!enquo(truth), ..., estimator = estimator, na_rm = na_rm, event_level = event_level, case_weights = !!case_weights_quo ) out <- roc_auc_adjust_result_estimator( out = out, estimator = estimator, case_weights_quo = case_weights_quo ) out } #' @rdname roc_auc #' @export roc_auc_vec <- function(truth, estimate, estimator = NULL, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL, options = list(), ...) { abort_if_class_pred(truth) check_roc_options_deprecated("roc_auc_vec", options) estimator <- finalize_estimator_roc_auc( x = truth, estimator = estimator, metric_class = "roc_auc", case_weights = case_weights ) check_prob_metric(truth, estimate, case_weights, estimator) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } roc_auc_estimator_impl( truth = truth, estimate = estimate, estimator = estimator, event_level = event_level, case_weights = case_weights ) } roc_auc_estimator_impl <- function(truth, estimate, estimator, event_level, case_weights) { if (is_binary(estimator)) { roc_auc_binary(truth, estimate, event_level, case_weights) } else if (estimator == "hand_till") { if (!is.null(case_weights)) { cli::cli_abort( "{.arg case_weights} should be `NULL` at this point for hand-till.", .internal = TRUE ) } roc_auc_hand_till(truth, estimate) } else { # weights for macro / macro_weighted are based on truth frequencies # (this is the usual definition) truth_table <- yardstick_truth_table(truth, case_weights = case_weights) w <- get_weights(truth_table, estimator) out_vec <- roc_auc_multiclass(truth, estimate, case_weights) stats::weighted.mean(out_vec, w) } } roc_auc_binary <- function(truth, estimate, event_level, case_weights) { lvls <- levels(truth) if (!is_event_first(event_level)) { lvls <- rev(lvls) } event <- lvls[[1]] control <- lvls[[2]] if (compute_n_occurrences(truth, event) == 0L) { # Warn here and return `NA`. # The curve computation would error and we can be slightly more forgiving. warn_roc_truth_no_event(event) return(NA_real_) } if (compute_n_occurrences(truth, control) == 0L) { # Warn here and return `NA`. # The curve computation would error and we can be slightly more forgiving. 
warn_roc_truth_no_control(control) return(NA_real_) } curve <- roc_curve_vec( truth = truth, estimate = estimate, na_rm = FALSE, event_level = event_level, case_weights = case_weights ) sensitivity <- curve$sensitivity specificity <- curve$specificity auc( x = specificity, y = sensitivity, na_rm = FALSE ) } roc_auc_multiclass <- function(truth, estimate, case_weights) { results <- one_vs_all_impl( fn = roc_auc_binary, truth = truth, estimate = estimate, case_weights = case_weights ) vapply(results, FUN.VALUE = numeric(1), function(x) x) } # ------------------------------------------------------------------------------ finalize_estimator_roc_auc <- function(x, estimator, metric_class, case_weights) { # This is the `roc_auc_vec()` side of the hack we have to do to go from # hand_till -> macro when case weights are supplied. See # `roc_auc_adjust_result_estimator()` for all of the details. automatic_estimator <- is.null(estimator) estimator <- finalize_estimator( x = x, estimator = estimator, metric_class = metric_class ) if (identical(estimator, "hand_till") && !is.null(case_weights)) { if (automatic_estimator) { # Automatically chose hand-till. Adjust automatic decision to "macro" estimator <- "macro" } else { # Manually chose hand-till and specified case weights. Not compatible! cli::cli_abort( "Can't specify both {.code estimator = 'hand_till'} and \\ {.code case_weights}." ) } } estimator } roc_auc_adjust_result_estimator <- function(out, estimator, case_weights_quo) { # This is a horrible hack that we have to do to support the fact that # `"hand_till"` can be chosen automatically, but doesn't support case weights. # In that case, `roc_auc_vec()` will switch to `"macro"`, but we need that # to propagate up into the data frame method's `.estimator` column. # The alternative is to adjust `finalize_estimator()` to know about the # `case_weights` just for this one metric, and that seemed like too much work. automatically_chose_hand_till_but_also_used_case_weights <- is.null(estimator) && !quo_is_null(case_weights_quo) && identical(out[[".estimator"]][[1]], "hand_till") if (automatically_chose_hand_till_but_also_used_case_weights) { # `roc_auc_vec()` actually "automatically" used `"macro"` weighting here out[[".estimator"]] <- "macro" } out } # ------------------------------------------------------------------------------ roc_auc_hand_till <- function(truth, estimate) { lvls <- levels(truth) # We want to reference the levels by name in the function below, so we # force the column names to be the same as the levels # (and assume the prob matrix columns are given in the same # order as the levels of `truth`) colnames(estimate) <- lvls # Check for levels with no observations in `truth`. Generally this would # return `NA`, but to match pROC and HandTill2001 we remove them with a # warning and proceed with the remaining levels (#123) lvls_loc <- match(lvls, truth) if (anyNA(lvls_loc)) { indicator_missing <- is.na(lvls_loc) lvls_missing <- lvls[indicator_missing] cli::cli_warn(c( x = "No observations were detected in {.arg truth} for level{?s}: \\ {lvls_missing}.", i = "Computation will proceed by ignoring those levels." 
)) # Proceed with non-missing levels lvls <- lvls[!indicator_missing] } C <- length(lvls) multiplier <- 2 / (C * (C - 1)) sum_val <- 0 for (i_lvl in lvls) { # Double sum: # (sum i<j) cutpoint <- which(lvls == i_lvl) j_lvls <- lvls[-seq_len(cutpoint)] for (j_lvl in j_lvls) { A_hat_i_given_j <- roc_auc_subset(i_lvl, j_lvl, truth, estimate) A_hat_j_given_i <- roc_auc_subset(j_lvl, i_lvl, truth, estimate) A_hat_ij <- mean(c(A_hat_i_given_j, A_hat_j_given_i)) # sum A_hat(i, j) sum_val <- sum_val + A_hat_ij } } multiplier * sum_val } # A_hat(i | j) in the paper roc_auc_subset <- function(lvl1, lvl2, truth, estimate) { # Subset where truth is one of the two current levels subset_idx <- which(truth == lvl1 | truth == lvl2) # Use estimate based on lvl1 being the relevant level # Estimate for lvl2 is just 1-lvl1 rather than the value that # is actually there for the multiclass case estimate_lvl1 <- estimate[, lvl1, drop = TRUE] # subset and recode truth to only have 2 levels truth_subset <- factor(truth[subset_idx], levels = c(lvl1, lvl2)) estimate_subset <- estimate_lvl1[subset_idx] # Hand Till method ignores event level (like macro-average). # As far as we know, using case weights doesn't make any sense with Hand Till. # See also: https://github.com/scikit-learn/scikit-learn/pull/12789 auc_val <- roc_auc_binary( truth = truth_subset, estimate = estimate_subset, event_level = "first", case_weights = NULL ) auc_val } # ------------------------------------------------------------------------------ compute_n_occurrences <- function(x, what) { # `NA` values have already been removed by `roc_auc_vec()` sum(x == what) } msg_roc_truth_no_control <- function(control) { paste0( "No control observations were detected in {.arg truth} ", "with control level '", control, "'." ) } warn_roc_truth_no_control <- function(control) { cli::cli_warn( msg_roc_truth_no_control(control), class = "yardstick_warning_roc_truth_no_control" ) } stop_roc_truth_no_control <- function(control) { cli::cli_abort( msg_roc_truth_no_control(control), class = "yardstick_error_roc_truth_no_control" ) } msg_roc_truth_no_event <- function(event) { paste0( "No event observations were detected in {.arg truth} ", "with event level '", event, "'." ) } warn_roc_truth_no_event <- function(event) { cli::cli_warn( msg_roc_truth_no_event(event), class = "yardstick_warning_roc_truth_no_event" ) } stop_roc_truth_no_event <- function(event) { cli::cli_abort( msg_roc_truth_no_event(event), class = "yardstick_error_roc_truth_no_event" ) }
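# Usage sketch (illustrative): with more than two classes and no case
# weights, `roc_auc()` defaults to the Hand-Till estimator; macro averaging
# can be requested explicitly and will generally give a slightly different
# value.
library(dplyr)
library(yardstick)
data(hpc_cv)

fold1 <- hpc_cv %>% filter(Resample == "Fold01")

roc_auc(fold1, obs, VF:L)                      # hand_till (default)
roc_auc(fold1, obs, VF:L, estimator = "macro") # one-vs-all macro average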
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-roc_auc.R
#' Area under the ROC curve of each class against the rest, using the a priori #' class distribution #' #' `roc_aunp()` is a multiclass metric that computes the area under the ROC #' curve of each class against the rest, using the a priori class distribution. #' This is equivalent to `roc_auc(estimator = "macro_weighted")`. #' #' @family class probability metrics #' @templateVar fn roc_aunp #' @template return #' @template event_first #' #' @section Multiclass: #' This multiclass method for computing the area under the ROC curve uses the #' a priori class distribution and is equivalent to #' `roc_auc(estimator = "macro_weighted")`. #' #' @inheritParams roc_auc #' #' @param ... A set of unquoted column names or one or more `dplyr` selector #' functions to choose which variables contain the class probabilities. There #' should be as many columns as factor levels of `truth`. #' #' @param estimate A matrix with as many #' columns as factor levels of `truth`. _It is assumed that these are in the #' same order as the levels of `truth`._ #' #' @references #' #' Ferri, C., Hernández-Orallo, J., & Modroiu, R. (2009). "An experimental #' comparison of performance measures for classification". _Pattern Recognition #' Letters_. 30 (1), pp 27-38. #' #' @seealso #' #' [roc_aunu()] for computing the area under the ROC curve of each class against #' the rest, using the uniform class distribution. #' #' @author Julia Silge #' #' @examples #' # Multiclass example #' #' # `obs` is a 4 level factor. The first level is `"VF"`, which is the #' # "event of interest" by default in yardstick. See the Relevant Level #' # section above. #' data(hpc_cv) #' #' # You can use the col1:colN tidyselect syntax #' library(dplyr) #' hpc_cv %>% #' filter(Resample == "Fold01") %>% #' roc_aunp(obs, VF:L) #' #' # Change the first level of `obs` from `"VF"` to `"M"` to alter the #' # event of interest. The class probability columns should be supplied #' # in the same order as the levels. #' hpc_cv %>% #' filter(Resample == "Fold01") %>% #' mutate(obs = relevel(obs, "M")) %>% #' roc_aunp(obs, M, VF:L) #' #' # Groups are respected #' hpc_cv %>% #' group_by(Resample) %>% #' roc_aunp(obs, VF:L) #' #' # Vector version #' # Supply a matrix of class probabilities #' fold1 <- hpc_cv %>% #' filter(Resample == "Fold01") #' #' roc_aunp_vec( #' truth = fold1$obs, #' matrix( #' c(fold1$VF, fold1$F, fold1$M, fold1$L), #' ncol = 4 #' ) #' ) #' @export roc_aunp <- function(data, ...) { UseMethod("roc_aunp") } roc_aunp <- new_prob_metric( roc_aunp, direction = "maximize" ) #' @export #' @rdname roc_aunp roc_aunp.data.frame <- function(data, truth, ..., na_rm = TRUE, case_weights = NULL, options = list()) { check_roc_options_deprecated("roc_aunp", options) prob_metric_summarizer( name = "roc_aunp", fn = roc_aunp_vec, data = data, truth = !!enquo(truth), ..., estimator = NULL, na_rm = na_rm, event_level = NULL, case_weights = !!enquo(case_weights) ) } #' @rdname roc_aunp #' @export roc_aunp_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, options = list(), ...) 
{ abort_if_class_pred(truth) check_roc_options_deprecated("roc_aunp_vec", options) estimator <- "macro_weighted" check_prob_metric(truth, estimate, case_weights, estimator) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } # `event_level` doesn't really matter, but we set it anyways roc_auc_vec( truth = truth, estimate = estimate, estimator = estimator, na_rm = FALSE, event_level = "first", case_weights = case_weights ) }
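# Quick equivalence sketch (illustrative): per the documentation above,
# `roc_aunp()` should agree with `roc_auc(estimator = "macro_weighted")`
# on the same data.
library(dplyr)
library(yardstick)
data(hpc_cv)

fold1 <- hpc_cv %>% filter(Resample == "Fold01")

roc_aunp(fold1, obs, VF:L)
roc_auc(fold1, obs, VF:L, estimator = "macro_weighted")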
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-roc_aunp.R
#' Area under the ROC curve of each class against the rest, using the uniform #' class distribution #' #' `roc_aunu()` is a multiclass metric that computes the area under the ROC #' curve of each class against the rest, using the uniform class distribution. #' This is equivalent to `roc_auc(estimator = "macro")`. #' #' @family class probability metrics #' @templateVar fn roc_aunu #' @template return #' @template event_first #' #' @section Multiclass: #' This multiclass method for computing the area under the ROC curve uses the #' uniform class distribution and is equivalent to #' `roc_auc(estimator = "macro")`. #' #' @inheritParams roc_auc #' #' @param ... A set of unquoted column names or one or more `dplyr` selector #' functions to choose which variables contain the class probabilities. There #' should be as many columns as factor levels of `truth`. #' #' @param estimate A matrix with as many #' columns as factor levels of `truth`. _It is assumed that these are in the #' same order as the levels of `truth`._ #' #' @references #' #' Ferri, C., Hernández-Orallo, J., & Modroiu, R. (2009). "An experimental #' comparison of performance measures for classification". _Pattern Recognition #' Letters_. 30 (1), pp 27-38. #' #' @seealso #' #' [roc_aunp()] for computing the area under the ROC curve of each class against #' the rest, using the a priori class distribution. #' #' @author Julia Silge #' #' @examples #' # Multiclass example #' #' # `obs` is a 4 level factor. The first level is `"VF"`, which is the #' # "event of interest" by default in yardstick. See the Relevant Level #' # section above. #' data(hpc_cv) #' #' # You can use the col1:colN tidyselect syntax #' library(dplyr) #' hpc_cv %>% #' filter(Resample == "Fold01") %>% #' roc_aunu(obs, VF:L) #' #' # Change the first level of `obs` from `"VF"` to `"M"` to alter the #' # event of interest. The class probability columns should be supplied #' # in the same order as the levels. #' hpc_cv %>% #' filter(Resample == "Fold01") %>% #' mutate(obs = relevel(obs, "M")) %>% #' roc_aunu(obs, M, VF:L) #' #' # Groups are respected #' hpc_cv %>% #' group_by(Resample) %>% #' roc_aunu(obs, VF:L) #' #' # Vector version #' # Supply a matrix of class probabilities #' fold1 <- hpc_cv %>% #' filter(Resample == "Fold01") #' #' roc_aunu_vec( #' truth = fold1$obs, #' matrix( #' c(fold1$VF, fold1$F, fold1$M, fold1$L), #' ncol = 4 #' ) #' ) #' @export roc_aunu <- function(data, ...) { UseMethod("roc_aunu") } roc_aunu <- new_prob_metric( roc_aunu, direction = "maximize" ) #' @export #' @rdname roc_aunu roc_aunu.data.frame <- function(data, truth, ..., na_rm = TRUE, case_weights = NULL, options = list()) { check_roc_options_deprecated("roc_aunu", options) prob_metric_summarizer( name = "roc_aunu", fn = roc_aunu_vec, data = data, truth = !!enquo(truth), ..., estimator = NULL, na_rm = na_rm, event_level = NULL, case_weights = !!enquo(case_weights) ) } #' @rdname roc_aunu #' @export roc_aunu_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, options = list(), ...) 
{
  abort_if_class_pred(truth)
  check_roc_options_deprecated("roc_aunu_vec", options)

  estimator <- "macro"
  check_prob_metric(truth, estimate, case_weights, estimator)

  if (na_rm) {
    result <- yardstick_remove_missing(truth, estimate, case_weights)
    truth <- result$truth
    estimate <- result$estimate
    case_weights <- result$case_weights
  } else if (yardstick_any_missing(truth, estimate, case_weights)) {
    return(NA_real_)
  }

  # `event_level` doesn't really matter, but we set it anyways
  roc_auc_vec(
    truth = truth,
    estimate = estimate,
    estimator = estimator,
    na_rm = FALSE,
    event_level = "first",
    case_weights = case_weights
  )
}
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-roc_aunu.R
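# ------------------------------------------------------------------------------
# Hedged illustration (not part of the package source above): as documented,
# `roc_aunu()` is equivalent to `roc_auc(estimator = "macro")`. Assumes
# yardstick and dplyr are attached and uses the bundled `hpc_cv` data.
library(yardstick)
library(dplyr)

hpc_cv %>%
  group_by(Resample) %>%
  roc_aunu(obs, VF:L)

hpc_cv %>%
  group_by(Resample) %>%
  roc_auc(obs, VF:L, estimator = "macro")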
#' Receiver operator curve #' #' @description #' `roc_curve()` constructs the full ROC curve and returns a #' tibble. See [roc_auc()] for the area under the ROC curve. #' #' @details #' `roc_curve()` computes the sensitivity at every unique #' value of the probability column (in addition to infinity and #' minus infinity). #' #' There is a [ggplot2::autoplot()] method for quickly visualizing the curve. #' This works for binary and multiclass output, and also works with grouped #' data (i.e. from resamples). See the examples. #' #' @family curve metrics #' @templateVar fn roc_curve #' @template multiclass-curve #' @template event_first #' #' @inheritParams roc_auc #' #' @return #' A tibble with class `roc_df` or `roc_grouped_df` having #' columns `.threshold`, `specificity`, and `sensitivity`. #' #' @seealso #' Compute the area under the ROC curve with [roc_auc()]. #' #' @author Max Kuhn #' @template examples-binary-prob #' @examples #' # --------------------------------------------------------------------------- #' # `autoplot()` #' #' # Visualize the curve using ggplot2 manually #' library(ggplot2) #' library(dplyr) #' roc_curve(two_class_example, truth, Class1) %>% #' ggplot(aes(x = 1 - specificity, y = sensitivity)) + #' geom_path() + #' geom_abline(lty = 3) + #' coord_equal() + #' theme_bw() #' #' # Or use autoplot #' autoplot(roc_curve(two_class_example, truth, Class1)) #' #' \dontrun{ #' #' # Multiclass one-vs-all approach #' # One curve per level #' hpc_cv %>% #' filter(Resample == "Fold01") %>% #' roc_curve(obs, VF:L) %>% #' autoplot() #' #' # Same as above, but will all of the resamples #' hpc_cv %>% #' group_by(Resample) %>% #' roc_curve(obs, VF:L) %>% #' autoplot() #' } #' #' @export roc_curve <- function(data, ...) { UseMethod("roc_curve") } #' @export #' @rdname roc_curve roc_curve.data.frame <- function(data, truth, ..., na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL, options = list()) { check_roc_options_deprecated("roc_curve", options) result <- curve_metric_summarizer( name = "roc_curve", fn = roc_curve_vec, data = data, truth = !!enquo(truth), ..., na_rm = na_rm, event_level = event_level, case_weights = !!enquo(case_weights) ) curve_finalize(result, data, "roc_df", "grouped_roc_df") } roc_curve_vec <- function(truth, estimate, na_rm = TRUE, event_level = yardstick_event_level(), case_weights = NULL, ...) { abort_if_class_pred(truth) estimator <- finalize_estimator(truth, metric_class = "roc_curve") check_prob_metric(truth, estimate, case_weights, estimator) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { cli::cli_abort(c( x = "Missing values were detected and {.code na_ra = FALSE}.", i = "Not able to perform calculations." 
)) } # estimate here is a matrix of class prob columns roc_curve_estimator_impl( truth = truth, estimate = estimate, estimator = estimator, event_level = event_level, case_weights = case_weights ) } roc_curve_estimator_impl <- function(truth, estimate, estimator, event_level, case_weights) { if (is_binary(estimator)) { roc_curve_binary(truth, estimate, event_level, case_weights) } else { roc_curve_multiclass(truth, estimate, case_weights) } } roc_curve_binary <- function(truth, estimate, event_level, case_weights) { lvls <- levels(truth) if (!is_event_first(event_level)) { lvls <- rev(lvls) } event <- lvls[[1]] control <- lvls[[2]] if (compute_n_occurrences(truth, event) == 0L) { stop_roc_truth_no_event(event) } if (compute_n_occurrences(truth, control) == 0L) { stop_roc_truth_no_control(control) } curve <- binary_threshold_curve( truth = truth, estimate = estimate, event_level = event_level, case_weights = case_weights ) threshold <- curve$threshold tp <- curve$tp fp <- curve$fp tpr <- tp / tp[length(tp)] fpr <- fp / fp[length(fp)] sensitivity <- tpr specificity <- 1 - fpr # In order of increasing specificity threshold <- rev(threshold) sensitivity <- rev(sensitivity) specificity <- rev(specificity) # Add first/last rows to the data frame to ensure the curve and # AUC metrics are computed correctly threshold <- c(-Inf, threshold, Inf) sensitivity <- c(1, sensitivity, 0) specificity <- c(0, specificity, 1) dplyr::tibble( .threshold = threshold, specificity = specificity, sensitivity = sensitivity ) } # One-VS-All approach roc_curve_multiclass <- function(truth, estimate, case_weights) { one_vs_all_with_level( fn = roc_curve_binary, truth = truth, estimate = estimate, case_weights = case_weights ) } check_roc_options_deprecated <- function(what, options) { if (!identical(options, list())) { warn_roc_options_deprecated(what) } } warn_roc_options_deprecated <- function(what) { lifecycle::deprecate_warn( when = "1.0.0", what = I(sprintf("The `options` argument of `%s()`", what)), details = paste( "This argument no longer has any effect, and is being ignored.", "Use the pROC package directly if you need these features." ) ) } # Dynamically exported autoplot.roc_df <- function(object, ...) { `%+%` <- ggplot2::`%+%` # Base chart roc_chart <- ggplot2::ggplot(data = object) # Add in group interactions if required if (inherits(object, "grouped_roc_df")) { grps <- dplyr::groups(object) grps_chr <- paste0(dplyr::group_vars(object), collapse = "_") interact_expr <- list( color = expr(interaction(!!!grps, sep = "_")) ) roc_chart <- roc_chart %+% ggplot2::labs(color = grps_chr) } else { interact_expr <- list() } # splice in the group interactions, or do nothing aes_spliced <- ggplot2::aes( x = 1 - specificity, y = sensitivity, !!!interact_expr ) # build the graph roc_chart <- roc_chart %+% ggplot2::geom_path(mapping = aes_spliced) %+% ggplot2::geom_abline(lty = 3) %+% ggplot2::coord_equal() %+% ggplot2::theme_bw() # If we have .level, that means this was multiclass # and we want to show 1 vs all graphs if (".level" %in% colnames(object)) { roc_chart <- roc_chart %+% ggplot2::facet_wrap(~.level) } roc_chart }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/prob-roc_curve.R
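# ------------------------------------------------------------------------------
# Hedged illustration (not part of the package source above): a minimal base-R
# version of the binary ROC construction in `roc_curve_binary()` --
# sensitivity and specificity evaluated at every unique probability threshold,
# with -Inf/Inf endpoints appended. It ignores case weights and the exact row
# ordering of `roc_curve()`; the toy `truth`/`estimate` vectors are made up.
truth <- factor(c("yes", "yes", "no", "yes", "no"), levels = c("yes", "no"))
estimate <- c(0.9, 0.8, 0.7, 0.4, 0.2) # P(class == "yes")

thresholds <- c(Inf, sort(unique(estimate), decreasing = TRUE), -Inf)
is_event <- truth == levels(truth)[[1L]]

curve <- data.frame(
  .threshold = thresholds,
  specificity = NA_real_,
  sensitivity = NA_real_
)

for (i in seq_along(thresholds)) {
  pred_event <- estimate >= thresholds[[i]]
  curve$sensitivity[[i]] <- sum(pred_event & is_event) / sum(is_event)
  curve$specificity[[i]] <- sum(!pred_event & !is_event) / sum(!is_event)
}

curve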
#' @importFrom generics tidy #' @export generics::tidy
/scratch/gouwar.j/cran-all/cranData/yardstick/R/reexports.R
#' Time-Dependent Brier score for right censored data #' #' Compute the time-dependent Brier score for right censored data, which is the #' mean squared error at time point `.eval_time`. #' #' @family dynamic survival metrics #' @templateVar fn brier_survival #' @template return-dynamic-survival #' @inheritParams pr_auc #' #' @param data A `data.frame` containing the columns specified by `truth` and #' `...`. #' #' @param truth The column identifier for the true survival result (that #' is created using [survival::Surv()].). This should be an unquoted column name #' although this argument is passed by expression and supports #' [quasiquotation][rlang::quasiquotation] (you can unquote column names). For #' `_vec()` functions, an [survival::Surv()] object. #' #' @param ... The column identifier for the survival probabilities this #' should be a list column of data.frames corresponding to the output given when #' predicting with [censored](https://censored.tidymodels.org/) model. This #' should be an unquoted column name although this argument is passed by #' expression and supports [quasiquotation][rlang::quasiquotation] (you can #' unquote column names). For `_vec()` functions, the dots are not used. #' #' @param estimate A list column of data.frames corresponding to the output #' given when predicting with [censored](https://censored.tidymodels.org/) #' model. See the details for more information regarding format. #' #' @details #' #' This formulation takes survival probability predictions at one or more #' specific _evaluation times_ and, for each time, computes the Brier score. To #' account for censoring, inverse probability of censoring weights (IPCW) are #' used in the calculations. #' #' The column passed to `...` should be a list column with one element per #' independent experiential unit (e.g. patient). The list column should contain #' data frames with several columns: #' #' - `.eval_time`: The time that the prediction is made. #' - `.pred_survival`: The predicted probability of survival up to `.eval_time` #' - `.weight_censored`: The case weight for the inverse probability of censoring. #' #' The last column can be produced using [parsnip::.censoring_weights_graf()]. #' This corresponds to the weighting scheme of Graf _et al_ (1999). The #' internal data set `lung_surv` shows an example of the format. #' #' This method automatically groups by the `.eval_time` argument. #' #' Smaller values of the score are associated with better model performance. #' #' @author Emil Hvitfeldt #' #' @references #' #' E. Graf, C. Schmoor, W. Sauerbrei, and M. Schumacher, “Assessment and #' comparison of prognostic classification schemes for survival data,” #' _Statistics in Medicine_, vol. 18, no. 17-18, pp. 2529–2545, 1999. #' #' @examples #' library(dplyr) #' #' lung_surv %>% #' brier_survival( #' truth = surv_obj, #' .pred #' ) #' @export brier_survival <- function(data, ...) { UseMethod("brier_survival") } brier_survival <- new_dynamic_survival_metric( brier_survival, direction = "minimize" ) #' @rdname brier_survival #' @export brier_survival.data.frame <- function(data, truth, ..., na_rm = TRUE, case_weights = NULL) { dynamic_survival_metric_summarizer( name = "brier_survival", fn = brier_survival_vec, data = data, truth = !!enquo(truth), ..., na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname brier_survival brier_survival_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) 
{
  check_dynamic_survival_metric(
    truth,
    estimate,
    case_weights
  )

  if (na_rm) {
    result <- yardstick_remove_missing(
      truth,
      seq_along(estimate),
      case_weights
    )

    truth <- result$truth
    estimate <- estimate[result$estimate]
    case_weights <- result$case_weights
  } else {
    any_missing <- yardstick_any_missing(
      truth,
      estimate,
      case_weights
    )
    if (any_missing) {
      return(NA_real_)
    }
  }

  dplyr::tibble(estimate) %>%
    tidyr::unnest(estimate) %>%
    dplyr::group_by(.eval_time) %>%
    dplyr::summarize(
      .estimate = brier_survival_impl(
        truth,
        .pred_survival,
        .weight_censored,
        case_weights,
        .eval_time
      )
    )
}

brier_survival_impl <- function(truth,
                                estimate,
                                censoring_weights,
                                case_weights,
                                eval_time) {
  surv_time <- .extract_surv_time(truth)
  surv_status <- .extract_surv_status(truth)

  if (!is.null(case_weights)) {
    case_weights <- vec_cast(case_weights, to = double())
    norm_const <- sum(case_weights)
  } else {
    case_weights <- rep(1, length(estimate))
    norm_const <- sum(!survival::is.na.Surv(truth))
  }

  category_1 <- surv_time <= eval_time & surv_status == 1
  category_2 <- surv_time > eval_time

  # (0 - estimate) ^ 2 == estimate ^ 2
  res <- (category_1 * estimate^2 * censoring_weights) +
    (category_2 * (1 - estimate)^2 * censoring_weights)

  res <- res * case_weights
  res <- sum(res, na.rm = TRUE)

  res / norm_const
}
/scratch/gouwar.j/cran-all/cranData/yardstick/R/surv-brier_survival.R
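# ------------------------------------------------------------------------------
# Hedged illustration (not part of the package source above): the per-time
# Brier score that `brier_survival_impl()` computes, written out for a single
# evaluation time with unit case weights. All vectors are made up;
# `weight_censored` plays the role of the `.weight_censored` IPCW column and
# is NA where the weight is undefined.
eval_time <- 100
surv_time <- c(50, 80, 120, 200)            # observed time
surv_status <- c(1, 0, 1, 1)                # 1 = event, 0 = censored
pred_survival <- c(0.70, 0.55, 0.65, 0.90)  # predicted P(survival beyond eval_time)
weight_censored <- c(1.1, NA, 1.2, 1.0)

category_1 <- surv_time <= eval_time & surv_status == 1  # event observed by eval_time
category_2 <- surv_time > eval_time                      # still at risk at eval_time

brier <- sum(
  category_1 * pred_survival^2 * weight_censored +
    category_2 * (1 - pred_survival)^2 * weight_censored,
  na.rm = TRUE
) / length(surv_time)

brier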
#' Integrated Brier score for right censored data #' #' Compute the integrated Brier score for right censored data, which is an #' overall calculation of model performance for all values of `.eval_time`. #' #' @family dynamic survival metrics #' @templateVar fn brier_survival_integrated #' @template return-dynamic-survival #' @details #' #' The integrated time-dependent brier score is calculated in an "area under the #' curve" fashion. The brier score is calculated for each value of `.eval_time`. #' The area is calculated via the trapezoidal rule. The area is divided by the #' largest value of `.eval_time` to bring it into the same scale as the #' traditional brier score. #' #' Smaller values of the score are associated with better model performance. #' #' This formulation takes survival probability predictions at one or more #' specific _evaluation times_ and, for each time, computes the Brier score. #' To account for censoring, inverse probability of censoring weights #' (IPCW) are used in the calculations. #' #' The column passed to `...` should be a list column with one element per #' independent experiential unit (e.g. patient). The list column should contain #' data frames with several columns: #' #' - `.eval_time`: The time that the prediction is made. #' - `.pred_survival`: The predicted probability of survival up to `.eval_time` #' - `.weight_censored`: The case weight for the inverse probability of censoring. #' #' The last column can be produced using [parsnip::.censoring_weights_graf()]. #' This corresponds to the weighting scheme of Graf _et al_ (1999). The #' internal data set `lung_surv` shows an example of the format. #' #' This method automatically groups by the `.eval_time` argument. #' #' @inheritParams brier_survival #' #' @author Emil Hvitfeldt #' #' @references E. Graf, C. Schmoor, W. Sauerbrei, and M. Schumacher, “Assessment #' and comparison of prognostic classification schemes for survival data,” #' Statistics in Medicine, vol. 18, no. 17-18, pp. 2529–2545, 1999. #' #' @examples #' library(dplyr) #' #' lung_surv %>% #' brier_survival_integrated( #' truth = surv_obj, #' .pred #' ) #' @export brier_survival_integrated <- function(data, ...) { UseMethod("brier_survival_integrated") } brier_survival_integrated <- new_integrated_survival_metric( brier_survival_integrated, direction = "minimize" ) #' @rdname brier_survival_integrated #' @export brier_survival_integrated.data.frame <- function(data, truth, ..., na_rm = TRUE, case_weights = NULL) { dynamic_survival_metric_summarizer( name = "brier_survival_integrated", fn = brier_survival_integrated_vec, data = data, truth = !!enquo(truth), ..., na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname brier_survival_integrated brier_survival_integrated_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_dynamic_survival_metric( truth, estimate, case_weights ) num_eval_times <- get_unique_eval_times(estimate) if (num_eval_times < 2) { cli::cli_abort( "At least 2 evaluation times are required. \\ Only {num_eval_times} unique time was given." 
    )
  }

  if (na_rm) {
    result <- yardstick_remove_missing(
      truth,
      seq_along(estimate),
      case_weights
    )

    truth <- result$truth
    estimate <- estimate[result$estimate]
    case_weights <- result$case_weights
  } else {
    any_missing <- yardstick_any_missing(
      truth,
      estimate,
      case_weights
    )
    if (any_missing) {
      return(NA_real_)
    }
  }

  brier_survival_integrated_impl(truth, estimate, case_weights)
}

get_unique_eval_times <- function(x) {
  # Since validate_surv_truth_list_estimate() makes sure they are all the same
  length(x[[1]]$.eval_time)
}

brier_survival_integrated_impl <- function(truth, estimate, case_weights) {
  res <- brier_survival_vec(
    truth = truth,
    estimate = estimate,
    na_rm = FALSE,
    case_weights = case_weights
  )

  auc(res$.eval_time, res$.estimate) / max(res$.eval_time)
}
/scratch/gouwar.j/cran-all/cranData/yardstick/R/surv-brier_survival_integrated.R
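# ------------------------------------------------------------------------------
# Hedged illustration (not part of the package source above): the "area under
# the Brier curve" calculation in `brier_survival_integrated_impl()` --
# trapezoidal integration over `.eval_time`, rescaled by the largest evaluation
# time. The per-time Brier scores below are made up.
eval_time <- c(100, 200, 300, 400)
brier <- c(0.10, 0.14, 0.18, 0.20)

trapezoid_area <- sum(diff(eval_time) * (head(brier, -1) + tail(brier, -1)) / 2)
brier_integrated <- trapezoid_area / max(eval_time)

brier_integrated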
#' Concordance index for right-censored data #' #' Compute the Concordance index for right-censored data #' #' @family static survival metrics #' @templateVar fn concordance_survival #' @template return #' @details #' #' The concordance index is defined as the proportion of all comparable pairs in #' which the predictions and outcomes are concordant. #' #' Two observations are comparable if: #' #' 1. both of the observations experienced an event (at different times), or #' 2. the observation with the shorter observed survival time experienced an #' event, in which case the event-free subject “outlived” the other. #' #' A pair is not comparable if they experienced events at the same time. #' #' Concordance intuitively means that two samples were ordered correctly by the #' model. More specifically, two samples are concordant, if the one with a #' higher estimated risk score has a shorter actual survival time. #' #' Larger values of the score are associated with better model performance. #' #' @inheritParams brier_survival #' #' @param estimate The column identifier for the predicted time, this should be #' a numeric variables. This should be an unquoted column name although this #' argument is passed by expression and supports #' [quasiquotation][rlang::quasiquotation] (you can unquote column names). For #' `_vec()` functions, a numeric vector. #' #' @param ... Currently not used. #' #' @author Emil Hvitfeldt #' #' @references #' #' Harrell, F.E., Califf, R.M., Pryor, D.B., Lee, K.L., Rosati, R.A, #' “Multivariable prognostic models: issues in developing models, evaluating #' assumptions and adequacy, and measuring and reducing errors”, Statistics in #' Medicine, 15(4), 361-87, 1996. #' #' @examples #' concordance_survival( #' data = lung_surv, #' truth = surv_obj, #' estimate = .pred_time #' ) #' @export concordance_survival <- function(data, ...) { UseMethod("concordance_survival") } concordance_survival <- new_static_survival_metric( concordance_survival, direction = "maximize" ) #' @rdname concordance_survival #' @export concordance_survival.data.frame <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { static_survival_metric_summarizer( name = "concordance_survival", fn = concordance_survival_vec, data = data, truth = !!enquo(truth), estimate = !!enquo(estimate), na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname concordance_survival concordance_survival_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { check_static_survival_metric(truth, estimate, case_weights) if (na_rm) { result <- yardstick_remove_missing(truth, estimate, case_weights) truth <- result$truth estimate <- result$estimate case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { return(NA_real_) } concordance_survival_impl(truth, estimate, case_weights) } concordance_survival_impl <- function(truth, estimate, case_weights) { if (is.null(case_weights)) { case_weights <- rep(1, length(estimate)) } else { case_weights <- vec_cast(case_weights, to = double()) } survival::concordance(truth ~ estimate, weights = case_weights)$concordance }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/surv-concordance_survival.R
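# ------------------------------------------------------------------------------
# Hedged illustration (not part of the package source above): a naive O(n^2)
# version of the concordance index described in the documentation -- the share
# of comparable pairs ordered correctly. The real computation (including ties
# and case weights) is delegated to survival::concordance(); the toy data are
# made up, and `estimate` is a predicted event time, so a larger prediction
# should pair with a longer observed survival time.
surv_time <- c(10, 20, 30, 40)
surv_status <- c(1, 1, 0, 1)   # 1 = event, 0 = censored
estimate <- c(12, 18, 35, 33)  # predicted time

concordant <- 0
comparable <- 0
n <- length(surv_time)

for (i in seq_len(n - 1)) {
  for (j in seq(i + 1, n)) {
    # order the pair so `a` has the shorter observed time
    a <- if (surv_time[[i]] <= surv_time[[j]]) i else j
    b <- if (surv_time[[i]] <= surv_time[[j]]) j else i

    # comparable only if the shorter observed time corresponds to an event
    if (surv_status[[a]] == 1 && surv_time[[a]] < surv_time[[b]]) {
      comparable <- comparable + 1
      if (estimate[[a]] < estimate[[b]]) {
        concordant <- concordant + 1
      }
    }
  }
}

concordant / comparable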
#' Time-Dependent ROC AUC for Censored Data #' #' Compute the area under the ROC survival curve using predicted survival #' probabilities that corresponds to different time points. #' #' @family dynamic survival metrics #' @templateVar fn roc_auc_survival #' @template return-dynamic-survival #' @details #' #' This formulation takes survival probability predictions at one or more #' specific _evaluation times_ and, for each time, computes the area under the #' ROC curve. To account for censoring, inverse probability of censoring weights #' (IPCW) are used in the calculations. See equation 7 of section 4.3 in #' Blanche _at al_ (2013) for the details. #' #' The column passed to `...` should be a list column with one element per #' independent experiential unit (e.g. patient). The list column should contain #' data frames with several columns: #' #' - `.eval_time`: The time that the prediction is made. #' - `.pred_survival`: The predicted probability of survival up to `.eval_time` #' - `.weight_censored`: The case weight for the inverse probability of censoring. #' #' The last column can be produced using [parsnip::.censoring_weights_graf()]. #' This corresponds to the weighting scheme of Graf _et al_ (1999). The #' internal data set `lung_surv` shows an example of the format. #' #' This method automatically groups by the `.eval_time` argument. #' #' Larger values of the score are associated with better model performance. #' #' @seealso #' Compute the ROC survival curve with [roc_curve_survival()]. #' #' @inheritParams brier_survival #' #' @author Emil Hvitfeldt #' #' @references #' #' Blanche, P., Dartigues, J.-F. and Jacqmin-Gadda, H. (2013), Review and #' comparison of ROC curve estimators for a time-dependent outcome with #' marker-dependent censoring. _Biom. J._, 55: 687-704. #' #' Graf, E., Schmoor, C., Sauerbrei, W. and Schumacher, M. (1999), Assessment #' and comparison of prognostic classification schemes for survival data. #' _Statist. Med._, 18: 2529-2545. #' #' @examples #' library(dplyr) #' #' lung_surv %>% #' roc_auc_survival( #' truth = surv_obj, #' .pred #' ) #' @export roc_auc_survival <- function(data, ...) { UseMethod("roc_auc_survival") } roc_auc_survival <- new_dynamic_survival_metric( roc_auc_survival, direction = "maximize" ) #' @rdname roc_auc_survival #' @export roc_auc_survival.data.frame <- function(data, truth, ..., na_rm = TRUE, case_weights = NULL) { dynamic_survival_metric_summarizer( name = "roc_auc_survival", fn = roc_auc_survival_vec, data = data, truth = !!enquo(truth), ..., na_rm = na_rm, case_weights = !!enquo(case_weights) ) } #' @export #' @rdname roc_auc_survival roc_auc_survival_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) { # No checking since roc_curve_survival_vec() does checking curve <- roc_curve_survival_vec( truth = truth, estimate = estimate, na_rm = na_rm, case_weights = case_weights ) curve %>% dplyr::group_by(.eval_time) %>% dplyr::summarize(.estimate = roc_trap_auc(specificity, sensitivity)) } roc_trap_auc <- function(specificity, sensitivity) { not_na <- !is.na(sensitivity) & !is.na(specificity) sensitivity <- sensitivity[not_na] specificity <- specificity[not_na] auc(1 - specificity, sensitivity) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/surv-roc_auc_survival.R
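# ------------------------------------------------------------------------------
# Hedged illustration (not part of the package source above): the reduction
# performed by `roc_trap_auc()` -- integrating sensitivity over
# (1 - specificity) with the trapezoidal rule, once per `.eval_time`. The curve
# values below are made up and already sorted by increasing 1 - specificity.
specificity <- c(1.00, 0.75, 0.50, 0.25, 0.00)
sensitivity <- c(0.00, 0.40, 0.70, 0.90, 1.00)

x <- 1 - specificity
auc_value <- sum(diff(x) * (head(sensitivity, -1) + tail(sensitivity, -1)) / 2)

auc_value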
#' Time-Dependent ROC surve for Censored Data #' #' Compute the ROC survival curve using predicted survival probabilities that #' corresponds to different time points. #' #' @family survival curve metrics #' @templateVar fn roc_curve_survival #' #' @inheritParams brier_survival #' #' @details #' #' This formulation takes survival probability predictions at one or more #' specific _evaluation times_ and, for each time, computes the ROC curve. To #' account for censoring, inverse probability of censoring weights (IPCW) are #' used in the calculations. See equation 7 of section 4.3 in Blanche _at al_ #' (2013) for the details. #' #' The column passed to `...` should be a list column with one element per #' independent experiential unit (e.g. patient). The list column should contain #' data frames with several columns: #' #' - `.eval_time`: The time that the prediction is made. #' - `.pred_survival`: The predicted probability of survival up to `.eval_time` #' - `.weight_censored`: The case weight for the inverse probability of censoring. #' #' The last column can be produced using [parsnip::.censoring_weights_graf()]. #' This corresponds to the weighting scheme of Graf _et al_ (1999). The #' internal data set `lung_surv` shows an example of the format. #' #' This method automatically groups by the `.eval_time` argument. #' #' @return #' A tibble with class `roc_survival_df`, `grouped_roc_survival_df` having #' columns `.threshold`, `sensitivity`, `specificity`, and `.eval_time`. #' #' @seealso #' Compute the area under the ROC survival curve with [roc_auc_survival()]. #' #' @author Emil Hvitfeldt #' #' @references #' #' Blanche, P., Dartigues, J.-F. and Jacqmin-Gadda, H. (2013), Review and #' comparison of ROC curve estimators for a time-dependent outcome with #' marker-dependent censoring. _Biom. J._, 55: 687-704. #' #' Graf, E., Schmoor, C., Sauerbrei, W. and Schumacher, M. (1999), Assessment #' and comparison of prognostic classification schemes for survival data. #' _Statist. Med._, 18: 2529-2545. #' #' @examples #' result <- roc_curve_survival( #' lung_surv, #' truth = surv_obj, #' .pred #' ) #' result #' #' # --------------------------------------------------------------------------- #' # `autoplot()` #' #' # Visualize the curve using ggplot2 manually #' library(ggplot2) #' library(dplyr) #' result %>% #' mutate(.eval_time = format(.eval_time)) %>% #' ggplot(aes( #' x = 1 - specificity, y = sensitivity, #' group = .eval_time, col = .eval_time #' )) + #' geom_step(direction = "hv") + #' geom_abline(lty = 3) + #' coord_equal() + #' theme_bw() #' #' # Or use autoplot #' autoplot(result) #' @export roc_curve_survival <- function(data, ...) { UseMethod("roc_curve_survival") } #' @export #' @rdname roc_curve_survival roc_curve_survival.data.frame <- function(data, truth, ..., na_rm = TRUE, case_weights = NULL) { result <- curve_survival_metric_summarizer( name = "roc_curve_survival", fn = roc_curve_survival_vec, data = data, truth = !!enquo(truth), ..., na_rm = na_rm, case_weights = !!enquo(case_weights) ) curve_finalize(result, data, "roc_survival_df", "grouped_roc_survival_df") } roc_curve_survival_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) 
{ check_dynamic_survival_metric( truth, estimate, case_weights ) if (na_rm) { result <- yardstick_remove_missing( truth, seq_along(estimate), case_weights ) truth <- result$truth estimate <- estimate[result$estimate] case_weights <- result$case_weights } else if (yardstick_any_missing(truth, estimate, case_weights)) { cli::cli_abort(c( x = "Missing values were detected and {.code na_ra = FALSE}.", i = "Not able to perform calculations." )) } roc_curve_survival_impl( truth = truth, estimate = estimate, case_weights = case_weights ) } roc_curve_survival_impl <- function(truth, estimate, case_weights) { event_time <- .extract_surv_time(truth) delta <- .extract_surv_status(truth) case_weights <- vec_cast(case_weights, double()) if (is.null(case_weights)) { case_weights <- rep(1, length(delta)) } # Drop any `0` weights. # These shouldn't affect the result, but can result in wrong thresholds detect_zero_weight <- case_weights == 0 if (any(detect_zero_weight)) { detect_non_zero_weight <- !detect_zero_weight event_time <- event_time[detect_non_zero_weight] delta <- delta[detect_non_zero_weight] case_weights <- case_weights[detect_non_zero_weight] estimate <- estimate[detect_non_zero_weight] } data <- dplyr::tibble(event_time, delta, case_weights, estimate) data <- tidyr::unnest(data, cols = estimate) .eval_times <- unique(data$.eval_time) not_missing_pred_survival <- !is.na(data$.pred_survival) out <- list() for (i in seq_along(.eval_times)) { .eval_time_ind <- .eval_times[[i]] == data$.eval_time & not_missing_pred_survival res <- roc_curve_survival_impl_one( data$event_time[.eval_time_ind], data$delta[.eval_time_ind], data[.eval_time_ind, ], data$case_weights[.eval_time_ind] ) res$.eval_time <- .eval_times[[i]] out[[i]] <- res } dplyr::bind_rows(out) } roc_curve_survival_impl_one <- function(event_time, delta, data, case_weights) { res <- dplyr::tibble(.threshold = sort(unique(c(-Inf, data$.pred_survival, Inf)), decreasing = TRUE)) obs_time_le_time <- event_time <= data$.eval_time obs_time_gt_time <- event_time > data$.eval_time n <- nrow(data) sensitivity_denom <- sum(obs_time_le_time * delta * data$.weight_censored * case_weights, na.rm = TRUE) specificity_denom <- sum(obs_time_gt_time * data$.weight_censored * case_weights, na.rm = TRUE) data_df <- data.frame( le_time = obs_time_le_time, ge_time = obs_time_gt_time, delta = delta, weight_censored = data$.weight_censored, case_weights = case_weights ) data_split <- vec_split(data_df, data$.pred_survival) data_split <- data_split$val[order(data_split$key)] specificity <- vapply( data_split, function(x) sum(x$ge_time * x$weight_censored * x$case_weights, na.rm = TRUE), FUN.VALUE = numeric(1) ) specificity <- cumsum(specificity) specificity <- specificity / specificity_denom specificity <- dplyr::if_else(specificity > 1, 1, specificity) specificity <- dplyr::if_else(specificity < 0, 0, specificity) specificity <- c(0, specificity, 1) specificity <- 1 - specificity res$specificity <- specificity sensitivity <- vapply( data_split, function(x) sum(x$le_time * x$delta * x$weight_censored * x$case_weights, na.rm = TRUE), FUN.VALUE = numeric(1) ) sensitivity <- cumsum(sensitivity) sensitivity <- sensitivity / sensitivity_denom sensitivity <- dplyr::if_else(sensitivity > 1, 1, sensitivity) sensitivity <- dplyr::if_else(sensitivity < 0, 0, sensitivity) sensitivity <- c(0, sensitivity, 1) res$sensitivity <- sensitivity res } # Dynamically exported autoplot.roc_survival_df <- function(object, ...) 
{
  `%+%` <- ggplot2::`%+%`

  object$.eval_time <- format(object$.eval_time)

  # Base chart
  roc_chart <- ggplot2::ggplot(data = object)

  # create aesthetic
  roc_aes <- ggplot2::aes(
    x = 1 - specificity,
    y = sensitivity,
    color = .eval_time,
    group = .eval_time
  )

  # build the graph
  roc_chart <- roc_chart %+%
    ggplot2::geom_step(mapping = roc_aes, direction = "hv") %+%
    ggplot2::geom_abline(lty = 3) %+%
    ggplot2::coord_equal() %+%
    ggplot2::theme_bw()

  roc_chart
}
/scratch/gouwar.j/cran-all/cranData/yardstick/R/surv-roc_curve_survival.R
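# ------------------------------------------------------------------------------
# Hedged illustration (not part of the package source above): the IPCW
# sensitivity and specificity that `roc_curve_survival_impl_one()` accumulates
# across thresholds, written out for a single threshold and evaluation time.
# All vectors are made up; `weight_censored` plays the role of
# `.weight_censored`, and "predict an event by eval_time" is taken here to mean
# a predicted survival probability at or below the threshold.
eval_time <- 100
threshold <- 0.5
surv_time <- c(50, 80, 120, 200, 90)
surv_status <- c(1, 0, 1, 1, 1)
pred_survival <- c(0.30, 0.45, 0.80, 0.90, 0.60)
weight_censored <- c(1.1, NA, 1.2, 1.0, 1.05)

le_time <- surv_time <= eval_time  # potential "cases" at eval_time
gt_time <- surv_time > eval_time   # "controls" at eval_time
pred_event <- pred_survival <= threshold

sensitivity <- sum(pred_event * le_time * surv_status * weight_censored, na.rm = TRUE) /
  sum(le_time * surv_status * weight_censored, na.rm = TRUE)

specificity <- sum((!pred_event) * gt_time * weight_censored, na.rm = TRUE) /
  sum(gt_time * weight_censored, na.rm = TRUE)

c(sensitivity = sensitivity, specificity = specificity)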
#' Developer function for summarizing new metrics #' #' `numeric_metric_summarizer()`, `class_metric_summarizer()`, #' `prob_metric_summarizer()`, `curve_metric_summarizer()`, #' `dynamic_survival_metric_summarizer()`, and #' `static_survival_metric_summarizer()` are useful alongside [check_metric] and #' [yardstick_remove_missing] for implementing new custom metrics. These #' functions call the metric function inside `dplyr::summarise()` or #' `dplyr::reframe()` for `curve_metric_summarizer()`. See [Custom performance #' metrics](https://www.tidymodels.org/learn/develop/metrics/) for more #' information. #' #' @details #' #' `numeric_metric_summarizer()`, `class_metric_summarizer()`, #' `prob_metric_summarizer()`, `curve_metric_summarizer()`, #' `dynamic_survival_metric_summarizer()`, and #' `dynamic_survival_metric_summarizer()` are generally called from the data #' frame version of your metric function. It knows how to call your metric over #' grouped data frames and returns a `tibble` consistent with other metrics. #' #' @inheritParams rlang::args_dots_empty #' @inheritParams rlang::args_error_context #' #' @param name A single character representing the name of the metric to #' use in the `tibble` output. This will be modified to include the type #' of averaging if appropriate. #' #' @param fn The vector version of your custom metric function. It #' generally takes `truth`, `estimate`, `na_rm`, and any other extra arguments #' needed to calculate the metric. #' #' @param data The data frame with `truth` and `estimate` columns passed in from #' the data frame version of your metric function that called #' `numeric_metric_summarizer()`, `class_metric_summarizer()`, #' `prob_metric_summarizer()`, `curve_metric_summarizer()`, #' `dynamic_survival_metric_summarizer()`, or #' `static_survival_metric_summarizer()`. #' #' @param truth The unquoted column name corresponding to the `truth` column. #' #' @param estimate Generally, the unquoted column name corresponding to #' the `estimate` column. For metrics that take multiple columns through `...` #' like class probability metrics, this is a result of [dots_to_estimate()]. #' #' @param estimator This can either be `NULL` for the default auto-selection of #' averaging (`"binary"` or `"macro"`), or a single character to pass along to #' the metric implementation describing the kind of averaging to use. #' #' @param na_rm A `logical` value indicating whether `NA` values should be #' stripped before the computation proceeds. The removal is executed in #' [yardstick_remove_missing()]. #' #' @param event_level This can either be `NULL` to use the default `event_level` #' value of the `fn` or a single string of either `"first"` or `"second"` #' to pass along describing which level should be considered the "event". #' #' @param case_weights For metrics supporting case weights, an unquoted #' column name corresponding to case weights can be passed here. If not `NULL`, #' the case weights will be passed on to `fn` as the named argument #' `case_weights`. #' #' @param fn_options A named list of metric specific options. These #' are spliced into the metric function call using `!!!` from `rlang`. The #' default results in nothing being spliced into the call. 
#' #' @seealso [check_metric] [yardstick_remove_missing] [finalize_estimator()] [dots_to_estimate()] #' #' @name metric-summarizers NULL #' @rdname metric-summarizers #' @export numeric_metric_summarizer <- function(name, fn, data, truth, estimate, ..., na_rm = TRUE, case_weights = NULL, fn_options = list(), error_call = caller_env()) { check_dots_empty(call = error_call) truth <- enquo(truth) estimate <- enquo(estimate) case_weights <- enquo(case_weights) truth <- yardstick_eval_select( expr = truth, data = data, arg = "truth", error_call = error_call ) estimate <- yardstick_eval_select( expr = estimate, data = data, arg = "estimate", error_call = error_call ) if (quo_is_null(case_weights)) { group_case_weights <- NULL } else { case_weights <- yardstick_eval_select( expr = case_weights, data = data, arg = "case_weights", error_call = error_call ) } group_rows <- dplyr::group_rows(data) group_keys <- dplyr::group_keys(data) data <- dplyr::ungroup(data) groups <- vec_chop(data, indices = group_rows) out <- vector("list", length = length(groups)) for (i in seq_along(groups)) { group <- groups[[i]] group_truth <- group[[truth]] group_estimate <- group[[estimate]] if (is_string(case_weights)) { group_case_weights <- group[[case_weights]] } elt_out <- list( .metric = name, .estimator = finalize_estimator( group_truth, metric_class = name, call = error_call ), .estimate = inject( withCallingHandlers( fn( truth = group_truth, estimate = group_estimate, case_weights = group_case_weights, na_rm = na_rm, !!!fn_options ), error = function(cnd) { cnd$call <- error_call cnd_signal(cnd) } ) ) ) out[[i]] <- tibble::new_tibble(elt_out) } group_keys <- vec_rep_each(group_keys, times = list_sizes(out)) out <- vec_rbind(!!!out) out <- vec_cbind(group_keys, out) out } #' @rdname metric-summarizers #' @export class_metric_summarizer <- function(name, fn, data, truth, estimate, ..., estimator = NULL, na_rm = TRUE, event_level = NULL, case_weights = NULL, fn_options = list(), error_call = caller_env()) { check_dots_empty(call = error_call) truth <- enquo(truth) estimate <- enquo(estimate) case_weights <- enquo(case_weights) truth <- yardstick_eval_select( expr = truth, data = data, arg = "truth", error_call = error_call ) estimate <- yardstick_eval_select( expr = estimate, data = data, arg = "estimate", error_call = error_call ) if (quo_is_null(case_weights)) { group_case_weights <- NULL } else { case_weights <- yardstick_eval_select( expr = case_weights, data = data, arg = "case_weights", error_call = error_call ) } group_rows <- dplyr::group_rows(data) group_keys <- dplyr::group_keys(data) data <- dplyr::ungroup(data) groups <- vec_chop(data, indices = group_rows) out <- vector("list", length = length(groups)) for (i in seq_along(groups)) { group <- groups[[i]] group_truth <- group[[truth]] group_estimate <- group[[estimate]] if (is_string(case_weights)) { group_case_weights <- group[[case_weights]] } elt_out <- list( .metric = name, .estimator = finalize_estimator( group_truth, estimator, name, call = error_call ), .estimate = inject( withCallingHandlers( fn( truth = group_truth, estimate = group_estimate, case_weights = group_case_weights, na_rm = na_rm, !!!spliceable_argument(estimator, "estimator"), !!!spliceable_argument(event_level, "event_level"), !!!fn_options ), error = function(cnd) { cnd$call <- error_call cnd_signal(cnd) } ) ) ) out[[i]] <- tibble::new_tibble(elt_out) } group_keys <- vec_rep_each(group_keys, times = list_sizes(out)) out <- vec_rbind(!!!out) out <- vec_cbind(group_keys, out) out } 
#' @rdname metric-summarizers #' @export prob_metric_summarizer <- function(name, fn, data, truth, ..., estimator = NULL, na_rm = TRUE, event_level = NULL, case_weights = NULL, fn_options = list(), error_call = caller_env()) { truth <- enquo(truth) case_weights <- enquo(case_weights) truth <- yardstick_eval_select( expr = truth, data = data, arg = "truth", error_call = error_call ) estimate <- yardstick_eval_select_dots( ..., data = data, error_call = error_call ) if (quo_is_null(case_weights)) { group_case_weights <- NULL } else { case_weights <- yardstick_eval_select( expr = case_weights, data = data, arg = "case_weights", error_call = error_call ) } group_rows <- dplyr::group_rows(data) group_keys <- dplyr::group_keys(data) data <- dplyr::ungroup(data) groups <- vec_chop(data, indices = group_rows) out <- vector("list", length = length(groups)) for (i in seq_along(groups)) { group <- groups[[i]] group_truth <- group[[truth]] group_estimate <- prob_estimate_convert(group[estimate]) if (is_string(case_weights)) { group_case_weights <- group[[case_weights]] } elt_out <- list( .metric = name, .estimator = finalize_estimator( group_truth, estimator, name, call = error_call ), .estimate = inject( withCallingHandlers( fn( truth = group_truth, estimate = group_estimate, case_weights = group_case_weights, na_rm = na_rm, !!!spliceable_argument(estimator, "estimator"), !!!spliceable_argument(event_level, "event_level"), !!!fn_options ), error = function(cnd) { cnd$call <- error_call cnd_signal(cnd) } ) ) ) out[[i]] <- tibble::new_tibble(elt_out) } group_keys <- vec_rep_each(group_keys, times = list_sizes(out)) out <- vec_rbind(!!!out) out <- vec_cbind(group_keys, out) out } #' @rdname metric-summarizers #' @export curve_metric_summarizer <- function(name, fn, data, truth, ..., estimator = NULL, na_rm = TRUE, event_level = NULL, case_weights = NULL, fn_options = list(), error_call = caller_env()) { truth <- enquo(truth) case_weights <- enquo(case_weights) truth <- yardstick_eval_select( expr = truth, data = data, arg = "truth", error_call = error_call ) estimate <- yardstick_eval_select_dots( ..., data = data, error_call = error_call ) if (quo_is_null(case_weights)) { group_case_weights <- NULL } else { case_weights <- yardstick_eval_select( expr = case_weights, data = data, arg = "case_weights", error_call = error_call ) } group_rows <- dplyr::group_rows(data) group_keys <- dplyr::group_keys(data) data <- dplyr::ungroup(data) groups <- vec_chop(data, indices = group_rows) out <- vector("list", length = length(groups)) for (i in seq_along(groups)) { group <- groups[[i]] group_truth <- group[[truth]] group_estimate <- prob_estimate_convert(group[estimate]) if (is_string(case_weights)) { group_case_weights <- group[[case_weights]] } elt_out <- list( .metric = name, .estimator = finalize_estimator( group_truth, estimator, name, call = error_call ), .estimate = inject( withCallingHandlers( fn( truth = group_truth, estimate = group_estimate, case_weights = group_case_weights, na_rm = na_rm, !!!spliceable_argument(estimator, "estimator"), !!!spliceable_argument(event_level, "event_level"), !!!fn_options ), error = function(cnd) { cnd$call <- error_call cnd_signal(cnd) } ) ) ) elt_out <- vec_recycle_common(!!!elt_out) out[[i]] <- tibble::new_tibble(elt_out) } group_keys <- vec_rep_each(group_keys, times = list_sizes(out)) out <- vec_rbind(!!!out) out <- vec_cbind(group_keys, out) out } #' @rdname metric-summarizers #' @export dynamic_survival_metric_summarizer <- function(name, fn, data, truth, ..., na_rm 
= TRUE, case_weights = NULL, fn_options = list(), error_call = caller_env()) { truth <- enquo(truth) case_weights <- enquo(case_weights) truth <- yardstick_eval_select( expr = truth, data = data, arg = "truth", error_call = error_call ) estimate <- yardstick_eval_select_dots( ..., data = data, error_call = error_call ) if (quo_is_null(case_weights)) { group_case_weights <- NULL } else { case_weights <- yardstick_eval_select( expr = case_weights, data = data, arg = "case_weights", error_call = error_call ) } group_rows <- dplyr::group_rows(data) group_keys <- dplyr::group_keys(data) data <- dplyr::ungroup(data) groups <- vec_chop(data, indices = group_rows) out <- vector("list", length = length(groups)) for (i in seq_along(groups)) { group <- groups[[i]] group_truth <- group[[truth]] group_estimate <- group[[estimate]] if (is_string(case_weights)) { group_case_weights <- group[[case_weights]] } elt_out <- list( .metric = name, .estimator = finalize_estimator( group_truth, metric_class = name, call = error_call ), .estimate = inject( withCallingHandlers( fn( truth = group_truth, estimate = group_estimate, case_weights = group_case_weights, na_rm = na_rm, !!!fn_options ), error = function(cnd) { cnd$call <- error_call cnd_signal(cnd) } ) ) ) elt_out <- vec_recycle_common(!!!elt_out) out[[i]] <- tibble::new_tibble(elt_out) } group_keys <- vec_rep_each(group_keys, times = list_sizes(out)) out <- vec_rbind(!!!out) out <- vec_cbind(group_keys, out) if (inherits(out$.estimate, "tbl_df")) { out <- tidyr::unnest(out, .estimate) } out } #' @rdname metric-summarizers #' @export static_survival_metric_summarizer <- function(name, fn, data, truth, estimate, ..., na_rm = TRUE, case_weights = NULL, fn_options = list(), error_call = caller_env()) { check_dots_empty(call = error_call) truth <- enquo(truth) estimate <- enquo(estimate) case_weights <- enquo(case_weights) truth <- yardstick_eval_select( expr = truth, data = data, arg = "truth", error_call = error_call ) estimate <- yardstick_eval_select( expr = estimate, data = data, arg = "estimate", error_call = error_call ) if (quo_is_null(case_weights)) { group_case_weights <- NULL } else { case_weights <- yardstick_eval_select( expr = case_weights, data = data, arg = "case_weights", error_call = error_call ) } group_rows <- dplyr::group_rows(data) group_keys <- dplyr::group_keys(data) data <- dplyr::ungroup(data) groups <- vec_chop(data, indices = group_rows) out <- vector("list", length = length(groups)) for (i in seq_along(groups)) { group <- groups[[i]] group_truth <- group[[truth]] group_estimate <- group[[estimate]] if (is_string(case_weights)) { group_case_weights <- group[[case_weights]] } elt_out <- list( .metric = name, .estimator = finalize_estimator( group_truth, metric_class = name, call = error_call ), .estimate = inject( withCallingHandlers( fn( truth = group_truth, estimate = group_estimate, case_weights = group_case_weights, na_rm = na_rm, !!!fn_options ), error = function(cnd) { cnd$call <- error_call cnd_signal(cnd) } ) ) ) out[[i]] <- tibble::new_tibble(elt_out) } group_keys <- vec_rep_each(group_keys, times = list_sizes(out)) out <- vec_rbind(!!!out) out <- vec_cbind(group_keys, out) out } #' @rdname metric-summarizers #' @export curve_survival_metric_summarizer <- function(name, fn, data, truth, ..., na_rm = TRUE, case_weights = NULL, fn_options = list(), error_call = caller_env()) { truth <- enquo(truth) case_weights <- enquo(case_weights) truth <- yardstick_eval_select( expr = truth, data = data, arg = "truth", error_call = 
error_call ) estimate <- yardstick_eval_select_dots( ..., data = data, error_call = error_call ) if (quo_is_null(case_weights)) { group_case_weights <- NULL } else { case_weights <- yardstick_eval_select( expr = case_weights, data = data, arg = "case_weights", error_call = error_call ) } group_rows <- dplyr::group_rows(data) group_keys <- dplyr::group_keys(data) data <- dplyr::ungroup(data) groups <- vec_chop(data, indices = group_rows) out <- vector("list", length = length(groups)) for (i in seq_along(groups)) { group <- groups[[i]] group_truth <- group[[truth]] group_estimate <- prob_estimate_convert(group[estimate]) if (is_string(case_weights)) { group_case_weights <- group[[case_weights]] } elt_out <- list( .metric = name, .estimator = finalize_estimator( group_truth, metric_class = name, call = error_call ), .estimate = inject( withCallingHandlers( fn( truth = group_truth, estimate = group_estimate, case_weights = group_case_weights, na_rm = na_rm, !!!fn_options ), error = function(cnd) { cnd$call <- error_call cnd_signal(cnd) } ) ) ) elt_out <- vec_recycle_common(!!!elt_out) out[[i]] <- tibble::new_tibble(elt_out) } group_keys <- vec_rep_each(group_keys, times = list_sizes(out)) out <- vec_rbind(!!!out) out <- vec_cbind(group_keys, out) out } prob_estimate_convert <- function(estimate) { check_data_frame(estimate, .internal = TRUE) n_estimate <- ncol(estimate) if (n_estimate == 0L) { cli::cli_abort( "{.arg estimate} should have errored during tidy-selection.", .internal = TRUE ) } else if (n_estimate == 1L) { # Unwrap single column `estimate`s estimate[[1L]] } else { # Otherwise multiclass case requires a matrix as.matrix(estimate) } } metric_tibbler <- function(.metric, .estimator, .estimate) { dplyr::tibble( .metric = .metric, .estimator = .estimator, .estimate = .estimate ) } spliceable_argument <- function(x, name) { if (is.null(x)) { return(list()) } out <- list(x) names(out) <- name out } yardstick_eval_select <- function(expr, data, arg, ..., error_call = caller_env()) { check_dots_empty(call = error_call) if (!quo_is_missing(expr) && inherits(quo_get_expr(expr), "name")) { expr_name <- as_name(expr) if (is_known_selection(expr_name)) { return(get_known_selection(expr_name)) } } out <- tidyselect::eval_select( expr = expr, data = data, allow_predicates = FALSE, allow_rename = FALSE, allow_empty = FALSE, error_call = error_call ) out <- names(out) if (length(out) != 1L) { cli::cli_abort( "{.arg arg} must select exactly 1 column from `data`, not {length(out)}.", call = error_call ) } set_known_selection(as_name(expr), out) out } yardstick_eval_select_dots <- function(..., data, error_call = caller_env()) { expr <- quo(!!substitute(...)) if (!quo_is_missing(expr) && inherits(quo_get_expr(expr), c("name", "call"))) { expr_label <- as_label(expr) if (is_known_selection(expr_label)) { return(get_known_selection(expr_label)) } } if ("estimate" %in% names(match.call(expand.dots = FALSE)$...)) { cli::cli_abort( c( x = "This metric doesn't use the {.arg estimate} argument.", i = "Specify the columns without {.code estimate = }." 
), call = error_call ) } out <- tidyselect::eval_select( expr = expr(c(...)), data = data, allow_predicates = FALSE, allow_rename = FALSE, allow_empty = FALSE, error_call = error_call ) out <- names(out) set_known_selection(as_label(expr), out) out } # store known selections (#428) ------------------------------------------------ is_known_selection <- function(expr_name) { if (!catalog_is_available() || !in_tuning_env()) { return(FALSE) } known_selections <- ns_env("tune")$tune_env$known_selections expr_name %in% names(known_selections) } get_known_selection <- function(expr_name) { known_selections <- ns_env("tune")$tune_env$known_selections known_selections[[expr_name]] } set_known_selection <- function(expr_name, out) { if (!catalog_is_available() || !in_tuning_env()) { return(invisible()) } tune_env <- ns_env("tune")$tune_env if (is.null(tune_env$known_selections)) { init_known_selection(tune_env) } tune_env$known_selections[[expr_name]] <- out invisible() } init_known_selection <- function(tune_env) { withr::defer( env_bind(tune_env, known_selections = NULL), envir = tune_env$progress_env ) } # `catalog_is_available()` defines the per-session condition for whether we # can store known selections, while `in_tuning_env()` defines the # per-tuning instance of the condition. we want to store known selections: # 1) when tune's cataloging machinery is available # 2) when called inside of a tuning env, regardless of whether the catalog is # being used to log errors. # notably, we don't do so when yardstick functions are called outside of # tune, in which case the overhead is `in_tuning_env()` after the first call. # # set up as a function with local variables so that we only have to run # `is_installed()` and `packageVersion()` once. catalog_is_available <- local({ tune_is_installed <- NULL tune_version_is_sufficient <- FALSE function() { if (is.null(tune_is_installed)) { tune_is_installed <<- is_installed("tune") if (tune_is_installed) { tune_version_is_sufficient <<- utils::packageVersion("tune") > "1.1.0" } } tune_is_installed && tune_version_is_sufficient } }) in_tuning_env <- function(tune_env = ns_env("tune")$tune_env) { isTRUE(tune_env$progress_active) }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/template.R
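# ------------------------------------------------------------------------------
# Hedged illustration (not part of the package source above): how a developer
# might wire a new numeric metric through `numeric_metric_summarizer()`,
# following the pattern its documentation describes. The metric name `mse` and
# its implementation are made up for illustration; case weights are accepted by
# the signature but ignored in the computation for brevity.
library(rlang)
library(yardstick)

mse_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) {
  check_numeric_metric(truth, estimate, case_weights)

  if (na_rm) {
    result <- yardstick_remove_missing(truth, estimate, case_weights)
    truth <- result$truth
    estimate <- result$estimate
    case_weights <- result$case_weights
  } else if (yardstick_any_missing(truth, estimate, case_weights)) {
    return(NA_real_)
  }

  mean((truth - estimate)^2)
}

mse <- function(data, truth, estimate, na_rm = TRUE, case_weights = NULL, ...) {
  numeric_metric_summarizer(
    name = "mse",
    fn = mse_vec,
    data = data,
    truth = !!enquo(truth),
    estimate = !!enquo(estimate),
    na_rm = na_rm,
    case_weights = !!enquo(case_weights)
  )
}

# optionally register the direction so the metric composes with metric_set()
mse <- new_numeric_metric(mse, direction = "minimize")

mse(solubility_test, truth = solubility, estimate = prediction)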
validate_numeric_truth_numeric_estimate <- function(truth, estimate, call = caller_env()) { if (!is.numeric(truth)) { cli::cli_abort( "{.arg truth} should be a numeric, \\ not {.obj_type_friendly {truth}}.", call = call ) } if (!is.numeric(estimate)) { cli::cli_abort( "{.arg estimate} should be a numeric, \\ not {.obj_type_friendly {estimate}}.", call = call ) } if (is.matrix(estimate)) { cli::cli_abort( "{.arg estimate} should be a numeric vector, not a numeric matrix.", call = call ) } if (is.matrix(truth)) { cli::cli_abort( "{.arg truth} should be a numeric vector, not a numeric matrix.", call = call ) } n_truth <- length(truth) n_estimate <- length(estimate) if (n_truth != n_estimate) { cli::cli_abort( "{.arg truth} ({n_truth}) and \\ {.arg estimate} ({n_estimate}) must be the same length.", call = call ) } } validate_factor_truth_factor_estimate <- function(truth, estimate, call = caller_env()) { if (!is.factor(truth)) { cli::cli_abort( "{.arg truth} should be a factor, \\ not a {.obj_type_friendly {truth}}.", call = call ) } if (!is.factor(estimate)) { cli::cli_abort( "{.arg estimate} should be a factor, \\ not a {.obj_type_friendly {estimate}}.", call = call ) } lvls_t <- levels(truth) lvls_e <- levels(estimate) if (!identical(lvls_t, lvls_e)) { cli::cli_abort( c( "x" = "{.arg truth} and {.arg estimate} levels must be equivalent.", "*" = "{.arg truth}: {lvls_t}.", "*" = "{.arg estimate}: {lvls_e}." ), call = call ) } n_truth <- length(truth) n_estimate <- length(estimate) if (n_truth != n_estimate) { cli::cli_abort( "{.arg truth} ({n_truth}) and \\ {.arg estimate} ({n_estimate}) must be the same length.", call = call ) } } validate_factor_truth_matrix_estimate <- function(truth, estimate, estimator, call = caller_env()) { if (!is.factor(truth)) { cli::cli_abort( "{.arg truth} should be a factor, \\ not a {.obj_type_friendly {truth}}.", call = call ) } if (estimator == "binary") { if (is.matrix(estimate)) { cli::cli_abort( "You are using a binary metric but have passed multiple columns to \\ {.arg ...}.", call = call ) } if (!is.numeric(estimate)) { cli::cli_abort( "{.arg estimate} should be a numeric vector, \\ not {.obj_type_friendly {estimate}}.", call = call ) } n_lvls <- length(levels(truth)) if (n_lvls != 2) { cli::cli_abort( "{.arg estimator} is binary, only two class {.arg truth} factors are \\ allowed. 
A factor with {n_lvls} levels was provided.", call = call ) } } else { n_lvls <- length(levels(truth)) if (is.matrix(estimate)) { n_cols <- ncol(estimate) } else { n_cols <- 1L } if (n_lvls != n_cols) { cli::cli_abort( "The number of levels in `truth` ({n_lvls}) \\ must match the number of columns supplied in `...` ({n_cols}).", call = call ) } if (!is.numeric(as.vector(estimate))) { cls <- as.vector(estimate) cli::cli_abort( "The columns supplied in {.arg ...} should be numerics, \\ not {.cls cls}.", call = call ) } } } validate_surv_truth_list_estimate <- function(truth, estimate, call = caller_env()) { if (!inherits(truth, "Surv")) { cli::cli_abort( "`truth` should be a Surv object, \\ not a {.obj_type_friendly {truth}}.", call = call ) } if (!is.list(estimate)) { cli::cli_abort( "{.arg estimate} should be a list, \\ not a {.obj_type_friendly {estimate}}.", call = call ) } if (!all(vapply(estimate, is.data.frame, FUN.VALUE = logical(1)))) { cli::cli_abort( "All elements of {.arg estimate} should be data.frames.", call = call ) } valid_names <- c(".eval_time", ".pred_survival", ".weight_censored") has_names <- vapply( estimate, function(x) all(valid_names %in% names(x)), FUN.VALUE = logical(1) ) if (!all(has_names)) { cli::cli_abort( "All data.frames of {.arg estimate} should include column names: \\ {.field (.eval_time)}, {.field (.pred_survival)}, and \\ {.field (.weight_censored)}.", call = call) } n_truth <- nrow(truth) n_estimate <- length(estimate) if (n_truth != n_estimate) { cli::cli_abort( "{.arg truth} ({n_truth}) and \\ {.arg estimate} ({n_estimate}) must be the same length.", call = call ) } eval_time_cols <- lapply(estimate, function(x) x$.eval_time) if (length(unique(eval_time_cols)) > 1) { offenders <- vapply( eval_time_cols, function(x) !identical(x, eval_time_cols[[1]]), logical(1) ) offenders <- which(offenders) cli::cli_abort( c( x = "All the {.field .eval_time} columns of {.arg estimate} must be \\ identical.", i = "The folllowing index differed from the first: {.val {offenders}}." ), call = call ) } eval_time <- eval_time_cols[[1]] if (any(is.na(eval_time))) { cli::cli_abort( c( x = "Missing values in {.field .eval_time} are not allowed." ), call = call ) } if (any(eval_time < 0)) { offenders <- unique(eval_time[eval_time < 0]) cli::cli_abort( c( x = "Negative values of {.field .eval_time} are not allowed.", i = "The following negative values were found: {.val {offenders}}." ), call = call ) } if (any(is.infinite(eval_time))) { cli::cli_abort( c( x = "Infinite values of {.field .eval_time} are not allowed." ), call = call ) } if (any(duplicated(eval_time))) { cli::cli_abort( c( x = "Duplicate values of {.field .eval_time} are not allowed." 
), call = call ) } } validate_surv_truth_numeric_estimate <- function(truth, estimate, call = caller_env()) { if (!.is_surv(truth, fail = FALSE)) { cli::cli_abort( "{.arg truth} should be a Surv object, \\ not {.obj_type_friendly {truth}}.", call = call) } if (!is.numeric(estimate)) { cli::cli_abort( "{.arg estimate} should be a numeric, \\ not {.obj_type_friendly {estimate}}.", call = call ) } if (is.matrix(estimate)) { cli::cli_abort( "{.arg estimate} should be a numeric vector, not a numeric matrix.", call = call ) } n_truth <- nrow(truth) n_estimate <- length(estimate) if (n_truth != n_estimate) { cli::cli_abort( "{.arg truth} ({n_truth}) and \\ {.arg estimate} ({n_estimate}) must be the same length.", call = call ) } } validate_binary_estimator <- function(truth, estimator, call = caller_env()) { if (estimator != "binary") { return() } lvls <- levels(truth) if (length(lvls) != 2) { cli::cli_abort( "{.arg estimator} is binary, only two class {.arg truth} factors are \\ allowed. A factor with {length(lvls)} levels was provided.", call = call ) } } #' @section Estimator Validation: #' `validate_estimator()` is called from your metric specific method of #' `finalize_estimator_internal()` and ensures that a user provided estimator #' is of the right format and is one of the allowed values. #' #' @param estimator_override A character vector overriding the default allowed #' estimator list of #' `c("binary", "macro", "micro", "macro_weighted")`. Set #' this if your classification estimator does not support all of these methods. #' @rdname developer-helpers #' @export validate_estimator <- function(estimator, estimator_override = NULL, call = caller_env()) { if (is.null(estimator)) { return() } if (!is.null(estimator_override)) { allowed <- estimator_override } else { allowed <- c("binary", "macro", "micro", "macro_weighted") } check_string(estimator, call = call) estimator_ok <- (estimator %in% allowed) if (!estimator_ok) { cli::cli_abort( "{.arg estimator} must be one of: {allowed}, not {estimator}.", call = call ) } } validate_case_weights <- function(case_weights, size, call = caller_env()) { if (is.null(case_weights)) { return(invisible()) } size_case_weights <- length(case_weights) if (size_case_weights != size) { cli::cli_abort( "{.arg truth} ({size}) and \\ {.arg case_weights} ({size_case_weights}) must be the same length.", call = call ) } invisible() }
/scratch/gouwar.j/cran-all/cranData/yardstick/R/validation.R
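# ------------------------------------------------------------------------------
# Hedged illustration (not part of the package source above): the factor-level
# check enforced by `validate_factor_truth_factor_estimate()`, triggered
# through an exported metric. The toy factors are made up; the second call is
# expected to error because the level sets differ.
library(yardstick)

truth <- factor(c("a", "b", "a"), levels = c("a", "b"))
estimate_ok <- factor(c("a", "b", "b"), levels = c("a", "b"))
estimate_bad <- factor(c("a", "b", "c"), levels = c("a", "b", "c"))

accuracy_vec(truth, estimate_ok)
try(accuracy_vec(truth, estimate_bad))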
#' @keywords internal "_PACKAGE" ## usethis namespace: start #' @import rlang #' @import vctrs #' @importFrom dplyr %>% #' @importFrom dplyr as_tibble #' @importFrom lifecycle deprecated #' @useDynLib yardstick, .registration = TRUE ## usethis namespace: end NULL # Importing something from utils so we don't get dinged about having an # Import we don't use. We use `utils::globalVariables()` at a global scope, and # R CMD check doesn't detect that. Usually shows up as a NOTE on rhub's Linux # check machines. #' @importFrom utils globalVariables NULL
/scratch/gouwar.j/cran-all/cranData/yardstick/R/yardstick-package.R
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----pkgs, message = FALSE---------------------------------------------------- library(yardstick) library(dplyr) data("hpc_cv") ## ----hpc-cv------------------------------------------------------------------- tibble(hpc_cv) ## ----hpc-modify--------------------------------------------------------------- set.seed(1) hpc <- tibble(hpc_cv) %>% mutate(batch = sample(c("a", "b"), nrow(.), replace = TRUE)) %>% select(-c(VF, F, M, L)) hpc ## ----acc-1-------------------------------------------------------------------- hpc %>% filter(Resample == "Fold01") %>% accuracy(obs, pred) ## ----hpc-cv-2----------------------------------------------------------------- hpc %>% group_by(Resample) %>% accuracy(obs, pred) ## ----res-1-------------------------------------------------------------------- hpc %>% filter(Resample == "Fold01") ## ----acc-by-group------------------------------------------------------------- acc_by_group <- hpc %>% filter(Resample == "Fold01") %>% group_by(batch) %>% accuracy(obs, pred) acc_by_group ## ----diff-acc----------------------------------------------------------------- diff(c(acc_by_group$.estimate[2], acc_by_group$.estimate[1])) ## ----------------------------------------------------------------------------- accuracy_diff <- new_groupwise_metric( fn = accuracy, name = "accuracy_diff", aggregate = function(acc_by_group) { diff(c(acc_by_group$.estimate[2], acc_by_group$.estimate[1])) } ) ## ----acc-diff-class----------------------------------------------------------- class(accuracy_diff) ## ----acc-diff-by-------------------------------------------------------------- accuracy_diff_by_batch <- accuracy_diff(batch) ## ----metric-classes----------------------------------------------------------- class(accuracy) class(accuracy_diff_by_batch) ## ----ex-acc-diff-by-batch----------------------------------------------------- hpc %>% filter(Resample == "Fold01") %>% accuracy_diff_by_batch(obs, pred) ## ----ex-acc-diff-by-batch-ms-------------------------------------------------- acc_ms <- metric_set(accuracy, accuracy_diff_by_batch) hpc %>% filter(Resample == "Fold01") %>% acc_ms(truth = obs, estimate = pred) ## ----ex-acc-diff-by-batch-2--------------------------------------------------- hpc %>% group_by(Resample) %>% accuracy_diff_by_batch(obs, pred)
/scratch/gouwar.j/cran-all/cranData/yardstick/inst/doc/grouping.R
--- title: "Grouping behavior in yardstick" author: "Simon Couch" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Grouping behavior in yardstick} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` The 1.3.0 release of yardstick introduced an implementation for _groupwise metrics_. The use case motivating the implementation of this functionality is _fairness metrics_, though groupwise metrics have applications beyond that domain. Fairness metrics quantify the degree of disparity in a metric value across groups. To learn more about carrying out fairness-oriented analyses with tidymodels, see the blog post on the tidymodels website. This vignette will instead focus on groupwise metrics generally, clarifying the meaning of "groupwise" and demonstrating functionality with an example dataset. <!-- TODO: link to forthcoming tidymodels blog post --> ```{r pkgs, message = FALSE} library(yardstick) library(dplyr) data("hpc_cv") ``` # Group-awareness Even before the implementation of groupwise metrics, _all_ yardstick metrics had been _group-aware_. When grouped data is passed to a group-aware metric, it will return metric values calculated for each group. To demonstrate, we'll make use of the `hpc_cv` data set, containing class probabilities and class predictions for a linear discriminant analysis fit to the HPC data set of Kuhn and Johnson (2013). The model is evaluated via 10-fold cross-validation, and the predictions for all folds are included. ```{r hpc-cv} tibble(hpc_cv) ``` For the purposes of this vignette, we'll also add a column `batch` to the data and select off the columns for the class probabilities, which we don't need. ```{r hpc-modify} set.seed(1) hpc <- tibble(hpc_cv) %>% mutate(batch = sample(c("a", "b"), nrow(.), replace = TRUE)) %>% select(-c(VF, F, M, L)) hpc ``` If we wanted to compute the accuracy of the first resampled model, we could write: ```{r acc-1} hpc %>% filter(Resample == "Fold01") %>% accuracy(obs, pred) ``` The metric function returns one row, giving the `.metric`, `.estimator`, and `.estimate` for the whole data set it is passed. If we instead group the data by fold, metric functions like `accuracy` will know to compute values for each group; in the output, each row will correspond to a Resample. ```{r hpc-cv-2} hpc %>% group_by(Resample) %>% accuracy(obs, pred) ``` Note that the first row, corresponding to `Fold01`, gives the same value as manually filtering for the observations corresponding to the first resample and then computing the accuracy. This behavior is what we mean by group-awareness. When grouped data is passed to group-aware metric functions, they will return values for each group. # Groupwise metrics Groupwise metrics are associated with a data-column such that, when passed data with that column, the metric will temporarily group by that column, compute values for each of the groups defined by the column, and then aggregate the values computed for the temporary grouping back to the level of the input data's grouping. More concretely, let's turn to an example where there is no pre-existing grouping in the data. Consider the portion of the HPC data pertaining to the first resample: ```{r res-1} hpc %>% filter(Resample == "Fold01") ``` Suppose that the `batch`es in the data represent two groups for which model performance ought not to differ. 
To quantify the degree to which model performance differs for these two groups, we could compute accuracy values for each group separately, and then take their difference. First, computing accuracies:

```{r acc-by-group}
acc_by_group <- hpc %>% 
  filter(Resample == "Fold01") %>% 
  group_by(batch) %>% 
  accuracy(obs, pred)

acc_by_group
```

Now, taking the difference:

```{r diff-acc}
diff(c(acc_by_group$.estimate[2], acc_by_group$.estimate[1]))
```

Groupwise metrics encode the `group_by()` and aggregation step (in this case, subtraction) shown above into a yardstick metric. We can define a new groupwise metric with the `new_groupwise_metric()` function:

```{r}
accuracy_diff <- new_groupwise_metric(
  fn = accuracy,
  name = "accuracy_diff",
  aggregate = function(acc_by_group) {
    diff(c(acc_by_group$.estimate[2], acc_by_group$.estimate[1]))
  }
)
```

* The `fn` argument is the yardstick metric that will be computed for each data group.
* The `name` argument gives the name of the new metric we've created; we'll call ours "accuracy difference."
* The `aggregate` argument is a function defining how to go from `fn` output by group to a single numeric value.

The output, `accuracy_diff`, is a function subclass called a `metric_factory`:

```{r acc-diff-class}
class(accuracy_diff)
```

`accuracy_diff` now knows to take accuracy values for each group and then return the difference between the accuracies for the first and second group as output. The last thing we need to associate with the object is the name of the grouping variable to pass to `group_by()`; we can pass that variable name to `accuracy_diff` to do so:

```{r acc-diff-by}
accuracy_diff_by_batch <- accuracy_diff(batch)
```

The output, `accuracy_diff_by_batch`, is a yardstick metric function like any other:

```{r metric-classes}
class(accuracy)

class(accuracy_diff_by_batch)
```

<!-- TODO: once a print method is added, we can print this out and the meaning of "this is just a yardstick metric" will be clearer -->

We can use the `accuracy_diff_by_batch()` metric in the same way that we would use `accuracy()`. On its own:

```{r ex-acc-diff-by-batch}
hpc %>%
  filter(Resample == "Fold01") %>%
  accuracy_diff_by_batch(obs, pred)
```

We can also add `accuracy_diff_by_batch()` to metric sets:

```{r ex-acc-diff-by-batch-ms}
acc_ms <- metric_set(accuracy, accuracy_diff_by_batch)

hpc %>%
  filter(Resample == "Fold01") %>%
  acc_ms(truth = obs, estimate = pred)
```

_Groupwise metrics are group-aware._ When passed data with any grouping variables other than the column passed as the first argument to `accuracy_diff()`---in this case, `batch`---`accuracy_diff_by_batch()` will behave like any other yardstick metric. For example:

```{r ex-acc-diff-by-batch-2}
hpc %>%
  group_by(Resample) %>%
  accuracy_diff_by_batch(obs, pred)
```

Groupwise metrics form the backend of fairness metrics in tidymodels. To learn more about groupwise metrics and their applications in fairness problems, see `new_groupwise_metric()`.

<!-- TODO: link to tidyverse blog post and tidymodels articles. -->
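# Varying the aggregation

The `aggregate` argument is not limited to signed differences. As a minimal sketch (not taken from the yardstick documentation), here is a groupwise metric that reports the range of accuracies across groups instead. It reuses the `hpc` data and `batch` column created earlier in this vignette; the names `accuracy_range` and `accuracy_range_by_batch` are purely illustrative.

```{r acc-range-sketch}
accuracy_range <- new_groupwise_metric(
  fn = accuracy,
  name = "accuracy_range",
  aggregate = function(acc_by_group) {
    # largest minus smallest group-level accuracy
    max(acc_by_group$.estimate) - min(acc_by_group$.estimate)
  }
)

accuracy_range_by_batch <- accuracy_range(batch)

hpc %>%
  group_by(Resample) %>%
  accuracy_range_by_batch(obs, pred)
```

Because the aggregation is a maximum minus a minimum, this version is always non-negative, which can be convenient when the sign of the disparity is not of interest.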
/scratch/gouwar.j/cran-all/cranData/yardstick/inst/doc/grouping.Rmd
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----warning = FALSE, message = FALSE----------------------------------------- library(yardstick) library(dplyr) data("hpc_cv") hpc_cv %>% group_by(Resample) %>% slice(1:3) ## ----------------------------------------------------------------------------- hpc_cv %>% filter(Resample == "Fold01") %>% accuracy(obs, pred) ## ----------------------------------------------------------------------------- hpc_cv %>% group_by(Resample) %>% accuracy(obs, pred) ## ----------------------------------------------------------------------------- class_metrics <- metric_set(accuracy, kap) hpc_cv %>% group_by(Resample) %>% class_metrics(obs, estimate = pred) ## ----echo=FALSE, warning=FALSE, message=FALSE, results='asis'----------------- library(knitr) library(dplyr) yardns <- asNamespace("yardstick") fns <- lapply(names(yardns), get, envir = yardns) names(fns) <- names(yardns) get_metrics <- function(fns, type) { where <- vapply(fns, inherits, what = type, FUN.VALUE = logical(1)) paste0("`", sort(names(fns[where])), "()`") } all_metrics <- bind_rows( tibble(type = "class", metric = get_metrics(fns, "class_metric")), tibble(type = "class prob", metric = get_metrics(fns, "prob_metric")), tibble(type = "numeric", metric = get_metrics(fns, "numeric_metric")), tibble(type = "dynamic survival", metric = get_metrics(fns, "dynamic_survival_metric")), tibble(type = "static survival", metric = get_metrics(fns, "static_survival_metric")) ) kable(all_metrics, format = "html")
/scratch/gouwar.j/cran-all/cranData/yardstick/inst/doc/metric-types.R
--- title: "Metric types" author: "Davis Vaughan" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Metric types} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Metric types There are three main metric types in `yardstick`: class, class probability, and numeric. Each type of metric has standardized argument syntax, and all metrics return the same kind of output (a tibble with 3 columns). This standardization allows metrics to easily be grouped together and used with grouped data frames for computing on multiple resamples at once. Below are the five types of metrics, along with the types of the inputs they take. 1) **Class metrics** (hard predictions) - `truth` - factor - `estimate` - factor 2) **Class probability metrics** (soft predictions) - `truth` - factor - `estimate / ...` - multiple numeric columns containing class probabilities 3) **Numeric metrics** - `truth` - numeric - `estimate` - numeric 4) **Static survival metircs** - `truth` - Surv - `estimate` - numeric 5) **dynamic survival metrics** - `truth` - Surv - `...` - list of data.frames, each containing the 3 columns `.eval_time`, `.pred_survival, and `.weight_censored` ## Example In the following example, the `hpc_cv` data set is used. It contains class probabilities and class predictions for a linear discriminant analysis fit to the HPC data set of Kuhn and Johnson (2013). It is fit with 10 fold cross-validation, and the predictions for all folds are included. ```{r, warning = FALSE, message = FALSE} library(yardstick) library(dplyr) data("hpc_cv") hpc_cv %>% group_by(Resample) %>% slice(1:3) ``` 1 metric, 1 resample ```{r} hpc_cv %>% filter(Resample == "Fold01") %>% accuracy(obs, pred) ``` 1 metric, 10 resamples ```{r} hpc_cv %>% group_by(Resample) %>% accuracy(obs, pred) ``` 2 metrics, 10 resamples ```{r} class_metrics <- metric_set(accuracy, kap) hpc_cv %>% group_by(Resample) %>% class_metrics(obs, estimate = pred) ``` ## Metrics Below is a table of all of the metrics available in `yardstick`, grouped by type. ```{r, echo=FALSE, warning=FALSE, message=FALSE, results='asis'} library(knitr) library(dplyr) yardns <- asNamespace("yardstick") fns <- lapply(names(yardns), get, envir = yardns) names(fns) <- names(yardns) get_metrics <- function(fns, type) { where <- vapply(fns, inherits, what = type, FUN.VALUE = logical(1)) paste0("`", sort(names(fns[where])), "()`") } all_metrics <- bind_rows( tibble(type = "class", metric = get_metrics(fns, "class_metric")), tibble(type = "class prob", metric = get_metrics(fns, "prob_metric")), tibble(type = "numeric", metric = get_metrics(fns, "numeric_metric")), tibble(type = "dynamic survival", metric = get_metrics(fns, "dynamic_survival_metric")), tibble(type = "static survival", metric = get_metrics(fns, "static_survival_metric")) ) kable(all_metrics, format = "html") ```
/scratch/gouwar.j/cran-all/cranData/yardstick/inst/doc/metric-types.Rmd
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" )
/scratch/gouwar.j/cran-all/cranData/yardstick/inst/doc/multiclass.R
--- title: "Multiclass averaging" author: "Davis Vaughan" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Multiclass averaging} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction Classification metrics in `yardstick` where both the `truth` and `estimate` columns are factors are implemented for the binary and the multiclass case. The multiclass implementations use `micro`, `macro`, and `macro_weighted` averaging where applicable, and some metrics have their own specialized multiclass implementations. ## Macro averaging Macro averaging reduces your multiclass predictions down to multiple sets of binary predictions, calculates the corresponding metric for each of the binary cases, and then averages the results together. As an example, consider `precision` for the binary case. $$ Pr = \frac{TP}{TP + FP} $$ In the multiclass case, if there were levels `A`, `B`, `C` and `D`, macro averaging reduces the problem to multiple one-vs-all comparisons. The `truth` and `estimate` columns are recoded such that the only two levels are `A` and `other`, and then precision is calculated based on those recoded columns, with `A` being the "relevant" column. This process is repeated for the other 3 levels to get a total of 4 precision values. The results are then averaged together. The formula representation looks like this. For `k` classes: $$ Pr_{macro} = \frac{Pr_1 + Pr_2 + \ldots + Pr_k}{k} = Pr_1 \frac{1}{k} + Pr_2 \frac{1}{k} + \ldots + Pr_k \frac{1}{k} $$ where $PR_1$ is the precision calculated from recoding the multiclass predictions down to just `class 1` and `other`. Note that in macro averaging, all classes get equal weight when contributing their portion of the precision value to the total (here `1/4`). This might not be a realistic calculation when you have a large amount of class imbalance. In that case, a _weighted macro average_ might make more sense, where the weights are calculated by the frequency of that class in the `truth` column. $$ Pr_{weighted-macro} = Pr_1 \frac{\#Obs_1}{N} + Pr_2 \frac{\#Obs_2}{N} + \ldots + Pr_k \frac{\#Obs_k}{N} $$ ## Micro averaging Micro averaging treats the entire set of data as an aggregate result, and calculates 1 metric rather than `k` metrics that get averaged together. For precision, this works by calculating all of the true positive results for each class and using that as the numerator, and then calculating all of the true positive and false positive results for each class, and using that as the denominator. $$ Pr_{micro} = \frac{TP_1 + TP_2 + \ldots + TP_k}{(TP_1 + TP_2 + \ldots + TP_k) + (FP_1 + FP_2 + \ldots + FP_k)} $$ In this case, rather than each _class_ having equal weight, each _observation_ gets equal weight. This gives the classes with the most observations more power. ## Specialized multiclass implementations Some metrics have known analytical multiclass extensions, and do not need to use averaging to get an estimate of multiclass performance. Accuracy and Kappa use the same definitions as their binary counterpart, with accuracy counting up the number of correctly predicted true values out of the total number of true values, and kappa being a linear combination of two accuracy values. Matthews correlation coefficient (MCC) has a known multiclass generalization as well, sometimes called the $R_K$ statistic. 
Refer to [this page](https://en.wikipedia.org/wiki/Matthews_correlation_coefficient#Multiclass_case) for more details.

ROC AUC is an interesting metric in that it intuitively makes sense to perform macro averaging, which computes a multiclass AUC as the average of the area under multiple binary ROC curves. However, this loses an important property of the ROC AUC statistic in that its binary case is insensitive to class distribution. To combat this, a multiclass metric was created that retains insensitivity to class distribution, but does not have an easy visual interpretation like macro averaging. This is implemented as the `"hand_till"` method, and is the default for this metric.
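## Averaging methods in code

The averaging schemes described above are selected with the `estimator` argument of the class metric functions, and the Hand-Till method is the multiclass default for `roc_auc()`. The following minimal sketch uses the `hpc_cv` data set that ships with yardstick; it is meant only to show where the `estimator` argument goes, not as an analysis of that data.

```{r estimator-sketch, message = FALSE}
library(yardstick)
data("hpc_cv")

# Macro, weighted macro, and micro averaged precision
precision(hpc_cv, obs, pred, estimator = "macro")
precision(hpc_cv, obs, pred, estimator = "macro_weighted")
precision(hpc_cv, obs, pred, estimator = "micro")

# Hand-Till multiclass ROC AUC (the default for multiclass `roc_auc()`)
roc_auc(hpc_cv, obs, VF:L, estimator = "hand_till")
```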
/scratch/gouwar.j/cran-all/cranData/yardstick/inst/doc/multiclass.Rmd
--- title: "Grouping behavior in yardstick" author: "Simon Couch" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Grouping behavior in yardstick} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` The 1.3.0 release of yardstick introduced an implementation for _groupwise metrics_. The use case motivating the implementation of this functionality is _fairness metrics_, though groupwise metrics have applications beyond that domain. Fairness metrics quantify the degree of disparity in a metric value across groups. To learn more about carrying out fairness-oriented analyses with tidymodels, see the blog post on the tidymodels website. This vignette will instead focus on groupwise metrics generally, clarifying the meaning of "groupwise" and demonstrating functionality with an example dataset. <!-- TODO: link to forthcoming tidymodels blog post --> ```{r pkgs, message = FALSE} library(yardstick) library(dplyr) data("hpc_cv") ``` # Group-awareness Even before the implementation of groupwise metrics, _all_ yardstick metrics had been _group-aware_. When grouped data is passed to a group-aware metric, it will return metric values calculated for each group. To demonstrate, we'll make use of the `hpc_cv` data set, containing class probabilities and class predictions for a linear discriminant analysis fit to the HPC data set of Kuhn and Johnson (2013). The model is evaluated via 10-fold cross-validation, and the predictions for all folds are included. ```{r hpc-cv} tibble(hpc_cv) ``` For the purposes of this vignette, we'll also add a column `batch` to the data and select off the columns for the class probabilities, which we don't need. ```{r hpc-modify} set.seed(1) hpc <- tibble(hpc_cv) %>% mutate(batch = sample(c("a", "b"), nrow(.), replace = TRUE)) %>% select(-c(VF, F, M, L)) hpc ``` If we wanted to compute the accuracy of the first resampled model, we could write: ```{r acc-1} hpc %>% filter(Resample == "Fold01") %>% accuracy(obs, pred) ``` The metric function returns one row, giving the `.metric`, `.estimator`, and `.estimate` for the whole data set it is passed. If we instead group the data by fold, metric functions like `accuracy` will know to compute values for each group; in the output, each row will correspond to a Resample. ```{r hpc-cv-2} hpc %>% group_by(Resample) %>% accuracy(obs, pred) ``` Note that the first row, corresponding to `Fold01`, gives the same value as manually filtering for the observations corresponding to the first resample and then computing the accuracy. This behavior is what we mean by group-awareness. When grouped data is passed to group-aware metric functions, they will return values for each group. # Groupwise metrics Groupwise metrics are associated with a data-column such that, when passed data with that column, the metric will temporarily group by that column, compute values for each of the groups defined by the column, and then aggregate the values computed for the temporary grouping back to the level of the input data's grouping. More concretely, let's turn to an example where there is no pre-existing grouping in the data. Consider the portion of the HPC data pertaining to the first resample: ```{r res-1} hpc %>% filter(Resample == "Fold01") ``` Suppose that the `batch`es in the data represent two groups for which model performance ought not to differ. 
To quantify the degree to which model performance differs for these two groups, we could compute accuracy values for either group separately, and then take their difference. First, computing accuracies: ```{r acc-by-group} acc_by_group <- hpc %>% filter(Resample == "Fold01") %>% group_by(batch) %>% accuracy(obs, pred) acc_by_group ``` Now, taking the difference: ```{r diff-acc} diff(c(acc_by_group$.estimate[2], acc_by_group$.estimate[1])) ``` Groupwise metrics encode the `group_by()` and aggregation step (in this case, subtraction) shown above into a yardstick metric. We can define a new groupwise metric with the `new_groupwise_metric()` function: ```{r} accuracy_diff <- new_groupwise_metric( fn = accuracy, name = "accuracy_diff", aggregate = function(acc_by_group) { diff(c(acc_by_group$.estimate[2], acc_by_group$.estimate[1])) } ) ``` * The `fn` argument is the yardstick metric that will be computed for each data group. * The `name` argument gives the name of the new metric we've created; we'll call ours "accuracy difference." * The `aggregate` argument is a function defining how to go from `fn` output by group to a single numeric value. The output, `accuracy_diff`, is a function subclass called a `metric_factory`: ```{r acc-diff-class} class(accuracy_diff) ``` `accuracy_diff` now knows to take accuracy values for each group and then return the difference between the accuracy for the first and second result as output. The last thing we need to associate with the object is the name of the grouping variable to pass to `group_by()`; we can pass that variable name to `accuracy_diff` to do so: ```{r acc-diff-by} accuracy_diff_by_batch <- accuracy_diff(batch) ``` The output, `accuracy_diff_by_batch`, is a yardstick metric function like any other: ```{r metric-classes} class(accuracy) class(accuracy_diff_by_batch) ``` <!-- TODO: once a print method is added, we can print this out and the meaning of "this is just a yardstick metric" will be clearer --> We can use the `accuracy_diff_by_batch()` metric in the same way that we would use `accuracy()`. On its own: ```{r ex-acc-diff-by-batch} hpc %>% filter(Resample == "Fold01") %>% accuracy_diff_by_batch(obs, pred) ``` We can also add `accuracy_diff_by_batch()` to metric sets: ```{r ex-acc-diff-by-batch-ms} acc_ms <- metric_set(accuracy, accuracy_diff_by_batch) hpc %>% filter(Resample == "Fold01") %>% acc_ms(truth = obs, estimate = pred) ``` _Groupwise metrics are group-aware._ When passed data with any grouping variables other than the column passed as the first argument to `accuracy_diff()`---in this case, `group`---`accuracy_diff_by_batch()` will behave like any other yardstick metric. For example: ```{r ex-acc-diff-by-batch-2} hpc %>% group_by(Resample) %>% accuracy_diff_by_batch(obs, pred) ``` Groupwise metrics form the backend of fairness metrics in tidymodels. To learn more about groupwise metrics and their applications in fairness problems, see `new_groupwise_metric()`. <!-- TODO: link to tidyverse blog post and tidymodels articles. -->
/scratch/gouwar.j/cran-all/cranData/yardstick/vignettes/grouping.Rmd
--- title: "Metric types" author: "Davis Vaughan" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Metric types} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Metric types There are three main metric types in `yardstick`: class, class probability, and numeric. Each type of metric has standardized argument syntax, and all metrics return the same kind of output (a tibble with 3 columns). This standardization allows metrics to easily be grouped together and used with grouped data frames for computing on multiple resamples at once. Below are the five types of metrics, along with the types of the inputs they take. 1) **Class metrics** (hard predictions) - `truth` - factor - `estimate` - factor 2) **Class probability metrics** (soft predictions) - `truth` - factor - `estimate / ...` - multiple numeric columns containing class probabilities 3) **Numeric metrics** - `truth` - numeric - `estimate` - numeric 4) **Static survival metircs** - `truth` - Surv - `estimate` - numeric 5) **dynamic survival metrics** - `truth` - Surv - `...` - list of data.frames, each containing the 3 columns `.eval_time`, `.pred_survival, and `.weight_censored` ## Example In the following example, the `hpc_cv` data set is used. It contains class probabilities and class predictions for a linear discriminant analysis fit to the HPC data set of Kuhn and Johnson (2013). It is fit with 10 fold cross-validation, and the predictions for all folds are included. ```{r, warning = FALSE, message = FALSE} library(yardstick) library(dplyr) data("hpc_cv") hpc_cv %>% group_by(Resample) %>% slice(1:3) ``` 1 metric, 1 resample ```{r} hpc_cv %>% filter(Resample == "Fold01") %>% accuracy(obs, pred) ``` 1 metric, 10 resamples ```{r} hpc_cv %>% group_by(Resample) %>% accuracy(obs, pred) ``` 2 metrics, 10 resamples ```{r} class_metrics <- metric_set(accuracy, kap) hpc_cv %>% group_by(Resample) %>% class_metrics(obs, estimate = pred) ``` ## Metrics Below is a table of all of the metrics available in `yardstick`, grouped by type. ```{r, echo=FALSE, warning=FALSE, message=FALSE, results='asis'} library(knitr) library(dplyr) yardns <- asNamespace("yardstick") fns <- lapply(names(yardns), get, envir = yardns) names(fns) <- names(yardns) get_metrics <- function(fns, type) { where <- vapply(fns, inherits, what = type, FUN.VALUE = logical(1)) paste0("`", sort(names(fns[where])), "()`") } all_metrics <- bind_rows( tibble(type = "class", metric = get_metrics(fns, "class_metric")), tibble(type = "class prob", metric = get_metrics(fns, "prob_metric")), tibble(type = "numeric", metric = get_metrics(fns, "numeric_metric")), tibble(type = "dynamic survival", metric = get_metrics(fns, "dynamic_survival_metric")), tibble(type = "static survival", metric = get_metrics(fns, "static_survival_metric")) ) kable(all_metrics, format = "html") ```
/scratch/gouwar.j/cran-all/cranData/yardstick/vignettes/metric-types.Rmd
--- title: "Multiclass averaging" author: "Davis Vaughan" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Multiclass averaging} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction Classification metrics in `yardstick` where both the `truth` and `estimate` columns are factors are implemented for the binary and the multiclass case. The multiclass implementations use `micro`, `macro`, and `macro_weighted` averaging where applicable, and some metrics have their own specialized multiclass implementations. ## Macro averaging Macro averaging reduces your multiclass predictions down to multiple sets of binary predictions, calculates the corresponding metric for each of the binary cases, and then averages the results together. As an example, consider `precision` for the binary case. $$ Pr = \frac{TP}{TP + FP} $$ In the multiclass case, if there were levels `A`, `B`, `C` and `D`, macro averaging reduces the problem to multiple one-vs-all comparisons. The `truth` and `estimate` columns are recoded such that the only two levels are `A` and `other`, and then precision is calculated based on those recoded columns, with `A` being the "relevant" column. This process is repeated for the other 3 levels to get a total of 4 precision values. The results are then averaged together. The formula representation looks like this. For `k` classes: $$ Pr_{macro} = \frac{Pr_1 + Pr_2 + \ldots + Pr_k}{k} = Pr_1 \frac{1}{k} + Pr_2 \frac{1}{k} + \ldots + Pr_k \frac{1}{k} $$ where $PR_1$ is the precision calculated from recoding the multiclass predictions down to just `class 1` and `other`. Note that in macro averaging, all classes get equal weight when contributing their portion of the precision value to the total (here `1/4`). This might not be a realistic calculation when you have a large amount of class imbalance. In that case, a _weighted macro average_ might make more sense, where the weights are calculated by the frequency of that class in the `truth` column. $$ Pr_{weighted-macro} = Pr_1 \frac{\#Obs_1}{N} + Pr_2 \frac{\#Obs_2}{N} + \ldots + Pr_k \frac{\#Obs_k}{N} $$ ## Micro averaging Micro averaging treats the entire set of data as an aggregate result, and calculates 1 metric rather than `k` metrics that get averaged together. For precision, this works by calculating all of the true positive results for each class and using that as the numerator, and then calculating all of the true positive and false positive results for each class, and using that as the denominator. $$ Pr_{micro} = \frac{TP_1 + TP_2 + \ldots + TP_k}{(TP_1 + TP_2 + \ldots + TP_k) + (FP_1 + FP_2 + \ldots + FP_k)} $$ In this case, rather than each _class_ having equal weight, each _observation_ gets equal weight. This gives the classes with the most observations more power. ## Specialized multiclass implementations Some metrics have known analytical multiclass extensions, and do not need to use averaging to get an estimate of multiclass performance. Accuracy and Kappa use the same definitions as their binary counterpart, with accuracy counting up the number of correctly predicted true values out of the total number of true values, and kappa being a linear combination of two accuracy values. Matthews correlation coefficient (MCC) has a known multiclass generalization as well, sometimes called the $R_K$ statistic. 
Refer to [this page](https://en.wikipedia.org/wiki/Matthews_correlation_coefficient#Multiclass_case) for more details. ROC AUC is an interesting metric in that it intuitively makes sense to perform macro averaging, which computes a multiclass AUC as the average of the area under multiple binary ROC curves. However, this loses an important property of the ROC AUC statistic in that its binary case is insensitive to class distribution. To combat this, a multiclass metric was created that retains insensitivity to class distribution, but does not have an easy visual interpretation like macro averaging. This is implemented as the `"hand_till"` method, and is the default for this metric.
/scratch/gouwar.j/cran-all/cranData/yardstick/vignettes/multiclass.Rmd
# yarr, Yet Another ARFF Reader # Copyright (C) 2019 David Charte & Francisco Charte # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # Functions to read ARFF files in different formats (sparse and dense) # #'@title Read an ARFF file #'@description Reads a dataset from an ARFF file, parsing each section and #' converting the data section into a `data.frame`. #'@param file Name of the file to read the data from #'@param stringsAsFactors Logical: should string attributes be converted to #' factors? (nominal attributes are always converted to factors) #'@return A `data.frame` with some attributes: #' #' - attributes: a named vector indicating the type of each variable #' - relation: the original `@relation` of the dataset #' #'Use `attr.names()`, `attr.types()` and `relation()` to consult attribute #'names, types and the name of the dataset, respectively. #' #'@examples #' #' library(yarr) #'\donttest{ #' yeast <- read.arff("yeast.arff") #'} #'@export # character -> logical -> data.frame read.arff <- function(file, stringsAsFactors = FALSE) { contents <- fix_types(read_arff_internal(file), stringsAsFactors) structure(contents, class = c("arff_data", class(contents))) } # data.frame -> logical -> data.frame fix_types <- function(contents, stringsAsFactors) { types <- attr(contents, "attributes") contents[contents == "?"] <- NA for (i in 1:length(types)) { if (types[i] %in% c("numeric", "integer", "real")) { contents[[i]] <- as.numeric(contents[[i]]) } else if (grepl("^\\s*\\{", types[i])) { contents[[i]] <- factor(contents[[i]], levels = read_factor_levels(types[i])[[1]]) } else if (stringsAsFactors) { contents[[i]] <- factor(contents[[i]]) } # else, contents[[i]] is already a character vector } contents } # Extracts all useful data from an ARFF file in an # R object # # @param arff_file Path to the file # @return data.frame with "variables" and "relation" attributes # character -> ... -> data.frame read_arff_internal <- function(arff_file, ...) 
{ file_con <- file(arff_file, "rb") if (!isOpen(file_con)) { open(file_con, "rb") } # Read whole file file_data <- strsplit(readChar(file_con, nchars = file.info(arff_file)$size, useBytes = TRUE), "\\\r\\\n|\\\r|\\\n", fixed = FALSE, useBytes = TRUE)[[1]] close(file_con) # Split into relation, attributes and data relation_at <- grep("@relation", file_data, ignore.case = TRUE) data_start <- grep("@data", file_data, ignore.case = TRUE) if (is.na(relation_at)) stop("Missing @relation or not unique.") if (is.na(data_start)) stop("Missing @data mark or not unique.") relation <- read_header(file_data[relation_at]) # Get attribute vector attributes <- parse_attributes(file_data[(relation_at + 1):(data_start - 1)]) num_attrs <- length(attributes) if (any(grepl("date", attributes))) warning("Date attributes will be read as strings") if (any(grepl("relational", attributes))) stop("Relational attributes not supported at the moment") # Ignore blank lines and comments before data data_start <- data_start + 1 rawdata <- file_data[data_start:length(file_data)] empty <- grep("^\\s*(%(.*?))?$", rawdata) rawdata <- if (length(empty) > 0) rawdata[-empty] else rawdata # Build character matrix with @data section dataset <- if (detect_sparsity(rawdata)) { parse_sparse_data(rawdata, defaults = sparse_defaults(attributes), ...) } else { parse_nonsparse_data(rawdata, num_attrs, ...) } rm(rawdata) dataset <- as.data.frame(dataset, stringsAsFactors = FALSE) colnames(dataset) <- names(attributes) rownames(dataset) <- NULL return(structure(dataset, relation = relation, attributes = attributes)) } # Reads the attributes section of an ARFF file # # @param arff_attrs Lines containing the attributes # @return A vector containing, for each # attribute, its name and its type # character[] -> named character[] parse_attributes <- function(arff_attrs) { # Extract attribute definitions #----------------------------------------------------------------------------------------------------- # Finding adequate spaces to split the attribute definition into 3 parts: # @attribute attr_name {0, 1} -> c("@attribute", "attr_name", "{0, 1}") # @attribute 'Attr. name' {0,1} -> c("@attribute", "'Attr. name'", "{0,1}") # @attribute 'David\'s attribute' {0,1} -> c("@attribute", "'David\'s attribute'", "{0,1}") #----------------------------------------------------------------------------------------------------- # Using the technique described under "Perl/PCRE Variation" in this StackOverflow answer: # (Regex Pattern to Match, Excluding when...) http://stackoverflow.com/a/23589204/5306389 # We capture any spacing character ignoring those within braces or single quotes, # allowing the appearance of escaped single quotes (\'). 
#----------------------------------------------------------------------------------------------------- # Regex tested in https://regex101.com/r/tE5mP1/20 #----------------------------------------------------------------------------------------------------- rgx <- "(?:{(?:.*?)}\\s*$|(?<!\\\\)'[^'\\\\]*(?:.*?)(?<!\\\\)'|(?<!\\\\)\"(?:.*?)(?<!\\\\)\"|(\\s*?)@)(*SKIP)(*F)|\\s+" att_list <- strsplit(arff_attrs, rgx, perl = TRUE) # Structure by rows att_mat <- matrix(unlist(att_list[sapply(att_list, function(row) { length(row) == 3 })]), ncol = 3, byrow = T) rm(att_list) # Filter any data that is not an attribute att_mat <- att_mat[grepl("^\\s*@attribute", att_mat[, 1], ignore.case = TRUE), 2:3, drop = FALSE] att_mat <- gsub("^'(.*?)'$", "\\1", att_mat, perl = T) att_mat <- gsub('^"(.*?)"$', "\\1", att_mat, perl = T) att_mat[, 1] <- gsub("\\'", "'", att_mat[, 1], fixed = T) att_mat[, 1] <- gsub('\\"', '"', att_mat[, 1], fixed = T) # Create the named vector att_v <- att_mat[, 2, drop = TRUE] names(att_v) <- att_mat[, 1, drop = TRUE] rm(att_mat) return(att_v) } # Reads the name and potential Meka parameters in the header of an # ARFF file # # @param arff_relation "relation" line of the ARFF file # @return Number of labels in the dataset # character -> character read_header <- function(arff_relation) { rgx <- regexpr("[\\w\\-\\._]+\\s*:\\s*-[Cc]\\s*-?\\d+", arff_relation, perl = TRUE) hdr <- strsplit(regmatches(arff_relation, rgx), "\\s*:\\s*-[Cc]\\s*") if (length(hdr) > 0) { # Meka header structure( hdr[[1]][1], c = as.numeric(hdr[[1]][2]) ) } else { # Normal header, unquoted or quoted (these should not match at the same # time) c(regmatches( arff_relation, regexpr( "(?<=\\s)([^\\s'\"]+?)(?=\\s*$)", arff_relation, perl = TRUE ) ), regmatches( arff_relation, regexpr( "(?<=\\s')(([^']|\\\\')+?)(?='\\s*$)", arff_relation, perl = TRUE ) ), regmatches( arff_relation, regexpr( "(?<=\\s\")(([^\"]|\\\\\")+?)(?=\"\\s*$)", arff_relation, perl = TRUE ) ))[1] # ensure scalar } } # Detects whether an ARFF file is in sparse format # # @param arff_data Content of the data section # @return Boolean, TRUE when the file is sparse # character[] -> logical detect_sparsity <- function(arff_data) { grepl("^\\s*\\{", arff_data[1]) } # Builds a data.frame out of non-sparse ARFF data # # @param arff_data Content of the data section # @return character matrix containing data values # character -> integer -> character[,] parse_nonsparse_data <- function(arff_data, num_attrs) { matrix( unlist(strsplit(arff_data, ",", fixed = T)), ncol = num_attrs, byrow = T ) } # Builds a data.frame out of sparse ARFF data # # @param arff_data Content of the data section # @return character matrix containing data values # character -> character[] -> character[,] parse_sparse_data <- function(arff_data, defaults) { # Extract data items arff_data <- strsplit(gsub("\\s*[\\{\\}]\\s*", "", arff_data), "\\s*,\\s*") dataset <- vapply(arff_data, function(item) { row <- unlist(strsplit(item, "\\s+")) # Build complete row with data complete <- defaults complete[as.integer(row[c(T, F)]) + 1] <- row[c(F, T)] complete }, defaults) matrix(dataset, ncol = length(defaults), byrow = T) } # The default value for a sparse variable is: # - 0, if the attribute is numeric # - The first value of the factor, if the attribute is categorical # - ""? 
if the type is string (the dataset was probably badly exported) # character[] -> character[] sparse_defaults <- function(attributes) { defaults <- vector(mode = "character", length = length(attributes)) # Detect factors, extract values factors <- which(grepl("^\\s*\\{", attributes)) values <- read_factor_levels(attributes[factors]) defaults[factors] <- sapply(values, function(v) v[1]) numeric <- which(attributes %in% c("numeric", "integer", "real")) defaults[numeric] <- "0" # will be converted to numeric later strings <- setdiff(1:length(attributes), union(factors, numeric)) defaults[strings] <- "" defaults } # character -> character[] read_factor_levels <- function(definition) { strsplit(gsub("\\s*[\\{\\}]\\s*", "", definition), "\\s*,\\s*") }
/scratch/gouwar.j/cran-all/cranData/yarr/R/read.R
# yarr, Yet Another ARFF Reader # Copyright (C) 2019 David Charte & Francisco Charte # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. #' @title Dataset utilities #' #' @description Some tools to access the metadata in ARFF files. `attr.names` #' retrieves the names of the attributes, `attr.types` returns the ARFF type #' of each variable, `relation` shows the name/relation specified in the #' `@relation` section of the file. #' #' @param x A dataset read using `read.arff` #' @rdname utils #' @export attr.names <- function(x) { colnames(x) } #' @rdname utils #' @export attr.types <- function(x) { attr(x, "attributes") } #' @rdname utils #' @export relation <- function(x) { attr(x, "relation") } #' @title Display functions #' @param x A data.frame read from an ARFF file #' @param max_attrs Maximum of attributes to be inspected #' @param max_values Maximum of values to be shown for each attribute #' @param ... Extra parameters for the corresponding S3 method for class #' `data.frame` #' @rdname display #' @export print.arff_data <- function(x, max_attrs = 10, max_values = 5, ...) { typ <- attr.types(x) cat("An ARFF dataset:", relation(x), "\n") cat(length(typ), "attributes and", nrow(x), "instances\n") sep <- " ...\n" if (length(typ) <= max_attrs) { sep <- "" max_attrs <- length(typ) } etc <- "..." if (nrow(x) <= max_values) { max_values <- nrow(x) etc <- "" } summ <- function(variable) { if (is.numeric(variable)) { s <- summary(variable) paste0("(min = ", s["Min."], ", mean = ", s["Mean"], ", max = ", s["Max."], ")") } else { t <- table(variable) mx <- min(length(t), 3) etc <- if (length(t) > 3) "..." else "" paste0("(", paste0(names(t)[1:mx], ": ", t[1:mx], collapse = ", "), etc, ")") } } vals <- function(variable) { if (max_values < 1) "" else paste0(paste0(variable[1:max_values], collapse = ", "), etc) } range <- if (max_attrs > 1) 1:(max_attrs - 1) else 1 for (i in range) { cat(" ", names(typ)[i], ": ", typ[i], " ", summ(x[[i]]), " ", vals(x[[i]]), "\n", sep = "") } if (max_attrs > 2) { cat(sep, " ", names(typ)[length(typ)], ": ", typ[length(typ)], " ", summ(x[[length(typ)]]), " ", vals(x[[length(typ)]]), "\n", sep = "") } }
/scratch/gouwar.j/cran-all/cranData/yarr/R/utils.R
# yarr, Yet Another ARFF Reader # Copyright (C) 2019 David Charte & Francisco Charte # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # Functions to export a dataset onto an ARFF file # #'@title Write a data.frame onto an ARFF file #'@description Takes a data frame and records it in ARFF (Attribute-Relation #' File Format). #'@param x A data.frame #'@param relation Name of the dataset (optional, it may be inferred from the #' `relation` attribute or the name of the variable passed as argument) #'@param types A character vector indicating the type of each variable #' (optional, may be inferred from the `attributes` attribute or computed from #' the class of each variable) #'@param file Name of the file where the data is to be written. Use `""` to #' write to standard output #'@param sparse Logical: write in sparse format? #'@param append Logical: append to an existing file? #'@param ... Extra parameters for internal functions #'@return Invisibly, the name of the file. #' @examples #' #' library(yarr) #'\donttest{ #' write.arff(iris, "iris", file = tempfile()) #'} #'@export write.arff <- function(x, relation = NULL, types = NULL, file = "", sparse = FALSE, append = FALSE, ...) { if (is.null(relation)) { relattr <- attr(x, "relation") relation <- if (is.null(relattr)) substitute(x) else relattr } if (is.null(types)) { types <- compute_types(x) } arffConnection <- if (file == "") stdout() else base::file(file, open = if (append) "a" else "w") on.exit(if (file != "") close(arffConnection)) export.arff(x, relation, types, sparse, arffConnection, ...) invisible(file) } compute_types <- function(x) UseMethod("compute_types") compute_types.arff_data <- function(x) attr.types(x) compute_types.default <- function(x) { types <- vector("character", ncol(x)) for (i in 1:length(types)) { types[i] <- if (is.numeric(x[[i]])) { "numeric" } else if (length(unique(x[[i]])) < length(x[[i]])) { paste0("{", paste(unique(x[[i]]), collapse = ","), "}") } else { "string" } } names(types) <- colnames(x) types } export.arff <- function(x, relation, types, sparse, con, ...) { writeLines(export.header(relation), con) writeLines(export.arff.attributes(types), con) export.arff.data(x, sparse, con = con, ...) } export.header <- function(relation) { paste0("@relation ", relation) } export.arff.attributes <- function(types) { attr_names <- names(types) attr_names <- ifelse(grepl("(\\s|\"|\')", attr_names), paste0("'", gsub( "'", "\\'", attr_names, fixed = T ), "'"), attr_names) paste("@attribute", attr_names, types) } export.arff.data <- function(x, sparse, con, header = "@data\n", ...) { x[is.na(x)] <- '?' cat(header, file = con) export.arff.chunks(x, con = con, sparse = sparse, ...) 
} export.dense.arff.data <- function(data) { do.call(paste, c(unname(data), list(sep = ','))) } export.sparse.arff.data <- function(data) { nonzero <- sapply(data, function(col) { if (is.numeric(col)) col != 0 # else if (is.factor(col)) col != levels(col)[1] else rep(TRUE, length(col)) }) ch_data <- sapply(data, as.character) sapply(1:nrow(data), function(i) { select <- nonzero[i, ] paste0( "{", paste( which(select) - 1, ch_data[i, select], sep = " ", collapse = "," ), "}" ) }) } export.arff.chunks <- function(data, con, chunk_size = floor(1e6 / ncol(data)), sparse = F, fun = if (sparse) export.sparse.arff.data else export.dense.arff.data) { num_instances <- dim(data)[1] chunks <- floor((num_instances - 1) / chunk_size) finished <- FALSE ch <- 0 while (!finished) { start <- 1 + ch * chunk_size end <- (ch + 1) * chunk_size end <- if (end < num_instances) { end } else { finished <- TRUE num_instances } chunk <- data[start:end, ] writeLines(fun(chunk), con) ch <- ch + 1 } }
/scratch/gouwar.j/cran-all/cranData/yarr/R/write.R
#' BeardLengths #' #' A dataframe containing the lengths of beards on 3 different pirate ships #' #' #' @format A data frame containing 150 rows and 2 columns #' \describe{ #' \item{Ship}{(character) - The pirate's ship} #' \item{Beard}{(numeric) - The length of the pirate's beard in cm} #' } #' @source 2015 annual international pirate meeting at the Bodensee in Konstanz, Germany #' #' #' "BeardLengths"
/scratch/gouwar.j/cran-all/cranData/yarrr/R/BeardLengths_doc.R
#' apa #' #' This function takes a hypothesis test object (e.g.; t.test(), cor.test(), chisq.test()) as an input, and returns a string with the test result in APA format. #' #' @param test.object A hypothesis test object generated by functions such as t.test(), cor.test, chisq.test() #' @param sig.digits The number of digits results are rounded to #' @param tails The number of tails in the test (1 or 2) #' @param p.lb The lower bound of the p-value display. If the p-value is less than p.lb, the exact value will not be displayed. #' @keywords apa #' @export #' @examples #' #' x <- rnorm(100) #' y <- x + rnorm(100) #' a <- sample(1:3, size = 200, prob = c(.3, .2, .5), replace = TRUE) #' b <- sample(1:3, size = 200, prob = c(.3, .2, .5), replace = TRUE) #' #' apa(t.test(x, y)) #' apa(cor.test(x, y)) #' apa(chisq.test(table(a, b))) #' #' #' apa <- function(test.object, tails = 2, sig.digits = 2, p.lb = .01) { statistic.id <- substr(names(test.object$statistic), start = 1, stop = 1) p.value <- test.object$p.value if(tails == 1) {p.value <- p.value / 2} if (p.value < p.lb) {p.display <- paste("p < ", p.lb, " (", tails, "-tailed)", sep = "")} if (p.value > p.lb) {p.display <- paste("p = ", round(p.value, sig.digits), " (", tails, "-tailed)", sep = "")} add.par <- "" if(grepl("product-moment", test.object$method)) { estimate.display <- paste("r = ", round(test.object$estimate, sig.digits), ", ", sep = "") } if(grepl("Chi", test.object$method)) { estimate.display <- "" add.par <- paste(", N = ", sum(test.object$observed), sep = "") } if(grepl("One Sample t-test", test.object$method)) { estimate.display <- paste("mean = ", round(test.object$estimate, sig.digits), ", ", sep = "") } if(grepl("Two Sample t-test", test.object$method)) { estimate.display <- paste("mean difference = ", round(test.object$estimate[2] - test.object$estimate[1], sig.digits), ", ", sep = "") } return(paste( estimate.display, statistic.id, "(", round(test.object$parameter, sig.digits), add.par, ") = ", round(test.object$statistic, sig.digits), ", ", p.display, sep = "" ) ) }
/scratch/gouwar.j/cran-all/cranData/yarrr/R/apa_function.R
#' auction #' #' A dataframe containing data from 1000 ships sold at a pirate auction. #' #' #' @format A data frame containing 1000 rows and 8 columns #' \describe{ #' \item{cannons}{(integer) The number of cannons on the ship} #' \item{rooms}{(integer) The number of rooms on the ship} #' \item{age}{(numeric) The age of the ship in years} #' \item{condition}{(integer) The condition of the ship on a scale of 1 to 10} #' \item{color}{(string) The color of the ship} #' \item{style}{(string) The style of the ship - either modern or classic} #' \item{jbb}{(numeric) The pre-sale predicted value of the ship according to Jack's Blue Book (JBB)} #' \item{price}{(numeric) The actual selling price of the ship (in gold pieces, obviously)} #' } #' @source 2015 annual pirate auction in Portland Oregon #' #' "auction"
/scratch/gouwar.j/cran-all/cranData/yarrr/R/auction_doc.R
#' capture #' #' A dataframe containing a historical record of every ship the Perilous Pigeon captured on the Bodensee in the years 2014 and 2015 #' #' #' @format A data frame containing 1000 rows and 10 columns #' \describe{ #' \item{size}{(integer) - The size (length) of the ship (maybe in meters?)} #' \item{cannons}{(integer) - The number of cannons on the ship} #' \item{style}{(string) - The style of the ship (either modern or classic)} #' \item{warnshot}{(binary) - Did the ship fire a warning shot at the Perilous Pigeon when it got close?} #' \item{date}{(integer) - The date of the capture (1 = January 1, 365 = December 31)} #' \item{heardof}{(binary) - Was the target ship recognized by the captain's first mate?} #' \item{decorations}{(integer) - An integer between 1 and 10 indicating how decorated the ship was. 1 means it looks totally common and shabby, 10 means it is among the finest looking ship you've ever seen!} #' \item{daysfromshore}{(integer) - How many days from the nearest land was the ship when it was found?} #' \item{speed}{(integer) - How fast was the ship going when it was caught?} #' \item{treasure}{(numeric) - How much treasure was found on the ship when it was captured?} #' } #' @source 2015 annual international pirate meeting at the Bodensee in Konstanz, Germany #' #' #' "capture"
/scratch/gouwar.j/cran-all/cranData/yarrr/R/capture_doc.R
#' diamonds
#'
#' A dataframe containing information about 150 diamonds sold at auction.
#'
#'
#' @format A data frame containing 150 rows and 4 columns
#' \describe{
#'   \item{weight}{(numeric) - The weight of the diamond}
#'   \item{clarity}{(numeric) - The clarity of the diamond}
#'   \item{color}{(numeric) - The color shading of the diamond}
#'   \item{value}{The value of the diamond}
#' }
#' @source 2015 annual international pirate meeting at the Bodensee in Konstanz, Germany
#'
#'
"diamonds"
/scratch/gouwar.j/cran-all/cranData/yarrr/R/diamonds_doc.R
#' examscores #' #' A dataframe containing the results of 4 exams given to 100 students. Each row represents a student, each column is a score on an exam #' #' #' @format A data frame containing 100 rows and 4 columns #' \describe{ #' \item{a}{(numeric) - Score on exam a} #' \item{b}{(numeric) - ...exam b} #' \item{c}{(numeric) - ...exam c} #' \item{d}{(numeric) - ...exam d} #' } #' @source 2015 annual international pirate meeting at the Bodensee in Konstanz, Germany #' #' #' "examscores"
/scratch/gouwar.j/cran-all/cranData/yarrr/R/examscores_doc.R
#'Opens the HTML manual for the yarrr package #' #'@keywords misc #'@export yarrr.guide <- function(){ vignette('guide', package = 'yarrr') }
/scratch/gouwar.j/cran-all/cranData/yarrr/R/guide_function.R
# This hdi function was written by Kruschke in the BEST package # https://github.com/cran/BEST/blob/master/R/hdi.R hdi <- function(object, credMass=0.95, ...) UseMethod("hdi") hdi.default <- function(object, credMass=0.95, ...) { if(!is.numeric(object)) stop(paste("No applicable method for class", class(object))) if(is.na(credMass) || length(credMass) != 1 || credMass <= 0 || credMass >= 1) stop("credMass must be in 0 < credMass < 1") if(all(is.na(object))) return(c(lower = NA_real_, upper = NA_real_)) # This is Mike's code from way back: x <- sort(object) # also removes NAs n <- length(x) # exclude <- ceiling(n * (1 - credMass)) # Not always the same as... exclude <- n - floor(n * credMass) # Number of values to exclude low.poss <- x[1:exclude] # Possible lower limits... upp.poss <- x[(n - exclude + 1):n] # ... and corresponding upper limits best <- which.min(upp.poss - low.poss) # Combination giving the narrowest interval result <- c(lower = low.poss[best], upper = upp.poss[best]) attr(result, "credMass") <- credMass return(result) } hdi.matrix <- function(object, credMass=0.95, ...) { result <- apply(object, 2, hdi.default, credMass=credMass, ...) attr(result, "credMass") <- credMass return(result) } hdi.data.frame <- function(object, credMass=0.95, ...) hdi.matrix(as.matrix(object), credMass=credMass, ...) hdi.mcmc.list <- function(object, credMass=0.95, ...) hdi.matrix(as.matrix(object), credMass=credMass, ...) # hdi.bugs <- function(object, credMass=0.95, ...) # hdi.matrix(object$sims.matrix, credMass=credMass, ...) # hdi.rjags <- function(object, credMass=0.95, ...) # hdi.matrix(object$BUGSoutput$sims.matrix, credMass=credMass, ...) hdi.function <- function(object, credMass=0.95, tol, ...) { if(is.na(credMass) || length(credMass) != 1 || credMass <= 0 || credMass >= 1) stop("credMass must be in 0 < credMass < 1") if(missing(tol)) tol <- 1e-8 if(class(try(object(0.5, ...), TRUE)) == "try-error") stop(paste("Incorrect arguments for the inverse cumulative density function", substitute(object))) # cf. code in Kruschke 2011 p630 intervalWidth <- function( lowTailPr , ICDF , credMass , ... ) { ICDF( credMass + lowTailPr , ... ) - ICDF( lowTailPr , ... ) } optInfo <- optimize( intervalWidth , c( 0 , 1.0 - credMass) , ICDF=object , credMass=credMass , tol=tol , ... ) HDIlowTailPr <- optInfo$minimum result <- c(lower = object( HDIlowTailPr , ... ) , upper = object( credMass + HDIlowTailPr , ... ) ) attr(result, "credMass") <- credMass return(result) } hdi.density <- function(object, credMass=0.95, allowSplit=FALSE, ...) 
{ if(is.na(credMass) || length(credMass) != 1 || credMass <= 0 || credMass >= 1) stop("credMass must be in 0 < credMass < 1") sorted = sort( object$y , decreasing=TRUE ) heightIdx = min( which( cumsum( sorted) >= sum(object$y) * credMass ) ) height = sorted[heightIdx] indices = which( object$y >= height ) # HDImass = sum( object$y[indices] ) / sum(object$y) gaps <- which(diff(indices) > 1) if(length(gaps) > 0 && !allowSplit) { # In this case, return shortest 95% CrI warning("The HDI is discontinuous but allowSplit = FALSE; the result is a valid CrI but not HDI.") cumul <- cumsum(object$y) / sum(object$y) upp.poss <- low.poss <- which(cumul < 1 - credMass) for (i in low.poss) upp.poss[i] <- min(which(cumul > cumul[i] + credMass)) # all(cumul[upp.poss] - cumul[low.poss] > credMass) # check width <- upp.poss - low.poss best <- which(width == min(width)) # usually > 1 value due to ties result <- c(lower = mean(object$x[low.poss[best]]), upper = mean(object$x[upp.poss[best]])) } else { begs <- indices[c(1, gaps + 1)] ends <- indices[c(gaps, length(indices))] result <- cbind(begin = object$x[begs], end = object$x[ends]) if(!allowSplit) names(result) <- c("lower", "upper") } attr(result, "credMass") <- credMass attr(result, "height") <- height return(result) }
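# A minimal sketch of how these hdi() methods might be called once this file is
# sourced (illustrative inputs only, not part of the original file):
set.seed(42)
samples <- rnorm(1e4)                     # stand-in for posterior draws
hdi(samples, credMass = 0.95)             # default method: shortest 95% interval of a vector
hdi(density(samples), credMass = 0.95)    # density method: interval above a common height
hdi(qnorm, credMass = 0.95)               # function method: expects an inverse CDF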
/scratch/gouwar.j/cran-all/cranData/yarrr/R/hdi_function.R
# Miscellaneous Functions yarrr.guide <- function() { vignette("guide", package = "yarrr") }
/scratch/gouwar.j/cran-all/cranData/yarrr/R/helper_fun.R
#' movies #' #' A dataframe containing information about the top 5000 grossing movies of all time. #' #' #' @format A data frame containing 5000 rows and 13 columns #' \describe{ #' \item{name}{Movie name} #' \item{rating}{MPAA rating} #' \item{genre}{Movie genre} #' \item{creative.type}{Creative type} #' \item{time}{Running time in minutes} #' \item{year}{Year of release} #' \item{production.method}{Production method} #' \item{sequel}{Was the movie a sequel? 1 = yes, 0 = no} #' \item{budget}{Movie budget (in $USD millions)} #' \item{revenue.all}{Gross worldwide revenue in $USD millions} #' \item{revenue.dom}{Domestic revenue in $USD millions} #' \item{revenue.int}{International revenue in $USD millions} #' \item{revenue.inf}{Inflation adjusted worldwide revenue in $USD millions} #' } #' @source www.the-numbers.com #' #' #' "movies"
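# A minimal usage sketch, assuming the yarrr package is installed and attached:
library(yarrr)
plot(revenue.all ~ budget, data = movies,
     xlab = "Budget ($USD millions)",
     ylab = "Worldwide revenue ($USD millions)",
     main = "Movie budgets vs. worldwide revenue")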
/scratch/gouwar.j/cran-all/cranData/yarrr/R/movies_doc.R
#' piratepal #' #' This function provides a number of color palettes #' #' @param palette A string defining the color palette to use (see examples). To use a random palette, use "random". To plot all palettes, use "all". To see all palette names, use "names" #' @param plot.result A logical value indicating whether or not to display the colors. #' @param trans A number in the interval [0, 1] indicating how transparent to make the colors. A value of 0 means no transparency and a value of 1 means completely transparency. #' @param mix.col string. An optional string representing a color to mix all colors in the palette with. #' @param mix.p numeric. A number in the interval [0, 1] indicating how much to mix the palette colors with the color in \code{mix.col} #' @param length.out An integer indicating how many colors to return. If length.out is larger than the number of colors in the palette, colors will be repeated. #' @keywords colors #' @export #' @importFrom jpeg readJPEG #' @importFrom circlize colorRamp2 #' @examples #' #' #' # Check out the vignette for a full guide #' #' vignette("piratepal", package = "yarrr") #' #' # Show all palettes #' #' piratepal(palette = "all") #' #' # Show some palettes #' #'piratepal(palette = "basel", trans = .5, plot.result = TRUE) #' #' #' # Using a palette in a scatterplot #' #'nemo.cols <- piratepal(palette = "nemo", trans = .5) #' #'set.seed(100) # For reproducibility #'x <- rnorm(100) #'y <- x + rnorm(100) #' #'plot(x = x, y = y, col = nemo.cols, #' pch = 16, #' cex = runif(100, min = 0, max = 2), #' main = "piratepal('nemo', trans = .5)") #' #' piratepal <- function(palette = "all", trans = 0, mix.col = "white", mix.p = 0, plot.result = FALSE, length.out = NULL) { # Check inputs if(trans < 0 | trans > 1) {stop("Problem: trans must be a number between 0 and 1")} # Define all palettes { piratepal.ls <- list( "basel.pal" = data.frame( "blue1" = rgb(12, 91, 176, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = rgb(238, 0, 17, alpha = (1- trans) * 255, maxColorValue = 255), "green" = rgb(21, 152, 61, alpha = (1 - trans) * 255, maxColorValue = 255), "pink" = rgb(236, 87, 154, alpha = (1 - trans) * 255, maxColorValue = 255), "orange" = rgb(250, 107, 9, alpha = (1 - trans) * 255, maxColorValue = 255), "blue2" = rgb(20, 155, 237, alpha = (1 - trans) * 255, maxColorValue = 255), "green2" = rgb(161, 199, 32, alpha = (1 - trans) * 255, maxColorValue = 255), "yellow" = rgb(254, 193, 11, alpha = (1 - trans) * 255, maxColorValue = 255), "turquoise" = rgb(22, 160, 140, alpha = (1 - trans) * 255, maxColorValue = 255), "poop" = rgb(154, 112, 62, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), "pony.pal" = data.frame( "pink" = rgb(235, 82, 145, alpha = (1 - trans) * 255, maxColorValue = 255), "orange" = rgb(251, 187, 104, alpha = (1 - trans) * 255, maxColorValue = 255), "lpink" = rgb(245, 186, 207, alpha = (1 - trans) * 255, maxColorValue = 255), "lblue" = rgb(157, 218, 245, alpha = (1 - trans) * 255, maxColorValue = 255), "purple1" = rgb(99, 81, 160, alpha = (1- trans) * 255, maxColorValue = 255), "gray" = rgb(236, 241, 244, alpha = (1 - trans) * 255, maxColorValue = 255), "yellow" = rgb(254, 247, 158, alpha = (1 - trans) * 255, maxColorValue = 255), "dblue" = rgb(23, 148, 206, alpha = (1 - trans) * 255, maxColorValue = 255), "purple2" = rgb(151, 44, 141, alpha = (1- trans) * 255, maxColorValue = 255), stringsAsFactors = F), "xmen.pal" = data.frame( "blue" = rgb(2, 108, 203, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = 
rgb(245, 30, 2, alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(5, 177, 2, alpha = (1 - trans) * 255, maxColorValue = 255), "orange" = rgb(251, 159, 83, alpha = (1 - trans) * 255, maxColorValue = 255), "gray" = rgb(155, 155, 155, alpha = (1 - trans) * 255, maxColorValue = 255), "pink" = rgb(251, 130, 190, alpha = (1- trans) * 255, maxColorValue = 255), "brown" = rgb(186, 98, 34, alpha = (1 - trans) * 255, maxColorValue = 255), "yellow" = rgb(238, 194, 41, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), "decision.pal" = data.frame( "red" = rgb(213, 122, 109, alpha = (1 - trans) * 255, maxColorValue = 255), "yellow" = rgb(232, 183, 98, alpha = (1 - trans) * 255, maxColorValue = 255), "blue" = rgb(156, 205, 223, alpha = (1 - trans) * 255, maxColorValue = 255), "gray" = rgb(82, 80, 82, alpha = (1 - trans) * 255, maxColorValue = 255), "tan" = rgb(230, 206, 175, alpha = (1- trans) * 255, maxColorValue = 255), "brown" = rgb(186, 149, 112, alpha = (1- trans) * 255, maxColorValue = 255), stringsAsFactors = F), "southpark.pal" = data.frame( "blue" = rgb(47, 134, 255, alpha = (1 - trans) * 255, maxColorValue = 255), "yellow" = rgb(235, 171, 22, alpha = (1- trans) * 255, maxColorValue = 255), "red" = rgb(222, 0, 18, alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(34, 196, 8, alpha = (1 - trans) * 255, maxColorValue = 255), "tan" = rgb(254, 205, 170, alpha = (1 - trans) * 255, maxColorValue = 255), "orange" = rgb(241, 72, 9, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), # http://www.google.com google.pal =data.frame( "blue" = rgb(61, 121, 243, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = rgb(230, 53, 47, alpha = (1 - trans) * 255, maxColorValue = 255), "yellow" = rgb(249, 185, 10, alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(52, 167, 75, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), "eternal.pal" = data.frame( "purple1" = rgb(23, 12, 46, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = rgb(117, 16, 41, alpha = (1 - trans) * 255, maxColorValue = 255), "purple2" = rgb(82, 25, 76, alpha = (1 - trans) * 255, maxColorValue = 255), "purple3" = rgb(71, 59, 117, alpha = (1 - trans) * 255, maxColorValue = 255), "blue1" = rgb(77, 112, 156, alpha = (1 - trans) * 255, maxColorValue = 255), "tan" = rgb(111, 118, 107, alpha = (1 - trans) * 255, maxColorValue = 255), "blue2" = rgb(146, 173, 196, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), "evildead.pal" = data.frame( "brown" = rgb(25, 24, 13, alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(33, 37, 16, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = rgb(46, 16, 11, alpha = (1 - trans) * 255, maxColorValue = 255), "brown2" = rgb(57, 46, 18, alpha = (1 - trans) * 255, maxColorValue = 255), "brown3" = rgb(87, 81, 43, alpha = (1 - trans) * 255, maxColorValue = 255), "tan" = rgb(150, 142, 76, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), usualsuspects.pal =data.frame( "gray1" = rgb(50, 51, 55, alpha = (1 - trans) * 255, maxColorValue = 255), "gray2" = rgb(83, 76, 83, alpha = (1 - trans) * 255, maxColorValue = 255), "blue" = rgb(63, 81, 106, alpha = (1 - trans) * 255, maxColorValue = 255), "brown" = rgb(155, 102, 89, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = rgb(232, 59, 65, alpha = (1 - trans) * 255, maxColorValue = 255), "gray3" = rgb(159, 156, 162, alpha = (1 - trans) * 255, maxColorValue = 255), "tan" = rgb(234, 174, 157, alpha = (1 - 
trans) * 255, maxColorValue = 255), stringsAsFactors = F), ohbrother.pal =data.frame( "brown1" = rgb(26, 15, 10, alpha = (1 - trans) * 255, maxColorValue = 255), "brown2" = rgb(61, 41, 26, alpha = (1 - trans) * 255, maxColorValue = 255), "brown3" = rgb(113, 86, 57, alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(116, 125, 109, alpha = (1 - trans) * 255, maxColorValue = 255), "tan1" = rgb(173, 157, 11, alpha = (1 - trans) * 255, maxColorValue = 255), "blue" = rgb(148, 196, 223, alpha = (1 - trans) * 255, maxColorValue = 255), "tan2" = rgb(230, 221, 168, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), appletv.pal =data.frame( "green" = rgb(95, 178, 51, alpha = (1 - trans) * 255, maxColorValue = 255), "gray" = rgb(106, 127, 147, alpha = (1 - trans) * 255, maxColorValue = 255), "orange" = rgb(245, 114, 6, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = rgb(235, 15, 19, alpha = (1 - trans) * 255, maxColorValue = 255), "purple" = rgb(143, 47, 139, alpha = (1 - trans) * 255, maxColorValue = 255), "blue" = rgb(19, 150, 219, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F ), # http://a.dilcdn.com/bl/wp-content/uploads/sites/2/2015/05/disneyPixar_PixarPalette_BRA_01.jpg brave.pal =data.frame( "brown" = rgb(168, 100, 59, alpha = (1 - trans) * 255, maxColorValue = 255), "yellow" = rgb(182, 91, 35, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = rgb(148, 34, 14, alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(39, 45, 23, alpha = (1 - trans) * 255, maxColorValue = 255), "blue" = rgb(32, 33, 38, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), # http://a.dilcdn.com/bl/wp-content/uploads/sites/2/2015/05/disneyPixar_PixarPalette_BUG_01.jpg bugs.pal =data.frame( "green1" = rgb(102, 120, 64, alpha = (1 - trans) * 255, maxColorValue = 255), "green2" = rgb(186, 214, 168, alpha = (1 - trans) * 255, maxColorValue = 255), "blue" = rgb(133, 199, 193, alpha = (1 - trans) * 255, maxColorValue = 255), "brown1" = rgb(165, 154, 107, alpha = (1 - trans) * 255, maxColorValue = 255), "brown2" = rgb(103, 85, 63, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), # http://a.dilcdn.com/bl/wp-content/uploads/sites/2/2015/05/disneyPixar_PixarPalette_CAR_01.jpg cars.pal =data.frame( "peach" = rgb(231, 176, 143, alpha = (1 - trans) * 255, maxColorValue = 255), "purple" = rgb(136, 76, 73, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = rgb(224, 54, 58, alpha = (1 - trans) * 255, maxColorValue = 255), "brown" = rgb(106, 29, 26, alpha = (1 - trans) * 255, maxColorValue = 255), "blue" = rgb(157, 218, 230, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), # http://a.dilcdn.com/bl/wp-content/uploads/sites/2/2015/05/disneyPixar_PixarPalette_FIN_021.jpg nemo.pal =data.frame( "yellow" = rgb(251, 207, 53, alpha = (1 - trans) * 255, maxColorValue = 255), "orange" = rgb(237, 76, 28, alpha = (1 - trans) * 255, maxColorValue = 255), "brown" = rgb(156, 126, 112, alpha = (1 - trans) * 255, maxColorValue = 255), "blue1" = rgb(90, 194, 241, alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(17, 119, 108, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), # http://a.dilcdn.com/bl/wp-content/uploads/sites/2/2015/05/disneyPixar_PixarPalette_RAT_02.jpg rat.pal =data.frame( "brown" = rgb(159, 77, 35, alpha = (1 - trans) * 255, maxColorValue = 255), "purple" = rgb(146, 43, 73, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = rgb(178, 29, 19, 
alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(127, 134, 36, alpha = (1 - trans) * 255, maxColorValue = 255), "yellow" = rgb(241, 156, 31, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), # http://a.dilcdn.com/bl/wp-content/uploads/sites/2/2015/05/disneyPixar_PixarPalette_UP_02.jpg up.pal =data.frame( "blue1" = rgb(95, 140, 244, alpha = (1 - trans) * 255, maxColorValue = 255), "blue2" = rgb(220, 214, 252, alpha = (1 - trans) * 255, maxColorValue = 255), "orange" = rgb(226, 122, 72, alpha = (1 - trans) * 255, maxColorValue = 255), "brown" = rgb(96, 86, 70, alpha = (1 - trans) * 255, maxColorValue = 255), "blue" = rgb(67, 65, 89, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), # Taken from a cellphone photo of espresso cups in the ARC kitchen espresso.pal=data.frame( "blue" = rgb(35, 102, 192, alpha = (1 - trans) * 255, maxColorValue = 255), "yellow" = rgb(233, 215, 56, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = rgb(185, 18, 38, alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(163, 218, 75, alpha = (1 - trans) * 255, maxColorValue = 255), "orange" = rgb(255, 100, 53, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), # Colors of apple ipods (can't remember which year) ipod.pal =data.frame( "lightgray" = rgb(215, 215, 215, alpha = (1 - trans) * 255, maxColorValue = 255), "red" = rgb(243, 174, 175, alpha = (1 - trans) * 255, maxColorValue = 255), "darkgray" = rgb(174, 173, 176, alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(158, 217, 191, alpha = (1 - trans) * 255, maxColorValue = 255), "blue" = rgb(92, 203, 235, alpha = (1 - trans) * 255, maxColorValue = 255), "yellow" = rgb(222, 235, 97, alpha = (1 - trans) * 255, maxColorValue = 255), "background" = rgb(242, 242, 242, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), # Colors from an infographic (can't remember which one) info.pal =data.frame( "red" = rgb(231, 105, 93, alpha = (1 - trans) * 255, maxColorValue = 255), "darkblue" = rgb(107, 137, 147, alpha = (1 - trans) * 255, maxColorValue = 255), "creme" = rgb(246, 240, 212, alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(149, 206, 138, alpha = (1 - trans) * 255, maxColorValue = 255), "gray1" = rgb(210, 210, 210, alpha = (1 - trans) * 255, maxColorValue = 255), "lightblue" = rgb(148, 212, 212, alpha = (1 - trans) * 255, maxColorValue = 255), "gray2" = rgb(150, 150, 150, alpha = (1 - trans) * 255, maxColorValue = 255), "background" = rgb(241, 243, 232, alpha = (1 - trans) * 255, maxColorValue = 255), "brown" = rgb(136, 119, 95, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F), # Colors from another mystery infographic info2.pal =data.frame( "darkblue" = rgb(0, 106, 64, alpha = (1 - trans) * 255, maxColorValue = 255), "pink" = rgb(240, 136, 146, alpha = (1 - trans) * 255, maxColorValue = 255), "lightgreen" = rgb(117, 180, 30, alpha = (1 - trans) * 255, maxColorValue = 255), "lightgray" = rgb(149, 130, 141, alpha = (1 - trans) * 255, maxColorValue = 255), "grayblue" = rgb(112, 140, 152, alpha = (1 - trans) * 255, maxColorValue = 255), "lightblue" = rgb(138, 184, 207, alpha = (1 - trans) * 255, maxColorValue = 255), "turquoise" = rgb(0, 126, 127, alpha = (1 - trans) * 255, maxColorValue = 255), "green" = rgb(53, 131, 89, alpha = (1 - trans) * 255, maxColorValue = 255), "paleblue" = rgb(139, 161, 188, alpha = (1 - trans) * 255, maxColorValue = 255), "purple" = rgb(90, 88, 149, alpha = (1 - trans) * 255, 
maxColorValue = 255), "orange" = rgb(242, 153, 12, alpha = (1 - trans) * 255, maxColorValue = 255), "purple" = rgb(90, 88, 149, alpha = (1 - trans) * 255, maxColorValue = 255), "paleorange" = rgb(229, 186, 58, alpha = (1 - trans) * 255, maxColorValue = 255), "salmon" = rgb(216, 108, 79, alpha = (1 - trans) * 255, maxColorValue = 255), stringsAsFactors = F) ) } palette.names <- unlist(strsplit(names(piratepal.ls), ".pal", TRUE)) n.palettes <- length(palette.names) if(!(palette %in% c(palette.names, "random", "all", "names"))) { stop(c("You did not specify a valid palette. Run piratepal('names') to see all of the palette names.")) } # Save original margins margin.o <- par("mar") # if palette == "all", show all palettes if(palette == "all") { # Mix palete with mix.col if(mix.p > 0) { for (pal.i in 1:length(piratepal.ls)) { for(col.i in 1:length(piratepal.ls[[pal.i]])) { # Create a color mix funciton using colorRamp2 pal.fun <- circlize::colorRamp2(c(0, 1), colors = c(piratepal.ls[[pal.i]][col.i], mix.col), transparency = trans) # Assign color to palette list piratepal.ls[[pal.i]][col.i] <- pal.fun(mix.p) } } } output <- NULL par(mar = c(1, 6, 4, 0)) n.palettes <- length(palette.names) plot(1, xlim = c(0, 15), ylim = c(0, 1), xaxt = "n", yaxt = "n", bty = "n", type = "n", xlab = "", ylab = "", main = "Here are all of the pirate palettes") mtext(text = paste("Transparency is set to ", trans, sep = ""), side = 3) y.locations <- seq(1, 0, length.out = n.palettes) for(i in 1:n.palettes) { palette.df <- unlist(piratepal.ls[[paste(palette.names[i], ".pal", sep = "")]]) n.colors <- length(palette.df) rect(0:(n.colors - 1), rep(y.locations[i], n.colors) - 1 / (n.palettes * 2.2), 1:(n.colors), rep(y.locations[i], n.colors) + 1 / (n.palettes * 2.2), col = palette.df, border = NA) # points(1:n.colors, rep(y.locations[i], n.colors) * 1, col = palette.df, pch = 16, cex = 1.4) mtext(unlist(strsplit(palette.names[i], fixed = T, split = "."))[1], side = 2, at = y.locations[i], las = 1, cex = .9, line = 0) } } if(palette == "random") { palette <- sample(palette.names[palette.names != "random"], 1) palette <- unlist(strsplit(palette, ".", fixed = T))[1] message(paste("Here's the", palette, "palette")) } if(palette == "names") { output <- palette.names } # Get result vector if(palette %in% c("all", "random", "names") == FALSE) { # Mix palete with mix.col if(mix.p > 0) { for (pal.i in which(palette == palette.names)) { for(col.i in 1:length(piratepal.ls[[pal.i]])) { # Create a color mix funciton using colorRamp2 pal.fun <- circlize::colorRamp2(c(0, 1), colors = c(piratepal.ls[[pal.i]][col.i], mix.col), transparency = trans) # Assign color to palette list piratepal.ls[[pal.i]][col.i] <- pal.fun(mix.p) } } } palette.df <- piratepal.ls[[paste(palette, ".pal", sep = "")]] if(is.null(length.out)) {output <- unlist(palette.df)} if(is.null(length.out) == F) {output <- rep(unlist(palette.df), length.out = length.out)} } # Plot single palette if(plot.result & palette %in% palette.names) { palette.df <- piratepal.ls[[paste(palette, ".pal", sep = "")]] col.vec <- unlist(palette.df) n.colors <- length(col.vec) par(mar = c(1, 1, 1, 1)) plot(1, xlim = c(0, 1), ylim = c(0, 1), type='n',xaxs='i',xaxt = "n", yaxt = "n", bty = "n", yaxs='i',xlab='',ylab='') # is there a picture? 
if(system.file(paste(palette, ".jpg", sep = ""), package = "yarrr") != "") { point.heights <- .3 text.heights <- .05 pic.center <- c(.5, .65) jpg <- jpeg::readJPEG(system.file(paste(palette, ".jpg", sep = ""), package="yarrr"), native=T) # read the file res <- dim(jpg)[1:2] # get the resolution ar <- res[2] / res[1] if(res[2] >= res[1]) { desired.width <- .6 required.height <- desired.width / ar rasterImage(jpg, pic.center[1] - desired.width / 2, pic.center[2] - required.height / 2, pic.center[1] + desired.width / 2, pic.center[2] + required.height / 2) } if(res[2] < res[1]) { desired.height <- .40 required.width <- desired.height * ar rasterImage(jpg, pic.center[1] - required.width / 2, pic.center[2] - desired.height / 2, pic.center[1] + required.width / 2, pic.center[2] + desired.height / 2) } } if(floor(n.colors / 2) != n.colors / 2) { possible.locations <- seq(0, 1, 1 / 16) start.location <- ceiling(.5 * length(possible.locations)) - floor(n.colors / 2) } if(floor(n.colors / 2) == n.colors / 2) { possible.locations <- seq(0, 1, 1 / 15) start.location <- .5 * length(possible.locations) - n.colors / 2 + 1 } end.location <- start.location + n.colors - 1 locations.to.use <- possible.locations[start.location:end.location] if(system.file(paste(palette, ".jpg", sep = ""), package = "yarrr") == "") { point.heights <- .6 text.heights <- .25 } # Add segments segments(locations.to.use, text.heights + .05, locations.to.use, point.heights, lwd = 1, lty = 2) # Add points points(x = locations.to.use, y = rep(point.heights, length(col.vec)), pch = 16, col = col.vec, cex = 10) text(locations.to.use, text.heights, names(col.vec), srt = 45) text(.5, .95, palette, cex = 2) text(.5, .9, paste("trans = ", trans, sep = "")) # Reset margins } if(is.null(output) == F & plot.result == F) { return(output) } # reset margins par("mar" = margin.o) }
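# A small sketch of the mix.col / mix.p arguments, which are not covered by the
# roxygen examples above (the 50% mix with white is an arbitrary choice):
basel.pastel <- piratepal(palette = "basel", mix.col = "white", mix.p = .5)
barplot(height = rep(1, length(basel.pastel)),
        col = basel.pastel, border = NA,
        main = "piratepal('basel') mixed 50% with white")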
/scratch/gouwar.j/cran-all/cranData/yarrr/R/piratepal_function.R
#' pirateplot #' #' The pirateplot function creates an RDI (Raw data, Descriptive and Inferential statistic) plot showing the relationship between 1 to 3 categorical independent variables and 1 continuous dependent variable. #' #' @param formula formula. A formula in the form \code{y ~ x1 + x2 + x3} indicating the vertical response variable (y) and up to three independent variables #' @param data Either a dataframe containing the variables specified in formula, a list of numeric vectors, or a numeric dataframe / matrix. #' @param plot logical. If \code{TRUE} (the default), thent the pirateplot is produced. If \code{FALSE}, the data summaries created in the plot are returned as a list. #' @param pal string. The color palette of the plot. Can be a single color, a vector of colors, or the name of a palette in the piratepal() function (e.g.; "basel", "google", "southpark"). To see all the palettes, run \code{piratepal(palette = "all", action = "show")} #' @param mix.col,mix.p Optional color mixing arguments to be passed to \code{piratepal}. See \code{?piratepal} for examples. #' @param point.col,bar.f.col,bean.b.col,bean.f.col,inf.f.col,inf.b.col,avg.line.col,bar.b.col,quant.col,point.bg string. Vectors of colors specifying the colors of the plotting elements. This will override values in the palette. f stands for filling, b stands for border. #' @param theme integer. An integer in the set 0, 1, 2 specifying a theme (that is, new default values for opacities and colors). \code{theme = 0} turns off all opacities which can then be individually specified individually. #' @param bar.f.o,point.o,inf.f.o,inf.b.o,avg.line.o,bean.b.o,bean.f.o,bar.b.o numeric. A number between 0 and 1 indicating how opaque to make the bars, points, inference band, average line, and beans respectively. These values override whatever is in the specified theme #' @param avg.line.fun function. A function that determines how average lines and bar heights are determined (default is mean). #' @param back.col string. Color of the plotting background. #' @param point.cex,point.pch,point.lwd numeric. The size, pch type, and line width of raw data points. #' @param bean.lwd,bean.lty,inf.lwd,avg.line.lwd,bar.lwd numeric. Vectors of numbers customizing the look of beans and lines. #' @param width.min,width.max numeric. The minimum/maximum width of the beans. #' @param cut.min,cut.max numeric. Optional minimum and maximum values of the beans. #' @param inf.method string. A string indicating what types of inference bands to calculate. "ci" means frequentist confidence intervals, "hdi" means Bayesian Highest Density Intervals (HDI), "iqr" means interquartile range, "sd" means standard deviation, "se" means standard error, "withinci" means frequentist confidence intervals in a within design (Morey, 2008). #' @param inf.within string. The variable which serves as an ID variable in a within design. #' @param inf.disp string. How should inference ranges be displayed? \code{"line"} creates a classic vertical line, \code{"rect"} creates a rectangle, \code{"bean"} forms the inference around the bean. #' @param inf.p numeric. A number adjusting how inference ranges are calculated. for \code{"ci"} and \code{"hdi"}, a number between 0 and 1 indicating the level of confidence (default is .95). For \code{"sd"} and \code{"se"}, the number of standard deviations / standard errors added to or subtracted from the mean (default is 1). #' @param hdi.iter integer. Number of iterations to run when calculating the HDI. 
Larger values lead to better estimates, but can be more time consuming. #' @param bw,adjust Arguments passed to density calculations for beans (see ?density) #' @param jitter.val numeric. Amount of jitter added to points horizontally. Defaults to 0.05. #' @param at integer. Locations of the beans. Especially helpful when adding beans to an existing plot with add = TRUE #' @param sortx string. How to sort the x values. Can be "sequential" (as they are found in the original dataframe), "alphabetical", or a string in the set ("mean", "median", "min", "max") indicating a function #' @param decreasing logical. If sortx is a named function, should values be sorted in decreasing order? #' @param add logical. Should the pirateplot elements be added to an existing plotting space? #' @param cap.beans logical. Should maximum and minimum values of the bean densities be capped at the limits found in the data? Default is FALSE. #' @param quant.length,quant.lwd numeric. Specifies line lengths/widths of \code{quant}. #' @param quant.boxplot logical. Should standard values be included? #' @param family a font family (Not currently in use) #' @param cex.lab,cex.axis,cex.names Size of the labels, axes, and bean names. #' @param gl numeric. Locations of the horizontal grid lines #' @param gl.lwd,gl.lty,gl.col Customization for grid lines. Can be entered as vectors for alternating gridline types #' @param bty,xlim,ylim,xlab,ylab,main,yaxt,xaxt General plotting arguments #' @param quant numeric. Adds horizontal lines representing custom quantiles. #' @param bar.b.lwd,line.fun,inf.o,bean.o,inf.col,theme.o,inf,inf.type,inf.band,bar.o,line.o,hdi.o depricated arguments #' @keywords plot #' @importFrom BayesFactor ttestBF #' @importFrom grDevices col2rgb gray rgb #' @importFrom graphics abline axis layout mtext par plot points polygon rasterImage rect segments text #' @importFrom stats density model.frame optimize rnorm t.test qbeta sd quantile IQR aggregate as.formula #' @importFrom utils vignette #' @export #' @examples #' #' #'# Default pirateplot of weight by Time #'pirateplot(formula = weight ~ Time, #' data = ChickWeight) #' #'# Same but in grayscale #'pirateplot(formula = weight ~ Time, #' data = ChickWeight, #' pal = "gray") #' #' #'# Now using theme 2 #'pirateplot(formula = weight ~ Time, #' data = ChickWeight, #' main = "Chicken weight by time", #' theme = 2) # theme 2 #' #'# theme 3 #'pirateplot(formula = weight ~ Time, #' data = ChickWeight, #' main = "Chicken weight by time", #' theme = 3) # theme 3 #' #'# theme 4 #'pirateplot(formula = weight ~ Time, #' data = ChickWeight, #' main = "Chicken weight by time", #' theme = 4) # theme 4 #' #'# Start with theme 2, but then customise! #'pirateplot(formula = weight ~ Time, #' data = ChickWeight, #' theme = 2, # theme 2 #' pal = "xmen", # xmen palette #' main = "Chicken weights by Time", #' point.o = .4, # Add points #' point.col = "black", #' point.bg = "white", #' point.pch = 21, #' bean.f.o = .2, # Turn down bean filling #' inf.f.o = .8, # Turn up inf filling #' gl.col = "gray", # gridlines #' gl.lwd = c(.5, 0)) # turn off minor grid lines #' #'# 2 IVs #'pirateplot(formula = len ~ dose + supp, #' data = ToothGrowth, #' main = "Guinea pig tooth length by supplement", #' point.pch = 16, # Point specifications... 
#' point.col = "black", #' point.o = .7, #' inf.f.o = .9, # inference band opacity #' gl.col = "gray") #' #' #'# Build everything from scratch with theme 0 #'# And use 3 IVs #'pirateplot(formula = height ~ headband + eyepatch + sex, #' data = pirates, #' pal = gray(.1), # Dark gray palette #' theme = 0, # Start from scratch #' inf.f.o = .7, # Band opacity #' inf.f.col = piratepal("basel"), # Add color to bands #' point.o = .1, # Point opacity #' avg.line.o = .8, # Average line opacity #' gl.col = gray(.6), # Gridline specifications #' gl.lty = 1, #' gl.lwd = c(.5, 0)) #' #' # See the vignette for more details #' vignette("pirateplot", package = "yarrr") #' #' pirateplot <- function( formula = NULL, data = NULL, plot = TRUE, avg.line.fun = mean, pal = "basel", mix.col = "white", mix.p = 0, back.col = NULL, point.cex = NULL, point.pch = NULL, point.lwd = 1, jitter.val = .03, theme = 1, bean.b.o = NULL, bean.f.o = NULL, point.o = NULL, bar.f.o = NULL, bar.b.o = NULL, inf.f.o = NULL, inf.b.o = NULL, avg.line.o = NULL, gl.col = NULL, point.col = NULL, point.bg = NULL, bar.f.col = NULL, bean.b.col = NULL, bean.f.col = NULL, inf.f.col = NULL, inf.b.col = NULL, avg.line.col = NULL, bar.b.col = NULL, quant.col = NULL, avg.line.lwd = 4, bean.lwd = 1, bean.lty = 1, inf.lwd = NULL, bar.lwd = 1, at = NULL, bw = "nrd0", adjust = 1, add = FALSE, sortx = "alphabetical", decreasing = FALSE, cex.lab = 1, cex.axis = 1, cex.names = 1, quant = NULL, quant.length = NULL, quant.lwd = NULL, quant.boxplot = FALSE, bty = "o", cap.beans = TRUE, family = NULL, inf.method = "hdi", inf.within = NULL, inf.p = NULL, hdi.iter = 1e3, inf.disp = NULL, cut.min = NULL, cut.max = NULL, width.min = .3, width.max = NA, ylim = NULL, xlim = NULL, xlab = NULL, ylab = NULL, main = NULL, yaxt = NULL, xaxt = NULL, gl = NULL, gl.lwd = NULL, gl.lty = NULL, bar.b.lwd = NULL, line.fun = NULL, line.o = NULL, inf.o = NULL, bean.o = NULL, inf.col = NULL, theme.o = NULL, bar.o = NULL, inf = NULL, hdi.o = NULL, inf.type = NULL, inf.band = NULL ) { # # # # # # formula = len ~ dose + supp # data = ToothGrowth # plot = TRUE # avg.line.fun = mean # pal = "basel" # back.col = NULL # point.cex = NULL # point.pch = NULL # point.lwd = 1 # jitter.val = .03 # theme = 1 # bean.b.o = NULL # quant.boxplot = FALSE # bean.f.o = NULL # point.o = NULL # bar.f.o = NULL # bar.b.o = NULL # inf.f.o = NULL # inf.b.o = NULL # avg.line.o = NULL # gl.col = NULL # point.col = NULL # point.bg = NULL # bar.f.col = NULL # bean.b.col = NULL # bean.f.col = NULL # inf.f.col = NULL # inf.b.col = NULL # avg.line.col = NULL # bar.b.col = NULL # quant.col = NULL # avg.line.lwd = 4 # bean.lwd = 1 # bean.lty = 1 # inf.lwd = NULL # bar.lwd = 1 # at = NULL # bw = "nrd0" # adjust = 1 # add = FALSE # sortx = "mean" # decreasing = TRUE # cex.lab = 1 # cex.axis = 1 # quant = NULL # quant.length = NULL # quant.lwd = NULL # bty = "o" # evidence = FALSE # family = NULL # inf.method = "hdi" # inf.p = .95 # hdi.iter = 1e3 # inf.disp = "line" # cut.min = NULL # cut.max = NULL # width.min = .3 # width.max = NA # ylim = NULL # xlim = NULL # xlab = NULL # ylab = NULL # main = NULL # yaxt = NULL # xaxt = NULL # gl.lwd = NULL # gl.lty = NULL # bar.b.lwd = NULL # line.fun = NULL # inf.o = NULL # bean.o = NULL # bar.o = NULL # line.o = NULL # inf.col = NULL # theme.o = NULL # inf = NULL # inf.type = NULL # inf.band = NULL # cap.beans = TRUE # # # # # # formula = NULL # data = list(rnorm(100),rnorm(20)) # ylab = "" # sortx = "mean" # ----- # SETUP # ------ # Check for depricated arguments { 
if(is.null(bar.b.lwd) == FALSE) { message("bar.b.lwd is depricated. Use bar.lwd instead") bar.lwd <- bar.b.lwd } if(is.null(line.fun) == FALSE) { message("line.fun is depricated. Use avg.line.fun instead") avg.line.fun <- line.fun } if(is.null(inf.o) == FALSE) { message("inf.o is depricated. Use inf.f.o instead") inf.f.o <- inf.o } if(is.null(line.o) == FALSE) { message("line.o is depricated. Use avg.line.o instead") avg.line.o <- line.o } if(is.null(bean.o) == FALSE) { message("bean.o is depricated. Use bean.b.o instead") bean.b.o <- bean.o } if(is.null(inf.col) == FALSE) { message("inf.col is depricated. Use inf.f.col instead") inf.f.col <- inf.col } if(is.null(theme.o) == FALSE) { message("theme.o is depricated. Use theme instead") theme <- theme.o } if(is.null(inf) == FALSE) { message("inf is depricated. Use inf.method instead") inf.method <- inf } if(is.null(inf.band) == FALSE) { message("inf.band is depricated. Use inf.disp instead") inf.disp <- inf.band } if(is.null(bar.o) == FALSE) { message("bar.o is depricated. Use bar.f.o (for filling), and bar.b.o (for border) instead") bar.f.o <- bar.o } } # Look for missing critical inputs { if(is.null(data)) {stop("You must specify data in the data argument")} } # Set some defaults if(inf.method %in% c("hdi", "ci", "withinci") & is.null(inf.p)) { inf.p <- .95 } if(inf.method %in% c("sd", "se") & is.null(inf.p)) { inf.p <- 1 } # If no formula, than reshape data if(is.null(formula)) { if(class(data) %in% c("data.frame", "matrix")) { # data <- data.frame(a = rnorm(100), b = rnorm(100), c = rnorm(100)) # data <- matrix(rnorm(100), nrow = 20, ncol = 5) # data <- as.data.frame(data) iv.levels <- names(data) data <- stats::reshape(data, direction = "long", varying = list(1:ncol(data))) data <- data[,1:2] names(data) <- c("group", "y") for(i in 1:length(iv.levels)) { data$group[data$y == i] <- iv.levels[i] } formula <- y ~ group } if(class(data) == "list") { if(is.null(names(data))) {names(data) <- paste0("V", 1:length(data))} iv.levels <- names(data) # Convert list to dataframe data.df <- do.call("rbind", lapply(1:length(data), FUN = function(x) { data.frame(group = rep(iv.levels[x], length(data[[x]])), y = unlist(data[[x]])) })) data <- data.df formula <- y ~ group } } # Reshape dataframe to include relevant variables { withindata <- data data <- model.frame(formula = formula, data = data) dv.name <- names(data)[1] dv.v <- data[,1] all.iv.names <- names(data)[2:ncol(data)] } # GET IV INFORMATION { n.iv <- ncol(data) - 1 if(n.iv > 3) {stop("Currently only 1, 2, or 3 IVs are supported in pirateplot(). 
Please reduce.")} # selection.mtx dictates which values are in each sub-plot if(n.iv %in% 1:2) { selection.mtx <- matrix(TRUE, nrow = nrow(data), ncol = 1) } if(n.iv == 3) { iv3.name <- names(data)[4] iv3.levels <- sort(unique(data[,4])) selection.mtx <- matrix(unlist(lapply(iv3.levels, FUN = function(x) {data[,4] == x})), nrow = nrow(data), ncol = length(iv3.levels), byrow = F) } n.subplots <- ncol(selection.mtx) # Loop over subplots (only relevant when there is a third IV) if(n.subplots == 2) {par(mfrow = c(1, 2))} if(n.subplots == 3) {par(mfrow = c(1, 3))} if(n.subplots == 4) {par(mfrow = c(2, 2))} if(n.subplots %in% c(5, 6)) {par(mfrow = c(2, 3))} if(n.subplots > 7) {par(mfrow = c(ceiling(sqrt(n.subplots)), ceiling(sqrt(n.subplots))))} } # Setup outputs summary.ls <- vector("list", length = n.subplots) # Loop over subplots for(subplot.i in 1:n.subplots) { # Select data for current subplot data.i <- data[selection.mtx[,subplot.i],] # Remove potential iv3 column data.i <- data.i[,1:min(ncol(data.i), 3)] # Determine levels of each IV { if((substr(sortx, 1, 1) %in% c("a", "s") | sortx %in% c("mean", "median", "min", "max")) == FALSE ) {stop("sortx argument is invalid. use 'alphabetical', 'sequential', 'mean', 'median', 'min', or 'max'")} if(substr(sortx, 1, 1) == "a") { iv.levels <- lapply(2:ncol(data.i), FUN = function(x) {sort(unique(data.i[,x]))}) } if(substr(sortx, 1, 1) == "s") { iv.levels <- lapply(2:ncol(data.i), FUN = function(x) {unique(data.i[,x])}) } if(sortx %in% c("mean", "median", "min", "max")) { agg <- aggregate(formula, data = data.i, FUN = get(sortx)) if(ncol(agg) == 2) { agg <- agg[order(agg[,2], decreasing = decreasing),] iv.levels <- list(paste(agg[,1])) } if(ncol(agg) == 3) { agg.1 <- aggregate(as.formula(paste(dv.name, "~", all.iv.names[1])), data = data.i, FUN = get(sortx)) agg.1 <- agg.1[order(agg.1[,2], decreasing = decreasing),] iv.levels <- list(paste(agg.1[,1])) agg.2 <- aggregate(as.formula(paste(dv.name, "~", all.iv.names[2])), data = data.i, FUN = get(sortx)) agg.2 <- agg.2[order(agg.2[,2], decreasing = decreasing),] iv.levels[2] <- list(paste(agg.2[,1])) } } iv.lengths <- sapply(1:length(iv.levels), FUN = function(x) {length(iv.levels[[x]])}) iv.names <- names(data.i)[2:ncol(data.i)] subplot.n.iv <- length(iv.levels) } # Set up bean info { bean.mtx <- expand.grid(iv.levels) names(bean.mtx) <- names(data.i)[2:ncol(data.i)] n.beans <- nrow(bean.mtx) bean.mtx$bean.num <- 1:nrow(bean.mtx) # Determine bean x locations if(is.null(at)) { bean.loc <- 1:n.beans group.spacing <- 1 if(subplot.n.iv == 2) { bean.loc <- bean.loc + rep(group.spacing * (0:(iv.lengths[2] - 1)), each = iv.lengths[1]) } } if(!is.null(at)) { bean.loc <- rep(at, length.out = n.beans) } bean.mtx$x.loc <- bean.loc data.i <- merge(data.i, bean.mtx) } # Calculate summary statistics for each bean { summary <- data.frame("n" = rep(NA, n.beans), "avg" = rep(NA, n.beans), "inf.lb" = rep(NA, n.beans), "inf.ub" = rep(NA, n.beans)) summary <- cbind(bean.mtx[,(1:ncol(bean.mtx) - 1)], summary) if(n.subplots > 1) {summary[iv3.name] <- iv3.levels[subplot.i]} # Loop over beans in subplot for(bean.i in 1:n.beans) { dv.i <- data.i[data.i$bean.num == bean.i, dv.name] if(is.logical(dv.i)) {dv.i <- as.numeric(dv.i)} summary$n[bean.i] <- length(dv.i) summary$avg[bean.i] <- avg.line.fun(dv.i) # Calculate inference if(length(dv.i) > 0) { # Binary data.i if(length(setdiff(dv.i, c(0, 1))) == 0) { if(inf.method == "hdi") { # Calculate HDI from beta(Success + 1, Failure + 1) inf.lb <- qbeta(.025, shape1 = sum(dv.i) + 1, 
shape2 = sum(dv.i == 0) + 1) inf.ub <- qbeta(.975, shape1 = sum(dv.i) + 1, shape2 = sum(dv.i == 0) + 1) } if(inf.method == "ci") { # Calculate 95% CI with Normal distribution approximation to binomial inf.lb <- mean(dv.i) - 1.96 * sqrt(mean(dv.i) * (1 - mean(dv.i)) / length(dv.i)) - .5 / length(dv.i) inf.ub <- mean(dv.i) + 1.96 * sqrt(mean(dv.i) * (1 - mean(dv.i)) / length(dv.i)) + .5 / length(dv.i) if(inf.lb < 0) {inf.lb <- 0} if(inf.lb > 1) {inf.ub <- 1} } if(inf.method == "sd") { inf.lb <- mean(dv.i) - sd(dv.i) * inf.p inf.ub <- mean(dv.i) + sd(dv.i) * inf.p if(inf.lb < 0) {inf.lb <- 0} if(inf.lb > 1) {inf.ub <- 1} } if(inf.method == "se") { inf.lb <- mean(dv.i) - sd(dv.i) / sqrt(length(dv.i)) * inf.p inf.ub <- mean(dv.i) + sd(dv.i) / sqrt(length(dv.i)) * inf.p if(inf.lb < 0) {inf.lb <- 0} if(inf.lb > 1) {inf.ub <- 1} } } # Non-Binary data.i if(length(setdiff(dv.i, c(0, 1))) > 0) { if(inf.method == "hdi") { ttest.bf <- BayesFactor::ttestBF(dv.i, posterior = T, iterations = hdi.iter, progress = F) samples <- ttest.bf[,1] # using the hdi function from Kruschke inf.lb <- hdi(samples, credMass = inf.p)[1] inf.ub <- hdi(samples, credMass = inf.p)[2] } if(inf.method == "iqr") { inf.lb <- quantile(dv.i, probs = .25) inf.ub <- quantile(dv.i, probs = .75) } if(inf.method == "ci") { ci.i <- t.test(dv.i, conf.level = inf.p)$conf.int inf.lb <- ci.i[1] inf.ub <- ci.i[2] } if(inf.method == "sd") { inf.lb <- mean(dv.i) - sd(dv.i) * inf.p inf.ub <- mean(dv.i) + sd(dv.i) * inf.p } if(inf.method == "se") { inf.lb <- mean(dv.i) - sd(dv.i) / sqrt(length(dv.i)) * inf.p inf.ub <- mean(dv.i) + sd(dv.i) / sqrt(length(dv.i)) * inf.p } if(inf.method == "withinci") { grandMean <- mean(dv.v) Groups <- unique(withindata[inf.within])[[1]] groupMean <- c() dv.within <- c() for(group in 1:length(Groups)){ #get the participant/group mean for each participant/group groupMean[group] <- mean(dv.v[which(withindata[inf.within] == Groups[group])]) } for(datum in 1:length(dv.i)){ #substitute group mean with grand mean, which removes between subject variance dv.within[datum] <- dv.i[datum] - groupMean[datum] + grandMean } ci.i <- t.test(dv.within, conf.level = inf.p)$conf.int ci.width <- (ci.i[2] - ci.i[1]) inf.lb <- mean(dv.i) - (ci.width/2) * sqrt(n.beans/(n.beans-1)) #with Morey correction inf.ub <- mean(dv.i) + (ci.width/2) * sqrt(n.beans/(n.beans-1)) #with Morey correction } } summary$inf.lb[bean.i] <- inf.lb summary$inf.ub[bean.i] <- inf.ub } if(length(dv.i) == 0) { summary$inf.lb[bean.i] <- NA summary$inf.ub[bean.i] <- NA } } } if(plot == TRUE) { # COLORS AND TRANSPARENCIES # Set number of colors to number of levels of the first IV n.cols <- iv.lengths[1] # DEFINE THEMES { if((theme %in% 0:4) == FALSE) { print("theme must be an integer between 0 and 4. 
I'll set it to 1 for now.") theme <- 1 } if(theme == 0) { if(is.null(point.pch)) {point.pch <- 16} if(is.null(point.o)) {point.o <- 0} if(is.null(bean.b.o)) {bean.b.o <- 0} if(is.null(bean.f.o)) {bean.f.o <- 0} if(is.null(inf.f.o)) {inf.f.o <- 0} if(is.null(inf.b.o)) {inf.b.o <- 0} if(is.null(avg.line.o)) {avg.line.o <- 0} if(is.null(bar.f.o)) {bar.f.o <- 0} if(is.null(bar.b.o)) {bar.b.o <- 0} if(is.null(point.cex)) {point.cex <- 1} if(is.null(gl.col)) {gl.col <- "white"} if(is.null(inf.disp)) {inf.disp <- "rect"} } if(theme == 1) { if(is.null(point.o)) {point.o <- .2} if(is.null(bean.b.o)) {bean.b.o <- 1} if(is.null(bean.f.o)) {bean.f.o <- .3} if(is.null(inf.f.o)) {inf.f.o <- .8} if(is.null(inf.b.o)) {inf.b.o <- .8} if(is.null(avg.line.o)) {avg.line.o <- 1} if(is.null(bar.f.o)) {bar.f.o <- 0} if(is.null(bar.b.o)) {bar.b.o <- 0} if(is.null(bean.b.col)) {bean.b.col <- "black"} if(is.null(point.cex)) {point.cex <- .7} if(is.null(point.col)) {point.col <- "black"} if(is.null(inf.b.col)) {inf.b.col <- gray(0)} if(is.null(inf.f.col)) {inf.f.col <- "white"} if(is.null(bean.lwd)) {bean.lwd <- 2} if(is.null(avg.line.col)) {avg.line.col <- "black"} if(is.null(gl.col)) {gl.col <- "gray"} if(is.null(gl.lwd)) {gl.lwd <- c(.25, .5)} if(is.null(point.col)) {point.col <- "black"} if(is.null(point.bg)) {point.bg <- "white"} if(is.null(point.pch)) {point.pch <- 16} if(is.null(inf.disp)) {inf.disp <- "rect"} } if(theme == 2) { if(is.null(point.pch)) {point.pch <- 16} if(is.null(point.o)) {point.o <- .1} if(is.null(bean.b.o)) {bean.b.o <- 1} if(is.null(bean.f.o)) {bean.f.o <- 1} if(is.null(inf.f.o)) {inf.f.o <- .6} if(is.null(inf.b.o)) {inf.b.o <- .8} if(is.null(inf.b.col)) {inf.b.col <- gray(0)} if(is.null(avg.line.o)) {avg.line.o <- 1} if(is.null(bar.f.o)) {bar.f.o <- 0} if(is.null(bar.b.o)) {bar.b.o <- 0} if(is.null(bean.b.col)) {bean.b.col <- "black"} if(is.null(point.cex)) {point.cex <- .7} if(is.null(point.col)) {point.col <- "black"} if(is.null(bean.lwd)) {bean.lwd <- 2} if(is.null(avg.line.col)) {avg.line.col <- "black"} if(is.null(bean.f.col)) {bean.f.col <- "white"} if(is.null(inf.disp)) {inf.disp <- "rect"} if(is.null(gl.col)) {gl.col <- "gray"} if(is.null(gl.lwd)) {gl.lwd <- c(.25, .5)} } if(theme == 3) { if(is.null(point.pch)) {point.pch <- 16} if(is.null(point.o)) {point.o <- .3} if(is.null(bean.b.o)) {bean.b.o <- 1} if(is.null(bean.f.o)) {bean.f.o <- .5} if(is.null(inf.f.o)) {inf.f.o <- .9} if(is.null(inf.b.o)) {inf.b.o <- 1} if(is.null(avg.line.o)) {avg.line.o <- 1} if(is.null(bar.f.o)) {bar.f.o <- 0} if(is.null(bar.b.o)) {bar.b.o <- 0} if(is.null(bean.b.col)) {bean.b.col <- "black"} if(is.null(inf.f.col)) {inf.f.col <- "white"} if(is.null(inf.b.col)) {inf.b.col <- gray(0)} if(is.null(avg.line.col)) {avg.line.col <- "black"} if(is.null(point.col)) {point.col <- "black"} if(is.null(point.cex)) {point.cex <- .5} # # if(is.null(back.col)) {back.col <- gray(.97)} if(is.null(inf.disp)) {inf.disp <- "bean"} if(is.null(gl.col)) {gl.col <- "gray"} if(is.null(gl.lwd)) {gl.lwd <- c(.25, .5)} } if(theme == 4) { if(is.null(point.pch)) {point.pch <- 16} if(is.null(point.o)) {point.o <- .3} if(is.null(bean.b.o)) {bean.b.o <- 0} if(is.null(bean.f.o)) {bean.f.o <- 0} if(is.null(inf.f.o)) {inf.f.o <- 1} if(is.null(inf.b.o)) {inf.b.o <- .5} if(is.null(avg.line.o)) {avg.line.o <- 1} if(is.null(bar.f.o)) {bar.f.o <- 1} if(is.null(bar.b.o)) {bar.b.o <- 1} if(is.null(inf.f.col)) {inf.f.col <- "black"} if(is.null(inf.b.col)) {inf.b.col <- gray(0)} if(is.null(avg.line.col)) {avg.line.col <- "black"} 
if(is.null(bar.f.col)) {bar.f.col <- "white"} if(is.null(bar.b.col)) {bar.b.col <- "black"} if(is.null(point.col)) {point.col <- "black"} if(is.null(point.cex)) {point.cex <- .5} # # if(is.null(back.col)) {back.col <- gray(.97)} if(is.null(gl.col)) {gl.col <- "gray"} if(is.null(gl.lwd)) {gl.lwd <- c(.25, .5)} if(is.null(inf.disp)) {inf.disp <- "line"} } # Inference lwd depends on inf.disp... if(is.null(inf.lwd)) { if(inf.disp == "line") {inf.lwd <- 3} if(inf.disp %in% c("rect", "bean")) {inf.lwd <- 2} } } # DEFINE OPACITIES { opac.df <- data.frame( point.o = rep(NA, n.beans), bean.b.o = rep(NA, n.beans), bean.f.o = rep(NA, n.beans), inf.f.o = rep(NA, n.beans), inf.b.o = rep(NA, n.beans), avg.line.o = rep(NA, n.beans), bar.f.o = rep(NA, n.beans), bar.b.o = rep(NA, n.beans) ) rownames(opac.df) <- 1:n.beans # If opacity values are specified, update them. if(is.null(point.o) == FALSE) {opac.df$point.o <- rep(point.o, length.out = n.beans)} if(is.null(bean.b.o) == FALSE) {opac.df$bean.b.o <- rep(bean.b.o, length.out = n.beans)} if(is.null(bean.f.o) == FALSE) {opac.df$bean.f.o <- rep(bean.f.o, length.out = n.beans)} if(is.null(inf.f.o) == FALSE) {opac.df$inf.f.o <- rep(inf.f.o, length.out = n.beans)} if(is.null(inf.b.o) == FALSE) {opac.df$inf.b.o <- rep(inf.b.o, length.out = n.beans)} if(is.null(avg.line.o) == FALSE) {opac.df$avg.line.o <- rep(avg.line.o, length.out = n.beans)} if(is.null(bar.f.o) == FALSE) {opac.df$bar.f.o <- rep(bar.f.o, length.out = n.beans)} if(is.null(bar.b.o) == FALSE) {opac.df$bar.b.o <- rep(bar.b.o, length.out = n.beans)} } # DEFINE COLORS { colors.df <- data.frame( point.col = rep(NA, n.beans), point.bg = rep(NA, n.beans), bean.b.col = rep(NA, n.beans), bean.f.col = rep(NA, n.beans), inf.f.col = rep(NA, n.beans), inf.b.col = rep(NA, n.beans), avg.line.col = rep(NA, n.beans), bar.f.col = rep(NA, n.beans), bar.b.col = rep(NA, n.beans), quant.col = rep(NA, n.beans) ) rownames(colors.df) <- 1:n.beans # If palette is in piratepal()... if(mean(pal %in% piratepal("names")) == 1) { colors.df$point.col <- rep(piratepal(palette = pal, length.out = n.cols, mix.col = mix.col, mix.p = mix.p), length.out = n.beans) colors.df$point.bg <- rep(piratepal(palette = pal, length.out = n.cols, mix.col = mix.col, mix.p = mix.p), length.out = n.beans) colors.df$bean.b.col <- rep(piratepal(palette = pal, length.out = n.cols, mix.col = mix.col, mix.p = mix.p), length.out = n.beans) colors.df$bean.f.col <- rep(piratepal(palette = pal, length.out = n.cols, mix.col = mix.col, mix.p = mix.p), length.out = n.beans) colors.df$inf.f.col <- rep(piratepal(palette = pal, length.out = n.cols, mix.col = mix.col, mix.p = mix.p), length.out = n.beans) colors.df$inf.b.col <- rep(piratepal(palette = pal, length.out = n.cols, mix.col = mix.col, mix.p = mix.p), length.out = n.beans) colors.df$avg.line.col <- rep(piratepal(palette = pal, length.out = n.cols, mix.col = mix.col, mix.p = mix.p), length.out = n.beans) colors.df$bar.f.col <- rep(piratepal(palette = pal, length.out = n.cols, mix.col = mix.col, mix.p = mix.p), length.out = n.beans) colors.df$bar.b.col <- rep(piratepal(palette = pal, length.out = n.cols, mix.col = mix.col, mix.p = mix.p), length.out = n.beans) colors.df$quant.col <- rep(piratepal(palette = pal, length.out = n.cols, mix.col = mix.col, mix.p = mix.p), length.out = n.beans) } # If palette is NOT in piratepal()... 
if(mean(pal %in% piratepal("names")) != 1) { colors.df$point.col <- rep(pal, length.out = n.beans) colors.df$point.bg <- rep(pal, length.out = n.beans) colors.df$bean.b.col <- rep(pal, length.out = n.beans) colors.df$bean.f.col <- rep(pal, length.out = n.beans) colors.df$inf.f.col <- rep(pal, length.out = n.beans) colors.df$inf.b.col <- rep(pal, length.out = n.beans) colors.df$avg.line.col <- rep(pal, length.out = n.beans) colors.df$bar.f.col <- rep(pal, length.out = n.beans) colors.df$bar.b.col <- rep(pal, length.out = n.beans) colors.df$quant.col <- rep(pal, length.out = n.beans) } # Apply specified colors if(is.null(point.col) == FALSE) {colors.df$point.col <- rep(point.col, length.out = n.beans)} if(is.null(point.bg) == FALSE) {colors.df$point.bg <- rep(point.bg, length.out = n.beans)} if(is.null(bean.b.col) == FALSE) {colors.df$bean.b.col <- rep(bean.b.col, length.out = n.beans)} if(is.null(bean.f.col) == FALSE) {colors.df$bean.f.col <- rep(bean.f.col, length.out = n.beans)} if(is.null(inf.f.col) == FALSE) {colors.df$inf.f.col <- rep(inf.f.col, length.out = n.beans)} if(is.null(inf.b.col) == FALSE) {colors.df$inf.b.col <- rep(inf.b.col, length.out = n.beans)} if(is.null(avg.line.col) == FALSE) {colors.df$avg.line.col <- rep(avg.line.col, length.out = n.beans)} if(is.null(bar.f.col) == FALSE) {colors.df$bar.f.col <- rep(bar.f.col, length.out = n.beans)} if(is.null(bar.b.col) == FALSE) {colors.df$bar.b.col <- rep(bar.b.col, length.out = n.beans)} if(is.null(quant.col) == FALSE) {colors.df$quant.col <- rep(quant.col, length.out = n.beans)} } # SETUP PLOTTING SPACE { # Determine margins if(n.subplots == 1) { # par(mar = c(5, 4, 4, 1) + .1) add.yaxt <- TRUE } if(n.subplots %in% c(2, 3)) { if(subplot.i == 1) { par(mar = c(5, 4, 4, 1) + .1) add.yaxt <- TRUE } if(subplot.i > 1) { par(mar = c(5, 3, 4, 1) + .1) add.yaxt <- FALSE } } if(n.subplots == c(4)) { if(subplot.i %in% c(1, 3)) { par(mar = c(5, 4, 4, 1) + .1)} else { par(mar = c(5, 3, 4, 1) + .1) } } if(n.subplots %in% c(5, 6)) { if(subplot.i %in% c(1, 4)) { par(mar = c(5, 4, 4, 1) + .1)} else { par(mar = c(5, 3, 4, 1) + .1) } } if(n.subplots > 6) { if(subplot.i %in% seq(ceiling(sqrt(n.subplots)) + 1, n.subplots + 1, length.out = ceiling(sqrt(n.subplots)))) { par(mar = c(5, 4, 4, 1) + .1)} else { par(mar = c(5, 3, 4, 1) + .1) } } # Determine y limits (y axis limits) # y axis breaks (y.levels) if(is.null(ylim) == TRUE) { # Determine best step size steps.p <- c( seq(1e-3, 1e-2, 1e-3), seq(1e-4, 1e-3, 1e-3), seq(1e-5, 1e-4, 1e-4), seq(1e-6, 1e-5, 1e-5), seq(1e-7, 1e-6, 1e-6), seq(1e-8, 1e-7, 1e-7), seq(1e-9, 1e-8, 1e-8), 1/2, 1/5, 1/10, 1/25, 1/50, 1/100, 1, 2, 5, 10, 25, 50, 100, seq(1e2, 1e3, 1e2), seq(1e3, 1e4, 1e3), seq(1e4, 1e5, 1e4), seq(1e5, 1e6, 1e5), seq(1e6, 1e7, 1e6), seq(1e7, 1e8, 1e7), seq(1e8, 1e9, 1e8) ) range <- max(dv.v) - min(dv.v) steps.p.m <- range / steps.p best.step.size <- min(steps.p[which(abs(steps.p.m - 10) == min(abs(steps.p.m - 10)))]) plot.min <- floor(min(dv.v) / best.step.size) * best.step.size plot.max <- ceiling(max(dv.v) / best.step.size) * best.step.size plot.height <- plot.max - plot.min ylim <- c(plot.min, plot.max) y.levels <- seq(plot.min, plot.max, by = best.step.size) } if(is.null(ylim) == FALSE) { steps.p <- c( seq(1e-3, 1e-2, 1e-3), seq(1e-4, 1e-3, 1e-3), seq(1e-5, 1e-4, 1e-4), seq(1e-6, 1e-5, 1e-5), seq(1e-7, 1e-6, 1e-6), seq(1e-8, 1e-7, 1e-7), seq(1e-9, 1e-8, 1e-8), 1/2, 1/5, 1/10, 1/25, 1/50, 1/100, 1, 2, 5, 10, 25, 50, 100, seq(1e2, 1e3, 1e2), seq(1e3, 1e4, 1e3), seq(1e4, 1e5, 1e4), seq(1e5, 
1e6, 1e5), seq(1e6, 1e7, 1e6), seq(1e7, 1e8, 1e7), seq(1e8, 1e9, 1e8) ) range <- ylim[2] - ylim[1] steps.p.m <- range / steps.p best.step.size <- min(steps.p[which(abs(steps.p.m - 10) == min(abs(steps.p.m - 10)))]) plot.min <- floor(ylim[1] / best.step.size) * best.step.size plot.max <- ceiling((max(dv.v) - plot.min)/ best.step.size) * best.step.size y.levels <- seq(ylim[1], ylim[2], by = best.step.size) } # Determine x and y labels if(subplot.n.iv == 1 & is.null(xlab)) {my.xlab <- iv.names[1]} if(subplot.n.iv == 1 & is.null(xlab) == FALSE) {my.xlab <- xlab} if(subplot.n.iv > 1) {my.xlab <- ""} if(is.null(ylab)) {ylab <- dv.name} } # PLOTTING SPACE if(add == FALSE) { # X-Axis if(is.null(xlim)) {xlim <- c(min(bean.loc) - .5, max(bean.loc) + .5)} plot(1, xlim = xlim, ylim = ylim, type = "n", xaxt = "n", yaxt = "n", xlab = my.xlab, ylab = ylab, cex.lab = cex.lab, main = main, yaxt = yaxt, bty = bty#, # ... ) # Add title for iv3 if(n.iv > 2) { top.text <- paste(names(data)[4], " = ", iv3.levels[subplot.i], sep = "") mtext(text = top.text, side = 3, line = 1) } # Y-AXIS { if(is.null(yaxt)) { axis(side = 2, at = y.levels, labels = prettyNum(y.levels, big.mark = ","), las = 1, lwd = 0, lwd.ticks = 1, cex.axis = cex.axis) } } # BACKGROUND { if(is.null(back.col) == FALSE) { rect(xleft = par("usr")[1], ybottom = par("usr")[3], xright = par("usr")[2], ytop = par("usr")[4], col = back.col, border = NA) } } # GRIDLINES { if(is.null(gl.col) == FALSE) { if(is.null(gl.lwd)) {gl.lwd <- c(.5)} if(is.null(gl.lty)) {gl.lty <- 1} if(is.null(gl)) {gl <- seq(min(y.levels), max(y.levels), length.out = length(y.levels))} abline(h = gl, lwd = gl.lwd, col = gl.col, lty = gl.lty) } } } # PIRATEPLOT ELEMENTS { if(is.na(width.max)) { if(subplot.n.iv == 1) {width.max <- .45} if(subplot.n.iv == 2) {width.max <- .5} } bean.lwd <- rep(bean.lwd, length.out = n.beans) bean.lty <- rep(bean.lty, length.out = n.beans) inf.lwd <- rep(inf.lwd, length.out = n.beans) avg.line.lwd <- rep(avg.line.lwd, length.out = n.beans) bar.lwd <- rep(bar.lwd, length.out = n.beans) # Loop over beans for (bean.i in 1:n.beans) { dv.i <- data.i[data.i$bean.num == bean.i, dv.name] if(is.logical(dv.i)) {dv.i <- as.numeric(dv.i)} x.loc.i <- bean.mtx$x.loc[bean.i] # CALCULATE DENSITIES if(length(dv.i) > 3) { # only if n > 5 dens.i <- density(dv.i, bw, adjust) dens.y.i <- dens.i$y dens.x.i <- dens.i$x # Rescale density according to width.max and width.min if(max(dens.y.i) < width.min) { dens.y.i <- dens.y.i / max(dens.y.i) * width.min } if(max(dens.y.i) > width.max) { dens.y.i <- dens.y.i / max(dens.y.i) * width.max } # adjust to cut.min and cut.max dens.x.plot.i <- dens.x.i dens.y.plot.i <- dens.y.i if(is.null(cut.min) == FALSE) { dens.x.plot.i <- dens.x.i[dens.x.i > cut.min] dens.y.plot.i <- dens.y.i[dens.x.i > cut.min] } if(is.null(cut.max) == FALSE) { dens.x.plot.i <- dens.x.i[dens.x.i < cut.max] dens.y.plot.i <- dens.y.i[dens.x.i < cut.max] } if(cap.beans == TRUE) { dens.x.plot.i <- dens.x.i[dens.x.i < max(dv.i) & dens.x.i > min(dv.i)] dens.y.plot.i <- dens.y.i[dens.x.i < max(dv.i) & dens.x.i > min(dv.i)] } } # BAR { rect(xleft = x.loc.i - width.max, ybottom = 0, xright = x.loc.i + width.max, ytop = summary$avg[bean.i], col = transparent(colors.df$bar.f.col[bean.i], trans.val = 1 - opac.df$bar.f.o[bean.i]), border = transparent(colors.df$bar.b.col[bean.i], trans.val = 1 - opac.df$bar.b.o[bean.i]), lwd = bar.b.lwd[bean.i] ) } # BEAN { if(length(setdiff(dv.i, c(0, 1))) > 0 & length(dv.i) > 3) { polygon(c(x.loc.i - 
dens.y.plot.i[1:(length(dens.x.plot.i))], x.loc.i + rev(dens.y.plot.i[1:(length(dens.x.plot.i))])), c(dens.x.plot.i[1:(length(dens.x.plot.i))], rev(dens.x.plot.i[1:(length(dens.x.plot.i))])), col = transparent(colors.df$bean.f.col[bean.i], trans.val = 1 - opac.df$bean.f.o[bean.i]), border = transparent(colors.df$bean.b.col[bean.i], trans.val = 1 - opac.df$bean.b.o[bean.i]), lwd = bean.lwd[bean.i], lty = bean.lty[bean.i] ) } } # POINTS { # 1-color points if((point.pch %in% 21:25) == FALSE) { points(x = rep(x.loc.i, length(dv.i)) + rnorm(length(dv.i), mean = 0, sd = jitter.val), y = dv.i, pch = point.pch, col = transparent(colors.df$point.col[bean.i], trans.val = 1 - opac.df$point.o[bean.i]), cex = point.cex, lwd = point.lwd ) } # 2-color points if(point.pch %in% 21:25) { points(x = rep(x.loc.i, length(dv.i)) + rnorm(length(dv.i), mean = 0, sd = jitter.val), y = dv.i, pch = point.pch, col = transparent(colors.df$point.col[bean.i], trans.val = 1 - opac.df$point.o[bean.i]), bg = transparent(colors.df$point.bg[bean.i], trans.val = 1 - opac.df$point.o[bean.i]), cex = point.cex, lwd = point.lwd ) } } if (quant.boxplot) { quant <- c(0.25, 0.75) } # QUANTILES if (!is.null(quant)) { # set default line length if length is not given manually if (is.null(quant.length)) { quant.length <- c(rep(0.65, length(quant))) } else {quant.length <- rep(quant.length, length.out = length(quant))} if (is.null(quant.lwd)) { quant.lwd <- c(rep(0.75, length(quant))) } else {quant.lwd <- rep(quant.lwd, length.out = length(quant))} if (quant.boxplot) { for (i in 1:length(quant)) { if (i == 1) { segments(x.loc.i + (quant.length[i] - width.max), quantile(dv.i, probs = quant[i]) - (1.5*IQR(dv.i)), x.loc.i - (quant.length[i] - width.max), quantile(dv.i,probs = quant[i]) - (1.5*IQR(dv.i)), col = colors.df$quant.col[bean.i], lwd = quant.lwd[i], lend = 3) } else { segments(x.loc.i + (quant.length[i] - width.max), quantile(dv.i, probs = quant[i]) + (1.5*IQR(dv.i)), x.loc.i - (quant.length[i] - width.max), quantile(dv.i,probs = quant[i]) + (1.5*IQR(dv.i)), col = colors.df$quant.col[bean.i], lwd = quant.lwd[i], lend = 3) } } segments(x.loc.i, quantile(dv.i, probs = min(quant)) - (1.5*IQR(dv.i)), x.loc.i, quantile(dv.i, probs = max(quant)) + (1.5*IQR(dv.i)), col = colors.df$quant.col[bean.i], lwd = quant.lwd[1], lend = 3, lty = 1) } else { for (i in 1:length(quant)) { segments(x.loc.i + (quant.length[i] - width.max), quantile(dv.i, probs = quant[i]), x.loc.i - (quant.length[i] - width.max), quantile(dv.i, probs = quant[i]), col = colors.df$quant.col[bean.i], lwd = quant.lwd[i], lend = 3) } segments(x.loc.i, quantile(dv.i, probs = min(quant)), x.loc.i, quantile(dv.i, probs = max(quant)), col = colors.df$quant.col[bean.i], lwd = quant.lwd[1], lend = 3, lty = 1) } } # INFERENCE BAND { if(length(dv.i) > 3 & sd(dv.i) > 0) { if(length(dv.i) <= 3) { message(paste("Note: Group ", bean.i, " had too few observations (", length(dv.i), ") for an inference band", sep = "")) message(paste("Note: Group", bean.i, "had no variance, so no inference band :(")) } dens.inf.x <- dens.x.i[dens.x.i >= summary$inf.lb[bean.i] & dens.x.i <= summary$inf.ub[bean.i]] dens.inf.y <- dens.y.i[dens.x.i >= summary$inf.lb[bean.i] & dens.x.i <= summary$inf.ub[bean.i]] # Draw inf band if(inf.disp == "line") { segments(x.loc.i, summary$inf.lb[bean.i], x.loc.i, summary$inf.ub[bean.i], col = transparent(colors.df$inf.f.col[bean.i], trans.val = 1 - opac.df$inf.f.o[bean.i]), lwd = inf.lwd[bean.i]) # Add whiskers segments(rep(x.loc.i - width.max * .2, 2), 
c(summary$inf.lb[bean.i], summary$inf.ub[bean.i]), rep(x.loc.i + width.max * .2, 2), c(summary$inf.lb[bean.i], summary$inf.ub[bean.i]), col = transparent(colors.df$inf.f.col[bean.i], trans.val = 1 - opac.df$inf.f.o[bean.i]), lwd = inf.lwd[bean.i]) } if(inf.disp == "rect") { rect(x.loc.i - width.max * .7, summary$inf.lb[bean.i], x.loc.i + width.max * .7, summary$inf.ub[bean.i], col = transparent(colors.df$inf.f.col[bean.i], trans.val = 1 - opac.df$inf.f.o[bean.i]), lwd = inf.lwd[bean.i], border = transparent(colors.df$inf.b.col[bean.i], trans.val = 1 - opac.df$inf.b.o[bean.i]) ) } if(inf.disp == "bean") { polygon(c(x.loc.i - dens.inf.y[1:(length(dens.inf.x))], x.loc.i + rev(dens.inf.y[1:(length(dens.inf.x))])), c(dens.inf.x[1:(length(dens.inf.x))], rev(dens.inf.x[1:(length(dens.inf.x))])), col = transparent(colors.df$inf.f.col[bean.i], trans.val = 1 - opac.df$inf.f.o[bean.i]), border = transparent(colors.df$inf.b.col[bean.i], trans.val = 1 - opac.df$inf.b.o[bean.i]), lwd = bean.lwd[bean.i] ) } } } # AVERAGE LINE { if(inf.disp %in% c("line", "rect")) { segments(x0 = x.loc.i - width.max, y0 = summary$avg[bean.i], x1 = x.loc.i + width.max, y1 = avg.line.fun(dv.i), col = transparent(colors.df$avg.line.col[bean.i], trans.val = 1 - opac.df$avg.line.o[bean.i]), lwd = avg.line.lwd[bean.i], lend = 3 ) } if(inf.disp == "bean") { fun.loc <- which(abs(dens.x.i - avg.line.fun(dv.i)) == min(abs(dens.x.i - avg.line.fun(dv.i)))) segments(x.loc.i - dens.y.i[fun.loc], summary$avg[bean.i], x.loc.i + dens.y.i[fun.loc], summary$avg[bean.i], col = transparent(colors.df$avg.line.col[bean.i], trans.val = 1 - opac.df$avg.line.o[bean.i]), lwd = avg.line.lwd[bean.i], lend = 3 ) } } } # Add bean names for IV 1 # if(subplot.n.iv == 1) {line.t <- .7} # if(subplot.n.iv == 2) {line.t <- 2} if(is.null(xaxt) == TRUE) { if(is.null(cex.lab)) { cex.lab <- 1 / ((n.subplots - 1) * .1 + 1) } if(is.null(xaxt)) { axis(side = 1, at = bean.loc, labels = rep("", n.beans), lwd = 0, lwd.ticks = 1, cex.axis = cex.axis) } # IV 1 Labels mtext(bean.mtx[,1], side = 1, at = bean.mtx$x.loc, line = 1, cex = cex.names) # IV 2 labels if(subplot.n.iv == 2) { mtext(iv.names[2], side = 1, line = 2.5, at = par("usr")[1], adj = 1, cex = cex.names) mtext(iv.names[1], side = 1, line = 1, at = par("usr")[1], adj = 1, cex = cex.names) text.loc <- (iv.lengths[1] + 1) / 2 * (2 *(1:iv.lengths[2]) - 1) mtext(text = unique(bean.mtx[,2]), side = 1, line = 2.5, at = text.loc, cex = cex.lab ) } } } } summary.ls[[subplot.i]] <- summary } summary.df <- do.call(summary.ls, what = "rbind") summary.df <- summary.df[,c(all.iv.names, setdiff(names(summary.df), all.iv.names))] output.ls <- list("summary" = summary.df, "avg.line.fun" = deparse(substitute(avg.line.fun)), "inf.method" = inf.method, "inf.p" = inf.p) if(plot == FALSE) { return(output.ls) } }
/scratch/gouwar.j/cran-all/cranData/yarrr/R/pirateplot_function.R
#' pirates
#'
#' A dataset containing the results of a survey of 1,000 pirates.
#'
#' @format A data frame containing 1,000 rows and 17 columns
#' \describe{
#'   \item{id}{An integer giving the pirate's id number}
#'   \item{sex}{A string with the pirate's self reported sex}
#'   \item{age}{An integer giving the age of the pirate in years}
#'   \item{height}{Height in cm}
#'   \item{weight}{Weight in kg}
#'   \item{headband}{A binary variable indicating whether the pirate wears a headband}
#'   \item{college}{A string indicating the college the pirate went to. JSSFP stands for Jack Sparro's School of Fashion and Piratery, while CCCC stands for Captain Chunk's Cannon Crew}
#'   \item{tattoos}{An integer indicating the number of tattoos the pirate has}
#'   \item{tchests}{An integer indicating the number of treasure chests found by the pirate}
#'   \item{parrots}{An integer indicating the number of parrots owned by the pirate in his/her lifetime}
#'   \item{favorite.pirate}{A string indicating the pirate's favorite pirate}
#'   \item{sword.type}{A string indicating the type of sword the pirate uses}
#'   \item{eyepatch}{An integer indicating the number of eyepatches worn by the pirate}
#'   \item{sword.time}{A number indicating how long it takes (in seconds) for the pirate to draw his/her sword. Smaller times are better!}
#'   \item{beard.length}{A number indicating the length of the pirate's beard in centimeters}
#'   \item{fav.pixar}{A string indicating the pirate's favorite Pixar movie}
#'   \item{grogg}{How many mugs of grogg the pirate drinks a day on average}
#' }
#' @source 2015 annual international pirate meeting at the Bodensee in Konstanz, Germany
"pirates"
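# Editor's sketch (not part of the original source): a minimal, hypothetical look
# at the pirates data once the package is loaded. Only documented columns are
# used, and the output is illustrative.
if (interactive()) {
  library(yarrr)
  str(pirates)                              # the documented survey columns
  pirateplot(height ~ sex, data = pirates)  # height distribution by sex
}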
/scratch/gouwar.j/cran-all/cranData/yarrr/R/pirates_doc.R
#' pirateserrors
#'
#' A dataset containing the results of a survey of 1,000 pirates. This dataset
#' is identical to the pirates dataset - except that it has many errors!
#'
#' @format A data frame containing 1,000 rows and 14 columns
#' \describe{
#'   \item{id}{An integer giving the pirate's id number}
#'   \item{sex}{A string with the pirate's self reported sex}
#'   \item{headband}{A binary variable indicating whether the pirate wears a headband}
#'   \item{age}{An integer giving the age of the pirate in years}
#'   \item{college}{A string indicating the college the pirate went to. JSSFP stands for Jack Sparro's School of Fashion and Piratery, while CCCC stands for Captain Chunk's Cannon Crew}
#'   \item{tattoos}{An integer indicating the number of tattoos the pirate has}
#'   \item{tchests}{An integer indicating the number of treasure chests found by the pirate}
#'   \item{parrots}{An integer indicating the number of parrots owned by the pirate in his/her lifetime}
#'   \item{favorite.pirate}{A string indicating the pirate's favorite pirate}
#'   \item{sword.type}{A string indicating the type of sword the pirate uses}
#'   \item{sword.time}{A number indicating how long it takes (in seconds) for the pirate to draw his/her sword. Smaller times are better!}
#'   \item{eyepatch}{An integer indicating the number of eyepatches worn by the pirate}
#'   \item{beard.length}{A number indicating the length of the pirate's beard in centimeters}
#'   \item{fav.pixar}{A string indicating the pirate's favorite Pixar movie}
#' }
#' @source 2015 annual international pirate meeting at the Bodensee in Konstanz, Germany
"pirateserrors"
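# Editor's sketch (not part of the original source): one hypothetical way to
# start cleaning this data with recodev() (defined in recodev_function.R). The
# column chosen and the messy values shown are assumptions for illustration only.
if (interactive()) {
  library(yarrr)
  summary(pirateserrors)   # look for impossible or inconsistent values
  headband.clean <- recodev(pirateserrors$headband,
                            old.values = c("yes", "y", "no", "n"),
                            new.values = c("yes", "yes", "no", "no"),
                            others = NA)
  table(headband.clean, useNA = "ifany")
}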
/scratch/gouwar.j/cran-all/cranData/yarrr/R/pirateserrors_doc.R
#' pircharter
#'
#' A dataframe containing travel times of chartered ships from three pirate
#' companies to three different destinations.
#'
#' @format A data frame containing 1,000 rows and 3 columns
#' \describe{
#'   \item{company}{(string) - The charter company: JoRo = Jolly Roger, BmcB = Boaty McBoat, MiPa = Millenium Parrot}
#'   \item{destination}{(string) - The destination of the charter}
#'   \item{time}{(numeric) - The travel time of the ship in hours}
#' }
#' @source 2015 annual international pirate meeting at the Bodensee in Konstanz, Germany
"pircharter"
/scratch/gouwar.j/cran-all/cranData/yarrr/R/pircharter_doc.R
#' poopdeck
#'
#' A dataframe containing the amount of time it took to clean both pirate and
#' shark poop from the poop deck using one of three different cleaning solutions
#'
#' @format A data frame containing 300 rows and 4 columns
#' \describe{
#'   \item{day}{(factor) - The day that the poop deck was cleaned (1 through 10000)}
#'   \item{cleaner}{(string) - The cleaning solution used}
#'   \item{type}{(string) - The type of poop being cleaned}
#'   \item{time}{(numeric) - The amount of time (in minutes) the cleaning took.}
#' }
#' @source 2015 annual international pirate meeting at the Bodensee in Konstanz, Germany
"poopdeck"
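# Editor's sketch (not part of the original source): a minimal, hypothetical
# summary of the poopdeck data with pirateplot(); column names follow the
# documentation above and the output is illustrative.
if (interactive()) {
  library(yarrr)
  pirateplot(time ~ cleaner + type,
             data = poopdeck,
             main = "Cleaning time by cleaner and poop type")
}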
/scratch/gouwar.j/cran-all/cranData/yarrr/R/poopdeck_doc.R
#' recodev
#'
#' Takes a vector \code{original.vector} and recodes every value that appears
#' in \code{old.values} to the corresponding value in \code{new.values}.
#'
#' @param original.vector A vector you want to recode
#' @param old.values A vector of length M giving the values to be replaced.
#' @param new.values A vector of length M giving the replacement values.
#' @param others An optional value that all values in \code{original.vector}
#'   not found in \code{old.values} are converted to. If \code{NULL} (the
#'   default), unmatched values are left unchanged.
#' @export
#' @examples
#'
#' x <- c("y", "y", "XSF", "y", "0", "X", "0", "0", "y", "n", "0", "1", "1")
#' recodev(original.vector = x,
#'         old.values = c("y", "1", "n", "0"),
#'         new.values = c(1, 1, 0, 0))
#'
#' x <- c("y", "y", "XSF", "y", "0", "X", "0", "0", "y", "n", "0", "1", "1")
#' recodev(original.vector = x,
#'         old.values = c("y", "1", "n", "0"),
#'         new.values = c(1, 1, 0, 0),
#'         others = NA)
#'
recodev <- function(original.vector, old.values, new.values, others = NULL) {

  # Start from the original vector (unmatched values stay as they are), or from
  # a vector filled with `others` if a default replacement was given
  if (is.null(others)) {
    new.vector <- original.vector
  } else {
    new.vector <- rep(others, length(original.vector))
  }

  # Recode each old value to its corresponding new value
  for (i in seq_along(old.values)) {
    change.log <- original.vector == old.values[i] & !is.na(original.vector)
    new.vector[change.log] <- new.values[i]
  }

  return(new.vector)
}
/scratch/gouwar.j/cran-all/cranData/yarrr/R/recodev_function.R
#' transparent function
#'
#' This function takes a standard color as an argument and returns a
#' transparent version of that color
#'
#' @param orig.col The original color to be made transparent. Can be specified as a string or a vector of rgb values
#' @param trans.val A number in the interval [0, 1] indicating how transparent to make the color.
#' @param maxColorValue The maximum color value (only used when orig.col is an rgb vector)
#' @keywords colors
#' @export
#' @examples
#'
#' # Diagram of some examples
#' plot(1, ylim = c(0, 1), xlim = c(0, 12), bty = "n",
#'      xaxt = "n", yaxt = "n", ylab = "", xlab = "", type = "n")
#'
#' text(6, .9, "transparent('red', trans.val = x)")
#' points(x = 1:11, y = rep(.8, 11), pch = 16,
#'        col = transparent("red", seq(0, 1, .1)), cex = 2)
#' text(x = 1:11, y = rep(.85, 11), seq(0, 1, .1))
#'
#' text(6, .7, "transparent('blue', trans.val = x)")
#' points(x = 1:11, y = rep(.6, 11), pch = 16,
#'        col = transparent("blue", seq(0, 1, .1)), cex = 2)
#' text(x = 1:11, y = rep(.65, 11), seq(0, 1, .1))
#'
#' text(6, .5, "transparent('forestgreen', trans.val = x)")
#' points(x = 1:11, y = rep(.4, 11), pch = 16,
#'        col = transparent("forestgreen", seq(0, 1, .1)), cex = 2)
#' text(x = 1:11, y = rep(.45, 11), seq(0, 1, .1))
#'
#' text(6, .3, "transparent('orchid1', trans.val = x)")
#' points(x = 1:11, y = rep(.2, 11), pch = 16,
#'        col = transparent("orchid1", seq(0, 1, .1)), cex = 2)
#' text(x = 1:11, y = rep(.25, 11), seq(0, 1, .1))
#'
#'
#' # Scatterplot with transparent colors
#'
#' a.x <- rnorm(100, mean = 0, sd = 1)
#' a.y <- a.x + rnorm(100, mean = 0, sd = 1)
#'
#' par(mfrow = c(3, 3))
#'
#' for(trans.val.i in seq(0, .1, length.out = 9)) {
#'
#'   plot(a.x, a.y, pch = 16, col = transparent("blue", trans.val.i), cex = 1.5,
#'        xlim = c(-5, 5), ylim = c(-5, 5), xlab = "x", ylab = "y",
#'        main = paste("trans.val = ", round(trans.val.i, 2), sep = ""))
#'
#' }
transparent <- function(orig.col = "red",
                        trans.val = 1,
                        maxColorValue = 255) {

  n.cols <- length(orig.col)

  # Convert the input color(s) to a matrix of rgb values
  orig.col <- col2rgb(orig.col)
  final.col <- rep(NA, n.cols)

  # Build each output color with the requested transparency
  for (i in 1:n.cols) {
    final.col[i] <- rgb(orig.col[1, i], orig.col[2, i], orig.col[3, i],
                        alpha = (1 - trans.val) * 255,
                        maxColorValue = maxColorValue)
  }

  return(final.col)
}
/scratch/gouwar.j/cran-all/cranData/yarrr/R/transparent_function.R
.onAttach <- function(libname, pkgname) {

  packageStartupMessage("yarrr v0.1.5. Citation info at citation('yarrr'). Package guide at yarrr.guide()")
  packageStartupMessage("Email me at [email protected]")

}
/scratch/gouwar.j/cran-all/cranData/yarrr/R/zzz.R
## ---- echo = F, message = F, results = 'hide'---------------------------- library(yarrr) ## ---- fig.width = 6, fig.height = 6, fig.align='center'------------------ pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Chicken weights by Time (week)") ## ---- fig.width = 6, fig.height = 6, fig.align = 'center'---------------- piratepal("all") ## ---- fig.width = 6, fig.height = 6, fig.align = 'center'---------------- my.cols <- piratepal(palette = "google", trans = .5) set.seed(100) # For reproducibility x <- rnorm(100) y <- x + rnorm(100) plot(x = x, y = y, col = my.cols, pch = 16, cex = runif(100, min = 0, max = 2), main = "piratepal('google', trans = .5)")
/scratch/gouwar.j/cran-all/cranData/yarrr/inst/doc/guide.R
--- title: "yarrr package guide" author: "Nathaniel Phillips ([email protected])" date: "`r Sys.Date()`" csl: apa.csl bibliography: yarrr.bib output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Package guide} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- # YaRrr! The pirate's guide to R This package contains data, functions, and other random files for the e-book YaRrr! The pirate's guide to R. You can access the latest version of the book at [www.thepiratesguidetor.com](www.thepiratesguidetor.com). # Guides To see guides for the main functions, click one of the following links: - [pirateplot() - A function for creating "Pirate Plots"](pirateplot.html) - [piratepal() - A function to create pirate-y color palettes](piratepal.html) # Examples ## pirateplot() The pirateplot() function creates a pirateplot ```{r, echo = F, message = F, results = 'hide'} library(yarrr) ``` ```{r, fig.width = 6, fig.height = 6, fig.align='center'} pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Chicken weights by Time (week)") ``` ## piratepal() The `piratepal()` function returns different color palettes either created by graphic designers, or inspired by random things I have found around my office `piratepal("espresso")`, or horror movies `piratepal("evildead)"`. Here are all of the palettes: ```{r, fig.width = 6, fig.height = 6, fig.align = 'center'} piratepal("all") ``` You can then use specific palettes in your plots by specifying the named palette. I'll create a scatterplot using the google palette with `piratepal('google', trans = .5)`: ```{r, fig.width = 6, fig.height = 6, fig.align = 'center'} my.cols <- piratepal(palette = "google", trans = .5) set.seed(100) # For reproducibility x <- rnorm(100) y <- x + rnorm(100) plot(x = x, y = y, col = my.cols, pch = 16, cex = runif(100, min = 0, max = 2), main = "piratepal('google', trans = .5)") ``` # Questions If you have any questions, comments, or suggestions, write me at [email protected] ### References
/scratch/gouwar.j/cran-all/cranData/yarrr/inst/doc/guide.Rmd
## ---- echo = F, message = F, results = 'hide'---------------------------- library("yarrr") ## ---- fig.width = 6, fig.height = 6, fig.align='center'------------------ piratepal(palette = "all") ## ------------------------------------------------------------------------ piratepal(palette = "google") ## ---- fig.width = 6, fig.height = 6, fig.align='center'------------------ piratepal(palette = "southpark", trans = .5, plot.result = T) ## ----echo = F------------------------------------------------------------ set.seed(105) ## ---- fig.width = 6, fig.height = 6, fig.align='center'------------------ piratepal("random", plot.result = T) ## ---- fig.width = 6, fig.height = 6, fig.align='center'------------------ piratepal(palette = "evildead", trans = .5, plot.result = T) ## ---- fig.width = 6, fig.height = 6, fig.align = 'center'---------------- xmen.cols <- piratepal(palette = "xmen", trans = .5) x <- rnorm(100) y <- x + rnorm(100) plot(x = x, y = y, col = xmen.cols, pch = 16, cex = 2, main = "piratepal('xmen', trans = .5)") ## ---- fig.width = 8, fig.height = 6, fig.align='center', results='hold'---- # Set up balloons balloon.colors <- piratepal("basel", trans = .2) balloon.x <- rnorm(500, 0) balloon.y <- rnorm(500, 4, 1) par(mar = rep(.1, 4)) plot(1, xlim = c(-15, 7), ylim = c(-15, 7), xlab = "", ylab = "", type = "n", xaxt = "n", yaxt = "n", bty = "n") # skyline start.x <- runif(200, -15, 7) start.y <- sort(runif(200, -15, -12), decreasing = T) heights <- runif(200, 2, 4) widths <- runif(200, .25, 1.5) rect(start.x, start.y, start.x + widths, start.y + heights, col = "white", border = gray(.4)) # house rect(-2, -6, 2, -2) polygon(c(-2, 0, 2), c(-2, 0, -2)) rect(-.5, -6, .5, -4) points(.3, -5) # strings line.start.x <- rnorm(500, 0, .2) line.start.y <- -1 + rnorm(500, 0, .1) segments(line.start.x, line.start.y, balloon.x, balloon.y, lty = 1, col = gray(.5, .1), lwd = .2) # balloons points(balloon.x, balloon.y, pch = 21, bg = balloon.colors, col = gray(.9), cex = rnorm(100, 2, .3)) ## ---- echo = F----------------------------------------------------------- par(mar = c(5, 4, 4, 1) + .1)
/scratch/gouwar.j/cran-all/cranData/yarrr/inst/doc/piratepal.R
--- title: "piratepal(): Color palettes for R Pirates" output: rmarkdown::html_vignette bibliography: yarrr.bib vignette: > %\VignetteIndexEntry{Pirate-y color palettes} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, echo = F, message = F, results = 'hide'} library("yarrr") ``` ## piratepal() `piratepal()` is a function that returns color palettes. The function has four main arguments: - `palette`: The specific palette you want to use. For example, `palette = "appletv"` will give you the appletv palette. You can also specify `palette = "all"` to see all the palettes, or `palette = "random"` to get a random palette. - `trans`: A number between 0 and 1 indicating how transparent to make the colors. 1 is fully transparent (e.g. invisible), and 0 is not transparent at all. Personally, I like at least slightly transparent colors (e.g.; `trans = .3`) - `length.out`: An optional number indicating how many colors to return (if `length.out` is larger than the number of colors in the palette, they will just be repeated). - `plot.result`: A logical value indicating whether or not to display the palette. ## Examples To see all of the palettes, run `piratepal("all")` ```{r, fig.width = 6, fig.height = 6, fig.align='center'} piratepal(palette = "all") ``` Once you find a palette you'd like to use, you can return the colors as a vector by specifying the name of the palette in the `palette` argument. Here is the Google palette ```{r} piratepal(palette = "google") ``` If you want to see a palette in detail (and possibly some images that inspired the palette), include the argument `plot.result = T`. Here's where the `southpark` palette comes from. ```{r, fig.width = 6, fig.height = 6, fig.align='center'} piratepal(palette = "southpark", trans = .5, plot.result = T) ``` You can look at a random palette by setting `palette = "random"`: ```{r echo = F} set.seed(105) ``` ```{r, fig.width = 6, fig.height = 6, fig.align='center'} piratepal("random", plot.result = T) ``` Some of the palettes are darker than others. Here's the nightmare-inducing evildead palette (I'll up the transparency to .5 to make it a bit less scarring...) 
```{r, fig.width = 6, fig.height = 6, fig.align='center'} piratepal(palette = "evildead", trans = .5, plot.result = T) ``` Here's a scatterplot using the xmen palette with `piratepal('xmen', trans = .5)`: ```{r, fig.width = 6, fig.height = 6, fig.align = 'center'} xmen.cols <- piratepal(palette = "xmen", trans = .5) x <- rnorm(100) y <- x + rnorm(100) plot(x = x, y = y, col = xmen.cols, pch = 16, cex = 2, main = "piratepal('xmen', trans = .5)") ``` ### Plotting the Up house Let's use the basel palette to draw the house from Up [Up Trailer](https://www.youtube.com/watch?v=pkqzFUhGPJg) ```{r, fig.width = 8, fig.height = 6, fig.align='center', results='hold'} # Set up balloons balloon.colors <- piratepal("basel", trans = .2) balloon.x <- rnorm(500, 0) balloon.y <- rnorm(500, 4, 1) par(mar = rep(.1, 4)) plot(1, xlim = c(-15, 7), ylim = c(-15, 7), xlab = "", ylab = "", type = "n", xaxt = "n", yaxt = "n", bty = "n") # skyline start.x <- runif(200, -15, 7) start.y <- sort(runif(200, -15, -12), decreasing = T) heights <- runif(200, 2, 4) widths <- runif(200, .25, 1.5) rect(start.x, start.y, start.x + widths, start.y + heights, col = "white", border = gray(.4)) # house rect(-2, -6, 2, -2) polygon(c(-2, 0, 2), c(-2, 0, -2)) rect(-.5, -6, .5, -4) points(.3, -5) # strings line.start.x <- rnorm(500, 0, .2) line.start.y <- -1 + rnorm(500, 0, .1) segments(line.start.x, line.start.y, balloon.x, balloon.y, lty = 1, col = gray(.5, .1), lwd = .2) # balloons points(balloon.x, balloon.y, pch = 21, bg = balloon.colors, col = gray(.9), cex = rnorm(100, 2, .3)) ``` ```{r, echo = F} par(mar = c(5, 4, 4, 1) + .1) ``` ### Have a favorite palette? If you have a favorite palette that you'd like me to add, just contact me at [email protected] and I'll see what I can do :)
/scratch/gouwar.j/cran-all/cranData/yarrr/inst/doc/piratepal.Rmd
## ---- echo = F, message = F, result = 'hide'----------------------------- library(yarrr) ## ---- fig.width = 6, fig.height = 5, echo = F, fig.align='center'-------- pirateplot(formula = weight ~ Diet, data = ChickWeight, theme = 1, back.col = "white", gl.col = "white", bean.f.o = c(0, .1, .7, .1), # bean.b.o = c(0, .1, 1, .1), point.o = c(.4, .1, .1, .1), avg.line.o = c(.3, 1, .3, .3), inf.f.o = c(.1, .1, .1, .9), bar.f.o = c(.1, .8, .1, .1), inf.f.col = c("white", "white", "white", piratepal("xmen")[4]), main = "4 Elements of a pirateplot", pal = "xmen") text(.7, 350, labels = "Points") text(.7, 345, labels = "Raw Data", pos = 1, cex = .8) arrows(.7, 310, .97, 270, length = .1) text(1.4, 200, labels = "Bar/Line") text(1.4, 200, labels = "Center", pos = 1, cex = .8) arrows(1.4, 170, 1.54, 125, length = .1) text(2.4, 250, labels = "Bean") text(2.4, 250, labels = "Density", pos = 1, cex = .8) arrows(2.4, 220, 2.85, 200, length = .1) text(3.55, 300, labels = "Band") text(3.55, 290, labels = "Inference\n95% HDI or CI", pos = 1, cex = .8) arrows(3.55, 240, 3.8, 150, length = .1) ## ---- echo = FALSE------------------------------------------------------- pp.elements <- data.frame('Argument' = c("formula", "data", "main", "pal", "theme", "inf"), 'Description' = c("A formula", "A dataframe", "Plot title", "A color palette", "A plotting theme", "Type of inference"), 'Examples' = c("height ~ sex + eyepatch, weight ~ Time", "pirates, ChickWeight", "'Pirate heights', 'Chicken Weights", "'xmen', 'black'", "0, 1, 2", "'ci', 'hdi', 'iqr'" ) ) knitr::kable(pp.elements, caption = "Main Pirateplot Arguments") ## ----fig.align='center', fig.width = 6, fig.height = 4------------------- # Theme 1 (the default) pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 1, main = "theme = 1") ## ----fig.align='center', fig.width = 6, fig.height = 4------------------- # Theme 2 pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 2, main = "theme = 2") ## ----fig.align='center', fig.width = 6, fig.height = 4------------------- # Theme 3 pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 3, main = "theme = 3") ## ----fig.align='center', fig.width = 6, fig.height = 4------------------- # Theme 4 pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 4, main = "theme = 4") ## ----fig.align='center', fig.width = 6, fig.height = 4------------------- # Default theme pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 0, main = "theme = 0\nStart from scratch") ## ----fig.width = 6, fig.height = 4, fig.align = "center"----------------- piratepal("all") ## ----fig.width = 6, fig.height = 4, fig.align = "center"----------------- pirateplot(formula = weight ~ Time, data = ChickWeight, pal = "pony", theme = 1, main = "pony color palette") ## ----fig.width = 6, fig.height = 4, fig.align = "center"----------------- pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 2, pal = "black", main = "pal = 'black") ## ---- echo = FALSE------------------------------------------------------- pp.elements <- data.frame('element' = c("points", "beans", "bar", "inf", "avg.line"), 'color' = c("point.col, point.bg", "bean.f.col, bean.b.col", "bar.f.col, bar.b.col", "inf.f.col, inf.b.col", "avg.line.col" ), "opacity" = c("point.o", "bean.f.o, bean.b.o", "bar.f.o, bar.b.o", "inf.f.o, inf.b.o", "avg.line.o") ) knitr::kable(pp.elements, caption = "Customising plotting elements") ## ----fig.width = 6, fig.height = 4, fig.align = "center"----------------- pirateplot(formula = weight ~ 
Time, data = ChickWeight, theme = 0, main = "Fully customized pirateplot", pal = "southpark", # southpark color palette bean.f.o = .6, # Bean fill point.o = .3, # Points inf.f.o = .7, # Inference fill inf.b.o = .8, # Inference border avg.line.o = 1, # Average line bar.f.o = .5, # Bar inf.f.col = "white", # Inf fill col inf.b.col = "black", # Inf border col avg.line.col = "black", # avg line col bar.f.col = gray(.8), # bar filling color point.pch = 21, point.bg = "white", point.col = "black", point.cex = .7) ## ----fig.width = 6, fig.height = 4, fig.align = "center"----------------- pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Adjusting an existing theme", theme = 2, # Start with theme 2 inf.f.o = 0, # Turn off inf fill inf.b.o = 0, # Turn off inf border point.o = .2, # Turn up points bar.f.o = .5, # Turn up bars bean.f.o = .4, # Light bean filling bean.b.o = .2, # Light bean border avg.line.o = 0, # Turn off average line point.col = "black" # Black points ) ## ----fig.width = 6, fig.height = 4, fig.align = "center"----------------- pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Reducing a pirateplot to a barplot", theme = 0, # Start from scratch bar.f.o = .7) # Just turn on the bars ## ---- echo = FALSE------------------------------------------------------- pp.elements <- data.frame('element' = c("Background color", "Gridlines", "Quantiles", "Average line", "Inference Calculation", "Inference Display"), 'arguments' = c("back.col", "gl.col, gl.lwd, gl.lty", "quant, quant.lwd, quant.col", "avg.line.fun", "inf.method", "inf.disp" ), "examples" = c("back.col = 'gray(.9, .9)'", "gl.col = 'gray', gl.lwd = c(.75, 0), gl.lty = 1", "quant = c(.1, .9), quant.lwd = 1, quant.col = 'black'", "avg.line.fun = median", "inf.method = 'hdi', inf.method = 'ci'", "inf.disp = 'line', inf.disp = 'bean', inf.disp = 'rect'") ) knitr::kable(pp.elements, caption = "Additonal pirateplot elements") ## ----fig.width = 6, fig.height = 4, fig.align = "center"----------------- pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Adding quantile lines and background colors", theme = 2, back.col = gray(.98), # Add light gray background gl.col = "gray", # Gray gridlines gl.lwd = c(.75, 0), inf.f.o = .6, # Turn up inf filling inf.disp = "bean", # Wrap inference around bean bean.b.o = .4, # Turn down bean borders quant = c(.1, .9), # 10th and 90th quantiles quant.col = "black" # Black quantile lines ) ## ----fig.width = 10, fig.height = 5, fig.align = "center"---------------- pirateplot(formula = height ~ sex + eyepatch + headband, data = pirates, theme = 2, inf.disp = "bean") ## ----fig.width = 8, fig.height = 8, fig.align = "center"----------------- pirateplot(formula = time ~ sequel + genre + rating, data = subset(movies, genre %in% c("Action", "Adventure", "Comedy", "Horror") & rating %in% c("G", "PG", "PG-13", "R") & time > 0), theme = 3, cex.lab = .8, inf.disp = "rect", pal = "up") ## ----fig.width = 10, fig.height = 7, fig.align = "center"---------------- times.pp <- pirateplot(formula = time ~ sequel + genre, data = subset(movies, genre %in% c("Action", "Adventure", "Comedy", "Horror") & rating %in% c("G", "PG", "PG-13", "R") & time > 0), plot = FALSE) ## ------------------------------------------------------------------------ times.pp
/scratch/gouwar.j/cran-all/cranData/yarrr/inst/doc/pirateplot.R
--- title: "pirateplot" author: "Nathaniel Phillips" date: "`r Sys.Date()`" output: rmarkdown::html_vignette bibliography: yarrr.bib vignette: > %\VignetteIndexEntry{pirateplot: The plotting choice of R pirates} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- # What is a pirateplot()? A pirateplot, is the RDI (**Raw** data, **Descriptive** statistics, and **Inferential** statistics) plotting choice of R pirates who are displaying the relationship between 1 to 3 categorical independent variables, and one continuous dependent variable. ```{r, echo = F, message = F, result = 'hide'} library(yarrr) ``` A pirateplot has 4 main elements 1. points, symbols representing the raw data (jittered horizontally) 2. bar, a vertical bar showing central tendencies 3. bean, a smoothed density (inspired by @kampstra2008beanplot) representing a smoothed density 4. inf, a rectangle representing an inference interval (e.g.; Bayesian Highest Density Interval or frequentist confidence interval) ```{r, fig.width = 6, fig.height = 5, echo = F, fig.align='center'} pirateplot(formula = weight ~ Diet, data = ChickWeight, theme = 1, back.col = "white", gl.col = "white", bean.f.o = c(0, .1, .7, .1), # bean.b.o = c(0, .1, 1, .1), point.o = c(.4, .1, .1, .1), avg.line.o = c(.3, 1, .3, .3), inf.f.o = c(.1, .1, .1, .9), bar.f.o = c(.1, .8, .1, .1), inf.f.col = c("white", "white", "white", piratepal("xmen")[4]), main = "4 Elements of a pirateplot", pal = "xmen") text(.7, 350, labels = "Points") text(.7, 345, labels = "Raw Data", pos = 1, cex = .8) arrows(.7, 310, .97, 270, length = .1) text(1.4, 200, labels = "Bar/Line") text(1.4, 200, labels = "Center", pos = 1, cex = .8) arrows(1.4, 170, 1.54, 125, length = .1) text(2.4, 250, labels = "Bean") text(2.4, 250, labels = "Density", pos = 1, cex = .8) arrows(2.4, 220, 2.85, 200, length = .1) text(3.55, 300, labels = "Band") text(3.55, 290, labels = "Inference\n95% HDI or CI", pos = 1, cex = .8) arrows(3.55, 240, 3.8, 150, length = .1) ``` # Main arguments Here are the main arguments to `pirateplot()` ```{r, echo = FALSE} pp.elements <- data.frame('Argument' = c("formula", "data", "main", "pal", "theme", "inf"), 'Description' = c("A formula", "A dataframe", "Plot title", "A color palette", "A plotting theme", "Type of inference"), 'Examples' = c("height ~ sex + eyepatch, weight ~ Time", "pirates, ChickWeight", "'Pirate heights', 'Chicken Weights", "'xmen', 'black'", "0, 1, 2", "'ci', 'hdi', 'iqr'" ) ) knitr::kable(pp.elements, caption = "Main Pirateplot Arguments") ``` # Themes `pirateplot()` currently supports three themes which change the default look of the plot. To specify a theme, use the `theme` argument: ## Theme 1 `theme = 1` is the default ```{r fig.align='center', fig.width = 6, fig.height = 4} # Theme 1 (the default) pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 1, main = "theme = 1") ``` ## Theme 2 Here is `theme = 2` ```{r fig.align='center', fig.width = 6, fig.height = 4} # Theme 2 pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 2, main = "theme = 2") ``` ## Theme 3 And now...`theme = 3`! ```{r fig.align='center', fig.width = 6, fig.height = 4} # Theme 3 pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 3, main = "theme = 3") ``` ## Theme 4 `theme = 4` tries to maintain a classic barplot look (but with added raw data). 
```{r fig.align='center', fig.width = 6, fig.height = 4} # Theme 4 pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 4, main = "theme = 4") ``` ## Theme 0 `theme = 0` allows you to start a pirateplot from scratch -- that is, it turns of *all* elements. You can then selectively turn elements on with individual arguments (e.g.; `bean.f.o`, `point.o`) ```{r fig.align='center', fig.width = 6, fig.height = 4} # Default theme pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 0, main = "theme = 0\nStart from scratch") ``` # Color palettes You can specify a general color palette using the `pal` argument. You can do this in two ways. The first way is to specify the name of a color palette in the `piratepal()` function. Here they are: ```{r fig.width = 6, fig.height = 4, fig.align = "center"} piratepal("all") ``` For example, here is a pirateplot using the `"pony"` palette ```{r fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, pal = "pony", theme = 1, main = "pony color palette") ``` The second method is to simply enter a vector of one or more colors. Here, I'll create a black and white pirateplot from theme 2 by specifying `pal = 'black'` ```{r fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 2, pal = "black", main = "pal = 'black") ``` # Customising elements Regardless of the theme you use, you can always customize the color and opacity of graphical elements. To do this, specify one of the following arguments. Note: Arguments with `.f.` correspond to the *filling* of an element, while `.b.` correspond to the *border* of an element: ```{r, echo = FALSE} pp.elements <- data.frame('element' = c("points", "beans", "bar", "inf", "avg.line"), 'color' = c("point.col, point.bg", "bean.f.col, bean.b.col", "bar.f.col, bar.b.col", "inf.f.col, inf.b.col", "avg.line.col" ), "opacity" = c("point.o", "bean.f.o, bean.b.o", "bar.f.o, bar.b.o", "inf.f.o, inf.b.o", "avg.line.o") ) knitr::kable(pp.elements, caption = "Customising plotting elements") ``` For example, I could create the following pirateplots using `theme = 0` and specifying elements explicitly: ```{r fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 0, main = "Fully customized pirateplot", pal = "southpark", # southpark color palette bean.f.o = .6, # Bean fill point.o = .3, # Points inf.f.o = .7, # Inference fill inf.b.o = .8, # Inference border avg.line.o = 1, # Average line bar.f.o = .5, # Bar inf.f.col = "white", # Inf fill col inf.b.col = "black", # Inf border col avg.line.col = "black", # avg line col bar.f.col = gray(.8), # bar filling color point.pch = 21, point.bg = "white", point.col = "black", point.cex = .7) ``` If you don't want to start from scratch, you can also start with a theme, and then make selective adjustments: ```{r fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Adjusting an existing theme", theme = 2, # Start with theme 2 inf.f.o = 0, # Turn off inf fill inf.b.o = 0, # Turn off inf border point.o = .2, # Turn up points bar.f.o = .5, # Turn up bars bean.f.o = .4, # Light bean filling bean.b.o = .2, # Light bean border avg.line.o = 0, # Turn off average line point.col = "black" # Black points ) ``` Just to drive the point home, as a barplot is a special case of a pirateplot, you can even reduce a pirateplot into a horrible barplot: ```{r 
fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Reducing a pirateplot to a barplot", theme = 0, # Start from scratch bar.f.o = .7) # Just turn on the bars ``` # Additional arguments There are several more arguments that you can use to customize your plot: ```{r, echo = FALSE} pp.elements <- data.frame('element' = c("Background color", "Gridlines", "Quantiles", "Average line", "Inference Calculation", "Inference Display"), 'arguments' = c("back.col", "gl.col, gl.lwd, gl.lty", "quant, quant.lwd, quant.col", "avg.line.fun", "inf.method", "inf.disp" ), "examples" = c("back.col = 'gray(.9, .9)'", "gl.col = 'gray', gl.lwd = c(.75, 0), gl.lty = 1", "quant = c(.1, .9), quant.lwd = 1, quant.col = 'black'", "avg.line.fun = median", "inf.method = 'hdi', inf.method = 'ci'", "inf.disp = 'line', inf.disp = 'bean', inf.disp = 'rect'") ) knitr::kable(pp.elements, caption = "Additonal pirateplot elements") ``` Here's an example using a background color, and quantile lines. ```{r fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Adding quantile lines and background colors", theme = 2, back.col = gray(.98), # Add light gray background gl.col = "gray", # Gray gridlines gl.lwd = c(.75, 0), inf.f.o = .6, # Turn up inf filling inf.disp = "bean", # Wrap inference around bean bean.b.o = .4, # Turn down bean borders quant = c(.1, .9), # 10th and 90th quantiles quant.col = "black" # Black quantile lines ) ``` # Multiple IVs You can use up to 3 categorical IVs in your plot. Here are some examples: ```{r fig.width = 10, fig.height = 5, fig.align = "center"} pirateplot(formula = height ~ sex + eyepatch + headband, data = pirates, theme = 2, inf.disp = "bean") ``` Here's a pirateplot with showing the relationship between movie running times based on movie genre and whether the movie is a sequel or not. ```{r fig.width = 8, fig.height = 8, fig.align = "center"} pirateplot(formula = time ~ sequel + genre + rating, data = subset(movies, genre %in% c("Action", "Adventure", "Comedy", "Horror") & rating %in% c("G", "PG", "PG-13", "R") & time > 0), theme = 3, cex.lab = .8, inf.disp = "rect", pal = "up") ``` # Output If you include the `plot = FALSE` argument to a pirateplot, the function will return some values associated with the plot. ```{r fig.width = 10, fig.height = 7, fig.align = "center"} times.pp <- pirateplot(formula = time ~ sequel + genre, data = subset(movies, genre %in% c("Action", "Adventure", "Comedy", "Horror") & rating %in% c("G", "PG", "PG-13", "R") & time > 0), plot = FALSE) ``` Here's the result. The most interesting element is `$summary` which shows summary statistics for each bean: ```{r} times.pp ``` # Contribute! I am very happy to receive new contributions and suggestions to improve the pirateplot. If you come up a new theme (i.e.; customization) that you like, or have a favorite color palette that you'd like to have implemented, please contact me ([email protected]) or post an issue at [www.github.com/ndphillips/yarrr/issues](www.github.com/ndphillips/yarrr/issues) and I might include it in a future update. # References The pirateplot is really a knock-off of the great beanplot package and visualization from @kampstra2008beanplot.
/scratch/gouwar.j/cran-all/cranData/yarrr/inst/doc/pirateplot.Rmd
--- title: "yarrr package guide" author: "Nathaniel Phillips ([email protected])" date: "`r Sys.Date()`" csl: apa.csl bibliography: yarrr.bib output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Package guide} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- # YaRrr! The pirate's guide to R This package contains data, functions, and other random files for the e-book YaRrr! The pirate's guide to R. You can access the latest version of the book at [www.thepiratesguidetor.com](www.thepiratesguidetor.com). # Guides To see guides for the main functions, click one of the following links: - [pirateplot() - A function for creating "Pirate Plots"](pirateplot.html) - [piratepal() - A function to create pirate-y color palettes](piratepal.html) # Examples ## pirateplot() The pirateplot() function creates a pirateplot ```{r, echo = F, message = F, results = 'hide'} library(yarrr) ``` ```{r, fig.width = 6, fig.height = 6, fig.align='center'} pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Chicken weights by Time (week)") ``` ## piratepal() The `piratepal()` function returns different color palettes either created by graphic designers, or inspired by random things I have found around my office `piratepal("espresso")`, or horror movies `piratepal("evildead)"`. Here are all of the palettes: ```{r, fig.width = 6, fig.height = 6, fig.align = 'center'} piratepal("all") ``` You can then use specific palettes in your plots by specifying the named palette. I'll create a scatterplot using the google palette with `piratepal('google', trans = .5)`: ```{r, fig.width = 6, fig.height = 6, fig.align = 'center'} my.cols <- piratepal(palette = "google", trans = .5) set.seed(100) # For reproducibility x <- rnorm(100) y <- x + rnorm(100) plot(x = x, y = y, col = my.cols, pch = 16, cex = runif(100, min = 0, max = 2), main = "piratepal('google', trans = .5)") ``` # Questions If you have any questions, comments, or suggestions, write me at [email protected] ### References
/scratch/gouwar.j/cran-all/cranData/yarrr/vignettes/guide.Rmd
--- title: "piratepal(): Color palettes for R Pirates" output: rmarkdown::html_vignette bibliography: yarrr.bib vignette: > %\VignetteIndexEntry{Pirate-y color palettes} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, echo = F, message = F, results = 'hide'} library("yarrr") ``` ## piratepal() `piratepal()` is a function that returns color palettes. The function has four main arguments: - `palette`: The specific palette you want to use. For example, `palette = "appletv"` will give you the appletv palette. You can also specify `palette = "all"` to see all the palettes, or `palette = "random"` to get a random palette. - `trans`: A number between 0 and 1 indicating how transparent to make the colors. 1 is fully transparent (e.g. invisible), and 0 is not transparent at all. Personally, I like at least slightly transparent colors (e.g.; `trans = .3`) - `length.out`: An optional number indicating how many colors to return (if `length.out` is larger than the number of colors in the palette, they will just be repeated). - `plot.result`: A logical value indicating whether or not to display the palette. ## Examples To see all of the palettes, run `piratepal("all")` ```{r, fig.width = 6, fig.height = 6, fig.align='center'} piratepal(palette = "all") ``` Once you find a palette you'd like to use, you can return the colors as a vector by specifying the name of the palette in the `palette` argument. Here is the Google palette ```{r} piratepal(palette = "google") ``` If you want to see a palette in detail (and possibly some images that inspired the palette), include the argument `plot.result = T`. Here's where the `southpark` palette comes from. ```{r, fig.width = 6, fig.height = 6, fig.align='center'} piratepal(palette = "southpark", trans = .5, plot.result = T) ``` You can look at a random palette by setting `palette = "random"`: ```{r echo = F} set.seed(105) ``` ```{r, fig.width = 6, fig.height = 6, fig.align='center'} piratepal("random", plot.result = T) ``` Some of the palettes are darker than others. Here's the nightmare-inducing evildead palette (I'll up the transparency to .5 to make it a bit less scarring...) 
```{r, fig.width = 6, fig.height = 6, fig.align='center'} piratepal(palette = "evildead", trans = .5, plot.result = T) ``` Here's a scatterplot using the xmen palette with `piratepal('xmen', trans = .5)`: ```{r, fig.width = 6, fig.height = 6, fig.align = 'center'} xmen.cols <- piratepal(palette = "xmen", trans = .5) x <- rnorm(100) y <- x + rnorm(100) plot(x = x, y = y, col = xmen.cols, pch = 16, cex = 2, main = "piratepal('xmen', trans = .5)") ``` ### Plotting the Up house Let's use the basel palette to draw the house from Up [Up Trailer](https://www.youtube.com/watch?v=pkqzFUhGPJg) ```{r, fig.width = 8, fig.height = 6, fig.align='center', results='hold'} # Set up balloons balloon.colors <- piratepal("basel", trans = .2) balloon.x <- rnorm(500, 0) balloon.y <- rnorm(500, 4, 1) par(mar = rep(.1, 4)) plot(1, xlim = c(-15, 7), ylim = c(-15, 7), xlab = "", ylab = "", type = "n", xaxt = "n", yaxt = "n", bty = "n") # skyline start.x <- runif(200, -15, 7) start.y <- sort(runif(200, -15, -12), decreasing = T) heights <- runif(200, 2, 4) widths <- runif(200, .25, 1.5) rect(start.x, start.y, start.x + widths, start.y + heights, col = "white", border = gray(.4)) # house rect(-2, -6, 2, -2) polygon(c(-2, 0, 2), c(-2, 0, -2)) rect(-.5, -6, .5, -4) points(.3, -5) # strings line.start.x <- rnorm(500, 0, .2) line.start.y <- -1 + rnorm(500, 0, .1) segments(line.start.x, line.start.y, balloon.x, balloon.y, lty = 1, col = gray(.5, .1), lwd = .2) # balloons points(balloon.x, balloon.y, pch = 21, bg = balloon.colors, col = gray(.9), cex = rnorm(100, 2, .3)) ``` ```{r, echo = F} par(mar = c(5, 4, 4, 1) + .1) ``` ### Have a favorite palette? If you have a favorite palette that you'd like me to add, just contact me at [email protected] and I'll see what I can do :)
/scratch/gouwar.j/cran-all/cranData/yarrr/vignettes/piratepal.Rmd
--- title: "pirateplot" author: "Nathaniel Phillips" date: "`r Sys.Date()`" output: rmarkdown::html_vignette bibliography: yarrr.bib vignette: > %\VignetteIndexEntry{pirateplot: The plotting choice of R pirates} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- # What is a pirateplot()? A pirateplot, is the RDI (**Raw** data, **Descriptive** statistics, and **Inferential** statistics) plotting choice of R pirates who are displaying the relationship between 1 to 3 categorical independent variables, and one continuous dependent variable. ```{r, echo = F, message = F, result = 'hide'} library(yarrr) ``` A pirateplot has 4 main elements 1. points, symbols representing the raw data (jittered horizontally) 2. bar, a vertical bar showing central tendencies 3. bean, a smoothed density (inspired by @kampstra2008beanplot) representing a smoothed density 4. inf, a rectangle representing an inference interval (e.g.; Bayesian Highest Density Interval or frequentist confidence interval) ```{r, fig.width = 6, fig.height = 5, echo = F, fig.align='center'} pirateplot(formula = weight ~ Diet, data = ChickWeight, theme = 1, back.col = "white", gl.col = "white", bean.f.o = c(0, .1, .7, .1), # bean.b.o = c(0, .1, 1, .1), point.o = c(.4, .1, .1, .1), avg.line.o = c(.3, 1, .3, .3), inf.f.o = c(.1, .1, .1, .9), bar.f.o = c(.1, .8, .1, .1), inf.f.col = c("white", "white", "white", piratepal("xmen")[4]), main = "4 Elements of a pirateplot", pal = "xmen") text(.7, 350, labels = "Points") text(.7, 345, labels = "Raw Data", pos = 1, cex = .8) arrows(.7, 310, .97, 270, length = .1) text(1.4, 200, labels = "Bar/Line") text(1.4, 200, labels = "Center", pos = 1, cex = .8) arrows(1.4, 170, 1.54, 125, length = .1) text(2.4, 250, labels = "Bean") text(2.4, 250, labels = "Density", pos = 1, cex = .8) arrows(2.4, 220, 2.85, 200, length = .1) text(3.55, 300, labels = "Band") text(3.55, 290, labels = "Inference\n95% HDI or CI", pos = 1, cex = .8) arrows(3.55, 240, 3.8, 150, length = .1) ``` # Main arguments Here are the main arguments to `pirateplot()` ```{r, echo = FALSE} pp.elements <- data.frame('Argument' = c("formula", "data", "main", "pal", "theme", "inf"), 'Description' = c("A formula", "A dataframe", "Plot title", "A color palette", "A plotting theme", "Type of inference"), 'Examples' = c("height ~ sex + eyepatch, weight ~ Time", "pirates, ChickWeight", "'Pirate heights', 'Chicken Weights", "'xmen', 'black'", "0, 1, 2", "'ci', 'hdi', 'iqr'" ) ) knitr::kable(pp.elements, caption = "Main Pirateplot Arguments") ``` # Themes `pirateplot()` currently supports three themes which change the default look of the plot. To specify a theme, use the `theme` argument: ## Theme 1 `theme = 1` is the default ```{r fig.align='center', fig.width = 6, fig.height = 4} # Theme 1 (the default) pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 1, main = "theme = 1") ``` ## Theme 2 Here is `theme = 2` ```{r fig.align='center', fig.width = 6, fig.height = 4} # Theme 2 pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 2, main = "theme = 2") ``` ## Theme 3 And now...`theme = 3`! ```{r fig.align='center', fig.width = 6, fig.height = 4} # Theme 3 pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 3, main = "theme = 3") ``` ## Theme 4 `theme = 4` tries to maintain a classic barplot look (but with added raw data). 
```{r fig.align='center', fig.width = 6, fig.height = 4} # Theme 4 pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 4, main = "theme = 4") ``` ## Theme 0 `theme = 0` allows you to start a pirateplot from scratch -- that is, it turns of *all* elements. You can then selectively turn elements on with individual arguments (e.g.; `bean.f.o`, `point.o`) ```{r fig.align='center', fig.width = 6, fig.height = 4} # Default theme pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 0, main = "theme = 0\nStart from scratch") ``` # Color palettes You can specify a general color palette using the `pal` argument. You can do this in two ways. The first way is to specify the name of a color palette in the `piratepal()` function. Here they are: ```{r fig.width = 6, fig.height = 4, fig.align = "center"} piratepal("all") ``` For example, here is a pirateplot using the `"pony"` palette ```{r fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, pal = "pony", theme = 1, main = "pony color palette") ``` The second method is to simply enter a vector of one or more colors. Here, I'll create a black and white pirateplot from theme 2 by specifying `pal = 'black'` ```{r fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 2, pal = "black", main = "pal = 'black") ``` # Customising elements Regardless of the theme you use, you can always customize the color and opacity of graphical elements. To do this, specify one of the following arguments. Note: Arguments with `.f.` correspond to the *filling* of an element, while `.b.` correspond to the *border* of an element: ```{r, echo = FALSE} pp.elements <- data.frame('element' = c("points", "beans", "bar", "inf", "avg.line"), 'color' = c("point.col, point.bg", "bean.f.col, bean.b.col", "bar.f.col, bar.b.col", "inf.f.col, inf.b.col", "avg.line.col" ), "opacity" = c("point.o", "bean.f.o, bean.b.o", "bar.f.o, bar.b.o", "inf.f.o, inf.b.o", "avg.line.o") ) knitr::kable(pp.elements, caption = "Customising plotting elements") ``` For example, I could create the following pirateplots using `theme = 0` and specifying elements explicitly: ```{r fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, theme = 0, main = "Fully customized pirateplot", pal = "southpark", # southpark color palette bean.f.o = .6, # Bean fill point.o = .3, # Points inf.f.o = .7, # Inference fill inf.b.o = .8, # Inference border avg.line.o = 1, # Average line bar.f.o = .5, # Bar inf.f.col = "white", # Inf fill col inf.b.col = "black", # Inf border col avg.line.col = "black", # avg line col bar.f.col = gray(.8), # bar filling color point.pch = 21, point.bg = "white", point.col = "black", point.cex = .7) ``` If you don't want to start from scratch, you can also start with a theme, and then make selective adjustments: ```{r fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Adjusting an existing theme", theme = 2, # Start with theme 2 inf.f.o = 0, # Turn off inf fill inf.b.o = 0, # Turn off inf border point.o = .2, # Turn up points bar.f.o = .5, # Turn up bars bean.f.o = .4, # Light bean filling bean.b.o = .2, # Light bean border avg.line.o = 0, # Turn off average line point.col = "black" # Black points ) ``` Just to drive the point home, as a barplot is a special case of a pirateplot, you can even reduce a pirateplot into a horrible barplot: ```{r 
fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Reducing a pirateplot to a barplot", theme = 0, # Start from scratch bar.f.o = .7) # Just turn on the bars ``` # Additional arguments There are several more arguments that you can use to customize your plot: ```{r, echo = FALSE} pp.elements <- data.frame('element' = c("Background color", "Gridlines", "Quantiles", "Average line", "Inference Calculation", "Inference Display"), 'arguments' = c("back.col", "gl.col, gl.lwd, gl.lty", "quant, quant.lwd, quant.col", "avg.line.fun", "inf.method", "inf.disp" ), "examples" = c("back.col = 'gray(.9, .9)'", "gl.col = 'gray', gl.lwd = c(.75, 0), gl.lty = 1", "quant = c(.1, .9), quant.lwd = 1, quant.col = 'black'", "avg.line.fun = median", "inf.method = 'hdi', inf.method = 'ci'", "inf.disp = 'line', inf.disp = 'bean', inf.disp = 'rect'") ) knitr::kable(pp.elements, caption = "Additonal pirateplot elements") ``` Here's an example using a background color, and quantile lines. ```{r fig.width = 6, fig.height = 4, fig.align = "center"} pirateplot(formula = weight ~ Time, data = ChickWeight, main = "Adding quantile lines and background colors", theme = 2, back.col = gray(.98), # Add light gray background gl.col = "gray", # Gray gridlines gl.lwd = c(.75, 0), inf.f.o = .6, # Turn up inf filling inf.disp = "bean", # Wrap inference around bean bean.b.o = .4, # Turn down bean borders quant = c(.1, .9), # 10th and 90th quantiles quant.col = "black" # Black quantile lines ) ``` # Multiple IVs You can use up to 3 categorical IVs in your plot. Here are some examples: ```{r fig.width = 10, fig.height = 5, fig.align = "center"} pirateplot(formula = height ~ sex + eyepatch + headband, data = pirates, theme = 2, inf.disp = "bean") ``` Here's a pirateplot with showing the relationship between movie running times based on movie genre and whether the movie is a sequel or not. ```{r fig.width = 8, fig.height = 8, fig.align = "center"} pirateplot(formula = time ~ sequel + genre + rating, data = subset(movies, genre %in% c("Action", "Adventure", "Comedy", "Horror") & rating %in% c("G", "PG", "PG-13", "R") & time > 0), theme = 3, cex.lab = .8, inf.disp = "rect", pal = "up") ``` # Output If you include the `plot = FALSE` argument to a pirateplot, the function will return some values associated with the plot. ```{r fig.width = 10, fig.height = 7, fig.align = "center"} times.pp <- pirateplot(formula = time ~ sequel + genre, data = subset(movies, genre %in% c("Action", "Adventure", "Comedy", "Horror") & rating %in% c("G", "PG", "PG-13", "R") & time > 0), plot = FALSE) ``` Here's the result. The most interesting element is `$summary` which shows summary statistics for each bean: ```{r} times.pp ``` # Contribute! I am very happy to receive new contributions and suggestions to improve the pirateplot. If you come up a new theme (i.e.; customization) that you like, or have a favorite color palette that you'd like to have implemented, please contact me ([email protected]) or post an issue at [www.github.com/ndphillips/yarrr/issues](www.github.com/ndphillips/yarrr/issues) and I might include it in a future update. # References The pirateplot is really a knock-off of the great beanplot package and visualization from @kampstra2008beanplot.
/scratch/gouwar.j/cran-all/cranData/yarrr/vignettes/pirateplot.Rmd
#' paste variants
#'
#' Wrappers around \code{\link[base:paste]{base::paste}} with a variety of defaults:
#' \tabular{llcc}{
#'   \code{}                   \tab \strong{mnemonic}        \tab \strong{\code{collapse=}} \tab \strong{\code{sep=}} \cr
#'   \code{p()}, \code{p0()}   \tab paste, paste0            \tab \code{NULL}   \tab \code{""}   \cr
#'   \code{ps()}, \code{pss()} \tab paste (sep) space        \tab \code{NULL}   \tab \code{" "}  \cr
#'   \code{psh()}              \tab paste sep hyphen         \tab \code{NULL}   \tab \code{"-"}  \cr
#'   \code{psu()}              \tab paste sep underscore     \tab \code{NULL}   \tab \code{"_"}  \cr
#'   \code{psnl()}             \tab paste sep newline        \tab \code{NULL}   \tab \code{"\n"} \cr
#'   \code{pc()}               \tab paste collapse           \tab \code{""}     \tab \code{""}   \cr
#'   \code{pcs()}              \tab paste collapse space     \tab \code{" "}    \tab \code{""}   \cr
#'   \code{pcc()}              \tab paste collapse comma     \tab \code{", "}   \tab \code{""}   \cr
#'   \code{pcsc()}             \tab paste collapse semicolon \tab \code{"; "}   \tab \code{""}   \cr
#'   \code{pcnl()}             \tab paste collapse newline   \tab \code{"\n"}   \tab \code{""}   \cr
#'   \code{pc_and()}           \tab paste collapse and       \tab \emph{varies} \tab \code{""}   \cr
#'   \code{pc_or()}            \tab paste collapse or        \tab \emph{varies} \tab \code{""}   \cr
#' }
#'
#' @param ...,sep passed on to \code{\link[base:paste]{base::paste}}
#' @export
#' @seealso \code{\link{wrap}} \code{\link{sentence}}
#' @rdname paste-variants
#' @examples
#' x <- head(letters, 3)
#' y <- tail(letters, 3)
#' # paste
#' p(x, y)
#' p0(x, y)
#' # paste + collapse
#' pc(x)
#' pc(x, y)
#' pcs(x)
#' pcc(x)
#' pcc(x, y)
#' pcsc(x)
#' pcnl(x)
#' pc_and(x[1:2])
#' pc_and(x[1:3])
#' pc_or(x[1:2])
#' pc_or(x[1:3])
#' pc_and(x, y)
#' pc_and(x, y, sep = "-")
#' pc_and(x[1])
#' pc_and(x[0])
p <- function(..., sep = "") paste(..., sep = sep)

# paste space
#' @export
#' @rdname paste-variants
ps <- function(...) paste(..., sep = " ")

# paste sep space
#' @export
#' @rdname paste-variants
pss <- ps

# paste sep underscore
#' @export
#' @rdname paste-variants
psu <- function(...) paste(..., sep = "_")

# paste sep hyphen
#' @export
#' @rdname paste-variants
psh <- function(...) paste(..., sep = "-")

# paste sep newline
#' @export
#' @rdname paste-variants
psnl <- function(...) paste(..., sep = "\n")

# ? idea?
# a fixed width table paste pst() paste sep tab
# (but an aware tab to make sure entries align...?)
# (an editor's sketch of one possible pst() follows p0() below)

# paste0
#' @export
#' @rdname paste-variants
p0 <- function(...) paste0(...)
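# Editor's sketch (not part of the original package): one minimal reading of the
# "paste sep tab" idea noted above. The name pst() and its behaviour are
# assumptions for illustration only -- it simply pastes with a tab separator and
# does not attempt the column-aligning "aware tab" mentioned in the comment.
pst <- function(...) paste(..., sep = "\t")
# pst("col1", "col2")  # "col1\tcol2"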
# paste collapse ""
#' @export
#' @rdname paste-variants
pc <- function(..., sep = "") paste(..., sep = sep, collapse = "")

# paste collapse space " "
#' @export
#' @rdname paste-variants
pcs <- function(..., sep = "") paste(..., sep = sep, collapse = " ")

# paste collapse comma
#' @rdname paste-variants
#' @export
pcc <- function(..., sep = "") paste(..., sep = sep, collapse = ", ")

# paste collapse new line
#' @rdname paste-variants
#' @export
pcnl <- function(..., sep = "") paste(..., sep = sep, collapse = "\n")

# paste collapse semicolon
#' @rdname paste-variants
#' @export
pcsc <- function(..., sep = "") paste(..., sep = sep, collapse = "; ")

# paste collapse and
#' @rdname paste-variants
#' @export
pc_and <- function(..., sep = "") {
  x <- paste(..., sep = sep, collapse = NULL)
  lx <- length(x)
  if (lx == 0L)
    ""
  else if (lx == 1L)
    x
  else if (lx == 2L)
    paste0(x, collapse = " and ")
  else
    paste0(paste0(x[-lx], collapse = ", "), ", and ", x[lx])
}

# paste collapse or
#' @rdname paste-variants
#' @export
pc_or <- function(..., sep = "") {
  x <- paste(..., sep = sep, collapse = NULL)
  lx <- length(x)
  if (lx == 0L)
    ""
  else if (lx == 1L)
    x
  else if (lx == 2L)
    paste0(x, collapse = " or ")
  else
    paste0(paste0(x[-lx], collapse = ", "), ", or ", x[lx])
}

#' Wrap strings
#'
#' Wrap strings with flanking characters
#'
#' @param x character to wrap
#' @param left,right character pair to wrap with
#' @param sep,... passed to \code{\link[base:paste]{base::paste}} before wrapping
#'
#' @rdname wrap
#' @export
#' @seealso \code{\link{unwrap}} \code{\link{p0}} \code{\link{sentence}}
#' @examples
#' wrap("abc", "__")   # __abc__
#' parens("abc")       # (abc)
#' sngl_quote("abc")   # 'abc'
#' dbl_quote("abc")    # "abc"
#' bracket("abc")      # [abc]
#' brace("abc")        # {abc}
#'
#' label <- p("name", parens("attribute"))
#'
#' label            # "name (attribute)"
#' unparens(label)  # "name attribute"
#'
#' # make your own function like this:
#' # markdown bold
#' bold <- function(...) wrap(paste(...), "**")
#' p("make a word", bold("bold"))
#' # see unbold example in ?unwrap
wrap <- function(x, left, right = left) paste0(left, x, right)

#' @rdname wrap
#' @export
dbl_quote <- function(..., sep = "") wrap(paste(..., sep = sep), '"')

#' @rdname wrap
#' @export
sngl_quote <- function(..., sep = "") wrap(paste(..., sep = sep), "'")

#' @rdname wrap
#' @export
bracket <- function(..., sep = "") wrap(paste(..., sep = sep), "[", "]")

#' @rdname wrap
#' @export
brace <- function(..., sep = "") wrap(paste(..., sep = sep), "{", "}")

#' @rdname wrap
#' @export
parens <- function(..., sep = "") wrap(paste(..., sep = sep), "(", ")")
/scratch/gouwar.j/cran-all/cranData/yasp/R/paste.R
#' sentence #' #' A wrapper around \code{paste} that does some simple cleaning appropriate for #' prose sentences. It #' \enumerate{ #' \item trims leading and trailing whitespace #' \item collapses runs of whitespace into a single space #' \item appends a period (\code{.}) if there is no terminal punctuation #' mark (\code{.}, \code{?}, or \code{!}) #' \item removes spaces preceding punctuation characters: \code{.?!,;:} #' \item collapses sequences of punctuation marks (\code{.?!,;:}) (possibly #' separated by spaces), into a single punctuation mark. #' The first punctuation mark of the sequence is used, with #' priority given to terminal punctuation marks \code{.?!} if present #' \item makes sure a space or end-of-string follows every one of #' \code{.?!,;:}, with an exception for the special case of \code{.,:} #' followed by a digit, indicating the punctuation is decimal period, #' number separator, or time delimiter #' \item capitalizes the first letter of each sentence (start-of-string or #' following a \code{.?!}) #' } #' #' @param ... passed on to \code{paste} #' #' @export #' #' @examples #' compare <- function(x) cat(sprintf(' in: "%s"\nout: "%s"\n', x, sentence(x))) #' compare("capitilized and period added") #' compare("whitespace:added ,or removed ; like this.and this") #' compare("periods and commas in numbers like 1,234.567 are fine !") #' compare("colons can be punctuation or time : 12:00 !") #' compare("only one punctuation mark at a time!.?,;") #' compare("The first mark ,; is kept;,,with priority for terminal marks ;,.") #' #' # vectorized like paste() #' sentence( #' "The", c("first", "second", "third"), "letter is", letters[1:3], #' parens("uppercase:", sngl_quote(LETTERS[1:3])), ".") sentence <- function(...) { x <- paste(...) x <- trimws(x) # we use perl = TRUE as the default everywhere because # it's both faster and more powerful lgsub <- function(ptrn, rplc) gsub(ptrn, rplc, x, perl = TRUE) # Add a period if there isn't a terminal punctuation mark x <- ifelse(grepl("[?!.]$", x, perl = TRUE), x, paste0(x, ".")) # 2 or more spaces into 1 space x <- lgsub("[[:space:]]+", " ") # remove spaces preceding ?!.,;: x <- lgsub("[[:space:]]([.,?;:!])", "\\1") # if there are multiple punctuation characters in a row (possibly separated by # spaces), just keep the first, giving priority to terminal marks ?!. if # present. ## first, look for sequences that contain ?!., and capture that and possibly a ## space x <- lgsub("[;:, ]*([.?!] ?)[.?!:;,]*( ?)[.?!:;, ]*", "\\1\\2") ## next, look for sequences that contain ,;: and capture the first one possibly ## a space x <- lgsub("([,;:])[;:,]*( ?)[;, ]*", "\\1\\2") # make sure a space or EOL or digit follows every period or comma or colon. # digit unless it's followd by a digit, indicating its a decimal or numeric # separator or time separator and not punctuation. # there should not be any ",$" matches at this point x <- lgsub("([.,:])(?![[:digit:] ]|$)", "\\1 ") # make sure a space or EOL follows every ?!; x <- lgsub("([?!;])(?! |$)", "\\1 ") # Capatilize first letter following a .?! or at the start of the string x <- lgsub("(^|[.?!] )([[:lower:]])", "\\1\\U\\2") x }
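# Illustrative sketch (not part of the original file): one messy fragment run
# through sentence(), exercising rules 2 (whitespace), 4 (space before
# punctuation), 6 (decimal exception) and 7 (capitalisation) documented above.
sentence("the   temperature was 3.5 degrees , not 4 !")
# "The temperature was 3.5 degrees, not 4!"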
/scratch/gouwar.j/cran-all/cranData/yasp/R/sentence.R
#' unwrap #' #' Remove pair(s) of characters from a string. The pair(s) to be removed #' can be at any position within the string. #' #' @param x character vector #' @param left left character to remove #' @param right right character to remove. Only removed if position is after \code{left} #' @param n_pairs number of character pairs to remove #' #' @return character vector with pairs removed #' @export #' #' @seealso \code{\link{wrap}} #' #' @examples #' # by default, removes all matching pairs of left and right #' x <- c("a", "(a)", "((a))", "(a) b", "a (b)", "(a) (b)" ) #' data.frame( x, unparens(x), check.names = FALSE ) #' #' # specify n_pairs to remove a specific number of pairs #' x <- c("(a)", "((a))", "(((a)))", "(a) (b)", "(a) (b) (c)", "(a) (b) (c) (d)") #' data.frame( x, #' "n_pairs=1" = unparens(x, n_pairs = 1), #' "n_pairs=2" = unparens(x, n_pairs = 2), #' "n_pairs=3" = unparens(x, n_pairs = 3), #' "n_pairs=Inf" = unparens(x), # the default #' check.names = FALSE ) #' #' # use unwrap() to specify any pair of characters for left and right #' x <- "A string with some \\emph{latex tags}." #' unwrap(x, "\\emph{", "}") #' #' # by default, only pairs are removed. Set a character to "" to override. #' x <- c("a)", "a))", "(a", "((a" ) #' data.frame(x, unparens(x), #' 'left=""' = unwrap(x, left = "", right = ")"), #' check.names = FALSE) #' #' # make your own functions like this #' # markdown bold #' unbold <- function(x) unwrap(x, "**") #' bold <- function(...) wrap(paste(...), "**") #' (x <- (p("make a word", bold("bold")))) #' unbold(x) unwrap <- function(x, left, right = left, n_pairs = Inf) { repeat { # get the index positions of the first left match left_match <- as.integer(regexpr(left, x, fixed = TRUE)) # get the index positions of the first right match after the first left match pos_start_search_right <- left_match + nchar(left) right_match <- as.integer( regexpr(right, substr(x, pos_start_search_right, nchar(x)), fixed = TRUE)) nrm <- right_match != -1L # no right match right_match[nrm] <- right_match[nrm] + pos_start_search_right[nrm] -1L both_match <- left_match != -1L & left_match < right_match # right_match != -1 implicitly must be TRUE if (!any(both_match)) break xtmp <- x[both_match] left_match <- left_match[both_match] right_match <- right_match[both_match] xtmp <- drop_chars(xtmp, start = left_match, len = nchar(left)) # adjust right match after dropping left chars right_match <- right_match - nchar(left) xtmp <- drop_chars(xtmp, start = right_match, len = nchar(right)) x[both_match] <- xtmp n_pairs <- n_pairs - 1L if (n_pairs < 1L) break } x } #' @export #' @rdname unwrap unparens <- function(x, n_pairs = Inf) unwrap(x, left = "(", right = ")", n_pairs = n_pairs) drop_chars <- function(string, start, end = start + len, len = 1L){ left_side <- substr(string, 1L, start - 1L) right_side <- substr(string, end, nchar(string)) paste0(left_side, right_side) }
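# Illustrative sketch (not part of the original file): the internal helper
# drop_chars() removes `len` characters starting at position `start`;
# unwrap() calls it once for the left marker and once for the right marker
# (after re-indexing the right match).
drop_chars("abcdef", start = 3, len = 2)  # "abef" -- "cd" removed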
/scratch/gouwar.j/cran-all/cranData/yasp/R/unwrap.R
#' Abundance table for 199 samples. #' #' A dataset containing the abundances of 1585 lineages among 199 patients. #' #' @format A data.frame with 1585 rows and 200 variables: #' \describe{ #' \item{lineages}{lineage (string)} #' \item{XXX}{abundance of each lineage in the sample XXX (double)} #' } #' @keywords datasets #' @source Zeller et al., 2014 (\doi{10.15252/msb.20145645}), #' Pasolli et al., 2017 (\doi{10.1038/nmeth.4468}). #' @examples #' dim(abundances) #' abundances[1:5, 1:7] "abundances"
/scratch/gouwar.j/cran-all/cranData/yatah/R/abundances.R
#' Throw an error if the string is not a lineage
#'
#' @inheritParams is_lineage
#' @keywords internal
error_lineage <- function(string){
  if(!all(is_lineage(string))) {
    stop(paste0("Your string is not a lineage. Maybe you have ",
                "specified the wrong separator or used special characters."))
  }
}

#' Common depth
#'
#' Throw an error if depth is not the same across lineages.
#'
#' @inheritParams is_rank
#' @importFrom stringr str_count
#' @keywords internal
depth <- function(lineage) {
  N <- str_count(lineage, "__")
  if (!all(N == N[1])) {
    stop("Lineages don't have the same depth.")
  }
  return(N)
}
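# Illustrative sketch (not part of the original file, assumes yatah and its
# stringr imports are loaded): depth() counts the "__" rank markers, so the
# exported functions can refuse mixed-depth input before any string processing.
depth(c("k__Bacteria|p__Firmicutes", "k__Archaea|p__Euryarchaeota"))  # 2 2
# depth(c("k__Bacteria", "k__Bacteria|p__Firmicutes"))  # error: unequal depths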
/scratch/gouwar.j/cran-all/cranData/yatah/R/errors.R
#' Extract the last clade of a lineage
#'
#' @inheritParams is_rank
#' @param same logical. Do the lineages have the same depth? Defaults to TRUE.
#'
#' @return A character vector with the last clade of each given lineage.
#' @importFrom stringr str_remove
#' @export
#'
#' @examples
#' lineage1 <- "k__Bacteria|p__Verrucomicrobia|c__Verrucomicrobiae"
#' lineage2 <- "k__Bacteria|p__Firmicutes|c__Clostridia"
#' last_clade(c(lineage1, lineage2))
last_clade <- function(lineage, same = TRUE) {
  error_lineage(lineage)
  if (same) depth(lineage)
  str_remove(lineage, ".*__")
}

#' Extract the last rank of a lineage
#'
#' @inheritParams last_clade
#'
#' @return A character vector with the last rank of each given lineage.
#' @importFrom stringr str_remove str_sub
#' @export
#'
#' @examples
#' lineage1 <- "k__Bacteria|p__Verrucomicrobia|c__Verrucomicrobiae"
#' lineage2 <- "k__Bacteria|p__Firmicutes|c__Clostridia"
#' last_rank(c(lineage1, lineage2))
last_rank <- function(lineage, same = TRUE) {
  error_lineage(lineage)
  if (same) depth(lineage)
  letter <- str_sub(str_remove(lineage, paste0("__", .allchr, "*$")), start = -1)
  unname(.ranks[letter])
}

#' Extract all clades present in the lineages
#'
#' @details If a clade corresponds to different ranks (e.g. Actinobacteria
#' is both a phylum and a class), it will be displayed only once when
#' \code{simplify} is set to \code{TRUE}. The same applies to distinct
#' clades that share both name and rank when \code{simplify} is set to
#' \code{FALSE}.
#'
#' @inheritParams last_clade
#' @param simplify logical. Should the output be a vector (\code{TRUE}) or a
#' data.frame (\code{FALSE})?
#'
#' @return The clades present in the lineages, as a vector of ordered strings
#' or a data.frame.
#' @importFrom stringr str_split str_sub
#' @export
#'
#' @examples
#' lineage1 <- "k__Bacteria|p__Verrucomicrobia|c__Verrucomicrobiae"
#' lineage2 <- "k__Bacteria|p__Firmicutes|c__Clostridia"
#' lineage3 <- "k__Bacteria|p__Actinobacteria|c__Actinobacteria"
#' all_clades(c(lineage1, lineage2, lineage3))
#' all_clades(c(lineage1, lineage2, lineage3), simplify = FALSE)
all_clades <- function(lineage, simplify = TRUE) {
  error_lineage(lineage)
  sep <- getOption("yatah_sep", default = "\\|")
  clades <- unique(unlist(str_split(lineage, pattern = sep)))
  if (simplify) {
    return(sort(unique(str_sub(clades, start = 4))))
  } else {
    ranks_ <- .ranks[str_sub(clades, end = 1)]
    df <- data.frame(clade = str_sub(clades, start = 4),
                     rank = ranks_,
                     stringsAsFactors = FALSE)
    df <- unique(df)
    ind <- order(df$clade)
    return(df[ind, ])
  }
}
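# Illustrative sketch (not part of the original file): last_rank() strips the
# trailing "__<clade>" block, keeps the rank letter in front of it, and
# translates that letter through the internal .ranks lookup (parameters.R).
last_rank("k__Bacteria|p__Firmicutes|c__Clostridia|o__Clostridiales")  # "order"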
/scratch/gouwar.j/cran-all/cranData/yatah/R/extract.R
#' Test if a string is a lineage #' #' @param string string to be tested as lineage. #' #' @details Alphanumeric character, hyphen, dots, square brackets #' and non-consecutive underscores are allowed in clades names. #' #' @return A logical. #' @importFrom stringr str_detect str_replace_all #' @export #' #' @examples #' is_lineage("k__Bacteria|p__Firmicutes|c__Clostridia|o__Clostridiales") is_lineage <- function(string){ sep <- getOption("yatah_sep", default = "\\|") only_clades <- str_replace_all(string, paste0("(^|", sep, ")[kpcofgst]__"), " ") cond1 <- ! str_detect(only_clades, "__") cond2 <- str_detect(string, paste0("^k__", .allchr, "*", "($|", sep, "p__", .allchr, "*)", "($|", sep, "c__", .allchr, "*)", "($|", sep, "o__", .allchr, "*)", "($|", sep, "f__", .allchr, "*)", "($|", sep, "g__", .allchr, "*)", "($|", sep, "s__", .allchr, "*)", "($|", sep, "t__", .allchr, "*)$")) as.logical(cond1 * cond2) } #' Test if a lineage goes down to a specified rank #' #' @param lineage string. Vector of lineages. #' @param rank string. One of \code{c("kingdom", "phylum", "class", #' "order", "family", "genus", "species", "strain")} with partial matching. #' #' @return logical. #' @importFrom stringr str_sub str_detect #' @export #' #' @examples #' lineage1 <- "k__Bacteria|p__Verrucomicrobia|c__Verrucomicrobiae" #' lineage2 <- "k__Bacteria|p__Firmicutes|c__Clostridia" #' is_rank(c(lineage1, lineage2), "class") #' is_rank(c(lineage1, lineage2), "order") is_rank <- function(lineage, rank = c("kingdom", "phylum", "class", "order", "family", "genus", "species", "strain")) { error_lineage(lineage) rank <- match.arg(rank) letter <- ifelse(rank == "strain", "t", str_sub(rank, end = 1)) str_detect(lineage, paste0(letter, "__", .allchr, "*$")) } #' Test if a lineage belongs to a clade #' #' @details If \code{rank} is set to \code{.}, clade is looked for #' among all ranks. #' #' @inheritParams is_rank #' @param clade string. #' #' @return logical. #' @export #' #' @examples #' lineage1 <- "k__Bacteria|p__Verrucomicrobia|c__Verrucomicrobiae" #' lineage2 <- "k__Bacteria|p__Firmicutes|c__Clostridia" #' is_clade(c(lineage1, lineage2), clade = "Verrucomicrobia", rank = "phylum") #' is_clade(c(lineage1, lineage2), clade = "Clostridia") is_clade <- function(lineage, clade, rank = c(".", "kingdom", "phylum", "class", "order", "family", "genus", "species", "strain")) { error_lineage(lineage) sep <- getOption("yatah_sep", default = "\\|") stopifnot(length(clade) == 1) rank <- match.arg(rank) letter <- ifelse(rank == "strain", "t", str_sub(rank, end = 1)) str_detect(lineage, paste0("(^|", sep, ")", letter, "__", clade, "($|", sep, ")")) }
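# Illustrative sketch (not part of the original file): every regex above reads
# its separator from options("yatah_sep") (set to "\\|" in zzz.R), so lineages
# written with another, regex-safe, separator such as ";" can be validated by
# changing that option temporarily.
op <- options(yatah_sep = ";")
is_lineage("k__Bacteria;p__Firmicutes;c__Clostridia")  # TRUE
options(op)  # restore the default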
/scratch/gouwar.j/cran-all/cranData/yatah/R/is_tests.R
#' Ranks #' #' Named vector of ranks #' @keywords internal .ranks <- c(k = "kingdom", p = "phylum", c = "class", o = "order", f = "family", g = "genus", s = "species", t = "strain") #' Characters allowed in lineages #' @keywords internal .allchr <- "[a-zA-Z0-9_\\-\\.\\[\\]]"
/scratch/gouwar.j/cran-all/cranData/yatah/R/parameters.R
#' Taxonomic table #' #' Compute taxonomic table from lineages. #' #' Duplicated lineages are removed. #' #' @inheritParams is_rank #' #' @return A data.frame with columns corresponding to different ranks. #' @importFrom stringr str_remove_all str_split #' @importFrom purrr map transpose #' @export #' #' @examples #' lineage1 <- "k__Bacteria|p__Verrucomicrobia|c__Verrucomicrobiae" #' lineage2 <- "k__Bacteria|p__Firmicutes|c__Clostridia" #' lineage3 <- "k__Bacteria|p__Firmicutes|c__Bacilli" #' taxtable(c(lineage1, lineage2, lineage3)) taxtable <- function(lineage) { error_lineage(lineage) sep <- getOption("yatah_sep", default = "\\|") N <- depth(lineage) list <- str_split(str_remove_all(unique(lineage), sep), ".__") list <- map(list, ~ .[-1]) list <- map(transpose(list, .names = .ranks[1:(N[1])]), unlist) as.data.frame(list, stringsAsFactors = FALSE) }
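# Illustrative sketch (not part of the original file): the output has one
# column per rank present in the (common-depth) lineages, named after the
# internal .ranks vector.
tbl <- taxtable(c("k__Bacteria|p__Firmicutes|c__Clostridia",
                  "k__Bacteria|p__Firmicutes|c__Bacilli"))
names(tbl)  # "kingdom" "phylum" "class"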
/scratch/gouwar.j/cran-all/cranData/yatah/R/taxtable.R
#' Taxonomic tree #' #' Compute taxonomic tree from taxonomic table. #' #' #' @param table dataframe. #' @param collapse logical. Should node with one child be vanished? Default #' to TRUE. #' @param lineage_length double. Lineage length from the root to the leaves. #' Default to 1. #' @param root character. Name of the root if there is no natural root. #' #' @return A phylo object. #' @importFrom ape collapse.singles write.tree read.tree #' @importFrom stats na.omit #' @importFrom stringr str_replace_all #' @export #' #' @examples #' lineage1 <- "k__Bacteria|p__Verrucomicrobia|c__Verrucomicrobiae" #' lineage2 <- "k__Bacteria|p__Firmicutes|c__Clostridia" #' lineage3 <- "k__Bacteria|p__Firmicutes|c__Bacilli" #' table <- taxtable(c(lineage1, lineage2, lineage3)) #' taxtree(table) taxtree <- function(table, collapse = TRUE, lineage_length = 1, root = ""){ ## Remove NA columns na_col <- apply(table, 2, function(x) all(is.na(x))) table <- table[, !na_col] ## Return error if there is only one unique row without NA stopifnot(nrow(unique(na.omit(table))) > 1) ## Remove duplicated rows table <- unique(na.omit(table)) ## Return an error if there are void ranks if(any(table == "")) { stop("Void ranks are not allowed in taxonomic trees.") } ## Convert to data.frame with factor columns table <- as.data.frame(apply(table, 2, as.factor), stringsAsFactors = TRUE) ## Number of ranks nrk <- ncol(table) ## Create a root if necessary if (length(unique(table[, 1])) >= 2) { table[, nrk + 1] <- as.factor(root) table <- table[, c(nrk + 1, 1:nrk)] nrk <- nrk + 1 } ## Labels & convert factors to unique integer tiplab <- levels(table[[ncol(table)]]) table[[ncol(table)]] <- as.numeric(table[[ncol(table)]]) count <- length(tiplab) nodelab <- character() for (i in 1:(nrk - 1)) { nodelab <- c(nodelab, levels(table[[i]])) table[[i]] <- as.numeric(table[[i]]) + count count <- max(table[[i]]) } alllab <- c(tiplab, nodelab) ## Edgelist el <- as.matrix(table[, 1:2]) if (nrk > 2) { for (i in 2:(nrk - 1)) { el <- rbind(el, as.matrix(table[, i:(i + 1)])) } } el <- unique(unname(el)) ## Tree tree <- list(edge = el, tip.label = tiplab, Nnode = length(nodelab), node.label = nodelab, edge.length = rep(lineage_length / (nrk - 1), nrow(el))) class(tree) <- "phylo" ## Remove brackets tree$tip.label <- str_replace_all(tree$tip.label, "\\[", "_ob_") tree$tip.label <- str_replace_all(tree$tip.label, "\\]", "_cb_") tree$tip.label <- str_replace_all(tree$tip.label, " ", "_sp_") tree$node.label <- str_replace_all(tree$node.label, "\\[", "_ob_") tree$node.label <- str_replace_all(tree$node.label, "\\]", "_cb_") tree$node.label <- str_replace_all(tree$node.label, " ", "_sp_") tree <- read.tree(text = write.tree(tree)) ## Put brackets back tree$tip.label <- str_replace_all(tree$tip.label, "_ob_", "\\[") tree$tip.label <- str_replace_all(tree$tip.label, "_cb_", "\\]") tree$tip.label <- str_replace_all(tree$tip.label, "_sp_", " ") tree$node.label <- str_replace_all(tree$node.label, "_ob_", "\\[") tree$node.label <- str_replace_all(tree$node.label, "_cb_", "\\]") tree$node.label <- str_replace_all(tree$node.label, "_sp_", " ") ## Collapse if (collapse) { tree <- collapse.singles(tree) } ## Add a root tree$root.edge <- 0 ## Return tree }
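# Illustrative sketch (not part of the original file): when the table spans
# more than one kingdom there is no natural root, so taxtree() inserts one and
# labels it with the `root` argument; with the default collapse = TRUE the
# single-child kingdom nodes are then dropped by ape::collapse.singles().
tbl <- taxtable(c("k__Bacteria|p__Firmicutes", "k__Archaea|p__Euryarchaeota"))
tree <- taxtree(tbl, root = "Life")
tree$node.label  # expected to keep only "Life" once the singletons are collapsed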
/scratch/gouwar.j/cran-all/cranData/yatah/R/taxtree.R
#' Trim void ranks in lineages #' #' @details If there is a void rank amid a lineage, deeper ranks #' will be removed. See the example with \code{lineage3}. #' #' @inheritParams last_clade #' @param only_tail Logical. If \code{FALSE} (default), void ranks amid #' lineages and subranks are removed. If \code{TRUE}, only final #' void ranks are removed. #' #' @return The trimmed lineages. Depth could be different among them. #' @importFrom stringr str_extract #' @export #' #' @examples #' lineage1 <- "k__Bacteria|p__Verrucomicrobia|c__Verrucomicrobiae|o__|f__" #' lineage2 <- "k__Bacteria|p__Firmicutes|c__" #' lineage3 <- "k__Bacteria|p__|c__Verrucomicrobiae|o__|f__" #' trim_void(c(lineage1, lineage2, lineage3), same = FALSE) #' trim_void(c(lineage1, lineage2, lineage3), same = FALSE, only_tail = TRUE) trim_void <- function(lineage, same = TRUE, only_tail = FALSE) { error_lineage(lineage) if (same) depth(lineage) if(only_tail){ lin <- str_remove(lineage, paste0("((^|\\|)[kpcofgst]__", "+)*$")) } else { lin <- str_extract(lineage, paste0("((^|\\|)[kpcofgst]__", .allchr, "+)*")) } return(lin) } #' Trim lineages until a specified rank #' #' @details Returns \code{NA} if a lineage is not as deep as the specified rank. #' #' @inheritParams last_clade #' @inheritParams is_rank #' #' @return The trimmed lineages. Depth could be different among them. #' @export #' #' @examples #' lineage1 <- "k__Bacteria|p__Verrucomicrobia|c__Verrucomicrobiae" #' lineage2 <- "k__Bacteria|p__Firmicutes|c__Clostridia" #' trim_rank(c(lineage1, lineage2), rank = "phylum") #' trim_rank(c(lineage1, lineage2), rank = "genus") trim_rank <- function(lineage, rank = c("kingdom", "phylum", "class", "order", "family", "genus", "species", "strain"), same = TRUE) { error_lineage(lineage) rank <- match.arg(rank) if (same) depth(lineage) N <- unname(which(rank == .ranks)) regex <- paste(names(.ranks)[seq_len(N)], collapse = "") str_extract(lineage, paste0("((^|\\|)[", regex, "]__", .allchr, "*){", N, "}")) } #' Trim lineages until the shallowest common rank. #' #' @inheritParams last_clade #' @param remove_void Should void ranks be removed? Default to `TRUE`. #' @param only_tail Logical to be passed to `trim_void()`. Used only if #' `remove_void` is set to `TRUE`. #' #' @return The trimmed lineages, with same depth. #' @export #' #' @examples #' lineage1 <- "k__Bacteria|p__Verrucomicrobia|c__Verrucomicrobiae" #' lineage2 <- "k__Bacteria|p__Firmicutes" #' lineage3 <- "k__Bacteria|p__|c__Clostridia" #' trim_common(c(lineage1, lineage2, lineage3), remove_void = FALSE) #' trim_common(c(lineage1, lineage2, lineage3), only_tail = FALSE) trim_common <- function(lineage, remove_void = TRUE, only_tail = TRUE) { lin <- lineage if (remove_void) { lin <- trim_void(lin, same = FALSE, only_tail = only_tail) } # find shallowest common rank lrs <- last_rank(lin, same = FALSE) lrs <- factor(lrs, levels = .ranks, ordered = TRUE) lr <- as.character(min(lrs)) trim_rank(lin, rank = lr, same = FALSE) }
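# Illustrative sketch (not part of the original file): trim_common() turns the
# last ranks into an ordered factor over .ranks, so min() picks the shallowest
# one ("phylum" here) and every lineage is cut back to that rank.
lin <- c("k__Bacteria|p__Verrucomicrobia|c__Verrucomicrobiae",
         "k__Bacteria|p__Firmicutes")
trim_common(lin)
# "k__Bacteria|p__Verrucomicrobia" "k__Bacteria|p__Firmicutes"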
/scratch/gouwar.j/cran-all/cranData/yatah/R/trim.R
#' @keywords internal "_PACKAGE" # The following block is used by usethis to automatically manage # roxygen namespace tags. Modify with care! ## usethis namespace: start ## usethis namespace: end NULL
/scratch/gouwar.j/cran-all/cranData/yatah/R/yatah-package.R
.onLoad <- function(libname, pkgname){ options(yatah_sep = "\\|") }
/scratch/gouwar.j/cran-all/cranData/yatah/R/zzz.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, message=FALSE----------------------------------------------------- library(dplyr) library(yatah) ## ----------------------------------------------------------------------------- abundances <- as_tibble(yatah::abundances) print(abundances, max_extra_cols = 2) ## ----data, message=FALSE------------------------------------------------------ taxonomy <- select(abundances, lineages) taxonomy ## ----filter------------------------------------------------------------------- gammap_genus <- taxonomy %>% filter(is_clade(lineages, "Gammaproteobacteria"), is_rank(lineages, "genus")) %>% mutate(genus = last_clade(lineages)) gammap_genus ## ----table-------------------------------------------------------------------- gammaprot_table <- gammap_genus %>% pull(lineages) %>% taxtable() as_tibble(gammaprot_table) ## ----tree--------------------------------------------------------------------- gammaprot_tree <- taxtree(gammaprot_table) gammaprot_tree ## ----ggtree, fig.width=7, fig.height=7---------------------------------------- plot(gammaprot_tree, show.node.label = TRUE, cex = 0.7, main = "Taxonomy of Gammaproteobacteria")
/scratch/gouwar.j/cran-all/cranData/yatah/inst/doc/gammaproteobacteria.R
---
title: "Taxonomy of Gammaproteobacteria"
output: rmarkdown::html_vignette
bibliography: references.bib
link-citations: yes
vignette: >
  %\VignetteIndexEntry{Taxonomy_of_Gammaproteobacteria}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

## Preliminaries

This vignette illustrates the most useful functions of **yatah**.

```{r setup, message=FALSE}
library(dplyr)
library(yatah)
```

## Data

For this example, we use data from @zeller2014potential. It contains the abundances of the bacteria present in 199 stool samples.

```{r}
abundances <- as_tibble(yatah::abundances)
print(abundances, max_extra_cols = 2)
```

```{r data, message=FALSE}
taxonomy <- select(abundances, lineages)
taxonomy
```

## Filtering

Here, we have all the bacteria present, at every rank. As we are only interested in genera that belong to the _Gammaproteobacteria_ class, we `filter()` the lineages with `is_clade()` and `is_rank()`. The genus name is accessible with `last_clade()`.

```{r filter}
gammap_genus <- taxonomy %>%
  filter(is_clade(lineages, "Gammaproteobacteria"),
         is_rank(lineages, "genus")) %>%
  mutate(genus = last_clade(lineages))
gammap_genus
```

## Taxonomic table

It is often useful to have a taxonomic table. `taxtable()` does the job.

```{r table}
gammaprot_table <- gammap_genus %>%
  pull(lineages) %>%
  taxtable()
as_tibble(gammaprot_table)
```

## Taxonomic tree

To obtain a tree, use `taxtree()` with a taxonomic table as input. By default, it collapses ranks with only one subrank.

```{r tree}
gammaprot_tree <- taxtree(gammaprot_table)
gammaprot_tree
```

```{r ggtree, fig.width=7, fig.height=7}
plot(gammaprot_tree, show.node.label = TRUE, cex = 0.7,
     main = "Taxonomy of Gammaproteobacteria")
```

## References
/scratch/gouwar.j/cran-all/cranData/yatah/vignettes/gammaproteobacteria.Rmd
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 calc_dbar_c <- function(nday, ntupq, day_idx, tupq_idx, ux_window, uu_window, price_slist, cf_slist) { .Call('_ycevo_calc_dbar_c', PACKAGE = 'ycevo', nday, ntupq, day_idx, tupq_idx, ux_window, uu_window, price_slist, cf_slist) } calc_hhat_num2_c <- function(nday, ntupq_x, ntupq_q, day_idx, tupq_idx_x, tupq_idx_q, ux_window, uq_window, uu_window, cf_slist) { .Call('_ycevo_calc_hhat_num2_c', PACKAGE = 'ycevo', nday, ntupq_x, ntupq_q, day_idx, tupq_idx_x, tupq_idx_q, ux_window, uq_window, uu_window, cf_slist) }
/scratch/gouwar.j/cran-all/cranData/ycevo/R/RcppExports.R
#' CRSP US Bond Dataset from 02/01/2007 to 31/12/2007 #' #' A dataset containing the prices and other attributes #' of CRSP US treasury bills, notes, and bonds. #' Columns qdate, crspid, tumat, mid.price, accint, pdint and tupq are required #' for estimation. #' #' @format A data frame #' \describe{ #' \item{qdate}{Quotation date} #' \item{crspid}{Bond identifier} #' \item{type}{1: Treasury Bonds, 2: Treasury Notes, 4: Treasury Bills} #' \item{couprt}{Coupon rate} #' \item{matdate}{Bond maturity date} #' \item{tumat}{Number of days to maturity from quotation date} #' \item{mid.price}{Mid-Price, average between quoted bid and ask prices} #' \item{accint}{The accumulated interest on payments} #' \item{issuedate}{Bond issue date} #' \item{pqdate}{Bond payment date. One entry for each payment.} #' \item{pdint}{Bond payment amount} #' \item{tupq}{Time until a given payment, given in days} #' } #' @source \url{https://wrds-www.wharton.upenn.edu/} "USbonds"
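# Illustrative sketch (not part of the original file, assumes the package is
# installed and attached): a quick check that the bundled data carries the
# columns required by estimate_yield().
required <- c("qdate", "crspid", "mid.price", "accint", "pdint", "tupq")
all(required %in% colnames(USbonds))  # TRUE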
/scratch/gouwar.j/cran-all/cranData/ycevo/R/data.R
# A component of the \code{estimate_yield} function that calculates \eqn{dbar}
#
# Internal function that estimates the discount function without taking into account
# cross products of coupon payments. Not to be used independently. Exported for
# documentation purposes.
#
# @param data A data frame; bond data to estimate discount curve from. See \code{?USbonds}
#   for an example bond data structure.
# @param ugrid A length T numeric vector; the times at which the discount curve will be estimated.
# @param hu A length T numeric vector, bandwidth parameter determining the size of the window
#   that corresponds to each time at which the discount curve is estimated.
# @param rgrid (Optional) A length K numeric vector of interest rate grid values
# @param hr (Optional) A length K numeric vector of interest rate grid bandwidths
# @param xgrid A length m numeric vector, or either a 1 x m or T x m numeric matrix. If a
#   T x m matrix, each row represents the time-to-maturity grid that each time-to-maturity
#   in the discount function is compared against. Otherwise the same time-to-maturity grid
#   is repeated for each of the T ugrid values
# @param hx A numeric vector of length T, or if xgrid is a matrix, a numeric matrix of the
#   same dimensions as xgrid. A vector hx for a T x m matrix xgrid repeats values for each
#   row of xgrid. Bandwidth parameter determining the size of the window that corresponds
#   to each time-to-maturity.
# @param price_slist (Optional) A list of matrices, generated by calc_price_slist.
# @param cf_slist (Optional) A list of matrices, generated by calc_cf_slist.
# @param interest (Optional) A vector of daily short term interest rates
# @param units (Optional) number of tupq per xgrid (e.g. 365 for daily data with annual
#   grid values). Defaults to 365
#
# @return Data frame with the following variables
#
# \describe{
#   \item{ug}{Same as input \code{ugrid}}
#   \item{dbar_numer}{Numerator in dbar. See \code{Source}}
#   \item{dbar_denom}{Denominator in dbar. See \code{Source}}
#   \item{xg}{Same as input \code{xgrid}}
# }
#
# @source Koo, B., La Vecchia, D., & Linton, O. B. (2019). Estimation of a Nonparametric
#   model for Bond Prices from Cross-section and Time series Information. Available at
#   SSRN3341344.
# @author Nathaniel Tomasetti # calc_dbar <- function(data, ugrid, hu, rgrid, hr, xgrid, hx, price_slist, cf_slist, interest, units = 365) { if (missing(cf_slist)){ cf_slist <- calc_cf_slist(data) } if (missing(price_slist)) { price_slist <- calc_price_slist(data) } if(!missing(rgrid) & !missing(hr) & !missing(interest)){ interest_grid <- TRUE } else { interest_grid <- FALSE } day_idx <- calc_day_idx(data, ugrid, hu) uu_window <- calc_uu_window(data,ugrid,hu) nday <- length(ugrid) if(interest_grid){ r_window <- calc_r_window(interest, rgrid, hr) day_grid <- expand.grid(ug = ugrid, rg = rgrid) nday <- nrow(day_grid) joint_window <- matrix(0, nrow(uu_window), nday) for(j in seq_along(rgrid)){ for(i in seq_along(ugrid)){ joint_window[, (j-1)*length(ugrid) + i] <- uu_window[,i] * r_window[,j] } } apply(joint_window, 2, function(y) { if(all(y == 0)){ day_idx <- c(0, 0) } else { window_idx <- which(y != 0) day_idx <- c(window_idx[1], window_idx[length(window_idx)]) } day_idx }) %>% t() -> day_idx obs <- which(day_idx[,1] != 0) day_grid <- day_grid[obs, ] joint_window <- joint_window[,obs] day_idx <- day_idx[obs, ] nday <- length(obs) if(nday == 1){ joint_window <- matrix(joint_window, ncol = 1) day_idx <- matrix(day_idx, nrow = 1) } } if(is.vector(xgrid)){ tupq_idx <- calc_tupq_idx(data, xgrid, hx, units) ux_window <- calc_ux_window(data, xgrid, hx, units) ntupq <- length(xgrid) } else if(nrow(xgrid) == 1) { tupq_idx <- calc_tupq_idx(data, xgrid, hx, units) ux_window <- calc_ux_window(data, xgrid, hx, units) ntupq <- length(xgrid) } else { tupq_idx <- NULL ux_window <- NULL for(i in 1:nrow(xgrid)){ if(is.vector(hx)){ tupq_idx <- cbind(tupq_idx, calc_tupq_idx(data, xgrid[i,], hx, units)) ux_window <- cbind(ux_window, calc_ux_window(data, xgrid[i,], hx, units)) } else if(nrow(hx) == 1){ tupq_idx <- cbind(tupq_idx, calc_tupq_idx(data, xgrid[i,], hx, units)) ux_window <- cbind(ux_window, calc_ux_window(data, xgrid[i,], hx, units)) } else if(nrow(hx) == nrow(xgrid)){ tupq_idx <- cbind(tupq_idx, calc_tupq_idx(data, xgrid[i,], hx[i, ], units)) ux_window <- cbind(ux_window, calc_ux_window(data, xgrid[i,], hx[i, ], units)) } else { stop('the dimension of hx must match the dimension of xgrid') } } ntupq <- ncol(xgrid) } if(interest_grid){ dbar <- calc_dbar_c(nday, ntupq, day_idx, tupq_idx, ux_window, joint_window, price_slist, cf_slist) day_grid <- day_grid[rep(1:nday, each=ntupq),] dbar <- data.frame(ug = day_grid$ug, rg = day_grid$rg, dbar_numer = dbar[,1], dbar_denom = dbar[,2]) } else { dbar <- calc_dbar_c(nday, ntupq, day_idx, tupq_idx, ux_window, uu_window, price_slist, cf_slist) dbar <- data.frame(ug = rep(ugrid, rep(ntupq, nday)), dbar_numer = dbar[,1], dbar_denom = dbar[,2]) } if(is.vector(xgrid)){ dbar$xg = rep(xgrid, nday) } else if(nrow(xgrid) == 1) { dbar$xg = rep(xgrid, nday) } else { dbar$xg = c(t(xgrid)) } dbar } # A component of the \code{estimate_yield} function that calculates \eqn{hhat} # # Internal function that calculates coupon payment cross products. # Not to be used independently. Exported for documentation purpose. # # @param data A data frame; bond data to estimate discount curve from. See \code{?USbonds} for an example bond data structure. # @param ugrid A length T numeric vector; the times at which the discount curve will be estimated. 
# @param hu A length T numeric vector, bandwidth parameter determining the size of the window
#   that corresponds to each time at which the discount curve is estimated.
# @param rgrid (Optional) A length K numeric vector of interest rate grid values
# @param hr (Optional) A length K numeric vector of interest rate grid bandwidths
# @param xgrid A length m numeric vector, or either a 1 x m or T x m numeric matrix. If a
#   T x m matrix, each row represents the time-to-maturity grid for the discount function
#   at the corresponding time. Otherwise the same time-to-maturity grid is repeated for
#   each of the T ugrid values
# @param hx A numeric vector of length T, or if xgrid is a matrix, a numeric matrix of the
#   same dimensions as xgrid. A vector hx for a T x m matrix xgrid repeats values for each
#   row of xgrid. Bandwidth parameter determining the size of the window that corresponds
#   to each time-to-maturity.
# @param qgrid (Optional) A length M numeric vector, or either a 1 x M or T x M numeric
#   matrix, matching the xgrid input, with M allowed to be different from m.
#   If a T x M matrix, each row represents the time-to-maturity grid that each
#   time-to-maturity in the discount function is compared against. Otherwise the same
#   time-to-maturity grid is repeated for each of the T ugrid values.
#   If M = m, and each entry of qgrid is identical to xgrid, estimation is performed
#   without interpolation of the h-hat matrix. If the entries are not identical, each
#   entry of each row of qgrid must be within the range from the smallest to largest
#   value of the corresponding row of xgrid, and linear interpolation of the h-hat
#   matrix is performed. If omitted, qgrid is set equal to xgrid.
# @param hq (Optional) A numeric vector of length T, or if qgrid is a matrix, a numeric
#   matrix of the same dimensions as qgrid. A vector hq for a T x M matrix qgrid repeats
#   values for each row of qgrid. Bandwidth parameter determining the size of the window
#   that corresponds to each time-to-maturity. If omitted, hq is set equal to hx.
# @param cf_slist (Optional) A list of matrices, generated by calc_cf_slist.
# @param interest (Optional) A vector of daily short term interest rates
# @param units (Optional) number of tupq per xgrid (e.g. 365 for daily data with annual
#   grid values). Defaults to 365
#
# @return Data frame with the following variables
#
# \describe{
#   \item{hhat_numer}{Numerator in H hat. See \code{Source}}
#   \item{ug}{Same as input \code{ugrid}}
#   \item{xg}{Same as input \code{xgrid}}
#   \item{qg}{Same as input \code{qgrid}}
# }
#
# @author Nathaniel Tomasetti
# @source Koo, B., La Vecchia, D., & Linton, O. B. (2019). Estimation of a Nonparametric
#   model for Bond Prices from Cross-section and Time series Information. Available at
#   SSRN3341344.
# calc_hhat_num <- function(data, ugrid, hu, rgrid = NULL, hr = NULL, xgrid, hx, qgrid = xgrid, hq = hx, cf_slist = NULL, interest = NULL, units = 365) { if (is.null(cf_slist)){ cf_slist <- calc_cf_slist(data) } if(!is.null(rgrid) & !is.null(hr) & !is.null(interest)){ interest_grid <- TRUE } else { interest_grid <- FALSE } # windows etc day_idx <- calc_day_idx(data, ugrid, hu) uu_window <- calc_uu_window(data,ugrid,hu) nday <- length(ugrid) if(interest_grid){ r_window <- calc_r_window(interest, rgrid, hr) day_grid <- expand.grid(ug = ugrid, rg = rgrid) nday <- nrow(day_grid) joint_window <- matrix(0, nrow(uu_window), nday) for(j in seq_along(rgrid)){ for(i in seq_along(ugrid)){ joint_window[, (j-1)*length(ugrid) + i] <- uu_window[,i] * r_window[,j] } } apply(joint_window, 2, function(y) { if(all(y == 0)){ day_idx <- c(0, 0) } else { window_idx <- which(y != 0) day_idx <- c(window_idx[1], window_idx[length(window_idx)]) } day_idx }) %>% t() -> day_idx obs <- which(day_idx[,1] != 0) day_grid <- day_grid[obs, ] joint_window <- joint_window[,obs] day_idx <- day_idx[obs, ] nday <- length(obs) if(nday == 1){ joint_window <- matrix(joint_window, ncol = 1) day_idx <- matrix(day_idx, nrow = 1) } } if(is.vector(xgrid)){ tupq_idx_x <- calc_tupq_idx(data, xgrid, hx, units) ux_window <- calc_ux_window(data, xgrid, hx, units) ntupq_x <- length(xgrid) } else if(nrow(xgrid) == 1){ tupq_idx_x <- calc_tupq_idx(data, xgrid, hx, units) ux_window <- calc_ux_window(data, xgrid, hx, units) ntupq_x <- length(xgrid) } else { tupq_idx_x <- NULL ux_window <- NULL ntupq_x <- ncol(xgrid) for(i in 1:nrow(xgrid)){ for(i in 1:nrow(xgrid)){ if(is.vector(hx)){ tupq_idx_x <- cbind(tupq_idx_x, calc_tupq_idx(data, xgrid[i,], hx, units)) ux_window <- cbind(ux_window, calc_ux_window(data, xgrid[i, ], hx, units)) } else if(nrow(hx) == 1){ tupq_idx_x <- cbind(tupq_idx_x, calc_tupq_idx(data, xgrid[i,], hx, units)) ux_window <- cbind(ux_window, calc_ux_window(data, xgrid[i, ], hx, units)) } else if(nrow(hx) == nrow(xgrid)){ tupq_idx_x <- cbind(tupq_idx_x, calc_tupq_idx(data, xgrid[i,], hx[i, ], units)) ux_window <- cbind(ux_window, calc_ux_window(data, xgrid[i, ], hx[i, ], units)) } else { stop('the dimension of hx must match the dimension of xgrid') } } } } if(is.vector(qgrid)){ tupq_idx_q <- calc_tupq_idx(data, qgrid, hq, units) uq_window <- calc_ux_window(data, qgrid, hq, units) ntupq_q <- length(qgrid) } else if(nrow(qgrid) == 1){ tupq_idx_q <- calc_tupq_idx(data, qgrid, hq, units) uq_window <- calc_ux_window(data, qgrid, hq, units) ntupq_q <- length(qgrid) } else { tupq_idx_q <- NULL uq_window <- NULL ntupq_q <- ncol(qgrid) for(i in 1:nrow(qgrid)){ for(i in 1:nrow(qgrid)){ if(is.vector(hq)){ tupq_idx_q <- cbind(tupq_idx_q, calc_tupq_idx(data, qgrid[i,], hq, units)) uq_window <- cbind(uq_window, calc_ux_window(data, qgrid[i, ], hq, units)) } else if(nrow(hq) == 1){ tupq_idx_q <- cbind(tupq_idx_q, calc_tupq_idx(data, qgrid[i,], hq, units)) uq_window <- cbind(uq_window, calc_ux_window(data, qgrid[i, ], hq, units)) } else if(nrow(hq) == nrow(qgrid)){ tupq_idx_q <- cbind(tupq_idx_q, calc_tupq_idx(data, qgrid[i,], hq[i, ], units)) uq_window <- cbind(uq_window, calc_ux_window(data, qgrid[i, ], hq[i, ], units)) } else { stop('the dimension of hq must match the dimension of qgrid') } } } } if(interest_grid){ hhat <- calc_hhat_num2_c(nday, ntupq_x, ntupq_q, day_idx, tupq_idx_x, tupq_idx_q, ux_window, uq_window, joint_window, cf_slist) day_grid <- day_grid[rep(1:nday, each=ntupq_q*ntupq_x),] hhat <- data.frame(hhat_numer = c(hhat), ug = 
day_grid$ug, rg = day_grid$rg) } else { hhat <- calc_hhat_num2_c(nday, ntupq_x, ntupq_q, day_idx, tupq_idx_x, tupq_idx_q, ux_window, uq_window, uu_window, cf_slist) hhat <- data.frame(hhat_numer = c(hhat), ug = rep(ugrid, rep(ntupq_q * ntupq_x, nday))) } if(is.vector(xgrid)){ hhat$xg = rep(xgrid, nday*ntupq_q) } else if(nrow(xgrid) == 1){ hhat$xg = rep(xgrid, nday*ntupq_q) } else { x_temp = NULL for(i in 1:nday){ x_temp = c(x_temp, rep(xgrid[i,], ntupq_q)) } hhat$xg = x_temp } if(is.vector(qgrid)){ hhat$qg = rep(qgrid, rep(ntupq_x, ntupq_q)) } else if(nrow(qgrid) == 1){ hhat$qg = rep(qgrid, rep(ntupq_x, ntupq_q)) } else { q_temp = NULL for(i in 1:nday){ q_temp = c(q_temp, rep(qgrid[i,], rep(ntupq_x, ntupq_q))) } hhat$qg = q_temp } hhat } #' @param hx Numeric vector of values between 0 and 1. Bandwidth parameter determining the size of the window #' that corresponds to each time point (\code{xgrid}). #' See \code{Details}. #' The selection of bandwidth parameter is crucial in non-parametric estimation. #' If not sure, please use \code{ycevo} to allow the function choose it for you. #' @param ht Numeric vector that #' represents bandwidth parameter determining the size of the window #' in the kernel function #' that corresponds to each time-to-maturities (\code{tau}). #' The same unit as \code{tau}. #' See \code{Details}. #' The selection of bandwidth parameter is crucial in non-parametric estimation. #' If not sure, please use \code{ycevo} to allow the function choose it for you. #' @param rgrid (Optional) Numeric vector of interest rate grids in percentage at which the discount curve is evaluated, e.g. 4.03 means at interest rate of 4.03\%. #' @param hr (Optional) Numeric vector of bandwidth parameter in percentage determining the size of the window #' in the kernel function #' that corresponds to each interest rate grid (\code{rgrid}). #' @param interest (Optional) Numeric vector of daily short term interest rates. #' The length is the same as the number of quotation dates included in the data, #' i.e. one interest rate per day. # @param units (Optional) number of tupq per tau (e.g. 365 for daily data with annual grid values). Defaults to 365 #' @param price_slist (Internal) Experienced users only. A list of matrices, generated by the internal function \code{calc_price_slist}. #' @param cf_slist (Internal) Experienced users only. A list of matrices, generated by the internal function \code{calc_cf_slist}. #' #' @return Data frame of the yield and discount rate at each combination of the provided grids. #' \describe{ #' \item{discount}{Estimated discount rate} #' \item{xgrid}{Same as input \code{xgrid}} #' \item{tau}{Same as input \code{tau}} #' \item{yield}{Estimated yield} #' } #' #' @author Nathaniel Tomasetti, Bonsoo Koo, and Yangzhuoran Fin Yang #' @describeIn ycevo Experienced users only. #' Yield estimation with interest rate and manually selected bandwidth parameters. 
#' @export estimate_yield <- function(data, xgrid, hx, tau, ht, rgrid = NULL, hr = NULL, interest = NULL, loess = TRUE, price_slist = NULL, cf_slist = NULL){ units <- 365 tau_p <- NULL htp <- NULL if(is.null(tau_p) || is.null(htp)){ tau_p <- tau htp <- ht } if(!is.null(rgrid) & !is.null(hr) & !is.null(interest)){ interest_grid <- TRUE } else { interest_grid <- FALSE } # Check inputs if(!is.vector(xgrid)){ stop('xgrid must be a vector') } if(!is.vector(hx)){ stop('hx must be a vector') } if(!is.data.frame(data)){ stop('data must be a dataframe') } if(!all(c('qdate', 'crspid', 'mid.price', 'accint', 'pdint', 'tupq') %in% colnames(data))){ stop('data must contain columns qdate, crspid, mid.price, accint, pdint, and tupq') } if(!is.matrix(tau) & !is.vector(tau)){ stop('tau must be a vector or a matrix') } if(!is.numeric(xgrid)){ stop('xgrid must be numeric') } if(!is.numeric(hx)){ stop('hx must be numeric') } if(!is.numeric(tau)){ stop('tau must be numeric') } if(length(xgrid) != length(hx)){ stop('xgrid and hx must have the same length') } if(is.vector(tau)){ if(is.vector(ht)){ if(length(ht) != length(tau)){ stop('ht and tau must have the same length') } } else { stop('ht must be a vector of xgrid is a vector') } } if(is.matrix(tau)){ if(is.vector(ht)){ if(length(ht) != ncol(tau)){ stop('a vector ht must have a length equal to the number of columns of tau') } } else { if(ncol(ht) != ncol(tau) | nrow(ht) != nrow(tau)){ stop('a matrix ht must have the same dimensions as tau') } } } # cf_slist <- NULL if(is.null(cf_slist)){ cf_slist <- calc_cf_slist(data) } # price_slist <- NULL if(is.null(price_slist)) { price_slist <- calc_price_slist(data) } # Estimate dbar & the numerator of the h-hat matrix if(interest_grid){ dbar <- calc_dbar(data = data, ugrid = xgrid, hu = hx, rgrid = rgrid, hr = hr, xgrid = tau, hx = ht, price_slist = price_slist, cf_slist = cf_slist, interest = interest, units = units) hhat_num <- calc_hhat_num(data = data, ugrid = xgrid, hu = hx, rgrid = rgrid, hr = hr, xgrid = tau, hx = ht, qgrid = tau_p, hq = htp, cf_slist = cf_slist, interest = interest, units = units) } else { dbar <- calc_dbar(data = data, ugrid = xgrid, hu = hx, xgrid = tau, hx = ht, price_slist = price_slist, cf_slist = cf_slist, units = units) hhat_num <- calc_hhat_num(data = data, ugrid = xgrid, hu = hx, xgrid = tau, hx = ht, qgrid = tau_p, hq = htp, cf_slist = cf_slist, units = units) } if(any(dbar$dbar_denom == 0)) { problem_tau <- filter(dbar, .data$dbar_denom == 0)$xg warning("tau values at ", paste(problem_tau, collapse = ", "), " does not have enough obs to estimate yield") output <- estimate_yield( data = data, xgrid = xgrid, hx = hx, tau = tau[!tau %in% problem_tau], ht = ht[!tau %in% problem_tau], loess = FALSE) return(output) } # The denominator of h-hat entries are estimated as part of dbar if(interest_grid){ hhat <- dplyr::mutate( dplyr::right_join( dplyr::select(dbar, !!sym('ug'), !!sym('xg'), !!sym('rg'), !!sym('dbar_denom')), hhat_num, by = c('ug' = 'ug', 'xg' = 'xg', 'rg' = 'rg') ), hhat = !!sym('hhat_numer') / !!sym('dbar_denom') ) day_grid <- unique(hhat[c('ug', 'rg')]) } else { hhat <- dplyr::mutate( dplyr::right_join( dplyr::select(dbar, !!sym('ug'), !!sym('xg'), !!sym('dbar_denom')), hhat_num, by = c('ug' = 'ug', 'xg' = 'xg') ), hhat =!!sym('hhat_numer') / !!sym('dbar_denom') ) day_grid <- data.frame(ug = unique(hhat$ug), rg = 0) hhat$rg <- 0 dbar$rg <- 0 } # Create the dataframe that the function returns dhat <- data.frame() for(i in 1:nrow(day_grid)){ # Create the dbar vector and h-hat 
matrix for this value of ugrid db <- dplyr::mutate( dplyr::filter(dbar, !!sym('ug') == day_grid$ug[i], !!sym('rg') == day_grid$rg[i]), dbar = !!sym('dbar_numer') / !!sym('dbar_denom') )$dbar hh <- matrix(dplyr::filter(hhat, !!sym('ug') == day_grid$ug[i], !!sym('rg') == day_grid$rg[i])$hhat, nrow = length(db)) if(any(is.na(db))){ na <- which(is.na(db)) if(is.vector(tau) & is.vector(tau_p)){ xgr <- tau[-na] qgr <- tau_p[tau_p <= max(xgr)] } else if(nrow(tau) == 1 & nrow(tau_p) == 1){ xgr <- tau[-na] qgr <- tau_p[tau_p <= max(xgr)] } else { xgr <- tau[i,-na] qgr <- tau_p[i, tau_p[tau_p <= max(xgr)]] } db <- db[-na] hh <- hh[1:length(xgr), 1:length(qgr)] } else { if(is.vector(tau) & is.vector(tau_p)){ xgr <- tau qgr <- tau_p } else if(nrow(tau) == 1 & nrow(tau_p) == 1){ xgr <- tau qgr <- tau_p } else { xgr <- tau[i, ] qgr <- tau_p[i, ] } } # Extract the xgrid and qgrid for this value of u as xgr and qgr. # The dimensions of these objects are tested earlier in the code. # Create a matrix of interpolation weights interpol_weights <- matrix(0, length(xgr), length(qgr)) # Iterate over the values of qgrid for(j in 1:length(qgr)){ # If qgrid is contained in xgrid then the weight will be one if(any(xgr == qgr[j])){ interpol_weights[which(xgr == qgr[j]), j] <- 1 } else { # Otherwise find the xgrid immediately above and below this xgrid lower <- max(which(xgr < qgr[j])) upper <- min(which(xgr > qgr[j])) # Error if qgrid is lower than the first xgrid, or greater than the last xgrid if(upper == 1 | lower == length(xgr)){ stop('tau_p entries must lie inside tau') } # Find interpolation weights as ratio between the two xgrid values lying above and below this qgrid dist <- xgr[upper] - xgr[lower] interpol_weights[lower, j] <- (qgr[j] - xgr[lower]) / dist interpol_weights[upper, j] <- (xgr[upper] - qgr[j]) / dist } } # Construct the length(q) x length(x) matrix of the interpolated hhat hh_interpol <- matrix(0, length(xgr), length(xgr)) for(j in 1:length(xgr)){ hh_interpol[,j] <- colSums(t(hh) * interpol_weights[j,]) } # transpose? X <- diag(1, length(xgr)) + t(hh_interpol) dh <- solve(X) %*% db if(interest_grid){ dhat <- rbind(dhat, data.frame(discount = dh, ug = day_grid$ug[i], rgrid = day_grid$rg[i], qg = xgr)) } else { dhat <- rbind(dhat, data.frame(discount = dh, ug = day_grid$ug[i], qg = xgr)) } } # loess smoothing if(loess){ # loess_model <- stats::loess(discount~qg, data = dhat) # dhat$discount <- predict(loess_model) loess_model <- lapply(unique(dhat$ug), function(ugg) stats::loess(discount~qg, data = filter(dhat, .data$ug == ugg))) dhat$discount <- do.call(base::c, lapply(loess_model, stats::predict)) } dhat$yield <- -log(dhat$discount) / dhat$qg dhat <- dplyr::rename(dhat, xgrid = .data$ug, tau = .data$qg ) return(dhat) }
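# Illustrative sketch (not part of the original file): a minimal call to
# estimate_yield() on the bundled USbonds data. The grid and bandwidth values
# below are arbitrary choices for illustration only, not recommended
# selections (the roxygen above suggests using ycevo() to pick bandwidths);
# the call is wrapped in `if (FALSE)` because the estimation is slow.
if (FALSE) {
  fit <- estimate_yield(
    data  = USbonds,
    xgrid = 0.2,                 # a single point in (0, 1) along the sample period
    hx    = 0.18,                # bandwidth around that time point
    tau   = c(0.5, 1, 2, 5),     # times to maturity, in years
    ht    = c(0.3, 0.3, 0.5, 1)  # bandwidths for each tau, same unit as tau
  )
  head(fit)  # columns: discount, xgrid, tau, yield
}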
/scratch/gouwar.j/cran-all/cranData/ycevo/R/estimation.R