# One explanatory variable at a time violin plotter
#
# @usage plot_violin_1x(dat, response_variable_name, explanatory_variable_name,
# title="", xlab="", ylab="",
# COLOURS=c("#e0f3db","#ccebc5","#a8ddb5","#7bccc4","#4eb3d3","#2b8cbe"),
# BAR_COLOURS=c("#636363","#1c9099","#de2d26"),
# CI=95, XTICKS=TRUE, LOG=FALSE, BASE=10)
#
# @param dat dataframe where the response and explanatory variables (including interaction terms, if applicable) are explicitly written into columns (output of the parse_formula() function) [mandatory]
# @param response_variable_name string referring to the variable name of the response variable [mandatory]
# @param explanatory_variable_name string referring to the variable name of the explanatory variable [mandatory]
# @param title string specifying the plot title, typically the explanatory term including additive and interaction terms in the formula [default=""]
# @param xlab string specifying the x-axis label [default=""]
# @param ylab string specifying the y-axis label [default=""]
# @param COLOURS vector of colors of the violin plots which are repeated if the length is less than the number of explanatory factor levels [default=c("#e0f3db", "#ccebc5", "#a8ddb5", "#7bccc4", "#4eb3d3", "#2b8cbe")]
# @param BAR_COLOURS vector of colors of standard deviation, standard error and 95 percent confidence interval error bars (error bar selection via leaving one of the three colors empty) [default=c("#636363", "#1c9099", "#de2d26")]
# @param CI numeric referring to the percent confidence interval [default=95]
# @param XTICKS logical referring to whether the explanatory variable is strictly categorical [default=TRUE]
# @param LOG logical referring to whether to transform the explanatory variable into the logarithm scale [default=FALSE]
# @param BASE numeric referring to the logarithm base to transform the explanatory variable with [default=10]
#
# @return Return 0 if successful
#
# @examples
# x1 = rep(rep(rep(c(1:5), each=5), times=5), times=5)
# x2 = rep(rep(letters[6:10], each=5*5), times=5)
# x3 = rep(letters[11:15], each=5*5*5)
# y = rep(1:5, each=5*5*5) + rnorm(rep(1:5, each=5), length(x1))
# data = data.frame(x1, x2, x3, y)
# formula = y ~ x1 + x2 + x3 + (x2:x3)
# DF = parse_formula(formula=formula, data=data)
# plot_violin_1x(dat=DF, response_variable_name="y", explanatory_variable_name="x3")
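# ### an illustrative extra call (not part of the original example set): treat the
# ### numeric term x1 as a continuous x-axis instead of discrete ticks via XTICKS=FALSE
# plot_violin_1x(dat=DF, response_variable_name="y", explanatory_variable_name="x1", XTICKS=FALSE)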
#
#' @importFrom stats qnorm density aggregate
#' @importFrom graphics par plot axis polygon arrows points grid par axTicks
#' @importFrom grDevices rgb
#
plot_violin_1x = function(dat, response_variable_name, explanatory_variable_name, title="", xlab="", ylab="", COLOURS=c("#e0f3db", "#ccebc5", "#a8ddb5", "#7bccc4", "#4eb3d3", "#2b8cbe"), BAR_COLOURS=c("#636363", "#1c9099", "#de2d26"), CI=95, XTICKS=TRUE, LOG=FALSE, BASE=10){
### FOR TESTING:
# dat=DF; response_variable_name="y"; explanatory_variable_name="x3"
# title=""; xlab=""; ylab=""; COLOURS=c("#e0f3db", "#ccebc5", "#a8ddb5", "#7bccc4", "#4eb3d3", "#2b8cbe"); BAR_COLOURS=c("#636363", "#1c9099", "#de2d26"); CI=95; XTICKS=TRUE; LOG=FALSE; BASE=10
### extract the dependent or response or y variable, as well as the independent or explanatory or x variable
x = as.character(eval(parse(text=paste0("dat$`", explanatory_variable_name, "`")))) ### numeric and categorical both treated as categorical
# x = as.factor(gsub("-", "_", x)) ### remove "-" because it will conflict with the string splitting if performing TukeyHSD()
y = eval(parse(text=paste0("dat$`", response_variable_name, "`"))) ### numeric
# y = rnorm(length(y)) ### null test
### merge them into a data frame for ease of handling
### while converting the x variable into both categorical and numeric variables
x_categorical = NA ### to prevent devtools error: Undefined global functions or variables: x_categorical
x_numeric = tryCatch(
as.numeric(gsub("_", "-", x)),
warning=function(e){
as.numeric(as.factor(gsub("_", "-", x)))
}
)
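### (if any level cannot be coerced to a number, the warning handler above falls back to the factor codes 1..k)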
if (XTICKS==FALSE){
### for numeric explanatory variable
df = data.frame(y=y, x_categorical=as.factor(x_numeric), x_numeric=x_numeric) ### remove "-" because it will conflict with the string splitting if performing TukeyHSD()
df = droplevels(df[complete.cases(df), ])
df$y = as.numeric(y)
df$x_numeric = as.numeric(df$x_numeric)
### transform the x axis into log-scale for ease of viewing
if (LOG==TRUE){
if(sum(is.na(suppressWarnings(log(df$x_numeric, base=BASE))), na.rm=T) == 0){
df$x_numeric=log(df$x_numeric, base=BASE)
xlab = paste0("log", BASE, "(", xlab, ")")
} else {
df$x_numeric=log(df$x_numeric+abs(min(df$x_numeric))+1, base=BASE)
xlab = paste0("log", BASE, "(", xlab, "+", round(min(df$x_numeric)+1, 3), ")")
}
}
### convert numeric factors into categories
if (length(unique(round(df$x_numeric, 4))) == length(unique(df$x_numeric))){
df$x_categorical=as.factor(round(df$x_numeric, 4))
} else {
df$x_categorical=as.factor(df$x_numeric)
}
} else {
### for strictly categorical explanatory variable
df = data.frame(y=y, x_categorical=x, x_numeric=x_numeric)
df = droplevels(df[complete.cases(df), ])
df$y = as.numeric(y)
df$x_categorical = as.factor(df$x_categorical)
df$x_numeric = as.numeric(df$x_numeric)
}
### extract the levels and unique values of the x variable
x_LEVELS_AND_NUMBERS = aggregate(x_numeric ~ x_categorical, data=df, FUN=mean)
x_levels = as.character(x_LEVELS_AND_NUMBERS[,1])
x_numbers = as.numeric(x_LEVELS_AND_NUMBERS[,2])
### calculate the summary statistics of the x and y variables
x_min = min(df$x_numeric)
x_max = max(df$x_numeric)
x_sd = sd(df$x_numeric)
y_min = min(df$y)
y_max = max(df$y)
y_sd = sd(df$y)
### calculate half of the smallest interval between two adjacent levels of the x variable (used as the maximum half-width of each violin)
max_x_interval = min(x_numbers[order(x_numbers)][2:length(x_numbers)] - x_numbers[order(x_numbers)][1:(length(x_numbers)-1)]) / 2
### repeat violin plot colours to fit the number of explanatory variable levels
COLOURS = rep(COLOURS, times=ceiling(length(x_levels)/length(COLOURS)))
### define las: i.e. the orientation of the x-axis tick labels
### as well as the plot margins and the x-axis label
max_nchar = max(unlist(lapply(x_levels, FUN=nchar)))
# orig_par = par(no.readonly=TRUE)
# on.exit(par(orig_par)) ### breaks the layout (justification: this function is only called by the main function: violinplotter())
if (max_nchar > 7){
las = 2
par(mar=c(max_nchar*0.70, 5, 7, 2))
xlab=""
} else {
las =1
par(mar=c(5, 5, 7, 2))
}
### find y-axis ticks location within the range of y
plot(y, type="n", main="", xlab="", ylab="", xaxt="n", yaxt="n")
y_ticks = axTicks(2)
par(new=TRUE)
### initialize the plot with or without the x-axis
if (XTICKS==TRUE){
### for strictly categorical explanatory variable
plot(x=c(x_min-max_x_interval, x_max+max_x_interval), y=c(y_min-y_sd, y_max+y_sd), new=FALSE, type="n", main=title, xlab=xlab, ylab=ylab, las=las, xaxt="n", yaxt="n")
axis(side=1, at=x_numbers, labels=x_levels, las=las)
axis(side=2, at=y_ticks, labels=y_ticks, las=2)
} else {
### for continuous explanatory variable
plot(x=c(x_min-max_x_interval, x_max+max_x_interval), y=c(y_min-y_sd, y_max+y_sd), new=FALSE, type="n", main=title, xlab=xlab, ylab=ylab, las=las, yaxt="n")
axis(side=2, at=y_ticks, labels=y_ticks, las=2)
}
### iteratively plot the density of each explanatory variable level
for (i in 1:length(x_levels)){
# i = 1
subdat = droplevels(subset(df, x_categorical==x_levels[i]))
### calculate the summary statistics of the response variable (y): mean, standard deviation, standard error, and 95% confidence interval
mu = mean(subdat$y)
sigma = sd(subdat$y)
se = sd(subdat$y)/sqrt(nrow(subdat)-1)
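### half-width of the confidence interval from the normal approximation, e.g. qnorm(0.975)*se when CI=95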
ci = qnorm(((CI/100)/2)+0.50) * se
if (is.na(sigma)==FALSE){
### calculate the density with binning adjustment proportional to the number of observations divided by 1x10^5
d = density(subdat$y, adjust=max(c(1, nrow(subdat)/1e5)))
### restrict the range of the response variable to the input dataframe
d$y = d$y[(d$x >= min(df$y)) & (d$x <= max(df$y))]
d$x = d$x[(d$x >= min(df$y)) & (d$x <= max(df$y))]
### transform the density (d$y) into the 0.00 to 1.00 range (in preparation for multiplying with the maximum interval variable: "max_x_interval")
d.y_min = min(d$y)
d.y_max = max(d$y)
d$y = (d$y - d.y_min) / (d.y_max - d.y_min)
### define the x-axis points of the polygon defined as the density values left and right of the explanatory variable (df$x) level or value
poly_x = c(x_numbers[i]-rev(d$y*max_x_interval), x_numbers[i]+(d$y*max_x_interval))
### define the y-axis points of the polygon defined as the range of values of the response variable (df$y)
poly_y = c(rev(d$x), d$x)
### draw violin polygon and error bars when the variance is greater than 0
if (sigma > 0){
### draw the polygon
polygon(x=poly_x, y=poly_y, border=NA, col=COLOURS[i])
### plot the summary statistics
suppressWarnings(arrows(x0=x_numbers[i], y0=mu+sigma, y1=mu-sigma, angle=90, code=3, lwd=2, length=0.1, col=BAR_COLOURS[1]))
suppressWarnings(arrows(x0=x_numbers[i], y0=mu+se, y1=mu-se, angle=90, code=3, lwd=2, length=0.1, col=BAR_COLOURS[2]))
suppressWarnings(arrows(x0=x_numbers[i], y0=mu+ci, y1=mu-ci, angle=90, code=3, lwd=2, length=0.1, col=BAR_COLOURS[3]))
}
}
points(x=x_numbers[i], y=mu, pch=20)
# print(x_levels[i])
# print(mean(subdat$y))
}
### plot grid lines
grid()
### show the summary statistics legend
if (sum(BAR_COLOURS!=rgb(1,0,0,alpha=0))>0){
legend("bottomright", inset=c(0, 1), xpd=TRUE, horiz=TRUE, bty="n", col=unlist(BAR_COLOURS)[BAR_COLOURS!=rgb(1,0,0,alpha=0)], cex=(par(no.readonly=TRUE)$cex*0.5), lty=1, lwd=2, legend=c("Standard Deviation", "Standard Error", paste0(CI, "% Confidence Interval"))[BAR_COLOURS!=rgb(1,0,0,alpha=0)])
}
### return 0 on success
return(0)
}
# file: /scratch/gouwar.j/cran-all/cranData/violinplotter/R/plot_violin_1x.R
# Show means and sample sizes
#
# @usage show_means_sample_sizes(formula, data=NULL, explanatory_variable_name, SHOW_MEANS=TRUE, SHOW_SAMPLE_SIZE=TRUE)
#
# @param formula R's compact symbolic form to represent linear models with fixed additive and interaction effects (See ?formula for more information) [mandatory]
# @param data data.frame containing the response and explanatory variables which forms the formula above [default=NULL]
# @param explanatory_variable_name string referring to the variable name of the explanatory variable whose class means will be compared [mandatory]
# @param SHOW_SAMPLE_SIZE logical referring to whether or not to show the sample sizes of each factor level into an existing plot [default=FALSE]
# @param SHOW_MEANS logical referring to whether or not to show the means [default=TRUE]
#
# @return Tukey's honest significant difference grouping table with response variable categorical means, grouping, level names and corresponding numeric counterparts
# @return Appends honest significant difference grouping letters into an existing plot
#
# @examples
# x1 = rep(rep(rep(c(1:5), each=5), times=5), times=5)
# x2 = rep(rep(letters[6:10], each=5*5), times=5)
# x3 = rep(letters[11:15], each=5*5*5)
# y = rep(1:5, each=5*5*5) + rnorm(rep(1:5, each=5), length(x1))
# data = data.frame(x1, x2, x3, y)
# formula = y ~ x1 + x2 + x3 + (x2:x3)
# DF = parse_formula(formula=formula, data=data)
# plot_violin_1x(dat=DF, response_variable_name="y", explanatory_variable_name="x3")
# show_means_sample_sizes(formula=formula, data=data, explanatory_variable_name="x3", SHOW_MEANS=TRUE, SHOW_SAMPLE_SIZE=TRUE)
#
#' @importFrom graphics text
#
show_means_sample_sizes = function(formula, data=NULL, explanatory_variable_name, SHOW_MEANS=TRUE, SHOW_SAMPLE_SIZE=TRUE){
### FOR TESTING:
# data=NULL; explanatory_variable_name="x3"; alpha=0.05; LOG=FALSE; BASE=10; PLOT=FALSE; SHOW_SAMPLE_SIZE=FALSE
### parse the formula and generate the dataframe with explicit interaction terms if expressed in the formula
df = parse_formula(formula=formula, data=data, IMPUTE=FALSE, IMPUTE_METHOD=mean)
response_var = df[,1]; response_var_name = colnames(df)[1]
### compute the means per explanatory variable level
means = eval(parse(text=paste0("aggregate(", response_var_name, "~ `", explanatory_variable_name, "`, data=df, FUN=mean)")))
colnames(means) = c("LEVELS", "MEANS")
means = means[order(means$MEANS, decreasing=TRUE), ]
### prepare the grouping list
GROUPING_LIST = eval(parse(text=paste0("list('LEVEL_", paste(as.character(means$LEVELS), collapse="'=c(), 'LEVEL_"), "'=c())")))
GROUPING_LIST = as.matrix(lapply(GROUPING_LIST, FUN=paste, collapse=""))
GROUPING_LIST = data.frame(LEVELS=gsub("LEVEL_", "", as.character(rownames(GROUPING_LIST))), GROUPING=as.character(GROUPING_LIST[,1]))
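### note: the GROUPING column stays empty here; this function only annotates means and sample sizes (grouping letters are assigned by the mean-comparison functions)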
### prepare the explanatory variable names and corresponding numbers
x_levels = eval(parse(text=paste0("levels(as.factor(df$`", explanatory_variable_name, "`))")))
x_numbers = tryCatch(as.numeric(gsub("_", "-", as.character(x_levels))),
warning=function(e){as.numeric(as.factor(x_levels))})
X_LEVELS_AND_NUMBERS = data.frame(LEVELS=x_levels, NUMBERS=x_numbers)
### merge and append the grouping letters together with the means
MERGE_GROUPING_DF = merge(merge(GROUPING_LIST, X_LEVELS_AND_NUMBERS, by="LEVELS"), means, by="LEVELS")
### show means and/or sample sizes
if(SHOW_MEANS){
text(x=MERGE_GROUPING_DF$NUMBERS, y=max(response_var)+(sd(response_var)/4), lab=paste0("(", round(MERGE_GROUPING_DF$MEANS,2), ")"))
}
if(SHOW_SAMPLE_SIZE){
sample_sizes = table(eval(parse(text=paste0("df$`", explanatory_variable_name, "`"))))
SAMPLE_SIZES = as.data.frame(sample_sizes)
colnames(SAMPLE_SIZES) = c("LEVELS", "SAMPLE_SIZES")
MERGE_GROUPING_DF = merge(MERGE_GROUPING_DF, SAMPLE_SIZES, by="LEVELS")
text(x=MERGE_GROUPING_DF$NUMBERS, y=max(response_var)+(sd(response_var)/16), lab=paste0("(n=", MERGE_GROUPING_DF$SAMPLE_SIZES, ")"))
}
### output
return(MERGE_GROUPING_DF)
}
# file: /scratch/gouwar.j/cran-all/cranData/violinplotter/R/show_means_sample_sizes.R
#' Plotting and Comparing Means with Violin Plots
#'
#' @usage violinplotter(formula,
#' data=NULL,
#' TITLE="",
#' XLAB="",
#' YLAB="",
#' VIOLIN_COLOURS=c("#e0f3db","#a8ddb5","#7bccc4","#2b8cbe"),
#' PLOT_BARS=TRUE,
#' ERROR_BAR_COLOURS=c("#636363","#1c9099","#de2d26"),
#' SHOW_SAMPLE_SIZE=FALSE,
#' SHOW_MEANS=TRUE,
#' CATEGORICAL=TRUE,
#' LOGX=FALSE,
#' LOGX_BASE=10,
#' MANN_WHITNEY=TRUE,
#' HSD=FALSE,
#' ALPHA=0.05,
#' REGRESS=FALSE)
#'
#' @param formula R's compact symbolic form to represent linear models with fixed additive and interaction effects (See ?formula for more information) [mandatory]
#' @param data data.frame containing the response and explanatory variables which forms the formula above [default=NULL]
#' @param TITLE string or vector of strings corresponding to violin plot title/s [default: combinations of the "response variable name X explanatory variable" from the dataframe column names]
#' @param XLAB string or vector of strings specifying the x-axis labels [default: column names of the explanatory variables (and their combinations) from data]
#' @param YLAB string or vector of strings specifying the y-axis labels [default: column names of the response variable from data]
#' @param VIOLIN_COLOURS vector or list of vectors of colors of the violin plots which are repeated if the length is less than the number of explanatory factor levels or less than the number of explanatory factors in the case of a list [default=c("#e0f3db", "#a8ddb5", "#7bccc4", "#2b8cbe")]
#' @param PLOT_BARS logical (i.e. TRUE or FALSE) to plot all or none of the bars; or a vector of strings specifying which bars to plot (e.g. "stdev", "sterr", "ci") [default=TRUE, i.e. c("stdev", "sterr", "ci")]
#' @param ERROR_BAR_COLOURS vector of colors of standard deviation, standard error and 95 percent confidence interval error bars (error bar selection via leaving one of the three colors empty) [default=c("#636363", "#1c9099", "#de2d26")]
#' @param SHOW_SAMPLE_SIZE logical referring to whether or not to show the sample sizes for each category [default=FALSE]
#' @param SHOW_MEANS logical referring to whether or not to show the means [default=TRUE]
#' @param CATEGORICAL logical or vector of logicals referring to whether the explanatory variable/s is/are strictly categorical [default=TRUE]
#' @param LOGX logical or vector of logicals referring to whether to transform the explanatory variable/s into the logarithm scale [default=FALSE]
#' @param LOGX_BASE numeric or vector of numerics referring to the logarithm base to transform the explanatory variable/s with [default=10]
#' @param MANN_WHITNEY logical or vector of logicals referring to whether to perform Mann-Whitney Grouping [default=TRUE]
#' @param HSD logical or vector of logicals referring to whether to perform Tukey's Honest Significance Grouping [default=FALSE]
#' @param ALPHA numeric significance level for the analysis of variance F-test and Tukey's mean comparison [default=0.05]
#' @param REGRESS logical or vector of logicals referring to whether to regress the response variable against the explanatory variable/s [default=FALSE]
#'
#' @return Violin plot/s with optional error bars, mean comparison grouping/s, and regression line/s
#' @return Mean comparison grouping/s based on Tukey's honest significant difference and regression line statistics, if applicable
#'
#' @examples
#' x1 = rep(rep(rep(c(1:5), each=5), times=5), times=5)
#' x2 = rep(rep(letters[6:10], each=5*5), times=5)
#' x3 = rep(letters[11:15], each=5*5*5)
#' y = rep(1:5, each=5*5*5) + rnorm(rep(1:5, each=5), length(x1))
#' formula = log(y) ~ exp(x1) + x2 + x3 + (x2:x3)
#' test1 = violinplotter(formula=formula)
#' test2 = violinplotter(formula=formula, PLOT_BARS=c("ci", "stdev"))
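#' # an illustrative extra call (assumption: the formula above yields four explanatory terms):
#' # per-term options are supplied as vectors and the mean-comparison tests are switched off
#' test3 = violinplotter(formula=formula, CATEGORICAL=c(FALSE,TRUE,TRUE,TRUE),
#'                       MANN_WHITNEY=FALSE, HSD=FALSE)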
#'
#' @importFrom grDevices rgb
#'
#' @export
violinplotter = function(formula,
data=NULL,
TITLE="",
XLAB="",
YLAB="",
VIOLIN_COLOURS=c("#e0f3db","#a8ddb5","#7bccc4","#2b8cbe"),
PLOT_BARS=TRUE,
ERROR_BAR_COLOURS=c("#636363","#1c9099","#de2d26"),
SHOW_SAMPLE_SIZE=FALSE,
SHOW_MEANS=TRUE,
CATEGORICAL=TRUE,
LOGX=FALSE,
LOGX_BASE=10,
MANN_WHITNEY=TRUE,
HSD=FALSE,
ALPHA=0.05,
REGRESS=FALSE){
### FOR TESTING: load the parsing, plotting, HSD, and regressing functions
# source("parse_formula.R")
# source("plot_violin_1x.R")
# source("show_means_sample_sizes.R")
# source("mean_comparison_HSD.R")
# source("plot_regression_line.R")
# source("violinplotter.R")
# x1 = rep(rep(rep(c(1:5), each=5), times=5), times=5)
# x2 = rep(rep(letters[6:10], each=5*5), times=5)
# x3 = rep(letters[11:15], each=5*5*5)
# y = rep(1:5, each=5*5*5) + rnorm(rep(1:5, each=5), length(x1))
# formula = log(y) ~ exp(x1) + x2 + x3 + (x2:x3)
# data = NULL
# TITLE=""; XLAB=""; YLAB=""; VIOLIN_COLOURS=c("#e0f3db", "#ccebc5", "#a8ddb5", "#7bccc4", "#4eb3d3", "#2b8cbe")
# PLOT_BARS=TRUE; ERROR_BAR_COLOURS=c("#636363", "#1c9099", "#de2d26");
# PLOT_BARS=FALSE; ERROR_BAR_COLOURS=c("#636363", "#1c9099", "#de2d26");
# PLOT_BARS="sterr"; ERROR_BAR_COLOURS=c("#636363", "#1c9099", "#de2d26");
# PLOT_BARS=c("ci", "sterr"); ERROR_BAR_COLOURS=c("#636363", "#1c9099", "#de2d26");
# PLOT_BARS=c(); ERROR_BAR_COLOURS=c("#636363", "#1c9099", "#de2d26");
# CATEGORICAL=TRUE; LOGX=FALSE; LOGX_BASE=1; HSD=TRUE; ALPHA=0.05; REGRESS=FALSE
# CATEGORICAL=FALSE; LOGX=FALSE; LOGX_BASE=1; HSD=TRUE; ALPHA=0.05; REGRESS=TRUE
# SHOW_SAMPLE_SIZE=TRUE
### parse the formula and generate the dataframe with explicit interaction terms if expressed in the formula
df = parse_formula(formula=formula, data=data, IMPUTE=FALSE, IMPUTE_METHOD=mean)
response_var = df[,1]
explanatory_var = df[,2:ncol(df)]
response_var_name = colnames(df)[1]
explanatory_var_names = colnames(df)[2:ncol(df)]
### set axes labels and titles
if (YLAB==""){
YLAB = response_var_name
}
if ( (XLAB=="") | (length(XLAB) != ncol(df)-1) ){
XLAB = explanatory_var_names
}
if ( (sum(TITLE==rep("", times=length(TITLE)))==length(TITLE)) | (length(TITLE) != ncol(df)-1) ){
TITLE = paste0(YLAB, "\nX\n", XLAB)
}
### What are the violin plot colours we're using for each explanatory variable?
if (is.list(VIOLIN_COLOURS)==FALSE) {
VIOLIN_COLOURS = rep(list(VIOLIN_COLOURS), times=length(explanatory_var_names))
} else if (length(VIOLIN_COLOURS) < length(explanatory_var_names)){
VIOLIN_COLOURS = rep(VIOLIN_COLOURS, times=(length(explanatory_var_names)-length(VIOLIN_COLOURS)+1))
}
### Do we have to transform the explanatory variable/s into the log space?
if (length(LOGX) != ncol(df)-1) {
LOGX = rep(LOGX, times=ncol(df)-1)
}
if (length(LOGX_BASE) != ncol(df)-1) {
LOGX_BASE = rep(LOGX_BASE, times=ncol(df)-1)
}
### Are the explanatory variable/s strictly categorical?
if (length(CATEGORICAL) != ncol(df)-1) {
CATEGORICAL = rep(CATEGORICAL, times=ncol(df)-1)
}
CATEGORICAL[LOGX] = FALSE ### automatically treat a term as non-strictly-categorical if it is set to be log-transformed!
### Which statistical bars do we plot?
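### bars that are switched off are encoded as a fully transparent colour (rgb(1,0,0,alpha=0));
### plot_violin_1x() compares BAR_COLOURS against the same transparent value to decide which legend entries to draw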
ERROR_BAR_COLOURS=rep(ERROR_BAR_COLOURS, times=3)[1:3] ### to make sure we have 3 error bar colours
orig_ERROR_BAR_COLOURS = ERROR_BAR_COLOURS
ERROR_BAR_COLOURS = rep(c(rgb(1,0,0,alpha=0)), times=3)
if (length(PLOT_BARS)==1){
if (PLOT_BARS==TRUE){
ERROR_BAR_COLOURS = orig_ERROR_BAR_COLOURS
}
}
if (sum(PLOT_BARS=="stdev")>0){
ERROR_BAR_COLOURS[1] = orig_ERROR_BAR_COLOURS[1]
}
if (sum(PLOT_BARS=="sterr")>0){
ERROR_BAR_COLOURS[2] = orig_ERROR_BAR_COLOURS[2]
}
if (sum(PLOT_BARS=="ci")>0){
ERROR_BAR_COLOURS[3] = orig_ERROR_BAR_COLOURS[3]
}
### Do we have to perform Mann-Whitney mean comparison test across the explanatory variable/s?
if (length(MANN_WHITNEY) != ncol(df)-1) {
MANN_WHITNEY = rep(MANN_WHITNEY, times=ncol(df)-1)
}
### Do we have to perform Tukey's honest significant difference test across the explanatory variable/s?
if (length(HSD) != ncol(df)-1) {
HSD = rep(HSD, times=ncol(df)-1)
}
### Do we have to regress the response variable against the explanatory variable/s?
if (length(REGRESS) != ncol(df)-1) {
REGRESS = rep(REGRESS, times=ncol(df)-1)
}
### iterate across explanatory variables defined by the formula
OUT = list()
if (length(explanatory_var_names) > 1){
### define layout if we have more than one explanatory variable
### otherwise don't set the layout so the user can define their own layout and print multiple plots
orig_par = par(no.readonly=TRUE)
on.exit(par(orig_par))
m = length(explanatory_var_names)
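### e.g. m=2 gives a 1x2 layout, m=4 a 2x2 layout, and m=5 a 2x3 layout (near-square grid)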
par(mfrow=c(round(sqrt(m)), (floor(sqrt(m)) + ceiling(sqrt(m) %% floor(sqrt(m))))))
}
for (i in 1:length(explanatory_var_names)){
# i = 4
message("======================================================")
message(paste0("Violin Plotting: ", explanatory_var_names[i]))
message("======================================================")
VIOPLOT_LETTERS2NUMS = plot_violin_1x(dat=df,
response_variable_name=response_var_name,
explanatory_variable_name=explanatory_var_names[i],
title=TITLE[i],
xlab=XLAB[i],
ylab=YLAB,
COLOURS=VIOLIN_COLOURS[[i]],
BAR_COLOURS=ERROR_BAR_COLOURS,
CI=(1-ALPHA)*100,
XTICKS=CATEGORICAL[i],
LOG=LOGX[i],
BASE=LOGX_BASE[i])
if (SHOW_MEANS | SHOW_SAMPLE_SIZE){
message("======================================================")
message(paste0("Means and/or sample sizes for: ", explanatory_var_names[i]))
message("======================================================")
MEANS_SIZES_out = show_means_sample_sizes(formula=formula,
data=data,
explanatory_variable_name=explanatory_var_names[i],
SHOW_MEANS=SHOW_MEANS,
SHOW_SAMPLE_SIZE=SHOW_SAMPLE_SIZE)
} else {MEANS_SIZES_out = NULL}
if (MANN_WHITNEY[i]){
message("======================================================")
message(paste0("Mann-Whitney Grouping (takes precedence over HSD): ", explanatory_var_names[i]))
message("======================================================")
MEAN_COMPARISON_out = mean_comparison_HSD(formula=formula,
data=data,
explanatory_variable_name=explanatory_var_names[i],
alpha=ALPHA,
LOG=LOGX[i],
BASE=LOGX_BASE[i],
PLOT=MANN_WHITNEY[i])
} else if (HSD[i]){
message("======================================================")
message(paste0("HSD Grouping: ", explanatory_var_names[i]))
message("======================================================")
MEAN_COMPARISON_out = mean_comparison_HSD(formula=formula,
data=data,
explanatory_variable_name=explanatory_var_names[i],
alpha=ALPHA,
LOG=LOGX[i],
BASE=LOGX_BASE[i],
PLOT=HSD[i])
} else {MEAN_COMPARISON_out = NULL}
if (REGRESS[i]){
message("======================================================")
message(paste0("Linear Regressing: ", explanatory_var_names[i]))
message("======================================================")
REGRESS_out = plot_regression_line(dat=df,
response_variable_name=response_var_name,
explanatory_variable_name=explanatory_var_names[i],
LOG=LOGX[i],
BASE=LOGX_BASE[i],
PLOT=TRUE,
LINE_COL="gray")
} else {REGRESS_out = NULL}
OUT[[i]] = c(MEANS_SIZES_out=MEANS_SIZES_out, MEAN_COMPARISON_out=MEAN_COMPARISON_out, REGRESS_out=REGRESS_out)
}
return(OUT)
}
# file: /scratch/gouwar.j/cran-all/cranData/violinplotter/R/violinplotter.R
#' histoplot
#'
#' Produce histogram plot(s) of the given (grouped) values with enhanced annotation and colour per group. Includes customisation of colours for each aspect of the histogram, boxplot, and separate histograms. This supports input of data as a list or formula, being backwards compatible with \code{\link[vioplot]{vioplot}} (0.2) and taking input in a formula as used for \code{\link[graphics]{boxplot}}.
#'
#' @name histoplot
#' @aliases histoplot
#' @param x for specifying data from which the boxplots are to be produced. Either a numeric vector, or a single list containing such vectors. Additional unnamed arguments specify further data as separate vectors (each corresponding to a component boxplot). NAs are allowed in the data.
#' @param ... additional data vectors or formula parameters. For the formula method, named arguments to be passed to the default method.
#' @param formula a formula, such as y ~ grp, where y is a numeric vector of data values to be split into groups according to the grouping variable grp (usually a factor).
#' @param data a data.frame (or list) from which the variables in formula should be taken.
#' @param use.cols logical indicating if columns (by default) or rows (use.cols = FALSE) should be plotted.
#' @param subset an optional vector specifying a subset of observations to be used for plotting.
#' @param drop,sep,lex.order defines groups to plot from formula, passed to \code{split.default}, see there.
#' @param breaks the breakpoints for the histogram bins, as explained in \code{\link[graphics]{hist}}
#' @param xlim,ylim numeric vectors of length 2, giving the x and y coordinates ranges.
#' @param yaxt A character which specifies the y axis type. Specifying "n" suppresses plotting.
#' @param ylog,xlog A logical value (see log in \code{\link[graphics]{plot.default}}). If ylog is TRUE, a logarithmic scale is in use (e.g., after plot(*, log = "y")). For horizontal = TRUE then, if xlog is TRUE, a logarithmic scale is in use (e.g., after plot(*, log = "x")). For a new device, it defaults to FALSE, i.e., linear scale.
#' @param log Logarithmic scale if log = "y" or TRUE. Invokes ylog = TRUE. If horizontal is TRUE then invokes xlog = TRUE.
#' @param logLab Increments for labelling y-axis on log-scale, defaults to numbers starting with 1, 2, 5, and 10.
#' @param names one label, or a vector of labels for the data must match the number of data given
#' @param col Graphical parameter for fill colour of the histogram(s) polygon. NA for no fill colour. If col is a vector, it specifies the colour per histogram, and colours are reused if necessary.
#' @param border Graphical parameters for the colour of the histogram border passed to lines. NA for no border. If border is a vector, it specifies the colour per histogram, and colours are reused if necessary.
#' @param lty,lwd Graphical parameters for the histogram passed to lines and polygon
#' @param rectCol Graphical parameters to control fill colour of the box. NA for no fill colour. If col is a vector, it specifies the colour per histogram, and colours are reused if necessary.
#' @param lineCol Graphical parameters to control colour of the box outline and whiskers. NA for no border. If lineCol is a vector, it specifies the colour per histogram, and colours are reused if necessary.
#' @param pchMed Graphical parameters to control shape of the median point. If pchMed is a vector, it specifies the shape per histogram.
#' @param colMed,colMed2 Graphical parameters to control colour of the median point. If colMed is a vector, it specifies the colour per histogram. colMed specifies the fill colour in all cases unless pchMed is 21:25 in which case colMed is the border colour and colMed2 is the fill colour.
#' @param drawRect logical. The box is drawn if TRUE.
#' @param areaEqual logical. Density plots checked for equal area if TRUE. wex must be scalar, relative widths of histograms depend on area.
#' @param at position of each histogram. Default to 1:n
#' @param add logical. if FALSE (default) a new plot is created
#' @param wex relative expansion of the histogram. If wex is a vector, it specifies the area/width size per histogram and sizes are reused if necessary.
#' @param horizontal logical. To use horizontal or vertical histograms. Note that log scale can only be used on the x-axis for horizontal histograms, and on the y-axis otherwise.
#' @param main,sub,xlab,ylab graphical parameters passed to plot.
#' @param cex A numerical value giving the amount by which plotting text should be magnified relative to the default.
#' @param cex.axis The magnification to be used for y axis annotation relative to the current setting of cex.
#' @param cex.names The magnification to be used for x axis annotation relative to the current setting of cex. Takes the value of cex.axis if not given.
#' @param cex.lab The magnification to be used for x and y labels relative to the current setting of cex.
#' @param cex.main The magnification to be used for main titles relative to the current setting of cex.
#' @param cex.sub The magnification to be used for sub-titles relative to the current setting of cex.
#' @param na.action a function which indicates what should happen when the data contain NAs. The default is to ignore missing values in either the response or the group.
#' @param na.rm logical value indicating whether NA values should be stripped before the computation proceeds. Defaults to TRUE.
#' @param side defaults to "both". Assigning "left" or "right" enables one sided plotting of histograms. May be applied as a scalar across all groups.
#' @param axes,frame.plot,panel.first,panel.last,asp,line,outer,adj,ann,ask,bg,bty,cin,col.axis,col.lab,col.main,col.sub,cra,crt,csi,cxy,din,err,family,fg,fig,fin,font,font.axis,font.lab,font.main,font.sub,lab,las,lend,lheight,ljoin,lmitre,mai,mar,mex,mfcol,mfg,mfrow,mgp,mkh,new,oma,omd,omi,page,pch,pin,plt,ps,pty,smo,srt,tck,tcl,usr,xaxp,xaxs,xaxt,xpd,yaxp,yaxs,ylbias Arguments to be passed to methods, such as graphical parameters (see \code{\link[graphics]{par}})).
#' @keywords plot graphics histogram
#' @import sm
#' @importFrom zoo rollmean
#' @importFrom stats median na.omit quantile
#' @importFrom graphics Axis axis box lines par plot.new plot.window plot.xy points polygon rect title
#' @importFrom grDevices boxplot.stats dev.flush dev.hold dev.interactive devAskNewPage xy.coords
#' @export
#' @examples
#'
#' # box- vs histogram-plot
#' par(mfrow=c(2,1))
#' mu<-2
#' si<-0.6
#' bimodal<-c(rnorm(1000,-mu,si),rnorm(1000,mu,si))
#' uniform<-runif(2000,-4,4)
#' normal<-rnorm(2000,0,3)
#' histoplot(bimodal,uniform,normal)
#' boxplot(bimodal,uniform,normal)
#'
#' # add to an existing plot
#' x <- rnorm(100)
#' y <- rnorm(100)
#' plot(x, y, xlim=c(-5,5), ylim=c(-5,5))
#' histoplot(x, col="tomato", horizontal=TRUE, at=-4, add=TRUE,lty=2, rectCol="gray")
#' histoplot(y, col="cyan", horizontal=FALSE, at=-4, add=TRUE,lty=2)
#'
#' # formula input
#' data("iris")
#' histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length",
#' col=c("lightgreen", "lightblue", "palevioletred"))
#' legend("topleft", legend=c("setosa", "versicolor", "virginica"),
#' fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
#'
#' data("diamonds", package = "ggplot2")
#' palette <- RColorBrewer::brewer.pal(9, "Pastel1")
#' par(mfrow=c(3, 1))
#' histoplot(price ~ cut, data = diamonds, las = 1, col = palette)
#' histoplot(price ~ clarity, data = diamonds, las = 2, col = palette)
#' histoplot(price ~ color, data = diamonds, las = 2, col = palette)
#' par(mfrow=c(3, 1))
#'
#' #generate example data
#' data_one <- rnorm(100)
#' data_two <- rnorm(50, 1, 2)
#'
#' #generate histogram plot with similar functionality to histoplot
#' histoplot(data_one, data_two, col="magenta")
#'
#' #note histoplot defaults to a greyscale plot
#' histoplot(data_one, data_two)
#'
#' #colours can be customised separately, with axis labels, legends, and titles
#' histoplot(data_one, data_two, col=c("red","blue"), names=c("data one", "data two"),
#' main="data histogram", xlab="data class", ylab="data read")
#' legend("topleft", fill=c("red","blue"), legend=c("data one", "data two"))
#'
#' #colours can be customised for the histogram fill and border separately
#' histoplot(data_one, data_two, col="grey85", border="purple", names=c("data one", "data two"),
#' main="data histogram", xlab="data class", ylab="data read")
#'
#' #colours can also be customised for the boxplot rectangle and lines (border and whiskers)
#' histoplot(data_one, data_two, col="grey85", rectCol="lightblue", lineCol="blue",
#' border="purple", names=c("data one", "data two"),
#' main="data histogram", xlab="data class", ylab="data read")
#'
#' #these colours can also be customised separately for each histogram
#' histoplot(data_one, data_two, col=c("skyblue", "plum"), rectCol=c("lightblue", "palevioletred"),
#' lineCol="blue", border=c("royalblue", "purple"), names=c("data one", "data two"),
#' main="data histogram", xlab="data class", ylab="data read")
#'
#' #this applies to any number of histograms, given that colours are provided for each
#' histoplot(data_one, data_two, rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4),
#' col=c("red", "orange", "green", "blue", "violet"),
#' rectCol=c("palevioletred", "peachpuff", "lightgreen", "lightblue", "plum"),
#' lineCol=c("red4", "orangered", "forestgreen", "royalblue", "mediumorchid"),
#' border=c("red4", "orangered", "forestgreen", "royalblue", "mediumorchid"),
#' names=c("data one", "data two", "data three", "data four", "data five"),
#' main="data histogram", xlab="data class", ylab="data read")
#'
#' #The areaEqual parameter scales with width of histograms
#' #histograms will have equal density area (including missing tails) rather than equal maximum width
#' histoplot(data_one, data_two, areaEqual=TRUE)
#'
#' histoplot(data_one, data_two, areaEqual=TRUE,
#' col=c("skyblue", "plum"), rectCol=c("lightblue", "palevioletred"),
#' lineCol="blue", border=c("royalblue", "purple"), names=c("data one", "data two"),
#' main="data histogram", xlab="data class", ylab="data read")
#'
#' histoplot(data_one, data_two, rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4),
#' areaEqual=TRUE, col=c("red", "orange", "green", "blue", "violet"),
#' rectCol=c("palevioletred", "peachpuff", "lightgreen", "lightblue", "plum"),
#' lineCol=c("red4", "orangered", "forestgreen", "royalblue", "mediumorchid"),
#' border=c("red4", "orangered", "forestgreen", "royalblue", "mediumorchid"),
#' names=c("data one", "data two", "data three", "data four", "data five"),
#' main="data histogram", xlab="data class", ylab="data read")
#' @export
#' @usage NULL
histoplot <- function(x, ...) {
UseMethod("histoplot")
}
#' Draw a histogram plot for each Column (Row) of a Matrix
#'
#' Interpreting the columns (or rows) of a matrix as different groups, draw a histogram plot for each.
#'
#' @aliases histogram.matrix histoplot.matrix
#' @param x a numeric matrix.
#' @param use.cols logical indicating if columns (by default) or rows (use.cols = FALSE) should be plotted.
#' @param ... Further arguments to \code{\link[vioplot]{histoplot}}.
#' @rdname histoplot
#' @export
histoplot.matrix <- function (x, use.cols = TRUE, ...)
{
groups <- if (use.cols) {
split(c(x), rep.int(1L:ncol(x), rep.int(nrow(x), ncol(x))))
}
else split(c(x), seq(nrow(x)))
if (length(nam <- dimnames(x)[[1 + use.cols]]))
names(groups) <- nam
invisible(histoplot(groups, ...))
}
#' @rdname histoplot
#' @export
histoplot.list <- function (x, ...){
ind <- sapply(x, is.numeric)
if(all(!ind)){
stop(paste("elements are not numeric: ", names(x)[!sapply(x, is.numeric)]))
}
if(any(!ind)){
warning(paste("some elements are not numeric: ", names(x)[!sapply(x, is.numeric)]))
x <- x[sapply(x, is.numeric)]
}
invisible(histoplot.default(x, ...))
}
#' @rdname histoplot
#' @export
histoplot.data.frame <- histoplot.list
#' @rdname histoplot
#' @export
histoplot.matrix <- histoplot.matrix
#' @rdname histoplot
#' @export
histoplot.formula <-
function (formula, data = NULL, ..., subset, na.action = NULL,
add = FALSE, ann = !add, horizontal = FALSE, side = "both",
xlab = mklab(y_var = horizontal), ylab = mklab(y_var = !horizontal), names=NULL,
drop = FALSE, sep = ".", lex.order = FALSE)
{
if (missing(formula) || (length(formula) != 3L)){
stop("'formula' missing or incorrect")
}
if(add && side != "both"){
if(!is.null(names)) warning("Warning: names can only be changed on first call of histoplot (when add = FALSE)\n")
if(!missing(xlab)) warning("Warning: x-axis labels can only be changed on first call of histoplot (when add = FALSE)\n")
if(!missing(ylab)) warning("Warning: y-axis labels can only be changed on first call of histoplot (when add = FALSE)\n")
}
if (missing(xlab) || missing(ylab)){
mklab <- function(y_var){
if(y_var){
names(mf)[response]
} else {
paste(names(mf)[-response], collapse = " : ")
}
}
}
m <- match.call(expand.dots = FALSE)
if (is.matrix(eval(m$data, parent.frame()))){
m$data <- as.data.frame(data)
}
m$... <- m$drop <- m$sep <- m$lex.order <- NULL
m$xlab <- m$ylab <- m$add <- m$ann <- m$horizontal <- NULL
m$names <- m$side <- NULL
m$na.action <- na.action
m[[1L]] <- quote(stats::model.frame.default)
mf <- eval(m, parent.frame())
response <- attr(attr(mf, "terms"), "response")
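# `response` indexes the y column of the model frame; mf[-response] holds the grouping variables passed to split() below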
if(add){
xlab <- ylab <- NA
}
x <- split(mf[[response]], mf[-response], drop = drop,
sep = sep, lex.order = lex.order)
histoplot(x, xlab = xlab, ylab = ylab, names = names,
add = add, ann = ann, horizontal = horizontal, side = side, ...)
}
#' @rdname histoplot
#' @export
histoplot.default <-
function (x, ..., data = NULL, breaks = 'Sturges', xlim = NULL, ylim = NULL, names = NULL,
horizontal = FALSE, col = "grey50", border = par()$fg, lty = 1,
lwd = 1, rectCol = par()$fg, lineCol = par()$fg,
pchMed = 19, colMed = "white", colMed2 = "grey 75",
at, add = FALSE, wex = 1, drawRect = TRUE, areaEqual=FALSE,
axes = TRUE, frame.plot = axes, panel.first = NULL, panel.last = NULL, asp = NA,
main="", sub="", xlab=NA, ylab=NA, line = NA, outer = FALSE,
xlog = NA, ylog=NA, adj=NA, ann = NA, ask=NA, bg=NA, bty=NA,
cex=NA, cex.axis=NA, cex.lab=NA, cex.main=NA, cex.names=NULL, cex.sub=NA,
cin=NA, col.axis=NA, col.lab=NA, col.main=NA, col.sub=NA,
cra=NA, crt=NA, csi=NA,cxy=NA, din=NA, err=NA, family=NA, fg=NA,
fig=NA, fin=NA, font=NA, font.axis=NA, font.lab=NA, font.main=NA, font.sub=NA,
lab=NA, las=NA, lend=NA, lheight=NA, ljoin=NA, lmitre=NA, mai=NA, mar=NA, mex=NA,
mfcol=NA, mfg=NA, mfrow=NA, mgp=NA, mkh=NA, new=NA, oma=NA, omd=NA, omi=NA,
page=NA, pch=NA, pin=NA, plt=NA, ps=NA, pty=NA, smo=NA, srt=NA, tck=NA, tcl=NA,
usr=NA, xaxp=NA, xaxs=NA, xaxt=NA, xpd=NA, yaxp=NA, yaxs=NA, yaxt=NA, ylbias=NA,
log="", logLab=c(1,2,5),
na.action = NULL, na.rm = T, side = "both")
{
#assign graphical parameters if not given
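#(any graphical argument left as NA picks up the current par() value of the same name)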
for(ii in 1:length(names(par()))){
if(is.na(get(names(par())[ii])[1])) assign(names(par()[ii]), unlist(par()[[ii]]))
}
if(add && side != "both"){
if(!is.null(names)) warning("Warning: names can only be changed on first call of histoplot (when add = FALSE)\n")
if(!is.na(xlab)) warning("Warning: x-axis labels can only be changed on first call of histoplot (when add = FALSE)\n")
if(!is.na(ylab)) warning("Warning: y-axis labels can only be changed on first call of histoplot (when add = FALSE)\n")
if(!missing(main)) warning("Warning: main title can only be changed on first call of histoplot (when add = FALSE)\n")
if(!missing(sub)) warning("Warning: subtitle can only be changed on first call of histoplot (when add = FALSE)\n")
}
if(!is.list(x)){
datas <- list(x, ...)
} else{
datas <- lapply(x, unlist)
if(is.null(names)){
names <- names(datas)
}
}
datas <- lapply(datas, function(x){
if(all(x == unique(x)[1]) & length(x) > 100){
unique(x)[1]
} else {
x
}
})
if(is.character(log)) if("y" %in% unlist(strsplit(log, ""))) log <- TRUE
if(is.na(xlog) | (horizontal == TRUE & (log == FALSE | log == ""))) xlog <- FALSE
log <- ifelse(log == TRUE, "y", "")
if(log == 'x' | log == 'xy' | xlog == TRUE){
if(horizontal | log == "xy"){
log <- TRUE
} else {
log <- FALSE
ylog <- FALSE
}
xlog <- FALSE
}
if(log == TRUE | ylog == TRUE){
ylog <- TRUE
log <- "y"
} else {
log <- ""
}
if(ylog){
#check data is compatible with log scale
if(all(unlist(datas) <= 0)){
ylog <- FALSE
warning("log scale cannot be used with non-positive data")
} else {
#log-scale data
datas <- datas #lapply(datas, function(x) log(unlist(x)))
}
}
if(is.null(na.action)) na.action <- na.omit
datas <- lapply(datas, function(data) data[!sapply(data, is.infinite)]) #drop infinite values before binning
if(na.rm) datas <- lapply(datas, na.action)
n <- length(datas)
#if(is.list(datas)) datas <- as.data.frame(datas)
if (missing(at)){ at <- 1:n }
#upper <- vector(mode = "numeric", length = n)
#lower <- vector(mode = "numeric", length = n)
q1 <- vector(mode = "numeric", length = n)
q2 <- vector(mode = "numeric", length = n)
q3 <- vector(mode = "numeric", length = n)
med <- vector(mode = "numeric", length = n)
base <- vector(mode = "list", length = n)
height <- vector(mode = "list", length = n)
area_check <- vector(mode = "list", length = n)
baserange <- c(Inf, -Inf)
args <- list(plot = FALSE, breaks = breaks)
radj <- ifelse(side == "right", 0, 1)
ladj <- ifelse(side == "left", 0, 1)
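#radj/ladj zero out the left or right half-width so that side = "right" or "left" draws one-sided histograms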
boxwex <- wex
if(areaEqual){
for (i in 1:n) {
data <- unlist(datas[[i]])
data.min <- min(data, na.rm = na.rm)
data.max <- max(data, na.rm = na.rm)
q1[i] <- quantile(data, 0.25)
q2[i] <- quantile(data, 0.5)
q3[i] <- quantile(data, 0.75)
med[i] <- median(data)
iqd <- q3[i] - q1[i]
#upper[i] <- min(q3[i] + range * iqd, data.max)
#lower[i] <- max(q1[i] - range * iqd, data.min)
#est.xlim <- c(min(lower[i], data.min), max(upper[i], data.max))
smout <- do.call("hist", c(list(data), args))
Avg.pos <- mean(smout$mids)
xt <- diff(smout$mids[smout$mids<Avg.pos])
yt <- rollmean(smout$mids[smout$mids<Avg.pos],2)
area_check[[i]] <- sum(xt*yt)
}
if(length(wex)>1){
warning("wex may not be a vector if areaEqual is TRUE")
print("using first element of wex")
wex <- wex[1]
}
wex <-unlist(area_check)/max(unlist(area_check))*wex
}
for (i in 1:n) {
data <- unlist(datas[[i]])
data.min <- min(data, na.rm = na.rm)
data.max <- max(data, na.rm = na.rm)
q1[i] <- quantile(data, 0.25)
q2[i] <- quantile(data, 0.5)
q3[i] <- quantile(data, 0.75)
med[i] <- median(data)
iqd <- q3[i] - q1[i]
#upper[i] <- min(q3[i] + range * iqd, data.max)
#lower[i] <- max(q1[i] - range * iqd, data.min)
#est.xlim <- c(min(lower[i], data.min), max(upper[i], data.max))
smout <- do.call("hist", c(list(data), args))
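#scale bar heights so the tallest bar extends 0.4 x-axis units (times wex) to each plotted side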
hscale <- 0.4/max(smout$density) * ifelse(length(wex)>1, wex[i], wex)
base[[i]] <- smout$breaks
height[[i]] <- smout$density * hscale
t <- range(base[[i]])
baserange[1] <- min(baserange[1], t[1])
baserange[2] <- max(baserange[2], t[2])
}
if (!add) {
if (is.null(xlim)) {
xlim <- if (n == 1){
at + c(-0.5, 0.5)
} else {
range(at) + min(diff(at))/2 * c(-1, 1)
}
} else {
xlim.default <- if (n == 1){
at + c(-0.5, 0.5)
} else {
range(at) + min(diff(at))/2 * c(-1, 1)
}
print(paste0("Using c(", xlim[1],",", xlim[2], ") as input for xlim, note that default values for these dimensions are c(", xlim.default[1],",", xlim.default[2], ")"))
}
if (is.null(ylim)) {
ylim <- baserange
}
}
if (is.null(names)) {
label <- 1:n
}
else {
label <- names
}
boxwidth <- 0.05 * ifelse(length(boxwex)>1, boxwex[i], boxwex)
if (!add){
plot.new()
if(!horizontal){
plot.window(xlim, ylim, log = log, asp = asp, bty = bty, cex = cex, xaxs = xaxs, yaxs = yaxs, lab = lab, mai = mai, mar = mar, mex = mex, mfcol = mfcol, mfrow = mfrow, mfg = mfg, xlog = xlog, ylog = ylog)
} else {
plot.window(ylim, xlim, log = ifelse(log == "y", "x", ""), asp = asp, bty = bty, cex = cex, xaxs = xaxs, yaxs = yaxs, lab = lab, mai = mai, mar = mar, mex = mex, mfcol = mfcol, mfrow = mfrow, mfg = mfg, xlog = ylog, ylog = xlog)
}
}
panel.first
if (!horizontal) {
if (!add) {
plot.window(xlim, ylim, log = log, asp = asp, bty = bty, cex = cex, xaxs = xaxs, yaxs = yaxs, lab = lab, mai = mai, mar = mar, mex = mex, mfcol = mfcol, mfrow = mfrow, mfg = mfg, xlog = xlog, ylog = ylog)
xaxp <- par()$xaxp
yaxp <- par()$yaxp
if(yaxt !="n"){
if(ylog){
#log_axis_label <- log_axis_label[log_axis >= exp(par("usr")[3])]
#log_axis <- log_axis[log_axis >= exp(par("usr")[3])]
#log_axis_label <- log_axis_label[log_axis <= exp(par("usr")[4])]
#log_axis <- log_axis[log_axis <= exp(par("usr")[4])]
Axis(unlist(datas), side = 2, cex.axis = cex.axis, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
}
} else {
Axis(unlist(datas), side = 2, cex.axis = cex.axis, col.axis = col.axis, font.axis = font.axis, mgp = mgp, yaxp = yaxp, tck = tck, tcl = tcl, las = las)
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, xaxp = xaxp, tck = tck, tcl = tcl, las = las)
}
}
} else {
if(ylog){
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
}
} else {
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, xaxp = xaxp, tck = tck, tcl = tcl, las = las)
}
}
}
}
if (frame.plot) {
box(lty = lty, lwd = lwd)
}
for (i in 1:n) {
colp <- ifelse(length(col)>1,col[1+(i-1)%%length(col)], col)
borderp <- ifelse(length(border)>1, border[1+(i-1)%%length(border)], border)
nB <- length(base[[i]])
#xp <- c(at[i] - radj*height[[i]], rev(at[i] + ladj*height[[i]]))
#yp <- c(base[[i]], rev(base[[i]]))
#polygon(xp, yp, col=colp, border=borderp,
#lty = lty, lwd = lwd, xpd = xpd, lend = lend, ljoin = ljoin, lmitre = lmitre)
x0 <- at[i]- radj*height[[i]]
y0 <- base[[i]][-nB]
x1 <- at[i]+ ladj*height[[i]]
y1 <- base[[i]][-1L]
rect(x0, y0, x1, y1,
col = colp, border = borderp, lty = lty)
if (drawRect) {
#lines(at[c(i, i)], c(lower[i], upper[i]), lwd = lwd,
# lty = lty, col = ifelse(length(lineCol)>1, lineCol[1+(i-1)%%length(lineCol)], lineCol), lend = lend, ljoin = ljoin, lmitre = lmitre)
rect(at[i] - radj*ifelse(length(boxwidth)>1, boxwidth[i], boxwidth)/2, q1[i], at[i] + ladj*ifelse(length(boxwidth)>1, boxwidth[i], boxwidth)/2,
q3[i], col = ifelse(length(rectCol)>1, rectCol[1+(i-1)%%length(rectCol)], rectCol), border = ifelse(length(lineCol)>1, lineCol[1+(i-1)%%length(lineCol)], lineCol), xpd = xpd, lend = lend, ljoin = ljoin, lmitre = lmitre)
points(at[i], med[i], pch = ifelse(length(pchMed)>1, pchMed[1+(i-1)%%length(pchMed)], pchMed), col = ifelse(length(colMed)>1, colMed[1+(i-1)%%length(colMed)], colMed), bg = ifelse(length(colMed2)>1, colMed2[1+(i-1)%%length(colMed2)], colMed2), cex = cex, lwd = lwd, lty = lty)
}
}
}
else {
if(log == "y" || ylog == TRUE){
log <- "x"
xlog <- TRUE
ylog <- FALSE
}
if (!add) {
plot.window(ylim, xlim, log = log, asp = asp, bty = bty, cex = cex, xaxs = xaxs, yaxs = yaxs, lab = lab, mai = mai, mar = mar, mex = mex, mfcol = mfcol, mfrow = mfrow, mfg = mfg, xlog = xlog, ylog = ylog)
xaxp <- par()$xaxp
yaxp <- par()$yaxp
if(yaxt !="n"){
if(xlog){
#log_axis_label <- log_axis_label[log_axis >= exp(par("usr")[3])]
#log_axis <- log_axis[log_axis >= exp(par("usr")[3])]
#log_axis_label <- log_axis_label[log_axis <= exp(par("usr")[4])]
#log_axis <- log_axis[log_axis <= exp(par("usr")[4])]
Axis(unlist(datas), side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 2, cex.axis = cex.axis, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
}
} else {
Axis(unlist(datas), side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, xaxp = xaxp, tck = tck, tcl = tcl, las = las)
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 2, cex.axis = cex.axis, col.axis = col.axis, font.axis = font.axis, mgp = mgp, yaxp = yaxp, tck = tck, tcl = tcl, las = las)
}
}
} else {
if(ylog){
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
}
} else {
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, xaxp = xaxp, tck = tck, tcl = tcl, las = las)
}
}
}
}
if (frame.plot) {
box(lty = lty, lwd = lwd)
}
for (i in 1:n) {
colp <- ifelse(length(col)>1,col[1+(i-1)%%length(col)], col)
borderp <- ifelse(length(border)>1, border[1+(i-1)%%length(border)], border)
nB <- length(height[[i]])
#xp <- c(at[i] - radj*height[[i]], rev(at[i] + ladj*height[[i]]))
#yp <- c(base[[i]], rev(base[[i]]))
#polygon(xp, yp, col=colp, border=borderp,
#lty = lty, lwd = lwd, xpd = xpd, lend = lend, ljoin = ljoin, lmitre = lmitre)
x0 <- at[i]- radj*height[[i]]
y0 <- base[[i]][-nB]
x1 <- at[i]+ ladj*height[[i]]
y1 <- base[[i]][-1L]
rect(x0, y0, x1, y1,
col = colp, border = borderp, lty = lty)
if (drawRect) {
#lines(c(lower[i], upper[i]), at[c(i, i)], lwd = lwd,
# lty = lty, col = ifelse(length(lineCol)>1, lineCol[1+(i-1)%%length(lineCol)], lineCol), lend = lend, ljoin = ljoin, lmitre = lmitre)
rect(q1[i], at[i] - radj*ifelse(length(boxwidth)>1, boxwidth[i], boxwidth)/2, q3[i], at[i] +
ladj*ifelse(length(boxwidth)>1, boxwidth[i], boxwidth)/2, col = ifelse(length(rectCol)>1, rectCol[1+(i-1)%%length(rectCol)], rectCol), border = ifelse(length(lineCol)>1, lineCol[1+(i-1)%%length(lineCol)], lineCol), xpd = xpd, lend = lend, ljoin = ljoin, lmitre = lmitre)
points(med[i], at[i], pch = ifelse(length(pchMed)>1, pchMed[1+(i-1)%%length(pchMed)], pchMed), col = ifelse(length(colMed)>1, colMed[1+(i-1)%%length(colMed)], colMed), bg = ifelse(length(colMed2)>1, colMed2[1+(i-1)%%length(colMed2)], colMed2), cex = cex, lwd = lwd, lty = lty)
}
}
}
panel.last
if (ann) {
title(main = main, sub = sub, xlab = xlab, ylab = ylab, line = line, outer = outer, xpd = xpd, cex.main = cex.main, col.main = col.main, font.main = font.main)
}
invisible(list(
#upper = upper, lower = lower,
median = med, q1 = q1, q3 = q3))
}
# file: /scratch/gouwar.j/cran-all/cranData/vioplot/R/histoplot.R
#' Violin Plot
#'
#' Produce violin plot(s) of the given (grouped) values with enhanced annotation and colour per group. Includes customisation of colours for each aspect of the violin, boxplot, and separate violins. This supports input of data as a list or formula, being backwards compatible with \code{\link[vioplot]{vioplot}} (0.2) and taking input in a formula as used for \code{\link[graphics]{boxplot}}.
#'
#' @name vioplot
#' @aliases violinplot
#' @param x for specifying data from which the boxplots are to be produced. Either a numeric vector, or a single list containing such vectors. Additional unnamed arguments specify further data as separate vectors (each corresponding to a component boxplot). NAs are allowed in the data.
#' @param ... additional data vectors or formula parameters. For the formula method, named arguments to be passed to the default method.
#' @param formula a formula, such as y ~ grp, where y is a numeric vector of data values to be split into groups according to the grouping variable grp (usually a factor).
#' @param data a data.frame (or list) from which the variables in formula should be taken.
#' @param use.cols logical indicating if columns (by default) or rows (use.cols = FALSE) should be plotted.
#' @param subset an optional vector specifying a subset of observations to be used for plotting.
#' @param drop,sep,lex.order defines groups to plot from formula, passed to \code{split.default}, see there.
#' @param range a factor to calculate the upper/lower adjacent values
#' @param h the height for the density estimator, if omit as explained in sm.density, h will be set to an optimum. A vector of length one, two or three, defining the smoothing parameter. A normal kernel function is used and h is its standard deviation. If this parameter is omitted, a normal optimal smoothing parameter is used.
#' @param xlim,ylim numeric vectors of length 2, giving the x and y coordinates ranges.
#' @param yaxt A character which specifies the y axis type. Specifying "n" suppresses plotting.
#' @param ylog,xlog A logical value (see log in \code{\link[graphics]{plot.default}}). If ylog is TRUE, a logarithmic scale is in use (e.g., after plot(*, log = "y")). For horizontal = TRUE then, if xlog is TRUE, a logarithmic scale is in use (e.g., after plot(*, log = "x")). For a new device, it defaults to FALSE, i.e., linear scale.
#' @param log Logarithmic scale if log = "y" or TRUE. Invokes ylog = TRUE. If horizontal is TRUE then invokes xlog = TRUE.
#' @param logLab Increments for labelling y-axis on log-scale, defaults to numbers starting with 1, 2, 5, and 10.
#' @param names one label, or a vector of labels for the data must match the number of data given
#' @param col Graphical parameter for fill colour of the violin(s) polygon. NA for no fill colour. If col is a vector, it specifies the colour per violin, and colours are reused if necessary.
#' @param border Graphical parameters for the colour of the violin border passed to lines. NA for no border. If border is a vector, it specifies the colour per violin, and colours are reused if necessary.
#' @param lty,lwd Graphical parameters for the violin passed to lines and polygon
#' @param rectCol Graphical parameters to control fill colour of the box. NA for no fill colour. If col is a vector, it specifies the colour per violin, and colours are reused if necessary.
#' @param lineCol Graphical parameters to control colour of the box outline and whiskers. NA for no border. If lineCol is a vector, it specifies the colour per violin, and colours are reused if necessary.
#' @param pchMed Graphical parameters to control shape of the median point. If pchMed is a vector, it specifies the shape per violin.
#' @param colMed,colMed2 Graphical parameters to control colour of the median point. If colMed is a vector, it specifies the colour per violin. colMed specifies the fill colour in all cases unless pchMed is 21:25 in which case colMed is the border colour and colMed2 is the fill colour.
#' @param drawRect logical. The box is drawn if TRUE.
#' @param areaEqual logical. Density plots checked for equal area if TRUE. wex must be scalar, relative widths of violins depend on area.
#' @param at position of each violin. Default to 1:n
#' @param add logical. if FALSE (default) a new plot is created
#' @param wex relative expansion of the violin. If wex is a vector, it specifies the area/width size per violin and sizes are reused if necessary.
#' @param horizontal logical. To use horizontal or vertical violins. Note that log scale can only be used on the x-axis for horizontal violins, and on the y-axis otherwise.
#' @param main,sub,xlab,ylab graphical parameters passed to plot.
#' @param cex A numerical value giving the amount by which plotting text should be magnified relative to the default.
#' @param cex.axis The magnification to be used for y axis annotation relative to the current setting of cex.
#' @param cex.names The magnification to be used for x axis annotation relative to the current setting of cex. Takes the value of cex.axis if not given.
#' @param cex.lab The magnification to be used for x and y labels relative to the current setting of cex.
#' @param cex.main The magnification to be used for main titles relative to the current setting of cex.
#' @param cex.sub The magnification to be used for sub-titles relative to the current setting of cex.
#' @param na.action a function which indicates what should happen when the data contain NAs. The default is to ignore missing values in either the response or the group.
#' @param na.rm logical value indicating whether NA values should be stripped before the computation proceeds. Defaults to TRUE.
#' @param side defaults to "both". Assigning "left" or "right" enables one sided plotting of violins. May be applied as a scalar across all groups.
#' @param plotCentre defaults to "point", plotting a central point at the median. If "line" is given, a median line is plotted instead (subject to side).
#' @param axes,frame.plot,panel.first,panel.last,asp,line,outer,adj,ann,ask,bg,bty,cin,col.axis,col.lab,col.main,col.sub,cra,crt,csi,cxy,din,err,family,fg,fig,fin,font,font.axis,font.lab,font.main,font.sub,lab,las,lend,lheight,ljoin,lmitre,mai,mar,mex,mfcol,mfg,mfrow,mgp,mkh,new,oma,omd,omi,page,pch,pin,plt,ps,pty,smo,srt,tck,tcl,usr,xaxp,xaxs,xaxt,xpd,yaxp,yaxs,ylbias Arguments to be passed to methods, such as graphical parameters (see \code{\link[graphics]{par}})).
#' @keywords plot graphics violin
#' @import sm
#' @importFrom zoo rollmean
#' @importFrom stats median na.omit quantile
#' @importFrom graphics Axis axis box lines par plot.new plot.window plot.xy points polygon rect title
#' @importFrom grDevices boxplot.stats dev.flush dev.hold dev.interactive devAskNewPage xy.coords
#' @export
#' @examples
#'
#' # box- vs violin-plot
#' par(mfrow=c(2,1))
#' mu<-2
#' si<-0.6
#' bimodal<-c(rnorm(1000,-mu,si),rnorm(1000,mu,si))
#' uniform<-runif(2000,-4,4)
#' normal<-rnorm(2000,0,3)
#' vioplot(bimodal,uniform,normal)
#' boxplot(bimodal,uniform,normal)
#'
#' # add to an existing plot
#' x <- rnorm(100)
#' y <- rnorm(100)
#' plot(x, y, xlim=c(-5,5), ylim=c(-5,5))
#' vioplot(x, col="tomato", horizontal=TRUE, at=-4, add=TRUE,lty=2, rectCol="gray")
#' vioplot(y, col="cyan", horizontal=FALSE, at=-4, add=TRUE,lty=2)
#'
#' # formula input
#' data("iris")
#' vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length",
#' col=c("lightgreen", "lightblue", "palevioletred"))
#' legend("topleft", legend=c("setosa", "versicolor", "virginica"),
#' fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
#'
#' data("diamonds", package = "ggplot2")
#' palette <- RColorBrewer::brewer.pal(9, "Pastel1")
#' par(mfrow=c(3, 1))
#' vioplot(price ~ cut, data = diamonds, las = 1, col = palette)
#' vioplot(price ~ clarity, data = diamonds, las = 2, col = palette)
#' vioplot(price ~ color, data = diamonds, las = 2, col = palette)
#' par(mfrow=c(1, 1))
#'
#' #generate example data
#' data_one <- rnorm(100)
#' data_two <- rnorm(50, 1, 2)
#'
#' #generate violin plot with similar functionality to vioplot
#' vioplot(data_one, data_two, col="magenta")
#'
#' #note vioplot defaults to a greyscale plot
#' vioplot(data_one, data_two)
#'
#' #colours can be customised separately, with axis labels, legends, and titles
#' vioplot(data_one, data_two, col=c("red","blue"), names=c("data one", "data two"),
#' main="data violin", xlab="data class", ylab="data read")
#' legend("topleft", fill=c("red","blue"), legend=c("data one", "data two"))
#'
#' #colours can be customised for the violin fill and border separately
#' vioplot(data_one, data_two, col="grey85", border="purple", names=c("data one", "data two"),
#' main="data violin", xlab="data class", ylab="data read")
#'
#' #colours can also be customised for the boxplot rectangle and lines (border and whiskers)
#' vioplot(data_one, data_two, col="grey85", rectCol="lightblue", lineCol="blue",
#' border="purple", names=c("data one", "data two"),
#' main="data violin", xlab="data class", ylab="data read")
#'
#' #these colours can also be customised separately for each violin
#' vioplot(data_one, data_two, col=c("skyblue", "plum"), rectCol=c("lightblue", "palevioletred"),
#' lineCol="blue", border=c("royalblue", "purple"), names=c("data one", "data two"),
#' main="data violin", xlab="data class", ylab="data read")
#'
#' #this applies to any number of violins, given that colours are provided for each
#' vioplot(data_one, data_two, rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4),
#' col=c("red", "orange", "green", "blue", "violet"),
#' rectCol=c("palevioletred", "peachpuff", "lightgreen", "lightblue", "plum"),
#' lineCol=c("red4", "orangered", "forestgreen", "royalblue", "mediumorchid"),
#' border=c("red4", "orangered", "forestgreen", "royalblue", "mediumorchid"),
#' names=c("data one", "data two", "data three", "data four", "data five"),
#' main="data violin", xlab="data class", ylab="data read")
#'
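#' #one-sided violins can be overlaid to compare two distributions directly
#' #(a minimal sketch based on the side, plotCentre and add parameters described above)
#' vioplot(data_one, col="lightblue", side="left", plotCentre="line")
#' vioplot(data_two, col="palevioletred", side="right", plotCentre="line", add=TRUE)
#'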
#' #The areaEqual parameter scales with width of violins
#' #Violins will have equal density area (including missing tails) rather than equal maximum width
#' vioplot(data_one, data_two, areaEqual=TRUE)
#'
#' vioplot(data_one, data_two, areaEqual=TRUE,
#' col=c("skyblue", "plum"), rectCol=c("lightblue", "palevioletred"),
#' lineCol="blue", border=c("royalblue", "purple"), names=c("data one", "data two"),
#' main="data violin", xlab="data class", ylab="data read")
#'
#' vioplot(data_one, data_two, rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4),
#' areaEqual=TRUE, col=c("red", "orange", "green", "blue", "violet"),
#' rectCol=c("palevioletred", "peachpuff", "lightgreen", "lightblue", "plum"),
#' lineCol=c("red4", "orangered", "forestgreen", "royalblue", "mediumorchid"),
#' border=c("red4", "orangered", "forestgreen", "royalblue", "mediumorchid"),
#' names=c("data one", "data two", "data three", "data four", "data five"),
#' main="data violin", xlab="data class", ylab="data read")
#' @export
#' @usage NULL
vioplot <- function(x, ...) {
UseMethod("vioplot")
}
#' Draw a Violin plot for each Column (Row) of a Matrix
#'
#' Interpreting the columns (or rows) of a matrix as different groups, draw a violin plot for each.
#'
#' @aliases violin.matrix violinplot.matrix
#' @param x a numeric matrix.
#' @param use.cols logical indicating if columns (by default) or rows (use.cols = FALSE) should be plotted.
#' @param ... Further arguments to \code{\link[vioplot]{vioplot}}.
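#' @examples
#' #a minimal sketch: one violin per column of a simulated matrix
#' mat <- matrix(rnorm(300), ncol = 3, dimnames = list(NULL, c("a", "b", "c")))
#' vioplot(mat)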
#' @rdname vioplot
#' @export
vioplot.matrix <- function (x, use.cols = TRUE, ...)
{
groups <- if (use.cols) {
split(c(x), rep.int(1L:ncol(x), rep.int(nrow(x), ncol(x))))
}
else split(c(x), seq(nrow(x)))
if (length(nam <- dimnames(x)[[1 + use.cols]]))
names(groups) <- nam
invisible(vioplot(groups, ...))
}
#' @rdname vioplot
#' @export
vioplot.list <- function (x, ...){
  ind <- sapply(x, is.numeric)
  if(all(!ind)){
    stop(paste("elements are not numeric: ", names(x)[!ind]))
  }
  if(any(!ind)){
    warning(paste("some elements are not numeric: ", names(x)[!ind]))
    x <- x[ind]
  }
invisible(vioplot.default(x, ...))
}
#' @rdname vioplot
#' @export
vioplot.data.frame <- vioplot.list
#' @rdname vioplot
#' @export
vioplot.matrix <- vioplot.matrix
#' @rdname vioplot
#' @export
vioplot.formula <-
function (formula, data = NULL, ..., subset, na.action = NULL,
add = FALSE, ann = !add, horizontal = FALSE, side = "both",
xlab = mklab(y_var = horizontal), ylab = mklab(y_var = !horizontal), names=NULL,
drop = FALSE, sep = ".", lex.order = FALSE)
{
if (missing(formula) || (length(formula) != 3L)){
stop("'formula' missing or incorrect")
}
if(add && side != "both"){
if(!is.null(names)) warning("Warning: names can only be changed on first call of vioplot (when add = FALSE)
")
if(!missing(xlab)) warning("Warning: x-axis labels can only be changed on first call of vioplot (when add = FALSE)
")
if(!missing(ylab)) warning("Warning: y-axis labels can only be changed on first call of vioplot (when add = FALSE)
")
}
if (missing(xlab) || missing(ylab)){
mklab <- function(y_var){
if(y_var){
names(mf)[response]
} else {
paste(names(mf)[-response], collapse = " : ")
}
}
}
m <- match.call(expand.dots = FALSE)
if (is.matrix(eval(m$data, parent.frame())))
m$data <- as.data.frame(data)
m$... <- m$drop <- m$sep <- m$lex.order <- NULL
m$xlab <- m$ylab <- m$add <- m$ann <- m$horizontal <- NULL
m$names <- m$side <- NULL
m$na.action <- na.action
m[[1L]] <- quote(stats::model.frame.default)
mf <- eval(m, parent.frame())
response <- attr(attr(mf, "terms"), "response")
if(add){
xlab <- ylab <- NA
}
vioplot(split(mf[[response]], mf[-response], drop = drop,
sep = sep, lex.order = lex.order), xlab = xlab, ylab = ylab, names = names,
add = add, ann = ann, horizontal = horizontal, side = side, ...)
}
#' @rdname vioplot
#' @export
vioplot.default <-
function (x, ..., data = NULL, range = 1.5, h = NULL, xlim = NULL, ylim = NULL, names = NULL,
horizontal = FALSE, col = "grey50", border = par()$fg, lty = 1,
           lwd = 1, rectCol = par()$fg, lineCol = par()$fg, pchMed = 19, colMed = "white", colMed2 = "grey75",
at, add = FALSE, wex = 1, drawRect = TRUE, areaEqual=FALSE,
axes = TRUE, frame.plot = axes, panel.first = NULL, panel.last = NULL, asp = NA,
main="", sub="", xlab=NA, ylab=NA, line = NA, outer = FALSE,
xlog = NA, ylog=NA, adj=NA, ann = NA, ask=NA, bg=NA, bty=NA,
cex=NA, cex.axis=NA, cex.lab=NA, cex.main=NA, cex.names=NULL, cex.sub=NA,
cin=NA, col.axis=NA, col.lab=NA, col.main=NA, col.sub=NA,
cra=NA, crt=NA, csi=NA,cxy=NA, din=NA, err=NA, family=NA, fg=NA,
fig=NA, fin=NA, font=NA, font.axis=NA, font.lab=NA, font.main=NA, font.sub=NA,
lab=NA, las=NA, lend=NA, lheight=NA, ljoin=NA, lmitre=NA, mai=NA, mar=NA, mex=NA,
mfcol=NA, mfg=NA, mfrow=NA, mgp=NA, mkh=NA, new=NA, oma=NA, omd=NA, omi=NA,
page=NA, pch=NA, pin=NA, plt=NA, ps=NA, pty=NA, smo=NA, srt=NA, tck=NA, tcl=NA,
usr=NA, xaxp=NA, xaxs=NA, xaxt=NA, xpd=NA, yaxp=NA, yaxs=NA, yaxt=NA, ylbias=NA,
log="", logLab=c(1,2,5),
           na.action = NULL, na.rm = TRUE, side = "both", plotCentre = "point")
{
#assign graphical parameters if not given
for(ii in 1:length(names(par()))){
if(is.na(get(names(par())[ii])[1])) assign(names(par()[ii]), unlist(par()[[ii]]))
}
if(add && side != "both"){
if(!is.null(names)) warning("Warning: names can only be changed on first call of vioplot (when add = FALSE)
")
if(!is.na(xlab)) warning("Warning: x-axis labels can only be changed on first call of vioplot (when add = FALSE)
")
if(!is.na(ylab)) warning("vy-axis labels can only be changed on first call of vioplot (when add = FALSE)
")
if(!missing(main)) warning("Warning: main title can only be changed on first call of vioplot (when add = FALSE)
")
if(!missing(sub)) warning("Warning: subtitle can only be changed on first call of vioplot (when add = FALSE)
")
}
if(!is.list(x)){
datas <- list(x, ...)
} else{
datas <- lapply(x, unlist)
if(is.null(names)){
names <- names(datas)
}
}
datas <- lapply(datas, function(x){
if((all(x == na.omit(unique(x))[1] | is.na(x))) & length(x) > 100){
na.omit(unique(x))[1]
} else {
x
}
})
if(is.character(log)) if("y" %in% unlist(strsplit(log, ""))) log <- TRUE
if(is.na(xlog) | (horizontal == TRUE & (log == FALSE | log == ""))) xlog <- FALSE
log <- ifelse(log == TRUE, "y", "")
if(log == 'x' | log == 'xy' | xlog == TRUE){
if(horizontal | log == "xy"){
log <- TRUE
} else {
log <- FALSE
ylog <- FALSE
}
xlog <- FALSE
}
if(log == TRUE | ylog == TRUE){
ylog <- TRUE
log <- "y"
} else {
log <- ""
}
if(ylog){
#check data is compatible with log scale
if(all(unlist(datas) <= 0)){
ylog <- FALSE
warning("log scale cannot be used with non-positive data")
} else {
#log-scale data
datas <- datas #lapply(datas, function(x) log(unlist(x)))
}
}
if(is.null(na.action)) na.action <- na.omit
  # drop infinite values so that the density estimates and axis limits stay finite
  datas <- lapply(datas, function(data) data[!is.infinite(data)])
if(na.rm) datas <- lapply(datas, na.action)
n <- length(datas)
#if(is.list(datas)) datas <- as.data.frame(datas)
if (missing(at))
at <- 1:n
upper <- vector(mode = "numeric", length = n)
lower <- vector(mode = "numeric", length = n)
q1 <- vector(mode = "numeric", length = n)
q2 <- vector(mode = "numeric", length = n)
q3 <- vector(mode = "numeric", length = n)
med <- vector(mode = "numeric", length = n)
base <- vector(mode = "list", length = n)
height <- vector(mode = "list", length = n)
area_check <- vector(mode = "list", length = n)
baserange <- c(Inf, -Inf)
args <- list(display = "none")
radj <- ifelse(side == "right", 0, 1)
ladj <- ifelse(side == "left", 0, 1)
boxwex <- wex
if (!(is.null(h)))
args <- c(args, h = h)
if(plotCentre == "line") med.dens <- rep(NA, n)
if(areaEqual){
for (i in 1:n) {
data <- unlist(datas[[i]])
data.min <- min(data, na.rm = na.rm)
data.max <- max(data, na.rm = na.rm)
q1[i] <- quantile(data, 0.25)
q2[i] <- quantile(data, 0.5)
q3[i] <- quantile(data, 0.75)
med[i] <- median(data)
iqd <- q3[i] - q1[i]
upper[i] <- min(q3[i] + range * iqd, data.max)
lower[i] <- max(q1[i] - range * iqd, data.min)
est.xlim <- c(min(lower[i], data.min), max(upper[i],
data.max))
smout <- do.call("sm.density", c(list(data, xlim = est.xlim),
args))
if(plotCentre == "line"){
med.dat <- do.call("sm.density",
c(list(data, xlim=est.xlim,
eval.points=med[i], display = "none")))
med.dens[i] <- med.dat$estimate
}
Avg.pos <- mean(smout$eval.points)
xt <- diff(smout$eval.points[smout$eval.points<Avg.pos])
yt <- rollmean(smout$eval.points[smout$eval.points<Avg.pos],2)
area_check[[i]] <- sum(xt*yt)
}
    if(length(wex)>1){
      warning("wex may not be a vector if areaEqual is TRUE; using the first element of wex")
      wex <- wex[1]
    }
    # rescale the violin widths so that all violins have equal density area
    wex <- unlist(area_check)/max(unlist(area_check))*wex
}
for (i in 1:n) {
data <- unlist(datas[[i]])
data.min <- min(data, na.rm = na.rm)
data.max <- max(data, na.rm = na.rm)
q1[i] <- quantile(data, 0.25)
q2[i] <- quantile(data, 0.5)
q3[i] <- quantile(data, 0.75)
med[i] <- median(data)
iqd <- q3[i] - q1[i]
upper[i] <- min(q3[i] + range * iqd, data.max)
lower[i] <- max(q1[i] - range * iqd, data.min)
est.xlim <- c(min(lower[i], data.min), max(upper[i],
data.max))
smout <- do.call("sm.density", c(list(data, xlim = est.xlim),
args))
hscale <- 0.4/max(smout$estimate) * ifelse(length(wex)>1, wex[i], wex)
base[[i]] <- smout$eval.points
height[[i]] <- smout$estimate * hscale
t <- range(base[[i]])
baserange[1] <- min(baserange[1], t[1])
baserange[2] <- max(baserange[2], t[2])
if(plotCentre == "line"){
med.dat <- do.call("sm.density",
c(list(data, xlim=est.xlim,
eval.points=med[i], display = "none")))
med.dens[i] <- med.dat$estimate *hscale
}
}
if (!add) {
if (is.null(xlim)) {
xlim <- if (n == 1){
at + c(-0.5, 0.5)
} else {
range(at) + min(diff(at))/2 * c(-1, 1)
}
} else {
xlim.default <- if (n == 1){
at + c(-0.5, 0.5)
} else {
range(at) + min(diff(at))/2 * c(-1, 1)
}
print(paste0("Using c(", xlim[1],",", xlim[2], ") as input for xlim, note that default values for these dimensions are c(", xlim.default[1],",", xlim.default[2], ")"))
}
if (is.null(ylim)) {
ylim <- baserange
}
}
if (is.null(names)) {
label <- 1:n
}
else {
label <- names
}
  boxwidth <- 0.05 * boxwex # vectorised when boxwex (wex) is a vector; indexed per violin below
if (!add){
plot.new()
if(!horizontal){
plot.window(xlim, ylim, log = log, asp = asp, bty = bty, cex = cex, xaxs = xaxs, yaxs = yaxs, lab = lab, mai = mai, mar = mar, mex = mex, mfcol = mfcol, mfrow = mfrow, mfg = mfg, xlog = xlog, ylog = ylog)
} else {
plot.window(ylim, xlim, log = ifelse(log == "y", "x", ""), asp = asp, bty = bty, cex = cex, xaxs = xaxs, yaxs = yaxs, lab = lab, mai = mai, mar = mar, mex = mex, mfcol = mfcol, mfrow = mfrow, mfg = mfg, xlog = ylog, ylog = xlog)
}
}
panel.first
if (!horizontal) {
if (!add) {
plot.window(xlim, ylim, log = log, asp = asp, bty = bty, cex = cex, xaxs = xaxs, yaxs = yaxs, lab = lab, mai = mai, mar = mar, mex = mex, mfcol = mfcol, mfrow = mfrow, mfg = mfg, xlog = xlog, ylog = ylog)
xaxp <- par()$xaxp
yaxp <- par()$yaxp
if(yaxt !="n"){
if(ylog){
#log_axis_label <- log_axis_label[log_axis >= exp(par("usr")[3])]
#log_axis <- log_axis[log_axis >= exp(par("usr")[3])]
#log_axis_label <- log_axis_label[log_axis <= exp(par("usr")[4])]
#log_axis <- log_axis[log_axis <= exp(par("usr")[4])]
Axis(unlist(datas), side = 2, cex.axis = cex.axis, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
}
} else {
Axis(unlist(datas), side = 2, cex.axis = cex.axis, col.axis = col.axis, font.axis = font.axis, mgp = mgp, yaxp = yaxp, tck = tck, tcl = tcl, las = las)
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, xaxp = xaxp, tck = tck, tcl = tcl, las = las)
}
}
} else {
if(ylog){
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
}
} else {
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, xaxp = xaxp, tck = tck, tcl = tcl, las = las)
}
}
}
}
if (frame.plot) {
box(lty = lty, lwd = lwd)
}
for (i in 1:n) {
polygon(c(at[i] - radj*height[[i]], rev(at[i] + ladj*height[[i]])),
c(base[[i]], rev(base[[i]])), col = ifelse(length(col)>1,col[1+(i-1)%%length(col)], col), border = ifelse(length(border)>1, border[1+(i-1)%%length(border)], border),
lty = lty, lwd = lwd, xpd = xpd, lend = lend, ljoin = ljoin, lmitre = lmitre)
if (drawRect) {
lines(at[c(i, i)], c(lower[i], upper[i]), lwd = lwd,
lty = lty, col = ifelse(length(lineCol)>1, lineCol[1+(i-1)%%length(lineCol)], lineCol), lend = lend, ljoin = ljoin, lmitre = lmitre)
rect(at[i] - radj*ifelse(length(boxwidth)>1, boxwidth[i], boxwidth)/2, q1[i], at[i] + ladj*ifelse(length(boxwidth)>1, boxwidth[i], boxwidth)/2,
q3[i], col = ifelse(length(rectCol)>1, rectCol[1+(i-1)%%length(rectCol)], rectCol), border = ifelse(length(lineCol)>1, lineCol[1+(i-1)%%length(lineCol)], lineCol), xpd = xpd, lend = lend, ljoin = ljoin, lmitre = lmitre)
if(plotCentre == "line"){
lines(x = c(at[i] - radj*med.dens[i],
at[i],
at[i] + ladj*med.dens[i]),
y = rep(med[i],3))
} else {
points(at[i], med[i], pch = ifelse(length(pchMed)>1, pchMed[1+(i-1)%%length(pchMed)], pchMed), col = ifelse(length(colMed)>1, colMed[1+(i-1)%%length(colMed)], colMed), bg = ifelse(length(colMed2)>1, colMed2[1+(i-1)%%length(colMed2)], colMed2), cex = cex, lwd = lwd, lty = lty)
}
}
}
}
else {
if(log == "y" || ylog == TRUE){
log <- "x"
xlog <- TRUE
ylog <- FALSE
}
if (!add) {
plot.window(ylim, xlim, log = log, asp = asp, bty = bty, cex = cex, xaxs = xaxs, yaxs = yaxs, lab = lab, mai = mai, mar = mar, mex = mex, mfcol = mfcol, mfrow = mfrow, mfg = mfg, xlog = xlog, ylog = ylog)
xaxp <- par()$xaxp
yaxp <- par()$yaxp
if(yaxt !="n"){
if(xlog){
#log_axis_label <- log_axis_label[log_axis >= exp(par("usr")[3])]
#log_axis <- log_axis[log_axis >= exp(par("usr")[3])]
#log_axis_label <- log_axis_label[log_axis <= exp(par("usr")[4])]
#log_axis <- log_axis[log_axis <= exp(par("usr")[4])]
Axis(unlist(datas), side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 2, cex.axis = cex.axis, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
}
} else {
Axis(unlist(datas), side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, xaxp = xaxp, tck = tck, tcl = tcl, las = las)
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 2, cex.axis = cex.axis, col.axis = col.axis, font.axis = font.axis, mgp = mgp, yaxp = yaxp, tck = tck, tcl = tcl, las = las)
}
}
} else {
if(ylog){
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, tck = tck, tcl = tcl, las = las) # xaxp = xaxp, yaxp = yaxp disabled for log
}
} else {
if(is.null(cex.names)) cex.names <- cex.axis
if(xaxt !="n"){
Axis(1:length(datas), at = at, labels = label, side = 1, cex.axis = cex.names, col.axis = col.axis, font.axis = font.axis, mgp = mgp, xaxp = xaxp, tck = tck, tcl = tcl, las = las)
}
}
}
}
if (frame.plot) {
box(lty = lty, lwd = lwd)
}
for (i in 1:n) {
polygon(c(base[[i]], rev(base[[i]])), c(at[i] - radj*height[[i]],
rev(at[i] + ladj*height[[i]])), col = ifelse(length(col)>1,col[1+(i-1)%%length(col)], col), border = ifelse(length(border)>1, border[1+(i-1)%%length(border)], border),
lty = lty, lwd = lwd, xpd = xpd, lend = lend, ljoin = ljoin, lmitre = lmitre)
if (drawRect) {
lines(c(lower[i], upper[i]), at[c(i, i)], lwd = lwd,
lty = lty, col = ifelse(length(lineCol)>1, lineCol[1+(i-1)%%length(lineCol)], lineCol), lend = lend, ljoin = ljoin, lmitre = lmitre)
rect(q1[i], at[i] - radj*ifelse(length(boxwidth)>1, boxwidth[i], boxwidth)/2, q3[i], at[i] +
ladj*ifelse(length(boxwidth)>1, boxwidth[i], boxwidth)/2, col = ifelse(length(rectCol)>1, rectCol[1+(i-1)%%length(rectCol)], rectCol), border = ifelse(length(lineCol)>1, lineCol[1+(i-1)%%length(lineCol)], lineCol), xpd = xpd, lend = lend, ljoin = ljoin, lmitre = lmitre)
if(plotCentre == "line"){
lines(y = c(at[i] - radj*med.dens[i],
at[i],
at[i] + ladj*med.dens[i]),
x = rep(med[i],3))
} else {
          points(med[i], at[i], pch = ifelse(length(pchMed)>1, pchMed[1+(i-1)%%length(pchMed)], pchMed), col = ifelse(length(colMed)>1, colMed[1+(i-1)%%length(colMed)], colMed), bg = ifelse(length(colMed2)>1, colMed2[1+(i-1)%%length(colMed2)], colMed2), cex = cex, lwd = lwd, lty = lty)
}
}
}
}
panel.last
if (ann) {
title(main = main, sub = sub, xlab = xlab, ylab = ylab, line = line, outer = outer, xpd = xpd, cex.main = cex.main, col.main = col.main, font.main = font.main)
}
invisible(list(upper = upper, lower = lower, median = med,
q1 = q1, q3 = q3))
}
|
/scratch/gouwar.j/cran-all/cranData/vioplot/R/vioplot.R
|
#' Violin Plot Statistics
#'
#' This function is typically called by another function to gather the statistics necessary
#' for producing box plots, but may be invoked separately. See: \code{\link[grDevices]{boxplot.stats}}
#'
#' @aliases violin.stats violinplot.stats
#' @rdname violin.stats
#' @param x a numeric vector for which the violin plot will be constructed (\code{NA}s and \code{NaN}s are allowed and omitted).
#' @param coef this determines how far the plot ‘whiskers’ extend out from the box. If coef is positive, the
#' whiskers extend to the most extreme data point which is no more than coef times the length of the box away
#' from the box. A value of zero causes the whiskers to extend to the data extremes (and no outliers be returned).
#' @param do.conf,do.out logicals; if FALSE, the conf or out component respectively will be empty in the result.
#' @param ... arguments passed to \code{\link[vioplot]{vioplot}}.
#' @importFrom grDevices boxplot.stats
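#' @examples
#' #a minimal usage sketch: whisker and hinge statistics for simulated data
#' vioplot.stats(rnorm(100))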
#' @export
vioplot.stats <- function(x, coef = 1.5, do.conf = TRUE, do.out = TRUE, ...){
boxplot.stats(x, coef = coef, do.conf = do.conf, do.out = do.out)
}
|
/scratch/gouwar.j/cran-all/cranData/vioplot/R/vioplot.stats.R
|
## -----------------------------------------------------------------------------
library("vioplot")
## ---- message=FALSE, eval=FALSE-----------------------------------------------
# data(iris)
# boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
# library("vioplot")
# vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
## ---- message=FALSE, echo=FALSE-----------------------------------------------
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="magenta")
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
## -----------------------------------------------------------------------------
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
## -----------------------------------------------------------------------------
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue")
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
## -----------------------------------------------------------------------------
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
## -----------------------------------------------------------------------------
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue")
## -----------------------------------------------------------------------------
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
## -----------------------------------------------------------------------------
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", colMed="violet")
## -----------------------------------------------------------------------------
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
## -----------------------------------------------------------------------------
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
## ---- message=FALSE-----------------------------------------------------------
data(iris)
summary(iris$Sepal.Width)
table(iris$Sepal.Width > mean(iris$Sepal.Width))
iris_large <- iris[iris$Sepal.Width > mean(iris$Sepal.Width), ]
iris_small <- iris[iris$Sepal.Width <= mean(iris$Sepal.Width), ]
## ---- fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'----
histoplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
histoplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/histogram_customisation.R
|
---
title: "Customising Violin Plots with Histograms"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Customising Violin Plots with Histograms}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
While boxplots have become the _de facto_ standard for plotting the distribution of data, they are a vast oversimplification and may not show everything needed to evaluate the variation of data. This is particularly important for datasets which do not form the Gaussian "Normal" distribution that most researchers have become accustomed to.
While density plots are helpful in this regard, they can be less aesthetically pleasing than boxplots and harder to interpret for those familiar with boxplots. Often the only ways to compare multiple data types with density plots are to facet the plotting panes into slices of the data or to overlay density curves with colours and a legend. This approach is jarring for new users and leads to cluttered plots that are difficult to present to a wider audience.
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, histogram plots show distribution information that the boxplot is unable to show.
```{r}
library("vioplot")
```
```{r, message=FALSE, eval=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
library("vioplot")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
```
```{r, message=FALSE, echo=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="magenta")
```
## Plot Defaults
However, as we can see here, the plot defaults are not aesthetically pleasing, with a rather glaring colour scheme unsuitable for professional or academic usage. Thus the default plot colours have been changed, as shown here:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
```
# Histogram plot
Here we introduce a variant of the violin plot, using a mirrored bihistogram to show the distribution:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
```
## Plot colours: Histogram Fill
Plot colours can be further customised, as with the original vioplot package, using the `col` argument:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue")
```
### Vectorisation
The `vioplot` (0.2) function is unable to colour each histogram separately; this is enabled with a vectorised `col` in `vioplot` (0.3) and `histoplot` (0.4):
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
```
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
```
## Plot colours: Violin Lines and Boxplot
Colours can also be customised for the histogram fill and border separately using the `col` and `border` arguments:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue")
```
Similarly, the arguments `lineCol` and `rectCol` specify the colours of the boxplot outline and rectangle fill. For simplicity, the box and whiskers of the boxplot will always have the same colour.
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
```
The same applies to the colour of the median point with `colMed`:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", colMed="violet")
```
### Combined customisation
These customised colours can be combined:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
```
### Vectorisation
These colour and shape settings can also be customised separately for each histogram:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
```
## Split Bihistogram Plots
We set up the data with two categories (Sepal Width) as follows:
```{r, message=FALSE}
data(iris)
summary(iris$Sepal.Width)
table(iris$Sepal.Width > mean(iris$Sepal.Width))
iris_large <- iris[iris$Sepal.Width > mean(iris$Sepal.Width), ]
iris_small <- iris[iris$Sepal.Width <= mean(iris$Sepal.Width), ]
```
A direct comparison of two datasets can be made with the `side` argument and `add = TRUE` on the second plot:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
histoplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
histoplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/histogram_customisation.Rmd
|
## -----------------------------------------------------------------------------
library("vioplot")
## ---- message=FALSE, eval=FALSE-----------------------------------------------
# data(iris)
# boxplot(Sepal.Length~Species, data = iris)
## ---- message=FALSE, echo=FALSE-----------------------------------------------
data(iris)
boxplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
## ---- message=FALSE, eval=FALSE-----------------------------------------------
# devtools::install_version("vioplot", version = "0.2")
# library("vioplot")
# vioplot(Sepal.Length~Species, data = iris)
## ---- message=FALSE, eval=FALSE-----------------------------------------------
# vioplot(Sepal.Length~Species, data = iris)
## ---- message=FALSE, echo=FALSE-----------------------------------------------
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="magenta")
## -----------------------------------------------------------------------------
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
## -----------------------------------------------------------------------------
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue")
## -----------------------------------------------------------------------------
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
## -----------------------------------------------------------------------------
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue")
## -----------------------------------------------------------------------------
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
## -----------------------------------------------------------------------------
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", colMed="violet")
## -----------------------------------------------------------------------------
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
## -----------------------------------------------------------------------------
histoplot(Sepal.Length~Species, data = iris, main="Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
## ---- message=FALSE-----------------------------------------------------------
data(iris)
summary(iris$Sepal.Width)
table(iris$Sepal.Width > mean(iris$Sepal.Width))
iris_large <- iris[iris$Sepal.Width > mean(iris$Sepal.Width), ]
iris_small <- iris[iris$Sepal.Width <= mean(iris$Sepal.Width), ]
## ---- fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'----
histoplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
histoplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/histogram_formulae.R
|
---
title: "Customising Histogram Plots with Formula Input"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{histoplot: Customising Histogram Plots with Formula Input}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
Since boxplots have become the _de facto_ standard for plotting the distribution of data, most users are familiar with these and with the formula input for dataframes. However, this input was not available in the original `vioplot` package. Thus it has been restored here for enhanced backwards compatibility with `boxplot`.
As shown below for the `iris` dataset, histogram plots show distribution information while taking the formula input that `boxplot` implements but `vioplot` (0.2) was unable to. This reproduces the customisation shown in [the main histoplot vignette using histoplot syntax](histogram_customisation.html) with the formula method commonly used for `boxplot`, `t.test`, and `lm`.
```{r}
library("vioplot")
```
```{r, message=FALSE, eval=FALSE}
data(iris)
boxplot(Sepal.Length~Species, data = iris)
```
```{r, message=FALSE, echo=FALSE}
data(iris)
boxplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
```
Whereas performing the same operation does not work with `vioplot` (0.2):
```{r, message=FALSE, eval=FALSE}
devtools::install_version("vioplot", version = "0.2")
library("vioplot")
vioplot(Sepal.Length~Species, data = iris)
```
```
Error in min(data) : invalid 'type' (language) of argument
```
## Plot Defaults
```{r, message=FALSE, eval=FALSE}
vioplot(Sepal.Length~Species, data = iris)
```
```{r, message=FALSE, echo=FALSE}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="magenta")
```
Another concern we see here is that the `vioplot` defaults are not aesthetically pleasing, with a rather glaring colour scheme unsuitable for professional or academic usage. Thus the plot default colours have been changed as shown here:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
```
## Plot colours: Histogram Fill
Plot colours can be further customised as with the original vioplot package using the `col` argument:
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue")
```
### Vectorisation
However, the `vioplot` (0.2) function is unable to colour each histogram separately; this is enabled with a vectorised `col` in `histoplot` (0.4):
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
```
## Plot colours: Violin Lines and Boxplot
Colours can also be customised for the histogram fill and border separately using the `col` and `border` arguments:
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue")
```
Similarly, the arguments `lineCol` and `rectCol` specify the colours of the boxplot outline and rectangle fill. For simplicity the box and whiskers of the boxplot will always have the same colour.
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
```
The same applies to the colour of the median point with `colMed`:
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", colMed="violet")
```
### Combined customisation
These customised colours can be combined:
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
```
### Vectorisation
These colour and shape settings can also be customised separately for each histogram:
```{r}
histoplot(Sepal.Length~Species, data = iris, main="Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
```
## Split Bihistogram Plots
We set up the data with two categories (Sepal Width) as follows:
```{r, message=FALSE}
data(iris)
summary(iris$Sepal.Width)
table(iris$Sepal.Width > mean(iris$Sepal.Width))
iris_large <- iris[iris$Sepal.Width > mean(iris$Sepal.Width), ]
iris_small <- iris[iris$Sepal.Width <= mean(iris$Sepal.Width), ]
```
A direct comparison of two datasets can be made with the `side` argument and `add = TRUE` on the second plot:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
histoplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
histoplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/histogram_formulae.Rmd
|
## -----------------------------------------------------------------------------
# generate dummy data
a <- rnorm(25, 3, 0.5)
b <- rnorm(25, 2, 1.0)
c <- rnorm(25, 2.75, 0.25)
d <- rnorm(25, 3.15, 0.375)
e <- rnorm(25, 1, 0.25)
datamat <- cbind(a, b, c, d, e)
dim(datamat)
## -----------------------------------------------------------------------------
library("vioplot")
## -----------------------------------------------------------------------------
vioplot(datamat, ylim = c(0, 5))
# compute medians
data.med <- apply(datamat, 2, median)
data.med
#overlay medians
lines(data.med, lty = 2, lwd = 1.5)
points(data.med, pch = 19, col = "red", cex = 2.25)
## -----------------------------------------------------------------------------
outcome <- c(rnorm(25, 3, 1), rnorm(25, 2, 0.5))
intervention <- c(rep("treatment", 25), rep("control", 25))
table(intervention)
names(table(intervention))
unique(sort(intervention))
intervention <- as.factor(intervention)
levels(intervention)
d <- data.frame(outcome, intervention)
vioplot(outcome ~ intervention, data = d, xaxt = 'n', yaxt = 'n',
main = "", xlab = "", ylab = "")
axis(side = 1, at = 1:length(levels(intervention)), labels = levels(intervention))
mtext("custom x labels for intervention", side = 1)
mtext("custom y labels for outcome", side = 2)
title(main = "example with custom title", sub = "subtitles are supported")
## -----------------------------------------------------------------------------
histoplot(outcome ~ intervention, data = d, xaxt = 'n', yaxt = 'n',
main = "", xlab = "", ylab = "")
axis(side = 1, at = 1:length(levels(intervention)), labels = levels(intervention))
mtext("custom x labels for intervention", side = 1)
mtext("custom y labels for outcome", side = 2)
title(main = "example with custom title", sub = "subtitles are supported")
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/overlaying_annotations.R
|
---
title: "Overlaying base R graphics"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Overlaying base R graphics}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction: Integration with base R graphics
Here we demonstrate how to combine violin plots with other base R graphics. In principle, any base R graphics can be overlaid on top of a violin plot for annotation.
Many problems can be resolved by overlaying base R graphics and integrating vioplot with other plotting functions. Any additional elements can be overlaid by running commands after generating the plot. The x-axis positions are integer values [1, 2, 3, …] for each violin. The y-axis values are continuous, as displayed.
The following plotting elements are supported, for example: `points`, `lines`, and `polygon`.
It is also possible to modify plotting parameters with `title`, `axis`, and `legend`.
`vioplot()` functions similarly to `plot()` and accepts graphical parameters as in `par()`.
### Plotting violins with highlighted medians
For example, it is possible to add annotations highlighting the medians.
```{r}
# generate dummy data
a <- rnorm(25, 3, 0.5)
b <- rnorm(25, 2, 1.0)
c <- rnorm(25, 2.75, 0.25)
d <- rnorm(25, 3.15, 0.375)
e <- rnorm(25, 1, 0.25)
datamat <- cbind(a, b, c, d, e)
dim(datamat)
```
```{r}
library("vioplot")
```
```{r}
vioplot(datamat, ylim = c(0, 5))
# compute medians
data.med <- apply(datamat, 2, median)
data.med
#overlay medians
lines(data.med, lty = 2, lwd = 1.5)
points(data.med, pch = 19, col = "red", cex = 2.25)
```
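Shaded regions can be added in the same way with `polygon()`. The following sketch (an illustrative annotation, not part of the original examples) marks a semi-transparent reference band across all five violins:
```{r}
vioplot(datamat, ylim = c(0, 5))
# shade a reference band between y = 2 and y = 3 across the violins at x = 1..5;
# adjustcolor() adds transparency so the violins remain visible underneath
polygon(x = c(0.5, 5.5, 5.5, 0.5), y = c(2, 2, 3, 3),
        col = adjustcolor("grey50", alpha.f = 0.2), border = NA)
```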
### Custom axes and titles
It is also possible to modify the axes labels and titles as shown in this example. Here default axes are suppressed and replaced with custom parameters.
```{r}
outcome <- c(rnorm(25, 3, 1), rnorm(25, 2, 0.5))
intervention <- c(rep("treatment", 25), rep("control", 25))
table(intervention)
names(table(intervention))
unique(sort(intervention))
intervention <- as.factor(intervention)
levels(intervention)
d <- data.frame(outcome, intervention)
vioplot(outcome ~ intervention, data = d, xaxt = 'n', yaxt = 'n',
main = "", xlab = "", ylab = "")
axis(side = 1, at = 1:length(levels(intervention)), labels = levels(intervention))
mtext("custom x labels for intervention", side = 1)
mtext("custom y labels for outcome", side = 2)
title(main = "example with custom title", sub = "subtitles are supported")
```
#### Annotated histograms
This is also supported by the histogram plot.
```{r}
histoplot(outcome ~ intervention, data = d, xaxt = 'n', yaxt = 'n',
main = "", xlab = "", ylab = "")
axis(side = 1, at = 1:length(levels(intervention)), labels = levels(intervention))
mtext("custom x labels for intervention", side = 1)
mtext("custom y labels for outcome", side = 2)
title(main = "example with custom title", sub = "subtitles are supported")
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/overlaying_annotations.Rmd
|
## -----------------------------------------------------------------------------
library("vioplot")
## ---- message=FALSE-----------------------------------------------------------
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
## ---- echo=FALSE, message=FALSE-----------------------------------------------
par(mar=rep(1,4))
## -----------------------------------------------------------------------------
par(mfrow=c(3, 1))
par(mar=rep(2, 4))
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length: setosa", col="green")
plot(density(iris$Sepal.Length[iris$Species=="versicolor"]), main="Sepal Length: versicolor", col="blue")
plot(density(iris$Sepal.Length[iris$Species=="virginica"]), main="Sepal Length: virginica", col="palevioletred4")
par(mfrow=c(1, 1))
## ---- echo=FALSE, message=FALSE-----------------------------------------------
par(mar=c(5, 4, 4, 2) + 0.1)
## ---- echo=FALSE, message=FALSE-----------------------------------------------
par(mar=rep(2,4))
## -----------------------------------------------------------------------------
par(mfrow=c(3, 1))
par(mar=rep(2, 4))
xaxis <- c(3, 9)
yaxis <- c(0, 1.25)
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length: setosa", col="green", xlim=xaxis, ylim=yaxis)
plot(density(iris$Sepal.Length[iris$Species=="versicolor"]), main="Sepal Length: versicolor", col="blue", xlim=xaxis, ylim=yaxis)
plot(density(iris$Sepal.Length[iris$Species=="virginica"]), main="Sepal Length: virginica", col="palevioletred4", xlim=xaxis, ylim=yaxis)
par(mfrow=c(1, 1))
## ---- echo=FALSE, message=FALSE-----------------------------------------------
par(mar=c(5, 4, 4, 2) + 0.1)
## -----------------------------------------------------------------------------
par(mfrow=c(1, 1))
xaxis <- c(3, 9)
yaxis <- c(0, 1.25)
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length", col="green", xlim=xaxis, ylim=yaxis)
lines(density(iris$Sepal.Length[iris$Species=="versicolor"]), col="blue")
lines(density(iris$Sepal.Length[iris$Species=="virginica"]), col="palevioletred4")
legend("topright", fill=c("green", "blue", "palevioletred4"), legend=levels(iris$Species), cex=0.5)
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", areaEqual = T)
## ---- echo=FALSE, message=FALSE-----------------------------------------------
par(mar=rep(2, 4))
## -----------------------------------------------------------------------------
par(mfrow=c(2,1))
par(mar=rep(2, 4))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Width)", areaEqual = F)
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T)
par(mfrow=c(1,1))
## ---- echo=FALSE, message=FALSE-----------------------------------------------
par(mar=c(5, 4, 4, 2) + 0.1)
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), rectCol=c("green", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), border=c("darkolivegreen4", "royalblue4", "violetred4"))
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), rectCol=c("green", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), border=c("darkolivegreen4", "royalblue4", "violetred4"), wex=1.25)
## -----------------------------------------------------------------------------
vioplot(rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4), rlnorm(200, 0, 0.5), rnbinom(200, 10, 0.9), rlogis(20, 0, 0.5), areaEqual = F, main="Equal Width", xlab="distribution", ylab="data value", names=c("normal", "poisson", "binomial", "log-normal", "neg-binomial", "logistic"))
vioplot(rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4), rlnorm(200, 0, 0.5), rnbinom(200, 10, 0.9), rlogis(20, 0, 0.5), areaEqual = T, main="Equal Area", xlab="distribution", ylab="data value", names=c("normal", "poisson", "binomial", "log-normal", "neg-binomial", "logistic"))
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/violin_area.R
|
---
title: "Controlling Violin Plot Area"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Controlling Violin Plot Area}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
While boxplots have become the _de facto_ standard for plotting the distribution of data, they are a vast oversimplification and may not show everything needed to evaluate the variation of data. This is particularly important for datasets which do not form the Gaussian "Normal" distribution that most researchers have become accustomed to.
While density plots are helpful in this regard, they can be less aesthetically pleasing than boxplots and harder to interpret for those familiar with boxplots. Often the only ways to compare multiple data types with density plots are to facet the plotting panes into slices of the data or to overlay density curves with colours and a legend. This approach is jarring for new users and leads to cluttered plots that are difficult to present to a wider audience.
## Violin Plots
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
```{r}
library("vioplot")
```
```{r, message=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
```
## Violin Plot Area
However there are concerns that existing violin plot packages (such as `vioplot` 0.2) scale the data to the most aesthetically suitable width rather than maintaining proportions comparable across data sets. Consider the differing distributions shown below:
```{r, echo=FALSE, message=FALSE}
par(mar=rep(1,4))
```
```{r}
par(mfrow=c(3, 1))
par(mar=rep(2, 4))
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length: setosa", col="green")
plot(density(iris$Sepal.Length[iris$Species=="versicolor"]), main="Sepal Length: versicolor", col="blue")
plot(density(iris$Sepal.Length[iris$Species=="virginica"]), main="Sepal Length: virginica", col="palevioletred4")
par(mfrow=c(1, 1))
```
```{r, echo=FALSE, message=FALSE}
par(mar=c(5, 4, 4, 2) + 0.1)
```
## Comparing datasets
None of the plots above show the relative distributions on the same scale; even if we match the x-axes of the density plots, the relative heights are obscured and difficult to compare.
```{r, echo=FALSE, message=FALSE}
par(mar=rep(2,4))
```
```{r}
par(mfrow=c(3, 1))
par(mar=rep(2, 4))
xaxis <- c(3, 9)
yaxis <- c(0, 1.25)
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length: setosa", col="green", xlim=xaxis, ylim=yaxis)
plot(density(iris$Sepal.Length[iris$Species=="versicolor"]), main="Sepal Length: versicolor", col="blue", xlim=xaxis, ylim=yaxis)
plot(density(iris$Sepal.Length[iris$Species=="virginica"]), main="Sepal Length: virginica", col="palevioletred4", xlim=xaxis, ylim=yaxis)
par(mfrow=c(1, 1))
```
```{r, echo=FALSE, message=FALSE}
par(mar=c(5, 4, 4, 2) + 0.1)
```
This can somewhat be addressed by overlaying density plots:
```{r}
par(mfrow=c(1, 1))
xaxis <- c(3, 9)
yaxis <- c(0, 1.25)
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length", col="green", xlim=xaxis, ylim=yaxis)
lines(density(iris$Sepal.Length[iris$Species=="versicolor"]), col="blue")
lines(density(iris$Sepal.Length[iris$Species=="virginica"]), col="palevioletred4")
legend("topright", fill=c("green", "blue", "palevioletred4"), legend=levels(iris$Species), cex=0.5)
```
This has the benefit of highlighting the different distributions of the data subsets. However, notice here that a figure legend becomes necessary, plot axis limits need to be defined to display the range of all distribution curves, and the plot quickly becomes cluttered if the number of factors to be compared becomes much larger.
## Area Control in Violin Plots
Therefore the `areaEqual` parameter has been added to customise the violin plot to serve a similar purpose:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", areaEqual = T)
```
If we compare this to the original vioplot functionality (defaulting to `areaEqual = FALSE`) the differences between the two are clear.
```{r, echo=FALSE, message=FALSE}
par(mar=rep(2, 4))
```
```{r}
par(mfrow=c(2,1))
par(mar=rep(2, 4))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Width)", areaEqual = F)
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T)
par(mfrow=c(1,1))
```
```{r, echo=FALSE, message=FALSE}
par(mar=c(5, 4, 4, 2) + 0.1)
```
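To make the equal-area behaviour more concrete, the sketch below (an illustration only, not the package's internal code) shows why equal-width violins end up with unequal areas: when every density curve is scaled to the same peak width, each violin's polygon area is roughly proportional to the reciprocal of its peak density, so rescaling the widths in proportion to the peak heights equalises the areas.
```{r}
## Illustrative sketch only -- not how vioplot computes areaEqual internally.
## Under equal-width scaling, each violin's area is proportional to 1 / max(density).
sepal_by_species <- split(iris$Sepal.Length, iris$Species)
peak_height <- sapply(sepal_by_species, function(x) max(density(x)$y))
relative_area <- 1 / peak_height                 # relative polygon areas under equal width
round(relative_area / max(relative_area), 2)     # the areas clearly differ between species
round(peak_height / max(peak_height), 2)         # width factors that would equalise the areas
```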
Note that `areaEqual` considers the full area of the density distribution before the outlier tails are removed. We leave it to the user's discretion which option they elect to use. The `areaEqual` functionality is compatible with all of the customisation discussed in [the main vioplot vignette](violin_customisation.html):
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), rectCol=c("green", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), border=c("darkolivegreen4", "royalblue4", "violetred4"))
```
The violin width can further be scaled with `wex`, which maintains the proportions across the datasets if `areaEqual = TRUE`:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), rectCol=c("green", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), border=c("darkolivegreen4", "royalblue4", "violetred4"), wex=1.25)
```
## Comparing distributions
Notice the utility of `areaEqual` for cases where different datasets have different underlying distributions:
```{r}
vioplot(rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4), rlnorm(200, 0, 0.5), rnbinom(200, 10, 0.9), rlogis(20, 0, 0.5), areaEqual = F, main="Equal Width", xlab="distribution", ylab="data value", names=c("normal", "poisson", "binomial", "log-normal", "neg-binomial", "logistic"))
vioplot(rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4), rlnorm(200, 0, 0.5), rnbinom(200, 10, 0.9), rlogis(20, 0, 0.5), areaEqual = T, main="Equal Area", xlab="distribution", ylab="data value", names=c("normal", "poisson", "binomial", "log-normal", "neg-binomial", "logistic"))
```
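Because each call above draws fresh random samples, the equal-width and equal-area panels are not computed from identical data. A more controlled comparison (a sketch; the variable names `samples` and `dist_names` are illustrative) draws each sample once and reuses it for both panels:
```{r}
## Sketch: draw each sample once so both panels display exactly the same data.
set.seed(1)
samples <- list(rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4),
                rlnorm(200, 0, 0.5), rnbinom(200, 10, 0.9), rlogis(20, 0, 0.5))
dist_names <- c("normal", "poisson", "binomial", "log-normal", "neg-binomial", "logistic")
vioplot(samples[[1]], samples[[2]], samples[[3]], samples[[4]], samples[[5]], samples[[6]],
        areaEqual = FALSE, main = "Equal Width", xlab = "distribution", ylab = "data value", names = dist_names)
vioplot(samples[[1]], samples[[2]], samples[[3]], samples[[4]], samples[[5]], samples[[6]],
        areaEqual = TRUE, main = "Equal Area", xlab = "distribution", ylab = "data value", names = dist_names)
```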
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/violin_area.Rmd
|
## -----------------------------------------------------------------------------
library("vioplot")
## ---- message=FALSE, eval=FALSE-----------------------------------------------
# data(iris)
# boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
# library("vioplot")
# vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
## ---- message=FALSE, echo=FALSE-----------------------------------------------
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="magenta")
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue")
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue")
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", colMed="violet")
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/violin_customisation.R
|
---
title: "Customising Violin Plots"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Customising Violin Plots}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
While boxplots have become the _de facto_ standard for plotting the distribution of data, this is a vast oversimplification and may not show everything needed to evaluate the variation of data. This is particularly important for datasets which do not follow the Gaussian "Normal" distribution that most researchers have become accustomed to.
While density plots are helpful in this regard, they can be less aesthetically pleasing than boxplots and harder to interpret for those familiar with boxplots. Often the only ways to compare multiple data types with density plots are to facet the plotting panes into slices of the data or to overlay density curves distinguished by colours and a legend. This approach is jarring for new users and leads to cluttered plots that are difficult to present to a wider audience.
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
```{r}
library("vioplot")
```
```{r, message=FALSE, eval=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
library("vioplot")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
```
```{r, message=FALSE, echo=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="magenta")
```
## Plot Defaults
However as we can see here the plot defaults are not aesthetically pleasing, with a rather glaring colour scheme unsuitable for professional or academic usage. Thus the plot default colours have been changed as shown here:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
```
## Plot colours: Violin Fill
Plot colours can be further customised as with the original vioplot package using the `col` argument:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue")
```
### Vectorisation
However the `vioplot` (0.2) function is unable to colour each violin separately; this is enabled with a vectorised `col` argument in `vioplot` (0.3):
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
```
## Plot colours: Violin Lines and Boxplot
Colours can also be customised for the violin fill and border separately using the `col` and `border` arguments:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue")
```
Similarly, the arguments `lineCol` and `rectCol` specify the colours of the boxplot outline and rectangle fill. For simplicity the box and whiskers of the boxplot will always have the same colour.
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
```
The same applies to the colour of the median point with `colMed`:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", colMed="violet")
```
### Combined customisation
These customised colours can be combined:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
```
### Vectorisation
These colour and shape settings can also be customised separately for each violin:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
```
This should be sufficient to customise the violin plot but further examples are given in [the areaEqual vioplot vignette](violin_area.html) including how violin plots are useful for comparing variation when data does not follow the same distribution. This document also compares the violin plot with other established methods to plot data variation.
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/violin_customisation.Rmd
|
## -----------------------------------------------------------------------------
library("vioplot")
## ---- message=FALSE, eval=FALSE-----------------------------------------------
# data(iris)
# boxplot(Sepal.Length~Species, data = iris)
## ---- message=FALSE, echo=FALSE-----------------------------------------------
data(iris)
boxplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
## ---- message=FALSE, eval=FALSE-----------------------------------------------
# devtools::install_version("vioplot", version = "0.2")
# library("vioplot")
# vioplot(Sepal.Length~Species, data = iris)
## ---- message=FALSE, eval=FALSE-----------------------------------------------
# vioplot(Sepal.Length~Species, data = iris)
## ---- message=FALSE, echo=FALSE-----------------------------------------------
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="magenta")
## -----------------------------------------------------------------------------
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
## -----------------------------------------------------------------------------
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue")
## -----------------------------------------------------------------------------
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
## -----------------------------------------------------------------------------
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue")
## -----------------------------------------------------------------------------
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
## -----------------------------------------------------------------------------
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", colMed="violet")
## -----------------------------------------------------------------------------
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
## -----------------------------------------------------------------------------
vioplot(Sepal.Length~Species, data = iris, main="Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/violin_formulae.R
|
---
title: "Customising Violin Plots with Formula Input"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Customising Violin Plots with Formula Input}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
Since boxplots have become the _de facto_ standard for plotting the distribution of data, most users are familiar with them and with the formula input for data frames. However this input is not available in the standard `vioplot` package. Thus it has been restored here for enhanced backwards compatibility with `boxplot`.
As shown below for the `iris` dataset, violin plots can show distribution information using the formula input that `boxplot` implements but that `vioplot` (0.2) cannot take. This applies the customisation demonstrated in [the main vioplot vignette using vioplot syntax](violin_customisation.html) to the formula method commonly used for `boxplot`, `t.test`, and `lm`.
```{r}
library("vioplot")
```
```{r, message=FALSE, eval=FALSE}
data(iris)
boxplot(Sepal.Length~Species, data = iris)
```
```{r, message=FALSE, echo=FALSE}
data(iris)
boxplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
```
However, performing the same operation does not work with `vioplot` (0.2).
```{r, message=FALSE, eval=FALSE}
devtools::install_version("vioplot", version = "0.2")
library("vioplot")
vioplot(Sepal.Length~Species, data = iris)
```
```
Error in min(data) : invalid 'type' (language) of argument
```
## Plot Defaults
```{r, message=FALSE, eval=FALSE}
vioplot(Sepal.Length~Species, data = iris)
```
```{r, message=FALSE, echo=FALSE}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="magenta")
```
Another concern we see here is that the `vioplot` defaults are not aesthetically pleasing, with a rather glaring colour scheme unsuitable for professional or academic usage. Thus the plot default colours have been changed as shown here:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
```
## Plot colours: Violin Fill
Plot colours can be further customised as with the original vioplot package using the `col` argument:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue")
```
### Vectorisation
However the `vioplot` (0.2) function is unable to colour each violin separately; this is enabled with a vectorised `col` argument in `vioplot` (0.3):
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
```
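Rather than hard-coding one colour per group, a palette can also be derived from the factor levels using base R (a convenience sketch, not a feature of `vioplot` itself):
```{r}
## Sketch: build a named colour per factor level instead of typing the vector by hand.
species_cols <- setNames(hcl.colors(nlevels(iris$Species), "Pastel 1"), levels(iris$Species))
vioplot(Sepal.Length ~ Species, data = iris, main = "Sepal Length", col = species_cols)
legend("topleft", legend = names(species_cols), fill = species_cols, cex = 0.5)
```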
## Plot colours: Violin Lines and Boxplot
Colours can also be customised for the violin fill and border separately using the `col` and `border` arguments:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue")
```
Similarly, the arguments `lineCol` and `rectCol` specify the colours of the boxplot outline and rectangle fill. For simplicity the box and whiskers of the boxplot will always have the same colour.
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
```
The same applies to the colour of the median point with `colMed`:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", colMed="violet")
```
### Combined customisation
These customised colours can be combined:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
```
### Vectorisation
These colour and shape settings can also be customised separately for each violin:
```{r}
vioplot(Sepal.Length~Species, data = iris, main="Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/violin_formulae.Rmd
|
## ---- fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'----
library("vioplot")
## ---- message=FALSE-----------------------------------------------------------
data(iris)
summary(iris$Sepal.Width)
table(iris$Sepal.Width > mean(iris$Sepal.Width))
iris_large <- iris[iris$Sepal.Width > mean(iris$Sepal.Width), ]
iris_small <- iris[iris$Sepal.Width <= mean(iris$Sepal.Width), ]
## ---- fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'----
boxplot(Sepal.Length~Species, data=iris, col="grey")
## ---- fig.align = 'center', fig.height = 6, fig.width = 6, fig.keep = 'last'----
{
par(mfrow=c(2,1))
boxplot(Sepal.Length~Species, data=iris_small, col = "lightblue")
boxplot(Sepal.Length~Species, data=iris_large, col = "palevioletred")
par(mfrow=c(1,1))
}
## ---- fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'----
vioplot(Sepal.Length~Species, data=iris)
## ---- fig.align = 'center', fig.height = 6, fig.width = 6, fig.keep = 'last'----
{
par(mfrow=c(2,1))
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line")
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line")
par(mfrow=c(1,1))
}
## ---- fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'----
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
## ---- fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'----
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right", xlab = "Iris species", ylab = "Length", main = "Sepals", names=paste("Iris", levels(iris$Species)))
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Width")
## ---- fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'----
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T, xlab = "Iris species", ylab = "Length", main = "Sepals", names=paste("Iris", levels(iris$Species)))
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Width")
## ---- fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'----
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "point", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "point", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
## ---- fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'----
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "point", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "point", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_large[grep(species, iris_large$Species),]$Sepal.Length))), pch = 21, col = "palevioletred4", bg = "palevioletred2")
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
## ---- fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'----
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_large[grep(species, iris_large$Species),]$Sepal.Length))), pch = 21, col = "palevioletred4", bg = "palevioletred2")
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_small[grep(species, iris_small$Species),]$Sepal.Length))), pch = 21, col = "lightblue4", bg = "lightblue2")
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/violin_split.R
|
---
title: "Split Violin Plots"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
fig_width: 6
fig_height: 3
fig_align: 'center'
fig_keep: 'last'
vignette: >
%\VignetteIndexEntry{vioplot: Split Violin Plots}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Violin Plots
Violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
### General Set up
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
library("vioplot")
```
We set up the data with two categories (Sepal Width) as follows:
```{r, message=FALSE}
data(iris)
summary(iris$Sepal.Width)
table(iris$Sepal.Width > mean(iris$Sepal.Width))
iris_large <- iris[iris$Sepal.Width > mean(iris$Sepal.Width), ]
iris_small <- iris[iris$Sepal.Width <= mean(iris$Sepal.Width), ]
```
### Boxplots
First we plot Sepal Length on its own:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
boxplot(Sepal.Length~Species, data=iris, col="grey")
```
An indirect comparison can be achieved with par:
```{r, fig.align = 'center', fig.height = 6, fig.width = 6, fig.keep = 'last'}
{
par(mfrow=c(2,1))
boxplot(Sepal.Length~Species, data=iris_small, col = "lightblue")
boxplot(Sepal.Length~Species, data=iris_large, col = "palevioletred")
par(mfrow=c(1,1))
}
```
### Violin Plots
First we plot Sepal Length on its own:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris)
```
An indirect comparison can be achieved with par:
```{r, fig.align = 'center', fig.height = 6, fig.width = 6, fig.keep = 'last'}
{
par(mfrow=c(2,1))
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line")
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line")
par(mfrow=c(1,1))
}
```
### Split Violin Plots
A more direct comparison can be made with the `side` argument and `add = TRUE` on the second plot:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
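If this two-call pattern is used often it can be wrapped in a small helper function. The sketch below is built only on the calls shown above; the function name `split_vioplot` and its arguments are illustrative and not part of the package:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
## Sketch of a wrapper for the right/left split shown above; not a vioplot function.
split_vioplot <- function(formula, data, by, cols = c("lightblue", "palevioletred"), ...) {
  groups <- split(data, by)   # first factor level drawn on the left, second on the right
  vioplot(formula, data = groups[[2]], col = cols[2], plotCentre = "line", side = "right", ...)
  vioplot(formula, data = groups[[1]], col = cols[1], plotCentre = "line", side = "left", add = TRUE)
  legend("topleft", fill = cols, legend = names(groups))
}
width_group <- factor(iris$Sepal.Width > mean(iris$Sepal.Width), labels = c("small", "large"))
split_vioplot(Sepal.Length ~ Species, iris, by = width_group)
title(xlab = "Species", ylab = "Sepal Length")
```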
#### Custom axes labels
Custom axes labels are supported for split violin plots. However, you must use these arguments on the *first* call of `vioplot`.
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right", xlab = "Iris species", ylab = "Length", main = "Sepals", names=paste("Iris", levels(iris$Species)))
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Width")
```
Note that this is disabled for the second `vioplot` call to avoid overlaying labels.
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T, xlab = "Iris species", ylab = "Length", main = "Sepals", names=paste("Iris", levels(iris$Species)))
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Width")
```
#### Median
The line median option is more suitable for side-by-side comparisons, but the point option is also available:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "point", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "point", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
It may be necessary to include a `points` command to prevent the median markers being overwritten by the subsequent plot:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "point", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "point", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_large[grep(species, iris_large$Species),]$Sepal.Length))), pch = 21, col = "palevioletred4", bg = "palevioletred2")
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
Similarly points could be added where a line has been used previously:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_large[grep(species, iris_large$Species),]$Sepal.Length))), pch = 21, col = "palevioletred4", bg = "palevioletred2")
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_small[grep(species, iris_small$Species),]$Sepal.Length))), pch = 21, col = "lightblue4", bg = "lightblue2")
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
Here the result is aesthetically pleasing, and it is intuitive to interpret categorical differences in the centre and variation of a continuous variable.
#### Sources
These extensions to `vioplot` are based on the code provided here:
* https://gist.github.com/mbjoseph/5852613
These have previously been discussed on the following sites:
* https://mbjoseph.github.io/posts/2018-12-23-split-violin-plots/
* http://tagteam.harvard.edu/hub_feeds/1981/feed_items/209875
* [https://www.r-bloggers.com/split-violin-plots/](https://www.r-bloggers.com/2013/06/split-violin-plots/)
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/violin_split.Rmd
|
## -----------------------------------------------------------------------------
library("vioplot")
## ---- message=FALSE-----------------------------------------------------------
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", ylog = T, ylim=c(log(1), log(10)))
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", log = T, ylim=c(log(1), log(10)))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", log = "y", ylim=c(log(1), log(10)))
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", ylog = T, yaxt="n", ylim=c(log(1), log(10)))
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n")
axis(2, at=1:10, labels=1:10)
## -----------------------------------------------------------------------------
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n", log="y", ylim=c(log(4), log(9)))
axis(2, at=log(1:10), labels=1:10)
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/violin_ylog.R
|
---
title: "Controlling y-axis Plotting"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Controlling y-axis Plotting}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
While boxplots have become the _de facto_ standard for plotting the distribution of data, this is a vast oversimplification and may not show everything needed to evaluate the variation of data. This is particularly important for datasets which do not follow the Gaussian "Normal" distribution that most researchers have become accustomed to.
While density plots are helpful in this regard, they can be less aesthetically pleasing than boxplots and harder to interpret for those familiar with boxplots. Often the only ways to compare multiple data types with density plots are to facet the plotting panes into slices of the data or to overlay density curves distinguished by colours and a legend. This approach is jarring for new users and leads to cluttered plots that are difficult to present to a wider audience.
## Violin Plots
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
```{r}
library("vioplot")
```
```{r, message=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
```
## Violin y-axis
### Logarithmic scale
However the existing violin plot packages (such as `vioplot` 0.2) do not support a log scale on the y-axis. This has been amended with the `ylog` argument.
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", ylog = T, ylim=c(log(1), log(10)))
```
This can also be invoked with the `log="y"` argument, which is compatible with `boxplot`:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", log = T, ylim=c(log(1), log(10)))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", log = "y", ylim=c(log(1), log(10)))
```
### Custom y-axes
The y-axes can also be removed with `yaxt="n"` to enable customised y-axes:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", ylog = T, yaxt="n", ylim=c(log(1), log(10)))
```
Thus custom axes can be added to violin plots, as shown here on a linear scale:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n")
axis(2, at=1:10, labels=1:10)
```
As well as on a log scale:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n", log="y", ylim=c(log(4), log(9)))
axis(2, at=log(1:10), labels=1:10)
```
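If this pattern is needed repeatedly, the log-axis handling can also be wrapped in a small helper. This is only a sketch based on the calls above; the function name `log_axis_vioplot` and its `ticks` argument are illustrative, not part of `vioplot`:
```{r}
## Sketch: wrap the log="y" plus custom-axis pattern from the previous chunk.
log_axis_vioplot <- function(..., ticks = 1:10) {
  vioplot(..., log = "y", yaxt = "n", ylim = range(log(ticks)))
  axis(2, at = log(ticks), labels = ticks)
}
log_axis_vioplot(iris$Sepal.Length[iris$Species=="setosa"],
                 iris$Sepal.Length[iris$Species=="versicolor"],
                 iris$Sepal.Length[iris$Species=="virginica"],
                 names = c("setosa", "versicolor", "virginica"),
                 main = "Sepal Length", ticks = 4:9)
```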
|
/scratch/gouwar.j/cran-all/cranData/vioplot/inst/doc/violin_ylog.Rmd
|
---
title: "Controlling Violin Plot Area"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Controlling Violin Plot Area}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
While boxplots have become the _de facto_ standard for plotting the distribution of data, this is a vast oversimplification and may not show everything needed to evaluate the variation of data. This is particularly important for datasets which do not follow the Gaussian "Normal" distribution that most researchers have become accustomed to.
While density plots are helpful in this regard, they can be less aesthetically pleasing than boxplots and harder to interpret for those familiar with boxplots. Often the only ways to compare multiple data types with density plots are to facet the plotting panes into slices of the data or to overlay density curves distinguished by colours and a legend. This approach is jarring for new users and leads to cluttered plots that are difficult to present to a wider audience.
## Violin Plots
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
```{r}
library("vioplot")
```
```{r, message=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
```
## Violin Plot Area
However there are concerns that existing violin plot packages (such as `vioplot` 0.2) scale the data to the most aesthetically suitable width rather than maintaining proportions comparable across data sets. Consider the differing distributions shown below:
```{r, echo=FALSE, message=FALSE}
par(mar=rep(1,4))
```
```{r}
par(mfrow=c(3, 1))
par(mar=rep(2, 4))
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length: setosa", col="green")
plot(density(iris$Sepal.Length[iris$Species=="versicolor"]), main="Sepal Length: versicolor", col="blue")
plot(density(iris$Sepal.Length[iris$Species=="virginica"]), main="Sepal Length: virginica", col="palevioletred4")
par(mfrow=c(1, 1))
```
```{r, echo=FALSE, message=FALSE}
par(mar=c(5, 4, 4, 2) + 0.1)
```
## Comparing datasets
None of the plots above show the relative distributions on the same scale; even if we match the x-axes of the density plots, the relative heights are obscured and difficult to compare.
```{r, echo=FALSE, message=FALSE}
par(mar=rep(2,4))
```
```{r}
par(mfrow=c(3, 1))
par(mar=rep(2, 4))
xaxis <- c(3, 9)
yaxis <- c(0, 1.25)
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length: setosa", col="green", xlim=xaxis, ylim=yaxis)
plot(density(iris$Sepal.Length[iris$Species=="versicolor"]), main="Sepal Length: versicolor", col="blue", xlim=xaxis, ylim=yaxis)
plot(density(iris$Sepal.Length[iris$Species=="virginica"]), main="Sepal Length: virginica", col="palevioletred4", xlim=xaxis, ylim=yaxis)
par(mfrow=c(1, 1))
```
```{r, echo=FALSE, message=FALSE}
par(mar=c(5, 4, 4, 2) + 0.1)
```
This can somewhat be addressed by overlaying density plots:
```{r}
par(mfrow=c(1, 1))
xaxis <- c(3, 9)
yaxis <- c(0, 1.25)
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length", col="green", xlim=xaxis, ylim=yaxis)
lines(density(iris$Sepal.Length[iris$Species=="versicolor"]), col="blue")
lines(density(iris$Sepal.Length[iris$Species=="virginica"]), col="palevioletred4")
legend("topright", fill=c("green", "blue", "palevioletred4"), legend=levels(iris$Species), cex=0.5)
```
This has the benefit of highlighting the different distributions of the data subsets. However, notice here that a figure legend becomes necessary, plot axis limits need to be defined to display the range of all distribution curves, and the plot quickly becomes cluttered if the number of factors to be compared becomes much larger.
## Area Control in Violin Plots
Therefore the `areaEqual` parameter has been added to customise the violin plot to serve a similar purpose:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", areaEqual = T)
```
If we compare this to the original vioplot functionality (defaulting to `areaEqual = FALSE`) the differences between the two are clear.
```{r, echo=FALSE, message=FALSE}
par(mar=rep(2, 4))
```
```{r}
par(mfrow=c(2,1))
par(mar=rep(2, 4))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Width)", areaEqual = F)
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T)
par(mfrow=c(1,1))
```
```{r, echo=FALSE, message=FALSE}
par(mar=c(5, 4, 4, 2) + 0.1)
```
Note that `areaEqual` considers the full area of the density distribution before the outlier tails are removed. We leave it to the user's discretion which option they elect to use. The `areaEqual` functionality is compatible with all of the customisation discussed in [the main vioplot vignette](violin_customisation.html):
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), rectCol=c("green", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), border=c("darkolivegreen4", "royalblue4", "violetred4"))
```
The violin width can further be scaled with `wex`, which maintains the proportions across the datasets if `areaEqual = TRUE`:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), rectCol=c("green", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), border=c("darkolivegreen4", "royalblue4", "violetred4"), wex=1.25)
```
## Comparing distributions
Notice the utility of `areaEqual` for cases where different datasets have different underlying distributions:
```{r}
vioplot(rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4), rlnorm(200, 0, 0.5), rnbinom(200, 10, 0.9), rlogis(20, 0, 0.5), areaEqual = F, main="Equal Width", xlab="distribution", ylab="data value", names=c("normal", "poisson", "binomial", "log-normal", "neg-binomial", "logistic"))
vioplot(rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4), rlnorm(200, 0, 0.5), rnbinom(200, 10, 0.9), rlogis(20, 0, 0.5), areaEqual = T, main="Equal Area", xlab="distribution", ylab="data value", names=c("normal", "poisson", "binomial", "log-normal", "neg-binomial", "logistic"))
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/backup/violin_area.Rmd
|
---
title: "Customising Violin Plots"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Customising Violin Plots}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
While boxplots have become the _de facto_ standard for plotting the distribution of data, this is a vast oversimplification and may not show everything needed to evaluate the variation of data. This is particularly important for datasets which do not follow the Gaussian "Normal" distribution that most researchers have become accustomed to.
While density plots are helpful in this regard, they can be less aesthetically pleasing than boxplots and harder to interpret for those familiar with boxplots. Often the only ways to compare multiple data types with density plots are to facet the plotting panes into slices of the data or to overlay density curves distinguished by colours and a legend. This approach is jarring for new users and leads to cluttered plots that are difficult to present to a wider audience.
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
```{r}
library("vioplot")
```
```{r, message=FALSE, eval=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
library("vioplot")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
```
```{r, message=FALSE, echo=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="magenta")
```
## Plot Defaults
However as we can see here the plot defaults are not aesthetically pleasing, with a rather glaring colour scheme unsuitable for professional or academic usage. Thus the plot default colours have been changed as shown here:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
```
## Plot colours: Violin Fill
Plot colours can be further customised as with the original vioplot package using the `col` argument:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue")
```
### Vectorisation
However the `vioplot` (0.2) function is unable to colour each violin separately; this is enabled with a vectorised `col` argument in `vioplot` (0.3):
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
```
## Plot colours: Violin Lines and Boxplot
Colours can also be customised for the violin fill and border separately using the `col` and `border` arguments:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue")
```
Similarly, the arguments `lineCol` and `rectCol` specify the colours of the boxplot outline and rectangle fill. For simplicity the box and whiskers of the boxplot will always have the same colour.
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
```
The same applies to the colour of the median point with `colMed`:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", colMed="violet")
```
### Combined customisation
These customised colours can be combined:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
```
### Vectorisation
These colour and shape settings can also be customised separately for each violin:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
```
This should be sufficient to customise the violin plot but further examples are given in [the areaEqual vioplot vignette](violin_area.html) including how violin plots are useful for comparing variation when data does not follow the same distribution. This document also compares the violin plot with other established methods to plot data variation.
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/backup/violin_customisation.Rmd
|
---
title: "Split Violin Plots"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
fig_width: 6
fig_height: 3
fig_align: 'center'
fig_keep: 'last'
vignette: >
%\VignetteIndexEntry{vioplot: Split Violin Plots}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Violin Plots
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
### General Set up
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
library("vioplot")
```
We set up the data with two categories (Sepal Width) as follows:
```{r, message=FALSE}
data(iris)
summary(iris$Sepal.Width)
table(iris$Sepal.Width > mean(iris$Sepal.Width))
iris_large <- iris[iris$Sepal.Width > mean(iris$Sepal.Width), ]
iris_small <- iris[iris$Sepal.Width <= mean(iris$Sepal.Width), ]
```
### Boxplots
First we plot Sepal Length on its own:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
boxplot(Sepal.Length~Species, data=iris, col="grey")
```
An indirect comparison can be achieved with par:
```{r, fig.align = 'center', fig.height = 6, fig.width = 6, fig.keep = 'last'}
{
par(mfrow=c(2,1))
boxplot(Sepal.Length~Species, data=iris_small, col = "lightblue")
boxplot(Sepal.Length~Species, data=iris_large, col = "palevioletred")
par(mfrow=c(1,1))
}
```
### Violin Plots
First we plot Sepal Length on its own:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris)
```
An indirect comparison can be achieved with par:
```{r, fig.align = 'center', fig.height = 6, fig.width = 6, fig.keep = 'last'}
{
par(mfrow=c(2,1))
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line")
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line")
par(mfrow=c(1,1))
}
```
### Split Violin Plots
A more direct comparison can be made with the `side` argument and `add = TRUE` on the second plot:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
### Median
The line median option is more suitable for side-by-side comparisons, but the point option is also still available:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "point", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "point", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
It may be necessary to include a `points` command to fix the median being overwritten by the following plots:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "point", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "point", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_large[grep(species, iris_large$Species),]$Sepal.Length))), pch = 21, col = "palevioletred4", bg = "palevioletred2")
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
Similarly points could be added where a line has been used previously:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_large[grep(species, iris_large$Species),]$Sepal.Length))), pch = 21, col = "palevioletred4", bg = "palevioletred2")
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_small[grep(species, iris_small$Species),]$Sepal.Length))), pch = 21, col = "lightblue4", bg = "lightblue2")
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
Here it is aesthetically pleasing and intuitive to interpret categorical differences in mean and variation in a continuous variable.
#### Sources
These extensions to `vioplot` are based on the code provided here:
* https://gist.github.com/mbjoseph/5852613
These have previously been discussed on the following sites:
* https://mbjoseph.github.io/posts/2018-12-23-split-violin-plots/
* http://tagteam.harvard.edu/hub_feeds/1981/feed_items/209875
* https://www.r-bloggers.com/split-violin-plots/
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/backup/violin_split.Rmd
|
---
title: "Controlling y-axis Plotting"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Controlling y-axis Plotting}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
While boxplots have become the _de facto_ standard for plotting the distribution of data this is a vast oversimplification and may not show everything needed to evaluate the variation of data. This is particularly important for datasets which do not form a Gaussian "Normal" distribution that most researchers have become accustomed to.
While density plots are helpful in this regard, they can be less aesthetically pleasing than boxplots and harder to interpret for those familiar with boxplots. Often the only ways to compare multiple data types with density plots are to facet the plotting panes into slices of the data or to overlay density curves with colours and a legend. This approach is jarring for new users and leads to cluttered plots that are difficult to present to a wider audience.
## Violin Plots
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
```{r}
library("vioplot")
```
```{r, message=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
```
## Violin y-axis
### Logarithmic scale
However the existing violin plot packages (such as \code{\link[vioplot]{vioplot}}) do not support log-scale of the y-axis. This has been amended with the `ylog` argument.
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", ylog = T)
```
This can also be invoked with the `log="y"` argument compatible with `boxplot`:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", log = T)
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", log = "y")
```
A log axis can also be passed to horizontal plots:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", log = "", horizontal = TRUE)
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", log = "y", horizontal = TRUE)
```
### Custom y-axes
The y-axes can also be removed with `yaxt="n"` to enable customised y-axes:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", ylog = T, yaxt="n")
```
Thus custom axes can be added to violin plots. As shown on a linear scale:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n")
axis(2, at=1:10, labels=1:10)
```
As well as for on a log scale:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n", log="y", ylim=c(log(4), log(9)))
axis(2, at=log(1:10), labels=1:10)
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/backup/violin_ylog.Rmd
|
---
title: "Customising Violin Plots with Histograms"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Customising Violin Plots with Histograms}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
While boxplots have become the _de facto_ standard for plotting the distribution of data this is a vast oversimplification and may not show everything needed to evaluate the variation of data. This is particularly important for datasets which do not form a Gaussian "Normal" distribution that most researchers have become accustomed to.
While density plots are helpful in this regard, they can be less aesthetically pleasing than boxplots and harder to interpret for those familiar with boxplots. Often the only ways to compare multiple data types with density plots are to facet the plotting panes into slices of the data or to overlay density curves with colours and a legend. This approach is jarring for new users and leads to cluttered plots that are difficult to present to a wider audience.
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, histogram plots show distribution information that the boxplot is unable to.
```{r}
library("vioplot")
```
```{r, message=FALSE, eval=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
library("vioplot")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
```
```{r, message=FALSE, echo=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="magenta")
```
## Plot Defaults
However as we can see here the plot defaults are not aesthetically pleasing, with a rather glaring colour scheme unsuitable for professional or academic usage. Thus the plot default colours have been changed as shown here:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
```
# Histogram plot
Here we introduce a variant of the violin plot, using a mirrored bihistogram to show the distribution:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
```
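For a quick visual comparison of the two styles, a minimal sketch (the side-by-side arrangement with `par(mfrow)` is just one option) draws the same data with both functions:
```{r}
# draw the violin and mirrored-histogram versions of the same data side by side
par(mfrow = c(1, 2))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "vioplot")
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "histoplot")
par(mfrow = c(1, 1))
```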
## Plot colours: Histogram Fill
Plot colours can be further customised as with the original vioplot package using the `col` argument:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue")
```
### Vectorisation
The `vioplot` (0.2) function is unable to colour each histogram separately; this is enabled with a vectorised `col` in `vioplot` (0.3) and `histoplot` (0.4):
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
```
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
```
## Plot colours: Histogram Lines and Boxplot
Colours can also be customised for the histogram fill and border separately using the `col` and `border` arguments:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue")
```
Similarly, the arguments `lineCol` and `rectCol` specify the colours of the boxplot outline and rectangle fill. For simplicity the box and whiskers of the boxplot will always have the same colour.
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
```
The same applies to the colour of the median point with `colMed`:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", colMed="violet")
```
### Combined customisation
These customised colours can be combined:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
```
### Vectorisation
These colour and shape settings can also be customised separately for each histogram:
```{r}
histoplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
```
## Split Bihistogram Plots
We set up the data with two categories (Sepal Width) as follows:
```{r, message=FALSE}
data(iris)
summary(iris$Sepal.Width)
table(iris$Sepal.Width > mean(iris$Sepal.Width))
iris_large <- iris[iris$Sepal.Width > mean(iris$Sepal.Width), ]
iris_small <- iris[iris$Sepal.Width <= mean(iris$Sepal.Width), ]
```
A direct comparison of two datasets can be made with the `side` argument and `add = TRUE` on the second plot:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
histoplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
histoplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/histogram_customisation.Rmd
|
---
title: "Customising Histogram Plots with Formula Input"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{histoplot: Customising Histogram Plots with Formula Input}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
Since boxplots have become the _de facto_ standard for plotting the distribution of data, most users are familiar with these and with the formula input for dataframes. However this input is not available in the original `vioplot` (0.2) package. Thus it has been supported in `histoplot` for enhanced backwards compatibility with `boxplot`.
As shown below for the `iris` dataset, histogram plots show distribution information taking formula input that `boxplot` implements but `vioplot` (0.2) is unable to. This applies the customisation demonstrated in [the main histoplot vignette using histoplot syntax](histogram_customisation.html) with the formula method commonly used for `boxplot`, `t.test`, and `lm`.
```{r}
library("vioplot")
```
```{r, message=FALSE, eval=FALSE}
data(iris)
boxplot(Sepal.Length~Species, data = iris)
```
```{r, message=FALSE, echo=FALSE}
data(iris)
boxplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
```
Whereas performing the same function does not work with `vioplot` (0.2).
```{r, message=FALSE, eval=FALSE}
devtools::install_version("vioplot", version = "0.2")
library("vioplot")
vioplot(Sepal.Length~Species, data = iris)
```
```
Error in min(data) : invalid 'type' (language) of argument
```
## Plot Defaults
```{r, message=FALSE, eval=FALSE}
vioplot(Sepal.Length~Species, data = iris)
```
```{r, message=FALSE, echo=FALSE}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="magenta")
```
Another concern we see here is that the `vioplot` defaults are not aesthetically pleasing, with a rather glaring colour scheme unsuitable for professional or academic usage. Thus the plot default colours have been changed as shown here:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
```
## Plot colours: Histogram Fill
Plot colours can be further customised as with the original vioplot package using the `col` argument:
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue")
```
### Vectorisation
However the `vioplot` (0.2) function is unable to colour each histogram separately, thus this is enabled with a vectorised `col` in `histoplot` (0.4):
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
```
## Plot colours: Histogram Lines and Boxplot
Colours can also be customised for the histogram fill and border separately using the `col` and `border` arguments:
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue")
```
Similarly, the arguments `lineCol` and `rectCol` specify the colours of the boxplot outline and rectangle fill. For simplicity the box and whiskers of the boxplot will always have the same colour.
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
```
The same applies to the colour of the median point with `colMed`:
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", colMed="violet")
```
### Combined customisation
These customised colours can be combined:
```{r}
histoplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
```
### Vectorisation
These colour and shape settings can also be customised separately for each histogram:
```{r}
histoplot(Sepal.Length~Species, data = iris, main="Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
```
## Split Bihistogram Plots
We set up the data with two categories (Sepal Width) as follows:
```{r, message=FALSE}
data(iris)
summary(iris$Sepal.Width)
table(iris$Sepal.Width > mean(iris$Sepal.Width))
iris_large <- iris[iris$Sepal.Width > mean(iris$Sepal.Width), ]
iris_small <- iris[iris$Sepal.Width <= mean(iris$Sepal.Width), ]
```
A direct comparison of two datasets can be made with the `side` argument and `add = TRUE` on the second plot:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
histoplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
histoplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/histogram_formulae.Rmd
|
---
title: "Overlaying base R graphics"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Overlaying base R graphics}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction: Integration with base R graphics
Here we demonstrate how to combine violin plots with other base R graphics. In principle any base R graphics can be overlaid on top of a violin plot for annotation.
Many problems can be resolved by overlaying base R graphics and integrating vioplot with other plotting functions. Any additional elements can be overlaid by running commands after generating the plot. The x-axis positions are the integer values 1, 2, 3, … for each violin; the y-axis shows continuous values as displayed.
For example, the following plotting elements are supported: `points`, `lines`, and `polygon`.
It is also possible to add or modify titles, axes, and legends with `title`, `axis`, and `legend`.
`vioplot()` behaves similarly to `plot()` and accepts graphical arguments that are passed on to `par()`.
### Plotting violins with highlighted medians
For example it is possible to add additional annotations.
```{r}
# generate dummy data
a <- rnorm(25, 3, 0.5)
b <- rnorm(25, 2, 1.0)
c <- rnorm(25, 2.75, 0.25)
d <- rnorm(25, 3.15, 0.375)
e <- rnorm(25, 1, 0.25)
datamat <- cbind(a, b, c, d, e)
dim(datamat)
```
```{r}
library("vioplot")
```
```{r}
vioplot(datamat, ylim = c(0, 5))
# compute medians
data.med <- apply(datamat, 2, median)
data.med
#overlay medians
lines(data.med, lty = 2, lwd = 1.5)
points(data.med, pch = 19, col = "red", cex = 2.25)
```
### Custom axes and titles
It is also possible to modify the axes labels and titles as shown in this example. Here default axes are suppressed and replaced with custom parameters.
```{r}
outcome <- c(rnorm(25, 3, 1), rnorm(25, 2, 0.5))
intervention <- c(rep("treatment", 25), rep("control", 25))
table(intervention)
names(table(intervention))
unique(sort(intervention))
intervention <- as.factor(intervention)
levels(intervention)
d <- data.frame(outcome, intervention)
vioplot(outcome ~ intervention, data = d, xaxt = 'n', yaxt = 'n',
main = "", xlab = "", ylab = "")
axis(side = 1, at = 1:length(levels(intervention)), labels = levels(intervention))
mtext("custom x labels for intervention", side = 1)
mtext("custom y labels for outcome", side = 2)
title(main = "example with custom title", sub = "subtitles are supported")
```
#### Annotated histograms
This is also supported by the histogram plot.
```{r}
histoplot(outcome ~ intervention, data = d, xaxt = 'n', yaxt = 'n',
main = "", xlab = "", ylab = "")
axis(side = 1, at = 1:length(levels(intervention)), labels = levels(intervention))
mtext("custom x labels for intervention", side = 1)
mtext("custom y labels for outcome", side = 2)
title(main = "example with custom title", sub = "subtitles are supported")
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/overlaying_annotations.Rmd
|
---
title: "Controlling Violin Plot Area"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Controlling Violin Plot Area}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
While boxplots have become the _de facto_ standard for plotting the distribution of data this is a vast oversimplification and may not show everything needed to evaluate the variation of data. This is particularly important for datasets which do not form a Gaussian "Normal" distribution that most researchers have become accustomed to.
While density plots are helpful in this regard, they can be less aesthetically pleasing than boxplots and harder to interpret for those familiar with boxplots. Often the only ways to compare multiple data types with density plots are to facet the plotting panes into slices of the data or to overlay density curves with colours and a legend. This approach is jarring for new users and leads to cluttered plots that are difficult to present to a wider audience.
## Violin Plots
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
```{r}
library("vioplot")
```
```{r, message=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
```
## Violin Plot Area
However there are concerns that existing violin plot packages (such as \code{\link[vioplot]{vioplot}}) scale the data to the most aesthetically suitable width rather than maintaining proportions comparable across data sets. Consider the differing distributions shown below:
```{r, echo=FALSE, message=FALSE}
par(mar=rep(1,4))
```
```{r}
par(mfrow=c(3, 1))
par(mar=rep(2, 4))
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length: setosa", col="green")
plot(density(iris$Sepal.Length[iris$Species=="versicolor"]), main="Sepal Length: versicolor", col="blue")
plot(density(iris$Sepal.Length[iris$Species=="virginica"]), main="Sepal Length: virginica", col="palevioletred4")
par(mfrow=c(1, 1))
```
```{r, echo=FALSE, message=FALSE}
par(mar=c(5, 4, 4, 2) + 0.1)
```
# Comparing datasets
The plots above do not show the relative distributions on the same scale; even if we match the x-axes of the density plots, the relative heights are obscured and difficult to compare.
```{r, echo=FALSE, message=FALSE}
par(mar=rep(2,4))
```
```{r}
par(mfrow=c(3, 1))
par(mar=rep(2, 4))
xaxis <- c(3, 9)
yaxis <- c(0, 1.25)
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length: setosa", col="green", xlim=xaxis, ylim=yaxis)
plot(density(iris$Sepal.Length[iris$Species=="versicolor"]), main="Sepal Length: versicolor", col="blue", xlim=xaxis, ylim=yaxis)
plot(density(iris$Sepal.Length[iris$Species=="virginica"]), main="Sepal Length: virginica", col="palevioletred4", xlim=xaxis, ylim=yaxis)
par(mfrow=c(1, 1))
```
```{r, echo=FALSE, message=FALSE}
par(mar=c(5, 4, 4, 2) + 0.1)
```
This can somewhat be addressed by overlaying density plots:
```{r}
par(mfrow=c(1, 1))
xaxis <- c(3, 9)
yaxis <- c(0, 1.25)
plot(density(iris$Sepal.Length[iris$Species=="setosa"]), main="Sepal Length", col="green", xlim=xaxis, ylim=yaxis)
lines(density(iris$Sepal.Length[iris$Species=="versicolor"]), col="blue")
lines(density(iris$Sepal.Length[iris$Species=="virginica"]), col="palevioletred4")
legend("topright", fill=c("green", "blue", "palevioletred4"), legend=levels(iris$Species), cex=0.5)
```
This has the benefit of highlighting the different distributions of the data subsets. However, notice here that a figure legend becomes necessary, plot axis limits need to be defined to display the range of all distribution curves, and the plot quickly becomes cluttered if the number of factors to be compared becomes much larger.
## Area control in Violin plot
Therefore the `areaEqual` parameter has been added to customise the violin plot to serve a similar purpose:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", areaEqual = T)
```
If we compare this to the original vioplot functionality (defaulting to `areaEqual = FALSE`) the differences between the two are clear.
```{r, echo=FALSE, message=FALSE}
par(mar=rep(2, 4))
```
```{r}
par(mfrow=c(2,1))
par(mar=rep(2, 4))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Width)", areaEqual = F)
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T)
par(mfrow=c(1,1))
```
```{r, echo=FALSE, message=FALSE}
par(mar=c(5, 4, 4, 2) + 0.1)
```
Note that `areaEqual` considers the full area of the density distribution before removing the outlier tails. We leave it up to the user's discretion which they elect to use. The `areaEqual` functionality is compatible with all of the customisation discussed in [the main vioplot vignette](violin_customisation.html).
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), rectCol=c("green", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), border=c("darkolivegreen4", "royalblue4", "violetred4"))
```
The violin width can further be scaled with `wex`, which maintains the proportions across the datasets if `areaEqual = TRUE`:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), rectCol=c("green", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), border=c("darkolivegreen4", "royalblue4", "violetred4"), wex=1.25)
```
## Comparing distributions
Notice the utility of `areaEqual` for cases where different datasets have different underlying distributions:
```{r}
vioplot(rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4), rlnorm(200, 0, 0.5), rnbinom(200, 10, 0.9), rlogis(20, 0, 0.5), areaEqual = F, main="Equal Width", xlab="distribution", ylab="data value", names=c("normal", "poisson", "binomial", "log-normal", "neg-binomial", "logistic"))
vioplot(rnorm(200, 3, 0.5), rpois(200, 2.5), rbinom(100, 10, 0.4), rlnorm(200, 0, 0.5), rnbinom(200, 10, 0.9), rlogis(20, 0, 0.5), areaEqual = T, main="Equal Area", xlab="distribution", ylab="data value", names=c("normal", "poisson", "binomial", "log-normal", "neg-binomial", "logistic"))
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/violin_area.Rmd
|
---
title: "Customising Violin Plots"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Customising Violin Plots}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
While boxplots have become the _de facto_ standard for plotting the distribution of data this is a vast oversimplification and may not show everything needed to evaluate the variation of data. This is particularly important for datasets which do not form a Gaussian "Normal" distribution that most researchers have become accustomed to.
While density plots are helpful in this regard, they can be less aesthetically pleasing than boxplots and harder to interpret for those familiar with boxplots. Often the only ways to compare multiple data types with density plots are to facet the plotting panes into slices of the data or to overlay density curves with colours and a legend. This approach is jarring for new users and leads to cluttered plots that are difficult to present to a wider audience.
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
```{r}
library("vioplot")
```
```{r, message=FALSE, eval=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
library("vioplot")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
```
```{r, message=FALSE, echo=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="magenta")
```
## Plot Defaults
However as we can see here the plot defaults are not aesthetically pleasing, with a rather glaring colour scheme unsuitable for professional or academic usage. Thus the plot default colours have been changed as shown here:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length")
```
## Plot colours: Violin Fill
Plot colours can be further customised as with the original vioplot package using the `col` argument:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue")
```
### Vectorisation
However the `vioplot` (0.2) function is unable to colour each violin separately, thus this is enabled with a vectorised `col` in `vioplot` (0.3):
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
```
## Plot colours: Violin Lines and Boxplot
Colours can also be customised for the violin fill and border separately using the `col` and `border` arguments:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue")
```
Similarly, the arguments `lineCol` and `rectCol` specify the colours of the boxplot outline and rectangle fill. For simplicity the box and whiskers of the boxplot will always have the same colour.
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
```
The same applies to the colour of the median point with `colMed`:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", colMed="violet")
```
### Combined customisation
These customised colours can be combined:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
```
### Vectorisation
These colour and shape settings can also be customised separately for each violin:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length (Equal Area)", areaEqual = T, col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
```
This should be sufficient to customise the violin plot but further examples are given in [the areaEqual vioplot vignette](violin_area.html) including how violin plots are useful for comparing variation when data does not follow the same distribution. This document also compares the violin plot with other established methods to plot data variation.
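As a brief preview of that comparison, here is a minimal sketch using simulated data (the choice of distributions, sample sizes, and seed is arbitrary); the violins make the differing distribution shapes immediately visible:
```{r}
set.seed(123)
vioplot(rnorm(200, 3, 0.5), rexp(200, 1), runif(200, 1, 5),
        names = c("normal", "exponential", "uniform"),
        col = c("lightgreen", "lightblue", "palevioletred"))
```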
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/violin_customisation.Rmd
|
---
title: "Customising Violin Plots with Formula Input"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Customising Violin Plots with Formula Input}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
Since boxplots have become the _de facto_ standard for plotting the distribution of data most users are familiar with these and the formula input for dataframes. However this input is not available in the standard `vioplot` package. Thus it has been restored here for enhanced backwards compatibility with `boxplot`.
As shown below for the `iris` dataset, violin plots show distribution information taking formula input that `boxplot` implements but `vioplot` is unable to. This demonstrates the customisation demonstrated in [the main vioplot vignette using vioplot syntax](violin_customisation.html) with the formula method commonly used for `boxplot`, `t.test`, and `lm`.
```{r}
library("vioplot")
```
```{r, message=FALSE, eval=FALSE}
data(iris)
boxplot(Sepal.Length~Species, data = iris)
```
```{r, message=FALSE, echo=FALSE}
data(iris)
boxplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
```
Whereas performing the same function does not work with `vioplot` (0.2).
```{r, message=FALSE, eval=FALSE}
devtools::install_version("vioplot", version = "0.2")
library("vioplot")
vioplot(Sepal.Length~Species, data = iris)
```
```
Error in min(data) : invalid 'type' (language) of argument
```
## Plot Defaults
```{r, message=FALSE, eval=FALSE}
vioplot(Sepal.Length~Species, data = iris)
```
```{r, message=FALSE, echo=FALSE}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="magenta")
```
Another concern we see here is that the `vioplot` defaults are not aesthetically pleasing, with a rather glaring colour scheme unsuitable for professional or academic usage. Thus the plot default colours have been changed as shown here:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length")
```
## Plot colours: Violin Fill
Plot colours can be further customised as with the original vioplot package using the `col` argument:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue")
```
### Vectorisation
However the `vioplot` (0.2) function is unable to colour each violin separately, thus this is enabled with a vectorised `col` in `vioplot` (0.3):
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"))
legend("topleft", legend=c("setosa", "versicolor", "virginica"), fill=c("lightgreen", "lightblue", "palevioletred"), cex = 0.5)
```
## Plot colours: Violin Lines and Boxplot
Colours can also be customised for the violin fill and border separately using the `col` and `border` arguments:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue")
```
Similarly, the arguments `lineCol` and `rectCol` specify the colours of the boxplot outline and rectangle fill. For simplicity the box and whiskers of the boxplot will always have the same colour.
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", rectCol="palevioletred", lineCol="violetred")
```
The same applies to the colour of the median point with `colMed`:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", colMed="violet")
```
### Combined customisation
These customised colours can be combined:
```{r}
vioplot(Sepal.Length~Species, data = iris, main = "Sepal Length", col="lightblue", border="royalblue", rectCol="palevioletred", lineCol="violetred", colMed="violet")
```
### Vectorisation
These colour and shape settings can also be customised separately for each violin:
```{r}
vioplot(Sepal.Length~Species, data = iris, main="Sepal Length", col=c("lightgreen", "lightblue", "palevioletred"), border=c("darkolivegreen4", "royalblue4", "violetred4"), rectCol=c("forestgreen", "blue", "palevioletred3"), lineCol=c("darkolivegreen", "royalblue", "violetred4"), colMed=c("green", "cyan", "magenta"), pchMed=c(15, 17, 19))
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/violin_formulae.Rmd
|
---
title: "Split Violin Plots"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
fig_width: 6
fig_height: 3
fig_align: 'center'
fig_keep: 'last'
vignette: >
%\VignetteIndexEntry{vioplot: Split Violin Plots}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Violin Plots
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
### General Set up
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
library("vioplot")
```
We set up the data with two categories (Sepal Width) as follows:
```{r, message=FALSE}
data(iris)
summary(iris$Sepal.Width)
table(iris$Sepal.Width > mean(iris$Sepal.Width))
iris_large <- iris[iris$Sepal.Width > mean(iris$Sepal.Width), ]
iris_small <- iris[iris$Sepal.Width <= mean(iris$Sepal.Width), ]
```
### Boxplots
First we plot Sepal Length on its own:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
boxplot(Sepal.Length~Species, data=iris, col="grey")
```
An indirect comparison can be achieved with par:
```{r, fig.align = 'center', fig.height = 6, fig.width = 6, fig.keep = 'last'}
{
par(mfrow=c(2,1))
boxplot(Sepal.Length~Species, data=iris_small, col = "lightblue")
boxplot(Sepal.Length~Species, data=iris_large, col = "palevioletred")
par(mfrow=c(1,1))
}
```
### Violin Plots
First we plot Sepal Length on its own:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris)
```
An indirect comparison can be achieved with par:
```{r, fig.align = 'center', fig.height = 6, fig.width = 6, fig.keep = 'last'}
{
par(mfrow=c(2,1))
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line")
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line")
par(mfrow=c(1,1))
}
```
### Split Violin Plots
A more direct comparison can be made with the `side` argument and `add = TRUE` on the second plot:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
#### Custom axes labels
Custom axes labels are supported for split violin plots. However, you must use these arguments on the *first* call of `vioplot`.
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right", xlab = "Iris species", ylab = "Length", main = "Sepals", names=paste("Iris", levels(iris$Species)))
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T)
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Width")
```
Note that this is disabled for the second `vioplot` call to avoid overlaying labels.
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", add = T, xlab = "Iris species", ylab = "Length", main = "Sepals", names=paste("Iris", levels(iris$Species)))
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Width")
```
#### Median
The line median option is more suitable for side-by-side comparisons, but the point option is also still available:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "point", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "point", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
It may be necessary to include a `points` command to fix the median being overwritten by the following plots:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "point", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "point", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_large[grep(species, iris_large$Species),]$Sepal.Length))), pch = 21, col = "palevioletred4", bg = "palevioletred2")
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
Similarly points could be added where a line has been used previously:
```{r, fig.align = 'center', fig.height = 3, fig.width = 6, fig.keep = 'last'}
vioplot(Sepal.Length~Species, data=iris_large, col = "palevioletred", plotCentre = "line", side = "right", pchMed = 21, colMed = "palevioletred4", colMed2 = "palevioletred2")
vioplot(Sepal.Length~Species, data=iris_small, col = "lightblue", plotCentre = "line", side = "left", pchMed = 21, colMed = "lightblue4", colMed2 = "lightblue2", add = T)
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_large[grep(species, iris_large$Species),]$Sepal.Length))), pch = 21, col = "palevioletred4", bg = "palevioletred2")
points(1:length(levels(iris$Species)), as.numeric(sapply(levels(iris$Species), function(species) median(iris_small[grep(species, iris_small$Species),]$Sepal.Length))), pch = 21, col = "lightblue4", bg = "lightblue2")
title(xlab = "Species", ylab = "Sepal Length")
legend("topleft", fill = c("lightblue", "palevioletred"), legend = c("small", "large"), title = "Sepal Width")
```
Here it is aesthetically pleasing and intuitive to interpret categorical differences in mean and variation in a continuous variable.
#### Sources
These extensions to `vioplot` are based on the code provided here:
* https://gist.github.com/mbjoseph/5852613
These have previously been discussed on the following sites:
* https://mbjoseph.github.io/posts/2018-12-23-split-violin-plots/
* http://tagteam.harvard.edu/hub_feeds/1981/feed_items/209875
* [https://www.r-bloggers.com/split-violin-plots/](https://www.r-bloggers.com/2013/06/split-violin-plots/)
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/violin_split.Rmd
|
---
title: "Controlling y-axis Plotting"
author: "Tom Kelly"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{vioplot: Controlling y-axis Plotting}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
While boxplots have become the _de facto_ standard for plotting the distribution of data this is a vast oversimplification and may not show everything needed to evaluate the variation of data. This is particularly important for datasets which do not form a Gaussian "Normal" distribution that most researchers have become accustomed to.
While density plots are helpful in this regard, they can be less aesthetically pleasing than boxplots and harder to interpret for those familiar with boxplots. Often the only ways to compare multiple data types with density plots are to facet the plotting panes into slices of the data or to overlay density curves with colours and a legend. This approach is jarring for new users and leads to cluttered plots that are difficult to present to a wider audience.
## Violin Plots
Therefore violin plots are a powerful tool to assist researchers to visualise data, particularly in the quality checking and exploratory parts of an analysis. Violin plots have many benefits:
- Greater flexibility for plotting variation than boxplots
- More familiarity to boxplot users than density plots
- Easier to directly compare data types than existing plots
As shown below for the `iris` dataset, violin plots show distribution information that the boxplot is unable to.
```{r}
library("vioplot")
```
```{r, message=FALSE}
data(iris)
boxplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"))
```
## Violin y-axis
### Logarithmic scale
However, the existing violin plot packages (such as `vioplot::vioplot()`) do not support a log scale on the y-axis. This has been amended with the `ylog` argument.
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", ylog = T, ylim=c(log(1), log(10)))
```
This can also be invoked with the `log="y"` argument compatible with `boxplot`:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", log = T, ylim=c(log(1), log(10)))
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", log = "y", ylim=c(log(1), log(10)))
```
### Custom y-axes
The y-axes can also be removed with `yaxt="n"` to enable customised y-axes:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n")
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", ylog = T, yaxt="n", ylim=c(log(1), log(10)))
```
Thus custom axes can be added to violin plots. As shown on a linear scale:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n")
axis(2, at=1:10, labels=1:10)
```
As well as for on a log scale:
```{r}
vioplot(iris$Sepal.Length[iris$Species=="setosa"], iris$Sepal.Length[iris$Species=="versicolor"], iris$Sepal.Length[iris$Species=="virginica"], names=c("setosa", "versicolor", "virginica"), main="Sepal Length", yaxt="n", log="y", ylim=c(log(4), log(9)))
axis(2, at=log(1:10), labels=1:10)
```
|
/scratch/gouwar.j/cran-all/cranData/vioplot/vignettes/violin_ylog.Rmd
|
#' Survival of Titanic passengers
#'
#' A data set containing the survival outcome, passenger class, age, sex, and
#' the number of family members for a large number of passengers aboard the
#' ill-fated Titanic.
#'
#' @note As mentioned in the column description, `age` contains 263 `NA`s (or
#' missing values). For a complete version (or versions) of the data set, see
#' [titanic_mice].
#'
#' @format A data frame with 1309 observations on the following 6 variables:
#'
#' * `survived` - binary with levels `"yes"` for survived and `"no"`
#' otherwise;
#' * `pclass` - integer giving the corresponding passenger (i.e., ticket)
#' class with values 1--3;
#' * `age` - the age in years of the corresponding passenger (with 263
#' missing values);
#' * `sex` - factor giving the sex of each passenger with levels
#'   `"male"` and `"female"`;
#' * `sibsp` - integer giving the number of siblings/spouses aboard for each
#' passenger (ranges from 0--8);
#' * `parch` - integer giving the number of parents/children aboard for each
#' passenger (ranges from 0--9).
#'
#' @source <https://hbiostat.org/data/>.
#'
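#' @examples
#' # A quick inspection sketch (minimal, output omitted; assumes the package
#' # data are available, e.g., via lazy loading)
#' head(titanic)
#' sum(is.na(titanic$age))  # 263 missing ages, as noted above
#'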
#' @rdname titanic
"titanic"
#' Survival of Titanic passengers
#'
#' The [titanic] data set contains 263 missing values (i.e., `NA`'s) in the
#' `age` column. This version of the data contains imputed values for the
#' `age` column using *multivariate imputation by chained equations* via
#' the [mice](https://cran.r-project.org/package=mice) package. Consequently,
#' this is a list containing 11 imputed versions of the observations contained
#' in the [titanic] data frame; each completed data set has the same dimension
#' and column structure as [titanic].
#'
#' @source
#' Greenwell, Brandon M. (2022). Tree-Based Methods for Statistical Learning in
#' R. CRC Press.
#'
#' @rdname titanic_mice
"titanic_mice"
|
/scratch/gouwar.j/cran-all/cranData/vip/R/data.R
|
#' Friedman benchmark data
#'
#' Simulate data from the Friedman 1 benchmark problem. These data were
#' originally described in Friedman (1991) and Breiman (1996). For details, see
#' [sklearn.datasets.make_friedman1](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_friedman1.html).
#'
#' @param n_samples Integer specifying the number of samples (i.e., rows) to
#' generate. Default is 100.
#'
#' @param n_features Integer specifying the number of features to generate.
#' Default is 10.
#'
#' @param n_bins Integer specifying the number of (roughly) equal sized bins to
#' split the response into. Default is `NULL` for no binning. Setting to
#' a positive integer > 1 effectively turns this into a classification problem
#' where \code{n_bins} gives the number of classes.
#'
#' @param sigma Numeric specifying the standard deviation of the noise.
#'
#' @param seed Integer specifying the random seed. If `NULL` (the default)
#' the results will be different each time the function is run.
#'
#' @references
#' Breiman, Leo (1996) Bagging predictors. Machine Learning 24, pages 123-140.
#'
#' Friedman, Jerome H. (1991) Multivariate adaptive regression splines. The
#' Annals of Statistics 19 (1), pages 1-67.
#'
#' @export
#'
#' @examples
#' gen_friedman()
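#'
#' # A hedged sketch: a positive `n_bins` (> 1) bins the response into that
#' # many classes, turning this into a classification problem (see `n_bins`)
#' gen_friedman(n_samples = 50, n_bins = 3, seed = 101)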
gen_friedman <- function(n_samples = 100, n_features = 10, n_bins = NULL,
sigma = 0.1, seed = NULL) {
bin <- function(x, n_bins) {
quantiles <- stats::quantile(x, probs = seq(from = 0, to = 1,
length = n_bins + 1))
    bins <- cut(x, breaks = quantiles, labels = FALSE, include.lowest = TRUE)
as.factor(paste0("class", bins))
}
if (!is.null(seed)) {
set.seed(seed)
}
x <- matrix(stats::runif(n_samples * n_features), ncol = n_features)
colnames(x) <- paste0("x", seq_len(n_features))
  y <- 10 * sin(pi * x[, 1L] * x[, 2L]) + 20 * (x[, 3L] - 0.5) ^ 2 +
    10 * x[, 4L] + 5 * x[, 5L] + stats::rnorm(n_samples, sd = sigma)
friedman <- as.data.frame(cbind(y = y, x))
if (!is.null(n_bins)) {
n_bins <- as.integer(n_bins)
if (n_bins < 2) {
stop("Argument `n_bins` shouls be a postive integer > 1.", call. = FALSE)
}
friedman$y <- bin(friedman$y, n_bins = n_bins)
}
friedman
}
|
/scratch/gouwar.j/cran-all/cranData/vip/R/gen_friedman.R
|
#' Extract model formula
#'
#' Calls [formula][stats::formula] to extract the formulae from various
#' modeling objects, but returns `NULL` instead of an error for objects
#' that do not contain a formula component.
#'
#' @param object An appropriate fitted model object.
#'
#' @return Either a \code{\link[stats]{formula}} object or `NULL`.
#'
#' @keywords internal
#' @noRd
get_formula <- function(object) {
UseMethod("get_formula")
}
#' @keywords internal
get_formula.default <- function(object) {
form <- tryCatch(
expr = stats::formula(object),
error = function(e) {
NULL
}
)
}
#' @keywords internal
get_formula.constparty <- function(object) {
get_formula.default(attr(stats::terms(object), which = "Formula_without_dot"))
}
#' Extract feature names
#'
#' Extract predictor names from a fitted model.
#'
#' @param object An appropriate fitted model object.
#'
#' @param ... Additional optional arguments.
#'
#' @keywords internal
get_feature_names <- function(object, ...) {
UseMethod("get_feature_names")
}
#' @keywords internal
get_feature_names.default <- function(object, ...) {
stop("Could not extract feature names from ", deparse(substitute(object)),
", please supply them via the `feature_names` argument.", call. = FALSE)
}
#' @keywords internal
get_feature_names.formula <- function(object, ...) {
  tryCatch(  # FIXME: Is the RHS always located in the third component?
    expr = all.vars(object[[3L]]),  # extract unique vars from the RHS of the formula
error = function(e) { # in case formula doesn't have both a LHS and RHS, etc.
get_feature_names.default(object)
}
)
}
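# Illustration (comments only): for a two-sided formula such as `y ~ x1 + x2`,
# the RHS is the third component, so
#   all.vars((y ~ x1 + x2)[[3L]])  #=> c("x1", "x2")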
# Package: C50 ----------------------------------------------------------------
#' @keywords internal
get_feature_names.C5.0 <- function(object, ...) {
object$predictors
}
# Package: caret ---------------------------------------------------------------
#' @keywords internal
get_feature_names.train <- function(object, ...) {
if (!is.null(object$trainingData)) {
xn <- names(object$trainingData)
xn[xn != ".outcome"]
} else {
get_feature_names.default(object)
}
}
# Package: Cubist --------------------------------------------------------------
#' @keywords internal
get_feature_names.cubist <- function(object, ...) {
object$vars$all
}
# Package: earth ----------------------------------------------------------------
#' @keywords internal
get_feature_names.earth <- function(object, ...) {
object$namesx
}
# Package: gbm -----------------------------------------------------------------
#' @keywords internal
get_feature_names.gbm <- function(object, ...) {
object$var.names
}
# Package: glmnet --------------------------------------------------------------
#' @keywords internal
get_feature_names.cv.glmnet <- function(object, ...) {
object$glmnet.fit$beta@Dimnames[[1]]
}
#' @keywords internal
get_feature_names.glmnet <- function(object, ...) {
object$beta@Dimnames[[1]]
}
#' @keywords internal
get_feature_names.multnet <- function(object, ...) {
object$beta[[1L]]@Dimnames[[1L]]
}
# Package: h2o -----------------------------------------------------------------
#' @keywords internal
get_feature_names.H2OBinomialModel <- function(object, ...) {
object@parameters$x
}
#' @keywords internal
get_feature_names.H2OMultinomialModel <- function(object, ...) {
object@parameters$x
}
#' @keywords internal
get_feature_names.H2ORegressionModel <- function(object, ...) {
object@parameters$x
}
# Package: mlr -----------------------------------------------------------------
#' @keywords internal
get_feature_names.WrappedModel <- function(object, ...) {
object$features
}
# Package: mlr3 ----------------------------------------------------------------
#' @keywords internal
get_feature_names.Learner <- function(object, ...) {
if (is.null(object$model)) {
stop("No fitted model found. Did you forget to call ",
deparse(substitute(object)), "$train()?",
call. = FALSE)
}
get_feature_names(object$model, ...)
}
# Package: neuralnet -----------------------------------------------------------
#' @keywords internal
get_feature_names.nn <- function(object, ...) {
# get_feature_names(get_formula(object))
object$model.list$variables
}
# Package: nnet ----------------------------------------------------------------
#' @keywords internal
get_feature_names.nnet <- function(object, ...) {
get_feature_names(get_formula(object))
}
# Package: pls -----------------------------------------------------------------
#' @keywords internal
get_feature_names.mvr <- function(object, ...) {
get_feature_names(get_formula(object))
}
# Package: stats ---------------------------------------------------------------
#' @keywords internal
get_feature_names.lm <- function(object, ...) {
get_feature_names(get_formula(object))
}
#' @keywords internal
get_feature_names.nls <- function(object, ...) {
# all.vars(stats::formula(object)[[3L]]) # returns all params
names(object$dataClasses)
}
#' @keywords internal
get_feature_names.ppr <- function(object, ...) {
object$xnames
}
# Package: party ---------------------------------------------------------------
#' @keywords internal
get_feature_names.BinaryTree <- function(object, ...) {
all.vars(object@data@formula$input)
}
#' @keywords internal
get_feature_names.RandomForest <- function(object, ...) {
all.vars(object@data@formula$input)
}
# Package: partykit ------------------------------------------------------------
#' @keywords internal
get_feature_names.constparty <- function(object, ...) {
get_feature_names(get_formula(object))
}
#' @keywords internal
get_feature_names.cforest <- function(object, ...) {
get_feature_names(get_formula(object))
}
# Package: randomForest --------------------------------------------------------
#' @keywords internal
get_feature_names.randomForest <- function(object, ...) {
rownames(object$importance)
}
# Package: ranger --------------------------------------------------------------
#' @keywords internal
get_feature_names.ranger <- function(object, ...) {
if (!is.null(object$forest$independent.variable.names)) {
object$forest$independent.variable.names
} else if (!is.null(names(object$variable.importance))) {
names(object$variable.importance)
} else {
stop("Unable to recover feature names from ranger models with `importance",
" = \"none\"` and `write.forest = FALSE`.")
}
}
# Package: rpart ---------------------------------------------------------------
#' @keywords internal
get_feature_names.rpart <- function(object, ...) {
# names(object$variable.importance)
get_feature_names(get_formula(object))
}
# Package: xgboost -------------------------------------------------------------
#' @keywords internal
get_feature_names.xgb.Booster <- function(object, ...) {
if (is.null(object$feature_names)) {
get_feature_names.default(object)
} else {
object$feature_names
}
}
|
/scratch/gouwar.j/cran-all/cranData/vip/R/get_feature_names.R
|
# Error message to display when the training data cannot be extracted from the object
msg <- paste0(
"The training data could not be extracted from object. You can supply the ",
"training data using the `train` argument."
)
#' @keywords internal
get_training_data <- function(object) {
UseMethod("get_training_data")
}
#' @keywords internal
get_training_data.default <- function(object) {
# # Throw error message for S4 objects (for now)
# if (isS4(object)) {
# stop(msg, call. = FALSE)
# }
#
# # Grab the call
# mcall <- tryCatch(stats::getCall(object), error = function(e) {
# stop(msg, call. = FALSE)
# })
#
# # If data component of the call is NULL, then try to make sure each
# # component is named before proceeding (taken from Advanced R, 2nd ed.)
# if (is.null(mcall[[arg]])) {
# f <- tryCatch(eval(mcall[[1L]], envir = env), error = function(e) {
# stop(msg, call. = FALSE)
# })
# if (!is.primitive(f)) {
# mcall <- match.call(f, call = mcall)
# }
# }
#
# # Grab the data component (if it exists)
# n <- 1
# while(length(env) != 0) {
# train <- tryCatch(eval(mcall[[arg]], envir = env), error = function(e) {
# NULL
# })
# if (!is.null(train) || identical(env, globalenv())) {
# break
# }
# env <- parent.frame(n) # inspect calling environment
# n <- n + 1
# }
# if (is.null(train)) {
# stop(msg, call. = FALSE)
# } else {
# if (!(is.data.frame(train))) {
# if (is.matrix(train) || is.list(train)) {
# train <- as.data.frame(train)
# # } else if (inherits(train, what = "dgCMatrix")) {
# # train <- as.data.frame(data.matrix(train))
# } else {
# stop(msg, call. = FALSE)
# }
# }
# }
#
# # Return original training data
# train
stop("Training data cannot be extracted from fitted model object. Please ",
"supply the raw training data using the `train` argument.",
call. = FALSE)
}
# Package: caret ---------------------------------------------------------------
#' @keywords internal
get_training_data.train <- function(object) {
  # By default, "train" objects have a copy of the training data stored in
# a component called "trainingData". Note that the returned data frame only
# includes the feature columns
train <- object$trainingData
if (is.null(train)) {
stop(msg, call. = FALSE)
}
train$.outcome <- NULL # remove .outcome column
train
}
# Package: h2o -----------------------------------------------------------------
#' @keywords internal
get_training_data.H2OBinomialModel <- function(object) {
as.data.frame(h2o::h2o.getFrame(object@allparameters$training_frame))
}
#' @keywords internal
get_training_data.H2OMultinomialModel <- function(object) {
as.data.frame(h2o::h2o.getFrame(object@allparameters$training_frame))
}
#' @keywords internal
get_training_data.H2ORegressionModel <- function(object) {
as.data.frame(h2o::h2o.getFrame(object@allparameters$training_frame))
}
# Package: party ---------------------------------------------------------------
#' @keywords internal
get_training_data.BinaryTree <- function(object) {
# WARNING: Returns feature columns only in a data frame with some additional
# attributes
object@data@get("input")
}
#' @keywords internal
get_training_data.RandomForest <- function(object) {
# WARNING: Returns feature columns only in a data frame with some additional
# attributes
object@data@get("input")
}
# Package: workflow ------------------------------------------------------------
#' @keywords internal
get_training_data.workflow <- function(object) {
stop("Training data cannot be extracted from workflow objects. Please ",
"supply the raw training data using the `train` argument.",
call. = FALSE)
}
|
/scratch/gouwar.j/cran-all/cranData/vip/R/get_training_data.R
|
#' List metrics
#'
#' List all available performance metrics.
#'
#' @return A data frame with the following columns:
#' * `metric` - the optimization or tuning metric;
#' * `description` - a brief description about the metric;
#' * `task` - whether the metric is suitable for regression or classification;
#' * `smaller_is_better` - logical indicating whether or not a smaller value of
#'   the metric is considered better;
#' * `yardstick_function` - the name of the corresponding function from the
#' [yardstick][yardstick::yardstick] package.
#'
#' @export
#'
#' @examples
#' (metrics <- list_metrics())
#' metrics[metrics$task == "Multiclass classification", ]
list_metrics <- function() {
data.frame(rbind(
#
# Classification
#
c("metric" = "accuracy",
"description" = "Classification accuracy",
"task" = "Binary/multiclass classification",
"smaller_is_better" = FALSE,
"yardstick_function" = "accuracy_vec"
),
c("metric" = "bal_accuracy",
"description" = "Balanced classification accuracy",
"task" = "Binary/multiclass classification",
"smaller_is_better" = FALSE,
"yardstick_function" = "bal_accuracy_vec"
),
c("metric" = "youden",
"description" = "Youden;'s index (or Youden\'s J statistic)",
"task" = "Binary/multiclass classification",
"smaller_is_better" = FALSE,
"yardstick_function" = "j_index"
),
c("metric" = "roc_auc",
"description" = "Area under ROC curve",
"task" = "Binary classification",
"smaller_is_better" = FALSE,
"yardstick_function" = "roc_auc_vec"
),
c("metric" = "pr_auc",
"description" = "Area under precision-recall (PR) curve",
"task" = "Binary classification",
"smaller_is_better" = FALSE,
"yardstick_function" = "pr_auc_vec"
),
c("metric" = "logloss",
"description" = "Log loss",
"task" = "Binary/multiclass classification",
"smaller_is_better" = TRUE,
"yardstick_function" = "mn_log_loss_vec"
),
c("metric" = "brier",
"description" = "Brier score",
"task" = "Binary/multiclass classification",
"smaller_is_better" = TRUE,
"yardstick_function" = "brier_class_vec"
),
#
# Regression
#
c("metric" = "mae",
"description" = "Mean absolute error",
"task" = "Regression",
"smaller_is_better" = TRUE,
"yardstick_function" = "mae_vec"
),
c("metric" = "mape",
"description" = "Mean absolute percentage error",
"task" = "Regression",
"smaller_is_better" = TRUE,
"yardstick_function" = "mape_vec"
),
c("metric" = "rmse",
"description" = "Root mean squared error",
"task" = "Regression",
"smaller_is_better" = TRUE,
"yardstick_function" = "rmse_vec"
),
c("metric" = "rsq",
"description" = "R-squared (correlation)",
"task" = "Regression",
"smaller_is_better" = FALSE,
"yardstick_function" = "rsq_vec"
),
c("metric" = "rsq_trad",
"description" = "R-squared (traditional)",
"task" = "Regression",
"smaller_is_better" = FALSE,
"yardstick_function" = "rsq_trad_vec"
)
), stringsAsFactors = FALSE)
}
#' Get yardstick metric
#'
#' Grabs the corresponding function from yardstick based on provided string
#' description.
#'
#' @param metric String giving the name of the metric
#'
#' @return A list with two components:
#'
#' * `metric_fun` - the corresponding function from
#' [yardstick][yardstick::yardstick].
#' * `smaller_is_better` - a logical indicating whether or not a smaller value
#' of this metric is better.
#'
#' @keywords internal
#' @noRd
get_metric <- function(metric) {
metric <- tolower(metric) # just in case
# Classification
if (metric == "accuracy") {
metric_fun <- yardstick::accuracy_vec
smaller_is_better <- FALSE
} else if (metric == "bal_accuracy") {
metric_fun <- yardstick::bal_accuracy_vec
smaller_is_better <- FALSE
} else if (metric == "youden") {
metric_fun <- yardstick::j_index_vec
smaller_is_better <- FALSE
} else if (metric == "roc_auc") {
metric_fun <- yardstick::roc_auc_vec
smaller_is_better <- FALSE
} else if (metric == "pr_auc") {
metric_fun <- yardstick::pr_auc_vec
smaller_is_better <- FALSE
} else if (metric == "logloss") {
metric_fun <- yardstick::mn_log_loss_vec
smaller_is_better <- TRUE
} else if (metric == "brier") {
metric_fun <- yardstick::brier_class_vec
smaller_is_better <- TRUE
# Regression
} else if (metric == "rsq") {
metric_fun <- yardstick::rsq_vec
smaller_is_better <- FALSE
} else if (metric == "rsq_trad") {
metric_fun <- yardstick::rsq_trad_vec
smaller_is_better <- FALSE
} else if (metric == "rmse") {
metric_fun <- yardstick::rmse_vec
smaller_is_better <- TRUE
} else if (metric == "mae") {
metric_fun <- yardstick::mae_vec
smaller_is_better <- TRUE
} else if (metric == "mape") {
metric_fun <- yardstick::mape_vec
smaller_is_better <- TRUE
} else {
# Return informative error
stop("Metric \"", metric, "\" is not supported; use ",
"`vip::list_metrics()` to print a list of currently supported ",
"metrics. Alternatively, you can pass in a `yardstick` vector ",
"function directly (e.g., `metric = yardstick::poisson_log_loss_vec` ",
"(just be sure to also set the `smaller_is_better` argument.",
call. = FALSE)
}
list("metric_fun" = metric_fun, "smaller_is_better" = smaller_is_better)
}
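# Usage sketch (comments only): `get_metric("rmse")` returns
#   list(metric_fun = yardstick::rmse_vec, smaller_is_better = TRUE),
# and callers can then compute, e.g., `m$metric_fun(truth, estimate)`.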
|
/scratch/gouwar.j/cran-all/cranData/vip/R/metrics.R
|
#' @keywords internal
abbreviate_names <- function(x, minlength) {
x$Variable <- abbreviate(x$Variable, minlength = minlength)
x
}
#' @keywords internal
check_var_fun <- function(x) {
# x should be a named list of two functions with names "con" and "cat"
if (!is.list(x)) {
stop("Argument `var_fun` should be a list.", call. = FALSE)
}
if (length(x) != 2L) {
stop("FUN should be a list of length 2.", call. = FALSE)
}
if (!identical(sort(names(x)), c("cat", "con"))) {
stop("Argument `var_fun` should be a list with comonents \"con\" and \"cat\".",
call. = FALSE)
}
if (!all(vapply(x, is.function, logical(1L)))) {
stop("Argument `var_fun` should be a list of two functions.", call. = FALSE)
}
}
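# Example of a list that would pass `check_var_fun()` (a sketch only; these
# match the `var_continuous`/`var_categorical` defaults used by `vi_firm()`):
#   list(con = stats::sd, cat = function(x) diff(range(x)) / 4)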
# #' @keywords internal
# permute_columns <- function(x, columns = NULL) {
# if (is.null(columns)) {
# stop("No columns specified for permutation.")
# }
# x[, columns] <- x[sample(nrow(x)), columns]
# x
# }
#' @keywords internal
sort_importance_scores <- function(x, decreasing) {
x[order(x$Importance, decreasing = decreasing), ]
}
|
/scratch/gouwar.j/cran-all/cranData/vip/R/utils.R
|
#' Variable importance
#'
#' Compute variable importance scores for the predictors in a model.
#'
#' @param object A fitted model object (e.g., a
#' [randomForest][randomForest::randomForest] object) or an object that inherits
#' from class `"vi"`.
#'
#' @param method Character string specifying the type of variable importance
#' (VI) to compute. Current options are:
#'
#' * `"model"` (the default), for model-specific VI scores (see
#' [vi_model][vip::vi_model] for details).
#'
#' * `"firm"`, for variance-based VI scores (see [vi_firm][vip::vi_firm] for
#' details).
#'
#' * `"permute"`, for permutation-based VI scores (see
#' [vi_permute][vip::vi_permute] for details).
#'
#' * `"shap"`, for Shapley-based VI scores (see [vi_shap][vip::vi_shap] for
#' details).
#'
#' @param feature_names Character string giving the names of the predictor
#' variables (i.e., features) of interest.
#'
#' @param abbreviate_feature_names Integer specifying the length at which to
#' abbreviate feature names. Default is `NULL` which results in no
#' abbreviation (i.e., the full name of each feature will be printed).
#'
#' @param sort Logical indicating whether or not to sort the variable
#' importance scores. Default is `TRUE`.
#'
#' @param decreasing Logical indicating whether or not the variable importance
#' scores should be sorted in descending (`TRUE`) or ascending
#' (`FALSE`) order of importance. Default is `TRUE`.
#'
#' @param scale Logical indicating whether or not to scale the variable
#' importance scores so that the largest is 100. Default is `FALSE`.
#'
#' @param rank Logical indicating whether or not to rank the variable
#' importance scores (i.e., convert to integer ranks). Default is `FALSE`.
#' Potentially useful when comparing variable importance scores across different
#' models using different methods.
#'
#' @param ... Additional optional arguments to be passed on to
#' [vi_model][vip::vi_model], [vi_firm][vip::vi_firm],
#' [vi_permute][vip::vi_permute], or [vi_shap][vip::vi_shap]; see their
#' respective help pages for details.
#'
#' @return A tidy data frame (i.e., a [tibble][tibble::tibble] object) with two
#' columns:
#'
#' * `Variable` - the corresponding feature name;
#' * `Importance` - the associated importance, computed as the average change in
#' performance after a random permutation (or permutations, if `nsim > 1`) of
#' the feature in question.
#'
#' For [lm][stats::lm]/[glm][stats::glm]-like objects, whenever
#' `method = "model"`, the sign (i.e., POS/NEG) of the original coefficient is
#' also included in a column called `Sign`.
#'
#' If `method = "permute"` and `nsim > 1`, then an additional column (`StDev`)
#' containing the standard deviation of the individual permutation scores for
#' each feature is also returned; this helps assess the stability/variation of
#' the individual permutation importance for each feature.
#'
#' @rdname vi
#'
#' @export
#'
#' @examples
#' #
#' # A projection pursuit regression example
#' #
#'
#' # Load the sample data
#' data(mtcars)
#'
#' # Fit a projection pursuit regression model
#' mtcars.ppr <- ppr(mpg ~ ., data = mtcars, nterms = 1)
#'
#' # Prediction wrapper that tells vi() how to obtain new predictions from your
#' # fitted model
#' pfun <- function(object, newdata) predict(object, newdata = newdata)
#'
#' # Compute permutation-based variable importance scores
#' set.seed(1434) # for reproducibility
#' (vis <- vi(mtcars.ppr, method = "permute", target = "mpg", nsim = 10,
#' metric = "rmse", pred_wrapper = pfun, train = mtcars))
#'
#' # Plot variable importance scores
#' vip(vis, include_type = TRUE, all_permutations = TRUE,
#' geom = "point", aesthetics = list(color = "forestgreen", size = 3))
#'
#' #
#' # A binary classification example
#' #
#' \dontrun{
#' library(rpart) # for classification and regression trees
#'
#' # Load Wisconsin breast cancer data; see ?mlbench::BreastCancer for details
#' data(BreastCancer, package = "mlbench")
#' bc <- subset(BreastCancer, select = -Id) # for brevity
#'
#' # Fit a standard classification tree
#' set.seed(1032) # for reproducibility
#' tree <- rpart(Class ~ ., data = bc, cp = 0)
#'
#' # Prune using 1-SE rule (e.g., use `plotcp(tree)` for guidance)
#' cp <- tree$cptable
#' cp <- cp[cp[, "nsplit"] == 2L, "CP"]
#' tree2 <- prune(tree, cp = cp) # tree with three splits
#'
#' # Default tree-based VIP
#' vip(tree2)
#'
#' # Computing permutation importance requires a prediction wrapper. For
#' # classification, the return value depends on the chosen metric; see
#' # `?vip::vi_permute` for details.
#' pfun <- function(object, newdata) {
#' # Need vector of predicted class probabilities when using log-loss metric
#' predict(object, newdata = newdata, type = "prob")[, "malignant"]
#' }
#'
#' # Permutation-based importance (note that only the predictors that show up
#' # in the final tree have non-zero importance)
#' set.seed(1046) # for reproducibility
#' vi(tree2, method = "permute", nsim = 10, target = "Class", train = bc,
#' metric = "logloss", pred_wrapper = pfun, reference_class = "malignant")
#'
#' # Equivalent (but not sorted)
#' set.seed(1046) # for reproducibility
#' vi_permute(tree2, nsim = 10, target = "Class", metric = "logloss",
#' pred_wrapper = pfun, reference_class = "malignant")
#' }
vi <- function(object, ...) {
UseMethod("vi")
}
#' @rdname vi
#'
#' @export
vi.default <- function(
object,
method = c("model", "firm", "permute", "shap"),
feature_names = NULL,
abbreviate_feature_names = NULL,
sort = TRUE,
decreasing = TRUE,
scale = FALSE,
rank = FALSE,
...
) {
# Construct VI scores
method <- match.arg(method)
if (method == "firm") {
if (is.null(feature_names)) {
feature_names <- get_feature_names(object)
}
}
# Construct tibble of VI scores
tib <- switch(method,
"model" = vi_model(object, ...),
"firm" = vi_firm(object, feature_names = feature_names, ...),
"permute" = vi_permute(object, feature_names = feature_names, ...),
vi_shap(object, feature_names = feature_names, ...)
)
# Save attribute
vi_type <- attr(tib, which = "type")
# Remove rows with NA
tib <- stats::na.omit(tib)
# Sort VI scores (if requested)
if (sort) {
tib <- sort_importance_scores(tib, decreasing = decreasing)
}
# Abbreviate feature names (if requested)
if (!is.null(abbreviate_feature_names)) {
tib <- abbreviate_names(tib, minlength = abbreviate_feature_names)
}
# Scale VI scores so that largest is 100
if (scale) {
tib$Importance <- tib$Importance / max(tib$Importance) * 100
}
# Rank VI scores (i.e., convert to integer ranks)
if (rank) {
tib$Importance <- rev(rank(tib$Importance, ties.method = "average"))
}
# Restore attribute
attr(tib, which = "type") <- vi_type
# Add "vi" class
if (!inherits(tib, what = "vi")) { # In case class gets stripped?
class(tib) <- c("vi", class(tib))
}
# Return results
tib
}
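# Usage sketch (comments only; `fit` is an illustrative fitted model with
# model-specific VI support):
#   vi(fit)                # model-specific scores, sorted by default
#   vi(fit, scale = TRUE)  # rescale so the largest score is 100
#   vi(fit, rank = TRUE)   # convert scores to integer ranks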
|
/scratch/gouwar.j/cran-all/cranData/vip/R/vi.R
|
#' Variance-based variable importance
#'
#' Compute variance-based variable importance (VI) scores using a simple
#' \emph{feature importance ranking measure} (FIRM) approach; for details, see
#' \href{https://arxiv.org/abs/1805.04755}{Greenwell et al. (2018)} and
#' \href{https://arxiv.org/abs/1904.03959}{Scholbeck et al. (2019)}.
#'
#' @param object A fitted model object (e.g., a
#' [randomForest][randomForest::randomForest] object).
#'
#' @param feature_names Character string giving the names of the predictor
#' variables (i.e., features) of interest. If `NULL` (the default) then the
#' internal `get_feature_names()` function will be called to try and extract
#' them automatically. It is good practice to always specify this argument.
#'
#' @param train A matrix-like R object (e.g., a data frame or matrix)
#' containing the training data. If `NULL` (the default) then the
#' internal `get_training_data()` function will be called to try and extract it
#' automatically. It is good practice to always specify this argument.
#'
#' @param var_fun Deprecated; use `var_continuous` and `var_categorical`
#' instead.
#'
#' @param var_continuous Function used to quantify the variability of effects
#' for continuous features. Defaults to using the sample standard deviation
#' (i.e., [stats::sd()]).
#'
#' @param var_categorical Function used to quantify the variability of effects
#' for categorical features. Defaults to using the range divided by four; that
#' is, `function(x) diff(range(x)) / 4`.
#'
#' @param ... Additional arguments to be passed on to the [pdp::partial()]
#' function (e.g., `ice = TRUE`, `prob = TRUE`, or a prediction wrapper via the
#' `pred.fun` argument); see `?pdp::partial` for details on these and other
#' useful arguments.
#'
#' @return A tidy data frame (i.e., a [tibble][tibble::tibble] object) with two
#' columns:
#'
#' * `Variable` - the corresponding feature name;
#' * `Importance` - the associated importance, computed as described in
#' [Greenwell et al. (2018)](https://arxiv.org/abs/1805.04755).
#'
#' @details This approach is based on quantifying the relative "flatness" of the
#' effect of each feature and assumes the user has some familiarity with the
#' [pdp::partial()] function. Feature effects can be assessed
#' using *partial dependence* (PD) plots (Friedman, 2001) or
#' *individual conditional expectation* (ICE) plots (Goldstein et al., 2014).
#' These methods are model-agnostic and can be applied to any supervised
#' learning algorithm. By default, relative "flatness" is defined by computing
#' the standard deviation of the y-axis values for each feature effect plot for
#' numeric features; for categorical features, the default is to use range
#' divided by 4. This can be changed via the `var_continuous` and
#' `var_categorical` arguments. See
#' [Greenwell et al. (2018)](https://arxiv.org/abs/1805.04755) for details and
#' additional examples.
#'
#' @note This approach can provide misleading results in the presence of
#' interaction effects (akin to interpreting main effect coefficients in a
#' linear model with higher-level interaction effects).
#'
#' @references
#' J. H. Friedman. Greedy function approximation: A gradient boosting machine.
#' *Annals of Statistics*, **29**: 1189-1232, 2001.
#'
#' Goldstein, A., Kapelner, A., Bleich, J., and Pitkin, E., Peeking Inside the
#' Black Box: Visualizing Statistical Learning With Plots of Individual
#' Conditional Expectation. (2014) *Journal of Computational and Graphical
#' Statistics*, **24**(1): 44-65, 2015.
#'
#' Greenwell, B. M., Boehmke, B. C., and McCarthy, A. J. A Simple
#' and Effective Model-Based Variable Importance Measure. arXiv preprint
#' arXiv:1805.04755 (2018).
#'
#' Scholbeck, C. A., Molnar, C., Heumann, C., Bischl, B., and Casalicchio, G.
#' Sampling, Intervention, Prediction, Aggregation: A
#' Generalized Framework for Model-Agnostic Interpretations. arXiv preprint
#' arXiv:1904.03959 (2019).
#'
#'
#' @rdname vi_firm
#'
#' @export
#'
#' @examples
#' \dontrun{
#' #
#' # A projection pursuit regression example
#' #
#'
#' # Load the sample data
#' data(mtcars)
#'
#' # Fit a projection pursuit regression model
#' mtcars.ppr <- ppr(mpg ~ ., data = mtcars, nterms = 1)
#'
#' # Compute variable importance scores using the FIRM method; note that the pdp
#' # package knows how to work with a "ppr" object, so there's no need to pass
#' # the training data or a prediction wrapper, but it's good practice.
#' vi_firm(mtcars.ppr, train = mtcars)
#'
#' # For unsupported models, you need to define a prediction wrapper; this
#' # approach will work for ANY model (supported or unsupported), so it's
#' # better to just always define and pass one
#' pfun <- function(object, newdata) {
#' # To use partial dependence, this function needs to return the AVERAGE
#' # prediction (for ICE, simply omit the averaging step)
#' mean(predict(object, newdata = newdata))
#' }
#'
#' # Equivalent to the previous results (but would work if this type of model
#' # was not explicitly supported)
#' vi_firm(mtcars.ppr, pred.fun = pfun, train = mtcars)
#'
#' # Equivalent VI scores, but the output is sorted by default
#' vi(mtcars.ppr, method = "firm")
#'
#' # Use MAD to estimate variability of the partial dependence values
#' vi_firm(mtcars.ppr, var_continuous = stats::mad)
#'
#' # Plot VI scores
#' vip(mtcars.ppr, method = "firm", train = mtcars, pred.fun = pfun)
#' }
vi_firm <- function(object, ...) {
UseMethod("vi_firm")
}
#' @rdname vi_firm
#'
#' @export
vi_firm.default <- function(
object,
feature_names = NULL,
train = NULL,
var_fun = NULL,
var_continuous = stats::sd,
var_categorical = function(x) diff(range(x)) / 4,
...
) {
# Check for pdp package
if (!requireNamespace("pdp", quietly = TRUE)) {
stop("Package \"pdp\" needed for this function to work. ",
"Please install it.", call. = FALSE)
}
# Catch deprecated arguments
if (!is.null(var_fun)) {
stop("Argument `var_fun` is deprecated; please use the `var_continuous` ",
"and `var_categorical` arguments instead.",
call. = FALSE)
}
# Try to extract feature names from `object`
if (is.null(feature_names)) {
feature_names <- get_feature_names(object)
}
# Try to extract training data if not supplied
if (is.null(train)) {
train <- get_training_data(object)
}
# Construct PD/ICE-based variable importance scores
vis <- lapply(feature_names, function(x) {
firm(object, feature_name = x, var_continuous = var_continuous,
var_categorical = var_categorical, train = train, ...)
})
# vis <- numeric(length(feature_names)) # loses "effects" attribute
# for (i in seq_along(feature_names)) {
# vis[i] <- firm(object, feature_name = feature_names[i],
# var_continuous = var_continuous,
# var_categorical = var_categorical, ...)
# }
tib <- tibble::tibble(
"Variable" = feature_names,
"Importance" = unlist(vis)
)
fe <- lapply(vis, FUN = function(x) attr(x, which = "effects"))
names(fe) <- feature_names
attr(tib, which = "effects") <- fe
attr(tib, which = "type") <- "firm"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' Feature importance ranking measure
#'
#' Compute variable importance using a variance-based measure referred to as the
#' \emph{feature importance ranking measure} (FIRM).
#'
#' @keywords internal
#'
#' @noRd
firm <- function(object, feature_name, var_continuous, var_categorical, ...) {
# Only allow for a single feature
if (length(feature_name) != 1L) {
stop("Only a single feature allowed in `firm()`.", call. = FALSE)
}
# Compute feature effect
fe <- pdp::partial(object, pred.var = feature_name, ...)
# Compute partial dependence-based variable importance scores
var_fun <- if (is.factor(fe[[feature_name]])) {
var_categorical # categorical feature
} else {
var_continuous # continuous feature
}
# Compute FIRM
res <- if ("yhat.id" %in% names(fe)) { # ICE
mean(tapply(fe$yhat, INDEX = fe[["yhat.id"]], FUN = var_fun))
} else { # PD
var_fun(fe$yhat)
}
# Include estimated feature effects as an additional attribute
attr(res, which = "effects") <- fe
# Return result
res
}
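# Illustration of the FIRM idea (comments only): a flat partial dependence
# profile, e.g. yhat = c(20.1, 20.0, 20.2), has a small spread
# (sd(c(20.1, 20.0, 20.2)) = 0.1), so that feature receives a low score; a
# steeper profile yields a larger spread and hence a higher score.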
|
/scratch/gouwar.j/cran-all/cranData/vip/R/vi_firm.R
|
#' Model-specific variable importance
#'
#' Compute model-specific variable importance scores for the predictors in a
#' fitted model.
#'
#' @param object A fitted model object (e.g., a
#' [randomForest][randomForest::randomForest] object). See the details section
#' below to see how variable importance is computed for supported model types.
#'
#' @param type Character string specifying the type of variable importance to
#' return (only used for some models). See the details section below for which
#' methods this argument applies to.
#'
#' @param lambda Numeric value for the penalty parameter of a
#' [glmnet][glmnet::glmnet] model (this is equivalent to the `s`
#' argument in [coef.glmnet][glmnet::coef.glmnet()]). See the section on
#' [glmnet][glmnet::glmnet] in the details below.
#'
#' @param ncomp An integer for the number of partial least squares components
#' to be used in the importance calculations. If more components are requested
#' than were used in the model, all of the model's components are used.
#'
#' @param ... Additional optional arguments to be passed on to other methods.
#' See the details section below for arguments that can be passed to specific
#' object types.
#'
#' @return A tidy data frame (i.e., a [tibble][tibble::tibble] object) with two
#' columns:
#'
#' * `Variable` - the corresponding feature name;
#' * `Importance` - the associated importance, computed as the average change in
#' performance after a random permutation (or permutations, if `nsim > 1`) of
#' the feature in question.
#'
#' For [lm][stats::lm]/[glm][stats::glm]-like objects, the sign (i.e., POS/NEG)
#' of the original coefficient is also included in a column called `Sign`.
#'
#' @details
#'
#' Computes model-specific variable importance scores depending on the class of
#' `object`:
#'
#' * [C5.0][C50::C5.0] - Variable importance is measured by determining
#' the percentage of training set samples that fall into all the terminal nodes
#' after the split. For example, the predictor in the first split automatically
#' has an importance measurement of 100 percent since all samples are affected
#' by this split. Other predictors may be used frequently in splits, but if the
#' terminal nodes cover only a handful of training set samples, the importance
#' scores may be close to zero. The same strategy is applied to rule-based
#' models and boosted versions of the model. The underlying function can also
#' return the number of times each predictor was involved in a split by using
#' the option `metric = "splits"`. See [C5imp][C50::C5imp()] for
#' details.
#'
#' * [cubist][Cubist::cubist.default] - The Cubist output contains variable usage
#' statistics. It gives the percentage of times where each variable was used in
#' a condition and/or a linear model. Note that this output will probably be
#' inconsistent with the rules shown in the output from summary.cubist. At each
#' split of the tree, Cubist saves a linear model (after feature selection) that
#' is allowed to have terms for each variable used in the current split or any
#' split above it. Quinlan (1992) discusses a smoothing algorithm where each
#' model prediction is a linear combination of the parent and child model along
#' the tree. As such, the final prediction is a function of all the linear
#' models from the initial node to the terminal node. The percentages shown in
#' the Cubist output reflects all the models involved in prediction (as opposed
#' to the terminal models shown in the output). The variable importance used
#' here is a linear combination of the usage in the rule conditions and the
#' model. See [summary.cubist][Cubist::summary.cubist()] and
#' [varImp][caret::varImp()] for details.
#'
#' * [glmnet][glmnet::glmnet] - Similar to (generalized) linear models,
#' the absolute value of the coefficients are returned for a specific model.
#' It is important that the features (and hence, the estimated coefficients) be
#' standardized prior to fitting the model. You can specify which coefficients
#' to return by passing the specific value of the penalty parameter via the
#' `lambda` argument (this is equivalent to the `s` argument in
#' [coef.glmnet][glmnet::coef.glmnet()]). By default, `lambda = NULL` and the coefficients
#' corresponding to the final penalty value in the sequence are returned; in
#' other words, you should ALWAYS SPECIFY `lambda`! For [cv.glmnet][glmnet::cv.glmnet]
#' objects, the largest value of lambda such that the error is within one standard
#' error of the minimum is used by default. For a multinomial response, the
#' coefficients corresponding to the first class are used; that is, the first
#' component of [coef.glmnet][glmnet::coef.glmnet()].
#'
#' * [cforest][partykit::cforest] - Variable importance is measured in a
#' way similar to those computed by [importance][randomForest::importance()].
#' Besides the standard version, a conditional version is available that
#' adjusts for correlations between predictor variables. If
#' `conditional = TRUE`, the importance of each variable is computed by
#' permuting within a grid defined by the predictors that are associated (with
#' 1 - *p*-value greater than threshold) to the variable of interest. The
#' resulting variable importance score is conditional in the sense of beta
#' coefficients in regression models, but represents the effect of a variable in
#' both main effects and interactions. See Strobl et al. (2008) for details.
#' Note, however, that all random forest results are subject to random
#' variation. Thus, before interpreting the importance ranking, check whether
#' the same ranking is achieved with a different random seed - or otherwise
#' increase the number of trees ntree in [ctree_control][partykit::ctree_control()].
#' Note that in the presence of missings in the predictor variables the
#' procedure described in Hapfelmeier et al. (2012) is performed. See
#' [varimp][partykit::varimp()] for details.
#'
#' * [earth][earth::earth] - The [earth][earth::earth] package uses
#' three criteria for estimating the variable importance in a MARS model (see
#' [evimp][earth::evimp] for details):
#'
#' - The `nsubsets` criterion (`type = "nsubsets"`) counts the
#' number of model subsets that include each feature. Variables that are
#' included in more subsets are considered more important. This is the
#' criterion used by [summary.earth][earth::summary.earth] to print variable
#' importance. By "subsets" we mean the subsets of terms generated by
#' `earth()`'s backward pass. There is one subset for each model size
#' (from one to the size of the selected model) and the subset is the best set
#' of terms for that model size. (These subsets are specified in the
#' `$prune.terms` component of `earth()`'s return value.) Only
#' subsets that are smaller than or equal in size to the final model are used
#' for estimating variable importance. This is the default method used by
#' [vi_model][vip::vi_model].
#'
#' - The `rss` criterion (`type = "rss"`) first calculates the
#' decrease in the RSS for each subset relative to the previous subset during
#' `earth()`’s backward pass. (For multiple response models, RSS's are
#' calculated over all responses.) Then for each variable it sums these
#' decreases over all subsets that include the variable. Finally, for ease of
#' interpretation the summed decreases are scaled so the largest summed
#' decrease is 100. Variables which cause larger net decreases in the RSS are
#' considered more important.
#'
#' - The `gcv` criterion (`type = "gcv"`) is similar to the
#' `rss` approach, but uses the GCV statistic instead of the RSS. Note
#' that adding a variable can sometimes increase the GCV. (Adding the variable
#' has a deleterious effect on the model, as measured in terms of its
#' estimated predictive power on unseen data.) If that happens often enough,
#' the variable can have a negative total importance, and thus appear less
#' important than unused variables.
#'
#' * [gbm][gbm::gbm] - Variable importance is computed using one of
#' two approaches (See [summary.gbm][gbm::summary.gbm] for details):
#'
#' - The standard approach (`type = "relative.influence"`) described
#' in Friedman (2001). When `distribution = "gaussian"` this returns the
#' reduction of squared error attributable to each variable. For other loss
#' functions this returns the reduction attributable to each variable in sum
#' of squared error in predicting the gradient on each iteration. It describes
#' the *relative influence* of each variable in reducing the loss
#' function. This is the default method used by [vi_model][vip::vi_model].
#'
#' - An experimental permutation-based approach
#' (`type = "permutation"`). This method randomly permutes each predictor
#' variable at a time and computes the associated reduction in predictive
#' performance. This is similar to the variable importance measures Leo
#' Breiman uses for random forests, but [gbm][gbm::gbm] currently computes using
#' the entire training dataset (not the out-of-bag observations).
#'
#' * [H2OModel][h2o::H2OModel] - See [h2o.varimp][h2o::h2o.varimp] or visit
#' \url{https://docs.h2o.ai/h2o/latest-stable/h2o-docs/variable-importance.html}
#' for details.
#'
#' * [nnet][nnet::nnet] - Two popular methods for constructing variable
#' importance scores with neural networks are the Garson algorithm
#' (Garson 1991), later modified by Goh (1995), and the Olden algorithm
#' (Olden et al. 2004). For both algorithms, the basis of these importance
#' scores is the network’s connection weights. The Garson algorithm determines
#' variable importance by identifying all weighted connections between the nodes
#' of interest. Olden’s algorithm, on the other hand, uses the product of the
#' raw connection weights between each input and output neuron and sums the
#' product across all hidden neurons. This has been shown to outperform the
#' Garson method in various simulations. For DNNs, a similar method due to
#' Gedeon (1997) considers the weights connecting the input features to the
#' first two hidden layers (for simplicity and speed); but this method can be
#' slow for large networks. To implement the Olden and Garson algorithms, use
#' `type = "olden"` and `type = "garson"`, respectively. See
#' [garson][NeuralNetTools::garson] and [olden][NeuralNetTools::olden]
#' for details.
#'
#' * [lm][stats::lm]/[glm][stats::glm] - In (generalized) linear models,
#' variable importance is typically based on the absolute value of the
#' corresponding *t*-statistics (Bring, 1994). For such models, the sign of the
#' original coefficient is also returned. By default, `type = "stat"` is used;
#' however, if the inputs have been appropriately standardized then the raw
#' coefficients can be used with `type = "raw"`. Note that Bring (1994)
#' provides motivation for using the absolute value of the associated
#' *t*-statistics.
#'
#' * [sparklyr][sparklyr::ml_feature_importances] - The Spark ML
#' library provides standard variable importance measures for tree-based methods
#' (e.g., random forests). See
#' [ml_feature_importances][sparklyr::ml_feature_importances] for details.
#'
#' * [randomForest][randomForest::randomForest] - Random forests typically
#' provide two measures of variable importance.
#'
#' - The first measure is computed from permuting out-of-bag (OOB) data: for
#' each tree, the prediction error on the OOB portion of the data is recorded
#' (error rate for classification and MSE for regression). Then the same is
#' done after permuting each predictor variable. The difference between the
#' two are then averaged over all trees in the forest, and normalized by the
#' standard deviation of the differences. If the standard deviation of the
#' differences is equal to 0 for a variable, the division is not done (but the
#' average is almost always equal to 0 in that case).
#'
#' - The second measure is the total decrease in node impurities from
#' splitting on the variable, averaged over all trees. For classification, the
#' node impurity is measured by the Gini index. For regression, it is measured
#' by residual sum of squares.
#'
#' See [importance][randomForest::importance] for details, including
#' additional arguments that can be passed via the `...` argument in
#' [vi_model][vip::vi_model].
#'
#' * [cforest][party::cforest] - Same approach described in
#' [cforest][partykit::cforest] (from package **partykit**) above. See
#' [varimp][party::varimp] and [varimpAUC][party::varimpAUC] (if `type = "auc"`)
#' for details.
#'
#' * [ranger][ranger::ranger] - Variable importance for
#' [ranger][ranger::ranger] objects is computed in the usual way for random
#' forests. The approach used depends on the `importance` argument provided
#' in the initial call to [ranger][ranger::ranger]. See
#' [importance][ranger::importance] for details.
#'
#' * [rpart][rpart::rpart] - As stated in one of the [rpart][rpart::rpart]
#' vignettes, a variable may appear in the tree many times, either as a primary
#' or a surrogate variable. An overall measure of variable importance is the sum
#' of the goodness of split measures for each split for which it was the primary
#' variable, plus "goodness" * (adjusted agreement) for all splits in which it
#' was a surrogate. Imagine two variables which were essentially duplicates of
#' each other; if we did not count surrogates, they would split the importance
#' with neither showing up as strongly as it should. See
#' [rpart][rpart::rpart] for details.
#'
#' * [caret][caret::train] - Various model-specific and model-agnostic
#' approaches that depend on the learning algorithm employed in the original
#' call to [caret][caret::train]. See [varImp][caret::varImp] for details.
#'
#' * [xgboost][xgboost::xgboost] - For linear models, the variable
#' importance is the absolute magnitude of the estimated coefficients. For that
#' reason, in order to obtain a meaningful ranking by importance for a linear
#' model, the features need to be on the same scale (which you also would want
#' to do when using either L1 or L2 regularization). Otherwise, the approach
#' described in Friedman (2001) for [gbm][gbm::gbm]s is used. See
#' [xgb.importance][xgboost::xgb.importance] for details. For tree models, you
#' can obtain three different types of variable importance:
#'
#' - Using `type = "gain"` (the default) gives the fractional contribution of
#' each feature to the model based on the total gain of the corresponding
#' feature's splits.
#'
#' - Using `type = "cover"` gives the number of observations related to each
#' feature.
#'
#' - Using `type = "frequency"` gives the percentages representing
#' the relative number of times each feature has been used throughout each
#' tree in the ensemble.
#'
#' * [lightgbm][lightgbm::lightgbm] - Same as for [xgboost][xgboost::xgboost]
#' models, except [lgb.importance][lightgbm::lgb.importance] (which this method
#' calls internally) has an additional argument, `percentage`, that defaults to
#' `TRUE`, resulting in the VI scores shown as a relative percentage; pass
#' `percentage = FALSE` in the call to `vi_model()` to produce VI scores for
#' [lightgbm][lightgbm::lightgbm] models on the raw scale.
#'
#' @source
#' Johan Bring (1994) How to Standardize Regression Coefficients, The American
#' Statistician, 48:3, 209-213, DOI: 10.1080/00031305.1994.10476059.
#'
#' @note Inspired by the [caret](https://cran.r-project.org/package=caret)
#' package's [varImp][caret::varImp] function.
#'
#' @rdname vi_model
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Basic example using imputed titanic data set
#' t3 <- titanic_mice[[1L]]
#'
#' # Fit a simple model
#' set.seed(1449) # for reproducibility
#' bst <- lightgbm::lightgbm(
#' data = data.matrix(subset(t3, select = -survived)),
#' label = ifelse(t3$survived == "yes", 1, 0),
#' params = list("objective" = "binary", "force_row_wise" = TRUE),
#' verbose = 0
#' )
#'
#' # Compute VI scores
#' vi(bst) # defaults to `method = "model"`
#' vi_model(bst) # same as above
#'
#' # Same as above (since default is `method = "model"`), but returns a plot
#' vip(bst, geom = "point")
#' vi_model(bst, type = "cover")
#' vi_model(bst, type = "cover", percentage = FALSE)
#'
#' # Compare to
#' lightgbm::lgb.importance(bst)
#' }
#'
vi_model <- function(object, ...) {
UseMethod("vi_model")
}
#' @rdname vi_model
#'
#' @export
vi_model.default <- function(object, ...) {
stop("Model-specific variable importance scores are currently not available ",
"for this type of model.", call. = FALSE)
}
# Package: C50 -----------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.C5.0 <- function(object, type = c("usage", "splits"), ...) {
# # Check for dependency
# if (!requireNamespace("C50", quietly = TRUE)) {
# stop("Package \"C50\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Determine which type of variable importance to compute
type <- match.arg(type)
  # Construct model-specific variable importance scores
vis <- C50::C5imp(object, metric = type, ...)
tib <- tibble::tibble(
"Variable" = rownames(vis),
"Importance" = vis[["Overall"]]
)
# Add variable importance type attribute
attr(tib, which = "type") <- type
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: caret ---------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.train <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("caret", quietly = TRUE)) {
# stop("Package \"caret\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- caret::varImp(object, ...)
if (inherits(vis, "varImp.train")) {
vis <- vis$importance
}
tib <- tibble::tibble(
"Variable" = rownames(vis),
"Importance" = vis[["Overall"]]
)
# Add variable importance type attribute
attr(tib, which = "type") <- "caret"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: Cubist --------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.cubist <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("Cubist", quietly = TRUE)) {
# stop("Package \"Cubist\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
  # Construct model-specific variable importance scores
vis <- caret::varImp(object, ...)
tib <- tibble::tibble(
"Variable" = rownames(vis),
"Importance" = vis[["Overall"]]
)
# Add variable importance type attribute
attr(tib, which = "type") <- "usage"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: earth ---------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.earth <- function(object, type = c("nsubsets", "rss", "gcv"), ...) {
# # Check for dependency
# if (!requireNamespace("earth", quietly = TRUE)) {
# stop("Package \"earth\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Determine which type of variable importance to compute
type <- match.arg(type)
# Construct model-specific variable importance scores
vis <- earth::evimp(object, trim = FALSE, ...)[, type, drop = TRUE]
tib <- tibble::tibble(
"Variable" = names(vis),
"Importance" = unname(vis) # per tibble 3.0.0
)
tib$Variable <- gsub("-unused$", replacement = "", x = tib$Variable)
# Add variable importance type attribute
attr(tib, which = "type") <- type
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: gbm -----------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.gbm <- function(object, type = c("relative.influence", "permutation"),
...) {
# # Check for dependency
# if (!requireNamespace("gbm", quietly = TRUE)) {
# stop("Package \"gbm\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Determine which type of variable importance to compute
type <- match.arg(type)
# Construct model-specific variable importance scores
vis <- if (type == "relative.influence") {
gbm::summary.gbm(object, plotit = FALSE, order = TRUE,
method = gbm::relative.influence, ...)
} else {
gbm::summary.gbm(object, plotit = FALSE, order = TRUE,
method = gbm::permutation.test.gbm, ...)
}
tib <- tibble::tibble(
"Variable" = vis$var,
"Importance" = vis$rel.inf
)
# Add variable importance type attribute
attr(tib, which = "type") <- type
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: glmnet --------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.glmnet <- function(object, lambda = NULL, ...) {
# Extract coefficients
#s <- list(...)$s
if (is.null(lambda)) {
lambda <- min(object$lambda)
}
coefs <- stats::coef(object, s = lambda)
if (inherits(coefs, what = "list")) { # "multnet" objects
coefs <- coefs[[1L]]
}
coefs <- coefs[, 1L, drop = TRUE]
# Remove intercept (if it's there)
if ("(Intercept)" %in% names(coefs)) {
coefs <- coefs[setdiff(x = names(coefs), y = "(Intercept)")]
}
# Construct model-specific variable importance scores
tib <- tibble::tibble(
"Variable" = names(coefs),
"Importance" = unname(abs(coefs)), # per tibble 3.0.0
"Sign" = ifelse(sign(coefs) == 1, yes = "POS", no = "NEG")
)
# Add variable importance type attribute
attr(tib, which = "type") <- "|coefficient|"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.cv.glmnet <- function(object, lambda = NULL, ...) {
# Extract coefficients
#s <- list(...)$s
if (is.null(lambda)) {
lambda <- "lambda.1se"
}
coefs <- stats::coef(object, s = lambda)
if (inherits(coefs, what = "list")) { # "multnet" objects
coefs <- coefs[[1L]]
}
coefs <- coefs[, 1L, drop = TRUE]
# Remove intercept (if it's there)
if ("(Intercept)" %in% names(coefs)) {
coefs <- coefs[setdiff(x = names(coefs), y = "(Intercept)")]
}
# Construct model-specific variable importance scores
tib <- tibble::tibble(
"Variable" = names(coefs),
"Importance" = unname(abs(coefs)),
"Sign" = ifelse(sign(coefs) == 1, yes = "POS", no = "NEG")
)
# Add variable importance type attribute
attr(tib, which = "type") <- "|coefficient|"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: h2o -----------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.H2OBinomialModel <- function(object, ...) {
# Construct model-specific variable importance scores
tib <- tibble::as_tibble(h2o::h2o.varimp(object))
if (object@algorithm == "glm") {
names(tib) <- c("Variable", "Importance", "Sign")
# FIXME: Extra row at the bottom?
} else {
tib <- tib[1L:2L]
names(tib) <- c("Variable", "Importance")
}
# Add variable importance type attribute
attr(tib, which = "type") <- "h2o"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.H2OMultinomialModel <- function(object, ...) {
# Construct model-specific variable importance scores
tib <- tibble::as_tibble(h2o::h2o.varimp(object))
if (object@algorithm == "glm") {
names(tib) <- c("Variable", "Importance", "Sign")
# FIXME: Extra row at the bottom?
} else {
tib <- tib[1L:2L]
names(tib) <- c("Variable", "Importance")
}
# Add variable importance type attribute
attr(tib, which = "type") <- "h2o"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.H2ORegressionModel <- function(object, ...) {
# Construct model-specific variable importance scores
tib <- tibble::as_tibble(h2o::h2o.varimp(object))
if (object@algorithm == "glm") {
names(tib) <- c("Variable", "Importance", "Sign")
# FIXME: Extra row at the bottom?
} else {
tib <- tib[1L:2L]
names(tib) <- c("Variable", "Importance")
}
# Add variable importance type attribute
attr(tib, which = "type") <- "h2o"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: lightgbm ------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.lgb.Booster <- function(object, type = c("gain", "cover", "frequency"),
...) {
# # Check for dependency
# if (!requireNamespace("xgboost", quietly = TRUE)) {
# stop("Package \"xgboost\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Determine which type of variable importance to compute
type <- match.arg(type)
# Construct model-specific variable importance scores
imp <- lightgbm::lgb.importance(model = object, ...)
names(imp) <- tolower(names(imp))
# if ("weight" %in% names(imp)) {
# type <- "weight" # gblinear
# }
vis <- tibble::as_tibble(imp)[, c("feature", type)]
tib <- tibble::tibble(
"Variable" = vis$feature,
"Importance" = vis[[2L]]
)
# Add variable importance type attribute
attr(tib, which = "type") <- type
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: mixOmics -----------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.mixo_pls <- function(object, ncomp = NULL, ...) {
# Check for dependency
if (!requireNamespace("mixOmics", quietly = TRUE)) {
stop("Bioconductor package \"mixOmics\" needed for this function to work. ",
"Please install it.", call. = FALSE)
}
if (is.null(ncomp)) {
ncomp <- object$ncomp
} else {
if (length(ncomp) != 1) {
stop("'ncomp' should be a single integer.")
}
if (!is.integer(ncomp)) {
ncomp <- as.integer(ncomp)
}
}
vis <- mixOmics::vip(object)
if (ncomp > ncol(vis)) {
warning(ncomp, " components were requested but only ", ncol(vis),
" are available. Results are for ", ncol(vis), ".")
ncomp <- ncol(vis)
}
tib <- tibble::tibble(
"Variable" = rownames(vis),
"Importance" = vis[,ncomp]
)
# Add variable importance type attribute
attr(tib, which = "type") <- "mixOmics"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.mixo_spls <- vi_model.mixo_pls
# Package: mlr -----------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.WrappedModel <- function(object, ...) {
vi_model(object$learner.model, ...)
}
# Package: mlr3 ----------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.Learner <- function(object, ...) {
if (is.null(object$model)) {
stop("No fitted model found. Did you forget to call ",
deparse(substitute(object)), "$train()?",
call. = FALSE)
}
vi_model(object$model, ...)
}
# Package: neuralnet -----------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.nn <- function(object, type = c("olden", "garson"), ...) {
# Check for dependency
if (!requireNamespace("NeuralNetTools", quietly = TRUE)) {
stop("Package \"NeuralNetTools\" needed for this function to work. Please ",
"install it.", call. = FALSE)
}
# Determine which type of variable importance to compute
type <- match.arg(type)
# Construct model-specific variable importance scores
tib <- if (type == "olden") { # Olden's algorithm
vis <- NeuralNetTools::olden(object, bar_plot = FALSE)
tibble::tibble(
"Variable" = rownames(vis),
"Importance" = vis$importance
)
} else { # Garson's algorithm
vis <- NeuralNetTools::garson(object, bar_plot = FALSE)
tibble::tibble(
"Variable" = rownames(vis),
"Importance" = vis$rel_imp
)
}
# Add variable importance type attribute
attr(tib, which = "type") <- type
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: nnet ----------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.nnet <- function(object, type = c("olden", "garson"), ...) {
# Check for dependency
if (!requireNamespace("NeuralNetTools", quietly = TRUE)) {
stop("Package \"NeuralNetTools\" needed for this function to work. Please ",
"install it.", call. = FALSE)
}
# Determine which type of variable importance to compute
type <- match.arg(type)
# Construct model-specific variable importance scores
tib <- if (type == "olden") { # Olden's algorithm
vis <- NeuralNetTools::olden(object, bar_plot = FALSE)
tibble::tibble(
"Variable" = rownames(vis),
"Importance" = vis$importance
)
} else { # Garson's algorithm
vis <- NeuralNetTools::garson(object, bar_plot = FALSE)
tibble::tibble(
"Variable" = rownames(vis),
"Importance" = vis$rel_imp
)
}
# Add variable importance type attribute
attr(tib, which = "type") <- type
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: party ---------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.RandomForest <- function(object, type = c("accuracy", "auc"), ...) {
# # Check for dependency
# if (!requireNamespace("party", quietly = TRUE)) {
# stop("Package \"party\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Determine which type of variable importance to compute
type <- match.arg(type)
# Construct model-specific variable importance scores
vis <- if (type == "auc") {
# Check for dependency
if (!requireNamespace("varImp", quietly = TRUE)) {
stop("Package \"varImp\" needed for this function to work. Please ",
"install it.", call. = FALSE)
}
party::varimpAUC(object, ...) # rm ... for now
} else {
party::varimp(object, ...) # rm ... for now
}
tib <- tibble::tibble(
"Variable" = names(vis),
"Importance" = unname(vis) # per tibble 3.0.0
)
# Add variable importance type attribute
attr(tib, which = "type") <- type
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: partykit ------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.constparty <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("partykit", quietly = TRUE)) {
# stop("Package \"partykit\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- partykit::varimp(object, ...)
features <- attr(stats::terms(object), which = "term.labels")
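  # Features missing from the varimp() output (e.g., not used in any split)
  # are assigned an importance score of zero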
unused <- setdiff(features, names(vis))
unused <- stats::setNames(rep(0, times = length(unused)), nm = unused)
vis <- c(vis, unused)
tib <- tibble::tibble(
"Variable" = names(vis),
"Importance" = unname(vis) # per tibble 3.0.0
)
# Add variable importance type attribute
attr(tib, which = "type") <- "permutation"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.cforest <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("partykit", quietly = TRUE)) {
# stop("Package \"partykit\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- partykit::varimp(object, ...)
tib <- tibble::tibble(
"Variable" = names(vis),
"Importance" = unname(vis) # per tibble 3.0.0
)
# Add variable importance type attribute
attr(tib, which = "type") <- "permutation"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: pls -----------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.mvr <- function(object, ...) {
# FIXME: For now, just default to using caret.
#
# Check for dependency
if (!requireNamespace("caret", quietly = TRUE)) {
stop("Package \"caret\" needed for this function to work. Please ",
"install it.", call. = FALSE)
}
vis <- caret::varImp(object, ...)
tib <- tibble::tibble(
"Variable" = rownames(vis),
"Importance" = vis[["Overall"]]
)
# Add variable importance type attribute
attr(tib, which = "type") <- "caret"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: randomForest --------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.randomForest <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("randomForest", quietly = TRUE)) {
# stop("Package \"randomForest\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- randomForest::importance(object, ...)
matched_cols <- intersect(
x = colnames(vis),
y = c("MeanDecreaseAccuracy", "MeanDecreaseGini", "%IncMSE", "IncNodePurity")
)
vis <- vis[, matched_cols, drop = FALSE]
type <- colnames(vis)[1L]
vis <- vis[, 1L, drop = TRUE]
tib <- tibble::tibble(
"Variable" = names(vis),
"Importance" = unname(vis) # per tibble 3.0.0
)
# Add variable importance type attribute
attr(tib, which = "type") <- type
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: ranger --------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.ranger <- function(object, ...) {
# Check for dependency
if (!requireNamespace("ranger", quietly = TRUE)) {
stop("Package \"ranger\" needed for this function to work. Please ",
"install it.", call. = FALSE)
}
# Construct model-specific variable importance scores
vis <- ranger::importance(object)
tib <- tibble::tibble(
"Variable" = names(vis),
"Importance" = unname(vis) # per tibble 3.0.0
)
# Add variable importance type attribute
attr(tib, which = "type") <- object$importance.mode
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: rpart ---------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.rpart <- function(object, ...) {
# Construct model-specific variable importance scores
importance_scores <- object$variable.importance
if (is.null(importance_scores)) {
stop("Cannot extract variable importance scores from a tree with no ",
"splits.", call. = FALSE)
}
# Place variable importance scores in a tibble (the first and second columns
# should always be labelled "Variable" and "Importance", respectively)
tib <- tibble::tibble(
"Variable" = names(importance_scores),
"Importance" = unname(importance_scores)
)
# Add variable importance type attribute
attr(tib, which = "type") <- "GoodnessOfSplit"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: RSNNS ---------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.mlp <- function(object, type = c("olden", "garson"), ...) {
# Check for dependency
if (!requireNamespace("NeuralNetTools", quietly = TRUE)) {
stop("Package \"NeuralNetTools\" needed for this function to work. Please ",
"install it.", call. = FALSE)
}
# Determine which type of variable importance to compute
type <- match.arg(type)
# Construct model-specific variable importance scores
tib <- if (type == "olden") { # Olden's algorithm
vis <- NeuralNetTools::olden(object, bar_plot = FALSE)
tibble::tibble(
"Variable" = rownames(vis),
"Importance" = vis$importance
)
} else { # Garson's algorithm
vis <- NeuralNetTools::garson(object, bar_plot = FALSE)
tibble::tibble(
"Variable" = rownames(vis),
"Importance" = vis$rel_imp
)
}
# Add variable importance type attribute
attr(tib, which = "type") <- type
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: sparklyr ------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.ml_model_decision_tree_regression <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("sparklyr", quietly = TRUE)) {
# stop("Package \"sparklyr\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- sparklyr::ml_feature_importances(object, ...)
names(vis) <- c("Variable", "Importance")
tib <- tibble::as_tibble(vis)
# Add variable importance type attribute
attr(tib, which = "type") <- "impurity"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.ml_model_decision_tree_classification <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("sparklyr", quietly = TRUE)) {
# stop("Package \"sparklyr\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- sparklyr::ml_feature_importances(object, ...)
names(vis) <- c("Variable", "Importance")
tib <- tibble::as_tibble(vis)
# Add variable importance type attribute
attr(tib, which = "type") <- "impurity"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.ml_model_gbt_regression <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("sparklyr", quietly = TRUE)) {
# stop("Package \"sparklyr\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- sparklyr::ml_feature_importances(object, ...)
names(vis) <- c("Variable", "Importance")
tib <- tibble::as_tibble(vis)
# Add variable importance type attribute
attr(tib, which = "type") <- "impurity"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.ml_model_gbt_classification <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("sparklyr", quietly = TRUE)) {
# stop("Package \"sparklyr\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- sparklyr::ml_feature_importances(object, ...)
names(vis) <- c("Variable", "Importance")
tib <- tibble::as_tibble(vis)
# Add variable importance type attribute
attr(tib, which = "type") <- "impurity"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.ml_model_generalized_linear_regression <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("sparklyr", quietly = TRUE)) {
# stop("Package \"sparklyr\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- sparklyr::tidy(object, ...)[, c("term", "statistic")]
if (vis$term[1L] == "(Intercept)") {
vis <- vis[-1L, ]
}
vis$Sign <- ifelse(sign(vis$statistic) == 1, yes = "POS", no = "NEG")
vis$statistic <- abs(vis$statistic)
names(vis) <- c("Variable", "Importance", "Sign")
tib <- tibble::as_tibble(vis)
# Add variable importance type attribute
attr(tib, which = "type") <- "|z-statistic|"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.ml_model_linear_regression <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("sparklyr", quietly = TRUE)) {
# stop("Package \"sparklyr\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- sparklyr::tidy(object, ...)[, c("term", "statistic")]
if (vis$term[1L] == "(Intercept)") {
vis <- vis[-1L, ]
}
vis$Sign <- ifelse(sign(vis$statistic) == 1, yes = "POS", no = "NEG")
vis$statistic <- abs(vis$statistic)
names(vis) <- c("Variable", "Importance", "Sign")
tib <- tibble::as_tibble(vis)
# Add variable importance type attribute
attr(tib, which = "type") <- "|t-statistic|"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.ml_model_random_forest_regression <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("sparklyr", quietly = TRUE)) {
# stop("Package \"sparklyr\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- sparklyr::ml_feature_importances(object, ...)
names(vis) <- c("Variable", "Importance")
tib <- tibble::as_tibble(vis)
# Add variable importance type attribute
attr(tib, which = "type") <- "impurity"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
#' @rdname vi_model
#'
#' @export
vi_model.ml_model_random_forest_classification <- function(object, ...) {
# # Check for dependency
# if (!requireNamespace("sparklyr", quietly = TRUE)) {
# stop("Package \"sparklyr\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Construct model-specific variable importance scores
vis <- sparklyr::ml_feature_importances(object, ...)
names(vis) <- c("Variable", "Importance")
tib <- tibble::as_tibble(vis)
# Add variable importance type attribute
attr(tib, which = "type") <- "impurity"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# Package: stats ---------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.lm <- function(object, type = c("stat", "raw"), ...) {
# Determine which type of variable importance to compute
type <- match.arg(type)
# pattern to match based on type
if (type == "stat") {
type_pattern <- "^(t|z) value"
} else {
type_pattern <- "Estimate"
}
# Construct model-specific variable importance scores
coefs <- summary(object)$coefficients
if (attr(object$terms, "intercept") == 1) {
coefs <- coefs[-1L, , drop = FALSE]
}
pos <- grep(type_pattern, x = colnames(coefs))
tib <- tibble::tibble(
"Variable" = rownames(coefs),
"Importance" = unname(abs(coefs[, pos])),
"Sign" = ifelse(sign(coefs[, "Estimate"]) == 1, yes = "POS", no = "NEG")
)
# Add variable importance type attribute
if (type == "stat") {
label <- colnames(coefs)[pos]
    label <- substr(label, start = 1, stop = 1)  # extract just the t or z
attr(tib, which = "type") <- paste0("|", label, "-statistic|")
} else {
attr(tib, which = "type") <- "|raw coefficients|"
}
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
# tidymodels ===================================================================
# Package: parsnip -------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.model_fit <- function(object, ...) {
vi_model(parsnip::extract_fit_engine(object), ...)
}
# Package: workflows -----------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.workflow <- function(object, ...) {
vi_model(workflows::extract_fit_engine(object), ...)
}
#===============================================================================
# Package: xgboost -------------------------------------------------------------
#' @rdname vi_model
#'
#' @export
vi_model.xgb.Booster <- function(object, type = c("gain", "cover", "frequency"),
...) {
# # Check for dependency
# if (!requireNamespace("xgboost", quietly = TRUE)) {
# stop("Package \"xgboost\" needed for this function to work. Please ",
# "install it.", call. = FALSE)
# }
# Determine which type of variable importance to compute
type <- match.arg(type)
# Construct model-specific variable importance scores
imp <- xgboost::xgb.importance(model = object, ...)
names(imp) <- tolower(names(imp))
if ("weight" %in% names(imp)) {
type <- "weight" # gblinear
}
vis <- tibble::as_tibble(imp)[, c("feature", type)]
tib <- tibble::tibble(
"Variable" = vis$feature,
"Importance" = vis[[2L]]
)
# Add variable importance type attribute
attr(tib, which = "type") <- type
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
|
/scratch/gouwar.j/cran-all/cranData/vip/R/vi_model.R
|
#' Permutation-based variable importance
#'
#' Compute permutation-based variable importance scores for the predictors in a
#' model; for details on the algorithm, see Greenwell and Boehmke (2020).
#'
#' @param object A fitted model object (e.g., a
#' [randomForest][randomForest::randomForest()] object).
#'
#' @param feature_names Character string giving the names of the predictor
#' variables (i.e., features) of interest. If `NULL` (the default) then they
#' will be inferred from the `train` and `target` arguments (see below). It is
#' good practice to always specify this argument.
#'
#' @param train A matrix-like R object (e.g., a data frame or matrix)
#' containing the training data. If `NULL` (the default) then the
#' internal `get_training_data()` function will be called to try and extract it
#' automatically. It is good practice to always specify this argument.
#'
#' @param target Either a character string giving the name (or position) of the
#' target column in `train` or, if `train` only contains feature
#' columns, a vector containing the target values used to train `object`.
#'
#' @param metric Either a function or character string specifying the
#' performance metric to use in computing model performance (e.g., RMSE for
#' regression or accuracy for binary classification). If `metric` is a
#' function, then it requires two arguments, `truth` and `estimate`,
#' and should return a single, numeric value. Ideally, this should be the same
#' metric that was used to train `object`. See [list_metrics()] for a list of
#' built-in metrics.
#'
#' @param smaller_is_better Logical indicating whether or not a smaller value
#' of `metric` is better. Default is `NULL`. Must be supplied if
#' `metric` is a user-supplied function.
#'
#' @param type Character string specifying how to compare the baseline and
#' permuted performance metrics. Current options are `"difference"` (the
#' default) and `"ratio"`.
#'
#' @param nsim Integer specifying the number of Monte Carlo replications to
#' perform. Default is 1. If `nsim > 1`, the results from each replication
#' are simply averaged together (the standard deviation will also be returned).
#'
#' @param keep Logical indicating whether or not to keep the individual
#' permutation scores for all `nsim` repetitions. If `TRUE` (the
#' default) then the individual variable importance scores will be stored in an
#' attribute called `"raw_scores"`. (Only used when `nsim > 1`.)
#'
#' @param sample_size Integer specifying the size of the random sample to use
#' for each Monte Carlo repetition. Default is `NULL` (i.e., use all of the
#' available training data). Cannot be specified with `sample_frac`. Can be
#' used to reduce computation time with large data sets.
#'
#' @param sample_frac Proportion specifying the size of the random sample to use
#' for each Monte Carlo repetition. Default is `NULL` (i.e., use all of the
#' available training data). Cannot be specified with `sample_size`. Can be
#' used to reduce computation time with large data sets.
#'
#' @param reference_class Deprecated, use `event_level` instead.
#'
#' @param event_level String specifying which factor level of `truth` to
#' consider as the "event". Options are `"first"` (the default) or `"second"`.
#' This argument is only applicable for binary classification when `metric` is
#' one of `"roc_auc"`, `"pr_auc"`, or `"youden"`. This argument is passed on to
#' the corresponding [yardstick][yardstick::yardstick] metric.
#'
#' @param pred_wrapper Prediction function that requires two arguments,
#' `object` and `newdata`. The output of this function should be
#' determined by the `metric` being used:
#'
#' * Regression - A numeric vector of predicted outcomes.
#' * Binary classification - A vector of predicted class labels (e.g., if using
#' misclassification error) or a vector of predicted class probabilities for the
#' reference class (e.g., if using log loss or AUC).
#' * Multiclass classification - A vector of predicted class labels (e.g., if
#' using misclassification error) or a matrix/data frame of predicted class
#' probabilities for each class (e.g., if using log loss or AUC).
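#'
#' For example, a minimal sketch of a prediction wrapper for a **ranger**
#' probability forest (assuming the class of interest is labeled `"yes"`)
#' might look like:
#'
#' ```
#' pfun <- function(object, newdata) {
#'   predict(object, data = newdata)$predictions[, "yes"]
#' }
#' ```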
#'
#' @param verbose Logical indicating whether or not to print information during
#' the construction of variable importance scores. Default is `FALSE`.
#'
#' @param parallel Logical indicating whether or not to run `vi_permute()`
#' in parallel (using a backend provided by the [foreach][foreach::foreach]
#' package). Default is `FALSE`. If `TRUE`, a
#' [foreach][foreach::foreach]-compatible backend must be registered before
#' calling `vi_permute()`. Note that `set.seed()` will not work with
#' [foreach][foreach::foreach]'s parallelized for loops; for a workaround, see
#' [this solution](https://github.com/koalaverse/vip/issues/145).
#'
#' @param parallelize_by Character string specifying whether to parallelize
#' across features (`parallelize_by = "features"`) or repetitions
#' (`parallelize_by = "reps"`); the latter is only useful whenever
#' `nsim > 1`. Default is `"features"`.
#'
#' @param ... Additional optional arguments to be passed on to
#' [foreach][foreach::foreach] (e.g., `.packages` or `.export`).
#'
#' @return A tidy data frame (i.e., a [tibble][tibble::tibble] object) with two
#' columns:
#'
#' * `Variable` - the corresponding feature name;
#' * `Importance` - the associated importance, computed as the average change in
#' performance after a random permutation (or permutations, if `nsim > 1`) of
#' the feature in question.
#'
#' If `nsim > 1`, then an additional column (`StDev`) containing the standard
#' deviation of the individual permutation scores for each feature is also
#' returned; this helps assess the stability/variation of the individual
#' permutation importance for each feature.
#'
#' @importFrom foreach foreach %do% %dopar%
#'
#' @references
#' Brandon M. Greenwell and Bradley C. Boehmke, The R Journal (2020) 12:1,
#' pages 343-366.
#'
#' @rdname vi_permute
#'
#' @export
#'
#' @examples
#' \dontrun{
#' #
#' # Regression example
#' #
#'
#' library(ranger) # for fitting random forests
#'
#' # Simulate data from Friedman 1 benchmark; only x1-x5 are important!
#' trn <- gen_friedman(500, seed = 101) # ?vip::gen_friedman
#'
#' # Prediction wrapper
#' pfun <- function(object, newdata) {
#' # Needs to return vector of predictions from a ranger object; see
#' # `ranger::predict.ranger` for details on making predictions
#' predict(object, data = newdata)$predictions
#' }
#'
#' # Fit a (default) random forest
#' set.seed(0803) # for reproducibility
#' rfo <- ranger(y ~ ., data = trn)
#'
#' # Compute permutation-based VI scores
#' set.seed(2021) # for reproducibility
#' vis <- vi(rfo, method = "permute", target = "y", metric = "rsq",
#' pred_wrapper = pfun, train = trn)
#' print(vis)
#'
#' # Same as above, but using `vi_permute()` directly
#' set.seed(2021) # for reproducibility
#' vi_permute(rfo, target = "y", metric = "rsq", pred_wrapper = pfun
#' train = trn)
#'
#' # Plot VI scores (could also replace `vi()` with `vip()` in above example)
#' vip(vis, include_type = TRUE)
#'
#' # Mean absolute error
#' mae <- function(truth, estimate) {
#' mean(abs(truth - estimate))
#' }
#'
#' # Permutation-based VIP with user-defined MAE metric
#' set.seed(1101) # for reproducibility
#' vi_permute(rfo, target = "y", metric = mae, smaller_is_better = TRUE,
#' pred_wrapper = pfun, train = trn)
#'
#' # Same as above, but using `yardstick` package instead of user-defined metric
#' set.seed(1101) # for reproducibility
#' vi_permute(rfo, target = "y", metric = yardstick::mae_vec,
#' smaller_is_better = TRUE, pred_wrapper = pfun, train = trn)
#'
#' #
#' # Classification (binary) example
#' #
#'
#' library(randomForest) # another package for fitting random forests
#'
#' # Complete (i.e., imputed version of titanic data); see `?vip::titanic_mice`
#' head(t1 <- titanic_mice[[1L]])
#' t1$pclass <- as.ordered(t1$pclass) # makes more sense as an ordered factor
#'
#' # Fit another (default) random forest
#' set.seed(2053) # for reproducibility
#' (rfo2 <- randomForest(survived ~ ., data = t1))
#'
#' # Define prediction wrapper for predicting class labels from a
#' # "randomForest" object
#' pfun_class <- function(object, newdata) {
#' # Needs to return factor of classifications
#' predict(object, newdata = newdata, type = "response")
#' }
#'
#' # Sanity check
#' pfun_class(rfo2, newdata = head(t1))
#' ## 1 2 3 4 5 6
#' ## yes yes yes no yes no
#' ## Levels: no yes
#'
#' # Compute mean decrease in accuracy
#' set.seed(1359) # for reproducibility
#' vi(rfo2,
#' method = "permute",
#' train = t1,
#' target = "survived",
#' metric = "accuracy", # or pass in `yardstick::accuracy_vec` directly
#' # smaller_is_better = FALSE, # no need to set for built-in metrics
#' pred_wrapper = pfun_class,
#' nsim = 30 # use 30 repetitions
#' )
#' ## # A tibble: 5 × 3
#' ## Variable Importance StDev
#' ## <chr> <dbl> <dbl>
#' ## 1 sex 0.228 0.0110
#' ## 2 pclass 0.0825 0.00505
#' ## 3 age 0.0721 0.00557
#' ## 4 sibsp 0.0346 0.00430
#' ## 5 parch 0.0183 0.00236
#'
#' # Define prediction wrapper for predicting class probabilities from a
#' # "randomForest" object
#' pfun_prob <- function(object, newdata) {
#' # Needs to return vector of class probabilities for event level of interest
#' predict(object, newdata = newdata, type = "prob")[, "yes"]
#' }
#'
#' # Sanity check
#' pfun_prob(rfo2, newdata = head(t1)) # estimated P(survived=yes | x)
#' ## 1 2 3 4 5 6
#' ## 0.990 0.864 0.486 0.282 0.630 0.078
#'
#' # Compute mean increase in Brier score
#' set.seed(1411) # for reproducibility
#' vi(rfo2,
#' method = "permute",
#' train = t1,
#' target = "survived",
#' metric = yardstick::brier_class_vec, # or pass in `"brier"` directly
#' smaller_is_better = TRUE, # need to set when supplying a function; smaller Brier scores are better
#' pred_wrapper = pfun_prob,
#' nsim = 30 # use 30 repetitions
#' )
#'
#' ## # A tibble: 5 × 3
#' ## Variable Importance StDev
#' ## <chr> <dbl> <dbl>
#' ## 1 sex 0.210 0.00869
#' ## 2 pclass 0.0992 0.00462
#' ## 3 age 0.0970 0.00469
#' ## 4 parch 0.0547 0.00273
#' ## 5 sibsp 0.0422 0.00200
#'
#' # Some metrics, like AUROC, treat one class as the "event" of interest. In
#' # such cases, it's important to make sure the event level (which typically
#' # defaults to whichever event class comes first in alphabetical order)
#' # matches the event class that corresponds to the prediction wrapper's
#' # returned probabilities. To do this, you can (and should) set the
#' # `event_level` argument. For instance, our prediction wrapper specified
#' # `survived = "yes"` as the event of interest, but this is considered the
#' # second event:
#' levels(t1$survived)
#' ## [1] "no" "yes"
#'
#' # So, we need to specify the second class as the event of interest via the
#' # `event_level` argument (otherwise, we would get the negative of the results
#' # we were hoping for; a telltale sign the event level and prediction wrapper
#' # do not match)
#' set.seed(1413) # for reproducibility
#' vi(rfo2,
#' method = "permute",
#' train = t1,
#' target = "survived",
#' metric = "roc_auc",
#' event_level = "second", # use "yes" as class label/"event" of interest
#' pred_wrapper = pfun_prob,
#' nsim = 30 # use 30 repetitions
#' )
#'
#' ## # A tibble: 5 × 3
#' ## Variable Importance StDev
#' ## <chr> <dbl> <dbl>
#' ## 1 sex 0.229 0.0137
#' ## 2 pclass 0.0920 0.00533
#' ## 3 age 0.0850 0.00477
#' ## 4 sibsp 0.0283 0.00211
#' ## 5 parch 0.0251 0.00351
#' }
vi_permute <- function(object, ...) {
UseMethod("vi_permute")
}
#' @rdname vi_permute
#'
#' @export
vi_permute.default <- function(
object,
feature_names = NULL,
train = NULL,
target = NULL,
metric = NULL,
smaller_is_better = NULL,
type = c("difference", "ratio"),
nsim = 1,
keep = TRUE,
sample_size = NULL,
sample_frac = NULL,
reference_class = NULL, # deprecated
event_level = NULL,
pred_wrapper = NULL, # FIXME: Why give this a default?
verbose = FALSE,
parallel = FALSE,
parallelize_by = c("features", "repetitions"),
...
) {
# # Check for yardstick package
# if (!requireNamespace("yardstick", quietly = TRUE)) {
# stop("Package \"yardstick\" needed for this function to work. ",
# "Please install it.", call. = FALSE)
# }
  # FIXME: Is there a better way to fix this?
#
# ❯ checking R code for possible problems ... NOTE
# vi_permute.default: no visible binding for global variable ‘j’
# Undefined global functions or variables:
# j
i <- j <- NULL
# # Try to extract feature names if not supplied
# if (is.null(feature_names)) {
# feature_names <- get_feature_names(object)
# }
# Try to extract training data if not supplied
if (is.null(train)) {
train <- get_training_data(object)
}
# Throw informative error messages if required arguments are missing
if (is.null(target)) {
stop("Could not find target. Please specify a target variable via the ",
"`target` argument; see `?vip::vi_permute` for details.",
call. = FALSE)
}
if (is.null(metric)) {
stop("Could not find metric. Please specify a valid metric via the ",
"`metric` argument; see `?vip::vi_permute` for details.",
call. = FALSE)
}
if (is.null(pred_wrapper)) {
stop("Could not find prediction wrapper. Please specify a valid prediction ",
"function via the `pred_wrapper` argument; see `?vip::vi_permute` ",
"for details.", call. = FALSE)
}
# Extract feature names and separate features from target (if necessary)
if (is.character(target)) {
if (is.null(feature_names)) {
feature_names <- setdiff(colnames(train), target)
}
train_x <- train[, feature_names]
train_y <- train[, target, drop = TRUE]
} else {
if (is.null(feature_names)) {
feature_names <- colnames(train)
}
train_x <- train
train_y <- target
}
# Sample the data?
if (!is.null(sample_size) && !is.null(sample_frac)) {
stop("Arguments `sample_size` and `sample_frac` cannot both be specified.")
}
if (!is.null(sample_size)) {
if (sample_size <= 0 || sample_size > nrow(train)) {
stop("Argument `sample_size` must be in (0, ", nrow(train), "].")
}
}
if (!is.null(sample_frac)) {
if (sample_frac <= 0 || sample_frac > 1) {
stop("Argument `sample_frac` must be in (0, 1].")
}
sample_size <- round(nrow(train) * sample_frac, digits = 0)
}
# Metric
if (is.function(metric)) { # user-supplied function
# If `metric` is a user-supplied function, then `smaller_is_better` cannot
# be `NULL`.
if (is.null(smaller_is_better) || !is.logical(smaller_is_better)) {
stop("Please specify a logical value for `smaller_is_better`.",
call. = FALSE)
}
# Check prediction function arguments
if (!all(c("object", "newdata") %in% names(formals(pred_wrapper)))) {
stop("`pred_wrapper()` must be a function with arguments `object` and ",
"`newdata`.", call. = FALSE)
}
# Check metric function arguments
if (!all(c("truth", "estimate") %in% names(formals(metric)))) {
stop("`metric()` must be a function with arguments `truth` and ",
"`estimate`; consider using one of the vector metric functions ",
"from the `yardstick` package (e.g., ",
"`metric = yardstick::huber_loss_vec`).", call. = FALSE)
}
# # Check if reference class is provided
# if (!is.null(reference_class)) {
# reference_class <- train_y[1L]
# }
# train_y <- ifelse(train_y == reference_class, yes = 1, no = 0)
# Performance function
metric_fun <- metric
} else {
# Get corresponding metric/performance function
ys_metric <- get_metric(metric)
# metric_fun <- ys_metric[["metric_fun"]]
smaller_is_better <- ys_metric[["smaller_is_better"]]
# Get metric function and update `event_level` arg if needed
metric_fun <- if (!is.null(event_level)) {
metric_fun <- function(truth, estimate) {
fun <- ys_metric[["metric_fun"]]
fun(truth, estimate = estimate, event_level = event_level)
}
} else {
if (metric %in% c("roc_auc", "pr_auc", "youden")) {
warning("Consider setting the `event_level` argument when using ",
deparse(substitute(metric)), " as the metric; see ",
"`?vip::vi_permute` for details. Defaulting to ",
"`event_level = \"first\"`.", call. = FALSE)
}
ys_metric[["metric_fun"]]
}
# FIXME: How to handle this with new `yardstick` integration?
# # Determine reference class (binary classification only)
# if (is.null(reference_class) && metric %in% c("auc", "logloss")) {
# stop("Please specify the reference class via the `reference_class` ",
# "argument when using \"auc\" or \"logloss\".")
# }
# if (!is.null(reference_class) && metric %in% c("auc", "logloss")) {
# train_y <- ifelse(train_y == reference_class, yes = 1, no = 0)
# }
}
# Compute baseline metric for comparison
baseline <- metric_fun(
truth = train_y,
estimate = pred_wrapper(object, newdata = train_x)
)
# Type of comparison
type <- match.arg(type)
`%compare%` <- if (type == "difference") {
`-`
} else {
`/`
}
# Define ".do" operator
`%do.reps%` <- `%do.features%` <- `%do%`
if (isTRUE(parallel)) {
parallelize_by <- match.arg(parallelize_by)
if (parallelize_by == "reps") {
if (nsim == 1) {
warning("Parallelizing across repititions only works when `nsim > 1`.",
call. = FALSE)
} else{
`%do.reps%` <- `%dopar%`
}
} else {
`%do.features%` <- `%dopar%`
}
}
# Construct VI scores
#
# Loop through each feature and do the following:
#
# 1. make a copy of the training data;
# 2. permute the values of the original feature;
# 3. get new predictions based on permuted data set;
# 4. record difference in accuracy.
vis <- foreach(i = seq_len(nsim), .combine = "cbind") %do.reps% {
res <- foreach(j = seq_along(feature_names),
.combine = "rbind", ...) %do.features% {
# if (verbose && !parallel) {
# message("Computing variable importance for ", x, "...")
# }
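      # Optionally evaluate on a random subsample of the training data (a new
      # sample is drawn for each feature/repetition)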
if (!is.null(sample_size)) {
ids <- sample(length(train_y), size = sample_size, replace = FALSE)
train_x <- train_x[ids, ]
train_y <- train_y[ids]
}
permx <- train_x
permx[, feature_names[j]] <- permx[sample(nrow(permx)), feature_names[j]]
# train_x_permuted <- permute_columns(train_x, columns = feature_names[j])
permuted <- metric_fun(
truth = train_y,
estimate = pred_wrapper(object, newdata = permx)
)
if (smaller_is_better) {
permuted %compare% baseline # e.g., RMSE
} else {
baseline %compare% permuted # e.g., R-squared
}
}
}
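  # At this point, `vis` is a numeric matrix with one row per feature and one
  # column per Monte Carlo repetition (i.e., length(feature_names) x nsim)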
# Construct tibble of variable importance scores
tib <- tibble::tibble(
"Variable" = feature_names,
"Importance" = apply(vis, MARGIN = 1, FUN = mean)
)
if (nsim > 1) {
tib$StDev <- apply(vis, MARGIN = 1, FUN = stats::sd)
}
# Add all nsim scores as an attribute
if (nsim > 1 && keep) {
rownames(vis) <- feature_names
colnames(vis) <- paste0("permutation_", seq_len(ncol(vis)))
attr(tib, which = "raw_scores") <- vis
}
# Add variable importance type attribute
attr(tib, which = "type") <- "permutation"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
|
/scratch/gouwar.j/cran-all/cranData/vip/R/vi_permute.R
|
#' SHAP-based variable importance
#'
#' Compute SHAP-based VI scores for the predictors in a model. See details
#' below.
#'
#' @param object A fitted model object (e.g., a
#' [randomForest][randomForest::randomForest] object).
#'
#' @param feature_names Character string giving the names of the predictor
#' variables (i.e., features) of interest. If `NULL` (the default) then they
#' will be inferred from the `train` and `target` arguments (see below). It is
#' good practice to always specify this argument.
#'
#' @param train A matrix-like R object (e.g., a data frame or matrix)
#' containing the training data. If `NULL` (the default) then the
#' internal `get_training_data()` function will be called to try and extract it
#' automatically. It is good practice to always specify this argument.
#'
#' @param ... Additional arguments to be passed on to [fastshap::explain()]
#' (e.g., `nsim = 30`, `adjust = TRUE`, or a prediction wrapper via the
#' `pred_wrapper` argument); see `?fastshap::explain` for details on these and
#' other useful arguments.
#'
#' @return A tidy data frame (i.e., a [tibble][tibble::tibble] object) with two
#' columns:
#'
#' * `Variable` - the corresponding feature name;
#' * `Importance` - the associated importance, computed as the mean absolute
#' Shapley value.
#'
#' @details This approach to computing VI scores is based on the mean absolute
#' value of the SHAP values for each feature; see, for example,
#' <https://github.com/shap/shap> and the references therein.
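#'
#' In other words, the importance of feature \eqn{x_j} is computed as
#' \eqn{\frac{1}{n}\sum_{i=1}^{n}|\phi_{ij}|}{(1/n) * sum(|phi_ij|)}, where
#' \eqn{\phi_{ij}} denotes the Shapley value of \eqn{x_j} for the \eqn{i}-th
#' training observation.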
#'
#' Strumbelj, E., and Kononenko, I. Explaining prediction models and individual
#' predictions with feature contributions. Knowledge and information systems
#' 41.3 (2014): 647-665.
#'
#' @rdname vi_shap
#'
#' @export
#'
#' @examples
#' \dontrun{
#' library(ggplot2) # for theme_light() function
#' library(xgboost)
#'
#' # Simulate training data
#' trn <- gen_friedman(500, sigma = 1, seed = 101) # ?vip::gen_friedman
#'
#' # Feature matrix
#' X <- data.matrix(subset(trn, select = -y)) # matrix of feature values
#'
#' # Fit an XGBoost model; hyperparameters were tuned using 5-fold CV
#' set.seed(859) # for reproducibility
#' bst <- xgboost(X, label = trn$y, nrounds = 338, max_depth = 3, eta = 0.1,
#' verbose = 0)
#'
#' # Construct VIP using "exact" SHAP values from XGBoost's internal Tree SHAP
#' # functionality
#' vip(bst, method = "shap", train = X, exact = TRUE, include_type = TRUE,
#' geom = "point", horizontal = FALSE,
#' aesthetics = list(color = "forestgreen", shape = 17, size = 5)) +
#' theme_light()
#'
#' # Use Monte-Carlo approach, which works for any model; requires prediction
#' # wrapper
#' pfun_prob <- function(object, newdata) { # prediction wrapper
#' # For Shapley explanations, this should ALWAYS return a numeric vector
#' predict(object, newdata = newdata, type = "prob")[, "yes"]
#' }
#'
#' # Compute Shapley-based VI scores
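#' # NOTE: `rfo` and `t1` below are assumed to be a random forest fit to the
#' # imputed titanic data; see the examples in `?vip::vi_permute` for one way
#' # to create such objects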
#' set.seed(853) # for reproducibility
#' vi_shap(rfo, train = subset(t1, select = -survived), pred_wrapper = pfun_prob,
#' nsim = 30)
#' ## # A tibble: 5 × 2
#' ## Variable Importance
#' ## <chr> <dbl>
#' ## 1 pclass 0.104
#' ## 2 age 0.0649
#' ## 3 sex 0.272
#' ## 4 sibsp 0.0260
#' ## 5 parch 0.0291
#' }
vi_shap <- function(object, ...) {
UseMethod("vi_shap")
}
#' @rdname vi_shap
#'
#' @export
vi_shap.default <- function(object, feature_names = NULL, train = NULL, ...) {
# Check for fastshap package
if (!requireNamespace("fastshap", quietly = TRUE)) {
stop("Package \"fastshap\" needed for this function to work. ",
"Please install it.", call. = FALSE)
}
if (utils::packageVersion("fastshap") < "0.1.0") {
stop("Package \"fastshap (>= 0.1.0)\" needed for this function to work. ",
"Please install it.", call. = FALSE)
}
# Try to extract feature names if not supplied
if (is.null(feature_names)) {
feature_names <- get_feature_names(object)
}
# Try to extract training data if not supplied
if (is.null(train)) {
train <- get_training_data(object)
}
# Make sure only the feature columns are used (e.g., no response)
train <- train[, feature_names, drop = FALSE]
# Compute SHAP values
shap <- fastshap::explain(
object = object,
feature_names = feature_names,
X = train,
shap_only = TRUE,
...
)
# Construct SHAP-based variable importance scores
tib <- tibble::tibble(
"Variable" = colnames(shap),
"Importance" = apply(shap, MARGIN = 2, FUN = function(x) mean(abs(x)))
)
attr(tib, which = "shap") <- shap
attr(tib, which = "type") <- "mean(|Shapley value|)"
# Add "vi" class
class(tib) <- c("vi", class(tib))
# Return results
tib
}
|
/scratch/gouwar.j/cran-all/cranData/vip/R/vi_shap.R
|
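# NULL assignments to silence "no visible binding for global variable" notes
# from R CMD check; these names are used inside ggplot2::aes() below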
Variable <- NULL
Importance <- NULL
#' Variable importance plots
#'
#' Plot variable importance scores for the predictors in a model.
#'
#' @param object A fitted model (e.g., of class
#' [randomForest][randomForest::randomForest] object) or a [vi][vip::vi] object.
#'
#' @param num_features Integer specifying the number of variable importance
#' scores to plot. Default is `10`.
#'
#' @param geom Character string specifying which type of plot to construct.
#' The currently available options are described below.
#'
#' * `geom = "col"` uses [geom_col][ggplot2::geom_col] to construct a bar chart
#' of the variable importance scores.
#'
#' * `geom = "point"` uses [geom_point][ggplot2::geom_point] to construct a
#' Cleveland dot plot of the variable importance scores.
#'
#' * `geom = "boxplot"` uses [geom_boxplot][ggplot2::geom_boxplot] to
#' construct a boxplot plot of the variable importance scores. This option can
#' only for the permutation-based importance method with \code{nsim > 1} and
#' `keep = TRUE`; see [vi_permute][vip::vi_permute] for details.
#'
#' * `geom = "violin"` uses [geom_violin][ggplot2::geom_violin] to
#' construct a violin plot of the variable importance scores. This option can
#' only for the permutation-based importance method with \code{nsim > 1} and
#' `keep = TRUE`; see [vi_permute][vip::vi_permute] for details.
#'
#' @param mapping Set of aesthetic mappings created by
#' [aes][ggplot2::aes]-related functions and/or tidy eval helpers. See example
#' usage below.
#'
#' @param aesthetics List specifying additional arguments passed on to
#' [layer][ggplot2::layer]. These are often aesthetics, used to set an aesthetic
#' to a fixed value, like `colour = "red"` or `size = 3`. See example usage
#' below.
#'
#' @param horizontal Logical indicating whether or not to plot the importance
#' scores on the x-axis (`TRUE`). Default is `TRUE`.
#'
#' @param all_permutations Logical indicating whether or not to plot all
#' permutation scores along with the average. Default is `FALSE`. (Only used for
#' permutation scores when `nsim > 1`.)
#'
#' @param jitter Logical indicating whether or not to jitter the raw permutation
#' scores. Default is `FALSE`. (Only used when `all_permutations = TRUE`.)
#'
#' @param include_type Logical indicating whether or not to include the type of
#' variable importance computed in the axis label. Default is `FALSE`.
#'
#' @param ... Additional optional arguments to be passed on to [vi][vip::vi].
#'
#' @importFrom stats reorder
#'
#' @rdname vip
#'
#' @export
#'
#' @examples
#' #
#' # A projection pursuit regression example using permutation-based importance
#' #
#'
#' # Load the sample data
#' data(mtcars)
#'
#' # Fit a projection pursuit regression model
#' model <- ppr(mpg ~ ., data = mtcars, nterms = 1)
#'
#' # Construct variable importance plot (permutation importance, in this case)
#' set.seed(825) # for reproducibility
#' pfun <- function(object, newdata) predict(object, newdata = newdata)
#' vip(model, method = "permute", train = mtcars, target = "mpg", nsim = 10,
#' metric = "rmse", pred_wrapper = pfun)
#'
#' # Better yet, store the variable importance scores and then plot
#' set.seed(825) # for reproducibility
#' vis <- vi(model, method = "permute", train = mtcars, target = "mpg",
#' nsim = 10, metric = "rmse", pred_wrapper = pfun)
#' vip(vis, geom = "point", horiz = FALSE)
#' vip(vis, geom = "point", horiz = FALSE, aesthetics = list(size = 3))
#'
#' # Plot unaggregated permutation scores (boxplot colored by feature)
#' library(ggplot2) # for `aes()`-related functions and tidy eval helpers
#' vip(vis, geom = "boxplot", all_permutations = TRUE, jitter = TRUE,
#' #mapping = aes_string(fill = "Variable"), # for ggplot2 (< 3.0.0)
#' mapping = aes(fill = .data[["Variable"]]), # for ggplot2 (>= 3.0.0)
#' aesthetics = list(color = "grey35", size = 0.8))
#'
#' #
#' # A binary classification example
#' #
#' \dontrun{
#' library(rpart) # for classification and regression trees
#'
#' # Load Wisconsin breast cancer data; see ?mlbench::BreastCancer for details
#' data(BreastCancer, package = "mlbench")
#' bc <- subset(BreastCancer, select = -Id) # for brevity
#'
#' # Fit a standard classification tree
#' set.seed(1032) # for reproducibility
#' tree <- rpart(Class ~ ., data = bc, cp = 0)
#'
#' # Prune using 1-SE rule (e.g., use `plotcp(tree)` for guidance)
#' cp <- tree$cptable
#' cp <- cp[cp[, "nsplit"] == 2L, "CP"]
#' tree2 <- prune(tree, cp = cp) # tree with three splits
#'
#' # Default tree-based VIP
#' vip(tree2)
#'
#' # Computing permutation importance requires a prediction wrapper. For
#' # classification, the return value depends on the chosen metric; see
#' # `?vip::vi_permute` for details.
#' pfun <- function(object, newdata) {
#' # Need vector of predicted class probabilities when using log-loss metric
#' predict(object, newdata = newdata, type = "prob")[, "malignant"]
#' }
#'
#' # Permutation-based importance (note that only the predictors that show up
#' # in the final tree have non-zero importance)
#' set.seed(1046) # for reproducibility
#' vip(tree2, method = "permute", nsim = 10, target = "Class",
#' metric = "logloss", pred_wrapper = pfun, reference_class = "malignant")
#' }
vip <- function(object, ...) {
UseMethod("vip")
}
#' @rdname vip
#'
#' @export
vip.default <- function(
object,
num_features = 10L,
geom = c("col", "point", "boxplot", "violin"),
mapping = NULL,
aesthetics = list(),
horizontal = TRUE,
all_permutations = FALSE,
jitter = FALSE,
include_type = FALSE,
...
) {
# Character string specifying which type of plot to construct
geom <- match.arg(geom, several.ok = FALSE)
# Extract or compute importance scores
imp <- if (inherits(object, what = "vi")) {
object
} else {
vi(object = object, ...) # compute variable importance scores
}
# Character string specifying the type of VI that was computed
vi_type <- attr(imp, which = "type") # subsetting removes this attribute!
# Integer specifying the number of features to include in the plot
num_features <- as.integer(num_features)[1L] # make sure num_features is a single integer
if (num_features > nrow(imp) || num_features < 1L) {
num_features <- nrow(imp)
}
imp <- sort_importance_scores(imp, decreasing = TRUE) # make sure these are sorted first!
imp <- imp[seq_len(num_features), ] # only retain num_features variable importance scores
# x.string <- "reorder(Variable, Importance)"
# Clean up raw scores for permutation-based VI scores
if (!is.null(attr(imp, which = "raw_scores"))) {
raw_scores <- as.data.frame(attr(imp, which = "raw_scores"))
raw_scores$Variable <- rownames(raw_scores)
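    # Reshape from wide (one "permutation_*" column per repetition) to long
    # format so the raw scores can be plotted alongside the aggregated ones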
raw_scores <- stats::reshape(
data = raw_scores,
varying = (1L:(ncol(raw_scores) - 1)),
v.names = "Importance",
direction = "long",
sep = "_"
)
raw_scores <- raw_scores[raw_scores$Variable %in% imp$Variable, ]
}
# Initialize plot
# p <- ggplot2::ggplot(imp, ggplot2::aes_string(x = x.string, y = "Importance"))
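  # Reorder by importance so the most important features appear at the top of
  # the plot (after `coord_flip()`) or far right (when `horizontal = FALSE`)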
p <- ggplot2::ggplot(imp, ggplot2::aes(
x = reorder(Variable, Importance),
y = Importance
))
# Construct a barplot
if (geom == "col") {
p <- p + do.call(
what = ggplot2::geom_col,
args = c(list(mapping = mapping), aesthetics)
)
}
# Construct a (Cleveland) dotplot
if (geom == "point") {
p <- p + do.call(
what = ggplot2::geom_point,
args = c(list(mapping = mapping), aesthetics)
)
}
# Construct a boxplot
if (geom == "boxplot") {
if (!is.null(attr(imp, which = "raw_scores"))) {
p <- p + do.call(
what = ggplot2::geom_boxplot,
args = c(list(data = raw_scores, mapping = mapping), aesthetics)
)
} else {
stop("To construct boxplots for permutation-based importance scores you ",
"must specify `keep = TRUE` in the call `vi()` or `vi_permute()`. ",
"Additionally, you also need to set `nsim >= 2`.",
call. = FALSE)
}
}
# Construct a violin plot
if (geom == "violin") {
if (!is.null(attr(imp, which = "raw_scores"))) {
p <- p + do.call(
what = ggplot2::geom_violin,
args = c(list(data = raw_scores, mapping = mapping), aesthetics)
)
} else {
stop("To construct violin plots for permutation-based importance scores ",
"you must specify `keep = TRUE` in the call `vi()` or ",
"`vi_permute()`. Additionally, you also need to set `nsim >= 2`.",
call. = FALSE)
}
}
# Plot raw permutation scores (if available and requested)
if (!is.null(attr(imp, which = "raw_scores")) && all_permutations) {
p <- if (jitter) {
p + ggplot2::geom_jitter(data = raw_scores)
} else {
p + ggplot2::geom_point(data = raw_scores)
}
}
# Add labels, titles, etc.
p <- p + ggplot2::theme(legend.position = "none")
p <- p + ggplot2::xlab("") # no need for x-axis label
if (horizontal) {
p <- p + ggplot2::coord_flip()
}
if (isTRUE(include_type)) {
p + ggplot2::ylab(paste0("Importance (", vi_type, ")"))
} else {
p + ggplot2::ylab("Importance")
}
}
#' @rdname vip
#'
#' @export
vip.model_fit <- function(object, ...) {
vip(parsnip::extract_fit_engine(object), ...)
}
#' @rdname vip
#'
#' @export
vip.workflow <- function(object, ...) {
vip(workflows::extract_fit_engine(object), ...)
}
#' @rdname vip
#'
#' @export
vip.WrappedModel <- function(object, ...) { # package: mlr
vip(object$learner.model, ...)
}
#' @rdname vip
#'
#' @export
vip.Learner <- function(object, ...) { # package: mlr3
if (is.null(object$model)) {
stop("No fitted model found. Did you forget to call ",
deparse(substitute(object)), "$train()?",
call. = FALSE)
}
vip(object$model, ...)
}
---
title: "Variable Importance Plots—An Introduction to the vip Package"
output:
bookdown::html_document2:
base_format: rmarkdown::html_vignette
fig_caption: yes
toc: true
toc_depth: 2
number_sections: false
link-citations: yes
pkgdown:
as_is: true
vignette: >
%\VignetteIndexEntry{vip}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
bibliography: '`r system.file("references.bib", package = "vip")`'
---
This vignette is essentially an up-to-date version of @RJ-2020-013. Please use that if you'd like to cite our work.
# Introduction
Too often machine learning (ML) models are summarized using a single metric (e.g., cross-validated accuracy) and then put into production. Although we often care about the predictions from these models, it is becoming routine (and good practice) to also better understand the predictions! Understanding how an ML model makes its predictions helps build trust in the model and is the fundamental idea of the emerging field of *interpretable machine learning* (IML).^[Although "interpretability" is difficult to formally define in the context of ML, we follow @doshivelez-2017-rigorous and describe "interpretable" as the "...ability to explain or to present in understandable terms to a human."] For an in-depth discussion on IML, see @molnar-2019-iml. In this paper, we focus on *global methods* for quantifying the importance^[In this context "importance" can be defined in a number of different ways. In general, we can describe it as *the extent to which a feature has a "meaningful" impact on the predicted outcome*. A more formal definition and treatment can be found in @laan-2006-statistical.] of features in an ML model; that is, methods that help us understand the global contribution each feature has to a model's predictions. Computing variable importance (VI) and communicating them through variable importance plots (VIPs) is a fundamental component of IML and is the main topic of this paper.
While many of the procedures discussed in this paper apply to any model that makes predictions, it should be noted that these methods heavily depend on the accuracy and importance of the fitted model; hence, unimportant features may appear relatively important (albeit not predictive) in comparison to the other included features. For this reason, we stress the usefulness of understanding the scale on which VI scores are calculated and take that into account when assessing the importance of each feature and communicating the results to others. Also, we should point out that this work focuses mostly on *post-hoc interpretability* where a trained model is given and the goal is to understand what features are driving the model's predictions. Consequently, our work focuses on functional understanding of the model in contrast to the lower-level mechanistic understanding [@montavon-2018-methods]. That is, we seek to explain the relationship between the model's prediction behavior and features without explaining the full internal representation of the model.^[We refer the reader to @poulin-2006-visual, @caruana-2015-intelligible, @bibal-2016-intterpretability, and @bau-2017-network, for discussions around model structure interpretation.]
VI scores and VIPs can be constructed for general ML models using a number of available packages. The [iml](https://cran.r-project.org/package=iml) package [@R-iml] provides the `FeatureImp()` function which computes feature importance for general prediction models using the permutation approach (discussed later). It is written in [R6](https://cran.r-project.org/package=R6) [@R-R6] and allows the user to specify a generic loss function or select one from a pre-defined list (e.g., \code{loss = "mse"} for mean squared error). It also allows the user to specify whether importance is measured as the difference or as the ratio of the original model error and the model error after permutation. The user can also specify the number of repetitions used when permuting each feature to help stabilize the variability in the procedure. The \code{iml::FeatureImp()} function can also be run in parallel using any parallel backend supported by the [foreach](https://cran.r-project.org/package=foreach) package [@R-foreach].
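For readers unfamiliar with [iml](https://cran.r-project.org/package=iml)'s R6-based interface, a minimal sketch is given below (not run here); `fit` and `dat` are placeholders for a fitted model and its training data frame with response column `y`, and the full set of options is documented in `?iml::FeatureImp`.
```r
# Minimal sketch of permutation importance via iml (placeholder objects)
library(iml)
predictor <- Predictor$new(fit, data = dat, y = "y")  # wrap the model and its data
imp <- FeatureImp$new(predictor, loss = "mse")        # permutation-based importance
plot(imp)                                             # ggplot2-based display
```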
The [ingredients](https://cran.r-project.org/package=ingredients) package [@R-ingredients] also provides permutation-based VI scores through the `feature_importance()` function. (Note that this function recently replaced the now deprecated [DALEX](https://cran.r-project.org/package=DALEX) function `variable_importance()` [@R-DALEX].) Similar to `iml::FeatureImp()`, this function allows the user to specify a loss function and how the importance scores are computed (e.g., using the difference or ratio). It also provides an option to sample the training data before shuffling the data to compute importance (the default is to use `n_sample = 1000`), which can help speed up computation.
The [mmpf](https://cran.r-project.org/package=mmpf) package [@R-mmpf] also provides permutation-based VI scores via the `mmpf::permutationImportance()` function. Similar to the [iml](https://cran.r-project.org/package=iml) and [ingredients](https://cran.r-project.org/package=ingredients) implementation, this function is flexible enough to be applied to any class of ML models in R.
The [varImp](https://cran.r-project.org/package=varImp) package [@R-varImp] extends the permutation-based method for RFs in package [party](https://cran.r-project.org/package=party) [@R-party] to arbitrary measures from the [measures](https://cran.r-project.org/package=measures) package [@R-measures]. Additionally, the functions in [varImp](https://cran.r-project.org/package=varImp) include the option of using the conditional approach described in @strobl-2019-conditional which is more reliable in the presence of correlated features. A number of other RF-specific VI packages exist on CRAN, including, but not limited to, [vita](https://cran.r-project.org/package=vita) [@R-vita], [rfVarImpOOB](https://cran.r-project.org/package=rfVarImpOOB) [@R-rfVarImpOOB], [randomForestExplainer](https://cran.r-project.org/package=randomForestExplainer) [@R-randomForestExplainer], and [tree.interpreter](https://cran.r-project.org/package=tree.interpreter) [@R-tree.interpreter].^[These packages were discovered using [pkgsearch](https://cran.r-project.org/package=pkgsearch)'s \code{ps()} function [@R-pkgsearch] with the key phrases "variable importance" and "feature importance".].
The [caret](https://cran.r-project.org/package=caret) package [@R-caret] includes a general `varImp()` function for computing model-specific and *filter-based* VI scores. Filter-based approaches, which are described in @applied-kuhn-2013, do not make use of the fitted model to measure VI. They also do not take into account the other predictors in the model. For regression problems, a popular filter-based approach to measuring the VI of a numeric predictor $x$ is to first fit a flexible nonparametric model between $x$ and the target $Y$; for example, the locally-weighted polynomial regression (LOWESS) method developed by @robust-cleveland-1979. From this fit, a pseudo-$R^2$ measure can be obtained from the resulting residuals and used as a measure of VI. For categorical predictors, a different method based on standard statistical tests (e.g., $t$-tests and ANOVAs) can be employed; see @applied-kuhn-2013 for details. For classification problems, an area under the ROC curve (AUC) statistic can be used to quantify predictor importance. The AUC statistic is computed by using the predictor $x$ as input to the ROC curve. If $x$ can reasonably separate the classes of $Y$, that is a clear indicator that $x$ is an important predictor (in terms of class separation) and this is captured in the corresponding AUC statistic. For problems with more than two classes, extensions of the ROC curve or a one-vs-all approach can be used.
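As a rough sketch of the filter-based idea for a numeric predictor (this is not the exact implementation used by [caret](https://cran.r-project.org/package=caret); the helper name and choice of smoother below are ours), one could compute a pseudo-$R^2$ from a flexible nonparametric fit of the target on that predictor alone:
```r
# Filter-based VI sketch: smooth y on a single numeric predictor x and compute a
# pseudo R-squared from the residuals (helper name is a placeholder)
filter_score <- function(x, y) {
  fit <- stats::loess(y ~ x)  # LOWESS-style local polynomial regression
  1 - sum(residuals(fit) ^ 2) / sum((y - mean(y)) ^ 2)  # pseudo R-squared
}
# e.g., applied to every predictor of a data frame `dat` with numeric response `y`:
# sapply(subset(dat, select = -y), filter_score, y = dat$y)
```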
If you use the [mlr](https://cran.r-project.org/package=mlr) interface for fitting ML models [@R-mlr], then you can use the `getFeatureImportance()` function to extract model-specific VI scores from various tree-based models (e.g., RFs and GBMs). Unlike [caret](https://cran.r-project.org/package=caret), the model needs to be fit via the [mlr](https://cran.r-project.org/package=mlr) interface; for instance, you cannot use `getFeatureImportance()` on a [ranger](https://cran.r-project.org/package=ranger) [@R-ranger] model unless it was fit using [mlr](https://cran.r-project.org/package=mlr).
While the [iml](https://cran.r-project.org/package=iml) and [DALEX](https://cran.r-project.org/package=DALEX) packages provide model-agnostic approaches to computing VI, [caret](https://cran.r-project.org/package=caret), and to some extent, [mlr](https://cran.r-project.org/package=mlr), provide model-specific approaches (e.g., using the absolute value of the $t$-statistic for linear models) as well as less accurate filter-based approaches. Furthermore, each package has a completely different interface (e.g., [iml](https://cran.r-project.org/package=iml) is written in R6). The [vip](https://cran.r-project.org/package=vip) package [@R-vip] strives to provide a consistent interface to both model-specific and model-agnostic approaches to feature importance that is simple to use. The two most important functions exported by [vip](https://cran.r-project.org/package=vip) are described below:
* `vi()` computes VI scores using model-specific or model-agnostic approaches (the results are always returned as a tibble [@R-tibble]);
* `vip()` constructs VIPs using model-specific or model-agnostic approaches with [ggplot2](https://cran.r-project.org/package=ggplot2)-style graphics [@R-ggplot2].
Note that `vi()` is actually a wrapper around four workhorse functions, `vi_model()`, `vi_firm()`, `vi_permute()`, and `vi_shap()`, that compute various types of VI scores. The first computes model-specific VI scores, while the latter three produce model-agnostic ones. The workhorse function that actually gets called is controlled by the `method` argument in `vi()`; the default is `method = "model"`, which corresponds to model-specific VI (see `?vip::vi` for details and links to further documentation).
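To make the dispatching concrete, the following minimal sketch shows two equivalent ways of requesting model-specific VI scores; the `lm()` fit on `mtcars` is just a convenient stand-in for any supported model:
```r
# vi() dispatches to the appropriate workhorse function based on `method`
fit <- lm(mpg ~ ., data = mtcars)  # placeholder model
vip::vi(fit)        # default is method = "model"; calls vi_model() under the hood
vip::vi_model(fit)  # calling the workhorse function directly returns the same scores
```
(The only difference is that `vi()` additionally sorts the scores by default.)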
## Constructing VIPs in R
We'll illustrate major concepts using the Friedman 1 benchmark problem described in @multivariate-friedman-1991 and @bagging-breiman-1996:
\begin{equation}
Y_i = 10 \sin\left(\pi X_{1i} X_{2i}\right) + 20 \left(X_{3i} - 0.5\right) ^ 2 + 10 X_{4i} + 5 X_{5i} + \epsilon_i, \quad i = 1, 2, \dots, n,
(\#eq:friedman)
\end{equation}
where $\epsilon_i \stackrel{iid}{\sim} N\left(0, \sigma^2\right)$. Data from this model can be generated using the `vip::gen_friedman()` function. By default, the features consist of 10 independent variables uniformly distributed on the interval $\left[0,1\right]$; however, only 5 out of these 10 are actually used in the true model. The code chunk below simulates 500 observations from the model in Equation \@ref(eq:friedman) with $\sigma = 1$; see `?vip::gen_friedman` for details.
```r
trn <- vip::gen_friedman(500, sigma = 1, seed = 101) # simulate training data
tibble::as_tibble(trn) # inspect output
```
```
## # A tibble: 500 × 11
## y x1 x2 x3 x4 x5 x6 x7 x8 x9 x10
## <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 14.9 0.372 0.406 0.102 0.322 0.693 0.758 0.518 0.530 0.878 0.763
## 2 15.3 0.0438 0.602 0.602 0.999 0.776 0.533 0.509 0.487 0.118 0.176
## 3 15.1 0.710 0.362 0.254 0.548 0.0180 0.765 0.715 0.844 0.334 0.118
## 4 10.7 0.658 0.291 0.542 0.327 0.230 0.301 0.177 0.346 0.474 0.283
## 5 17.6 0.250 0.794 0.383 0.947 0.462 0.00487 0.270 0.114 0.489 0.311
## 6 18.3 0.300 0.701 0.992 0.386 0.666 0.198 0.924 0.775 0.736 0.974
## 7 14.6 0.585 0.365 0.283 0.488 0.845 0.466 0.715 0.202 0.905 0.640
## 8 17.0 0.333 0.552 0.858 0.509 0.697 0.388 0.260 0.355 0.517 0.165
## 9 8.54 0.622 0.118 0.490 0.390 0.468 0.360 0.572 0.891 0.682 0.717
## 10 15.0 0.546 0.150 0.476 0.706 0.829 0.373 0.192 0.873 0.456 0.694
## # ℹ 490 more rows
```
From Equation \@ref(eq:friedman), it should be clear that features $X_1$--$X_5$ are the most important! (The others don't influence $Y$ at all.) Also, based on the form of the model, we'd expect $X_4$ to be the most important feature, probably followed by $X_1$ and $X_2$ (both comparably important), with $X_5$ probably being less important. The influence of $X_3$ is harder to determine due to its quadratic nature, but it seems likely that this nonlinearity will suppress the variable's influence over its observed range (i.e., 0--1).
# Model-specific VI
Some machine learning algorithms have their own way of quantifying the importance of each feature, which we refer to as *model-specific VI*. We describe some of these in the subsections that follow. One particular issue with model-specific VI scores is that they are not necessarily comparable across different types of models. For example, it makes little sense to directly compare the impurity-based VI scores from tree-based models with the absolute value of the $t$-statistic from linear models.
## Decision trees and tree ensembles
Decision trees probably offer the most natural model-specific approach to quantifying the importance of each feature. In a binary decision tree, at each node $t$, a single predictor is used to partition the data into two homogeneous groups. The chosen predictor is the one that maximizes some measure of improvement $i^t$. The relative importance of predictor $X$ is the sum of the squared improvements over all internal nodes of the tree for which $X$ was chosen as the partitioning variable; see @classification-breiman-1984 for details. This idea also extends to ensembles of decision trees, such as RFs and GBMs. In ensembles, the improvement score for each predictor is averaged across all the trees in the ensemble. Fortunately, due to the stabilizing effect of averaging, the improvement-based VI metric is often more reliable in large ensembles; see @hastie-elements-2009 [p. 368].
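In symbols (using our own notation), let $v\left(t\right)$ denote the splitting variable used at internal node $t$ of a tree $T$. The importance of predictor $X$ in a single tree, and its average across an ensemble of $B$ trees $T_1, \dots, T_B$, can then be written as
\begin{equation}
Imp\left(X; T\right) = \sum_{t \,:\, v\left(t\right) = X} \left(i^t\right)^2 \quad \text{and} \quad Imp\left(X\right) = \frac{1}{B} \sum_{b = 1}^B Imp\left(X; T_b\right).
\end{equation}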
RFs offer an additional method for computing VI scores. The idea is to use the leftover *out-of-bag* (OOB) data to construct validation-set errors for each tree. Then, each predictor is randomly shuffled in the OOB data and the error is computed again. The idea is that if variable $X$ is important, then the validation error will go up when $X$ is perturbed in the OOB data. The difference in the two errors is recorded for the OOB data then averaged across all trees in the forest. Note that both methods for constructing VI scores can be unreliable in certain situations; for example, when the predictor variables vary in their scale of measurement or their number of categories [@party2007a], or when the predictors are highly correlated [@strobl-2019-conditional]. The [varImp](https://cran.r-project.org/package=varImp) package discussed earlier provides methods to address these concerns for random forests in package [party](https://cran.r-project.org/package=party), with similar functionality also built into the [partykit](https://cran.r-project.org/package=partykit) package [@R-partykit]. The [vip](https://cran.r-project.org/package=vip) package also supports the conditional importance described in [@strobl-2019-conditional] for both [party](https://cran.r-project.org/package=party)- and [partykit](https://cran.r-project.org/package=partykit)-based RFs; see `?vip::vi_model` for details. Later on, we'll discuss a more general permutation method that can be applied to any supervised learning model.
To illustrate, we fit a CART-like regression tree, RF, and GBM to the simulated training data. (**Note:** there are a number of different packages available for fitting these types of models; we just picked popular implementations for illustration.)
```r
# Load required packages
library(rpart) # for fitting CART-like decision trees
library(randomForest) # for fitting RFs
library(xgboost) # for fitting GBMs
# Fit a single regression tree
tree <- rpart(y ~ ., data = trn)
# Fit an RF
set.seed(101) # for reproducibility
rfo <- randomForest(y ~ ., data = trn, importance = TRUE)
# Fit a GBM
set.seed(102) # for reproducibility
bst <- xgboost(
data = data.matrix(subset(trn, select = -y)),
label = trn$y,
objective = "reg:squarederror",
nrounds = 100,
max_depth = 5,
eta = 0.3,
verbose = 0 # suppress printing
)
```
Each of the above packages includes the ability to compute VI scores for all the features in the model; however, the implementation is rather package-specific, as shown in the code chunk below. The results are displayed in Figure \@ref(fig:vi-plots) (the code to reproduce these plots has been omitted but can be made available upon request).
```r
# Extract VI scores from each model
vi_tree <- tree$variable.importance
vi_rfo <- rfo$importance # or use `randomForest::importance(rfo)`
vi_bst <- xgb.importance(model = bst)
```
<img src="../man/figures/vi-plots-1.png" alt="Model-specific VIPs for the three different tree-based models fit to the simulated Friedman data." width="100%" />
As we would expect, all three methods rank the variables `x1`--`x5` as more important than the others. While this is good news, it is unfortunate that we have to remember the different functions and ways of extracting and plotting VI scores from various model fitting functions. This is one place where [vip](https://cran.r-project.org/package=vip) can help...one function to rule them all! Once [vip](https://cran.r-project.org/package=vip) is loaded, we can use `vi()` to extract a tibble of VI scores.^[In order to avoid deprecation warnings due to recent updates to [tibble](https://cran.r-project.org/package=tibble) and [ggplot2](https://cran.r-project.org/package=ggplot2), the code examples in this article are based on the latest development versions of both [vip](https://cran.r-project.org/package=vip) (version 0.4.1) and [pdp](https://cran.r-project.org/package=pdp) (version 0.8.1); the URL to the development version of each package is available on its associated CRAN landing page.]
```r
# Load required packages
library(vip)
# Compute model-specific VI scores
vi(tree) # CART-like decision tree
```
```
## # A tibble: 10 × 2
## Variable Importance
## <chr> <dbl>
## 1 x4 4234.
## 2 x2 2513.
## 3 x1 2461.
## 4 x5 1230.
## 5 x3 688.
## 6 x6 533.
## 7 x7 357.
## 8 x9 331.
## 9 x8 276.
## 10 x10 275.
```
```r
vi(rfo) # RF
```
```
## # A tibble: 10 × 2
## Variable Importance
## <chr> <dbl>
## 1 x4 72.9
## 2 x2 61.4
## 3 x1 55.6
## 4 x5 37.0
## 5 x3 22.0
## 6 x8 1.84
## 7 x6 1.12
## 8 x9 0.720
## 9 x7 -1.39
## 10 x10 -2.61
```
```r
vi(bst) # GBM
```
```
## # A tibble: 10 × 2
## Variable Importance
## <chr> <dbl>
## 1 x4 0.403
## 2 x2 0.225
## 3 x1 0.189
## 4 x5 0.0894
## 5 x3 0.0682
## 6 x9 0.00802
## 7 x6 0.00746
## 8 x7 0.00400
## 9 x10 0.00377
## 10 x8 0.00262
```
Notice how the `vi()` function always returns a tibble^[Technically, it's a tibble with an additional `"vi"` class.] with two columns: `Variable` and `Importance` (the exceptions are coefficient-based models which also include a `Sign` column giving the sign of the corresponding coefficient, and permutation importance involving multiple Monte Carlo simulations, but more on that later). Also, by default, `vi()` always orders the VI scores from highest to lowest; this, among other options, can be controlled by the user (see `?vip::vi` for details). Plotting VI scores with `vip()` is just as straightforward. For example, the following code can be used to reproduce Figure \@ref(fig:vi-plots).
```r
library(patchwork) # for easily arranging multiple ggplot2 plots
p1 <- vip(tree) + ggtitle("Single tree")
p2 <- vip(rfo) + ggtitle("Random forest")
p3 <- vip(bst) + ggtitle("Gradient boosting")
# Display plots in a grid (Figure 1)
p1 + p2 + p3
```
Notice how the `vip()` function always returns a `"ggplot"` object (by default, this will be a bar plot). For large models with many features, a Cleveland dot plot is more effective (in fact, a number of useful plotting options can be fiddled with). Below we call `vip()` and change a few useful options (the resulting plot is displayed in Figure \@ref(fig:dot-plot)). Note that we can also call `vip()` directly on a `"vi"` object if it's already been constructed.
```r
# Construct VIP (Figure 2)
library(ggplot2) # for theme_light() function
vip(bst, num_features = 5, geom = "point", horizontal = FALSE,
aesthetics = list(color = "red", shape = 17, size = 5)) +
theme_light()
```
<img src="../man/figures/dot-plot-1.png" alt="Illustrating various plotting options." width="70%" />
## Linear models
In multiple linear regression, or linear models (LMs), the absolute value of the $t$-statistic (or some other scaled variant of the estimated coefficients) is commonly used as a measure of VI.^[Since this approach is biased towards large-scale features, it is important to properly standardize the predictors (before fitting the model) or the estimated coefficients.] Motivation for the use of the associated $t$-statistic is given in @bring-1994-standardize. The same idea also extends to generalized linear models (GLMs). In the code chunk below, we fit an LM to the simulated Friedman data (`trn`) allowing for all main effects and two-way interactions, then use the `step()` function to perform backward elimination. The resulting VIP is displayed in Figure \@ref(fig:vip-step).
```r
# Fit a LM
linmod <- lm(y ~ .^2, data = trn)
backward <- step(linmod, direction = "backward", trace = 0)
# Extract VI scores
(vi_backward <- vi(backward))
```
```
## # A tibble: 21 × 3
## Variable Importance Sign
## <chr> <dbl> <chr>
## 1 x4 14.2 POS
## 2 x2 7.31 POS
## 3 x1 5.63 POS
## 4 x5 5.21 POS
## 5 x3:x5 2.46 POS
## 6 x1:x10 2.41 NEG
## 7 x2:x6 2.41 NEG
## 8 x1:x5 2.37 NEG
## 9 x10 2.21 POS
## 10 x3:x4 2.01 NEG
## # ℹ 11 more rows
```
```r
# Plot VI scores; by default, `vip()` displays the top ten features
pal <- palette.colors(2, palette = "Okabe-Ito") # colorblind friendly palette
vip(vi_backward, num_features = length(coef(backward)), # Figure 3
geom = "point", horizontal = FALSE, mapping = aes(color = Sign)) +
scale_color_manual(values = unname(pal)) +
theme_light() +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
```
<img src="../man/figures/vip-step-1.png" alt="Example VIP from a linear model fit to the simulated Friedman data. The points are colored according to the sign of the associated coefficient." width="70%" />
A major limitation of this approach is that a VI score is assigned to each term in the model, rather than to each individual feature! We can solve this problem using one of the model-agnostic approaches discussed later.
Multivariate adaptive regression splines (MARS), introduced in @multivariate-friedman-1991, is an automatic regression technique that can be seen as a generalization of LMs and GLMs. In the MARS algorithm, the contribution (or VI score) for each predictor is determined using a generalized cross-validation (GCV) statistic (though other statistics can also be used; see `?vip::vi_model` for details). An example using the [earth](https://cran.r-project.org/package=earth) package [@R-earth-fixed] is given below (the results are plotted in Figure \@ref(fig:vip-earth)):
```r
# Load required packages
library(earth)
# Fit a MARS model
mars <- earth(y ~ ., data = trn, degree = 2, pmethod = "exhaustive")
# Extract VI scores
vi(mars, type = "gcv")
```
```
## # A tibble: 10 × 2
## Variable Importance
## <chr> <dbl>
## 1 x4 100
## 2 x1 83.2
## 3 x2 83.2
## 4 x5 59.3
## 5 x3 43.5
## 6 x6 0
## 7 x7 0
## 8 x8 0
## 9 x9 0
## 10 x10 0
```
```r
# Plot VI scores (Figure 4)
vip(mars)
```
<img src="../man/figures/vip-earth-1.png" alt="Example VIP from a MARS model fit to the simulated Friedman data." width="70%" />
To access VI scores directly in [earth](https://cran.r-project.org/package=earth), you can use the `earth::evimp()` function.
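For example, continuing with the `mars` fit from above (output omitted):
```r
# earth's own variable importance summary (nsubsets, GCV, and RSS criteria)
evimp(mars)
```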
## Neural networks
For neural networks (NNs), two popular methods for constructing VI scores are the Garson algorithm [@interpreting-garson-1991], later modified by @back-goh-1995, and the Olden algorithm [@accurate-olden-2004]. For both algorithms, the basis of these VI scores is the network’s connection weights. The Garson algorithm determines VI by identifying all weighted connections between the nodes of interest. Olden’s algorithm, on the other hand, uses the products of the raw connection weights between each input and output neuron and sums these products across all hidden neurons. This has been shown to outperform the Garson method in various simulations. For DNNs, a similar method due to @data-gedeon-1997 considers the weights connecting the input features to the first two hidden layers (for simplicity and speed); but this method can be slow for large networks. We illustrate these two methods below using `vip()` with the [nnet](https://cran.r-project.org/package=nnet) package [@R-nnet] (see the results in Figure \@ref(fig:vip-nnet)).
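For a network with a single hidden layer of $H$ neurons, Olden's importance score for input $x_i$ can be written as (our notation)
\begin{equation}
Imp\left(x_i\right) = \sum_{h = 1}^{H} w_{ih} w_{ho},
\end{equation}
where $w_{ih}$ is the weight connecting input $i$ to hidden neuron $h$ and $w_{ho}$ is the weight connecting hidden neuron $h$ to the output neuron $o$; roughly speaking, the Garson algorithm works with the absolute values of the same weight products and normalizes the result.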
```r
# Load required packages
library(nnet)
# Fit a neural network
set.seed(0803) # for reproducibility
nn <- nnet(y ~ ., data = trn, size = 7, decay = 0.1,
linout = TRUE, trace = FALSE)
# Construct VIPs
p1 <- vip(nn, type = "garson")
p2 <- vip(nn, type = "olden")
# Display plots in a grid (Figure 5)
p1 + p2
```
<img src="../man/figures/vip-nnet-1.png" alt="Example VIPs from a single-hidden-layer NN fit to the simulated Friedman data." width="70%" />
# Model-agnostic VI
Model-agnostic interpretability separates interpretation from the model. Compared to model-specific approaches, model-agnostic VI methods are more flexible and can be applied to any supervised learning algorithm. In this section, we discuss model-agnostic methods for quantifying global feature importance using three different approaches:
1) a simple variance-based approach;
2) permutation-based feature importance;
3) Shapley-based feature importance.
## Variance-based methods
Our first model-agnostic method is based on a simple *feature importance ranking measure* (FIRM); for details, see @greenwell-simple-2018, @zien-2009-feature, and @scholbeck-2019-sampling. The specific approach used here is based on quantifying the "flatness" of the effects of each feature.^[A similar approach is taken in the [vivo](https://cran.r-project.org/package=vivo) package [@R-vivo].] Feature effects can be assessed using *partial dependence plots* (PDPs) [@friedman-2001-greedy] or *individual conditional expectation* (ICE) curves [@goldstein-peeking-2015]. PDPs and ICE curves help visualize the effect of low cardinality subsets of the feature space on the estimated prediction surface (e.g., main effects and two/three-way interaction effects). They are also model-agnostic and can be constructed in the same way for any supervised learning algorithm. Below, we fit a *projection pursuit regression* (PPR) model (see `?stats::ppr` for details and references) and construct PDPs for each feature using the [pdp](https://cran.r-project.org/package=pdp) package [@pdp2017]. The results are displayed in Figure \@ref(fig:pdp-ppr). Notice how the PDPs for the uninformative features are relatively flat compared to the PDPs for features `x1`--`x5`!
<img src="../man/figures/pdp-ppr-1.png" alt="PDPs of main effects in the PPR model fit to the simulated Friedman data." width="100%" />
Next, we compute PDP-based VI scores for the fitted PPR and NN models. The PDP method constructs VI scores that quantify the relative "flatness" of each PDP (by default, this is defined by computing the standard deviation of the $y$-axis values for each PDP). To use the PDP method, specify `method = "firm"` in the call to `vi()` or `vip()` (or just use `vi_firm()` directly):
```r
# Fit a PPR model (nterms was chosen using the caret package with 5 repeats of
# 5-fold cross-validation)
pp <- ppr(y ~ ., data = trn, nterms = 11)
# Construct VIPs
p1 <- vip(pp, method = "firm", train = trn) + ggtitle("PPR")
p2 <- vip(nn, method = "firm", train = trn) + ggtitle("NN")
# Display plots in a grid (Figure 7)
p1 + p2
```
<img src="../man/figures/pdp-ppr-nn-1.png" alt="PDP-based feature importance for the PPR and NN models fit to the simulated Friedman data." width="70%" />
In Figure \@ref(fig:pdp-ppr-nn) we display the PDP-based feature importance for the previously obtained PPR and NN models. These VI scores essentially capture the variability in the partial dependence values for each main effect.
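To make the connection explicit, here is a small sketch (assuming the [pdp](https://cran.r-project.org/package=pdp) package is available) of how the FIRM score for a single feature could be computed by hand using the default standard-deviation rule mentioned above:
```r
# FIRM score for x1 "by hand": standard deviation of its partial dependence values
library(pdp)
pd_x1 <- partial(pp, pred.var = "x1", train = trn)  # partial dependence data for x1
sd(pd_x1$yhat)  # should roughly match the x1 score displayed in Figure \@ref(fig:pdp-ppr-nn)
```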
The ICE curve method is similar to the PDP method, except that we measure the "flatness" of each individual ICE curve and then aggregate the results (e.g., by averaging). If there are no (substantial) interaction effects, using ICE curves will produce results similar to using PDPs (which are just averaged ICE curves). However, if strong interaction effects are present, they can obfuscate the main effects and render the PDP-based approach less useful (since the PDPs for important features can be relatively flat when certain interactions are present; see @goldstein-peeking-2015 for details). In fact, it is probably safest to always use ICE curves when employing the FIRM method.
Below, we display the ICE curves for each feature in the fitted PPR model using the same $y$-axis scale; see Figure \@ref(fig:ice-ppr). Again, there is a clear difference between the ICE curves for features `x1`--`x5` and `x6`--`x10`; the latter being relatively flat by comparison. Also, notice how the ICE curves within each feature are relatively parallel (if the ICE curves within each feature were perfectly parallel, the standard deviation for each curve would be the same and the results would be identical to the PDP method). In this example, the interaction term between `x1` and `x2` does not obfuscate the PDPs for the main effects and the results are not much different.
<img src="../man/figures/ice-ppr-1.png" alt="ICE curves for each feature in the PPR model fit to the simulated Friedman data. The red curve represents the PDP (i.e., the averaged ICE curves)." width="100%" />
Obtaining the ICE-based feature importance scores is also straightforward, just specify `ice = TRUE` when using the FIRM approach. This is illustrated in the code chunk below and the results, which are displayed in Figure \@ref(fig:vip-ice-ppr-nn), are similar to those obtained using the PDP method.
```r
# Construct VIPs
p1 <- vip(pp, method = "firm", ice = TRUE, train = trn) + ggtitle("PPR")
p2 <- vip(nn, method = "firm", ice = TRUE, train = trn) + ggtitle("NN")
# Display plots in a grid (Figure 9)
p1 + p2
```
<img src="../man/figures/vip-ice-ppr-nn-1.png" alt="ICE-based feature importance for the PPR and NN models fit to the simulated Friedman data." width="70%" />
When using `method = "firm"`, the feature effect values are stored in an attribute called `"effects"`. This is a convenience so that the feature effect plots (e.g., PDPs and ICE curves) can easily be reconstructed and compared with the VI scores, as demonstrated in the example below (see Figure \@ref(fig:pdp-from-attr)):
```r
# Construct PDP-based VI scores
(vis <- vi(pp, method = "firm", train = trn))
```
```
## # A tibble: 10 × 2
## Variable Importance
## <chr> <dbl>
## 1 x4 2.96
## 2 x2 2.21
## 3 x1 2.14
## 4 x5 1.53
## 5 x3 1.46
## 6 x6 0.128
## 7 x9 0.114
## 8 x8 0.0621
## 9 x10 0.0374
## 10 x7 0.0170
```
```r
# Reconstruct PDPs for all 10 features (Figure 10)
par(mfrow = c(2, 5))
for (name in paste0("x", 1:10)) {
plot(attr(vis, which = "effects")[[name]], type = "l", ylim = c(9, 19), las = 1)
}
```
<img src="../man/figures/pdp-from-attr-1.png" alt="PDPs for all ten features reconstructed from the \code{pdp} attribute of the \code{vis} object." width="100%" />
## Permutation method
The permutation method exists in various forms and was made popular in @random-breiman-2001 for RFs, before being generalized and extended in @fisher-model-2018. The permutation approach used in [vip](https://cran.r-project.org/package=vip) is quite simple and is outlined in Algorithm 1 below. The idea is that if we randomly permute the values of an important feature in the training data, the training performance would degrade (since permuting the values of a feature effectively destroys any relationship between that feature and the target variable). This of course assumes that the model has been properly tuned (e.g., using cross-validation) and is not overfitting. The permutation approach uses the difference between some baseline performance measure (e.g., training $R^2$, AUC, or RMSE) and the same performance measure obtained after permuting the values of a particular feature in the training data (**Note:** the model is NOT refit to the training data after randomly permuting the values of a feature). It is also important to note that this method may not be appropriate when you have, for example, highly correlated features (since permuting one feature at a time may lead to unlikely data instances).
Let $x_1, x_2, \dots, x_j$ be the features of interest and let $M_{orig}$ be the baseline performance metric for the trained model; for brevity, we'll assume smaller is better (e.g., classification error or RMSE). The permutation-based importance scores can be computed as follows:
1. For $i = 1, 2, \dots, j$:
a. Permute the values of feature $x_i$ in the training data.
b. Recompute the performance metric on the permuted data $M_{perm}$.
c. Record the difference from baseline using $VI\left(x_i\right) = M_{perm} - M_{orig}$.
2. Return the VI scores $VI\left(x_1\right), VI\left(x_2\right), \dots, VI\left(x_j\right)$.
Algorithm 1: A simple algorithm for constructing permutation-based VI scores.
Algorithm 1 can be improved or modified in a number of ways. For instance, the process can be repeated several times and the results averaged together. This helps provide more stable VI scores and also offers the opportunity to measure their variability. Rather than taking the difference in step (c), @molnar-2019-iml [sec. 5.5.4] argues that using the ratio $M_{perm} / M_{orig}$ makes the importance scores more comparable across different problems. It's also possible to assign importance scores to groups of features (e.g., by permuting more than one feature at a time); this would be useful if features can be categorized into mutually exclusive groups, for instance, categorical features that have been *one-hot-encoded*.
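For concreteness, here is a bare-bones (and deliberately unoptimized) R sketch of Algorithm 1 for a regression model using RMSE; `object`, `train`, `target`, and `pred_wrapper` are placeholders for a fitted model, its training data, the response name, and a prediction function returning a numeric vector:
```r
# Naive single-pass implementation of Algorithm 1 (difference version, RMSE)
permute_vi <- function(object, train, target, pred_wrapper) {
  rmse <- function(obs, pred) sqrt(mean((obs - pred) ^ 2))
  y <- train[[target]]
  features <- setdiff(names(train), target)
  m_orig <- rmse(y, pred_wrapper(object, newdata = train))  # baseline metric
  vapply(features, FUN.VALUE = numeric(1), FUN = function(feature) {
    train_perm <- train
    train_perm[[feature]] <- sample(train_perm[[feature]])  # step (a): permute one feature
    rmse(y, pred_wrapper(object, newdata = train_perm)) - m_orig  # steps (b)-(c)
  })
}
# e.g., permute_vi(pp, train = trn, target = "y",
#                 pred_wrapper = function(object, newdata) predict(object, newdata = newdata))
```
Repeating the permutation step several times and averaging (or using the ratio $M_{perm} / M_{orig}$) corresponds to the modifications mentioned above.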
To use the permutation approach in [vip](https://cran.r-project.org/package=vip), specify `method = "permute"` in the call to `vi()` or `vip()` (or you can use `vi_permute()` directly). Note that using `method = "permute"` requires specifying a few additional arguments (e.g., the training data, target name or vector of target values, a prediction function, etc.); see `?vi_permute` for details.
To use `vi_permute()` you should first define a prediction wrapper that tells the function how to generate the right predictions for your chosen metric. An example is given below for the previously fitted PPR and NN models. Here we use $R^2$ (`metric = "rsq"`) as the evaluation metric. The results, which are displayed in Figure \@ref(fig:vip-permute-ppr-nn), agree with those obtained using the PDP- and ICE-based methods.
```r
# Prediction wrapper
pfun_ppr <- function(object, newdata) { # needs to return a numeric vector
stats::predict(object, newdata = newdata)
}
pfun_nnet <- function(object, newdata) { # needs to return a numeric vector
stats::predict(object, newdata = newdata)[, 1L, drop = TRUE]
}
# Plot VI scores
set.seed(2021) # for reproducibility
p1 <- vip(pp, method = "permute", train = trn, target = "y", metric = "rsq",
pred_wrapper = pfun_ppr) + ggtitle("PPR")
p2 <- vip(nn, method = "permute", train = trn, target = "y", metric = "rsq",
pred_wrapper = pfun_nnet) + ggtitle("NN")
# Display plots in a grid (Figure 11)
p1 + p2
```
<img src="../man/figures/vip-permute-ppr-nn-1.png" alt="Permutation-based feature importance for the PPR and NN models fit to the simulated Friedman data." width="70%" />
The permutation approach introduces randomness into the procedure and therefore should be run more than once if computationally feasible. The upside to performing multiple runs of Algorithm 1 is that it allows us to compute standard errors (among other metrics) for the estimated VI scores, as illustrated in the example below; here we specify `nsim = 30` to request that each feature be permuted 30 times and the results averaged together. (Additionally, if `nsim > 1`, you can set \code{geom = "boxplot"} in the call to `vip()` to construct boxplots of the raw permutation-based VI scores. This is useful if you want to visualize the variability in each of the VI estimates; see Figure \@ref(fig:vip-boxplots) for an example.)
```r
# Use 30 Monte Carlo reps
set.seed(403) # for reproducibility
vis <- vi(pp, method = "permute", train = trn, target = "y", metric = "rsq",
pred_wrapper = pfun_ppr, nsim = 30)
vip(vis, geom = "boxplot") # Figure 12
```
<img src="../man/figures/vip-boxplots-1.png" alt="Boxplots of VI scores using the permutation method with 15 Monte Carlo repetitions." width="70%" />
All available performance metrics for regression and classification can be listed using the `list_metrics()` function, for example:
```r
list_metrics()
```
```
## metric description
## 1 accuracy Classification accuracy
## 2 bal_accuracy Balanced classification accuracy
## 3 youden Youden;'s index (or Youden's J statistic)
## 4 roc_auc Area under ROC curve
## 5 pr_auc Area under precision-recall (PR) curve
## 6 logloss Log loss
## 7 brier Brier score
## 8 mae Mean absolute error
## 9 mape Mean absolute percentage error
## 10 rmse Root mean squared error
## 11 rsq R-squared (correlation)
## 12 rsq_trad R-squared (traditional)
## task smaller_is_better yardstick_function
## 1 Binary/multiclass classification FALSE accuracy_vec
## 2 Binary/multiclass classification FALSE bal_accuracy_vec
## 3 Binary/multiclass classification FALSE j_index
## 4 Binary classification FALSE roc_auc_vec
## 5 Binary classification FALSE pr_auc_vec
## 6 Binary/multiclass classification TRUE mn_log_loss_vec
## 7 Binary/multiclass classification TRUE brier_class_vec
## 8 Regression TRUE mae_vec
## 9 Regression TRUE mape_vec
## 10 Regression TRUE rmse_vec
## 11 Regression FALSE rsq_vec
## 12 Regression FALSE rsq_trad_vec
```
The permutation method in [vip](https://cran.r-project.org/package=vip) supports the vector performance functions available in [yardstick](https://cran.r-project.org/package=yardstick) [@R-yardstick]. We can also use a custom metric (i.e., loss function). Suppose for example you want to measure importance using the *mean absolute error* (MAE):
\begin{equation}
MAE = \frac{1}{n}\sum_{i = 1}^n\left|y_i - \hat{f}\left(\boldsymbol{x}_i\right)\right|,
\end{equation}
where $\hat{f}\left(\boldsymbol{x}_i\right)$ is the predicted value of $y_i$. A simple function implementing this metric is given below (to be consistent with [yardstick](https://cran.r-project.org/package=yardstick) functions, user-supplied metric functions require two arguments: `truth` and `estimate`).
```r
mae <- function(truth, estimate) {
mean(abs(truth - estimate))
}
```
To use this for computing permutation-based VI scores, just pass it via the `metric` argument (be warned, however, that the metric used for computing permutation importance should be the same as the metric used to train and tune the model). Also, since this is a custom metric, we need to specify whether a smaller value indicates better performance by setting `smaller_is_better = TRUE`. The results, which are displayed in Figure \@ref(fig:vip-nn-mae), are similar to those in Figure \@ref(fig:vip-permute-ppr-nn), albeit on a different scale.
```r
# Construct VIP (Figure 13)
set.seed(2321) # for reproducibility
p1 <- vip(nn, method = "permute", train = trn, target = "y", metric = mae,
smaller_is_better = TRUE, pred_wrapper = pfun_nnet) +
ggtitle("Custom loss function: MAE")
set.seed(2321) # for reproducibility
p2 <- vip(nn, method = "permute", train = trn, target = "y",
metric = yardstick::mae_vec, smaller_is_better = TRUE,
pred_wrapper = pfun_nnet) +
ggtitle("Using `yardstick`'s MAE function")
p1 + p2
```
<img src="../man/figures/vip-nn-mae-1.png" alt="Permutation-based VI scores for the NN model fit to the simulated Friedman data. In this example, permutation importance is based on the MAE metric." width="70%" />
Although permutation importance is most naturally computed on the training data, it may also be useful to do the shuffling and measure performance on new data! This is discussed in depth in @molnar-2019-iml [sec. 5.2]. For users interested in computing permutation importance using new data, just supply it to the `train` argument in the call to `vi()`, `vip()`, or `vi_permute()`. For instance, suppose we wanted to only use a fraction of the original training data to carry out the computations. In this case, we could simply pass the sampled data to the `train` argument as follows:
```r
# Construct VIP (Figure 14)
set.seed(2327) # for reproducibility
vip(nn, method = "permute", pred_wrapper = pfun_nnet, target = "y",
metric = "rmse",
train = trn[sample(nrow(trn), size = 400), ]) + # sample 400 observations
ggtitle("Using a random subset of training data")
```
<img src="../man/figures/vip-permute-nn-sample-1.png" alt="Permutation-based feature importance for the NN model fit to the simulated Friedman data. In this example, permutation importance is based on a random sample of 400 training observations." width="70%" />
When using the permutation method with `nsim > 1`, the default is to keep all the permutation scores as an attribute called `"raw_scores"`; you can turn this behavior off by setting `keep = FALSE` in the call to `vi_permute()`, `vi()`, or `vip()`. If `keep = TRUE` and `nsim > 1`, you can request all permutation scores to be plotted by setting `all_permutations = TRUE` in the call to `vip()`, as demonstrated in the code chunk below (see Figure \@ref(fig:vip-nn-mae-all)). This also lets you visually inspect the variability in the permutation scores within each feature.
```r
# Construct VIP (Figure 15)
set.seed(8264) # for reproducibility
vip(nn, method = "permute", pred_wrapper = pfun_nnet, train = trn,
target = "y", metric = "mae", nsim = 10, geom = "point",
all_permutations = TRUE, jitter = TRUE) +
ggtitle("Plotting all permutation scores")
```
<img src="../man/figures/vip-nn-mae-all-1.png" alt="Permutation-based feature importance for the NN model fit to the simulated Friedman data. In this example, all the permutation importance scores (points) are displayed for each feature along with their average (bars)." width="70%" />
### A classification example
In this example, we'll illustrate the use of permutation importance in a classification problem. To start, we'll use the [randomForest](https://cran.r-project.org/package=randomForest) package [@R-randomForest] to build a (default) random forest to predict survivability of passengers on the ill-fated Titanic.
The [source data](https://hbiostat.org/data/) (also available in `vip::titanic`) contains 263 missing values (i.e., `NA`'s) in the age column. The `titanic_mice` version, which we'll use in this vignette, contains imputed values for the age column using *multivariate imputation by chained equations* via the [mice](https://cran.r-project.org/package=mice) package. Consequently, `titanic_mice` is a list containing 11 imputed versions of the original data; see `?vip::titanic_mice` for details. For now, we'll just use one of the 11 imputed versions:
```r
head(t1 <- vip::titanic_mice[[1L]])
```
```
## survived pclass age sex sibsp parch
## 1 yes 1 29.00 female 0 0
## 2 yes 1 0.92 male 1 2
## 3 no 1 2.00 female 1 2
## 4 no 1 30.00 male 1 2
## 5 no 1 25.00 female 1 2
## 6 yes 1 48.00 male 0 0
```
```r
t1$pclass <- as.ordered(t1$pclass) # makes more sense as an ordered factor
```
Next, we'll build a default random forest to predict survivability:
```r
library(randomForest)
set.seed(2053) # for reproducibility
(rfo <- randomForest(survived ~ ., data = t1, importance = TRUE, nPerm = 30))
```
```
##
## Call:
## randomForest(formula = survived ~ ., data = t1, importance = TRUE, nPerm = 30)
## Type of random forest: classification
## Number of trees: 500
## No. of variables tried at each split: 2
##
## OOB estimate of error rate: 18.79%
## Confusion matrix:
## no yes class.error
## no 727 82 0.1013597
## yes 164 336 0.3280000
```
For comparison, here's a plot of the OOB-based permutation importance scores available in a random forest (note that setting `include_type = TRUE` results in the $x$-axis label including the method of importance that was computed):
```r
vip(rfo, include_type = TRUE)
```
<img src="../man/figures/titanic-rfo-vi-1.png" alt="plot of chunk titanic-rfo-vi" width="70%" />
For categorical outcomes, random forests can provide predicted class labels (i.e., classification) or predicted class probabilities (i.e., prediction), as shown below.
```r
head(predict(rfo, newdata = t1, type = "response")) # predicted class labels
```
```
## 1 2 3 4 5 6
## yes yes yes no yes no
## Levels: no yes
```
```r
head(predict(rfo, newdata = t1, type = "prob")) # predicted class probabilities
```
```
## no yes
## 1 0.014 0.986
## 2 0.114 0.886
## 3 0.472 0.528
## 4 0.716 0.284
## 5 0.392 0.608
## 6 0.894 0.106
```
The performance metric we choose for permutation importance will determine whether our prediction wrapper should return a class label (as a factor) or a numeric vector of class probabilities. We'll start with classification accuracy (the same metric used by random forest's built-in OOB-based permutation VI scores). A basic call to `vi()` (or, similarly, to `vi_permute()`) would look something like:
```r
pfun_class <- function(object, newdata) { # prediction wrapper
predict(object, newdata = newdata, type = "response")
}
# Compute mean decrease in accuracy
set.seed(1359) # for reproducibility
vi(rfo,
method = "permute",
train = t1,
target = "survived",
metric = "accuracy", # or pass in `yardstick::accuracy_vec` directly
# smaller_is_better = FALSE, # no need to set for built-in metrics
pred_wrapper = pfun_class,
nsim = 30 # use 30 repetitions
)
```
```
## # A tibble: 5 × 3
## Variable Importance StDev
## <chr> <dbl> <dbl>
## 1 sex 0.226 0.0111
## 2 pclass 0.0801 0.00488
## 3 age 0.0738 0.00595
## 4 sibsp 0.0346 0.00459
## 5 parch 0.0166 0.00247
```
Note that the standard deviation of each VI score is also computed and returned whenever `nsim > 1`. The results are comparable to what the fitted random forest computed internally by setting `importance = TRUE` and `nPerm = 30`; the difference is that the random forest uses the OOB data when computing the drop in accuracy after shuffling each variable.
```r
sort(rfo$importance[, "MeanDecreaseAccuracy"], decreasing = TRUE)
```
```
## sex pclass age parch sibsp
## 0.17102147 0.05877827 0.04408406 0.01895065 0.01583429
```
Next, we'll compute permutation VI scores using a metric that requires predicted probabilities. Here, we'll use the Brier score, which measures the accuracy of the individual probabilities (smaller is better). However, instead of using the built-in `metric = "brier"` option, we'll pass the corresponding [yardstick](https://cran.r-project.org/package=yardstick) function directly. Note that we have to modify the prediction wrapper to not only return predicted probabilities, but a single vector of probabilities in the case of a binary outcome (in this case, we care about the event `survived = "yes"`):
```r
pfun_prob <- function(object, newdata) { # prediction wrapper
predict(object, newdata = newdata, type = "prob")[, "yes"]
}
# Compute mean increase in Brier score
set.seed(1411) # for reproducibility
vi(rfo,
method = "permute",
train = t1,
target = "survived",
metric = yardstick::brier_class_vec, # or pass in `"brier"` directly
   smaller_is_better = TRUE, # need to set when supplying a function; smaller Brier scores are better
pred_wrapper = pfun_prob,
nsim = 30 # use 30 repetitions
)
```
```
## # A tibble: 5 × 3
## Variable Importance StDev
## <chr> <dbl> <dbl>
## 1 sex 0.209 0.00866
## 2 pclass 0.0977 0.00479
## 3 age 0.0947 0.00460
## 4 parch 0.0542 0.00271
## 5 sibsp 0.0414 0.00186
```
Finally, to illustrate the use of the `event_level` argument, we'll compute the permutation-based VI scores using the *area under the ROC curve* (AUROC or `metric = "roc_auc"`).
```r
set.seed(1413) # for reproducibility
vi(rfo,
method = "permute",
train = t1,
target = "survived",
metric = "roc_auc",
pred_wrapper = pfun_prob,
nsim = 30 # use 30 repetitions
)
```
```
## # A tibble: 5 × 3
## Variable Importance StDev
## <chr> <dbl> <dbl>
## 1 parch -0.0251 0.00351
## 2 sibsp -0.0283 0.00211
## 3 age -0.0850 0.00477
## 4 pclass -0.0920 0.00533
## 5 sex -0.229 0.0137
```
Why are the results negative? The issue is that metrics like AUROC (and similarly the *area under the PR curve*) treat one of the class outcomes as the "event" of interest. In our case, we are using the predicted probability for the event `survived = "yes"`, but the default event level (in [yardstick](https://cran.r-project.org/package=yardstick) and therefore [vip](https://cran.r-project.org/package=vip)) is always the first class label in alphabetical order (or `survived = "no"`, in this case):
```r
levels(titanic$survived)
```
```
## [1] "no" "yes"
```
Consequently, when using metrics like AUROC, it is a good idea to set the `event_level` parameter in the call to `vi()` or `vi_permute()`. To fix the previous issue, just set the event level to the second class label using `event_level = "second"`:
```r
set.seed(1413) # for reproducibility
vi(rfo,
method = "permute",
train = t1,
target = "survived",
metric = "roc_auc",
event_level = "second", # use "yes" as class label/"event" of interest
pred_wrapper = pfun_prob,
nsim = 30 # use 30 repetitions
)
```
```
## # A tibble: 5 × 3
## Variable Importance StDev
## <chr> <dbl> <dbl>
## 1 sex 0.229 0.0137
## 2 pclass 0.0920 0.00533
## 3 age 0.0850 0.00477
## 4 sibsp 0.0283 0.00211
## 5 parch 0.0251 0.00351
```
Much better (and just the negative of the previous results, as expected)! For a similar example using a multiclass outcome, see the discussion in [this issue](https://github.com/juliasilge/juliasilge.com/issues/57).
### Benchmarks
In this section, we compare the performance of four implementations of permutation-based VI scores: `iml::FeatureImp()` (version 0.11.1), `ingredients::feature_importance()` (version 2.3.0), `mmpf::permutationImportance` (version 0.0.5), and `vip::vi()` (version 0.4.1).
We simulated 10,000 training observations from the Friedman 1 benchmark problem and trained a random forest using the [ranger](https://cran.r-project.org/package=ranger) package. For each implementation, we computed permutation-based VI scores 100 times using the [microbenchmark](https://cran.r-project.org/package=microbenchmark) package [@R-microbenchmark]. For this benchmark we did not use any of the parallel processing capability available in the [iml](https://cran.r-project.org/package=iml) and [vip](https://cran.r-project.org/package=vip) implementations. The results from [microbenchmark](https://cran.r-project.org/package=microbenchmark) are displayed in Figure \@ref(fig:benchmark) and summarized in the output below. In this case, the [vip](https://cran.r-project.org/package=vip) package (version 0.4.1) was the fastest, followed closely by [ingredients](https://cran.r-project.org/package=ingredients) and [mmpf](https://cran.r-project.org/package=mmpf). It should be noted, however, that the implementations in [vip](https://cran.r-project.org/package=vip) and [iml](https://cran.r-project.org/package=iml) can be parallelized. To the best of our knowledge, this is not the case for [ingredients](https://cran.r-project.org/package=ingredients) or [mmpf](https://cran.r-project.org/package=mmpf) (although it would not be difficult to write a simple parallel wrapper for either). The code used to generate these benchmarks can be found at https://github.com/koalaverse/vip/blob/master/slowtests/slowtests-benchmarks.R.
<img src="../man/figures/benchmark-1.png" alt="Violin plots comparing the computation time from three different implementations of permutation-based VI scores across 100 simulations." width="70%" />
## Shapley method
Although [vip](https://cran.r-project.org/package=vip) focuses on global VI methods, it is becoming increasingly popular to assess global importance by aggregating local VI measures; in particular, *Shapley explanations* [@strumbelj-2014-explaining]. Using *Shapley values* (a method from coalitional game theory), the prediction for a single instance $x^\star$ can be explained by assuming that each feature value in $x^\star$ is a "player" in a game with a payout equal to the corresponding prediction $\hat{f}\left(x^\star\right)$. Shapley values tell us how to fairly distribute the "payout" (i.e., prediction) among the features. Shapley values have become popular due to the attractive fairness properties they possess [@lundberg_unified_2017]. The most popular implementation is available in the Python [shap](https://github.com/shap/shap) package [@lundberg_unified_2017], although a number of implementations are now available in R; for example, [iml](https://cran.r-project.org/package=iml), [iBreakDown](https://cran.r-project.org/package=iBreakDown) [@R-iBreakDown], and [fastshap](https://cran.r-project.org/package=fastshap) [@R-fastshap].
Obtaining a global VI score from Shapley values requires aggregating the Shapley values for each feature across the entire training set (or at least a reasonable sample thereof). In particular, we use the mean of the absolute value of the individual Shapley values for each feature. Unfortunately, Shapley values can be computationally expensive, and therefore this approach may not be feasible for large training sets (say, >3000 observations). The [fastshap](https://cran.r-project.org/package=fastshap) package provides some relief by exploiting a few computational tricks, including the option to perform computations in parallel (see `?fastshap::explain` for details). Also, fast and exact algorithms [@lundberg-explainable-2019] can be exploited for certain classes of models.
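In symbols, if $\phi_{ij}$ denotes the Shapley value of feature $X_j$ for training observation $\boldsymbol{x}_i$, then the aggregated (global) VI score described above is simply
\begin{equation}
Imp\left(X_j\right) = \frac{1}{n} \sum_{i = 1}^n \left|\phi_{ij}\right|.
\end{equation}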
Starting with [vip](https://cran.r-project.org/package=vip) version 0.4.1 you can now use `method = "shap"` in the call to `vi()` (or use `vi_shap()` directly) to compute global Shapley-based VI scores using the method described above (provided you have the [fastshap](https://cran.r-project.org/package=fastshap) package installed)---see `?vip::vi_shap` for details. To illustrate, we compute Shapley-based VI scores from an [xgboost](https://cran.r-project.org/package=xgboost) model [@R-xgboost] using the Friedman data from earlier; the results are displayed in Figure \@ref(fig:vi-shap).^[Note that the `exact = TRUE` option is only available if you have [fastshap](https://cran.r-project.org/package=fastshap) version 0.0.4 or later.] (**Note:** specifying `include_type = TRUE` in the call to `vip()` causes the type of VI computed to be displayed as part of the axis label.)
```r
# Load required packages
library(xgboost)
# Feature matrix
X <- data.matrix(subset(trn, select = -y)) # matrix of feature values
# Fit an XGBoost model; hyperparameters were tuned using 5-fold CV
set.seed(859) # for reproducibility
bst <- xgboost(X, label = trn$y, nrounds = 338, max_depth = 3, eta = 0.1,
verbose = 0)
# Construct VIP (Figure 17)
vip(bst, method = "shap", train = X, exact = TRUE, include_type = TRUE,
geom = "point", horizontal = FALSE,
aesthetics = list(color = "forestgreen", shape = 17, size = 5)) +
theme_light()
```
<img src="../man/figures/vi-shap-1.png" alt="Shapley-based VI scores from an XGBoost model fit to the simulated Friedman data." width="70%" />
Passing `exact = TRUE` to `fastshap::explain()` via the `...` argument in the call to `vip()` (or `vi()` and `vi_shap()`) only works for [lightgbm](https://cran.r-project.org/package=lightgbm), [xgboost](https://cran.r-project.org/package=xgboost), and additive (generalized) linear models fit using R's internal **`stats`** package. For all other cases, a prediction wrapper must be supplied via the `...` argument.
To illustrate, let's use the previous random forest that was fit to the Titanic data set. Note that Shapley explanations do not directly support classification outcomes, so we'll have to use the probability-based prediction wrapper defined before:
```r
pfun_prob <- function(object, newdata) { # prediction wrapper
# For Shapley explanations, this should ALWAYS return a numeric vector
predict(object, newdata = newdata, type = "prob")[, "yes"]
}
# Compute Shapley-based VI scores
set.seed(853) # for reproducibility
vi_shap(rfo, train = subset(t1, select = -survived), pred_wrapper = pfun_prob,
nsim = 30)
```
```
## # A tibble: 5 × 2
## Variable Importance
## <chr> <dbl>
## 1 pclass 0.104
## 2 age 0.0649
## 3 sex 0.272
## 4 sibsp 0.0260
## 5 parch 0.0291
```
## Drawbacks of existing methods
As discussed in @hooker-2019-stop, *permute-and-predict* methods—like PDPs, ICE curves, and permutation importance—can produce results that are highly misleading.^[It's been argued that approximate Shapley values share the same drawback; however, @janzing-2019-feature makes a compelling case against those arguments.] For example, the standard approach to computing permutation-based VI scores involves independently permuting individual features. This implicitly assumes that the observed features are statistically independent. In practice, however, features are often not independent, which can lead to nonsensical VI scores. One way to mitigate this issue is to use the conditional approach described in @strobl-2019-conditional; @hooker-2019-stop provides additional alternatives, such as *permute-and-relearn importance*. Unfortunately, to the best of our knowledge, these approaches are not yet available in a general-purpose implementation. A similar modification can be applied to PDPs [@parr-2019-technical]^[A basic R implementation is available at <https://github.com/bgreenwell/rstratx>.], which seems reasonable to use in the FIRM approach when strong dependencies among the features are present (though we have not given this much thought or consideration).
We already mentioned that PDPs can be misleading in the presence of strong interaction effects. This drawback, of course, equally applies to the FIRM approach using PDPs for computing VI scores. As discussed earlier, this can be mitigated by using ICE curves instead. Another alternative would be to use *accumulated local effect* (ALE) plots [@apley-2016-visualizing] (though we haven't really tested this idea). Compared to PDPs, ALE plots have the advantage of being faster to compute and less affected by strong dependencies among the features. The downside, however, is that ALE plots are more complicated to implement (hence, they are not currently available when using `method = "firm"`). ALE plots are available in the [ALEPlot](https://cran.r-project.org/package=ALEPlot) [@R-ALEPlot] and [iml](https://cran.r-project.org/package=iml) packages.
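For the curious, the following is a hedged sketch of computing an ALE curve for a single feature with [iml](https://cran.r-project.org/package=iml); the fitted model `rfo` and training frame `trn` are illustrative placeholders.
```r
# Hedged sketch: accumulated local effect (ALE) curve for one feature via 'iml'
# (`rfo` and `trn` are illustrative placeholders for a fitted model and its
# training data)
library(iml)

pred <- Predictor$new(rfo, data = subset(trn, select = -y), y = trn$y)
ale <- FeatureEffect$new(pred, feature = "x1", method = "ale")
plot(ale)
```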
@hooker-2007-generalized also argues that feature importance (which concerns only *main effects*) can be misleading in high-dimensional settings, especially when there are strong dependencies and interaction effects among the features, and suggests an approach based on a *generalized functional ANOVA decomposition*—though, to our knowledge, this approach is not widely implemented in open-source software.
# Summary
VIPs help to visualize the strength of the relationship between each feature and the predicted response, while accounting for all the other features in the model. We've discussed two types of VI: model-specific and model-agnostic, as well as some of their strengths and weaknesses. In this paper, we showed how to construct VIPs for various types of "black box" models in R using the [vip](https://cran.r-project.org/package=vip) package. We also briefly discussed related approaches available in a number of other R packages. Suggestions to avoid high execution times were discussed and demonstrated via examples. This paper is based on [vip](https://cran.r-project.org/package=vip) version 0.4.1. In terms of future development, [vip](https://cran.r-project.org/package=vip) can be expanded in a number of ways. For example, we plan to incorporate the option to compute group-based and conditional permutation scores. Although not discussed in this paper, [vip](https://cran.r-project.org/package=vip) also includes a promising statistic (similar to the variance-based VI scores previously discussed) for measuring the relative strength of interaction between features. Although VIPs can help understand which features are driving the model's predictions, ML practitioners should be cognizant of the fact that none of the methods discussed in this paper are uniformly best across all situations; they require an accurate model that has been properly tuned, and should be checked for consistency with human domain knowledge.
# Acknowledgments
The authors would like to thank the anonymous reviewers and the Editor for their helpful comments and suggestions. We would also like to thank the members of the 84.51$^{\circ}$ Interpretable Machine Learning Special Interest Group for their thoughtful discussions on the topics discussed herein.
# References
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/doc/vip.Rmd
|
# Specify model formulae
form1 <- y ~ x1 + x2 + I(x2 ^ 2) + sin(x2)
form2 <- ~ x1 + x2 + I(x2 ^ 2) + sin(x2) # no LHS
form3 <- terms(y ~ ., data = data.frame(y = 1:5, x1 = 1:5, x2 = 1:5))
# Expectations
expect_identical(
current = vip:::get_feature_names.formula(form1),
target = c("x1", "x2")
)
expect_error(
current = vip:::get_feature_names.formula(form2)
)
expect_identical( # check dot expansion
current = vip:::get_feature_names.formula(form3),
target = c("x1", "x2")
)
# Exits
if (!requireNamespace("nnet", quietly = TRUE)) {
exit_file("Package nnet missing")
}
# Load required packages
suppressMessages({
library(nnet)
})
# Formula interface
fit1 <- nnet::nnet(Sepal.Length ~ . + I(Petal.Width^2), size = 2, data = iris,
linout = TRUE, trace = FALSE)
# Matrix interface
mm <- model.matrix(Sepal.Length ~ . - 1, data = iris)
fit2 <- nnet::nnet(x = mm, y = iris$Sepal.Length, size = 2, data = iris,
linout = TRUE, trace = FALSE)
# Expectations
expect_identical(
current = vip:::get_feature_names.nnet(fit1),
target = setdiff(x = names(iris), y = "Sepal.Length")
)
expect_error(
current = vip:::get_feature_names.nnet(fit2)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_get_feature_names.R
|
# Exits
if (!requireNamespace("C50", quietly = TRUE)) {
exit_file("Package 'C50' missing")
}
# # Load required packages
# suppressMessages({
# library(C50)
# })
# Generate Friedman benchmark data
friedman2 <- gen_friedman(seed = 101, n_bins = 2)
# Fit model(s)
fit1 <- C50::C5.0(y ~ ., friedman2)
fit2 <- C50::C5.0(x = friedman2[, paste0("x", 1L:10L)], y = friedman2$y)
# Compute VI scores
vis1 <- vi_model(fit1)
vis2 <- vi_model(fit2)
# Expectations for `vi_model()`
expect_identical(
current = vis1,
target = vis2
)
expect_identical(
current = vis1$Importance,
C50::C5imp(fit1, metric = "usage")$Overall
)
expect_identical(
current = vi_model(fit1, type = "splits", pct = FALSE)$Importance,
C50::C5imp(fit1, metric = "splits", pct = FALSE)$Overall
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.C5.0(fit1),
target = paste0("x", 1L:10L)
)
expect_identical(
current = vip:::get_feature_names.C5.0(fit2),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_C50.R
|
# Exits
if (!requireNamespace("caret", quietly = TRUE)) {
exit_file("Package 'caret' missing")
}
if (!requireNamespace("Cubist", quietly = TRUE)) {
exit_file("Package 'Cubist' missing")
}
# # Load required packages
# suppressMessages({
# library(caret)
# library(Cubist)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
fit <- Cubist::cubist(
x = subset(friedman1, select = -y),
y = friedman1$y,
committees = 10
)
# Compute VI scores
vis1 <- vi_model(fit)
vis2 <- caret::varImp(fit)
# Expectations for `vi_model()`
expect_identical(
current = vis1$Importance,
target = vis2[vis1$Variable, , drop = TRUE]
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.cubist(fit),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_Cubist.R
|
# Exits
if (!requireNamespace("RSNNS", quietly = TRUE)) {
exit_file("Package 'RSNNS' missing")
}
if (!requireNamespace("NeuralNetTools", quietly = TRUE)) {
exit_file("Package 'NeuralNetTools' missing")
}
# # Load required packages
# suppressMessages({
# library(RSNNS)
# library(NeuralNetTools)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
set.seed(101) # for reproducibility
fit <- RSNNS::mlp(
x = subset(friedman1, select = -y),
y = friedman1$y,
size = 10,
linOut = TRUE,
maxit = 1000
)
# Compute VI scores
vis1 <- vi_model(fit)
vis2 <- vi_model(fit, type = "garson")
# Expectations for `vi_model()`
expect_identical(
current = vis1$Importance,
target = NeuralNetTools::olden(fit, bar_plot = FALSE)$importance
)
expect_identical(
current = vis2$Importance,
target = NeuralNetTools::garson(fit, bar_plot = FALSE)$rel_imp
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_RSNNS.R
|
# Exits
if (!requireNamespace("caret", quietly = TRUE)) {
exit_file("Package 'caret' missing")
}
# # Load required packages
# suppressMessages({
# library(caret)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
fit <- caret::train(y ~ ., friedman1, method = "lm")
# Compute VI scores
vis1 <- vi_model(fit)
vis2 <- caret::varImp(fit)
# Expectations for `vi_model()`
expect_identical(
current = vis1$Importance,
target = vis2$importance[vis1$Variable, , drop = TRUE]
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.train(fit),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_caret.R
|
# Exits
if (!requireNamespace("earth", quietly = TRUE)) {
exit_file("Package 'earth' missing")
}
# Load required packages
suppressMessages({
library(earth)
})
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
fit <- earth::earth(y ~ ., degree = 2, data = friedman1)
# Compute VI scores
vis_nsubsets <- vi_model(fit)
vis_rss <- vi_model(fit, type = "rss")
vis_gcv <- vi_model(fit, type = "gcv")
vis_earth <- earth::evimp(fit)
# Expectations for `vi_model()`
expect_identical(
current = vis_nsubsets[seq_len(nrow(vis_earth)), ]$Importance,
target = unname(vis_earth[, "nsubsets", drop = TRUE])
)
expect_identical(
current = vis_rss[seq_len(nrow(vis_earth)), ]$Importance,
target = unname(vis_earth[, "rss", drop = TRUE])
)
expect_identical(
current = vis_gcv[seq_len(nrow(vis_earth)), ]$Importance,
target = unname(vis_earth[, "gcv", drop = TRUE])
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.earth(fit),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_earth.R
|
# Exits
if (!requireNamespace("gbm", quietly = TRUE)) {
exit_file("Package 'gbm' missing")
}
# # Load required packages
# suppressMessages({
# library(gbm)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
set.seed(101)
fit <- gbm::gbm(y ~ ., distribution = "gaussian", data = friedman1,
n.trees = 100, interaction.depth = 2, shrinkage = 0.1)
# Compute VI scores
vis1 <- vi_model(fit)
set.seed(102)
vis2 <- vi_model(fit, type = "permutation")
vis3 <- gbm::summary.gbm(fit, plotit = FALSE)
set.seed(102)
vis4 <- gbm::summary.gbm(fit, plotit = FALSE,
method = gbm::permutation.test.gbm)
# Expectations for `vi_model()`
expect_identical(
current = vis1$Importance,
target = vis3$rel.inf
)
expect_identical(
current = vis2$Importance,
target = vis4$rel.inf
)
# # Expectations for `get_training_data()`
# expect_identical(
# current = vip:::get_training_data.gbm(fit),
# target = friedman1
# )
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.gbm(fit),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_gbm.R
|
# Exits
if (!requireNamespace("glmnet", quietly = TRUE)) {
exit_file("Package 'glmnet' missing")
}
# # Load required packages
# suppressMessages({
# library(glmnet)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
friedman3 <- gen_friedman(seed = 101, n_bins = 3)
# Fit model(s)
fit1 <- glmnet::glmnet(
x = data.matrix(subset(friedman1, select = -y)),
y = friedman1$y
)
fit2 <- glmnet::cv.glmnet(
x = data.matrix(subset(friedman1, select = -y)),
y = friedman1$y
)
fit3 <- glmnet::glmnet(
x = data.matrix(subset(friedman3, select = -y)),
y = friedman3$y,
family = "multinomial"
)
# Compute VI scores
vis1 <- vi_model(fit1)
vis2 <- vi_model(fit2)
vis3 <- vi_model(fit3)
# Expectations for `vi_model()`
expect_identical(
current = vis1$Importance,
target = abs(coef(fit1, s = min(fit1$lambda))[-1L])
)
expect_identical(
current = vis2$Importance,
target = abs(coef(fit2, s = "lambda.1se")[-1L])
)
expect_identical(
current = vis3$Importance,
target = abs(coef(fit3, s = min(fit3$lambda))[[1L]][-1L])
)
expect_identical(
current = vi_model(fit1, lambda = fit1$lambda[5L])$Importance,
target = abs(coef(fit1, s = fit1$lambda[5L])[-1L])
)
expect_identical(
current = vi_model(fit2, lambda = fit2$lambda[5L])$Importance,
target = abs(coef(fit2, s = fit2$lambda[5L])[-1L])
)
expect_identical(
current = vi_model(fit3, lambda = fit3$lambda[5L])$Importance,
target = abs(coef(fit3, s = fit3$lambda[5L])[[1L]][-1L])
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.glmnet(fit1),
target = paste0("x", 1L:10L)
)
expect_identical(
current = vip:::get_feature_names.cv.glmnet(fit2),
target = paste0("x", 1L:10L)
)
expect_identical(
current = vip:::get_feature_names.multnet(fit3),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_glmnet.R
|
# Exits
if (!requireNamespace("h2o", quietly = TRUE)) {
exit_file("Package 'h2o' missing")
}
if (length(unclass(packageVersion("vip"))[[1L]]) %in% c(3, 4)) {
exit_file("Skip h2o tests for CRAN releases")
}
# Load required packages
suppressMessages({
library(h2o)
})
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
friedman2 <- gen_friedman(seed = 101, n_bins = 2)
friedman3 <- gen_friedman(seed = 101, n_bins = 3)
# Initialize connection to H2O
h2o.init()
h2o.no_progress()
# Fit model(s)
fit1 <- h2o.glm( # regression
x = paste0("x", 1L:10L),
y = "y",
training_frame = as.h2o(friedman1)
)
fit2 <- h2o.glm( # binary classification
x = paste0("x", 1L:10L),
y = "y",
training_frame = as.h2o(friedman2),
family = "binomial"
)
fit3 <- h2o.glm( # multiclass classification
x = paste0("x", 1L:10L),
y = "y",
training_frame = as.h2o(friedman3),
family = "multinomial"
)
# Compute VI scores
vis1 <- vi_model(fit1)
vis2 <- vi_model(fit2)
vis3 <- vi_model(fit3)
# Expectations for `vi_model()`
expect_identical(
current = vis1$Importance,
target = h2o.varimp(fit1)$relative_importance
)
expect_identical(
current = vis2$Importance,
target = h2o.varimp(fit2)$relative_importance
)
expect_identical(
current = vis3$Importance,
target = h2o.varimp(fit3)$relative_importance
)
# FIXME: Why not identical? Conversion issues?
# Expectations for `get_training_data()`
expect_equal(
current = vip:::get_training_data.H2ORegressionModel(fit1),
target = friedman1
)
expect_equal(
current = vip:::get_training_data.H2OBinomialModel(fit2),
target = friedman2
)
expect_equal(
current = vip:::get_training_data.H2OMultinomialModel(fit3),
target = friedman3
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.H2ORegressionModel(fit1),
target = paste0("x", 1L:10L)
)
expect_identical(
current = vip:::get_feature_names.H2OBinomialModel(fit2),
target = paste0("x", 1L:10L)
)
expect_identical(
current = vip:::get_feature_names.H2OMultinomialModel(fit3),
target = paste0("x", 1L:10L)
)
# Shutdown H2O connection
h2o.shutdown(prompt = FALSE)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_h2o.R
|
# Exits
if (!requireNamespace("lightgbm", quietly = TRUE)) {
exit_file("Package 'lightgbm' missing")
}
# # Load required packages
# suppressMessages({
# library(lightgbm)
# })
# Basic example using imputed titanic data set
t3 <- titanic_mice[[1L]]
# Fit a simple model
set.seed(1449) # for reproducibility
bst <- lightgbm::lightgbm(
data = data.matrix(subset(t3, select = -survived)),
label = ifelse(t3$survived == "yes", 1, 0),
params = list("objective" = "binary", "force_row_wise" = TRUE),
verbose = 0
)
# Compute VI scores
vi_gain <- vi_model(bst)
vi_cover <- vi_model(bst, type = "cover")
vi_frequency <- vi_model(bst, type = "frequency")
vi_lightgbm <- lightgbm::lgb.importance(model = bst)
# Expectations for `vi_model()`
expect_identical(
current = vi_gain$Importance,
target = vi_lightgbm$Gain
)
expect_identical(
current = vi_cover$Importance,
target = vi_lightgbm$Cover
)
expect_identical(
current = vi_frequency$Importance,
target = vi_lightgbm$Frequency
)
expect_identical(
current = vi_model(bst, percentage = FALSE)$Importance,
target = lightgbm::lgb.importance(bst, percentage = FALSE)$Gain
)
# Expectations for `get_training_data()`
expect_error(vip:::get_training_data.default(bst))
# Call `vip::vip()` directly
p <- vip(bst, method = "model", include_type = TRUE)
# Expect `p` to be a `"gg" "ggplot"` object
expect_identical(
current = class(p),
target = c("gg", "ggplot")
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_lightgbm.R
|
# Exits
if (!requireNamespace("mixOmics", quietly = TRUE)) {
exit_file("Bioconductor package 'mixOmics' missing")
}
# # Load required packages
# suppressMessages({
# library(mixOmics)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
friedman3 <- gen_friedman(seed = 101, n_bins = 3)
# univariate regression
pls_mo <- mixOmics::pls(friedman1[, -1], friedman1[, 1, drop = FALSE], ncomp = 3)
spls_mo <- mixOmics::spls(friedman1[, -1], friedman1[, 1, drop = FALSE], ncomp = 3)
pls_mo_imp <- mixOmics::vip(pls_mo)
spls_mo_imp <- mixOmics::vip(spls_mo)
# classification
plsda_mo <- mixOmics::plsda(friedman3[, -1], friedman3$y, ncomp = 3)
splsda_mo <- mixOmics::splsda(friedman3[, -1], friedman3$y, ncomp = 3)
plsda_mo_imp <- mixOmics::vip(plsda_mo)
splsda_mo_imp <- mixOmics::vip(splsda_mo)
# Expectations for `vi_model()`
for (i in 1:3) {
pls_vip_imp <- vi_model(pls_mo, ncomp = i)
expect_identical(
current = pls_vip_imp$Importance,
target = pls_mo_imp[,i]
)
spls_vip_imp <- vi_model(spls_mo, ncomp = i)
expect_identical(
current = spls_vip_imp$Importance,
target = spls_mo_imp[,i]
)
plsda_vip_imp <- vi_model(plsda_mo, ncomp = i)
expect_identical(
current = plsda_vip_imp$Importance,
target = plsda_mo_imp[,i]
)
splsda_vip_imp <- vi_model(splsda_mo, ncomp = i)
expect_identical(
current = splsda_vip_imp$Importance,
target = splsda_mo_imp[,i]
)
}
pls_vip_imp <- vi_model(pls_mo)
expect_identical(
current = pls_vip_imp$Importance,
target = pls_mo_imp[,3]
)
spls_vip_imp <- vi_model(spls_mo)
expect_identical(
current = spls_vip_imp$Importance,
target = spls_mo_imp[,3]
)
plsda_vip_imp <- vi_model(plsda_mo)
expect_identical(
current = plsda_vip_imp$Importance,
target = plsda_mo_imp[,3]
)
splsda_vip_imp <- vi_model(splsda_mo)
expect_identical(
current = splsda_vip_imp$Importance,
target = splsda_mo_imp[,3]
)
expect_error(
vi_model(pls_mo, ncomp = 1:3),
"should be a single integer"
)
expect_warning(
too_many <- vi_model(pls_mo, ncomp = 300),
"Results are for 3"
)
expect_identical(
current = too_many$Importance,
target = pls_mo_imp[,3]
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_mixOmics.R
|
# Exits
if (!requireNamespace("mlr", quietly = TRUE)) {
exit_file("Package 'mlr' missing")
}
if (!requireNamespace("ranger", quietly = TRUE)) {
exit_file("Package 'ranger' missing")
}
# Load required packages
#suppressMessages({
# library(mlr)
# library(ranger)
#})
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
set.seed(101)
task <- mlr::makeRegrTask("friedman", data = friedman1, target = "y")
lrnr <- mlr::makeLearner("regr.ranger", importance = "impurity")
fit <- mlr::train(lrnr, task = task)
# Compute model-based VI scores
vis <- vi_model(fit)
# Expect `vi()` and `vi_model()` to both work
expect_identical(
current = vi(fit, sort = FALSE),
target = vi_model(fit)
)
# Expectations for `vi_model()`
expect_identical(
current = vis$Importance,
target = unname(fit$learner.model$variable.importance)
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.WrappedModel(fit),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_mlr.R
|
# Exits
if (!requireNamespace("mlr3", quietly = TRUE)) {
exit_file("Package 'mlr3' missing")
}
if (!requireNamespace("mlr3learners", quietly = TRUE)) {
exit_file("Package 'mlr3learners' missing")
}
if (!requireNamespace("ranger", quietly = TRUE)) {
exit_file("Package 'ranger' missing")
}
# Load required packages
#suppressMessages({
# library(mlr3)
# library(mlr3learners)
# library(ranger)
#})
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
set.seed(101)
task <- mlr3::TaskRegr$new("friedman", backend = friedman1, target = "y")
lrnr <- mlr3::lrn("regr.ranger", importance = "impurity")
expect_error(vi(lrnr)) # did not call `$train()` yet
expect_error(vi_model(lrnr)) # did not call `$train()` yet
lrnr$train(task) # `vi()`, etc. should now work
# Compute model-based VI scores
vis <- vi_model(lrnr)
# Expect `vi()` and `vi_model()` to both work
expect_identical(
current = vi(lrnr, sort = FALSE),
target = vi_model(lrnr)
)
# Expectations for `vi_model()`
expect_identical(
current = vis$Importance,
target = unname(lrnr$model$variable.importance)
)
# Expectations for `get_feature_names()`
expect_identical(
current = sort(vip:::get_feature_names.Learner(lrnr)),
target = sort(paste0("x", 1L:10L))
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_mlr3.R
|
# Exits
if (!requireNamespace("neuralnet", quietly = TRUE)) {
exit_file("Package 'neuralnet' missing")
}
if (!requireNamespace("NeuralNetTools", quietly = TRUE)) {
exit_file("Package 'NeuralNetTools' missing")
}
# # Load required packages
# suppressMessages({
# library(neuralnet)
# library(NeuralNetTools)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
set.seed(101) # for reproducibility
fit <- neuralnet::neuralnet(y ~ ., data = friedman1)
# Compute VI scores
vis1 <- vi_model(fit)
vis2 <- vi_model(fit, type = "garson")
# Expectations for `vi_model()`
expect_identical(
current = vis1$Importance,
target = NeuralNetTools::olden(fit, bar_plot = FALSE)$importance
)
expect_identical(
current = vis2$Importance,
target = NeuralNetTools::garson(fit, bar_plot = FALSE)$rel_imp
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.nn(fit),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_neuralnet.R
|
# Exits
if (!requireNamespace("nnet", quietly = TRUE)) {
exit_file("Package 'nnet' missing")
}
if (!requireNamespace("NeuralNetTools", quietly = TRUE)) {
exit_file("Package 'NeuralNetTools' missing")
}
# # Load required packages
# suppressMessages({
# library(nnet)
# library(NeuralNetTools)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
set.seed(101) # for reproducibility
fit <- nnet::nnet(y ~ ., data = friedman1, size = 10, decay = 0.1,
linout = TRUE, maxit = 1000, trace = FALSE)
# Compute VI scores
vis1 <- vi_model(fit)
vis2 <- vi_model(fit, type = "garson")
# Expectations for `vi_model()`
expect_identical(
current = vis1$Importance,
target = NeuralNetTools::olden(fit, bar_plot = FALSE)$importance
)
expect_identical(
current = vis2$Importance,
target = NeuralNetTools::garson(fit, bar_plot = FALSE)$rel_imp
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.nnet(fit),
target = paste0("x", 1L:10L)
)
# Call `vip::vip()` directly
p <- vip(fit, method = "model", include_type = TRUE)
# Expect `p` to be a `"gg" "ggplot"` object
expect_identical(
current = class(p),
target = c("gg", "ggplot")
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_nnet.R
|
# Exits
if (!requireNamespace("party", quietly = TRUE)) {
exit_file("Package 'party' missing")
}
if (!requireNamespace("varImp", quietly = TRUE)) {
exit_file("Package 'varImp' missing")
}
# Load required packages
suppressMessages({
library(party)
library(varImp)
})
# Generate Friedman benchmark data
friedman2 <- gen_friedman(seed = 101, n_bins = 2)
# Fit model(s)
set.seed(101)
fit1 <- party::cforest(y ~ ., data = friedman2)
fit2 <- party::ctree(y ~ ., data = friedman2)
# Compute VI scores
set.seed(102)
vis1 <- vi_model(fit1)
set.seed(102)
vis2 <- vi_model(fit1, type = "auc")
set.seed(102)
vis3 <- party::varimp(fit1)
set.seed(102)
vis4 <- party::varimpAUC(fit1)
# Expectations for `vi_model()`
expect_identical(
current = vis1$Importance,
target = unname(vis3)
)
expect_identical(
current = vis2$Importance,
target = unname(vis4)
)
# Expectations for `get_training_data()`
expect_equal(
current = vip:::get_training_data.RandomForest(fit1),
target = subset(friedman2, select = -y),
check.attributes = FALSE
)
expect_equal(
current = vip:::get_training_data.BinaryTree(fit2),
target = subset(friedman2, select = -y),
check.attributes = FALSE
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.RandomForest(fit1),
target = paste0("x", 1L:10L)
)
expect_identical(
current = vip:::get_feature_names.BinaryTree(fit2),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_party.R
|
# Exits
if (!requireNamespace("partykit", quietly = TRUE)) {
exit_file("Package 'partykit' missing")
}
# Load required packages
suppressMessages({
library(partykit)
})
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
set.seed(101)
fit1 <- partykit::ctree(y ~ ., data = friedman1)
fit2 <- partykit::cforest(y ~ ., data = friedman1)
# Compute VI scores
set.seed(102)
vis1 <- vi_model(fit1)
set.seed(102)
vis2 <- vi_model(fit2)
set.seed(102)
vis3 <- partykit::varimp(fit1)
set.seed(102)
vis4 <- partykit::varimp(fit2)
# Expectations for `vi_model()`
expect_identical(
current = vis1$Importance[seq_along(vis3)],
target = unname(vis3)
)
expect_identical(
current = vis2$Importance,
target = unname(vis4)
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.constparty(fit1),
target = paste0("x", 1L:10L)
)
expect_identical(
current = vip:::get_feature_names.cforest(fit2),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_partykit.R
|
# Exits
if (!requireNamespace("caret", quietly = TRUE)) {
exit_file("Package 'caret' missing")
}
if (!requireNamespace("pls", quietly = TRUE)) {
exit_file("Package 'pls' missing")
}
# # Load required packages
# suppressMessages({
# library(caret)
# library(pls)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
fit <- pls::mvr(y ~ ., data = friedman1)
# Compute VI scores
vis <- vi_model(fit)
# Expectations for `vi_model()`
expect_identical(
current = vis$Importance,
target = caret::varImp(fit)$Overall
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.mvr(fit),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_pls.R
|
# Exits
if (!requireNamespace("randomForest", quietly = TRUE)) {
exit_file("Package 'randomForest' missing")
}
# # Load required packages
# suppressMessages({
# library(randomForest)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
friedman2 <- gen_friedman(seed = 101, n_bins = 2)
# Fit model(s)
set.seed(101)
fit1 <- randomForest::randomForest(y ~ ., data = friedman1)
fit2 <- randomForest::randomForest(y ~ ., data = friedman1, importance = TRUE)
fit3 <- randomForest::randomForest(
x = subset(friedman1, select = -y),
y = friedman1$y
)
fit4 <- randomForest::randomForest(y ~ ., data = friedman2, importance = TRUE)
# Compute VI scores
vis1 <- vi_model(fit1)
vis2 <- vi_model(fit2, type = 1)
vis3 <- vi_model(fit3)
vis4 <- vi_model(fit4, type = 2, scale = FALSE)
# Expectations for `vi_model()`
expect_error(vi_model(fit1, type = 1))
expect_identical(
current = vis1$Importance,
target = unname(fit1$importance[, "IncNodePurity"])
)
expect_identical(
current = vi_model(fit2, type = 1, scale = FALSE)$Importance,
target = unname(fit2$importance[, "%IncMSE", drop = TRUE])
)
expect_identical(
current = vis3$Importance,
target = unname(fit3$importance[, "IncNodePurity"])
)
expect_identical(
current = vis4$Importance,
target = unname(fit4$importance[, "MeanDecreaseGini", drop = TRUE])
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.randomForest(fit1),
target = paste0("x", 1L:10L)
)
expect_identical(
current = vip:::get_feature_names.randomForest(fit3),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_randomForest.R
|
# Exits
if (!requireNamespace("ranger", quietly = TRUE)) {
exit_file("Package 'ranger' missing")
}
# # Load required packages
# suppressMessages({
# library(ranger)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
friedman3 <- gen_friedman(seed = 101, n_bins = 3)
# Fit model(s)
set.seed(101)
fit1 <- ranger::ranger(y ~ ., data = friedman1)
fit2 <- ranger::ranger(y ~ ., data = friedman1, importance = "impurity")
# Compute model-based VI scores
vis <- vi_model(fit2)
# Expectations for `vi_model()`
expect_error(vi_model(fit1))
expect_identical(
current = vis$Importance,
target = unname(fit2$variable.importance)
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.ranger(fit1),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_ranger.R
|
# Exits
if (!requireNamespace("rpart", quietly = TRUE)) {
exit_file("Package 'rpart' missing")
}
# # Load required packages
# suppressMessages({
# library(rpart)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
fit <- rpart::rpart(y ~ ., data = friedman1)
no_splits <- rpart::rpart(y ~ ., data = friedman1, minsplit = 1e06)
# Compute VI scores
vis <- vi_model(fit)
# Expectations for `vi_model()`
expect_identical(
current = vis$Importance,
target = unname(fit$variable.importance)
)
expect_error(vi(no_splits))
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.rpart(fit),
target = paste0("x", 1L:10L)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_rpart.R
|
# Load Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
friedman2 <- gen_friedman(seed = 101, n_bins = 2)
# Fit an additive linear regression model
fit_lm <- lm(y ~ ., data = friedman1)
# Fit an additive logistic regression model
fit_glm <- glm(y ~ ., data = friedman2, family = "binomial")
# Compute variable importance scores
vi_lm <- vi_model(fit_lm)
vi_glm <- vi_model(fit_glm)
# Expectations for `vi_model()`
expect_identical(
current = vi_lm$Importance,
target = unname(abs(summary(fit_lm)$coefficients[, "t value"])[-1])
)
expect_identical(
current = vi_glm$Importance,
target = unname(abs(summary(fit_glm)$coefficients[, "z value"])[-1])
)
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.lm(fit_lm),
target = paste0("x", 1L:10L)
)
# Setting `type = "raw"` should return the absolute value of the original
# coefficients (as opposed to |t-value| or |z-value|)
expect_identical(
current = vi_model(fit_lm, type = "raw")$Importance,
target = unname(abs(coef(fit_lm))[-1])
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_stats.R
|
# Exits
if (!requireNamespace("ranger", quietly = TRUE)) {
exit_file("Package 'ranger' missing")
}
if (!requireNamespace("tidymodels", quietly = TRUE)) {
exit_file("Package 'tidymodels' missing")
}
# Load required packages
#suppressMessages({
# library(ranger)
# library(tidymodels)
#})
# FIXME: Switch to data set where we know the actual ranking of the features in
# terms of predictive performance.
################################################################################
# Fit a random forest to some sample data
################################################################################
# Load sample data from 'modeldata' package
data("bivariate", package = "modeldata")
# Define a 'ranger'-based random forest model
ranger_spec <- parsnip::rand_forest(trees = 1e3, mode = "classification") |>
parsnip::set_engine("ranger", importance = "impurity")
# Fit models
set.seed(421) # for reproducibility
ranger_fit_workflow <- # workflows
workflows::workflow(Class ~ ., ranger_spec) |>
parsnip::fit(bivariate_train)
ranger_fit_parsnip <- # parsnip
ranger_spec |>
parsnip::fit(Class ~ ., data = bivariate_train)
# Extract underlying 'ranger' fits
fit_workflow <- workflows::extract_fit_engine(ranger_fit_workflow)
fit_parsnip <- parsnip::extract_fit_engine(ranger_fit_parsnip)
################################################################################
# Model-based variable importance
################################################################################
# Extract model-based VI scores
vi_mod_workflow <- vi(ranger_fit_workflow)
vi_mod_parsnip <- vi(ranger_fit_parsnip)
# Expect model-based VI scores to be of type "impurity"
expect_identical(attr(vi_mod_workflow, which = "type"), "impurity")
expect_identical(attr(vi_mod_parsnip, which = "type"), "impurity")
# Expect VI scores to be the same (when sorted in decreasing order)
expect_equivalent(vi_mod_workflow$Importance,
sort(fit_workflow$variable.importance, decreasing = TRUE))
expect_equivalent(vi_mod_parsnip$Importance,
sort(fit_parsnip$variable.importance, decreasing = TRUE))
################################################################################
# Permutation-based (i.e., model-agnostic) variable importance
################################################################################
# Define prediction wrapper for 'workflow' object
pfun <- function(object, newdata) {
# Get predicted prob for class "One"
predict(object, new_data = newdata , type = "prob")[[".pred_One"]]
}
# Compute permutation-based VI scores using AUC metric
set.seed(912) # for reproducibility
vi_auc <- ranger_fit_workflow |>
vi(method = "permute",
target = "Class",
metric = "roc_auc",
pred_wrapper = pfun,
event_level = "first",
nsim = 10,
train = bivariate_train,
reference_class = "One")
# Not always the case, but here we can expect these to be in (0, 1)
expect_true(all(vi_auc$Importance > 0 & vi_auc$Importance < 1))
################################################################################
# FIRM-based (i.e., model-agnostic) variable importance
################################################################################
pfun <- function(object, newdata) {
mean(predict(object, new_data = newdata, type = "prob")[[".pred_One"]])
}
vi_pd <- ranger_fit_workflow |>
vi_firm(
feature_names = c("A", "B"), # required
train = bivariate_train, # required
# pdp::partial()-specific arguments
pred.fun = pfun
)
# Not always the case, but here we can expect these to be positive
expect_true(all(vi_pd$Importance > 0))
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_tidymodels.R
|
# Exits
if (!requireNamespace("xgboost", quietly = TRUE)) {
exit_file("Package 'xgboost' missing")
}
# # Load required packages
# suppressMessages({
# library(xgboost)
# })
# Generate Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Fit model(s)
set.seed(101)
fit <- xgboost::xgboost( # params found using `autoxgb::autoxgb()`
data = data.matrix(subset(friedman1, select = -y)),
label = friedman1$y,
max_depth = 3,
eta = 0.1,
nrounds = 301,
verbose = 0
)
# Compute VI scores
vis_gain <- vi_model(fit)
vis_cover <- vi_model(fit, type = "cover")
vis_frequency <- vi_model(fit, type = "frequency")
vis_xgboost <- xgboost::xgb.importance(model = fit)
# Expectations for `vi_model()`
expect_identical(
current = vis_gain$Importance,
target = vis_xgboost$Gain
)
expect_identical(
current = vis_cover$Importance,
target = vis_xgboost$Cover
)
expect_identical(
current = vis_frequency$Importance,
target = vis_xgboost$Frequency
)
# Expectations for `get_training_data()`
expect_error(vip:::get_training_data.default(fit))
# Expectations for `get_feature_names()`
expect_identical(
current = vip:::get_feature_names.xgb.Booster(fit),
target = paste0("x", 1L:10L)
)
# Call `vip::vip()` directly
p <- vip(fit, method = "model", include_type = TRUE)
# Expect `p` to be a `"gg" "ggplot"` object
expect_identical(
current = class(p),
target = c("gg", "ggplot")
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_pkg_xgboost.R
|
# Check dependencies
exit_if_not(
requireNamespace("pdp", quietly = TRUE),
requireNamespace("ranger", quietly = TRUE)
)
# Use one of the available (imputed) versions of the Titanic data
titanic <- titanic_mice[[1L]]
# Feature names
xnames <- names(subset(titanic, select = -survived))
# Fit a default probability forest
set.seed(1511) # for reproducibility
rfo <- ranger(survived ~ ., data = titanic, probability = TRUE)
# Function to run expectations
expectations <- function(object) {
# Check class
expect_identical(class(object),
target = c("vi", "tbl_df", "tbl", "data.frame"))
# Check dimensions (should be one row for each feature)
expect_identical(ncol(titanic) - 1L, target = nrow(object))
# Check that all predictors are accounted for (in the original order)
expect_identical(xnames, object[["Variable"]])
# Check attributes
expect_true("effects" %in% names(attributes(object)))
}
# Prediction wrappers
pfun.pd <- function(object, newdata) {
mean(predict(object, data = newdata)$predictions[, "yes"])
}
pfun.ice <- function(object, newdata) {
predict(object, data = newdata)$predictions[, "yes"]
}
# Compute PD-based importance
vis_pd <- vi_firm(rfo) # default (centered) logit scale
vis_pd_prob <- vi_firm(rfo, prob = TRUE) # probability scale
vis_pd_pfun <- vi_firm(rfo, pred.fun = pfun.pd)
# Expectations
expectations(vis_pd)
expect_equal(vis_pd_prob, vis_pd_pfun)
# Compute ICE-based importance
vis_ice <- vi_firm(rfo, ice = TRUE, prob = TRUE, var_continuous = mad) # use ICE plots
vis_ice_pfun <- vi_firm(rfo, pred.fun = pfun.ice, var_continuous = mad)
# Expectations
expectations(vis_ice)
expect_equal(vis_ice, vis_ice_pfun)
# Use `vi()` function
vis_ice_vi <- vi(rfo, method = "firm", ice = TRUE, prob = TRUE,
var_continuous = mad)
# Expectations
expect_identical(sort(vis_ice$Importance), sort(vis_ice_vi$Importance))
# Check computation by hand!
age.effect <- attr(vis_ice, which = "effects")$age
x <- mean(tapply(age.effect$yhat, INDEX = age.effect$yhat.id, FUN = mad))
y <- vis_ice[vis_ice$Variable == "age", "Importance", drop = TRUE]
expect_identical(x, y)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_vi_firm.R
|
# Check dependencies
exit_if_not(
requireNamespace("ranger", quietly = TRUE)
)
# Prediction wrappers
pfun <- function(object, newdata) { # classification and regression
predict(object, data = newdata)$predictions
}
pfun_prob <- function(object, newdata) { # probability estimation
predict(object, data = newdata)$predictions[, "yes"] # P(survived|x)
}
# Read in data sets
f1 <- gen_friedman(seed = 101) # regression
t3 <- titanic_mice[[1L]] # classification
# List all available metrics
metrics <- list_metrics()
expect_true(inherits(metrics, what = "data.frame"))
################################################################################
#
# Regression
#
################################################################################
# Expectation function for models built on the Friedman 1 data set
expectations_f1 <- function(object) {
# Check class
expect_identical(class(object),
target = c("vi", "tbl_df", "tbl", "data.frame"))
# Check dimensions (should be one row for each feature)
expect_identical(ncol(f1) - 1L, target = nrow(object))
# Check top five predictors
expect_true(all(paste0("x", 1L:5L) %in% object$Variable[1L:5L]))
}
# Fit a (default) random forest
set.seed(1433) # for reproducibility
rfo_f1 <- ranger::ranger(y ~ ., data = f1)
# Try all regression metrics
regression_metrics <- metrics[metrics$task == "Regression", ]$metric
set.seed(828) # for reproducibility
vis <- lapply(regression_metrics, FUN = function(x) {
vi(rfo_f1, method = "permute", target = "y", metric = x,
pred_wrapper = pfun, nsim = 10)
})
lapply(vis, FUN = expectations_f1)
# Use a custom metric
rsquared <- function(truth, estimate) {
cor(truth, estimate) ^ 2
}
# Compute permutation-based importance using R-squared (character string)
set.seed(925) # for reproducibility
vis_rsquared <- vi_permute(
object = rfo_f1,
# train = f1,
target = "y",
metric = "rsq",
pred_wrapper = pfun,
sample_size = 90,
nsim = 10
)
expectations_f1(vis_rsquared)
# Compute permutation-based importance using R-squared (custom function)
set.seed(925) # for reproducibility
vis_rsquared_custom <- vi_permute(
object = rfo_f1,
train = subset(f1, select = -y),
target = f1$y,
metric = rsquared,
smaller_is_better = FALSE,
sample_frac = 0.9,
pred_wrapper = pfun,
nsim = 10
)
expectations_f1(vis_rsquared_custom)
# Check that results are identical
expect_equal(vis_rsquared, target = vis_rsquared_custom)
# Expected errors for `vi_permute()`
expect_error( # missing `pred_wrapper`
vi_permute(
object = rfo_f1,
train = subset(f1, select = -y),
target = f1$y,
metric = rsquared,
smaller_is_better = FALSE,
# pred_wrapper = pfun,
sample_frac = 0.9
)
)
expect_error( # missing `target`
vi_permute(
object = rfo_f1,
train = subset(f1, select = -y),
# target = f1$y,
metric = rsquared,
smaller_is_better = FALSE,
pred_wrapper = pfun,
sample_frac = 0.9
)
)
expect_error( # missing `smaller_is_better`
vi_permute(
object = rfo_f1,
train = subset(f1, select = -y),
target = f1$y,
metric = rsquared,
# smaller_is_better = FALSE,
pred_wrapper = pfun,
sample_frac = 0.9
)
)
expect_error( # trying to set`sample_frac` and `sample_size`
vi_permute(
object = rfo_f1,
train = subset(f1, select = -y),
target = f1$y,
metric = rsquared,
smaller_is_better = FALSE,
pred_wrapper = pfun,
sample_frac = 0.9,
sample_size = 90
)
)
expect_error( # setting `sample_frac` outside of range
vi_permute(
object = rfo_f1,
train = subset(f1, select = -y),
target = f1$y,
metric = rsquared,
smaller_is_better = FALSE,
pred_wrapper = pfun,
sample_frac = 1.9
)
)
################################################################################
#
# Binary classification
#
################################################################################
# Expectation function for models built on the Friedman 1 data set
expectations_t3 <- function(object) {
# Check class
expect_identical(class(object),
target = c("vi", "tbl_df", "tbl", "data.frame"))
# Check dimensions (should be one row for each feature)
expect_identical(ncol(t3) - 1L, target = nrow(object))
# Expect all VI scores to be positive
expect_true(all(object$Importance > 0))
}
# Fit a (default) random forest
set.seed(1454) # for reproducibility
rfo_t3 <- ranger::ranger(survived ~ ., data = t3)
# Try a few binary classification metrics
binary_class_metrics <-
metrics[grepl("binary", x = metrics$task, ignore.case = TRUE), ]$metric[1:3]
set.seed(928) # for reproducibility
vis <- lapply(binary_class_metrics, FUN = function(x) {
vi(rfo_t3, method = "permute", target = "survived", metric = x,
pred_wrapper = pfun, nsim = 10)
})
lapply(vis, FUN = expectations_t3)
# Fit a (default) probability forest
set.seed(1508) # for reproducibility
rfo_t3_prob <- ranger::ranger(survived ~ ., data = t3, probability = TRUE)
# Try all probability-based metrics
binary_prob_metrics <- c("roc_auc", "pr_auc", "logloss")
set.seed(1028) # for reproducibility
vis <- lapply(binary_prob_metrics, FUN = function(x) {
vi(rfo_t3_prob, method = "permute", target = "survived", metric = x,
pred_wrapper = pfun_prob, nsim = 10, event_level = "second")
})
lapply(vis, FUN = expectations_t3)
# Try user-supplied metric with Brier score
brier <- function(truth, estimate) {
mean((ifelse(truth == "yes", 1, 0) - estimate) ^ 2)
}
expectations_t3(
vi(rfo_t3_prob, method = "permute", target = "survived", metric = brier,
pred_wrapper = pfun_prob, nsim = 10, smaller_is_better = TRUE)
)
expect_error( # need to set `smaller_is_better` for non-built-in metrics
vi(rfo_t3_prob, method = "permute", target = "survived", metric = brier,
pred_wrapper = pfun_prob, nsim = 10)
)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_vi_permute.R
|
# Exits
if (!requireNamespace("fastshap", quietly = TRUE)) {
exit_file("Package 'fastshap' missing")
}
# Simulate Friedman benchmark data
trn1 <- gen_friedman(100, seed = 1421)
trn2 <- gen_friedman(100, seed = 1421, n_bins = 2)
# Fit linear and logistic regression models
set.seed(1502) # for reproducibility
fit1 <- lm(y ~ ., data = trn1)
fit2 <- glm(y ~ ., data = trn2, family = binomial(link = "logit"))
# Prediction wrapper
pfun <- function(object, newdata) {
predict(object, newdata = newdata, type = "response")
}
# Compute SHAP-based VI scores
set.seed(1511) # for reproducibility
vis1 <- vi_shap(fit1, pred_wrapper = pfun, nsim = 10, train = trn1)
vis2 <- vi_shap(fit2, pred_wrapper = pfun, nsim = 10, train = trn2)
vis3 <- vi(fit1, method = "shap", pred_wrapper = pfun, nsim = 10, train = trn1)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_vi_shap.R
|
# Load Friedman benchmark data
friedman1 <- gen_friedman(seed = 101)
# Increase length of each feature name
for (i in seq_along(names(friedman1))) {
if (names(friedman1)[i] != "y") {
names(friedman1)[i] <- paste0(names(friedman1)[i], "_ABCDEFGH")
}
}
# Fit an additive linear regression model
fit <- lm(y ~ ., data = friedman1)
# Compute VI scores
vis <- vi(fit, abbreviate_feature_names = 3, rank = TRUE)
# Expectations
expect_error(vi("a")) # unrecognized model type
expect_true(all(vis$Importance %in% 1L:10L))
expect_true(unique(nchar(vis$Variable)) == 3L)
|
/scratch/gouwar.j/cran-all/cranData/vip/inst/tinytest/test_vip.R
|
---
title: "Variable Importance Plots—An Introduction to the vip Package"
output:
bookdown::html_document2:
base_format: rmarkdown::html_vignette
fig_caption: yes
toc: true
toc_depth: 2
number_sections: false
link-citations: yes
pkgdown:
as_is: true
vignette: >
%\VignetteIndexEntry{vip}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
bibliography: '`r system.file("references.bib", package = "vip")`'
---
This vignette is essentially an up-to-date version of @RJ-2020-013. Please use that if you'd like to cite our work.
# Introduction
Too often machine learning (ML) models are summarized using a single metric (e.g., cross-validated accuracy) and then put into production. Although we often care about the predictions from these models, it is becoming routine (and good practice) to also better understand the predictions! Understanding how an ML model makes its predictions helps build trust in the model and is the fundamental idea of the emerging field of *interpretable machine learning* (IML).^[Although "interpretability" is difficult to formally define in the context of ML, we follow @doshivelez-2017-rigorous and describe "interpretable" as the "...ability to explain or to present in understandable terms to a human."] For an in-depth discussion on IML, see @molnar-2019-iml. In this paper, we focus on *global methods* for quantifying the importance^[In this context "importance" can be defined in a number of different ways. In general, we can describe it as *the extent to which a feature has a "meaningful" impact on the predicted outcome*. A more formal definition and treatment can be found in @laan-2006-statistical.] of features in an ML model; that is, methods that help us understand the global contribution each feature has to a model's predictions. Computing variable importance (VI) and communicating them through variable importance plots (VIPs) is a fundamental component of IML and is the main topic of this paper.
While many of the procedures discussed in this paper apply to any model that makes predictions, it should be noted that these methods heavily depend on the accuracy and importance of the fitted model; hence, unimportant features may appear relatively important (albeit not predictive) in comparison to the other included features. For this reason, we stress the usefulness of understanding the scale on which VI scores are calculated and take that into account when assessing the importance of each feature and communicating the results to others. Also, we should point out that this work focuses mostly on *post-hoc interpretability* where a trained model is given and the goal is to understand what features are driving the model's predictions. Consequently, our work focuses on functional understanding of the model in contrast to the lower-level mechanistic understanding [@montavon-2018-methods]. That is, we seek to explain the relationship between the model's prediction behavior and features without explaining the full internal representation of the model.^[We refer the reader to @poulin-2006-visual, @caruana-2015-intelligible, @bibal-2016-intterpretability, and @bau-2017-network, for discussions around model structure interpretation.]
<!-- For this reason, we stress the usefulness of understanding the scale on which VI scores are calculated and take that into account when assessing the importance of each feature and communicating the results to others. -->
VI scores and VIPs can be constructed for general ML models using a number of available packages. The [iml](https://cran.r-project.org/package=iml) package [@R-iml] provides the `FeatureImp()` function which computes feature importance for general prediction models using the permutation approach (discussed later). It is written in [R6](https://cran.r-project.org/package=R6) [@R-R6] and allows the user to specify a generic loss function or select one from a pre-defined list (e.g., \code{loss = "mse"} for mean squared error). It also allows the user to specify whether importance is measured as the difference or as the ratio of the original model error and the model error after permutation. The user can also specify the number of repetitions used when permuting each feature to help stabilize the variability in the procedure. The \code{iml::FeatureImp()} function can also be run in parallel using any parallel backend supported by the [foreach](https://cran.r-project.org/package=foreach) package [@R-foreach].
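As a brief illustration of that interface, here is a hedged sketch; the fitted model `rfo` and training frame `trn` are placeholders introduced purely for illustration.
```r
# Hedged sketch of permutation importance via iml::FeatureImp()
# (`rfo` and `trn` are illustrative placeholders)
library(iml)

pred <- Predictor$new(rfo, data = subset(trn, select = -y), y = trn$y)
imp <- FeatureImp$new(pred, loss = "mse", compare = "difference",
                      n.repetitions = 10)
imp$results  # or plot(imp) for a ggplot2-based display
```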
The [ingredients](https://cran.r-project.org/package=ingredients) package [@R-ingredients] also provides permutation-based VI scores through the `feature_importance()` function. (Note that this function recently replaced the now deprecated [DALEX](https://cran.r-project.org/package=DALEX) function `variable_importance()` [@R-DALEX].) Similar to `iml::FeatureImp()`, this function allows the user to specify a loss function and how the importance scores are computed (e.g., using the difference or ratio). It also provides an option to sample the training data before shuffling the data to compute importance (the default is to use `n_sample = 1000`), which can help speed up computation.
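A hedged sketch of the analogous [ingredients](https://cran.r-project.org/package=ingredients) workflow is shown below; it is built on a [DALEX](https://cran.r-project.org/package=DALEX) explainer, `rfo` and `trn` are again illustrative placeholders, and the argument names follow the description above (they may differ slightly across package versions).
```r
# Hedged sketch of permutation importance via ingredients::feature_importance()
# (`rfo` and `trn` are illustrative placeholders)
library(ingredients)

expl <- DALEX::explain(rfo, data = subset(trn, select = -y), y = trn$y,
                       verbose = FALSE)
fi <- feature_importance(expl, loss_function = DALEX::loss_root_mean_square,
                         type = "difference", n_sample = 1000)
plot(fi)
```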
The [mmpf](https://cran.r-project.org/package=mmpf) package [@R-mmpf] also provides permutation-based VI scores via the `mmpf::permutationImportance()` function. Similar to the [iml](https://cran.r-project.org/package=iml) and [ingredients](https://cran.r-project.org/package=ingredients) implementation, this function is flexible enough to be applied to any class of ML models in R.
The [varImp](https://cran.r-project.org/package=varImp) package [@R-varImp] extends the permutation-based method for RFs in package [party](https://cran.r-project.org/package=party) [@R-party] to arbitrary measures from the [measures](https://cran.r-project.org/package=measures) package [@R-measures]. Additionally, the functions in [varImp](https://cran.r-project.org/package=varImp) include the option of using the conditional approach described in @strobl-2019-conditional, which is more reliable in the presence of correlated features. A number of other RF-specific VI packages exist on CRAN, including, but not limited to, [vita](https://cran.r-project.org/package=vita) [@R-vita], [rfVarImpOOB](https://cran.r-project.org/package=rfVarImpOOB) [@R-rfVarImpOOB], [randomForestExplainer](https://cran.r-project.org/package=randomForestExplainer) [@R-randomForestExplainer], and [tree.interpreter](https://cran.r-project.org/package=tree.interpreter) [@R-tree.interpreter].^[These packages were discovered using [pkgsearch](https://cran.r-project.org/package=pkgsearch)'s \code{ps()} function [@R-pkgsearch] with the key phrases "variable importance" and "feature importance".]
The [caret](https://cran.r-project.org/package=caret) package [@R-caret] includes a general `varImp()` function for computing model-specific and *filter-based* VI scores. Filter-based approaches, which are described in @applied-kuhn-2013, do not make use of the fitted model to measure VI. They also do not take into account the other predictors in the model. For regression problems, a popular filter-based approach to measuring the VI of a numeric predictor $x$ is to first fit a flexible nonparametric model between $x$ and the target $Y$; for example, the locally-weighted polynomial regression (LOWESS) method developed by @robust-cleveland-1979. From this fit, a pseudo-$R^2$ measure can be obtained from the resulting residuals and used as a measure of VI. For categorical predictors, a different method based on standard statistical tests (e.g., $t$-tests and ANOVAs) can be employed; see @applied-kuhn-2013 for details. For classification problems, an area under the ROC curve (AUC) statistic can be used to quantify predictor importance. The AUC statistic is computed by using the predictor $x$ as input to the ROC curve. If $x$ can reasonably separate the classes of $Y$, that is a clear indicator that $x$ is an important predictor (in terms of class separation) and this is captured in the corresponding AUC statistic. For problems with more than two classes, extensions of the ROC curve or a one-vs-all approach can be used.
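The following is a minimal sketch of this filter-based idea for numeric predictors, using `caret::filterVarImp()` together with an equivalent by-hand LOESS pseudo-$R^2$ for a single predictor; the simulated data set is an illustrative assumption.
```r
# Minimal sketch of filter-based importance (no fitted model required)
library(caret)

dat <- vip::gen_friedman(500, seed = 101)

# LOESS-based pseudo R-squared for each numeric predictor
filterVarImp(x = subset(dat, select = -y), y = dat$y, nonpara = TRUE)

# Equivalent by-hand computation for a single predictor (x4)
fit <- loess(y ~ x4, data = dat)
1 - sum(residuals(fit)^2) / sum((dat$y - mean(dat$y))^2)
```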
If you use the [mlr](https://cran.r-project.org/package=mlr) interface for fitting ML models [@R-mlr], then you can use the `getFeatureImportance()` function to extract model-specific VI scores from various tree-based models (e.g., RFs and GBMs). Unlike [caret](https://cran.r-project.org/package=caret), the model needs to be fit via the [mlr](https://cran.r-project.org/package=mlr) interface; for instance, you cannot use `getFeatureImportance()` on a [ranger](https://cran.r-project.org/package=ranger) [@R-ranger] model unless it was fit using [mlr](https://cran.r-project.org/package=mlr).
While the [iml](https://cran.r-project.org/package=iml) and [DALEX](https://cran.r-project.org/package=DALEX) packages provide model-agnostic approaches to computing VI, [caret](https://cran.r-project.org/package=caret), and to some extent, [mlr](https://cran.r-project.org/package=mlr), provide model-specific approaches (e.g., using the absolute value of the $t$-statistic for linear models) as well as less accurate filter-based approaches. Furthermore, each package has a completely different interface (e.g., [iml](https://cran.r-project.org/package=iml) is written in R6). The [vip](https://cran.r-project.org/package=vip) package [@R-vip] strives to provide a consistent interface to both model-specific and model-agnostic approaches to feature importance that is simple to use. The most important functions exported by [vip](https://cran.r-project.org/package=vip) are described below:
* `vi()` computes VI scores using model-specific or model-agnostic approaches (the results are always returned as a tibble [@R-tibble]);
* `vip()` constructs VIPs using model-specific or model-agnostic approaches with [ggplot2](https://cran.r-project.org/package=ggplot2)-style graphics [@R-ggplot2].
Note that `vi()` is actually a wrapper around four workhorse functions, `vi_model()`, `vi_firm()`, `vi_permute()`, and `vi_shap()`, that compute various types of VI scores. The first computes model-specific VI scores, while the latter three produce model-agnostic ones. The workhorse function that actually gets called is controlled by the `method` argument in `vi()`; the default is `method = "model"`, which corresponds to model-specific VI (see `?vip::vi` for details and links to further documentation).
## Constructing VIPs in R
We'll illustrate major concepts using the Friedman 1 benchmark problem described in @multivariate-friedman-1991 and @bagging-breiman-1996:
\begin{equation}
Y_i = 10 \sin\left(\pi X_{1i} X_{2i}\right) + 20 \left(X_{3i} - 0.5\right) ^ 2 + 10 X_{4i} + 5 X_{5i} + \epsilon_i, \quad i = 1, 2, \dots, n,
(\#eq:friedman)
\end{equation}
where $\epsilon_i \stackrel{iid}{\sim} N\left(0, \sigma^2\right)$. Data from this model can be generated using the `vip::gen_friedman()` function. By default, the features consist of 10 independent variables uniformly distributed on the interval $\left[0,1\right]$; however, only 5 out of these 10 are actually used in the true model. The code chunk below simulates 500 observations from the model in Equation \@ref(eq:friedman) with $\sigma = 1$; see `?vip::gen_friedman` for details.
```r
trn <- vip::gen_friedman(500, sigma = 1, seed = 101) # simulate training data
tibble::as_tibble(trn) # inspect output
```
```
## # A tibble: 500 × 11
## y x1 x2 x3 x4 x5 x6 x7 x8 x9 x10
## <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 14.9 0.372 0.406 0.102 0.322 0.693 0.758 0.518 0.530 0.878 0.763
## 2 15.3 0.0438 0.602 0.602 0.999 0.776 0.533 0.509 0.487 0.118 0.176
## 3 15.1 0.710 0.362 0.254 0.548 0.0180 0.765 0.715 0.844 0.334 0.118
## 4 10.7 0.658 0.291 0.542 0.327 0.230 0.301 0.177 0.346 0.474 0.283
## 5 17.6 0.250 0.794 0.383 0.947 0.462 0.00487 0.270 0.114 0.489 0.311
## 6 18.3 0.300 0.701 0.992 0.386 0.666 0.198 0.924 0.775 0.736 0.974
## 7 14.6 0.585 0.365 0.283 0.488 0.845 0.466 0.715 0.202 0.905 0.640
## 8 17.0 0.333 0.552 0.858 0.509 0.697 0.388 0.260 0.355 0.517 0.165
## 9 8.54 0.622 0.118 0.490 0.390 0.468 0.360 0.572 0.891 0.682 0.717
## 10 15.0 0.546 0.150 0.476 0.706 0.829 0.373 0.192 0.873 0.456 0.694
## # ℹ 490 more rows
```
From Equation \@ref(eq:friedman), it should be clear that features $X_1$--$X_5$ are the most important! (The others don't influence $Y$ at all.) Also, based on the form of the model, we'd expect $X_4$ to be the most important feature, probably followed by $X_1$ and $X_2$ (both comparably important), with $X_5$ probably being less important. The influence of $X_3$ is harder to determine due to its quadratic nature, but it seems likely that this nonlinearity will suppress the variable's influence over its observed range (i.e., 0--1).
# Model-specific VI
Some machine learning algorithms have their own way of quantifying the importance of each feature, which we refer to as *model-specific VI*. We describe some of these in the subsections that follow. One particular issue with model-specific VI scores is that they are not necessarily comparable across different types of models. For example, directly comparing the impurity-based VI scores from tree-based models to the absolute value of the $t$-statistic in linear models would not be meaningful.
## Decision trees and tree ensembles
Decision trees probably offer the most natural model-specific approach to quantifying the importance of each feature. In a binary decision tree, at each node $t$, a single predictor is used to partition the data into two homogeneous groups. The chosen predictor is the one that maximizes some measure of improvement $i^t$. The relative importance of predictor $X$ is the sum of the squared improvements over all internal nodes of the tree for which $X$ was chosen as the partitioning variable; see @classification-breiman-1984 for details. This idea also extends to ensembles of decision trees, such as RFs and GBMs. In ensembles, the improvement score for each predictor is averaged across all the trees in the ensemble. Fortunately, due to the stabilizing effect of averaging, the improvement-based VI metric is often more reliable in large ensembles; see @hastie-elements-2009 [p. 368].
RFs offer an additional method for computing VI scores. The idea is to use the leftover *out-of-bag* (OOB) data to construct validation-set errors for each tree. Then, each predictor is randomly shuffled in the OOB data and the error is computed again. The idea is that if variable $X$ is important, then the validation error will go up when $X$ is perturbed in the OOB data. The difference in the two errors is recorded for the OOB data then averaged across all trees in the forest. Note that both methods for constructing VI scores can be unreliable in certain situations; for example, when the predictor variables vary in their scale of measurement or their number of categories [@party2007a], or when the predictors are highly correlated [@strobl-2019-conditional]. The [varImp](https://cran.r-project.org/package=varImp) package discussed earlier provides methods to address these concerns for random forests in package [party](https://cran.r-project.org/package=party), with similar functionality also built into the [partykit](https://cran.r-project.org/package=partykit) package [@R-partykit]. The [vip](https://cran.r-project.org/package=vip) package also supports the conditional importance described in @strobl-2019-conditional for both [party](https://cran.r-project.org/package=party)- and [partykit](https://cran.r-project.org/package=partykit)-based RFs; see `?vip::vi_model` for details. Later on, we'll discuss a more general permutation method that can be applied to any supervised learning model.
To illustrate, we fit a CART-like regression tree, an RF, and a GBM to the simulated training data. (**Note:** there are a number of different packages available for fitting these types of models; we just picked popular implementations for illustration.)
```r
# Load required packages
library(rpart) # for fitting CART-like decision trees
library(randomForest) # for fitting RFs
library(xgboost) # for fitting GBMs
# Fit a single regression tree
tree <- rpart(y ~ ., data = trn)
# Fit an RF
set.seed(101) # for reproducibility
rfo <- randomForest(y ~ ., data = trn, importance = TRUE)
# Fit a GBM
set.seed(102) # for reproducibility
bst <- xgboost(
data = data.matrix(subset(trn, select = -y)),
label = trn$y,
objective = "reg:squarederror",
nrounds = 100,
max_depth = 5,
eta = 0.3,
verbose = 0 # suppress printing
)
```
Each of the above packages includes the ability to compute VI scores for all the features in the model; however, the implementation is rather package-specific, as shown in the code chunk below. The results are displayed in Figure \@ref(fig:vi-plots) (the code to reproduce these plots has been omitted but can be made available upon request).
```r
# Extract VI scores from each model
vi_tree <- tree$variable.importance
vi_rfo <- rfo$importance # or use `randomForest::importance(rfo)`
vi_bst <- xgb.importance(model = bst)
```
<img src="../man/figures/vi-plots-1.png" alt="Model-specific VIPs for the three different tree-based models fit to the simulated Friedman data." width="100%" />
As we would expect, all three methods rank the variables `x1`--`x5` as more important than the others. While this is good news, it is unfortunate that we have to remember the different functions and ways of extracting and plotting VI scores from various model fitting functions. This is one place where [vip](https://cran.r-project.org/package=vip) can help...one function to rule them all! Once [vip](https://cran.r-project.org/package=vip) is loaded, we can use `vi()` to extract a tibble of VI scores.^[In order to avoid deprecation warnings due to recent updates to [tibble](https://cran.r-project.org/package=tibble) and [ggplot2](https://cran.r-project.org/package=ggplot2), the code examples in this article are based on the latest development versions of both [vip](https://cran.r-project.org/package=vip) (version 0.4.1) and [pdp](https://cran.r-project.org/package=pdp) (version 0.8.1); the URL to the development version of each package is available on its associated CRAN landing page.]
```r
# Load required packages
library(vip)
# Compute model-specific VI scores
vi(tree) # CART-like decision tree
```
```
## # A tibble: 10 × 2
## Variable Importance
## <chr> <dbl>
## 1 x4 4234.
## 2 x2 2513.
## 3 x1 2461.
## 4 x5 1230.
## 5 x3 688.
## 6 x6 533.
## 7 x7 357.
## 8 x9 331.
## 9 x8 276.
## 10 x10 275.
```
```r
vi(rfo) # RF
```
```
## # A tibble: 10 × 2
## Variable Importance
## <chr> <dbl>
## 1 x4 72.9
## 2 x2 61.4
## 3 x1 55.6
## 4 x5 37.0
## 5 x3 22.0
## 6 x8 1.84
## 7 x6 1.12
## 8 x9 0.720
## 9 x7 -1.39
## 10 x10 -2.61
```
```r
vi(bst) # GBM
```
```
## # A tibble: 10 × 2
## Variable Importance
## <chr> <dbl>
## 1 x4 0.403
## 2 x2 0.225
## 3 x1 0.189
## 4 x5 0.0894
## 5 x3 0.0682
## 6 x9 0.00802
## 7 x6 0.00746
## 8 x7 0.00400
## 9 x10 0.00377
## 10 x8 0.00262
```
Notice how the `vi()` function always returns a tibble^[Technically, it's a tibble with an additional `"vi"` class.] with two columns: `Variable` and `Importance` (the exceptions are coefficient-based models which also include a `Sign` column giving the sign of the corresponding coefficient, and permutation importance involving multiple Monte Carlo simulations, but more on that later). Also, by default, `vi()` always orders the VI scores from highest to lowest; this, among other options, can be controlled by the user (see `?vip::vi` for details). Plotting VI scores with `vip()` is just as straightforward. For example, the following code can be used to reproduce Figure \@ref(fig:vi-plots).
```r
library(patchwork) # for easily arranging multiple ggplot2 plots
p1 <- vip(tree) + ggtitle("Single tree")
p2 <- vip(rfo) + ggtitle("Random forest")
p3 <- vip(bst) + ggtitle("Gradient boosting")
# Display plots in a grid (Figure 1)
p1 + p2 + p3
```
Notice how the `vip()` function always returns a `"ggplot"` object (by default, this will be a bar plot). For large models with many features, a Cleveland dot plot is more effective (in fact, a number of useful plotting options can be fiddled with). Below we call `vip()` and change a few useful options (the resulting plot is displayed in Figure \@ref(fig:dot-plot)). Note that we can also call `vip()` directly on a `"vi"` object if it's already been constructed.
```r
# Construct VIP (Figure 2)
library(ggplot2) # for theme_light() function
vip(bst, num_features = 5, geom = "point", horizontal = FALSE,
aesthetics = list(color = "red", shape = 17, size = 5)) +
theme_light()
```
<img src="../man/figures/dot-plot-1.png" alt="Illustrating various plotting options." width="70%" />
## Linear models
In multiple linear regression, or linear models (LMs), the absolute value of the $t$-statistic (or some other scaled variant of the estimated coefficients) is commonly used as a measure of VI.^[Since this approach is biased towards large-scale features, it is important to properly standardize the predictors (before fitting the model) or the estimated coefficients.] Motivation for the use of the associated $t$-statistic is given in @bring-1994-standardize. The same idea also extends to generalized linear models (GLMs). In the code chunk below, we fit an LM to the simulated Friedman data (`trn`) allowing for all main effects and two-way interactions, then use the `step()` function to perform backward elimination. The resulting VIP is displayed in Figure \@ref(fig:vip-step).
```r
# Fit a LM
linmod <- lm(y ~ .^2, data = trn)
backward <- step(linmod, direction = "backward", trace = 0)
# Extract VI scores
(vi_backward <- vi(backward))
```
```
## # A tibble: 21 × 3
## Variable Importance Sign
## <chr> <dbl> <chr>
## 1 x4 14.2 POS
## 2 x2 7.31 POS
## 3 x1 5.63 POS
## 4 x5 5.21 POS
## 5 x3:x5 2.46 POS
## 6 x1:x10 2.41 NEG
## 7 x2:x6 2.41 NEG
## 8 x1:x5 2.37 NEG
## 9 x10 2.21 POS
## 10 x3:x4 2.01 NEG
## # ℹ 11 more rows
```
```r
# Plot VI scores; by default, `vip()` displays the top ten features
pal <- palette.colors(2, palette = "Okabe-Ito") # colorblind friendly palette
vip(vi_backward, num_features = length(coef(backward)), # Figure 3
geom = "point", horizontal = FALSE, mapping = aes(color = Sign)) +
scale_color_manual(values = unname(pal)) +
theme_light() +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
```
<img src="../man/figures/vip-step-1.png" alt="Example VIP from a linear model fit to the simulated Friedman data. The points are colored according to the sign of the associated coefficient." width="70%" />
A major limitation of this approach is that a VI score is assigned to each term in the model, rather than to each individual feature! We can solve this problem using one of the model-agnostic approaches discussed later.
Multivariate adaptive regression splines (MARS), introduced in @multivariate-friedman-1991, is an automatic regression technique that can be seen as a generalization of LMs and GLMs. In the MARS algorithm, the contribution (or VI score) for each predictor is determined using a generalized cross-validation (GCV) statistic (though other statistics can also be used; see `?vip::vi_model` for details). An example using the [earth](https://cran.r-project.org/package=earth) package [@R-earth-fixed] is given below (the results are plotted in Figure \@ref(fig:vip-earth)):
```r
# Load required packages
library(earth)
# Fit a MARS model
mars <- earth(y ~ ., data = trn, degree = 2, pmethod = "exhaustive")
# Extract VI scores
vi(mars, type = "gcv")
```
```
## # A tibble: 10 × 2
## Variable Importance
## <chr> <dbl>
## 1 x4 100
## 2 x1 83.2
## 3 x2 83.2
## 4 x5 59.3
## 5 x3 43.5
## 6 x6 0
## 7 x7 0
## 8 x8 0
## 9 x9 0
## 10 x10 0
```
```r
# Plot VI scores (Figure 4)
vip(mars)
```
<img src="../man/figures/vip-earth-1.png" alt="Example VIP from a MARS model fit to the simulated Friedman data." width="70%" />
To access VI scores directly in [earth](https://cran.r-project.org/package=earth), you can use the `earth::evimp()` function.
## Neural networks
For neural networks (NNs), two popular methods for constructing VI scores are the Garson algorithm [@interpreting-garson-1991], later modified by @back-goh-1995, and the Olden algorithm [@accurate-olden-2004]. For both algorithms, the basis of these VI scores is the network’s connection weights. The Garson algorithm determines VI by identifying all weighted connections between the nodes of interest. Olden’s algorithm, on the other hand, uses the products of the raw connection weights between each input and output neuron and sums these products across all hidden neurons; this has been shown to outperform the Garson method in various simulations. For DNNs, a similar method due to @data-gedeon-1997 considers the weights connecting the input features to the first two hidden layers (for simplicity and speed), but this method can be slow for large networks. We illustrate these two methods below using `vip()` with the [nnet](https://cran.r-project.org/package=nnet) package [@R-nnet] (see the results in Figure \@ref(fig:vip-nnet)).
```r
# Load required packages
library(nnet)
# Fit a neural network
set.seed(0803) # for reproducibility
nn <- nnet(y ~ ., data = trn, size = 7, decay = 0.1,
linout = TRUE, trace = FALSE)
# Construct VIPs
p1 <- vip(nn, type = "garson")
p2 <- vip(nn, type = "olden")
# Display plots in a grid (Figure 5)
p1 + p2
```
<img src="../man/figures/vip-nnet-1.png" alt="Example VIPs from a single-hidden-layer NN fit to the simulated Friedman data." width="70%" />
# Model-agnostic VI
Model-agnostic interpretability separates interpretation from the model. Compared to model-specific approaches, model-agnostic VI methods are more flexible and can be applied to any supervised learning algorithm. In this section, we discuss model-agnostic methods for quantifying global feature importance using three different approaches:
1) a simple variance-based approach;
2) permutation-based feature importance;
3) Shapley-based feature importance.
## Variance-based methods
Our first model-agnostic method is based on a simple *feature importance ranking measure* (FIRM); for details, see @greenwell-simple-2018, @zien-2009-feature, and @scholbeck-2019-sampling. The specific approach used here is based on quantifying the "flatness" of the effects of each feature.^[A similar approach is taken in the [vivo](https://cran.r-project.org/package=vivo) package [@R-vivo].] Feature effects can be assessed using *partial dependence plots* (PDPs) [@friedman-2001-greedy] or *individual conditional expectation* (ICE) curves [@goldstein-peeking-2015]. PDPs and ICE curves help visualize the effect of low-cardinality subsets of the feature space on the estimated prediction surface (e.g., main effects and two/three-way interaction effects). They are also model-agnostic and can be constructed in the same way for any supervised learning algorithm. Below, we fit a *projection pursuit regression* (PPR) model (see `?stats::ppr` for details and references) and construct PDPs for each feature using the [pdp](https://cran.r-project.org/package=pdp) package [@pdp2017]. The results are displayed in Figure \@ref(fig:pdp-ppr). Notice how the PDPs for the uninformative features are relatively flat compared to the PDPs for features `x1`--`x5`!
<img src="../man/figures/pdp-ppr-1.png" alt="PDPs of main effects in the PPR model fit to the simulated Friedman data." width="100%" />
Next, we compute PDP-based VI scores for the fitted PPR and NN models. The PDP method constructs VI scores that quantify the relative "flatness" of each PDP (by default, this is defined by computing the standard deviation of the $y$-axis values for each PDP). To use the PDP method, specify `method = "firm"` in the call to `vi()` or `vip()` (or just use `vi_firm()` directly):
```r
# Fit a PPR model (nterms was chosen using the caret package with 5 repeats of
# 5-fold cross-validation)
pp <- ppr(y ~ ., data = trn, nterms = 11)
# Construct VIPs
p1 <- vip(pp, method = "firm", train = trn) + ggtitle("PPR")
p2 <- vip(nn, method = "firm", train = trn) + ggtitle("NN")
# Display plots in a grid (Figure 7)
p1 + p2
```
<img src="../man/figures/pdp-ppr-nn-1.png" alt="PDP-based feature importance for the PPR and NN models fit to the simulated Friedman data." width="70%" />
In Figure \@ref(fig:pdp-ppr-nn) we display the PDP-based feature importance for the previously obtained PPR and NN models. These VI scores essentially capture the variability in the partial dependence values for each main effect.
The ICE curve method is similar to the PDP method, except that we measure the "flatness" of each individual ICE curve and then aggregate the results (e.g., by averaging). If there are no (substantial) interaction effects, using ICE curves will produce results similar to using PDPs (which are just averaged ICE curves). However, if strong interaction effects are present, they can obfuscate the main effects and render the PDP-based approach less useful (since the PDPs for important features can be relatively flat when certain interactions are present; see @goldstein-peeking-2015 for details). In fact, it is probably safest to always use ICE curves when employing the FIRM method.
Below, we display the ICE curves for each feature in the fitted PPR model using the same $y$-axis scale; see Figure \@ref(fig:ice-ppr). Again, there is a clear difference between the ICE curves for features `x1`--`x5` and `x6`--`x10`; the latter are relatively flat by comparison. Also, notice how the ICE curves within each feature are relatively parallel (if the ICE curves within each feature were perfectly parallel, the standard deviation for each curve would be the same and the results would be identical to the PDP method). In this example, the interaction term between `x1` and `x2` does not obfuscate the PDPs for the main effects and the results are not much different.
<img src="../man/figures/ice-ppr-1.png" alt="ICE curves for each feature in the PPR model fit to the simulated Friedman data. The red curve represents the PDP (i.e., the averaged ICE curves)." width="100%" />
Obtaining the ICE-based feature importance scores is also straightforward, just specify `ice = TRUE` when using the FIRM approach. This is illustrated in the code chunk below and the results, which are displayed in Figure \@ref(fig:vip-ice-ppr-nn), are similar to those obtained using the PDP method.
```r
# Construct VIPs
p1 <- vip(pp, method = "firm", ice = TRUE, train = trn) + ggtitle("PPR")
p2 <- vip(nn, method = "firm", ice = TRUE, train = trn) + ggtitle("NN")
# Display plots in a grid (Figure 9)
p1 + p2
```
<img src="../man/figures/vip-ice-ppr-nn-1.png" alt="ICE-based feature importance for the PPR and NN models fit to the simulated Friedman data." width="70%" />
When using `method = "firm"`, the feature effect values are stored in an attribute called `"effects"`. This is a convenience so that the feature effect plots (e.g., PDPs and ICE curves) can easily be reconstructed and compared with the VI scores, as demonstrated in the example below (see Figure \@ref(fig:pdp-from-attr)):
```r
# Construct PDP-based VI scores
(vis <- vi(pp, method = "firm", train = trn))
```
```
## # A tibble: 10 × 2
## Variable Importance
## <chr> <dbl>
## 1 x4 2.96
## 2 x2 2.21
## 3 x1 2.14
## 4 x5 1.53
## 5 x3 1.46
## 6 x6 0.128
## 7 x9 0.114
## 8 x8 0.0621
## 9 x10 0.0374
## 10 x7 0.0170
```
```r
# Reconstruct PDPs for all 10 features (Figure 10)
par(mfrow = c(2, 5))
for (name in paste0("x", 1:10)) {
plot(attr(vis, which = "effects")[[name]], type = "l", ylim = c(9, 19), las = 1)
}
```
<img src="../man/figures/pdp-from-attr-1.png" alt="PDPs for all ten features reconstructed from the \code{pdp} attribute of the \code{vis} object." width="100%" />
## Permutation method
The permutation method exists in various forms and was made popular in @random-breiman-2001 for RFs, before being generalized and extended in @fisher-model-2018. The permutation approach used in [vip](https://cran.r-project.org/package=vip) is quite simple and is outlined in Algorithm 1 below. The idea is that if we randomly permute the values of an important feature in the training data, the training performance would degrade (since permuting the values of a feature effectively destroys any relationship between that feature and the target variable). This of course assumes that the model has been properly tuned (e.g., using cross-validation) and is not overfitting. The permutation approach uses the difference between some baseline performance measure (e.g., training $R^2$, AUC, or RMSE) and the same performance measure obtained after permuting the values of a particular feature in the training data (**Note:** the model is NOT refit to the training data after randomly permuting the values of a feature). It is also important to note that this method may not be appropriate when you have, for example, highly correlated features (since permuting one feature at a time may lead to unlikely data instances).
Let $x_1, x_2, \dots, x_j$ be the features of interest and let $M_{orig}$ be the baseline performance metric for the trained model; for brevity, we'll assume smaller is better (e.g., classification error or RMSE). The permutation-based importance scores can be computed as follows:
1. For $i = 1, 2, \dots, j$:
a. Permute the values of feature $x_i$ in the training data.
b. Recompute the performance metric on the permuted data $M_{perm}$.
c. Record the difference from baseline using $VI\left(x_i\right) = M_{perm} - M_{orig}$.
2. Return the VI scores $VI\left(x_1\right), VI\left(x_2\right), \dots, VI\left(x_j\right)$.
Algorithm 1: A simple algorithm for constructing permutation-based VI scores.
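As a concrete (purely illustrative) companion to Algorithm 1, the sketch below implements the difference-based score for any model with a standard `predict()` method, using RMSE as the smaller-is-better metric; this is not [vip](https://cran.r-project.org/package=vip)'s internal implementation, just a bare-bones version of the same idea:

```r
# Bare-bones permutation importance (Algorithm 1) for a regression model
permutation_vi <- function(object, train, target,
                           features = setdiff(names(train), target)) {
  rmse <- function(obs, pred) sqrt(mean((obs - pred)^2))
  m_orig <- rmse(train[[target]], predict(object, newdata = train))  # baseline
  vapply(features, function(x) {
    perm <- train
    perm[[x]] <- sample(perm[[x]])  # permute the values of feature x
    m_perm <- rmse(perm[[target]], predict(object, newdata = perm))
    m_perm - m_orig  # step (c): drop in performance
  }, FUN.VALUE = numeric(1))
}
# e.g., sort(permutation_vi(pp, train = trn, target = "y"), decreasing = TRUE)
```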
Algorithm 1 can be improved or modified in a number of ways. For instance, the process can be repeated several times and the results averaged together. This helps provide more stable VI scores and also makes it possible to measure their variability. Rather than taking the difference in step (c), @molnar-2019-iml [sec. 5.5.4] argues that using the ratio $M_{perm} / M_{orig}$ makes the importance scores more comparable across different problems. It's also possible to assign importance scores to groups of features (e.g., by permuting more than one feature at a time); this would be useful if features can be categorized into mutually exclusive groups, for instance, categorical features that have been *one-hot-encoded*. A grouped variant of the sketch above is given below.
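The grouped variant (again, just a sketch) permutes several columns at once and returns a single score for the whole group:

```r
# Permute a group of features together to get one importance score for the group
group_vi <- function(object, train, target, group) {
  rmse <- function(obs, pred) sqrt(mean((obs - pred)^2))
  m_orig <- rmse(train[[target]], predict(object, newdata = train))
  perm <- train
  perm[group] <- lapply(perm[group], sample)  # shuffle every feature in the group
  rmse(perm[[target]], predict(object, newdata = perm)) - m_orig
}
# e.g., group_vi(pp, train = trn, target = "y", group = c("x1", "x2"))
```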
To use the permutation approach in [vip](https://cran.r-project.org/package=vip), specify `method = "permute"` in the call to `vi()` or `vip()` (or you can use `vi_permute()` directly). Note that using `method = "permute"` requires specifying a few additional arguments (e.g., the training data, target name or vector of target values, a prediction function, etc.); see `?vi_permute` for details.
To use `vi_permute()` you should first define a prediction wrapper that tells the function how to generate the right predictions for your chosen metric. An example is given below for the previously fitted PPR and NN models. Here we use $R^2$ (`metric = "rsq"`) as the evaluation metric. The results, which are displayed in Figure \@ref(fig:vip-permute-ppr-nn), agree with those obtained using the PDP- and ICE-based methods.
```r
# Prediction wrapper
pfun_ppr <- function(object, newdata) { # needs to return a numeric vector
stats::predict(object, newdata = newdata)
}
pfun_nnet <- function(object, newdata) { # needs to return a numeric vector
stats::predict(object, newdata = newdata)[, 1L, drop = TRUE]
}
# Plot VI scores
set.seed(2021) # for reproducibility
p1 <- vip(pp, method = "permute", train = trn, target = "y", metric = "rsq",
pred_wrapper = pfun_ppr) + ggtitle("PPR")
p2 <- vip(nn, method = "permute", train = trn, target = "y", metric = "rsq",
pred_wrapper = pfun_nnet) + ggtitle("NN")
# Display plots in a grid (Figure 11)
p1 + p2
```
<img src="../man/figures/vip-permute-ppr-nn-1.png" alt="Permutation-based feature importance for the PPR and NN models fit to the simulated Friedman data." width="70%" />
The permutation approach introduces randomness into the procedure and therefore should be run more than once if computationally feasible. The upside to performing multiple runs of Algorithm 1 is that it allows us to compute standard errors (among other metrics) for the estimated VI scores, as illustrated in the example below; here we specify `nsim = 30` to request that each feature be permuted 30 times and the results averaged together. (Additionally, if `nsim > 1`, you can set `geom = "boxplot"` in the call to `vip()` to construct boxplots of the raw permutation-based VI scores. This is useful if you want to visualize the variability in each of the VI estimates; see Figure \@ref(fig:vip-boxplots) for an example.)
```r
# Use 30 Monte Carlo reps
set.seed(403) # for reproducibility
vis <- vi(pp, method = "permute", train = trn, target = "y", metric = "rsq",
pred_wrapper = pfun_ppr, nsim = 30)
vip(vis, geom = "boxplot") # Figure 12
```
<img src="../man/figures/vip-boxplots-1.png" alt="Boxplots of VI scores using the permutation method with 15 Monte Carlo repetitions." width="70%" />
All available performance metrics for regression and classification can be listed using the `list_metrics()` function, for example:
```r
list_metrics()
```
```
## metric description
## 1 accuracy Classification accuracy
## 2 bal_accuracy Balanced classification accuracy
## 3        youden  Youden's index (or Youden's J statistic)
## 4 roc_auc Area under ROC curve
## 5 pr_auc Area under precision-recall (PR) curve
## 6 logloss Log loss
## 7 brier Brier score
## 8 mae Mean absolute error
## 9 mape Mean absolute percentage error
## 10 rmse Root mean squared error
## 11 rsq R-squared (correlation)
## 12 rsq_trad R-squared (traditional)
## task smaller_is_better yardstick_function
## 1 Binary/multiclass classification FALSE accuracy_vec
## 2 Binary/multiclass classification FALSE bal_accuracy_vec
## 3 Binary/multiclass classification FALSE j_index
## 4 Binary classification FALSE roc_auc_vec
## 5 Binary classification FALSE pr_auc_vec
## 6 Binary/multiclass classification TRUE mn_log_loss_vec
## 7 Binary/multiclass classification TRUE brier_class_vec
## 8 Regression TRUE mae_vec
## 9 Regression TRUE mape_vec
## 10 Regression TRUE rmse_vec
## 11 Regression FALSE rsq_vec
## 12 Regression FALSE rsq_trad_vec
```
The permutation method in [vip](https://cran.r-project.org/package=vip) supports the vector performance functions available in [yardstick](https://cran.r-project.org/package=yardstick) [@R-yardstick]. We can also use a custom metric (i.e., loss function). Suppose for example you want to measure importance using the *mean absolute error* (MAE):
\begin{equation}
MAE = \frac{1}{n}\sum_{i = 1}^n\left|y_i - \hat{f}\left(\boldsymbol{x}_i\right)\right|,
\end{equation}
where $\hat{f}\left(\boldsymbol{x}_i\right)$ is the predicted value of $y_i$. A simple function implementing this metric is given below (to be consistent with [yardstick](https://cran.r-project.org/package=yardstick) functions, user-supplied metric functions require two arguments: `truth` and `estimate`).
```r
mae <- function(truth, estimate) {
mean(abs(truth - estimate))
}
```
To use this for computing permutation-based VI scores just pass it via the `metric` argument (be warned, however, that the metric used for computing permutation importance should be the same as the metric used to train and tune the model). Also, since this is a custom metric, we need to specify whether a smaller value indicates better performance by setting `smaller_is_better = TRUE`. The results, which are displayed in Figure \@ref(fig:vip-nn-mae), are similar to those in Figure \@ref(fig:vip-permute-ppr-nn), albeit on a different scale.
```r
# Construct VIP (Figure 13)
set.seed(2321) # for reproducibility
p1 <- vip(nn, method = "permute", train = trn, target = "y", metric = mae,
smaller_is_better = TRUE, pred_wrapper = pfun_nnet) +
ggtitle("Custom loss function: MAE")
set.seed(2321) # for reproducibility
p2 <- vip(nn, method = "permute", train = trn, target = "y",
metric = yardstick::mae_vec, smaller_is_better = TRUE,
pred_wrapper = pfun_nnet) +
ggtitle("Using `yardstick`'s MAE function")
p1 + p2
```
<img src="../man/figures/vip-nn-mae-1.png" alt="Permutation-based VI scores for the NN model fit to the simulated Friedman data. In this example, permutation importance is based on the MAE metric." width="70%" />
Although permutation importance is most naturally computed on the training data, it may also be useful to do the shuffling and measure performance on new data! This is discussed in depth in @molnar-2019-iml [sec. 5.2]. For users interested in computing permutation importance using new data, just supply it to the `train` argument in the call to `vi()`, `vip()`, or `vi_permute()`. For instance, suppose we wanted to only use a fraction of the original training data to carry out the computations. In this case, we could simply pass the sampled data to the `train` argument as follows:
```r
# Construct VIP (Figure 14)
set.seed(2327) # for reproducibility
vip(nn, method = "permute", pred_wrapper = pfun_nnet, target = "y",
metric = "rmse",
train = trn[sample(nrow(trn), size = 400), ]) + # sample 400 observations
ggtitle("Using a random subset of training data")
```
<img src="../man/figures/vip-permute-nn-sample-1.png" alt="Permutation-based feature importance for the NN model fit to the simulated Friedman data. In this example, permutation importance is based on a random sample of 400 training observations." width="70%" />
When using the permutation method with `nsim > 1`, the default is to keep all the permutation scores as an attribute called `"raw_scores"`; you can turn this behavior off by setting `keep = FALSE` in the call to `vi_permute()`, `vi()`, or `vip()`. If `keep = TRUE` and `nsim > 1`, you can request all permutation scores to be plotted by setting `all_permutations = TRUE` in the call to `vip()`, as demonstrated in the code chunk below (see Figure \@ref(fig:vip-nn-mae-all)). This also lets you visually inspect the variability in the permutation scores within each feature.
```r
# Construct VIP (Figure 15)
set.seed(8264) # for reproducibility
vip(nn, method = "permute", pred_wrapper = pfun_nnet, train = trn,
target = "y", metric = "mae", nsim = 10, geom = "point",
all_permutations = TRUE, jitter = TRUE) +
ggtitle("Plotting all permutation scores")
```
<img src="../man/figures/vip-nn-mae-all-1.png" alt="Permutation-based feature importance for the NN model fit to the simulated Friedman data. In this example, all the permutation importance scores (points) are displayed for each feature along with their average (bars)." width="70%" />
### A classification example
In this example, we'll illustrate the use of permutation importance in a classification problem. To start, we'll use the [randomForest](https://cran.r-project.org/package=randomForest) package [@R-randomForest] to build a (default) random forest to predict survivability of passengers on the ill-fated Titanic.
The [source data](https://hbiostat.org/data/) (also available in `vip::titanic`) contains 263 missing values (i.e., `NA`'s) in the age column. The `titanic_mice` version, which we'll use in this vignette, contains imputed values for the age column using *multivariate imputation by chained equations* via the [mice](https://cran.r-project.org/package=mice) package. Consequently, `titanic_mice` is a list containing 11 imputed versions of the original data; see `?vip::titanic_mice` for details. For now, we'll just use one of the 11 imputed versions:
```r
head(t1 <- vip::titanic_mice[[1L]])
```
```
## survived pclass age sex sibsp parch
## 1 yes 1 29.00 female 0 0
## 2 yes 1 0.92 male 1 2
## 3 no 1 2.00 female 1 2
## 4 no 1 30.00 male 1 2
## 5 no 1 25.00 female 1 2
## 6 yes 1 48.00 male 0 0
```
```r
t1$pclass <- as.ordered(t1$pclass) # makes more sense as an ordered factor
```
Next, we'll build a default random forest to predict survivability:
```r
library(randomForest)
set.seed(2053) # for reproducibility
(rfo <- randomForest(survived ~ ., data = t1, importance = TRUE, nPerm = 30))
```
```
##
## Call:
## randomForest(formula = survived ~ ., data = t1, importance = TRUE, nPerm = 30)
## Type of random forest: classification
## Number of trees: 500
## No. of variables tried at each split: 2
##
## OOB estimate of error rate: 18.79%
## Confusion matrix:
## no yes class.error
## no 727 82 0.1013597
## yes 164 336 0.3280000
```
For comparison, here's a plot of the OOB-based permutation importance scores available in a random forest (note that setting `include_type = TRUE` results in the $x$-axis label including the method of importance that was computed):
```r
vip(rfo, include_type = TRUE)
```
<img src="../man/figures/titanic-rfo-vi-1.png" alt="plot of chunk titanic-rfo-vi" width="70%" />
For categorical outcomes, random forests can provide either predicted class labels (i.e., classification) or predicted class probabilities, as shown below.
```r
head(predict(rfo, newdata = t1, type = "response")) # predicted class labels
```
```
## 1 2 3 4 5 6
## yes yes yes no yes no
## Levels: no yes
```
```r
head(predict(rfo, newdata = t1, type = "prob")) # predicted class probabilities
```
```
## no yes
## 1 0.014 0.986
## 2 0.114 0.886
## 3 0.472 0.528
## 4 0.716 0.284
## 5 0.392 0.608
## 6 0.894 0.106
```
The performance metric we choose for permutation importance will determine whether our prediction wrapper should return a class label (as a factor) or a numeric vector of class probabilities. We'll start with classification accuracy (the same metric used by random forest's built-in OOB-based permutation VI scores). A basic call to `vi()` (or, similarly, to `vi_permute()`) would look something like:
```r
pfun_class <- function(object, newdata) { # prediction wrapper
predict(object, newdata = newdata, type = "response")
}
# Compute mean decrease in accuracy
set.seed(1359) # for reproducibility
vi(rfo,
method = "permute",
train = t1,
target = "survived",
metric = "accuracy", # or pass in `yardstick::accuracy_vec` directly
# smaller_is_better = FALSE, # no need to set for built-in metrics
pred_wrapper = pfun_class,
nsim = 30 # use 30 repetitions
)
```
```
## # A tibble: 5 × 3
## Variable Importance StDev
## <chr> <dbl> <dbl>
## 1 sex 0.226 0.0111
## 2 pclass 0.0801 0.00488
## 3 age 0.0738 0.00595
## 4 sibsp 0.0346 0.00459
## 5 parch 0.0166 0.00247
```
Note that the standard deviation of each VI score is also computed and returned whenever `nsim > 1`. The results are comparable to what the fitted random forest computed internally by setting `importance = TRUE` and `nPerm = 30`; the difference is that the random forest uses the OOB data when computing the drop in accuracy after shuffling each variable.
```r
sort(rfo$importance[, "MeanDecreaseAccuracy"], decreasing = TRUE)
```
```
## sex pclass age parch sibsp
## 0.17102147 0.05877827 0.04408406 0.01895065 0.01583429
```
Next, we'll compute permutation VI scores using a metric that requires predicted probabilities. Here, we'll use the Brier score, which measures the accuracy of the individual probabilities (smaller is better). However, instead of using the built-in `metric = "brier"` option, we'll pass the corresponding [yardstick](https://cran.r-project.org/package=yardstick) function directly. Note that we have to modify the prediction wrapper so that it returns not a matrix of class probabilities, but a single vector of probabilities for the event of interest in the binary case (here, the event `survived = "yes"`):
```r
pfun_prob <- function(object, newdata) { # prediction wrapper
predict(object, newdata = newdata, type = "prob")[, "yes"]
}
# Compute mean increase in Brier score
set.seed(1411) # for reproducibility
vi(rfo,
method = "permute",
train = t1,
target = "survived",
metric = yardstick::brier_class_vec, # or pass in `"brier"` directly
  smaller_is_better = TRUE, # need to set when supplying a function; Brier score: smaller is better
pred_wrapper = pfun_prob,
nsim = 30 # use 30 repetitions
)
```
```
## # A tibble: 5 × 3
## Variable Importance StDev
## <chr> <dbl> <dbl>
## 1 sex 0.209 0.00866
## 2 pclass 0.0977 0.00479
## 3 age 0.0947 0.00460
## 4 parch 0.0542 0.00271
## 5 sibsp 0.0414 0.00186
```
Finally, to illustrate the use of the `event_level` argument, we'll compute the permutation-based VI scores using the *area under the ROC curve* (AUROC or `metric = "roc_auc"`).
```r
set.seed(1413) # for reproducibility
vi(rfo,
method = "permute",
train = t1,
target = "survived",
metric = "roc_auc",
pred_wrapper = pfun_prob,
nsim = 30 # use 30 repetitions
)
```
```
## # A tibble: 5 × 3
## Variable Importance StDev
## <chr> <dbl> <dbl>
## 1 parch -0.0251 0.00351
## 2 sibsp -0.0283 0.00211
## 3 age -0.0850 0.00477
## 4 pclass -0.0920 0.00533
## 5 sex -0.229 0.0137
```
Why are the results negative? The issue is that metrics like AUROC (and similarly the *area under the PR curve*) treat one of the class outcomes as the "event" of interest. In our case, we are using the predicted probability for the event `survived = "yes"`, but the default event level (in [yardstick](https://cran.r-project.org/package=yardstick) and therefore [vip](https://cran.r-project.org/package=vip)) is always the first class label in alphabetical order (or `survived = "no"`, in this case):
```r
levels(titanic$survived)
```
```
## [1] "no" "yes"
```
Consequently, when using metrics like AUROC, it is a good idea to set the `event_level` parameter in the call to `vi()` or `vi_permute()`. To fix the previous issue, just set the event level to the second class label using `event_level = "second"`:
```r
set.seed(1413) # for reproducibility
vi(rfo,
method = "permute",
train = t1,
target = "survived",
metric = "roc_auc",
event_level = "second", # use "yes" as class label/"event" of interest
pred_wrapper = pfun_prob,
nsim = 30 # use 30 repetitions
)
```
```
## # A tibble: 5 × 3
## Variable Importance StDev
## <chr> <dbl> <dbl>
## 1 sex 0.229 0.0137
## 2 pclass 0.0920 0.00533
## 3 age 0.0850 0.00477
## 4 sibsp 0.0283 0.00211
## 5 parch 0.0251 0.00351
```
Much better (and just the negative of the previous results, as expected)! For a similar example using a multiclass outcome, see the discussion in [this issue](https://github.com/juliasilge/juliasilge.com/issues/57).
### Benchmarks
In this section, we compare the performance of four implementations of permutation-based VI scores: `iml::FeatureImp()` (version 0.11.1), `ingredients::feature_importance()` (version 2.3.0), `mmpf::permutationImportance` (version 0.0.5), and `vip::vi()` (version 0.4.1).
We simulated 10,000 training observations from the Friedman 1 benchmark problem and trained a random forest using the [ranger](https://cran.r-project.org/package=ranger) package. For each implementation, we computed permutation-based VI scores 100 times using the [microbenchmark](https://cran.r-project.org/package=microbenchmark) package [@R-microbenchmark]. For this benchmark we did not use any of the parallel processing capability available in the [iml](https://cran.r-project.org/package=iml) and [vip](https://cran.r-project.org/package=vip) implementations. The results from [microbenchmark](https://cran.r-project.org/package=microbenchmark) are displayed in Figure \@ref(fig:benchmark). In this case, the [vip](https://cran.r-project.org/package=vip) package (version 0.4.1) was the fastest, followed closely by [ingredients](https://cran.r-project.org/package=ingredients) and [mmpf](https://cran.r-project.org/package=mmpf). It should be noted, however, that the implementations in [vip](https://cran.r-project.org/package=vip) and [iml](https://cran.r-project.org/package=iml) can be parallelized. To the best of our knowledge, this is not the case for [ingredients](https://cran.r-project.org/package=ingredients) or [mmpf](https://cran.r-project.org/package=mmpf) (although it would not be difficult to write a simple parallel wrapper for either). The code used to generate these benchmarks can be found at https://github.com/koalaverse/vip/blob/master/slowtests/slowtests-benchmarks.R.
<img src="../man/figures/benchmark-1.png" alt="Violin plots comparing the computation time from three different implementations of permutation-based VI scores across 100 simulations." width="70%" />
## Shapley method
Although [vip](https://cran.r-project.org/package=vip) focuses on global VI methods, it is becoming increasingly popular to assess global importance by aggregating local VI measures; in particular, *Shapley explanations* [@strumbelj-2014-explaining]. Using *Shapley values* (a method from coalitional game theory), the prediction for a single instance $x^\star$ can be explained by assuming that each feature value in $x^\star$ is a "player" in a game with a payout equal to the corresponding prediction $\hat{f}\left(x^\star\right)$. Shapley values tell us how to fairly distribute the "payout" (i.e., prediction) among the features. Shapley values have become popular due to the attractive fairness properties they possess [@lundberg_unified_2017]. The most popular implementation is available in the Python [shap](https://github.com/shap/shap) package [@lundberg_unified_2017]; however, a number of implementations are now available in R; for example, [iml](https://cran.r-project.org/package=iml), [iBreakDown](https://cran.r-project.org/package=iBreakDown) [@R-iBreakDown], and [fastshap](https://cran.r-project.org/package=fastshap) [@R-fastshap].
Obtaining a global VI score from Shapley values requires aggregating the Shapley values for each feature across the entire training set (or at least a reasonable sample thereof). In particular, we use the mean of the absolute value of the individual Shapley values for each feature. Unfortunately, Shapley values can be computationally expensive, and therefore this approach may not be feasible for large training sets (say, >3000 observations). The [fastshap](https://cran.r-project.org/package=fastshap) package provides some relief by exploiting a few computational tricks, including the option to perform computations in parallel (see `?fastshap::explain` for details). Also, fast and exact algorithms [@lundberg-explainable-2019] can be exploited for certain classes of models.
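To make the aggregation step concrete, the following sketch (not part of the original text) computes mean-|SHAP| scores by hand with `fastshap::explain()` for the PPR model `pp` and the `trn` data from earlier; the prediction wrapper and the small value of `nsim` are assumptions for illustration only:

```r
# Minimal sketch: global VI as the mean absolute Shapley value per feature
library(fastshap)
pfun <- function(object, newdata) predict(object, newdata = newdata)  # numeric predictions
set.seed(101)  # Monte Carlo SHAP values are stochastic
shap <- explain(pp, X = subset(trn, select = -y), pred_wrapper = pfun, nsim = 10)
sort(colMeans(abs(shap)), decreasing = TRUE)  # one score per feature
```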
Starting with [vip](https://cran.r-project.org/package=vip) version 0.4.1 you can now use `method = "shap"` in the call to `vi()` (or use `vi_shap()` directly) to compute global Shapley-based VI scores using the method described above (provided you have the [fastshap](https://cran.r-project.org/package=fastshap) package installed)---see `?vip::vi_shap` for details. To illustrate, we compute Shapley-based VI scores from an [xgboost](https://cran.r-project.org/package=xgboost) model [@R-xgboost] using the Friedman data from earlier; the results are displayed in Figure \@ref(fig:vi-shap).^[Note that the `exact = TRUE` option is only available if you have [fastshap](https://cran.r-project.org/package=fastshap) version 0.0.4 or later.] (**Note:** specifying `include_type = TRUE` in the call to `vip()` causes the type of VI computed to be displayed as part of the axis label.)
```r
# Load required packages
library(xgboost)
# Feature matrix
X <- data.matrix(subset(trn, select = -y)) # matrix of feature values
# Fit an XGBoost model; hyperparameters were tuned using 5-fold CV
set.seed(859) # for reproducibility
bst <- xgboost(X, label = trn$y, nrounds = 338, max_depth = 3, eta = 0.1,
verbose = 0)
# Construct VIP (Figure 17)
vip(bst, method = "shap", train = X, exact = TRUE, include_type = TRUE,
geom = "point", horizontal = FALSE,
aesthetics = list(color = "forestgreen", shape = 17, size = 5)) +
theme_light()
```
<img src="../man/figures/vi-shap-1.png" alt="Shapley-based VI scores from an XGBoost model fit to the simulated Friedman data." width="70%" />
Passing `exact = TRUE` to `fastshap::explain()` via the `...` argument in the call to `vip()` (or `vi()` and `vi_shap()`) only works for [lightgbm](https://cran.r-project.org/package=lightgbm), [xgboost](https://cran.r-project.org/package=xgboost), and additive (generalized) linear models fit using R's internal **`stats`** package. For all other cases, a prediction wrapper must be supplied via the `...` argument.
To illustrate, let's use the previous random forest that was fit to the Titanic data set. Note that Shapley explanations do not support hard classification (i.e., predicted class labels), so we'll have to use the probability-based prediction wrapper defined before:
```r
pfun_prob <- function(object, newdata) { # prediction wrapper
# For Shapley explanations, this should ALWAYS return a numeric vector
predict(object, newdata = newdata, type = "prob")[, "yes"]
}
# Compute Shapley-based VI scores
set.seed(853) # for reproducibility
vi_shap(rfo, train = subset(t1, select = -survived), pred_wrapper = pfun_prob,
nsim = 30)
```
```
## # A tibble: 5 × 2
## Variable Importance
## <chr> <dbl>
## 1 pclass 0.104
## 2 age 0.0649
## 3 sex 0.272
## 4 sibsp 0.0260
## 5 parch 0.0291
```
## Drawbacks of existing methods
As discussed in @hooker-2019-stop, *permute-and-predict* methods—like PDPs, ICE curves, and permutation importance—can produce results that are highly misleading.^[It's been argued that approximate Shapley values share the same drawback; however, @janzing-2019-feature makes a compelling case against those arguments.] For example, the standard approach to computing permutation-based VI scores involves independently permuting individual features. This implicitly makes the assumption that the observed features are statistically independent. In practice, however, features are often not independent, which can lead to nonsensical VI scores. One way to mitigate this issue is to use the conditional approach described in @strobl-2019-conditional; @hooker-2019-stop provides additional alternatives, such as *permute-and-relearn importance*. Unfortunately, to the best of our knowledge, the latter is not yet available in a general-purpose implementation. A similar modification can be applied to PDPs [@parr-2019-technical]^[A basic R implementation is available at <https://github.com/bgreenwell/rstratx>.], which seems reasonable to use in the FIRM approach when strong dependencies among the features are present (though we have not given this much thought or consideration).
We already mentioned that PDPs can be misleading in the presence of strong interaction effects. This drawback, of course, equally applies to the FIRM approach using PDPs for computing VI scores. As discussed earlier, this can be mitigated by using ICE curves instead. Another alternative would be to use *accumulated local effect* (ALE) plots [@apley-2016-visualizing] (though we haven't really tested this idea). Compared to PDPs, ALE plots have the advantage of being faster to compute and less affected by strong dependencies among the features. The downside, however, is that ALE plots are more complicated to implement (hence, they are not currently available when using `method = "firm"`). ALE plots are available in the [ALEPlot](https://cran.r-project.org/package=ALEPlot) [@R-ALEPlot] and [iml](https://cran.r-project.org/package=iml) packages.
@hooker-2007-generalized also argues that measures of feature importance (which concern only *main effects*) can be misleading in high-dimensional settings, especially when there are strong dependencies and interaction effects among the features, and suggests an approach based on a *generalized functional ANOVA decomposition*—though, to our knowledge, this approach is not widely implemented in open source software.
# Summary
VIPs help to visualize the strength of the relationship between each feature and the predicted response, while accounting for all the other features in the model. We've discussed two types of VI: model-specific and model-agnostic, as well as some of their strengths and weaknesses. In this paper, we showed how to construct VIPs for various types of "black box" models in R using the [vip](https://cran.r-project.org/package=vip) package. We also briefly discussed related approaches available in a number of other R packages. Suggestions to avoid high execution times were discussed and demonstrated via examples. This paper is based on [vip](https://cran.r-project.org/package=vip) version 0.4.1. In terms of future development, [vip](https://cran.r-project.org/package=vip) can be expanded in a number of ways. For example, we plan to incorporate the option to compute group-based and conditional permutation scores. Although not discussed in this paper, [vip](https://cran.r-project.org/package=vip) also includes a promising statistic (similar to the variance-based VI scores previously discussed) for measuring the relative strength of interaction between features. Although VIPs can help understand which features are driving the model's predictions, ML practitioners should be cognizant of the fact that none of the methods discussed in this paper are uniformly best across all situations; they require an accurate model that has been properly tuned, and should be checked for consistency with human domain knowledge.
# Acknowledgments
The authors would like to thank the anonymous reviewers and the Editor for their helpful comments and suggestions. We would also like to thank the members of the 84.51$^{\circ}$ Interpretable Machine Learning Special Interest Group for their thoughtful discussions on the topics discussed herein.
# References
#' Return all permutations of a vector
#'
#' Recursively generates all permutations of a vector. The result will be \code{factorial(length(vals))} long so be careful with any longer vectors (e.g. longer than 10).
#'
#' @param vals a vector of elements to be permuted
#' @return A list of vectors containing all permutation of the values
#' @export
#' @seealso \code{\link{sample}}
#' @examples
#' permute(letters[1:3])
#' permute(1:5)
permute<-function(vals){
if(length(vals)==1)return(list(vals))
if(length(vals)==0)return(NULL)
permList<-lapply(1:length(vals),function(ii){
lapply(permute(vals[-ii]),function(y)c(vals[ii],y))
})
return(unlist(permList,recursive=FALSE))
}
#' Find permutations meeting Tukey criteria
#'
#' Find all permutations of 1:n fulfilling Tukey's criteria that there are no runs of 3 or more increases or decreases in a row. Tukey just uses the default n=5 and limit=2.
#'
#' @param n permutations from 1 to n
#' @param limit the maximum number of increases or decreases in a row
#' @return a list of vectors containing valid permutations
#' @export
#' @examples
#' tukeyPermutes()
#' tukeyPermutes(6,3)
tukeyPermutes<-function(n=5,limit=2){
allPermutes<-permute(1:n)
nSameDir<-sapply(allPermutes,function(x)max(rle(diff(x)>0)$lengths))
okPermutes<-allPermutes[nSameDir<limit]
return(okPermutes)
}
#' Generate a permutation string meeting Tukey criteria
#'
#' Find a random string of concatenated permutations of 1:n fulfilling Tukey's criteria that there are no runs of 3 or more increases or decreases in a row. Tukey just uses the default n=5.
#'
#' @param nReps number of permutations to concatenate
#' @param n permutations from 1 to n
#' @return a vector of nReps*n integers giving concatenated permutations
#' @export
#' @examples
#' generatePermuteString()
#' generatePermuteString(5)
generatePermuteString<-function(nReps=20,n=5){
permutes<-tukeyPermutes(n)
indexed<-tapply(permutes,sapply(permutes,'[',1),c)
out<-rep(c(NULL),nReps)
out[[1]]<-sample(permutes,1)[[1]]
if(nReps>1){
for(ii in 2:nReps){
if(diff(out[[ii-1]][n-1:0])>0)targets<-unlist(indexed[1:(out[[ii-1]][n]-1)],recursive=FALSE)
else targets<-unlist(indexed[(out[[ii-1]][n]+1):n],recursive=FALSE)
out[[ii]]<-sample(targets,1)[[1]]
}
}
return(unlist(out))
}
#' Combine multiple permutation strings into one
#'
#' Combine base+1 permutation strings to generate offsets
#'
#' @param nReps number of permutations to paste together
#' @param base generate permutations of integers 1:base
#' @return A nReps*base length vector giving offset positions based on Tukey's algorithm
#' @export
#' @examples
#' tukeyT()
#' tukeyT()
#' tukeyT(5,4)
tukeyT<-function(nReps=10,base=5){
T<-generatePermuteString(nReps,base)
ti<-lapply(1:base,function(x)generatePermuteString(nReps,base))
indexs<-1:length(T)
ts<-sapply(indexs,function(fiveGPlusI)ti[[T[fiveGPlusI]]][ceiling(fiveGPlusI/5)])
#final -1 to deal with using 1:base instead of 0:(base-1)
out<-2+4*(ts-1)+20*(T-1)-1
return(out)
}
#' Generate random positions based on Tukey texture algorithm
#'
#' Generate partly random, partly constrained lateral displacements based on Tukey texture algorithm from Tukey and Tukey 1990
#'
#' @param x the points to be jittered. really only used to calculate length
#' @param jitter if TRUE add random jitter to each point
#' @param thin if TRUE then push points to the center in thin regions
#' @param hollow if TRUE then expand points outward to avoid ``hollowness''
#' @param delta a ``reasonably small value'' used in edge straightening and thinning
#' @return a vector of length length(x) giving displacements for each corresponding point in x
#' @export
#' @examples
#' x<-rnorm(200)
#' plot(tukeyTexture(x),x)
#' x<-1:100
#' plot(tukeyTexture(x),x)
#' plot(tukeyTexture(log10(counties$landArea),TRUE,TRUE),log10(counties$landArea),cex=.25)
tukeyTexture<-function(x,jitter=TRUE,thin=FALSE,hollow=FALSE,delta=diff(stats::quantile(x,c(.25,.75)))*.03){
n<-length(x)
orderX<-order(x)
x<-x[orderX]
offset<-tukeyT(10)
offset[26:50]<-offset[26:50]+2
spread<-rep(offset,length.out=n)
if(jitter)spread<-spread+stats::runif(n,-1,1)
#deal with thin regions
if(thin){
diffLeft<-c(Inf,diff(x))
diffRight<-c(diff(x),Inf)
spread[diffLeft>delta&diffRight>delta]<-50
}
#deal with 'hollow' regions
if(hollow){
current<-1
fiveStarts<-seq(1,n,5)
fiveEnds<-fiveStarts+4
fiveEnds[fiveEnds>n]<-n
for(ii in fiveStarts){
breakPoint<-fiveStarts[which(x[fiveEnds]-x[ii]>delta*10)[1]] #sorted so can just take [1] instead of min
if(is.na(breakPoint))break()
rightMost<-min(breakPoint+4,n)
spread[ii:rightMost]<-(spread[ii:rightMost]-min(spread[ii:rightMost]))/diff(range(spread[ii:rightMost]))*100
}
}
return(spread[order(orderX)])
}
#' Census data on US counties
#'
#' A dataset containing data from the US census bureau
#'
#' @format A data frame with 3143 rows and 8 variables:
#' \describe{
#' \item{id}{GEO.id from original data}
#' \item{state}{state in which the county is located}
#' \item{county}{name of the county}
#' \item{population}{population of the county}
#' \item{housingUnits}{housing units in the county}
#' \item{totalArea}{Area in square miles - Total area}
#' \item{waterArea}{Area in square miles - Water area}
#' \item{landArea}{Area in square miles - Land area}
#' }
#' @references \url{https://web.archive.org/web/20150326040847/https://www.census.gov/prod/cen2010/cph-2-1.pdf}
#' @source http://factfinder.census.gov/bkmk/table/1.0/en/DEC/10_SF1/GCTPH1.US05PR (link now dead), system.file("data-raw", "makeCounties.R", package = "vipor")
"counties"
|
/scratch/gouwar.j/cran-all/cranData/vipor/R/tukey.R
|
#' Functions to generate violin scatter plots
#'
#' Arranges data points using quasirandom noise (van der Corput sequence) to create a plot resembling a cross between a violin plot (showing the density distribution) and a scatter plot (showing the individual points). The development version of this package is on \url{https://github.com/sherrillmix/vipor}
#'
#' The main functions are:
#' \describe{
#' \item{\code{\link{offsetX}}:}{calculate offsets in X position for plotting (groups of) one dimensional data}
#' \item{\code{\link{vpPlot}}:}{a simple wrapper around plot and offsetX to generate plots of grouped data}
#' }
#'
#' @docType package
#' @name vipor
#' @author Scott Sherrill-Mix, \email{shescott@@upenn.edu}
#' @seealso \url{https://github.com/sherrillmix/vipor}
#' @examples
#' dat<-list(rnorm(100),rnorm(50,1,2))
#' ids<-rep(1:length(dat),sapply(dat,length))
#' offset<-offsetX(unlist(dat),ids)
#' plot(unlist(dat),ids+offset)
NULL
#' Plot data using offsets by quasirandom noise to generate a violin point plot
#'
#' Arranges data points using quasirandom noise (van der Corput sequence), pseudorandom noise or alternatively positioning extreme values within a band to the left and right to form beeswarm/one-dimensional scatter/strip chart style plots. That is a plot resembling a cross between a violin plot (showing the density distribution) and a scatter plot (showing the individual points) and so here we'll call it a violin point plot.
#'
#' @param x a grouping factor for y (optional)
#' @param y vector of data points
#' @param xaxt if 'n' then no x axis is plotted
#' @param offsetXArgs a list with arguments for offsetX
#' @param ... additional arguments to plot
#' @return invisibly return the adjusted x positions of the points
#' @export
#' @seealso \code{\link{offsetX}}
#' @examples
#' dat<-list(
#' 'Mean=0'=rnorm(200),
#' 'Mean=1'=rnorm(50,1),
#' 'Bimodal'=c(rnorm(40,-2),rnorm(60,2)),
#' 'Gamma'=rgamma(50,1)
#' )
#' labs<-factor(rep(names(dat),sapply(dat,length)),levels=names(dat))
#' vpPlot(labs,unlist(dat))
vpPlot<-function(x=rep('Data',length(y)),y,xaxt='y',offsetXArgs=NULL,...){
x<-as.factor(x)
ids<-as.numeric(x)
labels<-levels(x)
labelIds<-1:length(labels)
names(labelIds)<-labels
xPos<-ids+do.call(offsetX,c(list(y),list(x),offsetXArgs))
graphics::plot(xPos,y,...,xaxt='n',xlab='')
#avoid point color passing into axis ticks inside ...
noColAxis<-function(...,col)graphics::axis(1,labelIds,labels,col='black',...)
if(xaxt!='n')noColAxis(...)
return(invisible(xPos))
}
#' Offset data using quasirandom noise to avoid overplotting
#'
#' Arranges data points using quasirandom noise (van der Corput sequence), pseudorandom noise or alternatively positioning extreme values within a band to the left and right to form beeswarm/one-dimensional scatter/strip chart style plots. That is a plot resembling a cross between a violin plot (showing the density distribution) and a scatter plot (showing the individual points). This function returns a vector of the offsets to be used in plotting.
#'
#' @param y vector of data points
#' @param x a grouping factor for y (optional)
#' @param width the maximum spacing away from center for each group of points. Since points are spaced to left and right, the maximum width of the cluster will be approximately width*2 (0 = no offset, default = 0.4)
#' @param varwidth adjust the width of each group based on the number of points in the group
#' @param ... additional arguments to offsetSingleGroup
#' @return a vector of x-offsets of the same length as y
#' @export
#' @examples
#' ## Generate fake data
#' dat <- list(rnorm(50), rnorm(500), c(rnorm(100), rnorm(100,5)), rcauchy(100))
#' names(dat) <- c("Normal", "Dense Normal", "Bimodal", "Extremes")
#'
#' ## Plot each distribution with a variety of parameters
#' par(mfrow=c(4,1), mar=c(2,4, 0.5, 0.5))
#' sapply(names(dat),function(label) {
#' y<-dat[[label]]
#'
#' offsets <- list(
#' 'Default'=offsetX(y),
#' 'Smoother'=offsetX(y, adjust=2),
#' 'Tighter'=offsetX(y, adjust=0.1),
#' 'Thinner'=offsetX(y, width=0.1)
#' )
#' ids <- rep(1:length(offsets), sapply(offsets,length))
#'
#' plot(unlist(offsets) + ids, rep(y, length(offsets)),
#' ylab=label, xlab='', xaxt='n', pch=21, las=1)
#' axis(1, 1:4, c("Default", "Adjust=2", "Adjust=0.1", "Width=10%"))
#' })
#'
offsetX <- function(y, x=rep(1, length(y)), width=0.4, varwidth=FALSE,...) {
if (length(x)!=length(y)) stop(simpleError('x and y not the same length in offsetX'))
maxLength<-max(table(x))
# Apply the van der Corput noise to each x group to create offsets
offsets <- aveWithArgs(y,x, FUN=offsetSingleGroup,maxLength=if(varwidth){maxLength}else{NULL},...)
out <- offsets*width
return(out)
}
# Offset data to avoid overplotting for a single subgroup of data
#
# Arranges data points using quasirandom noise (van der Corput sequence), pseudorandom noise or alternatively positioning extreme values within a band to the left and right to form beeswarm/one-dimensional scatter/strip chart style plots. Returns a vector of the offsets to be used in plotting. This function is mostly used as a subroutine of \code{\link{offsetX}}
# @param y y values for a single group for which offsets should be calculated
#' @param maxLength multiply the offset by sqrt(length(y)/maxLength) if not NULL. The sqrt is to match boxplot (allows comparison of order of magnitude different ns, scale with standard error)
#' @param method method used to distribute the points:
#' \describe{
#' \item{quasirandom:}{points are distributed within a kernel density estimate of the distribution with offset determined by quasirandom Van der Corput noise}
#' \item{pseudorandom:}{points are distributed within a kernel density estimate of the distribution with offset determined by pseudorandom noise a la jitter}
#' \item{maxout:}{points are distributed within a kernel density with points in a band distributed with highest value points on the outside and lowest in the middle}
#' \item{minout:}{points are distributed within a kernel density with points in a band distributed with highest value points in the middle and lowest on the outside}
#' \item{tukey:}{points are distributed as described in Tukey and Tukey "Strips displaying empirical distributions: I. textured dot strips"}
#' \item{tukeyDense:}{points are distributed as described in Tukey and Tukey but are constrained with the kernel density estimate}
#' }
#' @param nbins the number of points used to calculate density (defaults to 2^10 for the quasirandom and pseudorandom methods and to the larger of 2 or the number of data points divided by 5 for the other methods)
#' @param adjust adjust the bandwidth used to calculate the kernel density (smaller values mean tighter fit, larger values looser fit, default is 1)
#' @export
#' @rdname offsetX
# @seealso \code{\link{offsetX}}, \code{\link[stats]{density}}
# @return a vector of x-offsets between -1 and 1 of the same length as y
offsetSingleGroup<-function(y,maxLength=NULL,method=c('quasirandom','pseudorandom','smiley','maxout','frowney','minout','tukey','tukeyDense'),nbins=NULL,adjust=1) {
method<-match.arg(method)
if(method %in% c('smiley'))method<-'maxout'
if(method %in% c('frowney'))method<-'minout'
if(is.null(nbins))nbins<-ifelse(method %in% c("pseudorandom","quasirandom"),2^10,max(2,ceiling(length(y)/5)))
#catch 0 length inputs
if (length(y) == 0) return(NULL)
# If there's only one value in this group, leave it alone
if (length(y) == 1) return(0)
#sqrt to match boxplot (allows comparison of order of magnitude different ns, scale with standard error)
if(is.null(maxLength)||maxLength<=0)subgroup_width <- 1
else subgroup_width <- sqrt(length(y)/maxLength)
dens <- stats::density(y, n = nbins, adjust = adjust)
dens$y <- dens$y / max(dens$y)
offset <- switch(method,
'quasirandom'=vanDerCorput(length(y))[rank(y, ties.method="first")],
'pseudorandom'=stats::runif(length(y)),
'maxout'=stats::ave(y,as.character(cut(y,dens$x)),FUN=topBottomDistribute),
'minout'=stats::ave(y,as.character(cut(y,dens$x)),FUN=function(x)topBottomDistribute(x,frowney=TRUE)),
'tukeyDense'=tukeyTexture(y)/100,
'tukey'=tukeyTexture(y,TRUE,TRUE)/100,
stop(simpleError('Unrecognized method in offsetSingleGroup'))
)
if(method %in% c('tukey'))pointDensities<-1
else pointDensities<-stats::approx(dens$x,dens$y,y)$y
#*2 to get -1 to 1
out<-(offset-.5)*2*pointDensities*subgroup_width
return(out)
}
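# Illustrative sketch (not part of the package): offsetSingleGroup() is normally
# reached through offsetX(), but calling it directly shows how the method
# argument changes the point layout while keeping all offsets within [-1, 1].
set.seed(1)
y <- rnorm(100)
range(offsetSingleGroup(y))                                   # quasirandom (default)
range(offsetSingleGroup(y, method = "pseudorandom"))
range(offsetSingleGroup(y, method = "maxout", nbins = 20))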
#' Produce offsets such that points are sorted with most extreme values to right and left
#'
#' Produce offsets to generate smile-like or frown-like distributions of points. That is sorting the points so that the most extreme values alternate between the left and right e.g. (max,3rd max,...,4th max, 2nd max). The function returns either a proportion between 0 and 1 (useful for plotting) or an order
#'
#' @param x the elements to be sorted
#' @param frowney if TRUE then sort minimums to the outside, otherwise sort maximums to the outside
#' @param prop if FALSE then return an ordering of the data with extremes on the outside. If TRUE then return a sequence between 0 and 1 sorted by the ordering
#' @return a vector of the same length as x with values ranging between 0 and 1 if prop is TRUE or an ordering of 1 to length(x)
#' @export
#' @examples
#' topBottomDistribute(1:10)
#' topBottomDistribute(1:10,TRUE)
topBottomDistribute<-function(x,frowney=FALSE,prop=TRUE){
if(length(x)==1)return(.5)
if(frowney)x<- -x
newOrder<-rank(x,ties.method='first')
newOrder[newOrder%%2==1]<- -newOrder[newOrder%%2==1]
newOrder<-rank(newOrder)
if(prop){
props<-seq(0,1,length.out=length(newOrder))
newOrder<-props[newOrder]
}
return(newOrder)
}
#' Generate van der Corput sequences
#'
#' Generates the first (or an arbitrary offset) n elements of the van der Corput low-discrepancy sequence for a given base
#'
#' @param n the number of elements of the van der Corput sequence to generate
#' @param base the base to use for calculating the van der Corput sequence
#' @param start start at this position in the sequence
#' @return a vector of length n with values ranging between 0 and 1
#' @references \url{https://en.wikipedia.org/wiki/Van_der_Corput_sequence}
#' @export
#' @examples
#' vanDerCorput(100)
vanDerCorput <- function(n, base=2,start=1){
#generate n first digits of the van der Corput sequence
if(n==0)return(c())
if(n<0)stop(simpleError('n < 0 in vanDerCorput'))
if(base<=1)stop(simpleError('base <=1 in vanDerCorput'))
if(start<1)stop(simpleError('start < 1 in vanDerCorput'))
out<-sapply(1:n+start-1,function(ii)digits2number(rev(number2digits(ii,base)),base,TRUE))
return(out)
}
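# Illustrative sketch (not part of the package): the first few base-2 van der
# Corput values subdivide the unit interval ever more finely, and the start
# argument simply resumes the sequence at a later element.
vanDerCorput(4)             # 0.500 0.250 0.750 0.125
vanDerCorput(2, start = 3)  # 0.750 0.125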
#' Convert an integer to an arbitrary base
#'
#' Takes an integer and converts it into an arbitrary base e.g. binary or octal. Note that the first digit in the output is the least significant.
#'
#' @param n the integer to be converted
#' @param base the base for the numeral system (e.g. 2 for binary or 8 for octal)
#' @return a vector of length \code{ceiling(log(n+1,base))} representing each digit for that numeral system
#' @references \url{https://en.wikipedia.org/wiki/Radix}
#' @export
#' @examples
#' number2digits(100)
#' number2digits(100,8)
number2digits <- function(n, base=2){
if(n==0)return(c())
if(n<0)stop(simpleError('negative number in number2digits'))
if(base<=1)stop(simpleError('base <=1 in number2digits'))
nDigits<-ceiling(log(n+1,base))
powers<-base^(0:nDigits)
out<-diff(n %% powers)/powers[-length(powers)]
return(out)
}
#' Convert a vector of integers representing digits in an arbitrary base to an integer
#'
#' Takes a vector of integers representing digits in an arbitrary base, e.g. binary or octal, and converts it into an integer (or, if fractional is TRUE, into that integer divided by base^length(digits)). Note that the first digit in the input is the least significant.
#'
#' @param digits a vector of integers representing digits in an arbitrary base
#' @param base the base for the numeral system (e.g. 2 for binary or 8 for octal)
#' @param fractional divide the output by the max for this number of digits and base. Note that this is \code{base^length(digits)} not \code{base^length(digits)-1}.
#' @return an integer
#' @references \url{https://en.wikipedia.org/wiki/Radix}
#' @export
#' @examples
#' digits2number(c(4,4,1),8)
#' digits2number(number2digits(100))
digits2number<-function(digits,base=2,fractional=FALSE){
if(length(digits)==0)return(0)
if(base<=0)stop(simpleError('base <= 0 in digits2number'))
if(any(digits<0))stop(simpleError('digit < 0 in digits2number'))
powers<-0:(length(digits)-1)
out<-sum(digits*base^powers)
if(fractional)out<-out/base^length(digits)
return(out)
}
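# Illustrative sketch (not part of the package): number2digits() and
# digits2number() are inverses, which vanDerCorput() relies on when it reverses
# the digit order and divides by base^length(digits) to mirror an integer about
# the radix point.
stopifnot(digits2number(number2digits(100)) == 100)        # base 2 round trip
stopifnot(digits2number(number2digits(100, 8), 8) == 100)  # base 8 round trip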
#' the ave() function but with arguments passed to FUN
#'
#' A function is applied to subsets of \code{x}, where each subset consists of those observations with the same groupings in \code{y}
#'
#' @param x a vector to apply FUN to
#' @param y a vector or list of vectors of grouping variables all of the same length as \code{x}
#' @param FUN function to apply for each factor level combination.
#' @param ... additional arguments to \code{FUN}
#' @return A numeric vector of the same length as \code{x} where each element contains the output from \code{FUN} applied to the corresponding subgroup for that element (repeated if necessary within a subgroup).
#' @seealso \code{\link{ave}}
#' @export
#' @examples
#' aveWithArgs(1:10,rep(1:5,2))
#' aveWithArgs(c(1:9,NA),rep(1:5,2),max,na.rm=TRUE)
aveWithArgs<-function(x, y, FUN = mean,...){
if (missing(y))
x[] <- FUN(x,...)
else {
g <- interaction(y)
split(x, g) <- lapply(split(x, g), FUN,...)
}
x
}
#' Data on HIV integration sites from several studies
#'
#' A dataset containing data from a meta-analysis looking for differences between active and inactive HIV integrations. Each row represents a provirus integrated somewhere in a human chromosome, along with whether viral expression was detected, the distance to the nearest gene and the number of H4K12ac ChIP-Seq reads mapped within 50,000 bases of the integration.
#'
#' @format A data frame with 12436 rows and 4 variables:
#' \describe{
#' \item{study}{the cell population infected by HIV}
#' \item{latent}{whether the provirus was active (expressed) or inactive (latent)}
#' \item{nearestGene}{distance to nearest gene (transcription unit) (0 if in a gene)}
#' \item{H4K12ac}{number of reads aligned within +- 50,000 bases in a H4K12ac ChIP-Seq}
#' }
#' @references \url{https://retrovirology.biomedcentral.com/articles/10.1186/1742-4690-10-90}
#' @source \url{https://retrovirology.biomedcentral.com/articles/10.1186/1742-4690-10-90}, system.file("data-raw", "makeIntegrations.R", package = "vipor")
"integrations"
|
/scratch/gouwar.j/cran-all/cranData/vipor/R/violinPoint.R
|
### R code from vignette source 'methodComparison.Rnw'
###################################################
### code chunk number 1: package
###################################################
options(keep.source = TRUE, width = 60)
packageInfo <- packageDescription("vipor")
library(vipor)
packageKeywords<-"visualization, display, one dimensional, grouped, groups, violin, scatter, points, quasirandom, beeswarm, van der Corput, beanplot"
###################################################
### code chunk number 2: simData
###################################################
library(vipor)
library(beeswarm)
library(beanplot)
library(vioplot)
set.seed(12345)
dat <- list(rnorm(50), rnorm(500), c(rnorm(100),
rnorm(100,5)), rcauchy(100))
names(dat) <- c("Normal", "Dense Normal", "Bimodal", "Extremes")
###################################################
### code chunk number 3: simmed (eval = FALSE)
###################################################
## par(mfrow=c(4,1), mar=c(2.5,3.1, 1.2, 0.5),mgp=c(2.1,.75,0),
## cex.axis=1.2,cex.lab=1.2,cex.main=1.2)
## dummy<-sapply(names(dat),function(label) {
## y<-dat[[label]]
## # need to plot first so beeswarm can figure out pars
## # xlim is a magic number due to needing plot for beeswarm
## plot(1,1,type='n',xlab='',xaxt='n',ylab='y value',las=1,main=label,
## xlim=c(0.5,9.5),ylim=range(y))
## offsets <- list(
## 'Quasi'=offsetX(y), # Default
## 'Pseudo'=offsetX(y, method='pseudorandom',nbins=100),
## 'Min out'=offsetX(y, method='minout',nbins=20),
## 'Max out\n20 bin'=offsetX(y, method='maxout',nbins=20),
## 'Max out\n100 bin'=offsetX(y, method='maxout',nbins=100),
## 'Max out\nn/5 bin'=offsetX(y, method='maxout',nbins=round(length(y)/5)),
## 'Beeswarm'=swarmx(rep(0,length(y)),y)$x,
## 'Tukey'=offsetX(y,method='tukey'),
## 'Tukey +\ndensity'=offsetX(y,method='tukeyDense')
## )
## ids <- rep(1:length(offsets), each=length(y))
## points(unlist(offsets) + ids, rep(y, length(offsets)),
## pch=21,col='#00000099',bg='#00000033')
## par(lheight=.8)
## axis(1, 1:length(offsets), names(offsets),padj=1,
## mgp=c(0,-.1,0),tcl=-.5,cex.axis=1.1)
## })
###################################################
### code chunk number 4: plotSimmed
###################################################
par(mfrow=c(4,1), mar=c(2.5,3.1, 1.2, 0.5),mgp=c(2.1,.75,0),
cex.axis=1.2,cex.lab=1.2,cex.main=1.2)
dummy<-sapply(names(dat),function(label) {
y<-dat[[label]]
# need to plot first so beeswarm can figure out pars
# xlim is a magic number due to needing plot for beeswarm
plot(1,1,type='n',xlab='',xaxt='n',ylab='y value',las=1,main=label,
xlim=c(0.5,9.5),ylim=range(y))
offsets <- list(
'Quasi'=offsetX(y), # Default
'Pseudo'=offsetX(y, method='pseudorandom',nbins=100),
'Min out'=offsetX(y, method='minout',nbins=20),
'Max out\n20 bin'=offsetX(y, method='maxout',nbins=20),
'Max out\n100 bin'=offsetX(y, method='maxout',nbins=100),
'Max out\nn/5 bin'=offsetX(y, method='maxout',nbins=round(length(y)/5)),
'Beeswarm'=swarmx(rep(0,length(y)),y)$x,
'Tukey'=offsetX(y,method='tukey'),
'Tukey +\ndensity'=offsetX(y,method='tukeyDense')
)
ids <- rep(1:length(offsets), each=length(y))
points(unlist(offsets) + ids, rep(y, length(offsets)),
pch=21,col='#00000099',bg='#00000033')
par(lheight=.8)
axis(1, 1:length(offsets), names(offsets),padj=1,
mgp=c(0,-.1,0),tcl=-.5,cex.axis=1.1)
})
###################################################
### code chunk number 5: simmedBox (eval = FALSE)
###################################################
## x<-rep(names(dat),sapply(dat,length))
## y<-unlist(lapply(dat,function(x)x/max(abs(x))))
## par(mfrow=c(4,1), mar=c(6,4.5, 1.2, 0.5),mgp=c(3.3,.75,0),
## cex.axis=1.2,cex.lab=1.2,cex.main=1.2,las=1)
## vpPlot(x,y, ylab='',cex=.7, pch=21,
## col='#00000044',bg='#00000011')
## boxplot(y~x,main='Boxplot',ylab='')
## beanplot(y~x,main='Beanplot',ylab='')
## vioInput<-split(y,x)
## labs<-names(vioInput)
## names(vioInput)[1]<-'x'
## do.call(vioplot,c(vioInput,list(names=labs,col='white')))
## title(main='Vioplot')
###################################################
### code chunk number 6: plotSimmedBox
###################################################
x<-rep(names(dat),sapply(dat,length))
y<-unlist(lapply(dat,function(x)x/max(abs(x))))
par(mfrow=c(4,1), mar=c(6,4.5, 1.2, 0.5),mgp=c(3.3,.75,0),
cex.axis=1.2,cex.lab=1.2,cex.main=1.2,las=1)
vpPlot(x,y, ylab='',cex=.7, pch=21,
col='#00000044',bg='#00000011')
boxplot(y~x,main='Boxplot',ylab='')
beanplot(y~x,main='Beanplot',ylab='')
vioInput<-split(y,x)
labs<-names(vioInput)
names(vioInput)[1]<-'x'
do.call(vioplot,c(vioInput,list(names=labs,col='white')))
title(main='Vioplot')
###################################################
### code chunk number 7: vpCounties
###################################################
y<-log10(counties$landArea)
offsets <- list(
'Quasi'=offsetX(y), # Default
'Pseudo'=offsetX(y, method='pseudorandom',nbins=100),
'Min out'=offsetX(y, method='minout',nbins=20),
'Max out\n20 bin'=offsetX(y, method='maxout',nbins=20),
'Max out\n100 bin'=offsetX(y, method='maxout',nbins=100),
'Max out\nn/5 bin'=offsetX(y, method='maxout',nbins=round(length(y)/5)),
'Beeswarm'=swarmx(rep(0,length(y)),y)$x,
'Tukey'=offsetX(y,method='tukey'),
'Tukey +\ndensity'=offsetX(y,method='tukeyDense')
)
ids <- rep(1:length(offsets), each=length(y))
#reduce file size by rendering to raster
tmpPng<-tempfile(fileext='.png')
png(tmpPng,height=1200,width=1800,res=300)
par(mar=c(2.5,3.5,.2,0.2))
plot(
unlist(offsets) + ids, rep(y, length(offsets)),
xlab='', xaxt='n', yaxt='n',pch='.',
ylab='Land area (square miles)',mgp=c(2.7,1,0),
col='#00000077'
)
par(lheight=.8)
axis(1, 1:length(offsets), names(offsets),padj=1,
mgp=c(0,-.3,0),tcl=-.3,cex.axis=.65)
axis(2, pretty(y), format(10^pretty(y),scientific=FALSE,big.mark=','),
mgp=c(0,.5,0),tcl=-.3,las=1,cex.axis=.75)
dev.off()
###################################################
### code chunk number 8: vpOrchard (eval = FALSE)
###################################################
## par(mfrow=c(5,1), mar=c(3.5,3.1, 1.2, 0.5),mgp=c(2.1,.75,0),
## cex.axis=1.2,cex.lab=1.2,cex.main=1.2,las=1)
## #simple function to avoid repeating code
## plotFunc<-function(x,y,offsetXArgs){
## vpPlot(x,y, ylab='Log treatment effect', pch=21,
## col='#00000099',bg='#00000033', offsetXArgs=offsetXArgs)
## title(xlab='Treatment')
## addMeanLines(x,y)
## }
## addMeanLines<-function(x,y,col='#FF000099'){
## means<-tapply(y,x,mean)
## segments(
## 1:length(means)-.25,means,1:length(means)+.25,means,
## col=col,lwd=2
## )
## }
## #quasirandom
## plotFunc(OrchardSprays$treatment,log(OrchardSprays$decrease),
## list(width=.2))
## title(main='Quasirandom')
## #pseudorandom
## plotFunc(OrchardSprays$treatment,log(OrchardSprays$decrease),
## list(method='pseudo',width=.2))
## title(main='Pseudorandom')
## #smiley
## plotFunc(OrchardSprays$treatment,log(OrchardSprays$decrease),
## list(method='maxout',width=.2))
## title(main='Max outside')
## #beeswarm
## beeInput<-split(log(OrchardSprays$decrease), OrchardSprays$treatment)
## beeswarm(beeInput,las=1,ylab='Log treatment effect',xlab='Treatment',
## pch=21, col='#00000099',bg='#00000033', main='Beeswarm')
## addMeanLines(OrchardSprays$treatment,log(OrchardSprays$decrease))
## plotFunc(OrchardSprays$treatment,log(OrchardSprays$decrease),
## list(method='tukey',width=.2))
## title(main='Tukey')
###################################################
### code chunk number 9: showVpOrchard
###################################################
par(mfrow=c(5,1), mar=c(3.5,3.1, 1.2, 0.5),mgp=c(2.1,.75,0),
cex.axis=1.2,cex.lab=1.2,cex.main=1.2,las=1)
#simple function to avoid repeating code
plotFunc<-function(x,y,offsetXArgs){
vpPlot(x,y, ylab='Log treatment effect', pch=21,
col='#00000099',bg='#00000033', offsetXArgs=offsetXArgs)
title(xlab='Treatment')
addMeanLines(x,y)
}
addMeanLines<-function(x,y,col='#FF000099'){
means<-tapply(y,x,mean)
segments(
1:length(means)-.25,means,1:length(means)+.25,means,
col=col,lwd=2
)
}
#quasirandom
plotFunc(OrchardSprays$treatment,log(OrchardSprays$decrease),
list(width=.2))
title(main='Quasirandom')
#pseudorandom
plotFunc(OrchardSprays$treatment,log(OrchardSprays$decrease),
list(method='pseudo',width=.2))
title(main='Pseudorandom')
#smiley
plotFunc(OrchardSprays$treatment,log(OrchardSprays$decrease),
list(method='maxout',width=.2))
title(main='Max outside')
#beeswarm
beeInput<-split(log(OrchardSprays$decrease), OrchardSprays$treatment)
beeswarm(beeInput,las=1,ylab='Log treatment effect',xlab='Treatment',
pch=21, col='#00000099',bg='#00000033', main='Beeswarm')
addMeanLines(OrchardSprays$treatment,log(OrchardSprays$decrease))
plotFunc(OrchardSprays$treatment,log(OrchardSprays$decrease),
list(method='tukey',width=.2))
title(main='Tukey')
###################################################
### code chunk number 10: vpSinger (eval = FALSE)
###################################################
## data('singer',package='lattice')
## parts<-sub(' [0-9]+$','',singer$voice)
## par(mfrow=c(5,1), mar=c(3.5,3.1, 1.2, 0.5),mgp=c(2.1,.75,0),
## cex.axis=1.2,cex.lab=1.2,cex.main=1.2,las=1)
## #simple function to avoid repeating code
## plotFunc<-function(x,y,...){
## vpPlot(x,y, ylab='Height',pch=21,col='#00000099',bg='#00000033',...)
## addMeanLines(x,y)
## }
## #quasirandom
## plotFunc(parts,singer$height,
## main='Quasirandom')
## #pseudorandom
## plotFunc(parts,singer$height,offsetXArgs=list(method='pseudo'),
## main='Pseudorandom')
## #smiley
## plotFunc(parts,singer$height,offsetXArgs=list(method='maxout'),
## main='Max outside')
## #beeswarm
## beeInput<-split(singer$height, parts)
## beeswarm(beeInput,ylab='Height',main='Beeswarm',
## pch=21, col='#00000099',bg='#00000033')
## addMeanLines(parts,singer$height)
## #tukey
## plotFunc(parts,singer$height,offsetXArgs=list(method='tukey'),
## main='Tukey')
###################################################
### code chunk number 11: showVpSinger
###################################################
data('singer',package='lattice')
parts<-sub(' [0-9]+$','',singer$voice)
par(mfrow=c(5,1), mar=c(3.5,3.1, 1.2, 0.5),mgp=c(2.1,.75,0),
cex.axis=1.2,cex.lab=1.2,cex.main=1.2,las=1)
#simple function to avoid repeating code
plotFunc<-function(x,y,...){
vpPlot(x,y, ylab='Height',pch=21,col='#00000099',bg='#00000033',...)
addMeanLines(x,y)
}
#quasirandom
plotFunc(parts,singer$height,
main='Quasirandom')
#pseudorandom
plotFunc(parts,singer$height,offsetXArgs=list(method='pseudo'),
main='Pseudorandom')
#smiley
plotFunc(parts,singer$height,offsetXArgs=list(method='maxout'),
main='Max outside')
#beeswarm
beeInput<-split(singer$height, parts)
beeswarm(beeInput,ylab='Height',main='Beeswarm',
pch=21, col='#00000099',bg='#00000033')
addMeanLines(parts,singer$height)
#tukey
plotFunc(parts,singer$height,offsetXArgs=list(method='tukey'),
main='Tukey')
###################################################
### code chunk number 12: vpBeaver (eval = FALSE)
###################################################
## y<-c(beaver1$temp,beaver2$temp)
## x<-rep(c('Beaver 1','Beaver 2'), c(nrow(beaver1),nrow(beaver2)))
## par(mfrow=c(3,2), mar=c(3.5,4.5, 1.2, 0.5),mgp=c(3,.75,0),
## cex.axis=1.2,cex.lab=1.2,cex.main=1.2)
## #simple function to avoid repeating code
## plotFunc<-function(x,y,...){
## vpPlot(x,y, las=1, ylab='Body temperature',pch=21,
## col='#00000099',bg='#00000033',...)
## addMeanLines(x,y)
## }
## #quasirandom
## plotFunc(x,y,main='Quasirandom')
## #pseudorandom
## plotFunc(x,y,offsetXArgs=list(method='pseudo'),main='Pseudorandom')
## #smiley
## plotFunc(x,y,offsetXArgs=list(method='maxout'),main='Max outside')
## #beeswarm
## beeInput<-split(y,x)
## beeswarm(beeInput,las=1,ylab='Body temperature',main='Beeswarm',
## pch=21, col='#00000099',bg='#00000033')
## addMeanLines(x,y)
## #tukey
## plotFunc(x,y,offsetXArgs=list(method='tukey'),main='Tukey')
###################################################
### code chunk number 13: showBeaver
###################################################
y<-c(beaver1$temp,beaver2$temp)
x<-rep(c('Beaver 1','Beaver 2'), c(nrow(beaver1),nrow(beaver2)))
par(mfrow=c(3,2), mar=c(3.5,4.5, 1.2, 0.5),mgp=c(3,.75,0),
cex.axis=1.2,cex.lab=1.2,cex.main=1.2)
#simple function to avoid repeating code
plotFunc<-function(x,y,...){
vpPlot(x,y, las=1, ylab='Body temperature',pch=21,
col='#00000099',bg='#00000033',...)
addMeanLines(x,y)
}
#quasirandom
plotFunc(x,y,main='Quasirandom')
#pseudorandom
plotFunc(x,y,offsetXArgs=list(method='pseudo'),main='Pseudorandom')
#smiley
plotFunc(x,y,offsetXArgs=list(method='maxout'),main='Max outside')
#beeswarm
beeInput<-split(y,x)
beeswarm(beeInput,las=1,ylab='Body temperature',main='Beeswarm',
pch=21, col='#00000099',bg='#00000033')
addMeanLines(x,y)
#tukey
plotFunc(x,y,offsetXArgs=list(method='tukey'),main='Tukey')
###################################################
### code chunk number 14: vpStock (eval = FALSE)
###################################################
## y<-as.vector(EuStockMarkets)
## x<-rep(colnames(EuStockMarkets), each=nrow(EuStockMarkets))
## par(mfrow=c(3,2), mar=c(4,4.3, 1.2, 0.5),mgp=c(3.3,.75,0),
## cex.axis=1.2,cex.lab=1.2,cex.main=1.2,las=1)
## #simple function to avoid repeating code
## plotFunc<-function(x,y,...){
## vpPlot(x,y, ylab='Price',cex=.7,cex.axis=.7,
## mgp=c(2.5,.75,0),tcl=-.4, pch=21,
## col='#00000011',bg='#00000011',...)
## addMeanLines(x,y)
## }
## #quasirandom
## plotFunc(x,y,main='Quasirandom')
## #pseudorandom
## plotFunc(x,y,offsetXArgs=list(method='pseudo'),main='Pseudorandom')
## #smiley
## plotFunc(x,y,offsetXArgs=list(method='maxout'),main='Max outside')
## #beeswarm
## #beeInput<-split(y,x)
## beeswarm(EuStockMarkets[,'DAX',drop=FALSE],cex=.7, ylab='Price',
## main='Beeswarm',pch=21, col='#00000099',bg='#00000033',cex.axis=.7)
## #tukey
## plotFunc(x,y,offsetXArgs=list(method='tukey'),main='Tukey')
###################################################
### code chunk number 15: showStock
###################################################
y<-as.vector(EuStockMarkets)
x<-rep(colnames(EuStockMarkets), each=nrow(EuStockMarkets))
par(mfrow=c(3,2), mar=c(4,4.3, 1.2, 0.5),mgp=c(3.3,.75,0),
cex.axis=1.2,cex.lab=1.2,cex.main=1.2,las=1)
#simple function to avoid repeating code
plotFunc<-function(x,y,...){
vpPlot(x,y, ylab='Price',cex=.7,cex.axis=.7,
mgp=c(2.5,.75,0),tcl=-.4, pch=21,
col='#00000011',bg='#00000011',...)
addMeanLines(x,y)
}
#quasirandom
plotFunc(x,y,main='Quasirandom')
#pseudorandom
plotFunc(x,y,offsetXArgs=list(method='pseudo'),main='Pseudorandom')
#smiley
plotFunc(x,y,offsetXArgs=list(method='maxout'),main='Max outside')
#beeswarm
#beeInput<-split(y,x)
beeswarm(EuStockMarkets[,'DAX',drop=FALSE],cex=.7, ylab='Price',
main='Beeswarm',pch=21, col='#00000099',bg='#00000033',cex.axis=.7)
#tukey
plotFunc(x,y,offsetXArgs=list(method='tukey'),main='Tukey')
###################################################
### code chunk number 16: vpInts
###################################################
ints<-integrations[integrations$nearestGene>0,]
y<-log10(ints$nearestGene)
x<-paste(ints$latent,ints$study,sep='\n')
#reduce file size by rendering to raster
tmpPng<-tempfile(fileext='.png')
png(tmpPng,height=2400,width=1500,res=300)
par(mfrow=c(4,1), mar=c(7.5,3.5, 1.2, 0.5),mgp=c(2.5,.75,0),
cex.axis=1.2,cex.lab=1.2,cex.main=1.2)
#simple function to avoid repeating code
plotFunc<-function(x,y,...){
cols<-ifelse(grepl('Expressed',x),'#FF000033','#0000FF33')
vpPlot(x,y,las=2, ylab='Distance to gene',cex=.7,yaxt='n',
pch=21, col=NA,bg=cols,lheight=.4,...)
prettyY<-pretty(y)
yLabs<-sapply(prettyY,function(x)as.expression(bquote(10^.(x))))
axis(2,prettyY,yLabs,las=1)
addMeanLines(x,y,col='#000000AA')
}
#quasirandom
plotFunc(x,y,main='Quasirandom')
#pseudorandom
plotFunc(x,y,offsetXArgs=list(method='pseudo'),main='Pseudorandom')
#smiley
plotFunc(x,y,offsetXArgs=list(method='maxout'),main='Max outside')
#tukey
plotFunc(x,y,offsetXArgs=list(method='tukey'),main='Tukey')
#beeswarm
#beeInput<-split(y,x)
#beeswarm(beeInput,las=1,cex=.7, ylab='Log distance to gene',
#main='Beeswarm',pch=21, col='#00000099',bg='#00000033')
#addMeanLines(x,y)
dev.off()
###################################################
### code chunk number 17: vpDiamond (eval = FALSE)
###################################################
## select<-sample(1:nrow(ggplot2::diamonds),3000)
## y<-unlist(log10(ggplot2::diamonds[select,'price']))
## x<-unlist(ggplot2::diamonds[select,'cut'])
## par(mfrow=c(5,1), mar=c(6,4.5, 1.2, 0.5),mgp=c(3.3,.75,0),
## cex.axis=1.2,cex.lab=1.2,cex.main=1.2,las=1)
## #simple function to avoid repeating code
## prettyYAxis<-function(y){
## prettyY<-pretty(y)
## yLabs<-sapply(prettyY,function(x)as.expression(bquote(10^.(x))))
## axis(2,prettyY,yLabs)
## }
## #quasirandom
## vpPlot(x,y,offsetXArgs=list(varwidth=TRUE),
## ylab='Price',cex=.7,pch=21, col='#00000044',
## bg='#00000011',yaxt='n',main='Quasirandom')
## prettyYAxis(y)
## #tukey
## vpPlot(x,y,offsetXArgs=list(method='tukey'),
## ylab='Price',cex=.7,pch=21, col='#00000044',
## bg='#00000011',yaxt='n',main='Tukey')
## prettyYAxis(y)
## #boxplot
## boxplot(y~x,main='Boxplot',ylab='Price',yaxt='n')
## prettyYAxis(y)
## #beanplot
## beanplot(y~x,main='Beanplot',ylab='Price',yaxt='n')
## prettyYAxis(y)
## vioInput<-split(y,x)
## labs<-names(vioInput)
## names(vioInput)[1]<-'x'
## #vioplot
## do.call(vioplot,c(vioInput,list(names=labs,col='white')))
## title(ylab='Price', main='Vioplot')
###################################################
### code chunk number 18: showDiamond
###################################################
select<-sample(1:nrow(ggplot2::diamonds),3000)
y<-unlist(log10(ggplot2::diamonds[select,'price']))
x<-unlist(ggplot2::diamonds[select,'cut'])
par(mfrow=c(5,1), mar=c(6,4.5, 1.2, 0.5),mgp=c(3.3,.75,0),
cex.axis=1.2,cex.lab=1.2,cex.main=1.2,las=1)
#simple function to avoid repeating code
prettyYAxis<-function(y){
prettyY<-pretty(y)
yLabs<-sapply(prettyY,function(x)as.expression(bquote(10^.(x))))
axis(2,prettyY,yLabs)
}
#quasirandom
vpPlot(x,y,offsetXArgs=list(varwidth=TRUE),
ylab='Price',cex=.7,pch=21, col='#00000044',
bg='#00000011',yaxt='n',main='Quasirandom')
prettyYAxis(y)
#tukey
vpPlot(x,y,offsetXArgs=list(method='tukey'),
ylab='Price',cex=.7,pch=21, col='#00000044',
bg='#00000011',yaxt='n',main='Tukey')
prettyYAxis(y)
#boxplot
boxplot(y~x,main='Boxplot',ylab='Price',yaxt='n')
prettyYAxis(y)
#beanplot
beanplot(y~x,main='Beanplot',ylab='Price',yaxt='n')
prettyYAxis(y)
vioInput<-split(y,x)
labs<-names(vioInput)
names(vioInput)[1]<-'x'
#vioplot
do.call(vioplot,c(vioInput,list(names=labs,col='white')))
title(ylab='Price', main='Vioplot')
|
/scratch/gouwar.j/cran-all/cranData/vipor/inst/doc/methodComparison.R
|
### R code from vignette source 'usageExamples.Rnw'
###################################################
### code chunk number 1: package
###################################################
options(keep.source = TRUE, width = 60)
packageInfo <- packageDescription("vipor")
library(vipor)
packageKeywords<-"visualization, display, one dimensional, grouped, groups, violin, scatter, points, quasirandom, beeswarm, van der Corput"
###################################################
### code chunk number 2: vpPlot (eval = FALSE)
###################################################
## library(vipor)
## set.seed(12345)
## n<-100
## dat<-rnorm(n*2)
## labs<-rep(c('a','b'),n)
## vpPlot(labs,dat)
###################################################
### code chunk number 3: showVpPlot
###################################################
library(vipor)
set.seed(12345)
n<-100
dat<-rnorm(n*2)
labs<-rep(c('a','b'),n)
vpPlot(labs,dat)
###################################################
### code chunk number 4: vpOpts (eval = FALSE)
###################################################
## vpPlot(labs,dat,las=1,ylab='Data',col=rep(1:2,n))
## abline(h=0,lty=2)
###################################################
### code chunk number 5: showVpOpts
###################################################
vpPlot(labs,dat,las=1,ylab='Data',col=rep(1:2,n))
abline(h=0,lty=2)
###################################################
### code chunk number 6: vpFactors (eval = FALSE)
###################################################
## labs2<-factor(labs,levels=c('b','a'))
## vpPlot(labs2,dat,las=1,ylab='Data',col=rep(1:2,n))
## abline(h=0,lty=2)
###################################################
### code chunk number 7: showVpFactors
###################################################
labs2<-factor(labs,levels=c('b','a'))
vpPlot(labs2,dat,las=1,ylab='Data',col=rep(1:2,n))
abline(h=0,lty=2)
###################################################
### code chunk number 8: offsetX
###################################################
offsets<-offsetX(dat,labs)
head(offsets,4)
xPos<-vpPlot(labs,dat)
head(xPos,4)
xPos2<-rep(1:2,n)+offsets
head(xPos2,4)
all(xPos==xPos2)
###################################################
### code chunk number 9: distAdjust (eval = FALSE)
###################################################
## dat <- list(
## 'Normal'=rnorm(50),
## 'Dense normal'= rnorm(500),
## 'Bimodal'=c(rnorm(100), rnorm(100,5)),
## 'Extremes'=rcauchy(100)
## )
## par(mfrow=c(4,1), mar=c(2.5,3.1, 1.2, 0.5),mgp=c(2.1,.75,0),
## cex.axis=1.2,cex.lab=1.2,cex.main=1.2)
## dummy<-sapply(names(dat),function(label) {
## y<-dat[[label]]
## offsets <- list(
## 'defaults'=offsetX(y), # Default
## 'adjust=2'=offsetX(y, adjust=2), # More smoothing
## 'adjust=.1'=offsetX(y, adjust=0.1), # Tighter fit
## 'width=.1'=offsetX(y, width=0.1), # Less wide
## 'nbins=100'=offsetX(y, nbins=100) # Less bins
## )
## ids <- rep(1:length(offsets), each=length(y))
## plot(unlist(offsets) + ids, rep(y, length(offsets)), ylab='y value',
## xlab='', xaxt='n', pch=21,
## col='#00000099',bg='#00000033',las=1,main=label)
## axis(1, 1:length(offsets), names(offsets))
## })
###################################################
### code chunk number 10: showDistAdjust
###################################################
dat <- list(
'Normal'=rnorm(50),
'Dense normal'= rnorm(500),
'Bimodal'=c(rnorm(100), rnorm(100,5)),
'Extremes'=rcauchy(100)
)
par(mfrow=c(4,1), mar=c(2.5,3.1, 1.2, 0.5),mgp=c(2.1,.75,0),
cex.axis=1.2,cex.lab=1.2,cex.main=1.2)
dummy<-sapply(names(dat),function(label) {
y<-dat[[label]]
offsets <- list(
'defaults'=offsetX(y), # Default
'adjust=2'=offsetX(y, adjust=2), # More smoothing
'adjust=.1'=offsetX(y, adjust=0.1), # Tighter fit
'width=.1'=offsetX(y, width=0.1), # Less wide
'nbins=100'=offsetX(y, nbins=100) # Less bins
)
ids <- rep(1:length(offsets), each=length(y))
plot(unlist(offsets) + ids, rep(y, length(offsets)), ylab='y value',
xlab='', xaxt='n', pch=21,
col='#00000099',bg='#00000033',las=1,main=label)
axis(1, 1:length(offsets), names(offsets))
})
###################################################
### code chunk number 11: varwidth (eval = FALSE)
###################################################
## dat <- list(
## '10 points'=rnorm(10),
## '50 points'=rnorm(50,2),
## '200 points'=c(rnorm(400), rnorm(100,5)),
## '5000 points'= rnorm(5000,1)
## )
## labs<-rep(names(dat),sapply(dat,length))
## labs<-factor(labs,levels=unique(labs))
## vpPlot( labs,unlist(dat),offsetXArgs=list(varwidth=TRUE),
## las=1,ylab='Value',col='#00000066',bg='#00000022',pch=21)
###################################################
### code chunk number 12: showVarwidth
###################################################
dat <- list(
'10 points'=rnorm(10),
'50 points'=rnorm(50,2),
'200 points'=c(rnorm(400), rnorm(100,5)),
'5000 points'= rnorm(5000,1)
)
labs<-rep(names(dat),sapply(dat,length))
labs<-factor(labs,levels=unique(labs))
vpPlot( labs,unlist(dat),offsetXArgs=list(varwidth=TRUE),
las=1,ylab='Value',col='#00000066',bg='#00000022',pch=21)
###################################################
### code chunk number 13: vpBeaver (eval = FALSE)
###################################################
## y<-c(beaver1$temp,beaver2$temp)
## x<-rep(
## c('Beaver 1','Beaver 2'),
## c(nrow(beaver1),nrow(beaver2))
## )
## vpPlot(x,y,las=1, ylab='Body temperature',
## pch=21, col='#00000099',bg='#00000033')
###################################################
### code chunk number 14: showBeaver
###################################################
y<-c(beaver1$temp,beaver2$temp)
x<-rep(
c('Beaver 1','Beaver 2'),
c(nrow(beaver1),nrow(beaver2))
)
vpPlot(x,y,las=1, ylab='Body temperature',
pch=21, col='#00000099',bg='#00000033')
###################################################
### code chunk number 15: vpGene (eval = FALSE)
###################################################
## ints<-integrations[integrations$nearestGene>0,]
## y<-log(ints$nearestGene)
## x<-as.factor(paste(ints$study,ints$latent))
## activeCols<-c('Expressed'='#FF000033','Unexpressed'='#0000FF33')
## cols<-activeCols[ints$latent]
## par(mar=c(4,7,.1,.1))
## vpPlot(x,y,las=2, ylab='Log distance to gene',xaxt='n',
## pch=21, col=cols,bg=cols,cex=.7)
## uniqX<-levels(x)
## prettyX<-tapply(1:length(uniqX),sub('(Une|E)xpressed$','',uniqX),mean)
## axis(1,prettyX,names(prettyX),las=2)
## legend(grconvertX(0.01,from='ndc'),grconvertY(0.15,from='ndc'),
## names(activeCols),pch=21,col=cols,pt.bg=activeCols,xpd=NA)
###################################################
### code chunk number 16: showGene
###################################################
ints<-integrations[integrations$nearestGene>0,]
y<-log(ints$nearestGene)
x<-as.factor(paste(ints$study,ints$latent))
activeCols<-c('Expressed'='#FF000033','Unexpressed'='#0000FF33')
cols<-activeCols[ints$latent]
par(mar=c(4,7,.1,.1))
vpPlot(x,y,las=2, ylab='Log distance to gene',xaxt='n',
pch=21, col=cols,bg=cols,cex=.7)
uniqX<-levels(x)
prettyX<-tapply(1:length(uniqX),sub('(Une|E)xpressed$','',uniqX),mean)
axis(1,prettyX,names(prettyX),las=2)
legend(grconvertX(0.01,from='ndc'),grconvertY(0.15,from='ndc'),
names(activeCols),pch=21,col=cols,pt.bg=activeCols,xpd=NA)
###################################################
### code chunk number 17: ggPlot (eval = FALSE)
###################################################
## library(ggbeeswarm)
## n<-100
## dat<-rnorm(n*2)
## labs<-rep(c('a','b'),n)
## ggplot(mapping=aes(labs,dat))+geom_quasirandom()
###################################################
### code chunk number 18: showGg
###################################################
library(ggbeeswarm)
n<-100
dat<-rnorm(n*2)
labs<-rep(c('a','b'),n)
ggplot(mapping=aes(labs,dat))+geom_quasirandom()
|
/scratch/gouwar.j/cran-all/cranData/vipor/inst/doc/usageExamples.R
|
#' Calculate the K-Nearest Neighbor model domain applicability score
#'
#' This function fits a K-Nearest Neighbor (KNN) model to the provided data
#' and computes a domain applicability score based on PCA distances.
#'
#' @param featured The name of the response variable to predict.
#' @param train_data The training dataset containing predictor variables and the response variable.
#' @param knn_hyperparameters A list of hyperparameters for the KNN model, including:
#' - \code{neighbors}: The number of neighbors to consider.
#' - \code{weight_func}: The weight function to use.
#' - \code{dist_power}: The distance power parameter.
#' @param test_data The test dataset for making predictions.
#' @param threshold_value The threshold value used for computing domain scores.
#'
#' @return A data frame containing the computed domain scores for each observation in the test dataset.
#'
#' @export
#'
#' @import kknn
#'
#' @examples
#' set.seed(123)
#' library(dplyr)
#' featured <- "cd_2022"
#' # Adding jitter to original features
#' train_data = viral |>
#' transmute(cd_2022 = jitter(cd_2022), vl_2022 = jitter(vl_2022))
#' test_data = sero |>
#' transmute(cd_2022 = jitter(cd_2022), vl_2022 = jitter(vl_2022))
#' knn_hyperparameters <- list(neighbors = 5, weight_func = "optimal", dist_power = 0.3304783)
#' threshold_value <- 0.99
#' # Call the function
#' knn_domain_score(featured, train_data, knn_hyperparameters, test_data, threshold_value)
knn_domain_score <- function(featured, train_data, knn_hyperparameters, test_data, threshold_value) {
  # Build a tidymodels workflow: a bare recipe regressing the featured variable
  # on all remaining predictors, plus a KNN regression model with the supplied
  # hyperparameters fitted via the kknn engine.
  workflows::workflow() |>
    workflows::add_recipe(
      recipes::recipe(
        stats::as.formula(
          paste(featured, "~ .")
        ),
        data = train_data
      )
    ) |>
    workflows::add_model(
      parsnip::nearest_neighbor(
        neighbors = knn_hyperparameters$neighbors,
        weight_func = knn_hyperparameters$weight_func,
        dist_power = knn_hyperparameters$dist_power
      ) |>
        parsnip::set_engine("kknn") |>
        parsnip::set_mode("regression")
    ) |>
    # Fit on the training data and predict the test set.
    parsnip::fit(data = train_data) |>
    stats::predict(test_data) |>
    # Append the PCA-based applicability-domain distances for each test observation.
    dplyr::bind_cols(
      applicable::apd_pca(
        ~ .,
        data = train_data,
        threshold = threshold_value) |>
        applicable::score(test_data) |>
        dplyr::select(
          starts_with("distance")
        )
    )
}
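# A minimal sketch (not part of the package) of the PCA-distance idea shared by
# knn_domain_score() above and the MARS and neural-network variants below: fit
# applicable::apd_pca() on the training predictors, then score new observations.
# The toy data frames are illustrative only.
if (requireNamespace("applicable", quietly = TRUE)) {
  set.seed(1)
  toy_train <- data.frame(a = rnorm(30), b = rnorm(30))
  toy_test  <- data.frame(a = rnorm(5, mean = 2), b = rnorm(5, mean = 2))
  pca_dom <- applicable::apd_pca(~ ., data = toy_train, threshold = 0.99)
  scored  <- applicable::score(pca_dom, toy_test)
  scored[, grep("^distance", names(scored)), drop = FALSE]   # the domain-score columns
}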
|
/scratch/gouwar.j/cran-all/cranData/viraldomain/R/knn_domain_score.R
|
#' Calculate the MARS model domain applicability score
#'
#' This function fits a MARS (Multivariate Adaptive Regression Splines) model to
#' the provided data and computes a domain applicability score based on PCA distances.
#'
#' @param featured_col The name of the featured column.
#' @param train_data A data frame containing the training data.
#' @param mars_hyperparameters A list of hyperparameters for the MARS model, including:
#' - \code{num_terms}: The number of terms to include in the MARS model.
#' - \code{prod_degree}: The degree of interaction terms to include.
#' - \code{prune_method}: The method used for pruning the MARS model.
#' @param test_data A data frame containing the test data.
#' @param threshold_value The threshold value for the domain score.
#'
#' @return A tibble with the domain applicability scores.
#' @export
#'
#' @examples
#' library(viraldomain)
#' library(dplyr)
#'
#' # Set the seed for reproducibility
#' set.seed(1234)
#'
#' # Create a tibble with the training data
#' data(viral)
#'
#' # Number of imputations needed
#' num_imputations <- sum(viral$vl_2022 <= 40) # Count values below 40 cpm
#'
#' # Impute unique values
#' imputed_values <- unique(rexp(num_imputations, rate = 1/13))
#'
#' # Create a new tibble with mutated/imputed viral load
#' imputed_viral <- viral |>
#' mutate(imputed_vl_2022 = ifelse(vl_2022 <= 40, imputed_values, vl_2022),
#' log10_imputed_vl_2022 = log10(ifelse(vl_2022 <= 40, imputed_values, vl_2022)),
#' jittered_log10_imputed_vl_2022 = jitter(log10_imputed_vl_2022))
#'
#' # Create a new tibble with mutated/imputed cd4 counts
#' imputed_viral <- imputed_viral |>
#' mutate(
#' jittered_cd_2022 = ifelse(
#' duplicated(cd_2022),
#' cd_2022 + sample(1:100, length(cd_2022), replace = TRUE),
#' cd_2022
#' )
#' )
#'
#' # New data frame with mutated/imputed columns
#' imp_viral <- imputed_viral |>
#' select(jittered_cd_2022, jittered_log10_imputed_vl_2022) |>
#' scale() |>
#' as.data.frame()
#'
#' # Set the seed for reproducibility
#' set.seed(1234)
#'
#' # Create a tibble with the testing data
#' data(sero)
#'
#' # Number of imputations needed
#' num_imputations <- sum(sero$vl_2022 <= 40) # Count values below 40 cpm
#'
#' # Impute unique values
#' imputed_values <- unique(rexp(num_imputations, rate = 1/13))
#'
#' # Create a new tibble with mutated/imputed viral load
#' imputed_sero <- sero |>
#' mutate(imputed_vl_2022 = ifelse(vl_2022 <= 40, imputed_values, vl_2022),
#' log10_imputed_vl_2022 = log10(ifelse(vl_2022 <= 40, imputed_values, vl_2022)),
#' jittered_log10_imputed_vl_2022 = jitter(log10_imputed_vl_2022))
#'
#' # Create a new tibble with mutated/imputed cd
#' imputed_sero <- imputed_sero |>
#' mutate(
#' jittered_cd_2022 = ifelse(
#' duplicated(cd_2022),
#' cd_2022 + sample(1:100, length(cd_2022), replace = TRUE),
#' cd_2022
#' )
#' )
#'
#' # New data frame with mutated/imputed columns
#' imp_sero <- imputed_sero |>
#' select(jittered_cd_2022, jittered_log10_imputed_vl_2022) |>
#' scale() |>
#' as.data.frame()
#'
#' # Specify your function parameters
#' featured_col <- "jittered_cd_2022"
#' train_data <- imp_viral
#' mars_hyperparameters <- list(num_terms = 3, prod_degree = 1, prune_method = "none")
#' test_data <- imp_sero
#' threshold_value <- 0.99
#'
#' # Call the function
#' mars_domain_score(featured_col, train_data, mars_hyperparameters, test_data, threshold_value)
mars_domain_score <- function(featured_col, train_data, mars_hyperparameters, test_data, threshold_value) {
workflows::workflow() |>
workflows::add_recipe(recipes::recipe(stats::as.formula(paste(featured_col, "~ .")), data = train_data)) |>
workflows::add_model(parsnip::mars(num_terms = mars_hyperparameters$num_terms,
prod_degree = mars_hyperparameters$prod_degree,
prune_method = mars_hyperparameters$prune_method) |>
parsnip::set_engine("earth") |>
parsnip::set_mode("regression")) |>
parsnip::fit(data = train_data) |>
stats::predict(test_data) |>
dplyr::bind_cols(
applicable::apd_pca(~ ., data = train_data, threshold = threshold_value) |>
applicable::score(test_data) |> dplyr::select(starts_with("distance"))
)
}
|
/scratch/gouwar.j/cran-all/cranData/viraldomain/R/mars_domain_score.R
|
#' Calculate the Neural Network model domain applicability score
#'
#' This function fits a Neural Network model to the provided data and computes a
#' domain applicability score based on PCA distances.
#'
#' @import applicable
#' @import dplyr
#' @import nnet
#' @import parsnip
#' @import recipes
#' @import workflows
#' @importFrom stats as.formula
#' @importFrom stats predict
#'
#' @param featured_col The name of the featured column in the training data.
#' @param train_data The training data used to fit the Neural Network model.
#' @param nn_hyperparameters A list of Neural Network hyperparameters, including hidden_units, penalty, and epochs.
#' @param test_data The testing domain data used to calculate the domain applicability score.
#' @param threshold_value The threshold value for domain applicability scoring.
#'
#' @return A tibble with the domain applicability scores.
#' @export
#'
#' @examples
#' library(viraldomain)
#' library(dplyr)
#'
#' # Set the seed for reproducibility
#' set.seed(1234)
#'
#' # Create a tibble with the training data
#' data(viral)
#'
#' # Number of imputations needed
#' num_imputations <- sum(viral$vl_2022 <= 40) # Count values below 40 cpm
#'
#' # Impute unique values
#' imputed_values <- unique(rexp(num_imputations, rate = 1/13))
#'
#' # Create a new tibble with mutated/imputed viral load
#' imputed_viral <- viral |>
#' mutate(imputed_vl_2022 = ifelse(vl_2022 <= 40, imputed_values, vl_2022),
#' log10_imputed_vl_2022 = log10(ifelse(vl_2022 <= 40, imputed_values, vl_2022)),
#' jittered_log10_imputed_vl_2022 = jitter(log10_imputed_vl_2022))
#'
#' # Create a new tibble with mutated/imputed cd4 counts
#' imputed_viral <- imputed_viral |>
#' mutate(
#' jittered_cd_2022 = ifelse(
#' duplicated(cd_2022),
#' cd_2022 + sample(1:100, length(cd_2022), replace = TRUE),
#' cd_2022
#' )
#' )
#'
#' # New data frame with mutated/imputed columns
#' imp_viral <- imputed_viral |>
#' select(jittered_cd_2022, jittered_log10_imputed_vl_2022) |>
#' scale() |>
#' as.data.frame()
#'
#' # Set the seed for reproducibility
#' set.seed(1234)
#'
#' # Create a tibble with the testing data
#' data(sero)
#'
#' # Number of imputations needed
#' num_imputations <- sum(sero$vl_2022 <= 40) # Count values below 40 cpm
#'
#' # Impute unique values
#' imputed_values <- unique(rexp(num_imputations, rate = 1/13))
#'
#' # Create a new tibble with mutated/imputed viral load
#' imputed_sero <- sero |>
#' mutate(imputed_vl_2022 = ifelse(vl_2022 <= 40, imputed_values, vl_2022),
#' log10_imputed_vl_2022 = log10(ifelse(vl_2022 <= 40, imputed_values, vl_2022)),
#' jittered_log10_imputed_vl_2022 = jitter(log10_imputed_vl_2022))
#'
#' # Create a new tibble with mutated/imputed cd
#' imputed_sero <- imputed_sero |>
#' mutate(
#' jittered_cd_2022 = ifelse(
#' duplicated(cd_2022),
#' cd_2022 + sample(1:100, length(cd_2022), replace = TRUE),
#' cd_2022
#' )
#' )
#'
#' # New data frame with mutated/imputed columns
#' imp_sero <- imputed_sero |>
#' select(jittered_cd_2022, jittered_log10_imputed_vl_2022) |>
#' scale() |>
#' as.data.frame()
#'
#' # Specify your function parameters
#' featured_col <- "jittered_cd_2022"
#' train_data <- imp_viral
#' nn_hyperparameters <- list(hidden_units = 1, penalty = 0.3746312, epochs = 480)
#' test_data <- imp_sero
#' threshold_value <- 0.99
#'
#' # Call the function
#' nn_domain_score(featured_col, train_data, nn_hyperparameters, test_data, threshold_value)
nn_domain_score <- function(featured_col, train_data, nn_hyperparameters, test_data, threshold_value) {
workflows::workflow() |>
workflows::add_recipe(recipes::recipe(stats::as.formula(paste(featured_col, "~ .")), data = train_data)) |>
workflows::add_model(parsnip::mlp(hidden_units = nn_hyperparameters$hidden_units,
penalty = nn_hyperparameters$penalty,
epochs = nn_hyperparameters$epochs) |>
parsnip::set_engine("nnet") |>
parsnip::set_mode("regression")) |>
parsnip::fit(data = train_data) |>
stats::predict(test_data) |>
dplyr::bind_cols(
applicable::apd_pca(~ ., data = train_data, threshold = threshold_value) |>
applicable::score(test_data) |> dplyr::select(starts_with("distance"))
)
}
|
/scratch/gouwar.j/cran-all/cranData/viraldomain/R/nn_domain_score.R
|
#' Create a Normalized Domain Plot
#'
#' This function generates a domain plot for a normalized model based on PCA
#' distances of the provided data.
#'
#' @import nnet
#'
#' @param features A list containing the following elements:
#' - \code{featured_col}: The name of the featured column.
#' - \code{features_vl}: A character vector of feature names related to viral load.
#' - \code{features_cd}: A character vector of feature names related to cluster of differentiation.
#' @param train_data A data frame containing the training data.
#' @param test_data A data frame containing the test data.
#' @param treshold_value The threshold value for the domain plot.
#' @param impute_hyperparameters A list of hyperparameters for imputation, including:
#' - \code{indetect}: The undetectable viral load level.
#' - \code{tasa_exp}: The exponential distribution rate of undetectable values.
#' - \code{semi}: The seed for random number generation (for reproducibility).
#'
#' @return A domain plot visualizing the distances of imputed values.
#' @export
#'
#' @examples
#' data(viral)
#' data(sero)
#' # Adding "jittered_" prefix to original variable
#' features <- list(
#' featured_col = "jittered_cd_2022",
#' features_vl = "vl_2022",
#' features_cd = "cd_2022"
#' )
#' train_data = viral |>
#' dplyr::select("cd_2022", "vl_2022")
#' test_data = sero
#' treshold_value = 0.99
#' impute_hyperparameters = list(indetect = 40, tasa_exp = 1/13, semi = 123)
#' normalized_domain_plot(features, train_data, test_data, treshold_value, impute_hyperparameters)
normalized_domain_plot <- function(features, train_data, test_data, treshold_value, impute_hyperparameters) {
set.seed(impute_hyperparameters$semi)
applicable::apd_pca(
x = recipes::recipe(
stats::as.formula(paste(features$featured_col, "~.")),
data = train_data |>
dplyr::transmute(
dplyr::across(
dplyr::all_of(features$features_vl),
~ {
imputed_values <- ifelse(. <= impute_hyperparameters$indetect,
train_data |>
dplyr::filter(. <= impute_hyperparameters$indetect) |>
dplyr::count() |>
dplyr::pull(n) |>
stats::rexp(rate = impute_hyperparameters$tasa_exp),
.)
jittered_values <- jitter(log10(imputed_values))
pmax(jittered_values, 0.01) # Ensure values are at least 0.01
},
.names = "jittered_log10_imputed_{.col}"),
dplyr::across(
dplyr::all_of(features$features_cd),
~ jitter(.),
.names = "jittered_{.col}"
)
) |>
scale() |>
dplyr::as_tibble()) |>
recipes::step_normalize(recipes::all_numeric()),
data = test_data |>
dplyr::transmute(
dplyr::across(
dplyr::all_of(features$features_vl),
~ {
imputed_values <- ifelse(. <= impute_hyperparameters$indetect,
test_data |>
dplyr::filter(. <= impute_hyperparameters$indetect) |>
dplyr::count() |>
dplyr::pull(n) |>
stats::rexp(rate = impute_hyperparameters$tasa_exp),
.)
jittered_values <- jitter(log10(imputed_values))
pmax(jittered_values, 0.01) # Ensure values are at least 0.01
},
.names = "jittered_log10_imputed_{.col}"
),
dplyr::across(
dplyr::all_of(features$features_cd),
~ jitter(.),
.names = "jittered_{.col}"
)
) |>
scale() |>
dplyr::as_tibble(),
treshold_value
) |>
applicable::autoplot.apd_pca() + ggplot2::labs(x = "normalized domain")
}
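# A minimal sketch (not part of the package) of the imputation step used inside
# normalized_domain_plot() above and simple_domain_plot() below: viral loads at
# or below the detection limit are replaced with exponential draws, then
# log10-transformed, jittered and floored at 0.01. The toy vector and seed are
# illustrative only.
set.seed(123)
vl <- c(20, 40, 1500, 80000)    # two values at or below the 40 cpm limit
indetect <- 40
tasa_exp <- 1 / 13
below <- vl <= indetect
vl[below] <- stats::rexp(sum(below), rate = tasa_exp)
pmax(jitter(log10(vl)), 0.01)   # jittered log10 values, floored at 0.01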
|
/scratch/gouwar.j/cran-all/cranData/viraldomain/R/normalized_domain_plot.R
|
#' Seropositive Data for Applicability Domain Testing
#'
#' This dataset is designed for testing the applicability domain of methods
#' related to HIV research. It provides a tibble with 53 rows and 2 columns
#' containing numeric measurements of CD4 lymphocyte counts (cd_2022) and viral
#' load (vl_2022) for seropositive individuals in 2022. These measurements are
#' vital indicators of HIV disease status. This dataset is ideal for evaluating
#' the performance and suitability of various HIV-predictive models and as an
#' aid in developing diagnostic tools within a seropositive context.
#'
#' @docType data
#'
#' @usage data(sero)
#'
#' @format A tibble (data frame) with 53 rows and 2 columns.
#'
#' @keywords datasets
#'
#' @note
#' To explore more rows of this dataset, you can use the `print(n = ...)` function.
#'
#' @author
#'Juan Pablo Acuña González <[email protected]>
#'
#' @examples
#' data(sero)
#' sero
"sero"
|
/scratch/gouwar.j/cran-all/cranData/viraldomain/R/sero-data.R
|
#' Create a Simple Domain Plot
#'
#' This function generates a domain plot for a simple model based on PCA
#' distances of the provided data.
#'
#' @import applicable
#' @import dplyr
#' @import earth
#' @import ggplot2
#' @import recipes
#' @import vdiffr
#' @importFrom stats as.formula
#' @importFrom stats rexp
#'
#' @param features A list of features according to their modeling roles. It should contain the following elements:
#' - 'featured_col': Name of the featured column in the training data. When specifying the featured column, use "jittered_" as a prefix to the featured variable of interest.
#' - 'features_vl': Names of the columns containing viral load data (numeric values).
#' - 'features_cd': Names of the columns containing CD4 data (numeric values).
#' @param train_data The training data used to fit the MARS model.
#' @param test_data The testing domain data used to calculate PCA distances.
#' @param treshold_value The threshold for domain applicability scoring.
#' @param impute_hyperparameters A list of parameters for imputation, including:
#' - \code{indetect}: The level below which viral load is considered undetectable.
#' - \code{tasa_exp}: The rate of the exponential distribution used to impute undetectable values.
#' - \code{semi}: The seed for random number generation (for reproducibility).
#'
#' @return A domain plot showing PCA distances.
#' @export
#'
#' @examples
#' data(viral)
#' data(sero)
#' # Adding the "jittered_" prefix to the original variable
#' features <- list(
#' featured_col = "jittered_cd_2022",
#' features_vl = "vl_2022",
#' features_cd = "cd_2022"
#' )
#' train_data = viral |>
#' dplyr::select("cd_2022", "vl_2022")
#' test_data = sero
#' treshold_value = 0.99
#' impute_hyperparameters = list(indetect = 40, tasa_exp = 1/13, semi = 123)
#' simple_domain_plot(features, train_data, test_data, treshold_value, impute_hyperparameters)
simple_domain_plot <- function(features, train_data, test_data, treshold_value, impute_hyperparameters) {
set.seed(impute_hyperparameters$semi)
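  # Same preprocessing as in normalized_domain_plot(): undetectable viral
  # loads (<= indetect) are imputed from an exponential distribution
  # (rate = tasa_exp), log10-transformed, jittered and floored at 0.01;
  # CD4 counts are jittered; the columns are scaled. Here the recipe has no
  # normalization step. The applicability-domain PCA (applicable::apd_pca)
  # is then built and its distances plotted via applicable::autoplot.apd_pca().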
applicable::apd_pca(
x = recipes::recipe(
stats::as.formula(paste(features$featured_col, "~.")),
data = train_data |>
dplyr::transmute(
dplyr::across(
dplyr::all_of(features$features_vl),
~ {
imputed_values <- ifelse(. <= impute_hyperparameters$indetect,
train_data |>
dplyr::filter(. <= impute_hyperparameters$indetect) |>
dplyr::count() |>
dplyr::pull(n) |>
stats::rexp(rate = impute_hyperparameters$tasa_exp),
.)
jittered_values <- jitter(log10(imputed_values))
pmax(jittered_values, 0.01) # Ensure values are at least 0.01
},
.names = "jittered_log10_imputed_{.col}"),
dplyr::across(
dplyr::all_of(features$features_cd),
~ jitter(.),
.names = "jittered_{.col}"
)
) |>
scale() |>
dplyr::as_tibble()),
data = test_data |>
dplyr::transmute(
dplyr::across(
dplyr::all_of(features$features_vl),
~ {
imputed_values <- ifelse(. <= impute_hyperparameters$indetect,
test_data |>
dplyr::filter(. <= impute_hyperparameters$indetect) |>
dplyr::count() |>
dplyr::pull(n) |>
stats::rexp(rate = impute_hyperparameters$tasa_exp),
.)
jittered_values <- jitter(log10(imputed_values))
pmax(jittered_values, 0.01) # Ensure values are at least 0.01
},
.names = "jittered_log10_imputed_{.col}"
),
dplyr::across(
dplyr::all_of(features$features_cd),
~ jitter(.),
.names = "jittered_{.col}"
)
) |>
scale() |>
dplyr::as_tibble(),
treshold_value
) |>
applicable::autoplot.apd_pca() + ggplot2::labs(x = "simple domain")
}
|
/scratch/gouwar.j/cran-all/cranData/viraldomain/R/simple_domain_plot.R
|
#' Training Data for Applicability Domain Analysis
#'
#' This dataset contains training data for viral load models applicability
#' domain analysis. It includes CD4 and viral load measurements for different years.
#'
#' @docType data
#'
#' @usage data(train)
#'
#' @format A tibble (data frame) with 26 rows and 6 columns.
#'
#' @keywords datasets
#'
#' @note
#' To explore more rows of this dataset, use `print(n = ...)`.
#'
#' @author
#' Juan Pablo Acuña González <[email protected]>
#'
#' @examples
#' data(train)
#' train
"train"
|
/scratch/gouwar.j/cran-all/cranData/viraldomain/R/train-data.R
|
#' Predictive Modeling Data for Viral Load and CD4 Lymphocyte Counts
#'
#' This dataset serves as input for predictive modeling tasks related to HIV
#' research. It contains numeric measurements of CD4 lymphocyte counts (cd) and
#' viral load (vl) at three different time points: 2019, 2021, and 2022. These
#' measurements are crucial indicators of HIV disease progression.
#'
#' @docType data
#'
#' @usage data(viral)
#'
#' @format A tibble (data frame) with 35 rows and 6 columns.
#'
#' @keywords datasets
#'
#' @note
#' To explore more rows of this dataset, use `print(n = ...)`.
#'
#' @author
#' Juan Pablo Acuña González <[email protected]>
#'
#' @examples
#' data(viral)
#' viral
"viral"
|
/scratch/gouwar.j/cran-all/cranData/viraldomain/R/viral-data.R
|
#' Select best model
#'
#' viralmodel returns the best hyperparameter combination (selected by RMSE) for a chosen model
#'
#' @import earth
#' @import nnet
#' @importFrom stats as.formula
#'
#' @param x A data frame containing the predictors and the target variable
#' @param semilla A numeric value used as the random seed
#' @param target A character value naming the target variable
#' @param pliegues A numeric value giving the number of cross-validation folds
#' @param repeticiones A numeric value giving the number of cross-validation repeats
#' @param rejilla A numeric value giving the size of the tuning grid
#' @param modelo A character value naming the workflow to extract (e.g. "simple_MARS")
#'
#' @return A table with the best hyperparameters for the selected model
#' @export
#'
#' @examples
#' cd_2019 <- c(824, 169, 342, 423, 441, 507, 559,
#' 173, 764, 780, 244, 527, 417, 800,
#' 602, 494, 345, 780, 780, 527, 556,
#' 559, 238, 288, 244, 353, 169, 556,
#' 824, 169, 342, 423, 441, 507, 559)
#' vl_2019 <- c(40, 11388, 38961, 40, 75, 4095, 103,
#' 11388, 46, 103, 11388, 40, 0, 11388,
#' 0, 4095, 40, 93, 49, 49, 49,
#' 4095, 6837, 38961, 38961, 0, 0, 93,
#' 40, 11388, 38961, 40, 75, 4095, 103)
#' cd_2021 <- c(992, 275, 331, 454, 479, 553, 496,
#' 230, 605, 432, 170, 670, 238, 238,
#' 634, 422, 429, 513, 327, 465, 479,
#' 661, 382, 364, 109, 398, 209, 1960,
#' 992, 275, 331, 454, 479, 553, 496)
#' vl_2021 <- c(80, 1690, 5113, 71, 289, 3063, 0,
#' 262, 0, 15089, 13016, 1513, 60, 60,
#' 49248, 159308, 56, 0, 516675, 49, 237,
#' 84, 292, 414, 26176, 62, 126, 93,
#' 80, 1690, 5113, 71, 289, 3063, 0)
#' cd_2022 <- c(700, 127, 127, 547, 547, 547, 777,
#' 149, 628, 614, 253, 918, 326, 326,
#' 574, 361, 253, 726, 659, 596, 427,
#' 447, 326, 253, 248, 326, 260, 918,
#' 700, 127, 127, 547, 547, 547, 777)
#' vl_2022 <- c(0, 0, 53250, 0, 40, 1901, 0,
#' 955, 0, 0, 0, 0, 40, 0,
#' 49248, 159308, 56, 0, 516675, 49, 237,
#' 0, 23601, 0, 40, 0, 0, 0,
#' 0, 0, 0, 0, 0, 0, 0)
#' x <- cbind(cd_2019, vl_2019, cd_2021, vl_2021, cd_2022, vl_2022) |> as.data.frame()
#' semilla <- 123
#' target <- "cd_2022"
#' pliegues <- 2
#' repeticiones <- 1
#' rejilla <- 1
#' modelo <- "simple_MARS"
#' viralmodel(x, semilla, target, pliegues, repeticiones, rejilla, modelo)
viralmodel<- function(x, semilla, target, pliegues, repeticiones, rejilla, modelo) {
set.seed(semilla)
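  # Cross three preprocessors (raw workflow variables, normalized predictors,
  # and normalized + polynomial + interaction terms) with three tunable
  # models (MARS, single-layer neural network, KNN) in a workflow set, tune
  # every workflow over a grid of size `rejilla` via repeated v-fold
  # cross-validation on the training portion of an initial split, and return
  # the RMSE-best hyperparameters of the workflow named in `modelo`.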
workflowsets::workflow_set(
preproc = list( simple = workflows::workflow_variables(outcomes = tidyselect::all_of(target), predictors = tidyselect::everything()),
normalized = recipes::recipe(stats::as.formula(paste(target,"~ .")), data = x) |>
recipes::step_normalize(recipes::all_predictors()),
full_quad = recipes::recipe(stats::as.formula(paste(target,"~ .")), data = x) |>
recipes::step_normalize(recipes::all_predictors()) |>
recipes::step_poly(recipes::all_predictors()) |>
recipes::step_interact(~ all_predictors():all_predictors())),
models = list(MARS = parsnip::mars(prod_degree = parsnip::tune(), num_terms = parsnip::tune(), prune_method = parsnip::tune()) |>
parsnip::set_engine("earth") |>
parsnip::set_mode("regression"),
neural_network = parsnip::mlp(hidden_units = parsnip::tune(), penalty = parsnip::tune(), epochs = parsnip::tune()) |>
parsnip::set_engine("nnet", MaxNWts = 2600) |>
parsnip::set_mode("regression"),
KNN = parsnip::nearest_neighbor(neighbors = parsnip::tune(), dist_power = parsnip::tune(), weight_func = parsnip::tune()) |>
parsnip::set_engine("kknn") |>
                                    parsnip::set_mode("regression"))
) |>
workflowsets::workflow_map(
seed = semilla,
resamples = rsample::initial_split(x) |>
rsample::training() |>
rsample::vfold_cv(v = pliegues, repeats = repeticiones),
grid = rejilla,
control = tune::control_grid(
save_pred = TRUE,
parallel_over = "everything",
save_workflow = TRUE
)
) |>
workflowsets::extract_workflow_set_result(modelo) |>
tune::select_best(metric = "rmse") |>
as.data.frame()
}
|
/scratch/gouwar.j/cran-all/cranData/viralmodels/R/viralmodel.R
|
#' Competing models table
#'
#' viraltab trains and optimizes a series of regression models for viral load
#' or CD4 counts
#'
#' @import dplyr
#' @import earth
#' @import kknn
#' @import nnet
#' @import parsnip
#' @import recipes
#' @import rsample
#' @import tidyselect
#' @import tune
#' @import workflows
#' @import workflowsets
#' @importFrom stats as.formula
#'
#' @param x A data frame containing the predictors and the target variable
#' @param semilla A numeric value used as the random seed
#' @param target A character value naming the target variable
#' @param pliegues A numeric value giving the number of cross-validation folds
#' @param repeticiones A numeric value giving the number of cross-validation repeats
#' @param rejilla A numeric value giving the size of the tuning grid
#'
#' @return A table of competing models
#' @export
#'
#' @examples
#' cd_2019 <- c(824, 169, 342, 423, 441, 507, 559,
#' 173, 764, 780, 244, 527, 417, 800,
#' 602, 494, 345, 780, 780, 527, 556,
#' 559, 238, 288, 244, 353, 169, 556,
#' 824, 169, 342, 423, 441, 507, 559)
#' vl_2019 <- c(40, 11388, 38961, 40, 75, 4095, 103,
#' 11388, 46, 103, 11388, 40, 0, 11388,
#' 0, 4095, 40, 93, 49, 49, 49,
#' 4095, 6837, 38961, 38961, 0, 0, 93,
#' 40, 11388, 38961, 40, 75, 4095, 103)
#' cd_2021 <- c(992, 275, 331, 454, 479, 553, 496,
#' 230, 605, 432, 170, 670, 238, 238,
#' 634, 422, 429, 513, 327, 465, 479,
#' 661, 382, 364, 109, 398, 209, 1960,
#' 992, 275, 331, 454, 479, 553, 496)
#' vl_2021 <- c(80, 1690, 5113, 71, 289, 3063, 0,
#' 262, 0, 15089, 13016, 1513, 60, 60,
#' 49248, 159308, 56, 0, 516675, 49, 237,
#' 84, 292, 414, 26176, 62, 126, 93,
#' 80, 1690, 5113, 71, 289, 3063, 0)
#' cd_2022 <- c(700, 127, 127, 547, 547, 547, 777,
#' 149, 628, 614, 253, 918, 326, 326,
#' 574, 361, 253, 726, 659, 596, 427,
#' 447, 326, 253, 248, 326, 260, 918,
#' 700, 127, 127, 547, 547, 547, 777)
#' vl_2022 <- c(0, 0, 53250, 0, 40, 1901, 0,
#' 955, 0, 0, 0, 0, 40, 0,
#' 49248, 159308, 56, 0, 516675, 49, 237,
#' 0, 23601, 0, 40, 0, 0, 0,
#' 0, 0, 0, 0, 0, 0, 0)
#' x <- cbind(cd_2019, vl_2019, cd_2021, vl_2021, cd_2022, vl_2022) |> as.data.frame()
#' semilla <- 123
#' target <- "cd_2022"
#' pliegues <- 2
#' repeticiones <- 1
#' rejilla <- 1
#' viraltab(x, semilla, target, pliegues, repeticiones, rejilla)
viraltab <- function(x, semilla, target, pliegues, repeticiones, rejilla) {
set.seed(semilla)
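  # Build and tune the same three-preprocessor x three-model workflow set
  # (MARS, neural network, KNN) by grid search with repeated v-fold
  # cross-validation, then rank all tuned workflows with
  # workflowsets::rank_results() and return the ranking rounded to two
  # decimals.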
workflowsets::workflow_set(
preproc = list( simple = workflows::workflow_variables(outcomes = tidyselect::all_of(target), predictors = tidyselect::everything()),
normalized = recipes::recipe(stats::as.formula(paste(target,"~ .")), data = x) |>
recipes::step_normalize(recipes::all_predictors()),
full_quad = recipes::recipe(stats::as.formula(paste(target,"~ .")), data = x) |>
recipes::step_normalize(recipes::all_predictors()) |>
recipes::step_poly(recipes::all_predictors()) |>
recipes::step_interact(~ all_predictors():all_predictors())),
models = list(MARS = parsnip::mars(prod_degree = parsnip::tune(), num_terms = parsnip::tune(), prune_method = parsnip::tune()) |>
parsnip::set_engine("earth") |>
parsnip::set_mode("regression"),
neural_network = parsnip::mlp(hidden_units = parsnip::tune(), penalty = parsnip::tune(), epochs = parsnip::tune()) |>
parsnip::set_engine("nnet", MaxNWts = 2600) |>
parsnip::set_mode("regression"),
KNN = parsnip::nearest_neighbor(neighbors = parsnip::tune(), dist_power = parsnip::tune(), weight_func = parsnip::tune()) |>
parsnip::set_engine("kknn") |>
parsnip::set_mode("regression"))
) |>
workflowsets::workflow_map(
seed = semilla,
resamples = rsample::initial_split(x) |>
rsample::training() |>
rsample::vfold_cv(v = pliegues, repeats = repeticiones),
grid = rejilla,
control = tune::control_grid(
save_pred = TRUE,
parallel_over = "everything",
save_workflow = TRUE
)
) |>
workflowsets::rank_results() |>
as.data.frame() |>
dplyr::mutate_if(is.numeric, round, digits = 2)
}
|
/scratch/gouwar.j/cran-all/cranData/viralmodels/R/viraltab.R
|
#' Competing models plot
#'
#' viralvis plots the rankings of a series of regression models for viral load
#' or CD4 counts
#'
#' @import earth
#' @import nnet
#' @import parsnip
#' @import recipes
#' @import rsample
#' @import tidyselect
#' @import tune
#' @import vdiffr
#' @import workflows
#' @import workflowsets
#' @importFrom stats as.formula
#'
#' @param x A data frame containing the predictors and the target variable
#' @param semilla A numeric value used as the random seed
#' @param target A character value naming the target variable
#' @param pliegues A numeric value giving the number of cross-validation folds
#' @param repeticiones A numeric value giving the number of cross-validation repeats
#' @param rejilla A numeric value giving the size of the tuning grid
#'
#' @return A plot of ranking models
#' @export
#'
#' @examples
#' cd_2019 <- c(824, 169, 342, 423, 441, 507, 559,
#' 173, 764, 780, 244, 527, 417, 800,
#' 602, 494, 345, 780, 780, 527, 556,
#' 559, 238, 288, 244, 353, 169, 556,
#' 824, 169, 342, 423, 441, 507, 559)
#' vl_2019 <- c(40, 11388, 38961, 40, 75, 4095, 103,
#' 11388, 46, 103, 11388, 40, 0, 11388,
#' 0, 4095, 40, 93, 49, 49, 49,
#' 4095, 6837, 38961, 38961, 0, 0, 93,
#' 40, 11388, 38961, 40, 75, 4095, 103)
#' cd_2021 <- c(992, 275, 331, 454, 479, 553, 496,
#' 230, 605, 432, 170, 670, 238, 238,
#' 634, 422, 429, 513, 327, 465, 479,
#' 661, 382, 364, 109, 398, 209, 1960,
#' 992, 275, 331, 454, 479, 553, 496)
#' vl_2021 <- c(80, 1690, 5113, 71, 289, 3063, 0,
#' 262, 0, 15089, 13016, 1513, 60, 60,
#' 49248, 159308, 56, 0, 516675, 49, 237,
#' 84, 292, 414, 26176, 62, 126, 93,
#' 80, 1690, 5113, 71, 289, 3063, 0)
#' cd_2022 <- c(700, 127, 127, 547, 547, 547, 777,
#' 149, 628, 614, 253, 918, 326, 326,
#' 574, 361, 253, 726, 659, 596, 427,
#' 447, 326, 253, 248, 326, 260, 918,
#' 700, 127, 127, 547, 547, 547, 777)
#' vl_2022 <- c(0, 0, 53250, 0, 40, 1901, 0,
#' 955, 0, 0, 0, 0, 40, 0,
#' 49248, 159308, 56, 0, 516675, 49, 237,
#' 0, 23601, 0, 40, 0, 0, 0,
#' 0, 0, 0, 0, 0, 0, 0)
#' x <- cbind(cd_2019, vl_2019, cd_2021, vl_2021, cd_2022, vl_2022) |> as.data.frame()
#' semilla <- 123
#' target <- "cd_2022"
#' pliegues <- 2
#' repeticiones <- 1
#' rejilla <- 1
#' viralvis(x, semilla, target, pliegues, repeticiones, rejilla)
viralvis <- function(x, semilla, target, pliegues, repeticiones, rejilla) {
set.seed(semilla)
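  # Build and tune the same workflow set (three preprocessors x MARS, neural
  # network and KNN), then plot the workflows ranked by RMSE, one point per
  # workflow, using tune::autoplot().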
workflowsets::workflow_set(
preproc = list(simple = workflows::workflow_variables(outcomes = tidyselect::all_of(target), predictors = tidyselect::everything()),
normalized = recipes::recipe(stats::as.formula(paste(target,"~ .")), data = x) |>
recipes::step_normalize(recipes::all_predictors()),
full_quad = recipes::recipe(stats::as.formula(paste(target,"~ .")), data = x) |>
recipes::step_normalize(recipes::all_predictors()) |>
recipes::step_poly(recipes::all_predictors()) |>
recipes::step_interact(~ all_predictors():all_predictors())),
models = list(MARS = parsnip::mars(prod_degree = parsnip::tune(), num_terms = parsnip::tune(), prune_method = parsnip::tune()) |>
parsnip::set_engine("earth") |>
parsnip::set_mode("regression"),
neural_network = parsnip::mlp(hidden_units = parsnip::tune(), penalty = parsnip::tune(), epochs = parsnip::tune()) |>
parsnip::set_engine("nnet", MaxNWts = 2600) |>
parsnip::set_mode("regression"),
KNN = parsnip::nearest_neighbor(neighbors = parsnip::tune(), dist_power = parsnip::tune(), weight_func = parsnip::tune()) |>
parsnip::set_engine("kknn") |>
parsnip::set_mode("regression"))
) |>
workflowsets::workflow_map(
seed = semilla,
resamples = rsample::initial_split(x) |>
rsample::training() |>
rsample::vfold_cv(v = pliegues, repeats = repeticiones),
grid = rejilla,
control = tune::control_grid(
save_pred = TRUE,
parallel_over = "everything",
save_workflow = TRUE
)
) |>
tune::autoplot(
rank_metric = "rmse", # <- how to order models
metric = "rmse", # <- which metric to visualize
select_best = TRUE # <- one point per workflow
)
}
|
/scratch/gouwar.j/cran-all/cranData/viralmodels/R/viralvis.R
|
#' Global Visualization of Feature Importance for Cubist Rules Model
#'
#' This function generates a visualization for the global feature importance of
#' a Cubist Rules (CR) model trained on HIV data with specified
#' hyperparameters.
#'
#' @param vip_featured The name of the response variable to explain.
#' @param hiv_data The training dataset containing predictor variables and the response variable.
#' @param cr_hyperparameters A list of hyperparameters for the CR model, including:
#' - \code{committees}: The number of committees to consider.
#' - \code{neighbors}: The number of neighbors to consider.
#' @param vip_train The dataset used for training the CR model.
#' @param v_train The response variable used for training the CR model.
#'
#' @return A visualization of global feature importance for the CR model.
#'
#' @export
#'
#' @examples
#' library(dplyr)
#' library(rules)
#' library(Cubist)
#' set.seed(123)
#' hiv_data <- train2
#' cr_hyperparameters <- list(neighbors = 5, committees = 58)
#' vip_featured <- c("cd_2022")
#' vip_features <- c("cd_2019", "vl_2019", "cd_2021", "vl_2021", "vl_2022")
#' vip_train <- train2 |>
#' dplyr::select(rsample::all_of(vip_features))
#' v_train <- train2 |>
#' dplyr::select(rsample::all_of(vip_featured))
#' glob_cr_vis(vip_featured, hiv_data, cr_hyperparameters, vip_train, v_train)
glob_cr_vis <- function(vip_featured, hiv_data, cr_hyperparameters, vip_train, v_train) {
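  # Fit a Cubist Rules workflow with the supplied committees and neighbors,
  # wrap it in a DALEX explainer over vip_train/v_train, compute
  # permutation-based variable importance with DALEX::model_parts(), and
  # plot the result.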
DALEXtra::explain_tidymodels(workflows::workflow() |>
workflows::add_recipe(recipes::recipe(stats::as.formula(paste(vip_featured,"~.")), data = hiv_data)) |>
workflows::add_model(parsnip::cubist_rules(
committees = cr_hyperparameters$committees,
neighbors = cr_hyperparameters$neighbors) |>
parsnip::set_engine("Cubist")) |>
parsnip::fit(data = hiv_data),
data = vip_train,
y = v_train,
label = "cubist",
verbose = FALSE) |>
DALEX::model_parts() |>
plot()
}
|
/scratch/gouwar.j/cran-all/cranData/viralx/R/glob_cub_vis.R
|
#' Global Visualization of Feature Importance for K-Nearest Neighbor Model
#'
#' This function generates a visualization for the global feature importance of
#' a K-Nearest Neighbors (KNN) model trained on HIV data with specified
#' hyperparameters.
#'
#' @param vip_featured The name of the response variable to explain.
#' @param hiv_data The training dataset containing predictor variables and the response variable.
#' @param knn_hyperparameters A list of hyperparameters for the KNN model, including:
#' - \code{neighbors}: The number of neighbors to consider.
#' - \code{weight_func}: The weight function to use.
#' - \code{dist_power}: The distance power parameter.
#' @param vip_train The dataset used for training the KNN model.
#' @param v_train The response variable used for training the KNN model.
#'
#' @return A visualization of global feature importance for the KNN model.
#'
#' @export
#'
#' @examples
#' library(dplyr)
#' set.seed(123)
#' hiv_data <- train2
#' knn_hyperparameters <- list(neighbors = 5, weight_func = "optimal", dist_power = 0.3304783)
#' vip_featured <- c("cd_2022")
#' vip_features <- c("cd_2019", "vl_2019", "cd_2021", "vl_2021", "vl_2022")
#' vip_train <- train2 |>
#' dplyr::select(rsample::all_of(vip_features))
#' v_train <- train2 |>
#' dplyr::select(rsample::all_of(vip_featured))
#' glob_knn_vis(vip_featured, hiv_data, knn_hyperparameters, vip_train, v_train)
glob_knn_vis <- function(vip_featured, hiv_data, knn_hyperparameters, vip_train, v_train) {
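  # Fit a KNN workflow on normalized predictors with the supplied
  # hyperparameters, wrap it in a DALEX explainer over vip_train/v_train,
  # compute permutation-based variable importance with DALEX::model_parts(),
  # and plot the result.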
DALEXtra::explain_tidymodels(workflows::workflow() |>
workflows::add_recipe(recipes::recipe(stats::as.formula(paste(vip_featured,"~.")), data = hiv_data) |>
recipes::step_normalize(recipes::all_predictors())) |>
workflows::add_model(parsnip::nearest_neighbor(
neighbors = knn_hyperparameters$neighbors,
weight_func = knn_hyperparameters$weight_func,
dist_power = knn_hyperparameters$dist_power) |>
parsnip::set_engine("kknn") |>
parsnip::set_mode("regression")) |>
parsnip::fit(data = hiv_data),
data = vip_train,
y = v_train,
label = "knn + normalized",
verbose = FALSE) |>
DALEX::model_parts() |>
plot()
}
|
/scratch/gouwar.j/cran-all/cranData/viralx/R/glob_knn_vis.R
|
#' Global Visualization of Feature Importance for Neural Network Model
#'
#' The `glob_nn_vis` function generates a global visualization of
#' permutation-based feature importance for a neural network model. It uses
#' the DALEXtra package to build an explainer for the model and then plots
#' the global importance of each predictor.
#'
#' @param vip_featured A character value specifying the featured variable of interest.
#' @param hiv_data A data frame containing the HIV research data used for model training.
#' @param hu A numeric value specifying the number of hidden units in the neural network model.
#' @param plty A numeric value specifying the penalty parameter for the neural network model.
#' @param epo A numeric value specifying the number of epochs (training iterations) for the neural network model.
#' @param vip_train A data frame containing the training data used to fit the neural network model.
#' @param v_train A numeric vector representing the response variable corresponding to the training data.
#'
#' @return A global visualization of feature importance for the specified neural network model.
#'
#' @export
#'
#' @examples
#' library(dplyr)
#' library(rsample)
#' cd_2019 <- c(824, 169, 342, 423, 441, 507, 559,
#' 173, 764, 780, 244, 527, 417, 800,
#' 602, 494, 345, 780, 780, 527, 556,
#' 559, 238, 288, 244, 353, 169, 556,
#' 824, 169, 342, 423, 441, 507, 559)
#' vl_2019 <- c(40, 11388, 38961, 40, 75, 4095, 103,
#' 11388, 46, 103, 11388, 40, 0, 11388,
#' 0, 4095, 40, 93, 49, 49, 49,
#' 4095, 6837, 38961, 38961, 0, 0, 93,
#' 40, 11388, 38961, 40, 75, 4095, 103)
#' cd_2021 <- c(992, 275, 331, 454, 479, 553, 496,
#' 230, 605, 432, 170, 670, 238, 238,
#' 634, 422, 429, 513, 327, 465, 479,
#' 661, 382, 364, 109, 398, 209, 1960,
#' 992, 275, 331, 454, 479, 553, 496)
#' vl_2021 <- c(80, 1690, 5113, 71, 289, 3063, 0,
#' 262, 0, 15089, 13016, 1513, 60, 60,
#' 49248, 159308, 56, 0, 516675, 49, 237,
#' 84, 292, 414, 26176, 62, 126, 93,
#' 80, 1690, 5113, 71, 289, 3063, 0)
#' cd_2022 <- c(700, 127, 127, 547, 547, 547, 777,
#' 149, 628, 614, 253, 918, 326, 326,
#' 574, 361, 253, 726, 659, 596, 427,
#' 447, 326, 253, 248, 326, 260, 918,
#' 700, 127, 127, 547, 547, 547, 777)
#' vl_2022 <- c(0, 0, 53250, 0, 40, 1901, 0,
#' 955, 0, 0, 0, 0, 40, 0,
#' 49248, 159308, 56, 0, 516675, 49, 237,
#' 0, 23601, 0, 40, 0, 0, 0,
#' 0, 0, 0, 0, 0, 0, 0)
#' x <- cbind(cd_2019, vl_2019, cd_2021, vl_2021, cd_2022, vl_2022) |>
#' as.data.frame()
#' set.seed(123)
#' hi_data <- rsample::initial_split(x)
#' set.seed(123)
#' hiv_data <- hi_data |>
#' rsample::training()
#' hu <- 5
#' plty <- 1.131656e-09
#' epo <- 176
#' vip_featured <- c("cd_2022")
#' vip_features <- c("cd_2019", "vl_2019", "cd_2021", "vl_2021", "vl_2022")
#' set.seed(123)
#' vi_train <- rsample::initial_split(x)
#' set.seed(123)
#' vip_train <- vi_train |>
#' rsample::training() |>
#' dplyr::select(rsample::all_of(vip_features))
#' v_train <- vi_train |>
#' rsample::training() |>
#' dplyr::select(rsample::all_of(vip_featured))
#' glob_nn_vis(vip_featured, hiv_data, hu, plty, epo, vip_train, v_train)
glob_nn_vis <- function(vip_featured, hiv_data, hu, plty, epo, vip_train, v_train) {
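  # Fit a single-layer neural network (nnet engine) on normalized predictors
  # with `hu` hidden units, penalty `plty` and `epo` epochs, wrap it in a
  # DALEX explainer over vip_train/v_train, compute permutation-based
  # variable importance with DALEX::model_parts(), and plot the result.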
DALEXtra::explain_tidymodels(workflows::workflow() |>
workflows::add_recipe(recipes::recipe(stats::as.formula(paste(vip_featured,"~.")), data = hiv_data) |>
recipes::step_normalize(recipes::all_predictors())) |>
workflows::add_model(parsnip::mlp(hidden_units = hu, penalty = plty, epochs = epo) |>
parsnip::set_engine("nnet", MaxNWts = 2600) |>
parsnip::set_mode("regression")) |> parsnip::fit(data = hiv_data),
data = vip_train,
y = v_train,
label = "nn + normalized",
verbose = FALSE) |>
DALEX::model_parts() |>
plot()
}
|
/scratch/gouwar.j/cran-all/cranData/viralx/R/glob_nn_vis.R
|
#' Training Data for Explainability of Models
#'
#' This dataset contains training data for viral load explainer models. It
#' includes CD4 and viral load measurements for different years.
#'
#' @docType data
#'
#' @usage data(train2)
#'
#' @format A tibble (data frame) with 25 rows and 6 columns.
#'
#' @keywords datasets
#'
#' @note
#' To explore more rows of this dataset, use `print(n = ...)`.
#'
#' @author
#' Juan Pablo Acuña González <[email protected]>
#'
#' @examples
#' data(train2)
#' train2
"train2"
|
/scratch/gouwar.j/cran-all/cranData/viralx/R/train2-data.R
|
#' Explain K-Nearest Neighbors Model
#'
#' Explains the predictions of a K-Nearest Neighbors (KNN) model for CD4 and
#' viral load data using the DALEX and DALEXtra packages. It returns
#' break-down contributions showing how each predictor drives the model's
#' prediction for a new observation.
#'
#' @import kknn
#'
#' @param vip_featured The name of the variable to be explained.
#' @param hiv_data The data frame containing the CD4 and viral load data.
#' @param knn_hyperparameters A list of hyperparameters to be tuned for the KNN model.
#' @param vip_train The training data used for creating the explainer object.
#' @param vip_new A new observation for which to generate explanations.
#'
#' @return A data frame of prediction break-down explanations for the new observation.
#'
#' @export
#'
#' @examples
#' hiv_data <- train2
#' knn_hyperparameters <- list(neighbors = 5, weight_func = "optimal", dist_power = 0.3304783)
#' vip_featured <- c("cd_2022")
#' vip_train <- hiv_data
#' vip_new <- vip_train[1,]
#' viralx_knn(vip_featured, hiv_data, knn_hyperparameters, vip_train, vip_new)
viralx_knn <- function(vip_featured, hiv_data, knn_hyperparameters, vip_train, vip_new) {
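  # Fit a KNN workflow on normalized predictors, wrap it in a DALEX
  # explainer, compute break-down contributions for `vip_new` with
  # DALEX::predict_parts(), and return the first two columns with numeric
  # values rounded to two decimals.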
DALEXtra::explain_tidymodels(
workflows::workflow() |>
workflows::add_recipe(
recipes::recipe(
stats::as.formula(paste(vip_featured, "~.")), data = hiv_data) |>
recipes::step_normalize(recipes::all_predictors())) |>
workflows::add_model(
parsnip::nearest_neighbor(
neighbors = knn_hyperparameters$neighbors,
weight_func = knn_hyperparameters$weight_func,
dist_power = knn_hyperparameters$dist_power) |>
parsnip::set_engine("kknn") |>
parsnip::set_mode("regression")) |>
parsnip::fit(hiv_data),
data = vip_train,
y = vip_featured,
label = "knn + normalized",
verbose = FALSE) |>
DALEX::predict_parts(vip_new) |>
as.data.frame() |>
dplyr::select(1,2) |>
dplyr::mutate_if(is.numeric, round, digits = 2)
}
|
/scratch/gouwar.j/cran-all/cranData/viralx/R/viralx_knn.R
|
#' Global Explainers for K-Nearest Neighbor Models
#'
#' This function calculates global feature importance for a K-Nearest Neighbors
#' (KNN) model trained on HIV data with specified hyperparameters.
#'
#' @param vip_featured The name of the response variable to explain.
#' @param hiv_data The training dataset containing predictor variables and the response variable.
#' @param knn_hyperparameters A list of hyperparameters for the KNN model, including:
#' - \code{neighbors}: The number of neighbors to consider.
#' - \code{weight_func}: The weight function to use.
#' - \code{dist_power}: The distance power parameter.
#' @param vip_train The dataset used for training the KNN model.
#' @param v_train The response variable used for training the KNN model.
#'
#' @return A list of global feature importance measures for each predictor variable.
#'
#' @export
#'
#' @examples
#' library(dplyr)
#' set.seed(123)
#' hiv_data <- train2
#' knn_hyperparameters <- list(neighbors = 5, weight_func = "optimal", dist_power = 0.3304783)
#' vip_featured <- c("cd_2022")
#' vip_features <- c("cd_2019", "vl_2019", "cd_2021", "vl_2021", "vl_2022")
#' vip_train <- train2 |>
#' dplyr::select(rsample::all_of(vip_features))
#' v_train <- train2 |>
#' dplyr::select(rsample::all_of(vip_featured))
#' viralx_knn_glob(vip_featured, hiv_data, knn_hyperparameters, vip_train, v_train)
viralx_knn_glob <- function(vip_featured, hiv_data, knn_hyperparameters, vip_train, v_train) {
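  # Fit a KNN workflow on normalized predictors, wrap it in a DALEX explainer
  # over vip_train/v_train, and return the permutation-based variable
  # importance object computed by DALEX::model_parts().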
DALEXtra::explain_tidymodels(workflows::workflow() |>
workflows::add_recipe(recipes::recipe(stats::as.formula(paste(vip_featured,"~.")), data = hiv_data) |>
recipes::step_normalize(recipes::all_predictors())) |>
workflows::add_model(parsnip::nearest_neighbor(
neighbors = knn_hyperparameters$neighbors,
weight_func = knn_hyperparameters$weight_func,
dist_power = knn_hyperparameters$dist_power) |>
parsnip::set_engine("kknn") |>
parsnip::set_mode("regression")) |>
parsnip::fit(data = hiv_data),
data = vip_train,
y = v_train,
label = "knn + normalized",
verbose = FALSE) |>
DALEX::model_parts()
}
|
/scratch/gouwar.j/cran-all/cranData/viralx/R/viralx_knn_glob.R
|
#' Explain K Nearest Neighbor Model using SHAP values
#'
#' This function calculates SHAP (SHapley Additive exPlanations) values for a
#' K-Nearest Neighbors (KNN) model trained on HIV data with specified
#' hyperparameters.
#'
#' @param vip_featured The name of the response variable to explain.
#' @param hiv_data The training dataset containing predictor variables and the response variable.
#' @param knn_hyperparameters A list of hyperparameters for the KNN model, including:
#' - \code{neighbors}: The number of neighbors to consider.
#' - \code{weight_func}: The weight function to use.
#' - \code{dist_power}: The distance power parameter.
#' @param vip_train The dataset used for training the KNN model.
#' @param vip_new The dataset for which SHAP values are calculated.
#' @param orderings The number of orderings for SHAP value calculations.
#' @return A list of SHAP values for each observation in \code{vip_new}.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' set.seed(123)
#' hiv_data <- train2
#' knn_hyperparameters <- list(neighbors = 5, weight_func = "optimal", dist_power = 0.3304783)
#' vip_featured <- c("cd_2022")
#' vip_train <- hiv_data
#' vip_new <- vip_train[1, ]
#' orderings <- 20
#' viralx_knn_shap(vip_featured, hiv_data, knn_hyperparameters, vip_train, vip_new, orderings)
#' }
viralx_knn_shap <- function(vip_featured, hiv_data, knn_hyperparameters, vip_train, vip_new, orderings) {
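  # Fit a KNN workflow on normalized predictors, wrap it in a DALEX
  # explainer, and compute SHAP attributions for `vip_new` with
  # DALEX::predict_parts(type = "shap"), averaging over `orderings` random
  # orderings of the predictors (argument B).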
DALEXtra::explain_tidymodels(
workflows::workflow() |>
workflows::add_recipe(
recipes::recipe(stats::as.formula(paste(vip_featured,"~.")), data = hiv_data) |>
recipes::step_normalize(recipes::all_predictors())) |>
workflows::add_model(
parsnip::nearest_neighbor(
neighbors = knn_hyperparameters$neighbors,
weight_func = knn_hyperparameters$weight_func,
dist_power = knn_hyperparameters$dist_power) |>
parsnip::set_engine("kknn") |>
parsnip::set_mode("regression")) |>
parsnip::fit(hiv_data),
data = vip_train,
y = vip_featured,
label = "knn + normalized",
verbose = FALSE) |>
DALEX::predict_parts(vip_new, type ="shap", B = orderings)
}
|
/scratch/gouwar.j/cran-all/cranData/viralx/R/viralx_knn_shap.R
|
#' Visualize SHAP Values for K-Nearest Neighbor Model
#'
#' Visualizes SHAP (Shapley Additive Explanations) values for a KNN
#' (K-Nearest Neighbor) model by employing the DALEXtra and DALEX packages,
#' showing how each predictor contributes to the model's prediction for a
#' new observation.
#'
#' @param vip_featured The name of the response variable to explain.
#' @param hiv_data The training dataset containing predictor variables and the response variable.
#' @param knn_hyperparameters A list of hyperparameters for the KNN model, including:
#' - \code{neighbors}: The number of neighbors to consider.
#' - \code{weight_func}: The weight function to use.
#' - \code{dist_power}: The distance power parameter.
#' @param vip_train The dataset used for training the KNN model.
#' @param vip_new The dataset for which SHAP values are calculated.
#' @param orderings The number of orderings for SHAP value calculations.
#' @return A plot of SHAP values for the observations in \code{vip_new}.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' set.seed(123)
#' hiv_data <- train2
#' knn_hyperparameters <- list(neighbors = 5, weight_func = "optimal", dist_power = 0.3304783)
#' vip_featured <- c("cd_2022")
#' vip_train <- hiv_data
#' vip_new <- vip_train[1,]
#' orderings <- 20
#' viralx_knn_vis(vip_featured, hiv_data, knn_hyperparameters, vip_train, vip_new, orderings)
#' }
viralx_knn_vis <- function(vip_featured, hiv_data, knn_hyperparameters, vip_train, vip_new, orderings) {
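  # Same pipeline as viralx_knn_shap(): fit a normalized KNN workflow, build
  # a DALEX explainer, compute SHAP attributions for `vip_new`, and pipe the
  # result to plot() instead of returning it.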
DALEXtra::explain_tidymodels(
workflows::workflow() |>
workflows::add_recipe(recipes::recipe(stats::as.formula(paste(vip_featured,"~.")), data = hiv_data) |>
recipes::step_normalize(recipes::all_predictors())) |>
workflows::add_model(
parsnip::nearest_neighbor(
neighbors = knn_hyperparameters$neighbors,
weight_func = knn_hyperparameters$weight_func,
dist_power = knn_hyperparameters$dist_power) |>
parsnip::set_engine("kknn") |>
parsnip::set_mode("regression"))|>
parsnip::fit(data = hiv_data),
data = vip_train,
y = vip_featured,
                               label = "knn + normalized",
verbose = FALSE) |>
DALEX::predict_parts(vip_new, type ="shap", B = orderings) |>
plot()
}
|
/scratch/gouwar.j/cran-all/cranData/viralx/R/viralx_knn_vis.R
|
#' Explain Multivariate Adaptive Regression Splines Model
#'
#' Explains the predictions of a Multivariate Adaptive Regression Splines (MARS)
#' model for viral load or CD4 counts using the DALEX and DALEXtra tools.
#'
#' @import earth
#' @import DALEX
#' @import DALEXtra
#' @import Formula
#' @import parsnip
#' @import plotmo
#' @import plotrix
#' @import recipes
#' @import rsample
#' @import TeachingDemos
#' @import workflows
#' @importFrom dplyr mutate_if
#' @importFrom dplyr select
#' @importFrom stats as.formula
#'
#' @param vip_featured A character value naming the response variable to explain
#' @param hiv_data A data frame containing the training data
#' @param nt A numeric value giving the number of model terms (num_terms) for MARS
#' @param pd A numeric value giving the degree of interaction (prod_degree) for MARS
#' @param pru A character value giving the pruning method (prune_method) for MARS
#' @param vip_train A data frame of predictors supplied to the explainer
#' @param vip_new A single-row data frame containing the new observation to explain
#'
#' @return A data frame of prediction break-down explanations for the new observation
#' @export
#'
#' @examples
#' library(dplyr)
#' library(rsample)
#' library(Formula)
#' library(plotmo)
#' library(plotrix)
#' library(TeachingDemos)
#' cd_2019 <- c(824, 169, 342, 423, 441, 507, 559,
#' 173, 764, 780, 244, 527, 417, 800,
#' 602, 494, 345, 780, 780, 527, 556,
#' 559, 238, 288, 244, 353, 169, 556,
#' 824, 169, 342, 423, 441, 507, 559)
#' vl_2019 <- c(40, 11388, 38961, 40, 75, 4095, 103,
#' 11388, 46, 103, 11388, 40, 0, 11388,
#' 0, 4095, 40, 93, 49, 49, 49,
#' 4095, 6837, 38961, 38961, 0, 0, 93,
#' 40, 11388, 38961, 40, 75, 4095, 103)
#' cd_2021 <- c(992, 275, 331, 454, 479, 553, 496,
#' 230, 605, 432, 170, 670, 238, 238,
#' 634, 422, 429, 513, 327, 465, 479,
#' 661, 382, 364, 109, 398, 209, 1960,
#' 992, 275, 331, 454, 479, 553, 496)
#' vl_2021 <- c(80, 1690, 5113, 71, 289, 3063, 0,
#' 262, 0, 15089, 13016, 1513, 60, 60,
#' 49248, 159308, 56, 0, 516675, 49, 237,
#' 84, 292, 414, 26176, 62, 126, 93,
#' 80, 1690, 5113, 71, 289, 3063, 0)
#' cd_2022 <- c(700, 127, 127, 547, 547, 547, 777,
#' 149, 628, 614, 253, 918, 326, 326,
#' 574, 361, 253, 726, 659, 596, 427,
#' 447, 326, 253, 248, 326, 260, 918,
#' 700, 127, 127, 547, 547, 547, 777)
#' vl_2022 <- c(0, 0, 53250, 0, 40, 1901, 0,
#' 955, 0, 0, 0, 0, 40, 0,
#' 49248, 159308, 56, 0, 516675, 49, 237,
#' 0, 23601, 0, 40, 0, 0, 0,
#' 0, 0, 0, 0, 0, 0, 0)
#' x <- cbind(cd_2019, vl_2019, cd_2021, vl_2021, cd_2022, vl_2022) |>
#' as.data.frame()
#' set.seed(123)
#' hi_data <- rsample::initial_split(x)
#' set.seed(123)
#' hiv_data <- hi_data |>
#' rsample::training()
#' nt <- 3
#' pd <- 1
#' pru <- "none"
#' vip_featured <- c("cd_2022")
#' vip_features <- c("cd_2019", "vl_2019", "cd_2021", "vl_2021", "vl_2022")
#' set.seed(123)
#' vi_train <- rsample::initial_split(x)
#' set.seed(123)
#' vip_train <- vi_train |>
#' rsample::training() |>
#' dplyr::select(rsample::all_of(vip_features))
#' vip_new <- vip_train[1,]
#' viralx_mars(vip_featured, hiv_data, nt, pd, pru, vip_train, vip_new)
viralx_mars <- function(vip_featured, hiv_data, nt, pd, pru, vip_train, vip_new) {
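  # Fit a MARS (earth engine) workflow with `nt` terms, product degree `pd`
  # and pruning method `pru`, wrap it in a DALEX explainer, compute
  # break-down contributions for `vip_new` with DALEX::predict_parts(), and
  # return the first two columns with numeric values rounded to two decimals.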
DALEXtra::explain_tidymodels(workflows::workflow() |>
workflows::add_recipe(recipes::recipe(stats::as.formula(paste(vip_featured,"~.")), data = hiv_data)) |>
workflows::add_model(parsnip::mars(num_terms = nt, prod_degree = pd, prune_method = pru) |>
parsnip::set_engine("earth") |>
parsnip::set_mode("regression")) |>
parsnip::fit(data = hiv_data), data = vip_train,
y = vip_featured,
label = "MARS",
verbose = FALSE) |>
DALEX::predict_parts(vip_new) |>
as.data.frame() |>
dplyr::select(1,2) |>
dplyr::mutate_if(is.numeric, round, digits = 2)
}
|
/scratch/gouwar.j/cran-all/cranData/viralx/R/viralx_mars.R
|