fitted_bart <- function(y, inputs, pars, verbose=FALSE, ...){
opts <- list(...)
model <- dbarts::bart(x.train=inputs[,pars,drop=FALSE], y.train=y, verbose=verbose, ...)
model$y <- y
model$rhat_mean <- check_bart_conv(model)
res <- as.numeric(fitted(model))
attr(res, "model") <- model
res
}
## Convergence check for mean of fitted values, ie for
## mubar = (average of mu|X over X in data)
## where mu is the expectation that BART estimates
## Assume this is sufficient to ensure EVPPI estimate has "converged"
check_bart_conv <- function(model){
sam <- dbarts::extract(model) # matrix of MCMC samples (ndpost rows) x fitted values, used to assess convergence of their mean
sam.df <- data.frame(mean = rowMeans(sam))
summ <- summary(posterior::as_draws(sam.df))
summ$rhat
}
fitted_rep_bart <- function(model) {
as.matrix(dbarts::extract(model))
}
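## Illustrative usage sketch (assumes dbarts and posterior are installed; the toy
## data and object names below are invented for this example, not package API):
if (FALSE) {
  set.seed(1)
  inputs <- data.frame(p1 = rnorm(200), p2 = rnorm(200))
  y <- with(inputs, p1 + p2^2 + rnorm(200, sd = 0.3))
  fit <- fitted_bart(y, inputs, pars = c("p1", "p2"))
  mod <- attr(fit, "model")
  mod$rhat_mean              # Rhat for the posterior mean of the fitted values
  dim(fitted_rep_bart(mod))  # posterior draws x observations
}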
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evppi_bart.R
|
fitted_earth <- function(y, inputs, pars, verbose=FALSE, ...){
opts <- list(...)
earth_formula <- opts$earth_formula
if (is.null(earth_formula)) {
args <- list(y=y, x=inputs[,pars,drop=FALSE])
} else {
earth_formula <- formula(sprintf("y ~ %s", earth_formula))
args <- list(formula=earth_formula, data = inputs[,pars,drop=FALSE])
}
if (!is.null(opts$se) && opts$se){
if (is.null(opts$nfold)) opts$nfold <- 10
if (is.null(opts$ncross)) opts$ncross <- 30
if (is.null(opts$varmod.method)) opts$varmod.method <- "lm"
}
opts$se <- NULL
model <- do.call(earth::earth, c(args, opts))
model$y <- y
res <- as.numeric(model$fitted)
attr(res, "model") <- model
res
}
check_plot_earth <- function(mod){
oldpar <- graphics::par(no.readonly=TRUE)
on.exit(par(oldpar))
graphics::par(mfrow=c(2,2))
plot(mod)
}
check_stats_earth <- function(mod){
list(gcv = mod$gcv)
}
fitted_rep_earth <- function(model, B) {
nobs <- length(model$fitted)
fitted_rep <- matrix(nrow=B, ncol=nobs)
se <- sqrt(as.numeric(model$varmod$model.var))
for (i in 1:B){
fitted_rep[i,] <- model$fitted + rnorm(nobs, mean=0, sd=se)
}
fitted_rep
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evppi_earth.R
|
fitted_gam <- function(y, inputs, pars, verbose=FALSE, ...){
opts <- list(...)
gam_formula <- opts$gam_formula
pars <- clean_pars(pars)
colnames(inputs) <- clean_pars(colnames(inputs))
if (is.null(gam_formula))
gam_formula <- default_gam_formula(pars)
gam_formula <- formula(sprintf("y ~ %s", gam_formula))
model <- mgcv::gam(gam_formula, data = inputs)
res <- model$fitted
attr(res, "model") <- model
res
}
default_gam_formula <- function(pars){
karg <- if (length(pars) >=4) ", k=4" else ""
sprintf("te(%s, bs='cr'%s)", paste(pars, collapse=", "), karg)
}
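## Quick illustration (a sketch, intended to be run manually) of the default
## formula strings produced above, using invented parameter names:
if (FALSE) {
  default_gam_formula(c("p1", "p2"))
  ## "te(p1, p2, bs='cr')"
  default_gam_formula(c("p1", "p2", "p3", "p4"))
  ## "te(p1, p2, p3, p4, bs='cr', k=4)"
}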
fitted_rep_gam <- function(model, B) {
beta_rep <- mvtnorm::rmvnorm(B, coef(model), vcov(model))
fitted_rep <- beta_rep %*% t(predict(model,type="lpmatrix"))
fitted_rep
}
check_plot_gam <- function(mod){
oldpar <- graphics::par(no.readonly=TRUE)
on.exit(par(oldpar))
graphics::par(mfrow=c(2,2))
mgcv::gam.check(mod)
}
check_stats_gam <- function(mod){
list(AIC = stats::AIC(mod))
}
#' Generate a string with all interactions of a certain degree, to be used in a GAM formula
#'
#' @param x Character vector of variable names
#'
#' @param degree Degree of the interactions, i.e. the number of variables included in each tensor product term
#'
#' @return A string looking like the right hand side of a GAM formula with tensor product interactions.
#'
#' For example, if `x` is `c("x1","x2","x3")`, then `all_interactions(x, degree=2)` should return
#'
#' `"te(x1,x2) + te(x1,x3) + te(x2,x3)"`
#'
#' @examples
#' x <- c("x1","x2","x3")
#' all_interactions(x, 2)
#'
#' @export
#'
all_interactions <- function(x, degree=2){
c_mat <- utils::combn(x, degree)
c_comma <- apply(c_mat, 2, function(y) paste(y, collapse = ","))
c_tevec <- paste0("te(",c_comma,")")
form_str <- paste(c_tevec, collapse = " + ")
form_str
}
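## Hedged usage sketch: the returned string is the right-hand side of a GAM formula,
## so it can be passed as the `gam_formula` option read by fitted_gam() above
## (variable names here are just illustrative):
if (FALSE) {
  x <- c("x1", "x2", "x3")
  all_interactions(x, 2)
  ## "te(x1,x2) + te(x1,x3) + te(x2,x3)"
}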
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evppi_gam.R
|
## Gaussian process method for estimating EVPPI
## Code from SAVI
## [Aren't there also packaged GP regression methods ? kernlab? gaupro, GPfit? are they more efficient?]
fitted_gp <- function(y, inputs, pars, verbose=FALSE, ...){
args <- list(...)
model <- gp(y=y, X=inputs[,pars], m=args$gp_hyper_n, maxSample=args$maxSample, verbose=verbose)
res <- model$fitted
attr(res, "model") <- model
res
}
fitted_rep_gp <- function(model, B, ...){
mvtnorm::rmvnorm(B, model$fitted, model$V)
}
## Code below adapted from SAVI, copyright (c) 2014, 2015 the SAVI authors
## https://github.com/Sheffield-Accelerated-VoI/SAVI-package/blob/master/inst/SAVI/scripts_GPfunctions.R
## * Changes to make it more clean/modular.
## * Check for constant/collinearity temporarily moved out
## * Added gp_hyper_n option to select the number of iterations used for estimating the hyperparameters. BCEA [code from older SAVI version?] uses all by default, while SAVI uses a small number for efficiency. SAVI value taken as the default here.
dinvgamma <- function(x, alpha, beta) {
(beta ^ alpha) / gamma(alpha) * x ^ (-alpha - 1) * exp(-beta / x)
}
makeA.Gaussian.old <- function(X, phi) { # function to make A matrix with the Gaussian correlation function
n <- NROW(X)
if(length(phi) > 1) {
b <- diag(phi ^ (-2))
} else {
b <- phi ^ (-2) }
R <- X %*% as.matrix(b) %*% t(X)
S <- matrix(diag(R), nrow = n, ncol = n)
exp(2 * R - S - t(S))
}
##' Form a squared exponential correlation matrix between two sets of predictors for a Gaussian process regression
##'
##' @param X First set of predictors. Matrix with number of columns given by the number of
##' predictors in the model, and number of rows given by the number of alternative values for these.
##'
##' @param Xstar Second set of predictors, in the same format.
##'
##' @param phi correlation parameter. Vector with length given by the number of predictors.
##'
##' @return A matrix with \code{nrow(X)} and \code{nrow(Xstar)} columns, with \eqn{r},\eqn{s} entry
##' given by the correlation between the
##' \eqn{r}th element of \code{X} and the \eqn{s}th element of \code{Xstar}.
##'
##' @noRd
makeA.Gaussian <- function(X, Xstar, phi){
if (!is.matrix(X)) stop("`X` should be a matrix")
if (!is.matrix(Xstar)) stop("`Xstar` should be a matrix")
n <- nrow(X)
m <- nrow(Xstar)
if (!(n>0)) stop("X should have 1 or more rows")
if (!(m>0)) stop("Xstar should have 1 or more rows")
X_rep <- X[rep(1:n, m), , drop=FALSE]
Xstar_rep <- Xstar[rep(1:m, each=n), , drop=FALSE]
scale_rep <- matrix(phi, nrow=n*m, ncol=length(phi), byrow=TRUE)
dists <- rowSums(((X_rep - Xstar_rep) / scale_rep)^2)
matrix(exp(-dists), nrow=n, ncol=m)
}
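## Small sanity-check sketch (illustrative only): the correlation of a point with
## itself is exactly 1, and the result has one row per row of X and one column per row of Xstar.
if (FALSE) {
  X <- matrix(runif(20), ncol = 2)
  A <- makeA.Gaussian(X, X, phi = c(0.5, 0.5))
  all(diag(A) == 1)  # TRUE
  dim(A)             # 10 x 10
}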
# function to calculate posterior density
post.density <- function(hyperparams, NB, input.m) {
input.m <- as.matrix(input.m, drop = FALSE)
N <- nrow(input.m)
p <- NCOL(input.m)
H <- cbind(1, input.m)
q <- ncol(H)
a.sigma <- 0.001; b.sigma <- 0.001 ## hyperparameters for IG prior for sigma^2
a.nu <- 0.001; b.nu <- 1 ## hyperparameters for IG prior for nu
delta <- exp(hyperparams)[1:p]
nu <- exp(hyperparams)[p + 1]
A <- makeA.Gaussian(input.m, input.m, delta)
Astar <- A + nu * diag(N)
T <- chol(Astar)
y <- backsolve(t(T), NB, upper.tri = FALSE)
x <- backsolve(t(T), H, upper.tri = FALSE)
tHAstarinvH <- t(x) %*% (x) + 1e-7* diag(q)
betahat <- solve(tHAstarinvH) %*% t(x) %*% y
residSS <- y %*% y -t(y) %*% x %*% betahat - t(betahat) %*% t(x) %*% y +
t(betahat) %*% tHAstarinvH %*% betahat
prior <- prod(dnorm(log(delta), 0, sqrt(1e5))) * dinvgamma(nu, a.nu, b.nu)
l <- -sum(log(diag(T))) - 1 / 2 * log(det(tHAstarinvH)) -
(N - q + 2 * a.sigma) / 2 * log(residSS / 2 + b.sigma) + log(prior)
return(l)
}
estimate.hyperparameters <- function(NB, inputs, verbose=FALSE) {
p <- NCOL(inputs)
hyperparameters <- NA
initial.values <- rep(0, p + 1)
repeat {
if (verbose)
print(paste("calling optim function for net benefit"))
log.hyperparameters <- optim(initial.values, fn=post.density,
NB=NB, input.m=inputs,
method="Nelder-Mead",
control=list(fnscale=-1, maxit=10000, trace=0))$par
if (sum(abs(initial.values - log.hyperparameters)) < 0.05) {
hyperparameters <- exp(log.hyperparameters)
break
}
initial.values <- log.hyperparameters
}
return(hyperparameters)
}
##' Fit a Gaussian process regression
##'
##' Fit a Gaussian process regression. This is a simple
##' implementation, in pure R, that is designed to be sufficient for
##' doing VoI calculations, but not polished and tuned for any other
##' purpose.
##'
##' @param y Vector of outcome data
##'
##' @param X Matrix of inputs
##'
##' @param Xpred Matrix of inputs at which predictions are wanted
##'
##' @param hyper Hyperparameter values. If set to \code{"est"} (the default), these are estimated.
##'
##' @param m number of samples to use to estimate the hyperparameters. By default, this is the minimum
##' of the following three quantities: 30 times the number of predictors in \code{X},
##' 250, and the length of \code{y}.
##'
##' @param maxSample Maximum sample size to employ. If datasets larger than this are supplied, they are
##' truncated to this size.
##'
##' @param verbose Progress messages (not thoroughly implemented).
##'
##' @return A list with the following components
##'
##' \code{fitted} The fitted values at the input data points
##'
##' \code{pred} The fitted values at \code{Xpred}
##'
##' \code{V} The covariance matrix of the fitted values
##'
##' \code{residuals} The residuals
##'
##' \code{hyper} The hyperparameters (delta and nu) used in the fit. Lower values of delta give
##' less smooth regression fits, and nu is an independent measurement error variance (or "nugget").
##'
##' The variance of the predicted points is not implemented.
##'
##' @noRd
gp <- function(y, X, Xpred=NULL, hyper="est", m=NULL, maxSample=5000, verbose=FALSE) {
maxSample <- min(maxSample, length(y)) # to avoid trying to invert huge matrix
X <- as.matrix(X)[1:maxSample, , drop=FALSE]
y <- y[1:maxSample]
standardiseX <- function(X){
colmin <- apply(X, 2, min)
colmax <- apply(X, 2, max)
colrange <- colmax - colmin
X <- sweep(X, 2, colmin, "-")
X <- sweep(X, 2, colrange, "/")
X
}
X <- standardiseX(X)
p <- ncol(X)
if (is.null(Xpred))
Xpred <- X
else {
Xpred <- as.matrix(Xpred)
Xpred <- standardiseX(Xpred)
}
H <- cbind(1, X)
q <- ncol(H)
if(is.null(m)){
m <- min(30 * p, 250)
m <- min(length(y), m)
}
setForHyperparamEst <- 1:m # sample(1:N, m, replace=FALSE)
if (identical(hyper,"est"))
hyper <- estimate.hyperparameters(y[setForHyperparamEst],
X[setForHyperparamEst, ], verbose=verbose)
else {
if (!is.numeric(hyper) || (length(hyper) !=2))
stop("`hyper` should be a numeric vector of length two, or \"est\"")
}
delta.hat <- hyper[1:p]
nu.hat <- hyper[p+1]
A <- makeA.Gaussian(X, X, delta.hat)
Apred <- makeA.Gaussian(Xpred, X, delta.hat)
N <- nrow(X)
Astar <- A + nu.hat * diag(N)
Astarinv <- chol2inv(chol(Astar))
rm(Astar); gc()
AstarinvY <- Astarinv %*% y
tHAstarinv <- t(H) %*% Astarinv
tHAHinv <- solve(tHAstarinv %*% H + 1e-7* diag(q))
betahat <- tHAHinv %*% (tHAstarinv %*% y)
Hbetahat <- H %*% betahat
resid <- y - Hbetahat
g.hat <- Hbetahat + A %*% (Astarinv %*% resid)
Hpred <- cbind(1, Xpred)
Hbetahatpred <- Hpred %*% betahat
pred <- Hbetahatpred + Apred %*% (Astarinv %*% resid)
AAstarinvH <- A %*% t(tHAstarinv)
## get the variance V of the fitted values
sigmasqhat <- as.numeric(t(resid) %*% Astarinv %*% resid)/(N - q - 2)
V <- sigmasqhat*(nu.hat * diag(N) -
nu.hat ^ 2 * Astarinv +
(H - AAstarinvH) %*% (tHAHinv %*% t(H - AAstarinvH)))
rm(A, Astarinv, AstarinvY, tHAstarinv, tHAHinv, Hbetahat, resid);gc()
fitted <- unlist(g.hat)
pred <- unlist(pred)
list(y=y, fitted=fitted, V=V, pred=pred, residuals = y - fitted, hyper=hyper, beta=betahat, sigmasq=sigmasqhat)
}
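## Illustrative usage sketch for the GP emulator on toy data (names and numbers
## invented here; run manually):
if (FALSE) {
  set.seed(1)
  X <- matrix(runif(200), ncol = 2)
  y <- X[, 1] + sin(2 * pi * X[, 2]) + rnorm(100, sd = 0.1)
  fit <- gp(y, X)
  fit$hyper                          # estimated correlation lengths and nugget
  plot(fit$fitted, y); abline(0, 1)  # fitted values should track the response
}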
gp.check <- function(mod){
## qqplot?
## histogram of residuals
graphics::hist(mod$residuals, main="Histogram of residuals", xlab="Residuals")
## residuals vs fitted values
plot(mod$fitted, mod$residuals, xlab="Fitted values", ylab="Residuals")
## response vs fitted values
plot(mod$fitted, mod$y, xlab="Fitted values", ylab="Response")
}
check_plot_gp <- function(mod){
oldpar <- graphics::par(no.readonly=TRUE)
on.exit(par(oldpar))
graphics::par(mfrow=c(2,2))
gp.check(mod)
}
check_stats_gp <- function(mod){
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evppi_gp.R
|
## Code taken from BCEA package
## Baio, G., Berardi, A., & Heath, A. (2017). Bayesian cost-effectiveness analysis with the R package BCEA. New York: Springer.
## https://github.com/giabaio/BCEA
check_packages <- function(){
if (!isTRUE(requireNamespace("INLA", quietly = TRUE))) {
stop("You need to install the packages 'INLA' and 'splancs'. Please run in your R terminal:\n install.packages('INLA', repos='https://inla.r-inla-download.org/R/stable')\n and\n install.packages('splancs')")
}
}
fitted_inla <- function(y, inputs, pars,
verbose = TRUE,
cutoff = 0.3,
convex.inner = -0.4,
convex.outer = -0.7,
max.edge = 0.7,
plot_inla_mesh = FALSE,
h.value = 5e-05,
robust = FALSE,
int.ord = 1,
pfc_struc="AIC",
...){
check_packages()
family <- if (robust) "T" else "gaussian"
if (!is.element("INLA", (.packages()))) {
attachNamespace("INLA")
}
if (length(pars)<2){
stop("The INLA method can only be used with 2 or more parameters")
}
if (verbose) {
message("Finding projections")
}
projections <- make.proj(parameter = pars, inputs = inputs, x = y, pfc_struc = pfc_struc)
data <- projections$data
if (verbose) {
message("Determining Mesh")
}
mesh <- make.mesh(data = data, convex.inner = convex.inner,
convex.outer = convex.outer, cutoff = cutoff,max.edge=max.edge)
plot_mesh(mesh = mesh$mesh, data = data, plot = plot_inla_mesh)
if (verbose) {
message("Calculating fitted values for the GP regression using INLA/SPDE")
}
fit <- fit.inla(parameter = pars, inputs = inputs,
x = y, mesh = mesh$mesh, data.scale = data, int.ord = int.ord,
convex.inner = convex.inner, convex.outer = convex.outer,
cutoff = cutoff, max.edge = max.edge, h.value = h.value, family=family)
res <- fit$fitted
attr(res, "model") <- data.frame(y=y, fitted=res, residuals=y-res)
res
}
###INLA Fitting
make.proj <- function(parameter, inputs, x, pfc_struc="AIC") {
tic <- proc.time()
scale<-8/(range(x)[2]-range(x)[1])
scale.x <- scale*x -mean(scale*x)
bx <- bf(scale.x,case="poly",2)
fit1 <- pfc(scale(inputs[,parameter]),scale.x,bx,structure="iso")
fit2 <- pfc(scale(inputs[,parameter]),scale.x,bx,structure="aniso")
fit3 <- pfc(scale(inputs[,parameter]),scale.x,bx,structure="unstr")
if (pfc_struc=="AIC"){
aics <- c(fit1$aic,fit2$aic,fit3$aic)
minaic <- which.min(aics)
struc <- c("iso","aniso","unstr")[minaic]
}
else struc <- pfc_struc
AIC.deg<-array()
for(i in 2:7){
bx <- bf(scale.x,case="poly",i)
fit <- pfc(scale(inputs[,parameter]),scale.x,bx,structure=struc)
AIC.deg[i]<-fit$aic}
deg<-which(AIC.deg==min(AIC.deg,na.rm=T))
d<-min(dim(inputs[,parameter])[2],deg)
by <- bf(scale.x,case="poly",deg)
comp.d <- ldr(scale(inputs[,parameter]),scale.x,bx,structure=struc,model="pfc",numdir=d,numdir.test=T)
dim.d<-which(comp.d$aic==min(comp.d$aic))-1
comp <- ldr(scale(inputs[,parameter]),scale.x,bx,structure=struc,model="pfc",numdir=2)
toc <- proc.time() - tic
time <- toc[3]
if(dim.d>2){
warning(paste("The dimension of the sufficient reduction is", dim.d, ".",
"Dimensions greater than 2 imply that the EVPPI approximation using INLA may be inaccurate.",
"Full residual checking using diag.evppi is required."))}
names(time) = "Time to find projections (seconds)"
list(data = comp$R, time = time,dim=dim.d)
}
plot_mesh <- function(mesh, data, plot) {
if (plot) {
plot(mesh)
points(data, col = "blue", pch = 19, cex = 0.8)
}
}
make.mesh <- function(data, convex.inner, convex.outer,
cutoff,max.edge) {
tic <- proc.time()
inner <- suppressMessages({
INLA::inla.nonconvex.hull(data, convex = convex.inner)
})
outer <- INLA::inla.nonconvex.hull(data, convex = convex.outer)
mesh <- INLA::inla.mesh.2d(
loc=data, boundary=list(inner,outer),
max.edge=c(max.edge,max.edge),cutoff=c(cutoff))
toc <- proc.time() - tic
time <- toc[3]
names(time) = "Time to determine the mesh (seconds)"
list(mesh = mesh, pts = data, time = time)
}
fit.inla <- function(parameter, inputs, x, mesh,
data.scale, int.ord, convex.inner, convex.outer,
cutoff, max.edge,h.value,family) {
tic <- proc.time()
inputs <- inputs[,parameter,drop=FALSE]
inputs.scale <- scale(inputs, apply(inputs, 2, mean), apply(inputs, 2, sd))
scale<-8/(range(x)[2]-range(x)[1])
scale.x <- scale*x -mean(scale*x)
A <- INLA::inla.spde.make.A(mesh = mesh, loc = data.scale, silent = 2L)
spde <- INLA::inla.spde2.matern(mesh = mesh, alpha = 2)
stk.real <- INLA::inla.stack(tag = "est", data = list(y=scale.x), A = list(A, 1),
effects = list(s = 1:spde$n.spde,
data.frame(b0 = 1, x = cbind(data.scale, inputs.scale))))
data <- INLA::inla.stack.data(stk.real)
ctr.pred <- INLA::inla.stack.A(stk.real)
inp <- paste("x", parameter, sep=".") # CJ
# inp <- names(stk.real$effects$data)[parameter + 4] # BCEA
form <- paste(inp, "+", sep = "", collapse = "")
formula <- paste("y~0+(", form, "+0)+b0+f(s,model=spde)",
sep = "", collapse = "")
if (int.ord[1] > 1) {
formula <- paste("y~0+(", form, "+0)^", int.ord[1],
"+b0+f(s,model=spde)", sep = "", collapse = "")
}
Result <- suppressMessages({
INLA::inla(as.formula(formula), data = data,
family = family, control.predictor = list(A = ctr.pred,link = 1),
control.inla = list(h = h.value),
control.compute = list(config = T))
})
fitted <- (Result$summary.linear.predictor[1:length(x),"mean"]+mean(scale*x))/scale
fit <- Result
toc <- proc.time() - tic
time <- toc[3]
names(time) = "Time to fit INLA/SPDE (seconds)"
list(fitted = fitted, model = fit, time = time, formula = formula,
mesh = list(mesh = mesh, pts = data.scale))
}
check_plot_inla <- function(mod){
oldpar <- graphics::par(no.readonly=TRUE)
on.exit(par(oldpar))
graphics::par(mfrow=c(2,2))
gp.check(mod)
}
check_stats_inla <- function(mod){
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evppi_inla.R
|
##' Traditional two-level Monte Carlo estimator of EVPPI.
##'
##' Traditional two-level Monte Carlo estimator of the expected value of partial
##' perfect information from a decision-analytic model. Only useful in the
##' simplest of examples. For realistically complex examples, the methods
##' implemented in the \code{\link{evppi}} function, based on regression,
##' will usually be much more computationally efficient.
##'
##' See the \href{https://chjackson.github.io/voi/articles/voi.html#evppimc}{package overview / Get Started vignette} for an example of using this function.
##'
##'
##' @param pars A character vector giving the parameters of interest, for which
##' the EVPPI is required. This should correspond to an explicit argument to
##' \code{model_fn}.
##'
##' The parameters of interest are assumed to have uncertainty distributions
##' that are independent of those of the other parameters.
##'
##' @param model_fn A function to evaluate a decision-analytic model at a given
##' set of parameters. This should have one argument per parameter, and return either:
##'
##' (net benefit format) a vector giving the net benefit for each decision
##' option, or
##'
##' (cost-effectiveness analysis format) a matrix or data frame with two rows,
##' and one column for each decision option. If the rows have names
##' \code{"e"} and \code{"c"} then these are assumed to be the effects and
##' costs respectively.
##'
##' Otherwise, the first row is assumed to be the effects, and the second the
##' costs.
##'
##' @param par_fn A function to generate a random sample of values for the
##' parameters of \code{model_fn}. This should return a matrix or a data frame
##' with named columns matching the arguments of \code{model_fn}.
##'
##' If any required arguments to \code{model_fn} are not supplied in this
##' return value, then \code{evppi_mc} looks for them in the list supplied as
##' the \code{mfargs} argument.
##'
##' If any required arguments are not found in the results of \code{par_fn} or
##' \code{mfargs}, and if \code{model_fn} defines default values for those
##' arguments, then those default values are used.
##'
##' The first argument of \code{par_fn} should be an integer \code{n} denoting
##' the number of random values to draw for each parameter. The object
##' returned by \code{par_fn} should then have \code{n} rows, and one column
##' for each parameter. If one value is drawn, then \code{par_fn} is also
##' allowed to return a vector, but this should still be named.
##'
##' The parameters may be correlated. If we wish to compute the EVPPI for a
##' parameter which is correlated with a different parameter q, then `par_fn`
##' must have an argument with the name of that parameter. If that argument
##' is set to a fixed value, then `par_fn` should return a sample drawn
##' conditionally on that value. If that argument is not supplied, then
##' `par_fn` must return a sample drawn from the marginal distribution. See
##' the vignette for an example.
##'
##' @param nouter Number of outer samples
##'
##' @param ninner Number of inner samples
##'
##' @param k Vector of willingness-to-pay values. Only used if
##' \code{model_fn} is in cost-effectiveness analysis format.
##'
##' @param mfargs Named list of additional arguments to supply to
##' \code{model_fn}.
##'
##' @param verbose Set to \code{TRUE} to print some additional messages to
##' help with debugging.
##'
##' @return A data frame with a column \code{pars}, indicating the parameter(s),
##' and a column \code{evppi}, giving the corresponding EVPPI.
##'
##' If \code{outputs} is of "cost-effectiveness analysis" form, so that there is
##' one EVPPI per willingness-to-pay value, then a column \code{k} identifies the
##' willingness-to-pay.
##'
##' @export
evppi_mc <- function(model_fn, par_fn, pars, nouter, ninner,
k=NULL, mfargs=NULL, verbose=FALSE){
model_fn <- check_model_fn(model_fn, par_fn, mfargs, verbose=verbose)
nopt <- attr(model_fn, "nopt")
check_parfnn(par_fn, model_fn)
check_int(nouter, "nouter")
check_int(ninner, "ninner")
pars_rep <- par_fn(n=nouter)
check_evppimc_pars(pars, model_fn, pars_rep)
pars_rep <- pars_rep[,pars,drop=FALSE]
rese <- evppimc(model_fn=model_fn, par_fn=par_fn, pars=pars, pars_rep=pars_rep,
nouter=nouter, ninner=ninner, nopt=nopt, wtp=k, mfargs=mfargs)
if (inherits(model_fn, "cea")){
res <- data.frame(k=k, evppi=rese)
} else res <- data.frame(evppi=rese)
res
}
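## Hedged usage sketch with a toy two-option model in net benefit form
## (invented for illustration, not from the package documentation):
if (FALSE) {
  model_fn <- function(p1, p2) c(0, p1 - p2)   # net benefit of each decision option
  par_fn <- function(n) data.frame(p1 = rnorm(n, 1, 1), p2 = rnorm(n, 0, 2))
  evppi_mc(model_fn, par_fn, pars = "p1", nouter = 100, ninner = 50)
}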
evppimc <- function(model_fn, ...){
UseMethod("evppimc", model_fn)
}
evppimc.nb <- function(model_fn, par_fn, pars, pars_rep, nouter, ninner, nopt, mfargs, ...) {
nb_current <- matrix(nrow=nouter, ncol=nopt)
nb_ppi <- numeric(nouter)
nf <- names(formals(model_fn))
defaults <- get_default_args(model_fn, pars_rep)
pars_corr <- intersect(names(formals(par_fn)), nf)
pb <- progress::progress_bar$new(total = nouter)
for (i in 1:nouter){
nbi <- matrix(nrow=ninner, ncol=nopt)
for (j in 1:ninner){
parsfix <- sample_conditional(par_fn, pars, pars_corr, vals=pars_rep[i,,drop=FALSE])
args <- c(parsfix, mfargs, defaults)[nf]
nbi[j,] <- do.call(model_fn, args)
}
pb$tick()
nb_ppi[i] <- max(colMeans(nbi))
args <- c(par_fn(1), mfargs, defaults)[nf]
nb_current[i,] <- do.call(model_fn, args)
}
mean(nb_ppi) - max(colMeans(nb_current))
}
evppimc.cea <- function(model_fn, par_fn, pars, pars_rep, nouter, ninner, nopt, wtp, mfargs, ...) {
nwtp <- length(wtp)
if (nwtp < 1)
stop("If `model_fn` is in cost-effectiveness format, at least one willingness-to-pay should be supplied in `k`")
res <- numeric(nwtp)
ce_current <- array(dim=c(nouter, 2, nopt))
cost_ppi <- eff_ppi <- matrix(nrow=nouter, ncol=nopt)
nf <- names(formals(model_fn))
defaults <- get_default_args(model_fn, pars_rep)
pars_corr <- intersect(names(formals(par_fn)), nf)
for (i in 1:nouter){
cei <- array(dim=c(ninner, 2, nopt))
for (j in 1:ninner){
parsfix <- sample_conditional(par_fn, pars, pars_corr, vals=pars_rep[i,,drop=FALSE])
args <- c(parsfix, mfargs, defaults)[nf]
resj <- do.call(model_fn, args)
cei[j,,] <- resj
}
inds <- mfi(resj)
eff_ppi[i,] <- colMeans(cei[,inds$e,])
cost_ppi[i,] <- colMeans(cei[,inds$c,])
args <- c(par_fn(1), mfargs, defaults)[nf]
ce_current[i,,] <- do.call(model_fn, args)
}
for (k in 1:nwtp){
nb_ppi <- apply(eff_ppi*wtp[k] - cost_ppi, 1, max)
nb_current <- ce_current[,inds$e,]*wtp[k] - ce_current[,inds$c,]
res[k] <- mean(nb_ppi) - max(colMeans(nb_current))
}
res
}
## Sample from the joint distribution defined by `par_fn`, given fixed values
## `vals` for parameters `pars`. If `par_fn` has any arguments other than `n`
## (named in `pars_corr`), these are assumed to be parameters that are
## correlated with other parameters, so that fixing their value will change the
## conditional distribution of the remaining parameters. The values `vals` of
## the parameters fixed in the inner EVPPI loop are then supplied for these
## arguments, to allow par_fn to compute the appropriate conditional
## distribution
sample_conditional <- function(par_fn, pars, pars_corr, vals) {
args_fixed <- if (length(pars_corr) == 0) NULL else as.list(vals[,pars_corr])
parsfix <- do.call("par_fn", c(list(n=1), args_fixed))
parsfix[,pars] <- vals
parsfix
}
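## Hedged sketch of a `par_fn` with a conditional argument, as described above:
## if `p1` is supplied, `p2` is drawn conditionally on it; otherwise both are
## drawn from their joint distribution. Names and distributions are invented here.
if (FALSE) {
  par_fn <- function(n, p1 = NULL) {
    if (is.null(p1)) p1 <- rnorm(n, 0, 1)             # marginal draw of p1
    p2 <- rnorm(n, mean = 0.5 * p1, sd = sqrt(0.75))  # p2 | p1, correlation 0.5
    data.frame(p1 = p1, p2 = p2)
  }
  par_fn(3)          # joint sample
  par_fn(3, p1 = 1)  # sample of p2 given p1 = 1
}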
check_int <- function(n, name){
if (!is.numeric(n) || (n < 2))
stop(sprintf("%s is `%s`, this should be a number greater than 1", name, n))
}
check_evppimc_pars <- function(pars, model_fn, pars_rep){
badpars <- setdiff(pars, names(formals(model_fn)))
if (length(badpars)>0){
stop(sprintf("parameters of interest `%s` not found in arguments of `model_fn`", paste(badpars,collapse=",")))
}
badpars <- setdiff(pars, colnames(pars_rep))
if (length(badpars)>0){
stop(sprintf("parameters of interest `%s` not found in columns of object returned by `par_fn`",
paste(badpars,collapse=",")))
}
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evppi_mc.R
|
npreg_methods <- c("gam", "gp", "inla", "earth", "bart")
evppi_npreg <- function(outputs, ...){
UseMethod("evppi_npreg", outputs)
}
evppi_npreg.nb <- function(outputs, inputs, pars, method, se, B, verbose, ...){
if (verbose) message("Fitting nonparametric regression")
fit <- fitted_npreg(outputs, inputs=inputs, pars=pars, method=method, se=se, B=B,
verbose=verbose, ...)
res <- data.frame(evppi = calc_evppi(fit))
if (se){
if (verbose) message("Calculating replicate EVPPIs from simulation")
evppi_rep <- numeric(B)
for (i in 1:B){
## This bit could be faster. Is there a vectorised way? Naive apply doesn't help
evppi_rep[i] <- calc_evppi(attr(fit, "rep")[i,,])
}
res$se <- sd(evppi_rep)
}
attr(res, "models") <- attr(fit, "models")
res
}
evppi_npreg.cea <- function(outputs, inputs, pars, method, se, B, verbose, ...){
wtp <- outputs$k
if (verbose) message("Fitting nonparametric regression for costs")
cfit <- fitted_npreg(outputs$c, inputs=inputs, pars=pars, method=method, se=se, B=B, verbose=verbose, ...)
if (verbose) message("Fitting nonparametric regression for effects")
efit <- fitted_npreg(outputs$e, inputs=inputs, pars=pars, method=method, se=se, B=B, verbose=verbose, ...)
calc_evppi_ce(cfit, efit, wtp, se=se, B=B, verbose=verbose)
}
calc_evppi_ce <- function(cfit, efit, wtp, se=FALSE, B=0, verbose=FALSE){
nwtp <- length(wtp)
res <- resse <- numeric(nwtp)
for (i in 1:nwtp){
evppi_rep <- numeric(B)
inbfit <- efit*wtp[i] - cfit
if (se){
inbrep <- attr(efit, "rep")*wtp[i] - attr(cfit, "rep")
if (verbose) message("Calculating replicate EVPPIs from simulation")
for (j in 1:B){
evppi_rep[j] <- calc_evppi(inbrep[j,,])
}
resse[i] <- sd(evppi_rep)
}
res[i] <- calc_evppi(inbfit)
}
res <- data.frame(k=wtp, evppi=res)
attr(res, "models") <- list(c=attr(cfit, "models"), e=attr(efit, "models"))
if (se) res$se <- resse
res
}
check_ref <- function(ref, nb){
if (is.character(ref)){
ref_num <- match(ref, colnames(nb))
if (is.na(ref_num))
stop(sprintf("reference decision option ref=\"%s\" does not appear in the supplied column names of `outputs`: %s",
ref,
paste(paste0("\"", colnames(nb), "\""),collapse=",")))
}
else {
if (!(ref %in% 1:ncol(nb)))
stop(sprintf("reference decision option `ref` should either be a string matching one of the column names of `outputs`, or an integer <= %s indicating the corresponding column number of `outputs`", ncol(nb)))
ref_num <- ref
}
ref_num
}
fitted_npreg <- function(nb, inputs, pars, method, se=FALSE, B=NULL, verbose, ...){
nopt <- ncol(nb)
nsim <- nrow(nb)
## Transforming to incremental net benefit allows us to do one fewer regression
ref <- list(...)$ref # reference decision option
if (is.null(ref)) ref <- 1
else ref <- check_ref(ref, nb)
inb <- nb[, -ref, drop=FALSE] - nb[,ref]
fitted <- matrix(0, nrow=nsim, ncol=nopt)
if (se) {
if (method=="bart"){
B <- list(...)$ndpost
if (is.null(B)) B <- formals(dbarts::bart)$ndpost
}
fitted_rep <- array(0, dim=c(B, nsim, nopt))
}
models <- vector(nopt-1, mode="list")
for (i in 1:(nopt-1)){
if (verbose) message(sprintf("Decision option %s",i+1))
fit <- fitted_npreg_fn(method)(y=inb[,i], inputs=inputs, pars=pars, verbose=verbose, se=se, ...)
fitted[,i+1] <- as.vector(fit)
if (se){
fitted_rep[,,i+1] <- fitted_npreg_rep_call(method, attr(fit,"model"), B, verbose)
}
models[[i]] <- attr(fit, "model")
}
if (se) attr(fitted, "rep") <- fitted_rep
attr(fitted, "models") <- models
fitted
}
fitted_npreg_fn <- function(method){
switch(method,
gam = fitted_gam,
gp = fitted_gp,
inla = fitted_inla,
earth = fitted_earth,
bart = fitted_bart)
}
fitted_npreg_rep_call <- function(method, model, B, verbose=FALSE){
if (verbose) message("Simulating parameters to calculate standard errors")
if (method=="gam") {
frep <- fitted_rep_gam(model, B)
} else if (method=="earth") {
frep <- fitted_rep_earth(model, B)
} else if (method=="gp") {
frep <- fitted_rep_gp(model, B)
} else if (method=="bart") {
frep <- fitted_rep_bart(model)
}
else stop(sprintf("Standard errors not available for method = \"%s\"",method))
frep
}
calc_evppi <- function(fit) {
## NAs are removed in BCEA here. Shouldn't we make users investigate them and remove by hand if they know what they are doing? At least warn users if there are NAs in their samples
mean(apply(fit, 1, max)) - max(colMeans(fit))
}
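## Tiny numeric illustration (sketch only) of the formula above: the expectation
## of the row-wise maximum minus the maximum of the column means.
if (FALSE) {
  fit <- cbind(opt1 = c(1, 3, 2), opt2 = c(2, 1, 4))
  calc_evppi(fit)  # mean(c(2, 3, 4)) - max(c(2, 7/3)) = 2/3
}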
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evppi_npreg.R
|
## Code taken from BCEA package
## Baio, G., Berardi, A., & Heath, A. (2017). Bayesian cost-effectiveness analysis with the R package BCEA. New York: Springer.
## https://github.com/giabaio/BCEA
evppi_sal <- function(outputs, inputs, pars, ...){
n.seps <- list(...)$n.seps
if (is.null(n.seps)) n.seps <- 1
U <- form_nbarray(outputs)
nsim <- dim(U)[1]
nk <- dim(U)[2]
nopt <- dim(U)[3]
if (length(pars) > 1)
stop("`method=\"sal\"` only works for single-parameter EVPPI")
param <- inputs[, pars]
o <- order(param)
param <- param[o]
nSegs <- matrix(1, nopt, nopt)
nSegs[1, 2] <- n.seps
nSegs[2, 1] <- n.seps
res <- segPoints <- numeric()
for (k in 1:nk) {
nbs <- U[, k, ]
nbs <- nbs[o, ]
for (i in 1:(nopt - 1)) {
for (j in (i + 1):nopt) {
cm <- cumsum(nbs[, i] - nbs[, j])/nsim
if (nSegs[i, j] == 1) {
l <- which.min(cm)
u <- which.max(cm)
if (cm[u] - max(cm[1], cm[nsim]) > min(cm[1],
cm[nsim]) - cm[l]) {
segPoint <- u
}
else {
segPoint <- l
}
if (segPoint > 1 && segPoint < nsim) {
segPoints <- c(segPoints, segPoint)
}
}
if (nSegs[i, j] == 2) {
distMaxMin <- 0
distMinMax <- 0
minL <- Inf
maxL <- -Inf
for (sims in 1:nsim) {
if (cm[sims] > maxL) {
maxLP <- sims
maxL <- cm[sims]
}
else {
if (maxL - cm[sims] > distMaxMin) {
distMaxMin <- maxL - cm[sims]
segMaxMinL <- maxLP
segMaxMinR <- sims
}
}
if (cm[sims] < minL) {
minLP <- sims
minL <- cm[sims]
}
else {
if (cm[sims] - minL > distMinMax) {
distMinMax <- cm[sims] - minL
segMinMaxL <- minLP
segMinMaxR <- sims
}
}
}
siMaxMin <- cm[segMaxMinL] + distMaxMin +
(cm[nsim] - cm[segMaxMinR])
siMinMax <- -cm[segMaxMinL] + distMinMax -
(cm[nsim] - cm[segMinMaxR])
if (siMaxMin > siMinMax) {
segPoint <- c(segMaxMinL, segMaxMinR)
}
else {
segPoint <- c(segMinMaxL, segMinMaxR)
}
if (segPoint[1] > 1 && segPoint[1] <
nsim) {
segPoints <- c(segPoints, segPoint[1])
}
if (segPoint[2] > 1 && segPoint[2] <
nsim) {
segPoints <- c(segPoints, segPoint[2])
}
}
}
}
if (length(segPoints) > 0) {
segPoints2 <- unique(c(0, segPoints[order(segPoints)],
nsim))
res[k] <- 0
for (j in 1:(length(segPoints2) - 1)) {
res[k] <- res[k] + max(colSums(matrix(nbs[(1 +
segPoints2[j]):segPoints2[j + 1], ],
ncol = nopt)))/nsim
}
res[k] <- res[k] - max(colMeans(nbs))
}
else {
res[k] <- 0
}
}
res <- data.frame(evppi=res)
if (inherits(outputs, "cea"))
res <- cbind(k=outputs$k, res)
res
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evppi_sal.R
|
##' Convert either "net benefit" or "cost-effectiveness" forms for
##' 'outputs' to a 3D array (number of simulations x number of
##' willingness-to-pay values x number of decision options), with one
##' WTP value when `outputs` is in "net benefit" form.
##'
##' @keywords internal
form_nbarray <- function(outputs){
if (inherits(outputs, "cea")){
nsim <- nrow(outputs$c)
nk <- length(outputs$k)
nopt <- ncol(outputs$c) # number of decision options
nb <- array(dim=c(nsim, nk, nopt))
for (i in 1:nk){
nb[,i,] <- outputs$e * outputs$k[i] - outputs$c
}
} else {
nsim <- nrow(outputs)
nk <- 1
nopt <- ncol(outputs)
nb <- array(as.matrix(outputs), dim=c(nsim, nk, nopt))
}
nb
}
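## Tiny sketch: in "net benefit" form the outputs are already simulations x options,
## so the array has a single slice in the willingness-to-pay dimension.
if (FALSE) {
  nb <- form_nbarray(matrix(rnorm(20), ncol = 2))
  dim(nb)  # 10 x 1 x 2
}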
## Code taken from BCEA package
## Baio, G., Berardi, A., & Heath, A. (2017). Bayesian cost-effectiveness analysis with the R package BCEA. New York: Springer.
## https://github.com/giabaio/BCEA
evppi_so <- function(outputs, inputs, pars, ...){
n.blocks <- list(...)$n.blocks
if (is.null(n.blocks))
stop("`n.blocks` is required for method=\"so\"")
U <- form_nbarray(outputs)
nsim <- dim(U)[1]
nk <- dim(U)[2]
nopt <- dim(U)[3]
J <- nsim / n.blocks
check <- nsim %% n.blocks
if (check > 0) {
stop("the number of simulations must be divisible by `n.blocks`\n")
}
if (length(pars) > 1)
stop("`method=\"so\"` only works for single-parameter EVPPI")
sort.order <- order(inputs[, pars])
sort.U <- array(NA, dim(U))
res <- numeric()
for (i in 1:nk) {
sort.U[, i, ] <- U[sort.order, i, ]
U.array <- array(sort.U[, i, ], dim = c(J, n.blocks, nopt))
mean.k <- apply(U.array, c(2, 3), mean)
partial.info <- mean(apply(mean.k, 1, max))
res[i] <- partial.info - max(apply(U[,i,], 2, mean))
}
res <- data.frame(evppi=res)
if (inherits(outputs, "cea"))
res <- cbind(k=outputs$k, res)
res
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evppi_so.R
|
##' Calculate the expected value of partial perfect information for an estimation problem
##'
##' Calculate the expected value of partial perfect information for an estimation problem. This computes the expected reduction in variance in some quantity of interest with perfect information about a parameter or parameters of interest.
##'
##' @param outputs a vector of values for the quantity of interest, sampled from the uncertainty distribution of this quantity that is induced by the uncertainty about the parameters. This can also be a data frame with one column.
##'
##' Typically this will come from a Monte Carlo sample, where we first sample from the uncertainty distributions of the parameters, and then compute the quantity of interest as a function of the parameters. It might also be produced by a Markov Chain Monte Carlo sample from the joint distribution of parameters and outputs.
##'
##' @return A data frame with a column \code{pars}, indicating the parameter(s), and a column \code{evppi}, giving the corresponding EVPPI.
##'
##' @inheritParams evppi
##'
##' @references
##' Jackson, C., Presanis, A., Conti, S., & De Angelis, D. (2019). Value of information:
##' Sensitivity analysis and research design in Bayesian evidence synthesis.
##' Journal of the American Statistical Association, 114(528), 1436-1449.
##'
##' Jackson, C., Johnson, R., de Nazelle, A., Goel, R., de Sa, T. H.,
##' Tainio, M., & Woodcock, J. (2021). A guide to value of information
##' methods for prioritising research in health impact
##' modelling. Epidemiologic Methods, 10(1).
##'
##' Jackson, C. H., Baio, G., Heath, A., Strong, M., Welton, N. J., &
##' Wilson, E. C. (2022). Value of Information analysis in models to
##' inform health policy. Annual Review of Statistics and its
##' Application, 9, 95-118.
##'
##' @export
evppivar <- function(outputs,
inputs,
pars=NULL,
method=NULL,
nsim=NULL,
verbose=TRUE,
...)
{
inputs <- check_inputs(inputs, iname=deparse(substitute(inputs)))
check_outputs_vector(outputs, inputs)
if (is.list(pars)) {
return(evppivar_list(outputs=outputs, inputs=inputs, pars=pars,
method=method, nsim=nsim, verbose=verbose, ...))
}
pars <- check_pars(pars, inputs)
opts <- list(...)
if (is.null(method))
method <- default_evppi_method(pars)
if (is.null(nsim)) nsim <- nrow(inputs)
outputs <- outputs[1:nsim]
inputs <- inputs[1:nsim,,drop=FALSE]
if (method %in% npreg_methods) {
rese <- evppivar_npreg(outputs=outputs, inputs=inputs,
pars=pars, method=method, verbose=verbose, ...)
} else stop("Other methods not implemented yet")
res <- cbind(pars = paste(pars, collapse=","),
rese)
res
}
evppivar_list <- function(outputs, inputs, pars, method, nsim, verbose, ...)
{
npars <- length(pars)
eres <- vector(npars, mode="list")
for (i in seq_len(npars)){
eres[[i]] <- evppivar(outputs=outputs, inputs=inputs, pars=pars[[i]],
method=method, nsim=nsim, verbose=verbose,
...)
}
do.call("rbind", eres)
}
check_outputs_vector <- function(outputs, inputs){
if (is.data.frame(outputs)) {
if (ncol(outputs) > 1)
stop("if `outputs` is supplied as a data frame, it should have only one column")
outputs <- unlist(outputs)
}
if (!is.numeric(outputs))
stop("`outputs` should be a numeric vector or a data frame with one column")
if (length(outputs) != nrow(inputs))
stop(sprintf("Length of `outputs` (%s) should equal the number of rows of `inputs` (%s)",
length(outputs), nrow(inputs)))
}
evppivar_npreg <- function(outputs, inputs, pars, method, verbose, ...){
fitted <- fitted_npreg_fn(method)(y=outputs, inputs=inputs, pars=pars,
verbose=verbose, ...)
data.frame(evppi = var(fitted))
}
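## Hedged usage sketch for evppivar on toy data (names and numbers invented here):
## the result approximates the variance in the output explained by p1 alone.
if (FALSE) {
  set.seed(1)
  inputs <- data.frame(p1 = rnorm(1000), p2 = rnorm(1000))
  outputs <- with(inputs, p1 + p2 + rnorm(1000, sd = 0.5))
  evppivar(outputs, inputs, pars = "p1")
}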
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evppivar.R
|
##' Calculate the expected value of sample information from a decision-analytic
##' model
##'
##' Calculate the expected value of sample information from a decision-analytic
##' model
##'
##' See the \href{https://chjackson.github.io/voi/articles/voi.html#evsi}{package overview / Get Started vignette} for some examples of using this function.
##'
##' @inheritParams evppi
##'
##' @param study Name of one of the built-in study types supported by this
##' package for EVSI calculation. If this is supplied, then the columns of
##' \code{inputs} that correspond to the parameters governing the study data
##' should be identified in \code{pars}.
##'
##' Current built-in studies are
##'
##' \code{"binary"} A study with a binary outcome observed on one sample of
##' individuals. Requires one parameter: the probability of the outcome. The
##' sample size is specified in the \code{n} argument to \code{evsi()}, and the
##' binomially-distributed outcome is named \code{X1}.
##'
##' \code{"trial_binary"} Two-arm trial with a binary outcome. Requires two
##' parameters: the probability of the outcome in arm 1 and 2 respectively.
##' The sample size is the same in each arm, specified in the \code{n} argument
##' to \code{evsi()}, and the binomial outcomes are named \code{X1} and
##' \code{X2} respectively.
##'
##' \code{"normal_known"} A study of a normally-distributed outcome, with a
##' known standard deviation, on one sample of individuals. Likewise the
##' sample size is specified in the \code{n} argument to \code{evsi()}. The
##' standard deviation defaults to 1, and can be changed by specifying
##' \code{sd} as a component of the \code{aux_pars} argument, e.g.
##' \code{evsi(..., aux_pars=list(sd=2))}.
##'
##' Either \code{study} or \code{datagen_fn} should be supplied to
##' \code{evsi()}.
##'
##' For the EVSI calculation methods where explicit Bayesian analyses of the
##' simulated data are performed, the prior parameters for these built-in studies
##' are supplied in the \code{analysis_args} argument to \code{evsi()}. These
##' assume Beta priors for probabilities, and Normal priors for the mean of a
##' normal outcome.
##'
##'
##' @param datagen_fn If the proposed study is not one of the built-in types
##' supported, it can be specified in this argument as an R function to sample
##' predicted data from the study. This function should have the following
##' specification:
##'
##' 1. the function's first argument should be a data frame of parameter
##' simulations, with one row per simulation and one column per parameter.
##' The parameters in this data frame must all be found in \code{inputs},
##' but need not necessarily be in the same order or include all of them.
##'
##' 2. the function should return a data frame.
##'
##' 3. the returned data frame should have number of rows equal to the number
##' of parameter simulations in \code{inputs}.
##'
##' 4. if \code{inputs} is considered as a sample from the posterior, then
##' \code{datagen_fn(inputs)} returns a corresponding sample from the
##' posterior predictive distribution, which includes two sources of
##' uncertainty: (a) uncertainty about the parameters and (b) sampling
##' variation in observed data given fixed parameter values.
##'
##' 5. the function can optionally have more than one argument. If so, these
##' additional arguments should be given default values in the definition of
##' \code{datagen_fn}. If there is an argument called \code{n}, then it is
##' interpreted as the sample size for the proposed study.
##'
##' @param pars Character vector identifying which parameters are learned from the proposed study.
##' This is required for the moment matching and importance sampling methods,
##' and these should be columns of \code{inputs}. This is not required for the nonparametric
##' regression methods.
##'
##' @param pars_datagen Character vector identifying which columns of \code{inputs} are
##' the parameters required to generate data from the proposed study.
##' These should be columns of \code{inputs}.
##'
##' If \code{pars_datagen} is not supplied, then it is assumed to be the same as \code{pars}.
##' Note that these can be different. Even if the study data are generated by a particular parameter,
##' when analysing the data we could choose to ignore the information that the data provides about
##' that parameter.
##'
##' @param n Sample size of future study, or vector of alternative sample sizes.
##' This is understood by the built-in study designs. For studies specified
##' by the user with \code{datagen_fn}, if \code{datagen_fn} has an argument
##' \code{n}, then this is interpreted as the sample size. However if
##' calling \code{evsi} for a user-specified design where
##' \code{datagen_fn} does not have an \code{n} argument, then any \code{n}
##' argument supplied to \code{evsi} will be ignored.
##'
##' Currently this
##' shortcut is not supported if more than one quantity is required to
##' describe the sample size, for example, trials with unbalanced arms. In
##' that case, you will have to hard-code the required sample sizes into
##' `datagen_fn`.
##'
##' For the nonparametric regression and importance sampling methods, the
##' computation is simply repeated for each sample size supplied here.
##'
##' The moment matching method uses a regression model to estimate the
##' dependency of the EVSI on the sample size, hence to enable EVSI to be
##' calculated efficiently for any number of sample sizes (Heath et al. 2019).
##'
##' @param aux_pars A list of additional fixed arguments to supply to the
##' function to generate the data, whether that is a built-in study design or user-defined
##' function supplied in \code{datagen_fn}. For example, \code{evsi(..., aux_pars = list(sd=2))} defines the fixed
##' standard deviation in the \code{"normal_known"} model.
##'
##' @param method Character string indicating the calculation method. Defaults to \code{"gam"}.
##'
##' All the nonparametric regression methods supported for
##' \code{\link{evppi}}, that is \code{"gam","gp","earth","inla"}, can also be
##' used for EVSI calculation by regressing on a summary statistic of the
##' predicted data (Strong et al 2015).
##'
##' \code{"is"} for importance sampling (Menzies 2016)
##'
##' \code{"mm"} for moment matching (Heath et al 2018)
##'
##' Note that the \code{"is"} and \code{"mm"} methods are used in conjunction
##' with nonparametric regression, and the \code{gam_formula} argument can be
##' supplied to \code{evsi} to specify this regression - see
##' \code{\link{evppi}} for documentation of this argument.
##'
##' @param likelihood Likelihood function, required (and only required) for the
##' importance sampling method when a study design other than one of the
##' built-in ones is used. This should have two arguments, named as follows:
##'
##' \code{Y}: a one-row data frame of predicted data. Columns are defined by different
##' outcomes in the data, with names matching the names of the data frame returned by
##' \code{datagen_fn}.
##'
##' \code{inputs}: a data frame of simulated parameter values. Columns should correspond
##' to different variables in \code{inputs}. The column names should all be
##' found in the names of \code{inputs}, though they do not have to be in the same
##' order, or include everything in \code{inputs}. The number of rows should be the same as
##' the number of rows in \code{inputs}.
##'
##' The function should return a vector whose length matches the number of
##' rows of the parameters data frame given as the second argument. Each
##' element of the vector gives the likelihood of the corresponding set of
##' parameters, given the data in the first argument. An example is given in
##' the vignette.
##'
##' The likelihood can optionally have a \code{n} argument, which is interpreted
##' as the sample size of the study. If the \code{n}
##' argument to \code{evsi} is used then this is passed to the likelihood function.
##' Conversely any \code{n} argument to \code{evsi} will be ignored by a likelihood
##' function that does not have its own \code{n} argument.
##'
##' Note the definition of the likelihood should agree with the definition of
##' \code{datagen_fn} to define a consistent sampling distribution for the
##' data. No automatic check is performed for this.
##'
##' @param analysis_fn Function which fits a Bayesian model to the generated
##' data. Required for \code{method="mm"} if a study design other than one
##' of the built-in ones is used. This should be a function that takes the
##' following arguments:
##'
##' `data`: A data frame with names matching the output of `datagen_fn`
##'
##' `args`: A list with constants required in the Bayesian analysis, e.g.
##' prior parameters, or options for the analysis, e.g. number of MCMC
##' simulations. The component of this list called \code{n} is assumed to
##' contain the sample size of the study.
##'
##' `pars` Names of the parameters whose posterior is being sampled.
##'
##' The function should return a data frame with names matching `pars`,
##' containing a sample from the posterior distribution of the parameters
##' given data supplied through `data`.
##'
##' `analysis_fn` is required to have all three of these arguments, but you do
##' not need to use any elements of `args` or `pars` in the body of
##' `analysis_fn`. Instead, sample sizes, prior parameters, MCMC options and
##' parameter names can alternatively be hard-coded inside `analysis_fn`. Passing these
##' through the function arguments (via the \code{analysis_args} argument to
##' \code{evsi}) is only necessary if we want to use the same `analysis_fn` to
##' do EVSI calculations with different sample sizes or other settings.
##'
##' @param analysis_args List of arguments required for the Bayesian analysis of
##' the predicted data, e.g. definitions of the prior and options to control
##' sampling. Only used in \code{method="mm"}. This is required if the study
##' design is one of the built-in ones specified in \code{study}. If a custom
##' design is specified through \code{analysis_fn}, then any constants needed
##' in `analysis_fn` can either be supplied in `analysis_args`, or hard-coded
##' in `analysis_fn` itself.
##'
##' For the built-in designs, the lists should have the following named
##' components. An optional component `niter` in each case defines the
##' posterior sample size (default 1000).
##'
##' `study="binary"`: `a` and `b`: Beta shape parameters
##'
##' `study="trial_binary"`: `a1` and `b1`: Beta shape parameters for the prior
##' for the first arm, `a2` and `b2`: Beta shape parameters for the prior for
##' the second arm.
##'
##' `study="normal_known"`: `prior_mean`, `prior_sd` (mean and standard deviation
##' of the Normal prior) and `sampling_sd` (SD of an individual-level normal
##' observation, so that the sampling SD of the mean outcome over the study is
##' `sampling_sd/sqrt(n)`).
##'
##' @param model_fn Function which evaluates the decision-analytic model, given
##' parameter values. Required for \code{method="mm"}. See
##' \code{\link{evppi_mc}} for full documentation of the required specification
##' of this function.
##'
##' @param par_fn Function to simulate values from the uncertainty distributions
##' of parameters needed by the decision-analytic model. Should take one
##' argument and return a data frame with one row for each simulated value,
##' and one column for each parameter. See \code{\link{evppi_mc}} for full
##' specification.
##'
##' @param Q Number of quantiles to use in \code{method="mm"}.
##'
##' @param npreg_method Method to use to calculate the EVPPI, for those methods
##' that require it. This is passed to \code{\link{evppi}} as the
##' \code{method} argument.
##'
##' @param nsim Number of simulations from the model to use for calculating
##' EVPPI. The first \code{nsim} rows of the objects in \code{inputs} and
##' \code{outputs} are used.
##'
##' @param ... Other arguments understood by specific methods, e.g. \code{gam_formula}
##' and other controlling options (see \code{\link{evppi}}) can be passed to the
##' nonparametric regression used inside the moment matching method.
##'
##' @return A data frame with a column \code{pars}, indicating the
##' parameter(s), and a column \code{evsi}, giving the corresponding
##' EVSI. If the EVSI for multiple sample sizes was requested,
##' then the sample size is returned in the column \code{n}, and if
##' \code{outputs} is of "cost-effectiveness analysis" form, so that
##' there is one EVSI per willingness-to-pay value, then a column
##' \code{k} identifies the willingness-to-pay.
##'
##' @references
##'
##' Strong, M., Oakley, J. E., Brennan, A., & Breeze, P. (2015). Estimating the
##' expected value of sample information using the probabilistic sensitivity
##' analysis sample: a fast, nonparametric regression-based method. Medical
##' Decision Making, 35(5), 570-583.
##'
##' Menzies, N. A. (2016). An efficient estimator for the expected value of
##' sample information. Medical Decision Making, 36(3), 308-320.
##'
##' Heath, A., Manolopoulou, I., & Baio, G. (2018). Efficient Monte Carlo
##' estimation of the expected value of sample information using moment
##' matching. Medical Decision Making, 38(2), 163-173.
##'
##' Heath, A., Manolopoulou, I., & Baio, G. (2019). Estimating the expected
##' value of sample information across different sample sizes using moment
##' matching and nonlinear regression. Medical Decision Making, 39(4), 347-359.
##'
##' @export
evsi <- function(outputs,
inputs,
study=NULL,
datagen_fn=NULL,
pars=NULL,
pars_datagen=NULL,
n=100,
aux_pars=NULL,
method=NULL,
likelihood=NULL,
analysis_fn=NULL,
analysis_args=NULL,
model_fn=NULL,
par_fn=NULL,
Q=50,
npreg_method="gam",
nsim=NULL,
verbose=FALSE,
check=FALSE,
...)
{
check_inputs(inputs)
check_ss(n)
outputs <- check_outputs(outputs, inputs)
if (is.null(method))
method <- default_evsi_method()
if (is.null(nsim)) nsim <- nrow(inputs)
outputs <- subset_outputs(outputs, nsim)
inputs <- inputs[1:nsim,,drop=FALSE]
datagen_fn <- form_datagen_fn(study, datagen_fn, inputs, aux_pars)
## Could use any nonparametric regression method to regress on a summary statistic, in identical way to EVPPI estimation.
pars_datagen <- check_pars_datagen(pars, pars_datagen, inputs)
npreg_method <- default_npreg_method(pars)
if (method %in% npreg_methods) {
res <- evsi_npreg(outputs=outputs, inputs=inputs,
datagen_fn=datagen_fn, pars=pars_datagen, n=n,
method=method, verbose=verbose, check=check,
aux_pars = aux_pars, ...)
} else if (method=="is") {
if (verbose) message("Processing likelihood...")
likelihood <- form_likelihood(study, likelihood, inputs, datagen_fn, pars_datagen)
if (verbose) message("Entering evsi_is...")
res <- evsi_is(outputs=outputs, inputs=inputs,
pars=pars, pars_datagen=pars_datagen,
datagen_fn=datagen_fn, n=n,
aux_pars=aux_pars, likelihood=likelihood,
npreg_method=npreg_method, verbose=verbose, ...)
} else if (method=="mm") {
res <- evsi_mm(outputs=outputs, inputs=inputs,
pars=pars, pars_datagen=pars_datagen,
datagen_fn=datagen_fn,
study=study,
analysis_fn=analysis_fn,
analysis_args=analysis_args,
model_fn=model_fn,
par_fn=par_fn,
n=n, Q=Q,
npreg_method=npreg_method,
verbose=verbose, ...)
}
else stop("Other methods not implemented yet")
attr(res, "method") <- method
class(res) <- c("evsi", attr(res,"class"))
res
}
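## Hedged usage sketch for evsi() with the built-in "binary" study design
## (toy decision model and numbers invented here for illustration):
if (FALSE) {
  set.seed(1)
  nsim <- 1000
  inputs <- data.frame(p_response = rbeta(nsim, 5, 5))
  outputs <- data.frame(standard_care = 0,
                        new_treatment = 20000 * inputs$p_response - 10000)
  evsi(outputs, inputs, study = "binary", pars = "p_response", n = c(50, 200))
}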
evsi_npreg <- function(outputs, inputs, datagen_fn, pars, n, method=NULL, se=FALSE, B=500, verbose, check, aux_pars=NULL, ...){
nn <- length(n)
res <- vector(nn, mode="list")
validate_char(pars, "pars")
for (i in seq_along(n)){
Tdata <- generate_data(inputs, datagen_fn, n[i], pars, aux_pars)
res[[i]] <- evppi_npreg(outputs=outputs, inputs=Tdata,
pars=names(Tdata), method=method, se=se, B=B, verbose=verbose, ...)
names(res[[i]])[names(res[[i]])=="evppi"] <- "evsi"
rownames(res[[i]]) <- NULL
}
resall <- do.call("rbind", res)
resall <- cbind(n=rep(n,each=nrow(res[[1]])), resall)
if (check){
attr(resall, "models") <- lapply(res, function(x)attr(x, "models"))
names(attr(resall,"models")) <- as.character(resall$n)
}
resall
}
generate_data <- function(inputs, datagen_fn, n=100, pars, aux_pars=NULL){
check_datagen_fn(datagen_fn, inputs, pars)
args <- list(inputs=inputs, n=n, pars=pars)
args <- c(args, aux_pars)
do.call(datagen_fn, args)
}
default_evsi_method <- function(){
"gam"
}
check_study <- function(study) {
if (!is.character(study) || (!(study %in% studies_builtin)))
stop("`study` should be a character string matching one of the supported study designs")
}
form_datagen_fn <- function(study, datagen_fn, inputs, aux_pars=NULL){
if (!is.null(study)) {
check_study(study)
if (!is.null(datagen_fn))
warning("Ignoring `datagen_fn`, since a built-in study design was requested")
datagen_fn <- get(sprintf("datagen_%s", study))
} else {
if (is.null(datagen_fn)) stop("`datagen_fn` should be supplied if `study` is not supplied")
if (!is.function(datagen_fn)) stop("`datagen_fn` should be a function")
if (!("n" %in% names(formals(datagen_fn))))
formals(datagen_fn) <- c(formals(datagen_fn), list(n=100))
formals(datagen_fn) <- c(formals(datagen_fn), list(pars=NULL))
check_datagen_fn(datagen_fn, inputs, aux_pars)
}
datagen_fn
}
check_datagen_fn <- function(datagen_fn, inputs, pars=NULL, aux_pars=NULL){
## If there's more than one argument, check that those args have
## defaults (e.g. sample sizes). Give error for now if not, but
## consider relaxing if this becomes a problem
extra_args <- formals(datagen_fn)[-1]
extra_args <- extra_args[names(extra_args) != "pars"]
if (length(extra_args)>0){
no_defaults <- sapply(extra_args, is.symbol)
if (any(no_defaults)){
stop(sprintf("Arguments \"%s\" of `datagen_fn` do not have default values", paste(names(extra_args)[no_defaults],collapse=",")))
}
}
ret <- datagen_fn(inputs, pars=pars)
if (!is.data.frame(ret)) stop("`datagen_fn` should return a data frame")
parnames <- names(ret)[names(ret) %in% names(inputs)]
if (length(parnames)>0) {
stop(sprintf("`datagen_fn` returns variables with the same names as parameters (%s). It should return simulated data", paste(parnames, collapse=",")))
}
if (nrow(ret) != nrow(inputs)){
stop(sprintf("`datagen_fn` returns a data frame with %s rows. There should be %s rows, the same number of rows as `inputs`", nrow(ret), nrow(inputs)))
}
}
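## Hedged sketch of a user-defined `datagen_fn` satisfying the checks above:
## the first argument is the data frame of parameter simulations, `n` has a default,
## and it returns one row of simulated data per row of `inputs`, with names that
## differ from the parameter names. `p_response` and `X1` are invented names.
if (FALSE) {
  datagen_fn <- function(inputs, n = 100) {
    data.frame(X1 = rbinom(nrow(inputs), size = n, prob = inputs$p_response))
  }
}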
form_analysis_fn <- function(study, analysis_fn, analysis_args, datagen_fn, inputs, n, pars, pars_datagen){
if (!is.null(study)){
check_study(study)
analysis_fn <- get(sprintf("analysis_%s", study))
} else {
if (is.null(analysis_fn)) stop("`analysis_fn` should be supplied if `study` is not supplied")
check_analysis_fn(analysis_fn, analysis_args, datagen_fn, inputs, n, pars, pars_datagen)
}
analysis_fn
}
check_analysis_fn <- function(analysis_fn, analysis_args, datagen_fn, inputs, n, pars, pars_datagen){
if (!is.function(analysis_fn)) stop("`analysis_fn` should be a function")
if (!identical(names(formals(analysis_fn)), c("data","args","pars")))
stop("`analysis_fn` should have arguments `data`,`args`,`pars` in that order")
testdata <- datagen_fn(inputs = inputs[1,,drop=FALSE], n = n, pars=pars_datagen)
post_pars <- analysis_fn(data = testdata, args = analysis_args, pars=pars)
missing_pars <- setdiff(pars, names(post_pars))
if (length(missing_pars) > 0){
badpars <- paste(missing_pars, collapse=",")
stop(sprintf("Parameters %s not found in data frame returned by `analysis_fn`", badpars))
}
}
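## Illustrative sketch (not part of the package API) of a user-written
## `analysis_fn` satisfying the conventions checked above: it takes the
## simulated data, a named list of arguments (which always includes the sample
## size `n`, and here is assumed to also include prior parameters `a`, `b` and
## a posterior sample size `niter`), and the parameter names, and returns a
## data frame of posterior samples with one column per element of `pars`.
## A conjugate Beta-Binomial analysis of a single probability parameter is
## assumed.
##
## analysis_binary <- function(data, args, pars){
##   post <- data.frame(rbeta(args$niter,
##                            shape1 = args$a + data$n_se,
##                            shape2 = args$b + args$n - data$n_se))
##   names(post) <- pars
##   post
## }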
check_pars_in_modelfn <- function(pars, model_fn){
mfargs <- names(formals(model_fn))
missing_args <- setdiff(pars, mfargs)
if (length(missing_args) > 0){
badpars <- paste(missing_args, collapse=",")
stop(sprintf("Parameters %s are not arguments to model_fn. Should model_fn be reparameterised?", badpars))
}
}
check_ss <- function(n){
if (!is.numeric(n))
stop("sample size `n` should be a numeric vector")
  if (any(n <= 0))
    stop("sample sizes `n` should all be positive")
}
form_analysis_args <- function(analysis_args, study, n){
if (!is.null(study)){
if (study %in% studies_builtin){
if (!is.list(analysis_args))
stop("analysis_args should be supplied as a named list if using one of the built-in study designs")
}
}
if (is.null(analysis_args))
analysis_args <- list() # for testing
if (is.null(analysis_args$n))
analysis_args$n <- n[1]
analysis_args
}
check_pars_datagen <- function(pars, pars_datagen, inputs){
if (is.null(pars_datagen))
pars_datagen <- pars
check_pars(pars_datagen, inputs, evppi=FALSE)
pars_datagen
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evsi.R
|
## Importance sampling method for calculating EVSI (Menzies)
evsi_is <- function(outputs, inputs, pars, pars_datagen,
                    datagen_fn, n=100, aux_pars=NULL,
likelihood, npreg_method="gam", verbose, ...){
check_pars(pars, inputs, evppi=FALSE)
nn <- length(n)
res <- vector(nn, mode="list")
for (i in seq_along(n)){
res[[i]] <- data.frame(
n = n[i],
evsi = evsi_is_singlen(outputs, inputs, pars, pars_datagen,
                             datagen_fn=datagen_fn, n=n[i], aux_pars=aux_pars,
likelihood=likelihood, npreg_method=npreg_method, verbose=verbose,
...)
)
if (inherits(outputs, "cea")) res[[i]] <- cbind(k = outputs$k, res[[i]])
}
do.call("rbind", res)
}
evsi_is_singlen <- function(outputs, inputs, pars, pars_datagen,
                    datagen_fn, n=100, aux_pars=NULL, likelihood, npreg_method="gam", verbose, ...){
UseMethod("evsi_is", outputs)
}
evsi_is.nb <- function(outputs, inputs, pars, pars_datagen,
datagen_fn, n, aux_pars, likelihood, npreg_method, verbose, ...){
nbfit <- prepost_evsi_is(outputs, inputs=inputs, pars=pars, pars_datagen=pars_datagen,
datagen_fn=datagen_fn, n=n,
aux_pars=aux_pars,
likelihood=likelihood, npreg_method=npreg_method,
verbose=verbose, ...)
calc_evppi(nbfit)
}
evsi_is.cea <- function(outputs, inputs, pars, pars_datagen,
datagen_fn, n, aux_pars, likelihood, npreg_method, verbose, ...){
wtp <- outputs$k
nwtp <- length(wtp)
res <- numeric(nwtp)
cfit <- prepost_evsi_is(outputs$c, inputs=inputs, pars=pars, pars_datagen=pars_datagen,
datagen_fn=datagen_fn, n=n,
aux_pars=aux_pars,
likelihood=likelihood, npreg_method=npreg_method, verbose=verbose, ...)
efit <- prepost_evsi_is(outputs$e, inputs=inputs, pars=pars, pars_datagen=pars_datagen,
datagen_fn=datagen_fn, n=n,
aux_pars=aux_pars,
likelihood=likelihood, npreg_method=npreg_method, verbose=verbose, ...)
for (i in 1:nwtp){
nbfit <- efit*wtp[i] - cfit
res[i] <- calc_evppi(nbfit)
}
res
}
prepost_evsi_is <- function(out, inputs, pars, pars_datagen,
datagen_fn, n=100,
                            aux_pars=NULL,
likelihood, npreg_method="gam", verbose, ...){
if (verbose) message("Generating data...")
simdat <- generate_data(inputs, datagen_fn=datagen_fn, n=n, pars=pars_datagen, aux_pars=aux_pars)
if (is.null(npreg_method))
npreg_method <- default_evppi_method(pars)
if (verbose) message("Calculating EVPPI...")
y <- fitted_npreg(out, inputs=inputs, pars=pars, method=npreg_method, se=FALSE, verbose=verbose, ...)
if (verbose) message("Calculating EVSI...")
nsam <- nrow(inputs)
nout <- ncol(y) # this doesn't handle 1D (for evsivar?)
prepost <- matrix(nrow=nsam, ncol=nout)
pb <- progress::progress_bar$new(total = nsam)
for (i in 1:nsam){
ll <- eval_likelihood(likelihood, Y=simdat[i,,drop=FALSE], inputs=inputs,
n=n, pars=pars, aux_pars=aux_pars)
w <- ll/sum(ll)
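    ## Self-normalised importance weights: w[k] is proportional to the
    ## likelihood of the i-th simulated dataset given the k-th sampled
    ## parameter set, so the weighted average below estimates the expected
    ## net benefit given that dataset (Menzies' importance sampling method).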
for (j in 1:nout) {
## This is slow, though to vectorise would need nsam x nsam storage
## Could do in C? though could we still work with user's lik fn?
prepost[i,j] <- w %*% y[,j]
}
pb$tick()
}
prepost
}
eval_likelihood <- function(likelihood, Y, inputs, n=100, pars, aux_pars=NULL){
args <- list(Y=Y, inputs=inputs, n=n, pars=pars)
args <- c(args, aux_pars)
do.call(likelihood, args)
}
form_likelihood <- function(study, likelihood, inputs, datagen_fn, pars){
if (!is.null(study)){
if (!is.null(likelihood))
warning("Ignoring `likelihood`, since a built-in study design was requested")
likelihood <- get(sprintf("likelihood_%s", study))
}
else {
if (is.null(likelihood)) stop("`likelihood` should be supplied for method=\"is\"")
if (!is.function(likelihood)) stop("`likelihood` should be a function")
if (length(formals(likelihood)) < 2) stop("`likelihood` should have at least two arguments")
if (!identical(names(formals(likelihood))[1:2], c("Y","inputs")))
stop("The first two arguments of `likelihood` should be named `Y` and `inputs`")
if (!("n" %in% names(formals(likelihood))))
formals(likelihood) <- c(formals(likelihood), list(n=100))
formals(likelihood) <- c(formals(likelihood), list(pars=NULL))
check_likelihood(likelihood, inputs, datagen_fn, pars)
}
likelihood
}
check_likelihood <- function(likelihood, inputs, datagen_fn, pars){
## check that when likelihood is called with first two arguments data frames with
## 1. names matching output of datagen_fn
## 2. names matching inputs
## returns output: vector length equal to nrow(inputs)
data_sim <- datagen_fn(inputs, pars=pars)
ret <- likelihood(data_sim[1,,drop=FALSE], inputs=inputs)
if (!is.vector(ret) | !is.numeric(ret))
stop("likelihood function should return a numeric vector")
if (length(ret) != nrow(inputs))
stop(sprintf("likelihood function returns a vector of length %s, should be length %s, the number of rows in `inputs`", length(ret), nrow(inputs)))
}
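## Illustrative sketch (not part of the package API) of a matching user-written
## `likelihood` function for a study recording `n_se` side effects out of `n`
## patients: given one row of simulated data `Y` and the full `inputs` data
## frame, it returns one likelihood value per row of `inputs`.  (A default `n`
## and a `pars` argument are appended by `form_likelihood` if omitted.)
##
## likelihood_binary <- function(Y, inputs, n = 100){
##   dbinom(Y$n_se, size = n, prob = inputs$p_side_effects_t2)
## }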
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evsi_is.R
|
#' Moment matching method for calculating EVSI
#'
#' @inheritParams evsi
#'
#' @param ... Options passed to nonparametric regression functions
#'
#' @return Data frame with EVSI estimates, as documented in \code{\link{evsi}}.
#'
#' @noRd
evsi_mm <- function(outputs,
inputs,
pars,
pars_datagen,
datagen_fn,
study,
analysis_fn,
analysis_args,
model_fn,
par_fn,
n=100,
Q=50,
npreg_method="gam",
verbose, ...){
check_pars(pars, inputs, evppi=FALSE)
model_fn <- check_model_fn(model_fn, par_fn, mfargs=NULL, outputs, verbose=verbose)
check_pars_in_modelfn(pars, model_fn)
analysis_args <- form_analysis_args(analysis_args, study, n)
analysis_fn <- form_analysis_fn(study, analysis_fn, analysis_args, datagen_fn, inputs, n, pars, pars_datagen)
if (inherits(outputs,"nb")){
evsi_mm_nb(outputs, inputs=inputs,
pars=pars, pars_datagen=pars_datagen,
datagen_fn=datagen_fn, n=n,
Q=Q,
analysis_fn=analysis_fn,
analysis_args=analysis_args,
model_fn=model_fn,
par_fn=par_fn,
npreg_method=npreg_method,
verbose=verbose, ...)
}
else if (inherits(outputs,"cea")) {
evsi_mm_cea(outputs, inputs=inputs,
pars=pars, pars_datagen=pars_datagen,
datagen_fn=datagen_fn, n=n,
Q=Q,
analysis_fn=analysis_fn,
analysis_args=analysis_args,
model_fn=model_fn,
par_fn=par_fn,
npreg_method=npreg_method,
verbose=verbose, ...)
}
}
#' Moment matching method for calculating EVSI
#'
#' @inheritParams evsi
#'
#' Specific to `outputs` of net benefit form
#'
#' @noRd
evsi_mm_nb <- function(outputs, inputs, pars, pars_datagen, datagen_fn, n,
Q,
analysis_fn,
analysis_args,
model_fn,
par_fn,
npreg_method="gam",
verbose=FALSE, ...){
fits <- evsi_mm_core(nb=outputs, inputs, pars, pars_datagen, datagen_fn, n, Q,
analysis_fn, analysis_args,
model_fn, par_fn, output_row=NULL, npreg_method, verbose, ...)
evsis <- evppis <- numeric(length(n))
for (i in 1:length(n)){
evsis[i] <- calc_evppi(fits$fit_rescaled[,,i])
}
res <- data.frame(n=n, evsi=evsis)
attr(res, "evppi") <- data.frame(evppi = calc_evppi(fits$fit))
if (any(attr(res,"evppi")$evppi < res$evsi))
message("EVSI > EVPPI may result from approximation error")
res
}
#' Moment matching method for calculating EVSI
#'
#' @inheritParams evsi
#'
#' Specific to `outputs` of "cost-effectiveness analysis" form
#'
#' @noRd
evsi_mm_cea <- function(outputs, inputs, pars,
pars_datagen,
datagen_fn, n,
Q,
analysis_fn,
analysis_args,
model_fn,
par_fn,
npreg_method="gam",
verbose=FALSE, ...){
cfits <- evsi_mm_core(outputs$c, inputs, pars, pars_datagen, datagen_fn, n, Q,
analysis_fn, analysis_args,
model_fn, par_fn,
output_row = "c",
npreg_method, verbose, ...)
efits <- evsi_mm_core(outputs$e, inputs, pars, pars_datagen, datagen_fn, n, Q,
analysis_fn, analysis_args,
model_fn, par_fn,
output_row = "e",
npreg_method, verbose, ...)
evsis <- evppis <- vector(length(n), mode="list")
for (i in 1:length(n)){
evsis[[i]] <- calc_evppi_ce(cfits$fit_rescaled[,,i], efits$fit_rescaled[,,i],
outputs$k, verbose=verbose)$evppi
evsis[[i]] <- cbind(n=n[i], k=outputs$k, evsi=evsis[[i]])
}
res <- as.data.frame(do.call("rbind", evsis))
attr(res, "evppi") <- calc_evppi_ce(cfits$fit, efits$fit, outputs$k, verbose=verbose)
res
}
#' Moment-matching method for EVSI calculation - all the hard work is
#' done in this core function.
#'
#' @inheritParams evsi
#'
#' @param nb Matrix of outputs, with columns indicating decision
#' options, and rows indicating samples. Could be either net
#' benefits, costs or effects.
#'
#' @param output_row Name of the row of the decision model output
#' that is used. Only used if `nb` is costs or effects.
#'
#' @return A list with components:
#'
#' `fit` Fitted values for EVPPI calculation. Matrix with `nsim` rows
#' (number of samples from the parameter uncertainty distribution) and
#' `d` columns (number of decision options). "Fitted values" means
#' the expected net benefit given further information, conditionally
#' on specific parameter values.
#'
#' `fits` Array of fitted values for EVSI calculation, with dimensions
#' `nsim` x `d` x `length(n)`, where `length(n)` is the number of
#' distinct sample sizes that the EVSI calculation was requested for.
#'
#' `p_shrink` is the ratio of standard deviations, describing the
#' proportion of uncertainty explained by the further information
#' gained from the proposed study.
#'
#' @noRd
evsi_mm_core <- function(nb, # could actually be nb, or c, or e
inputs,
pars,
pars_datagen,
datagen_fn,
n,
Q,
analysis_fn,
analysis_args,
model_fn,
par_fn,
output_row=NULL,
npreg_method="gam",
verbose=FALSE, ...){
## Determine grid of Q parameters from quantiles
quants <- mm_gen_quantiles(pars_datagen, inputs, Q)
## Generate future data given these parameters
ncomp <- ncol(nb) - 1 # number of comparisons, not number of decision options
ncov <- ncomp*(ncomp+1)/2
var_sim <- matrix(nrow=Q, ncol=ncov)
pb <- progress::progress_bar$new(total = Q)
if (length(n) > 1) {
nfit <- unique(round(seq(sqrt(min(n)), sqrt(max(n)), length=Q)^2))
} else nfit <- rep(n, Q)
for(i in 1:Q){
## Generate one dataset given parameters equal to a specific prior quantile
simdata <- datagen_fn(inputs = quants[i,,drop=FALSE], n = nfit[i], pars=pars_datagen)
## Fit Bayesian model to future data to get a sample from posterior(pars|simdata)
analysis_args$n <- nfit[i]
if (verbose) message("Running Bayesian analysis function...")
postpars <- analysis_fn(simdata, analysis_args, pars)
niter <- nrow(postpars)
if (i==1)
priorpars <- par_fn(niter)
## Combine with samples from the prior for remaining parameters of the decision model
## (newly-generated samples, in case number of posterior samples (niter) desired is more than nrow(inputs))
modelpars <- priorpars[names(formals(model_fn))]
modelpars[names(postpars)] <- postpars
## Run the decision model, giving a sample from posterior(INB|simdata)
inbpost <- matrix(nrow=niter, ncol=ncomp)
if (verbose) message("Evaluating decision model at updated parameters...")
for (j in 1:niter){
nbpost <- do.call(model_fn, modelpars[j,,drop=FALSE])
if (!is.null(output_row)) nbpost <- nbpost[mfi(nbpost)[[output_row]],]
inbpost[j,] <- nbpost[-1] - nbpost[1]
}
var_sim[i,] <- covvec(inbpost)
pb$tick()
}
mean_prep_var <- apply(var_sim, 2, mean)
inbprior <- nb[,-1,drop=FALSE] - nb[,1]
prior_var <- covvec(inbprior)
if (verbose) message("Calculating fitted values for EVPPI...")
fit <- fitted_npreg(nb, inputs=inputs, pars=pars, method=npreg_method, verbose=verbose, ...)
fitn1 <- fit[,-1,drop=FALSE]
var_fit <- covvec(fitn1)
mean_fit <- apply(fitn1, 2, mean)
var_prep_mean <- matrix(nrow=length(n), ncol=ncov)
if (length(n) > 1){
## do regression to relate variance reduction to sample size
quants <- c(0.025, 0.125, 0.5, 0.875, 0.975)
var_red_n <- matrix(nrow=length(n), ncol=5, dimnames=list(NULL, quants))
if (verbose) message("Running regression on sample size...")
for (d in 1:ncov){
var_red <- prior_var[d] - var_sim[,d]
if (sd(var_red)==0)
beta <- 0
else
beta <- regression_on_sample_size(var_red, nfit, var_fit[d])
for (j in 1:length(n)){
var_red_n[j,] <- quantile(var_fit[d] * n[j] / (n[j] + beta), quants)
var_prep_mean[j,d] <- var_red_n[j,"0.5"]
}
}
} else {
var_prep_mean[1,] <- pmax(0, prior_var - mean_prep_var) # Correct for Monte Carlo error
}
fit_rescaled <- array(dim = c(dim(fit), length(n)))
if (ncomp==1){
p_shrink <- numeric(length(n))
s2 <- sqrt(var_fit)
} else {
p_shrink <- vector(length(n), mode="list")
s2inv <- MatrixSqrt(cov(fitn1), inverse=TRUE)
}
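  ## Moment matching: shrink the EVPPI fitted values towards their mean so that
  ## their (co)variance matches the estimated preposterior variance for a study
  ## of size n[j], i.e. fit_rescaled = (fit - mean(fit)) * p_shrink + mean(fit)
  ## (a matrix product when there are two or more comparisons).  p_shrink near 1
  ## means the study is nearly as informative as perfect information (EVSI close
  ## to EVPPI); p_shrink near 0 gives EVSI close to 0.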
for (j in 1:length(n)){
fit_rescaled[,1,j] <- 0
if (ncomp == 1){
s1 <- sqrt(var_prep_mean[j,])
p_shrink[j] <- s1/s2 # prop of unc explained by new data. if 1 then EVSI=EVPPI, if 0 then EVSI=0.
fit_rescaled[,2,j] <- (fitn1[,1] - mean_fit[1]) * p_shrink[j] + mean_fit[1]
}
else {
s1 <- MatrixSqrt(covvec2mat(var_prep_mean[j,]))
p_shrink[[j]] <- s2inv %*% s1
mean_mat <- matrix(mean_fit, nrow=nrow(fitn1), ncol = ncomp, byrow = TRUE)
fit_rescaled[,-1,j] <- (fitn1 - mean_mat) %*% p_shrink[[j]] + mean_mat
}
}
list(fit=fit, fit_rescaled=fit_rescaled, p_shrink=p_shrink)
}
#' @param pars Character vector of parameters required to generate the study data
#'
#' @param inputs Data frame with sampled values from current distribution of `pars`
#'
#' @param Q Number of equally-spaced quantiles to generate
#'
#' @return Data frame with one column for each parameter in `pars` and one row per quantile.
#' For each variable, the quantiles are randomly permuted.
#'
#' A future version of this function should perhaps use Sobol sequences (randtoolbox package).
#'
#' @noRd
mm_gen_quantiles <- function(pars,
inputs,
Q,
N.size = NULL){
quants <- array(NA, dim = c(Q, length(pars)))
colnames(quants) <- pars
for(i in 1:length(pars)){
quants[,i] <- sample(quantile(inputs[,pars[i]],
probs = 1:Q / (Q + 1), type = 4))
}
as.data.frame(quants)
}
#' Bayesian nonlinear regression of the variance reduction in terms of
#' the proposed study sample size
#'
#' @return A sample from the posterior distribution of the parameter
#' `beta` governing this dependence.
#'
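#' The model assumed for the regression is
#' var_reduction[i] ~ Normal(var_base * n[i] / (n[i] + beta), sigma^2),
#' so `beta` is the study sample size at which half of the maximum achievable
#' variance reduction `var_base` (the variance explained by learning the
#' parameters exactly) is reached.
#'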
#' @noRd
regression_on_sample_size <- function(var_reduction,
sample_size,
var_base) {
if (!requireNamespace("rjags", quietly = TRUE)) {
stop("JAGS and the R package \"rjags\" must be installed to use this function.", call. = FALSE)
}
dat <- list(
N = length(sample_size),
y = as.vector(var_reduction),
x = as.vector(sample_size),
sigma_mu = sd(var_reduction)/2,
sigma_tau = 1/sd(var_reduction),
Nmax_par = max(sample_size)/2,
shape_Nmax = 0.05 / max(sample_size),
var_base = var_base
)
ini <- list(sigma=sd(var_reduction)/2, beta=1)
mod <- "model {
for (i in 1:N) {
y[i] ~ dnorm(mu[i], tau)
mu[i] <- var_base * (x[i]/(x[i] + beta))
}
tau <- 1/(sigma*sigma)
sigma ~ dt(sigma_mu, sigma_tau, 3)I(0, )
beta ~ dnorm(Nmax_par, shape_Nmax)I(0, )
}
"
jagsmod <- rjags::jags.model(textConnection(mod), data=dat, inits=ini, quiet=TRUE)
update(jagsmod, 1000, progress.bar="none")
sam <- rjags::coda.samples(jagsmod, variable.names="beta", n.iter=3000, progress.bar="none")
beta <- as.data.frame(sam[[1]])[,1]
beta
}
## Covariances as a vector rather than a matrix
covvec <- function(x){
covmat2vec(cov(x))
}
## Convert a covariance matrix to a vector (excluding the above-diagonals)
covmat2vec <- function(x){
c(diag(x), x[lower.tri(x)])
}
## Convert a vectorised covariance matrix back to a matrix
covvec2mat <- function(x){
n <- trunc(sqrt(length(x)*2))
mat <- diag(x[1:n])
covs <- x[(n+1):length(x)]
mat[lower.tri(mat)] <- covs
mat[upper.tri(mat)] <- t(mat)[upper.tri(t(mat))]
mat
}
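## e.g. for m <- matrix(c(4, 1, 1, 9), nrow=2), covmat2vec(m) is c(4, 9, 1)
## (variances first, then the below-diagonal covariance), and
## covvec2mat(c(4, 9, 1)) reconstructs m.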
#' Matrix square root. Used to implement (co)variance reduction when
#' using the moment matching method for models with two or more
#' treatment comparisons (i.e. three or more decision options).
#'
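#' Returns a matrix S such that S %*% S reproduces `x`, after shifting `x` to
#' the nearest positive definite matrix if it has any negative eigenvalues.
#' With `inverse=TRUE`, the inverse of this square root is returned instead.
#'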
#' @noRd
MatrixSqrt <- function(x, inverse=FALSE){
e <- eigen(x)
if (any(e$values<0)){
e <- eigen(Matrix::nearPD(x)$mat)
}
sq <- e$vectors %*% diag(sqrt(e$values)) %*% t(e$vectors)
if (inverse) chol2inv(chol(sq)) else sq
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evsi_mm.R
|
## TODO (Figure 4.3): plot of EVSI against willingness-to-pay, either for a
## single sample size or with curves coloured by sample size (cf. evsi.wtp.plot).
## Easier to implement with ggplot, so that users can adapt the code to design
## their own publication-quality versions.
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evsiplots.R
|
##' Calculate the expected value of sample information for an estimation problem
##'
##' Calculate the expected value of sample information for an estimation problem. This computes the expected reduction in variance in some quantity of interest from a study of a certain design that informs the parameters of interest.
##'
##' @param outputs a vector of values for the quantity of interest, sampled from the uncertainty distribution of this quantity that is induced by the uncertainty about the parameters.
##'
##' @param method See \code{\link{evsi}}, only nonparametric regression methods are
##' currently supported in \code{\link{evsivar}}.
##'
##' @return A data frame with a column \code{pars}, indicating the parameter(s), and a column \code{evsi}, giving the corresponding EVSI. If there are EVSI estimates for multiple sample sizes, the sample size is returned in the column \code{n}.
##'
##' @inheritParams evsi
##'
##' @references
##' Jackson, C., Presanis, A., Conti, S., & De Angelis, D. (2019). Value of information:
##' Sensitivity analysis and research design in Bayesian evidence synthesis.
##' Journal of the American Statistical Association, 114(528), 1436-1449.
##'
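##' @examples
##' \dontrun{
##' ## Illustrative sketch (not from the original documentation): EVSI for
##' ## estimating the probability of side effects under the novel treatment in
##' ## the built-in chemotherapy example, given a study observing this outcome
##' ## in n patients.
##' datagen_binary <- function(inputs, n = 100){
##'   data.frame(n_se = rbinom(nrow(inputs), size = n,
##'                            prob = inputs[["p_side_effects_t2"]]))
##' }
##' evsivar(outputs = chemo_pars$p_side_effects_t2, inputs = chemo_pars,
##'         pars = "p_side_effects_t2", datagen_fn = datagen_binary,
##'         n = c(100, 1000))
##' }
##'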
##' @export
evsivar <- function(outputs,
inputs,
study=NULL,
datagen_fn=NULL,
pars=NULL,
n=100,
aux_pars=NULL,
method=NULL,
nsim=NULL,
verbose=TRUE,
...)
{
inputs <- check_inputs(inputs, iname=deparse(substitute(inputs)))
check_outputs_vector(outputs, inputs)
check_ss(n)
if (is.null(method))
method <- default_evsi_method()
## Take subset of full PSA sample
if (is.null(nsim)) nsim <- nrow(inputs)
outputs <- outputs[1:nsim]
inputs <- inputs[1:nsim,,drop=FALSE]
datagen_fn <- form_datagen_fn(study, datagen_fn, inputs)
if (method %in% npreg_methods) {
evsivar_npreg(outputs=outputs, inputs=inputs,
datagen_fn=datagen_fn, pars=pars, n=n,
aux_pars = aux_pars,
method=method, verbose=verbose, ...)
}
else stop("Only the nonparametric regression methods are currently implemented")
}
evsivar_npreg <- function(outputs, inputs, datagen_fn, pars, n, method=NULL, verbose, aux_pars=NULL, ...){
nn <- length(n)
res <- vector(nn, mode="list")
for (i in seq_along(n)){
Tdata <- generate_data(inputs, datagen_fn, n[i], pars, aux_pars)
evsi <- evppivar_npreg(outputs=outputs, inputs=Tdata, pars=names(Tdata),
method=method, verbose=verbose, ...)
names(evsi)[names(evsi)=="evppi"] <- "evsi"
res[[i]] <- cbind(n = n[i], evsi)
rownames(res[[i]]) <- NULL
}
do.call("rbind", res)
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/evsivar.R
|
##' Import results of probabilistic analysis from heemod
##'
##' [heemod](https://CRAN.R-project.org/package=heemod) is a package
##' for constructing common forms of health economic decision models.
##' The outputs from probabilistic analysis of these models can be
##' imported using these functions, to allow Value of Information
##' measures to be calculated for them using the \pkg{voi} package.
##'
##' @aliases import_heemod_outputs import_heemod_inputs
##'
##' @param obj Object returned by the \code{\link[heemod]{run_psa}}
##' function in \pkg{heemod}, containing samples from probabilistic
##' analysis of a decision model.
##'
##' @param k Vector of willingness-to-pay values. The default is
##' inherited from the \code{\link[BCEA]{bcea}} function from the \pkg{BCEA}
##' package.
##'
##' @return \code{import_heemod_outputs} produces a list of model
##' outputs in "cost-effectiveness analysis" format, that can be
##' supplied as the \code{outputs} argument to \code{\link{evppi}}
##' and similar functions in the \pkg{voi} package. Both the
##' \pkg{heemod} and \pkg{BCEA} packages need to be installed to use
##' this.
##'
##' \code{import_heemod_inputs} produces a data frame with samples of
##' parameter values under uncertainty, that can be supplied as the
##' \code{inputs} argument to \code{\link{evppi}} and similar functions
##' in \pkg{voi}.
##'
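##' @examples
##' \dontrun{
##' ## Illustrative sketch, assuming `psa` is the object returned by
##' ## heemod::run_psa() for a model with two or more strategies.
##' outputs <- import_heemod_outputs(psa, k = seq(10000, 50000, by = 10000))
##' inputs <- import_heemod_inputs(psa)
##' evppi(outputs, inputs, pars = as.list(names(inputs)))
##' }
##'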
##' @name import_heemod
NULL
##' @rdname import_heemod
##' @export
import_heemod_outputs <- function(obj, k=NULL){
if (!requireNamespace("heemod",quietly=TRUE)) {
stop("The `heemod` package is required")
}
if (!requireNamespace("BCEA",quietly=TRUE)) {
stop("The 'BCEA' package is required")
}
heemod::run_bcea(obj,k=k,ref=1)[c("c","e","k")]
}
##' @rdname import_heemod
##' @export
import_heemod_inputs <- function(obj){
obj$psa[!duplicated(obj$psa$.index), obj$resamp_par]
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/heemod.R
|
## From the ldr package, version 1.3.3
## By Kofi Placid Adragni and Andrew M. Raim
## https://www.jstatsoft.org/article/view/v061i03
## No modifications
ldr <-
function(X, y=NULL, fy=NULL, Sigmas=NULL, ns=NULL, numdir=NULL, nslices=NULL,
model=c("core", "lad", "pfc"), numdir.test=FALSE, ...)
{
if (model=="pfc")
{
if (is.null(fy)){stop("fy is not provided"); return()}
return(invisible(pfc(X=X, y=y, fy=fy, numdir=numdir, numdir.test=numdir.test, ...)))
}
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/ldr.R
|
## From the ldr package, version 1.3.3
## By Kofi Placid Adragni and Andrew M. Raim
## https://www.jstatsoft.org/article/view/v061i03
## No modifications
ldr.slices <-
function(y, nslices=3)
{
endpoints = function(n, nslices)
{
# This function is intended to determine the end-points of the slices.
increment <- floor(n/nslices);
if (nslices ==1) return(n);
if (nslices > 1)
{
ends<- seq(1:nslices)*increment;
rest <- n%%nslices;
if (rest==0) return(ends);
if (rest>0)
{
for (i in 1:rest) ends[i]<-ends[i]+i;
for (i in (rest+1):nslices) ends[i]<- ends[i]+rest;
return(ends)
}
}
}
n <- length(y); indicators <- vector(length=n);
sorty <- sort(y); bins.y <- vector("list", nslices);
ends <- endpoints(n, nslices);
if (nslices==1)
{
bins.y[[1]] <- sorty[1:ends[1]];
indicators[1:ends[1]]<-1;
return(list(bins=bins.y, nslices=nslices, slice.size=n, slice.indicator=indicators[rank(y)]))
}
else if (nslices==2)
{
bins.y[[1]] <- sorty[1:ends[1]];
bins.y[[2]] <- sorty[(ends[1]+1):ends[2]];
indicators[1:ends[1]]<-1;
indicators[(ends[1]+1):ends[2]]<-2;
return(list(bins=bins.y, nslices=nslices, slice.size=diff(c(0,ends)), slice.indicator=indicators[rank(y)]))
}
else
{
bins.y[[1]] <- sorty[1:(ends[1]-1)];
indicators[1:(ends[1]-1)]<-1;
for (i in 2:(nslices-1))
{
bins.y[[i]] <- sorty[ends[i-1]:(ends[i]-1)];
indicators[ends[i-1]:(ends[i]-1)] <- i;
}
bins.y[[nslices]] <- sorty[ends[nslices-1]:(ends[nslices])];
indicators[ends[nslices-1]:(ends[nslices])] <- nslices;
return(list(bins=bins.y, nslices=nslices, slice.size=diff(c(0,ends)), slice.indicator=indicators[rank(y)]))
}
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/ldr.slices.R
|
## Facilities to deal with functions that evaluate decision-analytic models, and functions that
## generate parameters for those models.
##' Check that a decision-analytic model function is of the appropriate form, and detect whether it returns output in net benefit or cost-effectiveness (CEA) format.
##' (Note that `check_outputs`, analogously, adds a class to the `outputs` object.)
##'
##' @return A modified copy of `model_fn` with a \code{class} attribute indicating whether it is
##' in net benefit \code{"nb"} or cost-effectiveness \code{"cea"} format, and an attribute \code{"nopt"} giving the
##' number of decision options.
##'
##' @keywords internal
check_model_fn <- function(model_fn, par_fn, mfargs=NULL, outputs=NULL, verbose=FALSE){
## Test by evaluating the model function at a single set of parameters/arguments
if (is.null(model_fn)) stop("`model_fn` was not supplied")
if (is.null(par_fn)) stop("`par_fn` was not supplied")
pars <- check_parfn1(par_fn, model_fn, mfargs)
defaults <- get_default_args(model_fn, pars)
res <- do.call(model_fn, c(pars, mfargs, defaults)[names(formals(model_fn))])
if (is.vector(res)){
class(model_fn) <- c("nb", attr(model_fn, "class"))
attr(model_fn, "nopt") <- length(res)
if (!is.null(outputs)){
if (identical(class(outputs)[1], "cea"))
stop("output of model_fn should have two rows if `outputs` is in cost-effectiveness format")
if (length(res) != ncol(outputs))
stop(sprintf("Number of decision options returned by model_fn is %s, whereas `outputs` has %s columns. These should match.",length(res),ncol(outputs)))
}
} else if (is.matrix(res) || is.data.frame(res)) {
class(model_fn) <- c("cea", attr(model_fn, "class"))
attr(model_fn, "nopt") <- ncol(res)
if (nrow(res) != 2)
stop("If `model_fn` returns a matrix or data frame it should have two rows, one for effects and one for costs")
if (!is.null(outputs)){
if (identical(class(outputs)[1], "nb"))
stop("output of model_fn should be a vector if `outputs` is in cost-effectiveness format")
if (ncol(res) != ncol(outputs$c))
stop(sprintf("Number of decision options returned by model_fn is %s, whereas `outputs$c` has %s columns. These should match.",ncol(res),ncol(outputs$c)))
}
} else stop("`model_fn` should return a vector, matrix or data frame")
if (verbose)
describe_modelfn(model_fn)
if (attr(model_fn, "nopt")==1)
stop("model_fn should describe more than one decision option")
model_fn
}
describe_modelfn <- function(model_fn, ...){
UseMethod("describe_modelfn", model_fn)
}
describe_modelfn.nb <- function(model_fn, ...){
plural <- if (attr(model_fn, "nopt") > 1) "s" else ""
message(sprintf("model_fn returns net benefit for %s decision option%s", attr(model_fn, "nopt"), plural))
}
describe_modelfn.cea <- function(model_fn, ...){
plural <- if (attr(model_fn, "nopt") > 1) "s" else ""
message(sprintf("model_fn returns effects and costs for %s decision option%s", attr(model_fn, "nopt"), plural))
}
check_parfn1 <- function(par_fn, model_fn, mfargs=NULL){
fn_try <- try(pars <- par_fn(1), silent=TRUE)
if (inherits(fn_try, "try-error")){
stop("Evaluating `par_fn` returned the following error:\n",
attr(fn_try,"condition")$message)
}
if (is.vector(pars)) {
if (is.null(names(pars)))
stop("pars(1) should return a named vector, matrix or data frame")
pars <- as.data.frame(as.list(pars))
} else if (is.matrix(pars)){
if (is.null(colnames(pars)))
stop("If pars(1) returns a matrix, the columns should be named")
pars <- as.data.frame(pars)
} else if (!is.data.frame(pars)) {
stop("pars(1) should return a named vector, matrix or data frame")
}
model_pars <- names(formals(model_fn))
defaults <- get_default_args(model_fn, pars)
supplied_pars <- c(names(pars),names(mfargs),names(defaults))
missing_pars <- setdiff(model_pars, supplied_pars)
if (length(missing_pars) > 0)
stop("The following parameters of `model_fn` were not found in the components of pars(1) or in the `...` argument: ",paste(missing_pars,collapse=","))
pars
}
# get the arguments to function fn that are not supplied in `supplied`
get_default_args <- function(fn, supplied=NULL){
fs <- formals(fn)
has_default <- sapply(fs, function(x) { if (is.name(x) && !nzchar(x)) FALSE else TRUE } )
default_args <- as.list(fs[has_default])
if (!is.null(supplied))
default_args <- default_args[!(names(default_args) %in% names(supplied))]
default_args
}
check_parfnn <- function(par_fn, model_fn){
fn_try <- try(pars <- par_fn(2), silent=TRUE)
if (inherits(fn_try, "try-error")){
stop("Evaluating `par_fn` returned the following error\n",
attr(fn_try,"condition")$message)
}
if (!(is.matrix(pars) || is.data.frame(pars)))
stop("par_fn(n) for n>1 should return a matrix or data frame")
if (nrow(pars) != 2) {
stop("par_fn(n) should have n rows")
}
as.data.frame(pars) # return value currently unused
}
## todo handle more indices
mfi <- function(res){
ci <- match("c", rownames(res))
if (is.na(ci)) ci <- 2
ei <- match("e", rownames(res))
if (is.na(ei)) ei <- 1
list(c=ci, e=ei)
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/model.R
|
## Copied from
## https://github.com/convoigroup/Chemotherapy_Book/blob/main/03_R/02_model_functions.R
## https://github.com/convoigroup/Chemotherapy_Book/blob/main/03_R/02_misc_functions.R
## on 28/03/2023
## but with the constants sourced from a list chemo_constants, rather than global variables
## This list is sourced and built in data_raw/chemo.R
################################################################################
#### Misc Functions for the Chemotherapy Model
################################################################################
## Function to transform values for mean and standard deviation into parameters
## for a Beta distribution
betaPar <- function(m, s) {
## m: Mean of the Beta distribution
  ## s: Standard deviation of the Beta distribution
var <- s ^ 2
alpha <- ((1 - m) / var - 1 / m) * m ^ 2
beta <- alpha * (1 / m - 1)
return(
list(alpha = alpha, beta = beta)
)
}
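## e.g. betaPar(m = 0.2, s = 0.05) gives alpha = 12.6 and beta = 50.4, and
## rbeta(n, 12.6, 50.4) has mean 0.2 and standard deviation 0.05.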
## Function to transform values for mean and standard deviation into parameters
## for a Log-Normal distribution
lognPar <- function(m,s) {
## m: Mean of Log-Normal distribution
  ## s: Standard deviation of the Log-Normal distribution
var <- s^2
meanlog <- log(m) - 0.5 * log(1 + var/m^2)
varlog <- log(1 + (var/m^2))
sdlog <- sqrt(varlog)
return(
list(meanlog = meanlog, sdlog = sdlog)
)
}
## Function to transform values for mean and standard deviation into parameters
## for a Gamma distribution
gammaPar <- function(m,s) {
  ## m: Mean of the Gamma distribution
  ## s: Standard deviation of the Gamma distribution
var <- s^2
beta <- m / var
alpha <- m * beta
return(
list(alpha = alpha, beta = beta)
)
}
################################################################################
#### Functions for the Chemotherapy Model
################################################################################
## Function to generate the PSA parameters
generate_psa_parameters <- function(n){
with(voi::chemo_constants, {
## Probability of side effects under treatment 1
p_side_effects_t1 <- rbeta(n,
1 + n_side_effects,
1 + n_patients - n_side_effects)
## Log odds of side effects on treatment 2
logor_side_effects <- rnorm(n, logor_side_effects_mu, logor_side_effects_sd)
## Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
## Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
## Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
#### Variables to define transition probabilities
## Probability that a patient is hospitalised over the time horizon
p_hospitalised_total <- rbeta(n,
1 + n_hospitalised,
1 + n_side_effects - n_hospitalised)
## Probability that a patient dies over the time horizon given they were
## hospitalised
p_died <- rbeta(n, 1 + n_died, 1 + n_hospitalised - n_died)
## Lambda_home: Conditional probability that a patient recovers considering
## that they are not hospitalised
betapars <- betaPar(p_recovery_home_mu, p_recovery_home_sd)
lambda_home <- rbeta(n, betapars$alpha, betapars$beta)
## Lambda_hosp: Conditional probability that a patient recovers considering
## that they do not die
betapars <- betaPar(p_recovery_hosp_mu, p_recovery_hosp_sd)
lambda_hosp <- rbeta(n, betapars$alpha, betapars$beta)
## Health State Costs
lnpars <- lognPar(c_home_care_mu, c_home_care_sd)
c_home_care <- rlnorm(n, lnpars$meanlog, lnpars$sdlog)
lnpars <- lognPar(c_hospital_mu, c_hospital_sd)
c_hospital <- rlnorm(n, lnpars$meanlog, lnpars$sdlog)
lnpars <- lognPar(c_death_mu, c_death_sd)
c_death <- rlnorm(n, lnpars$meanlog, lnpars$sdlog)
## Health Utilities
betapars <- betaPar(u_recovery_mu, u_recovery_sd)
u_recovery <- rbeta(n, betapars$alpha, betapars$beta)
betapars <- betaPar(u_home_care_mu, u_home_care_sd)
u_home_care <- rbeta(n, betapars$alpha, betapars$beta)
betapars <- betaPar(u_hospital_mu, u_hospital_sd)
u_hospital <- rbeta(n, betapars$alpha, betapars$beta)
## Long term survival
gammapars <- gammaPar(rate_longterm_mu, rate_longterm_sd)
rate_longterm <- rgamma(n, shape = gammapars$alpha, rate = gammapars$beta)
## Specify a matrix containing all the parameters
params_matrix <- data.frame(
p_side_effects_t1,
p_side_effects_t2,
c_home_care, c_hospital, c_death,
u_recovery, u_home_care, u_hospital,
logor_side_effects,
p_hospitalised_total, p_died,
lambda_home, lambda_hosp, rate_longterm)
return(params_matrix)
})
}
## Function to calculate average time each patient with adverse events spends
## in the health states of the Markov model
calculate_state_occupancy_markov_model <- function(
p_side_effects_t1,
p_side_effects_t2,
p_home_home, p_home_hospital, p_home_recover,
p_hospital_hospital, p_hospital_recover, p_hospital_dead,
p_longterm)
# All function arguments come from the generate_psa_parameters function except
# time_horizon which is in chemo_constants
{
## Markov transition probability matrix
## States: Home care, Hospital care, Recovery, Death
MM.mat <- matrix(c(p_home_home, p_home_hospital, p_home_recover, 0,
0, p_hospital_hospital, p_hospital_recover, p_hospital_dead,
0, 0, 1 - p_longterm, p_longterm,
0, 0, 0, 1),
nrow = 4, ncol = 4, byrow = TRUE)
## Number of patients in each state for each time point
## 3 dimensions: number of states, number of time points,
## number of treatment options
time_horizon <- voi::chemo_constants$time_horizon
trace <- array(0, dim = c(4, time_horizon + 1, 2))
# Initialise with the predicted number of side effects in the population
trace[1, 1, ] <- c(p_side_effects_t1, p_side_effects_t2)
  # Run the Markov model over the time horizon
for(i in 2:(time_horizon + 1)){
trace[, i, 1] <- trace[, i - 1, 1] %*% MM.mat
trace[, i, 2] <- trace[, i - 1, 2] %*% MM.mat
}
return(trace) # 4*16*2 array
}
## Function to calculate the costs and effects from our model
calculate_costs_effects <- function(
p_side_effects_t1,
p_side_effects_t2,
p_hospitalised_total, p_died,
lambda_home, lambda_hosp,
c_home_care, c_hospital, c_death,
u_recovery, u_home_care, u_hospital,
logor_side_effects, rate_longterm)
{
with(voi::chemo_constants, {
## Calculate p_side_effects_2 from odds ratio
## Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
## Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
## Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
## Transition Probabilities
p_home_hospital <- 1 - (1 - p_hospitalised_total) ^ (1 / time_horizon)
p_home_home <- (1 - lambda_home) * (1 - p_home_hospital)
p_home_recover <- lambda_home * (1 - p_home_hospital)
p_hospital_dead <- 1 - (1 - p_died) ^ (1 / time_horizon)
p_hospital_hospital <- (1 - lambda_hosp) * (1 - p_hospital_dead)
p_hospital_recover <- lambda_hosp * (1 - p_hospital_dead)
p_longterm <- 1 - exp(-rate_longterm * 1/52)
## Calculate the trace matrix from the markov model function
m_markov_trace <- calculate_state_occupancy_markov_model(
p_side_effects_t1,
p_side_effects_t2,
p_home_home, p_home_hospital, p_home_recover,
p_hospital_hospital, p_hospital_recover, p_hospital_dead,
p_longterm)
## costs and effectiveness for four states
c_state_vector <- c(c_home_care, c_hospital, 0, 0)
u_state_vector <- c(u_home_care, u_hospital, u_recovery, 0)
## Estimate the cost of side effects from the Markov model
c_side_effects <- array(NA, dim = 2)
## Average cost for both Soc and novel treatment per person
## (The cost includes one-off cost of death for all patients who died)
c_side_effects[1] <- (sum(c_state_vector %*% m_markov_trace[, , 1]) +
c_death * m_markov_trace[4, time_horizon + 1, 1]) / 52
c_side_effects[2] <- (sum(c_state_vector %*% m_markov_trace[, , 2]) +
c_death * m_markov_trace[4, time_horizon + 1, 2]) / 52
c_drug <- c(c_treatment_1, c_treatment_2)
c_longterm <- (1 - m_markov_trace[4, time_horizon + 1, ]) *
(pexp(time_horizon_longterm - 2, rate = rate_longterm)) * c_death
c_overall <- c(c_drug + c_side_effects + c_longterm)
## Total QALY of side effects for both Soc and novel treatment
u_side_effects <- array(NA, dim = 2)
u_side_effects[1] <- sum(u_state_vector %*% m_markov_trace[,,1]) / 52
u_side_effects[2] <- sum(u_state_vector %*% m_markov_trace[,,2]) / 52
u_longterm <- (1 - m_markov_trace[4, time_horizon + 1, ]) *
integrate(function(x){ (1 - pexp(x, rate = rate_longterm)) * u_home_care}, lower = 2, upper = time_horizon_longterm)$value
## QALY of total number of patients who do not experience adverse events for 15 days
p_no_side_effects <- 1 -
c(p_side_effects_t1,
p_side_effects_t2)
u_no_side_effects <- p_no_side_effects * u_recovery * (time_horizon + 1) / 52
## Average effect for both Soc and novel treatment per person
u_overall <- c(u_side_effects + u_no_side_effects + u_longterm)
names(c_overall) <- paste0("cost",seq_along(c_overall))
names(u_overall) <- paste0("eff",seq_along(u_overall))
output <- array(NA, dim = c(2, length(u_overall)), # CJ FIXED
dimnames = list(c("Effects", "Costs"),
c("SoC", "Novel") # CJ FIXED FROM UPSTREAM
))
output[1, ] <- u_overall
output[2, ] <- c_overall
return(output)
})
}
## NOTE cost_effects is not the output from calculate_costs_effects which is 2 x ntreatments (for one simulation)
## but a 3D array of size nsim x 2 (i.e. costs, effects) x ntreatments
## wtp assumed to be scalar.
calculate_net_benefit <- function(
costs_effects,
wtp)
{
if(!is.null(dim(costs_effects))){
nb <- wtp * costs_effects[, 1, ] -
costs_effects[, 2, ]
}
return(nb)
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/model_chemo_source.R
|
##' Chemotherapy cost-effectiveness model
##'
##' An artificial health economic decision model with a typical Markov model structure, used for illustrating Value of Information methods.
##' Functions are provided to generate model parameters and evaluate the model, and samples from probabilistic analysis of the model are
##' provided as built-in datasets.
##'
##' For more details, refer to Heath et al. (forthcoming book...)
##'
##' @param p_side_effects_t1 Probability of side effects under treatment 1
##' @param p_side_effects_t2 Probability of side effects under treatment 2
##' @param logor_side_effects Log odds ratio of side effects for treatment 2 compared to 1
##' @param p_hospitalised_total Probability of hospitalisation in the year after receiving treatment
##' @param p_died Probability of death in the year after receiving treatment
##' @param lambda_home Recovery probability for someone treated at home
##' @param lambda_hosp Recovery probability for someone treated in hospital who does not die
##' @param c_home_care Cost of a yearly period under treatment at home
##' @param c_hospital Cost of hospital treatment
##' @param c_death Cost of death
##' @param u_recovery Utility of a period in the recovery state
##' @param u_home_care Utility of home care state
##' @param u_hospital Utility of hospital state
##' @param rate_longterm Long term mortality rate
##'
##' @param n Number of samples to generate from the uncertainty distribution of the parameters in \code{chemo_pars_fn}.
##'
##' @return
##' Two alternative functions are provided to evaluate the decision model for given parameters.
##'
##' \code{chemo_model_nb} returns a vector with elements giving the net monetary benefit for standard of care
##' and novel treatment, respectively, at a willingness-to-pay of 20,000 pounds per QALY.
##'
##' \code{chemo_model_cea} returns a matrix with:
##'
##' * two rows, the first for expected effects (QALYs) and the second for expected costs over the fifty year time horizon, and
##'
##' * two columns, the first for the "standard of care" decision option, and the second for the novel
##' treatment.
##'
##' \code{chemo_model_lor_nb} and \code{chemo_model_lor_cea} are the same model, but parameterised in terms of
##' the probability of side effects for the standard of care \code{p_side_effects_t1} and the log odds ratio
##' of side effects between treatment groups \code{logor_side_effects}, rather than in terms of
##' \code{p_side_effects_t1} and \code{p_side_effects_t2}
##'
##' \code{chemo_pars_fn} generates a sample from the uncertainty distribution of the parameters in the chemotherapy model . This returns a data frame with parameters matching the arguments of
##' \code{\link{chemo_model_nb}}, and the following additional derived parameters:
##'
##' * `p_side_effects_t2`: probability of side effects under the novel treatment, derived from `p_side_effects_t1` and `logor_side_effects`
##'
##' * `p_hospitalised_total`: probability of hospitalisation over the 50 year time horizon
##'
##' * `p_died`: probability of death over the time horizon, given hospitalisation
##'
##' * `lambda_home`: conditional probability that a patient recovers given they are not hospitalised
##'
##' * `lambda_hosp`: conditional probability that a patient in hospital recovers given they do not die
##'
##' @format Samples of 10000 from probabilistic analysis of this model are made available in the package, in the
##' following data objects:
##'
##' \code{chemo_pars}: Sample from the distributions of the parameters, as a data frame with names as documented above.
##'
##' \code{chemo_cea}: List with components `e` (sampled effects), `c` (sampled costs), and `k` (a set of five
##' equally-spaced willingness-to-pay values from 10000 to 50000 pounds). The effects and costs are data frames
##' with two columns, one for each decision option.
##'
##' \code{chemo_nb}: Data frame with two columns, giving the net monetary benefit for each decision option,
##' at a willingness-to-pay of 20000 pounds.
##'
##' \code{chemo_cea_501}: List with components `e` (sampled effects), `c` (sampled costs), and `k` (a set of 501
##' willingness-to-pay values from 10000 to 50000). This is provided to facilitate illustrations of plots of
##' VoI measures against willingness-to-pay.
##'
##' The following additional data objects are supplied:
##'
##' \code{chemo_constants} includes various constants required by the code.
##'
##' \code{chemo_evsi_or} is the result of an EVSI analysis to estimate the expected value of a two-arm trial, with a binary outcome, to estimate the log odds ratio of side effects. This object is a data frame with three columns, giving the sample size per arm (`n`), willingness-to-pay (`k`) and the corresponding EVSI (`evsi`).
##'
##' @references Value of Information for Healthcare Decision Making
##' (CRC Press, eds. Heath, Kunst and Jackson: forthcoming)
##'
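##' @examples
##' ## Illustrative example (not from the original documentation): evaluate the
##' ## model at a single set of parameter values sampled from their uncertainty
##' ## distribution.
##' pars <- chemo_pars_fn(1)
##' do.call(chemo_model_nb, pars[names(formals(chemo_model_nb))])
##' do.call(chemo_model_cea, pars[names(formals(chemo_model_cea))])
##'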
##' @name chemo_model
NULL
##' @rdname chemo_model
##' @export
chemo_pars_fn <- function(n){
generate_psa_parameters(n)
}
##' @rdname chemo_model
##' @export
chemo_model_nb <- function(p_side_effects_t1, p_side_effects_t2,
p_hospitalised_total, p_died,
lambda_home, lambda_hosp,
c_home_care, c_hospital, c_death,
u_recovery, u_home_care, u_hospital,
rate_longterm)
{
if (length(p_side_effects_t1) > 1)
stop("This function is not vectorised, and parameters should be supplied as scalars")
ce <- chemo_model_cea(p_side_effects_t1 = p_side_effects_t1,
p_side_effects_t2 = p_side_effects_t2,
p_hospitalised_total = p_hospitalised_total,
p_died = p_died,
lambda_home = lambda_home,
lambda_hosp = lambda_hosp,
c_home_care = c_home_care,
c_hospital = c_hospital,
c_death = c_death,
u_recovery = u_recovery,
u_home_care = u_home_care,
u_hospital = u_hospital,
rate_longterm = rate_longterm)
ce[1,]*20000 - ce[2,]
}
##' @rdname chemo_model
##' @export
chemo_model_cea <- function(p_side_effects_t1, p_side_effects_t2,
p_hospitalised_total, p_died,
lambda_home, lambda_hosp,
c_home_care, c_hospital, c_death,
u_recovery, u_home_care, u_hospital,
rate_longterm)
{
if (length(p_side_effects_t1) > 1)
stop("This function is not vectorised, and parameters should be supplied as scalars")
odds2 <- p_side_effects_t2 / (1 - p_side_effects_t2)
odds1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
logor_side_effects <- log(odds2 / odds1)
ce <- calculate_costs_effects(p_side_effects_t1 = p_side_effects_t1,
p_hospitalised_total = p_hospitalised_total,
p_died = p_died,
lambda_home = lambda_home,
lambda_hosp = lambda_hosp,
c_home_care = c_home_care,
c_hospital = c_hospital,
c_death = c_death,
u_recovery = u_recovery,
u_home_care = u_home_care,
u_hospital = u_hospital,
logor_side_effects = logor_side_effects,
rate_longterm = rate_longterm)
ce
}
##' @rdname chemo_model
##' @export
chemo_model_lor_nb <- function(p_side_effects_t1, logor_side_effects,
p_hospitalised_total, p_died,
lambda_home, lambda_hosp,
c_home_care, c_hospital, c_death,
u_recovery, u_home_care, u_hospital,
rate_longterm)
{
if (length(p_side_effects_t1) > 1)
stop("This function is not vectorised, and parameters should be supplied as scalars")
ce <- chemo_model_lor_cea(p_side_effects_t1 = p_side_effects_t1,
logor_side_effects = logor_side_effects,
p_hospitalised_total = p_hospitalised_total,
p_died = p_died,
lambda_home = lambda_home,
lambda_hosp = lambda_hosp,
c_home_care = c_home_care,
c_hospital = c_hospital,
c_death = c_death,
u_recovery = u_recovery,
u_home_care = u_home_care,
u_hospital = u_hospital,
rate_longterm = rate_longterm)
ce[1,]*20000 - ce[2,]
}
##' @rdname chemo_model
##' @export
chemo_model_lor_cea <- function(p_side_effects_t1, logor_side_effects,
p_hospitalised_total, p_died,
lambda_home, lambda_hosp,
c_home_care, c_hospital, c_death,
u_recovery, u_home_care, u_hospital,
rate_longterm)
{
if (length(p_side_effects_t1) > 1)
stop("This function is not vectorised, and parameters should be supplied as scalars")
odds1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
odds2 <- odds1 * exp(logor_side_effects)
p_side_effects_t2 <- odds2 / (1 + odds2)
ce <- chemo_model_cea(p_side_effects_t1 = p_side_effects_t1,
p_side_effects_t2 = p_side_effects_t2,
p_hospitalised_total = p_hospitalised_total,
p_died = p_died,
lambda_home = lambda_home,
lambda_hosp = lambda_hosp,
c_home_care = c_home_care,
c_hospital = c_hospital,
c_death = c_death,
u_recovery = u_recovery,
u_home_care = u_home_care,
u_hospital = u_hospital,
rate_longterm = rate_longterm)
ce
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/model_chemo_wrapper.R
|
## From the ldr package, version 1.3.3
## By Kofi Placid Adragni and Andrew M. Raim
## https://www.jstatsoft.org/article/view/v061i03
## Modifications made for voi package:
## Removed "unstr2" structure
pfc <-
function(X, y, fy=NULL, numdir=NULL, structure=c("iso", "aniso", "unstr"), eps_aniso=1e-3, numdir.test=FALSE,...)
{
"%^%"<-function(M, pow)
{
if (prod(dim(M)==list(1,1))) return( as.matrix(M^pow) )
eigenM = eigen(M)
return(eigenM$vectors%*%diag(c(eigenM$values)^pow)%*%t(eigenM$vectors))
}
Trace<-function(X)
{
if (!is.matrix(X)) stop("Argument to Trace is not a matrix in pfc");
return(sum(diag(X)))
}
orthonorm <- function(u)
{
if (is.null(u)) return(NULL)
if (!(is.matrix(u))) u <- as.matrix(u);
dd <- dim(u); n <- dd[1]; p <-dd[2];
    if (prod(abs(La.svd(u)$d) > 1e-08) == 0) stop("collinear vectors in orthonorm")
if (n < p)
{
warning("There are too much vectors to orthonormalize in orthonorm.")
u <- as.matrix(u[, 1:p])
n <- p
}
v <- u;
if (p > 1)
{
for (i in 2:p)
{
coef.proj <- c(crossprod(u[, i], v[, 1:(i - 1)]))/diag(crossprod(v[, 1:(i - 1)]));
v[, i] <- u[, i] - matrix(v[, 1:(i - 1)], nrow = n) %*% matrix(coef.proj, nrow = i - 1)
}
}
coef.proj <- 1/sqrt(diag(crossprod(v)))
return(t(t(v) * coef.proj))
}
## nocov start
onepfc = function(X, y, fy)
{
# X is univariate predictor
nobs <- length(X); r <- dim(fy)[2]
P_F <- fy%*%solve(t(fy)%*%fy)%*%t(fy)
Xc <- scale(X, TRUE, FALSE)
Sigmahat_fit <- (1/nobs)*t(Xc)%*%P_F%*%(Xc)
ev.fit <- eigen(Sigmahat_fit)
temp.dat<-data.frame(cbind(X, fy)); xnam<-paste("xx", 1:r, sep="");
names(temp.dat)<-c("yy", xnam);
fm.lm<- as.formula( paste("yy ~ ", paste(xnam, collapse= "+")));
summary.fm <- summary(lm(fm.lm, data=temp.dat));
Betahat <- matrix(summary.fm$coefficients[2:(r+1),1], ncol=r);
Gammahat <- matrix(1, ncol=1, nrow=1);
Deltahat <- matrix(summary.fm$sigma^2, ncol=1, nrow=1);
Muhat <- matrix(summary.fm$coefficients[1,1], ncol=1);
loglik <- - 0.5*n*(1+log(2*pi*summary.fm$sigma^2));
numpar <- p + dim(fy)[2] + 1;
aic <- -2*loglik + 2*numpar;
bic <- -2*loglik + log(n)*numpar;
ans <- list(R=X, Muhat=Muhat, Betahat=Betahat, Gammahat=Gammahat, Deltahat=Deltahat,
loglik=loglik, aic=aic, bic=bic, numpar=numpar, numdir=1, model="pfc",
call=match.call(), structure="iso", y=y, fy=fy, Xc=Xc, numdir.test=numdir.test);
class(ans)<- "pfc";
return(ans)
}
## nocov end
if (is.null(fy)) {fy <- scale(y, TRUE, TRUE); numdir <- 1}
r <- dim(fy)[2]; X <- as.matrix(X)
op <- dim(X); n <- op[1]; p <- op[2]
eff.numdir <- min(numdir, r, p)
vnames <- dimnames(X)[[2]]
if (is.null(vnames)) vnames <- paste("X", 1:p, sep="")
if (p==1) return(onepfc(X=X, y=y, fy=fy))
Muhat <- apply(X, 2, mean)
Xc <- scale(X, TRUE, FALSE)
P_F <- fy%*%solve(t(fy)%*%fy)%*%t(fy)
Sigmahat <- cov(X)
Sigmahat_fit <- cov(P_F%*%X)
Sigmahat_res <- Sigmahat - Sigmahat_fit
if (structure=="iso")
{
iso <- function(i)
{
ev <- eigen(Sigmahat);
ev.fit <- eigen(Sigmahat_fit);
all_evalues <-ev.fit$values
evalues <- all_evalues[1:i]
sigma2hat <- Re(sum(ev$values)/p);
Gammahat <- Re(matrix(ev.fit$vectors[,1:i], ncol=i));
dimnames(Gammahat) <- list(vnames, paste("Dir", 1:i, sep=""))
Betahat <-Re(t(Gammahat)%*%t(Xc)%*%fy%*%solve(t(fy)%*%fy));
sigma2hat <- Re((sum(ev$values)-sum(evalues))/p);
Deltahat <- sigma2hat*diag(1, p);
dimnames(Deltahat) <- list(vnames, vnames)
loglik <- - 0.5*n*p*(1+log(2*pi*sigma2hat));
numpar <- p + (p-i)*i + i*dim(fy)[2] + 1;
aic <- -2*loglik + 2*numpar;
bic <- -2*loglik + log(n)*numpar;
return(list(Betahat=Betahat, Gammahat=Gammahat, Deltahat=Deltahat,
evalues=evalues, loglik=loglik, aic=aic, bic=bic, numpar=numpar));
}
if (identical(numdir.test, FALSE))
{
out <- iso(eff.numdir);
ans <- list(R=X%*%orthonorm(out$Gammahat), Muhat=Muhat, Betahat=out$Betahat, Gammahat=out$Gammahat,
Deltahat=out$Deltahat, loglik=out$loglik, aic=out$aic, bic=out$bic, numpar=out$numpar,
numdir=eff.numdir, evalues=out$evalues, structure="iso", y=y, fy=fy,
Xc=Xc, call=match.call(expand.dots=TRUE), numdir.test=numdir.test);
class(ans) <- "pfc";
return(ans);
}
# nocov start
if (identical(numdir.test, TRUE))
{
aic <- bic <- numpar <- loglik <- vector(length=eff.numdir+1);
Betahat <- Deltahat <- Gammahat <-vector("list");
# No fitting values (eff.numdir=0)
ev <- eigen(Sigmahat);
sigma2hat <- sum(ev$values)/p;
loglik[1] <- - 0.5*n*p*(1+log(2*pi*sigma2hat));
numpar[1] <- p + 1;
aic[1] <- -2*loglik[1] + 2*numpar[1];
bic[1] <- -2*loglik[1] + log(n)*numpar[1];
for (i in 1:eff.numdir)
{
fit <- iso(i);
Betahat[[i]] <-fit$Betahat;
Gammahat[[i]] <-fit$Gammahat;
Deltahat[[i]] <- fit$Deltahat;
loglik[i+1] <- fit$loglik;
numpar[i+1] <- fit$numpar;
aic[i+1] <- fit$aic;
bic[i+1] <- fit$bic;
}
ans <- list(R=X%*%orthonorm(Gammahat[[eff.numdir]]), Muhat=Muhat, Betahat=Betahat, Gammahat=Gammahat,
Deltahat=Deltahat, loglik=loglik, aic=aic, bic=bic, numpar=numpar,
numdir=eff.numdir, model="pfc", evalues=fit$evalues, structure="iso",
y=y, fy=fy, Xc=Xc, call=match.call(), numdir.test=numdir.test);
class(ans)<- "pfc";
return(ans)
}
# nocov end
}
if (structure=="aniso")
{
aniso = function(X, y, fy, d, eps_aniso=1e-3, numdir.test)
{
vnames <- dimnames(X)[[2]]
if (is.null(vnames)) vnames <- paste("X", 1:ncol(X), sep="")
op <- dim(X); n <- op[1]; p <- op[2]
# Initial Step
fit <- pfc(X=X, y=y, fy=fy, numdir=d, structure="iso", numdir.test=numdir.test)
if (identical(numdir.test, FALSE))
{
Betahatx <- fit$Betahat; Gammahatx <- fit$Gammahat
Xc <- scale(X, TRUE, FALSE) - fy%*%t(Gammahatx%*%Betahatx)
deltahat <- diag(cov(Xc))
repeat
{
Xnew = X%*%((1/sqrt(deltahat))*diag(p))
fit <- pfc(X=Xnew, y=y, fy=fy, numdir=d, structure="iso", numdir.test=FALSE)
Betahatx <- fit$Betahat; Gammahatx <- (diag(p)*sqrt(deltahat))%*%fit$Gammahat
Xc <- scale(X, TRUE, FALSE) - fy%*%t(Gammahatx%*%Betahatx)
deltahat0 <- diag(t(Xc)%*%(Xc)/n)
if (sum(abs(deltahat-deltahat0)) < eps_aniso) break
deltahat <- deltahat0
}
dimnames(Gammahatx) <- list(vnames, paste("Dir", 1:d, sep=""))
Deltahat <- deltahat*diag(p)
dimnames(Deltahat) <- list(vnames, vnames)
loglik <- - 0.5*n*p*(1+log(2*pi)) - 0.5*n*log(prod(deltahat))
numpar <- p + d*(p-d) + ncol(fy)*d + p
aic <- -2*loglik + 2*numpar
bic <- -2*loglik + log(n)*numpar
ans <- list(Betahat=Betahatx, Gammahat=orthonorm(Gammahatx), Deltahat=Deltahat, evalues=fit$evalues,
loglik=loglik, aic=aic, bic=bic, numpar=numpar, numdir.test=numdir.test)
return(ans)
}
Deltahat <- Betahat <- Gammahat <- vector("list")
aic <- bic <- numpar <- loglik <- vector(length=eff.numdir+1)
# No fitting values (eff.numdir=0)
ev <- eigen(Sigmahat);
loglik[1] <- - 0.5*n*p*(1+log(2*pi)) - 0.5*n*log(prod(ev$values))
numpar[1] <- p + p
aic[1] <- -2*loglik[1] + 2*numpar[1]
bic[1] <- -2*loglik[1] + log(n)*numpar[1]
for (i in 1:eff.numdir)
{
Betahatx <- fit$Betahat[[i]]; Gammahatx <- fit$Gammahat[[i]]
Xc <- scale(X, TRUE, FALSE) - fy%*%t(Gammahatx%*%Betahatx)
deltahat <- diag(t(Xc)%*%(Xc)/n)
repeat
{
Xnew = X%*%((1/sqrt(deltahat))*diag(p))
fit2 <- pfc(X=Xnew, y=y, fy=fy, numdir=i, structure="iso", numdir.test=FALSE)
Betahatx <- fit2$Betahat; Gammahatx <- (diag(p)*sqrt(deltahat))%*%fit2$Gammahat
Xc <- scale(X, TRUE, FALSE) - fy%*%t(Gammahatx%*%Betahatx)
deltahat0 <- diag(t(Xc)%*%(Xc)/n)
if (sum(abs(deltahat-deltahat0)) < eps_aniso) break
deltahat <- deltahat0
}
Deltahat[[i]] <- deltahat*diag(p)
dimnames(Deltahat[[i]]) <- list(vnames, vnames)
loglik[i+1] = - 0.5*n*p*(1+log(2*pi)) - 0.5*n*log(prod(deltahat))
numpar[i+1] <- p + (p-i)*i + i*dim(fy)[2] + p
aic[i+1] <- -2*loglik[i+1] + 2*numpar[i+1]
bic[i+1] <- -2*loglik[i+1] + log(n)*numpar[i+1]
Betahat[[i]] <- Betahatx
Gammahat[[i]] <- orthonorm(Gammahatx)
dimnames(Gammahat[[i]]) <- list(vnames, paste("Dir", 1:i, sep=""))
}
ans <- list(Betahat=Betahat, Gammahat=Gammahat, Deltahat=Deltahat, evalues=fit2$evalues,
loglik=loglik, aic=aic, bic=bic, numpar=numpar, numdir.test=numdir.test)
return(ans)
}
fit <- aniso(X=X, y=y, fy=fy, d=eff.numdir, eps_aniso=eps_aniso, numdir.test=numdir.test)
ans <- list(Muhat=Muhat, Betahat=fit$Betahat, Gammahat=fit$Gammahat, Deltahat=fit$Deltahat, model="pfc",
loglik=fit$loglik, aic=fit$aic, bic=fit$bic, numpar=fit$numpar, numdir=eff.numdir,
evalues=fit$evalues, structure="aniso", Xc=Xc, y=y, fy=fy, call=match.call(), numdir.test=fit$numdir.test)
if (numdir.test==FALSE) ans$R <- X%*%orthonorm(((fit$Deltahat)%^%(-1))%*%fit$Gammahat) else
ans$R <- X%*%orthonorm(((fit$Deltahat[[eff.numdir]])%^%(-1))%*%fit$Gammahat[[eff.numdir]])
class(ans)<- "pfc"
return(ans)
}
else if (structure=="unstr")
{
unstr<-function(i)
{
sqrt_Sigmahat_res <- Sigmahat_res%^%0.5
Inv_Sqrt_Sigmahat_res <- solve(sqrt_Sigmahat_res)
lf_matrix <- Inv_Sqrt_Sigmahat_res%*%Sigmahat_fit%*%Inv_Sqrt_Sigmahat_res
all_evalues <- eigen(lf_matrix, symmetric=T)$values
evalues <- all_evalues[1:i]
Vhat <- eigen(lf_matrix, symmetric=T)$vectors
Vhati <- matrix(Vhat[,1:i], ncol=i)
Gammahat <- (Sigmahat_res%^%0.5)%*%Vhati%*%solve((t(Vhati)%*%Sigmahat_res%*%Vhati)%^%0.5)
dimnames(Gammahat)<- list(vnames, paste("Dir", 1:i, sep=""))
Khat<-diag(0, p)
if (i < min(ncol(fy),p)) {diag(Khat)[(i+1):min(ncol(fy), p )]<- all_evalues[(i+1):min(ncol(fy), p)]}
Deltahat <- sqrt_Sigmahat_res%*%Vhat%*%(diag(p)+Khat)%*%t(Vhat)%*%sqrt_Sigmahat_res
dimnames(Deltahat) <- list(vnames, vnames)
Betahat <- ((t(Vhati)%*%Sigmahat_res%*%Vhati)%^%0.5)%*%t(Vhati)%*%solve(Sigmahat_res%^%0.5)%*%t(Xc)%*%fy%*% solve(t(fy)%*%fy)
temp0 <- -(n*p/2)*(1 + log(2*pi))
temp1 <- -(n/2)*log(det(Sigmahat_res))
temp2 <- 0;
if (i < min(ncol(fy),p)) temp2 <- -(n/2)*sum(log(1 + all_evalues[(i+1):p]))
loglik <- temp0 + temp1 + temp2
numpar <- p + (p-i)*i + i*ncol(fy) + p*(p+1)/2
aic <- -2*loglik + 2*numpar
bic <- -2*loglik + log(n)*numpar
return(list(Betahat=Betahat, Gammahat=Gammahat, Deltahat=Deltahat, evalues=evalues,
loglik=loglik, aic=aic, bic=bic, numpar=numpar))
}
if (identical(numdir.test, FALSE))
{
out <- unstr(eff.numdir)
ans <- list(R=X%*%orthonorm(solve(out$Deltahat)%*%out$Gammahat), Muhat=Muhat, Betahat=out$Betahat, Gammahat=out$Gammahat,
Deltahat=out$Deltahat, evalues=out$evalues, loglik=out$loglik, aic=out$aic, bic=out$bic, numpar=out$numpar,
numdir=eff.numdir, model="pfc", structure="unstr", y=y, fy=fy, Xc=Xc, call=match.call(), numdir.test=numdir.test)
class(ans) <- "pfc"
return(ans);
}
aic <- bic <- numpar <- loglik <- vector(length=eff.numdir+1)
evalues <- vector(length=eff.numdir)
Betahat <- Deltahat <- Gammahat <-vector("list")
loglik[1] <- - 0.5*n*p*(1+log(2*pi)) - 0.5*n*log(det(Sigmahat))
numpar[1] <- p + p*(p+1)/2
aic[1] <- -2*loglik[1] + 2*numpar[1]
bic[1] <- -2*loglik[1] + log(n)*numpar[1]
Deltahat[[1]] <- Sigmahat
dimnames(Deltahat[[1]]) <- list(vnames, vnames)
for (i in 1:eff.numdir)
{
fit <- unstr(i);
Betahat[[i]] <-fit$Betahat
Gammahat[[i]] <-fit$Gammahat
Deltahat[[i]] <- fit$Deltahat
loglik[i+1] <- fit$loglik
numpar[i+1] <- fit$numpar
aic[i+1] <- fit$aic
bic[i+1] <- fit$bic
}
ans <- list(R=X%*%orthonorm(solve(Deltahat[[eff.numdir]])%*%Gammahat[[eff.numdir]]), Muhat=Muhat,
Betahat=Betahat, Gammahat=Gammahat, Deltahat=Deltahat, evalues=fit$evalues, loglik=loglik,
aic=aic, bic=bic, numpar=numpar, numdir=eff.numdir, model="pfc", structure="unstr", y=y,
fy=fy, Xc=Xc, call=match.call(), numdir.test=numdir.test)
class(ans)<- "pfc"
return(ans)
}
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/pfc.R
|
#' Plot EVPPI estimates
#'
#' Plot EVPPI estimates as simple dot or curve plots.
#'
#' These plotting functions are intended for quick interactive exploration of
#' EVPPI results, so they deliberately have limited options for customising them.
#'
#' For publication quality graphics, it is advised to use `ggplot2` by hand
#' on the data returned by `evppi`. Examine the code for `plot_evppi_dots`
#' and `plot_evppi_curves` to see how these plots might be constructed.
#'
#' @param x Object returned from \code{\link{evppi}}.
#'
#' @param type
#'
#' \code{"dots"} for a dot plot of the EVPPI by parameter. If \code{x} includes multiple
#' willingness-to-pay values for the same parameter, these are shown as multiple dots.
#'
#' \code{"curves"} for a plot of EVPPI against willingness-to-pay, with different
#' parameters distinguished as different curves. This is only applicable if there
#' are multiple willingness-to-pay values included in \code{x}.
#'
#' @param order For dot plots, order the plot with highest EVPPI values at the top.
#'
#' @param top A positive integer. If specified, for example as `top=5`, then only
#' five parameters are included in the plot: those with the five highest EVPPI
#' values (maximised over willingness-to-pay, if several values are present).
#'
#' @param ... Other arguments (currently unused).
#'
#' @return A `ggplot2` object.
#'
#' @export
plot.evppi <- function(x, type=NULL, order=FALSE, top=NULL, ...){
lower <- upper <- pars <- NULL
if (is.null(type)) {
if (is.null(x$k) || (length(unique(x$k)) == 1))
type <- "dots"
else type <- "curves"
}
if (!is.null(top)){
if (!is.numeric(top) || top < 1) stop("`top` must be a positive integer")
max_by_par <- sapply(split(x, x$pars), function(y){max(y$evppi)})
top <- min(top, length(max_by_par))
top_evppis <- rev(sort(max_by_par))[1:top]
top_pars <- names(top_evppis)
x <- x[x$pars %in% top_pars, , drop=FALSE]
}
if (type=="dots") {
plot_evppi_dots(x=x, order=order, ...)
} else if (type=="curves"){
if (is.null(x$k) || (length(unique(x$k))==1))
stop("`curves` plots are only applicable with multiple willingness-to-pay values")
plot_evppi_curves(x=x, ...)
}
}
plot_evppi_dots <- function(x, order=FALSE, ...){
pars <- evppi <- lower <- upper <- NULL
if (order) x$pars <- reorder(x$pars, x$evppi)
g <- ggplot2::ggplot(x, ggplot2::aes(y=pars, x=evppi)) +
ggplot2::geom_point() +
ggplot2::ylab("") +
ggplot2::xlab("EVPPI")
if (!is.null(x$se)) {
x$lower <- pmax(0, x$evppi - 2*x$se)
x$upper <- pmax(0, x$evppi + 2*x$se)
g <- g + ggplot2::geom_linerange(aes(xmin=lower, xmax=upper))
}
g
}
plot_evppi_curves <- function(x, top=NULL, ...){
k <- evppi <- pars <- NULL
ggplot2::ggplot(x, ggplot2::aes(x=k, y=evppi, group=pars, col=pars)) +
ggplot2::geom_line() +
ggplot2::geom_point() +
ggplot2::ylab("EVPPI") +
ggplot2::xlab("Willingness-to-pay") +
ggplot2::labs(col="Parameters")
}
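## Illustrative sketch (not run): constructing a publication-style dot plot by
## hand with ggplot2 from `evppi` output, as suggested above. `ev` is a
## placeholder for a data frame returned by evppi(), with columns `pars` and `evppi`.
if (FALSE) {
ev <- data.frame(pars = c("p1", "p2", "p3"), evppi = c(120, 45, 300))
ggplot2::ggplot(ev, ggplot2::aes(x = evppi, y = reorder(pars, evppi))) +
ggplot2::geom_point() +
ggplot2::xlab("EVPPI") +
ggplot2::ylab("") +
ggplot2::theme_minimal()
}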
|
/scratch/gouwar.j/cran-all/cranData/voi/R/plot.R
|
## BUILT-IN STUDY DESIGNS
## Not exported for user use - they will break because of the extra "pars" argument
## Single-arm study of a binary outcome
datagen_binary <- function(inputs, n=100, pars){
nsim <- nrow(inputs)
validate_prob(inputs[,pars[1]], pars[1])
data.frame(
X1 = rbinom(nsim, size=n, prob=inputs[,pars[1]])
)
}
likelihood_binary <- function(Y, inputs, n=100, pars){
validate_prob(inputs[,pars[1]], pars[1])
loglik <-
dbinom(Y[,"X1"], size=n, inputs[,pars[1]], log=TRUE)
exp(loglik)
}
analysis_binary <- function(data, args, pars, ...){
if (is.null(args$niter))
args$niter <- 1000
check_analysis_args(args, c("a", "b"))
res <- data.frame(rbeta(args$niter,
shape1 = data$X1 + args$a,
shape2 = args$n - data$X1 + args$b))
names(res) <- pars
res
}
analysis_trial_binary <- function(data, args, pars,...){
if (is.null(args$niter))
args$niter <- 1000
check_analysis_args(args, c("a1", "a2", "b1", "b2"))
p1 <- rbeta(args$niter,
shape1 = data$X1 + args$a1,
shape2 = args$n - data$X1 + args$b1)
p2 <- rbeta(args$niter,
shape1 = data$X2 + args$a2,
shape2 = args$n - data$X2 + args$b2)
res <- data.frame(p1, p2)
names(res) <- pars
res
}
## Could we do one that estimates a log odds ratio from a study with two binary outcomes???
## But if we did a study of two treatments we'd get the absolute outcomes too.
## Two-arm trial of a binary outcome
datagen_trial_binary <- function(inputs, n=100, pars){
nsim <- nrow(inputs)
validate_prob(inputs[,pars[1]], pars[1])
validate_prob(inputs[,pars[2]], pars[2])
data.frame(
X1 = rbinom(nsim, size=n, prob=inputs[,pars[1]]),
X2 = rbinom(nsim, size=n, prob=inputs[,pars[2]])
)
}
likelihood_trial_binary <- function(Y, inputs, n=100, pars){
validate_prob(inputs[,pars[1]], pars[1])
validate_prob(inputs[,pars[2]], pars[2])
loglik <-
dbinom(Y[,"X1"], size=n, inputs[,pars[1]], log=TRUE) +
dbinom(Y[,"X2"], size=n, inputs[,pars[2]], log=TRUE)
exp(loglik)
}
## Single-arm study of a normal outcome with known variance (supplied as an argument)
## Return an estimate of the mean from a study of size n
datagen_normal_known <- function(inputs, n=100, pars, sd=1){
data.frame(
X1 = rnorm(nrow(inputs),
mean = inputs[,pars[1]],
sd = sd/sqrt(n))
)
}
likelihood_normal_known <- function(Y, inputs, n=100, pars, sd=1){
mu <- inputs[,pars[1]]
dnorm(Y[,"X1"], mu, sd/sqrt(n))
}
analysis_normal_known <- function(data, args, pars,...){
if (is.null(args$niter))
args$niter <- 1000
check_analysis_args(args, c("prior_mean", "prior_sd", "sampling_sd"))
xbar_var <- args$sampling_sd^2 / args$n
w <- args$prior_sd^2 / (args$prior_sd^2 + xbar_var)
post_mean <- w*data$X1 + (1-w)*args$prior_mean
post_var <- 1 / (1 / args$prior_sd^2 + 1 / xbar_var)
res <- data.frame(rnorm(args$niter, post_mean, sqrt(post_var)))
names(res) <- pars
res
}
studies_builtin <- c("binary","trial_binary","normal_known")
check_analysis_args <- function(args, required){
for (i in required){
if (is.null(args[[i]]))
stop(sprintf("`%s` not supplied in `analysis_args`", i))
}
}
validate_prob <- function(p, name){
if (any((p<0)|(p>1)))
stop(sprintf("%s should represent a probability, but it has values that are greater than 1 or less than 0", name))
}
|
/scratch/gouwar.j/cran-all/cranData/voi/R/study.R
|
##' Methods to calculate the Expected Value of Information
##'
##' @description
##'
##' \code{\link{evppi}} calculates the expected value of partial perfect information from a decision-analytic model. The default, recommended computation methods are based on nonparametric regression. \code{\link{evpi}} is also provided for the expected value of perfect information.
##'
##' \code{\link{evsi}} calculates the expected value of sample information. Currently this implements the same set of nonparametric regression methods as in \code{\link{evppi}}, and methods based on moment matching and importance sampling. \code{\link{enbs}} can then be used to calculate and optimise the expected net benefit of sampling for a simple study with a fixed upfront cost and per-participant costs.
##'
##' \code{\link{evppi}} and \code{\link{evsi}} both require a sample of inputs and outputs from a Monte Carlo probabilistic analysis of a decision-analytic model.
##'
##' Analogous functions \code{\link{evppivar}} and \code{\link{evsivar}} calculate the EVPPI and EVSI for models used for estimation rather than decision-making. The value of information is measured by expected reductions in variance of an uncertain model output of interest.
##'
##' A pure "brute-force" Monte Carlo method for EVPPI calculation is provided in \code{\link{evppi_mc}}, though this is usually computationally impractical.
##'
##' The \href{https://chjackson.github.io/voi/articles/voi.html}{package overview / Get Started vignette} gives worked examples of the use of all of these functions.
##'
##' @references
##'
##' Heath, A., Manolopoulou, I., & Baio, G. (2017). A review of methods for analysis of the expected value of information. Medical Decision Making, 37(7), 747-758.
##'
##' Heath, A., Kunst, N., Jackson, C., Strong, M., Alarid-Escudero, F., Goldhaber-Fiebert, J. D., Baio, G. Menzies, N.A, Jalal, H. (2020). Calculating the Expected Value of Sample Information in Practice: Considerations from 3 Case Studies. Medical Decision Making, 40(3), 314-326.
##'
##' Kunst, N., Wilson, E. C., Glynn, D., Alarid-Escudero, F., Baio, G., Brennan, A., Fairley, M., Glynn, D., Goldhaber-Fiebert, J. D., Jackson, C., Jalal, H., Menzies, N. A., Strong, M., Thom, H., Heath, A. (2020). Computing the Expected Value of Sample Information Efficiently: Practical Guidance and Recommendations for Four Model-Based Methods. Value in Health, 3(6), 734-742.
##'
##' @name voi-package
##' @aliases voi-package
##' @docType package
##'
##' @importFrom grDevices dev.off
##' @importFrom graphics points hist par
##' @importFrom ggplot2 ggplot aes geom_point ylab xlab geom_linerange geom_histogram
##' @importFrom gridExtra grid.arrange
##' @importFrom stats formula as.formula dist dnorm formula optim sd var rbeta rbinom rlnorm rnorm quantile dbinom coef vcov predict AIC cov lm fitted reorder IQR residuals update pnorm
##' @importFrom utils select.list combn
##' @importFrom progress progress_bar
##' @importFrom Matrix nearPD
##'
NULL
|
/scratch/gouwar.j/cran-all/cranData/voi/R/voi-package.R
|
################################################################################
#### Model Inputs from Data for the Chemotherapy Model
################################################################################
## Read Data from File
dat_side_effs_SC <- read.csv("01_data_raw/Side_Effects_SC.csv",
header = TRUE)
## Total Number of Patients in Observed Data
n_patients <- nrow(dat_side_effs_SC)
## Number of Patients with side effects
n_side_effects <- sum(dat_side_effs_SC$Side_Effects)
## Number of Hospitalised Patients
n_hospitalised <- sum(dat_side_effs_SC$Hosp)
## Number of Patients who Died
n_died <- sum(dat_side_effs_SC$Death)
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/02_data/01_Data_Inputs.R
|
################################################################################
#### Model Inputs from Assumptions for the Chemotherapy Model
################################################################################
## Time Horizon
time_horizon <- 52
time_horizon_longterm <- 40
## Parameters for the PSA distribution of the risk reduction of side effects
logor_side_effects_mu <- log(0.54)
logor_side_effects_sd <- 0.3
## Parameters for the PSA distribution of the recovery probability for patients
## who are not hospitalised
p_recovery_home_mu <- 0.21
p_recovery_home_sd <- 0.03
## Parameters for the PSA distribution of the recovery probability for patients
## who are hospitalised
p_recovery_hosp_mu <- 0.03
p_recovery_hosp_sd <- 0.0065
## Parameters for the PSA distribution of the costs of treating patients at home
c_home_care_mu <- 830
c_home_care_sd <- sqrt(150)
## Parameters for the PSA distribution of the costs of treating patients in
## hospital
c_hospital_mu <- 2400
c_hospital_sd <- sqrt(1880)
## Parameters for the PSA distribution of the one-off cost of death
c_death_mu <- 1710
c_death_sd <- sqrt(760)
## Parameters for the PSA distribution of the utility for recovered patients
u_recovery_mu <- 0.98
u_recovery_sd <- sqrt(0.0008)
## Parameters for the PSA distribution of the utility of patients who are treated
## at home.
u_home_care_mu <- 0.7
u_home_care_sd <- sqrt(0.02)
## Parameters for the PSA distribution of the utility of treating patients in
## hospital
u_hospital_mu <- 0.3
u_hospital_sd <- sqrt(0.03)
## Parameters for the PSA distribution of the long-term mortality rate
rate_longterm_mu <- 0.95 / 20 # 95% of individuals to die by 20 years
rate_longterm_sd <- sqrt(0.001)
## Drug costs
c_treatment_1 <- 120
c_treatment_2 <- 1975
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/02_data/02_Assumption_Inputs.R
|
################################################################################
#### Misc Functions for the Chemotherapy Model
################################################################################
## Function to transform values for mean and standard deviation into parameters
## for a Beta distribution
betaPar <- function(m, s) {
# m: Mean of the Beta distribution
# s: Standard deviation of the Beta distribution
var <- s ^ 2
alpha <- ((1 - m) / var - 1 / m) * m ^ 2
beta <- alpha * (1 / m - 1)
return(
list(alpha = alpha, beta = beta)
)
}
## Function to transform values for mean and standard deviation into parameters
## for a Log-Normal distribution
lognPar <- function(m,s) {
# m: Mean of the Log-Normal distribution
# s: Standard deviation of the Log-Normal distribution
var <- s^2
meanlog <- log(m) - 0.5 * log(1 + var/m^2)
varlog <- log(1 + (var/m^2))
sdlog <- sqrt(varlog)
return(
list(meanlog = meanlog, sdlog = sdlog)
)
}
## Function to transform values for mean and standard deviation into parameters
## for a Gamma distribution
gammaPar <- function(m,s) {
# m: Mean of the Gamma distribution
# s: Standard deviation of the Gamma distribution
var <- s^2
beta <- m / var
alpha <- m * beta
return(
list(alpha = alpha, beta = beta)
)
}
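## Illustrative check (not run): the returned parameters should reproduce the
## requested mean and standard deviation, e.g. for betaPar:
if (FALSE) {
pars <- betaPar(0.7, 0.1)
ab <- pars$alpha + pars$beta
pars$alpha / ab # mean of the Beta distribution, ~0.7
sqrt(pars$alpha * pars$beta / (ab^2 * (ab + 1))) # standard deviation, ~0.1
}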
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/03_R/01_misc_functions.R
|
################################################################################
#### Functions for the Chemotherapy Model
################################################################################
## Function to generate the PSA parameters
generate_psa_parameters <- function(n){
# n: The number of PSA simulations to be drawn
set.seed(123)
# Probability of side effects under treatment 1
p_side_effects_t1 <- rbeta(n,
1 + n_side_effects,
1 + n_patients - n_side_effects)
# Log odds of side effects on treatment 2
logor_side_effects <- rnorm(n, logor_side_effects_mu, logor_side_effects_sd)
# Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
## Variables to define transition probabilities
# Probability that a patient is hospitalised over the time horizon
p_hospitalised_total <- rbeta(n,
1 + n_hospitalised,
1 + n_side_effects - n_hospitalised)
# Probability that a patient dies over the time horizon given they were
# hospitalised
p_died <- rbeta(n, 1 + n_died, 1 + n_hospitalised - n_died)
# Lambda_home: Conditional probability that a patient recovers considering
# that they are not hospitalised
betapars <- betaPar(p_recovery_home_mu, p_recovery_home_sd)
lambda_home <- rbeta(n, betapars$alpha, betapars$beta)
# Lambda_hosp: Conditional probability that a patient recovers considering
# that they do not die
betapars <- betaPar(p_recovery_hosp_mu, p_recovery_hosp_sd)
lambda_hosp <- rbeta(n, betapars$alpha, betapars$beta)
## Health State Costs
lnpars <- lognPar(c_home_care_mu, c_home_care_sd)
c_home_care <- rlnorm(n, lnpars$meanlog, lnpars$sdlog)
lnpars <- lognPar(c_hospital_mu, c_hospital_sd)
c_hospital <- rlnorm(n, lnpars$meanlog, lnpars$sdlog)
lnpars <- lognPar(c_death_mu, c_death_sd)
c_death <- rlnorm(n, lnpars$meanlog, lnpars$sdlog)
## Health Utilities
betapars <- betaPar(u_recovery_mu, u_recovery_sd)
u_recovery <- rbeta(n, betapars$alpha, betapars$beta)
betapars <- betaPar(u_home_care_mu, u_home_care_sd)
u_home_care <- rbeta(n, betapars$alpha, betapars$beta)
betapars <- betaPar(u_hospital_mu, u_hospital_sd)
u_hospital <- rbeta(n, betapars$alpha, betapars$beta)
# Long term survival
gammapars <- gammaPar(rate_longterm_mu, rate_longterm_sd)
rate_longterm <- rgamma(n, shape = gammapars$alpha, rate = gammapars$beta)
# Specify a matrix containing all the parameters
params_matrix <- data.frame(
p_side_effects_t1,
p_side_effects_t2,
c_home_care, c_hospital, c_death,
u_recovery, u_home_care, u_hospital,
logor_side_effects,
p_hospitalised_total, p_died,
lambda_home, lambda_hosp, rate_longterm)
return(params_matrix)
}
## Function to calculate average time each patient with adverse events spends
## in the health states of the Markov model
calculate_state_occupancy_markov_model <- function(
p_side_effects_t1,
p_side_effects_t2,
p_home_home, p_home_hospital, p_home_recover,
p_hospital_hospital, p_hospital_recover, p_hospital_dead,
p_longterm)
# The arguments are transition probabilities derived in calculate_costs_effects;
# time_horizon is a global model assumption set in 02_Assumption_Inputs.R
{
## Markov transition probability matrix
## States: Home care, Hospital care, Recovery, Death
MM.mat <- matrix(c(p_home_home, p_home_hospital, p_home_recover, 0,
0, p_hospital_hospital, p_hospital_recover, p_hospital_dead,
0, 0, 1 - p_longterm, p_longterm,
0, 0, 0, 1),
nrow = 4, ncol = 4, byrow = TRUE)
## Number of patients in each state for each time point
## 3 dimensions: number of states, number of time points,
## number of treatment options
trace <- array(0, dim = c(4, time_horizon + 1, 2))
# Initialise with the predicted number of side effects in the population
trace[1, 1, ] <- c(p_side_effects_t1, p_side_effects_t2)
# Run the Markov model over the time horizon
for(i in 2:(time_horizon + 1)){
trace[, i, 1] <- trace[, i - 1, 1] %*% MM.mat
trace[, i, 2] <- trace[, i - 1, 2] %*% MM.mat
}
return(trace) # 4 x (time_horizon + 1) x 2 array
}
## Function to calculate the costs and effects from our model
calculate_costs_effects <- function(
p_side_effects_t1,
p_side_effects_t2,
p_hospitalised_total, p_died,
lambda_home, lambda_hosp,
c_home_care, c_hospital, c_death,
u_recovery, u_home_care, u_hospital,
logor_side_effects, rate_longterm)
# All function arguments come from the generate_psa_parameters function
{
# Calculate p_side_effects_2 from odds ratio
# Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
## Transition Probabilities
p_home_hospital <- 1 - (1 - p_hospitalised_total) ^ (1 / time_horizon)
p_home_home <- (1 - lambda_home) * (1 - p_home_hospital)
p_home_recover <- lambda_home * (1 - p_home_hospital)
p_hospital_dead <- 1 - (1 - p_died) ^ (1 / time_horizon)
p_hospital_hospital <- (1 - lambda_hosp) * (1 - p_hospital_dead)
p_hospital_recover <- lambda_hosp * (1 - p_hospital_dead)
p_longterm <- 1 - exp(-rate_longterm * 1/52)
# Calculate the trace matrix from the markov model function
m_markov_trace <- calculate_state_occupancy_markov_model(
p_side_effects_t1,
p_side_effects_t2,
p_home_home, p_home_hospital, p_home_recover,
p_hospital_hospital, p_hospital_recover, p_hospital_dead,
p_longterm)
## costs and effectiveness for four states
c_state_vector <- c(c_home_care, c_hospital, 0, 0)
u_state_vector <- c(u_home_care, u_hospital, u_recovery, 0)
## Estimate the cost of side effects from the Markov model
c_side_effects <- array(NA, dim = 2)
## Average cost for both Soc and novel treatment per person
## (The cost includes one-off cost of death for all patients who died)
c_side_effects[1] <- (sum(c_state_vector %*% m_markov_trace[, , 1]) +
c_death * m_markov_trace[4, time_horizon + 1, 1]) / 52
c_side_effects[2] <- (sum(c_state_vector %*% m_markov_trace[, , 2]) +
c_death * m_markov_trace[4, time_horizon + 1, 2]) / 52
c_drug <- c(c_treatment_1, c_treatment_2)
c_longterm <- (1 - m_markov_trace[4, time_horizon + 1, ]) *
(pexp(time_horizon_longterm - 2, rate = rate_longterm)) * c_death
c_overall <- c(c_drug + c_side_effects + c_longterm)
## Total QALY of side effects for both Soc and novel treatment
u_side_effects <- array(NA, dim = 2)
u_side_effects[1] <- sum(u_state_vector %*% m_markov_trace[,,1]) / 52
u_side_effects[2] <- sum(u_state_vector %*% m_markov_trace[,,2]) / 52
u_longterm <- (1 - m_markov_trace[4, time_horizon + 1, ]) *
integrate(function(x){ (1 - pexp(x, rate = rate_longterm)) * u_home_care}, lower = 2, upper = time_horizon_longterm)$value
## QALYs for patients who do not experience adverse events over the time horizon
p_no_side_effects <- 1 -
c(p_side_effects_t1,
p_side_effects_t2)
u_no_side_effects <- p_no_side_effects * u_recovery * (time_horizon + 1) / 52
## Average effect for both Soc and novel treatment per person
u_overall <- c(u_side_effects + u_no_side_effects + u_longterm)
names(c_overall) <- paste0("cost",seq_along(c_overall))
names(u_overall) <- paste0("eff",seq_along(u_overall))
output <- array(NA, dim = c(2, length(u_overall)),
dimnames = list(c("Effects", "Costs"),
c("SoC", "Novel")))
output[1, ] <- u_overall
output[2, ] <- c_overall
return(output)
}
calculate_net_benefit <- function(
costs_effects,
wtp)
{
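# Net monetary benefit for each PSA simulation and treatment:
# NB = wtp * effects - costs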
if(!is.null(dim(costs_effects))){
nb <- wtp * costs_effects[, 1, ] -
costs_effects[, 2, ]
}
return(nb)
}
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/03_R/02_model_functions.R
|
################################################################################
#### Analysis for the Chemotherapy Example
################################################################################
set.seed(123)
### Load the Inputs
source("02_data/01_data_inputs.R")
source("02_data/02_Assumption_Inputs.R")
### Load the Functions
source("03_R/01_misc_functions.R")
source("03_R/02_model_functions.R")
### Generate the parameters for the PSA
n_psa_size <- 5000
m_params <- generate_psa_parameters(n_psa_size)
### Run the model
m_costs_effects <- array(NA, dim = c(n_psa_size, 2, 2))
for(s in 1:n_psa_size){
v_params <- m_params[s, ]
m_costs_effects[s, , ] <- do.call(calculate_costs_effects,
v_params[names(formals(calculate_costs_effects))])
}
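# (Each call passes only the PSA columns whose names match the model function's
# formal arguments, so any extra columns in m_params are ignored.)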
# Set the column names for the output
dimnames(m_costs_effects)[2:3] <- dimnames(
do.call(calculate_costs_effects,
v_params[names(formals(calculate_costs_effects))])
)
# Estimate net benefit for different willingness-to-pay values
n_wtp <- 51
wtp_seq <- seq(0, 50000, length.out = n_wtp)
# Initialise the net benefit matrix
m_net_benefit <- array(NA, dim = c(n_psa_size, 2, n_wtp))
w <- 1
for(wtp in wtp_seq){
m_net_benefit[ , , w] <- calculate_net_benefit(m_costs_effects, wtp = wtp)
w <- w + 1
}
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/04_analysis/01_model_run.R
|
################################################################################
#### Baseline Results for Chemotherapy Model
################################################################################
### Packages
library(colorspace)
### Run the Model
source("04_analysis/01_model_run.R")
### Mean costs and effects
mean_costs_effects <- apply(m_costs_effects, c(2, 3), mean)
write.csv(mean_costs_effects, "07_tables/Mean_Costs_Effects.csv")
### Incremental Cost-Effectiveness Ratio (ICER)
ICER <- mean(m_costs_effects[, 2, 2] - m_costs_effects[, 2, 1]) /
mean(m_costs_effects[, 1, 2] - m_costs_effects[, 1, 1])
ICER
write.csv(ICER, "07_tables/ICER.csv")
### Cost-Effectiveness Acceptability Curve
colours <- rainbow_hcl(12)
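# For each PSA simulation and willingness-to-pay value, identify which treatment
# has the highest net benefit (1 = SoC, 2 = novel treatment)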
m_max <- apply(m_net_benefit, c(1,3), which.max)
pdf("06_figs/CEAC.pdf")
plot(wtp_seq, apply(m_max == 2, 2, mean), type = "l", lwd = 3,
ylim = c(0,1),
col = colours[1],
xlab = "Willingness-to-pay",
ylab = "Probability of Cost Effectiveness for Treatment 2",
main = "Cost Effectiveness Acceptability Curve")
dev.off()
### Cost-Effectiveness Acceptability Frontier
m_ceaf <- cbind(apply(m_max == 1, 2, mean), apply(m_max == 2, 2, mean))
pdf("06_figs/CEAF.pdf")
plot(wtp_seq, m_ceaf[, 2], type = "l", lwd = 3,
ylim = c(0,1),
col = colours[1],
xlab = "Willingness-to-pay",
ylab = "Probability of Cost Effectiveness",
main = "Cost Effectiveness Acceptability Frontier")
points(wtp_seq, m_ceaf[, 1], type = "l", col = colours[7], lwd = 3)
v_optimal_treatment <- apply(apply(m_net_benefit, c(2,3), mean), 2, which.max)
v_ceaf <- vector(length = n_wtp)
for(i in 1:n_wtp){
v_ceaf[i] <- m_ceaf[i, v_optimal_treatment[i]]
}
points(wtp_seq, v_ceaf, pch = 19, cex = 0.5)
points(wtp_seq, v_ceaf, pch = 0)
legend("bottomright", c("Treatment 1", "Treatment 2", "Frontier"),
lwd = c(2, 2, NA), pch = c(NA, NA, 0),
col = c(colours[1], colours[7], "black"))
dev.off()
### Baseline willingness to pay = 20000
baseline_wtp <- which(wtp_seq == 20000)
# Average net benefit
mean_net_benefit <- apply(m_net_benefit[, , baseline_wtp], 2, mean)
mean_net_benefit
names(mean_net_benefit) <- c("Treatment 1", "Treatment 2")
mean_net_benefit / 20000
write.csv(mean_net_benefit, "07_tables/Mean_Net_Benefit.csv")
# INB
INB <- diff(mean_net_benefit)
INB
q_INB <- quantile(apply(m_net_benefit[, , baseline_wtp], 1, diff), prob = c(0.025, 0.975))
q_INB
# Optimal treatment
d_star <- which.max(mean_net_benefit)
d_star
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/04_analysis/02_baseline_model_output.R
|
###########################################################
### Expected Value of Perfect Information Analysis ###
###########################################################
### Packages
library(voi)
## Run the model
source("04_analysis/01_model_run.R")
## Expected Value of Perfect Information - single WTP
# Specify willingness to pay
wtp_fix = 20000
# Calculate EVPI from net benefit
nb <- m_net_benefit[ , , wtp_seq == wtp_fix]
evpi(nb)
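# Equivalent direct computation from the net benefit matrix (illustrative check):
# EVPI = E[max over treatments of NB] - max over treatments of E[NB]
mean(apply(nb, 1, max)) - max(colMeans(nb))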
## Baseline Cost-Effectiveness Formatting
# The output from the cost-effectiveness model should be formatted for
# the voi package: a list with elements e (effects), c (costs) and k
# (willingness-to-pay values), mirroring the structure of a BCEA object.
chemotherapy_output <- list(e = m_costs_effects[, "Effects", ],
c = m_costs_effects[, "Costs", ],
k = seq(0, 50000, length.out = 501))
## Expected Value of Perfect Information
# Calculate
EVPI <- evpi(chemotherapy_output)
# WTP = 20000
EVPI$evpi[EVPI$k == wtp_fix]
# Plot
pdf("06_figs/EVPI.pdf")
plot(EVPI,
xlab = "Willingness-to-Pay",
ylab = "EVPI",
main = "Expected Value of Perfect Information",
type = "l")
dev.off()
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/04_analysis/03_Expected_Value_of_Perfect_Information.R
|
###############################################################
### Expected Value of Partial Perfect Information Analysis ###
###############################################################
### Packages
library(voi)
library(ggplot2)
library(dplyr)
library(tidyverse)
## Run the model
source("04_analysis/02_baseline_model_output.R")
## Baseline Cost-Effectiveness Formatting
# The output from the cost-effectiveness model should be formatted for
# the voi package: a list with elements e (effects), c (costs) and k
# (willingness-to-pay values), mirroring the structure of a BCEA object.
chemotherapy_output <- list(e = m_costs_effects[, "Effects", ],
c = m_costs_effects[, "Costs", ],
k = seq(0, 50000, length.out = 501))
# EVPI
EVPI <- evpi(chemotherapy_output)
evpi.plot <- ggplot(EVPI, aes(x=k, y=evpi)) +
geom_line()
# Select the WTP on the grid that is closest to the ICER
# Represents the maximum decision uncertainty (but not necessarily the max VOI)
wtp.max <- chemotherapy_output$k[
which.min(abs(chemotherapy_output$k - ICER))
]
EVPI[EVPI$k == 20000, ]
## Single parameter EVPPI
# Perform the calculations to determine the EVPPI for all parameters at all WTP
pars_all <- as.list(names(m_params))
ev_single <- evppi(outputs=chemotherapy_output, inputs=m_params, pars=pars_all)
ev_single <- ev_single %>%
mutate(pars = rep(unlist(pars_all), each = length(chemotherapy_output$k)))
# Explore EVPPI at maximum uncertainty
ev_single %>%
filter(k == wtp.max) %>%
arrange(desc(evppi)) %>%
mutate(evppi = round(evppi, 2))
# Plot key parameters
ev_single %>%
filter(k == wtp.max) %>%
mutate(pars = fct_reorder(pars, (evppi))) %>%
ggplot(mapping = aes(evppi, pars)) +
geom_point() +
xlab("EVPPI") + ylab("Model Parameters")
ggsave("06_figs/EVPPI_ICER.pdf")
## Calculate standard errors
pars_small <- list("p_side_effects_t1", "p_hospitalised_total", "lambda_hosp")
nb <- wtp.max * chemotherapy_output$e - chemotherapy_output$c
ev_small_se <- evppi(outputs=nb, inputs=m_params, pars=pars_small,
se = TRUE)
ev_small_se
# Plot key parameters across WTP
EVPI <- EVPI %>%
mutate(pars = "all_parameters")
ev_single %>%
filter(pars %in%c("logor_side_effects",
"u_home_care",
"p_died",
"rate_longterm",
"p_side_effects_t1",
"p_hospitalised_total",
"lambda_hosp")) %>%
ggplot(aes(x=k, y=evppi, group=pars, col = pars)) +
geom_line() +
geom_line(data = EVPI, aes(x = k, y = evpi), linetype = "dashed") +
theme_bw() +
xlab("Willingness-to-Pay") +
ylab("Value of Information") +
labs(color = "Parameters")
ggsave("06_figs/EVPPI_WTP.pdf")
## EVPPI Groups
# Randomised Trial
par_RCT <- list(
"side_effects" = c("logor_side_effects"),
"side_effects_and_follow_up" = c("logor_side_effects",
"p_hospitalised_total","p_died",
"lambda_home","lambda_hosp")
)
ev_RCT <- evppi(outputs=chemotherapy_output, inputs=m_params, pars=par_RCT,
method = "earth")
# Explore at maximum willingness to pay
ev_RCT %>%
filter(k == wtp.max)
# Explore across willingness to pay
ev_RCT %>%
ggplot(aes(x=k, y=evppi, group=pars, col = pars)) +
geom_line() +
geom_line(data = EVPI, aes(x = k, y = evpi), linetype = "dashed") +
theme_bw()
ggsave("06_figs/EVPPI_RCT.pdf")
# All Studies
par_groups <- list(
"side_effects" = c("logor_side_effects"),
"longterm_surv" = c("rate_longterm"),
"hosp_trans_probs" = c("p_died","lambda_hosp"),
"trans_probs" = c("p_side_effects_t1", "p_hospitalised_total",
"lambda_home", "p_died","lambda_hosp"),
"side_effects_and_follow_up" = c("logor_side_effects", "p_hospitalised_total",
"p_died","lambda_home","lambda_hosp"),
"costs" = c("c_home_care","c_hospital","c_death"),
"utilities" = c("u_recovery","u_home_care","u_hospital")
)
ev_groups <- evppi(outputs=chemotherapy_output, inputs=m_params, pars=par_groups,
method = "earth")
# Explore EVPPI at maximum uncertainty
ev_groups %>%
filter(k == wtp.max) %>%
arrange(desc(evppi)) %>%
mutate(evppi = round(evppi, 2))
# Plot key parameters
ev_groups %>%
filter(k == wtp.max) %>%
plot(order = TRUE)
# Plot all groups across WTP
ev_groups %>%
ggplot(aes(x=k, y=evppi, group=pars, col = pars)) +
geom_line() +
geom_line(data = EVPI, aes(x = k, y = evpi), linetype = "dashed") +
theme_bw()
ggsave("06_figs/EVPPI_groups.pdf")
## Population Level EVPPI
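# Per-person EVPPI is scaled to the population level by multiplying by the
# discounted number of patients affected by the decision: 46,000 eligible
# patients per year, discounted at 3.5% per year over 5-, 10- and 15-year horizons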
pop_size <- 46000 * c(sum(1 / (1 + 0.035)^(0:5)),
sum(1 / (1 + 0.035)^(0:10)),
sum(1 / (1 + 0.035)^(0:15)))
ev_groups %>%
filter(k == 20000) %>%
arrange(desc(evppi)) %>%
mutate(evppi_5Y = evppi * pop_size[1],
evppi_10Y = evppi * pop_size[2],
evppi_15Y = evppi * pop_size[3])
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/04_analysis/04_Expected_Value_of_Partial_Perfect_Information.R
|
#######################################################################
### Expected Value of Sample Information Analysis - Example ###
#######################################################################
### Packages
library(voi)
library(ggplot2)
library(dplyr)
library(R2jags)
## Run the model
source("04_analysis/02_baseline_model_output.R")
source("03_R/01_misc_functions.R")
## Baseline Cost-Effectiveness Formatting
# The output from the cost-effectiveness model should be formatted for
# the voi package: a list with elements e (effects), c (costs) and k
# (willingness-to-pay values), mirroring the structure of a BCEA object.
chemotherapy_output <- list(e = m_costs_effects[, "Effects", ],
c = m_costs_effects[, "Costs", ],
k = seq(0, 50000, length.out = 501))
# Data generation function - aggregate data (for regression method)
utility_datagen_fn_agg <- function(inputs, n = 500){
dat_indiv <- utility_datagen_fn_indiv(inputs, n = n)
X_hospital_mean <- rowMeans(dat_indiv)
data_save_dat <- data.frame(X_hospital_mean = X_hospital_mean)
return(data_save_dat)
}
# Data generation function - individual data (for other EVSI methods)
utility_datagen_fn_indiv <- function(inputs, n = 500){
# Simulate individual-level hospital utility data for each PSA row
X_hospital <- matrix(nrow = nrow(inputs), ncol = n[1])
for(i in 1:nrow(inputs)){
set.seed(123 + i)
m_hospital <- inputs[i, "u_hospital"]
sd_hospital <- inputs[i, "sd_iid_hospital"]
X_hospital[i, ] <- truncnorm::rtruncnorm(n[1],
mean = m_hospital,
sd = sd_hospital,
a = -Inf, b = 1)
}
data_save_dat <- data.frame(cbind(X_hospital = X_hospital))
return(data_save_dat)
}
m_params$sd_iid_hospital <- runif(nrow(m_params), 0.00001, 0.4)
## Regression Based Method
evsi_utility <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("u_hospital"),
n = seq(50, 1000, by = 200),
method = "gam",
datagen_fn = utility_datagen_fn_agg)
## Moment Matching Method
# Analysis function based on JAGS
utility_analysis_fn <- function(data, args, pars){
# Create the data list for JAGS
data_jags <- list(X_hospital = unlist(data),
n = args$n,
alpha_hospital = betaPar(
args$u_hospital_mu,
args$u_hospital_sd
)$alpha,
beta_hospital = betaPar(
args$u_hospital_mu,
args$u_hospital_sd
)$beta)
trial <- "
model {
for(i in 1:n){
X_hospital[i] ~ dnorm(u_hospital, tau_hospital)T(, 1)
}
u_hospital ~ dbeta(alpha_hospital, beta_hospital)
sd_hospital ~ dunif(0.00001, 0.4)
tau_hospital <- 1 / sd_hospital ^ 2
}
"
filein <- file.path(tempdir(),fileext="datmodel.txt")
cat(trial, file=filein)
# Perform the MCMC simulation with JAGS.
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250,
quiet=TRUE, progress.bar = "none")
u_hospital <- bugs.data$BUGSoutput$sims.matrix[, "u_hospital"]
return(data.frame(u_hospital = u_hospital))
}
analysis_args <- list(n = 30,
u_hospital_mu = u_hospital_mu,
u_hospital_sd = u_hospital_sd,
n.iter = 2000)
evsi_utility <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("u_hospital"),
pars_datagen = c("u_hospital","sd_iid_hospital"),
n = seq(30, 1000, by = 200),
method = "mm",
datagen_fn = utility_datagen_fn_indiv,
model_fn = calculate_costs_effects,
analysis_args = analysis_args,
analysis_fn = utility_analysis_fn,
par_fn = generate_psa_parameters,
Q = 50)
## Importance Sampling
# Likelihood function
utility_likelihood <- function(data, inputs){
# Load the data
ll <- numeric(nrow(inputs))
data_vec <- unlist(data)
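# For each PSA draw, the likelihood of the simulated dataset is the product of
# truncated-normal densities of the individual observations (log-densities are
# summed before exponentiating, to reduce numerical underflow)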
for(i in 1:nrow(inputs)){
m_hospital <- inputs[i, "u_hospital"]
sd_hospital <- inputs[i, "sd_iid_hospital"]
ll[i] <- exp(
sum(
log(
truncnorm::dtruncnorm(data_vec,
mean = m_hospital,
sd = sd_hospital,
a = -Inf, b = 1)
)))
}
return(ll)
}
# Importance Sampling - EVSI. (this is slow).
evsi_utility <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("u_hospital"),
pars_datagen = c("u_hospital", "sd_iid_hospital"),
n = seq(50, 1000, by = 200),
method = "is",
nsim = 1000,
datagen_fn = utility_datagen_fn_indiv,
likelihood = utility_likelihood)
### Using the "trial_binary" built-in study design
# EVSI calculation using GAM regression.
evsi_builtin_rb <- evsi(outputs = chemotherapy_output,
inputs = m_params,
study = "trial_binary",
pars = c("p_side_effects_t1",
"p_side_effects_t2"),
n = seq(50, 500, by = 50),
method = "gam")
# EVSI calculation using Importance Sampling
evsi_builtin_is <- evsi(outputs = chemotherapy_output,
inputs = m_params,
study = "trial_binary",
pars = c("p_side_effects_t1",
"p_side_effects_t2"),
n = seq(50, 500, by = 50),
method = "is")
# Beta prior for standard care is set using the number of events
beta_params_t1 <- c(1 + n_side_effects,
1 + n_patients - n_side_effects)
# Beta prior for the novel intervention is approximated from the
# mean and standard deviation of the PA distribution for the
# probability of side effects.
beta_params_t2 <- betaPar(mean(m_params$p_side_effects_t2),
sd(m_params$p_side_effects_t2))
# EVSI calculation with moment matching method
evsi_builtin_mm <- evsi(outputs = chemotherapy_output,
inputs = m_params,
study = "trial_binary",
pars = c("p_side_effects_t1", "p_side_effects_t2"),
n = seq(50, 500, by = 50),
method = "mm",
model_fn = calculate_costs_effects,
analysis_args = list(a1 = beta_params_t1[1],
b1 = beta_params_t1[2],
a2 = beta_params_t2$alpha,
b2 = beta_params_t2$beta),
par_fn = generate_psa_parameters)
## Using a bespoke data generation function: the trial is summarised
# by an odds ratio (GAM regression method)
# Data generation function
OR_datagen_fn <- function(inputs, n = 500){
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
p_side_effects_t2 <- inputs[, "p_side_effects_t2"]
X1 <- rbinom(length(p_side_effects_t1), n, p_side_effects_t1)
X2 <- rbinom(length(p_side_effects_t2), n, p_side_effects_t2)
# Create an odds ratio (of avoiding side effects) as a scalar summary statistic
OR <- (n - X2) / X2 / ((n - X1) / X1)
data_save <- data.frame(OR = OR)
return(data_save)
}
# EVSI calculation using GAM regression.
evsi_OR <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("logor_side_effects"),
pars_datagen = c("p_side_effects_t1",
"p_side_effects_t2"),
n = seq(50, 500, by = 10),
method = "gam",
datagen_fn = OR_datagen_fn,
par_fn = generate_psa_parameters)
# Using a bespoke analysis function - trial only updates odds ratio.
# Data generation function
OR_datagen_fn <- function(inputs, n = 500){
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
p_side_effects_t2 <- inputs[, "p_side_effects_t2"]
X1 <- rbinom(length(p_side_effects_t1), n, p_side_effects_t1)
X2 <- rbinom(length(p_side_effects_t2), n, p_side_effects_t2)
data_save <- data.frame(X1 = X1, X2 = X2)
return(data_save)
}
# Analysis function based on JAGS
OR_analysis_fn <- function(data, args, pars){
X1 <- data$X1
X2 <- data$X2
data_jags <- list(X1 = X1,
X2 = X2,
n = args$n,
n_side_effects = args$n_side_effects,
n_patients = args$n_patients,
logor_side_effects_mu = args$logor_side_effects_mu,
logor_side_effects_sd = args$logor_side_effects_sd)
LogOR_trial <- "
model {
# Probability of side effects under treatment 1
p_side_effects_t1 ~ dbeta(1 + n_side_effects,
1 + n_patients - n_side_effects)
# Log odds of side effects on treatment 2 (JAGS dnorm uses precision)
logor_side_effects ~ dnorm(logor_side_effects_mu,
pow(logor_side_effects_sd, -2))
# Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <-
odds_side_effects_t2 / (1 + odds_side_effects_t2)
X1 ~ dbin(p_side_effects_t1, n)
X2 ~ dbin(p_side_effects_t2, n)
}
"
filein <- file.path(tempdir(), fileext="datmodel.txt")
cat(LogOR_trial,file=filein)
# Perform the MCMC simulation with JAGS
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 3,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250,
quiet=TRUE, progress.bar = "none")
# Resample treatment 1 from the prior as study will not update t1
nsam <- length(bugs.data$BUGSoutput$sims.matrix[, pars[2]])
resample_t1 <- rbeta(nsam,
1 + args$n_side_effects,
1 + args$n_patients - args$n_side_effects)
resample_t2 <- bugs.data$BUGSoutput$sims.matrix[, pars[2]]
return(data.frame(p_side_effects_t1 = resample_t1,
p_side_effects_t2 = resample_t2))
}
# EVSI calculation using the moment matching method.
analysis_args <- list(n_side_effects = n_side_effects,
n_patients = n_patients,
n = 500,
logor_side_effects_mu = logor_side_effects_mu,
logor_side_effects_sd = logor_side_effects_sd,
n.iter = 7500)
evsi_OR <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("p_side_effects_t1", "p_side_effects_t2"),
n = seq(50, 500, by = 10),
method = "mm",
datagen_fn = OR_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = analysis_args,
analysis_fn = OR_analysis_fn,
par_fn = generate_psa_parameters)
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/04_analysis/05_Expected_Value_of_Sample_Information_Bespoke_Example.R
|
#######################################################################
### Expected Value of Sample Information Analysis - Moment Matching ###
#######################################################################
### Packages
library(voi)
library(ggplot2)
library(dplyr)
library(R2jags)
## Run the model
source("04_analysis/02_baseline_model_output.R")
## Baseline Cost-Effectiveness Formatting
# The output from the cost-effectiveness model should be formatted for
# the voi package: a list with elements e (effects), c (costs) and k
# (willingness-to-pay values), mirroring the structure of a BCEA object.
chemotherapy_output <- list(e = m_costs_effects[, "Effects", ],
c = m_costs_effects[, "Costs", ],
k = seq(0, 50000, length.out = 501))
## EVSI Calculations
#### STUDY 1: Randomised Trial for Log-Odds ratio ####
## Using the default trials in the voi package
# A randomised trial for binary outcomes requires a beta prior distribution
# Beta prior for standard care is set using the number of events
beta_params_t1 <- c(1 + n_side_effects,
1 + n_patients - n_side_effects)
# Beta prior for the novel intervention is approximated from the mean and
# standard deviation of the PA distribution for the probability of side effects.
beta_params_t2 <- betaPar(mean(m_params$p_side_effects_t2),
sd(m_params$p_side_effects_t2))
# EVSI calculation with moment matching method
evsi_default <- evsi(outputs = chemotherapy_output,
inputs = m_params,
study = "trial_binary",
pars = c("p_side_effects_t1", "p_side_effects_t2"),
n = seq(50, 1500, by = 50),
method = "mm",
model_fn = calculate_costs_effects,
analysis_args = list(a1 = beta_params_t1[1],
b1 = beta_params_t1[2],
a2 = beta_params_t2$alpha,
b2 = beta_params_t2$beta),
par_fn = generate_psa_parameters)
# Using a bespoke analysis function - trial only updates odds ratio.
# Data generation function
OR_datagen_fn <- function(inputs, n = 500){
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
logor_side_effects <- inputs[, "logor_side_effects"]
# Odds for side effects for treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
# Data generation
X1 <- rbinom(length(p_side_effects_t1), n, p_side_effects_t1)
X2 <- rbinom(length(p_side_effects_t2), n, p_side_effects_t2)
data_save <- data.frame(X1 = X1, X2 = X2)
return(data_save)
}
# Analysis function based on JAGS
OR_analysis_fn <- function(data, args, pars){
X1 <- data$X1
X2 <- data$X2
data_jags <- list(X1 = X1,
X2 = X2,
n = args$n,
n_side_effects = args$n_side_effects,
n_patients = args$n_patients,
logor_side_effects_mu = args$logor_side_effects_mu,
logor_side_effects_sd = args$logor_side_effects_sd)
LogOR_trial <- function(){
# Probability of side effects under treatment 1
p_side_effects_t1 ~ dbeta(1 + n_side_effects,
1 + n_patients - n_side_effects)
# Log odds of side effects on treatment 2 (BUGS/JAGS dnorm uses precision)
logor_side_effects ~ dnorm(logor_side_effects_mu, pow(logor_side_effects_sd, -2))
# Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
X1 ~ dbin(p_side_effects_t1, n)
X2 ~ dbin(p_side_effects_t2, n)
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(LogOR_trial,filein)
# Perform the MCMC simulation with JAGS.
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(logor_side_effects = bugs.data$BUGSoutput$sims.matrix[, pars[1]]))
}
# EVSI calculation using the moment matching method.
evsi_OR <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("logor_side_effects"),
pars_datagen = c("p_side_effects_t1", "logor_side_effects"),
n = seq(50, 1500, by = 50),
method = "mm",
datagen_fn = OR_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n_side_effects = n_side_effects,
n_patients = n_patients,
n = 500,
logor_side_effects_mu = logor_side_effects_mu,
logor_side_effects_sd = logor_side_effects_sd,
n.iter = 5250),
analysis_fn = OR_analysis_fn,
par_fn = generate_psa_parameters)
#### STUDY 2: Randomised Trial with Multiple Outcomes ####
# Data generation function
full_datagen_fn <- function(inputs, n = 500){
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
logor_side_effects <- inputs[, "logor_side_effects"]
# Odds for side effects for treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
p_hospitalised_total <- inputs[, "p_hospitalised_total"]
p_died <- inputs[, "p_died"]
lambda_home <- inputs[, "lambda_home"]
lambda_hosp <- inputs[, "lambda_hosp"]
rate_recover_hosp <- -log(1 -lambda_hosp )
rate_recover_home <- -log(1 - lambda_home)
X1 <- X2 <- X_hosp <- X_dead <- N_recover_home <- N_recover_hospital <- vector("numeric", length = dim(inputs)[1])
T_home <- T_hosp <- matrix(NA, nrow = dim(inputs)[1], ncol = 2 * n)
for(i in 1:dim(inputs)[1]){
# Simulate the number of patients with side effects
X1[i] <- rbinom(1, n, p_side_effects_t1[i])
X2[i] <- rbinom(1, n, p_side_effects_t2[i])
# Simulate the number of patients hospitalised
X_hosp[i] <- rbinom(1, X1[i] + X2[i], p_hospitalised_total[i])
# Simulate the number of patients die
X_dead[i] <- rbinom(1, X_hosp[i], p_died[i])
## Simulate recovery times for patients
N_recover_home[i] <- X1[i] + X2[i] - X_hosp[i]
if(N_recover_home[i] > 0){
T_home[i, 1:N_recover_home[i]] <- rexp(N_recover_home[i], rate_recover_home[i])
}
N_recover_hospital[i] <- X_hosp[i] - X_dead[i]
if(N_recover_hospital[i] > 0){
T_hosp[i, 1:N_recover_hospital[i]] <- rexp(N_recover_hospital[i], rate_recover_hosp[i])
}
}
data_save_dat <- data.frame(cbind(X1 = X1, X2 = X2,
X_hosp = X_hosp, X_dead = X_dead,
N_recover_home = N_recover_home,
N_recover_hospital = N_recover_hospital,
T_home = T_home, T_hosp = T_hosp))
return(data_save_dat)
}
# Analysis function based on JAGS
full_analysis_fn <- function(data, args, pars){
## Format Data - Adjust for 0 recovery times
T_home <- NA
if(data$N_recover_home > 0){
T_home <- as.numeric(as.matrix(data[, (1:data$N_recover_home) + 6]))
}
T_hosp <- NA
if(data$N_recover_hospital > 0){
T_hosp <- as.vector(as.matrix(data[,
(6 + 2 * args$n) + (1:data$N_recover_hospital)]))
}
# Create the data list for JAGS
data_jags <- list(X1 = data$X1,
X2 = data$X2,
X_hosp = data$X_hosp,
X_dead = data$X_dead,
T_home = T_home,
T_hosp = T_hosp,
N_recover_home = ifelse(data$N_recover_home > 0,
data$N_recover_home,
1),
N_recover_hosp = ifelse(data$N_recover_hospital > 0,
data$N_recover_hospital,
1),
n = args$n,
n_side_effects = args$n_side_effects,
n_patients = args$n_patients,
logor_side_effects_mu = args$logor_side_effects_mu,
logor_side_effects_sd = args$logor_side_effects_sd,
p_recovery_home_alpha = betaPar(args$p_recovery_home_mu,
args$p_recovery_home_sd)$alpha,
p_recovery_home_beta = betaPar(args$p_recovery_home_mu,
args$p_recovery_home_sd)$beta,
p_recovery_hosp_alpha = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$alpha,
p_recovery_hosp_beta = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$beta,
n_died = args$n_died,
n_hospitalised = args$n_hospitalised)
LogOR_addoutcomes_trial <- function(){
## Models for the data
X1 ~ dbin(p_side_effects_t1, n)
X2 ~ dbin(p_side_effects_t2, n)
X_hosp ~ dbinom(p_hospitalised_total, X1 + X2)
X_dead ~ dbin(p_died, X_hosp)
rate_recover_home <- -log(1 - lambda_home)
rate_recover_hosp <- -log(1 - lambda_hosp)
for(i in 1:N_recover_home){
T_home[i] ~ dexp(rate_recover_home)
}
for(i in 1:N_recover_hosp){
T_hosp[i] ~ dexp(rate_recover_hosp)
}
# Probability of side effects under treatment 1
p_side_effects_t1 ~ dbeta(1 + n_side_effects,
1 + n_patients - n_side_effects)
# Log odds of side effects on treatment 2 (BUGS/JAGS dnorm uses precision)
logor_side_effects ~ dnorm(logor_side_effects_mu, pow(logor_side_effects_sd, -2))
# Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
## Variables to define transition probabilities
# Probability that a patient is hospitalised over the time horizon
p_hospitalised_total ~ dbeta(1 + n_hospitalised,
1 + n_side_effects - n_hospitalised)
# Probability that a patient dies over the time horizon given they were
# hospitalised
p_died ~ dbeta(1 + n_died, 1 + n_hospitalised - n_died)
# Lambda_home: Conditional probability that a patient recovers considering
# that they are not hospitalised
lambda_home ~ dbeta(p_recovery_home_alpha, p_recovery_home_beta)
# Lambda_hosp: Conditional probability that a patient recovers considering
# that they do not die
lambda_hosp ~ dbeta(p_recovery_hosp_alpha, p_recovery_hosp_beta)
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(LogOR_addoutcomes_trial,filein)
# Perform the MCMC simulation with JAGS.
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(p_side_effects_t1 = args$p_side_effects_t1,
p_side_effects_t2 = bugs.data$BUGSoutput$sims.matrix[, "p_side_effects_t2"],
p_hospitalised_total= bugs.data$BUGSoutput$sims.matrix[, "p_hospitalised_total"],
p_died = bugs.data$BUGSoutput$sims.matrix[, "p_died"],
lambda_home = bugs.data$BUGSoutput$sims.matrix[, "lambda_home"],
lambda_hosp = bugs.data$BUGSoutput$sims.matrix[, "lambda_hosp"]))
}
# EVSI calculation using the moment matching method.
evsi_OR_allout_MM <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("p_side_effects_t1", "logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
n = seq(50, 1500, by = 50),
method = "mm",
datagen_fn = full_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n_side_effects = n_side_effects,
n_patients = n_patients,
n = 50,
logor_side_effects_mu = logor_side_effects_mu,
logor_side_effects_sd = logor_side_effects_sd,
betaPar = betaPar,
p_recovery_home_mu = p_recovery_home_mu,
p_recovery_home_sd = p_recovery_home_sd,
p_recovery_hosp_mu = p_recovery_hosp_mu,
p_recovery_hosp_sd = p_recovery_hosp_sd,
n.iter = 5250,
n_died = n_died,
n_hospitalised = n_hospitalised,
p_side_effects_t1 = m_params$p_side_effects_t1),
analysis_fn = full_analysis_fn,
par_fn = generate_psa_parameters,
npreg_method = "earth")
#### STUDY 3: Long-term Survival ####
longterm_datagen_fn <- function(inputs, n = 46000){
rate_longterm <- inputs[, "rate_longterm"]
sum_of_surv <- rgamma(dim(inputs)[1], shape = 2 * n, scale = n / rate_longterm)
return(data.frame(surv_sum = sum_of_surv))
}
data <- longterm_datagen_fn(m_params)
pars <- "rate_longterm"
longterm_analysis_fn <- function(data, args, pars){
# Load key function
gammaPar <- args$gammaPar
# Data list for JAGS
data_jags <- list(
surv_sum = data$surv_sum[1],
alpha_rate = gammaPar(args$rate_longterm_mu,
args$rate_longterm_sd)$alpha,
beta_rate = gammaPar(args$rate_longterm_mu,
args$rate_longterm_sd)$beta,
n = args$n
)
longterm_jags <- function(){
## Models for the data
surv_sum ~ dgamma(2 * n, rate_longterm / n)
rate_longterm ~ dgamma(alpha_rate, beta_rate)
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(longterm_jags,filein)
# Perform the MCMC simulation with JAGS.
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 3,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(rate_longterm = bugs.data$BUGSoutput$sims.matrix[, "rate_longterm"]))
}
# EVSI calculation using the moment matching method.
evsi_longterm <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("rate_longterm"),
n = 46000,
method = "mm",
datagen_fn = longterm_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n = 46000,
gammaPar = gammaPar,
rate_longterm_mu = rate_longterm_mu,
rate_longterm_sd = rate_longterm_sd,
n.iter = 2000),
analysis_fn = longterm_analysis_fn,
par_fn = generate_psa_parameters)
plotting <- evsi.plot.adapt(chemotherapy_output, m_params, c("rate_longterm"),
evsi_longterm, "gam")
#### STUDY 4: Retrospective Study of Hospital Data ####
# Data generation function
retrohosp_datagen_fn <- function(inputs, n = 500){
# Load the data
p_died <- inputs[, "p_died"]
lambda_hosp <- inputs[, "lambda_hosp"]
rate_recover_hosp <- -log(1 -lambda_hosp )
X_dead <- N_recover_hospital <- vector("numeric", length = dim(inputs)[1])
T_hosp <- matrix(NA, nrow = dim(inputs)[1], ncol = n)
for(i in 1:dim(inputs)[1]){
# Simulate the number of patients die
X_dead[i] <- rbinom(1, n, p_died[i])
## Simulate recovery times for patients
N_recover_hospital[i] <- n - X_dead[i]
T_hosp[i, 1:N_recover_hospital[i]] <- rexp(N_recover_hospital[i], rate_recover_hosp[i])
}
data_save_dat <- data.frame(cbind(X_dead = X_dead, T_hosp = T_hosp))
return(data_save_dat)
}
# Analysis function based on JAGS
retrohosp_analysis_fn <- function(data, args, pars){
# Create the data list for JAGS
data_jags <- list(X_dead = data$X_dead,
N_recover_hosp = args$n - data$X_dead,
T_hosp = as.vector(as.matrix(data[, 1 + (1:(args$n - data$X_dead))])),
n = args$n,
p_recovery_hosp_alpha = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$alpha,
p_recovery_hosp_beta = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$beta,
n_died = args$n_died,
n_hospitalised = args$n_hospitalised)
LogOR_addoutcomes_trial <- function(){
## Models for the data
X_dead ~ dbin(p_died, n)
rate_recover_hosp <- -log(1 - lambda_hosp)
for(i in 1:N_recover_hosp){
T_hosp[i] ~ dexp(rate_recover_hosp)
}
# Probability that a patient dies over the time horizon given they were
# hospitalised
p_died ~ dbeta(1 + n_died, 1 + n_hospitalised - n_died)
# Lambda_hosp: Conditional probability that a patient recovers considering
# that they do not die
lambda_hosp ~ dbeta(p_recovery_hosp_alpha, p_recovery_hosp_beta)
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(LogOR_addoutcomes_trial,filein)
# Perform the MCMC simulation with JAGS.
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 3,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(p_died = bugs.data$BUGSoutput$sims.matrix[, "p_died"],
lambda_hosp = bugs.data$BUGSoutput$sims.matrix[, "lambda_hosp"]))
}
# EVSI calculation using the moment matching method.
evsi_retrohosp <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("p_died", "lambda_hosp"),
n = seq(500, 1500, by = 200),
method = "mm",
datagen_fn = retrohosp_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n = 500,
betaPar = betaPar,
p_recovery_hosp_mu = p_recovery_hosp_mu,
p_recovery_hosp_sd = p_recovery_hosp_sd,
n.iter = 5000,
n_died = n_died,
n_hospitalised = n_hospitalised),
analysis_fn = retrohosp_analysis_fn,
par_fn = generate_psa_parameters)
#### STUDY 5: Registry Study of Observational Data ####
# Data generation function
registry_datagen_fn <- function(inputs, n = 500){
# Load the data
p_hospitalised_total <- inputs[, "p_hospitalised_total"]
p_died <- inputs[, "p_died"]
lambda_home <- inputs[, "lambda_home"]
lambda_hosp <- inputs[, "lambda_hosp"]
rate_recover_hosp <- -log(1 -lambda_hosp )
rate_recover_home <- -log(1 - lambda_home)
X_hosp <- X_dead <- N_recover_home <- N_recover_hospital <- vector("numeric", length = dim(inputs)[1])
T_home <- T_hosp <- matrix(NA, nrow = dim(inputs)[1], ncol = 2 * n)
for(i in 1:dim(inputs)[1]){
# Simulate the number of patients hospitalised
X_hosp[i] <- rbinom(1, n, p_hospitalised_total[i])
# Simulate the number of patients die
X_dead[i] <- rbinom(1, X_hosp[i], p_died[i])
## Simulate recovery times for patients
N_recover_home[i] <- n - X_hosp[i]
T_home[i, 1:N_recover_home[i]] <- rexp(N_recover_home[i], rate_recover_home[i])
N_recover_hospital[i] <- X_hosp[i] - X_dead[i]
T_hosp[i, 1:N_recover_hospital[i]] <- rexp(N_recover_hospital[i], rate_recover_hosp[i])
}
data_save_dat <- data.frame(cbind(X_hosp = X_hosp, X_dead = X_dead,
N_recover_home = N_recover_home,
N_recover_hospital = N_recover_hospital,
T_home = T_home, T_hosp = T_hosp))
return(data_save_dat)
}
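# Quick consistency check (sketch): in each simulated registry dataset the
# deaths cannot exceed the hospitalisations, and the hospital recovery count
# is the difference between the two.
dat_try_registry <- registry_datagen_fn(m_params[1:5, , drop = FALSE], n = 500)
stopifnot(all(dat_try_registry$X_dead <= dat_try_registry$X_hosp),
          all(dat_try_registry$N_recover_hospital ==
                dat_try_registry$X_hosp - dat_try_registry$X_dead))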
# Analysis function based on JAGS
registry_analysis_fn <- function(data, args, pars){
# Create the data list for JAGS
data_jags <- list(X_hosp = data$X_hosp,
X_dead = data$X_dead,
                    # Recovery times start after the 4 count columns in the generated data
                    T_home = as.vector(as.matrix(data[, 4 + (1:data$N_recover_home)])),
                    T_hosp = as.vector(as.matrix(data[, (4 + 2 * args$n) + (1:data$N_recover_hospital)])),
N_recover_home = data$N_recover_home,
N_recover_hosp = data$N_recover_hospital,
n = args$n,
n_side_effects = args$n_side_effects,
p_recovery_home_alpha = betaPar(args$p_recovery_home_mu,
args$p_recovery_home_sd)$alpha,
p_recovery_home_beta = betaPar(args$p_recovery_home_mu,
args$p_recovery_home_sd)$beta,
p_recovery_hosp_alpha = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$alpha,
p_recovery_hosp_beta = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$beta,
n_died = args$n_died,
n_hospitalised = args$n_hospitalised)
LogOR_addoutcomes_trial <- function(){
X_hosp ~ dbinom(p_hospitalised_total, n)
X_dead ~ dbin(p_died, X_hosp)
rate_recover_home <- -log(1 - lambda_home)
rate_recover_hosp <- -log(1 - lambda_hosp)
for(i in 1:N_recover_home){
T_home[i] ~ dexp(rate_recover_home)
}
for(i in 1:N_recover_hosp){
T_hosp[i] ~ dexp(rate_recover_hosp)
}
## Variables to define transition probabilities
# Probability that a patient is hospitalised over the time horizon
p_hospitalised_total ~ dbeta(1 + n_hospitalised,
1 + n_side_effects - n_hospitalised)
# Probability that a patient dies over the time horizon given they were
# hospitalised
p_died ~ dbeta(1 + n_died, 1 + n_hospitalised - n_died)
# Lambda_home: Conditional probability that a patient recovers considering
# that they are not hospitalised
lambda_home ~ dbeta(p_recovery_home_alpha, p_recovery_home_beta)
# Lambda_hosp: Conditional probability that a patient recovers considering
# that they do not die
lambda_hosp ~ dbeta(p_recovery_hosp_alpha, p_recovery_hosp_beta)
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(LogOR_addoutcomes_trial,filein)
  # Perform the MCMC simulation with JAGS
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 3,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(p_hospitalised_total= bugs.data$BUGSoutput$sims.matrix[, "p_hospitalised_total"],
p_died = bugs.data$BUGSoutput$sims.matrix[, "p_died"],
lambda_home = bugs.data$BUGSoutput$sims.matrix[, "lambda_home"],
lambda_hosp = bugs.data$BUGSoutput$sims.matrix[, "lambda_hosp"]))
}
# EVSI calculation using the moment matching method.
evsi_registry <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("p_hospitalised_total","p_died", "lambda_home",
"lambda_hosp"),
n = seq(500, 1500, by = 200),
method = "mm",
datagen_fn = registry_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n_side_effects = n_side_effects,
n = 500,
betaPar = betaPar,
p_recovery_home_mu = p_recovery_home_mu,
p_recovery_home_sd = p_recovery_home_sd,
p_recovery_hosp_mu = p_recovery_hosp_mu,
p_recovery_hosp_sd = p_recovery_hosp_sd,
n.iter = 5000,
n_died = n_died,
n_hospitalised = n_hospitalised),
analysis_fn = registry_analysis_fn,
par_fn = generate_psa_parameters)
#### STUDY 6: A Cost Analysis ####
# Data generation function
cost_datagen_fn <- function(inputs, n = 500,
v_home_care_fun = function(){return(sqrt(rgamma(1, shape = 3, scale = 39)))},
v_hospital_fun = function(){return(sqrt(rgamma(1, shape = 10, scale = 45)))},
v_death_fun = function(){return(sqrt(rgamma(1, shape = 15, scale = 112.5)))}
){
lognormPar = function(m,s) {
# m: Mean of Log-Normal distribution
    # s: Standard deviation of Log-Normal distribution
var <- s^2
meanlog <- log(m) - 0.5 * log(1 + var/m^2)
varlog <- log(1 + (var/m^2))
sdlog <- sqrt(varlog)
return(
list(meanlog = meanlog, sdlog = sdlog)
)
}
X_home_care <- X_hospital <- X_death <- matrix(NA, nrow = dim(inputs)[1], ncol = n[1])
for(i in 1:dim(inputs)[1]){
# Load the data
m_home_care <- inputs[i, "c_home_care"]
m_hospital <- inputs[i, "c_hospital"]
m_death <- inputs[i, "c_death"]
v_home_care <- v_home_care_fun()
v_hospital <- v_hospital_fun()
v_death <- v_death_fun()
par_home_care <- lognormPar(m_home_care, sqrt(v_home_care))
par_hospital <- lognormPar(m_hospital, sqrt(v_hospital))
par_death <- lognormPar(m_death, sqrt(v_death))
# Simulate the costs
X_home_care[i, ] <- rlnorm(n[1], par_home_care$meanlog, par_home_care$sdlog)
X_hospital[i, ] <- rlnorm(n[1], par_hospital$meanlog, par_hospital$sdlog)
X_death[i, ] <- rlnorm(n[1], par_death$meanlog, par_death$sdlog)
}
data_save_dat <- data.frame(cbind(X_home_care = X_home_care,
X_hospital = X_hospital,
X_death = X_death))
return(data_save_dat)
}
dat_try <- cost_datagen_fn(m_params)
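# Quick check (sketch): the simulated cost data should have one row per PSA
# iteration and 3 * n = 1500 columns (home care, hospital and death costs).
stopifnot(nrow(dat_try) == nrow(m_params), ncol(dat_try) == 3 * 500)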
# Analysis function based on JAGS
cost_analysis_fn <- function(data, args, pars){
# Create the data list for JAGS
  data_jags <- list(X_home_care = as.vector(as.matrix(data[, (1:args$n)])),
                    X_hospital = as.vector(as.matrix(data[, args$n + (1:args$n)])),
                    X_death = as.vector(as.matrix(data[, 2*args$n + (1:args$n)])),
n = args$n,
mu_home_care = args$lognPar(args$c_home_care_mu,
args$c_home_care_sd)$meanlog,
t_home_care = 1/args$lognPar(args$c_home_care_mu,
args$c_home_care_sd)$sdlog^2,
mu_hospital = args$lognPar(args$c_hospital_mu,
args$c_hospital_sd)$meanlog,
t_hospital = 1 / args$lognPar(args$c_hospital_mu,
args$c_hospital_sd)$sdlog^2,
mu_death = args$lognPar(args$c_death_mu,
args$c_death_sd)$meanlog,
t_death = 1 / args$lognPar(args$c_death_mu,
args$c_death_sd)$sdlog^2,
a_home_care = 3,
b_home_care = 1 / 39,
a_hospital = 10,
b_hospital = 1 / 45,
a_death = 15,
b_death = 1 / 112.5)
LogOR_addoutcomes_trial <- function(){
for(i in 1:n){
X_home_care[i] ~ dlnorm(m_home_care, tau_ind_home_care)
X_hospital[i] ~ dlnorm(m_hospital, tau_ind_hospital)
X_death[i] ~ dlnorm(m_death, tau_ind_death)
}
c_home_care ~ dlnorm(mu_home_care, t_home_care)
c_hospital ~ dlnorm(mu_hospital, t_hospital)
c_death ~ dlnorm(mu_death, t_death)
v_ind_home_care ~ dgamma(a_home_care, b_home_care)
v_ind_hospital ~ dgamma(a_hospital, b_hospital)
v_ind_death ~ dgamma(a_death, b_death)
    # Log-scale mean and precision for the individual-level cost data,
    # matching the moment matching used in lognormPar() above
    m_home_care <- log(c_home_care) - 0.5 *
      log(1 + v_ind_home_care/c_home_care^2)
    tau_ind_home_care <- 1 / log(1 + (v_ind_home_care/c_home_care^2))
    m_hospital <- log(c_hospital) - 0.5 *
      log(1 + v_ind_hospital/c_hospital^2)
    tau_ind_hospital <- 1 / log(1 + (v_ind_hospital/c_hospital^2))
    m_death <- log(c_death) - 0.5 *
      log(1 + v_ind_death/c_death^2)
    tau_ind_death <- 1 / log(1 + (v_ind_death/c_death^2))
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(LogOR_addoutcomes_trial,filein)
  # Perform the MCMC simulation with JAGS
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(c_home_care= bugs.data$BUGSoutput$sims.matrix[, "c_home_care"],
c_hospital = bugs.data$BUGSoutput$sims.matrix[, "c_hospital"],
c_death = bugs.data$BUGSoutput$sims.matrix[, "c_death"]))
}
# EVSI calculation using the moment matching method.
evsi_costs <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("c_home_care", "c_hospital", "c_death"),
n = seq(30, 1000, by = 200),
method = "mm",
datagen_fn = cost_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n = 20,
lognPar = lognPar,
c_home_care_mu = c_home_care_mu,
c_home_care_sd = c_home_care_sd,
c_hospital_mu = c_hospital_mu,
c_hospital_sd = c_hospital_sd,
c_death_mu = c_death_mu,
c_death_sd = c_death_sd,
n.iter = 2000),
analysis_fn = cost_analysis_fn,
par_fn = generate_psa_parameters,
Q = 50)
#### STUDY 7: A Utility Analysis ####
# Data generation function
utility_datagen_fn <- function(inputs, n = 500,
sd_recovery_fun = function(){return(runif(1, 0.000001, 0.00005))},
sd_home_care_fun = function(){return(runif(1, 0.00001, 0.005))},
sd_hospital_fun = function(){return(runif(1, 0.00001, 0.01))}
){
betaPar <- function(m, s) {
# m: Mean of the Beta distribution
    # s: Standard deviation of the Beta distribution
var <- s ^ 2
alpha <- ((1 - m) / var - 1 / m) * m ^ 2
beta <- alpha * (1 / m - 1)
return(
list(alpha = alpha, beta = beta)
)
}
# Load the data
X_home_care <- X_hospital <- X_recovery <- matrix(NA, nrow = dim(inputs)[1], ncol = n[1])
for(i in 1:dim(inputs)[1]){
set.seed(123 + i)
m_recovery <- inputs[i, "u_recovery"]
m_home_care <- inputs[i, "u_home_care"]
m_hospital <- inputs[i, "u_hospital"]
sd_recovery <- sd_recovery_fun()
sd_home_care <- sd_home_care_fun()
sd_hospital <- sd_hospital_fun()
par_recovery <- betaPar(m_recovery, sd_recovery)
par_home_care <- betaPar(m_home_care, sd_home_care)
par_hospital <- betaPar(m_hospital, sd_hospital)
    # Simulate the utilities
X_recovery[i, ] <- rbeta(n[1], par_recovery$alpha, par_recovery$beta)
X_home_care[i, ] <- rbeta(n[1], par_home_care$alpha, par_home_care$beta)
X_hospital[i, ] <- rbeta(n[1], par_hospital$alpha, par_hospital$beta)
}
data_save_dat <- data.frame(cbind(X_recovery = X_recovery,
X_home_care = X_home_care,
X_hospital = X_hospital))
return(data_save_dat)
}
dat_try <- utility_datagen_fn(m_params)
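# Quick check (sketch): all simulated utilities should lie strictly in (0, 1),
# as implied by the Beta sampling model.
u_mat <- as.matrix(dat_try)
stopifnot(all(u_mat > 0 & u_mat < 1))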
# Analysis function based on JAGS
utility_analysis_fn <- function(data, args, pars){
# Create the data list for JAGS
  data_jags <- list(X_recovery = as.vector(as.matrix(data[, (1:args$n)])),
                    X_home_care = as.vector(as.matrix(data[, args$n + (1:args$n)])),
                    X_hospital = as.vector(as.matrix(data[, 2*args$n + (1:args$n)])),
n = args$n,
alpha_recovery = args$betaPar(
args$u_recovery_mu,
args$u_recovery_sd
)$alpha,
beta_recovery = args$betaPar(
args$u_recovery_mu,
args$u_recovery_sd
)$beta,
alpha_home_care = args$betaPar(
args$u_home_care_mu,
args$u_home_care_sd
)$alpha,
beta_home_care = args$betaPar(
args$u_home_care_mu,
args$u_home_care_sd
)$beta,
alpha_hospital = args$betaPar(
args$u_hospital_mu,
args$u_hospital_sd
)$alpha,
beta_hospital = args$betaPar(
args$u_hospital_mu,
args$u_hospital_sd
)$beta)
trial <- function(){
for(i in 1:n){
X_recovery[i] ~ dbeta(a_recovery, b_recovery)
X_home_care[i] ~ dbeta(a_home_care, b_home_care)
X_hospital[i] ~ dbeta(a_hospital, b_hospital)
}
u_recovery ~ dbeta(alpha_recovery, beta_recovery)
u_home_care ~ dbeta(alpha_home_care, beta_home_care)
u_hospital ~ dbeta(alpha_hospital, beta_hospital)
sd_recovery ~ dunif(0.000001, 0.00005)
sd_home_care ~ dunif(0.00001, 0.005)
sd_hospital ~ dunif(0.00001, 0.01)
v_recovery <- sd_recovery ^ 2
a_recovery <- ((1 - u_recovery) / v_recovery - 1 / u_recovery) * u_recovery ^ 2
b_recovery <- a_recovery * (1 / u_recovery - 1)
v_home_care <- sd_home_care ^ 2
a_home_care <- ((1 - u_home_care) / v_home_care - 1 / u_home_care) * u_home_care ^ 2
b_home_care <- a_home_care * (1 / u_home_care - 1)
v_hospital <- sd_hospital ^ 2
a_hospital <- ((1 - u_hospital) / v_hospital - 1 / u_hospital) * u_hospital ^ 2
b_hospital <- a_hospital * (1 / u_hospital - 1)
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(trial,filein)
  # Perform the MCMC simulation with JAGS
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(u_recovery = bugs.data$BUGSoutput$sims.matrix[, "u_recovery"],
u_home_care = bugs.data$BUGSoutput$sims.matrix[, "u_home_care"],
u_hospital = bugs.data$BUGSoutput$sims.matrix[, "u_hospital"]))
}
# EVSI calculation using the moment matching method.
evsi_utility <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("u_recovery", "u_home_care", "u_hospital"),
n = seq(50, 1500, by = 50),
method = "mm",
datagen_fn = utility_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n = 50,
betaPar = betaPar,
u_recovery_mu = u_recovery_mu,
u_recovery_sd = u_recovery_sd,
u_home_care_mu = u_home_care_mu,
u_home_care_sd = u_home_care_sd,
u_hospital_mu = u_hospital_mu,
u_hospital_sd = u_hospital_sd,
n.iter = 5000),
analysis_fn = utility_analysis_fn,
par_fn = generate_psa_parameters,
Q = 50)
plotting <- evsi.plot.adapt(chemotherapy_output, m_params, c("u_recovery", "u_home_care", "u_hospital"),
evsi_utility, "earth")
evsi.wtp.plot(plotting)
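# Effective population for the ENBS calculations below: 46,000 discounted by
# 3 years at 3.5% (assumption: the 3.5% rate matches the discount rate used
# elsewhere in these scripts, and the 3 years reflect a delay before the
# study would report).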
pop.adjust <- 46000 * (1 / (1 + 0.035)^3)
evsi.enbs.plot(plotting, c(1260000, 1400000), 2 * c(1560.55, 1600),
k = 20000, Pop = pop.adjust, Time = 10)
optim.ss(plotting, c(1260000, 1400000), 2 * c(1560.55, 1600),
k = 20000, Pop = pop.adjust, Time = 10)
coss(plotting, c(1260000, 1400000), 2 * c(1560.55, 1600),
Pop = pop.adjust, Time = 10)
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/04_analysis/05_Expected_Value_of_Sample_Information_MM.R
########################################################################
### Expected Value of Sample Information Analysis - Regression Based ###
########################################################################
### Packages
library(voi)
library(ggplot2)
library(dplyr)
library(R2jags)
## Run the model
source("04_analysis/02_baseline_model_output.R")
## Baseline Cost-Effectiveness Formatting
# The output from the cost-effectiveness model should be formatted for
# the voi package.
# Use BCEA package to create a cost-effectiveness object.
chemotherapy_output <- list(e = m_costs_effects[, "Effects", ],
c = m_costs_effects[, "Costs", ],
k = seq(0, 50000, length.out = 501))
## EVSI Calculations
#### STUDY 1: Randomised Trial for Log-Odds ratio ####
## Using the default trials in the voi package
# EVSI calculation with GAM regression
evsi_default <- evsi(outputs = chemotherapy_output,
inputs = m_params,
study = "trial_binary",
pars = c("p_side_effects_t1", "p_side_effects_t2"),
n = seq(500, 1500, by = 200),
method = "gam")
# Using a bespoke analysis function - trial only updates odds ratio.
# Data generation function
OR_datagen_fn <- function(inputs, n = 500){
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
p_side_effects_t2 <- inputs[, "p_side_effects_t2"]
X1 <- rbinom(length(p_side_effects_t1), n, p_side_effects_t1)
X2 <- rbinom(length(p_side_effects_t2), n, p_side_effects_t2)
# Create odds ratio as summary statistic
OR <- (n - X2) / X2 / ((n - X1) / X1)
data_save <- data.frame(OR = OR)
return(data_save)
}
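# Quick look at the simulated summary statistic (sketch): one simulated odds
# ratio per PSA iteration, computed from the two simulated trial arms.
head(OR_datagen_fn(m_params, n = 500))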
# EVSI calculation using GAM regression.
evsi_OR <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("p_side_effects_t1", "p_side_effects_t2"),
n = seq(50, 1500, by = 50),
method = "gam",
datagen_fn = OR_datagen_fn,
par_fn = generate_psa_parameters)
#### STUDY 2: Randomised Trial with Multiple Outcomes ####
# Data generation function
# Analysis function based on JAGS
full_analysis_fn <- function(data, args, pars){
## Format Data - Adjust for 0 recovery times
T_home <- NA
if(data$N_recover_home > 0){
T_home <- as.numeric(as.matrix(data[, (1:data$N_recover_home) + 6]))
}
T_hosp <- NA
if(data$N_recover_hospital > 0){
T_hosp <- as.vector(as.matrix(data[,
(6 + 2 * args$n) + (1:data$N_recover_hospital)]))
}
# Create the data list for JAGS
data_jags <- list(X1 = data$X1,
X2 = data$X2,
X_hosp = data$X_hosp,
X_dead = data$X_dead,
T_home = T_home,
T_hosp = T_hosp,
N_recover_home = ifelse(data$N_recover_home > 0,
data$N_recover_home,
1),
N_recover_hosp = ifelse(data$N_recover_hospital > 0,
data$N_recover_hospital,
1),
n = args$n,
n_side_effects = args$n_side_effects,
n_patients = args$n_patients,
logor_side_effects_mu = args$logor_side_effects_mu,
logor_side_effects_sd = args$logor_side_effects_sd,
p_recovery_home_alpha = betaPar(args$p_recovery_home_mu,
args$p_recovery_home_sd)$alpha,
p_recovery_home_beta = betaPar(args$p_recovery_home_mu,
args$p_recovery_home_sd)$beta,
p_recovery_hosp_alpha = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$alpha,
p_recovery_hosp_beta = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$beta,
n_died = args$n_died,
n_hospitalised = args$n_hospitalised)
LogOR_addoutcomes_trial <- function(){
## Models for the data
X1 ~ dbin(p_side_effects_t1, n)
X2 ~ dbin(p_side_effects_t2, n)
X_hosp ~ dbinom(p_hospitalised_total, X1 + X2)
X_dead ~ dbin(p_died, X_hosp)
rate_recover_home <- -log(1 - lambda_home)
rate_recover_hosp <- -log(1 - lambda_hosp)
for(i in 1:N_recover_home){
T_home[i] ~ dexp(rate_recover_home)
}
for(i in 1:N_recover_hosp){
T_hosp[i] ~ dexp(rate_recover_hosp)
}
# Probability of side effects under treatment 1
p_side_effects_t1 ~ dbeta(1 + n_side_effects,
1 + n_patients - n_side_effects)
# Log odds of side effects on treatment 2
logor_side_effects ~ dnorm(logor_side_effects_mu, logor_side_effects_sd)
# Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
## Variables to define transition probabilities
# Probability that a patient is hospitalised over the time horizon
p_hospitalised_total ~ dbeta(1 + n_hospitalised,
1 + n_side_effects - n_hospitalised)
# Probability that a patient dies over the time horizon given they were
# hospitalised
p_died ~ dbeta(1 + n_died, 1 + n_hospitalised - n_died)
# Lambda_home: Conditional probability that a patient recovers considering
# that they are not hospitalised
lambda_home ~ dbeta(p_recovery_home_alpha, p_recovery_home_beta)
# Lambda_hosp: Conditional probability that a patient recovers considering
# that they do not die
lambda_hosp ~ dbeta(p_recovery_hosp_alpha, p_recovery_hosp_beta)
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(LogOR_addoutcomes_trial,filein)
  # Perform the MCMC simulation with JAGS
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(logor_side_effects = bugs.data$BUGSoutput$sims.matrix[, "logor_side_effects"],
p_hospitalised_total= bugs.data$BUGSoutput$sims.matrix[, "p_hospitalised_total"],
p_died = bugs.data$BUGSoutput$sims.matrix[, "p_died"],
lambda_home = bugs.data$BUGSoutput$sims.matrix[, "lambda_home"],
lambda_hosp = bugs.data$BUGSoutput$sims.matrix[, "lambda_hosp"]))
}
# EVSI calculation using the moment matching method.
evsi_OR_allout_MM <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
pars_datagen = c("p_side_effects_t1",
"logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
n = seq(50, 1500, by = 50),
method = "mm",
datagen_fn = full_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n_side_effects = n_side_effects,
n_patients = n_patients,
n = 50,
logor_side_effects_mu = logor_side_effects_mu,
logor_side_effects_sd = logor_side_effects_sd,
betaPar = betaPar,
p_recovery_home_mu = p_recovery_home_mu,
p_recovery_home_sd = p_recovery_home_sd,
p_recovery_hosp_mu = p_recovery_hosp_mu,
p_recovery_hosp_sd = p_recovery_hosp_sd,
n.iter = 5250,
n_died = n_died,
n_hospitalised = n_hospitalised,
p_side_effects_t1 = m_params$p_side_effects_t1),
analysis_fn = full_analysis_fn,
par_fn = generate_psa_parameters,
npreg_method = "earth")
#### STUDY 3: Long-term Survival ####
longterm_datagen_fn <- function(inputs, n = 46000){
rate_longterm <- inputs[, "rate_longterm"]
sum_of_surv <- rgamma(dim(inputs)[1], shape = 2 * n, scale = n / rate_longterm)
return(data.frame(surv_sum = sum_of_surv))
}
# EVSI calculation using GAM regression.
evsi_longterm <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("rate_longterm"),
n = 46000,
method = "gam",
datagen_fn = longterm_datagen_fn,
par_fn = generate_psa_parameters)
evsi.wtp.plot(plotting)
pop.adjust <- 46000 * (1 / (1 + 0.035)^1)
optim.ss(plotting, c(60000, 60000), c(0, 0),
k = 30000, Pop = pop.adjust, Time = 12)
evsi.prob.plot(plotting, c(60000, 60000), c(0, 0),
k = 20000, Pop = c(0, pop.adjust * 2), c(0, 15))
#### STUDY 4: Retrospective Study of Hospital Data ####
# Data generation function
retrohosp_datagen_fn <- function(inputs, n = 500){
# Load the data
p_died <- inputs[, "p_died"]
lambda_hosp <- inputs[, "lambda_hosp"]
  rate_recover_hosp <- -log(1 - lambda_hosp)
  X_dead <- T_hosp_sum <- N_recover_hospital <- vector("numeric", length = dim(inputs)[1])
  for(i in 1:dim(inputs)[1]){
    # Simulate the number of patients who die
X_dead[i] <- rbinom(1, n, p_died[i])
## Simulate recovery times for patients
N_recover_hospital[i] <- n - X_dead[i]
T_hosp <- rexp(N_recover_hospital[i], rate_recover_hosp[i])
T_hosp_sum[i] <- sum(T_hosp)
}
data_save_dat <- data.frame(cbind(p_dead = X_dead / n, T_hosp_sum = T_hosp_sum))
return(data_save_dat)
}
# EVSI calculation using GAM regression
evsi_retrohosp <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("p_died", "lambda_hosp"),
n = seq(500, 1500, by = 200),
method = "gam",
datagen_fn = retrohosp_datagen_fn,
par_fn = generate_psa_parameters)
#### STUDY 5: Registry Study of Observational Data ####
# Data generation function
registry_datagen_fn <- function(inputs, n = 500){
# Load the data
p_hospitalised_total <- inputs[, "p_hospitalised_total"]
p_died <- inputs[, "p_died"]
lambda_home <- inputs[, "lambda_home"]
lambda_hosp <- inputs[, "lambda_hosp"]
  rate_recover_hosp <- -log(1 - lambda_hosp)
rate_recover_home <- -log(1 - lambda_home)
X_hosp <- X_dead <- N_recover_home <- T_home_sum <-
T_hosp_sum <- N_recover_hospital <- vector("numeric", length = dim(inputs)[1])
for(i in 1:dim(inputs)[1]){
# Simulate the number of patients hospitalised
X_hosp[i] <- rbinom(1, n, p_hospitalised_total[i])
    # Simulate the number of patients who die
X_dead[i] <- rbinom(1, X_hosp[i], p_died[i])
## Simulate recovery times for patients
N_recover_home[i] <- n - X_hosp[i]
T_home <- rexp(N_recover_home[i], rate_recover_home[i])
T_home_sum[i] <- sum(T_home)
N_recover_hospital[i] <- X_hosp[i] - X_dead[i]
T_hosp <- rexp(N_recover_hospital[i], rate_recover_hosp[i])
T_hosp_sum[i] <- sum(T_hosp)
}
data_save_dat <- data.frame(cbind(p_hosp = X_hosp / n, p_dead = X_dead / X_hosp,
T_home_sum = T_home_sum, T_hosp_sum = T_hosp_sum))
return(data_save_dat)
}
# EVSI calculation using GAM regression
evsi_registry <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("p_hospitalised_total","p_died", "lambda_home",
"lambda_hosp"),
n = seq(500, 1500, by = 200),
method = "gam",
datagen_fn = registry_datagen_fn,
par_fn = generate_psa_parameters)
#### STUDY 6: A Cost Analysis ####
# Data generation function
cost_datagen_fn <- function(inputs, n = 500,
v_home_care_fun = function(){return(sqrt(rgamma(1, shape = 3, scale = 39)))},
v_hospital_fun = function(){return(sqrt(rgamma(1, shape = 10, scale = 45)))},
v_death_fun = function(){return(sqrt(rgamma(1, shape = 15, scale = 112.5)))}
){
lognormPar = function(m,s) {
# m: Mean of Log-Normal distribution
    # s: Standard deviation of Log-Normal distribution
var <- s^2
meanlog <- log(m) - 0.5 * log(1 + var/m^2)
varlog <- log(1 + (var/m^2))
sdlog <- sqrt(varlog)
return(
list(meanlog = meanlog, sdlog = sdlog)
)
}
X_home_care <- X_hospital <- X_death <- matrix(NA, nrow = dim(inputs)[1], ncol = n[1])
X_home_care_suff1 <- X_home_care_suff2 <- X_hospital_suff1 <- X_hospital_suff2 <-
X_death_suff1 <- X_death_suff2 <- vector("numeric", dim(inputs)[1])
for(i in 1:dim(inputs)[1]){
# Load the data
m_home_care <- inputs[i, "c_home_care"]
m_hospital <- inputs[i, "c_hospital"]
m_death <- inputs[i, "c_death"]
v_home_care <- v_home_care_fun()
v_hospital <- v_hospital_fun()
v_death <- v_death_fun()
par_home_care <- lognormPar(m_home_care, sqrt(v_home_care))
par_hospital <- lognormPar(m_hospital, sqrt(v_hospital))
par_death <- lognormPar(m_death, sqrt(v_death))
# Simulate the costs
X_home_care[i, ] <- rlnorm(n[1], par_home_care$meanlog, par_home_care$sdlog)
X_hospital[i, ] <- rlnorm(n[1], par_hospital$meanlog, par_hospital$sdlog)
X_death[i, ] <- rlnorm(n[1], par_death$meanlog, par_death$sdlog)
## Sufficient statistics for the log-normal distribution
# Data provides information on mean and variance of log-normal
X_home_care_suff1[i] <- sum(log(X_home_care[i, ]))
X_home_care_suff2[i] <- sum(log(X_home_care[i, ])^2)
X_hospital_suff1[i] <- sum(log(X_hospital[i, ]))
X_hospital_suff2[i] <- sum(log(X_hospital[i, ])^2)
X_death_suff1[i] <- sum(log(X_death[i, ]))
X_death_suff2[i] <- sum(log(X_death[i, ])^2)
}
data_save_dat <- data.frame(cbind(X_home_care_suff1 = X_home_care_suff1,
X_home_care_suff2 = X_home_care_suff2,
X_hospital_suff1 = X_hospital_suff1,
X_hospital_suff2 = X_hospital_suff2,
X_death_suff1 = X_death_suff1,
X_death_suff2 = X_death_suff2))
return(data_save_dat)
}
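# Sketch of how the sufficient statistics map back to log-scale sample moments
# (assuming the default n = 500): suff1 / n is the log-scale sample mean, and
# (suff2 - n * mean^2) / (n - 1) is the log-scale sample variance.
dat_cost <- cost_datagen_fn(m_params[1:5, , drop = FALSE])
n_sim <- 500
logmean_home <- dat_cost$X_home_care_suff1 / n_sim
logvar_home <- (dat_cost$X_home_care_suff2 - n_sim * logmean_home^2) / (n_sim - 1)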
# EVSI calculation using MARS regression due to large number of parameters.
evsi_costs <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("c_home_care", "c_hospital", "c_death"),
n = seq(30, 1000, by = 200),
method = "earth",
datagen_fn = cost_datagen_fn,
par_fn = generate_psa_parameters)
#### STUDY 7: A Utility Analysis ####
# Data generation function
utility_datagen_fn <- function(inputs, n = 500,
sd_recovery_fun = function(){return(runif(1, 0.000001, 0.00005))},
sd_home_care_fun = function(){return(runif(1, 0.00001, 0.005))},
sd_hospital_fun = function(){return(runif(1, 0.00001, 0.01))}
){
betaPar <- function(m, s) {
# m: Mean of the Beta distribution
    # s: Standard deviation of the Beta distribution
var <- s ^ 2
alpha <- ((1 - m) / var - 1 / m) * m ^ 2
beta <- alpha * (1 / m - 1)
return(
list(alpha = alpha, beta = beta)
)
}
gm_mean = function(x, na.rm=TRUE){
exp(sum(log(x[x > 0]), na.rm=na.rm) / length(x))
}
# Load the data
X_home_care <- X_hospital <- X_recovery <- matrix(NA, nrow = dim(inputs)[1], ncol = n[1])
X_recovery_mean1 <- X_recovery_mean2 <- X_home_care_mean1 <- X_home_care_mean2 <-
X_hospital_mean1 <- X_hospital_mean2 <- vector("numeric", dim(inputs)[1])
for(i in 1:dim(inputs)[1]){
set.seed(123 + i)
m_recovery <- inputs[i, "u_recovery"]
m_home_care <- inputs[i, "u_home_care"]
m_hospital <- inputs[i, "u_hospital"]
sd_recovery <- sd_recovery_fun()
sd_home_care <- sd_home_care_fun()
sd_hospital <- sd_hospital_fun()
par_recovery <- betaPar(m_recovery, sd_recovery)
par_home_care <- betaPar(m_home_care, sd_home_care)
par_hospital <- betaPar(m_hospital, sd_hospital)
    # Simulate the utilities
X_recovery[i, ] <- rbeta(n[1], par_recovery$alpha, par_recovery$beta)
X_home_care[i, ] <- rbeta(n[1], par_home_care$alpha, par_home_care$beta)
X_hospital[i, ] <- rbeta(n[1], par_hospital$alpha, par_hospital$beta)
## Sufficient statistic for beta distribution is geometric mean of X and (1-X)
X_recovery_mean1[i] <- gm_mean(X_recovery[i, ])
X_recovery_mean2[i] <- gm_mean(1 - X_recovery[i, ])
X_home_care_mean1[i] <- gm_mean(X_home_care[i, ])
X_home_care_mean2[i] <- gm_mean(1 - X_home_care[i, ])
X_hospital_mean1[i] <- gm_mean(X_hospital[i, ])
X_hospital_mean2[i] <- gm_mean(1 - X_hospital[i, ])
}
data_save_dat <- data.frame(cbind(X_recovery_mean1 = X_recovery_mean1,
X_recovery_mean2 = X_recovery_mean2,
X_home_care_mean1 = X_home_care_mean1,
X_home_care_mean2 = X_home_care_mean2,
X_hospital_mean1 = X_hospital_mean1,
X_hospital_mean2 = X_hospital_mean2))
return(data_save_dat)
}
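# Quick check (sketch): the geometric-mean summary statistics should lie in
# (0, 1), since they are geometric means of Beta-distributed utilities.
dat_util <- utility_datagen_fn(m_params[1:5, , drop = FALSE])
stopifnot(all(as.matrix(dat_util) > 0 & as.matrix(dat_util) < 1))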
# EVSI calculation using MARS regression due to large number of parameters.
evsi_utility <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("u_recovery", "u_home_care", "u_hospital"),
n = seq(30, 1000, by = 200),
method = "earth",
datagen_fn = utility_datagen_fn)
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/04_analysis/05_Expected_Value_of_Sample_Information_RB.R
######################
### Plots for Book ###
######################
### Packages
library(voi)
library(ggplot2)
library(dplyr)
library(R2jags)
## Run the model
source("04_analysis/02_baseline_model_output.R")
source("06_figs/01_plotting_functions.R")
## Baseline Cost-Effectiveness Formatting
# The output from the cost-effectiveness model should be formatted for
# the voi package.
# Use BCEA package to create a cost-effectiveness object.
chemotherapy_output <- list(e = m_costs_effects[, "Effects", ],
c = m_costs_effects[, "Costs", ],
k = seq(0, 50000, length.out = 501))
# Using a bespoke analysis function - trial only updates odds ratio.
# Data generation function
OR_datagen_fn <- function(inputs, n = 500){
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
logor_side_effects <- inputs[, "logor_side_effects"]
# Odds for side effects for treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
# Data generation
X1 <- rbinom(length(p_side_effects_t1), n, p_side_effects_t1)
X2 <- rbinom(length(p_side_effects_t2), n, p_side_effects_t2)
data_save <- data.frame(X1 = X1, X2 = X2)
return(data_save)
}
# Analysis function based on JAGS
OR_analysis_fn <- function(data, args, pars){
X1 <- data$X1
X2 <- data$X2
data_jags <- list(X1 = X1,
X2 = X2,
n = args$n,
n_side_effects = args$n_side_effects,
n_patients = args$n_patients,
logor_side_effects_mu = args$logor_side_effects_mu,
logor_side_effects_sd = args$logor_side_effects_sd)
LogOR_trial <- function(){
# Probability of side effects under treatment 1
p_side_effects_t1 ~ dbeta(1 + n_side_effects,
1 + n_patients - n_side_effects)
# Log odds of side effects on treatment 2
logor_side_effects ~ dnorm(logor_side_effects_mu, logor_side_effects_sd)
# Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
X1 ~ dbin(p_side_effects_t1, n)
X2 ~ dbin(p_side_effects_t2, n)
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(LogOR_trial,filein)
  # Perform the MCMC simulation with JAGS
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(logor_side_effects = bugs.data$BUGSoutput$sims.matrix[, pars[1]]))
}
# EVSI calculation using the moment matching method.
evsi_OR <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("logor_side_effects"),
pars_datagen = c("p_side_effects_t1", "logor_side_effects"),
n = seq(50, 1500, by = 50),
method = "mm",
datagen_fn = OR_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n_side_effects = n_side_effects,
n_patients = n_patients,
n = 500,
logor_side_effects_mu = logor_side_effects_mu,
logor_side_effects_sd = logor_side_effects_sd,
n.iter = 5250),
analysis_fn = OR_analysis_fn,
par_fn = generate_psa_parameters)
### Format for plotting
plotting_1 <- evsi.plot.adapt(chemotherapy_output, m_params, c("logor_side_effects"), evsi_OR, "gam")
pdf("06_figs/EVSIwtpN.pdf")
evsi.wtp.plot(plotting_1)
dev.off()
pdf("06_figs/EVSIwtp.pdf")
evsi.wtp.plot(plotting_1, N = 250)
dev.off()
pdf("06_figs/EVSIbyN.pdf")
evsi.ss.plot(plotting_1)
dev.off()
pdf("06_figs/ENBS.pdf")
evsi.enbs.plot(plotting_1, c(5e6, 1e7), c(28000,42000),
k = 20000, Pop = 46000, Time = 10)
dev.off()
optim.ss(plotting_1, mean(c(5e6, 1e7)), mean(c(28000,42000)),
k = 20000, Pop = 46000, Time = 10)
pdf("06_figs/coss.pdf")
coss(plotting_1, c(5e6, 1e7), c(28000,42000), Pop = 46000, Time = 5)
dev.off()
pdf("06_figs/ENBS-pop.pdf")
evsi.prob.plot(plotting_1, setup = c(5e6, 1e7), pp = c(28000,42000), k = 20000,
N = 398, Pop = c(0,60000), Time = c(0,10))
dev.off()
## Side Effects
# Using a bespoke analysis function - trial only updates odds ratio.
# Data generation function
OR_datagen_fn <- function(inputs, n = 500){
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
p_side_effects_t2 <- inputs[, "p_side_effects_t2"]
X1 <- rbinom(length(p_side_effects_t1), n, p_side_effects_t1)
X2 <- rbinom(length(p_side_effects_t2), n, p_side_effects_t2)
# Create odds ratio as summary statistic
OR <- (n - X2) / X2 / ((n - X1) / X1)
data_save <- data.frame(OR = OR)
return(data_save)
}
# EVSI calculation using GAM regression.
evsi_OR <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("p_side_effects_t1", "p_side_effects_t2"),
n = seq(50, 1500, by = 50),
method = "gam",
datagen_fn = OR_datagen_fn,
par_fn = generate_psa_parameters)
plotting_2 <- evsi.plot.adapt(chemotherapy_output, m_params, c("logor_side_effects"), evsi_OR, "gam")
pdf("06_figs/EVSI_WTP_SE.pdf")
evsi.wtp.plot(plotting_2)
dev.off()
pop.adjust <- 46000 * (1 / (1 + 0.035)^3)
pdf("06_figs/ENBS_SE.pdf")
evsi.enbs.plot(plotting_2, c(1260000, 1400000), 2 * c(1560.55, 1600),
               k = 20000, Pop = pop.adjust, Time = 7)
dev.off()
optim.ss(plotting_2, c(1260000, 1400000), 2 * c(1560.55, 1600),
         k = 20000, Pop = pop.adjust, Time = 7)
## Full Study
## Moment Matching
# Data generation function
full_datagen_fn <- function(inputs, n = 500){
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
logor_side_effects <- inputs[, "logor_side_effects"]
# Odds for side effects for treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
p_hospitalised_total <- inputs[, "p_hospitalised_total"]
p_died <- inputs[, "p_died"]
lambda_home <- inputs[, "lambda_home"]
lambda_hosp <- inputs[, "lambda_hosp"]
  rate_recover_hosp <- -log(1 - lambda_hosp)
rate_recover_home <- -log(1 - lambda_home)
X1 <- X2 <- X_hosp <- X_dead <- N_recover_home <- N_recover_hospital <- vector("numeric", length = dim(inputs)[1])
T_home <- T_hosp <- matrix(NA, nrow = dim(inputs)[1], ncol = 2 * n)
for(i in 1:dim(inputs)[1]){
# Simulate the number of patients with side effects
X1[i] <- rbinom(1, n, p_side_effects_t1[i])
X2[i] <- rbinom(1, n, p_side_effects_t2[i])
# Simulate the number of patients hospitalised
X_hosp[i] <- rbinom(1, X1[i] + X2[i], p_hospitalised_total[i])
    # Simulate the number of patients who die
X_dead[i] <- rbinom(1, X_hosp[i], p_died[i])
## Simulate recovery times for patients
N_recover_home[i] <- X1[i] + X2[i] - X_hosp[i]
if(N_recover_home[i] > 0){
T_home[i, 1:N_recover_home[i]] <- rexp(N_recover_home[i], rate_recover_home[i])
}
N_recover_hospital[i] <- X_hosp[i] - X_dead[i]
if(N_recover_hospital[i] > 0){
T_hosp[i, 1:N_recover_hospital[i]] <- rexp(N_recover_hospital[i], rate_recover_hosp[i])
}
}
data_save_dat <- data.frame(cbind(X1 = X1, X2 = X2,
X_hosp = X_hosp, X_dead = X_dead,
N_recover_home = N_recover_home,
N_recover_hospital = N_recover_hospital,
T_home = T_home, T_hosp = T_hosp))
return(data_save_dat)
}
## Regression Based
full_datagen_fn_RB <- function(inputs, n = 500){
# Load the data
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
logor_side_effects <- inputs[, "logor_side_effects"]
# Odds for side effects for treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
p_hospitalised_total <- inputs[, "p_hospitalised_total"]
p_died <- inputs[, "p_died"]
lambda_home <- inputs[, "lambda_home"]
lambda_hosp <- inputs[, "lambda_hosp"]
  rate_recover_hosp <- -log(1 - lambda_hosp)
rate_recover_home <- -log(1 - lambda_home)
X1 <- X2 <- X_hosp <- X_dead <- N_recover_home <-
N_recover_hospital <- vector("numeric", length = dim(inputs)[1])
T_home <- T_hosp <- matrix(NA, nrow = dim(inputs)[1], ncol = 2 * n)
for(i in 1:dim(inputs)[1]){
# Simulate the number of patients with side effects
X1[i] <- rbinom(1, n, p_side_effects_t1[i])
X2[i] <- rbinom(1, n, p_side_effects_t2[i])
# Simulate the number of patients hospitalised
X_hosp[i] <- rbinom(1, X1[i] + X2[i], p_hospitalised_total[i])
    # Simulate the number of patients who die
X_dead[i] <- rbinom(1, X_hosp[i], p_died[i])
## Simulate recovery times for patients
N_recover_home[i] <- X1[i] + X2[i] - X_hosp[i]
if(N_recover_home[i] > 0){
T_home[i, 1:N_recover_home[i]] <- rexp(N_recover_home[i], rate_recover_home[i])
}
N_recover_hospital[i] <- X_hosp[i] - X_dead[i]
if(N_recover_hospital[i] > 0){
T_hosp[i, 1:N_recover_hospital[i]] <- rexp(N_recover_hospital[i], rate_recover_hosp[i])
}
}
OR <- (n - X2) / X2 / ((n - X1) / X1)
p_hosp <- X_hosp / (X1 + X2)
p_dead <- X_dead / X_hosp
T_home_sum <- rowSums(T_home, na.rm = TRUE)
T_hosp_sum <- rowSums(T_hosp, na.rm = TRUE)
data_save_dat <- data.frame(cbind(OR = OR,
p_hosp = p_hosp, p_dead = p_dead,
T_home_sum = T_home_sum,
T_hosp_sum = T_hosp_sum))
return(data_save_dat)
}
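# Quick look (sketch) at the regression-based summary statistics: an odds
# ratio, two proportions and two total recovery times per PSA iteration.
head(full_datagen_fn_RB(m_params[1:5, , drop = FALSE], n = 500))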
# EVSI calculation using MARS regression - large number of parameters.
evsi_OR_allout <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("p_side_effects_t1", "logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
n = seq(50, 1500, by = 50),
method = "earth",
datagen_fn = full_datagen_fn_RB,
par_fn = generate_psa_parameters)
plotting_3 <- evsi.plot.adapt(chemotherapy_output, m_params, c("logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
evsi_OR_allout, "earth")
pop.adjust <- 46000 * (1 / (1 + 0.035)^3)
pdf("06_figs/ENBS_SEFU.pdf")
evsi.enbs.plot(plotting_3, c(1260000, 1400000), 2 * c(1560.55, 1600),
k = 20000, Pop = pop.adjust, Time = 7)
dev.off()
optim.ss(plotting_3, c(1260000, 1400000), 2 * c(1560.55, 1600),
k = 20000, Pop = pop.adjust, Time = 7)
pdf("06_figs/COSS_SEFU.pdf")
coss(plotting_3, c(1260000, 1400000), 2 * c(1560.55, 1600),
Pop = pop.adjust, Time = 7)
dev.off()
pdf("06_figs/prob_SEFU.pdf")
evsi.prob.plot(plotting_3, setup = c(1260000, 1400000), pp = 2 * c(1560.55, 1600), k = 20000,
N = 1080, Pop = c(0,60000), Time = c(0,15))
dev.off()
# Analysis function based on JAGS
full_analysis_fn <- function(data, args, pars){
## Format Data - Adjust for 0 recovery times
T_home <- NA
if(data$N_recover_home > 0){
T_home <- as.numeric(as.matrix(data[, (1:data$N_recover_home) + 6]))
}
T_hosp <- NA
if(data$N_recover_hospital > 0){
T_hosp <- as.vector(as.matrix(data[,
(6 + 2 * args$n) + (1:data$N_recover_hospital)]))
}
# Create the data list for JAGS
data_jags <- list(X1 = data$X1,
X2 = data$X2,
X_hosp = data$X_hosp,
X_dead = data$X_dead,
T_home = T_home,
T_hosp = T_hosp,
N_recover_home = ifelse(data$N_recover_home > 0,
data$N_recover_home,
1),
N_recover_hosp = ifelse(data$N_recover_hospital > 0,
data$N_recover_hospital,
1),
n = args$n,
n_side_effects = args$n_side_effects,
n_patients = args$n_patients,
logor_side_effects_mu = args$logor_side_effects_mu,
logor_side_effects_sd = args$logor_side_effects_sd,
p_recovery_home_alpha = betaPar(args$p_recovery_home_mu,
args$p_recovery_home_sd)$alpha,
p_recovery_home_beta = betaPar(args$p_recovery_home_mu,
args$p_recovery_home_sd)$beta,
p_recovery_hosp_alpha = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$alpha,
p_recovery_hosp_beta = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$beta,
n_died = args$n_died,
n_hospitalised = args$n_hospitalised)
LogOR_addoutcomes_trial <- function(){
## Models for the data
X1 ~ dbin(p_side_effects_t1, n)
X2 ~ dbin(p_side_effects_t2, n)
X_hosp ~ dbinom(p_hospitalised_total, X1 + X2)
X_dead ~ dbin(p_died, X_hosp)
rate_recover_home <- -log(1 - lambda_home)
rate_recover_hosp <- -log(1 - lambda_hosp)
for(i in 1:N_recover_home){
T_home[i] ~ dexp(rate_recover_home)
}
for(i in 1:N_recover_hosp){
T_hosp[i] ~ dexp(rate_recover_hosp)
}
# Probability of side effects under treatment 1
p_side_effects_t1 ~ dbeta(1 + n_side_effects,
1 + n_patients - n_side_effects)
# Log odds of side effects on treatment 2
logor_side_effects ~ dnorm(logor_side_effects_mu, logor_side_effects_sd)
# Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
## Variables to define transition probabilities
# Probability that a patient is hospitalised over the time horizon
p_hospitalised_total ~ dbeta(1 + n_hospitalised,
1 + n_side_effects - n_hospitalised)
# Probability that a patient dies over the time horizon given they were
# hospitalised
p_died ~ dbeta(1 + n_died, 1 + n_hospitalised - n_died)
# Lambda_home: Conditional probability that a patient recovers considering
# that they are not hospitalised
lambda_home ~ dbeta(p_recovery_home_alpha, p_recovery_home_beta)
# Lambda_hosp: Conditional probability that a patient recovers considering
# that they do not die
lambda_hosp ~ dbeta(p_recovery_hosp_alpha, p_recovery_hosp_beta)
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(LogOR_addoutcomes_trial,filein)
  # Perform the MCMC simulation with JAGS
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(logor_side_effects = bugs.data$BUGSoutput$sims.matrix[, "logor_side_effects"],
p_hospitalised_total= bugs.data$BUGSoutput$sims.matrix[, "p_hospitalised_total"],
p_died = bugs.data$BUGSoutput$sims.matrix[, "p_died"],
lambda_home = bugs.data$BUGSoutput$sims.matrix[, "lambda_home"],
lambda_hosp = bugs.data$BUGSoutput$sims.matrix[, "lambda_hosp"]))
}
# EVSI calculation using the moment matching method.
evsi_OR_allout_MM <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
pars_datagen = c("p_side_effects_t1",
"logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
n = seq(50, 1500, by = 50),
method = "mm",
datagen_fn = full_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n_side_effects = n_side_effects,
n_patients = n_patients,
n = 50,
logor_side_effects_mu = logor_side_effects_mu,
logor_side_effects_sd = logor_side_effects_sd,
betaPar = betaPar,
p_recovery_home_mu = p_recovery_home_mu,
p_recovery_home_sd = p_recovery_home_sd,
p_recovery_hosp_mu = p_recovery_hosp_mu,
p_recovery_hosp_sd = p_recovery_hosp_sd,
n.iter = 5250,
n_died = n_died,
n_hospitalised = n_hospitalised,
p_side_effects_t1 = m_params$p_side_effects_t1),
analysis_fn = full_analysis_fn,
par_fn = generate_psa_parameters,
npreg_method = "earth")
plotting_4 <- evsi.plot.adapt(chemotherapy_output, m_params, c("logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
evsi_OR_allout_MM, "earth")
pop.adjust <- 46000 * (1 / (1 + 0.035)^3)
pdf("06_figs/ENBS_SEFU_MM.pdf")
evsi.enbs.plot(plotting_4, c(1260000, 1400000), 2 * c(1560.55, 1600),
k = 20000, Pop = pop.adjust, Time = 7)
dev.off()
optim.ss(plotting_4, c(1260000, 1400000), 2 * c(1560.55, 1600),
k = 20000, Pop = pop.adjust, Time = 7)
pdf("06_figs/COSS_SEFU_MM.pdf")
coss(plotting_4, c(1260000, 1400000), 2 * c(1560.55, 1600),
Pop = pop.adjust, Time = 7)
dev.off()
pdf("06_figs/prob_SEFU_MM.pdf")
evsi.prob.plot(plotting_4, setup = c(1260000, 1400000), pp = 2 * c(1560.55, 1600), k = 20000,
N = 1020, Pop = c(0,60000), Time = c(0,15))
dev.off()
### Utilities ###
# Data generation function
utility_datagen_fn <- function(inputs, n = 20,
sd_recovery_fun = function(){return(runif(1, 0.000001, 0.15))},
sd_home_care_fun = function(){return(runif(1, 0.00001, 0.6))},
sd_hospital_fun = function(){return(runif(1, 0.00001, 0.4))}
){
# Load the data
X_home_care <- X_hospital <- X_recovery <- matrix(NA, nrow = dim(inputs)[1], ncol = n[1])
X_recovery_mean1 <- X_recovery_mean2 <- X_home_care_mean1 <- X_home_care_mean2 <-
X_hospital_mean1 <- X_hospital_mean2 <- vector("numeric", dim(inputs)[1])
for(i in 1:dim(inputs)[1]){
set.seed(123 + i)
m_recovery <- inputs[i, "u_recovery"]
m_home_care <- inputs[i, "u_home_care"]
m_hospital <- inputs[i, "u_hospital"]
sd_recovery <- sd_recovery_fun()
sd_home_care <- sd_home_care_fun()
sd_hospital <- sd_hospital_fun()
    # Simulate the utilities
X_recovery[i, ] <- truncnorm::rtruncnorm(n[1], mean = m_recovery, sd = sd_recovery,
a = -Inf, b = 1)
X_home_care[i, ] <- truncnorm::rtruncnorm(n[1], mean = m_home_care, sd = sd_home_care,
a = -Inf, b = 1)
    X_hospital[i, ] <- truncnorm::rtruncnorm(n[1], mean = m_hospital, sd = sd_hospital,
                                             a = -Inf, b = 1)
  }
data_save_dat <- data.frame(cbind(X_recovery = X_recovery,
X_home_care = X_home_care,
X_hospital = X_hospital))
return(data_save_dat)
}
# Analysis function based on JAGS
utility_analysis_fn <- function(data, args, pars){
# Create the data list for JAGS
data_jags <- list(X_recovery = as.vector(as.matrix(data[, (1:args$n)])),
X_home_care = as.vector(as.matrix(data[, args$n + (1:args$n)])),
X_hospital = as.vector(as.matrix(data[, 2*args$n + (1:args$n)])),
n = args$n,
alpha_recovery = args$betaPar(
args$u_recovery_mu,
args$u_recovery_sd
)$alpha,
beta_recovery = args$betaPar(
args$u_recovery_mu,
args$u_recovery_sd
)$beta,
alpha_home_care = args$betaPar(
args$u_home_care_mu,
args$u_home_care_sd
)$alpha,
beta_home_care = args$betaPar(
args$u_home_care_mu,
args$u_home_care_sd
)$beta,
alpha_hospital = args$betaPar(
args$u_hospital_mu,
args$u_hospital_sd
)$alpha,
beta_hospital = args$betaPar(
args$u_hospital_mu,
args$u_hospital_sd
)$beta)
trial <- function(){
for(i in 1:n){
X_recovery[i] ~ dnorm(u_recovery, tau_recovery);T(, 1)
X_home_care[i] ~ dnorm(u_home_care, tau_home_care);T(, 1)
X_hospital[i] ~ dnorm(u_hospital, tau_hospital);T(, 1)
}
u_recovery ~ dbeta(alpha_recovery, beta_recovery)
u_home_care ~ dbeta(alpha_home_care, beta_home_care)
u_hospital ~ dbeta(alpha_hospital, beta_hospital)
sd_recovery ~ dunif(0.000001, 0.15)
sd_home_care ~ dunif(0.00001, 0.6)
sd_hospital ~ dunif(0.00001, 0.4)
tau_recovery <- 1 / sd_recovery ^ 2
tau_home_care <- 1 / sd_home_care ^ 2
tau_hospital <- 1 / sd_hospital ^ 2
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(trial,filein)
  # Perform the MCMC simulation with JAGS
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(u_recovery = bugs.data$BUGSoutput$sims.matrix[, "u_recovery"],
u_home_care = bugs.data$BUGSoutput$sims.matrix[, "u_home_care"],
u_hospital = bugs.data$BUGSoutput$sims.matrix[, "u_hospital"]))
}
# EVSI calculation using the moment matching method.
evsi_utility <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("u_recovery", "u_home_care", "u_hospital"),
n = seq(20, 300, by = 10),
method = "mm",
datagen_fn = utility_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n = 20,
betaPar = betaPar,
u_recovery_mu = u_recovery_mu,
u_recovery_sd = u_recovery_sd,
u_home_care_mu = u_home_care_mu,
u_home_care_sd = u_home_care_sd,
u_hospital_mu = u_hospital_mu,
u_hospital_sd = u_hospital_sd,
n.iter = 5000),
analysis_fn = utility_analysis_fn,
par_fn = generate_psa_parameters,
Q = 50)
plotting_6 <- evsi.plot.adapt(chemotherapy_output, m_params, c("u_recovery", "u_home_care", "u_hospital"),
evsi_utility, "gam")
pdf("06_figs/EVSI_WTP_U_MM.pdf")
evsi.wtp.plot(plotting_6)
dev.off()
pop.adjust <- 46000 * (1 / (1 + 0.035)^2)
pdf("06_figs/ENBS_U_MM.pdf")
evsi.enbs.plot(plotting_6, c(90000, 95000), 3 * c(370-25, 370),
k = 20000, Pop = pop.adjust, Time = 8)
dev.off()
optim.ss(plotting_6, c(90000, 95000), 3 * c(370-25, 370),
k = 20000, Pop = pop.adjust, Time = 8)
pdf("06_figs/COSS_U_MM.pdf")
coss(plotting_6, c(90000, 95000), 3 * c(370-25, 370),
Pop = pop.adjust, Time = 8)
dev.off()
pdf("06_figs/prob_U_MM.pdf")
evsi.prob.plot(plotting_6, setup = c(90000, 95000), pp = 3 * c(370-25, 370), k = 20000,
N = 300, Pop = c(0,60000), Time = c(0,15))
dev.off()
#### STUDY 3: Long-term Survival ####
longterm_datagen_fn <- function(inputs, n = 46000){
rate_longterm <- inputs[, "rate_longterm"]
sum_of_surv <- rgamma(dim(inputs)[1], shape = 2 * n, scale = n / rate_longterm)
return(data.frame(surv_sum = sum_of_surv))
}
# EVSI calculation using GAM regression.
evsi_longterm <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("rate_longterm"),
n = 46000,
method = "gam",
datagen_fn = longterm_datagen_fn,
par_fn = generate_psa_parameters)
plotting_7 <- evsi.plot.adapt(chemotherapy_output, m_params, c("rate_longterm"),
evsi_longterm, "gam")
pdf("06_figs/EVSI_LT.pdf")
evsi.wtp.plot(plotting_7)
dev.off()
evsi_longterm %>% filter(k %in% c(20000, 25000, 30000))
pop.adjust <- 46000 * (1 / (1 + 0.035)^0.5)
ENBS.fun(evsi_longterm %>% filter(k %in% c(20000, 25000, 30000)),
cost=c(60000, 60000), Pop = pop.adjust, Time = 9.5, Dis = 0.035
)
optim.ss(plotting_7, c(60000, 60000), c(0, 0),
         k = 30000, Pop = pop.adjust, Time = 9.5)
pdf("06_figs/EVSI_LT_Prob.pdf")
evsi.prob.plot(plotting_7, c(60000, 60000), c(0, 0),
               k = 20000, Pop = c(0, pop.adjust * 2), Time = c(0, 15))
dev.off()
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/04_analysis/06_Plotting_Book.R
######################
### Plots for Book ###
######################
### Packages
library(voi)
library(ggplot2)
library(dplyr)
library(R2jags)
## Run the model
source("04_analysis/02_baseline_model_output.R")
source("06_figs/01_plotting_functions.R")
## Baseline Cost-Effectiveness Formatting
# The output from the cost-effectiveness model should be formatted for
# the voi package.
# Use BCEA package to create a cost-effectiveness object.
chemotherapy_output <- list(e = m_costs_effects[, "Effects", ],
c = m_costs_effects[, "Costs", ],
k = seq(0, 50000, length.out = 501))
# Using a bespoke analysis function - trial only updates odds ratio.
# Data generation function
OR_datagen_fn <- function(inputs, n = 500){
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
logor_side_effects <- inputs[, "logor_side_effects"]
# Odds for side effects for treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
# Data generation
X1 <- rbinom(length(p_side_effects_t1), n, p_side_effects_t1)
X2 <- rbinom(length(p_side_effects_t2), n, p_side_effects_t2)
data_save <- data.frame(X1 = X1, X2 = X2)
return(data_save)
}
# Analysis function based on JAGS
OR_analysis_fn <- function(data, args, pars){
X1 <- data$X1
X2 <- data$X2
data_jags <- list(X1 = X1,
X2 = X2,
n = args$n,
n_side_effects = args$n_side_effects,
n_patients = args$n_patients,
logor_side_effects_mu = args$logor_side_effects_mu,
logor_side_effects_sd = args$logor_side_effects_sd)
LogOR_trial <- function(){
# Probability of side effects under treatment 1
p_side_effects_t1 ~ dbeta(1 + n_side_effects,
1 + n_patients - n_side_effects)
# Log odds of side effects on treatment 2
logor_side_effects ~ dnorm(logor_side_effects_mu, logor_side_effects_sd)
# Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
X1 ~ dbin(p_side_effects_t1, n)
X2 ~ dbin(p_side_effects_t2, n)
}
filein <- file.path(tempdir(),fileext="datmodel.txt")
R2OpenBUGS::write.model(LogOR_trial,filein)
  # Perform the MCMC simulation with JAGS
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(logor_side_effects = bugs.data$BUGSoutput$sims.matrix[, pars[1]]))
}
# EVSI calculation using the moment matching method.
evsi_OR <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("logor_side_effects"),
pars_datagen = c("p_side_effects_t1", "logor_side_effects"),
n = seq(50, 1500, by = 50),
method = "mm",
datagen_fn = OR_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n_side_effects = n_side_effects,
n_patients = n_patients,
n = 500,
logor_side_effects_mu = logor_side_effects_mu,
logor_side_effects_sd = logor_side_effects_sd,
n.iter = 5250),
analysis_fn = OR_analysis_fn,
par_fn = generate_psa_parameters)
saveRDS(evsi_OR, "evsi_OR.rds")
evsi_OR <- readRDS("evsi_OR.rds")
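# Saving and re-loading the fitted EVSI object avoids re-running the expensive
# moment matching step when only the plotting code below changes.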
### Format for plotting
### What does this do? Returns a list: a data frame with EVSI by sample size and WTP,
## plus the EVPPI. n = Inf? No - needs a special colour.
## ggplot alternative
## TODO: EVPPI would be better returned as a data frame, not a vector
## TODO: add a custom legend to show EVPI and EVPPI
evpi_df <- evpi(outputs = chemotherapy_output)
evppi_df <- data.frame(k=unique(evsi_OR$k), evppi=attr(evsi_OR,"evppi"))
ggplot(evsi_OR, aes(x=k, y=evsi, group=n, color=n)) +
geom_line() +
scale_colour_gradient(low="skyblue", high="darkblue") +
xlab("Willingness to pay") + ylab("EVSI") +
geom_line(data=evpi_df, aes(x=k, y=evpi),
color="black", lwd=1.5, inherit.aes = FALSE) +
geom_line(data=evppi_df, aes(x=k, y=evppi),
color="darkblue", lwd=1.5, inherit.aes = FALSE) +
labs(color="Sample size")
## Just show specific sample sizes
## TODO remove legend, make discrete scale
evsi_OR %>%
filter(n == 250) %>%
ggplot(aes(x=k, y=evsi, group=n, color=n)) +
geom_line() +
scale_colour_gradient(low="skyblue", high="darkblue") +
xlab("Willingness to pay") + ylab("EVSI") +
geom_line(data=evppi_df, aes(x=k, y=evppi),
color="black", lwd=1.5, inherit.aes = FALSE)
plotting_1 <- evsi.plot.adapt(chemotherapy_output,
m_params,
c("logor_side_effects"),
chemo_evsi_or, "gam")
## Base graphics solution
plotting_1 <- evsi.plot.adapt(chemotherapy_output,
m_params,
c("logor_side_effects"),
evsi_OR, "gam")
pdf("06_figs/EVSIwtpN.pdf")
evsi.wtp.plot(plotting_1)
dev.off()
pdf("06_figs/EVSIwtp.pdf")
evsi.wtp.plot(plotting_1, N = 250)
dev.off()
### SS PLOTS: what are these, and for which k? Looks intended
## to give a single one, but why not multiple, distinguished by colour?
## TODO: demonstrate both. It also shows EVPPI as a horizontal line (not EVPI)
pdf("06_figs/EVSIbyN.pdf")
evsi.ss.plot(plotting_1)
dev.off()
### ENBS PLOTS
## Quantiles of ENBS by SS. quantiles as lines.
## Nicer with colour than line types
### Can't tell what ENBS = 0 is
pdf("06_figs/ENBS.pdf")
evsi.enbs.plot(plotting_1,
c(5e6, 1e7), # min and max setup costs of trial
c(28000,42000), # min and max pp costs
k = 20000, Pop = 46000, Time = 10)
dev.off()
optim.ss(plotting_1, mean(c(5e6, 1e7)), mean(c(28000,42000)),
k = 20000, Pop = 46000, Time = 10)
pdf("06_figs/coss.pdf")
coss(plotting_1, c(5e6, 1e7), c(28000,42000), Pop = 46000, Time = 5)
dev.off()
pdf("06_figs/ENBS-pop.pdf")
evsi.prob.plot(plotting_1, setup = c(5e6, 1e7), pp = c(28000,42000), k = 20000,
N = 398, Pop = c(0,60000), Time = c(0,10))
dev.off()
## Side Effects
# Using a bespoke analysis function - trial only updates odds ratio.
# Data generation function
OR_datagen_fn <- function(inputs, n = 500){
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
p_side_effects_t2 <- inputs[, "p_side_effects_t2"]
X1 <- rbinom(length(p_side_effects_t1), n, p_side_effects_t1)
X2 <- rbinom(length(p_side_effects_t2), n, p_side_effects_t2)
# Create odds ratio as summary statistic
OR <- (n - X2) / X2 / ((n - X1) / X1)
data_save <- data.frame(OR = OR)
return(data_save)
}
# EVSI calculation using GAM regression.
evsi_OR <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("p_side_effects_t1", "p_side_effects_t2"),
n = seq(50, 1500, by = 50),
method = "gam",
datagen_fn = OR_datagen_fn,
par_fn = generate_psa_parameters)
plotting_2 <- evsi.plot.adapt(chemotherapy_output, m_params, c("logor_side_effects"), evsi_OR, "gam")
pdf("06_figs/EVSI_WTP_SE.pdf")
evsi.wtp.plot(plotting_2)
dev.off()
pop.adjust <- 46000 * (1 / (1 + 0.035)^3)
pdf("06_figs/ENBS_SE.pdf")
evsi.enbs.plot(plotting_2, c(1260000, 1400000), 2 * c(1560.55, 1600),
               k = 20000, Pop = pop.adjust, Time = 7)
dev.off()
optim.ss(plotting_2, c(1260000, 1400000), 2 * c(1560.55, 1600),
k = 20000, Pop = pop.adjust, Time = 7)
## Full Study
## Moment Matching
# Data generation function
full_datagen_fn <- function(inputs, n = 500){
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
logor_side_effects <- inputs[, "logor_side_effects"]
# Odds for side effects for treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
p_hospitalised_total <- inputs[, "p_hospitalised_total"]
p_died <- inputs[, "p_died"]
lambda_home <- inputs[, "lambda_home"]
lambda_hosp <- inputs[, "lambda_hosp"]
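# Convert the per-period recovery probabilities into equivalent exponential rates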
rate_recover_hosp <- -log(1 - lambda_hosp)
rate_recover_home <- -log(1 - lambda_home)
X1 <- X2 <- X_hosp <- X_dead <- N_recover_home <- N_recover_hospital <- vector("numeric", length = dim(inputs)[1])
T_home <- T_hosp <- matrix(NA, nrow = dim(inputs)[1], ncol = 2 * n)
for(i in 1:dim(inputs)[1]){
# Simulate the number of patients with side effects
X1[i] <- rbinom(1, n, p_side_effects_t1[i])
X2[i] <- rbinom(1, n, p_side_effects_t2[i])
# Simulate the number of patients hospitalised
X_hosp[i] <- rbinom(1, X1[i] + X2[i], p_hospitalised_total[i])
# Simulate the number of patients who die
X_dead[i] <- rbinom(1, X_hosp[i], p_died[i])
## Simulate recovery times for patients
N_recover_home[i] <- X1[i] + X2[i] - X_hosp[i]
if(N_recover_home[i] > 0){
T_home[i, 1:N_recover_home[i]] <- rexp(N_recover_home[i], rate_recover_home[i])
}
N_recover_hospital[i] <- X_hosp[i] - X_dead[i]
if(N_recover_hospital[i] > 0){
T_hosp[i, 1:N_recover_hospital[i]] <- rexp(N_recover_hospital[i], rate_recover_hosp[i])
}
}
data_save_dat <- data.frame(cbind(X1 = X1, X2 = X2,
X_hosp = X_hosp, X_dead = X_dead,
N_recover_home = N_recover_home,
N_recover_hospital = N_recover_hospital,
T_home = T_home, T_hosp = T_hosp))
return(data_save_dat)
}
## Regression Based
full_datagen_fn_RB <- function(inputs, n = 500){
# Load the data
p_side_effects_t1 <- inputs[, "p_side_effects_t1"]
logor_side_effects <- inputs[, "logor_side_effects"]
# Odds for side effects for treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
p_hospitalised_total <- inputs[, "p_hospitalised_total"]
p_died <- inputs[, "p_died"]
lambda_home <- inputs[, "lambda_home"]
lambda_hosp <- inputs[, "lambda_hosp"]
rate_recover_hosp <- -log(1 - lambda_hosp)
rate_recover_home <- -log(1 - lambda_home)
X1 <- X2 <- X_hosp <- X_dead <- N_recover_home <-
N_recover_hospital <- vector("numeric", length = dim(inputs)[1])
T_home <- T_hosp <- matrix(NA, nrow = dim(inputs)[1], ncol = 2 * n)
for(i in 1:dim(inputs)[1]){
# Simulate the number of patients with side effects
X1[i] <- rbinom(1, n, p_side_effects_t1[i])
X2[i] <- rbinom(1, n, p_side_effects_t2[i])
# Simulate the number of patients hospitalised
X_hosp[i] <- rbinom(1, X1[i] + X2[i], p_hospitalised_total[i])
# Simulate the number of patients who die
X_dead[i] <- rbinom(1, X_hosp[i], p_died[i])
## Simulate recovery times for patients
N_recover_home[i] <- X1[i] + X2[i] - X_hosp[i]
if(N_recover_home[i] > 0){
T_home[i, 1:N_recover_home[i]] <- rexp(N_recover_home[i], rate_recover_home[i])
}
N_recover_hospital[i] <- X_hosp[i] - X_dead[i]
if(N_recover_hospital[i] > 0){
T_hosp[i, 1:N_recover_hospital[i]] <- rexp(N_recover_hospital[i], rate_recover_hosp[i])
}
}
OR <- (n - X2) / X2 / ((n - X1) / X1)
p_hosp <- X_hosp / (X1 + X2)
p_dead <- X_dead / X_hosp
T_home_sum <- rowSums(T_home, na.rm = TRUE)
T_hosp_sum <- rowSums(T_hosp, na.rm = TRUE)
data_save_dat <- data.frame(cbind(OR = OR,
p_hosp = p_hosp, p_dead = p_dead,
T_home_sum = T_home_sum,
T_hosp_sum = T_hosp_sum))
return(data_save_dat)
}
# EVSI calculation using MARS regression - large number of parameters.
evsi_OR_allout <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("p_side_effects_t1", "logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
n = seq(50, 1500, by = 50),
method = "earth",
datagen_fn = full_datagen_fn_RB,
par_fn = generate_psa_parameters)
plotting_3 <- evsi.plot.adapt(chemotherapy_output, m_params, c("logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
evsi_OR_allout, "earth")
pop.adjust <- 46000 * (1 / (1 + 0.035)^3)
pdf("06_figs/ENBS_SEFU.pdf")
evsi.enbs.plot(plotting_3, c(1260000, 1400000), 2 * c(1560.55, 1600),
k = 20000, Pop = pop.adjust, Time = 7)
dev.off()
optim.ss(plotting_3, c(1260000, 1400000), 2 * c(1560.55, 1600),
k = 20000, Pop = pop.adjust, Time = 7)
pdf("06_figs/COSS_SEFU.pdf")
coss(plotting_3, c(1260000, 1400000), 2 * c(1560.55, 1600),
Pop = pop.adjust, Time = 7)
dev.off()
pdf("06_figs/prob_SEFU.pdf")
evsi.prob.plot(plotting_3, setup = c(1260000, 1400000), pp = 2 * c(1560.55, 1600), k = 20000,
N = 1080, Pop = c(0,60000), Time = c(0,15))
dev.off()
# Analysis function based on JAGS
full_analysis_fn <- function(data, args, pars){
## Format Data - Adjust for 0 recovery times
T_home <- NA
if(data$N_recover_home > 0){
T_home <- as.numeric(as.matrix(data[, (1:data$N_recover_home) + 6]))
}
T_hosp <- NA
if(data$N_recover_hospital > 0){
T_hosp <- as.vector(as.matrix(data[,
(6 + 2 * args$n) + (1:data$N_recover_hospital)]))
}
# Create the data list for JAGS
data_jags <- list(X1 = data$X1,
X2 = data$X2,
X_hosp = data$X_hosp,
X_dead = data$X_dead,
T_home = T_home,
T_hosp = T_hosp,
N_recover_home = ifelse(data$N_recover_home > 0,
data$N_recover_home,
1),
N_recover_hosp = ifelse(data$N_recover_hospital > 0,
data$N_recover_hospital,
1),
n = args$n,
n_side_effects = args$n_side_effects,
n_patients = args$n_patients,
logor_side_effects_mu = args$logor_side_effects_mu,
logor_side_effects_sd = args$logor_side_effects_sd,
p_recovery_home_alpha = betaPar(args$p_recovery_home_mu,
args$p_recovery_home_sd)$alpha,
p_recovery_home_beta = betaPar(args$p_recovery_home_mu,
args$p_recovery_home_sd)$beta,
p_recovery_hosp_alpha = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$alpha,
p_recovery_hosp_beta = betaPar(args$p_recovery_hosp_mu,
args$p_recovery_hosp_sd)$beta,
n_died = args$n_died,
n_hospitalised = args$n_hospitalised)
LogOR_addoutcomes_trial <- function(){
## Models for the data
X1 ~ dbin(p_side_effects_t1, n)
X2 ~ dbin(p_side_effects_t2, n)
X_hosp ~ dbinom(p_hospitalised_total, X1 + X2)
X_dead ~ dbin(p_died, X_hosp)
rate_recover_home <- -log(1 - lambda_home)
rate_recover_hosp <- -log(1 - lambda_hosp)
for(i in 1:N_recover_home){
T_home[i] ~ dexp(rate_recover_home)
}
for(i in 1:N_recover_hosp){
T_hosp[i] ~ dexp(rate_recover_hosp)
}
# Probability of side effects under treatment 1
p_side_effects_t1 ~ dbeta(1 + n_side_effects,
1 + n_patients - n_side_effects)
# Log odds of side effects on treatment 2
logor_side_effects ~ dnorm(logor_side_effects_mu, logor_side_effects_sd)
# Odds of side effects on treatment 1
odds_side_effects_t1 <- p_side_effects_t1 / (1 - p_side_effects_t1)
# Odds for side effects on treatment 2
odds_side_effects_t2 <- odds_side_effects_t1 * exp(logor_side_effects)
# Probability of side effects under treatment 2
p_side_effects_t2 <- odds_side_effects_t2 / (1 + odds_side_effects_t2)
## Variables to define transition probabilities
# Probability that a patient is hospitalised over the time horizon
p_hospitalised_total ~ dbeta(1 + n_hospitalised,
1 + n_side_effects - n_hospitalised)
# Probability that a patient dies over the time horizon given they were
# hospitalised
p_died ~ dbeta(1 + n_died, 1 + n_hospitalised - n_died)
# Lambda_home: Conditional probability that a patient recovers considering
# that they are not hospitalised
lambda_home ~ dbeta(p_recovery_home_alpha, p_recovery_home_beta)
# Lambda_hosp: Conditional probability that a patient recovers considering
# that they do not die
lambda_hosp ~ dbeta(p_recovery_hosp_alpha, p_recovery_hosp_beta)
}
filein <- file.path(tempdir(), "datmodel.txt")
R2OpenBUGS::write.model(LogOR_addoutcomes_trial,filein)
# Perform the MCMC simulation with JAGS.
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(logor_side_effects = bugs.data$BUGSoutput$sims.matrix[, "logor_side_effects"],
p_hospitalised_total= bugs.data$BUGSoutput$sims.matrix[, "p_hospitalised_total"],
p_died = bugs.data$BUGSoutput$sims.matrix[, "p_died"],
lambda_home = bugs.data$BUGSoutput$sims.matrix[, "lambda_home"],
lambda_hosp = bugs.data$BUGSoutput$sims.matrix[, "lambda_hosp"]))
}
# EVSI calculation using the moment matching method.
evsi_OR_allout_MM <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
pars_datagen = c("p_side_effects_t1",
"logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
n = seq(50, 1500, by = 50),
method = "mm",
datagen_fn = full_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n_side_effects = n_side_effects,
n_patients = n_patients,
n = 50,
logor_side_effects_mu = logor_side_effects_mu,
logor_side_effects_sd = logor_side_effects_sd,
betaPar = betaPar,
p_recovery_home_mu = p_recovery_home_mu,
p_recovery_home_sd = p_recovery_home_sd,
p_recovery_hosp_mu = p_recovery_hosp_mu,
p_recovery_hosp_sd = p_recovery_hosp_sd,
n.iter = 5250,
n_died = n_died,
n_hospitalised = n_hospitalised,
p_side_effects_t1 = m_params$p_side_effects_t1),
analysis_fn = full_analysis_fn,
par_fn = generate_psa_parameters,
npreg_method = "earth")
plotting_4 <- evsi.plot.adapt(chemotherapy_output, m_params, c("logor_side_effects",
"p_hospitalised_total", "p_died",
"lambda_home", "lambda_hosp"),
evsi_OR_allout_MM, "earth")
pop.adjust <- 46000 * (1 / (1 + 0.035)^3)
pdf("06_figs/ENBS_SEFU_MM.pdf")
evsi.enbs.plot(plotting_4, c(1260000, 1400000), 2 * c(1560.55, 1600),
k = 20000, Pop = pop.adjust, Time = 7)
dev.off()
optim.ss(plotting_4, c(1260000, 1400000), 2 * c(1560.55, 1600),
k = 20000, Pop = pop.adjust, Time = 7)
pdf("06_figs/COSS_SEFU_MM.pdf")
coss(plotting_4, c(1260000, 1400000), 2 * c(1560.55, 1600),
Pop = pop.adjust, Time = 7)
dev.off()
pdf("06_figs/prob_SEFU_MM.pdf")
evsi.prob.plot(plotting_4, setup = c(1260000, 1400000), pp = 2 * c(1560.55, 1600), k = 20000,
N = 1020, Pop = c(0,60000), Time = c(0,15))
dev.off()
### Utilities ###
# Data generation function
utility_datagen_fn <- function(inputs, n = 20,
sd_recovery_fun = function(){return(runif(1, 0.000001, 0.15))},
sd_home_care_fun = function(){return(runif(1, 0.00001, 0.6))},
sd_hospital_fun = function(){return(runif(1, 0.00001, 0.4))}
){
# Load the data
X_home_care <- X_hospital <- X_recovery <- matrix(NA, nrow = dim(inputs)[1], ncol = n[1])
X_recovery_mean1 <- X_recovery_mean2 <- X_home_care_mean1 <- X_home_care_mean2 <-
X_hospital_mean1 <- X_hospital_mean2 <- vector("numeric", dim(inputs)[1])
for(i in 1:dim(inputs)[1]){
set.seed(123 + i)
m_recovery <- inputs[i, "u_recovery"]
m_home_care <- inputs[i, "u_home_care"]
m_hospital <- inputs[i, "u_hospital"]
sd_recovery <- sd_recovery_fun()
sd_home_care <- sd_home_care_fun()
sd_hospital <- sd_hospital_fun()
# Simulate the utility data (truncated normal, bounded above by 1)
X_recovery[i, ] <- truncnorm::rtruncnorm(n[1], mean = m_recovery, sd = sd_recovery,
a = -Inf, b = 1)
X_home_care[i, ] <- truncnorm::rtruncnorm(n[1], mean = m_home_care, sd = sd_home_care,
a = -Inf, b = 1)
X_hospital[i, ] <- truncnorm::rtruncnorm(n[1], mean = m_hospital, sd = sd_hospital,
a = -Inf, b = 1) }
data_save_dat <- data.frame(cbind(X_recovery = X_recovery,
X_home_care = X_home_care,
X_hospital = X_hospital))
return(data_save_dat)
}
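## Illustrative check (not part of the original script): the generated data frame
## should have 3 * n columns (recovery, home care and hospital utilities), which is
## the column layout assumed by utility_analysis_fn below.
ncol(utility_datagen_fn(m_params[1:2, ], n = 20)) # expect 60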
# Analysis function based on JAGS
utility_analysis_fn <- function(data, args, pars){
# Create the data list for JAGS
data_jags <- list(X_recovery = as.vector(as.matrix(data[, (1:args$n)])),
X_home_care = as.vector(as.matrix(data[, args$n + (1:args$n)])),
X_hospital = as.vector(as.matrix(data[, 2*args$n + (1:args$n)])),
n = args$n,
alpha_recovery = args$betaPar(
args$u_recovery_mu,
args$u_recovery_sd
)$alpha,
beta_recovery = args$betaPar(
args$u_recovery_mu,
args$u_recovery_sd
)$beta,
alpha_home_care = args$betaPar(
args$u_home_care_mu,
args$u_home_care_sd
)$alpha,
beta_home_care = args$betaPar(
args$u_home_care_mu,
args$u_home_care_sd
)$beta,
alpha_hospital = args$betaPar(
args$u_hospital_mu,
args$u_hospital_sd
)$alpha,
beta_hospital = args$betaPar(
args$u_hospital_mu,
args$u_hospital_sd
)$beta)
trial <- function(){
for(i in 1:n){
X_recovery[i] ~ dnorm(u_recovery, tau_recovery);T(, 1)
X_home_care[i] ~ dnorm(u_home_care, tau_home_care);T(, 1)
X_hospital[i] ~ dnorm(u_hospital, tau_hospital);T(, 1)
}
u_recovery ~ dbeta(alpha_recovery, beta_recovery)
u_home_care ~ dbeta(alpha_home_care, beta_home_care)
u_hospital ~ dbeta(alpha_hospital, beta_hospital)
sd_recovery ~ dunif(0.000001, 0.15)
sd_home_care ~ dunif(0.00001, 0.6)
sd_hospital ~ dunif(0.00001, 0.4)
tau_recovery <- 1 / sd_recovery ^ 2
tau_home_care <- 1 / sd_home_care ^ 2
tau_hospital <- 1 / sd_hospital ^ 2
}
filein <- file.path(tempdir(), "datmodel.txt")
R2OpenBUGS::write.model(trial,filein)
# Perform the MCMC simulation with JAGS.
bugs.data <- jags(
data = data_jags,
parameters.to.save = pars,
model.file = filein,
n.chains = 1,
n.iter = args$n.iter,
n.thin = 1,
n.burnin = 250, progress.bar = "none")
return(data.frame(u_recovery = bugs.data$BUGSoutput$sims.matrix[, "u_recovery"],
u_home_care = bugs.data$BUGSoutput$sims.matrix[, "u_home_care"],
u_hospital = bugs.data$BUGSoutput$sims.matrix[, "u_hospital"]))
}
# EVSI calculation using the moment matching method.
evsi_utility <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("u_recovery", "u_home_care", "u_hospital"),
n = seq(20, 300, by = 10),
method = "mm",
datagen_fn = utility_datagen_fn,
model_fn = calculate_costs_effects,
analysis_args = list(n = 20,
betaPar = betaPar,
u_recovery_mu = u_recovery_mu,
u_recovery_sd = u_recovery_sd,
u_home_care_mu = u_home_care_mu,
u_home_care_sd = u_home_care_sd,
u_hospital_mu = u_hospital_mu,
u_hospital_sd = u_hospital_sd,
n.iter = 5000),
analysis_fn = utility_analysis_fn,
par_fn = generate_psa_parameters,
Q = 50)
plotting_6 <- evsi.plot.adapt(chemotherapy_output, m_params, c("u_recovery", "u_home_care", "u_hospital"),
evsi_utility, "gam")
pdf("06_figs/EVSI_WTP_U_MM.pdf")
evsi.wtp.plot(plotting_6)
dev.off()
pop.adjust <- 46000 * (1 / (1 + 0.035)^2)
pdf("06_figs/ENBS_U_MM.pdf")
evsi.enbs.plot(plotting_6, c(90000, 95000), 3 * c(370-25, 370),
k = 20000, Pop = pop.adjust, Time = 8)
dev.off()
optim.ss(plotting_6, c(90000, 95000), 3 * c(370-25, 370),
k = 20000, Pop = pop.adjust, Time = 8)
pdf("06_figs/COSS_U_MM.pdf")
coss(plotting_6, c(90000, 95000), 3 * c(370-25, 370),
Pop = pop.adjust, Time = 8)
dev.off()
pdf("06_figs/prob_U_MM.pdf")
evsi.prob.plot(plotting_6, setup = c(90000, 95000), pp = 3 * c(370-25, 370), k = 20000,
N = 300, Pop = c(0,60000), Time = c(0,15))
dev.off()
#### STUDY 3: Long-term Survival ####
longterm_datagen_fn <- function(inputs, n = 46000){
rate_longterm <- inputs[, "rate_longterm"]
sum_of_surv <- rgamma(dim(inputs)[1], shape = 2 * n, scale = n / rate_longterm)
return(data.frame(surv_sum = sum_of_surv))
}
# EVSI calculation using GAM regression.
evsi_longterm <- evsi(outputs = chemotherapy_output,
inputs = m_params,
pars = c("rate_longterm"),
n = 46000,
method = "gam",
datagen_fn = longterm_datagen_fn,
par_fn = generate_psa_parameters)
plotting_7 <- evsi.plot.adapt(chemotherapy_output, m_params, c("rate_longterm"),
evsi_longterm, "gam")
pdf("06_figs/EVSI_LT.pdf")
evsi.wtp.plot(plotting_7)
dev.off()
evsi_longterm %>% filter(k %in% c(20000, 25000, 30000))
pop.adjust <- 46000 * (1 / (1 + 0.035)^0.5)
ENBS.fun(evsi_longterm %>% filter(k %in% c(20000, 25000, 30000)),
cost=c(60000, 60000), Pop = pop.adjust, Time = 9.5, Dis = 0.035
)
optim.ss(plotting_7, c(60000, 60000), c(0, 0),
k = 30000, Pop = pop.adjust, Time = 9.5)
pdf("06_figs/EVSI_LT_Prob.pdf")
evsi.prob.plot(plotting_7, c(60000, 60000), c(0, 0),
k = 20000, Pop = c(0, pop.adjust * 2), Time = c(0, 15))
dev.off()
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/04_analysis/06_Plotting_Book_ggplot.R
|
##### Example To Test Plotting #####
### Function to translate the output from the evsi command (evsi.obj) into
### an object to plot
evsi.plot.adapt <- function (outputs, inputs, pars, evsi.obj, method)
{
if (is.null(evsi.obj)) {
stop("You have not provided the EVSI. Please include an evsi.obj object from the evsi function")
}
evsi <- evsi.obj %>% tidyr::pivot_wider(names_from = "k", values_from = "evsi")
evpi <- voi::evpi(outputs)
evppi <- evppi(outputs, inputs, pars, method)
to.return <- list(evsi = evsi, attrib = list(k = as.numeric(colnames(evsi))[-1],
N = evsi$n),
evppi = evppi, evpi = evpi)
class(to.return) <- "evsi.plot"
return(to.return)
}
### Plots EVSI across willingness to pay. Originally plot.evsi in EVSI package
evsi.wtp.plot <- function (evsi, pos = c(0, 0.8), N = NULL)
{
if (class(evsi) != "evsi.plot") {
stop("plot.evsi must be used with an evsi.plot object created using the evsi.plot.adapt function.")
}
alt.legend <- pos
if (is.numeric(alt.legend) & length(alt.legend) == 2) {
temp <- ""
if (alt.legend[2] == 0)
temp <- paste0(temp, "bottom")
else if (alt.legend[2] != 0.5)
temp <- paste0(temp, "top")
if (alt.legend[1] == 1)
temp <- paste0(temp, "right")
else temp <- paste0(temp, "left")
alt.legend <- temp
if (length(grep("^((bottom|top)(left|right)|right)$",
temp)) == 0)
alt.legend <- FALSE
}
if (is.logical(alt.legend)) {
if (!alt.legend)
alt.legend = "topright"
else alt.legend = "topleft"
}
N.length <- length(evsi$attrib$N)
if (class(N) == "numeric") {
select.length <- length(N)
select <- rep(NA, select.length)
for (i in 1:select.length) {
select[i] <- which.min((evsi$attrib$N - N[i])^2)
}
warning("The EVSI is only calculated for sample sizes listed in evsi$attrib$N. If N is not in this list then EVSI calculated for closest possible sample size.")
N <- evsi$attrib$N[select]
}
if (is.null(N)) {
select <- 1:N.length
N <- evsi$attrib$N
select.length = length(select)
}
EVSI <- evsi$evsi[select, -1]
plot(evsi$evpi$k, evsi$evpi$evpi, t = "l", xlab = "Willingness to pay",
ylab = "", main = "Expected Value of Sample Information",
lwd = 2, ylim = range(range(evsi$evpi$evpi), range(evsi$evppi$evppi),
range(EVSI)),
xlim = range(range(evsi$evpi$k), range(evsi$evppi$k),
range(evsi$attrib$k)),
col = "black")
points(evsi$evppi$k, evsi$evppi$evppi, t = "l", col = "black",
lty = 1)
colours <- colorRampPalette(colors = c("skyblue", "blue", "darkblue"))(select.length)
if (length(evsi$attrib$k) < 30) {
for (s in 1:select.length) {
points(evsi$attrib$k, EVSI[s, ], pch = 19, col = colours[s])
}
}
if (length(evsi$attrib$k) >= 30) {
for (s in 1:select.length) {
points(evsi$attrib$k, EVSI[s, ], type = "l", col = colours[s])
}
}
if (select.length == 1) {
legend(alt.legend, c("EVPI", "EVPPI for focal parameters",
paste("EVSI for sample size of", N)), col = c("black",
"black", colours), cex = 0.7, box.lwd = 0, box.col = "white",
bg = "white", lty = c(1, 1, 1), lwd = c(2, 1))
}
if (select.length > 1) {
legend(alt.legend, legend = c(min(N), rep(NA, max(0,
select.length - 2)), max(N)), fill = colours, border = colours,
cex = 0.75, y.intersp = max(0.1, 1.2/select.length),
box.lwd = 0, box.col = "white", bg = "white")
}
box()
}
### Plots evsi across sample size for a fixed willingness to pay.
### Originally plot.samplesize in EVSI package
evsi.ss.plot <- function (evsi, k = NULL, pos = c("bottomright"))
{
alt.legend <- pos
if (is.numeric(alt.legend) & length(alt.legend) == 2) {
temp <- ""
if (alt.legend[2] == 0)
temp <- paste0(temp, "bottom")
else if (alt.legend[2] != 0.5)
temp <- paste0(temp, "top")
if (alt.legend[1] == 1)
temp <- paste0(temp, "right")
else temp <- paste0(temp, "left")
alt.legend <- temp
if (length(grep("^((bottom|top)(left|right)|right)$",
temp)) == 0)
alt.legend <- FALSE
}
if (is.logical(alt.legend)) {
if (!alt.legend)
alt.legend = "topright"
else alt.legend = "topleft"
}
if (!(class(k) %in% c("numeric" , "integer"))) {
k.select <- which.max(evsi$evpi$evpi)[1]
k <- evsi$attrib$k[k.select]
}
if (class(k) %in% c("numeric" , "integer")) {
k.select <- which.min(abs(evsi$attrib$k - k))[1]
k <- evsi$attrib$k[k.select]
}
if (length(evsi$attrib$N) == 1) {
stop("This plot gives the EVSI across sample size. Do not use on a single design.")
}
EVSI <- evsi$evsi[k.select + 1]
evppi <- evsi$evppi$evppi[k.select]
plot(1, 1, ylim = c(min(EVSI) * 0.95, evppi),
xlim = c(min(evsi$attrib$N), max(evsi$attrib$N)), col = "white",
xlab = expression("Sample Size"), ylab = "Per Person EVSI",
oma = c(0, 0, -1, 0), main = "Expected Value of Sample Information across Sample Size")
if (length(evsi$attrib$N) < 15) {
points(evsi$attrib$N, t(EVSI), pch = 19, lwd = 2,
lty = 1)
}
if (length(evsi$attrib$N) >= 15) {
points(evsi$attrib$N, t(EVSI), type = "l", lwd = 2,
lty = 1)
}
abline(h = evppi, col = "springgreen", lwd = 3, lty = 2)
legend(alt.legend, c("EVSI", "EVPPI"), col = c("black", "springgreen"),
lwd = c(2,3), lty = c(1,2), box.lwd = 0,
box.col = "white", bg = "white")
box()
}
### Calculates the ENBS (internal function)
ENBS.fun <- function(evsi, Pop, Time, Dis, cost) {
enbs <- evsi * Pop/Dis * (1 - exp(-Dis * Time)) - cost
return(enbs)
}
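## Illustrative example with assumed numbers (not taken from the book analyses):
## per-person EVSI of 100, decision population 46000, 10-year horizon,
## 3.5% discount rate and a total trial cost of 1 million.
if (FALSE) {
ENBS.fun(evsi = 100, Pop = 46000, Time = 10, Dis = 0.035, cost = 1e6)
}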
### Calculates the standard deviation of the ENBS to account for uncertain costs
### (internal function). The ENBS is linear in the EVSI and the cost, so its
### variance is the sum of the scaled EVSI variance and the cost variance.
ENBS.sd.calc <- function(evsi.sd, Pop, Time, Dis, cost.sd) {
var <- (Pop/Dis * (1 - exp(-Dis * Time)))^2 * evsi.sd^2 +
cost.sd^2
return(sqrt(var))
}
### Plots the probability of a cost-effective trial as a sensitivity analysis
### over the time horizon and population size. Originally plot.prob.ce in EVSI package
evsi.prob.plot <- function (evsi, trial.cost = NULL, setup = NULL, pp = NULL,
Pop = c(0, 10000), Time = c(1, 20), Dis = 0.035, k = NULL, N = NULL,
pos = c("topright"))
{
alt.legend <- pos
if (is.numeric(alt.legend) & length(alt.legend) == 2) {
temp <- ""
if (alt.legend[2] == 0)
temp <- paste0(temp, "bottom")
else if (alt.legend[2] != 0.5)
temp <- paste0(temp, "top")
if (alt.legend[1] == 1)
temp <- paste0(temp, "right")
else temp <- paste0(temp, "left")
alt.legend <- temp
if (length(grep("^((bottom|top)(left|right)|right)$",
temp)) == 0)
alt.legend <- FALSE
}
if (is.logical(alt.legend)) {
if (!alt.legend)
alt.legend = "topright"
else alt.legend = "topleft"
}
if (!(class(k) %in% c("numeric" , "integer"))) {
k.select <- which.max(evsi$evpi$evpi)[1]
k <- evsi$attrib$k[k.select]
}
if (class(k) %in% c("numeric" , "integer")) {
k.select <- which.min(abs(evsi$attrib$k - k))[1]
k <- evsi$attrib$k[k.select]
}
if (class(evsi$attrib$N) != "numeric") {
N.select <- 1
if (class(N) != "numeric") {
N <- evsi$attrib$N
}
}
if (class(evsi$attrib$N) == "numeric") {
if (class(N) != "numeric") {
N.select <- ceiling(length(evsi$attrib$N)/2)
N <- evsi$attrib$N[N.select]
}
if (class(N) == "numeric") {
N.select <- which.min(abs(evsi$attrib$N - N))
}
}
type.evsi <- "det"
evsi.focal <- as.numeric(c(evsi$evsi[N.select, k.select + 1]))
evsi.params <- c(as.numeric(evsi$evsi[N.select, k.select + 1]),
0)
if (is.null(trial.cost)) {
if (class(N) == "character") {
stop("Please define the trial costs using trial.costs or the sample size of experiment using N=")
}
if (is.null(setup) || is.null(pp)) {
stop("Please give the trial costs using either trial.costs for the full costs\n or setup and pp to give the set up and per person costs ")
}
setup.params <- c(mean(setup), (range(setup)[2] - range(setup)[1])/4)
pp.params <- c(mean(pp), (range(pp)[2] - range(pp)[1])/4)
trial.cost <- c(setup.params[1] + pp.params[1] * N, sqrt(setup.params[2]^2 +
N^2 * pp.params[2]^2))
}
colours <- colorRampPalette(c("black", "navy", "blue", "skyblue",
"aliceblue", "white"))(100)
if (class(trial.cost) == "numeric") {
if (length(trial.cost) == 1) {
trial.cost.params <- c(trial.cost, 0)
}
if (length(trial.cost) == 2) {
trial.cost.params <- c(mean(trial.cost), (range(trial.cost)[2] -
range(trial.cost)[1])/2)
}
Time.min <- min(Time)
Time.max <- max(Time)
Pop.min <- min(Pop)
Pop.max <- max(Pop)
dens.points <- 100
Time.seq <- seq(Time.min, Time.max, length.out = dens.points)
Pop.seq <- seq(Pop.min, Pop.max, length.out = dens.points)
Prob.mat <- matrix(NA, nrow = length(Time.seq), ncol = length(Pop.seq))
for (i in 1:length(Time.seq)) {
for (j in 1:length(Pop.seq)) {
ENBS.mean <- ENBS.fun(evsi.params[1], Pop.seq[j],
Time.seq[i], Dis, trial.cost.params[1])
ENBS.sd <- ENBS.sd.calc(evsi.params[2], Pop.seq[j],
Time.seq[i], Dis, trial.cost.params[2])
Prob.mat[i, j] <- pnorm(0, ENBS.mean, ENBS.sd,
lower.tail = FALSE)
}
}
}
image(x = Time.seq, y = Pop.seq, z = Prob.mat, col = colours,
main = "Probability of Cost-Effective Trial", xlab = "Time Horizon",
ylab = "Incidence Population", xlim = c(Time.min, Time.max),
ylim = c(Pop.min, Pop.max), breaks = seq(0, 1, length.out = 101))
legend(alt.legend, c("Prob=0", rep(NA, 98/2), "Prob=.5",
rep(NA, 96/2), "Prob=1"), fill = colours, border = colours,
cex = 0.75, y.intersp = 0.15, box.lwd = 0, box.col = "white",
bg = "white")
box()
}
### Determines the optimal sample size of the study.
### Originally optim.samplesize in EVSI package
optim.ss <- function (evsi, setup, pp, Pop, Time, k = NULL, Dis = 0.035)
{
if (!(class(k) %in% c("numeric" , "integer"))) {
k.select <- which.max(evsi$evpi$evpi)[1]
k <- evsi$attrib$k[k.select]
}
if (class(k) %in% c("numeric" , "integer")) {
k.select <- which.min(abs(evsi$attrib$k - k))[1]
k <- evsi$attrib$k[k.select]
}
EVSI <- evsi$evsi[, k.select + 1]
if ((length(setup) > 1) || (length(pp) > 1)) {
setup <- mean(setup)
pp <- mean(pp)
}
ENBS <- as.numeric(as.matrix(Pop * EVSI/Dis * (1 - exp(-Dis * Time)) - setup -
pp * evsi$attrib$N))
max.select <- which.max(ENBS)
max.less <- max.select - 1
max.greater <- max.select + 1
if ((max.less < 1) | (max.greater > length(ENBS))) {
N.max <- evsi$attrib$N[max.select]
ENBS.max <- ENBS[max.select]
warning("Optimal sample size is at the limit of the considered values for N. An alternative sample size may be optimal,\n please consider alternative values of N in the evsi.calc function.")
}
else {
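# Interpolate the optimum by fitting a quadratic in N through the ENBS at the
# best computed sample size and its two neighbours, then taking the turning point.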
N.fit <- evsi$attrib$N[c(max.less, max.select, max.greater)]
N2 <- N.fit^2
ENBS.fit <- ENBS[c(max.less, max.select, max.greater)]
model.optim <- lm(ENBS.fit ~ N.fit + N2)
N.max <- round(-model.optim$coefficients[2]/(2 * model.optim$coefficients[3]))
ENBS.max <- predict(model.optim, list(N.fit = N.max,
N2 = N.max^2))
}
tol <- ENBS.max - abs(ENBS.max * 0.05)
limits <- which(ENBS > tol)
if(length(limits) > 0){
N.range <- range(evsi$attrib$N[limits])
}
if(length(limits) == 0){
c.quad <- model.optim$coefficients[1] - tol
b.quad <- model.optim$coefficients[2]
a.quad <- model.optim$coefficients[3]
N.range <- c((-b.quad - sqrt(b.quad^2 - 4 * a.quad * c.quad)) / (2 * a.quad),
(-b.quad + sqrt(b.quad^2 - 4 * a.quad * c.quad)) / (2 * a.quad))
}
return(list(SS.max = N.max, ENBS = ENBS.max, SS.I = N.range))
}
### Plots ENBS across sample size for fixed WTP/population size and time horizon
### Originally plot.enbs in EVSI package
evsi.enbs.plot <- function (evsi, setup, pp, Pop = 10000, Time = 10,
Dis = 0.035, k = NULL, N = NULL, pos = c("bottomright"))
{
alt.legend <- pos
if (is.numeric(alt.legend) & length(alt.legend) == 2) {
temp <- ""
if (alt.legend[2] == 0)
temp <- paste0(temp, "bottom")
else if (alt.legend[2] != 0.5)
temp <- paste0(temp, "top")
if (alt.legend[1] == 1)
temp <- paste0(temp, "right")
else temp <- paste0(temp, "left")
alt.legend <- temp
if (length(grep("^((bottom|top)(left|right)|right)$",
temp)) == 0)
alt.legend <- FALSE
}
if (is.logical(alt.legend)) {
if (!alt.legend)
alt.legend = "topright"
else alt.legend = "topleft"
}
if (!(class(k) %in% c("numeric" , "integer"))) {
k.select <- which.max(evsi$evpi$evpi)[1]
k <- evsi$attrib$k[k.select]
}
if (class(k) %in% c("numeric" , "integer")) {
k.select <- which.min(abs(evsi$attrib$k - k))[1]
k <- evsi$attrib$k[k.select]
}
if (class(N) != "numeric") {
N <- evsi$attrib$N
}
if (class(N) == "character") {
stop("Please define the sample size of your experiment using N=")
}
length.N <- length(N)
N.select <- array(NA, dim = length.N)
for (i in 1:length.N) {
N.select[i] <- which.min((evsi$attrib$N - N[i])^2)
}
prob <- c(0.025, 0.25, 0.5, 0.75, 0.975)
length.prob <- length(prob)
type.evsi <- "det"
evsi.params <- cbind(evsi$evsi[N.select, k.select + 1], 0)
if (length(setup) != 2 || length(pp) != 2) {
stop("Please give minimum and maximum values for the setup and per person trial costs.")
}
setup.params <- c(mean(setup), (range(setup)[2] - range(setup)[1])/4)
pp.params <- c(mean(pp), (range(pp)[2] - range(pp)[1])/4)
trial.cost <- array(NA, dim = c(length.N, 2))
for (i in 1:length.N) {
trial.cost[i, ] <- c(setup.params[1] + pp.params[1] *
N[i], sqrt(setup.params[2]^2 + N[i]^2 * pp.params[2]^2))
}
colours <- colorRampPalette(c("black", "navy", "blue", "skyblue",
"aliceblue", "white"))(100)
ENBS.mat <- array(NA, dim = c(length.N, 5))
for (j in 1:length.N) {
ENBS.mean <- ENBS.fun(evsi.params[j, 1], Pop, Time, Dis,
trial.cost[j, 1])
ENBS.sd <- ENBS.sd.calc(evsi.params[j, 2], Pop, Time,
Dis, trial.cost[j, 2])
ENBS.mat[j, ] <- qnorm(prob, ENBS.mean, ENBS.sd)
}
if (length.prob%%2 == 1) {
lwd <- c(1:ceiling(length.prob/2), (ceiling(length.prob/2) -
1):1, 1)
lty <- c(ceiling(length.prob/2):1, 2:ceiling(length.prob/2),
1)
}
if (length.prob%%2 == 0) {
lwd <- c(1:(length.prob/2), (length.prob/2):1, 1)
lty <- c((length.prob/2):1, 2:(length.prob/2), 1)
}
plot.new()
plot.window(xlim = c(min(N), max(N)), ylim = c(min(ENBS.mat),
max(ENBS.mat)))
title(main = "Expected Net Benefit of Sampling by Sample Size",
xlab = "Sample Size", ylab = "ENBS")
axis(side = 2)
if (length.N < 15) {
for (l in 1:length.prob) {
points(N, ENBS.mat[, l], pch = 19, lwd = lwd[l])
points(N, ENBS.mat[, l], type = "l", lwd = lwd[l])
}
legend(alt.legend, c(as.character(prob), "ENBS=0"),
col = c(rep("black", length.prob), "springgreen"),
lwd = lwd, box.lwd = 0, pch = 19,
box.col = "white", bg = "white")
}
if (length.N >= 15) {
for (l in 1:length.prob) {
points(N, ENBS.mat[, l], type = "l", lwd = lwd[l],
lty = lty[l])
}
legend(alt.legend, c(as.character(prob), "ENBS=0"), col = c(rep("black",
length.prob), "springgreen"), lwd = lwd, lty = lty, box.lwd = 0,
box.col = "white", bg = "white")
}
abline(h = 0, col = "springgreen", lwd = lwd[length.prob +
1], lty = lty[length.prob + 1])
box()
optimal <- optim.ss(evsi, setup, pp, Pop, Time, k = k,
Dis = Dis)
axis(side = 1)
a <- 0.04
poi <- (1 + a) * min(ENBS.mat) - a * max(ENBS.mat)
points(c(optimal$SS.I), c(poi, poi), type = "l", col = "red",
lwd = 3)
points(c(optimal$SS.max), min(poi), pch = 9, col = "red",
lwd = 3)
}
### Plots the curve of optimal sample size.
### Not in the EVSI package, but the function structure and argument names match
### those of the other functions.
coss <- function (evsi, setup, pp, Pop = 10000, Time = 10,
Dis = 0.035, N = NULL, pos = c("bottomright"))
{
alt.legend <- pos
if (is.numeric(alt.legend) & length(alt.legend) == 2) {
temp <- ""
if (alt.legend[2] == 0)
temp <- paste0(temp, "bottom")
else if (alt.legend[2] != 0.5)
temp <- paste0(temp, "top")
if (alt.legend[1] == 1)
temp <- paste0(temp, "right")
else temp <- paste0(temp, "left")
alt.legend <- temp
if (length(grep("^((bottom|top)(left|right)|right)$",
temp)) == 0)
alt.legend <- FALSE
}
if (is.logical(alt.legend)) {
if (!alt.legend)
alt.legend = "topright"
else alt.legend = "topleft"
}
if (class(N) != "numeric") {
N <- evsi$attrib$N
}
length.N <- length(N)
N.select <- array(NA, dim = length.N)
for (i in 1:length.N) {
N.select[i] <- which.min((evsi$attrib$N - N[i])^2)
}
length.k <- length(evsi$attrib$k)
k <- evsi$attrib$k
type.evsi <- "det"
evsi.params <- evsi$evsi[N.select, ]
oss.mat <- array(NA, dim = c(length.k, 3))
for (j in 1:length.k) {
optimal <- optim.ss(evsi, setup, pp, Pop, Time, k = k[j],
Dis = Dis)
oss.mat[j, 1] <- optimal$SS.max
oss.mat[j, 2:3] <- optimal$SS.I
}
plot.new()
plot.window(xlim = c(min(k), max(k)), ylim = c(min(oss.mat),
max(oss.mat)))
title(main = "Curve of Optimal Sample Size",
xlab = "Willingness to Pay", ylab = "Optimal Sample Size")
axis(side = 2)
points(k, oss.mat[, 1], type = "l", lwd = 2)
points(k, oss.mat[, 2], type = "l", lwd = 1, lty = 2)
points(k, oss.mat[, 3], type = "l", lwd = 1, lty = 2)
box()
axis(side = 1)
}
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/Chemotherapy_Book/06_figs/01_plotting_functions.R
|
---
output:
pdf_document:
keep_tex: TRUE
title: "VoI book code: Case study and guidelines for estimating EVPPI using regression in R"
editor_options:
chunk_output_type: console
---
Change this path to reflect where you downloaded the Chemotherapy_Book repository.
```{r,echo=FALSE}
chemo_dir <- system.file("Chemotherapy_Book",package="voi")
# setwd(chemo_dir) # only if running interactively
```
This block of code is only needed if you want to compile this `Rmd` source file into a document.
```{r,echo=FALSE,message=FALSE}
library(knitr)
library(voi)
opts_knit$set(root.dir = chemo_dir)
options(digits=3, scipen = 1e+05)
```
Draw a sample from probabilistic analysis of the model.
```{r}
source("04_analysis/01_model_run.R")
nb <- m_net_benefit[ , , wtp_seq == 20000]
```
## Calculating single-parameter EVPPI.
Calculate the single-parameter EVPPI for all the parameters in the model. This returns a data frame containing the EVPPI estimates.
```{r}
library(voi)
pars_all <- as.list(names(m_params))
ev_single <- evppi(outputs=nb, inputs=m_params, pars=pars_all)
ev_single
```
Dot-plot of the estimates, sorted with the highest values at the top.
```{r,dotplot,fig.width=7}
plot(ev_single, order = TRUE)
```
Single-parameter EVPPIs for a specified subset of parameters.
```{r,results="hide"}
evppi(outputs=nb, inputs=m_params,
pars=list("logor_side_effects", "p_side_effects_t1", "u_hospital"))
```
Multi-parameter EVPPI for four groups of parameters: those associated with
side effects, transition probabilities, costs and utilities respectively.
```{r}
par_groups <- list(
"side_effects" = c("p_side_effects_t1","logor_side_effects"),
"trans_probs" = c("p_hospitalised_total","p_died",
"lambda_home","lambda_hosp"),
"costs" = c("c_home_care","c_hospital","c_death"),
"utilities" = c("u_recovery","u_home_care","u_hospital")
)
ev_grouped <- evppi(outputs=nb, inputs=m_params, pars=par_groups)
ev_grouped
```
In this example, it is clear that the parameters associated with the risk of side effects
have the greatest EVPPI.
## Checking regression models for EVPPI calculation
Figure shown in the book
```{r,results='hide',fig.keep='all',fig.height=3,regression_diagnostics}
ev_single <- evppi(outputs=nb, inputs=m_params, pars=pars_all, check=TRUE)
check_regression(ev_single, pars = "logor_side_effects")
```
Additional analysis with standard errors:
```{r,eval=FALSE}
evppi(outputs=nb, inputs=m_params,
pars = list("p_side_effects_t2","u_hospital"), se=TRUE)
```
Alternative regression models:
```{r}
evppi(outputs=nb, inputs=m_params,
pars = list("p_side_effects_t2","u_hospital"))
evppi(outputs=nb, inputs=m_params,
pars = list("p_side_effects_t2","u_hospital"),
method="earth")
```
## Comparing different regression specifications: single-parameter EVPPI
```{r}
(e1 <- evppi(outputs=nb, inputs=m_params, pars=par_groups[1], check=TRUE))
(e2 <- evppi(outputs=nb, inputs=m_params, pars=par_groups[1],
gam_formula="s(p_side_effects_t1) + s(logor_side_effects)", check=TRUE))
check_regression(e1, plot=FALSE)
check_regression(e2, plot=FALSE)
```
`earth` models with two-way versus three-way interactions
```{r}
(e1 <- evppi(outputs=nb, inputs=m_params, pars=par_groups[1], method = "earth", check=TRUE))
(e2 <- evppi(outputs=nb, inputs=m_params, pars=par_groups[1], method = "earth",
degree=3, check=TRUE))
check_regression(e1,plot=FALSE)
check_regression(e2,plot=FALSE)
```
## Comparing different regression specifications: multi-parameter EVPPI
```{r,eval=FALSE}
costs_utilities <- c(par_groups$costs, par_groups$utilities)
ev1 <- evppi(outputs=nb, inputs=m_params, pars=costs_utilities, method="gam",
gam_formula = "s(c_home_care) + s(c_hospital) + s(c_death) +
s(u_recovery) + s(u_home_care) + s(u_hospital)",
check=TRUE)
ev2 <- evppi(outputs=nb, inputs=m_params, pars=costs_utilities, method="gam",
gam_formula = all_interactions(costs_utilities, 2), check=TRUE)
ev3 <- evppi(outputs=nb, inputs=m_params, pars=costs_utilities, method="earth", check=TRUE)
ev4 <- evppi(outputs=nb, inputs=m_params, pars=costs_utilities, method="earth",
degree=2, check=TRUE)
ev5 <- evppi(outputs=nb, inputs=m_params, pars=costs_utilities, method="gp")
ev6 <- evppi(outputs=nb, inputs=m_params, pars=costs_utilities, method="inla")
```
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/book_misc/evppi_reg.Rmd
|
---
output:
pdf_document:
keep_tex: TRUE
title: "VoI book: graphs of GAM regression"
editor_options:
chunk_output_type: console
---
Change this path to reflect where you downloaded the Chemotherapy_Book repository
```{r,echo=FALSE}
chemo_dir <- system.file("Chemotherapy_Book",package="voi")
setwd(chemo_dir)
```
```{r,echo=FALSE,message=FALSE}
library(knitr)
library(voi)
opts_knit$set(root.dir = chemo_dir)
options(digits=3, scipen = 1e+05)
```
## Graph to illustrate GAM regression
```{r,regression_illus}
set.seed(1)
source("04_analysis/01_model_run.R")
library(ggplot2)
library(mgcv)
nb <- as.data.frame(m_costs_effects[,"Effects",]*20000 - m_costs_effects[,"Costs",])
names(nb) <- c("SoC","Novel")
inb <- nb[,"Novel"] - nb[,"SoC"]
x <- m_params[,"logor_side_effects"]
set.seed(1)
trim_points <- which(inb > 18000 | inb < -10000)
dat <- data.frame(inb, x)
#dat <- data.frame(inb, x)[-trim_points,]
ggplot(dat, aes(x=x, y=inb)) +
geom_point(alpha=0.2, col="gray30", fill="gray30", pch=1)
mod <- gam(inb ~ s(x), data=dat)
dat$fitted <- fitted(mod)
dat$frontier <- pmax(dat$fitted, 0)
phi_crit <- min(dat$x[dat$frontier==0])
pointdat <- data.frame(x=-1.4, xpad=-1.42, xend=0.2,
inb=c(mean(dat$inb), mean(dat$frontier)),
label=c("Current", "Partial perfect"))
dat2 <- data.frame(x1=-0.25, x2=-0.25, y1=-1000, y2=0)
pdf("~/work/voibook/voibook/Figures/04-evppi/regression_illus.pdf", width=6, height=3.5)
ggplot(dat, aes(x=x, y=inb)) +
geom_point(pch=16, col="gray60", size=0.7) +
geom_segment(data=pointdat, aes(x=x, xend=xend, y=inb, yend=inb)) +
geom_vline(aes(xintercept=round(phi_crit,2)), col="gray70") +
xlab(expression(paste("Parameter ", phi))) +
ylab("Incremental net benefit") +
xlim(-1.7, 0.2) +
scale_y_continuous(breaks = c(-2500, round(mean(dat$inb)),
round(mean(dat$frontier)), 2500)) +
scale_x_continuous(breaks=c(-1.5,-1,round(phi_crit,2),0,0.5)) +
geom_text(data=pointdat, aes(label=label, x=xpad), hjust=1) +
geom_line(aes(y = frontier), lwd=5, alpha=0.4) +
geom_line(aes(y = fitted), lwd=1.5) +
theme(legend.position = "none") +
annotate(x=0,y=-600,geom="text",size=2.7,label="Value gained by",col="blue") +
annotate(x=0,y=-1000,geom="text",size=2.7,label="changing decision",col="blue") +
geom_segment(data=dat2, aes(x = x1, y = y1, xend = x2, yend = y2),
arrow = arrow(length = unit(0.03, "npc")), col="blue") +
geom_segment(data=dat2, aes(x = x1, y = y2, xend = x2, yend = y1),
arrow = arrow(length = unit(0.03, "npc")), col="blue") +
theme_bw()
dev.off()
(evppi <- mean(dat$frontier) - mean(dat$inb))
```
## Graph to illustrate GAM regression for variance-based VoI
```{r,regression_illus_var}
dat2 <- data.frame(x1=-1.3, x2=-1.3, y1=-700, y2=min(dat$fitted[round(dat$x,2)==-1.33 ]))
pdf("regression_illus_var.pdf", width=6, height=3.5)
ggplot(dat, aes(x=x, y=inb)) +
geom_point(pch=16, col="gray60", size=0.7) +
xlab(expression(paste("Value of parameter ", phi))) +
ylab(expression(paste("Model output quantity of interest ", alpha))) +
xlim(-1.7, 0.2) +
geom_line(aes(y = fitted), lwd=1.5) +
theme(legend.position = "none") +
geom_segment(data=dat2, aes(x = x1, y = y1, xend = x2, yend = y2),
arrow = arrow(length = unit(0.03, "npc")), col="blue") +
geom_segment(data=dat2, aes(x = x1, y = y2, xend = x2, yend = y1),
arrow = arrow(length = unit(0.03, "npc")), col="blue") +
annotate("text", x = -1.5, y = 0,
label = "Residuals", col="blue") +
theme_bw()
dev.off()
```
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/book_misc/gam_graphs.Rmd
|
## Illustrations of GP regression
library(ggplot2)
set.seed(1)
y <- sample(chemo_nb, 40) / 1000000
x <- sample(chemo_pars[,"logor_side_effects"], 40)
qplot(y, x)
## Can we fit a GP regression using the package's built-in tools?
## Can we use gpFunc, supplying the hyperparameters via hyper?
# Need to understand what gpFunc does and modularise it.
# It handles the names of the parameters of interest (why not do this outside?)
# and does some error checking.
# It removes any parameters that are constant; that case needs an external warning.
xpred <- seq(min(x), max(x), by=0.001)
mod <- voi:::gp(y, x, Xpred=xpred)
# lower the first par to make less smooth
# nu controls the extent of measurement error.
# Note this does not do any prediction outside the data points so the fit is jagged.
mod <- voi:::gp(y, x, hyper = c(1, 0.5637912), Xpred=xpred)
mod <- voi:::gp(y, x, hyper = c(0.5, 0.5637912), Xpred=xpred)
mod1 <- voi:::gp(y, x, hyper = c(1, 0.001), Xpred=xpred)
mod2 <- voi:::gp(y, x, hyper = c(0.5, 0.001), Xpred=xpred)
mod3 <- voi:::gp(y, x, hyper = c(0.1, 0.001), Xpred=xpred)
mod4 <- voi:::gp(y, x, hyper = c(0.01, 0.001), Xpred=xpred)
dat <- data.frame(y, x)
datpred1 <- data.frame(x=xpred, fitted=mod1$pred, delta=1)
datpred2 <- data.frame(x=xpred, fitted=mod2$pred, delta=0.5)
datpred3 <- data.frame(x=xpred, fitted=mod3$pred, delta=0.1)
datpred4 <- data.frame(x=xpred, fitted=mod4$pred, delta=0.01)
datpred <- rbind(datpred1, datpred2, datpred3)
datpred$delta <- factor(datpred$delta)
cols <- RColorBrewer::brewer.pal(4, "Greys")[2:4]
pdf("~/work/voibook/voibook/Figures/04-evppi/gp_regression.pdf", width=6, height=3)
ggplot(dat, aes(x=x, y=y)) +
geom_line(data=datpred, aes(x=x, y=fitted, col=delta), lwd=1, alpha=0.8) +
scale_color_manual(breaks=levels(datpred$delta), values=cols) +
coord_cartesian(ylim=c(0, 0.6)) +
geom_point(size=2) +
xlab("Predictor") + ylab("Outcome") +
labs(col=expression(delta)) +
theme_bw()
dev.off()
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/book_misc/gp_graphs.r
|
library(tidyverse)
library(viridis)
bet <- SHELF::fitdist(vals=c(0.2, 0.4, 0.6),
probs=c(0.025, 0.5, 0.975),
lower=0, upper=1)$Beta
## https://en.wikipedia.org/wiki/Logit-normal_distribution
dlogitnorm <- function(x, mu, sd){
dnorm(qlogis(x), mu, sd) / (x*(1-x))
}
pdat <- data.frame(x = seq(0, 1, by=0.01)) %>%
mutate(dbet = dbeta(x, bet[["shape1"]], bet[["shape2"]]),
dln = dlogitnorm(x, qlogis(0.4),
(qlogis(0.6) - qlogis(0.2))/(2*qnorm(0.975)))) %>%
pivot_longer(cols = c("dbet", "dln"), names_to="dist", values_to = "dens") %>%
mutate(dist = fct_recode(dist, "Beta(8.9, 13.1)"="dbet", "Logit-normal(-0.4,0.46)"="dln"))
green <- viridis(5)[4]
purple <- viridis(5)[2]
pdf("~/work/voibook/voibook/chapters/chapter01_modelling/elic_compare.pdf", width=6, height=4)
ggplot(pdat, aes(x=x, y=dens, col=dist, linetype=dist)) +
geom_line(lwd=2) +
scale_color_manual(name = "", values = c(green, purple)) +
scale_linetype_manual(name = "", values=c(1, 3)) +
theme_bw() +
theme(legend.position = c(0.8, 0.8) ) +
xlab(expression(italic(p))) + ylab("Probability density") +
scale_x_continuous(breaks = c(0, 0.2, 0.4, 0.6, 0.8, 1))
dev.off()
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/book_misc/he_graphs.R
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----message=FALSE,warning=FALSE----------------------------------------------
library(voi)
library(ggplot2)
library(dplyr)
## -----------------------------------------------------------------------------
head(chemo_evsi_or)
## -----------------------------------------------------------------------------
evpi_df <- evpi(outputs = chemo_cea_501)
## -----------------------------------------------------------------------------
evppi_df <- attr(chemo_evsi_or, "evppi")
## -----------------------------------------------------------------------------
ggplot(chemo_evsi_or, aes(x=k, y=evsi, group=n, color=n)) +
geom_line() +
scale_colour_gradient(low="skyblue", high="darkblue") +
xlab("Willingness to pay (£)") + ylab("EVSI per person") +
geom_line(data=evpi_df, aes(x=k, y=evpi),
color="black", lwd=1.5, inherit.aes = FALSE) +
geom_line(data=evppi_df, aes(x=k, y=evppi),
color="darkblue", lwd=1.5, inherit.aes = FALSE) +
labs(color="Sample size") +
xlim(0,52000) +
annotate("text",x=50000,y=125,label="EVPI",hjust=0) +
annotate("text",x=50000,y=107,label="EVPPI",color="darkblue",hjust=0) +
annotate("text",x=50000,y=50,label="EVSI",color="darkblue",hjust=0)
## -----------------------------------------------------------------------------
n_use <- c(250, 500, 1000)
chemo_evsi_or %>%
filter(n %in% n_use) %>%
ggplot(aes(x=k, y=evsi, group=n, color=n)) +
geom_line(lwd=1.5) +
scale_colour_binned(breaks=n_use,
low = "gray80", high = "gray20",
guide=guide_legend(title="Sample size",
reverse=TRUE)) +
xlab("Willingness to pay (£)") + ylab("EVSI (per person)") +
geom_line(data=evppi_df, aes(x=k, y=evppi),
color="darkblue", lwd=1.5, lty=3, inherit.aes = FALSE) +
annotate("text", x=50000, y=130, col="darkblue", label="EVPPI", hjust=1)
## -----------------------------------------------------------------------------
chemo_evsi_or %>%
filter(k %in% seq(10000,50000, by=1000)) %>%
ggplot(aes(x=n, y=evsi, group=k, color=k)) +
geom_line() +
scale_colour_gradient(low="skyblue", high="darkblue") +
xlab("Sample size") + ylab("EVSI per person") +
labs(color="Willingness-to-pay (£)")
## -----------------------------------------------------------------------------
nbs <- enbs(chemo_evsi_or,
costs_setup = c(5000000, 10000000),
costs_pp = c(28000, 42000),
pop = 46000,
time = 10)
nbs %>%
filter(k==20000) %>%
head()
## -----------------------------------------------------------------------------
nbs %>%
filter(k==20000) %>%
ggplot(aes(x=n, y=enbs)) +
geom_line() +
xlab("Sample size") + ylab("Expected net benefit of sampling") +
scale_y_continuous(labels = scales::dollar_format(prefix="£"))
## -----------------------------------------------------------------------------
nbs %>%
filter(k==20000) %>%
mutate(q975 = qnorm(0.975, enbs, sd),
q75 = qnorm(0.75, enbs, sd),
q25 = qnorm(0.25, enbs, sd),
q025 = qnorm(0.025, enbs, sd)) %>%
ggplot(aes(y=enbs, x=n)) +
geom_ribbon(aes(ymin=q025, ymax=q975), fill="gray80") +
geom_ribbon(aes(ymin=q25, ymax=q75), fill="gray50") +
geom_line() +
xlab("Sample size") + ylab("Expected net benefit of sampling") +
scale_y_continuous(labels = scales::dollar_format(prefix="£")) +
annotate("text",x=1100,y=85000000,label="95% credible interval") +
annotate("text",x=1250,y=70000000,label="50% credible interval")
## -----------------------------------------------------------------------------
nbs_subset <- nbs %>%
filter(k==20000, n %in% seq(100,800,by=100))
eopt <- enbs_opt(nbs_subset, smooth=TRUE, keep_preds=TRUE)
## -----------------------------------------------------------------------------
eopt
## -----------------------------------------------------------------------------
preds <- attr(eopt, "preds")
head(preds, 2)
## -----------------------------------------------------------------------------
ggplot(nbs_subset, aes(x=n, y=enbs)) +
geom_point() +
geom_line(data=preds) +
xlab("Sample size") + ylab("Expected net benefit of sampling") +
scale_y_continuous(labels = scales::dollar_format(prefix="£")) +
geom_vline(data=eopt, aes(xintercept=nlower), col="blue", lty=2) +
geom_vline(data=eopt, aes(xintercept=nmax), col="blue", lty=2) +
geom_vline(data=eopt, aes(xintercept=nupper), col="blue", lty=2) +
geom_hline(data=eopt, aes(yintercept=enbsmax), col="blue", lty=2)
## -----------------------------------------------------------------------------
attr(nbs, "enbsmax") %>%
filter(k %in% seq(20000,50000,by=10000))
## -----------------------------------------------------------------------------
p1 <- attr(nbs, "enbsmax") %>%
ggplot(aes(y=nmax, x=k)) +
geom_ribbon(aes(ymin=nlower, ymax=nupper), fill="gray") +
geom_line() +
ylim(0,800) +
xlab("Willingness to pay (£)") + ylab("Optimal sample size") +
ggtitle("Unsmoothed")
nbs_smoothed <- enbs(chemo_evsi_or,
costs_setup = c(5000000, 10000000),
costs_pp = c(28000, 42000),
pop = 46000, time = 10, smooth=TRUE)
p2 <- attr(nbs_smoothed, "enbsmax") %>%
ggplot(aes(y=nmax, x=k)) +
geom_ribbon(aes(ymin=nlower, ymax=nupper), fill="gray") +
geom_line() +
ylim(0,800) +
xlab("Willingness to pay (£)") + ylab("Optimal sample size") +
ggtitle("Smoothed")
gridExtra::grid.arrange(p1, p2, nrow=1)
## -----------------------------------------------------------------------------
nbs <- enbs(chemo_evsi_or %>% filter(k==20000, n==400),
costs_setup = c(5000000, 10000000),
costs_pp = c(28000, 42000),
pop = seq(0,60000,length.out=50),
time = seq(0,10,length.out=50))
ggplot(nbs, aes(y=pop, x=time, fill=pce)) +
geom_raster() +
labs(fill="Probability") +
scale_fill_gradient(low="darkblue", high="white") +
xlab("Decision horizon (years)") + ylab("Incident population") +
theme_minimal()
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/doc/plots.R
|
---
title: 'Plots of Value of Information measures'
author: "Christopher Jackson"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
\usepackage[utf8]{inputenc}
%\VignetteIndexEntry{Plots of Value of Information measures}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
The `voi` package functions `evppi()`, `evpi()` and `evsi()` all return data frames in a "tidy" format with one row per VoI estimate. This allows plots to be produced and customised with little further effort using `ggplot2`.
This vignette demonstrates how to illustrate VoI analyses graphically, using an example EVSI results dataset `chemo_evsi_or` supplied with the `voi` package.
We also explain the use of the `enbs()` function to calculate and optimise the expected net benefit of sampling for a simple study design.
Knowledge of [ggplot2](https://ggplot2.tidyverse.org/) is required to be able to adapt these plots for your own analyses and communication needs, but plenty of learning resources are linked from the [ggplot2 home page](https://ggplot2.tidyverse.org/#learning-ggplot2).
Some functions to create similar plots using base R graphics are supplied with the [VoI Book](https://chjackson.github.io/voi/articles/book.html).
```{r,message=FALSE,warning=FALSE}
library(voi)
library(ggplot2)
library(dplyr)
```
# Example EVSI dataset
The example dataset `chemo_evsi_or` included with the package gives the results of an EVSI analysis for the chemotherapy example model.
This object is a data frame with `r nrow(chemo_evsi_or)` rows and three columns, giving the sample size per arm (`n`, from 50 to 1500), willingness-to-pay (`k`, from 10000 to 50000 pounds) and the corresponding EVSI (`evsi`).
```{r}
head(chemo_evsi_or)
```
> **Sidenote:** The EVSI in this example is the expected value of a two-arm trial with a binary outcome of whether a patient experiences side-effects. The trial is only used to inform the log odds ratio of side effects between treatments, and information about the baseline risk of side effects is ignored. Source code for generating this dataset using the moment matching method in `evsi()` is provided [here](https://github.com/chjackson/voi/blob/master/data-raw/chemo_evsi_or.R).
For reference, we also calculate the EVPI for the corresponding willingness-to-pay values, using output from probabilistic analysis of the decision model, as stored in the object `chemo_cea_501` provided with the `voi` package.
```{r}
evpi_df <- evpi(outputs = chemo_cea_501)
```
We also extract the values of the EVPPI (for the log odds ratio) into a data frame of the same format. These EVPPI values are already stored in `chemo_evsi_or`, because they had been
calculated as a by-product of the moment matching method to calculate EVSI.
```{r}
evppi_df <- attr(chemo_evsi_or, "evppi")
```
# EVSI curves
The following `ggplot` code produces curves of EVSI against willingness to pay, with different sample sizes shown by curves of different colours. The sample size is indicated by a colour gradient. The corresponding curves of EVPI, and EVPPI for the "focal" parameter of interest (the log odds ratio of side effects), are also shown for reference.
```{r}
ggplot(chemo_evsi_or, aes(x=k, y=evsi, group=n, color=n)) +
geom_line() +
scale_colour_gradient(low="skyblue", high="darkblue") +
xlab("Willingness to pay (£)") + ylab("EVSI per person") +
geom_line(data=evpi_df, aes(x=k, y=evpi),
color="black", lwd=1.5, inherit.aes = FALSE) +
geom_line(data=evppi_df, aes(x=k, y=evppi),
color="darkblue", lwd=1.5, inherit.aes = FALSE) +
labs(color="Sample size") +
xlim(0,52000) +
annotate("text",x=50000,y=125,label="EVPI",hjust=0) +
annotate("text",x=50000,y=107,label="EVPPI",color="darkblue",hjust=0) +
annotate("text",x=50000,y=50,label="EVSI",color="darkblue",hjust=0)
```
Or we can adapt the plot to only show a limited number of sample sizes, shown in a discrete legend, and customise the colour range.
```{r}
n_use <- c(250, 500, 1000)
chemo_evsi_or %>%
filter(n %in% n_use) %>%
ggplot(aes(x=k, y=evsi, group=n, color=n)) +
geom_line(lwd=1.5) +
scale_colour_binned(breaks=n_use,
low = "gray80", high = "gray20",
guide=guide_legend(title="Sample size",
reverse=TRUE)) +
xlab("Willingness to pay (£)") + ylab("EVSI (per person)") +
geom_line(data=evppi_df, aes(x=k, y=evppi),
color="darkblue", lwd=1.5, lty=3, inherit.aes = FALSE) +
annotate("text", x=50000, y=130, col="darkblue", label="EVPPI", hjust=1)
```
In a similar way, we can produce curves of EVSI against sample size, with different willingness-to-pay (WTP) values shown by curves of different colours.
To reduce visual clutter, only a limited number of willingness-to-pay values are illustrated. This plot shows how the EVSI converges to the (WTP-dependent) EVPPI for the focal parameter (log odds ratio of side effects) as the sample size increases.
```{r}
chemo_evsi_or %>%
filter(k %in% seq(10000,50000, by=1000)) %>%
ggplot(aes(x=n, y=evsi, group=k, color=k)) +
geom_line() +
scale_colour_gradient(low="skyblue", high="darkblue") +
xlab("Sample size") + ylab("EVSI per person") +
labs(color="Willingness-to-pay (£)")
```
# Expected net benefit of sampling
The `voi` package includes a function `enbs()` to obtain the expected net benefit of sampling for a proposed study of individual participants, given estimates of EVSI. See the help page `enbs()` for full documentation for this function.
> The `enbs()` function supposes that the costs of the study include a fixed setup cost (`costs_setup`), and a cost per participant recruited (`costs_pp`). (Note that if `n` describes the sample size per arm in a two-arm study, `costs_pp` is actually the cost of recruiting two people, one in each arm.) To acknowledge uncertainty, a range of costs may be supplied as two numbers.
These are assumed to describe a 95\% credible interval for the cost, with the point estimate given by the midpoint between these two numbers.
> Additional required information includes the size of the population affected by the decision (`pop`), and a discount rate (`disc`, by default 0.035) to be applied over a time horizon `time`. These may be supplied as vectors, in which case ENBS is calculated for all potential combinations of values ([see below](#probce) for an example).
Here is an example of calling `enbs()`. The result is a data frame
giving the sample size (`n`), willingness-to-pay (`k`), and corresponding
ENBS (`enbs`). The standard deviation (`sd`) describes the uncertainty
about the ENBS estimate due to the uncertain costs. `pce` is the probability
that the study is cost-effective (i.e. has a positive net benefit of sampling).
The other components of this data frame are related to the optimal sample size [(as discussed below)](#optss)
```{r}
nbs <- enbs(chemo_evsi_or,
costs_setup = c(5000000, 10000000),
costs_pp = c(28000, 42000),
pop = 46000,
time = 10)
nbs %>%
filter(k==20000) %>%
head()
```
The expected net benefit of sampling for a given willingness-to-pay
can then be illustrated by filtering the `nbs` dataframe to this
willingness-to-pay value, then passing it to a simple `ggplot`.
```{r}
nbs %>%
filter(k==20000) %>%
ggplot(aes(x=n, y=enbs)) +
geom_line() +
xlab("Sample size") + ylab("Expected net benefit of sampling") +
scale_y_continuous(labels = scales::dollar_format(prefix="£"))
```
> Note the `scales::dollar_format` trick for showing large currency values tidily in the axis labels.
The same colouring techniques used above (using a `group` aesthetic, with `scale_colour_gradient`) might also be used to illustrate the
dependence of the ENBS curve on the willingness-to-pay.
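For example, a minimal sketch of such a plot, using the `nbs` data frame computed above and a few selected willingness-to-pay values:
```{r}
nbs %>%
  filter(k %in% seq(20000, 50000, by=10000)) %>%
  ggplot(aes(x=n, y=enbs, group=k, color=k)) +
  geom_line() +
  scale_colour_gradient(low="skyblue", high="darkblue") +
  xlab("Sample size") + ylab("Expected net benefit of sampling") +
  scale_y_continuous(labels = scales::dollar_format(prefix="£")) +
  labs(color="Willingness-to-pay (£)")
```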
Uncertainty around the ENBS can be illustrated by plotting quantiles of
the ENBS alongside the point estimates. These quantiles can be derived
from the point estimate (`enbs`) and the standard deviation (`sd`) stored in the data frame that we called `nbs`, assuming a normal distribution.
```{r}
nbs %>%
filter(k==20000) %>%
mutate(q975 = qnorm(0.975, enbs, sd),
q75 = qnorm(0.75, enbs, sd),
q25 = qnorm(0.25, enbs, sd),
q025 = qnorm(0.025, enbs, sd)) %>%
ggplot(aes(y=enbs, x=n)) +
geom_ribbon(aes(ymin=q025, ymax=q975), fill="gray80") +
geom_ribbon(aes(ymin=q25, ymax=q75), fill="gray50") +
geom_line() +
xlab("Sample size") + ylab("Expected net benefit of sampling") +
scale_y_continuous(labels = scales::dollar_format(prefix="£")) +
annotate("text",x=1100,y=85000000,label="95% credible interval") +
annotate("text",x=1250,y=70000000,label="50% credible interval")
```
# Optimal sample size {#optss}
The `enbs()` function also estimates the optimal sample size for each willingness-to-pay value. That is, the sample size which gives the
maximum expected net benefit of sampling. The optimal sample size can be
estimated using two alternative methods.
1. A simple and fast method: just pick the highest ENBS from among the sample sizes included in the `evsi` object that was supplied to `enbs()` (a minimal sketch of this is shown just after this list).
2. A more sophisticated (but slower) method uses nonparametric regression
to smooth and interpolate the ENBS estimates. This is useful if the EVSI
has only been computed for a limited number of sample sizes, or if the
EVSI estimates are noisy (which may happen for the regression-based method).
We might judge that the ENBS is a smooth function of sample size, that we could
estimate by regression on the estimates that have been computed.
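As a minimal sketch, the first method can be done by hand for a willingness-to-pay of £20,000, by searching the `nbs` data frame created above for the sample size with the highest computed ENBS.
```{r}
nbs %>%
  filter(k==20000) %>%
  filter(enbs == max(enbs)) %>%   # row with the highest ENBS among the computed sample sizes
  select(k, n, enbs)
```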
## Smooth interpolation
Here is a demonstration of how the regression smoothing method works.
* Suppose we have computed EVSI (hence ENBS) for a limited set of sample sizes, say eight, from 100 to 800 by 100, and for a specific willingness to pay (and population size, horizon and discount rate).
* The function `enbs_opt` is used to create from this a smoothed and interpolated dataset (here called `preds`) that contains 701 ENBS estimates, for every integer from 100 to 800. The maximum ENBS can then be found by searching through this interpolated dataset.
```{r}
nbs_subset <- nbs %>%
filter(k==20000, n %in% seq(100,800,by=100))
eopt <- enbs_opt(nbs_subset, smooth=TRUE, keep_preds=TRUE)
```
The `enbs_opt` function returns the maximum ENBS (`enbsmax`), the optimal
sample size (`nmax`) and an interval estimate (`nlower` to `nupper`) defined
by the lowest and highest sample size that have ENBS within 5% of the optimum.
```{r}
eopt
```
The full interpolated dataset is also returned as the `"preds"` attribute.
```{r}
preds <- attr(eopt, "preds")
head(preds, 2)
```
In this graph, the eight pre-computed ENBS values are shown as points, and the 701 smoothed/interpolated values are shown on a line.
```{r}
ggplot(nbs_subset, aes(x=n, y=enbs)) +
geom_point() +
geom_line(data=preds) +
xlab("Sample size") + ylab("Expected net benefit of sampling") +
scale_y_continuous(labels = scales::dollar_format(prefix="£")) +
geom_vline(data=eopt, aes(xintercept=nlower), col="blue", lty=2) +
geom_vline(data=eopt, aes(xintercept=nmax), col="blue", lty=2) +
geom_vline(data=eopt, aes(xintercept=nupper), col="blue", lty=2) +
geom_hline(data=eopt, aes(yintercept=enbsmax), col="blue", lty=2)
```
The maximum, and the range of sample sizes whose ENBS is
within 5% of this maximum, are shown as blue dotted lines.
> **Technical note:** this uses the default spline regression model `s()` from
the `gam` function in the `mgcv` package. The flexibility of the fitted model can be configured by setting the `smooth_df` argument to either `enbs` or `enbs_opt`. This is passed as the `k` argument to the `s()` function,
and defaults to 6 (or the number of sample sizes minus 1, if this is lower than 6).
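For example (not run here), a stiffer smooth with a basis dimension of 4 might be requested as follows; this sketch assumes `smooth_df` is passed directly to `enbs_opt` as described above.
```{r,eval=FALSE}
enbs_opt(nbs_subset, smooth=TRUE, smooth_df=4, keep_preds=TRUE)
```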
`enbs_opt` assumes a single willingness-to-pay (and population size, etc.).
In practice you do not need to use `enbs_opt`, unless you want to check the goodness-of-fit of the regression that is fitted. Instead you can just call `enbs(..., smooth=TRUE)`, and this will determine the optimal sample size, using smoothing, for each of multiple willingness-to-pay values (see the `nbs_smoothed` object below for an example).
## Note about uncertainty
In this example, there are a wide range of sample sizes within 5% of the optimum. In practice, the cheapest study, the one at the bottom of this range, might still be deemed acceptable to decision-makers. There is still a lot of unacknowledged uncertainty in this analysis, including about the costs, incident population size, decision horizon, and computational uncertainty in the per-person EVSI estimate. The location of the "exact" maximum of the curve of ENBS versus sample size might be considered a minor issue in the context of these other uncertainties.
## Curve of optimal sample size
For convenience, the `enbs()` function also returns, as the `"enbsmax"` attribute, a data frame with one row per willingness-to-pay (`k`), giving the optimal ENBS (`enbsmax`), the optimal sample size (`nmax`) and an interval estimate for the optimal sample size (`nlower` to `nupper`). An extract is shown here.
```{r}
attr(nbs, "enbsmax") %>%
filter(k %in% seq(20000,50000,by=10000))
```
This allows a plot to be drawn of the optimal sample size against willingness
to pay, with an interval estimate defined by this range of "95% optimal"
sample sizes. This is compared here for the two methods of determining the optimum: with and without smoothing.
```{r}
p1 <- attr(nbs, "enbsmax") %>%
ggplot(aes(y=nmax, x=k)) +
geom_ribbon(aes(ymin=nlower, ymax=nupper), fill="gray") +
geom_line() +
ylim(0,800) +
xlab("Willingness to pay (£)") + ylab("Optimal sample size") +
ggtitle("Unsmoothed")
nbs_smoothed <- enbs(chemo_evsi_or,
costs_setup = c(5000000, 10000000),
costs_pp = c(28000, 42000),
pop = 46000, time = 10, smooth=TRUE)
p2 <- attr(nbs_smoothed, "enbsmax") %>%
ggplot(aes(y=nmax, x=k)) +
geom_ribbon(aes(ymin=nlower, ymax=nupper), fill="gray") +
geom_line() +
ylim(0,800) +
xlab("Willingness to pay (£)") + ylab("Optimal sample size") +
ggtitle("Smoothed")
gridExtra::grid.arrange(p1, p2, nrow=1)
```
# Probability of a cost-effective trial {#probce}
Given the uncertainty about the incident population size and decision time
horizon, we might want to show how the results depend on these. This might be done using a heat map.
The next plot selects a specific willingness to pay (£20,000) and plots
the probability that a study with a sample size of 400 is cost effective,
i.e. the probability that the expected net benefit of sampling is positive.
This is compared on a grid defined by a range of population sizes from 0 to 60000, and a range of decision horizon times from 0 to 10.
```{r}
nbs <- enbs(chemo_evsi_or %>% filter(k==20000, n==400),
costs_setup = c(5000000, 10000000),
costs_pp = c(28000, 42000),
pop = seq(0,60000,length.out=50),
time = seq(0,10,length.out=50))
ggplot(nbs, aes(y=pop, x=time, fill=pce)) +
geom_raster() +
labs(fill="Probability") +
scale_fill_gradient(low="darkblue", high="white") +
xlab("Decision horizon (years)") + ylab("Incident population") +
theme_minimal()
```
/scratch/gouwar.j/cran-all/cranData/voi/inst/doc/plots.Rmd
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## -----------------------------------------------------------------------------
set.seed(1)
nsam <- 10000
inputs <- data.frame(p1 = rnorm(nsam, 1, 1),
p2 = rnorm(nsam, 0, 2))
## -----------------------------------------------------------------------------
outputs_nb <- data.frame(t1 = 0,
t2 = inputs$p1 - inputs$p2)
## -----------------------------------------------------------------------------
outputs_cea <- list(
e = data.frame(t1 = 0, t2 = inputs$p1),
c = data.frame(t1 = 0, t2 = inputs$p2),
k = c(1, 2, 3)
)
## -----------------------------------------------------------------------------
decision_current <- 2
nb_current <- 1
decision_perfect <- ifelse(outputs_nb$t2 < 0, 1, 2)
nb_perfect <- ifelse(decision_perfect == 1, 0, outputs_nb$t2)
(evpi1 <- mean(nb_perfect) - nb_current)
## -----------------------------------------------------------------------------
opp_loss <- nb_perfect - nb_current
mean(opp_loss)
## -----------------------------------------------------------------------------
library(voi)
evpi(outputs_nb)
evpi(outputs_cea)
## -----------------------------------------------------------------------------
prob_correct <- 1 - pnorm(0, 1, sqrt(5))
## -----------------------------------------------------------------------------
mean_truncnorm <- function(mu, sig, lower=-Inf, upper=Inf){
a <- (lower-mu)/sig
b <- (upper-mu)/sig
mu + sig * (dnorm(a) - dnorm(b)) / (pnorm(b) - pnorm(a))
}
enb_correct <- mean_truncnorm(1, sqrt(5), lower=0)
mean_nb_perfect <- enb_correct * prob_correct
(evpi_exact <- mean_nb_perfect - nb_current)
## -----------------------------------------------------------------------------
evppi(outputs_nb, inputs, pars="p1")
evppi(outputs_nb, inputs, pars=c("p1","p2"))
## -----------------------------------------------------------------------------
evppi(outputs_nb, inputs, pars=list("p1","p2"))
evppi(outputs_nb, inputs, pars=list("p1",c("p1","p2")))
## -----------------------------------------------------------------------------
evppi(outputs_cea, inputs, pars=list("p1",c("p1","p2")))
## -----------------------------------------------------------------------------
evppi(outputs_nb, inputs, pars="p1", method="gp", nsim=1000)
## -----------------------------------------------------------------------------
evppi(outputs_nb, inputs, pars="p1", method="earth")
## ----eval=FALSE---------------------------------------------------------------
# install.packages('INLA', repos='https://inla.r-inla-download.org/R/stable')
# install.packages('splancs')
## ----eval=FALSE---------------------------------------------------------------
# evppi(outputs_nb, inputs, pars=c("p1","p2"), method="inla", pfc_struc="iso")
## ----cache=TRUE,message=FALSE-------------------------------------------------
evppi(chemo_nb, chemo_pars, pars=colnames(chemo_pars), method="bart")
evpi(chemo_nb)
## -----------------------------------------------------------------------------
evppi(outputs_nb, inputs, pars=c("p1","p2"), method="gam", gam_formula="s(p1) + s(p2)")
## -----------------------------------------------------------------------------
evppi(outputs_nb, inputs, pars="p1", se=TRUE, B=100)
## -----------------------------------------------------------------------------
evppi(outputs_nb, inputs, pars="p1", n.blocks=20, method="so")
## -----------------------------------------------------------------------------
evppi(outputs_nb, inputs, pars="p1", method="sal")
## -----------------------------------------------------------------------------
model_fn_nb <- function(p1, p2){
c(0, p1 - p2)
}
## -----------------------------------------------------------------------------
model_fn_cea <- function(p1, p2){
rbind(e = c(0, p1),
c = c(0, p2))
}
## -----------------------------------------------------------------------------
par_fn <- function(n){
data.frame(p1 = rnorm(n, 1, 1),
p2 = rnorm(n, 0, 2))
}
## ----eval=FALSE---------------------------------------------------------------
# evppi_mc(model_fn_nb, par_fn, pars="p1", ninner=1000, nouter=100)
## ----eval=FALSE---------------------------------------------------------------
# par_fn_corr <- function(n, p1=NULL){
# p1_new <- if (is.null(p1)) rnorm(n, 1, 1) else p1
# data.frame(p1 = p1_new,
# p2 = rnorm(n, p1_new, 2))
# }
# evppi_mc(model_fn_nb, par_fn_corr, pars="p1", ninner=1000, nouter=100)
## -----------------------------------------------------------------------------
datagen_normal <- function(inputs, n=100, sd=1){
data.frame(xbar = rnorm(nrow(inputs),
mean = inputs[,"p1"],
sd = sd / sqrt(n)))
}
set.seed(1)
evsi(outputs_nb, inputs, datagen_fn = datagen_normal, n=c(10,100,1000))
## -----------------------------------------------------------------------------
evsi(outputs_nb, inputs, study = "normal_known", n=c(100,1000), pars = "p1")
evsi(outputs_cea, inputs, study = "normal_known", n=c(100,1000), pars = "p1")
## -----------------------------------------------------------------------------
likelihood_normal <- function(Y, inputs, n=100, sig=1){
mu <- inputs[,"p1"]
dnorm(Y[,"xbar"], mu, sig/sqrt(n))
}
evsi(outputs_nb, inputs, datagen_fn = datagen_normal, likelihood = likelihood_normal,
n=100, pars = "p1", method="is", nsim=1000)
## -----------------------------------------------------------------------------
evsi(outputs_nb, inputs, study = "normal_known", n=100, pars = "p1", method="is", nsim=1000)
## ----message=FALSE------------------------------------------------------------
evsi(outputs_nb, inputs, study = "normal_known", n=10000, pars = "p1", method="mm", Q=30,
model_fn = model_fn_nb, par_fn = par_fn,
analysis_args = list(prior_mean=1, prior_sd=1, sampling_sd=1, niter=1000))
## -----------------------------------------------------------------------------
analysis_fn <- function(data, args, pars){
dat <- list(y=c(data[,"y1"], data[,"y2"]))
design <- list(n = rep(args$n, 2))
priors <- list(a1=53, b1=60, mu=log(0.54), sigma=0.3)
jagsdat <- c(dat, design, priors)
or_jagsmod <- "
model {
y[1] ~ dbinom(p[1], n[1])
y[2] ~ dbinom(p[2], n[2])
p[1] <- p1
p[2] <- odds[2] / (1 + odds[2])
p1 ~ dbeta(a1, b1)
odds[1] <- p[1] / (1 - p[1])
odds[2] <- odds[1] * exp(logor)
logor ~ dnorm(mu, 1/sigma^2)
}
"
or.jag <- rjags::jags.model(textConnection(or_jagsmod),
data=jagsdat, inits=list(logor=0, p1=0.5), quiet = TRUE)
update(or.jag, 100, progress.bar="none")
sam <- rjags::coda.samples(or.jag, c("logor"), 500, progress.bar="none")
data.frame(logor_side_effects = as.numeric(sam[[1]][,"logor"]))
}
## -----------------------------------------------------------------------------
datagen_fn <- function(inputs, n=100){
p1 <- inputs[,"p_side_effects_t1"]
logor <- inputs[,"logor_side_effects"]
odds1 <- p1 / (1 - p1)
odds2 <- odds1 * exp(logor)
p2 <- odds2 / (1 + odds2)
nsim <- nrow(inputs)
data.frame(y1 = rbinom(nsim, n, p1),
y2 = rbinom(nsim, n, p2))
}
## ----cache=TRUE,message=FALSE,eval=requireNamespace("rjags")------------------
ev <- evsi(outputs=chemo_nb, inputs=chemo_pars,
method="mm",
pars="logor_side_effects",
pars_datagen = c("p_side_effects_t1", "logor_side_effects"),
datagen_fn = datagen_fn, analysis_fn = analysis_fn,
n = 100, Q = 10,
model_fn = chemo_model_lor_nb, par_fn = chemo_pars_fn)
## -----------------------------------------------------------------------------
p1 <- rbeta(10000, 5, 95)
## ----fig.width=7,fig.height=5-------------------------------------------------
beta <- rnorm(10000, 0.8, 0.4)
p2 <- plogis(qlogis(p1) + beta)
plot(density(p1), lwd=2, xlim=c(0,1), main="")
lines(density(p2), col="red", lwd=2)
legend("topright", col=c("black","red"), lwd=c(2,2),
legend=c("Surveyed infection probability", "True infection probability"))
## -----------------------------------------------------------------------------
var(p2)
## -----------------------------------------------------------------------------
inputs <- data.frame(p1, beta)
(evppi_beta <- evppivar(p2, inputs, par="beta"))
(evppi_p1 <- evppivar(p2, inputs, par="p1"))
## -----------------------------------------------------------------------------
sqrt(var(p2)) # or sd(p2)
sqrt(var(p2) - evppi_beta$evppi)
sqrt(var(p2) - evppi_p1$evppi)
## ----fig.width=7,fig.height=5-------------------------------------------------
plot(x=p1, y=p2, pch=".")
mod <- mgcv::gam(p2 ~ te(p1, bs="cr"))
p1fit <- fitted(mod)
lines(sort(p1), p1fit[order(p1)], col="blue")
## -----------------------------------------------------------------------------
p1res <- p2 - p1fit
var(p2) - var(p1res)
## -----------------------------------------------------------------------------
var(p1fit)
## -----------------------------------------------------------------------------
evsivar(p2, inputs, study = "binary", pars="p1", n=c(100,1000,10000))
## -----------------------------------------------------------------------------
inputs_p2 = data.frame(p2 = p2)
evsivar(p2, inputs=inputs_p2, study = "binary", pars="p2", n=c(100, 1000, 10000))
/scratch/gouwar.j/cran-all/cranData/voi/inst/doc/voi.R
---
title: 'voi for Value of Information calculation: package overview'
author: "Christopher Jackson"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
\usepackage[utf8]{inputenc}
%\VignetteIndexEntry{voi for Value of Information calculation: package overview}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
Value of Information methods are a decision-theoretic framework for estimating the expected value of getting more information of particular kinds.
They are used in mathematical and statistical models, where parameters of the models represent quantities that are uncertain, and uncertainty is described by probability distributions.
The following two papers give a thorough recent review of the theory of the methods, and details of how they are applied and interpreted, from the perspective of healthcare decision-making.
[Value of Information Analysis for Research Decisions—An Introduction: Report 1 of the ISPOR Value of Information Analysis Emerging Good Practices Task Force](https://doi.org/10.1016/j.jval.2020.01.001)
[Value of information analytical methods: report 2 of the ISPOR value of information analysis emerging good practices task force](https://doi.org/10.1016/j.jval.2020.01.004)
This document gives a simple overview of how the `voi` package is used to calculate measures of Value of Information. A simple example model is used, but the same methods work in more complex models.
The example model is a model used for _decision making_, which has been the most common application of VoI, e.g. in health economic evaluations.
A later section describes the use of VoI methods for a model that is used for _estimation_ of uncertain quantities, rather than for explicit decision-making. For more information about the theory behind this, see [Jackson et al. 2019](https://doi.org/10.1080/01621459.2018.1562932) and [Jackson et al. 2021](https://doi.org/10.1515/em-2021-0012).
## Simple example model
Suppose we are making a decision between two treatments. Treatment 1 has no costs or effects. Treatment 2 has a _net benefit_ which describes its average costs and effects for a population. We choose Treatment 2 if its _incremental net benefit_, relative to treatment 1, is positive. The incremental net benefit in this simple case is identical to the net benefit of treatment 2, since the net benefit of treatment 1 is zero.
Suppose that the net benefit is simply defined as the difference between two uncertain _parameters_, $y(p_1,p_2) = p_1 - p_2$, where $p_1$ gives the effects, and $p_2$ gives the costs. Our current uncertainty can be described by normal distributions $p_1 \sim N(1,1)$ and $p_2 \sim N(0,2)$.
To make a decision under parameter uncertainty, one option is preferred to another if the _expectation_ of its net benefit, with respect to the uncertainty, is greater. In this case, we choose treatment 2, because the net benefit is distributed as $N(1, \sqrt{1^2+2^2}) = N(1, \sqrt{5})$ which has an expectation of 1, whereas treatment 1 has a known net benefit of zero.
Most of the functions in the `voi` package work with a _random sample_ of model inputs and outputs, generated from "uncertainty analysis", also known as "probabilistic sensitivity analysis" or "probabilistic analysis". For the example model, these are simple to generate, as follows.
### Specifying model inputs
The inputs should be a data frame with one column per parameter and one row per random sample.
```{r}
set.seed(1)
nsam <- 10000
inputs <- data.frame(p1 = rnorm(nsam, 1, 1),
p2 = rnorm(nsam, 0, 2))
```
### Specifying model outputs
The outputs can be supplied in either of two forms.
**Net benefit** form. A data frame with one column per treatment, and one row per random sample, giving the net benefit of each treatment. In this example, the net benefit of treatment 1 is zero.
```{r}
outputs_nb <- data.frame(t1 = 0,
t2 = inputs$p1 - inputs$p2)
```
**Cost-effectiveness analysis** form. This should be a list that includes the following three named elements (in any order)
* `"c"`: a data frame with one column per treatment and one row per random sample, containing sampled values for the expected costs of the treatment.
* `"e"`: a data frame with one column per treatment and one row per random sample, containing sampled values for the expected effects of the treatment.
* `"k"`: a vector of values giving alternative amounts that a decision-maker is willing to pay for one unit of effectiveness, so that the net (monetary) benefit is $e \times k - c$.
In this simple example, the parameter $p_1$ gives the effects, and $p_2$ the costs of treatment 2, and the
net benefit $y = p_1 - p_2$ defined in `outputs_nb` corresponds to a willingness-to-pay of $k=1$. The cost-effectiveness format allows us to compare VoI between different willingness-to-pay values, e.g. values of 1, 2 and 3 here.
```{r}
outputs_cea <- list(
e = data.frame(t1 = 0, t2 = inputs$p1),
c = data.frame(t1 = 0, t2 = inputs$p2),
k = c(1, 2, 3)
)
```
Note that objects returned by the `bcea` function in the [BCEA](https://CRAN.R-project.org/package=BCEA) package satisfy this "cost-effectiveness analysis" format.
## Expected value of perfect information {#evpi}
The _expected value of perfect information_ is the expected net benefit given perfect information minus the expected net benefit given current information.
#### Computation using random sampling
* Given current information, we decided on treatment 2. In the example we know that the expected net benefit under current information is 1, the mean of the distribution of treatment 2's net benefit.
* Random sampling can be used to illustrate how to compute the expected net benefit given perfect information. Each sample of parameter values mimics a situation of decision-making given perfect information, where we know the parameters take these values. For each sample, we compare the corresponding treatment 2 net benefit to the threshold of zero, and prefer treatment 1 if the net benefit is negative, and treatment 2 if the net benefit is positive. The net benefit for each sample is the net benefit of the chosen treatment given the "known" sampled parameter values. The expected net benefit given perfect information is then computed as the average of this sample (`nb_perfect`).
```{r}
decision_current <- 2
nb_current <- 1
decision_perfect <- ifelse(outputs_nb$t2 < 0, 1, 2)
nb_perfect <- ifelse(decision_perfect == 1, 0, outputs_nb$t2)
(evpi1 <- mean(nb_perfect) - nb_current)
```
In practice we would not usually know the exact expectation (under the current uncertainty distribution) of the net benefit for any treatment, so we must compute it as the mean of the random sample. In this case, `colMeans(outputs_nb)` would give a vector of the expected net benefit for each treatment. The maximum of these is the net benefit of the decision we take under current information, which in this case is `r max(colMeans(outputs_nb))`. This would become closer to the exact value of 1, the more random samples are drawn.
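For reference, these sample-based quantities can be computed directly as follows.
```{r}
colMeans(outputs_nb)
max(colMeans(outputs_nb))
```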
An alternative view of EVPI is in terms of _opportunity loss_, which is the net benefit of the better decision we should have made (if we had known the truth), minus the net benefit of the decision we did make. The opportunity loss can be computed at each sample as follows. The EVPI is the mean of the opportunity loss.
```{r}
opp_loss <- nb_perfect - nb_current
mean(opp_loss)
```
#### Using the `voi` package to calculate EVPI
The `voi` package contains a simple function `evpi` to compute the EVPI using the above procedure. The function automatically detects whether your outputs are in net benefit or cost-effectiveness format.
```{r}
library(voi)
evpi(outputs_nb)
evpi(outputs_cea)
```
Note the result is slightly different from `evpi1`, since it uses the sample-based estimate of `r max(colMeans(outputs_nb))` of the expected net benefit under current information, rather than the known expectation of 1.
#### Analytic computation
In this simple example, the EVPI can also be calculated "by hand", because the model just involves normal distributions. The probability that the decision under perfect information agrees with the decision under current information, in this case, is the probability that the true value of a $N(1, \sqrt{5})$ is actually positive.
```{r}
prob_correct <- 1 - pnorm(0, 1, sqrt(5))
```
The mean of `nb_perfect` can then be calculated as the expected net benefit given a correct decision, multiplied by the probability of a correct decision. The former is the mean of the values of `outputs_nb$t2` which are positive, which is the mean of a $N(1,\sqrt{5})$ truncated below at zero. The mean of the truncated normal distribution has a [known analytic form](https://en.wikipedia.org/wiki/Truncated_normal_distribution), represented in the following R function.
```{r}
mean_truncnorm <- function(mu, sig, lower=-Inf, upper=Inf){
a <- (lower-mu)/sig
b <- (upper-mu)/sig
mu + sig * (dnorm(a) - dnorm(b)) / (pnorm(b) - pnorm(a))
}
enb_correct <- mean_truncnorm(1, sqrt(5), lower=0)
mean_nb_perfect <- enb_correct * prob_correct
(evpi_exact <- mean_nb_perfect - nb_current)
```
This is the exact value of the EVPI in this example, which differs slightly from the estimate based on Monte Carlo simulation. Unfortunately most realistic decision-analytic models do not have such a nice form, and we must rely on Monte Carlo methods to calculate the expected value of information.
## Expected value of partial perfect information {#evppi}
The _expected value of partial perfect information_ (EVPPI) for a parameter $\phi$ in a decision-analytic model is the expected value of learning the exact value of that parameter, while the other parameters remain uncertain. $\phi$ can comprise a single scalar parameter, or multiple parameters. If $\phi$ refers to multiple parameters then the EVPPI describes the expected value of learning _all_ of these parameters, often referred to as the _multiparameter_ EVPPI.
The EVPPI is defined as the expected net benefit given perfect knowledge of $\phi$, minus the expected net benefit given current information.
The function `evppi` can be used to compute this.
There are a variety of alternative computational methods implemented in this function. The default methods are based on nonparametric regression, and come from [Strong et al. (2013)](https://doi.org/10.1177/0272989X13505910). If there are four or fewer parameters, then a generalized additive model is used (the default spline model in `gam` from the `mgcv` package). With five or more, then Gaussian process regression is used.
### Invoking the `evppi` function.
To call `evppi`, supply a sample of outputs and inputs (in the same form as defined above) in the first two arguments. The parameter or parameters of interest (whose EVPPI is desired) is supplied in the `"pars"` argument. This can be expressed in various ways.
**(a) As a vector**. The joint EVPPI is computed for all parameters in this vector. If the vector has more than one element, then the function returns the expected value of perfect information on all of these parameters simultaneously (described as the "multiparameter" EVPPI by [Strong et al.](https://doi.org/10.1177/0272989X13505910)).
```{r}
evppi(outputs_nb, inputs, pars="p1")
evppi(outputs_nb, inputs, pars=c("p1","p2"))
```
**(b) As a list**. A separate EVPPI is computed for each element of the list. In the second example below, this is the EVPPI of $p_1$, followed by the multiparameter EVPPI of $(p_1,p_2)$. Note that the multiparameter EVPPI is theoretically the same as the EVPI if, as in this case, the vector includes all of the parameters in the model (though note the difference from EVPI estimates above due to the different computational method).
```{r}
evppi(outputs_nb, inputs, pars=list("p1","p2"))
evppi(outputs_nb, inputs, pars=list("p1",c("p1","p2")))
```
The `evppi` function returns a data frame with columns indicating the parameter (or parameters), and the corresponding EVPPI. If the outputs are in cost-effectiveness analysis format, then a separate column is returned indicating the willingness-to-pay.
```{r}
evppi(outputs_cea, inputs, pars=list("p1",c("p1","p2")))
```
### Changing the default calculation method
The method can be changed by supplying the `method` argument to `evppi`. Some methods have additional options to tune them. For a full list of these options, see `help(evppi)`.
#### Gaussian process regression
(from [Strong et al. (2013)](https://doi.org/10.1177/0272989X13505910)). The number of random samples to use in this computation can be changed using the `nsim` argument, which can be useful for this method as it can be prohibitively slow for large samples. Here the sample of 10000 is reduced to 1000.
```{r}
evppi(outputs_nb, inputs, pars="p1", method="gp", nsim=1000)
```
#### Multivariate adaptive regression splines {#earth}
This is a variant of generalized additive models based on linear splines, which uses a package called [earth](https://CRAN.R-project.org/package=earth).
```{r}
evppi(outputs_nb, inputs, pars="p1", method="earth")
```
While the merits of this method for EVPPI computation have not been systematically investigated, I have found it to be generally faster than comparable `mgcv` spline models, with similar levels of accuracy, when there are around 5 or fewer parameters.
#### INLA method
(from [Heath et al.](https://doi.org/10.1002/sim.6983), [Baio et al.](https://doi.org/10.1007/978-3-319-55718-2) ). This needs the following extra packages to be installed, using the following commands.
```{r,eval=FALSE}
install.packages('INLA', repos='https://inla.r-inla-download.org/R/stable')
install.packages('splancs')
```
It is only applicable to calculating the multiparameter EVPPI for 2 or more parameters.
In this toy example it is overkill, since the two-parameter EVPPI is simply the EVPI, and the default method needs an esoteric tweak (`pfc_struc`) to work.
However it has been found to be more efficient than the Gaussian process method in many other situations. See [Heath et al.](https://doi.org/10.1002/sim.6983), [Baio et al.](https://doi.org/10.1007/978-3-319-55718-2) for more details about implementing and tuning this method.
```{r,eval=FALSE}
evppi(outputs_nb, inputs, pars=c("p1","p2"), method="inla", pfc_struc="iso")
```
#### Bayesian additive regression trees (BART) {#bart}
This is another general nonparametric regression procedure. It is designed for regression with lots of predictors, so it may be particularly efficient for calculating multiparameter EVPPI, as the following demonstration shows.
The `voi` package includes a (fictitious) example health economic model based on a decision tree and Markov model: see the help page `voi::chemo_model`. There are 14 uncertain parameters. Outputs and inputs from probabilistic analysis are stored in the datasets `chemo_nb` (net benefit for willingness-to-pay £20000) and `chemo_pars`. The multiparameter EVPPI for all fourteen of these parameters is by definition equal to the EVPI.
Using the BART estimation method, the EVPPI estimate for all 14 parameters is very close to the estimate of the EVPI, and the computation is quick (about 16 seconds on my laptop; call `evppi(..., verbose=TRUE)` to see the progress of the estimation).
```{r,cache=TRUE,message=FALSE}
evppi(chemo_nb, chemo_pars, pars=colnames(chemo_pars), method="bart")
evpi(chemo_nb)
```
The BART estimation is being performed using the `bart()` function from the [dbarts](https://CRAN.R-project.org/package=dbarts) package, and in this case the function's default settings are used.
While the BART method has not been investigated systematically as a way of estimating EVPPI, these are promising results.
#### Tuning the generalized additive model method
The generalized additive model formula can be changed with the `gam_formula` argument. This is
supplied to the `gam` function from the `mgcv` package. The default formula uses a tensor product, and
if there are more than four parameters, then a basis dimension of 4 terms per parameter is assumed.
A challenge of estimating EVPPI using GAMs is to define a GAM that is sufficiently flexible to represent how
the outputs depend on the inputs, but can also be estimated in practice, given the complexity
of the GAM and the number of random samples available to fit it to.
```{r}
evppi(outputs_nb, inputs, pars=c("p1","p2"), method="gam", gam_formula="s(p1) + s(p2)")
```
Note that if there are spaces in the variable names in `inputs` and `pars`, then for `gam_formula`
the spaces should be converted to underscores, or else an `"unexpected symbol"` error will be returned from `gam`.
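For example (not run, and using a hypothetical parameter called `"relative risk"` in `inputs`), the formula would refer to it as `relative_risk`:
```{r,eval=FALSE}
evppi(outputs_nb, inputs, pars="relative risk", method="gam",
      gam_formula="s(relative_risk)")
```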
A standard error for the EVPPI estimates from the GAM method, resulting from uncertainty about the parameters of the GAM approximation, can be obtained by calling `evppi` with `se=TRUE`. This uses $B$ samples from the distribution of the GAM parameters, thus the standard error can be estimated more accurately by increasing B.
```{r}
evppi(outputs_nb, inputs, pars="p1", se=TRUE, B=100)
```
#### Single-parameter methods
These are only applicable for computing the EVPPI for a single scalar parameter. They are
supplied in the package for academic interest, but for single-parameter EVPPI we have
found it to be sufficiently reliable to use the default GAM method, which requires less tuning than
these methods.
The method of [Strong and Oakley](https://doi.org/10.1177/0272989X12465123):
```{r}
evppi(outputs_nb, inputs, pars="p1", n.blocks=20, method="so")
```
The method of [Sadatsafavi et al.](https://doi.org/10.1016/j.jval.2012.10.018):
```{r}
evppi(outputs_nb, inputs, pars="p1", method="sal")
```
### Traditional Monte Carlo nested loop method {#evppimc}
(see e.g. [Brennan et al.](https://doi.org/10.1177/0272989x07302555))
This is generally too slow to provide reliable EVPPI estimates in realistic models, but is provided in this package for technical completeness.
This method is available in the function `evppi_mc`. It requires the user to supply two functions; one to evaluate the decision-analytic model, and one to generate parameter values.
#### Model evaluation function
This function evaluates the decision-analytic model for specific parameter values. This must have one argument for each parameter. The return value can be in either a "net benefit" form or a "costs and effects" form. The "net benefit" form is a vector giving the net benefit for each decision option.
```{r}
model_fn_nb <- function(p1, p2){
c(0, p1 - p2)
}
```
The "costs and effects" form is a matrix with two rows, and one column for each decision option. The rows give the effects and costs respectively for each decision option. If they have names `"e"` and `"c"` then these are assumed to identify the effects and costs. Otherwise the first row is assumed to contain the effects, and the second the costs.
```{r}
model_fn_cea <- function(p1, p2){
rbind(e = c(0, p1),
c = c(0, p2))
}
```
#### Parameter simulation function
This function generates a random sample of $n$ values from the current (joint) uncertainty distribution of the model parameters. This returns a data frame with $n$ rows and one named column for each parameter.
```{r}
par_fn <- function(n){
data.frame(p1 = rnorm(n, 1, 1),
p2 = rnorm(n, 0, 2))
}
```
#### Invoking `evppi_mc`
These functions are then supplied as arguments to `evppi_mc`, along with the number of samples to draw in the inner and outer loops. 1000 inner samples and 100 outer samples give a reasonable EVPPI estimate in this example, but many more samples may be required for the result to converge to the EVPPI in more complex models.
```{r,eval=FALSE}
evppi_mc(model_fn_nb, par_fn, pars="p1", ninner=1000, nouter=100)
```
#### Accounting for parameter correlation
We may want the EVPPI for a parameter which is correlated with another parameter. To account for this correlation, `par_fn` requires an extra argument or arguments to enable a sample to be drawn from the appropriate conditional distribution. For example, the function below specifies a bivariate
normal distribution for $(p_1,p_2)$ where a correlation is induced by defining $E(p_2|p_1) = p_1$. To draw a sample from the conditional distribution of $p_2$ given $p_1=2$, for example, call `par_fn_corr(1, p1=2)$p2`.
If the argument `p1` is not supplied, then the function should return a sample from the joint distribution marginalised over $p_1$, as in this case where if we do not supply `p1` then a random `p1` is drawn followed by `p2|p1`.
A function of this form should then be passed to `evppi_mc` if the parameters are correlated. This allows `evppi_mc` to draw from the appropriate distribution in the inner loop.
```{r,eval=FALSE}
par_fn_corr <- function(n, p1=NULL){
p1_new <- if (is.null(p1)) rnorm(n, 1, 1) else p1
data.frame(p1 = p1_new,
p2 = rnorm(n, p1_new, 2))
}
evppi_mc(model_fn_nb, par_fn_corr, pars="p1", ninner=1000, nouter=100)
```
## Expected value of sample information {#evsi}
The _expected value of sample information_ is the expected value of collecting a specific amount of data from a study
designed to give information about some model parameter or parameters. It is defined as the expected net benefit given the study data, minus the expected net benefit with current information.
The function `evsi` can be used to calculate this. The default method is based on nonparametric regression (from [Strong et al.](https://dx.doi.org/10.1177%2F0272989X15575286)). This requires the user to either
(a) supply an R function to generate and summarise the study data, or
(b) use one of the built-in study designs, and specify which of the model parameters are informed by this study.
To illustrate how to use `evsi`, suppose we want to collect a sample of $n$ normally-distributed observations in order to get a better estimate of the treatment 2 effectiveness $p_1$. Under current information, $p_1$ is distributed as $N(1,1)$. After collecting the sample, we would expect this distribution to become more precise, hence reduce the chance of making a wrong decision. The EVSI measures the expected improvement in net benefit from this sample.
Denote the study data as $x_1,\ldots,x_n$, and suppose that they are distributed as $x_i \sim N(p_1, \sigma)$. Hence the _mean_ of the sample $\bar{x} = \frac{1}{n}\sum_{i=1}^n x_i$ is a _summary statistic_ containing the information
provided by the data about $p_1$.
The sample mean is distributed as $\bar{x} \sim N(p_1, \sigma / \sqrt{n})$. Suppose for simplicity that the sampling variance $\sigma$ of the data is known to equal 1.
To calculate the EVSI using this method, we generate a sample from the _predictive distribution_ of this summary statistic under current information. This is achieved by generating a value of $p_1$ from its current $N(1,1)$ distribution, followed by a value of $\bar{x}$ from $N(p_1, \sigma / \sqrt{n})$.
### Function to generate study data
The function should generate a sample from the predictive distribution of the summary statistic, given a sample `inputs` from the current uncertainty distribution of the parameters.
`inputs` has the same format as described above, a data frame with one row per sample and one column per parameter.
The function must return a data frame with one row per sample, and one column per parameter that is informed by the study data. Each data frame cell contains a summary statistic for that parameter from a simulated study.
The function `datagen_normal` below does this in a vectorised way for the example. Each row of the returned data frame is based on a different simulated $p_1$ taken from the first column of `inputs`, and contains a summary statistic $\bar{x}$ obtained from a dataset generated conditionally on that value of $p_1$.
The sample size is included as an argument `n` to the data generation function. The names of the returned data frame can be anything (`xbar` was used in this case to be descriptive).
The `evsi` function can then be used to compute the EVSI for a series of different sample sizes from this design. Note how the EVSI converges to the EVPPI as the sample size increases.
```{r}
datagen_normal <- function(inputs, n=100, sd=1){
data.frame(xbar = rnorm(nrow(inputs),
mean = inputs[,"p1"],
sd = sd / sqrt(n)))
}
set.seed(1)
evsi(outputs_nb, inputs, datagen_fn = datagen_normal, n=c(10,100,1000))
```
### Built-in study designs {#evsibuiltin}
The function `datagen_normal` is also included in the `voi` package as a built-in study design. To invoke the `evsi` function for a built-in study design, we have to supply the name of the design (in this case `"normal_known"`) and the name of the parameter or parameters (corresponding to a column of "inputs") which is estimated by the study data.
```{r}
evsi(outputs_nb, inputs, study = "normal_known", n=c(100,1000), pars = "p1")
evsi(outputs_cea, inputs, study = "normal_known", n=c(100,1000), pars = "p1")
```
The known standard deviation defaults to 1, but can be changed, e.g. to 2, by calling `evsi` with an `aux_pars` argument, e.g. `evsi(..., aux_pars=list(sd=2), ...)`.
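For example, a sketch of such a call (not run here):
```{r,eval=FALSE}
evsi(outputs_nb, inputs, study = "normal_known", n = c(100, 1000), pars = "p1",
     aux_pars = list(sd = 2))
```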
Note that the results will be slightly different every time the `evsi` function is invoked with the same arguments, due to Monte Carlo error from generating the data (unless the seed is set with the R function `set.seed` before each invocation).
Other built-in study designs include:
* `"binary"`: A single sample of observations of a binary outcome. Requires one parameter to be specified in `pars`: the probability of the outcome.
* `"trial_binary"`: A two-arm trial with a binary outcome. Requires two parameters to be specified in `pars`: the probability of the outcome in arm 1 and arm 2 respectively. The sample size is the same in each arm, specified in the `n` argument to `evsi`, and the binomial outcomes are returned in the first and second columns respectively.
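As a sketch of the syntax only (not run here, since the toy example above does not contain probability parameters), a call for the two-arm binary design might look as follows, where `prob_t1` and `prob_t2` are hypothetical names of columns of `inputs` containing the outcome probability in each arm.
```{r,eval=FALSE}
evsi(outputs_nb, inputs, study = "trial_binary", n = c(100, 500),
     pars = c("prob_t1", "prob_t2"))
```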
### Importance sampling method
An alternative method comes from [Menzies (2015)](https://doi.org/10.1177/0272989X15583495) and is based on importance sampling. This can be invoked as `evsi(..., method="is")`.
As well as a data generation function in the above format, this also requires the user to supply a _likelihood function_ for the study data.
This is illustrated here for the simple normal example. The likelihood function acts on one row of the data frame $Y$ which is produced by the data generation function, and returns a data frame with number of rows matching the rows of `inputs`. Each row of the returned data frame gives the sampling density for that row of $Y$ given the corresponding parameter values in `inputs`. The corresponding EVSI calculation then involves building a large matrix of likelihoods for combinations of simulated datasets and simulated parameters.
Any user-supplied likelihood function should consistently define the same model for the data as the data generation function (the package does not check this!), and any names of parameters and outputs should match the names defined in `inputs` and the data generation function.
This method is typically slower than the default nonparametric regression method, so it may be worth setting `nsim` to a value lower than the number of samples in `inputs`. Below `nsim=1000` is used so that only 1000 samples are used, instead of the full 10000 samples contained in `inputs`.
```{r}
likelihood_normal <- function(Y, inputs, n=100, sig=1){
mu <- inputs[,"p1"]
dnorm(Y[,"xbar"], mu, sig/sqrt(n))
}
evsi(outputs_nb, inputs, datagen_fn = datagen_normal, likelihood = likelihood_normal,
n=100, pars = "p1", method="is", nsim=1000)
```
Again, this study model is available as a built-in study design, so instead of writing a user-defined likelihood and data generation function, `evsi` can also be invoked with `study="normal_known", method="is"`.
```{r}
evsi(outputs_nb, inputs, study = "normal_known", n=100, pars = "p1", method="is", nsim=1000)
```
### Moment matching method
The moment matching method from [Heath et al.](https://doi.org/10.1177/0272989X17738515) is available as `evsi(..., method="mm")`. This includes the extension of this method to efficiently estimate the EVSI for the same design but with many different sample sizes (from [Heath et al](https://doi.org/10.1177/0272989X19837983)).
Roughly, this method works as follows (see [Heath et al.](https://doi.org/10.1177/0272989X17738515) for full details)
* a small set of values $\theta_q: q = 1,\ldots, Q$ are simulated for the decision model parameters $\theta$ (typically $Q<50$ is sufficient).
* for each $\theta_q$, future study data $x_q|\theta_q$ are generated from the sampling distribution (as specified through `study` for built-in study designs, or `datagen_fn` for custom designs).
* a sample is generated from the posterior distribution of $\theta | x_q$.
* the posterior variance $v_q$ of the decision model net benefit $NB(\theta)$ is deduced from this sample.
* the average posterior variance over all samples $q$ is compared to the prior variance, obtaining an estimate of the proportion of uncertainty explained by the proposed study. This is used as a "shrinkage" factor to modify the regression-based EVPPI computation to produce an estimate of the EVSI.
To use the moment matching method, `evsi` needs to know some information that is not needed by the other EVSI calculation functions. This includes:
* a function `model_fn` to evaluate the decision model, and a function `par_fn` to sample from the distribution of the model parameters under current information. These are supplied in the same form as in the [EVPPI Monte Carlo method function](#evppimc), `evppi_mc`.
* information about how data $x_q$ are analysed to produce the posterior.
For the built-in study designs, nothing extra is needed from the user here, as this information is built in.
For custom study designs specified using `datagen_fn`, an extra function should be defined, and supplied as the `analysis_fn` argument to `evsi(..., method="mm")`. This function should take three arguments:
* `data` a data frame with names matching the output of `datagen_fn`,
* `args` a list of constants for the analysis that the user might want to vary, e.g. prior parameters or options for posterior computation. The user supplies these in `analysis_args`.
* `pars` names of the parameters whose posterior is being sampled.
and return a data frame with each row containing a draw from the posterior distribution of the parameters named in the columns. If specialised Bayesian analysis software such as JAGS or Stan is needed, then this function should wrap around a complete call to this software. An example is given below.
* `analysis_args`: a list of constants that the data analysis function needs to know. This is needed whether or not a built-in design is used. This would typically include parameters that define the prior distributions, and settings to control posterior computation (e.g. number of MCMC iterations). The specific components that are needed in this list depends on the study design, as specified by `analysis_fn` or `study`.
#### Moment matching method: example using a built-in study design
This is the first EVSI example computation [shown above](#evsibuiltin), implemented using the moment matching method with a study sample size of 1000.
Recall the sampling distribution for the study data is a normal with known variance, specified through `study="normal_known"`. The Bayesian inference procedure here is a simple conjugate normal analysis, and is built into the package, so we do not need to supply `analysis_fn`.
However we do need to supply `analysis_args`. The constants required by the conjugate normal analysis are the prior mean and SD of the parameter `p1`, and the sampling SD of an individual-level observation in the study, here $\sigma=1$.
```{r,message=FALSE}
evsi(outputs_nb, inputs, study = "normal_known", n=10000, pars = "p1", method="mm", Q=30,
model_fn = model_fn_nb, par_fn = par_fn,
analysis_args = list(prior_mean=1, prior_sd=1, sampling_sd=1, niter=1000))
```
The estimate of the EVSI is fairly close to the result from the regression method. The moment matching method also computes the regression-based estimate of the EVPPI, so this is returned alongside the EVSI estimate.
A lot of randomness is involved in the computation for this method, thus there is some Monte Carlo error and it is not reproducible unless the seed is set. This error can be controlled somewhat by changing $Q$ (default 30) in the call to `evsi`, the number of posterior samples (default 1000) through the `niter` component of `analysis_args`, and the size of `outputs` and `inputs` (as in most VoI calculation methods).
#### Moment matching method: example using a custom study design
A more complex decision model is included with the `voi` package, see `help(chemo_model)`. In this model, the key uncertain parameters describe the probabilities of an adverse outcome (risk of side-effects), $p_1$ and $p_2$, under the standard of care and some novel treatment respectively. We want to design a study that informs only the _relative_ effect of the new treatment on the risk of that outcome. The _baseline_ risk $p_1$ would be informed by some other source. This might be because we believe that the decision population has the same relative risk as the population in the designed study, but the baseline risk may be different.
To calculate the EVSI for the proposed study, we define two R functions:
(a) a function to simulate study data
(b) a function defining a Bayesian model to analyse the potential study data.
**Analysis function**. First we show the function to analyse the data. We parameterise the relative treatment effect as a log odds ratio $log(p_1/(1-p_1)) - log(p_2/(1-p_2))$, supposing that there is previous information about the likely size of this quantity that can be expressed as a Normal$(\mu,\sigma^2)$ prior. The baseline risk $p_1$ is given a Beta$(a_1,b_1)$ prior. These priors match the current uncertainty distributions used in `par_fn` and used to produce the parameter sample `inputs` supplied to `evsi`.
Since this model does not have an analytic posterior distribution, we use JAGS (via the `rjags` R package) to implement MCMC sampling. It is not necessary to use JAGS however - the analysis function may call any Bayesian modelling software.
The following R function encapsulates how the posterior is obtained from the data in this analysis.
```{r}
analysis_fn <- function(data, args, pars){
dat <- list(y=c(data[,"y1"], data[,"y2"]))
design <- list(n = rep(args$n, 2))
priors <- list(a1=53, b1=60, mu=log(0.54), sigma=0.3)
jagsdat <- c(dat, design, priors)
or_jagsmod <- "
model {
y[1] ~ dbinom(p[1], n[1])
y[2] ~ dbinom(p[2], n[2])
p[1] <- p1
p[2] <- odds[2] / (1 + odds[2])
p1 ~ dbeta(a1, b1)
odds[1] <- p[1] / (1 - p[1])
odds[2] <- odds[1] * exp(logor)
logor ~ dnorm(mu, 1/sigma^2)
}
"
or.jag <- rjags::jags.model(textConnection(or_jagsmod),
data=jagsdat, inits=list(logor=0, p1=0.5), quiet = TRUE)
update(or.jag, 100, progress.bar="none")
sam <- rjags::coda.samples(or.jag, c("logor"), 500, progress.bar="none")
data.frame(logor_side_effects = as.numeric(sam[[1]][,"logor"]))
}
```
A full understanding of JAGS model specification and sampling syntax is beyond the scope of this vignette - we just explain what the `voi` package is expecting. The inputs and outputs of `analysis_fn` take the following form.
* The return value is a data frame with a sample from the posterior of the parameters "learnt" from the study to inform the decision model. There should be one column per parameter learnt. Here we will only be using the information gained about `logor_side_effects`, so there is only one column. The names of the data frame (here `logor_side_effects`) should match the names of arguments to the decision model function supplied as the `model_fn` argument to `evsi` (here this function is `chemo_model_lor_nb`).
* The `data` input argument is a data frame with the outcome data from the study to be analysed. In this example, the study is a trial with two arms, and the study results are `"y1"` and `"y2"`, the number of people experiencing the outcome in each arm. One row of data is sufficient here, but in other cases (e.g. with individual-level data) we might need multiple rows to define the study data.
The names (`"y1"` and `"y2"` here) should match the names of the data frame returned by the function to simulate the study data (`datagen_fn` below).
* The `args` input argument is a list of constants that are needed in the analysis. The sample size of the proposed study, supplied as the `n` argument to `evsi()` is automatically added to this list.
In this example, `n` is the size of each trial arm, and is available to the analysis code as `args$n`. The sample size could also have been hard-coded into the analysis code, but supplying it through `args` is advisable as it allows the EVSI calculation to be defined transparently in the call to `evsi()`, and allows multiple calculations to be easily done for different sample sizes by supplying a vector as the `n` argument to `evsi()`.
Any other constants can be supplied through the `analysis_args` argument to `evsi()`. This might be used for constants that define prior distributions. In this example, these are instead hard-coded inside `analysis_fn`.
* The `pars` argument is not used in this example, but this is a more general way of setting the names of the returned data frame. The names supplied in the `pars` argument to the `evsi()` function will be automatically passed to this argument, so they can be used in naming the returned data frame. We might do this if we wanted to write a generic function to fit a particular Bayesian model, that we could use in different decision models or EVSI calculations. Here we simply hard-coded the names as `logor_side_effects` for clarity.
**Data generation function** As in previous examples, this takes an input data frame `inputs` with parameter values sampled from the current uncertainty distribution, and outputs a data frame with a corresponding sample from the predictive distribution of the data. An additional argument `n` defines the sample size of the study. Note in this example how `inputs` is parameterised in terms of a baseline risk and a log odds ratio, and we combine these to obtain the absolute risk in group 2.
```{r}
datagen_fn <- function(inputs, n=100){
p1 <- inputs[,"p_side_effects_t1"]
logor <- inputs[,"logor_side_effects"]
odds1 <- p1 / (1 - p1)
odds2 <- odds1 * exp(logor)
p2 <- odds2 / (1 + odds2)
nsim <- nrow(inputs)
data.frame(y1 = rbinom(nsim, n, p1),
y2 = rbinom(nsim, n, p2))
}
```
Finally `evsi` is called to perform the EVSI calculation using the moment matching method.
```{r,cache=TRUE,message=FALSE,eval=requireNamespace("rjags")}
ev <- evsi(outputs=chemo_nb, inputs=chemo_pars,
method="mm",
pars="logor_side_effects",
pars_datagen = c("p_side_effects_t1", "logor_side_effects"),
datagen_fn = datagen_fn, analysis_fn = analysis_fn,
n = 100, Q = 10,
model_fn = chemo_model_lor_nb, par_fn = chemo_pars_fn)
```
One aspect of the syntax of the `evsi` call is new for this example. `pars_datagen` identifies which parameters (columns of `inputs`) are required to generate data from the proposed study, and `pars` identifies which parameters are _learned_ from the study. These are different in this example. We need to know the baseline risk `"p_side_effects_t1"` to be able to generate study data, but we then choose to _ignore_ the data that the study provides about this parameter when measuring the value of the study.
(In theory, these names need not be supplied to `evsi` if they are hard-coded into the analysis and data-generating functions, but it is safer in general to supply them, as they are required when using built-in study designs, and it allows the analysis and data-generating functions to be written in an abstract manner that can be re-used for different analyses).
## Value of Information in models for estimation
Suppose that the aim of the analysis is to get a precise estimate of a quantity, rather than to make an explicit decision between policies. VoI methodology can still be used to determine which uncertain parameters the estimate is most sensitive to (EVPPI) or the improvements in precision expected from new data (EVSI). The expected value of information is the expected _reduction in variance_ of the quantity of interest given the further information.
We illustrate with an example. Suppose we want to estimate the prevalence of an infection. There are two sources of information. We have survey data which we think is biased, so that the true infection rate is higher than the rate of infection observed in the survey. We then have an expert judgement about the extent of bias in the data, and uncertainty associated with this.
Firstly, suppose the survey observed 100 people and 5 of them were infected. Using a vague Beta(0,0) prior (flat on the logit scale), the posterior distribution of the survey infection rate $p_1$ is a Beta(5,95). We draw a random sample from this.
```{r}
p1 <- rbeta(10000, 5, 95)
```
Secondly, we guess that the true risk of being infected is twice the risk in the survey population, but we are uncertain about this relative risk. We might approximate our belief by placing a normal prior distribution on the log odds ratio $\beta$, with a mean designed to reflect the doubled relative risk, and a variance that is high but still concentrates the log odds ratio on positive values (so that the true risk is believed to be higher than the surveyed risk). Hence the true infection probability $p_2$ is a function of the two parameters $p_1$ and $\beta$:
$$p_2 = \text{expit}(\text{logit}(p_1) + \beta).$$
We draw random samples from the current belief distributions of $\beta$ and $p_2$, and then graphically
compare the distributions of the infection probability $p_1$ in the survey data (black) and the true infection probability $p_2$ (red).
```{r,fig.width=7,fig.height=5}
beta <- rnorm(10000, 0.8, 0.4)
p2 <- plogis(qlogis(p1) + beta)
plot(density(p1), lwd=2, xlim=c(0,1), main="")
lines(density(p2), col="red", lwd=2)
legend("topright", col=c("black","red"), lwd=c(2,2),
legend=c("Surveyed infection probability", "True infection probability"))
```
The model _output_ quantity is $p_2$, and the model _inputs_ are $p_1$ and $\beta$. We now want to determine the expected value of further information. This is measured in terms of expected _reductions in variance_ of $p_2$. This has a decision-theoretic interpretation, with "loss" being measured by squared error of a parameter estimate compared to the true parameter value. See [Jackson et al. 2019](https://doi.org/10.1080/01621459.2018.1562932).
### EVPI and EVPPI for estimation
The EVPI is trivially $var(p_2)$, the variance under current information, which we can compute from the sample as
```{r}
var(p2)
```
A more interesting quantity is the EVPPI for a parameter. It describes the expected reduction in variance given perfect knowledge of a particular parameter. In this example, we compute the EVPPI for $p_1$ and $\beta$, respectively, defined as
$$EVPPI[p_1] = var(p_2) - E(var(p_2| p_1))$$
$$EVPPI[\beta] = var(p_2) - E(var(p_2 | \beta))$$
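For intuition, before using the package, a crude estimate of $E(var(p_2 | p_1))$ can be obtained directly from the sample, by dividing the sampled values of $p_1$ into bins and averaging the within-bin variance of $p_2$. This is a rough sketch only, not the method used by the package:
```{r}
## divide p1 into 100 equal-probability bins, then average the within-bin
## variance of p2 to approximate E(var(p2 | p1))
bins <- cut(p1, quantile(p1, seq(0, 1, length.out = 101)), include.lowest = TRUE)
var(p2) - mean(tapply(p2, bins, var))  # crude estimate of EVPPI for p1
```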
These can be computed using nonparametric regression, as described in [Jackson et al. 2019](https://doi.org/10.1080/01621459.2018.1562932). This is implemented in the function `evppivar` in the `voi` package.
```{r}
inputs <- data.frame(p1, beta)
(evppi_beta <- evppivar(p2, inputs, par="beta"))
(evppi_p1 <- evppivar(p2, inputs, par="p1"))
```
Hence a slightly greater improvement in variance is expected from knowing the true risk in the biased sample, compared to knowing the relative odds of infection.
These EVPPI values are easier to interpret if they are converted to the scale of a _standard deviation_. If we subtract the EVPPI from the original variance of $p_2$ we get, for example, $E(var(p_2 | p_1))$. The square root of this is an estimate of what the standard deviation of $p_2$ would be if we learnt $p_1$ (note this is not exactly the _expected_ SD remaining, since we cannot swap the order of the square root and expectation operators).
```{r}
sqrt(var(p2)) # or sd(p2)
sqrt(var(p2) - evppi_beta$evppi)
sqrt(var(p2) - evppi_p1$evppi)
```
Hence we would expect to reduce the SD of $p_2$ to around 2/3 of its original value by learning $p_1$ or $\beta$.
### How regression-based EVPPI estimation works
EVPPI is estimated here by _nonparametric regression_ of the output on the input. Recall that the method of [Strong et al. (2013)](https://doi.org/10.1177/0272989X13505910) was used in `evppi` for models with decisions. [Jackson et al. 2019](https://doi.org/10.1080/01621459.2018.1562932) showed how this method applied in a wider class of problems, including models for estimation.
To estimate the expected reduction in variance in $p_2$, given knowledge of $p_1$, the `evppivar` function fits a regression model with `p2` as the outcome, and `p1` as the single predictor. A generalized additive model based on splines is fitted using the `gam` function from the `mgcv` package.
```{r,fig.width=7,fig.height=5}
plot(x=p1, y=p2, pch=".")
mod <- mgcv::gam(p2 ~ te(p1, bs="cr"))
p1fit <- fitted(mod)
lines(sort(p1), p1fit[order(p1)], col="blue")
```
Taking the variance of the _residuals_ from this regression (observed minus fitted values) produces an estimate of $E(var(p_2 | p_1))$, intuitively the variance of $p_2$ that we expect would remain after learning $p_1$, which, when subtracted from the variance of $p_2$, gives the EVPPI.
```{r}
p1res <- p2 - p1fit
var(p2) - var(p1res)
```
This agrees (up to Monte Carlo error) with the value produced by `evppivar`, which is obtained by a closely-related method, as the variance of the fitted values from this regression. This is equal to the total variance minus the variance of the residuals, through the "law of total variance":
$$var(Y) - E_{X}\left[var_{Y| X}(Y |X)\right] = var_{X} \left[E_{Y|X}(Y|X)\right]$$
```{r}
var(p1fit)
```
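As a quick check, the variance of the fitted values and the variance of the residuals should (to a close approximation) add up to the total variance of `p2`:
```{r}
var(p1fit) + var(p1res)
var(p2)
```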
### EVSI for estimation
Now suppose we planned to collect additional survey data on the prevalence of infection.
First suppose that we can collect more data on the biased population that was used to estimate $p_1$. The EVSI can be computed to show the expected value of surveying $n$ individuals, for different sample sizes $n$.
This is achieved using the function `evsivar` in the `voi` package, as follows.
- As in `evppivar`, samples from the prior distributions of the parameters are supplied in the data frame `inputs`.
- `study="binary"` indicates that the proposed study consists of a binary outcome observed from `n` individuals.
- `pars="p1"` indicates which input parameter is informed by the study, in other words, which parameter is assumed to generate the study data.
```{r}
evsivar(p2, inputs, study = "binary", pars="p1", n=c(100,1000,10000))
```
As the proposed sample sizes increase, the expected value of sample information informing $p_1$ converges to the EVPPI, the expected value of perfect information about $p_1$.
Alternatively, suppose we were able to collect information about the _unbiased_ population, whose infection prevalence is $p_2$. That is, suppose we surveyed $n$ individuals, each infected with probability $p_2$. In this case, we can compute the EVSI by using `evsivar` with the model `inputs` defined to equal the model outputs, as follows:
```{r}
inputs_p2 = data.frame(p2 = p2)
evsivar(p2, inputs=inputs_p2, study = "binary", pars="p2", n=c(100, 1000, 10000))
```
The unbiased data are clearly more valuable than the biased data, giving roughly double the variance reduction for the same sample size. As the sample size increases, the value converges to the EVPI, hence in the limit we would eliminate all uncertainty about our quantity of interest $p_2$.
## Expected net benefit of sampling {#enbs}
The `voi` package includes a function `enbs()` to calculate the expected net benefit of sampling for a simple proposed study, given estimates of EVSI, and some other information including study costs and the size of the decision population.
This is described in a [separate vignette](plots.html), which also demonstrates how to plot the results of VoI analyses.
|
/scratch/gouwar.j/cran-all/cranData/voi/inst/doc/voi.Rmd
|
---
title: 'Plots of Value of Information measures'
author: "Christopher Jackson"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
\usepackage[utf8]{inputenc}
%\VignetteIndexEntry{Plots of Value of Information measures}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
The `voi` package functions `evppi()`, `evpi()` and `evsi()` all return data frames in a "tidy" format with one row per VoI estimate. This allows plots to be produced and customised with little further effort using `ggplot2`.
This vignette demonstrates how to illustrate VoI analyses graphically, using an example EVSI results dataset `chemo_evsi_or` supplied with the `voi` package.
We also explain the use of the `enbs()` function to calculate and optimise the expected net benefit of sampling for a simple study design.
Knowledge of [ggplot2](https://ggplot2.tidyverse.org/) is required to be able to adapt these plots for your own analyses and communication needs, but plenty of learning resources are linked from the [ggplot2 home page](https://ggplot2.tidyverse.org/#learning-ggplot2).
Some functions to create similar plots using base R graphics are supplied with the [VoI Book](https://chjackson.github.io/voi/articles/book.html).
```{r,message=FALSE,warning=FALSE}
library(voi)
library(ggplot2)
library(dplyr)
```
# Example EVSI dataset
The example dataset `chemo_evsi_or` included with the package gives the results of an EVSI analysis for the chemotherapy example model.
This object is a data frame with `r nrow(chemo_evsi_or)` rows and three columns, giving the sample size per arm (`n`, from 50 to 1500), willingness-to-pay (`k`, from 10000 to 50000 pounds) and the corresponding EVSI (`evsi`).
```{r}
head(chemo_evsi_or)
```
> **Sidenote:** The EVSI in this example is the expected value of a two-arm trial with a binary outcome of whether a patient experiences side-effects. The trial is only used to inform the log odds ratio of side effects between treatments, and information about the baseline risk of side effects is ignored. Source code for generating this dataset using the moment matching method in `evsi()` is provided [here](https://github.com/chjackson/voi/blob/master/data-raw/chemo_evsi_or.R).
For reference, we also calculate the EVPI for the corresponding willingness-to-pay values, using output from probabilistic analysis of the decision model, as stored in the object `chemo_cea_501` provided with the `voi` package.
```{r}
evpi_df <- evpi(outputs = chemo_cea_501)
```
We also extract the values of the EVPPI (for the log odds ratio) into a data frame of the same format. These EVPPI values are already stored in `chemo_evsi_or`, because they had been
calculated as a by-product of the moment matching method to calculate EVSI.
```{r}
evppi_df <- attr(chemo_evsi_or, "evppi")
```
# EVSI curves
The following `ggplot` code produces curves of EVSI against willingness to pay, with the sample size indicated by a colour gradient, so that different sample sizes appear as curves of different colours. The corresponding curves of EVPI, and EVPPI for the "focal" parameter of interest (the log odds ratio of side effects), are also shown for reference.
```{r}
ggplot(chemo_evsi_or, aes(x=k, y=evsi, group=n, color=n)) +
  geom_line() +
  scale_colour_gradient(low="skyblue", high="darkblue") +
  xlab("Willingness to pay (£)") + ylab("EVSI per person") +
  geom_line(data=evpi_df, aes(x=k, y=evpi),
            color="black", lwd=1.5, inherit.aes = FALSE) +
  geom_line(data=evppi_df, aes(x=k, y=evppi),
            color="darkblue", lwd=1.5, inherit.aes = FALSE) +
  labs(color="Sample size") +
  xlim(0,52000) +
  annotate("text",x=50000,y=125,label="EVPI",hjust=0) +
  annotate("text",x=50000,y=107,label="EVPPI",color="darkblue",hjust=0) +
  annotate("text",x=50000,y=50,label="EVSI",color="darkblue",hjust=0)
```
Or we can adapt the plot to only show a limited number of sample sizes, shown in a discrete legend, and customise the colour range.
```{r}
n_use <- c(250, 500, 1000)
chemo_evsi_or %>%
  filter(n %in% n_use) %>%
  ggplot(aes(x=k, y=evsi, group=n, color=n)) +
  geom_line(lwd=1.5) +
  scale_colour_binned(breaks=n_use,
                      low = "gray80", high = "gray20",
                      guide=guide_legend(title="Sample size",
                                         reverse=TRUE)) +
  xlab("Willingness to pay (£)") + ylab("EVSI (per person)") +
  geom_line(data=evppi_df, aes(x=k, y=evppi),
            color="darkblue", lwd=1.5, lty=3, inherit.aes = FALSE) +
  annotate("text", x=50000, y=130, col="darkblue", label="EVPPI", hjust=1)
```
In a similar way, we can produce curves of EVSI against sample size, with different willingness-to-pay (WTP) values shown by curves of different colours.
To reduce visual clutter, only a limited number of willingness-to-pay values are illustrated. This plot shows how the EVSI converges to the (WTP-dependent) EVPPI for the focal parameter (log odds ratio of side effects) as the sample size increases.
```{r}
chemo_evsi_or %>%
  filter(k %in% seq(10000,50000, by=1000)) %>%
  ggplot(aes(x=n, y=evsi, group=k, color=k)) +
  geom_line() +
  scale_colour_gradient(low="skyblue", high="darkblue") +
  xlab("Sample size") + ylab("EVSI per person") +
  labs(color="Willingness-to-pay (£)")
```
# Expected net benefit of sampling
The `voi` package includes a function `enbs()` to obtain the expected net benefit of sampling for a proposed study of individual participants, given estimates of EVSI. See the help page `enbs()` for full documentation for this function.
> The `enbs()` function supposes that the costs of the study include a fixed setup cost (`costs_setup`), and a cost per participant recruited (`costs_pp`). (Note that if `n` describes the sample size per arm in a two-arm study, `costs_pp` is actually the cost of recruiting two people, one in each arm.) To acknowledge uncertainty, a range of costs may be supplied as two numbers.
These are assumed to describe a 95\% credible interval for the cost, with the point estimate given by the midpoint between these two numbers.
> Additional required information includes the size of the population affected by the decision (`pop`), and a discount rate (`disc`, by default 0.035) to be applied over a time horizon `time`. These may be supplied as vectors, in which case ENBS is calculated for all potential combinations of values ([see below](#probce) for an example).
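For example, a cost range of `c(5000000, 10000000)` has its point estimate at the midpoint, and (under a normality assumption, sketched here for illustration rather than as the exact internal calculation of `enbs()`) an implied standard deviation of about a quarter of the width of the interval:
```{r}
costs_setup <- c(5000000, 10000000)
c(mean = mean(costs_setup),                      # midpoint, used as the point estimate
  sd = diff(costs_setup) / (2 * qnorm(0.975)))   # implied SD if the range is a 95% interval
```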
Here is an example of calling `enbs()`. The result is a data frame
giving the sample size (`n`), willingness-to-pay (`k`), and corresponding
ENBS (`enbs`). The standard deviation (`sd`) describes the uncertainty
about the ENBS estimate due to the uncertain costs. `pce` is the probability
that the study is cost-effective (i.e. has a positive net benefit of sampling).
The other components of this data frame are related to the optimal sample size [(as discussed below)](#optss)
```{r}
nbs <- enbs(chemo_evsi_or,
            costs_setup = c(5000000, 10000000),
            costs_pp = c(28000, 42000),
            pop = 46000,
            time = 10)
nbs %>%
  filter(k==20000) %>%
  head()
```
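For intuition, `pce` corresponds to the probability that a normal distribution with mean `enbs` and standard deviation `sd` lies above zero. A rough check of this interpretation (a sketch of the definition, not necessarily the exact internal computation used by `enbs()`):
```{r, eval=FALSE}
nbs %>%
  filter(k == 20000) %>%
  mutate(pce_check = pnorm(0, mean = enbs, sd = sd, lower.tail = FALSE)) %>%
  head()
```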
The expected net benefit of sampling for a given willingness-to-pay
can then be illustrated by filtering the `nbs` dataframe to this
willingness-to-pay value, then passing it to a simple `ggplot`.
```{r}
nbs %>%
  filter(k==20000) %>%
  ggplot(aes(x=n, y=enbs)) +
  geom_line() +
  xlab("Sample size") + ylab("Expected net benefit of sampling") +
  scale_y_continuous(labels = scales::dollar_format(prefix="£"))
```
> Note the `scales::dollar_format` trick for showing large currency values tidily in the axis labels.
The same colouring techniques used above (using a `group` aesthetic, with `scale_colour_gradient`) might also be used to illustrate the
dependence of the ENBS curve on the willingness-to-pay.
Uncertainty around the ENBS can be illustrated by plotting quantiles of
the ENBS alongside the point estimates. These quantiles can be derived
from the point estimate (`enbs`) and the standard deviation (`sd`) stored in the data frame that we called `nbs`, assuming a normal distribution.
```{r}
nbs %>%
  filter(k==20000) %>%
  mutate(q975 = qnorm(0.975, enbs, sd),
         q75 = qnorm(0.75, enbs, sd),
         q25 = qnorm(0.25, enbs, sd),
         q025 = qnorm(0.025, enbs, sd)) %>%
  ggplot(aes(y=enbs, x=n)) +
  geom_ribbon(aes(ymin=q025, ymax=q975), fill="gray80") +
  geom_ribbon(aes(ymin=q25, ymax=q75), fill="gray50") +
  geom_line() +
  xlab("Sample size") + ylab("Expected net benefit of sampling") +
  scale_y_continuous(labels = scales::dollar_format(prefix="£")) +
  annotate("text",x=1100,y=85000000,label="95% credible interval") +
  annotate("text",x=1250,y=70000000,label="50% credible interval")
```
# Optimal sample size {#optss}
The `enbs()` function also estimates the optimal sample size for each willingness-to-pay value. That is, the sample size which gives the
maximum expected net benefit of sampling. The optimal sample size can be
estimated using two alternative methods.
1. A simple and fast method: just pick the highest ENBS from among the
sample sizes included in the `evsi` object that was supplied to `enbs()`.
2. A more sophisticated (but slower) method uses nonparametric regression
to smooth and interpolate the ENBS estimates. This is useful if the EVSI
has only been computed for a limited number of sample sizes, or if the
EVSI estimates are noisy (which may happen for the regression-based method).
We might judge that the ENBS is a smooth function of sample size, that we could
estimate by regression on the estimates that have been computed.
## Smooth interpolation
Here is a demonstration of how the regression smoothing method works.
* Suppose we have computed EVSI (hence ENBS) for a limited set of sample sizes, say eight, from 100 to 800 by 100, and for a specific willingness to pay (and population size, horizon and discount rate).
* The function `enbs_opt` is used to create from this a smoothed and interpolated dataset (here called `preds`) that contains 701 ENBS estimates, for every integer from 100 to 800. The maximum ENBS can then be found by searching through this interpolated dataset.
```{r}
nbs_subset <- nbs %>%
  filter(k==20000, n %in% seq(100,800,by=100))
eopt <- enbs_opt(nbs_subset, smooth=TRUE, keep_preds=TRUE)
```
The `enbs_opt` function returns the maximum ENBS (`enbsmax`), the optimal
sample size (`nmax`) and an interval estimate (`nlower` to `nupper`) defined
by the lowest and highest sample size that have ENBS within 5% of the optimum.
```{r}
eopt
```
The full interpolated dataset is also returned as the `"preds"` attribute.
```{r}
preds <- attr(eopt, "preds")
head(preds, 2)
```
In this graph, the eight pre-computed ENBS values are shown as points, and the 701 smoothed/interpolated values are shown on a line.
```{r}
ggplot(nbs_subset, aes(x=n, y=enbs)) +
  geom_point() +
  geom_line(data=preds) +
  xlab("Sample size") + ylab("Expected net benefit of sampling") +
  scale_y_continuous(labels = scales::dollar_format(prefix="£")) +
  geom_vline(data=eopt, aes(xintercept=nlower), col="blue", lty=2) +
  geom_vline(data=eopt, aes(xintercept=nmax), col="blue", lty=2) +
  geom_vline(data=eopt, aes(xintercept=nupper), col="blue", lty=2) +
  geom_hline(data=eopt, aes(yintercept=enbsmax), col="blue", lty=2)
```
The maximum, and the range of sample sizes whose ENBS is
within 5% of this maximum, are shown as blue dotted lines.
> **Technical note:** this uses the default spline regression model `s()` from
the `gam` function in the `mgcv` package. The flexibility of the fitted model can be configured by setting the `smooth_df` argument to either `enbs` or `enbs_opt`. This is passed as the `k` argument to the `s()` function,
and defaults to 6 (or the number of sample sizes minus 1, if this is lower than 6).
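For example, a less flexible smooth could be requested as follows (a sketch, assuming the `smooth_df` argument behaves as described above):
```{r, eval=FALSE}
enbs_opt(nbs_subset, smooth = TRUE, smooth_df = 4, keep_preds = TRUE)
```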
`enbs_opt` assumes a single willingness-to-pay (and population size, etc).
In practice you do not need to use `enbs_opt`, unless you want to check the goodness-of-fit of the regression that is fitted. Instead you can just call `enbs(..., smooth=TRUE)`, and this will determine the optimal sample size, using smoothing, for each of multiple willingness-to-pay values (see the `nbs_smoothed` object below for an example).
## Note about uncertainty
In this example, there are a wide range of sample sizes within 5% of the optimum. In practice, the cheapest study, the one at the bottom of this range, might still be deemed acceptable to decision-makers. There is still a lot of unacknowledged uncertainty in this analysis, including about the costs, incident population size, decision horizon, and computational uncertainty in the per-person EVSI estimate. The location of the "exact" maximum of the curve of ENBS versus sample size might be considered a minor issue in the context of these other uncertainties.
## Curve of optimal sample size
For convenience, the `enbs()` function also returns, as the `"enbsmax"` attribute, a data frame with one row per willingness-to-pay (`k`), giving the optimal ENBS (`enbsmax`), the optimal sample size (`nmax`) and an interval estimate for the optimal sample size (`nlower` to `nupper`). An extract is shown here.
```{r}
attr(nbs, "enbsmax") %>%
  filter(k %in% seq(20000,50000,by=10000))
```
This allows a plot to be drawn of the optimal sample size against willingness
to pay, with an interval estimate defined by this range of "95% optimal"
sample sizes. This is compared here for the two methods of determining the optimum: with and without smoothing.
```{r}
p1 <- attr(nbs, "enbsmax") %>%
  ggplot(aes(y=nmax, x=k)) +
  geom_ribbon(aes(ymin=nlower, ymax=nupper), fill="gray") +
  geom_line() +
  ylim(0,800) +
  xlab("Willingness to pay (£)") + ylab("Optimal sample size") +
  ggtitle("Unsmoothed")
nbs_smoothed <- enbs(chemo_evsi_or,
                     costs_setup = c(5000000, 10000000),
                     costs_pp = c(28000, 42000),
                     pop = 46000, time = 10, smooth=TRUE)
p2 <- attr(nbs_smoothed, "enbsmax") %>%
  ggplot(aes(y=nmax, x=k)) +
  geom_ribbon(aes(ymin=nlower, ymax=nupper), fill="gray") +
  geom_line() +
  ylim(0,800) +
  xlab("Willingness to pay (£)") + ylab("Optimal sample size") +
  ggtitle("Smoothed")
gridExtra::grid.arrange(p1, p2, nrow=1)
```
# Probability of a cost-effective trial {#probce}
Given the uncertainty about the incident population size and decision time
horizon, we might want to show how the results depend on these. This might be done using a heat map.
The next plot selects a specific willingness to pay (£20,000) and plots
the probability that a study with a sample size of 400 is cost effective,
i.e. the probability that the expected net benefit of sampling is positive.
This is compared on a grid defined by a range of population sizes from 0 to 60000, and a range of decision horizon times from 0 to 10.
```{r}
nbs <- enbs(chemo_evsi_or %>% filter(k==20000, n==400),
            costs_setup = c(5000000, 10000000),
            costs_pp = c(28000, 42000),
            pop = seq(0,60000,length.out=50),
            time = seq(0,10,length.out=50))
ggplot(nbs, aes(y=pop, x=time, fill=pce)) +
  geom_raster() +
  labs(fill="Probability") +
  scale_fill_gradient(low="darkblue", high="white") +
  xlab("Decision horizon (years)") + ylab("Incident population") +
  theme_minimal()
```
|
/scratch/gouwar.j/cran-all/cranData/voi/vignettes/plots.Rmd
|
---
title: 'voi for Value of Information calculation: package overview'
author: "Christopher Jackson"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
\usepackage[utf8]{inputenc}
%\VignetteIndexEntry{voi for Value of Information calculation: package overview}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
Value of Information methods are a decision-theoretic framework for estimating the expected value of getting more information of particular kinds.
They are used in mathematical and statistical models, where parameters of the models represent quantities that are uncertain, and uncertainty is described by probability distributions.
The following two papers give a thorough recent review of the theory of the methods, and details of how they are applied and interpreted, from the perspective of healthcare decision-making.
[Value of Information Analysis for Research Decisions—An Introduction: Report 1 of the ISPOR Value of Information Analysis Emerging Good Practices Task Force](https://doi.org/10.1016/j.jval.2020.01.001)
[Value of information analytical methods: report 2 of the ISPOR value of information analysis emerging good practices task force](https://doi.org/10.1016/j.jval.2020.01.004)
This document gives a simple overview of how the `voi` package is used to calculate measures of Value of Information. A simple example model is used, but the same methods work in more complex models.
The example model is a model used for _decision making_, which has been the most common application of VoI, e.g. in health economic evaluations.
A later section describes the use of VoI methods for a model that is used for _estimation_ of uncertain quantities, rather than for explicit decision-making. For more information about the theory behind this, see [Jackson et al. 2019](https://doi.org/10.1080/01621459.2018.1562932) and [Jackson et al. 2021](https://doi.org/10.1515/em-2021-0012).
## Simple example model
Suppose we are making a decision between two treatments. Treatment 1 has no costs or effects. Treatment 2 has a _net benefit_ which describes its average costs and effects for a population. We choose Treatment 2 if its _incremental net benefit_, relative to treatment 1, is positive. The incremental net benefit in this simple case is identical to the net benefit of treatment 2, since the net benefit of treatment 1 is zero.
Suppose that the net benefit is simply defined as the difference between two uncertain _parameters_, $y(p_1,p_2) = p_1 - p_2$, where $p_1$ gives the effects, and $p_2$ gives the costs. Our current uncertainty can be described by normal distributions $p_1 \sim N(1,1)$ and $p_2 \sim N(0,2)$.
To make a decision under parameter uncertainty, one option is preferred to another if the _expectation_ of its net benefit, with respect to the uncertainty, is greater. In this case, we choose treatment 2, because the net benefit is distributed as $N(1, \sqrt{1^2+2^2}) = N(1, \sqrt{5})$ which has an expectation of 1, whereas treatment 1 has a known net benefit of zero.
Most of the functions in the `voi` package work with a _random sample_ of model inputs and outputs, generated from "uncertainty analysis", also known as "probabilistic sensitivity analysis" or "probabilistic analysis". For the example model, these are simple to generate, as follows.
### Specifying model inputs
The inputs should be a data frame with one column per parameter and one row per random sample.
```{r}
set.seed(1)
nsam <- 10000
inputs <- data.frame(p1 = rnorm(nsam, 1, 1),
                     p2 = rnorm(nsam, 0, 2))
```
### Specifying model outputs
The outputs can be supplied in either of two forms.
**Net benefit** form. A data frame with one column per treatment, and one row per random sample, giving the net benefit of each treatment. In this example, the net benefit of treatment 1 is zero.
```{r}
outputs_nb <- data.frame(t1 = 0,
                         t2 = inputs$p1 - inputs$p2)
```
**Cost-effectiveness analysis** form. This should be a list that includes the following three named elements (in any order)
* `"c"`: a data frame with one column per treatment and one row per random sample, containing sampled values for the expected costs of the treatment.
* `"e"`: a data frame with one column per treatment and one row per random sample, containing sampled values for the expected effects of the treatment.
* `"k"`: a vector of values giving alternative amounts that a decision-maker is willing to pay for one unit of effectiveness, so that the net (monetary) benefit is $e \times k - c$.
In this simple example, the parameter $p_1$ gives the effects, and $p_2$ the costs of treatment 2, and the
net benefit $y = p_1 - p_2$ defined in `outputs_nb` corresponds to a willingness-to-pay of $k=1$. The cost-effectiveness format allows us to compare VoI between different willingness-to-pay values, e.g. 1, 2 and 3 say here.
```{r}
outputs_cea <- list(
  e = data.frame(t1 = 0, t2 = inputs$p1),
  c = data.frame(t1 = 0, t2 = inputs$p2),
  k = c(1, 2, 3)
)
```
Note that objects returned by the `bcea` function in the [BCEA](https://CRAN.R-project.org/package=BCEA) package satisfy this "cost-effectiveness analysis" format.
## Expected value of perfect information {#evpi}
The _expected value of perfect information_ is the expected net benefit given perfect information minus the expected net benefit given current information.
#### Computation using random sampling
* Given current information, we decided on treatment 2. In the example we know that the expected net benefit under current information is 1, the mean of the distribution of treatment 2's net benefit.
* Random sampling can be used to illustrate how to compute the expected net benefit given perfect information. Each sample of parameter values mimics a situation of decision-making given perfect information, where we know the parameters take these values. For each sample, we compare the corresponding treatment 2 net benefit to the threshold of zero, and prefer treatment 1 if the net benefit is negative, and treatment 2 if the net benefit is positive. The net benefit for each sample is the net benefit of the chosen treatment given the "known" sampled parameter values. The expected net benefit given perfect information is then computed as the average of this sample (`nb_perfect`).
```{r}
decision_current <- 2
nb_current <- 1
decision_perfect <- ifelse(outputs_nb$t2 < 0, 1, 2)
nb_perfect <- ifelse(decision_perfect == 1, 0, outputs_nb$t2)
(evpi1 <- mean(nb_perfect) - nb_current)
```
In practice we would not usually know the exact expectation (under the current uncertainty distribution) of the net benefit for any treatment, so we must compute it as the mean of the random sample. In this case, `colMeans(outputs_nb)` would give a vector of the expected net benefit for each treatment. The maximum of these is the net benefit of the decision we take under current information, which in this case is `r max(colMeans(outputs_nb))`. This would become closer to the exact value of 1, the more random samples are drawn.
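These sample-based quantities can be computed directly as follows.
```{r}
colMeans(outputs_nb)
max(colMeans(outputs_nb))
```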
An alternative view of EVPI is in terms of _opportunity loss_, which is the net benefit of the better decision we should have made (if we had known the truth), minus the net benefit of the decision we did make. The opportunity loss can be computed at each sample as follows. The EVPI is the mean of the opportunity loss.
```{r}
opp_loss <- nb_perfect - nb_current
mean(opp_loss)
```
#### Using the `voi` package to calculate EVPI
The `voi` package contains a simple function `evpi` to compute the EVPI using the above procedure. The function automatically detects whether your outputs are in net benefit or cost-effectiveness format.
```{r}
library(voi)
evpi(outputs_nb)
evpi(outputs_cea)
```
Note the result is slightly different from `evpi1`, since it uses the sample-based estimate of `r max(colMeans(outputs_nb))` of the expected net benefit under current information, rather than the known expectation of 1.
#### Analytic computation
In this simple example, the EVPI can also be calculated "by hand", because the model just involves normal distributions. The probability that the decision under perfect information agrees with the decision under current information is, in this case, the probability that the true net benefit of treatment 2, distributed as $N(1, \sqrt{5})$, is actually positive.
```{r}
prob_correct <- 1 - pnorm(0, 1, sqrt(5))
```
The mean of `nb_perfect` can then be calculated as the expected net benefit given a correct decision, multiplied by the probability of a correct decision. The former is the mean of the values of `outputs_nb$t2` which are positive, which is the mean of a $N(1,\sqrt{5})$ truncated below at zero. The mean of the truncated normal distribution has a [known analytic form](https://en.wikipedia.org/wiki/Truncated_normal_distribution), represented in the following R function.
```{r}
mean_truncnorm <- function(mu, sig, lower=-Inf, upper=Inf){
  a <- (lower-mu)/sig
  b <- (upper-mu)/sig
  mu + sig * (dnorm(a) - dnorm(b)) / (pnorm(b) - pnorm(a))
}
enb_correct <- mean_truncnorm(1, sqrt(5), lower=0)
mean_nb_perfect <- enb_correct * prob_correct
(evpi_exact <- mean_nb_perfect - nb_current)
```
This is the exact value of the EVPI in this example, which differs slightly from the estimate based on Monte Carlo simulation. Unfortunately most realistic decision-analytic models do not have such a nice form, and we must rely on Monte Carlo methods to calculate the expected value of information.
## Expected value of partial perfect information {#evppi}
The _expected value of partial perfect information_ (EVPPI) for a parameter $\phi$ in a decision-analytic model is the expected value of learning the exact value of that parameter, while the other parameters remain uncertain. $\phi$ can comprise a single scalar parameter, or multiple parameters. If $\phi$ refers to multiple parameters then the EVPPI describes the expected value of learning _all_ of these parameters, often referred to as the _multiparameter_ EVPPI.
The EVPPI is defined as the expected net benefit given perfect knowledge of $\phi$, minus the expected net benefit given current information.
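Formally, writing $NB(d, \theta)$ for the net benefit of decision option $d$ given the full set of model parameters $\theta = (\phi, \psi)$ (notation introduced here just to state the definition), the EVPPI can be written
$$EVPPI(\phi) = E_{\phi}\left[\max_d E_{\psi | \phi} NB(d, \theta)\right] - \max_d E_{\theta} NB(d, \theta)$$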
The function `evppi` can be used to compute this.
There are a variety of alternative computational methods implemented in this function. The default methods are based on nonparametric regression, and come from [Strong et al. (2013)](https://doi.org/10.1177/0272989X13505910). If there are four or fewer parameters, then a generalized additive model is used (the default spline model in `gam` from the `mgcv` package). With five or more, then Gaussian process regression is used.
### Invoking the `evppi` function.
To call `evppi`, supply a sample of outputs and inputs (in the same form as defined above) in the first two arguments. The parameter or parameters of interest (whose EVPPI is desired) is supplied in the `"pars"` argument. This can be expressed in various ways.
**(a) As a vector**. The joint EVPPI is computed for all parameters in this vector. If the vector has more than one element, then the function returns the expected value of perfect information on all of these parameters simultaneously (described as the "multiparameter" EVPPI by [Strong et al.](https://doi.org/10.1177/0272989X13505910)).
```{r}
evppi(outputs_nb, inputs, pars="p1")
evppi(outputs_nb, inputs, pars=c("p1","p2"))
```
**(b) As a list**. A separate EVPPI is computed for each element of the list. In the second example below, this is the EVPPI of $p_1$, followed by the multiparameter EVPPI of $(p_1,p_2)$. Note that the multiparameter EVPPI is theoretically the same as the EVPI if, as in this case, the vector includes all of the parameters in the model (though note the difference from the EVPI estimates above due to the different computational method).
```{r}
evppi(outputs_nb, inputs, pars=list("p1","p2"))
evppi(outputs_nb, inputs, pars=list("p1",c("p1","p2")))
```
The `evppi` function returns a data frame with columns indicating the parameter (or parameters), and the corresponding EVPPI. If the outputs are in cost-effectiveness analysis format, then a separate column is returned indicating the willingness-to-pay.
```{r}
evppi(outputs_cea, inputs, pars=list("p1",c("p1","p2")))
```
### Changing the default calculation method
The method can be changed by supplying the `method` argument to `evppi`. Some methods have additional options to tune them. For a full list of these options, see `help(evppi)`.
#### Gaussian process regression
(from [Strong et al. (2013)](https://doi.org/10.1177/0272989X13505910)). The number of random samples to use in this computation can be changed using the `nsim` argument, which is useful for this method since it can be computationally prohibitive for large samples. Here the sample of 10000 is reduced to 1000.
```{r}
evppi(outputs_nb, inputs, pars="p1", method="gp", nsim=1000)
```
#### Multivariate adaptive regression splines {#earth}
This is a variant of generalized additive models based on linear splines, which uses a package called [earth](https://CRAN.R-project.org/package=earth).
```{r}
evppi(outputs_nb, inputs, pars="p1", method="earth")
```
While the merits of this method for EVPPI computation have not been systematically investigated, I have found it to be generally faster than comparable `mgcv` spline models, for similar levels of accuracy, when there are around 5 or fewer parameters.
#### INLA method
(from [Heath et al.](https://doi.org/10.1002/sim.6983), [Baio et al.](https://doi.org/10.1007/978-3-319-55718-2) ). This needs the following extra packages to be installed, using the following commands.
```{r,eval=FALSE}
install.packages('INLA', repos='https://inla.r-inla-download.org/R/stable')
install.packages('splancs')
```
It is only applicable to calculating the multiparameter EVPPI for 2 or more parameters.
In this toy example it is overkill, since the two-parameter EVPPI is simply the EVPI, and the default method needs an esoteric tweak (`pfc_struc`) to work.
However it has been found to be more efficient than the Gaussian process method in many other situations. See [Heath et al.](https://doi.org/10.1002/sim.6983), [Baio et al.](https://doi.org/10.1007/978-3-319-55718-2) for more details about implementing and tuning this method.
```{r,eval=FALSE}
evppi(outputs_nb, inputs, pars=c("p1","p2"), method="inla", pfc_struc="iso")
```
#### Bayesian additive regression trees (BART) {#bart}
This is another general nonparametric regression procedure. It is designed for regression with lots of predictors, so it may be particularly efficient for calculating multiparameter EVPPI, as the following demonstration shows.
The `voi` package includes a (fictitious) example health economic model based on a decision tree and Markov model: see the help page `voi::chemo_model`. There are 14 uncertain parameters. Outputs and inputs from probabilistic analysis are stored in the datasets `chemo_nb` (net benefit for willingness-to-pay £20000) and `chemo_pars`. The multiparameter EVPPI for all fourteen of these parameters is by definition equal to the EVPI.
Using the BART estimation method, the EVPPI estimate for all 14 parameters is very close to the estimate of the EVPI, and the computation is quick (about 16 seconds on my laptop; call `evppi(..., verbose=TRUE)` to see the progress of the estimation).
```{r,cache=TRUE,message=FALSE}
evppi(chemo_nb, chemo_pars, pars=colnames(chemo_pars), method="bart")
evpi(chemo_nb)
```
The BART estimation is being performed using the `bart()` function from the [dbarts](https://CRAN.R-project.org/package=dbarts) package, and in this case the function's default settings are used.
While the BART method has not been investigated systematically as a way of estimating EVPPI, these are promising results.
#### Tuning the generalized additive model method
The generalized additive model formula can be changed with the `gam_formula` argument. This is
supplied to the `gam` function from the `mgcv` package. The default formula uses a tensor product, and
if there are more than four parameters, then a basis dimension of 4 terms per parameter is assumed.
A challenge of estimating EVPPI using GAMs is to define a GAM that is sufficiently flexible to represent how
the outputs depend on the inputs, but can also be estimated in practice, given the complexity
of the GAM and the number of random samples available to fit it to.
```{r}
evppi(outputs_nb, inputs, pars=c("p1","p2"), method="gam", gam_formula="s(p1) + s(p2)")
```
Note that if there are spaces in the variable names in `inputs` and `pars`, then for `gam_formula`
the spaces should be converted to underscores, or else an `"unexpected symbol"` error will be returned from `gam`.
A standard error for the EVPPI estimates from the GAM method, resulting from uncertainty about the parameters of the GAM approximation, can be obtained by calling `evppi` with `se=TRUE`. This uses $B$ samples from the distribution of the GAM parameters, thus the standard error can be estimated more accurately by increasing B.
```{r}
evppi(outputs_nb, inputs, pars="p1", se=TRUE, B=100)
```
#### Single-parameter methods
These are only applicable for computing the EVPPI for a single scalar parameter. They are
supplied in the package for academic interest, but for single-parameter EVPPI we have
found it to be sufficiently reliable to use the default GAM method, which requires less tuning than
these methods.
The method of [Strong and Oakley](https://doi.org/10.1177/0272989X12465123):
```{r}
evppi(outputs_nb, inputs, pars="p1", n.blocks=20, method="so")
```
The method of [Sadatsafavi et al.](https://doi.org/10.1016/j.jval.2012.10.018):
```{r}
evppi(outputs_nb, inputs, pars="p1", method="sal")
```
### Traditional Monte Carlo nested loop method {#evppimc}
(see e.g. [Brennan et al.](https://doi.org/10.1177/0272989x07302555))
This is generally too slow to provide reliable EVPPI estimates in realistic models, but is provided in this package for technical completeness.
This method is available in the function `evppi_mc`. It requires the user to supply two functions; one to evaluate the decision-analytic model, and one to generate parameter values.
#### Model evaluation function
This function evaluates the decision-analytic model for specific parameter values. This must have one argument for each parameter. The return value can be in either a "net benefit" form or a "costs and effects" form. The "net benefit" form is a vector giving the net benefit for each decision option.
```{r}
model_fn_nb <- function(p1, p2){
  c(0, p1 - p2)
}
```
The "costs and effects" form is a matrix with two rows, and one column for each decision option. The rows give the effects and costs respectively for each decision option. If they have names `"e"` and `"c"` then these are assumed to identify the effects and costs. Otherwise the first row is assumed to contain the effects, and the second the costs.
```{r}
model_fn_cea <- function(p1, p2){
  rbind(e = c(0, p1),
        c = c(0, p2))
}
```
#### Parameter simulation function
This function generates a random sample of $n$ values from the current (joint) uncertainty distribution of the model parameters. This returns a data frame with $n$ rows and one named column for each parameter.
```{r}
par_fn <- function(n){
  data.frame(p1 = rnorm(n, 1, 1),
             p2 = rnorm(n, 0, 2))
}
```
#### Invoking `evppi_mc`
These functions are then supplied as arguments to `evppi_mc`, along with the number of samples to draw in the inner and outer loops. 1000 inner samples and 100 outer samples give a reasonable EVPPI estimate in this example, but many more samples may be required for the result to converge to the EVPPI in more complex models.
```{r,eval=FALSE}
evppi_mc(model_fn_nb, par_fn, pars="p1", ninner=1000, nouter=100)
```
#### Accounting for parameter correlation
We may want the EVPPI for a parameter which is correlated with another parameter. To account for this correlation, `par_fn` requires an extra argument or arguments to enable a sample to be drawn from the appropriate conditional distribution. For example, the function below specifies a bivariate
normal distribution for $(p_1,p_2)$ where a correlation is induced by defining $E(p_2|p_1) = p_1$. To draw a sample from the conditional distribution of $p_2$ given $p_1=2$, for example, call `par_fn_corr(1, p1=2)$p2`.
If the argument `p1` is not supplied, then the function should return a sample from the joint distribution marginalised over $p_1$, as in this case where if we do not supply `p1` then a random `p1` is drawn followed by `p2|p1`.
A function of this form should then be passed to `evppi_mc` if the parameters are correlated. This allows `evppi_mc` to draw from the appropriate distribution in the inner loop.
```{r,eval=FALSE}
par_fn_corr <- function(n, p1=NULL){
  p1_new <- if (is.null(p1)) rnorm(n, 1, 1) else p1
  data.frame(p1 = p1_new,
             p2 = rnorm(n, p1_new, 2))
}
evppi_mc(model_fn_nb, par_fn_corr, pars="p1", ninner=1000, nouter=100)
```
## Expected value of sample information {#evsi}
The _expected value of sample information_ is the expected value of collecting a specific amount of data from a study
designed to give information about some model parameter or parameters. It is defined as the expected net benefit given the study data, minus the expected net benefit with current information.
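In the notation used above for the EVPPI, with $X$ denoting the data that would be collected in the proposed study, the EVSI can be written
$$EVSI = E_{X}\left[\max_d E_{\theta | X} NB(d, \theta)\right] - \max_d E_{\theta} NB(d, \theta)$$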
The function `evsi` can be used to calculate this. The default method is based on nonparametric regression (from [Strong et al.](https://dx.doi.org/10.1177%2F0272989X15575286)). This requires the user to either
(a) supply an R function to generate and summarise the study data, or
(b) use one of the built-in study designs, and specify which of the model parameters are informed by this study.
To illustrate how to use `evsi`, suppose we want to collect a sample of $n$ normally-distributed observations in order to get a better estimate of the treatment 2 effectiveness $p_1$. Under current information, $p_1$ is distributed as $N(1,1)$. After collecting the sample, we would expect this distribution to become more precise, hence reduce the chance of making a wrong decision. The EVSI measures the expected improvement in net benefit from this sample.
Denote the study data as $x_1,\ldots,x_n$, and suppose that they are distributed as $x_i \sim N(p_1, \sigma)$. Hence the _mean_ of the sample $\bar{x} = \frac{1}{n}\sum_{i=1}^n x_i$ is a _summary statistic_ containing the information
provided by the data about $p_1$.
The sample mean is distributed as $\bar{x} \sim N(p_1, \sigma / \sqrt{n})$. Suppose for simplicity that the sampling standard deviation $\sigma$ of the data is known to equal 1.
To calculate the EVSI using this method, we generate a sample from the _predictive distribution_ of this summary statistic under current information. This is achieved by generating a value of $p_1$ from its current $N(1,1)$ distribution, followed by a value of $\bar{x}$ from $N(p_1, \sigma / \sqrt{n})$.
### Function to generate study data
The function should generate a sample from the predictive distribution of the summary statistic, given a sample `inputs` from the current uncertainty distribution of the parameters.
`inputs` has the same format as described above, a data frame with one row per sample and one column per parameter.
The function must return a data frame with one row per sample, and one column per parameter that is informed by the study data. Each data frame cell contains a summary statistic for that parameter from a simulated study.
The function `datagen_normal` below does this in a vectorised way for the example. Each row of the returned data frame is based on a different simulated $p_1$ taken from the first column of `inputs`, and contains a summary statistic $\bar{x}$ obtained from a dataset generated conditionally on that value of $p_1$.
The sample size is included as an argument `n` to the data generation function. The names of the returned data frame can be anything (`xbar` was used in this case to be descriptive).
The `evsi` function can then be used to compute the EVSI for a series of different sample sizes from this design. Note how the EVSI converges to the EVPPI as the sample size increases.
```{r}
datagen_normal <- function(inputs, n=100, sd=1){
  data.frame(xbar = rnorm(nrow(inputs),
                          mean = inputs[,"p1"],
                          sd = sd / sqrt(n)))
}
set.seed(1)
evsi(outputs_nb, inputs, datagen_fn = datagen_normal, n=c(10,100,1000))
```
### Built-in study designs {#evsibuiltin}
The function `datagen_normal` is also included in the `voi` package as a built-in study design. To invoke the `evsi` function for a built-in study design, we have to supply the name of the design (in this case `"normal_known"`) and the name of the parameter or parameters (corresponding to a column of "inputs") which is estimated by the study data.
```{r}
evsi(outputs_nb, inputs, study = "normal_known", n=c(100,1000), pars = "p1")
evsi(outputs_cea, inputs, study = "normal_known", n=c(100,1000), pars = "p1")
```
The known standard deviation defaults to 1, but can be changed, e.g. to 2, by calling `evsi` with an `aux_pars` argument, e.g. `evsi(..., aux_pars=list(sd=2), ...)`.
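For example, to illustrate the usage just described:
```{r}
evsi(outputs_nb, inputs, study = "normal_known", n = 100, pars = "p1",
     aux_pars = list(sd = 2))
```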
Note that the results will be slightly different every time the `evsi` function is invoked with the same arguments, due to Monte Carlo error from generating the data (unless the seed is set with the R function `set.seed` before each invocation).
Other built-in study designs include
`"binary"`: A single sample of observations of a binary outcome. Requires one parameter to be specified in `pars`, that is, the probability of the outcome.
`"trial_binary"`: A two-arm trial with a binary outcome. Requires two parameters to be specified in `pars`: the probability of the outcome in arm 1 and 2 respectively. The sample size is the same in each arm, specified in the `n` argument to `evsi`, and the binomial outcomes are returned in the first and second columns respectively.
### Importance sampling method
An alternative method comes from [Menzies (2015)](https://doi.org/10.1177/0272989X15583495) and is based on importance sampling. This can be invoked as `evsi(..., method="is")`.
As well as a data generation function in the above format, this also requires the user to supply a _likelihood function_ for the study data.
This is illustrated here for the simple normal example. The likelihood function acts on one row of the data frame $Y$ which is produced by the data generation function, and returns a value for each row of `inputs` (so the number of values returned matches the number of rows of `inputs`). Each of these values gives the sampling density for that row of $Y$ given the corresponding parameter values in `inputs`. The corresponding EVSI calculation then involves building a large matrix of likelihoods for combinations of simulated datasets and simulated parameters.
Any user-supplied likelihood function should consistently define the same model for the data as the data generation function (the package does not check this!), and any names of parameters and outputs should match the names defined in `inputs` and the data generation function.
This method is typically slower than the default nonparametric regression method, so it may be worth setting `nsim` to a value lower than the number of samples in `inputs`. Below `nsim=1000` is used so that only 1000 samples are used, instead of the full 10000 samples contained in `inputs`.
```{r}
likelihood_normal <- function(Y, inputs, n=100, sig=1){
  mu <- inputs[,"p1"]
  dnorm(Y[,"xbar"], mu, sig/sqrt(n))
}
evsi(outputs_nb, inputs, datagen_fn = datagen_normal, likelihood = likelihood_normal,
     n=100, pars = "p1", method="is", nsim=1000)
```
Again, this study model is available as a built-in study design, so instead of writing a user-defined likelihood and data generation function, `evsi` can also be invoked with `study="normal_known", method="is"`.
```{r}
evsi(outputs_nb, inputs, study = "normal_known", n=100, pars = "p1", method="is", nsim=1000)
```
### Moment matching method
The moment matching method from [Heath et al.](https://doi.org/10.1177/0272989X17738515) is available as `evsi(..., method="mm")`. This includes the extension of this method to efficiently estimate the EVSI for the same design but with many different sample sizes (from [Heath et al](https://doi.org/10.1177/0272989X19837983)).
Roughly, this method works as follows (see [Heath et al.](https://doi.org/10.1177/0272989X17738515) for full details)
* a small set of values $\theta_q: q = 1,\ldots, Q$ are simulated for the decision model parameters $\theta$ (typically $Q<50$ is sufficient).
* for each $\theta_q$, future study data $x_q|\theta_q$ are generated from the sampling distribution (as specified through `study` for built-in study designs, or `datagen_fn` for custom designs).
* a sample is generated from the posterior distribution of $\theta | x_q$.
* the posterior variance $v_q$ of the decision model net benefit $NB(\theta)$ is deduced from this sample.
* the average posterior variance over all samples $q$ is compared to the prior variance, obtaining an estimate of the proportion of uncertainty explained by the proposed study. This is used as a "shrinkage" factor to modify the regression-based EVPPI computation to produce an estimate of the EVSI.
To use the moment matching method, `evsi` needs to know some information that is not needed by the other EVSI calculation methods. This includes:
* a function `model_fn` to evaluate the decision model, and a function `par_fn` to sample from the distribution of the model parameters under current information. These are supplied in the same form as in the [EVPPI Monte Carlo method function](#evppimc), `evppi_mc`.
* information about how data $x_q$ are analysed to produce the posterior.
For the built-in study designs, nothing extra is needed from the user here, as this information is built in.
For custom study designs specified using `datagen_fn`, an extra function should be defined, and supplied as the `analysis_fn` argument to `evsi(..., method="mm")`. This function should take three arguments:
* `data` a data frame with names matching the output of `datagen_fn`,
* `args` a list of constants for the analysis that the user might want to vary, e.g. prior parameters or options for posterior computation. The user supplies these in `analysis_args`.
* `pars` names of the parameters whose posterior is being sampled.
and return a data frame with each row containing a draw from the posterior distribution of the parameters named in the columns. If specialised Bayesian analysis software such as JAGS or Stan is needed, then this function should wrap around a complete call to this software. An example is given below.
* `analysis_args`: a list of constants that the data analysis function needs to know. This is needed whether or not a built-in design is used. This would typically include parameters that define the prior distributions, and settings to control posterior computation (e.g. number of MCMC iterations). The specific components that are needed in this list depends on the study design, as specified by `analysis_fn` or `study`.
#### Moment matching method: example using a built-in study design
This is the first EVSI example computation [shown above](#evsibuiltin), implemented using the moment matching method with a study sample size of 1000.
Recall the sampling distribution for the study data is a normal with known variance, specified through `study="normal_known"`. The Bayesian inference procedure here is a simple conjugate normal analysis, and is built in to the package, so we do not need to supply `analysis_fn`.
However we do need to supply `analysis_args`. The constants required by the conjugate normal analysis are the prior mean and SD of the parameter `p1`, and the sampling SD of an individual-level observation in the study, here $\sigma=1$.
```{r,message=FALSE}
evsi(outputs_nb, inputs, study = "normal_known", n=10000, pars = "p1", method="mm", Q=30,
     model_fn = model_fn_nb, par_fn = par_fn,
     analysis_args = list(prior_mean=1, prior_sd=1, sampling_sd=1, niter=1000))
```
The estimate of the EVSI is fairly close to the result from the regression method. The moment matching method also computes the regression-based estimate of the EVPPI, so this is returned alongside the EVSI estimate.
A lot of randomness is involved in the computation for this method, thus there is some Monte Carlo error and it is not reproducible unless the seed is set. This error can be controlled somewhat by changing $Q$ (default 30) in the call to `evsi`, the number of posterior samples (default 1000) through the `niter` component of `analysis_args`, and the size of `outputs` and `inputs` (as in most VoI calculation methods).
#### Moment matching method: example using a custom study design
A more complex decision model is included with the `voi` package, see `help(chemo_model)`. In this model, the key uncertain parameters describe the probabilities of an adverse outcome (risk of side-effects), $p_1$ and $p_2$, under the standard of care and some novel treatment respectively. We want to design a study that informs only the _relative_ effect of the new treatment on the risk of that outcome. The _baseline_ risk $p_1$ would be informed by some other source. This might be because we believe that the decision population has the same relative risk as the population in the designed study, but the baseline risk may be different.
To calculate the EVSI for the proposed study, we define two R functions:
(a) a function to simulate study data
(b) a function defining a Bayesian model to analyse the potential study data.
**Analysis function**. First we show the function to analyse the data. We parameterise the relative treatment effect as the log odds ratio $\log(p_2/(1-p_2)) - \log(p_1/(1-p_1))$ (so that positive values indicate a higher risk of side effects under the novel treatment), supposing that there is previous information about the likely size of this quantity that can be expressed as a Normal$(\mu,\sigma^2)$ prior. The baseline risk $p_1$ is given a Beta$(a_1,b_1)$ prior. These priors match the current uncertainty distributions used in `par_fn` and used to produce the parameter sample `inputs` supplied to `evsi`.
Since this model does not have an analytic posterior distribution, we use JAGS (via the `rjags` R package) to implement MCMC sampling. It is not necessary to use JAGS however - the analysis function may call any Bayesian modelling software.
The following R function encapsulates how the posterior is obtained from the data in this analysis.
```{r}
analysis_fn <- function(data, args, pars){
dat <- list(y=c(data[,"y1"], data[,"y2"]))
design <- list(n = rep(args$n, 2))
priors <- list(a1=53, b1=60, mu=log(0.54), sigma=0.3)
jagsdat <- c(dat, design, priors)
or_jagsmod <- "
model {
y[1] ~ dbinom(p[1], n[1])
y[2] ~ dbinom(p[2], n[2])
p[1] <- p1
p[2] <- odds[2] / (1 + odds[2])
p1 ~ dbeta(a1, b1)
odds[1] <- p[1] / (1 - p[1])
odds[2] <- odds[1] * exp(logor)
logor ~ dnorm(mu, 1/sigma^2)
}
"
or.jag <- rjags::jags.model(textConnection(or_jagsmod),
data=jagsdat, inits=list(logor=0, p1=0.5), quiet = TRUE)
update(or.jag, 100, progress.bar="none")
sam <- rjags::coda.samples(or.jag, c("logor"), 500, progress.bar="none")
data.frame(logor_side_effects = as.numeric(sam[[1]][,"logor"]))
}
```
A full understanding of JAGS model specification and sampling syntax is beyond the scope of this vignette - we just explain what the `voi` package is expecting. The inputs and outputs of `analysis_fn` take the following form.
* The return value is a data frame with a sample from the posterior of the parameters "learnt" from the study to inform the decision model. There should be one column per parameter learnt. Here we will only be using the information gained about `logor_side_effects`, so there is only one column. The column names of the data frame (here `logor_side_effects`) should match the names of arguments to the decision model function supplied as the `model_fn` argument to `evsi` (here this function is `chemo_model_lor_nb`).
* The `data` input argument is a data frame with the outcome data from the study to be analysed. In this example, the study is a trial with two arms, and the study results are `"y1"` and `"y2"`, the number of people experiencing the outcome in each arm. One row of data is sufficient here, but in other cases (e.g. with individual-level data) we might need multiple rows to define the study data.
The names (`"y1"` and `"y2"` here) should match the names of the data frame returned by the function to simulate the study data (`datagen_fn` below).
* The `args` input argument is a list of constants that are needed in the analysis. The sample size of the proposed study, supplied as the `n` argument to `evsi()` is automatically added to this list.
In this example, `n` is the size of each trial arm, and is available to the analysis code as `args$n`. The sample size could also have been hard-coded into the analysis code, but supplying it through `args` is advisable as it allows the EVSI calculation to be defined transparently in the call to `evsi()`, and allows multiple calculations to be easily done for different sample sizes by supplying a vector as the `n` argument to `evsi()`.
Any other constants can be supplied through the `analysis_args` argument to `evsi()`. This might be used for constants that define prior distributions. In this example, these are instead hard-coded inside `analysis_fn`.
* The `pars` argument is not used in this example, but it provides a more general way of setting the names of the returned data frame. The names supplied in the `pars` argument to the `evsi()` function are automatically passed to this argument, so they can be used to name the returned data frame. We might do this if we wanted to write a generic function to fit a particular Bayesian model that could be reused in different decision models or EVSI calculations (a sketch of this pattern is shown below). Here we simply hard-coded the name as `logor_side_effects` for clarity.
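As an illustration of this pattern (not part of the package), a generic analysis function for a conjugate Beta-Binomial model might name its output using `pars`, so that the same function could be reused wherever a single probability is learnt from binomial data. Here the prior parameters `a` and `b`, the arm size `n` and the posterior sample size `niter` are assumed to be supplied through `analysis_args`, and `data` is assumed to contain a column `y` with the number of events:

```{r,eval=FALSE}
analysis_fn_beta <- function(data, args, pars){
  ## conjugate Beta posterior for a probability, given y events out of n
  post <- rbeta(args$niter,
                shape1 = args$a + data[1, "y"],
                shape2 = args$b + args$n - data[1, "y"])
  setNames(data.frame(post), pars)  # name the output column using `pars`
}
```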
**Data generation function** As in previous examples, this takes an input data frame `inputs` with parameter values sampled from the current uncertainty distribution, and outputs a data frame with a corresponding sample from the predictive distribution of the data. An additional argument `n` defines the sample size of the study. Note in this example how `inputs` is parameterised in terms of a baseline risk and a log odds ratio, and we combine these to obtain the absolute risk in group 2.
```{r}
datagen_fn <- function(inputs, n=100){
p1 <- inputs[,"p_side_effects_t1"]
logor <- inputs[,"logor_side_effects"]
odds1 <- p1 / (1 - p1)
odds2 <- odds1 * exp(logor)
p2 <- odds2 / (1 + odds2)
nsim <- nrow(inputs)
data.frame(y1 = rbinom(nsim, n, p1),
y2 = rbinom(nsim, n, p2))
}
```
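As a quick illustrative check (assuming the `chemo_pars` data frame supplied to `evsi` below contains the two columns used here), we can simulate study data for a few draws from the current parameter distribution:

```{r}
head(datagen_fn(as.data.frame(chemo_pars)[1:5, ], n = 100))
```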
Finally `evsi` is called to perform the EVSI calculation using the moment matching method.
```{r,cache=TRUE,message=FALSE,eval=requireNamespace("rjags")}
ev <- evsi(outputs=chemo_nb, inputs=chemo_pars,
method="mm",
pars="logor_side_effects",
pars_datagen = c("p_side_effects_t1", "logor_side_effects"),
datagen_fn = datagen_fn, analysis_fn = analysis_fn,
n = 100, Q = 10,
model_fn = chemo_model_lor_nb, par_fn = chemo_pars_fn)
```
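The resulting EVSI estimate can then be examined:

```{r,eval=requireNamespace("rjags")}
ev
```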
One aspect of the syntax of the `evsi` call is new for this example. `pars_datagen` identifies which parameters (columns of `inputs`) are required to generate data from the proposed study, and `pars` identifies which parameters are _learned_ from the study. These are different in this example. We need to know the baseline risk `"p_side_effects_t1"` to be able to generate study data, but we then choose to _ignore_ the data that the study provides about this parameter when measuring the value of the study.
(In theory, these names need not be supplied to `evsi` if they are hard-coded into the analysis and data-generating functions, but it is safer in general to supply them, as they are required when using built-in study designs, and it allows the analysis and data-generating functions to be written in an abstract manner that can be re-used for different analyses).
## Value of Information in models for estimation
Suppose that the aim of the analysis is to get a precise estimate of a quantity, rather than to make an explicit decision between policies. VoI methodology can still be used to determine which uncertain parameters the estimate is most sensitive to (EVPPI) or the improvements in precision expected from new data (EVSI). The expected value of information is the expected _reduction in variance_ of the quantity of interest given the further information.
We illustrate with an example. Suppose we want to estimate the prevalence of an infection. There are two sources of information. We have survey data which we think is biased, so that the true infection rate is higher than the rate of infection observed in the survey. We then have an expert judgement about the extent of bias in the data, and uncertainty associated with this.
Firstly, suppose the survey observed 100 people and 5 of them were infected. Using a vague Beta(0,0) prior (flat on the logit scale), the posterior distribution of the survey infection rate $p_1$ is a Beta(5,95). We draw a random sample from this.
```{r}
p1 <- rbeta(10000, 5, 95)
```
Secondly, we guess that the true risk of being infected is twice the risk in the survey population, but we are uncertain about this relative risk. We might approximate our belief by placing a normal prior distribution on the log odds ratio $\beta$, with a mean designed to reflect the doubled relative risk, and a variance that is high but concentrates the relative risk on positive values. Hence the true infection probability $p_2$ is a function of the two parameters $p_1$ and $\beta$:
$$p_2 = \text{expit}(\text{logit}(p_1) + \beta)$$
We draw random samples from the current belief distributions of $\beta$ and $p_2$, and then graphically
compare the distributions of the infection probability $p_1$ in the survey data (black) and the true infection probability $p_2$ (red).
```{r,fig.width=7,fig.height=5}
beta <- rnorm(10000, 0.8, 0.4)
p2 <- plogis(qlogis(p1) + beta)
plot(density(p1), lwd=2, xlim=c(0,1), main="")
lines(density(p2), col="red", lwd=2)
legend("topright", col=c("black","red"), lwd=c(2,2),
legend=c("Surveyed infection probability", "True infection probability"))
```
The model _output_ quantity is $p_2$, and the model _inputs_ are $p_1$ and $\beta$. We now want to determine the expected value of further information. This is measured in terms of expected _reductions in variance_ of $p_2$. This has a decision-theoretic interpretation, with "loss" being measured by squared error of a parameter estimate compared to the true parameter value. See [Jackson et al. 2019](https://doi.org/10.1080/01621459.2018.1562932).
### EVPI and EVPPI for estimation
The EVPI is trivially $var(p_2)$, the variance under current information, which we can compute from the sample as
```{r}
var(p2)
```
A more interesting quantity is the EVPPI for a parameter. It describes the expected reduction in variance given perfect knowledge of a particular parameter. In this example, we compute the EVPPI for $p_1$ and $\beta$, respectively, defined as
$$EVPPI[p_1] = var(p_2) - E(var(p_2| p_1))$$
$$EVPPI[\beta] = var(p_2) - E(var(p_2 | \beta))$$
These can be computed using nonparametric regression, as described in [Jackson et al. 2019](https://doi.org/10.1080/01621459.2018.1562932). This is implemented in the function `evppivar` in the `voi` package.
```{r}
inputs <- data.frame(p1, beta)
(evppi_beta <- evppivar(p2, inputs, par="beta"))
(evppi_p1 <- evppivar(p2, inputs, par="p1"))
```
Hence a slightly greater improvement in variance is expected from knowing the true risk in the biased sample, compared to knowing the relative odds of infection.
These EVPPI values are easier to interpret if they are converted to the scale of a _standard deviation_. If we subtract the EVPPI from the original variance of $p_2$ we get, for example, $E(var(p_2 | p_1))$. The square root of this is an estimate of what the standard deviation of $p_2$ would be if we learnt $p_1$ (note this is not exactly the _expected_ SD remaining, since we cannot swap the order of the square root and expectation operators).
```{r}
sqrt(var(p2)) # or sd(p2)
sqrt(var(p2) - evppi_beta$evppi)
sqrt(var(p2) - evppi_p1$evppi)
```
Hence we would expect to reduce the SD of $p_2$ to around 2/3 of its original value by learning $p_1$ or $\beta$.
### How regression-based EVPPI estimation works
EVPPI is estimated here by _nonparametric regression_ of the output on the input. Recall that the method of [Strong et al. (2013)](https://doi.org/10.1177/0272989X13505910) was used in `evppi` for models with decisions. [Jackson et al. 2019](https://doi.org/10.1080/01621459.2018.1562932) showed how this method applied in a wider class of problems, including models for estimation.
To estimate the expected reduction in variance in $p_2$, given knowledge of $p_1$, the `evppivar` function fits a regression model with `p2` as the outcome, and `p1` as the single predictor. A generalized additive model based on splines is fitted using the `gam` function from the `mgcv` package.
```{r,fig.width=7,fig.height=5}
plot(x=p1, y=p2, pch=".")
mod <- mgcv::gam(p2 ~ te(p1, bs="cr"))
p1fit <- fitted(mod)
lines(sort(p1), p1fit[order(p1)], col="blue")
```
Taking the variance of the _residuals_ from this regression (observed minus fitted values) produces an estimate of $E(var(p_2 | p_1))$: intuitively, the variance expected to remain given knowledge of $p_1$. Subtracting this from the variance of $p_2$ gives the EVPPI.
```{r}
p1res <- p2 - p1fit
var(p2) - var(p1res)
```
This agrees (up to Monte Carlo error) with the value produced by `evppivar`, which is obtained by a closely-related method, as the variance of the fitted values from this regression. This is equal to the total variance minus the variance of the residuals, through the "law of total variance":
$$var(Y) - E_{X}\left[var_{Y| X}(Y |X)\right] = var_{X} \left[E_{Y|X}(Y|X)\right]$$
```{r}
var(p1fit)
```
### EVSI for estimation
Now suppose we planned to collect additional survey data on the prevalence of infection.
First suppose that we can collect more data on the biased population that was used to estimate $p_1$. The EVSI can be computed to show the expected value of surveying $n$ individuals, for different sample sizes $n$.
This is achieved using the function `evsivar` in the `voi` package, as follows.
- As in `evppivar`, samples from the prior distributions of the parameters are supplied in the data frame `inputs`.
- `study="binary"` indicates that the proposed study consists of a binary outcome observed from `n` individuals.
- `pars="p1"` indicates which input parameter is informed by the study, in other words, which parameter is assumed to generate the study data.
```{r}
evsivar(p2, inputs, study = "binary", pars="p1", n=c(100,1000,10000))
```
As the proposed sample sizes increase, the expected value of sample information informing $p_1$ converges to the EVPPI, the expected value of perfect information about $p_1$.
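For comparison, recall the EVPPI for $p_1$ computed above, which these EVSI values approach as the sample size grows:

```{r}
evppi_p1$evppi
```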
Alternatively, suppose we were able to collect information about the _unbiased_ population, whose infection prevalence is $p_2$. That is, suppose we surveyed $n$ individuals, each infected with probability $p_2$. In this case, we can compute the EVSI by using `evsivar` with the model `inputs` defined to equal the model outputs, as follows:
```{r}
inputs_p2 = data.frame(p2 = p2)
evsivar(p2, inputs=inputs_p2, study = "binary", pars="p2", n=c(100, 1000, 10000))
```
The unbiased data are clearly more valuable than the biased data, giving roughly double the variance reduction for the same sample size. As the sample size increases, the value converges to the EVPI, hence (asymptotically) we would eliminate all uncertainty about our quantity of interest $p_2$.
## Expected net benefit of sampling {#enbs}
The `voi` package includes a function `enbs()` to calculate the expected net benefit of sampling for a simple proposed study, given estimates of EVSI, and some other information including study costs and the size of the decision population.
This is described in a [separate vignette](plots.html), which also demonstrates how to plot the results of VoI analyses.
|
/scratch/gouwar.j/cran-all/cranData/voi/vignettes/voi.Rmd
|
#' Returns the total time of audio files in seconds
#'
#' @param x Either a WAV file or a directory containing WAV files.
#' @param filesRange The desired range of directory files (default: \code{NULL}, i.e., all files).
#' @param recursive Logical. Should the listing recurse into directories? (default: \code{FALSE}) Used by \code{base::list.files}.
#' @return A tibble containing file name <chr> and audio time <dbl> in seconds.
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#'   pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' # Tibble containing file name and audio time
#' (at <- voice::audio_time(unique(dirname(path2wav))))
#' str(at)
#' @export
audio_time <- function(x, filesRange = NULL, recursive = FALSE){
# checking if x is a file or directory
if(utils::file_test('-f', x)){
wavDir <- dirname(x)
wavFiles <- x
} else{
wavDir <- x
wavFiles <- dir(wavDir, pattern = '.[Ww][Aa][Vv]$', full.names = TRUE,
recursive = recursive)
}
# filtering by filesRange
if(!is.null(filesRange)){
fullRange <- 1:length(wavFiles)
filesRange <- base::intersect(fullRange, filesRange)
wavFiles <- wavFiles[filesRange]
}
# file_name (no extension)
fn <- unlist(strsplit(basename(wavFiles), '.[Ww][Aa][Vv]$'))
# calculating
if(length(wavFiles) > 0){
a <- lapply(wavFiles, tuneR::readWave)
gl <- lapply(a, voice::get_left)
sr <- lapply(a, voice::get_samp.rate)
le <- lapply(gl, length)
at <- unlist(Map('/', le, sr))
at <- dplyr::bind_cols(file_name = fn, tag_audio_time = at)
return(at)
}
else{
cat('NO WAV FILES IN DIRECTORY!')
}
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/audio_time.R
|
#' Who spoke when?
#'
#' Diarization of WAV audios.
#'
#' @param fromWav Either a file or a directory containing WAV files.
#' @param toRttm A directory to write RTTM files. If the default \code{toRttm = NULL} is used, \code{'./voiceAudios/rttm'} is created and used.
#' @param autoDir Logical. Should the directory tree be created? Default: \code{FALSE}. See 'Details'.
#' @param pycall Python call. See \url{https://github.com/filipezabala/voice} for details.
#' @param token Access token needed to instantiate pretrained speaker diarization pipeline from pyannote.audio. #1. Visit \url{https://hf.co/pyannote/speaker-diarization} and accept user conditions. #2. Visit \url{https://hf.co/pyannote/segmentation} and accept user conditions. #3. Visit \url{https://hf.co/settings/tokens} to create an access token. More details at \url{https://github.com/pyannote/pyannote-audio}.
#' @return RTTM files in NIST standard. See 'voice::read_rttm'.
#' @details When \code{autoDir = TRUE}, the following directories are created: \code{'../mp3'}, \code{'../rttm'}, \code{'../split'} and \code{'../musicxml'}. Use \code{getwd()} to find the parent directory \code{'../'}.
#' @import reticulate
#' @examples
#' \dontrun{
#' library(voice)
#'
#' wavDir <- list.files(system.file('extdata', package = 'wrassp'),
#' pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' voice::diarize(fromWav = unique(dirname(wavDir)),
#' toRttm = tempdir(),
#' token = NULL) # Must enter a token! See documentation.
#'
#' (rttm <- dir(tempdir(), '.[Rr][Tt][Tt][Mm]$', full.names = TRUE))
#' file.info(rttm)
#' }
#' @export
diarize <- function(fromWav, toRttm = NULL, autoDir = FALSE,
pycall = '~/miniconda3/envs/pyvoice38/bin/python3.8',
token = NULL){
if(is.null(token)){
stop('Must enter a token!')
}
if(autoDir){
wavDir <- fromWav[1]
ss <- unlist(strsplit(wavDir, '/'))
parDir <- paste0(ss[-length(ss)], collapse ='/')
mp3Dir <- paste0(parDir, '/mp3')
rttmDir <- paste0(parDir, '/rttm')
splitDir <- paste0(parDir, '/split')
mxmlDir <- paste0(parDir, '/musicxml')
ifelse(!dir.exists(parDir), dir.create(parDir), 'Directory exists!')
ifelse(!dir.exists(wavDir), dir.create(wavDir), 'Directory exists!')
ifelse(!dir.exists(mp3Dir), dir.create(mp3Dir), 'Directory exists!')
ifelse(!dir.exists(rttmDir), dir.create(rttmDir), 'Directory exists!')
ifelse(!dir.exists(splitDir), dir.create(splitDir), 'Directory exists!')
ifelse(!dir.exists(mxmlDir), dir.create(mxmlDir), 'Directory exists!')
}
if(is.null(toRttm)){
toRttm <- rttmDir
}
#TODO: sort (by some rule) files for extraction
# process time
pt0 <- proc.time()
st0 <- Sys.time()
reticulate::use_condaenv(pycall, required = TRUE)
pyannote <- reticulate::import('pyannote.audio')
pipeline <- pyannote$Pipeline$from_pretrained('pyannote/speaker-diarization',
use_auth_token = token)
#TODO: solve 'with closing file handler' issue.
wavFiles <- dir(fromWav, '.[Ww][Aa][Vv]$', full.names = TRUE)
for(i in wavFiles){
diarization <- pipeline(i)
py <- reticulate::import_builtins()
rttmFile <- sub('.[Ww][Aa][Vv]$', '.rttm', i)
rttmBase <- basename(rttmFile)
rttmTo <- paste0(toRttm, '/', rttmBase)
# rttmTo <- normalizePath(paste0(toRttm, rttmBase))
f <- py$open(rttmTo, 'w')
diarization$write_rttm(f)
f$close()
}
print(Sys.time()-st0)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/diarize.R
|
#' Duration of sequences
#'
#' @param x A vector containing symbols and \code{NA}.
#' @param windowShift Window shift to duration in ms (default: 5.0).
#' @return A data frame with duration in number of lines/occurrences (\code{dur_line}), in milliseconds (\code{dur_ms}) and as a proportion of the total (\code{dur_prop}).
#' @examples
#' library(voice)
#' duration(letters)
#' duration(c('a','a','a',letters,'z'))
#'
#' nts <- c('NA','NA','A3','A3','A3','A3','A#3','B3','B3','C4','C4','C4','C4',
#' 'C4','C4','C#4','C4','C4','C4','B3','A#3','NA','NA','NA','NA','NA','NA','NA',
#' 'NA','NA','NA','NA','NA','NA','NA','NA','NA','NA','NA','D4','D4','D4','C#4',
#' 'C#4','C#4','C4','C4','B3','B3','A#3','A#3','A3','A3','G3','G#3','G3','F#3')
#' duration(nts)
#' @export
duration <- function(x, windowShift = 5){
if(!is.factor(x)){
x <- factor(x, levels = unique(x))
}
if(sum(is.na(x))){
x <- factor(x, levels = c(levels(x), NA), exclude = NULL)
}
n <- length(x)
m <- match(x,x)
d <- diff(m)
dur_line <- as.numeric(table(cumsum(abs(d))))
if(x[1] != x[2]){
dur_line <- c(1, dur_line)
} else {
dur_line[1] <- dur_line[1]+1
}
pos <- m[d != 0]
if(d[1] == 0){
pos <- c(pos, n)
}
note <- factor(x[pos], levels = levels(x))
dur_ms <- dur_line*windowShift
dur_prop <- dur_line/sum(dur_line)
dur <- data.frame(note = note, dur_line = dur_line,
dur_ms = dur_ms, dur_prop = dur_prop)
return(dur)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/duration.R
|
#' Enrich RTTM files
#'
#' @description Enrich Rich Transcription Time Marked (RTTM) files obtained from '\code{voice::read_rttm}'.
#' @param listRttm A list containing RTTM files.
#' @param silence.gap The silence gap (in seconds) between adjacent words in a keyword. Rows with \code{tdur <= silence.gap} are removed. (default: \code{0.5})
#' @param as.tibble Logical. Should it return a tibble?
#' @return A list containing either data frames or tibbles obtained from standard RTTM files. See 'voice::read_rttm'.
#' @references \url{https://www.nist.gov/system/files/documents/itl/iad/mig/KWS15-evalplan-v05.pdf}
#' @seealso \code{voice::read_rttm}
#' @examples
#' \donttest{
#' library(voice)
#'
#' url0 <- 'https://raw.githubusercontent.com/filipezabala/voiceAudios/main/rttm/sherlock0.rttm'
#' destfile0 <- paste0(tempdir(), '/sherlock0.rttm')
#' download.file(url0, destfile = destfile0)
#' url1 <- 'https://raw.githubusercontent.com/filipezabala/voiceAudios/main/rttm/sherlock1.rttm'
#' destfile1 <- paste0(tempdir(), '/sherlock1.rttm')
#' download.file(url1, destfile = destfile1)
#'
#' rttm <- voice::read_rttm(dirname(destfile0))
#' (er <- voice::enrich_rttm(rttm))
#' class(er)
#' lapply(er, class)
#' }
#' @export
enrich_rttm <- function(listRttm, silence.gap = 0.5, as.tibble = TRUE){
fltr <- function(x){
f <- cumsum(x$tdur > silence.gap)
f[x$tdur <= silence.gap] <- NA
f <- cbind(f,x)
return(f)
}
rttm <- lapply(listRttm, fltr)
rttm <- lapply(rttm, function(x){cbind(1:nrow(x), x)})
colnames <- c('id', 'id_split', 'type', 'file', 'chnl', 'tbeg', 'tdur',
'ortho', 'stype', 'name', 'conf', 'slat')
rttm <- lapply(rttm, stats::setNames, colnames)
# speakProp <- function(x,y){
# sp <- x/y
# }
if(as.tibble){
rttm <- lapply(rttm, dplyr::as_tibble)
}
return(rttm)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/enrich_rttm.R
|
#' Expand model
#'
#' @description Expand model given \code{y} and \code{x} variables.
#' @param y The Y variable.
#' @param x The X variables.
#' @param k Number of additive components.
#' @return A \code{char} vector containing the expanded models.
#' @examples
#' library(voice)
#'
#' expand_model('y', LETTERS[1:4], 1)
#' expand_model('y', LETTERS[1:4], 2)
#' expand_model('y', LETTERS[1:4], 3)
#' expand_model('y', LETTERS[1:4], 4)
#'
#' # multiple models using apply functions
#' nx <- 10 # number of X variables to be used
#' models <- lapply(1:nx, expand_model, y = 'y', x = LETTERS[1:nx])
#' names(models) <- 1:nx
#' models
#' sum(sapply(models, length)) # total of models
#' @export
expand_model <- function(y, x, k){
y <- paste0(y, ' ~ ')
comb <- utils::combn(x, k)
if(k == 1){
models <- unlist(lapply(y, paste0, comb))
}
if(k > 1){
nc <- ncol(comb)
pred <- vector('list', nc)
for(j in 1:nc){
pred[[j]] <- paste0(comb[,j], collapse = ' + ')
}
models <- unlist(lapply(y, paste0, pred))
}
return(models)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/expand_model.R
|
#' Extract audio features
#' @description Extracts features from WAV audio files.
#' @param x A vector containing either files or directories of audio files in WAV format.
#' @param features Vector of features to be extracted. (Default: \code{'f0','fmt','rf','rcf','rpf','rfc','mfcc'}). The \code{'fmt_praat'} feature may take long time processing. The following features may contain a variable number of columns: \code{'cep'}, \code{'dft'}, \code{'css'} and \code{'lps'}.
#' @param filesRange The desired range of directory files (Default: \code{NULL}, i.e., all files). Should only be used when all the WAV files are in the same folder.
#' @param sex \code{= <code>} set sex specific parameters where <code> = \code{'f'}[emale], \code{'m'}[ale] or \code{'u'}[nknown] (Default: \code{'u'}). Used as 'gender' by \code{wrassp::ksvF0}, \code{wrassp::forest} and \code{wrassp::mhsF0}.
#' @param windowShift \code{= <dur>} set analysis window shift to <dur>ation in ms (Default: \code{5.0}). Used by \code{wrassp::ksvF0}, \code{wrassp::forest}, \code{wrassp::mhsF0}, \code{wrassp::zcrana}, \code{wrassp::rfcana}, \code{wrassp::acfana}, \code{wrassp::cepstrum}, \code{wrassp::dftSpectrum}, \code{wrassp::cssSpectrum} and \code{wrassp::lpsSpectrum}.
#' @param numFormants \code{= <num>} <num>ber of formants (Default: \code{8}). Used by \code{wrassp::forest}.
#' @param numcep Number of Mel-frequency cepstral coefficients (cepstra) to return (Default: \code{12}). Used by \code{tuneR::melfcc}.
#' @param dcttype Type of DCT used. \code{'t1'} or \code{'t2'}, \code{'t3'} for HTK \code{'t4'} for feacalc (Default: \code{'t2'}). Used by \code{tuneR::melfcc}.
#' @param fbtype Auditory frequency scale to use: \code{'mel'}, \code{'bark'}, \code{'htkmel'}, \code{'fcmel'} (Default: \code{'mel'}). Used by \code{tuneR::melfcc}.
#' @param resolution \code{= <freq>} set FFT length to the smallest value which results in a frequency resolution of <freq> Hz or better (Default: \code{40.0}). Used by \code{wrassp::cssSpectrum}, \code{wrassp::dftSpectrum} and \code{wrassp::lpsSpectrum}.
#' @param usecmp Logical. Apply equal-loudness weighting and cube-root compression (PLP instead of LPC) (Default: \code{FALSE}). Used by \code{tuneR::melfcc}.
#' @param mc.cores Number of cores to be used in parallel processing. (Default: \code{1})
#' @param full.names Logical. If \code{TRUE}, the directory path is prepended to the file names to give a relative file path. If \code{FALSE}, the file names (rather than paths) are returned. (Default: \code{TRUE}) Used by \code{base::list.files}.
#' @param recursive Logical. Should the listing recurse into directories? (Default: \code{FALSE}) Used by \code{base::list.files}.
#' @param check.mono Logical. Check if the WAV file is mono. (Default: \code{FALSE})
#' @param stereo2mono (Experimental) Logical. Should files be converted from stereo to mono? (Default: \code{FALSE})
#' @param overwrite (Experimental) Logical. Should converted files be overwritten? If not, the file gets the suffix \code{_mono}. (Default: \code{FALSE})
#' @param freq Frequency in Hz to write the converted files when \code{stereo2mono=TRUE}. (Default: \code{44100})
#' @param round.to Number of decimal places to round to. (Default: \code{NULL})
#' @param verbose Logical. Should the running status be showed? (Default: \code{FALSE})
#' @param pycall Python call. See \url{https://github.com/filipezabala/voice} for details.
#' @return A Media data frame containing the selected features.
#' @details The feature 'df' corresponds to 'formant dispersion' (df2:df8) by Fitch (1997), 'pf' to 'formant position' (pf1:pf8) by Puts, Apicella & Cárdenas (2011), 'rf' to 'formant removal' (rf1:rf8) by Zabala (2023), 'rcf' to 'formant cumulated removal' (rcf2:rcf8) by Zabala (2023) and 'rpf' to 'formant position removal' (rpf2:rpf8) by Zabala (2023).
#' @references Levinson N. (1946). The Wiener (root mean square) error criterion in filter design and prediction. Journal of Mathematics and Physics, 25(1-4), 261–278. (\doi{10.1002/SAPM1946251261})
#'
#' Durbin J. (1960). “The fitting of time-series models.” Revue de l’Institut International de Statistique, pp. 233–244. (\url{https://www.jstor.org/stable/1401322})
#'
#' Cooley J.W., Tukey J.W. (1965). “An algorithm for the machine calculation of complex Fourier series.” Mathematics of computation, 19(90), 297–301. (\url{https://www.ams.org/journals/mcom/1965-19-090/S0025-5718-1965-0178586-1/})
#'
#' Wasson D., Donaldson R. (1975). “Speech amplitude and zero crossings for automated identification of human speakers.” IEEE Transactions on Acoustics, Speech, and Signal Processing, 23(4), 390–392. (\url{https://ieeexplore.ieee.org/document/1162690})
#'
#' Allen J. (1977). “Short term spectral analysis, synthesis, and modification by discrete Fourier transform.” IEEE Transactions on Acoustics, Speech, and Signal Processing, 25(3), 235– 238. (\url{https://ieeexplore.ieee.org/document/1162950})
#'
#' Schäfer-Vincent K. (1982). "Significant points: Pitch period detection as a problem of segmentation." Phonetica, 39(4-5), 241–253. (\doi{10.1159/000261665} )
#'
#' Schäfer-Vincent K. (1983). "Pitch period detection and chaining: Method and evaluation." Phonetica, 40(3), 177–202. (\doi{10.1159/000261691})
#'
#' Ephraim Y., Malah D. (1984). “Speech enhancement using a minimum-mean square error short-time spectral amplitude estimator.” IEEE Transactions on acoustics, speech, and signal processing, 32(6), 1109–1121. (\url{https://ieeexplore.ieee.org/document/1164453})
#'
#' Delsarte P., Genin Y. (1986). “The split Levinson algorithm.” IEEE transactions on acoustics, speech, and signal processing, 34(3), 470–478. (\url{https://ieeexplore.ieee.org/document/1164830})
#'
#' Jackson J.C. (1995). "The Harmonic Sieve: A Novel Application of Fourier Analysis to Machine Learning Theory and Practice." Technical report, Carnegie-Mellon University, Pittsburgh PA, School of Computer Science. (\url{https://apps.dtic.mil/sti/pdfs/ADA303368.pdf})
#'
#' Fitch, W.T. (1997) "Vocal tract length and formant frequency dispersion correlate with body size in rhesus macaques." J. Acoust. Soc. Am. 102, 1213 – 1222. (\doi{10.1121/1.421048})
#'
#' Boersma P., van Heuven V. (2001). Praat, a system for doing phonetics by computer. Glot. Int., 5(9/10), 341–347. (\url{https://www.fon.hum.uva.nl/paul/papers/speakUnspeakPraat_glot2001.pdf})
#'
#' Ellis DPW (2005). “PLP and RASTA (and MFCC, and inversion) in Matlab.” Online web resource. (\url{https://www.ee.columbia.edu/~dpwe/resources/matlab/rastamat/})
#'
#' Puts, D.A., Apicella, C.L., Cardenas, R.A. (2012) "Masculine voices signal men's threat potential in forager and industrial societies." Proc. R. Soc. B Biol. Sci. 279, 601–609. (\doi{10.1098/rspb.2011.0829})
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#' pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' # minimal usage
#' M1 <- extract_features(path2wav)
#' M2 <- extract_features(dirname(path2wav))
#' identical(M1,M2)
#' table(basename(M1$wav_path))
#'
#' # limiting filesRange
#' M3 <- extract_features(path2wav, filesRange = 3:6)
#' table(basename(M3$wav_path))
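#'
#' # illustrative extension (not in the original examples): requesting derived
#' # features such as formant dispersion ('df') and formant position ('pf'),
#' # which also require 'f0' and 'fmt' to be extracted
#' M4 <- extract_features(path2wav, features = c('f0','fmt','df','pf'))
#' colnames(M4)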
#' @export
extract_features <- function(x,
features = c('f0', 'fmt', # F0 and formants
'rf', 'rpf', 'rcf', # Formant removals
'rfc', # (R)e(F)lection (C)oefficients
                                      'mfcc'), # (M)el (F)requency (C)epstral (C)oefficients
filesRange = NULL,
sex = 'u',
windowShift = 10,
numFormants = 8,
numcep = 12,
dcttype = c('t2', 't1', 't3', 't4'),
fbtype = c('mel', 'htkmel', 'fcmel', 'bark'),
resolution = 40,
usecmp = FALSE,
mc.cores = 1,
full.names = TRUE,
recursive = FALSE,
check.mono = FALSE,
stereo2mono = FALSE,
overwrite = FALSE,
freq = 44100,
round.to = NULL,
verbose = FALSE,
pycall = '~/miniconda3/envs/pyvoice38/bin/python3.8'){
# time processing
pt0 <- proc.time()
# checking if x is composed by files or directories
if(utils::file_test('-f', x[1])){
wavDir <- lapply(x, dirname)
wavDir <- do.call(rbind, wavDir)
wavDir <- unique(wavDir)
wavFiles <- x
} else{
wavDir <- unique(x)
nWavDir <- length(wavDir)
wavFiles <- lapply(wavDir, list.files, pattern = '[[:punct:]][wW][aA][vV]$',
full.names = full.names, recursive = recursive)
wavFiles <- do.call(rbind, as.list(unlist(wavFiles)))
# wavFiles <- do.call(rbind, wavFiles)
}
# filtering by filesRange
if(!is.null(filesRange)){
fullRange <- 1:length(wavFiles)
filesRange <- base::intersect(fullRange, filesRange)
wavFiles <- wavFiles[filesRange]
}
# number of wav files to be extracted
nWav <- length(wavFiles)
# checking mono/stereo (GO PARALLEL?)
if(check.mono){
mono <- sapply(wavFiles, voice::is_mono)
which.stereo <- which(!mono)
ns <- length(which.stereo)
if(sum(which.stereo)){
if(verbose){
cat('The following', ns, 'audio files are stereo and must be converted to mono: \n',
paste0(names(mono[which.stereo]), sep = '\n'), '\n')
}
if(stereo2mono){
audio <- sapply(wavFiles[which.stereo], tuneR::readWave)
new.mono <- sapply(audio, tuneR::mono)
n <- sapply(wavFiles[which.stereo], nchar)
for(i in 1:ns){
if(overwrite){
seewave::savewav(new.mono[[i]], f=freq,
filename = wavFiles[which.stereo][i])
}
else if(!overwrite){
new.name <- substr(wavFiles[which.stereo][i], 1, n[i]-4)
new.name <- paste0(new.name, '_mono.wav')
seewave::savewav(new.mono[[i]], f=freq, filename = new.name)
}
}
}
}
}
# checking lpa features
f <- vector(length = 3)
f[1] <- sum(features == 'rms')
f[2] <- sum(features == 'gain')
f[3] <- sum(features == 'rfc')
# list of features
features2 <- base::setdiff(features, c('df','pf','rf','rcf','rpf'))
nFe <- length(features2)
ifelse(sum(f) == 0, ind1 <- 0, ind1 <- 1)
ifelse(sum(features == 'mfcc'), ind2 <- 0, ind2 <- 1)
features.list.temp <- vector('list', nFe-sum(f)+ind1+ind2)
features.list <- vector('list', nFe)
length.list <- vector('list', nFe)
i.temp <- 0
i <- 0
# 1. F0 analysis of the signal via Schafer-Vincent (1983), wrassp::ksvF0
if('f0' %in% features){
i.temp <- i.temp+1
i <- i+1
features.list.temp[[i.temp]] <- parallel::mclapply(wavFiles,
wrassp::ksvF0,
gender = sex,
toFile = FALSE,
windowShift = windowShift,
mc.cores = mc.cores)
names(features.list.temp)[i.temp] <- 'f0'
names(features.list)[i] <- 'f0'
names(length.list)[i] <- 'f0'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
# 2. F0 analysis of the signal via Jackson (1995) [Michel’s (M)odified (H)armonic (S)ieve algorithm], wrassp::mhsF0
if('f0_mhs' %in% features){
i.temp <- i.temp+1
i <- i+1
features.list.temp[[i.temp]] <- parallel::mclapply(wavFiles,
wrassp::mhsF0,
toFile = FALSE,
gender = sex,
windowShift = windowShift,
mc.cores = mc.cores)
names(features.list.temp)[i.temp] <- 'f0_mhs'
names(features.list)[i] <- 'f0_mhs'
names(length.list)[i] <- 'f0_mhs'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
# 3. F0 analysis of the signal via Boersma (1993)
if('f0_praat' %in% features){
i.temp <- i.temp+1
i <- i+1
# setting environment
reticulate::use_condaenv(pycall, required = TRUE)
parselmouth <- reticulate::import('parselmouth')
# setting structure
names(features.list.temp)[i.temp] <- 'f0_praat'
names(features.list)[i] <- 'f0_praat'
names(length.list)[i] <- 'f0_praat'
features.list.temp[[i.temp]] <- vector('list', nWav)
# F0 extraction
for(j in 1:nWav){
snd <- parselmouth$Sound(wavFiles[j])
pitch <- snd$to_pitch(time_step = windowShift/1000)
interval <- seq(pitch$start_time, pitch$end_time, windowShift/1000)
f0_praat_temp <- sapply(interval, pitch$get_value_at_time)
f0_praat_temp[is.nan(f0_praat_temp)] <- NA
features.list.temp[[i.temp]][[j]] <- f0_praat_temp
}
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]], length))
}
# 4. Formant estimation (f1:f8) via wrassp::forest
if('fmt' %in% features){
i.temp <- i.temp+1
i <- i+1
features.list.temp[[i.temp]] <- parallel::mclapply(wavFiles,
wrassp::forest,
gender = sex,
toFile = FALSE,
windowShift = windowShift,
numFormants = numFormants,
mc.cores = mc.cores)
names(features.list.temp)[i.temp] <- 'fmt'
names(features.list)[i] <- 'fmt'
names(length.list)[i] <- 'fmt'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
# 5. Formant estimation (f1_praat:f8_praat) via Burg algorithm
if('fmt_praat' %in% features){
i.temp <- i.temp+1
i <- i+1
# setting environment
reticulate::use_condaenv(pycall, required = TRUE)
parselmouth <- reticulate::import('parselmouth')
# setting structure
names(features.list.temp)[i.temp] <- 'fmt_praat'
names(features.list)[i] <- 'fmt_praat'
names(length.list)[i] <- 'fmt_praat'
features.list.temp[[i.temp]] <- vector('list', nWav)
# Formant extraction
for(j in 1:nWav){
# formants extraction
snd <- parselmouth$Sound(wavFiles[j])
fmt <- snd$to_formant_burg(time_step = windowShift/1000,
max_number_of_formants = numFormants)
interval <- seq(fmt$start_time, fmt$end_time, windowShift/1000)
#TODO: build with apply
fmt_praat_temp <- matrix(nrow = length(interval), ncol = numFormants)
colnames(fmt_praat_temp) <- paste0('fmt_', 1:numFormants)
for(k in 1:numFormants){
for(l in 1:length(interval)){
fmt_praat_temp[l,k] <- fmt$get_value_at_time(formant_number = as.integer(k),
time = interval[l])
}
}
fmt_praat_temp[is.nan(fmt_praat_temp)] <- NA
features.list.temp[[i.temp]][[j]] <- fmt_praat_temp
}
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]], nrow))
}
# 6. Analysis of the averages of the short-term positive and negative (Z)ero-(C)rossing (R)ates
if('zcr' %in% features){
i.temp <- i.temp+1
i <- i+1
features.list.temp[[i.temp]] <- parallel::mclapply(wavFiles,
wrassp::zcrana,
toFile = FALSE,
windowShift = windowShift,
mc.cores = mc.cores)
names(features.list.temp)[i.temp] <- 'zcr'
names(features.list)[i] <- 'zcr'
names(length.list)[i] <- 'zcr'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
# 7. (L)inear (P)rediction (A)nalysis [rms, gain, rfc]
if('rms' %in% features | 'gain' %in% features | 'rfc' %in% features){
i.temp <- i.temp+1
features.list.temp[[i.temp]] <- parallel::mclapply(wavFiles,
wrassp::rfcana,
toFile = FALSE,
windowShift = windowShift,
mc.cores = mc.cores)
names(features.list.temp)[i.temp] <- 'lpa'
if('rms' %in% features){
i <- i+1
names(features.list)[i] <- 'rms'
names(length.list)[i] <- 'rms'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
if('gain' %in% features){
i <- i+1
names(features.list)[i] <- 'gain'
names(length.list)[i] <- 'gain'
# features.list[[i]] <- dplyr::tibble()
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
if('rfc' %in% features){
i <- i+1
names(features.list)[i] <- 'rfc'
names(length.list)[i] <- 'rfc'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
}
# 8. Analysis of short-term (A)uto(C)orrelation function
if('ac' %in% features){
i.temp <- i.temp+1
i <- i+1
features.list.temp[[i.temp]] <- parallel::mclapply(wavFiles,
wrassp::acfana,
toFile = FALSE,
windowShift = windowShift,
mc.cores = mc.cores)
names(features.list.temp)[i.temp] <- 'ac'
names(features.list)[i] <- 'ac'
names(length.list)[i] <- 'ac'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
# 9. Short-term (CEP)stral analysis
if('cep' %in% features){
i.temp <- i.temp+1
i <- i+1
features.list.temp[[i.temp]] <- parallel::mclapply(wavFiles,
wrassp::cepstrum,
toFile = FALSE,
windowShift = windowShift,
mc.cores = mc.cores)
names(features.list.temp)[i.temp] <- 'cep'
names(features.list)[i] <- 'cep'
names(length.list)[i] <- 'cep'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
# 10. Short-term (DFT) spectral analysis
if('dft' %in% features){
i.temp <- i.temp+1
i <- i+1
features.list.temp[[i.temp]] <- parallel::mclapply(wavFiles,
wrassp::dftSpectrum,
toFile = FALSE,
resolution = resolution,
windowShift = windowShift,
mc.cores = mc.cores)
names(features.list.temp)[i.temp] <- 'dft'
names(features.list)[i] <- 'dft'
names(length.list)[i] <- 'dft'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
# 11. (C)epstral (S)moothed version of dft(S)pectrum
if('css' %in% features){
i.temp <- i.temp+1
i <- i+1
features.list.temp[[i.temp]] <- parallel::mclapply(wavFiles,
wrassp::cssSpectrum,
toFile = FALSE,
resolution = resolution,
windowShift = windowShift,
mc.cores = mc.cores)
names(features.list.temp)[i.temp] <- 'css'
names(features.list)[i] <- 'css'
names(length.list)[i] <- 'css'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
# 12. (L)inear (P)redictive (S)moothed version of dftSpectrum
if('lps' %in% features){
i.temp <- i.temp+1
i <- i+1
features.list.temp[[i.temp]] <- parallel::mclapply(wavFiles,
wrassp::lpsSpectrum,
toFile = FALSE,
resolution = resolution,
windowShift = windowShift,
mc.cores = mc.cores)
names(features.list.temp)[i.temp] <- 'lps'
names(features.list)[i] <- 'lps'
names(length.list)[i] <- 'lps'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]],
wrassp::numRecs.AsspDataObj))
}
# 13. Mel-Frequency Cepstral Coefficients (MFCC)
rWave <- parallel::mclapply(wavFiles, tuneR::readWave, mc.cores = mc.cores)
i.temp <- i.temp+1
features.list.temp[[i.temp]] <- parallel::mclapply(rWave, tuneR::melfcc,
wintime = windowShift/1000,
hoptime = windowShift/1000,
dcttype = dcttype,
numcep = numcep,
fbtype = fbtype,
usecmp = usecmp,
mc.cores = mc.cores)
names(features.list.temp)[i.temp] <- 'mfcc'
if('mfcc' %in% features){
i <- i+1
names(features.list)[i] <- 'mfcc'
names(length.list)[i] <- 'mfcc'
length.list[[i]] <- unlist(lapply(features.list.temp[[i.temp]], nrow))
}
# creating tibbles at features.list
features.list <- lapply(features.list, dplyr::tibble)
# minimum length
n_min <- apply(dplyr::bind_rows(length.list), 1, min)
# concatenating
for(j in 1:nWav){ # upgrade: use bind_rows, foreach
# time processing
pt1 <- proc.time()
if('f0' %in% features){
f0_temp <- as.matrix(features.list.temp$f0[[j]]$F0[1:n_min[j]], ncol = 1)
features.list$f0 <- rbind(features.list$f0, f0_temp)
}
if('f0_mhs' %in% features){
f0_mhs_temp <- as.matrix(features.list.temp$f0_mhs[[j]]$pitch[1:n_min[j],],
ncol = 1)
features.list$f0_mhs <- rbind(features.list$f0_mhs, f0_mhs_temp)
}
if('f0_praat' %in% features){
f0_praat_temp <- as.matrix(features.list.temp$f0_praat[[j]][1:n_min[j]],
ncol = 1)
features.list$f0_praat <- rbind(features.list$f0_praat, f0_praat_temp)
}
if('fmt' %in% features){
fmt_temp <- as.matrix(features.list.temp$fmt[[j]]$fm[1:n_min[j],],
ncol = numFormants)
features.list$fmt <- rbind(features.list$fmt, fmt_temp)
}
if('fmt_praat' %in% features){
fmt_praat_temp <- as.matrix(features.list.temp$fmt_praat[[j]][1:n_min[j],],
ncol = numFormants)
features.list$fmt_praat <- rbind(features.list$fmt_praat, fmt_praat_temp)
}
if('zcr' %in% features){
zcr_temp <- as.matrix(features.list.temp$zcr[[j]]$zcr[1:n_min[j],],
ncol = 1)
features.list$zcr <- rbind(features.list$zcr, zcr_temp)
}
if('rms' %in% features){
rms_temp <- as.matrix(features.list.temp$lpa[[j]]$rms[1:n_min[j],],
ncol = 1)
features.list$rms <- rbind(features.list$rms, rms_temp)
}
if('gain' %in% features){
gain_temp <- as.matrix(features.list.temp$lpa[[j]]$gain[1:n_min[j],],
ncol = 1)
features.list$gain <- rbind(features.list$gain, gain_temp)
}
if('rfc' %in% features){
rfc_temp <- as.matrix(features.list.temp$lpa[[j]]$rfc[1:n_min[j],],
ncol = 19)
features.list$rfc <- rbind(features.list$rfc, rfc_temp)
}
if('ac' %in% features){
ac_temp <- as.matrix(features.list.temp$ac[[j]]$acf[1:n_min[j],],
ncol = 20)
features.list$ac <- rbind(features.list$ac, ac_temp)
}
if('cep' %in% features){
cep_temp <- as.matrix(features.list.temp$cep[[j]]$cep[1:n_min[j],],
ncol = 257)
features.list$cep <- rbind(features.list$cep, cep_temp)
}
if('dft' %in% features){
dft_temp <- as.matrix(features.list.temp$dft[[j]]$dft[1:n_min[j],],
ncol = 257)
features.list$dft <- rbind(features.list$dft, dft_temp)
}
if('css' %in% features){
css_temp <- as.matrix(features.list.temp$css[[j]]$css[1:n_min[j],],
ncol = 257)
features.list$css <- rbind(features.list$css, css_temp)
}
if('lps' %in% features){
lps_temp <- as.matrix(features.list.temp$lps[[j]]$lps[1:n_min[j],],
ncol = 257)
features.list$lps <- rbind(features.list$lps, lps_temp)
}
if('mfcc' %in% features){
features.list$mfcc <- rbind(features.list$mfcc,
features.list.temp$mfcc[[j]][1:n_min[j],])
}
if(verbose){
cat('PROGRESS', paste0(round(j/nWav*100,2),'%'), '\n')
}
t1 <- proc.time()-pt1
if(verbose){
cat('FILE', j, 'OF', nWav, '|', t1[3], 'SECONDS\n\n')
}
}
# id, using smaller length: n_min
id <- tibble::enframe(rep(wavFiles, n_min), value = 'wav_path',
name = NULL)
# colnames
if('f0' %in% features){
colnames(features.list$f0) <- 'f0'
}
if('f0_mhs' %in% features){
colnames(features.list$f0_mhs) <- paste0('f0_mhs')
}
if('f0_praat' %in% features){
colnames(features.list$f0_praat) <- paste0('f0_praat')
}
if('fmt' %in% features){
colnames(features.list$fmt) <- paste0('f', 1:ncol(features.list$fmt))
}
if('fmt_praat' %in% features){
colnames(features.list$fmt_praat) <- paste0('f', 1:ncol(features.list$fmt_praat), '_praat')
}
if('zcr' %in% features){
colnames(features.list$zcr) <- paste0('zcr', 1:ncol(features.list$zcr))
}
if('rms' %in% features){
colnames(features.list$rms) <- paste0('rms')
}
if('gain' %in% features){
colnames(features.list$gain) <- paste0('gain')
}
if('rfc' %in% features){
colnames(features.list$rfc) <- paste0('rfc', 1:ncol(features.list$rfc))
}
if('ac' %in% features){
colnames(features.list$ac) <- paste0('acf', 1:ncol(features.list$ac))
}
if('cep' %in% features){
colnames(features.list$cep) <- paste0('cep', 1:ncol(features.list$cep))
}
if('dft' %in% features){
colnames(features.list$dft) <- paste0('dft', 1:ncol(features.list$dft))
}
if('css' %in% features){
colnames(features.list$css) <- paste0('css', 1:ncol(features.list$css))
}
if('lps' %in% features){
colnames(features.list$lps) <- paste0('lps', 1:ncol(features.list$lps))
}
if('mfcc' %in% features){
colnames(features.list$mfcc) <- paste0('mfcc', 1:ncol(features.list$mfcc))
}
# as data frame
dat <- dplyr::bind_cols(id, features.list)
# rounding
if(!is.null(round.to)){
dat[-1] <- round(dat[-1], round.to)
}
# replacing 0 by NA
dat[-1][sapply(dat[-1], R.utils::isZero)] <- NA
# 14. Df - Formant Dispersion by Fitch (1997)
if('f0' %in% features & 'fmt' %in% features & 'df' %in% features){
if(numFormants >= 2) {dat$df2 <- (dat$f2-dat$f1)/1}
if(numFormants >= 3) {dat$df3 <- (dat$f3-dat$f1)/2}
if(numFormants >= 4) {dat$df4 <- (dat$f4-dat$f1)/3}
if(numFormants >= 5) {dat$df5 <- (dat$f5-dat$f1)/4}
if(numFormants >= 6) {dat$df6 <- (dat$f6-dat$f1)/5}
if(numFormants >= 7) {dat$df7 <- (dat$f7-dat$f1)/6}
if(numFormants >= 8) {dat$df8 <- (dat$f8-dat$f1)/7}
}
# TODO: in scale check if columns are not constant (degenerated random variables)
# Scaling
if('f0' %in% features & 'fmt' %in% features &
('pf' %in% features | 'rf' %in% features |
'rcf' %in% features | 'rpf' %in% features)){
cn <- paste0('f', 0:numFormants)
f_sc <- sapply(dat[,cn], scale)
}
# 15. Pf - Formant Position by Puts, Apicella & Cárdenas (2011)
if('f0' %in% features & 'fmt' %in% features & 'pf' %in% features){
if(numFormants >= 1) {dat$pf1 <- f_sc[,'f1']}
if(numFormants >= 2) {dat$pf2 <- rowMeans(f_sc[,c('f1','f2')], na.rm = TRUE)}
if(numFormants >= 3) {dat$pf3 <- rowMeans(f_sc[,c('f1','f2','f3')], na.rm = TRUE)}
if(numFormants >= 4) {dat$pf4 <- rowMeans(f_sc[,c('f1','f2','f3','f4')], na.rm = TRUE)}
if(numFormants >= 5) {dat$pf5 <- rowMeans(f_sc[,c('f1','f2','f3','f4','f5')], na.rm = TRUE)}
if(numFormants >= 6) {dat$pf6 <- rowMeans(f_sc[,c('f1','f2','f3','f4','f5','f6')], na.rm = TRUE)}
if(numFormants >= 7) {dat$pf7 <- rowMeans(f_sc[,c('f1','f2','f3','f4','f5','f6','f7')], na.rm = TRUE)}
if(numFormants >= 8) {dat$pf8 <- rowMeans(f_sc[,c('f1','f2','f3','f4','f5','f6','f7','f8')], na.rm = TRUE)}
}
# 16. Rf - Formant Removal by Zabala (2023)
if('f0' %in% features & 'fmt' %in% features & 'rf' %in% features){
if(numFormants >= 1) {dat$rf1 <- f_sc[,'f0']-f_sc[,'f1']}
if(numFormants >= 2) {dat$rf2 <- f_sc[,'f0']-f_sc[,'f2']}
if(numFormants >= 3) {dat$rf3 <- f_sc[,'f0']-f_sc[,'f3']}
if(numFormants >= 4) {dat$rf4 <- f_sc[,'f0']-f_sc[,'f4']}
if(numFormants >= 5) {dat$rf5 <- f_sc[,'f0']-f_sc[,'f5']}
if(numFormants >= 6) {dat$rf6 <- f_sc[,'f0']-f_sc[,'f6']}
if(numFormants >= 7) {dat$rf7 <- f_sc[,'f0']-f_sc[,'f7']}
if(numFormants >= 8) {dat$rf8 <- f_sc[,'f0']-f_sc[,'f8']}
}
# 17. RCf - Formant Cumulated Removal by Zabala (2023)
if('f0' %in% features & 'fmt' %in% features & 'rcf' %in% features){
# if(numFormants >= 1) {dat$rcf1 <- f_sc[,'f0']-f_sc[,'f1']} # equivalent to Rf1 and RPf1
if(numFormants >= 2) {dat$rcf2 <- f_sc[,'f0']-rowSums(f_sc[,c('f1','f2')], na.rm = TRUE)}
if(numFormants >= 3) {dat$rcf3 <- f_sc[,'f0']-rowSums(f_sc[,c('f1','f2','f3')], na.rm = TRUE)}
if(numFormants >= 4) {dat$rcf4 <- f_sc[,'f0']-rowSums(f_sc[,c('f1','f2','f3','f4')], na.rm = TRUE)}
if(numFormants >= 5) {dat$rcf5 <- f_sc[,'f0']-rowSums(f_sc[,c('f1','f2','f3','f4','f5')], na.rm = TRUE)}
if(numFormants >= 6) {dat$rcf6 <- f_sc[,'f0']-rowSums(f_sc[,c('f1','f2','f3','f4','f5','f6')], na.rm = TRUE)}
if(numFormants >= 7) {dat$rcf7 <- f_sc[,'f0']-rowSums(f_sc[,c('f1','f2','f3','f4','f5','f6','f7')], na.rm = TRUE)}
if(numFormants >= 8) {dat$rcf8 <- f_sc[,'f0']-rowSums(f_sc[,c('f1','f2','f3','f4','f5','f6','f7','f8')], na.rm = TRUE)}
}
# 18. RPf - Formant Position Removal by Zabala (2023)
if('f0' %in% features & 'fmt' %in% features & 'rpf' %in% features){
# if(numFormants >= 1) {dat$rpf1 <- f_sc[,'f0']-dat$pf1} # equivalent to Rf1 and RCf1
if(numFormants >= 2) {dat$rpf2 <- f_sc[,'f0']-rowMeans(f_sc[,c('f1','f2')], na.rm = TRUE)}
if(numFormants >= 3) {dat$rpf3 <- f_sc[,'f0']-rowMeans(f_sc[,c('f1','f2','f3')], na.rm = TRUE)}
if(numFormants >= 4) {dat$rpf4 <- f_sc[,'f0']-rowMeans(f_sc[,c('f1','f2','f3','f4')], na.rm = TRUE)}
if(numFormants >= 5) {dat$rpf5 <- f_sc[,'f0']-rowMeans(f_sc[,c('f1','f2','f3','f4','f5')], na.rm = TRUE)}
if(numFormants >= 6) {dat$rpf6 <- f_sc[,'f0']-rowMeans(f_sc[,c('f1','f2','f3','f4','f5','f6')], na.rm = TRUE)}
if(numFormants >= 7) {dat$rpf7 <- f_sc[,'f0']-rowMeans(f_sc[,c('f1','f2','f3','f4','f5','f6','f7')], na.rm = TRUE)}
if(numFormants >= 8) {dat$rpf8 <- f_sc[,'f0']-rowMeans(f_sc[,c('f1','f2','f3','f4','f5','f6','f7','f8')], na.rm = TRUE)}
}
# creating ids
idf <- lapply(n_min, seq, 1)
dat <- dplyr::bind_cols(section_seq = 1:nrow(dat),
section_seq_file = unlist(lapply(idf, rev)), dat)
# total time
t0 <- proc.time()-pt0
if(verbose){
cat('TOTAL TIME', t0[3], 'SECONDS\n\n')
}
# return dat
return(dat)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/extract_features.R
|
#' Features summary
#'
#' @description Returns summary measures of 'voice::extract_features'.
#' @param x An Extended data frame to be tagged with media information.
#' @param groupBy A variable to group the summary measures. The argument must be a character vector. (Default: \code{groupBy = 'wav_path'}).
#' @param wavPath A vector containing the path(s) to WAV files. May be both as \code{dirname} or \code{basename} formats.
#' @param wavPathName A string containing the WAV path name. (Default: \code{wavPathName = 'wav_path'}).
#' @param features Vector of features to be extracted. (Default: \code{'f0'}).
#' @param filesRange The desired range of directory files (default: \code{NULL}, i.e., all files). Should only be used when all the WAV files are in the same folder.
#' @param sex \code{= <code>} set sex specific parameters where <code> = \code{'f'}[emale], \code{'m'}[ale] or \code{'u'}[nknown] (Default: \code{'u'}). Used as 'gender' by \code{wrassp::ksvF0}, \code{wrassp::forest} and \code{wrassp::mhsF0}.
#' @param windowShift \code{= <dur>} set analysis window shift to <dur>ation in ms (Default: \code{5.0}). Used by \code{wrassp::ksvF0}, \code{wrassp::forest}, \code{wrassp::mhsF0}, \code{wrassp::zcrana}, \code{wrassp::rfcana}, \code{wrassp::acfana}, \code{wrassp::cepstrum}, \code{wrassp::dftSpectrum}, \code{wrassp::cssSpectrum} and \code{wrassp::lpsSpectrum}.
#' @param numFormants \code{= <num>} <num>ber of formants (Default: \code{8}). Used by \code{wrassp::forest}.
#' @param numcep Number of Mel-frequency cepstral coefficients (cepstra) to return (Default: \code{12}). Used by \code{tuneR::melfcc}.
#' @param dcttype Type of DCT used. \code{'t1'} or \code{'t2'}, \code{'t3'} for HTK \code{'t4'} for feacalc (Default: \code{'t2'}). Used by \code{tuneR::melfcc}.
#' @param fbtype Auditory frequency scale to use: \code{'mel'}, \code{'bark'}, \code{'htkmel'}, \code{'fcmel'} (Default: \code{'mel'}). Used by \code{tuneR::melfcc}.
#' @param resolution \code{= <freq>} set FFT length to the smallest value which results in a frequency resolution of <freq> Hz or better (Default: \code{40.0}). Used by \code{wrassp::cssSpectrum}, \code{wrassp::dftSpectrum} and \code{wrassp::lpsSpectrum}.
#' @param usecmp Logical. Apply equal-loudness weighting and cube-root compression (PLP instead of LPC) (Default: \code{FALSE}). Used by \code{tuneR::melfcc}.
#' @param mc.cores Number of cores to be used in parallel processing. (Default: \code{1})
#' @param full.names Logical. If \code{TRUE}, the directory path is prepended to the file names to give a relative file path. If \code{FALSE}, the file names (rather than paths) are returned. (Default: \code{TRUE}). Used by \code{base::list.files}.
#' @param recursive Logical. Should the listing recurse into directories? (Default: \code{FALSE}) Used by \code{base::list.files}.
#' @param check.mono Logical. Check if the WAV file is mono. (Default: \code{FALSE})
#' @param stereo2mono (Experimental) Logical. Should files be converted from stereo to mono? (Default: \code{FALSE})
#' @param overwrite (Experimental) Logical. Should converted files be overwritten? If not, the file gets the suffix \code{_mono}. (Default: \code{FALSE})
#' @param freq Frequency in Hz to write the converted files when \code{stereo2mono=TRUE}. (Default: \code{44100})
#' @param round.to Number of decimal places to round to. (Default: \code{NULL})
#' @param verbose Logical. Should the running status be showed? (Default: \code{FALSE})
#' @return A tibble data frame containing summarized numeric columns using (1) mean, (2) standard deviation, (3) variation coefficient, (4) median, (5) interquartile range and (6) median absolute deviation.
#' @details \code{filesRange} should only be used when all the WAV files are in the same folder.
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#' pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' # creating Extended synthetic data
#' E <- dplyr::tibble(subject_id = c(1,1,1,2,2,2,3,3,3),
#' wav_path = path2wav)
#'
#' # minimal usage
#' feat_summary(E)
#'
#' # canonical data
#' feat_summary(E, groupBy = 'subject_id')
#' @export
feat_summary <- function(x,
groupBy = 'wav_path',
wavPath = unique(x$wav_path),
wavPathName = 'wav_path',
features = 'f0',
filesRange = NULL,
sex = 'u',
windowShift = 10,
numFormants = 8,
numcep = 12,
dcttype = c('t2', 't1', 't3', 't4'),
fbtype = c('mel', 'htkmel', 'fcmel', 'bark'),
resolution = 40,
usecmp = FALSE,
mc.cores = 1,
full.names = TRUE,
recursive = FALSE,
check.mono = FALSE,
stereo2mono = FALSE,
overwrite = FALSE,
freq = 44100,
round.to = 4,
verbose = FALSE){
M <- voice::extract_features(wavPath,
filesRange = filesRange,
features = features,
sex = sex,
windowShift = windowShift,
numFormants = numFormants,
numcep = numcep,
dcttype = dcttype,
fbtype = fbtype,
resolution = resolution,
usecmp = usecmp,
mc.cores = mc.cores,
full.names = full.names,
recursive = recursive,
check.mono = check.mono,
stereo2mono = stereo2mono,
overwrite = overwrite,
freq = freq,
round.to = round.to,
verbose = verbose)
# full vector of features
featFull <- colnames(M[,-(1:3)])
# normalizing dirnames @ Media
M_path_name <- normalizePath(dirname(dplyr::pull(M[, wavPathName])))
M_base_name <- basename(dplyr::pull(M[, wavPathName]))
M[, wavPathName] <- paste0(M_path_name, '/', M_base_name)
# normalizing dirnames @ Extended
x <- dplyr::as_tibble(x)
if(utils::file_test('-f', dplyr::pull(x[, wavPathName])[1])){
x_path_name <- normalizePath(dirname(dplyr::pull(x[, wavPathName])))
x_base_name <- basename(dplyr::pull(x[, wavPathName]))
x[, wavPathName] <- paste0(x_path_name, '/', x_base_name)
M <- dplyr::left_join(M, x, wavPathName)
} else{
x[, wavPathName] <- normalizePath(dplyr::pull(x[, wavPathName]))
wav_path_full <- dir(as.data.frame(x)[, wavPathName], full.names = TRUE)
x_full <- dplyr::tibble(wav_path = dirname(wav_path_full),
wav_path_full = wav_path_full) # generalize wavPathName!
x_full <- dplyr::left_join(x_full, x, by = 'wav_path')
x_full <- dplyr::transmute(x_full, subject_id = subject_id, wav_path = wav_path_full)
M <- dplyr::left_join(M, x_full, wavPathName)
}
# Variation Coefficient function
vc <- function(x, na.rm = TRUE){
return(stats::sd(x, na.rm = na.rm)/mean(x, na.rm = na.rm))
}
# complement of featFull
compFeatFull <- setdiff(colnames(M), featFull)
# tyding up
M <- dplyr::select(M, tidyselect::all_of(compFeatFull), tidyselect::all_of(featFull))
# dplyr::rename(!!wavPathName := tidyselect::ends_with('.y'))
# group by
Mg <- M %>%
dplyr::group_by(.data[[groupBy]])
# Mean
M_mean <- Mg %>%
dplyr::summarise_at(dplyr::vars(tidyselect::all_of(featFull)), mean, na.rm = TRUE)
colnames(M_mean)[-1] <- paste0(colnames(M_mean)[-1], '_tag_mean')
# (Sample) Standard Deviation
M_sd <- Mg %>%
dplyr::summarise_at(dplyr::vars(tidyselect::all_of(featFull)), stats::sd, na.rm = TRUE)
colnames(M_sd)[-1] <- paste0(colnames(M_sd)[-1], '_tag_sd')
# (Sample) Variation Coefficient
M_vc <- Mg %>%
dplyr::summarise_at(dplyr::vars(tidyselect::all_of(featFull)), vc, na.rm = TRUE)
colnames(M_vc)[-1] <- paste0(colnames(M_vc)[-1], '_tag_vc')
# Median
M_median <- Mg %>%
dplyr::summarise_at(dplyr::vars(tidyselect::all_of(featFull)), stats::median, na.rm = TRUE)
colnames(M_median)[-1] <- paste0(colnames(M_median)[-1], '_tag_median')
# InterQuartile Range
M_iqr <- Mg %>%
dplyr::summarise_at(dplyr::vars(tidyselect::all_of(featFull)), stats::IQR, na.rm = TRUE)
colnames(M_iqr)[-1] <- paste0(colnames(M_iqr)[-1], '_tag_iqr')
# Median Absolute Deviation
M_mad <- Mg %>%
dplyr::summarise_at(dplyr::vars(tidyselect::all_of(featFull)), stats::mad, na.rm = TRUE)
colnames(M_mad)[-1] <- paste0(colnames(M_mad)[-1], '_tag_mad')
# left_join
M_summ <- dplyr::left_join(M_mean, M_sd, by = groupBy)
M_summ <- dplyr::left_join(M_summ, M_vc, by = groupBy)
M_summ <- dplyr::left_join(M_summ, M_median, by = groupBy)
M_summ <- dplyr::left_join(M_summ, M_iqr, by = groupBy)
M_summ <- dplyr::left_join(M_summ, M_mad, by = groupBy)
return(M_summ)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/feat_summary.R
|
#' Get bit depth
#'
#' @description Get the bit depth (resolution) from a WAV file.
#' @param x Wave object from `tuneR::readWave`.
#' @return Integer indicating the bit depth (e.g., 16) of a WAV file.
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#'                        pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' rw <- tuneR::readWave(path2wav[1])
#' voice::get_bit(rw)
#'
#' rwl <- lapply(path2wav, tuneR::readWave)
#' sapply(rwl, voice::get_bit)
#' @export
get_bit <- function(x){
return(x@bit)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/get_bit.R
|
#' Time duration
#'
#' @description Get time duration from WAV file.
#' @param x Wave object from `tuneR::readWave`.
#' @return Numeric indicating the time duration in seconds from a WAV file.
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#'                        pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' rw <- tuneR::readWave(path2wav[1])
#' voice::get_dur(rw)
#'
#' rwl <- lapply(path2wav, tuneR::readWave)
#' sapply(rwl, voice::get_dur)
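#'
#' # Duration equals the number of samples divided by the sample rate.
#' # A sketch using a synthetic tone (22050 samples at 44100 Hz -> 0.5 s):
#' synth <- tuneR::sine(440, duration = 22050, samp.rate = 44100)
#' voice::get_dur(synth)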
#' @export
get_dur <- function(x){
return(length(x@left)/[email protected])
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/get_dur.R
|
#' Get left channel
#'
#' @description Get left channel from WAV file.
#' @param x Wave object from `tuneR::readWave`.
#' @return Numeric vector indicating the left channel from a WAV file.
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#'                        pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' rw <- tuneR::readWave(path2wav[1])
#' l <- voice::get_left(rw)
#' head(l)
#' length(l)
#' @export
get_left <- function(x){
return(x@left)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/get_left.R
|
#' Get right channel
#'
#' @description Get right channel from WAV file.
#' @param x Wave object from `tuneR::readWave`.
#' @return Numeric vector indicating the right channel from a WAV file.
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#'                        pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' rw <- tuneR::readWave(path2wav[1])
#' r <- voice::get_right(rw)
#' head(r)
#' length(r)
#' @export
get_right <- function(x){
return(x@right)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/get_right.R
|
#' Get sample rate
#'
#' @description Get sample rate from WAV file.
#' @param x Wave object from `tuneR::readWave`.
#' @return Integer indicating the sample rate from a WAV file.
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#'                        pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' rw <- tuneR::readWave(path2wav[1])
#' voice::get_samp.rate(rw)
#'
#' rwl <- lapply(path2wav, tuneR::readWave)
#' sapply(rwl, voice::get_samp.rate)
#' @export
get_samp.rate <- function(x){
return([email protected])
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/get_samp.rate.R
|
#' Time beginning
#'
#' @description Get time beginning from a data frame in RTTM standard.
#' @param x A data frame in RTTM standard. See 'voice::read_rttm'.
#' @return Numeric vector containing the time beginning in seconds.
#' @examples
#' library(voice)
#'
#' url0 <- 'https://raw.githubusercontent.com/filipezabala/voiceAudios/main/rttm/sherlock0.rttm'
#' download.file(url0, destfile = paste0(tempdir(), '/sherlock0.rttm'))
#'
#' rttm <- voice::read_rttm(tempdir())
#' (gtb <- voice::get_tbeg(rttm$sherlock0.rttm))
#' class(gtb)
#' @export
get_tbeg <- function(x){
return(x$tbeg)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/get_tbeg.R
|
#' Time duration
#'
#' @description Get time duration from a data frame in RTTM standard.
#' @param x A data frame in RTTM standard. See 'voice::read_rttm'.
#' @return Numeric vector containing the time duration in seconds.
#' @examples
#' library(voice)
#'
#' url0 <- 'https://raw.githubusercontent.com/filipezabala/voiceAudios/main/rttm/sherlock0.rttm'
#' download.file(url0, destfile = paste0(tempdir(), '/sherlock0.rttm'))
#'
#' rttm <- voice::read_rttm(tempdir())
#' (gtd <- voice::get_tdur(rttm$sherlock0.rttm))
#' class(gtd)
#' @export
get_tdur <- function(x){
return(x$tdur)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/get_tdur.R
|
utils::globalVariables(c('subject_id', '.data', 'spn.lo', 'spn.hi', 'spn',
'midi', 'black', 'Black', 'wavelength', 'unemploy',
'pop', 'filename', 'tag_audio_time'))
|
/scratch/gouwar.j/cran-all/cranData/voice/R/globals.R
|
#' Interpolate vectors
#'
#' @description Interpolate vectors, compressing them to a fraction \code{compact.to} of the original length. May remove repeated zeros.
#' @param y A vector or time series.
#' @param compact.to Proportion of points remaining after compaction, between 0 and 1 (inclusive). If equal to 1 and \code{drop.zeros = FALSE}, the original vector is returned.
#' @param drop.zeros Logical. Drop repeated zeros? Default: \code{FALSE}.
#' @param to.data.frame Logical. Convert to data frame? Default: \code{FALSE}.
#' @param round.off Number of decimal places of the interpolated \code{y}. Default: \code{NULL}.
#' @param weight Vector of (positive) weights with the same length as \code{y}. Each value of \code{y} is replicated \code{ceiling(weight)} times before interpolation. Default: \code{NULL}.
#' @return A list of interpolated \code{x} and \code{y} values with length near to \code{compact.to*length(y)}.
#' @examples
#' library(voice)
#'
#' v1 <- 1:100
#' (c1 <- interp(v1, compact.to = 0.2))
#' length(c1$y)
#' plot(1:100, type = 'l')
#' points(c1$x, c1$y, col='red')
#'
#' # with weight
#' (c2 <- interp(v1, compact.to = 0.2, weight = rev(v1)))
#' plot(c1$y)
#' points(c2$y, col = 'red')
#'
#' (v2 <- c(1:5, rep(0,10), 1:10, rep(0,5), 10:20, rep(0,10)))
#' length(v2)
#' interp(v2, 0.1, drop.zeros = TRUE, to.data.frame = FALSE)
#' interp(v2, 0.1, drop.zeros = TRUE, to.data.frame = TRUE)
#' interp(v2, 0.2, drop.zeros = TRUE)
#' interp(v2, 0.2, drop.zeros = FALSE)
#'
#' (v3 <- c(rep(0,10), 1:20, rep(0,3)))
#' (c3 <- interp(v3, 1/3, drop.zeros = FALSE, to.data.frame = FALSE))
#' lapply(c3, length)
#' plot(v3, type = 'l')
#' points(c3$x, c3$y, col = 'red')
#'
#' (v4 <- c(rnorm(1:100)))
#' (c4 <- interp(v4, 1/4, round.off = 3))
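#'
#' # the output length is close to ceiling(compact.to * length(y)); a sketch:
#' length(interp(v1, compact.to = 0.5)$y)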
#' @seealso \code{rm0}, \code{interp_mc}, \code{interp_df}
#' @export
interp <- function(y, compact.to, drop.zeros = FALSE, to.data.frame = FALSE,
round.off = NULL, weight = NULL){
  v <- if(drop.zeros) voice::rm0(y) else y
lv <- length(v)
# build weight
if(!is.null(weight)){
w <- ceiling(weight)
v <- rep(v, w)
}
# interpolating
cv <- stats::approx(v, n = ceiling(compact.to*lv), rule = 2)
# round.off
if(!is.null(round.off)){
cv <- lapply(cv, round, round.off)
}
# data frame
if(to.data.frame){
cv <- do.call(cbind, cv)
}
return(cv)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/interp.R
|
#' Interpolate data frames
#'
#' @description Interpolate data frames using multicore, compressing to \code{compact.to} fraction. May remove zeros.
#' @param x A data frame.
#' @param compact.to Proportion of points remaining after interpolation. If equal to 1 and \code{drop.zeros = FALSE}, the original columns are returned.
#' @param id The identification column. Default: \code{colname} of the first column of \code{x}.
#' @param colnum A \code{char} vector indicating the numeric colnames. If \code{NULL}, uses the columns of the \code{numeric} class.
#' @param drop.x Logical. Drop the interpolated index columns (suffixed \code{.x}), keeping only the interpolated values? Default: \code{TRUE}.
#' @param drop.zeros Logical. Drop repeated zeros or keep 1 zero per null set? Default: \code{FALSE}.
#' @param to.data.frame Logical. Should return a data frame? If \code{FALSE} returns a list. Default: \code{TRUE}.
#' @param round.off Number of decimal places of the interpolated \code{y}. Default: \code{NULL}.
#' @param weight Vector of weights with same length of \code{y}. Default: \code{NULL}.
#' @param mc.cores The number of cores to mclapply. Default: \code{1}.
#' @return A data frame of interpolated values with number of rows near to \code{compact.to*nrow(x)}.
#' @importFrom dplyr %>%
#' @importFrom dplyr mutate_each
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#' pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' # getting Media data frame via lean call
#' M <- extract_features(dirname(path2wav), features = c('f0','fmt'),
#' mc.cores = 1, verbose = FALSE)
#'
#' \donttest{
#' (cM.df <- interp_df(M[,-(1:2)], 0.1, mc.cores = 1))
#' (cM.df2 <- interp_df(M[,-(1:2)], 0.1, drop.x = FALSE, mc.cores = 1))
#'
#' dim(M)
#' dim(cM.df)
#' dim(cM.df2)
#' (cM.list <- interp_df(M[,-(1:2)], 0.1, to.data.frame = FALSE, mc.cores = 1))
#' }
#' @seealso \code{interp}, \code{interp_mc}
#' @export
interp_df <- function(x, compact.to, id = colnames(x)[1], colnum = NULL,
drop.x = TRUE, drop.zeros = FALSE,
to.data.frame = TRUE, round.off = NULL, weight = NULL,
mc.cores = 1){
ini <- Sys.time()
# numeric columns
if(is.null(colnum)){
is.num <- unlist(lapply(x, class)) == 'numeric'
colnum <- colnames(x[,is.num])
}
# non-numeric columns
colnon <- setdiff(colnames(x), colnum)
# split numeric columns by id
snum <- split(x[,colnum], x[,id])
# split non-numeric columns id
snon <- split(x[,colnon], x[,id])
# original lengths by id
lv <- table(x[,id])
# vector and length of distinct id's
nlv <- names(lv)
nid <- length(nlv)
# compact lengths by id
n <- ceiling(compact.to*lv)
cs <- c(0, cumsum(n))
  # interpolating numeric variables
cn.li <- lapply(snum, voice::interp_mc, compact.to = compact.to,
drop.zeros = drop.zeros, to.data.frame = to.data.frame,
round.off = round.off, weight = weight, mc.cores = mc.cores)
# transforming in dataframe
cn.df <- lapply(cn.li, as.data.frame)
# cn.df <- bind_cols(cn.li)
# compact list
li <- vector('list', length = nid)
for(i in 1:nid){
index <- 1:n[i]
cn.li[[i]] <- snon[[i]][index,]
even <- seq(2, ncol(cn.df[[i]]), by = 2)
index2 <- sort(union(even, even-!drop.x))
cn.df.temp <- cn.df[[i]][,index2]
if(is.numeric(cn.df.temp)){ # dealing with unitary features
cn.df.temp <- dplyr::as_tibble(cn.df.temp)
names(cn.df.temp) <- colnames(x[colnum])
}
cn.li[[i]] <- dplyr::bind_cols(cn.li[[i]], cn.df.temp)
names(cn.li)[i] <- nlv[i]
}
if(drop.x){
cn <- lapply(cn.li, colnames)[[1]]
cn <- base::strsplit(cn, '[.]')
cn <- as.data.frame(cn)[1,]
cn.li <- lapply(cn.li, stats::setNames, cn)
}
# compact dataframe
if(to.data.frame){
cn.li <- do.call(dplyr::bind_rows, cn.li)
}
return(cn.li)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/interp_df.R
|
#' Interpolate vectors using multicore
#'
#' @param y A numeric vector, matrix or data frame.
#' @param compact.to Proportion of points remaining after compression. If equal to 1 and \code{drop.zeros = FALSE}, the original vector is returned.
#' @param drop.zeros Logical. Drop repeated zeros? Default: \code{FALSE}.
#' @param to.data.frame Logical. Convert to data frame? Default: \code{FALSE}.
#' @param round.off Number of decimal places of the interpolated \code{y}. Default: \code{NULL}.
#' @param weight Vector of weights with same length of \code{y}. Default: \code{NULL}.
#' @param mc.cores The number of cores to mclapply. Default: \code{1}.
#' @return A list of interpolated x and y values with length near to \code{compact.to*length(y)}.
#' @importFrom dplyr select
#' @importFrom dplyr %>%
#' @examples
#' library(voice)
#' # Same result as the interp() function when y is a vector
#' interp(1:100, compact.to = 0.1, drop.zeros = TRUE, to.data.frame = FALSE)
#' interp_mc(1:100, compact.to = 0.1, drop.zeros = TRUE, to.data.frame = FALSE)
#'
#' interp(1:100, compact.to = 0.1, drop.zeros = TRUE, to.data.frame = TRUE)
#' interp_mc(1:100, compact.to = 0.1, drop.zeros = TRUE, to.data.frame = TRUE)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#' pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' \donttest{
#' # getting Media data frame
#' M <- voice::extract_features(dirname(path2wav), mc.cores = 1, verbose = FALSE)
#'
#' M.num <- M[,-(1:3)]
#' nrow(M.num)
#' cm1 <- interp_mc(M.num, compact.to = 0.1, drop.zeros = TRUE,
#' to.data.frame = FALSE, mc.cores = 1)
#' names(cm1)
#' lapply(cm1$f0, length)
#' }
#' @seealso \code{rm0}, \code{interp}, \code{interp_df}
#' @export
interp_mc <- function(y, compact.to, drop.zeros = FALSE, to.data.frame = FALSE,
round.off = NULL, weight = NULL,
mc.cores = 1){
if(is.vector(y)){
cm <- voice::interp(y, compact.to = compact.to, drop.zeros = drop.zeros,
to.data.frame = to.data.frame, round.off = round.off,
weight = weight)
}
if(is.matrix(y) | is.data.frame(y)){
cm <- parallel::mclapply(y, voice::interp, compact.to = compact.to,
drop.zeros = drop.zeros,
to.data.frame = to.data.frame,
round.off = round.off, weight = weight,
mc.cores = mc.cores)
}
return(cm)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/interp_mc.R
|
#' Verify if an audio is mono
#'
#' @usage is_mono(x)
#' @param x Path to WAV audio file.
#' @return Logical. `TRUE` indicates a mono (one-channel) file. `FALSE` indicates a non-mono (two-channel) file.
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#' pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' is_mono(path2wav[1])
#' sapply(path2wav, is_mono)
#' @export
is_mono <- function(x){
audio <- tuneR::readWave(x, 1, 2)
is.mono <- tuneR::nchannel(audio) == 1
return(is.mono)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/is_mono.R
|
#' Assign notes to frequencies
#'
#' @description Returns a vector of notes for equal-tempered scale, A4 = 440 Hz.
#' @param x Numeric vector of frequencies in Hz.
#' @param method Method of specifying musical pitch: \code{'spn'} (Scientific Pitch Notation, the default), \code{'octave'}, \code{'midi'}, \code{'black'} or \code{'Black'}.
#' @param moving.average Logical. Should a moving average be applied to \code{x}? (Default: \code{FALSE}).
#' @param k Integer width of the rolling window used if moving.average is TRUE. (Default: \code{11}).
#' @return A vector containing the notes for equal-tempered scale, A4 = 440 Hz. When `method = 'spn'` the vector is of class 'ordered factor'. When `method = 'octave'` the vector is of class 'factor'. When `method = 'midi'` the vector is of class 'integer'.
#' @details The symbol '#' represents a sharp note, i.e., one semitone higher
#' in pitch, in Scientific Pitch Notation (SPN).
#' @references \url{https://pages.mtu.edu/~suits/notefreqs.html}
#' @seealso \code{notes_freq}
#' @examples
#' library(voice)
#' notes(c(220,440,880))
#' notes(c(220,440,880), method = 'octave')
#' notes(c(220,440,880), method = 'midi')
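#'
#' # Frequencies are mapped to the nearest note via half-step boundaries
#' # (midpoints between adjacent note frequencies). A sketch: 450 Hz lies
#' # below the A4/A#4 midpoint (~453 Hz), so it maps to A4 (MIDI 69).
#' notes(450)
#' notes(450, method = 'midi')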
#' @export
notes <- function(x, method = 'spn', moving.average = FALSE, k = 11){
if(moving.average){
x <- zoo::rollmean(x, k)
}
x <- as.matrix(x)
freq <- voice::notes_freq()$freq
distance <- diff(freq)
lf <- length(freq)
freqhalf <- c(freq[1] - distance[1]/2,
freq[-lf] + distance/2,
freq[lf]+distance[lf-1]/2)
spn <- voice::notes_freq()$spn[findInterval(x, freqhalf)]
if(method == 'spn'){
return(spn)
} else if(method == 'midi'){
midi <- voice::notes_freq()$midi[match(spn, voice::notes_freq()$spn)]
return(midi)
} else if(method == 'octave'){
lev <- c('C','C#','D','D#','E','F','F#','G','G#','A','A#','B')
octa <- base::strsplit(as.character(spn), '[0-9]')
octa <- factor(unlist(octa), levels = lev)
return(octa)
} else if(method == 'black'){
black <- notes_freq()$black[match(spn, voice::notes_freq()$spn)]
return(black)
} else if(method == 'Black'){
Black <- notes_freq()$Black[match(spn, voice::notes_freq()$spn)]
return(Black)
}
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/notes.R
|
#' Frequencies on Scientific Pitch Notation (SPN)
#'
#' @description Returns a tibble of frequencies on Scientific Pitch Notation (SPN) for equal-tempered scale, A4 = 440 Hz.
#' @details The symbol '#' represents a sharp note, i.e., one semitone higher in pitch. The SPN is also known as American Standard Pitch Notation (ASPN) or International Pitch Notation (IPN).
#' @references \url{https://pages.mtu.edu/~suits/notefreqs.html}
#' @return A tibble with frequencies for equal-tempered scale, A4 = 440 Hz.
#' @seealso \code{notes}
#' @examples
#' library(voice)
#' notes_freq()
#' @export
notes_freq <- function(){
nf <- dplyr::tribble(
~spn, ~freq, ~wavelength, ~black, ~Black,
#--|--|----
'C0' , 16.35 , 2109.89, 0, 0,
'C#0', 17.32 , 1991.47, 1, 1,
'D0' , 18.35 , 1879.69, 0, 0,
'D#0', 19.45 , 1774.20, 1, 1,
'E0' , 20.60 , 1674.62, 0, 0,
'F0' , 21.83 , 1580.63, 0, 0,
'F#0', 23.12 , 1491.91, 1, 1,
'G0' , 24.50 , 1408.18, 0, 0,
'G#0', 25.96 , 1329.14, 1, 1,
'A0' , 27.50 , 1254.55, 0, 0,
'A#0', 29.14 , 1184.13, 1, 1,
'B0' , 30.87 , 1117.67, 0, 1,
'C1' , 32.70 , 1054.94, 0, 0,
'C#1', 34.65 , 995.73 , 1, 1,
'D1' , 36.71 , 939.85 , 0, 0,
'D#1', 38.89 , 887.10 , 1, 1,
'E1' , 41.20 , 837.31 , 0, 0,
'F1' , 43.65 , 790.31 , 0, 0,
'F#1', 46.25 , 745.96 , 1, 1,
'G1' , 49.00 , 704.09 , 0, 0,
'G#1', 51.91 , 664.57 , 1, 1,
'A1' , 55.00 , 627.27 , 0, 0,
'A#1', 58.27 , 592.07 , 1, 1,
'B1' , 61.74 , 558.84 , 0, 1,
'C2' , 65.41 , 527.47 , 0, 0,
'C#2', 69.30 , 497.87 , 1, 1,
'D2' , 73.42 , 469.92 , 0, 0,
'D#2', 77.78 , 443.55 , 1, 1,
'E2' , 82.41 , 418.65 , 0, 0,
'F2' , 87.31 , 395.16 , 0, 0,
'F#2', 92.50 , 372.98 , 1, 1,
'G2' , 98.00 , 352.04 , 0, 0,
'G#2', 103.83 , 332.29 , 1, 1,
'A2' , 110.00 , 313.64 , 0, 0,
'A#2', 116.54 , 296.03 , 1, 1,
'B2' , 123.47 , 279.42 , 0, 1,
'C3' , 130.81 , 263.74 , 0, 0,
'C#3', 138.59 , 248.93 , 1, 1,
'D3' , 146.83 , 234.96 , 0, 0,
'D#3', 155.56 , 221.77 , 1, 1,
'E3' , 164.81 , 209.33 , 0, 0,
'F3' , 174.61 , 197.58 , 0, 0,
'F#3', 185.00 , 186.49 , 1, 1,
'G3' , 196.00 , 176.02 , 0, 0,
'G#3', 207.65 , 166.14 , 1, 1,
'A3' , 220.00 , 156.82 , 0, 0,
'A#3', 233.08 , 148.02 , 1, 1,
'B3' , 246.94 , 139.71 , 0, 1,
'C4' , 261.63 , 131.87 , 0, 0,
'C#4', 277.18 , 124.47 , 1, 1,
'D4' , 293.66 , 117.48 , 0, 0,
'D#4', 311.13 , 110.89 , 1, 1,
'E4' , 329.63 , 104.66 , 0, 0,
'F4' , 349.23 , 98.79 , 0, 0,
'F#4', 369.99 , 93.24 , 1, 1,
'G4' , 392.00 , 88.01 , 0, 0,
'G#4', 415.30 , 83.07 , 1, 1,
'A4' , 440.00 , 78.41 , 0, 0,
'A#4', 466.16 , 74.01 , 1, 1,
'B4' , 493.88 , 69.85 , 0, 1,
'C5' , 523.25 , 65.93 , 0, 0,
'C#5', 554.37 , 62.23 , 1, 1,
'D5' , 587.33 , 58.74 , 0, 0,
'D#5', 622.25 , 55.44 , 1, 1,
'E5' , 659.25 , 52.33 , 0, 0,
'F5' , 698.46 , 49.39 , 0, 0,
'F#5', 739.99 , 46.62 , 1, 1,
'G5' , 783.99 , 44.01 , 0, 0,
'G#5', 830.61 , 41.54 , 1, 1,
'A5' , 880.00 , 39.20 , 0, 0,
'A#5', 932.33 , 37.00 , 1, 1,
'B5' , 987.77 , 34.93 , 0, 1,
'C6' , 1046.50, 32.97 , 0, 0,
'C#6', 1108.73, 31.12 , 1, 1,
'D6' , 1174.66, 29.37 , 0, 0,
'D#6', 1244.51, 27.72 , 1, 1,
'E6' , 1318.51, 26.17 , 0, 0,
'F6' , 1396.91, 24.70 , 0, 0,
'F#6', 1479.98, 23.31 , 1, 1,
'G6' , 1567.98, 22.00 , 0, 0,
'G#6', 1661.22, 20.77 , 1, 1,
'A6' , 1760.00, 19.60 , 0, 0,
'A#6', 1864.66, 18.50 , 1, 1,
'B6' , 1975.53, 17.46 , 0, 1,
'C7' , 2093.00, 16.48 , 0, 0,
'C#7', 2217.46, 15.56 , 1, 1,
'D7' , 2349.32, 14.69 , 0, 0,
'D#7', 2489.02, 13.86 , 1, 1,
'E7' , 2637.02, 13.08 , 0, 0,
'F7' , 2793.83, 12.35 , 0, 0,
'F#7', 2959.96, 11.66 , 1, 1,
'G7' , 3135.96, 11.00 , 0, 0,
'G#7', 3322.44, 10.38 , 1, 1,
'A7' , 3520.00, 9.80 , 0, 0,
'A#7', 3729.31, 9.25 , 1, 1,
'B7' , 3951.07, 8.73 , 0, 1,
'C8' , 4186.01, 8.24 , 0, 0,
'C#8', 4434.92, 7.78 , 1, 1,
'D8' , 4698.63, 7.34 , 0, 0,
'D#8', 4978.03, 6.93 , 1, 1,
'E8' , 5274.04, 6.54 , 0, 0,
'F8' , 5587.65, 6.17 , 0, 0,
'F#8', 5919.91, 5.83 , 1, 1,
'G8' , 6271.93, 5.50 , 0, 0,
'G#8', 6644.88, 5.19 , 1, 1,
'A8' , 7040.00, 4.90 , 0, 0,
'A#8', 7458.62, 4.63 , 1, 1,
'B8' , 7902.13, 4.37 , 0, 1
)
# add midi
nf <- dplyr::bind_cols(nf, midi = 12:119)
# add lo and hi limits to spn
freq <- nf$freq
distance <- diff(freq)
lf <- length(freq)
freqhalf <- c(freq[1] - distance[1]/2,
freq[-lf] + distance/2,
freq[lf] + distance[lf-1]/2)
nf$spn.lo <- freqhalf[1:lf]
nf$spn.hi <- freqhalf[2:(lf+1)]
# ordering spn
lev <- c('C0','C#0','D0','D#0','E0','F0','F#0','G0','G#0','A0','A#0','B0',
'C1','C#1','D1','D#1','E1','F1','F#1','G1','G#1','A1','A#1','B1',
'C2','C#2','D2','D#2','E2','F2','F#2','G2','G#2','A2','A#2','B2',
'C3','C#3','D3','D#3','E3','F3','F#3','G3','G#3','A3','A#3','B3',
'C4','C#4','D4','D#4','E4','F4','F#4','G4','G#4','A4','A#4','B4',
'C5','C#5','D5','D#5','E5','F5','F#5','G5','G#5','A5','A#5','B5',
'C6','C#6','D6','D#6','E6','F6','F#6','G6','G#6','A6','A#6','B6',
'C7','C#7','D7','D#7','E7','F7','F#7','G7','G#7','A7','A#7','B7',
'C8','C#8','D8','D#8','E8','F8','F#8','G8','G#8','A8','A#8','B8')
nf$spn <- factor(nf$spn, levels = lev, ordered = TRUE)
nf <- dplyr::select(nf, freq, spn.lo, spn.hi, spn, midi, black, Black, wavelength)
return(nf)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/notes_freq.R
|
#' Read RTTM files
#'
#' @description Read Rich Transcription Time Marked (RTTM) files in \code{fromRttm} directory.
#' @param fromRttm A directory/folder containing RTTM files.
#' @return A list containing data frames obtained from standard RTTM files. See 'Details'.
#' @details The Rich Transcription Time Marked (RTTM) files are space-delimited text files containing one turn per line defined by NIST - National Institute of Standards and Technology. Each line containing ten fields:
#'
#' \code{type} Type: segment type; should always be SPEAKER.
#'
#' \code{file} File ID: file name; basename of the recording minus extension (e.g., rec1_a).
#'
#' \code{chnl} Channel ID: channel (1-indexed) that turn is on; should always be 1.
#'
#' \code{tbeg} Turn Onset -- onset of turn in seconds from beginning of recording.
#'
#' \code{tdur} Turn Duration -- duration of turn in seconds.
#'
#' \code{ortho} Orthography Field -- should always be <NA>.
#'
#' \code{stype} Speaker Type -- should always be <NA>.
#'
#' \code{name} Speaker Name -- name of speaker of turn; should be unique within scope of each file.
#'
#' \code{conf} Confidence Score -- system confidence (probability) that information is correct; should always be <NA>.
#'
#' \code{slat} Signal Lookahead Time -- should always be <NA>.
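#'
#' An illustrative (hypothetical) line of an RTTM file, with the ten fields
#' separated by spaces, would look like:
#'
#' \code{SPEAKER sherlock0 1 0.57 1.32 <NA> <NA> SPEAKER_00 <NA> <NA>}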
#' @references \url{https://www.nist.gov/system/files/documents/itl/iad/mig/KWS15-evalplan-v05.pdf}
#' @seealso \code{voice::enrich_rttm}
#' @examples
#' library(voice)
#'
#' url0 <- 'https://raw.githubusercontent.com/filipezabala/voiceAudios/main/rttm/sherlock0.rttm'
#' download.file(url0, destfile = paste0(tempdir(), '/sherlock0.rttm'))
#' url1 <- 'https://raw.githubusercontent.com/filipezabala/voiceAudios/main/rttm/sherlock1.rttm'
#' download.file(url1, destfile = paste0(tempdir(), '/sherlock1.rttm'))
#'
#' (rttm <- voice::read_rttm(tempdir()))
#' class(rttm)
#' lapply(rttm, class)
#' @export
read_rttm <- function(fromRttm){
rttmFiles <- list.files(fromRttm, pattern = "[[:punct:]][rR][tT][tT][mM]$",
full.names = TRUE)
if(length(rttmFiles)==0){
cat('EMPTY DIRECTORY?')
} else{
rttm <- lapply(rttmFiles, utils::read.table)
colnames <- c('type', 'file', 'chnl', 'tbeg', 'tdur',
'ortho', 'stype', 'name', 'conf', 'slat')
rttm <- lapply(rttm, stats::setNames, colnames)
names(rttm) <- basename(rttmFiles)
return(rttm)
}
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/read_rttm.R
|
#' Compress zeros.
#'
#' @description Compresses runs of zeros: each set of consecutive zeros (alternated with sets of non-zeros) is reduced to a single zero, so \code{n} runs of zeros become \code{n} zeros.
#' @param y A vector or time series.
#' @return A vector in which each run of zeros in \code{y} is reduced to a single zero.
#' @examples
#' library(voice)
#'
#' (v0 <- c(1:20,rep(0,10)))
#' (r0 <- rm0(v0))
#' length(v0)
#' length(r0)
#' sum(v0 == 0)
#'
#' (v1 <- c(rep(0,10),1:20))
#' (r1 <- rm0(v1))
#' length(r1)
#'
#' (v2 <- rep(0,10))
#' (r2 <- rm0(v2))
#' length(r2)
#'
#' (v3 <- c(0:10))
#' (r3 <- rm0(v3))
#' length(r3)
#'
#' (v4 <- c(rep(0,10), 1:10, rep(0,5), 10:20, rep(0,10)))
#' (r4 <- rm0(v4))
#' length(r4)
#' sum(v4 == 0)
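#'
#' # each run of zeros is compressed to a single zero; a sketch:
#' sum(r4 == 0) # one zero kept per run of zeros in v4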
#' @export
rm0 <- function(y){
if(sum(y^2, na.rm = TRUE) == 0){ # null vector
return(0)
}
is.zero <- y == 0 # is zero?
if(sum(is.zero, na.rm = TRUE) == 0){ # no zeros, returns original vector
return(y)
}
ly <- length(y)
first.zero <- is.zero[1] # is the First position a Zero?
diz <- diff(is.zero) # Difference of Is.Zero
w0 <- which(is.zero) # positions containing zeros
lw0 <- length(w0) # number of (positions containing) zeros
d0 <- diff(w0) # position of the changes (F-T or T-F) in v0 == 0
change <- d0 != 1
n0 <- sum(change, na.rm = TRUE)+1 # number of sets with 0's
if(sum(is.zero) == n0){ # number of zeros equals to number of sets with 0's
return(y)
}
lv <- ly - lw0 + n0 # length of compacted vector, after cleaning 0's
v <- rep(0, lv) # compact vector
wdn0 <- which(diz != 0) # position of the last change (F-T or T-F in y==0)
dwdn0 <- c(wdn0[1], diff(wdn0)) # sizes of sets with zeros and non-zeros, alternated
ld0 <- length(dwdn0)
dwdn1 <- rep(1, ld0) # vector of ones
if(ld0 == 1 & first.zero){
suppressWarnings(v[2:lv] <- y[(dwdn0[1]+1):ly])
}
if(ld0 == 1 & !first.zero){
suppressWarnings(v[1:dwdn0[1]] <- y[1:dwdn0[1]])
}
if(ld0 > 1){
odd <- seq(1, ld0, by = 2) # odd numbers 1:length(dwdn0)
dwdn1[first.zero+odd] <- dwdn0[first.zero+odd]
cs0 <- cumsum(dwdn0)
cs1 <- cumsum(dwdn1)
suppressWarnings(v[1:cs1[1]] <- y[1:cs0[1]])
for(i in 2:length(cs0)){ # the loop works fine even with the warning (gets the last position, ok)
suppressWarnings(v[(cs1[i-1]+1):cs1[i]] <- y[(cs0[i-1]+1):cs0[i]])
}
}
return(v)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/rm0.R
|
#' Smooth numeric variables in a data frame
#'
#' @param x A data frame.
#' @param k Integer width of the rolling window. Default: \code{11}.
#' @param id The identification column. Default: \code{colname} of the first column of \code{x}.
#' @param colnum A \code{char} vector indicating the numeric colnames. If \code{NULL}, uses the columns of the \code{numeric} class.
#' @param mc.cores The number of cores to mclapply. By default uses \code{1}.
#' @return A data frame with the numeric columns smoothed by a rolling mean of window width \code{k}; each group defined by \code{id} loses \code{k-1} rows.
#' @importFrom dplyr %>%
#' @seealso \code{extract_features}
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#' pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' # minimal usage
#' M <- extract_features(path2wav, features = c('f0', 'fmt'))
#' (Ms <- smooth_df(M[-(1:2)]))
#' dim(M)
#' dim(Ms)
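#'
#' # Each id group loses k-1 rows to the rolling window. A sketch, assuming
#' # the default k = 11:
#' nrow(M) - nrow(Ms) # (k - 1) * number of distinct ids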
#' @export
smooth_df <- function(x, k = 11, id = colnames(x)[1], colnum = NULL,
mc.cores = 1){
# n
n <- nrow(x)
n_by_id <- table(x[,id])
# n smoothed
ns_fun <- function(x,k) {return(x-k+1)}
ns_by_id <- sapply(n_by_id, ns_fun, k=k)
ns <- sum(ns_by_id) # generalize
# vector and length of distinct id's
id_names <- names(n_by_id)
n_id <- length(id_names)
# beginning (beg0) and end (end0) of original objects
beg0 <- c(0, cumsum(n_by_id)) + 1
end0 <- beg0[-(n_id+1)] -1 + ns_by_id
# beginning (beg1) and end (end1) of smoothed objects
beg1 <- c(0, cumsum(ns_by_id)) + 1
end1 <- cumsum(ns_by_id)
# numeric columns
if(is.null(colnum)){
is.num <- sapply(x, class) %in% c('integer', 'numeric')
colnum <- colnames(x[,is.num])
}
# non-numeric columns
colnon <- base::setdiff(colnames(x), colnum)
# split numeric columns by id
snum <- split(x[,colnum], x[,id])
# split non-numeric columns by id
snon <- split(x[,colnon], x[,id])
snon_df <- tibble::as_tibble(do.call(rbind, snon), .name_repair = 'unique')
# smooth
snum_li <- parallel::mclapply(snum, zoo::rollmean, k,
mc.cores = mc.cores)
snum_df <- tibble::as_tibble(do.call(rbind, snum_li), .name_repair = 'unique')
# binding non numeric columns to *x* *s*moothed
xs <- suppressMessages(tibble::as_tibble(matrix(NA, nrow = ns,
ncol = ncol(snon_df)+ncol(snum_df)),
.name_repair = 'unique'))
colnames(xs) <- c(colnames(snon_df), colnames(snum_df))
for(i in 1:n_id){
fltr0 <- beg0[i]:end0[i]
fltr1 <- beg1[i]:end1[i]
xs[fltr1, ] <- dplyr::bind_cols(snon_df[fltr0,], snum_df[fltr1,])
}
# reordering columns
xs <- xs %>%
dplyr::select(colnames(x))
return(xs)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/smooth_df.R
|
#' Split Wave
#'
#' @description Split WAV files in the \code{fromWav} directory (or a single WAV file), using RTTM files/subdirectories with matching names as guidance.
#' @param fromWav Either WAV file or directory containing WAV files.
#' @param fromRttm Either RTTM file or directory containing RTTM files. Default: \code{NULL}.
#' @param toSplit A directory to write generated files. Default: \code{NULL}.
#' @param autoDir Logical. Should the directory tree be created? Default: \code{FALSE}. See 'Details'.
#' @param subDir Logical. Should the split files be placed in subdirectories? Default: \code{FALSE}.
#' @param output Character string, the class of the object to return, either 'wave' or 'list'. Default: \code{'wave'}.
#' @param filesRange The desired range of directory files (default: \code{NULL}, i.e., all files). Should only be used if \code{fromWav} is a directory.
#' @param full.names Logical. If \code{TRUE}, the directory path is prepended to the file names to give a relative file path. If \code{FALSE}, the file names (rather than paths) are returned. (default: \code{TRUE}) Used by \code{base::list.files}.
#' @param recursive Logical. Should the listing recurse into directories? (default: \code{FALSE}) Used by \code{base::list.files}. Inactive if \code{fromWav} is a file.
#' @param silence.gap The silence gap (in seconds) between adjacent words in a keyword. Rows with \code{tdur <= silence.gap} are removed. (default: \code{0.5})
#' @return Split audio files according to the corresponding RTTM file(s). See '\code{voice::diarize}'.
#' @details When \code{autoDir = TRUE}, the following directories are created: \code{'../mp3'},\code{'../rttm'}, \code{'../split'} and \code{'../musicxml'}. Use \code{getwd()} to find the parent directory \code{'../'}.
#' @examples
#' \dontrun{
#' library(voice)
#'
#' urlWav <- 'https://raw.githubusercontent.com/filipezabala/voiceAudios/main/wav/sherlock0.wav'
#' destWav <- paste0(tempdir(), '/sherlock0.wav')
#' download.file(urlWav, destfile = destWav)
#'
#' urlRttm <- 'https://raw.githubusercontent.com/filipezabala/voiceAudios/main/rttm/sherlock0.rttm'
#' destRttm <- paste0(tempdir(), '/sherlock0.rttm')
#' download.file(urlRttm, destfile = destRttm)
#'
#' splitDir <- paste0(tempdir(), '/split')
#' dir.create(splitDir)
#' splitw(destWav, fromRttm = destRttm, toSplit = splitDir)
#'
#' dir(splitDir)
#' }
#' @seealso \code{voice::diarize}
#' @export
splitw <- function(fromWav,
fromRttm = NULL,
toSplit = NULL,
autoDir = FALSE,
subDir = FALSE,
output = 'wave',
filesRange = NULL,
full.names = TRUE,
recursive = FALSE,
silence.gap = 0.5){
# time processing
pt0 <- proc.time()
# checking if is either a file or a directory
if(utils::file_test('-f', fromWav)){
wavDir <- dirname(fromWav)
wavFiles <- fromWav
} else{
wavDir <- fromWav
}
if(autoDir){
ss <- unlist(strsplit(wavDir, '/'))
parDir <- paste0(ss[-length(ss)], collapse ='/')
mp3Dir <- paste0(parDir, '/mp3')
rttmDir <- paste0(parDir, '/rttm')
splitDir <- paste0(parDir, '/split')
mxmlDir <- paste0(parDir, '/musicxml')
# ifelse(!dir.exists(parDir), dir.create(parDir), 'Directory exists!')
# ifelse(!dir.exists(wavDir), dir.create(wavDir), 'Directory exists!')
ifelse(!dir.exists(mp3Dir), dir.create(mp3Dir), 'Directory exists!')
ifelse(!dir.exists(rttmDir), dir.create(rttmDir), 'Directory exists!')
ifelse(!dir.exists(splitDir), dir.create(splitDir), 'Directory exists!')
ifelse(!dir.exists(mxmlDir), dir.create(mxmlDir), 'Directory exists!')
}
if(is.null(fromRttm)){
fromRttm <- rttmDir
}
# rttm
if(utils::file_test('-f', fromRttm)){
rttmDir <- dirname(fromRttm)
rttmFiles <- fromRttm
} else{
rttmDir <- fromRttm
rttmFiles <- list.files(fromRttm, pattern = '[[:punct:]][rR][tT][tT][mM]$',
full.names = full.names, recursive = recursive)
}
# split
if(is.null(toSplit)){
toSplit <- splitDir
}
# wav
if(utils::file_test('-d', fromWav)){
wavFiles <- list.files(fromWav, pattern = '[[:punct:]][wW][aA][vV]$',
full.names = full.names, recursive = recursive)
}
# filtering by fileRange
if(!is.null(filesRange)){
fullRange <- 1:length(wavFiles)
filesRange <- base::intersect(fullRange, filesRange)
wavFiles <- wavFiles[filesRange]
get1 <- function(x){x[1]}
splunctWav <- strsplit(basename(wavFiles), '[.]')
splunctRttm <- strsplit(basename(rttmFiles), '[.]')
nameWav <- sapply(splunctWav, get1)
nameRttm <- sapply(splunctRttm, get1)
rttmFiles <- rttmFiles[nameWav == nameRttm]
}
# number of WAV files to be extracted
nWav <- length(wavFiles)
# reading WAV
audio <- sapply(wavFiles, tuneR::readWave)
# reading RTTM
rttm <- lapply(rttmFiles, utils::read.table)
colnames <- c('type','file','chnl','tbeg','tdur',
'ortho','stype','name','conf','slat')
rttm <- lapply(rttm, stats::setNames, colnames)
# silence.gap
keep.row <- function(x){
kr <- x[x$tdur > silence.gap,]
return(kr)
}
rttm <- lapply(rttm, keep.row)
# time beginning, duration and ending
tbeg <- lapply(rttm, voice::get_tbeg)
tdur <- lapply(rttm, voice::get_tdur)
tend <- Map('+', tbeg, tdur)
# # tests
# sapply(tdur, summary)
# sapply(tdur, length)
# sapply(tdur, quantile, prob = seq(0,1,.01))
# defining break points
breaks <- Map('c', tbeg, tend)
breaks <- lapply(breaks, sort)
nbreaks <- sapply(breaks, length)
index <- lapply(nbreaks, seq, from = 2, by = 2)
# audio information
freq <- sapply(audio, voice::get_samp.rate)
# bit <- sapply(audio, voice::get_bit)
# left <- lapply(audio, voice::get_left)
# right <- lapply(audio, voice::get_right)
# totlen <- sapply(audio, length)
# totsec <- totlen/freq
# split.audio - Do parallel/vectorized?
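  # For each audio i, consecutive (tbeg, tend) pairs in 'breaks' delimit one
  # speech turn: even positions j mark turn ends, so the samples between
  # freq[i]*breaks[[i]][j-1] and freq[i]*breaks[[i]][j] form split j/2.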
split.audio <- function(x, index, breaks, freq){
splaudio <- vector('list', length(x))
for(i in 1:length(x)){
splaudio[[i]] <- vector('list', length(index[[i]]))
for(j in index[[i]]){
splaudio[[i]][[j/2]] <- x[[i]][(freq[i]*breaks[[i]][j-1]):(freq[i]*breaks[[i]][j])]
}
}
return(splaudio)
}
sa <- split.audio(x=audio, index=index, breaks=breaks, freq=freq)
# splitted audio information
bitSpl <- lapply(unlist(sa), voice::get_bit)
freqSpl <- lapply(unlist(sa), voice::get_samp.rate)
leftSpl <- lapply(unlist(sa), voice::get_left)
rightSpl <- lapply(unlist(sa), voice::get_right)
# writing output as a list
if(output == 'list'){
if(!is.null(toSplit)){
timest <- format(Sys.time(), "%Y-%m-%d_%H-%M-%S")
voice::write_list(x = sa, path = paste0(toSplit,'/list_', timest, '.txt'))
}
return(sa)
}
# writing output as wave
if(output == 'wave'){
fileName <- do.call(rbind, strsplit(basename(wavFiles), '[.]'))
fileNameSplit <- vector('list', length(nbreaks))
k <- 0
audioWave <- vector('list', sum(nbreaks)/2)
for(i in 1:length(nbreaks)){
for(j in (nbreaks[i]/2)){
fileNameSplit[[i]] <- paste0(fileName[i,1], '_split_', 1:j, '.',
fileName[i,2])
}
}
# save files
if(subDir){
pathNameSplit <- lapply(fileName[,1], function(x) paste0(toSplit, '/', x))
dc <- function(x) ifelse(!dir.exists(x), dir.create(x), 'Directory exists!')
lapply(pathNameSplit, dc)
for(i in 1:length(pathNameSplit)){
pathNameSplit[[i]] <- paste0(pathNameSplit[[i]], '/', fileNameSplit[[i]])
}
} else{
pathNameSplit <- lapply(fileNameSplit, function(x) paste0(toSplit, '/', x))
}
for(i in 1:length(audio)){
for(j in 1:length(sa[[i]])){
tuneR::writeWave(sa[[i]][[j]], filename = pathNameSplit[[i]][j])
}
}
}
# total time
t0 <- proc.time()-pt0
cat('TOTAL TIME', t0[3], 'SECONDS\n\n')
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/splitw.R
|
#' Tag a data frame with media information
#'
#' @param x An Extended data frame to be tagged with media information. See references.
#' @param groupBy A variable to group the summary measures. The argument must be a character vector. (Default: \code{groupBy = 'wav_path'}).
#' @param wavPath A vector containing the path(s) to WAV files. May be both as \code{dirname} or \code{basename} formats.
#' @param wavPathName A string containing the WAV path name. (Default: \code{wavPathName = 'wav_path'}).
#' @param tags Tags to be added to \code{x}. Currently only \code{'feat_summary'} is implemented. (Default: \code{'feat_summary'}).
#' @param sortByGroupBy Logical. Should the function sort the Extended data frame \code{x} by \code{groupBy}? (Default: \code{sortByGroupBy = TRUE}).
#' @param filesRange The desired range of directory files. Should only be used when all the WAV files are in the same folder. (Default: \code{NULL}, i.e., all files).
#' @param features Vector of features to be extracted. (Default: \code{'f0'}).
#' @param sex \code{= <code>} set sex specific parameters where <code> = \code{'f'}[emale], \code{'m'}[ale] or \code{'u'}[nknown] (default: \code{'u'}). Used as 'gender' by \code{wrassp::ksvF0}, \code{wrassp::forest} and \code{wrassp::mhsF0}.
#' @param windowShift \code{= <dur>} set analysis window shift to <dur>ation in ms (default: 5.0). Used by \code{wrassp::ksvF0}, \code{wrassp::forest}, \code{wrassp::mhsF0}, \code{wrassp::zcrana}, \code{wrassp::rfcana}, \code{wrassp::acfana}, \code{wrassp::cepstrum}, \code{wrassp::dftSpectrum}, \code{wrassp::cssSpectrum} and \code{wrassp::lpsSpectrum}.
#' @param numFormants \code{= <num>} <num>ber of formants (Default: \code{8}). Used by \code{wrassp::forest}.
#' @param numcep Number of Mel-frequency cepstral coefficients (cepstra) to return (Default: \code{12}). Used by \code{tuneR::melfcc}.
#' @param dcttype Type of DCT used. \code{'t1'} or \code{'t2'}, \code{'t3'} for HTK \code{'t4'} for feacalc (Default: \code{'t2'}). Used by \code{tuneR::melfcc}.
#' @param fbtype Auditory frequency scale to use: \code{'mel'}, \code{'bark'}, \code{'htkmel'}, \code{'fcmel'} (Default: \code{'mel'}). Used by \code{tuneR::melfcc}.
#' @param resolution \code{= <freq>} set FFT length to the smallest value which results in a frequency resolution of <freq> Hz or better (Default: \code{40.0}). Used by \code{wrassp::cssSpectrum}, \code{wrassp::dftSpectrum} and \code{wrassp::lpsSpectrum}.
#' @param usecmp Logical. Apply equal-loudness weighting and cube-root compression (PLP instead of LPC) (Default: \code{FALSE}). Used by \code{tuneR::melfcc}.
#' @param mc.cores Number of cores to be used in parallel processing. (Default: \code{1})
#' @param full.names Logical. If \code{TRUE}, the directory path is prepended to the file names to give a relative file path. If \code{FALSE}, the file names (rather than paths) are returned. (Default: \code{TRUE}) Used by \code{base::list.files}.
#' @param recursive Logical. Should the listing recurse into directories? (Default: \code{FALSE}) Used by \code{base::list.files}.
#' @param check.mono Logical. Check if the WAV file is mono. (Default: \code{FALSE})
#' @param stereo2mono (Experimental) Logical. Should files be converted from stereo to mono? (Default: \code{FALSE})
#' @param overwrite (Experimental) Logical. Should converted files be overwritten? If not, the file gets the suffix \code{_mono}. (Default: \code{FALSE})
#' @param freq Frequency in Hz to write the converted files when \code{stereo2mono=TRUE}. (Default: \code{44100})
#' @param round.to Number of decimal places to round to. (Default: \code{NULL})
#' @param verbose Logical. Should the running status be showed? (Default: \code{FALSE})
#' @return A tibble data frame containing summarized numeric columns using (1) mean, (2) standard deviation, (3) variation coefficient, (4) median, (5) interquartile range and (6) median absolute deviation.
#' @details \code{filesRange} should only be used when all the WAV files are in the same folder.
#' @examples
#' library(voice)
#'
#' # get path to audio file
#' path2wav <- list.files(system.file('extdata', package = 'wrassp'),
#' pattern = glob2rx('*.wav'), full.names = TRUE)
#'
#' # creating Extended synthetic data
#' E <- dplyr::tibble(subject_id = c(1,1,1,2,2,2,3,3,3),
#' wav_path = path2wav)
#' E
#'
#' # minimal usage
#' tag(E)
#'
#' # canonical data
#' tag(E, groupBy = 'subject_id')
#'
#' # limiting filesRange
#' tag(E, filesRange = 3:6)
#'
#' # more features
#' Et <- tag(E, features = c('f0', 'fmt', 'rf', 'rcf', 'rpf', 'rfc', 'mfcc'),
#' groupBy = 'subject_id')
#' Et
#' str(Et)
#' @export
tag <- function(x,
groupBy = 'wav_path',
wavPath = unique(x$wav_path),
wavPathName = 'wav_path',
tags = c('feat_summary'),
sortByGroupBy = TRUE,
filesRange = NULL,
features = 'f0',
sex = 'u',
windowShift = 5,
numFormants = 8,
numcep = 12,
dcttype = c('t2', 't1', 't3', 't4'),
fbtype = c('mel', 'htkmel', 'fcmel', 'bark'),
resolution = 40,
usecmp = FALSE,
mc.cores = 1,
full.names = TRUE,
recursive = FALSE,
check.mono = FALSE,
stereo2mono = FALSE,
overwrite = FALSE,
freq = 44100,
round.to = 4,
verbose = FALSE){
# sort by groupBy
if(sortByGroupBy){
    x <- dplyr::arrange(x, .data[[groupBy]])
}
# voice::feat_summary
if('feat_summary' %in% tags){
res <- voice::feat_summary(x = x,
groupBy = groupBy,
wavPath = wavPath,
wavPathName = wavPathName,
filesRange = filesRange,
features = features,
sex = sex,
windowShift = windowShift,
numFormants = numFormants,
numcep = numcep,
dcttype = dcttype,
fbtype = fbtype,
resolution = resolution,
usecmp = usecmp,
mc.cores = mc.cores,
full.names = full.names,
recursive = recursive,
check.mono = check.mono,
stereo2mono = stereo2mono,
overwrite = overwrite,
freq = freq,
round.to = round.to,
verbose = verbose)
}
# # voice::audio_time
# if('audio_time' %in% tags){
# at <- voice::audio_time(wavPath, filesRange = filesRange,
# recursive = recursive)
# if(exists('res')){
# res <- dplyr::left_join(res, at, by = groupBy)
# } else {
# res <- at
# }
# }
# # voice::spoken_prop
# st <- spoken_time(wavPath, get.id = TRUE, recursive = TRUE)
# x <- left_join(x, st, by = 'filename')
# x <- mutate(x, tag_spoken_prop = tag_spoken_time/tag_audio_time)
#
#
# # voice::notes_summary
# notes_summary(wavPath, get.id = TRUE, i = i)
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/tag.R
|
# these objects are used inside dplyr verbs, so they are not real global variables.
utils::globalVariables(c(
"F0", "F1", "F8", "file_name", "id", "interval"
))
|
/scratch/gouwar.j/cran-all/cranData/voice/R/utils-global.R
|
#' Writes a list to a path
#'
#' @param x A list.
#' @param path A full path to file.
#' @return A file named `list.txt` in `path`.
#' @examples
#' \dontrun{
#' library(voice)
#'
#' pts <- list(x = cars[,1], y = cars[,2])
#' listFile <- paste0(tempdir(), '/list.txt')
#' voice::write_list(pts, listFile)
#' file.info(listFile)
#' system(paste0('head ', listFile))
#' }
#' @export
write_list <- function(x, path){
sink(path)
print(x)
sink()
}
|
/scratch/gouwar.j/cran-all/cranData/voice/R/write_list.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
ini0 <- Sys.time()
## ---- message=FALSE, warning=FALSE--------------------------------------------
# packs
library(voice)
# get path to audio file
wavDir <- list.files(system.file('extdata', package = 'wrassp'),
pattern = glob2rx('*.wav'), full.names = TRUE)
## ---- message=FALSE, warning=FALSE--------------------------------------------
# minimal usage
M <- voice::extract_features(wavDir)
M
## ---- message=FALSE, warning=FALSE--------------------------------------------
# creating Extended synthetic data
E <- dplyr::tibble(subject_id = c(1,1,1,2,2,2,3,3,3), wav_path = wavDir)
E
# minimal usage
voice::tag(E)
# canonical data
voice::tag(E, groupBy = 'subject_id')
|
/scratch/gouwar.j/cran-all/cranData/voice/inst/doc/voicegnette_R.R
|
---
title: '`voice` vignette'
subtitle: 'version `r packageVersion("voice")`'
author: 'Filipe J. Zabala'
date: "`r format(Sys.time(), '%Y-%m-%d')`"
# date: "`r format(Sys.time(), '%Y-%m-%d %H:%M:%S')`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
%\VignetteIndexEntry{`voice` vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
ini0 <- Sys.time()
```
## 0. Installation
https://github.com/filipezabala/voice
## 1. Extract features
### Load packages and audio files
```{r, message=FALSE, warning=FALSE}
# packs
library(voice)
# get path to audio file
wavDir <- list.files(system.file('extdata', package = 'wrassp'),
pattern = glob2rx('*.wav'), full.names = TRUE)
```
### Examples
```{r, message=FALSE, warning=FALSE}
# minimal usage
M <- voice::extract_features(wavDir)
M
```
## 2. Tag
```{r, message=FALSE, warning=FALSE}
# creating Extended synthetic data
E <- dplyr::tibble(subject_id = c(1,1,1,2,2,2,3,3,3), wav_path = wavDir)
E
# minimal usage
voice::tag(E)
# canonical data
voice::tag(E, groupBy = 'subject_id')
```
|
/scratch/gouwar.j/cran-all/cranData/voice/inst/doc/voicegnette_R.Rmd
|
---
title: '`voice` vignette'
subtitle: 'version `r packageVersion("voice")`'
author: 'Filipe J. Zabala'
date: "`r format(Sys.time(), '%Y-%m-%d')`"
# date: "`r format(Sys.time(), '%Y-%m-%d %H:%M:%S')`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
%\VignetteIndexEntry{`voice` vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
ini0 <- Sys.time()
```
## 0. Installation
https://github.com/filipezabala/voice
## 1. Extract features
### Load packages and audio files
```{r, message=FALSE, warning=FALSE}
# packs
library(voice)
# get path to audio file
wavDir <- list.files(system.file('extdata', package = 'wrassp'),
pattern = glob2rx('*.wav'), full.names = TRUE)
```
### Examples
```{r, message=FALSE, warning=FALSE}
# minimal usage
M <- voice::extract_features(wavDir)
M
```
## 2. Tag
```{r, message=FALSE, warning=FALSE}
# creating Extended synthetic data
E <- dplyr::tibble(subject_id = c(1,1,1,2,2,2,3,3,3), wav_path = wavDir)
E
# minimal usage
voice::tag(E)
# canonical data
voice::tag(E, groupBy = 'subject_id')
```
|
/scratch/gouwar.j/cran-all/cranData/voice/vignettes/voicegnette_R.Rmd
|
#' Automatically analyze audio files
#'
#' Automatically analyzes audio files and outputs a data.frame with their main extracted audio features.
#'
#' @details The voiceR package requires the audio file names to follow a specific pattern, in which the different components are separated by a non-alphanumeric character (e.g., “_”). File name components refer to:
#' \describe{
#' \item{ID}{Unique identifier of the speaker or recording.}
#' \item{Condition}{Experimental condition or other grouping variable.}
#' \item{Dimension}{Additional survey or experiment information (e.g., additional conditions).}
#' }
#' The order and presence of the different components are not important, as long as at least one of the aforementioned components is present.
#' Furthermore, non-relevant components can be skipped by specifying “Null” in its position such as: ID_Null_Condition. Valid name patterns are, for example, 876h Interior (ID Condition), Exterior-3543h (Condition-ID), 983b-Exterior-q1 (ID-Condition-Dimension) or 455k (ID). All voice files within one session need to follow the same file naming pattern.
#' Note: the non-alphanumeric separator should also be specified as \code{sep}.
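#' For instance, assuming \code{fileNamePattern = "ID_Condition"} and \code{sep = "_"}, a file named \code{876h_Interior.wav} would be parsed into ID \code{876h} and Condition \code{Interior}.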
#'
#' @param path An optional character string indicating the path to the folder containing the audio files. Default corresponds to the current working directory. (You should only define it if the audios you wish to analyze are not already read in R. Otherwise define the audioList parameter).
#' @param audioList An optional list of Wave objects to analyze.
#' @param filter An optional character vector indicating IDs, Conditions, Dimensions, or other patterns used to filter for specific audio files. Default corresponds to NA.
#' @param fileType A character string indicating the audio file format (wav or mp3). Default corresponds to wav.
#' @param fileNamePattern A character string indicating the naming format of the audio files, such as "ID-Condition-Dimension", "Condition_ID_Dimension" or just "ID". Default corresponds to "ID".
#' @param sep A non alpha-numeric character that acts as separator between the different naming components. Default corresponds to an underscore. This field can be ignored if the audio file name only contains an ID component.
#' @param parallel Logical value indicating whether to use parallelism to extract the different audio characteristics to enhance computational performance. Default corresponds to FALSE.
#' @param recursive Logical value indicating whether subdirectories in the specified directory should be included when searching for voice files. Default corresponds to FALSE.
#' @param preprocess Logical value indicating whether to preprocess (normalize amplitude and remove background noise) the audio files before extraction and analysis. Default corresponds to FALSE.
#' @param extended Logical value indicating whether all features extracted by the soundgen package should be inputted. Default corresponds to FALSE.
#' @param ... Other options used to control preprocessing behavior.
#' @return A data.frame is created with the following audio features:
#' \describe{
#' \item{duration}{Total duration in seconds.}
#' \item{voice_breaks_percent}{Proportion of unvoiced frames.}
#' \item{RMS_env}{Root mean square of the amplitude envelope.}
#' \item{mean_loudness}{Average subjective loudness in sone.}
#' \item{mean_F0}{Average fundamental frequency in Hertz.}
#' \item{sd_F0}{Standard deviation of the fundamental frequency in Hertz.}
#' \item{mean_entropy}{Average Wiener entropy. A value of 0 indicates a pure tone, while a value of 1 indicates white noise.}
#' \item{mean_HNR}{Average Harmonics-to-Noise Ratio.}
#' \item{ID}{ID component of the audio file.}
#' \item{Condition}{If fileNamePattern and audio names include a Condition, an additional column with the Condition component of the audio file is included.}
#' \item{Dimension}{If fileNamePattern and audio names include a Dimension, an additional column with the Dimension component of the audio file is included.}
#' }
#' @seealso [soundgen::analyze()], [seewave::duration()], [seewave::rms()], [seewave::env()]
#' @examples
#' \donttest{
#' audioData <- autoExtract(audioList = testAudioList, filter = c("5b438f516066ad470d3be72c52005251"))
#' }
#'
#' @importFrom stringr str_sub str_length str_split
#' @importFrom parallel makeCluster detectCores stopCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom seewave duration rms env zapsilw
#' @importFrom soundgen analyze
#' @importFrom foreach foreach
#' @export
autoExtract <- function(path = ".", audioList = list(), filter = NA, fileType = "wav", fileNamePattern = "ID_Condition_Dimension", sep = "_", parallel = FALSE, recursive = FALSE, preprocess = FALSE, extended = FALSE, ...){
  if(is.list(path)) stop("Did you pass a list of Wave objects as a path? If so, specify audioList parameter")
if(!is.list(audioList)) stop("audioList must be a list of Wave objects!")
if(!is.character(path) || !file.exists(path)) stop("Invalid path!")
if(!is.character(fileType)) stop("fileType must be a string!")
if(!is.character(fileNamePattern)) stop("fileNamePattern must be a string!")
if (!grepl("[^[:alnum:]]", sep)) stop("Error: sep should be a non-alphanumeric character")
if(!is.logical(parallel)) stop("parallel must be a boolean!")
if(!is.logical(recursive)) stop("recursive must be a boolean!")
if(!is.logical(preprocess)) stop("preprocess must be a boolean!")
if(all(is.na(filter)) || length(filter) == 0){
filter <- c()
} else {
filter = paste(filter,collapse="|")
}
#Measures to compute
measures <- c("duration", "voice_breaks_percent", "RMS_env", "mean_loudness", "mean_F0", "sd_F0", "mean_entropy", "mean_HNR")
initialMeasures <- measures
if(length(audioList) == 0){
#Check path to prevent errors
if(str_sub(path, -1) == "/"){
path <- str_sub(path, 1, str_length(path)-1)
}
audioList <- readAudio(path = path, filter = filter, fileType = fileType, recursive = recursive)
}
else{
if(length(filter) != 0) audioList <- audioList[grep(filter, names(audioList))]
}
#Check if pattern is correct
checkPattern <- str_split(fileNamePattern, sep, simplify = TRUE)
checkPattern <- tolower(checkPattern)
patternCorrectness <- checkPattern %in% c("id", "condition", "dimension", "null")
if(length(checkPattern[!patternCorrectness]) >= 1){
stop(paste("Incorrect Components:", checkPattern[!patternCorrectness]))
}
#Extract the different components and their respective positions from the file names
componentsAndPositions <- getComponents(names(audioList), fileNamePattern, sep)
ids <- unique(componentsAndPositions[["Components"]][,"ID"])
#Check for missing components
if(componentsAndPositions$Positions["Condition"] == 99){
conditions <- ""
conditionPresence <- FALSE
}else{
conditions <- unique(componentsAndPositions[["Components"]][,"Condition"])
conditionPresence <- TRUE
}
if(componentsAndPositions$Positions["Dimension"] == 99){
dimensions <- ""
dimensionPresence <- FALSE
}else{
dimensions <- unique(componentsAndPositions[["Components"]][,"Dimension"])
dimensionPresence <- TRUE
}
# Create a list with all the audios in tuneR format and normalize them if required
if(preprocess){
audioList <- preprocess(audioList, ...)
}
#create an empty data.frame with all the files and measures
measures <- c("duration", "voice_breaks_percent", "RMS_env", "mean_loudness", "mean_F0", "sd_F0", "mean_entropy", "mean_HNR")
audioData <- as.data.frame(matrix(nrow = length(audioList), ncol = length(measures)))
row.names(audioData) <- names(audioList)
colnames(audioData) <- measures
components <- getComponents(rownames(audioData), fileNamePattern, sep)
if(dimensionPresence){
audioData <- cbind("Dimension" = components[["Components"]][,"Dimension"], audioData)
}
if(conditionPresence){
audioData <- cbind("Condition" = components[["Components"]][,"Condition"], audioData)
}
audioData <- cbind("ID" = components[["Components"]][,"ID"], audioData)
#Extract the different features and fill them into the previous data.frame (use parallel processing if parallel is True)
if(parallel == TRUE){
i <- 1
cl <- makeCluster(detectCores() - 2)
registerDoParallel(cl)
audioData <- foreach::`%dopar%`(
foreach(i = 1:length(audioList), .combine=rbind, .export = c("getComponents"), .errorhandling = 'remove'), {
audioName <- names(audioList)[i]
componentsAndPositions <- getComponents(audioName, fileNamePattern, sep)
id <- componentsAndPositions[["Components"]][,"ID"]
tempData <- as.data.frame(matrix(nrow = 1, ncol = length(measures)))
colnames(tempData) <- measures
if(conditionPresence){
condition <- componentsAndPositions[["Components"]][,"Condition"]
tempData[,"Condition"] <- condition
}
if(dimensionPresence){
dimension <- componentsAndPositions[["Components"]][,"Dimension"]
tempData[,"Dimension"] <- dimension
}
tempData[,"ID"] <- id
sound_orig = as.numeric(scale(audioList[[audioName]]@left))
samplingRate = audioList[[audioName]]@samp.rate
# savewav(audioList[[audioName]], filename = paste0(newFolder, "/",audioName, ".", "wav"))
analyzeData <- analyze(audioList[[audioName]]@left, samplingRate = audioList[[audioName]]@samp.rate, plot = FALSE, osc = FALSE, summaryFun = c("mean", "sd"), ...)
analyzeData <- analyzeData$summary
if(extended){
tempData[,colnames(analyzeData)] <- analyzeData
}
else{
tempData[, measures[1]] <- duration(audioList[[audioName]])
tempData[, measures[2]] <- 1 - analyzeData$voiced
tempData[, measures[3]] <- rms(env(zapsilw(audioList[[audioName]], plot = FALSE),f=audioList[[audioName]]@samp.rate, plot = FALSE))
tempData[, measures[4]] <- analyzeData$loudness_mean
tempData[, measures[5]] <- analyzeData$pitch_mean
tempData[, measures[6]] <- analyzeData$pitch_sd
tempData[, measures[7]] <- analyzeData$entropy_mean
tempData[, measures[8]] <- analyzeData$HNR_mean
}
tempData
})
stopCluster(cl)
}
else{
for(i in 1:length(audioList)) {
tryCatch({
audioName <- names(audioList)[i]
componentsAndPositions <- getComponents(audioName, fileNamePattern, sep)
id <- componentsAndPositions[["Components"]][,"ID"]
sound_orig = as.numeric(scale(audioList[[audioName]]@left))
samplingRate = audioList[[audioName]]@samp.rate
# savewav(audioList[[audioName]], filename = paste0(newFolder, "/",audioName, ".", "wav"))
if(conditionPresence & dimensionPresence){
dimension <- componentsAndPositions[["Components"]][,"Dimension"]
Condition <- componentsAndPositions[["Components"]][,"Condition"]
rowsFilter <- audioData$ID == id & audioData$Dimension == dimension & audioData$Condition == Condition
}
else if(conditionPresence){
Condition <- componentsAndPositions[["Components"]][,"Condition"]
}
else if(dimensionPresence){
dimension <- componentsAndPositions[["Components"]][,"Dimension"]
}
rowsFilter <- which(rownames(audioData) %in% names(audioList)[i])
analyzeData <- analyze(audioList[[audioName]]@left, samplingRate = audioList[[audioName]]@samp.rate, plot = FALSE, osc = FALSE, summaryFun = c("mean", "sd"))
analyzeData <- analyzeData$summary
if(extended){
audioData[rowsFilter,colnames(analyzeData)] <- analyzeData
}
else{
audioData[rowsFilter, measures[1]] <- duration(audioList[[audioName]])
audioData[rowsFilter, measures[2]] <- 1 - analyzeData$voiced
audioData[rowsFilter, measures[3]] <- rms(env(zapsilw(audioList[[audioName]], plot = FALSE),f=audioList[[audioName]]@samp.rate, plot = FALSE))
audioData[rowsFilter, measures[4]] <- analyzeData$loudness_mean
audioData[rowsFilter, measures[5]] <- analyzeData$pitch_mean
audioData[rowsFilter, measures[6]] <- analyzeData$pitch_sd
audioData[rowsFilter, measures[7]] <- analyzeData$entropy_mean
audioData[rowsFilter, measures[8]] <- analyzeData$HNR_mean
}
}, error = function(e) {
warning(paste(audioName, "excluded from the analysis. File corrupted or with no sound."))
})
}
}
#Postprocess check: Remove empty audios
audioData <- audioData[!is.na(audioData$duration),]
audioData <- audioData[,!is.na(colnames(audioData))]
rownames(audioData) <- c()
if(extended){
audioData <- audioData[,!colnames(audioData) %in% initialMeasures[-1]]
}
#Return the filled data.frame with all the vocal measurements
return(audioData)
}
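
## Hedged usage sketch (not run): a typical call to autoExtract() on the bundled demo
## recordings. Argument names below mirror those referenced in the function body and in
## the package's other helpers; treat the exact values as assumptions.
# library(voiceR)
# audioData <- autoExtract(audioList = testAudioList,
#                          fileNamePattern = "ID_Condition_Dimension", sep = "_",
#                          preprocess = TRUE, parallel = FALSE)
# head(audioData)   # one row per recording, one column per extracted measure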
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/autoExtract.R
|
#' Generate a Summary Report
#'
#' Generates a summary report containing the output of autoExtract(), normalityPlots() and comparisonPlots().
#'
#' @param audioData A data.frame generated by autoExtract() function.
#' @param savePath Character string indicating the full path to the folder to which we want to save the generated report. By default it is set to the current working directory.
#' @param includeDimensions Logical value indicating whether Dimensions should be also included as a factor or not. Default corresponds to FALSE.
#' @param avoidNormalCheck Logical vector indicating which measures were already transformed to normality and should therefore not be checked again. By default it is set to FALSE for each of the measures. Alternatively, you can set it to TRUE to skip the normality check for all measures.
#' @param filename Optional character string indicating the file name of the generated report. Default corresponds to "voiceR_report.html".
#' @return An HTML report file saved to the selected path; the function itself returns no value.
#' @examples
#' \donttest{
#' autoReport(audioData = testAudioData)
#' }
#'
#' @importFrom rmarkdown render html_document
#' @export
autoReport <- function(audioData, savePath = getwd(), includeDimensions = FALSE, avoidNormalCheck = c(), filename = "voiceR_report.html"){
if(!is.character(savePath) || !file.exists(savePath)) stop("Invalid savePath!")
if(!is.data.frame(audioData)) stop("audioData must be a data.frame")
if(!is.character(filename)) stop("filename must be a string!")
if(!is.logical(includeDimensions)) stop("includeDimensions must be a boolean!")
#Use the location for the report template (Rmd file)
src <- system.file("AppShiny/report.Rmd", package = "voiceR")
measures <- c("duration", "voice_breaks_percent", "RMS_env", "mean_loudness", "mean_F0", "sd_F0", "mean_entropy", "mean_HNR")
if(length(avoidNormalCheck) == 1 && avoidNormalCheck) avoidNormalCheck <- rep(TRUE, length(measures))
# temporarily switch to the temp dir, in case you do not have write
# permission to the current working directory
owd <- setwd(tempdir())
on.exit(setwd(owd))
#copy file
file.copy(src, 'report.Rmd', overwrite = TRUE)
#Create comparison and normality plots
comparisons <- comparisonPlots(audioData, by = "Condition")
normalPlots <- normalityPlots(audioData)
comparisons2 <- list()
normalPlots2 <- list()
for (i in 1:length(measures)) {
measure <- measures[i]
comparisons2[[i]] <- comparisons[[measure]]
normalPlots2[[i]] <- normalPlots[[measure]]
}
if(length(avoidNormalCheck) == 0)
avoidNormalCheck <- rep(FALSE, 8)
#Send these parameters to the R markdown document and render the file
params <- list(audioData = audioData, comparisons = comparisons2, normalPlots = normalPlots2, includeDimensions = includeDimensions, avoidNormalCheck = avoidNormalCheck)
out <- render('report.Rmd', html_document(), params = params, envir = new.env())
  file.rename(out, file.path(savePath, filename))
}
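
## Hedged usage sketch (not run): normalize the measures first and forward the resulting
## avoidNormalCheck vector to autoReport(); the save path and file name below are assumptions.
# norm <- normalizeData(testAudioData, includeConditions = TRUE)
# autoReport(norm$audioData,
#            savePath = tempdir(),
#            avoidNormalCheck = norm$avoidNormalCheck,
#            filename = "voiceR_report.html")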
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/autoReport.R
|
#' Create boxplots for extracted audio features
#'
#' Generates boxplots for each extracted feature. Plots can be split by experimental condition.
#'
#' @param audioData A data.frame generated by the autoExtract() function.
#' @param by An optional character vector indicating column(s) from which the comparison groups are to be retrieved.
#' @param measures An optional character vector indicating the name of the variables to be plotted.
#' @param normalSig Significance level used to test the normality assumption. Default corresponds to 0.05.
#' @param avoidNormalCheck Logical value forcing the function to skip the normality check. When set to TRUE, the data are assumed to be normally distributed. Default corresponds to FALSE.
#' @return A list containing the generated boxplots.
#' @examples
#' comparisonPlots(testAudioData, by = "Condition")
#'
#' @importFrom ggplot2 ggplot geom_boxplot aes theme element_blank ggplot_build annotation_custom unit
#' @importFrom gtable gtable_add_grob
#' @importFrom gridExtra tableGrob ttheme_minimal
#' @importFrom grid segmentsGrob gpar
#' @importFrom ggpubr stat_compare_means stat_pvalue_manual
#' @importFrom utils combn
#' @importFrom FSA dunnTest
#' @importFrom stats as.formula kruskal.test TukeyHSD aov
#' @importFrom stringr str_split
#' @importFrom rcompanion scheirerRayHare
#' @importFrom rlang !! sym
#' @export
comparisonPlots <- function(audioData, by = c(), measures = c("duration", "voice_breaks_percent", "RMS_env", "mean_loudness", "mean_F0", "sd_F0", "mean_entropy", "mean_HNR")
, normalSig = 0.05, avoidNormalCheck = FALSE){
if(!is.data.frame(audioData)) stop("audioData should be a data.frame produced by autoExtract")
if(!all(measures %in% c("duration", "voice_breaks_percent", "RMS_env", "mean_loudness", "mean_F0", "sd_F0", "mean_entropy", "mean_HNR"))) {
stop("measures should contain: duration, voice_breaks_percent, RMS_env, mean_loudness, mean_F0, sd_F0, mean_entropy, or mean_HNR")
}
  if(!is.numeric(normalSig) || normalSig < 0 || normalSig > 1) stop("normalSig should be numeric and between 0 and 1")
if(length(avoidNormalCheck) != 1) stop("avoidNormalCheck should have length 1")
#Measures for which we want to generate plots
my_comparisons <- list( c("1", "2") )
#Empty list, in which we will store the plots
plots <- list()
i <- 1
#If we only use one variable to create comparisons, just use that variable for both dimensions
if(length(by) == 1){
dimension1 <- by
dimension2 <- by
}
#Otherwise use a different variable for each dimension
else if(length(by) == 2){
dimension1 <- by[1]
dimension2 <- by[2]
}
else{
warning("No comparison Variables set.")
}
#When there's at least one comparison variable create the plots
if(length(by) >= 1){
numberCategoriesDim1 <- length(unique(audioData[,dimension1]))
audioData[,dimension1] <- as.factor(audioData[,dimension1])
numberCategoriesDim2 <- length(unique(audioData[,dimension2]))
audioData[,dimension2] <- as.factor(audioData[,dimension2])
#Create a plot for each measure
for (measure in measures) {
      #If the measure is almost entirely missing, do not plot it
      if(sum(is.na(audioData[,measure])) >= 0.92 * length(audioData[,measure])){
        warning(paste(measure, "is almost entirely NA. Plot for", measure, "not generated."))
next
}
#Check if data is normally distributed
normalTable <- tableNormality(audioData, measure = measure, includeDimensions = "Dimension" %in% by)
if(is.character(normalTable)) stop("Sample size not big enough. Please consider increasing your sample.")
if(min(normalTable$N) < 3){
if("Dimension" %in% by){
warning("Only checking condition, as N per Condition and Dimension is lower than 3")
dimension1 <- "Condition"
dimension2 <- "Condition"
}
else{
stop("Not enough data to check comparisons")
}
}
pValue <- min(normalTable$pValue)
#Generate the plot comparing means depending on number of comparison variables and number of factors per comparison variable:
# One factor:
# - Normality and only 2 groups: t-test.
# - Normality and more than 2 groups: ANOVA and Tukey's test.
# - Non-normality and only 2 groups: Wilcoxon's Test.
      # - Non-normality and more than 2 groups: Kruskal-Wallis test and Dunn's test.
# Two factors:
# - Normality: 2-way ANOVA.
# - Non-normality: Scheirer-Ray-Hare test.
plots[[measure]] <- local({
i <- i
if(numberCategoriesDim2 == 2 & length(by) == 1){
if(pValue < normalSig & !avoidNormalCheck){
p1 <- ggplot(audioData, aes(x=!!sym(dimension1), y=!!sym(measure), color = !!sym(dimension2))) +
geom_boxplot() +
stat_compare_means(method = "wilcox.test", label.y = max(audioData[,measure], na.rm = TRUE)*1.1, label.x = 1) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
}
else{
p1 <- ggplot(audioData, aes(x=!!sym(dimension1), y=!!sym(measure), color = !!sym(dimension2))) +
geom_boxplot() +
stat_compare_means(method = "t.test", label.y = max(audioData[,measure], na.rm = TRUE)*1.1, label.x = 1) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
}
}
else{
if(length(by) == 1){
x <- combn(seq(1,numberCategoriesDim1), 2)
my_comparisons <- lapply(seq_len(ncol(x)), function(i) x[,i])
if(pValue < normalSig & !avoidNormalCheck){
dunnData <- dunnTest(as.formula(paste0(measure, "~", dimension1)), data = audioData)
dunnData <- dunnData$res
dunnData$significance <- NA
if(length(dunnData[dunnData$P.adj > 0.1, "significance"]) > 0)
dunnData[dunnData$P.adj > 0.1, "significance"] <- "ns"
if(length(dunnData[dunnData$P.adj < 0.1, "significance"]) > 0)
dunnData[dunnData$P.adj < 0.1, "significance"] <- "+"
if(length(dunnData[dunnData$P.adj < 0.05, "significance"]) > 0)
dunnData[dunnData$P.adj < 0.05, "significance"] <- "*"
if(length(dunnData[dunnData$P.adj < 0.01, "significance"]) > 0)
dunnData[dunnData$P.adj < 0.01, "significance"] <- "**"
if(length(dunnData[dunnData$P.adj < 0.001, "significance"]) > 0)
dunnData[dunnData$P.adj < 0.001, "significance"] <- "***"
dunnData$group1 <- str_split(dunnData$Comparison, "-", simplify = T)[,1]
dunnData$group2 <- str_split(dunnData$Comparison, "-", simplify = T)[,2]
dunnData$group1 <- gsub(" ", "", dunnData$group1, fixed = TRUE)
dunnData$group2 <- gsub(" ", "", dunnData$group2, fixed = TRUE)
dunnData <- dunnData[dunnData$P.adj < 0.1,]
p1 <- ggplot(audioData, aes(x=!!sym(dimension1), y=!!sym(measure), color = !!sym(dimension2))) +
geom_boxplot() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplotData <- ggplot_build(p1)
multiplier <- (max(audioData[,measure], na.rm = TRUE)-min(audioData[,measure], na.rm = TRUE))*0.08
ypos <- seq((max(ggplotData$data[[1]]$ymax, na.rm = TRUE)+multiplier), max(ggplotData$data[[1]]$ymax, na.rm = TRUE) + multiplier*(nrow(dunnData)+1) , by = multiplier)[1:nrow(dunnData)]
kruskalTestData <- kruskal.test(as.formula(paste0(measure, "~", dimension1)), audioData)
p1 <- p1 +
stat_compare_means(method = "kruskal.test", label.y = ggplotData$layout$panel_scales_y[[1]]$range$range[2] - (ggplotData$layout$panel_scales_y[[1]]$range$range[2] - ggplotData$layout$panel_scales_y[[1]]$range$range[1])*0.15, label.x = 1.3)
if(nrow(dunnData) > 0){
p1 <- p1 + stat_pvalue_manual(dunnData, label = "significance", y.position = ypos, tip.length = 0)
}
}
else{
tukeyData <- TukeyHSD(aov(as.formula(paste0(measure, "~", dimension1)), data = audioData))
            tukeyData <- as.data.frame(tukeyData[[dimension1]])
tukeyData$significance <- NA
if(length(tukeyData[tukeyData$`p adj` > 0.1, "significance"]) > 0)
tukeyData[tukeyData$`p adj` > 0.1, "significance"] <- "ns"
if(length(tukeyData[tukeyData$`p adj` < 0.1, "significance"]) > 0)
tukeyData[tukeyData$`p adj` < 0.1, "significance"] <- "+"
if(length(tukeyData[tukeyData$`p adj` < 0.05, "significance"]) > 0)
tukeyData[tukeyData$`p adj` < 0.05, "significance"] <- "*"
if(length(tukeyData[tukeyData$`p adj` < 0.01, "significance"]) > 0)
tukeyData[tukeyData$`p adj` < 0.01, "significance"] <- "**"
if(length(tukeyData[tukeyData$`p adj` < 0.001, "significance"]) > 0)
tukeyData[tukeyData$`p adj` < 0.001, "significance"] <- "***"
tukeyData$group1 <- str_split(rownames(tukeyData), "-", simplify = T)[,1]
tukeyData$group2 <- str_split(rownames(tukeyData), "-", simplify = T)[,2]
tukeyData <- tukeyData[tukeyData$`p adj` < 0.1,]
p1 <- ggplot(audioData, aes(x=!!sym(dimension1), y=!!sym(measure), color = !!sym(dimension2))) +
geom_boxplot() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplotData <- ggplot_build(p1)
multiplier <- (max(audioData[,measure], na.rm = TRUE)-min(audioData[,measure], na.rm = TRUE))*0.08
ypos <- seq((max(ggplotData$data[[1]]$ymax, na.rm = TRUE)+multiplier), max(ggplotData$data[[1]]$ymax, na.rm = TRUE) + multiplier*(nrow(tukeyData)+1) , by = multiplier)[1:nrow(tukeyData)]
AnovaTestData <- summary(aov(as.formula(paste0(measure, "~", dimension1)), audioData))
p1 <- p1 +
stat_compare_means(method = "anova", label.y = ggplotData$layout$panel_scales_y[[1]]$range$range[2] - (ggplotData$layout$panel_scales_y[[1]]$range$range[2] - ggplotData$layout$panel_scales_y[[1]]$range$range[1])*0.15, label.x = 1.3)
if(nrow(tukeyData) > 0){
p1 <- p1 + stat_pvalue_manual(tukeyData, label = "significance", y.position = ypos, tip.length = 0)
}
}
}
if(length(by) == 2){
if(pValue < normalSig & !avoidNormalCheck){
res.schreirer <- scheirerRayHare(as.formula(paste(measure, "~", dimension1, "*", dimension2)), audioData, verbose = FALSE)
row.names(res.schreirer)[3] <- "Interaction"
annotationTable <- data.frame('Scheirer-Ray-Hare' = rownames(res.schreirer)[1:3], pValue = round(res.schreirer[1:3,"p.value"], 2))
annotationTable$Scheirer.Ray.Hare[3] <- "Interaction"
p1 <- ggplot(audioData, aes(x=!!sym(dimension1), y=!!sym(measure), color = !!sym(dimension2))) +
geom_boxplot() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplotData <- ggplot_build(p1)
tbl <- tableGrob(annotationTable, theme = ttheme_minimal(), rows=NULL, cols= c("Scheirer-Ray-Hare", "p-Value"))
tbl <- gtable_add_grob( tbl, grobs = segmentsGrob( name = 'segment', y1 = unit( 0, 'npc' ), gp = gpar( lty = 1, lwd = 1 ) ),
t = 1, l = 1, r = ncol( tbl ) )
p1 <- p1 + annotation_custom(tbl, ymin = ggplotData$layout$panel_scales_y[[1]]$range$range[2] - (ggplotData$layout$panel_scales_y[[1]]$range$range[2] - ggplotData$layout$panel_scales_y[[1]]$range$range[1])*0.25, xmin = 1, xmax = 2)
}
else{
res.aov2 <- aov(as.formula(paste(measure, "~", dimension1, "*", dimension2)), data = audioData)
res.aov2 <- summary(res.aov2)[[1]]
            annotationTable <- data.frame('2-way ANOVA' = rownames(res.aov2)[1:3], pValue = round(res.aov2[1:3,"Pr(>F)"], 2))
            annotationTable[3, 1] <- "Interaction"
text <- " Two-way ANOVA p-value\n"
for (i in 1:(nrow(res.aov2)-1)) {
text <- paste(text, trimws(rownames(res.aov2)[i]), paste0(rep(" ", 30 - length(trimws(rownames(res.aov2)[i]))), collapse = ""), round(res.aov2$`Pr(>F)`[i], 2), "\n")
}
p1 <- ggplot(audioData, aes(x=!!sym(dimension1), y=!!sym(measure), color = !!sym(dimension2))) +
geom_boxplot() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplotData <- ggplot_build(p1)
tbl <- tableGrob(annotationTable, theme = ttheme_minimal(), rows=NULL, cols= c("2-way ANOVA", "p-Value"))
tbl <- gtable_add_grob( tbl, grobs = segmentsGrob( name = 'segment', y1 = unit( 0, 'npc' ), gp = gpar( lty = 1, lwd = 1 ) ),
t = 1, l = 1, r = ncol( tbl ) )
p1 <- p1 + annotation_custom(tbl, ymin = ggplotData$layout$panel_scales_y[[1]]$range$range[2] - (ggplotData$layout$panel_scales_y[[1]]$range$range[2] - ggplotData$layout$panel_scales_y[[1]]$range$range[1])*0.25, xmin = 1, xmax = 2)
}
}
}
p1
})
}
}
else{
for (measure in measures) {
plots[[measure]] <- local({
p1 <- ggplot(audioData, aes(x=factor(0), y=!!sym(measure))) +
geom_boxplot() + theme(axis.title.x= element_blank(),
axis.text.x= element_blank(), axis.ticks.x= element_blank()) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
p1
})
}
}
return(plots)
}
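
## Hedged usage sketch (not run): comparisonPlots() returns a named list of ggplot objects,
## so individual measures can be pulled out, printed, or saved. The output file name is a
## hypothetical example.
# plots <- comparisonPlots(testAudioData, by = "Condition")
# plots[["mean_F0"]]                                           # boxplot of mean F0 by Condition
# ggplot2::ggsave("mean_F0_boxplot.png", plots[["mean_F0"]])   # save one plot to disk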
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/comparisonPlots.R
|
#' Create an Empty data.frame
#'
#' Internal function which creates an empty data.frame in which the different audio files represent rows and the extracted measures represent columns. Several options can be configured such as joining dimensions or separating conditions.
#'
#' @param path An optional character string indicating the path to the folder containing the audio files. Default corresponds to current working directory.
#' @param audioList Optional list with already loaded Wave objects to analyze.
#' @param fileType Character string indicating the file format (wav or mp3) of the audio files. Default corresponds to wav.
#' @param fileNamePattern A character string indicating the naming format, such as "ID-Condition-Dimension", "Condition_ID_Dimension" or "ID". Default corresponds to "ID_Condition_Dimension".
#' @param sep A non-alphanumeric character that acts as separator between the different naming components. Default corresponds to underscore. This field can be ignored if the audio file names only contain an ID component.
#' @param measures A character vector of measures that should appear in the data frame columns.
#' @param jointDimensions Logical value indicating whether dimensions should be joined into a single column or not. Default corresponds to FALSE.
#' @param separateConditions Logical value indicating whether conditions should be separated or not. Default corresponds to TRUE.
#' @param filter Optional character vector indicating IDs, Conditions, Dimensions or other name patterns. Default corresponds to NA.
#' @param recursive Logical value indicating whether subdirectories should be included when searching for audio files. Default corresponds to FALSE.
#' @return An empty data.frame in which ID's represent rows and dimensions/measures represent columns.
#'
#' @importFrom stringr str_remove_all
createEmptyDF <-
function(path = ".", audioList = list(), fileType = "wav", fileNamePattern = "ID_Condition_Dimension", sep = "_",
measures = c(), jointDimensions = FALSE, separateConditions = TRUE, filter = NA, recursive = FALSE) {
if(!is.character(path) || !file.exists(path)) stop("Invalid path!")
if(!is.list(audioList)) stop("audioList must be a list of Wave objects!")
if(!is.character(fileType)) stop("fileType must be a string!")
if(!is.character(fileNamePattern)) stop("fileNamePattern must be a string!")
if (!grepl("[^[:alnum:]]", sep)) stop("Error: sep should be a non-alphanumeric character")
if(!is.logical(recursive)) stop("recursive must be a boolean!")
if(!is.logical(separateConditions)) stop("separateConditions must be a boolean!")
if(!is.logical(jointDimensions)) stop("separateConditions must be a boolean!")
if(length(audioList) == 0){
if(length(filter) == 1){
if(is.na(filter)){
files <- list.files(path = path, pattern = fileType, recursive=recursive, full.names = FALSE)
}
else{
files <- list.files(path = path, pattern = filter, recursive=recursive, full.names = FALSE)
files <- files[grep(fileType, files)]
}
}
else{
if(length(filter) > 1)
files <- list.files(path = path, pattern = paste(as.character(filter),collapse="|"), recursive = recursive, full.names = FALSE)
else
files <- list.files(path = path, pattern = as.character(filter), recursive = recursive, full.names = FALSE)
files <- files[grep(fileType, files)]
}
processedNames <-
str_remove_all(files, pattern = paste0("\\.", fileType))
}
else{
processedNames <- names(audioList)
}
componentsAndPositions <- getComponents(processedNames, fileNamePattern, sep)
    userIds <-
      unique(componentsAndPositions[["Components"]][["ID"]])
    conditions <- if("Condition" %in% colnames(componentsAndPositions[["Components"]])) unique(componentsAndPositions[["Components"]][["Condition"]]) else character(0)
    dimensions <- if("Dimension" %in% colnames(componentsAndPositions[["Components"]])) unique(componentsAndPositions[["Components"]][["Dimension"]]) else character(0)
if(separateConditions & length(as.vector(conditions)) > 0){
if(jointDimensions | length(as.vector(dimensions)) == 0){
rowNumber <- length(userIds) * max(length(as.vector(conditions)), 1)
}
else{
rowNumber <- length(userIds) * length(dimensions) * max(length(as.vector(conditions)), 1)
}
}
else{
if(jointDimensions | length(as.vector(dimensions)) == 0){
rowNumber <- length(userIds)
}
else{
rowNumber <- length(userIds) * length(dimensions)
}
}
MeasuresDimensionsDF <-
(data.frame(matrix(
nrow = rowNumber,
ncol = length(measures)
)))
colnames(MeasuresDimensionsDF) <- measures
if(!jointDimensions & separateConditions){
      tempData <- expand.grid(userIds, conditions, dimensions)
colnames(tempData) <- c("ID", "Condition", "Dimension")
} else if(!jointDimensions){
tempData <- expand.grid(userIds, dimensions)
colnames(tempData) <- c("ID", "Dimension")
} else if(separateConditions) {
tempData <- expand.grid(userIds, conditions)
colnames(tempData) <- c("ID", "Condition")
} else{
tempData <- as.data.frame(userIds)
colnames(tempData) <- "ID"
}
MeasuresDimensionsDF <- cbind(MeasuresDimensionsDF, tempData)
return(MeasuresDimensionsDF)
}
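
## Hedged sketch (not run) of how this internal helper is typically invoked: one row per
## ID x Condition x Dimension combination, with NA columns for the requested measures.
# emptyDF <- createEmptyDF(audioList = testAudioList,
#                          fileNamePattern = "ID_Condition_Dimension", sep = "_",
#                          measures = c("duration", "mean_F0"))
# dim(emptyDF)   # rows = IDs x Conditions x Dimensions; columns = measures + ID/Condition/Dimension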
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/createEmptyDF.R
|
#' Get Components Name and Positions
#'
#' Indicates the presence and order in which components are retrieved from the file name of each recording.
#'
#' @param fileNames A character vector of audio file names.
#' @param fileNamePattern A character string indicating the naming format of the audio files, such as "ID-Condition-Dimension", "Condition_ID_Dimension" or "ID". Default corresponds to "ID_Condition_Dimension".
#' @param sep A non-alphanumeric character that acts as separator between the different naming components. Default corresponds to underscore.
#' @return A list, containing a vector of positions for each component and a data.frame containing the values for each component of the audio files.
#' @examples
#' getComponents(names(testAudioList), fileNamePattern = "ID_Condition_", sep = "_")
#'
#' @importFrom stringr str_split
#' @export
getComponents <- function(fileNames, fileNamePattern = "ID_Condition_Dimension", sep = "_"){
if(length(fileNames) == 0){
stop("File names are empty")
}
if(!is.character(fileNamePattern)) stop("fileNamePattern must be a string!")
if (!grepl("[^[:alnum:]]", sep)) stop("Error: sep should be a non-alphanumeric character")
#Split string using the provided separator
fileNamePattern1 <- str_split(fileNamePattern, sep)
#convert text to lower case
fileNamePattern1 <- as.vector(sapply(fileNamePattern1, tolower))
#Check for the position of the ID field
idPosition <- which(fileNamePattern1 %in% "id")
#Check for the position of the Condition field
conditionPosition <- which(fileNamePattern1 %in% "condition")
#Check for the position of the dimension field
dimensionPosition <- which(fileNamePattern1 %in% "dimension")
#Get the original names but separated
processedNames <- str_split(fileNames, sep, simplify = TRUE)
#Check if some component is missing and set the value of 99 if it is
if(length(conditionPosition) == 0){
conditionPosition <- 99
}
else if(conditionPosition > ncol(processedNames)){
conditionPosition <- 99
warning("Attention: the pattern you defined includes conditions.
But the names do not have as many divisions (ignoring conditions component)")
}
else{
conditions <- unique(processedNames[,conditionPosition])
}
if(length(dimensionPosition) == 0){
dimensionPosition <- 99
}
else if(dimensionPosition > ncol(processedNames)){
dimensionPosition <- 99
warning("Attention: the pattern you defined includes dimensions.
But the names do not have as many divisions (ignoring dimensions component)")
}
else{
dimensions <- unique(processedNames[,dimensionPosition])
}
#Create a data frame containing the component names and their positions
positions <- data.frame(ID = idPosition, Condition = conditionPosition, Dimension = dimensionPosition)
processedNames <- as.data.frame(processedNames)
colnames(processedNames)[positions[positions < 99]] <- colnames(positions)[positions < 99]
processedNames <- processedNames[,!is.na(colnames(processedNames)), drop=FALSE]
cols_to_keep <- c("ID", "Condition", "Dimension")
processedNames <- processedNames[,colnames(processedNames) %in% cols_to_keep, drop=FALSE]
finalList <- list(Positions = positions, Components = processedNames)
return(finalList)
}
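
## Hedged sketch (not run): for a file called "01_phone_neutral" (a hypothetical name,
## extension already stripped) and the default "ID_Condition_Dimension" pattern,
## getComponents() returns the component positions and the parsed components.
# comp <- getComponents("01_phone_neutral", fileNamePattern = "ID_Condition_Dimension", sep = "_")
# comp$Positions    # data.frame: ID = 1, Condition = 2, Dimension = 3
# comp$Components   # data.frame with columns ID, Condition and Dimension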
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/getComponents.R
|
#' Get Conditions
#'
#' Retrieves the experimental conditions from the file name following a naming pattern in which the various components (IDs, conditions, and dimensions) are separated by a non-alphanumeric character.
#'
#' @param path A character string indicating the path to the folder containing the audio files. Default corresponds to the current working directory.
#' @param audioList Optional list with Wave objects to analyze.
#' @param fileType Character string indicating the file format (wav or mp3) of the audio files. Default corresponds to wav.
#' @param fileNamePattern Character string indicating the naming format of the audio files, such as "ID-Condition-Dimension", "Condition_ID_Dimension" or "ID". Default corresponds to "ID_Condition_Dimension".
#' @param sep A non-alphanumeric character that acts as separator between the different naming components. Default corresponds to underscore.
#' @param filter Optional character vector used to filter for specific audio files. Default corresponds to NULL.
#' @param recursive A logical value indicating whether subdirectories should be included when searching for voice files. Default corresponds to FALSE.
#' @return Character vector, which contains all the unique conditions of the voice files extracted from the name pattern of the audio files.
#' @examples
#' getConditions(audioList = testAudioList,
#' fileNamePattern = "ID_Condition_Dimension", sep = "_")
#'
#' @importFrom stringr str_extract_all str_replace_all str_split
#' @importFrom xfun file_ext
#' @export
getConditions <- function(path = ".", audioList = list(), fileType = "wav", fileNamePattern = "ID_Condition_Dimension", sep = "_", filter = NULL, recursive=FALSE) {
if(is.list(path)){
stop("Error file path is a list. If you meant to use a Wave list object, specify the audioList parameter")
}
if(!is.character(path) || !file.exists(path)) stop("Invalid path!")
if(!is.list(audioList)) stop("audioList must be a list of Wave objects!")
if(!is.character(fileType)) stop("fileType must be a string!")
if(!is.character(fileNamePattern)) stop("fileNamePattern must be a string!")
if (!grepl("[^[:alnum:]]", sep)) stop("Error: sep should be a non-alphanumeric character")
if(!is.logical(recursive)) stop("recursive must be a boolean!")
path <- file.path(path)
  # Read files from path if no audioList is supplied
  if (is.null(audioList) || length(audioList) == 0) {
# Find files matching filter and file type in path
files <- list.files(path, recursive = recursive, full.names = TRUE, include.dirs = FALSE)
if (!is.null(filter) && length(filter) > 0) {
files <- files[grep(paste(filter, collapse = "|"), files)]
}
if (fileType != "") {
files <- files[grep(paste0(".", fileType, "$"), files)]
}
if (length(files) == 0) {
stop("Error, no files found in the specified directory")
}
processedNames <- str_extract_all(files, "(?<=/|^)[^/]+(?=\\.[^/]+$)")
} else {
if(!is.null(filter) && length(filter) > 0) audioList <- audioList[grep(filter, names(audioList))]
processedNames <- names(audioList)
}
# Split file names into components and determine position of dimension component
components <- str_split(processedNames, sep, simplify = TRUE)
fileNamePattern <- tolower(fileNamePattern)
cond_pos <- match("condition", str_split(fileNamePattern, sep, simplify = TRUE)[1, ])
if (is.na(cond_pos)) {
conditions <- character()
} else {
conditions <- unique(components[, cond_pos])
}
return(conditions)
}
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/getConditions.R
|
#' Get Dimensions
#'
#' Retrieves the unique dimensions from the file name of multiple audio files following a naming pattern in which the various components (IDs, dimensions, and conditions) are separated by a non-alphanumeric character.
#'
#' @param path A character string indicating the path to the folder containing the audio files. Default corresponds to the current working directory.
#' @param audioList Optional list with Wave objects to analyze.
#' @param fileType Character string indicating the file format (wav or mp3) of the audio files. Default corresponds to wav.
#' @param fileNamePattern Character string indicating the naming format of the audio files, such as "ID-Condition-Dimension", "Condition_ID_Dimension" or "ID". Default corresponds to "ID_Condition_Dimension".
#' @param sep A non-alphanumeric character that acts as separator between the different naming components. Default corresponds to underscore.
#' @param filter Optional character vector to filter for specific audio files. Default corresponds to NULL.
#' @param recursive A logical value indicating whether subdirectories should be included when searching for voice files. Default corresponds to FALSE.
#' @return Character vector, which contains all the unique dimensions of the voice files found in the specified directory.
#' @examples
#' getDimensions(audioList = testAudioList,
#' fileNamePattern = "ID_Condition_Dimension", sep = "_")
#'
#' @importFrom stringr str_extract_all str_replace_all str_split
#' @importFrom xfun file_ext
#' @export
getDimensions <- function(path = ".", audioList = list(), fileType = "wav", fileNamePattern = "ID_Condition_Dimension", sep = "_", filter = NULL, recursive = FALSE) {
if(is.list(path)){
stop("Error file path is a list. If you meant to use a Wave list object, specify the audioList parameter")
}
if(!is.character(path) || !file.exists(path)) stop("Invalid path!")
if(!is.list(audioList)) stop("audioList must be a list of Wave objects!")
if(!is.character(fileType)) stop("fileType must be a string!")
if(!is.character(fileNamePattern)) stop("fileNamePattern must be a string!")
if (!grepl("[^[:alnum:]]", sep)) stop("Error: sep should be a non-alphanumeric character")
if(!is.logical(recursive)) stop("recursive must be a boolean!")
path <- file.path(path)
  # Read files from path if no audioList is supplied
  if (is.null(audioList) || length(audioList) == 0) {
# Find files matching filter and file type in path
files <- list.files(path, recursive = recursive, full.names = TRUE, include.dirs = FALSE)
if (!is.null(filter) && length(filter) > 0) {
files <- files[grep(paste(filter, collapse = "|"), files)]
}
if (fileType != "") {
files <- files[grep(paste0(".", fileType, "$"), files)]
}
if (length(files) == 0) {
stop("Error, no files found in the specified directory")
}
processedNames <- str_extract_all(files, "(?<=/|^)[^/]+(?=\\.[^/]+$)")
} else {
if(!is.null(filter) && length(filter) > 0) audioList <- audioList[grep(filter, names(audioList))]
processedNames <- names(audioList)
}
# Split file names into components and determine position of dimension component
components <- str_split(processedNames, sep, simplify = TRUE)
fileNamePattern <- tolower(fileNamePattern)
dim_pos <- match("dimension", str_split(fileNamePattern, sep, simplify = TRUE)[1, ])
if (is.na(dim_pos)) {
dimensions <- character()
} else {
dimensions <- unique(components[, dim_pos])
}
return(dimensions)
}
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/getDimensions.R
|
#' Get IDs
#'
#' Retrieves the unique IDs from the file name of multiple audio files following a naming pattern in which the various components (IDs, dimensions, conditions) are separated by a non-alphanumeric character.
#'
#' @param path A character string indicating the path to the folder containing the audio files. Default corresponds to the current working directory.
#' @param audioList Optional list with Wave objects to analyze.
#' @param fileType Character string indicating the file format (wav or mp3) of the audio files. Default corresponds to wav.
#' @param fileNamePattern Character string indicating the naming format of the audio files, such as "ID-Condition-Dimension", "Condition_ID_Dimension" or "ID". Default corresponds to "ID_Condition_Dimension".
#' @param sep A non-alphanumeric character that acts as separator between the different naming components. Default corresponds to underscore.
#' @param filter Optional character vector used to filter for specific audio files. Default corresponds to NULL.
#' @param recursive A logical value indicating whether subdirectories should be included when searching for voice files. Default corresponds to FALSE.
#' @return Character vector, which contains all the unique IDs extracted from the name pattern of the audio files.
#' @examples
#' getIds(audioList = testAudioList,
#' fileNamePattern = "ID_Condition_Dimension", sep = "_")
#'
#' @importFrom stringr str_extract_all str_replace_all str_split
#' @importFrom xfun file_ext
#' @export
getIds <- function(path = ".", audioList = NULL, fileType = "wav", fileNamePattern = "ID_Condition_Dimension", sep = "_", filter = NULL, recursive = FALSE) {
if(is.list(path)){
stop("Error file path is a list. If you meant to use a Wave list object, specify the audioList parameter")
}
if(!is.character(path) || !file.exists(path)) stop("Invalid path!")
if(!is.list(audioList)) stop("audioList must be a list of Wave objects!")
if(!is.character(fileType)) stop("fileType must be a string!")
if(!is.character(fileNamePattern)) stop("fileNamePattern must be a string!")
if (!grepl("[^[:alnum:]]", sep)) stop("Error: sep should be a non-alphanumeric character")
if(!is.logical(recursive)) stop("recursive must be a boolean!")
path <- file.path(path)
# Read files from path if audioList is NULL
if (is.null(audioList)) {
# Find files matching filter and file type in path
files <- list.files(path, recursive = recursive, full.names = TRUE, include.dirs = FALSE)
if (!is.null(filter) && length(filter) > 0) {
files <- files[grep(paste(filter, collapse = "|"), files)]
}
if (fileType != "") {
files <- files[grep(paste0(".", fileType, "$"), files)]
}
if (length(files) == 0) {
stop("Error, no files found in the specified directory")
}
processedNames <- str_extract_all(files, "(?<=/|^)[^/]+(?=\\.[^/]+$)")
} else {
if(!is.null(filter) && length(filter) > 0) audioList <- audioList[grep(filter, names(audioList))]
processedNames <- names(audioList)
}
# Split file names into components and determine position of dimension component
components <- str_split(processedNames, sep, simplify = TRUE)
fileNamePattern <- tolower(fileNamePattern)
id_pos <- match("id", str_split(fileNamePattern, sep, simplify = TRUE)[1, ])
if (is.na(id_pos)) {
ids <- character()
} else {
ids <- unique(components[, id_pos])
}
return(ids)
}
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/getIds.R
|
#' What IDs have missing dimensions?
#'
#' Indicates whether and which dimensions are missing for each ID.
#'
#' @param path Character string indicating the path to the folder containing the audio files. Default corresponds to the current working directory.
#' @param audioList Optional list with Wave objects to analyze.
#' @param ids Character vector indicating the IDs of the files.
#' @param fileType Character string indicating the file format (wav or mp3) of the audio files. Default corresponds to wav.
#' @param fileNamePattern Character string indicating the naming format of the audio files, such as "ID-Condition-Dimension", "Condition_ID_Dimension" or "ID". Default corresponds to "ID_Condition_Dimension".
#' @param sep A non-alphanumeric character that acts as separator between the different naming components. Default corresponds to underscore.
#' @param recursive A logical value indicating whether subdirectories should be included when searching for voice files. Default corresponds to FALSE.
#' @return A data.frame, in which rows represent IDs and columns represent missing vs. present Dimensions.
#' @export
#' @examples
#' MissDimPerId(audioList = testAudioList)
MissDimPerId <- function(path = ".", audioList = NULL, ids = c(), fileType = "wav", fileNamePattern = "ID_Condition_Dimension", sep = "_", recursive=FALSE){
# Validate parameters
if (!is.character(path) || length(path) != 1) stop("path must be a single character string")
if (!is.null(audioList) && !is.list(audioList)) stop("audioList must be a list")
if (length(ids) == 0) {
warning("No IDs provided, using all IDs")
ids <- getIds(path = path, audioList = audioList, fileType = fileType, fileNamePattern = fileNamePattern, sep = sep, recursive = recursive)
}
if (!is.character(fileType) || length(fileType) != 1) stop("fileType must be a single character string")
if (!is.character(fileNamePattern) || length(fileNamePattern) != 1) stop("fileNamePattern must be a single character string")
if (!is.character(sep) || length(sep) != 1) stop("sep must be a single character string")
if (!is.logical(recursive) || length(recursive) != 1) stop("recursive must be a single logical value")
# Get all dimensions
dimensions <- getDimensions(path = path, audioList = audioList, fileType = fileType, fileNamePattern = fileNamePattern, sep = sep, recursive = recursive)
# Create an empty data frame with the desired IDs and dimensions
id_vec <- unique(ids)
df <- data.frame(ID = id_vec, stringsAsFactors = FALSE)
df[,dimensions] <- NA
#Check which dimensions are missing for each ID
for(id in id_vec){
dimensionsId <- getDimensions(path = path, audioList = audioList, fileType = fileType, sep = sep, fileNamePattern = fileNamePattern, filter = id, recursive=recursive)
df[df$ID == id,dimensions] <- dimensions %in% dimensionsId
}
return(df)
}
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/missDimPerId.R
|
#' Normalize Amplitude
#'
#' Internal function adapted from the soundgen package, in order to normalize the amplitude of all the objects in a list of Wave objects.
#'
#' @param audioList A list of Wave objects.
#' @param type Normalize so the output files have the same peak amplitude ('peak'), root mean square amplitude ('rms'), or subjective loudness in sone ('loudness').
#' @param maxAmp maximum amplitude in dB (0 = max possible, -10 = 10 dB below max possible, etc.).
#' @param summaryFun should the output files have the same mean / median / max etc rms amplitude or loudness? (summaryFun has no effect if type = 'peak').
#' @param windowLength length of FFT window, ms
#' @param step you can override overlap by specifying FFT step, ms (NB: because digital audio is sampled at discrete time intervals of 1/samplingRate, the actual step and thus the time stamps of STFT frames may be slightly different, eg 24.98866 instead of 25.0 ms).
#' @param overlap overlap between successive FFT frames, \%.
#' @param killDC if TRUE, removes DC offset (see also flatEnv).
#' @param windowDC the window for calculating DC offset, ms.
#' @param verbose a logical value for printing messages.
#' @param progress a logical value to report the progress (only for the Shiny app).
#' @return list of normalized Wave objects.
#'
#' @source This function is adapted from `soundgen::normalizeFolder()`.
#' @importFrom tuneR normalize
#' @importFrom shiny incProgress
#' @importFrom soundgen getRMS getLoudness
#' @noRd
normAmplitude = function(audioList,
type = c('peak', 'rms', 'loudness')[1],
maxAmp = 0,
summaryFun = 'mean',
windowLength = 50,
step = NULL,
overlap = 70,
killDC = FALSE,
windowDC = 200,
verbose = TRUE, progress=FALSE) {
files <- audioList
n <- length(audioList)
if(n == 0){
stop("No audio Files provided")
}
## process all the files
if (verbose) message('Processing...')
# for either peak or RMS normalization, start by peak normalization to maxAmp dB
level = 10 ^ (maxAmp / 20)
for (i in 1:n) {
files[[i]] = normalize(files[[i]],
unit = as.character(files[[i]]@bit),
rescale = TRUE, level = level)
if(progress)
incProgress(1/(length(files)))
}
# for RMS- or loudness-normalization, perform additional steps
if (type %in% c('rms', 'loudness')) {
perSound = vector('list', n)
if (type == 'rms') {
for (i in 1:n) {
# calculate the RMS amplitude of each file
perSound[[i]] = getRMS(files[[i]]@left,
samplingRate = files[[i]]@samp.rate,
windowLength = windowLength,
step = step,
overlap = overlap,
scale = 2^(files[[i]]@bit - 1),
killDC = killDC,
windowDC = windowDC,
plot = FALSE)
}
} else if (type == 'loudness') {
for (i in 1:n) {
# estimate subjective loudness of each file
perSound[[i]] = getLoudness(as.numeric(files[[i]]@left),
samplingRate = files[[i]]@samp.rate,
scale = 2^(files[[i]]@bit - 1),
windowLength = windowLength,
step = step,
plot = FALSE)$loudness
}
}
# summary measure per file
summaryPerSound = unlist(lapply(perSound, summaryFun))
names(summaryPerSound) = names(audioList)
# find the quietest file
ref = which.min(summaryPerSound)
# the quietest file is untouched, but all others are rescaled to have the
# same RMS/loudness as the quietest one
for (i in 1:n) {
if (i != ref) {
if (type == 'rms') {
rescale = summaryPerSound[ref] / summaryPerSound[i]
} else if (type == 'loudness') {
rescale = (summaryPerSound[ref] / summaryPerSound[i]) ^ (5 / 3)
}
files[[i]]@left = as.integer(round(files[[i]]@left * rescale))
if(progress)
incProgress(1/(length(files)))
}
}
}
# return the rescaled files
return(files)
}
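
## Hedged sketch (not run): normAmplitude() is internal and normally reached through
## preprocess(); a direct call with RMS normalization could look like the following.
## The specific option values are illustrative assumptions.
# normalized <- normAmplitude(testAudioList, type = "rms", maxAmp = -1, summaryFun = "median")
# range(normalized[[1]]@left)   # amplitudes rescaled relative to the quietest recording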
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/normAmplitude.R
|
#' Normality Plots
#'
#' Generates plots showing the normality of the different measures from the data.frame obtained from autoExtract.
#'
#' @param audioData A data.frame generated by autoExtract.
#' @param measures An optional character vector indicating the name of the variables to be plotted.
#' @return A list containing the different plots that are generated.
#' @examples
#' normalityPlots(testAudioData)
#'
#' @importFrom ggplot2 ggplot ggplot_build annotate aes geom_density
#' @importFrom stats shapiro.test
#' @importFrom rlang !! sym
#' @export
normalityPlots <- function(audioData, measures = c("duration", "voice_breaks_percent", "RMS_env", "mean_loudness", "mean_F0", "sd_F0", "mean_entropy", "mean_HNR")){
if(!is.data.frame(audioData)) stop("audioData should be a data.frame produced by autoExtract")
if(!all(measures %in% colnames(audioData))) {
stop("measures should be one of the following measures: duration, voice_breaks_percent, RMS_env, mean_loudness, mean_F0, sd_F0, mean_entropy, or mean_HNR")
}
#Check if audiodata contains conditions and dimensions
conditionPresence <- "Condition" %in% colnames(audioData)
dimensionPresence <- "Dimension" %in% colnames(audioData)
#Create empty list to save the plots
plots <- list()
i <- 1
#if no conditions
if(conditionPresence == FALSE){
#generate a plot for each measure, showing the shapiro p value
for (measure in measures) {
annotation <- paste0("Shapiro-Wilk p: ", round(shapiro.test(audioData[,measure])$p.value,4), "\n")
plots[[measure]] <- local({
p1 <- ggplot(audioData, aes(x=!!sym(measure))) + geom_density()
p1Params <- ggplot_build(p1)
p1 <- p1 + annotate("text", x= p1Params$layout$panel_scales_x[[1]]$range$range[1] + abs(p1Params$layout$panel_scales_x[[1]]$range$range[1] - p1Params$layout$panel_scales_x[[1]]$range$range[2])*0.4, y = p1Params$layout$panel_scales_y[[1]]$range$range[2]*0.85, label = annotation)
p1
})
}
}
else{
#if there are conditions, generate a plot for each measure containing annotations for the shapiro wilk p value for each condition
for (measure in measures) {
annotation <- ""
for (condition in unique(audioData$Condition)) {
annotation <- paste0(annotation, "Shapiro-Wilk p (", condition, "): ", ifelse(length(which(!is.na(audioData[audioData$Condition == condition,measure]))) >= 3, round(shapiro.test(audioData[audioData$Condition == condition,measure])$p.value,4), "Not enough data"), "\n")
}
#annotation <- grid::grobTree(grid::textGrob(annotation, x=0.5, y=0.7, hjust=0, gp=grid::gpar(col="blue", fontsize=9, fontface="bold")))
plots[[measure]] <- local({
p1 <- ggplot(audioData, aes(x=!!sym(measure), fill=!!sym("Condition"))) + geom_density(alpha = 0.3)
p1Params <- ggplot_build(p1)
p1 <- p1 + annotate("text", x= p1Params$layout$panel_scales_x[[1]]$range$range[1] + abs(p1Params$layout$panel_scales_x[[1]]$range$range[1] - p1Params$layout$panel_scales_x[[1]]$range$range[2])*0.4, y = p1Params$layout$panel_scales_y[[1]]$range$range[2]*0.85, label = annotation)
p1
})
}
}
return(plots)
}
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/normalityPlots.R
|
#' Normalize audio data using Box-Cox transformation
#'
#' This function normalizes audio data using the Box-Cox transformation. It takes in a data frame of audio data and a vector of measures to be normalized. Users can choose to normalize by dimensions and/or conditions.
#'
#' @param audioData A data.frame generated by the autoExtract() function.
#' @param measures A vector of strings specifying the measures to be normalized. Default corresponds to all the measures extracted by autoExtract().
#' @param includeDimensions A logical value indicating whether or not to include dimensions in the normalization process. Default corresponds to FALSE.
#' @param includeConditions A logical value indicating whether or not to include conditions in the normalization process. Default corresponds to FALSE.
#'
#' @return A list containing three elements: (1) a data frame of the normalized audio data, (2) a logical vector indicating whether each measure was transformed using the Box-Cox transformation, and (3) the Box-Cox constant added to each measure.
#'
#' @examples
#' normalizeData(testAudioData)
#'
#' @importFrom rcompanion transformTukey
#' @importFrom MASS boxcox
#'
#' @export
normalizeData <- function(audioData, measures = c("duration", "voice_breaks_percent", "RMS_env", "mean_loudness", "mean_F0", "sd_F0", "mean_entropy", "mean_HNR"), includeDimensions = FALSE, includeConditions = FALSE){
# Check parameters
stopifnot(is.data.frame(audioData))
stopifnot(is.character(measures))
stopifnot(is.logical(includeDimensions))
stopifnot(is.logical(includeConditions))
if(nrow(audioData) < 3) stop("Not enough data.")
avoidNormalCheck <- rep(FALSE, length(measures))
constantBoxCox <- numeric(length(measures))
for (i in seq_along(measures)) {
measure <- measures[i]
normalityData <- tableNormality(audioData, measure, includeDimensions = includeDimensions)
if(includeConditions && any(is.na(normalityData$pValue))){
stop("Some conditions do not have enough data (N < 3)")
}
if(includeConditions && includeDimensions && any(is.na(normalityData$pValue))){
stop("Some combinations of conditions and dimensions do not have enough data (N < 3)")
}
if (min(normalityData$pValue) >= 0.05) {
next
}
if (min(audioData[, measure], na.rm = TRUE) <= 0) {
constantBoxCox[i] <- abs(min(audioData[, measure], na.rm = TRUE)) + 0.01
audioData[!is.na(audioData[, measure]), measure] <- audioData[!is.na(audioData[, measure]), measure] + constantBoxCox[i]
}
if (includeConditions) {
formula <- paste0(measure, " ~ Condition")
if (includeDimensions) {
formula <- paste0(formula, " + Dimension")
}
      Box <- boxcox(as.formula(formula), data = audioData, lambda = seq(-6, 6, 0.1), plotit = FALSE)
lambda <- Box$x[which.max(Box$y)]
audioData[, measure] <- (audioData[, measure] ^ lambda - 1) / lambda
} else {
audioData[, measure] <- transformTukey(audioData[, measure], plotit = FALSE, start = -6, end = 6, int = 0.1)
}
avoidNormalCheck[i] <- TRUE
}
# Return list with results
return(list(audioData = audioData, avoidNormalCheck = avoidNormalCheck, constantBoxCox = constantBoxCox))
}
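
## Hedged usage sketch (not run): the returned avoidNormalCheck vector tells downstream
## plotting/reporting functions which measures were Box-Cox transformed and should not be
## normality-checked again.
# norm <- normalizeData(testAudioData, measures = c("duration", "mean_F0"),
#                       includeConditions = TRUE)
# norm$avoidNormalCheck   # logical, one entry per requested measure
# norm$constantBoxCox     # constant added to each measure before transformation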
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/normalizeData.R
|
#' Preprocess list of Audio objects
#'
#' Automatically preprocesses a list of Wave objects by normalizing their amplitude and removing background noise.
#'
#' @param audioList A list of Wave objects.
#' @param normalizeAmplitude A logical value indicating whether to normalize amplitude.
#' @param removeNoise A logical value indicating whether to remove background noise.
#' @param ... Other options used to control preprocessing behavior.
#' @return A list of (processed) Wave objects.
#' @examples
#' preprocess(testAudioList)
#'
#' @importFrom seewave rmnoise
#' @export
preprocess <- function(audioList, normalizeAmplitude = TRUE, removeNoise = TRUE, ...){
if(!is.list(audioList)) stop("audioList must be a list of Wave objects!")
if(!is.logical(normalizeAmplitude)) stop("normalizeAmplitude must be a boolean!")
if(!is.logical(removeNoise)) stop("removeNoise must be a boolean!")
#Separate arguments for normalizeAmplitude and removeNoise
normArgs_string <- c("type", "maxAmp", "summaryFun", "windowLength", "step", "overlap", "killDC", "windowDC", "verbose", "progress")
extra_args <- list(...)
normArgs <- extra_args[names(extra_args) %in% normArgs_string]
normArgs[["audioList"]] = audioList
rmnoiseArgs <- extra_args[!names(extra_args) %in% normArgs_string]
rmnoiseArgs[["output"]] = "Wave"
#if normalize amplitude, then normalize amplitude
if(normalizeAmplitude){
audioList <- do.call(normAmplitude, normArgs)
}
#Remove noise
if(removeNoise){
for (i in 1:length(audioList)) {
      audioList[[i]] <- do.call(rmnoise, c(list(wave = audioList[[i]], f = audioList[[i]]@samp.rate), rmnoiseArgs))
}
}
return(audioList)
}
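
## Hedged usage sketch (not run): extra arguments are split internally, so normalization
## options (e.g. type, maxAmp) are routed to normAmplitude() and the remaining arguments
## go to seewave::rmnoise(). The option values shown are illustrative assumptions.
# cleaned <- preprocess(testAudioList, normalizeAmplitude = TRUE, removeNoise = TRUE,
#                       type = "rms", maxAmp = -1)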
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/preprocess.R
|
#' Read Audio Files
#'
#' Loads all audio files in the specified directory and provides the option to filter via ID, conditions, and/or dimensions.
#'
#' @param path Character string indicating the full path to the folder containing the audio files. Default corresponds to the current working directory.
#' @param filter Optional character vector, containing patterns, such as IDs or conditions of each audio file.
#' @param fileType Character string indicating the file format (wav or mp3) of the audio files. Default corresponds to wav.
#' @param recursive A logical value indicating whether subdirectories should be included when searching for voice files. Default corresponds to FALSE.
#' @return Returns a list of Wave objects.
#' @examples
#' readAudio(system.file("Audios", package = "voiceR"),
#' fileType = "wav", recursive = TRUE)
#'
#' @importFrom tuneR readWave readMP3
#' @importFrom stringr str_remove_all str_sub str_length
#' @export
readAudio <-
function(path = ".", filter = c(),
fileType = "wav",
recursive = FALSE
) {
if(!is.character(path) || !file.exists(path)) stop("Invalid path!")
if(!is.logical(recursive)) stop("recursive must be a boolean!")
#Check path to prevent errors
if(str_sub(path, -1) == "/"){
path <- str_sub(path, 1, str_length(path)-1)
}
#If filter is NA convert it to an empty vector. Otherwise concatenate different filter words using the or operator
if(all(is.na(filter)) || length(filter) == 0){
filter <- c()
} else {
filter = paste(filter,collapse="|")
}
    #list files in the specified folder, matching the specified patterns, then keep only the specified file type
    files <- list.files(path = path, pattern = filter, recursive = recursive, full.names = TRUE)
    fileType <- tolower(fileType)
    files <- files[grepl(paste0("\\.", fileType, "$"), tolower(files))]
if(length(files) == 0){
stop("No audio files found", call. = FALSE)
}
audioList <- vector("list",length(files))
i <- 1
for (file in files) {
if(fileType == "wav"){
audioList[[i]] <- readWave(file)
names(audioList)[i] <- basename(str_remove_all(file, pattern = paste0("\\.", fileType)))
}
else{
audioList[[i]] <- readMP3(file)
names(audioList)[i] <- basename(str_remove_all(file, pattern = paste0("\\.", fileType)))
}
i <- i + 1
}
return(audioList)
}
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/readAudio.R
|
#' Save Audio Files
#'
#' Save all objects in a list of \code{Wave} objects as a .wav or .mp3 file in the specified path.
#'
#' @param audioList A list of Wave objects.
#' @param path A character string indicating the full path to the folder containing the audio files. Default corresponds to the current working directory.
#' @param fileType Character string indicating the file format (wav or mp3) of the audio files. Default corresponds to wav.
#' @return Save objects of a \code{Wave} list as .mp3 or .wav files.
#' @examples
#' \donttest{
#' saveAudio(testAudioList, fileType = "wav")
#' }
#' @importFrom tuneR writeWave
#' @export
saveAudio <- function(audioList, path = "./Preprocessed/", fileType = "wav"){
if(!is.character(path) || !file.exists(path)) stop("Invalid path!")
if(!is.list(audioList)) stop("audioList must be a list of Wave objects!")
if(!is.character(fileType)) stop("fileType must be a string!")
if(substr(path, nchar(path), nchar(path)) != "/")
path <- paste0(path, "/")
if(substr(fileType, 1, 1) != ".")
fileType <- paste0(".", fileType)
#create directory if it does not exist
ifelse(!dir.exists(file.path(path)), dir.create(file.path(path)), FALSE)
#save files using tuner
for (i in 1:length(audioList)) {
writeWave(object = audioList[[i]], filename = paste0(path, names(audioList[i]), fileType))
}
}
|
/scratch/gouwar.j/cran-all/cranData/voiceR/R/saveAudio.R
|
#' Create a Table of ANOVA results
#'
#' Automatically generates HTML table with results for one-way or two-way ANOVAs.
#'
#' @param audioData A data.frame generated by the autoExtract() function.
#' @param by A character vector indicating the name of the factor(s).
#' @param measure Name of the dependent variable.
#' @param nameMeasure Optional relabelling of dependent variable for the output table. If no value is provided, the original variable name is used.
#' @param figureNumber An integer indicating the figure number to create the title for the table. Default corresponds to 1.
#' @return HTML table showing the ANOVA results in APA formatting style.
#' @examples
#' tableANOVA(testAudioData, by = c("Condition", "Dimension"), measure = "duration")
#'
#' @importFrom stats as.formula aov
#' @importFrom stringr str_replace_all
#' @importFrom kableExtra kable_classic footnote
#' @importFrom knitr kable
#' @export
tableANOVA <- function(audioData, by = c(), measure = "duration", nameMeasure = c(), figureNumber = 1){
by <- as.vector(by)
if(!is.data.frame(audioData)) stop("audioData should be a data.frame produced by autoExtract")
if(length(measure) != 1 && !measure %in% colnames(audioData)) {
stop("measure should be present on audioData")
}
if(sum(!by %in% colnames(audioData)) > 0){
stop(paste(by[which(!by %in% colnames(audioData))], "not found in audioData"))
}
if(!is.numeric(audioData[,measure])){
stop("Variable selected using measure is not numeric")
}
if(length(by) == 0){
stop("No by values provided.")
}
if(!(all(apply(audioData[, tolower(colnames(audioData)) %in% tolower(by), drop = FALSE], 2, is.factor)) || all(apply(audioData[, tolower(colnames(audioData)) %in% tolower(by), drop = FALSE], 2, is.character)))){
stop("Variables selected using by are not factors")
}
#If no custom name provided for the measure, use the measure name
if(length(nameMeasure) == 0 || !is.character(nameMeasure)){
nameMeasure <- measure
}
#If one-way
if(length(by) == 1){
#Compute one way anova
formula = as.formula(paste(measure, "~", by))
#Summarize results
AnovaTestData <- summary(aov(formula, audioData))[[1]]
    #Create description table from the test data
descriptionTable <- data.frame(Source = rownames(AnovaTestData), df = AnovaTestData$Df, SS = round(AnovaTestData$`Sum Sq`,2), MS = round(AnovaTestData$`Mean Sq`, 2), F = round(AnovaTestData$`F value`, 2), p = round(AnovaTestData$`Pr(>F)`, 3))
#Create star notation for p values
descriptionTable2 <- descriptionTable[1,]
if(length(descriptionTable2[descriptionTable2$p < 0.05, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.05, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.05, "p"], "(*)")
if(length(descriptionTable2[descriptionTable2$p < 0.01, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.01, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.01, "p"], "(**)")
if(length(descriptionTable2[descriptionTable2$p < 0.001, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.001, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.001, "p"], "(***)")
if(length(descriptionTable2[descriptionTable2$p >= 0.05, "p"]) > 0)
descriptionTable2[descriptionTable2$p >= 0.05, "sig"] <- paste(descriptionTable2[descriptionTable2$p >= 0.05, "p"], "")
descriptionTable[1,]$p <- descriptionTable2$sig
descriptionTable <- rbind(descriptionTable, c("Total", sum(descriptionTable[,2]), sum(descriptionTable[,3]), NA, NA, NA))
descriptionTable[2,which(is.na(descriptionTable[2,]))] <- " "
descriptionTable[3,which(is.na(descriptionTable[3,]))] <- " "
columnNames <- colnames(descriptionTable)
rowNames <- descriptionTable[,1]
descriptionTable <- data.frame(lapply(descriptionTable, function(x) str_replace_all(x, "0\\.", ".")))
colnames(descriptionTable) <- columnNames
descriptionTable[,1] <- rowNames
#Format table using kable extra
descriptionTable <- kable_classic(kable(
descriptionTable,
format = "html",
booktabs = TRUE,
caption = paste0("Figure ", figureNumber, ". One-Way Analysis of Variance of ", nameMeasure, " by ", by)
), full_width = F, html_font = "Cambria")
}
#If two-way
else if(length(by) == 2){
#Compute two way anova
formula = as.formula(paste(measure, "~", by[1], "*", by[2]))
AnovaTestData <- summary(aov(formula, audioData))[[1]]
#Build table from summarized results
descriptionTable <- data.frame(Source = rownames(AnovaTestData), df = AnovaTestData$Df, SS = round(AnovaTestData$`Sum Sq`,2), MS = round(AnovaTestData$`Mean Sq`, 2), F = round(AnovaTestData$`F value`, 2), p = round(AnovaTestData$`Pr(>F)`, 3))
descriptionTable2 <- descriptionTable[1:3,]
#Create star notation for p values
if(length(descriptionTable2[descriptionTable2$p < 0.05, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.05, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.05, "p"], "(*)")
if(length(descriptionTable2[descriptionTable2$p < 0.01, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.01, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.01, "p"], "(**)")
if(length(descriptionTable2[descriptionTable2$p < 0.001, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.001, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.001, "p"], "(***)")
if(length(descriptionTable2[descriptionTable2$p >= 0.05, "p"]) > 0)
descriptionTable2[descriptionTable2$p >= 0.05, "sig"] <- paste(descriptionTable2[descriptionTable2$p >= 0.05, "p"], "")
descriptionTable[1:3,]$p <- descriptionTable2$sig
# produce final description table
descriptionTable <- rbind(descriptionTable, c("Total", sum(descriptionTable[,2]), sum(descriptionTable[,3]), NA, NA, NA))
descriptionTable$Source[3] <- "Interaction"
descriptionTable[2,which(is.na(descriptionTable[2,]))] <- " "
descriptionTable[3,which(is.na(descriptionTable[3,]))] <- " "
descriptionTable[4,which(is.na(descriptionTable[4,]))] <- " "
descriptionTable[5,which(is.na(descriptionTable[5,]))] <- " "
columnNames <- colnames(descriptionTable)
rowNames <- descriptionTable[,1]
  #Strip leading zeros (APA style) without touching digits inside larger numbers such as 10.52
  descriptionTable <- data.frame(lapply(descriptionTable, function(x) str_replace_all(x, "(?<!\\d)0\\.", ".")))
colnames(descriptionTable) <- columnNames
descriptionTable[,1] <- rowNames
# Format final table using kable extra
descriptionTable <- kable_classic(kable(
descriptionTable,
format = "html",
booktabs = TRUE,
caption = paste0("Figure ", figureNumber, ". Two-Way Analysis of Variance of ", nameMeasure, " by ", paste(by[1], "and", by[2]))
), full_width = F, html_font = "Cambria")
}
#return error
else{
stop("Error, incorrect by argument. By should have minimum length of 1 and maximum length of 2.")
}
#Add footnote with p values
descriptionTable <- footnote(descriptionTable, general ="* p < .05, ** p < .01, *** p < .001", threeparttable = TRUE, footnote_as_chunk = TRUE)
return(descriptionTable)
}
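
# Usage sketch (illustrative, not run): assumes the bundled testAudioData with the
# factor columns "Condition" and "Dimension" and the numeric column "duration".
# tableANOVA(testAudioData, by = "Condition", measure = "duration",
#            nameMeasure = "Duration (s)", figureNumber = 2)
# tableANOVA(testAudioData, by = c("Condition", "Dimension"), measure = "duration")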
/scratch/gouwar.j/cran-all/cranData/voiceR/R/tableANOVA.R
#' Create a table for the results of Dunn's test
#'
#' Automatically generates an HTML table with the results of Dunn's test.
#'
#' @param audioData A data.frame generated by the autoExtract() function.
#' @param by A character vector of length one indicating the name of the factor.
#' @param measure Name of the dependent variable.
#' @param nameMeasure Optional string to rename dependent variable in the output table. If no value is provided, then original variable name is displayed.
#' @param figureNumber Integer indicating the figure number, used to create the title for the table. Default corresponds to 1.
#' @return HTML table showing Dunn's test results in APA formatting style.
#' @examples
#' tableDunn(testAudioData, by = "Condition", measure = "duration")
#'
#' @importFrom stringr str_split str_replace
#' @importFrom stats as.formula
#' @importFrom FSA dunnTest
#' @importFrom kableExtra kable_classic footnote
#' @importFrom knitr kable
#' @export
tableDunn <- function(audioData, by = c(), measure = "duration", nameMeasure = c(), figureNumber = 1){
by <- as.vector(by)
if(!is.data.frame(audioData)) stop("audioData should be a data.frame produced by autoExtract")
if(!measure %in% colnames(audioData)){
stop(paste(measure, "not found in audioData"))
}
if(sum(!by %in% colnames(audioData)) > 0){
stop(paste(by[which(!by %in% colnames(audioData))], "not found in audioData"))
}
  if(!is.numeric(audioData[,measure])){
    stop("Variable selected using measure is not numeric")
  }
if(length(by) == 0){
stop("No by values provided.")
}
if(!(all(apply(audioData[, tolower(colnames(audioData)) %in% tolower(by), drop = FALSE], 2, is.factor)) || all(apply(audioData[, tolower(colnames(audioData)) %in% tolower(by), drop = FALSE], 2, is.character)))){
stop("Variables selected using by are not factors")
}
#If no custom name provided for the measure, use the measure name
if(length(nameMeasure) == 0 || !is.character(nameMeasure)){
nameMeasure <- measure
}
  if(length(by) != 1){
    stop("tableDunn requires exactly one by variable.")
  }
# Generate formula
formula = as.formula(paste(measure, "~", by))
#Compute dunn test
dunnData <- dunnTest(formula, data = audioData)
#Extract results
dunnData <- dunnData$res
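  # dunnTest()$res is a data.frame with one row per pairwise comparison and the columns
  # Comparison (e.g. "GroupA - GroupB"), Z, P.unadj and P.adj; the Z statistic and the
  # adjusted p value are what get reported in the matrix below.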
#Preprocess group text
dunnData$group1 <- trimws(str_split(dunnData$Comparison, "-", simplify = T)[,1])
dunnData$group2 <- trimws(str_split(dunnData$Comparison, "-", simplify = T)[,2])
#Generate empty description table
descriptionTable <- as.data.frame(matrix(nrow = length(unique(audioData[,by])), ncol = length(unique(audioData[,by]))))
#Set column and row names
colnames(descriptionTable) <- unique(audioData[,by])
rownames(descriptionTable) <- unique(audioData[,by])
descriptionTable[lower.tri(descriptionTable)] <- " "
descriptionTable[row(descriptionTable) == col(descriptionTable)] <- "1"
#Fill the table
  for (i in 1:nrow(descriptionTable)) {
    for(j in which(is.na(descriptionTable[i,]))){
      #Locate the pairwise comparison for this row/column pair, in either order
      pairMatch <- (dunnData$group1 == rownames(descriptionTable)[i] & dunnData$group2 == colnames(descriptionTable)[j]) |
        (dunnData$group2 == rownames(descriptionTable)[i] & dunnData$group1 == colnames(descriptionTable)[j])
      number <- round(dunnData[pairMatch, "Z"], 2)
      number <- str_replace(number, "(?<!\\d)0\\.", ".")
      pAdj <- dunnData[pairMatch, "P.adj"]
      #Append star notation for the adjusted p value
      if(pAdj < 0.001){
        descriptionTable[i, j] <- paste(number, "(***)")
      } else if(pAdj < 0.01){
        descriptionTable[i, j] <- paste(number, "(**)")
      } else if(pAdj < 0.05){
        descriptionTable[i, j] <- paste(number, "(*)")
      } else {
        descriptionTable[i, j] <- paste(number, " ")
      }
    }
  }
  descriptionTable[descriptionTable < 1 & descriptionTable > -1] <- str_replace(descriptionTable[descriptionTable < 1 & descriptionTable > -1], "(?<!\\d)0\\.", ".")
descriptionTable <- cbind(unique(audioData[,by]), descriptionTable)
colnames(descriptionTable)[1] <- " "
rownames(descriptionTable) <- c()
#Convert table into kable extra format
descriptionTable <- kable_classic(kable(
descriptionTable,
format = "html",
booktabs = TRUE,
caption = paste0("Figure ", figureNumber, ". Posthoc comparisons using Dunn's Test for ", nameMeasure, " by ", by[1])
), full_width = T, html_font = "Cambria")
#Add foot note for p values
descriptionTable <- footnote(descriptionTable, general ="Z value shown<br/>* p < 0.05, ** p < 0.01, *** p < 0.001", threeparttable = TRUE, footnote_as_chunk = TRUE, escape = FALSE)
return(descriptionTable)
}
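
# Usage sketch (illustrative, not run): Dunn's test is the usual follow-up when
# tableKruskal() flags a significant overall difference across conditions.
# tableKruskal(testAudioData, by = "Condition", measure = "duration")
# tableDunn(testAudioData, by = "Condition", measure = "duration",
#           nameMeasure = "Duration (s)", figureNumber = 4)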
/scratch/gouwar.j/cran-all/cranData/voiceR/R/tableDunn.R
#' Create a table for Kruskal-Wallis test results
#'
#' Automatically generates an HTML table with the results of a Kruskal-Wallis test.
#'
#' @param audioData A data.frame generated by the autoExtract() function.
#' @param by A character vector indicating the name of the factor(s).
#' @param measure Name of the dependent variable.
#' @param nameMeasure Optional string to rename dependent variable in the output table. If no value is provided, original variable name is displayed.
#' @param figureNumber Integer indicating the figure number, used to create the title for the table. Default corresponds to 1.
#' @param InfoTable Logical value indicating the type of table to be reported. If FALSE (the default), a table containing the main results of the Kruskal-Wallis test is displayed; if TRUE, a table containing the mean ranks for each group is displayed.
#' @return HTML table showing Kruskal-Wallis test results in APA formatting style.
#' @examples
#' tableKruskal(testAudioData, by = "Condition", measure = "duration")
#'
#' @importFrom stats as.formula kruskal.test
#' @importFrom stringr str_replace
#' @importFrom kableExtra kable_classic footnote
#' @importFrom knitr kable
#' @export
tableKruskal <- function(audioData, by = c(), measure = "duration", nameMeasure = c(), figureNumber = 1, InfoTable = FALSE){
by <- as.vector(by)
if(!is.data.frame(audioData)) stop("audioData should be a data.frame produced by autoExtract")
  if(length(measure) != 1 || !measure %in% colnames(audioData)) {
    stop("measure should be a single column name present in audioData")
  }
if(sum(!by %in% colnames(audioData)) > 0){
stop(paste(by[which(!by %in% colnames(audioData))], "not found in audioData"))
}
if(length(by) == 0){
stop("No by values provided.")
}
if(!(all(apply(audioData[, tolower(colnames(audioData)) %in% tolower(by), drop = FALSE], 2, is.factor)) || all(apply(audioData[, tolower(colnames(audioData)) %in% tolower(by), drop = FALSE], 2, is.character)))){
stop("Variables selected using by are not factors")
}
if(!is.numeric(audioData[,measure])){
stop("Variable selected using measure is not numeric")
}
#If no custom name provided for the measure, use the measure name
if(length(nameMeasure) == 0){
nameMeasure <- measure
}
# Generate formula
formula = as.formula(paste(measure, "~", by))
#Compute kruskal test
kruskalTestData <- kruskal.test(formula, audioData)
#Get ranks
audioData$VarRank <- rank(audioData[,measure], ties.method = "average")
#Generate empty table
descriptionTable <- data.frame(Names = unique(audioData[,by]), N = rep(NA, length(unique(audioData[,by]))), Ranks = rep(NA, length(unique(audioData[,by]))))
#Fill table with N and Ranks
for(i in 1:nrow(descriptionTable)){
descriptionTable$N[i] <- nrow(audioData[audioData[,by] == descriptionTable$Names[i],])
descriptionTable$Ranks[i] <- round(mean(audioData[audioData[,by] == descriptionTable$Names[i],"VarRank"]),2)
}
#Fill data with Kruskal test results
descriptionTable2 <- data.frame(Factor = by, df = kruskalTestData$parameter[1], X2 = round(kruskalTestData$statistic[1], 2), p = round(kruskalTestData$p.value[1], 4))
#Convert p values into star notation
descriptionTable2$p[1] <- ifelse(descriptionTable2$p[1] < 0.001, paste(descriptionTable2$p, "(***)"), ifelse(descriptionTable2$p[1] < 0.01, paste(descriptionTable2$p, "(**)"),ifelse(descriptionTable2$p[1] < 0.05, paste(descriptionTable2$p[1], "(*)"), paste(descriptionTable2$p[1], ""))))
colnames(descriptionTable2)[1] <- " "
colnames(descriptionTable)[1] <- " "
  #Strip leading zeros (APA style) without touching digits inside larger numbers such as 10.52
  descriptionTable2[1,] <- str_replace(descriptionTable2[1,], "(?<!\\d)0\\.", ".")
  descriptionTable[,3] <- str_replace(descriptionTable[,3], "(?<!\\d)0\\.", ".")
  #Convert the table into kableExtra format. If InfoTable is TRUE, display the mean ranks for each group; if FALSE (the default), display the main Kruskal-Wallis test results.
if(InfoTable){
descriptionTable <- kable_classic(kable(
descriptionTable,
format = "html",
booktabs = TRUE,
caption = paste0("Figure ", figureNumber, ". Mean Ranks for ", nameMeasure, " by ", by)
), full_width = T, html_font = "Cambria")
}
else{
descriptionTable <- kable_classic(kable(
descriptionTable2,
format = "html",
booktabs = TRUE,
caption = paste0("Figure ", figureNumber, ". Kruskal Wallis-H test for ", nameMeasure, " by ", by)
), full_width = T, html_font = "Cambria")
descriptionTable <- footnote(descriptionTable, general ="* p < .05, ** p < .01, *** p < .001", threeparttable = TRUE, footnote_as_chunk = TRUE)
}
return(descriptionTable)
}
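
# Usage sketch (illustrative, not run): the default call returns the H-test table;
# setting InfoTable = TRUE returns the mean-rank table for the same grouping instead.
# tableKruskal(testAudioData, by = "Condition", measure = "duration")
# tableKruskal(testAudioData, by = "Condition", measure = "duration", InfoTable = TRUE)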
/scratch/gouwar.j/cran-all/cranData/voiceR/R/tableKruskal.R
#' Create a table for Shapiro-Wilk results
#'
#' This function returns a data.frame containing Shapiro-Wilk test results by condition (and by dimension if "includeDimensions" is set to TRUE). Likewise, if "HTMLTable" is set to TRUE, it outputs the results as an HTML table in APA style.
#'
#' @param audioData A data.frame generated by the autoExtract() function.
#' @param measure Name of the dependent variable. Default corresponds to duration.
#' @param includeDimensions Logical value indicating if different dimensions should be also included as a factor when testing for normality. Default corresponds to FALSE.
#' @param figureNumber Integer indicating the figure number, used to create the title for the table. Default corresponds to 1.
#' @param nameMeasure Optional string indicating the name to be displayed for the dependent variable in the output table. If no value is provided, the string used for the measure attribute is displayed.
#' @param HTMLTable Logical value indicating if an HTML table should be generated. Default corresponds to FALSE.
#' @return If "HTMLTable" is set to FALSE, this function returns a data.frame with Shapiro-Wilk test results for each condition (if condition column exists) and dimension (if dimension column exists and "includeDimensions" is set to TRUE). Otherwise an HTML table showing test results in APA formatting style is created.
#' @examples
#' tableNormality(testAudioData, measure = "duration")
#'
#' @importFrom stats shapiro.test
#' @importFrom kableExtra kable_classic footnote
#' @importFrom knitr kable
#' @export
tableNormality <- function(audioData, measure = "duration", includeDimensions = FALSE, figureNumber = 1, nameMeasure = c(), HTMLTable = FALSE){
if(!is.data.frame(audioData)) stop("audioData should be a data.frame produced by autoExtract")
  if(length(measure) != 1 || !measure %in% colnames(audioData)) {
    stop("measure should be a single column name present in audioData")
  }
  if(!is.numeric(audioData[,measure])){
    stop("Variable selected using measure is not numeric")
  }
#Create empty data.frame
normalityData <- as.data.frame(matrix(ncol = ifelse("Condition" %in% colnames(audioData) & includeDimensions, 5, 4), nrow = ifelse("Condition" %in% colnames(audioData) & includeDimensions, nrow(expand.grid(unique(audioData$Condition), unique(audioData$Dimension))) + 1, ifelse("Condition" %in% colnames(audioData), length(unique(audioData$Condition)) + 1, 1))))
#Set column names
if("Condition" %in% colnames(audioData) & includeDimensions){
colnames(normalityData) <- c("Condition", "Dimension", "N", "W", "pValue")
}else{
colnames(normalityData) <- c("Condition", "N", "W", "pValue")
}
normalityData$Condition[1] <- "General"
#If dimensions are included and conditions too, generate all possible combinations of conditions and dimensions and check normality for these
if("Condition" %in% colnames(audioData) & includeDimensions){
normalityData$Dimension[1] <- "General"
tempCombinations <- expand.grid(unique(audioData$Condition), unique(audioData$Dimension))
normalityData[-1,1] <- as.character(tempCombinations[,1])
normalityData[-1,2] <- as.character(tempCombinations[,2])
for (row in 2:nrow(normalityData)) {
normalityData[row,]$N <- nrow(audioData[audioData$Condition == normalityData$Condition[row] & audioData$Dimension == normalityData$Dimension[row],])
if(sum(!is.na(audioData[audioData$Condition == normalityData$Condition[row] & audioData$Dimension == normalityData$Dimension[row], measure])) < 3){
next
}
shapiroTest <- shapiro.test(audioData[audioData$Condition == normalityData$Condition[row] & audioData$Dimension == normalityData$Dimension[row], measure])
normalityData[row,]$W <- round(shapiroTest$statistic, 2)
normalityData[row,]$pValue <- round(shapiroTest$p.value,3)
}
}
#If only conditions are present, check normality for each condition
else if("Condition" %in% colnames(audioData)){
normalityData$Condition[2:nrow(normalityData)] <- as.character(unique(audioData$Condition))
for (condition in unique(audioData$Condition)) {
normalityData[normalityData$Condition == condition,]$N <- nrow(audioData[audioData$Condition == condition,])
if(length(which(!is.na(audioData[audioData$Condition == condition, measure]))) < 3){
next
}
shapiroTest <- shapiro.test(audioData[audioData$Condition == condition,measure])
normalityData[normalityData$Condition == condition,]$W <- round(shapiroTest$statistic, 2)
normalityData[normalityData$Condition == condition,]$pValue <- round(shapiroTest$p.value,3)
}
}
#produce HTMLtable with the results
if(HTMLTable && length(audioData[!is.na(audioData[,measure]), measure]) > 3){
if(length(nameMeasure) == 0){
nameMeasure <- measure
}
shapiroTest <- shapiro.test(audioData[,measure])
normalityData$W[1] <- round(shapiroTest$statistic, 2)
normalityData$pValue[1] <- round(shapiroTest$p.value,3)
normalityData$N[1] <- nrow(audioData)
colnames(normalityData)[ncol(normalityData)] <- "p"
descriptionTable2 <- normalityData
#Convert p values into star notation
if(length(descriptionTable2[descriptionTable2$p < 0.05, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.05, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.05, "p"], "(*)")
if(length(descriptionTable2[descriptionTable2$p < 0.01, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.01, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.01, "p"], "(**)")
if(length(descriptionTable2[descriptionTable2$p < 0.001, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.001, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.001, "p"], "(***)")
if(length(descriptionTable2[descriptionTable2$p >= 0.05, "p"]) > 0)
descriptionTable2[descriptionTable2$p >= 0.05, "sig"] <- paste(descriptionTable2[descriptionTable2$p >= 0.05, "p"], "")
normalityData$p <- descriptionTable2$sig
    normalityData <- data.frame(lapply(normalityData, function(x) str_replace_all(x, "(?<!\\d)0\\.", ".")))
descriptionTable <- kable_classic(kable(
normalityData,
format = "html",
booktabs = TRUE,
caption = paste0("Figure ", figureNumber,". Shapiro Wilk Normality Test for ", nameMeasure, " by ", ifelse(includeDimensions, "Condition and Dimension", "Condition") )
), full_width = F, html_font = "Cambria")
descriptionTable <- footnote(descriptionTable, general ="* p < .05, ** p < .01, *** p < .001", threeparttable = TRUE, footnote_as_chunk = TRUE)
return(descriptionTable)
}
#produce data.frame with the results
else{
if(length(audioData[!is.na(audioData[,measure]), measure]) > 3){
shapiroTest <- shapiro.test(audioData[,measure])
normalityData$W[1] <- round(shapiroTest$statistic, 2)
normalityData$pValue[1] <- round(shapiroTest$p.value,3)
normalityData$N[1] <- nrow(audioData)
return(normalityData)
} else {
text <- "Sample size was not big enough to test for normality"
return(text)
}
}
}
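
# Usage sketch (illustrative, not run): with HTMLTable = FALSE (the default) a plain
# data.frame is returned, which makes it easy to branch on the normality results.
# norm <- tableNormality(testAudioData, measure = "duration")
# if (is.data.frame(norm) && any(norm$pValue < .05, na.rm = TRUE)) {
#   # at least one group deviates from normality, so a non-parametric test is safer
#   tableKruskal(testAudioData, by = "Condition", measure = "duration")
# }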
/scratch/gouwar.j/cran-all/cranData/voiceR/R/tableNormality.R
#' Create a table for the Scheirer-Ray-Hare test results
#'
#' Automatically generates an HTML table with the results of the Scheirer-Ray-Hare test.
#'
#' @param audioData A data.frame generated by the autoExtract() function.
#' @param by A character vector indicating the name of the factor(s).
#' @param measure Name of the dependent variable.
#' @param nameMeasure Optional string to rename the dependent variable in the output table. If no value is provided, the original variable name is displayed.
#' @param figureNumber Integer indicating the figure number, used to create the title for the table. Default corresponds to 1.
#' @return HTML table showing Scheirer-Ray-Hare test results in APA formatting style.
#' @examples
#' tableSchreirer(testAudioData, by = c("Condition", "Dimension"), measure = "duration")
#'
#' @importFrom stats as.formula
#' @importFrom rcompanion scheirerRayHare
#' @importFrom stringr str_replace_all
#' @importFrom kableExtra kable_classic footnote
#' @importFrom knitr kable
#' @export
tableSchreirer <- function(audioData, by = c(), measure = "duration", nameMeasure = c(), figureNumber = 1){
by <- as.vector(by)
if(!is.data.frame(audioData)) stop("audioData should be a data.frame produced by autoExtract")
  if(length(measure) != 1 || !measure %in% colnames(audioData)) {
    stop("measure should be a single column name present in audioData")
  }
if(sum(!by %in% colnames(audioData)) > 0){
stop(paste(by[which(!by %in% colnames(audioData))], "not found in audioData"))
}
if(length(by) == 0){
stop("No by values provided.")
}
if(!(all(apply(audioData[, tolower(colnames(audioData)) %in% tolower(by), drop = FALSE], 2, is.factor)) || all(apply(audioData[, tolower(colnames(audioData)) %in% tolower(by), drop = FALSE], 2, is.character)))){
stop("Variables selected using by are not factors")
}
if(!is.numeric(audioData[,measure])){
stop("Variable selected using measure is not numeric")
}
#If no custom name provided for the measure, use the measure name
if(length(nameMeasure) == 0 || !is.character(nameMeasure)){
nameMeasure <- measure
}
# If by does not have length equal to 2 throw error
if(length(by) != 2){
stop("Error, incorrect by argument. By should have length of 2.")
}
# Generate formula
formula = as.formula(paste(measure, "~", by[1], "*", by[2]))
#Compute Scheirer Ray hare test
SchreirerTestData <- scheirerRayHare(formula, audioData, verbose = FALSE)
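  # scheirerRayHare() returns an ANOVA-like data.frame with rows for the two main effects,
  # their interaction and the residuals, and the columns Df, Sum Sq, H and p.value;
  # MS is derived below as Sum Sq / Df.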
#Generate table with the results
descriptionTable <- data.frame(Source = rownames(SchreirerTestData), df = SchreirerTestData$Df, SS = round(SchreirerTestData$`Sum Sq`,2), MS = round(SchreirerTestData$`Sum Sq`/SchreirerTestData$Df, 2), H = round(SchreirerTestData$H, 2), p = round(SchreirerTestData$p.value, 3))
descriptionTable2 <- descriptionTable[1:3,]
#Convert p values into star notation
if(length(descriptionTable2[descriptionTable2$p < 0.05, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.05, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.05, "p"], "(*)")
if(length(descriptionTable2[descriptionTable2$p < 0.01, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.01, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.01, "p"], "(**)")
if(length(descriptionTable2[descriptionTable2$p < 0.001, "p"]) > 0)
descriptionTable2[descriptionTable2$p < 0.001, "sig"] <- paste(descriptionTable2[descriptionTable2$p < 0.001, "p"], "(***)")
if(length(descriptionTable2[descriptionTable2$p >= 0.05, "p"]) > 0)
descriptionTable2[descriptionTable2$p >= 0.05, "sig"] <- paste(descriptionTable2[descriptionTable2$p >= 0.05, "p"], "")
descriptionTable[1:3,]$p <- descriptionTable2$sig
#Format the table for final output
descriptionTable <- rbind(descriptionTable, c("Total", sum(descriptionTable[,2]), sum(descriptionTable[,3]), NA, NA, NA))
descriptionTable$Source[3] <- "Interaction"
descriptionTable[2,which(is.na(descriptionTable[2,]))] <- " "
descriptionTable[3,which(is.na(descriptionTable[3,]))] <- " "
descriptionTable[4,which(is.na(descriptionTable[4,]))] <- " "
descriptionTable[5,which(is.na(descriptionTable[5,]))] <- " "
columnNames <- colnames(descriptionTable)
rowNames <- descriptionTable[,1]
  #Strip leading zeros (APA style) without touching digits inside larger numbers such as 10.52
  descriptionTable <- data.frame(lapply(descriptionTable, function(x) str_replace_all(x, "(?<!\\d)0\\.", ".")))
colnames(descriptionTable) <- columnNames
descriptionTable[,1] <- rowNames
descriptionTable <- kable_classic(kable(
descriptionTable,
format = "html",
booktabs = TRUE,
caption = paste0("Figure ", figureNumber, ". Scheirer-Ray-Hare test of ", nameMeasure, " by ", paste(by[1], "and", by[2]))
), full_width = F, html_font = "Cambria")
descriptionTable <- footnote(descriptionTable, general ="* p < .05, ** p < .01, *** p < .001", threeparttable = TRUE, footnote_as_chunk = TRUE)
return(descriptionTable)
}
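
# Usage sketch (illustrative, not run): the Scheirer-Ray-Hare test is the
# non-parametric counterpart of the two-way ANOVA produced by tableANOVA().
# tableSchreirer(testAudioData, by = c("Condition", "Dimension"),
#                measure = "duration", nameMeasure = "Duration (s)")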
/scratch/gouwar.j/cran-all/cranData/voiceR/R/tableSchreirer.R