devpart <-
function (null, environment, community, full) {
getdev <- function (y, z) {
p <- pnorm(z, log.p = TRUE)
for(j in 1:ncol(y)) {
ind <- which(!y[, j])
p[, ind, j] <- log(-expm1(p[, ind, j]))
}
-2 * apply(p, 1, colSums)
}
nuldev <- getdev(null$call$Y, null$trace$z)
envdev <- getdev(environment$call$Y, environment$trace$z)
comdev <- getdev(community$call$Y, community$trace$z)
fuldev <- getdev(full$call$Y, full$trace$z)
nul <- rbind(rowMeans(nuldev), apply(nuldev, 1, quantile, c(0.025, 0.975)))
env <- rbind(rowMeans(envdev), apply(envdev, 1, quantile, c(0.025, 0.975)))
com <- rbind(rowMeans(comdev), apply(comdev, 1, quantile, c(0.025, 0.975)))
ful <- rbind(rowMeans(fuldev), apply(fuldev, 1, quantile, c(0.025, 0.975)))
rownames(nul)[1] <- rownames(env)[1] <- rownames(com)[1] <- rownames(ful)[1] <- "Mean"
propR2 <- t(rbind(1 - env[1, ] / nul[1, ], 1 - com[1, ] / nul[1, ], 1 - ful[1, ] / nul[1, ], rep(1, length(env[1, ]))))
colnames(propR2) <- c("env", "com", "full", "total")
rownames(propR2) <- colnames(full$call$Y)
list(devpart = propR2, null = nul, environment = env, community = com, full = ful)
}
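# Usage sketch (illustrative; assumes m0, m.env, m.com and m.full are
# bayescomm fits of the null, environment-only, community-only and full
# models for the same response matrix Y):
#   dp <- devpart(m0, m.env, m.com, m.full)
#   dp$devpart[, "env"]   # per-species share of null deviance explained by covariates
#   dp$devpart[, "com"]   # share explained by co-occurring species
#   dp$devpart[, "full"]  # share explained by both sources together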
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/devpart.R
|
find_trunc <-
function (mu, y) {
# Truncation bounds for the latent residuals e = z - mu in the probit model:
# absences (y == 0) require z < 0, presences (y == 1) require z > 0.
n <- dim(mu)[1]
nsp <- dim(mu)[2]
eps <- 10 ^ -7
trunc <- array(-Inf, dim = c(n, nsp, 2))
trunc[, , 2] <- -mu - eps
ind <- which(t(y) == 1)
trunc[, , 1][ind] <- (eps - mu)[ind]
trunc[, , 2][ind] <- Inf
trunc
}
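# Worked mini-example (hypothetical numbers): for a cell with linear
# predictor mu = 0.3, the residual e = z - mu is truncated to
#   y = 1: e in (eps - 0.3, Inf)    so that z = mu + e > 0
#   y = 0: e in (-Inf, -0.3 - eps)  so that z = mu + e < 0
# which preserves the probit identity y = 1 iff z > 0 when sampling e.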
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/find_trunc.R
|
plot.bayescomm <-
function (x, chain, ...) {
bsp <- paste("B", names(x$trace$B), sep = "$")
if (chain == "R") {
plot(mcmc(x$trace[[chain]], start = x$call$start,
thin = x$call$thin))
} else if (chain %in% bsp) {
plot(mcmc(x$trace$B[[substr(chain, 3, nchar(chain))]],
start = x$call$start, thin = x$call$thin))
} else {
stop("chain must be either 'R' or 'B$sp' for a named species sp")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/plot.bayescomm.R
|
predict.bayescomm <- function(object, newdata, ...) {
# Return an array of sample probabilities at new sites.
# Rows are test sites; columns are species; slices are coefficient samples
bindSpeciesCoefficients <- function(object) {
# Helper function to bind the coefficient lists in B into an array.
pre.binding <- lapply(object$trace$B, function(x) {
dim(x) <- c(dim(x), 1)
x
})
out <- do.call(abind, pre.binding)
colnames(out) <- colnames(object$trace$B[[1]])
out
}
  # This probably only works with the "full" model type and with no `covlist`
  # or `condition` specified. Cases where `mu` is non-null are untested, so
  # they are rejected below.
if(!is.null(object$other$mu)){
stop("predictions are not supported for non-null mu")
}
X <- cbind(intercept = 1, newdata)
B <- bindSpeciesCoefficients(object)
R <- object$trace$R
n.species <- dim(B)[3]
predictions <- array(
NA,
dim = c(nrow(X), dim(B)[3], nrow(B)),
dimnames = list(row.names(X), dimnames(B)[[3]], NULL)
)
# Fill in predictions slice by slice
for (i in 1:nrow(B)) {
Sigma <- matrix(0, nrow = n.species, ncol = n.species)
    Sigma[upper.tri(Sigma)] <- R[i, ] # Fill in upper triangle from stored correlations
    Sigma <- Sigma + t(Sigma) # Mirror into the lower triangle
diag(Sigma) <- 1 # Diagonal equals 1 in multivariate probit model
Z <- rmvnorm(n = nrow(X), mean = rep(0, n.species), sigma = Sigma)
predictions[, , i] <- pnorm(X %*% B[i, , ] + Z)
}
predictions
}
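# Usage sketch (illustrative; assumes m is a bayescomm fit of the "full"
# model and newX holds the fitting covariates for some new sites):
#   pr <- predict(m, newX)              # sites x species x posterior samples
#   p.mean <- apply(pr, c(1, 2), mean)  # posterior mean occurrence probabilities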
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/predict.bayescomm.R
|
print.bayescomm <-
function (x, ...) {
cat("model type: ", x$call$model)
cat("\nobservations: ", nrow(x$call$Y))
cat("\nspecies: ", colnames(x$call$Y))
cat("\ncovariates: ", colnames(x$call$X))
cat("\niterations: ", x$call$its, "\tthin: ", x$call$thin, "\tdiscarded: ", (x$call$start - 1))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/print.bayescomm.R
|
residuals.bayescomm <-
function (object, ...) {
object$call$Y - apply(pnorm(object$trace$z), c(2, 3), mean)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/residuals.bayescomm.R
|
rtnorm <-
function(n, mu, si, low, up){
.Call( "rtnorm", n, mu, si, low, up, PACKAGE = "BayesComm" )
}
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/rtnorm.R
|
rwish <-
function (S, df) {
.Call( "rwish", S, df, PACKAGE = "BayesComm" )
}
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/rwish.R
|
sample_B <-
function (z, X) {
.Call( "sample_B", z, X, PACKAGE = "BayesComm" )
}
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/sample_B.R
|
sample_R <-
function (e, priR) {
# Conjugate update for the residual correlation matrix: draw a covariance
# matrix from its inverse-Wishart conditional, then rescale it to a
# correlation matrix.
S <- t(e) %*% e + priR[2] * diag(ncol(e))
v <- priR[1]
df <- v:(v - ncol(S) + 1)
sig <- solve(rwish(solve(S), df))
cov2cor(sig)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/sample_R.R
|
sample_e <-
function(e, trunc, iR) {
# Gibbs update for the latent residuals: each species' column is redrawn from
# a truncated normal whose mean conditions on the other species' residuals
# through the inverse correlation matrix iR.
e2 <- e
n <- dim(e)[1]
nsp <- dim(iR)[1]
coy2 <- 1 / diag(iR)  # conditional variances
A <- iR
A[col(A) == row(A)] <- 0  # off-diagonal part of iR
for (i in 1:nsp) {
mn <- -coy2[i] * (e2 %*% A[i, ])  # conditional mean for species i
std <- sqrt(coy2[i])
e2[, i] <- rtnorm(n, mn, rep(std, n), trunc[, i, 1], trunc[, i, 2])
}
e2
}
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/sample_e.R
|
sample_mu <-
function (z, X, covlist) {
# Per-species regression update when each species has its own covariate
# subset (covlist): draw coefficients, then store the fitted linear predictor.
nsp <- ncol(z)
mu <- matrix(NA, nrow(z), nsp)
blis <- list()
for (i in 1:nsp) {
x <- matrix(X[, covlist[[i]]], ncol = length(covlist[[i]]))
b <- sample_B(matrix(z[, i], ncol = 1), x)
mu[, i] <- x %*% b
blis[[i]] <- b
}
list(mu, blis)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/sample_mu.R
|
summary.bayescomm <-
function (object, chain, ...) {
bsp <- paste("B", names(object$trace$B), sep = "$")
if (chain == "R") {
summary(mcmc(object$trace[[chain]], start = object$call$start,
thin = object$call$thin))
} else if (chain %in% bsp) {
summary(mcmc(object$trace$B[[substr(chain, 3, nchar(chain))]], start = object$call$start,
thin = object$call$thin))
} else {
stop("chain must be either 'R' or 'B$sp' for a named species sp")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/summary.bayescomm.R
|
window.bayescomm <-
function (x, start = NULL, end = NULL, thin = NULL, ...) {
oldstart <- x$call$start
oldend <- x$call$its
oldthin <- x$call$thin
if (is.null(end)) {
end <- oldend
}
if(is.null(start)) {
start <- oldstart
}
if(is.null(thin)) {
thin <- oldthin
}
trim <- function(x, start, end, thin, oldstart, oldthin) {
suppressWarnings(as.matrix(window(x = mcmc(x,
start = oldstart,
thin = oldthin),
start = start,
end = end,
thin = thin)))
}
x$trace$R <- trim(x$trace$R, start, end, thin, oldstart, oldthin)
x$trace$z <- apply(x$trace$z, c(2, 3), trim, start, end, thin, oldstart, oldthin)
x$trace$B <- lapply(x$trace$B, trim, start, end, thin, oldstart, oldthin)
x$call$start <- start
x$call$thin <- thin
x$call$its <- end
x
}
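# Usage sketch (illustrative; assumes m is a bayescomm fit run for 10000
# iterations): drop burn-in and thin the retained chains further with
#   m2 <- window(m, start = 5001, thin = 10)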
|
/scratch/gouwar.j/cran-all/cranData/BayesComm/R/window.bayescomm.R
|
# ------------------------------------------------------------------------------
#
# prior --> list containing the information for prior
# [[1]] - the prior distribution type:
# 1 - DIP (decreasingly informative prior)
# 2 - Beta(a,b)
# [[2]] - a: first shape parameter of the Beta distribution
# [[3]] - b: second shape parameter of the Beta distribution
#
# ------------------------------------------------------------------------------
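#
# For example, a traditional Beta(1,1) prior is encoded as
# prior <- list(2, 1, 1), while the DIP option is prior <- list(1, 0, 0);
# the last two elements are ignored whenever [[1]] == 1.
#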
#' One sample Bernoulli model - Trial Design
#'
#' Calculate the minimum planned sample size under an admissible design.
#' The user specifies the power and type-I error and picks the efficacy and
#' futility boundaries. If no design is admissible under the controlled
#' type-I error, the function defaults to outputting the designs with the
#' lowest type-I error that still achieve the user-defined (e.g. 80\%) power.
#'
#'
#' @param prior A list of length 3 containing the distributional information of the prior.
#' The first element is a number specifying the type of prior. Options are
#' \enumerate{
#' \item DIP ;
#' \item Beta(a,b), where a and b are the two shape parameters}
#' The second and third elements of the list are the parameters a and b, respectively.
#' @param nmin The smallest sample size considered in the search.
#' @param nmax The largest sample size considered in the search.
#' @param p0 The null response rate, which could be taken as the standard or historical rate.
#' @param p1 The response rate of the new treatment.
#' @param d The target improvement (minimal clinically meaningful difference).
#' @param ps The efficacy boundary (upper boundary).
#' @param pf The futility boundary (lower boundary).
#' @param power The power to achieve.
#' @param t1error The controlled type-I-error.
#' @param alternative less (lower values imply greater efficacy) or greater (larger
#' values imply greater efficacy).
#' @param seed The seed for simulations.
#' @param sim The number of simulations.
#' @return A list of the arguments with method and computed elements.
#' @examples
#' \donttest{
#' # with traditional Bayesian prior Beta(1,1)
#' OneSampleBernoulli.Design(list(2,1,1), nmin = 10, nmax=100, p0 = 0.3, p1 = 0.5, d = 0,
#' ps = 0.98, pf = 0.02, power = 0.80, t1error=0.05, alternative = "greater",
#' seed = 202210, sim = 10)
#' # with DIP
#' OneSampleBernoulli.Design(list(1,0,0), nmin = 10, nmax=100, p0 = 0.3, p1 = 0.5, d = 0,
#' ps = 0.98, pf = 0.02, power = 0.80, t1error=0.05, alternative = "greater",
#' seed = 202210, sim = 10)
#' }
#' @importFrom stats rbeta rbinom rgamma rnorm rpois
#' @export OneSampleBernoulli.Design
OneSampleBernoulli.Design <- function(prior, nmin = 10, nmax = 100, p0, p1, d = 0,
ps, pf, power = 0.8, t1error = 0.05,
alternative = c("less", "greater"), seed = 202209, sim = 1000){
alternative <- match.arg(alternative)
# Define the inputs
if(prior[[1]] == 1){
prior[[2]] <- NA
prior[[3]] <- NA
}
## nmin limit
if(!is.null(nmin) && (!is.numeric(nmin) || nmin < 10 || nmin >= nmax))
stop("nmin must be a number of at least 10 and less than 'nmax'")
## nmax limit
if(!is.null(nmax) && (!is.numeric(nmax) || nmax <= nmin || nmax >= 200))
stop("nmax must be greater than 'nmin' and less than 200")
## p0 limit
if(!is.null(p0) && (!is.numeric(p0) || (p0 < 0 | p0 > 1)))
stop("p0 must be numeric in [0,1]")
## p1 limit
if(!is.null(p1) && (!is.numeric(p1) || (p1 < 0 | p1 > 1)))
stop("p1 must be numeric in [0,1]")
## d limit
if(!is.null(d) && (!is.numeric(d) || (d < 0 | d > abs(p1-p0))))
stop("d must be numeric in [0, |p1-p0|]")
## efficacy boundary limit
if(!is.null(ps) && (!is.numeric(ps) || (ps < 0.8 | ps > 1)))
stop("ps (efficacy boundary) must be numeric in [0.8,1]")
## futility boundary limit
if(!is.null(pf) && (!is.numeric(pf) || (pf < 0 | pf > 0.2)))
stop("pf (futility boundary) must be numeric in [0,0.2]")
## power limit
if(!is.null(power) && (!is.numeric(power) || (power < 0 | power > 1)))
stop("power must be numeric in [0,1]")
## t1error limit
if(!is.null(t1error) && (!is.numeric(t1error) || (t1error < 0 |t1error > 1)))
stop("type-I-error must be numeric in [0,1]")
## set.seed
if(!is.numeric(seed))
stop("seed must be numeric")
if(!is.numeric(sim))
stop("simulation number must be numeric")
set.seed(seed)
# Functions to calculate the posterior
Bernoulli <- function(a, b, y) rbeta(1000, a + sum(y), b + (length(y) - sum(y)))
Bernoulli.DIP <- function(p0, y, N){
j <- length(y)
rbeta(1000, 1 + sum(y) + p0*(N - j), 1 + (j - sum(y)) + (1 - p0)*(N - j))
}
# Simulated Data
# calculate N that can achieve the power
N_v <- NULL
power_v <- NULL
n_v <- NULL
sd_v <- NULL
for (N in seq(from=nmin, to=nmax, by=1)){
cat1s <- 0
cat1f <- 0
n.enrolled <- NULL
for (k in 1:sim) {
y.data <- NULL
j <- 0
cat <- 0
cats <- 0
catf <- 0
pp_stop <- 0.5
while(cat == 0){
j <- j+1
y.data <- append(y.data, rbinom(1,1,p1))
if(j>=10)
{
if (prior[[1]] == 2){
p1_s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y.data)
}else if (prior[[1]] == 1){
p1_s <- Bernoulli.DIP(p0, y = y.data, N = N)
}
if (alternative == "greater"){
pp_stop<-sum(p1_s>p0+d)/length(p1_s)
}else if (alternative == "less"){
pp_stop<-sum(p1_s<p0-d)/length(p1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
# Recruited Sample Size
n.enrolled <- append(n.enrolled, j)
}
power.cal <- cat1s/sim
jitter <- 0.01
if (power.cal >= power-jitter){
N_v <- append(N_v, N)
power_v <- append(power_v, power.cal)
n_v <- append(n_v, round(mean(n.enrolled), 0))
sd_v <- append(sd_v, round(sd(n.enrolled), 1))
}
result1 <- cbind(N_v, power_v, n_v, sd_v)
} # End of power calculation
if (is.null(result1)){
message("Suggest: please adjust your input values!")
stop(paste("No sample size in the range [",nmin,",",nmax,"] can achieve ", power*100, "% power", sep=""))
}
# calculate type I error
nmin1 <- N_v[which.min(N_v)] # start minimum sample size in calculation of exact type I error
N_v <- NULL
t1error_v <- NULL
for (N in seq(from=nmin1, to=nmax, by=1)){
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data <- NULL
j <- 0
cat <- 0
cats <- 0
catf <- 0
pp_stop <- 0.5
while(cat == 0){
j <- j+1
y.data <- append(y.data, rbinom(1,1,p0)) # under the null hypothesis p1 = p0
if(j>=10)
{
if (prior[[1]] == 2){
p1_s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y.data)
}else if (prior[[1]] == 1){
p1_s <- Bernoulli.DIP(p0, y = y.data, N = N)
}
if (alternative == "greater"){
pp_stop<-sum(p1_s>p0+d)/length(p1_s)
}else if (alternative == "less"){
pp_stop<-sum(p1_s<p0-d)/length(p1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
}
t1error.cal <- cat1s/sim
N_v <- append(N_v, N)
t1error_v <- append(t1error_v, t1error.cal)
result2 <- cbind(N_v, t1error_v)
} # End of Type-I-error calculation
# Outputs
if (!is.null(result1) & !is.null(result2)){
result <- merge(result1, result2, by=c("N_v"))
final <- as.data.frame(result)
# select the lowest/best-controlled type I error
final$diff <- abs(final$t1error_v - t1error)
final <- final[order(final$diff, final$t1error_v, final$power_v, final$N_v), ]
ff <- final[1,]
planN <- ff$N_v
exact.power <- ff$power_v
exact.t1 <- ff$t1error_v
ss <- ff$n_v
sd <- ff$sd_v
if (prior[[1]] == 1) {method = "DIP"
} else if (prior[[1]] == 2) {method = paste("Beta(",prior[[2]], ",", prior[[3]], ")", sep="")
}
z <- list(method = method, planned_sample_size = planN,
efficacy_boundary = ps, futility_boundary = pf,
exact_power = exact.power, exact_type_I_error = exact.t1,
expected_sample_size = ss, expected_sample_size_std = sd)
z
} # End of Outputs
}
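# Monitoring-rule sketch (illustrative, using hypothetical interim data):
# with a Beta(1,1) prior, the efficacy/futility decision above reduces to
#   y <- c(1, 0, 1, 1, 0, 1, 1, 1, 0, 1)       # 10 interim responses
#   draws <- rbeta(1000, 1 + sum(y), 1 + (length(y) - sum(y)))
#   pp <- sum(draws > 0.3 + 0) / length(draws) # P(p1 > p0 + d), p0 = 0.3, d = 0
#   pp >= 0.98  # stop for efficacy;  pp < 0.02 would stop for futility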
|
/scratch/gouwar.j/cran-all/cranData/BayesDIP/R/OneSampleBernoulli.Design.R
|
# ------------------------------------------------------------------------------
# prior --> list containing the information for prior
# [[1]] - the prior distribution type:
# 1 - DIP
# 2 - Beta(a,b)
# [[2]] - a: first shape parameter of the Beta distribution
# [[3]] - b: second shape parameter of the Beta distribution
# ------------------------------------------------------------------------------
#' One sample Bernoulli model
#'
#' For a given planned sample size, the efficacy and futility boundaries,
#' return the power, the type I error, the expected sample size and its
#' standard deviation, the probability of reaching the efficacy and futility boundaries.
#'
#'
#' @param prior A list of length 3 containing the distributional information of the prior.
#' The first element is a number specifying the type of prior. Options are
#' \enumerate{
#' \item DIP ;
#' \item Beta(a,b), where a and b are the two shape parameters}
#' The second and third elements of the list are the parameters a and b, respectively.
#' @param N The planned sample size.
#' @param p0 The null response rate, which could be taken as the standard or historical rate.
#' @param p1 The response rate of the new treatment.
#' @param d The target improvement (minimal clinically meaningful difference).
#' @param ps The efficacy boundary (upper boundary).
#' @param pf The futility boundary (lower boundary).
#' @param alternative less (lower values imply greater efficacy) or greater (larger
#' values imply greater efficacy).
#' @param seed The seed for simulations.
#' @param sim The number of simulations.
#' @return A list of the arguments with method and computed elements.
#' @examples
#' # with traditional Bayesian prior Beta(1,1)
#' OneSampleBernoulli(list(2,1,1), N = 100, p0 = 0.3, p1 = 0.5, d = 0.05,
#' ps = 0.98, pf = 0.05, alternative = "greater",
#' seed = 202210, sim = 10)
#' # with DIP
#' OneSampleBernoulli(list(1,0,0), N = 100, p0 = 0.3, p1 = 0.5, d = 0.05,
#' ps = 0.98, pf = 0.05, alternative = "greater",
#' seed = 202210, sim = 10)
#' @importFrom stats rbeta rbinom rgamma rnorm rpois
#' @export OneSampleBernoulli
OneSampleBernoulli <- function(prior, N = 100, p0, p1, d = 0,
ps = 0.95, pf = 0.05,
alternative = c("less", "greater"), seed = 202209, sim = 5000) {
alternative <- match.arg(alternative)
# Define the inputs
if(prior[[1]] == 1){
prior[[2]] <- NA
prior[[3]] <- NA
}
## N limit
if(!is.null(N) && (!is.numeric(N) || N < 10))
stop("N must be a number of at least 10")
## p0 limit
if(!is.null(p0) && (!is.numeric(p0) || (p0 < 0 | p0 > 1)))
stop("p0 must be numeric in [0,1]")
## p1 limit
if(!is.null(p1) && (!is.numeric(p1) || (p1 < 0 | p1 > 1)))
stop("p1 must be numeric in [0,1]")
## d limit
if(!is.null(d) && (!is.numeric(d) || (d < 0 | d > abs(p1-p0))))
stop("d must be numeric in [0, |p1-p0|]")
## efficacy boundary limit
if(!is.null(ps) && (!is.numeric(ps) || (ps < 0.8 | ps > 1)))
stop("ps (efficacy boundary) must be numeric in [0.8,1]")
## futility boundary limit
if(!is.null(pf) && (!is.numeric(pf) || (pf < 0 | pf > 0.2)))
stop("pf (futility boundary) must be numeric in [0,0.2]")
## set.seed
if(!is.numeric(seed))
stop("seed must be numeric")
## number of simulation
if(!is.numeric(sim))
stop("simulation number must be numeric")
set.seed(seed)
# Functions to calculate the posterior
Bernoulli <- function(a, b, y) rbeta(1000, a + sum(y), b + (length(y) - sum(y)))
Bernoulli.DIP <- function(p0, y, N){
j <- length(y)
rbeta(1000, 1 + sum(y) + p0*(N - j), 1 + (j - sum(y)) + (1 - p0)*(N - j))
}
# Simulated Data
# calculate power
n.enrolled <- NULL
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data<-NULL
j<-0
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
y.data<-append(y.data,rbinom(1,1,p1))
if(j>=10)
{
if (prior[[1]] == 2){
p1_s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y.data)
}else if (prior[[1]] == 1){
p1_s <- Bernoulli.DIP(p0, y = y.data, N = N)
}
if (alternative == "greater"){
pp_stop<-sum(p1_s>p0+d)/length(p1_s)
}else if (alternative == "less"){
pp_stop<-sum(p1_s<p0-d)/length(p1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
# Recruited Sample Size
n.enrolled <- append(n.enrolled, j)
}
ss <- round(mean(n.enrolled), digits = 1)
sd <- round(sd(n.enrolled), digits = 2)
fut.rate <- cat1f/sim
power <- cat1s/sim
# calculate type I error
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data<-NULL
j<-0
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
y.data<-append(y.data,rbinom(1,1,p0)) # under the null hypothesis p1 = p0
if(j>=10)
{
if (prior[[1]] == 2){
p1_s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y.data)
}else if (prior[[1]] == 1){
p1_s <- Bernoulli.DIP(p0, y = y.data, N = N)
}
if (alternative == "greater"){
pp_stop<-sum(p1_s>p0+d)/length(p1_s)
}else if (alternative == "less"){
pp_stop<-sum(p1_s<p0-d)/length(p1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
}
t1error <- cat1s/sim
# Outputs
if (prior[[1]] == 1) {method = "DIP"
} else if (prior[[1]] == 2) {method = paste("Beta(",prior[[2]], ",", prior[[3]], ")", sep="")}
z <- list(method = method, power = power, type_I_error = t1error,
expected_sample_size = ss, expected_sample_size_std = sd,
the_prob_efficacy = power, the_prob_futility = fut.rate)
z
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDIP/R/OneSampleBernoulli.R
|
# ------------------------------------------------------------------------------
# prior --> list containing the information for prior
# [[1]] - the prior distribution type:
# 1 - DIP
# 2 - Normal(mu0,var/n0)
# [[2]] - n0: the level of information contained in the prior and
# the contribution of null mean
# ------------------------------------------------------------------------------
#' One sample Normal model with one parameter (the mean) unknown, given variance
#'
#' Calculate the minimum planned sample size under an admissible design.
#' The user specifies the power and type-I error and picks the efficacy and
#' futility boundaries. If no design is admissible under the controlled
#' type-I error, the function defaults to outputting the designs with the
#' lowest type-I error that still achieve the user-defined (e.g. 80\%) power.
#'
#'
#' @param prior A list of length 2 containing the distributional information of the prior.
#' The first element is a number specifying the type of prior. Options are
#' \enumerate{
#' \item DIP ;
#' \item Normal(mu0,var/n0), where mu0 = prior mean, var = the known variance}
#' The second element of the list is the parameter n0.
#' @param nmin The smallest sample size considered in the search.
#' @param nmax The largest sample size considered in the search.
#' @param mu0 The null mean value, which could be taken as the standard or current mean.
#' @param mu1 The mean value of the new treatment.
#' @param var The known variance.
#' @param d The target improvement (minimal clinically meaningful difference).
#' @param ps The efficacy boundary (upper boundary).
#' @param pf The futility boundary (lower boundary).
#' @param power The power to achieve.
#' @param t1error The controlled type-I-error.
#' @param alternative less (lower values imply greater efficacy) or greater (larger
#' values imply greater efficacy).
#' @param seed The seed for simulations.
#' @param sim The number of simulations.
#' @return A list of the arguments with method and computed elements.
#' @examples
#' \donttest{
#' # with traditional Bayesian prior Normal(mu0, var/n0) with n0 = 6
#' OneSampleNormal1.Design(list(2,6), nmin = 10, nmax = 100, mu0 = 100, mu1 = 95, var=15, d = 0.05,
#' ps = 0.95, pf = 0.05, power = 0.8, t1error = 0.05, alternative = "less",
#' seed = 202210, sim = 10)
#' # with DIP
#' OneSampleNormal1.Design(list(1,0), nmin = 10, nmax = 100, mu0 = 100, mu1 = 95, var=15, d = 0.05,
#' ps = 0.95, pf = 0.05, power = 0.8, t1error = 0.05, alternative = "less",
#' seed = 202210, sim = 10)
#' }
#' @importFrom stats rbeta rbinom rgamma rnorm rpois
#' @export OneSampleNormal1.Design
OneSampleNormal1.Design <- function(prior, nmin = 10, nmax = 100, mu0, mu1, var, d = 0,
ps, pf, power = 0.8, t1error = 0.05,
alternative = c("less", "greater"), seed = 202209, sim = 1000){
alternative <- match.arg(alternative)
# Define the inputs
if(prior[[1]] == 1){
prior[[2]] <- NA
}
## nmin limit
if(!is.null(nmin) && (!is.numeric(nmin) || nmin < 10 || nmin >= nmax))
stop("nmin must be a number of at least 10 and less than 'nmax'")
## nmax limit
if(!is.null(nmax) && (!is.numeric(nmax) || nmax <= nmin || nmax >= 200))
stop("nmax must be greater than 'nmin' and less than 200")
## mu0 limit
if(!is.null(mu0) && (!is.numeric(mu0)))
stop("mu0 must be numeric")
## mu1 limit
if(!is.null(mu1) && (!is.numeric(mu1)))
stop("mu1 must be numeric")
## var limit
if(!is.null(var) && (!is.numeric(var) || var < 0))
stop("var must be positive numeric")
## d limit
if(!is.null(d) && (!is.numeric(d) || (d < 0 | d > abs(mu1-mu0))))
stop("d must be numeric in [0, |mu1-mu0|]")
## efficacy boundary limit
if(!is.null(ps) && (!is.numeric(ps) || (ps < 0.8 | ps > 1)))
stop("ps (efficacy boundary) must be numeric in [0.8,1]")
## futility boundary limit
if(!is.null(pf) && (!is.numeric(pf) || (pf < 0 | pf > 0.2)))
stop("pf (futility boundary) must be numeric in [0,0.2]")
## power limit
if(!is.null(power) && (!is.numeric(power) || (power < 0 | power > 1)))
stop("power must be numeric in [0,1]")
## t1error limit
if(!is.null(t1error) && (!is.numeric(t1error) || (t1error < 0 |t1error > 1)))
stop("type-I-error must be numeric in [0,1]")
## set.seed
if(!is.numeric(seed))
stop("seed must be numeric")
if(!is.numeric(sim))
stop("simulation number must be numeric")
set.seed(seed)
# Functions to calculate the posterior
Normal <- function(n0, mu0, var, y){
n <- length(y)
y_mean <- mean(y)
rnorm(1000, (n0*mu0 + n*y_mean)/(n0 + n), sqrt(var)/sqrt(n0 + n))
}
Normal.DIP <- function(N, mu0, var, y){
n <- length(y)
y_mean <- mean(y)
rnorm(1000, ((N - n)*mu0 + n*y_mean)/N, sqrt(var/N))
}
# Simulated Data
# calculate N that can achieve the power
N_v <- NULL
power_v <- NULL
n_v <- NULL
sd_v <- NULL
for (N in seq(from=nmin, to=nmax, by=1)){
cat1s <- 0
cat1f <- 0
n.enrolled <- NULL
for (k in 1:sim) {
y.data <- NULL
j <- 0
cat <- 0
cats <- 0
catf <- 0
pp_stop <- 0.5
while(cat == 0){
j <- j+1
y.data<-append(y.data,rnorm(1,mu1,sqrt(var)))
if(j>=10)
{
if (prior[[1]] == 2){
mu1_s<-Normal(n0 = prior[[2]], mu0, var, y = y.data)
}else if (prior[[1]] == 1){
mu1_s<-Normal.DIP(N, mu0, var, y = y.data)
}
if (alternative == "greater"){
pp_stop<-sum(mu1_s>mu0+d)/length(mu1_s)
}else if (alternative == "less"){
pp_stop<-sum(mu1_s<mu0-d)/length(mu1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
# Recruited Sample Size
n.enrolled <- append(n.enrolled, j)
}
power.cal <- cat1s/sim
jitter <- 0.01
if (power.cal >= power-jitter){
N_v <- append(N_v, N)
power_v <- append(power_v, power.cal)
n_v <- append(n_v, round(mean(n.enrolled), 0))
sd_v <- append(sd_v, round(sd(n.enrolled), 1))
}
result1 <- cbind(N_v, power_v, n_v, sd_v)
} # End of power calculation
if (is.null(result1)){
message("Suggest: please adjust your input values!")
stop(paste("No sample size in the range [",nmin,",",nmax,"] can achieve ", power*100, "% power", sep=""))
}
# calculate type I error
nmin1 <- N_v[which.min(N_v)] # start minimum sample size in calculation of exact type I error
N_v <- NULL
t1error_v <- NULL
for (N in seq(from=nmin1, to=nmax, by=1)){
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data <- NULL
j <- 0
cat <- 0
cats <- 0
catf <- 0
pp_stop <- 0.5
while(cat == 0){
j <- j+1
y.data<-append(y.data,rnorm(1,mu0, sqrt(var))) # under the null hypothesis mu1 = mu0
if(j>=10)
{
if (prior[[1]] == 2){
mu1_s<-Normal(n0 = prior[[2]], mu0, var, y = y.data)
}else if (prior[[1]] == 1){
mu1_s<-Normal.DIP(N, mu0, var, y = y.data)
}
if (alternative == "greater"){
pp_stop<-sum(mu1_s>mu0+d)/length(mu1_s)
}else if (alternative == "less"){
pp_stop<-sum(mu1_s<mu0-d)/length(mu1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
}
t1error.cal <- cat1s/sim
N_v <- append(N_v, N)
t1error_v <- append(t1error_v, t1error.cal)
result2 <- cbind(N_v, t1error_v)
} # End of Type-I-error calculation
# Outputs
if (!is.null(result1) & !is.null(result2)){
result <- merge(result1, result2, by=c("N_v"))
final <- as.data.frame(result)
# select the lowest/best-controlled type I error
final$diff <- abs(final$t1error_v - t1error)
final <- final[order(final$diff, final$t1error_v, final$power_v, final$N_v), ]
ff <- final[1,]
planN <- ff$N_v
exact.power <- ff$power_v
exact.t1 <- ff$t1error_v
ss <- ff$n_v
sd <- ff$sd_v
if (prior[[1]] == 1) {method = "DIP"
} else if (prior[[1]] == 2) {method = paste("Normal(",mu0, ",", var/prior[[2]], ")", sep="")}
z <- list(method = method, planned_sample_size = planN,
efficacy_boundary = ps, futility_boundary = pf,
exact_power = exact.power, exact_type_I_error = exact.t1,
expected_sample_size = ss, expected_sample_size_std = sd)
z
} # End of Outputs
}
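# DIP-weight sketch (illustrative): the DIP posterior mean above,
# ((N - n)*mu0 + n*ybar)/N, puts weight (N - n)/N on the null mean, so the
# prior's influence decays as enrolment n approaches the planned N:
#   N <- 100; n <- c(10, 50, 90)
#   (N - n) / N   # 0.9, 0.5, 0.1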
|
/scratch/gouwar.j/cran-all/cranData/BayesDIP/R/OneSampleNormal1.Design.R
|
# ------------------------------------------------------------------------------
# prior --> list containing the information for prior
# [[1]] - the prior distribution type:
# 1 - DIP
# 2 - Normal(mu0,var/n0)
# [[2]] - n0: the level of information contained in the prior and
# the contribution of null mean
# ------------------------------------------------------------------------------
#' One sample Normal model with one parameter (the mean) unknown, given variance
#'
#' For a given planned sample size, the efficacy and futility boundaries,
#' return the power, the type I error, the expected sample size and its
#' standard deviation, the probability of reaching the efficacy and futility boundaries.
#'
#'
#' @param prior A list of length 2 containing the distributional information of the prior.
#' The first element is a number specifying the type of prior. Options are
#' \enumerate{
#' \item DIP ;
#' \item Normal(mu0,var/n0), where mu0 = prior mean, var = the known variance}
#' The second element of the list is the parameter n0.
#' @param N The planned sample size.
#' @param mu0 The null mean value, which could be taken as the standard or current mean.
#' @param mu1 The mean value of the new treatment.
#' @param var The known variance.
#' @param d The target improvement (minimal clinically meaningful difference).
#' @param ps The efficacy boundary (upper boundary).
#' @param pf The futility boundary (lower boundary).
#' @param alternative less (lower values imply greater efficacy) or greater (larger
#' values imply greater efficacy).
#' @param seed The seed for simulations.
#' @param sim The number of simulations.
#' @return A list of the arguments with method and computed elements.
#' @examples
#' # with traditional Bayesian prior Normal(mu0, var/n0) with n0 = 6
#' OneSampleNormal1(list(2,6), N = 100, mu0 = 100, mu1 = 95, var=15, d = 0.05,
#'                ps = 0.95, pf = 0.05, alternative = "less",
#'                seed = 202210, sim = 10)
#' # with DIP
#' OneSampleNormal1(list(1,0), N = 100, mu0 = 100, mu1 = 95, var=15, d = 0.05,
#'                ps = 0.95, pf = 0.05, alternative = "less",
#'                seed = 202210, sim = 10)
#' @importFrom stats rbeta rbinom rgamma rnorm rpois
#' @export OneSampleNormal1
OneSampleNormal1 <- function(prior, N = 100, mu0, mu1, var, d = 0,
ps = 0.95, pf = 0.05,
alternative = c("less", "greater"), seed = 202209, sim = 5000) {
alternative <- match.arg(alternative)
# Define the inputs
if(prior[[1]] == 1){
prior[[2]] <- NA
}
## N limit
if(!is.null(N) && (!is.numeric(N) || N < 10))
stop("N must be a number of at least 10")
## mu0 limit
if(!is.null(mu0) && (!is.numeric(mu0)))
stop("mu0 must be numeric")
## mu1 limit
if(!is.null(mu1) && (!is.numeric(mu1)))
stop("mu1 must be numeric")
## var limit
if(!is.null(var) && (!is.numeric(var) || var < 0))
stop("var must be positive numeric")
## d limit
if(!is.null(d) && (!is.numeric(d) || (d < 0 | d > abs(mu1-mu0))))
stop("d must be numeric in [0, |mu1-mu0|]")
## efficacy boundary limit
if(!is.null(ps) && (!is.numeric(ps) || (ps < 0.8 | ps > 1)))
stop("ps (efficacy boundary) must be numeric in [0.8,1]")
## futility boundary limit
if(!is.null(pf) && (!is.numeric(pf) || (pf < 0 | pf > 0.2)))
stop("pf (futility boundary) must be numeric in [0,0.2]")
## set.seed
if(!is.numeric(seed))
stop("seed must be numeric")
## number of simulation
if(!is.numeric(sim))
stop("simulation number must be numeric")
set.seed(seed)
# Functions to calculate the posterior
Normal <- function(n0, mu0, var, y){
n <- length(y)
y_mean <- mean(y)
rnorm(1000, (n0*mu0 + n*y_mean)/(n0 + n), sqrt(var)/sqrt(n0 + n))
}
Normal.DIP <- function(N, mu0, var, y){
n <- length(y)
y_mean <- mean(y)
rnorm(1000, ((N - n)*mu0 + n*y_mean)/N, sqrt(var/N))
}
# Simulated Data
# calculate power
n.enrolled <- NULL
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data<-NULL
j<-0
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
y.data<-append(y.data,rnorm(1,mu1,sqrt(var)))
if(j>=10)
{
if (prior[[1]] == 2){
mu1_s<-Normal(n0 = prior[[2]], mu0, var, y = y.data)
}else if (prior[[1]] == 1){
mu1_s<-Normal.DIP(N, mu0, var, y = y.data)
}
if (alternative == "greater"){
pp_stop<-sum(mu1_s>mu0+d)/length(mu1_s)
}else if (alternative == "less"){
pp_stop<-sum(mu1_s<mu0-d)/length(mu1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
# Recruited Sample Size
n.enrolled <- append(n.enrolled, j)
}
ss <- round(mean(n.enrolled), digits = 1)
sd <- round(sd(n.enrolled), digits = 2)
fut.rate <- cat1f/sim
power <- cat1s/sim
# calculate type I error
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data<-NULL
j<-0
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
y.data<-append(y.data,rnorm(1,mu0, sqrt(var))) # under the null hypothesis mu1 = mu0
if(j>=10)
{
if (prior[[1]] == 2){
mu1_s<-Normal(n0 = prior[[2]], mu0, var, y = y.data)
}else if (prior[[1]] == 1){
mu1_s<-Normal.DIP(N, mu0, var, y = y.data)
}
if (alternative == "greater"){
pp_stop<-sum(mu1_s>mu0+d)/length(mu1_s)
}else if (alternative == "less"){
pp_stop<-sum(mu1_s<mu0-d)/length(mu1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
}
t1error <- cat1s/sim
# Outputs
if (prior[[1]] == 1) {method = "DIP"
} else if (prior[[1]] == 2) {method = paste("Normal(",mu0, ",", var/prior[[2]], ")", sep="")}
z <- list(method = method, power = power, type_I_error = t1error,
expected_sample_size = ss, expected_sample_size_std = sd,
the_prob_efficacy = power, the_prob_futility = fut.rate)
z
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDIP/R/OneSampleNormal1.R
|
# ------------------------------------------------------------------------------
# prior --> list containing the information for prior
# [[1]] - the prior distribution type:
# 1 - DIP
# 2 - Joint Priors
# [[2]] - k: sample size of observations from Normal prior
# [[3]] - v: sample size of observations from Gamma prior
# ------------------------------------------------------------------------------
#' One sample Normal model with two parameters unknown - both mean and variance
#'
#' Calculate the minimum planned sample size under an admissible design.
#' The user specifies the power and type-I error and picks the efficacy and
#' futility boundaries. If no design is admissible under the controlled
#' type-I error, the function defaults to outputting the designs with the
#' lowest type-I error that still achieve the user-defined (e.g. 80\%) power.
#'
#' @param prior A list of length 3 containing the distributional information of the prior.
#' The first element is a number specifying the type of prior. Options are
#' \enumerate{
#' \item DIP ;
#' \item Normal(mu0,var/k) and var ~ Inverse-Gamma(v/2, v*var0/2)
#'
#' where mu0 = prior mean, k = sample size of prior observations (Normal prior),
#'
#' v = sample size of prior observations (Gamma prior), var0 = prior sample variance}
#' The second and third elements of the list are the parameters k and v, respectively.
#' @param nmin The smallest sample size considered in the search.
#' @param nmax The largest sample size considered in the search.
#' @param mu0 The null mean value, which could be taken as the standard or current mean.
#' @param mu1 The mean value of the new treatment.
#' @param var0 The prior sample variance.
#' @param var The variance used to generate the data.
#' @param d The target improvement (minimal clinically meaningful difference).
#' @param ps The efficacy boundary (upper boundary).
#' @param pf The futility boundary (lower boundary).
#' @param power The power to achieve.
#' @param t1error The controlled type-I-error.
#' @param alternative less (lower values imply greater efficacy) or greater (larger
#' values imply greater efficacy).
#' @param seed The seed for simulations.
#' @param sim The number of simulations.
#' @return A list of the arguments with method and computed elements.
#' @examples
#' \donttest{
#' # with traditional Bayesian joint priors (k = 2, v = 1)
#' OneSampleNormal2.Design(list(2,2,1), nmin = 10, nmax = 100, mu0 = 100, mu1 = 95,
#' var0=225, var=225, d = 0, ps = 0.95, pf = 0.05,
#' power = 0.8, t1error = 0.05, alternative = "less",
#' seed = 202210, sim = 10)
#' # with DIP
#' OneSampleNormal2.Design(list(1,0,0), nmin = 10, nmax = 100, mu0 = 100, mu1 = 95,
#' var0=225, var=225, d = 0, ps = 0.95, pf = 0.05,
#' power = 0.8, t1error = 0.05, alternative = "less",
#' seed = 202210, sim = 10)
#' }
#' @importFrom stats rbeta rbinom rgamma rnorm rpois
#' @export OneSampleNormal2.Design
OneSampleNormal2.Design <- function(prior, nmin = 10, nmax = 100, mu0, mu1, var0, var, d = 0,
ps, pf, power = 0.8, t1error = 0.05,
alternative = c("less", "greater"), seed = 202209, sim = 1000){
alternative <- match.arg(alternative)
# Define the inputs
if(prior[[1]] == 1){
prior[[2]] <- NA
prior[[3]] <- NA
}
## nmin limit
if(!is.null(nmin) && (!is.numeric(nmin) || nmin < 10 || nmin >= nmax))
stop("nmin must be a number of at least 10 and less than 'nmax'")
## nmax limit
if(!is.null(nmax) && (!is.numeric(nmax) || nmax <= nmin || nmax >= 200))
stop("nmax must be greater than 'nmin' and less than 200")
## mu0 limit
if(!is.null(mu0) && (!is.numeric(mu0)))
stop("mu0 must be numeric")
## mu1 limit
if(!is.null(mu1) && (!is.numeric(mu1)))
stop("mu1 must be numeric")
## var0 limit
if(!is.null(var0) && (!is.numeric(var0) || var0 < 0))
stop("var must be positive number")
## var limit
if(!is.null(var) && (!is.numeric(var) || var < 0))
stop("var must be positive numeric")
## d limit
if(!is.null(d) && (!is.numeric(d) || (d < 0 | d > abs(mu1-mu0))))
stop("d must be numeric in [0, |mu1-mu0|]")
## efficacy boundary limit
if(!is.null(ps) && (!is.numeric(ps) || (ps < 0.8 | ps > 1)))
stop("ps (efficacy boundary) must be numeric in [0.8,1]")
## futility boundary limit
if(!is.null(pf) && (!is.numeric(pf) || (pf < 0 | pf > 0.2)))
stop("pf (futility boundary) must be numeric in [0,0.2]")
## power limit
if(!is.null(power) && (!is.numeric(power) || (power < 0 | power > 1)))
stop("power must be numeric in [0,1]")
## t1error limit
if(!is.null(t1error) && (!is.numeric(t1error) || (t1error < 0 |t1error > 1)))
stop("type-I-error must be numeric in [0,1]")
## set.seed
if(!is.numeric(seed))
stop("seed must be numeric")
if(!is.numeric(sim))
stop("simulation number must be numeric")
set.seed(seed)
# Functions to calculate the posterior
Normal <- function(k, v, mu0, var0, y){
n <- length(y)
y_mean <- mean(y)
y_var <- var(y)
ss <- v*var0 + (n - 1)*y_var + (k*n*(y_mean - mu0)^2)/(k + n)
s2 <- 1/rgamma(1000, (v + n)/2, ss/2)
rnorm(1000, (k*mu0 + n*y_mean)/(k + n), sqrt(s2/(k + n)))
}
Normal.DIP <- function(N, mu0, var0, y){
n <- length(y)
y_mean <- mean(y)
y_var <- var(y)
ss <- (N - n + 3)*var0 + (n - 1)*y_var + ((N - n)*n*(y_mean - mu0)^2)/N
s2 <- 1/rgamma(1000, (N + 3)/2, ss/2)
rnorm(1000, ((N - n)*mu0 + n*y_mean)/N, sqrt(s2/N))
}
# Simulated Data
# calculate N that can achieve the power
N_v <- NULL
power_v <- NULL
n_v <- NULL
sd_v <- NULL
for (N in seq(from=nmin, to=nmax, by=1)){
cat1s <- 0
cat1f <- 0
n.enrolled <- NULL
for (k in 1:sim) {
y.data <- NULL
j <- 0
cat <- 0
cats <- 0
catf <- 0
pp_stop <- 0.5
while(cat == 0){
j <- j+1
y.data<-append(y.data,rnorm(1,mu1,sqrt(var)))
if(j>=10)
{
if (prior[[1]] == 2){
mu1_s<-Normal(k = prior[[2]], v = prior[[3]], mu0, var0, y = y.data)
}else if (prior[[1]] == 1){
mu1_s<-Normal.DIP(N, mu0, var0, y = y.data)
}
if (alternative == "greater"){
pp_stop<-sum(mu1_s>mu0+d)/length(mu1_s)
}else if (alternative == "less"){
pp_stop<-sum(mu1_s<mu0-d)/length(mu1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
# Recruited Sample Size
n.enrolled <- append(n.enrolled, j)
}
power.cal <- cat1s/sim
jitter <- 0.01
if (power.cal >= power-jitter){
N_v <- append(N_v, N)
power_v <- append(power_v, power.cal)
n_v <- append(n_v, round(mean(n.enrolled), 0))
sd_v <- append(sd_v, round(sd(n.enrolled), 1))
}
result1 <- cbind(N_v, power_v, n_v, sd_v)
} # End of power calculation
if (is.null(result1)){
message("Suggest: please adjust your input values!")
stop(paste("No sample size in the range [",nmin,",",nmax,"] can achieve ", power*100, "% power", sep=""))
}
# calculate type I error
nmin1 <- N_v[which.min(N_v)] # start minimum sample size in calculation of exact type I error
N_v <- NULL
t1error_v <- NULL
for (N in seq(from=nmin1, to=nmax, by=1)){
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data <- NULL
j <- 0
cat <- 0
cats <- 0
catf <- 0
pp_stop <- 0.5
while(cat == 0){
j <- j+1
y.data<-append(y.data,rnorm(1,mu0, sqrt(var))) # under the null hypothesis mu1 = mu0
if(j>=10)
{
if (prior[[1]] == 2){
mu1_s<-Normal(k = prior[[2]], v = prior[[3]], mu0, var0, y = y.data)
}else if (prior[[1]] == 1){
mu1_s<-Normal.DIP(N, mu0, var0, y = y.data)
}
if (alternative == "greater"){
pp_stop<-sum(mu1_s>mu0+d)/length(mu1_s)
}else if (alternative == "less"){
pp_stop<-sum(mu1_s<mu0-d)/length(mu1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
}
t1error.cal <- cat1s/sim
N_v <- append(N_v, N)
t1error_v <- append(t1error_v, t1error.cal)
result2 <- cbind(N_v, t1error_v)
} # End of Type-I-error calculation
# Outputs
if (!is.null(result1) & !is.null(result2)){
result <- merge(result1, result2, by=c("N_v"))
final <- as.data.frame(result)
# select the lowest/best-controlled type I error
final$diff <- abs(final$t1error_v - t1error)
final <- final[order(final$diff, final$t1error_v, final$power_v, final$N_v), ]
ff <- final[1,]
planN <- ff$N_v
exact.power <- ff$power_v
exact.t1 <- ff$t1error_v
ss <- ff$n_v
sd <- ff$sd_v
if (prior[[1]] == 1) {method = "DIP"
} else if (prior[[1]] == 2) {method = paste("Bayesian ", "k = ", prior[[2]], ", ", "v = ", prior[[3]], sep="")}
z <- list(method = method, planned_sample_size = planN,
efficacy_boundary = ps, futility_boundary = pf,
exact_power = exact.power, exact_type_I_error = exact.t1,
expected_sample_size = ss, expected_sample_size_std = sd)
z
} # End of Outputs
}
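# Posterior-draw sketch (illustrative, hypothetical data): under the joint
# prior, the variance is drawn from its inverse-gamma conditional and then
# the mean given that variance, mirroring Normal() above:
#   y <- rnorm(20, 95, 15); k <- 2; v <- 1; mu0 <- 100; var0 <- 225; n <- length(y)
#   ss <- v*var0 + (n - 1)*var(y) + (k*n*(mean(y) - mu0)^2)/(k + n)
#   s2 <- 1/rgamma(1000, (v + n)/2, ss/2)
#   mu <- rnorm(1000, (k*mu0 + n*mean(y))/(k + n), sqrt(s2/(k + n)))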
|
/scratch/gouwar.j/cran-all/cranData/BayesDIP/R/OneSampleNormal2.Design.R
|
# ------------------------------------------------------------------------------
# prior --> list containing the information for prior
# [[1]] - the prior distribution type:
# 1 - DIP
# 2 - Joint Priors
# [[2]] - k: sample size of observations from Normal prior
# [[3]] - v: sample size of observations from Gamma prior
# ------------------------------------------------------------------------------
#' One sample Normal model with two parameters unknown - both mean and variance
#'
#' For a given planned sample size, the efficacy and futility boundaries,
#' return the power, the type I error, the expected sample size and its
#' standard deviation, the probability of reaching the efficacy and futility boundaries.
#'
#'
#' @param prior A list of length 3 containing the distributional information of the prior.
#' The first element is a number specifying the type of prior. Options are
#' \enumerate{
#' \item DIP ;
#' \item Normal(mu0,var/k) and var ~ Inverse-Gamma(v/2, v*var0/2)
#'
#' where mu0 = prior mean, k = sample size of prior observations (Normal prior),
#'
#' v = sample size of prior observations (Gamma prior), var0 = prior sample variance}
#' The second and third elements of the list are the parameters k and v, respectively.
#' @param N The planned sample size.
#' @param mu0 The null mean value, which could be taken as the standard or current mean.
#' @param mu1 The mean value of the new treatment.
#' @param var0 The prior sample variance.
#' @param var The variance used to generate the data.
#' @param d The target improvement (minimal clinically meaningful difference).
#' @param ps The efficacy boundary (upper boundary).
#' @param pf The futility boundary (lower boundary).
#' @param alternative less (lower values imply greater efficacy) or greater (larger
#' values imply greater efficacy).
#' @param seed The seed for simulations.
#' @param sim The number of simulations.
#' @return A list of the arguments with method and computed elements.
#' @examples
#' # with traditional Bayesian joint priors (k = 2, v = 1)
#' OneSampleNormal2(list(2,2,1), N = 100, mu0 = 100, mu1 = 95, var0=225, var=225, d = 0,
#' ps = 0.95, pf = 0.05, alternative = "less",
#' seed = 202210, sim = 10)
#' # with DIP
#' OneSampleNormal2(list(1,0,0), N = 100, mu0 = 100, mu1 = 95, var0=225, var=225, d = 0,
#' ps = 0.95, pf = 0.05, alternative = "less",
#' seed = 202210, sim = 10)
#' @importFrom stats rbeta rbinom rgamma rnorm rpois
#' @export OneSampleNormal2
OneSampleNormal2 <- function(prior, N = 100, mu0, mu1, var0, var, d = 0,
ps = 0.95, pf = 0.05,
alternative = c("less", "greater"), seed = 202209, sim = 5000) {
alternative <- match.arg(alternative)
# Define the inputs
if(prior[[1]] == 1){
prior[[2]] <- NA
prior[[3]] <- NA
}
## N limit
if(!is.null(N) && (!is.numeric(N) || N < 10))
stop("N must be a number of at least 10")
## mu0 limit
if(!is.null(mu0) && (!is.numeric(mu0)))
stop("mu0 must be numeric")
## mu1 limit
if(!is.null(mu1) && (!is.numeric(mu1)))
stop("mu1 must be numeric")
## var0 limit
if(!is.null(var0) && (!is.numeric(var0) || var0 < 0))
stop("var must be positive number")
## var limit
if(!is.null(var) && (!is.numeric(var) || var < 0))
stop("var must be positive numeric")
## d limit
if(!is.null(d) && (!is.numeric(d) || (d < 0 | d > abs(mu1-mu0))))
stop("d must be numeric in [0, |mu1-mu0|]")
## efficacy boundary limit
if(!is.null(ps) && (!is.numeric(ps) || (ps < 0.8 | ps > 1)))
stop("ps (efficacy boundary) must be numeric in [0.8,1]")
## futility boundary limit
if(!is.null(pf) && (!is.numeric(pf) || (pf < 0 | pf > 0.2)))
stop("pf (futility boundary) must be numeric in [0,0.2]")
## set.seed
if(!is.numeric(seed))
stop("seed must be numeric")
## number of simulation
if(!is.numeric(sim))
stop("simulation number must be numeric")
set.seed(seed)
# Functions to calculate the posterior
Normal <- function(k, v, mu0, var0, y){
n <- length(y)
y_mean <- mean(y)
y_var <- var(y)
ss <- v*var0 + (n - 1)*y_var + (k*n*(y_mean - mu0)^2)/(k + n)
s2 <- 1/rgamma(1000, (v + n)/2, ss/2)
rnorm(1000, (k*mu0 + n*y_mean)/(k + n), sqrt(s2/(k + n)))
}
Normal.DIP <- function(N, mu0, var0, y){
n <- length(y)
y_mean <- mean(y)
y_var <- var(y)
ss <- (N - n + 3)*var0 + (n - 1)*y_var + ((N - n)*n*(y_mean - mu0)^2)/N
s2 <- 1/rgamma(1000, (N + 3)/2, ss/2)
rnorm(1000, ((N - n)*mu0 + n*y_mean)/N, sqrt(s2/N))
}
# Simulated Data
# calculate power
n.enrolled <- NULL
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data<-NULL
j<-0
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
y.data<-append(y.data,rnorm(1,mu1,sqrt(var)))
if(j>=10)
{
if (prior[[1]] == 2){
mu1_s<-Normal(k = prior[[2]], v = prior[[3]], mu0, var0, y = y.data)
}else if (prior[[1]] == 1){
mu1_s<-Normal.DIP(N, mu0, var0, y = y.data)
}
if (alternative == "greater"){
pp_stop<-sum(mu1_s>mu0+d)/length(mu1_s)
}else if (alternative == "less"){
pp_stop<-sum(mu1_s<mu0-d)/length(mu1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
# Recruited Sample Size
n.enrolled <- append(n.enrolled, j)
}
ss <- round(mean(n.enrolled), digits = 1)
sd <- round(sd(n.enrolled), digits = 2)
fut.rate <- cat1f/sim
power <- cat1s/sim
# calculate type I error
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data<-NULL
j<-0
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
y.data<-append(y.data,rnorm(1,mu0, sqrt(var))) # under the null hypothesis mu1 = mu0
if(j>=10)
{
if (prior[[1]] == 2){
mu1_s<-Normal(k = prior[[2]], v = prior[[3]], mu0, var0, y = y.data)
}else if (prior[[1]] == 1){
mu1_s<-Normal.DIP(N, mu0, var0, y = y.data)
}
if (alternative == "greater"){
pp_stop<-sum(mu1_s>mu0+d)/length(mu1_s)
}else if (alternative == "less"){
pp_stop<-sum(mu1_s<mu0-d)/length(mu1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
}
t1error <- cat1s/sim
# Outputs
if (prior[[1]] == 1) {method = "DIP"
} else if (prior[[1]] == 2) {method = paste("Bayesian ", "k = ", prior[[2]], ", ", "v = ", prior[[3]], sep="")}
z <- list(method = method, power = power, type_I_error = t1error,
expected_sample_size = ss, expected_sample_size_std = sd,
the_prob_efficacy = power, the_prob_futility = fut.rate)
z
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDIP/R/OneSampleNormal2.R
|
# ------------------------------------------------------------------------------
# prior --> list containing the information for prior
# [[1]] - the prior distribution type:
# 1 - DIP
# 2 - Gamma(a,b)
# [[2]] - a: shape parameter of the Gamma distribution
# [[3]] - b: rate parameter of the Gamma distribution
# ------------------------------------------------------------------------------
#' One sample Poisson model - Trial Design
#'
#' Calculate the minimum planned sample size under an admissible design.
#' The user specifies the power and type-I error and picks the efficacy and
#' futility boundaries. If no design is admissible under the controlled
#' type-I error, the function defaults to outputting the designs with the
#' lowest type-I error that still achieve the user-defined (e.g. 80\%) power.
#'
#'
#' @param prior A list of length 3 containing the distributional information of the prior.
#' The first element is a number specifying the type of prior. Options are
#' \enumerate{
#' \item DIP ;
#' \item Gamma(a,b), where a = shape, b = rate}
#' The second and third elements of the list are the parameters a and b, respectively.
#' @param nmin The smallest sample size considered in the search.
#' @param nmax The largest sample size considered in the search.
#' @param m0 The null event rate, which could be taken as the standard or current event rate.
#' @param m1 The event rate of the new treatment.
#' @param d The target improvement (minimal clinically meaningful difference).
#' @param ps The efficacy boundary (upper boundary).
#' @param pf The futility boundary (lower boundary).
#' @param power The expected power to achieve.
#' @param t1error The controlled type-I-error.
#' @param alternative less (lower values imply greater efficacy) or greater (larger
#' values imply greater efficacy).
#' @param seed The seed for simulations.
#' @param sim The number of simulations.
#' @return A list of the arguments with method and computed elements.
#' @examples
#' \donttest{
#' # with traditional Bayesian prior Gamma(0.5,0.001)
#' OneSamplePoisson.Design(list(2,0.5,0.001), nmin = 10, nmax=100, m0 = 5, m1 = 4, d = 0,
#' ps = 0.95, pf = 0.05, power = 0.80, t1error=0.05, alternative = "less",
#' seed = 202210, sim = 10)
#' # with DIP
#' OneSamplePoisson.Design(list(1,0,0), nmin = 10, nmax=100, m0 = 5, m1 = 4, d = 0,
#' ps = 0.95, pf = 0.05, power = 0.80, t1error=0.05, alternative = "less",
#' seed = 202210, sim = 10)
#' }
#' @importFrom stats rbeta rbinom rgamma rnorm rpois
#' @export OneSamplePoisson.Design
OneSamplePoisson.Design <- function(prior, nmin = 10, nmax = 100, m0, m1, d = 0,
ps, pf, power = 0.8, t1error = 0.05,
alternative = c("less", "greater"), seed = 202209, sim = 1000){
alternative <- match.arg(alternative)
# Define the inputs
if(prior[[1]] == 1){
prior[[2]] <- NA
prior[[3]] <- NA
}
## nmin limit
if(!is.null(nmin) && (!is.numeric(nmin) || nmin < 10 || nmin >= nmax))
stop("nmin must be a number of at least 10 and less than 'nmax'")
## nmax limit
if(!is.null(nmax) && (!is.numeric(nmax) || nmax <= nmin || nmax >= 200))
stop("nmax must be greater than 'nmin' and less than 200")
## m0 limit
if(!is.null(m0) && (!is.numeric(m0) || (m0 < 0)))
stop("m0 must be numeric in [0,inf]")
## m1 limit
if(!is.null(m1) && (!is.numeric(m1) || (m1 < 0)))
stop("m1 must be numeric in [0,inf]")
## d limit
if(!is.null(d) && (!is.numeric(d) || (d < 0 | d > abs(m1-m0))))
stop("d must be numeric in [0, |m1-m0|]")
## efficacy boundary limit
if(!is.null(ps) && (!is.numeric(ps) || (ps < 0.8 | ps > 1)))
stop("ps (efficacy boundary) must be numeric in [0.8,1]")
## futility boundary limit
if(!is.null(pf) && (!is.numeric(pf) || (pf < 0 | pf > 0.2)))
stop("pf (futility boundary) must be numeric in [0,0.2]")
## power limit
if(!is.null(power) && (!is.numeric(power) || (power < 0 | power > 1)))
stop("power must be numeric in [0,1]")
## t1error limit
if(!is.null(t1error) && (!is.numeric(t1error) || (t1error < 0 |t1error > 1)))
stop("type-I-error must be numeric in [0,1]")
## set.seed
if(!is.numeric(seed))
stop("seed must be numeric")
if(!is.numeric(sim))
stop("simulation number must be numeric")
set.seed(seed)
# Functions to calculate the posterior
Poisson <- function(a, b, y) rgamma(1000, a + sum(y), b + length(y))
Poisson.DIP <- function(m0, y, N){
rgamma(1000, 0.05 + sum(y) + m0*(N - length(y)), 0.001 + N)
}
# Simulated Data
# calculate N that can achieve the power
N_v <- NULL
power_v <- NULL
n_v <- NULL
sd_v <- NULL
for (N in seq(from=nmin, to=nmax, by=1)){
cat1s <- 0
cat1f <- 0
n.enrolled <- NULL
for (k in 1:sim) {
y.data <- NULL
j <- 0
cat <- 0
cats <- 0
catf <- 0
pp_stop <- 0.5
while(cat == 0){
j <- j+1
y.data<-append(y.data,rpois(1,m1))
if(j>=10)
{
if (prior[[1]] == 2){
m1_s<-Poisson(a = prior[[2]], b = prior[[3]], y = y.data)
}else if (prior[[1]] == 1){
m1_s <- Poisson.DIP(m0, y = y.data, N = N)
}
if (alternative == "greater"){
pp_stop<-sum(m1_s>m0+d)/length(m1_s)
}else if (alternative == "less"){
pp_stop<-sum(m1_s<m0-d)/length(m1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
# Recruited Sample Size
n.enrolled <- append(n.enrolled, j)
}
power.cal <- cat1s/sim
jitter <- 0.01
if (power.cal >= power-jitter){
N_v <- append(N_v, N)
power_v <- append(power_v, power.cal)
n_v <- append(n_v, round(mean(n.enrolled), 0))
sd_v <- append(sd_v, round(sd(n.enrolled), 1))
}
result1 <- cbind(N_v, power_v, n_v, sd_v)
} # End of power calculation
if (is.null(result1)){
message("Suggest: please adjust your input values!")
stop(paste("No sample size in the range [",nmin,",",nmax,"] can achieve ", power*100, "% power", sep=""))
}
# calculate type I error
nmin1 <- N_v[which.min(N_v)] # start minimum sample size in calculation of exact type I error
N_v <- NULL
t1error_v <- NULL
for (N in seq(from=nmin1, to=nmax, by=1)){
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data <- NULL
j <- 0
cat <- 0
cats <- 0
catf <- 0
pp_stop <- 0.5
while(cat == 0){
j <- j+1
y.data<-append(y.data,rpois(1,m0)) # under the null hypothesis m1 = m0
if(j>=10)
{
if (prior[[1]] == 2){
m1_s<-Poisson(a = prior[[2]], b = prior[[3]], y = y.data)
}else if (prior[[1]] == 1){
m1_s <- Poisson.DIP(m0, y = y.data, N = N)
}
if (alternative == "greater"){
pp_stop<-sum(m1_s>m0+d)/length(m1_s)
}else if (alternative == "less"){
pp_stop<-sum(m1_s<m0-d)/length(m1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(catf==1){cat1f<-cat1f+1}
}
t1error.cal <- cat1s/sim
N_v <- append(N_v, N)
t1error_v <- append(t1error_v, t1error.cal)
result2 <- cbind(N_v, t1error_v)
} # End of Type-I-error calculation
# Outputs
if (!is.null(result1) & !is.null(result2)){
result <- merge(result1, result2, by=c("N_v"))
final <- as.data.frame(result)
# select the lowest/best-controlled type I error
final$diff <- abs(final$t1error_v - t1error)
final <- final[order(final$diff, final$t1error_v, final$power_v, final$N_v), ]
ff <- final[1,]
planN <- ff$N_v
exact.power <- ff$power_v
exact.t1 <- ff$t1error_v
ss <- ff$n_v
sd <- ff$sd_v
if (prior[[1]] == 1) {method = "DIP"
} else if (prior[[1]] == 2) {method = paste("Gamma(",prior[[2]], ",", prior[[3]], ")", sep="")}
z <- list(method = method, planned_sample_size = planN,
efficacy_boundary = ps, futility_boundary = pf,
exact_power = exact.power, exact_type_I_error = exact.t1,
expected_sample_size = ss, expected_sample_size_std = sd)
z
} # End of Outputs
}
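# Monitoring-rule sketch (illustrative, hypothetical interim counts): with a
# Gamma(0.5, 0.001) prior, the posterior for the event rate is conjugate, as
# in Poisson() above:
#   y <- c(4, 6, 3, 5, 4, 5, 3, 4, 6, 4)
#   draws <- rgamma(1000, 0.5 + sum(y), 0.001 + length(y))
#   sum(draws < 5 - 0) / length(draws)   # P(m1 < m0 - d), m0 = 5, d = 0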
|
/scratch/gouwar.j/cran-all/cranData/BayesDIP/R/OneSamplePoisson.Design.R
|
# ------------------------------------------------------------------------------
# prior --> list containing the information for prior
# [[1]] - the prior distribution type:
# 1 - DIP
# 2 - Gamma(a,b)
# [[2]] - a: shape parameter of the Gamma distribution
# [[3]] - b: rate parameter of the Gamma distribution
# ------------------------------------------------------------------------------
#' One sample Poisson model
#'
#' For a given planned sample size, the efficacy and futility boundaries,
#' return the power, the type I error, the expected sample size and its
#' standard deviation, the probability of reaching the efficacy and futility boundaries.
#'
#'
#' @param prior A list of length 3 containing the distributional information of the prior.
#' The first element is a number specifying the type of prior. Options are
#' \enumerate{
#' \item DIP;
#' \item Gamma(a,b), where a = shape, b = rate}
#' The second and third elements of the list are the parameters a and b, respectively.
#' @param N The planned sample size.
#' @param m0 The null event rate, which could be taken as the standard or current event rate.
#' @param m1 The event rate of the new treatment.
#' @param d The target improvement (minimal clinically meaningful difference).
#' @param ps The efficacy boundary (upper boundary).
#' @param pf The futility boundary (lower boundary).
#' @param alternative less (lower values imply greater efficacy) or greater (larger
#' values imply greater efficacy).
#' @param seed The seed for simulations.
#' @param sim The number of simulations.
#' @return A list containing the method, the power, the type I error, the expected
#' sample size with its standard deviation, and the probabilities of reaching the
#' efficacy and futility boundaries.
#' @examples
#' # with traditional Bayesian prior Gamma(0.5,0.001)
#' OneSamplePoisson(list(2,0.5,0.001), N = 100, m0 = 0.5, m1 = 0.4, d = 0.05,
#' ps = 0.95, pf = 0.05, alternative = "less",
#' seed = 202210, sim = 10)
#' # with DIP
#' OneSamplePoisson(list(1,0,0), N = 100, m0 = 0.5, m1 = 0.4, d = 0.05,
#' ps = 0.95, pf = 0.05, alternative = "less",
#' seed = 202210, sim = 10)
#' @importFrom stats rbeta rbinom rgamma rnorm rpois
#' @export OneSamplePoisson
OneSamplePoisson <- function(prior, N = 100, m0, m1, d = 0,
ps = 0.95, pf = 0.05,
alternative = c("less", "greater"), seed = 202209, sim = 5000) {
alternative <- match.arg(alternative)
# Define the inputs
if(prior[[1]] == 1){
prior[[2]] <- NA
prior[[3]] <- NA
}
## N limit
  if(!is.null(N) && (!is.numeric(N) || N <= 10 ))
    stop("N must be a positive number greater than 10")
## m0 limit
if(!is.null(m0) && (!is.numeric(m0) || (m0 < 0)))
stop("m0 must be numeric in [0,inf]")
## m1 limit
if(!is.null(m1) && (!is.numeric(m1) || (m1 < 0)))
stop("m1 must be numeric in [0,inf]")
## d limit
if(!is.null(d) && (!is.numeric(d) || (d < 0 | d > abs(m1-m0))))
stop("d must be numeric in [0, |m1-m0|]")
## efficacy boundary limit
if(!is.null(ps) && (!is.numeric(ps) || (ps < 0.8 | ps > 1)))
stop("ps (efficacy boundary) must be numeric in [0.8,1]")
## futility boundary limit
if(!is.null(pf) && (!is.numeric(pf) || (pf < 0 | pf > 0.2)))
stop("pf (futility boundary) must be numeric in [0,0.2]")
## set.seed
if(!is.numeric(seed))
stop("seed must be numeric")
## number of simulation
if(!is.numeric(sim))
stop("simulation number must be numeric")
set.seed(seed)
  # Functions to draw from the posterior distribution of the event rate
  Poisson <- function(a, b, y){
    # conjugate update: Gamma(a, b) prior + Poisson data -> Gamma(a + sum(y), b + n)
    rgamma(1000, a + sum(y), b + length(y))
  }
  Poisson.DIP <- function(m0, y, N){
    # DIP: impute the N - n outcomes not yet observed at the null rate m0
    rgamma(1000, 0.05 + sum(y) + m0*(N - length(y)), 0.001 + N)
  }
# Simulated Data
# calculate power
n.enrolled <- NULL
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data<-NULL
j<-0
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
y.data<-append(y.data,rpois(1,m1))
if(j>=10)
{
if (prior[[1]] == 2){
m1_s<-Poisson(a = prior[[2]], b = prior[[3]], y = y.data)
}else if (prior[[1]] == 1){
m1_s <- Poisson.DIP(m0, y = y.data, N = N)
}
if (alternative == "greater"){
pp_stop<-sum(m1_s>m0+d)/length(m1_s)
}else if (alternative == "less"){
pp_stop<-sum(m1_s<m0-d)/length(m1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(cats==0){cat1s<-cat1s}
if(catf==1){cat1f<-cat1f+1}
if(catf==0){cat1f<-cat1f}
# Recruited Sample Size
n.enrolled <- append(n.enrolled, j)
}
ss <- round(mean(n.enrolled), digits = 1)
sd <- round(sd(n.enrolled), digits = 2)
fut.rate <- cat1f/sim
power <- cat1s/sim
# calculate type I error
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data<-NULL
j<-0
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
y.data<-append(y.data,rpois(1,m0)) # under the null hypothesis m1 = m0
if(j>=10)
{
if (prior[[1]] == 2){
m1_s<-Poisson(a = prior[[2]], b = prior[[3]], y = y.data)
}else if (prior[[1]] == 1){
m1_s <- Poisson.DIP(m0, y = y.data, N = N)
}
if (alternative == "greater"){
pp_stop<-sum(m1_s>m0+d)/length(m1_s)
}else if (alternative == "less"){
pp_stop<-sum(m1_s<m0-d)/length(m1_s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(cats==0){cat1s<-cat1s}
if(catf==1){cat1f<-cat1f+1}
if(catf==0){cat1f<-cat1f}
}
t1error <- cat1s/sim
# Outputs
if (prior[[1]] == 1) {method = "DIP"
} else if (prior[[1]] == 2) {method = paste("Gamma(",prior[[2]], ",", prior[[3]], ")", sep="")}
z <- list(method = method, power = power, type_I_error = t1error,
expected_sample_size = ss, expected_sample_size_std = sd,
the_prob_efficacy = power, the_prob_futility = fut.rate)
z
}
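# ------------------------------------------------------------------------------
# Hedged sketch (exposition only; `demo_`-prefixed names are hypothetical): the
# conjugate update behind the posterior samplers above. A Gamma(a, b) prior with
# Poisson counts y_1, ..., y_n yields a Gamma(a + sum(y), b + n) posterior; the
# DIP variant instead imputes the N - n outcomes not yet observed at the null
# rate m0 before updating.
demo_poisson_posterior <- function(a, b, y, ndraw = 1000) {
  rgamma(ndraw, a + sum(y), b + length(y))
}
# The stopping statistic is a posterior tail probability, e.g. P(m1 < m0 - d | y):
# y <- rpois(20, 0.4)
# mean(demo_poisson_posterior(0.5, 0.001, y) < 0.5 - 0.05)
# ------------------------------------------------------------------------------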
|
/scratch/gouwar.j/cran-all/cranData/BayesDIP/R/OneSamplePoisson.R
|
# ------------------------------------------------------------------------------
# prior --> list containing the information for prior
# [[1]] - the prior distribution type:
# 1 - DIP
# 2 - Beta(a,b)
# [[2]] - a: first parameter of the Beta distribution
# [[3]] - b: second parameter of the Beta distribution
# ------------------------------------------------------------------------------
#' Two sample Bernoulli model - Trial Design
#'
#' Calculate the minimum planned sample size under an admissible design.
#' The users decide the power and type-I-error, and pick the efficacy and futility boundaries.
#' If there is no admissible design with a controlled type-I-error, the function defaults to
#' outputting the design with the lowest type-I-error and at least the user-defined (e.g. 80\%) power.
#'
#'
#' @param prior A list of length 3 containing the distributional information of the prior.
#' The first element is a number specifying the type of prior. Options are
#' \enumerate{
#' \item DIP;
#' \item Beta(a,b), where a = shape1, b = shape2}
#' The second and third elements of the list are the parameters a and b, respectively.
#' @param nmin The minimum total sample size (both treatment groups combined) at which to start the search.
#' @param nmax The maximum total sample size (both treatment groups combined) at which to stop the search.
#' @param p1 The response rate of the new treatment.
#' @param p2 The response rate of the compared treatment.
#' @param d The target improvement (minimal clinically meaningful difference).
#' @param ps The efficacy boundary (upper boundary).
#' @param pf The futility boundary (lower boundary).
#' @param power The power to achieve.
#' @param t1error The controlled type-I-error.
#' @param alternative less (lower values imply greater efficacy) or greater (larger
#' values imply greater efficacy).
#' @param seed The seed for simulations.
#' @param sim The number of simulations.
#' @return A list containing the method, the planned sample size, the efficacy and futility
#' boundaries, the exact power, the exact type I error, and the expected sample size
#' with its standard deviation for each treatment group.
#' @examples
#' \donttest{
#' # with traditional Bayesian prior Beta(1,1)
#' TwoSampleBernoulli.Design(list(2,1,1), nmin = 100, nmax = 120, p1 = 0.5, p2 = 0.3, d = 0,
#' ps = 0.90, pf = 0.05, power = 0.8, t1error = 0.05, alternative = "greater",
#' seed = 202210, sim = 10)
#' # with DIP
#' TwoSampleBernoulli.Design(list(1,0,0), nmin = 100, nmax = 120, p1 = 0.5, p2 = 0.3, d = 0,
#' ps = 0.90, pf = 0.05, power = 0.8, t1error = 0.05, alternative = "greater",
#' seed = 202210, sim = 10)
#' }
#' @import stats
#' @export TwoSampleBernoulli.Design
TwoSampleBernoulli.Design <- function(prior, nmin=10, nmax = 200, p1, p2, d = 0,
ps = 0.95, pf = 0.05, power = 0.80, t1error = 0.05,
alternative = c("less", "greater"), seed = 202209, sim = 500){
alternative <- match.arg(alternative)
# Define the inputs
if(prior[[1]] == 1){
prior[[2]] <- NA
prior[[3]] <- NA
}
## nmin limit
if(!is.null(nmin) && (!is.numeric(nmin) || nmin < 10 || nmin >= nmax))
stop("nmin must be positive number and at least 10")
## nmax limit
if(!is.null(nmax) && (!is.numeric(nmax) || nmax <= nmin || nmax >= 300))
stop("nmax must greater than 'nmin' and less than 300")
## p1 limit
if(!is.null(p1) && (!is.numeric(p1) || (p1 < 0 | p1 > 1)))
stop("p1 must be numeric in [0,1]")
## p2 limit
if(!is.null(p2) && (!is.numeric(p2) || (p2 < 0 | p2 > 1)))
stop("p2 must be numeric in [0,1]")
## d limit
if(!is.null(d) && (!is.numeric(d) || (d < 0 | d > abs(p1-p2))))
stop("d must be numeric in [0, |p1-p2|]")
## efficacy boundary limit
if(!is.null(ps) && (!is.numeric(ps) || (ps < 0.8 | ps > 1)))
stop("ps (efficacy boundary) must be numeric in [0.8,1]")
## futility boundary limit
if(!is.null(pf) && (!is.numeric(pf) || (pf < 0 | pf > 0.2)))
stop("pf (futility boundary) must be numeric in [0,0.2]")
## power limit
if(!is.null(power) && (!is.numeric(power) || (power < 0 | power > 1)))
stop("power must be numeric in [0,1]")
## t1error limit
if(!is.null(t1error) && (!is.numeric(t1error) || (t1error < 0 |t1error > 1)))
stop("type-I-error must be numeric in [0,1]")
## set.seed
if(!is.numeric(seed))
stop("seed must be numeric")
## number of simulation
if(!is.numeric(sim))
stop("simulation number must be numeric")
set.seed(seed)
# Functions to calculate the posterior
Bernoulli <- function(a,b,y){posterior<-rbeta(1000, a+sum(y), b+(length(y)-sum(y)))}
Bernoulli.DIP <- function(p0, y, N){
j<-length(y)
posterior<-rbeta(1000,1+sum(y)+p0*(N-j),1+(j-sum(y))+(1-p0)*(N-j))
}
# Simulated Data
# calculate N that can achieve the power
N_v <- NULL
power_v <- NULL
n1_v <- NULL
n2_v <- NULL
std1_v <- NULL
std2_v <- NULL
for (N in seq(from=nmin, to=nmax, by=1)){
cat1s <- 0
cat1f <- 0
n.enrolled <- NULL
n1.enrolled <- NULL
n2.enrolled <- NULL
for (k in 1:sim) {
y.data<-NULL
Group<-NULL
j<-0
r<-0.5 # equal allocation
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
u<-runif(1,min = 0,max = 1)
if(u<=r){
Group=append(Group,1)
y.data<-append(y.data,rbinom(1,1,p1))}
if(u>r){
Group=append(Group,0)
y.data<-append(y.data,rbinom(1,1,p2))}
Matd<-as.data.frame(cbind(y.data,Group))
        y1<-Matd$y.data[which(Matd$Group==1)]
        y2<-Matd$y.data[which(Matd$Group==0)]
sn1 <- length(y1[y1==1]) # number of successes
sn2 <- length(y2[y2==1])
if(j>=10 & sn1>0 & sn2>0)
{
if (prior[[1]] == 2){
p1s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y1)
p2s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y2)
}else if (prior[[1]] == 1){
N1<-ceiling(N/2)
N2<-ceiling(N/2)
p0<-rbeta(1000,1,1) # hyper-prior
p1s<-Bernoulli.DIP(p0, y=y1, N=N1)
p2s<-Bernoulli.DIP(p0, y=y2, N=N2)
p1s[is.na(p1s)]<-sum(y1)/length(y1)
p2s[is.na(p2s)]<-sum(y2)/length(y2)
}
if (alternative == "greater"){
pp_stop<-sum(p1s>p2s+d)/length(p1s)
}else if (alternative == "less"){
pp_stop<-sum(p1s<p2s-d)/length(p1s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(cats==0){cat1s<-cat1s}
if(catf==1){cat1f<-cat1f+1}
if(catf==0){cat1f<-cat1f}
# Recruited Sample Size
n.enrolled <- append(n.enrolled, j)
n1.enrolled <- append(n1.enrolled, length(y1))
n2.enrolled <- append(n2.enrolled, length(y2))
}
power.cal <- cat1s/sim
    jitter <- 0.01  # tolerance allowed on the achieved power
if (power.cal >= power-jitter){
N_v <- append(N_v, N)
power_v <- append(power_v, power.cal)
n1_v <- append(n1_v, round(mean(n1.enrolled), 0))
n2_v <- append(n2_v, round(mean(n2.enrolled), 0))
std1_v <- append(std1_v, round(sd(n1.enrolled), 1))
std2_v <- append(std2_v, round(sd(n2.enrolled), 1))
}
result1 <- cbind(N_v, power_v, n1_v, std1_v, n2_v, std2_v)
} # End of power calculation
if (is.null(result1)){
message("Suggest: please adjust your input values!")
stop(paste("No sample size in the range [",nmin,",",nmax,"] can achieve ", power*100, "% power", sep=""))
}
# calculate type I error
nmin1 <- N_v[which.min(N_v)] # start minimum sample size in calculation of exact type I error
N_v <- NULL
t1error_v <- NULL
for (N in seq(from=nmin1, to=nmax, by=1)){
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data<-NULL
Group<-NULL
j<-0
r<-0.5 # equal allocation
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
u<-runif(1,min = 0,max = 1)
if(u<=r){
Group=append(Group,1)
y.data<-append(y.data,rbinom(1,1,p2))} #under the null hypothesis p1 = p2
if(u>r){
Group=append(Group,0)
y.data<-append(y.data,rbinom(1,1,p2))}
Matd<-as.data.frame(cbind(y.data,Group))
        y1<-Matd$y.data[which(Matd$Group==1)]
        y2<-Matd$y.data[which(Matd$Group==0)]
sn1 <- length(y1[y1==1]) # number of successes
sn2 <- length(y2[y2==1])
if(j>=10 & sn1>0 & sn2>0)
{
if (prior[[1]] == 2){
p1s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y1)
p2s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y2)
}else if (prior[[1]] == 1){
N1<-ceiling(N/2)
N2<-ceiling(N/2)
p0<-rbeta(1000,1,1) # hyper-prior
p1s<-Bernoulli.DIP(p0, y=y1, N=N1)
p2s<-Bernoulli.DIP(p0, y=y2, N=N2)
p1s[is.na(p1s)]<-sum(y1)/length(y1)
p2s[is.na(p2s)]<-sum(y2)/length(y2)
}
if (alternative == "greater"){
pp_stop<-sum(p1s>p2s+d)/length(p1s)
}else if (alternative == "less"){
pp_stop<-sum(p1s<p2s-d)/length(p1s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(cats==0){cat1s<-cat1s}
if(catf==1){cat1f<-cat1f+1}
if(catf==0){cat1f<-cat1f}
}
t1error.cal <- cat1s/sim
N_v <- append(N_v, N)
t1error_v <- append(t1error_v, t1error.cal)
result2 <- cbind(N_v, t1error_v)
} # End of Type-I-error calculation
# Outputs
if (!is.null(result1) & !is.null(result2)){
result <- merge(result1, result2, by=c("N_v"))
final <- as.data.frame(result)
# select the lowest/best-controlled type I error
final$diff <- abs(final$t1error_v - t1error)
final <- final[order(final$diff, final$t1error_v, final$power_v, final$N_v), ]
ff <- final[1,]
planN <- ff$N_v
exact.power <- ff$power_v
exact.t1 <- ff$t1error_v
grp1 = paste(ff$n1_v, " (", ff$std1_v, ")", sep="")
grp2 = paste(ff$n2_v, " (", ff$std2_v, ")", sep="")
if (prior[[1]] == 1) {method = "DIP"
} else if (prior[[1]] == 2) {method = paste("Beta(",prior[[2]], ",", prior[[3]], ")", sep="")}
z <- list(method = method, planned_sample_size = planN,
efficacy_boundary = ps, futility_boundary = pf,
exact_power = exact.power, exact_type_I_error = exact.t1,
expected_sample_size_and_std_for_the_new_treatment_group = grp1,
expected_sample_size_and_std_for_the_compared_treatment_group = grp2)
z
} # End of Outputs
}
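# ------------------------------------------------------------------------------
# Hedged sketch (exposition only; `demo_`-prefixed names are hypothetical): the
# conjugate update behind the posterior samplers above. A Beta(a, b) prior with
# Bernoulli data yields a Beta(a + successes, b + failures) posterior; the DIP
# variant imputes the outcomes of the subjects not yet enrolled in a group
# (per-group target ceiling(N/2)) using a draw p0 from the Beta(1, 1) hyper-prior
# on the common response rate.
demo_bernoulli_posterior <- function(a, b, y, ndraw = 1000) {
  rbeta(ndraw, a + sum(y), b + (length(y) - sum(y)))
}
# y <- rbinom(30, 1, 0.4)
# mean(demo_bernoulli_posterior(1, 1, y) > 0.3)   # P(p > 0.3 | y)
# ------------------------------------------------------------------------------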
|
/scratch/gouwar.j/cran-all/cranData/BayesDIP/R/TwoSampleBernoulli.Design.R
|
# ------------------------------------------------------------------------------
# prior --> list containing the information for prior
# [[1]] - the prior distribution type:
# 1 - DIP
# 2 - Beta(a,b)
# [[2]] - a: first parameter of the Beta distribution
# [[3]] - b: second parameter of the Beta distribution
# ------------------------------------------------------------------------------
#' Two sample Bernoulli model
#'
#' For a given planned sample size, the efficacy and futility boundaries,
#' return the power, the type I error, the expected sample size and its
#' standard deviation, the probability of reaching the efficacy and futility boundaries.
#' Equal allocation between two treatment groups.
#'
#'
#' @param prior A list of length 3 containing the distributional information of the prior.
#' The first element is a number specifying the type of prior. Options are
#' \enumerate{
#' \item DIP;
#' \item Beta(a,b), where a = shape1, b = shape2}
#' The second and third elements of the list are the parameters a and b, respectively.
#' @param N The total planned sample size for two treatment groups.
#' @param p1 The response rate of the new treatment.
#' @param p2 The response rate of the compared treatment.
#' @param d The target improvement (minimal clinically meaningful difference).
#' @param ps The efficacy boundary (upper boundary).
#' @param pf The futility boundary (lower boundary).
#' @param alternative less (lower values imply greater efficacy) or greater (larger
#' values imply greater efficacy).
#' @param seed The seed for simulations.
#' @param sim The number of simulations.
#' @return A list containing the method, the power, the type I error, the planned sample
#' size for each group, the expected sample size with its standard deviation for each
#' treatment group, and the probabilities of reaching the efficacy and futility boundaries.
#' @examples
#' # with traditional Bayesian prior Beta(1,1)
#' TwoSampleBernoulli(list(2,1,1), N = 200, p1 = 0.5, p2 = 0.3, d = 0,
#' ps = 0.90, pf = 0.05, alternative = "greater",
#' seed = 202210, sim = 5)
#' # with DIP
#' TwoSampleBernoulli(list(1,0,0), N = 200, p1 = 0.5, p2 = 0.3, d = 0,
#' ps = 0.90, pf = 0.05, alternative = "greater",
#' seed = 202210, sim = 5)
#' @import stats
#' @export TwoSampleBernoulli
TwoSampleBernoulli <- function(prior, N = 200, p1, p2, d = 0,
ps = 0.95, pf = 0.05,
alternative = c("less", "greater"), seed = 202209, sim = 5000) {
alternative <- match.arg(alternative)
# Define the inputs
if(prior[[1]] == 1){
prior[[2]] <- NA
prior[[3]] <- NA
}
## N limit
  if(!is.null(N) && (!is.numeric(N) || N <= 10 ))
    stop("N must be a positive number greater than 10")
## p1 limit
if(!is.null(p1) && (!is.numeric(p1) || (p1 < 0 | p1 > 1)))
stop("p1 must be numeric in [0,1]")
## p2 limit
if(!is.null(p2) && (!is.numeric(p2) || (p2 < 0 | p2 > 1)))
stop("p2 must be numeric in [0,1]")
## d limit
if(!is.null(d) && (!is.numeric(d) || (d < 0 | d > abs(p1-p2))))
stop("d must be numeric in [0, |p1-p2|]")
## efficacy boundary limit
if(!is.null(ps) && (!is.numeric(ps) || (ps < 0.8 | ps > 1)))
stop("ps (efficacy boundary) must be numeric in [0.8,1]")
## futility boundary limit
if(!is.null(pf) && (!is.numeric(pf) || (pf < 0 | pf > 0.2)))
stop("pf (futility boundary) must be numeric in [0,0.2]")
## set.seed
if(!is.numeric(seed))
stop("seed must be numeric")
## number of simulation
if(!is.numeric(sim))
stop("simulation number must be numeric")
set.seed(seed)
# Functions to calculate the posterior
Bernoulli <- function(a,b,y){posterior<-rbeta(1000, a+sum(y), b+(length(y)-sum(y)))}
Bernoulli.DIP <- function(p0, y, N){
j<-length(y)
posterior<-rbeta(1000,1+sum(y)+p0*(N-j),1+(j-sum(y))+(1-p0)*(N-j))
}
# Simulated Data
# calculate power
n.enrolled <- NULL
n1.enrolled <- NULL
n2.enrolled <- NULL
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data<-NULL
Group<-NULL
j<-0
r<-0.5 # equal allocation
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
u<-runif(1,min = 0,max = 1)
if(u<=r){
Group=append(Group,1)
y.data<-append(y.data,rbinom(1,1,p1))}
if(u>r){
Group=append(Group,0)
y.data<-append(y.data,rbinom(1,1,p2))}
Matd<-as.data.frame(cbind(y.data,Group))
      y1<-Matd$y.data[which(Matd$Group==1)]
      y2<-Matd$y.data[which(Matd$Group==0)]
sn1 <- length(y1[y1==1]) # number of successes
sn2 <- length(y2[y2==1])
if(sn1>0 & sn2>0)
{
if (prior[[1]] == 2){
p1s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y1)
p2s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y2)
}else if (prior[[1]] == 1){
N1<-ceiling(N/2)
N2<-ceiling(N/2)
p0<-rbeta(1000,1,1) # hyper-prior
p1s<-Bernoulli.DIP(p0, y1, N1)
p2s<-Bernoulli.DIP(p0, y2, N2)
p1s[is.na(p1s)]<-sum(y1)/length(y1)
p2s[is.na(p2s)]<-sum(y2)/length(y2)
}
if (alternative == "greater"){
pp_stop<-sum(p1s>p2s+d)/length(p1s)
}else if (alternative == "less"){
pp_stop<-sum(p1s<p2s-d)/length(p1s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(cats==0){cat1s<-cat1s}
if(catf==1){cat1f<-cat1f+1}
if(catf==0){cat1f<-cat1f}
# Recruited Sample Size
n.enrolled <- append(n.enrolled, j)
n1.enrolled <- append(n1.enrolled, length(y1))
n2.enrolled <- append(n2.enrolled, length(y2))
}
ss1 <- round(mean(n1.enrolled), digits = 0)
ss2 <- round(mean(n2.enrolled), digits = 0)
std1 <- round(sd(n1.enrolled), digits = 1)
std2 <- round(sd(n2.enrolled), digits = 1)
fut.rate <- cat1f/sim
power <- cat1s/sim
# calculate type I error
cat1s <- 0
cat1f <- 0
for (k in 1:sim) {
y.data<-NULL
Group<-NULL
j<-0
r<-0.5 # equal allocation
cat<-0
cats<-0
catf<-0
pp_stop<-0.5
while(cat == 0)
{
j<-j+1
u<-runif(1,min = 0,max = 1)
if(u<=r){
Group=append(Group,1)
y.data<-append(y.data,rbinom(1,1,p2))} #under the null hypothesis p1 = p2
if(u>r){
Group=append(Group,0)
y.data<-append(y.data,rbinom(1,1,p2))}
Matd<-as.data.frame(cbind(y.data,Group))
      y1<-Matd$y.data[which(Matd$Group==1)]
      y2<-Matd$y.data[which(Matd$Group==0)]
sn1 <- length(y1[y1==1]) # number of successes
sn2 <- length(y2[y2==1])
if(sn1>0 & sn2>0)
{
if (prior[[1]] == 2){
p1s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y1)
p2s<-Bernoulli(a = prior[[2]], b = prior[[3]], y = y2)
}else if (prior[[1]] == 1){
N1<-ceiling(N/2)
N2<-ceiling(N/2)
p0<-rbeta(1000,1,1) # hyper-prior
p1s<-Bernoulli.DIP(p0, y1, N1)
p2s<-Bernoulli.DIP(p0, y2, N2)
p1s[is.na(p1s)]<-sum(y1)/length(y1)
p2s[is.na(p2s)]<-sum(y2)/length(y2)
}
if (alternative == "greater"){
pp_stop<-sum(p1s>p2s+d)/length(p1s)
}else if (alternative == "less"){
pp_stop<-sum(p1s<p2s-d)/length(p1s)
}
}
if(pp_stop>=ps){cats<-1}
if(pp_stop<pf){catf<-1}
cat<-cats+catf
if(j==N){cat<-1}
}
if(cats==1){cat1s<-cat1s+1}
if(cats==0){cat1s<-cat1s}
if(catf==1){cat1f<-cat1f+1}
if(catf==0){cat1f<-cat1f}
}
t1error <- cat1s/sim
# Outputs
if (prior[[1]] == 1) {method = "DIP"
} else if (prior[[1]] == 2) {method = paste("Beta(",prior[[2]], ",", prior[[3]], ")", sep="")}
grp1 = paste(ss1, " (", std1, ")", sep="")
grp2 = paste(ss2, " (", std2, ")", sep="")
z <- list(method = method, power = power, type_I_error = t1error,
The_planned_sample_size_for_each_group = N/2,
expected_sample_size_and_std_for_the_new_treatment = grp1,
expected_sample_size_and_std_for_the_compared_treatment = grp2,
the_prob_efficacy = power, the_prob_futility = fut.rate)
z
}
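# ------------------------------------------------------------------------------
# Hedged sketch (exposition only; `demo_stop_decision` is hypothetical): the
# sequential decision rule applied after each enrollment in the functions above.
# pp_stop is the posterior probability of the targeted improvement; the trial
# stops for efficacy once pp_stop reaches ps, stops for futility once it drops
# below pf, and is otherwise truncated at the planned sample size N.
demo_stop_decision <- function(pp_stop, ps, pf, j, N) {
  if (pp_stop >= ps) "stop for efficacy"
  else if (pp_stop < pf) "stop for futility"
  else if (j == N) "stop at the planned sample size"
  else "continue enrolling"
}
# demo_stop_decision(0.97, ps = 0.95, pf = 0.05, j = 42, N = 200)
# ------------------------------------------------------------------------------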
|
/scratch/gouwar.j/cran-all/cranData/BayesDIP/R/TwoSampleBernoulli.R
|
#' Bayesian Matrix-Variate Dynamic Linear Models for Task-based fMRI Modeling in R
#'
#' The 'BayesDLMfMRI' package performs statistical analysis for task-based functional magnetic resonance imaging (fMRI) data at both individual and group levels. The analysis to detect brain activation at the individual level is based on modeling the fMRI signal using Matrix-Variate Dynamic Linear Models (MDLM). The analysis for the group stage is based on posterior distributions of the state parameter obtained from the modeling at the individual level. In this way, this package offers several R functions with different algorithms to perform inference on the state parameter to assess brain activation for both individual and group stages. These functions allow for parallel computation when the analysis is performed for the entire brain, as well as analysis at specific voxels when required.
#'
#' @section Authors:
#' Maintainer: Carlos Peréz \email{[email protected]} \cr
#' Creator: Johnatan Cardona-Jiménez \email{[email protected]} \cr
#' Contributor: Isabel Ramírez \email{[email protected]}
#' @docType package
#' @name BayesDLMfMRI
#' @aliases BayesDLMfMRI-package
#' @useDynLib BayesDLMfMRI, .registration=TRUE
#' @exportPattern "^[[:alpha:]]+"
#' @import mathjaxr
#' @import stats
#' @import utils
NULL
#> NULL
#' @title Covariates related to the observed BOLD response
#' @description
#' Covariates related to the observed BOLD response and its derivative used in the examples presented in the vignettes.
#' @examples
#' data("covariates", package="BayesDLMfMRI")
"Covariates"
#' @title MNI image used to plot posterior probability maps in the vignette examples.
#' @description MNI image used to plot posterior probability maps in the examples presented in the vignettes.
#' @examples
#' data("ffd", package="BayesDLMfMRI")
"ffd"
#' @title A 3D array that works as a brain of reference (MNI atlas).
#' @description A 3D array that works as a brain of reference (MNI atlas) for the group analysis.
#' @examples
#' data("mask", package="BayesDLMfMRI")
"mask"
#' @name summary.fMRI_group_evidence
#' @title summary.fMRI_group_evidence
#' @description
#' Summary method for the output of the ffdGroupEvidence* functions.
#' @details
#' Displays the structure of the object returned by the ffdGroupEvidence* functions.
#' @param object is the returned value of any of the fdGroupEvidence* functions
#' @param ... Other potential arguments
#' @examples
#'\dontrun{
#' DatabaseGroup <- get_example_fMRI_data_group()
#' data("covariates", package="BayesDLMfMRI")
#' data("mask", package="BayesDLMfMRI")
#' res <- ffdGroupEvidenceFETS(ffdGroup = DatabaseGroup, covariates = Covariates,
#' m0 = 0, Cova = 100, delta = 0.95, S0 = 1,
#' n0 = 1, N1 = FALSE, Nsimu1 = 100, Cutpos=30,
#' r1 = 1, Test = "JointTest", mask = mask, Ncores = 7)
#' summary(res)
#' }
#' @export
summary.fMRI_group_evidence <- function(object, ...) {
str(object)
}
#' @name print.fMRI_group_evidence
#' @title print.fMRI_group_evidence
#' @description
#' Print the structure of the object related to the ffdGroupEvidence* functions.
#' @details
#' Print the structure of the object related to the ffdGroupEvidence* functions.
#' @param x is the returned value of any of the ffdGroupEvidence* functions
#' @param ... Other potential arguments
#' @examples
#'\dontrun{
#' DatabaseGroup <- get_example_fMRI_data_group()
#' data("covariates", package="BayesDLMfMRI")
#' data("mask", package="BayesDLMfMRI")
#' res <- ffdGroupEvidenceFETS(ffdGroup = DatabaseGroup, covariates = Covariates,
#' m0 = 0, Cova = 100, delta = 0.95, S0 = 1,
#' n0 = 1, N1 = FALSE, Nsimu1 = 100, Cutpos=30,
#' r1 = 1, Test = "JointTest", mask = mask, Ncores = 7)
#' print(res)
#' }
#' @export
print.fMRI_group_evidence <- function(x, ...) {
str(x)
}
#' @name summary.fMRI_single_evidence
#' @title summary.fMRI_single_evidence
#' @description
#' Summary method for the output of the ffdEvidence* functions.
#' @details
#' Displays the structure of the object returned by the ffdEvidence* functions.
#' @param object is the returned value of any of the ffdEvidence* functions
#' @param ... Other potential arguments
#' @examples
#'\dontrun{
#' fMRI.data <- get_example_fMRI_data()
#' data("covariates", package="BayesDLMfMRI")
#' res <- ffdEvidenceFFBS(ffdc = fMRI.data, covariates = Covariates, m0=0, Cova=100,
#' delta=0.95, S0=1, n0=1, N1=FALSE,
#' Nsimu1 = 100, Cutpos1=30, r1 = 1,
#' perVol = 0.10, Ncores=3)
#' summary(res)
#' }
#' @export
summary.fMRI_single_evidence <- function(object, ...) {
str(object)
}
#' @name print.fMRI_single_evidence
#' @title print.fMRI_single_evidence
#' @description
#' Print the structure of the object related to the ffdEvidence* functions.
#' @details
#' Print the structure of the object related to the ffdEvidence* functions.
#' @param x is the returned value of any of the ffdEvidence* functions
#' @param ... Other potential arguments
#' @examples
#'\dontrun{
#' fMRI.data <- get_example_fMRI_data()
#' data("covariates", package="BayesDLMfMRI")
#' res <- ffdEvidenceFFBS(ffdc = fMRI.data, covariates = Covariates, m0=0, Cova=100,
#' delta=0.95, S0=1, n0=1, N1=FALSE,
#' Nsimu1 = 100, Cutpos1=30, r1 = 1,
#' perVol = 0.10, Ncores=3)
#' print(res)
#' }
#' @export
print.fMRI_single_evidence <- function(x, ...) {
str(x)
}
#' @name plot.fMRI_single_evidence
#' @title plot.fMRI_single_evidence
#' @description
#' Plot method for the output of the ffdEvidence* functions.
#' @details
#' Plots a posterior probability map over the supplied anatomical overlay, showing only voxels whose posterior probability exceeds 0.95.
#' @param x is the returned value of any of the ffdEvidence* functions.
#' @param overlay MNI image used to plot posterior probability maps.
#' @param index the element of \code{x} to be plotted.
#' @param index2 a second index into the selected element of \code{x}, required when that element is a 4D array.
#' @param ... additional parameters passed to the \code{ortho2} function.
#' @examples
#'\dontrun{
#' fMRI.data <- get_example_fMRI_data()
#' data("covariates", package="BayesDLMfMRI")
#' data("ffd", package="BayesDLMfMRI") # used for overlay.
#' res <- ffdEvidenceFETS(ffdc = fMRI.data,
#' covariates = Covariates,
#' m0 = 0, Cova = 100, delta = 0.95,
#' S0 = 1, n0 = 1, Nsimu1 = 100, Cutpos1 = 30,
#' r1 = 1, Test = "LTT", Ncores = 15)
#' plot(res, overlay=ffd, index=1, col.y = heat.colors(50),
#' ycolorbar = TRUE, ybreaks = seq(0.95, 1, by = 0.001))
#' }
#' @export
plot.fMRI_single_evidence <- function(x, overlay, index, index2=NULL, ...) {
res <- x
if( (index > length(res)) | (1 > index) ) {
stop("index out of range")
}
res.auxi <- res[[index]]
if(length(dim(res.auxi)) > length(dim(overlay))) {
if(is.null(index2)) {
stop("you must provide a second index using index2")
}
    if( (index2 > (dim(res.auxi)[1]) ) | (1 > index2) ) {
      stop("index2 out of range")
    }
    res.auxi <- res.auxi[index2,,,]
}
  # build a NIfTI image from the evidence map and overlay only voxels whose
  # posterior probability exceeds the 0.95 threshold
  Z.visual.c <- oro.nifti::nifti(res.auxi, datatype=16)
  neurobase::ortho2(x=overlay, y=ifelse(Z.visual.c > 0.95, Z.visual.c, NA), ...)
}
#' @name print.fMRI_single_voxel
#' @title print.fMRI_single_voxel
#' @description
#' Print the structure of the object related to the SingleVoxel* functions.
#' @details
#' Print the structure of the object related to the SingleVoxel* functions.
#' @param x is the returned value of any of the SingleVoxel* functions,
#' @param ... Other potential arguments
#' @examples
#'\dontrun{
#' fMRI.data <- get_example_fMRI_data()
#' data("covariates", package="BayesDLMfMRI")
#' res.indi <- SingleVoxelFSTS(posi.ffd = c(14, 56, 40),
#' covariates = Covariates,
#' ffdc = fMRI.data,
#' m0 = 0, Cova = 100, delta = 0.95, S0 = 1,
#' n0 = 1, Nsimu1 = 100, N1 = N1, Cutpos1 = 30,
#' Min.vol = 0.10, r1 = 1)
#' print(res.indi)
#' }
#' @export
print.fMRI_single_voxel <- function(x, ...) {
str(x)
}
#' @name print.fMRI_group_single_voxel
#' @title print.fMRI_group_single_voxel
#' @description
#' Print the structure of the object related to the SingleVoxel* functions.
#' @details
#' Print the structure of the object related to the GroupSingleVoxel* functions.
#' @param x is the returned value of any of the GroupSingleVoxel* functions
#' @param ... Other potential arguments
#' @examples
#'\dontrun{
#' DatabaseGroup <- get_example_fMRI_data_group()
#' data("covariates", package="BayesDLMfMRI")
#' res <- GroupSingleVoxelFFBS(posi.ffd = c(14, 56, 40), DatabaseGroup,
#' covariates = Covariates, m0 = 0, Cova = 100,
#' delta = 0.95, S0 = 1, n0 = 1, N1 = FALSE,
#' Nsimu1 = 100, r1 = 1, Cutpos = 30)
#' print(res)
#' }
#' @export
print.fMRI_group_single_voxel <- function(x, ...) {
str(x)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/BayesDLMfMRI.R
|
#' @name GroupSingleVoxelFETS
#' @title GroupSingleVoxelFETS
#' @description
#' \loadmathjax
#' This function is used to perform a group activation analysis for single voxels based on the FETS algorithm.
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' This function allows the performance of a group activation analysis for single voxels. A multivariate dynamic linear model is fitted to a cluster of voxels, with its center at location \code{(i,j,k)}, in the way it is presented in \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}.
#' @param posi.ffd the position of the voxel in the brain image.
#' @param DatabaseGroup list of N elements, each being a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images related to each of the N subjects in the sample.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m0=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p \times p} matrix, where \mjseqn{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure between pairs of voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines a \mjseqn{q \times q} identity matrix. For the case of available prior information, \code{S0} can be defined as a \mjseqn{q \times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \code{t=0} (\code{n0=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=FALSE} is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos<T}).
#' @param r1 a positive integer number that defines the distance from every voxel with its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @param Test test type either \code{"LTT"} (Average cluster effect) or \code{"JointTest"} (Joint effect).
#' @return a list containing a vector (Evidence) with the evidence measure of
#' activation for each of the \code{p} covariates considered in the model, the simulated
#' online trajectories related to the state parameter, the simulated BOLD responses,
#' and a measure to examine the goodness of fit of the model \mjseqn{(100 \ast |Y[i,j,k]_t - \hat{Y}[i,j,k]_t |/ \hat{Y}[i,j,k]_t )} for that particular voxel (\code{FitnessV}).
#' @examples
#'\dontrun{
#' # This example can take a long time to run.
#' DatabaseGroup <- get_example_fMRI_data_group()
#' data("covariates", package="BayesDLMfMRI")
#' resSingle <- GroupSingleVoxelFETS(posi.ffd = c(14, 56, 40), DatabaseGroup,
#' covariates = Covariates, m0 = 0, Cova = 100,
#' delta = 0.95, S0 = 1, n0 = 1, N1 = FALSE,
#' Nsimu1 = 100, r1 = 1, Test = "JointTest", Cutpos = 30)
#' }
#' @export
GroupSingleVoxelFETS <- function(posi.ffd, DatabaseGroup,
covariates, m0, Cova, delta, S0, n0, N1, Nsimu1,
r1, Test, Cutpos){
if(is.logical(N1)) {
if(N1==FALSE){N1 = dim(covariates)[1]}
}
.validate_input(
covariates=covariates,
delta=delta,
n0=n0,
N1=N1,
Nsimu1=Nsimu1,
r1=r1,
Cutpos1=Cutpos,
Test=Test
)
covariates <- as.matrix(covariates)
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = posi.ffd, r1)
Ngroup <- length(DatabaseGroup)
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
#case for single voxels
if(any(series.group==0)){
if(Test=="LTT"){return(NA)}
if(Test=="JointTest"){return( NA )}}else{
if(Test=="JointTest"){
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
delta1<- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
res <- .Group_FunctionalMultiTest(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = 0, NIn = N1, NS = Ngroup, Nsimu = Nsimu1, CUTpos = Cutpos)
attr(res, "class") <- "fMRI_group_single_voxel"
return(res)
}
if(Test=="LTT"){return(NA)}}}else{
Ngroup <- length(DatabaseGroup)
#THIS LINE RETURN THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = posi.ffd, r1)
aux.pos <- dim(DatabaseGroup[[1]])[1:3]
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
if(any(series.group[,1]==0)){if(Test=="LTT"){return(NA)}
if(Test=="JointTest"){return(NA)}}else{
flag <- any(series.group==0)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
if(Test=="LTT"){
res <- .Gruop_FunctionalTestLT(series.group, covariates, m0, Cova1, S0, Beta1, n0, sum(flag), N1, Ngroup, Nsimu1, Cutpos)
attr(res, "class") <- "fMRI_group_single_voxel"
return(res)
}
if(Test=="JointTest"){
res <- .Group_FunctionalMultiTest(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = sum(flag), NIn = N1, NS = Ngroup, Nsimu = Nsimu1, CUTpos = Cutpos)
attr(res, "class") <- "fMRI_group_single_voxel"
return(res)
}
}
}
}
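# ------------------------------------------------------------------------------
# Hedged sketch (exposition only; `demo_filter_in_bounds` is hypothetical): the
# bounds check used above to drop neighbor coordinates that fall outside the
# image volume after .distanceNeighbors() builds the cluster around a voxel.
demo_filter_in_bounds <- function(posi1, dims) {
  keep <- apply(posi1, 1, function(row) all(row > 0 & row <= dims))
  posi1[keep, , drop = FALSE]
}
# demo_filter_in_bounds(rbind(c(1, 1, 1), c(0, 5, 5), c(14, 56, 40)), dims = c(91, 109, 91))
# ------------------------------------------------------------------------------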
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/GroupSingleVoxelFETS.R
|
#' @name GroupSingleVoxelFFBS
#' @title GroupSingleVoxelFFBS
#' @description
#' \loadmathjax
#' This function is used to perform a group activation analysis for single voxels based on the FFBS algorithm.
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' This function allows the performance of a group activation analysis for single voxels. A multivariate dynamic linear model is fitted to a cluster of voxels, with its center at location (i,j,k), in the way it is presented in \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}.
#' @param posi.ffd the position of the voxel in the brain image.
#' @param DatabaseGroup list of N elements, each being a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images related to each of the N subjects in the sample.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m0=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p \times p} matrix, where \code{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure between pairs of voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines a \mjseqn{q\times q} identity matrix. For the case of available prior information, \code{S0} can be defined as a \mjseqn{q \times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \code{t=0} (\code{n0=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=FALSE} is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos<T}).
#' @param r1 a positive integer number that defines the distance from every voxel with its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @return a list containing a vector (\code{Evidence}) with the evidence measure of
#' activation for each of the \code{p} covariates considered in the model, the simulated
#' online trajectories related to the state parameter, the simulated BOLD responses,
#' and a measure to examine the goodness of fit of the model \mjseqn{(100 \ast |Y[i,j,k]_t - \hat{Y}[i,j,k]_t |/ \hat{Y}[i,j,k]_t )} for that particular voxel (\code{FitnessV}).
#' @examples
#'\dontrun{
#' # This example can take a long time to run.
#' DatabaseGroup <- get_example_fMRI_data_group()
#' data("covariates", package="BayesDLMfMRI")
#' resSingle <- GroupSingleVoxelFFBS(posi.ffd = c(14, 56, 40), DatabaseGroup,
#' covariates = Covariates, m0 = 0, Cova = 100,
#' delta = 0.95, S0 = 1, n0 = 1, N1 = FALSE,
#' Nsimu1 = 100, r1 = 1, Cutpos = 30)
#' }
#' @export
GroupSingleVoxelFFBS <- function(posi.ffd, DatabaseGroup, covariates, m0, Cova, delta, S0, n0, N1, Nsimu1, r1, Cutpos){
if(is.logical(N1)) {
if(N1==FALSE){N1 = dim(covariates)[1]}
}
.validate_input(
covariates=covariates,
delta=delta,
n0=n0,
N1=N1,
Nsimu1=Nsimu1,
r1=r1,
Cutpos1=Cutpos
)
covariates <- as.matrix(covariates)
if(r1 == 0){
posi <- .distanceNeighbors(posi.refer = posi.ffd, r1)
Ngroup <- length(DatabaseGroup)
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
#case for single voxels
if(any(series.group==0)){return( NA )}else{
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
delta1<- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
res <- .Group_Functional_Backwards_Sampling(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = 0, NIn = N1, NS = Ngroup, Nsimu = Nsimu1,
CUTpos = Cutpos)
attr(res, "class") <- "fMRI_group_single_voxel"
return(res)
}
}else{
Ngroup <- length(DatabaseGroup)
#THIS LINE RETURN THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors(posi.refer = posi.ffd, r1)
aux.pos <- dim(DatabaseGroup[[1]])[1:3]
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
if(any(series.group[,1]==0)){return( NA )}else{
flag <- any(series.group==0)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
res <- .Group_Functional_Equation(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = flag, NIn = N1, NS = Ngroup, Nsimu = Nsimu1,
CUTpos = Cutpos)
attr(res, "class") <- "fMRI_group_single_voxel"
return( res )
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/GroupSingleVoxelFFBS.R
|
#' @name GroupSingleVoxelFSTS
#' @title GroupSingleVoxelFSTS
#' @description
#' \loadmathjax
#' This function is used to perform a group activation analysis for single voxels based on the FSTS algorithm.
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' This function allows the performance of a group activation analysis for single voxels. A multivariate dynamic linear model is fitted to a cluster of voxels, with its center at location (i,j,k), in the way it is presented in \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}.
#' @param posi.ffd the position of the voxel in the brain image.
#' @param DatabaseGroup list of N elements, each being a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images related to each of the N subjects in the sample.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m0=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p\times p} matrix, where \code{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure between pairs of voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines a \mjseqn{q\times q} identity matrix. For the case of available prior information, \code{S0} can be defined as a \mjseqn{q \times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \code{t=0} (\code{n0=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=FALSE} is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos<T}).
#' @param r1 a positive integer number that defines the distance from every voxel with its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @return a list containing a vector (\code{Evidence}) with the evidence measure of
#' activation for each of the \code{p} covariates considered in the model, the simulated
#' online trajectories related to the state parameter, the simulated BOLD responses,
#' and a measure to examine the goodness of fit of the model \mjseqn{(100 \ast |Y[i,j,k]_t - \hat{Y}[i,j,k]_t |/ \hat{Y}[i,j,k]_t )} for that particular voxel (\code{FitnessV}).
#' @examples
#'\dontrun{
#' # This example can take a long time to run.
#' DatabaseGroup <- get_example_fMRI_data_group()
#' data("covariates", package="BayesDLMfMRI")
#' resSingle <- GroupSingleVoxelFSTS(posi.ffd = c(14, 56, 40), DatabaseGroup,
#' covariates = Covariates, m0 = 0, Cova = 100,
#' delta = 0.95, S0 = 1, n0 = 1, N1 = FALSE,
#' Nsimu1 = 100, r1 = 1, Cutpos = 30)
#' }
#' @export
GroupSingleVoxelFSTS <- function(posi.ffd, DatabaseGroup, covariates, m0, Cova, delta, S0, n0, N1, Nsimu1, r1, Cutpos){
if(is.logical(N1)) {
if(N1==FALSE){N1 = dim(covariates)[1]}
}
.validate_input(
covariates=covariates,
delta=delta,
n0=n0,
N1=N1,
Nsimu1=Nsimu1,
r1=r1,
Cutpos1=Cutpos
)
covariates <- as.matrix(covariates)
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = posi.ffd, r1)
Ngroup <- length(DatabaseGroup)
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
#case for single voxels
if(any(series.group==0)){return( NA )}else{
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
delta1<- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
res <- .Group_Functional_Equation(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = 0, NIn = N1, NS = Ngroup, Nsimu = Nsimu1,
CUTpos = Cutpos)
attr(res, "class") <- "fMRI_group_single_voxel"
return( res )
}
}else{
Ngroup <- length(DatabaseGroup)
#THIS LINE RETURN THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = posi.ffd, r1)
aux.pos <- dim(DatabaseGroup[[1]])[1:3]
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
if(any(series.group[,1]==0)){return( NA )}else{
flag <- any(series.group==0)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
res <- .Group_Functional_Equation(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = flag, NIn = N1, NS = Ngroup, Nsimu = Nsimu1,
CUTpos = Cutpos)
attr(res, "class") <- "fMRI_group_single_voxel"
return( res )
}
}
}
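# ------------------------------------------------------------------------------
# Hedged sketch (exposition only; `demo_discount_matrix` is hypothetical): the
# discount-factor construction shared by the functions above. With p covariates
# and discount factor delta, the matrix Beta1 is diagonal with entries
# 1/sqrt(delta), so each evolution step inflates the state variance by a factor
# of 1/delta (delta = 1 yields results similar to a static general linear model).
demo_discount_matrix <- function(p, delta) {
  diag(1 / rep(sqrt(delta), p))
}
# demo_discount_matrix(p = 2, delta = 0.95)
# ------------------------------------------------------------------------------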
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/GroupSingleVoxelFSTS.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
.Group_Functional_Backwards_Sampling <- function(ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, flag1, NIn, NS, Nsimu, CUTpos) {
.Call(`_BayesDLMfMRI_Group_Functional_Backwards_Sampling`, ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, flag1, NIn, NS, Nsimu, CUTpos)
}
.Gruop_FunctionalTestLT <- function(ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, flag1, NIn, NS, Nsimu, CUTpos) {
.Call(`_BayesDLMfMRI_Gruop_FunctionalTestLT`, ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, flag1, NIn, NS, Nsimu, CUTpos)
}
.Group_FunctionalMultiTest <- function(ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, flag1, NIn, NS, Nsimu, CUTpos) {
.Call(`_BayesDLMfMRI_Group_FunctionalMultiTest`, ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, flag1, NIn, NS, Nsimu, CUTpos)
}
.Group_Functional_Equation <- function(ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, flag1, NIn, NS, Nsimu, CUTpos) {
.Call(`_BayesDLMfMRI_Group_Functional_Equation`, ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, flag1, NIn, NS, Nsimu, CUTpos)
}
.Individual_Backwards_Sampling <- function(ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, NIn, Nsimu, CUTpos) {
.Call(`_BayesDLMfMRI_Individual_Backwards_Sampling`, ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, NIn, Nsimu, CUTpos)
}
.Individual_FunctionalTestLT <- function(ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, NIn, Nsimu, CUTpos) {
.Call(`_BayesDLMfMRI_Individual_FunctionalTestLT`, ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, NIn, Nsimu, CUTpos)
}
.Individual_FunctionalMultiTest <- function(ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, NIn, Nsimu, CUTpos) {
.Call(`_BayesDLMfMRI_Individual_FunctionalMultiTest`, ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, NIn, Nsimu, CUTpos)
}
.Individual_Functional_States <- function(ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, NIn, Nsimu, CUTpos) {
.Call(`_BayesDLMfMRI_Individual_Functional_States`, ffd1, Cova, m0In, c0In, S0In, beta0In, nt0In, NIn, Nsimu, CUTpos)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/RcppExports.R
|
#' @name SingleVoxelFETS
#' @title SingleVoxelFETS
#' @description
#' \loadmathjax
#' This function is used to perform an activation analysis for single voxels based on the FETS algorithm.
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' This function allows the development of an activation analysis for single voxels. A multivariate dynamic linear model is fitted to a cluster of voxels, with its center at location \code{(i,j,k)}, in the way it is presented in \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}.
#' @param posi.ffd the position of the voxel in the brain image.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param ffdc a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images that are meant to be analyzed. \code{(i,j,k)} define the position of the observed voxel at time t.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m0=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p \times p} matrix, where \mjseqn{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure among voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines a \mjseqn{q\times q} identity matrix. For the case of available prior information, \code{S0} can be defined as a \mjseqn{q\times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \code{t=0} (\code{n0=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=FALSE} is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos1<T}).
#' @param Min.vol helps to define a threshold for the voxels considered in
#' the analysis. For example, \code{Min.vol = 0.10} means that all the voxels with values
#' below to \code{max(ffdc)*Min.vol} can be considered irrelevant and discarded from the analysis.
#' @param r1 a positive integer number that defines the distance from every voxel with its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @param Test test type either \code{"LTT"} (Average cluster effect) or \code{"JointTest"} (Joint effect).
#' @return a list containing a vector (Evidence) with the evidence measure of
#' activation for each of the \code{p} covariates considered in the model, the simulated
#' online trajectories related to the state parameter, the simulated BOLD responses,
#' and a measure to examine the goodness of fit of the model \mjseqn{(100 \ast |Y[i,j,k]_t - \hat{Y}[i,j,k]_t |/ \hat{Y}[i,j,k]_t )} for that particular voxel (\code{FitnessV}).
#' @examples
#'\dontrun{
#' # This example can take a long time to run.
#' fMRI.data <- get_example_fMRI_data()
#' data("covariates", package="BayesDLMfMRI")
#' res.indi <- SingleVoxelFETS(posi.ffd = c(14, 56, 40),
#' covariates = Covariates,
#' ffdc = fMRI.data,
#' m0 = 0, Cova = 100, delta = 0.95, S0 = 1,
#' n0 = 1, Nsimu1 = 100, N1 = FALSE, Cutpos1 = 30,
#' Min.vol = 0.10, r1 = 1, Test = "LTT")
#'
#' }
#' @export
SingleVoxelFETS <- function(posi.ffd, covariates, ffdc, m0, Cova, delta,
S0, n0, N1, Nsimu1, Cutpos1,
Min.vol, r1, Test){
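  # N1 = FALSE is shorthand for "use all T images": it is replaced below by the
  # number of rows of the covariates matrix.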
if(is.logical(N1)) {
if(N1==FALSE){N1 = dim(covariates)[1]}
}
.validate_input(
covariates=covariates,
ffdc = ffdc,
delta=delta,
n0=n0,
N1=N1,
Nsimu1=Nsimu1,
Cutpos1=Cutpos1,
Min.vol=Min.vol,
r1=r1,
Test=Test
)
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
#BOLD RESPONSE SERIES IN THE CLUSTER RELATED TO posi
series.def <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: KEEP ONLY TIME SERIES ABOVE THE THRESHOLD AND DISCARD THOSE WITH A NON-SIGNIFICANT SIGNAL
if(min(series.def[,1]) < Min.vol){if(Test=="LTT"){return( rep(NA, dim(covariates)[2]) )}
if(Test=="JointTest"){return( list(EvidenceJoint = rep(NA, dim(covariates)[2]),
EvidenceMargin = rep(NA, dim(covariates)[2]) ) )}}else{
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
if(Test=="LTT"){
      return(rep(NA, dim(covariates)[2]))
}
if(Test=="JointTest"){
res <- .Individual_FunctionalMultiTest(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
res <- list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = res$EvidenMarginal)
attr(res, "class") <- "fMRI_single_voxel"
return(res)
}
}
}else{
    #THIS LINE RETURNS THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
aux.pos <- dim(ffdc)[1:3]
#GOING THROUGH EACH ROW AND CHECKING IF ANY POSITION IS OUTSIDE THE BOUNDS
    row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR THE CLUSTER RELATED TO posi
series <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: KEEP ONLY TIME SERIES ABOVE THE THRESHOLD AND DISCARD THOSE WITH A NON-SIGNIFICANT SIGNAL
if(min(series[,1])<Min.vol){if(Test=="LTT"){return( rep(NA, dim(covariates)[2]) )}
if(Test=="JointTest"){return( list(EvidenceJoint = rep(NA, dim(covariates)[2]),
EvidenceMargin = rep(NA, dim(covariates)[2])) )}}else{
# IDENTIFYING AND REMOVING TEMPORAL SERIES INSIDE THE CLUSTER WITH ZERO VALUES
zero.series <- unique(which(series==0, arr.ind = TRUE)[,2])
if(length(zero.series)==0){series.def <- series}else{series.def <- series[,-(zero.series)]}
#CHECKING THE SIZE OF THE CLUSTER: q=1 or q>1
#is.vector(series.def)==TRUE THEN q=1 OTHERWISE q>1
if(is.vector(series.def)){
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))}else{
series.def <- apply(series.def, 2, function(x){(x-mean(x))/sd(x)})
#PRIOR HYPERPARAMETERS FOR q1>1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=dim(series.def)[2])
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
delta1 <- sqrt(delta)
#DISCOUNT FACTORS MATRIX
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
}
if(Test=="LTT"){
res <- .Individual_FunctionalTestLT (ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1, S0In = S01,
beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
#EVIDENCE OF ACTIVATION FOR A SINGLE VOXEL TAKING INTO ACCOUNT THE INFORMATION OF THE ENTIRE CLUSTER OF SIZE q
attr(res, "class") <- "fMRI_single_voxel"
return(res)
}
if(Test=="JointTest"){
res <- .Individual_FunctionalMultiTest(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
#EVIDENCE OF ACTIVATION FOR A SINGLE VOXEL TAKING INTO ACCOUNT THE INFORMATION OF THE ENTIRE CLUSTER OF SIZE q
attr(res, "class") <- "fMRI_single_voxel"
return(res)
}
}
}
}
#END FUNCTION
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/SingleVoxelFETS.R
#' @name SingleVoxelFFBS
#' @title SingleVoxelFFBS
#' @description
#' \loadmathjax
#' This function is used to perform an activation analysis for single voxels based on the FFBS algorithm.
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' This function allows the development of an activation analysis for single voxels. A multivariate dynamic linear model is fitted to a cluster of voxels, with its center at location \code{(i,j,k)}, in the way it is presented in \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}.
#' @param posi.ffd the position of the voxel in the brain image.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param ffdc a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images that are meant to be analyzed. \code{(i,j,k)} define the position of the observed voxel at time t.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m0=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p\times p} matrix, where \mjseqn{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values lie in the range \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure among voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines a \mjseqn{q\times q} identity matrix. For the case of available prior information, \code{S0} can be defined as a \mjseqn{q\times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \code{t=0} (\code{n0=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=FALSE} is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to the approximation of a Student's t distribution by a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos1<T}).
#' @param Min.vol helps to define a threshold for the voxels considered in
#' the analysis. For example, \code{Min.vol = 0.10} means that all the voxels with values
#' below \code{max(ffdc)*Min.vol} can be considered irrelevant and discarded from the analysis.
#' @param r1 a positive integer that defines the distance from every voxel to its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @return a list containing a vector (Evidence) with the evidence measure of
#' activation for each of the \code{p} covariates considered in the model, the simulated
#' online trajectories related to the state parameter, the simulated BOLD responses,
#' and a measure to examine the goodness of fit of the model \mjseqn{(100 \ast |Y[i,j,k]_t - \hat{Y}[i,j,k]_t |/ \hat{Y}[i,j,k]_t )} for that particular voxel (\code{FitnessV}).
#' @examples
#'\dontrun{
#' # This example can take a long time to run.
#' fMRI.data <- get_example_fMRI_data()
#' data("covariates", package="BayesDLMfMRI")
#' res.indi <- SingleVoxelFFBS(posi.ffd = c(14, 56, 40),
#' covariates = Covariates,
#' ffdc = fMRI.data,
#' m0 = 0, Cova = 100, delta = 0.95, S0 = 1,
#' n0 = 1, Nsimu1 = 100, N1 = FALSE, Cutpos1 = 30,
#' Min.vol = 0.10, r1 = 1)
#'
#' }
#' @export
SingleVoxelFFBS <- function(posi.ffd, covariates, ffdc, m0, Cova, delta,
S0, n0, N1, Nsimu1, Cutpos1, Min.vol, r1){
if(is.logical(N1)) {
if(N1==FALSE){N1 = dim(covariates)[1]}
}
.validate_input(
covariates=covariates,
ffdc = ffdc,
delta=delta,
n0=n0,
N1=N1,
Nsimu1=Nsimu1,
Cutpos1=Cutpos1,
Min.vol=Min.vol,
r1=r1
)
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
#BOLD RESPONSE SERIES IN THE CLUSTER RELATED TO posi
series.def <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: KEEP ONLY TIME SERIES ABOVE THE THRESHOLD AND DISCARD THOSE WITH A NON-SIGNIFICANT SIGNAL
if(min(series.def[,1]) < Min.vol){
return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
res <- .Individual_Backwards_Sampling(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
res <- list(EvidenceJoint = as.vector(res$Eviden_joint), EvidenceMargin = as.vector(res$Eviden_margin), EvidenLTT=as.vector(res$eviden_lt))
attr(res, "class") <- "fMRI_single_voxel"
return(res)
}
}else{
    #THIS LINE RETURNS THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
aux.pos <- dim(ffdc)[1:3]
#GOING THROUGH EACH ROW AND CHECKING IF ANY POSITION IS OUTSIDE THE BOUNDS
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR THE CLUSTER RELATED TO posi
series <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: KEEP ONLY TIME SERIES ABOVE THE THRESHOLD AND DISCARD THOSE WITH A NON-SIGNIFICANT SIGNAL
if(min(series[,1]) < Min.vol){return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
# IDENTIFYING AND REMOVING TEMPORAL SERIES INSIDE THE CLUSTER WITH ZERO VALUES
zero.series <- unique(which(series==0, arr.ind = TRUE)[,2])
if(length(zero.series)==0){series.def <- series}else{series.def <- series[,-(zero.series)]}
#CHECKING THE SIZE OF THE CLUSTER: q=1 or q>1
#is.vector(series.def)==TRUE THEN q=1 OTHERWISE q>1
if(is.vector(series.def)){
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))}else{
series.def <- apply(series.def, 2, function(x){(x-mean(x))/sd(x)})
#PRIOR HYPERPARAMETERS FOR q1>1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=dim(series.def)[2])
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
delta1 <- sqrt(delta)
#DISCOUNT FACTORS MATRIX
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
}
res <- .Individual_Backwards_Sampling(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
attr(res, "class") <- "fMRI_single_voxel"
#EVIDENCE OF ACTIVATION FOR A SINGLE VOXEL TAKING INTO ACCOUNT THE INFORMATION OF THE ENTIRE CLUSTER OF SIZE q
return(res)
}
}
}
#END FUNCTION
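# A hedged accessor sketch (not run): after calling SingleVoxelFFBS with
# r1 = 0, the returned list carries the explicitly named elements built in the
# r1 == 0 branch above; for r1 > 0 the element names come directly from the
# internal sampler and may differ. Argument values are illustrative only.
#
# res0 <- SingleVoxelFFBS(posi.ffd = c(14, 56, 40), covariates = Covariates,
#                         ffdc = fMRI.data, m0 = 0, Cova = 100, delta = 0.95,
#                         S0 = 1, n0 = 1, N1 = FALSE, Nsimu1 = 100,
#                         Cutpos1 = 30, Min.vol = 0.10, r1 = 0)
# res0$EvidenceJoint   # joint evidence, one entry per covariate
# res0$EvidenceMargin  # marginal evidence, one entry per covariate
# res0$EvidenLTT       # average-cluster (LTT) evidence, one entry per covariate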
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/SingleVoxelFFBS.R
#' @name SingleVoxelFSTS
#' @title SingleVoxelFSTS
#' @description
#' \loadmathjax
#' This function is used to perform an activation analysis for single voxels based on the FSTS algorithm.
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' This function allows the development of an activation analysis for single voxels. A multivariate dynamic linear model is fitted to a cluster of voxels, with its center at location \code{(i,j,k)}, in the way it is presented in \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}.
#' @param posi.ffd the position of the voxel in the brain image.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param ffdc a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images that are meant to be analyzed. \code{(i,j,k)} define the position of the voxel observed at time \code{t}.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m0=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p \times p} matrix, where \mjseqn{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values lie in the range \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure among voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines a \mjseqn{q \times q} identity matrix. For the case of available prior information, \code{S0} can be defined as a \mjseqn{q \times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \code{t=0} (\code{n0=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=FALSE} is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to the approximation of a Student's t distribution by a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos1<T}).
#' @param Min.vol helps to define a threshold for the voxels considered in
#' the analysis. For example, \code{Min.vol = 0.10} means that all the voxels with values
#' below \code{max(ffdc)*Min.vol} can be considered irrelevant and discarded from the analysis.
#' @param r1 a positive integer that defines the distance from every voxel to its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @return a list containing a vector (Evidence) with the evidence measure of
#' activation for each of the \code{p} covariates considered in the model, the simulated
#' online trajectories related to the state parameter, the simulated BOLD responses,
#' and a measure to examine the goodness of fit of the model \mjseqn{(100 \ast |Y[i,j,k]_t - \hat{Y}[i,j,k]_t |/ \hat{Y}[i,j,k]_t )} for that particular voxel (\code{FitnessV}).
#' @examples
#'\dontrun{
#' # This example can take a long time to run.
#' fMRI.data <- get_example_fMRI_data()
#' data("covariates", package="BayesDLMfMRI")
#' res.indi <- SingleVoxelFSTS(posi.ffd = c(14, 56, 40),
#' covariates = Covariates,
#' ffdc = fMRI.data,
#' m0 = 0, Cova = 100, delta = 0.95, S0 = 1,
#' n0 = 1, Nsimu1 = 100, N1 = FALSE, Cutpos1 = 30,
#' Min.vol = 0.10, r1 = 1)
#'
#' }
#' @export
SingleVoxelFSTS <- function(posi.ffd, covariates, ffdc, m0,
Cova, delta, S0, n0, N1,
Nsimu1, Cutpos1, Min.vol, r1){
if(is.logical(N1)) {
if(N1==FALSE){N1 = dim(covariates)[1]}
}
.validate_input(
covariates=covariates,
ffdc = ffdc,
delta=delta,
n0=n0,
N1=N1,
Nsimu1=Nsimu1,
Cutpos1=Cutpos1,
Min.vol=Min.vol,
r1=r1
)
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
#BOLD RESPONSE SERIES IN THE CLUSTER RELATED TO posi
series.def <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: KEEP ONLY TIME SERIES ABOVE THE THRESHOLD AND DISCARD THOSE WITH A NON-SIGNIFICANT SIGNAL
if(min(series.def[,1]) < Min.vol){
return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
res <- .Individual_Functional_States(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
res <- list(EvidenceJoint = as.vector(res$Eviden_joint), EvidenceMargin = as.vector(res$Eviden_margin), EvidenLTT=as.vector(res$eviden_lt))
attr(res, "class") <- "fMRI_single_voxel"
return(res)
}
}else{
    #THIS LINE RETURNS THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
aux.pos <- dim(ffdc)[1:3]
#GOING THROUGH EACH ROW AND CHECKING IF ANY POSITION IS OUTSIDE THE BOUNDS
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR THE CLUSTER RELATED TO posi
series <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: KEEP ONLY TIME SERIES ABOVE THE THRESHOLD AND DISCARD THOSE WITH A NON-SIGNIFICANT SIGNAL
if(min(series[,1]) < Min.vol){return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
# IDENTIFYING AND REMOVING TEMPORAL SERIES INSIDE THE CLUSTER WITH ZERO VALUES
zero.series <- unique(which(series==0, arr.ind = TRUE)[,2])
if(length(zero.series)==0){series.def <- series}else{series.def <- series[,-(zero.series)]}
#CHECKING THE SIZE OF THE CLUSTER: q=1 or q>1
#is.vector(series.def)==TRUE THEN q=1 OTHERWISE q>1
if(is.vector(series.def)){
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))}else{
series.def <- apply(series.def, 2, function(x){(x-mean(x))/sd(x)})
#PRIOR HYPERPARAMETERS FOR q1>1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=dim(series.def)[2])
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
delta1 <- sqrt(delta)
#DISCOUNT FACTORS MATRIX
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
}
res <- .Individual_Functional_States(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
#EVIDENCE OF ACTIVATION FOR A SINGLE VOXEL TAKING INTO ACCOUNT THE INFORMATION OF THE ENTIRE CLUSTER OF SIZE q
attr(res, "class") <- "fMRI_single_voxel"
return(res)
}
}
}
#END FUNCTION
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/SingleVoxelFSTS.R
#' @name .distanceNeighbors
#' @title .distanceNeighbors
#' @description
#' this is an internal function
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' this is an internal function
#' @param posi.refer the position of the voxel in the brain image.
#' @param r1 a positive integer number that defines the distance from every voxel with its most distant neighbor. This value determines the size of the cluster. The users can set a range of different r values: r = 0, 1, 2, 3, 4, which leads to q = 1, 7, 19, 27, 33, where q is the size of the cluster.
#' @keywords internal
.distanceNeighbors <- function(posi.refer, r1){
if(r1==0){return(rbind(posi.refer))}
if(r1==1){
res1 <- matrix(rep(posi.refer,3), byrow = TRUE, ncol=3) + diag(1, nrow = 3)
res2 <- matrix(rep(posi.refer,3), byrow = TRUE, ncol=3) + diag(-1, nrow = 3)
return(rbind(posi.refer, res1, res2))
}
if(r1==2){
res1 <- matrix(rep(posi.refer,3), byrow = TRUE, ncol=3) + diag(1, nrow = 3)
res2 <- matrix(rep(posi.refer,3), byrow = TRUE, ncol=3) + diag(-1, nrow = 3)
res3 <- matrix(rep(posi.refer,4), byrow = TRUE, ncol=3) + cbind(rep(1, 4), c(1, 0, -1, 0), c(0, 1, 0, -1))
res4 <- matrix(rep(posi.refer,3), byrow = TRUE, ncol=3) + cbind(c(-1, 0, 0), rep(1, 3), c(0, 1, -1))
res5 <- matrix(rep(posi.refer,5), byrow = TRUE, ncol=3) + cbind(c(-1, -1, -1, 0, 0), c(0, -1, 0, -1, -1), c(1, 0, -1, 1, -1))
return(rbind(posi.refer, res1, res2, res3, res4, res5))
}
if(r1==3){
res1 <- matrix(rep(posi.refer,3), byrow = TRUE, ncol=3) + diag(1, nrow = 3)
res2 <- matrix(rep(posi.refer,3), byrow = TRUE, ncol=3) + diag(-1, nrow = 3)
res3 <- matrix(rep(posi.refer,4), byrow = TRUE, ncol=3) + cbind(rep(1, 4), c(1, 0, -1, 0), c(0, 1, 0, -1))
res4 <- matrix(rep(posi.refer,3), byrow = TRUE, ncol=3) + cbind(c(-1, 0, 0), rep(1, 3), c(0, 1, -1))
res5 <- matrix(rep(posi.refer,5), byrow = TRUE, ncol=3) + cbind(c(-1, -1, -1, 0, 0), c(0, -1, 0, -1, -1), c(1, 0, -1, 1, -1))
res6 <- matrix(rep(posi.refer,4), byrow = TRUE, ncol=3) + cbind(rep(1, 4), c(1, 1, -1, -1), c(1, -1, 1, -1))
res7 <- matrix(rep(posi.refer,4), byrow = TRUE, ncol=3) + cbind(rep(-1, 4), c(1, 1, -1, -1), c(1, -1, 1, -1))
return(rbind(posi.refer, res1, res2, res3, res4, res5, res6, res7))
}
if(r1==4){
res1 <- matrix(rep(posi.refer,3), byrow = TRUE, ncol=3) + diag(1, nrow = 3)
res2 <- matrix(rep(posi.refer,3), byrow = TRUE, ncol=3) + diag(-1, nrow = 3)
res3 <- matrix(rep(posi.refer,4), byrow = TRUE, ncol=3) + cbind(rep(1, 4), c(1, 0, -1, 0), c(0, 1, 0, -1))
res4 <- matrix(rep(posi.refer,3), byrow = TRUE, ncol=3) + cbind(c(-1, 0, 0), rep(1, 3), c(0, 1, -1))
res5 <- matrix(rep(posi.refer,5), byrow = TRUE, ncol=3) + cbind(c(-1, -1, -1, 0, 0), c(0, -1, 0, -1, -1), c(1, 0, -1, 1, -1))
res6 <- matrix(rep(posi.refer,4), byrow = TRUE, ncol=3) + cbind(rep(1, 4), c(1, 1, -1, -1), c(1, -1, 1, -1))
res7 <- matrix(rep(posi.refer,4), byrow = TRUE, ncol=3) + cbind(rep(-1, 4), c(1, 1, -1, -1), c(1, -1, 1, -1))
res8 <- matrix(rep(posi.refer,6), byrow = TRUE, ncol=3) + rbind(diag(2, 3), diag(-2, 3))
return(rbind(posi.refer, res1, res2, res3, res4, res5, res6, res7, res8))
}
}
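# A quick sketch (not run) verifying the r1 -> q mapping documented above:
# radii r1 = 0, 1, 2, 3, 4 produce clusters of q = 1, 7, 19, 27, 33 voxels.
# The function is internal, so outside the package namespace it would be
# reached via the ::: operator; the reference position is illustrative.
#
# sapply(0:4, function(r) {
#   nrow(BayesDLMfMRI:::.distanceNeighbors(posi.refer = c(14, 56, 40), r1 = r))
# })
# #> [1]  1  7 19 27 33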
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/distanceNeighbors.R
#' @import pbapply
#' @importFrom Rcpp evalCpp
#' @name ffdEvidenceFETS
#' @title ffdEvidenceFETS
#' @description
#' \loadmathjax
#' This function can be used to build activation maps for task-based fMRI data.
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' Every voxel from the 4D array image is clustered with its nearest neighbors. There are as many clusters as voxels in the image. Then, activation maps are obtained by fitting a multivariate dynamic linear model on every cluster of voxels.
#' The resulting activation evidence measure for every voxel is obtained using the Forward Estimated Trajectories Sampler (FETS) algorithm. To deeply understand the method implemented in this package, a reading of \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI} and \insertCite{cardona2021bayesdlmfmri}{BayesDLMfMRI} is mandatory.
#' @param ffdc a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images that are meant to be analyzed. \code{(i,j,k)} define the position of the observed voxel at time t.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m0=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p\times p} matrix, where \mjseqn{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values lie in the range \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure among voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines a \mjseqn{q\times q} identity matrix. For the case of available prior information, \code{S0} can be defined as a \mjseqn{q\times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \code{t=0} (\code{n0=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=FALSE} is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to the approximation of a Student's t distribution by a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos1<T}).
#' @param r1 a positive integer that defines the distance from every voxel to its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @param perVol helps to define a threshold for the voxels considered in the analysis. For example, \code{perVol = 0.10} means that all the voxels with values
#' below \code{max(ffdc)*perVol} can be considered irrelevant and discarded from the analysis.
#' @param Test test type either \code{"LTT"} (Average cluster effect) or \code{"JointTest"} (Joint effect).
#' @param Ncores a positive integer indicating the number of threads or cores to be used in the computation of the activation maps.
#' @param seed random seed.
#' @return It returns a list of 3D arrays of the type \code{res[[j]][x,y,z]}, where \code{j} indexes the columns of
#' the covariates matrix and \code{[x,y,z]} represents the voxel position in the brain image. For \code{Test = "JointTest"}
#' the list contains \mjseqn{2 \times p} elements: the first \mjseqn{p} hold the joint-evidence maps and the last \mjseqn{p} the marginal-evidence maps.
#' @examples
#'\dontrun{
#' fMRI.data <- get_example_fMRI_data()
#' data("covariates", package="BayesDLMfMRI")
#' res <- ffdEvidenceFETS(ffdc = fMRI.data, covariates = Covariates,
#' m0 = 0, Cova = 100, delta = 0.95, S0 = 1,
#' n0 = 1, Nsimu1 = 100, Cutpos1 = 30,
#' r1 = 2, Test = "JointTest", Ncores = 1)
#' str(res)
#' }
#' @export
ffdEvidenceFETS = function(ffdc, covariates, m0=0, Cova=100,
delta=0.95, S0=1, n0=1, N1=FALSE,
Nsimu1 = 100, Cutpos1=30, r1 = 1,
perVol = 0.10, Test = "LTT", Ncores = NULL, seed=NULL){
if(is.logical(N1)) {
if(N1==FALSE){N1=dim(ffdc)[4]}
}
# validation
Ncores <- .get_n_cores(Ncores)
.validate_input(
N1=N1, Test=Test,
ffdc=ffdc, covariates=covariates,
n0=n0, Nsimu1=Nsimu1, perVol=perVol,
Cutpos1=Cutpos1, r1=r1, delta=delta
)
#TAKING THE POSITIONS FROM THE 4D IMAGE WITH NON-NULL VALUES
posiffd1 <- which(ffdc[,,,1] != 0, arr.ind = TRUE)
if(Test == "LTT"){
#COMPUTING THE EVIDENCE FOR BRAIN ACTIVATION: VOXEL-WISE ANALYSIS
set.seed(seed)
ffd.out = pbapply::pbapply(posiffd1, 1, .ffdSingleVoxelFETS, covariates, ffdc, m0, Cova,
delta, S0, n0, N1, Nsimu1, Cutpos1, Min.vol = perVol*max(ffdc), r1, Test, cl = Ncores)
vol.evidence <- list()
for(k in 1:(dim(covariates)[2])){
vol.evidence[[k]] <- array(0, dim(ffdc)[1:3])
}
for(j in 1:dim(covariates)[2]){
for(ii in 1:dim(posiffd1)[1]){
vol.evidence[[j]][posiffd1[ii,1], posiffd1[ii,2], posiffd1[ii,3]] <- ffd.out[j, ii]
}
}
attr(vol.evidence, "class") <- "fMRI_single_evidence"
return(vol.evidence)
}
if(Test == "JointTest"){
#COMPUTING THE EVIDENCE FOR BRAIN ACTIVATION: VOXEL-WISE ANALYSIS
set.seed(seed)
ffd.out = pbapply::pbapply(posiffd1, 1, .ffdSingleVoxelFETS, covariates, ffdc, m0, Cova,
delta, S0, n0, N1, Nsimu1, Cutpos1, Min.vol = perVol*max(ffdc), r1, Test, cl = Ncores)
    #number of tests from the output of .ffdSingleVoxelFETS (Joint and marginal)
Ntest <- 2
pAux <- dim(covariates)[2]
vol.evidence <- list()
for(k in 1:(dim(covariates)[2]*Ntest)){
vol.evidence[[k]] <- array(0, dim(ffdc)[1:3])
}
for(j in 1:dim(covariates)[2]){
for(ii in 1:dim(posiffd1)[1]){
vol.evidence[[j]][posiffd1[ii,1], posiffd1[ii,2], posiffd1[ii,3]] <- ffd.out[[ii]]$EvidenceJoint[j]
        #MARGINAL MAPS FILL POSITIONS pAux+1, ..., 2*pAux OF THE LIST
        vol.evidence[[pAux+j]][posiffd1[ii,1], posiffd1[ii,2], posiffd1[ii,3]] <- ffd.out[[ii]]$EvidenceMargin[j]
}
}
attr(vol.evidence, "class") <- "fMRI_single_evidence"
return(vol.evidence)
}
}
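# A hedged post-processing sketch (not run), using `res` and `Covariates` from
# the example above: with Test = "JointTest" the list holds 2*p maps, the
# first p with joint evidence and the last p with marginal evidence, where
# p = ncol(covariates). The 0.95 cutoff below is an arbitrary illustrative
# threshold, not a package recommendation.
#
# p <- ncol(Covariates)
# joint.map1 <- res[[1]]        # joint evidence, first covariate
# marg.map1  <- res[[p + 1]]    # marginal evidence, first covariate
# active <- which(joint.map1 > 0.95, arr.ind = TRUE)  # candidate active voxels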
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdEvidenceFETS.R
#' @import pbapply
#' @name ffdEvidenceFFBS
#' @title ffdEvidenceFFBS
#' @description
#' \loadmathjax
#' This function can be used to build activation maps for task-based fMRI data.
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' Every voxel from the 4D array image is clustered with its nearest neighbors. There are as many clusters as voxels in the image. Then, activation maps are obtained by fitting a multivariate dynamic linear model on every cluster of voxels. The resulting activation evidence measure for every voxel is obtained by using the Forward filtering backward sampling (FFBS) algorithm. To deeply understand the method implemented in this package, a reading of \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI} and \insertCite{cardona2021bayesdlmfmri}{BayesDLMfMRI} is mandatory.
#' @param ffdc a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images that are meant to be analyzed. \code{(i,j,k)} define the position of the voxel observed at time \code{t}.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m0=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p\times p} matrix, where \mjseqn{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values lie in the range \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure among voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines a \mjseqn{q\times q} identity matrix. For the case of available prior information, \code{S0} can be defined as a \mjseqn{q\times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \code{t=0} (\code{n0=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=FALSE} is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to the approximation of a Student's t distribution by a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos1<T}).
#' @param r1 a positive integer that defines the distance from every voxel to its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @param perVol helps to define a threshold for the voxels considered in the analysis. For example, \code{perVol = 0.10} means that all the voxels with values
#' below \code{max(ffdc)*perVol} can be considered irrelevant and discarded from the analysis.
#' @param Ncores a positive integer indicating the number of threads or cores to be used in the computation of the activation maps.
#' @param seed random seed.
#' @return It returns a list of the form \code{res[[k]][p,x,y,z]}, where \code{k} defines the type of test (\code{k = 1} for \code{"JointTest"}, \code{k = 2} for \code{"Marginal"}, and \code{k = 3} for \code{"LTT"}), \code{p} represents the column position in the covariates matrix and \code{x,y,z} represent the voxel position in the brain image.
#' @examples
#'\dontrun{
#' fMRI.data <- get_example_fMRI_data()
#' data("covariates", package="BayesDLMfMRI")
#' res <- ffdEvidenceFFBS(ffdc = fMRI.data, covariates = Covariates, m0=0, Cova=100,
#' delta=0.95, S0=1, n0=1, N1=FALSE,
#' Nsimu1 = 100, Cutpos1=30, r1 = 1,
#' perVol = 0.10, Ncores=3)
#' str(res)
#' }
#' @export
ffdEvidenceFFBS = function(ffdc, covariates, m0=0, Cova=100,
delta=0.95, S0=1, n0=1, N1=FALSE,
Nsimu1 = 100, Cutpos1=30, r1 = 1,
perVol = 0.10, Ncores = NULL, seed=NULL){
if(is.logical(N1)) {
if(N1==FALSE){N1=dim(ffdc)[4]}
}
# validation
Ncores <- .get_n_cores(Ncores)
.validate_input(
ffdc=ffdc,
covariates=covariates,
delta=delta,
n0 =n0,
N1 = N1,
Nsimu1 = Nsimu1,
Cutpos1=Cutpos1,
r1=r1,
perVol=perVol
)
#TAKING THE POSITIONS FROM THE 4D IMAGE WITH NON-NULL VALUES
posiffd1 <- which(ffdc[,,,1] != 0, arr.ind = TRUE)
#COMPUTING THE EVIDENCE FOR BRAIN ACTIVATION: VOXEL-WISE ANALYSIS
set.seed(seed)
ffd.out = pbapply::pbapply(posiffd1, 1, .ffdsingleVoxelFFBS, covariates, ffdc, m0, Cova,
delta, S0, n0, N1, Nsimu1, Cutpos1, Min.vol = perVol*max(ffdc), r1, cl = Ncores)
#number of tests from the output of .ffdsingleVoxelFFBS (Joint, marginal and LTT)
Ntest <- 3
vol.evidence <- list()
for(k in 1:(Ntest)){
vol.evidence[[k]] <- array(0, c(dim(covariates)[2], dim(ffdc)[1:3]))
}
for(ii in 1:dim(posiffd1)[1]){
vol.evidence[[1]][ ,posiffd1[ii,1], posiffd1[ii,2], posiffd1[ii,3]] <- ffd.out[[ii]]$EvidenceJoint
vol.evidence[[2]][ ,posiffd1[ii,1], posiffd1[ii,2], posiffd1[ii,3]] <- ffd.out[[ii]]$EvidenceMargin
vol.evidence[[3]][ ,posiffd1[ii,1], posiffd1[ii,2], posiffd1[ii,3]] <- ffd.out[[ii]]$EvidenLTT
}
attr(vol.evidence, "class") <- "fMRI_single_evidence"
return(vol.evidence)
}
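# A short indexing sketch (not run) for the output assembled above, using
# `res` and `fMRI.data` from the example: each res[[k]] is a 4D array whose
# first index is the covariate, with k = 1 the joint, k = 2 the marginal,
# and k = 3 the LTT evidence.
#
# joint.cov1 <- res[[1]][1, , , ]   # 3D joint-evidence map, first covariate
# dim(joint.cov1)                   # equals dim(fMRI.data)[1:3]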
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdEvidenceFFBS.R
#' @import pbapply
#' @importFrom Rdpack reprompt
#' @name ffdEvidenceFSTS
#' @title ffdEvidenceFSTS
#' @description
#' \loadmathjax
#' This function can be used to build activation maps for task-based fMRI data.
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' Every voxel from the 4D array image is clustered with its nearest neighbors. There are as many clusters as voxels in the image. Then, activation maps are obtained by fitting a multivariate dynamic linear model on every cluster of voxels. The resulting activation evidence measure for every voxel is obtained by using the Forward State Trajectories Sampler (FSTS) algorithm. To deeply understand the method implemented in this package, a reading of \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI} and \insertCite{cardona2021bayesdlmfmri}{BayesDLMfMRI} is mandatory.
#' @param ffdc a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images that are meant to be analyzed. \code{(i,j,k)} define the position of the voxel observed at time \code{t}.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m0=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p\times p} matrix, where \mjseqn{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values lie in the range \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure among voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines a \mjseqn{q\times q} identity matrix. For the case of available prior information, \code{S0} can be defined as a \mjseqn{q\times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \code{t=0} (\code{n0=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=FALSE} is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to the approximation of a Student's t distribution by a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos1<T}).
#' @param r1 a positive integer that defines the distance from every voxel to its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @param perVol helps to define a threshold for the voxels considered in the analysis. For example, \code{perVol = 0.10} means that all the voxels with values
#' below \code{max(ffdc)*perVol} can be considered irrelevant and discarded from the analysis.
#' @param Ncores a positive integer indicating the number of threads or cores to be used in the computation of the activation maps.
#' @param seed random seed.
#' @return It returns a list of the form \code{[[k]][p,x,y,z]}, where \code{k} defines the type of test (\code{k = 1} for \code{"JointTest"}, \code{k = 2} for \code{"Marginal"}, and \code{k = 3} for \code{"LTT"}), \code{p} represents the column position in the covariates matrix and \code{x,y,z} represent the voxel position in the brain image.
#' @examples
#'\dontrun{
#' fMRI.data <- get_example_fMRI_data()
#' data("covariates", package="BayesDLMfMRI")
#' res <- ffdEvidenceFSTS(ffdc = fMRI.data, covariates = Covariates, m0=0, Cova=100,
#' delta=0.95, S0=1, n0=1, N1=FALSE,
#' Nsimu1 = 100, Cutpos1=30, r1 = 1,
#' perVol = 0.10, Ncores=3)
#' str(res)
#' }
#' @export
ffdEvidenceFSTS = function(ffdc, covariates, m0=0, Cova=100,
delta=0.95, S0=1, n0=1, N1=FALSE,
Nsimu1 = 100, Cutpos1=30, r1 = 1,
perVol = 0.10, Ncores = NULL, seed=NULL){
if(is.logical(N1)) {
if(N1==FALSE){N1=dim(ffdc)[4]}
}
# validation
Ncores <- .get_n_cores(Ncores)
.validate_input(
ffdc=ffdc,
covariates=covariates,
delta=delta,
n0=n0,
N1=N1,
Nsimu1=Nsimu1,
Cutpos1=Cutpos1,
r1=r1,
perVol=perVol
)
#TAKING THE POSITIONS FROM THE 4D IMAGE WITH NON-NULL VALUES
posiffd1 <- which(ffdc[,,,1] != 0, arr.ind = TRUE)
#COMPUTING THE EVIDENCE FOR BRAIN ACTIVATION: VOXEL-WISE ANALYSIS
set.seed(seed)
ffd.out = pbapply::pbapply(posiffd1, 1, .ffdSingleVoxelFSTS, covariates, ffdc, m0, Cova,
delta, S0, n0, N1, Nsimu1, Cutpos1, Min.vol = perVol*max(ffdc), r1, cl = Ncores)
#number of tests from the output of .ffdsingleVoxelFFBS (Joint, marginal and LTT)
Ntest <- 3
vol.evidence <- list()
for(k in 1:(Ntest)){
vol.evidence[[k]] <- array(0, c(dim(covariates)[2], dim(ffdc)[1:3]))
}
for(ii in 1:dim(posiffd1)[1]){
vol.evidence[[1]][ ,posiffd1[ii,1], posiffd1[ii,2], posiffd1[ii,3]] <- ffd.out[[ii]]$EvidenceJoint
vol.evidence[[2]][ ,posiffd1[ii,1], posiffd1[ii,2], posiffd1[ii,3]] <- ffd.out[[ii]]$EvidenceMargin
vol.evidence[[3]][ ,posiffd1[ii,1], posiffd1[ii,2], posiffd1[ii,3]] <- ffd.out[[ii]]$EvidenLTT
}
attr(vol.evidence, "class") <- "fMRI_single_evidence"
return(vol.evidence)
}
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdEvidenceFSTS.R
#' @import pbapply
#' @name ffdGroupEvidenceFETS
#' @title ffdGroupEvidenceFETS
#' @description
#' \loadmathjax
#' This function can be used to build activation maps for group task-based fMRI data.
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' A multivariate dynamic linear model is fitted in the same fashion as at the individual level for every subject in the sample.
#' However, at this stage, the posterior distributions from all the subjects are combined to build a single one,
#' which is then employed to compute the activation evidence maps for the group using the Forward Estimated Trajectories Sampler (FETS) algorithm. To deeply understand the method implemented in this package, a reading of \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI} and \insertCite{cardona2021bayesdlmfmri}{BayesDLMfMRI} is mandatory.
#' @param ffdGroup list of N elements, each being a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images related to each of the N subjects in the sample.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m0=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p\times p} matrix, where \mjseqn{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values lie in the range \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure between pairs of voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines a \mjseqn{q\times q} identity matrix. For the case of available prior information, \code{S0} can be defined as a \mjseqn{q \times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \mjseqn{t=0} (\code{n0=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=FALSE} is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos a cutpoint time from where the on-line trajectories begin. This parameter value is related to the approximation of a Student's t distribution by a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos<T}).
#' @param r1 a positive integer that defines the distance from every voxel to its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @param Test test type either \code{"LTT"} (Average cluster effect) or \code{"JointTest"} (Joint effect).
#' @param mask a 3D array that serves as a reference brain (MNI atlas) for the group analysis.
#' @param Ncores a positive integer indicating the number of threads or cores to be used in the computation of the activation maps.
#' @return For \code{Test=="JointTest"} it returns a list of \mjseqn{2 \times p} activation maps, where \mjseqn{p} is the number of covariates:
#' the first \mjseqn{p} maps contain the joint-effect evidence for each column of the covariates matrix, and the remaining \mjseqn{p} maps
#' contain the evidence associated with the marginal distribution. For \code{Test=="LTT"} it returns a list of \mjseqn{p} activation maps
#' with the average-cluster-effect evidence for each column of the covariates matrix.
#' @examples
#'\dontrun{
#' DatabaseGroup <- get_example_fMRI_data_group()
#' data("covariates", package="BayesDLMfMRI")
#' data("mask", package="BayesDLMfMRI")
#' res <- ffdGroupEvidenceFETS(ffdGroup = DatabaseGroup, covariates = Covariates,
#' m0 = 0, Cova = 100, delta = 0.95, S0 = 1,
#' n0 = 1, N1 = FALSE, Nsimu1 = 100, Cutpos=30,
#' r1 = 1, Test = "JointTest", mask = mask, Ncores = 7)
#' str(res)
#' }
#' @export
ffdGroupEvidenceFETS <- function(ffdGroup, covariates, m0=0, Cova=100,
delta = 0.95, S0 = 1, n0 = 1,
N1 = FALSE, Nsimu1=100,
Cutpos=30, r1, Test, mask, Ncores = NULL){
if(is.logical(N1)) {
if(N1==FALSE){N1 = dim(covariates)[1]}
}
# validation
Ncores <- .get_n_cores(Ncores)
.validate_input(
ffdGroup=ffdGroup,
covariates=covariates,
delta=delta,
n0=n0,
N1=N1,
Nsimu1=Nsimu1,
Cutpos1=Cutpos,
r1=r1,
Test=Test
)
covariates <- as.matrix(covariates)
#TAKING THE POSITIONS FROM THE 4D IMAGE WITH NON-NULL VALUES
posiffd <- which(mask[,,] != 0, arr.ind = TRUE)
if(Test == "LTT"){
ffd.out <- pbapply::pbapply(posiffd, 1, .ffdGroupVoxelFETS, ffdGroup, covariates, m0, Cova,
delta, S0, n0, N1, Nsimu1, r1, Test, Cutpos, cl = Ncores)
vol.evidence <- list()
for(k in 1:(dim(covariates)[2])){
vol.evidence[[k]] <- array(0, dim(mask))
}
for(j in 1:(dim(covariates)[2])){
for(i in 1:dim(posiffd)[1]){
vol.evidence[[j]][posiffd[i,1], posiffd[i,2], posiffd[i,3]] <- ffd.out[j, i]
}
}
attr(vol.evidence, "class") <- "fMRI_group_evidence"
return(vol.evidence)
}
if(Test == "JointTest"){
ffd.out = pbapply::pbapply(posiffd, 1, .ffdGroupVoxelFETS, ffdGroup, covariates, m0, Cova,
delta, S0, n0, N1, Nsimu1, r1, Test, Cutpos, cl = Ncores)
Ntest <- 2
pAux <- dim(covariates)[2]
vol.evidence <- list()
for(k in 1:(dim(covariates)[2]*Ntest)){
vol.evidence[[k]] <- array(0, dim(mask)[1:3])
}
for(j in 1:dim(covariates)[2]){
for(ii in 1:dim(posiffd)[1]){
vol.evidence[[j]][posiffd[ii,1], posiffd[ii,2], posiffd[ii,3]] <- ffd.out[[ii]]$EvidenceJoint[j]
        #MARGINAL MAPS FILL POSITIONS pAux+1, ..., 2*pAux OF THE LIST
        vol.evidence[[pAux+j]][posiffd[ii,1], posiffd[ii,2], posiffd[ii,3]] <- ffd.out[[ii]]$EvidenceMargin[j]
}
}
attr(vol.evidence, "class") <- "fMRI_group_evidence"
return(vol.evidence)
}
}
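# A hedged export sketch (not run). It assumes the oro.nifti package, which is
# not a dependency of this file, to write a group evidence map to a NIfTI
# file; res[[1]] is the first activation map returned above and shares the
# dimensions of the mask, so it can be inspected in any NIfTI viewer.
#
# library(oro.nifti)
# writeNIfTI(nifti(res[[1]]), filename = "group_evidence_cov1")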
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdGroupEvidenceFETS.R
#' @import pbapply
#' @name ffdGroupEvidenceFFBS
#' @title ffdGroupEvidenceFFBS
#' @description
#' \loadmathjax
#' This function can be used to build activation maps for group task-based fMRI data.
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' A multivariate dynamic linear model is fitted in the same fashion as at the individual level for every subject in the sample.
#' However, at this stage, the posterior distributions from all the subjects are combined to build a single one,
#' which is then employed to compute the activation evidence maps for the group using the Forward Filtering Backward Sampling (FFBS) algorithm. To deeply understand the method implemented in this package, a reading of \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI} and \insertCite{cardona2021bayesdlmfmri}{BayesDLMfMRI} is mandatory.
#' @param ffdGroup list of N elements, each being a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images related to each of the N subjects in the sample.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p\times p} matrix, where \mjseqn{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure between pair of voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines an \mjseqn{q\times q} identity matrix. For the case of available prior information, \code{S0} can be defined as an \mjseqn{q\times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \mjseqn{t=0} (\code{n1=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=NULL} (or equivalently \code{N1=T}) is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos<T}).
#' @param r1 a non-negative integer that defines the distance between every voxel and its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @param mask a 3D array that works as a brain of reference (MNI atlas) for the group analysis.
#' @param Ncores a positive integer indicating the number of threads or cores to be used in the computation of the activation maps.
#' @return It returns a list of the form \code{[[k]][p,x,y,z]}, where \code{k} defines the type of test
#' (\code{k = 1} for Joint effect, \code{k = 2} for Marginal effect, and \code{k = 3} for Average cluster effect), \code{p} represents the column
#' position in the covariates matrix and \code{x,y,z} represent the voxel position in the brain image.
#' @examples
#'\dontrun{
#' # This example can take a long time to run.
#' DatabaseGroup <- get_example_fMRI_data_group()
#' data("covariates", package="BayesDLMfMRI")
#' res <- ffdGroupEvidenceFFBS(ffdGroup = DatabaseGroup, covariates = Covariates,
#' m0=0, Cova=100, delta = 0.95,
#' S0 = 1, n0 = 1, N1 = FALSE, Nsimu1 = 100,
#' Cutpos = 30, r1 = 1, mask = MASK, Ncores = 7)
#' str(res)
#' }
#' @export
ffdGroupEvidenceFFBS <- function(ffdGroup, covariates, m0=0, Cova=100,
delta = 0.95, S0 = 1, n0 = 1,
N1 = FALSE, Nsimu1=100,
Cutpos=30, r1, mask, Ncores = NULL){
if(is.logical(N1)) {
if(N1==FALSE){N1 = dim(covariates)[1]}
}
# validation
Ncores <- .get_n_cores(Ncores)
.validate_input(
ffdGroup=ffdGroup,
covariates=covariates,
delta=delta,
n0=n0,
N1=N1,
Nsimu1=Nsimu1,
Cutpos1=Cutpos,
r1=r1
)
covariates <- as.matrix(covariates)
#TAKING THE POSITIONS FROM THE 4D IMAGE WITH NON-NULL VALUES
posiffd <- which(mask[,,] != 0, arr.ind = TRUE)
ffd.out <- pbapply::pbapply(posiffd, 1, .ffdGroupVoxelFFBS, ffdGroup, covariates, m0, Cova,
delta, S0, n0, N1, Nsimu1, r1, Cutpos, cl = Ncores)
  # TODO: it seems this variable is not required
#number of tests from the output of .ffdsingleVoxelFFBS (Joint, marginal and LTT)
Ntest <- 3
vol.evidence <- list()
for(k in 1:(Ntest)){
vol.evidence[[k]] <- array(0, c(dim(covariates)[2], dim(mask)[1:3]))
}
for(ii in 1:dim(posiffd)[1]){
vol.evidence[[1]][ ,posiffd[ii,1], posiffd[ii,2], posiffd[ii,3]] <- ffd.out[[ii]]$EvidenceJoint
vol.evidence[[2]][ ,posiffd[ii,1], posiffd[ii,2], posiffd[ii,3]] <- ffd.out[[ii]]$EvidenceMargin
vol.evidence[[3]][ ,posiffd[ii,1], posiffd[ii,2], posiffd[ii,3]] <- ffd.out[[ii]]$EvidenLTT
}
attr(vol.evidence, "class") <- "fMRI_group_evidence"
return(vol.evidence)
}
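
# Illustrative helper (not part of the package API): a minimal sketch that
# assumes only the return structure produced above, i.e. a list whose k-th
# element is a [p, x, y, z] array of activation evidence (k = 1 Joint,
# k = 2 Marginal, k = 3 Average cluster effect). It extracts the 3D map for
# one covariate and counts voxels above a cutoff; the 0.95 default is an
# arbitrary illustrative choice, not a package recommendation.
.example_threshold_map <- function(res, test = 1, covariate = 1, cutoff = 0.95) {
  ev.map <- res[[test]][covariate, , , ]               # 3D evidence map
  list(map = ev.map,
       n.active = sum(ev.map > cutoff, na.rm = TRUE))  # voxels above cutoff
}
# e.g. .example_threshold_map(res)$n.active after running ffdGroupEvidenceFFBS()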
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdGroupEvidenceFFBS.R
|
#' @import pbapply
#' @name ffdGroupEvidenceFSTS
#' @title ffdGroupEvidenceFSTS
#' @description
#' \loadmathjax
#' This function can be used to build activation maps for group task-based fMRI data.
#' @details
#' A multivariate dynamic linear model is fitted in the same fashion as at the individual level for every subject in the sample.
#' However, at this stage, the posterior distributions from all the subjects are combined to build a single one,
#' which is then employed to compute the activation evidence maps for the group using the Forward State Trajectories Sampler (FSTS) algorithm. To deeply understand the method implemented in this package, a reading of \insertCite{CARDONAJIMENEZ2021107297}{BayesDLMfMRI} and \insertCite{cardona2021bayesdlmfmri}{BayesDLMfMRI} is mandatory.
#' @param ffdGroup list of N elements, each being a 4D array (\code{ffdc[i,j,k,t]}) that contains the sequence of MRI images related to each of the N subjects in the sample.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at \code{t=0} (\code{m0=0} is the default value when no prior information is available). For the case of available prior information, \code{m0} can be defined as a \mjseqn{p\times q} matrix, where \mjseqn{p} is the number of columns in the covariates object and \mjseqn{q} is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at \code{t=0} (\code{Cova=100} is the default value when no prior information is available). For the case of available prior information, \code{Cova} can be defined as a \mjseqn{p\times p} matrix, where \mjseqn{p} is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between \code{0.85<delta<1}. \code{delta=1} will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure between pair of voxels within every cluster at \code{t=0}. \code{S0=1} is the default value when no prior information is available and defines a \mjseqn{q\times q} identity matrix. For the case of available prior information, \code{S0} can be defined as a \mjseqn{q \times q} matrix, where \mjseqn{q} is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix \code{S0} at \code{t=0} (\code{n0=1} is the default value when no prior information is available). For the case of available prior information, \code{n0} can be set as \code{n0=np}, where \code{np} is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (\code{2<N1<T}) from the \code{ffdc} array employed in the model fitting. \code{N1=FALSE} (or equivalently \code{N1=T}) is its default value, taking all the images in the \code{ffdc} array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (\code{30<Cutpos<T}).
#' @param r1 a non-negative integer that defines the distance between every voxel and its most distant neighbor. This value determines the size of the cluster. The users can set a range of different \code{r1} values: \mjseqn{r1 = 0, 1, 2, 3, 4}, which leads to \mjseqn{q = 1, 7, 19, 27, 33}, where \mjseqn{q} is the size of the cluster.
#' @param mask 3D array that works as a brain of reference (MNI atlas) for the group analysis.
#' @param Ncores a positive integer indicating the number of threads or cores to be used in the computation of the activation maps.
#' @return It returns a list of the form \code{[[k]][p,x,y,z]}, where \code{k} defines the type of test
#' (\code{k = 1} for Joint effect, \code{k = 2} for Marginal effect, and \code{k = 3} for Average cluster effect), \code{p} represents the column
#' position in the covariates matrix and \code{x,y,z} represent the voxel position in the brain image.
#' @examples
#'\dontrun{
#' # This example can take a long time to run.
#' DatabaseGroup <- get_example_fMRI_data_group()
#' data("covariates", package="BayesDLMfMRI")
#' res <- ffdGroupEvidenceFSTS(ffdGroup = DatabaseGroup, covariates = Covariates,
#' m0 = 0, Cova = 100, delta = 0.95, S0 = 1,
#' n0 = 1, N1 = FALSE, Nsimu1 = 100, Cutpos=30,
#' r1 = 1, mask = MASK, Ncores = 7)
#' str(res)
#' }
#' @export
ffdGroupEvidenceFSTS <- function(ffdGroup, covariates, m0=0, Cova=100,
delta = 0.95, S0 = 1, n0 = 1,
N1 = FALSE, Nsimu1=100, Cutpos=30, r1,
mask, Ncores = NULL){
if(is.logical(N1)) {
if(N1==FALSE){N1 = dim(covariates)[1]}
}
# validation
Ncores <- .get_n_cores(Ncores)
.validate_input(
ffdGroup=ffdGroup,
covariates=covariates,
delta=delta,
n0=n0,
N1=N1,
Nsimu1=Nsimu1,
Cutpos1=Cutpos,
r1=r1
)
covariates <- as.matrix(covariates)
#TAKING THE POSITIONS FROM THE 4D IMAGE WITH NON-NULL VALUES
posiffd <- which(mask[,,] != 0, arr.ind = TRUE)
ffd.out <- pbapply::pbapply(posiffd, 1, .ffdGroupVoxelFSTS, ffdGroup, covariates, m0, Cova,
delta, S0, n0, N1, Nsimu1, r1, Cutpos, cl = Ncores)
#number of tests from the output of .ffdsingleVoxelFFBS (Joint, marginal and LTT)
Ntest <- 3
vol.evidence <- list()
for(k in 1:(Ntest)){
vol.evidence[[k]] <- array(0, c(dim(covariates)[2], dim(mask)[1:3]))
}
for(ii in 1:dim(posiffd)[1]){
vol.evidence[[1]][ ,posiffd[ii,1], posiffd[ii,2], posiffd[ii,3]] <- ffd.out[[ii]]$EvidenceJoint
vol.evidence[[2]][ ,posiffd[ii,1], posiffd[ii,2], posiffd[ii,3]] <- ffd.out[[ii]]$EvidenceMargin
vol.evidence[[3]][ ,posiffd[ii,1], posiffd[ii,2], posiffd[ii,3]] <- ffd.out[[ii]]$EvidenLTT
}
attr(vol.evidence, "class") <- "fMRI_group_evidence"
return(vol.evidence)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdGroupEvidenceFSTS.R
|
#' @name .ffdGroupVoxelFETS
#' @title .ffdGroupVoxelFETS
#' @description
#' this is an internal function
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' this is an internal function
#' @param posi.ffd the position of the voxel in the brain image.
#' @param DatabaseGroup list of N elements, each being a 4D array (ffdc[i,j,k,t]) that contains the sequence of MRI images related to each of the N subjects in the sample.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at t=0 (m=0 is the default value when no prior information is available). For the case of available prior information, m0 can be defined as a pXr matrix, where p is the number of columns in the covariates object and r is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at t=0 (Cova=100 is the default value when no prior information is available). For the case of available prior information, Cova0 can be defined as a pXp matrix, where p is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between 0.85<delta<1. delta=1 will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure between pair of voxels within every cluster at t=0. S0=1 is the default value when no prior information is available and defines an rXr identity matrix. For the case of available prior information, S0 can be defined as an rXr matrix, where r is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix S0 at t=0 (n=1 is the default value when no prior information is available). For the case of available prior information, n0 can be set as n0=np, where np is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (2<N1<T) from the ffdc array employed in the model fitting. N1=NULL (or equivalently N1=T) is its default value, taking all the images in the ffdc array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param r1 a non-negative integer that defines the distance between every voxel and its most distant neighbor. This value determines the size of the cluster. The users can set a range of different r values: r = 0, 1, 2, 3, 4, which leads to q = 1, 7, 19, 27, 33, where q is the size of the cluster.
#' @param Test test type either "LTT" (Average cluster effect) or "JointTest" (Joint effect).
#' @param Cutpos a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (30<Cutpos1<T).
#' @keywords internal
.ffdGroupVoxelFETS <- function(posi.ffd, DatabaseGroup, covariates, m0, Cova, delta, S0, n0, N1, Nsimu1, r1, Test, Cutpos){
#browser()
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = posi.ffd, r1)
Ngroup <- length(DatabaseGroup)
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
#system.time(
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
#case for single voxels
if(any(series.group==0)){
if(Test=="LTT"){return( rep(NA, dim(covariates)[2]))}
if(Test=="JointTest"){return( list(EvidenceJoint = rep(NA, dim(covariates)[2]),
EvidenceMargin = rep(NA, dim(covariates)[2]) ) )}}else{
if(Test=="JointTest"){
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
          delta1 <- sqrt(delta)
          Beta1 <- diag(1/rep(delta1, dim(covariates)[2]))
res <- .Group_FunctionalMultiTest(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = 0, NIn = N1, NS = Ngroup, Nsimu = Nsimu1, CUTpos = Cutpos)
return( list(EvidenceJoint = res$EvidenMultivariate,
EvidenceMargin = res$EvidenMarginal) )
}
if(Test=="LTT"){return(rep(NA, dim(covariates)[2]))}}}else{
Ngroup <- length(DatabaseGroup)
    #THIS LINE RETURNS THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = posi.ffd, r1)
aux.pos <- dim(DatabaseGroup[[1]])[1:3]
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
if(any(series.group[,1]==0)){if(Test=="LTT"){return( rep(NA, dim(covariates)[2]))}
if(Test=="JointTest"){return( list(EvidenceJoint = rep(NA, dim(covariates)[2]),
EvidenceMargin = rep(NA, dim(covariates)[2]) ) )}}else{
flag <- any(series.group==0)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
delta1 <- sqrt(delta)
          Beta1 <- diag(1/rep(delta1, dim(covariates)[2]))
if(Test=="LTT"){
res <- .Gruop_FunctionalTestLT(series.group, covariates, m0, Cova1, S0, Beta1, n0, sum(flag), N1, Ngroup, Nsimu1, Cutpos)
return(as.vector(res$Eviden))
}
if(Test=="JointTest"){
res <- .Group_FunctionalMultiTest(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = sum(flag), NIn = N1, NS = Ngroup, Nsimu = Nsimu1, CUTpos = Cutpos)
return( list(EvidenceJoint = res$EvidenMultivariate,
EvidenceMargin = res$EvidenMarginal) )
}
}
}
}
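
# Hedged sketch: .distanceNeighbors() is defined elsewhere in the package, but
# the documented cluster sizes q = 1, 7, 19, 27, 33 for r1 = 0, 1, 2, 3, 4 are
# consistent with keeping every voxel whose squared Euclidean distance from the
# reference voxel is at most r1. The toy function below reproduces those counts;
# it is an assumption for illustration, not the package's implementation.
.example_neighbors <- function(posi.refer, r1) {
  offs <- expand.grid(dx = -2:2, dy = -2:2, dz = -2:2)        # candidate offsets
  offs <- as.matrix(offs[rowSums(offs^2) <= r1, , drop = FALSE])
  sweep(offs, 2, as.numeric(posi.refer), `+`)                 # shift to the voxel
}
# sapply(0:4, function(r) nrow(.example_neighbors(c(45, 54, 45), r)))
# gives 1 7 19 27 33, matching the documented cluster sizes.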
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdGroupVoxelFETS.R
|
#' @name .ffdGroupVoxelFFBS
#' @title .ffdGroupVoxelFFBS
#' @description
#' this is an internal function
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' this is an internal function
#' @param posi.ffd the position of the voxel in the brain image.
#' @param DatabaseGroup list of N elements, each being a 4D array (ffdc[i,j,k,t]) that contains the sequence of MRI images related to each of the N subjects in the sample.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at t=0 (m=0 is the default value when no prior information is available). For the case of available prior information, m0 can be defined as a pXr matrix, where p is the number of columns in the covariates object and r is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at t=0 (Cova=100 is the default value when no prior information is available). For the case of available prior information, Cova0 can be defined as a pXp matrix, where p is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between 0.85<delta<1. delta=1 will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure between pair of voxels within every cluster at t=0. S0=1 is the default value when no prior information is available and defines an rXr identity matrix. For the case of available prior information, S0 can be defined as an rXr matrix, where r is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix S0 at t=0 (n=1 is the default value when no prior information is available). For the case of available prior information, n0 can be set as n0=np, where np is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (2<N1<T) from the ffdc array employed in the model fitting. N1=NULL (or equivalently N1=T) is its default value, taking all the images in the ffdc array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param r1 a non-negative integer that defines the distance between every voxel and its most distant neighbor. This value determines the size of the cluster. The users can set a range of different r values: r = 0, 1, 2, 3, 4, which leads to q = 1, 7, 19, 27, 33, where q is the size of the cluster.
#' @param Cutpos a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (30<Cutpos1<T).
#' @keywords internal
.ffdGroupVoxelFFBS <- function(posi.ffd, DatabaseGroup, covariates, m0, Cova, delta, S0, n0, N1, Nsimu1, r1, Cutpos){
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = posi.ffd, r1)
Ngroup <- length(DatabaseGroup)
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
#system.time(
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
#case for single voxels
if(any(series.group==0)){return( list(EvidenceJoint = rep(NA, dim(covariates)[2]),
EvidenceMargin = rep(NA, dim(covariates)[2]),
EvidenLTT = rep(NA, dim(covariates)[2])) )}else{
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
           delta1 <- sqrt(delta)
           Beta1 <- diag(1/rep(delta1, dim(covariates)[2]))
res <- .Group_Functional_Backwards_Sampling(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = 0, NIn = N1, NS = Ngroup, Nsimu = Nsimu1,
CUTpos = Cutpos)
return( list(EvidenceJoint = res$Eviden_joint,
EvidenceMargin = res$Eviden_margin,
EvidenLTT = res$eviden_lt) )
}
}else{
Ngroup <- length(DatabaseGroup)
    #THIS LINE RETURNS THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = posi.ffd, r1)
aux.pos <- dim(DatabaseGroup[[1]])[1:3]
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
if(any(series.group[,1]==0)){return( list(EvidenceJoint = rep(NA, dim(covariates)[2]),
EvidenceMargin = rep(NA, dim(covariates)[2]),
EvidenLTT = rep(NA, dim(covariates)[2])) )}else{
flag <- any(series.group==0)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
delta1 <- sqrt(delta)
      Beta1 <- diag(1/rep(delta1, dim(covariates)[2]))
res <- .Group_Functional_Backwards_Sampling(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = flag, NIn = N1, NS = Ngroup, Nsimu = Nsimu1,
CUTpos = Cutpos)
return( list(EvidenceJoint = res$Eviden_joint,
EvidenceMargin = res$Eviden_margin,
EvidenLTT = res$eviden_lt) )
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdGroupVoxelFFBS.R
|
#' @name .ffdGroupVoxelFSTS
#' @title .ffdGroupVoxelFSTS
#' @description
#' this is an internal function
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' this is an internal function
#' @param posi.ffd the position of the voxel in the brain image.
#' @param DatabaseGroup list of N elements, each being a 4D array (ffdc[i,j,k,t]) that contains the sequence of MRI images related to each of the N subjects in the sample.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at t=0 (m=0 is the default value when no prior information is available). For the case of available prior information, m0 can be defined as a pXr matrix, where p is the number of columns in the covariates object and r is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at t=0 (Cova=100 is the default value when no prior information is available). For the case of available prior information, Cova0 can be defined as a pXp matrix, where p is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between 0.85<delta<1. delta=1 will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure between pair of voxels within every cluster at t=0. S0=1 is the default value when no prior information is available and defines an rXr identity matrix. For the case of available prior information, S0 can be defined as an rXr matrix, where r is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix S0 at t=0 (n=1 is the default value when no prior information is available). For the case of available prior information, n0 can be set as n0=np, where np is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (2<N1<T) from the ffdc array employed in the model fitting. N1=NULL (or equivalently N1=T) is its default value, taking all the images in the ffdc array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param r1 a non-negative integer that defines the distance between every voxel and its most distant neighbor. This value determines the size of the cluster. The users can set a range of different r values: r = 0, 1, 2, 3, 4, which leads to q = 1, 7, 19, 27, 33, where q is the size of the cluster.
#' @param Cutpos a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (30<Cutpos1<T).
#' @keywords internal
.ffdGroupVoxelFSTS <- function(posi.ffd, DatabaseGroup, covariates, m0, Cova, delta, S0, n0, N1, Nsimu1, r1, Cutpos){
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = posi.ffd, r1)
Ngroup <- length(DatabaseGroup)
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
#system.time(
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
#case for single voxels
if(any(series.group==0)){return( list(EvidenceJoint = rep(NA, dim(covariates)[2]),
EvidenceMargin = rep(NA, dim(covariates)[2]),
EvidenLTT = rep(NA, dim(covariates)[2])) )}else{
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
           delta1 <- sqrt(delta)
           Beta1 <- diag(1/rep(delta1, dim(covariates)[2]))
res <- .Group_Functional_Equation(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = 0, NIn = N1, NS = Ngroup, Nsimu = Nsimu1,
CUTpos = Cutpos)
return( list(EvidenceJoint = res$Eviden_joint,
EvidenceMargin = res$Eviden_margin,
EvidenLTT = res$eviden_lt) )
}
}else{
Ngroup <- length(DatabaseGroup)
    #THIS LINE RETURNS THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = posi.ffd, r1)
aux.pos <- dim(DatabaseGroup[[1]])[1:3]
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR A SPECIFIC CLUSTER
series.group = NULL
for(i in 1:Ngroup){
ffd.c <- DatabaseGroup[[i]]
series <- sapply(1:dim(posi)[1], function(ii){ffd.c[posi[ii,1], posi[ii,2], posi[ii,3], ]})
series.group <- rbind(series.group, series)
}
if(any(series.group[,1]==0)){return( list(EvidenceJoint = rep(NA, dim(covariates)[2]),
EvidenceMargin = rep(NA, dim(covariates)[2]),
EvidenLTT = rep(NA, dim(covariates)[2])) )}else{
flag <- any(series.group==0)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
delta1 <- sqrt(delta)
      Beta1 <- diag(1/rep(delta1, dim(covariates)[2]))
res <- .Group_Functional_Equation(ffd1 = series.group, Cova = covariates, m0In = m0, c0In = Cova1, S0In = S0,
beta0In = Beta1, nt0In = n0, flag1 = flag, NIn = N1, NS = Ngroup, Nsimu = Nsimu1,
CUTpos = Cutpos)
return( list(EvidenceJoint = res$Eviden_joint,
EvidenceMargin = res$Eviden_margin,
EvidenLTT = res$eviden_lt) )
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdGroupVoxelFSTS.R
|
#' @name .ffdSingleVoxelFETS
#' @title .ffdSingleVoxelFETS
#' @description
#' this is an internal function
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' this is an internal function
#' @param posi.ffd the position of the voxel in the brain image.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param ffdc a 4D array (ffdc[i,j,k,t]) that contains the sequence of MRI images that are meant to be analyzed. (i,j,k) define the position of the observed voxel at time t.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at t=0 (m=0 is the default value when no prior information is available). For the case of available prior information, m0 can be defined as a pXr matrix, where p is the number of columns in the covariates object and r is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at t=0 (Cova=100 is the default value when no prior information is available). For the case of available prior information, Cova0 can be defined as a pXp matrix, where p is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between 0.85<delta<1. delta=1 will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure among voxels within every cluster at t=0. S0=1 is the default value when no prior information is available and defines an rXr identity matrix. For the case of available prior information, S0 can be defined as an rXr matrix, where r is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix S0 at t=0 (n=1 is the default value when no prior information is available). For the case of available prior information, n0 can be set as n0=np, where np is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (2<N1<T) from the ffdc array employed in the model fitting. N1=NULL (or equivalently N1=T) is its default value, taking all the images in the ffdc array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (30<Cutpos1<T).
#' @param Min.vol helps to define a threshold for the voxels considered in
#' the analysis. For example, Min.vol = 0.10 means that all the voxels with values
#' below max(ffdc)*Min.vol can be considered irrelevant and discarded from the analysis.
#' @param r1 a non-negative integer that defines the distance between every voxel and its most distant neighbor. This value determines the size of the cluster. The users can set a range of different r values: r = 0, 1, 2, 3, 4, which leads to q = 1, 7, 19, 27, 33, where q is the size of the cluster.
#' @param Test test type either "LTT" (Average cluster effect) or "JointTest" (Joint effect).
#' @keywords internal
.ffdSingleVoxelFETS <- function(posi.ffd, covariates, ffdc, m0, Cova, delta, S0, n0, N1, Nsimu1, Cutpos1, Min.vol, r1, Test){
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
#BOLD RESPONSE SERIES IN THE CLUSTER RELATED TO posi
series.def <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: JUST TEMPORAL SERIES ABOVE THE THRESHOLD, DISCARD TEMPORAL SERIES WITH NON-SIGNIFICANT SIGNAL
if(min(series.def[,1]) < Min.vol){if(Test=="LTT"){return( rep(NA, dim(covariates)[2]) )}
if(Test=="JointTest"){return( list(EvidenceJoint = rep(NA, dim(covariates)[2]),
EvidenceMargin = rep(NA, dim(covariates)[2]) ) )}}else{
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
if(Test=="LTT"){
return(rep(NA, dim(covariates)[2]))
}
if(Test=="JointTest"){
res <- .Individual_FunctionalMultiTest(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = res$EvidenMarginal))
}
}
}else{
    #THIS LINE RETURNS THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
aux.pos <- dim(ffdc)[1:3]
#GOING THROUGH EACH ROW AND CHECKING IF ANY POSITION IS OUTSIDE THE BOUNDS
row_sub1 = apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR THE CLUSTER RELATED TO posi
series <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: JUST TEMPORAL SERIES ABOVE THE THRESHOLD, DISCARD TEMPORAL SERIES WITH NON-SIGNIFICANT SIGNAL
if(min(series[,1])<Min.vol){if(Test=="LTT"){return( rep(NA, dim(covariates)[2]) )}
if(Test=="JointTest"){return( list(EvidenceJoint = rep(NA, dim(covariates)[2]),
EvidenceMargin = rep(NA, dim(covariates)[2])) )}}else{
# IDENTIFYING AND REMOVING TEMPORAL SERIES INSIDE THE CLUSTER WITH ZERO VALUES
zero.series <- unique(which(series==0, arr.ind = TRUE)[,2])
if(length(zero.series)==0){series.def <- series}else{series.def <- series[,-(zero.series)]}
#CHECKING THE SIZE OF THE CLUSTER: q=1 or q>1
#is.vector(series.def)==TRUE THEN q=1 OTHERWISE q>1
if(is.vector(series.def)){
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))}else{
series.def <- apply(series.def, 2, function(x){(x-mean(x))/sd(x)})
#PRIOR HYPERPARAMETERS FOR q1>1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=dim(series.def)[2])
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
delta1 <- sqrt(delta)
#DISCOUNT FACTORS MATRIX
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
}
if(Test=="LTT"){
res <- .Individual_FunctionalTestLT (ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1, S0In = S01,
beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
#EVIDENCE OF ACTIVATION FOR A SINGLE VOXEL TAKING INTO ACCOUNT THE INFORMATION OF THE ENTIRE CLUSTER OF SIZE q
return(res$Eviden)
}
if(Test=="JointTest"){
res <- .Individual_FunctionalMultiTest(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
#EVIDENCE OF ACTIVATION FOR A SINGLE VOXEL TAKING INTO ACCOUNT THE INFORMATION OF THE ENTIRE CLUSTER OF SIZE q
return(list(EvidenceJoint = res$EvidenMultivariate, EvidenceMargin = res$EvidenMarginal))
}
}
}
}
#END FUNCTION
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdSingleVoxelFETS.R
|
#' @name .ffdsingleVoxelFFBS
#' @title .ffdsingleVoxelFFBS
#' @description
#' this is an internal function
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' this is an internal function
#' @param posi.ffd the position of the voxel in the brain image.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param ffdc a 4D array (ffdc[i,j,k,t]) that contains the sequence of MRI images that are meant to be analyzed. (i,j,k) define the position of the observed voxel at time t.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at t=0 (m=0 is the default value when no prior information is available). For the case of available prior information, m0 can be defined as a pXr matrix, where p is the number of columns in the covariates object and r is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at t=0 (Cova=100 is the default value when no prior information is available). For the case of available prior information, Cova0 can be defined as a pXp matrix, where p is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between 0.85<delta<1. delta=1 will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure among voxels within every cluster at t=0. S0=1 is the default value when no prior information is available and defines an rXr identity matrix. For the case of available prior information, S0 can be defined as an rXr matrix, where r is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix S0 at t=0 (n=1 is the default value when no prior information is available). For the case of available prior information, n0 can be set as n0=np, where np is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (2<N1<T) from the ffdc array employed in the model fitting. N1=NULL (or equivalently N1=T) is its default value, taking all the images in the ffdc array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (30<Cutpos1<T).
#' @param Min.vol helps to define a threshold for the voxels considered in
#' the analysis. For example, Min.vol = 0.10 means that all the voxels with values
#' below max(ffdc)*Min.vol can be considered irrelevant and discarded from the analysis.
#' @param r1 a non-negative integer that defines the distance between every voxel and its most distant neighbor. This value determines the size of the cluster. The users can set a range of different r values: r = 0, 1, 2, 3, 4, which leads to q = 1, 7, 19, 27, 33, where q is the size of the cluster.
#' @keywords internal
.ffdsingleVoxelFFBS <- function(posi.ffd, covariates, ffdc, m0, Cova, delta, S0, n0, N1, Nsimu1, Cutpos1, Min.vol, r1){
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
#BOLD RESPONSE SERIES IN THE CLUSTER RELATED TO posi
series.def <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: JUST TEMPORAL SERIES ABOVE THE THRESHOLD, DISCARD TEMPORAL SERIES WITH NON-SIGNIFICANT SIGNAL
if(min(series.def[,1]) < Min.vol){
return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
res <- .Individual_Backwards_Sampling(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
return(list(EvidenceJoint = as.vector(res$Eviden_joint), EvidenceMargin = as.vector(res$Eviden_margin), EvidenLTT=as.vector(res$eviden_lt)))
}
}else{
    #THIS LINE RETURNS THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
aux.pos <- dim(ffdc)[1:3]
#GOING THROUGH EACH ROW AND CHECKING IF ANY POSITION IS OUTSIDE THE BOUNDS
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR THE CLUSTER RELATED TO posi
series <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: JUST TEMPORAL SERIES ABOVE THE THRESHOLD, DISCARD TEMPORAL SERIES WITH NON-SIGNIFICANT SIGNAL
if(min(series[,1]) < Min.vol){return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
# IDENTIFYING AND REMOVING TEMPORAL SERIES INSIDE THE CLUSTER WITH ZERO VALUES
zero.series <- unique(which(series==0, arr.ind = TRUE)[,2])
if(length(zero.series)==0){series.def <- series}else{series.def <- series[,-(zero.series)]}
#CHECKING THE SIZE OF THE CLUSTER: q=1 or q>1
#is.vector(series.def)==TRUE THEN q=1 OTHERWISE q>1
if(is.vector(series.def)){
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))}else{
series.def <- apply(series.def, 2, function(x){(x-mean(x))/sd(x)})
#PRIOR HYPERPARAMETERS FOR q1>1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=dim(series.def)[2])
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
delta1 <- sqrt(delta)
#DISCOUNT FACTORS MATRIX
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
}
res <- .Individual_Backwards_Sampling(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
#EVIDENCE OF ACTIVATION FOR A SINGLE VOXEL TAKING INTO ACCOUNT THE INFORMATION OF THE ENTIRE CLUSTER OF SIZE q
return(list(EvidenceJoint = as.vector(res$Eviden_joint), EvidenceMargin = as.vector(res$Eviden_margin), EvidenLTT=as.vector(res$eviden_lt)))
}
}
}
#END FUNCTION
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdSingleVoxelFFBS.R
|
#' @name .ffdSingleVoxelFSTS
#' @title .ffdSingleVoxelFSTS
#' @description
#' this is an internal function
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' this is an internal function
#' @param posi.ffd the position of the voxel in the brain image.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param ffdc a 4D array (ffdc[i,j,k,t]) that contains the sequence of MRI images that are meant to be analyzed. (i,j,k) define the position of the observed voxel at time t.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at t=0 (m=0 is the default value when no prior information is available). For the case of available prior information, m0 can be defined as a pXr matrix, where p is the number of columns in the covariates object and r is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at t=0 (Cova=100 is the default value when no prior information is available). For the case of available prior information, Cova0 can be defined as a pXp matrix, where p is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between 0.85<delta<1. delta=1 will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure among voxels within every cluster at t=0. S0=1 is the default value when no prior information is available and defines an rXr identity matrix. For the case of available prior information, S0 can be defined as an rXr matrix, where r is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix S0 at t=0 (n=1 is the default value when no prior information is available). For the case of available prior information, n0 can be set as n0=np, where np is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (2<N1<T) from the ffdc array employed in the model fitting. N1=NULL (or equivalently N1=T) is its default value, taking all the images in the ffdc array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (30<Cutpos1<T).
#' @param Min.vol helps to define a threshold for the voxels considered in
#' the analysis. For example, Min.vol = 0.10 means that all the voxels with values
#' below max(ffdc)*Min.vol can be considered irrelevant and discarded from the analysis.
#' @param r1 a non-negative integer that defines the distance between every voxel and its most distant neighbor. This value determines the size of the cluster. The users can set a range of different r values: r = 0, 1, 2, 3, 4, which leads to q = 1, 7, 19, 27, 33, where q is the size of the cluster.
#' @keywords internal
.ffdSingleVoxelFSTS <- function(posi.ffd, covariates, ffdc, m0, Cova, delta, S0, n0, N1, Nsimu1, Cutpos1, Min.vol, r1){
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
#BOLD RESPONSE SERIES IN THE CLUSTER RELATED TO posi
series.def <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: JUST TEMPORAL SERIES ABOVE THE THRESHOLD, DISCARD TEMPORAL SERIES WITH NON-SIGNIFICANT SIGNAL
if(min(series.def[,1]) < Min.vol){
return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
res <- .Individual_Functional_States(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
return(list(EvidenceJoint = as.vector(res$Eviden_joint), EvidenceMargin = as.vector(res$Eviden_margin), EvidenLTT=as.vector(res$eviden_lt)))
}
}else{
    #THIS LINE RETURNS THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
aux.pos <- dim(ffdc)[1:3]
#GOING THROUGH EACH ROW AND CHECKING IF ANY POSITION IS OUTSIDE THE BOUNDS
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR THE CLUSTER RELATED TO posi
series <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: JUST TEMPORAL SERIES ABOVE THE THRESHOLD, DISCARD TEMPORAL SERIES WITH NON-SIGNIFICANT SIGNAL
if(min(series[,1]) < Min.vol){return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
# IDENTIFYING AND REMOVING TEMPORAL SERIES INSIDE THE CLUSTER WITH ZERO VALUES
zero.series <- unique(which(series==0, arr.ind = TRUE)[,2])
if(length(zero.series)==0){series.def <- series}else{series.def <- series[,-(zero.series)]}
#CHECKING THE SIZE OF THE CLUSTER: q=1 or q>1
#is.vector(series.def)==TRUE THEN q=1 OTHERWISE q>1
if(is.vector(series.def)){
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))}else{
series.def <- apply(series.def, 2, function(x){(x-mean(x))/sd(x)})
#PRIOR HYPERPARAMETERS FOR q1>1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=dim(series.def)[2])
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
delta1 <- sqrt(delta)
#DISCOUNT FACTORS MATRIX
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
}
res <- .Individual_Functional_States(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
#EVIDENCE OF ACTIVATION FOR A SINGLE VOXEL TAKING INTO ACCOUNT THE INFORMATION OF THE ENTIRE CLUSTER OF SIZE q
return(list(EvidenceJoint = as.vector(res$Eviden_joint), EvidenceMargin = as.vector(res$Eviden_margin), EvidenLTT=as.vector(res$eviden_lt)))
}
}
}
#END FUNCTION
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/ffdSingleVoxelFSTS.R
|
#' @name get_example_fMRI_data
#' @title get_example_fMRI_data
#' @description
#' This function is used to download the example data used in the Vignettes.
#' @references
#' \insertRef{pernet2015human}{BayesDLMfMRI}
#'
#' \insertRef{gorgolewski2017openneuro}{BayesDLMfMRI}
#' @details
#' The data for this example is related to an fMRI experiment where a sound stimulus is presented.
#' That experiment is intended to offer a "voice localizer" scan, which allows rapid and reliable
#' localization of the voice-sensitive "temporal voice areas" (TVA) of the human auditory cortex
#' \insertCite{pernet2015human}{BayesDLMfMRI}. The data of this "voice localizer" scan is freely
#' available on the online platform OpenNEURO \insertCite{gorgolewski2017openneuro}{BayesDLMfMRI}.
#' @param save_path location where the example data is stored.
#' @param force force the download, even if the data already exists.
#' @param subject The example subject; must be either 1 or 2.
#' @return It returns an array of dimensions \code{[91, 109, 91, 310]}.
#' @examples
#'\dontrun{
#' # This example can take a long time to run.
#' fMRI.data <- get_example_fMRI_data()
#' }
#' @export
get_example_fMRI_data <- function(save_path=NULL, force=FALSE, subject=1) {
if(is.null(save_path)) {
save_path <- tempdir()
}
  if(length(subject) > 1) {
    stop("To load multiple subjects use get_example_fMRI_data_group")
  }
if(!(subject %in% c(1,2))) {
stop("The subject must be 1 or 2")
}
if(subject == 1) {
url_list <- c("https://johnatanlab.github.io/files/test_1.rds",
"https://johnatanlab.github.io/files/test_2.rds",
"https://johnatanlab.github.io/files/test_3.rds"
)
} else {
url_list <- c("https://johnatanlab.github.io/files/test_s2_1.rds",
"https://johnatanlab.github.io/files/test_s2_2.rds",
"https://johnatanlab.github.io/files/test_s2_3.rds"
)
}
dir.create(save_path, showWarnings = FALSE)
result_list <- list()
for(i in 1:length(url_list)) {
path_1 <- file.path(save_path,paste0("s",subject,"_test_",i,".rds") )
    if(!file.exists(path_1) || force) {
download.file(url_list[i], destfile = path_1, quiet = FALSE)
}
result_list[[i]] <- readRDS(path_1)
}
  # reference an oro.nifti symbol (keeps the declared import in use)
  temp <- oro.nifti::dim_
fMRI.data <- abind::abind(result_list, along = 1)
attr(fMRI.data, "dimnames") <- NULL
fMRI.data <- unname(fMRI.data)
return(fMRI.data)
}
#' @name get_example_fMRI_data_group
#' @title get_example_fMRI_data_group
#' @description
#' This function is used to download the example data used in the Vignettes.
#' @references
#' \insertRef{pernet2015human}{BayesDLMfMRI}
#'
#' \insertRef{gorgolewski2017openneuro}{BayesDLMfMRI}
#' @details
#' The data for this example is related to an fMRI experiment where a sound stimulus is presented.
#' That experiment is intended to offer a "voice localizer" scan, which allows rapid and reliable
#' localization of the voice-sensitive "temporal voice areas" (TVA) of the human auditory cortex
#' \insertCite{pernet2015human}{BayesDLMfMRI}. The data of this "voice localizer" scan is freely
#' available on the online platform OpenNEURO \insertCite{gorgolewski2017openneuro}{BayesDLMfMRI}.
#' @param save_path location where the example data is stored.
#' @param force force the download, even if the data already exists.
#' @return It returns a list in which each element is an array of dimensions \code{[91, 109, 91, 310]}.
#' @examples
#'\dontrun{
#' # This example can take a long time to run.
#' DatabaseGroup <- get_example_fMRI_data_group()
#' }
#' @export
get_example_fMRI_data_group <- function(save_path=NULL, force=FALSE) {
result_list <- list()
result_list[[1]] <- get_example_fMRI_data(save_path = save_path,
force = force,
subject = 1)
result_list[[2]] <- get_example_fMRI_data(save_path = save_path,
force = force,
subject = 2)
return(result_list)
}
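
# Illustrative note: for your own data, the group object is simply a list with
# one 4D array ([x, y, z, t]) per subject, e.g.
#   DatabaseGroup <- list(subject1.array, subject2.array)
# where subject1.array and subject2.array are hypothetical arrays already loaded
# by the user (for instance with oro.nifti::readNIfTI()).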
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/load_data.R
|
#' @name .get_n_cores
#' @title .get_n_cores
#' @description
#' return the number of cores.
#' @param Ncores number of cores
#' @keywords internal
.get_n_cores <- function(Ncores) {
# check ncores
if(!is.null(Ncores)) {
if(is.na(Ncores)) {
Ncores <- 1
}
if(!is.numeric(Ncores)) {
stop("The number of cores must be numeric")
}
Ncores <- as.integer(Ncores)
if(Ncores < 1) {
stop("The number of cores must be greater than zero")
}
}
return(Ncores)
}
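# Usage note (sketch): the value returned by .get_n_cores() is handed straight
# to the `cl` argument of pbapply::pbapply(), which accepts NULL (sequential
# execution) or a positive integer (parallel workers). For example:
#   .get_n_cores(NULL)  # NULL -> sequential run
#   .get_n_cores(4)     # 4L   -> four workers
#   .get_n_cores(0)     # error: the number of cores must be greater than zero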
#' @name .check_ffdgroup
#' @title .check_ffdgroup
#' @description
#' validate ffdGroup
#' @param ffdGroup group
#' @keywords internal
.check_ffdgroup <- function(ffdGroup) {
if(!is.list(ffdGroup)) {
stop("ffdGroup must be a list.")
}
for(i in 1:length(ffdGroup)) {
ffdc <- ffdGroup[[i]]
# Check ffdc
if(!is.null(ffdc)) {
if(!is.array(ffdc)) {
stop("all elements of ffdGroup must be an array.")
}
if(length(dim(ffdc)) != 4 ) {
stop("all elements of ffdGroup must be an 4D array.")
}
}
}
}
#' @name .validate_input
#' @title .validate_input
#' @description
#' validate input
#' @param N1 is the number of images (2<N1<T) from the ffdc array employed in the model fitting. N1=NULL (or equivalently N1=T) is its default value, taking all the images in the ffdc array for the fitting process.
#' @param Test test type either "LTT" (Average cluster effect) or "JointTest" (Joint effect).
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param ffdc a 4D array (ffdc[i,j,k,t]) that contains the sequence of MRI images that are meant to be analyzed. (i,j,k) define the position of the observed voxel at time t.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param r1 a non-negative integer that defines the distance between every voxel and its most distant neighbor. This value determines the size of the cluster. The users can set a range of different r values: r = 0, 1, 2, 3, 4, which leads to q = 1, 7, 19, 27, 33, where q is the size of the cluster.
#' @param delta a discount factor related to the evolution variances. Recommended values between 0.85<delta<1. delta=1 will yield results similar to the classical general linear model.
#' @param perVol helps to define a threshold for the voxels considered in the analysis. For example, perVol = 0.10 means that all the voxels with values
#' below max(ffdc)*perVol can be considered irrelevant and discarded from the analysis.
#' @param Min.vol helps to define a threshold for the voxels considered in
#' the analysis. For example, Min.vol = 0.10 means that all the voxels with values
#' below max(ffdc)*Min.vol can be considered irrelevant and discarded from the analysis.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix S0 at t=0 (n=1 is the default value when no prior information is available). For the case of available prior information, n0 can be set as n0=np, where np is the number of MRI images in the pilot sample.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (30<Cutpos1<T).
#' @param ffdGroup group
#' @keywords internal
.validate_input <- function(N1=NULL, Test=NULL, Nsimu1=NULL,
ffdc=NULL, covariates=NULL,
r1 = NULL, delta=NULL, perVol=NULL,Min.vol=NULL,
n0 = NULL, Cutpos1=NULL, ffdGroup=NULL) {
# .check_ffdgroup
if(!is.null(ffdGroup)) {
.check_ffdgroup(ffdGroup)
}
  # check n0
if(!is.null(n0)) {
if(n0 < 0 ) {
stop("n0 must be non-negative" )
}
}
# check delta
if(!is.null(delta)) {
if( (delta >= 1) | (delta <= 0) ) {
stop("The discount factor, delta must between 0 and 1" )
}
}
# check perVol
if(!is.null(perVol)) {
if(perVol < 0 ) {
stop("The threshold for the voxels, perVol must be non-negative" )
}
}
# check Min.vol
if(!is.null(Min.vol)) {
if(Min.vol < 0 ) {
stop("The threshold for the voxels, Min.vol must be non-negative" )
}
}
# check r1
if(!is.null(r1)) {
if(r1 < 0 ) {
stop("r1 must be non-negative" )
}
}
# check Cutpos1
if(!is.null(Cutpos1)) {
if(Cutpos1 < 0 ) {
stop("the cutpoint, Cutpos1 must be non-negative" )
}
}
# check N1
if(!is.null(N1)) {
if( !(N1 > 2) ) {
stop("The number of images must be grater than 2")
}
}
# check Nsimu1
if(!is.null(Nsimu1)) {
if( !(Nsimu1 > 2) ) {
stop("The numbersimulations must be grater than 2")
}
}
# check test type
if(!is.null(Test)) {
if(!(Test %in% c("LTT", "JointTest"))) {
stop("Test must be LTT or JointTest")
}
}
# Check ffdc
if(!is.null(ffdc)) {
if(!is.array(ffdc)) {
stop("ffdc must be an array.")
}
if(length(dim(ffdc)) != 4 ) {
stop("ffdc must be a 4D array.")
}
}
# check covariates
if(!is.null(covariates)) {
if(!is.data.frame(covariates)) {
stop("covariates must be a dataframe.")
}
if(length(dim(covariates)) != 2) {
stop("covariates must be a 2D array.")
}
}
}
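## Usage sketch (hypothetical arguments): each check only fires when the
## corresponding argument is non-NULL, so the validator can be called with
## any subset of its inputs, e.g.
## .validate_input(delta = 0.95, Test = "LTT")  # passes silently
## .validate_input(delta = 1.2)                 # error: delta must be between 0 and 1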
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/R/validate_input.R
|
---
title: "Group_analysis"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Group_analysis}
%\VignetteEngine{knitr::rmarkdown_notangle}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
is_check <- ("CheckExEnv" %in% search()) || any(c("_R_CHECK_TIMINGS_",
"_R_CHECK_LICENSE_") %in% names(Sys.getenv()))
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.align = "center",
eval = !is_check, purl = !is_check
)
```
```{r setup}
library(BayesDLMfMRI)
library(oro.nifti)
library(neurobase)
```
Now we illustrate how to run an fMRI group analysis as described in Cardona-Jiménez and Pereira [2021]. In this first version of the BayesDLMfMRI package, we implement functions to detect brain activation for single groups. The comparison analysis between groups is under development and will be offered in future versions of the package. First, we read the fMRI images of 2 subjects taken from the "voice-localizer" example:
```{r}
DataGroups <- get_example_fMRI_data_group()
```
The complete dataset (21 subjects) and results are available in Cardona-Jiménez and Pereira [2021].
```{r}
# load example covariates and mask
data("covariates", package="BayesDLMfMRI")
data("mask", package="BayesDLMfMRI")
```
In order to run any of the functions available in this package to perform fMRI group analysis, the data sets or sets of images from each subject must be stored in a list object, as shown above. To deal with this massive amount of information, the user must have a large amount of RAM available on the machine where this process is run. It is also recommended to have a multi-core processor available in order to speed up computation time. The arguments or input parameters for any of the functions offered in this package to run group analysis are almost identical to those required for individual analysis. There is only one additional argument needed (mask), which adds a 3D array that works as a reference brain (MNI atlas) for the group analysis.
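As a quick structural check (a sketch; output not shown), each element of the list should be a 4D array whose fourth dimension indexes time:
```{r, eval = FALSE}
# One 4D array (x, y, z, time) per subject
length(DataGroups)   # number of subjects
dim(DataGroups[[1]]) # dimensions of the first subject's fMRI array
```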
```{r}
res <- ffdGroupEvidenceFFBS(ffdGroup = DataGroups, covariates = Covariates,
m0=0, Cova=100, delta = 0.95, S0 = 1, n0 = 1, N1 = FALSE, Nsimu1 = 100,
Cutpos = 30, r1 = 1, mask = mask, Ncores = 15)
```
```{r}
str(res)
```
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/vignettes/Group_analysis.Rmd
|
---
title: "Individual_analysis"
output:
rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Individual_analysis}
%\VignetteEngine{knitr::rmarkdown_notangle}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
is_check <- ("CheckExEnv" %in% search()) || any(c("_R_CHECK_TIMINGS_",
"_R_CHECK_LICENSE_") %in% names(Sys.getenv()))
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.align = "center",
eval = !is_check, purl = !is_check
)
```
```{r setup}
library(BayesDLMfMRI)
library(oro.nifti)
library(neurobase)
```
To run any of the functions related to individual analysis, the user must provide two inputs: an array of four dimensions containing the sequence of MRI images and a matrix whose columns have the covariates to model the observed BOLD response. Thus, we load the data, which contains the MRI images, using the get_example_fMRI_data() function. The covariates, specifically the expected BOLD response and its derivative, are contained in the Covariates data frame. The rationale for including the temporal derivative is that this basis can capture small offsets in the time to peak of the hemodynamic response.
```{r}
# load example data
fMRI.data <- get_example_fMRI_data()
fMRI.data |> dim()
```
```{r}
# load example covariates
data("covariates", package="BayesDLMfMRI")
Covariates |> dim()
```
To perform an individual voxel-wise analysis and consequently obtain a 3D array of measurements of activation, the user can choose among three different functions: ffdEvidenceFETS, ffdEvidenceFFBS and ffdEvidenceFSTS. These functions can yield three types of evidence measurements for voxel activation: Marginal effect, Average cluster effect, and Joint effect. To illustrate their use and functionality, we run an example from the "voice localizer" data.
```{r}
res <- ffdEvidenceFETS(ffdc = fMRI.data,
covariates = Covariates,
m0 = 0, Cova = 100, delta = 0.95,
S0 = 1, n0 = 1, Nsimu1 = 100, Cutpos1 = 30,
r1 = 1, Test = "LTT", Ncores = 15)
```
The arguments m0, Cova, S0 and n0 are the hyper-parameters related to the joint prior distribution of $(\Theta_{{[i,j,k]t\mathstrut}}^{{(z)\mathstrut}}, \Sigma_{{[i,j,k]t\mathstrut}}^{{(z)\mathstrut}})$. For this example, we set a "vague" prior distribution according to Quintana and West [1987], where m0 = 0 defines a null matrix with zero values in all its entries, and Cova = 100 and S0 = 1 respectively define the values for the diagonal matrices $C_0$ and $S_0$ as defined in Cardona-Jiménez and Pereira [2021]. r1 is the Euclidean distance that defines the size of the cluster of voxels jointly modeled. Test is the parameter related to the test selected by the user, for which there are two options: "LTT" and "Joint". Ncores is the argument related to the number of cores when the process is executed in parallel. Nsimu1 is the number of simulated on-line trajectories related to the state parameter $\Theta_{{[i,j,k]t\mathstrut}}^{{(z)\mathstrut}}$. From our own experience dealing with different sets of fMRI data, we recommend Nsimu1 = 100 as a good number of draws to obtain reliable results. Cutpos1 is the time point from which the on-line trajectories are considered in the computation of the activation evidence, and delta is the value of the discount factor. For a better understanding of how to set these two last arguments, see Cardona-Jiménez and Pereira [2021].
```{r}
str(res)
```
```{r}
dim(res[[1]])
```
The output of the ffdEvidenceFETS function depends on the type of Test set by the user. For Test = "LTT" the function returns a list of the type res[[p]][x, y, z], where p represents the column position in the covariates matrix and [x, y, z] represents the voxel position in the brain image. Thus, for the "voice localizer" example, res[[1]] and res[[2]] are the 3D arrays related to the evidence for brain activation associated with the BOLD response for the auditory stimuli and its derivative, respectively. When Test = "Joint" the output returned is an array of the type res[[2*p]][x, y, z], with the first p elements of the array related to the Joint effect distribution and the rest of it to the Marginal effect distribution.
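For instance, a simple way to summarize an evidence map is the proportion of voxels exceeding a cutoff (a sketch using base R only; the 0.95 cutoff is purely illustrative, not a package recommendation):
```{r, eval = FALSE}
evidence <- res[[1]]                 # 3D evidence map for the expected BOLD response
mean(evidence > 0.95, na.rm = TRUE)  # fraction of voxels above the cutoff
```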
```{r}
plot(res, overlay=ffd, index=1, col.y = heat.colors(50), ycolorbar = TRUE, ybreaks = seq(0.95, 1, by = 0.001))
```
The neurobase package is one option available in R for visualizing MRI images. In this example, we use its ortho2() function to plot the evidence activation map. The ffd data contains the MNI brain atlas, which is used in this example as a reference space for individual and group analysis. For a better understanding of the use of brain atlases, see Brett et al. [2002].
|
/scratch/gouwar.j/cran-all/cranData/BayesDLMfMRI/vignettes/Individual_analysis.Rmd
|
B2Size=function(shape,S0,x,ta,tf,a,delta,eta,zeta,frac,xi,emax,dist)
{
S1=S0^delta
tau=ta+tf
if (dist=="WB"){
f=function(a,b,u){dweibull(u,a,b)}
scale1=x/(-log(S1))^(1/shape)
}
if (dist=="LN"){
f=function(a,b,u){dlnorm(u,b,a)}
scale1=log(x)-shape*qnorm(1-S1)
}
if (dist=="LG"){
f=function(a,b,u){(a/b)*(u/b)^(a-1)/(1+(u/b)^a)^2}
scale1=x/(1/S1-1)^(1/shape)
}
if (dist=="GM"){
s=function(a,b,u){1-pgamma(u,a,b)}
f=function(a,b,u){dgamma(u,a,b)}
root1=function(t){s(shape,t,x)-S1}
scale1=uniroot(root1,c(0,10))$root
}
  # G(u) = P(follow-up time exceeds u) under uniform accrual over [0, ta]
  G=function(u){1-punif(u, tf, tau)}
  g=function(u){f(shape,scale1,u)*G(u)}
  # p = probability that a subject's event is observed by the end of study
  p=integrate(g, 0, tau)$value
  b=2*a/(1+delta)
  zeta=eta*(1-xi)  # note: this overwrites the zeta argument
for (i in 1:emax){
ff=function(k){
eta-pgamma(1,shape=a+i,rate=b+k)
}
k=uniroot(ff,c(0, 999))$root
m=i; k=round(k,2)
exp=pgamma(delta,shape=a+i,rate=b+k)
if (exp<=1-zeta) break
}
m1=ceiling(m*frac)
zeta1=eta*(1-xi*frac)
f1=function(k){
zeta1-(1-pgamma(delta,shape=a+m1,rate=b+k))
}
k1=uniroot(f1,c(0, 999))$root
n1=ceiling(m1/p)
n=ceiling(m/p)
ans=round(c(a, frac, eta, zeta1, zeta, m1,n1,k1,m,n,k),3)
return(ans)
}
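## Example call (hypothetical inputs, chosen to mirror the BSize() example in
## BSize.R; the frac and xi values are purely illustrative):
## B2Size(shape=1, S0=0.53, x=3, ta=4, tf=2, a=2, delta=0.6, eta=0.95,
##        zeta=0.85, frac=0.5, xi=0.05, emax=100, dist="WB")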
|
/scratch/gouwar.j/cran-all/cranData/BayesDesign/R/B2Size.R
|
BSize=function(shape,S0,x,ta,tf,a,delta,eta,zeta,emax,dist)
{
S1=S0^delta
tau=ta+tf
if (dist=="WB"){
f=function(a,b,u){dweibull(u,a,b)}
scale1=x/(-log(S1))^(1/shape)
}
if (dist=="LN"){
f=function(a,b,u){dlnorm(u,b,a)}
scale1=log(x)-shape*qnorm(1-S1)
}
if (dist=="LG"){
f=function(a,b,u){(a/b)*(u/b)^(a-1)/(1+(u/b)^a)^2}
scale1=x/(1/S1-1)^(1/shape)
}
if (dist=="GM"){
s=function(a,b,u){1-pgamma(u,a,b)}
f=function(a,b,u){dgamma(u,a,b)}
root1=function(t){s(shape,t,x)-S1}
scale1=uniroot(root1,c(0,10))$root
}
G=function(u){1-punif(u, tf, tau)}
g=function(u){f(shape,scale1,u)*G(u)}
p=integrate(g, 0, tau)$value
b=2*a/(1+delta)
for (s in 1:emax){
ff=function(k){
pgamma(1,shape=a+s,rate=b+k)-eta
}
k=uniroot(ff,c(0,100))$root
m=s; k=round(k,2)
exp=pgamma(delta,shape=a+s,rate=b+k)
if (exp<=1-zeta) break
}
n=ceiling(m/p)
return(c(eta,zeta,m,n,k))
}
## Example call (previously executed at the top level of this file; commented
## out so that sourcing the file has no side effects):
## BSize(shape=1,S0=0.53,x=3,ta=4,tf=2,a=2,delta=0.6,eta=0.95,
##       zeta=0.85,emax=100,dist="WB")
|
/scratch/gouwar.j/cran-all/cranData/BayesDesign/R/BSize.R
|
optimal_OneStage <- function(alphacutoff, powercutoff, S0,
x, ta, tf, a = 2, delta, ntrial,
complete = "partial", seed = 8232){
shape <- 1
dist <- "WB"
eta <- seq(0.8, 0.95, by = 0.05)
zeta <- seq(0.8, 0.9, by = 0.01)
oc_mat <- NULL
for (i in 1:length(eta)){
for (j in 1:length(zeta)){
bsize <- BSize(shape=shape,S0=S0,x=x,ta=ta,tf=tf,a = a,delta=delta,
eta=eta[i],zeta=zeta[j],emax=100,dist = dist)
h0 <- pow_OneStage(S0=S0,x=x,delta=1,ta=ta,tf=tf,m1=bsize[3],k1=bsize[5],
N=500,M=ntrial, seed = seed)
h1 <- pow_OneStage(S0=S0,x=x,delta=delta,ta=ta,tf=tf,m1=bsize[3],k1=bsize[5],
N=500,M=ntrial, seed = seed)
## type I error
typeI <- h0[1]
## power
power <- h1[1]
if((typeI <= alphacutoff) & (powercutoff <= power)){
oc_mat <- rbind(oc_mat, c(eta[i], zeta[j], bsize[3], bsize[4], bsize[5],
typeI, power, h1[2], h0[2]))
}
}
}
if (complete == "partial"){
oc_df <- data.frame(oc_mat)[, -c(1, 2)]
colnames(oc_df) <- c("m", "n", "k", "typeI", "power", "ES1", "ES0")
out <- oc_df[oc_df$n == min(oc_df$n), ]
}
else if (complete == "complete"){
oc_df <- data.frame(oc_mat)
colnames(oc_df) <- c("eta", "zeta", "m", "n", "k", "typeI", "power", "ES1", "ES0")
out <- oc_df[oc_df$n == min(oc_df$n), ]
  }
  else {
    stop("complete must be either 'partial' or 'complete'.")
  }
return(out)
}
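## Example call (hypothetical design inputs; ntrial kept small for speed):
## optimal_OneStage(alphacutoff = 0.10, powercutoff = 0.80, S0 = 0.53,
##                  x = 3, ta = 4, tf = 2, delta = 0.6, ntrial = 100)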
|
/scratch/gouwar.j/cran-all/cranData/BayesDesign/R/optimal_OneStage.R
|
optimal_TwoStage <- function(alphacutoff, powercutoff, S0, x, ta,
tf, a = 2, delta, frac = .5, ntrial,
complete = "partial", seed = 8232){
shape <- 1
dist <- "WB"
eta <- seq(0.8, 0.95, by = 0.05)
xi <- seq(0.01, 0.15, by = 0.01)
oc_mat <- NULL
for (i in 1:length(eta)){
for (j in 1:length(xi)){
bsize <- B2Size(shape=shape,S0=S0,x=x,ta=ta,tf=tf,a = a,delta=delta,
eta=eta[i],frac=frac,xi=xi[j],emax=100,dist = dist)
h0 <- pow_TwoStage(S0=S0,x=x,delta=1,ta=ta,tf=tf,m1=bsize[6],m2=bsize[9],
k1=bsize[8],k2=bsize[11],N=500,M=ntrial, seed = seed)
h1 <- pow_TwoStage(S0=S0,x=x,delta=delta,ta=ta,tf=tf,m1=bsize[6],m2=bsize[9],
k1=bsize[8],k2=bsize[11],N=500,M=ntrial, seed = seed)
## type I error
typeI <- h0[1]
## power
power <- h1[1]
if((typeI <= alphacutoff) & (powercutoff <= power)){
oc_mat <- rbind(oc_mat, c(eta[i], xi[j], bsize[6], bsize[7], bsize[8],
bsize[9], bsize[10], bsize[11], typeI, power,
h1[2], h1[3], h0[2], h0[3]))
}
}
}
if (complete == "partial"){
oc_df <- data.frame(oc_mat)[, -c(1, 2)]
colnames(oc_df) <- c("m1", "n1", "k1", "m", "n", "k", "typeI", "power", "PET1", "ES1", "PET0", "ES0")
out <- oc_df[oc_df$n == min(oc_df$n), ]
}
else if (complete == "complete"){
oc_df <- data.frame(oc_mat)
colnames(oc_df) <- c("eta", "xi", "m1", "n1", "k1", "m", "n", "k", "typeI", "power", "PET1", "ES1", "PET0", "ES0")
out <- oc_df[oc_df$n == min(oc_df$n), ]
  }
  else {
    stop("complete must be either 'partial' or 'complete'.")
  }
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDesign/R/optimal_TwoStage.R
|
pow_OneStage=function(S0,x,delta,ta,tf,m1,k1,N,M,seed)
{
lambda0=-log(S0)/x
lambda=delta*lambda0
s0=function(t){exp(-lambda0*t)}
tau=ta+tf
set.seed(seed)
s=0
pts <- 0
for (j in 1:M){
time=rexp(N, rate=lambda)
A=runif(N, 0, ta)
data=data.frame(Entry=A, time=time)
data0=data[order(data$Entry),]
time0=data0$time
A0=data0$Entry
cens=as.numeric(time0<tau-A0)
x=pmin(time0,tau-A0)
d = cumsum(cens)
s1 = min(which(d == m1))
x1=x[1:s1]
cens1=cens[1:s1]
w1=-log(s0(x1))
U1=sum(w1)
if (U1>k1) {
s=s+1
pts <- pts + s1
}
else{
pts <- pts + s1
}
}
pow=round(s/M,3)
pts_rate <- round(pts/M, 3)
return(c(pow, pts_rate))
}
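## Example call (hypothetical m1 and k1; in practice these come from BSize()):
## pow_OneStage(S0=0.53, x=3, delta=0.6, ta=4, tf=2, m1=20, k1=25,
##              N=500, M=1000, seed=8232)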
|
/scratch/gouwar.j/cran-all/cranData/BayesDesign/R/pow_OneStage.R
|
pow_TwoStage=function(S0,x,delta,ta,tf,m1,m2,k1,k2,N,M,seed)
{
lambda0=-log(S0)/x
lambda=delta*lambda0
s0=function(t){exp(-lambda0*t)}
tau=ta+tf
set.seed(seed)
s=0
earlystopping <- 0
pts <- 0
for (j in 1:M){
time=rexp(N, rate=lambda)
A=runif(N, 0, ta)
data=data.frame(Entry=A, time=time)
data0=data[order(data$Entry),]
time0=data0$time
A0=data0$Entry
cens=as.numeric(time0<tau-A0)
x=pmin(time0,tau-A0)
d = cumsum(cens)
s1 = min(which(d == m1))
s2 = min(which(d == m2))
x1=x[1:s1]
cens1=cens[1:s1]
w1=-log(s0(x1))
U1=sum(w1)
x2=x[1:s2]
w2=-log(s0(x2))
U2=sum(w2)
if (U1>k1 & U2>k2) {
s=s+1
pts <- pts + s2
}
else if (U1 <= k1) {
earlystopping <- earlystopping + 1
pts <- pts + s1
}
else {
pts <- pts + s2
}
}
pow=round(s/M,3)
es_rate <- round(earlystopping/M, 3)
pts_rate <- round(pts/M, 3)
return(c(pow, es_rate, pts_rate))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDesign/R/pow_TwoStage.R
|
tot_time <- function(obs_time, S0, x){
lambda0 <- -log(S0)/x
s0 <- function(t){exp(-lambda0*t)}
w <- -log(s0(obs_time))
U <- sum(w)
return(U)
}
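## Example (hypothetical observed times): with S0 = 0.53 at x = 3,
## each w equals lambda0 * obs_time, so
## tot_time(obs_time = c(1.2, 0.8, 2.5), S0 = 0.53, x = 3)
## returns lambda0 * 4.5, where lambda0 = -log(0.53)/3.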
|
/scratch/gouwar.j/cran-all/cranData/BayesDesign/R/tot_time.R
|
#' Bayesian Multivariate Normal Model for Dissolution Profile Modeling
#'
#' This function implements the Bayesian multivariate normal model described in Pourmohamad et al (2022).
#'
#' @param dis_data A data frame containing the dissolution data. The first column of the data frame should denote
#' the group labels identifying whether a given dissolution belongs to the "reference" or "test" formulation group.
#' For a given dissolution run, the remaining columns of the data frame contains the individual run's dissolution
#' measurements sorted in time. Alternatively, the user may provide a data object of class dis_data containing the
#' dissolution data. See the \code{make_dis_data()} function for the particular structure of the data object.
#' @param B A positive integer specifying the number of posterior samples to draw. By default \code{B} is set to 10000.
#' @return The function returns a list of B posterior samples for the following parameters:
#' \itemize{
#' \item delta: A vector of posterior samples of delta as defined in Novick et. al 2015
#' \item f2: A vector of posterior values of f2
#' \item muR: A matrix of posterior samples for the reference group mean. Each row of the matrix corresponds to an observed time point, and each column of the matrix corresponds to a posterior sample.
#' \item muT: A matrix of posterior samples for the test group mean. Each row of the matrix corresponds to an observed time point, and each column of the matrix corresponds to a posterior sample.
#' }
#' @note You should always check MCMC diagnostics on the posterior samples before drawing conclusions.
#' @references Novick, S., Shen, Y., Yang, H., Peterson, J., LeBlond, D., and Altan, S. (2015). Dissolution Curve Comparisons Through the F2 Parameter, a Bayesian Extension of the f2 Statistic. Journal of Biopharmaceutical Statistics, 25(2):351-371.
#' @references Pourmohamad, T., Oliva Aviles, C.M., and Richardson, R. (2022). Gaussian Process Modeling for Dissolution Curve Comparisons. Journal of the Royal Statistical Society, Series C, 71(2):331-351.
#' @examples
#' ### dis_data comes loaded with the package
#' ### We set B = 1000 to obtain 1000 posterior samples
#' B <- 1000
#' post <- bmn(dis_data, B = B)
#'
#' ### We can check how well the posterior samples of the means are mixing by
#' ### plotting the individual chains by time point
#' burnin <- B * 0.1 # A 10% burn-in
#' post$mu_R <- post$muR[,-(1:burnin)]
#' post$mu_T <- post$muT[,-(1:burnin)]
#'
#' N <- B - burnin # Number of posterior samples after burn-in
#' chains <- data.frame(samples = rep(c(1:N, 1:N), each = ncol(dis_data) - 1),
#' group = rep(c("muR", "muT"), each = (ncol(dis_data) - 1) * N),
#' timepoint = paste("Time Point", rep(1:(ncol(dis_data) - 1), 2 * N)),
#' values = c(c(post$mu_R), c(post$mu_T)))
#'
#' g <- ggplot2::ggplot(chains, ggplot2::aes(samples, values)) +
#' ggplot2::geom_line() +
#' ggplot2::labs(x = "Iterations", y = "Posterior Sample Values") +
#' ggplot2::facet_wrap(group ~ timepoint) +
#' ggplot2::theme(text = ggplot2::element_text(size = 16))
#'
#' ### If we want to calculate the Pr(f2 > 50)
#' post$f2<- post$f2[-(1:burnin)]
#' prob <- sum(post$f2 > 50) / (B - burnin)
#'
#' ### Or if we want calculate a 95% credible interval for f2
#' alpha <- 0.05
#' f2_cred <- c(quantile(post$f2, alpha / 2),quantile(post$f2, 1 - alpha / 2))
#'
#' @export
bmn <- function(dis_data, B = 10000){
if(class(dis_data)[1] == "dis_data"){
dis_data <- data.frame(rbind(data.frame(group = "Reference", dis_data$yRef), data.frame(group = "Test", dis_data$yTest)))
}
if(B <= 0 | !is.numeric(B)){
stop("B must be a positive integer.")
}else if(!is.data.frame(dis_data)){
stop("The dissolution data must be stored in a data frame.")
}else if(length(unique(dis_data[,1])) != 2){
stop("The dissolution data must contain 2 groups.")
}else if(ncol(dis_data) < 3){
stop("The dissolution data must contain at least 2 time points (but you probably intended for more than that).")
}else{
X <- cbind(2-(dis_data[,1] == dis_data[1,1]), dis_data[-1])
names(X)[1] <- "Group"
dat_R <- X[X$Group == 1,-1]
dat_T <- X[X$Group == 2,-1]
nlocs <- ncol(dat_R)
nreps <- nrow(dat_R)
Ybar_R <- apply(dat_R, 2, mean)
Ybar_T <- apply(dat_T, 2, mean)
S2_R <- stats::cov(dat_R) * (nreps - 1)
S2_T <- stats::cov(dat_T) * (nreps - 1)
nrun <- B
f2 <- rep(NA, nrun)
delta <- rep(NA, nrun)
muR <- matrix(NA, nrow = nlocs, ncol = B)
muT <- matrix(NA, nrow = nlocs, ncol = B)
    for(i in 1:nrun){
      # draw each group's covariance matrix and mean vector from the posterior
      V_R <- MCMCpack::riwish(nreps - 1, S2_R)
mu_R <- mnormt::rmnorm(1, Ybar_R, V_R / nreps)
V_T <- MCMCpack::riwish(nreps - 1, S2_T)
mu_T <- mnormt::rmnorm(1, Ybar_T, V_T / nreps)
delta[i] <- max(abs(mu_R - mu_T))
f2[i] <- 50 * log10(1 / sqrt(1 + (1 / length(mu_R)) * sum((mu_R - mu_T)^2)) * 100)
muR[,i] <- mu_R
muT[,i] <- mu_T
}
return(list(delta = delta, f2 = f2, muR = muR, muT = muT))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/bmn.R
|
#' A dissolution data set taken from Ocana et al. (2009).
#'
#' A dissolution data set that consists of dissolution measurements
#' taken on oral tablets made with metoclopramide hydrochloride. Of interest is
#' to test the similarity of metoclopramide hydrochloride tablets made with and without a
#' tensioactive (surfactant) ingredient.
#'
#' @format A data frame with 24 rows and 9 columns:
#' \describe{
#' \item{group}{An indicator of whether the dissolution run came from the reference or test group}
#' \item{X1}{The first time point at which measurements are made at.}
#' \item{X2}{The second time point at which measurements are made at.}
#' \item{X3}{The third time point at which measurements are made at.}
#' \item{X4}{The fourth time point at which measurements are made at.}
#' \item{X5}{The fifth time point at which measurements are made at.}
#' \item{X6}{The sixth time point at which measurements are made at.}
#' \item{X7}{The seventh time point at which measurements are made at.}
#' \item{X8}{The eight time point at which measurements are made at.}
#' ...
#' }
#' @source Ocana et al. (2009) <doi:10.1016/j.chemolab.2009.07.010>
"dis_data"
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/data.R
|
#' Dissolution Data Plot
#'
#' This function plots dissolution data sets.
#'
#' @param dis_data A data frame containing the dissolution data. The first column of the data frame should denote
#' the group labels identifying whether a given dissolution belongs to the "reference" or "test" formulation group.
#' For a given dissolution run, the remaining columns of the data frame contains the individual run's dissolution
#' measurements sorted in time. Alternatively, the user may provide a data object of class dis_data containing the
#' dissolution data. See the \code{make_dis_data()} function for the particular structure of the data object.
#' @param tp An optional vector of time points at which the dissolution data is measured at.
#' @param pch A vector of two elements specifying the plotting character to use for each group. If only one value is passed then the plotting character is the same for both groups.
#' @param color A vector of two elements specifying the color in the plot to associate with each group. If only one value is passed then the color choice is the same for both groups.
#' @param groups A vector of two elements specifying the name to use for each group in the plot.
#' @param legend_location A string that denotes the location of where the legend should appear. Possible options are "left", "top", "bottom", "right", and any logical combination of the four, e.g., "bottomright" or "topleft".
#' @param xlab A string specifying the x-axis label.
#' @param ylab A string specifying the y-axis label.
#' @param mean logical; if \code{TRUE}, plot the connected mean dissolution values for each group
#' @param var logical; if \code{TRUE}, calculate the variance of the dissolution data at each time point for each group. The values are placed at the top of the plot over the corresponding time point.
#' @param var_label logical; if \code{TRUE}, use the group labels when printing out the variances.
#' @param ... other graphical parameters commonly found in \link[graphics]{plot.default}
#' @return The function returns a plot of the dissolution data.
#' @examples
#' ### dis_data comes loaded with the package
#' dissplot(dis_data)
#'
#' @importFrom graphics legend lines matplot mtext par
#' @export
dissplot <- function(dis_data, tp = NULL, pch = c(19, 17), color = c("gray65", "black"), groups = c("Reference", "Test"), legend_location = "bottomright",
xlab = "Time Points", ylab = "Percentage Dissolved", mean = FALSE, var = FALSE, var_label = TRUE, ...){
if(class(dis_data)[1] == "dis_data"){
dis_data <- data.frame(rbind(data.frame(group = "Reference", dis_data$yRef), data.frame(group = "Test", dis_data$yTest)))
}
if(!is.data.frame(dis_data)){
stop("The dissolution data must be stored in a data frame.")
}else if(length(unique(dis_data[,1])) != 2){
stop("The dissolution data must contain 2 groups.")
}else if(ncol(dis_data) < 3){
stop("The dissolution data must contain at least 2 time points (but you probably intended for more than that).")
}else{
X <- cbind(2-(dis_data[,1] == dis_data[1,1]), dis_data[-1])
names(X)[1] <- "Group"
dat_R <- X[X$Group == 1,-1]
dat_T <- X[X$Group == 2,-1]
nlocs <- ncol(dat_R)
nreps <- nrow(dat_R)
Ybar_R <- apply(dat_R, 2, mean)
Ybar_T <- apply(dat_T, 2, mean)
s2_R <- round(apply(dat_R, 2, var), 2)
s2_T <- round(apply(dat_T, 2, var), 2)
if(is.null(tp)){
tp <- 1:nlocs
}
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(ps=15)
matplot(tp, t(X[,-1]), pch = rep(pch, each = nreps),
col = rep(color, each = nreps),
xlab = xlab, ylab = ylab, ...)
if(isTRUE(mean)){
lines(tp, Ybar_R, col = color[1], lty = 1)
lines(tp, Ybar_T, col = color[2], lty = 2)
}
if(isTRUE(var)){
if(isTRUE(var_label)){
mtext(c(groups[1], as.character(s2_R)), at = c(0.5, tp), side = 3, line = 1, col = color[1], cex = .75)
mtext(c(groups[2], as.character(s2_T)), at = c(0.5, tp), side = 3, line = 2, col = color[2], cex = .75)
}else{
mtext(as.character(s2_R), at = tp, side = 3, line = 1, col = color[1], cex = .75)
mtext(as.character(s2_T), at = tp, side = 3, line = 2, col = color[2], cex = .75)
}
}
legend(legend_location, groups, pch = pch,col = color, bty = "n")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/dissplot.R
|
#' Calculation of a Bayesian 100*prob% credible interval for the F2 parameter
#'
#' This function calculates a 100*prob% credible interval for the F2 parameter using Bayesian methods. The model assumes a
#' version of the Jeffreys' prior with a pooled variance-covariance matrix based on the reference and test data sets.
#' See Novick (2015) for more details of the model.
#'
#' @param dis_data A data frame containing the dissolution data. The first column of the data frame should denote
#' the group labels identifying whether a given dissolution belongs to the "reference" or "test" formulation group.
#' For a given dissolution run, the remaining columns of the data frame contains the individual run's dissolution
#' measurements sorted in time. Alternatively, the user may provide a data object of class dis_data containing the
#' dissolution data. See the \code{make_dis_data()} function for the particular structure of the data object.
#' @param prob The probability associated with the credible interval. A value between 0 and 1.
#' @param B A positive integer specifying the number of Monte Carlo samples.
#' @param ci.type The type of credible interval to report. Specifying \code{quantile} returns a credible interval based on the posterior sample quantiles of the F2 distribution. Specifying \code{HPD} returns a highest posterior density interval.
#' @param get.dist logical; if \code{TRUE}, returns the posterior samples of the F2 distribution.
#' @return The function returns a 100*prob% credible interval for the F2 parameter calculated from the observed dissolution data.
#' @note Use the \code{dissplot()} or \code{ggdissplot()} function to visually check if it's appropriate to calculate the f2 statistic.
#' @references Novick, S., Shen, Y., Yang, H., Peterson, J., LeBlond, D., and Altan, S. (2015). Dissolution Curve Comparisons Through the F2 Parameter, a Bayesian Extension of the f2 Statistic. Journal of Biopharmaceutical Statistics, 25(2):351-371.
#' @references Pourmohamad, T., Oliva Aviles, C.M., and Richardson, R. (2022). Gaussian Process Modeling for Dissolution Curve Comparisons. Journal of the Royal Statistical Society, Series C, 71(2):331-351.
#' @examples
#' ### dis_data comes loaded with the package
#' f2bayes(dis_data, prob = 0.9, B = 1000)
#'
#' @export
f2bayes <- function(dis_data, prob = 0.9, B = 1000, ci.type = c("quantile", "HPD"), get.dist = FALSE){
## Bayesian version with Jeffreys' prior (pooled variance-covariance)
if(is.data.frame(dis_data)){
X <- cbind(2-(dis_data[,1] == dis_data[1,1]), dis_data[-1])
names(X)[1] <- "Group"
yRef <- X[X$Group == 1,-1]
yTest <- X[X$Group == 2,-1]
dis_data <- make_dis_data(yRef, yTest)
}
if(class(dis_data)[1] != "dis_data"){
stop("Input must be of class 'dis_data'. See function make_dis_data()")
}else if(B <= 0 | !is.numeric(B)){
stop("B must be a positive integer.")
}else if(prob <= 0 | prob >= 1 | !is.numeric(prob)){
stop("prob must be a value between 0 and 1.")
}else{
K <- B
V1 <- var(dis_data$yRef)
V2 <- var(dis_data$yTest)
Vhat <- 0.5 * (V1 + V2) ## Pooled variance estimator
degFree <- 2 * (dis_data$nTab - 1)
Scale.hat <- solve(degFree * Vhat)
Omega.post <- stats::rWishart(K, df = degFree, Sigma = Scale.hat )
V.post <- lapply(1:K, function(k){ solve(Omega.post[,,k]) })
Vhalf.post <- lapply(1:K, function(k){ solve(chol(Omega.post[,,k])) })
muT.post <- matrix(rnorm(K * dis_data$nTime, sd = sqrt(1 / dis_data$nTab)), K, dis_data$nTime)
muR.post <- matrix(rnorm(K * dis_data$nTime, sd=sqrt(1/dis_data$nTab)), K, dis_data$nTime)
for (k in 1:K){
muR.post[k,] <- dis_data$ybarR + Vhalf.post[[k]] %*% muR.post[k,]
muT.post[k,] <- dis_data$ybarT + Vhalf.post[[k]] %*% muT.post[k,]
}
D.post <- rowMeans(sapply(1:dis_data$nTime, function(j){ (muT.post[,j] - muR.post[,j])^2 }))
f2.post <- 100 - 25*log10(1 + D.post)
out <- process_results("bayes", f2.post, ci.type, prob, get.dist)
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/f2bayes.R
|
#' Calculation of a bias-corrected and accelerated 100*level% confidence interval for the F2 parameter
#'
#' This function calculates a 100*level% confidence interval for the F2 parameter using the bias-corrected and accelerated (BCa) bootstrap.
#'
#' @param dis_data A data frame containing the dissolution data. The first column of the data frame should denote
#' the group labels identifying whether a given dissolution belongs to the "reference" or "test" formulation group.
#' For a given dissolution run, the remaining columns of the data frame contains the individual run's dissolution
#' measurements sorted in time. Alternatively, the user may provide a data object of class dis_data containing the
#' dissolution data. See the \code{make_dis_data()} function for the particular structure of the data object.
#' @param level The confidence level. A value between 0 and 1.
#' @param B A positive integer specifying the number of bootstrap samples.
#' @param ci.type The type of confidence interval to report. Specifying \code{quantile} returns a bootstrap confidence interval based on the sample quantiles. Specifying \code{HPD} returns a highest density region interval.
#' @param get.dist logical; if \code{TRUE}, returns the posterior samples of the F2 distribution.
#' @return The function returns a 100*level% confidence interval for the F2 parameter calculated from the observed dissolution data.
#' @note Use the \code{dissplot()} or \code{ggdissplot()} function to visually check if it's appropriate to calculate the f2 statistic.
#' @references Liu, S. and Cai, X. and Shen, M. and Tsong, Y. (2023). In vitro dissolution profile comparison using bootstrap
#' bias corrected similarity factor, f2. Journal of Biopharmaceutical Statistics, 34(1):78-89.
#' @examples
#' ### dis_data comes loaded with the package
#' f2bca(dis_data, level = 0.9, B = 1000)
#'
#' @export
f2bca <- function (dis_data, level = 0.9, B = 1000, ci.type = c("quantile", "HPD"), get.dist = FALSE){
if(is.data.frame(dis_data)){
X <- cbind(2-(dis_data[,1] == dis_data[1,1]), dis_data[-1])
names(X)[1] <- "Group"
yRef <- X[X$Group == 1,-1]
yTest <- X[X$Group == 2,-1]
dis_data <- make_dis_data(yRef, yTest)
}
if(class(dis_data)[1] != "dis_data"){
stop("Input must be of class 'dis_data'. See function make_dis_data()")
}else if(B <= 0 | !is.numeric(B)){
stop("B must be a positive integer.")
}else if(level <= 0 | level >= 1 | !is.numeric(level)){
stop("level must be a value between 0 and 1.")
}else{
f2.bs <- rep(NA, B)
for (k in 1:B) {
y1 <- dis_data$yRef[sample(1:dis_data$nTab, replace = TRUE),]
y2 <- dis_data$yTest[sample(1:dis_data$nTab, replace = TRUE),]
ybar1 <- colMeans(y1)
ybar2 <- colMeans(y2)
D <- mean((ybar1 - ybar2)^2)
f2.bs[k] <- 100 - 25 * log10(1 + D)
}
f2.est <- f2calc(dis_data)
## BCA confidence interval for f2
## (from https://cran.r-project.org/web/packages/bootf2/vignettes/bootf2.html)
z0.hat <- stats::qnorm(mean(f2.bs < f2.est))
## Leave-one-out (LOO) jackknife procedure, using the "n1+n2" method, in which, out of
## n test and n reference tablets, we LOO for each of the 2n tablets, one at a time.
ysumR <- dis_data$ybarR * dis_data$nTab
ysumT <- dis_data$ybarT * dis_data$nTab
f2.jk <- sapply( 1:dis_data$nTab, function(j){
ybarR.j <- (ysumR - dis_data$yRef[-j,]) / (dis_data$nTab-1)
ybarT.j <- (ysumT - dis_data$yTest[-j,]) / (dis_data$nTab-1)
D <- mean((ybarR.j - ybarT.j)^2)
f2 <- 100 - 25 * log10(1 + D)
return(f2)
})
f2.jk.resid <- mean(f2.jk) - f2.jk
alpha.hat <- sum( f2.jk.resid^3 ) / (6 * sum(f2.jk.resid^2)^1.5)
alpha <- 0.5 * (1 - level)
alpha1 <- stats::pnorm(z0.hat + (z0.hat + stats::qnorm(alpha)) / (1 - alpha.hat * (z0.hat + stats::qnorm(alpha))))
alpha2 <- stats::pnorm(z0.hat + (z0.hat + stats::qnorm(1 - alpha)) / (1 - alpha.hat * (z0.hat + stats::qnorm(1 - alpha))))
out <- process_results(name = "bca", f2.dist = f2.bs, ci.type = ci.type, level = c(alpha1, alpha2), get.dist = get.dist)
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/f2bca.R
|
#' Calculation of a wild bootstrap 100*level% confidence interval for the F2 parameter
#'
#' This function calculates a 100*level% confidence interval for the F2 parameter using wild bootstrap
#'
#' @param dis_data A data frame containing the dissolution data. The first column of the data frame should denote
#' the group labels identifying whether a given dissolution belongs to the "reference" or "test" formulation group.
#' For a given dissolution run, the remaining columns of the data frame contains the individual run's dissolution
#' measurements sorted in time. Alternatively, the user may provide a data object of class dis_data containing the
#' dissolution data. See the \code{make_dis_data()} function for the particular structure of the data object.
#' @param level The confidence level. A value between 0 and 1.
#' @param B A positive integer specifying the number of bootstrap samples.
#' @param ci.type The type of confidence interval to report. Specifying \code{quantile} returns a bootstrap confidence interval based on the sample quantiles. Specifying \code{HPD} returns a highest density region interval.
#' @param get.dist logical; if \code{TRUE}, returns the posterior samples of the F2 distribution.
#' @return The function returns a 100*level% confidence interval for the F2 parameter calculated from the observed dissolution data.
#' @note Use the \code{dissplot()} or \code{ggdissplot()} function to visually check if it's appropriate to calculate the f2 statistic.
#' @references Liu, S. and Cai, X. and Shen, M. and Tsong, Y. (2023). In vitro dissolution profile comparison using bootstrap
#' bias corrected similarity factor, f2. Journal of Biopharmaceutical Statistics, 34(1):78-89.
#' @examples
#' ### dis_data comes loaded with the package
#' f2boot(dis_data, level = 0.9, B = 1000)
#'
#' @export
f2boot <- function(dis_data, level = 0.9, B = 1000, ci.type = c("quantile", "HPD"), get.dist = FALSE){
## Wild bootstrap of f2 and bias-corrected f2
if(is.data.frame(dis_data)){
X <- cbind(2-(dis_data[,1] == dis_data[1,1]), dis_data[-1])
names(X)[1] <- "Group"
yRef <- X[X$Group == 1,-1]
yTest <- X[X$Group == 2,-1]
dis_data <- make_dis_data(yRef, yTest)
}
if(class(dis_data)[1] != "dis_data"){
stop("Input must be of class 'dis_data'. See function make_dis_data()")
}else if(B <= 0 | !is.numeric(B)){
stop("B must be a positive integer.")
}else if(level <= 0 | level >= 1 | !is.numeric(level)){
stop("level must be a value between 0 and 1.")
}else{
K <- B
## Step 1. Resample from the population by sampling with replacement from the nTab tablets of each group
f2.bs <- rep(NA, K)
for (k in 1:K){
y1 <- dis_data$yRef[sample(1:dis_data$nTab, replace = TRUE),]
y2 <- dis_data$yTest[sample(1:dis_data$nTab, replace = TRUE),]
ybar1 <- colMeans(y1)
ybar2 <- colMeans(y2)
D <- mean((ybar1 - ybar2)^2)
f2.bs[k] <- 100 - 25 * log10(1 + D)
}
## Step 2. Get 100*level% CI for f2
out <- process_results(name = "bs", f2.dist = f2.bs, ci.type = ci.type, level = level, get.dist = get.dist)
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/f2boot.R
|
#' Calculation of the f2 Statistic
#'
#' This function calculates the f2 statistic as described in Moore and Flanner (1996).
#'
#' @param dis_data A data frame containing the dissolution data. The first column of the data frame should denote
#' the group labels identifying whether a given dissolution belongs to the "reference" or "test" formulation group.
#' For a given dissolution run, the remaining columns of the data frame contains the individual run's dissolution
#' measurements sorted in time. Alternatively, the user may provide a data object of class dis_data containing the
#' dissolution data. See the \code{make_dis_data()} function for the particular structure of the data object.
#' @return The function returns the f2 statistic calculated from the observed dissolution data.
#' @note Use the \code{dissplot()} or \code{ggdissplot()} function to visually check if it's appropriate to calculate the f2 statistic.
#' @references Moore, J.W. and Flanner, H.H. (1996). Mathematical comparison of dissolution profiles. Pharmaceutical Technology, 20(6):64-74.
#' @examples
#' ### dis_data comes loaded with the package
#' f2calc(dis_data)
#'
#' @export
f2calc <- function(dis_data){
if(is.data.frame(dis_data)){
X <- cbind(2-(dis_data[,1] == dis_data[1,1]), dis_data[-1])
names(X)[1] <- "Group"
yRef <- X[X$Group == 1,-1]
yTest <- X[X$Group == 2,-1]
dis_data <- make_dis_data(yRef, yTest)
}
if(class(dis_data)[1] != "dis_data"){
stop("Input must be of class 'dis_data'. See function make_dis_data()")
}else{
D <- mean((dis_data$ybarR - dis_data$ybarT)^2)
f2 <- 100 - 25*log10(1 + D)
return(f2)
}
}
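## Note on the formula: 100 - 25*log10(1 + D), with D the mean squared
## difference between the group mean profiles, is algebraically the classical
## f2 = 50*log10(100/sqrt(1 + D)), since 50*log10(100) = 100 and
## 50*log10((1 + D)^(-1/2)) = -25*log10(1 + D). A quick sanity check:
## D <- 4
## all.equal(100 - 25*log10(1 + D), 50*log10(100/sqrt(1 + D)))  # TRUE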
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/f2calc.R
|
#' Calculation of a generalized pivotal quantity 100*level% confidence interval for the F2 parameter
#'
#' This function calculates a 100*level% confidence interval for the F2 parameter using generalized pivotal quantity methods based on a two variance component model with means for Time x Group, i.e., Dissolution ~ Time x Group + (1|Tablet:Group).
#'
#' @param dis_data A data frame containing the dissolution data. The first column of the data frame should denote
#' the group labels identifying whether a given dissolution belongs to the "reference" or "test" formulation group.
#' For a given dissolution run, the remaining columns of the data frame contains the individual run's dissolution
#' measurements sorted in time. Alternatively, the user may provide a data object of class dis_data containing the
#' dissolution data. See the \code{make_dis_data()} function for the particular structure of the data object.
#' @param level The confidence level. A value between 0 and 1.
#' @param B The number of generalized pivotal quantity samples.
#' @param ci.type The type of confidence interval to report. Specifying \code{quantile} returns a bootstrap confidence interval based on the sample quantiles. Specifying \code{HPD} returns a highest density region interval.
#' @param get.dist logical; if \code{TRUE}, returns the posterior samples of the F2 distribution.
#' @return The function returns a 100*level% confidence interval for the F2 parameter calculated from the observed dissolution data.
#' @note Use the \code{dissplot()} or \code{ggdissplot()} function to visually check if it's appropriate to calculate the f2 statistic.
#' @examples
#' ### dis_data comes loaded with the package
#' f2gpq(dis_data, level = 0.9, B = 10000)
#'
#' @export
f2gpq <- function(dis_data, level = 0.9, B = 10000, ci.type = c("quantile", "HPD"), get.dist = FALSE){
## Generalized pivotal quantity analysis for 2 variance components problem
## Y[ijk] = theta[ij] + T[k(i)] + eps[ijk], T[k(i)] ~ N( 0, sigmaSqT ), eps[ijk] ~ N( 0, sigmaSqE )
## i = 1,2 (groups); j=1, ..., P (time points), k = 1, 2, .., n (tablets). Data must be balanced for this function!
if(is.data.frame(dis_data)){
X <- cbind(2-(dis_data[,1] == dis_data[1,1]), dis_data[-1])
names(X)[1] <- "Group"
yRef <- X[X$Group == 1,-1]
yTest <- X[X$Group == 2,-1]
dis_data <- make_dis_data(yRef, yTest)
}
if(class(dis_data)[1] != "dis_data"){
stop("Input must be of class 'dis_data'. See function make_dis_data()")
}else if(B <= 0 | !is.numeric(B)){
stop("B must be a positive integer.")
}else if(level <= 0 | level >= 1 | !is.numeric(level)){
stop("level must be a value between 0 and 1.")
}else{
ybar_Diff <- dis_data$ybarR - dis_data$ybarT
Z <- matrix(rnorm(B * dis_data$nTime), B, dis_data$nTime)
U1 <- stats::rchisq(B, dis_data$degFreeGroupTab)
U2 <- stats::rchisq(B, dis_data$degFreeRes)
sigmaSqE <- dis_data$ssRes / U2
sigmaSqT <- (dis_data$ssGroupTab / U1 - sigmaSqE) / dis_data$nTime
sigmaSqT[sigmaSqT < 0] <- 0
sigmaSqTot <- sigmaSqT + sigmaSqE
muDiff <- matrix(NA, B, dis_data$nTime)
for (j in 1:B){
V <- matrix(sigmaSqT[j], dis_data$nTime, dis_data$nTime)
diag(V) <- sigmaSqTot[j]
Vchol <- chol(V)
muDiff[j,] <- ybar_Diff - sqrt(2 / dis_data$nTab) * t(Vchol) %*% Z[j,]
}
D_gpq <- rowMeans(muDiff^2)
f2.gpq <- 100 - 25 * log10(1 + D_gpq)
out <- process_results("gpq", f2.gpq, ci.type, level, get.dist)
}
return(out)
}
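## Example call (sketch): request a highest posterior density style interval
## and keep the underlying GPQ draws of F2 for plotting:
## f2gpq(dis_data, level = 0.9, B = 10000, ci.type = "HPD", get.dist = TRUE)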
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/f2gpq.R
|
#' Calculation of a parametric bootstrap 100*level% confidence interval for the F2 parameter
#'
#' This function calculates a 100*level% confidence interval for the F2 parameter using a parametric bootstrap based on a two variance component model with means for Time x Group, i.e., Dissolution ~ Time x Group + (1|Tablet:Group).
#'
#' @param dis_data A data frame containing the dissolution data. The first column of the data frame should denote
#' the group labels identifying whether a given dissolution belongs to the "reference" or "test" formulation group.
#' For a given dissolution run, the remaining columns of the data frame contains the individual run's dissolution
#' measurements sorted in time. Alternatively, the user may provide a data object of class dis_data containing the
#' dissolution data. See the \code{make_dis_data()} function for the particular structure of the data object.
#' @param level The confidence level. A value between 0 and 1.
#' @param B A positive integer specifying the number of bootstrap samples.
#' @param ci.type The type of confidence interval to report. Specifying \code{quantile} returns a bootstrap confidence interval based on the sample quantiles. Specifying \code{HPD} returns a highest density region interval.
#' @param get.dist logical; if \code{TRUE}, returns the posterior samples of the F2 distribution.
#' @return The function returns a 100*level% confidence interval for the F2 parameter calculated from the observed dissolution data.
#' @note Use the \code{dissplot()} or \code{ggdissplot()} function to visually check if it's appropriate to calculate the f2 statistic.
#' @examples
#' ### dis_data comes loaded with the package
#' f2pbs(dis_data, level = 0.9, B = 1000)
#'
#' @export
f2pbs <- function(dis_data, level = 0.9, B = 1000, ci.type = c("quantile", "HPD"), get.dist = FALSE){
if(is.data.frame(dis_data)){
X <- cbind(2-(dis_data[,1] == dis_data[1,1]), dis_data[-1])
names(X)[1] <- "Group"
yRef <- X[X$Group == 1,-1]
yTest <- X[X$Group == 2,-1]
dis_data <- make_dis_data(yRef, yTest)
}
if(class(dis_data)[1] != "dis_data"){
stop("Input must be of class 'dis_data'. See function make_dis_data()")
}else if(B <= 0 | !is.numeric(B)){
stop("B must be a positive integer.")
}else if(level <= 0 | level >= 1 | !is.numeric(level)){
stop("level must be a value between 0 and 1.")
}else{
K <- B
sigmaSqE <- dis_data$ssRes / dis_data$degFreeRes
sigmaSqTab <- (dis_data$ssGroupTab / dis_data$degFreeGroupTab - sigmaSqE) / dis_data$nTime
sigmaSqTab[sigmaSqTab < 0] <- 0
sigmaSqTot <- sigmaSqTab + sigmaSqE
Sigma <- matrix(sigmaSqTab, dis_data$nTime, dis_data$nTime)
diag(Sigma) <- sigmaSqTot
Sigma.diff <- 2 * Sigma / dis_data$nTab
ybarDiff.pbs <- mnormt::rmnorm(K, mean = dis_data$ybarR - dis_data$ybarT, varcov = Sigma.diff)
D.pbs <- rowMeans(ybarDiff.pbs^2)
f2.pbs <- 100 - 25 * log10(1 + D.pbs)
    ## Get the PBS 100*level% CI for f2 (there are other CI methods)
out <- process_results("pbs", f2.pbs, ci.type, level, get.dist)
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/f2pbs.R
|
#' Dissolution Data Plot
#'
#' Minimalist ggplot function for plotting dissolution data sets.
#'
#' @param dis_data A data frame containing the dissolution data. The first column of the data frame should denote
#' the group labels identifying whether a given dissolution belongs to the "reference" or "test" formulation group.
#' For a given dissolution run, the remaining columns of the data frame contains the individual run's dissolution
#' measurements sorted in time. Alternatively, the user may provide a data object of class dis_data containing the
#' dissolution data. See the \code{make_dis_data()} function for the particular structure of the data object.
#' @param show.mean logical; if \code{TRUE}, plot the connected mean dissolution values for each group.
#' @param show.SD logical; if \code{TRUE}, calculate the standard deviation of the dissolution data at each time point for each group. The values are placed at the top of the plot over the corresponding time point.
#' @return The function returns a plot of the dissolution data.
#' @examples
#' ### dis_data comes loaded with the package
#' ggdissplot(dis_data)
#'
#' @importFrom graphics legend lines matplot mtext par
#' @export
ggdissplot <- function(dis_data, show.mean = FALSE, show.SD = FALSE){
if(is.data.frame(dis_data)){
X <- cbind(2-(dis_data[,1] == dis_data[1,1]), dis_data[-1])
names(X)[1] <- "Group"
yRef <- X[X$Group == 1,-1]
yTest <- X[X$Group == 2,-1]
dis_data <- make_dis_data(yRef, yTest)
}
if(class(dis_data)[1] != "dis_data"){
stop("Input must be of class 'dis_data'. See function make_dis_data()")
}else{
Group <- Time <- txt.ref <- txt.test <- y <- y.ref <- y.test <- NULL ## Hack to silence notes
d <- data.frame(y = c(as.vector(unlist(dis_data$yRef)), as.vector(unlist(dis_data$yTest))),
Group = factor(rep(c("Reference", "Test"), each = dis_data$nTab * dis_data$nTime), levels = c("Reference", "Test")),
Time = factor(rep(1:dis_data$nTime, each = dis_data$nTab)),
Tablet = factor(rep(1:dis_data$nTab, dis_data$nTime))
)
p <- ggplot2::ggplot(d, ggplot2::aes(Time, y, col = Group, group = Group)) +
ggplot2::geom_jitter(height = 0, width = 0.05) +
ggplot2::xlab("Time point") +
ggplot2::ylab("Dissolution (%)") +
ggplot2::theme(legend.position = "top")
if ( isTRUE(show.mean) ){
d.mean <- data.frame(Time = factor(rep(1:dis_data$nTime, 2)),
Group = factor(rep(c("Reference", "Test"), each = dis_data$nTime)),
y = c(dis_data$ybarR, dis_data$ybarT)
)
p <- p + ggplot2::geom_line(data = d.mean, linetype="dashed")
}
if ( isTRUE(show.SD) ){
SD <- tapply(d$y, list(d$Time, d$Group), sd)
d.sd <- data.frame(Time = c(0.7, 1:dis_data$nTime),
txt.ref =c("Ref", round(SD[,1], 1)),
txt.test = c("Test", round(SD[,2], 1)),
y.ref = max(d$y) + 0.1 * diff(range(d$y)),
y.test = max(d$y) + 0.05 * diff(range(d$y))
)
p <- p + ggplot2::geom_text(data = d.sd, ggplot2::aes(Time, y.ref, label = txt.ref), inherit.aes=FALSE) +
ggplot2::geom_text(data = d.sd, ggplot2::aes(Time, y.test, label = txt.test), inherit.aes = FALSE)
}
}
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/ggdissplot.R
|
#' Hierarchical Gaussian Process Model for Dissolution Profile Modeling
#'
#' This function implements the Bayesian hierarchical Gaussian process model described in Pourmohamad et al (2022).
#'
#' @param dis_data A data frame containing the dissolution data. The first column of the data frame should denote
#' the group labels identifying whether a given dissolution belongs to the "reference" or "test" formulation group.
#' For a given dissolution run, the remaining columns of the data frame contains the individual run's dissolution
#' measurements sorted in time. Alternatively, the user may provide a data object of class dis_data containing the
#' dissolution data. See the \code{make_dis_data()} function for the particular structure of the data object.
#' @param B A positive integer specifying the number of posterior samples to draw. By default \code{B} is set to 10000.
#' @param locs A vector in ascending order that corresponds to each time point the dissolution data was measured at.
#' @param n_interp An integer value specifying the number of time points to interpolate at. This sets the interpolated points to \code{seq(1st time point, last time point, length = n_interp)}.
#' @param control An optional list of priors and initial values, otherwise the default values/strategies found in Pourmohamad et al (2022) will be used. More specifically, \code{control} can be used to define the following settings:
#' \itemize{
#' \item \code{sigma2_starting}: starting value for sigma^2
#' \item \code{tau2_starting}: starting value for tau^2
#' \item \code{phi_starting}: starting value for phi
#' \item \code{psi_starting}: starting value for psi
#' \item \code{sigma2_alpha} and \code{sigma2_beta}: parameters for the inverse gamma prior for sigma^2
#' \item \code{tau2_alpha} and \code{tau2_beta}: parameters for the inverse gamma prior for tau^2
#' \item \code{phi_alpha} and \code{phi_beta}: parameters for the gamma prior for phi
#' \item \code{psi_alpha} and \code{psi_beta}: parameters for the gamma prior for psi
#' \item \code{prop_phi}: proposal variance for the parameter phi
#' \item \code{prop_psi}: proposal variance for the parameter psi
#' }
#' @param adaptive logical; an option for using adaptive MCMC. If \code{adaptive = TRUE}, this will replace both \code{prop_phi} and \code{prop_psi} by using past MCMC draws to inform the proposal variance.
#' @return The function returns a list of summary statistics and B posterior samples for parameters of the model. More specifically it returns:
#' \itemize{
#' \item delta: The average delta value over the posterior samples of delta. The definition of delta is given in Novick et al. (2015).
#' \item f2: The average f2 value over the posterior samples of f2.
#' \item mcmc_chains: A list of posterior samples for delta, f2, the mean parameters (\code{mu_pars}), and the covariance parameters (\code{cov_pars}).
#' }
#' @note You should always check MCMC diagnostics on the posterior samples before drawing conclusions. Likewise, plots of the predicted dissolution curves should also be checked to evaluate if the model fit to the observed data seems reasonable.
#' @references Novick, S., Shen, Y., Yang, H., Peterson, J., LeBlond, D., and Altan, S. (2015). Dissolution Curve Comparisons Through the F2 Parameter, a Bayesian Extension of the f2 Statistic. Journal of Biopharmaceutical Statistics, 25(2):351-371.
#' @references Pourmohamad, T., Oliva Aviles, C.M., and Richardson, R. (2022). Gaussian Process Modeling for Dissolution Curve Comparisons. Journal of the Royal Statistical Society, Series C, 71(2):331-351.
#' @examples
#' ### dis_data comes loaded with the package
#' ### We set B = 100 to obtain 100 posterior samples, you probably want to run it
#' ### longer for say, B = 100000, but B = 100 runs fast for illustrative purposes
#' ### and passing CRAN checks
#' B <- 100
#'
#' tp <- seq(10, 80, 10) # Time points
#' burnin <- B * 0.1 # A 10% burn-in
#' thin <- 10 # Keep every 10th sample, i.e., thinning
#' post <- hgp(dis_data, locs = tp, B = B, n_interp = 100)
#'
#' ### Example: Removing burn-in and then thinning the posterior samples for the covariance parameters
#' ### and then plotting the chains
#' phi <- post$mcmc_chains$cov_pars$phi[-c(1:burnin)]
#' phi <- phi[seq(1, (B-burnin), thin)]
#' psi <- post$mcmc_chains$cov_pars$psi[-c(1:burnin)]
#' psi <- psi[seq(1, (B-burnin), thin)]
#' sigma_R <- post$mcmc_chains$cov_pars$sigma_R[-c(1:burnin)]
#' sigma_R <- sigma_R[seq(1, (B-burnin), thin)]
#' sigma_T <- post$mcmc_chains$cov_pars$sigma_T[-c(1:burnin)]
#' sigma_T <- sigma_T[seq(1, (B-burnin), thin)]
#' tau <- post$mcmc_chains$cov_pars$tau[-c(1:burnin)]
#' tau <- tau[seq(1, (B-burnin), thin)]
#'
#' chains <- data.frame( # Data frame holding posterior samples
#' samples = rep(1:((B-burnin)/thin), times = 5),
#' parameter = rep(c("phi", "psi", "sigma_R", "sigma_T", "tau"),
#' each = (B-burnin)/thin),
#' values = c(phi, psi, sigma_R, sigma_T, tau))
#' chains$parameter <- factor(chains$parameter,
#' labels = c(expression(phi),
#' expression(psi),
#' expression(sigma[R]),
#' expression(sigma[T]),
#' expression(tau)))
#' ggplot2::ggplot(chains, ggplot2::aes(samples, values)) +
#' ggplot2::geom_line() +
#' ggplot2::labs(x = "Iterations", y = "Posterior Sample Values") +
#' ggplot2::facet_wrap(~parameter, scales = "free",
#' labeller = ggplot2::label_parsed) +
#' ggplot2::theme(text = ggplot2::element_text(size = 22))
#'
#' ggplot2::ggplot(chains, ggplot2::aes(values)) +
#' ggplot2::geom_density() +
#' ggplot2::labs(x = "Values", y = "Posterior Density") +
#' ggplot2::facet_wrap(~parameter, scales = "free",
#' labeller = ggplot2::label_parsed) +
#' ggplot2::theme(text = ggplot2::element_text(size = 22))
#'
#' ### Plotting the predicted dissolution profiles
#' dissplot(dis_data, tp)
#' grid <- sort(c(tp, seq(min(tp), max(tp), length = 100)))
#' grid1 <- (1:B)[-(1:burnin)][seq(1, (B-burnin), thin)]
#' grid2 <- ((B+1):(2*B))[-(1:burnin)][seq(1, (B-burnin), thin)]
#' lines(grid, apply(post$mcmc_chains$mu_pars[,grid1], 1, mean),
#' col = "gray65", lwd = 2)
#' lines(grid, apply(post$mcmc_chains$mu_pars[,grid2], 1, mean),
#' col = "black", lwd = 2)
#' lower <- apply(post$mcmc_chains$mu_pars[,grid1], 1,
#' quantile, prob = 0.025)
#' upper <- apply(post$mcmc_chains$mu_pars[,grid1], 1,
#' quantile, prob = 0.975)
#' polygon(c(grid, rev(grid)), c(lower, rev(upper)),
#' col = scales::alpha("gray65",.2), border = NA)
#' lower <- apply(post$mcmc_chains$mu_pars[,grid2], 1,
#' quantile, prob = 0.025)
#' upper <- apply(post$mcmc_chains$mu_pars[,grid2], 1,
#' quantile, prob = 0.975)
#' polygon(c(grid, rev(grid)), c(lower, rev(upper)),
#' col = scales::alpha("black",.2), border = NA)
#'
#' ### If we want to calculate the Pr(f2 > 50 & delta < 15)
#' prob <- sum(post$mcmc_chains$f2[grid1] > 50 &
#' post$mcmc_chains$delta[grid1] < 15) / ((B - burnin)/thin)
#'
#' @importFrom stats dgamma rnorm runif sd var
#' @export
hgp <- function(dis_data, locs, B = 1000, n_interp = 30, control = list(), adaptive = FALSE){
  if(inherits(dis_data, "dis_data")){
dis_data <- data.frame(rbind(data.frame(group = "Reference", dis_data$yRef), data.frame(group = "Test", dis_data$yTest)))
}
  if(!is.numeric(B) || B <= 0){
stop("B must be a positive integer.")
}else if(!is.data.frame(dis_data)){
stop("The dissolution data must be stored in a data frame.")
}else if(length(unique(dis_data[,1])) != 2){
stop("The dissolution data must contain 2 groups.")
}else if(ncol(dis_data) < 3){
stop("The dissolution data must contain at least 2 time points (but you probably intended for more than that).")
}else if(length(locs) != (ncol(dis_data) - 1)){
stop("The number of time points does not match the dissolution data set.")
}else if(!is.numeric(locs)){
stop("The time point vector needs to be a numeric vector of length ncol(dis_data) - 1.")
  }else if(!is.numeric(n_interp) || n_interp <= 0){
stop("n_interp must be a positive integer.")
}else if(!is.list(control)){
stop("control needs to be a list of priors and/or initial values")
}else{
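    # Gibbs update: conjugate multivariate normal full conditional for a
    # group-level mean curve, given the data and current covariance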
draw_mu = function(dat,n,Sigma){
Xbar <- apply(dat,2,mean)
Sstar <- solve(solve(S)+n*solve(Sigma))
mstar <- Sstar%*%(solve(S)%*%m+n*solve(Sigma)%*%(Xbar))
mnormt::rmnorm(1,mstar,Sstar)
}
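    # Metropolis-Hastings step (random walk on the log scale) for phiS, the
    # range parameter of the mean process (psi in the paper's notation)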
draw_phiS <- function(){
alpha <- ifelse(is.null(control$phiS_alpha),(max(locs)-min(locs))/(nloc/2),control$phiS_alpha)
beta <- ifelse(is.null(control$phiS_beta),1,control$phiS_beta)
phiSnew <- exp(rnorm(1,log(phiS),prop_phiS))
Snew <- tau2S*covf2(D,phiSnew)+diag(nloc)*.0001
alph <- mnormt::dmnorm(mu_A,m,Snew,log=TRUE)+mnormt::dmnorm(mu_B,m,Snew,log=TRUE)+dgamma(phiSnew,alpha,beta,log=TRUE)-
mnormt::dmnorm(mu_A,m,S,log=TRUE)-mnormt::dmnorm(mu_B,m,S,log=TRUE)-dgamma(phiS,alpha,beta,log=TRUE)
if(log(runif(1)) < alph){
phiS <- phiSnew
S <- Snew
acceptS <- acceptS + 1
}
phiS
}
draw_phi <- function(){
phinew <- exp(rnorm(1,log(phi),prop_phi))
Sigma_Anew <- tau2_A*covdelta(D,phinew)
Sigma_Bnew <- tau2_B*covdelta(D,phinew)
alpha <- ifelse(is.null(control$phi_alpha),(max(locs)-min(locs))/(nloc/2),control$phi_alpha)
beta <- ifelse(is.null(control$phi_beta),1,control$phi_beta)
alph <- sum(mnormt::dmnorm(dat_A,mu_A,Sigma_Anew,log=TRUE))+sum(mnormt::dmnorm(dat_B,mu_B,Sigma_Bnew,log=TRUE))+dgamma(phinew,alpha,beta,log=TRUE)-
sum(mnormt::dmnorm(dat_A,mu_A,Sigma_A,log=TRUE)+mnormt::dmnorm(dat_B,mu_B,Sigma_B,log=TRUE))-dgamma(phi,alpha,beta,log=TRUE)
if(log(runif(1)) < alph){
phi <- phinew
Sigma_A <- Sigma_Anew
Sigma_B <- Sigma_Bnew
accept <- accept + 1
}
phi
}
draw_tau2S <- function(){
alpha <- ifelse(is.null(control$tau2S_alpha),mean(diag(var(X[,-1]))),control$tau2S_alpha)
beta <- ifelse(is.null(control$tau2S_beta),mean(diag(var(X[,-1]))),control$tau2S_beta)
pscl::rigamma(1,alpha+nloc*2/2,beta+1/2*(mu_A-m)%*%solve(covf2(D,phiS),mu_A-m)+
1/2*(mu_B-m)%*%solve(covf2(D,phiS),mu_B-m))
}
draw_tau2<- function(dat,mu,n){
alpha <- ifelse(is.null(control$tau2_alpha),mean(diag(var(X[,-1]))),control$tau2_alpha)
beta <- ifelse(is.null(control$tau2_beta),mean(diag(var(X[,-1]))),control$tau2_beta)
wsse <- 0
anom <- as.matrix(t(t(dat)-mu))
wsse <- sum(diag(anom%*%solve(covdelta(D,phi))%*%t(anom)))
pscl::rigamma(1,alpha+(nloc*n)/2,beta+1/2*wsse)
}
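    # Conditional (kriging) draw of the mean curve at the interpolation grid,
    # given its values at the observed time points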
mu_post <- function(mu,tau2){
S22 <- tau2S*covf2(D22,phiS)
S12 <- tau2S*covf2(D12,phiS)
mnew <- 50+t(S12)%*%solve(S,mu-50)
signew <- S22 - t(S12)%*%solve(S,(S12))
munew <- mnormt::rmnorm(1,mnew,signew)
c(mu,munew)
}
# Initialize Data
X <- cbind(2-(dis_data[,1]==dis_data[1,1]),dis_data[-1])
names(X)[1] <- "Group"
nloc <- length(locs)
otherlocs <- seq(min(locs),max(locs),length=n_interp)
alllocs <- c(locs,otherlocs)
nreps_A <- sum(X[,1]==1)
nreps_B <- sum(X[,1]==2)
D <- abs(outer(locs,locs,"-"))
D22 <- abs(outer(otherlocs,otherlocs,"-"))
D12 <- abs(outer(locs,otherlocs,"-"))
dat_A <- X[X$Group==1,-1]
dat_B <- X[X$Group==2,-1]
dat <- X
# Initialize mu, sigma2,tau2,phi,tau2S
# 1st group is denoted with _A and second group with _B
mu_A <- rep(0,nloc)
mu_B <- rep(0,nloc)
tau2 <- ifelse(is.null(control$tau2_starting),mean(diag(var(X[,-1]))),control$tau2_starting)
tau2_A <- tau2 # Covariance of 1st group mean
tau2_B <- tau2 #Covariance of 2nd group mean
phi <- ifelse(is.null(control$phi_starting),(max(locs)-min(locs))/(nloc/2),control$phi_starting)
phiS <- phi
tau2S <- ifelse(is.null(control$tau2S_starting),tau2*5,control$tau2S_starting) # Covariance of mean function
covdelta <- function(D,phi)geoR::matern(D,phi,3/2)
Sigma_A <- tau2_A*covdelta(D,phi) # 1st group
Sigma_B <- tau2_B*covdelta(D,phi) # second group
m <- rep(50,nloc)
covf2 <- function(D,phiS)exp(-abs(D/phiS))
S <- tau2S*covf2(D,phiS)
# Metropolis Hastings proposal parameters
prop_phiS <- ifelse(is.null(control$phiS_prop),1,control$phiS_prop)
prop_phi <- ifelse(is.null(control$phi_prop),1,control$phi_prop)
# initialize collection vectors
accept <- 0
acceptS <- 0
nrun <- B
f2 <- rep(NA,nrun)
delta <- rep(NA,nrun)
mucollect_A <- matrix(0,length(alllocs),nrun)
mucollect_B <- matrix(0,length(alllocs),nrun)
phivec <- matrix(0,nrun,5)
# Run chain
for(i in 1:nrun){
# Draw mu_A|.
mu_A <- draw_mu(dat_A,nreps_A,Sigma_A)
# Draw mu_B|.
mu_B <- draw_mu(dat_B,nreps_B,Sigma_B)
# Draw tau2S
tau2S <- draw_tau2S()
S <- tau2S*covf2(D,phiS)
# Draw phiS
phiS <- draw_phiS()
S <- tau2S*covf2(D,phiS)+diag(nloc)*.0001
# Draw tau2_A
tau2_A <- draw_tau2(dat_A,mu_A,nreps_A)
Sigma_A <- tau2_A*covdelta(D,phi)
# Draw tau2_B
tau2_B <- draw_tau2(dat_B,mu_B,nreps_B)
Sigma_B <- tau2_B*covdelta(D,phi)
# Draw phi
phi <- draw_phi()
phivec[i,] <- c(phi,phiS,sqrt(tau2_A),sqrt(tau2_B),sqrt(tau2S))
# posterior predictive for f2
mustar_A <- mu_post(mu_A,tau2_A)
mustar_B <- mu_post(mu_B,tau2_B)
mucollect_A[,i] <- mustar_A[order(alllocs)]
mucollect_B[,i] <- mustar_B[order(alllocs)]
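      # f2 = 50*log10(100 / sqrt(1 + mean squared difference between curves));
      # delta = maximum absolute difference between the two mean curves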
f2[i] <- 50*log10(1/sqrt(1+(1/length(mustar_A))*sum((mustar_A-mustar_B)^2))*100)
delta[i] <- max(abs(mustar_A-mustar_B))
if(adaptive & i>25){
prop_phi = sd(phivec[1:i,1])/3
prop_phiS = sd(phivec[1:i,2])/3
}
}
cov_pars <- data.frame(phivec)
names(cov_pars) <- c("phi","psi","sigma_R","sigma_T","tau")
mucollect_A <- data.frame(mucollect_A)
names(mucollect_A) <- rep("muR", ncol(mucollect_A))
mucollect_B <- data.frame(mucollect_B)
names(mucollect_B) <- rep("muT", ncol(mucollect_B))
mu_pars = cbind(mucollect_A,mucollect_B)
return(list(delta = mean(delta), f2 = mean(f2), mcmc_chains = list(delta=delta,f2=f2,mu_pars = mu_pars,cov_pars = cov_pars)))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/hgp.R
|
#' Class dis_data creation
#'
#' This function creates a data object of class dis_data.
#'
#' @param yRef A data frame or matrix containing the dissolution data for the reference group data. The rows of the data set correspond
#' to the individual dissolution runs. The columns of the data frame contain the individual run's dissolution
#' measurements sorted in time.
#' @param yTest A data frame or matrix containing the dissolution data for the test group data. The rows of the data set correspond
#' to the individual dissolution runs. The columns of the data frame contain the individual run's dissolution
#' measurements sorted in time.
#' @return The function returns a data object of class dis_data.
#' @examples
#' ### dis_data comes loaded with the package
#' ### but need to update dis_data to be an object of class dis_data
#' new_dis_data <- make_dis_data(yRef = dis_data[dis_data$group == "Reference", -1],
#' yTest = dis_data[dis_data$group == "Test", -1])
#'
#' @export
make_dis_data <- function(yRef, yTest){
out <- list()
  if(is.data.frame(yRef)){
yRef <- data.matrix(yRef)
}
  if(is.data.frame(yTest)){
yTest <- data.matrix(yTest)
}
out$yRef <- yRef
out$yTest <- yTest
out$nTab <- nrow(yRef)
out$nTime <- ncol(yRef)
out$ybarR <- colMeans(yRef)
out$ybarT <- colMeans(yTest)
## Sum of squares due to Group:Tablet
out$ssGroupTab <- out$nTime * (sum((rowMeans(yRef) - mean(yRef))^2) + sum((rowMeans(yTest) - mean(yTest))^2))
out$degFreeGroupTab <- 2 * out$nTab - 2
## Sum of squares residuals
out$ssRes <- sum(sweep(yRef, 2, out$ybarR, FUN = "-")^2) +
sum(sweep(yTest, 2, out$ybarT, FUN = "-")^2) - out$ssGroupTab
out$degFreeRes <- 2 * (out$nTab - 1) * (out$nTime - 1)
class(out) = c("dis_data", "list")
return(out)
}
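
## A quick sanity sketch with hypothetical data: by construction,
## ssGroupTab + ssRes equals the total within-group sum of squares.
if (FALSE) {
  set.seed(1)
  yR <- matrix(rnorm(12, 50, 2), nrow = 3)  # 3 tablets x 4 time points
  yT <- matrix(rnorm(12, 55, 2), nrow = 3)
  dd <- make_dis_data(yR, yT)
  total <- sum(sweep(yR, 2, colMeans(yR))^2) +
    sum(sweep(yT, 2, colMeans(yT))^2)
  all.equal(dd$ssGroupTab + dd$ssRes, total)  # TRUE
}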
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/make_dis_data.R
|
#' Helper function for processing results
#'
#' This function helps process the final results for the different f2 functions (e.g., f2bayes).
#'
#' @param name A character string denoting the type of method used to calculate the interval.
#' @param f2.dist A vector of samples for the F2 parameter or f2 statistic.
#' @param ci.type The type of confidence, or credible, interval to return. The option \code{quantile} returns sample quantile based intervals, while the option \code{HPD} returns intervals based on highest density regions.
#' @param level The confidence level or probability associated with the confidence or credible interval, respectively. Must be a value between 0 and 1.
#' @param get.dist logical; if \code{TRUE}, returns the samples of the distribution.
#' @return The function returns a list containing summary information about the method and interval level (\code{info}), the requested confidence/credible interval(s), and, if \code{get.dist = TRUE}, the samples of the f2 distribution.
#' @examples
#' ### dis_data comes loaded with the package
#' out1 <- f2bayes(dis_data, prob = 0.9, B = 1000, get.dist = TRUE)
#'
#' out2 <- process_results("bayes", out1$f2.dist, level = 0.9)
#'
#' ### out1 and out2 should contain the results for the info and intervals
#'
#' @export
process_results = function(name, f2.dist, ci.type = c("quantile", "HPD"), level, get.dist = FALSE){
out <- list()
if (length(level) == 1){
level <- c(0.5 * (1 - level), 1 - 0.5 * (1 - level))
}
out$info <- data.frame(type = name, K = length(f2.dist), level = level)
ci.type <- match.arg(ci.type, several.ok = TRUE)
if(any(ci.type == "quantile")){
out$ci.quantile <- stats::quantile(f2.dist, p = level)
}
if(any(ci.type == "HPD")){
out$ci.HPD <- coda::HPDinterval(coda::as.mcmc(f2.dist), prob = diff(level))
}
if(get.dist){
out$f2.dist <- f2.dist
}
return(out)
}
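
## Note: a scalar `level` is expanded to equal tails, e.g. level = 0.9 becomes
## c(0.05, 0.95), and the HPD interval then uses prob = diff(level) = 0.9.
## A small sketch with hypothetical f2 draws:
if (FALSE) {
  f2.samples <- rnorm(1000, mean = 55, sd = 3)  # stand-in posterior draws
  process_results("demo", f2.samples, ci.type = "quantile", level = 0.9)
}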
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/process_results.R
|
#' Run BayesDissolution Shiny App
#'
#' Runs a shiny application for calculating the different confidence and credible intervals for the F2 parameter. The different intervals are
#' constructed using the \code{f2bayes()}, \code{f2bca()}, \code{f2boot()}, \code{f2gpq()}, and \code{f2pbs()} functions. The shiny application comes
#' preloaded with an example excel data set based on the \code{dis_data} data set.
#'
#' @examples
#' ### The function requires no input to run
#' if(FALSE){ ## Make me TRUE to run
#' runExample()
#' }
#'
#' @export
runExample <- function() {
appDir <- system.file("shiny-examples", "myapp", package = "BayesDissolution")
if (appDir == "") {
stop("Could not find example directory. Try re-installing `BayesDissolution`.", call. = FALSE)
}
shiny::runApp(appDir, display.mode = "normal")
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/R/runExample.R
|
loadDataServer <- function(id) {
moduleServer(id, function(input, output, session) {
## Load the data
filedata <- reactive({
req( input$input.file )
extension = tools::file_ext((input$input.file$datapath))
if ( extension=="csv" ){
d = read.csv(input$input.file$datapath)
}else if ( extension=="xlsx"){
d = as.data.frame(readxl::read_excel(input$input.file$datapath))
}
X <- cbind(2 - (d[, 1] == d[1, 1]), d[-1])
names(X)[1] <- "Group"
yRef <- X[X$Group == 1, -1]
yTest <- X[X$Group == 2, -1]
dis_data <- make_dis_data(yRef, yTest)
return(dis_data)
})
## Download an example data set
output$exampleData <- downloadHandler(
filename = function() {
"dis_data_example.xlsx"
},
content = function(file) {
myfile <- paste0(system.file("extdata", package = "BayesDissolution"), "/dis_data_example.xlsx")
file.copy(myfile, file)
}
)
return(filedata)
})
}
############################################################################
############################################################################
summaryServer <- function(id, filedata) {
moduleServer(id, function(input, output, session) {
## Create scatterplot of allocation
output$scatterplot <- renderPlot({
req(input$input.file)
dis_data = filedata()
p = ggdissplot(dis_data, show.mean=TRUE, show.SD=TRUE)
print(p)
})
output$results <- renderTable({
req(input$input.file)
dis_data = filedata()
f2 = f2calc(dis_data)
f2.ci = switch(input$ciMethod, "boot"={f2boot(dis_data, level=input$level, B=input$B)},
"boot BCA"={f2bca(dis_data, level=input$level, B=input$B)},
"PBS"={f2pbs(dis_data, level=input$level, B=input$B)},
"GPQ"={f2gpq(dis_data, level=input$level, B=input$B)},
"Bayes"={f2bayes(dis_data, prob=input$level, B=input$B)})
out = data.frame(y=c(round(f2, 1),
paste(round(f2.ci$ci.quantile[1], 1), "-", round(f2.ci$ci.quantile[2], 1))))
rownames(out) = c("Est", "95% CI")
return(out)
}, bordered=TRUE, colnames=FALSE, rownames=TRUE)
})
}
############################################################################
############################################################################
server <- function(input, output, session) {
filedata = loadDataServer("f2")
summaryServer("f2", filedata)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/inst/shiny-examples/myapp/server.R
|
loadDataUI <- function(id) {
ns <- NS(id)
tagList(
wellPanel(
helpText(HTML("Load file. First column contains Group (Ref/Test). Each subsequent column",
"contains dissolution data for increasing time points.")),
fileInput(ns("input.file"),
label="Choose *.csv or *.xlsx File",
accept=c("csv", ".csv", "xlsx", ".xlsx")),
downloadLink(ns("exampleData"), label="Example Data"),
)
)
}
############################################################################
############################################################################
summaryUI <- function(id) {
ns <- NS(id)
tagList(
fluidRow(column(3, selectInput(ns("ciMethod"), label="CI Method",
choices=c("boot", "boot BCA", "PBS", "GPQ", "Bayes"),
selected="boot")),
column(3, numericInput(inputId=ns("level"), label="Level/Prob", value=0.9, min=0.5, max=1, step=0.01)),
column(3, numericInput(inputId=ns("B"), label="# Samples", value=10000, min=1000, max=100000, step=1000))),
fluidRow(column(4, tableOutput(ns("results"))),
column(8, plotOutput(ns("scatterplot"))))
)
}
############################################################################
############################################################################
ui <- navbarPage( title="Dissolution: F2",
tabPanel(title="Main Page",
fluidRow(
column(6, loadDataUI("f2")),
),
summaryUI("f2")
)
)
|
/scratch/gouwar.j/cran-all/cranData/BayesDissolution/inst/shiny-examples/myapp/ui.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
fastMean <- function(xx) {
.Call(`_BayesESS_fastMean`, xx)
}
fastProd <- function(X, Y) {
.Call(`_BayesESS_fastProd`, X, Y)
}
fastSum <- function(xx) {
.Call(`_BayesESS_fastSum`, xx)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesESS/R/RcppExports.R
|
#' @export
#' @importFrom Rcpp evalCpp
#' @importFrom stats runif approx rnorm rexp
#' @importFrom LaplacesDemon rinvchisq
#' @importFrom MCMCpack rinvgamma
#' @importFrom dfcrm titesim crmsim
#' @useDynLib BayesESS, .registration = TRUE
#####################################################################
##
## A wrapper function for ESS calculation written by Jaejoon Song
##
#####################################################################
ess <- function(model=NULL,label=NULL,
prior=NULL,
m=NULL,nsim=NULL,
ncov=NULL,svec1=NULL,svec2=NULL,
PI=NULL,betaSD=NULL,target=NULL,
obswin=NULL,rate=NULL,accrual=NULL,
shapeParam=NULL,scaleParam=NULL,
fast=TRUE){
Prior_0 <- Prior_1 <- Prior_2 <- Prior_3 <- Prior_4 <- Prior_5 <- NULL
Prior_6 <- Prior_7 <- Prior_8 <- Prior_9 <- Prior_10 <- Prior_11 <- NULL
modelStatement <- switch(EXPR = model,
'betaBin' = 'beta-binomial',
'Betabin' = 'beta-binomial',
'betabin' = 'beta-binomial',
'BetaBin' = 'beta-binomial',
'gammaEx' = 'gamma-exponential',
'Gammaex' = 'gamma-exponential',
'gammaex' = 'gamma-exponential',
'GammaEx' = 'gamma-exponential',
'dirMult' = 'dirichlet-multinomial',
'Dirmult' = 'dirichlet-multinomial',
'dirmult' = 'dirichlet-multinomial',
'DirMult' = 'dirichlet-multinomial',
'gammaPois' = 'gamma-poisson',
'GammaPois' = 'gamma-poisson',
'Gammapois' = 'gamma-poisson',
'gammapois' = 'gamma-poisson',
'normNorm' = 'normal-normal',
'NormNorm' = 'normal-normal',
'Normnorm' = 'normal-normal',
'normnorm' = 'normal-normal',
'tite.crm' = 'TITE CRM',
'crm' = 'CRM',
'surv' = 'time to event',
'invChisqNorm' = 'scaled inverse-chi-squared-normal',
'InvChisqNorm' = 'scaled inverse-chi-squared-normal',
'invchisqnorm' = 'scaled inverse-chi-squared-normal',
'invChiSqNorm' = 'scaled inverse-chi-squared-normal',
'invGammaNorm' = 'inverse-gamma-normal',
'InvGammaNorm' = 'inverse-gamma-normal',
'invgammanorm' = 'inverse-gamma-normal')
##
## ESS for conjugate models
##
if(model %in% c('betaBin','Betabin','betabin','BetaBin')){
if( as.numeric(prior[2]) <= 0 ) stop('For Beta(alpha,beta) distribution, alpha should be alpha > 0')
if( as.numeric(prior[3]) <= 0 ) stop('For Beta(alpha,beta) distribution, beta should be beta > 0')
label <- c('alpha','beta')
overall_out_label <- paste("(",paste(label,collapse=","),")",sep="")
myESS <- as.numeric(prior[2]) + as.numeric(prior[3])
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model", sep="")))
cat("\n")
cat("\n")
cat(noquote(paste("ESS for the beta",overall_out_label," prior is: ",myESS, sep="")))
cat("\n")
cat("\n")
cat("\n")
ESS <- myESS
}
if(model %in% c('gammaEx','Gammaex','gammaex','GammaEx')){
if( as.numeric(prior[2]) <= 0 ) stop('For Gamma(alpha,beta) distribution, alpha should be alpha > 0')
if( as.numeric(prior[3]) <= 0 ) stop('For Gamma(alpha,beta) distribution, beta should be beta > 0')
label <- c('alpha','beta')
overall_out_label <- paste("(",paste(label,collapse=","),")",sep="")
myESS <- as.numeric(prior[3])
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model", sep="")))
cat("\n")
cat("\n")
cat(noquote(paste("ESS for the gamma",overall_out_label," prior is: ",myESS, sep="")))
cat("\n")
cat("\n")
cat("\n")
ESS <- myESS
}
if(model %in% c('gammaPois', 'GammaPois', 'gammapois', 'Gammapois')){
if( as.numeric(prior[2]) <= 0 ) stop('For Gamma(alpha,beta) distribution, alpha should be alpha > 0')
if( as.numeric(prior[3]) <= 0 ) stop('For Gamma(alpha,beta) distribution, beta should be beta > 0')
label <- c('alpha','beta')
overall_out_label <- paste("(",paste(label,collapse=","),")",sep="")
myESS <- as.numeric(prior[3])
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model", sep="")))
cat("\n")
cat("\n")
cat(noquote(paste("ESS for the gamma",overall_out_label," prior is: ",myESS, sep="")))
cat("\n")
cat("\n")
cat("\n")
ESS <- myESS
}
if(model %in% c('normNorm', 'NormNorm', 'normnorm', 'Normnorm')){
label <- c('mu_0','sigma^2/n_0')
overall_out_label <- paste("(",paste(label,collapse=","),")",sep="")
prior <- suppressWarnings(as.numeric(gsub("[^0-9\\.]", "", prior)))
prior <- prior[!is.na(prior)]
itIsInteger <- round(as.numeric(prior[3])) == as.numeric(prior[3])
if( as.numeric(prior[2]) <= 0 ) stop(paste('For Normal ',overall_out_label,' distribution, sigma^2 should be sigma^2 > 0',sep=""))
if( as.numeric(prior[3]) <= 0 | itIsInteger == FALSE) stop(paste('For Normal ',overall_out_label,' distribution, n_0 should be a positive integer',sep=""))
myESS <- as.numeric(prior[3])
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model", sep="")))
cat("\n")
cat("\n")
cat(noquote(paste("ESS for the normal",overall_out_label," prior is: ",myESS, sep="")))
cat("\n")
cat("\n")
cat("\n")
ESS <- myESS
}
if(model %in% c('dirMult','Dirmult','dirmult','DirMult')){
label <- 'alpha1'
for(i in 2:(length(prior)-1)){
label <- c(label,paste('alpha',i,sep=""))
}
overall_out_label <- paste("(",paste(label,collapse=","),")",sep="")
aProblem1 <- sum( as.numeric(prior[2:length(prior)]) <= 0 ) > 0
aProblem2 <- sum( round(as.numeric(prior[2:length(prior)])) == as.numeric(prior[2:length(prior)]) ) < ( length(prior) - 1 )
if( aProblem1 == TRUE | aProblem2 == TRUE) stop(paste('For dirichlet ',overall_out_label,' distribution, all alpha_i should be a positive integers',sep=""))
myESS <- sum(as.numeric(prior[2:length(prior)]))
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model", sep="")))
cat("\n")
cat("\n")
cat(noquote(paste("ESS for the dirichlet",overall_out_label," prior is: ",myESS, sep="")))
cat("\n")
cat("\n")
cat("\n")
ESS <- myESS
}
##
## ESS for linear regression or logistic regression models
##
if(model %in% c('linreg','Linreg','logistic','Logistic')){
default_prior <- list(c(1,0,1000),c(1,0,1000),c(1,0,1000),
c(1,0,1000),c(1,0,1000),c(1,0,1000),
c(1,0,1000),c(1,0,1000),c(1,0,1000),
c(1,0,1000),c(1,0,1000),c(1,0,1000))
if(length(svec1)<12){svec1 <- c(svec1,rep(0,(12-length(svec1))))}
if(length(svec2)<12){svec2 <- c(svec2,rep(0,(12-length(svec2))))}
    if(model %in% c('linreg','Linreg')){model <- 1}
    if(model %in% c('logistic','Logistic')){model <- 2}
for(i in 1:length(prior)){
prior[[i]][prior[[i]] %in% c('norm','normal','N','Norm','Normal')] <- 1
prior[[i]][prior[[i]] %in% c('gamma','Gamma','Gam','gam')] <- 2
prior[[i]] <- as.numeric(prior[[i]])
}
for(i in 1:length(default_prior)){
assign(paste('Prior_',(i-1),sep=''),default_prior[[i]])
}
if(length(prior)>0){
for(i in 1:length(prior)){
assign(paste('Prior_',(i-1),sep=''),prior[[i]])
}
}
if(is.null(label)){
svec <- svec1 + svec2
svec[1] <- 1
label <- 'theta1'
for(i in 2:sum(svec)){
label <- c(label,paste('theta',i,sep=""))
}
}
svec1_label <- svec1[1:length(label)]
svec2_label <- svec2[1:length(label)]
svec1_label <- label[which(svec1_label==1)]
svec2_label <- label[which(svec2_label==1)]
overall_out_label <- paste("(",paste(label,collapse=","),")",sep="")
svec1_out_label <- paste("(",paste(svec1_label,collapse=","),")",sep="")
svec2_out_label <- paste("(",paste(svec2_label,collapse=","),")",sep="")
if(fast==FALSE){
myESS <- ESS_RegressionCalc (Reg_model = model, Num_cov = ncov,
Prior_0 = Prior_0, Prior_1 = Prior_1, Prior_2 = Prior_2,
Prior_3 = Prior_3, Prior_4 = Prior_4, Prior_5 = Prior_5,
Prior_6 = Prior_6, Prior_7 = Prior_7, Prior_8 = Prior_8,
Prior_9 = Prior_9, Prior_10 = Prior_10, Prior_11 = Prior_11,
M = m, NumSims = nsim,
theta_sub1=svec1, theta_sub2=svec2 )
}
if(fast==TRUE){
myESS <- ESS_RegressionCalcFast1 (Reg_model = model, Num_cov = ncov,
Prior_0 = Prior_0, Prior_1 = Prior_1, Prior_2 = Prior_2,
Prior_3 = Prior_3, Prior_4 = Prior_4, Prior_5 = Prior_5,
Prior_6 = Prior_6, Prior_7 = Prior_7, Prior_8 = Prior_8,
Prior_9 = Prior_9, Prior_10 = Prior_10, Prior_11 = Prior_11,
M = m, NumSims = nsim,
theta_sub1=svec1, theta_sub2=svec2 )
}
ESSoutput <- list(#ESSoverall = myESS$ESSwholetheta,
ESSsubvec1 = myESS$ESSsubvector1,
ESSsubvec2 = myESS$ESSsubvector2)
    modelStatement <- ifelse(model == 1, 'linear regression', 'logistic regression')
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model", sep="")))
cat("\n")
cat("\n")
#cat(noquote(paste("ESSoverall: Overall ESS for the whole vector ",overall_out_label, sep="")))
#cat("\n")
cat(noquote(paste("ESSsubvector1: ESS for the first sub-vector ",svec1_out_label, sep="")))
cat("\n")
cat(noquote(paste("ESSsubvector1: ESS for the second sub-vector ",svec2_out_label, sep="")))
cat("\n")
cat("\n")
ESS <- ESSoutput
}
##
## ESS for CRM models
## Uses the essCRM function in 'internal'
##
# For CRM
if(model=='crm'){
myEssCRM <- essCRM(type=model,
prior=prior,
m=m,nsim=nsim,
PI=PI,
betaSD=betaSD,
target=target)
label <- c('beta')
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model", sep="")))
cat("\n")
cat("\n")
cat(noquote(paste("ESS for the N(0,",round(betaSD^2,10),") ",label," prior"," is: ",round(myEssCRM,10), sep="")))
cat("\n")
cat("\n")
cat("\n")
ESS <- myEssCRM
}
# For TITE CRM
if(model=='tite.crm'){
myEssCRM <- essCRM(type=model,
prior=prior,
m=m,nsim=nsim,
PI=PI,
betaSD=betaSD,
target=target,
obswin=obswin,rate=rate,accrual=accrual)
label <- c('beta')
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model", sep="")))
cat("\n")
cat("\n")
cat(noquote(paste("ESS for the N(0,",round(betaSD^2,10),") ",label," prior"," is: ",round(myEssCRM,10), sep="")))
cat("\n")
cat("\n")
cat("\n")
ESS <- myEssCRM
}
##
## ESS for time to event models
## Uses the essSurv function in 'internal'
##
if(model=='surv'){
myEssSurv <- essSurv(shapeParam=shapeParam,
scaleParam=scaleParam,
m=m,nsim=nsim)
label <- c('alpha','beta')
overall_out_label <- paste("(",paste(label,collapse=","),")",sep="")
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model", sep="")))
cat("\n")
cat("\n")
cat(noquote(paste("ESS for the inverse-gamma ",overall_out_label," prior is: ",round(myEssSurv,2), sep="")))
cat("\n")
cat("\n")
cat("\n")
ESS <- myEssSurv
}
##
## ESS for scaled-inverse-chi-squared-normal
## Uses the essNormal function in 'internal'
##
if(model %in% c('invChisqNorm','InvChisqNorm','invchisqnorm','invChiSqNorm')){
#if(is.character(prior) == TRUE){ prior <- prior[2:length(prior)] }
prior <- suppressWarnings(as.numeric(gsub("[^0-9\\.]", "", prior)))
prior <- prior[!is.na(prior)]
if(length(prior)==2){
aProblem1 <- sum( as.numeric(prior) <= 0 ) > 0
if( aProblem1 == TRUE ) stop(paste("Both parameters for the scaled-inv-chi-squared prior should be positive", sep=""))
myESSNorm <- essNormal(nu=prior[1],sigma0=prior[2],knownMean=TRUE,m=m,nsim=nsim)
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model (with known mean and unknown variance)", sep="")))
cat("\n")
cat("\n")
cat(noquote(paste("ESS for the scaled-inv-chi-squared(",prior[1],",",prior[2],") prior for variance"," is: ",myESSNorm, sep="")))
cat("\n")
cat("\n")
cat("\n")
ESS <- myESSNorm
}
if(length(prior)==4){
if( length(prior) != 4 ) stop(paste("Please specify two parameters (nu_0,sigma^2) for scaled-inverse-chi-squared(nu_0,sigma^2) prior for variance,
and two parameters (mu_0 and phi) for normal(mu_0,sigma^2/phi) prior for mean", sep=""))
aProblem1 <- sum( as.numeric(prior[1:2]) <= 0 ) > 0
if( aProblem1 == TRUE ) stop(paste("Both parameters for the scaled-inv-chi-squared prior should be positive", sep=""))
itIsInteger <- round(as.numeric(prior[4])) == as.numeric(prior[4])
if( as.numeric(prior[4]) <= 0 | itIsInteger == FALSE) stop(paste('For Normal (mu_0,sigma^2/phi) distribution, phi should be a positive integer',sep=""))
myESSNorm <- essNormal(nu=prior[1],sigma0=prior[2],mu0=prior[3],phi=prior[4],knownMean=FALSE,m=m,nsim=nsim)
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model (with unknown mean and unknown variance)", sep="")))
cat("\n")
cat("\n")
#cat(noquote(paste("ESS_overall: Overall ESS for the priors specified for the mean and variance is ",myESSNorm$ESS_overall, sep="")))
#cat("\n")
cat(noquote(paste("ESS_sigmasq: ESS for the scaled-inv-chi-squared(",prior[1],",",prior[2],") prior for variance is ",round(myESSNorm$ESS_sigmasq,2), sep="")))
cat("\n")
cat(noquote(paste("ESS_mu: ESS for the normal prior for mean is ",round(myESSNorm$ESS_mu,2), sep="")))
cat("\n")
cat("\n")
ESS <- myESSNorm
}
}
##
## ESS for inverse-gamma-normal
## Uses the essNormal function in 'internal'
##
if(model %in% c('invGammaNorm','InvGammaNorm','inverse-gamma-normal','invgammanorm')){
#if(is.character(prior) == TRUE){ prior <- prior[2:length(prior)] }
prior <- suppressWarnings(as.numeric(gsub("[^0-9\\.]", "", prior)))
prior <- prior[!is.na(prior)]
if(length(prior)==2){
aProblem1 <- sum( as.numeric(prior) <= 0 ) > 0
if( aProblem1 == TRUE ) stop(paste("Both parameters for the inverse-gamma prior should be positive", sep=""))
myESSNorm <- essNormal(nu=(prior[1]*2),sigma0=(prior[2]/prior[1]),knownMean=TRUE,m=m,nsim=nsim)
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model (with known mean and unknown variance)", sep="")))
cat("\n")
cat("\n")
cat(noquote(paste("ESS for the inv-gamma(",prior[1],",",prior[2],") prior for variance"," is: ",myESSNorm, sep="")))
cat("\n")
cat("\n")
cat("\n")
ESS <- myESSNorm
}
if(length(prior)==4){
if( length(prior) != 4 ) stop(paste("Please specify two parameters (alpha,beta) for inverse-gamma (alpha,beta) prior for variance,
and two parameters (mu_0 and phi) for normal(mu_0,sigma^2/phi) prior for mean", sep=""))
aProblem1 <- sum( as.numeric(prior[1:2]) <= 0 ) > 0
if( aProblem1 == TRUE ) stop(paste("Both parameters for the inverse-gamma prior should be positive", sep=""))
itIsInteger <- round(as.numeric(prior[4])) == as.numeric(prior[4])
if( as.numeric(prior[4]) <= 0 | itIsInteger == FALSE) stop(paste('For Normal (mu_0,sigma^2/phi) distribution, phi should be a positive integer',sep=""))
myESSNorm <- essNormal(nu=(prior[1]*2),sigma0=(prior[2]/prior[1]),mu0=prior[3],phi=prior[4],knownMean=FALSE,m=m,nsim=nsim)
cat("\n")
cat(noquote(paste("ESS was calculated for a ",modelStatement," model (with unknown mean and unknown variance)", sep="")))
cat("\n")
cat("\n")
#cat(noquote(paste("ESS_overall: Overall ESS for the priors specified for the mean and variance is ",myESSNorm$ESS_overall, sep="")))
#cat("\n")
cat(noquote(paste("ESS_sigmasq: ESS for the inv-gamma(",prior[1],",",prior[2],") prior for variance is ",round(myESSNorm$ESS_sigmasq,2), sep="")))
cat("\n")
cat(noquote(paste("ESS_mu: ESS for the normal prior for mean is ",round(myESSNorm$ESS_mu,2), sep="")))
cat("\n")
cat("\n")
ESS <- myESSNorm
}
}
ESS
}
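
## Minimal usage sketches for the closed-form conjugate cases
## (hyperparameters here are hypothetical):
if (FALSE) {
  ess(model = 'betaBin', prior = c('beta', 2, 8))     # ESS = alpha + beta = 10
  ess(model = 'gammaPois', prior = c('gamma', 3, 5))  # ESS = beta = 5
}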
|
/scratch/gouwar.j/cran-all/cranData/BayesESS/R/ess.R
|
#' @importFrom Rcpp evalCpp
#' @importFrom dfcrm titesim crmsim
#' @useDynLib BayesESS
##################################################################################
##
## Main R function written by Satoshi Morita
## Downloaded 12/12/17 via https://biostatistics.mdanderson.org/softwaredownload/
##
##################################################################################
#***************************************************************************
# R code for determining the Effective Sample Size (ESS)
# of a logistic or linear regression model
# Version 1.0, 11Aug2009
#***************************************************************************
#For the example input data shown here:
#This normal linear regression example
# should calculate the ESS of 2 for the whole vector (theta0,theta_1,theta_2,theta_3,tau),
# the ESS1 of 2 for the subvector 1 (theta0,theta_1,theta_2,theta_3), and
# the ESS2 of 2 for the subvector 2 (tau).
## Notes on a regression model ##
## Let theta_j for j=0,...,d be a parameter for a regression model.
## Let theta_0 be a parameter for an intercept.
## Let theta_1,..., theta_d be parameters for regression coefficients of covariates X_1,..., X_d.
## Thus, a linear term of a selected regression model is given as theta_0 + X_1*theta_1 + ... + X_d*theta_d.
##### Please input the following information at Steps 1 to 5 #####
## Step 1. Specify a regression model by entering 1 or 2:
## 1 for a linear regression model, --> SEE Note (2) below
## 2 for a logistic regression model.
# Reg_model <- 1
## Step 2. Specify the number of covariates (up to 10, 1<=d<=10):
# Num_cov <- 3
## Step 3. Specify a prior distribution function for each theta by imputing 1 or 2,
## 1 for a normal N(mu,s2) with mean mu and variance s2,
## 2 for a gamma Ga(a,b) with mean a/b and variance a/(b*b)
## and give the numerical values of your hyperparameters:
## For example, if you assume that theta_0 follows N(0,1000), please input "Prior_0 <- c(1, 0, 1000)".
## Note(1): If, for example, the number of covariates is set at 5, please just ignore the entries for
## theta_6,..,theta_10.
## Those numerical values do not affect the ESS computations.
## Note(2): Please specify a gamma prior for the precision parameter tau using the final line
## following those for the covariates.
## For example, the number of covariates is 5, please specify the prior using "Prior_6" as
## "Prior_6 <- c(2, 0.001, 0.001)".
# Prior_0 <- c(1, 0, 1) # for theta_0
# Prior_1 <- c(1, 0, 1) # for theta_1
# Prior_2 <- c(1, 0, 1) # for theta_2
# Prior_3 <- c(1, 0, 1) # for theta_3
# Prior_4 <- c(2, 1, 1) # for theta_4
# Prior_5 <- c(1, 0, 1000) # for theta_5
# Prior_6 <- c(1, 0, 1000) # for theta_6
# Prior_7 <- c(1, 0, 1000) # for theta_7
# Prior_8 <- c(1, 0, 1000) # for theta_8
# Prior_9 <- c(1, 0, 1000) # for theta_9
# Prior_10<- c(1, 0, 1000) # for theta_10
# Prior_11<- c(1, 0, 1000) # for theta_11
## Step 4. Set M being a positive integer chosen so that, initially, it is reasonable to assume the prior ESS <= M.
## If M is not sufficiently large, 'NA' returns as a result of the computations.
# M <- 10
## Step 5. Specify the number of simulations. A suggested value is 5000.
# The user can use NumSims = 10,000 to carry out the most accurate ESS computations.
# A value of NumSims as low as 1,000 may be used to reduce runtime.
# NumSims <- 5000
## Step 6. If you would like to compute ESS of a subvector of (theta_0,...,theta_11),
## please input "1" in the corresponding elements in the following indicator vectors.
## This program can compute ESSs of two subvectors of theta at the same time.
## If you are interested in three or more subvectors, please repeat the computations
## with different indicator vectors.
## For example, if you are interested in the first three parameters, theta_0, theta_1, and theta_2,
## please input 1's as c( 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
## theta0,theta1,theta2,theta3,theta4,theta5,theta6,theta7,theta8,theta9,theta10,theta11
# theta_sub1 <- c( 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
# theta_sub2 <- c( 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0)
#########################################################################################################
########### End of sample input information. #########
#########################################################################################################
#########################################################################################################
########### The code below performs the actual Regression Calculation. #########
#########################################################################################################
ESS_RegressionCalc <- function( Reg_model, Num_cov,
Prior_0, Prior_1, Prior_2, Prior_3, Prior_4, Prior_5,
Prior_6, Prior_7, Prior_8, Prior_9, Prior_10, Prior_11,
M, NumSims,
theta_sub1, theta_sub2
)
{
##### Start computing #####
# Specify the prior means and Dp values under the priors and the Dq0 values under the epsilon-information priors.
Prior <- rbind(Prior_0,Prior_1,Prior_2,Prior_3,Prior_4,Prior_5,Prior_6,Prior_7,Prior_8,Prior_9,Prior_10,Prior_11)
p_mn <- numeric(1)
Dp <- numeric(1)
Dq0 <- numeric(1)
c <- 10000
for (j in 1:12){
if (Prior[j,1] == 1){
p_mn.s <- Prior[j,2]
Dp.s <- Prior[j,3]^(-1)
Dq0.s <- (Prior[j,3]*c)^(-1)
}
if (Prior[j,1] == 2){
p_mn.s <- Prior[j,2]/Prior[j,3]
Dp.s <- (Prior[j,2] -1)/p_mn.s^2
Dq0.s <- (Prior[j,2]/c-1)/p_mn.s^2
}
p_mn <- rbind(p_mn,p_mn.s)
Dp <- rbind(Dp ,Dp.s )
Dq0 <- rbind(Dq0 ,Dq0.s )
}
p_mn <- p_mn[2:13]
Dp <- Dp[2:13]
Dq0 <- Dq0[2:13]
th_ind <- numeric(12)
dim_th1 <- Num_cov+2
dim_th2 <- Num_cov+1
if (Reg_model == 1){
for (j in 1:dim_th1){
th_ind[j] <- 1
}
}
if (Reg_model == 2){
for (j in 1:dim_th2){
th_ind[j] <- 1
}
}
cov_ind <- numeric(11)
dim_linear <- Num_cov+1
for (j in 1:dim_linear){
cov_ind[j] <- 1
}
# Compute sum_Dp, the trace of the information matrix of the prior p
sum_Dp <- sum(Dp*th_ind)
sum_Dp.s1 <- sum(Dp*th_ind*theta_sub1)
sum_Dp.s2 <- sum(Dp*th_ind*theta_sub2)
# Compute sum_Dq0, the trace of the information matrix of the epsilon-information prior q0
sum_Dq0 <- sum(Dq0*th_ind)
sum_Dq0.s1 <- sum(Dq0*th_ind*theta_sub1)
sum_Dq0.s2 <- sum(Dq0*th_ind*theta_sub2)
# Simulate Monte Carlo samples Y from f(Y)
DqYMrep.out <- numeric(M+1)
DqYMrep.out.s1 <- numeric(M+1)
DqYMrep.out.s2 <- numeric(M+1)
for (t in 1:NumSims)
{
DqYm.out <- numeric(M)
DqYm.out.s1 <- numeric(M)
DqYm.out.s2 <- numeric(M)
DqY <- numeric(1)
DqY.s1 <- numeric(1)
DqY.s2 <- numeric(1)
for (i in 1:M) {
# Simulate Monte Carlo samples X from Unif(-1,+1)
# If you would like, you can modify the upper and lower limits of the distributions.
X1 <- runif(1,min=-1,max=+1)
X2 <- runif(1,min=-1,max=+1)
X3 <- runif(1,min=-1,max=+1)
X4 <- runif(1,min=-1,max=+1)
X5 <- runif(1,min=-1,max=+1)
X6 <- runif(1,min=-1,max=+1)
X7 <- runif(1,min=-1,max=+1)
X8 <- runif(1,min=-1,max=+1)
X9 <- runif(1,min=-1,max=+1)
X10 <- runif(1,min=-1,max=+1)
X <- c(1,X1,X2,X3,X4,X5,X6,X7,X8,X9,X10)*cov_ind
if (Reg_model == 1){
Dq.lin <- X*X*p_mn[Num_cov+2]
Dq.all <- numeric(12)
Dq.all[1:dim_th1] <- c(Dq.lin[1:dim_linear],p_mn[Num_cov+2]^(-2)/2)
Dq <- sum(Dq.all)
Dq.s1 <- sum(Dq.all*theta_sub1)
Dq.s2 <- sum(Dq.all*theta_sub2)
}
if (Reg_model == 2){
pi <- exp(sum(p_mn[1:11]*X))/(1+exp(sum(p_mn[1:11]*X)))
pi_pi2 <- pi - pi^2
Dq <- sum(X*X*(pi-pi^2))
Dq.s1 <- sum(X*X*(pi-pi^2)*theta_sub1[1:11])
Dq.s2 <- sum(X*X*(pi-pi^2)*theta_sub2[1:11])
}
DqY <- DqY + Dq
DqY.s1 <- DqY.s1 + Dq.s1
DqY.s2 <- DqY.s2 + Dq.s2
DqYm.out[i] <- sum_Dq0 + DqY
DqYm.out.s1[i] <- sum_Dq0.s1 + DqY.s1
DqYm.out.s2[i] <- sum_Dq0.s2 + DqY.s2
}
DqYm.out <- c(sum_Dq0, DqYm.out)
DqYm.out.s1 <- c(sum_Dq0.s1, DqYm.out.s1)
DqYm.out.s2 <- c(sum_Dq0.s2, DqYm.out.s2)
DqYMrep.out <- rbind(DqYMrep.out,DqYm.out)
DqYMrep.out.s1 <- rbind(DqYMrep.out.s1,DqYm.out.s1)
DqYMrep.out.s2 <- rbind(DqYMrep.out.s2,DqYm.out.s2)
}
T1 <- NumSims+1
DqYMrep.out <- DqYMrep.out[c(2:T1),]
DqYMrep.out.s1 <- DqYMrep.out.s1[c(2:T1),]
DqYMrep.out.s2 <- DqYMrep.out.s2[c(2:T1),]
Dqm.out <- numeric(M+1)
Dqm.out.s1 <- numeric(M+1)
Dqm.out.s2 <- numeric(M+1)
M1 <- M+1
for (i in 1:M1) {
Dqm.out[i] <- mean(DqYMrep.out[,i])
Dqm.out.s1[i] <- mean(DqYMrep.out.s1[,i])
Dqm.out.s2[i] <- mean(DqYMrep.out.s2[,i])
}
# Compute the ESS of the whole theta.
D.m <- Dqm.out - sum_Dp
D.min.n <- which(abs(D.m) == min(abs(D.m)))
D.min.v <- D.m[which(abs(D.m) == min(abs(D.m)))]
{
if (D.min.v < 0) {
D.min.v.nxt <- D.m[D.min.n+1]
ESS <- D.min.n - 1 + (-D.min.v / (-D.min.v + D.min.v.nxt))
}
else if (D.min.v > 0) {
D.min.v.prv <- D.m[D.min.n-1]
ESS <- D.min.n - 1 - (D.min.v / (D.min.v - D.min.v.prv))
}
else if (D.min.v == 0) {
ESS <- D.min.n -1
}
}
# Compute the ESS.1 of subvector 1 of theta.
D.m.s1 <- Dqm.out.s1 - sum_Dp.s1
D.min.n.s1 <- which(abs(D.m.s1) == min(abs(D.m.s1)))
D.min.v.s1 <- D.m.s1[which(abs(D.m.s1) == min(abs(D.m.s1)))]
{
if (D.min.v.s1 < 0) {
D.min.v.nxt.s1 <- D.m.s1[D.min.n.s1+1]
ESS.s1 <- D.min.n.s1 - 1 + (-D.min.v.s1 / (-D.min.v.s1 + D.min.v.nxt.s1))
}
else if (D.min.v.s1 > 0) {
D.min.v.prv.s1 <- D.m.s1[D.min.n.s1-1]
ESS.s1 <- D.min.n.s1 - 1 - (D.min.v.s1 / (D.min.v.s1 - D.min.v.prv.s1))
}
else if (D.min.v.s1 == 0) {
ESS.s1 <- D.min.n.s1 -1
}
}
# Compute the ESS.2 of subvector 2 of theta.
D.m.s2 <- Dqm.out.s2 - sum_Dp.s2
D.min.n.s2 <- which(abs(D.m.s2) == min(abs(D.m.s2)))
D.min.v.s2 <- D.m.s2[which(abs(D.m.s2) == min(abs(D.m.s2)))]
{
if (D.min.v.s2 < 0) {
D.min.v.nxt.s2 <- D.m.s2[D.min.n.s2+1]
ESS.s2 <- D.min.n.s2 - 1 + (-D.min.v.s2 / (-D.min.v.s2 + D.min.v.nxt.s2))
}
else if (D.min.v.s2 > 0) {
D.min.v.prv.s2 <- D.m.s2[D.min.n.s2-1]
ESS.s2 <- D.min.n.s2 - 1 - (D.min.v.s2 / (D.min.v.s2 - D.min.v.prv.s2))
}
else if (D.min.v.s2 == 0) {
ESS.s2 <- D.min.n.s2 -1
}
}
### The prior ESS of the whole theta is ESS
### The prior ESS of subvector 1 is ESS.s1
### The prior ESS of subvector 2 is ESS.s2
#return( list(ESSwholetheta=ESS, ESSsubvector1=ESS.s1, ESSsubvector2=ESS.s2) )
return( list(ESSsubvector1=ESS.s1, ESSsubvector2=ESS.s2) )
} # end of ESS_RegressionCalc function
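## A minimal call sketch for this internal helper, reusing the sample input
## values documented above (a linear regression with 3 covariates):
if (FALSE) {
  ESS_RegressionCalc(Reg_model = 1, Num_cov = 3,
                     Prior_0 = c(1, 0, 1), Prior_1 = c(1, 0, 1),
                     Prior_2 = c(1, 0, 1), Prior_3 = c(1, 0, 1),
                     Prior_4 = c(2, 1, 1), Prior_5 = c(1, 0, 1000),
                     Prior_6 = c(1, 0, 1000), Prior_7 = c(1, 0, 1000),
                     Prior_8 = c(1, 0, 1000), Prior_9 = c(1, 0, 1000),
                     Prior_10 = c(1, 0, 1000), Prior_11 = c(1, 0, 1000),
                     M = 10, NumSims = 1000,
                     theta_sub1 = c(1,1,1,1,0,0,0,0,0,0,0,0),
                     theta_sub2 = c(0,0,0,0,1,0,0,0,0,0,0,0))
}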
#######################################################################
###
### A faster version 1 of ESS_RegressionCalc using some Cpp functions
###
#######################################################################
ESS_RegressionCalcFast1 <- function( Reg_model, Num_cov,
Prior_0, Prior_1, Prior_2, Prior_3, Prior_4, Prior_5,
Prior_6, Prior_7, Prior_8, Prior_9, Prior_10, Prior_11,
M, NumSims,
theta_sub1, theta_sub2
)
{
##### Start computing #####
  # Specify the prior means and Dp values under the priors and the Dq0 values under the epsilon-information priors.
Prior <- rbind(Prior_0,Prior_1,Prior_2,Prior_3,Prior_4,Prior_5,Prior_6,Prior_7,Prior_8,Prior_9,Prior_10,Prior_11)
p_mn <- numeric(1)
Dp <- numeric(1)
Dq0 <- numeric(1)
c <- 10000
for (j in 1:12){
if (Prior[j,1] == 1){
p_mn.s <- Prior[j,2]
Dp.s <- Prior[j,3]^(-1)
Dq0.s <- (Prior[j,3]*c)^(-1)
}
if (Prior[j,1] == 2){
p_mn.s <- Prior[j,2]/Prior[j,3]
Dp.s <- (Prior[j,2] -1)/p_mn.s^2
Dq0.s <- (Prior[j,2]/c-1)/p_mn.s^2
}
p_mn <- rbind(p_mn,p_mn.s)
Dp <- rbind(Dp ,Dp.s )
Dq0 <- rbind(Dq0 ,Dq0.s )
}
p_mn <- p_mn[2:13]
Dp <- Dp[2:13]
Dq0 <- Dq0[2:13]
th_ind <- numeric(12)
dim_th1 <- Num_cov+2
dim_th2 <- Num_cov+1
if (Reg_model == 1){
for (j in 1:dim_th1){
th_ind[j] <- 1
}
}
if (Reg_model == 2){
for (j in 1:dim_th2){
th_ind[j] <- 1
}
}
cov_ind <- numeric(11)
dim_linear <- Num_cov+1
for (j in 1:dim_linear){
cov_ind[j] <- 1
}
# Compute sum_Dp, the trace of the information matrix of the prior p
sum_Dp <- sum(Dp*th_ind)
sum_Dp.s1 <- sum(Dp*th_ind*theta_sub1)
sum_Dp.s2 <- sum(Dp*th_ind*theta_sub2)
# Compute sum_Dq0, the trace of the information matrix of the epsilon-information prior q0
sum_Dq0 <- sum(Dq0*th_ind)
sum_Dq0.s1 <- sum(Dq0*th_ind*theta_sub1)
sum_Dq0.s2 <- sum(Dq0*th_ind*theta_sub2)
# Simulate Monte Carlo samples Y from f(Y)
DqYMrep.out <- numeric(M+1)
DqYMrep.out.s1 <- numeric(M+1)
DqYMrep.out.s2 <- numeric(M+1)
for (t in 1:NumSims)
{
DqYm.out <- numeric(M)
DqYm.out.s1 <- numeric(M)
DqYm.out.s2 <- numeric(M)
DqY <- numeric(1)
DqY.s1 <- numeric(1)
DqY.s2 <- numeric(1)
for (i in 1:M) {
# Simulate Monte Carlo samples X from Unif(-1,+1)
# If you would like, you can modify the upper and lower limits of the distributions.
X1 <- runif(1,min=-1,max=+1)
X2 <- runif(1,min=-1,max=+1)
X3 <- runif(1,min=-1,max=+1)
X4 <- runif(1,min=-1,max=+1)
X5 <- runif(1,min=-1,max=+1)
X6 <- runif(1,min=-1,max=+1)
X7 <- runif(1,min=-1,max=+1)
X8 <- runif(1,min=-1,max=+1)
X9 <- runif(1,min=-1,max=+1)
X10 <- runif(1,min=-1,max=+1)
X <- c(1,X1,X2,X3,X4,X5,X6,X7,X8,X9,X10)*cov_ind
if (Reg_model == 1){
Dq.lin <- X*X*p_mn[Num_cov+2]
Dq.all <- numeric(12)
Dq.all[1:dim_th1] <- c(Dq.lin[1:dim_linear],p_mn[Num_cov+2]^(-2)/2)
Dq <- sum(Dq.all)
Dq.s1 <- sum(Dq.all*theta_sub1)
Dq.s2 <- sum(Dq.all*theta_sub2)
}
if (Reg_model == 2){
pi <- exp(sum(p_mn[1:11]*X))/(1+exp(sum(p_mn[1:11]*X)))
pi_pi2 <- pi - pi^2
Dq <- sum(X*X*(pi-pi^2))
Dq.s1 <- sum(X*X*(pi-pi^2)*theta_sub1[1:11])
Dq.s2 <- sum(X*X*(pi-pi^2)*theta_sub2[1:11])
}
DqY <- DqY + Dq
DqY.s1 <- DqY.s1 + Dq.s1
DqY.s2 <- DqY.s2 + Dq.s2
DqYm.out[i] <- sum_Dq0 + DqY
DqYm.out.s1[i] <- sum_Dq0.s1 + DqY.s1
DqYm.out.s2[i] <- sum_Dq0.s2 + DqY.s2
}
DqYm.out <- c(sum_Dq0, DqYm.out)
DqYm.out.s1 <- c(sum_Dq0.s1, DqYm.out.s1)
DqYm.out.s2 <- c(sum_Dq0.s2, DqYm.out.s2)
DqYMrep.out <- rbind(DqYMrep.out,DqYm.out)
DqYMrep.out.s1 <- rbind(DqYMrep.out.s1,DqYm.out.s1)
DqYMrep.out.s2 <- rbind(DqYMrep.out.s2,DqYm.out.s2)
}
T1 <- NumSims+1
DqYMrep.out <- DqYMrep.out[c(2:T1),]
DqYMrep.out.s1 <- DqYMrep.out.s1[c(2:T1),]
DqYMrep.out.s2 <- DqYMrep.out.s2[c(2:T1),]
Dqm.out <- numeric(M+1)
Dqm.out.s1 <- numeric(M+1)
Dqm.out.s2 <- numeric(M+1)
M1 <- M+1
for (i in 1:M1) {
Dqm.out[i] <- fastMean(DqYMrep.out[,i])
Dqm.out.s1[i] <- fastMean(DqYMrep.out.s1[,i])
Dqm.out.s2[i] <- fastMean(DqYMrep.out.s2[,i])
}
# Compute the ESS of the whole theta.
D.m <- Dqm.out - sum_Dp
D.min.n <- which(abs(D.m) == min(abs(D.m)))
D.min.v <- D.m[which(abs(D.m) == min(abs(D.m)))]
{
if (D.min.v < 0) {
D.min.v.nxt <- D.m[D.min.n+1]
ESS <- D.min.n - 1 + (-D.min.v / (-D.min.v + D.min.v.nxt))
}
else if (D.min.v > 0) {
D.min.v.prv <- D.m[D.min.n-1]
ESS <- D.min.n - 1 - (D.min.v / (D.min.v - D.min.v.prv))
}
else if (D.min.v == 0) {
ESS <- D.min.n -1
}
}
# Compute the ESS.1 of subvector 1 of theta.
D.m.s1 <- Dqm.out.s1 - sum_Dp.s1
D.min.n.s1 <- which(abs(D.m.s1) == min(abs(D.m.s1)))
D.min.v.s1 <- D.m.s1[which(abs(D.m.s1) == min(abs(D.m.s1)))]
{
if (D.min.v.s1 < 0) {
D.min.v.nxt.s1 <- D.m.s1[D.min.n.s1+1]
ESS.s1 <- D.min.n.s1 - 1 + (-D.min.v.s1 / (-D.min.v.s1 + D.min.v.nxt.s1))
}
else if (D.min.v.s1 > 0) {
D.min.v.prv.s1 <- D.m.s1[D.min.n.s1-1]
ESS.s1 <- D.min.n.s1 - 1 - (D.min.v.s1 / (D.min.v.s1 - D.min.v.prv.s1))
}
else if (D.min.v.s1 == 0) {
ESS.s1 <- D.min.n.s1 -1
}
}
# Compute the ESS.2 of subvector 2 of theta.
D.m.s2 <- Dqm.out.s2 - sum_Dp.s2
D.min.n.s2 <- which(abs(D.m.s2) == min(abs(D.m.s2)))
D.min.v.s2 <- D.m.s2[which(abs(D.m.s2) == min(abs(D.m.s2)))]
{
if (D.min.v.s2 < 0) {
D.min.v.nxt.s2 <- D.m.s2[D.min.n.s2+1]
ESS.s2 <- D.min.n.s2 - 1 + (-D.min.v.s2 / (-D.min.v.s2 + D.min.v.nxt.s2))
}
else if (D.min.v.s2 > 0) {
D.min.v.prv.s2 <- D.m.s2[D.min.n.s2-1]
ESS.s2 <- D.min.n.s2 - 1 - (D.min.v.s2 / (D.min.v.s2 - D.min.v.prv.s2))
}
else if (D.min.v.s2 == 0) {
ESS.s2 <- D.min.n.s2 -1
}
}
### The prior ESS of the whole theta is ESS
### The prior ESS of subvector 1 is ESS.s1
### The prior ESS of subvector 2 is ESS.s2
#return( list(ESSwholetheta=ESS, ESSsubvector1=ESS.s1, ESSsubvector2=ESS.s2) )
return( list(ESSsubvector1=ESS.s1, ESSsubvector2=ESS.s2) )
} # end of ESS_RegressionCalcFast1 function
##################################################################################
##
## Main R function written by Jaejoon Song
## Function to calculate ESS for CRM
## Last update: 1/20/2018
##
##################################################################################
essCRM <- function(type,PI,prior,betaSD,target,m,nsim,obswin=30,rate=2,accrual="poisson"){
#library(dfcrm)
mRange <- 0:m
numMC <- nsim
getDiff <- function(d,w,y){
denom <- d*w - 1
term1 <- d*(log(d)^2)*w*(y-d*w)
term2 <- d*(log(d)^2)*w
term3 <- log(d)*(y-d*w)
result <- term1/(denom^2) + term2/denom - term3/denom
result
}
getDiffCRM <- function(d,y){
denom <- d - 1
term1 <- d*(log(d)^2)*(y-d)
term2 <- d*(log(d)^2)
term3 <- log(d)*(y-d)
result <- term1/(denom^2) + term2/denom - term3/denom
result
}
deltaBar <- rep(NA,length(mRange))
for(q in 1:length(mRange)){
m <- mRange[q]
sampMC <- rep(NA,numMC)
Dq <- 0
if(m>0){
for(k in 1:numMC){
set.seed(k)
if(type=='tite.crm'){
simData <- titesim(PI, prior,
target, n=max(mRange),
x0=1, nsim=1,
# obswin=30, rate=2,
obswin=obswin, rate=rate,
accrual="poisson",
scale=betaSD, seed=k)
get <- sort(sample(1:max(mRange),m))
d <- prior[simData$level][get]
y <- simData$tox[get]
Tup <- obswin
u <- simData$arrival[get]
u[u=='Inf' | u > Tup] <- Tup
mydim <- length(simData$prior)
w <- u/Tup
}
if(type=='crm'){
simData <- crmsim(PI, prior,
target, n = max(mRange),
x0 = 1, nsim = 1,
mcohort = 1, restrict = TRUE,
count = TRUE, method = "bayes",
model = "empiric", intcpt = 3,
scale = betaSD, seed = k)
get <- sort(sample(1:max(mRange),m))
d <- prior[simData$level][get]
y <- simData$tox[get]
#Tup <- obswin
#u <- simData$arrival[get]
#u[u=='Inf' | u > Tup] <- Tup
mydim <- length(simData$prior)
w <- 1
}
Dq <- 0
if(type=='tite.crm'){
for(i in 1:m){
Dq <- Dq + getDiff(d=d[i],w=w[i],y=y[i])
}
}
if(type=='crm'){
for(i in 1:m){
Dq <- Dq + getDiffCRM(d=d[i],y=y[i])
}
}
sampMC[k] <- Dq
}
deltaBar[q] <- 1/((betaSD)^2) + mean(sampMC)
}
if(m==0){
deltaBar[q] <- 1/((betaSD)^2)
}
}
min.n.index <- which.min(abs(deltaBar))
min.n <- mRange[which.min(abs(deltaBar))]
min.v <- deltaBar[which.min(abs(deltaBar))]
interpolated <- approx(mRange, deltaBar, method = "linear")
ESS <- interpolated$x[which.min(abs(interpolated$y))]
ESS
}
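## A hypothetical call sketch; the skeleton, true toxicity rates, target, and
## betaSD below are made-up illustrations, not recommended defaults:
if (FALSE) {
  essCRM(type = 'crm',
         PI = c(0.05, 0.10, 0.20, 0.35, 0.50),     # assumed true rates
         prior = c(0.05, 0.10, 0.20, 0.35, 0.50),  # assumed skeleton
         betaSD = sqrt(1.34), target = 0.20, m = 20, nsim = 100)
}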
##################################################################################
##
## R function written by Jaejoon Song
## Function to calculate ESS for time to event outcome
## Last update: 1/20/2018
##
##################################################################################
essSurv <- function(shapeParam,scaleParam,m,nsim){
#library(MCMCpack)
mRange <- 1:m
numMC <- nsim
## Generate prior from inverse gamma distribution
myMu <- rinvgamma(numMC, shape=shapeParam, scale = scaleParam)
getDp <- function(alpha,beta){
myMu <- beta/(alpha-1)
myDp <- -(alpha+1)/(myMu^2) + 2*beta/(myMu^3)
myDp
}
Dp <- getDp(alpha = shapeParam, beta = scaleParam)
Dqm <- rep(NA,length(mRange))
for(i in 1:length(mRange)){
m <- mRange[i]
myDq <- rep(NA,numMC)
for(q in 1:numMC){
genData <- function(m,rateParam,censtime){
lifetime <- rexp(m, rate = rateParam)
t0 <- pmin(lifetime, censtime)
y <- as.numeric(censtime > lifetime)
data <- cbind(y,t0)
data
}
myData <- genData(m,rateParam=1/myMu[q],censtime=3)
getDq <- function(myMu,y,t0){
myDq <- (1+sum(y))*(-1/(myMu^2)) + (2/(myMu^3))*sum(t0)
myDq
}
myDq[q] <- getDq(myMu=myMu[q], y = myData[,1], t0 = myData[,2])
}
Dqm[i] <- mean(myDq)
#print(Dp-Dqm[i])
rm(myDq)
}
deltaBar <- Dp - Dqm
min.n.index <- which.min(abs(deltaBar))
min.n <- mRange[which.min(abs(deltaBar))]
min.v <- deltaBar[which.min(abs(deltaBar))]
interpolated <- approx(mRange, deltaBar, method = "linear")
ESS <- interpolated$x[which.min(abs(interpolated$y))]
ESS
}
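## A hypothetical call: inverse-gamma(shape = 3, scale = 6) prior on the mean
## survival time, searching sample sizes up to m = 30:
if (FALSE) essSurv(shapeParam = 3, scaleParam = 6, m = 30, nsim = 1000)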
##################################################################################
##
## R function written by Jaejoon Song
## Function to calculate ESS for time to event outcome
## Last update: 1/20/2018
##
##################################################################################
essNormal <- function(nu,sigma0,mu0=NULL,phi=NULL,knownMean=FALSE,m,nsim){
ESS <- list()
if(knownMean==TRUE){
ESS <- nu
}
if(knownMean==FALSE){
sigmasq_bar <- (nu*sigma0^2)/(nu-2)
mRange <- 1:m
numMC <- nsim
#library(LaplacesDemon)
sigmasq <- rinvchisq(n=numMC, df=nu, scale=sigma0)
delta1 <- rep(NA,length(mRange))
delta2 <- rep(NA,length(mRange))
delta <- rep(NA,length(mRange))
for(i in 1:length(mRange)){
Dp1 <- (1/sigmasq_bar^3)*(nu*sigma0^2)-(1/(2*sigmasq_bar^2))*(3+nu)
Dp2 <- phi/sigmasq_bar
Dp <- Dp1 + Dp2
Dq1samp <- rep(NA,numMC)
Dq2samp <- rep(NA,numMC)
DqSamp <- rep(NA,numMC)
for(j in 1:numMC){
mu <- rnorm(n=1, mean = mu0, sd = sqrt(sigmasq[j]/phi))
y <- rnorm(n=mRange[i], mean = mu, sd = sqrt(sigmasq[j]))
Dq1sampAdg1 <- -(((0+1)/2)+3)*(1/sigmasq_bar^2) +
(2/sigmasq_bar^3)*((1/2)*(y-mu0)%*%(y-mu0)+sigmasq_bar)
Dq1sampAdg2 <- -(((4+1)/2)+3)*(1/sigmasq_bar^2) +
(2/sigmasq_bar^3)*((1/2)*(y-mu0)%*%(y-mu0)+sigmasq_bar)
Dq1Adg <- Dq1sampAdg2 - Dq1sampAdg1
      Dq2sampAdg1 <- 0/sigmasq_bar
      Dq2sampAdg2 <- 4/sigmasq_bar
      Dq2Adg <- Dq2sampAdg2 - Dq2sampAdg1
Dq1samp[j] <- -(((mRange[i]+1)/2)+3)*(1/sigmasq_bar^2) +
(2/sigmasq_bar^3)*((1/2)*(y-mu0)%*%(y-mu0)+sigmasq_bar) + Dq1Adg
Dq2samp[j] <- mRange[i]/sigmasq_bar + Dq2Adg
DqSamp[j] <- Dq1samp[j] + Dq2samp[j]
}
Dq1 <- mean(Dq1samp)
Dq2 <- mean(Dq2samp)
Dq <- mean(DqSamp)
delta1[i] <- abs(Dp1-Dq1)
delta2[i] <- abs(Dp2-Dq2)
delta[i] <- abs(Dp-Dq)
}
interpolated_delta_1 <- approx(mRange, delta1, method = "linear")
ESS_delta_1 <- interpolated_delta_1$x[which.min(abs(interpolated_delta_1$y))]
ESS_delta_1
interpolated_delta_2 <- approx(mRange, delta2, method = "linear")
ESS_delta_2 <- interpolated_delta_2$x[which.min(abs(interpolated_delta_2$y))]
ESS_delta_2
interpolated_delta <- approx(mRange, delta, method = "linear")
ESS_delta <- interpolated_delta$x[which.min(abs(interpolated_delta$y))]
ESS_delta
ESS$ESS_sigmasq <- round(ESS_delta_1,2)
ESS$ESS_mu <- round(ESS_delta_2,2)
#ESS$ESS_overall <- round(ESS_delta,2)
}
ESS
}
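## Hypothetical calls: with a known mean, the scaled-inv-chi-squared(nu, sigma0)
## prior has ESS = nu directly; the unknown-mean case is simulation based:
if (FALSE) {
  essNormal(nu = 5, sigma0 = 2, knownMean = TRUE, m = 20, nsim = 100)   # 5
  essNormal(nu = 5, sigma0 = 2, mu0 = 0, phi = 3, knownMean = FALSE,
            m = 20, nsim = 500)  # list with ESS_sigmasq and ESS_mu
}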
|
/scratch/gouwar.j/cran-all/cranData/BayesESS/R/internal.R
|
#' @title S3 generic, calls the correct GibbsMH sampler
#'
#' @description An MCMC sampler for Bayesian borrowing with time-to-event data.
#' We obtain a flexible baseline hazard function by making the split points
#' random within a piecewise exponential model and using a Gaussian Markov
#' random field prior to smooth the baseline hazards. Only calls the sampler and
#' does not run any input checks. Best practice is to call BayesFBHborrow(), if the
#' user is not familiar with the model at hand.
#'
#' @param Y data
#' @param I event indicator
#' @param X design matrix
#' @param Y_0 historical data, default is NULL
#' @param I_0 historical event indicator, default is NULL
#' @param X_0 historical design matrix, default is NULL
#' @param tuning_parameters list of "cprop_beta", "cprop_beta_0", "alpha", "Jmax",
#' and "pi_b"
#' @param initial_values list containing the initial values of c("J", "s_r",
#' "mu", "sigma2", "tau", "lambda_0", "lambda", "beta_0", "beta") (optional)
#' @param hyperparameters list containing the hyperparameters c("a_tau", "b_tau",
#' "c_tau", "d_tau", "type", "p_0", "a_sigma", "b_sigma", "clam_smooth", "phi").
#' Default is list("a_tau" = 1, "b_tau" = 0.001, "c_tau" = 1, "d_tau" = 1,
#' "type" = "mix", "p_0" = 0.8, "a_sigma" = 1, "b_sigma" = 1, "phi" = 3,
#' "clam_smooth" = 0.8)
#' @param lambda_hyperparameters contains two hyperparameters (a_lambda and b_lambda) used
#' for the update of lambda and lambda_0
#' @param iter number of iterations for MCMC sampler, excluding warmup
#' @param warmup_iter number of warmup iterations (burn-in) for MCMC sampler
#' @param refresh number of iterations between printed screen updates,
#' default is 0 (no updates)
#' @param max_grid grid size for the smoothed baseline hazard, default is 2000
#'
#' @return depending on if the user wishes to borrow; returns a list with values
#' after each iteration for parameters: out_fixed (J, mu, sigma2, beta), lambda,
#' lambda_0, tau, s, as well as tuning values of the total number of accepts:
#' lambda_move, lambda_0_move and beta_move. Also included is the out_slam which
#' contains the shrunk estimate of the baseline hazard.
#' @export
#'
#' @examples
#' set.seed(123)
#' # Load example data and set your initial values and hyper parameters
#' data(weibull_cc, package = "BayesFBHborrow")
#' data(weibull_hist, package = "BayesFBHborrow")
#'
#' # The datasets consist of 3 (2) columns named "tte", "event" and "X"
#' # (only for concurrent). To explicitly run the sampler, extract the samples as
#' # following
#' Y <- weibull_cc$tte
#' I <- weibull_cc$event
#' X <- matrix(weibull_cc$X_trt)
#'
#' Y_0 <- weibull_hist$tte
#' I_0 <- weibull_hist$event
#' X_0 <- NULL
#'
#' # Specify hyperparameters and tuning parameters
#' hyper <- list("a_tau" = 1,
#' "b_tau" = 0.001,
#' "c_tau" = 1,
#' "d_tau" = 1,
#' "type" = "all",
#' "p_0" = 0.5,
#' "a_sigma" = 2,
#' "b_sigma" = 2,
#' "clam_smooth" = 0.5,
#' "phi" = 3)
#'
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5,
#' "alpha" = 0.4)
#'
#' # Set initial values to 'NULL' for default settings
#' output <- GibbsMH(Y, I, X, Y_0, I_0, X_0,
#' tuning_parameters, initial_values = NULL, hyper,
#' iter = 5, warmup_iter = 1)
#'
GibbsMH <- function(Y, I, X, Y_0 = NULL, I_0 = NULL, X_0 = NULL, tuning_parameters,
initial_values, hyperparameters,
lambda_hyperparameters, iter, warmup_iter, refresh,
max_grid) {
checkmate::assert_numeric(Y)
if (is.null(Y_0)) {
class(Y) <- c("numeric", "NoBorrow")
} else {
class(Y) <- c("numeric", "WBorrow")
}
UseMethod("GibbsMH", Y)
}
#' @title GibbsMH sampler, with Bayesian Borrowing
#' @description An MCMC sampler for Bayesian borrowing with time-to-event data.
#' We obtain a flexible baseline hazard function by making the split points
#' random within a piecewise exponential model and using a Gaussian Markov
#' random field prior to smooth the baseline hazards. Only calls the sampler and
#' does not run any input checks. Best practice is to call BayesFBHborrow(), if the
#' user is not familiar with the model at hand.
#'
#' @param Y data
#' @param I event indicator
#' @param X design matrix
#' @param Y_0 historical data
#' @param I_0 historical event indicator
#' @param X_0 historical design matrix
#' @param tuning_parameters list of "cprop_beta", "cprop_beta_0", "alpha", "Jmax",
#' and "pi_b"
#' @param initial_values list containing the initial values of c("J", "s_r",
#' "mu", "sigma2", "tau", "lambda_0", "lambda", "beta_0", "beta") (optional)
#' @param hyperparameters list containing the hyperparameters c("a_tau", "b_tau",
#' "c_tau", "d_tau", "type", "p_0", "a_sigma", "b_sigma", "clam_smooth", "phi").
#' Default is list("a_tau" = 1, "b_tau" = 0.001, "c_tau" = 1, "d_tau" = 1,
#' "type" = "mix", "p_0" = 0.8, "a_sigma" = 1, "b_sigma" = 1, "phi" = 3,
#' "clam_smooth" = 0.8)
#' @param lambda_hyperparameters contains two hyperparameters (a_lambda and b_lambda)
#' used for the update of lambda and lambda_0. Default is c(0.01, 0.01)
#' @param iter number of iterations for MCMC sampler, excluding warmup,
#' default is 150
#' @param warmup_iter number of warmup iterations (burn-in) for MCMC sampler,
#' default is 10
#' @param refresh number of iterations between printed screen updates,
#' default is 0 (no updates)
#' @param max_grid grid size for the smoothed baseline hazard, default is 2000
#'
#' @return list with values after each iteration for parameters: out_fixed (J,
#' mu, sigma2, beta), lambda, lambda_0, tau, s, as well as tuning values of the total number
#' of accepts: lambda_move, lambda_0_move and beta_move. Also included is the out_slam which
#' contains the shrunk estimate of the baseline hazard.
#'
#' @export
#'
#' @examples
#' set.seed(123)
#' # Load example data and set your initial values and hyper parameters
#' data(weibull_cc, package = "BayesFBHborrow")
#' data(weibull_hist, package = "BayesFBHborrow")
#'
#' # The datasets consist of 3 (2) columns named "tte", "event" and "X"
#' # (only for concurrent). To explicitly run the sampler, extract the samples as
#' # following
#' Y <- weibull_cc$tte
#' I <- weibull_cc$event
#' X <- matrix(weibull_cc$X_trt)
#'
#' Y_0 <- weibull_hist$tte
#' I_0 <- weibull_hist$event
#' X_0 <- NULL
#'
#' # Specify hyperparameters and tuning parameters
#' hyper <- list("a_tau" = 1,
#' "b_tau" = 0.001,
#' "c_tau" = 1,
#' "d_tau" = 1,
#' "type" = "all",
#' "p_0" = 0.5,
#' "a_sigma" = 2,
#' "b_sigma" = 2,
#' "clam_smooth" = 0.5,
#' "phi" = 3)
#'
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5,
#' "alpha" = 0.4)
#'
#' # Set initial values to 'NULL' for default settings
#' output <- GibbsMH(Y, I, X, Y_0, I_0, X_0,
#' tuning_parameters, initial_values = NULL, hyper,
#' iter = 5, warmup_iter = 1)
GibbsMH.WBorrow <- function(Y, I, X,
Y_0, I_0, X_0,
tuning_parameters,
initial_values = NULL,
hyperparameters = list(
"a_tau" = 1,
"b_tau" = 0.001,
"c_tau" = 1,
"d_tau" = 1,
"type" = "mix",
"p_0" = 0.8,
"a_sigma" = 1,
"b_sigma" = 1,
"phi" = 3,
"clam_smooth" = 0.8),
lambda_hyperparameters = list(
"a_lambda" = 0.01,
"b_lambda" = 0.01
),
iter = 150L,
warmup_iter = 10L,
refresh = 0,
max_grid = 2000L
) {
### Initialize parameters ###
# count accept for MH beta
lambda_0_count <- 0
lambda_count <- 0
lambda_move <- 0
lambda_0_move <- 0
# proposal prior
a_lambda <- lambda_hyperparameters$a_lambda
b_lambda <- lambda_hyperparameters$b_lambda
#alpha <- lambda_hyperparameters$alpha
# hyperparameters
a_sigma <- hyperparameters$a_sigma
b_sigma <- hyperparameters$b_sigma
clam <- hyperparameters$clam_smooth
phi <- hyperparameters$phi
if (is.null(initial_values)) {
J <- phi
quantiles <- quantile(Y, probs = seq(0, 1, length.out = J + 2),
na.rm = TRUE, names = FALSE)
s <- c(0, quantiles[2:(J + 1)], max(Y, Y_0))
group_data_cc <- group_summary(Y[X[,1] == 0], I[X[,1] == 0], NULL, s)
group_data_hist <- group_summary(Y_0, I_0, NULL, s)
lambda_init <- init_lambda_hyperparameters(group_data_cc, s)
lambda_init_hist <- init_lambda_hyperparameters(group_data_hist, s)
lambda <- mapply(stats::rgamma, n = 1,
shape = lambda_init$shape,
rate = lambda_init$rate)
lambda_0 <- mapply(stats::rgamma, n = 1,
shape = lambda_init_hist$shape,
rate = lambda_init_hist$rate)
### mu and sigma2
# add the time exposed, etc.
lambda_init_sum_hist <- init_lambda_hyperparameters(lapply(group_data_hist, sum), s[c(1, J + 2)])
mu <- mean(log(mapply(stats::rgamma, n = 500,
shape = lambda_init_sum_hist$shape,
rate = lambda_init_sum_hist$rate)))
sigma2 <- var(log(mapply(stats::rgamma, n = 500,
shape = lambda_init_sum_hist$shape,
rate = lambda_init_sum_hist$rate)))
#Data and beta initial values
bp <- ncol(X)
df_curr <- .dataframe_fun(Y = Y, I = I, X = X, s = s, lambda = lambda, bp = ncol(X), J = J)
if(is.null(X_0)){
bp_0 <- 0
beta_0 <- NULL
} else {
bp_0 <- ncol(X_0)
df_hist <- .dataframe_fun(Y = Y_0, I = I_0, X = X_0, s = s, lambda = lambda_0, bp = bp_0, J = J)
glm.mom_0 <- .glmFit(df_hist)
beta_0 <- glm.mom_0$beta.mu
beta_0_count <- numeric(bp_0)
}
glm.mom <- .glmFit(df_curr)
beta <- glm.mom$beta.mu
beta_count <- numeric(bp)
if(is.null(X_0)){
df_hist <- .dataframe_fun(Y = Y_0, I = I_0, X = X_0, s = s, lambda = lambda_0, bp = bp_0, J = J)
}
} else {
J <- initial_values$J
mu <- initial_values$mu
sigma2 <- initial_values$sigma2
s <- c(0, sort(initial_values$s_r), max(Y, Y_0))
lambda <- initial_values$lambda
lambda_0 <- initial_values$lambda_0
beta <- initial_values$beta
bp <- length(beta)
beta_count <- numeric(bp)
if(!is.null(X_0)) {
beta_0 <- initial_values$beta_0
bp_0 <- length(beta_0)
beta_0_count <- rep(0, bp_0)
} else {
bp_0 <- 0
beta_0 <- NULL
}
#Map lambda and introduce indicators.
df_hist <- .dataframe_fun(Y = Y_0, I = I_0, X = X_0, s = s, lambda = lambda_0, bp = bp_0, J = J)
df_curr <- .dataframe_fun(Y = Y, I = I, X = X, s = s, lambda = lambda, bp = bp, J = J)
}
a_tau <- hyperparameters$a_tau
b_tau <- hyperparameters$b_tau
type <- hyperparameters$type
if (type == "mix") {
c_tau <- hyperparameters$c_tau
d_tau <- hyperparameters$d_tau
p_0 <- hyperparameters$p_0
tau <- p_0 * invgamma::rinvgamma(J + 1, shape = a_tau, rate = b_tau) + (1-p_0) *
invgamma::rinvgamma(J + 1, shape = c_tau, rate = d_tau)
} else if (type == "uni") {
c_tau <- NULL
d_tau <- NULL
p_0 <- NULL
tau <- invgamma::rinvgamma(J+1, shape = a_tau, rate = b_tau)
} else {
c_tau <- hyperparameters$c_tau
d_tau <- hyperparameters$d_tau
p_0 <- hyperparameters$p_0
tau <- p_0 * invgamma::rinvgamma(1, shape = a_tau, rate = b_tau) + (1-p_0) *
invgamma::rinvgamma(1, shape = c_tau, rate = d_tau)
}
# Tuning parameters
Jmax <- tuning_parameters$Jmax
cprop_beta <- tuning_parameters$cprop_beta
pi_b <- tuning_parameters$pi_b
alpha <- tuning_parameters$alpha
if (!is.null(X_0)) {
cprop_beta_0 <- tuning_parameters$cprop_beta_0
}
maxSj <- min(max(Y), max(Y_0))
out_fixed <- data.frame(matrix(NA, nrow = iter + warmup_iter, ncol = 3 + bp + bp_0))
colnames(out_fixed)[1:3] <- c("J", "mu", "sigma2")
colnames(out_fixed)[4:(3 + bp)] <- paste0("beta_", 1:bp)
if(bp_0 > 0) {
colnames(out_fixed)[(3 + bp + 1):(3 + bp + bp_0)] <- paste0("beta_0_", 1:bp_0)
}
out_lambda <- data.frame(matrix(NA, nrow = iter + warmup_iter, ncol = Jmax + 1))
out_lambda_0 <- data.frame(matrix(NA, nrow = iter + warmup_iter, ncol = Jmax + 1))
out_s <- data.frame(matrix(NA, nrow = iter + warmup_iter, ncol = Jmax + 2))
if(type %in% c("uni", "mix")) {
out_tau <- data.frame(matrix(NA, nrow = iter + warmup_iter, ncol = Jmax +1))
}else{
out_tau <- data.frame(matrix(NA, nrow = iter + warmup_iter, ncol = 1))
colnames(out_tau) <- NULL
}
#Max number of grid points
time_grid <- seq(1e-8, max(Y), length.out = max_grid)
out_slam <- data.frame(matrix(data = NA, nrow = 0, ncol = max_grid))
colnames(out_slam) <- time_grid
### MCMC START ###
sample <- c(rep("(Warmup)", warmup_iter), rep("(Sampling)", iter))
mess <- character(iter + warmup_iter)
if (refresh != 0 && refresh < (iter + warmup_iter)) {
mess[seq(refresh, iter + warmup_iter, refresh)] <- paste("Iteration:",
seq(refresh, iter + warmup_iter, refresh), "/", (iter + warmup_iter),
sample[seq(refresh, iter + warmup_iter, refresh)])
} else if (refresh > (iter + warmup_iter)) {
message("'refresh' is larger than number of iterations, using default value (0)")
refresh <- 0
}
for(i in 1:(iter + warmup_iter)) {
if(i%%refresh == 0 && refresh != 0){message(mess[i])}
ICAR <- .ICAR_calc(s = s, J = J, clam = clam)
Sigma_s <- ICAR$Sigma_s
# 1. Conjugate posterior updates [Gibbs]
mu <- .mu_update(Sigma_s, lambda_0, sigma2, J)
sigma2 <- .sigma2_update(mu, lambda_0, Sigma_s, J, a_sigma, b_sigma)
tau_all <- .tau_update(lambda_0, lambda, J, s, a_tau, b_tau, c_tau, d_tau, p_0, type)
tau <- tau_all$tau
# 2. Update beta [MH NR]
beta_all <- .beta_MH_NR(df_curr, beta, bp, cprop_beta, beta_count)
beta <- beta_all$beta
beta_count <- beta_all$beta_count
if(bp_0 > 0) {
beta_0_all <- .beta_MH_NR(df = df_hist, beta = beta_0, bp = bp_0,
cprop_beta = cprop_beta_0, beta_count = beta_0_count)
beta_0 <- beta_0_all$beta
beta_0_count <- beta_0_all$beta_count
}
# 3. Update lambda_0, propose new lambda_0 from conditional [MH]
# conjugate posterior
df_lambda_0 <- .lambda_0_MH_cp(df_hist, Y_0, I_0, X_0, s,
beta_0, mu, sigma2, lambda, lambda_0, tau,
bp_0, J, clam, a_lam = a_lambda, b_lam = b_lambda,
lambda_0_count, lambda_0_move)
lambda_0 <- df_lambda_0$lambda_0
lambda_0_move <- df_lambda_0$lambda_0_move
lambda_0_count <- df_lambda_0$lambda_0_count
df_hist <- df_lambda_0$df_hist
# 4. Update lambda, propose new lambda from conditional
# conjugate posterior [MH]
df_lambda <- .lambda_MH_cp(df_hist, df_curr, Y, I, X, s,
beta, beta_0, mu, sigma2, lambda, lambda_0, tau,
bp, bp_0, J, a_lam = a_lambda, b_lam = b_lambda, lambda_move,
lambda_count, alpha)
lambda <- df_lambda$lambda
lambda_move <- df_lambda$lambda_move
lambda_count <- df_lambda$lambda_count
df_curr <- df_lambda$df_curr
# 5. Shuffle split point locations (accept/reject) [MH]
if (J > 0) {
swap_df <- .shuffle_split_point_location(df_hist, df_curr, Y_0, I_0, X_0,
lambda_0, beta_0, Y, I, X, lambda, beta, s, J, bp_0, bp,
clam, maxSj)
s <- swap_df$s
Sigma_s <- swap_df$Sigma_s
df_hist <- swap_df$df_hist
df_curr <- swap_df$df_curr
}
# 6. Propose a birth/death of a split point via a reversible jump step [MH-Green]
# This will update lambda_0, lambda, tau, s and J (via weighted mean) if accepted
rjmcmc_out <- .J_RJMCMC(df_hist, df_curr, Y, Y_0, I, I_0, X, X_0,
lambda, lambda_0, beta, beta_0,
mu, sigma2, tau, s, J, Jmax,
bp, bp_0,
clam,
a_tau, b_tau, c_tau, d_tau, type,
p_0, phi, pi_b, maxSj)
J <- rjmcmc_out$J
s <- rjmcmc_out$s
lambda <- rjmcmc_out$lambda
lambda_0 <- rjmcmc_out$lambda_0
Sigma_s <- rjmcmc_out$Sigma_s
if(type %in% c("uni", "mix")) {
tau <- rjmcmc_out$tau
}
df_hist <- rjmcmc_out$df_hist
df_curr <- rjmcmc_out$df_curr
# 7. Save parameter values of iteration i
out_fixed[i, 1] <- J
out_fixed[i, 2] <- mu
out_fixed[i, 3] <- sigma2
out_fixed[i, 4:(3 + bp)] <- beta
if(bp_0 > 0) {
out_fixed[i, (3 + bp + 1):(3 + bp + bp_0)] <- beta_0
}
out_lambda[i, 1:(length(lambda))] <- lambda
out_lambda_0[i, 1:(length(lambda_0))] <- lambda_0
out_s[i, 1:(length(s))] <- s
out_tau[i, 1:(length(tau))] <- tau
#Grid of baseline hazards for shrunk estimate
indx <- findInterval(time_grid, s, left.open = T)
out_slam[i,] <- lambda[indx]
}
# Remove burn-in
out_fixed <- out_fixed[-(1:warmup_iter),]
out_lambda <- out_lambda[-(1:warmup_iter),]
out_lambda_0 <- out_lambda_0[-(1:warmup_iter),]
out_s <- out_s[-(1:warmup_iter),]
out_tau <- out_tau[-(1:warmup_iter),]
out_slam <- out_slam[-(1:warmup_iter),]
if (type %in% c("uni", "mix")) {
out_list <- list("out_fixed" = out_fixed, "lambda" = out_lambda,
"lambda_0" = out_lambda_0, "s" = out_s, "tau" = out_tau,
"lambda_0_move" = lambda_0_move,
"lambda_move" = lambda_move,
"beta_move" = beta_count,
"out_slam" = out_slam)
class(out_list) <- c("BayesFBHborrow", "list")
} else {
out_fixed <- cbind(out_fixed, out_tau)
out_fixed <- dplyr::rename_all(out_fixed, dplyr::recode, out_tau = "tau")
out_list <- list("out_fixed" = out_fixed, "lambda" = out_lambda,
"lambda_0" = out_lambda_0, "s" = out_s,
"lambda_0_move" = lambda_0_move,
"lambda_move" = lambda_move,
"beta_move" = beta_count,
"out_slam" = out_slam)
class(out_list) <- c("BayesFBHborrow", "list")
}
if(!is.null(beta_0)) {
out_list$beta_0_move <- beta_0_count
}
return(out_list)
}
#' @title GibbsMH sampler, without Bayesian Borrowing
#'
#' @description An MCMC sampler for time-to-event data, without Bayesian Borrowing.
#' We obtain a flexible baseline hazard function by making the split points
#' random within a piecewise exponential model and using a Gaussian Markov
#' random field prior to smooth the baseline hazards. Only calls the sampler and
#' does not run any input checks. Best practice is to call BayesFBHborrow(), if the
#' user is not familiar with the model at hand.
#'
#' @param Y data
#' @param I event indicator
#' @param X design matrix
#' @param Y_0 historical data, default is NULL
#' @param I_0 historical event indicator, default is NULL
#' @param X_0 historical design matrix, default is NULL
#' @param tuning_parameters list of "cprop_beta", "Jmax",
#' and "pi_b"
#' @param initial_values list containing the initial values of c("J", "s_r",
#' "mu", "sigma2", "lambda", beta") (optional)
#' @param hyperparameters list containing the hyperparameters c("a_sigma",
#' "b_sigma", "clam_smooth", "phi"). Default is
#' list("a_sigma" = 1, "b_sigma" = 1, "phi" = 3, "clam_smooth" = 0.8)
#' @param lambda_hyperparameters contains two hyperparameters ("a" and "b") used for
#' the update of lambda, default is c(0.01, 0.01)
#' @param iter number of iterations for MCMC sampler, excluding warmup,
#' default is 1500
#' @param warmup_iter number of warmup iterations (burn-in) for MCMC sampler,
#' default is 10
#' @param refresh number of iterations between printed screen updates,
#' default is 0 (no updates)
#' @param max_grid grid size for the smoothed baseline hazard, default is 2000
#'
#' @return list with values after each iteration for parameters: out_fixed (J,
#' mu, sigma2, beta), lambda, s, as well as tuning values of the total number
#' of accepts: lambda_move and beta_move. Also included is the out_slam which
#' contains the shrunk estimate of the baseline hazard.
#'
#' @export
#'
#' @examples
#' set.seed(123)
#' # Load example data and set your initial values and hyper parameters
#' data(weibull_cc, package = "BayesFBHborrow")
#' data(weibull_hist, package = "BayesFBHborrow")
#'
#' # The datasets consist of 3 (2) columns named "tte", "event" and "X".
#' # To explicitly run the sampler, extract the samples as following
#' Y <- weibull_cc$tte
#' I <- weibull_cc$event
#' X <- matrix(weibull_cc$X_trt)
#'
#' # Specify hyperparameters and tuning parameters
#' hyper <- list("a_sigma" = 2,
#' "b_sigma" = 2,
#' "clam_smooth" = 0.5,
#' "phi" = 3)
#'
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5)
#'
#' # Set initial values to 'NULL' for default settings
#' output <- GibbsMH(Y, I, X, NULL, NULL, NULL,
#' tuning_parameters, initial_values = NULL, hyper,
#' iter = 5, warmup_iter = 1)
GibbsMH.NoBorrow <- function(Y, I, X = NULL, Y_0 = NULL, I_0 = NULL, X_0 = NULL,
tuning_parameters,
initial_values = NULL,
hyperparameters = list(
"a_sigma" = 1,
"b_sigma" = 1,
"phi" = 3,
"clam_smooth" = 0.8),
lambda_hyperparameters = list(
"a_lambda" = 0.01,
"b_lambda" = 0.01
),
iter = 1500L,
warmup_iter = 10L,
refresh = 0,
max_grid = 2000L
) {
#Proposal prior
a_lambda <- lambda_hyperparameters$a_lambda
b_lambda <- lambda_hyperparameters$b_lambda
# hyperparameters
a_sigma <- hyperparameters$a_sigma
b_sigma <- hyperparameters$b_sigma
clam_smooth <- hyperparameters$clam_smooth
phi <- hyperparameters$phi
# Count accepts
lambda_count <- 0
lambda_move <- 0
if (is.null(initial_values)) {
J <- phi
quantiles <- quantile(Y, probs = seq(0, 1, length.out = J + 2),
na.rm = TRUE, names = FALSE)
s <- c(0, quantiles[2:(J + 1)], max(Y))
group_data <- group_summary(Y, I, NULL, s)
lambda_init <- init_lambda_hyperparameters(group_data, s)
lambda <- mapply(stats::rgamma, n = 1,
shape = lambda_init$shape,
rate = lambda_init$rate)
lambda_init_sum <- init_lambda_hyperparameters(lapply(group_data, sum), s[c(1, J + 2)])
log_hazard_sample <- log(mapply(stats::rgamma, n = 500,
shape = lambda_init_sum$shape,
rate = lambda_init_sum$rate))
mu <- mean(log_hazard_sample)
sigma2 <- var(log_hazard_sample)
if(is.null(X)) {
beta <- NULL
bp <- 0
} else {
bp <- ncol(X)
df_all <- .dataframe_fun(Y = Y, I = I, X = X, s = s, lambda = lambda, bp = ncol(X), J = J)
glm.mom <- .glmFit(df_all)
beta <- glm.mom$beta.mu
beta_count <- numeric(bp)
}
if (bp == 0) {
df_all <- .dataframe_fun(Y = Y, I = I, X = NULL, s = s, lambda = lambda, bp = 0, J = J)
}
} else {
J <- initial_values$J
mu <- initial_values$mu
sigma2 <- initial_values$sigma2
s <- c(0, sort(initial_values$s_r), max(Y, Y_0))
lambda <- initial_values$lambda
if (!is.null(X)) {
beta <- initial_values$beta
bp <- length(beta)
beta_count <- numeric(bp)
} else {
beta <- NULL
bp <- 0
}
if (bp > 0) {
df_all <- .dataframe_fun(Y = Y, I = I, X = X, s = s, lambda = lambda, bp = bp, J = J)
}else{
df_all <- .dataframe_fun(Y = Y, I = I, X = NULL, s = s, lambda = lambda, bp = 0, J = J)
}
}
# Tuning parameters
Jmax <- tuning_parameters$Jmax
cprop_beta <- tuning_parameters$cprop_beta
pi_b <- tuning_parameters$pi_b
#Output array
# J, mu, sigma2, beta
out_fixed <- data.frame(matrix(NA, nrow = iter + warmup_iter, ncol = 3 + bp))
colnames(out_fixed)[1:3] <- c("J", "mu", "sigma2")
if (bp > 0) {
colnames(out_fixed)[4:(3 + bp)] <- paste0("beta_", 1: bp)
}
out_lambda <- data.frame(matrix(NA, nrow = iter + warmup_iter, ncol = Jmax + 1))
out_s <- data.frame(matrix(NA, nrow = iter + warmup_iter, ncol = Jmax + 2))
#Max number of grid points
t <- seq(1e-8, max(Y), length.out = max_grid)
out_slam <- data.frame(matrix(data = NA, nrow = 0, ncol = max_grid))
colnames(out_slam) <- t
### MCMC START ###
sample <- c(rep("(Warmup)", warmup_iter), rep("(Sampling)", iter))
mess <- character(iter + warmup_iter)
if (refresh != 0 && refresh < (iter + warmup_iter)) {
mess[seq(refresh, iter + warmup_iter, refresh)] <- paste("Iteration:",
seq(refresh, iter + warmup_iter, refresh), "/", (iter + warmup_iter),
sample[seq(refresh, iter + warmup_iter, refresh)])
} else if (refresh > (iter + warmup_iter)) {
message("Refresh is larger than number of iterations, using default value (0)")
refresh <- 0
}
for (i in 1:(iter + warmup_iter)) {
if(i%%refresh == 0 && refresh != 0){message(mess[i])}
ICAR <- .ICAR_calc(s, J, clam_smooth)
Sigma_s <- ICAR$Sigma_s
mu <- .mu_update(Sigma_s, lambda, sigma2, J)
sigma2 <- .sigma2_update(mu, lambda, Sigma_s, J, a_sigma, b_sigma)
#Map lambda and introduce indicators.
if (bp > 0) {
beta_all <- .beta_MH_NR(df = df_all, beta = beta, bp = bp, cprop_beta = cprop_beta, beta_count = beta_count)
beta <- beta_all$beta
beta_count <- beta_all$beta_count
}
# update lambda and the adjusted data frame
dflambda <-.lambda_0_MH_cp_NoBorrow(df_all, Y, I, X, s, beta, mu,
sigma2, lambda, bp, J, clam_smooth,
a_lam = a_lambda, b_lam = b_lambda, lambda_count,
lambda_move)
lambda <- dflambda$lambda_0
df_all <- dflambda$df
lambda_count <- dflambda$lambda_0_count
lambda_move <- dflambda$lambda_0_move
#shuffle s
if (J > 0) {
swap_df <- .shuffle_split_point_location_NoBorrow(df_all, Y, I, X,
lambda, beta, s, J, bp, clam_smooth)
s <- swap_df$s
Sigma_s <- swap_df$Sigma_s
df_all <- swap_df$df_all
}
#J update
rjmcmc_out <- .J_RJMCMC_NoBorrow(df_all, Y, I, X, lambda, beta,
mu, sigma2, s, J, Jmax, bp, clam_smooth,
phi, pi_b)
J <- rjmcmc_out$J
s <- rjmcmc_out$s
lambda <- rjmcmc_out$lambda
Sigma_s <- rjmcmc_out$Sigma_s
df_all <- rjmcmc_out$df_all
# Save parameter values of iteration i: J, mu, sigma2, beta
out_fixed[i, 1] <- J
out_fixed[i, 2] <- mu
out_fixed[i, 3] <- sigma2
if (bp > 0) {
out_fixed[i, 4:(3 + bp)] <- beta
}
out_lambda[i, 1:(length(lambda))] <- lambda
out_s[i, 1:(length(s))] <- s
#Grid of baseline hazards for shrunk estimate
indx <- findInterval(t, s, left.open = T)
out_slam[i, ] <- lambda[indx]
}
# Remove burn-in
out_fixed <- out_fixed[-(1:warmup_iter), ]
out_lambda <- out_lambda[-(1:warmup_iter), ]
out_s <- out_s[-(1:warmup_iter), ]
out_slam <- out_slam[-(1:warmup_iter), ]
out_list <- list("out_fixed" = out_fixed, "lambda" = out_lambda, "s" = out_s,
"out_slam" = out_slam, "lambda_count" = lambda_count,
"lambda_move" = lambda_move)
class(out_list) <- c("BayesFBHborrow", "list")
if (!is.null(beta)) {
out_list$beta_move <- beta_count
}
return(out_list)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/GibbsMH.R
|
#' @title Birth move in RJMCMC
#' @description Calculates new values of x when proposing an additional split point,
#' based on a weighted geometric mean; the ratio of the two new values is set by
#' x_(j+1)/x_j = (1 - U)/U
#'
#' @param U uniform random number
#' @param sj upcoming split point location, j
#' @param s_star new split point location, *
#' @param sjm1 previous split point location, j-1
#' @param x vector of parameter values, length J + 1
#' @param j split point
#'
#' @return vector with adjusted parameter values after additional split point,
#' length J + 2
.birth_move <- function(U, sj, s_star, sjm1, x, j) {
lxj <- log(x[j]) - (sj - s_star) / (sj - sjm1) * log((1 - U) / U)
lxjp1 <- log(x[j]) + (s_star - sjm1) / (sj - sjm1) * log((1 - U) / U)
x_prop <- append(x, exp(c(lxj, lxjp1)) , after = j)[-j]
return(x_prop)
}
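## Quick sanity check (illustrative only, arbitrary values): the birth move
## preserves the interval-weighted geometric mean of the split hazard, i.e.
## (s* - s_{j-1}) log x_j' + (s_j - s*) log x_{j+1}' = (s_j - s_{j-1}) log x_j.
if (FALSE) {
x <- c(0.5, 1.2, 0.8)
x_new <- .birth_move(U = 0.3, sj = 2, s_star = 1.4, sjm1 = 1, x = x, j = 2)
lhs <- (1.4 - 1) * log(x_new[2]) + (2 - 1.4) * log(x_new[3])
all.equal(lhs, (2 - 1) * log(x[2])) # TRUE
}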
#' @title Death move in RJMCMC
#' @description Calculates new values of x when proposing the death of a split point
#'
#' @param sjp1 upcoming split point location, J + 1
#' @param sj split point location to be removed, j
#' @param sjm1 previous split point location, j-1
#' @param x vector of parameter values, length J + 1
#' @param j split point
#'
#' @return vector with adjusted parameter values after removal of split point,
#' length J
.death_move <- function(sjp1, sj, sjm1, x, j) {
lxj <- ((sj- sjm1) * log(x[j-1]) + (sjp1 - sj) * log(x[j])) / (sjp1 - sjm1)
x_prop <- append(x, exp(lxj), after = j)[-((j-1):j)]
return(x_prop)
}
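## Illustrative check (arbitrary values): a death move at the split point
## created by a birth move recovers the original vector, since the merged
## value is exactly the interval-weighted geometric mean of the pair.
if (FALSE) {
x <- c(0.5, 1.2, 0.8)
x_b <- .birth_move(U = 0.3, sj = 2, s_star = 1.4, sjm1 = 1, x = x, j = 2)
x_d <- .death_move(sjp1 = 2, sj = 1.4, sjm1 = 1, x = x_b, j = 3)
all.equal(x_d, x) # TRUE
}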
#' @title Calculate log density tau prior
#'
#' @param tau current value(s) of tau
#' @param a_tau tau hyperparameter
#' @param b_tau tau hyperparameter
#' @param c_tau tau hyperparameter
#' @param d_tau tau hyperparameter
#' @param p_0 mixture ratio
#' @param type choice of borrowing, "mix", "uni", or any other string for
#' borrowing on every baseline hazard without mixture
#'
#' @return log density of tau
.ltau_dprior <- function(tau, a_tau, b_tau, c_tau = NULL, d_tau = NULL, p_0 = NULL, type) {
if (type == "mix") {
ldtau <- sum(log(p_0 * invgamma::dinvgamma(tau, shape = a_tau, rate = b_tau) +
(1 - p_0) * invgamma::dinvgamma(tau, shape = c_tau, rate = d_tau)))
} else if (type == "uni") {
ldtau <- sum(invgamma::dinvgamma(tau, shape = a_tau, rate = b_tau, log = T))
}
return(ldtau)
}
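## Illustrative call (arbitrary hyperparameter values): log density of a
## two-component inverse-gamma mixture prior evaluated at two tau values;
## requires the invgamma package.
if (FALSE) {
.ltau_dprior(tau = c(0.1, 0.5), a_tau = 1, b_tau = 0.001,
c_tau = 1, d_tau = 1, p_0 = 0.5, type = "mix")
}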
#' @title RJMCMC (with Bayesian Borrowing)
#' @description Metropolis-Hastings Green Reversible Jump move, with Bayesian
#' Borrowing
#'
#' @param df_hist data_frame containing historical data.
#' @param df_curr data_frame containing current trial data.
#' @param Y data.
#' @param Y_0 historical data.
#' @param I censoring indicator.
#' @param I_0 historical trial censoring indicator.
#' @param X design matrix.
#' @param X_0 historical trial design matrix.
#' @param lambda baseline hazard.
#' @param lambda_0 historical trial baseline hazard.
#' @param beta current trial parameters.
#' @param beta_0 historical trial parameters.
#' @param mu prior mean for baseline hazard.
#' @param sigma2 prior variance hyperparameter for baseline hazard.
#' @param tau borrowing parameter.
#' @param s split point locations, J + 2.
#' @param J number of split points.
#' @param Jmax maximum number of split points.
#' @param bp number of covariates in current trial.
#' @param bp_0 number of covariates in historical trial.
#' @param clam_smooth neighbor interactions, in range (0, 1), for ICAR update.
#' @param a_tau tau hyperparameter.
#' @param b_tau tau hyperparameter.
#' @param c_tau tau hyperparameter.
#' @param d_tau tau hyperparameter.
#' @param type choice of borrowing, "mix", "uni", or any other string for
#' borrowing on every baseline hazard without mixture.
#' @param p_0 mixture ratio.
#' @param phi J hyperparameter.
#' @param pi_b probability of birth move.
#' @param maxSj maximal time point, either current or historic.
#'
#' @return list of proposed J and s, with adjusted values of lambda, lambda_0,
#' tau, Sigma_s, and data_frames for historical and current trial data.
#'
.J_RJMCMC <- function(df_hist, df_curr, Y, Y_0, I, I_0,
X, X_0, lambda, lambda_0,
beta, beta_0,
mu, sigma2,
tau,
s, J, Jmax, bp, bp_0,
clam_smooth,
a_tau = NULL, b_tau = NULL, c_tau = NULL, d_tau = NULL, type,
p_0 = NULL, phi, pi_b,
maxSj) {
sindx <- 1:(J + 2)
Sigma_s <- .ICAR_calc(s, J, clam_smooth)$Sigma_s
#Birth or death move
if (J==0) {
move <- 0
pi_b <- 1
pi_d <- 1
} else if (J == Jmax) {
move <- 2
pi_b <- 1
pi_d <- 1
} else {
move <- stats::runif(1)
pi_d <- 1 - pi_b
}
if (move < pi_b) {
# Birth move, update split point locations
s_star <-stats::runif(1, s[1], maxSj)
s_max <- max(s)
jlow <- max(sindx[s < s_star])
jup <- min(sindx[s > s_star])
slow <- s[jlow]
sup <- s[jup]
s_prop <- append(s, s_star , after = jlow)
J_prop <- J + 1
U2 <- stats::runif(1)
U3 <- stats::runif(1)
U4 <- stats::runif(1)
# lambda proposal
lambda0_prop <- .birth_move(U = U3, sj = sup , s_star = s_star, sjm1 = slow, x = lambda_0, j = jlow)
lambda_prop <- .birth_move(U = U2, sj = sup , s_star = s_star, sjm1 = slow, x = lambda, j = jlow)
# Update data.frames and calculate llikelihood ratio
df_hist_prop <- .dataframe_fun(Y = Y_0, I = I_0, X = X_0, s = s_prop, lambda = lambda0_prop, bp = bp_0, J = J_prop)
df_curr_prop <- .dataframe_fun(Y = Y, I = I, X = X, s = s_prop, lambda = lambda_prop, bp = bp, J = J_prop)
llike_num <- .log_likelihood(df_hist_prop, beta_0) + .log_likelihood(df_curr_prop, beta)
llike_den <- .log_likelihood(df_hist, beta_0) + .log_likelihood(df_curr, beta)
# Calculate lpriors
Sigma_s_prop <- .ICAR_calc(s_prop, J_prop, clam_smooth)$Sigma_s
lprior_num <- mvtnorm::dmvnorm(log(lambda0_prop), rep(mu, J + 2), Sigma_s_prop * sigma2, log = T) +
log(s_star - slow) + log(sup - s_star) + log(2 * J + 3) + log(2 * J + 2) +
stats::dpois(J_prop, phi, log = T)
lprior_den <- mvtnorm::dmvnorm(log(lambda_0), rep(mu, J + 1), Sigma_s * sigma2, log = T) +
stats::dpois(J, phi, log = T) + log(sup - slow) + 2 * log(s_max)
# Adjust for scalar tau if J == 0 and non-piecewise tau
if (type %in% c("uni", "mix")) {
tau_prop <- .birth_move(U = U4, sj = sup , s_star = s_star, sjm1 = slow, x = tau, j = jlow)
tau_star <- tau_prop[c(jlow, jup)]
tau_curr <- tau[jlow]
lprior_num <- lprior_num + mvtnorm::dmvnorm(log(lambda_prop), log(lambda0_prop), diag(tau_prop), log = T) +
.ltau_dprior(tau_star, a_tau, b_tau, c_tau, d_tau, p_0, type)
lprior_den <- lprior_den + .ltau_dprior(tau_curr, a_tau, b_tau, c_tau, d_tau, p_0, type)
if (J == 0) {
lprior_den <- lprior_den + stats::dnorm(log(lambda), log(lambda_0), sqrt(tau), log = T)
} else {
lprior_den <- lprior_den + mvtnorm::dmvnorm(log(lambda), log(lambda_0), diag(tau), log = T)
}
} else {
lprior_num <- lprior_num + mvtnorm::dmvnorm(log(lambda_prop), log(lambda0_prop), diag(tau, J + 2, J + 2), log = T)
if (J == 0) {
lprior_den <- lprior_den + stats::dnorm(log(lambda), log(lambda_0), sqrt(tau), log = T)
} else {
lprior_den <- lprior_den + mvtnorm::dmvnorm(log(lambda), log(lambda_0), diag(tau, J + 1, J + 1), log = T)
}
}
# Proposal
lprop <- log(pi_d) - log(J + 1) - log(pi_b) + log(maxSj)
# Jacobian
ljac <- -log(U2) - log(1 - U2) - log(U3) - log(1 - U3)
if (type %in% c("uni", "mix")) {
ljac <- ljac - log(U4) - log(1 - U4)
}
# Prob
logacc <- llike_num - llike_den + lprior_num - lprior_den + lprop + ljac
if (logacc > log(stats::runif(1))) {
lambda_0 <- lambda0_prop
lambda <- lambda_prop
s <- s_prop
J <- J_prop
Sigma_s <- Sigma_s_prop
if (type %in% c("uni", "mix")) {
tau <- tau_prop
}
df_hist <- df_hist_prop
df_curr <- df_curr_prop
}
} else {
#Death move
if (J >= 2) {
j <- sample(sindx[-c(1, J + 2)], 1)
} else {
j <- 2
}
s_max <- max(s)
sj <- s[j]
slow <- s[j - 1]
sup <- s[j + 1]
s_prop <- s[-j]
J_prop <- J - 1
U2 <- stats::runif(1)
U3 <- stats::runif(1)
U4 <- stats::runif(1)
# lambda proposal
lambda0_prop <- .death_move(sjp1 = sup, sj = sj, sjm1 = slow, x = lambda_0, j = j)
lambda_prop <- .death_move(sjp1 = sup, sj = sj, sjm1 = slow, x = lambda, j = j)
# likelihood
df_hist_prop <- .dataframe_fun(Y = Y_0, I = I_0, X = X_0, s = s_prop, lambda = lambda0_prop, bp = bp_0, J = J_prop)
df_curr_prop <- .dataframe_fun(Y = Y, I = I, X = X, s = s_prop, lambda = lambda_prop, bp = bp, J = J_prop)
llike_num <- .log_likelihood(df_hist_prop, beta_0) + .log_likelihood(df_curr_prop, beta)
llike_den <- .log_likelihood(df_hist, beta_0) + .log_likelihood(df_curr, beta)
# lprior calculations
Sigma_s_prop <- .ICAR_calc(s_prop, J_prop, clam_smooth)$Sigma_s
lprior_num <- stats::dpois(J_prop, phi, log = T) +
mvtnorm::dmvnorm(log(lambda0_prop), rep(mu, J), Sigma_s_prop * sigma2, log = T) +
2 * log(s_max) + log(sup - slow)
lprior_den <- stats::dpois(J, phi, log = T) +
mvtnorm::dmvnorm(log(lambda_0), rep(mu, J + 1), Sigma_s * sigma2, log = T) +
log(sj - slow) + log(sup - sj) + log(2 * J + 1) + log(2 * J)
# Account for scalar / piecewise tau
if (type %in% c("uni", "mix")) {
tau_prop <- .death_move(sjp1 = sup, sj = sj, sjm1 = slow, x = tau, j = j)
tau_star <- tau_prop[j-1]
tau_curr <- tau[(j-1):j]
lprior_num <- lprior_num + .ltau_dprior(tau_star, a_tau, b_tau, c_tau, d_tau, p_0, type)
lprior_den <- lprior_den + mvtnorm::dmvnorm(log(lambda), log(lambda_0), diag(tau), log = T) +
.ltau_dprior(tau_curr, a_tau, b_tau, c_tau, d_tau, p_0, type)
if (J == 1) {
lprior_num<- lprior_num + stats::dnorm(log(lambda_prop), log(lambda0_prop), sqrt(tau_prop), log = T)
} else {
lprior_num <- lprior_num + mvtnorm::dmvnorm(log(lambda_prop), log(lambda0_prop), diag(tau_prop), log = T)
}
} else { #Non piecewise tau (no proposal for tau)
lprior_den <- lprior_den + mvtnorm::dmvnorm(log(lambda), log(lambda_0), diag(tau, J + 1, J + 1), log = T)
if (J == 1) {
lprior_num <- lprior_num + stats::dnorm(log(lambda_prop), log(lambda0_prop), sqrt(tau), log = T)
} else {
lprior_num <- lprior_num + mvtnorm::dmvnorm(log(lambda_prop), log(lambda0_prop), diag(tau, J, J), log = T)
}
}
# Proposal
lprop <- log(pi_b) - log(maxSj) - log(pi_d) + log(J)
# Jacobian
ljac <- log(U2) + log(1 - U2) + log(U3) + log(1 - U3)
if (type %in% c("uni", "mix")) {
ljac <- ljac + log(U4) + log(1 - U4)
}
# Acceptance ratio
logacc <- llike_num - llike_den + lprior_num - lprior_den + lprop + ljac
if (logacc > log(stats::runif(1))) {
lambda_0 <- lambda0_prop
lambda <- lambda_prop
s <- s_prop
J <- J_prop
Sigma_s <- Sigma_s_prop
if (type %in% c("uni", "mix")) {
tau <- tau_prop
}
df_hist <- df_hist_prop
df_curr <- df_curr_prop
}
}
return(list("J" = J, "s" = s, "lambda" = lambda, "lambda_0" = lambda_0,
"tau" = tau, "Sigma_s" = Sigma_s, "df_hist"= df_hist, "df_curr" = df_curr))
}
#' @title RJMCMC (without Bayesian Borrowing)
#' @description Metropolis-Hastings Green Reversible Jump move, without Bayesian
#' Borrowing
#'
#' @param df data_frame
#' @param Y_0 data
#' @param I_0 censoring indicator
#' @param X_0 design matrix
#' @param lambda_0 baseline hazard
#' @param beta_0 historical trial parameters
#' @param mu prior mean for baseline hazard
#' @param sigma2 prior variance hyperparameter for baseline hazard
#' @param s split point locations, J + 2
#' @param J number of split points
#' @param Jmax maximum number of split points
#' @param bp_0 number of covariates in historical trial
#' @param clam_smooth neighbor interactions, in range (0, 1), for ICAR update
#' @param phi J hyperparameter
#' @param pi_b probability of birth move
#'
#' @return list of proposed J and s, with adjusted values of lambda, lambda_0,
#' tau, Sigma_s, and data_frames for historical and current trial data
.J_RJMCMC_NoBorrow <- function(df, Y_0, I_0, X_0, lambda_0, beta_0, mu, sigma2,
s, J, Jmax, bp_0, clam_smooth, phi, pi_b) {
sindx <- 1:(J + 2)
Sigma_s <- .ICAR_calc(s, J, clam_smooth)$Sigma_s
#Birth or death
if (J==0) {
move <- 0
pi_b <- 1
pi_d <- 1
} else if (J == Jmax) {
move <- 2
pi_b <- 1
pi_d <- 1
} else {
move <- stats::runif(1)
pi_d <- 1 - pi_b
}
if (move < pi_b) {
s_star <-stats::runif(1, s[1], s[J + 2])
s_max <- max(s)
jlow <- max(sindx[s < s_star])
jup <- min(sindx[s > s_star])
slow <- s[jlow]
sup <- s[jup]
s_prop <- append(s, s_star , after = jlow)
J_prop <- J + 1
U2 <- stats::runif(1)
#lambda proposal
lambda0_prop <- .birth_move(U = U2, sj = sup , s_star = s_star, sjm1 = slow, x = lambda_0, j = jlow)
##Likelihood
df_prop <- .dataframe_fun(Y = Y_0, I = I_0, X = X_0, s = s_prop, lambda = lambda0_prop, bp = bp_0, J = J_prop)
llike_num <- .log_likelihood(df_prop, beta_0)
llike_den <- .log_likelihood(df, beta_0)
##Prior
Sigma_s_prop <- .ICAR_calc(s_prop, J_prop, clam_smooth)$Sigma_s
lprior_num <- mvtnorm::dmvnorm(log(lambda0_prop), rep(mu, J + 2), Sigma_s_prop * sigma2, log = T) +
log(s_star - slow) + log(sup - s_star) + log(2 * J + 3) + log(2 * J + 2) + stats::dpois(J + 1, phi, log = T)
lprior_denom <- mvtnorm::dmvnorm(log(lambda_0), rep(mu, J + 1), Sigma_s * sigma2, log = T) +
stats::dpois(J, phi, log = T) + log(sup - slow) + 2 * log(s_max)
##Proposal
lprop <- log(pi_d) - log(pi_b) - log(J + 1) + log(s_max)
##Jacobian
ljac <- -log(U2) - log(1 - U2)
#Prob
logacc <- llike_num - llike_den + lprior_num - lprior_denom + lprop + ljac
if (logacc > log(stats::runif(1))) {
lambda_0 <- lambda0_prop
s <- s_prop
J <- J_prop
Sigma_s <- Sigma_s_prop
df <- df_prop
}
} else {
if (J >= 2) {
j <- sample(sindx[-c(1, J + 2)], 1)
} else {
j <- 2
}
s_max <- max(s)
sj <- s[j]
slow <- s[j - 1]
sup <- s[j + 1]
s_prop <- s[-j]
J_prop <- J - 1
U2 <- stats::runif(1)
#lambda proposal
lambda0_prop <- .death_move(sjp1 = sup, sj = sj, sjm1 = slow, x = lambda_0, j = j)
##Likelihood
df_prop <- .dataframe_fun(Y = Y_0, I = I_0, X = X_0, s = s_prop, lambda = lambda0_prop, bp = bp_0, J = J_prop)
llike_num <- .log_likelihood(df_prop, beta_0)
llike_den <- .log_likelihood(df, beta_0)
##Prior
Sigma_s_prop <- .ICAR_calc(s_prop, J_prop, clam_smooth)$Sigma_s
lprior_num <- stats::dpois(J_prop, phi, log = T) + mvtnorm::dmvnorm(log(lambda0_prop), rep(mu, J), Sigma_s_prop * sigma2, log = T) +
2 * log(s_max) + log(sup- slow)
lprior_denom <- stats::dpois(J, phi, log = T) + mvtnorm::dmvnorm(log(lambda_0), rep(mu, J + 1), Sigma_s * sigma2, log = T) +
log(sj - slow) + log(sup - sj) + log(2 * J + 1) + log(2 * J)
##Proposal
lprop <- log(pi_b) - log(pi_d) + log(J) - log(s_max)
##Jacobian
ljac <- log(U2) + log(1 - U2)
#Prob
logacc <- llike_num - llike_den + lprior_num - lprior_denom + lprop + ljac
if (logacc > log(stats::runif(1))) {
lambda_0 <- lambda0_prop
s <- s_prop
J <- J_prop
Sigma_s <- Sigma_s_prop
df <- df_prop
}
}
return(list("J" = J, "s" = s, "lambda_0" = lambda_0, "Sigma_s" = Sigma_s, "df_all" = df))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/RJMCMC.R
|
#' @title Loglikelihood ratio calculation for beta parameters
#' @description Compute log likelihood for beta update
#'
#' @param df data.frame from dataframe_fun()
#' @param beta beta values
#' @param beta_new proposed beta values
#'
#' @return likelihood ratio
.llikelihood_ratio_beta <- function(df, beta, beta_new) {
X <- as.matrix(df[, substr(colnames(df), 1, 1) == "X"])
xdpb <- X %*% beta
xdpb_new <- X %*% beta_new
llikelihood_ratio <- sum((xdpb_new - xdpb) * df$I -
((df$Y - df$tstart) * df$lambda) *
(exp(xdpb_new) - exp(xdpb)))
return(llikelihood_ratio)
}
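## Minimal sketch (illustrative only): the data frame mimics the columns that
## .dataframe_fun() produces (Y, tstart, I, lambda, X1); all values are
## placeholders chosen for demonstration.
if (FALSE) {
df <- data.frame(Y = c(1.0, 2.0), tstart = c(0, 0), I = c(1, 0),
lambda = c(0.5, 0.5), X1 = c(0, 1))
.llikelihood_ratio_beta(df, beta = 0.2, beta_new = 0.4)
}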
#' @title Beta Metropolis-Hastings Random walk move
#' @description Update beta via a Metropolis-Hastings Random Walk move
#'
#' @param df data.frame from dataframe_fun()
#' @param beta beta values
#' @param bp number of covariates
#' @param cprop_beta hyperparameter for beta proposal standard deviation
#' @param beta_count number of moves done for beta
#'
#' @return beta, either old or new move
.beta_MH_RW <- function(df, beta, bp, cprop_beta, beta_count) {
for (k in 1:bp) {
beta_new <- beta
beta_prop <- stats::rnorm(1, beta[k], cprop_beta)
beta_new[k] <- beta_prop
logacc <- .llikelihood_ratio_beta(df, beta, beta_new)
if(logacc > log(stats::runif(1))) {
beta[k] <- beta_prop
beta_count[k] <- beta_count[k] + 1
}
}
return(list("beta" = beta, "beta_count" = beta_count))
}
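## Usage sketch (illustrative only): one random-walk Metropolis sweep over a
## single covariate, on a placeholder data frame with the expected columns.
if (FALSE) {
set.seed(1)
df <- data.frame(Y = c(1.0, 2.0, 1.5), tstart = 0, I = c(1, 0, 1),
lambda = 0.5, X1 = c(0, 1, 1))
.beta_MH_RW(df, beta = 0, bp = 1, cprop_beta = 0.5, beta_count = 0)
}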
#' @title Mean for MALA using derivative for beta proposal
#'
#' @param df Data frame with indicators
#' @param k index for beta
#' @param beta vector of parameters
#' @param bp number of covariates
#' @param cprop_beta proposal standard dev
#'
#' @return proposal mean
.beta_mom <- function(df, k, beta, bp, cprop_beta) {
X <- as.matrix(df[, paste0("X", 1:bp)])
xdpb <- X %*% beta
x <- X[, k]
D1 <- sum(df$I * x - (df$Y - df$tstart) * df$lambda * x * exp(xdpb))
mu_prop <- beta[k] + (cprop_beta[k]**2) / 2 * D1
return(mu_prop)
}
#' @title Log density of proposal for MALA
#'
#' @param beta_prop proposal beta
#' @param mu mean of proposal distribution
#' @param cprop_beta proposal standard dev
#'
#' @return log density
.lprop_density_beta <- function(beta_prop, mu, cprop_beta) {
ldens <- (-1 / (2 * cprop_beta**2)) * (beta_prop - mu)**2
}
#' @title Proposal beta with a Metropolis Adjusted Langevin (MALA)
#'
#' @param df Data frame with indicators
#' @param beta vector of parameters
#' @param bp number of covariates
#' @param cprop_beta proposal variance standard deviation
#' @param beta_count count number of accepts
#'
#' @return updated beta vector
.beta_MH_MALA <- function(df, beta, bp, cprop_beta, beta_count) {
for(k in 1:bp){
beta_new <- beta
mu_prop <- .beta_mom(df, k, beta, bp, cprop_beta)
beta_prop <- stats::rnorm(n = 1, mean = mu_prop, sd = cprop_beta[k])
beta_new[k] <- beta_prop
mu_old <- .beta_mom(df, k, beta_new, bp, cprop_beta)
# Hastings correction: forward density of the proposal minus reverse density
# of the current value
log_prop_ratio <- .lprop_density_beta(beta_prop, mu_prop, cprop_beta[k]) -
.lprop_density_beta(beta[k], mu_old, cprop_beta[k])
target_ratio <- .llikelihood_ratio_beta(df, beta, beta_new)
logacc <- target_ratio - log_prop_ratio
if(logacc > log(stats::runif(1))) {
beta[k] <- beta_prop
beta_count[k] <- beta_count[k] + 1
}
}
return(list("beta" = beta, "beta_count" = beta_count))
}
#' @title Fit frequentist piecewise exponential model for MLE and information matrix of beta
#' @description Compute MLE for PEM
#'
#' @param df Data frame with time-to-event, censoring indicator and covariates
#'
#' @return beta MLE and inverse of information matrix
.glmFit <- function(df){
lenp <- length(df[1, grepl("X", names(df))])
lab <- paste0("X", 1:lenp)
splits <- length(unique(df$tstart))
if(splits == 1){
fmla <- stats::as.formula(paste("I ~", paste(paste(lab, collapse= "+"), "+ offset(log(Y))")))
}else{
df$tstart <- as.factor(df$tstart)
fmla <- stats::as.formula(paste("I ~ tstart +", paste(paste(lab, collapse= "+"), "+ offset(log(Y))")))
}
#fit PWE model via a Poisson GLM with log-exposure offset
fit <- stats::glm(fmla, family = stats::poisson(), data = df)
ss.x <- grepl("X", names(fit$coefficients))
beta.mu <- fit$coefficients[ss.x]
beta.vcov <- stats::vcov(fit)[ss.x, ss.x]
return(list("beta.mu" = beta.mu, "beta.vcov" = beta.vcov))
}
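## Illustrative fit (placeholder data): with a single interval the helper
## reduces to a Poisson GLM of the event indicator on X1 with a log-exposure
## offset, returning the MLE and its covariance for the X1 effect.
if (FALSE) {
df <- data.frame(Y = c(0.8, 1.5, 2.1, 0.9), tstart = 0, I = c(1, 0, 1, 1),
lambda = 0.5, X1 = c(0, 1, 0, 1))
.glmFit(df)
}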
#' @title Beta MH RW sampler from freq PEM fit
#' @description Sample beta from RW sampler
#'
#' @param df Data frame with indicators
#' @param beta vector of parameters
#' @param beta_count count number of accepted proposals
#' @param cprop_beta proposal scalar
#'
#' @return beta, either old or new move
.beta.MH.RW.glm <- function(df, beta, beta_count, cprop_beta){
glm.mom <- .glmFit(df)
beta.new <- beta
cd2 <- cprop_beta**2 / length(beta)
beta.prop <- as.vector(mvtnorm::rmvnorm(1, mean = beta, sigma = cd2 * glm.mom$beta.vcov))
for (k in 1:length(beta.new)) {
beta.new[k] <- beta.prop[k]
logacc <- .llikelihood_ratio_beta(df, beta, beta.new)
if (logacc > log(stats::runif(1))) {
beta <- beta.new
beta_count[k] <- beta_count[k] + 1
}
}
return(list("beta" = beta, "beta_count" = beta_count))
}
#' @title log Gaussian proposal density for Newton Raphson proposal
#'
#' @param beta.prop beta proposal
#' @param mu_old density mean
#' @param var_old density variance
#'
#' @return log Gaussian density
.lprop.dens.beta.NR <- function(beta.prop, mu_old, var_old){
# include the log-variance term since forward and reverse proposal
# variances differ in the Newton-Raphson scheme
ldens <- -0.5 * log(var_old) - (1 / (2 * var_old)) * (beta.prop - mu_old)**2
return(ldens)
}
#' @title First and second derivative of target for mode and variance of proposal
#'
#' @param df Data frame with indicators
#' @param k index
#' @param beta vector of parameters
#' @param bp number of covariates
#' @param cprop_beta proposal variance standard deviation
#'
#' @return First and second derivative mode and variance
.beta_mom.NR.fun <- function(df, k, beta, bp, cprop_beta) {
bp <- length(beta)
X <- as.matrix(df[, paste0("X", 1:bp)])
xdpb <- X %*% beta
x <- X[, k]
D1 <- sum(df$I * x - (df$Y - df$tstart) * df$lambda * x * exp(xdpb))
D2 <- - sum(df$lambda * (df$Y - df$tstart) * x**2 * exp(xdpb))
mu <- beta[k] - D1 / D2
var <- -cprop_beta**2 / D2
return(list("D1" = D1, "D2" = D2, "mu" = mu, "var" = var))
}
#' @title Newton Raphson MH move
#' @description Sample beta from RW sampler
#'
#' @param df Data frame with indicators
#' @param beta vector of parameters
#' @param bp number of covariates
#' @param cprop_beta proposal scalar
#' @param beta_count count number of accepts
#'
#' @return updated beta
.beta_MH_NR <- function(df, beta, bp, cprop_beta, beta_count){
for(k in 1:bp){
beta.new <- beta
mom.prop <- .beta_mom.NR.fun(df, k, beta, bp, cprop_beta)
beta.prop <- stats::rnorm(n = 1, mean = mom.prop$mu, sd = sqrt(mom.prop$var))
beta.new[k] <- beta.prop
mom.old <- .beta_mom.NR.fun(df, k, beta.new, bp, cprop_beta)
# Hastings correction: forward density of the proposal minus reverse density
# of the current value
log_prop_ratio <- .lprop.dens.beta.NR(beta.prop, mom.prop$mu, mom.prop$var) -
.lprop.dens.beta.NR(beta[k], mom.old$mu, mom.old$var)
target_ratio <- .llikelihood_ratio_beta(df, beta, beta.new)
logacc <- target_ratio - log_prop_ratio
if(logacc > log(stats::runif(1))){
beta[k] <- beta.prop
beta_count[k] <- beta_count[k] + 1
}
}
return(list("beta" = beta, "beta_count" = beta_count))
}
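## Usage sketch (illustrative only): a single Newton-Raphson-guided MH sweep
## for one covariate; data and tuning values are placeholders.
if (FALSE) {
set.seed(1)
df <- data.frame(Y = c(0.8, 1.5, 1.1), tstart = 0, I = c(1, 0, 1),
lambda = 0.5, X1 = c(0, 1, 1))
.beta_MH_NR(df, beta = 0, bp = 1, cprop_beta = 0.5, beta_count = 0)
}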
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/beta_updates.R
|
#' @title Propose lambda from a gamma conditional conjugate posterior proposal
#'
#' @param df data.frame from dataframe_fun()
#' @param beta parameter value for beta
#' @param j current split point
#' @param bp number of covariates
#' @param alam lambda hyperparameter, default set to 0.01
#' @param blam lambda hyperparameter, default set to 0.01
#'
#' @return list containing proposed lambda, shape and rate parameters
.lambda_conj_prop <- function(df, beta, j, bp, alam = 0.01, blam = 0.01) {
indx <- unique(df$tstart)
df_ss <- df[df$tstart == indx[j], ]
if(!is.null(beta)) {
X <- as.matrix(df_ss[, paste0("X", 1:bp)])
xdpb <- X %*% beta
rate_prop <- blam + sum((df_ss$Y - df_ss$tstart) * exp(xdpb))
}else{
rate_prop <- blam + sum((df_ss$Y - df_ss$tstart))
}
shape_prop <- alam + sum(df_ss$I)
lambda_prop <- 0
# if sum(df_ss) = 0, this causes lambda_prop --> 0, which will cause NA in logacc
while (lambda_prop == 0) {
lambda_prop <- stats::rgamma(1, shape = shape_prop, rate = rate_prop)
}
return(list("lambda_prop" = lambda_prop, "shape_prop" = shape_prop, "rate_prop" = rate_prop))
}
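## Illustrative call (placeholder data): draws a proposal for the first
## interval's hazard from its gamma conditional conjugate posterior.
if (FALSE) {
set.seed(1)
df <- data.frame(Y = c(0.8, 1.5), tstart = 0, I = c(1, 1),
lambda = 0.5, X1 = c(0, 1))
.lambda_conj_prop(df, beta = 0.2, j = 1, bp = 1)
}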
#' @title Log likelihood for lambda / lambda_0 update
#'
#' @param df data.frame from dataframe_fun()
#' @param df_prop proposal data.frame
#' @param beta parameter value for beta
#'
#' @return log likelihood ratio for lambda
.llikelihood_ratio_lambda <- function(df, df_prop, beta) {
if(!is.null(beta)) {
X <- as.matrix(df[, substr(colnames(df), 1, 1) == "X"])
xdpb <- X %*% beta
llikelihood_ratio <- sum((log(df_prop$lambda) - log(df$lambda)) * df$I -
(df$Y - df$tstart) * (df_prop$lambda - df$lambda) * exp(xdpb))
}else{
llikelihood_ratio <- sum((log(df_prop$lambda) - log(df$lambda)) * df$I -
(df$Y - df$tstart) * (df_prop$lambda - df$lambda))
}
return(llikelihood_ratio)
}
#' @title Calculates nu and sigma2 for the Gaussian Markov random field prior,
#' for a given split point j
#'
#' @param j current split point
#' @param lambda_0 historical baseline hazard
#' @param mu prior mean for baseline hazard
#' @param sigma2 prior variance hyperparameter for baseline hazard
#' @param W influence from right and left neighbors
#' @param Q individual effect of neighborhood
#' @param J number of split points
#'
#' @return nu and sigma2
.nu_sigma_update <- function(j, lambda_0, mu, sigma2, W, Q, J) {
# the GMRF prior is placed on the log baseline hazards
nu <- mu + (log(lambda_0) - rep(mu, nrow(W))) %*% W[j,]
if(J > 0) {
sigma2j <- sigma2 * Q[j,j]
}else{
sigma2j <- sigma2 * 1
}
return(list("nu" = nu, "sigma2j" = sigma2j))
}
#' @title Calculate log gamma ratio for two different parameter values
#'
#' @param x1 old parameter value
#' @param x2 proposed parameter value
#' @param shape shape parameter
#' @param rate rate parameter
#'
#' @return log gamma ratio
.lgamma_ratio <- function(x1, x2, shape, rate) {
(shape - 1) * log(x1) - rate * x1 - (shape - 1) * log(x2) + rate * x2
}
#' @title Lambda_0 MH step, proposal from conditional conjugate posterior
#'
#' @param df_hist data.frame from dataframe_fun()
#' @param Y_0 historical trial data
#' @param I_0 historical trial censoring indicator
#' @param X_0 historical trial design matrix
#' @param s split point locations, (J+2)
#' @param beta_0 parameter value for historical covariates
#' @param mu prior mean for baseline hazard
#' @param sigma2 prior variance hyperparameter for baseline hazard
#' @param lambda baseline hazard
#' @param lambda_0 historical baseline hazard
#' @param tau borrowing parameter
#' @param bp_0 number of covariates, length(beta_0)
#' @param J number of split points
#' @param clam controls neighbor interactions, in range (0, 1)
#' @param a_lam lambda hyperparameter, default is 0.01
#' @param b_lam lambda hyperparameter, default is 0.01
#' @param lambda_0_count number of total moves for lambda_0
#' @param lambda_0_move number of accepted moves for lambda_0
#'
#' @return list of updated (if accepted) lambda_0 and data.frames, as well as the
#' number of accepted moves
.lambda_0_MH_cp <- function(df_hist, Y_0, I_0, X_0 = NULL, s, beta_0 = NULL,
mu, sigma2, lambda, lambda_0, tau, bp_0 = 0, J,
clam, a_lam = 0.01, b_lam = 0.01, lambda_0_count = 0,
lambda_0_move = 0) {
ICAR <- .ICAR_calc(s, J, clam)
Sigma_s <- ICAR$Sigma_s
Q <- ICAR$Q
W <- ICAR$W
for (j in 1:(J + 1)) {
lambda_0_new <- lambda_0
lambda_0_prop_all <- .lambda_conj_prop(df_hist, beta = beta_0, j, bp = bp_0, alam = a_lam,
blam = b_lam)
lambda_0_prop <- lambda_0_prop_all$lambda_prop
lambda_0_new[j] <- lambda_0_prop
shape_prop <- lambda_0_prop_all$shape_prop
rate_prop <- lambda_0_prop_all$rate_prop
df_prop <- .dataframe_fun(Y = Y_0, I = I_0, X = X_0, s = s, lambda = lambda_0_new, bp = bp_0, J = J)
nu_sigma <-.nu_sigma_update(j, lambda_0, mu, sigma2, W, Q, J)
llikelihood_ratio <- .llikelihood_ratio_lambda(df_hist, df_prop, beta_0)
log_prop_ratio <- .lgamma_ratio(x1 = lambda_0[j], x2 = lambda_0_prop, shape = shape_prop, rate = rate_prop)
target_num <- stats::dnorm(log(lambda_0_prop), nu_sigma$nu, sqrt(nu_sigma$sigma2j),log = T) -
log(lambda_0_prop)
target_den <- stats::dnorm(log(lambda_0[j]), nu_sigma$nu, sqrt(nu_sigma$sigma2j),log = T) -
log(lambda_0[j])
if(length(tau) > 1) {
target_num <- target_num +
stats::dnorm(log(lambda[j]), log(lambda_0_prop), sqrt(tau[j]), log = T)
target_den <- target_den +
stats::dnorm(log(lambda[j]), log(lambda_0[j]), sqrt(tau[j]), log = T)
}else{
target_num <- target_num +
stats::dnorm(log(lambda[j]), log(lambda_0_prop), sqrt(tau), log = T)
target_den <- target_den +
stats::dnorm(log(lambda[j]), log(lambda_0[j]), sqrt(tau), log = T)
}
logacc <- llikelihood_ratio + target_num - target_den + log_prop_ratio
if(logacc > log(stats::runif(1))) {
lambda_0 <- lambda_0_new
df_hist <- df_prop
lambda_0_move <- lambda_0_move + 1
}
lambda_0_count <- lambda_0_count + 1
}
return(list("lambda_0" = lambda_0, "df_hist" = df_hist, "lambda_0_count" = lambda_0_count, "lambda_0_move" = lambda_0_move))
}
#' @title Lambda_0 MH step, proposal from conditional conjugate posterior
#'
#' @param df_hist data.frame from dataframe_fun()
#' @param Y_0 historical trial data
#' @param I_0 historical trial censoring indicator
#' @param X_0 historical trial design matrix
#' @param s split point locations, (J+2)
#' @param beta_0 parameter value for historical covariates
#' @param mu prior mean for baseline hazard
#' @param sigma2 prior variance hyperparameter for baseline hazard
#' @param lambda_0 baseline hazard
#' @param bp_0 number of covariates, length(beta_0)
#' @param J number of split points
#' @param clam controls neighbor interactions, in range (0, 1)
#' @param a_lam lambda hyperparameter, default is 0.01
#' @param b_lam lambda hyperparameter, default is 0.01
#' @param lambda_0_count number of total moves for lambda_0
#' @param lambda_0_move number of accepted moves for lambda_0
#'
#' @return list of updated (if accepted) lambda_0 and data.frames, as well as the
#' number of accepted moves
.lambda_0_MH_cp_NoBorrow <- function(df_hist, Y_0, I_0, X_0 = NULL, s,
beta_0 = NULL, mu, sigma2, lambda_0,
bp_0 = 0, J, clam, a_lam = 0.01,
b_lam = 0.01, lambda_0_count = 0,
lambda_0_move = 0) {
ICAR <- .ICAR_calc(s, J, clam)
Sigma_s <- ICAR$Sigma_s
Q <- ICAR$Q
W <- ICAR$W
for (j in 1:(J + 1)) {
lambda_0_new <- lambda_0
lambda_0_prop_all <- .lambda_conj_prop(df_hist, beta = beta_0, j, bp = bp_0, alam = a_lam,
blam = b_lam)
lambda_0_prop <- lambda_0_prop_all$lambda_prop
lambda_0_new[j] <- lambda_0_prop
shape_prop <- lambda_0_prop_all$shape_prop
rate_prop <- lambda_0_prop_all$rate_prop
df_prop <- .dataframe_fun(Y = Y_0, I = I_0, X = X_0, s = s, lambda = lambda_0_new, bp = bp_0, J = J)
nu_sigma <-.nu_sigma_update(j, lambda_0, mu, sigma2, W, Q, J)
llikelihood_ratio <- .llikelihood_ratio_lambda(df_hist, df_prop, beta_0)
log_prop_ratio <- stats::dgamma(lambda_0[j], shape = shape_prop, rate = rate_prop, log = T) -
stats::dgamma(lambda_0_prop, shape = shape_prop, rate = rate_prop, log = T)
target_num <- .log_likelihood(df_prop, beta_0) +
stats::dnorm(log(lambda_0_prop), nu_sigma$nu, sqrt(nu_sigma$sigma2j),log = T) -
log(lambda_0_prop)
target_den <- .log_likelihood(df_hist, beta_0) +
stats::dnorm(log(lambda_0[j]), nu_sigma$nu, sqrt(nu_sigma$sigma2j),log = T) -
log(lambda_0[j])
logacc <- target_num - target_den + log_prop_ratio
if(logacc > log(stats::runif(1))) {
lambda_0 <- lambda_0_new
df_hist <- df_prop
lambda_0_move <- lambda_0_move + 1
}
lambda_0_count <- lambda_0_count + 1
}
return(list("lambda_0" = lambda_0, "df_hist" = df_hist, "lambda_0_count" = lambda_0_count, "lambda_0_move" = lambda_0_move))
}
#' @title Lambda MH step, proposal from conditional conjugate posterior
#'
#' @param df_hist data.frame from dataframe_fun()
#' @param df_curr data.frame from dataframe_fun()
#' @param Y data
#' @param I censoring indicator
#' @param X design matrix
#' @param s split point locations, J + 2
#' @param beta parameter value for covariates
#' @param beta_0 parameter value for historical covariates
#' @param mu prior mean for baseline hazard
#' @param sigma2 prior variance hyperparameter for baseline hazard
#' @param lambda baseline hazard
#' @param lambda_0 historical baseline hazard
#' @param tau borrowing parameter
#' @param bp number of covariates, length(beta)
#' @param bp_0 number of covariates, length(beta_0)
#' @param J number of split points
#' @param a_lam lambda hyperparameter
#' @param b_lam lambda hyperparameter
#' @param lambda_move number of accepted lambda moves
#' @param lambda_count total number of lambda moves
#' @param alpha power parameter
#'
#' @return list of updated (if accepted) lambda and data.frames, as well as the
#' number of accepted moves
.lambda_MH_cp <- function(df_hist, df_curr, Y, I, X, s, beta, beta_0 = NULL, mu, sigma2, lambda, lambda_0, tau,
bp, bp_0 = 0, J, a_lam = 0.01, b_lam = 0.01, lambda_move = 0,
lambda_count = 0, alpha = 0.3) {
for (j in 1:(J + 1)) {
lambda_new <- lambda
lambda_prop_cc <- .lambda_conj_prop(df_curr, beta, j, bp = bp, alam = a_lam,
blam = b_lam)
cc_shape_prop <- lambda_prop_cc$shape_prop
cc_rate_prop <- lambda_prop_cc$rate_prop
lambda_prop_hist <- .lambda_conj_prop(df_hist, beta_0, j, bp = bp_0, alam = a_lam,
blam = b_lam)
hist_shape_prop <- lambda_prop_hist$shape_prop
hist_rate_prop <- lambda_prop_hist$rate_prop
shape_prop <- a_lam + cc_shape_prop + alpha * hist_shape_prop
rate_prop <- b_lam + cc_rate_prop + alpha * hist_rate_prop
lambda_prop <- stats::rgamma(1, shape = shape_prop, rate = rate_prop)
lambda_new[j] <- lambda_prop
df_prop <- .dataframe_fun(Y = Y, I = I, X = X, s = s, lambda = lambda_new, bp = bp, J = J)
llikelihood_ratio <- .llikelihood_ratio_lambda(df_curr, df_prop, beta)
log_prop <- .lgamma_ratio(x1 = lambda[j], x2 = lambda_prop, shape = shape_prop, rate = rate_prop)
target_num <- (- log(lambda_prop))
target_den <- (- log(lambda[j]))
# Adjust for non piecewise tau
if(length(tau) > 1) {
target_num <- target_num + stats::dnorm(log(lambda_prop), log(lambda_0[j]), sqrt(tau[j]), log = T)
target_den <- target_den + stats::dnorm(log(lambda[j]), log(lambda_0[j]), sqrt(tau[j]), log = T)
}else{
target_num <- target_num + stats::dnorm(log(lambda_prop), log(lambda_0[j]), sqrt(tau), log = T)
target_den <- target_den + stats::dnorm(log(lambda[j]), log(lambda_0[j]), sqrt(tau), log = T)
}
logacc <- llikelihood_ratio + target_num - target_den + log_prop
if(logacc > log(stats::runif(1))) {
lambda <- lambda_new
df_curr <- df_prop
lambda_move <- lambda_move + 1
}
lambda_count <- lambda_count + 1
}
return(list("lambda" = lambda, "df_curr" = df_curr, "lambda_count" = lambda_count, "lambda_move" = lambda_move))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/lambda_updates.R
|
#' @title Summarize MCMC results
#'
#' @description S3 method for class "BayesFBHborrow". Returns a summary of the mean,
#' standard deviation, and given percentiles for the one dimensional parameters.
#'
#' @param object MCMC sample object from BayesFBHborrow()
#' @param estimator The type of estimator to summarize, could be "out_fixed", "lambda",
#' or "s". The default is NULL and will print a summary of the output list.
#' @param percentiles Given percentiles to output, default is c(0.025, 0.25, 0.75, 0.975)
#' @param ... other arguments, see summary.default
#'
#' @importFrom magrittr %>%
#' @importFrom stats var
#' @importFrom stats quantile
#' @import dplyr
#'
#' @return summary of the given estimator
#' @export
#'
#' @examples
#' data(piecewise_exp_cc, package = "BayesFBHborrow")
#'
#' # Set your tuning parameters
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5)
#'
#' # run the MCMC sampler
#' out <- BayesFBHborrow(piecewise_exp_cc, NULL, tuning_parameters,
#' initial_values = NULL,
#' iter = 10, warmup_iter = 1)
#'
#' # Create a summary of the output
#' summary(out, estimator = "out_fixed")
summary.BayesFBHborrow <- function(object, estimator = NULL,
percentiles = c(0.025, 0.25, 0.75, 0.975), ...) {
summary <- NULL
if (is.null(estimator)) {
summary <- summary.default(object)
} else if (estimator == "out_fixed") {
## For out_fixed
mean_val <- apply(object$out_fixed, 2, mean)
variance_val <- apply(object$out_fixed, 2, var)
# Given (or default) percentiles
quantiles <- apply(object$out_fixed, 2, quantile, probs = percentiles)
# Output structure
summary <- tibble::tibble(
id = names(object$out_fixed),
Mean = mean_val,
sd = sqrt(variance_val)
)
summary <-
summary %>%
dplyr::bind_cols(tibble::as_tibble(t(quantiles)))
} else if (estimator == "lambda") {
if (exists("lambda_0", object)) {
lambda <- cbind(object$lambda, object$lambda_0)
      lam_names <- paste0("lam_X", 1:ncol(object$lambda))
      lam0_names <- paste0("lam0_X", 1:ncol(object$lambda_0))
      colnames(lambda) <- c(lam_names, lam0_names)
} else {lambda <- object$lambda}
mean_val <- apply(lambda, 2, mean, na.rm = TRUE)
variance_val <- apply(lambda, 2, var, na.rm = TRUE)
N_samples <- colSums(!is.na(lambda))
quantiles <- apply(lambda, 2, stats::quantile, probs = percentiles, na.rm = TRUE)
summary <- tibble::tibble(
id = names(mean_val),
samples = N_samples,
mean = mean_val,
sd = sqrt(variance_val)
)
summary <-
summary %>%
bind_cols(tibble::as_tibble(t(quantiles)))
} else if (estimator == "s") {
table_J <- table(object$out_fixed$J)
most_frequent <- as.numeric(names(table_J[which.max(table_J)]))
cat(paste0("Most frequent number of split points: ", most_frequent, "\n"))
mean_val <- apply(object$s, 2, mean, na.rm = TRUE)
variance_val <- apply(object$s, 2, stats::var, na.rm = TRUE)
# Given (or default) percentiles
N_samples <- colSums(!is.na(object$s))
quantiles <- apply(object$s, 2, stats::quantile, probs = percentiles, na.rm = TRUE)
summary <- tibble::tibble(
id = names(mean_val),
samples = N_samples,
mean = mean_val,
sd = sqrt(variance_val)
)
summary <-
summary %>%
dplyr::bind_cols(tibble::as_tibble(t(quantiles)))
} else {stop("Type not recognized")}
return(summary)
}
#' @title Extract mean posterior values
#'
#' @description S3 method for class "BayesFBHborrow", returns the mean posterior values
#' for the fixed parameters
#'
#' @param object MCMC sample object from BayesFBHborrow()
#' @param ... other arguments, see coef.default()
#'
#' @return mean values of given samples
#' @export
#'
#' @examples
#' data(weibull_cc, package = "BayesFBHborrow")
#'
#' # Set your tuning parameters
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5)
#'
#' # run the MCMC sampler
#' out <- BayesFBHborrow(weibull_cc, NULL, tuning_parameters,
#' initial_values = NULL,
#' iter = 10, warmup_iter = 1)
#'
#' # Plot the posterior mean values of the fixed parameters
#' coef(out)
coef.BayesFBHborrow <- function(object, ...) {
return(apply(object$out_fixed, 2, mean))
}
#' @title Plot smoothed baseline hazards
#'
#' @description Plot mean and given quantiles of a matrix. Can also be used to
#' plot derivatives of the baseline hazard, such as estimated cumulative hazard
#' and survival function.
#'
#' @param x_lim time grid
#' @param y samples
#' @param percentiles percentiles to include in plot, default is c(0.05, 0.95)
#' @param title optional, add title to plot
#' @param xlab optional, add xlabel
#' @param ylab optional, add ylabel
#' @param color color of the mid line, default is blue
#' @param fill color of the percentiles, default is blue
#' @param linewidth thickness of the plotted line, default is 1
#' @param alpha opacity of the percentiles, default is 0.2
#'
#' @import ggplot2
#'
#' @return a ggplot2 object
#' @export
#'
#' @examples
#' data(weibull_cc, package = "BayesFBHborrow")
#'
#' # Set your tuning parameters
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5)
#'
#' # run the MCMC sampler
#' out <- BayesFBHborrow(weibull_cc, NULL, tuning_parameters,
#' initial_values = NULL,
#' iter = 10, warmup_iter = 1)
#'
#' # Visualize the smoothed baseline hazard
#' time_grid <- seq(0, max(weibull_cc$tte), length.out = 2000)
#' gg <- plot_matrix(time_grid, out$out_slam,
#' title = "Example plot of smoothed baseline hazard",
#' xlab = "time", ylab = "baseline hazard")
plot_matrix <- function(x_lim, y, percentiles = c(0.05, 0.95), title = "",
xlab = "", ylab = "", color = "blue", fill = "blue",
linewidth = 1, alpha = 0.2) {
mean_values <- apply(y, 2, mean)
ql_values <- apply(y, 2, stats::quantile, probs = percentiles[1])
qu_values <- apply(y, 2, stats::quantile, probs = percentiles[2])
# data frame
plot_data <- data.frame(
time_grid = x_lim,
mean = mean_values,
ql = ql_values,
qu = qu_values
)
# Create the plot of survival
gg <- ggplot(plot_data, aes_string(x = "time_grid")) +
geom_line(aes_string(y = "mean"), color = color, linewidth = linewidth) +
geom_ribbon(aes_string(ymin = "ql", ymax = "qu"), fill = fill, alpha = alpha) +
labs(x = xlab, y = ylab,
title = title) +
theme_minimal()
return(gg)
}
#' @title Plot MCMC trace
#'
#' @description Creates a trace plot of given MCMC samples.
#'
#' @param x_lim x-axis of the plot
#' @param samples samples from MCMC
#' @param title optional, add title to plot
#' @param xlab optional, add xlabel
#' @param ylab optional, add ylabel
#' @param color color of the mid line, default is black
#' @param linewidth thickness of the plotted line, default is 1
#'
#' @import ggplot2
#'
#' @return a ggplot2 object
#' @export
#'
#' @examples
#' data(weibull_cc, package = "BayesFBHborrow")
#'
#' # Set your tuning parameters
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5)
#'
#' # run the MCMC sampler
#' out <- BayesFBHborrow(weibull_cc, NULL, tuning_parameters,
#' initial_values = NULL,
#' iter = 10, warmup_iter = 1)
#'
#' # Create a trace plot of the treatment effect, beta_1
#' gg <- plot_trace(1:10, out$out_fixed$beta_1,
#' title = "Example trace plot",
#' xlab = "iterations", ylab = "beta_1 (treatment effect)")
plot_trace <- function(x_lim, samples, title = "", xlab = "", ylab = "",
color = "black", linewidth = 1) {
trace_data <- data.frame(iterations = x_lim, samples = samples)
gg <- ggplot(trace_data, aes_string(x = "iterations", y = "samples")) +
geom_line(color = color, linewidth = linewidth) +
labs(x = xlab, y = ylab, title = title) +
theme_minimal()
return(gg)
}
#' @title Plot histogram from MCMC samples
#'
#' @description Plots a histogram of the given discrete MCMC samples
#'
#' @param samples data.frame containing the discrete MCMC samples
#' @param title title of the plot, default is none
#' @param xlab x-label of the plot, default is "Values"
#' @param ylab y-label of the plot, default is "Frequency"
#' @param color outline color for the bars, default is "black"
#' @param fill fill color, default is "blue"
#' @param binwidth width of the histogram bins, default is 0.05
#' @param scale_x option to scale the x-axis, suitable for discrete samples,
#' default is FALSE
#'
#' @import ggplot2
#'
#' @return a ggplot2 object
#' @export
#'
#' @examples
#' data(weibull_cc, package = "BayesFBHborrow")
#'
#' # Set your tuning parameters
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5)
#'
#' # run the MCMC sampler
#' out <- BayesFBHborrow(weibull_cc, NULL, tuning_parameters,
#' initial_values = NULL,
#' iter = 10, warmup_iter = 1)
#'
#' # Plot the frequency of the number of split points, J with a histogram
#' gg <- plot_hist(out$out_fixed$J, title = "Example histogram of J",
#' scale_x = TRUE)
plot_hist <- function(samples, title = "", xlab = "Values", ylab = "Frequency",
color = "black", fill = "blue", binwidth = 0.05,
scale_x = FALSE) {
if (scale_x == TRUE) {
gg <- ggplot(data.frame(values = samples), aes_string(x = "values")) +
geom_histogram(binwidth = binwidth, fill = fill, color = color) +
labs(x = xlab, y = ylab, title = title) +
scale_x_continuous(breaks = seq(min(samples), max(samples), by = 1)) +
theme_minimal()
} else {
gg <- ggplot(data.frame(values = samples), aes_string(x = "values")) +
geom_histogram(binwidth = binwidth, fill = fill, color = color) +
labs(x = xlab, y = ylab, title = title) +
theme_minimal()
}
return(gg)
}
#' @title Plot the MCMC results
#'
#' @description S3 object which produces different plots depending on the
#' "type" variable
#'
#' @param x object of class "BayesFBHborrow" to be visualized
#' @param x_lim x-axis to be used for plot
#' @param estimator which estimate to be visualized
#' @param type The type of plot to be produced,
#' "trace" will produce a trace plot of the "fixed" parameters,
#' "hist" will give a histogram for the "fixed" parameters,
#' and "matrix" will plot the mean and quantiles of a given sample.
#' @param ... other plotting arguments, see plot_trace(), plot_hist(), plot_matrix()
#' for more information
#'
#' @importFrom graphics plot.default
#'
#' @return ggplot2 object
#'
#' @export
#'
#' @examples
#' data(weibull_cc, package = "BayesFBHborrow")
#'
#' # Set your tuning parameters
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5)
#'
#' # run the MCMC sampler
#' out <- BayesFBHborrow(weibull_cc, NULL, tuning_parameters,
#' initial_values = NULL,
#' iter = 10, warmup_iter = 1)
#'
#' # Now let's create a variety of plots
#'
#' # Starting with a histogram of beta_1 (treatment effect)
#' gg_hist <- plot(out, NULL, estimator = "beta_1", type = "hist",
#' title = "Example histogram of beta_1")
#'
#' # And an accompanied trace plot of the same parameter
#' gg_trace <- plot(out, 1:10, estimator = "beta_1", type = "trace",
#' title = "Example trace plot", xlab = "iterations",
#' ylab = "beta_1 (treatment effect)")
#'
#' # Lastly, visualize the smoothed baseline hazard
#' time_grid <- seq(0, max(weibull_cc$tte), length.out = 2000)
#' gg_matrix <- plot(out, time_grid, estimator = "out_slam", type = "matrix",
#' title = "Example plot of smoothed baseline hazard",
#' xlab = "time", ylab = "baseline hazard")
plot.BayesFBHborrow <- function(x, x_lim, estimator = NULL, type = NULL, ...) {
if (is.null(type)) {
message("Please specify type of plot for BayesFBHborrow class object, calling default")
gg_object <- plot.default(x_lim, x, ...)
} else if (type == "trace") {
    samples <- x[[1]]
gg_object <- plot_trace(x_lim = x_lim, samples = samples[[estimator]], ...)
} else if (type == "hist") {
    samples <- x[[1]]
gg_object <- plot_hist(samples = samples[[estimator]], ...)
} else if (type == "matrix") {
    gg_object <- plot_matrix(x_lim = x_lim, y = x[[estimator]], ...)
} else {stop("'type' not recognized")}
return(gg_object)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/outputs.R
|
#' Example data, simulated from a piecewise exponential model.
#'
#' Data is simulated for a concurrent trial with three columns named "tte"
#' (time-to-event), "event" (event indicator), and "X_trt" (treatment indicator).
#'
#' @docType data
#'
#' @usage data(piecewise_exp_cc)
#'
#' @keywords datasets
#'
#' @examples
#' data(piecewise_exp_cc)
#' survival_model <- survival::survfit(survival::Surv(tte, event) ~ X_trt, data = piecewise_exp_cc)
#' line_colors <- c("blue", "red") # Adjust colors as needed
#' line_types <- 1:length(unique(piecewise_exp_cc$X_trt))
#' plot(survival_model, col = line_colors, lty = line_types,
#' xlab = "Time (tte)", ylab = "Survival Probability",
#' main = "Kaplan-Meier Survival Curves by Treatment")
"piecewise_exp_cc"
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/piecewise_exp_cc-data.R
|
#' Example data, simulated from a piecewise exponential model.
#'
#' Data is simulated for a historical trial with two columns named "tte"
#' (time-to-event) and "event" (event indicator).
#'
#' @docType data
#'
#' @usage data(piecewise_exp_hist)
#'
#' @keywords datasets
#'
#' @examples
#' data(piecewise_exp_cc)
#' data(piecewise_exp_hist)
#' piecewise_exp_hist$X_trt <- 0
#' survival_model <- survival::survfit(survival::Surv(tte, event) ~ X_trt,
#' data = rbind(piecewise_exp_cc,
#' piecewise_exp_hist))
#' line_colors <- c("blue", "red", "green") # Adjust colors as needed
#' line_types <- 1:length(unique(piecewise_exp_cc$X_trt))
#' plot(survival_model, col = line_colors, lty = line_types,
#' xlab = "Time (tte)", ylab = "Survival Probability",
#' main = "Kaplan-Meier Survival Curves by Treatment")
"piecewise_exp_hist"
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/piecewise_exp_hist-data.R
|
#' @title Calculate sigma2 posterior update
#'
#' @param mu mean.
#' @param lambda_0 Baseline hazard.
#' @param Sigma_s VCV matrix (j + 1) x (j + 1).
#' @param J Number of split point.
#' @param a_sigma Hyperparameter a.
#' @param b_sigma Hyperparameter b.
#'
#' @return sigma2 draw from IG
.sigma2_update <- function(mu, lambda_0, Sigma_s, J, a_sigma, b_sigma) {
a_post <- a_sigma + (J + 1) / 2
one <- rep(1, J + 1)
cp <- t(mu * one - log(lambda_0)) %*% solve(Sigma_s) %*% (mu * one - log(lambda_0))
b_post <- b_sigma + cp / 2
sigma2 <- invgamma::rinvgamma(1, shape = a_post, rate = b_post)
}
#' @title Calculate mu posterior update
#'
#' @param Sigma_s VCV matrix (j + 1) x (j + 1).
#' @param lambda_0 Baseline hazard.
#' @param sigma2 Scale variance.
#' @param J Number of split point.
#'
#' @return mu update from Normal.
.mu_update <- function(Sigma_s, lambda_0, sigma2, J) {
one <- rep(1, J + 1)
mu_num <- (t(one) %*% solve(Sigma_s) %*% log(lambda_0))
mu_den <- t(one) %*% solve(Sigma_s) %*% one
mu_mu <- mu_num / mu_den
mu_var <- sigma2 / mu_den
mu <- stats::rnorm(1, mean = mu_mu, sd = sqrt(mu_var))
}
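# Illustrative sketch (not part of the package API): with Sigma_s = I, the
# posterior mean of mu reduces to the average of log(lambda_0). The baseline
# hazards below are toy values chosen for demonstration only.
if (FALSE) {
  set.seed(1)
  lambda_0 <- c(0.5, 0.8, 1.2)
  draws <- replicate(5000, .mu_update(diag(3), lambda_0, sigma2 = 0.1, J = 2))
  mean(draws)          # approximately equal to ...
  mean(log(lambda_0))  # ... the average log baseline hazard
}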
#' @title Calculate covariance matrix in the MVN-ICAR
#'
#' @param s split points, J + 2
#' @param J number of split points
#' @param clam controls neighbor interactions, in range (0, 1)
#'
#' @return Sigma_s = (I - W)^(-1) * Q, W, Q
.ICAR_calc <- function(s, J, clam) {
W <- matrix(rep(0,(J + 1) * (J + 1)), nrow = J + 1)
Q <- matrix(rep(0,(J + 1) * (J + 1)), nrow = J + 1)
interval_length <- diff(s[!(is.na(s))])
if (J < 2) {
if (J == 1) {
W[1, 2] <- clam * (interval_length[1] + interval_length[2]) / (2 * interval_length[1] + interval_length[2])
      W[2, 1] <- clam * (interval_length[2] + interval_length[1]) / (interval_length[1] + 2 * interval_length[2])
Q[1, 1] <- 2 / (2 * interval_length[1] + interval_length[2])
Q[2, 2] <- 2 / (interval_length[1] + 2 * interval_length[2])
Sigma_s <- solve(diag(J + 1) - W) %*% Q
} else {
Sigma_s <- as.matrix(1)
}
} else {
    for (j in 2:J) {
      W[j, j + 1] <- clam * (interval_length[j] + interval_length[j + 1]) / (interval_length[j - 1] + 2 * interval_length[j] + interval_length[j + 1])
      W[j, j - 1] <- clam * (interval_length[j] + interval_length[j - 1]) / (interval_length[j - 1] + 2 * interval_length[j] + interval_length[j + 1])
      Q[j, j] <- 2 / (interval_length[j - 1] + 2 * interval_length[j] + interval_length[j + 1])
    }
    # Boundary intervals (previously indexed via the loop variable j == J)
    Q[J + 1, J + 1] <- 2 / (interval_length[J] + 2 * interval_length[J + 1])
    Q[1, 1] <- 2 / (2 * interval_length[1] + interval_length[2])
    W[1, 2] <- clam * (interval_length[1] + interval_length[2]) / (2 * interval_length[1] + interval_length[2])
    W[J + 1, J] <- clam * (interval_length[J + 1] + interval_length[J]) / (interval_length[J] + 2 * interval_length[J + 1])
    Sigma_s <- solve(diag(J + 1) - W) %*% Q
}
return(list("Sigma_s" = Sigma_s, "W" = W, "Q" = Q))
}
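# Illustrative sketch (not part of the package API): the ICAR covariance for
# J = 2 interior split points on a toy time axis (values chosen for
# demonstration only).
if (FALSE) {
  s <- c(0, 1, 2.5, 4)                   # J + 2 points: boundaries plus 2 split points
  icar <- .ICAR_calc(s, J = 2, clam = 0.8)
  icar$Sigma_s                           # 3 x 3 covariance used in the MVN-ICAR prior
}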
#' @title Sample tau from posterior distribution
#'
#' @param lambda_0 historical baseline hazard
#' @param lambda baseline hazard
#' @param J number of split points
#' @param s split point locations, J + 2
#' @param a_tau Inverse Gamma hyperparameter
#' @param b_tau Inverse Gamma hyperparameter
#' @param c_tau Inverse Gamma hyperparameter
#' @param d_tau Inverse Gamma hyperparameter
#' @param p_0 mixture ratio
#' @param type choice of borrowing, "mix", "uni", or any other string for
#' borrowing on every baseline hazard without mixture
#'
#' @return list containing tau and new mixture ratio
.tau_update <- function(lambda_0, lambda, J, s,
a_tau, b_tau, c_tau = NULL, d_tau = NULL,
p_0 = NULL, type) {
sq_diff <- (log(lambda_0) - log(lambda))**2
if (type == "mix") {
#compute prob on the log scale
lw_0_num <- a_tau * log(b_tau) + lgamma(a_tau + 0.5)
lw_0_den <- (0.5 + a_tau) * log((sq_diff / 2) + b_tau) + lgamma(a_tau)
lw_1_num <- c_tau * log(d_tau) + lgamma(c_tau + 0.5)
lw_1_den <- (0.5 + c_tau) * log((sq_diff / 2) + d_tau) + lgamma(c_tau)
p_0_new <- log(p_0) + lw_0_num - lw_0_den
p_1_new <- log(1 - p_0) + lw_1_num - lw_1_den
probability_mat <- cbind(p_0_new, p_1_new)
# normalize with log sum exp trick - avoid overflow
p_new <- apply(probability_mat, 1, .normalize_prob)
# sample mixture
comp <- apply(p_new, 2, sample, x = 1:2, size = 1, replace = F)
# hyperparameters
ac <- matrix(rep(c(0.5 + a_tau, 0.5 + c_tau), J + 1), nrow = J + 1, byrow = T)
bd <- cbind(sq_diff / 2 + b_tau, sq_diff / 2 + d_tau)
call <- cbind(1:(J + 1), comp)
tau <- invgamma::rinvgamma(n = J + 1, shape = ac[call], rate = bd[call])
}else if (type == "uni") {
shape_tau <- 0.5 + a_tau
rate_tau <- sq_diff / 2 + b_tau
# placeholder
p_new <- 1
tau <- invgamma::rinvgamma(n = J + 1, shape = shape_tau, rate = rate_tau)
} else {
sq_diff_all <- sum(sq_diff)
# compute prob on the log scale
lw_0_num <- a_tau * log(b_tau) + lgamma(a_tau + (J + 1) / 2)
lw_0_den <- ((J + 1) / 2 + a_tau) * log((sq_diff_all / 2) + b_tau) + lgamma(a_tau)
lw_1_num <- c_tau * log(d_tau) + lgamma(c_tau + (J + 1) / 2)
lw_1_den <- ((J + 1) / 2 + c_tau) * log((sq_diff_all / 2) + d_tau) + lgamma(c_tau)
p_0_new <- log(p_0) + lw_0_num - lw_0_den
p_1_new <- log(1 - p_0) + lw_1_num - lw_1_den
probability_mat <- cbind(p_0_new, p_1_new)
# normalize with log sum exp trick - avoid overflow
p_new <- apply(probability_mat, 1, .normalize_prob)
# sample mixture
mix <- sample(x = 1:2, size = 1, replace = F, prob = p_new)
# hyperparameters
ac <- c((J + 1) / 2 + a_tau, (J + 1) / 2 + c_tau)
bd <- c(sq_diff_all / 2 + b_tau, sq_diff_all / 2 + d_tau)
tau <- invgamma::rinvgamma(n = 1, shape = ac[mix], rate = bd[mix])
}
return(list("tau" = tau, "p_new" = p_new))
}
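# Illustrative sketch (not part of the package API): the "uni" branch draws one
# commensurability parameter tau per interval from its Inverse-Gamma posterior.
# The hazards below are toy values for demonstration only (requires invgamma).
if (FALSE) {
  set.seed(1)
  lambda   <- c(0.9, 1.1, 1.0)
  lambda_0 <- c(1.0, 1.0, 1.0)
  .tau_update(lambda_0, lambda, J = 2, s = c(0, 1, 2, 3),
              a_tau = 1, b_tau = 0.001, type = "uni")$tau
}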
#' @title Metropolis Hastings step: shuffle the split point locations (with
#' Bayesian borrowing)
#'
#' @param df_hist data.frame containing historical trial data and parameters
#' @param df_curr data.frame containing current trial data and parameters
#' @param Y_0 historical trial data
#' @param I_0 historical trial censoring indicator
#' @param X_0 historical trial design matrix
#' @param lambda_0 historical baseline hazard
#' @param beta_0 historical parameter vector
#' @param Y data
#' @param I censoring indicator
#' @param X design matrix
#' @param lambda baseline hazard
#' @param beta parameter vector
#' @param s split point locations, J + 2
#' @param J number of split points
#' @param bp number of covariates in current trial
#' @param bp_0 number of covariates in historical trial
#' @param clam_smooth neighbor interactions, in range (0, 1), for ICAR update
#' @param maxSj the smallest of the maximal time points, min(max(Y), max(Y_0))
#'
#' @return list containing new split points, updated Sigma_s and data.frames
#' for historic and current trial data
.shuffle_split_point_location <- function(df_hist, df_curr, Y_0, I_0, X_0, lambda_0,
beta_0, Y, I, X, lambda, beta, s, J, bp_0,
bp, clam_smooth, maxSj) {
Sigma_s <- .ICAR_calc(s, J, clam_smooth)$Sigma_s
  # Propose each interior split point individually, holding the others fixed
for (j in 1:J) {
if (j == J) {
s_star <- stats::runif(1, min = s[j], max = maxSj)
} else {
s_star <- stats::runif(1, min = s[j], max = s[j + 2])
}
s_prop <- s
s_prop[j + 1] <- s_star
# Create new data.frames for s_prop
df_hist_prop <- .dataframe_fun(Y = Y_0, I = I_0, X = X_0, s = s_prop, lambda = lambda_0, bp = bp_0, J = J)
df_curr_prop <- .dataframe_fun(Y = Y, I = I, X = X, s = s_prop, lambda = lambda, bp = bp, J = J)
# Update ICAR
Sigma_s_prop <- .ICAR_calc(s_prop, J, clam_smooth)$Sigma_s
# Probability of accepting
if (j == J) {
lprior_num <- log(maxSj - s_star) + log(s_star - s[j])
lprior_den <- log(maxSj - s[j + 1]) + log(s[j + 1] - s[j])
} else {
lprior_num <- log(s[j + 2] - s_star) + log(s_star - s[j])
lprior_den <- log(s[j + 2] - s[j + 1]) + log(s[j + 1] - s[j])
}
llike_num <- .log_likelihood(df_hist_prop, beta_0) + .log_likelihood(df_curr_prop, beta)
llike_den <- .log_likelihood(df_hist, beta_0) + .log_likelihood(df_curr, beta)
# Acceptance ratio
logacc <- llike_num - llike_den + lprior_num - lprior_den
if (logacc > log(stats::runif(1))) {
Sigma_s <- Sigma_s_prop
df_hist <- df_hist_prop
df_curr <- df_curr_prop
s <- s_prop
}
}
return(list("s" = s, "Sigma_s" = Sigma_s, "df_hist" = df_hist, "df_curr" = df_curr))
}
#' @title Metropolis Hastings step: shuffle the split point locations (without
#' Bayesian borrowing)
#'
#' @param df dataframe containing trial data and parameters
#' @param Y_0 data
#' @param I_0 censoring indicator
#' @param X_0 design matrix
#' @param lambda_0 baseline hazard
#' @param beta_0 parameter vector
#' @param s split point locations, J + 2
#' @param J number of split points
#' @param bp_0 number of covariates in historical trial
#' @param clam_smooth neighbor interactions, in range (0, 1), for ICAR update
#'
#' @return list containing new split points, updated Sigma_s and data.frames
#' for historic and current trial data
.shuffle_split_point_location_NoBorrow <- function(df, Y_0, I_0, X_0,
lambda_0, beta_0, s, J,
bp_0, clam_smooth) {
Sigma_s <- .ICAR_calc(s, J, clam_smooth)$Sigma_s
  # Propose each interior split point individually, holding the others fixed
for (j in 1:J) {
s_star <- stats::runif(1, min = s[j], max = s[j + 2])
s_prop <- s
s_prop[j + 1] <- s_star
##like
df_prop <- .dataframe_fun(Y = Y_0, I = I_0, X = X_0, s = s_prop, lambda = lambda_0, bp = bp_0, J = J)
#ICAR
Sigma_s_prop <- .ICAR_calc(s_prop, J, clam_smooth)$Sigma_s
#Prob of accepting
lprior_num <- log(s[j + 2] - s_star) + log(s_star - s[j])
lprior_denom <- log(s[j + 2] - s[j + 1]) + log(s[j + 1] - s[j])
llike_num <- .log_likelihood(df_prop, beta_0)
llike_den <- .log_likelihood(df, beta_0)
#Prob
logacc <- llike_num - llike_den + lprior_num - lprior_denom
if (logacc > log(stats::runif(1))) {
Sigma_s <- Sigma_s_prop
df <- df_prop
s <- s_prop
}
}
return(list("s" = s, "Sigma_s" = Sigma_s, "df_all" = df))
}
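# Illustrative sketch (not part of the package API): the log prior ratio used
# when shuffling a split point favours proposals that keep the two neighbouring
# intervals balanced. The toy locations below are assumptions for demonstration.
if (FALSE) {
  s_left <- 1; s_mid <- 1.1; s_right <- 3   # current split point s_mid sits near the left edge
  s_star <- 2                               # proposal near the middle of the bracket
  lprior_num <- log(s_right - s_star) + log(s_star - s_left)
  lprior_den <- log(s_right - s_mid) + log(s_mid - s_left)
  lprior_num - lprior_den                   # > 0: the centred proposal is favoured a priori
}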
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/posterior_updates.R
|
#' @title BayesFBHborrow: Run MCMC for a piecewise exponential model
#'
#' @description Main function of the BayesFBHborrow package. This generic function
#' calls the correct MCMC sampler for time-to-event Bayesian borrowing.
#'
#' @param data data.frame containing at least three vectors: "tte" (time-to-event),
#' "event" (censoring), and covariates "X_i" (where i should be a number/
#' indicator of the covariate)
#' @param data_hist data.frame containing at least two vectors: "tte"
#' (time-to-event) and "event" (censoring), with the option of adding covariates
#' named "X_0_i" (where i should be a number/
#' indicator of the covariate), for historical
#' data
#' @param tuning_parameters list of "cprop_beta", "cprop_beta_0", "alpha", "Jmax",
#' and "pi_b"
#' @param initial_values list containing the initial values of c("J", "s_r",
#' "mu", "sigma2", "tau", "lambda_0", "lambda", "beta_0", "beta") (optional)
#' @param hyperparameters list containing the hyperparameters c("a_tau", "b_tau",
#' "c_tau", "d_tau", "type", "p_0", "a_sigma", "b_sigma", "phi", "clam_smooth").
#' Default is list("a_tau" = 1, "b_tau" = 0.001, "c_tau" = 1, "d_tau" = 1,
#' "type" = "mix", "p_0" = 0.8, "a_sigma" = 1, "b_sigma" = 1, "phi" = 3,
#' "clam_smooth" = 0.8)
#' @param lambda_hyperparameters list containing two hyperparameters ("a_lambda"
#' and "b_lambda") used for the update of lambda and lambda_0. Default is
#' list("a_lambda" = 0.01, "b_lambda" = 0.01). The power parameter alpha, which
#' affects how much is borrowed, is passed through tuning_parameters
#' @param iter number of iterations for MCMC sampler
#' @param warmup_iter number of warmup iterations (burn-in) for MCMC sampler.
#' @param refresh number of iterations between printed screen updates
#' @param verbose FALSE (default), choice of output, if TRUE will output
#' intermittent results into console
#' @param max_grid grid size for the smoothed baseline hazard
#'
#' @export
#'
#' @return list of samples for both fixed (can be found in $out_fixed) and
#' multidimensional parameters (lambda, lambda_0, s, tau)
#'
#' @examples
#' set.seed(123)
#' # Load the example data and write your initial values and hyper parameters
#' data(piecewise_exp_cc, package = "BayesFBHborrow")
#' data(piecewise_exp_hist, package = "BayesFBHborrow")
#'
#' # Set your hyperparameters and tuning parameters
#' hyper <- list("a_tau" = 1,
#' "b_tau" = 0.001,
#' "c_tau" = 1,
#' "d_tau" = 1,
#' "type" = "all",
#' "p_0" = 0.5,
#' "a_sigma" = 2,
#' "b_sigma" = 2,
#' "clam_smooth" = 0.5,
#' "phi" = 3)
#'
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5,
#' "alpha" = 0.4)
#'
#' # Set initial values to default
#' out <- BayesFBHborrow(piecewise_exp_cc, piecewise_exp_hist, tuning_parameters,
#' initial_values = NULL, hyper, iter = 5, warmup_iter = 1)
#'
#' # Create a summary of the output
#' # summary(out, estimator = "out_fixed")
#'
#' # Plot some of the estimates
#' # Do beta (trace), s (hist) and lambda (matrix)
#' trace <- plot(out, 1:5, estimator = "beta_1", type = "trace")
#' hist <- plot(out, estimator = "J", type = "hist")
#' smoothed_baseline_hazard <- plot(out, 1:2000, estimator = "out_slam",
#' type = "matrix")
BayesFBHborrow <- function(data, data_hist = NULL, tuning_parameters,
initial_values, hyperparameters,
lambda_hyperparameters, iter, warmup_iter, refresh,
verbose, max_grid) {
checkmate::assert_data_frame(data)
if (is.null(data_hist)) {
class(data) <- c("data.frame", "NoBorrow")
} else {
class(data) <- c("data.frame", "WBorrow")
}
UseMethod("BayesFBHborrow", as.data.frame(data))
}
#' @title Run the MCMC sampler with Bayesian Borrowing
#'
#' @description Main function of the BayesFBHborrow package. This generic function
#' calls the correct MCMC sampler for time-to-event Bayesian borrowing.
#'
#' @param data data.frame containing at least three vectors called "tte"
#' (time-to-event), "event" (censoring), and covariates "X_i" (where i should be a number/
#' indicator of the covariate)
#' @param data_hist data.frame containing at least two vectors called "tte"
#' (time-to-event) and "event" (censoring), with the option of adding covariates
#' named "X_0_i" (where i should be a number/
#' indicator of the covariate), for historical data
#' @param tuning_parameters list of "cprop_beta", "cprop_beta_0", "alpha", "Jmax",
#' and "pi_b"
#' @param initial_values list containing the initial values of c("J", "s_r",
#' "mu", "sigma2", "tau", "lambda_0", "lambda", "beta_0", "beta") (optional)
#' @param hyperparameters list containing the hyperparameters c("a_tau", "b_tau",
#' "c_tau", "d_tau", "type", "p_0", "a_sigma", "b_sigma", "phi", "clam_smooth").
#' Default is list("a_tau" = 1, "b_tau" = 0.001, "c_tau" = 1, "d_tau" = 1,
#' "type" = "mix", "p_0" = 0.8, "a_sigma" = 1, "b_sigma" = 1, "phi" = 3,
#' "clam_smooth" = 0.8)
#' @param lambda_hyperparameters list containing two hyperparameters ("a_lambda" and
#' "b_lambda") used for the update of lambda and lambda_0. Default is
#' list("a_lambda" = 0.01, "b_lambda" = 0.01)
#' @param iter number of iterations for MCMC sampler. Default is 150
#' @param warmup_iter number of warmup iterations (burn-in) for MCMC sampler.
#' Default is 10
#' @param refresh number of iterations between printed console updates. Default
#' is 0
#' @param verbose FALSE (default), choice of output, if TRUE will output
#' intermittent results into console
#' @param max_grid grid size for the smoothed baseline hazard. Default is 2000
#'
#' @return list of samples for both fixed (can be found in $out_fixed) and
#' multidimensional parameters (lambda, lambda_0, s, tau)
#' @export
#'
#' @examples
#' set.seed(123)
#' # Load the example data and write your initial values and hyper parameters
#' data(piecewise_exp_cc, package = "BayesFBHborrow")
#' data(piecewise_exp_hist, package = "BayesFBHborrow")
#'
#' # Set your hyperparameters and tuning parameters
#' hyper <- list("a_tau" = 1,
#' "b_tau" = 0.001,
#' "c_tau" = 1,
#' "d_tau" = 1,
#' "type" = "all",
#' "p_0" = 0.5,
#' "a_sigma" = 2,
#' "b_sigma" = 2,
#' "clam_smooth" = 0.5,
#' "phi" = 3)
#'
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5,
#' "alpha" = 0.4)
#'
#' # Set initial values to default
#' out <- BayesFBHborrow(piecewise_exp_cc, piecewise_exp_hist, tuning_parameters,
#' initial_values = NULL, hyper, iter = 5, warmup_iter = 1)
#'
#' # Create a summary of the output
#' # summary(out, estimator = "out_fixed")
#'
#' # Plot some of the estimates
#' # Do beta (trace), s (hist) and lambda (matrix)
#' trace <- plot(out, 1:5, estimator = "beta_1", type = "trace")
#' hist <- plot(out, estimator = "J", type = "hist")
#' smoothed_baseline_hazard <- plot(out, 1:2000, estimator = "out_slam",
#' type = "matrix")
BayesFBHborrow.WBorrow <- function(data, data_hist,
tuning_parameters,
initial_values = NULL,
hyperparameters = list(
"a_tau" = 1,
"b_tau" = 0.001,
"c_tau" = 1,
"d_tau" = 1,
"type" = "mix",
"p_0" = 0.8,
"a_sigma" = 1,
"b_sigma" = 1,
"phi" = 3,
"clam_smooth" = 0.8),
lambda_hyperparameters = list(
"a_lambda" = 0.01,
"b_lambda" = 0.01
),
iter = 150,
warmup_iter = 10,
refresh = 0,
verbose = FALSE,
max_grid = 2000) {
checkmate::assert_flag(verbose) # user choice on output
checkmate::assert_data_frame(data, any.missing = FALSE)
checkmate::assert_names(
names(data),
must.include = c("tte", "event")
)
checkmate::assert_data_frame(data[grepl("X", names(data))], any.missing = FALSE, min.cols = 1)
checkmate::assert_data_frame(data_hist, any.missing = FALSE)
if (length(data_hist[grepl("X", names(data_hist))]) != 0) {
checkmate::assert_data_frame(data_hist[grepl("X", names(data_hist))], any.missing = FALSE, min.cols = 1)
X_0 <- as.matrix(data_hist[grepl("X", names(data_hist))])
} else {
X_0 <- NULL
}
checkmate::assert_names(
names(data_hist),
must.include = c("tte", "event")
)
## Hyperparameters
checkmate::assert_names(
names(hyperparameters),
must.include = c("a_tau", "b_tau", "c_tau", "d_tau","type", "p_0", "a_sigma",
"b_sigma", "clam_smooth", "phi")
)
## Tuning parameters
checkmate::assert_names(
names(tuning_parameters),
must.include = c("cprop_beta", "pi_b", "alpha", "Jmax")
)
## Initial values
if (!is.null(initial_values)) {
checkmate::assert_names(
names(initial_values),
must.include = c("J", "s_r", "mu", "sigma2", "tau", "lambda_0",
"lambda", "beta_0", "beta")
)
}
  # Extract the covariates before calling the GibbsMH sampler
X <- as.matrix(data[grepl("X", names(data))])
# Call the .input_check function
if (verbose) {
s <- .input_check(data$tte, data_hist$tte, X, X_0, tuning_parameters,
initial_values, hyperparameters)
message((paste0(s, "\nInputs look ok\n")))
} else {
suppressMessages(
.input_check(data$tte, data_hist$tte, X, X_0, tuning_parameters,
initial_values, hyperparameters)
)
}
if (verbose) {
# print diagnostics
message("Starting MCMC sampler")
}
out <- GibbsMH(Y = data$tte, I = data$event, X = X,
Y_0 = data_hist$tte, I_0 = data_hist$event, X_0 = X_0,
tuning_parameters = tuning_parameters,
initial_values = initial_values,
hyperparameters = hyperparameters,
lambda_hyperparameters = lambda_hyperparameters,
iter = iter,
warmup_iter = warmup_iter,
refresh = refresh,
max_grid = max_grid)
if (verbose) {
# print diagnostics
beta_acc_ratio <- out$beta_move/(warmup_iter + iter)
message("MCMC sampler complete")
message(paste0("beta_",c(1:length(beta_acc_ratio)) ," acceptance ratio: ", beta_acc_ratio, collapse = "\n"))
}
class(out) <- c("BayesFBHborrow", "list")
return(out)
}
#' @title Run the MCMC sampler without Bayesian Borrowing
#'
#' @description Main function of the BayesFBHborrow package. This generic function
#' calls the correct MCMC sampler for time-to-event without Bayesian borrowing.
#'
#' @param data data.frame containing at least three vectors: "tte" (time-to-event),
#' "event" (event indicator), and covariates "X_i" (where i should be a number/
#' indicator of the covariate)
#' @param data_hist NULL (not used)
#' @param tuning_parameters list of "cprop_beta", "Jmax", and "pi_b"
#' @param initial_values list containing the initial values of c("J", "s_r",
#' "mu", "sigma2", "lambda", "beta") (optional)
#' @param hyperparameters list containing the hyperparameters c("a_sigma",
#' "b_sigma", "phi", "clam_smooth"). Default is
#' list("a_sigma" = 1, "b_sigma" = 1, "phi" = 3, "clam_smooth" = 0.8)
#' @param lambda_hyperparameters list containing two hyperparameters ("a_lambda" and
#' "b_lambda") used for the update of lambda. Default is
#' list("a_lambda" = 0.01, "b_lambda" = 0.01)
#' @param iter number of iterations for MCMC sampler. Default is 150
#' @param warmup_iter number of warmup iterations (burn-in) for MCMC sampler.
#' Default is 10
#' @param refresh number of iterations between printed console updates. Default
#' is 0
#' @param verbose FALSE (default), choice of output, if TRUE will output
#' intermittent results into console
#' @param max_grid grid size for the smoothed baseline hazard. Default is 2000
#'
#' @return list of samples for both fixed (can be found in $out_fixed) and
#' multidimensional parameters (lambda, s, tau)
#' @export
#'
#' @examples
#' set.seed(123)
#' # Load the example data and write your initial values and hyper parameters
#' data(piecewise_exp_cc, package = "BayesFBHborrow")
#'
#' # Set your hyperparameters and tuning parameters
#' hyper <- list("a_sigma" = 2,
#' "b_sigma" = 2,
#' "clam_smooth" = 0.5,
#' "phi" = 3)
#'
#' tuning_parameters <- list("Jmax" = 5,
#' "pi_b" = 0.5,
#' "cprop_beta" = 0.5)
#'
#' # Set initial values to default
#' out <- BayesFBHborrow(piecewise_exp_cc, NULL, tuning_parameters,
#' initial_values = NULL, hyper, iter = 5, warmup_iter = 1)
BayesFBHborrow.NoBorrow <- function(data, data_hist = NULL,
tuning_parameters,
initial_values = NULL,
hyperparameters = list(
"a_sigma" = 1,
"b_sigma" = 1,
"phi" = 3,
"clam_smooth" = 0.8),
lambda_hyperparameters = list(
"a_lambda" = 0.01,
"b_lambda" = 0.01
),
iter = 150,
warmup_iter = 10,
refresh = 0,
verbose = FALSE,
max_grid = 2000) {
checkmate::assert_flag(verbose) # user choice on output
checkmate::assert_data_frame(data, any.missing = FALSE)
checkmate::assert_names(
names(data),
must.include = c("tte", "event")
)
checkmate::assert_data_frame(data[grepl("X", names(data))], any.missing = FALSE, min.cols = 1)
## Hyperparameters
checkmate::assert_names(
names(hyperparameters),
must.include = c("a_sigma", "b_sigma", "clam_smooth", "phi")
)
## Tuning parameters
checkmate::assert_names(
names(tuning_parameters),
must.include = c("cprop_beta", "pi_b", "Jmax")
)
## Initial values
if (!is.null(initial_values)) {
checkmate::assert_names(
names(initial_values),
must.include = c("J", "s_r", "mu", "sigma2", "lambda", "beta")
)
}
  # Extract the covariates before calling the GibbsMH sampler
X <- as.matrix(data[grepl("X", names(data))])
# Call the .input_check function
if (verbose) {
s <- .input_check(data$tte, NULL, X, NULL, tuning_parameters,
initial_values, hyperparameters)
message((paste0(s, "\nInputs look ok\n")))
} else {
suppressMessages(
.input_check(data$tte, NULL, X, NULL, tuning_parameters,
initial_values, hyperparameters)
)
}
out <- GibbsMH(Y = data$tte, I = data$event, X = X,
tuning_parameters = tuning_parameters,
initial_values = initial_values,
hyperparameters = hyperparameters,
lambda_hyperparameters = lambda_hyperparameters,
iter = iter,
warmup_iter = warmup_iter,
refresh = refresh,
max_grid = max_grid
)
if (verbose) {
# print diagnostics
beta_acc_ratio <- out$beta_move/(warmup_iter + iter)
message("MCMC sampler complete")
message(paste0("beta_",c(1:length(beta_acc_ratio)) ," acceptance ratio: ", beta_acc_ratio, collapse = "\n"))
}
class(out) <- c("BayesFBHborrow", "list")
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/run_mcmc.R
|
#' @title Input checker
#' @description Checks inputs before Gibbs sampler is run
#'
#' @param Y current time-to-event data
#' @param Y_0 historical time-to-event data
#' @param X design Matrix
#' @param X_0 design Matrix for historical data
#' @param tuning_parameters list of tuning parameters
#' @param initial_values list of initial values (optional)
#' @param hyperparameters list of hyperparameters
#'
#' @return a print statement
.input_check <- function(Y, Y_0, X, X_0, tuning_parameters, initial_values = NULL, hyperparameters) {
if (any(unlist(hyperparameters) < 0)) {
stop((paste("in hyperparameters(s): ",
names(which(unlist(hyperparameters) < 0)),
"hyperparameters must be non negative")))
} else if (max(hyperparameters$p_0, hyperparameters$clam_smooth,
tuning_parameters$pi_b) > 1) {
listed <- c("p_0" = hyperparameters$p_0, "clam_smooth" = hyperparameters$clam_smooth,
"pi_b" = tuning_parameters$pi_b)
stop(paste("in hyperparameters:",
names(which(unlist(listed) > 1)),
", should be in range [0, 1]"))
}
if (!is.null(Y_0)) {
if (!is.null(initial_values)) {
J <- initial_values$J
      if (hyperparameters$type != "uni") {
        borr_choice <- ifelse(hyperparameters$type == "mix", "mix", "all")
s <- (paste("Choice of borrowing:", borr_choice))
if (any(hyperparameters[c("c_tau", "d_tau")] == "NULL")) {
stop(paste("specify hyperparameters for",
names(which(hyperparameters[c("c_tau", "d_tau")] == "NULL"))))
}
if (borr_choice == "mix" && length(initial_values$tau) != J + 1) {
stop("wrong dimension for tau, should be J+1")
}
else if (borr_choice == "all" && length(initial_values$tau) == J + 1) {
stop("Borrowing is 'all' but several initial values for tau were provided")
}
} else {
s <- "Choice of borrowing: uni"
if (any(hyperparameters[c("c_tau", "d_tau")] != "NULL")) {
message("Borrowing is 'uni', choice of c_tau and d_tau will be ignored")
}
if (length(initial_values$tau) != J + 1) {
stop("wrong dimension for tau, should be J+1")
}
}
maxSj <- min(max(Y), max(Y_0))
if (max(initial_values$s_r) > maxSj) {
stop("all s_r must be < min(max(Y),max(Y_0))")
} else if (any(initial_values$s_r < 0)) {
stop("all s_r must be > 0")
}
if (any(sapply(initial_values[c("lambda", "lambda_0")], length) != J+1)) {
stop(paste("dimension error in", names(which(sapply(
initial_values[c("lambda", "lambda_0")], length) != J+1))))
}
if (any(c(initial_values$lambda, initial_values$lambda_0) < 0)) {
stop("baseline hazard must be > 0")
}
if (is.null(X_0)) {
        if (length(initial_values$beta) != dim(X)[[2]] || length(initial_values$beta_0) != 0) {
          arg <- which(sapply(initial_values[c("beta", "beta_0")],
                              length) != c(ncol(X), 0))
          arg_dim <- length(initial_values[names(arg)][[1]])
          trial <- if (ncol(X) != length(initial_values$beta)) ncol(X) else 0
          stop(paste0("dimension mismatch in ", names(arg), " with length ", arg_dim,
                      ", given design matrix has dimension ", trial))
        }
}
} else {
if (length(initial_values$beta) != dim(X)[[2]] || length(initial_values$beta_0) != dim(X_0)[[2]]) {
arg <- which(sapply(initial_values[c("beta", "beta_0")],
length) != c(ncol(X), ncol(X_0)))
arg_dim <- length(initial_values[names(arg)][[1]])
trial <- dplyr::if_else(ncol(X) != length(initial_values$beta),
dim(X)[2], dim(X_0)[2])
stop(paste0("dimension mismatch in ", names(arg), " with length ",
arg_dim,", given design matrix has dimension ", trial))
}
}
cprop_beta <- tuning_parameters$cprop_beta
max_length <- max(length(initial_values$beta), length(initial_values$beta_0))
if (max_length != length(cprop_beta)) {
arg <- which(sapply(initial_values[c("beta", "beta_0")],
length) == max_length)
arg_length <- length(initial_values[names(arg)][[1]])
stop(paste0("dimension mismatch in 'cprop_beta' with length ",
length(cprop_beta),", given the number of covariates in ",names(arg), " is ", arg_length, "\n"))
}
if (initial_values$sigma2 < 0) {
stop("sigma2 must be > 0")
}
if (length(initial_values$s_r) != J) {
stop("dimension error in s_r")
}
} else { # if no initial values are provided
      if (hyperparameters$type != "uni") {
        borr_choice <- ifelse(hyperparameters$type == "mix", "mix", "all")
s <- (paste("Choice of borrowing:", borr_choice))
if (any(hyperparameters[c("c_tau", "d_tau")] == "NULL")) {
stop(paste("specify hyperparameters for",
names(which(hyperparameters[c("c_tau", "d_tau")] == "NULL"))))
}
} else {
s <- "Choice of borrowing: uni"
if (any(hyperparameters[c("c_tau", "d_tau")] != "NULL")) {
message("Borrowing is 'uni', choice of c_tau and d_tau will be ignored")
}
}
}
} else if (!is.null(initial_values)) {
s <- "No borrowing"
lambda <- initial_values[grepl("^lambda", names(initial_values))]
beta <- initial_values[grepl("^beta", names(initial_values))]
J <- initial_values$J
if (max(initial_values$s_r) > max(Y)) {
stop("all s_r must be < max(Y)")
} else if (any(initial_values$s_r < 0)) {
stop("all s_r must be > 0")
}
if (length(lambda[[1]]) != J+1) {
stop(paste0("dimension error in ", names(lambda), " with length ",
length(lambda),", should be of length ", J+1))
}
if (any(lambda[[1]] < 0)) {
stop("baseline hazard must be > 0")
}
if (!is.null(X) && length(beta[[1]]) != ncol(X)) {
stop(paste0("dimension error in beta with length ", length(beta),
", should be of length "), ncol(X))
}
if (initial_values$sigma2 < 0) {
stop("sigma2 must be > 0")
}
if (length(initial_values$s_r) != J) {
stop("dimension error in s_r")
}
} else {
s <- "No borrowing"
}
return(invisible(s))
}
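# Illustrative sketch (not part of the package API): a negative hyperparameter
# is rejected before the sampler starts. All inputs below are toy values made
# up for demonstration only.
if (FALSE) {
  tryCatch(
    .input_check(Y = rexp(10), Y_0 = NULL, X = matrix(1, 10, 1), X_0 = NULL,
                 tuning_parameters = list("pi_b" = 0.5),
                 initial_values = NULL,
                 hyperparameters = list("a_sigma" = -1, "b_sigma" = 1)),
    error = function(e) conditionMessage(e)
  )
}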
#' @title Create data.frame for piecewise exponential models
#'
#' @description Construct a split data.frame for updated split points
#'
#' @param Y time-to-event
#' @param I censor indicator
#' @param X design Matrix
#' @param s split point locations, including start and end (length J + 2)
#' @param lambda baseline Hazards (length J+1)
#' @param bp number of covariates
#' @param J number of split points
#'
#' @return data.frame with columns c(tstart, id, X1,..., Xp, Y, I, lambda)
#'
#' @import survival
.dataframe_fun <- function(Y, I, X, s, lambda, bp, J) {
id <- seq_along(Y)
if (bp > 0) {
xcol <- 1:bp
df_like <- as.data.frame(cbind(Y, I, id, X))
colnames(df_like)[xcol + 3] <- paste0("X", xcol)
} else {
df_like <- as.data.frame(cbind(Y, I, id))
}
#Create indicators
df_split <- survival::survSplit(formula = Surv(Y, I) ~ .,
data = df_like, cut = s)
lam_mat <- as.data.frame(cbind(s[1:(J + 1)], lambda))
colnames(lam_mat)[1] <- "tstart"
df_all <- merge(df_split, lam_mat, by = "tstart", all.x = T)
}
#' Computes the log of the sum of exponentials (log-sum-exp)
#'
#' @param x set of log probabilities
#'
#' @return the log of the sum of the exponentials of x
.logsumexp <- function(x) {
c <- max(x)
p <- c + log(sum(exp(x - c)))
}
#' Normalize a set of probabilities to one, using the log-sum-exp trick
#'
#' @param x set of log probabilities
#'
#' @return normalized set of probabilities
.normalize_prob <- function(x) {
exp(x - .logsumexp(x))
}
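# Illustrative sketch (not part of the package API): why the log-sum-exp trick
# matters. A naive normalisation underflows for very negative log probabilities.
if (FALSE) {
  x <- c(-1000, -1001)
  exp(x) / sum(exp(x))   # NaN: both terms underflow to zero
  .normalize_prob(x)     # c(0.731..., 0.269...), numerically stable
}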
#' Log likelihood function
#'
#' @param df data.frame containing data, time split points, and lambda
#' @param beta coefficients for covariates
#'
#' @return log likelihood given lambdas and betas
.log_likelihood <- function(df, beta) {
if (!is.null(beta)) {
X <- as.matrix(df[, substr(colnames(df), 1, 1) == "X"])
xdpb <- X %*% beta
# df has the delta_ij indicator integrated within it. Each individual
# is partitioned by interval with Y adjusted.
# I is the nu_i indicator.
llike <- sum(log(df$lambda) * df$I + xdpb * df$I -
((df$Y - df$tstart) * df$lambda) * exp(xdpb))
} else {
    # No covariates: the linear predictor term drops out of the likelihood
    llike <- sum(log(df$lambda) * df$I - ((df$Y - df$tstart) * df$lambda))
}
}
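# Illustrative sketch (not part of the package API): with a single interval,
# constant hazard and no censoring, the piecewise log likelihood above matches
# the exponential log likelihood. Toy data for demonstration only.
if (FALSE) {
  set.seed(1)
  Y <- rexp(5, rate = 2); I <- rep(1, 5)
  df <- .dataframe_fun(Y = Y, I = I, X = NULL, s = c(0, max(Y) + 1),
                       lambda = 2, bp = 0, J = 0)
  .log_likelihood(df, beta = NULL)
  sum(stats::dexp(Y, rate = 2, log = TRUE))   # should agree with the line above
}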
#' @title Create group level data
#'
#' @description Aggregate individual level data into group level data
#'
#' @param Y data
#' @param I censoring indicator
#' @param X design matrix
#' @param s split points, J + 2
#'
#' @return list of group level data
#' @export
#'
#' @examples
#' set.seed(111)
#' # Load example data and set your initial values and hyper parameters
#' data(weibull_cc, package = "BayesFBHborrow")
#' data(weibull_hist, package = "BayesFBHborrow")
#'
#' Y <- weibull_cc$tte
#' I <- weibull_cc$event
#' X <- weibull_cc$X_trt
#'
#' # Say we want to know the group level data for the following split points
#' s <- quantile(Y, c(0, 0.45, 0.65, 1), names = FALSE)
#'
#' group_summary(Y, I, X, s)
group_summary <- function(Y, I, X, s) {
data <- as.data.frame(cbind(seq_along(Y), Y, I))
if (is.null(X)) {
# Censoring count
df_cens <- (data[data$I == 0, ])
df_cens$C <- 1 - df_cens$I
events_split <- survival::survSplit(formula = Surv(Y, I) ~ .,
data = data, cut = s)
cnsr_split <- survival::survSplit(formula = Surv(Y, C) ~ .,
data = df_cens, cut = s)
# Number of events in each time segment
events <- sapply(1:(length(s) - 1), function(i) {
sum(events_split[events_split$tstart == s[i], ]$I)})
# Number of censored patients in each time segment
cnsr <- sapply(1:(length(s) - 1), function(i) {
sum(cnsr_split[cnsr_split$tstart == s[i], ]$C)})
# Time exposed
time_exposed <- sapply(1:(length(s) - 1), function(i) {
sum(events_split[events_split$tstart == s[i], ]$Y -
events_split[events_split$tstart == s[i], ]$tstart)})
# Number of patients no events in each time segment
num_at_risk <- sapply(1:(length(s) - 1), function(i) {
nrow(events_split[events_split$tstart == s[i], ])})
return(list("events" = events, "time_exposed" = time_exposed,
"num_at_risk" = num_at_risk, "num_cnsr" = cnsr))
} else {
# Concurrent to include covariate
data <- as.data.frame(cbind(data, X))
# Censoring count which includes covariate
df_cens <- (data[data$I == 0, ])
df_cens$C <- 1 - df_cens$I
events_split <- survival::survSplit(formula = Surv(Y, I) ~ .,
data = data, cut = s)
    csnr_split <- survival::survSplit(formula = Surv(Y, C) ~ .,
                                      data = df_cens, cut = s)
# Number of events control
events_c <- sapply(1:(length(s) - 1), function(i) {
sum(events_split[events_split$tstart == s[i] & events_split$X == 0, ]$I)})
num_at_risk_c <- sapply(1:(length(s) - 1), function(i) {
nrow(events_split[events_split$tstart == s[i] & events_split$X == 0, ])})
cens <- sapply(1:(length(s) - 1), function(i) {
      sum(csnr_split[csnr_split$tstart == s[i] & csnr_split$X == 0, ]$C)})
# Number of events treatment
events_trt <- sapply(1:(length(s) - 1), function(i) {
sum(events_split[events_split$tstart == s[i] & events_split$X == 1, ]$I)})
num_at_risk_trt <- sapply(1:(length(s) - 1), function(i) {
nrow(events_split[events_split$tstart == s[i] & events_split$X == 1, ])})
# Time exposed
time_exposed_c <- sapply(1:(length(s) - 1), function(i) {
sum(events_split[events_split$tstart == s[i] & events_split$X == 0, ]$Y -
events_split[events_split$tstart == s[i] & events_split$X == 0, ]$tstart)})
time_exposed_trt <- sapply(1:(length(s) - 1), function(i) {
sum(events_split[events_split$tstart == s[i] & events_split$X == 1, ]$Y -
events_split[events_split$tstart == s[i] & events_split$X == 1, ]$tstart)})
return(list("events_c" = events_c, "events_trt" = events_trt,
"time_c" = time_exposed_c, "time_trt" = time_exposed_trt,
"num_at_risk_c" = num_at_risk_c, "num_at_risk_trt" = num_at_risk_trt))
}
}
#' @title Initialize lambda hyperparameters
#'
#' @description Propose lambda hyperparameters for the choice of
#' initial values for lambda
#'
#' @param group_data group level data
#' @param s split points
#' @param w weight
#'
#' @return shape and rate for the estimated lambda distribution
#' @export
#'
#' @examples
#' set.seed(111)
#' # Load example data and set your initial values and hyper parameters
#' data(weibull_cc, package = "BayesFBHborrow")
#' data(weibull_hist, package = "BayesFBHborrow")
#'
#' Y <- weibull_cc$tte
#' I <- weibull_cc$event
#' X <- weibull_cc$X_trt
#'
#' # Say we want to know the group level data for the following split points
#' s <- quantile(Y, c(0, 0.45, 0.65, 1), names = FALSE)
#'
#' group_data <- group_summary(Y, I, NULL, s)
#' init_lambda_hyperparameters(group_data, s)
init_lambda_hyperparameters <- function(group_data, s, w = 0.5) {
h_star <- rep(0, length(s)- 1)
# Set index
idx <- 1:length(h_star)
xi <- s[-1] - s[-length(s)]
n_dash <- group_data$num_at_risk - group_data$num_cnsr / 2
numerator <- ((n_dash - group_data$events/2) * xi)
# Adjust index for 0
idx <- idx[numerator!= 0]
h_star[idx] <- (group_data$events / numerator)[idx]
ingroup_data <- (1:length(xi))[group_data$events == 0]
indnz <- (1:length(xi))[group_data$events != 0]
rate <- NULL
  shape <- group_data$num_at_risk * w
# Account for zero number of events
rate[indnz] <- shape[indnz] / h_star[indnz]
rate[ingroup_data] <- shape[ingroup_data] / rep(0.1, length(ingroup_data))
# t2_gamma hyper prior for mu - using nonzero values only
t2 <- (log(max(h_star) / min(h_star[h_star > 0]))) ** 2
# Adjust for zero shape and rate
if (any(shape == 0)) {
shape[shape == 0] <- 0.001
}
if (any(rate == 0)) {
rate[rate == 0] <- 0.001
}
return(list("shape" = shape, "rate" = rate, "t2" = t2))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/utils.R
|
#' Example data, simulated from a Weibull distribution.
#'
#' Data is simulated for a concurrent trial with three columns named "tte"
#' (time-to-event), "event" (event indicator), and "X_trt" (treatment indicator).
#' It was simulated by drawing samples from a Weibull with kappa = 1.5 (shape)
#' and nu = 0.4 (scale).
#'
#' @docType data
#'
#' @usage data(weibull_cc)
#'
#' @keywords datasets
#'
#' @examples
#' data(weibull_cc)
#' survival_model <- survival::survfit(survival::Surv(tte, event) ~ X_trt, data = weibull_cc)
#' line_colors <- c("blue", "red") # Adjust colors as needed
#' line_types <- 1:length(unique(weibull_cc$X_trt))
#' plot(survival_model, col = line_colors, lty = line_types,
#' xlab = "Time (tte)", ylab = "Survival Probability",
#' main = "Kaplan-Meier Survival Curves by Treatment")
"weibull_cc"
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/weibull_cc-data.R
|
#' Example data, simulated from a Weibull distribution
#'
#' Data is simulated for a historical trial with two columns named "tte"
#' (time-to-event) and "event" (event indicator).
#'
#' @docType data
#'
#' @usage data(weibull_hist)
#'
#' @keywords datasets
#'
#' @examples
#' data(weibull_cc)
#' data(weibull_hist)
#' weibull_hist$X_trt <- 0
#' survival_model <- survival::survfit(survival::Surv(tte, event) ~ X_trt,
#' data = rbind(weibull_cc,
#' weibull_hist))
#' line_colors <- c("blue", "red", "green") # Adjust colors as needed
#' line_types <- 1:length(unique(weibull_cc$X_trt))
#' plot(survival_model, col = line_colors, lty = line_types,
#' xlab = "Time (tte)", ylab = "Survival Probability",
#' main = "Kaplan-Meier Survival Curves by Treatment")
"weibull_hist"
|
/scratch/gouwar.j/cran-all/cranData/BayesFBHborrow/R/weibull_hist-data.R
|
#' BayesFM: Package for Bayesian Factor Modeling
#'
#' The long-term goal of this package is to provide a collection of procedures
#' to perform Bayesian inference on a variety of factor models.
#'
#' @details Currently, this package includes: Bayesian Exploratory Factor
#' Analysis (\code{befa}), as developed in Conti et al. (2014), an approach to
#' dedicated factor analysis with stochastic search on the structure of the
#' factor loading matrix. The number of latent factors, as well as the
#' allocation of the observed variables to the factors, are not fixed a priori
#' but determined during MCMC sampling. More approaches will be included in
#' future releases of this package.
#'
#' @note You are very welcome to send me any comments or suggestions for
#' improvements, and to share with me any problems you may encounter with the
#' use of this package.
#'
#' @author Rémi Piatek \email{remi.piatek@@gmail.com}
#'
#' @references G. Conti, S. Frühwirth-Schnatter, J.J. Heckman, R. Piatek (2014):
#' ``Bayesian Exploratory Factor Analysis'', \emph{Journal of Econometrics},
#' 183(1), pages 31-57, \doi{10.1016/j.jeconom.2014.06.008}.
#'
#' @docType package
#' @name BayesFM
NULL
.onAttach <- function(libname, pkgname) {
if (interactive() || getOption("verbose")) {
msg <- sprintf(paste(
"###",
"### Package %s (%s) loaded",
"###",
"### Please report any bugs, and send suggestions or feedback",
"### to %s",
"###", sep = "\n"),
pkgname,
utils::packageDescription(pkgname)$Version,
utils::maintainer(pkgname))
packageStartupMessage(msg)
}
}
.onUnload <- function(libpath) {
library.dynam.unload("BayesFM", libpath)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesFM/R/BayesFM.R
|
#'
#' Bayesian Exploratory Factor Analysis
#'
#' This function implements the Bayesian Exploratory Factor Analysis
#' (\code{befa}) approach developed in Conti et al. (CFSHP, 2014). It runs a
#' MCMC sampler for a factor model with dedicated factors, where each manifest
#' variable is allowed to load on at most one latent factor. The allocation of
#' the manifest variables to the latent factors is not fixed \emph{a priori} but
#' determined stochastically during sampling. The minimum number of variables
#' dedicated to each factor can be controlled by the user to achieve the desired
#' level of identification. The manifest variables can be continuous or
#' dichotomous, and control variables can be introduced as covariates.
#'
#' @param model
#' This argument specifies the manifest variables and the covariates used
#' in the model (if any). It can be specified in two different ways:
#' \itemize{
#' \item A numeric matrix or a data frame containing the manifest
#' variables. This corresponds to a model without covariates,
#' and the argument \code{data} is not required.
#' \item A list of model formulas. Each element of the list specifies
#' a manifest variable and its corresponding control variables (e.g.,
#' '\code{Y1 ~ X1 + X2}' to use \code{X1} and \code{X2} as control
#' variables for \code{Y1}).
#' If a formula has no left-hand side variable, covariates on the
#' right-hand side are included in all equations (e.g., '\code{~ X3}'
#' means that \code{X3} is used as a control variable for all the
#' manifest variables). Argument \code{data} can be passed to the
#' function in that case, otherwise parent data frame is used.
#' }
#' Binary manifest variables should be specified as logical vectors in
#' the data frame to be treated as dichotomous. \code{NA} values are
#' accepted in manifest variables only.
#' @param data
#'        Data frame. If missing, the parent data frame is used.
#' @param burnin
#' Burn-in period of the MCMC sampler.
#' @param iter
#' Number of MCMC iterations saved for posterior inference (after
#' burn-in).
#' @param Nid
#' Minimum number of manifest variables dedicated to each latent factor
#' for identification.
#' @param Kmax
#' Maximum number of latent factors. If missing, the maximum number of
#' factors that satisfies the identification condition determined by
#' \code{Nid} and the Ledermann bound is specified (see CFSHP,
#' section 2.2).
#' @param A0
#' Scaling parameters of the variance of the Normal prior on the nonzero
#' factor loadings. Either a scalar or a numeric vector of length equal
#' to the number of manifest variables.
#' @param B0
#' Variances of the Normal prior on the regression coefficients. Either a
#' scalar or a numeric vector of length equal to the number of
#' manifest variables.
#' @param c0
#' Shape parameters of the Inverse-Gamma prior on the idiosyncratic
#' variances. Either a scalar or a numeric vector of length equal to the
#' number of manifest variables.
#' @param C0
#' Scale parameters of the Inverse-Gamma prior on the idiosyncratic
#' variances. Either a scalar or a numeric vector of length equal to the
#' number of manifest variables.
#' @param HW.prior
#' If \code{TRUE}, implement Huang-Wand (2013) prior on the covariance
#' matrix of the factors in the expanded model, otherwise use an
#' Inverse-Wishart prior if \code{FALSE}, see CFSHP section 2.3.5.
#' @param nu0
#' Degrees of freedom of the Inverse-Wishart prior on the covariance
#' matrix of the latent factors in the expanded model.
#' @param S0
#' Scale parameters of the Inverse-Wishart prior on the covariance matrix
#' of latent factors in the expanded model:
#' \itemize{
#' \item if \code{HW.prior = TRUE}, scale parameter of the Gamma
#' hyperprior distribution on the individual scales of the
#' Inverse-Wishart prior.
#' \item if \code{HW.prior = FALSE}, diagonal elements of the scale
#' matrix of the Inverse-Wishart prior on the covariance matrix of
#' the latent factors in the expanded model.
#' }
#' Either a scalar or a numeric vector of length equal to \code{Kmax}.
#' @param kappa0
#' First shape parameter of the Beta prior distribution on the
#' probability \eqn{\tau_0} that a manifest variable does not load on any
#' factor.
#' @param xi0
#' Second shape parameter of the Beta prior distribution on the
#' probability \eqn{\tau_0} that a manifest variable does not load on any
#' factor.
#' @param kappa
#' Concentration parameters of the Dirichlet prior distribution on the
#' indicators. Either a scalar or a numeric vector of length equal to
#' \code{Kmax}.
#' @param indp.tau0
#' If \code{TRUE}, specify the alternative prior specification with
#' independent parameters \eqn{\tau_{0m}}{\tau_0m} across manifest
#' variables \eqn{m = 1, ..., M}, otherwise use a common parameter
#' \eqn{\tau_0} if \code{FALSE}.
#' @param rnd.step
#' If \code{TRUE}, select randomly the number of intermediate steps in
#' non-identified models at each MCMC iteration, otherwise use a fixed
#' number of steps if \code{FALSE}.
#' @param n.step
#' Controls the number of intermediate steps in non-identified models:
#' \itemize{
#' \item if \code{rnd.step = TRUE}, average number of steps. The number
#' of steps is sampled at each MCMC iteration from
#' 1+Poisson(\code{n.step}-1).
#' \item if \code{rnd.step = FALSE}, fixed number of steps.
#' }
#' @param search.delay
#' Number of MCMC iterations run with fixed indicator matrix (specified
#' in \code{dedic.start}) at beginning of MCMC sampling.
#' @param R.delay
#' Number of MCMC iterations run with fixed correlation matrix (specified
#' in \code{dedic.start}) at beginning of MCMC sampling.
#' @param alpha.start
#' Starting values for the factor loadings. Numeric vector of length
#' equal to the number of manifest variables. If missing, sampled from a
#' Normal distribution with zero mean and variance \code{A0}.
#' @param dedic.start
#' Starting values for the indicators. Vector of integers of length equal
#' to the number of manifest variables. Each element takes a value among
#' 0, 1, ..., \code{Kmax}. If missing, random allocation of the manifest
#' variables to the maximum number of factors \code{Kmax}, with a minimum
#' of \code{Nid} manifest variables loading on each factor.
#' @param sigma.start
#' Starting values for the idiosyncratic variances. Numeric vector of
#' length equal to the number of manifest variables. Sampled from prior
#' if missing.
#' @param beta.start
#' Starting values for the regression coefficients. Numeric vector of
#' length equal to the total number of regression coefficients,
#' concatenated for all the manifest variables. Sampled from prior if
#' missing.
#' @param R.start
#' Starting values for the correlation matrix of the latent factors.
#' Numeric matrix with \code{Kmax} rows and columns, and unit diagonal
#' elements. If missing, identity matrix is used.
#' @param verbose
#' If \code{TRUE}, display information on the progress of the function.
#'
#' @details \strong{Model specification.} The model is specified as follows, for
#' each observation \eqn{i = 1, ..., N}:
#' \deqn{Y^*_i = \beta X_i + \alpha \theta_i + \epsilon_i}{
#' Y*_i = \beta X_i + \alpha \theta_i + \epsilon_i}
#' \deqn{\theta_i \sim \mathcal{N}(0, R)}{\theta_i ~ N(0, R)}
#' \deqn{\epsilon_i \sim \mathcal{N}(0, \Sigma)}{\epsilon_i ~ N(0, \Sigma)}
#' \deqn{\Sigma = diag(\sigma^2_1, ..., \sigma^2_M)}
#' where \eqn{Y^*_i}{Y*_i} is the \eqn{M}-vector containing the latent
#' variables underlying the corresponding \eqn{M} manifest variables
#' \eqn{Y_i}, which can be continuous such that
#' \eqn{Y_{im} = Y^*_{im}}{Y_im = Y*_im}, or binary with
#' \eqn{Y_{im} = 1[Y^*_{im} > 0]}{Y_im = 1[Y*_im > 0]}, for \eqn{m = 1, ..., M}.
#' The \eqn{K}-vector \eqn{\theta_i} contains the latent factors, and
#' \eqn{\alpha} is the \eqn{(M \times K)}{(M*K)}-matrix of factor loadings.
#' The \eqn{M}-vector \eqn{\epsilon_i} is the vector of error terms.
#' Covariates can be included in the \eqn{Q}-vector \eqn{X_i} and are related to
#' the manifest variables through the \eqn{(M \times Q)}{(M*Q)}-matrix of
#' regression coefficients \eqn{\beta}. Intercept terms are automatically
#' included, but can be omitted in some or all equations using the usual syntax
#' for R formulae (e.g., 'Y1 ~ X1 - 1' specifies that Y1 is regressed on X1
#' and no intercept is included in the corresponding equation).
#'
#' The number of latent factors \eqn{K} is specified as \code{Kmax}. However,
#' during MCMC sampling the stochastic search process on the matrix \eqn{\alpha}
#' may produce zero columns, thereby reducing the number of active factors.
#'
#' The covariance matrix \eqn{R} of the latent factors is assumed to be a
#' correlation matrix for identification.
#'
#' Each row of the factor loading matrix \eqn{\alpha} contains at most one
#' nonzero element (dedicated factor model). The allocation of the manifest
#' variables to the latent factors is indicated by the binary matrix
#' \eqn{\Delta} with same dimensions as \eqn{\alpha}, such that each row
#' \eqn{\Delta_m} indicates which factor loading is different from zero, e.g.:
#' \deqn{\Delta_m = (0, .., 0, 1, 0, ..., 0) \equiv e_k}{
#' \Delta_m = (0, .., 0, 1, 0, ..., 0) = e_k}
#' indicates that variable \eqn{m} loads on the \eqn{k}th factor, where
#' \eqn{e_k} is a \eqn{K}-vector that contains only zeros, besides its \eqn{k}th
#' element that equals 1.
#'
#' \strong{Identification.} The function verifies that the maximum number of
#' latent factors \code{Kmax} does not exceed the Ledermann bound. It also
#' checks that \code{Kmax} is consistent with the identification restriction
#' specified with \code{Nid} (enough variables should be available to load on
#' the factors when \code{Kmax} is reached). The default value for \code{Kmax}
#' is the minimum between the Ledermann bound and the maximum number of factors
#' that can be loaded by \code{Nid} variables. The user is free to select the
#' level of identification, see CFSHP section 2.2 (non-identified models are
#' allowed with \code{Nid = 1}).
#'
#' Note that identification is achieved only with respect to the scale of the
#' latent factors. Non-identifiability problems may affect the posterior sample
#' because of column switching and sign switching of the factor loadings.
#' These issues can be addressed \emph{a posteriori} with the functions
#' \code{\link{post.column.switch}} and \code{\link{post.sign.switch}}.
#'
#' \strong{Prior specification.}
#' The indicators are assumed to have the following probabilities,
#' for \eqn{k = 1, ..., K}:
#' \deqn{Prob(\Delta_m = e_k \mid \tau_k) = \tau_k}{
#' Prob(\Delta_m = e_k | \tau_k) = \tau_k}
#' \deqn{\tau = (\tau_0, \tau_1, ..., \tau_K)}
#' If \code{indp.tau0 = FALSE}, the probabilities are specified as:
#' \deqn{\tau = [\tau_0, (1-\tau_0)\tau^*_1, ..., (1-\tau_0)\tau^*_K]}{
#' \tau = [\tau_0, (1-\tau_0)\tau*_1, ..., (1-\tau_0)\tau*_K]}
#' \deqn{\tau_0 \sim \mathcal{B}eta(\kappa_0, \xi_0)}{
#' \tau_0 ~ Beta(\kappa_0, \xi_0)}
#' \deqn{\tau^* = (\tau^*_1, ..., \tau^*_K) \sim \mathcal{D}ir(\kappa)}{
#' \tau* = (\tau*_1, ..., \tau*_K) ~ Dir(\kappa)}
#' with \eqn{\kappa_0} = \code{kappa0}, \eqn{\xi_0} = \code{xi0} and
#' \eqn{\kappa} = \code{kappa}.
#' Alternatively, if \code{indp.tau0 = TRUE}, the probabilities are specified
#' as:
#' \deqn{\tau_m = [\tau_{0m}, (1-\tau_{0m})\tau^*_1, ...,
#' (1-\tau_{0m})\tau^*_K]}{
#' \tau_m = [\tau_0m, (1-\tau_0m)\tau*_1, ..., (1-\tau_0m)\tau*_K]}
#' \deqn{\tau_{0m} \sim \mathcal{B}eta(\kappa_0, \xi_0)}{
#' \tau_0m ~ Beta(\kappa_0, \xi_0)}
#' for each manifest variable \eqn{m = 1, ..., M}.
#'
#' A normal-inverse-Gamma prior distribution is assumed on the nonzero factor
#' loadings and on the idiosyncratic variances:
#' \deqn{\sigma^2_m \sim \mathcal{I}nv-\mathcal{G}amma(c_{0m}, C_{0m})}{
#' \sigma^2_m ~ Inv-Gamma(c0_m, C0_m)}
#' \deqn{\alpha_m^\Delta \mid \sigma^2_m \sim \mathcal{N}(0, A_{0m}\sigma^2_m)}{
#' \alpha_m^\Delta | \sigma^2_m ~ N(0, A0_m * \sigma^2_m)}
#' where \eqn{\alpha_m^\Delta} denotes the only nonzero loading in the \eqn{m}th
#' row of \eqn{\alpha}.
#'
#' For the regression coefficients, a multivariate Normal prior distribution is
#' assumed on each row \eqn{m = 1, ..., M} of \eqn{\beta}:
#' \deqn{\beta_m \sim \mathcal{N}(0, B_0 I_Q)}{\beta_m ~ N(0, B_0 I_Q)}
#' The covariates can be different across manifest variables, implying zero
#' restrictions on the matrix \eqn{\beta}. To specify covariates, use a list
#' of formulas as \code{model} (see example below). Intercept terms are
#' included by default and can be omitted in individual equations using the
#' usual syntax for R formulae (e.g., '\code{Y1 ~ X1 - 1}').
#'
#' To sample the correlation matrix \eqn{R} of the latent factors, marginal data
#' augmentation is implemented (van Dyk and Meng, 2001), see CFSHP section 2.2.
#' Using the transformation \eqn{\Omega = \Lambda^{1/2} R \Lambda^{1/2}}, the
#' parameters \eqn{\Lambda = diag(\lambda_1, ..., \lambda_K)} are used as
#' \emph{working parameters}. These parameters correspond to the variances of
#' the latent factors in an expanded version of the model where the factors do
#' not have unit variances. Two prior distributions can be specified on the
#' covariance matrix \eqn{\Omega} in the expanded model:
#' \itemize{
#' \item If \code{HW.prior = FALSE}, inverse-Wishart distribution:
#' \deqn{\Omega \sim \mathcal{I}nv-\mathcal{W}ishart(\nu_0, diag(S_0))}{
#' \Omega ~ Inv-Wishart(\nu_0, diag(S0))}
#' with \eqn{\nu_0} = \code{nu0} and \eqn{S_0} = \code{S0}.
#' \item If \code{HW.prior = TRUE}, Huang-Wand (2013) prior:
#' \deqn{\Omega \sim \mathcal{I}nv-\mathcal{W}ishart(\nu_0, W), \qquad
#' W = diag(w_1, ..., w_K)}{
#' \Omega ~ Inv-Wishart(nu0, W), W = diag(w_1, ..., w_K)}
#' \deqn{w_k \sim \mathcal{G}amma\left(\frac{1}{2},
#' \frac{1}{2\nu^*S_{0k}}\right)}{w_k ~ Gamma(1/2, 1/(2\nu*S0_k))}
#' with \eqn{\nu^*}{\nu*} = \code{nu0} - \code{Kmax} + 1, and the shape and
#' rate parameters are specified such that the mean of the gamma distribution
#' is equal to \eqn{\nu^* S_{0k}}{\nu* S0_k}, for each \eqn{k = 1, ..., K}.
#' }
#'
#' \strong{Missing values.} Missing values (\code{NA}) are allowed in the
#' manifest variables. They are drawn from their corresponding conditional
#' distributions during MCMC sampling. Control variables with missing values
#' can be passed to the function. However, all the observations with at least
#' one missing value in the covariates are discarded from the sample (a warning
#' message is issued in that case).
#'
#' @return The function returns an object of class '\code{befa}' containing the
#' MCMC draws of the model parameters saved in the following matrices (each
#' matrix has '\code{iter}' rows):
#' \itemize{
#' \item \code{alpha}: Factor loadings.
#' \item \code{sigma}: Idiosyncratic variances.
#' \item \code{R}: Correlation matrix of the latent factors (off-diagonal
#' elements only).
#' \item \code{beta}: regression coefficients (if any).
#' \item \code{dedic}: indicators (integers indicating on which factors the
#' manifest variable load).
#' }
#' The returned object also contains:
#' \itemize{
#' \item \code{nfac}: Vector of number of 'active' factors across MCMC
#' iterations (i.e., factors loaded by at least \code{Nid} manifest
#' variables).
#' \item \code{MHacc}: Logical vector indicating accepted proposals of
#' Metropolis-Hastings algorithm.
#' }
#' The parameters \code{Kmax} and \code{Nid} are saved as object attributes, as
#' well as the function call and the number of mcmc iterations (\code{burnin}
#' and \code{iter}), and two logical variables indicating if the returned object
#' has been post processed to address the column switching problem
#' (\code{post.column.switch}) and the sign switching problem
#' (\code{post.sign.switch}).
#'
#' @author Rémi Piatek \email{remi.piatek@@gmail.com}
#'
#' @references G. Conti, S. Frühwirth-Schnatter, J.J. Heckman, R. Piatek (2014):
#' ``Bayesian Exploratory Factor Analysis'', \emph{Journal of Econometrics},
#' 183(1), pages 31-57, \doi{10.1016/j.jeconom.2014.06.008}.
#'
#' @references A. Huang, M.P. Wand (2013):
#' ``Simple Marginally Noninformative Prior Distributions for Covariance
#' Matrices'', \emph{Bayesian Analysis}, 8(2), pages 439-452,
#' \doi{10.1214/13-BA815}.
#'
#' @references D.A. van Dyk, X.-L. Meng (2001):
#' ``The Art of Data Augmentation'',
#' \emph{Journal of Computational and Graphical Statistics}, 10(1), pages 1-50,
#' \doi{10.1198/10618600152418584}.
#'
#' @seealso \code{\link{post.column.switch}} and \code{\link{post.sign.switch}}
#' for column switching and sign switching of the factor loading matrix and of
#' the correlation matrix of the latent factors to restore identification
#' \emph{a posteriori}.
#'
#' @seealso \code{\link{summary.befa}} and \code{\link{plot.befa}} to summarize
#' and plot the posterior results.
#'
#' @seealso \code{\link{simul.R.prior}} and \code{\link{simul.nfac.prior}} to
#' simulate the prior distribution of the correlation matrix of the factors and
#' the prior distribution of the indicator matrix, respectively. This is useful
#' to perform prior sensitivity analysis and to understand the role of the
#' corresponding parameters in the factor search.
#'
#' @examples
#' #### model without covariates
#'
#' set.seed(6)
#'
#' # generate fake data with 15 manifest variables and 3 factors
#' N <- 100 # number of observations
#' Y <- simul.dedic.facmod(N, dedic = rep(1:3, each = 5))
#'
#' # run MCMC sampler
#' # notice: 1000 MCMC iterations for illustration purposes only,
#' # increase this number to obtain reliable posterior results!
#' mcmc <- befa(Y, Kmax = 5, iter = 1000)
#'
#' # post process MCMC draws to restore identification
#' mcmc <- post.column.switch(mcmc)
#' mcmc <- post.sign.switch(mcmc)
#' \donttest{
#' summary(mcmc) # summarize posterior results
#' plot(mcmc) # plot posterior results
#'
#' # summarize highest posterior probability (HPP) model
#' summary(mcmc, what = 'hppm')
#'
#' #### model with covariates
#'
#' # generate covariates and regression coefficients
#' Xcov <- cbind(1, matrix(rnorm(4*N), ncol = 4))
#' colnames(Xcov) <- c('(Intercept)', paste0('X', 1:4))
#' beta <- rbind(rnorm(15), rnorm(15), diag(3) %x% t(rnorm(5)))
#'
#' # add covariates to previous model
#' Y <- Y + Xcov %*% beta
#'
#' # specify model
#' model <- c('~ X1', # X1 covariate in all equations
#' paste0('Y', 1:5, ' ~ X2'), # X2 covariate for Y1-Y5 only
#' paste0('Y', 6:10, ' ~ X3'), # X3 covariate for Y6-Y10 only
#' paste0('Y', 11:15, ' ~ X4')) # X4 covariate for Y11-Y15 only
#' model <- lapply(model, as.formula) # make list of formulas
#'
#' # run MCMC sampler, post process and summarize
#' mcmc <- befa(model, data = data.frame(Y, Xcov), Kmax = 5, iter = 1000)
#' mcmc <- post.column.switch(mcmc)
#' mcmc <- post.sign.switch(mcmc)
#' mcmc.sum <- summary(mcmc)
#' mcmc.sum
#'
#' # compare posterior mean of regression coefficients to true values
#' beta.comp <- cbind(beta[beta != 0], mcmc.sum$beta[, 'mean'])
#' colnames(beta.comp) <- c('true', 'mcmc')
#' print(beta.comp, digits = 3)
#' }
#'
#' @export befa
#' @import checkmate
#' @importFrom stats rnorm runif
#' @useDynLib BayesFM, .registration = TRUE, .fixes = "F_"
befa <- function(model, data, burnin = 1000, iter = 10000, Nid = 3, Kmax,
A0 = 10, B0 = 10, c0 = 2, C0 = 1, HW.prior = TRUE,
nu0 = Kmax + 1, S0 = 1, kappa0 = 2, xi0 = 1, kappa = 1/Kmax,
indp.tau0 = TRUE, rnd.step = TRUE, n.step = 5,
search.delay = min(burnin, 10), R.delay = min(burnin, 100),
dedic.start, alpha.start, sigma.start, beta.start, R.start,
verbose = TRUE)
{
checkArgs <- makeAssertCollection()
##############################################################################
## data and model specification
if (missing(data))
data <- parent.frame()
else
assertDataFrame(data)
if (is.matrix(model))
model <- as.data.frame(model)
if (is.data.frame(model)) {
assertDataFrame(model, types = c('double', 'logical'), all.missing = FALSE)
Ytype <- sapply(model, typeof)
Yobs <- as.matrix(model)
Xobs <- nX <- 0
YXloc <- FALSE
  } else if (is.list(model) &&
all(sapply(model, is.formula))) {
tmp <- extract.data(model, data)
if (!is.null(tmp$errmsg)) {
for (w in tmp$errmsg) checkArgs$push(w)
} else {
for (w in tmp$warnmsg) warning(w, immediate. = TRUE)
Yobs <- tmp$Yobs
Ytype <- tmp$Ytype
Xobs <- tmp$Xobs
Xlab <- colnames(Xobs)
YXloc <- tmp$YXloc
nX <- ncol(Xobs)
}
} else {
    checkArgs$push('model should be a matrix, a data frame or a list of formulas.')
}
# if any errors, report and stop
reportAssertions(checkArgs)
  # check manifest variables are either continuous or dichotomous
  Ylab <- colnames(Yobs)    # labels needed for error message below
  Ycat <- rep(0, length(Ytype))
  Ycat[Ytype == 'logical'] <- 2
  Yind <- Ytype %in% c('double', 'numeric', 'logical')
  if (any(!Yind)) {
    checkArgs$push(paste('following variables neither continuous nor dichotomous:',
                         paste0(Ylab[!Yind], collapse = ', ')))
  }
nobs <- nrow(Yobs)
nvar <- ncol(Yobs)
nbeta <- sum(YXloc)
Ymiss <- is.na(Yobs)
  Yobs[Ymiss] <- -99999  # placeholder for NAs (never used; Ymiss flags them for the Fortran subroutine)
##############################################################################
## number of latent factors and identification restrictions
# minimum number of dedicated variables per factor
assertInt(Nid, lower = 1, upper = nvar, add = checkArgs)
# check maximum number of latent factors and Ledermann bound
Ledermann.bound <- 0.5 * (2 * nvar + 1 - sqrt(8 * nvar + 1))
if (missing(Kmax)) {
Kmax <- floor(min(nvar/Nid, Ledermann.bound))
} else {
assertInt(Kmax, lower = 1, upper = nvar, add = checkArgs)
}
if (Kmax > Ledermann.bound) {
warning("Check identification! (Kmax exceeds Ledermann bound)",
immediate. = TRUE)
}
# check consistency of Nid and Kmax
if (Kmax > floor(nvar/Nid)) {
msg <- paste("Too many latent factors specified given identification",
"restriction. Check arguments Nid and Kmax.")
checkArgs$push(msg)
}
# throw warning in case of single-factor model
if (Kmax == 1) {
checkArgs$push("Single-factor model not allowed.")
}
reportAssertions(checkArgs)
##############################################################################
## prior parameters
# function checking prior parameter values
check.prior <- function(x, n, min, name) {
if (length(x) == 1)
x <- rep(x, n)
assertNumeric(x, len = n, lower = min, finite = TRUE, any.missing = FALSE,
.var.name = name, add = checkArgs)
return(x)
}
tiny <- 10^-9
A0 <- check.prior(A0, nvar, tiny, "A0")
B0 <- check.prior(B0, nvar, tiny, "B0")
c0 <- check.prior(c0, nvar, tiny, "c0")
C0 <- check.prior(C0, nvar, tiny, "C0")
S0 <- check.prior(S0, Kmax, tiny, "S0")
nu0 <- check.prior(nu0, 1, Kmax, "nu0")
xi0 <- check.prior(xi0, 1, tiny, "xi0")
kappa <- check.prior(kappa, Kmax, tiny, "kappa")
kappa0 <- check.prior(kappa0, 1, tiny, "kappa0")
# use Huang-Wand (2013) prior?
assertFlag(HW.prior, add = checkArgs)
# use specific tau0 parameters for each manifest variable? [see CFSHP, p.36]
assertFlag(indp.tau0, add = checkArgs)
# if any errors, stop here
reportAssertions(checkArgs)
# prior values for Fortran subroutine
prior.idiovar <- cbind(c0, C0)
prior.loading <- 1/A0 # precision passed to Fortran subroutine
prior.beta <- 1/B0 # precision passed to Fortran subroutine
prior.dedic <- c(indp.tau0, 1/A0, c0, C0, xi0, kappa0, kappa)
prior.facdist <- c(HW.prior, nu0, S0)
##############################################################################
## starting values
### idiosyncratic variances
if (missing(sigma.start)) {
sigma.start <- 1/rgamma(nvar, shape = c0, rate = C0)
} else {
assertNumeric(sigma.start, len = nvar, lower = tiny, any.missing = FALSE,
add = checkArgs)
}
sigma.start[Ycat > 0] <- 1 # fix variance to 1 for binary variables
### factor loadings
if (missing(alpha.start)) {
alpha.start <- rnorm(nvar, mean = 0, sd = sqrt(A0))
} else {
assertNumeric(alpha.start, len = nvar, any.missing = FALSE,
add = checkArgs)
}
### regression coefficients
if (missing(beta.start)) {
if (nbeta > 0) {
beta.start <- rnorm(nbeta, mean = 0, sd = rep(sqrt(B0), rowSums(YXloc)))
} else {
beta.start <- double()
}
}
assertNumeric(beta.start, len = nbeta, any.missing = FALSE, add = checkArgs)
# prepare matrix to be passed to Fortran subroutine
beta.start.1 <- matrix(-99999, nX, nvar)
if (length(beta.start) == nbeta) {
beta.start.1[t(YXloc)] <- beta.start
beta.start.1 <- t(beta.start.1)
}
### correlation matrix of latent factors
if (missing(R.start)) {
R.start <- diag(Kmax)
}
# check matrix is a correlation matrix, positive semi-definite, and invertible
assertMatrix(R.start, mode = "double", nrows = Kmax, ncols = Kmax,
any.missing = FALSE, add = checkArgs)
if (!all(diag(R.start) == 1)) {
checkArgs$push("R.start should be a correlation matrix.")
}
if (!is.pos.semidefinite.matrix(R.start)) {
checkArgs$push("R.start should be a positive semi-definite matrix.")
}
if (!is.invertible.matrix(R.start)) {
checkArgs$push("R.start is not invertible (singular matrix).")
}
### indicators - default: maximum number of factors, random allocation
if (missing(dedic.start)) {
dedic.start <- rep(0, nvar)
ind <- matrix(sample(Nid * Kmax), ncol = Kmax)
for (k in 1:Kmax) dedic.start[ind[, k]] <- k
dedic.start[dedic.start == 0] <- sample(Kmax, nvar - Nid * Kmax,
replace = TRUE)
}
assertIntegerish(dedic.start, len = nvar, lower = 0, upper = Kmax,
any.missing = FALSE, add = checkArgs)
# check identification constraint
if (any(table(dedic.start[dedic.start != 0]) < Nid)) {
checkArgs$push("dedic.start does not correspond to an identified model.")
}
### latent factors
start.factor <- replicate(Kmax, rnorm(nobs))
##############################################################################
## MCMC tuning
assertCount(iter, positive = TRUE, add = checkArgs)
assertCount(burnin, positive = FALSE, add = checkArgs)
  assertInt(search.delay, lower = 0, upper = burnin + iter, add = checkArgs)
  assertInt(R.delay, lower = 0, upper = burnin + iter, add = checkArgs)
assertFlag(rnd.step, add = checkArgs)
if (rnd.step) {
assertNumber(n.step, lower = 1.1, add = checkArgs)
n.step <- n.step - 1 # so that number of steps ~ 1 + Poisson(n.step-1)
} else {
assertCount(n.step, positive = TRUE, add = checkArgs)
}
##############################################################################
## if any errors in arguments, report and stop execution
assertFlag(verbose, add = checkArgs)
reportAssertions(checkArgs)
##############################################################################
## MCMC sampling
# total number of model parameters
npar <- c(nvar, nvar, Kmax*(Kmax - 1)/2, nbeta)
npar.all <- sum(npar)
# seed for RNG in Fortran subroutine
seed <- round(runif(1) * 10^9)
# call Fortran subroutine
if (verbose) cat("starting MCMC sampling...\n")
mcmc <- .Fortran(F_befa,
as.integer(nvar),
as.integer(nobs),
as.integer(Kmax),
as.integer(Nid),
as.double(Yobs),
as.integer(Ycat),
as.logical(Ymiss),
as.integer(nX),
as.double(Xobs),
as.logical(YXloc),
as.integer(burnin + iter),
as.integer(burnin),
as.integer(search.delay),
as.integer(R.delay),
as.logical(rnd.step),
as.double(n.step),
as.integer(seed),
as.double(cbind(prior.loading, prior.idiovar)),
as.double(prior.beta),
as.double(prior.dedic),
as.double(prior.facdist),
as.double(cbind(alpha.start, sigma.start)),
as.double(beta.start.1),
as.integer(dedic.start),
as.double(start.factor),
as.double(R.start),
as.logical(verbose),
as.integer(npar.all),
MCMCdraws = double(iter * npar.all),
MCMCdedic = integer(iter * nvar),
MHacc = logical(iter))
if (verbose) cat("done with sampling!\n")
##############################################################################
## label MCMC draws and return output
# extract MCMC draws
par.mcmc <- split(mcmc$MCMCdraws, rep(1:4, times = iter * npar))
par.mcmc <- lapply(par.mcmc, matrix, nrow = iter)
# label parameters
names(par.mcmc)[1:3] <- c("alpha", "sigma", "R")
colnames(par.mcmc$R) <- paste("R", rep(1:(Kmax-1), times = (Kmax-1):1),
unlist(mapply(seq, 2:Kmax, Kmax)), sep = ":")
colnames(par.mcmc$alpha) <- paste0("alpha:", Ylab)
colnames(par.mcmc$sigma) <- paste0("sigma:", Ylab)
iter.lab <- burnin + 1:iter
rownames(par.mcmc$alpha) <- iter.lab
rownames(par.mcmc$sigma) <- iter.lab
rownames(par.mcmc$R) <- iter.lab
if (nbeta > 0) {
names(par.mcmc)[4] <- "beta"
beta.lab <- c()
for (i in 1:nvar) {
if (!any(YXloc[i, ])) next
beta.lab <- c(beta.lab, paste(Ylab[i], Xlab[YXloc[i, ]], sep = ":"))
}
colnames(par.mcmc$beta) <- beta.lab
rownames(par.mcmc$beta) <- iter.lab
}
# indicators
dedic.mcmc <- as.integer(mcmc$MCMCdedic)
dedic.mcmc <- matrix(dedic.mcmc, nrow = iter, ncol = nvar)
colnames(dedic.mcmc) <- paste0("dedic:", Ylab)
rownames(dedic.mcmc) <- iter.lab
# number of active latent factors across MCMC iterations
nfac.mcmc <- apply(dedic.mcmc, 1, count.unique.nonzero)
# prepare and return output
output <- par.mcmc
output$dedic <- dedic.mcmc
output$nfac <- nfac.mcmc
output$MHacc <- mcmc$MHacc
attr(output, "call") <- match.call()
attr(output, "title") <- "BEFA posterior sample"
attr(output, "Kmax") <- Kmax
attr(output, "Nid") <- Nid
attr(output, "iter") <- iter
attr(output, "burnin") <- burnin
attr(output, "post.column.switch") <- FALSE
attr(output, "post.sign.switch") <- FALSE
class(output) <- "befa"
return(output)
}
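
## Illustrative sketch (not part of the package code): the default cap on
## the number of latent factors used by befa() above. Kmax defaults to the
## smaller of the Ledermann bound and floor(nvar/Nid), so that at least Nid
## manifest variables can be dedicated to each factor. Function names here
## are hypothetical.
ledermann.bound <- function(nvar) 0.5 * (2 * nvar + 1 - sqrt(8 * nvar + 1))
default.Kmax <- function(nvar, Nid = 3) {
  floor(min(nvar / Nid, ledermann.bound(nvar)))
}
# default.Kmax(15) # 5 factors for the 15-variable example above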
|
/scratch/gouwar.j/cran-all/cranData/BayesFM/R/befa.R
|
#' @importFrom stats complete.cases cor terms
extract.data <- function(model, data)
{
errmsg <- warnmsg <- NULL
# get equation terms
neq <- length(model)
all.terms <- lapply(model, terms)
# which equations have a manifest variable?
resp <- sapply(all.terms, attr, 'response')
resp <- as.logical(resp)
# manifest variables
allY <- sapply(lapply(model, all.vars), '[', 1)
Ylab <- unique(allY[resp])
nY <- length(Ylab)
allY[!resp] <- NA
# intercept terms
const <- sapply(all.terms, attr, 'intercept')
const <- as.logical(const)
# covariates
allX <- lapply(all.terms, attr, 'term.labels')
Xlab <- unique(unlist(allX))
Xlab <- c('(Intercept)', Xlab)
nX <- length(Xlab)
YXloc <- matrix(FALSE, nY, nX, dimnames = list(Ylab, Xlab))
for (i in 1:neq) {
if (is.na(allY[i])) {
YXloc[, unlist(allX[i])] <- TRUE
YXloc[, '(Intercept)'] <- const[i]
} else {
YXloc[allY[i], unlist(allX[i])] <- TRUE
YXloc[allY[i], '(Intercept)'] <- const[i]
}
}
# check that all variables are in data frame
YXlab <- unique(unlist(c(Ylab, Xlab)))
YXlab <- YXlab[YXlab != '(Intercept)'] # no error message for intercept term
indat <- YXlab %in% names(data)
if (any(!indat)) {
errmsg <- paste('following variables not in data:\n ',
paste(YXlab[!indat], collapse = ', '))
return(list(errmsg))
}
YXdata <- data[YXlab]
nobs <- nrow(YXdata)
YXdata[["(Intercept)"]] <- 1 # add vector of ones for intercept terms
# type of manifest variables
Ytype <- sapply(YXdata[Ylab], typeof)
# discard missing values in covariates
nomiss <- complete.cases(YXdata[Xlab])
YXdata <- YXdata[nomiss, ]
if (!all(nomiss)) {
warnmsg <- paste(sum(!nomiss), 'observations discarded because of NAs',
'in at least one covariate')
nobs <- nobs - sum(!nomiss)
}
# check for multicollinearity in covariates specified in each equation
  Xcor <- cor(YXdata[Xlab[Xlab != '(Intercept)']]) # exclude intercept, use complete cases
for (i in 1:nY) {
Xcori <- Xcor[YXloc[i, -1], YXloc[i, -1]]
Xcori <- Xcori[lower.tri(Xcori)]
    if (any(abs(abs(Xcori) - 1) < 1e-12)) {  # catches cor = 1 as well as cor = -1
errmsg <- c(errmsg, paste('perfect multicollinearity between covariates',
'of manifest variable', Ylab[i]))
} else if (any(abs(Xcori) > 0.95)) {
warnmsg <- c(warnmsg, paste('possible multicollinearity problem between',
'covariates of manifest variable', Ylab[i]))
}
}
# return
return(list(Ytype = Ytype,
Yobs = as.matrix(YXdata[Ylab]),
Xobs = as.matrix(YXdata[Xlab]),
YXloc = YXloc,
errmsg = errmsg,
warnmsg = warnmsg))
}
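
## Illustrative sketch (hypothetical variable names, illustration only): how
## a list of formulas is mapped to the YXloc matrix by extract.data() above.
## A formula without a left-hand side ('~ X1') switches its covariates on in
## all equations.
if (FALSE) {
  dat <- data.frame(Y1 = rnorm(10), Y2 = rnorm(10),
                    X1 = rnorm(10), X2 = rnorm(10))
  mod <- lapply(c('Y1 ~ X2', 'Y2 ~ 1', '~ X1'), as.formula)
  extract.data(mod, dat)$YXloc
  # 2 x 3 logical matrix (rows Y1, Y2; columns (Intercept), X2, X1):
  # X2 is TRUE for Y1 only, the intercept and X1 are TRUE in both rows
}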
|
/scratch/gouwar.j/cran-all/cranData/BayesFM/R/extract.data.R
|
#'
#' Plot object of class 'befa'
#'
#' This function makes different plots that are useful to assess the posterior
#' results: a trace plot of the number of latent factors (also showing
#' Metropolis-Hastings acceptance across MCMC replications), a histogram
#' of the posterior probabilities of the number of factors, heatmaps for the
#' indicator probabilities, the factor loading matrix, and the correlation
#' matrix of the latent factors.
#'
#' @inheritParams summary.befa
#' @param x Object of class 'befa'.
#'
#' @details This function makes graphs based on the summary results returned by
#' \code{\link{summary.befa}}. It therefore accepts the same optional arguments
#' as this function.
#'
#' @return No return value, called for side effects (plots the posterior results
#' returned by \code{\link{summary.befa}}).
#'
#' @author Rémi Piatek \email{remi.piatek@@gmail.com}
#'
#' @seealso \code{\link{summary.befa}} to summarize posterior results.
#'
#' @examples
#' set.seed(6)
#'
#' # generate fake data with 15 manifest variables and 3 factors
#' Y <- simul.dedic.facmod(N = 100, dedic = rep(1:3, each = 5))
#'
#' # run MCMC sampler and post process output
#' # notice: 1000 MCMC iterations for illustration purposes only,
#' # increase this number to obtain reliable posterior results!
#' mcmc <- befa(Y, Kmax = 5, iter = 1000)
#' mcmc <- post.column.switch(mcmc)
#' mcmc <- post.sign.switch(mcmc)
#'
#' # plot results for highest posterior probability model
#' plot(mcmc, what = 'hppm')
#'
#' @export
#' @importFrom ggplot2 ggplot aes_string theme labs element_text
#' @importFrom ggplot2 geom_line geom_tile geom_text geom_rug geom_bar
#' @importFrom ggplot2 scale_fill_gradient2 guide_colorbar guides unit
plot.befa <- function(x, ...)
{
args <- list(...)
  show.val <- if (is.null(args$show.val)) TRUE else args$show.val
  what <- if (is.null(args$what)) 'maxp' else args$what
assertFlag(show.val)
if (!what %in% c('maxp', 'hppm'))
stop('plot.befa() only implemented for what = "maxp" and what = "hppm"')
Kmax <- attr(x, 'Kmax')
##############################################################################
### trace of number of factors
dat <- data.frame(nfac = factor(x$nfac, levels = 0:Kmax),
iter = as.numeric(names(x$nfac)),
MHacc = as.numeric(x$MHacc))
p.nfac <- ggplot(dat, aes_string(x = 'iter', y = 'nfac')) +
geom_line(colour = 'steelblue')
p.nfac <- p.nfac + labs(title = paste0('trace plot of number of factors\n',
'(accepted Metropolis-Hastings draws at bottom)'),
x = 'MCMC iterations',
y = 'number of factors')
# add Metropolis-Hastings acceptance
p.nfac <- p.nfac + geom_rug(aes_string(y = 'MHacc'), sides = 'b',
colour = 'darkcyan')
# posterior probabilities of number of factors
nft <- table(factor(x$nfac, levels = 0:Kmax))
dat <- data.frame(nfac = as.factor(0:Kmax),
freq = as.numeric(nft / length(x$nfac)))
p.hnfac <- ggplot(dat, aes_string(x = 'nfac')) +
geom_bar(aes_string(weight = 'freq'), fill = 'steelblue') +
labs(title = 'posterior probabilities of number of factors',
x = 'number of factors',
y = 'frequencies')
##############################################################################
# summarize and plot
x <- summary(x, ...)
if (what == 'hppm') {
alpha <- x$alpha$m1
dedic <- x$alpha$m1$dedic
R <- x$R$m1
} else {
alpha <- x$alpha
dedic <- x$alpha$dedic
R <- x$R
}
nvar <- length(dedic)
### matrix of indicator probabilities
if (what != 'hppm') { # skip for HPP model
pind <- matrix(NA, nvar, Kmax)
rownames(pind) <- sapply(strsplit(rownames(alpha), ':'), '[', 2)
colnames(pind) <- paste0('f', 1:Kmax)
for (i in 1:nvar)
pind[i, dedic[i]] <- alpha$prob[i]
# which factors are loaded by at least one measurement?
acti <- apply(!is.na(pind), 2, any)
# heatmap for active factors only
p.indic <- make.heatmap(pind[, acti],
title = 'indicator probabilities of being nonzero',
xlab = 'latent factors (active factors only)',
ylab = 'manifest variables',
show.val)
}
### matrix of factor loadings
# construct matrix from factor loadings and indicators
# (remove the 'alpha:' part from variable names to simplify plot)
alpha.post <- matrix(NA, nvar, Kmax)
rownames(alpha.post) <- sapply(strsplit(rownames(alpha), ':'), '[', 2)
colnames(alpha.post) <- paste0('f', 1:Kmax)
for (i in 1:nvar)
alpha.post[i, dedic[i]] <- alpha$mean[i]
# which factors are loaded by at least one measurement?
acti <- !apply(is.na(alpha.post), 2, all)
# heatmap for active factors only
p.alpha <- make.heatmap(alpha.post[, acti],
title = 'factor loading matrix',
xlab = 'latent factors (active factors only)',
ylab = 'manifest variables',
show.val)
### correlation matrix of the factors
# construct matrix from lower diagonal elements
Rmat <- .5 * diag(Kmax)
Rmat[lower.tri(Rmat)] <- R$mean
Rmat <- Rmat + t(Rmat)
rownames(Rmat) <- colnames(Rmat) <- paste0('f', 1:Kmax)
# heatmap for active factors only
p.Rmat <- make.heatmap(Rmat[acti, acti],
title = 'correlation matrix of the factors',
xlab = 'latent factors (active factors only)',
ylab = '',
show.val)
##############################################################################
print(p.nfac)
invisible(readline(prompt = "Press <Enter> to show next graph..."))
print(p.hnfac)
invisible(readline(prompt = "Press <Enter> to show next graph..."))
if (what != 'hppm') {
print(p.indic)
invisible(readline(prompt = "Press <Enter> to show next graph..."))
}
print(p.alpha)
invisible(readline(prompt = "Press <Enter> to show next graph..."))
print(p.Rmat)
}
################################################################################
make.heatmap <- function(x, title, xlab, ylab, show.val) {
# prepare data
xcol <- colnames(x)
xrow <- rownames(x)
dat <- data.frame(xvar = factor(rep(xcol, each = nrow(x)), levels = xcol),
yvar = factor(rep(xrow, ncol(x)), levels = rev(xrow)),
val = c(round(x, digits = 2)))
# make heatmap
p <- ggplot(dat, aes_string(x = 'xvar', y = 'yvar')) +
geom_tile(aes_string(fill = 'val')) +
scale_fill_gradient2(low = 'steelblue',
mid = 'white',
high = 'red',
na.value = NA)
# add title and labels
p <- p + labs(title = title, x = xlab, y = ylab, fill = 'posterior\nmean')
# adjust font sizes
# p <- p + theme(axis.title = element_text(size=12),
# axis.text = element_text(size=12),
# plot.title = element_text(size=16),
# legend.text = element_text(size=10))
# legend
p <- p + guides(fill = guide_colorbar(barheight = unit(6, 'cm')))
# add values to heatmap?
if (show.val)
p <- p + geom_text(aes_string(label = 'val'),
colour = 'white',
na.rm = TRUE)
return(p)
}
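
## Hypothetical usage of the internal helper above (illustration only):
if (FALSE) {
  m <- matrix(round(runif(6), 2), nrow = 3,
              dimnames = list(paste0('y', 1:3), c('f1', 'f2')))
  make.heatmap(m, title = 'demo heatmap', xlab = 'latent factors',
               ylab = 'manifest variables', show.val = TRUE)
}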
|
/scratch/gouwar.j/cran-all/cranData/BayesFM/R/plot.befa.R
|
#' @export
#' @importFrom ggplot2 ggplot aes_string geom_density theme labs element_blank
#' @importFrom ggplot2 ggplot_gtable ggplot_build
#' @importFrom gridExtra grid.arrange arrangeGrob
plot.simul.R.prior <- function(x, ...)
{
if (!is.list(x)) x <- as.list(x)
nsim <- length(x)
# function making individual plot
plot.dens <- function(Rsim, FUN, xlab) {
val <- lapply(Rsim, function(z) apply(z, 3, FUN))
val <- do.call(cbind, val)
dat <- data.frame(val = c(val),
lab = rep(colnames(val), each = nrow(val)))
# make plot
p <- ggplot(dat, aes_string(x = 'val', color = 'lab')) +
geom_density(kernel = 'gaussian') +
labs(x = xlab, y = 'prior density') +
theme(legend.position = 'bottom',
legend.direction = 'horizontal',
legend.title = element_blank())
return(p)
}
# make plots
plots <- list()
plots$maxcor <- plot.dens(x, FUN = get.maxcor, xlab = 'max(|R|)')
plots$mineig <- plot.dens(x, FUN = get.mineig, xlab = 'min(eigen(R))')
# get common legend
legend <- ggplot_gtable(ggplot_build(plots[[1]]))
legid <- which(sapply(legend$grobs, function(p) p$name) == 'guide-box')
legend <- legend$grobs[[legid]]
# make plot
grid.arrange(arrangeGrob(plots[[1]] + theme(legend.position = 'none'),
plots[[2]] + theme(legend.position = 'none'),
nrow = 1),
legend, nrow = 2, heights = c(10, 1),
top = 'correlation matrix of the latent factors')
}
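
## get.maxcor() and get.mineig() are internal helpers defined elsewhere in
## the package. Minimal sketches consistent with their use above (assumed,
## not the actual implementations):
# get.maxcor <- function(R) max(abs(R[lower.tri(R)]))
# get.mineig <- function(R) min(eigen(R, symmetric = TRUE)$values)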
|
/scratch/gouwar.j/cran-all/cranData/BayesFM/R/plot.simul.R.prior.R
|
#' @export
#' @importFrom ggplot2 ggplot aes_string geom_bar labs theme element_blank
plot.simul.nfac.prior <- function(x, ...)
{
nfac <- summary(x)$nfac
dat <- data.frame(freq = unlist(nfac),
nfac = c(as.integer(sapply(nfac, names))),
lab = rep(names(nfac), sapply(nfac, length)))
ggplot(dat, aes_string(x = 'nfac', y = 'freq')) +
geom_bar(aes_string(fill = 'lab'), position = 'dodge', stat = 'identity') +
labs(x = 'number of factors', y = 'prior probability') +
theme(legend.position = 'bottom',
legend.direction = 'horizontal',
legend.title = element_blank())
}
|
/scratch/gouwar.j/cran-all/cranData/BayesFM/R/plot.simul.nfac.prior.R
|
#'
#' Perform column switching on posterior MCMC sample
#'
#' This function reorders the columns of the factor loading matrix for each MCMC
#' draw, as well as the rows and columns of the correlation matrix of the
#' factors, to restore the identification of the model \emph{a posteriori} with
#' respect to the column switching problem.
#'
#' @param mcmc
#' Object of class '\code{befa}'.
#'
#' @details The reordering of the columns of the factor loading matrix is based
#' on the top elements of the columns (i.e., the first row containing a nonzero
#' factor loading in each nonzero column of \eqn{\alpha}, starting from the top
#' of the matrix). At each MCMC iteration, the nonzero columns of \eqn{\alpha}
#' are reordered such that the top elements appear in increasing order.
#' The rows and columns of the correlation matrix \eqn{R} of the factors are
#' switched accordingly. See section 4.3 of CFSHP (p.42) for more details.
#'
#' @return Same '\code{befa}' object as the one passed to the function, where
#' the indicators in the matrix \code{dedic}, as well as the rows and columns of
#' the correlation matrix of the factors saved in \code{draws}, have been
#' switched appropriately to restore the identification of the factor model with
#' respect to column switching.
#'
#' @author Rémi Piatek \email{remi.piatek@@gmail.com}
#'
#' @references
#' G. Conti, S. Frühwirth-Schnatter, J.J. Heckman,
#' R. Piatek (2014): ``Bayesian Exploratory Factor Analysis'',
#' \emph{Journal of Econometrics}, 183(1), pages 31-57,
#' \doi{10.1016/j.jeconom.2014.06.008}.
#'
#' @seealso \code{\link{post.sign.switch}} to restore identification a
#' posteriori with respect to the sign switching problem.
#'
#' @examples
#' set.seed(6)
#' Y <- simul.dedic.facmod(N = 100, dedic = rep(1:3, each = 5))
#' mcmc <- befa(Y, Kmax = 5, iter = 1000)
#' mcmc <- post.column.switch(mcmc)
#'
#' @export post.column.switch
#' @import checkmate
post.column.switch <- function(mcmc)
{
assertClass(mcmc, classes = "befa")
if (attr(mcmc, "post.column.switch")) {
warning("column switching already performed, nothing done.")
return(mcmc)
}
# issue warning if M-H acceptance rate too low
if (mean(mcmc$MHacc) < 0.2) {
warning(paste("M-H acceptance rate of sampler is low (< 0.20).",
"Check convergence and mixing!"))
}
Kmax <- attr(mcmc, "Kmax")
nvar <- ncol(mcmc$dedic)
iter <- nrow(mcmc$dedic)
R.npar <- Kmax * (Kmax - 1)/2
# index matrix used to reconstruct matrix from lower triangular elements
R.mat <- diag(Kmax) * (R.npar + 1)
R.mat[lower.tri(R.mat)] <- 1:R.npar
R.mat[upper.tri(R.mat)] <- t(R.mat)[upper.tri(R.mat)]
v <- 1:Kmax
for (i in 1:iter) {
d <- mcmc$dedic[i, ]
# relabel indicators
mcmc$dedic[i, ] <- relabel.dedic(d)
# reorder rows and columns of correlation matrix
u <- unique(d[d != 0])
r <- c(u, v[!v %in% u])
R <- c(mcmc$R[i, ], 1)
R <- matrix(R[R.mat], nrow = Kmax)
R <- R[r, r]
mcmc$R[i, ] <- R[lower.tri(R)]
}
attr(mcmc, "post.column.switch") <- TRUE
return(mcmc)
}
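
## Illustrative sketch: the indexing trick used above to rebuild a full
## correlation matrix from its lower-triangular draws. Appending a 1 to the
## vector of draws lets the diagonal index (R.npar + 1) restore the unit
## diagonal. Hypothetical values, with Kmax = 3:
if (FALSE) {
  Kmax <- 3
  R.npar <- Kmax * (Kmax - 1) / 2
  R.mat <- diag(Kmax) * (R.npar + 1)
  R.mat[lower.tri(R.mat)] <- 1:R.npar
  R.mat[upper.tri(R.mat)] <- t(R.mat)[upper.tri(R.mat)]
  r.draws <- c(0.3, 0.1, 0.2) # elements R[2,1], R[3,1], R[3,2]
  matrix(c(r.draws, 1)[R.mat], nrow = Kmax) # symmetric with unit diagonal
}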
|
/scratch/gouwar.j/cran-all/cranData/BayesFM/R/post.column.switch.R
|
#'
#' Perform sign switching on posterior MCMC sample
#'
#' This function performs a sign switch on the MCMC draws to restore the
#' consistency of the signs of the factors loadings and of the correlations of
#' the latent factors \emph{a posteriori}.
#'
#' @param mcmc
#' Object of class '\code{befa}'.
#' @param benchmark
#' Vector of integers of length equal to the maximum number of latent
#' factors. Each element indicates which factor loading is used as a
#' benchmark for the sign switch. If \code{NULL}, the factor loadings
#' with the highest posterior probabilities of being different from zero
#' in each column of the factor loading matrix are used as benchmarks.
#' @param benchmark.threshold
#' Minimum posterior probability for a factor loading to be considered
#' as a benchmark.
#'
#' @details The signs of the factor loadings, as well as of the corresponding
#' correlations of the latent factors, are switched for each MCMC iteration such
#' that the factor loadings defined as \code{benchmark}s are positive. The sign
#' switch can only be performed if \code{\link{post.column.switch}} has been run
#' before. See section 4.3 (p.42) of CFSHP for more details.
#'
#' If a latent factor has no benchmarks, or if its benchmark is equal to zero at
#' some MCMC iteration, then no sign switch is performed on the corresponding
#' loadings and correlations for this particular factor or MCMC iteration.
#'
#' Note that in complicated models where the sampler visits several models with
#' different numbers of latent factors, it may not be relevant to use the
#' default value of \code{benchmark}, as the posterior probabilities that the
#' factor loadings are different from zero would be computed across models.
#' Instead, the user might consider finding the highest posterior probability
#' model first, and use its top elements in each column of the factor loading
#' matrix as benchmarks to perform the sign switch.
#'
#' @return This function returns the same '\code{befa}' object, where the signs
#' of the factor loadings and of the factor correlations have been switched
#' appropriately to restore the identification of the factor model with respect
#' to sign switching.
#'
#' @author Rémi Piatek \email{remi.piatek@@gmail.com}
#'
#' @references
#' G. Conti, S. Frühwirth-Schnatter, J.J. Heckman,
#' R. Piatek (2014): ``Bayesian Exploratory Factor Analysis'',
#' \emph{Journal of Econometrics}, 183(1), pages 31-57,
#' \doi{10.1016/j.jeconom.2014.06.008}.
#'
#' @seealso \code{\link{post.column.switch}} for column switching of the factor
#' loading matrix and of the correlation matrix of the latent factors to restore
#' identification \emph{a posteriori}.
#'
#' @examples
#' set.seed(6)
#' Y <- simul.dedic.facmod(N = 100, dedic = rep(1:3, each = 5))
#' mcmc <- befa(Y, Kmax = 5, iter = 1000)
#' mcmc <- post.column.switch(mcmc)
#'
#' # factor loadings corresponding to variables 1, 6, 11, 12 and 13 are
#' # used as benchmarks:
#' mcmc1 <- post.sign.switch(mcmc, benchmark = c(1, 6, 11, 12, 13))
#'
#' # factor loadings with the highest posterior probability of being different
#' # from zero in each column are used as benchmark:
#' mcmc2 <- post.sign.switch(mcmc)
#'
#' @export post.sign.switch
#' @import checkmate
post.sign.switch <- function(mcmc, benchmark = NULL, benchmark.threshold = 0.5)
{
assertClass(mcmc, classes = "befa")
if (attr(mcmc, "post.sign.switch")) {
warning("sign switching already performed, nothing done.")
return(mcmc)
}
if (!attr(mcmc, "post.column.switch")) {
stop("post.column.switch should be run first.")
}
assertNumber(benchmark.threshold, lower = 0, upper = 1)
Kmax <- attr(mcmc, "Kmax")
nvar <- ncol(mcmc$dedic)
iter <- nrow(mcmc$dedic)
R.npar <- Kmax * (Kmax - 1)/2
# factor loadings used as benchmarks
if (is.null(benchmark)) {
alpha.post.prob <- matrix(0, Kmax, nvar)
for (k in 1:Kmax) {
alpha.post.prob[k, ] <- colMeans(mcmc$dedic == k)
}
benchmark <- max.col(alpha.post.prob, ties.method = "first")
for (k in 1:Kmax) {
if (alpha.post.prob[k, benchmark[k]] < benchmark.threshold) {
benchmark[k] <- NA # not a benchmark if post prob < threshold
}
}
} else {
assertIntegerish(benchmark, lower = 0, upper = nvar, any.missing = FALSE,
len = Kmax)
}
# index matrix used to reconstruct matrix from lower triangular elements
R.mat <- diag(Kmax) * (R.npar + 1)
R.mat[lower.tri(R.mat)] <- 1:R.npar
R.mat[upper.tri(R.mat)] <- t(R.mat)[upper.tri(R.mat)]
for (i in 1:iter) {
# switch signs of factor loadings
dedic <- mcmc$dedic[i, ]
switch <- sign(mcmc$alpha[i, benchmark])
    switch[is.na(switch) | switch == 0] <- 1 # no sign switch if no benchmark or benchmark loading is zero
switch.meas <- rep(0, nvar)
for (j in 1:nvar) {
if (dedic[j] == 0) next
switch.meas[j] <- switch[dedic[j]]
}
mcmc$alpha[i, ] <- mcmc$alpha[i, ] * switch.meas
# switch signs of rows and columns of correlation matrix
R <- c(mcmc$R[i, ], 1)
R <- matrix(R[R.mat], nrow = Kmax)
R <- diag(switch) %*% R %*% diag(switch)
mcmc$R[i, ] <- R[lower.tri(R)]
}
attr(mcmc, "post.sign.switch") <- TRUE
return(mcmc)
}
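
## Sketch of the strategy suggested in the documentation above: use the
## indicators of the highest posterior probability model as benchmarks for
## the sign switch. Assumes summary(mcmc, what = 'hppm') stores the HPPM
## indicators in $alpha$m1$dedic (as used in plot.befa) and that all Kmax
## factors are active in that model, otherwise which(...)[1] returns NA:
if (FALSE) {
  hppm.dedic <- summary(mcmc, what = 'hppm')$alpha$m1$dedic
  benchmark <- sapply(seq_len(attr(mcmc, 'Kmax')),
                      function(k) which(hppm.dedic == k)[1])
  mcmc <- post.sign.switch(mcmc, benchmark = benchmark)
}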
|
/scratch/gouwar.j/cran-all/cranData/BayesFM/R/post.sign.switch.R
|
#' @export
print.befa <- function(x, ...)
{
  if (!inherits(x, 'befa'))
stop('object passed to print.befa should be of class befa')
cat('BEFA output - Bayesian Exploratory Factor Analysis\n\n')
cat('call:\n ')
print(attr(x, 'call'))
cat('\n')
cat('posterior column switch:', attr(x, "post.column.switch"), '\n')
cat('posterior sign switch: ', attr(x, "post.sign.switch"), '\n\n')
}
|
/scratch/gouwar.j/cran-all/cranData/BayesFM/R/print.befa.R
|
#' @export
print.simul.R.prior <- function(x, ...)
{
cat('sample from prior distribution of factor correlation matrix\n')
cat('\ncall:\n ')
print(attr(x, 'call'))
cat('\nKmax =', attr(x, 'Kmax'), '\n')
cat('\nparameter values:\n ')
cat(names(x), sep = '\n ')
cat('\nnrep =', attr(x, 'nrep'), '\n\n')
cat('use summary() and plot() functions to summarize and plot prior density',
'of maximum correlation and/or of minimum eigenvalue', sep = '\n')
}
|
/scratch/gouwar.j/cran-all/cranData/BayesFM/R/print.simul.R.prior.R
|
#' @export
print.simul.nfac.prior <- function(x, ...)
{
cat('sample from prior distribution of number of latent factors\n')
cat('\ncall:\n ')
print(attr(x, 'call'))
cat('\n')
cat('nvar =', attr(x, 'nvar'), '\n')
cat('Kmax =', attr(x, 'Kmax'), '\n')
cat('Nid =', attr(x, 'Nid'), '\n')
cat('kappa =', attr(x, 'kappa'), '\n')
cat('nrep =', attr(x, 'nrep'), '\n\n')
cat('use summary() and plot() functions to summarize and plot',
'prior distribution of number of latent factors', sep = '\n')
}
|
/scratch/gouwar.j/cran-all/cranData/BayesFM/R/print.simul.nfac.prior.R