read.gra <- function(file, sorted = FALSE) {
## disable warnings and revert to old settings afterwards
oldOptions <- options(warn = -1)
on.exit(options(oldOptions))
## read information from the graph file
mapinfo <- scan(file)
## extract the number of districts
S <- mapinfo[1]
mapinfo <- mapinfo[-1]
cat("Note: map consists of", S, "regions\n")
cat("Reading map ...")
## create a vector to contain the names of the districts and the adjacency matrix
districts <- rep(0, S)
pmat <- matrix(0, S, S)
## loop over the districts
for (i in 1:S) {
## extract the name of the i-th district
districts[i] <- paste(mapinfo[1])
mapinfo <- mapinfo[-1]
## extract the number of neighbors of the i-th district
nrn <- mapinfo[1]
mapinfo <- mapinfo[-1]
pmat[i, i] <- nrn
## off-diagonal entries in pmat. Note: the neighbor indices in the file are zero-based!
if (nrn > 0) {
for (j in 1:nrn) {
pmat[i, mapinfo[j] + 1] <- pmat[mapinfo[j] + 1, i] <- -1
}
mapinfo <- mapinfo[-(1:nrn)]
}
}
cat(" finished\n")
if (sorted) {
## sort districts and adjacency matrix
if (sum(is.na(as.numeric(districts))) == 0) {
districts <- as.numeric(districts)
cat("Note: regions sorted by number\n")
} else cat("Note: regions sorted by name\n")
ord <- order(districts)
districts <- sort(districts)
pmat.sort <- matrix(0, S, S)
for (i in 1:S) {
ordi <- ord[i]
for (j in 1:S) {
ordj <- ord[j]
pmat.sort[i, j] <- pmat[ordi, ordj]
}
}
pmat <- pmat.sort
}
rownames(pmat) <- districts
colnames(pmat) <- districts
class(pmat) <- "gra"
return(pmat)
}
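## Example (illustrative): a tiny graph file for three regions, in which
## region "1" neighbors "2" and "3". Per region the file lists the name, the
## number of neighbors, and the zero-based neighbor indices.
graFile <- tempfile(fileext = ".gra")
writeLines(c("3",
             "1", "2", "1", "2",
             "2", "1", "0",
             "3", "1", "0"), graFile)
pmat <- read.gra(graFile)
pmat # diagonal = number of neighbors, -1 marks adjacent regions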
|
/scratch/gouwar.j/cran-all/cranData/BayesX/R/read.gra.R
|
## Author: Daniel Sabanes Bove [daniel *.* sabanesbove *a*t* campus *.* lmu *.* de]
## Project: BayesX
## Time-stamp: <[ringDirxy.R] by DSB Sam 28/02/2009 19:18 (GMT) on daniel@puc-home>
## Description: Private R code replacing maptools:::.ringDirxy.
## History: 28/02/2009 file creation
.ringDirxy <- function(xy) {
xy <- as.matrix(xy)
nPoints <- nrow(xy)
## checks
stopifnot(is.numeric(xy), identical(ncol(xy), as.integer(2)), nPoints >= 3)
## index vector from the 2nd to the second-to-last point
inds <- seq_len(nPoints - 2) + 1
## the start point and the middle and tail matrix
start <- xy[1, ] # here the drop of dims is OK
middle <- xy[inds, , drop = FALSE] # here prevent drop of dims if there are only 3 points
tail <- xy[inds + 1, , drop = FALSE]
## compute twice the signed areas
areas <- (middle[, 1] - start[1]) * (tail[, 2] - start[2]) - (tail[, 1] - start[1]) *
(middle[, 2] - start[2])
## and sum up to total twice signed area of the polygon
total <- sum(areas)
## the sign then gives the direction
if (total > 0) {
return(as.integer(-1)) # counter-clockwise
} else {
return(as.integer(1)) # clockwise
}
}
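## Quick check (illustrative): a counter-clockwise unit square yields -1;
## reversing the point order (clockwise) yields 1.
sq <- rbind(c(0, 0), c(1, 0), c(1, 1), c(0, 1))
.ringDirxy(sq)        # -1: counter-clockwise
.ringDirxy(sq[4:1, ]) # 1: clockwise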
|
/scratch/gouwar.j/cran-all/cranData/BayesX/R/ringDirxy.R
|
smooth.bnd <- function(map, digits = 2, scale = 1) {
if (!inherits(map, "bnd"))
stop("Argument 'map' is not an object of class 'bnd'!")
nrpolys <- length(map)
for (i in 1:nrpolys) {
temp <- unique(round(map[[i]] * scale, digits)/scale)
if (nrow(temp) > 3)
map[[i]] <- temp
}
class(map) <- "bnd"
return(map)
}
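## Example (illustrative): after rounding to 2 digits, the two nearly
## identical starting points collapse into a single point.
poly <- cbind(c(0, 0.001, 1, 1, 0), c(0, 0.002, 0, 1, 1))
map <- structure(list(region1 = poly), class = "bnd")
smooth.bnd(map, digits = 2)[["region1"]] # 4 points remain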
|
/scratch/gouwar.j/cran-all/cranData/BayesX/R/round.bnd.R
|
shp2bnd <- function(shpname, regionnames, check.is.in = TRUE) {
## safe coercions ...
shpname <- as.character(shpname)
regionnames <- as.character(regionnames)
check.is.in <- as.logical(check.is.in)
## ... and checks
stopifnot(identical(length(shpname), 1L), length(regionnames) >= 1L, identical(length(check.is.in),
1L))
## now read the shapefile information
shp <- shapefiles::read.shapefile(shpname)
dbf <- shapefiles::read.dbf(paste(shpname, ".dbf", sep = ""))
## extract names of the regions: if a single string was given, interpret it
## as the name of the dbf variable containing the region names
regionnames <- if (identical(length(regionnames), 1L)) {
as.character(dbf$dbf[regionnames][[1L]])
} else {
## otherwise keep the names as given
regionnames
}
## delete commas in names of the regions
newRegionnames <- gsub(pattern = ",", replacement = "", x = regionnames, fixed = TRUE)
## check if commas have been deleted and if so, issue a warning
if (!identical(regionnames, newRegionnames))
warning(simpleWarning("commas in names of the regions have been deleted"))
## overwrite old region names with new region names
regionnames <- newRegionnames
rm(newRegionnames)
## split data into closed polygons. we need:
## storage for the new names and the new polygons
polyList <- list()
## and the corresponding original indexes
originalRegionNumber <- integer()
cat("Reading map ...")
## now process all original regions
for (i in seq_along(regionnames)) {
## this is temporary storage (originally a data.frame with X and Y columns):
temppoly <- as.matrix(shp$shp$shp[[i]]$points)
dimnames(temppoly) <- NULL
## as long as there are still points to be processed
while ((nPoints <- nrow(temppoly)) > 0L) {
## at which later position does the first point occur again in the data?
endIndex <- which((temppoly[-1L, 1] == temppoly[1L, 1]) & (temppoly[-1L,
2] == temppoly[1L, 2])) + 1L
## take the next such occurrence, or the last point if the polygon is not closed
endIndex <- if (length(endIndex) > 0L) {
endIndex[1L]
} else {
nPoints
}
## the range of this polygon
polyRange <- 1L:endIndex
## this was index i
originalRegionNumber <- c(originalRegionNumber, i)
## save the polygon
polyList <- c(polyList, list(temppoly[polyRange, ]))
## list is necessary so that c(list(), data.frame(..)) is a one-element list, and
## not a list with the variables of the data.frame as elements
## and delete this part from temporary storage
temppoly <- temppoly[-polyRange, ]
}
}
cat(" finished\n")
## so how many polygons do we have now?
nPolys <- length(polyList)
cat("Note: map consists originally of", nPolys, "polygons\n")
## here is the parallel list of the surrounding region names of single polygons
surrounding <- replicate(n = nPolys, expr = character()) ## until now no region names anywhere!
## check for polygons contained in another polygon?
if (check.is.in) {
## get dimensions of all polygons
dims <- sapply(polyList, nrow)
## save here which polygons should be removed, because they are boundaries to
## polygons lying inside
rmcheck <- logical(nPolys)
## save here the indexes of the polygons which have already been
## matched/processed. these must not be processed again!
whichWereProcessed <- integer()
## process each polygon i
for (i in seq_len(nPolys)) {
## if we had processed this already
if (i %in% whichWereProcessed) {
## go on to the next polygon
next
} else {
## add i to processed ones
whichWereProcessed <- union(whichWereProcessed, i)
}
## which polygons have same number of points as the current?
sameDimsInds <- setdiff(which(dims == dims[i]), whichWereProcessed) ## but without the already processed ones
## process all polygons j with same dims as polygon i (this works as a hash)
for (j in sameDimsInds) {
## compute squared distance of polygon_i and reversed polygon_j
reverseInds <- dims[i]:1L
squaredDistance <- sum((polyList[[i]] - polyList[[j]][reverseInds,
])^2)
## if it is small enough
if (squaredDistance < 1e-05) {
## find out which is the outer one
outer <- inner <- 0L
if (.ringDirxy(polyList[[j]]) < 0) {
outer <- j
inner <- i
} else {
outer <- i
inner <- j
}
## remove the outer polygon
rmcheck[outer] <- TRUE
## and add the information in which region it is lying (each polygon can only lie
## in 1 other region, of course...)
surrounding[[inner]] <- regionnames[originalRegionNumber[outer]]
}
## we have processed j
whichWereProcessed <- union(whichWereProcessed, j)
}
}
## we have processed all polygons, and can remove the unnecessary ones
polyList <- polyList[!rmcheck]
originalRegionNumber <- originalRegionNumber[!rmcheck]
surrounding <- surrounding[!rmcheck]
cat("Note: After removing unnecessary surrounding polygons, the map consists of",
length(polyList), "polygons\n")
}
## add the original region names to the polygons list as names
names(polyList) <- regionnames[originalRegionNumber]
## the new unique regions
regions <- unique(names(polyList))
cat("Note: map consists of", length(regions), "regions\n")
## compute relation of height to width (for plotting etc)
minima <- sapply(polyList, function(x) {
apply(x, 2, min)
})
maxima <- sapply(polyList, function(x) {
apply(x, 2, max)
})
minimum <- apply(minima, 1, min)
maximum <- apply(maxima, 1, max)
x.range <- maximum[1] - minimum[1]
y.range <- maximum[2] - minimum[2]
height2width <- round(y.range/x.range, digits = 2)
## now return the bnd object
return(structure(polyList, class = "bnd", height2width = height2width, surrounding = surrounding,
regions = regions))
}
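## Typical call (hypothetical file and column names): read
## "districts.shp"/"districts.dbf", take the region names from the dbf
## column "NAME", and save the result in the bnd format.
## map <- shp2bnd(shpname = "districts", regionnames = "NAME")
## write.bnd(map, file = "districts.bnd")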
|
/scratch/gouwar.j/cran-all/cranData/BayesX/R/shp2bnd.R
|
## Author: Daniel Sabanes Bove [daniel *.* sabanesbove *a*t* campus *.* lmu *.* de]
## Project: BayesX
## Time-stamp: <[spAndBndConversion.R] by DSB Die 09/06/2009 21:19 (CEST)>
## Description: Convert a SpatialPolygons object from package sp to the bnd
## format required by BayesX, and vice versa.
## History: 19/02/2009 file creation
## 22/02/2009 explicit scoping of sp package functions
bnd2sp <- function(bndObject) {
## check if S3 class of bndObject is 'bnd'
stopifnot(inherits(x = bndObject, what = "bnd"))
## extracts
bndNames <- names(bndObject)
regions <- unique(bndNames)
bndAttributes <- attributes(bndObject)
## close all polygons (last coordinates must match first coordinates)
bndObject <- lapply(bndObject, FUN = function(polygon) {
if (!isTRUE(identical(polygon[1, ], polygon[nrow(polygon), ]))) {
rbind(polygon, polygon[1, ])
} else {
polygon
}
})
## set up return list
ret <- list()
## process all unique regions
for (id in regions) {
## which polygons belong to this region?
idMatches <- which(id == bndNames)
## convert these polygons to Polygon class objects
idPolygons <- lapply(bndObject[idMatches], FUN = sp::Polygon, hole = FALSE)
## add the Polygons object with these Polygon parts to return list
ret[[id]] <- sp::Polygons(srl = idPolygons, ID = id)
}
## add holes of inner polygons to outer regions
surrounding <- bndAttributes$surrounding
whichAreInner <- which(sapply(surrounding, length) > 0L)
for (innerInd in whichAreInner) {
## get the hole
hole <- sp::Polygon(coords = bndObject[[innerInd]], hole = TRUE)
## get outer polys list
outerId <- surrounding[[innerInd]]
outerPolys <- ret[[outerId]]@Polygons
## add the hole to outer polys
outerPolys <- c(outerPolys, hole)
## write back extended outer polys list as new Polygons object with same ID as
## before
ret[[outerId]] <- sp::Polygons(srl = outerPolys, ID = outerId)
}
## convert list of Polygons to a SpatialPolygons object and return that
ret <- sp::SpatialPolygons(Srl = ret)
return(ret)
}
sp2bnd <- function(spObject, regionNames = sapply(spObject@polygons, slot, "ID"),
height2width = round(diff(sp::bbox(spObject)[2, ])/diff(sp::bbox(spObject)[1,
]), 2), epsilon = sqrt(.Machine$double.eps)) {
## check if S4 class of spObject is 'SpatialPolygons'
stopifnot(is(object = spObject, class2 = "SpatialPolygons"))
## extracts
spObject <- sp::polygons(spObject) # now surely a SpatialPolygons object
spList <- spObject@polygons # discard other slots
nRegions <- length(spList)
## check if number of regions matches with the length of regionNames etc
stopifnot(is.character(regionNames), identical(length(regionNames), nRegions),
height2width > 0)
## set up return and holes list
ret <- list()
holes <- list()
## number of polygons and holes already processed
numPolysProcessed <- 0
numHolesProcessed <- 0
## process each region
for (regionIterator in seq_along(spList)) {
thisRegion <- spList[[regionIterator]]@Polygons
## process each Polygon in this region
for (polygonObject in thisRegion) {
## if it is a hole, put it in holes, else in ret. the name is set to the region
## name so we know from which region this polygon stems.
if (polygonObject@hole) {
## increment hole counter
numHolesProcessed <- numHolesProcessed + 1
## and correct invariant
holes[[numHolesProcessed]] <- sp::coordinates(polygonObject)
names(holes)[numHolesProcessed] <- regionNames[regionIterator]
} else {
## increment Polygon counter
numPolysProcessed <- numPolysProcessed + 1
## and correct invariant
ret[[numPolysProcessed]] <- sp::coordinates(polygonObject)
names(ret)[numPolysProcessed] <- regionNames[regionIterator]
}
}
}
## sanity check
stopifnot(all.equal(length(ret), numPolysProcessed), all.equal(length(holes),
numHolesProcessed))
## now process all holes:
## set up surrounding list
surrounding <- replicate(n = numPolysProcessed, character())
## use number of coordinates as hash for quicker search for the embedded region
polyDims <- sapply(ret, nrow)
holeDims <- sapply(holes, nrow)
for (i in seq_along(holes)) {
## hash lookup
potentialMatchesInds <- which(holeDims[i] == polyDims)
## now more precise search in these potential matches
matchFound <- FALSE
for (j in potentialMatchesInds) {
## decide
thisHole <- holes[[i]]
thisRegion <- ret[[j]]
squaredEuclideanDistances <- rowSums((thisHole[rev(seq_len(nrow(thisHole))),
] - thisRegion)^2)
doesMatch <- max(squaredEuclideanDistances) < epsilon
## if it matches, update the surrounding data and break out of the for loop
if (doesMatch) {
matchFound <- TRUE
surrounding[[j]] <- names(holes)[i]
## we can proceed with the next hole:
break
}
}
## echo a warning if a hole has no match
if (!matchFound) {
warning(simpleWarning(paste("No match found for hole in region", names(holes)[i])))
}
}
## finally collect information and return the bnd object
ret <- structure(ret, surrounding = surrounding, height2width = height2width,
class = "bnd")
return(ret)
}
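## Round-trip sketch (assumes `map` is a "bnd" object and package sp is
## installed): inner polygons become holes in the SpatialPolygons object
## and are recovered as "surrounding" information on the way back.
## spMap <- bnd2sp(map)  # "bnd" -> SpatialPolygons
## map2 <- sp2bnd(spMap) # back to "bnd"; polygon IDs become region names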
|
/scratch/gouwar.j/cran-all/cranData/BayesX/R/spAndBndConversion.R
|
write.bnd <- function(map, file, replace = FALSE) {
if (!inherits(map, "bnd"))
stop("argument 'map' is not an object of class 'bnd'")
## coercions, checks
replace <- as.logical(replace)
file <- as.character(file)
stopifnot(identical(length(file), 1L), identical(length(replace), 1L))
## check whether the file exists
if (file.exists(file)) {
if (replace) {
removeSucceeded <- file.remove(file)
if (!removeSucceeded) {
stop("file exists, but could not be removed")
}
} else {
stop("specified file already exists")
}
}
myQuote <- function(string) {
return(paste("\"", string, "\"", sep = ""))
}
## names of the belonging regions
belongingRegions <- names(map)
## no. of polygons
nPolygons <- length(map)
## the surrounding list
surrounding <- attr(map, "surrounding")
for (i in seq_len(nPolygons)) {
dat <- map[[i]]
dat <- dat[complete.cases(dat), ]
temp <- paste(myQuote(belongingRegions[i]), nrow(dat), sep = ",")
write(temp, file, append = TRUE)
if (length(outerRegionName <- surrounding[[i]])) {
con <- paste("is.in", myQuote(outerRegionName), sep = ",")
write(con, file, append = TRUE)
}
write.table(dat, file, append = TRUE, col.names = FALSE, row.names = FALSE,
sep = ",", quote = FALSE)
}
return(invisible())
}
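## Example (illustrative): write a one-region map consisting of a closed
## unit square; the `surrounding` attribute is empty here.
poly <- cbind(c(0, 1, 1, 0, 0), c(0, 0, 1, 1, 0))
map <- structure(list(region1 = poly), class = "bnd",
                 surrounding = list(character(0)))
bndFile <- tempfile(fileext = ".bnd")
write.bnd(map, bndFile)
readLines(bndFile) # '"region1",5' followed by the five coordinate pairs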
|
/scratch/gouwar.j/cran-all/cranData/BayesX/R/write.bnd.R
|
write.gra <- function(map, file, replace = FALSE) {
if (!inherits(map, "gra"))
stop("Argument 'map' is not an object of class 'gra'!")
## check whether the file exists
if (replace && file.exists(file))
file.remove(file)
if (!replace && file.exists(file))
stop("Specified file already exists!")
## names of districts
districts <- as.integer(rownames(map))
## no. of regions
S <- length(districts)
write(S, file)
## loop over the regions
for (i in 1:S) {
## write name of the district
write(districts[i], file, append = TRUE)
## write no. of neighbors
write(map[i, i], file, append = TRUE)
## derive and write neighbors
ind <- which(map[i, ] == -1) - 1
write(ind, file, ncolumns = length(ind), append = TRUE)
}
return(invisible())
}
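## Round-trip example (illustrative): an adjacency matrix of class "gra"
## (diagonal = neighbor counts, -1 = adjacency) written back to the graph
## file format that read.gra() above expects.
pm <- matrix(c(2, -1, -1,
               -1, 1, 0,
               -1, 0, 1), 3, 3, byrow = TRUE)
dimnames(pm) <- list(1:3, 1:3)
class(pm) <- "gra"
graFile <- tempfile(fileext = ".gra")
write.gra(pm, graFile)
readLines(graFile) # region name, neighbor count, zero-based neighbor indices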
|
/scratch/gouwar.j/cran-all/cranData/BayesX/R/write.gra.R
|
setwd("c:/arbeit/packages/BayesX")
# nonparametric function
x <- round(runif(300,-pi,pi),2)
time <- seq(1, 60)
time <- rep(time, 5)
y <- sin(x) + rnorm(300, 0, 0.3)
ytime <- sin(2*pi*time/60) + rnorm(300, 0, 0.3)
data <- data.frame(x,time,y,ytime)
write.table(data, "inst/examples/nonparametric.raw", col.names=TRUE, row.names=FALSE, sep=" ", quote=FALSE)
# spatial effect
# surface
# sample path
# boundary file
# graph file
|
/scratch/gouwar.j/cran-all/cranData/BayesX/inst/examples/createdata.R
|
#####################################################################################
## Author: Daniel Sabanes Bove [daniel *.* sabanesbove *a*t* campus *.* lmu *.* de]
## Time-stamp: <[createData.R] by DSB Die 10/03/2009 11:06 (GMT) on daniel@puc-home>
##
## Description:
## Create the data for the example BayesX run.
##
## History:
## 10/03/2009 file creation
#####################################################################################
### setup
nObs <- 300
set.seed(991)
### create covariates and their effects
## smooth functions in x1 and x2
x1 <- round(runif(n=nObs,
min=-pi, max=pi),
2)
x1Effect <- sin(x1) * cos(x1)
plot(x1Effect[order(x1)] ~ x1[order(x1)],
type="l")
x2 <- round(runif(n=nObs),
2)
x2Effect <- (x2 - 0.5)^2
plot(x2Effect[order(x2)] ~ x2[order(x2)],
type="l")
## linear functions in x3 and x4
x3 <- rnorm(n=nObs)
x3Effect <- 9.2 * x3
x4 <- rexp(n=nObs)
x4Effect <- 5.1 * x4
## spatial effect from the district in Tanzania
library(BayesX)
tanzania <- read.bnd(file="tanzania.bnd")
tanzaniaEffects <- rnorm(n=length(tanzania))
names(tanzaniaEffects) <- names(tanzania)
drawmap(map=tanzania,
data=
data.frame(x=names(tanzaniaEffects),
y=tanzaniaEffects),
regionvar="x",
plotvar="y")
district <- sample(x=names(tanzania),
size=nObs,
replace=TRUE)
districtEffect <- tanzaniaEffects[district]
### now generate the response
linearPredictor <- x1Effect + x2Effect + x3Effect + x4Effect + districtEffect
y <- linearPredictor + rnorm(n=nObs)
### write data into text file
data <- data.frame(x1=x1,
x2=x2,
x3=x3,
x4=x4,
district=district,
y=y)
write.table(x=data, file="data.txt",
quote=FALSE, col.names=TRUE, row.names=FALSE)
|
/scratch/gouwar.j/cran-all/cranData/BayesX/inst/examples/samples/createData.R
|
run.bayesx <-
function(prg = NULL, verbose = TRUE, ...)
{
os.win <- .Platform$OS.type == "windows"
if(os.win) {
bin <- shQuote(system.file(package = "BayesXsrc", "libs", .Platform$r_arch, "BayesX.exe"))
} else {
bin <- shQuote(system.file(package = "BayesXsrc", "libs", .Platform$r_arch, "BayesX"))
}
if(is.null(prg)) {
output <- file.exists("output")
temp <- file.exists("temp")
if(!os.win)
bin <- paste(shQuote(file.path(R.home(),"bin","R")), "CMD", bin)
system(bin, ...)
if(!output && file.exists("output"))
try(suppressWarnings(file.remove("output")), silent = TRUE)
if(!temp && file.exists("temp"))
try(suppressWarnings(file.remove("temp")), silent = TRUE)
return(invisible(NULL))
} else {
fileo <- getwd()
prg <- path.expand(prg)
dir <- dirname(prg)
prg.name <- basename(prg)
setwd(dir)
output <- file.exists("output")
temp <- file.exists("temp")
ptm <- proc.time()
if(!verbose) {
if(os.win) {
log <- try(system(paste(bin, " ", dir, "/", prg.name, sep = ""),
intern = TRUE, show.output.on.console = FALSE, ...))
} else {
log <- try(system(paste(bin, " ", dir, "/", prg.name,
" > ", dir, "/bayesx.log", sep = ""), intern = FALSE, ...))
}
} else log <- try(system(paste(bin, " ", dir, "/", prg.name, sep = ""), intern = FALSE, ...))
if(!os.win) {
if(log != 0)
warning("problem processing BayesX!")
}
if(!verbose && .Platform$OS.type == "unix")
log <- readLines(paste(dir, "/bayesx.log", sep = ""))
now <- proc.time()
runtime <- now - ptm
if(verbose)
cat("Total run time was:", runtime[3L], "sec\n")
if(!output && file.exists("output"))
try(suppressWarnings(file.remove("output")), silent = TRUE)
if(!temp && file.exists("temp"))
try(suppressWarnings(file.remove("temp")), silent = TRUE)
try(setwd(fileo), silent = TRUE)
logerror <- FALSE
for(logline in log)
if(grepl("ERROR", logline))
logerror <- TRUE
if(logerror)
warning("an error occurred during runtime of BayesX, please check the BayesX logfile!")
return(invisible(list(log = log, runtime = runtime)))
}
}
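## Usage sketch (hypothetical program file): run a BayesX program file and
## inspect the captured log and run time.
## res <- run.bayesx(prg = "model.prg", verbose = FALSE)
## res$runtime
## head(res$log)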
|
/scratch/gouwar.j/cran-all/cranData/BayesXsrc/R/run.bayesx.R
|
mydebug <- FALSE
if (mydebug) {
cat("R_ARCH=", R_ARCH,"\n")
cat("R_PACKAGE_DIR=", R_PACKAGE_DIR, "\n")
cat("R_PACKAGE_NAME=", R_PACKAGE_NAME, "\n")
cat("R_PACKAGE_SOURCE=", R_PACKAGE_SOURCE, "\n")
cat("SHLIB_EXT=", SHLIB_EXT, "\n")
cat("WINDOWS=", WINDOWS, "\n")
}
binary <- if(WINDOWS) "BayesX.exe" else "BayesX"
if ( file.exists(binary) ) {
libarch <- if (nzchar(R_ARCH)) paste('libs', R_ARCH, sep='') else 'libs'
dest <- file.path(R_PACKAGE_DIR, libarch)
dir.create(dest, recursive = TRUE, showWarnings = FALSE)
file.copy(binary, dest, overwrite = TRUE)
file.remove(binary)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesXsrc/src/install.libs.R
|
#' Bayes GLM arg checks
#'
#' Checks arguments for \code{BayesGLM} and \code{BayesGLM_cifti}
#'
#' Avoids duplicated code between \code{BayesGLM} and \code{BayesGLM_cifti}
#'
#' @param scale_BOLD,scale_design See \code{\link{BayesGLM}}.
#' @param Bayes,EM See \code{\link{BayesGLM}}.
#' @param ar_order,ar_smooth,aic See \code{\link{BayesGLM}}.
#' @param num.threads See \code{\link{BayesGLM}}.
#' @param return_INLA See \code{\link{BayesGLM}}.
#' @param verbose See \code{\link{BayesGLM}}.
#' @param combine_sessions See \code{\link{BayesGLM}}.
#' @param meanTol,varTol,emTol See \code{\link{BayesGLM}}.
#'
#' @return The arguments that may have changed, in a list: \code{scale_BOLD},
#' \code{do_Bayesian}, \code{do_EM}, and \code{do_pw}.
#'
#' @keywords internal
BayesGLM_argChecks <- function(
combine_sessions = FALSE,
scale_BOLD = c("auto", "mean", "sd", "none"),
scale_design = TRUE,
Bayes = TRUE,
EM = FALSE,
ar_order = 6,
ar_smooth = 5,
aic = FALSE,
num.threads = 4,
return_INLA = c("trimmed", "full", "minimal"),
verbose=1,
meanTol=1e-6,
varTol=1e-6,
emTol=1e-3
){
stopifnot(is_1(combine_sessions, "logical"))
if (isTRUE(scale_BOLD)) {
message("Setting `scale_BOLD` to 'auto'"); scale_BOLD <- "auto"
}
if (isFALSE(scale_BOLD)) {
message("Setting `scale_BOLD` to 'none'"); scale_BOLD <- "none"
}
scale_BOLD <- match.arg(scale_BOLD, c("auto", "mean", "sd", "none"))
stopifnot(is_1(scale_design, "logical"))
stopifnot(is_1(Bayes, "logical"))
stopifnot(is_1(EM, "logical"))
if (EM && !Bayes) {
warning("EM is a Bayesian method: setting `Bayes` to `TRUE`.")
Bayes <- TRUE
}
if (Bayes) {
if (!EM) { check_INLA(require_PARDISO=FALSE) }
}
if (isTRUE(return_INLA)) {
message("Setting `return_INLA` to 'trimmed'"); return_INLA <- "trimmed"
}
if (isFALSE(return_INLA)) {
message("Setting `return_INLA` to 'minimal'"); return_INLA <- "minimal"
}
return_INLA <- match.arg(return_INLA, c("trimmed", "full", "minimal"))
# Rename these arguments.
do_Bayesian <- Bayes; rm(Bayes)
do_EM <- EM; rm(EM)
if (is.null(ar_order)) ar_order <- 0
stopifnot(is_1(ar_order, "numeric"))
do_pw <- ar_order > 0
if (is.null(ar_smooth)) ar_smooth <- 0
stopifnot(is_1(ar_smooth, "numeric"))
stopifnot(is_1(aic, "logical"))
stopifnot(is_1(num.threads, "numeric"))
stopifnot(num.threads <= parallel::detectCores())
if (isTRUE(verbose)) { verbose <- 2 }
if (isFALSE(verbose)) { verbose <- 0 }
stopifnot(is_posNum(verbose, zero_ok=TRUE))
stopifnot(is_posNum(meanTol))
stopifnot(is_posNum(varTol))
stopifnot(is_posNum(emTol))
list(
scale_BOLD=scale_BOLD,
do_Bayesian=do_Bayesian,
do_EM = do_EM,
do_pw = do_pw,
return_INLA=return_INLA
)
}
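## Usage sketch (internal helper; requires the fMRItools check functions):
## the returned list reflects the coercions applied, e.g. `ar_order = 0`
## disables prewhitening.
## args <- BayesGLM_argChecks(Bayes = FALSE, ar_order = 0)
## args$do_pw # FALSE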
#' BayesGLM
#'
#' Performs spatial Bayesian GLM for fMRI task activation
#'
#' @inheritSection INLA_Description INLA Requirement
#'
#' @param data A list of sessions in the \code{"BfMRI.sess"} object format. Each
#' session is a list with elements \code{"BOLD"}, \code{"design"}, and
#' optionally \code{"nuisance"}. Each element should be a numeric matrix with
#' \eqn{T} rows. The name of each element in \code{data} is the name of that
#' session. See \code{?is.BfMRI.sess} for details.
#'
#' Note that the argument \code{session_names} can be used instead of providing
#' the session names as the names of the elements in \code{data}.
#' @param vertices,faces If \code{Bayes}, the geometry data can be provided
#' with either both the \code{vertices} and \code{faces} arguments, or with the
#' \code{mesh} argument.
#'
#' \code{vertices} is a \eqn{V \times 3} matrix, where each row contains the
#' Euclidean coordinates at which a given vertex in the mesh is located.
#' \eqn{V} is the number of vertices in the mesh.
#'
#' \code{faces} is a \eqn{F \times 3} matrix, where each row contains the
#' vertex indices for a given triangular face in the mesh. \eqn{F} is the
#' number of faces in the mesh.
#' @param mesh If \code{Bayes}, the geometry data can be provided
#' with either both the \code{vertices} and \code{faces} arguments, or with the
#' \code{mesh} argument.
#'
#' \code{mesh} is an \code{"inla.mesh"} object. This can be created for surface
#' data using \code{\link{make_mesh}}.
#' @param mask (Optional) A length \eqn{V} logical vector indicating the
#' vertices to include.
#' @inheritParams task_names_Param
#' @inheritParams session_names_Param
#' @inheritParams scale_BOLD_Param
#' @inheritParams scale_design_Param
#' @inheritParams Bayes_Param
# @inheritParams EM_Param
#' @inheritParams ar_order_Param
#' @inheritParams ar_smooth_Param
#' @inheritParams aic_Param
#' @inheritParams num.threads_Param
#' @inheritParams return_INLA_Param
#' @inheritParams verbose_Param
#' @inheritParams combine_sessions_Param
#' @param meanTol,varTol Tolerance for mean and variance of each data location.
#' Locations which do not meet these thresholds are masked out of the analysis.
#' Default: \code{1e-6} for both.
# @inheritParams emTol_Param
#'
#' @return A \code{"BayesGLM"} object: a list with elements
#' \describe{
#' \item{INLA_model_obj}{The full result of the call to \code{INLA::inla}.}
#' \item{task_estimates}{The task coefficients for the Bayesian model.}
#' \item{result_classical}{Results from the classical model: task estimates, task standard error estimates, residuals, degrees of freedom, and the mask.}
#' \item{mesh}{The model mesh including only the locations analyzed, i.e. within \code{mask}, without missing values, and meeting \code{meanTol} and \code{varTol}.}
#' \item{mesh_orig}{The original mesh provided.}
#' \item{mask}{A mask of \code{mesh_orig} indicating the locations inside \code{mesh}.}
#' \item{design}{The design matrix, after centering and scaling, but before any nuisance regression or prewhitening.}
#' \item{task_names}{The names of the tasks.}
#' \item{session_names}{The names of the sessions.}
#' \item{hyperpar_posteriors}{Hyperparameter posterior densities.}
#' \item{theta_estimates}{Theta estimates from the Bayesian model.}
#' \item{posterior_Sig_inv}{For joint group modeling.}
#' \item{mu_theta}{For joint group modeling.}
#' \item{Q_theta}{For joint group modeling.}
#' \item{y}{For joint group modeling: The BOLD data after any centering, scaling, nuisance regression, or prewhitening.}
#' \item{X}{For joint group modeling: The design matrix after any centering, scaling, nuisance regression, or prewhitening.}
#' \item{prewhiten_info}{Vectors of values across locations: \code{phi} (AR coefficients averaged across sessions), \code{sigma_sq} (residual variance averaged across sessions), and AIC (the maximum across sessions).}
#' \item{call}{match.call() for this function call.}
#' }
#'
#' @importFrom excursions submesh.mesh
#' @importFrom matrixStats colVars
#' @importFrom Matrix bandSparse bdiag crossprod solve
#' @importFrom parallel detectCores makeCluster clusterMap stopCluster
#' @importFrom stats as.formula
#' @importFrom fMRItools is_1 nuisance_regression scale_timeseries
#'
#' @importFrom utils tail
#'
#' @importFrom methods as
#' @export
BayesGLM <- function(
data,
vertices = NULL,
faces = NULL,
mesh = NULL,
mask = NULL,
# Below arguments shared with `BayesGLM_cifti`
task_names = NULL,
session_names = NULL,
combine_sessions = TRUE,
scale_BOLD = c("auto", "mean", "sd", "none"),
scale_design = TRUE,
Bayes = TRUE,
#EM = FALSE,
ar_order = 6,
ar_smooth = 5,
aic = FALSE,
num.threads = 4,
return_INLA = c("trimmed", "full", "minimal"),
verbose = 1,
meanTol = 1e-6,
varTol = 1e-6#, emTol = 1e-3,
){
EM <- FALSE
emTol <- 1e-3
#TO DO:
#add "(ignored if classicalGLM_only = TRUE) to some params"
#add if statements for some of code if classicalGLM_only = TRUE
#make need_mesh object for if classicalGLM_only == TRUE and no AR smoothing
#do masking only (no involvement of mesh) if need_mesh == FALSE <-- copy from classicalGLM()
#check whether data is a list OR a session (for single-session analysis)
#check whether each element of data is a session (use is.session)
# V = number of data locations
# T = length of time series for each session (vector)
# K = number of tasks
# Preliminary steps. ---------------------------------------------------------
## Check simple arguments.
## These checks are in a separate function because they are shared with
## `BayesGLM_cifti`.
argChecks <- BayesGLM_argChecks(
scale_BOLD = scale_BOLD,
scale_design = scale_design,
Bayes = Bayes,
EM = EM,
ar_order = ar_order,
ar_smooth = ar_smooth,
aic = aic,
num.threads = num.threads,
return_INLA = return_INLA,
verbose = verbose,
combine_sessions = combine_sessions,
varTol = varTol,
meanTol = meanTol,
emTol = emTol
)
scale_BOLD <- argChecks$scale_BOLD
do_Bayesian <- argChecks$do_Bayesian
do_EM <- argChecks$do_EM
do_pw <- argChecks$do_pw
return_INLA <- argChecks$return_INLA
need_mesh <- do_Bayesian || (do_pw && ar_smooth > 0)
## Define a few return variables that may or may not be calculated. ----------
INLA_model_obj <- hyperpar_posteriors <- Q_theta <- NULL
task_estimates <- hyperpar_posteriors <- mu_theta <- y_all <- X_all_list <- NULL
theta_estimates <- Sig_inv <- NULL
## Sessions and data dimensions. ---------------------------------------------
if (!is.BfMRI.sess(data)) {
stop("`data` must be a list of sessions, as described in `?is.BfMRI.sess`.")
}
if (is.null(session_names)) {
session_names <- names(data)
} else {
if (!is.null(names(data)) && !identical(session_names, names(data))) {
warning("Using `session_names` rather than `names(data)`.")
names(data) <- session_names
}
}
nS <- nS_orig <- length(session_names) # number of sessions
nK <- ncol(data[[1]]$design) # number of fields
nV <- ncol(data[[1]]$BOLD) # number of data locations
nT <- vapply(data, function(x){ nrow(x$BOLD) }, 0) # numbers of timepoints
if (nS == 1 && combine_sessions) combine_sessions <- FALSE
## Mesh: check or make. ------------------------------------------------------
# We need a mesh if doing spatial Bayesian modeling or any AR smoothing.
if (need_mesh) {
# Check that only mesh OR vertices+faces supplied
has_mesh <- !is.null(mesh)
has_verts_faces <- !is.null(vertices) && !is.null(faces)
if (!xor(has_mesh, has_verts_faces)) {
stop('Must supply EITHER mesh OR vertices and faces.')
}
if (is.null(mesh)) mesh <- make_mesh(vertices, faces) # This function has been modified to no longer require INLA (2022-03-24)
if (mesh$n != nV) { stop("Mesh has ", mesh$n, " locations, but the data has ", nV, " locations.") }
} else {
mesh <- NULL
}
## Mask: check or make. -----------------------------------------------------
# Get `mask` based on intersection of input mask and `make_mask` checks.
if (is.null(mask)) { mask <- rep(TRUE, ncol(data[[1]]$BOLD)) }
mask <- mask & make_mask(data, meanTol=meanTol, varTol=varTol)
if (!any(mask)) { stop("No in-mask data locations.") }
# If any masked locations, apply to `mesh` and `data`.
mesh_orig <- NULL #for output only. initialize to NULL, only update if applying a mask to the mesh
# define `mask2` unconditionally: it is used below (e.g. `nV <- sum(mask2)`)
# even when no locations are masked, and it also covers `need_mesh==FALSE`
mask <- mask2 <- as.logical(mask)
if (!all(mask)) {
# `mesh`
if (need_mesh) {
mesh_orig <- mesh #for later plotting
# mesh <- excursions::submesh.mesh(mask, mesh) # This is commented out because we now have our own submesh function!
mesh <- submesh(mask, mesh)
mask2 <- !is.na(mesh$idx$loc) #update mask (sometimes vertices not excluded by mask will be excluded in mesh)
mesh$idx$loc <- mesh$idx$loc[mask2]
}
# `data`
for (ss in 1:nS) {
data[[ss]]$BOLD <- data[[ss]]$BOLD[,mask2,drop=FALSE]
}
}
if (do_Bayesian && !do_EM) {spde <- INLA::inla.spde2.matern(mesh)}
if (do_EM) {
stop()
#spde <- create_spde_surf(mesh)
}
# Update number of locations after masking
nV <- sum(mask2)
nV_all <- length(mask2)
## Task names: check or make. ------------------------------------------------
if (!is.null(task_names)) {
if (length(task_names) != nK) {
stop(
'I detect ', nK,
' task(s) based on the design matrix, but the length of task_names is ',
length(task_names), '. Please fix task_names.'
)
}
} else {
# Grab beta names from design (if provided)
task_names <- colnames(data[[1]]$design)
if (is.null(task_names)) { task_names <- paste0("beta", seq(nK)) }
}
## Scale, nuisance regress, and/or concatenate session data. -----------------
#collect data and design matrices
design <- vector('list', length=nS)
nK2 <- if (is.null(data[[1]]$nuisance)) { 0 } else { ncol(data[[1]]$nuisance) }
for (ss in seq(nS)) {
# Scale data
# TEMPORARY FOR fMRItools < 3.0 -----
# `scale_timeseries` used to always transpose the matrix before returning.
# In 3.0 Damon got rid of this t(), so we will need it here (four lines below this one).
# Later, require fMRItools > 3.0.
dBOLD <- dim(data[[ss]]$BOLD)
data[[ss]]$BOLD <- scale_timeseries(t(data[[ss]]$BOLD), scale=scale_BOLD, transpose=FALSE)
if (!isTRUE(all.equal(dBOLD, dim(data[[ss]]$BOLD), check.attributes=FALSE))) {
data[[ss]]$BOLD <- t(data[[ss]]$BOLD)
}
# ---------------------------------
# Scale design matrix
if (scale_design) {
data[[ss]]$design <- scale_design_mat(data[[ss]]$design)
} else {
data[[ss]]$design <- scale(data[[ss]]$design, scale = FALSE)
}
design[[ss]] <- data[[ss]]$design #after scaling but before nuisance regression
#regress nuisance parameters from BOLD data and design matrix
if ('nuisance' %in% names(data[[ss]])) {
nuisance_s <- scale(data[[ss]]$nuisance, scale=FALSE)
data[[ss]]$BOLD <- nuisance_regression(data[[ss]]$BOLD, nuisance_s)
data[[ss]]$design <- nuisance_regression(data[[ss]]$design, nuisance_s)
data[[ss]]$nuisance <- NULL
}
}
#concatenate sessions if combine_sessions=TRUE
if(combine_sessions){
#concatenate BOLD data across all sessions
data <- list(
session_combined = list(
BOLD = do.call(rbind, lapply(data, function(sess){ sess$BOLD })),
design = do.call(rbind, lapply(data, function(sess){ sess$design }))
)
)
# Update nT, nS, session_names
nT <- nrow(data$session_combined$BOLD)
sess_names_orig <- session_names
session_names <- 'session_combined'
nS <- 1
} else {
# [TO DO]: allow different `nT`.
# Is this only problematic when `do_pw`?
if (length(unique(nT)) > 1) {
stop("Not supported yet: different BOLD time durations while `combine_sessions=FALSE`.")
}
nT <- nT[1]
}
# Prewhitening. --------------------------------------------------------------
if (do_pw) {
if (verbose>0) cat("\tPrewhitening.\n")
## Estimate prewhitening parameters. ---------------------------------------
AR_coeffs <- array(dim = c(nV,ar_order,nS))
AR_resid_var <- array(dim = c(nV,nS))
AR_AIC <- if (aic) { array(dim = c(nV,nS)) } else { NULL }
#estimate prewhitening parameters for each session
for (ss in 1:nS) {
resids <- nuisance_regression(data[[ss]]$BOLD, data[[ss]]$design)
AR_est <- pw_estimate(resids, ar_order, aic=aic)
AR_coeffs[,,ss] <- AR_est$phi
AR_resid_var[,ss] <- AR_est$sigma_sq
if (aic) { AR_AIC[,ss] <- AR_est$aic }
}
#average prewhitening parameters across sessions
avg_AR <- apply(AR_coeffs, 1:2, mean)
avg_var <- apply(as.matrix(AR_resid_var), 1, mean)
if (aic) { max_AIC <- apply(AR_AIC, 1, max) } else { max_AIC <- NULL }
#smooth prewhitening parameters
if (ar_smooth > 0) {
AR_smoothed_list <- pw_smooth(
vertices=mesh$loc, faces=mesh$graph$tv,
#mask=mask,
AR=avg_AR, var=avg_var, FWHM=ar_smooth
)
avg_AR <- AR_smoothed_list$AR
avg_var <- AR_smoothed_list$var
}
## Apply prewhitening. -----------------------------------------------------
# Create the sparse pre-whitening matrix
if (is.null(num.threads) || num.threads < 2) { # `||` short-circuits when num.threads is NULL
# Initialize the block diagonal covariance matrix
template_pw <- Matrix::bandSparse(
n = nT, k = 0:(ar_order + 1), symmetric = TRUE
)
template_pw_list <- rep(list(template_pw), nV)
for (vv in 1:nV) {
if(vv %% 100 == 0) if (verbose>0) cat("\tLocation",vv,"of",nV,"\n")
template_pw_list[[vv]] <- .getSqrtInvCpp(
AR_coeffs = avg_AR[vv,],
nTime = nT,
avg_var = avg_var[vv]
)
}
} else {
if (!requireNamespace("parallel", quietly = TRUE)) {
stop("Prewhitening in parallel requires the `parallel` package. Please install it.", call. = FALSE)
}
max_threads <- min(parallel::detectCores(), 25) # cap the thread count at 25
num_threads <- min(max_threads,num.threads)
cl <- parallel::makeCluster(num_threads)
template_pw_list <- parallel::clusterMap(
cl,
.getSqrtInvCpp,
AR_coeffs = split(avg_AR, row(avg_AR)),
nTime = nT,
avg_var = avg_var,
SIMPLIFY = FALSE
)
parallel::stopCluster(cl)
}
if (verbose>0) cat("\tDone!\n")
#consider using a variant of bdiag_m if this is very slow. See help(Matrix::bdiag)
sqrtInv_all <- Matrix::bdiag(template_pw_list)
#apply prewhitening matrix to BOLD and design for each session
data <- sapply(data, function(data_s) {
#bold_out <- matrix(NA, nT, nV)
bold_out <- as.vector(sqrtInv_all %*% c(data_s$BOLD))
#bold_out[,mask] <- pw_BOLD
all_design <- organize_data(data_s$BOLD, data_s$design)$design
pw_design <- sqrtInv_all %*% all_design
return(list(BOLD = bold_out, design = pw_design))
}, simplify = FALSE)
}
# Classical GLM. -------------------------------------------------------------
#organize data
y_all <- c()
X_all_list <- NULL
# Classical GLM
result_classical <- vector('list', length=nS)
for (ss in seq(nS)) {
#set up vectorized data and big sparse design matrix
if(!do_pw) data_s <- organize_data(data[[ss]]$BOLD, data[[ss]]$design)
if(do_pw) data_s <- data[[ss]] #data has already been "organized" (big sparse design) in prewhitening step above
y_all <- c(y_all, data_s$BOLD)
X_list <- list(data_s$design)
names(X_list) <- session_names[ss]
X_all_list <- c(X_all_list, X_list)
y_reg <- data_s$BOLD #a vector (grouped by location)
X_reg <- data_s$design
#perform classical GLM after any prewhitening
beta_hat_s <- SE_beta_hat_s <- matrix(NA, nV_all, nK)
XTX_inv <- try(Matrix::solve(Matrix::crossprod(X_reg)))
if (inherits(XTX_inv, "try-error")) {
stop("There is some numerical instability in your design matrix (due to very large or very small values). Scaling the design matrix is suggested.")
}
coef_s <- as.matrix(XTX_inv %*% t(X_reg) %*% y_reg) #a vector of (estimates for location 1, estimates for location 2, ...)
coef_s_mat <- matrix(coef_s, nrow = nV, ncol = nK)
beta_hat_s[mask2==TRUE,] <- coef_s_mat
resid_s <- t(matrix(y_reg - X_reg %*% coef_s, nrow = nT))
# ESTIMATE STANDARD ERRORS OF ESTIMATES
#compute residual SD
#using length(y_reg)/nV instead of nT here because we want nT for single session case and nT*nS for multi-session case
DOF_true <- (length(y_reg)/nV) - nK - nK2 - 1
DOF_false <- (length(y_reg)/nV - 1)
var_error <- matrixStats::rowVars(resid_s) * DOF_false / DOF_true #correct DOF
if(do_pw) var_error <- rep(mean(var_error), length(var_error)) #if prewhitening has been done, use same estimate of residual SD everywhere
sd_error <- sqrt(var_error)
#compute SE of betas
SE_beta_s <- sqrt(Matrix::diag(XTX_inv)) * rep(sd_error, times = nK)
SE_beta_hat_s[mask2==TRUE,] <- SE_beta_s
colnames(beta_hat_s) <- colnames(SE_beta_hat_s) <- task_names
result_classical[[ss]] <- list(
estimates = beta_hat_s,
SE_estimates = SE_beta_hat_s,
resids = resid_s,
DOF = DOF_true,
mask = mask2
)
}
names(result_classical) <- session_names
# Bayesian GLM. --------------------------------------------------------------
if (do_Bayesian) {
#construct betas and repls objects
replicates_list <- organize_replicates(n_sess=nS, task_names=task_names, mesh=mesh)
betas <- replicates_list$betas
repls <- replicates_list$repls
model_data <- make_data_list(y=y_all, X=X_all_list, betas=betas, repls=repls)
## EM Model. ---------------------------------------------------------------
if(do_EM) {
stop()
# if (!requireNamespace("MatrixModels", quietly = TRUE)) {
# stop("EM requires the `MatrixModels` package. Please install it.", call. = FALSE)
# }
# if (verbose>0) cat('\tEstimating model with EM.\n')
# Psi_k <- spde$Amat
# Psi <- Matrix::bdiag(rep(list(Psi_k),nK))
# A <- Matrix::crossprod(model_data$X %*% Psi)
# # Initial values for kappa and tau
# kappa2 <- 4
# phi <- 1 / (4*pi*kappa2*4)
# # Using values based on the classical GLM
# if (verbose>0) cat("\t\tFinding best guess initial values.\n")
# beta_hat <- MatrixModels:::lm.fit.sparse(model_data$X, model_data$y)
# res_y <- (model_data$y - model_data$X %*% beta_hat)@x
# sigma2 <- stats::var(res_y)
# beta_hat <- matrix(beta_hat, ncol = nK*nS)
# rcpp_spde <- create_listRcpp(spde$spde)
# if(nS > 1) {
# task_cols <- sapply(seq(nS), function(ss) seq(nK) + nK *(ss - 1))
# beta_hat <- apply(task_cols,1,function(x) beta_hat[,x])
# }
# n_threads <- parallel::detectCores()
# n_threads <- min(n_threads,nK,num.threads)
# cl <- parallel::makeCluster(n_threads)
# kappa2_phi_rcpp <- parallel::parApply(
# cl = cl,
# beta_hat,
# 2,
# .initialKP,
# theta = c(kappa2, phi),
# spde = rcpp_spde,
# n_sess = nS,
# tol = emTol,
# verbose = FALSE
# )
# parallel::stopCluster(cl)
# if (verbose>0) cat("\t\tDone!\n")
# theta <- c(t(kappa2_phi_rcpp), sigma2)
# theta_init <- theta
# Ns <- 50 # This is a level of approximation used for the Hutchinson trace estimator
# if(verbose>0) cat("\t\tStarting EM algorithm.\n")
# em_output <-
# .findTheta(
# theta = theta,
# spde = rcpp_spde,
# y = model_data$y,
# X = model_data$X,
# QK = make_Q(theta, rcpp_spde, nS),
# Psi = as(Psi, "dgCMatrix"),
# A = as(A, "dgCMatrix"),
# Ns = 50,
# tol = emTol,
# verbose = verbose>0
# )
# if(verbose>0) cat("\t\tEM algorithm complete!\n")
# kappa2_new <- phi_new <- sigma2_new <- mu_theta <- NULL
# list2env(em_output, envir = environment())
# Qk_new <- mapply(spde_Q_phi,kappa2 = kappa2_new, phi = phi_new,
# MoreArgs = list(spde=rcpp_spde), SIMPLIFY = F)
# Q_theta <- Matrix::bdiag(Qk_new)
# if(nS > 1) Q_theta <- Matrix::bdiag(lapply(seq(nS), function(x) Q_theta))
# Sig_inv <- Q_theta + A/sigma2_new
# m <- Matrix::t(model_data$X%*%Psi)%*%model_data$y / sigma2_new
# mu_theta <- Matrix::solve(Sig_inv, m)
# # Prepare results
# task_estimates <- matrix(NA, nrow = length(mask2), ncol = nK*nS)
# task_estimates[mask2 == 1,] <- matrix(mu_theta,nrow = nV, ncol = nK*nS)
# colnames(task_estimates) <- rep(task_names, nS)
# task_estimates <- lapply(seq(nS), function(ss) task_estimates[,(seq(nK) + nK * (ss - 1))])
# names(task_estimates) <- session_names
# avg_task_estimates <- NULL
# if(combine_sessions) avg_task_estimates <- Reduce(`+`,task_estimates) / nS
# theta_estimates <- c(sigma2_new,c(phi_new,kappa2_new))
# names(theta_estimates) <- c("sigma2",paste0("phi_",seq(nK)),paste0("kappa2_",seq(nK)))
# #extract stuff needed for group analysis
# tau2_init <- 1 / (4*pi*theta_init[seq(nK)]*theta_init[(seq(nK) + nK)])
# mu_init <- c(log(1/tail(theta_init,1)), c(rbind(log(sqrt(tau2_init)),log(sqrt(theta_init[seq(nK)])))))
# tau2 <- 1 / (4*pi*kappa2_new*phi_new)
# mu_theta <- c(log(1/sigma2_new),c(rbind(log(sqrt(tau2)),log(sqrt(kappa2_new)))))
# if (verbose>0) cat("\t\tDone!\n")
## INLA Model. -------------------------------------------------------------
} else {
#estimate model using INLA
if (verbose>0) cat('\tEstimating model with INLA.\n')
#organize the formula and data objects
repl_names <- names(repls)
hyper_initial <- c(-2,2)
hyper_initial <- rep(list(hyper_initial), nK)
hyper_vec <- paste0(', hyper=list(theta=list(initial=', hyper_initial, '))')
formula_vec <- paste0('f(',task_names, ', model = spde, replicate = ', repl_names, hyper_vec, ')')
formula_vec <- c('y ~ -1', formula_vec)
formula_str <- paste(formula_vec, collapse=' + ')
formula <- as.formula(formula_str)
INLA_model_obj <- INLA::inla(
formula,
data=model_data,
#data=INLA::inla.stack.data(model_data, spde=spde),
control.predictor=list(A=model_data$X, compute = TRUE),
verbose = verbose>1, keep = FALSE, num.threads = num.threads,
control.inla = list(strategy = "gaussian", int.strategy = "eb"),
control.family=list(hyper=list(prec=list(initial=1))),
control.compute=list(config=TRUE), contrasts = NULL, lincomb = NULL #required for excursions
)
if (verbose>0) cat("\t\tDone!\n")
#extract useful stuff from INLA model result
task_estimates <- extract_estimates(
INLA_model_obj=INLA_model_obj,
session_names=session_names,
mask=mask2
) #posterior means of latent task field
hyperpar_posteriors <- get_posterior_densities(
INLA_model_obj=INLA_model_obj,
spde, task_names
) #hyperparameter posterior densities
#construct object to be returned
INLA_model_obj <- switch(return_INLA,
trimmed=trim_INLA_model_obj(INLA_model_obj, minimal=FALSE),
full=INLA_model_obj,
minimal=trim_INLA_model_obj(INLA_model_obj, minimal=TRUE)
)
attr(INLA_model_obj, "format") <- return_INLA
}
} else {
task_estimates <- lapply(result_classical, function(x){ x$estimates })
attr(task_estimates, "GLM_type") <- "classical"
}
# Clean up and return. -------------------------------------------------------
prewhiten_info <- if (do_pw) {
list(phi = avg_AR, sigma_sq = avg_var, AIC = max_AIC)
} else {
NULL
}
result <- list(
task_estimates = task_estimates,
INLA_model_obj = INLA_model_obj,
result_classical = result_classical,
mesh = mesh,
mesh_orig = mesh_orig,
mask = mask2,
design = design,
task_names = task_names,
session_names = session_names,
n_sess_orig = nS_orig,
hyperpar_posteriors = hyperpar_posteriors,
theta_estimates = theta_estimates,
# For joint group model ~~~~~~~~~~~~~
#posterior_Sig_inv = Sig_inv,
y = y_all,
X = X_all_list,
prewhiten_info = prewhiten_info,
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
call = match.call()
)
class(result) <- "BayesGLM"
result
}
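## Minimal call sketch (hypothetical data objects `BOLD` and `design`): one
## session with a T x V BOLD matrix and a T x K design matrix, fit with the
## classical GLM only (no INLA needed when Bayes = FALSE and ar_order = 0).
## sess <- list(sess1 = list(BOLD = BOLD, design = design))
## fit <- BayesGLM(sess, Bayes = FALSE, ar_order = 0)
## head(fit$result_classical$sess1$estimates)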
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/BayesGLM.R
|
#' Group-level Bayesian GLM
#'
#' Performs group-level Bayesian GLM estimation and inference using the joint
#' approach described in Mejia et al. (2020).
#'
#' @inheritSection INLA_Description INLA Requirement
#'
#' @param results Either (1) a length \eqn{N} list of \code{"BayesGLM"} objects,
#' or (2) a length \eqn{N} character vector of files storing \code{"BayesGLM"}
#' objects saved with \code{\link{saveRDS}}.
#' @param contrasts (Optional) A list of contrast vectors that specify the
#' group-level summaries of interest. If \code{NULL}, use contrasts that compute
#' the average of each field (task HRF) across subjects and sessions.
#'
#' Each contrast vector is length \eqn{K * S * N} vector specifying a
#' group-level summary of interest, where \eqn{K} is the number
#' of fields (task HRFs), \eqn{S} is the number of sessions, and \eqn{N} is the
#' number of subjects. For a single subject-session the contrast
#' for the first field would be:
#'
#' \code{contrast1 <- c(1, rep(0, K-1))}
#'
#' and so the full contrast vector representing the group average across
#' sessions and subjects for the first task would be:
#'
#' \code{rep(rep(contrast1, S), N) /S /N}.
#'
#' To obtain the group average for the first task, for just the first sessions
#' from each subject:
#'
#' \code{rep(c(contrast1, rep(0, K*(S-1))), N) /N}.
#'
#' To obtain the mean difference between the first and second sessions, for the
#' first task:
#'
#' \code{rep(c(contrast1, -contrast1, rep(0, K*(S-2))), N) /N}.
#'
#' To obtain the mean across sessions of the first task, just for the first
#' subject:
#'
#' \code{c(rep(contrast1, S), rep(0, K*S*(N-1))) /S}.
#'
#' @param quantiles (Optional) Vector of posterior quantiles to return in
#' addition to the posterior mean.
#' @param excursion_type (For inference only) The type of excursion function for
#' the contrast (">", "<", "!="), or a vector thereof (each element
#' corresponding to one contrast). If \code{NULL}, no inference performed.
#' @param contrast_names (Optional) Names of contrasts.
#' @param gamma (For inference only) Activation threshold for the excursion set,
#' or a vector thereof (each element corresponding to one contrast). Default:
#' \code{0}.
#' @param alpha (For inference only) Significance level for activation for the
#' excursion set, or a vector thereof (each element corresponding to one
#' contrast). Default: \code{.05}.
#' @param nsamp_theta Number of theta values to sample from posterior. Default:
#' \code{50}.
#' @param nsamp_beta Number of beta vectors to sample conditional on each theta
#' value sampled. Default: \code{100}.
#' @param num_cores The number of cores to use for sampling betas in parallel. If
#' \code{NULL} (default), do not run in parallel.
#' @inheritParams verbose_Param
#'
#' @return A list containing the estimates, PPMs and areas of activation for each contrast.
#'
#' @importFrom MASS mvrnorm
#' @importFrom Matrix bdiag crossprod
#'
#' @export
BayesGLM2 <- function(
results,
contrasts = NULL,
quantiles = NULL,
excursion_type=NULL,
contrast_names = NULL,
gamma = 0,
alpha = 0.05,
nsamp_theta = 50,
nsamp_beta = 100,
num_cores = NULL,
verbose = 1){
use_INLA <- TRUE # alternative: use EM model, but it's been removed.
if (!requireNamespace("abind", quietly = TRUE)) {
stop("`BayesGLM2` requires the `abind` package. Please install it.", call. = FALSE)
}
# Check `results`, reading in the files if needed.
results_ok <- FALSE
if (is.character(results)) {
if (!all(file.exists(results))) {
stop("`results` is a character vector, but not all elements are existing files.")
}
results <- lapply(results, readRDS) # [TO DO]: delete w/ each read-in, stuff not needed
}
if (!is.list(results)) {
stop("`results` must be a list of `'BayesGLM'` objects, or a character vector of files with `'BayesGLM'` results saved.")
}
is_BayesGLM <- all(vapply(results, inherits, FALSE, "BayesGLM"))
is_cifti <- all(vapply(results, inherits, FALSE, "BayesGLM_cifti"))
if (!is_BayesGLM && !is_cifti) {
stop("`results` must be a list of all `'BayesGLM'` or all `'BayesGLM_cifti'` objects, or a character vector of files with `'BayesGLM(_cifti)'` results.")
}
rm(is_BayesGLM) # use `is_cifti`
model_names <- if (is_cifti) {
names(results[[1]]$BayesGLM_results)[!vapply(results[[1]]$BayesGLM_results, is.null, FALSE)]
} else {
"BayesGLM"
}
nM <- length(model_names) # models
nN <- length(results) # subjects
nS <- length(results[[1]]$session_names) # sessions
nK <- length(results[[1]]$task_names) # fields
session_names <- results[[1]]$session_names
task_names <- results[[1]]$task_names
# Check that every subject has the same models, sessions, tasks
for (nn in seq(nN)) {
sub_nn <- results[[nn]]
stopifnot(identical(
model_names,
names(sub_nn$BayesGLM_results)[!vapply(sub_nn$BayesGLM_results, is.null, FALSE)]
))
stopifnot(identical(session_names, sub_nn$session_names))
stopifnot(identical(task_names, sub_nn$task_names))
}
# Check `contrasts`.
# `contrasts` should be fields * sessions * subjects
if(!is.null(contrasts) && !is.list(contrasts)) contrasts <- list(contrasts)
if(is.null(contrasts)) {
if (verbose>0) cat('Using a contrast that computes the average across subjects for each task. If other contrasts are desired, provide `contrasts`.\n')
contrasts <- vector('list', length=nK)
names(contrasts) <- paste0(task_names, '_avg')
for (kk in 1:nK) {
# (1/J, 0, 0, ..., 0) for k=1,
# (0, 1/J, 0, ..., 0) for k=2,
# ...,
# (0, 0, ..., 0, 1/J) for k=K
# for each session, for each subject
# where J == S * N
contrast_1 <- c(rep(0, kk-1), 1/(nS*nN), rep(0, nK-kk)) # length nK
contrasts[[kk]] <- rep(rep(contrast_1, nS), nN) # length nK*nS*nN
}
} else {
#Check that each contrast vector is numeric and length J*K
if(any(sapply(contrasts, length) != nK*nS*nN)) {
stop('Each contrast vector must be of length K*S*N (fields times sessions times subjects).')
}
if(any(!sapply(contrasts, is.numeric))) {
stop('Each contrast vector must be numeric, but at least one is not.')
}
if (is.null(names(contrasts))) {
names(contrasts) <- paste0("contrast_", seq(length(contrasts)))
}
}
# Override `names(contrasts)` with `contrast_names` if provided.
if (!is.null(contrast_names)) {
stopifnot(length(contrast_names) == length(contrasts))
names(contrasts) <- contrast_names
}
nC <- length(contrasts)
#Check `quantiles`
if(!is.null(quantiles)){
stopifnot(is.numeric(quantiles))
if(any(quantiles > 1 | quantiles < 0)) stop('All elements of `quantiles` must be between 0 and 1.')
}
do_excur <- !is.null(excursion_type) && (!identical(excursion_type, "none"))
if (do_excur) {
if(length(excursion_type) == 1) excursion_type <- rep(excursion_type, nC)
if(length(gamma) == 1) gamma <- rep(gamma, nC)
if(length(alpha) == 1) alpha <- rep(alpha, nC)
if(length(gamma) != nC) stop('Length of gamma must match number of contrasts or be equal to one.')
if(length(alpha) != nC) stop('Length of alpha must match number of contrasts or be equal to one.')
if(length(excursion_type) != nC) stop('Length of excursion_type must match number of contrasts or be equal to one.')
} else {
excursion_type <- 'none'
}
out <- vector("list", nM)
names(out) <- model_names
for (mm in seq(nM)) {
if (nM>1) { if (verbose>0) cat(model_names[mm], " ~~~~~~~~~~~\n") }
results_mm <- lapply(results, function(x){ x$BayesGLM_results[[mm]] })
# `Mask`
Mask <- lapply(results_mm, function(x){ x$mask })
if (length(unique(vapply(Mask, length, 0))) != 1) {
stop("Unequal mask lengths--check that the input files are in the same resolution.")
}
Mask <- do.call(rbind, Mask)
Mask_sums <- colSums(Mask)
need_Mask <- !all(Mask_sums %in% c(0, nrow(Mask)))
Mask <- apply(Mask, 2, all)
# `mesh`, `spde`, `Amat`
mesh <- results_mm[[1]]$mesh
if (need_Mask) {
mesh <- retro_mask_mesh(mesh, Mask[results_mm[[1]]$mask])
}
if (use_INLA) {
spde <- INLA::inla.spde2.matern(mesh)
Amat <- INLA::inla.spde.make.A(mesh) #Psi_{km} (for one task and subject, a VxN matrix, V=num_vox, N=num_mesh)
Amat <- Amat[mesh$idx$loc,]
} else {
stop()
}
Amat.tot <- bdiag(rep(list(Amat), nK)) #Psi_m from paper (VKxNK)
# Collecting theta posteriors from subject models
Qmu_theta <- Q_theta <- 0
# Collecting X and y cross-products from subject models (for posterior distribution of beta)
Xcros.all <- Xycros.all <- vector("list", nN)
for (nn in seq(nN)) {
# [TO DO] test this
if (need_Mask) {
results_mm[[nn]] <- retro_mask_BGLM(
results_mm[[nn]], Mask[results_mm[[nn]]$mask]
)
}
# Check that mesh has same neighborhood structure
if (!isTRUE(all.equal(results_mm[[nn]]$mesh$faces, mesh$faces, check.attributes=FALSE))) {
stop(paste0(
'Subject ', nn,
' does not have the same mesh neighborhood structure as subject 1.',
' Check meshes for discrepancies.'
))
}
#Collect posterior mean and precision of hyperparameters
mu_theta_mm <- results_mm[[nn]]$INLA_model_obj$misc$theta.mode
Q_theta_mm <- solve(results_mm[[nn]]$INLA_model_obj$misc$cov.intern)
#iteratively compute Q_theta and mu_theta (mean and precision of q(theta|y))
Qmu_theta <- Qmu_theta + as.vector(Q_theta_mm%*%mu_theta_mm)
Q_theta <- Q_theta + Q_theta_mm
rm(mu_theta_mm, Q_theta_mm)
# compute Xcros = Psi'X'XPsi and Xycros = Psi'X'y
# (all these matrices for a specific subject mm)
y_vec <- results_mm[[nn]]$y
X_list <- results_mm[[nn]]$X
if (length(X_list) > 1) {
n_sess <- length(X_list)
X_list <- Matrix::bdiag(X_list)
Amat.final <- Matrix::bdiag(rep(list(Amat.tot),n_sess))
} else {
X_list <- X_list[[1]]
Amat.final <- Amat.tot
}
Xmat <- X_list %*% Amat.final
Xcros.all[[nn]] <- Matrix::crossprod(Xmat)
Xycros.all[[nn]] <- Matrix::crossprod(Xmat, y_vec)
}
rm(results_mm, y_vec, X_list, Xmat) # save memory
if (use_INLA) {
mu_theta <- solve(Q_theta, Qmu_theta) #mu_theta = posterior mean of q(theta|y) (Normal approximation) from paper, Q_theta = posterior precision
#### DRAW SAMPLES FROM q(theta|y)
#theta.tmp <- mvrnorm(nsamp_theta, mu_theta, solve(Q_theta))
if (verbose>0) cat(paste0('Sampling ',nsamp_theta,' posterior samples of thetas \n'))
theta.samp <- INLA::inla.qsample(n=nsamp_theta, Q = Q_theta, mu = mu_theta)
#### COMPUTE WEIGHT OF EACH SAMPLES FROM q(theta|y) BASED ON PRIOR
if (verbose>0) cat('Computing weights for each theta sample \n')
logwt <- rep(NA, nsamp_theta)
for (tt in seq(nsamp_theta)) {
logwt[tt] <- F.logwt(theta.samp[,tt], spde, mu_theta, Q_theta, nN)
}
#weights to apply to each posterior sample of theta
wt.tmp <- exp(logwt - max(logwt))
wt <- wt.tmp/(sum(wt.tmp))
} else {
# theta.samp <- qsample(n=nsamp_theta, Q = Q_theta, mu = mu_theta) # ?
mu_theta <- mu_theta / nN
theta.samp <- as.matrix(mu_theta)
wt <- 1
}
#get posterior quantities of beta, conditional on a value of theta
if (verbose>0) cat(paste0('Sampling ',nsamp_beta,' betas for each value of theta \n'))
if (is.null(num_cores)) {
#6 minutes in simulation
beta.posteriors <- apply(
theta.samp,
MARGIN = 2,
FUN = beta.posterior.thetasamp,
spde = spde,
Xcros = Xcros.all,
Xycros = Xycros.all,
contrasts = contrasts,
quantiles = quantiles,
excursion_type = excursion_type,
gamma = gamma,
alpha = alpha,
nsamp_beta = nsamp_beta
)
} else {
if (!requireNamespace("parallel", quietly = TRUE)) {
stop(
"`BayesGLM2` requires the `parallel` package. Please install it.",
call. = FALSE
)
}
#2 minutes in simulation (4 cores)
max_num_cores <- min(parallel::detectCores() - 1, 25)
num_cores <- min(max_num_cores, num_cores)
cl <- parallel::makeCluster(num_cores)
beta.posteriors <- parallel::parApply(
cl, theta.samp,
MARGIN=2,
FUN=beta.posterior.thetasamp,
spde=spde,
Xcros = Xcros.all,
Xycros = Xycros.all,
contrasts=contrasts,
quantiles=quantiles,
excursion_type=excursion_type,
gamma=gamma,
alpha=alpha,
nsamp_beta=nsamp_beta
)
parallel::stopCluster(cl)
}
## Sum over samples using weights
if (verbose>0) cat('Computing weighted summaries over beta samples \n')
## Posterior mean of each contrast
betas.all <- lapply(beta.posteriors, function(x) return(x$mu))
betas.wt <- mapply(
function(x, a){return(x*a)},
betas.all, wt, SIMPLIFY=FALSE
) #apply weight to each element of betas.all (one for each theta sample)
betas.summ <- apply(abind::abind(betas.wt, along=3), MARGIN = c(1,2), sum) #N x L (# of contrasts)
dimnames(betas.summ) <- NULL
## Posterior quantiles of each contrast
num_quantiles <- length(quantiles)
if(num_quantiles > 0){
quantiles.summ <- vector('list', num_quantiles)
names(quantiles.summ) <- quantiles
for(iq in 1:num_quantiles){
quantiles.all_iq <- lapply(beta.posteriors, function(x) return(x$quantiles[[iq]]))
betas.wt_iq <- mapply(function(x, a){return(x*a)}, quantiles.all_iq, wt, SIMPLIFY=FALSE) #apply weight to each element of quantiles.all_iq (one for each theta sample)
quantiles.summ[[iq]] <- apply(abind::abind(betas.wt_iq, along=3), MARGIN = c(1,2), sum) #N x L (# of contrasts)
dimnames(quantiles.summ[[iq]]) <- NULL
}
} else {
quantiles.summ <- NULL
}
## Posterior probabilities and activations
if(do_excur){
ppm.all <- lapply(beta.posteriors, function(x) return(x$F))
ppm.wt <- mapply(function(x, a){return(x*a)}, ppm.all, wt, SIMPLIFY=FALSE) #apply weight to each element of ppm.all (one for each theta sample)
ppm.summ <- apply(abind::abind(ppm.wt, along=3), MARGIN = c(1,2), sum) #N x L (# of contrasts)
dimnames(ppm.summ) <- NULL
active <- array(0, dim=dim(ppm.summ))
for (cc in seq(nC)) { active[ppm.summ[,cc] > (1-alpha[cc]),cc] <- 1 }
} else {
ppm.summ <- active <- NULL
}
### Save results
out[[mm]] <- list(
estimates = betas.summ,
quantiles = quantiles.summ,
ppm = ppm.summ,
active = active,
mask = Mask,
Amat = Amat # not Amat.final?
)
if (nM>1) { cat("~~~~~~~~~~~~~~~~~~~~\n\n") }
}
out <- list(
model_results = out,
contrasts = contrasts,
excursion_type = excursion_type,
task_names=task_names,
session_names=session_names,
gamma = gamma,
alpha = alpha,
nsamp_theta = nsamp_theta,
nsamp_beta = nsamp_beta
)
class(out) <- "BayesGLM2"
if (is_cifti) {
out <- list(
contrast_estimates_xii = as.xifti(
out$model_results$cortex_left$estimates,
out$model_results$cortex_left$mask,
out$model_results$cortex_right$estimates,
out$model_results$cortex_right$mask
),
activations_xii = NULL,
BayesGLM2_results = out
)
out$contrast_estimates_xii$meta$cifti$names <- names(contrasts)
if (do_excur) {
act_xii <- as.xifti(
out$BayesGLM2_results$model_results$cortex_left$active,
out$BayesGLM2_results$model_results$cortex_left$mask,
out$BayesGLM2_results$model_results$cortex_right$active,
out$BayesGLM2_results$model_results$cortex_right$mask
)
out$activations_xii <- convert_xifti(act_xii, "dlabel", colors='red')
names(out$activations_xii$meta$cifti$labels) <- names(contrasts)
}
class(out) <- "BayesGLM2_cifti"
}
out
}
#' @rdname BayesGLM2
#' @export
BayesGLM_group <- function(
results,
contrasts = NULL,
quantiles = NULL,
excursion_type=NULL,
gamma = 0,
alpha = 0.05,
nsamp_theta = 50,
nsamp_beta = 100,
num_cores = NULL,
verbose = 1){
BayesGLM2(
results=results,
contrasts=contrasts,
quantiles=quantiles,
excursion_type=excursion_type,
gamma=gamma, alpha=alpha,
nsamp_theta=nsamp_theta, nsamp_beta=nsamp_beta,
num_cores=num_cores, verbose=verbose
)
}
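# Usage sketch (hypothetical object names; "fit1.rds" etc. are assumed to hold
# single-subject "BayesGLM" or "BayesGLM_cifti" results saved earlier):
#
#   results <- lapply(c("fit1.rds", "fit2.rds", "fit3.rds"), readRDS)
#   grp <- BayesGLM2(
#     results,
#     excursion_type = ">",  # test for positive activation
#     gamma = 0.1,           # activation threshold
#     alpha = 0.05           # significance level for the excursion sets
#   )
#   summary(grp)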
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/BayesGLM2.R
|
#' Beta posterior theta sampling
#'
#' Internal function used in joint approach to group-analysis
#'
#' @inheritSection INLA_Description INLA Requirement
#'
#' @param theta A single sample of theta (hyperparameters) from q(theta|y)
#' @param spde A SPDE object from inla.spde2.matern() function.
#' @param Xcros A list (one element per subject) of crossproducts of the design matrix (\code{Psi'X'XPsi}).
#' @param Xycros A list (one element per subject) of crossproducts of the design matrix and BOLD \code{y} (\code{Psi'X'y}).
#' @param contrasts A list of vectors of length M*K specifying the contrasts of interest.
#' @param quantiles Vector of posterior quantiles to return in addition to the posterior mean
#' @param excursion_type Vector of excursion function type (">", "<", "!=") for each contrast
#' @param gamma Vector of activation thresholds for each contrast
#' @param alpha Significance level for activation for the excursion sets
#' @param nsamp_beta The number of samples to draw from full conditional of beta given the current value of theta (p(beta|theta,y))
#'
#' @importFrom excursions excursions.mc
#' @importFrom Matrix Diagonal
#'
#' @return A list containing \code{mu}, \code{quantiles}, and \code{F}
#'
#' @keywords internal
beta.posterior.thetasamp <- function(
theta, spde, Xcros, Xycros, contrasts,
quantiles, excursion_type, gamma, alpha, nsamp_beta=100){
use_INLA <- TRUE
n.mesh <- spde$n.spde
prec.error <- exp(theta[1])
theta_spde <- matrix(theta[-1], nrow=2) #2xK matrix of the hyperparameters (2 per task)
K <- ncol(theta_spde)
M <- length(Xcros)
use_EM <- all(sapply(c("M0","M1","M2"), function(x) x %in% names(spde)))
  #construct prior precision matrix for beta, Q_theta, for the given sampled value of theta
# For EM
if (use_EM) {
Q.beta <- apply(theta_spde, 2, function(theta_k) {
theta_k <- exp(theta_k) ^ 2
out <-
theta_k[1] * (theta_k[2] ^ 2 * spde$M0 + theta_k[2] * spde$M1 + spde$M2)
return(out)
})
}
# For INLA
if (!use_EM) {
Q.beta <- list()
for (k in 1:K) {
theta_k <-
theta_spde[, k] #theta[(2:3) + 2*(k-1)] #1:2, 2:3, 4:5, ...
Q.beta[[k]] <-
INLA::inla.spde2.precision(spde, theta = theta_k) # prior precision for a single task k
}
}
  N <- dim(Q.beta[[1]])[1] #number of mesh locations
  if(N != n.mesh) stop('Length of betas does not match number of vertices in mesh. Inform developer.')
  beta.samples <- NULL
  # ~5 seconds per subject with PARDISO
  nS <- 1
  Q <- Q_theta <- Matrix::bdiag(Q.beta) #Q_theta in the paper (block-diagonal over the K tasks)
for(mm in seq(M)) {
if(nrow(Q) != nrow(Xcros[[mm]])) {
nS <- nrow(Xcros[[mm]]) / nrow(Q)
Q_theta <- Matrix::bdiag(rep(list(Q),nS))
}
# compute posterior mean and precision of beta|theta
Q_mm <- prec.error*Xcros[[mm]] + Q_theta #Q_m in paper
    if (use_INLA) {
      mu_mm <- INLA::inla.qsolve(Q_mm, prec.error*Xycros[[mm]]) #mu_m in paper
      # draw samples from pi(beta_m|theta,y)
      beta_samp_mm <- INLA::inla.qsample(n = nsamp_beta, Q = Q_mm, mu = mu_mm)
    } else {
      cholQ_mm <- Matrix::Cholesky(Q_mm, LDL = FALSE) #LL' factor, as required by cholQsample
      mu_mm <- Matrix::solve(cholQ_mm, prec.error*Xycros[[mm]], system = "A")
      # draw samples from pi(beta_m|theta,y)
      beta_samp_mm <- cholQsample(n = nsamp_beta, cholQ = cholQ_mm, mu = mu_mm)
    }
# concatenate samples over models
beta.samples <- rbind(beta.samples, beta_samp_mm)
}
if (excursion_type[1] == 'none') do_excur <- FALSE else do_excur <- TRUE
# Loop over contrasts
nC <- length(contrasts)
mu.contr <- matrix(NA, nrow=n.mesh, ncol=nC)
if(do_excur) F.contr <- mu.contr else F.contr <- NULL
if(!is.null(quantiles)){
num_quantiles <- length(quantiles)
quantiles.contr <- rep(list(mu.contr), num_quantiles)
names(quantiles.contr) <- quantiles
} else {
num_quantiles <- 0
quantiles.contr <- NULL
}
for (cc in 1:nC) {
#Construct "A" matrix from paper (linear combinations)
ctr.mat <- kronecker(t(contrasts[[cc]]), Diagonal(n.mesh, 1))
#beta.mean.pop.contr <- as.vector(ctr.mat%*%beta.mean.pop.mat) # NKx1 or Nx1
samples_cc <- as.matrix(ctr.mat%*%beta.samples) # N x nsamp_beta
mu.contr[,cc] <- rowMeans(samples_cc) #compute mean over beta samples
if(num_quantiles > 0){
for(iq in 1:num_quantiles){
quantiles.contr[[iq]][,cc] <- apply(samples_cc, 1, quantile, quantiles[iq])
}
}
# Estimate excursions set for current contrast
if (do_excur) {
excur_cc <- excursions::excursions.mc(
samples_cc, u = gamma[cc], type = excursion_type[cc], alpha = alpha[cc]
)
F.contr[,cc] <- excur_cc$F
}
}
list(
mu = mu.contr,
quantiles = quantiles.contr,
F = F.contr
)
}
#' F logwt
#'
#' Internal function used in joint approach to group-analysis for combining across models
#'
#' @inheritSection INLA_Description INLA Requirement
#'
#' @param theta A vector of hyperparameter values at which to compute the posterior log density
#' @param spde A SPDE object from inla.spde2.matern() function, determines prior precision matrix
#' @param mu_theta Posterior mean from combined subject-level models.
#' @param Q_theta Posterior precision matrix from combined subject-level models.
#' @param nN Number of subjects
#' @return The log prior density of theta, weighted by \code{(1 - nN)} (this
#'   corrects for the prior being counted once in each of the \code{nN}
#'   subject-level posteriors)
#'
#' @importFrom stats dgamma
#'
#' @keywords internal
F.logwt <- function(theta, spde, mu_theta, Q_theta, nN){
#mu_theta - posterior mean from combined subject-level models
#Q_theta - posterior precision matrix from combined subject-level models
#nN - number of subjects
a <- 1; b <- 5e-5
n.spde <- (length(theta) - 1)/2
mu.tmp <- spde$f$hyper$theta1$param[1:2]
mu <- rep(mu.tmp, n.spde)
Q.tmp <- matrix(spde$f$hyper$theta1$param[-(1:2)], 2, 2, byrow = TRUE)
Q <- kronecker(diag(1, n.spde, n.spde), Q.tmp)
## Prior density
pr.delta <- dgamma(exp(theta[1]), a, b, log = TRUE) #log prior density on residual precision
pr.tk <- as.vector(-t(theta[-1] - mu)%*%Q%*%(theta[-1] - mu))/2 + log(det(Q))/2 - dim(Q)[1]*log(2*pi)/2 #joint log prior density on 2K spde parameters
pr.theta <- pr.delta + pr.tk
(1-nN)*pr.theta
}
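# Why the (1 - nN) exponent: each subject-level posterior already includes one
# factor of the prior pi(theta), so the product of nN subject posteriors counts
# the prior nN times. Weighting each theta sample by pi(theta)^(1 - nN), that
# is, adding (1 - nN) * log pi(theta) on the log scale, leaves a single prior
# factor in the combined group-level posterior.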
#' Sample from a multivariate normal with mean and precision
#'
#' @param n number of samples
#' @param mu mean vector (length = p)
#' @param Q sparse p x p positive definite precision matrix (class = dgCMatrix)
#'
#' @return A p x n matrix of samples (one sample per column, matching the
#'   orientation of \code{INLA::inla.qsample})
#'
#' @importFrom Matrix Cholesky
#' @keywords internal
qsample <- function(n, mu, Q) {
  p <- length(mu)
  if(p != nrow(Q) || p != ncol(Q)) stop("Dimension mismatch between mu and Q.")
  cholQ <- Matrix::Cholesky(Q, LDL = FALSE) #LL' factorization, as required by cholQsample
  cholQsample(n = n, mu = mu, cholQ = cholQ)
}
#' Sample from the multivariate normal distribution with Cholesky(Q)
#'
#' @param n number of samples
#' @param mu mean vector
#' @param cholQ Cholesky decomposition of the precision (found via
#'   \code{Matrix::Cholesky(Q, LDL = FALSE)}, so that \eqn{Q = P'LL'P})
#'
#' @return A \eqn{p \times n} matrix of samples from the MVN distribution
#'   (one sample per column), where \eqn{p} is the length of \code{mu}.
#'
#' @importFrom stats rnorm
#' @importFrom Matrix solve
#' @keywords internal
cholQsample <- function(n, mu, cholQ) {
  p <- length(mu)
  if(p != nrow(cholQ) || p != ncol(cholQ)) stop("Dimension mismatch between mu and cholQ.")
  Z <- matrix(rnorm(n*p), nrow = p, ncol = n) #standard normal draws, one sample per column
  # With Q = P'LL'P, x = P' L^{-T} z has covariance P' L^{-T} L^{-1} P = Q^{-1}:
  # solve L'w = z (system "Lt"), then apply the inverse permutation (system "Pt").
  out <- Matrix::solve(cholQ, Matrix::solve(cholQ, Z, system = "Lt"), system = "Pt")
  out <- out + as.vector(mu) #recycle the mean down each column
  return(out)
}
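# Sanity check for the precision-based sampler (a sketch, not run; the diagonal
# precision makes the target variances easy to read off):
#
#   Q <- Matrix::sparseMatrix(i = 1:3, j = 1:3, x = c(1, 4, 9), symmetric = TRUE)
#   ch <- Matrix::Cholesky(Q, LDL = FALSE)
#   smp <- cholQsample(1e4, mu = rep(0, 3), cholQ = ch)
#   apply(as.matrix(smp), 1, var) # approximately 1, 1/4, 1/9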
#' Summarize a \code{"BayesGLM2"} object
#'
#' Summary method for class \code{"BayesGLM2"}
#'
#' @param object Object of class \code{"BayesGLM2"}.
#' @param ... further arguments passed to or from other methods.
#' @export
#' @return A \code{"summary.BayesGLM2"} object, a list summarizing the
#' properties of \code{object}.
#' @method summary BayesGLM2
summary.BayesGLM2 <- function(object, ...) {
  x <- list(
    n_contrasts = length(object$contrasts),
    tasks = object$task_names,
    sessions = object$session_names,
    n_sess_orig = object$n_sess_orig, # may be NULL; used by the print method
    n_loc_total = vapply(lapply(object$model_results, '[[', "mask"), length, 0),
    n_loc_modeled = vapply(lapply(object$model_results, '[[', "mask"), sum, 0),
    excursion_type = object$excursion_type
  )
class(x) <- "summary.BayesGLM2"
return(x)
}
#' @rdname summary.BayesGLM2
#' @export
#'
#' @param x Object of class \code{"summary.BayesGLM2"}.
#' @return \code{NULL}, invisibly.
#' @method print summary.BayesGLM2
print.summary.BayesGLM2 <- function(x, ...) {
cat("====BayesGLM2 result===================\n")
cat("Tasks: ", paste0("(", length(x$tasks), ") ", paste(x$tasks, collapse=", ")), "\n")
if (length(x$sessions)==1 && x$sessions == "session_combined") {
cat("Sessions: ", paste0("(", x$n_sess_orig, ", combined) \n"))
} else {
cat("Sessions: ", paste0("(", length(x$sessions), ") ", paste(x$sessions, collapse=", ")), "\n")
}
cat("Locations:\n")
for (ii in seq(length(x$n_loc_total))) {
cat(
" ", paste0(names(x$n_loc_total)[ii], ": ", x$n_loc_modeled[[ii]]),
"modeled,", x$n_loc_total[[ii]], "total", "\n"
)
}
cat("Excursion:", paste(x$excursion_type, collapse=", "), "\n")
cat("\n")
invisible(NULL)
}
#' @rdname summary.BayesGLM2
#' @export
#'
#' @return \code{NULL}, invisibly.
#' @method print BayesGLM2
print.BayesGLM2 <- function(x, ...) {
print.summary.BayesGLM2(summary(x))
}
#' Summarize a \code{"BayesGLM2_cifti"} object
#'
#' Summary method for class \code{"BayesGLM2_cifti"}
#'
#' @param object Object of class \code{"BayesGLM2_cifti"}.
#' @param ... further arguments passed to or from other methods.
#' @export
#' @return A \code{"summary.BayesGLM2_cifti"} object, a list summarizing the
#' properties of \code{object}.
#' @method summary BayesGLM2_cifti
summary.BayesGLM2_cifti <- function(object, ...) {
x <- summary.BayesGLM2(object$BayesGLM2_results)
class(x) <- "summary.BayesGLM2_cifti"
x
}
#' @rdname summary.BayesGLM2_cifti
#' @export
#'
#' @param x Object of class \code{"summary.BayesGLM2_cifti"}.
#' @return \code{NULL}, invisibly.
#' @method print summary.BayesGLM2_cifti
print.summary.BayesGLM2_cifti <- function(x, ...) {
print.summary.BayesGLM2(x, ...)
}
#' @rdname summary.BayesGLM2_cifti
#' @export
#'
#' @return \code{NULL}, invisibly.
#' @method print BayesGLM2_cifti
print.BayesGLM2_cifti <- function(x, ...) {
print.summary.BayesGLM2_cifti(summary(x))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/BayesGLM2_utils.R
|
#' BayesGLM for CIFTI
#'
#' Performs spatial Bayesian GLM on the cortical surface for fMRI task activation
#'
#' @section INLA latent fields limit:
#' INLA computation times increase greatly when the number of columns in the
#' design matrix exceeds five. So if there are more than five tasks, or
#' three or more tasks each with its temporal derivative being modeled as a
#' task, \code{BayesGLM} will raise a warning. In cases like the latter, we
#' recommend modeling the temporal derivatives as nuisance signals using the
#' \code{nuisance} argument, rather than modeling them as tasks.
#'
#' @inheritSection INLA_Description INLA Requirement
#'
#' @section Connectome Workbench Requirement:
#' This function uses a system wrapper for the 'wb_command' executable. The
#' user must first download and install the Connectome Workbench, available
#' from https://www.humanconnectome.org/software/get-connectome-workbench .
#'
#' @param cifti_fname fMRI timeseries data in CIFTI format ("*.dtseries.nii").
#' For single-session analysis this can be a file path to a CIFTI file or a
#' \code{"xifti"} object from the \code{ciftiTools} package. For multi-session
#' analysis this can be a vector of file paths or a list of \code{"xifti"}
#' objects.
#' @param surfL_fname Left cortex surface geometry in GIFTI format
#' ("*.surf.gii"). This can be a file path to a GIFTI file or a \code{"surf"}
#' object from the \code{ciftiTools} package. This argument is only used if
#' \code{brainstructures} includes \code{"left"} and \code{Bayes==TRUE}. If
#' it's not provided, the HCP group-average inflated surface included in the
#' \code{ciftiTools} package will be used.
#' @param surfR_fname Right cortex surface geometry in GIFTI format
#' ("*.surf.gii"). This can be a file path to a GIFTI file or a \code{"surf"}
#' object from the \code{ciftiTools} package. This argument is only used if
#' \code{brainstructures} includes \code{"right"} and \code{Bayes==TRUE}. If
#' it's not provided, the HCP group-average inflated surface included in the
#' \code{ciftiTools} package will be used.
#' @param brainstructures Character vector indicating which brain structure(s)
#' to analyze: \code{"left"} (left cortical surface) and/or \code{"right"}
#' (right cortical surface). Default: \code{c("left","right")} (both
#' hemispheres). Note that the subcortical models have not yet been implemented.
#' @param design,onsets,TR Either provide \code{design} directly, or provide
#' both \code{onsets} and \code{TR} from which the design matrix or matrices
#' will be constructed.
#'
#' \code{design} is a \eqn{T \times K} task design matrix. Each column
#' represents the expected BOLD response due to each task, a convolution of
#' the hemodynamic response function (HRF) and the task stimulus. Note that
#' the scale of the regressors will affect the scale and interpretation of the
#' beta coefficients, so imposing a proper scale is recommended; see the
#' \code{scale_design} argument, which by default is \code{TRUE}. Task names
#' should be the column names, if not provided by the \code{task_names}
#' argument. For multi-session modeling, this argument should be a list of
#' such matrices. To model HRF derivatives, calculate the derivatives of the
#' task columns beforehand (see the helper function \code{\link{cderiv}} which
#' computes the discrete central derivative) and either add them to
#' \code{design} to model them as tasks, or \code{nuisance} to model them as
#' nuisance signals; it's recommended to then drop the first and last
#' timepoints because the discrete central derivative doesn't exist at the
#' time series boundaries. Do note that INLA computation times increase
#' greatly if the design matrix has more than five columns, so it might be
#' required to add these derivatives to \code{nuisance} rather than
#' \code{design}.
#'
#' \code{onsets} is an \eqn{L}-length list in which the name of each element is
#' the name of the corresponding task, and the value of each element is a
#' matrix of onsets (first column) and durations (second column) for each
#'   stimulus (each row) of the corresponding task. The units of both columns
#'   are seconds. For multi-session modeling, this argument should be a list of
#' such lists. To model HRF derivatives, use the arguments \code{dHRF} and
#' \code{dHRF_as}. If \code{dHRF==0} or \code{dHRF_as=="nuisance"}, the total
#' number of columns in the design matrix, \eqn{K}, will equal \eqn{L}.
#' If \code{dHRF_as=="task"}, \eqn{K} will equal \eqn{L} times \code{dHRF+1}.
#'
#' \code{TR} is the temporal resolution of the data, in seconds.
#'
#' @param nuisance (Optional) A \eqn{T \times J} matrix of nuisance signals.
#' These are regressed from the fMRI data and the design matrix prior to the
#' GLM computation. For multi-session modeling, this argument should be a list
#' of such matrices.
#' @param dHRF,dHRF_as Only applicable if \code{onsets} and \code{TR} are
#' provided. These arguments enable the modeling of HRF derivatives.
#'
#' Set \code{dHRF} to \code{1} to model the temporal derivatives of each task,
#' \code{2} to add the second derivatives too, or \code{0} to not model the
#'   derivatives. Default: \code{0}.
#'
#' If \code{dHRF > 0}, \code{dHRF_as} controls whether the derivatives are
#' modeled as \code{"nuisance"} signals to regress out, \code{"tasks"}, or
#' \code{"auto"} (default) to treat them as tasks unless the total number of
#' columns in the design matrix would exceed five.
#'
#' @param hpf,DCT Add DCT bases to \code{nuisance} to apply a temporal
#' high-pass filter to the data? Only one of these arguments should be provided.
#' \code{hpf} should be the filter frequency; if it is provided, \code{TR}
#' must be provided too. The number of DCT bases to include will be computed
#' to yield a filter with as close a frequency to \code{hpf} as possible.
#' Alternatively, \code{DCT} can be provided to directly specify the number
#' of DCT bases to include.
#'
#' Default: \code{DCT=4}. For typical \code{TR}, four DCT bases amounts to a
#' lower frequency cutoff than the approximately .01 Hz used in most studies.
#' We selected this default to err on the side of retaining more low-frequency
#' information, but we recommend setting these arguments to values most
#' appropriate for the data analysis at hand.
#'
#'   Using at least two DCT bases serves the same purpose as including linear
#'   and quadratic drift terms in the design matrix. So if DCT detrending is
#'   being used, there is no need to add linear and quadratic drift terms to
#'   \code{nuisance}.
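#'
#'   As a rough rule of thumb for the frequency-to-bases conversion (an
#'   approximation; the exact value is computed via
#'   \code{fMRItools::dct_convert}): DCT basis \eqn{k} completes \eqn{k/2}
#'   cycles over the run, so about \code{f * 2 * T * TR} bases correspond to a
#'   cutoff of \code{f} Hz. For example, \eqn{T = 1200} volumes at
#'   \code{TR = 0.72} seconds with \code{hpf = 0.01} yields roughly 17 bases.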
#' @param resamp_res The number of vertices to which each cortical surface
#' should be resampled, or \code{NULL} to not resample. For computational
#' feasibility, a value of \code{10000} or lower is recommended.
#' @inheritParams task_names_Param
#' @inheritParams session_names_Param
#' @inheritParams scale_BOLD_Param
#' @inheritParams scale_design_Param
#' @inheritParams Bayes_Param
# @inheritParams EM_Param
#' @inheritParams ar_order_Param
#' @inheritParams ar_smooth_Param
#' @inheritParams aic_Param
#' @inheritParams num.threads_Param
#' @inheritParams return_INLA_Param
#' @inheritParams verbose_Param
#' @inheritParams combine_sessions_Param
#' @param meanTol,varTol Tolerance for mean and variance of each data location.
#' Locations which do not meet these thresholds are masked out of the analysis.
#' Default: \code{1e-6} for both.
# @inheritParams emTol_Param
#' @inheritParams trim_INLA_Param
#'
#' @return An object of class \code{"BayesGLM_cifti"}: a list with elements
#'  \describe{
#'    \item{task_estimates_xii}{The task coefficient estimates as \code{"xifti"} objects: a list with elements \code{Bayes} and \code{classical}, each a list of estimates by session.}
#'    \item{task_names}{The names of the tasks.}
#'    \item{session_names}{The names of the sessions.}
#'    \item{n_sess_orig}{The number of sessions (before averaging, if applicable).}
#'    \item{design}{The task part of the design matrix, after centering and scaling, but before any nuisance regression or prewhitening.}
#'    \item{BayesGLM_results}{The full \code{"BayesGLM"} results for each hemisphere.}
#'  }
#'
# @importFrom ciftiTools read_cifti resample_gifti as.xifti remove_xifti
#' @import ciftiTools
#' @importFrom fMRItools unmask_mat dct_bases dct_convert match_input is_posNum
#' @importFrom matrixStats rowVars rowSums2 colVars
#' @importFrom parallel detectCores
#'
#' @export
BayesGLM_cifti <- function(
cifti_fname,
surfL_fname=NULL,
surfR_fname=NULL,
brainstructures=c('left','right'),
design=NULL,
onsets=NULL,
TR=NULL,
nuisance=NULL,
dHRF=c(0, 1, 2),
dHRF_as=c("auto", "nuisance", "task"),
hpf=NULL,
DCT=if(is.null(hpf)) {4} else {NULL},
resamp_res=10000,
# Below arguments shared with `BayesGLM`
task_names = NULL,
session_names = NULL,
combine_sessions = TRUE,
scale_BOLD = c("auto", "mean", "sd", "none"),
scale_design = TRUE,
Bayes = TRUE,
#EM = FALSE,
ar_order = 6,
ar_smooth = 5,
aic = FALSE,
num.threads = 4,
return_INLA = c("trimmed", "full", "minimal"),
verbose = 1,
meanTol = 1e-6,
varTol = 1e-6#,emTol = 1e-3,
){
EM <- FALSE
emTol <- 1e-3
# Preliminary steps. ---------------------------------------------------------
## Check simple arguments.
  ## These checks are in a separate function because they are shared with
  ## `BayesGLM`.
argChecks <- BayesGLM_argChecks(
combine_sessions = combine_sessions,
scale_BOLD = scale_BOLD,
scale_design = scale_design,
Bayes = Bayes,
EM = EM,
ar_order = ar_order,
ar_smooth = ar_smooth,
aic = aic,
num.threads = num.threads,
return_INLA = return_INLA,
verbose = verbose,
meanTol = meanTol,
varTol = varTol,
emTol = emTol
)
scale_BOLD <- argChecks$scale_BOLD
do_Bayesian <- argChecks$do_Bayesian
do_EM <- argChecks$do_EM
do_pw <- argChecks$do_pw
return_INLA <- argChecks$return_INLA
need_mesh <- do_Bayesian || (do_pw && ar_smooth > 0)
# Brain structures.
if ("both" %in% brainstructures) { brainstructures <- c("left", "right") }
if ("all" %in% brainstructures) {
message(
"`brainstructures` is `all`, so using both left and right cortex. ",
"Skipping subcortex (not implemented yet)."
)
brainstructures <- c("left","right") # "subcortical"
}
brainstructures <- fMRItools::match_input(
brainstructures, c("left","right"),
user_value_label="brainstructures"
)
do_left <- ('left' %in% brainstructures)
do_right <- ('right' %in% brainstructures)
# Nuisance arguments.
dHRF <- as.numeric(match.arg(as.character(dHRF), c("0", "1", "2")))
if (dHRF == 0) {
if (identical(dHRF_as, "nuisance") || identical(dHRF_as, "task")) {
warning("`dHRF_as` is only applicable if `dHRF > 0`. If `dHRF == 0`, there's no need to specify `dHRF_as`.")
}
}
dHRF_as <- match.arg(dHRF_as, c("auto", "nuisance", "task"))
if (!is.null(DCT)) {
stopifnot(fMRItools::is_posNum(DCT, zero_ok=TRUE) && DCT==round(DCT))
if (DCT==0) { DCT <- NULL }
}
if (!is.null(hpf)) {
stopifnot(fMRItools::is_posNum(hpf, zero_ok=TRUE))
if (hpf==0) { hpf <- NULL }
}
# xifti.
# Coerce to: a (length one) character vector, or a (length one) list of
# \code{"xifti"} objects.
is_xifti <- FALSE
if (is.character(cifti_fname)) {
NULL
} else if (is.xifti(cifti_fname, messages=FALSE)) {
is_xifti <- TRUE
cifti_fname <- list(cifti_fname)
} else if (is.list(cifti_fname)) {
if (all(vapply(cifti_fname, is.character, FALSE)) && all(vapply(cifti_fname, length, 0)==1)) {
cifti_fname <- as.character(cifti_fname)
} else if (all(vapply(cifti_fname, is.xifti, messages=FALSE, FALSE))) {
is_xifti <- TRUE
}
} else {
stop('`cifti_fname` should be a character vector or list of `"xifti"` objects.')
}
# [TO DO]: If input is a `"xifti"`, infer `resamp_res`
# or maybe just add surfaces to the `"xifti"` using `add_surf` and that will handle the
# difference in resolution.
## Sessions. -----------------------------------------------------------------
# Name sessions and check compatibility of multi-session arguments
nS <- nS_orig <- length(cifti_fname)
if (nS==1) {
combine_sessions <- FALSE
if (is.null(session_names)) session_names <- 'single_session'
if (!is.null(design)) design <- list(single_session = design)
if (!is.null(onsets)) onsets <- list(single_session = onsets)
if (!is.null(nuisance)) nuisance <- list(single_session = nuisance)
} else {
if (is.null(session_names)) session_names <- paste0('session', 1:nS)
# if (length(session_names) == 1) { session_names <- paste0(session_names, 1:nsess) } # allow prefix?
if (!is.null(design) && (length(design) != nS)) {
stop(paste(
"If multiple sessions provided (because `cifti_fname` is a vector), ",
"`design` must be a list of length equal to the number of sessions ",
" (or `NULL`, if onsets provided)."
))
}
if (!is.null(onsets) && (length(onsets) != nS)) {
stop(paste(
"If multiple sessions provided (because `cifti_fname` is a vector), ",
"`onsets` must be a list of length equal to the number of sessions ",
" (or `NULL`, if `design` provided)."
))
}
if (!is.null(nuisance) && (length(nuisance) != nS)) {
stop(paste(
"If multiple sessions provided (because `cifti_fname` is a vector), ",
"`nuisance` must be a list of length equal to the number of sessions ",
" (or `NULL`)."
))
}
}
if (length(session_names) != nS) {
stop('The length of `session_names` must match the number of sessions in `cifti_fname`.')
}
if(is.null(nuisance)) nuisance <- vector("list",length = nS)
## Surfaces: check or get. ---------------------------------------------------
surf_list <- list(left=NULL, right=NULL)
if (need_mesh) {
if (do_left) {
if (is.null(surfL_fname)) {
if (verbose>0) cat("Using `ciftiTools` default inflated surface for the left cortex.\n")
surfL_fname <- ciftiTools.files()$surf["left"]
}
surf_list$left <- read_surf(surfL_fname, resamp_res=resamp_res)
}
if (do_right) {
if (is.null(surfR_fname)) {
if (verbose>0) cat("Using `ciftiTools` default inflated surface for the right cortex.\n")
surfR_fname <- ciftiTools.files()$surf["right"]
}
surf_list$right <- read_surf(surfR_fname, resamp_res=resamp_res)
}
} else {
surf_list <- list(
left = list(vertices=NULL, faces=NULL),
right = list(vertices=NULL, faces=NULL)
)
}
## `design`, `onsets`, `task_names`. -----------------------------------------
## Also, determine if we are doing multi-session modeling.
if (!xor(is.null(design), is.null(onsets))) { stop('`design` or `onsets` must be provided, but not both.') }
if (!is.null(design)) {
do_multisesh <- inherits(design, "list")
if (!do_multisesh) { stopifnot(inherits(design, "matrix") || inherits(design, "data.frame")) }
d1 <- if (do_multisesh) { design[[1]] } else { design }
task_names <- if (!is.null(task_names)) {
task_names
} else if (!is.null(names(d1))) {
names(d1)
} else {
paste0("beta", seq(ncol(d1)))
}
rm(d1)
}
if (!is.null(onsets)) {
if (is.null(TR)) { stop('Please provide `TR` if onsets provided') }
do_multisesh <- inherits(onsets[[1]], "list")
o1 <- if (do_multisesh) { onsets[[1]] } else { onsets }
if (!do_multisesh) { stopifnot(inherits(o1, "matrix") || inherits(o1, "data.frame")) }
task_names <- if (!is.null(task_names)) {
task_names
} else if (!is.null(names(o1))) {
names(o1)
} else {
paste0("beta", seq(length(o1)))
}
rm(o1)
}
# Data setup. ----------------------------------------------------------------
if (verbose>0) cat('Setting up data:\n')
## xifti things. -------------------------------------------------------------
### For each session, separate the CIFTI data into left/right/sub and read in files
BOLD_list <- list(left=NULL, right=NULL)
mwallL <- mwallR <- NULL
ntime <- vector("numeric", nS)
for (ss in seq(nS)) {
if (nS > 1) if (verbose>0) cat(paste0('\tReading in data for session ', ss,'.\n'))
if (is_xifti) {
xii_ss <- cifti_fname[[ss]]
xii_ss_res <- infer_resolution(xii_ss)
      if (!is.null(resamp_res) && any(xii_ss_res != resamp_res)) {
xii_ss <- resample_xifti(xii_ss, resamp_res=resamp_res)
}
} else {
xii_ss <- read_cifti(
cifti_fname[ss], brainstructures=brainstructures,
resamp_res=resamp_res
)
}
mwallL_ss <- xii_ss$meta$cortex$medial_wall_mask$left
mwallR_ss <- xii_ss$meta$cortex$medial_wall_mask$right
ntime[ss] <- ncol(xii_ss)
# Get medial wall mask, or check that it matches.
if (ss == 1) {
if (do_left) { mwallL <- mwallL_ss }
if (do_right) { mwallR <- mwallR_ss }
# [TO DO] Check compatibility with `surf_list`
} else {
if (do_left) {
stopifnot(length(mwallL) == length(mwallL_ss))
stopifnot(all(mwallL == mwallL_ss))
}
if (do_right) {
stopifnot(length(mwallR) == length(mwallR_ss))
stopifnot(all(mwallR == mwallR_ss))
}
}
# Grab BOLD data (input NAs in medial wall locations)
if (do_left) {
if (is.null(xii_ss$data$cortex_left)) {
stop("No left cortex data for session ", ss, ".")
}
BOLD_list[["left"]][[ss]] <- fMRItools::unmask_mat(xii_ss$data$cortex_left, mwallL)
}
if (do_right) {
if (is.null(xii_ss$data$cortex_right)) {
stop("No right cortex data for session ", ss, ".")
}
BOLD_list[["right"]][[ss]] <- fMRItools::unmask_mat(xii_ss$data$cortex_right, mwallR)
}
}
BOLD_list <- BOLD_list[!vapply(BOLD_list, is.null, FALSE)]
if (need_mesh) { surf_list <- surf_list[!vapply(surf_list, is.null, FALSE)] }
## Design and nuisance matrices. ---------------------------------------------
if (is.null(design)) {
if (verbose>0) cat("\tMaking design matrices.\n")
design <- vector("list", nS)
for (ss in seq(nS)) {
HRF_ss <- make_HRFs(
onsets[[ss]], TR=TR, duration=ntime[ss],
dHRF=dHRF, dHRF_as=dHRF_as,
verbose=ss==1
)
design[[ss]] <- HRF_ss$design
if (!is.null(HRF_ss$nuisance)) {
if (!is.null(nuisance[[ss]])) {
nuisance[[ss]] <- cbind(nuisance[[ss]], HRF_ss$nuisance)
} else {
nuisance[[ss]] <- HRF_ss$nuisance
}
}
}
}
# Check that design matrix names are consistent across sessions.
if (nS > 1) {
tmp <- sapply(design, colnames)
if(length(task_names) == 1) {
num_names <- length(unique(tmp))
if (num_names > 1) stop('task names must match across sessions for multi-session modeling')
} else {
num_names <- apply(tmp, 1, function(x) length(unique(x))) #number of unique names per row
if (max(num_names) > 1) stop('task names must match across sessions for multi-session modeling')
}
}
# Warn the user if the number of design matrix columns exceeds five.
if (Bayes && ncol(design[[1]]) > 5) {
message("The number of design matrix columns exceeds five. INLA computation may be very slow. To avoid stalling, you can quit this function call now and modify the analysis. For example, model some signals as nuisance rather than tasks.")
Sys.sleep(10)
}
task_names <- colnames(design[[1]]) # because if dHRF > 0, there will be more task_names.
# Scale design matrix. (Here, rather than in `BayesGLM`, b/c it's returned.)
design <- if (scale_design) {
sapply(design, scale_design_mat, simplify = F)
} else {
sapply(design, scale, scale = F, simplify = F)
}
# Add DCT bases.
DCTs <- vector("numeric", nS)
for (ss in 1:nS) {
# DCT highpass filter
if (!is.null(hpf) || !is.null(DCT)) {
# Get the num. of bases for this session.
if (!is.null(hpf)) {
DCTs[ss] <- round(fMRItools::dct_convert(ntime[ss], TR, f=hpf))
} else {
DCTs[ss] <- DCT
}
# Generate the bases and add them.
DCTb_ss <- fMRItools::dct_bases(ntime[ss], DCTs[ss])
if (DCTs[ss] > 0) {
if (!is.null(nuisance[[ss]])) {
nuisance[[ss]] <- cbind(nuisance[[ss]], DCTb_ss)
} else {
nuisance[[ss]] <- DCTb_ss
}
}
}
}
# Do GLM. --------------------------------------------------------------------
BayesGLM_results <- list(left = NULL, right = NULL)
# >> Loop through brainstructures to complete the analyses on the different hemispheres ----
for (bb in brainstructures) {
if (verbose>0) cat(paste0(toupper(bb)," cortex analysis:\n"))
# set up session list
session_data <- vector('list', nS)
names(session_data) <- session_names
for (ss in seq(nS)) {
session_data[[ss]] <- list(
BOLD = t(BOLD_list[[bb]][[ss]]),
design = design[[ss]]
)
if (!is.null(nuisance[[ss]])) {
session_data[[ss]]$nuisance <- nuisance[[ss]]
}
}
BayesGLM_out <- BayesGLM(
data = session_data,
vertices = surf_list[[bb]]$vertices,
faces = surf_list[[bb]]$faces,
mesh = NULL,
mask = NULL,
task_names = NULL, # in `session_data`
session_names = session_names,
combine_sessions = combine_sessions,
scale_BOLD = scale_BOLD,
scale_design = FALSE, # done above
Bayes = do_Bayesian,
#EM = do_EM,
ar_order = ar_order,
ar_smooth = ar_smooth,
aic = aic,
num.threads = num.threads,
return_INLA = return_INLA,
verbose = verbose,
meanTol=meanTol,
varTol=varTol#,emTol=emTol,
)
BayesGLM_results[[bb]] <- BayesGLM_out
# update session info if averaged over sessions
if (bb == brainstructures[1] && combine_sessions) {
session_names <- BayesGLM_out$session_names
nS <- 1
}
rm(BayesGLM_out); gc()
}
names(BayesGLM_results)[names(BayesGLM_results)=="left"] <- "cortex_left"
names(BayesGLM_results)[names(BayesGLM_results)=="right"] <- "cortex_right"
### CONSTRUCT BETA ESTIMATES AS CIFTI OBJECTS
if (verbose>0) cat("Formatting results.\n")
task_cifti_classical <- task_cifti <- vector('list', nS)
names(task_cifti_classical) <- names(task_cifti) <- session_names
datL <- datR <- NULL
for (ss in seq(nS)) {
# CLASSICAL GLM
if (do_left) {
datL <- BayesGLM_results$cortex_left$result_classical[[ss]]$estimates
mwallL <- !is.na(datL[,1]) # update b/c mask2 can change the medial wall
datL <- datL[mwallL,]
}
if (do_right) {
datR <- BayesGLM_results$cortex_right$result_classical[[ss]]$estimates
mwallR <- !is.na(datR[,1])
datR <- datR[mwallR,]
}
task_cifti_classical[[ss]] <- as.xifti(
cortexL = datL,
cortexL_mwall = mwallL,
cortexR = datR,
cortexR_mwall = mwallR
)
task_cifti_classical[[ss]]$meta$cifti$names <- task_names
# BAYESIAN GLM
if (do_Bayesian) {
if (do_left) {
datL <- BayesGLM_results$cortex_left$task_estimates[[ss]]
mwallL <- !is.na(datL[,1])
datL <- datL[mwallL,]
}
if (do_right) {
datR <- BayesGLM_results$cortex_right$task_estimates[[ss]]
mwallR <- !is.na(datR[,1])
datR <- datR[mwallR,]
}
task_cifti[[ss]] <- as.xifti(
cortexL = datL,
cortexL_mwall = mwallL,
cortexR = datR,
cortexR_mwall = mwallR
)
task_cifti[[ss]]$meta$cifti$names <- task_names
}
}
result <- list(
task_estimates_xii = list(
Bayes = task_cifti,
classical = task_cifti_classical
),
task_names = task_names,
session_names = session_names,
n_sess_orig = nS_orig,
# task part of design matrix after centering/scaling but
# before nuisance regression and prewhitening.
design = design,
BayesGLM_results = BayesGLM_results
)
if (verbose>0) cat('Done!\n')
class(result) <- "BayesGLM_cifti"
result
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/BayesGLM_cifti.R
|
#' Check INLA and PARDISO
#'
#' @param require_PARDISO Is PARDISO required? Default: \code{TRUE}.
#' @return \code{NULL}, invisibly
#'
#' @keywords internal
check_INLA <- function(require_PARDISO=TRUE){
# Check packages -------------------------------------------------------------
# Check to see that the INLA package is installed
if (!requireNamespace("INLA", quietly = TRUE)) {
stop("This function requires the `INLA` package. See www.r-inla.org/download")
}
# Check to see if PARDISO is installed
if (require_PARDISO) {
if (any(grepl("FAILURE", toupper(INLA::inla.pardiso.check())))) {
warning("Consider enabling `PARDISO` for faster computation. See `inla.pardiso()`")
} else {
INLA::inla.setOption(smtp='pardiso')
}
#inla.pardiso()
}
invisible(NULL)
}
#' Make data list for \code{estimate_model}
#'
#' Make data list to be passed to \code{estimate_model}
#'
#' @param y Vectorized BOLD data (all voxels, sessions, etc.)
#' @param X List (length = number of sessions) of sparse design matrices size TVxVK from each session, each created using `organize_data()`
#' @param betas List (length = number of tasks) of beta objects from organize_replicates
#' @param repls List (length = number of tasks) of repl objects from organize_replicates
#'
#' @return List
#'
#' @importFrom Matrix bdiag
#'
#' @keywords internal
make_data_list <- function(y, X, betas, repls){
# Check length/dimensions of y, X, elements of betas and repls all match
nx <- length(betas) #check same as length(repls)
#figure out nvox
#check dim(X)
#check length of betas and repls
numel <- 1 + length(betas) + length(repls) + 1
model_data <- vector('list', length=numel)
names(model_data) <- c('y', 'X', names(betas), names(repls))
model_data$y <- y
model_data$X <- bdiag(X) #row/col structure: sess1_beta1, sess1_beta2, sess2_beta1, sess2_beta2, ...
nbeta <- length(betas)
for(i in 1:nbeta){
model_data[[2+i]] <- betas[[i]]
model_data[[2+nbeta+i]] <- repls[[i]]
}
return(model_data)
}
#' Extract Estimates of Activation
#'
#' Obtains the posterior mean or other summary statistic for each latent field
#'
#' @param INLA_model_obj An object of class \code{"inla"}, a result of a call to
#' \code{inla}.
#' @param session_names Vector of fMRI session names
#' @param mask (Optional) Original mask applied to data before model fitting
#' @param stat A string representing the posterior summary statistic to be returned
#'
#' @return Estimates from inla model
#'
#' @keywords internal
extract_estimates <- function(INLA_model_obj, session_names, mask=NULL, stat='mean'){
if (!inherits(INLA_model_obj, "inla")) { stop("Object is not of class 'inla'") }
res.beta <- INLA_model_obj$summary.random
nbeta <- length(res.beta)
task_names <- names(res.beta)
nS <- length(session_names)
n_loc <- length(res.beta[[1]]$mean)/nS #number of locations for which beta is estimated
if(!is.null(mask)) {
V <- length(mask)
if(sum(mask) != n_loc) warning('Number of nonzeros in mask does not equal the number of data locations in the model')
} else {
V <- n_loc
mask <- rep(1,V)
}
betas <- vector('list', nS)
names(betas) <- session_names
stat_names <- names(res.beta[[1]])
if(! (stat %in% stat_names) ) stop(paste0('stat must be one of following: ', paste(stat_names, collapse = ', ')))
stat_ind <- which(stat_names==stat)
for (ss in seq(nS)) {
    inds_ss <- (1:n_loc) + (ss-1)*n_loc #indices of beta vector corresponding to session ss
betas_ss <- matrix(NA, nrow=n_loc, ncol=nbeta)
for (bb in seq(nbeta)) {
est_iv <- res.beta[[bb]][[stat_ind]][inds_ss]
betas_ss[,bb] <- est_iv
}
betas[[ss]] <- matrix(NA, nrow=V, ncol=nbeta)
betas[[ss]][mask==1,] <- betas_ss
colnames(betas[[ss]]) <- task_names
}
attr(betas, "GLM_type") <- "Bayesian"
return(betas)
}
#' Extracts posterior density estimates for hyperparameters
#'
#' @inheritSection INLA_Description INLA Requirement
#'
#' @param INLA_model_obj An object of class \code{"inla"}, a result of a call to
#' \code{inla()}
#' @param spde The model used for the latent fields in the \code{inla()} call,
#' an object of class \code{"inla.spde"}
#' @param task_names Descriptive names of model regressors (tasks).
#'
#' @return Long-form data frame containing posterior densities for the
#' hyperparameters associated with each latent field
#'
#' @keywords internal
get_posterior_densities <- function(INLA_model_obj, spde, task_names){
numbeta <- length(task_names)
for(b in 1:numbeta){
name_b <- task_names[b]
result.spde.b <- INLA::inla.spde2.result(INLA_model_obj, name_b, spde)
# Kappa and Tau
log_kappa.b <- as.data.frame(result.spde.b$marginals.log.kappa$kappa.1)
log_tau.b <- as.data.frame(result.spde.b$marginals.log.tau$tau.1)
names(log_kappa.b) <- names(log_tau.b) <- c('value','density')
log_kappa.b$param <- 'log_kappa'
log_tau.b$param <- 'log_tau'
df.b <- rbind(log_kappa.b, log_tau.b)
df.b$beta <- name_b
if(b == 1) df <- df.b else df <- rbind(df, df.b)
}
df[,c('beta','param','value','density')]
}
#' Summarize a \code{"BayesGLM"} object
#'
#' Summary method for class \code{"BayesGLM"}
#'
#' @param object Object of class \code{"BayesGLM"}.
#' @param ... further arguments passed to or from other methods.
#' @export
#' @return A \code{"summary.BayesGLM"} object, a list summarizing the properties
#' of \code{object}.
#' @method summary BayesGLM
summary.BayesGLM <- function(object, ...) {
x <- list(
tasks = object$task_names,
sessions = object$session_names,
n_sess_orig = object$n_sess_orig,
n_loc_total = length(object$mask),
n_loc_modeled = sum(object$mask),
GLM_type = attr(object$task_estimates, "GLM_type")
)
class(x) <- "summary.BayesGLM"
return(x)
}
#' @rdname summary.BayesGLM
#' @export
#'
#' @param x Object of class \code{"summary.BayesGLM"}.
#' @return \code{NULL}, invisibly.
#' @method print summary.BayesGLM
print.summary.BayesGLM <- function(x, ...) {
cat("====BayesGLM result====================\n")
cat("Tasks: ", paste0("(", length(x$tasks), ") ", paste(x$tasks, collapse=", ")), "\n")
if (length(x$sessions)==1 && x$sessions == "session_combined") {
cat("Sessions: ", paste0("(", x$n_sess_orig, ", combined) \n"))
} else {
cat("Sessions: ", paste0("(", length(x$sessions), ") ", paste(x$sessions, collapse=", ")), "\n")
}
cat("Locations:", x$n_loc_modeled, "modeled,", x$n_loc_total, "total", "\n")
cat("GLM type: ", x$GLM_type, "\n")
cat("\n")
invisible(NULL)
}
#' @rdname summary.BayesGLM
#' @export
#'
#' @return \code{NULL}, invisibly.
#' @method print BayesGLM
print.BayesGLM <- function(x, ...) {
print.summary.BayesGLM(summary(x))
}
#' Summarize a \code{"BayesGLM_cifti"} object
#'
#' Summary method for class \code{"BayesGLM_cifti"}
#'
#' @param object Object of class \code{"BayesGLM_cifti"}.
#' @param ... further arguments passed to or from other methods.
#' @export
#' @return A \code{"summary.BayesGLM_cifti"} object, a list summarizing the
#' properties of \code{object}.
#' @method summary BayesGLM_cifti
summary.BayesGLM_cifti <- function(object, ...) {
x <- lapply(object$BayesGLM_results, summary)
x <- x[!vapply(object$BayesGLM_results, is.null, FALSE)]
x <- list(
tasks = x[[1]]$tasks,
sessions = x[[1]]$sessions,
n_sess_orig = x[[1]]$n_sess_orig,
n_loc_total = lapply(x, '[[', "n_loc_total"),
n_loc_modeled = lapply(x, '[[', "n_loc_modeled"),
#xii = summary(x$task_estimates_xii$classical[[1]]),
GLM_type = x[[1]]$GLM_type
)
class(x) <- "summary.BayesGLM_cifti"
return(x)
}
#' @rdname summary.BayesGLM_cifti
#' @export
#'
#' @param x Object of class \code{"summary.BayesGLM_cifti"}.
#' @return \code{NULL}, invisibly.
#' @method print summary.BayesGLM_cifti
print.summary.BayesGLM_cifti <- function(x, ...) {
cat("====BayesGLM_cifti result==============\n")
cat("Tasks: ", paste0("(", length(x$tasks), ") ", paste(x$tasks, collapse=", ")), "\n")
if (length(x$sessions)==1 && x$sessions == "session_combined") {
cat("Sessions: ", paste0("(", x$n_sess_orig, ", combined) \n"))
} else {
cat("Sessions: ", paste0("(", length(x$sessions), ") ", paste(x$sessions, collapse=", ")), "\n")
}
cat("Locations:\n")
for (ii in seq(length(x$n_loc_total))) {
cat(
" ", paste0(names(x$n_loc_total)[ii], ": ", x$n_loc_modeled[[ii]]),
"modeled,", x$n_loc_total[[ii]], "total", "\n"
)
}
cat("GLM type: ", x$GLM_type, "\n")
cat("\n")
invisible(NULL)
}
#' @rdname summary.BayesGLM_cifti
#' @export
#'
#' @return \code{NULL}, invisibly.
#' @method print BayesGLM_cifti
print.BayesGLM_cifti <- function(x, ...) {
print.summary.BayesGLM_cifti(summary(x))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/BayesGLM_utils.R
|
## usethis namespace: start
#' @useDynLib BayesfMRI, .registration = TRUE
#' @importFrom Rcpp evalCpp
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/BayesfMRI-package.R
|
#' Fixed point function for the joint BayesGLMEM update algorithm
#'
#' @param theta a list containing kappa2, phi, and sigma2, in that order
#' @param spde the spde object
#' @param model_data the model_data object containing \code{y} and \code{X}
#' @param Psi a conversion matrix (N by V) (or N by n)
#' @param K number of covariates
#' @param A The value for Matrix::crossprod(X%*%Psi) (saves time on computation)
#' @param cl parallelization cluster
#' @param Ns The number of samples used to approximate traces using the Hutchinson
#' estimator. If set to 0, the exact trace is found.
#'
#' @importFrom Matrix bdiag colSums crossprod solve
#' @importFrom parallel detectCores makeCluster parSapply
#' @importFrom stats optimize
#'
#' @return a vector with the same length as \code{theta}, the EM updates
#' @keywords internal
GLMEM_fixptseparate <- function(theta, spde, model_data, Psi, K, A, cl, Ns = 50) {
kappa2_inds <- seq(K)
phi_inds <- seq(K) + K
sigma2_ind <- 2*K + 1
Q_k <-
mapply(
spde_Q_phi,
kappa2 = theta[kappa2_inds],
phi = theta[phi_inds],
MoreArgs = list(spde = spde),
SIMPLIFY = F
)
Q_new <- Matrix::bdiag(Q_k)
n_sess_em <- nrow(A) / nrow(Q_new)
if(n_sess_em > 1) Q_new <- Matrix::bdiag(lapply(seq(n_sess_em),function(x) Q_new))
Sig_inv <- Q_new + A/theta[sigma2_ind]
m <- Matrix::crossprod(model_data$X%*%Psi,model_data$y) / theta[sigma2_ind]
mu <- Matrix::solve(Sig_inv, m)
# if (verbose>0) cat("First 6 values of mu:", head(mu@x), "\n")
X_Psi_mu <- model_data$X%*%Psi%*%mu
cp_X_Psi_mu <- Matrix::crossprod(X_Psi_mu)
# >> Update sigma2 ----
if(Ns == 0) {
Sigma_new <- Matrix::solve(Sig_inv)
traceAEww <-
cp_X_Psi_mu +
sum(Matrix::colSums(A*Sigma_new))
kappa_fn <- neg_kappa_fn
}
if(Ns > 0) {
# set.seed(1) # UNCOMMENT WHEN DEBUGGING
Vh <- matrix(sample(x = c(-1,1), size = Ns * nrow(A), replace = TRUE),
nrow(A), Ns)
P <- Matrix::solve(Sig_inv, Vh)
traceAEww <-
as.numeric(Matrix::crossprod(mu,A %*% mu)) +
TrSigB(P,A,Vh)
# if (verbose>0) cat("TrAEww =",traceAEww,"\n")
kappa_fn <- neg_kappa_fn2
}
sigma2_new <-
as.numeric(crossprod(model_data$y) -
2*Matrix::crossprod(model_data$y,X_Psi_mu) +
traceAEww
) / length(model_data$y)
kappa2_new <- theta[kappa2_inds]
phi_new <- theta[phi_inds]
# kp <- parallel::parSapply(
# cl = cl,
kp <- sapply(
seq(K),
FUN = function(k,
spde,
theta,
kappa2_inds,
phi_inds,
P,
mu,
Vh,
n_sess
) {
# source("~/github/BayesfMRI/R/EM_utils.R") # For debugging
# Rcpp::sourceCpp("src/rcpp_sparsechol.cpp")
big_K <- length(kappa2_inds)
# big_N <- spde$n.spde
big_N <- nrow(spde$Cmat)
n_sess_em <- length(mu) / (big_K * big_N)
k_inds <- c(sapply(seq(n_sess_em), function(ns) {
seq( big_N * (big_K * (ns - 1) + k - 1) + 1, big_N * (big_K * (ns - 1) + k))
}))
# >> Update kappa2 ----
# Move all of this to C
prep_optim <-
prep_kappa2_optim(
spde = spde,
mu = mu[k_inds, ],
phi = theta[phi_inds][k],
P = P[k_inds, ],
vh = Vh[k_inds, ]
)
# rcpp_list <- create_listRcpp(spde = spde)
# kappa2_new <- updateKappa2(phi = theta[phi_inds][k], in_list = spde, n_sess = n_sess_em,a_star = prep_optim$a_star, b_star = prep_optim$b_star, tol=tol)
# if (verbose>0) cat("k =",k,"a_star =",prep_optim$a_star,"b_star =",prep_optim$b_star,"\n")
# if (verbose>0) cat("objective =",optim_output_k$objective,", new_kappa2 =", optim_output_k$minimum,"\n")
optim_output_k <-
stats::optimize(
f = neg_kappa_fn4,
spde = spde,
a_star = prep_optim$a_star,
b_star = prep_optim$b_star,
n_sess = n_sess,
lower = 0,
upper = 50
)
# optim_output_k <-
# stats::optimize(
# f = neg_kappa_fn2,
# spde = spde,
# phi = theta[phi_inds][k],
# P = P[k_inds,],
# mu = mu[k_inds, ],
# Vh = Vh[k_inds,], # Comment this out if using neg_kappa_fn
# lower = 0,
# upper = 50
# )
kappa2_new <- optim_output_k$minimum
# >> Update phi ----
Tr_QEww <-
TrQEww(kappa2 = kappa2_new, spde = spde, P = P[k_inds,], mu = mu[k_inds,],Vh = Vh[k_inds,])
# if (verbose>0) cat("TrQEww =", Tr_QEww, "\n")
phi_new <-
Tr_QEww / (4 * pi * big_N * n_sess_em)
return(c(kappa2_new, phi_new))
},
spde = spde,
theta = theta,
kappa2_inds = kappa2_inds,
phi_inds = phi_inds,
P = P,
mu = mu,
Vh = Vh,
n_sess = n_sess_em
)
# parallel::stopCluster(cl)
return(c(kp[1,],kp[2,],sigma2_new))
}
#' Objective function for the BayesGLM EM algorithm
#'
#' This returns the negative of the expected log-likelihood function.
#'
#' @param theta a vector containing kappa2, phi, and sigma2, in that order
#' @param spde the spde object
#' @param model_data the model_data object containing \code{y} and \code{X}
#' @param Psi a conversion matrix (N by V) (or N by n)
#' @param K number of covariates
#' @param A The value for Matrix::crossprod(X%*%Psi) (saves time on computation)
#' @param num.threads Needed for SQUAREM (it is an argument to the fixed-point functions)
#' @param Ns The number of samples used to approximate traces using the Hutchinson
#' estimator. If set to 0, the exact trace is found.
#'
#' @return A scalar value for the negative expected log-likelihood
#' @keywords internal
GLMEM_objfn <- function(theta, spde, model_data, Psi, K, A, num.threads = NULL, Ns = NULL) {
if(length(theta) > 3) { # This condition means that parameters are being updated separately
kappa2_inds <- seq(K)
phi_inds <- seq(K) + K
sigma2_ind <- 2 *K + 1
} else {
kappa2_inds <- 1
phi_inds <- 2
sigma2_ind <- 3
}
TN <- length(model_data$y)
Q_k <- mapply(spde_Q_phi,
kappa2 = theta[kappa2_inds],
phi = theta[phi_inds],
MoreArgs = list(spde = spde),
SIMPLIFY = F)
if(length(Q_k) > 1) {
Q <- Matrix::bdiag(Q_k)
} else {
Q <- Matrix::bdiag(rep(Q_k,K))
}
Sig_inv <- Q + A/theta[sigma2_ind]
m <- Matrix::crossprod(model_data$X%*%Psi,model_data$y) / theta[sigma2_ind]
mu <- Matrix::solve(Sig_inv,m)
XB <- model_data$X%*%Psi%*%mu
y_res <- model_data$y - XB
ELL_out <- as.numeric(-TN * log(theta[sigma2_ind]) / 2 - Matrix::crossprod(y_res))
return(-ELL_out)
}
#' Calculate the SPDE covariance
#'
#' @param kappa2 A scalar
#' @param phi A scalar
#' @param spde An object containing the information about the
#' mesh structure for the SPDE prior
#'
#' @return The SPDE prior matrix
#' @keywords internal
spde_Q_phi <- function(kappa2, phi, spde) {
# Cmat <- spde$M0
# Gmat <- (spde$M1 + Matrix::t(spde$M1))/2
# GtCinvG <- spde$M2
Q <- (kappa2*spde$Cmat + 2*spde$Gmat + spde$GtCinvG/kappa2) / (4*pi*phi)
return(Q)
}
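# Toy illustration (a sketch with a hypothetical 2-vertex "mesh" in which all
# SPDE matrices are the identity):
#
#   spde_toy <- list(Cmat = Matrix::Diagonal(2), Gmat = Matrix::Diagonal(2),
#                    GtCinvG = Matrix::Diagonal(2))
#   spde_Q_phi(kappa2 = 2, phi = 1, spde = spde_toy)
#   # equals (2*C + 2*G + GtCinvG/2) / (4*pi) = Diagonal(2) * 4.5 / (4*pi)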
#' Make the full SPDE precision based on theta, the spde, and the number of sessions
#'
#' @param theta the hyperparameter theta vector of length K * 2 + 1, where the
#' first K elements are the kappas, the next K elements are the phis, and the
#' last element (unused here) corresponds to sigma2
#' @param spde a list containing three spde elements: Cmat, Gmat, and GtCinvG
#' @param n_sess The integer number of sessions
#'
#' @return An SPDE prior matrix
#' @keywords internal
make_Q <- function(theta, spde, n_sess) {
K <- (length(theta) - 1) / 2
Q_k <- sapply(seq(K), function(k) {
out <- (theta[k]*spde$Cmat + 2*spde$Gmat + spde$GtCinvG/theta[k]) / (4*pi*theta[k + K])
}, simplify = F)
Q <- Matrix::bdiag(Q_k)
if(n_sess > 1) Q <- Matrix::bdiag(rep(list(Q),n_sess))
return(Q)
}
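# Continuing the toy spde sketched above, with K = 1 task and two sessions
# (theta = c(kappa2, phi, sigma2); sigma2 is unused here):
#
#   make_Q(theta = c(2, 1, 0.1), spde = spde_toy, n_sess = 2) # 4x4 block-diagonal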
#' Gives the portion of the Q matrix independent of phi
#'
#' @param kappa2 scalar
#' @param spde An spde object
#'
#' @return a dgCMatrix
#' @keywords internal
Q_prime <- function(kappa2, spde) {
# Cmat <- spde$M0
# Gmat <- (spde$M1 + Matrix::t(spde$M1))/2
# GtCinvG <- spde$M2
Cmat <- Gmat <- GtCinvG <- NULL
list2env(spde, envir = environment())
Q <- (kappa2*Cmat + 2*Gmat + GtCinvG/kappa2)
return(Q)
}
#' The negative of the objective function for kappa
#'
#' @param kappa2 scalar
#' @param spde an spde object
#' @param phi scalar
#' @param Sigma dgCMatrix
#' @param mu dgeMatrix
#' @importFrom Matrix diag chol bdiag crossprod colSums
#'
#' @return a scalar
#' @keywords internal
neg_kappa_fn <- function(kappa2, spde, phi, Sigma, mu) {
Qt <- Q_prime(kappa2, spde)
KK <- nrow(Sigma) / spde$mesh$n
if(nrow(Sigma) != spde$mesh$n & KK %% 1 == 0) Qt <- Matrix::bdiag(rep(list(Qt),KK))
log_det_Q <- sum(2*log(Matrix::diag(Matrix::chol(Qt,pivot = T)))) # compare direct determinant here
trace_QEww <- (sum(Matrix::colSums(Qt*Sigma)) + Matrix::crossprod(mu,Qt%*%mu))@x
out <- trace_QEww / (4*pi*phi) - log_det_Q
return(out)
}
#' The negative of the objective function for kappa without Sig_inv
#'
#' @param kappa2 scalar
#' @param spde an spde object
#' @param phi scalar
#' @param P Matrix of dimension nk by N_s found by \code{solve(Sig_inv,Vh)}
#' @param mu dgeMatrix
#' @param Vh random matrix of -1 and 1 of dim \code{dim(P)}
#' @importFrom Matrix diag chol bdiag
#'
#' @return a scalar
#' @keywords internal
neg_kappa_fn2 <- function(kappa2, spde, phi, P, mu, Vh) {
Qt <- Q_prime(kappa2, spde)
n_spde <- spde$mesh$n
if(is.null(n_spde)) n_spde <- spde$n.spde
KK <- nrow(P) / n_spde
if(nrow(P) != n_spde & KK %% 1 == 0) Qt <- Matrix::bdiag(rep(list(Qt),KK))
# In RcppEigen, there are two functions: symbolic Cholesky (analyzePattern) and the numeric cholesky (factorize)
log_det_Q <- sum(2*log(Matrix::diag(Matrix::chol(Qt,pivot = T)))) # compare direct determinant here
trace_QEww <-
TrQEww(
kappa2 = kappa2,
spde = spde,
P = P,
mu = mu,
Vh = Vh
)
out <- trace_QEww / (4*pi*phi) - log_det_Q
return(out)
}
#' Streamlined negative objective function for kappa2 using precompiled values
#'
#' @param kappa2 scalar
#' @param spde an spde object
#' @param a_star precomputed coefficient (scalar)
#' @param b_star precomputed coefficient (scalar)
#' @param n_sess number of sessions (scalar)
#'
#' @return scalar output of the negative objective function
#' @keywords internal
neg_kappa_fn3 <- function(kappa2, spde, a_star, b_star, n_sess) {
Qt <- Q_prime(kappa2, spde)
Rt <- Matrix::chol(Qt, pivot = T)
if(n_sess > 1) Rt <- Matrix::bdiag(rep(list(Rt),n_sess))
log_det_Q <- sum(2*log(Matrix::diag(Rt)))
out <- kappa2*a_star + b_star / kappa2 - log_det_Q
return(out)
}
#' Streamlined negative objective function for kappa2 using precompiled values
#'
#' @param kappa2 scalar
#' @param spde an spde object (a list containing \code{Cmat}, \code{Gmat}, and \code{GtCinvG})
#' @param a_star precomputed coefficient (scalar)
#' @param b_star precomputed coefficient (scalar)
#' @param n_sess number of sessions (scalar)
#'
#' @return scalar output of the negative objective function
#' @keywords internal
neg_kappa_fn4 <- function(kappa2, spde, a_star, b_star, n_sess) {
log_det_Qt <- .logDetQt(kappa2, spde, n_sess)
out <- kappa2*a_star + b_star / kappa2 - log_det_Qt
return(out)
}
#' Find values for coefficients used in objective function for kappa2
#'
#' @param spde an spde object
#' @param mu the mean
#' @param phi scale parameter
#' @param P Matrix of dimension nk by N_s found by \code{solve(Sig_inv,Vh)}
#' @param vh random matrix of -1 and 1 of dim \code{dim(P)}
#'
#' @return a list with two elements, which are precomputed coefficients for the
#' optimization function
#' @keywords internal
prep_kappa2_optim <- function(spde, mu, phi, P, vh) {
Cmat <- spde$Cmat
Gmat <- spde$Gmat
GtCinvG <- spde$GtCinvG
n_spde <- nrow(Cmat)
n_sess_em <- nrow(P) / n_spde
if(n_sess_em > 1) {
Cmat <- Matrix::bdiag(rep(list(Cmat),n_sess_em))
Gmat <- Matrix::bdiag(rep(list(Gmat),n_sess_em))
GtCinvG <- Matrix::bdiag(rep(list(GtCinvG),n_sess_em))
}
muCmu <- Matrix::crossprod(mu,Cmat%*%mu)@x
diagPCV <- Matrix::diag(Matrix::crossprod(P,Cmat%*%vh))
TrCSig <- sum(diagPCV) / ncol(vh)
a_star <- (muCmu + TrCSig) / (4*pi*phi)
muGCGmu <- Matrix::crossprod(mu, GtCinvG %*% mu)@x
diagPGCGV <- Matrix::diag(Matrix::crossprod(P, GtCinvG %*% vh))
TrGCGSig <- sum(diagPGCGV) / ncol(vh)
b_star <- (muGCGmu + TrGCGSig) / (4*pi*phi)
# if (verbose>0) cat("muCmu =", muCmu, ", muGCGmu =",muGCGmu,", TrCSig =",TrCSig,", TrGCGSig =",TrGCGSig,"\n")
# if (verbose>0) cat("head(diagPCV) =",head(diagPCV), ", head(diagPGCGV) =", head(diagPGCGV),"\n")
return(
list(a_star = as.numeric(a_star),
b_star = as.numeric(b_star))
)
}
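# Note on the kappa2 objective: writing Q'(kappa2) = kappa2*C + 2*G + G'C^{-1}G/kappa2,
# the expected quadratic form E[w' Q'(kappa2) w] / (4*pi*phi) decomposes as
#   kappa2 * a_star + b_star / kappa2 + constant,
# with a_star = (mu'C mu + tr(C Sigma)) / (4*pi*phi) and
# b_star = (mu' G'C^{-1}G mu + tr(G'C^{-1}G Sigma)) / (4*pi*phi),
# the traces being estimated via the Hutchinson probes P and vh. The constant
# comes from the 2*G term, which is free of kappa2 and is therefore dropped
# from the objective minimized in `neg_kappa_fn4`.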
#' Function to prepare objects for use in Rcpp functions
#'
#' @param spde an spde object
#'
#' @return The SPDE matrices with the correct data formats
#'
#' @importFrom methods as
#' @keywords internal
create_listRcpp <- function(spde) {
Cmat <- as(spde$M0,"dgCMatrix")
Gmat <- as((spde$M1 + Matrix::t(spde$M1)) / 2,"CsparseMatrix")
GtCinvG <- as(spde$M2,"CsparseMatrix")
out <- list(Cmat = Cmat,
Gmat = Gmat,
GtCinvG = GtCinvG)
return(out)
}
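# Usage sketch (not run): coerce the FEM matrices of an INLA SPDE object to
# the sparse classes the Rcpp routines expect. The `$param.inla` slot holding
# M0, M1, and M2 is an assumption about INLA's object layout.
# spde <- INLA::inla.spde2.matern(mesh)
# spde_mats <- create_listRcpp(spde$param.inla)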
#' Trace approximation function
#'
#' @param kappa2 a scalar
#' @param spde spde object
#' @param P Matrix of dimension nk by N_s found by \code{solve(Sig_inv,Vh)}
#' @param mu posterior mean
#' @param Vh matrix of random variables with \code{nrow(Sig_inv)} rows and Ns
#' columns
#' @importFrom Matrix t crossprod bdiag
#'
#' @return a scalar
#' @keywords internal
TrQEww <- function(kappa2, spde, P, mu, Vh){
Cmat <- Gmat <- GtCinvG <- NULL
list2env(spde, envir = environment())
n_spde <- nrow(Cmat)
n_sess_em <- nrow(P) / n_spde
if(n_sess_em > 1) {
Cmat <- Matrix::bdiag(rep(list(Cmat),n_sess_em))
Gmat <- Matrix::bdiag(rep(list(Gmat),n_sess_em))
GtCinvG <- Matrix::bdiag(rep(list(GtCinvG),n_sess_em))
}
k2TrCSig <- kappa2 * TrSigB(P,Cmat,Vh)
k2uCu <- kappa2*Matrix::crossprod(mu,Cmat%*%mu)
two_uGu <- 2*Matrix::crossprod(mu,Gmat)%*%mu # This does not depend on kappa2, but needed for phi
twoTrGSig <- 2 * TrSigB(P,Gmat,Vh)
kneg2uGCGu <- (1/kappa2)*Matrix::crossprod(mu,GtCinvG%*%mu)
kneg2GCGSig <- TrSigB(P,GtCinvG,Vh) / kappa2
trace_QEww <- as.numeric(k2uCu + k2TrCSig + two_uGu + twoTrGSig + kneg2uGCGu + kneg2GCGSig)
return(trace_QEww)
}
#' Hutchinson estimator of the trace
#'
#' @param P Matrix of dimension nk by N_s found by \code{solve(Sig_inv,Vh)}
#' @param B Matrix of dimension nk by nk inside the desired trace product
#' @param vh Matrix of dimension nk by N_s in which elements are -1 or 1 with
#' equal probability.
#'
#' @return a scalar estimate of the trace of \code{Sigma %*% B}
#'
#' @importFrom Matrix diag crossprod
#' @keywords internal
TrSigB <- function(P,B,vh) {
Ns <- ncol(P)
out <- sum(Matrix::diag(Matrix::crossprod(P,B %*% vh))) / Ns
return(out)
}
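# Illustrative check (not run): Hutchinson's identity tr(Sigma %*% B) =
# E[v' Sigma B v] for Rademacher v. All objects below are toy examples.
# set.seed(1)
# n <- 50; Ns <- 500
# Sig_inv <- Matrix::Diagonal(n, x = runif(n, 1, 2)) # toy precision matrix
# B <- Matrix::Diagonal(n, x = runif(n)) # toy symmetric matrix
# vh <- matrix(sample(c(-1, 1), n * Ns, replace = TRUE), n, Ns)
# P <- Matrix::solve(Sig_inv, vh) # equals Sigma %*% vh
# TrSigB(P, B, vh) # stochastic estimate
# sum(Matrix::diag(Matrix::solve(Sig_inv, B))) # exact tr(Sigma %*% B)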
#' Expected log-likelihood function
#'
#' @param Q precision matrix
#' @param sigma2 noise variance
#' @param model_data list with X and y
#' @param Psi data locations to mesh locations matrix
#' @param mu posterior mean of w
#' @param Sigma posterior covariance of w
#' @param A crossprod(X%*%Psi)
#'
#' @return scalar expected log-likelihood
#' @keywords internal
ELL <- function(Q, sigma2, model_data, Psi, mu, Sigma, A) {
TN <- length(model_data$y)
  # E[log p(y | w)]: expand E||y - X Psi w||^2 = y'y - 2 y'X Psi mu + mu'A mu + tr(A Sigma)
  R1 <- -TN * log(sigma2) / 2 - crossprod(model_data$y)/(2*sigma2) +
    crossprod(model_data$y,model_data$X%*%Psi%*%mu) / sigma2 -
    Matrix::crossprod(model_data$X%*%Psi%*%mu) / (2*sigma2) -
    sum(Matrix::colSums(A*Sigma)) / (2*sigma2)
  R2 <- sum(2*log(Matrix::diag(Matrix::chol(Q, pivot = TRUE))))/2 -
    sum(Matrix::colSums(Q*Sigma)) / 2 - crossprod(mu,Q%*%mu) / 2
ELL_out <- R1@x + R2@x
return(ELL_out)
}
#' Trace of Q beta' beta
#'
#' @param kappa2 scalar
#' @param beta_hat a vector
#' @param spde an spde object
#'
#' @return a scalar
#' @keywords internal
TrQbb <- function(kappa2, beta_hat, spde) {
Qt <- Q_prime(kappa2, spde)
KK <- length(beta_hat) / spde$mesh$n
  if(length(beta_hat) != spde$mesh$n && KK %% 1 == 0) Qt <- Matrix::bdiag(rep(list(Qt),KK))
  out <- sum(Matrix::diag(Qt %*% tcrossprod(beta_hat)))
# The below is an approximation that may be faster in higher dimensions
# Ns <- 50
# v <- matrix(sample(x = c(-1,1), size = nrow(Qt) * Ns, replace = TRUE), nrow(Qt), Ns)
# vQ <- apply(v,2, crossprod, y = Qt)
# vbb <- apply(v,2,crossprod, y = tcrossprod(beta_hat))
# out2 <- sum(unlist(mapply(function(q,b) (q %*% b)@x, q = vQ, b = split(vbb, col(vbb))))) / Ns
return(out)
}
#' Function to optimize over kappa2
#'
#' @param kappa2 scalar
#' @param phi scalar
#' @param spde an spde object
#' @param beta_hat vector
#'
#' @return a scalar
#' @keywords internal
kappa_init_fn <- function(kappa2, phi, spde, beta_hat) {
Qt <- Q_prime(kappa2, spde)
# Rt <- Matrix::chol(Qt, pivot = T)
n_spde <- nrow(spde$Cmat)
KK <- length(beta_hat) / n_spde
  if(length(beta_hat) != n_spde && KK %% 1 == 0) {
# Rt <- Matrix::bdiag(rep(list(Rt),KK))
Qt <- Matrix::bdiag(rep(list(Qt),KK))
}
# log_det_Q <- sum(2*log(Matrix::diag(Rt)))
log_det_Q <- .logDetQt(kappa2,in_list = spde,n_sess = KK)
bQb <- as.numeric(Matrix::crossprod(beta_hat, Qt %*% beta_hat))
# if (verbose>0) cat("log_det_Q =", log_det_Q, ", bQb =",bQb,"\n")
# out <- log_det_Q / 2 - TrQbb(kappa2,beta_hat,spde) / (8*pi*phi)
out <- log_det_Q / 2 - bQb / (8*pi*phi)
return(-out)
}
#' Objective function for the initialization of kappa2 and phi
#'
#' @param theta a vector c(kappa2,phi)
#' @param spde an spde object
#' @param beta_hat vector
#'
#' @return scalar
#' @keywords internal
init_objfn <- function(theta, spde, beta_hat) {
QQ <- spde_Q_phi(kappa2 = theta[1],phi = theta[2], spde)
KK <- length(beta_hat) / spde$mesh$n
  if(length(beta_hat) != spde$mesh$n && KK %% 1 == 0) QQ <- Matrix::bdiag(rep(list(QQ),KK))
log_det_Q <- sum(2*log(Matrix::diag(Matrix::chol(QQ,pivot = T))))
out <- (log_det_Q / 2 - crossprod(beta_hat,QQ)%*%beta_hat / 2)@x
return(-out)
}
#' Fixed-point function for the initialization of kappa2 and phi
#'
#' @param theta a vector c(kappa2,phi)
#' @param spde an spde object
#' @param beta_hat vector
#'
#' @importFrom stats optimize
#'
#' @return scalar
#' @keywords internal
init_fixpt <- function(theta, spde, beta_hat) {
# kappa2 <- theta[1]
phi <- theta[2]
# n_spde <- spde$mesh$n
# if(is.null(n_spde)) n_spde <- spde$n.spde
n_spde <- nrow(spde$Cmat)
num_sessions <- length(beta_hat) / n_spde
kappa2 <-
stats::optimize(
f = kappa_init_fn,
phi = phi,
spde = spde,
beta_hat = beta_hat,
lower = 0,
upper = 50,
maximum = FALSE
)$minimum
Qp <- Q_prime(kappa2, spde)
if(num_sessions > 1) Qp <- Matrix::bdiag(rep(list(Qp), num_sessions))
phi <- as.numeric(Matrix::crossprod(beta_hat, Qp %*% beta_hat)) / (4 * pi * n_spde * num_sessions)
return(c(kappa2, phi))
}
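# Usage sketch (not run): iterate the fixed-point map to initialize
# (kappa2, phi). `spde` (carrying Cmat/Gmat/GtCinvG) and `beta_hat` are
# assumed to exist as in the functions above.
# theta <- c(1, 1) # initial (kappa2, phi)
# repeat {
#   theta_new <- init_fixpt(theta, spde, beta_hat)
#   if (sum((theta_new - theta)^2) < 1e-6) break
#   theta <- theta_new
# }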
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/EM_utils.R
|
#' Internal function used in joint approach to group-analysis for combining across models
#'
#' @param theta A vector of hyperparameter values at which to compute the posterior log density
#' @param spde A SPDE object from inla.spde2.matern() function, determines prior precision matrix
#' @param mu_theta Posterior mean from combined subject-level models.
#' @param Q_theta Posterior precision matrix from combined subject-level models.
#' @param M Number of subjects
#' @return A scalar: \code{(1-M)} times the log prior density of \code{theta}.
#'
#' @inheritSection INLA_Description INLA Requirement
#'
# F.logwt <- function(theta, spde, mu_theta, Q_theta, M){
# #mu_theta - posterior mean from combined subject-level models
# #Q_theta - posterior precision matrix from combined subject-level models
# #M - number of subjects
# a <- 1; b <- 5e-5
# n.spde <- (length(theta) - 1)/2
# mu.tmp <- spde$f$hyper$theta1$param[1:2]
# mu <- rep(mu.tmp, n.spde)
# Q.tmp <- matrix(spde$f$hyper$theta1$param[-(1:2)], 2, 2, byrow = TRUE)
# Q <- kronecker(diag(1, n.spde, n.spde), Q.tmp)
#
# ## Prior density
# pr.delta <- dgamma(exp(theta[1]), a, b, log = TRUE) #log prior density on residual precision
# pr.tk <- as.vector(-t(theta[-1] - mu)%*%Q%*%(theta[-1] - mu))/2 + log(det(Q))/2 - dim(Q)[1]*log(2*pi)/2 #joint log prior density on 2K spde parameters
# pr.theta <- pr.delta + pr.tk
#
# (1-M)*pr.theta
# }
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/F.logwt.R
|
#' Import INLA dependencies
#'
#' Roxygen entry for additional INLA dependencies
#'
#' @import sp
#' @import foreach
#' @name INLA_deps
#' @keywords internal
NULL
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/INLA_deps.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Find the log of the determinant of Q_tilde
#'
#' @param kappa2 a scalar
#' @param in_list a list with elements Cmat, Gmat, and GtCinvG
#' @param n_sess the integer number of sessions
#'
.logDetQt <- function(kappa2, in_list, n_sess) {
.Call(`_BayesfMRI_logDetQt`, kappa2, in_list, n_sess)
}
#' Find the initial values of kappa2 and phi
#'
#' @param theta a vector of length two containing the range and scale parameters
#' kappa2 and phi, in that order
#' @param spde a list containing the sparse matrix elements Cmat, Gmat, and GtCinvG
#' @param w the beta_hat estimates for a single task
#' @param n_sess the number of sessions
#' @param tol the stopping rule tolerance
#' @param verbose (logical) Should intermediate output be displayed?
#'
.initialKP <- function(theta, spde, w, n_sess, tol, verbose) {
.Call(`_BayesfMRI_initialKP`, theta, spde, w, n_sess, tol, verbose)
}
#' Perform the EM algorithm of the Bayesian GLM fitting
#'
#' @param theta the vector of initial values for theta
#' @param spde a list containing the sparse matrix elements Cmat, Gmat, and GtCinvG
#' @param y the vector of response values
#' @param X the sparse matrix of the data values
#' @param QK a sparse matrix of the prior precision found using the initial values of the hyperparameters
#' @param Psi a sparse matrix representation of the basis function mapping the data locations to the mesh vertices
#' @param A a precomputed matrix crossprod(X%*%Psi)
#' @param Ns the number of columns for the random matrix used in the Hutchinson estimator
#' @param tol a value for the tolerance used for a stopping rule (compared to
#' the squared norm of the differences between \code{theta(s)} and \code{theta(s-1)})
#' @param verbose (logical) Should intermediate output be displayed?
#'
.findTheta <- function(theta, spde, y, X, QK, Psi, A, Ns, tol, verbose = FALSE) {
.Call(`_BayesfMRI_findTheta`, theta, spde, y, X, QK, Psi, A, Ns, tol, verbose)
}
#' Get the prewhitening matrix for a single data location
#'
#' @param AR_coeffs a length-p vector where p is the AR order
#' @param nTime (integer) the length of the time series that is being prewhitened
#' @param avg_var a scalar value of the residual variances of the AR model
#'
.getSqrtInvCpp <- function(AR_coeffs, nTime, avg_var) {
.Call(`_BayesfMRI_getSqrtInvCpp`, AR_coeffs, nTime, avg_var)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/RcppExports.R
|
#' Activations prevalence.
#'
#' @param act_list List of activations from \code{\link{id_activations}}. All
#' should have the same sessions, fields, and brainstructures.
#'
#' @return A list containing the prevalences of activation, as a proportion of
#' the results from \code{act_list}.
#'
#' @importFrom stats setNames
#' @importFrom ciftiTools convert_xifti
#'
#' @export
act_prevalance <- function(act_list){
# Determine if `act_BayesGLM_cifti` or `act_BayesGLM`.
is_cifti <- all(vapply(act_list, function(q){ inherits(q, "act_BayesGLM_cifti") }, FALSE))
if (!is_cifti) {
if (!all(vapply(act_list, function(q){ inherits(q, "act_BayesGLM") }, FALSE))) {
stop("All objects in `act_list` must be the same type of result from `id_activations`: either `act_BayesGLM_cifti` or `act_BayesGLM`.")
}
}
  # Get the number of results, sessions, fields, and brain structures (for CIFTI).
# Ensure sessions, fields, and brainstructures match for all results.
# [TO DO] could check that the number of locations is also the same.
# but maybe not here because that's more complicated for CIFTI.
nA <- length(act_list)
session_names <- act_list[[1]]$session_names
nS <- length(session_names)
task_names <- act_list[[1]]$task_names
nK <- length(task_names)
if (is_cifti) {
bs_names <- names(act_list[[1]]$activations)
} else {
bs_names <- "activations"
}
nB <- length(bs_names)
  for (aa in seq_len(nA)[-1]) { # no iterations if there is only one result
if (length(act_list[[aa]]$session_names) != nS) {
stop("Result ", aa, " has a different number of sessions than the first result.")
}
if (!all(act_list[[aa]]$session_names == session_names)) {
warning("Result ", aa, " has different session names than the first result.")
}
if (length(act_list[[aa]]$task_names) != nK) {
stop("Result ", aa, " has a different number of tasks than the first result.")
}
if (!all(act_list[[aa]]$task_names == task_names)) {
warning("Result ", aa, " has different task names than the first result.")
}
if (is_cifti) {
if (length(act_list[[aa]]$activations) != nB) {
stop("Result ", aa, " has a different number of brain structures than the first result.")
}
if (!all(names(act_list[[aa]]$activations) == bs_names)) {
warning("Result ", aa, " has different brain structure names than the first result.")
}
}
}
  # Compute prevalence for every session and every task.
prev <- setNames(rep(list(setNames(vector("list", nS), session_names)), nB), bs_names)
for (bb in seq(nB)) {
for (ss in seq(nS)) {
x <- lapply(act_list, function(y){
y <- if (is_cifti) { y$activations[[bb]] } else { y$activations }
y[[ss]]$active
})
prev[[bb]][[ss]] <- Reduce("+", x)/nA
}
}
if (!is_cifti) { prev <- prev[[1]] }
result <- list(
prevalence = prev,
n_results = nA,
task_names = task_names,
session_names = session_names
)
# If BayesGLM, return.
if (!is_cifti) {
class(result) <- "prev_BayesGLM"
return(result)
}
# If BayesGLM_cifti, create 'xifti' with activations.
prev_xii <- vector("list", nS)
names(prev_xii) <- session_names
for (session in session_names) {
prev_xii_ss <- 0*convert_xifti(act_list[[1]]$activations_xii[[session]], "dscalar")
prev_xii_ss$meta$cifti$names <- task_names
for (bs in names(prev_xii_ss$data)) {
if (!is.null(prev_xii_ss$data[[bs]])) {
dat <- prev[[bs]][[session]]
colnames(dat) <- NULL
prev_xii_ss$data[[bs]] <- dat
}
}
prev_xii[[session]] <- prev_xii_ss
}
result <- c(list(prev_xii=prev_xii), result)
class(result) <- "prev_BayesGLM_cifti"
result
}
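# Usage sketch (not run): `fits` is a hypothetical list of per-subject
# BayesGLM_cifti results; activations are computed per subject, then combined.
# act_list <- lapply(fits, id_activations, gamma = 1, alpha = 0.05)
# prev <- act_prevalance(act_list)
# prev$prevalence # proportion of subjects active, per location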
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/act_prevalence.R
|
#' Summarize a \code{"prev_BayesGLM"} object
#'
#' Summary method for class \code{"prev_BayesGLM"}
#'
#' @param object Object of class \code{"prev_BayesGLM"}.
#' @param ... further arguments passed to or from other methods.
#' @export
#' @return A \code{"summary.prev_BayesGLM"} object, a list summarizing the
#' properties of \code{object}.
#' @method summary prev_BayesGLM
summary.prev_BayesGLM <- function(object, ...) {
  x <- object
  class(x) <- "summary.prev_BayesGLM"
  x
}
#' @rdname summary.prev_BayesGLM
#' @export
#'
#' @param x Object of class \code{"summary.prev_BayesGLM"}.
#' @return \code{NULL}, invisibly.
#' @method print summary.prev_BayesGLM
print.summary.prev_BayesGLM <- function(x, ...) {
cat("====BayesGLM Prevalences====================\n")
cat("Summary for prevalences is not implemented yet.\n")
invisible(NULL)
}
#' @rdname summary.prev_BayesGLM
#' @export
#'
#' @return \code{NULL}, invisibly.
#' @method print prev_BayesGLM
print.prev_BayesGLM <- function(x, ...) {
print.summary.prev_BayesGLM(summary(x))
}
#' Summarize a \code{"prev_BayesGLM_cifti"} object
#'
#' Summary method for class \code{"prev_BayesGLM_cifti"}
#'
#' @param object Object of class \code{"prev_BayesGLM_cifti"}.
#' @param ... further arguments passed to or from other methods.
#' @export
#' @return A \code{"summary.prev_BayesGLM_cifti"} object, a list summarizing the
#' properties of \code{object}.
#' @method summary prev_BayesGLM_cifti
summary.prev_BayesGLM_cifti <- function(object, ...) {
  x <- object
  class(x) <- "summary.prev_BayesGLM_cifti"
  x
}
#' @rdname summary.prev_BayesGLM_cifti
#' @export
#'
#' @param x Object of class \code{"summary.prev_BayesGLM_cifti"}.
#' @return \code{NULL}, invisibly.
#' @method print summary.prev_BayesGLM_cifti
print.summary.prev_BayesGLM_cifti <- function(x, ...) {
cat("====BayesGLM_cifti Prevalences==============\n")
cat("Summary for prevalences is not implemented yet.\n")
invisible(NULL)
}
#' @rdname summary.prev_BayesGLM_cifti
#' @export
#'
#' @return \code{NULL}, invisibly.
#' @method print prev_BayesGLM_cifti
print.prev_BayesGLM_cifti <- function(x, ...) {
print.summary.prev_BayesGLM_cifti(summary(x))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/act_prevalence_utils.R
|
#' Corrected AIC
#'
#' Computes corrected AIC (AICc).
#'
#' @param y The autocorrelated data
#' @param demean Demean \code{y}? Default: \code{FALSE}.
#' @param order.max The model order limit. Default: \code{10}.
#'
#' @return The cAIC
#' @keywords internal
#'
#' @importFrom fMRItools is_posNum
#' @importFrom stats ar.yw
AICc <- function(y, demean=FALSE, order.max = 10) {
N <- length(y)
stopifnot(is_posNum(order.max))
# Get regular AIC values.
ar_mdl <- ar.yw(x = y, aic = FALSE, demean = demean, order.max = order.max)
AIC_vals <- ar_mdl$aic
# Get corrected AIC values.
kseq <- seq(0, order.max)
AICc <- AIC_vals - (2*(kseq+1)) + 2*N*(kseq+1)/(N-kseq-2)
AICc <- AICc - min(AICc)
# Format and return.
names(AICc) <- paste0("AR(", kseq, ")")
AICc
}
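# Usage sketch (not run): choose the AR order minimizing AICc for a series.
# y <- arima.sim(list(ar = c(0.5, 0.2)), n = 200)
# which.min(AICc(y)) - 1 # minus 1 because the orders start at AR(0)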
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/cAIC.R
|
# #' Create SPDE for 3D volumetric data
# #'
# #' @inheritSection INLA_Description INLA Requirement
# #'
# #' @param locs Locations of data points (Vx3 matrix)
# #' @param labs Region labels of data points (vector of length V). If NULL, treat observations as a single region.
# #' @param lab_set Only used if labs is not NULL. Vector of region labels for which to construct spde object. If NULL, use all region labels.
# #'
# #' @return SPDE object representing triangular mesh structure on data locations
# #'
# #' @importFrom Matrix sparseMatrix colSums Diagonal t solve
# #'
# #' @export
# create_spde_vol3D <- function(locs, labs, lab_set = NULL){
# # Check to see that the `rdist` package is installed
# if (!requireNamespace("rdist", quietly = TRUE)) {
# stop("`create_spde_vol3D` requires the `rdist` package. Please install it.", call. = FALSE)
# }
# # Check to see that the `geometry` package is installed
# if (!requireNamespace("geometry", quietly = TRUE)) {
# stop("`create_spde_vol3D` requires the `geometry` package. Please install it.", call. = FALSE)
# }
# check_INLA(FALSE)
# if(is.null(labs) & !is.null(lab_set)) stop('If labs is NULL, then lab_set must not be specified.')
# # If no labels provided, construct mesh over all regions (treat as a single region)
# if(is.null(labs)) labs <- rep(1, nrow(locs))
# # If no set of labels specified, use all labels
# if(is.null(lab_set)) lab_set <- unique(labs)
# # For each of the labels specified, create a triangularization
# G_all <- C_all <- vector('list', length=length(lab_set))
# P_new_all <- FV_new_all <- vector('list', length=length(lab_set))
# I_all <- A_all <- vector('list', length=length(lab_set))
# for(value in lab_set){
# ii <- which(lab_set==value)
# ind <- (labs == value)
# P <- locs[ind,] #(x,y,z) coordinates of selected locations
# # In Triangulations for 3D data from original locations, note that there are big triangles that we don't want to keep.
# # FV_orig <- geometry::delaunayn(P)
# # open3d()
# # rgl.viewpoint(60)
# # rgl.light(120,60)
# # tetramesh(FV_orig, P, alpha=0.7)
# # Determine grid spacing (assumes isotropic voxels)
# max_dist = get_spacing(P[,1])
# # Create grid to get reasonable delaunay triangulations. Reasonable being triangles with similar size.
# x = unique(P[,1])
# y = unique(P[,2])
# z = unique(P[,3])
# #add vertices around the extreme of the boundary
# x = c(x, min(x)-max_dist, max(x)+max_dist)
# y = c(y, min(y)-max_dist, max(y)+max_dist)
# z = c(z, min(z)-max_dist, max(z)+max_dist)
# # # Subsample to a specified proportion
# # if(!is.null(subsample)){
# # if(subsample < 0 | subsample > 1) stop('subsample must be a number between 0 and 1')
# # length.out.x <- length(x)*subsample
# # length.out.y <- length(y)*subsample
# # length.out.z <- length(z)*subsample
# # x = seq(min(x)-max_dist, max(x)+max_dist, length.out=length.out.x)
# # y = seq(min(y)-max_dist, max(y)+max_dist, length.out=length.out.y)
# # z = seq(min(z)-max_dist, max(z)+max_dist, length.out=length.out.z)
# # }
# PP <- expand.grid(x, y, z) #full lattice within a cube surrounding the data locations
# # Create Triangulations for 3D data based on the grid
# FV <- geometry::delaunayn(PP)
# # open3d()
# # rgl.viewpoint(60)
# # rgl.light(120,60)
# # tetramesh(FV, PP, alpha=0.7)
# # Remove locations that are far from original data locations
# D <- rdist::cdist(P,PP)
# md <- apply(D, 2, min)
# ind_keep <- md < max_dist
# indices <- which(ind_keep == 1)
# # Remove vertices from FV that are not associated with an original vertex
# FV_new <- NULL
# for (i in 1:nrow(FV)) {
# if (sum(FV[i,] %in% indices) > 0) {
# FV_new = rbind(FV_new, FV[i,])
# }
# }
# fu <- unique(as.vector(FV_new))
# v <- rep(0, max(fu))
# v[fu] <- 1:length(fu)
# for (i in 1:nrow(FV_new)){
# FV_new[i,] <- v[FV_new[i,]]
# }
# P_new <- PP[fu,]
# FV_new_all[[ii]] <- FV_new
# P_new_all[[ii]] <- P_new
# # Now P_new and FV contain points not in P originally
# # # Visualize the mesh
# # open3d()
# # rgl.viewpoint(60)
# # rgl.light(120,60)
# # tetramesh(FV_new, P_new, alpha=0.7)
# # Create observation matrix A (this assumes that all original data locations appear in mesh)
# D <- rdist::cdist(P,P_new)
# I_v <- apply(D, 1, function(x) {which(x == min(x))})
# A_v <- diag(1, nrow = nrow(P_new))
# A_all[[ii]] <- A_v[I_v,]
# I_all[[ii]] <- I_v
# # ij <- which(A!=0,arr.ind = T)
# # i <- ij[,1]
# # j <- ij[,2]
# #construct G and C matrices that appear in SPDE precision
# gal <- galerkin_db(FV = FV_new, P = P_new)
# G_all[[ii]] <- gal$G
# C_all[[ii]] <- gal$C
# }
# G <- bdiag(G_all)
# C <- bdiag(C_all)
# # Part 2
# tG <- t(G)
# M0 <- C
# M1 <- G + tG
# M2 <- tG %*% solve(C,G)
# # Create the spde object. Note that the priors theta.mu and theta.Q have to be set reasonably here!!!
# # spde <- INLA::inla.spde2.generic(M0 = M0, M1 = M1, M2 = M2,
# # B0 = matrix(c(0,1,0),1,3),
# # B1 = matrix(c(0,0,1),1,3),
# # B2 = 1,
# # theta.mu = rep(0,2),
# # theta.Q = Diagonal(2,c(1e-6,1e-6)),
# # transform = "identity")
# spde = list(M0 = M0, M1 = M1, M2 = M2,n.spde = nrow(M0))
# out <- list(spde = spde,
# vertices = P_new_all,
# faces = FV_new_all,
# idx = I_all,
# Amat = bdiag(A_all))
# class(out) <- 'BayesfMRI.spde'
# return(out)
# }
# #' Determine grid spacing
# #'
# #' @param locations Vector of vertex locations in one dimension
# #'
# #' @return Value of minimum spacing between two locations
# #' @export
# get_spacing <- function(locations){
# #locations is a vector of locations along one dimension
# x = sort(unique(locations))
# dx = diff(x)
# return(min(dx))
# }
#' Create FEM matrices
#'
#' @param FV Matrix of faces in triangularization
#' @param P Matrix of vertex locations in triangularization
#' @param surface (logical) Will this create the SPDE matrices for a surface
#' or not?
#'
#' @return A list of matrices C and G appearing in sparse SPDE precision
#'
#' @keywords internal
galerkin_db <- function(FV, P, surface=FALSE){
d <- ncol(FV)-1
if(surface){
if(ncol(P) != (d + 1)){P <- t(P)}
if(ncol(P) != (d + 1)){stop("Wrong dimension of P")}
} else {
if(ncol(P) != d){P <- t(P)}
if(ncol(P) != d){stop("Wrong dimension of P")}
}
nV <- nrow(P)
nF <- nrow(FV)
Gi <- matrix(0, nrow = nF*(d+1), ncol = d+1)
  Gj <- Gz <- Ci <- Cj <- Cz <- Gi
for( f in 1:nF){
dd <- (d+1)*(f-1)+(1:(d+1))
Gi[dd,] <- Ci[dd,] <- FV[f,] %*% t(rep(1,d+1))
Gj[dd,] <- Cj[dd,] <- t(Gi[dd,])
if(surface){
r = t(P[FV[f,c(3,1,2)],]-P[FV[f,c(2,3,1)],])
r1 = r[,1,drop=FALSE]
r2 = r[,2,drop=FALSE]
f_area <- as.double(sqrt((t(r1)%*%r1)*(t(r2)%*%r2)-(t(r1)%*%r2)^2)/2)
Gz[dd,] = (t(r)%*%r)/(4*f_area)
Cz[dd,] = (f_area/12)*(matrix(1,3,3)+diag(3))
} else {
m1 <- rbind(rep(1, d+1), t(P[FV[f,],]))
m2 <- rbind(rep(0, d), diag(1, d))
m <- solve(m1, m2)
ddet <- abs(det(m1))
Gz[dd,] <- ddet * (m %*% t(m)) / prod(1:d)
Cz[dd,] <- ddet * (rep(1,d+1)+diag(d+1)) / prod(1:(d+2))
}
}
G <- Matrix::sparseMatrix(i = as.vector(Gi), j = as.vector(Gj), x = as.vector(Gz), dims = c(nV,nV))
Ce <- Matrix::sparseMatrix(i = as.vector(Ci), j = as.vector(Cj), x = as.vector(Cz), dims = c(nV,nV))
# C <- Matrix::diag(Matrix::colSums(Ce), nrow = nV, ncol = nV)
C <- Matrix::Diagonal(n = nV, x = Matrix::colSums(Ce))
return(list(G = G, Ce = Ce, C = C))
}
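# Illustrative sketch (not run): FEM mass and stiffness matrices for a toy
# 2D triangulation of the unit square (two triangles).
# P <- rbind(c(0,0), c(1,0), c(0,1), c(1,1))
# FV <- rbind(c(1,2,3), c(2,4,3))
# fem <- galerkin_db(FV, P)
# fem$G # sparse stiffness matrix
# fem$C # lumped (diagonal) mass matrix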
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/create_spde_vol3D.R
|
#' Identify task activations
#'
#' Identify areas of activation for each task from the result of \code{BayesGLM}
#' or \code{BayesGLM_cifti}.
#'
#' @param model_obj Result of \code{BayesGLM} or \code{BayesGLM_cifti} model
#' call, of class \code{"BayesGLM"} or \code{"BayesGLM_cifti"}.
# @param method The method to be used for identifying activations, either 'posterior' (Default) or '2means'
#' @param tasks The task(s) to identify activations for. Give either the name(s)
#' as a character vector, or the numerical indices. If \code{NULL} (default),
#' analyze all tasks.
#' @param sessions The session(s) to identify activations for. Give either the
#' name(s) as a character vector, or the numerical indices. If \code{NULL}
#' (default), analyze the first session.
#'
#' Currently, if multiple sessions are provided, activations are identified
#' separately for each session. (Information is not combined between the
#' different sessions.)
#' @param method \code{"Bayesian"} (default) or \code{"classical"}. If
#' \code{model_obj} does not have Bayesian results because \code{Bayes} was set
#' to \code{FALSE}, only the \code{"classical"} method can be used.
#' @param alpha Significance level. Default: \code{0.05}.
#' @param gamma Activation threshold, for example \code{1} for 1\% signal
#' change if \code{scale_BOLD=="mean"} during model estimation. Setting a
#' \code{gamma} is required for the Bayesian method; \code{NULL} (default)
#' will use a \code{gamma} of zero for the classical method.
# @param excur_method For method = 'Bayesian' only: Either \code{EB} (empirical Bayes) or \code{QC} (Quantile Correction), depending on the method that should be used to find the
# excursions set. Note that if any contrasts (including averages across sessions) are used in the modeling, the method chosen must be \code{EB}.
# The difference in the methods is that the \code{EB} method assumes Gaussian posterior distributions for the parameters.
# @param area.limit For method = 'Bayesian' only: Below this value, clusters of activations will be considered spurious. If NULL (default), no limit.
#' @param correction For the classical method only: Type of multiple comparisons
#' correction: \code{"FWER"} (Bonferroni correction, the default), \code{"FDR"}
#' (Benjamini Hochberg), or \code{"none"}.
# @param excur_method Either \code{"EB"} (empirical Bayes) or \code{"QC"} (Quantile Correction),
# depending on the method that should be used to find the excursions set. Note that to ID
# activations for averages across sessions, the method chosen must be \code{EB}. The difference
# in the methods is that the \code{EB} method assumes Gaussian posterior distributions for the parameters.
#' @inheritParams verbose_Param
# @param type For method='2means' only: The type of 2-means clustering to perform ('point' or 'sequential')
# @param n_sample The number of samples to generate if the sequential 2-means type is chosen. By default, this takes a value of 1000.
#'
# @return A list containing activation maps for each IC and the joint and marginal PPMs for each IC.
#'
#' @importFrom ciftiTools convert_xifti
#' @importFrom fMRItools is_posNum is_1
#'
#' @return An \code{"act_BayesGLM"} or \code{"act_BayesGLM_cifti"} object, a
#' list which indicates the activated locations along with related information.
#' @export
#'
id_activations <- function(
model_obj,
tasks=NULL,
sessions=NULL,
method=c("Bayesian", "classical"),
alpha=0.05,
gamma=NULL,
#area.limit=NULL,
correction = c("FWER", "FDR", "none"),
#excur_method = c("EB", "QC"),
verbose = 1){
# If 'BayesGLM_cifti', we will loop over the brain structures.
is_cifti <- inherits(model_obj, "BayesGLM_cifti")
if (is_cifti) {
cifti_obj <- model_obj
model_obj <- cifti_obj$BayesGLM_results
} else {
if (!inherits(model_obj, "BayesGLM")) {
stop("`model_obj` is not a `'BayesGLM'` or 'BayesGLM_cifti' object.")
}
model_obj <- list(obj=model_obj)
}
idx1 <- min(which(!vapply(model_obj, is.null, FALSE)))
# Argument checks.
stopifnot(is.null(tasks) || is.character(tasks) || is.numeric(tasks))
if (is.character(tasks)) { stopifnot(all(tasks %in% model_obj[[idx1]]$task_names)) }
if (is.numeric(tasks)) {
    stopifnot(isTRUE(all.equal(tasks, round(tasks))))
stopifnot(min(tasks) >= 1)
stopifnot(max(tasks) <= length(model_obj[[idx1]]$task_names))
stopifnot(length(tasks) == length(unique(tasks)))
tasks <- model_obj[[idx1]]$task_names[tasks]
}
if (is.null(tasks)) { tasks <- model_obj[[idx1]]$task_names }
stopifnot(is.null(sessions) || is.character(sessions) || is.numeric(sessions))
if (is.character(sessions)) { stopifnot(all(sessions %in% model_obj[[idx1]]$session_names)) }
if (is.numeric(sessions)) {
    stopifnot(isTRUE(all.equal(sessions, round(sessions))))
stopifnot(min(sessions) >= 1)
stopifnot(max(sessions) <= length(model_obj[[idx1]]$session_names))
stopifnot(length(sessions) == length(unique(sessions)))
sessions <- model_obj[[idx1]]$session_names[sessions]
}
if (is.null(sessions)) { sessions <- model_obj[[idx1]]$session_names[1] }
method <- match.arg(method, c("Bayesian", "classical"))
stopifnot(is_posNum(alpha) && alpha < 1)
stopifnot(is.null(gamma) || is_posNum(gamma, zero_ok=TRUE))
correction <- match.arg(correction, c("FWER", "FDR", "none"))
stopifnot(is_posNum(verbose, zero_ok=TRUE))
# Check that Bayesian results are available, if requested.
if (method=="Bayesian" && is.null(model_obj[[idx1]]$INLA_model_obj)) {
warning("`method=='Bayesian'` but only classical model results are available. Setting `method` to `'classical'`.")
method <- "classical"
}
# Check gamma was set, if computing Bayesian activations.
if (is.null(gamma)) {
if (method=='Bayesian') stop("Must specify an activation threshold, `gamma`, when `method=='Bayesian'`.")
if (method=='classical') gamma <- 0
}
# Initialize list of activations.
nModels <- length(model_obj)
activations <- vector('list', length=nModels)
names(activations) <- names(model_obj)
actFUN <- switch(method,
Bayesian = id_activations.posterior,
classical = id_activations.classical
)
actArgs <- list(tasks=tasks, alpha=alpha, gamma=gamma)
if (method == "classical") {
actArgs <- c(actArgs, list(correction=correction))
} else {
correction <- "not applicable"
}
# Loop over model objects (brain structures, if `is_cifti`).
for (mm in seq(nModels)) {
if (is.null(model_obj[[mm]])) { next }
if (method=="Bayesian" && identical(attr(model_obj[[mm]]$INLA_model_obj, "format"), "minimal")) {
stop("Bayesian activations are not available because `return_INLA` was set to `'minimal'` in the `BayesGLM` call. Request the classical activations, or re-run `BayesGLM`.")
}
name_obj_mm <- names(model_obj)[mm]
if (method=="Bayesian" && name_obj_mm!="obj") {
if (verbose>0) cat(paste0("Identifying Bayesian GLM activations in ",name_obj_mm,'\n'))
}
# Loop over sessions
activations[[mm]] <- vector("list", length(sessions))
names(activations[[mm]]) <- sessions
for (session in sessions) {
actArgs_ms <- c(actArgs, list(model_obj=model_obj[[mm]], session=session))
activations[[mm]][[session]] <- do.call(actFUN, actArgs_ms)
}
}
result <- list(
activations = activations,
method=method,
alpha=alpha,
gamma=gamma,
correction=correction,
task_names = model_obj[[idx1]]$task_names,
session_names = model_obj[[idx1]]$session_names
#excur_method = c("EB", "QC")
)
# If BayesGLM, return.
if (!is_cifti) {
result$activations <- result$activations[[1]]
class(result) <- "act_BayesGLM"
return(result)
}
# If BayesGLM_cifti, create 'xifti' with activations.
act_xii <- vector("list", length(sessions))
names(act_xii) <- sessions
for (session in sessions) {
the_xii <- cifti_obj$task_estimates_xii$classical[[session]]
act_xii_ss <- 0*select_xifti(the_xii, match(tasks, the_xii$meta$cifti$names))
for (bs in names(the_xii$data)) {
if (bs=="subcort") { next }
if (!is.null(the_xii$data[[bs]])) {
dat <- 1*activations[[bs]][[session]]$active
colnames(dat) <- NULL
if (method=="classical") { dat <- dat[!is.na(dat[,1]),,drop=FALSE] }
act_xii_ss$data[[bs]] <- dat
}
}
# if (!is.null(model_obj$subcortical)) {
# if(method == "EM") {
# for(m in 1:length(activations$subcortical)){
# datS <- 1*activations$subcortical[[m]]$active
# act_xii_ss$data$subcort[!is.na(model_obj$betas_EM[[1]]$data$subcort[,1]),] <-
# datS
# }
# }
# if(method == "classical") {
# datS <- 1*activations$subcortical$active
# act_xii_ss$data$subcort[!is.na(model_obj$betas_classical[[1]]$data$subcort[,1]),] <-
# datS
# }
# act_xii_ss$data$subcort <- matrix(datS, ncol=length(tasks))
# }
if (utils::packageVersion("ciftiTools") < "0.13.1") { # approximate package version
act_xii_ss <- convert_xifti(
act_xii_ss, "dlabel",
values=setNames(seq(0, 1), c("Inactive", "Active")),
colors="red"
)
} else {
act_xii_ss <- convert_xifti(
act_xii_ss, "dlabel",
levels_old=seq(0, 1), labels=c("Inactive", "Active"),
colors="red"
)
}
act_xii[[session]] <- act_xii_ss
}
result <- c(list(activations_xii=act_xii), result)
class(result) <- "act_BayesGLM_cifti"
result
}
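# Usage sketch (not run): `fit` is a hypothetical result of BayesGLM_cifti().
# Identify locations exceeding 1% signal change at the 5% significance level.
# act <- id_activations(fit, gamma = 1, alpha = 0.05, method = "Bayesian")
# act$activations_xii # 'xifti' objects marking active locations per session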
#' Identify activations using joint posterior probabilities
#'
#' Identifies areas of activation given an activation threshold and significance
#' level using joint posterior probabilities
#'
#' For a given latent field, identifies locations that exceed a certain activation
#' threshold (e.g. 1 percent signal change) at a given significance level, based on the joint
#' posterior distribution of the latent field.
#'
#' @param model_obj Result of \code{BayesGLM}, of class \code{"BayesGLM"}.
#' @param tasks,session,alpha,gamma See \code{\link{id_activations}}.
# @param excur_method Either \code{"EB"} (empirical Bayes) or \code{"QC"} (Quantile Correction),
# depending on the method that should be used to find the excursions set. Note that to ID
# activations for averages across sessions, the method chosen must be \code{EB}. The difference
# in the methods is that the \code{EB} method assumes Gaussian posterior distributions for the parameters.
#'
#' @return A list with two elements: \code{active}, which gives a matrix of zeros
#' and ones of the same dimension as \code{model_obj$task_estimates${session}},
#' and \code{excur_result}, an object of class \code{"excurobj"} (see \code{\link{excursions.inla}} for
#' more information).
#'
#' @importFrom excursions excursions.inla
#'
#' @keywords internal
id_activations.posterior <- function(
model_obj,
tasks, session,
alpha=0.05, gamma){
#area.limit=NULL,
#excur_method = c("EB","QC")
stopifnot(inherits(model_obj, "BayesGLM"))
#excur_method <- match.arg(excur_method, c("EB","QC"))
sess_ind <- which(model_obj$session_names == session)
mesh <- model_obj$mesh
n_vox <- mesh$n
#indices of beta vector corresponding to specified session
inds <- (1:n_vox) + (sess_ind-1)*n_vox
#loop over latent fields
excur <- vector('list', length=length(tasks))
act <- matrix(NA, nrow=n_vox, ncol=length(tasks))
colnames(act) <- tasks
for(f in tasks){
#if(is.null(area.limit)){
res.exc <- excursions.inla(
model_obj$INLA_model_obj,
name=f, ind=inds, u=gamma, type='>', alpha=alpha, method="EB"
)
#} else {
# res.exc <- excursions.inla.no.spurious(model_obj$INLA_model_obj, mesh=mesh, name=f, ind=inds, u=gamma, type='>', method=excur_method, alpha=alpha, area.limit = area.limit, use.continuous=FALSE, verbose=FALSE)
#}
which_f <- which(tasks==f)
act[,which_f] <- res.exc$E[inds] == 1
excur[[which_f]] <- res.exc
}
result <- list(active=act, excursions_result=excur)
#compute size of activations
areas_all <- diag(INLA::inla.fmesher.smorg(mesh$loc, mesh$graph$tv, fem = 0, output = list("c0"))$c0) #area of each vertex
areas_act <- apply(act, 2, function(x) sum(areas_all[x==1]))
result$areas_all <- areas_all
result$areas_act <- areas_act
result
}
#' Identification of areas of activation in a General Linear Model using classical methods
#'
#' @param model_obj A \code{BayesGLM} object
#' @param tasks,session,alpha,gamma See \code{\link{id_activations}}.
#' @param correction (character) Either 'FWER' or 'FDR'. 'FWER' corresponds to the
#' family-wise error rate with Bonferroni correction, and 'FDR' refers to the
#' false discovery rate using Benjamini-Hochberg.
#' @param mesh (Optional) An \code{"inla.mesh"} object (see \code{\link{make_mesh}} for
#' surface data). Only necessary for computing surface areas of identified activations.
#'
#' @return A matrix corresponding to the
#' 0-1 activation status for the model coefficients.
#'
#' @importFrom stats sd pt p.adjust
#' @importFrom matrixStats colVars
#'
#' @keywords internal
id_activations.classical <- function(model_obj,
tasks,
session,
alpha = 0.05,
gamma = 0,
correction = c("FWER", "FDR", "none"),
mesh = NULL) {
# Argument checks ------------------------------------------------------------
if (!inherits(model_obj, "BayesGLM")) {
stop(paste0(
"The model object is of class ",
paste0(class(model_obj), collapse=", "),
" but should be of class 'BayesGLM'."
))
}
# tasks, session, alpha, gamma checked in `id_activations`
correction <- match.arg(correction, c("FWER","FDR","none"))
beta_est <- model_obj$result_classical[[session]]$estimates
se_beta <- model_obj$result_classical[[session]]$SE_estimates
DOF <- model_obj$result_classical[[session]]$DOF
nvox <- nrow(beta_est)
#if(any(!(tasks %in% 1:K))) stop(paste0('tasks must be between 1 and the number of tasks, ',K))
beta_est <- matrix(beta_est[,tasks], nrow=nvox) #need matrix() in case beta_est[,tasks] is a vector
se_beta <- matrix(se_beta[,tasks], nrow=nvox) #need matrix() in case beta_est[,tasks] is a vector
K <- length(tasks)
#Compute t-statistics and p-values
t_star <- (beta_est - gamma) / se_beta
if(!is.matrix(t_star)) t_star <- matrix(t_star, nrow=nvox)
#perform multiple comparisons correction
p_values <- p_values_adj <- active <- matrix(NA, nvox, K)
for (kk in 1:K) {
    p_values_k <- pt(t_star[,kk], df = DOF, lower.tail = FALSE) # pt is vectorized
p_vals_adj_k <- switch(correction,
FWER = p.adjust(p_values_k, method='bonferroni'),
FDR = p.adjust(p_values_k, method='BH'),
none = p_values_k
)
p_values[,kk] <- p_values_k
p_values_adj[,kk] <- p_vals_adj_k
active[,kk] <- (p_vals_adj_k < alpha)
}
#compute size of activations
if(!is.null(mesh)){
mask <- model_obj$result_classical[[session]]$mask
if(sum(mask) != nrow(mesh$loc)) stop('Supplied mesh is not consistent with mask in model_obj.')
areas_all <- diag(INLA::inla.fmesher.smorg(mesh$loc, mesh$graph$tv, fem = 0, output = list("c0"))$c0) #area of each vertex
areas_act <- apply(active[mask==1,,drop=FALSE], 2, function(x) sum(areas_all[x==1]))
} else {
areas_all <- areas_act <- NULL
}
  # Drop rows for un-modeled locations (NA p-values), guarding the no-NA case:
  # `x[-integer(0),]` would otherwise drop every row.
  na_pvalues <- which(is.na(p_values[,1]))
  if (length(na_pvalues) > 0) {
    p_values <- p_values[-na_pvalues,, drop = FALSE]
    p_values_adj <- p_values_adj[-na_pvalues,, drop = FALSE]
    active <- active[-na_pvalues,, drop = FALSE]
  }
  result <- list(
    p_values = p_values,
    p_values_adj = p_values_adj,
    active = active,
correction = correction,
alpha = alpha,
gamma = gamma,
areas_all = areas_all,
areas_act = areas_act
)
result
}
# #' Identify activations using joint posterior probabilities with EM results
# #'
# #' Identifies areas of activation given an activation threshold and significance
# #' level using joint posterior probabilities
# #'
# #' For a given latent field, identifies locations that exceed a certain activation
# #' threshold (e.g. 1 percent signal change) at a given significance level, based on the joint
# #' posterior distribution of the latent field.
# #'
# #' @param model_obj An object of class \code{"BayesGLM"}, a result of a call
# #' to \code{BayesGLMEM}.
# #' @param tasks Name of latent field or vector of names on which to identify activations. By default, analyze all tasks.
# #' @param sessions (character) The name of the session that should be examined.
# #' If \code{NULL} (default), the first session is used.
# #' @param alpha Significance level (e.g. 0.05)
# #' @param gamma Activation threshold (e.g. 1 for 1 percent signal change if scale=TRUE in model estimation)
# #' @param area.limit Below this value, activations will be considered spurious. If NULL, no limit.
# #'
# #'
# #' @return A list with two elements: \code{active}, which gives a matrix of zeros
# #' and ones of the same dimension as \code{model_obj$task_estimates${sessions}},
# #' and \code{excur_result}, an object of class \code{"excurobj"} (see \code{\link{excursions.inla}} for
# #' more information).
# #'
# #' @importFrom excursions excursions
# #' @importFrom stats na.omit
# #'
# #' @keywords internal
# id_activations.em <-
# function(model_obj,
# tasks = NULL,
# sessions = NULL,
# alpha = 0.05,
# gamma,
# area.limit = NULL) {
# if (!inherits(model_obj, "BayesGLM"))
# stop(paste0(
# "The model object is of class ",
# class(model_obj),
# " but should be of class 'BayesGLM'."
# ))
# #check sessions argument
# #if only one session, analyze that one
# all_sessions <- model_obj$sessions
# n_sess <- length(all_sessions)
# if (n_sess == 1) {
# sessions <- all_sessions
# }
# #check sessions not NULL, check that a valid session name
# if(!is.null(sessions)){
# if(!(sessions %in% all_sessions)) stop(paste0('sessions does not appear in the list of sessions: ', paste(all_sessions, collapse=', ')))
# sess_ind <- which(all_sessions == sessions)
# }
# #if averages not available and sessions=NULL, pick first session and return a warning
# # has_avg <- is.matrix(model_obj$avg_task_estimates)
# has_avg <- !is.null(model_obj$avg_task_estimates)
# if(is.null(sessions) & !has_avg){
# sessions <- all_sessions[1]
# warning(paste0("Your model object does not have averaged beta estimates. Using the first session instead. For a different session, specify a session name from among: ", paste(all_sessions, collapse = ', ')))
# }
# if(is.null(sessions) & has_avg){
# sessions <- 'avg'
# }
# # Make an indicator for subcortical data
# is_subcort <- "BayesGLMEM_vol3D" %in% as.character(model_obj$call)
# #if sessions is still NULL, use average and check excur_method argument
# # if(is.null(sessions)){
# # if(has_avg & excur_method != "EB") {
# # excur_method <- 'EB'
# # warning("To id activations for averaged beta estimates, only the excur_method='EB' is supported. Setting excur_method to 'EB'.")
# # }
# # }
# #check tasks argument
# if(is.null(tasks)) tasks <- model_obj$tasks
# if(!any(tasks %in% model_obj$tasks)) stop(paste0("Please specify only field names that corresponds to one of the latent fields: ",paste(model_obj$tasks, collapse=', ')))
# #check alpha argument
# if(alpha > 1 | alpha < 0) stop('alpha value must be between 0 and 1, and it is not')
# mesh <- model_obj$mesh
# if(is_subcort) {
# n_subcort_models <- length(model_obj$spde_obj)
# result <- vector("list", length = n_subcort_models)
# for(m in 1:n_subcort_models){
# # mesh <- make_mesh(model_obj$spde_obj[[m]]$vertices[[1]],
# # model_obj$spde_obj[[m]]$faces[[1]])
# # n_vox <- mesh$n
# # excur <- vector('list', length=length(tasks))
# # act <- matrix(NA, nrow=n_vox, ncol=length(tasks))
# # colnames(act) <- tasks
# if(sessions == "avg") {
# beta_est <- c(model_obj$EM_result_all[[m]]$posterior_mu)
# }
# if(sessions != "avg") {
# beta_mesh_est <- c(model_obj$task_estimates[[which(all_sessions == sessions)]])
# beta_est <- beta_mesh_est[!is.na(beta_mesh_est)]
# }
# Phi_k <- model_obj$EM_result_all[[m]]$mesh$Amat
# # Phi <-
# # Matrix::bdiag(rep(
# # list(Phi_k),
# # length(tasks)
# # ))
# # w <- beta_est %*% Phi
# Sig_inv <- model_obj$EM_result_all[[m]]$posterior_Sig_inv
# # Q_est <- Matrix::tcrossprod(Phi %*% Sig_inv, Phi)
# ### use the ind argument for excursions to get the marginal posterior
# ### excursions sets (much faster)
# n_vox <- length(beta_est) / length(tasks)
# act <- matrix(NA, nrow=n_vox, ncol=length(tasks))
# V <- Matrix::diag(INLA::inla.qinv(Sig_inv))
# for(f in tasks) {
# which_f <- which(tasks==f)
# f_inds <- (1:n_vox) + (which_f - 1)*n_vox
# res.exc <-
# excursions(
# alpha = alpha,
# u = gamma,
# mu = beta_est,
# Q = Sig_inv,
# vars = V,
# ind = f_inds,
# type = ">", method = "EB"
# )
# act[,which_f] <- res.exc$E[f_inds]
# }
# act <- as.matrix(Phi_k %*% act)
# result[[m]] <- list(active = act, excursions_result=res.exc)
# }
# }
# if(!is_subcort) {
# n_vox <- mesh$n
# #for a specific session
# if(!is.null(sessions)){
# # inds <- (1:n_vox) + (sess_ind-1)*n_vox #indices of beta vector corresponding to session v
# #loop over latent fields
# excur <- vector('list', length=length(tasks))
# act <- matrix(NA, nrow=n_vox, ncol=length(tasks))
# colnames(act) <- tasks
# res.exc <-
# excursions(
# alpha = alpha,
# u = gamma,
# mu = stats::na.omit(
# c(model_obj$task_estimates[[sessions]])
# ),
# Q = model_obj$posterior_Sig_inv,
# type = ">", method = "EB"
# )
# for(f in tasks) {
# which_f <- which(tasks==f)
# f_inds <- (1:n_vox) + (which_f - 1)*n_vox
# act[,which_f] <- res.exc$E[f_inds]
# }
# }
# result <- list(active=act, excursions_result=res.exc)
# }
# return(result)
# }
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/id_activations.R
|
#' Summarize a \code{"act_BayesGLM"} object
#'
#' Summary method for class \code{"act_BayesGLM"}
#'
#' @param object Object of class \code{"act_BayesGLM"}.
#' @param ... further arguments passed to or from other methods.
#' @export
#' @return A \code{"summary.act_BayesGLM"} object, a list summarizing the
#' properties of \code{object}.
#' @method summary act_BayesGLM
summary.act_BayesGLM <- function(object, ...) {
  act <- object$activations[!vapply(object$activations, is.null, FALSE)]
if ("p_values" %in% names(act)) { act <- list(single_session=act) }
x <- list(
activations = lapply(act, function(x_a){
if ("active" %in% names(x_a)) {
q <- apply(x_a$active, 2, function(avec){c(`TRUE`=sum(avec), `FALSE`=sum(!avec))})
colnames(q) <- object$task_names
q
} else {
lapply(x_a, function(x_b){
q <- apply(x_b$active, 2, function(avec){c(`TRUE`=sum(avec), `FALSE`=sum(!avec))})
colnames(q) <- object$task_names
q
})
}
}),
method=object$method,
alpha=object$alpha,
gamma=object$gamma,
correction=object$correction
#excur_method
)
class(x) <- "summary.act_BayesGLM"
x
}
#' @rdname summary.act_BayesGLM
#' @export
#'
#' @param x Object of class \code{"summary.act_BayesGLM"}.
#' @return \code{NULL}, invisibly.
#' @method print summary.act_BayesGLM
print.summary.act_BayesGLM <- function(x, ...) {
cat("====BayesGLM Activations====================\n")
cat(paste0("Activated locations (", sum(x$activations[[1]][,1]), " modeled locations):\n"))
for (ii in seq(length(x$activations))) {
act_ii <- x$activations[[ii]]
cat(paste0(
" ", names(x$activations)[ii],
"\n"
))
for (kk in seq(ncol(act_ii))) {
cat(paste0(
" ", colnames(act_ii)[kk],
": ", act_ii["TRUE",kk], "\n"
))
}
}
cat("GLM type: ", x$method, "\n")
cat("alpha: ", x$alpha, "\n")
cat("gamma: ", x$gamma, "\n")
if (x$correction != "not applicable") {
cat("Correction: ", x$correction, "\n")
}
cat("\n")
invisible(NULL)
}
#' @rdname summary.act_BayesGLM
#' @export
#'
#' @return \code{NULL}, invisibly.
#' @method print act_BayesGLM
print.act_BayesGLM <- function(x, ...) {
print.summary.act_BayesGLM(summary(x))
}
#' Summarize a \code{"act_BayesGLM_cifti"} object
#'
#' Summary method for class \code{"act_BayesGLM_cifti"}
#'
#' @param object Object of class \code{"act_BayesGLM_cifti"}.
#' @param ... further arguments passed to or from other methods.
#' @export
#' @return A \code{"summary.act_BayesGLM_cifti"} object, a list summarizing the
#' properties of \code{object}.
#' @method summary act_BayesGLM_cifti
summary.act_BayesGLM_cifti <- function(object, ...) {
x <- summary.act_BayesGLM(object, ...)
class(x) <- "summary.act_BayesGLM_cifti"
x
}
#' @rdname summary.act_BayesGLM_cifti
#' @export
#'
#' @param x Object of class \code{"summary.act_BayesGLM_cifti"}.
#' @return \code{NULL}, invisibly.
#' @method print summary.act_BayesGLM_cifti
print.summary.act_BayesGLM_cifti <- function(x, ...) {
cat("====BayesGLM_cifti Activations==============\n")
cat(paste0("Activated locations:\n"))
if (!("active" %in% names(x$activations[[1]]))) {
# for (ii in seq(length(x$activations))) {
# names(x$activations[[ii]]) <- paste0(
# names(x$activations[[ii]]), ", ", names(x$activations)[ii]
# )
# }
x$activations <- do.call(c, x$activations)
}
for (ii in seq(length(x$activations))) {
act_ii <- x$activations[[ii]]
cat(paste0(
" ", names(x$activations)[ii],
" (", sum(act_ii[,1]), " modeled locations)\n"
))
for (kk in seq(ncol(act_ii))) {
cat(paste0(
" ", colnames(act_ii)[kk],
": ", act_ii["TRUE",kk], "\n"
))
}
}
cat("GLM type: ", x$method, "\n")
cat("alpha: ", x$alpha, "\n")
cat("gamma: ", x$gamma, "\n")
if (x$correction != "not applicable") {
cat("Correction: ", x$correction, "\n")
}
cat("\n")
invisible(NULL)
}
#' @rdname summary.act_BayesGLM_cifti
#' @export
#'
#' @return \code{NULL}, invisibly.
#' @method print act_BayesGLM_cifti
print.act_BayesGLM_cifti <- function(x, ...) {
print.summary.act_BayesGLM_cifti(summary(x))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/id_activations_utils.R
|
#' Validate an individual session in a \code{"BfMRI.sess"} object.
#'
#' Check if object is valid for a list entry in \code{"BfMRI.sess"}.
#'
#' A valid entry in a \code{"BfMRI.sess"} object is a list with these named
#' fields:
#' \describe{
#' \item{BOLD}{a \eqn{T \times V} BOLD matrix. Rows are time points; columns are data locations (vertices/voxels).}
#' \item{design}{a \eqn{T \times K} matrix containing the \eqn{K} task regressors. See \code{\link{make_HRFs}}.}
#'   \item{nuisance}{an optional argument. \eqn{T \times J} matrix containing the \eqn{J} nuisance regressors.}
#' }
#'
#' @param x The putative entry in a \code{"BfMRI.sess"} object.
#'
#' @return Logical. Is \code{x} a valid entry in a \code{"BfMRI.sess"} object?
#'
#' @keywords internal
is.a_session <- function(x){
# `x` is a list.
if (!is.list(x)) { message("`x` must be a list."); return(FALSE) }
# `x` has fields 'BOLD', 'design', and maybe 'nuisance'.
fields <- c("BOLD", "design", "nuisance")
if (length(x) == 2) {
if (!all(names(x) == fields[seq(2)])) {
message("`x` should have fields 'BOLD' and 'design'."); return(FALSE)
}
} else if (length(x) == 3) {
if (!all(names(x) == fields[seq(3)])) {
message("`x` should have fields 'BOLD', 'design', and 'nuisance'.")
return(FALSE)
}
} else {
message("`x` should be length 2 or 3."); return(FALSE)
}
has_nus <- length(x) == 3
# The data types are ok.
if (!(is.numeric(x$BOLD) && is.matrix(x$BOLD))) {
message("`x$BOLD` must be a numeric matrix."); return(FALSE)
}
des_pw <- (!has_nus) && (inherits(x$design, "dgCMatrix"))
if (!des_pw) {
if (!(is.numeric(x$design) && is.matrix(x$design))) {
message("`x$design` must be a numeric matrix.")
if (!has_nus) {
message("(It may also be a 'dgCMatrix' if prewhitening has been done.)")
}
return(FALSE)
}
}
if (has_nus) {
if (!(is.matrix(x$nuisance))) {
message("`x$nuisance` must be a matrix."); return(FALSE)
}
}
# The dimensions are ok.
if (des_pw) {
nvox <- sum(!is.na(x$BOLD[1,]))
if (nrow(x$BOLD) != nrow(x$design)/nvox) {
message(
"If prewhitening has been performed, the number of rows in the design ",
"matrix should be T*V, where T=nrow(BOLD) and V is the number of ",
"columns of BOLD that are non-NA."
)
return(FALSE)
}
} else {
if ((nrow(x$BOLD) != nrow(x$design))) {
message("'BOLD' and 'design' must have the same number of rows (time points).")
return(FALSE)
}
}
if (has_nus) {
if (nrow(x$BOLD) != nrow(x$nuisance)) {
message("'BOLD' and 'nuisance' must have the same number of rows (time points).")
return(FALSE)
}
}
return(TRUE)
}
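# Quick check (not run): a minimal valid session entry.
# sess <- list(
#   BOLD = matrix(rnorm(200), nrow = 20), # 20 time points, 10 locations
#   design = matrix(rnorm(40), nrow = 20) # 20 time points, 2 task regressors
# )
# is.a_session(sess) # TRUE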
#' Validate a \code{"BfMRI.sess"} object.
#'
#' Check if object is valid for a \code{"BfMRI.sess"} object.
#'
#' A \code{"BfMRI.sess"} object is a list of length \eqn{S}, where \eqn{S} is
#' the number of sessions in the analysis. Each list entry corresponds to a
#' separate session, and should itself be a list with these named fields:
#'
#' \describe{
#' \item{BOLD}{a \eqn{T \times V} BOLD matrix. Rows are time points; columns are data locations (vertices/voxels).}
#' \item{design}{a \eqn{T \times K} matrix containing the \eqn{K} task regressors. See \code{\link{make_HRFs}}.}
#'   \item{nuisance}{an optional argument. \eqn{T \times J} matrix containing the \eqn{J} nuisance regressors.}
#' }
#'
#' In addition, all sessions must have the same number of data locations, \eqn{V}, and tasks, \eqn{K}.
#'
#' @examples
#' nT <- 180
#' nV <- 700
#' BOLD1 <- matrix(rnorm(nT*nV), nrow=nT)
#' BOLD2 <- matrix(rnorm(nT*nV), nrow=nT)
#' onsets1 <- list(taskA=cbind(c(2,17,23),4)) # one task, 3 four sec-long stimuli
#' onsets2 <- list(taskA=cbind(c(1,18,25),4))
#' TR <- .72 # .72 seconds per volume, or (1/.72) Hz
#' duration <- nT # session is 180 volumes long (180*.72 seconds long)
#' design1 <- make_HRFs(onsets1, TR, duration)$design
#' design2 <- make_HRFs(onsets2, TR, duration)$design
#' x <- list(
#' sessionOne = list(BOLD=BOLD1, design=design1),
#' sessionTwo = list(BOLD=BOLD2, design=design2)
#' )
#' stopifnot(is.BfMRI.sess(x))
#'
#' @param x The putative \code{"BfMRI.sess"} object.
#'
#' @return Logical. Is \code{x} a valid \code{"BfMRI.sess"} object?
#'
#' @export
is.BfMRI.sess <- function(x){
if (!is.list(x)) { message("`x` must be a list."); return(FALSE) }
nS <- length(x)
# Check the first session.
if (!is.a_session(x[[1]])) {
message("The first entry of `x` is not a valid session.")
return(FALSE)
}
# We're done now, if there's only one session.
if (nS < 2) { return(TRUE) }
# Check the rest of the sessions.
is_sess_vec <- vapply(x, is.a_session, FALSE)
if (!all(is_sess_vec)) {
message(sum(!is_sess_vec), " out of ", nS, " entries in `x` are not valid sessions.")
return(FALSE)
}
# Check that all sessions have the same number of data locations and tasks.
nV <- ncol(x[[1]]$BOLD)
nK <- ncol(x[[1]]$design)
for (ii in seq(2, nS)) {
if (ncol(x[[ii]]$BOLD) != nV) {
message(
"The first session has ", nV, " locations, but session ", ii,
" (and maybe others) does not. All sessions must have the same number ",
"of locations."
)
return(FALSE)
}
    if (ncol(x[[ii]]$design) != nK) {
      message(
        "The first session has ", nK, " tasks, but session ", ii,
        " (and maybe others) does not. All sessions must have the same number ",
        "of tasks."
      )
      return(FALSE)
    }
}
}
return(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/is.BfMRI.sess.R
|
#' Central derivative
#'
#' Take the central derivative of numeric vectors by averaging the forward and
#' backward differences.
#' @param x A numeric matrix, or a vector which will be converted to a
#' single-column matrix.
#' @return A matrix or vector the same dimensions as \code{x}, with the
#' derivative taken for each column of \code{x}. The first and last rows may
#' need to be deleted, depending on the application.
#' @export
#'
#' @examples
#' x <- cderiv(seq(5))
#' stopifnot(all(x == c(.5, 1, 1, 1, .5)))
#'
cderiv <- function(x){
x <- as.matrix(x)
dx <- diff(x)
(rbind(0, dx) + rbind(dx, 0)) / 2
}
#' Make HRFs
#'
#' Create HRF design matrix columns from onsets and durations
#'
#' @param onsets \eqn{L}-length list in which the name of each element is the
#' name of the corresponding task, and the value of each element is a matrix of
#' onsets (first column) and durations (second column) for each stimuli (each
#' row) of the corresponding task.
#'
#' @param TR Temporal resolution of the data, in seconds.
#' @param duration The number of volumes in the fMRI data.
#' @param dHRF Set to \code{1} to add the temporal derivative of each column
#' in the design matrix, \code{2} to add the second derivatives too, or
#' \code{0} to not add any columns. Default: \code{0}.
#' @param dHRF_as Only applies if \code{dHRF > 0}. Model the temporal
#' derivatives as \code{"nuisance"} signals to regress out, \code{"tasks"}, or
#' \code{"auto"} to treat them as tasks unless the total number of columns in
#' the design matrix (i.e. the total number of tasks, times `dHRF+1`), would be
#' \code{>=10}, the limit for INLA.
#' @param downsample Downsample factor for convolving stimulus boxcar or stick
#' function with canonical HRF. Default: \code{100}.
#' @param verbose If applicable, print a message saying how the HRF derivatives
#' will be modeled? Default: \code{FALSE}.
#'
#' @return List with the design matrix and/or the nuisance matrix containing the
#' HRF-convolved stimuli as columns, depending on \code{dHRF_as}.
#'
#' @importFrom stats convolve
#'
#' @examples
#' onsets <- list(taskA=cbind(c(2,17,23),4)) # one task, 3 four sec-long stimuli
#' TR <- .72 # .72 seconds per volume, or (1/.72) Hz
#' duration <- 300 # session is 300 volumes long (300*.72 seconds long)
#' make_HRFs(onsets, TR, duration)
#'
#' @export
make_HRFs <- function(
onsets, TR, duration,
dHRF=c(0, 1, 2),
dHRF_as=c("auto", "nuisance", "task"),
downsample=100,
verbose=FALSE){
dHRF <- as.numeric(match.arg(as.character(dHRF), c("0", "1", "2")))
if (dHRF == 0) {
if (identical(dHRF_as, "nuisance") || identical(dHRF_as, "task")) {
warning("`dHRF_as` is only applicable if `dHRF > 0`. If `dHRF == 0`, there's no need to specify `dHRF_as`.")
}
}
dHRF_as <- match.arg(dHRF_as, c("auto", "nuisance", "task"))
nK <- length(onsets) #number of tasks
if (dHRF > 0 && dHRF_as=="auto") {
nJ <- (dHRF+1) * nK # number of design matrix columns
if (nJ > 5) {
if (verbose) {
message("Modeling the HRF derivatives as nuisance signals.")
}
dHRF_as <- "nuisance"
} else {
if (verbose) {
message("Modeling the HRF derivatives as tasks signals.")
}
dHRF_as <- "task"
}
}
  task_names <- if (is.null(names(onsets))) {
    paste0('task', 1:nK)
  } else {
    names(onsets)
  }
  nsec <- duration*TR # Total time of experiment in seconds
  stimulus <- rep(0, nsec*downsample) # For stick function to be used in convolution
  inds <- seq(TR*downsample, nsec*downsample, by = TR*downsample) # Extract EVs as a function of TR
design <- nuisance <- vector("list", length=dHRF+1)
for (dd in seq(0, dHRF)) {
dname_dd <- switch(dd+1, "HRF", "dHRF", "d2HRF")
theHRF_dd <- matrix(NA, nrow=duration, ncol=nK)
colnames(theHRF_dd) <- paste0(task_names, "_", dname_dd)
# canonical HRF to be used in convolution
HRF_dd <- HRF(seq(0, 30, by=1/downsample), deriv=dd)[-1]
for (kk in seq(nK)) {
onsets_k <- onsets[[kk]][,1] #onset times in scans
durations_k <- onsets[[kk]][,2] #durations in scans
# Define stimulus function
stimulus_k <- stimulus
for (ii in seq(length(onsets_k))) {
start_ii <- round(onsets_k[ii]*downsample)
end_ii <- round(onsets_k[ii]*downsample + durations_k[ii]*downsample)
stimulus_k[start_ii:end_ii] <- 1
}
      # Convolve boxcar with canonical HRF & add to this derivative's design matrix
HRF_k <- convolve(stimulus_k, rev(HRF_dd), type='open')
theHRF_dd[,kk] <- HRF_k[inds]
}
if (dd > 0 && dHRF_as == "nuisance") {
nuisance[[dd+1]] <- theHRF_dd
} else {
design[[dd+1]] <- theHRF_dd
}
}
list(
design=do.call(cbind, design),
nuisance=do.call(cbind, nuisance)
)
}
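## A short hedged sketch extending the roxygen example above (kept as
## comments; assumes the `make_HRFs` definition above): with `dHRF = 1` and
## `dHRF_as = "nuisance"`, the temporal-derivative columns are returned in
## the `nuisance` element rather than in `design`.
# onsets <- list(taskA=cbind(c(2,17,23),4))
# hrfs <- make_HRFs(onsets, TR=.72, duration=300, dHRF=1, dHRF_as="nuisance")
# colnames(hrfs$design) # "taskA_HRF"
# colnames(hrfs$nuisance) # "taskA_dHRF"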
#' Canonical (double-gamma) HRF
#'
#' Calculate the HRF from a time vector and parameters. Optionally compute the
#' first or second derivative of the HRF instead.
#'
#' @param t time vector
#' @param deriv \code{0} (default) for the HRF, \code{1} for the first derivative
#' of the HRF, or \code{2} for the second derivative of the HRF.
#' @param a1 delay of response. Default: \code{6}
#' @param b1 response dispersion. Default: \code{0.9}
#' @param a2 delay of undershoot. Default: \code{12}
#' @param b2 dispersion of undershoot. Default: \code{0.9}
#' @param c scale of undershoot. Default: \code{0.35}
#'
#' @return HRF vector (or dHRF, or d2HRF) corresponding to time
#'
#' @examples
#' downsample <- 100
#' HRF(seq(0, 30, by=1/downsample))
#'
#' @importFrom fMRItools is_1
#' @export
HRF <- function(t, deriv=0, a1 = 6, b1 = 0.9, a2 = 12, b2 = 0.9, c = 0.35) {
# Arg checks
stopifnot(is.numeric(t))
deriv <- as.numeric(match.arg(as.character(deriv), c("0", "1", "2")))
stopifnot(is_1(a1, "numeric"))
stopifnot(is_1(b1, "numeric"))
stopifnot(is_1(a2, "numeric"))
stopifnot(is_1(b2, "numeric"))
stopifnot(is_1(c, "numeric"))
# HRF
if (deriv==0) {
out <- ((t/(a1*b1))^a1) * exp(-(t-a1*b1)/b1) - c * ((t/(a2*b2))^a2) * exp(-(t - a2*b2)/b2)
# dHRF
} else if (deriv==1) {
C1 <- (1/(a1*b1))^a1
C2 <- c*(1/(a2*b2))^a2
A1 <- a1*t^(a1 - 1)*exp(-(t - a1*b1)/b1)
A2 <- a2*t^(a2 - 1)*exp(-(t - a2*b2)/b2)
B1 <- t^a1 / b1 * exp(-(t - a1*b1)/b1)
B2 <- t^a2 / b2 * exp(-(t - a2*b2)/b2)
out <- C1*(A1 - B1) - C2 * (A2 - B2)
# ddHRF
} else if (deriv==2) {
C1 <- (1/(a1*b1))^a1
C2 <- c*(1/(a2*b2))^a2
dA1 <- a1 * ((a1 - 1) - t/b1) * t^(a1-2) * exp(-(t - a1*b1)/b1)
dB1 <- (1/b1) * (a1 - (t / b1)) * t^(a1 - 1) * exp(-(t - a1*b1)/b1)
dA2 <- a2 * ((a2 - 1) - t/b2) * t^(a2-2) * exp(-(t - a2*b2)/b2)
dB2 <- (1/b2) * (a2 - (t / b2)) * t^(a2 - 1) * exp(-(t - a2*b2)/b2)
out <- C1 * (dA1 - dB1) - C2 * (dA2 - dB2)
}
out
}
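## A minimal numerical check (kept as comments; assumes the `HRF` and
## `cderiv` definitions above): the analytic derivative from `deriv = 1`
## should approximately match the central difference of the canonical HRF.
# tt <- seq(0, 30, by=0.01)
# h0 <- HRF(tt) # canonical HRF
# h1 <- HRF(tt, deriv=1) # analytic first derivative
# h1_num <- c(cderiv(h0)) / 0.01 # numerical derivative (endpoints invalid)
# idx <- seq(2, length(tt) - 1)
# max(abs(h1[idx] - h1_num[idx])) # should be close to zero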
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/make_HRFs.R
|
#' Make Mesh
#'
#' Make INLA triangular mesh from faces and vertices
#'
#' @inheritSection INLA_Description INLA Requirement
#'
#' @inheritParams vertices_Param
#' @inheritParams faces_Param
#' @param use_INLA (logical) Use the INLA package to make the mesh? Default:
#' \code{TRUE}. Otherwise, mesh construction is based on an internal function,
#' \code{galerkin_db}.
#'
#' @return INLA triangular mesh
#'
#' @export
make_mesh <- function(vertices, faces, use_INLA = TRUE){
# Check index of faces
if(min(faces) == 0){
faces <- faces + 1
}
# Construct mesh
if(use_INLA) {
if (!requireNamespace("INLA", quietly = TRUE)) {
stop("`make_mesh` requires the `INLA` package. Please install it.", call. = FALSE)
}
mesh <- INLA::inla.mesh.create(loc = as.matrix(vertices), tv = as.matrix(faces))
} else {
gal_mesh <- galerkin_db(faces, vertices, surface = TRUE)
mesh <- list(
n = nrow(gal_mesh$C),
loc = vertices,
graph = list(tv = faces),
idx = list(loc = seq(nrow(gal_mesh$C)))
)
class(mesh) <- "inla.mesh"
}
return(mesh)
}
#' Remove part of a mesh
#'
#' @param mask a 0-1 vector
#' @param mesh a mesh resulting from a call to \code{make_mesh} with
#' \code{use_INLA = FALSE}
#'
#' @return a mesh object with fewer vertices than the original input mesh
#' @keywords internal
submesh <- function(mask, mesh) {
t.count <- Matrix::rowSums(matrix((mask >= 0.5)[mesh$graph$tv], nrow(mesh$graph$tv),3))
tri <- which(t.count == 3)
tv <- mesh$graph$tv[tri, , drop = FALSE]
v <- sort(unique(as.vector(tv)))
idx <- rep(as.integer(NA), nrow(mesh$loc))
idx[v] <- seq_len(length(v))
tv <- matrix(idx[tv], nrow(tv), 3)
loc <- mesh$loc[v, , drop = FALSE]
mesh <- INLA::inla.mesh.create(loc = loc, tv = tv)
# mesh <- make_mesh(vertices = loc,faces = tv,use_INLA = FALSE)
idx <- rep(as.integer(NA), length(idx))
idx[v] <- mesh$idx$loc
mesh$idx$loc <- idx
class(mesh) <- "inla.mesh"
return(mesh)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/make_mesh.R
|
#' Organize data for Bayesian GLM
#'
#' Transforms the usual TxV BOLD data matrix Y into vector form, and
#' the usual TxK design matrix X into big sparse matrix form for use in
#' Bayesian GLM.
#'
#' The Bayesian GLM requires \code{y} (a vector of length TV containing the BOLD data)
#' and \code{X_k} (a sparse TVxV matrix corresponding to the kth task regressor) for each task k.
#' The design matrices are combined as \code{A=cbind(X_1,...,X_K)}.
#'
#' @param y the TxV data matrix containing the fMRI timeseries
#' @param X the TxK design matrix with K task-related columns
#'
#' @return A list containing fields \code{BOLD} and \code{design} (see Details)
#'
#' @details The Bayesian GLM requires \code{y} (a vector of length TV containing the BOLD data)
#' and \code{X_k} (a sparse TVxV matrix corresponding to the kth task regressor) for each task k.
#' The design matrices are combined as \code{A=cbind(X_1,...,X_K)}.
#'
#' @importFrom Matrix sparseMatrix
#'
#' @keywords internal
organize_data <- function(y, X){
if (ncol(y) == nrow(X)) {
warning('Transposing fMRI data (`y`) so rows are time points and columns are locations.')
y <- t(y)
}
nT <- nrow(y)
nV <- ncol(y)
y <- as.vector(y) #makes a vector (y_1,...,y_V), where y_v is the timeseries for data location v
ix <- seq(nT*nV)
iy <- rep(seq(nV), each = nT)
nK <- ncol(X)
for (kk in seq(nK)) {
X_k <- Matrix::sparseMatrix(ix, iy, x=rep(X[,kk], nV))
bigX <- if (kk==1) { X_k } else { cbind(bigX, X_k) }
}
list(BOLD=y, design=bigX)
}
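## Illustrative sketch of this internal helper (kept as comments): with
## nT = 3 time points, nV = 2 locations, and nK = 1 task, `design` is a
## (nT*nV) x (nV*nK) sparse matrix whose column v holds the task regressor
## in the rows belonging to location v.
# y <- matrix(rnorm(6), nrow=3, ncol=2) # 3 x 2: time points x locations
# X <- matrix(seq(3), ncol=1) # 3 x 1 design
# dat <- organize_data(y, X)
# length(dat$BOLD) # 6 = nT * nV
# dim(dat$design) # 6 x 2 = (nT*nV) x (nV*nK)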
#' Organize prewhitened data for Bayesian GLM
#'
#' Transforms the usual TxV BOLD data matrix Y into vector form, and
#' the usual TxK design matrix X into big sparse matrix form for use in
#' Bayesian GLM.
#'
#' The Bayesian GLM requires \code{y} (a vector of length TV containing the BOLD data)
#' and \code{X_k} (a sparse TVxV matrix corresponding to the kth task regressor) for each task k.
#' The design matrices are combined as \code{A=cbind(X_1,...,X_K)}.
#'
#' @param y the TxV data matrix containing the fMRI timeseries
#' @param X the TxK design matrix with K task-related columns
#' @param transpose If \code{TRUE} (default), transpose \code{y} when it has
#' more rows than columns, so that rows are data locations and columns are
#' time points. Note: this check is not always correct for subcortical regions.
#'
#' @return A list containing fields \code{y} and \code{X} (see Details)
#'
#' @details The Bayesian GLM requires \code{y} (a vector of length TV containing the BOLD data)
#' and \code{X_k} (a sparse TVxV matrix corresponding to the kth task regressor) for each task k.
#' The design matrices are combined as \code{A=cbind(X_1,...,X_K)}.
#'
#' @importFrom Matrix bdiag
#'
#' @keywords internal
organize_data_pw <- function(y, X, transpose = TRUE){
if (nrow(y) > ncol(y) && transpose) {
    warning('More rows than columns. Transposing matrix so rows are data locations and columns are time points')
y <- t(y)
}
nT <- nrow(y)
is_missing <- is.na(y[1,])
nK <- ncol(X) / sum(!is_missing)
nV <- sum(!is_missing)
y <- as.vector(y) #makes a vector (y_1,...,y_V), where y_v is the timeseries for data location v
# bigX <- Matrix(0,nT*V,V*K)
for (kk in seq(nK)) {
k_inds <- seq(kk, nV*nK,by = nK)
X_k <- X[,k_inds]
bigX <- if (kk==1) { X_k } else { cbind(bigX, X_k) }
}
list(y=y, X=bigX)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/organize_data.R
|
#' Organize replicates
#'
#' beta and repl vectors are of length \eqn{nvox \times n_sess \times n_task}.
#' The ith repl vector is an indicator vector for the cells corresponding to the ith column of x.
#' The ith beta vector contains data indices (e.g. 1,...,V) in the cells corresponding to the ith column of x.
#'
#' @param n_sess The number of sessions sharing hyperparameters (can be different tasks)
#' @param task_names Vector of names for each task
#' @inheritParams mesh_Param_inla
# @inheritParams mesh_Param_either
#'
#' @return replicates vector and betas for sessions
#'
#' @keywords internal
#'
organize_replicates <- function(n_sess, task_names, mesh){
if (!(inherits(mesh, "inla.mesh") || inherits(mesh, "BayesfMRI.spde"))) {
stop('mesh must be of class inla.mesh (for surface data, see `help(make_mesh)`) or BayesfMRI.spde (for subcortical data, see `help(create_spde_vol3D)`)')
}
spatial <- mesh$idx$loc
nvox <- length(spatial)
n_task <- length(task_names)
grps <- ((1:(n_sess*n_task) + (n_task-1)) %% n_task) + 1 # 1, 2, .. n_task, 1, 2, .. n_task, ...
repls <- vector('list', n_task)
betas <- vector('list', n_task)
names(betas) <- task_names
for(i in 1:n_task){
inds_i <- (grps == i)
#set up replicates vectors
sess_NA_i <- rep(NA, n_sess*n_task)
sess_NA_i[inds_i] <- 1:n_sess
repls[[i]] <- rep(sess_NA_i, each=nvox)
names(repls)[i] <- paste0('repl',i)
#set up ith beta vector with replicates for sessions
NAs <- rep(NA, nvox)
preNAs <- rep(NAs, times=(i-1))
postNAs <- rep(NAs, times=(n_task-i))
betas[[i]] <- rep(c(preNAs, spatial, postNAs), n_sess)
}
result <- list(betas=betas, repls=repls)
return(result)
}
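## Illustrative sketch (kept as comments): `organize_replicates` only uses
## `mesh$idx$loc` and the object's class, so a minimal mock mesh suffices
## for seeing the output structure.
# mesh <- structure(list(idx=list(loc=seq(3))), class="inla.mesh")
# out <- organize_replicates(n_sess=2, task_names=c("A", "B"), mesh=mesh)
# out$betas$A # data indices 1:3 in task-A cells, NA elsewhere (length 12)
# out$repls$repl1 # session number (1 or 2) in task-A cells, NA elsewhere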
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/organize_replicates.R
|
#' S3 method: use \code{\link[ciftiTools]{view_xifti_surface}} to plot a \code{"BayesGLM_cifti"} object
#'
#' @param x An object of class "BayesGLM_cifti"
#' @param idx Which task should be plotted? Give the numeric indices or the
#' names. \code{NULL} (default) will show all tasks. This argument overrides
#' the \code{idx} argument to \code{\link[ciftiTools]{view_xifti_surface}}.
#' @param session Which session should be plotted? \code{NULL} (default) will
#' use the first.
#' @param method "Bayes" or "classical". \code{NULL} (default) will use
#' the Bayesian results if available, and the classical results if not.
#' @param zlim Overrides the \code{zlim} argument for
#' \code{\link[ciftiTools]{view_xifti_surface}}. Default: \code{c(-1, 1)}.
#' @param ... Additional arguments to \code{\link[ciftiTools]{view_xifti_surface}}
#'
#' @method plot BayesGLM_cifti
#'
#' @importFrom ciftiTools view_xifti_surface
#' @export
#'
#' @return Result of the call to \code{ciftiTools::view_cifti_surface}.
#'
plot.BayesGLM_cifti <- function(x, idx=NULL, session=NULL, method=NULL, zlim=c(-1, 1), ...){
# Method
if (is.null(method)) {
method <- ifelse(
is.null(x$task_estimates_xii$Bayes[[1]]),
"classical", "Bayes"
)
}
method <- match.arg(method, c("classical", "Bayes"))
if (is.null(x$task_estimates_xii[[method]])) {
stop(paste("Method", gsub("betas_", "", method, fixed=TRUE), "does not exist."))
}
# Session
if (is.null(session)) {
if (length(x$task_estimates_xii[[method]]) > 1) { message("Plotting the first session.") }
session <- 1
} else if (is.numeric(session)) {
stopifnot(session %in% seq(length(x$session_names)))
}
the_xii <- x$task_estimates_xii[[method]][[session]]
if (is.null(the_xii)) { stop(paste("Session", session, "does not exist.")) }
# Column index
if (is.null(idx)) {
idx <- seq_len(ncol(do.call(rbind, the_xii$data)))
} else if (is.character(idx)) {
idx <- match(idx, the_xii$meta$cifti$names)
}
# Plot
ciftiTools::view_xifti_surface(the_xii, idx=idx, zlim=zlim, ...)
}
#' S3 method: use \code{\link[ciftiTools]{view_xifti_surface}} to plot a \code{"act_BayesGLM_cifti"} object
#'
#' @param x An object of class "act_BayesGLM_cifti"
#' @param idx Which task should be plotted? Give the numeric indices or the
#' names. \code{NULL} (default) will show all tasks. This argument overrides
#' the \code{idx} argument to \code{\link[ciftiTools]{view_xifti_surface}}.
#' @param session Which session should be plotted? \code{NULL} (default) will
#' use the first.
#' @param ... Additional arguments to \code{\link[ciftiTools]{view_xifti_surface}}
#'
#' @method plot act_BayesGLM_cifti
#'
#' @importFrom ciftiTools view_xifti_surface
#' @export
#'
#' @return Result of the call to \code{ciftiTools::view_cifti_surface}.
#'
plot.act_BayesGLM_cifti <- function(x, idx=NULL, session=NULL, ...){
# Session
if (is.null(session)) {
if (length(x$activations_xii) > 1) { message("Plotting the first session.") }
session <- 1
} else if (is.numeric(session)) {
stopifnot(session %in% seq(length(x$activations_xii)))
}
the_xii <- x$activations_xii[[session]]
if (is.null(the_xii)) { stop(paste("Session", session, "does not exist.")) }
# Column index
if (is.null(idx)) {
idx <- seq_len(ncol(do.call(rbind, the_xii$data)))
} else if (is.character(idx)) {
idx <- match(idx, the_xii$meta$cifti$names)
}
# Plot
ciftiTools::view_xifti_surface(the_xii, idx=idx, ...)
}
#' S3 method: use \code{\link[ciftiTools]{view_xifti_surface}} to plot a \code{"BayesGLM2_cifti"} object
#'
#' @param x An object of class "BayesGLM2_cifti"
#' @param idx Which contrast should be plotted? Give the numeric index.
#' \code{NULL} (default) will show all contrasts. This argument overrides
#' the \code{idx} argument to \code{\link[ciftiTools]{view_xifti_surface}}.
#' @param what Estimates of the \code{"contrasts"} (default), or their
#' thresholded \code{"activations"}.
#' @param ... Additional arguments to \code{\link[ciftiTools]{view_xifti_surface}}
#'
#' @method plot BayesGLM2_cifti
#'
#' @importFrom ciftiTools view_xifti_surface
#' @export
#'
#' @return Result of the call to \code{ciftiTools::view_cifti_surface}.
#'
plot.BayesGLM2_cifti <- function(x, idx=NULL, what=c("contrasts", "activations"), ...){
what <- match.arg(what, c("contrasts", "activations"))
what <- switch(what, contrasts="contrast_estimates_xii", activations="activations_xii")
the_xii <- x[[what]]
if (what=="activations_xii") {
if (is.null(the_xii)) {
stop("No activations in `'BayesGLM2'` object. Specify `excursion_type` in the `BayesGLM2` call and re-run.")
}
names(the_xii$meta$cifti$labels) <- paste0(
names(the_xii$meta$cifti$labels), ", '",
x$BayesGLM2_results$excursion_type, "'"
)
}
# Column index
if (is.null(idx)) { idx <- seq_len(ncol(do.call(rbind, the_xii$data))) }
# Plot
ciftiTools::view_xifti_surface(the_xii, idx=idx, ...)
}
#' S3 method: use \code{\link[ciftiTools]{view_xifti}} to plot a \code{"prev_BayesGLM_cifti"} object
#'
#' @param x An object of class "prev_BayesGLM_cifti"
#' @param idx Which task should be plotted? Give the numeric indices or the
#' names. \code{NULL} (default) will show all tasks. This argument overrides
#' the \code{idx} argument to \code{\link[ciftiTools]{view_xifti}}.
#' @param session Which session should be plotted? \code{NULL} (default) will
#' use the first.
#' @param drop_zeros Color locations without any activation across all results
#' (zero prevalence) the same color as the medial wall? Default: \code{NULL} to
#' drop the zeros if only one \code{idx} is being plotted.
#' @param colors,zlim See \code{\link[ciftiTools]{view_xifti}}. Here, the defaults
#' are overrided to use the Viridis \code{"plasma"} color scale between
#' \code{1/nA} and 1, where \code{nA} is the number of results in \code{x}.
#' @param ... Additional arguments to \code{\link[ciftiTools]{view_xifti}}
#'
#' @method plot prev_BayesGLM_cifti
#'
#' @importFrom ciftiTools view_xifti
#' @importFrom fMRItools is_1
#' @export
#'
#' @return Result of the call to \code{ciftiTools::view_cifti_surface}.
#'
plot.prev_BayesGLM_cifti <- function(
x, idx=NULL, session=NULL,
drop_zeros=NULL, colors="plasma",
zlim=c(round(1/x$n_results-.005, 2), 1), ...){
# Session
if (is.null(session)) {
if (length(x$prev_xii) > 1) { message("Plotting the first session.") }
session <- 1
} else if (is.numeric(session)) {
stopifnot(length(session)==1)
stopifnot(session %in% seq(length(x$prev_xii)))
}
the_xii <- x$prev_xii[[session]]
if (is.null(the_xii)) { stop(paste("Session", session, "does not exist.")) }
# Column index
if (is.null(idx)) {
idx <- seq_len(ncol(do.call(rbind, the_xii$data)))
} else if (is.character(idx)) {
idx <- match(idx, the_xii$meta$cifti$names)
}
if (is.null(drop_zeros)) { drop_zeros <- length(idx) == 1 }
stopifnot(is_1(drop_zeros, "logical"))
if (drop_zeros) {
    the_xii <- ciftiTools::move_to_mwall(the_xii, 0)
}
# Plot
ciftiTools::view_xifti_surface(the_xii, idx=idx, colors=colors, zlim=zlim, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/plot.R
|
#' Estimate residual autocorrelation for prewhitening
#'
#' @param resids Estimated residuals
#' @param ar_order,aic Order of the AR model used to prewhiten the data at each location.
#' If \code{!aic} (default), the order will be exactly \code{ar_order}. If \code{aic},
#' the order will be between zero and \code{ar_order}, as determined by the AIC.
#' @importFrom stats ar.yw
#'
#' @return Estimated AR coefficients and residual variance at every vertex
pw_estimate <- function(resids, ar_order, aic=FALSE){
V <- ncol(resids)
AR_coefs <- matrix(NA, V, ar_order)
AR_resid_var <- rep(NA, V)
AR_AIC <- if (aic) {rep(NA, V) } else { NULL }
for (v in seq(V)) {
if (is.na(resids[1,v])) { next }
# # If `AIC`, overwrite the model order with the one selected by `cAIC`.
# if (aic) { ar_order <- which.min(cAIC(resids, order.max=ar_order)) - 1 }
ar_v <- ar.yw(resids[,v], aic = aic, order.max = ar_order)
aic_order <- ar_v$order # same as length(ar_v$ar)
AR_coefs[v,] <- c(ar_v$ar, rep(0, ar_order-aic_order)) # The AR parameter estimates
AR_resid_var[v] <- ar_v$var.pred # Residual variance
if (aic) { AR_AIC[v] <- ar_v$order } # Model order
}
list(phi = AR_coefs, sigma_sq = AR_resid_var, aic = AR_AIC)
}
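## Quick hedged sketch (kept as comments): recover AR(2) coefficients from
## simulated residuals at a few locations.
# set.seed(1)
# resids <- sapply(seq(5), function(v) arima.sim(list(ar=c(.4, .2)), n=200))
# pw <- pw_estimate(resids, ar_order=2)
# pw$phi # 5 x 2 matrix of AR coefficient estimates (rows ~ c(.4, .2))
# pw$sigma_sq # length-5 vector of innovation variance estimates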
#' Corrected AIC To-Do
#'
#' Get corrected AIC
#'
#' @keywords internal
cAIC <- function(...){invisible(NULL)}
#' Smooth AR coefficients and white noise variance
#'
#' @inheritParams vertices_Param
#' @inheritParams faces_Param
#' @param mask A logical vector indicating, for each vertex, whether to include
#' it in smoothing. \code{NULL} (default) will use a vector of all \code{TRUE},
#' meaning that no vertex is masked out; all are used for smoothing.
#' @param AR A Vxp matrix of estimated AR coefficients, where V is the number of vertices and p is the AR model order
#' @param var A vector length V containing the white noise variance estimates from the AR model
#' @param FWHM FWHM parameter for smoothing. Remember that
#' \eqn{\sigma = \frac{FWHM}{2\sqrt{2\log(2)}}}. Set to \code{0} or \code{NULL}
#' to not do any smoothing. Default: \code{5}.
#'
#' @importFrom ciftiTools smooth_cifti make_surf
#'
#' @return Smoothed AR coefficients and residual variance at every vertex
pw_smooth <- function(vertices, faces, mask=NULL, AR, var, FWHM=5){
if (is.null(mask)) { mask <- rep(TRUE, nrow(vertices)) }
V <- sum(mask)
V1 <- nrow(AR)
V2 <- length(var)
  if(V != V1) stop('Number of rows in AR must match number of in-mask vertices')
  if(V != V2) stop('Length of var must match number of in-mask vertices')
surf_smooth <- make_surf(
list(
pointset = vertices,
triangle = faces
)
)
AR_xif <- ciftiTools:::make_xifti(
cortexL = AR,
surfL = surf_smooth,
cortexL_mwall = mask
)
#AR_xif$meta$cifti$brainstructures <- "left"
AR_smoothed <- suppressWarnings(smooth_cifti(AR_xif, surf_FWHM = FWHM))
AR_smoothed <- AR_smoothed$data$cortex_left
var_xif <- ciftiTools:::make_xifti(
cortexL = var,
surfL = surf_smooth,
cortexL_mwall = mask
)
#var_xif$meta$cifti$brainstructures <- "left"
var_smoothed <- suppressWarnings(smooth_cifti(var_xif, surf_FWHM = FWHM))
var_smoothed <- var_smoothed$data$cortex_left
return(list(AR = AR_smoothed, var = var_smoothed))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/prewhitening.R
|
#' Retroactively mask locations from BayesGLM result.
#'
#' Work in progress.
#'
#' @param x The BayesGLM result
#' @param mask The mask to be applied to \code{x} (on top of any masks already
#' applied to it.)
#' @return The masked result
#'
#' @keywords internal
retro_mask_BGLM <- function(x, mask){
stopifnot(inherits(x, "BayesGLM"))
nS <- length(x$session_names)
nK <- length(x$task_names)
nV <- sum(x$mask)
nT <- length(x$y) / nV / nS
stopifnot(nT == round(nT))
stopifnot(is.logical(mask))
stopifnot(nV == length(mask))
stopifnot(sum(mask) > 0)
mask2 <- x$mask
x$mask[x$mask][!mask] <- FALSE
x$mask[!mask2] <- FALSE
for (ii in seq(length(x$task_estimates))) {
x$task_estimates[[ii]][!mask2,] <- NA
}
if ("result_classical" %in% names(x)) {
for (ii in seq(length(x$result_classical))) {
x$result_classical[[ii]]$estimates[!mask2,] <- NA
x$result_classical[[ii]]$SE_estimates[!mask2,] <- NA
x$result_classical[[ii]]$resids <- x$result_classical[[ii]]$resids[mask,]
x$result_classical[[ii]]$mask[!mask2] <- FALSE
}
}
x$mesh <- retro_mask_mesh(x$mesh, mask)
x$y <- c(matrix(x$y, ncol=nV)[,mask])
for (ii in seq(length(x$X))) {
x$X[[ii]] <- x$X[[ii]][rep(mask, each=nT),rep(mask, each=nK)]
}
x
}
#' Retroactively mask locations from mesh.
#'
#' Work in progress.
#'
#' @param x The mesh
#' @param mask The mask to be applied to \code{x} (on top of any masks already
#' applied to it.)
#' @return The masked result
#'
#' @keywords internal
retro_mask_mesh <- function(x, mask){
stopifnot(inherits(x, "inla.mesh"))
stopifnot(is.logical(mask))
# Determine which faces to keep.
face_ok <- rowSums(matrix(x$graph$tv %in% which(mask), ncol=3)) == 3
# Re-number the faces.
fidx <- vector("numeric", length(mask))
fidx[mask] <- seq(sum(mask))
faces <- matrix(fidx[c(x$graph$tv[face_ok,])], ncol=3)
# Make the new mesh.
make_mesh(vertices=x$loc[mask,], faces=faces)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/retro_mask.R
|
#' INLA
#'
#' @section INLA Requirement:
#' This function requires the \code{INLA} package, which is not a CRAN package.
#' See \url{https://www.r-inla.org/download-install} for easy installation instructions.
#'
#' @name INLA_Description
NULL
#' aic
#'
#' @param aic Use the AIC to select AR model order between \code{0} and
#' \code{ar_order}? Default: \code{FALSE}.
#'
#' @name aic_Param
NULL
#' ar_order
#'
#' @param ar_order (numeric) Controls prewhitening. If greater than zero, this
#' should be a number indicating the order of the autoregressive model to use
#' for prewhitening. If zero, do not prewhiten. Default: \code{6}. For
#' multi-session models, note that a single AR model is used; the parameters
#' are estimated by averaging the estimates from each session.
#'
#' @name ar_order_Param
NULL
#' ar_smooth
#'
#' @param ar_smooth (numeric) FWHM parameter for smoothing the AR model
#' coefficient estimates for prewhitening. Remember that
#' \eqn{\sigma = \frac{FWHM}{2\sqrt{2\log(2)}}}. Set to \code{0} or \code{NULL}
#' to not do any smoothing. Default: \code{5}.
#'
#' @name ar_smooth_Param
NULL
#' combine_sessions
#'
#' @param combine_sessions If multiple sessions are provided, should their data
#' be combined and analyzed as a single session?
#'
#' If \code{TRUE} (default), the multiple sessions will be concatenated along
#' time after scaling and nuisance regression, but before prewhitening. If
#' \code{FALSE}, each session will be analyzed separately, except that a single
#' estimate of the AR model coefficients for prewhitening is used, estimated
#' across all sessions.
#'
#' @name combine_sessions_Param
NULL
#' Bayes
#'
#' @param Bayes If \code{TRUE} (default), will fit a spatial Bayesian GLM in
#' addition to the classical GLM. (The classical GLM is always returned.)
#'
#' @name Bayes_Param
NULL
#' contrasts
#'
#' @param contrasts List of contrast vectors to be passed to \code{inla::inla}.
#'
#' @name contrasts_Param
NULL
#' EM
#'
#' @param EM (logical) Should the EM implementation of the Bayesian GLM be used?
#' Default: \code{FALSE}. This method is still in development.
#'
#' @name EM_Param
NULL
#' emTol
#'
#' @param emTol The stopping tolerance for the EM algorithm. Default:
#' \code{1e-3}.
#'
#' @name emTol_Param
NULL
#' faces
#'
#' @param faces An \eqn{F \times 3} matrix, where each row contains the vertex
#' indices for a given triangular face in the mesh. \eqn{F} is the number of
#' faces in the mesh.
#'
#' @name faces_Param
NULL
#' mask: vertices
#'
#' @param mask A length \eqn{V} logical vector indicating if each vertex is
#' within the input mask.
#'
#' @name mask_Param_vertices
NULL
#' mesh: INLA only
#'
#' @param mesh An \code{"inla.mesh"} object (see \code{\link{make_mesh}} for
#' surface data).
#'
#' @name mesh_Param_inla
NULL
#' mesh: either
#'
#' @param mesh An \code{"inla.mesh"} object (see \code{\link{make_mesh}} for
#' surface data)
# or \code{"BayesfMRI.spde"} object (see \code{\link{create_spde_vol3D}} for subcortical data).
#'
#' @name mesh_Param_either
NULL
#' max.threads
#'
#' @param max.threads The maximum number of threads to use in the inla-program
#' for model estimation. \code{0} (default) will use the maximum number of
#' threads allowed by the system.
#'
#' @name max.threads_Param
NULL
#' num.threads
#'
#' @param num.threads The maximum number of threads to use for parallel
#' computations: prewhitening parameter estimation, and the inla-program model
#' estimation. Default: \code{4}. Note that parallel prewhitening requires the
#' \code{parallel} package.
#'
#' @name num.threads_Param
NULL
#' return_INLA
#'
#' @param return_INLA Return the INLA model object? (It can be large.) Use
#' \code{"trimmed"} (default) to return only the more relevant results, which
#' is enough for both \code{\link{id_activations}} and \code{BayesGLM2},
#' \code{"minimal"} to return just enough for \code{\link{BayesGLM2}} but not
#' \code{id_activations}, or \code{"full"} to return the full output of
#' \code{inla}.
#'
#' @name return_INLA_Param
NULL
#' scale_BOLD
#'
#' @param scale_BOLD Option for scaling the BOLD response.
#'
#' \code{"auto"} (default) will use \code{"mean"} scaling except if demeaned
#' data is detected (if any mean is less than one), in which case \code{"sd"}
#' scaling will be used instead.
#'
#' \code{"mean"} scaling will scale the data to percent local signal change.
#'
#' \code{"sd"} scaling will scale the data by local standard deviation.
#'
#' \code{"none"} will only center the data, not scale it.
#'
#' @name scale_BOLD_Param
NULL
#' scale_design
#'
#' @param scale_design Scale the design matrix by dividing each column by its
#' maximum and then subtracting the mean? Default: \code{TRUE}. If
#' \code{FALSE}, the design matrix is centered but not scaled.
#'
#' @name scale_design_Param
NULL
#' seed
#'
#' @param seed Random seed (optional). Default: \code{NULL}.
#'
#' @name seed_Param
NULL
#' session_names
#'
#' @param session_names (Optional, and only relevant for multi-session modeling)
#' Names of each session. Default: \code{NULL}. In \code{\link{BayesGLM}} this
#' argument will overwrite the names of the list entries in \code{data}, if
#' both exist.
#'
#' @name session_names_Param
NULL
#' task_names
#'
#' @param task_names (Optional) Names of tasks represented in design matrix.
#'
#' @name task_names_Param
NULL
#' trim_INLA
#'
#' @param trim_INLA (logical) should the \code{INLA_model_obj} within the
#' result be trimmed to only what is necessary to use \code{id_activations}?
#' Default: \code{TRUE}.
#'
#' @name trim_INLA_Param
NULL
#' verbose
#'
#' @param verbose Should updates be printed? Use \code{1} (default) for
#' occasional updates, \code{2} for occasional updates as well as running INLA
#' in verbose mode (if applicable), or \code{0} for no updates.
#'
#' @name verbose_Param
NULL
#' vertices
#'
#' @param vertices A \eqn{V \times 3} matrix, where each row contains the Euclidean
#' coordinates at which a given vertex in the mesh is located. \eqn{V} is the
#' number of vertices in the mesh.
#'
#' @name vertices_Param
NULL
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/rox_args_docs.R
|
#' Scale the design matrix
#'
#' @param design_mat The original (unscaled) design matrix that is \eqn{T \times K}, where
#' \eqn{T} is the number of time points and \eqn{K} is the number of task covariates
#'
#' @return A scaled design matrix
#'
#' @keywords internal
#'
scale_design_mat <- function(design_mat) {
stopifnot(is.matrix(design_mat))
apply(design_mat,2,function(task) {
(task - mean(task)) / max(abs(task))
})
}
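## Tiny illustration (kept as comments): each column is centered, then
## divided by the maximum absolute value of the original column.
# X <- cbind(a=0:4, b=seq(0, 1, length.out=5))
# scale_design_mat(X)
# # column a becomes (-0.50, -0.25, 0.00, 0.25, 0.50)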
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/scale_design_mat.R
|
#' Trim INLA object
#'
#' Trim an INLA object to only include what is necessary for
#' \code{id_activations} or \code{BayesGLM2}.
#'
#' @param INLA_model_obj An object of class \code{"inla"}.
#' @param minimal Just keep the two parameters needed for \code{BayesGLM2}?
#' Default: \code{FALSE}. \code{!minimal} is required for
#' \code{id_activations}, but \code{minimal} is sufficient for
#' \code{BayesGLM2}.
#'
#' @return A trimmed \code{"inla"} object.
#' @keywords internal
trim_INLA_model_obj <- function(INLA_model_obj, minimal=FALSE) {
if (!inherits(INLA_model_obj, "inla")) {
stop("This function only applies to objects with the 'inla' class.")
}
out_object <- list()
out_object$misc$theta.mode <- INLA_model_obj$misc$theta.mode
out_object$misc$cov.intern <- INLA_model_obj$misc$cov.intern
if (!minimal) {
out_object$.args$control.compute$config <- INLA_model_obj$.args$control.compute$config
out_object$marginals.random <- INLA_model_obj$marginals.random
out_object$misc$configs <- INLA_model_obj$misc$configs
}
class(out_object) <- "inla"
out_object
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/trim_INLA_model_obj.R
|
# #' Plot BayesfMRI.spde objects
# #'
# #' @param object Object of class BayesfMRI.spde (see \code{help(create_spde_vol3D)})
# #' @param colors (Optional) Vector of colors to represent each region.
# #' @param alpha Transparency level.
# #'
# #' @return
# #' @export
# #' @importFrom geometry tetramesh
# #' @import rgl
# #' @importFrom viridis viridis_pal
#
# plot.BayesfMRI.spde <- function(object, colors=NULL, alpha=0.5){
# if(class(object) != 'BayesfMRI.spde') stop('object argument must be a BayesfMRI.spde object. See help(create_spde_vol3D).')
# num_regions <- length(object$vertices)
# if(is.null(colors)) colors <- viridis_pal()(num_regions)
# if(length(colors) < num_regions) {
# colors <- rep(colors, length.out=num_regions)
# warning('Fewer colors specified than number of regions in the mesh. Recycling colors to equal number of regions.')
# }
# for(ii in 1:num_regions){
# if(ii==1) tetramesh(object$faces[[ii]], object$vertices[[ii]], col=colors[ii], alpha=alpha)
# if(ii > 1) tetramesh(object$faces[[ii]], object$vertices[[ii]], clear=FALSE, col=colors[ii], alpha=alpha)
#
# }
# }
# #' Plot BayesGLM objects
# #'
# #' Summary method for class "BayesGLM"
# #'
# #' @param object an object of class "BayesGLM"
# #' @param session_name NULL if BayesGLM object contains a single session; otherwise, the name of the session whose estimates to plot
# #' @param pal If NULL, viridis palette with 64 colors will be used. Otherwise, specify a vector of color names.
# #' @param ... further arguments passed to or from other methods.
# #' @export
# #' @import viridis
# #' @method plot BayesGLM
# plot.BayesGLM <- function(object, session_name=NULL, pal=NULL, ...)
# {
# session_names <- names(object$task_estimates)
#
# if((is.null(session_name)) & (length(session_names) > 1)) stop('If BayesGLM object includes multiple sessions, you must specify which session to plot.')
# if(!is.null(session_name) & !(session_name %in% session_names)) stop('I expect the session_names argument to be one of the session names of the BayesGLM object, but it is not.')
#
# if(is.null(session_name) & (length(session_names) == 1)) session_name <- session_names
#
# ind <- which(names(object$task_estimates) == session_name) #which element of list
# est <- (object$task_estimates)[[ind]]
# K <- ncol(est)
#
#
# if(is.null(pal)) {
# nColors <- 64
# pal <- viridis_pal(option='plasma', direction=1)(nColors)
# } else {
# if(min(areColors(pal)) < 1) stop('At least one of the elements of the pal argument is not a valid color representation. See help(areColors).')
# nColors <- length(pal)
# }
#
#
# for(k in 1:K){
# x = est[,k]
# colindex <- as.integer(cut(x,breaks=nColors))
#
# #NEED TO CHECK WHICH TYPE OF BAYESGLM OBJECT (VOL OR CORTICAL) -- maybe use the mesh class? or the spde_obj class?
# #plot(mesh_LH_s$mesh,col=pal[colindex], rgl=TRUE)
#
# #tetramesh(object$spde_obj$faces, object$spde_obj$vertices, col=pal[colindex], clear=FALSE)
#
# }
#
#
# }
#
# #' Check whether each element of vector x is a valid color representation
# #'
# #' @param x Character vector
# #'
# #' @return A logical vector indicating which of the elements of x are valid color representations
# #' @importFrom grDevices col2rgb
# #' @export
#
# areColors <- function(x) {
# sapply(x, function(X) {
# tryCatch(is.matrix(col2rgb(X)),
# error = function(e) FALSE)
# })
# }
#' Sequential 2-means variable selection
#'
#' @param x A vector consisting of all variables of interest for a single draw
#' from a posterior distribution
#' @param b A scale parameter used to determine at what distance cluster centers
#' are considered to be the same.
#'
#' @return The number of nonzero values detected within x
#'
#' @importFrom stats kmeans
#' @keywords internal
s2m <- function(x,b){
two_means <- kmeans(abs(x),2)
zero_idx <- which(two_means$cluster == which.min(two_means$centers))
A <- x[zero_idx]
two_centers <- kmeans(abs(A),2,algorithm=c("Lloyd"))
iterations <- 1
while(abs(two_centers$centers[1, 1] - two_centers$centers[2, 1]) > b) {
zero_idx <- which(two_centers$cluster == which.min(two_centers$centers))
A <- A[zero_idx]
two_centers <- kmeans(abs(A),2,algorithm=c("Lloyd"))
iterations <- iterations + 1
}
num_nonzero <- length(x) - length(A)
return(num_nonzero)
}
#' Sequential 2-means on array B
#'
#' @param B An array of posterior samples (typically a matrix), in which the last margin corresponds to a single posterior sample
#' @param sigma A scale parameter used to determine at what distance cluster centers are considered to be the same.
#'
#' @return An array of dimension `head(dim(B),-1)` with a point estimate of B based on the sequential 2-means method
#'
#' @importFrom stats quantile median
#'
#' @keywords internal
#'
#' @md
s2m_B <- function(B,sigma){
nonzero_nums <- sapply(asplit(B,length(dim(B))),function(B_s) s2m(c(B_s),sigma))
num_nonzero <- ceiling(median(nonzero_nums))
median_B <- apply(B,seq(length(dim(B)) - 1),median)
cutoff <- quantile(c(abs(median_B)),1 - num_nonzero/length(median_B))
out <- median_B
  out[which(abs(out) < cutoff)] <- 0 # threshold by magnitude
return(out)
}
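## Hedged toy example (kept as comments): a 10-variable posterior with 200
## draws and 3 clearly nonzero entries; the `sigma` value here is a
## hypothetical choice for this toy data.
# set.seed(1)
# B <- rbind(matrix(rnorm(3*200, mean=5), 3), matrix(rnorm(7*200, sd=.1), 7))
# s2m_B(B, sigma=1) # approximately c(5, 5, 5, 0, 0, 0, 0, 0, 0, 0)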
#' Mask out invalid data
#'
#' Mask out data locations that are invalid (missing data, low mean, or low
#' variance) for any session.
#'
#' @param data A list of sessions, where each session is a list with elements
#' \code{BOLD}, \code{design}, and optionally \code{nuisance}. See
#' \code{?is.BfMRI.sess} for details.
#' @param meanTol,varTol Tolerance for mean and variance of each data location.
#' Locations which do not meet these thresholds are masked out of the analysis.
#' Defaults: \code{1e-6}.
#' @param verbose Print messages counting how many locations are removed?
#' Default: \code{TRUE}.
#'
#' @importFrom matrixStats colVars
#' @return A logical vector indicating locations that are valid across all sessions.
#'
#' @examples
#' nT <- 30
#' nV <- 400
#' BOLD1 <- matrix(rnorm(nT*nV), nrow=nT)
#' BOLD1[,seq(30,50)] <- NA
#' BOLD2 <- matrix(rnorm(nT*nV), nrow=nT)
#' BOLD2[,65] <- BOLD2[,65] / 1e10
#' data <- list(sess1=list(BOLD=BOLD1, design=NULL), sess2=list(BOLD=BOLD2, design=NULL))
#' make_mask(data)
#'
#' @export
make_mask <- function(data, meanTol=1e-6, varTol=1e-6, verbose=TRUE){
# For each BOLD data matrix,
mask_na <- mask_mean <- mask_var <- rep(TRUE, ncol(data[[1]]$BOLD))
for (ss in seq(length(data))) {
dss <- data[[ss]]$BOLD
# Mark columns with any NA or NaN values for removal.
dss_na <- is.na(dss) | is.nan(dss)
mask_na[apply(dss_na, 2, any)] <- FALSE
# Calculate means and variances of columns, except those with any NA or NaN.
# Mark columns with mean/var falling under the thresholds for removal.
mask_mean[mask_na][colMeans(dss[,mask_na,drop=FALSE]) < meanTol] <- FALSE
mask_var[mask_na][matrixStats::colVars(dss[,mask_na,drop=FALSE]) < varTol] <- FALSE
}
# Print counts of locations removed, for each reason.
if (verbose) {
warn_part1 <- " locations"
warn_part2 <- if (length(data) > 1) { " in at least one session.\n" } else { ".\n" }
if (any(!mask_na)) {
cat(paste0(
"\t", sum(!mask_na), warn_part1,
" removed due to NA/NaN values", warn_part2
))
warn_part1 <- " additional locations"
}
# Do not include NA locations in count.
mask_mean2 <- mask_mean | (!mask_na)
if (any(!mask_mean2)) {
cat(paste0(
"\t", sum(!mask_mean2), warn_part1,
" removed due to low mean", warn_part2
))
warn_part1 <- " additional locations"
}
# Do not include NA or low-mean locations in count.
mask_var2 <- mask_var | (!mask_mean) | (!mask_na)
if (any(!mask_var2)) {
cat(paste0(
"\t", sum(!mask_var2), warn_part1,
" removed due to low variance", warn_part2
))
}
}
# Return composite mask.
mask_na & mask_mean & mask_var
}
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/util.R
|
#' Surface area of each vertex
#'
#' Compute surface areas of each vertex in a triangular mesh.
#'
#' @inheritSection INLA_Description INLA Requirement
#'
#' @inheritParams mesh_Param_inla
#'
#' @return Vector of areas
#'
#' @export
vertex_areas <- function(mesh) {
if(missing(mesh)) { stop("`mesh` input is required.")}
if (!inherits(mesh, "inla.mesh")) {
stop("`mesh` needs to be of class `'inla.mesh'`.")
}
diag(INLA::inla.fmesher.smorg(
mesh$loc,mesh$graph$tv, fem = 0, output = list("c0")
)$c0)
}
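## Hedged sketch (kept as comments; requires the INLA package, and uses
## `make_mesh` defined earlier in this package): vertex areas partition the
## mesh surface, so they sum to the total area of this unit square built
## from two triangles.
# if (requireNamespace("INLA", quietly=TRUE)) {
#   vtx <- matrix(c(0,0,0, 1,0,0, 0,1,0, 1,1,0), ncol=3, byrow=TRUE)
#   fcs <- rbind(c(1,2,3), c(2,4,3))
#   mesh <- make_mesh(vtx, fcs)
#   print(sum(vertex_areas(mesh))) # 1: the total area of the square
# }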
|
/scratch/gouwar.j/cran-all/cranData/BayesfMRI/R/vertex_areas.R
|
###########################################
########## Bayesian Fama-MacBeth ##########
###########################################
#' Bayesian Fama-MacBeth
#'
#' @description This function provides the Bayesian Fama-MacBeth regression.
#'
#' @param f A matrix of factors with dimension \eqn{t \times k}, where \eqn{k} is the number of factors
#' and \eqn{t} is the number of periods;
#' @param R A matrix of test assets with dimension \eqn{t \times N}, where \eqn{t} is the number of periods
#' and \eqn{N} is the number of test assets;
#' @param sim_length The length of MCMCs;
#'
#' @details
#'
#' \code{BayesianFM} is similar to another twin function in this package, \code{BayesianSDF},
#' except that we estimate factors' risk premia rather than risk prices in this function.
#' Unlike \code{BayesianSDF}, we use factor loadings, \eqn{\beta_f}, instead of covariance exposures, \eqn{C_f},
#' in the Fama-MacBeth regression. In particular, after we obtain the posterior draws of \eqn{\mu_{Y}} and \eqn{\Sigma_{Y}}
#' (details can be found in the section introducing the \code{BayesianSDF} function),
#' we calculate \eqn{\beta_f} as follows: \eqn{\beta_f = C_f \Sigma_f^{-1}}, and \eqn{\beta = (1_N, \beta_f)}.
#'
#' \strong{Bayesian Fama-MacBeth (BFM)}
#'
#' The posterior distribution of \eqn{\lambda} conditional on \eqn{\mu_{Y}}, \eqn{\Sigma_{Y}}, and the data, is a Dirac distribution at
#' \eqn{(\beta^\top \beta)^{-1} \beta^\top \mu_R}.
#'
#' \strong{Bayesian Fama-MacBeth GLS (BFM-GLS)}
#'
#' The posterior distribution of \eqn{\lambda} conditional on \eqn{\mu_{Y}}, \eqn{\Sigma_{Y}}, and the data, is a Dirac distribution at
#' \eqn{ (\beta^\top \Sigma_R^{-1} \beta)^{-1} \beta^\top \Sigma_R^{-1} \mu_R }.
#'
#'
#'
#'
#' @return
#' The return of \code{BayesianFM} is a list of the following elements:
#' \itemize{
#' \item \code{lambda_ols_path}: A \code{sim_length}\eqn{\times (k+1)} matrix of OLS risk premia estimates (Each row represents a draw.
#' Note that the first column is \eqn{\lambda_c} corresponding to the constant term.
#' The next \eqn{k} columns are the risk premia estimates of the \eqn{k} factors);
#' \item \code{lambda_gls_path}: A \code{sim_length}\eqn{\times (k+1)} matrix of the risk premia estimates \eqn{\lambda} (GLS);
#' \item \code{R2_ols_path}: A \code{sim_length}\eqn{\times 1} matrix of the \eqn{R^2_{OLS}};
#' \item \code{R2_gls_path}: A \code{sim_length}\eqn{\times 1} matrix of the \eqn{R^2_{GLS}}.
#' }
#'
#'
#' @export
#'
#' @examples
#'
#' ## <-------------------------------------------------------------------------------->
#' ## Example: Bayesian Fama-MacBeth
#' ## <-------------------------------------------------------------------------------->
#'
#' library(reshape2)
#' library(ggplot2)
#'
#' # Load Data
#' data("BFactor_zoo_example")
#' HML <- BFactor_zoo_example$HML
#' lambda_ols <- BFactor_zoo_example$lambda_ols
#' R2.ols.true <- BFactor_zoo_example$R2.ols.true
#' sim_f <- BFactor_zoo_example$sim_f
#' sim_R <- BFactor_zoo_example$sim_R
#' uf <- BFactor_zoo_example$uf
#'
#' ## <-------------------Case 1: strong factor---------------------------------------->
#'
#' # the Frequentist Fama-MacBeth
#' # sim_f: simulated factor, sim_R: simulated return
#' # sim_f is the useful (i.e., strong) factor
#' results.fm <- Two_Pass_Regression(sim_f, sim_R)
#'
#' # the Bayesian Fama-MacBeth with 2000 simulations
#' results.bfm <- BayesianFM(sim_f, sim_R, 2000)
#'
#' # Note that the first element corresponds to lambda of the constant term
#' # So we choose k=2 to get lambda of the strong factor
#' k <- 2
#' m1 <- results.fm$lambda[k]
#' sd1 <- sqrt(results.fm$cov_lambda[k,k])
#'
#' bfm<-results.bfm$lambda_ols_path[1001:2000,k]
#' fm<-rnorm(20000,mean = m1, sd=sd1)
#' data<-data.frame(cbind(fm, bfm))
#' colnames(data)<-c("Frequentist FM", "Bayesian FM")
#' data.long<-melt(data)
#'
#' p <- ggplot(aes(x=value, colour=variable, linetype=variable), data=data.long)
#' p+
#' stat_density(aes(x=value, colour=variable),
#' geom="line",position="identity", size = 2, adjust=1) +
#' geom_vline(xintercept = lambda_ols[2], linetype="dotted", color = "#8c8c8c", size=1.5)+
#' guides(colour = guide_legend(override.aes=list(size=2), title.position = "top",
#' title.hjust = 0.5, nrow=1,byrow=TRUE))+
#' theme_bw()+
#' labs(color=element_blank()) +
#' labs(linetype=element_blank()) +
#' theme(legend.key.width=unit(4,"line")) +
#' theme(legend.position="bottom")+
#' theme(text = element_text(size = 26))+
#' xlab(bquote("Risk premium ("~lambda[strong]~")")) +
#' ylab("Density" )
#'
#'
#' ## <-------------------Case 2: useless factor--------------------------------------->
#'
#' # uf is the useless factor
#' # the Frequentist Fama-MacBeth
#' results.fm <- Two_Pass_Regression(uf, sim_R)
#'
#' # the Bayesian Fama-MacBeth with 2000 simulations
#' results.bfm <- BayesianFM(uf, sim_R, 2000)
#'
#' # Note that the first element corresponds to lambda of the constant term
#' # So we choose k=2 to get lambda of the useless factor
#' k <- 2
#' m1 <- results.fm$lambda[k]
#' sd1 <- sqrt(results.fm$cov_lambda[k,k])
#'
#'
#' bfm<-results.bfm$lambda_ols_path[1001:2000,k]
#' fm<-rnorm(20000,mean = m1, sd=sd1)
#' data<-data.frame(cbind(fm, bfm))
#' colnames(data)<-c("Frequentist FM", "Bayesian FM")
#' data.long<-melt(data)
#'
#' p <- ggplot(aes(x=value, colour=variable, linetype=variable), data=data.long)
#' p+
#' stat_density(aes(x=value, colour=variable),
#' geom="line",position="identity", size = 2, adjust=1) +
#' geom_vline(xintercept = lambda_ols[2], linetype="dotted", color = "#8c8c8c", size=1.5)+
#' guides(colour = guide_legend(override.aes=list(size=2),
#' title.position = "top", title.hjust = 0.5, nrow=1,byrow=TRUE))+
#' theme_bw()+
#' labs(color=element_blank()) +
#' labs(linetype=element_blank()) +
#' theme(legend.key.width=unit(4,"line")) +
#' theme(legend.position="bottom")+
#' theme(text = element_text(size = 26))+
#' xlab(bquote("Risk premium ("~lambda[strong]~")")) +
#' ylab("Density" )
#'
#'
#'
BayesianFM <- function(f, R, sim_length) {
# f: a matrix of factors with dimension t times k, where k is the number of factors
# and t is the number of periods;
# R: a matrix of test assets with dimension t times N, where N is the number of test assets;
# sim_length: the length of monte carlo simulation;
k <- dim(f)[2] # the number of factors
t <- dim(f)[1] # the number of time periods
N <- dim(R)[2] # the number of test assets
Y <- cbind(f, R)
  # Check the prerequisite condition
check_input2(f,R);
ET_f <- as.matrix(colMeans(f), ncol = 1) # mean of the factors;
ET_R <- as.matrix(colMeans(R), ncol = 1) # the sample mean of test assets;
lambda_ols_path <- matrix(0, ncol = (1+k), nrow = sim_length) # store the estimates of lambda_ols;
lambda_gls_path <- matrix(0, ncol = (1+k), nrow = sim_length) # store the estimates of lambda_gls;
R2_ols_path <- rep(0, sim_length)
R2_gls_path <- rep(0, sim_length)
ER_path <- matrix(0, nrow = sim_length, ncol = N)
Sigma_ols <- cov(Y)
ones.N <- matrix(1,nrow = N, ncol = 1)
ones.t <- matrix(1,nrow = t, ncol = 1)
ones.k <- matrix(1,nrow = k, ncol = 1)
mu_ols <- matrix(colMeans(Y), ncol = 1)
for (i in 1:sim_length) {
#print(i)
## (1) First-Stage: time-series regression
Sigma <- riwish(v=t-1, S=t*Sigma_ols)
Sigma_R <- Sigma[(k+1):(N+k), (k+1):(N+k)]
Sigma_f <- Sigma[1:k, 1:k]
Sigma_Rf <- Sigma[(k+1):(N+k), 1:k]
# expected returns - beta representation
C <- Sigma[(k+1):(N+k), 1:k] %*% solve(Sigma_f)
Var_mu_half <- chol(Sigma/t)
mu <- mu_ols + t(Var_mu_half) %*% matrix(rnorm(N+k), ncol = 1)
a <- mu[(k+1):(N+k),1,drop=FALSE]
mu_f <- mu[1:k,1,drop=FALSE]
Sigma.eps <- Sigma_R - Sigma_Rf %*% solve(Sigma_f) %*% t(Sigma_Rf)
## (2) Second-Stage: cross-sectional regression
H <- cbind(ones.N, C)
HH.inv <- chol2inv(chol(t(H)%*%H))
Sigma.inv <- solve(Sigma.eps)
Lambda_ols <- HH.inv %*% t(H) %*% a
Lambda_gls <- chol2inv(chol(t(H)%*%Sigma.inv%*%H)) %*% t(H)%*%Sigma.inv%*%a
sigma2_ols <- as.vector((1/N) * t(a - H %*% Lambda_ols) %*% (a - H %*% Lambda_ols))
sigma2 <- rinvgamma(n = 1, shape = 0.5*(N-k-1), 0.5*N*sigma2_ols)
## R-Squared and pricing errors alpha
alpha <- a - H %*% Lambda_ols
R2_ols <- 1 - ((t(a - H %*% Lambda_ols) %*% (a - H %*% Lambda_ols)) / as.vector((N-1)*var(a)))
R2_gls <- (1 - t(a - H %*% Lambda_gls)%*%Sigma.inv%*%(a - H %*% Lambda_gls) /
(t(a - mean(a))%*%Sigma.inv%*%(a - mean(a))))
## Record the estimates
lambda_ols_path[i, ] <- Lambda_ols
lambda_gls_path[i, ] <- Lambda_gls
R2_ols_path[i] <- 1 - (1-R2_ols) * (N-1) / (N-1-k)
R2_gls_path[i] <- 1 - (1-R2_gls) * (N-1) / (N-1-k)
}
return(list(lambda_ols_path=lambda_ols_path,
lambda_gls_path=lambda_gls_path,
R2_ols_path=R2_ols_path,
R2_gls_path=R2_gls_path))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianFactorZoo/R/BayesianFamaMacBeth.R
|
#' Bayesian estimation of Linear SDF (B-SDF)
#'
#' @description This function provides the Bayesian estimates of factors' risk prices.
#' The estimates with the flat prior are given by Definitions 1 and 2 in
#' \insertCite{bryzgalova2023bayesian;textual}{BayesianFactorZoo}.
#' The estimates with the normal prior are used in Table I (see the footnote of Table I).
#'
#' @param f A \eqn{t \times k} matrix of factors, where \eqn{k} is the number of factors
#' and \eqn{t} is the number of periods
#' @param R A \eqn{t \times N} matrix of test assets, where \eqn{t} is the number of periods
#' and \eqn{N} is the number of test assets
#' @param sim_length The length of MCMCs
#' @param intercept If \code{intercept = TRUE} (\code{intercept = FALSE}), the model includes (does not include) the intercept.
#' The default is \code{intercept = TRUE}
#' @param type If \code{type = 'OLS'} (\code{type = 'GLS'}), the function returns Bayesian OLS (GLS) estimates of risk prices \eqn{\lambda}. The default is 'OLS'
#' @param prior If \code{type = 'Flat'} (\code{type = 'Normal'}), the function executes the Bayesian estimation with the flat prior (normal prior).
#' The default is 'Flat'
#' @param psi0 The hyper-parameter of the prior distribution of risk prices \eqn{\lambda} used in the normal prior (see \bold{Details}).
#' This parameter is needed only when the user chooses the normal prior. The default value is 5
#' @param d The hyper-parameter of the prior distribution of risk prices \eqn{\lambda} used in the normal prior (see \bold{Details}). The default value is 0.5
#'
#' @details
#'
#' \strong{Intercept}
#'
#' Consider the cross-sectional step. If one includes the intercept, the model is
#' \deqn{\mu_R = \lambda_c 1_N + C_f \lambda_f = C \lambda,}
#' where \eqn{C = (1_N, C_f)} and \eqn{\lambda^\top = (\lambda_c^\top, \lambda_f^\top)^\top }.
#' If one doesn't include the intercept, the model is
#' \deqn{\mu_R = C_f \lambda_f = C \lambda,}
#' where \eqn{C = C_f} and \eqn{\lambda = \lambda_f}.
#'
#' \strong{Bayesian Estimation}
#'
#' Let \eqn{Y_t = f_t \cup R_t}. Conditional on the data \eqn{Y = \{Y_t\}_{t=1}^T}, we can draw \eqn{\mu_{Y}} and \eqn{\Sigma_{Y}} from the Normal-inverse-Wishart system
#' \deqn{\mu_Y | \Sigma_Y, Y \sim N (\hat{\mu}_Y , \Sigma_Y / T) , }
#' \deqn{\Sigma_Y | Y \sim W^{-1} (T-1, \Sigma_{t=1}^{T} (Y_t - \hat{\mu}_Y ) ( Y_t - \hat{\mu}_Y )^\top ) , }
#' where \eqn{W^{-1}} is the inverse-Wishart distribution.
#' We do not standardize \eqn{Y_t} in the time-series regression.
#' In the empirical implementation, after obtaining posterior draws for \eqn{\mu_{Y}} and \eqn{\Sigma_{Y}},
#' we calculate \eqn{\mu_R} and \eqn{C_f} as the standardized expected returns of test assets and correlation
#' between test assets and factors. It follows that \eqn{C} is a matrix containing a vector of ones and \eqn{C_f}.
#'
#' The prior distribution of risk prices is either the flat prior or the normal prior.
#'
#'
#' With \code{prior = 'Flat'} and \code{type = 'OLS'}, for each draw, the risk price estimate is
#' \deqn{\hat{\lambda} = (C^{\top} C)^{-1}C^{T} \mu_{R} .}
#'
#' With \code{prior = 'Flat'} and \code{type = 'GLS'}, for each draw, the risk price estimate is
#' \deqn{\hat{\lambda} = (C^{\top} \Sigma^{-1}_{R} C)^{-1} C^{\top} \Sigma^{-1}_{R} \mu_{R} }
#'
#' If one chooses \code{prior = 'Normal'}, the prior of factor \eqn{j}'s risk price is
#' \deqn{ \lambda_j | \sigma^2 \sim N(0, \sigma^2 \psi \tilde{\rho}_j^\top \tilde{\rho}_j T^d ) ,}
#' where \eqn{ \tilde{\rho}_j = \rho_j - (\frac{1}{N} \Sigma_{i=1}^{N} \rho_{j,i} ) \times 1_N } is the cross-sectionally
#' demeaned vector of factor \eqn{j}'s correlations with asset returns. Equivalently,
#' \deqn{ \lambda | \sigma^2 \sim N(0, \sigma^2 D^{-1}) ,}
#' \deqn{D = diag \{ (\psi \tilde{\rho}_1^\top \tilde{\rho}_1 T^d)^{-1}, ..., (\psi \tilde{\rho}_k^\top \tilde{\rho}_k T^d)^{-1} \} \ \ without \ intercept;}
#' \deqn{D = diag \{ c, (\psi \tilde{\rho}_1^\top \tilde{\rho}_1 T^d)^{-1}, ..., (\psi \tilde{\rho}_k^\top \tilde{\rho}_k T^d)^{-1} \} \ \ with \ intercept;}
#' where \eqn{c} is a small positive number corresponding to the common cross-sectional intercept (\eqn{\lambda_c}).
#' Default values for \eqn{\psi} (\code{psi0}) and \eqn{d} (\code{d}) are 5 and 0.5, respectively.
#'
#' With \code{prior = 'Normal'} and \code{type = 'OLS'}, for each draw, the risk price estimate is
#' \deqn{ \hat{\lambda} = ( C^{\top} C +D )^{-1} C^{\top} \mu_R .}
#'
#' With \code{prior = 'Normal'} and \code{type = 'GLS'}, for each draw, the risk price estimate is
#' \deqn{ \hat{\lambda} = ( C^{\top} \Sigma_R^{-1} C +D )^{-1} C^{\top} \Sigma_R^{-1} \mu_R .}
#'
#'
#' @references
#' \insertRef{bryzgalova2023bayesian}{BayesianFactorZoo}
#'
#'
#' @return
#' The return of \code{BayesianSDF} is a list that contains the following elements:
#' \itemize{
#' \item \code{lambda_path}: A \code{sim_length}\eqn{\times (k+1)} matrix if the intercept is included.
#' NOTE: the first column \eqn{\lambda_c} corresponds to the intercept. The next \eqn{k} columns (i.e., the 2nd -- \eqn{(k+1)}-th columns)
#' are the risk prices of \eqn{k} factors. If the intercept is excluded, the dimension of \code{lambda_path} is \code{sim_length}\eqn{\times k}.
#' \item \code{R2_path}: A \code{sim_length}\eqn{\times 1} matrix, which contains the posterior draws of the OLS or GLS \eqn{R^2}.
#' }
#'
#'
#'
#' @export
#'
#' @examples
#' ## <-------------------------------------------------------------------------------->
#' ## Example: Bayesian estimates of risk prices and R2
#' ## This example is from the paper (see Section III. Simulation)
#' ## <-------------------------------------------------------------------------------->
#'
#' library(reshape2)
#' library(ggplot2)
#'
#' # Load the example data
#' data("BFactor_zoo_example")
#' HML <- BFactor_zoo_example$HML
#' lambda_ols <- BFactor_zoo_example$lambda_ols
#' R2.ols.true <- BFactor_zoo_example$R2.ols.true
#' sim_f <- BFactor_zoo_example$sim_f
#' sim_R <- BFactor_zoo_example$sim_R
#' uf <- BFactor_zoo_example$uf
#' W_ols <- BFactor_zoo_example$W_ols
#'
#' cat("Load the simulated example \n")
#'
#' cat("Cross-section: Fama-French 25 size and value portfolios \n")
#' cat("True pricing factor in simulations: HML \n")
#' cat("Pseudo-true cross-sectional R-squared:", R2.ols.true, "\n")
#' cat("Pseudo-true (monthly) risk price:", lambda_ols[2], "\n")
#'
#' cat("----------------------------- Bayesian SDF ----------------------------\n")
#' cat("------------------------ See definitions 1 and 2 ----------------------\n")
#'
#' cat("--------------------- Bayesian SDF: Strong factor ---------------------\n")
#'
#' sim_result <- SDF_gmm(sim_R, sim_f, W_ols) # GMM estimation
#' # sim_result$lambda_gmm
#' # sqrt(sim_result$Avar_hat[2,2])
#' # sim_result$R2_adj
#'
#' ## Now estimate the model using Bayesian method
#' two_step <- BayesianSDF(sim_f, sim_R, sim_length = 2000, psi0 = 5, d = 0.5)
#' # apply(X = two_step$lambda_path, FUN = quantile, MARGIN = 2, probs = c(0.05, 0.95))
#' # quantile(two_step$R2_path, probs = c(0.05, 0.5, 0.95))
#'
#' # Note that the first element corresponds to lambda of the constant term
#' # So we choose k=2 to get lambda of the strong factor
#' k <- 2
#' m1 <- sim_result$lambda_gmm[k]
#' sd1 <- sqrt(sim_result$Avar_hat[k,k])
#'
#' bfm<-two_step$lambda_path[1001:2000, k]
#' fm<-rnorm(5000,mean = m1, sd=sd1)
#' data<-data.frame(cbind(fm, bfm))
#' colnames(data)<-c("GMM-OLS", "BSDF-OLS")
#' data.long<-melt(data)
#'
#' #
#' ### Figure 1(c)
#' #
#' p <- ggplot(aes(x=value, colour=variable, linetype=variable), data=data.long)
#' p+
#' stat_density(aes(x=value, colour=variable),
#' geom="line",position="identity", size = 2, adjust=1) +
#' geom_vline(xintercept = lambda_ols[2], linetype="dotted", color = "#8c8c8c", size=1.5)+
#' guides(colour = guide_legend(override.aes=list(size=2), title.position = "top",
#' title.hjust = 0.5, nrow=1,byrow=TRUE))+
#' theme_bw()+
#' labs(color=element_blank()) +
#' labs(linetype=element_blank()) +
#' theme(legend.key.width=unit(4,"line")) +
#' theme(legend.position="bottom")+
#' theme(text = element_text(size = 26))+
#' xlab(bquote("Risk price ("~lambda[strong]~")")) +
#' ylab("Density" )
#'
#'
#' cat("--------------------- Bayesian SDF: Useless factor --------------------\n")
#'
#' sim_result <- SDF_gmm(sim_R, uf, W_ols)
#' # sim_result$lambda_gmm
#' # sqrt(sim_result$Avar_hat[2,2])
#' # sim_result$R2_adj
#'
#' two_step <- BayesianSDF(uf, sim_R, sim_length = 2000, psi0 = 5, d = 0.5)
#' #apply(X = two_step$lambda_path, FUN = quantile, MARGIN = 2, probs = c(0.05, 0.95))
#'
#'
#' ## Posterior (Asymptotic) Distribution of lambda
#' k <- 2
#' m1 <- sim_result$lambda_gmm[k]
#' sd1 <- sqrt(sim_result$Avar_hat[k,k])
#'
#' bfm<-two_step$lambda_path[1001:2000, k]
#' fm<-rnorm(5000,mean = m1, sd=sd1)
#' data<-data.frame(cbind(fm, bfm))
#' colnames(data)<-c("GMM-OLS", "BSDF-OLS")
#' data.long<-melt(data)
#'
#' #
#' ### Figure 1(a)
#' #
#' p <- ggplot(aes(x=value, colour=variable, linetype=variable), data=data.long)
#' p+
#' stat_density(aes(x=value, colour=variable),
#' geom="line",position="identity", size = 2, adjust=2) +
#' geom_vline(xintercept = 0, linetype="dotted", color = "#8c8c8c", size=1.5)+
#' guides(colour = guide_legend(override.aes=list(size=2),
#' title.position = "top", title.hjust = 0.5, nrow=1,byrow=TRUE))+
#' theme_bw()+
#' labs(color=element_blank()) +
#' labs(linetype=element_blank()) +
#' theme(legend.key.width=unit(4,"line")) +
#' theme(legend.position="bottom")+
#' theme(text = element_text(size = 26))+
#' xlab(bquote("Risk price ("~lambda[spurious]~")")) +
#' ylab("Density" )
#'
#'
#'
BayesianSDF <- function(f, R, sim_length = 10000, intercept = TRUE, type = 'OLS',
prior = 'Flat', psi0 = 5, d = 0.5) {
f <- as.matrix(f) # factors: txk dimension
R <- as.matrix(R) # test assets: txN dimension
k <- dim(f)[2] # the number of factors
t <- dim(f)[1] # the number of time periods in factors
N <- dim(R)[2] # the number of test assets
  # Check whether the prerequisite conditions are satisfied
  check_input(f, R, intercept, type, prior)
p <- N+k
Y <- cbind(f, R)
Sigma_ols <- cov(Y)
ones.N <- matrix(1,nrow = N, ncol = 1)
mu_ols <- matrix(colMeans(Y), ncol = 1)
  ## Store the posterior draws of lambda (lambda_ols or lambda_gls):
  if (intercept == TRUE) {
    # the first element is the constant term
lambda_path <- matrix(0, ncol = (1+k), nrow = sim_length)
} else {
lambda_path <- matrix(0, ncol = k, nrow = sim_length)
}
  ## Store the posterior draws of the cross-sectional R-squared: R2_ols or R2_gls
R2_path <- rep(0, sim_length)
## Determine the degree of shrinkage based on correlation between asset returns and factors:
rho <- cor(Y)[(k+1):(N+k), 1:k, drop = FALSE]
rho.demean <- rho - matrix(1, ncol = 1, nrow = N) %*% matrix(colMeans(rho), nrow = 1)
if (prior=='Normal') {
psi <- psi0 * diag(t(rho.demean)%*%rho.demean)
if (intercept==TRUE) {
D <- diag(c(1/100000, 1/psi)) * (1/t)^d
} else {
if (k == 1) {
D <- matrix((1/psi)*(1/t)^d, ncol=1, nrow=1)
} else {
D <- diag(1/psi) * (1/t)^d
}
}
}
for (i in 1:sim_length) {
## (1) First-Stage: time-series regression
Sigma <- riwish(v=t-1, S=t*Sigma_ols)
Sigma_R <- Sigma[(k+1):(N+k), (k+1):(N+k)]
Var_mu_half <- chol(Sigma/t)
mu <- mu_ols + t(Var_mu_half) %*% matrix(rnorm(N+k), ncol = 1)
sd_Y <- matrix(sqrt(diag(Sigma)), ncol=1) # standard deviation of Y(t)
corr_Y <- Sigma / (sd_Y%*%t(sd_Y))
C_f <- corr_Y[(k+1):p, 1:k, drop=FALSE] # corr[R(t), f(t)]
a <- mu[(1+k):p,1,drop=FALSE] / sd_Y[(1+k):p] # Sharpe ratio of test assets;
ER <- mu[(1+k):p,1,drop=FALSE]
sd_R <- sd_Y[(1+k):p]
## (2) Second-Stage: cross-sectional regression
if (intercept == TRUE) { # if the intercept is included
H <- cbind(ones.N, C_f)
} else {
H <- C_f
}
Sigma.inv <- chol2inv(chol(corr_Y[(k+1):(N+k), (k+1):(N+k)]))
## The Lambda estimates and R2
if (prior=='Flat') {
if (type=='OLS') { # Case I.1: Flat prior and OLS
Lambda <- chol2inv(chol(t(H)%*%H)) %*% t(H) %*% a
R2 <- 1 - ((t(a - H %*% Lambda) %*% (a - H %*% Lambda)) / as.vector((N-1)*var(a)))
} else if (type=='GLS') { # Case I.2: Flat prior and GLS
Lambda <- chol2inv(chol(t(H)%*%Sigma.inv%*%H)) %*% t(H)%*%Sigma.inv%*%a
R2 <- (1 - t(a - H %*% Lambda)%*%Sigma.inv%*%(a - H %*% Lambda) /
(t(a - mean(a))%*%Sigma.inv%*%(a - mean(a))))
}
} else if (prior=='Normal') {
if (type=='OLS') { # Case II.1 Normal prior and OLS
Lambda <- chol2inv(chol(t(H)%*%H + D)) %*% t(H) %*% a
R2 <- 1 - ((t(a - H%*%Lambda) %*% (a - H%*%Lambda)) / as.vector((N-1)*var(a)))
} else if (type=='GLS') { # Case II.2 Normal prior and GLS
Lambda <- chol2inv(chol(t(H)%*%Sigma.inv%*%H + D)) %*% t(H)%*%Sigma.inv%*%a
R2 <- (1 - t(a - H%*%Lambda)%*%Sigma.inv%*%(a-H %*%Lambda) /
(t(a - mean(a))%*%Sigma.inv%*%(a - mean(a))))
}
}
## Record the estimates and R2
lambda_path[i, ] <- Lambda
R2_path[i] <- 1 - (1-R2) * (N-1) / (N-1-k)
}
return(list(lambda_path=lambda_path, R2_path=R2_path))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianFactorZoo/R/BayesianSDF.R
|
######################################################
########## Fama-MacBeth Two-Pass Regression ##########
######################################################
#' Fama MacBeth Two-Pass Regression
#'
#' @description This function provides the frequentist Fama-MacBeth Two-Pass Regression.
#'
#' @param f A matrix of factors with dimension \eqn{t \times k}, where \eqn{k} is the number of factors
#' and \eqn{t} is the number of periods;
#' @param R A matrix of test assets with dimension \eqn{t \times N}, where \eqn{t} is the number of periods
#' and \eqn{N} is the number of test assets;
#'
#' @details
#'
#' See Chapter 12.2 in \insertCite{cochrane2009asset;textual}{BayesianFactorZoo}. \code{t_stat} and \code{t_stat_gls}
#' are t-statistics of OLS and GLS risk premia estimates based on the asymptotic standard errors in equation (12.19) in
#' \insertCite{cochrane2009asset;textual}{BayesianFactorZoo}.
#'
#' @references
#' \insertRef{cochrane2009asset}{BayesianFactorZoo}
#'
#'
#' @return
#' The return of \code{Two_Pass_Regression} is a list of the following elements:
#' \itemize{
#' \item lambda: Risk premia estimates in the OLS two-pass regression;
#' \item lambda_gls: Risk premia estimates in the GLS two-pass regression;
#' \item t_stat: The t-statistics of risk premia estimates in the OLS two-pass regression;
#' \item t_stat_gls: The t-statistics of risk premia estimates in the GLS two-pass regression;
#'  \item R2_adj: Adjusted \eqn{R^2} in the OLS two-pass regression;
#'  \item R2_adj_GLS: Adjusted \eqn{R^2} in the GLS two-pass regression.
#' }
#' The list also contains further quantities computed along the way, such as the pricing
#' errors (\code{alpha}) and their t-statistics (\code{t_alpha}), the time-series betas
#' (\code{beta}), and the associated covariance matrices.
#'
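#' @examples
#'
#' ## A minimal usage sketch, reusing the simulated example data shipped with
#' ## this package (sim_f: simulated strong factor; sim_R: simulated test assets):
#' data("BFactor_zoo_example")
#' sim_f <- BFactor_zoo_example$sim_f
#' sim_R <- BFactor_zoo_example$sim_R
#' fm <- Two_Pass_Regression(sim_f, sim_R)
#' fm$lambda     # OLS risk premia estimates (first element: common intercept)
#' fm$t_stat     # t-statistics based on equation (12.19)
#'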
#' @export
#'
Two_Pass_Regression <- function(f, R) {
# f: a matrix of factors with dimension t times k, where k is the number of factors and t is the number of periods;
# R: a matrix of test assets with dimension t times N, where N is the number of test assets;
t <- dim(R)[1]
k <- dim(f)[2]
N <- dim(R)[2]
ET_f <- as.matrix(colMeans(f), ncol = 1, nrow = k)
f <- f - matrix(1, nrow = t, ncol = 1) %*% t(ET_f) # demean the factors
### Step 1. Time-Series Regression
X <- cbind(matrix(1, nrow=t, ncol=1), f)
B <- t(R) %*% X %*% solve(t(X) %*% X)
beta <- B[ ,2:(k+1), drop=FALSE]
epsilon <- t(R) - B %*% t(X) # store the error terms epsilon
cov_epsilon <- epsilon %*% t(epsilon) / t # estimate the variance-covariance matrix of epsilon under i.i.d assumption
XX.inv <- solve((t(X)%*%X))
cov_beta <- kronecker(cov_epsilon, XX.inv)
### Step 2. Cross-Sectional Regression (OLS)
C <- cbind(matrix(1, nrow=N, ncol=1), beta)
mu <- t(R) %*% matrix(1, ncol = 1, nrow = t) / t
lambda <- solve(t(C)%*%C) %*% t(C) %*% mu
alpha <- mu - C %*% lambda # calculate the pricing errors
  # From Cochrane (2009): we don't have the term Sigma_f in equation (12.19) since
# all factors are demeaned.
Omega_F <- matrix(0, nrow = (1+k), ncol = (1+k))
Omega_F_inv <- matrix(0, nrow = (1+k), ncol = (1+k))
Omega_F[2:(1+k), 2:(1+k)] <- cov(f)
Omega_F_inv[2:(1+k), 2:(1+k)] <- solve(cov(f))
cov_lambda <- (1/t) * (((solve(t(C)%*%C) %*% t(C) %*% cov_epsilon %*% C %*% solve(t(C)%*%C))
* as.vector(1 + t(lambda)%*%Omega_F_inv%*%lambda)) + Omega_F)
tstat <- as.vector(lambda) / sqrt(diag(cov_lambda))
y <- diag(N) - C %*% solve(t(C)%*%C) %*% t(C)
cov_alpha <- (1/t) * y %*% cov_epsilon %*% y * as.vector(1 + t(lambda)%*%Omega_F_inv%*%lambda)
t_alpha <- as.vector(alpha) / sqrt(diag(cov_alpha))
### Step 3. Cross-Sectional Regression (GLS)
lambda_gls <- solve(t(C)%*% solve(cov_epsilon) %*% C) %*% t(C) %*% solve(cov_epsilon) %*% mu
cov_lambda_gls <- (1/t) * ((solve(t(C)%*% solve(cov_epsilon) %*% C) *
as.vector(1 + t(lambda)%*%Omega_F_inv%*%lambda)) + Omega_F)
tstat_gls <- as.vector(lambda_gls) / sqrt(diag(cov_lambda_gls))
### Reported Statistics
## (1) Adjusted R-Squared
#R2 <- 1 - t(alpha) %*% alpha / ((N-1)*var(mu))
R2 <- 1 - t(alpha) %*% alpha / (t(mu - mean(mu))%*%(mu - mean(mu)))
R2_adj <- 1 - (1-R2) * (N-1) / (N-1-k)
## (2) GLS Adjusted R-Squared
alpha_GLS <- mu - C %*% lambda_gls # calculate the pricing errors
#R2_GLS <- (1 - t(alpha_GLS-mean(alpha_GLS))%*%solve(cov_epsilon)%*%(alpha_GLS-mean(alpha_GLS)) /
# (t(mu - mean(mu))%*%solve(cov_epsilon)%*%(mu - mean(mu))))
R2_GLS <- (1 - t(alpha_GLS)%*%solve(cov_epsilon)%*%alpha_GLS /
(t(mu - mean(mu))%*%solve(cov_epsilon)%*%(mu - mean(mu))))
# R2_GLS <- 1 - t(alpha_GLS)%*%solve(V)%*%alpha_GLS / (t(mu)%*%solve(V)%*%mu)
R2_adj_GLS <- 1 - (1-R2_GLS) * (N-1) / (N-1-k)
R2_GLS2 <- (1 - t(alpha)%*%solve(cov_epsilon)%*%alpha /
(t(mu - mean(mu))%*%solve(cov_epsilon)%*%(mu - mean(mu))))
R2_adj_GLS2 <- 1 - (1-R2_GLS2) * (N-1) / (N-1-k)
## (3) Cross-Section T-Squared Statistics of Shanken (1985):
T_square <- t(alpha) %*% ginv(cov_alpha) %*% alpha
## (4) Quadratic q:
q <- t(alpha) %*% ginv(y %*% cov_epsilon %*% y) %*% alpha
reported_statistics <- list(lambda = lambda, t_stat = tstat, R2 = R2, R2_adj = R2_adj, R2_GLS = R2_GLS,
R2_adj_GLS = R2_adj_GLS, lambda_gls = lambda_gls, t_stat_gls = tstat_gls,
T_square = T_square, q = q, alpha = alpha, t_alpha = t_alpha,
beta=beta, cov_epsilon = cov_epsilon, cov_lambda = cov_lambda,
cov_lambda_gls = cov_lambda_gls, R2_GLS2 = R2_GLS2,
R2_adj_GLS2 = R2_adj_GLS2, cov_beta = cov_beta)
return(reported_statistics)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianFactorZoo/R/FamaMacBeth.R
|
#' GMM Estimates of Factors' Risk Prices under the Linear SDF Framework
#'
#' @description This function provides the GMM estimates of factors' risk prices under the linear SDF framework (including the common intercept).
#'
#' @param R A matrix of test assets with dimension \eqn{t \times N}, where \eqn{t} is the number of periods
#' and \eqn{N} is the number of test assets;
#' @param f A matrix of factors with dimension \eqn{t \times k}, where \eqn{k} is the number of factors
#' and \eqn{t} is the number of periods;
#' @param W Weighting matrix in GMM estimation (see \bold{Details}).
#'
#' @details
#'
#' We follow the notations in Section I of \insertCite{bryzgalova2023bayesian;textual}{BayesianFactorZoo}.
#' Suppose that there are \eqn{K} factors, \eqn{f_t = (f_{1t},...,f_{Kt})^\top, t=1,...,T}.
#' The returns of \eqn{N} test assets are denoted by \eqn{R_t = (R_{1t},...,R_{Nt})^\top}.
#'
#' Consider linear SDFs (\eqn{M}), that is, models of the form \eqn{M_t = 1- (f_t -E[f_t])^\top \lambda_f}.
#'
#' The model is estimated via GMM with moment conditions
#'
#' \deqn{E[g_t (\lambda_c, \lambda_f, \mu_f)] =E\left(\begin{array}{c} R_t - \lambda_c 1_N - R_t (f_t - \mu_f)^\top \lambda_f \\ f_t - \mu_f \end{array} \right) =\left(\begin{array}{c} 0_N \\ 0_K \end{array} \right)}
#' and the corresponding sample analog function \eqn{ g_T (\lambda_c, \lambda_f, \mu_f) = \frac{1}{T} \Sigma_{t=1}^T g_t (\lambda_c, \lambda_f, \mu_f)}. Different weighting matrices deliver different point estimates. Two popular choices are
#' \deqn{ W_{ols}=\left(\begin{array}{cc}I_N & 0_{N \times K} \\ 0_{K \times N} & \kappa I_K\end{array}\right), \ \ W_{gls}=\left(\begin{array}{cc} \Sigma_R^{-1} & 0_{N \times K} \\ 0_{K \times N} & \kappa I_K\end{array}\right), }
#' where \eqn{\Sigma_R} is the covariance matrix of returns and \eqn{\kappa >0} is a large constant so that \eqn{\hat{\mu}_f = \frac{1}{T} \Sigma_{t=1}^{T} f_t }.
#'
#' The asymptotic covariance matrix of risk premia estimates, \code{Avar_hat}, is based on the assumption that
#' \eqn{g_t (\lambda_c, \lambda_f, \mu_f)} is independent over time.
#'
#' @references
#' \insertRef{bryzgalova2023bayesian}{BayesianFactorZoo}
#'
#'
#' @return
#' The return of \code{SDF_gmm} is a list of the following elements:
#' \itemize{
#' \item \code{lambda_gmm}: Risk price estimates;
#' \item \code{mu_f}: Sample means of factors;
#' \item \code{Avar_hat}: Asymptotic covariance matrix of GMM estimates (see \bold{Details});
#' \item \code{R2_adj}: Adjusted cross-sectional \eqn{R^2};
#' \item \code{S_hat}: Spectral matrix.
#' }
#'
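#' @examples
#'
#' ## A minimal sketch assuming the OLS weighting matrix W_ols from Details,
#' ## with kappa set to an arbitrary large constant (here 1e6) so that mu_f is
#' ## estimated by the sample means of the factors:
#' data("BFactor_zoo_example")
#' sim_f <- BFactor_zoo_example$sim_f
#' sim_R <- BFactor_zoo_example$sim_R
#' N <- ncol(sim_R)
#' K <- ncol(sim_f)
#' W_ols <- diag(c(rep(1, N), rep(1e6, K)))
#' fit <- SDF_gmm(sim_R, sim_f, W_ols)
#' fit$lambda_gmm                # risk price estimates (first element: intercept)
#' sqrt(diag(fit$Avar_hat))      # asymptotic standard errors
#'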
#' @export
#'
SDF_gmm <- function(R, f, W) {
# f: matrix of factors with dimension t times k, where k is the number of factors and t is
# the number of periods;
# R: matrix of test assets with dimension t times N, where N is the number of test assets;
# W: weighting matrix in GMM estimation.
T1 <- dim(R)[1] # the sample size
N <- dim(R)[2] # the number of test assets
K <- dim(f)[2] # the number of factors
C_f <- cov(R, f) # covariance between test assets and factors
one_N <- matrix(1, ncol=1, nrow=N)
one_K <- matrix(1, ncol=1, nrow=K)
one_T <- matrix(1, ncol=1, nrow=T1)
C <- cbind(one_N, C_f) # include a common intercept into regression
mu_R <- matrix(colMeans(R), ncol=1) # sample mean of test assets
mu_f <- matrix(colMeans(f), ncol=1) # sample mean of factors
## GMM estimates
W1 <- W[1:N, 1:N]
lambda_gmm <- solve(t(C)%*%W1%*%C) %*% t(C)%*%W1%*%mu_R
lambda_c <- lambda_gmm[1]
lambda_f <- lambda_gmm[2:(1+K),,drop=FALSE] # price of risk, lambda_f
## Estimate the spectral matrix
f_demean <- f - one_T %*% t(mu_f)
moments <- matrix(0, ncol=N+K, nrow=T1)
moments[1:T1, (1+N):(K+N)] <- f_demean
# moments[1:T1, 1:N] <- (R - lambda_c*matrix(1,ncol=N,nrow=T1)
# - diag(as.vector(f_demean%*%lambda_f)) %*% R)
for (t in 1:T1) {
R_t <- matrix(R[t,], ncol=1)
f_t <- matrix(f[t,], ncol=1)
moments[t, 1:N] <- t(R_t - lambda_c*one_N - R_t%*%t(f_t-mu_f)%*%lambda_f)
}
S_hat <- cov(moments)
## Estimate the asymptotic variance of GMM estimates
G_hat <- matrix(0, ncol=2*K+1, nrow=N+K)
G_hat[1:N, 1] <- -1
G_hat[1:N, 2:(1+K)] <- -C_f
G_hat[1:N, (K+2):(1+2*K)] <- mu_R %*% t(lambda_f)
G_hat[(N+1):(N+K), (K+2):(1+2*K)] <- -diag(K)
Avar_hat <- (1/T1)*(solve(t(G_hat)%*%W%*%G_hat) %*% t(G_hat)%*%W%*%S_hat%*%W%*%G_hat
%*% solve(t(G_hat)%*%W%*%G_hat))
## Cross-sectional R-Squared
R2 <- (1 - t(mu_R - C%*%lambda_gmm) %*% W1 %*% (mu_R - C%*%lambda_gmm) /
(t(mu_R - mean(mu_R))%*%W1%*%(mu_R - mean(mu_R))))
R2_adj <- 1 - (1-R2) * (N-1) / (N-1-K)
return(list(lambda_gmm = lambda_gmm,
mu_f = mu_f,
Avar_hat = Avar_hat,
R2_adj = R2_adj,
S_hat = S_hat))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianFactorZoo/R/SDF_GMM.R
|
# Check whether the inputs are proper
check_input <- function(f, R , intercept, type, prior){
k <- dim(f)[2] # the number of factors
t_f <- dim(f)[1] # the number of time periods in factor
N <- dim(R)[2] # the number of test assets
t_asset <- dim(R)[1] # the number of time periods in test assets
  # Check whether the prerequisite conditions are satisfied
# The time periods of factors and assets should be equal
  if (t_f != t_asset) {
    stop("Error: the number of time periods in the factors should equal the number of time periods in the test assets.")
  }
# When the intercept is included, it should be the case that N>k
if ((intercept==TRUE)&(k>=N)) {
    stop("Error: the number of test assets should be larger (>) than the number of factors because of the cross-sectional regression requirement.")
}
# When the intercept is not included, it should be the case that N>=k
if ((intercept==FALSE)&(k>N)) {
stop("Error: the number of test assets should be larger (>=) than the number of factors because of the cross-sectional regression requirement.")
}
# The type should be 'OLS' or 'GLS'
if ((type!='OLS')&(type!='GLS')) {
stop("Error: the type should be 'OLS' or 'GLS'.")
}
  # The prior should be 'Flat', 'Spike-and-Slab', or 'Normal'
if ((prior!='Flat')&(prior!='Spike-and-Slab')&(prior!='Normal')) {
stop("Error: the prior should be 'Flat' or 'Spike-and-Slab' or 'Normal'.")
}
}
check_input2 <- function(f, R){
k <- dim(f)[2] # the number of factors
t_f <- dim(f)[1] # the number of time periods in factor
N <- dim(R)[2] # the number of test assets
t_asset <- dim(R)[1] # the number of time periods in test assets
  # Check whether the prerequisite conditions are satisfied
# The time periods of factors and assets should be equal
  if (t_f != t_asset) {
    stop("Error: the number of time periods in the factors should equal the number of time periods in the test assets.")
  }
# The number of test asset should be larger than factors
if (k>=N) {
    stop("Error: the number of test assets should be larger (>) than the number of factors because of the cross-sectional regression requirement.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianFactorZoo/R/check_input.R
|
#' SDF model selection with continuous spike-and-slab prior
#'
#' @description This function provides the SDF model selection procedure using the continuous spike-and-slab prior.
#' See Propositions 3 and 4 in \insertCite{bryzgalova2023bayesian;textual}{BayesianFactorZoo}.
#'
#' @param f A matrix of factors with dimension \eqn{t \times k}, where \eqn{k} is the number of factors
#' and \eqn{t} is the number of periods;
#' @param R A matrix of test assets with dimension \eqn{t \times N}, where \eqn{t} is the number of periods
#' and \eqn{N} is the number of test assets;
#' @param sim_length The length of the Monte Carlo simulation;
#' @param psi0 The hyper-parameter in the prior distribution of risk prices (see \bold{Details});
#' @param r The hyper-parameter related to the prior of risk prices (see \bold{Details});
#' @param aw The hyper-parameter related to the prior of \eqn{\gamma} (see \bold{Details});
#' @param bw The hyper-parameter related to the prior of \eqn{\gamma} (see \bold{Details});
#' @param type If \code{type = 'OLS'} (\code{type = 'GLS'}), the function returns Bayesian OLS (GLS) estimates of risk prices. The default is 'OLS'.
#'
#' @details
#'
#' To model the variable selection procedure, we introduce a vector of binary latent variables \eqn{\gamma^\top = (\gamma_0,\gamma_1,...,\gamma_K)},
#' where \eqn{\gamma_j \in \{0,1\} }. When \eqn{\gamma_j = 1}, factor \eqn{j} (with associated loadings \eqn{C_j}) should be included
#' in the model, and vice versa.
#'
#' The continuous spike-and-slab prior of risk prices \eqn{\lambda} is
#' \deqn{ \lambda_j | \gamma_j, \sigma^2 \sim N (0, r(\gamma_j) \psi_j \sigma^2 ) .}
#' When the factor \eqn{j} is included, we have \eqn{ r(\gamma_j = 1)=1 }.
#' When the factor is excluded from the model, \eqn{ r(\gamma_j = 0) =r \ll 1 }.
#' Hence, the Dirac "spike" is replaced by a Gaussian spike, which is extremely concentrated at zero
#' (the default value for \eqn{r} is 0.001).
#' We choose \eqn{ \psi_j = \psi \tilde{\rho}_j^\top \tilde{\rho}_j },
#' where \eqn{ \tilde{\rho}_j = \rho_j - (\frac{1}{N} \Sigma_{i=1}^{N} \rho_{j,i} ) \times 1_N }
#' is the cross-sectionally demeaned vector of factor \eqn{j}'s correlations with asset returns.
#' In the codes, \eqn{\psi} is equal to the value of \code{psi0}.
#'
#' The prior \eqn{\pi (\omega)} encoded the belief about the sparsity of the true model using the prior distribution
#' \eqn{\pi (\gamma_j = 1 | \omega_j) = \omega_j }. Following the literature on the variable selection, we set
#' \deqn{ \pi (\gamma_j = 1 | \omega_j) = \omega_j, \ \ \omega_j \sim Beta(a_\omega, b_\omega) . }
#' Different hyperparameters \eqn{a_\omega} and \eqn{b_\omega} determine whether one a priori favors more parsimonious models or not.
#' We choose \eqn{a_\omega = 1} (\code{aw}) and \eqn{b_\omega=1} (\code{bw}) as the default values.
#'
#' For each posterior draw of factors' risk prices \eqn{\lambda^{(j)}_f}, we can define the SDF as
#' \eqn{m^{(j)}_t = 1 - (f_t - \mu_f)^\top \lambda^{(j)}_f}. The Bayesian model averaging of the SDF (BMA-SDF)
#' over \eqn{J} draws is
#' \deqn{m^{bma}_t = \frac{1}{J} \sum^J_{j=1} m^{(j)}_t.}
#'
#' @references
#' \insertRef{bryzgalova2023bayesian}{BayesianFactorZoo}
#'
#'
#' @return
#' The return of \code{continuous_ss_sdf} is a list of the following elements:
#' \itemize{
#' \item \code{gamma_path}: A \code{sim_length}\eqn{\times k} matrix of the posterior draws of \eqn{\gamma}. Each row represents
#' a draw. If \eqn{\gamma_j = 1} in one draw, factor \eqn{j} is included in the model in this draw, and vice versa.
#' \item \code{lambda_path}: A \code{sim_length}\eqn{\times (k+1)} matrix of the risk prices \eqn{\lambda}. Each row represents
#' a draw. Note that the first column is \eqn{\lambda_c} corresponding to the constant term. The next \eqn{k} columns (i.e., the 2-th -- \eqn{(k+1)}-th columns) are the risk prices of the \eqn{k} factors.
#' \item \code{sdf_path}: A \code{sim_length}\eqn{\times t} matrix of posterior draws of SDFs. Each row represents a draw.
#' \item \code{bma_sdf}: BMA-SDF.
#' }
#'
#' @importFrom MCMCpack rinvgamma
#'
#' @export
#'
#' @examples
#'
#' ## Load the example data
#' data("BFactor_zoo_example")
#' HML <- BFactor_zoo_example$HML
#' lambda_ols <- BFactor_zoo_example$lambda_ols
#' R2.ols.true <- BFactor_zoo_example$R2.ols.true
#' sim_f <- BFactor_zoo_example$sim_f
#' sim_R <- BFactor_zoo_example$sim_R
#' uf <- BFactor_zoo_example$uf
#'
#' ## sim_f: simulated strong factor
#' ## uf: simulated useless factor
#'
#' psi_hat <- psi_to_priorSR(sim_R, cbind(sim_f,uf), priorSR=0.1)
#' shrinkage <- continuous_ss_sdf(cbind(sim_f,uf), sim_R, 5000, psi0=psi_hat, r=0.001, aw=1, bw=1)
#' cat("Null hypothesis: lambda =", 0, "for each factor", "\n")
#' cat("Posterior probabilities of rejecting the above null hypotheses are:",
#' colMeans(shrinkage$gamma_path), "\n")
#'
#' ## We also have the posterior draws of SDF: m(t) = 1 - lambda_g %*% (f(t) - mu_f)
#' sdf_path <- shrinkage$sdf_path
#'
#' ## We also provide the Bayesian model averaging of the SDF (BMA-SDF)
#' bma_sdf <- shrinkage$bma_sdf
#'
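#' ## As noted in Details, the BMA-SDF is just the average of the posterior SDF
#' ## draws, so the following identity holds:
#' all.equal(bma_sdf, colMeans(sdf_path))
#'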
continuous_ss_sdf <- function(f, R, sim_length, psi0 = 1, r = 0.001, aw = 1, bw = 1, type = "OLS") {
# f: matrix of factors with dimension t times k, where k is the number of factors and t is
# the number of periods.
# R: matrix of test assets with dimension t times N, where N is the number of test assets;
# sim_length: the length of MCMC;
# psi0, r, aw, bw: hyper-parameters;
k <- dim(f)[2] # the number of factors
t <- dim(f)[1] # the number of time periods
N <- dim(R)[2] # the number of test assets
p <- k + N # the number of variables in Y(t)
Y <- cbind(f, R) # factors + tradable portfolios
Sigma_ols <- cov(Y) # sample covariance matrix of Y(t)
Corr_ols <- cor(Y) # sample correlation matrix of Y(t)
sd_ols <- colSds(Y) # sample standard deviations of Y(t)
mu_ols <- matrix(colMeans(Y), ncol = 1) # sample mean of Y(t);
  # Check the prerequisite conditions
  check_input2(f, R)
## Matrices as outputs
lambda_path <- matrix(0, ncol = (1+k), nrow = sim_length)
gamma_path <- matrix(0, ncol = k, nrow = sim_length)
sdf_path <- matrix(0, ncol=t, nrow = sim_length)
# Initialize some parameters:
beta_ols <- cbind(matrix(1, nrow = N, ncol = 1), Corr_ols[(k+1):p, 1:k])
a_ols <- mu_ols[(1+k):p,,drop=FALSE] / sd_ols[(k+1):p]
Lambda_ols <- chol2inv(chol(t(beta_ols)%*%beta_ols)) %*% t(beta_ols) %*% a_ols
omega <- rep(0.5, k)
gamma <- rbinom(prob=omega, n = k, size = 1)
sigma2 <- as.vector((1/N) * t(a_ols - beta_ols %*% Lambda_ols) %*% (a_ols - beta_ols %*% Lambda_ols))
r_gamma <- ifelse(gamma==1, 1, r)
### Set the prior distribution for lambda_f
rho <- cor(Y)[(k+1):p, 1:k, drop = FALSE]
rho.demean <- rho - matrix(1, ncol = 1, nrow = N) %*% matrix(colMeans(rho), nrow = 1)
if (k == 1) {
psi <- psi0 * c(t(rho.demean)%*%rho.demean)
} else {
psi <- psi0 * diag(t(rho.demean)%*%rho.demean)
}
### Start the MCMC
for (i in 1:sim_length) {
#if (i %% 1000 == 0) {print(i)}
set.seed(i)
## (1) First-Stage: time-series regression
Sigma <- riwish(v=t-1, S=t*Sigma_ols) # draw the covariance matrix of Y(t)
Var_mu_half <- chol(Sigma/t)
mu <- mu_ols + t(Var_mu_half) %*% matrix(rnorm(p), ncol = 1) # draw the mean of Y(t)
sd_Y <- matrix(sqrt(diag(Sigma)), ncol=1) # standard deviation of Y(t)
corr_Y <- Sigma / (sd_Y%*%t(sd_Y))
C_f <- corr_Y[(k+1):p, 1:k] # corr[R(t), f(t)]
a <- mu[(1+k):p,1,drop=FALSE] / sd_Y[(1+k):p] # Sharpe ratio of test assets;
#### II. Second-Stage: cross-section regression (Gibbs Sampling)
beta <- cbind(matrix(1,nrow = N, ncol = 1), C_f)
corrR <- corr_Y[(k+1):p, (k+1):p]
## Step II.1. Draw lambda conditional on (data, sigma2, gamma, omega): equation (28)
D <- diag(c(1/100000, 1/(r_gamma*psi)))
if (type=='OLS') {
beta_D_inv <- chol2inv(chol(t(beta)%*%beta + D))
cov_Lambda <- sigma2 * beta_D_inv
Lambda_hat <- beta_D_inv %*% t(beta)%*%a
}
if (type=='GLS') {
beta_D_inv <- chol2inv(chol(t(beta)%*%solve(corrR)%*%beta + D))
cov_Lambda <- sigma2 * beta_D_inv
Lambda_hat <- beta_D_inv %*% t(beta)%*%solve(corrR)%*%a
}
Lambda <- Lambda_hat + t(chol(cov_Lambda)) %*% matrix(rnorm(k+1), ncol = 1)
## Step II.2. Draw gamma_j conditional on (data, Lambda, psi, sigma2, gamma_{-j}, omega):
## See equation (29)
log.odds <- log((omega/(1-omega))) + 0.5*log(r) + 0.5*Lambda[2:(k+1)]^2*(1/r-1)/(sigma2*psi)
odds <- exp(log.odds)
odds <- ifelse(odds > 1000, 1000, odds)
prob = odds / (1 + odds)
gamma <- rbinom(prob=prob, n = k, size = 1)
r_gamma <- ifelse(gamma==1, 1, r)
gamma_path[i, ] <- gamma
## Step II.3. Draw omega: equation (30)
omega <- rbeta(k, aw+gamma, bw+1-gamma)
## Step II.4. Draw sigma-squared: equation (31)
if (type=='OLS') {
sigma2 <- rinvgamma(1,shape=(N+k+1)/2,
scale=(t(a-beta%*%Lambda)%*%(a-beta%*%Lambda)+t(Lambda)%*%D%*%Lambda)/2)
}
if (type=='GLS') {
sigma2 <- rinvgamma(1,shape=(N+k+1)/2,
scale=(t(a-beta%*%Lambda)%*%solve(corrR)%*%(a-beta%*%Lambda)+t(Lambda)%*%D%*%Lambda)/2)
}
lambda_path[i, ] <- as.vector(Lambda)
Lambda_f <- Lambda[2:length(Lambda)]/colSds(f)
sdf_path[i,] <- as.vector(1 - f %*% Lambda_f)
    sdf_path[i,] <- 1 + sdf_path[i,] - mean(sdf_path[i,]) # normalize the SDF so that it has a mean of one
}
return(list(gamma_path = gamma_path,
lambda_path = lambda_path,
sdf_path = sdf_path,
bma_sdf = colMeans(sdf_path)))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianFactorZoo/R/continuous_ss_sdf.R
|
#' SDF model selection with continuous spike-and-slab prior (tradable factors are treated as test assets)
#'
#' @description This function provides the SDF model selection procedure using the continuous spike-and-slab prior.
#' See Propositions 3 and 4 in \insertCite{bryzgalova2023bayesian;textual}{BayesianFactorZoo}.
#' Unlike \code{continuous_ss_sdf}, tradable factors are treated as test assets in this function.
#'
#' @param f1 A matrix of nontradable factors with dimension \eqn{t \times k_1}, where \eqn{k_1} is the number of nontradable factors
#' and \eqn{t} is the number of periods.
#' @param f2 A matrix of tradable factors with dimension \eqn{t \times k_2}, where \eqn{k_2} is the number of tradable factors
#' and \eqn{t} is the number of periods.
#' @param R A matrix of test assets with dimension \eqn{t \times N}, where \eqn{t} is the number of periods
#' and \eqn{N} is the number of test assets (\bold{\code{R} should NOT contain tradable factors \code{f2}});
#' @param sim_length The length of the Monte Carlo simulation;
#' @param psi0 The hyper-parameter in the prior distribution of risk prices (see \bold{Details});
#' @param r The hyper-parameter related to the prior of risk prices (see \bold{Details});
#' @param aw The hyper-parameter related to the prior of \eqn{\gamma} (see \bold{Details});
#' @param bw The hyper-parameter related to the prior of \eqn{\gamma} (see \bold{Details});
#' @param type If \code{type = 'OLS'} (\code{type = 'GLS'}), the function returns Bayesian OLS (GLS) estimates of risk prices. The default is 'OLS'.
#'
#' @details
#'
#' See the description in the twin function \code{continuous_ss_sdf}.
#'
#' @references
#' \insertRef{bryzgalova2023bayesian}{BayesianFactorZoo}
#'
#' @return
#' The return of \code{continuous_ss_sdf_v2} is a list of the following elements:
#' \itemize{
#' \item \code{gamma_path}: A \code{sim_length}\eqn{\times k} matrix of the posterior draws of \eqn{\gamma} (\eqn{k = k_1 + k_2}). Each row represents
#' a draw. If \eqn{\gamma_j = 1} in one draw, factor \eqn{j} is included in the model in this draw, and vice versa.
#' \item \code{lambda_path}: A \code{sim_length}\eqn{\times (k+1)} matrix of the risk prices \eqn{\lambda}. Each row represents
#' a draw. Note that the first column is \eqn{\lambda_c} corresponding to the constant term. The next \eqn{k} columns (i.e., the 2-th -- \eqn{(k+1)}-th columns) are the risk prices of the \eqn{k} factors.
#' \item \code{sdf_path}: A \code{sim_length}\eqn{\times t} matrix of posterior draws of SDFs. Each row represents a draw.
#' \item \code{bma_sdf}: BMA-SDF.
#' }
#'
#' @importFrom MCMCpack rinvgamma
#'
#' @export
#'
#' @examples
#'
#' library(timeSeries)
#'
#' ## Load the example data
#' data("BFactor_zoo_example")
#' HML <- BFactor_zoo_example$HML
#' lambda_ols <- BFactor_zoo_example$lambda_ols
#' R2.ols.true <- BFactor_zoo_example$R2.ols.true
#' sim_f <- BFactor_zoo_example$sim_f
#' sim_R <- BFactor_zoo_example$sim_R
#' uf <- BFactor_zoo_example$uf
#'
#' ## sim_f: simulated strong factor
#' ## uf: simulated useless factor
#'
#' psi_hat <- psi_to_priorSR(sim_R, cbind(sim_f,uf,sim_R[,1]), priorSR=0.1)
#'
#' ## We include the first test asset, sim_R[,1], into factors, so f2 = sim_R[,1,drop=FALSE].
#' ## Also remember excluding sim_R[,1,drop=FALSE] from test assets, so R = sim_R[,-1].
#' shrinkage <- continuous_ss_sdf_v2(cbind(sim_f,uf), sim_R[,1,drop=FALSE], sim_R[,-1], 1000,
#' psi0=psi_hat, r=0.001, aw=1, bw=1)
#' cat("Null hypothesis: lambda =", 0, "for each of these three factors", "\n")
#' cat("Posterior probabilities of rejecting the above null hypotheses are:",
#' colMeans(shrinkage$gamma_path), "\n")
#'
#' ## We also have the posterior draws of SDF: m(t) = 1 - lambda_g %*% (f(t) - mu_f)
#' sdf_path <- shrinkage$sdf_path
#'
#' ## We also provide the Bayesian model averaging of the SDF (BMA-SDF)
#' bma_sdf <- shrinkage$bma_sdf
#'
#' ## We can further estimate the posterior distributions of model-implied Sharpe ratios:
#' cat("The 5th, 50th, and 95th quantiles of model-implied Sharpe ratios:",
#' quantile(colSds(t(sdf_path)), probs=c(0.05, 0.5, 0.95)), "\n")
#'
#' ## Finally, we can estimate the posterior distribution of model dimensions:
#' cat("The posterior distribution of model dimensions (= 0, 1, 2, 3):",
#' prop.table(table(rowSums(shrinkage$gamma_path))), "\n")
#'
#' ## We now use the 17th test asset, sim_R[,17,drop=FALSE], as the tradable factor,
#' ## so f2 = sim_R[,17,drop=FALSE].
#' ## Also remember excluding sim_R[,17,drop=FALSE] from test assets, so R = sim_R[,-17].
#' psi_hat <- psi_to_priorSR(sim_R, cbind(sim_f,uf,sim_R[,17]), priorSR=0.1)
#' shrinkage <- continuous_ss_sdf_v2(cbind(sim_f,uf), sim_R[,17,drop=FALSE], sim_R[,-17],
#' 1000, psi0=psi_hat, r=0.001, aw=1, bw=1)
#' cat("Null hypothesis: lambda =", 0, "for each of these three factors", "\n")
#' cat("Posterior probabilities of rejecting the above null hypotheses are:",
#' colMeans(shrinkage$gamma_path), "\n")
#'
#'
#'
# data("BFactor_zoo_example")
#
# psi_hat <- psi_to_priorSR(sim_R, cbind(sim_f,uf,sim_R[,17]), priorSR=0.1)
# f1 <- cbind(sim_f,uf)
# f2 <- sim_R[,17,drop=FALSE]
# R <- sim_R[,-17,drop=FALSE]
continuous_ss_sdf_v2 <- function(f1, f2, R, sim_length, psi0 = 1, r = 0.001, aw = 1, bw = 1, type = "OLS") {
## f1: matrix of nontradable factors with dimension t times k1, where k1 is the number of nontradable factors
## and t is the number of periods.
  ## f2: matrix of tradable factors with dimension t times k2, where k2 is the number of tradable factors
## and t is the number of periods. The tradable factors are also included as test assets.
## R: matrix of test assets with dimension t times N, where N is the number of test assets;
# psi0, r, aw, bw: hyper-parameters;
f <- cbind(f1, f2)
k1 <- dim(f1)[2] # the number of nontraded factors
k2 <- dim(f2)[2] # the number of traded factors
k <- k1 + k2 # the number of factors
N <- dim(R)[2]+k2 # the number of test assets
t <- dim(R)[1] # the number of time periods
p <- k1 + N # the number of variables in Y(t) (the union of f(t) and R(t))
Y <- cbind(f, R) # non-tradable factors + tradable portfolios
Sigma_ols <- cov(Y) # sample covariance matrix of Y(t)
Corr_ols <- cor(Y) # sample correlation matrix of Y(t)
sd_ols <- colSds(Y) # sample standard deviations of Y(t)
mu_ols <- matrix(colMeans(Y), ncol = 1) # sample mean of Y(t);
  # Check the prerequisite conditions
  check_input2(f, cbind(R, f2))
## Matrices as outputs
lambda_path <- matrix(0, ncol = (1+k), nrow = sim_length)
gamma_path <- matrix(0, ncol = k, nrow = sim_length)
sdf_path <- matrix(0, ncol=t, nrow = sim_length)
# Initialize some parameters:
beta_ols <- cbind(matrix(1, nrow = N, ncol = 1), Corr_ols[(k1+1):p, 1:k])
a_ols <- mu_ols[(1+k1):p,,drop=FALSE] / sd_ols[(k1+1):p]
Lambda_ols <- chol2inv(chol(t(beta_ols)%*%beta_ols)) %*% t(beta_ols) %*% a_ols
omega <- rep(0.5, k)
gamma <- rbinom(prob=omega, n = k, size = 1)
sigma2 <- as.vector((1/N) * t(a_ols - beta_ols %*% Lambda_ols) %*% (a_ols - beta_ols %*% Lambda_ols))
r_gamma <- ifelse(gamma==1, 1, r)
### Set the prior distribution for lambda_f
rho <- cor(Y)[(k1+1):p, 1:k, drop = FALSE]
rho.demean <- rho - matrix(1, ncol = 1, nrow = N) %*% matrix(colMeans(rho), nrow = 1)
#psi <- psi0 * diag(t(rho.demean)%*%rho.demean)
if (k == 1) {
psi <- psi0 * c(t(rho.demean)%*%rho.demean)
} else {
psi <- psi0 * diag(t(rho.demean)%*%rho.demean)
}
### Start the MCMC
for (i in 1:sim_length) {
#if (i %% 1000 == 0) {print(i)}
set.seed(i)
## (1) First-Stage: time-series regression
Sigma <- riwish(v=t-1, S=t*Sigma_ols) # draw the covariance matrix of Y(t)
Var_mu_half <- chol(Sigma/t)
mu <- mu_ols + t(Var_mu_half) %*% matrix(rnorm(p), ncol = 1) # draw the mean of Y(t)
sd_Y <- matrix(sqrt(diag(Sigma)), ncol=1) # standard deviation of Y(t)
corr_Y <- Sigma / (sd_Y%*%t(sd_Y))
C_f <- corr_Y[(k1+1):p, 1:k] # corr[R(t), f(t)]
a <- mu[(1+k1):p,1,drop=FALSE] / sd_Y[(1+k1):p] # Sharpe ratio of test assets;
#### II. Second-Stage: cross-section regression (Gibbs Sampling)
beta <- cbind(matrix(1,nrow = N, ncol = 1), C_f)
corrR <- corr_Y[(k1+1):p, (k1+1):p]
## Step II.1. Draw lambda conditional on (data, sigma2, gamma, omega): equation (28)
D <- diag(c(1/100000, 1/(r_gamma*psi)))
if (type=='OLS') {
beta_D_inv <- chol2inv(chol(t(beta)%*%beta + D))
cov_Lambda <- sigma2 * beta_D_inv
Lambda_hat <- beta_D_inv %*% t(beta)%*%a
}
if (type=='GLS') {
beta_D_inv <- chol2inv(chol(t(beta)%*%solve(corrR)%*%beta + D))
cov_Lambda <- sigma2 * beta_D_inv
Lambda_hat <- beta_D_inv %*% t(beta)%*%solve(corrR)%*%a
}
Lambda <- Lambda_hat + t(chol(cov_Lambda)) %*% matrix(rnorm(k+1), ncol = 1)
## Step II.2. Draw gamma_j conditional on (data, Lambda, psi, sigma2, gamma_{-j}, omega):
## See equation (29)
log.odds <- log((omega/(1-omega))) + 0.5*log(r) + 0.5*Lambda[2:(k+1)]^2*(1/r-1)/(sigma2*psi)
odds <- exp(log.odds)
odds <- ifelse(odds > 1000, 1000, odds)
prob = odds / (1 + odds)
gamma <- rbinom(prob=prob, n = k, size = 1)
r_gamma <- ifelse(gamma==1, 1, r)
gamma_path[i, ] <- gamma
## Step II.3. Draw omega: equation (30)
omega <- rbeta(k, aw+gamma, bw+1-gamma)
## Step II.4. Draw sigma-squared: equation (31)
if (type=='OLS') {
sigma2 <- rinvgamma(1,shape=(N+k+1)/2,
scale=(t(a-beta%*%Lambda)%*%(a-beta%*%Lambda)+t(Lambda)%*%D%*%Lambda)/2)
}
if (type=='GLS') {
sigma2 <- rinvgamma(1,shape=(N+k+1)/2,
scale=(t(a-beta%*%Lambda)%*%solve(corrR)%*%(a-beta%*%Lambda)+t(Lambda)%*%D%*%Lambda)/2)
}
lambda_path[i, ] <- as.vector(Lambda)
Lambda_f <- Lambda[2:length(Lambda)]/colSds(f)
sdf_path[i,] <- as.vector(1 - f %*% Lambda_f)
    sdf_path[i,] <- 1 + sdf_path[i,] - mean(sdf_path[i,]) # normalize the SDF so that it has a mean of one
}
return(list(gamma_path = gamma_path,
lambda_path = lambda_path,
sdf_path = sdf_path,
bma_sdf = colMeans(sdf_path)))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianFactorZoo/R/continuous_ss_sdf_v2.R
|
#' Hypothesis testing for risk prices (Bayesian p-values) with Dirac spike-and-slab prior
#'
#' @description This function tests the null hypothesis, \eqn{H_0: \lambda = \lambda_0}, when \eqn{\gamma=0}.
#' When \eqn{\lambda_0 = 0}, we compare factor models using the algorithm in Proposition 1 of \insertCite{bryzgalova2023bayesian;textual}{BayesianFactorZoo}.
#' When \eqn{\lambda_0 \neq 0}, this function corresponds to Corollary 2 in Section II.A.2 of \insertCite{bryzgalova2023bayesian;textual}{BayesianFactorZoo}.
#' The function can also be used to compute the posterior probabilities of all possible models with up to a
#' given maximum number of factors (see examples).
#'
#' @param f A matrix of factors with dimension \eqn{t \times k}, where \eqn{k} is the number of factors
#' and \eqn{t} is the number of periods;
#' @param R A matrix of test assets with dimension \eqn{t \times N}, where \eqn{t} is the number of periods
#' and \eqn{N} is the number of test assets;
#' @param sim_length The length of the Monte Carlo simulation;
#' @param max_k The maximal number of factors in models (\code{max_k} is a positive integer or \code{NULL} if the user does not impose any restriction on the model dimension).
#' @param psi0 The hyper-parameter in the prior distribution of risk price \eqn{\lambda} (see \bold{Details});
#' @param lambda0 A \eqn{k \times 1} vector of risk prices under the null hypothesis (\eqn{\gamma=0});
#'
#' @details
#'
#' Let \eqn{D} denote a diagonal matrix with elements \eqn{c, \psi_1^{-1},..., \psi_K^{-1}}, and \eqn{D_\gamma} the submatrix of \eqn{D}
#' corresponding to model \eqn{\gamma}, where \eqn{c} is a small positive number corresponding to the common cross-sectional intercept
#' (\eqn{\lambda_c}). The prior for the prices of risk (\eqn{\lambda_\gamma}) of model \eqn{\gamma} is then
#' \deqn{ \lambda_\gamma | \sigma^2, \gamma \sim N (0, \sigma^2 D_{\gamma}^{-1}). }
#'
#' We choose
#' \eqn{ \psi_j = \psi \tilde{\rho}_j^\top \tilde{\rho}_j }, where \eqn{ \tilde{\rho}_j = \rho_j - (\frac{1}{N} \Sigma_{i=1}^{N} \rho_{j,i} ) \times 1_N } is the cross-sectionally
#' demeaned vector of factor \eqn{j}'s correlations with asset returns. In the codes, \eqn{\psi} is equal to the value of \code{psi0}.
#'
#' @references
#' \insertRef{bryzgalova2023bayesian}{BayesianFactorZoo}
#'
#'
#' @return
#' The return of \code{dirac_ss_sdf_pvalue} is a list of the following elements:
#' \itemize{
#' \item \code{gamma_path}: A \code{sim_length}\eqn{\times k} matrix of the posterior draws of \eqn{\gamma}. Each row represents
#' a draw. If \eqn{\gamma_j = 1} in one draw, factor \eqn{j} is included in the model in this draw, and vice versa.
#' \item \code{lambda_path}: A \code{sim_length}\eqn{\times (k+1)} matrix of the risk prices \eqn{\lambda}. Each row represents
#' a draw. Note that the first column is \eqn{\lambda_c} corresponding to the constant term. The next \eqn{k} columns (i.e., the 2-th -- \eqn{(k+1)}-th columns) are the risk prices of the \eqn{k} factors;
#' \item \code{model_probs}: A \eqn{2^k \times (k+1)} matrix of posterior model probabilities, where the first k columns are the model indices and the final column is a vector of model probabilities.
#' }
#'
#'
#'
#' @export
#'
#' @examples
#'
#' ## <-------------------------------------------------------------------------------->
#' ## Example: Bayesian p-value (with the dirac spike-and-slab prior)
#' ## <-------------------------------------------------------------------------------->
#'
#' # Load the example data
#' data("BFactor_zoo_example")
#' HML <- BFactor_zoo_example$HML
#' lambda_ols <- BFactor_zoo_example$lambda_ols
#' R2.ols.true <- BFactor_zoo_example$R2.ols.true
#' sim_f <- BFactor_zoo_example$sim_f
#' sim_R <- BFactor_zoo_example$sim_R
#' uf <- BFactor_zoo_example$uf
#'
#' ### Now we estimate the Bayesian p-values defined in Corollary 2.
#'
#' #
#' ### Prior Sharpe ratio of factor model for different values of psi: see equation (27):
#' #
#' cat("--------------- Choose psi based on prior Sharpe ratio ----------------\n")
#' cat("if psi = 1, prior Sharpe ratio is", psi_to_priorSR(sim_R, sim_f, psi0=1), "\n")
#' cat("if psi = 2, prior Sharpe ratio is", psi_to_priorSR(sim_R, sim_f, psi0=2), "\n")
#' cat("if psi = 5, prior Sharpe ratio is", psi_to_priorSR(sim_R, sim_f, psi0=5), "\n")
#'
#' ## Test whether factors' risk prices equal 'matrix(lambda_ols[2]*sd(HML),ncol=1)'
#' ## Bayesian p-value is given by mean(shrinkage$gamma_path)
#' shrinkage <- dirac_ss_sdf_pvalue(sim_f, sim_R, 1000, matrix(lambda_ols[2]*sd(HML),ncol=1))
#' cat("Null hypothesis: lambda =", matrix(lambda_ols[2]*sd(HML)), "\n")
#' cat("Posterior probability of rejecting the above null hypothesis is:",
#' mean(shrinkage$gamma_path), "\n")
#'
#' ## Test whether the risk price of factor 'sim_f' is equal to 0
#' shrinkage <- dirac_ss_sdf_pvalue(sim_f, sim_R, 1000, 0, psi0=1)
#' cat("Null hypothesis: lambda =", 0, "\n")
#' cat("Posterior probability of rejecting the above null hypothesis is:",
#' mean(shrinkage$gamma_path), "\n")
#'
#'
#' ## One can also put more than one factor into the test
#' two_f = cbind(sim_f,uf) # sim_f is the strong factor while uf is the useless factor
#' # Test1: lambda of sim_f = 0, Test2: lambda of uf = 0
#' lambda0_null_vec = t(cbind(0,0)) # 2x1 vector
#' shrinkage <- dirac_ss_sdf_pvalue(two_f, sim_R, 1000, lambda0_null_vec, psi0=1)
#' cat("Null hypothesis: lambda =", 0, "for each factor", "\n")
#' cat("Posterior probabilities of rejecting the above null hypothesis are:",
#' colMeans(shrinkage$gamma_path), "\n")
#'
#' ## We can also print the posterior model probabilities:
#' cat('Posterior model probabilities are:\n')
#' print(shrinkage$model_probs)
#'
#'
#' ## One can compute the posterior probabilities of all possible models with up to
#' ## a given maximum number of factors. For example, we consider two factors, but
#' ## the number of factors is restricted to be less than two.
#' lambda0_null_vec = t(cbind(0,0)) # 2x1 vector
#' shrinkage <- dirac_ss_sdf_pvalue(two_f, sim_R, 1000, lambda0_null_vec, psi0=1, max_k=1)
#' cat('Posterior model probabilities are:\n')
#' print(shrinkage$model_probs)
#' ## Comment: You may notice that the model with index (1, 1) has a posterior probability
#' ## of exactly zero since the maximal number of factors is one.
#'
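#' ## A small follow-up sketch: posterior summaries of the risk prices. The
#' ## first column of lambda_path is the intercept lambda_c:
#' apply(shrinkage$lambda_path, 2, quantile, probs = c(0.05, 0.5, 0.95))
#'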
dirac_ss_sdf_pvalue <- function(f, R, sim_length, lambda0, psi0 = 1, max_k=NULL) {
## f: a matrix of factors with dimension t times k, where k is the number of factors and t is the number of periods;
## R: a matrix of test assets with dimension t times N, where N is the number of test assets;
## sim_length: the length of MCMC;
## psi0: is a tune parameter that controls the shrinkage in our method;
## lambda0: is the k times 1 vector of risk prices under the null hypothesis gamma = 0.
## max_k: the maximal number of factors in models.
## Remark: We are not testing the null of lambda_c (the intercept) since we always include the
## intercept. Also, an improper (flat) prior is used for the intercept.
compute_models_probs <- function(model_draws, max_k) {
ndraws <- dim(model_draws)[1]
k <- dim(model_draws)[2]
modelsets <- matrix(0, ncol = k, nrow = 1) # the null model;
for (l in 1:max_k) {
modelsets <- rbind(modelsets, cbind(t(combn(k,l)), matrix(0, ncol = k-l, nrow = dim(t(combn(k,l)))[1])))
}
out_table <- matrix(0, nrow=dim(modelsets)[1], ncol=dim(modelsets)[2])
for (jj in 1:dim(modelsets)[1]) {
index <- modelsets[jj,]
if (sum(index) > 0) {
index <- index[index!=0]
out_table[jj, index] <- 1
}
}
models_probs <- rep(NA, dim(out_table)[1])
for (jj in 1:dim(out_table)[1]) {
index <- out_table[jj,]
models_probs[jj] <- mean(rowSums(model_draws == matrix(1,ncol=1,nrow=ndraws) %*% index) == k)
}
return(cbind(out_table, models_probs))
}
k <- dim(f)[2] # the number of factors
t <- dim(f)[1] # the number of time periods
N <- dim(R)[2] # the number of test assets
p <- k + N # the number of variables in Y(t)
Y <- cbind(f, R) # factors + tradable portfolios
Sigma_ols <- cov(Y) # sample covariance matrix of Y(t)
Corr_ols <- cor(Y) # sample correlation matrix of Y(t)
sd_ols <- colSds(Y) # sample standard deviations of Y(t)
mu_ols <- matrix(colMeans(Y), ncol = 1) # sample mean of Y(t);
if (is.null(max_k) == TRUE) {
max_k = k
}
  # Check the prerequisite conditions
  check_input2(f, R)
## Matrices as outputs
lambda_path <- matrix(0, ncol = (1+k), nrow = sim_length)
gamma_path <- matrix(0, ncol = k, nrow = sim_length)
### Set the prior distribution for lambda_f
rho <- cor(Y)[(k+1):p, 1:k, drop = FALSE]
rho.demean <- rho - matrix(1, ncol = 1, nrow = N) %*% matrix(colMeans(rho), nrow = 1)
#psi <- psi0 * diag(t(rho.demean)%*%rho.demean)
if (k == 1) {
psi <- psi0 * c(t(rho.demean)%*%rho.demean)
} else {
psi <- psi0 * diag(t(rho.demean)%*%rho.demean)
}
D <- diag(c(1/100000, 1/psi))
### Generate a set of candidate models
modelsets <- matrix(0, ncol = k, nrow = 1) # the null model with only the common intercept
for (l in 1:max_k) {
modelsets <- rbind(modelsets, cbind(t(combn(k,l)), matrix(0, ncol = k-l, nrow = dim(t(combn(k,l)))[1])))
}
# if (is.null(max_k) == FALSE) {
# subset_index <- (rowSums(modelsets > 0) <= max_k)
# modelsets <- modelsets[subset_index,]
# }
# subset_index <- (rowSums(modelsets > 0) <= max_k)
# modelsets <- modelsets[subset_index,]
model.num <- dim(modelsets)[1] # the number of candidate models
### Start the MCMC:
for (j in 1:sim_length) {
#print(j)
set.seed(j)
## (1) First-Stage: time-series regression
Sigma <- riwish(v=t-1, S=t*Sigma_ols) # draw the covariance matrix of Y(t)
Var_mu_half <- chol(Sigma/t)
mu <- mu_ols + t(Var_mu_half) %*% matrix(rnorm(p), ncol = 1) # draw the mean of Y(t)
sd_Y <- matrix(sqrt(diag(Sigma)), ncol=1) # standard deviation of Y(t)
corr_Y <- Sigma / (sd_Y%*%t(sd_Y))
C_f <- corr_Y[(k+1):p, 1:k, drop=FALSE] # corr[R(t), f(t)]
a <- mu[(1+k):p,1,drop=FALSE] / sd_Y[(1+k):p] # Sharpe ratio of test assets;
## (2) Second-Stage: cross-sectional regression
beta_f <- C_f
beta <- cbind(matrix(1,nrow = N, ncol = 1), C_f)
log_prob <- rep(0, model.num)
## Calculate the (log) probability of each model: see Proposition 2 and Corollary 2
for (i in 1:model.num) {
if (i == 1) { # null model with only the common intercept
H_i <- matrix(1, nrow = N, ncol = 1)
D_i <- matrix(1/100000)
a_gamma <- a - beta_f %*% lambda0
} else {
index <- modelsets[i,]
index <- index[index!=0]
H_i <- cbind(matrix(1,nrow = N, ncol = 1), beta_f[,index,drop=FALSE])
D_i <- D[c(1,1+index), c(1,1+index)]
if (length(index)==k) { # full model with all factors included
a_gamma <- a
} else {
a_gamma <- a - beta_f[,-index,drop=FALSE] %*% lambda0[-index,,drop=FALSE]
}
}
HH_D.inv <- chol2inv(chol(t(H_i)%*%H_i + D_i))
lambda_i <- HH_D.inv %*% t(H_i)%*%a_gamma
SSR_i <- t(a_gamma) %*% a_gamma - t(a_gamma)%*%H_i %*% HH_D.inv %*% t(H_i)%*%a_gamma
SSR_i <- as.vector(SSR_i)
log_prob_i <- 0.5 * log(det(D_i)/det(t(H_i)%*%H_i + D_i)) - 0.5*N*log(0.5*SSR_i)
#print(SSR_i)
#print(log_prob_i)
log_prob[i] <- log_prob_i
#print(a_gamma)
}
probs <- exp(log_prob)
probs <- probs/sum(probs)
#print(lambda_i)
#print(probs)
## draw the model according to its posterior probability probs
i <- sample(1:model.num, size = 1, prob = probs)
if (i == 1) { # null model with only the common intercept
index <- modelsets[i,]
index <- index[index!=0]
H_i <- matrix(1,nrow = N, ncol = 1)
D_i <- matrix(1/100000)
a_gamma <- a - beta_f %*% lambda0
} else {
index <- modelsets[i,]
index <- index[index!=0]
H_i <- cbind(matrix(1,nrow = N, ncol = 1), beta_f[,index,drop=FALSE])
D_i <- D[c(1,1+index), c(1,1+index)]
gamma_path[j,index] <- 1
if (length(index)==k) { # full model with all factors included
a_gamma <- a
} else {
a_gamma <- a - beta_f[,-index,drop=FALSE] %*% lambda0[-index,,drop=FALSE]
}
}
## draw sigma2 and lambda conditional on model index gamma: see equation (16) and (17)
HH_D.inv <- chol2inv(chol(t(H_i)%*%H_i + D_i))
Lambda_hat <- HH_D.inv %*% t(H_i)%*%a_gamma
sigma2 <- rinvgamma(1, shape=(N/2), scale=t(a_gamma-H_i%*%Lambda_hat)%*%(a_gamma-H_i%*%Lambda_hat)/2)
cov_Lambda <- sigma2 * HH_D.inv
Lambda <- Lambda_hat + t(chol(cov_Lambda)) %*% matrix(rnorm(length(Lambda_hat)), ncol = 1)
lambda_path[j, c(1,1+index)] <- as.vector(Lambda)
unselected <- setdiff(1:k, index)
lambda_path[j, 1+unselected] <- lambda0[unselected]
#print(Lambda)
#print(index)
}
return(list(lambda_path=lambda_path,
gamma_path=gamma_path,
model_probs=compute_models_probs(gamma_path, max_k)))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianFactorZoo/R/dirac_ss_sdf_pval.R
|
#' @import reshape2
#' @import coda
#' @import ggplot2
#' @import MASS
#' @import timeSeries
#' @import mvtnorm
#' @import nse
#' @importFrom matrixcalc vech
#' @importFrom MCMCpack rinvgamma riwish
#' @import Rdpack
#' @importFrom stats cor cov pchisq rbeta rbinom rnorm var
#' @importFrom utils combn
#' @useDynLib BayesianFactorZoo
NULL
|
/scratch/gouwar.j/cran-all/cranData/BayesianFactorZoo/R/import-packages.R
|
#' Mapping \eqn{\psi} (\code{psi0}) to the prior Sharpe ratio of factors (\code{priorSR}), and vice versa.
#'
#' @description This function provides the one-to-one mapping between \eqn{\psi} and the prior Sharpe ratio of factors.
#' See Section II.A.3 in \insertCite{bryzgalova2023bayesian;textual}{BayesianFactorZoo}.
#'
#' @param f A matrix of factors with dimension \eqn{t \times k}, where \eqn{k} is the number of factors
#' and \eqn{t} is the number of periods;
#' @param R A matrix of test assets with dimension \eqn{t \times N}, where \eqn{t} is the number of periods
#' and \eqn{N} is the number of test assets;
#' @param psi0 The hyper-parameter in the prior distribution of risk prices (see \bold{Details} in the function \code{continuous_ss_sdf});
#' @param priorSR The prior Sharpe ratio of all factors (see \bold{Details});
#' @param aw The hyper-parameter in the prior of \eqn{\gamma} (default value = 1, see \bold{Details});
#' @param bw The hyper-parameter in the prior of \eqn{\gamma} (default value = 1, see \bold{Details});
#'
#' @details
#'
#' According to equation (27) in \insertCite{bryzgalova2023bayesian;textual}{BayesianFactorZoo}, we learn that
#' \deqn{\frac{E_{\pi} [ SR^2_f \mid \gamma, \sigma^2 ] }{E_{\pi} [ SR^2_{\alpha} \mid \sigma^2] } = \frac{\psi \sum^K_{k=1} r(\gamma_k) \tilde{\rho}^\top_k \tilde{\rho}_k }{N}, }
#' where \eqn{SR^2_f} and \eqn{SR^2_{\alpha}} denote the Sharpe ratios of all factors (\eqn{f_t}) and of the pricing errors
#' (\eqn{\alpha}), and \eqn{E_{\pi}} denotes prior expectations.
#'
#' The prior \eqn{\pi (\omega)} encodes the belief about the sparsity of the true model using the prior distribution
#' \eqn{\pi (\gamma_j = 1 | \omega_j) = \omega_j, \ \ \omega_j \sim Beta(a_\omega, b_\omega) .} We further integrate out
#' \eqn{\gamma_j} in \eqn{E_{\pi} [ SR^2_f \mid \gamma, \sigma^2 ]} and show the following:
#'
#' \deqn{\frac{E_{\pi} [ SR^2_f \mid \sigma^2 ] }{E_{\pi} [ SR^2_{\alpha} \mid \sigma^2 ] } \approx \frac{a_\omega}{a_\omega+b_\omega} \psi \frac{ \sum^K_{k=1} \tilde{\rho}^\top_k \tilde{\rho}_k }{N}, \ as \ r \to 0 .}
#'
#' Since we can decompose the Sharpe ratios of all test assets, \eqn{SR^2_R}, into \eqn{SR^2_f} and \eqn{SR^2_{\alpha}} (i.e., \eqn{SR^2_R = SR^2_f + SR^2_{\alpha}}), we can
#' represent \eqn{SR^2_f} as follows:
#'
#' \deqn{ E_{\pi} [ SR^2_f \mid \sigma^2 ] \approx \frac{\frac{a_\omega}{a_\omega+b_\omega} \psi \frac{ \sum^K_{k=1} \tilde{\rho}^\top_k \tilde{\rho}_k }{N}}{1 + \frac{a_\omega}{a_\omega+b_\omega} \psi \frac{ \sum^K_{k=1} \tilde{\rho}^\top_k \tilde{\rho}_k }{N}} SR^2_R.}
#'
#' We define the prior Sharpe ratio implied by the factor models as \eqn{\sqrt{E_{\pi} [ SR^2_f \mid \sigma^2 ]}}.
#' Given \eqn{a_\omega}, \eqn{b_\omega}, \eqn{\frac{ \sum^K_{k=1} \tilde{\rho}^\top_k \tilde{\rho}_k }{N}}, and the observed
#' Sharpe ratio of test assets, we have one-to-one mapping between \eqn{\psi} and \eqn{\sqrt{E_{\pi} [ SR^2_f \mid \sigma^2 ]}}.
#'
#' If the user aims to convert \eqn{\psi} to the prior Sharpe ratio, she should input only \code{psi0}.
#' In contrast, if she wants to convert the prior Sharpe ratio to \eqn{\psi}, \code{priorSR} should be entered.
#'
#' @references
#' \insertRef{bryzgalova2023bayesian}{BayesianFactorZoo}
#'
#'
#' @return
#' The return of \code{psi_to_priorSR} is:
#' \itemize{
#' \item \code{psi0} or \code{priorSR}.
#' }
#'
#' @export
#'
#' @examples
#'
#' ## Load the example data
#' data("BFactor_zoo_example")
#' HML <- BFactor_zoo_example$HML
#' lambda_ols <- BFactor_zoo_example$lambda_ols
#' R2.ols.true <- BFactor_zoo_example$R2.ols.true
#' sim_f <- BFactor_zoo_example$sim_f
#' sim_R <- BFactor_zoo_example$sim_R
#' uf <- BFactor_zoo_example$uf
#'
#' ## If the user wants to convert the prior Sharpe ratio to psi:
#' print(psi_to_priorSR(sim_R, sim_f, priorSR=0.1))
#'
#' ## If the user aims to convert psi back to the prior Sharpe ratio:
#' psi0_to_map <- psi_to_priorSR(sim_R, sim_f, priorSR=0.1)
#' print(psi_to_priorSR(sim_R, sim_f, psi0=psi0_to_map))
#'
#' ## If we enter both psi0 and priorSR (or forget to input them simultaneously),
#' ## a warning will be printed:
#' print(psi_to_priorSR(sim_R, sim_f))
#' print(psi_to_priorSR(sim_R, sim_f, priorSR=0.1, psi0=2))
#'
#'
#'
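#' ## The mapping is monotone: a larger psi implies a larger prior Sharpe ratio.
#' ## A quick check over a few values of psi:
#' sapply(c(1, 2, 5), function(p) psi_to_priorSR(sim_R, sim_f, psi0 = p))
#'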
# f <- sim_f
# R <- sim_R
# priorSR <- 0.1
# psi0 <- NULL
# aw <- 1
# bw <- 1
#
# print(psi_to_priorSR(R, f, priorSR=0.1))
#
# print(psi_to_priorSR(R, f, psi0=psi_to_priorSR(R, f, priorSR=0.1)))
#
# print(psi_to_priorSR(R, f))
# print(psi_to_priorSR(R, f, priorSR=0.1, psi0=2))
psi_to_priorSR <- function(R, f, psi0=NULL, priorSR=NULL, aw=1, bw=1) {
if ((is.null(psi0)&is.null(priorSR)) | (isFALSE(is.null(psi0))&isFALSE(is.null(priorSR)))) {
#cat("Please enter either psi0 or priorSR!")
return("Please enter either psi0 or priorSR!")
}
### In-sample squared Sharpe ratio
SharpeRatio <- function(R) {
ER <- matrix(colMeans(R), ncol=1)
covR <- cov(R)
return(t(ER)%*%solve(covR)%*%ER)
}
SR.max <- sqrt(SharpeRatio(R))[1,1]
N <- dim(R)[2]
corr_Rf <- cor(R, f)
corr_Rf.demean <- corr_Rf - matrix(1, ncol = 1, nrow = N) %*% matrix(colMeans(corr_Rf), nrow = 1)
eta <- (aw/(aw+bw))*sum(diag(t(corr_Rf.demean)%*%corr_Rf.demean)) / N
if (is.null(psi0)&isFALSE(is.null(priorSR))) {
return(priorSR^2 / ((SR.max^2-priorSR^2)*eta))
} else {
return(sqrt((psi0*eta/(1+psi0*eta))) * SR.max)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianFactorZoo/R/psi_to_priorSR.R
|
#' Block Gibbs sampler function
#'
#' Blockwise sampling from the conditional distribution of a permuted column/row
#' for simulating the posterior distribution for the concentration matrix specifying
#' a Gaussian Graphical Model
#' @param X Data matrix
#' @param iterations Length of Markov chain after burn-in
#' @param burnIn Number of burn-in iterations
#' @param lambdaPriora Shrinkage hyperparameter (lambda) gamma distribution shape
#' @param lambdaPriorb Shrinkage hyperparameter (lambda) gamma distribution rate
#' @param verbose logical; if TRUE return MCMC progress
#' @details Implements the block Gibbs sampler for the Bayesian graphical lasso
#' introduced in Wang (2012). Samples from the conditional distribution of a
#' permuted column/row for simulating the posterior distribution for the concentration
#' matrix specifying a Gaussian Graphical Model
#' @return
#' \item{Sigmas}{List of covariance matrices from the Markov chain}
#' \item{Omegas}{List of concentration matrices from the Markov chain}
#' \item{lambdas}{Vector of simulated lambda parameters}
#' @author Patrick Trainor (University of Louisville)
#' @author Hao Wang
#' @references Wang, H. (2012). Bayesian graphical lasso models and efficient
#' posterior computation. \emph{Bayesian Analysis, 7}(4). <doi:10.1214/12-BA729>.
#' @examples
#' \donttest{
#' # Generate true covariance matrix:
#' s<-.9**toeplitz(0:9)
#' # Generate multivariate normal distribution:
#' set.seed(5)
#' x<-MASS::mvrnorm(n=100,mu=rep(0,10),Sigma=s)
#' blockGLasso(X=x)
#' }
#' # Same example with short MCMC chain:
#' s<-.9**toeplitz(0:9)
#' set.seed(6)
#' x<-MASS::mvrnorm(n=100,mu=rep(0,10),Sigma=s)
#' blockGLasso(X=x,iterations=100,burnIn=100)
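#' # A minimal post-processing sketch (assuming the short chain above). The
#' # returned lists also contain the burn-in draws, so we drop those first:
#' fit<-blockGLasso(X=x,iterations=100,burnIn=100)
#' OmegaPost<-fit$Omegas[-(1:100)] # discard the 100 burn-in draws
#' OmegaHat<-Reduce("+",OmegaPost)/length(OmegaPost) # posterior mean of Omega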
#' @export
blockGLasso<-function(X,iterations=2000,burnIn=1000,lambdaPriora=1,lambdaPriorb=1/10,
verbose=TRUE)
{
# Total iterations:
totIter<-iterations+burnIn
# Sum of product matrix, covariance matrix, n
S<-t(X)%*%X
Sigma=stats::cov(X)
n=nrow(X)
  # Concentration matrix and its dimension:
Omega<-MASS::ginv(Sigma)
p<-dim(Omega)[1]
# Indicator matrix and permutation matrix for looping through columns & rows ("blocks")
indMat<-matrix(1:p**2,ncol=p,nrow=p)
perms<-matrix(NA,nrow=p-1,ncol=p)
permInt<-1:p
for(i in 1:ncol(perms))
{
perms[,i]<-permInt[-i]
}
# Structures for storing each MCMC iteration:
SigmaMatList<-OmegaMatList<-list()
lambdas<-rep(NA,totIter)
# Latent tau:
tau<-matrix(NA,nrow=p,ncol=p)
  # Gamma distribution posterior parameter a:
lambdaPosta<-(lambdaPriora+(p*(p+1)/2))
# Main block sampling loop:
for(iter in 1:totIter)
{
    # Gamma distribution posterior parameter b:
lambdaPostb<-(lambdaPriorb+sum(abs(c(Omega)))/2)
# Sample lambda:
lambda<-stats::rgamma(1,shape=lambdaPosta,scale=1/lambdaPostb)
OmegaTemp<-Omega[lower.tri(Omega)]
#cat("Omega Temp min=",min(OmegaTemp)," max=",max(OmegaTemp),"\n")
# Sample tau:
rinvgaussFun<-function(x)
{
x<-ifelse(x<1e-12,1e-12,x)
#cat("lambda=",lambda," mu=",x,"\n")
return(statmod::rinvgauss(n=1,mean=x,shape=lambda**2))
}
tau[lower.tri(tau)]<-1/sapply(sqrt(lambda**2/(OmegaTemp**2)),rinvgaussFun)
tau[upper.tri(tau)]<-t(tau)[upper.tri(t(tau))]
# Sample from conditional distribution by column:
for(i in 1:p)
{
tauI<-tau[perms[,i],i]
Sigma11<-Sigma[perms[,i],perms[,i]]
Sigma12<-Sigma[perms[,i],i]
S21<-S[i,perms[,i]]
Omega11inv<-Sigma11-Sigma12%*%t(Sigma12)/Sigma[i,i]
Ci<-(S[i,i]+lambda)*Omega11inv+diag(1/tauI)
CiChol<-chol(Ci)
mui<-solve(-Ci,S[perms[,i],i])
# Sampling:
beta<-mui+solve(CiChol,stats::rnorm(p-1))
# Replacing omega entries
Omega[perms[,i],i]<-beta
Omega[i,perms[,i]]<-beta
      gamm<-stats::rgamma(n=1,shape=n/2+1,rate=(S[i,i]+lambda)/2) # use S[i,i] for the current block, consistent with Ci above
Omega[i,i]<-gamm+t(beta) %*% Omega11inv %*% beta
# Replacing sigma entries
OmegaInvTemp<-Omega11inv %*% beta
Sigma[perms[,i],perms[,i]]<-Omega11inv+(OmegaInvTemp %*% t(OmegaInvTemp))/gamm
Sigma[perms[,i],i]<-Sigma[i,perms[,i]]<-(-OmegaInvTemp/gamm)
Sigma[i,i]<-1/gamm
}
if(iter %% 100 ==0)
{
cat("Total iterations= ",iter, "Iterations since burn in= ",
ifelse(iter-burnIn>0,iter-burnIn,0), "\n")
}
# Save lambda:
lambdas[iter]<-lambda
# Save Sigma and Omega:
SigmaMatList[[iter]]<-Sigma
OmegaMatList[[iter]]<-Omega
}
list(Sigmas=SigmaMatList,Omegas=OmegaMatList,lambdas=lambdas)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianGLasso/R/blockGLasso.R
|
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
## usethis namespace: end
NULL
/scratch/gouwar.j/cran-all/cranData/BayesianLaterality/R/BayesianLaterality-package.R
#' Example dataset with a single measurement of three individuals.
"example_data1"
#' Example dataset with three measurements each on 100 individuals.
"example_data2"
/scratch/gouwar.j/cran-all/cranData/BayesianLaterality/R/data.R
#' Predict hemispheric dominance
#'
#' Predict hemispheric dominance based on observed laterality measures, using the methods
#' described in \insertCite{Sorensen2020;textual}{BayesianLaterality}.
#'
#' @param data Data frame with the following columns:
#' \itemize{
#' \item \code{listening}: Score between -100 and 100.
#' \item \code{handedness}: \code{"left"} for adextral (non-right-handed) and
#' \code{"right"} for dextral (right-handed)
#' }
#' In addition, an optional column named \code{ID}
#' can be provided, giving the subject ID. If a subject has multiple
#' measurements, the posterior based on all measurements is provided. If the
#' \code{ID} column is missing, each row is assumed to be measured on a
#' separate subject.
#'
#' @param parameters Data frame in which the first two columns specify combinations
#' of hemispheric dominance and handedness and the last three columns specify
#' the corresponding parameter values. In particular, the columns are defined as follows:
#' \itemize{
#' \item \code{dominance}: character specifying hemispheric dominance.
#' \item \code{handedness}: character specifying handedness.
#' \item \code{mean_li}: mean dichotic listening score.
#' \item \code{sd_li}: standard deviation of dichotic listening score.
#' \item \code{prob_dominance}: probability of hemispheric dominance given handedness.
#' }
#' @param truncation Numeric vector with two elements specifying the lower and upper
#' bounds for truncation of the normal distribution for dichotic listening scores.
#' @param icc Intraclass correlation for repeated measurements on the same individual.
#' Defaults to 0.
#'
#' @return The probability of left or right hemispheric dominance in additional
#' columns of \code{data}.
#' @export
#' @examples
#' # The package comes with two example datasets.
#' # The first contains single measurements on three subjects.
#' # We can first take a look at the data
#' example_data1
#' # Next, compute predictions.
#' # Since there is no ID column, predict_dominance() will print a message telling
#' # the user that the rows are assumed to contain observations from different subjects.
#' predict_dominance(example_data1)
#'
#' # The next example dataset contains repeated measurements
#' example_data2
#'
#' # We compute the predictions as before:
#' predict_dominance(example_data2)
#'
#' @references
#' \insertAllCited{}
#'
#' @importFrom rlang .data
#' @importFrom Rdpack reprompt
predict_dominance <- function(data,
parameters = dplyr::tibble(
dominance = rep(c("left", "right", "none"), each = 2),
handedness = rep(c("left", "right"), 3),
mean_li = c(10, 12, -24, -24, 0, 0),
sd_li = c(24.9, 17.0, 24.9, 17.0, 22, 22),
prob_dominance = c(.65, .87, .35, .13, 0, 0)
),
truncation = c(-100, 100),
icc = 0
){
stopifnot(icc >= -1 && icc <= 1)
# Check if data contains an ID column
if(!"ID" %in% colnames(data)) {
message("No ID column in data, assuming one subject per row.")
data$ID = as.character(seq(1, nrow(data), by = 1))
}
dat1 <- dplyr::select_at(data, dplyr::vars("ID", "listening", "handedness"))
dat1 <- dplyr::inner_join(dat1, parameters, by = "handedness")
dat1 <- dplyr::select_at(dat1, dplyr::vars("ID", "handedness", "dominance",
"prob_dominance", "mean_li", "sd_li", "listening"))
dat2 <- tidyr::nest(dat1, df = c("listening", "mean_li", "sd_li"))
dat3 <- dplyr::mutate(dat2,
log_prob_listening = purrr::map_dbl(.data$df, function(x) {
tmvtnorm::dtmvnorm(x$listening,
mean = x$mean_li,
sigma = x$sd_li^2 * (diag(nrow(x)) * (1 - icc) + icc),
lower = rep(truncation[[1]], nrow(x)),
upper = rep(truncation[[2]], nrow(x)),
log = TRUE
)
}),
log_posterior = log(.data$prob_dominance) + .data$log_prob_listening,
probability = exp(.data$log_posterior)
)
dat4 <- dplyr::group_by_at(dat3, dplyr::vars("ID"))
dat5 <- dplyr::mutate_at(dat4, dplyr::vars("probability"), ~ . / sum(.))
dplyr::select_at(dplyr::ungroup(dat5), dplyr::vars("ID", "handedness",
"dominance", "probability"))
}
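# Minimal sketch (not executed on source) of the documented `icc` argument for
# repeated measures; the value 0.5 is an arbitrary illustration, not a default.
if (FALSE) {
  predict_dominance(example_data2, icc = 0.5)
}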
/scratch/gouwar.j/cran-all/cranData/BayesianLaterality/R/predict_dominance.R
data_org<-function(pred, m, y, refy = rep(NA, ncol(data.frame(y))),
predref = rep(NA, ncol(data.frame(pred))), fpy = NULL,
deltap = rep(0.001,ncol(data.frame(pred))), fmy = NULL,
deltam = rep(0.001,ncol(data.frame(m))), fpm = NULL,
mref = rep(NA, ncol(data.frame(m))),cova = NULL,
mcov = NULL, mclist=NULL) #mcov is the data frame with all covariates for the TVs (third variables/mediators); mind is the indicator for covariates
#if mclist is NULL but mcov is not, mcov is applied to all TVs.
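#note: ns.dev and bs.dev below adapt splines' ns() and bs() (through splineDesign) to optionally return derivative bases via the derivs1 argument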
{ns.dev<-function (x, df = NULL, knots = NULL, qnots=NULL,intercept = FALSE, Boundary.knots = range(x),derivs1=0)
{
nx <- names(x)
x <- as.vector(x)
nax <- is.na(x)
if (nas <- any(nax))
x <- x[!nax]
if (!missing(Boundary.knots)) {
Boundary.knots <- sort(Boundary.knots)
outside <- (ol <- x < Boundary.knots[1L]) | (or <- x >
Boundary.knots[2L])
}
else outside <- FALSE
if (!is.null(df) && is.null(knots)) {
nIknots <- df - 1L - intercept
if (nIknots < 0L) {
nIknots <- 0L
warning(gettextf("'df' was too small; have used %d",
1L + intercept), domain = NA)
}
knots <- if (nIknots > 0L) {
knots <- seq.int(0, 1, length.out = nIknots + 2L)[-c(1L,
nIknots + 2L)]
stats::quantile(x[!outside], knots)
}
}
else {if(is.null(df) && is.null(knots) && !is.null(qnots))
knots<-quantile(x[!outside], qnots)
nIknots <- length(knots)}
Aknots <- sort(c(rep(Boundary.knots, 4L), knots))
if (any(outside)) {
basis <- array(0, c(length(x), nIknots + 4L))
if (any(ol)) {
k.pivot <- Boundary.knots[1L]
xl <- cbind(1, x[ol] - k.pivot)
tt <- splineDesign(Aknots, rep(k.pivot, 2L), 4, c(0,
1),derivs=rep(derivs1,2L))
basis[ol, ] <- xl %*% tt
}
if (any(or)) {
k.pivot <- Boundary.knots[2L]
xr <- cbind(1, x[or] - k.pivot)
tt <- splineDesign(Aknots, rep(k.pivot, 2L), 4, c(0,1),derivs=rep(derivs1,2L))
basis[or, ] <- xr %*% tt
}
if (any(inside <- !outside))
basis[inside, ] <- splineDesign(Aknots, x[inside],
4,derivs=rep(derivs1,length(x[inside])))
}
else basis <- splineDesign(Aknots, x, 4,derivs=rep(derivs1,length(x)))
const <- splineDesign(Aknots, Boundary.knots, 4, c(2, 2),derivs=rep(derivs1,length(Boundary.knots)))
if (!intercept) {
const <- const[, -1, drop = FALSE]
basis <- basis[, -1, drop = FALSE]
}
qr.const <- qr(t(const))
basis <- as.matrix((t(qr.qty(qr.const, t(basis))))[, -(1L:2L),
drop = FALSE])
n.col <- ncol(basis)
if (nas) {
nmat <- matrix(NA, length(nax), n.col)
nmat[!nax, ] <- basis
basis <- nmat
}
dimnames(basis) <- list(nx, 1L:n.col)
a <- list(degree = 3L, knots = if (is.null(knots)) numeric() else knots,
Boundary.knots = Boundary.knots, intercept = intercept)
attributes(basis) <- c(attributes(basis), a)
class(basis) <- c("ns", "basis", "matrix")
basis
}
bs.dev<-function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
Boundary.knots = range(x),derivs1=0)
{
nx <- names(x)
x <- as.vector(x)
nax <- is.na(x)
if (nas <- any(nax))
x <- x[!nax]
if (!missing(Boundary.knots)) {
Boundary.knots <- sort(Boundary.knots)
outside <- (ol <- x < Boundary.knots[1L]) | (or <- x >
Boundary.knots[2L])
}
else outside <- FALSE
ord <- 1L + (degree <- as.integer(degree))
if (ord <= 1)
stop("'degree' must be integer >= 1")
if (!is.null(df) && is.null(knots)) {
nIknots <- df - ord + (1L - intercept)
if (nIknots < 0L) {
nIknots <- 0L
warning(gettextf("'df' was too small; have used %d",
ord - (1L - intercept)), domain = NA)
}
knots <- if (nIknots > 0L) {
knots <- seq.int(from = 0, to = 1, length.out = nIknots +
2L)[-c(1L, nIknots + 2L)]
stats::quantile(x[!outside], knots)
}
}
Aknots <- sort(c(rep(Boundary.knots, ord), knots))
if (any(outside)) {
warning("some 'x' values beyond boundary knots may cause ill-conditioned bases")
derivs <- 0:degree
scalef <- gamma(1L:ord)
basis <- array(0, c(length(x), length(Aknots) - degree -
1L))
if (any(ol)) {
k.pivot <- Boundary.knots[1L]
xl <- cbind(1, outer(x[ol] - k.pivot, 1L:degree,
"^"))
tt <- splineDesign(Aknots, rep(k.pivot, ord), ord,
derivs+derivs1)
basis[ol, ] <- xl %*% (tt/scalef)
}
if (any(or)) {
k.pivot <- Boundary.knots[2L]
xr <- cbind(1, outer(x[or] - k.pivot, 1L:degree,
"^"))
tt <- splineDesign(Aknots, rep(k.pivot, ord), ord,
derivs+derivs1)
basis[or, ] <- xr %*% (tt/scalef)
}
if (any(inside <- !outside))
basis[inside, ] <- splineDesign(Aknots, x[inside],
ord,derivs=rep(derivs1,length(x[inside])))
}
else basis <- splineDesign(Aknots, x, ord, derivs=rep(derivs1,length(x)))
if (!intercept)
basis <- basis[, -1L, drop = FALSE]
n.col <- ncol(basis)
if (nas) {
nmat <- matrix(NA, length(nax), n.col)
nmat[!nax, ] <- basis
basis <- nmat
}
dimnames(basis) <- list(nx, 1L:n.col)
a <- list(degree = degree, knots = if (is.null(knots)) numeric(0L) else knots,
Boundary.knots = Boundary.knots, intercept = intercept)
attributes(basis) <- c(attributes(basis), a)
class(basis) <- c("bs", "basis", "matrix")
basis
}
x2fx<-function(x,func) #x is the original numeric vector; func is a vector of functions coded as character strings.
{ # eg. func <- c("x","x+1","x+2","x+3","log(x)")
func.list <- list()
#test.data <- matrix(data=rep(x,length(func)),length(x),length(func))
#test.data <- data.frame(test.data)
result<-NULL
for(i in 1:length(func)){
func.list[[i]] <- function(x){}
body(func.list[[i]]) <- parse(text=func[i])
}
#result <- mapply(do.call,func.list,lapply(test.data,list))
col_fun<-NULL
z<-1
for (i in 1:length(func.list))
{res<-as.matrix(func.list[[i]](x))
result<-cbind(result,res)
col_fun<-cbind(col_fun,c(z,z+ncol(res)-1))
z<-z+ncol(res)}
list(values=as.matrix(result),col_fun=as.matrix(col_fun))
}
x2fdx<-function(x,func) #x is the original numeric vector; func is a vector of functions coded as character strings.
{ fdx<-NULL # eg. func <- c("x","x+1","x+2","x+3","log(x)")
for(i in 1:length(func)){
if (length(grep("ifelse",func[i]))>0)
{str<-unlist(strsplit(func[i],","))
fun1<-D(parse(text=str[2]), "x")
fun2<-D(parse(text=unlist(strsplit(str[3],")"))),"x")
x1<-eval(fun1)
x2<-eval(fun2)
if(length(x1)==1)
x1<-rep(x1,length(x))
if(length(x2)==1)
x2<-rep(x2,length(x))
fun3<-paste(str[1],"x1,x2)",sep=",")
fdx<-cbind(fdx,eval(parse(text=fun3)))
}
else if(length(grep("ns",func[i]))>0)
{temp<-paste("ns.dev",substring(func[i],3,nchar(func[i])-1),",derivs1=1)",sep="")
fdx<-cbind(fdx,eval(parse(text=temp)))}
else if(length(grep("bs",func[i]))>0)
{temp<-paste("bs.dev",substring(func[i],3,nchar(func[i])-1),",derivs1=1)",sep="")
fdx<-cbind(fdx,eval(parse(text=temp)))}
else{
dx2x <- D(parse(text=func[i]), "x")
temp<-eval(dx2x)
if(length(temp)==1)
fdx<-cbind(fdx,rep(temp,length(x)))
else fdx<-cbind(fdx,temp)}
}
as.matrix(fdx)
}
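# Small sketch (never run; kept in `if (FALSE)`) of the x2fx/x2fdx pair:
# x2fx evaluates each character-coded transformation of x, x2fdx its derivative.
if (FALSE) {
  fns <- c("x", "x^2", "log(x)")
  x2fx(1:4, fns)$values # columns: x, x^2, log(x) at x = 1:4
  x2fdx(1:4, fns)       # columns: 1, 2*x, 1/x at x = 1:4
}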
bin_cat <- function(M2, M1, cat1, catref = 1)
#turn a categorical variable to binary dummy variables
#M2 is the original data frame
#cat1 is the column number of the categorical variable in M2
#catref is the reference group
{a <- factor(M2[, cat1])
b <- sort(unique(a[a != catref]))
d<-NULL
e<-rep(1,nrow(M2))
for(i in 1:length(b))
{d=cbind(d,ifelse(a==b[i],1,0))
e=ifelse(a==b[i],i+1,e)
}
d[is.na(M2[,cat1]),]=NA
e[is.na(M2[,cat1])]=NA
M2[,cat1]=e
xnames=colnames(M1)
M1=cbind(M1,d)
colnames(M1)=c(xnames,paste(colnames(M2)[cat1],b,sep="."))
list(M1=M1,M2=M2,cat=c(ncol(M1)-ncol(d)+1,ncol(M1)))
}
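# Sketch (never run) of bin_cat on a toy frame: the 3-level factor in column 1
# becomes two dummy columns appended to M1 with level "a" as the reference;
# M1 needs column names so the new dummy names bind correctly.
if (FALSE) {
  toy <- data.frame(g = c("a", "b", "c", "b"))
  bin_cat(toy, M1 = matrix(1, 4, 1, dimnames = list(NULL, "one")), cat1 = 1, catref = "a")
}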
order_char<-function(char1,char2) #find the position of char2 in char1
{a<-1:length(char1)
b<-NULL
for (i in 1:length(char2))
b<-c(b,a[char1==char2[i]])
b
}
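# One-line illustration (never run): positions of char2's elements within char1.
if (FALSE) {
  order_char(c("a", "b", "c"), c("c", "a")) # returns c(3, 1)
}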
###start the main code
#clean up the outcomes:y_type=type of outcomes;
#y_type=2:binary, 3:category, 1:continuous, 4:time-to-event
#consider only 1 outcome for now
#consider only complete case
data.temp=cbind(pred,m,y)
if (!is.null(mcov) & !is.null(mclist))
data.temp=cbind(pred,m,y,mcov)
if(!is.null(cova))
data.temp=cbind(data.temp,cova)
choose.temp=complete.cases(data.temp)
if(ncol(data.frame(pred))==1)
pred=pred[choose.temp]
else
pred=pred[choose.temp,]
if(ncol(data.frame(y))==1)
y=y[choose.temp]
else
y=y[choose.temp,]
if(ncol(data.frame(m))==1)
m=m[choose.temp]
else
m=m[choose.temp,]
if(!is.null(cova)){
if(ncol(data.frame(cova))==1)
cova=cova[choose.temp]
else
cova=cova[choose.temp,]}
if(!is.null(mcov)){
if(ncol(data.frame(mcov))==1)
mcov=mcov[choose.temp]
else
mcov=mcov[choose.temp,]}
if (!is(y,"Surv"))
{if (nlevels(droplevels(as.factor(y))) == 2) {#binary
y_type <- 2
if (!is.na(refy))
y <- ifelse(y == refy, 0, 1)
else {
refy <- levels(droplevels(as.factor(y)))[1]
y <- ifelse(as.factor(y) == refy,0, 1)
}
}
else if (is.character(y) | is.factor(y)) {#categorical
y_type <- 3
y <- droplevels(y)
if (is.na(refy))
refy <- levels(as.factor(y))[1]
a <- factor(y)
b <- sort(unique(a[a != refy]))
e <- rep(1,nrow(as.matrix(y)))
for(j in 1:length(b))
e=ifelse(a==b[j],j+1,e)
e[is.na(y)]=NA
y=factor(e)
}
else #continuous
y_type = 1}
else
{y_type<-4
y=cbind(y[,1],y[,2])}
#clean up m and pred
mnames <- colnames(m)
if (!is.null(cova)) {
if (is.null(colnames(cova)))
cova_names = paste("cova",1:ncol(data.frame(cova)))
else
cova_names = colnames(cova)
cova=data.frame(cova)
colnames(cova)=cova_names
}
if (!is.null(mcov)) {
if (length(grep("for.m", names(mcov))) == 0)
mcov_names = colnames(mcov)
else mcov_names = colnames(mcov[[1]])
mcov=data.frame(mcov)
}
pred_names = names(pred)
##prepare for the predictor(s)
pred1 <- data.frame(pred) #original format
pred1.0 <- NULL #original format with binarized categorical predictor
pred1.0_names <-NULL
pred2 <- NULL #all transformed
pred2_names = NULL
pred3 <- NULL #transformed continuous predictors with pred+delta(pred)
pred3_names = NULL
pred.cont.der <- NULL #derivative of the transformation function for cont predictors
if (is.null(pred_names))
pred_names = paste("pred",1:ncol(pred1),sep='')
colnames(pred1) = pred_names
binpred = NULL #binary predictor in pred2
catpred = NULL #categorical predictor in pred2, each row is for one categorical predictor
contpred = NULL #continuous predictor in pred2, each row is for a continuous predictor
binpred1 = NULL #binary predictor in pred1
catpred1 = NULL #categorical predictor in pred1
contpred1 = NULL #continuous predictor in pred1
binpred1.0 = NULL #binary predictor in pred1.0
catpred1.0 = NULL #categorical predictor in pred1.0
contpred1.0 = NULL #continuous predictor in pred1.0
contpred3 = NULL #index for pred3 and pred.cont.dev
npred = ncol(pred1)
n1=nrow(pred1)
for (i in 1:npred)
if (nlevels(droplevels(as.factor(pred1[,i]))) == 2) { #binary predictor
if (!is.na(predref[i]))
{pred2 <- cbind(pred2,ifelse(pred1[, i] == predref[i],0, 1))
pred1.0 <- cbind(pred1.0, ifelse(pred1[, i] == predref[i],0, 1))
#pred3 <- cbind(pred3,as.factor(ifelse(pred1[, i] == predref[i],0, 1)))
pred1[,i] <- ifelse(pred1[, i] == predref[i],0, 1)}
else {
temp.pred <- as.factor(pred1[, i])
pred2 <- cbind(pred2,ifelse(temp.pred == levels(droplevels(temp.pred))[1], 0, 1))
pred1.0 <- cbind(pred1.0,ifelse(temp.pred == levels(droplevels(temp.pred))[1], 0, 1))
#pred3 <- cbind(pred3,as.factor(ifelse(temp.pred == levels(droplevels(temp.pred))[1], 0, 1)))
pred1[,i] <- ifelse(temp.pred == levels(droplevels(temp.pred))[1], 0, 1)
}
binpred1 = c(binpred1, i)
pred2_names=c(pred2_names,pred_names[i])
pred1.0_names=c(pred1.0_names,pred_names[i])
binpred = c(binpred,ncol(pred2))
binpred1.0 = c(binpred1.0,ncol(pred1.0))
}
else if (is.character(pred1[, i]) | is.factor(pred1[, i])) { #categorical predictor
pred1[, i] = droplevels(pred1[, i])
if(!is.null(pred2))
colnames(pred2)=pred2_names
if(!is.null(pred1.0))
colnames(pred1.0)=pred1.0_names
# catn = catn + 1
if (!is.na(predref[i]))
{pred.temp1 <- bin_cat(pred1, pred2, i, predref[i])
pred.temp2 <- bin_cat(pred1, pred1.0, i, predref[i])}
else
{pred.temp1 <- bin_cat(pred1, pred2, i, levels(as.factor(pred1[,i]))[1])
pred.temp2 <- bin_cat(pred1, pred1.0, i, levels(as.factor(pred1[,i]))[1])}
pred2 = pred.temp1$M1
pred1.0 = pred.temp2$M1
pred1 = pred.temp1$M2
#pred3 = cbind(pred3,pred.temp1$M1[,pred.temp1$cat[1]:pred.temp1$cat[2]])
catpred1 = c(catpred1,i)
catpred = rbind(catpred,pred.temp1$cat)
catpred1.0 = rbind(catpred1.0,pred.temp2$cat)
pred2_names = colnames(pred.temp1$M1)
pred1.0_names = colnames(pred.temp2$M1)
}
else #consider the transformation of continuous x
{contpred1 = c(contpred1, i)
pred1.0=cbind(pred1.0,pred1[,i])
pred1.0_names<-c(pred1.0_names,pred_names[i])
contpred1.0= c(contpred1.0, ncol(pred1.0))
if(!(i %in% fpy[[1]])) #fpy has the transformation functions for pred to y, [[1]] list all cont pred to be transformed
{pred2<-cbind(pred2,pred1[,i])
pred.cont.der<-cbind(pred.cont.der,rep(1,n1))
contpred3 = rbind(contpred3,c(ncol(pred.cont.der),ncol(pred.cont.der)))
pred3 = cbind(pred3,pred1[,i]+deltap[i]) #deltap is the changing amount for predictors
contpred = rbind(contpred,c(ncol(pred2),ncol(pred2)))
pred2_names=c(pred2_names,pred_names[i])
pred3_names=c(pred3_names,pred_names[i])
}
else if (i %in% fpy[[1]])
{p=match(i,fpy[[1]])
temp.pred=x2fx(pred1[,i],fpy[[1+p]])$values
pred2<-cbind(pred2,temp.pred)
pred.cont.der<-cbind(pred.cont.der,x2fdx(pred1[,i],fpy[[1+p]]))
if(is.null(pred3))
contpred3 = rbind(contpred3,c(1,ncol(pred.cont.der)))
else
contpred3 = rbind(contpred3,c(ncol(pred3)+1,ncol(pred.cont.der)))
pred3<-cbind(pred3,x2fx(pred1[,i]+deltap[i],fpy[[1+p]])$values)
l=length(fpy[[1+p]])
contpred=rbind(contpred,c(ncol(pred2)-ncol(temp.pred)+1,ncol(pred2)))
pred2_names<-c(pred2_names,paste(pred_names[i],1:l,sep="."))
pred3_names=c(pred3_names,paste(pred_names[i],1:l,sep="."))
}
}
colnames(pred1.0)=pred1.0_names
colnames(pred2)=pred2_names
if(!is.null(pred3))
{colnames(pred3)=pred3_names
colnames(pred.cont.der)=pred3_names}
## prepare mediators for y
m1 <- data.frame(m) #original format
m2 <- NULL #all transformed
m2_names = NULL
m3 <- NULL #transformed continuous mediators with mediator+delta(med)
m3_names = NULL
m.cont.der <- NULL #derivative of the transformation function for cont mediators
if (is.null(mnames))
mnames = paste("m",1:ncol(m1),sep='')
colnames(m1) = mnames
binm = NULL
catm = NULL
contm = NULL
binm1 = NULL
catm1 = NULL
contm1 = NULL
contm3 = NULL #index for m3 and m.cont.dev
nm = ncol(m1)
n2=nrow(m1)
for (i in 1:nm)
if (nlevels(droplevels(as.factor(m1[,i]))) == 2) { #binary mediator
if (!is.na(mref[i]))
{m2 <- cbind(m2,ifelse(m1[, i] == mref[i],0, 1))
#m3 <- cbind(m3,as.factor(ifelse(m1[, i] == mref[i],0, 1)))
m1[,i] <- ifelse(m1[, i] == mref[i],0, 1)}
else {
temp.m <- as.factor(m1[, i])
m2 <- cbind(m2,ifelse(temp.m == levels(droplevels(temp.m))[1], 0, 1))
#m3 <- cbind(m3,as.factor(ifelse(temp.m == levels(droplevels(temp.m))[1], 0, 1)))
m1[,i] <- ifelse(temp.m == levels(droplevels(temp.m))[1], 0, 1)
}
binm1 = c(binm1, i)
m2_names=c(m2_names,mnames[i])
colnames(m2)=m2_names
binm = c(binm,ncol(m2))
}
else if (is.character(m1[, i]) | is.factor(m1[, i])) { #categorical mediator
m1[, i] = droplevels(as.factor(m1[, i]))
if (!is.na(mref[i]))
m.temp1 <- bin_cat(m1, m2, i, mref[i])
else
m.temp1 <- bin_cat(m1, m2, i, levels(as.factor(m1[,i]))[1])
m2 = m.temp1$M1
m1 = m.temp1$M2
#m3 = cbind(m3,m.temp1$M1[,m.temp1$cat[1]:m.temp1$cat[2]])
catm1 = c(catm1,i)
catm = rbind(catm,m.temp1$cat)
m2_names = c(m2_names, colnames(m.temp1$M1)[m.temp1$cat[1]:m.temp1$cat[2]])
colnames(m2)=m2_names
}
else #consider the transformation of continuous m
{contm1 = c(contm1, i)
if(!(i %in% fmy[[1]])) #fmy has the transformation functions for m to y, [[1]] list all cont mediators in m to be transformed
{m2<-cbind(m2,m1[,i])
m.cont.der<-cbind(m.cont.der,rep(1,n1))
contm3 = rbind(contm3,c(ncol(m.cont.der),ncol(m.cont.der)))
m3 = cbind(m3,m1[,i]+deltam[i]) #deltam is the changing amount for mediators
m2_names=c(m2_names,mnames[i])
colnames(m2)=m2_names
m3_names=c(m3_names,mnames[i])
contm=rbind(contm,c(ncol(m2),ncol(m2)))
}
else if (i %in% fmy[[1]])
{p=match(i,fmy[[1]])
temp.m=x2fx(m1[,i],fmy[[1+p]])$values
m2<-cbind(m2,temp.m)
m.cont.der<-cbind(m.cont.der,x2fdx(as.matrix(m)[,i],fmy[[1+p]]))
if(is.null(m3))
contm3 = rbind(contm3,c(1,ncol(m.cont.der)))
else
contm3 = rbind(contm3,c(ncol(m3)+1,ncol(m.cont.der)))
m3<-cbind(m3,x2fx(m1[,i]+deltam[i],fmy[[1+p]])$values) #deltam, not deltap: the step size belongs to the mediator
contm = rbind(contm,c(ncol(m2)-ncol(temp.m)+1,ncol(m2)))
l=length(fmy[[1+p]])
m2_names<-c(m2_names,paste(mnames[i],1:l,sep="."))
colnames(m2)=m2_names
m3_names=c(m3_names,paste(mnames[i],1:l,sep="."))
}
}
colnames(m2)=m2_names
if(!is.null(m3))
{colnames(m3)=m3_names
colnames(m.cont.der)=m3_names}
## prepare predictors for mediators
pm=rep(0,n1) #the first row are all 0s
fpm.2=fpm #the transformed predictors in pm
binp=NULL
catp=NULL
contp=NULL
j=1
names.pm=("zero")
if(!is.null(binpred))
for (i in binpred)
{pm=cbind(pm,pred2[,i])
j=j+1
binp=c(binp,j)
names.pm=c(names.pm,pred_names[i])}
if(!is.null(catpred))
for (i in 1:nrow(catpred))
{pm=cbind(pm,pred2[,catpred[i,1]:catpred[i,2]])
catp=rbind(catp,c(j+1,j+1+catpred[i,2]-catpred[i,1]))
j=j+1+catpred[i,2]-catpred[i,1]
names.pm=c(names.pm,colnames(pred2)[catpred[i,1]:catpred[i,2]])
}
if(!is.null(contpred1))
for (i in contpred1)
{pm=cbind(pm,pred1[,i])
j=j+1
contp=rbind(contp,rep(j,3))
names.pm=c(names.pm,pred_names[i])}
pm.der=matrix(1,n1,ncol(pm))
pm.idx=as.list(rep(1,ncol(m1))) #the predictors in pm to predict the ith mediator
for (i in 1:ncol(m1))
pm.idx[[i]]=2:ncol(pm)
if(!is.null(fpm))
{k=unique(fpm[[1]][,2]) #the first column is for the mediators,
#the second column the continuous predictors to be transformed
for (l in k){
temp<-(2:length(fpm))[fpm[[1]][,2]==l]
allfun=fpm[[temp[1]]]
if (length(temp)>1)
for(i in 2:length(temp))
allfun<-c(allfun,fpm[[temp[i]]])
unifun<-unique(allfun)
unifun1<-unifun[unifun!="x"]
unifun2<-c("x",unifun1)
d_d<-x2fx(pred1[,l],unifun1)
d.der<-x2fdx(pred1[,l],unifun1)
d<-as.matrix(d_d$values)
names.pm<-c(names.pm,paste(pred_names[l],1:ncol(d),sep="."))
pm.der<-cbind(pm.der,d.der)
place=match(l,contpred1)
contp[place,2]=j+1
contp[place,3]=j+ncol(d)
pm<-cbind(pm,d)
for(i in temp)
{ttemp<-order_char(unifun1,fpm[[i]])
fpm.2[[i]]=j+ttemp #columns in pm holding this function's transformed predictor terms
pm.idx[[fpm[[1]][i-1,1]]]=c(pm.idx[[fpm[[1]][i-1,1]]],j+ttemp)
if(length(order_char('x',fpm[[i]]))==0)
pm.idx[[fpm[[1]][i-1,1]]]= (pm.idx[[fpm[[1]][i-1,1]]])[pm.idx[[fpm[[1]][i-1,1]]]!=l]
}
j=j+ncol(d)
}}
colnames(pm)=names.pm
colnames(pm.der)=names.pm
pm.ind=matrix(1,length(pm.idx),max(sapply(pm.idx,length)))
for (i in 1:nrow(pm.ind))
pm.ind[i,1:length(pm.idx[[i]])]=pm.idx[[i]]
p2=ifelse(is.null(binm1),0,length(binm1))
p3=ifelse(is.null(catm1),0,length(catm1))
p1=ifelse(is.null(contm1),0,length(contm1))
#prepare for covariates of mediators
if(is.null(mcov))
{mcov=data.frame(intercept=rep(1,nrow(m1)))
mind=matrix(T,p1+p2+p3,1)}
else
{mcov=data.frame(intercept=rep(1,nrow(m)),mcov)
mind=matrix(T,p1+p2+p3,ncol(mcov))
mcov_names=colnames(mcov)
if (!is.null(mclist))
{if (is.character(mclist[[1]]))
mclist[[1]]=match(mclist[[1]],mnames)
mcov=data.frame(mcov,no=rep(0,nrow(mcov))) #add a column of 0 in mcov
mind=matrix(rep(1:(ncol(mcov)-1),each=p1+p2+p3),p1+p2+p3,ncol(mcov)-1)
for (i in 1:length(mclist[[1]]))
{if(sum(is.na(mclist[[i+1]]))>=1)
temp=1
else if(is.character(mclist[[i+1]]))
temp=c(1,match(mclist[[i+1]],mcov_names))
else
temp=c(1,mclist[[i+1]]+1)
mind[mclist[[1]][i],(1:(ncol(mcov)-1))[-temp]]=ncol(mcov)
}}
} #use all covariates for all TVs if mclist is NULL. Otherwise, the first item of mclist lists all TVs
#that use a different mcov; the following items give the mcov for each TV in order. Use NA if no mcov is to be used.
results = list(N=nrow(data.frame(y)), y_type=y_type, y=y, pred1=pred1, pred1.0=pred1.0,
pred2=pred2, pred3=pred3, cova=cova,
pred.cont.der=pred.cont.der, binpred2=binpred, catpred2=catpred,
contpred2=contpred, binpred1=binpred1, catpred1=catpred1,contpred1=contpred1,
contpred1.0=contpred1.0, binpred1.0=binpred1.0, catpred1.0=catpred1.0,
contpred3=contpred3, npred=npred,
m1=m1, m2=m2, m3=m3, m.cont.der=m.cont.der, binm2=binm, catm2=catm,
contm2=contm,binm1=binm1, catm1=catm1, contm1=contm1, contm3=contm3,
nm=nm, pm=pm, pm.der=pm.der, pm.idx=pm.idx, pm.ind=pm.ind, fpm.2=fpm.2,
binp=binp, catp=catp, contp=contp,p1=p1,p2=p2,p3=p3,mcov=mcov,mind=mind)
return(results)
}
bma.bx.cy<-function(pred, m, y, refy = rep(NA, ncol(data.frame(y))),
predref = rep(NA, ncol(data.frame(pred))), fpy = NULL,
deltap = rep(0.001,ncol(data.frame(pred))), fmy = NULL,
deltam = rep(0.001,ncol(data.frame(m))), fpm = NULL,
mref = rep(NA, ncol(data.frame(m))),cova = NULL,
mcov = NULL, mclist=NULL, inits=NULL,
n.chains = 1, n.iter = 1100,n.burnin=100,n.thin = 1,
mu=NULL, Omega=NULL, Omegac=NULL,muc=NULL,mucv=NULL, Omegacv=NULL,
mu0.1=NULL, Omega0.1=NULL, mu1.1=NULL, Omega1.1=NULL,
mu0.a=NULL, Omega0.a=NULL, mu1.a=NULL, Omega1.a=NULL,
mu0.b=NULL, Omega0.b=NULL, mu1.b=NULL, Omega1.b=NULL,
mu0.c=NULL, Omega0.c=NULL, mu1.c=NULL, Omega1.c=NULL,
preci=0.000001,tmax=Inf,multi=NULL,filename=NULL)
{ ### build the bugs model if it is not defined
jags_model <- function (contm=c('ncontm', 'ycontm'),binm=c('nbinm','ybinm'),
catm=c('ncatm','ycatm'),
cova=c('ncova','ycova'),
ytype=c('contc','binc','catc','survc','contnnc',
'binnc','catnc','survnc')) {
contm <- match.arg(contm)
binm <- match.arg(binm)
catm <- match.arg(catm)
cova <- match.arg(cova)
# raw script
script <-
"model {temp <- 1:nmc
for(i in 1:N){
$yfunc
$contm
$binm
$catm
}
$ypriors
$cntmprior
$bmprior
$cmprior
var4 ~ dgamma(1,0.1)
prec4 <-1/var4
}"
# define macros
macros <- list(list("$yfunc",
switch(ytype,
contc='mu_y[i] <- beta0 + inprod(c, x[i,]) + inprod(beta,M1[i,]) + inprod(eta,cova[i,])
y[i] ~ dnorm(mu_y[i],prec4)',
contnc='mu_y[i] <- beta0 + inprod(c, x[i,]) + inprod(beta,M1[i,])
y[i] ~ dnorm(mu_y[i],prec4)',
binc='logit(mu_y[i]) <- beta0 + inprod(c, x[i,]) + inprod(beta,M1[i,]) + inprod(eta,cova[i,])
y[i] ~ dbern(mu_y[i])',
binnc='logit(mu_y[i]) <- beta0 + inprod(c, x[i,]) + inprod(beta,M1[i,])
y[i] ~ dbern(mu_y[i])',
catc='mu_y1[i,1] <- 1
for (k in 2:caty)
{mu_y1[i,k] <- exp(beta0[k-1] + inprod(c[k-1,], x[i,]) + inprod(beta[k-1,],M1[i,]) + inprod(eta[k-1,],cova[i,]))}
sum_y[i] <- sum(mu_y1[i,1:caty])
for (l in 1:caty)
{mu_y[i,l] <- mu_y1[i,l]/sum_y[i]}
y[i] ~ dcat(mu_y[i,])',
catnc='mu_y1[i,1] <- 1
for (k in 2:caty)
{mu_y1[i,k] <- exp(beta0[k-1] + inprod(c[k-1,], x[i,]) + inprod(beta[k-1,],M1[i,]))}
sum_y[i] <- sum(mu_y1[i,1:caty])
for (l in 1:caty)
{mu_y[i,l] <- mu_y1[i,l]/sum_y[i]}
y[i]~dcat(mu_y[i,])',
survc= 'elinpred[i] <- exp(inprod(c, x[i,]) + inprod(beta,M1[i,]) + inprod(eta,cova[i,]))
base[i] <- lambda*r*pow(y[i,1], r-1)
loghaz[i] <- log(base[i]*elinpred[i])
phi[i] <- 100000-y[i,2]*loghaz[i]-log(exp(-lambda*pow(y[i,1],r)*elinpred[i])-exp(-lambda*pow(tmax,r)*elinpred[i])) +log(1-exp(-lambda*pow(tmax,r)*elinpred[i]))
zero[i] ~ dpois(phi[i])' ,
survnc= 'elinpred[i] <- exp(inprod(c, x[i,]) + inprod(beta,M1[i,]))
base[i] <- lambda*r*pow(y[i,1], r-1)
loghaz[i] <- log(base[i]*elinpred[i])
phi[i] <- 100000-y[i,2]*loghaz[i]-log(exp(-lambda*pow(y[i,1],r)*elinpred[i])-exp(-lambda*pow(tmax,r)*elinpred[i])) +log(1-exp(-lambda*pow(tmax,r)*elinpred[i]))
zero[i] ~ dpois(phi[i])')),
list("$contm",
switch(contm,
ycontm='for (j in 1:p1){
mu_M1[i,contm[j]] <- inprod(alpha0.a[j,mind[contm[j],]],mcov[i,temp[mind[contm[j],]]])+inprod(alpha1.a[j,1:c1],x1[i,])
M2[i,contm[j]] ~ dnorm(mu_M1[i,contm[j]],prec1[j])
for (k in contm1[j,1]:contm1[j,2]){
mu_M1_c[i,k] <- inprod(alpha0[k,mind[contm[j],]],mcov[i,mind[contm[j],]])+inprod(alpha1[k,1:c1],x1[i,])
M1[i,k] ~ dnorm(mu_M1_c[i,k],prec2[k])
}
}
',
ncontm='')),
list("$cntmprior",
switch(contm,
ycontm=' for(j in 1:P)
{alpha1[j,1:c1] ~ dmnorm(mu1.1[j,1:c1], Omega1.1[1:c1, 1:c1])
alpha0[j,1:nmc] ~ dmnorm(mu0.1[j,1:nmc], Omega0.1[1:nmc, 1:nmc])}
for (i in 1:P){
var2[i] ~ dgamma(1,0.1)
prec2[i] <- 1/var2[i]
}
for(j in 1:p1)
{alpha1.a[j,1:c1] ~ dmnorm(mu1.a[j,1:c1], Omega1.a[1:c1, 1:c1])
alpha0.a[j,1:nmc] ~ dmnorm(mu0.a[j,1:nmc], Omega0.a[1:nmc, 1:nmc])}
for (i in 1:p1){
var1[i] ~ dgamma(1,0.1)
prec1[i] <- 1/var1[i]
}',
ncontm='')),
list("$binm",
switch(binm,
ybinm=" for (k in 1:p2){
logit(mu_M1[i,binm[k]]) <- inprod(alpha0.b[k,mind[binm[k],]],mcov[i,mind[binm[k],]])+inprod(alpha1.b[k,1:c1],x1[i,])
M2[i,binm[k]] ~ dbern(mu_M1[i,binm[k]])
}",
nbinm='')),
list("$bmprior",
switch(binm,
nbinm='',
ybinm=' for(j in 1:p2)
{alpha1.b[j,1:c1] ~ dmnorm(mu1.b[j,1:c1], Omega1.b[1:c1, 1:c1])
alpha0.b[j,1:nmc] ~ dmnorm(mu0.b[j,1:nmc], Omega0.b[1:nmc, 1:nmc])
}')),
list("$catm",
switch(catm,
ycatm=" for (j in 1:p3){
mu_Mc[i,j,1] <- 1 #baseline is the 1st category
for (k in 2:cat2[j]){
mu_Mc[i,j,k] <- exp(inprod(alpha0.c[j,k-1,mind[catm[j],]],mcov[i,mind[catm[j],]])+inprod(alpha1.c[j,k-1,1:c1],x1[i,]))
}
sum_Mc[i,j] <- sum(mu_Mc[i,j,1:cat2[j]])
for (l in 1:cat2[j])
{mu_Mc0[i,j,l] <- mu_Mc[i,j,l]/sum_Mc[i,j]}
M2[i,catm[j]] ~ dcat(mu_Mc0[i,j,1:cat2[j]])
}",
ncatm='')),
list("$cmprior",
switch(catm,
ncatm='',
ycatm=' for (i in 1:p3){
for(j in 1:cat1)
{alpha1.c[i,j,1:c1] ~ dmnorm(mu1.c[j,1:c1], Omega1.c[1:c1, 1:c1])
alpha0.c[i,j,1:nmc] ~ dmnorm(mu0.c[j,1:nmc], Omega0.c[1:nmc, 1:nmc])}
}')),
list("$ypriors",
switch(ytype,
contc=' beta[1:P] ~ dmnorm(mu[1:P], Omega[1:P, 1:P])
beta0 ~ dnorm(0, 1.0E-6)
c[1:c2] ~ dmnorm(muc[1:c2], Omegac[1:c2,1:c2])
eta[1:cv1] ~ dmnorm(mucv[1:cv1], Omegacv[1:cv1,1:cv1])',
contnc=' beta[1:P] ~ dmnorm(mu[1:P], Omega[1:P, 1:P])
beta0 ~ dnorm(0, 1.0E-6)
c[1:c2] ~ dmnorm(muc[1:c2], Omegac[1:c2,1:c2])
',
binc=' beta[1:P] ~ dmnorm(mu[1:P], Omega[1:P, 1:P])
beta0 ~ dnorm(0, 1.0E-6)
c[1:c2] ~ dmnorm(muc[1:c2], Omegac[1:c2,1:c2])
eta[1:cv1] ~ dmnorm(mucv[1:cv1], Omegacv[1:cv1,1:cv1])',
binnc=' beta[1:P] ~ dmnorm(mu[1:P], Omega[1:P, 1:P])
beta0 ~ dnorm(0, 1.0E-6)
c[1:c2] ~ dmnorm(muc[1:c2], Omegac[1:c2,1:c2])
',
catc=' for(j in 1:(caty-1))
{beta[j,1:P] ~ dmnorm(mu[1:P], Omega[1:P, 1:P])
beta0[j] ~ dnorm(0, 1.0E-6)
c[j,1:c2] ~ dmnorm(muc[1:c2], Omegac[1:c2,1:c2])
eta[j,1:cv1] ~ dmnorm(mucv[1:cv1], Omegacv[1:cv1,1:cv1])}',
catnc=' for(j in 1:(caty-1))
{beta[j,1:P] ~ dmnorm(mu[1:P], Omega[1:P, 1:P])
beta0[j] ~ dnorm(0, 1.0E-6)
c[j,1:c2] ~ dmnorm(muc[1:c2], Omegac[1:c2,1:c2])}',
survc= ' beta[1:P] ~ dmnorm(mu[1:P], Omega[1:P, 1:P])
beta0 ~ dnorm(0, 1.0E-6)
c[1:c2] ~ dmnorm(muc[1:c2], Omegac[1:c2,1:c2])
r~dunif(0,10) # dunif(0.5,1.5)
lambda~dgamma(1,0.01)
eta[1:cv1] ~ dmnorm(mucv[1:cv1], Omegacv[1:cv1,1:cv1])',
survnc= ' beta[1:P] ~ dmnorm(mu[1:P], Omega[1:P, 1:P])
beta0 ~ dnorm(0, 1.0E-6)
c[1:c2] ~ dmnorm(muc[1:c2], Omegac[1:c2,1:c2])
r ~ dunif(0,10) # dunif(1,1.2)
lambda ~ dgamma(1,0.01)')) # dunif(1.0E-8,3.0E-7)
)
# apply macros
for (m in seq(macros)) {
script <- gsub(macros[[m]][1], macros[[m]][2], script, fixed=TRUE)
}
script
}
### Incomplete Gamma function
Igamma<-function(z,u)pgamma(u,z)*gamma(z)
### Truncated Weibull distribution moments
weib.trunc.mean<-function(vec, right)vec[-1]*Igamma(1/vec[1]+1,(right/vec[-1])^vec[1])/(1-exp(-(right/vec[-1])^vec[1]))
matrix.prod<-function(m1)
{return(m1[1,]%*%t(m1[-1,]))}
expp<-function(v1,v2)
{v1^v2}
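# Sketch (never run) of the helpers above, with assumed toy values:
# Igamma is the unnormalized lower incomplete gamma; weib.trunc.mean gives the
# mean of a Weibull(shape, scale) right-truncated at `right`, vec = c(shape, scales...).
if (FALSE) {
  Igamma(2, 1)                          # pgamma(1, 2) * gamma(2)
  weib.trunc.mean(c(1.5, 2), right = 5) # truncated-Weibull mean for shape 1.5, scale 2
}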
data0<- data_org(pred=pred, m=m, y=y, refy=refy, predref=predref, fpy=fpy,
deltap=deltap, fmy=fmy, deltam=deltam, fpm=fpm, mref=mref,
cova=cova,mcov = mcov, mclist=mclist)
y.type=data0$y_type #1 for continuous outcome, 4 for time-to-event, 2 for binary, 3 for categorical
N=data0$N
x=data0$pred2
c2=ncol(x)
x1=data0$pred1.0
c1=ncol(x1) #c1 is the number of predictors; a k-class categorical variable counts as k-1 predictors
y=data0$y
M1=data0$m2
M2=data0$m1
M3=data0$m3
contm=data0$contm1
contm1=data0$contm2
contm3=data0$contm3
p1=data0$p1
binm=data0$binm1
binm1=data0$binm2
p2=data0$p2
p3=data0$p3
if(p3>0){
cat1=max(data0$catm2[,2]-data0$catm2[,1]+1)
cat2=data0$catm2[,2]-data0$catm2[,1]+2
catm=data0$catm1
catm1=data0$catm2}
else{
cat1=NULL
cat2=NULL
catm=NULL
catm1=NULL
}
P=ncol(data0$m2)
cova=data0$cova
mcov=data0$mcov
mind=data0$mind
nmc=ncol(mcov)
# pm=data0$pm
# pm.ind=data1$pm.ind
if(is.null(mu))
mu=rep(0,P)
if(is.null(Omega))
Omega=diag(preci,P)
if(p1>0){
if(is.null(mu0.1))
mu0.1=matrix(0,P,nmc)
if(is.null(Omega0.1))
Omega0.1=diag(preci,nmc)
if(is.null(mu1.1))
mu1.1=matrix(0,P,c1)
if(is.null(Omega1.1))
Omega1.1=diag(preci,c1)
if(is.null(mu0.a))
mu0.a=matrix(0,p1,nmc)
if(is.null(Omega0.a))
Omega0.a=diag(preci,nmc)
if(is.null(mu1.a))
mu1.a=matrix(0,p1,c1)
if(is.null(Omega1.a))
Omega1.a=diag(preci,c1)
}
if(p2>0){
if(is.null(mu0.b))
mu0.b=matrix(0,p2,nmc)
if(is.null(Omega0.b))
Omega0.b=diag(preci,nmc)
if(is.null(mu1.b))
mu1.b=matrix(0,p2,c1)
if(is.null(Omega1.b))
Omega1.b=diag(preci,c1)
}
if(p3>0){
if(is.null(mu0.c))
mu0.c=matrix(0,cat1,nmc)
if(is.null(Omega0.c))
Omega0.c=diag(preci,nmc)
if(is.null(mu1.c))
mu1.c=matrix(0,cat1,c1)
if(is.null(Omega1.c))
Omega1.c=diag(preci,c1)
}
if(is.null(muc))
muc=rep(0,c2)
if(is.null(Omegac))
Omegac=diag(preci,c2)
# data0.1<- list (N=N,x=x,y=y,M1=M1,M2=M2,contm=contm,contm1=contm1,p1=p1,
# binm=binm,p2=p2,cat1=cat1,cat2=cat2,catm=catm,p3=p3,P=P,
# mu=mu,Omega=Omega,mu0.1=mu0.1,mu1.1=mu1.1,Omega0.1=Omega0.1,Omega1.1=Omega1.1,
# mu0.a=mu0.a,mu1.a=mu1.a,Omega0.a=Omega0.a,Omega1.a=Omega1.a,
# mu0.b=mu0.b,mu1.b=mu1.b,Omega0.b=Omega0.b,Omega1.b=Omega1.b,
# mu0.c=mu0.c,mu1.c=mu1.c,Omega0.c=Omega0.c,Omega1.c=Omega1.c)
if(y.type==3)
{caty=nlevels(y)}
if(y.type==4)
{zero=rep(0,nrow(y))#is.cen=ifelse(y[,2]==1,0,1) #censored or not
if(is.null(tmax))
tmax=max(y[,1], na.rm=T)+100
if(is.null(multi))
multi=TRUE} #maximum time to event
#Cen=ifelse(y[,2]==1,tmax,y[,1]) #censoring time
#y=ifelse(y[,2]==1,y[,1],NA)}
data0.1<- list (N=N,x=x,x1=x1,y=y,M1=M1,M2=M2,Omega=Omega,mu=mu,P=P,c1=c1,c2=c2,
muc=muc,Omegac=Omegac,nmc=nmc,mcov=mcov,mind=mind)
para=c("beta0","c","beta")
if(y.type==3)
{data0.1[['caty']]=caty}
if(y.type==4)
{data0.1[['zero']]=zero
#data0.1[['is.cen']]=is.cen
data0.1[['tmax']]=tmax
para=c(para,"lambda","r")}
# if(c1>1)
# {data0.1[['muc']]=rep(0,c1)
# data0.1[['Omegac']]=diag(0.000001,c1)
# } #data0.1[['c1']]=c1
if(p1>0)
{data0.1[['contm']]=contm
data0.1[['contm1']]=contm1
data0.1[['p1']]=p1
data0.1[['mu0.1']]=mu0.1
data0.1[['mu1.1']]=mu1.1
data0.1[['Omega0.1']]=Omega0.1
data0.1[['Omega1.1']]=Omega1.1
data0.1[['mu0.a']]=mu0.a
data0.1[['mu1.a']]=mu1.a
data0.1[['Omega0.a']]=Omega0.a
data0.1[['Omega1.a']]=Omega1.a
para=c(para,"alpha0","alpha1","alpha0.a","alpha1.a")
}
if(p2>0)
{data0.1[['binm']]=binm
data0.1[['p2']]=p2
data0.1[['mu0.b']]=mu0.b
data0.1[['mu1.b']]=mu1.b
data0.1[['Omega0.b']]=Omega0.b
data0.1[['Omega1.b']]=Omega1.b
para=c(para,"alpha0.b","alpha1.b")}
if(p3>0)
{data0.1[['cat1']]=cat1
data0.1[['cat2']]=cat2
data0.1[['catm']]=catm
data0.1[['p3']]=p3
data0.1[['mu0.c']]=mu0.c
data0.1[['mu1.c']]=mu1.c
data0.1[['Omega0.c']]=Omega0.c
data0.1[['Omega1.c']]=Omega1.c
para=c(para,"alpha0.c","alpha1.c")}
if(!is.null(cova)){
cv1=ncol(cova)
if(is.null(mucv))
mucv=rep(0,cv1)
if(is.null(Omegacv))
Omegacv=diag(preci,cv1)
data0.1[['cv1']]=cv1
data0.1[['mucv']]=mucv
data0.1[['Omegacv']]=Omegacv
data0.1[['cova']]=cova
para=c(para,"eta")
}
if(is.null(inits))
inits<- function(){list()}
if (y.type==1 & is.null(cova))
ytype="contnc"
else if(y.type==1 & !is.null(cova))
ytype="contc"
else if(y.type==2 & is.null(cova))
ytype="binnc"
else if(y.type==2 & !is.null(cova))
ytype="binc"
else if(y.type==3 & is.null(cova))
ytype="catnc"
else if(y.type==3 & !is.null(cova))
ytype="catc"
else if(y.type==4 & is.null(cova))
ytype="survnc"
else if(y.type==4 & !is.null(cova))
ytype="survc"
if (is.null(filename))
{filename=paste(tempdir(),"model.txt",sep="/")
writeLines(jags_model(contm=ifelse(p1==0,'ncontm', 'ycontm'),
binm=ifelse(p2==0, 'nbinm','ybinm'),
catm=ifelse(p3==0, 'ncatm','ycatm'),
cova=ifelse(is.null(cova),'ncova','ycova'),
ytype=ytype), filename)
}
med0<- jags(data0.1, inits,
model.file = filename, #"O:/My Documents/My Research/Research/Bayesian Mediation Analysis/codes/promis/bx_cy.txt",
parameters.to.save = para,
n.chains = n.chains, n.iter = n.iter, n.burnin=n.burnin, n.thin = n.thin)
#check the results
#calculate the mediation effects
N1=(n.iter-n.burnin)/n.thin #number of posterior draws kept per chain
#m.mcov=apply(mcov,2,mean,na.rm=T) #effects are calculated at the mean of mcov
#med0$BUGSoutput$sims.list$r=1/med0$BUGSoutput$sims.list$r
#attach(med0$BUGSoutput$sims.list)
if(y.type==3){
aie1=array(0,c(N1,p1+p2+p3,c1,caty-1))
ie1=array(0,dim=c(N1,N,p1+p2+p3,c1,caty-1))
tt2=1
de1=array(0,c(N1,c1,caty-1))
n.cont=1
aie2=array(0,c(N1,p1+p2+p3,c1,caty-1))
ie2=array(0,dim=c(N1,N,p1+p2+p3,c1,caty-1))
de2=array(0,c(N1,c1,caty-1))
te3=array(0,c(N1,N,caty-1))
ate3=array(0,c(N1,c1,caty-1))
omu3=ate3
ade3=ate3
aie3=array(0,c(N1,p1+p2+p3,c1,caty-1))
ie3=array(0,dim=c(N1,N,p1+p2+p3,c1,caty-1))
de3=array(0,c(N1,N,caty-1))
mu_M2=array(0,c(dim(M1),N1))
mu_M3=mu_M2
te4=array(0,c(N1,N,caty-1))
de4=te4
ade4=array(0,c(N1,c1,caty-1))
ie4=array(0,dim=c(N1,N,p1+p2+p3,c1,caty-1))
mu.M0=array(0,c(dim(M1),N1))
mu.M1=mu.M0
ate4=ade4
omu4=ate4
aie4=array(0,c(N1,p1+p2+p3,c1,caty-1))
for (l in 1:c1){
x1.temp=x1
x3.temp=x1
if(l%in%data0$contpred1.0){
x3.temp[,l]=x1[,l]+deltap[l]
}
else if(l%in%data0$binpred1.0)
{x1.temp[,l]=0
x3.temp[,l]=1
deltap[l]=1}
else{ #categorical predictor
for (i in 1:nrow(data0$catpred1.0))
if(l%in%(data0$catpred1.0[i,1]:data0$catpred1.0[i,2]))
{x1.temp[,data0$catpred1.0[i,1]:data0$catpred1.0[i,2]]=0
x3.temp[,data0$catpred1.0[i,1]:data0$catpred1.0[i,2]]=0
x3.temp[,l]=1}
deltap[l]=1
}
#method 1: the same for binary or continuous predictors
if(p1>0){
for(k in 1:(caty-1))
if (p1+p2+p3==1 & c1==1 & contm1[1,1]==contm1[1,2])
aie1[,contm[1],l,k]=med0$BUGSoutput$sims.list$alpha1[,1]*med0$BUGSoutput$sims.list$beta[,k,contm1[1,1]]#*mean(data0$m.cont.der[,contm3[1,1]]) since alpha1 is the coefficient of x on the change of f(M), beta is the coefficient of f(M) on the change of y
else
for (j in 1: p1)
aie1[,contm[j],l,k]=apply(as.matrix(med0$BUGSoutput$sims.list$alpha1[,contm1[j,1]:contm1[j,2],l])*as.matrix(med0$BUGSoutput$sims.list$beta[,k,contm1[j,1]:contm1[j,2]]),1,sum)#*mean(data0$m.cont.der[,contm3[j,1]])
}
if(p2>0){
if(p2==1 & c1==1) #since c1=1, the predictor can only be binary or continuous
{if (nmc==1)
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
for(j in 1:(caty-1)){
if(!1%in%data0$contpred1.0) #if the predictor is binary
ie1[,,binm[1],1,j]=med0$BUGSoutput$sims.list$beta[,j,binm1[1]]*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))
else #if the predictor is continuous
ie1[,,binm[1],1,j]=med0$BUGSoutput$sims.list$alpha1.b[,1]*med0$BUGSoutput$sims.list$beta[,j,binm1[1]]*exp(temp.x1)/(1+exp(temp.x1))^2
aie1[,binm[1],l,j] <-apply(ie1[,,binm[1],1,j],1,mean)}
}
else
for (k in 1:p2){
if (nmc==1 & p2==1)
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
for(j in 1:(caty-1)){
if(!l%in%data0$contpred1.0) #if the predictor is binary or categorical
ie1[,,binm[k],l,j]=med0$BUGSoutput$sims.list$beta[,j,binm1[k]]*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))
else #for continuous predictor
ie1[,,binm[k],l,j]=med0$BUGSoutput$sims.list$alpha1.b[,k,l]*med0$BUGSoutput$sims.list$beta[,j,binm1[k]]*exp(temp.x1)/(1+exp(temp.x1))^2
aie1[,binm[k],l,j] <- apply(ie1[,,binm[k],l,j],1,mean)}
}
}
if(p3>0){
for (j in 1:p3){
mu_Mc1<-array(0,c(N1,N,cat2[j]-1))
mu_Mc0<-array(0,c(N1,N,cat2[j]-1))
for (k in 2:cat2[j]){
mu_Mc0[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x1.temp))
mu_Mc1[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x3.temp))
}
sum_Mc1 <-apply(mu_Mc1,c(1,2),sum)+1
sum_Mc0 <-apply(mu_Mc0,c(1,2),sum)+1
if(l%in%data0$contpred1.0) #for continuous predictor
{tt.0=matrix(0,N1,N)
for (k in 2:cat2[j])
tt.0=tt.0+mu_Mc0[,,k-1]/sum_Mc0*med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,l] #accumulate sum_k p_k*alpha_k for the multinomial-logit derivative
for (q1 in 1:(caty-1))
for (k in 2:cat2[j])
ie1[,,catm[j],l,q1]=ie1[,,catm[j],l,q1]+(mu_Mc0[,,k-1]/sum_Mc0)*med0$BUGSoutput$sims.list$beta[,q1,catm1[j,1]+k-2]*(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,l]-tt.0)
}
else #for binary or categorical predictor
for (q1 in 1:(caty-1))
for (k in 2:cat2[j])
ie1[,,catm[j],l,q1]=ie1[,,catm[j],l,q1]+(mu_Mc1[,,k-1]/sum_Mc1-mu_Mc0[,,k-1]/sum_Mc0)*med0$BUGSoutput$sims.list$beta[,q1,catm1[j,1]+k-2]
for (q1 in 1:(caty-1))
aie1[,catm[j],l,q1]<-apply(ie1[,,catm[j],l,q1],1,mean)
}}
if(l%in%data0$contpred1)
{tt1=match(l,data0$contpred1)
for (q1 in 1:(caty-1))
de1[,l,q1]=as.matrix(med0$BUGSoutput$sims.list$c[,q1,data0$contpred3[tt1,1]:data0$contpred3[tt1,2]])%*%
apply(as.matrix(data0$pred.cont.der[,data0$contpred3[tt1,1]:data0$contpred3[tt1,2]]),2,mean)
tt2=tt2+data0$contpred3[tt1,2]-data0$contpred3[tt1,1]+1
}
else
{for (q1 in 1:(caty-1))
de1[,l,q1]=med0$BUGSoutput$sims.list$c[,q1,tt2]
tt2=tt2+1}
#method2: the same for binary and continuous predictors
if(p1>0){
if(p1==1 & c1==1 & contm1[1,1]==contm1[1,2])
{temp.M3=M1
temp.M3[,contm1[1,1]]=M3[,contm3[1,1]]
temp.mu1<-array(0,c(N1,N,caty))
temp.mu3<-temp.mu1
for(q1 in 2:caty)
{temp.mu1[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(M1)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
temp.mu3[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(temp.M3)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
if(!is.null(cova))
{temp.mu1[,,q1]=temp.mu1[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)
temp.mu3[,,q1]=temp.mu3[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)}}
temp.mu1=exp(temp.mu1)
temp.mu3=exp(temp.mu3)
temp.mu1.sum=apply(temp.mu1,c(1,2),sum)
temp.mu3.sum=apply(temp.mu3,c(1,2),sum)
for(q1 in 1:(caty-1))
{ie2[,,contm[1],l,q1]=(med0$BUGSoutput$sims.list$alpha1.a[,1]/deltam[contm[1]])*(temp.mu3[,,q1+1]/temp.mu3.sum-temp.mu1[,,q1+1]/temp.mu1.sum)
aie2[,contm[1],l,q1]=apply(ie2[,,contm[1],l,q1],1,mean,na.rm=T)}
}
else
for (j in 1:p1){
temp.M3=M1
temp.M3[,contm1[j,1]:contm1[j,2]]=M3[,contm3[j,1]:contm3[j,2]]
temp.mu1<-array(0,c(N1,N,caty))
temp.mu3<-temp.mu1
for(q1 in 2:caty)
{temp.mu1[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(M1)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
temp.mu3[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(temp.M3)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
if(!is.null(cova))
{temp.mu1[,,q1]=temp.mu1[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)
temp.mu3[,,q1]=temp.mu3[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)}}
temp.mu1=exp(temp.mu1)
temp.mu3=exp(temp.mu3)
temp.mu1.sum=apply(temp.mu1,c(1,2),sum)
temp.mu3.sum=apply(temp.mu3,c(1,2),sum)
for(q1 in 1:(caty-1))
{ie2[,,contm[j],l,q1]=(med0$BUGSoutput$sims.list$alpha1.a[,j,l]/deltam[contm[j]])*(temp.mu3[,,q1+1]/temp.mu3.sum-temp.mu1[,,q1+1]/temp.mu1.sum)
aie2[,contm[j],l,q1]=apply(ie2[,,contm[j],l,q1],1,mean,na.rm=T)}
}}
#for binary and categorical mediators, method 2 and method 1 are not the same
if(p2>0){
if(p2==1 & c1==1){
temp.M3=M1
temp.M1=M1
temp.M3[,binm1[1]]=1
temp.M1[,binm1[1]]=0
temp.mu1<-array(0,c(N1,N,caty))
temp.mu3<-temp.mu1
for(q1 in 2:caty)
{temp.mu1[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(temp.M1)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
temp.mu3[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(temp.M3)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
if(!is.null(cova))
{temp.mu1[,,q1]=temp.mu1[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)
temp.mu3[,,q1]=temp.mu3[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)}}
temp.mu1=exp(temp.mu1)
temp.mu3=exp(temp.mu3)
temp.mu1.sum=apply(temp.mu1,c(1,2),sum)
temp.mu3.sum=apply(temp.mu3,c(1,2),sum)
bpart=array(0,c(N1,N,caty))
for(q1 in 1:(caty-1))
bpart[,,q1]=temp.mu3[,,q1+1]/temp.mu3.sum-temp.mu1[,,q1+1]/temp.mu1.sum
if (nmc==1){
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1]+matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1]+matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)
}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
for(q1 in 1:(caty-1)){
ie2[,,binm[1],1,q1]=bpart[,,q1]*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))/deltap[l]
aie2[,binm[1],1,q1] <-apply(ie2[,,binm[1],1,q1],1,mean,na.rm=T)}}
else
for (k in 1:p2){
temp.M3=M1
temp.M1=M1
temp.M3[,binm1[k]]=1
temp.M1[,binm1[k]]=0
temp.mu1<-array(0,c(N1,N,caty))
temp.mu3<-temp.mu1
for(q1 in 2:caty)
{temp.mu1[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(temp.M1)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
temp.mu3[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(temp.M3)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
if(!is.null(cova))
{temp.mu1[,,q1]=temp.mu1[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)
temp.mu3[,,q1]=temp.mu3[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)}}
temp.mu1=exp(temp.mu1)
temp.mu3=exp(temp.mu3)
temp.mu1.sum=apply(temp.mu1,c(1,2),sum)
temp.mu3.sum=apply(temp.mu3,c(1,2),sum)
bpart=array(0,c(N1,N,caty-1))
for(q1 in 1:(caty-1))
bpart[,,q1]=temp.mu3[,,q1+1]/temp.mu3.sum-temp.mu1[,,q1+1]/temp.mu1.sum
if(nmc==1 & p2==1){
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
else{
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
for(q1 in 1:(caty-1)){
ie2[,,binm[k],l,q1]=bpart[,,q1]*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))/deltap[l]
aie2[,binm[k],l,q1] <- apply(ie2[,,binm[k],l,q1],1,mean,na.rm=T)}
}
}
if(p3>0){
for (j in 1:p3){
bpart=array(0,c(N1,N,cat2[j]-1,caty-1))
M1.temp=M1
M1.temp[,catm1[j,1]:catm1[j,2]]=0
for (k in catm1[j,1]:catm1[j,2]){
temp.M3=M1.temp
temp.M1=M1.temp
temp.M3[,k]=1
temp.mu1<-array(0,c(N1,N,caty))
temp.mu3<-temp.mu1
for(q1 in 2:caty)
{temp.mu1[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(temp.M1)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
temp.mu3[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(temp.M3)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
if(!is.null(cova))
{temp.mu1[,,q1]=temp.mu1[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)
temp.mu3[,,q1]=temp.mu3[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)}}
temp.mu1=exp(temp.mu1)
temp.mu3=exp(temp.mu3)
temp.mu1.sum=apply(temp.mu1,c(1,2),sum)
temp.mu3.sum=apply(temp.mu3,c(1,2),sum)
for(q1 in 1:(caty-1))
bpart[,,k-catm1[j,1]+1,q1]=temp.mu3[,,q1+1]/temp.mu3.sum-temp.mu1[,,q1+1]/temp.mu1.sum #category-probability difference; temp.mu* are already exponentiated
}
mu_Mc1<-array(0,c(N1,N,cat2[j]-1))
mu_Mc0<-array(0,c(N1,N,cat2[j]-1))
for (k in 2:cat2[j]){
mu_Mc1[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,]%*%t(x3.temp))
mu_Mc0[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,]%*%t(x1.temp))
}
sum_Mc1 <-apply(mu_Mc1,c(1,2),sum)+1
sum_Mc0 <-apply(mu_Mc0,c(1,2),sum)+1
for(q1 in 1:(caty-1))
{for (k in 2:cat2[j])
ie2[,,catm[j],l,q1]=ie2[,,catm[j],l,q1]+(mu_Mc1[,,k-1]/sum_Mc1-mu_Mc0[,,k-1]/sum_Mc0)*bpart[,,k-1,q1]
aie2[,catm[j],l,q1]<-apply(ie2[,,catm[j],l,q1],1,mean,na.rm=T)}
}
}
temp.x1=x
temp.x3=x
if(l%in%data0$contpred1.0)
{tt1=match(l,data0$contpred1.0)
temp.x3[,data0$contpred2[tt1,1]:data0$contpred2[tt1,2]]=data0$pred3[,data0$contpred3[tt1,1]:data0$contpred3[tt1,2]]}
else if(l%in%data0$binpred1.0)
{tt1=match(l,data0$binpred1)
temp.x1[,data0$binpred2[tt1]]=0
temp.x3[,data0$binpred2[tt1]]=1
deltap[l]=1}
else
{for (i in 1:nrow(data0$catpred1.0))
if(l%in%(data0$catpred1.0[i,1]:data0$catpred1.0[i,2]))
tt1=i
temp.x1[,data0$catpred2[tt1,1]:data0$catpred2[tt1,2]]=0
temp.x3[,data0$catpred2[tt1,1]:data0$catpred2[tt1,2]]=0
temp.x3[,l]=1
deltap[l]=1}
temp.mu1<-array(0,c(N1,N,caty))
temp.mu3<-temp.mu1
for(q1 in 2:caty)
{temp.mu1[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(temp.x1)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(M1)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
temp.mu3[,,q1]=med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(temp.x3)+med0$BUGSoutput$sims.list$beta[,q1-1,]%*%t(M1)+matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,nrow(x))
if(!is.null(cova))
{temp.mu1[,,q1]=temp.mu1[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)
temp.mu3[,,q1]=temp.mu3[,,q1]+med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)}}
temp.mu1=exp(temp.mu1)
temp.mu3=exp(temp.mu3)
temp.mu1.sum=apply(temp.mu1,c(1,2),sum)
temp.mu3.sum=apply(temp.mu3,c(1,2),sum)
for(q1 in 1:(caty-1))
{de2.1=(temp.mu3[,,q1+1]/temp.mu3.sum-temp.mu1[,,q1+1]/temp.mu1.sum)/deltap[l]
de2[,l,q1]=apply(de2.1,1,mean,na.rm=T)}
#method 3:parametric
#3.1. get M1(x) and M1(x+dx) for y
if(p1>0){
if(c1==1 & p1+p2+p3==1 & contm1[1,1]==contm1[1,2]){
if(nmc==1)
{mu_M2[,contm1[1,1],] <- med0$BUGSoutput$sims.list$alpha0[,contm1[1,1]]+as.matrix(med0$BUGSoutput$sims.list$alpha1[,contm1[1,1]])%*%t(x1.temp)
mu_M3[,contm1[1,1],] <- med0$BUGSoutput$sims.list$alpha0[,contm1[1,1]]+as.matrix(med0$BUGSoutput$sims.list$alpha1[,contm1[1,1]])%*%t(x3.temp)}
else
{mu_M2[,contm1[1,1],] <- med0$BUGSoutput$sims.list$alpha0[,contm1[1,1],mind[contm[1],]]%*%t(mcov[,mind[contm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1[,contm1[1,1]])%*%t(x1.temp)
mu_M3[,contm1[1,1],] <- med0$BUGSoutput$sims.list$alpha0[,contm1[1,1],mind[contm[1],]]%*%t(mcov[,mind[contm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1[,contm1[1,1]])%*%t(x3.temp)}
}
else
for (j in 1:p1){
for (k in contm1[j,1]:contm1[j,2]){
if(nmc==1 & p1+p2+p3==1)
{mu_M2[,k,] <- med0$BUGSoutput$sims.list$alpha0[,k]+med0$BUGSoutput$sims.list$alpha1[,k,l]%*%t(x1.temp)
mu_M3[,k,] <- med0$BUGSoutput$sims.list$alpha0[,k]+med0$BUGSoutput$sims.list$alpha1[,k,l]%*%t(x3.temp)}
else
{mu_M2[,k,] <- med0$BUGSoutput$sims.list$alpha0[,k,mind[contm[j],]]%*%t(mcov[,mind[contm[j],]])+med0$BUGSoutput$sims.list$alpha1[,k,]%*%t(x1.temp)
mu_M3[,k,] <- med0$BUGSoutput$sims.list$alpha0[,k,mind[contm[j],]]%*%t(mcov[,mind[contm[j],]])+med0$BUGSoutput$sims.list$alpha1[,k,]%*%t(x3.temp)}
}}}
if(p2>0){
if(p2==1 & c1==1)
{if(nmc==1)
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
mu_M2[,binm[1],] <- exp(temp.x1)/(1+exp(temp.x1))
mu_M3[,binm[1],] <- exp(temp.x3)/(1+exp(temp.x3))
}
else{
for (k in 1:p2){
if(nmc==1 & p2==1)
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k]+med0$BUGSoutput$sims.list$alpha1.b[,k,]%*%t(x3.temp)}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)}
mu_M2[,binm1[k],] <- exp(temp.x1)/(1+exp(temp.x1))
mu_M3[,binm1[k],] <- exp(temp.x3)/(1+exp(temp.x3))}
}}
if(p3>0){
for (j in 1:p3){
mu_Mc1<-array(0,c(N1,N,cat2[j]-1))
mu_Mc0<-array(0,c(N1,N,cat2[j]-1))
for (k in 2:cat2[j]){
mu_Mc1[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,]%*%t(x3.temp))
mu_Mc0[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,]%*%t(x1.temp))
}
sum_Mc1 <-apply(mu_Mc1,c(1,2),sum)+1
sum_Mc0 <-apply(mu_Mc0,c(1,2),sum)+1
for (k in 2:cat2[j])
{mu_M2[,catm1[j,1]+k-2,]=mu_Mc0[,,k-1]/sum_Mc0
mu_M3[,catm1[j,1]+k-2,]=mu_Mc1[,,k-1]/sum_Mc1}
}}
#3.2. get x and dx for y
x1.temp1=x
x3.temp1=x
if(l%in%as.vector(data0$contpred1.0)){ #need the continuous predictor in its original format
i=match(l,data0$contpred1.0)
x3.temp1[,data0$contpred2[i,1]:data0$contpred2[i,2]]=data0$pred3[,data0$contpred3[i,1]:data0$contpred3[i,2]]
}
else if(l%in%data0$binpred1.0)
{i=match(l,data0$binpred1.0)
x1.temp1[,data0$binpred2[i]]=0
x3.temp1[,data0$binpred2[i]]=1
deltap[l]=1}
else{ #categorical predictor
for (i in 1:nrow(data0$catpred1.0))
if(l%in%(data0$catpred1.0[i,1]:data0$catpred1.0[i,2]))
{x1.temp1[,data0$catpred2[i,1]:data0$catpred2[i,2]]=0
x3.temp1[,data0$catpred2[i,1]:data0$catpred2[i,2]]=0
di=match(l,data0$catpred1.0[i,1]:data0$catpred1.0[i,2])
x3.temp1[,data0$catpred2[i,1]+di-1]=1}
deltap[l]=1
}
#3.3. get the total effect
mu_y0<-array(0,c(N1,N,caty))
mu_y1<-mu_y0
for(q1 in 2:caty)
{temp1=array(0,c(nrow(M1)+1,ncol(M1),N1))
temp1[2:(nrow(M1)+1),,]=mu_M2
temp1[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
temp2=temp1
temp2[2:(nrow(M1)+1),,]=mu_M3
if(is.null(cova))
{mu_y0[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + t(apply(temp1,3,matrix.prod))
mu_y1[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + t(apply(temp2,3,matrix.prod))}
else
{mu_y0[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(as.matrix(cova))+t(apply(temp1,3,matrix.prod))
mu_y1[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(as.matrix(cova))+t(apply(temp2,3,matrix.prod))
}} #get the linear part
mu_y0=exp(mu_y0)
mu_y1=exp(mu_y1)
mu_y0.sum=apply(mu_y0,c(1,2),sum)
mu_y1.sum=apply(mu_y1,c(1,2),sum)
for(q1 in 1:(caty-1)){
te3[,,q1]=(mu_y1[,,q1+1]/mu_y1.sum-mu_y0[,,q1+1]/mu_y0.sum)/deltap[l]
ate3[,l,q1]=apply(te3[,,q1],1,mean,na.rm=T)}
#3.4. calculate the ie
j1=sample(1:N,size=N*N1,replace=T)
j2=sample(1:N,size=N*N1,replace=T)
#3.4.1. continuous mediators
if(p1>0){
for (j in 1:p1){
mu_y0.2<-array(0,c(N1,N,caty))
mu_y1.2<-mu_y0.2
for(q1 in 2:caty)
{temp1.1=temp1
temp1.2=temp2
temp1.1[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
temp1.2[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
for (i in contm1[j,1]:contm1[j,2])
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
if(is.null(cova))
{mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) +t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) +t(apply(temp1.2,3,matrix.prod)) }
else{
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
}
mu_y0.2=exp(mu_y0.2)
mu_y1.2=exp(mu_y1.2)
mu_y0.2.sum=apply(mu_y0.2,c(1,2),sum)
mu_y1.2.sum=apply(mu_y1.2,c(1,2),sum)
for(q1 in 1:(caty-1))
ie3[,,contm[j],l,q1]<-te3[,,q1]-(mu_y1.2[,,q1+1]/mu_y1.2.sum-mu_y0.2[,,q1+1]/mu_y0.2.sum)/deltap[l]
}}
#3.4.2. binary mediators
if(p2>0){
for (k in 1:p2){
mu_y0.2<-array(0,c(N1,N,caty))
mu_y1.2<-mu_y0.2
for(q1 in 2:caty)
{temp1.1=temp1
temp1.2=temp2
temp1.1[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
temp1.2[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
temp1.1[2:(nrow(M1)+1),binm1[k],]=matrix(M1[j1,binm1[k]],N,N1)
temp1.2[2:(nrow(M1)+1),binm1[k],]=matrix(M1[j1,binm1[k]],N,N1) #j2/j1
if (is.null(cova)){
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
}
mu_y0.2=exp(mu_y0.2)
mu_y1.2=exp(mu_y1.2)
mu_y0.2.sum=apply(mu_y0.2,c(1,2),sum)
mu_y1.2.sum=apply(mu_y1.2,c(1,2),sum)
for(q1 in 1:(caty-1))
ie3[,,binm[k],l,q1]<-te3[,,q1]-(mu_y1.2[,,q1+1]/mu_y1.2.sum-mu_y0.2[,,q1+1]/mu_y0.2.sum)/deltap[l]
}}
if(p3>0){
for (j in 1:p3){
mu_y0.2<-array(0,c(N1,N,caty))
mu_y1.2<-mu_y0.2
for(q1 in 2:caty)
{temp1.1=temp1
temp1.2=temp2
temp1.1[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
temp1.2[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
for (i in catm1[j,1]:catm1[j,2])
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
if(is.null(cova)){
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
}
mu_y0.2=exp(mu_y0.2)
mu_y1.2=exp(mu_y1.2)
mu_y0.2.sum=apply(mu_y0.2,c(1,2),sum)
mu_y1.2.sum=apply(mu_y1.2,c(1,2),sum)
for(q1 in 1:(caty-1))
ie3[,,catm[j],l,q1]<-te3[,,q1]-(mu_y1.2[,,q1+1]/mu_y1.2.sum-mu_y0.2[,,q1+1]/mu_y0.2.sum)/deltap[l]
}}
aie3[,,l,]<-apply(array(ie3[,,,l,],c(N1,N,p1+p2+p3,caty-1)),c(1,3,4),mean,na.rm=T)
#3.5. Calculate the de
mu_y0.2<-array(0,c(N1,N,caty))
mu_y1.2<-mu_y0.2
for(q1 in 2:caty)
{temp1.1=temp1
temp1.2=temp2
temp1.1[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
temp1.2[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
for (i in 1:ncol(M1))
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
if(is.null(cova)){
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
}
mu_y0.2=exp(mu_y0.2)
mu_y1.2=exp(mu_y1.2)
mu_y0.2.sum=apply(mu_y0.2,c(1,2),sum)
mu_y1.2.sum=apply(mu_y1.2,c(1,2),sum)
for(q1 in 1:(caty-1))
{de3[,,q1]<-(mu_y1.2[,,q1+1]/mu_y1.2.sum-mu_y0.2[,,q1+1]/mu_y0.2.sum)/deltap[l]
ade3[,l,q1]=apply(de3[,,q1],1,mean,na.rm=T)}
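#Note: de3 replaces all mediator columns with random draws at once, so what
#remains of the exposure contrast is the direct effect; in 3.4 each indirect
#effect was obtained as te3 minus the contrast with only that mediator
#randomized.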
#method 4: semi-parametric, for binary or categorical predictors
if(!l%in%data0$contpred1.0){
if(!is.null(data0$binpred1.0))
{if (l%in%data0$binpred1.0)
{M.0=data.frame(M1[x1[,l]==0,])
y.0=y[x1[,l]==0]}
else
{for(i in 1:nrow(data0$catpred1.0))
if(l%in%(data0$catpred1.0[i,1]:data0$catpred1.0[i,2]))
tt1=i
M.0=data.frame(M1[apply(x1[,data0$catpred1.0[tt1,1]:data0$catpred1.0[tt1,2]]==1,1,sum)==0,])
y.0=y[apply(x1[,data0$catpred1.0[tt1,1]:data0$catpred1.0[tt1,2]]==1,1,sum)==0]}}
else
{for(i in 1:nrow(data0$catpred1.0))
if(l%in%(data0$catpred1.0[i,1]:data0$catpred1.0[i,2]))
tt1=i
M.0=data.frame(M1[apply(x1[,data0$catpred1.0[tt1,1]:data0$catpred1.0[tt1,2]]==1,1,sum)==0,])
y.0=y[apply(x1[,data0$catpred1.0[tt1,1]:data0$catpred1.0[tt1,2]]==1,1,sum)==0]}
y.1=y[x1[,l]==1]
M.1=data.frame(M1[x1[,l]==1,])
j1=sample(1:N,size=N*N1,replace=T)
j2=sample(1:N,size=N*N1,replace=T)
n3=nrow(M.0)
n4=nrow(M.1)
j3=sample(1:n3,size=N*N1,replace = T)
j4=sample(1:n4,size=N*N1,replace = T)
for (i in 1:ncol(M1))
{mu.M0[,i,]=matrix(M.0[j3,i],N,N1)
mu.M1[,i,]=matrix(M.1[j4,i],N,N1)}
#4.1. get the total effect
mu_y0=matrix(y.0[j3],N1,N)
mu_y1=matrix(y.1[j4],N1,N)
temp1=array(0,c(nrow(M1)+1,ncol(M1),N1))
temp1[2:(nrow(M1)+1),,]=mu.M0
temp2=temp1
temp2[2:(nrow(M1)+1),,]=mu.M1
temp.levels=levels(y)[-1]
for(q1 in 1:length(temp.levels))
{te4[,,q1]<- (mu_y1==temp.levels[q1])-(mu_y0==temp.levels[q1])
ate4[,l,q1]=apply(te4[,,q1],1,mean,na.rm=T)}
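#Note: this semi-parametric total effect is model-free: mu_y1 and mu_y0 are
#resampled outcomes from the exposed and unexposed groups, and te4 compares
#the indicators of each non-reference outcome level between the two groups.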
#4.2. Get the ies
#ie for continuous mediators
if(p1>0){
for (j in 1:p1){
mu_y0.2<-array(0,c(N1,N,caty))
mu_y1.2<-mu_y0.2
for(q1 in 2:caty)
{temp1.1=temp1
temp1.2=temp2
temp1.1[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
temp1.2[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
for (i in contm1[j,1]:contm1[j,2])
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
if(is.null(cova))
{mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) +t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) +t(apply(temp1.2,3,matrix.prod)) }
else{
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
}
mu_y0.2=exp(mu_y0.2)
mu_y1.2=exp(mu_y1.2)
mu_y0.2.sum=apply(mu_y0.2,c(1,2),sum)
mu_y1.2.sum=apply(mu_y1.2,c(1,2),sum)
for(q1 in 1:(caty-1))
ie4[,,contm[j],l,q1]<-te4[,,q1]-(mu_y1.2[,,q1+1]/mu_y1.2.sum-mu_y0.2[,,q1+1]/mu_y0.2.sum)/deltap[l]
}}
#ie for binary mediators
if(p2>0){
for (k in 1:p2){
mu_y0.2<-array(0,c(N1,N,caty))
mu_y1.2<-mu_y0.2
for(q1 in 2:caty)
{temp1.1=temp1
temp1.2=temp2
temp1.1[2:(nrow(M1)+1),binm1[k],]=matrix(M1[j1,binm1[k]],N,N1)
temp1.2[2:(nrow(M1)+1),binm1[k],]=matrix(M1[j1,binm1[k]],N,N1) #j2/j1
temp1.1[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
temp1.2[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
if (is.null(cova)){
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
}
mu_y0.2=exp(mu_y0.2)
mu_y1.2=exp(mu_y1.2)
mu_y0.2.sum=apply(mu_y0.2,c(1,2),sum)
mu_y1.2.sum=apply(mu_y1.2,c(1,2),sum)
for(q1 in 1:(caty-1))
ie4[,,binm[k],l,q1]<-te4[,,q1]-(mu_y1.2[,,q1+1]/mu_y1.2.sum-mu_y0.2[,,q1+1]/mu_y0.2.sum)/deltap[l]
}}
#ie for categorical mediators
if(p3>0){
for (j in 1:p3){
mu_y0.2<-array(0,c(N1,N,caty))
mu_y1.2<-mu_y0.2
for(q1 in 2:caty)
{temp1.1=temp1
temp1.2=temp2
temp1.1[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
temp1.2[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
for (i in catm1[j,1]:catm1[j,2])
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
if(is.null(cova)){
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
}
mu_y0.2=exp(mu_y0.2)
mu_y1.2=exp(mu_y1.2)
mu_y0.2.sum=apply(mu_y0.2,c(1,2),sum)
mu_y1.2.sum=apply(mu_y1.2,c(1,2),sum)
for(q1 in 1:(caty-1))
ie4[,,catm[j],l,q1]<-te4[,,q1]-(mu_y1.2[,,q1+1]/mu_y1.2.sum-mu_y0.2[,,q1+1]/mu_y0.2.sum)/deltap[l]
}}
aie4[,,l,]<-apply(array(ie4[,,,l,],c(N1,N,p1+p2+p3,caty-1)),c(1,3,4),mean,na.rm=T)
#4.3. Calculate the de
mu_y0.2<-array(0,c(N1,N,caty))
mu_y1.2<-mu_y0.2
for(q1 in 2:caty)
{temp1.1=temp1
temp1.2=temp2
for (i in 1:ncol(M1))
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
temp1.1[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
temp1.2[1,,]=t(med0$BUGSoutput$sims.list$beta[,q1-1,])
if(is.null(cova)){
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2[,,q1]<- matrix(med0$BUGSoutput$sims.list$beta0[,q1-1],N1,N) + med0$BUGSoutput$sims.list$c[,q1-1,]%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta[,q1-1,]%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
}
mu_y0.2=exp(mu_y0.2)
mu_y1.2=exp(mu_y1.2)
mu_y0.2.sum=apply(mu_y0.2,c(1,2),sum)
mu_y1.2.sum=apply(mu_y1.2,c(1,2),sum)
}
}
ate1=apply(aie1,c(1,3,4),sum)+de1
ate2=apply(aie2,c(1,3,4),sum)+de2
colnames(aie4)=colnames(M2)
colnames(aie1)=colnames(M2)
colnames(aie2)=colnames(M2)
colnames(aie3)=colnames(M2)
}
else{
aie1=array(0,c(N1,p1+p2+p3,c1))
ie1=array(0,dim=c(N1,N,p1+p2+p3,c1))
tt2=1
de1=NULL
n.cont=1
aie2=array(0,c(N1,p1+p2+p3,c1))
ie2=array(0,dim=c(N1,N,p1+p2+p3,c1))
de2=NULL
ate3=matrix(0,N1,c1)
omu3=ate3
ade3=matrix(0,N1,c1)
aie3=array(0,c(N1,p1+p2+p3,c1))
ie3=array(0,dim=c(N1,N,p1+p2+p3,c1))
mu_M2=array(0,c(dim(M1),N1))
mu_M3=mu_M2
ade4=matrix(0,N1,c1)
ie4=array(0,dim=c(N1,N,p1+p2+p3,c1))
mu.M0=array(0,c(dim(M1),N1))
mu.M1=mu.M0
ate4=matrix(0,N1,c1)
omu4=ate4
aie4=array(0,c(N1,p1+p2+p3,c1))
for (l in 1:c1){
x1.temp=x1
x3.temp=x1
if(l%in%data0$contpred1.0){
x3.temp[,l]=x1[,l]+deltap[l]
}
else if(l%in%data0$binpred1.0)
{x1.temp[,l]=0
x3.temp[,l]=1
deltap[l]=1}
else{ #categorical predictor
for (i in 1:nrow(data0$catpred1.0))
if(l%in%(data0$catpred1.0[i,1]:data0$catpred1.0[i,2]))
{x1.temp[,data0$catpred1.0[i,1]:data0$catpred1.0[i,2]]=0
x3.temp[,data0$catpred1.0[i,1]:data0$catpred1.0[i,2]]=0
x3.temp[,l]=1}
deltap[l]=1
}
#method 1: the same for binary or continuous predictors
if(p1>0){
if (p1+p2+p3==1 & c1==1 & contm1[1,1]==contm1[1,2])
aie1[,contm[1],l]=med0$BUGSoutput$sims.list$alpha1[,1]*med0$BUGSoutput$sims.list$beta[,contm1[1,1]] #the factor mean(data0$m.cont.der[,contm3[1,1]]) is not needed since alpha1 is the coefficient of x on the change of f(M) and beta is the coefficient of f(M) on the change of y
else
for (j in 1: p1)
aie1[,contm[j],l]=apply(as.matrix(med0$BUGSoutput$sims.list$alpha1[,contm1[j,1]:contm1[j,2],l])*as.matrix(med0$BUGSoutput$sims.list$beta[,contm1[j,1]:contm1[j,2]]),1,sum)#*mean(data0$m.cont.der[,contm3[j,1]])
}
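#Note: method 1 is the product-of-coefficients estimator: for a continuous
#mediator, alpha1 (effect of x on the transformed mediator f(M)) is
#multiplied by beta (effect of f(M) on y) and summed over the columns
#belonging to the same mediator.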
if(p2>0){
if(p2==1 & c1==1) #since c1=1, the predictor can only be binary or continuous
{if (nmc==1)
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
if(!1%in%data0$contpred1.0) #if the predictor is binary
ie1[,,binm[1],1]=med0$BUGSoutput$sims.list$beta[,binm1[1]]*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))
else #if the predictor is continuous
ie1[,,binm[1],1]=med0$BUGSoutput$sims.list$alpha1.b[,1]*med0$BUGSoutput$sims.list$beta[,binm1[1]]*exp(temp.x1)/(1+exp(temp.x1))^2
aie1[,binm[1],l] <-apply(ie1[,,binm[1],1],1,mean)
}
else
for (k in 1:p2){
if (nmc==1 & p2==1)
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
if(!l%in%data0$contpred1.0) #if the predictor is binary or categorical
ie1[,,binm[k],l]=med0$BUGSoutput$sims.list$beta[,binm1[k]]*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))
else #for continuous predictor
ie1[,,binm[k],l]=med0$BUGSoutput$sims.list$alpha1.b[,k,l]*med0$BUGSoutput$sims.list$beta[,binm1[k]]*exp(temp.x1)/(1+exp(temp.x1))^2
aie1[,binm[k],l] <- apply(ie1[,,binm[k],l],1,mean)
}
}
if(p3>0){
for (j in 1:p3){
mu_Mc1<-array(0,c(N1,N,cat2[j]-1))
mu_Mc0<-array(0,c(N1,N,cat2[j]-1))
for (k in 2:cat2[j]){
mu_Mc0[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x1.temp))
mu_Mc1[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x3.temp))
}
sum_Mc1 <-apply(mu_Mc1,c(1,2),sum)+1
sum_Mc0 <-apply(mu_Mc0,c(1,2),sum)+1
if(l%in%data0$contpred1.0) #for continuous predictor
{tt.0=rep(0,N1)
for (k in 2:cat2[j])
tt.0=tt.0+mu_Mc0[,,k-1]/sum_Mc0*med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,l] #accumulate the probability-weighted sum of alpha1.c over categories
for (k in 2:cat2[j])
ie1[,,catm[j],l]=ie1[,,catm[j],l]+(mu_Mc0[,,k-1]/sum_Mc0)*med0$BUGSoutput$sims.list$beta[,catm1[j,1]+k-2]*(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,l]-tt.0)
}
else #for binary or categorical predictor
for (k in 2:cat2[j])
ie1[,,catm[j],l]=ie1[,,catm[j],l]+(mu_Mc1[,,k-1]/sum_Mc1-mu_Mc0[,,k-1]/sum_Mc0)*med0$BUGSoutput$sims.list$beta[,catm1[j,1]+k-2]
aie1[,catm[j],l]<-apply(ie1[,,catm[j],l],1,mean)
}}
if(l%in%data0$contpred1)
{tt1=match(l,data0$contpred1)
de1=cbind(de1,as.matrix(med0$BUGSoutput$sims.list$c[,data0$contpred3[tt1,1]:data0$contpred3[tt1,2]])%*%
apply(as.matrix(data0$pred.cont.der[,data0$contpred3[tt1,1]:data0$contpred3[tt1,2]]),2,mean))
tt2=tt2+data0$contpred3[tt1,2]-data0$contpred3[tt1,1]+1
}
else
{de1=cbind(de1,med0$BUGSoutput$sims.list$c[,tt2])
tt2=tt2+1}
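#Note: de1 collects the direct-effect draws of predictor l from the outcome
#coefficients c: for a continuous predictor the coefficients of its
#transformed columns are weighted by the average derivative of the
#transformation (pred.cont.der); for binary or categorical predictors the
#coefficient itself is used.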
#method2: the same for binary and continuous predictors
if(p1>0){
if(y.type==1){
if(p1==1 & c1==1 & contm1[1,1]==contm1[1,2])
{ ie2[,,contm[1],l]=(med0$BUGSoutput$sims.list$alpha1.a[,1]/deltam[1])*
(as.matrix(med0$BUGSoutput$sims.list$beta[,contm1[1,1]])%*%(M3[,contm3[1,1]]-M1[,contm1[1,1]]))
aie2[,contm[1],l]=apply(ie2[,,contm[1],l],1,mean,na.rm=T)
}
else
for (j in 1:p1){
ie2[,,contm[j],l]=(med0$BUGSoutput$sims.list$alpha1.a[,j,l]/deltam[j])* #change in m per unit change in x
(med0$BUGSoutput$sims.list$beta[,contm1[j,1]:contm1[j,2]]%*%t(M3[,contm3[j,1]:contm3[j,2]]-M1[,contm1[j,1]:contm1[j,2]]))
aie2[,contm[j],l]=apply(ie2[,,contm[j],l],1,mean,na.rm=T) #change in y per unit change in m, averaged over observations
}}
else if(y.type==2)
{if(p1==1 & c1==1 & contm1[1,1]==contm1[1,2])
{ temp.M3=M1
temp.M3[,contm1[1,1]]=M3[,contm3[1,1]]
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(M1)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M3)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
ie2[,,contm[1],l]=(med0$BUGSoutput$sims.list$alpha1.a[,1]/deltam[1])*(exp(temp.mu3)/(1+exp(temp.mu3))-exp(temp.mu1)/(1+exp(temp.mu1)))
aie2[,contm[1],l]=apply(ie2[,,contm[1],l],1,mean,na.rm=T)
}
else
for (j in 1:p1){
temp.M3=M1
temp.M3[,contm1[j,1]:contm1[j,2]]=M3[,contm3[j,1]:contm3[j,2]]
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(M1)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M3)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
ie2[,,contm[j],l]=(med0$BUGSoutput$sims.list$alpha1.a[,j,l]/deltam[j])*(exp(temp.mu3)/(1+exp(temp.mu3))-exp(temp.mu1)/(1+exp(temp.mu1)))
aie2[,contm[j],l]=apply(ie2[,,contm[j],l],1,mean,na.rm=T)
}}
else if(y.type==4)
{if(p1==1 & c1==1 & contm1[1,1]==contm1[1,2])
{temp.M3=M1
temp.M3[,contm1[1,1]]=M3[,contm3[1,1]]
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(M1)
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M3)
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu3),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu1),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
ie2[,,contm[1],l]=as.vector(med0$BUGSoutput$sims.list$alpha1.a[,1]/deltam[1])*(tmean3-tmean1)
aie2[,contm[1],l]=apply(ie2[,,contm[1],l],1,mean,na.rm=T)
}
else
for (j in 1:p1){
temp.M3=M1
temp.M3[,contm1[j,1]:contm1[j,2]]=M3[,contm3[j,1]:contm3[j,2]]
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(M1)
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M3)
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu3),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu1),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
ie2[,,contm[j],l]=as.vector(med0$BUGSoutput$sims.list$alpha1.a[,j,l]/deltam[j])*(tmean3-tmean1)
aie2[,contm[j],l]=apply(ie2[,,contm[j],l],1,mean)
}}
}
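#Note: for y.type==4 the outcome is Weibull with shape r and rate
#lambda*exp(mu); vec packs the shape with the scale
#lambda^(-1/r)*exp(-mu/r), and weib.trunc.mean (a helper defined elsewhere
#in this package, assumed to return the mean of the Weibull truncated at
#'right') yields the expected survival time capped at tmax.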
#for binary and categorical mediators, method 2 and method 1 are not the same
if(p2>0){
if(y.type==1){
if(p2==1 & c1==1){
if (nmc==1){
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)
}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
ie2[,,binm[1],1]=med0$BUGSoutput$sims.list$beta[,binm1[1]]*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))/deltap[l]
aie2[,binm[1],1] <-apply(ie2[,,binm[1],1],1,mean,na.rm=T)}
else
for (k in 1:p2){
if(nmc==1 & p2==1){
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
else{
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
ie2[,,binm[k],l]=med0$BUGSoutput$sims.list$beta[,binm1[k]]*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))/deltap[l]
aie2[,binm[k],l] <- apply(ie2[,,binm[k],l],1,mean,na.rm=T)
}}
else if(y.type==2){
if(p2==1 & c1==1){
temp.M3=M1
temp.M1=M1
temp.M3[,binm1[1]]=1
temp.M1[,binm1[1]]=0
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M1)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M3)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
bpart=exp(temp.mu3)/(1+exp(temp.mu3))-exp(temp.mu1)/(1+exp(temp.mu1))
if (nmc==1){
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1]+matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1]+matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)
}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
ie2[,,binm[1],1]=bpart*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))/deltap[l]
aie2[,binm[1],1] <-apply(ie2[,,binm[1],1],1,mean,na.rm=T)}
else
for (k in 1:p2){
temp.M3=M1
temp.M1=M1
temp.M3[,binm1[k]]=1
temp.M1[,binm1[k]]=0
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M1)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M3)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
bpart=exp(temp.mu3)/(1+exp(temp.mu3))-exp(temp.mu1)/(1+exp(temp.mu1))
if(nmc==1 & p2==1){
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
else{
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
ie2[,,binm[k],l]=bpart*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))/deltap[l]
aie2[,binm[k],l] <- apply(ie2[,,binm[k],l],1,mean,na.rm=T)
}}
else if(y.type==4){
if(p2==1 & c1==1){
temp.M3=M1
temp.M1=M1
temp.M3[,binm1[1]]=1
temp.M1[,binm1[1]]=0
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M1)
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M3)
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu3),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu1),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
bpart=tmean3-tmean1
if (nmc==1){
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1]+matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1]+matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)
}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
ie2[,,binm[1],1]=bpart*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))/deltap[l]
aie2[,binm[1],1] <-apply(ie2[,,binm[1],1],1,mean,na.rm=T)}
else
for (k in 1:p2){
temp.M3=M1
temp.M1=M1
temp.M3[,binm1[k]]=1
temp.M1[,binm1[k]]=0
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M1)
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M3)
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu3),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu1),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
bpart=tmean3-tmean1
if(nmc==1 & p2==1){
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
else{
temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)
}
ie2[,,binm[k],l]=bpart*(exp(temp.x3)/(1+exp(temp.x3))-exp(temp.x1)/(1+exp(temp.x1)))/deltap[l]
aie2[,binm[k],l] <- apply(ie2[,,binm[k],l],1,mean,na.rm=T)
}}
}
if(p3>0){
if(y.type==1)
for (j in 1:p3){
mu_Mc1<-array(0,c(N1,N,cat2[j]-1))
mu_Mc0<-array(0,c(N1,N,cat2[j]-1))
for (k in 2:cat2[j]){
mu_Mc1[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x3.temp))
mu_Mc0[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x1.temp))
}
sum_Mc1 <-apply(mu_Mc1,c(1,2),sum)+1
sum_Mc0 <-apply(mu_Mc0,c(1,2),sum)+1
for (k in 2:cat2[j])
ie2[,,catm[j],l]=ie2[,,catm[j],l]+(mu_Mc1[,,k-1]/sum_Mc1-mu_Mc0[,,k-1]/sum_Mc0)*med0$BUGSoutput$sims.list$beta[,catm1[j,1]+k-2]/deltap[l]
aie2[,catm[j],l]<-apply(ie2[,,catm[j],l],1,mean,na.rm=T)
}
else if(y.type==2)
for (j in 1:p3){
bpart=array(0,c(N1,N,cat2[j]-1))
M1.temp=M1
M1.temp[,catm1[j,1]:catm1[j,2]]=0
for (k in catm1[j,1]:catm1[j,2]){
temp.M3=M1.temp
temp.M1=M1.temp
temp.M3[,k]=1
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M1)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M3)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
bpart[,,k-catm1[j,1]+1]=exp(temp.mu3)/(1+exp(temp.mu3))-exp(temp.mu1)/(1+exp(temp.mu1))
}
mu_Mc1<-array(0,c(N1,N,cat2[j]-1))
mu_Mc0<-array(0,c(N1,N,cat2[j]-1))
for (k in 2:cat2[j]){
mu_Mc1[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x3.temp))
mu_Mc0[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x1.temp))
}
sum_Mc1 <-apply(mu_Mc1,c(1,2),sum)+1
sum_Mc0 <-apply(mu_Mc0,c(1,2),sum)+1
for (k in 2:cat2[j])
ie2[,,catm[j],l]=ie2[,,catm[j],l]+(mu_Mc1[,,k-1]/sum_Mc1-mu_Mc0[,,k-1]/sum_Mc0)*bpart[,,k-1]
aie2[,catm[j],l]<-apply(ie2[,,catm[j],l],1,mean,na.rm=T)
}
else if(y.type==4)
for (j in 1:p3){
bpart=array(0,c(N1,N,cat2[j]-1))
M1.temp=M1
M1.temp[,catm1[j,1]:catm1[j,2]]=0
for (k in catm1[j,1]:catm1[j,2]){
temp.M3=M1.temp
temp.M1=M1.temp
temp.M3[,k]=1
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M1)
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(x)+med0$BUGSoutput$sims.list$beta%*%t(temp.M3)
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu3),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu1),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
bpart[,,k-catm1[j,1]+1]=tmean3-tmean1
}
mu_Mc1<-array(0,c(N1,N,cat2[j]-1))
mu_Mc0<-array(0,c(N1,N,cat2[j]-1))
for (k in 2:cat2[j]){
mu_Mc1[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x3.temp))
mu_Mc0[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x1.temp))
}
sum_Mc1 <-apply(mu_Mc1,c(1,2),sum)+1
sum_Mc0 <-apply(mu_Mc0,c(1,2),sum)+1
for (k in 2:cat2[j])
ie2[,,catm[j],l]=ie2[,,catm[j],l]+(mu_Mc1[,,k-1]/sum_Mc1-mu_Mc0[,,k-1]/sum_Mc0)*bpart[,,k-1]
aie2[,catm[j],l]<-apply(ie2[,,catm[j],l],1,mean,na.rm=T)
}
}
if(y.type==2)
{temp.x1=x
temp.x3=x
if(l%in%data0$contpred1.0)
{tt1=match(l,data0$contpred1.0)
temp.x3[,data0$contpred2[tt1,1]:data0$contpred2[tt1,2]]=data0$pred3[,data0$contpred3[tt1,1]:data0$contpred3[tt1,2]]}
else if(l%in%data0$binpred1.0)
{tt1=match(l,data0$binpred1.0)
temp.x1[,data0$binpred2[tt1]]=0
temp.x3[,data0$binpred2[tt1]]=1
deltap[l]=1}
else
{for (i in 1:nrow(data0$catpred1.0))
if(l%in%(data0$catpred1.0[i,1]:data0$catpred1.0[i,2]))
tt1=i
temp.x1[,data0$catpred2[tt1,1]:data0$catpred2[tt1,2]]=0
temp.x3[,data0$catpred2[tt1,1]:data0$catpred2[tt1,2]]=0
temp.x3[,l]=1
deltap[l]=1}
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(temp.x1)+med0$BUGSoutput$sims.list$beta%*%t(M1)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(temp.x3)+med0$BUGSoutput$sims.list$beta%*%t(M1)+matrix(med0$BUGSoutput$sims.list$beta0,N1,nrow(x))
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
de2.1=(exp(temp.mu3)/(1+exp(temp.mu3))-exp(temp.mu1)/(1+exp(temp.mu1)))/deltap[l]
de2=cbind(de2,apply(de2.1,1,mean,na.rm=T))}
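#Note: for a binary outcome (y.type==2) the direct effect is a risk
#difference: the change in expit(mu)=exp(mu)/(1+exp(mu)) when the predictor
#columns move from temp.x1 to temp.x3 while the mediators stay at their
#observed values M1, scaled by 1/deltap[l].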
else if(y.type==4)
{temp.x1=x
temp.x3=x
if(l%in%data0$contpred1.0)
{tt1=match(l,data0$contpred1.0)
temp.x3[,data0$contpred2[tt1,1]:data0$contpred2[tt1,2]]=data0$pred3[,data0$contpred3[tt1,1]:data0$contpred3[tt1,2]]}
else if(l%in%data0$binpred1.0)
{tt1=match(l,data0$binpred1.0)
temp.x1[,data0$binpred2[tt1]]=0
temp.x3[,data0$binpred2[tt1]]=1
deltap[l]=1}
else
{for (i in 1:nrow(data0$catpred1.0))
if(l%in%(data0$catpred1.0[i,1]:data0$catpred1.0[i,2]))
tt1=i
temp.x1[,data0$catpred2[tt1,1]:data0$catpred2[tt1,2]]=0
temp.x3[,data0$catpred2[tt1,1]:data0$catpred2[tt1,2]]=0
temp.x3[,l]=1
deltap[l]=1}
temp.mu1=med0$BUGSoutput$sims.list$c%*%t(temp.x1)+med0$BUGSoutput$sims.list$beta%*%t(M1)
temp.mu3=med0$BUGSoutput$sims.list$c%*%t(temp.x3)+med0$BUGSoutput$sims.list$beta%*%t(M1)
if(!is.null(cova))
{temp.mu1=temp.mu1+med0$BUGSoutput$sims.list$eta%*%t(cova)
temp.mu3=temp.mu3+med0$BUGSoutput$sims.list$eta%*%t(cova)}
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu3),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(temp.mu1),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
de2.1=(tmean3-tmean1)/deltap[l]
de2=cbind(de2,apply(de2.1,1,mean,na.rm=T))}
#method 3: parametric
#3.1. get M1(x) and M1(x+dx) for y
if(p1>0){
if(c1==1 & p1+p2+p3==1 & contm1[1,1]==contm1[1,2]){
if(nmc==1)
{mu_M2[,contm1[1,1],] <- med0$BUGSoutput$sims.list$alpha0[,contm1[1,1]]+as.matrix(med0$BUGSoutput$sims.list$alpha1[,contm1[1,1]])%*%t(x1.temp)
mu_M3[,contm1[1,1],] <- med0$BUGSoutput$sims.list$alpha0[,contm1[1,1]]+as.matrix(med0$BUGSoutput$sims.list$alpha1[,contm1[1,1]])%*%t(x3.temp)}
else
{mu_M2[,contm1[1,1],] <- med0$BUGSoutput$sims.list$alpha0[,contm1[1,1],mind[contm[1],]]%*%t(mcov[,mind[contm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1[,contm1[1,1]])%*%t(x1.temp)
mu_M3[,contm1[1,1],] <- med0$BUGSoutput$sims.list$alpha0[,contm1[1,1],mind[contm[1],]]%*%t(mcov[,mind[contm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1[,contm1[1,1]])%*%t(x3.temp)}
}
else
for (j in 1:p1){
for (k in contm1[j,1]:contm1[j,2]){
if(nmc==1 & p1+p2+p3==1)
{mu_M2[,k,] <- med0$BUGSoutput$sims.list$alpha0[,k]+med0$BUGSoutput$sims.list$alpha1[,k,]%*%t(x1.temp)
mu_M3[,k,] <- med0$BUGSoutput$sims.list$alpha0[,k]+med0$BUGSoutput$sims.list$alpha1[,k,]%*%t(x3.temp)}
else
{mu_M2[,k,] <- med0$BUGSoutput$sims.list$alpha0[,k,mind[contm[j],]]%*%t(mcov[,mind[contm[j],]])+med0$BUGSoutput$sims.list$alpha1[,k,]%*%t(x1.temp)
mu_M3[,k,] <- med0$BUGSoutput$sims.list$alpha0[,k,mind[contm[j],]]%*%t(mcov[,mind[contm[j],]])+med0$BUGSoutput$sims.list$alpha1[,k,]%*%t(x3.temp)}
}}}
if(p2>0){
if(p2==1 & c1==1)
{if(nmc==1)
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,1,mind[binm[1],]]%*%t(mcov[,mind[binm[1],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,1])%*%t(x3.temp)}
mu_M2[,binm[1],] <- exp(temp.x1)/(1+exp(temp.x1))
mu_M3[,binm[1],] <- exp(temp.x3)/(1+exp(temp.x3))
}
else{
for (k in 1:p2){
if(nmc==1 & p2==1)
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k]+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)}
else
{temp.x1=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x1.temp)
temp.x3=med0$BUGSoutput$sims.list$alpha0.b[,k,mind[binm[k],]]%*%t(mcov[,mind[binm[k],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.b[,k,])%*%t(x3.temp)}
mu_M2[,binm1[k],] <- exp(temp.x1)/(1+exp(temp.x1))
mu_M3[,binm1[k],] <- exp(temp.x3)/(1+exp(temp.x3))}
}}
if(p3>0){
for (j in 1:p3){
mu_Mc1<-array(0,c(N1,N,cat2[j]-1))
mu_Mc0<-array(0,c(N1,N,cat2[j]-1))
for (k in 2:cat2[j]){
mu_Mc1[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x3.temp))
mu_Mc0[,,k-1] <- exp(as.matrix(med0$BUGSoutput$sims.list$alpha0.c[,j,k-1,mind[catm[j],]])%*%t(mcov[,mind[catm[j],]])+as.matrix(med0$BUGSoutput$sims.list$alpha1.c[,j,k-1,])%*%t(x1.temp))
}
sum_Mc1 <-apply(mu_Mc1,c(1,2),sum)+1
sum_Mc0 <-apply(mu_Mc0,c(1,2),sum)+1
for (k in 2:cat2[j])
{mu_M2[,catm1[j,1]+k-2,]=mu_Mc0[,,k-1]/sum_Mc0
mu_M3[,catm1[j,1]+k-2,]=mu_Mc1[,,k-1]/sum_Mc1}
}}
#3.2. get x and dx for y
x1.temp1=x
x3.temp1=x
if(l%in%as.vector(data0$contpred1.0)){ #need the continuous predictor in its original format
i=match(l,data0$contpred1.0)
x3.temp1[,data0$contpred2[i,1]:data0$contpred2[i,2]]=data0$pred3[,data0$contpred3[i,1]:data0$contpred3[i,2]]
}
else if(l%in%data0$binpred1.0)
{i=match(l,data0$binpred1.0)
x1.temp1[,data0$binpred2[i]]=0
x3.temp1[,data0$binpred2[i]]=1
deltap[l]=1}
else{ #categorical predictor
for (i in 1:nrow(data0$catpred1.0))
if(l%in%(data0$catpred1.0[i,1]:data0$catpred1.0[i,2]))
{x1.temp1[,data0$catpred2[i,1]:data0$catpred2[i,2]]=0
x3.temp1[,data0$catpred2[i,1]:data0$catpred2[i,2]]=0
di=match(l,data0$catpred1.0[i,1]:data0$catpred1.0[i,2])
x3.temp1[,data0$catpred2[i,1]+di-1]=1}
deltap[l]=1
}
#3.3. get the total effect
temp1=array(0,c(nrow(M1)+1,ncol(M1),N1))
temp1[2:(nrow(M1)+1),,]=mu_M2
temp1[1,,]=t(med0$BUGSoutput$sims.list$beta)
temp2=temp1
temp2[2:(nrow(M1)+1),,]=mu_M3
if(is.null(cova))
{mu_y0<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + t(apply(temp1,3,matrix.prod))
mu_y1<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + t(apply(temp2,3,matrix.prod))
}
else
{mu_y0<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta%*%t(as.matrix(cova))+t(apply(temp1,3,matrix.prod))
mu_y1<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta%*%t(as.matrix(cova))+t(apply(temp2,3,matrix.prod))
} #get the linear part
if(y.type==2)
te3=(exp(mu_y1)/(1+exp(mu_y1))-exp(mu_y0)/(1+exp(mu_y0)))/deltap[l]
else if(y.type==1)
te3<- (mu_y1-mu_y0)/deltap[l]
else if (y.type==4)
{mu_y0<- mu_y0-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
mu_y1<- mu_y1-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y1),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y0),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
{tmean1=log(tmean1)
tmean3=log(tmean3)}
te3=(tmean3-tmean1)/deltap[l]
}
ate3[,l]=apply(te3,1,mean,na.rm=T)
if(y.type==4){
if(multi)
omu3[,l]=apply(exp(tmean1),1,mean,na.rm=T)
else
omu3[,l]=apply(tmean1,1,mean,na.rm=T)}
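#Note: te3 is the model-based total effect on the outcome's natural scale:
#a mean difference for continuous y (y.type==1), a risk difference for
#binary y (y.type==2), and a difference of (log-transformed, when multi)
#truncated Weibull means for survival y (y.type==4); omu3 keeps the
#baseline mean outcome for reference.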
#3.4. calculate the ie
j1=sample(1:N,size=N*N1,replace=T)
j2=sample(1:N,size=N*N1,replace=T)
#3.4.1. continuous mediators
if(p1>0){
for (j in 1:p1){
temp1.1=temp1
temp1.2=temp2
for (i in contm1[j,1]:contm1[j,2])
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
if(is.null(cova))
{mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) +t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) +t(apply(temp1.2,3,matrix.prod)) }
else{
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
if(y.type==1)
ie3[,,contm[j],l]=te3-(mu_y1.2-mu_y0.2)/deltap[l]
else if(y.type==2)
ie3[,,contm[j],l]=te3-(exp(mu_y1.2)/(1+exp(mu_y1.2))-exp(mu_y0.2)/(1+exp(mu_y0.2)))/deltap[l]
else if (y.type==4)
{mu_y0.2<- mu_y0.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
mu_y1.2<- mu_y1.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y1.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean3=log(tmean3)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y0.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean1=log(tmean1)
ie3[,,contm[j],l]<-te3-(tmean3-tmean1)/deltap[l]
}
}}
#3.4.2. binary mediators
if(p2>0){
for (k in 1:p2){
temp1.1=temp1
temp1.2=temp2
temp1.1[2:(nrow(M1)+1),binm1[k],]=matrix(M1[j1,binm1[k]],N,N1)
temp1.2[2:(nrow(M1)+1),binm1[k],]=matrix(M1[j1,binm1[k]],N,N1) #j2/j1
if (is.null(cova)){
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
if(y.type==1)
ie3[,,binm[k],l]=te3-(mu_y1.2-mu_y0.2)/deltap[l]
else if(y.type==2)
ie3[,,binm[k],l]=te3-(exp(mu_y1.2)/(1+exp(mu_y1.2))-exp(mu_y0.2)/(1+exp(mu_y0.2)))/deltap[l]
else if (y.type==4)
{mu_y0.2<- mu_y0.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
mu_y1.2<- mu_y1.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y1.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean3=log(tmean3)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y0.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean1=log(tmean1)
ie3[,,binm[k],l]<-te3-(tmean3-tmean1)/deltap[l]
}
}}
if(p3>0){
for (j in 1:p3){
temp1.1=temp1
temp1.2=temp2
for (i in catm1[j,1]:catm1[j,2])
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
if(is.null(cova)){
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
if(y.type==1)
ie3[,,catm[j],l]=te3-(mu_y1.2-mu_y0.2)/deltap[l]
else if(y.type==2)
ie3[,,catm[j],l]=te3-(exp(mu_y1.2)/(1+exp(mu_y1.2))-exp(mu_y0.2)/(1+exp(mu_y0.2)))/deltap[l]
else if (y.type==4)
{mu_y0.2<- mu_y0.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
mu_y1.2<- mu_y1.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y1.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean3=log(tmean3)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y0.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean1=log(tmean1)
ie3[,,catm[j],l]<-te3-(tmean3-tmean1)/deltap[l]
}
}}
aie3[,,l]<-apply(array(ie3[,,,l],c(N1,N,p1+p2+p3)),c(1,3),mean,na.rm=T)
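#Note: ie3[,,m,l] stores draw-by-observation indirect effects of mediator m
#for predictor l; averaging over the observation dimension gives aie3, a
#posterior sample of each mediator's average indirect effect.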
#3.5. Calculate the de
temp1.1=temp1
temp1.2=temp2
for (i in 1:ncol(M1))
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
if(is.null(cova)){
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
if(y.type==1)
de3=(mu_y1.2-mu_y0.2)/deltap[l]
else if(y.type==2)
de3=(exp(mu_y1.2)/(1+exp(mu_y1.2))-exp(mu_y0.2)/(1+exp(mu_y0.2)))/deltap[l]
else if (y.type==4)
{mu_y0.2<- mu_y0.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
mu_y1.2<- mu_y1.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y1.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean3=log(tmean3)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y0.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean1=log(tmean1)
de3=(tmean3-tmean1)/deltap[l]
}
ade3[,l]=apply(de3,1,mean,na.rm=T)
#method 4: semi-parametric, for binary or categorical predictors
if(!l%in%data0$contpred1.0){
if(!is.null(data0$binpred1.0))
{if (l%in%data0$binpred1.0)
{M.0=data.frame(M1[x1[,l]==0,])
y.0=y[x1[,l]==0]}
else
{for(i in 1:nrow(data0$catpred1.0))
if(l%in%(data0$catpred1.0[i,1]:data0$catpred1.0[i,2]))
tt1=i
M.0=data.frame(M1[apply(x1[,data0$catpred1.0[tt1,1]:data0$catpred1.0[tt1,2]]==1,1,sum)==0,])
y.0=y[apply(x1[,data0$catpred1.0[tt1,1]:data0$catpred1.0[tt1,2]]==1,1,sum)==0]}}
else
{for(i in 1:nrow(data0$catpred1.0))
if(l%in%(data0$catpred1.0[i,1]:data0$catpred1.0[i,2]))
tt1=i
M.0=data.frame(M1[apply(x1[,data0$catpred1.0[tt1,1]:data0$catpred1.0[tt1,2]]==1,1,sum)==0,])
y.0=y[apply(x1[,data0$catpred1.0[tt1,1]:data0$catpred1.0[tt1,2]]==1,1,sum)==0]}
y.1=y[x1[,l]==1]
M.1=data.frame(M1[x1[,l]==1,])
j1=sample(1:N,size=N*N1,replace=T)
j2=sample(1:N,size=N*N1,replace=T)
n3=nrow(M.0)
n4=nrow(M.1)
j3=sample(1:n3,size=N*N1,replace = T)
j4=sample(1:n4,size=N*N1,replace = T)
for (i in 1:ncol(M1))
{mu.M0[,i,]=matrix(M.0[j3,i],N,N1)
mu.M1[,i,]=matrix(M.1[j4,i],N,N1)}
#4.1. get the total effect
mu_y0=matrix(y.0[j3],N1,N)
mu_y1=matrix(y.1[j4],N1,N)
temp1=array(0,c(nrow(M1)+1,ncol(M1),N1))
temp1[2:(nrow(M1)+1),,]=mu.M0
temp1[1,,]=t(med0$BUGSoutput$sims.list$beta)
temp2=temp1
temp2[2:(nrow(M1)+1),,]=mu.M1
if(is.null(cova))
{mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) +t(apply(temp1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) +t(apply(temp2,3,matrix.prod)) }
else{
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp2,3,matrix.prod))}
if(y.type==1)
te4<- (mu_y1.2-mu_y0.2)/deltap[l]
else if(y.type==2)
te4=(exp(mu_y1.2)/(1+exp(mu_y1.2))-exp(mu_y0.2)/(1+exp(mu_y0.2)))/deltap[l]
else if (y.type==4)
{mu_y0.2<- mu_y0.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
mu_y1.2<- mu_y1.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y1.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y0.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
{tmean1=log(tmean1)
tmean3=log(tmean3)}
te4=(tmean3-tmean1)/deltap[l]
}
ate4[,l]=apply(te4,1,mean,na.rm=T)
if(y.type==4){
if(multi)
omu4[,l]=apply(exp(tmean1),1,mean,na.rm=T)
else
omu4[,l]=apply(tmean1,1,mean,na.rm=T)}
#4.2. Get the ies
#ie for continuous mediators
if(p1>0){
for (j in 1:p1){
temp1.1=temp1
temp1.2=temp2
for (i in contm1[j,1]:contm1[j,2])
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
if(is.null(cova))
{mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) +t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) +t(apply(temp1.2,3,matrix.prod)) }
else{
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
if(y.type==1)
ie4[,,contm[j],l]=te4-(mu_y1.2-mu_y0.2)/deltap[l]
else if(y.type==2)
ie4[,,contm[j],l]=te4-(exp(mu_y1.2)/(1+exp(mu_y1.2))-exp(mu_y0.2)/(1+exp(mu_y0.2)))/deltap[l]
else if (y.type==4)
{mu_y0.2<- mu_y0.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
mu_y1.2<- mu_y1.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y1.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean3=log(tmean3)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y0.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean1=log(tmean1)
ie4[,,contm[j],l]=te4-(tmean3-tmean1)/deltap[l]
}
}}
#ie for binary mediators
if(p2>0){
for (k in 1:p2){
temp1.1=temp1
temp1.2=temp2
temp1.1[2:(nrow(M1)+1),binm1[k],]=matrix(M1[j1,binm1[k]],N,N1)
temp1.2[2:(nrow(M1)+1),binm1[k],]=matrix(M1[j1,binm1[k]],N,N1) #j2/j1
if (is.null(cova)){
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
if(y.type==1)
ie4[,,binm[k],l]=te4-(mu_y1.2-mu_y0.2)/deltap[l]
else if(y.type==2)
ie4[,,binm[k],l]=te4-(exp(mu_y1.2)/(1+exp(mu_y1.2))-exp(mu_y0.2)/(1+exp(mu_y0.2)))/deltap[l]
else if (y.type==4)
{mu_y0.2<- mu_y0.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
mu_y1.2<- mu_y1.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y1.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean3=log(tmean3)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y0.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean1=log(tmean1)
ie4[,,binm[k],l]=te4-(tmean3-tmean1)/deltap[l]
}
}}
#ie for categorical mediators
if(p3>0){
for (j in 1:p3){
temp1.1=temp1
temp1.2=temp2
for (i in catm1[j,1]:catm1[j,2])
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
if(is.null(cova)){
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
if(y.type==1)
ie4[,,catm[j],l]=te4-(mu_y1.2-mu_y0.2)/deltap[l]
else if(y.type==2)
ie4[,,catm[j],l]=te4-(exp(mu_y1.2)/(1+exp(mu_y1.2))-exp(mu_y0.2)/(1+exp(mu_y0.2)))/deltap[l]
else if (y.type==4)
{mu_y0.2<- mu_y0.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
mu_y1.2<- mu_y1.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y1.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean3=log(tmean3)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y0.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean1=log(tmean1)
ie4[,,catm[j],l]=te4-(tmean3-tmean1)/deltap[l]
}
}}
aie4[,,l]<-apply(array(ie4[,,,l],c(N1,N,p1+p2+p3)),c(1,3),mean,na.rm=T)
#4.3. Calculate the de
temp1.1=temp1
temp1.2=temp2
for (i in 1:ncol(M1))
{temp1.1[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)
temp1.2[2:(nrow(M1)+1),i,]=matrix(M1[j1,i],N,N1)} #j2/j1
if(is.null(cova)){
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + t(apply(temp1.2,3,matrix.prod))}
else{
mu_y0.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x1.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.1,3,matrix.prod))
mu_y1.2<- matrix(med0$BUGSoutput$sims.list$beta0,N1,N) + med0$BUGSoutput$sims.list$c%*%t(x3.temp1) + med0$BUGSoutput$sims.list$eta%*%t(cova)+t(apply(temp1.2,3,matrix.prod))}
if(y.type==1)
de4=(mu_y1.2-mu_y0.2)/deltap[l]
else if(y.type==2)
de4=(exp(mu_y1.2)/(1+exp(mu_y1.2))-exp(mu_y0.2)/(1+exp(mu_y0.2)))/deltap[l]
else if (y.type==4)
{mu_y0.2<- mu_y0.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
mu_y1.2<- mu_y1.2-matrix(med0$BUGSoutput$sims.list$beta0,N1,N)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y1.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean3=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean3=log(tmean3)
vec=cbind(med0$BUGSoutput$sims.list$r,
as.vector(med0$BUGSoutput$sims.list$lambda^(-1/med0$BUGSoutput$sims.list$r))*(1/apply(exp(mu_y0.2),2,expp, 1/med0$BUGSoutput$sims.list$r)))
tmean1=t(apply(vec,1,weib.trunc.mean,right=tmax))
if(multi)
tmean1=log(tmean1)
de4=(tmean3-tmean1)/deltap[l]
}
ade4[,l]=apply(de4,1,mean,na.rm=T)
}
}
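#total effects: sum of the average indirect effects plus the direct effect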
if(y.type==1)
de2=de1
ate1=apply(aie1,c(1,3),sum)+de1
ate2=apply(aie2,c(1,3),sum)+de2
colnames(aie4)=colnames(M2)
colnames(aie1)=colnames(M2)
colnames(aie2)=colnames(M2)
colnames(aie3)=colnames(M2)
}
#detach(med0$BUGSoutput$sims.list)
result=list(aie1=aie1, ade1=de1, ate1=ate1, aie2=aie2, ade2=de2, ate2=ate2,
aie3=aie3, ade3=de3, ate3=ate3, aie4=aie4, ade4=ade4, ate4=ate4,
sims.list=med0,data0=data0,omu3=omu3,omu4=omu4) #ie2=ie2, med0$BUGSoutput$sims.list
class(result)='bma.bx.cy'
return(result)
}
summary.bma.bx.cy<-function(object, ..., plot= TRUE, RE=TRUE, quant=c(0.025, 0.25, 0.5, 0.75,0.975),digit=4,method=3)
{
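#posterior summaries: mean, sd, and quantiles at the requested probabilities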
summary.med<-function(vec,qua=quant, digit=digit)
{c(mean=mean(vec,na.rm=T),sd=sd(vec,na.rm=T),quantile(vec,qua,na.rm=T))
}
summary.med.re<-function(vec,vec1,qua=quant, digit=digit)
{vec=vec/vec1
c(mean=mean(vec,na.rm=T),sd=sd(vec,na.rm=T),quantile(vec,qua,na.rm=T))}
if(object$data0$y_type!=3){
result1<-array(0,c(7,2+ncol(object$aie1),ncol(object$ade1)))
result1.re<-array(0,c(7,1+ncol(object$aie1),ncol(object$ade1)))
result2<-result1
result2.re<-result1.re
result3<-result1
result3.re<-result1.re
result4<-result1
result4.re<-result1.re
for(j in 1:ncol(object$ade1)){
result1[,,j]<-apply(cbind(TE=object$ate1[,j],DE=object$ade1[,j],object$aie1[,,j]),2,summary.med)
result1.re[,,j]<-apply(cbind(DE=object$ade1[,j],object$aie1[,,j]),2,summary.med.re,object$ate1[,j])
result2[,,j]<-apply(cbind(TE=object$ate2[,j],DE=object$ade2[,j],object$aie2[,,j]),2,summary.med)
result2.re[,,j]<-apply(cbind(DE=object$ade2[,j],object$aie2[,,j]),2,summary.med.re,object$ate2[,j])
result3[,,j]<-apply(cbind(TE=object$ate3[,j],DE=object$ade3[,j],object$aie3[,,j]),2,summary.med)
result3.re[,,j]<-apply(cbind(DE=object$ade3[,j],object$aie3[,,j]),2,summary.med.re,object$ate3[,j])
result4[,,j]<-apply(cbind(TE=object$ate4[,j],DE=object$ade4[,j],object$aie4[,,j]),2,summary.med)
result4.re[,,j]<-apply(cbind(DE=object$ade4[,j],object$aie4[,,j]),2,summary.med.re,object$ate4[,j])}}
else{
result1<-array(0,c(7,2+ncol(object$aie1),ncol(object$ade1),dim(object$ade1)[3]))
result1.re<-array(0,c(7,1+ncol(object$aie1),ncol(object$ade1),dim(object$ade1)[3]))
result2<-result1
result2.re<-result1.re
result3<-result1
result3.re<-result1.re
result4<-result1
result4.re<-result1.re
for(j in 1:ncol(object$ade1)){
for(q1 in 1:dim(object$ade1)[3]){
result1[,,j,q1]<-apply(cbind(TE=object$ate1[,j,q1],DE=object$ade1[,j,q1],object$aie1[,,j,q1]),2,summary.med)
result1.re[,,j,q1]<-apply(cbind(DE=object$ade1[,j,q1],object$aie1[,,j,q1]),2,summary.med.re,object$ate1[,j,q1])
result2[,,j,q1]<-apply(cbind(TE=object$ate2[,j,q1],DE=object$ade2[,j,q1],object$aie2[,,j,q1]),2,summary.med)
result2.re[,,j,q1]<-apply(cbind(DE=object$ade2[,j,q1],object$aie2[,,j,q1]),2,summary.med.re,object$ate2[,j,q1])
result3[,,j,q1]<-apply(cbind(TE=object$ate3[,j,q1],DE=object$ade3[,j,q1],object$aie3[,,j,q1]),2,summary.med)
result3.re[,,j,q1]<-apply(cbind(DE=object$ade3[,j,q1],object$aie3[,,j,q1]),2,summary.med.re,object$ate3[,j,q1])
result4[,,j,q1]<-apply(cbind(TE=object$ate4[,j,q1],DE=object$ade4[,j,q1],object$aie4[,,j,q1]),2,summary.med)
result4.re[,,j,q1]<-apply(cbind(DE=object$ade4[,j,q1],object$aie4[,,j,q1]),2,summary.med.re,object$ate4[,j,q1])}
}
}
c.names=colnames(object$aie1)
colnames(result1)=c("TE","DE",c.names)
colnames(result2)=c("TE","DE",c.names)
colnames(result3)=c("TE","DE",c.names)
colnames(result4)=c("TE","DE",c.names)
colnames(result1.re)=c("DE",c.names)
colnames(result2.re)=c("DE",c.names)
colnames(result3.re)=c("DE",c.names)
colnames(result4.re)=c("DE",c.names)
rownames(result1)=c("mean","sd",paste("q",quant,sep="_"))
rownames(result2)=c("mean","sd",paste("q",quant,sep="_"))
rownames(result3)=c("mean","sd",paste("q",quant,sep="_"))
rownames(result4)=c("mean","sd",paste("q",quant,sep="_"))
rownames(result1.re)=c("mean","sd",paste("q",quant,sep="_"))
rownames(result2.re)=c("mean","sd",paste("q",quant,sep="_"))
rownames(result3.re)=c("mean","sd",paste("q",quant,sep="_"))
rownames(result4.re)=c("mean","sd",paste("q",quant,sep="_"))
result=list(result1=result1,result1.re=result1.re,result2=result2,result2.re=result2.re,
result3=result3,result3.re=result3.re,result4=result4,result4.re=result4.re,method=method,
digit=digit,plot=plot,RE=RE,y.type=object$data0$y_type)
class(result)="summary.bma"
result
}
print.summary.bma<-function (x, ..., digit = x$digit, method=x$method, RE=x$RE)
{plot.sum<-function(obj1,main1="Estimated Effects")
{oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
re <- obj1[1,]
upper <- obj1[7,]
lower <- obj1[3,]
name1 <- colnames(obj1)
par(mfrow = c(1, 1), mar = c(1, 6, 1, 1), oma = c(3, 2, 2, 4))
bp <- barplot2(re, horiz = TRUE, main = main1,
names.arg = name1, plot.ci = TRUE, ci.u = upper,
ci.l = lower, cex.names = 0.9, beside = FALSE,
cex.axis = 0.9, las = 1, xlim = range(c(upper,lower), na.rm = TRUE,finite=T),
col = rainbow(length(re), start = 3/6, end = 4/6))}
if(x$y.type!=3){
for (j in 1:dim(x$result1)[3])
{cat('\n For Predictor', j, ':\n')
if (method==1)
{if(!RE)
{cat('Estimated Effects for Method 1:\n')
print(round(x$result1[,,j],digits = digit))
if(x$plot)
plot.sum(x$result1[,,j],main1=paste("Estimated Effects Using Method", method, "(predictor",j,")"))}
else{
cat('Estimated Relative Effects for Method 1:\n')
print(round(x$result1.re[,,j],digits = digit))
if(x$plot)
plot.sum(x$result1.re[,,j],main1=paste("Estimated Relative Effects Using Method", method, "(predictor",j,")"))}}
else if(method==2){
if(!RE)
{cat('Estimated Effects for Method 2:\n')
print(round(x$result2[,,j],digits = digit))
if(x$plot)
plot.sum(x$result2[,,j],main1=paste("Estimated Effects Using Method", method, "(predictor",j,")"))}
else{
cat('Estimated Relative Effects for Method 2:\n')
print(round(x$result2.re[,,j],digits = digit))
if(x$plot)
plot.sum(x$result2.re[,,j],main1=paste("Estimated Relative Effects Using Method", method, "(predictor",j,")"))}
}
else if(method==3){
if(!RE)
{cat('Estimated Effects for Method 3:\n')
print(round(x$result3[,,j],digits = digit))
if(x$plot)
plot.sum(x$result3[,,j],main1=paste("Estimated Effects Using Method", method, "(predictor",j,")"))}
else{
cat('Estimated Relative Effects for Method 3:\n')
print(round(x$result3.re[,,j],digits = digit))
if(x$plot)
plot.sum(x$result3.re[,,j],main1=paste("Estimated Relative Effects Using Method", method, "(predictor",j,")"))}
}
else if(method==4){
if(!RE)
{cat('Estimated Effects for Method 4:\n')
print(round(x$result4[,,j],digits = digit))
if(x$plot)
plot.sum(x$result4[,,j],main1=paste("Estimated Effects Using Method", method, "(predictor",j,")"))}
else{
cat('Estimated Relative Effects for Method 4:\n')
print(round(x$result4.re[,,j],digits = digit))
if(x$plot)
plot.sum(x$result4.re[,,j],main1=paste("Estimated Relative Effects Using Method", method, "(predictor",j,")"))}
}
}}
else{
for (j in 1:dim(x$result1)[3])
for (q1 in 1:dim(x$result1)[4])
{cat('\n For Predictor', j, 'outcome',q1, ':\n')
if (method==1)
{if(!RE)
{cat('Estimated Effects for Method 1:\n')
print(round(x$result1[,,j,q1],digits = digit))
if(x$plot)
plot.sum(x$result1[,,j,q1],main1=paste("Estimated Effects Using Method", method, "(predictor",j,"outcome",q1,")"))}
else{
cat('Estimated Relative Effects for Method 1:\n')
print(round(x$result1.re[,,j,q1],digits = digit))
if(x$plot)
plot.sum(x$result1.re[,,j,q1],main1=paste("Estimated Relative Effects Using Method", method, "(predictor",j,"outcome",q1,")"))}}
else if(method==2){
if(!RE)
{cat('Estimated Effects for Method 2:\n')
print(round(x$result2[,,j,q1],digits = digit))
if(x$plot)
plot.sum(x$result2[,,j,q1],main1=paste("Estimated Effects Using Method", method, "(predictor",j,"outcome",q1,")"))}
else{
cat('Estimated Relative Effects for Method 2:\n')
print(round(x$result2.re[,,j,q1],digits = digit))
if(x$plot)
plot.sum(x$result2.re[,,j,q1],main1=paste("Estimated Relative Effects Using Method", method, "(predictor",j,"outcome",q1,")"))}
}
else if(method==3){
if(!RE)
{cat('Estimated Effects for Method 3:\n')
print(round(x$result3[,,j,q1],digits = digit))
if(x$plot)
plot.sum(x$result3[,,j,q1],main1=paste("Estimated Effects Using Method", method, "(predictor",j,"outcome",q1,")"))}
else{
cat('Estimated Relative Effects for Method 3:\n')
print(round(x$result3.re[,,j,q1],digits = digit))
if(x$plot)
plot.sum(x$result3.re[,,j,q1],main1=paste("Estimated Relative Effects Using Method", method, "(predictor",j,"outcome",q1,")"))}
}
else if(method==4){
if(!RE)
{cat('Estimated Effects for Method 4:\n')
print(round(x$result4[,,j,q1],digits = digit))
if(x$plot)
plot.sum(x$result4[,,j,q1],main1=paste("Estimated Effects Using Method", method, "(predictor",j,"outcome",q1,")"))}
else{
cat('Estimated Relative Effects for Method 4:\n')
print(round(x$result4.re[,,j,q1],digits = digit))
if(x$plot)
plot.sum(x$result4.re[,,j,q1],main1=paste("Estimated Relative Effects Using Method", method, "(predictor",j,"outcome",q1,")"))}
}
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianMediationA/R/bma.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
error=TRUE,
warning=FALSE
)
## ---- include=FALSE-----------------------------------------------------------
library(BayesianMediationA)
#source('O:/My Documents/My Research/Research/Multilevel mediation analysis/mlma package/current version/R/mlma.r')
## -----------------------------------------------------------------------------
#use a simulation
set.seed(1)
N=100
alpha=0.5
x=rnorm(N,0,1)
x=ifelse(x>0,1,0) #the binary exposure. If you want to use a continuous exposure, remove this line
e1=rnorm(N,0,1)
M=alpha*x+e1 #the mediator
lambda=0.01
rho=1
beta=1.2
c=-1
rateC=0.001
v=runif(n=N)
Tlat =(- log(v) / (lambda * exp(c*x+M*beta)))^(1 / rho) #the event time
C=rexp(n=N, rate=rateC) #the censoring time
time=pmin(Tlat, C)
status <- as.numeric(Tlat <= C)
example2 <-cbind(x, M, time, status) #the dataset
## -----------------------------------------------------------------------------
data("weight_behavior")
#n.iter and n.burnin are set to be very small, should be adjusted
#binary predictor
test.b.c<- bma.bx.cy(pred=weight_behavior[,3], m=weight_behavior[,c(14,12,13)],
y=weight_behavior[,1],n.iter=500,n.burnin = 100)
summary(test.b.c)
## -----------------------------------------------------------------------------
test.ca.c<- bma.bx.cy(pred=weight_behavior[,4], m=weight_behavior[,c(5:11,12:14)],
                  y=weight_behavior[,1],cova=weight_behavior[,2],mcov=weight_behavior[,c(2,5)],
                  mclist = list(1,2),n.iter=500,n.burnin = 100)
summary(test.ca.c)
## -----------------------------------------------------------------------------
test.c.c.2<- bma.bx.cy(pred=weight_behavior[,2], m=weight_behavior[,12:14],
y=weight_behavior[,1],fpy=list(1,c("x","x^2")),n.iter=5,n.burnin = 1)
summary(test.c.c.2,method=1)
## -----------------------------------------------------------------------------
test.m.c<- bma.bx.cy(pred=weight_behavior[,2:4], m=weight_behavior[,12:14],
y=weight_behavior[,1],n.iter=10,n.burnin = 1)
summary(test.m.c,method=3)
## -----------------------------------------------------------------------------
test.m.b<- bma.bx.cy(pred=weight_behavior[,3], m=weight_behavior[,12:14],
y=weight_behavior[,15],cova=weight_behavior[,5],n.iter=500,n.burnin = 100)
summary(test.m.b,method=2)
## -----------------------------------------------------------------------------
test.m.t.1<- bma.bx.cy(pred=example2[,"x"], m=example2[,"M"], y=Surv(example2[,"time"],example2[,"status"]), inits=function(){ list(r=1,lambda=0.01)},n.iter=10,n.burnin = 1)
temp1=summary(test.m.t.1)
print(temp1,method=1,RE=FALSE)
## -----------------------------------------------------------------------------
test.m.c<- bma.bx.cy(pred=weight_behavior[,2:4], m=weight_behavior[,12:13],
y=as.factor(weight_behavior[,14]),cova=weight_behavior[,5],n.iter=5,n.burnin = 1)
summary(test.m.c,method=3)
|
/scratch/gouwar.j/cran-all/cranData/BayesianMediationA/inst/doc/BayesianMediationAvignette.R
|
---
title: "Examples for Bayesian Mediation Analysis"
author:
- Qingzhao Yu and Bin Li
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
keep_md: true
fig_caption: yes
bibliography: vignette.bib
link-citations: true
vignette: >
%\VignetteIndexEntry{Examples for Bayesian Mediation Analysis}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
error=TRUE,
warning=FALSE
)
```
## Package installation
The R package BayesianMediationA is created for linear or nonlinear mediation analysis with binary, continuous, or time-to-event outcomes under the Bayesian setting [@Yu2022_2, @Yu2022]. The vignette is composed of three parts. Part I focuses on the data sets used for examples, and part II on how to transform variables and prepare data for the mediation analysis. Part III walks through the function on Bayesian mediation analysis, and explains how to make inferences on mediation effects of interests.
To use the R package BayesianMediationA, we first install the package in R (`install.packages("BayesianMediationA")`) and load it.
```{r, include=FALSE}
library(BayesianMediationA)
#source('O:/My Documents/My Research/Research/Multilevel mediation analysis/mlma package/current version/R/mlma.r')
```
## The Data Set
We use the data set `weight_behavior`, which is included in the package, for examples of mediation analysis with binary or continuous outcomes [@Yu2017a]. In addition, a dataset is generated for the time-to-event outcome as follows:
```{r}
#use a simulation
set.seed(1)
N=100
alpha=0.5
x=rnorm(N,0,1)
x=ifelse(x>0,1,0) #the binary exposure. If you want to use a continuous exposure, remove this line
e1=rnorm(N,0,1)
M=alpha*x+e1 #the mediator
lambda=0.01
rho=1
beta=1.2
c=-1
rateC=0.001
v=runif(n=N)
Tlat =(- log(v) / (lambda * exp(c*x+M*beta)))^(1 / rho) #the event time
C=rexp(n=N, rate=rateC) #the censoring time
time=pmin(Tlat, C)
status <- as.numeric(Tlat <= C)
example2 <-cbind(x, M, time, status) #the dataset
```
## Data Transformation and Organization
The $data\_org$ function is used to transform variables before the mediation analysis. The exposure variable(s) ($pred$), the mediator(s) ($m$), and the response variable ($y$) are required inputs. If $y$ is binary or categorical, its reference level is given in the argument $levely$. Similarly, the reference levels for the exposure variables and mediators are given in the arguments $predref$ and $mref$, respectively.
Other input data include $cova$, the covariates used to explain $y$, and $mcov$, the covariates for the mediators. Covariates for $y$ are predictors of $y$ that are not explained by the exposure variables $pred$. Covariates for mediators are explanatory variables for the mediators other than the exposure variable(s). Accompanying $mcov$, the argument $mclist$ specifies different covariates for different mediators. If $mclist$ is NULL (the default), all covariates in $mcov$ are used for all mediators in $m$. Otherwise, the first item of $mclist$ lists the column numbers/names of the mediators in $m$ that are to be explained by covariates in $mcov$; the following items give the covariates in $mcov$ for those mediators, in the order of the first item. $NA$ is used when no covariate is to be used for the corresponding mediator. For example, `mclist=list(c(2,4),NA,2:3)` means that all mediators use all covariates in $mcov$, except for the second mediator, which uses none of the covariates, and the fourth mediator, which uses only columns 2 to 3 of $mcov$ as its covariates.
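As a hypothetical sketch (the objects `x`, `m`, `y`, and `mcov` stand in for the user's own data), the covariate assignment in that example would be requested as:
```
# mediator 2 uses no covariates; mediator 4 uses columns 2 to 3 of mcov;
# all other mediators use every column of mcov
test.mc <- bma.bx.cy(pred = x, m = m, y = y, mcov = mcov,
                     mclist = list(c(2, 4), NA, 2:3))
```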
Variables can be transformed to capture potential nonlinear relationships. The transformation functions are specified in the arguments $fpy$, $fmy$, and $fpm$. In the argument names, `p` stands for the predictors, `m` for the mediators, and `y` for the outcome. Namely, $fpy$ defines the transformation functions of the predictors in explaining the outcome $y$. Its first item lists the column numbers/variable names of the exposure variables in $pred$ that need to be transformed to explain $y$. Following the order of the first item, each of the remaining items of $fpy$ lists the transformation functional expressions for that predictor. Exposures/predictors not specified in the list are not transformed in any way in explaining $y$. For example, list(1,c("x^2","log(x)")) means that the first column of $pred$ is transformed to square and log forms to explain $y$. $fmy$ is defined in the same way for the transformed mediators in explaining the outcome variable $y$.
$fpm$ is the transformation-function-expression list for the exposure variable(s) ($pred$) in explaining the mediators ($m$). Its definition is similar to those of $fpy$ and $fmy$ except that the first item is a matrix with two columns: the first column gives the column numbers of the mediators in $m$ that are explained by the transformed predictor(s) indicated by the second column, and the second column gives the column number of the exposure in $pred$ that is transformed to explain the mediator identified in the first column of the same row. Following the order of the rows of the first item, each of the remaining items of $fpm$ lists the transformation functional expressions for the exposure (identified by column 2) in explaining each mediator (identified by column 1). Mediators not specified in the list are explained by the original format of the exposures in $pred$. For example, `fpm=list(matrix(c(1,2,1,1),2,2), "x^2", c("x","x^2"))` means that $pred[,1]^2$ is used to explain $m[,1]$, and both $pred[,1]$ and $pred[,1]^2$ are used to explain $m[,2]$.
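In code, the example above is written as:
```
# pred[,1]^2 explains m[,1]; pred[,1] and pred[,1]^2 explain m[,2]
fpm <- list(matrix(c(1, 2, 1, 1), 2, 2), "x^2", c("x", "x^2"))
```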
Finally, $deltap$ and $deltam$ define the changes in the predictors and mediators, respectively, used in calculating the mediation effects [@Yu2022, @Yu2022_2].
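For instance, in a hypothetical call (the function's defaults are used when these arguments are omitted; `x`, `m`, and `y` are placeholders):
```
test.delta <- bma.bx.cy(pred = x, m = m, y = y, deltap = 0.01, deltam = 0.01)
```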
Users do not run the $data\_org$ function by itself. All arguments are included in the main Bayesian mediation analysis function $bma.bx.cy$, which runs the $data\_org$ function in it to organize data for mediation analysis.
## The function $bma.bx.cy$ for Bayesian mediation analysis
The function $bma.bx.cy$ is used to perform the Bayesian mediation analysis. The function first calls $data\_org$, which involves all the arguments described above. In addition, the prior distributions in the generalized linear models can be set up. By default, all coefficients in the Bayesian generalized linear models are independently normally distributed with mean $0$ and the precision specified by $speci$, which defaults to $10^{-6}$.
The prior means and variance-covariance matrices can be altered. $mu$ defines the prior mean for the coefficients of the mediators, $muc$ the prior mean vector (of length $p2$) for the coefficients of the exposure(s), and $mucv$ the prior mean vector for the coefficients of the covariate(s) in the final model for $y$. Correspondingly, $Omega$, $Omegac$, and $Omegacv$ define the prior variance-covariance matrices for the coefficients of the mediators, exposure(s), and covariates, respectively.
The prior distributions for coefficients of intercept/covariates, and exposures in explaining mediators are also assumed to be normal. The default prior mean and variance-covariance matrix are as above, and can be changed in $mu0.1$ and $mu1.1$ for means, and $Omega0.1$ and $Omega1.1$ for variance-covariance matrices respectively for the intercept/covariates and exposures. Separately, the mean and variance-covariance matrix of the prior distributions for coefficients for estimating the mediators can be specified by $mu0$, $mu1$, $Omega0$, and $Omega1$ followed by $.a$, $.b$, and $.c$ for continuous, binary, or categorical mediators respectively.
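As an illustrative sketch only (the length of $mu$ and the dimensions of $Omega$ must match the number of mediator coefficients in the final model; `x`, `m`, and `y` are placeholders):
```
# informative priors for three mediator coefficients in the model for y
test.prior <- bma.bx.cy(pred = x, m = m, y = y,
                        mu = rep(0, 3), Omega = diag(1, 3),
                        n.iter = 500, n.burnin = 100)
```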
The function calls `jags` to perform the Bayesian analysis. Default models are fitted for the mediators and outcomes: a continuous response is fitted with a linear regression model, a binary response with logistic regression, a categorical response with multinomial logistic regression, and a time-to-event response with a Weibull proportional hazards model.
### Binary predictor and continuous outcome
In the following example, the exposure variable is sex and the outcome is bmi. The variables exercises (in hours), sports (in a sport team or not), and sweat (having sweating activities or not) are used to explain the sex difference in bmi. The $summary$ function returns inferences on the estimated effects, together with a graph of the estimated effects and their $95\%$ credible sets.
```{r}
data("weight_behavior")
#n.iter and n.burnin are set to be very small, should be adjusted
#binary predictor
test.b.c<- bma.bx.cy(pred=weight_behavior[,3], m=weight_behavior[,c(14,12,13)],
y=weight_behavior[,1],n.iter=500,n.burnin = 100)
summary(test.b.c)
```
The $summary$ function returns inference results for all four methods. By default, the relative effects from method 3 are shown. To show the results of other methods, set the argument $method$ accordingly. Method 4 is calculated for binary/categorical exposures only. To see the effect estimates rather than the relative effects, set $RE=FALSE$.
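For example, with the fitted object above:
```
summary(test.b.c, method = 1)              # results from method 1
summary(test.b.c, method = 4, RE = FALSE)  # effect estimates from method 4
```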
### Categorical predictor
The following example uses a categorical exposure: race. In the data set, race takes six categories: empty (not reported), other, mixed, Caucasian, Indian, and African. `CAUCASIAN` is used as the reference group; each other race group is compared with Caucasian in bmi, and the relative effects from the mediators are reported by the $summary$ function.
```{r}
test.ca.c<- bma.bx.cy(pred=weight_behavior[,4], m=weight_behavior[,c(5:11,12:14)],
                  y=weight_behavior[,1],cova=weight_behavior[,2],mcov=weight_behavior[,c(2,5)],
                  mclist = list(1,2),n.iter=500,n.burnin = 100)
summary(test.ca.c)
```
The jags model fitted for the above analysis is as follows:
```
#the jags models for the outcomes and mediators
model {temp <- 1:nmc
for(i in 1:N){
mu_y[i] <- beta0 + inprod(c, x[i,]) + inprod(beta,M1[i,]) + inprod(eta,cova[i,])
y[i] ~ dnorm(mu_y[i],prec4) #the final model since y is continuous. The model is changed for
#different format of the outcome, as is shown in the following
#sections
for (j in 1:p1){ #the model for p1 continuous mediators
mu_M1[i,contm[j]] <- inprod(alpha0.a[j,mind[contm[j],]],mcov[i,temp[mind[contm[j],]]])+inprod(alpha1.a[j,1:c1],x1[i,])
M2[i,contm[j]] ~ dnorm(mu_M1[i,contm[j]],prec1[j])
for (k in contm1[j,1]:contm1[j,2]){
mu_M1_c[i,k] <- inprod(alpha0[k,mind[contm[j],]],mcov[i,mind[contm[j],]])+inprod(alpha1[k,1:c1],x1[i,])
M1[i,k] ~ dnorm(mu_M1_c[i,k],prec2[k])
}
}
for (k in 1:p2){ #the model for p2 binary mediators
logit(mu_M1[i,binm[k]]) <- inprod(alpha0.b[k,mind[binm[k],]],mcov[i,mind[binm[k],]])+inprod(alpha1.b[k,1:c1],x1[i,])
M2[i,binm[k]] ~ dbern(mu_M1[i,binm[k]])
}
for (j in 1:p3){ #the model for p3 categorical mediators
mu_Mc[i,j,1] <- 1 #baseline is the 1st category
for (k in 2:cat2[j]){
mu_Mc[i,j,k] <- exp(inprod(alpha0.c[j,k-1,mind[catm[j],]],mcov[i,mind[catm[j],]])+inprod(alpha1.c[j,k-1,1:c1],x1[i,]))
}
sum_Mc[i,j] <- sum(mu_Mc[i,j,1:cat2[j]])
for (l in 1:cat2[j])
{mu_Mc0[i,j,l] <- mu_Mc[i,j,l]/sum_Mc[i,j]}
M2[i,catm[j]] ~ dcat(mu_Mc0[i,j,1:cat2[j]])
}
}
beta[1:P] ~ dmnorm(mu[1:P], Omega[1:P, 1:P]) #prior distributions for coefficients of model for y
beta0 ~ dnorm(0, 1.0E-6)
c[1:c2] ~ dmnorm(muc[1:c2], Omegac[1:c2,1:c2])
eta[1:cv1] ~ dmnorm(mucv[1:cv1], Omegacv[1:cv1,1:cv1])
for(j in 1:P) #prior distributions for coefficients of models for the transformed mediator columns
{alpha1[j,1:c1] ~ dmnorm(mu1.1[j,1:c1], Omega1.1[1:c1, 1:c1])
alpha0[j,1:nmc] ~ dmnorm(mu0.1[j,1:nmc], Omega0.1[1:nmc, 1:nmc])}
for (i in 1:P){
var2[i] ~ dgamma(1,0.1)
prec2[i] <- 1/var2[i]
}
for(j in 1:p1) #prior distributions for coefficients of model for p1 continuous mediators
{alpha1.a[j,1:c1] ~ dmnorm(mu1.a[j,1:c1], Omega1.a[1:c1, 1:c1])
alpha0.a[j,1:nmc] ~ dmnorm(mu0.a[j,1:nmc], Omega0.a[1:nmc, 1:nmc])}
for (i in 1:p1){
var1[i] ~ dgamma(1,0.1)
prec1[i] <- 1/var1[i]
}
for(j in 1:p2) #prior distributions for coefficients of model for p2 binary mediators
{alpha1.b[j,1:c1] ~ dmnorm(mu1.b[j,1:c1], Omega1.b[1:c1, 1:c1])
alpha0.b[j,1:nmc] ~ dmnorm(mu0.b[j,1:nmc], Omega0.b[1:nmc, 1:nmc])
}
for (i in 1:p3){ #prior distributions for coefficients of model for p3 categorical mediators
for(j in 1:cat1)
{alpha1.c[i,j,1:c1] ~ dmnorm(mu1.c[j,1:c1], Omega1.c[1:c1, 1:c1])
alpha0.c[i,j,1:nmc] ~ dmnorm(mu0.c[j,1:nmc], Omega0.c[1:nmc, 1:nmc])}
}
var4 ~ dgamma(1,0.1) #the prior for the variance of y when it is continuous
prec4 <-1/var4
}
```
The rjags model can be revised when different priors or models are to be used. To do so, write the model to a file and pass the file name to the argument $filename$.
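As a sketch, assuming a model definition has been written to the hypothetical file `mymodel.txt` (following the structure shown above):
```
test.custom <- bma.bx.cy(pred = x, m = m, y = y,
                         filename = "mymodel.txt",
                         n.iter = 500, n.burnin = 100)
```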
### Use transformed continuous predictors for y
We can use transformed predictors for the outcome. The following is an example:
```{r}
test.c.c.2<- bma.bx.cy(pred=weight_behavior[,2], m=weight_behavior[,12:14],
y=weight_behavior[,1],fpy=list(1,c("x","x^2")),n.iter=5,n.burnin = 1)
summary(test.c.c.2,method=1)
```
We can also use multiple predictors:
```{r}
test.m.c<- bma.bx.cy(pred=weight_behavior[,2:4], m=weight_behavior[,12:14],
y=weight_behavior[,1],n.iter=10,n.burnin = 1)
summary(test.m.c,method=3)
```
### Binary outcome
The following is an example for the binary outcome overweight (yes or no):
```{r}
test.m.b<- bma.bx.cy(pred=weight_behavior[,3], m=weight_behavior[,12:14],
y=weight_behavior[,15],cova=weight_behavior[,5],n.iter=500,n.burnin = 100)
summary(test.m.b,method=2)
```
In this case, the default jags model for y is set as follows; it can be revised.
```
logit(mu_y[i]) <- beta0 + inprod(c, x[i,]) + inprod(beta,M1[i,]) + inprod(eta,cova[i,])
y[i] ~ dbern(mu_y[i])
```
### Time-to-Event outcome
The following is an example for a survival model:
```{r}
test.m.t.1<- bma.bx.cy(pred=example2[,"x"], m=example2[,"M"], y=Surv(example2[,"time"],example2[,"status"]), inits=function(){ list(r=1,lambda=0.01)},n.iter=10,n.burnin = 1)
temp1=summary(test.m.t.1)
print(temp1,method=1,RE=FALSE)
```
In this case, the default jags model for y is set as follows; it can be revised.
```
elinpred[i] <- exp(inprod(c, x[i,]) + inprod(beta,M1[i,]))
base[i] <- lambda*r*pow(y[i,1], r-1)
loghaz[i] <- log(base[i]*elinpred[i])
phi[i] <- 100000-y[i,2]*loghaz[i]-log(exp(-lambda*pow(y[i,1],r)*elinpred[i])-exp(-lambda*pow(tmax,r)*elinpred[i])) +log(1-exp(-lambda*pow(tmax,r)*elinpred[i]))
zero[i] ~ dpois(phi[i])
```
The following is an example of setting the priors for r and lambda:
```
r ~ dunif(0,10) # dunif(1,1.2)
lambda ~ dgamma(1,0.01)
```
### Categorical outcome
Finally, the following is an example for a categorical outcome:
```{r}
test.m.c<- bma.bx.cy(pred=weight_behavior[,2:4], m=weight_behavior[,12:13],
y=as.factor(weight_behavior[,14]),cova=weight_behavior[,5],n.iter=5,n.burnin = 1)
summary(test.m.c,method=3)
```
In this case, the default jags model for y is set as follows; it can be revised.
```
mu_y1[i,1] <- 1
for (k in 2:caty) #caty is the number of categories of y
{mu_y1[i,k] <- exp(beta0[k-1] + inprod(c[k-1,], x[i,]) + inprod(beta[k-1,],M1[i,]) + inprod(eta[k-1,],cova[i,]))}
sum_y[i] <- sum(mu_y1[i,1:caty])
for (l in 1:caty)
{mu_y[i,l] <- mu_y1[i,l]/sum_y[i]}
y[i] ~ dcat(mu_y[i,])
```
## References
|
/scratch/gouwar.j/cran-all/cranData/BayesianMediationA/inst/doc/BayesianMediationAvignette.Rmd
|
#' Bayesian Network modeling and analysis.
#'
#' BayesianNetwork is a Shiny web application for Bayesian Network modeling and
#' analysis.
#' @import bnlearn
#' @import heatmaply
#' @import plotly
#' @import rintrojs
#' @import lattice
#' @import networkD3
#' @import shiny
#' @import shinyAce
#' @import shinydashboard
#' @import shinyWidgets
#' @export
#' @seealso \url{http://paulgovan.github.io/BayesianNetwork/}
#' @return This function does not return a value.
#' @examples
#' if (interactive()) {
#' BayesianNetwork()
#' }
BayesianNetwork <- function() {
shiny::runApp(system.file('bn', package = 'BayesianNetwork'))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianNetwork/R/BayesianNetwork.R
|
require(bnlearn)
require(heatmaply)
require(lattice)
require(networkD3)
require(shiny)
require(shinydashboard)
|
/scratch/gouwar.j/cran-all/cranData/BayesianNetwork/inst/bn/dependencies.R
|
# By default, the file size limit is 5MB. It can be changed by
# setting this option. Here we'll raise limit to 30MB.
options(shiny.maxRequestSize = 30 * 1024 ^ 2)
# Load demo data from 'bnlearn'
data(alarm, package = "bnlearn")
data(gaussian.test, package = "bnlearn")
data(hailfinder, package = "bnlearn")
data(insurance, package = "bnlearn")
data(learning.test, package = "bnlearn")
# Help data for Home tab
homeHelp <-
data.frame(
step = c(1, 2),
intro = c(
"Here is the sidebar menu. Each link opens a new tab. You will probably start with the <b>Structure</b> tab and work your way down.",
"The body of the app is where you will find different features for modeling and analyzing your network. Each tab has its own help button."
),
element = c(
"#sidebarMenu",
"#dashboardBody"
),
position = c("auto", "auto")
)
# Help data for Structure data
structureHelp <-
data.frame(
step = c(1),
intro = c("Here is where you can view your Bayesian network. Using the 3 boxes to the left, you can (1) upload your data in csv format, (2) select a structural learning algorithm, and (3) estimate the network score. Data should be numeric or factored and should not contain any NULL/NaN/NA values."),
element = c("#netPlot"),
position = c("auto")
)
# Help data for Parameters tab
parametersHelp <-
data.frame(
step = c(1),
intro = c("Here is where you can view the parameters of your network. Using the 2 boxes to the left, you can (1) select a parameter learning method and (2) select the type of graphic to view."),
element = c("#condPlot"),
position = c("auto")
)
# Help data for Inference tab
inferenceHelp <-
data.frame(
step = c(1),
intro = c("Here is where you can view the conditional probability distribution of an event. Using the 2 boxes to the left, you can (1) add evidence to the model and (2) select a conditional event to view."),
element = c("#distPlot"),
position = c("auto")
)
# Help data for Measures tab
measuresHelp <-
data.frame(
step = c(1, 2),
intro = c(
"Select a node measure in the box to the left and the result is displayed here.",
"Here is where you can view the adjacency matrix. Configure the matrix using the control to the left."
),
element = c(
"#nodeText",
"#netTable"
),
position = c("auto", "auto")
)
# Help data for Editor tab
editorHelp <-
data.frame(
step = c(1, 2),
intro = c(
"Here is the editor. Click <b>Run</b> to knit the rmarkdown report.",
"The resulting rmarkdown report is displayed here."
),
element = c(
"#rmd",
"#knitr"
),
position = c("auto", "auto")
)
# Enable bookmarking for the app
shiny::enableBookmarking(store = "url")
|
/scratch/gouwar.j/cran-all/cranData/BayesianNetwork/inst/bn/global.R
|
#' @import bnlearn
#' @import heatmaply
#' @import plotly
#' @import rintrojs
#' @import shiny
#' @import shinyAce
#' @import shinydashboard
#' @import shinyWidgets
options(shiny.testmode=TRUE)
# Define required server logic
shinyServer(function(input, output, session) {
# Get the data selection from user
dat <- shiny::reactive({
if (input$dataInput == 1) {
if (input$net == 1) {
dat <- learning.test
} else if (input$net == 2) {
dat <- gaussian.test
} else if (input$net == 3) {
dat <- alarm
} else if (input$net == 4) {
dat <- insurance
} else if (input$net == 5) {
dat <- hailfinder
}
} else if (input$dataInput == 2) {
# Get the uploaded file from user
inFile <- input$file
if (is.null(inFile))
return(NULL)
dat <- read.csv(inFile$datapath)
}
})
# bnlearn no longer supports character vars.
# Temp step to convert character to factor
# dat <- shiny::reactive({
# dat <- dplyr::mutate_if(dat0(), is.character, as.factor)
# })
# Learn the structure of the network
dag <- shiny::reactive({
if (is.null(dat()))
return(NULL)
# Create a Progress object
progress <- shiny::Progress$new()
# Make sure it closes when we exit this reactive, even if there's an error
on.exit(progress$close())
progress$set(message = "Learning network structure", value = 0)
# Get the selected learning algorithm from the user and learn the network
if (input$alg == "gs") {
dag <- bnlearn::cextend(bnlearn::gs(dat()), strict = FALSE)
} else if (input$alg == "iamb") {
dag <- bnlearn::cextend(bnlearn::iamb(dat()), strict = FALSE)
} else if (input$alg == "fast.iamb") {
dag <- bnlearn::cextend(bnlearn::fast.iamb(dat()), strict = FALSE)
} else if (input$alg == "inter.iamb") {
dag <- bnlearn::cextend(bnlearn::inter.iamb(dat()), strict = FALSE)
} else if (input$alg == "hc") {
dag <- bnlearn::cextend(bnlearn::hc(dat()), strict = FALSE)
} else if (input$alg == "tabu") {
dag <- bnlearn::cextend(bnlearn::tabu(dat()), strict = FALSE)
} else if (input$alg == "mmhc") {
dag <- bnlearn::cextend(bnlearn::mmhc(dat()), strict = FALSE)
} else if (input$alg == "rsmax2") {
dag <- bnlearn::cextend(bnlearn::rsmax2(dat()), strict = FALSE)
} else if (input$alg == "mmpc") {
dag <- bnlearn::cextend(bnlearn::mmpc(dat()), strict = FALSE)
} else if (input$alg == "si.hiton.pc") {
dag <- bnlearn::cextend(bnlearn::si.hiton.pc(dat()), strict = FALSE)
} else if (input$alg == "aracne") {
dag <- bnlearn::cextend(bnlearn::aracne(dat()), strict = FALSE)
} else if (input$alg == "chow.liu") {
dag <- bnlearn::cextend(bnlearn::chow.liu(dat()), strict = FALSE)
}
})
# Create the nodes value box
output$nodesBox <- shiny::renderUI({
if (is.null(dat()))
return(NULL)
# Get the number of nodes in the network
nodes <- bnlearn::nnodes(dag())
shinydashboard::valueBox(nodes,
"Nodes",
icon = shiny::icon("circle"),
color = "blue")
})
# Create the arcs value box
output$arcsBox <- shiny::renderUI({
if (is.null(dat()))
return(NULL)
# Get the number of arcs in the network
arcs <- bnlearn::narcs(dag())
shinydashboard::valueBox(arcs,
"Arcs",
icon = shiny::icon("arrow-right"),
color = "green")
})
# Observe intro btn and start the intro
shiny::observeEvent(input$homeIntro,
rintrojs::introjs(session, options = list(steps = homeHelp))
)
# Plot the d3 force directed network
output$netPlot <- networkD3::renderSimpleNetwork({
if (is.null(dat()))
return(NULL)
# Get the arc directions
networkData <- data.frame(bnlearn::arcs(dag()))
networkD3::simpleNetwork(
networkData,
Source = "from",
Target = "to",
opacity = 0.75,
zoom = TRUE
)
})
# Print the network score
output$score <- shiny::renderText({
if (bnlearn::directed(dag())) {
# If all of the data is numeric,...
if (all(sapply(dat(), is.numeric))) {
# Get the selected score function from the user and calculate the score
if (input$type == "loglik") {
bnlearn::score(dag(), dat(), type = "loglik-g")
} else if (input$type == "aic") {
bnlearn::score(dag(), dat(), type = "aic-g")
} else if (input$type == "bic") {
bnlearn::score(dag(), dat(), type = "bic-g")
} else {
bnlearn::score(dag(), dat(), type = "bge")
}
}
# If the data is discrete,...
else {
if (input$type == "loglik") {
bnlearn::score(dag(), dat(), type = "loglik")
} else if (input$type == "aic") {
bnlearn::score(dag(), dat(), type = "aic")
} else if (input$type == "bic") {
bnlearn::score(dag(), dat(), type = "bic")
} else {
bnlearn::score(dag(), dat(), type = "bde")
}
}
} else
shiny::validate(
shiny::need(
try(score != "")
,
"Make sure your network is completely directed in order to view your network's score..."
)
)
})
# Observe intro btn and start the intro
shiny::observeEvent(input$structureIntro,
rintrojs::introjs(session, options = list(steps = structureHelp))
)
# Fit the model parameters
fit <- shiny::reactive({
if (is.null(dat()))
return(NULL)
if (bnlearn::directed(dag())) {
if (all(sapply(dat(), is.numeric))) met = "mle-g"
else met = input$met
# Get the selected parameter learning method from the user and learn the parameters
fit <- bnlearn::bn.fit(dag(), dat(), method = met)
}
})
# # Create data frame for selected parameter
# param <- shiny::reactive({
# param <- data.frame(coef(fit()[[input$Node]]))
# if (is.numeric(dat()[,1])) {
# colnames(param) <- "Param"
# param <- cbind(param = rownames(param), param)
# param[,"Param"] <- round(param[,"Param"], digits = 3)
# param <- transform(param, Param = as.numeric(Param))
# } else {
# param[,"Freq"] <- round(param[,"Freq"], digits = 3)
# param <- transform(param, Freq = as.numeric(Freq))
# }
# })
# # Plot Handsontable for selected parameter
# values = shiny::reactiveValues()
# setHot = function(x) values[["hot"]] <<- x
# output$hot = rhandsontable::renderRHandsontable({
# if (!is.null(input$hot)) {
# DF = rhandsontable::hot_to_r(input$hot)
# } else {
# DF = param()
# }
# if (is.numeric(dat()[,1])) {
# col <- "Param"
# } else {
# col <- "Freq"
# }
# setHot(DF)
# rhandsontable::rhandsontable(DF, readOnly = TRUE, rowHeaders = NULL) %>%
# rhandsontable::hot_table(highlightCol = TRUE, highlightRow = TRUE) %>%
# rhandsontable::hot_context_menu(allowRowEdit = FALSE, allowColEdit = FALSE) %>%
# rhandsontable::hot_col(col, readOnly = FALSE)
# })
#
# # Add expert knowledge to the model
# expertFit <- shiny::reactive({
# if (!is.null(values[["hot"]])) {
# expertFit <- fit()
# temp <- data.frame(values[["hot"]])
# if (is.numeric(dat()[,1])) {
# stdev <- as.numeric(fit()[[input$Node]]["sd"])
# expertFit[[input$Node]] <- list(coef = as.numeric(temp[,"Param"]), sd = stdev)
# } else {
# cpt <- coef(expertFit()[[input$Node]])
# cpt[1:length(param()[,"Freq"])] <- as.numeric(temp[,"Freq"])
# expertFit[[input$Node]] <- cpt
# }
# } else {
# expertFit <- fit()
# }
# })
# Set the parameter graphic options
graphic <- shiny::reactive({
# If data is continuous, ...
if (all(sapply(dat(), is.numeric))) {
graphic <- c("Histogram" = "histogram",
"XY Plot" = "xyplot",
"QQ Plot" = "qqplot")
# If data is discrete,...
} else {
graphic <- c("Bar Chart" = "barchart",
"Dot Plot" = "dotplot")
}
})
# Send the parameter choices to the user
shiny::observe({
shiny::updateSelectInput(session, "param", choices = graphic())
})
# Send the node choices to the user
shiny::observe({
shiny::updateSelectInput(session, "Node", choices = colnames(dat()))
})
# Plot the model parameters
output$condPlot <- shiny::renderPlot({
if (is.null(dat()))
return(NULL)
if (bnlearn::directed(dag())) {
# Get the selected graphic from the user and plot the parameters
if (input$param == "histogram") {
bnlearn::bn.fit.histogram(fit())
} else if (input$param == "xyplot") {
bnlearn::bn.fit.xyplot(fit())
} else if (input$param == "qqplot") {
bnlearn::bn.fit.qqplot(fit())
} else if (input$param == "barchart") {
bnlearn::bn.fit.barchart(fit()[[input$Node]])
} else if (input$param == "dotplot") {
bnlearn::bn.fit.dotplot(fit()[[input$Node]])
}
} else
shiny::validate(
shiny::need(
try(condPlot != "")
,
"Make sure your network is completely directed in order to view the parameter infographics..."
)
)
})
# Observe intro btn and start the intro
shiny::observeEvent(input$parametersIntro,
rintrojs::introjs(session, options = list(steps = parametersHelp))
)
# Send the evidence node choices to the user
shiny::observe({
shiny::updateSelectInput(session, "evidenceNode", choices = names(dat()))
})
# Send the evidence choices to the user
shiny::observe({
whichNode <- which(colnames(dat()) == input$evidenceNode)
evidenceLevels <- as.vector(unique(dat()[,whichNode]))
shiny::updateSelectInput(session, "evidence", choices = evidenceLevels)
})
# Send the event node choices to the user
shiny::observe({
shiny::updateSelectInput(session, "event", choices = names(dat()))
})
# Perform Bayesian inference based on evidence and plot results
output$distPlot <- shiny::renderPlot({
if (is.null(dat()))
return(NULL)
if (all(sapply(dat(), is.numeric)))
shiny::validate(
shiny::need(
try(distPlot != ""),
"Inference is currently not supported for continuous variables..."
)
)
# Create a string of the selected evidence
    str1 <- paste0("(", input$evidenceNode, "=='", input$evidence, "')")
# Estimate the conditional PD and tabularize the results
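    # bnlearn::cpdist draws random samples from the fitted network conditional
    # on the evidence, so the probabilities below are Monte Carlo estimates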
nodeProbs <- prop.table(table(bnlearn::cpdist(fit(), input$event, eval(parse(text = str1)))))
# Create a bar plot of the conditional PD
barplot(
nodeProbs,
col = "lightblue",
main = "Conditional Probabilities",
border = NA,
xlab = "Levels",
ylab = "Probabilities",
ylim = c(0, 1)
)
})
# Observe intro btn and start the intro
shiny::observeEvent(input$inferenceIntro,
rintrojs::introjs(session, options = list(steps = inferenceHelp))
)
# Send the node names to the user
shiny::observe({
shiny::updateSelectInput(session, "nodeNames", choices = colnames(dat()))
})
# Get the selected node measure from the user and print the results
output$nodeText <- shiny::renderText({
if (is.null(dat()))
return(NULL)
if (input$nodeMeasure == "mb") {
bnlearn::mb(dag(), input$nodeNames)
} else if (input$nodeMeasure == "nbr") {
bnlearn::nbr(dag(), input$nodeNames)
} else if (input$nodeMeasure == "parents") {
bnlearn::parents(dag(), input$nodeNames)
} else if (input$nodeMeasure == "children") {
bnlearn::children(dag(), input$nodeNames)
} else if (input$nodeMeasure == "in.degree") {
bnlearn::in.degree(dag(), input$nodeNames)
} else if (input$nodeMeasure == "out.degree") {
bnlearn::out.degree(dag(), input$nodeNames)
} else if (input$nodeMeasure == "incident.arcs") {
bnlearn::incident.arcs(dag(), input$nodeNames)
} else if (input$nodeMeasure == "incoming.arcs") {
bnlearn::incoming.arcs(dag(), input$nodeNames)
} else if (input$nodeMeasure == "outgoing.arcs") {
bnlearn::outgoing.arcs(dag(), input$nodeNames)
} else
bnlearn::incident.arcs(dag(), input$nodeNames)
})
# Get the selected network measure from the user and plot the results
output$netTable <- plotly::renderPlotly({
if (is.null(dat()))
return(NULL)
# Plot a d3 heatmap of the adjacency matrix
heatmaply::heatmaply(
bnlearn::amat(dag()),
grid_gap = 1,
colors = blues9,
dendrogram = input$dendrogram,
symm = TRUE,
margins = c(100, 100, NA, 0),
hide_colorbar = TRUE
)
})
# Observe intro btn and start the intro
shiny::observeEvent(input$measuresIntro,
rintrojs::introjs(session, options = list(steps = measuresHelp))
)
# Knit shinyAce editor code
output$knitr <- shiny::renderUI({
# Create a Progress object
progress <- shiny::Progress$new()
# Make sure it closes when we exit this reactive, even if there's an error
on.exit(progress$close())
progress$set(message = "Building report...", value = 0)
input$eval
return(
shiny::isolate(
shiny::HTML(
knitr::knit2html(text = input$rmd, quiet = TRUE)
)
)
)
})
# Observe intro btn and start the intro
shiny::observeEvent(input$editorIntro,
rintrojs::introjs(session, options = list(steps = editorHelp))
)
# Trigger bookmarking
observeEvent(input$bookmark, {
session$doBookmark()
})
# Need to exclude the buttons from themselves being bookmarked
setBookmarkExclude("bookmark")
})
|
/scratch/gouwar.j/cran-all/cranData/BayesianNetwork/inst/bn/server.R
|
code =
'## Sample Code
Here is some sample markdown to help illustrate the editor.
### Get some sample data and show the first few values
### Use `dat()` to get the active data set
```{r}
dat <- learning.test
head(dat)
```
### Learn the structure of the network
```{r}
dag <- bnlearn::cextend(bnlearn::gs(dat))
```
### Plot the force directed network
```{r}
networkData <- data.frame(bnlearn::arcs(dag))
networkD3::simpleNetwork(
networkData,
Source = "from",
Target = "to"
)
```
### Print the network score
```{r}
bnlearn::score(dag, dat)
```
### Fit the model parameters and show the CPT for node A
```{r}
fit <- bnlearn::bn.fit(dag, dat)
fit$A
```
### Plot the model parameters for node A
```{r}
bnlearn::bn.fit.barchart(fit[["A"]])
```
### Get the Markov blanket for node A
```{r}
bnlearn::mb(dag, "A")
```
### Plot a d3 heatmap of the adjacency matrix
```{r}
heatmaply::heatmaply(
bnlearn::amat(dag),
grid_gap = 1,
colors = blues9,
dendrogram = "both",
symm = TRUE,
margins = c(100, 100, NA, 0),
hide_colorbar = TRUE
)
```
### Generate some random data from the network and show the first few values
```{r}
set.seed(1)
simData <- bnlearn::rbn(fit, n = 100, dat)
head(simData)
```
```{r}
# Put your own code here...
```
'
# Green dashboard page
shinydashboard::dashboardPage(
skin = "green",
# Dashboard header and title
shinydashboard::dashboardHeader(
title = "BayesianNetwork"
),
# Dashboard sidebar
shinydashboard::dashboardSidebar(
# Sidebar menu
shinydashboard::sidebarMenu(
id = "sidebarMenu",
# Home menu item
shinydashboard::menuItem(
"Home",
tabName = "home",
icon = shiny::icon("home")
),
# Structure menu item
shinydashboard::menuItem(
"Structure",
icon = shiny::icon("globe"),
tabName = "structure"
),
# Parameters menu item
shinydashboard::menuItem(
"Parameters",
tabName = "parameters",
icon = shiny::icon("bar-chart")
),
# Inference menu item
shinydashboard::menuItem(
"Inference",
icon = shiny::icon("arrow-right"),
tabName = "inference"
),
# Measures menu item
shinydashboard::menuItem(
"Measures",
tabName = "measures",
icon = shiny::icon("table")
),
# Editor menu item
shinydashboard::menuItem(
"Editor",
tabName = "editor",
icon = shiny::icon("code")
),
br(),
# Help page link
shinydashboard::menuItem("Help",
icon = icon("info-circle"),
href = "http://paulgovan.github.io/BayesianNetwork/"),
# Source code link
shinydashboard::menuItem("Source Code",
icon = icon("github"),
href = "https://github.com/paulgovan/BayesianNetwork"),
# Bookmark button
shiny::br(),
shiny::bookmarkButton(id = "bookmark"),
tags$style(type='text/css', "#bookmark { display: block; margin: 0 auto; }")
)
),
# Dashboard body
shinydashboard::dashboardBody(
id = "dashboardBody",
# Add favicon and title to header
tags$head(
tags$link(rel = "icon", type = "image/png", href = "favicon.png"),
tags$title("BayesianNetwork")
),
# Include introjs UI
rintrojs::introjsUI(),
# Dashboard tab items
shinydashboard::tabItems(
# Home tab item
shinydashboard::tabItem(
tabName = "home",
shiny::fluidRow(
# Welcome box
shinydashboard::box(
title = "",
status = "success",
width = 8,
shiny::img(src = "favicon.png",
height = 200,
width = 175
),
shiny::h2("BayesianNetwork"),
shiny::h4("Bayesian Network Modeling and Analysis"),
br(),
shiny::h4("BayesianNetwork is a ",
shiny::a(href = 'http://shiny.rstudio.com', 'Shiny'),
"web application for Bayesian network modeling and analysis, powered by",
shiny::a(href = 'http://www.bnlearn.com', 'bnlearn'),
'and',
shiny::a(href = 'http://christophergandrud.github.io/networkD3/', 'networkD3'),
'.'
),
shiny::h4("Click",
shiny::a("Structure", href="#shiny-tab-structure", "data-toggle" = "tab"),
" in the sidepanel to get started."
),
br(),
shiny::h4(shiny::HTML('©'),
'2016 By Paul Govan. ',
shiny::a(href = 'http://www.apache.org/licenses/LICENSE-2.0', 'Terms of Use.')
),
br(),
# Add introjs btn
shiny::actionButton("homeIntro", "Show me how it works"),
tags$style(type='text/css', "#homeIntro { display: block; margin: 0 auto; }")
),
# Nodes and arcs value boxes
shiny::uiOutput("nodesBox"),
shiny::uiOutput("arcsBox")
)
),
# Structure tab item
shinydashboard::tabItem(tabName = "structure",
shiny::fluidRow(
shiny::column(
width = 4,
# Network input box
shinydashboard::box(
title = "Data Input",
status = "success",
collapsible = TRUE,
width = NULL,
shiny::helpText("Select a sample network or upload your Bayesian network data:"),
shinyWidgets::radioGroupButtons(inputId = "dataInput",
choices = c("Sample Network" = 1,
"Upload Data" = 2),
selected = 1,
justified = TRUE
),
# Conditional panel for sample network selection
shiny::conditionalPanel(
condition = "input.dataInput == 1",
# Demo network input select
shiny::selectInput(
inputId = "net",
h5("Bayesian Network:"),
c("Sample Discrete Network" = 1,
"Sample Gaussian Network" = 2,
"Alarm Network" = 3,
"Insurance Network" = 4,
"Hailfinder Network" = 5
)
)
),
# Conditional panel for file input selection
shiny::conditionalPanel(
condition = "input.dataInput == 2",
                            shiny::p('Note: your data should be structured as a csv file with a header of variable names.'
),
# File input
shiny::fileInput(
'file',
strong('File Input:'),
accept = c('text/csv',
'text/comma-separated-values',
'text/tab-separated-values',
'text/plain',
'.csv',
'.tsv'
)
)
)
),
# Structural learning box
shinydashboard::box(
title = "Structural Learning",
status = "success",
collapsible = TRUE,
width = NULL,
shiny::helpText("Select a structural learning algorithm:"),
# Structural learning algorithm input select
shiny::selectizeInput(
inputId = "alg",
shiny::h5("Learning Algorithm:"),
choices = list(
"Constraint-based Learning" =
c("Grow-Shrink" = "gs",
"Incremental Association" = "iamb",
"Fast IAMB" = "fast.iamb",
"Inter IAMB" = "inter.iamb"
),
"Score-based Learning" =
c("Hill Climbing" = "hc",
"Tabu" = "tabu"),
"Hybrid Learning" =
c("Max-Min Hill Climbing" = "mmhc",
"2-phase Restricted Maximization" = 'rsmax2'
),
"Local Discovery Learning" =
c("Max-Min Parents and Children" = 'mmpc',
"Semi-Interleaved HITON-PC" = "si.hiton.pc",
"ARACNE" = "aracne",
"Chow-Liu" = "chow.liu"
)
)
)
),
# Network score box
shinydashboard::box(
title = "Network Score",
status = "success",
collapsible = TRUE,
width = NULL,
# Network score function input select
shiny::selectInput(
"type",
h5("Network Score:"),
c("Log-Likelihood" = "loglik",
"Akaike Information Criterion" = "aic",
"Bayesian Information Criterion" = "bic",
"Bayesian Equivalent" = "be"
), 'loglik-g'
),
# Network score output
shiny::verbatimTextOutput("score")
)
),
shiny::column(
width = 8,
# Bayesian network box
shinydashboard::box(
title = "Bayesian Network",
status = "success",
collapsible = TRUE,
width = NULL,
# d3 force directed network
networkD3::simpleNetworkOutput("netPlot")
)
)
),
# Add introjs btn
shiny::actionButton("structureIntro", "Show me how")
),
# parameters tab item
shinydashboard::tabItem(tabName = "parameters",
shiny::fluidRow(
shiny::column(
width = 4,
# parameter learning box
shinydashboard::box(
title = "Parameter Learning",
status = "success",
collapsible = TRUE,
width = NULL,
shiny::helpText("Select a parameter learning method:"),
# Parameter learning method input select
shiny::selectInput(
"met",
shiny::h5("Learning Method:"),
c("Maximum Likelihood Estimation" = "mle",
"Bayesian Estimation" = "bayes"
)
)
# shiny::helpText("Select an imaginary sample size:"),
# Imaginary Sample Size for illustrative purposes
# shiny::numericInput(
# "iss",
# shiny::h5("Sample Size:"),
# value = 10,
# min = 1
# )
),
# Parameter infographic box
shinydashboard::box(
title = "Parameter Graphic",
status = "success",
collapsible = TRUE,
width = NULL,
helpText("Select a parameter infographic:"),
# Parameter infographic input select
selectInput("param", label = h5("Parameter Infographic:"),
""),
# Conditional panel for discrete data
shiny::conditionalPanel(
"input.param == 'barchart' || input.param == 'dotplot'",
# Node input select
shiny::selectInput("Node", label = shiny::h5("Node:"), "")
)
)
# shinydashboard::box(
# title = "Expert Knowledge", status = "success", solidHeader = TRUE, collapsible = TRUE, width = NULL, height = 1000,
# shiny::selectInput("Node", label = h5("Node:"),
# ""),
# shiny::helpText("Add expert knowledge to your model (Experimental):"),
# shiny::actionButton("saveBtn", "Save"),
# rhandsontable::rHandsontableOutput("hot")
# )
),
shiny::column(
width = 8,
# Network parameters box
shinydashboard::box(
title = "Network Parameters",
status = "success",
collapsible = TRUE,
width = NULL,
# Conditional PD plot
shiny::plotOutput("condPlot")
)
)
),
# Add introjs btn
shiny::actionButton("parametersIntro", "Show me how")
),
# Inference tab item
shinydashboard::tabItem(tabName = "inference",
shiny::fluidRow(
shiny::column(
width = 4,
# Evidence box
shinydashboard::box(
title = "Evidence",
status = "success",
collapsible = TRUE,
width = NULL,
helpText("Select evidence to add to the model:"),
shiny::fluidRow(
shiny::column(6,
# Evidence node input select
shiny::selectInput(
"evidenceNode", label = shiny::h5("Evidence Node:"),
""
)),
shiny::column(6,
# Conditional panel for discrete data
shiny::conditionalPanel(
"input.param == 'barchart' || input.param == 'dotplot'",
# Evidence input select
shiny::selectInput(
"evidence", label = shiny::h5("Evidence:"),
""
)
)
)
)
),
# Event box
shinydashboard::box(
title = "Event",
status = "success",
collapsible = TRUE,
width = NULL,
helpText("Select an event of interest:"),
# Event node input select
shiny::selectInput("event", label = shiny::h5("Event Node:"),
"")
)
),
shiny::column(
width = 8,
# Event parameter box
shinydashboard::box(
title = "Event Parameter",
status = "success",
collapsible = TRUE,
width = NULL,
# Event conditional PD plot
shiny::plotOutput("distPlot")
)
)
),
# Add introjs btn
shiny::actionButton("inferenceIntro", "Show me how")
),
# Measures tab item
shinydashboard::tabItem(tabName = "measures",
shiny::fluidRow(
# Measure controls box
shinydashboard::box(
title = "Controls",
status = "success",
collapsible = TRUE,
width = 4,
shiny::helpText("Select a node measure:"),
# Node measure input select
shiny::selectInput(
"nodeMeasure",
h5("Node Measure:"),
c("Markov Blanket" = "mb",
"Neighborhood" = "nbr",
"Parents" = "parents",
"Children" = "children",
"In Degree" = "in.degree",
"Out Degree" = "out.degree",
"Incident Arcs" = "incident.arcs",
"Incoming Arcs" = "incoming.arcs",
"Outgoing Arcs" = "outgoing.arcs"
)
),
# Node input select
shiny::selectInput("nodeNames", label = shiny::h5("Node:"),
""),
shiny::helpText("Select a network measure:"),
# Network measure input select
shiny::selectInput(
"dendrogram",
h5("Dendrogram:"),
c("Both" = "both",
"Row" = "row",
"Column" = "column",
"None" = "none"
)
)
),
# Node measure box
shinydashboard::box(
title = "Node Measure",
status = "success",
collapsible = TRUE,
width = 8,
# Node measure output
shiny::verbatimTextOutput("nodeText")
),
# Network measure box
shinydashboard::box(
title = "Adjacency Matrix",
status = "success",
collapsible = TRUE,
width = 8,
# d3 heatmap
plotly::plotlyOutput("netTable")
)
),
# Add introjs btn
shiny::actionButton("measuresIntro", "Show me how")
)
,
# Editor tab item
shinydashboard::tabItem(tabName = "editor",
shiny::fluidRow(
column(6,
# shinyAce editor box
shinydashboard::box(
title = "Editor",
status = "success",
collapsible = TRUE,
width = 12,
# shinyAce Editor
shinyAce::aceEditor("rmd", mode = "markdown", value = code),
shiny::actionButton("eval", "Run")
),
# Add introjs btn
shiny::actionButton("editorIntro", "Show me how")
),
column(6,
# knitr output
shiny::htmlOutput("knitr")
)
)
)
)
)
)
|
/scratch/gouwar.j/cran-all/cranData/BayesianNetwork/inst/bn/ui.R
|
#' @title perHtypeIerror_marginalpowerfunc
#' @description This function reads in the output matrices of a number of trial replicates to calculate the per-hypothesis type I error rate or power
#' @param res A list of output matrices of a number of trial replicates
#'
#' @return "Per-hypothesis" type I error rate or power (marginal power) for each treatment - control comparison
#' @export
#'
#' @examples
#' \dontrun{perHtypeIerror_marginalpowerfunc(res)}
#' @author Ziyan Wang
perHtypeIerror_marginalpowerfunc = function(res) {
colMeans(t(sapply(res, function(x) {
resname = colnames(x)
K = sum(stringr::str_detect(colnames(x), "H")) + 1
    #Identify which hypothesis is rejected
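    # Assumed column layout of each output matrix x (inferred from the index
    # arithmetic used throughout this file): columns K to (K-1+2K) hold the
    # per-arm sample size and number of successes in alternating order,
    # followed by the K-1 hypothesis decision ("H") columns and, after one
    # further column, the K-1 treatment effect estimates.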
reject = which(matrix(x[, (K - 1 + 2 * K + 1):(K - 1 + 2 * K + K - 1)] %in% 1, ncol =
K - 1), arr.ind = TRUE)[, 2]
if (length(reject) >= 1) {
rejectres = rep(0, K - 1)
rejectres[reject] = 1
return(rejectres)
}
else{
return(rep(0, K - 1))
}
})))
}
#' @title disconjunctivepowerfunc
#' @description This function reads in the output matrices of a number of trial replicates to calculate the family-wise error rate or disconjunctive power
#' @param res A list of output matrices of a number of trial replicates
#'
#' @return Disconjunctive power
#' @export
#'
#' @examples
#' \dontrun{disconjunctivepowerfunc(res)}
disconjunctivepowerfunc = function(res) {
mean(sapply(res, function(x) {
resname = colnames(x)
K = sum(stringr::str_detect(colnames(x), "H")) + 1
if (sum(x[, (K - 1 + 2 * K + 1):(K - 1 + 2 * K + K - 1)] %in% 1) >= 1) {
return(1)
}
else{
return(0)
}
}))
}
#' @title conjuncativepower_or_FWER
#'
#' @description This function reads in the output matrices of a number of trial replicates to calculate the family-wise error rate or conjunctive power
#'
#' @param res A list of output matrices of a number of trial replicates
#' @param scenario The true scenario used to generate the res list
#' @param test.type The indicator of whether one-sided or two-sided testing was used.
#' Please make sure that the input test.type does not conflict with the data; otherwise the conjunctive power calculation will be wrong
#'
#' @return Family wise error rate or Conjunctive power
#' @export
#'
#' @examples
#' \dontrun{conjuncativepower_or_FWER(res, scenario = c(0.4, 0.4), test.type = "Twoside")}
conjuncativepower_or_FWER=function(res,scenario,test.type){
hypres=mean(sapply(res,function(x){
stage=dim(x)[1]
resname=colnames(x)
K=sum(stringr::str_detect(colnames(x),"H"))+1
# True scenario construction
if (sum(scenario-min(scenario))>0 & test.type == "Twoside"){
sce = (scenario-scenario[1])[-1]!=0
}
else if (sum(scenario-min(scenario))>0 & test.type == "Oneside"){
sce = (scenario-scenario[1])[-1]>0
}
else{
sce = rep(0,K-1)
}
# # Check whether the data output conflicts the test type. This will lead to error if the hypothesis test data input are all zero under the alternative
# if (test.type == "Twoside"){
# test.data = x[,(K-1+2*K+1):(K-1+2*K+K-1)]
# superiorarm.loc = which((scenario-scenario[1])[-1]>0)
# inferiorarm.loc = which((scenario-scenario[1])[-1]<0)
# for (i in 1:length(inferiorarm.loc)){
# if (test.data[min(which(is.na(test.data[,inferiorarm.loc[i]])))-1,inferiorarm.loc[i]] == 0){
# stop("The data type used a Two side test while the input variable is one side.")
# }
# }
# for (i in 1:length(superiorarm.loc)){
# if (test.data[min(which(is.na(test.data[,superiorarm.loc[i]])))-1,superiorarm.loc[i]] == 0){
# stop("The data type used a Two side test while the input variable is one side.")
# }
# }
# }
# else if (test.type == "Oneside"){
# inferiorarm.loc = which((scenario-scenario[1])[-1]<0)
# test.data = x[,(K-1+2*K+1):(K-1+2*K+K-1)]
# # check if any inferior arm is identified to be inferior for one side test
# for (i in 1:length(inferiorarm.loc)){
# if (test.data[min(which(is.na(test.data[,inferiorarm.loc[i]])))-1,inferiorarm.loc[i]] == 1){
# stop("The data type used a Two side test while the input variable is one side.")
# }
# }
# }
# else{
# Stop("Input test.type is invalid.")
# }
#Identify which hypothesis is rejected
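    # drop.at.mat holds the (row, column) positions of NA entries in the
    # hypothesis columns; an NA means the corresponding arm had already been
    # dropped at that stage, so each arm's decision is read from the last
    # stage before it was dropped.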
test.data = x[,(K-1+2*K+1):(K-1+2*K+K-1)]
drop.at.mat = which(matrix((test.data %in% NA),ncol = K-1), arr.ind = TRUE)
if (length(drop.at.mat)>=1){
# Find unique column indices
unique_cols <- unique(drop.at.mat[, 2])
drop.at.all=rep(stage,K-1)
treatmentindex=seq(1,K-1)
# Initialize a result matrix
trtmean.loc <- matrix(NA, nrow = length(unique_cols), ncol = 2)
colnames(trtmean.loc) <- c("drop.at.all", "treatmentindex")
# Iterate through unique column indices
for (i in 1:length(unique_cols)) {
trtdrop <- unique_cols[i]
drop.at <- min(drop.at.mat[drop.at.mat[, 2] == trtdrop, 1]) - 1
drop.at.all[trtdrop]=drop.at
}
trtmean.loc=cbind(c(max(drop.at.all),drop.at.all),c(1,treatmentindex+1))
res=matrix(x[,(K-1+2*K+1):(K-1+2*K+K-1)] %in% 1,ncol=K-1)
result=rep(NA,K-1)
for (i in 1:(K-1)){
result[i] = res[trtmean.loc[i+1,1],trtmean.loc[i+1,2]-1]
}
}
else{
drop.at.all=rep(stage,K)
treatmentindex=seq(1,K-1)
trtmean.loc=cbind(drop.at.all,c(1,treatmentindex+1))
res=matrix(x[,(K-1+2*K+1):(K-1+2*K+K-1)] %in% 1,ncol=K-1)
result=rep(NA,K-1)
for (i in 1:(K-1)){
result[i]=res[trtmean.loc[i+1,1],trtmean.loc[i+1,2]-1]
}
}
if (sum(result == sce) == (K-1)){
return(1)
}
else{
return(0)
}
}))
if (test.type == "Twoside"){
if (sum(scenario-min(scenario))>0){
current_scenario = "Alt"
return(hypres)
}
else{
current_scenario = "Null"
return(1-hypres)
}
}
else{
if ((max(scenario)-scenario[1]) <= 0){
current_scenario = "Null"
return(1-hypres)
}
else{
current_scenario = "Alt"
return(hypres)
}
}
}
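# Illustrative reading of the return value (a sketch): under a null scenario,
# e.g. scenario = c(0.4, 0.4), the function returns the FWER, i.e. one minus
# the probability that every hypothesis decision matches the truth; under an
# alternative, e.g. scenario = c(0.4, 0.6), it returns the conjunctive power,
# i.e. the probability that every decision matches the true scenario.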
#' @title Meanfunc
#' @description This function reads in the output matrices of a number of trial replicates to calculate the mean treatment effect estimate.
#' @param res A list of output matrices of a number of trial replicates
#'
#' @return Mean treatment effect estimates of each treatment arm
#' @export
#'
#' @examples
#' \dontrun{Meanfunc(res)}
Meanfunc = function(res) {
K=mean(sapply(res,function(x){
K=sum(stringr::str_detect(colnames(x),"H"))+1
return(K)
}))
meaneffect = colMeans(matrix(t(sapply(res,function(x){
stage=dim(x)[1]
resname=colnames(x)
K=sum(stringr::str_detect(colnames(x),"H"))+1
test.data = x[,(K-1+2*K+1):(K-1+2*K+K-1)]
drop.at.mat = which(matrix((test.data %in% NA),ncol = K-1), arr.ind = TRUE)
if (length(drop.at.mat)>=1){
# Find unique column indices
unique_cols <- unique(drop.at.mat[, 2])
drop.at.all=rep(stage,K-1)
treatmentindex=seq(1,K-1)
# Initialize a result matrix
trtmean.loc <- matrix(NA, nrow = length(unique_cols), ncol = 2)
colnames(trtmean.loc) <- c("drop.at.all", "treatmentindex")
# Iterate through unique column indices
for (i in 1:length(unique_cols)) {
trtdrop <- unique_cols[i]
drop.at <- min(drop.at.mat[drop.at.mat[, 2] == trtdrop, 1]) - 1
drop.at.all[trtdrop]=drop.at
}
trtmean.loc=cbind(c(max(drop.at.all),drop.at.all),c(1,treatmentindex+1))
meanres = matrix(x[, (K - 1 + 2 * K + K - 1 + 1 + 1):(K - 1 + 2 *
K + K - 1 + 1 + K - 1)], ncol = K - 1)
result = rep(NA, K - 1)
for (i in 1:(K - 1)) {
result[i] = meanres[trtmean.loc[i+1, 1], trtmean.loc[i+1, 2]-1]
}
return(result)
}
else{
drop.at.all=rep(stage,K)
treatmentindex=seq(1,K-1)
trtmean.loc=cbind(drop.at.all,c(1,treatmentindex+1))
meanres = matrix(x[, (K - 1 + 2 * K + K - 1 + 1 + 1):(K - 1 + 2 *
K + K - 1 + 1 + K - 1)], ncol = K - 1)
result = rep(NA, K - 1)
for (i in 1:(K - 1)) {
result[i] = meanres[trtmean.loc[i+1, 1], trtmean.loc[i+1, 2]-1]
}
return(result)
}
})),ncol = K-1))
return(meaneffect)
}
#' @title varfunc
#' @description This function reads in the output matrices of a number of trial replicates to calculate the variance of the treatment effect estimates.
#' @param res A list of output matrices of a number of trial replicates
#'
#' @return The variance of the treatment effect estimates for each treatment arm
#' @export
#'
#' @examples
#' \dontrun{varfunc(res)}
varfunc = function(res) {
K=mean(sapply(res,function(x){
K=sum(stringr::str_detect(colnames(x),"H"))+1
return(K)
}))
meaneffect = matrixStats::colVars(matrix(t(sapply(res,function(x){
stage=dim(x)[1]
resname=colnames(x)
K=sum(stringr::str_detect(colnames(x),"H"))+1
test.data = x[,(K-1+2*K+1):(K-1+2*K+K-1)]
drop.at.mat = which(matrix((test.data %in% NA),ncol = K-1), arr.ind = TRUE)
if (length(drop.at.mat)>=1){
# Find unique column indices
unique_cols <- unique(drop.at.mat[, 2])
drop.at.all=rep(stage,K-1)
treatmentindex=seq(1,K-1)
# Initialize a result matrix
trtmean.loc <- matrix(NA, nrow = length(unique_cols), ncol = 2)
colnames(trtmean.loc) <- c("drop.at.all", "treatmentindex")
# Iterate through unique column indices
for (i in 1:length(unique_cols)) {
trtdrop <- unique_cols[i]
drop.at <- min(drop.at.mat[drop.at.mat[, 2] == trtdrop, 1]) - 1
drop.at.all[trtdrop]=drop.at
}
trtmean.loc=cbind(c(max(drop.at.all),drop.at.all),c(1,treatmentindex+1))
meanres = matrix(x[, (K - 1 + 2 * K + K - 1 + 1 + 1):(K - 1 + 2 *
K + K - 1 + 1 + K - 1)], ncol = K - 1)
result = rep(NA, K - 1)
for (i in 1:(K - 1)) {
result[i] = meanres[trtmean.loc[i+1, 1], trtmean.loc[i+1, 2]-1]
}
return(result)
}
else{
drop.at.all=rep(stage,K)
treatmentindex=seq(1,K-1)
trtmean.loc=cbind(drop.at.all,c(1,treatmentindex+1))
meanres = matrix(x[, (K - 1 + 2 * K + K - 1 + 1 + 1):(K - 1 + 2 *
K + K - 1 + 1 + K - 1)], ncol = K - 1)
result = rep(NA, K - 1)
for (i in 1:(K - 1)) {
result[i] = meanres[trtmean.loc[i+1, 1], trtmean.loc[i+1, 2]-1]
}
return(result)
}
})),ncol = K-1))
return(meaneffect)
}
#' @title Nfunc
#' @description This function reads in the output matrices of a number of trial replicates to calculate the mean estimate of the total number of patients allocated to each arm
#' @param res A list of output matrices of a number of trial replicates
#'
#' @return The mean estimate of the total number of patients allocated to each arm
#' @export
#'
#' @examples
#' \dontrun{Nfunc(res)}
#'
Nfunc=function(res){
K=mean(sapply(res,function(x){
K=sum(stringr::str_detect(colnames(x),"H"))+1
return(K)
}))
Nmean=colMeans(matrix(t(sapply(res,function(x){
stage=dim(x)[1]
resname=colnames(x)
K=sum(stringr::str_detect(colnames(x),"H"))+1
test.data = x[,(K-1+2*K+1):(K-1+2*K+K-1)]
drop.at.mat = which(matrix((test.data %in% NA),ncol = K-1), arr.ind = TRUE)
if (length(drop.at.mat)>=1){
# Find unique column indices
unique_cols <- unique(drop.at.mat[, 2])
drop.at.all=rep(stage,K-1)
treatmentindex=seq(1,K-1)
# Initialize a result matrix
trtmean.loc <- matrix(NA, nrow = length(unique_cols), ncol = 2)
colnames(trtmean.loc) <- c("drop.at.all", "treatmentindex")
# Iterate through unique column indices
for (i in 1:length(unique_cols)) {
trtdrop <- unique_cols[i]
drop.at <- min(drop.at.mat[drop.at.mat[, 2] == trtdrop, 1]) - 1
drop.at.all[trtdrop]=drop.at
}
trtmean.loc=cbind(c(max(drop.at.all),drop.at.all),c(1,treatmentindex+1))
Nres=matrix(x[,seq(K,K-1+2*K-1,2)],ncol = K)
result=rep(NA,K)
for (i in 1:K){
result[i]=Nres[trtmean.loc[i,1],trtmean.loc[i,2]]
}
return(result)
}
else{
drop.at.all=rep(stage,K)
treatmentindex=seq(1,K-1)
trtmean.loc=cbind(drop.at.all,c(1,treatmentindex+1))
Nres=matrix(x[,seq(K,K-1+2*K-1,2)],ncol = K)
result=rep(NA,K)
for (i in 1:K){
result[i]=Nres[trtmean.loc[i,1],trtmean.loc[i,2]]
}
return(result)
}
})),ncol = K))
return(Nmean)
}
#' @title Sperarmfunc
#' @description This function reads in the output matrices of a number of trial replicates to calculate the mean total number of surviving patients in each arm
#' @param res A list of output matrices of a number of trial replicates
#'
#' @return The mean total number of surviving patients in each arm
#' @export
#'
#' @examples
#' \dontrun{Sperarmfunc(res)}
Sperarmfunc = function(res) {
K=mean(sapply(res,function(x){
K=sum(stringr::str_detect(colnames(x),"H"))+1
return(K)
}))
Smean = colMeans(matrix(t(sapply(res,function(x){
stage=dim(x)[1]
resname=colnames(x)
K=sum(stringr::str_detect(colnames(x),"H"))+1
test.data = x[,(K-1+2*K+1):(K-1+2*K+K-1)]
drop.at.mat = which(matrix((test.data %in% NA),ncol = K-1), arr.ind = TRUE)
if (length(drop.at.mat)>=1){
# Find unique column indices
unique_cols <- unique(drop.at.mat[, 2])
drop.at.all=rep(stage,K-1)
treatmentindex=seq(1,K-1)
# Initialize a result matrix
trtmean.loc <- matrix(NA, nrow = length(unique_cols), ncol = 2)
colnames(trtmean.loc) <- c("drop.at.all", "treatmentindex")
# Iterate through unique column indices
for (i in 1:length(unique_cols)) {
trtdrop <- unique_cols[i]
drop.at <- min(drop.at.mat[drop.at.mat[, 2] == trtdrop, 1]) - 1
drop.at.all[trtdrop]=drop.at
}
trtmean.loc=cbind(c(max(drop.at.all),drop.at.all),c(1,treatmentindex+1))
Nres=matrix(x[, seq(K + 1, K - 1 + 2 * K, 2)], ncol = K)
result=rep(NA,K)
for (i in 1:K){
result[i]=Nres[trtmean.loc[i,1],trtmean.loc[i,2]]
}
return(result)
}
else{
drop.at.all=rep(stage,K)
treatmentindex=seq(1,K-1)
trtmean.loc=cbind(drop.at.all,c(1,treatmentindex+1))
Nres=matrix(x[, seq(K + 1, K - 1 + 2 * K, 2)], ncol = K)
result=rep(NA,K)
for (i in 1:K){
result[i]=Nres[trtmean.loc[i,1],trtmean.loc[i,2]]
}
return(result)
}
})),ncol = K))
return(Smean)
}
# list.of.analysisfunction <-
# list(
# perHtypeIerrorfunc = perHtypeIerrorfunc,
# FWERfunc = FWERfunc,
# Meanfunc = Meanfunc,
# varfunc = varfunc,
# Nfunc = Nfunc,
# Sperarmfunc = Sperarmfunc
# )
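# Illustrative usage (a minimal sketch; `res` is assumed to be a list of
# output matrices from replicate trial simulations, e.g. produced by
# foreach(icount(ntrials)) %dopar% simulatetrial(...)):
#
# FWER <- conjuncativepower_or_FWER(res, scenario = c(0.4, 0.4), test.type = "Twoside")
# marginal <- perHtypeIerror_marginalpowerfunc(res)
# meaneffect <- Meanfunc(res)
# meanN <- Nfunc(res)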
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/Analysis_listofanalysisfunction.R
|
#' The 'BayesianPlatformDesignTimeTrend' package.
#'
#' @description This package simulates multi-arm multi-stage or platform trials with a Bayesian approach using the 'rstan' package, which provides the R interface to Stan.
#' The package implements Thall's and Trippa's approaches to Bayesian adaptive randomisation.
#' In addition, the time trend problem of platform trials can be studied with this package.
#' The package includes a demo of a multi-arm multi-stage trial under two different null scenarios.
#'
#' @docType package
#' @name BayesianPlatformDesignTimeTrend-package
#' @aliases BayesianPlatformDesignTimeTrend
#' @useDynLib BayesianPlatformDesignTimeTrend, .registration = TRUE
#' @import methods
#' @import Rcpp
#' @importFrom rstan sampling
#'
#' @references
#' Stan Development Team (2020). RStan: the R interface to Stan. R package version 2.21.2. https://mc-stan.org
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/BayesianPlatformDesignTimeTrend-package.R
|
#' @title demo_Cutoffscreening
#' @description This function does a cutoff screening for trial simulation.
#' @param ntrials A numeric variable indicating how many trial replicates you want to run
#' @param trial.fun The function of trial simulation, related to MainFunction.R
#' @param grid.inf A list of grid information to create start grid and extend grid for cutoff screening.
#' @param input.info A list of input information including all information required for trial simulation.
#' @param cl A numeric variable indicating how many cores you want to use in parallel programming.
#'
#' @return A vector of recommended cutoffs, where the final value is the latest recommendation, and a plot of all tested cutoffs against the error rate
#' @importFrom doParallel registerDoParallel
#' @importFrom foreach %dopar%
#' @importFrom foreach foreach
#' @importFrom iterators icount
#' @importFrom stats lm
#' @importFrom stats predict
#' @importFrom rstan rstan_options
#' @importFrom RcppParallel RcppParallelLibs CxxFlags
#' @importFrom graphics lines
#' @import BiocManager
#' @export
#'
#' @examples
#' \donttest{demo_Cutoffscreening(ntrials = 2, cl = 2,
#' grid.inf = list(start = c(0.9, 0.95, 1), extendlength = 2))}
#' @author Ziyan Wang
demo_Cutoffscreening = function(ntrials = 1000,
trial.fun = simulatetrial,
grid.inf = list(start = c(0.9, 0.95, 1), extendlength =
15),
input.info = list(
response.probs = c(0.4, 0.4),
ns = c(30, 60, 90, 120, 150),
max.ar = 0.75,
rand.algo = "Urn",
max.deviation = 3,
test.type = "Twoside",
model.inf = list(
model = "tlr",
ibb.inf = list(
pi.star = 0.5,
pess = 2,
betabinomialmodel = ibetabinomial.post
),
tlr.inf = list(
beta0_prior_mu = 0,
beta1_prior_mu = 0,
beta0_prior_sigma = 2.5,
beta1_prior_sigma = 2.5,
beta0_df = 7,
beta1_df = 7,
reg.inf = "main",
variable.inf = "Fixeffect"
)
),
Stop.type = "Early-Pocock",
Boundary.type = "Symmetric",
Random.inf = list(
Fixratio = FALSE,
Fixratiocontrol = NA,
BARmethod = "Thall",
Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1)
),
trend.inf = list(
trend.type = "step",
trend.effect = c(0, 0),
trend_add_or_multip = "mult"
)
),
cl = 2) {
  old <- options() # save user options; restored on exit below
on.exit(options(old))
#Set start grid of screening
startgrid <-
data.frame(tpIE = rep(NA, length(grid.inf$start)), cutoff = grid.inf$start)
extendgrid <-
data.frame(
tpIE = rep(NA, grid.inf$extendlength),
cutoff = rep(NA, grid.inf$extendlength)
)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores(logical = FALSE))
registerDoParallel(cores = cl)
message("Start the start grid screening")
for (j in 1:dim(startgrid)[1]) {
#Construct the stop.inf list
Stopbound.inf = Stopboundinf(
Stop.type = input.info$Stop.type,
Boundary.type = input.info$Boundary.type,
cutoff = c(startgrid[j, 2], 1 - startgrid[j, 2])
)
result = foreach(icount(ntrials)) %dopar% trial.fun(
response.probs = input.info$response.probs,
ns = input.info$ns,
max.ar = input.info$max.ar,
rand.algo = input.info$rand.algo,
max.deviation = input.info$max.deviation,
model.inf = input.info$model.inf,
Stopbound.inf = Stopbound.inf,
Random.inf = input.info$Random.inf,
trend.inf = input.info$trend.inf
)
# perHtypeIerror=mean(perHtypeIerrorfunc(result))
FWER = conjuncativepower_or_FWER(result,input.info$response.probs,test.type = input.info$test.type)
startgrid[j, 1] = FWER
}
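  # Screening logic: fit a quadratic model of the error rate against the
  # cutoff, predict over a fine grid, and treat cutoffs whose predicted error
  # rate is within 0.0025 of the 0.05 target as candidates; the next cutoff
  # is then sampled with probability inversely proportional to its distance
  # from the target.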
startgrid$cutoff2 <- startgrid$cutoff ^ 2
quadratic.model <-
lm(tpIE ~ cutoff + cutoff2, data = data.frame(startgrid))
cutoffgrid <- seq(0.9, 1, 0.0001)
predictedtpIE <-
predict(quadratic.model,
list(cutoff = cutoffgrid, cutoff2 = cutoffgrid ^ 2))
# plot(tpIE~cutoff, pch=16, xlab = "cutoff", ylab = "tpIE", cex.lab = 1.3, col = "blue",data = data.frame(startgrid))
# lines(cutoffgrid, predictedtpIE, col = "darkgreen", lwd = 3)
potentialcutoff = cutoffgrid[abs(predictedtpIE - 0.05) <= 0.0025]
e = 1e-10
randomprobability = (1 / (abs(predictedtpIE[abs(predictedtpIE - 0.05) <=
0.0025] - 0.05) + e)) / sum(1 / (abs(predictedtpIE[abs(predictedtpIE - 0.05) <=
0.0025] - 0.05) + e))
if (length(potentialcutoff) > 1){
nextcutoff = sample(potentialcutoff, 1, replace = T, prob = randomprobability)
}
else {
nextcutoff = cutoffgrid[which.min(abs(predictedtpIE - 0.05))]
}
extendgrid[1, 2] = nextcutoff
recommand = {
}
message(paste("Start the extend grid screening.","There are", grid.inf$extendlength ,"cutoff values under investigation in the extend grid"))
for (cutoffindex in 1:(dim(extendgrid)[1])) {
#Construct the stop.inf list
Stopbound.inf = Stopboundinf(
Stop.type = input.info$Stop.type,
Boundary.type = input.info$Boundary.type,
cutoff = c(extendgrid[cutoffindex, 2], 1 - extendgrid[cutoffindex, 2])
)
restlr090five = foreach(icount(ntrials)) %dopar% trial.fun(
response.probs = input.info$response.probs,
ns = input.info$ns,
max.ar = input.info$max.ar,
rand.algo = input.info$rand.algo,
max.deviation = input.info$max.deviation,
model.inf = input.info$model.inf,
Stopbound.inf = Stopbound.inf,
Random.inf = input.info$Random.inf,
trend.inf = input.info$trend.inf
)
FWER = conjuncativepower_or_FWER(restlr090five, input.info$response.probs,test.type = input.info$test.type)
extendgrid[cutoffindex, 1] = FWER
extendgrid$cutoff2 <- extendgrid$cutoff ^ 2
quadratic.model <-
lm(tpIE ~ cutoff + cutoff2, data = data.frame(rbind(startgrid, extendgrid)))
cutoffgrid <- seq(0.9, 1, 0.0001)
predictedtpIE <-
predict(quadratic.model,
list(cutoff = cutoffgrid, cutoff2 = cutoffgrid ^ 2))
# plot(tpIE~cutoff, pch=16, xlab = "cutoff", ylab = "tpIE", cex.lab = 1.3, col = "blue",data = data.frame(rbind(startgrid,extendgrid)))
# lines(cutoffgrid, predictedtpIE, col = "darkgreen", lwd = 3)
potentialcutoff = cutoffgrid[abs(predictedtpIE - 0.05) <= 0.0025]
randomprobability = (1 / (abs(predictedtpIE[abs(predictedtpIE - 0.05) <=
0.0025] - 0.05) + e)) / sum(1 / (abs(predictedtpIE[abs(predictedtpIE - 0.05) <=
0.0025] - 0.05) + e))
if (length(potentialcutoff) == 0) {
randomprobability = 1
potentialcutoff = extendgrid[cutoffindex, 2]
}
if (length(potentialcutoff) > 1){
extendgrid[cutoffindex + 1, 2] = sample(potentialcutoff, 1, replace = T, prob = randomprobability)
      recommand = c(recommand, potentialcutoff[which.max(randomprobability)])
message(paste("Finished extend grid screening round", cutoffindex))
}
else {
extendgrid[cutoffindex + 1, 2] = cutoffgrid[which.min(abs(predictedtpIE - 0.05))]
recommand = c(recommand, cutoffgrid[which.min(abs(predictedtpIE - 0.05))])
message(paste("Finished extend grid screening round", cutoffindex))
}
}
message("Output data recording")
dataloginformd = data.frame(rbind(startgrid, extendgrid))
recommandloginformd = recommand
quadratic.model <-
lm(tpIE ~ cutoff + cutoff2, data = dataloginformd)
cutoffgrid <- seq(0.9, 1, 0.0001)
predictedtpIEinformd <-
predict(quadratic.model,
list(cutoff = cutoffgrid, cutoff2 = cutoffgrid ^ 2))
doParallel::stopImplicitCluster()
return(
list(
detailsforgrid = dataloginformd,
recommandcutoff = recommandloginformd,
predictedtpIEinformd = predictedtpIEinformd
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/Demo_CutoffScreening.R
|
#' @title A demo for cutoff screening using Bayesian optimisation
#' @description This function does a cutoff screening for trial simulation using Bayesian optimisation.
#'
#' @param ntrials A numeric variable indicating how many trial replicates you want to run
#' @param trial.fun The function of trial simulation, related to MainFunction.R
#' @param grid.inf A list of grid information to create start grid and extend grid for cutoff screening.
#' 'start.length' is the size of start grid. Default is 10.
#' 'grid.min' A numeric value or vector (for asymmetric boundary) indicating the lower bound of the grid for screening. For asymmetric boundary, the first value is efficacy minimum value and the second value is futility minimum value.
#' 'grid.max' A numeric value or vector (for asymmetric boundary) indicating the upper bound of the grid for screening. For asymmetric boundary, the first value is efficacy maximum value and the second value is futility maximum value.
#' 'errorrate' refers to the target of type I error rate or family-wise error rate. Default is 0.05. User can change it to 0.1 for FWER if they think 0.05 is too conservative. The per-hypothesis type I error equals errorrate / (K-1) where (K-1) is the number of treatment arms.
#' 'confidence.level' is a numeric value indicating the confidence level of estimate. Default is 0.95.
#' 'grid.length' is a numeric value indicating the grid resolution. Default is 5000 for a symmetric boundary. For an asymmetric boundary, the grid length is 101 for both the efficacy grid and the futility grid.
#' 'change.scale' is a logical value indicating whether we want to change scale when doing the Gaussian process. Default is FALSE.
#' 'noise' is a logical value indicating whether the input x is noisy. Default is TRUE.
#' 'simulationerror' is a numeric value indicating the tolerable error for the simulated type I error rate. Default is 0.01.
#' 'iter.max' is a numeric value indicating the maximum number of evaluations. Default is 15.
#' 'plotornot' is a logical value indicating whether the error rate vs grid plot should be generated at each iteration. Default is FALSE.
#' @param input.info A list of input information including all information required for trial simulation.
#' @param cl A numeric variable indicating how many cores you want to use in parallel programming.
#' @param power.type An indicator of which type of power we need to optimise when tuning the cutoff values for an asymmetric boundary. Default is NA (symmetric boundary).
#' The choices of power type are conjunctive power ("Conjunctive") and disconjunctive power ("Disconjunctive"). In a two-arm trial design, these power types are the same.
#' @param response.probs.alt A vector of response probability of each arm under the alternative scenario. This is used for power optimisation when tuning the cutoff values for asymmetric boundary. Default is NA.
#'
#' @return A vector of recommended cutoffs, where the final value is the latest recommendation, and a plot of all tested cutoffs against the error rate
#' @importFrom doParallel registerDoParallel
#' @importFrom foreach %dopar%
#' @importFrom foreach foreach
#' @importFrom iterators icount
#' @importFrom stats lm dist runif
#' @importFrom stats predict
#' @importFrom rstan rstan_options
#' @importFrom RcppParallel RcppParallelLibs CxxFlags
#' @importFrom graphics lines
#' @importFrom grDevices colorRampPalette
#' @importFrom graphics image par points
#' @importFrom ggpubr ggarrange
#' @importFrom RColorBrewer brewer.pal
#' @import BiocManager
#' @export
#'
#' @examples
#' \donttest{
#' #Two arm asymmetric boundary screening. Default is OBF boundary.
#' demo_Cutoffscreening.GP(ntrials = 2, cl = 2,
#' power.type = NA,
#' response.probs.alt = NA,
#' grid.inf = list(
#' start.length = 10,
#' confidence.level = 0.95,
#' grid.length = 5000,
#' change.scale = FALSE,
#' noise = TRUE,
#' errorrate = 0.1,
#' simulationerror = 0.01,
#' iter.max = 15,
#' plotornot = FALSE))
#'
#' #Four arm asymmetric OBF boundary screening where conjunctive power is optimised.
#' demo_Cutoffscreening.GP(ntrials = 2, cl = 2,
#' power.type = "Conjunctive",
#' response.probs.alt = c(0.4,0.6,0.6,0.4),
#' grid.inf = list(
#' start.length = 10,
#' confidence.level = 0.95,
#' grid.length = 101,
#' change.scale = FALSE,
#' noise = TRUE,
#' errorrate = 0.1,
#' simulationerror = 0.01,
#' iter.max = 15,
#' plotornot = FALSE))
#' input.info = list(
#' response.probs.null = c(0.4,0.4,0.4,0.4),
#' ns = c(120, 240, 360, 480, 600),
#' max.ar = 0.85,
#' rand.algo = "Urn",
#' max.deviation = 3,
#' test.type = "Twoside",
#' model.inf = list(
#' model = "tlr",
#' ibb.inf = list(
#' pi.star = 0.5,
#' pess = 2,
#' betabinomialmodel = ibetabinomial.post
#' ),
#' tlr.inf = list(
#' beta0_prior_mu = 0,
#' beta1_prior_mu = 0,
#' beta0_prior_sigma = 2.5,
#' beta1_prior_sigma = 2.5,
#' beta0_df = 7,
#' beta1_df = 7,
#' reg.inf = "main",
#' variable.inf = "Fixeffect"
#' )
#' ),
#' Stop.type = "Early-OBF",
#' Boundary.type = "Asymmetric",
#' Random.inf = list(
#' Fixratio = FALSE,
#' Fixratiocontrol = NA,
#' BARmethod = "Thall",
#' Thall.tuning.inf = list(tuningparameter = "Unfixed", fixvalue = 1)
#' ),
#' trend.inf = list(
#' trend.type = "step",
#' trend.effect = c(0, 0, 0, 0),
#' trend_add_or_multip = "mult"
#' )
#' )
#' }
#'
#' @author Ziyan Wang
demo_Cutoffscreening.GP = function(ntrials = 1000,
trial.fun = simulatetrial,
grid.inf = list(
start.length = 10,
grid.min = NULL,
grid.max = NULL,
confidence.level = 0.95,
grid.length =
5000,
change.scale = FALSE,
noise = TRUE,
errorrate = 0.1,
simulationerror = 0.01,
iter.max = 15,
plotornot = FALSE
),
power.type = NA,
response.probs.alt = NA,
input.info = list(
response.probs.null = c(0.4,0.4,0.4,0.4),
ns = c(120, 240, 360, 480, 600),
max.ar = 0.85,
rand.algo = "Urn",
max.deviation = 3,
test.type = "Twoside",
model.inf = list(
model = "tlr",
ibb.inf = list(
pi.star = 0.5,
pess = 2,
betabinomialmodel = ibetabinomial.post
),
tlr.inf = list(
beta0_prior_mu = 0,
beta1_prior_mu = 0,
beta0_prior_sigma = 2.5,
beta1_prior_sigma = 2.5,
beta0_df = 7,
beta1_df = 7,
reg.inf = "main",
variable.inf = "Fixeffect"
)
),
Stop.type = "Early-OBF",
Boundary.type = "Symmetric",
Random.inf = list(
Fixratio = FALSE,
Fixratiocontrol = NA,
BARmethod = "Thall",
Thall.tuning.inf = list(tuningparameter = "Unfixed", fixvalue = 1)
),
trend.inf = list(
trend.type = "step",
trend.effect = c(0, 0, 0, 0),
trend_add_or_multip = "mult"
)
),
cl = 2) {
  old <- options() # save user options; restored on exit below
on.exit(options(old))
Boundary.type=input.info$Boundary.type
grid.length=grid.inf$grid.length
#Set start grid of screening
if (Boundary.type == "Symmetric") {
if (is.null(grid.inf$grid.min) | is.null(grid.inf$grid.max)) {
if (input.info$Stop.type == "Early-OBF") {
grid.min = 1
grid.max = 8
}
else{
grid.min = 0.9
grid.max = 1
}
}
else if (grid.inf$grid.min >= grid.inf$grid.max) {
stop("Error: grid.min should be greater tha grid.max")
}
else{
grid.min = grid.inf$grid.min
grid.max = grid.inf$grid.max
}
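    # lhs::maximinLHS returns a space-filling design on [0, 1], which is then
    # rescaled to the requested cutoff range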
start = lhs::maximinLHS(grid.inf$start.length, 1) * (grid.max - grid.min) +
grid.min
startgrid <-
data.frame(tpIE = rep(NA, grid.inf$start.length),
cutoff = start)
extendgrid <-
data.frame(
tpIE = rep(NA, grid.inf$iter.max),
cutoff = rep(NA, grid.inf$iter.max)
)
}
else{
    if (any(is.na(response.probs.alt)) || input.info$response.probs.null[1] != response.probs.alt[1]) {
      stop("Error: For an asymmetric boundary, please input one null scenario and one related alternative scenario (sharing the null's control response probability) for type I error control and power optimisation")
}
# Define the number of samples and the number of variables
num_samples <- grid.inf$start.length # Change this to the desired number of samples
num_variables <- 2 # Change this to the number of variables you have
# Define the number of optimization iterations
num_iterations <- 10000
dis_vec<-{}
# Create a function to calculate the minimum pairwise distance in the LHS sample
calculate_min_pairwise_distance <- function(lhs_sample) {
dist_matrix <- as.matrix(dist(lhs_sample))
min_distance <- min(dist_matrix[upper.tri(dist_matrix)])
return(min_distance)
}
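    # Maximin criterion: among the shuffled candidate designs below, keep the
    # one that maximises the minimum pairwise distance, giving a space-filling
    # start design over the efficacy and futility cutoff dimensions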
# Initialize an LHS sample
lhs_sample <- matrix(NA, nrow = num_samples, ncol = num_variables)
    if (is.null(grid.inf$grid.min[1]) | is.null(grid.inf$grid.max[1]) | is.null(grid.inf$grid.min[2]) | is.null(grid.inf$grid.max[2])) {
if (input.info$Stop.type == "Early-OBF") {
grideff.min = 1
grideff.max = 8
gridfut.min = 1
gridfut.max = 8
}
else{
grideff.min = 0.9
grideff.max = 1
gridfut.min = 0
gridfut.max = 0.1
}
}
else if ((grid.inf$grid.min[1] >= grid.inf$grid.max[1]|
grid.inf$grid.min[2] >= grid.inf$grid.max[2]) & input.info$Stop.type != "Early-OBF") {
stop("Error: grid.min should be greater tha grid.max for boundary excepting for OBF")
}
else{
grideff.min = grid.inf$grid.min[1]
grideff.max = grid.inf$grid.max[1]
gridfut.min = grid.inf$grid.min[2]
gridfut.max = grid.inf$grid.max[2]
}
# Generate a set of random permutations for each variable within their respective ranges
for (i in 1:num_variables) {
if (i == 1) {
        lhs_sample[, i] <- runif(num_samples, min = grideff.min, max = grideff.max)
      } else if (i == 2) {
        lhs_sample[, i] <- runif(num_samples, min = gridfut.min, max = gridfut.max)
}
}
# Calculate the initial minimum pairwise distance
best_min_distance <- calculate_min_pairwise_distance(lhs_sample)
best_lhs_sample <- lhs_sample
lhs_sample.temp=lhs_sample
# Perform optimization
for (iteration in 1:num_iterations) {
# Shuffle the columns of the LHS sample
lhs_sample.temp[,1] <- lhs_sample[sample(num_samples), 1]
lhs_sample.temp[,2] <- lhs_sample[sample(num_samples), 2]
# Calculate the minimum pairwise distance of the shuffled sample
current_min_distance <- calculate_min_pairwise_distance(lhs_sample.temp)
dis_vec<-c(dis_vec,current_min_distance)
# If the current minimum distance is greater, update the best sample
if (current_min_distance > best_min_distance) {
best_min_distance <- current_min_distance
best_lhs_sample <- lhs_sample.temp
}
}
    startgrid.asy <- data.frame(tpIE = rep(NA, num_samples),
cutoff.eff = best_lhs_sample[,1],
cutoff.fut = best_lhs_sample[,2])
extendgrid <-
data.frame(
tpIE = rep(NA, grid.inf$iter.max),
cutoff.eff = rep(NA, grid.inf$iter.max),
cutoff.fut = rep(NA, grid.inf$iter.max)
)
}
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores(logical = FALSE))
registerDoParallel(cores = cl)
message("Start the start grid screening")
  n.start = if (Boundary.type == "Symmetric") nrow(startgrid) else nrow(startgrid.asy)
  for (j in 1:n.start) {
#Construct the stop.inf list
if (Boundary.type == "Symmetric") {
if (input.info$Stop.type == "Early-OBF") {
cutoff = c(startgrid[j, 2], startgrid[j, 2])
}
else{
cutoff = c(startgrid[j, 2], 1 - startgrid[j, 2])
}
Stopbound.inf = Stopboundinf(
Stop.type = input.info$Stop.type,
Boundary.type = input.info$Boundary.type,
cutoff = cutoff
)
result = foreach(icount(ntrials)) %dopar% trial.fun(
response.probs = input.info$response.probs.null,
ns = input.info$ns,
max.ar = input.info$max.ar,
rand.algo = input.info$rand.algo,
max.deviation = input.info$max.deviation,
model.inf = input.info$model.inf,
Stopbound.inf = Stopbound.inf,
Random.inf = input.info$Random.inf,
trend.inf = input.info$trend.inf
)
FWER = conjuncativepower_or_FWER(result,input.info$response.probs.null,test.type = input.info$test.type)
startgrid[j, 1] = FWER
}
    else{
      # Initialise the accumulators only once, on the first iteration, so that
      # the values appended below are not reset for every cutoff pair
      if (j == 1) {
        # Record effective sample size data for each cutoff pair
        samplesize.start.null = {}
        samplesize.start.alt = {}
        samplesize.ext.null = {}
        samplesize.ext.alt = {}
        if (power.type %in% c("Conjunctive", "Disconjunctive")) {
          conj.pow = {}
          disconj.pow = {}
          marg.pow = {}
        }
        else {
          stop("Error: Please specify a correct power type for asymmetric cutoff screening.")
        }
      }
cutoff = c(startgrid.asy[j, 2], startgrid.asy[j, 3])
Stopbound.inf = Stopboundinf(
Stop.type = input.info$Stop.type,
Boundary.type = input.info$Boundary.type,
cutoff = cutoff
)
result.null = foreach(icount(ntrials)) %dopar% trial.fun(
response.probs = input.info$response.probs.null,
ns = input.info$ns,
max.ar = input.info$max.ar,
rand.algo = input.info$rand.algo,
max.deviation = input.info$max.deviation,
model.inf = input.info$model.inf,
Stopbound.inf = Stopbound.inf,
Random.inf = input.info$Random.inf,
trend.inf = input.info$trend.inf
)
FWER = conjuncativepower_or_FWER(result.null,input.info$response.probs.null,test.type = input.info$test.type)
startgrid.asy[j, 1] = FWER
samplesize.start.null = c(samplesize.start.null, sum(Nfunc(result.null)))
result.alt = foreach(icount(ntrials)) %dopar% trial.fun(
response.probs = response.probs.alt,
ns = input.info$ns,
max.ar = input.info$max.ar,
rand.algo = input.info$rand.algo,
max.deviation = input.info$max.deviation,
model.inf = input.info$model.inf,
Stopbound.inf = Stopbound.inf,
Random.inf = input.info$Random.inf,
trend.inf = input.info$trend.inf
)
conj.pow = c(conj.pow,conjuncativepower_or_FWER(result.alt,response.probs.alt,test.type = input.info$test.type))
disconj.pow = c(disconj.pow, disconjunctivepowerfunc(result.alt))
samplesize.start.alt = c(samplesize.start.alt, sum(Nfunc(result.alt)))
# marg.pow = c(marg.pow)
}
}
if (Boundary.type == "Symmetric") {
GP.res = GP.optim(x = matrix(startgrid$cutoff,ncol=1), y.t1E = startgrid$tpIE,
grid.min=grid.min, grid.max=grid.max,errorrate = grid.inf$errorrate,
grid.length=grid.length,
Boundary.type = Boundary.type)
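    # GP.optim returns the suggested next cutoff to evaluate (next.cutoff)
    # together with posterior predictions over the cutoff grid (prediction)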
nextcutoff = GP.res$next.cutoff
extendgrid[1, 2] = nextcutoff
}
else{
if (power.type == "Conjunctive"){
startgrid.asy$pow = conj.pow
}
else{
startgrid.asy$pow = disconj.pow
}
GP.res = GP.optim(x = cbind(startgrid.asy$cutoff.eff, startgrid.asy$cutoff.fut),
y.t1E = startgrid.asy$tpIE, y.pow = startgrid.asy$pow,
errorrate = grid.inf$errorrate,
grid.min=c(grideff.min,gridfut.min), grid.max=c(grideff.max,gridfut.max),
grid.length=grid.length,
Boundary.type = Boundary.type)
nextcutoff = GP.res$next.cutoff
extendgrid[1, 2] = nextcutoff[1]
extendgrid[1, 3] = nextcutoff[2]
}
message(
paste(
"Start the extend grid screening.",
"There are at most",
grid.inf$iter.max ,
"cutoff values under evaluation in the extend grid"
)
)
for (cutoffindex in 1:(dim(extendgrid)[1])) {
if (Boundary.type == "Symmetric") {
if (input.info$Stop.type == "Early-OBF") {
cutoff = c(extendgrid[cutoffindex, 2], extendgrid[cutoffindex, 2])
}
else{
cutoff = c(extendgrid[cutoffindex, 2], 1 - extendgrid[cutoffindex, 2])
}
#Construct the stop.inf list
Stopbound.inf = Stopboundinf(
Stop.type = input.info$Stop.type,
Boundary.type = input.info$Boundary.type,
cutoff = cutoff
)
result.null = foreach(icount(ntrials)) %dopar% trial.fun(
response.probs = input.info$response.probs.null,
ns = input.info$ns,
max.ar = input.info$max.ar,
rand.algo = input.info$rand.algo,
max.deviation = input.info$max.deviation,
model.inf = input.info$model.inf,
Stopbound.inf = Stopbound.inf,
Random.inf = input.info$Random.inf,
trend.inf = input.info$trend.inf
)
FWER = conjuncativepower_or_FWER(result.null,input.info$response.probs.null,test.type = input.info$test.type)
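      # Flag whether the simulated FWER is already within the tolerable
      # simulation error of the target, so that the screening can stop early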
if (FWER <= grid.inf$errorrate * (1 + grid.inf$simulationerror) &
FWER >= grid.inf$errorrate * (1 - grid.inf$simulationerror)) {
earlyend = TRUE
}
else{
earlyend = FALSE
}
extendgrid[cutoffindex, 1] = FWER
dnew=data.frame(rbind(startgrid,extendgrid))
dnew=dnew[!is.na(dnew$cutoff),]
GP.res = GP.optim(dnew$cutoff, dnew$tpIE, grid.min=grid.min, grid.max=grid.max,
grid.length=grid.length,
errorrate = grid.inf$errorrate)
nextcutoff = GP.res$next.cutoff
extendgrid[cutoffindex + 1, 2] = nextcutoff
prediction = data.frame(
yhat.t1E = GP.res$prediction$yhat.t1E,
sd.t1E = GP.res$prediction$sd.t1E,
qup.t1E = GP.res$prediction$qup.t1E,
qdown.t1E = GP.res$prediction$qdown.t1E,
xgrid = GP.res$prediction$xgrid
)
# Plot or not
      if (isTRUE(grid.inf$plotornot)) {
        # Keep only the cutoff values evaluated so far
        screened = rbind(startgrid, extendgrid)
        screened = screened[!is.na(screened$tpIE), ]
        GPplot = ggplot(data = prediction) +
          geom_ribbon(aes(
            x = xgrid,
            ymin = qdown.t1E,
            ymax = qup.t1E
          ), col = "#f8766d", alpha = 0.5, linetype = 2) +
          geom_line(aes(xgrid, yhat.t1E), col = "#f8766d") +
          geom_point(aes(cutoff, tpIE), data = screened, col = "#00bfc4") +
          geom_hline(yintercept = grid.inf$errorrate) +
          geom_text(aes(
            x = grid.min,
            y = grid.inf$errorrate + 0.05,
            label = paste0("FWER target is ", grid.inf$errorrate)
          ), hjust = 0, vjust = 1) +
          geom_vline(xintercept = nextcutoff, linetype = 2) +
          geom_text(aes(
            x = nextcutoff,
            y = grid.inf$errorrate * 2,
            label = paste0("Next cutoff value is ", round(nextcutoff, 3))
          )) +
          annotate("point", x = nextcutoff, y = grid.inf$errorrate, colour = "#f8766d") +
          theme_minimal() + ylab("FWER") + xlab("Cutoff grid") +
          theme(plot.background = element_rect(fill = "#e6dfba")) +
          labs(title = paste0("Iteration ", cutoffindex))
      }
}
else{
cutoff = c(extendgrid[cutoffindex, 2], extendgrid[cutoffindex, 3])
#Construct the stop.inf list
Stopbound.inf = Stopboundinf(
Stop.type = input.info$Stop.type,
Boundary.type = input.info$Boundary.type,
cutoff = cutoff
)
result.null = foreach(icount(ntrials)) %dopar% trial.fun(
response.probs = input.info$response.probs.null,
ns = input.info$ns,
max.ar = input.info$max.ar,
rand.algo = input.info$rand.algo,
max.deviation = input.info$max.deviation,
model.inf = input.info$model.inf,
Stopbound.inf = Stopbound.inf,
Random.inf = input.info$Random.inf,
trend.inf = input.info$trend.inf
)
FWER = conjuncativepower_or_FWER(result.null,input.info$response.probs.null,test.type = input.info$test.type)
extendgrid[cutoffindex, 1] = FWER
samplesize.ext.null = c(samplesize.ext.null, sum(Nfunc(result.null)))
result.alt = foreach(icount(ntrials)) %dopar% trial.fun(
response.probs = response.probs.alt,
ns = input.info$ns,
max.ar = input.info$max.ar,
rand.algo = input.info$rand.algo,
max.deviation = input.info$max.deviation,
model.inf = input.info$model.inf,
Stopbound.inf = Stopbound.inf,
Random.inf = input.info$Random.inf,
trend.inf = input.info$trend.inf
)
conj.pow = conjuncativepower_or_FWER(result.alt,response.probs.alt,test.type = input.info$test.type)
disconj.pow = disconjunctivepowerfunc(result.alt)
samplesize.ext.alt = c(samplesize.ext.alt, sum(Nfunc(result.alt)))
# marg.pow = c(marg.pow)
if (power.type == "Conjunctive"){
extendgrid$pow[cutoffindex] = conj.pow
}
else{
extendgrid$pow[cutoffindex] = disconj.pow
}
dnew=data.frame(rbind(startgrid.asy,extendgrid))
dnew=dnew[!is.na(dnew$cutoff.eff),]
GP.res = GP.optim(x = cbind(dnew$cutoff.eff, dnew$cutoff.fut),
y.t1E = dnew$tpIE, y.pow = dnew$pow,grid.length=grid.length,
errorrate = grid.inf$errorrate,
grid.min=c(grideff.min,gridfut.min), grid.max=c(grideff.max,gridfut.max),
Boundary.type = Boundary.type)
nextcutoff = GP.res$next.cutoff
extendgrid[cutoffindex + 1, 2] = nextcutoff[1]
extendgrid[cutoffindex + 1, 3] = nextcutoff[2]
prediction = data.frame(
yhat.t1E = GP.res$prediction$yhat.t1E,
yhat.pow = GP.res$prediction$yhat.pow,
yhat.ESS.null = GP.res$prediction$yhat.ESS.null,
yhat.ESS.alt = GP.res$prediction$yhat.ESS.alt,
sd.t1E = GP.res$prediction$sd.t1E,
sd.pow = GP.res$prediction$sd.pow,
sd.ESS.null = GP.res$prediction$sd.ESS.null,
sd.ESS.alt = GP.res$prediction$sd.ESS.alt,
qup.ESS.null = GP.res$prediction$qup.ESS.null,
qup.ESS.alt = GP.res$prediction$qup.ESS.alt,
qdown.ESS.null = GP.res$prediction$qdown.ESS.null,
qdown.ESS.alt = GP.res$prediction$qdown.ESS.alt,
potentialcutoff = GP.res$prediction$potentialcutoff,
qup.t1E = GP.res$prediction$qup.t1E,
qdown.t1E = GP.res$prediction$qdown.t1E,
qup.pow = GP.res$prediction$qup.pow,
qdown.pow = GP.res$prediction$qdown.pow,
xgrid = GP.res$prediction$xgrid
)
# Construct the ESS data frame for plot or analysis
sample.mat=cbind(c(samplesize.start.null,samplesize.ext.null),c(samplesize.start.alt,samplesize.ext.alt))
# Plot or not
if (isTRUE(grid.inf$plotornot)) {
prediction=GP.res$prediction
colormap=colorRampPalette(rev(brewer.pal(11,'Spectral')))(32)
target_line=grid.inf$errorrate
xgrid.eff=prediction$xgrid[,1]
xgrid.fut=prediction$xgrid[,2]
nextcutoff.predict = nextcutoff
colnames(nextcutoff.predict)=c("eff","fut","FWER")
cleandata = dnew
colnames(cleandata)=c("tpIE","eff","fut","pow")
df=data.frame(FWER=prediction$yhat.t1E,eff=xgrid.eff,fut=xgrid.fut)
            Contour.tIE<-ggplot(df,aes(x=eff,y=fut,z=FWER))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=FWER))+
geom_contour(breaks=c(target_line, seq(min(df$FWER),max(df$FWER),by=(max(df$FWER)-min(df$FWER))/10)),color="black")+
geom_contour(breaks=target_line,color="white",linewidth=1.1)+
labs(title="Mean type I error rate (FWER)", x="Cutoff value for efficacy",y="Cutoff value for futility")
            Contour.tIE=Contour.tIE+geom_point(data=cleandata,aes(x=eff,y=fut,z=NA),color="black")+
              geom_point(data=nextcutoff.predict,aes(x=eff,y=fut,z=NA),color="pink")
# Extract the contour data
contour_data_tIE <- ggplot_build(Contour.tIE)$data[[2]]
# Record the contour that has FWER equal to the target
contour_data_tIE_subset <- contour_data_tIE[contour_data_tIE$level == target_line, ]
# Order and split the data to ensure the plot is drawn correctly
contour_data_tIE_subset=contour_data_tIE_subset[order(contour_data_tIE_subset$piece,contour_data_tIE_subset$x),]
contour_data_tIE_subset_1=contour_data_tIE_subset[contour_data_tIE_subset$piece==1,]
contour_data_tIE_subset_2=contour_data_tIE_subset[contour_data_tIE_subset$piece==2,]
    # Make sure neither data frame is empty (pad with an NA row so geom_path does not fail)
    if (nrow(contour_data_tIE_subset_1) == 0){
      contour_data_tIE_subset_1[1,]=(rep(NA,dim(contour_data_tIE_subset_1)[[2]]))
    }
    if (nrow(contour_data_tIE_subset_2) == 0){
      contour_data_tIE_subset_2[1,]=(rep(NA,dim(contour_data_tIE_subset_2)[[2]]))
    }
df=data.frame(precision=prediction$sd.t1E,eff=xgrid.eff,fut=xgrid.fut)
Contour.sd<-ggplot(df,aes(x=df$eff,y=df$fut,z=df$precision))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=df$precision))+
geom_contour(breaks=seq(min(df$precision),max(df$precision),by=(max(df$precision)-min(df$precision))/10),color="black")+labs(title="sd of each contour plot", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.sd=Contour.sd+
      geom_path(data = contour_data_tIE_subset_1,
                aes(contour_data_tIE_subset_1$x,contour_data_tIE_subset_1$y,z=NA),color="white",linewidth=1.1)+
geom_path(data = contour_data_tIE_subset_2, aes(contour_data_tIE_subset_2$x,contour_data_tIE_subset_2$y,z=NA),color="white",linewidth=1.1)+
geom_point(data=cleandata,aes(cleandata$eff,cleandata$fut,z=NA),color="black")+
geom_point(data=nextcutoff.predict,aes(x=nextcutoff.predict$eff,y=nextcutoff.predict$fut,z=NA),color="pink")
df=data.frame(Power=prediction$yhat.pow,eff=xgrid.eff,fut=xgrid.fut)
Contour.pow<-ggplot(df,aes(x=df$eff,y=df$fut,z=df$Power))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=df$Power))+
geom_contour(breaks=seq(min(df$Power),max(df$Power),by=(max(df$Power)-min(df$Power))/10),color="black")+labs(title="Mean power", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.pow=Contour.pow+
      geom_path(data = contour_data_tIE_subset_1,
                aes(contour_data_tIE_subset_1$x,contour_data_tIE_subset_1$y,z=NA),color="white",linewidth=1.1)+
geom_path(data = contour_data_tIE_subset_2, aes(contour_data_tIE_subset_2$x,contour_data_tIE_subset_2$y,z=NA),color="white",linewidth=1.1)+
geom_point(data=cleandata,aes(cleandata$eff,cleandata$fut,z=NA),color="black")+
geom_point(data=nextcutoff.predict,aes(x=nextcutoff.predict$eff,y=nextcutoff.predict$fut,z=NA),color="pink")
df=data.frame(NullESS=prediction$yhat.ESS.null,eff=xgrid.eff,fut=xgrid.fut)
Contour.nullESS<-ggplot(df,aes(x=df$eff,y=df$fut,z=df$NullESS))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=df$NullESS))+
geom_contour(breaks=seq(min(df$NullESS),max(df$NullESS),by=(max(df$NullESS)-min(df$NullESS))/10),color="black")+labs(title="Mean ESS under null", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.nullESS=Contour.nullESS+
      geom_path(data = contour_data_tIE_subset_1,
                aes(contour_data_tIE_subset_1$x,contour_data_tIE_subset_1$y,z=NA),color="white",linewidth=1.1)+
geom_path(data = contour_data_tIE_subset_2, aes(contour_data_tIE_subset_2$x,contour_data_tIE_subset_2$y,z=NA),color="white",linewidth=1.1)+
geom_point(data=cleandata,aes(cleandata$eff,cleandata$fut,z=NA),color="black")+
geom_point(data=nextcutoff.predict,aes(x=nextcutoff.predict$eff,y=nextcutoff.predict$fut,z=NA),color="pink")
df=data.frame(AltESS=prediction$yhat.ESS.alt,eff=xgrid.eff,fut=xgrid.fut)
Contour.altESS<-ggplot(df,aes(x=df$eff,y=df$fut,z=df$AltESS))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=df$AltESS))+
geom_contour(breaks=seq(min(df$AltESS),max(df$AltESS),by=(max(df$AltESS)-min(df$AltESS))/10),color="black")+labs(title="Mean ESS under alternative", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.altESS=Contour.altESS+
      geom_path(data = contour_data_tIE_subset_1,
                aes(contour_data_tIE_subset_1$x,contour_data_tIE_subset_1$y,z=NA),color="white",linewidth=1.1)+
geom_path(data = contour_data_tIE_subset_2, aes(contour_data_tIE_subset_2$x,contour_data_tIE_subset_2$y,z=NA),color="white",linewidth=1.1)+
geom_point(data=cleandata,aes(cleandata$eff,cleandata$fut,z=NA),color="black")+
geom_point(data=nextcutoff.predict,aes(x=nextcutoff.predict$eff,y=nextcutoff.predict$fut,z=NA),color="pink")
# Plot these figures
ggarrange(Contour.tIE,Contour.pow,Contour.nullESS,Contour.altESS,Contour.sd,ncol = 2,nrow=3)
}
    # TODO: add an early stopping criterion (discussed with Dave on 05/09/2023)
}
# Stop the iteration
if (isTRUE(earlyend)) {
message(paste("Finished extend grid screening round", cutoffindex))
message("The iteration stopped early because the optimal cutoff value is found.")
break
}
else{
message(paste("Finished extend grid screening round", cutoffindex))
}
}
message("Output data recording")
dataloginformd = data.frame(rbind(startgrid, extendgrid))
doParallel::stopImplicitCluster()
return(list(
detailsforgrid = dataloginformd,
predictedtpIEinformd = GP.res$prediction
))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/Demo_CutoffScreening_GP.R
|
#' @title demo_multscenario
#' @description This is a demo function simulating multi-arm multi-stage design with two different null scenarios where the response probability of control is 0.15 and 0.4, respectively.
#' The clinically meaningful increment on probability scale is 0.2. The stopping boundary is the OBF. The cutoff vector in the demo is tuned to keep Type I error rate to be 0.05. The output data can be saved as .RData file
#'
#' @param ntrials A numeric value. The total number of trial replicates for each scenario.
#' @param cl A numeric variable indicating how many cores you want to use in parallel programming.
#' @param save_data An indicator of whether the output data need to be saved. Default is FALSE.
#'
#' @return A list of data for plotting: the trial-replicate results for all scenarios, and a data frame containing all summarised evaluation metrics for all scenarios
#' @export
#'
#' @examples
#' \donttest{demo_multscenario(ntrials = 2, cl = 2, save_data = FALSE)}
#' @author Ziyan Wang
demo_multscenario = function(ntrials = 1000,
cl = 2,
save_data = FALSE) {
message("Start trial information initialisation")
# ns = list(seq(60, 300, 60), seq(30, 300, 30))
ns = list(seq(60, 300, 60))
null.response.probs1 = 0.15
alt.response.probs1 = 0.35
null.response.probs2 = 0.4
alt.response.probs2 = 0.6
scenario = matrix(
c(
null.response.probs1,
null.response.probs1,
null.response.probs1,
alt.response.probs1,
null.response.probs2,
null.response.probs2,
null.response.probs2,
alt.response.probs2
),
ncol = 2,
nrow = 4,
byrow = T
)
cutoffearlyOBF = c(4.391, 4.661, 4.281, 4.512)
result = {
}
OPC = {
}
cutoffindex = 1
  message(
    "Start trial simulation. This is a two-arm trial simulation with two null scenarios and two alternative scenarios, each run with one vector of the number of patients at each stage. There are 4 rounds in this demo."
  )
for (i in 1:dim(scenario)[1]) {
for (z in 1:length(ns)) {
message(paste(
"Scenario",
i,
"with patient number sequence",
        paste(ns[[z]], collapse = ", "),
"under simulation"
))
restlr = Trial.simulation(
ntrials = ntrials,
trial.fun = simulatetrial,
input.info = list(
response.probs = scenario[i,],
ns = ns[[z]],
max.ar = 0.75,
test.type = "Twoside",
rand.algo = "Urn",
max.deviation = 3,
model.inf = list(
model = "tlr",
ibb.inf = list(
pi.star = 0.5,
pess = 2,
betabinomialmodel = ibetabinomial.post
),
tlr.inf = list(
beta0_prior_mu = 0,
beta1_prior_mu = 0,
beta0_prior_sigma = 2.5,
beta1_prior_sigma = 2.5,
beta0_df = 7,
beta1_df = 7,
reg.inf = "main",
variable.inf = "Fixeffect"
)
),
Stopbound.inf = Stopboundinf(
Stop.type = "Early-OBF",
Boundary.type = "Symmetric",
cutoff = c(cutoffearlyOBF[cutoffindex], cutoffearlyOBF[cutoffindex])
),
Random.inf = list(
Fixratio = FALSE,
Fixratiocontrol = NA,
BARmethod = "Thall",
Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1)
),
trend.inf = list(
trend.type = "step",
trend.effect = c(0, 0),
trend_add_or_multip = "mult"
)
),
cl = cl
)
      result = c(result, restlr$result)
      OPC = rbind(OPC, restlr$OPC)
      message(paste("Finished round", cutoffindex))
      cutoffindex = cutoffindex + 1
}
}
if (isTRUE(save_data)) {
save(result, file = restlr$Nameofsaveddata$nameData)
save(OPC, file = restlr$Nameofsaveddata$nameTable)
}
return(list(result = result, OPC = OPC))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/Demo_multiplescenariotrialsimulation.R
|
#' @title simulatetrial
#' @description This function simulates a MAMS trial applying adaptive methods where the time trend effect can be studied.
#' @param ii Meaningless parameter, required by the foreach function in the doParallel package
#' @param response.probs A vector of true response probability for each arm. Default response.probs = c(0.4, 0.4).
#' @param test.type An indicator of whether to use a one-sided or two-sided test for each treatment-control comparison.
#' @param ns A vector of accumulated number of patient at each stage. Default is ns = c(30, 60, 90, 120, 150).
#' @param max.ar The upper boundary of the randomisation ratio for each arm. Default is 0.75 for a two-arm trial. The value must satisfy 1 - max.ar <= 1/K, where K is the number of arms.
#' @param rand.algo The method of applying patient allocation with a given randomisation probability vector. Default is "Urn".
#' @param max.deviation The tuning parameter for Urn randomisation method. Default is 3.
#' @param Stopbound.inf The list of stop boundary information for more see \code{\link{Stopboundinf}}
#' @param Random.inf The list of Adaptive randomisation information for more see \code{\link{Randomisation.inf}}
#' @param trend.inf The list of time trend information
#' @param model.inf The list of interim data analysis model information for more see \code{\link{modelinf.fun}}
#'
#' @return A matrix including all evaluation metrics
#' @export
#'
#' @examples
#' set.seed(1)
#' simulatetrial(response.probs = c(0.4, 0.4),
#' ns = c(30, 60, 90, 120, 150),
#' max.ar = 0.75,
#' test.type = "Twoside",
#' rand.algo = "Urn",
#' max.deviation = 3,
#' model.inf = list(
#' model = "tlr",
#' ibb.inf = list(
#' pi.star = 0.5,
#' pess = 2,
#' betabinomialmodel = ibetabinomial.post
#' ),
#' tlr.inf = list(
#' beta0_prior_mu = 0,
#' beta1_prior_mu = 0,
#' beta0_prior_sigma = 2.5,
#' beta1_prior_sigma = 2.5,
#' beta0_df = 7,
#' beta1_df = 7,
#' reg.inf = "main",
#' variable.inf = "Fixeffect"
#' )
#' ),
#' Stopbound.inf = Stopboundinf(
#' Stop.type = "Early-Pocock",
#' Boundary.type = "Symmetric",
#' cutoff = c(0.99,0.01)
#' ),
#' Random.inf = list(
#' Fixratio = FALSE,
#' Fixratiocontrol = NA,
#' BARmethod = "Thall",
#' Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1),
#' Trippa.tuning.inf = list(a = 10, b = 0.75)
#' ),
#' trend.inf = list(
#' trend.type = "step",
#' trend.effect = c(0, 0),
#' trend_add_or_multip = "mult"
#' ))
#' @author Ziyan Wang
simulatetrial <- function(ii,
response.probs = c(0.4, 0.4),
ns = c(30, 60, 90, 120, 150),
test.type = "Twoside",
max.ar = 0.75,
rand.algo = "Urn",
max.deviation = 3,
model.inf = list(
model = "tlr",
ibb.inf = list(
pi.star = 0.5,
pess = 2,
betabinomialmodel = ibetabinomial.post
),
tlr.inf = list(
beta0_prior_mu = 0,
beta1_prior_mu = 0,
beta0_prior_sigma = 2.5,
beta1_prior_sigma = 2.5,
beta0_df = 7,
beta1_df = 7,
reg.inf = "main",
variable.inf = "Fixeffect"
)
),
Stopbound.inf = Stopbound.inf,
Random.inf = Random.inf,
trend.inf = trend.inf) {
#-Boundary construction-
Boundary = Boundaryconstruction(Stopbound.inf, ns = ns)
cutoffeff = Boundary$Efficacy.boundary
cutoffful = Boundary$Fultility.boundary
#-Randomisation inf check-
Random.inf = Randomisation.inf(Random.inf)
Fixratio = Random.inf$Fixratio
Fixratiocontrol = Random.inf$Fixratiocontrol
if (!is.logical(Fixratio)) stop("Error: Fixratio should be a logical value (TRUE/FALSE)")
  if (isTRUE(Fixratio) & !is.numeric(Fixratiocontrol)) stop("Error: Fixratiocontrol argument should be numeric for the fixed-ratio approach")
BARmethod = Random.inf$BARmethod
#List of information required for Thall's approach
Thall.tuning.inf = Random.inf$Thall.tuning.inf
#List of information required for Trippa's approach
Trippa.tuning.inf = Random.inf$Trippa.tuning.inf
#Identify whether the tuning parameter for Thall's approach is fixed or not
tuningparameter = Thall.tuning.inf$tuningparameter
#Fixed tuning parameter for Thall's approach if existing
c = Thall.tuning.inf$c
#Fixed tuning parameter for Trippa's approach if existing
a = Trippa.tuning.inf$a
b = Trippa.tuning.inf$b
#-Simulation setting-
#Initialize Data
initialised.par = Initializetrialparameter(response.probs, ns)
#Extract the list of initialized parameter
K = initialised.par$K
armleft = initialised.par$armleft
treatmentindex = initialised.par$treatmentindex
n = initialised.par$n
y1 = initialised.par$y1
groupwise.response.probs = initialised.par$groupwise.response.probs
randomprob = initialised.par$randomprob
z = initialised.par$z
y = initialised.par$y
group_indicator = initialised.par$group_indicator
post.prob.best.mat = initialised.par$post.prob.best.mat
if (test.type == "Twoside" & isFALSE(model.inf$Random.inf$Fixratio)){
warning("Adaptive randomisation aims to allocate to superior arms,
while two side test conclude both superiority and inferiority.
Therefore, these two approaches have conflicting purposes.")
}
#-max.ar check
if (1 - max.ar > 1/K){
stop("Error: The lower allocation ratio should be at least 1/K. Please check the number of arm at the beginning and the max.ar")
}
#Initialize the output data frame: stats
stats = OutputStats.initialising(model.inf$tlr.inf$variable.inf,
model.inf$tlr.inf$reg.inf,
ns,
K)
# ----For random effect initialisation
ntemp=matrix(rep(NA,nrow(stats)*K),nrow=K)
ytemp=matrix(rep(NA,nrow(stats)*K),nrow=K)
# -----
#Generating time trend function ("trend.function") based on input trend information
#and check whether the input is reasonable using the "Timeindicator" variable
Timetrendfunctionlist = Timetrend.fun(trend.inf)
trend.function = Timetrendfunctionlist$trend.function
Timeindicator = Timetrendfunctionlist$timetrendornot
trend_add_or_multip = Timetrendfunctionlist$trend_add_or_multip
trend.effect = Timetrendfunctionlist$trend.effect
#-Start data generation and analysing stage by stage-
for (group in 1:nrow(stats)) {
# Number of new enrolls during current group
n.new = c(0, ns)[group + 1] - c(0, ns)[group]
# Patient randomisation based on given randomization ratio
# Parameter checking
if (rand.algo %in% c("Coin", "Urn")) {
random.output = AdaptiveRandomisation(
Fixratio,
rand.algo,
K,
n.new,
randomprob,
treatmentindex,
groupwise.response.probs,
group,
armleft,
max.deviation,
trend_add_or_multip,
trend.function,
trend.effect,
ns,
Fixratiocontrol
)
}
else {
stop("Error: randomisation type wrong")
}
# Extract the output list
nstage = random.output$nstage
ystage = random.output$ystage
znew = random.output$znew
ynew = random.output$ynew
# Update parameters n: accumulated patients to each arm; y1: accumulated outcomes
# z: treatment index vector; y: outcome index vector; N: current total number of patients
# group_indicator: stage index vector indicating the stage at which each patient is treated
n = n + nstage
y1 = y1 + ystage
ntemp[,group]=nstage
ytemp[,group]=ystage
stats2 = as.vector(matrix(c(n, y1), nrow = 2, byrow = TRUE))
z = c(z, znew)
y = c(y, ynew)
N = length(z)
group_indicator = c(group_indicator, rep(group, n.new))
#-Modelling-
if (model.inf$model == "ibb") {
      # The beta-binomial model cannot estimate the mean treatment effect (stats4),
      # the treatment effect variance (stats5), the time trend effect (stats6),
      # or the time trend-treatment interaction (stats7);
      # therefore stats4-7 are left blank when generating the output matrix.
stats4 = {
}
stats5 = {
}
stats6 = {
}
stats7 = {
}
if (BARmethod == "Thall") {
        # Debugged for K arms by Ziyan Wang at 18:21 on 11/08/2022 (previously raised a stop error for K arms)
zdropped = z[z %in% as.numeric(names(randomprob[, randomprob != 0]))]
ydropped = y[z %in% as.numeric(names(randomprob[, randomprob != 0]))]
Ndropped = length(zdropped)
group_indicator_dropped = group_indicator[z %in% as.numeric(names(randomprob[, randomprob != 0]))]
zlevel = as.numeric(levels(factor(zdropped)))
for (zindex in 1:armleft) {
zdropped[zdropped == levels(factor(zdropped))[zindex]] = zindex
}
data = list(
K = armleft,
N = Ndropped,
y = array(ydropped, dim = Ndropped),
z = array(zdropped, dim = Ndropped),
group = group,
pistar = model.inf$ibb.inf$pi.star,
pess = model.inf$ibb.inf$pess
)
fit <- rstan::sampling(
stanmodels$betabinom,
data = data,
chains = 1,
refresh = 0,
warmup = 2500,
iter = 5000
)
sampeff = rstan::extract(fit, 'theta')[[1]]
colnames(sampeff) <- c(0, treatmentindex)
control = matrix(sampeff[, 1])
colnames(control) <- 0
treatment = matrix(sampeff[, -1], ncol = dim(sampeff)[2] - 1)
colnames(treatment) <- treatmentindex
#posterior of each treatment better than control
post.prob.btcontrol = colMeans(sapply(treatmentindex, function(treatmentindex) {
postprob = treatment[, as.numeric(colnames(treatment)) == treatmentindex] >
control
return(postprob)
}))
stats1 = rep(NA, K - 1)
names(stats1) = seq(1, K - 1)
stats1[treatmentindex] = post.prob.btcontrol
post.prob.best = colMeans(rstan::extract(fit, 'times_to_be_best')[[1]])
for (q in 1:armleft) {
post.prob.best.mat[group, zlevel[q]] = post.prob.best[q]
}
post.prob.best = post.prob.best.mat[group,]
#Normalizing in case any value equals zero
post.prob.best = post.prob.best + 1e-7
post.prob.best = post.prob.best / sum(post.prob.best)
        # Drop both superior and inferior arms and perform hypothesis testing
test_drop.inf = testing_and_armdropping(
K = K,
armleft = armleft,
post.prob.btcontrol = post.prob.btcontrol,
group = group,
cutoffeff = cutoffeff,
cutoffful = cutoffful,
treatmentindex = treatmentindex,
test.type = test.type
)
stats3 = test_drop.inf$stats3
armleft = test_drop.inf$armleft
treatmentindex = test_drop.inf$treatmentindex
}
else {
nibb = n[c(1, treatmentindex + 1)]
y1ibb = y1[c(1, treatmentindex + 1)]
        # Analytic posterior probabilities (one per active treatment arm).
        # Much faster than Stan, but it cannot generate the posterior
        # probability of each arm being the best.
resultibb = model.inf$ibb.inf$ibetabinomial.post(
n = nibb,
y = y1ibb,
pi.star = model.inf$ibb.inf$pi.star,
pess = model.inf$ibb.inf$pess
)
post.prob.btcontrol = resultibb
stats1 = rep(NA, K - 1)
names(stats1) = seq(1, K - 1)
stats1[treatmentindex] = post.prob.btcontrol
        # Drop both superior and inferior arms and perform hypothesis testing
test_drop.inf = testing_and_armdropping(
K = K,
armleft = armleft,
post.prob.btcontrol = post.prob.btcontrol,
group = group,
cutoffeff = cutoffeff,
cutoffful = cutoffful,
treatmentindex = treatmentindex,
test.type = test.type
)
stats3 = test_drop.inf$stats3
armleft = test_drop.inf$armleft
treatmentindex = test_drop.inf$treatmentindex
}
if (isFALSE(Fixratio)) {
#-Adjust the posterior randomisation ratio-
randomprob = ARmethod(
BARmethod,
group,
stats,
post.prob.btcontrol,
K,
n,
tuningparameter,
c,
a,
b,
post.prob.best,
max.ar,
armleft,
treatmentindex
)
}
}
else if (model.inf$model == "tlr") {
stan.data.temp = stan.logisticmodeltrans(
z,
y,
randomprob,
group_indicator,
armleft,
group,
model.inf$tlr.inf$variable.inf,
model.inf$tlr.inf$reg.inf
)
zdropped = stan.data.temp$zdropped
ydropped = stan.data.temp$ydropped
Ndropped = stan.data.temp$Ndropped
group_indicator_dropped = stan.data.temp$group_indicator_dropped
zlevel = stan.data.temp$zlevel
xdummy = stan.data.temp$xdummy
if (model.inf$tlr.inf$variable.inf == "Fixeffect" |
group == 1) {
data = list(
K = dim(xdummy)[2],
N = Ndropped,
y = array(ydropped, dim = Ndropped),
z = array(zdropped, dim = Ndropped),
x = xdummy,
group = group_indicator_dropped,
beta0_prior_mu = model.inf$tlr.inf$beta0_prior_mu,
beta1_prior_mu = model.inf$tlr.inf$beta1_prior_mu,
beta0_prior_sigma = model.inf$tlr.inf$beta0_prior_sigma,
beta1_prior_sigma = model.inf$tlr.inf$beta1_prior_sigma,
beta0_nu = model.inf$tlr.inf$beta0_df,
beta1_nu = model.inf$tlr.inf$beta1_df
)
fit <- rstan::sampling(
stanmodels$logisticdummy,
data = data,
chains = 1,
refresh = 0,
warmup = 2500,
iter = 5000
)
beta0 = matrix(rstan::extract(fit, 'b_Intercept')[[1]], ncol = 1)
statsbeta0 = colMeans(beta0)
processedfitresult = resultstantoRfunc(
group = group,
reg.inf = model.inf$tlr.inf$reg.inf,
variable.inf = model.inf$tlr.inf$variable.inf,
fit = fit,
armleft = armleft,
treatmentindex = treatmentindex,
K = K,
ns = ns
)
stats4 = processedfitresult$stats4
stats5 = processedfitresult$stats5
stats6 = processedfitresult$stats6
stats7 = processedfitresult$stats7
stats1 = processedfitresult$stats1
sampefftotal = processedfitresult$sampefftotal
post.prob.btcontrol = processedfitresult$post.prob.btcontrol
#-Calculating posterior probability of each arm (including control) to be the best arm-
# post.prob.best: The posterior probability of each arm (including control) to be the best arm
# This is required for Thall's randomisation approach
for (q in 1:armleft) {
post.prob.best.mat[group, zlevel[q]] = (sum(max.col(sampefftotal) == q)) /
2500
}
post.prob.best = post.prob.best.mat[group,]
#Normalizing in case any value equals zero
post.prob.best = post.prob.best + 1e-7
post.prob.best = post.prob.best / sum(post.prob.best)
      # Drop both superior and inferior arms and perform hypothesis testing
test_drop.inf = testing_and_armdropping(
K = K,
armleft = armleft,
post.prob.btcontrol = post.prob.btcontrol,
group = group,
cutoffeff = cutoffeff,
cutoffful = cutoffful,
treatmentindex = treatmentindex,
test.type = test.type
)
stats3 = test_drop.inf$stats3
armleft = test_drop.inf$armleft
treatmentindex = test_drop.inf$treatmentindex
}
else if (model.inf$tlr.inf$variable.inf == "Mixeffect.stan") {
dataran = list(
K = armleft,
N = Ndropped,
Y = array(ydropped, dim = Ndropped),
z = array(zdropped, dim = Ndropped),
X = xdummy,
groupmax = max(group_indicator_dropped),
group = group_indicator_dropped,
beta0_prior_mu = model.inf$tlr.inf$beta0_prior_mu,
beta1_prior_mu = model.inf$tlr.inf$beta1_prior_mu,
beta0_prior_sigma = model.inf$tlr.inf$beta0_prior_sigma,
beta1_prior_sigma = model.inf$tlr.inf$beta1_prior_sigma,
beta0_nu = model.inf$tlr.inf$beta0_df,
beta1_nu = model.inf$tlr.inf$beta1_df
)
fit <- rstan::sampling(
stanmodels$randomeffect,
data = dataran,
chains = 1,
refresh = 0,
warmup = 2500,
iter = 5000
)
beta0 = matrix(rstan::extract(fit, 'b_Intercept')[[1]], ncol = 1)
beta1 = rstan::extract(fit, "beta")[[1]]
statsbeta0 = mean(beta0+beta1[,1])
processedfitresult.rand = resultstantoRfunc.rand(
group = group,
fit = fit,
armleft = armleft,
treatmentindex = treatmentindex,
K = K,
ns = ns
)
stats4 = processedfitresult.rand$stats4
stats5 = processedfitresult.rand$stats5
stats6 = processedfitresult.rand$stats6
names(stats6) = max(group_indicator_dropped) - as.numeric(names(stats6)) +
2
stats6 = stats6[order(as.numeric(names(stats6)))]
stats7 = processedfitresult.rand$stats7
stats1 = processedfitresult.rand$stats1
sampefftotal = processedfitresult.rand$sampefftotal
post.prob.btcontrol = processedfitresult.rand$post.prob.btcontrol
#-Calculating posterior probability of each arm (including control) to be the best arm-
# post.prob.best: The posterior probability of each arm (including control) to be the best arm
# This is required for Thall's randomisation approach
for (q in 1:armleft) {
post.prob.best.mat[group, zlevel[q]] = (sum(max.col(sampefftotal) == q)) /
2500
}
post.prob.best = post.prob.best.mat[group,]
#Normalizing in case any value equals zero
post.prob.best = post.prob.best + 1e-7
post.prob.best = post.prob.best / sum(post.prob.best)
      # Drop both superior and inferior arms and perform hypothesis testing
test_drop.inf = testing_and_armdropping(
K = K,
armleft = armleft,
post.prob.btcontrol = post.prob.btcontrol,
group = group,
cutoffeff = cutoffeff,
cutoffful = cutoffful,
treatmentindex = treatmentindex,
test.type = test.type
)
stats3 = test_drop.inf$stats3
armleft = test_drop.inf$armleft
treatmentindex = test_drop.inf$treatmentindex
}
if (isFALSE(Fixratio)) {
#-Adjust the posterior randomisation ratio-
randomprob = ARmethod(
BARmethod,
group,
stats,
post.prob.btcontrol,
K,
n,
tuningparameter,
c,
a,
b,
post.prob.best,
max.ar,
armleft,
treatmentindex
)
}
}
stats[group,] = c(stats1, stats2, stats3, round(statsbeta0, 3), stats4, stats5, stats6, stats7)
if (armleft == 1) {
break
}
}
return(stats)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/MainFunction.R
|
#' @title alphaspending
#' @description This function estimates the mean error rate spent at each interim analysis for a trial.
#' Example usage:
#' 1. sapply(result, FUN = alphaspending) generates a list of the proportion of trial replicates stopped at each stage for all scenarios, where result is a list containing output data for different scenarios
#' 2. sapply(sapply(result, FUN = alphaspending), sum) generates the type I error rate or power for all scenarios in the result list
#' 3. alphaspending(result) generates the proportion of trial replicates stopped at each stage, where result is the output data for one specific scenario
#' 4. sum(alphaspending(result)) generates the type I error rate or power for a specific scenario
#' @param res A list of output matrices from a number of trial replicates
#'
#' @return The error rate at each interim analysis
#' @export
#'
#' @examples
#' \dontrun{alphaspending(res)}
#' @author Ziyan Wang
alphaspending = function(res) {
K = mean(sapply(res, function(x) {
K = sum(stringr::str_detect(colnames(x), "H")) + 1
return(K)
}))
ntrials = length(res)
round(colSums(t(sapply(res, function(x) {
rejectres = matrix(x[, (K - 1 + 2 * K + 1):(K - 1 + 2 * K + K - 1)], ncol =
K - 1)
return(rejectres)
})), na.rm = T) / ntrials, 4)
}
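# A minimal usage sketch (hypothetical object: 'result' is a list of scenarios,
# each itself a list of trial-replicate output matrices from simulatetrial()):
#   sapply(result, FUN = alphaspending)               # error spent at each stage, per scenario
#   sapply(sapply(result, FUN = alphaspending), sum)  # type I error rate or power, per scenario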
#' @title trtbias
#' @description This function estimates the mean bias of treatment effect
#' @param res A list of output matrix of a number of trial replicates
#'
#' @param trueeffect A vector of true treatment effect in each scenario
#'
#' @return A matrix of mean treatment effect bias
#' @export
#'
#' @examples
#' \dontrun{trtbias(res, trueeffect)}
trtbias = function(res, trueeffect) {
namedata = names(res)
datatempmeanout = {
}
count = 1
for (temp in 1:length(res)) {
K = mean(sapply(res[[temp]], function(x) {
K = sum(stringr::str_detect(colnames(x), "H")) + 1
return(K)
}))
ntrials = length(res[[temp]])
datatempmean = matrix(rep(NA, ntrials * (K - 1)), ncol = (K - 1))
datatempmean = matrix(t(sapply(res[[temp]], function(x) {
stage = dim(x)[1]
resname = colnames(x)
K = sum(stringr::str_detect(colnames(x), "H")) + 1
reject = which(matrix(x[, (K - 1 + 2 * K + 1):(K - 1 + 2 * K + K -
1)] %in% 1, ncol = K - 1), arr.ind = TRUE)[, 2]
if (length(reject) >= 1) {
drop.at = which(matrix(x[, (K - 1 + 2 * K + 1):(K - 1 + 2 * K + K - 1)] %in% 1, ncol =
K - 1), arr.ind = TRUE)[, 1]
drop.at.all = rep(stage, K - 1)
drop.at.all[reject] = drop.at
treatmentindex = seq(1, K - 1)
trtmean.loc = cbind(drop.at.all, treatmentindex)
meanres = matrix(x[, (K - 1 + 2 * K + K - 1 + 1 + 1):(K - 1 + 2 *
K + K - 1 + 1 + K - 1)], ncol = K - 1)
result = rep(NA, K - 1)
        for (i in 1:(K - 1)) {
result[i] = meanres[trtmean.loc[i, 1], trtmean.loc[i, 2]]
}
return(result)
}
else{
drop.at.all = rep(stage, K - 1)
treatmentindex = seq(1, K - 1)
trtmean.loc = cbind(drop.at.all, treatmentindex)
meanres = matrix(x[, (K - 1 + 2 * K + K - 1 + 1 + 1):(K - 1 + 2 *
K + K - 1 + 1 + K - 1)], ncol = K - 1)
result = rep(NA, K - 1)
          for (i in 1:(K - 1)) {
result[i] = meanres[trtmean.loc[i, 1], trtmean.loc[i, 2]]
}
return(result)
}
})), ncol = K - 1)
datatempmeanout = cbind(datatempmeanout, datatempmean - trueeffect[count])
count = count + 1
}
tempname = rep(namedata, each = K - 1)
colnames(datatempmeanout) = tempname
# colnames(datatempmeanout) = paste0(tempname,rep(c(1,2),length(res)))
datatemp = reshape::melt(datatempmeanout)
names(datatemp)[names(datatemp) == "value"] = "Treatmenteffect"
names(datatemp)[names(datatemp) == "X2"] = "Model"
return(datatemp)
}
# intdataout = function(res) {
# namedata = names(res)
# datatempmean = {
#
# }
# for (temp in 1:length(res)) {
# K = mean(sapply(res[[temp]], function(x) {
# K = sum(stringr::str_detect(colnames(x), "H")) + 1
# return(K)
# }))
# datatempmean = cbind(datatempmean, matrix(t(sapply(res[[temp]], function(x) {
# stage = dim(x)[1]
# resname = colnames(x)
# K = sum(stringr::str_detect(colnames(x), "H")) + 1
# reject = which(matrix(x[, (K - 1 + 2 * K + 1):(K - 1 + 2 * K + K -
# 1)] %in% 1, ncol = K - 1), arr.ind = TRUE)[, 2]
# if (length(reject) >= 1) {
# drop.at = which(matrix(x[, (K - 1 + 2 * K + 1):(K - 1 + 2 * K + K - 1)] %in% 1, ncol =
# K - 1), arr.ind = TRUE)[, 1]
# drop.at.all = rep(stage, K - 1)
# drop.at.all[reject] = drop.at
# treatmentindex = seq(1, K - 1)
# trtmean.loc = cbind(drop.at.all, treatmentindex)
# meanres = matrix(x[, (K - 1 + 2 * K + K - 1 + 1 + K - 1 + K - 1 +
# (stage - 1) + 1):(K - 1 + 2 * K + K - 1 + 1 + K - 1 + K - 1 + (stage - 1) +
# (stage - 1) * (K - 1))], ncol = (stage - 1) * (K - 1))
# result = rep(NA, K - 1)
# for (i in 1:(K - 1)) {
# if (trtmean.loc[i, 1] == 1) {
# result[i] = NA
# }
# else{
# result[i] = meanres[trtmean.loc[i, 1], trtmean.loc[i, 2] * (trtmean.loc[i, 1] -
# 1)]
# }
# }
# return(c(result, drop.at.all))
# }
# else{
# drop.at.all = rep(stage, K - 1)
# treatmentindex = seq(1, K - 1)
# trtmean.loc = cbind(drop.at.all, treatmentindex)
# meanres = matrix(x[, (K - 1 + 2 * K + K - 1 + 1 + K - 1 + K - 1 +
# (stage - 1) + 1):(K - 1 + 2 * K + K - 1 + 1 + K - 1 + K - 1 + (stage - 1) +
# (stage - 1) * (K - 1))], ncol = (stage - 1) * (K - 1))
# result = rep(NA, K - 1)
# for (i in 1:K - 1) {
# result[i] = meanres[trtmean.loc[i, 1], trtmean.loc[i, 2] * (stage - 1)]
# }
# return(c(result, drop.at.all))
# }
# })), ncol = 2 * (K - 1)))
# }
# dataname = {
#
# }
# for (nameind in 1:length(namedata)) {
# dataname = c(dataname, namedata[nameind], paste0(namedata[nameind], "_stage"))
# }
# colnames(datatempmean) = dataname
# # datatemp=reshape::melt(datatempmean)
# # names(datatemp)[names(datatemp)=="value"]="interactioneffect"
# # names(datatemp)[names(datatemp)=="X2"]="Model"
# return(datatempmean)
# }
#' @title intbias
#' @description This function estimates the mean bias of treatment - stage interaction effect
#' @param res A list of output matrix of a number of trial replicates
#'
#' @return A matrix of mean treatment - stage interaction effect bias
#' @export
#'
#' @examples
#' \dontrun{intbias(res)}
intbias = function(res) {
namedata = names(res)
datatempmean = {
}
for (temp in 1:length(res)) {
K = mean(sapply(res[[temp]], function(x) {
K = sum(stringr::str_detect(colnames(x), "H")) + 1
return(K)
}))
datatempmean = cbind(datatempmean, matrix(t(sapply(res[[temp]], function(x) {
stage = dim(x)[1]
resname = colnames(x)
K = sum(stringr::str_detect(colnames(x), "H")) + 1
reject = which(matrix(x[, (K - 1 + 2 * K + 1):(K - 1 + 2 * K + K -
1)] %in% 1, ncol = K - 1), arr.ind = TRUE)[, 2]
if (length(reject) >= 1) {
drop.at = which(matrix(x[, (K - 1 + 2 * K + 1):(K - 1 + 2 * K + K - 1)] %in% 1, ncol =
K - 1), arr.ind = TRUE)[, 1]
drop.at.all = rep(stage, K - 1)
drop.at.all[reject] = drop.at
treatmentindex = seq(1, K - 1)
trtmean.loc = cbind(drop.at.all, treatmentindex)
meanres = matrix(x[, (K - 1 + 2 * K + K - 1 + 1 + K - 1 + K - 1 +
(stage - 1) + 1):(K - 1 + 2 * K + K - 1 + 1 + K - 1 + K - 1 + (stage - 1) +
(stage - 1) * (K - 1))], ncol = (stage - 1) * (K - 1))
result = rep(NA, K - 1)
for (i in 1:(K - 1)) {
if (trtmean.loc[i, 1] == 1) {
result[i] = NA
}
else{
result[i] = meanres[trtmean.loc[i, 1], trtmean.loc[i, 2] * (trtmean.loc[i, 1] -
1)]
}
}
return(result)
}
else{
drop.at.all = rep(stage, K - 1)
treatmentindex = seq(1, K - 1)
trtmean.loc = cbind(drop.at.all, treatmentindex)
meanres = matrix(x[, (K - 1 + 2 * K + K - 1 + 1 + K - 1 + K - 1 +
(stage - 1) + 1):(K - 1 + 2 * K + K - 1 + 1 + K - 1 + K - 1 + (stage - 1) +
(stage - 1) * (K - 1))], ncol = (stage - 1) * (K - 1))
result = rep(NA, K - 1)
          for (i in 1:(K - 1)) {
result[i] = meanres[trtmean.loc[i, 1], trtmean.loc[i, 2] * (stage - 1)]
}
return(result)
}
})), ncol = K - 1))
}
colnames(datatempmean) = namedata
datatemp = reshape::melt(datatempmean)
names(datatemp)[names(datatemp) == "value"] = "interactioneffect"
names(datatemp)[names(datatemp) == "X2"] = "Model"
return(datatemp)
}
#' trteffect
#' @description This function estimates the mean treatment effect bias and its Monte Carlo standard error
#' @param res A list of output matrix of a number of trial replicates
#' @param trueeff A vector of true treatment effect in each scenario
#'
#' @return A matrix whose first row is the mean treatment effect bias and whose second row is its Monte Carlo standard error
#' @export
#'
#' @examples
#' \dontrun{trteffect(res, trueeff)}
trteffect = function(res, trueeff) {
K = sapply(res, function(x) {
K = sum(stringr::str_detect(colnames(x), "H")) + 1
return(K)
})
ntrials = length(res)
samp = t(sapply(res, function(x) {
stage = dim(x)[1]
resname = colnames(x)
K = sum(stringr::str_detect(colnames(x), "H")) + 1
reject = which(matrix(x[, (K - 1 + 2 * K + 1):(K - 1 + 2 * K + K - 1)] %in% 1, ncol =
K - 1), arr.ind = TRUE)[, 2]
meanres = matrix(x[, (K - 1 + 2 * K + K - 1 + 1 + 1):(K - 1 + 2 * K +
K - 1 + 1 + K - 1)], ncol = K - 1)
})) - trueeff
meaneffect = colMeans(samp, na.rm = T)
sdeffect = apply(samp, 2, stats::sd, na.rm = T)
replicate = {
}
for (i in 1:dim(samp)[2]) {
replicate[i] = ntrials - sum(is.na(samp[, i]))
}
seeffect = sdeffect / sqrt(replicate)
return(rbind(meaneffect, seeffect))
}
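# A minimal usage sketch (hypothetical objects: 'res' is a list of trial-replicate
# output matrices and 'trueeff' the true treatment effect on the model scale):
#   trteffect(res, trueeff)  # row 1: mean bias; row 2: its Monte Carlo standard error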
# list.of.Plotfunction <-
# list(
# alphaspending = alphaspending,
# trtbias = trtbias,
# intdataout = intdataout,
# intbias = intbias,
# trteffect = trteffect,
# Nfunc = Nfunc,
# Sperarmfunc = Sperarmfunc
# )
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/Plot_listoffunctionforplotgeneration.R
|
#' @title Save.resulttoRDatafile
#' @description This function generates the file names for the output table and dataset
#'
#' @param input.info A list of input information required for trial simulation
#'
#' @return A list of names for the table and dataset
#' @export
#'
#' @examples
#' Save.resulttoRDatafile(
#' input.info = list(
#' response.probs = c(0.4, 0.4),
#' ns = c(30, 60, 90, 120, 150),
#' max.ar = 0.75,
#' rand.type = "Urn",
#' max.deviation = 3,
#' model.inf = list(
#' model = "tlr",
#' ibb.inf = list(
#' pi.star = 0.5,
#' pess = 2,
#' betabinomialmodel = ibetabinomial.post
#' ),
#' tlr.inf = list(
#' beta0_prior_mu = 0,
#' beta1_prior_mu = 0,
#' beta0_prior_sigma = 2.5,
#' beta1_prior_sigma = 2.5,
#' beta0_df = 7,
#' beta1_df = 7,
#' reg.inf = "main",
#' variable.inf = "Fixeffect"
#' )
#' ),
#' Stop.type = "Early-Pocock",
#' Boundary.type = "Symmetric",
#' Random.inf = list(
#' Fixratio = FALSE,
#' Fixratiocontrol = NA,
#' BARmethod = "Thall",
#' Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1)
#' ),
#' trend.inf = list(
#' trend.type = "step",
#' trend.effect = c(0, 0),
#' trend_add_or_multip = "mult"
#' )
#' ))
#' @author Ziyan Wang
Save.resulttoRDatafile = function(input.info = list(
response.probs = c(0.4, 0.4),
ns = c(30, 60, 90, 120, 150),
max.ar = 0.75,
rand.type = "Urn",
max.deviation = 3,
model.inf = list(
model = "tlr",
ibb.inf = list(
pi.star = 0.5,
pess = 2,
betabinomialmodel = ibetabinomial.post
),
tlr.inf = list(
beta0_prior_mu = 0,
beta1_prior_mu = 0,
beta0_prior_sigma = 2.5,
beta1_prior_sigma = 2.5,
beta0_df = 7,
beta1_df = 7,
reg.inf = "main",
variable.inf = "Fixeffect"
)
),
Stopbound.inf = Stopboundinf(
Stop.type = "Early-Pocock",
Boundary.type = "Symmetric",
cutoff = c(0.99, 0.01)
),
Random.inf = list(
Fixratio = FALSE,
Fixratiocontrol = NA,
BARmethod = "Thall",
Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1)
),
trend.inf = list(
trend.type = "step",
trend.effect = c(0, 0),
trend_add_or_multip = "mult"
)
)) {
if (sum(input.info$trend.inf$trend.effect != 0) > 0) {
trendornot = "TREND"
}
else{
trendornot = "NOTREND"
}
nameTable = paste0(
"TABLE",
trendornot,
toupper(input.info$Stopbound.inf$Stop.type),
toupper(input.info$Stopbound.inf$Boundary.type),
toupper(input.info$Random.inf$BARmethod),
".RData"
)
nameData = paste0(
"DATA",
trendornot,
toupper(input.info$Stopbound.inf$Stop.type),
toupper(input.info$Stopbound.inf$Boundary.type),
toupper(input.info$Random.inf$BARmethod),
".RData"
)
return(list(nameTable = nameTable, nameData = nameData))
}
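# With the defaults above (trend.effect = c(0, 0), hence "NOTREND", and assuming
# Stopboundinf() echoes its inputs), the generated file names would be:
#   "TABLENOTRENDEARLY-POCOCKSYMMETRICTHALL.RData"
#   "DATANOTRENDEARLY-POCOCKSYMMETRICTHALL.RData"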
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/Saveoutput_toRData.R
|
#' GP.optim: optimiser to give the next cutoff for evaluation
#' @description A function to predict the next cutoff value for evaluation.
#'
#' @param x A numeric vector of cutoff data
#' @param confidence.level A numeric value indicating the confidence level of estimate. Default is 0.95
#' @param grid.length A numeric value indicating the grid resolution. Default is 1000.
#' @param change.scale A logical value indicating whether to rescale the inputs when fitting the Gaussian process. Default is FALSE.
#' @param noise A logical value indicating whether the input x is noisy. Default is TRUE.
#' @param grid.min A numeric value or vector (for asymmetric boundary) indicating the lower bound of the grid for screening. For asymmetric boundary, the first value is efficacy minimum value and the second value is futility minimum value.
#' @param grid.max A numeric value or vector (for asymmetric boundary) indicating the upper bound of the grid for screening. For asymmetric boundary, the first value is efficacy maximum value and the second value is futility maximum value.
#' @param y.t1E A numeric vector of type I error rate data
#' @param y.pow A numeric vector of power data. You can input conjunctive, disconjunctive or marginal power data. Default is NA. Only used when Boundary.type == "Asymmetric"
#' @param Boundary.type A character string indicating what type of boundary is used. Default is "Symmetric"
#' @param ESS A matrix of effective sample size. This is only called for asymmetric boundary cutoff screening. Default is NA for symmetric boundary.
#' The first column is the ESS for different cutoff pair under the null scenario, the second column is the ESS for different cutoff pair under the alternative scenario.
#' @param errorrate 'errorrate' refers to the target of type I error rate or family-wise error rate. Default is 0.05. User can change it to 0.1 for FWER if they think 0.05 is too conservative. The per-hypothesis type I error equals errorrate / (K-1) where (K-1) is the number of treatment arms.
#'
#' @return A list including the next cutoff value for evaluation `next.cutoff` and a list of predictions for screening grid.
#' @importFrom laGP distance
#' @importFrom stats qnorm
#' @importFrom stats optimize
#' @export
#'
#' @examples
#' x = c(7.123968, 6.449631, 1.984406,
#' 3.507463, 4.972510, 2.925768,
#' 5.816682, 4.367796,
#' 7.349160, 1.113648)
#' y.t1E = c(0.0396, 0.0450,
#' 0.5116, 0.2172,
#' 0.1040, 0.3058,
#' 0.0592, 0.1384,
#' 0.0296, 0.7936)
#' grid.min=1
#' grid.max=8
#' GP.res=GP.optim(x=x, y.t1E=y.t1E, errorrate = 0.1, grid.min = grid.min, grid.max = grid.max)
#' GP.res$next.cutoff
#'
#' x = data.frame(matrix(c(
#' 0.9563408, 0.006295626,
#' 0.9669739, 0.014395030,
#' 0.9959410, 0.034858339,
#' 0.9635357, 0.048435579,
#' 0.9794314, 0.021659226,
#' 0.9552201, 0.018442535,
#' 0.9655374, 0.035281833,
#' 0.9837123, 0.010656442,
#' 0.9974910, 0.047741842,
#' 0.9989172, 0.012982826), byrow=TRUE, ncol = 2))
#' y.t1E = c(0.3044, 0.2938, 0.2573, 0.4780, 0.2923, 0.3733, 0.4263, 0.1962, 0.2941, 0.1131)
#' y.pow = c(0.8300, 0.8239, 0.7102, 0.7291, 0.8205, 0.7984, 0.7709, 0.8418, 0.6359, 0.5609)
#' ESS = data.frame(matrix(c(
#' 594.672, 560.580,
#' 596.148, 566.328,
#' 597.840, 590.124,
#' 590.052, 544.800,
#' 597.024, 574.716,
#' 593.952, 554.580,
#' 593.676, 554.400,
#' 598.500, 583.896,
#' 595.740, 590.520,
#' 599.580, 598.644),byrow=TRUE,ncol=2))
#' grid.min=c(0.95,0)
#' grid.max=c(1,0.05)
#' GP.res_asy=GP.optim(x=x, y.t1E=y.t1E, y.pow=y.pow, ESS=ESS,errorrate = 0.1,
#' grid.min = grid.min, grid.max = grid.max, Boundary.type="Asymmetric")
#' GP.res_asy$next.cutoff
#' @references Surrogates: Gaussian process modeling, design, and optimization for the applied sciences. CRC press. Gramacy, R.B., 2020.
#' Bayesian optimization for adaptive experimental design: A review. IEEE access, 8, 13937-13948. Greenhill, S., Rana, S., Gupta, S., Vellanki, P., & Venkatesh, S. (2020).
#' @author Ziyan Wang
GP.optim = function(x,
y.t1E,
y.pow=NA,
ESS=NA,
errorrate = 0.05,
confidence.level = 0.95,
grid.length = 1000,
change.scale = FALSE,
noise = T,
grid.min,
grid.max,
Boundary.type = "Symmetric") {
# Debug here for GP model with Bayesian optimisation on April 25, 2023
eps = .Machine$double.eps
  # Negative concentrated log marginal likelihood of the nugget g (to be minimised)
nlg = function(g, D, y) {
n = length(y)
K = exp(-D) + diag(g, n)
Ki = solve(K)
logdetK = determinant(K, logarithm = T)$modulus
loglike = -(n / 2) * log(t(y) %*% Ki %*% y) - 0.5 * logdetK
return(-loglike)
}
D = distance(x)
if (noise) {
g = optimize(nlg,
interval = c(eps ^ 0.5, var(y.t1E)),
D = D,
y = y.t1E)$minimum
}
else{
g = eps
}
if (Boundary.type == "Symmetric"){
gridx = matrix(seq(grid.min, grid.max, length.out = grid.length), ncol=1)
if (change.scale) {
grid.new = (gridx - min(x)) / (max(x) - min(x))
x.new = (x - min(x)) / (max(x) - min(x))
}
else{
x.new = x
grid.new = gridx
}
}
else{
    # if (grid.length > 101) {
    #   warning(
    #     "The grid length (accuracy) is too large for asymmetric boundary screening.
    #     This may lead to the error message: 'Error: vector memory exhausted (limit reached?)',
    #     because computing the distance between all grid points generates a matrix with
    #     grid.length^2 rows and grid.length^2 columns.
    #     grid.length = 101 is recommended and is accurate enough."
    #   )
    # }
gridx.eff = seq(grid.min[1], grid.max[1], length.out = grid.length)
gridx.fut = seq(grid.min[2], grid.max[2], length.out = grid.length)
if (change.scale) {
grid.new.eff = (gridx.eff - min(x[,1])) / (max(x[,1]) - min(x[,1]))
grid.new.fut = (gridx.fut - min(x[,2])) / (max(x[,2]) - min(x[,2]))
grid.new = expand.grid(grid.new.eff, grid.new.fut)
x.new.eff = (x[,1] - min(x[,1])) / (max(x[,1]) - min(x[,1]))
x.new.fut = (x[,2] - min(x[,2])) / (max(x[,2]) - min(x[,2]))
x.new = cbind(x.new.eff, x.new.fut)
}
else{
x.new = x
grid.new = expand.grid(gridx.eff, gridx.fut)
}
}
D = distance(x.new)
K = exp(-D) + diag(g, ncol(D))
# DXX = distance(grid.new)
  # Only the diagonal of the grid self-distance matrix (all zeros) is used, to save memory.
  # To obtain the full predictive variance-covariance matrix instead, use DXX and KXX
  # (commented out with #). Note the covariance matrix becomes very large for a fine grid.
DXX_diag = rep(0, dim(grid.new)[[1]])
KXX_diag = exp(-DXX_diag) + rep(g, length(DXX_diag))
# KXX = exp(-DXX) + diag(g, ncol(DXX))
DX = distance(grid.new, x.new)
KX = exp(-DX)
Ki = solve(K)
if (Boundary.type == "Symmetric"){
yhat.t1E = KX %*% Ki %*% y.t1E
yhat.pow = NA
tau_squared.t1E = drop(t(y.t1E) %*% Ki %*% y.t1E / length(y.t1E))
sigma.t1E_diag = tau_squared.t1E * (KXX_diag - rowSums((KX %*% Ki) * KX))
sd.t1E = sqrt(abs(sigma.t1E_diag))
sd.pow = NA
# construct confidence interval
qup.t1E = yhat.t1E + qnorm(confidence.level, 0, sd.t1E)
qdown.t1E = yhat.t1E - qnorm(confidence.level, 0, sd.t1E)
qdown.pow = NA
qup.pow = NA
yhat.ESS.null = NA
yhat.ESS.alt = NA
sd.ESS.null = NA
sd.ESS.alt = NA
qup.ESS.null = NA
qup.ESS.alt = NA
qdown.ESS.null = NA
qdown.ESS.alt = NA
# GP finished
# Bayesian optimisation start
    # Construct the target set: predicted error rate within 1% (relative) of errorrate, i.e. in (0.99 * errorrate, 1.01 * errorrate)
target = abs(yhat.t1E - errorrate) <= errorrate / 100
# construct the potential cutoff set
potentialcutoff = grid.new[which(target)]
e = 1e-10
weighs = 1 / sqrt((abs(yhat.t1E[target] - errorrate) + e) * abs(sigma.t1E_diag)[which(target)])
randomprobability = weighs / sum(weighs)
    # Randomise the next value from the potential set
    # Debugged on 11/06/2023 by Ziyan Wang: a CRAN check found an error due to the use of sample()
if (length(potentialcutoff) >= 1){
next.cutoff = potentialcutoff[sample(length(potentialcutoff), 1, replace = T, prob = randomprobability)]
}
else {
next.cutoff = grid.new[which.min(abs(yhat.t1E - errorrate))]
}
}
else{
# Model Type I error rate
yhat.t1E = KX %*% Ki %*% y.t1E
tau_squared.t1E = drop(t(y.t1E) %*% Ki %*% y.t1E / length(y.t1E))
sigma.t1E_diag = tau_squared.t1E * (KXX_diag - rowSums((KX %*% Ki) * KX))
sd.t1E = sqrt(abs(sigma.t1E_diag))
# construct confidence interval
qup.t1E = yhat.t1E + qnorm(confidence.level, 0, sd.t1E)
qdown.t1E = yhat.t1E - qnorm(confidence.level, 0, sd.t1E)
# Model power
yhat.pow = KX %*% Ki %*% y.pow
tau_squared.pow = drop(t(y.pow) %*% Ki %*% y.pow / length(y.pow))
sigma.pow_diag = tau_squared.pow * (KXX_diag - rowSums((KX %*% Ki) * KX))
sd.pow = sqrt(abs(sigma.pow_diag))
# construct confidence interval
qup.pow = yhat.pow + qnorm(confidence.level, 0, sd.pow)
qdown.pow = yhat.pow - qnorm(confidence.level, 0, sd.pow)
# Model ESS under null
y.ESS.null = ESS[,1]
yhat.ESS.null = KX %*% Ki %*% y.ESS.null
tau_squared.ESS.null = drop(t(y.ESS.null) %*% Ki %*% y.ESS.null / length(y.ESS.null))
sigma.ESS.null_diag = tau_squared.ESS.null * (KXX_diag - rowSums((KX %*% Ki) * KX))
sd.ESS.null= sqrt(abs(sigma.ESS.null_diag))
# construct confidence interval
qup.ESS.null = yhat.ESS.null + qnorm(confidence.level, 0, sd.ESS.null)
qdown.ESS.null = yhat.ESS.null - qnorm(confidence.level, 0, sd.ESS.null)
# Model ESS under alternative
y.ESS.alt = ESS[,2]
yhat.ESS.alt = KX %*% Ki %*% y.ESS.alt
tau_squared.ESS.alt = drop(t(y.ESS.alt) %*% Ki %*% y.ESS.alt / length(y.ESS.alt))
sigma.ESS.alt_diag = tau_squared.ESS.alt * (KXX_diag - rowSums((KX %*% Ki) * KX))
sd.ESS.alt = sqrt(abs(sigma.ESS.alt_diag))
# construct confidence interval
qup.ESS.alt = yhat.ESS.alt + qnorm(confidence.level, 0, sd.ESS.alt)
qdown.ESS.alt = yhat.ESS.alt - qnorm(confidence.level, 0, sd.ESS.alt)
# GP finished
# Bayesian optimisation start
    # Construct the target set: predicted error rate within 1% (relative) of errorrate, i.e. in (0.99 * errorrate, 1.01 * errorrate)
target = abs(yhat.t1E - errorrate) <= errorrate / 100
# construct the potential cutoff set
potentialcutoff = grid.new[which(target),]
    # For the asymmetric boundary we optimise power, so no weighted randomisation is used.
    # Instead, we pick the cutoff pair with the highest predicted power among those
    # keeping the type I error rate under control.
if (dim(potentialcutoff)[1] >= 1){
next.cutoff = potentialcutoff[which.max(yhat.pow[as.numeric(row.names(potentialcutoff))]),]
}
else {
next.cutoff = grid.new[which.min(abs(yhat.t1E - errorrate)),]
}
}
return(list(
next.cutoff = next.cutoff,
prediction = list(
yhat.t1E = yhat.t1E,
yhat.pow = yhat.pow,
yhat.ESS.null = yhat.ESS.null,
yhat.ESS.alt = yhat.ESS.alt,
sd.t1E = sd.t1E,
sd.pow = sd.pow,
sd.ESS.null = sd.ESS.null,
sd.ESS.alt = sd.ESS.alt,
qup.t1E = qup.t1E,
qdown.t1E = qdown.t1E,
qup.pow = qup.pow,
qdown.pow= qdown.pow,
qup.ESS.null = qup.ESS.null,
qup.ESS.alt = qup.ESS.alt,
qdown.ESS.null = qdown.ESS.null,
qdown.ESS.alt = qdown.ESS.alt,
potentialcutoff = potentialcutoff,
xgrid = grid.new
)
))
}
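# The predictive equations above follow the standard GP posterior under a
# squared-exponential kernel, with laGP::distance() returning squared Euclidean
# distances. A self-contained toy sketch of the same algebra on assumed 1-d
# data (illustration only, unrelated to any trial):
#   library(laGP)
#   x    <- matrix(seq(0, 1, length.out = 8), ncol = 1)
#   y    <- sin(2 * pi * x[, 1])
#   g    <- sqrt(.Machine$double.eps)              # small nugget
#   K    <- exp(-distance(x)) + diag(g, nrow(x))   # training covariance
#   Ki   <- solve(K)
#   xnew <- matrix(seq(0, 1, length.out = 50), ncol = 1)
#   KX   <- exp(-distance(xnew, x))                # cross covariance
#   yhat <- KX %*% Ki %*% y                        # posterior mean, as in GP.optim()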
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/Screening_GP_optim.R
|
#' @title ARmethod
#' @description This function adjusts the posterior randomisation probability for each arm using one of several approaches.
#' Currently Thall's approach and Trippa's approach are implemented.
#' Double biased coin and other methods will be added in a future version.
#' @param BARmethod The indicator of which adaptive randomisation method is used
#' @param group The current stage
#' @param stats The output matrix
#' @param post.prob.btcontrol The vector of posterior probability of each active treatment arm better than control
#' @param K Total number of arms at the beginning
#' @param n The vector of sample size for each arm
#' @param tuningparameter The tuning parameter indicator for Thall's approach
#' @param c The tuning parameter for Thall's approach
#' @param a The hyperparameter a for Trippa's approach
#' @param b The hyperparameter b for Trippa's approach
#' @param post.prob.best Posterior probability of each arm to be the best
#' @param max.ar The upper boundary for randomisation ratio for each arm, which is used in Thall's approach since Trippa's approach has protection on control arm.
#' @param armleft The number of arms (including control) remaining in the platform (at least 2)
#' @param treatmentindex The vector of treatment arm index excluding the control arm whose index is 0
#'
#'
#' @return randomprob: The vector of adjusted randomisation probability to each arm
#' @export
#'
#' @examples
#' ARmethod(
#' BARmethod = "Thall",
#' group = 1,
#' stats = matrix(rep(NA, 40), ncol = 8, nrow = 5),
#' post.prob.btcontrol = 0.5,
#' K = 2,
#' n = c(30, 30),
#' tuningparameter = "fixed",
#' c = 1,
#' post.prob.best = c(0.5, 0.5),
#' max.ar = 0.75,
#' armleft = 2,
#' treatmentindex = 1)
#'
#' ARmethod(
#' BARmethod = "Trippa",
#' group = 1,
#' stats = matrix(rep(NA, 40), ncol = 8, nrow = 5),
#' post.prob.btcontrol = c(0.5, 0.6),
#' K = 3,
#' n = c(30, 30, 40),
#' tuningparameter = NA,
#' c = NA,
#' a = 3,
#' b = 0.75,
#' post.prob.best = c(0.3, 0.3, 0.4),
#' max.ar = NA,
#' armleft = 3,
#' treatmentindex = c(1, 2))
#'
#' @references Bayesian adaptive randomized trial design for patients with recurrent glioblastoma. Trippa, Lorenzo, Eudocia Q. Lee, Patrick Y. Wen, Tracy T. Batchelor, Timothy Cloughesy, Giovanni Parmigiani, and Brian M. Alexander. Journal of Clinical Oncology 30, no. 26 (2012): 3258.
#' A simulation study of outcome adaptive randomization in multi-arm clinical trials. Wathen, J. Kyle, and Peter F. Thall. Clinical Trials 14, no. 5 (2017): 432-440.
#'
#' @author Ziyan Wang
ARmethod = function(BARmethod,
group,
stats,
post.prob.btcontrol,
K,
n,
tuningparameter = NA,
c = NA,
a = NA,
b = NA,
post.prob.best,
max.ar = NA,
armleft,
treatmentindex) {
# Validate inputs
if (!is.character(BARmethod)) stop("Error: BARmethod should be a character value")
if (!is.numeric(group) || group <= 0) stop("Error: group should be a positive numeric value")
if (K < 2) stop("Error: K should be an integer value greater than or equal to 2")
#---------------------Trippa's approach---------------------
if (BARmethod == "Trippa") {
    if ((!is.numeric(a) || !is.numeric(b))) stop("hyperparameters a and b should be numeric values for Trippa's approach")
    ## Tune the parameters using the method mentioned in Trippa's paper (2014)
gamma_stage = a * ((group / dim(stats)[1])) ^ b
eta_stage = 0.25 * (group / dim(stats)[1])
##Reweigh the allocation probability
###K >= 1, treatment group
allocate_trt = post.prob.btcontrol ^ gamma_stage / sum(post.prob.btcontrol ^
gamma_stage)
###k = 0, control group
allocate_control = 1 / (armleft - 1) * (exp(max(n[-1]) - n[1])) ^ eta_stage
sum_pi = allocate_control + sum(allocate_trt)
alloc.prob.btcontrol = c(allocate_control / sum_pi, allocate_trt / sum_pi)
rpk = matrix(rep(0,armleft),ncol = armleft)
randomprob = matrix(rep(0,K),ncol = K)
colnames(rpk) = c(1,treatmentindex+1)
colnames(randomprob) = seq(1,K)
rpk[1] = alloc.prob.btcontrol[1]
rpk[-1] = alloc.prob.btcontrol[-1]
rpk = rpk/sum(rpk)
randomprob[as.numeric(colnames(rpk))] = randomprob[as.numeric(colnames(rpk))]+rpk
}
#---------------------Thall's approach---------------------
else if (BARmethod == "Thall") {
if ((!is.numeric(max.ar) || max.ar <= 0 || max.ar >= 1)) stop("max.ar should be a numeric value between 0 and 1 for Thall's approach")
##Tuning parameter c for Thall's approach
if (tuningparameter == "Unfixed") {
c = group / (2 * dim(stats)[1])
}
else {
c = c
}
##Reweigh the allocation probability
alloc.prob.best = post.prob.best ^ c / sum(post.prob.best ^ c)
rpblocktwoarm = min(max.ar, max(1 - max.ar, post.prob.btcontrol))
randomprob = alloc.prob.best
#-------------------Allocation bounds restriction (two arm)---------------
if (K == 2) {
lower = ifelse(alloc.prob.best < (1 - max.ar), 1 - max.ar, alloc.prob.best)
upper = ifelse(lower > max.ar, max.ar, lower)
randomprob = upper
randomprob = matrix(randomprob, ncol = length(upper))
colnames(randomprob) = seq(1, K)
}
#-------------------Allocation bounds restriction (K arm: restriction on control)---------------
else if (K > 2) {
rpk = matrix(rep(0,armleft),ncol = armleft)
randomprob = matrix(rep(0,K),ncol = K)
colnames(rpk) = c(1,treatmentindex+1)
colnames(randomprob) = seq(1,K)
rpk[1] = alloc.prob.best[1]
rpk[-1] = alloc.prob.best[-1][treatmentindex]
rpk = rpk/sum(rpk)
lower = ifelse(rpk<(1-max.ar),1-max.ar,rpk)
rpk = lower
upper = ifelse(rpk>max.ar,max.ar,rpk)
rpk = upper
rpk[!(rpk==(1-max.ar))]=(1-sum(rpk[rpk==1-max.ar]))*(rpk[!(rpk==1-max.ar)]/sum(rpk[!(rpk==1-max.ar)]))
randomprob[as.numeric(colnames(rpk))] = randomprob[as.numeric(colnames(rpk))]+rpk
}
}
else{
stop("Error: Please check the input of Fixratio and BARmethod")
}
return(randomprob)
}
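# A minimal sketch of the Thall weighting computed above (values assumed for
# illustration only):
#   post.prob.best <- c(0.25, 0.45, 0.30)        # control and two treatments
#   c <- 0.5                                     # tuning parameter
#   post.prob.best^c / sum(post.prob.best^c)     # unrestricted allocation probabilities
# ARmethod() then truncates these probabilities to the interval [1 - max.ar, max.ar]
# and renormalises, as in the K > 2 branch above.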
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/Simulation_AdaptiveRandomisationmethodRatioCalc.R
|
#' @title Boundaryconstruction
#' @description This function constructs the stopping boundary based on input information
#' @param Stopbound.inf The list of stop boundary information for more see \code{\link{Stopboundinf}}
#' @param ns A vector of accumulated number of patient at each stage
#'
#' @return A list of the futility boundary and the efficacy boundary
#' @importFrom stats pnorm
#' @importFrom stats var
#' @export
#'
#' @examples
#' Stopbound.inf=list(Stop.type="Early-Pocock",Boundary.type="Symmetric",cutoff=c(0.9928,0.0072))
#' ns=c(60,120,180,240,300)
#' Boundaryconstruction(Stopbound.inf, ns)
#' @author Ziyan Wang
Boundaryconstruction = function(Stopbound.inf = Stopbound.inf, ns = ns) {
cutoff.temp = Stopbound.inf$cutoff #(cutoff1, cutoff2): cutoff1 is efficacy cutoff; cutoff2 is futility cutoff
if (length(cutoff.temp) != 2 | sum(is.na(cutoff.temp)) != 0) {
stop(
"Error: Please input the cutoff value as a vector with two values for both symmetric and asymmetric boundary"
)
}
boundary = Stopbound.inf$Boundary.type
#------------Need to Check--------------
stage = length(ns)
#---------------------------------------
Stoptype = Stopbound.inf$Stop.type
if (boundary == "Symmetric") {
if (Stoptype == "Noearly") {
cutoffeff = c(rep(1.01, stage - 1), cutoff.temp[1]) #Ensure no stop during the trial (The efficacy cutoff before the final stage > 1)
cutoffful = c(rep(-0.01, stage - 1), cutoff.temp[2]) #Ensure no stop during the trial (The futility cutoff before the final stage < 0)
      if (sum(cutoff.temp) != 1) {
        stop(
          "Error: The cutoffs supplied are asymmetric. Please revise the Boundary.type in the Stopbound.inf list. For a symmetric boundary the two cutoffs should sum to 1"
        )
      }
      if (cutoff.temp[1] > 1 |
          cutoff.temp[1] < cutoff.temp[2] | cutoff.temp[2] < 0) {
        stop(
          "Error: The futility cutoff should be smaller than the efficacy cutoff at the final stage. Also, both cutoffs should be <= 1 except for the OBF boundary"
        )
      }
}
else if (Stoptype == "Early-Pocock") {
cutoffeff = rep(cutoff.temp[1], stage)
cutoffful = rep(cutoff.temp[2], stage)
if (sum(cutoff.temp) != 1) {
stop(
"Error: The cutoff inputted is Asymmetric. Please revise the Boundary.type in Stopbound.inf list. The sum of them for symmetric Noearly should be 1"
)
}
if (sum(cutoffeff > 1) > 0 |
sum(cutoffeff < cutoffful) > 0 | cutoff.temp[2] < 0) {
stop(
"Error: The cutoff of Pocock boundary should be smaller or equal to 1 (<= 1), The fultility cutoff should be smaller than efficacy at each stage"
)
}
}
else if (Stoptype == "Early-OBF") {
cutoffeff = pnorm(sqrt(stage / seq(1:stage)) * cutoff.temp[1])
cutoffful = pnorm(-sqrt(stage / seq(1:stage)) * cutoff.temp[2])
if (var(cutoff.temp) != 0) {
stop(
"Error: For symmetric OBF boundary, the cutoff for efficacy and fultility should be the same. The function will atuomatically construct an symmetric boundary set. cutoff can be greater than 1"
)
}
}
else {
stop(
"Error: Please input the stopping boundary type correctly should be in c(Noearly,Early-Pocock,Early-OBF)"
)
}
}
else if (boundary == "Asymmetric") {
if (Stoptype == "Noearly") {
cutoffeff = c(rep(1.01, stage - 1), cutoff.temp[1]) #Ensure no stop during the trial (The efficacy cutoff before the final stage > 1)
cutoffful = c(rep(-0.01, stage - 1), cutoff.temp[2]) #Ensure no stop during the trial (The futility cutoff before the final stage < 0)
if (sum(cutoff.temp) == 1) {
stop(
"Error: The cutoff inputted is symmetric. Please revise the Boundary.type in Stopbound.inf list."
)
}
      if (cutoff.temp[1] > 1 |
          cutoff.temp[1] < cutoff.temp[2] | cutoff.temp[2] < 0) {
        stop(
          "Error: The futility cutoff should be smaller than the efficacy cutoff at the final stage. Also, both cutoffs should be <= 1 except for the OBF boundary"
)
}
}
else if (Stoptype == "Early-Pocock") {
cutoffeff = rep(cutoff.temp[1], stage)
cutoffful = rep(cutoff.temp[2], stage)
if (sum(cutoff.temp) == 1) {
stop(
"Error: The cutoff inputted is symmetric. Please revise the Boundary.type in Stopbound.inf list."
)
}
if (sum(cutoffeff > 1) > 0 |
sum(cutoffeff < cutoffful) > 0 | cutoff.temp[2] < 0) {
stop(
"Error: The cutoff of Pocock boundary should be smaller or equal to 1 (<= 1), The fultility cutoff should be smaller than efficacy at each stage"
)
}
}
else if (Stoptype == "Early-OBF") {
      cutoffeff = pnorm(sqrt(stage / seq_len(stage)) * cutoff.temp[1])
      cutoffful = pnorm(-sqrt(stage / seq_len(stage)) * cutoff.temp[2])
if (var(cutoff.temp) == 0) {
stop(
"Error: For Asymmetric OBF boundary, the cutoff for efficacy and fultility should be the different. The function will atuomatically construct a asymmetric boundary set. cutoff can be greater than 1"
)
}
}
else {
stop(
"Error: Please input the stopping boundary type correctly should be in c(Noearly,Early-Pocock,Early-OBF)"
)
}
}
else {
stop("Error: The boundary type is invalid")
}
return(list(
Efficacy.boundary = cutoffeff,
Fultility.boundary = cutoffful
))
}
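# Illustrative sketch (not run) of how the Early-OBF cutoffs computed above
# evolve across stages: with cutoff = c(2, 2) and five stages, the efficacy
# bound pnorm(sqrt(stage / j) * 2) starts very strict and relaxes towards
# pnorm(2) ~ 0.977 at the final analysis, and the futility bound mirrors it.
if (FALSE) {
  Stopbound.inf = list(Stop.type = "Early-OBF",
                       Boundary.type = "Symmetric",
                       cutoff = c(2, 2))
  Boundaryconstruction(Stopbound.inf, ns = c(60, 120, 180, 240, 300))
}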
#---------------------------------------------------------------------
# File: R/Simulation_Boundaryconstruction.R
#' @title testing_and_armdropping
#' @description This function makes a decision on whether any active arm should be dropped based on posterior probability and
#' returns the vector of decisions on each arm, the vector of active arm indices and the number of arms left for further study.
#'
#' @param post.prob.btcontrol A numeric vector of posterior probability of each treatment arm better than control
#' @param group A numeric value. The current stage index.
#' @param cutoffeff A numeric vector of the cutoff value at each stage for efficacy boundary.
#' @param cutoffful A numeric vector of the cutoff value at each stage for futility boundary.
#' @param treatmentindex A numeric vector of the current active treatment arm index
#' @param test.type A character indicating which hypothesis test is used.
#' "Oneside": H_0: \\pi_k \\leq \\pi_0; H_1: \\pi_k > \\pi_0
#' "Twoside": H_0: \\pi_k = \\pi_0; H_1: \\pi_k \\neq \\pi_0
#' @param K A numeric value indicating the total number of arms at the beginning of the trial including both control and treatment.
#' @param armleft A numeric value indicating the number of active arms before this interim analysis;
#'
#' @return A list of information including armleft: the number of active arms after this interim analysis;
#' treatmentindex: the index vector of active arms after this interim analysis;
#' stats3: the vector of conclusions on whether each null hypothesis is rejected
#' @export
#'
#' @examples
#' testing_and_armdropping(
#' K = 4,
#' armleft = 4,
#' post.prob.btcontrol = c(0.5,0.99,0.02),
#' group = 3,
#' cutoffeff = c(1, 0.99, 0.975, 0.96, 0.95),
#' cutoffful = c(0, 0.01, 0.025, 0.04, 0.05),
#' treatmentindex = c(1,2,3),
#' test.type = "Oneside")
#'
#' testing_and_armdropping(
#' K = 4,
#' armleft = 4,
#' post.prob.btcontrol = c(0.5,0.99,0.02),
#' group = 3,
#' cutoffeff = c(1, 0.99, 0.975, 0.96, 0.95),
#' cutoffful = c(0, 0.01, 0.025, 0.04, 0.05),
#' treatmentindex = c(1,2,3),
#' test.type = "Twoside")
testing_and_armdropping = function(K,
armleft,
post.prob.btcontrol,
group,
cutoffeff,
cutoffful,
treatmentindex,
test.type) {
#----Justify if type I error was made for each arm----
  # post.prob.btcontrol > cutoffeff[group]: efficacy boundary is hit at this stage
  # post.prob.btcontrol < cutoffful[group]: futility boundary is hit at this stage
Justify = post.prob.btcontrol > cutoffeff[group] |
post.prob.btcontrol < cutoffful[group]
# If one side test, we would only make conclusion on arm for superiority.
# However, if two side test, we could make conclusion on arm for either inferiority and superiority.
Conclude.efficacy = post.prob.btcontrol > cutoffeff[group]
Conclude.twoside = Justify
#Identify which active arm should be dropped at current stage
  treatmentdrop = treatmentindex[Justify]
# Delete the arm dropped in this round information
post.prob.btcontrol = post.prob.btcontrol[!Justify]
stats3 = rep(NA, K - 1)
names(stats3) = seq(1, K - 1)
if (test.type == "Oneside") {
stats3[treatmentindex] = Conclude.efficacy
}
else if (test.type == "Twoside") {
stats3[treatmentindex] = Conclude.twoside
}
else{
stop("The hypothesis testing type should be specified")
}
if (sum(Justify) > 0) {
armleft = armleft - sum(Justify)
#Debugged for K arm by Ziyan Wang on 12:00 26/07/2022 for three arm. Used to be treatmentindex = treatmentindex[-treatmentdrop]
#Debugged for K arm by Ziyan Wang on 18:58 26/07/2022 for more than 3 arm. Used to be treatmentindex = treatmentindex[!(treatmentindex==treatmentdrop)]
treatmentindex = treatmentindex[is.na(match(treatmentindex, treatmentdrop))]
}
return(list(
stats3 = stats3,
armleft = armleft,
treatmentindex = treatmentindex
))
}
# File: R/Simulation_Hypothesistesting_and_armdropping.R
#' @title OutputStats.initialising
#' @description This function initializes the output matrix including all evaluation metrics based on input information
#' @param variable.inf The parameter information in the model
#' @param reg.inf The model information. For the fixed effect model, the input of reg.inf can be main, main + stage_continuous, main * stage_continuous, main + stage_discrete,
#' main * stage_discrete.
#' For the mixed effect model, reg.inf is not used.
#' @param ns A vector of the accumulated number of patients at each stage
#' @param K Total number of arm including control
#'
#' @return The empty output matrix including different evaluation metrics.
#' @export
#'
#' @examples
#' OutputStats.initialising(
#' variable.inf = "Fixeffect",
#' reg.inf = "main",
#' ns = c(15, 30, 45, 60, 75),
#' K = 2)
#' @author Ziyan Wang
OutputStats.initialising = function(variable.inf, reg.inf, ns, K) {
#Storage object for posterior probabilities
if (variable.inf == "Fixeffect") {
if (reg.inf == "main") {
stats = matrix(NA,
nrow = length(ns),
ncol = K - 1 + K * 2 + K - 1 + 1 + K - 1 + K - 1)
#K-1 posterior probability better than control and K columns number of success + K columns number of patient
#K-1 Indicator of K-1 hypothesis
#1: control mean estimates Debugged at 02:25 10/10/2022 by ZIYAN WANG
#K-1 Mean estimates of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
#K-1 Variance of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
}
else if (reg.inf == "main + stage_continuous") {
stats = matrix(NA,
nrow = length(ns),
ncol = K - 1 + K * 2 + K - 1 + 1 + K - 1 + K - 1 + 1)
#K-1 posterior probability better than control and K columns number of success + K columns number of patient
#K-1 Indicator of K-1 hypothesis
#1: control mean estimates Debugged at 02:25 10/10/2022 by ZIYAN WANG
#K-1 Mean estimates of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
#K-1 Variance of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
#1 Stage effect when stage is treated as continuous. Debugged at 22:00 16/09/2022 by ZIYAN WANG
}
else if (reg.inf == "main * stage_continuous") {
stats = matrix(NA,
nrow = length(ns),
ncol = K - 1 + K * 2 + K - 1 + 1 + K - 1 + K - 1 + 1 + K - 1)
#K-1 posterior probability better than control and K columns number of success + K columns number of patient
#K-1 Indicator of K-1 hypothesis
#1: control mean estimates Debugged at 02:25 10/10/2022 by ZIYAN WANG
#K-1 Mean estimates of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
#K-1 Variance of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
#1 Stage effect when stage is treated as continuous. Debugged at 22:00 16/09/2022 by ZIYAN WANG
#K-1 Interaction effect when stage is treated as continuous. Debugged at 22:00 16/09/2022 by ZIYAN WANG
}
else if (reg.inf == "main + stage_discrete") {
stats = matrix(NA,
nrow = length(ns),
ncol = K - 1 + K * 2 + K - 1 + 1 + K - 1 + K - 1 + length(ns) - 1)
#K-1 posterior probability better than control and K columns number of success + K columns number of patient
#K-1 Indicator of K-1 hypothesis
#1: control mean estimates Debugged at 02:25 10/10/2022 by ZIYAN WANG
#K-1 Mean estimates of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
#K-1 Variance of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
#length(ns)-1 Total number of Stage effect when stage is treated as discrete Debugged at 22:00 16/09/2022 by ZIYAN WANG
}
else if (reg.inf == "main * stage_discrete") {
stats = matrix(
NA,
nrow = length(ns),
ncol = K - 1 + K * 2 + K - 1 + 1 + K - 1 + K - 1 + length(ns) - 1 + (K -
1) * (length(ns) - 1)
)
#K-1 posterior probability better than control and K columns number of success + K columns number of patient
#K-1 Indicator of K-1 hypothesis
#1: control mean estimates Debugged at 02:25 10/10/2022 by ZIYAN WANG
#K-1 Mean estimates of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
#K-1 Variance of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
#length(ns)-1 Total number of Stage effect when stage is treated as discrete Debugged at 22:00 16/09/2022 by ZIYAN WANG
#(K-1)*(length(ns)-1) Total number of Interaction effect when stage is treated as discrete Debugged at 22:00 16/09/2022 by ZIYAN WANG
}
else{
stop(
"Error: reg.inf must be in c(main, main + stage_continuous, main * stage_continuous, main + stage_discrete, main * stage_discrete)"
)
}
#Debugged at 02:01 17/09/2022 by ZIYAN WANG. For compressing different version of codes.
#Use name function to assign the names of each column of stats matrix
namefunc = function(K, ns) {
ngroup = length(ns)
#probability of each treatment better than control
name1 = {
}
#number of patients in each arm and their outcomes
name2 = {
}
#Hypothesis testing result
name3 = {
}
#mean treatment effect
name4 = {
}
      #variance of treatment effect
name5 = {
}
#mean Stage effect
name6 = {
}
#mean Interaction effect
name7 = {
}
#mean fix intercept
name8 = paste0("Intercept")
if (reg.inf == "main") {
for (n in 1:(K - 1)) {
name1 = c(name1, paste0("PP", n, "C"))
name2 = c(name2, paste0("nE", n), paste0("yE", n))
name3 = c(name3, paste0("H1^", n, "tpIE"))
name4 = c(name4, paste0("Trt", n, "_Mean"))
name5 = c(name5, paste0("Trt", n, "_Var"))
}
}
else if (reg.inf == "main + stage_continuous") {
for (n in 1:(K - 1)) {
name1 = c(name1, paste0("PP", n, "C"))
name2 = c(name2, paste0("nE", n), paste0("yE", n))
name3 = c(name3, paste0("H1^", n, "tpIE"))
name4 = c(name4, paste0("Trt", n, "_Mean"))
name5 = c(name5, paste0("Trt", n, "_Var"))
}
name6 = c(name6, paste0("stageeffect"))
}
else if (reg.inf == "main * stage_continuous") {
for (n in 1:(K - 1)) {
name1 = c(name1, paste0("PP", n, "C"))
name2 = c(name2, paste0("nE", n), paste0("yE", n))
name3 = c(name3, paste0("H1^", n, "tpIE"))
name4 = c(name4, paste0("Trt", n, "_Mean"))
name5 = c(name5, paste0("Trt", n, "_Var"))
name7 = c(name7, paste0("stage_treatment", n, "interaction"))
}
name6 = c(name6, paste0("stageeffect"))
}
else if (reg.inf == "main + stage_discrete") {
for (n in 1:(K - 1)) {
name1 = c(name1, paste0("PP", n, "C"))
name2 = c(name2, paste0("nE", n), paste0("yE", n))
name3 = c(name3, paste0("H1^", n, "tpIE"))
name4 = c(name4, paste0("Trt", n, "_Mean"))
name5 = c(name5, paste0("Trt", n, "_Var"))
}
for (grouptemp in 2:ngroup) {
name6 = c(name6, paste0("stageeffect_", grouptemp))
}
}
else if (reg.inf == "main * stage_discrete") {
for (n in 1:(K - 1)) {
name1 = c(name1, paste0("PP", n, "C"))
name2 = c(name2, paste0("nE", n), paste0("yE", n))
name3 = c(name3, paste0("H1^", n, "tpIE"))
name4 = c(name4, paste0("Trt", n, "_Mean"))
name5 = c(name5, paste0("Trt", n, "_Var"))
for (grouptemp in 2:ngroup) {
name7 = c(name7,
paste0("trt", n, "_stage", grouptemp, "interaction"))
}
}
for (grouptemp in 2:ngroup) {
name6 = c(name6, paste0("stageeffect_", grouptemp))
}
}
return(c(
name1,
"nC",
"yC",
name2,
name3,
name8,
name4,
name5,
name6,
name7
))
}
}
else if (variable.inf == "Mixeffect") {
stats = matrix(NA,
nrow = length(ns),
ncol = K - 1 + K * 2 + K - 1 + 1 + K - 1 + K - 1 + length(ns) - 1)
#K-1 posterior probability better than control and K columns number of success + K columns number of patient
#K-1 Indicator of K-1 hypothesis
#1: control mean estimates Debugged at 02:25 10/10/2022 by ZIYAN WANG
#K-1 Mean estimates of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
#K-1 Variance of treatment effect at logit scale. Debugged at 14:52 05/09/2022 by ZIYAN WANG
#length(ns)-1 Total number of Stage effect when stage is treated as discrete Debugged at 22:00 16/09/2022 by ZIYAN WANG
namefunc = function(K, ns) {
ngroup = length(ns)
#probability of each treatment better than control
name1 = {
}
#number of patients in each arm and their outcomes
name2 = {
}
#Hypothesis testing result
name3 = {
}
#mean treatment effect
name4 = {
}
      #variance of treatment effect
name5 = {
}
#mean Stage effect
name6 = {
}
#mean Interaction effect
name7 = {
}
#mean fix intercept
name8 = paste0("Intercept")
for (n in 1:(K - 1)) {
name1 = c(name1, paste0("PP", n, "C"))
name2 = c(name2, paste0("nE", n), paste0("yE", n))
name3 = c(name3, paste0("H1^", n, "tpIE"))
name4 = c(name4, paste0("Trt", n, "_Mean"))
name5 = c(name5, paste0("Trt", n, "_Var"))
}
for (grouptemp in 2:ngroup) {
name6 = c(name6, paste0("Randomintercept", grouptemp))
}
return(c(
name1,
"nC",
"yC",
name2,
name3,
name8,
name4,
name5,
name6,
name7
))
}
}
statsname = namefunc(K, ns)
colnames(stats) = statsname
rownames(stats) = 1:length(ns)
return(stats)
}
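# Illustrative sketch (not run): the column layout produced for the example
# above (K = 2, five stages, reg.inf = "main") has 9 columns:
# PP1C, nC, yC, nE1, yE1, H1^1tpIE, Intercept, Trt1_Mean, Trt1_Var.
if (FALSE) {
  colnames(OutputStats.initialising(
    variable.inf = "Fixeffect", reg.inf = "main",
    ns = c(15, 30, 45, 60, 75), K = 2))
}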
# File: R/Simulation_Outputmatrixinitialization.R
#' @title Initializetrialparameter
#' @description This function initialises the inner parameters used in the simulatetrial function
#' @param response.probs A vector of response probability of each arm
#' @param ns A vector of the accumulated number of patients at each stage
#'
#' @return A list of initialised parameters including the number of arms for this trial 'K',
#' the number of active arms 'armleft', the index of the treatment arms 'treatmentindex', the vector of the total number of patients allocated to each arm 'n',
#' the vector of the total number of responders for each arm 'y1',
#' the matrix of the true response probability of each arm at each stage 'groupwise.response.probs', which is required for the time trend study,
#' the vector of randomisation probabilities for each arm 'randomprob',
#' the array of arm assignments for each patient 'z',
#' the array of outcomes for each patient 'y',
#' the array of stage indices for each patient 'group_indicator',
#' the matrix of the probability of each arm being the best at each stage 'post.prob.best.mat'.
#' @export
#'
#' @examples
#' Initializetrialparameter(response.probs = c(0.4,0.6), ns = c(15,30,45,60,75,90))
#'
#' #$K
#' #[1] 2
#'
#' #$armleft
#' #[1] 2
#'
#' #$treatmentindex
#' #[1] 1
#'
#' #$n
#' #[1] 0 0
#'
#' #$y1
#' #[1] 0 0
#'
#' #$groupwise.response.probs
#' # [,1] [,2]
#' #[1,] 0.4 0.6
#' #[2,] 0.4 0.6
#' #[3,] 0.4 0.6
#' #[4,] 0.4 0.6
#' #[5,] 0.4 0.6
#' #[6,] 0.4 0.6
#'
#' #$randomprob
#' # 1 2
#' #[1,] 0.5 0.5
#'
#' #$z
#' #numeric(0)
#'
#' #$y
#' #numeric(0)
#'
#' #$group_indicator
#' #numeric(0)
#'
#' #$post.prob.best.mat
#' # 0 1
#' #[1,] 0 0
#' #[2,] 0 0
#' #[3,] 0 0
#' #[4,] 0 0
#' #[5,] 0 0
#' #[6,] 0 0
#'
#' @author Ziyan Wang
Initializetrialparameter = function(response.probs, ns) {
K = length(response.probs)
armleft = K
treatmentindex = seq(1, K - 1)
n = rep(0, K)
y1 = rep(0, K)
groupwise.response.probs = matrix(rep(response.probs, length(ns)),
nrow = length(ns),
byrow = T)
rand.prob = 1 / K
randomprob = matrix(rep(rand.prob, K), ncol = K)
colnames(randomprob) = seq(1, K)
z <- array(0.0, 0)
y <- array(0.0, 0)
group_indicator <- array(0.0, 0)
post.prob.best.mat = matrix(0,length(ns),K)
colnames(post.prob.best.mat) = seq(0,K-1)
return(
list(
K = K,
armleft = armleft,
treatmentindex = treatmentindex,
n = n,
y1 = y1,
groupwise.response.probs = groupwise.response.probs,
randomprob = randomprob,
z = z,
y = y,
group_indicator = group_indicator,
post.prob.best.mat = post.prob.best.mat
)
)
}
# File: R/Simulation_Parameterinitialisation.R
#' @title Randomisation.inf
#' @description This function checks the validity of the randomisation information input
#' @param Random.inf A list of adaptive randomisation information.
#' 'Fixratio' an indicator of whether the randomisation process uses a fixed ratio. Default is FALSE.
#' 'Fixratiocontrol' the numerical value indicating the randomisation weight of the control arm compared to the treatment arms. Default is NA for Fixratio = FALSE.
#' 'BARmethod' the Bayesian adaptive randomisation type. Default is "Thall" indicating the use of Thall's approach in the randomisation process. The other value is 'Trippa'.
#' 'Thall.tuning.inf' the list of tuning parameters for Thall's approach including 'tuningparameter' (Default is "Fixed", indicating that the tuning parameter is fixed for all stages) and 'fixvalue' (Default is 1).
#' @return A list of input randomisation information
#' @export
#'
#' @examples
#' Randomisation.inf(Random.inf = list(
#' Fixratio = FALSE,
#' Fixratiocontrol = NA,
#' BARmethod = "Thall",
#' Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1),
#' Trippa.tuning.inf = list(a = NA, b = NA)
#' ))
#' @author Ziyan Wang
Randomisation.inf = function(Random.inf = list(
Fixratio = FALSE,
Fixratiocontrol = NA,
BARmethod = "Thall",
Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1),
Trippa.tuning.inf = list(a = 10, b = 0.75)
)) {
Fixratio = Random.inf$Fixratio
if (Fixratio == T) {
if (is.na(Random.inf$Fixratiocontrol) |
Random.inf$Fixratiocontrol <= 0) {
stop(
"Error: The value R > 0 for fix randomisation (R:1:1:1:......) should be input which is Fixratiocontrol"
)
}
else{
Fixratiocontrol = Random.inf$Fixratiocontrol
}
return(
list(
Fixratio = Fixratio,
Fixratiocontrol = Fixratiocontrol,
BARmethod = NA,
Thall.tuning.inf = list(tuningparameter = NA, c = NA),
Trippa.tuning.inf = list(a = NA, b = NA)
)
)
}
else {
BARmethod = Random.inf$BARmethod
if (BARmethod == "Thall") {
tuning.inf = Random.inf$Thall.tuning.inf$tuningparameter
if (tuning.inf == "Fixed") {
tuningparameter = "Fixed"
c = Random.inf$Thall.tuning.inf$fixvalue
if (is.na(Random.inf$Thall.tuning.inf$fixvalue)) {
stop("Error: The value of tuning parameter in Thall's approach should be specified.")
}
return(
list(
BARmethod = BARmethod,
Thall.tuning.inf = list(tuningparameter = tuningparameter, c = c),
Fixratio = Fixratio,
Fixratiocontrol = NA
)
)
}
else {
tuningparameter = "Unfixed"
}
return(
list(
BARmethod = BARmethod,
Thall.tuning.inf = list(tuningparameter = tuningparameter, c = NA),
Fixratio = Fixratio,
Fixratiocontrol = NA
)
)
}
else{
BARmethod = "Trippa"
a = Random.inf$Trippa.tuning.inf$a
b = Random.inf$Trippa.tuning.inf$b
return(
list(
BARmethod = BARmethod,
Trippa.tuning.inf = list(a = a, b = b),
Thall.tuning.inf = list(tuningparameter = NA, c = NA),
Fixratio = Fixratio,
Fixratiocontrol = NA
)
)
}
}
}
# File: R/Simulation_Randomisationinformationcheck.R
#' @title AdaptiveRandomisation
#' @description This is a function doing the randomisation process. This Function generates the Sequence for patient allocation to each arm, patient outcomes.
#' @param Fixratio An indicator TRUE/FALSE
#' @param rand.algo Randomisation algorithm: "Coin": Biased coin; "Urn": Urn method
#' @param K Total number of arms at the beginning
#' @param n.new The cohort size
#' @param randomprob A named vector of randomisation probability to each arm
#' @param groupwise.response.probs A matrix of response probability of each arm
#' @param group The current stage
#' @param armleft The number of treatments left in the platform (>2)
#' @param max.deviation Tuning parameter of using urn randomisation method.
#' @param trend_add_or_multip How time trend affects the true response probability: "add" or "mult"
#' @param trend.function The function returns time trend effect regarding to different time trend pattern
#' @param trend.effect The strength of time trend effect as a parameter in trend.function()
#' @param treatmentindex The vector of treatment arm index excluding the control arm whose index is 0
#' @param ns A vector of the accumulated number of patients at each stage
#' @param Fixratiocontrol A numeric value indicating the weight of control in randomisation.
#' Eg. 1 means equal randomisation; 2 means the number of patients allocated to control is twice that of each other treatment arm.
#'
#' @return A list of patient allocation and patient outcome
#' nstage: A vector of the number of patients allocated to each arm
#' ystage: A vector of the patients outcome after treating with each arm
#' znew: A vector of treatment index assigned to each patient in the current cohort
#' ynew: A vector of outcome index record for each patient after treatment in the current cohort
#' @importFrom stats rbinom
#' @importFrom boot logit
#' @importFrom boot inv.logit
#' @export
#'
#' @examples
#' AdaptiveRandomisation(
#' Fixratio = FALSE,
#' rand.algo = "Urn",
#' K = 2,
#' n.new = 30,
#' randomprob = matrix(c(0.5, 0.5), ncol = 2, dimnames = list(c(),c("1","2"))),
#' treatmentindex = 1,
#' groupwise.response.probs = matrix(rep(c(0.4, 0.4), 5), byrow = TRUE, ncol = 2, nrow = 5),
#' group = 1,
#' armleft = 2,
#' max.deviation = 3,
#' trend_add_or_multip = "mult",
#' trend.function = function(ns, group, i, trend.effect) {delta = 0; return(delta)},
#' trend.effect = c(0, 0),
#' ns = c(30, 60, 90, 120, 150),
#' Fixratiocontrol = NA)
#'
#' AdaptiveRandomisation(
#' Fixratio = TRUE,
#' rand.algo = "Urn",
#' K = 4,
#' n.new = 30,
#' randomprob = NA,
#' treatmentindex = c(1,3),
#' groupwise.response.probs = matrix(rep(c(0.4, 0.4,0.4, 0.4), 5), byrow = TRUE, ncol = 4, nrow = 5),
#' group = 1,
#' armleft = 3,
#' max.deviation = 3,
#' trend_add_or_multip = "mult",
#' trend.function = function(ns, group, i, trend.effect) {delta = 0; return(delta)},
#' trend.effect = c(0, 0),
#' ns = c(30, 60, 90, 120, 150),
#' Fixratiocontrol = 1)
#'
#' @references Mass weighted urn design—a new randomization algorithm for unequal allocations. Zhao, Wenle. Contemporary clinical trials 43 (2015): 209-216.
#' @author Ziyan Wang
AdaptiveRandomisation = function(Fixratio,
rand.algo,
K,
n.new,
randomprob,
treatmentindex,
groupwise.response.probs,
group,
armleft,
max.deviation,
trend_add_or_multip,
trend.function,
trend.effect,
ns,
Fixratiocontrol) {
#unfix ratio
if (Fixratio == FALSE) {
#Generate assignments and outcomes for current interval
if (rand.algo == "Coin") {
#Assuming K arms including Control (K-1 treatment vs 1 Control)
#Randomisation to each k arm including control arm, where rand.prob=c(AR1,AR2,AR3,...,ARK-1) for K arm allocation
#rand.prob was suggested by Trippa et.al (2012; 2014)
#The first element is the control group, the elements 2 to K are treatment groups
randomsample = sample(K, n.new, replace = T, prob = randomprob)
randcount = table(factor(randomsample, levels = seq(1, K)))
nstage = randcount
#Simulate the response of randomised patients
ystage = rbinom(K, nstage, prob = groupwise.response.probs[group,])
#Debugged due to adding time trend at 17:36 on 30/08/2022 by Ziyan Wang
znew = randomsample
ynew = rep(0, sum(nstage))
m = seq(1, K)
y.outcome.for.each.k = sapply(m, function(m) {
ynew[which(znew == m)[sample(ystage[m])]] <- 1
return(ynew)
})
ynew = rowSums(y.outcome.for.each.k)
}
if (rand.algo == "Urn") {
#Data generation
outcome = NULL
allocation = NULL
rand.prob.temp = randomprob
count = matrix(rep(0, K), ncol = K)
ycount = matrix(rep(0, K), ncol = K)
for (i in 1:n.new) {
urnprob = randomprob * max.deviation - as.vector(count) + (i - 1) * randomprob
maxcon = ifelse(urnprob > 0, urnprob, 0)
rand.prob.temp = maxcon / sum(maxcon)
# allocation[i] = rbinom(1,1,min(max(rand.prob.temp,0),1))
allocation[i] = sample(as.numeric(colnames(randomprob)), 1, T, prob = rand.prob.temp)
count[allocation[i]] = count[allocation[i]] + 1
#Time trend add to response probability. Coded due to adding time trend at 18:01 on 30/08/2022 by Ziyan Wang
if (trend_add_or_multip == "add") {
timetrend.response.probs = groupwise.response.probs[group,] + trend.function(ns, group , i, trend.effect)
}
else if (trend_add_or_multip == "mult") {
timetrend.response.probs = inv.logit(
logit(groupwise.response.probs[group,]) + trend.function(ns, group , i, trend.effect)
)
}
else {
stop("Error: Trend should be additive of multiplicative")
}
outcome[i] = rbinom(1, 1, prob = timetrend.response.probs[allocation[i]])
ycount[allocation[i]] = ycount[allocation[i]] + outcome[i]
# print(timetrend.response.probs)
}
#Data recording. Coded due to adding time trend at 18:01 on 30/08/2022 by Ziyan Wang
randomsample = allocation
randomoutcome = outcome
nstage = as.vector(count)
ystage = as.vector(ycount)
#Debugged due to adding time trend at 17:36 on 30/08/2022 by Ziyan Wang
znew = randomsample
ynew = randomoutcome
}
#Update dataset
}
#Debugged at 0002 on 01112022 by Ziyan wang for adding additional fix ratio scenario
#PS: Need additional code to be feasible to all fix ratio scenarios.
else{
# -----------------------Fix ratio----------------------
rpk = matrix(rep(0, armleft), ncol = armleft)
randomprob = matrix(rep(0, K), ncol = K)
colnames(rpk) = c(1, treatmentindex + 1)
colnames(randomprob) = seq(1, K)
rpk[1] = Fixratiocontrol / (Fixratiocontrol + 1 * (armleft - 1))
# Debugged on 02052023 by ziyan wang. The original code is not suitable for multiarm fix ratio.
rpk[-1] = rep(1 / (armleft - 1 + Fixratiocontrol), armleft -
1)
rpk = rpk / sum(rpk)
randomprob[as.numeric(colnames(rpk))] = randomprob[as.numeric(colnames(rpk))] + rpk
#Data generation
outcome = NULL
allocation = NULL
rand.prob.temp = randomprob
count = matrix(rep(0, K), ncol = K)
ycount = matrix(rep(0, K), ncol = K)
for (i in 1:n.new) {
urnprob = randomprob * max.deviation - as.vector(count) + (i - 1) * randomprob
maxcon = ifelse(urnprob > 0, urnprob, 0)
rand.prob.temp = maxcon / sum(maxcon)
# allocation[i] = rbinom(1,1,min(max(rand.prob.temp,0),1))
allocation[i] = sample(as.numeric(colnames(randomprob)), 1, T, prob = rand.prob.temp)
count[allocation[i]] = count[allocation[i]] + 1
#Time trend add to response probability. Coded due to adding time trend at 18:01 on 30/08/2022 by Ziyan Wang
if (trend_add_or_multip == "add") {
timetrend.response.probs = groupwise.response.probs[group,] + trend.function(ns, group , i, trend.effect)
}
else if (trend_add_or_multip == "mult") {
timetrend.response.probs = inv.logit(
logit(groupwise.response.probs[group,]) + trend.function(ns, group , i, trend.effect)
)
}
else {
stop("Error: Trend should be additive of multiplicative")
}
outcome[i] = rbinom(1, 1, prob = timetrend.response.probs[allocation[i]])
ycount[allocation[i]] = ycount[allocation[i]] + outcome[i]
# print(timetrend.response.probs)
}
#Data recording. Coded due to adding time trend at 18:01 on 30/08/2022 by Ziyan Wang
randomsample = allocation
randomoutcome = outcome
nstage = as.vector(count)
ystage = as.vector(ycount)
#Debugged due to adding time trend at 17:36 on 30/08/2022 by Ziyan Wang
znew = randomsample
ynew = randomoutcome
}
return(list(
nstage = nstage,
ystage = ystage,
znew = znew,
ynew = ynew
))
}
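# A minimal standalone sketch (not run) of the mass-weighted urn step used in
# the loops above: at each draw the urn probability rewards arms that are
# behind their target allocation. The toy values of 'randomprob', 'count' and
# 'i' are assumed for illustration; max.deviation = 3 as in the examples.
if (FALSE) {
  randomprob <- c(0.5, 0.5)   # target allocation
  count <- c(4, 1)            # patients allocated so far
  i <- 6                      # the next patient to randomise
  max.deviation <- 3
  urnprob <- randomprob * max.deviation - count + (i - 1) * randomprob
  maxcon <- ifelse(urnprob > 0, urnprob, 0)
  maxcon / sum(maxcon)        # arm 2 (under-allocated) gets the larger probability
}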
# File: R/Simulation_Randomisepatientstotrt.R
#' @title stan.logisticmodeltrans
#' @description This function transforms the data from the trial simulation into the data required for stan modelling
#' @param z A vector of all treatment index data from the beginning of a trial
#' @param y A vector of all outcome data from the beginning of a trial
#' @param randomprob A named vector of randomisation probability to each arm
#' @param group_indicator A vector for the stage at which each patient was treated
#' @param armleft The number of treatments left in the platform (>2)
#' @param group The current stage
#' @param variable.inf Fixeffect/Mixeffect for logistic model parameter
#' @param reg.inf The information of how much accumulated information will be used
#'
#' @return A list of information require for the stan model including:
#' zdropped: The vector of treatment index for each patient
#' whose treatment arm is active at current stage.
#' ydropped: The vector of outcome index for each patient
#' whose treatment arm is active at current stage.
#' Ndropped: The total number of patients
#' that are treated with active treatment arms at current stage.
#' group_indicator_dropped: The vector of stage index for each patient
#' whose treatment arm is active at current stage.
#' zlevel: The active treatment arm index at current stage
#' xdummy: A design matrix transformed from zdropped and group_indicator_dropped for modelling
#' @importFrom stats model.matrix
#' @export
#'
#' @examples
#' stan.logisticmodeltrans(
#' z = c(1,2,1,2,2,1,2,1),
#' y = c(0,0,0,0,1,1,1,1),
#' randomprob = matrix(c( 0.5, 0.5), ncol = 2, dimnames = list(c("Stage1"), c("1", "2"))),
#' group_indicator = c(1,1,1,1,1,1,1,1),
#' armleft = 2,
#' group = 1,
#' variable.inf = "Fixeffect",
#' reg.inf = "main")
#' @author Ziyan Wang
stan.logisticmodeltrans = function(z,
y,
randomprob,
group_indicator,
armleft,
group,
variable.inf,
reg.inf) {
  active = z %in% as.numeric(names(randomprob[, randomprob != 0]))
  zdropped = z[active]
  ydropped = y[active]
  Ndropped = length(zdropped)
  group_indicator_dropped = group_indicator[active]
zlevel = as.numeric(levels(factor(zdropped)))
for (zindex in 1:armleft) {
zdropped[zdropped == levels(factor(zdropped))[zindex]] = zindex
}
#Construct data set x for different regression models because of rewriting stan model.
#Debugged at 18:29 16/09/2022 by ZIYAN WANG.
if (group == 1) {
#Stage 1, there is no stage effect and interaction effect
xdummy = model.matrix( ~ factor(zdropped))
}
else if (variable.inf == "Mixeffect") {
xdummy = model.matrix( ~ factor(zdropped))
}
else if (group > 1 &
reg.inf == "main" & variable.inf == "Fixeffect") {
xdummy = model.matrix( ~ factor(zdropped))
}
else if (group > 1 &
reg.inf == "main + stage_continuous" &
variable.inf == "Fixeffect") {
xdummy = model.matrix( ~ factor(zdropped) + group_indicator_dropped)
}
else if (group > 1 &
reg.inf == "main * stage_continuous" &
variable.inf == "Fixeffect") {
xdummy = model.matrix( ~ factor(zdropped) * group_indicator_dropped)
}
else if (group > 1 &
reg.inf == "main + stage_discrete" &
variable.inf == "Fixeffect") {
xdummy = model.matrix( ~ factor(zdropped) + as.factor(group_indicator_dropped))
}
else if (group > 1 &
reg.inf == "main * stage_discrete" &
variable.inf == "Fixeffect") {
xdummy = model.matrix( ~ factor(zdropped) * as.factor(group_indicator_dropped))
}
return(
list(
zdropped = zdropped,
ydropped = ydropped,
Ndropped = Ndropped,
group_indicator_dropped = group_indicator_dropped,
zlevel = zlevel,
xdummy = xdummy
)
)
}
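# Illustrative sketch (not run): the design matrices this function builds for
# a toy data set (values assumed for illustration). With two arms over two
# stages, "main" yields an intercept plus a treatment dummy, while
# "main + stage_discrete" appends a stage-2 dummy.
if (FALSE) {
  zdropped <- c(1, 2, 1, 2)
  group_indicator_dropped <- c(1, 1, 2, 2)
  model.matrix(~ factor(zdropped))                                       # "main"
  model.matrix(~ factor(zdropped) + as.factor(group_indicator_dropped))  # "main + stage_discrete"
}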
# File: R/Simulation_Stanlogisticdatatransform.R
#' @title resultrtostats
#' @description This is an inner function of the function \code{\link{resultstantoRfunc}}
#' @param trteff Stan posterior samples of treatment effect sample distribution
#' @param treatmentindex A vector of treatment index at the beginning of a trial
#' @param armleft The number of treatments left in the platform (>2)
#' @param K Total number of arms at the beginning
#' @param group The current stage
#' @param reg.inf The information of how much accumulated information will be used
#' @param fit The stan output
#' @param ns A vector of the accumulated number of patients at each stage
#'
#' @return A list of stan result inference
#' stats1: A vector of posterior probability for all treatment arms
#' including dropped and active treatment arm
#' stats4: The mean treatment effect estimate of each treatment compared to control
#' stats5: The variance of treatment effect estimate of each treatment compared to control
#' post.prob.btcontrol: a vector including Posterior probability
#' of each active treatment arm better than control
#' @export
#'
#' @examples
#' \dontrun{resultrtostats(trteff = NA, treatmentindex = NA, armleft, K, group, reg.inf, fit, ns)}
#' @author Ziyan Wang
resultrtostats = function(trteff = NA,
treatmentindex = NA,
armleft,
K,
group,
reg.inf,
fit,
ns) {
sampeff = rstan::extract(fit, 'beta1')[[1]]
if (group > 1 & stringr::str_detect(reg.inf, "\\*")) {
maineffect = trteff
if (stringr::str_detect(reg.inf, "continuous")) {
interactioneff = matrix(sampeff[, (armleft + 1):(2 * armleft - 1)], ncol = armleft - 1)
for (temp in 1:dim(trteff)[2]) {
trteff[, temp] = trteff[, temp] + interactioneff[, temp] * group
}
#Mean estimates and variance of treatment effect
beta1mean = matrix(colMeans(maineffect), ncol = armleft - 1)
colnames(beta1mean) = treatmentindex
beta1var = matrix(sapply(data.frame(maineffect), var), ncol = armleft - 1)
colnames(beta1var) = treatmentindex
#Record mean estimate and variance
stats4 = rep(NA, K - 1)
names(stats4) = seq(1, K - 1)
stats4[treatmentindex] = round(beta1mean, 3)
# stats4[treatmentindex] = paste0(round(beta1mean,3),"(", round(beta1var,3), ")")
stats5 = rep(NA, K - 1)
names(stats5) = seq(1, K - 1)
stats5[treatmentindex] = round(beta1var, 3)
# #posterior probability of each trt to be better than control.
# post.prob.btcontrol = colMeans(trteff>0)
# stats1 = rep(NA,K-1)
# names(stats1) = seq(1,K-1)
# stats1[treatmentindex] = post.prob.btcontrol
#Debugged at 00:13 on 18/10/2022
#Because there are interactions, stats1 and post.prob.btcontrol will be calculated in resultstantoRfunc
return(list(stats4 = stats4, stats5 = stats5))
}
else {
maineffect = trteff
interactioneff = matrix(sampeff[,-(1:(armleft - 1 + group - 1))], ncol = (group - 1) * (armleft - 1))
for (temp in 1:dim(trteff)[2]) {
trteff[, temp] = trteff[, temp] + interactioneff[, (group - 1) * temp]
}
#Mean estimates and variance of treatment effect
beta1mean = matrix(colMeans(maineffect), ncol = armleft - 1)
colnames(beta1mean) = treatmentindex
beta1var = matrix(sapply(data.frame(maineffect), var), ncol = armleft - 1)
colnames(beta1var) = treatmentindex
#Record mean estimate and variance
stats4 = rep(NA, K - 1)
names(stats4) = seq(1, K - 1)
stats4[treatmentindex] = round(beta1mean, 3)
# stats4[treatmentindex] = paste0(round(beta1mean,3),"(", round(beta1var,3), ")")
stats5 = rep(NA, K - 1)
names(stats5) = seq(1, K - 1)
stats5[treatmentindex] = round(beta1var, 3)
# #posterior probability of each arm including control to be the best.
# post.prob.btcontrol = colMeans(trteff>0)
# stats1 = rep(NA,K-1)
# names(stats1) = seq(1,K-1)
# stats1[treatmentindex] = post.prob.btcontrol
#Debugged at 00:13 on 18/10/2022
#Because there are interactions, stats1 and post.prob.btcontrol will be calculated in resultstantoRfunc
return(list(stats4 = stats4, stats5 = stats5))
}
}
else {
#Mean estimates and variance of treatment effect
beta1mean = matrix(colMeans(trteff), ncol = armleft - 1)
colnames(beta1mean) = treatmentindex
beta1var = matrix(sapply(data.frame(trteff), var), ncol = armleft - 1)
colnames(beta1var) = treatmentindex
#Record mean estimate and variance
stats4 = rep(NA, K - 1)
names(stats4) = seq(1, K - 1)
stats4[treatmentindex] = round(beta1mean, 3)
# stats4[treatmentindex] = paste0(round(beta1mean,3),"(", round(beta1var,3), ")")
stats5 = rep(NA, K - 1)
names(stats5) = seq(1, K - 1)
stats5[treatmentindex] = round(beta1var, 3)
#posterior probability of each arm including control to be the best.
post.prob.btcontrol = colMeans(trteff > 0)
stats1 = rep(NA, K - 1)
names(stats1) = seq(1, K - 1)
stats1[treatmentindex] = post.prob.btcontrol
return(
list(
stats1 = stats1,
stats4 = stats4,
stats5 = stats5,
post.prob.btcontrol = post.prob.btcontrol
)
)
}
}
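# A minimal sketch (not run) of the main-effects summary path above, using
# simulated draws in place of a real stan fit (all values assumed): column
# means give stats4, column variances give stats5, and the proportion of
# positive draws gives the posterior probability of beating control.
if (FALSE) {
  set.seed(123)
  trteff <- cbind(rnorm(4000, 0.8, 0.3), rnorm(4000, -0.1, 0.3))  # two active arms
  colMeans(trteff)                  # ~ stats4: mean treatment effects (logit scale)
  sapply(data.frame(trteff), var)   # ~ stats5: their posterior variances
  colMeans(trteff > 0)              # ~ post.prob.btcontrol
}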
# File: R/Simulation_Stansamplesummary_Fixeffect.R
#' @title resultrtostats.rand
#' @description The inner function of function \code{\link{resultstantoRfunc.rand}}
#' @param trteff Stan posterior samples of treatment effect sample distribution
#' @param treatmentindex A vector of treatment index at the beginning of a trial
#' @param armleft The number of treatments left in the platform (>2)
#' @param K Total number of arms at the beginning
#' @param group The current stage
#' @param fit The stan output
#' @param ns A vector of the accumulated number of patients at each stage
#'
#' @return A list of stan result inference
#' stats1: A vector of posterior probability for all treatment arms
#' including dropped and active treatment arm
#' stats4: The mean treatment effect estimate of each treatment compared to control
#' stats5: The variance of treatment effect estimate of each treatment compared to control
#' post.prob.btcontrol: a vector including Posterior probability
#' of each active treatment arm better than control
#' @export
#'
#' @examples
#' \dontrun{resultrtostats.rand(trteff = NA, treatmentindex = NA, armleft, K, group, fit, ns)}
#' @author Ziyan Wang
resultrtostats.rand = function(trteff = NA,
treatmentindex = NA,
armleft,
K,
group,
fit,
ns) {
#Mean estimates and variance of treatment effect
beta1mean = matrix(colMeans(trteff), ncol = armleft - 1)
colnames(beta1mean) = treatmentindex
beta1var = matrix(sapply(data.frame(trteff), var), ncol = armleft - 1)
colnames(beta1var) = treatmentindex
#Record mean estimate and variance
stats4 = rep(NA, K - 1)
names(stats4) = seq(1, K - 1)
stats4[treatmentindex] = round(beta1mean, 3)
# stats4[treatmentindex] = paste0(round(beta1mean,3),"(", round(beta1var,3), ")")
stats5 = rep(NA, K - 1)
names(stats5) = seq(1, K - 1)
stats5[treatmentindex] = round(beta1var, 3)
#posterior probability of each arm including control to be the best.
post.prob.btcontrol = colMeans(trteff > 0)
stats1 = rep(NA, K - 1)
names(stats1) = seq(1, K - 1)
stats1[treatmentindex] = post.prob.btcontrol
return(
list(
stats1 = stats1,
stats4 = stats4,
stats5 = stats5,
post.prob.btcontrol = post.prob.btcontrol
)
)
}
# File: R/Simulation_Stansamplesummary_Mixeffect.R
#' @title resultstantoRfunc
#' @description This function summarises the fixed effect stan output and transforms it into a readable format.
#' @param group The current stage
#' @param reg.inf The information of how much accumulated information will be used
#' @param variable.inf The information of whether to use random effect model or fix effect model.
#' @param fit The stan output
#' @param armleft The number of treatments left in the platform (>2)
#' @param treatmentindex A vector of treatment index at the beginning of a trial
#' @param K Total number of arms at the beginning
#' @param ns A vector of the accumulated number of patients at each stage
#'
#' @return A list of stan result inference
#' stats1: A vector of posterior probability for all treatment arms
#' including dropped and active treatment arm
#' stats4: The mean treatment effect estimate of each treatment compared to control
#' stats5: The variance of treatment effect estimate of each treatment compared to control
#' post.prob.btcontrol: a vector including Posterior probability
#' of each active treatment arm better than control
#' stats6: A vector of mean stage (time trend) effect estimate
#' stats7: A vector of mean treatment - stage (time trend) interaction effect estimate
#' sampefftotal: The posterior samples of response probability
#' of each active arm on logit scale.
#' This can be transformed to the probability scale by using the inv.logit() function.
#' @export
#'
#' @examples
#' \dontrun{resultstantoRfunc(group, reg.inf, fit, armleft, treatmentindex, K, ns)}
#' @author Ziyan Wang
resultstantoRfunc = function(group,
reg.inf,
variable.inf,
fit,
armleft,
treatmentindex,
K,
ns) {
sampeff = rstan::extract(fit, 'beta1')[[1]]
sampeff0 = matrix(rstan::extract(fit, 'b_Intercept')[[1]], ncol = 1)
if (reg.inf == "main") {
stats6 = {
}
stats7 = {
}
}
else if (reg.inf == "main + stage_continuous") {
stats6 = rep(NA, 1)
stats7 = {
}
}
else if (reg.inf == "main * stage_continuous") {
stats6 = rep(NA, 1)
stats7 = rep(NA, K - 1)
}
else if (reg.inf == "main + stage_discrete") {
stats6 = rep(NA, length(ns) - 1)
stats7 = {
}
}
else if (reg.inf == "main * stage_discrete") {
stats6 = rep(NA, length(ns) - 1)
stats7 = rep(NA, (K - 1) * (length(ns) - 1))
}
#--------------------Only Main effect----------------
if (group == 1 | reg.inf == "main") {
trteff = matrix(sampeff[, 1:(armleft - 1)], ncol = armleft - 1)
resulttrt = resultrtostats(
trteff = trteff,
treatmentindex = treatmentindex,
armleft = armleft,
K = K,
fit = fit,
reg.inf = reg.inf,
group = group,
ns = ns
)
#stats4: Treatmenteffect mean
#stats5: Treatmenteffect variance
#stats1: probability of treatment better than control
stats4 = resulttrt$stats4
stats5 = resulttrt$stats5
stats1 = resulttrt$stats1
post.prob.btcontrol = resulttrt$post.prob.btcontrol
#Sample distribution of reference in logistic regression
sampefftotal = sampeff0
#Sample distribution of treatment in logistic regression
for (temp in 1:dim(trteff)[2]) {
sampefftotal = cbind(sampefftotal, sampeff0 + trteff[, temp])
}
#Transfer from logit scale to probability scale
sampoutcome = inv.logit(sampefftotal)
}
#--------------------Main effect and stage effect independent----------------
else if (group > 1 & stringr::str_detect(reg.inf, "\\+")) {
trteff = matrix(sampeff[, 1:(armleft - 1)], ncol = armleft - 1)
resulttrt = resultrtostats(
trteff = trteff,
treatmentindex = treatmentindex,
armleft = armleft,
K = K,
fit = fit,
reg.inf = reg.inf,
group = group,
ns = ns
)
stats4 = resulttrt$stats4
stats5 = resulttrt$stats5
stats1 = resulttrt$stats1
post.prob.btcontrol = resulttrt$post.prob.btcontrol
if (stringr::str_detect(reg.inf, "continuous")) {
stageeff = matrix(sampeff[,-(1:(armleft - 1))], ncol = 1)
stats6 = rep(NA, 1)
names(stats6) = "stageeffect"
stats6 = round(colMeans(stageeff), 3)
#Sample distribution of reference in logistic regression
sampefftotal = sampeff0 + stageeff * group
#Sample distribution of treatment in logistic regression
for (temp in 1:dim(trteff)[2]) {
sampefftotal = cbind(sampefftotal, sampeff0 + trteff[, temp] + stageeff * group)
}
#Transfer from logit scale to probability scale
sampoutcome = inv.logit(sampefftotal)
}
else {
stageeff = matrix(sampeff[,-(1:(armleft - 1))], ncol = group - 1)
stats6 = rep(NA, length(ns) - 1)
names(stats6) = seq(2, length(ns))
      stats6[1:(group - 1)] = round(colMeans(stageeff), 3)
#Sample distribution of reference in logistic regression
sampefftotal = sampeff0 + stageeff[, group - 1]
#Sample distribution of treatment in logistic regression
for (temp in 1:dim(trteff)[2]) {
sampefftotal = cbind(sampefftotal, sampeff0 + trteff[, temp] + stageeff[, group - 1])
}
#Transfer from logit scale to probability scale
sampoutcome = inv.logit(sampefftotal)
}
}
#--------------------Main effect and stage effect dependent----------------
else if (group > 1 & stringr::str_detect(reg.inf, "\\*")) {
trteff = matrix(sampeff[, 1:(armleft - 1)], ncol = armleft - 1)
resulttrt = resultrtostats(
trteff = trteff,
treatmentindex = treatmentindex,
armleft = armleft,
K = K,
fit = fit,
reg.inf = reg.inf,
group = group,
ns = ns
)
stats4 = resulttrt$stats4
stats5 = resulttrt$stats5
# post.prob.btcontrol = resulttrt$post.prob.btcontrol
if (stringr::str_detect(reg.inf, "continuous")) {
stageeff = matrix(sampeff[, armleft], ncol = 1)
stats6 = rep(NA, 1)
names(stats6) = "stageeffect"
stats6 = round(colMeans(stageeff), 3)
interactioneff = matrix(sampeff[, (armleft + 1):(2 * armleft - 1)], ncol = armleft - 1)
stats7 = rep(NA, K - 1)
names(stats7) = seq(1, K - 1)
stats7[treatmentindex] = round(colMeans(interactioneff), 3)
trteffect_with_int = {
}
#Sample distribution of reference in logistic regression
sampefftotal = sampeff0 + stageeff * group
#Sample distribution of treatment in logistic regression
for (temp in 1:dim(trteff)[2]) {
sampefftotal = cbind(sampefftotal,
sampeff0 + trteff[, temp] + stageeff * group + interactioneff[, temp] * group)
trteffect_with_int = cbind(trteffect_with_int, trteff[, temp] + interactioneff[, temp] * group)
}
#posterior probability of each arm including control to be the best.
post.prob.btcontrol = colMeans(trteffect_with_int > 0)
stats1 = rep(NA, K - 1)
names(stats1) = seq(1, K - 1)
stats1[treatmentindex] = post.prob.btcontrol
#Transfer from logit scale to probability scale
sampoutcome = inv.logit(sampefftotal)
}
else {
stageeff = matrix(sampeff[, (armleft - 1 + 1):(armleft - 1 + group - 1)], ncol = group - 1)
stats6 = rep(NA, length(ns) - 1)
names(stats6) = seq(2, length(ns))
      stats6[1:(group - 1)] = round(colMeans(stageeff), 3)
interactioneff = matrix(sampeff[,-(1:(armleft - 1 + group - 1))], ncol = (group - 1) * (armleft - 1))
stats7 = rep(NA, (K - 1) * (length(ns) - 1))
names(stats7) = seq(1, (K - 1) * (length(ns) - 1))
for (stat7temp in 1:length(treatmentindex)) {
stats7[(1 + (length(ns) - 1) * (treatmentindex[stat7temp] - 1)):((group -
1) + (length(ns) - 1) * (treatmentindex[stat7temp] - 1))] =
round(colMeans(interactioneff)[(1 + (group - 1) * (stat7temp -
1)):((group - 1) * stat7temp)], 3)
}
trteffect_with_int = {
}
#Sample distribution of reference in logistic regression
sampefftotal = sampeff0 + stageeff[, group - 1]
#Sample distribution of treatment in logistic regression
for (temp in 1:dim(trteff)[2]) {
sampefftotal = cbind(sampefftotal,
sampeff0 + trteff[, temp] + stageeff[, group - 1] + interactioneff[, (group - 1) * temp])
trteffect_with_int = cbind(trteffect_with_int, trteff[, temp] + interactioneff[, (group - 1) * temp])
}
#posterior probability of each arm including control to be the best.
post.prob.btcontrol = colMeans(trteffect_with_int > 0)
stats1 = rep(NA, K - 1)
names(stats1) = seq(1, K - 1)
stats1[treatmentindex] = post.prob.btcontrol
#Transfer from logit scale to probability scale
sampoutcome = inv.logit(sampefftotal)
}
}
else if (group > 1 & reg.inf != "main") {
stop("Regression information inputted wrong")
}
# Because only the fixed effect model is used at stage 1,
# this command is written to make sure that when using random effect model the output matrix can be written correctly.
if (variable.inf == "Mixeffect"){
stats6 = rep(NA, length(ns) - 1)
stats7 = {
}
}
return(
list(
stats1 = stats1,
stats4 = stats4,
stats5 = stats5,
stats6 = stats6,
stats7 = stats7,
sampefftotal = sampefftotal,
post.prob.btcontrol = post.prob.btcontrol
)
)
}
# File: R/Simulation_Stansamplesummarytolist_Fixeffect.R
#' @title resultstantoRfunc.rand
#' @description This function summarises the mixed effect stan output and transforms it into a readable format.
#' @param group The current stage
#' @param fit The stan output
#' @param armleft The number of treatments left in the platform (>2)
#' @param treatmentindex A vector of treatment index at the beginning of a trial
#' @param K Total number of arms at the beginning
#' @param ns A vector of the accumulated number of patients at each stage
#'
#' @return A list of stan result inference
#' stats1: A vector of posterior probability for all treatment arms
#' including dropped and active treatment arm
#' stats4: The mean treatment effect estimate of each treatment compared to control
#' stats5: The variance of treatment effect estimate of each treatment compared to control
#' post.prob.btcontrol: a vector including Posterior probability
#' of each active treatment arm better than control
#' stats6: A vector of mean stage (time trend) effect estimate
#' stats7: A vector of mean treatment - stage (time trend) interaction effect estimate
#' sampefftotal: The posterior samples of response probability
#' of each active arm on logit scale.
#' This can be transformed to the probability scale by using the inv.logit() function.
#' @export
#'
#' @examples
#' \dontrun{resultstantoRfunc.rand(group, fit, armleft, treatmentindex, K, ns)}
#' @author Ziyan Wang
resultstantoRfunc.rand = function(group, fit, armleft, treatmentindex, K, ns) {
stats6 = rep(NA, length(ns) - 1)
stats7 = {
}
if (group == 1) {
stop("Error: Random effect model is not used for the first stage")
}
else if (group > 1) {
beta0 = matrix(rstan::extract(fit, 'b_Intercept')[[1]], ncol = 1)
beta1 = rstan::extract(fit, "beta")[[1]]
statsbeta0 = mean(beta0+beta1[,1])
alpha = rstan::extract(fit, "alpha")[[1]][, -1]
sampeff = cbind(beta1[,-1]-beta1[,1], alpha)
trteff = matrix(sampeff[, 1:(armleft - 1)], ncol = armleft - 1)
resulttrt = resultrtostats.rand(
trteff = trteff,
treatmentindex = treatmentindex,
armleft = armleft,
K = K,
fit = fit,
group = group,
ns = ns
)
stats4 = resulttrt$stats4
stats5 = resulttrt$stats5
stats1 = resulttrt$stats1
post.prob.btcontrol = resulttrt$post.prob.btcontrol
stageeff = matrix(sampeff[,-(1:(armleft - 1))], ncol = group - 1)
stats6 = rep(NA, length(ns) - 1)
names(stats6) = seq(2, length(ns))
    stats6[1:(group - 1)] = round(colMeans(stageeff), 3)
#Sample distribution of reference in logistic regression
sampefftotal = beta0 + beta1[,1]
#Sample distribution of treatment in logistic regression
for (temp in 1:dim(trteff)[2]) {
sampefftotal = cbind(sampefftotal, beta0 + trteff[, temp] + 0)
}
#Transfer from logit scale to probability scale
sampoutcome = inv.logit(sampefftotal)
}
return(
list(
stats1 = stats1,
stats4 = stats4,
stats5 = stats5,
stats6 = stats6,
stats7 = stats7,
sampefftotal = sampefftotal,
post.prob.btcontrol = resulttrt$post.prob.btcontrol
)
)
}
# File: R/Simulation_Stansamplesummarytolist_Mixeffect.R
#' @title Timetrend.fun
#' @description This function generates the time trend function based on the trend information. It also checks the validity of the input time trend information.
#' @param trend.inf The list of information for time trend effect including 'trend.type', 'trend.effect', 'trend_add_or_multip'.
#' 'trend.type' is the shape of time trend. Default is "step". Other types are "linear", "inverse.U.linear", "plateau".
#' "trend.effect" the vector of the strength of time trend for each arm. The first element is for the control arm.
#' The value of time trend is the gap between the start of the trial and the end of the trial. The change between each interim or each patient is calculated in the function.
#' For example, for a linear time trend with trend.effect = c(0.2, 0.2), the trend effect increment in the control group for patient $i$ is 0.2(i-1)/(N_max-1), and for stage $j$ is 0.2(j-1)/(length(ns)-1).
#' "trend_add_or_multip" the pattern of time trend affecting the true response probability. Default is "mult".
#'
#' @return A list containing the time trend function according to input trend.type variable,
#' and an indicator of whether there is a time trend in data generation
#' based on input trend information
#' @export
#'
#' @examples
#' Timetrend.fun(trend.inf = list(
#' trend.type = "step",
#' trend.effect = c(0, 0),
#' trend_add_or_multip = "mult"
#' ))
#' @author Ziyan Wang
Timetrend.fun = function(trend.inf) {
# Time trend pattern function
trend.type = trend.inf$trend.type
trend.effect = trend.inf$trend.effect
trend_add_or_multip = trend.inf$trend_add_or_multip
switch(
trend.type,
"step" = {
if (sum(trend.effect != 0) > 0) {
trend.function = function(ns, group , i, trend.effect) {
delta = (group - 1) * trend.effect/(length(ns)-1)
return(delta)
}
timetrendornot = c("There is time trend during data generation")
}
},
"linear" = {
if (sum(trend.effect != 0) > 0) {
trend.function = function(ns, group , i, trend.effect) {
delta = (i - 1 + ns[group] - ns[1]) * trend.effect / (ns[length(ns)] - 1)
return(delta)
}
timetrendornot = c("There is time trend during data generation")
}
},
"inverse.U.linear" = {
if (sum(trend.effect != 0) > 0) {
trend.function = function(ns, group , i, trend.effect) {
delta = ifelse(
group <= round(length(ns) / 2),
(i - 1 + ns[group] - ns[1]) * trend.effect / (ns[length(ns)] - 1),
(ns[1] - 1 + ns[round(length(ns) / 2)] - ns[1]) * trend.effect / (ns[length(ns)] - 1) - (i - 1 +
ns[group - round(length(ns) / 2)] - ns[1]) * trend.effect / (ns[length(ns)] - 1)
)
return(delta)
}
timetrendornot = c("There is time trend during data generation")
}
},
"plateau" = {
if (sum(trend.effect != 0) > 0) {
trend.function = function(ns, group , i, trend.effect) {
delta = trend.effect * (i - 1 + ns[group] - ns[1]) / (max(ns) / 10 + (i -
1 + ns[group] - ns[1]))
return(delta)
}
timetrendornot = c("There is time trend during data generation")
}
},
stop(
"Error: Wrong trend type or strength of time effect for data generation"
)
)
if (sum(trend.effect != 0) == 0) {
trend.function = function(ns, group, i, trend.effect) {
delta = 0
return(delta)
}
timetrendornot = c("There is no time trend during data generation")
}
return(
list(
trend.function = trend.function,
timetrendornot = timetrendornot,
trend_add_or_multip = trend_add_or_multip,
trend.effect = trend.effect
)
)
}
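# Illustrative sketch (not run): evaluating the generated trend function.
# For a linear trend with trend.effect = c(0.2, 0.2), the delta for the first
# patient of stage 1 is 0 and it grows to 0.2 for the last patient of the
# last stage (toy ns assumed for illustration).
if (FALSE) {
  tt <- Timetrend.fun(trend.inf = list(
    trend.type = "linear",
    trend.effect = c(0.2, 0.2),
    trend_add_or_multip = "mult"
  ))
  ns <- c(30, 60, 90, 120, 150)
  tt$trend.function(ns, group = 1, i = 1, trend.effect = tt$trend.effect)   # 0.0 0.0
  tt$trend.function(ns, group = 5, i = 30, trend.effect = tt$trend.effect)  # 0.2 0.2
}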
# File: R/Simulation_Timetrendfunctiongeneration.R
#' @title modelinf.fun
#' @description This function summarizes the input parameters describing the model for analysis and transfers them into a list
#' @param model The statistical model. ibb: betabinomial model / tlr: logistic model
#' @param ibb.inf The list of information for betabinomial model including:
#' betabinomialmodel: The betabinomial model, pi.star: prior response rate,
#' pess: prior effective sample size
#' @param tlr.inf The list of information for logistic model including:
#' The mean (mu), variance (sigma), degree of freedom (df) of the intercept
#' and the main effect of the linear terms in logistic model.
#' reg.inf: The type of linear function in logistic model.
#' variable.inf: Fixeffect/Mixeffect. Indicating whether a mix effect model
#' is used (for time trend effect modelling)
#'
#' @return A list of model information including model, ibb.inf and tlr.inf
#' @export
#'
#' @examples
#' modelinf.fun(model = "tlr",
#' ibb.inf = list(pi.star = 0.5,
#' pess = 2,
#' betabinomialmodel = ibetabinomial.post),
#' tlr.inf = list(beta0_prior_mu = 0,
#' beta1_prior_mu = 0,
#' beta0_prior_sigma = 2.5,
#' beta1_prior_sigma = 2.5,
#' beta0_df = 7,
#' beta1_df = 7,
#' reg.inf = "main",
#' variable.inf = "Fixeffect"
#' ))
#' @author Ziyan Wang
modelinf.fun <- function(model = "tlr",
ibb.inf = list(pi.star = 0.5,
pess = 2,
betabinomialmodel = ibetabinomial.post),
tlr.inf = list(
beta0_prior_mu = 0,
beta1_prior_mu = 0,
beta0_prior_sigma = 2.5,
beta1_prior_sigma = 2.5,
beta0_df = 7,
beta1_df = 7,
reg.inf = "main",
variable.inf = "Fixeffect"
)) {
return(list(model,
ibb.inf = ibb.inf,
tlr.inf = tlr.inf))
}
# File: R/Transferdatatolist_Modelinf.R
#' @title Stopboundinf
#' @description This function summarises and checks the stopping boundary information.
#' @param Stop.type The type of stopping boundary should be "Early-Pocock", "Early-OBF" and "Noearly". Default is "Early-Pocock" which is the Pocock boundary with early stopping.
#'
#' @param Boundary.type Whether the futility boundary and the efficacy boundary are equally conservative.
#' Default is "Symmetric" which means they are equally conservative.
#' Boundary.type = "Asymmetric" means that the efficacy boundary and the futility boundary are not equally conservative
#' @param cutoff = c(cutoff1, cutoff2). A numerical vector of cutoff value for each boundary.
#' The first element is the efficacy boundary cutoff. The second element is the futility boundary cutoff
#' Stop for efficacy when Pr(theta_1 > theta_0|D_n) > cutoff1: input cutoff1 for the efficacy boundary as the first element.
#' Stop for futility when Pr(theta_1 > theta_0|D_n) < cutoff2: input cutoff2 for the futility boundary as the second element.
#'
#' @return The list of information required for boundary construction function 'Stopbound.inf'
#' @export
#'
#' @examples
#' Stop.type = "Early-Pocock" #(Pocock boundarty is a flat boundary across time)
#' Boundary.type = "Symmetric"
#' cutoff = c(0.9928, 0.0072)
#'
#' Stopbound.inf = Stopboundinf(Stop.type, Boundary.type, cutoff)
#' #Stopbound.inf
#' #$Stop.type
#' # [1] "Early-Pocock"
#' #$Boundary.type
#' #[1] "Symmetric"
#' #$cutoff
#' # [1] 0.9928 0.0072
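#'
#' # A minimal additional sketch (hypothetical cutoff values): an asymmetric
#' # Pocock-type boundary, where the two cutoffs need not sum to 1.
#' Stopbound.inf.asy = Stopboundinf("Early-Pocock", "Asymmetric", c(0.99, 0.02))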
#' @author Ziyan Wang
Stopboundinf = function(Stop.type="Early-Pocock", Boundary.type="Symmetric", cutoff=c(0.9928, 0.0072)) {
if (Boundary.type == "Symmetric") {
if (Stop.type == "Early-OBF" & cutoff[1] == cutoff[2]) {
cutofftemp = cutoff
}
else if (sum(Stop.type == c("Early-Pocock","Noearly")) & sum(cutoff) == 1) {
cutofftemp = cutoff
}
else{
stop("Error: The input of cutoff should be Symmetric")
}
}
else if (Boundary.type == "Asymmetric") {
if (sum(Stop.type == c("Early-Pocock","Noearly")) & sum(cutoff) != 1) {
cutofftemp = cutoff
}
else if (Stop.type == "Early-OBF" & cutoff[1] != cutoff[2]) {
cutofftemp = cutoff
}
else{
stop("Error: The input of cutoff should be Asymmetric")
}
}
else {
stop("Error: The boundary type is invalid. Should input Symmetric or Asymmetric.")
}
Stopbound.inf = list(
Stop.type = Stop.type,
Boundary.type = Boundary.type,
cutoff = cutofftemp
)
return(Stopbound.inf)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/Transferdatatolist_Stopboundinf.R
|
#' @title Trial simulation
#' @description This function simulates and performs the final analysis of a trial under one scenario.
#' The time cost of this function depends on the number of CPU cores of the user's computer.
#' @param ntrials A numeric variable indicating how many trial replicates you want to run. Default is 5000.
#' @param trial.fun The function of trial simulation; for more, see \code{\link{simulatetrial}}
#' @param input.info A list of input information including all information required for trial simulation.
#' @param cl A numeric variable indicating how many cores you want to use in parallel programming.
#'
#' @return A list of outputs including the final output of each trial replicate ('result'),
#' the analysis result table of the specific trial ('OPC'), and the file name for saving these outputs on the computer
#' @importFrom foreach foreach
#' @importFrom matrixStats colVars
#' @import ggplot2
#' @import rstantools
#' @export
#'
#' @examples
#' set.seed(1)
#' \donttest{Trial.simulation(ntrials = 2, cl = 2)}
#' @author Ziyan Wang
Trial.simulation = function(ntrials = 5000,
trial.fun = simulatetrial,
input.info = list(
response.probs = c(0.4, 0.4),
ns = c(30, 60, 90, 120, 150),
max.ar = 0.75,
test.type = "Twoside",
rand.algo = "Urn",
max.deviation = 3,
model.inf = list(
model = "tlr",
ibb.inf = list(
pi.star = 0.5,
pess = 2,
betabinomialmodel = ibetabinomial.post
),
tlr.inf = list(
beta0_prior_mu = 0,
beta1_prior_mu = 0,
beta0_prior_sigma = 2.5,
beta1_prior_sigma = 2.5,
beta0_df = 7,
beta1_df = 7,
reg.inf = "main",
variable.inf = "Fixeffect"
)
),
Stopbound.inf = Stopboundinf(
Stop.type = "Early-Pocock",
Boundary.type = "Symmetric",
cutoff = c(0.99, 0.01)
),
Random.inf = list(
Fixratio = TRUE,
Fixratiocontrol = 1,
BARmethod = NA,
Thall.tuning.inf = NA,
Trippa.tuning.inf = NA
),
trend.inf = list(
trend.type = "step",
trend.effect = c(0, 0),
trend_add_or_multip = "mult"
)
),
cl = 2) {
  old <- options() # save user options; restored on exit
on.exit(options(old))
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores(logical = FALSE))
registerDoParallel(cores = cl)
message("Start trial information initialisation")
#-Initialising evaluation metrics-
bias = {
}
TIE_POWER = {
}
rMSE = {
}
totalsample = {
}
totaleachN = {
}
totaleachS = {
}
result = {
}
message("Trial information initialisation done")
#-----------------------------------------
message("Start trial simulation")
result = foreach(icount(ntrials)) %dopar% trial.fun(
response.probs = input.info$response.probs,
test.type = input.info$test.type,
ns = input.info$ns,
max.ar = input.info$max.ar,
rand.algo = input.info$rand.algo,
max.deviation = input.info$max.deviation,
model.inf = input.info$model.inf,
Stopbound.inf = input.info$Stopbound.inf,
Random.inf = input.info$Random.inf,
trend.inf = input.info$trend.inf
)
message("Trial simulation done")
#Save output data
FWER = conjuncativepower_or_FWER(result,input.info$response.probs,test.type = input.info$test.type)
TIE_POWER = rbind(TIE_POWER, FWER)
meanres = Meanfunc(result) - (logit(input.info$response.probs[-1]) - logit(input.info$response.probs[1]))
# varres=Varfunc(result)
# bias=rbind(bias,paste0(meanres," (",varres,")"))
bias = rbind(bias, meanres)
var = varfunc(result)
rmse = sqrt(meanres ^ 2 + var)
rMSE = rbind(rMSE, rmse)
Nperarm = Nfunc(result)
totaleachN = rbind(totaleachN, Nperarm)
Sperarm = Sperarmfunc(result)
totaleachS = rbind(totaleachS, Sperarm)
totalN = sum(Nperarm)
totalsample = rbind(totalsample, totalN)
OPC = data.frame(
"Type I Error or Power" = TIE_POWER,
Bias = bias,
rMSE = rMSE,
"N per arm" = totaleachN,
"Survive per arm" = totaleachS,
N = totalsample
)
Nameofsaveddata = Save.resulttoRDatafile(input.info)
  result = list(result)
  # Build one descriptive label (response scenario, time trend, number of stages,
  # regression model) and reuse it for the result list name and the OPC row name.
  scenario.label = paste0(
    paste0(paste(
      as.character(
        stringr::str_replace_all(input.info$response.probs, "[^[:alnum:]]", "")
      )
    ), collapse = ""),
    "TimeTrend",
    paste0(paste(
      as.character(
        stringr::str_replace_all(input.info$trend.inf$trend.effect, "[^[:alnum:]]", "")
      ), collapse = ""
    )),
    "stage",
    length(input.info$ns),
    input.info$model.inf$tlr.inf$reg.inf
  )
  names(result) = scenario.label
  rownames(OPC) = scenario.label
doParallel::stopImplicitCluster()
return(list(
result = result,
OPC = OPC,
Nameofsaveddata = Nameofsaveddata
))
}
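
# A minimal usage sketch (illustrative replicate count only; real studies need
# many more replicates):
# res <- Trial.simulation(ntrials = 10, cl = 2)
# res$OPC     # operating characteristic summary for the scenario
# res$result  # per-replicate trial histories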
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/TrialSimulation.R
|
#' @title Operation characteristic table for alternative scenario
#' @description Operation characteristic table for the alternative scenario using the main + continuousstage model. The time trend pattern is step.
#' The strength of the time trend is 0.1, equally for all arms. The effect of the time trend on the true response probability is multiplicative.
#' @format A data frame with 3 rows and 16 variables:
#' \describe{
#' \item{\code{Type.I.Error.or.Power}}{Power}
#' \item{\code{Bias.1}}{Treatment effect bias for treatment 1}
#' \item{\code{Bias.2}}{Treatment effect bias for treatment 2}
#' \item{\code{Bias.3}}{Treatment effect bias for treatment 3}
#' \item{\code{rMSE.1}}{Rooted mean squared error for treatment 1}
#' \item{\code{rMSE.2}}{Rooted mean squared error for treatment 2}
#' \item{\code{rMSE.3}}{Rooted mean squared error for treatment 3}
#'   \item{\code{N.per.arm.1}}{Mean total number of patients allocated to control}
#'   \item{\code{N.per.arm.2}}{Mean total number of patients allocated to treatment 1}
#'   \item{\code{N.per.arm.3}}{Mean total number of patients allocated to treatment 2}
#'   \item{\code{N.per.arm.4}}{Mean total number of patients allocated to treatment 3}
#'   \item{\code{Survive.per.arm.1}}{Mean total number of patients who survived in the control arm}
#'   \item{\code{Survive.per.arm.2}}{Mean total number of patients who survived when using treatment 1}
#'   \item{\code{Survive.per.arm.3}}{Mean total number of patients who survived when using treatment 2}
#'   \item{\code{Survive.per.arm.4}}{Mean total number of patients who survived when using treatment 3}
#'   \item{\code{N}}{Mean total number of patients in a trial}
#'}
"OPC_alt"
#' @title Operation characteristic table for null scenario
#' @description Operation characteristic table for the null scenario using the main and main + continuousstage models. The main effect model was run for a null scenario with and without time trend.
#' The time trend pattern is step. The strength of the time trend is 0.1, equally for all arms. The effect of the time trend on the true response probability is multiplicative.
#' @format A data frame with 3 rows and 16 variables:
#' \describe{
#' \item{\code{Type.I.Error.or.Power}}{Family wise error rate}
#' \item{\code{Bias.1}}{Treatment effect bias for treatment 1}
#' \item{\code{Bias.2}}{Treatment effect bias for treatment 2}
#' \item{\code{Bias.3}}{Treatment effect bias for treatment 3}
#' \item{\code{rMSE.1}}{Rooted mean squared error for treatment 1}
#' \item{\code{rMSE.2}}{Rooted mean squared error for treatment 2}
#' \item{\code{rMSE.3}}{Rooted mean squared error for treatment 3}
#'   \item{\code{N.per.arm.1}}{Mean total number of patients allocated to control}
#'   \item{\code{N.per.arm.2}}{Mean total number of patients allocated to treatment 1}
#'   \item{\code{N.per.arm.3}}{Mean total number of patients allocated to treatment 2}
#'   \item{\code{N.per.arm.4}}{Mean total number of patients allocated to treatment 3}
#'   \item{\code{Survive.per.arm.1}}{Mean total number of patients who survived in the control arm}
#'   \item{\code{Survive.per.arm.2}}{Mean total number of patients who survived when using treatment 1}
#'   \item{\code{Survive.per.arm.3}}{Mean total number of patients who survived when using treatment 2}
#'   \item{\code{Survive.per.arm.4}}{Mean total number of patients who survived when using treatment 3}
#'   \item{\code{N}}{Mean total number of patients in a trial}
#'}
"OPC_null"
#' @title Cutoff screening example: the recommended grid value at each time point
#' @description The recommended grid value at each time point. There are 20 cutoff values explored.
#' @format A data frame with 20 rows and 1 variable:
#' \describe{
#' \item{\code{recommandloginformd}}{The cutoff value at each time point}
#'}
"recommandloginformd"
#' @title Cutoff screening example: the predicted value from quadratic model
#' @description The predicted values from the quadratic model for plotting family-wise error rate against the cutoff value
#' @format A data frame with 1001 rows and 1 variable:
#' \describe{
#' \item{\code{predictedtpIEinformd}}{The predicted FWER value of a large grid}
#'}
"predictedtpIEinformd"
#' @title Cutoff screening example: the details of grid
#' @description Details of the grid, including the family-wise error rate of each cutoff value, the cutoff value itself and the square of the cutoff value, used for modelling and prediction
#' @format A data frame with 24 rows and 3 variables:
#' \describe{
#' \item{\code{tpIE}}{FWER}
#' \item{\code{cutoff}}{Cutoff value}
#' \item{\code{cutoff2}}{Square of cutoff value}
#'}
"dataloginformd"
#' @title Operation characteristic table for Trial.simulation() null scenario
#' @description Operation characteristic table for null scenario using main model.
#' @format A data frame with 1 row and 8 variables:
#' \describe{
#'   \item{\code{Type.I.Error.or.Power}}{Family wise error rate}
#' \item{\code{Bias}}{Treatment effect bias for treatment 1}
#' \item{\code{rMSE}}{Rooted mean squared error for treatment effect 1}
#'   \item{\code{N.per.arm.1}}{Mean total number of patients allocated to control}
#'   \item{\code{N.per.arm.2}}{Mean total number of patients allocated to treatment 1}
#'   \item{\code{Survive.per.arm.1}}{Mean total number of patients who survived in the control arm}
#'   \item{\code{Survive.per.arm.2}}{Mean total number of patients who survived when using treatment 1}
#'   \item{\code{N}}{Mean total number of patients in a trial}
#'}
"OPC_Trial.simulation"
#' @title A list of data from Gaussian process for symmetric cutoff screening.
#' @description A list of data from Gaussian process for symmetric cutoff screening.
#' @format A list with four elements:
#' \describe{
#' \item{\code{next.cutoff}}{The cutoff value for the next evaluation}
#' \item{\code{prediction}}{A list of values from Gaussian process model}
#' \item{\code{tpIE}}{A vector of type I error rate data}
#' \item{\code{cutoff}}{A vector of cutoff data}
#'}
"optimdata_sym"
#' @title A list of data from Gaussian process and trial simulation for asymmetric cutoff screening.
#' @description A list of data from Gaussian process and trial simulation for asymmetric cutoff screening.
#' @format A list with four elements:
#' \describe{
#' \item{\code{next.cutoff}}{The cutoff value for the next evaluation}
#' \item{\code{prediction}}{A list of values from Gaussian process model}
#' \item{\code{ESS}}{A two column twenty five rows matrix with the effective sample size for each cutoff pair under both null (first column) and alternative (second column) scenario}
#' \item{\code{testeddata}}{A data frame containing each tested cutoff pair (column two and three for efficacy and futility, respectively), their FWER under null (column one) and conjunctive power under alternative (column four)}
#'}
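#' @examples
#' # A minimal inspection sketch: the tested cutoff pairs with their FWER (column 1)
#' # and conjunctive power (column 4).
#' head(optimdata_asy$testeddata)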
"optimdata_asy"
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/datafile_document.R
|
#' @title ibetabinomial.post
#' @description This function calculates the posterior probability that each active treatment arm is better than control using the beta-binomial model
#'
#' @param n A vector of the number of patients treated on each arm (the first element is control)
#' @param y A vector of the number of responses on each arm (the first element is control)
#' @param pi.star The prior response probability. The default is 0.5
#' @param pess The effective sample size of beta prior. The default is 2
#'
#' @return A vector of posterior probabilities that each active treatment arm is better than control
#' @importFrom stats dbeta
#' @importFrom stats integrate
#' @importFrom stats lm
#' @importFrom stats pbeta
#' @export
#'
#' @examples
#' n <- c(20,20,20,20)
#' y <- c(12,12,12,6)
#' ibetabinomial.post(n, y, pi.star = 0.5, pess = 2)
#' #[1] 0.5000000 0.5000000 0.0308018
#' @author Ziyan Wang
ibetabinomial.post = function(n, y, pi.star = 0.5, pess = 2) {
  # First elements of n and y are from the control arm
K = length(n)
# Posterior Probability of each arm better than the same control arm
# See https://www.evanmiller.org/bayesian-ab-testing.html#cite1
# Treatment_k~beta(a_k,b_k), Control~beta(a_1,b_1)
# p.prior*ess.prior: prior success
# (1-p.prior)*ess.prior: prior failure
post.prob = {
}
for (k in 2:K) {
post.prob[k - 1] <- unlist(integrate(
function(x)
pbeta(x, y[k] + pi.star * pess, (n[k] - y[k]) + (1 - pi.star) * pess, lower.tail =
FALSE) * dbeta(x, y[1] + pi.star * pess, (n[1] - y[1]) + (1 - pi.star) *
pess),
lower = 0,
upper = 1
))$value
names(post.prob[k - 1]) = paste("Treatment", k - 1, "vs", "Control", sep = " ")
}
#Slower version
# # p.prior*ess.prior: prior success
# # (1-p.prior)*ess.prior: prior failure
# narm=length(n)
# rn=matrix(rbeta(random.number*narm,
# y+p.prior*ess.prior,
# n-y+(1-p.prior)*ess.prior),
# random.number, byrow = TRUE)
#
# controlrn=rn[,1]
# treatmentrn=as.matrix(rn[,-1])
#
# # rnT=rbeta(random.number,y[1]+p.prior*ess.prior,n[1]-y[1]+(1-p.prior)*ess.prior)
# # rnC=rbeta(random.number,y[2]+p.prior*ess.prior,n[2]-y[2]+(1-p.prior)*ess.prior)
# # postprob<-mean(rnT>rnC)
#
# post.prob=colMeans(treatmentrn>controlrn)
return(post.prob)
}
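
# A minimal Monte Carlo cross-check sketch (illustrative only, not part of the
# package API): sampling from the two posterior Beta distributions should
# approximate the integral computed by ibetabinomial.post().
# set.seed(123)
# n <- c(20, 20); y <- c(12, 6); pi.star <- 0.5; pess <- 2
# draws.control <- rbeta(1e5, y[1] + pi.star * pess, n[1] - y[1] + (1 - pi.star) * pess)
# draws.trt     <- rbeta(1e5, y[2] + pi.star * pess, n[2] - y[2] + (1 - pi.star) * pess)
# mean(draws.trt > draws.control)  # approx. ibetabinomial.post(n, y)[1]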
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/ibetabinomialpost.R
|
# Generated by rstantools. Do not edit by hand.
# names of stan models
stanmodels <- c("betabinom", "logisticdummy", "randomeffect")
# load each stan module
Rcpp::loadModule("stan_fit4betabinom_mod", what = TRUE)
Rcpp::loadModule("stan_fit4logisticdummy_mod", what = TRUE)
Rcpp::loadModule("stan_fit4randomeffect_mod", what = TRUE)
# instantiate each stanmodel object
stanmodels <- sapply(stanmodels, function(model_name) {
# create C++ code for stan model
stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan")
stan_file <- file.path(stan_file, paste0(model_name, ".stan"))
stanfit <- rstan::stanc_builder(stan_file,
allow_undefined = TRUE,
obfuscate_model_name = FALSE)
stanfit$model_cpp <- list(model_cppname = stanfit$model_name,
model_cppcode = stanfit$cppcode)
# create stanmodel object
methods::new(Class = "stanmodel",
model_name = stanfit$model_name,
model_code = stanfit$model_code,
model_cpp = stanfit$model_cpp,
mk_cppmodule = function(x) get(paste0("rstantools_model_", model_name)))
})
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/R/stanmodels.R
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(BayesianPlatformDesignTimeTrend)
## ----eval=FALSE---------------------------------------------------------------
#
# ntrials = 1000 # Number of trial replicates
# ns = seq(120, 600, 60) # Sequence of total number of accrued patients at each interim analysis
# null.reponse.prob = 0.4
# alt.response.prob = 0.6
#
# # We investigate the type I error rate for different time trend strength
# null.scenario = matrix(
# c(
# null.reponse.prob,
# null.reponse.prob,
# null.reponse.prob,
# null.reponse.prob
# ),
# nrow = 1,
# ncol = 4,
# byrow = T
# )
# alt.scenario = matrix(
# c(
# null.reponse.prob,
# alt.response.prob,
# alt.response.prob,
# null.reponse.prob
# ),
# nrow = 1,
# ncol = 4,
# byrow = T
# )
# model = "tlr" #logistic model
# max.ar = 0.85 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
# #------------Select the data generation randomisation methods-------
# rand.type = "Urn" # Urn design
# max.deviation = 3 # The recommended value for the tuning parameter in the Urn design
#
# # Require multiple cores for parallel running on HPC (here is the number of cores I request on Iridis 5 at the University of Southampton)
# cl = 40
#
# # Set the model we want to use and the time trend effect for each model used.
# # Here the main model will be used twice for two different strength of time trend c(0,0,0,0) and c(1,1,1,1) to investigate how time trend affect the evaluation metrics in BAR setting.
# # Then the main + stage_continuous model which is the treatment effect + stage effect model will be applied for strength equal c(1,1,1,1) to investigate how the main + stage effect model improve the evaluation metrics.
# reg.inf = "main"
# trend.effect = c(0,0,0,0)
#
# result = {
#
# }
# OPC = {
#
# }
# K = dim(null.scenario)[2]
# cutoffindex = 1
# trendindex = 1
#
# cutoff.information=demo_Cutoffscreening.GP (
# ntrials = ntrials,
# # Number of trial replicates
# trial.fun = simulatetrial,
# # Call the main function
# power.type = "Conjunctive",
# response.probs.alt = alt.scenario,
# grid.inf = list(
# start.length = 15,
# grid.min = NULL,
# grid.max = NULL,
# confidence.level = 0.95,
# grid.length = 101,
# change.scale = FALSE,
# noise = T,
# errorrate = 0.1,
# simulationerror = 0.01,
# iter.max = 15,
# plotornot = FALSE),
#   # Set up the cutoff screening design. The initial design evaluates fifteen cutoff pairs; the Gaussian process then proposes up to fifteen further pairs
# input.info = list(
# response.probs = null.scenario[1,],
# #The scenario vector in this round
# ns = ns,
# # Sequence of total number of accrued patients at each interim analysis
# max.ar = max.ar,
# #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
# rand.type = rand.type,
# # Which randomisation methods in data generation.
# max.deviation = max.deviation,
# # The recommended value for the tuning parameter in the Urn design
# model.inf = list(
# model = model,
# #Use which model?
# ibb.inf = list(
# #independent beta-binomial model which can be used only for no time trend simulation
# pi.star = 0.5,
# # beta prior mean
# pess = 2,
# # beta prior effective sample size
# betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation
# ),
# tlr.inf = list(
# beta0_prior_mu = 0,
# # Stan logistic model t prior location
# beta1_prior_mu = 0,
# # Stan logistic model t prior location
# beta0_prior_sigma = 2.5,
# # Stan logistic model t prior sigma
# beta1_prior_sigma = 2.5,
# # Stan logistic model t prior sigma
# beta0_df = 7,
# # Stan logistic model t prior degree of freedom
# beta1_df = 7,
# # Stan logistic model t prior degree of freedom
# reg.inf = reg.inf,
# # The model we want to use
#       variable.inf = "Fixeffect" # Use fixed effect logistic model
# )
# ),
# Stop.type = "Early-Pocock",
# # Use Pocock like early stopping boundary
# Boundary.type = "Asymmetric",
#   # Use Asymmetric boundary where the efficacy and futility cutoffs are screened separately (they need not sum to 1)
# Random.inf = list(
# Fixratio = FALSE,
# # Do not use fix ratio allocation
# Fixratiocontrol = NA,
# # Do not use fix ratio allocation
# BARmethod = "Thall",
# # Use Thall's Bayesian adaptive randomisation approach
#     Thall.tuning.inf = list(tuningparameter = "Unfixed", fixvalue = 1) # Specifies the tuning parameter value for a fixed tuning parameter
# ),
# trend.inf = list(
# trend.type = "linear",
# # Linear time trend pattern
# trend.effect = trend.effect,
#     # Strength of time trend effect
# trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability
# )
# ),
# cl = 2
# )
#
## -----------------------------------------------------------------------------
library(ggplot2)
# Details of grid
optimdata=optimdata_asy
# Recommend cutoff at each screening round
nextcutoff = optimdata$next.cutoff
nextcutoff$FWER=0.05
nextcutoff.predict = nextcutoff
colnames(nextcutoff.predict)=c("eff","fut","FWER")
prediction = optimdata$prediction
point.tested=optimdata$testeddata[,2:3]
tpIE=optimdata$testeddata[,1]
pow=optimdata$testeddata[,4]
point.tested=point.tested[1:sum(!is.na(tpIE)),]
tpIE=tpIE[1:sum(!is.na(tpIE))]
pow=pow[1:sum(!is.na(pow))]
cleandata=data.frame(FWER=tpIE,pow=pow,point.tested)
colnames(cleandata)[c(3,4)]=c("eff","fut")
GP.res = optimdata
xgrid.eff=optimdata$prediction$xgrid[,1]
xgrid.fut=optimdata$prediction$xgrid[,2]
grid.min=c(0.95,0)
grid.max=c(1,0.05)
library(grDevices)
library(RColorBrewer)
colormap=colorRampPalette(rev(brewer.pal(11,'Spectral')))(32)
target_line=0.1
df=data.frame(FWER=optimdata$prediction$yhat.t1E,eff=xgrid.eff,fut=xgrid.fut)
Contour.tIE<-ggplot(df,aes(eff,fut,z=FWER))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=FWER))+
geom_contour(breaks=c(target_line, seq(min(df$FWER),max(df$FWER),by=(max(df$FWER)-min(df$FWER))/10)),color="black")+
geom_contour(breaks=target_line,color="white",linewidth=1.1)+
labs(title="Mean type I error rate (FWER)", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.tIE=Contour.tIE+geom_point(data=cleandata,aes(eff,fut),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut),color="pink")
# Extract the contour data
contour_data_tIE <- ggplot_build(Contour.tIE)$data[[2]]
# Record the contour that has FWER equal to the target
contour_data_tIE_subset <- contour_data_tIE[contour_data_tIE$level == target_line, ]
# Order and split the data to ensure the plot is drawn correctly
contour_data_tIE_subset=contour_data_tIE_subset[order(contour_data_tIE_subset$piece,contour_data_tIE_subset$x),]
contour_data_tIE_subset_1=contour_data_tIE_subset[contour_data_tIE_subset$piece==1,]
contour_data_tIE_subset_2=contour_data_tIE_subset[contour_data_tIE_subset$piece==2,]
# To make sure the data frame is not empty
if (nrow(contour_data_tIE_subset_1) == 0){
contour_data_tIE_subset_1[1,]=(rep(NA,dim(contour_data_tIE_subset_1)[[2]]))
} else if (nrow(contour_data_tIE_subset_2) == 0){
contour_data_tIE_subset_2[1,]=(rep(NA,dim(contour_data_tIE_subset_2)[[2]]))
}
df=data.frame(sd=optimdata$prediction$sd.t1E,eff=xgrid.eff,fut=xgrid.fut)
Contour.sd<-ggplot(df,aes(eff,fut,z=sd))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=sd))+
geom_contour(breaks=seq(min(df$sd),max(df$sd),by=(max(df$sd)-min(df$sd))/10),color="black")+labs(title="sd of contour", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.sd=Contour.sd+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
df=data.frame(Power=optimdata$prediction$yhat.pow,eff=xgrid.eff,fut=xgrid.fut)
Contour.pow<-ggplot(df,aes(eff,fut,z=Power))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=Power))+
geom_contour(breaks=seq(min(df$Power),max(df$Power),by=(max(df$Power)-min(df$Power))/10),color="black")+labs(title="Mean power", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.pow=Contour.pow+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+
geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
df=data.frame(NullESS=optimdata$prediction$yhat.ESS.null,eff=xgrid.eff,fut=xgrid.fut)
Contour.nullESS<-ggplot(df,aes(eff,fut,z=NullESS))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=NullESS))+
geom_contour(breaks=seq(min(df$NullESS),max(df$NullESS),by=(max(df$NullESS)-min(df$NullESS))/10),color="black")+labs(title="Mean ESS under null", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.nullESS=Contour.nullESS+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
df=data.frame(AltESS=optimdata$prediction$yhat.ESS.alt,eff=xgrid.eff,fut=xgrid.fut)
Contour.altESS<-ggplot(df,aes(eff,fut,z=AltESS))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=AltESS))+
geom_contour(breaks=seq(min(df$AltESS),max(df$AltESS),by=(max(df$AltESS)-min(df$AltESS))/10),color="black")+labs(title="Mean ESS under alternative", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.altESS=Contour.altESS+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+
geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
## ----fig.align='center',fig.height=9,fig.width=7,warning=FALSE----------------
library(ggpubr)
ggarrange(Contour.tIE,Contour.pow,Contour.nullESS,Contour.altESS,Contour.sd,ncol = 2,nrow=3)
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/inst/doc/MAMS-CutoffScreening-GP-Asymmetric-tutorial.R
|
---
title: "MAMS-CutoffScreening-GP-Asymmetric-tutorial"
output: rmarkdown::html_vignette
author: "Ziyan Wang"
vignette: >
%\VignetteIndexEntry{MAMS-CutoffScreening-GP-Asymmetric-tutorial}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(BayesianPlatformDesignTimeTrend)
```
## Four arm trial cutoff screening
The 'BayesianPlatformDesignTimeTrend' package simulation process requires stopping boundary cutoff screening first (for details, refer to the demo \code{\link{demo_Cutoffscreening_GP}} and the MAMS-CutoffScreening-GP-tutorial). After the cutoff screening process, we need to record the cutoff values of both the efficacy and the futility boundary for use in the trial simulation process. The data for each trial replicate are created sequentially during the simulation.
In this tutorial, the cutoff screening process for an asymmetric boundary is presented. The example is a four-arm MAMS trial with one control and three treatment arms. The control arm is not stopped during the trial. The time trend pattern is set to 'linear'. The way the time trend impacts the baseline response probability is set to 'mult' (multiplicative). The time trend strength is set to zero, which means that there is no time trend effect in this example. The randomisation method used is the unfixed Thall's approach. The early stopping boundary is the asymmetric Pocock boundary. The models used in this example are fixed effect models (the model with only a treatment effect and the model with both a treatment effect and a discrete stage effect). The evaluation metrics are error rate, mean treatment effect bias, rooted MSE, mean number of patients allocated to each arm and mean total number of patients in the trial. For asymmetric boundary screening, we can find a contour along which FWER = 10\%. For each point on this contour, the (conjunctive, disconjunctive or marginal) power is optimised under the user-specified alternative scenario. In this example, the alternative scenario is $\pi_0 = \pi_3 = 0.4$, $\pi_1 = \pi_2 = 0.6$. In this tutorial, we recommend a cutoff value for each power definition. The contour plot is also presented for interpretation.
```{r,eval=FALSE}
ntrials = 1000 # Number of trial replicates
ns = seq(120, 600, 60) # Sequence of total number of accrued patients at each interim analysis
null.reponse.prob = 0.4
alt.response.prob = 0.6
# We investigate the type I error rate for different time trend strength
null.scenario = matrix(
c(
null.reponse.prob,
null.reponse.prob,
null.reponse.prob,
null.reponse.prob
),
nrow = 1,
ncol = 4,
byrow = T
)
alt.scenario = matrix(
c(
null.reponse.prob,
alt.response.prob,
alt.response.prob,
null.reponse.prob
),
nrow = 1,
ncol = 4,
byrow = T
)
model = "tlr" #logistic model
max.ar = 0.85 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
#------------Select the data generation randomisation methods-------
rand.type = "Urn" # Urn design
max.deviation = 3 # The recommended value for the tuning parameter in the Urn design
# Require multiple cores for parallel running on HPC (here is the number of cores I request on Iridis 5 at the University of Southampton)
cl = 40
# Set the model we want to use and the time trend effect for each model used.
# Here the main model will be used twice for two different strength of time trend c(0,0,0,0) and c(1,1,1,1) to investigate how time trend affect the evaluation metrics in BAR setting.
# Then the main + stage_continuous model which is the treatment effect + stage effect model will be applied for strength equal c(1,1,1,1) to investigate how the main + stage effect model improve the evaluation metrics.
reg.inf = "main"
trend.effect = c(0,0,0,0)
result = {
}
OPC = {
}
K = dim(null.scenario)[2]
cutoffindex = 1
trendindex = 1
cutoff.information=demo_Cutoffscreening.GP (
ntrials = ntrials,
# Number of trial replicates
trial.fun = simulatetrial,
# Call the main function
power.type = "Conjunctive",
response.probs.alt = alt.scenario,
grid.inf = list(
start.length = 15,
grid.min = NULL,
grid.max = NULL,
confidence.level = 0.95,
grid.length = 101,
change.scale = FALSE,
noise = T,
errorrate = 0.1,
simulationerror = 0.01,
iter.max = 15,
plotornot = FALSE),
  # Set up the cutoff screening design. The initial design evaluates fifteen cutoff pairs; the Gaussian process then proposes up to fifteen further pairs
input.info = list(
response.probs = null.scenario[1,],
#The scenario vector in this round
ns = ns,
# Sequence of total number of accrued patients at each interim analysis
max.ar = max.ar,
#limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
rand.type = rand.type,
# Which randomisation methods in data generation.
max.deviation = max.deviation,
# The recommended value for the tuning parameter in the Urn design
model.inf = list(
model = model,
#Use which model?
ibb.inf = list(
#independent beta-binomial model which can be used only for no time trend simulation
pi.star = 0.5,
# beta prior mean
pess = 2,
# beta prior effective sample size
betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation
),
tlr.inf = list(
beta0_prior_mu = 0,
# Stan logistic model t prior location
beta1_prior_mu = 0,
# Stan logistic model t prior location
beta0_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta1_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta0_df = 7,
# Stan logistic model t prior degree of freedom
beta1_df = 7,
# Stan logistic model t prior degree of freedom
reg.inf = reg.inf,
# The model we want to use
variable.inf = "Fixeffect" # Use fix effect logistic model
)
),
Stop.type = "Early-Pocock",
# Use Pocock like early stopping boundary
Boundary.type = "Asymmetric",
  # Use Asymmetric boundary where the efficacy and futility cutoffs are screened separately (they need not sum to 1)
Random.inf = list(
Fixratio = FALSE,
# Do not use fix ratio allocation
Fixratiocontrol = NA,
# Do not use fix ratio allocation
BARmethod = "Thall",
# Use Thall's Bayesian adaptive randomisation approach
Thall.tuning.inf = list(tuningparameter = "Unfixed", fixvalue = 1) # Specified the tunning parameter value for fixed tuning parameter
),
trend.inf = list(
trend.type = "linear",
# Linear time trend pattern
trend.effect = trend.effect,
    # Strength of time trend effect
trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability
)
),
cl = 2
)
```
Summary of the output data from cutoff screening example
```{r}
library(ggplot2)
# Details of grid
optimdata=optimdata_asy
# Recommend cutoff at each screening round
nextcutoff = optimdata$next.cutoff
nextcutoff$FWER=0.05
nextcutoff.predict = nextcutoff
colnames(nextcutoff.predict)=c("eff","fut","FWER")
prediction = optimdata$prediction
point.tested=optimdata$testeddata[,2:3]
tpIE=optimdata$testeddata[,1]
pow=optimdata$testeddata[,4]
point.tested=point.tested[1:sum(!is.na(tpIE)),]
tpIE=tpIE[1:sum(!is.na(tpIE))]
pow=pow[1:sum(!is.na(pow))]
cleandata=data.frame(FWER=tpIE,pow=pow,point.tested)
colnames(cleandata)[c(3,4)]=c("eff","fut")
GP.res = optimdata
xgrid.eff=optimdata$prediction$xgrid[,1]
xgrid.fut=optimdata$prediction$xgrid[,2]
grid.min=c(0.95,0)
grid.max=c(1,0.05)
library(grDevices)
library(RColorBrewer)
colormap=colorRampPalette(rev(brewer.pal(11,'Spectral')))(32)
target_line=0.1
df=data.frame(FWER=optimdata$prediction$yhat.t1E,eff=xgrid.eff,fut=xgrid.fut)
Contour.tIE<-ggplot(df,aes(eff,fut,z=FWER))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=FWER))+
geom_contour(breaks=c(target_line, seq(min(df$FWER),max(df$FWER),by=(max(df$FWER)-min(df$FWER))/10)),color="black")+
geom_contour(breaks=target_line,color="white",linewidth=1.1)+
labs(title="Mean type I error rate (FWER)", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.tIE=Contour.tIE+geom_point(data=cleandata,aes(eff,fut),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut),color="pink")
# Extract the contour data
contour_data_tIE <- ggplot_build(Contour.tIE)$data[[2]]
# Record the contour that has FWER equal to the target
contour_data_tIE_subset <- contour_data_tIE[contour_data_tIE$level == target_line, ]
# Order and split the data to ensure the plot is drawn correctly
contour_data_tIE_subset=contour_data_tIE_subset[order(contour_data_tIE_subset$piece,contour_data_tIE_subset$x),]
contour_data_tIE_subset_1=contour_data_tIE_subset[contour_data_tIE_subset$piece==1,]
contour_data_tIE_subset_2=contour_data_tIE_subset[contour_data_tIE_subset$piece==2,]
# To make sure the data frame is not empty
if (nrow(contour_data_tIE_subset_1) == 0){
contour_data_tIE_subset_1[1,]=(rep(NA,dim(contour_data_tIE_subset_1)[[2]]))
} else if (nrow(contour_data_tIE_subset_2) == 0){
contour_data_tIE_subset_2[1,]=(rep(NA,dim(contour_data_tIE_subset_2)[[2]]))
}
df=data.frame(sd=optimdata$prediction$sd.t1E,eff=xgrid.eff,fut=xgrid.fut)
Contour.sd<-ggplot(df,aes(eff,fut,z=sd))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=sd))+
geom_contour(breaks=seq(min(df$sd),max(df$sd),by=(max(df$sd)-min(df$sd))/10),color="black")+labs(title="sd of contour", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.sd=Contour.sd+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
df=data.frame(Power=optimdata$prediction$yhat.pow,eff=xgrid.eff,fut=xgrid.fut)
Contour.pow<-ggplot(df,aes(eff,fut,z=Power))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=Power))+
geom_contour(breaks=seq(min(df$Power),max(df$Power),by=(max(df$Power)-min(df$Power))/10),color="black")+labs(title="Mean power", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.pow=Contour.pow+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+
geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
df=data.frame(NullESS=optimdata$prediction$yhat.ESS.null,eff=xgrid.eff,fut=xgrid.fut)
Contour.nullESS<-ggplot(df,aes(eff,fut,z=NullESS))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=NullESS))+
geom_contour(breaks=seq(min(df$NullESS),max(df$NullESS),by=(max(df$NullESS)-min(df$NullESS))/10),color="black")+labs(title="Mean ESS under null", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.nullESS=Contour.nullESS+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
df=data.frame(AltESS=optimdata$prediction$yhat.ESS.alt,eff=xgrid.eff,fut=xgrid.fut)
Contour.altESS<-ggplot(df,aes(eff,fut,z=AltESS))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=AltESS))+
geom_contour(breaks=seq(min(df$AltESS),max(df$AltESS),by=(max(df$AltESS)-min(df$AltESS))/10),color="black")+labs(title="Mean ESS under alternative", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.altESS=Contour.altESS+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+
geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
```
```{r,fig.align='center',fig.height=9,fig.width=7,warning=FALSE}
library(ggpubr)
ggarrange(Contour.tIE,Contour.pow,Contour.nullESS,Contour.altESS,Contour.sd,ncol = 2,nrow=3)
```
Red indicates higher values and purple indicates lower values. The black solid points are the tested cutoff pairs. The white line is the contour for FWER equal to 0.1. The pink point is the next recommended cutoff pair, at which power is optimised. As we can see, the pink point controls the FWER at 0.1, maximises the power and minimises the effective sample size (ESS) under the alternative scenario. The ESS contours under the null run in a similar direction to the FWER contours, which means that the same FWER leads to a similar ESS under this trial setting (the maximum acceptable sample size is fixed before simulation due to budget).
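
A short sketch (using the stored example data shipped with the package) to pull out the recommended cutoff pair directly:

```{r}
# The efficacy/futility cutoff pair recommended by the last screening round
optimdata_asy$next.cutoff
```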
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/inst/doc/MAMS-CutoffScreening-GP-Asymmetric-tutorial.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(BayesianPlatformDesignTimeTrend)
## ----eval=FALSE---------------------------------------------------------------
#
# ntrials = 1000 # Number of trial replicates
# ns = seq(120, 600, 60) # Sequence of total number of accrued patients at each interim analysis
# null.reponse.prob = 0.15
# alt.response.prob = 0.35
#
# # We investigate the type I error rate for different time trend strength
# null.scenario = matrix(
# c(
# null.reponse.prob,
# null.reponse.prob,
# null.reponse.prob,
# null.reponse.prob
# ),
# nrow = 1,
# ncol = 4,
# byrow = T
# )
# # alt.scenario = matrix(c(null.reponse.prob,null.reponse.prob,null.reponse.prob,null.reponse.prob,
# # null.reponse.prob,alt.response.prob,null.reponse.prob,null.reponse.prob,
# # null.reponse.prob,alt.response.prob,alt.response.prob,null.reponse.prob,
# # null.reponse.prob,alt.response.prob,alt.response.prob,alt.response.prob), nrow=3, ncol = 4,byrow=T)
# model = "tlr" #logistic model
# max.ar = 0.85 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
# #------------Select the data generation randomisation methods-------
# rand.type = "Urn" # Urn design
# max.deviation = 3 # The recommended value for the tuning parameter in the Urn design
#
# # Require multiple cores for parallel running
# cl = 2
#
# # Set the model we want to use and the time trend effect for each model used.
# # Here the main model will be used twice for two different strength of time trend c(0,0,0,0) and c(1,1,1,1) to investigate how time trend affect the evaluation metrics in BAR setting.
# # Then the main + stage_continuous model which is the treatment effect + stage effect model will be applied for strength equal c(1,1,1,1) to investigate how the main + stage effect model improve the evaluation metrics.
# reg.inf = "main"
# trend.effect = c(0,0,0,0)
#
# result = {
#
# }
# OPC = {
#
# }
# K = dim(null.scenario)[2]
# cutoffindex = 1
# trendindex = 1
#
# cutoff.information=demo_Cutoffscreening.GP (
# ntrials = ntrials,
# # Number of trial replicates
# trial.fun = simulatetrial,
# # Call the main function
# grid.inf = list(
# start.length = 10,
# grid.min = NULL,
# grid.max = NULL,
# confidence.level = 0.95,
# grid.length = 5000,
# change.scale = FALSE,
# noise = T,
# errorrate = 0.1,
# simulationerror = 0.01,
# iter.max = 15,
# plotornot = FALSE),
#   # Set up the cutoff screening design. The initial design evaluates ten cutoff values; the Gaussian process then proposes up to fifteen further values
# input.info = list(
# response.probs = null.scenario[1,],
# #The scenario vector in this round
# ns = ns,
# # Sequence of total number of accrued patients at each interim analysis
# max.ar = max.ar,
# #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
# rand.type = rand.type,
# # Which randomisation methods in data generation.
# max.deviation = max.deviation,
# # The recommended value for the tuning parameter in the Urn design
# model.inf = list(
# model = model,
# #Use which model?
# ibb.inf = list(
# #independent beta-binomial model which can be used only for no time trend simulation
# pi.star = 0.5,
# # beta prior mean
# pess = 2,
# # beta prior effective sample size
# betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation
# ),
# tlr.inf = list(
# beta0_prior_mu = 0,
# # Stan logistic model t prior location
# beta1_prior_mu = 0,
# # Stan logistic model t prior location
# beta0_prior_sigma = 2.5,
# # Stan logistic model t prior sigma
# beta1_prior_sigma = 2.5,
# # Stan logistic model t prior sigma
# beta0_df = 7,
# # Stan logistic model t prior degree of freedom
# beta1_df = 7,
# # Stan logistic model t prior degree of freedom
# reg.inf = reg.inf,
# # The model we want to use
#       variable.inf = "Fixeffect" # Use fixed effect logistic model
# )
# ),
# Stop.type = "Early-OBF",
#   # Use OBF-like early stopping boundary
# Boundary.type = "Symmetric",
# # Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1
# Random.inf = list(
# Fixratio = FALSE,
# # Do not use fix ratio allocation
# Fixratiocontrol = NA,
# # Do not use fix ratio allocation
# BARmethod = "Thall",
# # Use Thall's Bayesian adaptive randomisation approach
#     Thall.tuning.inf = list(tuningparameter = "Unfixed", fixvalue = 1) # Specifies the tuning parameter value for a fixed tuning parameter
# ),
# trend.inf = list(
# trend.type = "linear",
# # Linear time trend pattern
# trend.effect = trend.effect,
#     # Strength of time trend effect
# trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability
# )
# ),
# cl = 2
# )
#
## -----------------------------------------------------------------------------
library(ggplot2)
# Details of grid
optimdata=optimdata_sym
# Recommend cutoff at each screening round
nextcutoff = optimdata$next.cutoff
prediction = optimdata$prediction
cutoff=optimdata$cutoff
tpIE=optimdata$tpIE
cutoff=cutoff[1:sum(!is.na(tpIE))]
tpIE=tpIE[1:sum(!is.na(tpIE))]
GP.res = optimdata
prediction = data.frame(yhat = GP.res$prediction$yhat.t1E,
sd = matrix(GP.res$prediction$sd.t1E,ncol=1),
qup = GP.res$prediction$qup.t1E,
qdown = GP.res$prediction$qdown.t1E,
xgrid = GP.res$prediction$xgrid)
GPplot=ggplot(data = prediction) +
geom_ribbon(aes(x = xgrid, ymin = qdown, ymax = qup),col="#f8766d", alpha = 0.5,linetype = 2) +
geom_line(aes(xgrid, yhat),col = "#f8766d") +
geom_point(aes(cutoff[1:sum(!is.na(tpIE))], tpIE[1:sum(!is.na(tpIE))]),
data = data.frame(tpIE=tpIE,cutoff=cutoff),col = "#00bfc4") +
geom_point(aes(nextcutoff, 0.1),
data = data.frame(tpIE=tpIE,cutoff=cutoff),col = "#f8766d") +
geom_hline(yintercept = 0.1,linetype = 2) +
geom_text(aes(x=1,y=0.15,label=paste0("FWER target is 0.1")),hjust=0,vjust=1)+
geom_vline(xintercept = nextcutoff, linetype = 2) +
geom_text(aes(x=6,y=0.8,label=paste0("Next cutoff value is ",round(nextcutoff,3))))+
theme_minimal()+ylab("FWER")+xlab("Cutoff value of the OBF boundary (c*)")+
geom_point(aes(nextcutoff, 0.1),
data = data.frame(tpIE=tpIE,cutoff=cutoff),col = "#f8766d") +
theme(plot.background = element_rect(fill = "#e6dfba"))
print(GPplot)
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/inst/doc/MAMS-CutoffScreening-GP-Symmetric-tutorial.R
|
---
title: "MAMS-CutoffScreening-GP-Symmetric-tutorial"
output: rmarkdown::html_vignette
author: "Ziyan Wang"
vignette: >
%\VignetteIndexEntry{MAMS-CutoffScreening-GP-Symmetric-tutorial}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(BayesianPlatformDesignTimeTrend)
```
## Four arm trial cutoff screening
The 'BayesianPlatformDesignTimeTrend' package simulation process requires stopping boundary cutoff screening first (for details, refer to the demo \code{\link{demo_Cutoffscreening_GP}} and the MAMS-CutoffScreening-GP-tutorial). After the cutoff screening process, we need to record the cutoff values of both the efficacy and the futility boundary for use in the trial simulation process. The data for each trial replicate are created sequentially during the simulation.
In this tutorial, the cutoff screening process for a symmetric boundary is presented. The example is a four-arm MAMS trial with one control and three treatment arms. The control arm is not stopped during the trial. The time trend pattern is set to 'linear'. The way the time trend impacts the baseline response probability is set to 'mult' (multiplicative). The time trend strength is set to zero. The randomisation method used is the unfixed Thall's approach. The early stopping boundary is the symmetric OBF boundary. The models used in this example are fixed effect models (the model with only a treatment effect and the model with both a treatment effect and a discrete stage effect). The evaluation metrics are error rate, mean treatment effect bias, rooted MSE, mean number of patients allocated to each arm and mean total number of patients in the trial.
```{r,eval=FALSE}
ntrials = 1000 # Number of trial replicates
ns = seq(120, 600, 60) # Sequence of total number of accrued patients at each interim analysis
null.reponse.prob = 0.15
alt.response.prob = 0.35
# We investigate the type I error rate for different time trend strength
null.scenario = matrix(
c(
null.reponse.prob,
null.reponse.prob,
null.reponse.prob,
null.reponse.prob
),
nrow = 1,
ncol = 4,
byrow = T
)
# alt.scenario = matrix(c(null.reponse.prob,null.reponse.prob,null.reponse.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,null.reponse.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,alt.response.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,alt.response.prob,alt.response.prob), nrow=3, ncol = 4,byrow=T)
model = "tlr" #logistic model
max.ar = 0.85 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
#------------Select the data generation randomisation methods-------
rand.type = "Urn" # Urn design
max.deviation = 3 # The recommended value for the tuning parameter in the Urn design
# Require multiple cores for parallel running
cl = 2
# Set the model we want to use and the time trend effect for each model used.
# Here the main model will be used twice for two different strength of time trend c(0,0,0,0) and c(1,1,1,1) to investigate how time trend affect the evaluation metrics in BAR setting.
# Then the main + stage_continuous model which is the treatment effect + stage effect model will be applied for strength equal c(1,1,1,1) to investigate how the main + stage effect model improve the evaluation metrics.
reg.inf = "main"
trend.effect = c(0,0,0,0)
result = {
}
OPC = {
}
K = dim(null.scenario)[2]
cutoffindex = 1
trendindex = 1
cutoff.information=demo_Cutoffscreening.GP (
ntrials = ntrials,
# Number of trial replicates
trial.fun = simulatetrial,
# Call the main function
grid.inf = list(
start.length = 10,
grid.min = NULL,
grid.max = NULL,
confidence.level = 0.95,
grid.length = 5000,
change.scale = FALSE,
noise = T,
errorrate = 0.1,
simulationerror = 0.01,
iter.max = 15,
plotornot = FALSE),
  # Set up the cutoff screening design. The initial design evaluates ten cutoff values; the Gaussian process then proposes up to fifteen further values
input.info = list(
response.probs = null.scenario[1,],
#The scenario vector in this round
ns = ns,
# Sequence of total number of accrued patients at each interim analysis
max.ar = max.ar,
#limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
rand.type = rand.type,
# Which randomisation methods in data generation.
max.deviation = max.deviation,
# The recommended value for the tuning parameter in the Urn design
model.inf = list(
model = model,
#Use which model?
ibb.inf = list(
#independent beta-binomial model which can be used only for no time trend simulation
pi.star = 0.5,
# beta prior mean
pess = 2,
# beta prior effective sample size
betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation
),
tlr.inf = list(
beta0_prior_mu = 0,
# Stan logistic model t prior location
beta1_prior_mu = 0,
# Stan logistic model t prior location
beta0_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta1_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta0_df = 7,
# Stan logistic model t prior degree of freedom
beta1_df = 7,
# Stan logistic model t prior degree of freedom
reg.inf = reg.inf,
# The model we want to use
variable.inf = "Fixeffect" # Use fix effect logistic model
)
),
Stop.type = "Early-OBF",
  # Use OBF-like early stopping boundary
Boundary.type = "Symmetric",
# Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1
Random.inf = list(
Fixratio = FALSE,
# Do not use fix ratio allocation
Fixratiocontrol = NA,
# Do not use fix ratio allocation
BARmethod = "Thall",
# Use Thall's Bayesian adaptive randomisation approach
Thall.tuning.inf = list(tuningparameter = "Unfixed", fixvalue = 1) # Specified the tunning parameter value for fixed tuning parameter
),
trend.inf = list(
trend.type = "linear",
# Linear time trend pattern
trend.effect = trend.effect,
    # Strength of time trend effect
trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability
)
),
cl = 2
)
```
Summary of the output data from cutoff screening example
```{r}
library(ggplot2)
# Details of grid
optimdata=optimdata_sym
# Recommend cutoff at each screening round
nextcutoff = optimdata$next.cutoff
prediction = optimdata$prediction
cutoff=optimdata$cutoff
tpIE=optimdata$tpIE
cutoff=cutoff[1:sum(!is.na(tpIE))]
tpIE=tpIE[1:sum(!is.na(tpIE))]
GP.res = optimdata
prediction = data.frame(yhat = GP.res$prediction$yhat.t1E,
sd = matrix(GP.res$prediction$sd.t1E,ncol=1),
qup = GP.res$prediction$qup.t1E,
qdown = GP.res$prediction$qdown.t1E,
xgrid = GP.res$prediction$xgrid)
GPplot=ggplot(data = prediction) +
geom_ribbon(aes(x = xgrid, ymin = qdown, ymax = qup),col="#f8766d", alpha = 0.5,linetype = 2) +
geom_line(aes(xgrid, yhat),col = "#f8766d") +
geom_point(aes(cutoff[1:sum(!is.na(tpIE))], tpIE[1:sum(!is.na(tpIE))]),
data = data.frame(tpIE=tpIE,cutoff=cutoff),col = "#00bfc4") +
geom_point(aes(nextcutoff, 0.1),
data = data.frame(tpIE=tpIE,cutoff=cutoff),col = "#f8766d") +
geom_hline(yintercept = 0.1,linetype = 2) +
geom_text(aes(x=1,y=0.15,label=paste0("FWER target is 0.1")),hjust=0,vjust=1)+
geom_vline(xintercept = nextcutoff, linetype = 2) +
geom_text(aes(x=6,y=0.8,label=paste0("Next cutoff value is ",round(nextcutoff,3))))+
theme_minimal()+ylab("FWER")+xlab("Cutoff value of the OBF boundary (c*)")+
geom_point(aes(nextcutoff, 0.1),
data = data.frame(tpIE=tpIE,cutoff=cutoff),col = "#f8766d") +
theme(plot.background = element_rect(fill = "#e6dfba"))
print(GPplot)
```
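
The red curve is the Gaussian process posterior mean of the FWER as a function of the symmetric OBF cutoff, and the shaded ribbon is the corresponding credible band. The blue points are the cutoff values already simulated, the horizontal dashed line marks the FWER target of 0.1, and the vertical dashed line marks the recommended next cutoff value, where the predicted FWER crosses the target.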
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/inst/doc/MAMS-CutoffScreening-GP-Symmetric-tutorial.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(BayesianPlatformDesignTimeTrend)
## ----eval=FALSE---------------------------------------------------------------
#
# ntrials = 1000 # Number of trial replicates
# ns = seq(120, 600, 120) # Sequence of total number of accrued patients at each interim analysis
# null.reponse.prob = 0.4
# alt.response.prob = 0.6
#
# # We investigate the type I error rate for different time trend strength
# null.scenario = matrix(
# c(
# null.reponse.prob,
# null.reponse.prob,
# null.reponse.prob,
# null.reponse.prob
# ),
# nrow = 1,
# ncol = 4,
# byrow = T
# )
# # alt.scenario = matrix(c(null.reponse.prob,null.reponse.prob,null.reponse.prob,null.reponse.prob,
# # null.reponse.prob,alt.response.prob,null.reponse.prob,null.reponse.prob,
# # null.reponse.prob,alt.response.prob,alt.response.prob,null.reponse.prob,
# # null.reponse.prob,alt.response.prob,alt.response.prob,alt.response.prob), nrow=3, ncol = 4,byrow=T)
# model = "tlr" #logistic model
# max.ar = 0.75 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
# #------------Select the data generation randomisation methods-------
# rand.type = "Urn" # Urn design
# max.deviation = 3 # The recommended value for the tuning parameter in the Urn design
#
# # Require multiple cores for parallel running
# cl = 2
#
# # Set the model we want to use and the time trend effect for each model used.
# # Here the main model will be used twice for two different strength of time trend c(0,0,0,0) and c(1,1,1,1) to investigate how time trend affect the evaluation metrics in BAR setting.
# # Then the main + stage_continuous model which is the treatment effect + stage effect model will be applied for strength equal c(1,1,1,1) to investigate how the main + stage effect model improve the evaluation metrics.
# reg.inf = "main"
# trend.effect = c(0,0,0,0)
#
# result = {
#
# }
# OPC = {
#
# }
# K = dim(null.scenario)[2]
# cutoffindex = 1
# trendindex = 1
#
# cutoff.information=demo_Cutoffscreening (
# ntrials = ntrials,
# # Number of trial replicates
# trial.fun = simulatetrial,
# # Call the main function
# grid.inf = list(start = c(0.9, 0.95, 1), extendlength =
# 20),
#   # Set up the cutoff grid for screening. The start grid has three elements. The extended grid has twenty cutoff values under investigation
# input.info = list(
# response.probs = null.scenario[1,],
# #The scenario vector in this round
# ns = ns,
# # Sequence of total number of accrued patients at each interim analysis
# max.ar = max.ar,
# #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
# rand.type = rand.type,
# # Which randomisation methods in data generation.
# max.deviation = max.deviation,
# # The recommended value for the tuning parameter in the Urn design
# model.inf = list(
# model = model,
# #Use which model?
# ibb.inf = list(
# #independent beta-binomial model which can be used only for no time trend simulation
# pi.star = 0.5,
# # beta prior mean
# pess = 2,
# # beta prior effective sample size
# betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation
# ),
# tlr.inf = list(
# beta0_prior_mu = 0,
# # Stan logistic model t prior location
# beta1_prior_mu = 0,
# # Stan logistic model t prior location
# beta0_prior_sigma = 2.5,
# # Stan logistic model t prior sigma
# beta1_prior_sigma = 2.5,
# # Stan logistic model t prior sigma
# beta0_df = 7,
# # Stan logistic model t prior degree of freedom
# beta1_df = 7,
# # Stan logistic model t prior degree of freedom
# reg.inf = reg.inf,
# # The model we want to use
# variable.inf = "Fixeffect" # Use fixed effect logistic model
# )
# ),
# Stop.type = "Early-Pocock",
# # Use Pocock like early stopping boundary
# Boundary.type = "Symmetric",
# # Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1
# Random.inf = list(
# Fixratio = FALSE,
# # Do not use fixed-ratio allocation
# Fixratiocontrol = NA,
# # No fixed allocation ratio is needed for the control arm
# BARmethod = "Thall",
# # Use Thall's Bayesian adaptive randomisation approach
# Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1) # Specify the tuning parameter value for the fixed tuning parameter
# ),
# trend.inf = list(
# trend.type = "linear",
# # Linear time trend pattern
# trend.effect = trend.effect,
# # Strength of time trend effect
# trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability
# )
# ),
# cl = 2
# )
#
## -----------------------------------------------------------------------------
# Details of grid
dataloginformd
# Recommended cutoff at each screening round
t(recommandloginformd)
# Plot
plot(
tpIE ~ cutoff,
pch = 16,
xlab = "Cutoff",
ylab = "Type I Error",
cex.lab = 1.3,
col = "#f8766d",
data = data.frame(dataloginformd)
)
cutoffgrid <- seq(0.9, 1, 0.0001)
lines(cutoffgrid, t(predictedtpIEinformd), col = "#00bfc4", lwd = 3)
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/inst/doc/MAMS-CutoffScreening-tutorial.R
|
---
title: "MAMS-CutoffScreening-tutorial"
output: rmarkdown::html_vignette
author: "Ziyan Wang"
vignette: >
%\VignetteIndexEntry{MAMS-CutoffScreening-tutorial}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(BayesianPlatformDesignTimeTrend)
```
## Four arm trial cutoff screening
The 'BayesianPlatformDesignTimeTrend' package simulation process requires stopping boundary cutoff screening first (for details, see the demo `demo_Cutoffscreening()` and this tutorial). After the cutoff screening process, we record the cutoff values of both the efficacy and futility boundaries for use in the trial simulation process. The data for each trial replicate are generated sequentially during the simulation.
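Once a cutoff has been screened and recorded, it feeds the stopping rule of the simulation stage. A minimal sketch, using 0.994 as an illustrative screened value (the value used in the companion trial-simulation tutorial):
```{r, eval=FALSE}
# The recorded cutoff from screening enters the simulation via Stopboundinf()
Stopbound.inf <- Stopboundinf(
  Stop.type = "Early-Pocock",
  Boundary.type = "Symmetric",
  cutoff = c(0.994, 1 - 0.994) # efficacy cutoff c and futility cutoff 1 - c
)
```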
In this tutorial, the cutoff screening process is presented. The example is a four-arm MAMS trial with one control and three treatment arms. The control arm is not stopped during the trial. The time trend pattern is set to 'linear' and acts multiplicatively ('mult') on the baseline response probability; the time trend strength is set to zero, so the data are generated without a trend. The randomisation method is Thall's Bayesian adaptive randomisation with a fixed tuning parameter. The early stopping rule is the symmetric Pocock-type boundary. The working model is the fixed effect logistic model with treatment effect only (reg.inf = 'main'), and the quantity screened is the type I error rate as a function of the cutoff value.
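Under the symmetric boundary used below, the efficacy and futility cutoffs are tied together, so the screen is over a single scalar $c$ (this just restates the 'Symmetric' boundary option set in the call below):

$$c_{\text{eff}} = c, \qquad c_{\text{fut}} = 1 - c,$$

where an arm stops early for efficacy when its posterior probability of being effective exceeds $c_{\text{eff}}$, and for futility when that probability falls below $c_{\text{fut}}$.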
```{r,eval=FALSE}
ntrials = 1000 # Number of trial replicates
ns = seq(120, 600, 120) # Sequence of total number of accrued patients at each interim analysis
null.reponse.prob = 0.4
alt.response.prob = 0.6
# We investigate the type I error rate for different time trend strength
null.scenario = matrix(
c(
null.reponse.prob,
null.reponse.prob,
null.reponse.prob,
null.reponse.prob
),
nrow = 1,
ncol = 4,
byrow = T
)
# alt.scenario = matrix(c(null.reponse.prob,null.reponse.prob,null.reponse.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,null.reponse.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,alt.response.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,alt.response.prob,alt.response.prob), nrow=3, ncol = 4,byrow=T)
model = "tlr" #logistic model
max.ar = 0.75 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
#------------Select the data generation randomisation methods-------
rand.type = "Urn" # Urn design
max.deviation = 3 # The recommended value for the tuning parameter in the Urn design
# Require multiple cores for parallel running
cl = 2
# Set the model and the time trend effect.
# Here only the main effect model (reg.inf = "main") is used, with no time trend (trend.effect = c(0,0,0,0)), to screen the cutoff under the null scenario.
reg.inf = "main"
trend.effect = c(0,0,0,0)
result = NULL
OPC = NULL
K = dim(null.scenario)[2]
cutoffindex = 1
trendindex = 1
cutoff.information=demo_Cutoffscreening (
ntrials = ntrials,
# Number of trial replicates
trial.fun = simulatetrial,
# Call the main function
grid.inf = list(start = c(0.9, 0.95, 1), extendlength =
20),
# Set up the cutoff grid for screening. The start grid has three elements; the extended grid has twenty cutoff values under investigation
input.info = list(
response.probs = null.scenario[1,],
#The scenario vector in this round
ns = ns,
# Sequence of total number of accrued patients at each interim analysis
max.ar = max.ar,
#limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
rand.type = rand.type,
# Which randomisation methods in data generation.
max.deviation = max.deviation,
# The recommended value for the tuning parameter in the Urn design
model.inf = list(
model = model,
#Use which model?
ibb.inf = list(
#independent beta-binomial model which can be used only for no time trend simulation
pi.star = 0.5,
# beta prior mean
pess = 2,
# beta prior effective sample size
betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation
),
tlr.inf = list(
beta0_prior_mu = 0,
# Stan logistic model t prior location
beta1_prior_mu = 0,
# Stan logistic model t prior location
beta0_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta1_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta0_df = 7,
# Stan logistic model t prior degree of freedom
beta1_df = 7,
# Stan logistic model t prior degree of freedom
reg.inf = reg.inf,
# The model we want to use
variable.inf = "Fixeffect" # Use fixed effect logistic model
)
),
Stop.type = "Early-Pocock",
# Use Pocock like early stopping boundary
Boundary.type = "Symmetric",
# Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1
Random.inf = list(
Fixratio = FALSE,
# Do not use fixed-ratio allocation
Fixratiocontrol = NA,
# No fixed allocation ratio is needed for the control arm
BARmethod = "Thall",
# Use Thall's Bayesian adaptive randomisation approach
Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1) # Specify the tuning parameter value for the fixed tuning parameter
),
trend.inf = list(
trend.type = "linear",
# Linear time trend pattern
trend.effect = trend.effect,
# Strength of time trend effect
trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability
)
),
cl = 2
)
```
Summary of the output data from the cutoff screening example:
```{r}
# Details of grid
dataloginformd
# Recommended cutoff at each screening round
t(recommandloginformd)
# Plot
plot(
tpIE ~ cutoff,
pch = 16,
xlab = "Cutoff",
ylab = "Type I Error",
cex.lab = 1.3,
col = "#f8766d",
data = data.frame(dataloginformd)
)
cutoffgrid <- seq(0.9, 1, 0.0001)
lines(cutoffgrid, t(predictedtpIEinformd), col = "#00bfc4", lwd = 3)
```
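Given the fitted curve above, one way to read off a final cutoff is to take the least conservative grid value whose predicted type I error does not exceed the target. A minimal sketch, assuming `predictedtpIEinformd` is aligned with `cutoffgrid` (as in the plot) and a 10% target:
```{r, eval=FALSE}
target <- 0.1
pred <- as.numeric(t(predictedtpIEinformd))
# admissible cutoffs: predicted type I error at or below the target
admissible <- cutoffgrid[pred <= target]
# smallest admissible efficacy cutoff, i.e. the least conservative choice
min(admissible)
```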
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/inst/doc/MAMS-CutoffScreening-tutorial.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(BayesianPlatformDesignTimeTrend)
## -----------------------------------------------------------------------------
ntrials = 1000 # Number of trial replicates
ns = seq(120,600,120) # Sequence of total number of accrued patients at each interim analysis
null.reponse.prob = 0.4
alt.response.prob = 0.6
# We investigate the type I error rate for different time trend strength
null.scenario = matrix(
c(
null.reponse.prob,
null.reponse.prob,
null.reponse.prob,
null.reponse.prob
),
nrow = 1,
ncol = 4,
byrow = T
)
# alt.scenario = matrix(c(null.reponse.prob,null.reponse.prob,null.reponse.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,null.reponse.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,alt.response.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,alt.response.prob,alt.response.prob), nrow=3, ncol = 4,byrow=T)
model = "tlr" #logistic model
max.ar = 0.75 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
#------------Select the data generation randomisation methods-------
rand.type = "Urn" # Urn design
max.deviation = 3 # The recommended value for the tuning parameter in the Urn design
# Require multiple cores for parallel running
cl = 2
# Set the model we want to use and the time trend effect for each model used.
# Here the main effect model is used twice, for time trend strengths c(0,0,0,0) and c(0.1,0.1,0.1,0.1), to investigate how the time trend affects the evaluation metrics in the BAR setting.
# Then the main + stage_continuous model (treatment effect + stage effect) is applied for strength c(0.1,0.1,0.1,0.1) to investigate how it improves the evaluation metrics.
reg.inf = c("main", "main", "main + stage_continuous")
trend.effect = matrix(
c(0, 0, 0, 0, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1),
ncol = 4,
nrow = 3,
byrow = T
)
#
cutoffearly = matrix(rep(0.994, dim(null.scenario)[1]), ncol = 1)
K = dim(null.scenario)[2]
print(
paste0(
"Start trial simulation. This is a ",
K,
"-arm trial simulation. There are one null scenario and ",
K - 1 ,
" alternative scenarios. There are ",
K ,
" rounds."
)
)
cutoffindex = 1
## ----eval=FALSE---------------------------------------------------------------
# result = NULL
# OPC_null = NULL
# for (i in 1:dim(null.scenario)[1]) {
# trendindex = 1
# for (j in 1:length(reg.inf)){
# restlr = Trial.simulation(
# ntrials = ntrials,
# # Number of trial replicates
# trial.fun = simulatetrial,
# # Call the main function
# input.info = list(
# response.probs = null.scenario[cutoffindex, ],
# #The scenario vector in this round
# ns = ns,
# # Sequence of total number of accrued patients at each interim analysis
# max.ar = max.ar,
# #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
# rand.type = rand.type,
# # Which randomisation methods in data generation.
# max.deviation = max.deviation,
# # The recommended value for the tuning parameter in the Urn design
# model.inf = list(
# model = model,
# #Use which model?
# ibb.inf = list(
# #independent beta-binomial model which can be used only for no time trend simulation
# pi.star = 0.5,
# # beta prior mean
# pess = 2,
# # beta prior effective sample size
# betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation
# ),
# tlr.inf = list(
# beta0_prior_mu = 0,
# # Stan logistic model t prior location
# beta1_prior_mu = 0,
# # Stan logistic model t prior location
# beta0_prior_sigma = 2.5,
# # Stan logistic model t prior sigma
# beta1_prior_sigma = 2.5,
# # Stan logistic model t prior sigma
# beta0_df = 7,
# # Stan logistic model t prior degree of freedom
# beta1_df = 7,
# # Stan logistic model t prior degree of freedom
# reg.inf = reg.inf[trendindex],
# # The model we want to use
# variable.inf = "Fixeffect" # Use fixed effect logistic model
# )
# ),
# Stopbound.inf = Stopboundinf(
# Stop.type = "Early-Pocock",
# # Use Pocock like early stopping boundary
# Boundary.type = "Symmetric",
# # Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1
# cutoff = c(cutoffearly[cutoffindex, 1], 1 - cutoffearly[cutoffindex, 1]) # The cutoff value for stopping boundary
# ),
# Random.inf = list(
# Fixratio = FALSE,
# # Do not use fixed-ratio allocation
# Fixratiocontrol = NA,
# # No fixed allocation ratio is needed for the control arm
# BARmethod = "Thall",
# # Use Thall's Bayesian adaptive randomisation approach
# Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1) # Specify the tuning parameter value for the fixed tuning parameter
# ),
# trend.inf = list(
# trend.type = "step",
# # Step time trend pattern
# trend.effect = trend.effect[trendindex, ],
# # Strength of time trend effect
# trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability
# )
# ),
# cl = 2 # 2 cores required
# )
#
# trendindex = trendindex + 1
# # The result list can be used for plotting, and the OPC table summarises the evaluation metrics for each scenario
# result = c(result, restlr$result)
# OPC_null = rbind(OPC_null, restlr$OPC)
# }
# cutoffindex = cutoffindex + 1
# }
## -----------------------------------------------------------------------------
print("Finished null scenario study")
save_data = FALSE
if (isTRUE(save_data)) {
save(result, file = restlr$Nameofsaveddata$nameData)
save(OPC_null, file = restlr$Nameofsaveddata$nameTable)
}
## -----------------------------------------------------------------------------
# Characteristic table
print(OPC_null)
## -----------------------------------------------------------------------------
ntrials = 1000 # Number of trial replicates
ns = seq(120,600,120) # Sequence of total number of accrued patients at each interim analysis
null.reponse.prob = 0.4
alt.response.prob = 0.6
# We investigate the type I error rate for different time trend strength
alt.scenario = matrix(
c(
null.reponse.prob,
alt.response.prob,
null.reponse.prob,
null.reponse.prob,
null.reponse.prob,
alt.response.prob,
alt.response.prob,
null.reponse.prob,
null.reponse.prob,
alt.response.prob,
alt.response.prob,
alt.response.prob
),
nrow = 3,
ncol = 4,
byrow = T
)
model = "tlr" #logistic model
max.ar = 0.75 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
#------------Select the data generation randomisation methods-------
rand.type = "Urn" # Urn design
max.deviation = 3 # The recommended value for the tuning parameter in the Urn design
# Require multiple cores for parallel running
cl = 2
# Set the model we want to use and the time trend effect for each model used.
# Here only the main + stage_continuous model (treatment effect + stage effect) is applied, with time trend strength c(0.1,0.1,0.1,0.1), to investigate the evaluation metrics under the alternative scenarios.
reg.inf = c("main + stage_continuous")
trend.effect = matrix(c(0.1, 0.1, 0.1, 0.1),
ncol = 4,
nrow = 1,
byrow = T)
#
cutoffearly = matrix(rep(0.994, dim(alt.scenario)[1]), ncol = 1)
K = dim(alt.scenario)[2]
print(
paste0(
"Start trial simulation. This is a ",
K,
"-arm trial simulation. There are one null scenario and ",
K - 1 ,
" alternative scenarios. There are ",
K ,
" rounds."
)
)
cutoffindex = 1
## ----eval=FALSE---------------------------------------------------------------
#
# result = NULL
# OPC_alt = NULL
# for (i in 1:dim(alt.scenario)[1]) {
# trendindex = 1
# for (j in 1:length(reg.inf)){
# restlr = Trial.simulation(
# ntrials = ntrials,
# # Number of trial replicates
# trial.fun = simulatetrial,
# # Call the main function
# input.info = list(
# response.probs = alt.scenario[cutoffindex, ],
# #The scenario vector in this round
# ns = ns,
# # Sequence of total number of accrued patients at each interim analysis
# max.ar = max.ar,
# #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
# rand.type = rand.type,
# # Which randomisation methods in data generation.
# max.deviation = max.deviation,
# # The recommended value for the tuning parameter in the Urn design
# model.inf = list(
# model = model,
# #Use which model?
# ibb.inf = list(
# #independent beta-binomial model which can be used only for no time trend simulation
# pi.star = 0.5,
# # beta prior mean
# pess = 2,
# # beta prior effective sample size
# betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation
# ),
# tlr.inf = list(
# beta0_prior_mu = 0,
# # Stan logistic model t prior location
# beta1_prior_mu = 0,
# # Stan logistic model t prior location
# beta0_prior_sigma = 2.5,
# # Stan logistic model t prior sigma
# beta1_prior_sigma = 2.5,
# # Stan logistic model t prior sigma
# beta0_df = 7,
# # Stan logistic model t prior degree of freedom
# beta1_df = 7,
# # Stan logistic model t prior degree of freedom
# reg.inf = reg.inf[trendindex],
# # The model we want to use
# variable.inf = "Fixeffect" # Use fixed effect logistic model
# )
# ),
# Stopbound.inf = Stopboundinf(
# Stop.type = "Early-Pocock",
# # Use Pocock like early stopping boundary
# Boundary.type = "Symmetric",
# # Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1
# cutoff = c(cutoffearly[cutoffindex, 1], 1 - cutoffearly[cutoffindex, 1]) # The cutoff value for stopping boundary
# ),
# Random.inf = list(
# Fixratio = FALSE,
# # Do not use fixed-ratio allocation
# Fixratiocontrol = NA,
# # No fixed allocation ratio is needed for the control arm
# BARmethod = "Thall",
# # Use Thall's Bayesian adaptive randomisation approach
# Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1) # Specify the tuning parameter value for the fixed tuning parameter
# ),
# trend.inf = list(
# trend.type = "step",
# # Step time trend pattern
# trend.effect = trend.effect[trendindex, ],
# # Strength of time trend effect
# trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability
# )
# ),
# cl = 2 # 2 cores required
# )
# trendindex = trendindex + 1
# # The result list can be used for plotting, and the OPC table summarises the evaluation metrics for each scenario
# result = c(result, restlr$result)
# OPC_alt = rbind(OPC_alt, restlr$OPC)
# }
# cutoffindex = cutoffindex + 1
# }
## -----------------------------------------------------------------------------
print("Finished alternative scenario study")
save_data = FALSE
if (isTRUE(save_data)) {
save(result, file = restlr$Nameofsaveddata$nameData)
save(OPC_alt, file = restlr$Nameofsaveddata$nameTable)
}
## -----------------------------------------------------------------------------
# Characteristic table
print(OPC_alt)
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/inst/doc/MAMS-trial-simulation-tutorial.R
|
---
title: "MAMS-trial-simulation-tutorial"
output: rmarkdown::html_vignette
author: "Ziyan Wang"
vignette: >
%\VignetteIndexEntry{MAMS-trial-simulation-tutorial}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(BayesianPlatformDesignTimeTrend)
```
## Four arm trial simulation
The 'BayesianPlatformDesignTimeTrend' package simulation process requires stopping boundary cutoff screening first (for details, see the demo `demo_Cutoffscreening()` and the MAMS-CutoffScreening-tutorial). After the cutoff screening process, we record the cutoff values of both the efficacy and futility boundaries for use in the trial simulation process. The data for each trial replicate are generated sequentially during the simulation.
In this tutorial, the example is a four-arm MAMS trial with one control and three treatment arms. The control arm is not stopped during the trial. The time trend pattern is set to 'step' and acts multiplicatively ('mult') on the baseline response probability. The time trend strength is set to zero first and then to 0.1 to study the impact of the time trend on the evaluation metrics. The models used are fixed effect logistic models: one with the treatment effect only, and one with both the treatment effect and a discrete stage effect. The evaluation metrics are the error rates, mean treatment effect bias, root MSE, mean number of patients allocated to each arm, and mean total number of patients in the trial.
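For reference, the root MSE reported in the characteristic tables is the usual Monte Carlo definition (standard notation, not package-specific): with $R$ trial replicates, true treatment effect $\theta$ and estimate $\hat{\theta}_r$ in replicate $r$,

$$\mathrm{RMSE} = \sqrt{\frac{1}{R}\sum_{r=1}^{R}\big(\hat{\theta}_r - \theta\big)^2}.$$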
First, we investigate the family-wise error rate (FWER) for different time trend strengths and how the model with a stage effect helps control it. The cutoff value was screened under the null scenario using the main-effect-only model so that the FWER is controlled under 0.1; the corresponding pairwise false positive rate is 0.037 for each treatment-control comparison.
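As a rough consistency check (a sketch assuming the three treatment-control comparisons are approximately independent, which is only an approximation under adaptive randomisation):
```{r}
# FWER implied by a 3.7% pairwise error rate across 3 comparisons,
# assuming approximately independent comparisons
pairwise <- 0.037
1 - (1 - pairwise)^3
```
which is close to the 0.1 target.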
```{r}
ntrials = 1000 # Number of trial replicates
ns = seq(120,600,120) # Sequence of total number of accrued patients at each interim analysis
null.reponse.prob = 0.4
alt.response.prob = 0.6
# We investigate the type I error rate for different time trend strength
null.scenario = matrix(
c(
null.reponse.prob,
null.reponse.prob,
null.reponse.prob,
null.reponse.prob
),
nrow = 1,
ncol = 4,
byrow = T
)
# alt.scenario = matrix(c(null.reponse.prob,null.reponse.prob,null.reponse.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,null.reponse.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,alt.response.prob,null.reponse.prob,
# null.reponse.prob,alt.response.prob,alt.response.prob,alt.response.prob), nrow=3, ncol = 4,byrow=T)
model = "tlr" #logistic model
max.ar = 0.75 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
#------------Select the data generation randomisation methods-------
rand.type = "Urn" # Urn design
max.deviation = 3 # The recommended value for the tuning parameter in the Urn design
# Require multiple cores for parallel running
cl = 2
# Set the model we want to use and the time trend effect for each model used.
# Here the main effect model is used twice, for time trend strengths c(0,0,0,0) and c(0.1,0.1,0.1,0.1), to investigate how the time trend affects the evaluation metrics in the BAR setting.
# Then the main + stage_continuous model (treatment effect + stage effect) is applied for strength c(0.1,0.1,0.1,0.1) to investigate how it improves the evaluation metrics.
reg.inf = c("main", "main", "main + stage_continuous")
trend.effect = matrix(
c(0, 0, 0, 0, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1),
ncol = 4,
nrow = 3,
byrow = T
)
#
cutoffearly = matrix(rep(0.994, dim(null.scenario)[1]), ncol = 1)
K = dim(null.scenario)[2]
print(
paste0(
"Start trial simulation. This is a ",
K,
"-arm trial simulation. There are one null scenario and ",
K - 1 ,
" alternative scenarios. There are ",
K ,
" rounds."
)
)
cutoffindex = 1
```
```{r, eval=FALSE}
result = NULL
OPC_null = NULL
for (i in 1:dim(null.scenario)[1]) {
trendindex = 1
for (j in 1:length(reg.inf)){
restlr = Trial.simulation(
ntrials = ntrials,
# Number of trial replicates
trial.fun = simulatetrial,
# Call the main function
input.info = list(
response.probs = null.scenario[cutoffindex, ],
#The scenario vector in this round
ns = ns,
# Sequence of total number of accrued patients at each interim analysis
max.ar = max.ar,
#limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
rand.type = rand.type,
# Which randomisation methods in data generation.
max.deviation = max.deviation,
# The recommended value for the tuning parameter in the Urn design
model.inf = list(
model = model,
#Use which model?
ibb.inf = list(
#independent beta-binomial model which can be used only for no time trend simulation
pi.star = 0.5,
# beta prior mean
pess = 2,
# beta prior effective sample size
betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation
),
tlr.inf = list(
beta0_prior_mu = 0,
# Stan logistic model t prior location
beta1_prior_mu = 0,
# Stan logistic model t prior location
beta0_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta1_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta0_df = 7,
# Stan logistic model t prior degree of freedom
beta1_df = 7,
# Stan logistic model t prior degree of freedom
reg.inf = reg.inf[trendindex],
# The model we want to use
variable.inf = "Fixeffect" # Use fixed effect logistic model
)
),
Stopbound.inf = Stopboundinf(
Stop.type = "Early-Pocock",
# Use Pocock like early stopping boundary
Boundary.type = "Symmetric",
# Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1
cutoff = c(cutoffearly[cutoffindex, 1], 1 - cutoffearly[cutoffindex, 1]) # The cutoff value for stopping boundary
),
Random.inf = list(
Fixratio = FALSE,
# Do not use fixed-ratio allocation
Fixratiocontrol = NA,
# No fixed allocation ratio is needed for the control arm
BARmethod = "Thall",
# Use Thall's Bayesian adaptive randomisation approach
Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1) # Specify the tuning parameter value for the fixed tuning parameter
),
trend.inf = list(
trend.type = "step",
# Step time trend pattern
trend.effect = trend.effect[trendindex, ],
# Strength of time trend effect
trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability
)
),
cl = 2 # 2 cores required
)
trendindex = trendindex + 1
# The result list can be used for plotting, and the OPC table summarises the evaluation metrics for each scenario
result = c(result, restlr$result)
OPC_null = rbind(OPC_null, restlr$OPC)
}
cutoffindex = cutoffindex + 1
}
```
```{r}
print("Finished null scenario study")
save_data = FALSE
if (isTRUE(save_data)) {
save(result, file = restlr$Nameofsaveddata$nameData)
save(OPC_null, file = restlr$Nameofsaveddata$nameTable)
}
```
Present the evaluation metrics for the null scenario. The FWER is 0.1 when there is no time trend. The FWER is inflated to 0.1296 when a step time trend is present and the data are modelled with the main-effect-only model. The main effect plus stage effect model brings the FWER back under 0.1.
```{r}
# Characteristic table
print(OPC_null)
```
Then, we investigate the other evaluation metrics for the alternative scenarios in the presence of the time trend. The cutoff value is the same as in the previous example, since the control arm response probability in the alternative scenarios equals that in the null scenario.
```{r}
ntrials = 1000 # Number of trial replicates
ns = seq(120,600,120) # Sequence of total number of accrued patients at each interim analysis
null.reponse.prob = 0.4
alt.response.prob = 0.6
# We investigate the type I error rate for different time trend strength
alt.scenario = matrix(
c(
null.reponse.prob,
alt.response.prob,
null.reponse.prob,
null.reponse.prob,
null.reponse.prob,
alt.response.prob,
alt.response.prob,
null.reponse.prob,
null.reponse.prob,
alt.response.prob,
alt.response.prob,
alt.response.prob
),
nrow = 3,
ncol = 4,
byrow = T
)
model = "tlr" #logistic model
max.ar = 0.75 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
#------------Select the data generation randomisation methods-------
rand.type = "Urn" # Urn design
max.deviation = 3 # The recommended value for the tuning parameter in the Urn design
# Require multiple cores for parallel running
cl = 2
# Set the model we want to use and the time trend effect for each model used.
# Here only the main + stage_continuous model (treatment effect + stage effect) is applied, with time trend strength c(0.1,0.1,0.1,0.1), to investigate the evaluation metrics under the alternative scenarios.
reg.inf = c("main + stage_continuous")
trend.effect = matrix(c(0.1, 0.1, 0.1, 0.1),
ncol = 4,
nrow = 1,
byrow = T)
#
cutoffearly = matrix(rep(0.994, dim(alt.scenario)[1]), ncol = 1)
K = dim(alt.scenario)[2]
print(
paste0(
"Start trial simulation. This is a ",
K,
"-arm trial simulation. There are one null scenario and ",
K - 1 ,
" alternative scenarios. There are ",
K ,
" rounds."
)
)
cutoffindex = 1
```
```{r, eval=FALSE}
result = NULL
OPC_alt = NULL
for (i in 1:dim(alt.scenario)[1]) {
trendindex = 1
for (j in 1:length(reg.inf)){
restlr = Trial.simulation(
ntrials = ntrials,
# Number of trial replicates
trial.fun = simulatetrial,
# Call the main function
input.info = list(
response.probs = alt.scenario[cutoffindex, ],
#The scenario vector in this round
ns = ns,
# Sequence of total number of accrued patients at each interim analysis
max.ar = max.ar,
#limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
rand.type = rand.type,
# Which randomisation methods in data generation.
max.deviation = max.deviation,
# The recommended value for the tuning parameter in the Urn design
model.inf = list(
model = model,
#Use which model?
ibb.inf = list(
#independent beta-binomial model which can be used only for no time trend simulation
pi.star = 0.5,
# beta prior mean
pess = 2,
# beta prior effective sample size
betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation
),
tlr.inf = list(
beta0_prior_mu = 0,
# Stan logistic model t prior location
beta1_prior_mu = 0,
# Stan logistic model t prior location
beta0_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta1_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta0_df = 7,
# Stan logistic model t prior degree of freedom
beta1_df = 7,
# Stan logistic model t prior degree of freedom
reg.inf = reg.inf[trendindex],
# The model we want to use
variable.inf = "Fixeffect" # Use fixed effect logistic model
)
),
Stopbound.inf = Stopboundinf(
Stop.type = "Early-Pocock",
# Use Pocock like early stopping boundary
Boundary.type = "Symmetric",
# Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1
cutoff = c(cutoffearly[cutoffindex, 1], 1 - cutoffearly[cutoffindex, 1]) # The cutoff value for stopping boundary
),
Random.inf = list(
Fixratio = FALSE,
# Do not use fixed-ratio allocation
Fixratiocontrol = NA,
# No fixed allocation ratio is needed for the control arm
BARmethod = "Thall",
# Use Thall's Bayesian adaptive randomisation approach
Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1) # Specify the tuning parameter value for the fixed tuning parameter
),
trend.inf = list(
trend.type = "step",
# Step time trend pattern
trend.effect = trend.effect[trendindex, ],
# Strength of time trend effect
trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability
)
),
cl = 2 # 2 cores required
)
trendindex = trendindex + 1
# The result list can be used for plotting, and the OPC table summarises the evaluation metrics for each scenario
result = c(result, restlr$result)
OPC_alt = rbind(OPC_alt, restlr$OPC)
}
cutoffindex = cutoffindex + 1
}
```
```{r}
print("Finished alternative scenario study")
save_data = FALSE
if (isTRUE(save_data)) {
save(result, file = restlr$Nameofsaveddata$nameData)
save(OPC_alt, file = restlr$Nameofsaveddata$nameTable)
}
```
Present the evaluation metrics for the alternative scenarios. The power used here is the conjunctive power, where the trial is successful only if all effective arms are correctly claimed to be effective and all other null arms are claimed to be ineffective.
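To make the definition concrete, here is a sketch with a hypothetical `claims` matrix (not an object returned by the package):
```{r, eval=FALSE}
# Hypothetical: 'claims' is an ntrials x 3 logical matrix, one column per
# treatment arm, TRUE when that arm is declared effective in a replicate.
# If arms 1 and 2 are truly effective and arm 3 is null, the conjunctive
# power is the proportion of replicates with exactly the right claims:
mean(claims[, 1] & claims[, 2] & !claims[, 3])
```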
```{r}
# Characteristic table
print(OPC_alt)
```
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/inst/doc/MAMS-trial-simulation-tutorial.Rmd
|
---
title: "MAMS-CutoffScreening-GP-Asymmetric-tutorial"
output: rmarkdown::html_vignette
author: "Ziyan Wang"
vignette: >
%\VignetteIndexEntry{MAMS-CutoffScreening-GP-Asymmetric-tutorial}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(BayesianPlatformDesignTimeTrend)
```
## Four arm trial cutoff screening
The 'BayesianPlatformDesignTimeTrend' package simulation process requires stopping boundary cutoff screening first (for details, see the demo `demo_Cutoffscreening.GP()` and the MAMS-CutoffScreening-GP-tutorial). After the cutoff screening process, we record the cutoff values of both the efficacy and futility boundaries for use in the trial simulation process. The data for each trial replicate are generated sequentially during the simulation.
In this tutorial, the cutoff screening process for an asymmetric boundary is presented. The example is a four-arm MAMS trial with one control and three treatment arms. The control arm is not stopped during the trial. The time trend pattern is set to 'linear' and acts multiplicatively ('mult') on the baseline response probability; the time trend strength is set to zero, so there is no time trend effect in this example. The randomisation method is the unfixed (adaptively tuned) Thall approach. The early stopping rule is the asymmetric Pocock-type boundary. The working model is the fixed effect logistic model with treatment effect only (reg.inf = 'main'). The quantities tracked during the screening are the FWER, the power, and the effective sample size (ESS) under the null and alternative scenarios. For asymmetric boundary screening, we can find a contour of cutoff pairs with FWER = 10%. Along this contour, the (conjunctive, disjunctive or marginal) power is optimised under the user-specified alternative scenario. In this example, the alternative scenario is $\pi_0 = \pi_3 = 0.4$, $\pi_1 = \pi_2 = 0.6$, and the conjunctive power is used, so a cutoff pair optimising the conjunctive power is recommended. The contour plots are presented below for interpretation.
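For reference, the three power definitions can be written as follows (standard notation, consistent with the descriptions in these tutorials; let $\mathcal{E}$ be the set of truly effective arms and $D_k$ the event that arm $k$ is declared effective):

$$\text{conjunctive power} = P\Big(\bigcap_{k\in\mathcal{E}} D_k \cap \bigcap_{k\notin\mathcal{E}} D_k^{c}\Big), \qquad \text{disjunctive power} = P\Big(\bigcup_{k\in\mathcal{E}} D_k\Big),$$

and the marginal power is $P(D_k)$ for a given $k \in \mathcal{E}$.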
```{r,eval=FALSE}
ntrials = 1000 # Number of trial replicates
ns = seq(120, 600, 60) # Sequence of total number of accrued patients at each interim analysis
null.reponse.prob = 0.4
alt.response.prob = 0.6
# We investigate the type I error rate for different time trend strength
null.scenario = matrix(
c(
null.reponse.prob,
null.reponse.prob,
null.reponse.prob,
null.reponse.prob
),
nrow = 1,
ncol = 4,
byrow = T
)
alt.scenario = matrix(
c(
null.reponse.prob,
alt.response.prob,
alt.response.prob,
null.reponse.prob
),
nrow = 1,
ncol = 4,
byrow = T
)
model = "tlr" #logistic model
max.ar = 0.85 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
#------------Select the data generation randomisation methods-------
rand.type = "Urn" # Urn design
max.deviation = 3 # The recommended value for the tuning parameter in the Urn design
# Require multiple cores for parallel running on an HPC (this is the number of cores I request on Iridis 5 at the University of Southampton)
cl = 40
# Set the model we want to use and the time trend effect for each model used.
# Here only the main effect model (reg.inf = "main") is used, with no time trend (trend.effect = c(0,0,0,0)), for the cutoff screening.
reg.inf = "main"
trend.effect = c(0,0,0,0)
result = NULL
OPC = NULL
K = dim(null.scenario)[2]
cutoffindex = 1
trendindex = 1
cutoff.information=demo_Cutoffscreening.GP (
ntrials = ntrials,
# Number of trial replicates
trial.fun = simulatetrial,
# Call the main function
power.type = "Conjunctive",
response.probs.alt = alt.scenario,
grid.inf = list(
start.length = 15,
grid.min = NULL,
grid.max = NULL,
confidence.level = 0.95,
grid.length = 101,
change.scale = FALSE,
noise = T,
errorrate = 0.1,
simulationerror = 0.01,
iter.max = 15,
plotornot = FALSE),
# Set up the GP-based screening: an initial design of 15 cutoff pairs (start.length = 15) followed by up to 15 further iterations (iter.max = 15)
input.info = list(
response.probs = null.scenario[1,],
#The scenario vector in this round
ns = ns,
# Sequence of total number of accrued patients at each interim analysis
max.ar = max.ar,
#limit the allocation ratio for the control group (1-max.ar < r_control < max.ar)
rand.type = rand.type,
# Which randomisation methods in data generation.
max.deviation = max.deviation,
# The recommended value for the tuning parameter in the Urn design
model.inf = list(
model = model,
#Use which model?
ibb.inf = list(
#independent beta-binomial model which can be used only for no time trend simulation
pi.star = 0.5,
# beta prior mean
pess = 2,
# beta prior effective sample size
betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation
),
tlr.inf = list(
beta0_prior_mu = 0,
# Stan logistic model t prior location
beta1_prior_mu = 0,
# Stan logistic model t prior location
beta0_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta1_prior_sigma = 2.5,
# Stan logistic model t prior sigma
beta0_df = 7,
# Stan logistic model t prior degree of freedom
beta1_df = 7,
# Stan logistic model t prior degree of freedom
reg.inf = reg.inf,
# The model we want to use
variable.inf = "Fixeffect" # Use fixed effect logistic model
)
),
Stop.type = "Early-Pocock",
# Use Pocock like early stopping boundary
Boundary.type = "Asymmetric",
# Use Asymmetric boundary: the efficacy and futility cutoffs are screened separately and need not sum to 1
Random.inf = list(
Fixratio = FALSE,
# Do not use fixed-ratio allocation
Fixratiocontrol = NA,
# No fixed allocation ratio is needed for the control arm
BARmethod = "Thall",
# Use Thall's Bayesian adaptive randomisation approach
Thall.tuning.inf = list(tuningparameter = "Unfixed", fixvalue = 1) # Unfixed (adaptively tuned) tuning parameter; fixvalue applies only when tuningparameter = "Fixed"
),
trend.inf = list(
trend.type = "linear",
# Linear time trend pattern
trend.effect = trend.effect,
# Strength of time trend effect
trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability
)
),
cl = 2
)
```
Summary of the output data from the cutoff screening example:
```{r}
library(ggplot2)
# Details of grid
optimdata=optimdata_asy
# Recommended cutoff at each screening round
nextcutoff = optimdata$next.cutoff
nextcutoff$FWER=0.05
nextcutoff.predict = nextcutoff
colnames(nextcutoff.predict)=c("eff","fut","FWER")
prediction = optimdata$prediction
point.tested=optimdata$testeddata[,2:3]
tpIE=optimdata$testeddata[,1]
pow=optimdata$testeddata[,4]
point.tested=point.tested[1:sum(!is.na(tpIE)),]
tpIE=tpIE[1:sum(!is.na(tpIE))]
pow=pow[1:sum(!is.na(pow))]
cleandata=data.frame(FWER=tpIE,pow=pow,point.tested)
colnames(cleandata)[c(3,4)]=c("eff","fut")
GP.res = optimdata
xgrid.eff=optimdata$prediction$xgrid[,1]
xgrid.fut=optimdata$prediction$xgrid[,2]
grid.min=c(0.95,0)
grid.max=c(1,0.05)
library(grDevices)
library(RColorBrewer)
colormap=colorRampPalette(rev(brewer.pal(11,'Spectral')))(32)
target_line=0.1
df=data.frame(FWER=optimdata$prediction$yhat.t1E,eff=xgrid.eff,fut=xgrid.fut)
Contour.tIE<-ggplot(df,aes(eff,fut,z=FWER))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=FWER))+
geom_contour(breaks=c(target_line, seq(min(df$FWER),max(df$FWER),by=(max(df$FWER)-min(df$FWER))/10)),color="black")+
geom_contour(breaks=target_line,color="white",linewidth=1.1)+
labs(title="Mean type I error rate (FWER)", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.tIE=Contour.tIE+geom_point(data=cleandata,aes(eff,fut),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut),color="pink")
# Extract the contour data
contour_data_tIE <- ggplot_build(Contour.tIE)$data[[2]]
# Record the contour that has FWER equal to the target
contour_data_tIE_subset <- contour_data_tIE[contour_data_tIE$level == target_line, ]
# Order and split the data to ensure the plot is drawn correctly
contour_data_tIE_subset=contour_data_tIE_subset[order(contour_data_tIE_subset$piece,contour_data_tIE_subset$x),]
contour_data_tIE_subset_1=contour_data_tIE_subset[contour_data_tIE_subset$piece==1,]
contour_data_tIE_subset_2=contour_data_tIE_subset[contour_data_tIE_subset$piece==2,]
# To make sure the data frame is not empty
if (nrow(contour_data_tIE_subset_1) == 0){
contour_data_tIE_subset_1[1,]=(rep(NA,dim(contour_data_tIE_subset_1)[[2]]))
} else if (nrow(contour_data_tIE_subset_2) == 0){
contour_data_tIE_subset_2[1,]=(rep(NA,dim(contour_data_tIE_subset_2)[[2]]))
}
df=data.frame(sd=optimdata$prediction$sd.t1E,eff=xgrid.eff,fut=xgrid.fut)
Contour.sd<-ggplot(df,aes(eff,fut,z=sd))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=sd))+
geom_contour(breaks=seq(min(df$sd),max(df$sd),by=(max(df$sd)-min(df$sd))/10),color="black")+labs(title="SD of the estimated FWER surface", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.sd=Contour.sd+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
df=data.frame(Power=optimdata$prediction$yhat.pow,eff=xgrid.eff,fut=xgrid.fut)
Contour.pow<-ggplot(df,aes(eff,fut,z=Power))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=Power))+
geom_contour(breaks=seq(min(df$Power),max(df$Power),by=(max(df$Power)-min(df$Power))/10),color="black")+labs(title="Mean power", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.pow=Contour.pow+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+
geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
df=data.frame(NullESS=optimdata$prediction$yhat.ESS.null,eff=xgrid.eff,fut=xgrid.fut)
Contour.nullESS<-ggplot(df,aes(eff,fut,z=NullESS))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=NullESS))+
geom_contour(breaks=seq(min(df$NullESS),max(df$NullESS),by=(max(df$NullESS)-min(df$NullESS))/10),color="black")+labs(title="Mean ESS under null", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.nullESS=Contour.nullESS+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
df=data.frame(AltESS=optimdata$prediction$yhat.ESS.alt,eff=xgrid.eff,fut=xgrid.fut)
Contour.altESS<-ggplot(df,aes(eff,fut,z=AltESS))+
scale_fill_gradientn(colors = colormap)+geom_tile(aes(fill=AltESS))+
geom_contour(breaks=seq(min(df$AltESS),max(df$AltESS),by=(max(df$AltESS)-min(df$AltESS))/10),color="black")+labs(title="Mean ESS under alternative", x="Cutoff value for efficacy",y="Cutoff value for futility")
Contour.altESS=Contour.altESS+
geom_path(data = contour_data_tIE_subset_1, aes(x,y,z=NA),color="white",linewidth=1.1)+geom_path(data = contour_data_tIE_subset_2, aes(x,y,z=NA),color="white",linewidth=1.1)+
geom_point(data=cleandata,aes(eff,fut,z=NA),color="black")+geom_point(data=nextcutoff.predict,aes(eff,fut,z=NA),color="pink")
```
```{r,fig.align='center',fig.height=9,fig.width=7,warning=FALSE}
library(ggpubr)
ggarrange(Contour.tIE,Contour.pow,Contour.nullESS,Contour.altESS,Contour.sd,ncol = 2,nrow=3)
```
Red indicates higher values and purple lower values. The black points are the tested cutoff pairs. The white line is the contour where the FWER equals 0.1. The pink point is the next recommended cutoff pair, at which power is optimised. As the plots show, the pink point controls the FWER at 0.1, maximises the power, and minimises the effective sample size (ESS) under the alternative scenario. The ESS-under-null contours run in almost the same direction as the FWER contours, meaning that cutoff pairs with the same FWER lead to a similar ESS under this trial setting (the maximum acceptable sample size is fixed before simulation for budget reasons).
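To read the recommended cutoff pair programmatically from the stored screening output used above, a minimal sketch:
```{r, eval=FALSE}
# Recommended (efficacy, futility) cutoff pair from the GP-based screening
optimdata_asy$next.cutoff
# Fitted FWER/power/ESS surfaces over the grid, for further inspection
str(optimdata_asy$prediction)
```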
|
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/vignettes/MAMS-CutoffScreening-GP-Asymmetric-tutorial.Rmd
|