###########################################
## Index generator
###########################################

index.generator <- function(tab1, tab2){
  if (ncol(tab1)==0 && ncol(tab2)==0) {
    ind.exp <- matrix(c(1,1), nrow=1)
  } else if (ncol(tab1)==0) {
    ind.exp <- cbind(rep(1, nrow(tab2)), 1:nrow(tab2))
  } else if (ncol(tab2)==0) {
    ind.exp <- cbind(1:nrow(tab1), rep(1, nrow(tab1)))
  } else {
    ind.exp <- index.gen.special(tab1, tab2)
  }
  return(ind.exp)
}

##################################
## Index generator special case
##################################

# tab1 <- bag.post@config; tab2 <- bag.lp@config

index.gen.special <- function(tab1, tab2){

  ###### index generation
  config.var.int <- sort(intersect(colnames(tab1), colnames(tab2)))

  ## No intersection
  if (length(config.var.int)==0){
    ind.exp <- expand.grid(1:nrow(tab1), 1:nrow(tab2))
    return(ind.exp)
  }

  tab1o <- cbind(tab1, 1:nrow(tab1))
  tab2o <- cbind(tab2, 1:nrow(tab2))

  # quick check to see which of tab1 or tab2 is smaller
  if (length(tab1) <= length(tab2)) { # if tab1 is smaller or the same size
    unique.vec <- tab1[!duplicated(tab1[, config.var.int]), config.var.int, drop = FALSE]
    # this works but gives a different order; should be fine since we only
    # care about the number of rows
  } else {
    unique.vec <- tab2[!duplicated(tab2[, config.var.int]), config.var.int, drop = FALSE]
  }

  # split each shared column into a separate vector in a list, then
  # sort the table by those columns
  config_cols_1 <- tab1o[, config.var.int, drop = FALSE]
  ncols_cc1 <- ncol(config_cols_1)
  config_split_1 <- vector("list", ncols_cc1)
  for (j in 1:ncols_cc1) {
    config_split_1[[j]] <- config_cols_1[, j]
  }
  tab1o.s <- tab1o[do.call(order, config_split_1), , drop = FALSE]

  config_cols_2 <- tab2o[, config.var.int, drop = FALSE]
  ncols_cc2 <- ncol(config_cols_2)
  config_split_2 <- vector("list", ncols_cc2)
  for (j in 1:ncols_cc2) {
    config_split_2[[j]] <- config_cols_2[, j]
  }
  tab2o.s <- tab2o[do.call(order, config_split_2), , drop = FALSE]

  n.uni <- nrow(unique.vec)
  r.1 <- nrow(tab1o.s)/n.uni
  r.2 <- nrow(tab2o.s)/n.uni

  order.1 <- tab1o.s[, ncol(tab1o.s)]
  order.2 <- tab2o.s[, ncol(tab2o.s)]

  ind.1 <- rep(order.1, each=r.2)

  ind.2 <- vector("list", n.uni)
  for (i in 1:n.uni) {
    this.vec <- order.2[((i-1)*r.2+1):(i*r.2)]
    ind.2[[i]] <- rep(this.vec, r.1)
  }
  ind.2 <- unlist(ind.2, use.names = FALSE)

  ind.exp <- cbind(ind.1, ind.2)
  class(ind.exp) <- "numeric"
  ind.exp <- orderBy(~ind.2+ind.1, ind.exp)  # orderBy() is from the doBy package
  return(ind.exp)
}

# system.time(exp0 <- index.gen.special.0(tab1,tab2))
# system.time(exp1 <- index.gen.special(tab1,tab2))

############################################################
## Factor product
############################################################

# pot1 <- tree.init.p@jpt[["HDL"]]; pot2 <- tree.init.p@jpt[["Cyp2b10"]]

factor.product <- function(pot1, pot2, normalize=TRUE){

  if (ncol(pot1$cpt)==0) { return(pot2) }
  if (ncol(pot2$cpt)==0) { return(pot1) }

  ind <- index.generator(pot1$cpt, pot2$cpt)
  ind.1 <- ind[,1]
  ind.2 <- ind[,2]

  p <- pot1$prob[ind.1]*pot2$prob[ind.2]
  if (normalize) { p <- p/sum(p) }

  ## processing the distribution table
  config.var.1 <- colnames(pot1$cpt)
  config.var.2 <- colnames(pot2$cpt)
  config.var.int <- intersect(config.var.1, config.var.2)
  config.rem.1 <- setdiff(config.var.1, config.var.int)
  config.rem.2 <- setdiff(config.var.2, config.var.int)

  config.after <- cbind(pot1$cpt[ind.1, config.rem.1, drop=FALSE],
                        pot2$cpt[ind.2, config.rem.2, drop=FALSE],
                        pot2$cpt[ind.2, config.var.int, drop=FALSE])
  rownames(config.after) <- NULL

  result <- list(cpt=config.after, prob=p)
  return(result)
}
############################################################
## Factor divide
############################################################

factor.divide <- function(pot1, pot2){

  if (ncol(pot1$cpt)==0) { return(pot2) }
  if (ncol(pot2$cpt)==0) { return(pot1) }

  ind <- index.generator(pot1$cpt, pot2$cpt)
  ind.1 <- ind[,1]
  ind.2 <- ind[,2]

  p1 <- pot1$prob[ind.1]
  p2 <- pot2$prob[ind.2]

  p <- c()
  vld <- which(p2!=0)
  p[vld] <- p1[vld]/p2[vld]
  p[p2==0] <- 0

  ## processing the distribution table
  config.var.1 <- colnames(pot1$cpt)
  config.var.2 <- colnames(pot2$cpt)
  config.var.int <- intersect(config.var.1, config.var.2)
  config.rem.1 <- setdiff(config.var.1, config.var.int)
  config.rem.2 <- setdiff(config.var.2, config.var.int)

  config.after <- cbind(pot1$cpt[ind.1, config.rem.1, drop=FALSE],
                        pot2$cpt[ind.2, config.rem.2, drop=FALSE],
                        pot2$cpt[ind.2, config.var.int, drop=FALSE])
  rownames(config.after) <- NULL

  result <- list(cpt=config.after, prob=p)
  return(result)
}

############################################################
## Conditional distribution
## vars: the variables conditioned on
############################################################

conditional <- function(pot, vars) {
  pot2 <- marginalize.discrete(pot, vars)
  pot3 <- factor.divide(pot, pot2)
  return(pot3)
}

############################################################
## Marginalize
############################################################

# pot <- tree.init.p@jpt[["HDL"]]; vars <- c("HDL")
# system.time(mg1 <- marginalize.discrete(pot, vars))

#' @importFrom doBy summaryBy

marginalize.discrete <- function(pot, vars){

  # restrict vars to those actually present in pot$cpt
  pot.vars <- names(pot$cpt)
  vars <- intersect(pot.vars, vars)

  if (length(vars)==0) {
    result <- list(cpt=data.frame(matrix(0,nrow=0,ncol=0)), prob=1)
    return(result)
  }

  # df <- data.frame(pot$cpt, prob_=pot$prob)
  # combine pot$cpt and pot$prob (under prob_)
  df <- cbind(pot$cpt, prob_ = pot$prob)
  fmr.str <- paste0("prob_~", paste0(rev(vars), collapse="+")) # ordering
  fmr <- as.formula(fmr.str)
  dfs <- aggregate(fmr, df, FUN = sum)
  dfs_vars <- dfs[, vars, drop = FALSE]

  result <- list(cpt=dfs_vars, prob=dfs[,(length(vars)+1)])
  return(result)
}
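############################################################
## Usage sketch (illustrative only)
############################################################

## A minimal, hypothetical example of how the potentials above compose.
## A discrete potential is a list(cpt = <data.frame of configurations>,
## prob = <numeric vector>); the toy tables below are made up for
## illustration and are not objects from the package data.
# pA  <- list(cpt = data.frame(A = c("0", "1")), prob = c(0.4, 0.6))
# pBA <- list(cpt = data.frame(B = c("0", "0", "1", "1"),
#                              A = c("0", "1", "0", "1")),
#             prob = c(0.9, 0.2, 0.1, 0.8))   # P(B | A), stored flat
# joint  <- factor.product(pA, pBA)           # aligned on A; gives P(A, B)
# margB  <- marginalize.discrete(joint, "B")  # P(B)
# condBA <- conditional(joint, "A")           # recovers P(B | A)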
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/D6_FactorOperations.R
#' Obtain marginal distributions
#'
#' Get the marginal distributions of multiple variables
#'
#' @details Get the marginal distributions of multiple variables. The function \code{Marginals}
#' returns a \code{list} of marginal distributions. The marginal distribution of a discrete variable
#' is a named vector of probabilities. Meanwhile, the marginal distributions of
#' continuous variables in a CG-BN model are mixtures of Gaussian distributions.
#' To fully represent this information, the marginal of a continuous variable is represented by
#' a \code{data.frame} with three columns to specify
#' parameters for each Gaussian distribution in the mixture, which are
#'
#' \describe{
#'  \item{\code{mean}}{the mean value of a Gaussian distribution.}
#'  \item{\code{sd}}{the standard deviation of a Gaussian distribution.}
#'  \item{\code{n}}{the number of Gaussian mixtures}
#' }
#'
#' @param tree a \code{\linkS4class{ClusterTree}} object
#' @param vars a \code{vector} of variables for query of marginal distributions
#'
#' @return
#'
#' \describe{
#'  \item{\code{marginals}}{a \code{list} of marginal distributions}
#'  \item{\code{types}}{a named \code{vector} indicating the types of the variables whose
#'  marginals are queried: \code{TRUE} for discrete, \code{FALSE} for continuous.}
#' }
#'
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @examples
#'
#' data(liver)
#' tree.init.p <- Initializer(dag=liver$dag, data=liver$data,
#'                            node.class=liver$node.class,
#'                            propagate = TRUE)
#' tree.post <- AbsorbEvidence(tree.init.p, c("Nr1i3", "chr1_42.65"), list(1,"1"))
#' marg <- Marginals(tree.post, c("HDL", "Ppap2a"))
#' marg$marginals$HDL
#' head(marg$marginals$Ppap2a)
#'
#' @seealso \code{\link{PlotMarginals}} for visualization of the marginal distributions,
#' \code{\link{SummaryMarginals}} for summarization of the marginal distributions of
#' continuous variables.
#'
#' @export

Marginals <- function(tree, vars) {

  if(!tree@propagated) {
    stop("The ClusterTree object must be propagated before making queries.")
  }

  if(sum(vars %in% tree@absorbed.variables)!=0) {
    var.in <- vars[vars %in% tree@absorbed.variables]
    msg1 <- paste0(var.in, collapse=", ")
    stop(paste0(msg1, " is/are already observed."))
  }

  node.class <- tree@node.class
  marginal.types <- node.class[vars]
  margs <- list()

  for (i in 1:length(vars)) {
    var <- vars[i]
    if (node.class[[var]]) {
      margs[[i]] <- DiscreteMarginal(tree, var)
    } else {
      margs[[i]] <- PushMarginal(tree, var)
    }
  }

  names(margs) <- vars

  output <- list()
  output$marginals <- margs
  output$types <- marginal.types
  class(output) <- "marginals"
  return(output)
}
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/E1_Marginals.R
############################################################
## Function for getting marginal of a discrete node
############################################################

DiscreteMarginal <- function(tree, var){

  if (length(tree@jpt)==0)
    stop("Joint distribution table not found. No discrete nodes in model or the ClusterTree is not propagated.")

  for (i in 1:length(tree@jpt)) {
    j.pot <- tree@jpt[[i]]
    tab <- j.pot$cpt
    if (var %in% colnames(tab)) {
      pot <- marginalize.discrete(j.pot, var)
      v.name <- as.vector(t(pot$cpt))
      result <- pot$prob
      names(result) <- v.name
      return(result)
    }
  }
}
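## Illustrative call (a sketch, assuming the bundled, propagated toytree):
# data(toytree)
# DiscreteMarginal(toytree, "HDL")  # named vector of state probabilities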
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/E2_DiscreteMarginal.R
############################################################
## Function for getting marginal of a continuous node
############################################################

# tree.push <- tree.post; var <- "Neu1"

PushMarginal <- function(tree.push, var){

  tree.push@postbag[[var]] <- tree.push@lppotential[[var]]

  this.par <- tree.push@parent[[var]]
  this.var <- var

  # walk up the cluster tree through continuous clusters
  while(!is.na(this.par) && !tree.push@cluster.class[[this.par]]){

    tree.push@postbag[[this.par]] <- tree.push@postbag[[this.var]]

    ## Check if it is necessary to perform the exchange operation
    flag <- tree.push@activeflag[[this.par]]
    if (length(tree.push@lppotential[[this.par]])==0){
      flag <- FALSE
    } else {
      ## this check might be redundant, as it is also checked in the Exchange function
      lp.head <- tree.push@lppotential[[this.par]][[1]]@head
      postbag.tail <- tree.push@postbag[[this.par]][[1]]@tail
      if (!lp.head %in% postbag.tail){
        flag <- FALSE
      }
    }

    if (flag) {
      newBag <- Exchange(tree.push@postbag[[this.par]][[1]],
                         tree.push@lppotential[[this.par]][[1]])
      tree.push@postbag[[this.par]][[1]] <- newBag$postbag
      tree.push@lppotential[[this.par]][[1]] <- newBag$lppotential
    }

    tree.push@postbag[[this.var]] <- list()
    this.var <- this.par
    this.par <- tree.push@parent[[this.var]]
  }

  if (is.na(this.par)) {
    # purely continuous scenario
    marg <- data.frame(prob=1,
                       mu=tree.push@postbag[[this.var]][[1]]@const[1],
                       sd=sqrt(tree.push@postbag[[this.var]][[1]]@variance[1]))
    rownames(marg) <- NULL
  } else {
    this.pot <- tree.push@postbag[[this.var]][[1]]
    disc.pars <- colnames(this.pot@config)
    par.marg <- marginalize.discrete(tree.push@jpt[[this.par]], disc.pars)

    ind <- index.generator(this.pot@config, par.marg$cpt)
    ind.1 <- ind[,1]
    ind.2 <- ind[,2]

    marg <- data.frame(prob=par.marg$prob[ind.2],
                       mu=this.pot@const[ind.1],
                       sd=sqrt(this.pot@variance[ind.1]))
    rownames(marg) <- NULL
  }

  return(marg)
}
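## Illustrative call (a sketch, assuming the bundled, propagated toytree and
## that "Neu1" is one of its continuous nodes): the result is a Gaussian
## mixture, one row per component.
# data(toytree)
# PushMarginal(toytree, "Neu1")  # data.frame with columns prob, mu, sd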
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/E3_PushMarginal.R
#' Queries of discrete variable distributions
#'
#' Obtain the joint, marginal, and conditional distributions of discrete variables
#'
#' @details Query the joint distribution of any combination of discrete variables when
#' mode is "joint", or the conditional distribution of a discrete variable. The mode "list"
#' returns a \code{list} of variable combinations, such that joint distributions of any subset
#' of them are ready for extraction. Queries outside this list are also supported but may
#' take longer to compute. This function will also return the marginal distribution if only
#' one variable is queried.
#'
#' @param tree a \code{\linkS4class{ClusterTree}} object
#' @param vars the variables to be queried
#' @param mode type of desired distribution
#' @return a \code{data.frame} object specifying a joint or conditional distribution.
#'
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @importFrom igraph neighbors all_simple_paths induced_subgraph
#'
#' @examples
#'
#' data(chest)
#' dag <- chest$dag
#' node.class <- rep(TRUE, length(dag@nodes))
#' names(node.class) <- dag@nodes
#' tree.init.p <- Initializer(dag=dag, data=chest$data,
#'                            node.class=node.class,
#'                            propagate=TRUE)
#' # joint distribution
#' FactorQuery(tree=tree.init.p, vars=c("tub", "xray", "dysp", "asia"), mode="joint")
#'
#' # conditional distribution
#' FactorQuery(tree=tree.init.p, vars=c("xray"), mode="conditional")
#'
#' @export

FactorQuery <- function(tree, vars=c(), mode=c("joint", "conditional", "list")) {

  if (length(tree@jpt)==0)
    stop("Joint distribution table not found. No discrete nodes in model or the ClusterTree is not propagated.")

  if (sum(vars %in% tree@absorbed.variables)!=0) {
    var.in <- vars[vars %in% tree@absorbed.variables]
    msg1 <- paste0(var.in, collapse=", ")
    stop(paste0(msg1, " is/are already observed."))
  }

  if (mode=="list") {
    result <- list()
    j <- 1
    for (i in 1:length(tree@jpt)) {
      if (all(vars %in% colnames(tree@jpt[[i]]$cpt))) {
        result[[j]] <- colnames(tree@jpt[[i]]$cpt)
        j <- j+1
      }
    }
    return(result)
  }

  dag.graph <- igraph.from.graphNEL(tree@graph$dag)

  if (mode=="conditional") {
    if (length(vars)!=1) {
      stop("If mode is conditional, vars should be a single variable.")
    }
    parents <- names(neighbors(dag.graph, vars, mode="in"))
    parents <- setdiff(parents, tree@absorbed.variables)
    allvar <- c(vars, parents)
    for (i in 1:length(tree@jpt)){
      if (all(allvar %in% colnames(tree@jpt[[i]]$cpt))) {
        pot <- tree@jpt[[i]]
        result <- marginalize.discrete(pot, allvar)
        result <- conditional(result, parents)
        output <- data.frame(result$cpt, prob=result$prob)
        break
      }
    }
    rownames(output) <- NULL
    return(output)
  }

  if (mode=="joint") {
    for (i in 1:length(tree@jpt)) {
      if (all(vars %in% colnames(tree@jpt[[i]]$cpt))) {
        pot <- tree@jpt[[i]]
        result <- marginalize.discrete(pot, vars)
        output <- data.frame(result$cpt, prob=result$prob)
        rownames(output) <- NULL
        return(output)
      }
    }
    # vars span multiple clusters: fall back to the cross-cluster query
    result <- query.ooc(tree, vars)
    output <- data.frame(result$cpt, prob=result$prob)
    rownames(output) <- NULL
    return(output)
  }
}
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/E4_FactorQuery.R
############################################################
## Function for getting joints for factors across clusters
############################################################

query.ooc <- function(tree, vars){

  tree.graph <- igraph.from.graphNEL(tree@graph$tree)

  cs.sets <- list()
  discrete.clusters <- names(tree@cluster.class)[tree@cluster.class]
  cs.sets <- tree@member[discrete.clusters]

  vars.temp <- vars
  cs <- c()

  # greedily pick the discrete clusters covering the queried variables
  while(length(vars.temp)>0) {
    maxl <- 0
    inter.temp <- c()
    temp <- character(0)
    for (i in 1:length(cs.sets)) {
      inter <- intersect(vars.temp, cs.sets[[i]])
      if (length(inter)>=maxl) {
        maxl <- length(inter)
        temp <- discrete.clusters[i]
        inter.temp <- inter
      }
    }
    cs <- c(cs, temp)
    vars.temp <- setdiff(vars.temp, inter.temp)
  }

  sub.memb <- c()
  for (i in 2:length(cs)) {
    path <- all_simple_paths(tree.graph, cs[1], cs[i], mode="all")[[1]]
    sub.memb <- union(sub.memb, names(path))
  }

  sub.graph <- induced_subgraph(tree.graph, sub.memb)
  # x11(); plot(sub.graph)
  node <- sub.memb[1]

  #########################
  ooc <- list(jpt=tree@jpt,
              sub.graph=sub.graph,
              active=c(),
              nom=tree@jpt[[node]],
              denom=list(cpt=matrix(0,nrow=0,ncol=0), prob=1),
              cs.sets=cs.sets)

  obj <- Distribute.OOC(ooc, node)
  temp.pot <- factor.divide(obj$nom, obj$denom)
  jnt <- marginalize.discrete(temp.pot, vars)
  return(jnt)
}

## object.ooc <- ooc

Distribute.OOC <- function(object.ooc, node) {

  ngbs <- neighbors(object.ooc$sub.graph, node, mode = "all")$name
  inactive <- setdiff(ngbs, object.ooc$active)
  object.ooc$active <- c(object.ooc$active, node)
  jpt <- object.ooc$jpt[[node]]
  # cat(cluster@members, "\n")

  if (length(inactive)>0) {
    for (i in 1:length(inactive)) {
      # cat(node, "->", inactive[i], "\n")
      this.jpt <- object.ooc$jpt[[inactive[i]]]
      object.ooc$nom <- factor.product(object.ooc$nom, this.jpt, normalize=FALSE)
      # cat(names(object.ooc$nom$cpt), "\n")
      separator <- intersect(object.ooc$cs.sets[[node]], object.ooc$cs.sets[[inactive[i]]])
      # cat(separator, "\n")
      margin <- marginalize.discrete(jpt, separator)
      object.ooc$denom <- factor.product(object.ooc$denom, margin, normalize=FALSE)
      # object.ooc$joint <- factor.divide(this.cluster@joint, margin)
      object.ooc <- Distribute.OOC(object.ooc, inactive[i])
    }
  }

  return(object.ooc)
}
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/E5_CrossClusterQuery.R
#' Possible values of a discrete variable
#'
#' Obtain all the possible values of a discrete variable.
#'
#' @param tree a \code{\linkS4class{ClusterTree}} object
#' @param var the variable to be queried
#' @param message \code{logical(1)} whether to print a message when no value can be returned
#' @return a \code{vector} of the possible values of the discrete variable. If the variable is continuous,
#' the returned value will be \code{NULL}.
#'
#' @examples
#' data(toytree)
#' GetValue(toytree, "HDL")
#'
#' @author Han Yu
#'
#' @export

GetValue <- function(tree, var, message=TRUE) {

  if (!var %in% tree@node) {
    if (message) { cat("This node is not found. \n") }
    return(NULL)
  }

  if (!tree@node.class[var]) {
    if (message) { cat("The node is continuous. \n") }
    return(NULL)
  }

  if (var %in% tree@absorbed.variables) {
    if (message) { cat("The node is absorbed with value", tree@absorbed.values[[var]], "\n") }
    return(NULL)
  }

  if (var %in% tree@cluster) {
    tab <- tree@cpt[[var]]$cpt
    clname <- colnames(tab)
    var.ind <- which(clname==var)
    return(as.character(unique(tab[,var.ind])))
  }

  for (i in 1:length(tree@cpt)) {
    tab <- tree@cpt[[i]]$cpt
    clname <- colnames(tab)
    if (var %in% clname) {
      var.ind <- which(clname==var)
      return(as.character(unique(tab[,var.ind])))
    }
  }
}
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/E6_GetValues.R
#' Sampling from the Bayesian network
#'
#' Sampling from the joint distribution of all applicable nodes in the Bayesian network.
#'
#' @param tree a \code{\linkS4class{ClusterTree}} object
#' @param n an \code{integer} number of observations to generate
#' @return a \code{data.frame} of generated data
#'
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @import doBy
#' @importFrom graph nodes
#' @importFrom igraph neighbors
#' @importFrom methods new
#' @examples
#'
#' data(toytree)
#' Sampler(tree = toytree, n = 10)
#'
#' @export

Sampler <- function(tree, n) {
  # tree <- tree.post.2; n <- 100

  abd <- tree@absorbed.variables
  discrete.nodes <- names(tree@node.class)[tree@node.class]
  continuous.nodes <- names(tree@node.class)[!tree@node.class]
  disc.v <- setdiff(discrete.nodes, abd)
  cont.v <- setdiff(continuous.nodes, abd)

  ## Special case: all discrete nodes observed
  if (length(disc.v) == 0) {
    cont.g <- data.frame()
    for (i in 1:n){
      vec.g <- continuous.single.sampler.special(tree, cont.v)
      cont.g <- rbind(cont.g, vec.g)
    }
    colnames(cont.g) <- cont.v
    rownames(cont.g) <- NULL
    return(cont.g)
  }

  ###########################################

  disc.jd <- FactorQuery(tree, vars = disc.v, mode = "joint")
  cnts <- rmultinom(n = 1, size = n, prob = disc.jd$prob)

  config.tab <- disc.jd[, 1:(ncol(disc.jd)-1)]
  config.tab <- data.frame(lapply(config.tab, as.character), stringsAsFactors=FALSE)

  cont.g <- data.frame()

  for (i in 1:nrow(config.tab)) {
    if (cnts[i] == 0) { next }
    # i <- 1
    this.config <- unlist(config.tab[i, , drop = TRUE])
    ## generate continuous variables
    for (j in 1:cnts[i]) {
      vec.g <- continuous.single.sampler(tree, cont.v, this.config)
      cont.g <- rbind(cont.g, vec.g)
    }
  }

  colnames(cont.g) <- cont.v
  disc.g <- config.tab[rep(1:nrow(config.tab), cnts), ]
  colnames(disc.g) <- colnames(config.tab)

  generated <- cbind(disc.g, cont.g)
  rownames(generated) <- NULL
  return(generated)
}

######

compatible <- function(config.1, config.2) {

  var.1 <- names(config.1)
  var.2 <- names(config.2)
  var.b <- intersect(var.1, var.2)

  # no_intersection <- TRUE
  # tryCatch(
  #   {
  #     capture.output(graphNEL(nodes = c(var.1, var.2))) # throws error if duplicate nodes
  #   },
  #   error = function(e){
  #     no_intersection <- FALSE
  #   }, warning = function(e){
  #     no_intersection <- FALSE
  #   })
  #
  # if(no_intersection){
  #   return(TRUE)
  # }

  if (length(var.b)==0) { return(TRUE) }

  config.sub.1 <- config.1[var.b]
  config.sub.2 <- config.2[var.b]

  return(identical(config.sub.1, config.sub.2))
}

######################################
# new version

continuous.single.sampler <- function(tree, cont.v, this.config) {

  x.cont <- rep(NA, length(cont.v))
  names(x.cont) <- cont.v
  x.gen <- c()

  ## "protime", "ast", "alk", "trig", "copper", "chol", "albumin", "bili"
  for (nd in rev(cont.v)) {
    this.pot <- tree@lppotential[[nd]][[1]]

    if (ncol(this.pot@config) == 0) {
      selectedConfig <- 1
    } else {
      same_named_values <- this.config[intersect(colnames(this.pot@config), names(this.config))]
      # new implementation
      selectedConfig <- which(apply(this.pot@config, 1, function(x) identical(x, same_named_values)))
      # print(c("CONFIG:", selectedConfig))
      # compat <- apply(this.pot@config, 1, compatible, config.2 = this.config)
      # selectedConfig <- which(compat)
    }

    if (length(selectedConfig) > 1){
      warning("More than one configuration selected!")
    }

    if (ncol(this.pot@beta) == 0) {
      mu <- this.pot@const[selectedConfig]
    } else {
      this.beta <- this.pot@beta
      beta.var <- colnames(this.beta)
      var.g <- intersect(x.gen, beta.var)
      betas <- this.beta[selectedConfig, var.g]
      mu <- this.pot@const[selectedConfig] + sum(betas * x.cont[var.g])
    }

    sd <- sqrt(this.pot@variance[selectedConfig])
    x.cont[nd] <- rnorm(1, mean = mu, sd = sd)
    x.gen <- c(x.gen, nd)
  }

  return(x.cont)
}

######################################
## Sampler for no discrete variables
######################################

continuous.single.sampler.special <- function(tree, cont.v) {

  x.cont <- rep(NA, length(cont.v))
  names(x.cont) <- cont.v
  x.gen <- c()

  for (nd in rev(cont.v)) {
    this.pot <- tree@lppotential[[nd]][[1]]

    if (ncol(this.pot@beta) == 0) {
      mu <- this.pot@const[1]
    } else {
      this.beta <- this.pot@beta
      beta.var <- colnames(this.beta)
      var.g <- intersect(x.gen, beta.var)
      betas <- this.beta[1, var.g]
      mu <- this.pot@const[1] + sum(betas * x.cont[var.g])
    }

    sd <- sqrt(this.pot@variance[1])
    x.cont[nd] <- rnorm(1, mean = mu, sd = sd)
    x.gen <- c(x.gen, nd)
  }

  return(x.cont)
}
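## Illustrative check of the compatible() helper (a sketch with made-up
## configurations): two configurations agree iff they match on shared names.
# compatible(c(A = "1", B = "0"), c(B = "0", C = "1"))  # TRUE  (agree on B)
# compatible(c(A = "1", B = "0"), c(B = "1"))           # FALSE (differ on B)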
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/E7_Sampler.R
#' Compute signed and symmetric Kullback-Leibler divergence
#'
#' Compute signed and symmetric Kullback-Leibler divergence of variables over a spectrum of evidence
#'
#' @details Compute signed and symmetric Kullback-Leibler divergence of variables over a spectrum of evidence.
#' The signed and symmetric Kullback-Leibler divergence is also known as Jeffrey's signed information (JSI) for
#' continuous variables.
#'
#' @param tree a \code{\linkS4class{ClusterTree}} object
#' @param var0 the variable to have evidence absorbed
#' @param vars the variables to have divergence computed
#' @param seq a \code{vector} of numeric values as the evidence
#' @param pbar \code{logical(1)} whether to show a progress bar
#' @param method method for divergence computation:
#' \code{gaussian} for Gaussian approximation, \code{mc} for Monte Carlo integration
#' @param epsilon \code{numeric(1)} the KL divergence is undefined if certain states of a discrete variable
#' have probabilities of 0. In this case, a small positive number epsilon is assigned as their probabilities for
#' calculating the divergence. The probabilities of other states are shrunk proportionally to ensure they sum to 1.
#' @return a \code{data.frame} of the divergence
#'
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @examples
#' \dontrun{
#' data(liver)
#' tree.init.p <- Initializer(dag=liver$dag, data=liver$data,
#'                            node.class=liver$node.class,
#'                            propagate = TRUE)
#' klds <- ComputeKLDs(tree=tree.init.p, var0="Nr1i3",
#'                     vars=setdiff(tree.init.p@node, "Nr1i3"),
#'                     seq=seq(-3,3,0.5))
#' head(klds)
#' }
#' @export

ComputeKLDs <- function(tree, var0, vars, seq, pbar=TRUE,
                        method = "gaussian", epsilon = 10^-6)
{
  # cat(method, "\n")
  node.class <- tree@node.class
  tree.graph <- tree@graph$tree
  x.seq <- seq
  posteriors.1 <- Marginals(tree, vars)
  n.v <- length(vars)
  klds <- matrix(NA, nrow=length(x.seq), ncol=n.v)

  sys <- Sys.info()[1]

  if(pbar){
    if(sys=="Windows") {
      pb <- winProgressBar(title = "Computing divergence", min = 0,
                           max = length(x.seq), width = 300)
    } else {
      pb <- txtProgressBar(title = "Computing divergence", min = 0,
                           max = length(x.seq), style = 3)
    }
  }

  for (i in 1:length(x.seq)) {
    object.2 <- AbsorbEvidence(tree, var0, list(x.seq[i]))
    posteriors.2 <- Marginals(object.2, vars)
    for(j in 1:n.v){
      klds[i,j] <- SymmetricKLD(posteriors.1$marginals[[j]],
                                posteriors.2$marginals[[j]],
                                discrete = node.class[vars[j]],
                                method = method, epsilon = epsilon)
    }
    if(pbar){
      if(sys=="Windows") {
        setWinProgressBar(pb, i, title=paste("Computing divergence:",
                                             round(i/length(x.seq)*100, 0), "% complete"))
      } else {
        setTxtProgressBar(pb, i, title=paste("Computing divergence:",
                                             round(i/length(x.seq)*100, 0), "% complete"))
      }
    }
  }
  if(pbar){close(pb)}

  df <- data.frame(x.seq, klds)
  colnames(df) <- c("x", vars)
  return(df)
}
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/F1_ComputeKLDs.R
#' Summarize a continuous marginal distribution
#'
#' This function summarizes the marginal distributions of continuous variables by outputting the
#' mean, standard deviation, and number of subpopulations
#'
#' @param marginals the marginal distributions obtained from the \code{\link{Marginals}} function
#'
#' @return a \code{data.frame} object containing information about the marginal distributions of continuous variables.
#' The marginal distributions of continuous variables in a CG-BN model are mixtures of Gaussian distributions.
#' Therefore, besides the mean and standard deviation, the object has an additional column to specify the number of Gaussian
#' mixtures.
#'
#' \describe{
#'  \item{\code{mean}}{the mean value of a Gaussian distribution.}
#'  \item{\code{sd}}{the standard deviation of a Gaussian distribution.}
#'  \item{\code{n}}{the number of Gaussian distributions in the mixture.}
#' }
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @examples
#'
#' data(liver)
#' tree.init.p <- Initializer(dag=liver$dag, data=liver$data,
#'                            node.class=liver$node.class,
#'                            propagate = TRUE)
#' marg <- Marginals(tree.init.p, c("HDL", "Ppap2a", "Neu1"))
#' SummaryMarginals(marginals=marg)
#'
#' @seealso \code{\link{Marginals}}
#'
#' @export

SummaryMarginals <- function(marginals) {

  margs <- marginals$marginals
  nms0 <- names(margs)
  types <- marginals$types
  nms <- mu <- sd <- n <- c()

  for (i in 1:length(types)) {
    if(!types[i]) {
      msd <- MeanSD(margs[[i]])
      nms <- c(nms, nms0[i])
      mu <- c(mu, msd[1])
      sd <- c(sd, msd[2])
      n <- c(n, nrow(margs[[i]]))
    }
  }

  df <- data.frame(Mean=mu, SD=sd, n=n)
  rownames(df) <- nms
  return(df)
}
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/F2_SummaryMarginal.R
###########################################
## Means and SDs of Mixture Gaussians
###########################################

MeanSD <- function(df) {
  Mean <- sum(df$prob*df$mu)
  SD <- sqrt( sum(df$prob*df$sd^2) + sum(df$prob*df$mu^2) - (sum(df$prob*df$mu))^2 )
  return(c(Mean=Mean, SD=SD))
}

###########################################
## Divergence calculation
###########################################

SymmetricKLD <- function(post1, post2, discrete, method = "gaussian", epsilon = 10^-6) {
  if (!discrete) {
    if (method == "mc"){
      return(SymKLD.continuous.mc(post1, post2))
    } else {
      return(SymKLD.continuous.gaussian(post1, post2))
    }
  } else {
    return(SymKLD.discrete(post1, post2, epsilon = epsilon))
  }
}

###########################################
## Divergence for continuous node
###########################################

density.generator <- function(g, x) {
  return(g[1]*dnorm(x, mean=g[2], sd=g[3]))
}

# calculate KL divergence by Monte Carlo integration

SymKLD.continuous.mc <- function(post.1, post.2) {

  n.sub.1 <- nrow(post.1)
  n.sub.2 <- nrow(post.2)

  ###### both marginals are single Gaussians: use the closed form
  if (n.sub.1 == 1 && n.sub.2 == 1) {
    mu1 <- post.1$mu[1]
    mu2 <- post.2$mu[1]
    sigma1 <- post.1$sd[1]
    sigma2 <- post.2$sd[1]
    kld.1 <- log(sigma2/sigma1) + (sigma1^2 + (mu1 - mu2)^2)/(2*sigma2^2) - 0.5
    kld.2 <- log(sigma1/sigma2) + (sigma2^2 + (mu2 - mu1)^2)/(2*sigma1^2) - 0.5
    kld <- sign(mu2-mu1) * (kld.1 + kld.2)/2
    return(kld)
  }

  ######
  cnt <- rmultinom(1, 10000, prob = post.1$prob)
  x <- c()
  for (i in 1:n.sub.1) {
    this.x <- rnorm(cnt[i], post.1$mu[i], post.1$sd[i])
    x <- c(x, this.x)
  }
  f1 <- apply(post.1, 1, density.generator, x)
  f2 <- apply(post.2, 1, density.generator, x)
  y1 <- rowSums(f1)
  y2 <- rowSums(f2)
  kld.1 <- mean(log(y1/y2))

  #########
  cnt <- rmultinom(1, 10000, prob = post.2$prob)
  x <- c()
  for (i in 1:n.sub.2) {
    this.x <- rnorm(cnt[i], post.2$mu[i], post.2$sd[i])
    x <- c(x, this.x)
  }
  f1 <- apply(post.1, 1, density.generator, x)
  f2 <- apply(post.2, 1, density.generator, x)
  y1 <- rowSums(f1)
  y2 <- rowSums(f2)
  kld.2 <- mean(log(y2/y1))

  mu1 <- sum(post.1$prob * post.1$mu)
  mu2 <- sum(post.2$prob * post.2$mu)

  kld <- sign(mu2-mu1) * (kld.1 + kld.2)/2
  return(kld)
}

# calculate KL divergence by Gaussian approximation

SymKLD.continuous.gaussian <- function(post.1, post.2) {
  meansd.1 <- MeanSD(post.1)
  meansd.2 <- MeanSD(post.2)
  names(meansd.1) <- names(meansd.2) <- NULL
  mu1 <- meansd.1[1]
  mu2 <- meansd.2[1]
  sigma1 <- meansd.1[2]
  sigma2 <- meansd.2[2]
  kld.1 <- log(sigma2/sigma1) + (sigma1^2 + (mu1 - mu2)^2)/(2*sigma2^2) - 0.5
  kld.2 <- log(sigma1/sigma2) + (sigma2^2 + (mu2 - mu1)^2)/(2*sigma1^2) - 0.5
  kld <- sign(mu2-mu1) * (kld.1 + kld.2)/2
  return(kld)
}

##############

SymKLD.continuous <- function(post1, post2) {
  step <- 0.01
  x <- seq(-20, 20, by=step)
  f1 <- apply(post1, 1, density.generator, x)
  y1 <- rowSums(f1)
  f2 <- apply(post2, 1, density.generator, x)
  y2 <- rowSums(f2)
  m1 <- sum(step*x*y1)
  m2 <- sum(step*x*y2)
  kld1 <- sum(step*y1*log(y1/y2))
  kld2 <- sum(step*y2*log(y2/y1))
  return(0.5*(kld1+kld2)*sign(m2-m1))
}

###########################################
## Divergence for discrete node
###########################################

SymKLD.discrete <- function(p1, p2, epsilon = 10^-6) {

  ind_01 <- which(p1 == 0)
  ind_11 <- which(p1 != 0)
  ind_02 <- which(p2 == 0)
  ind_12 <- which(p2 != 0)
  n1 <- length(ind_01)
  n2 <- length(ind_02)

  if(n1 > 0) {
    p1[ind_01] <- epsilon
    compensate <- epsilon*n1
    p1[ind_11] <- p1[ind_11] - compensate*p1[ind_11]
  }

  if(n2 > 0) {
    p2[ind_02] <- epsilon
    compensate <- epsilon*n2
    p2[ind_12] <- p2[ind_12] - compensate*p2[ind_12]
  }

  kld1 <- sum(p1*log(p1/p2))
  kld2 <- sum(p2*log(p2/p1))
  return(0.5*(kld1+kld2))
}
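###########################################
## Sanity-check sketch (illustrative only)
###########################################

## A quick, hypothetical check that the closed-form symmetric KLD above
## matches the Monte Carlo path for single-Gaussian marginals; the two
## data.frames below mimic the (prob, mu, sd) mixture format produced by
## Marginals() and are made up for illustration.
# post.a <- data.frame(prob = 1, mu = 0, sd = 1)
# post.b <- data.frame(prob = 1, mu = 1, sd = 2)
# SymKLD.continuous.gaussian(post.a, post.b)
# SymKLD.continuous.mc(post.a, post.b)  # identical here: the mc version
#                                       # short-circuits to the closed form
#                                       # when both mixtures have one component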
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/F3_Summary_helpers.R
#' Plot the Bayesian network
#'
#' Plot and compare two Bayesian networks with different evidence(s) absorbed and propagated.
#'
#' @details Network visualization of the node-specific differences between Bayesian networks
#' with the same topology, but evidence that has been absorbed and propagated. The change of
#' the marginal distribution of each node is measured by signed and symmetric Kullback-Leibler
#' divergence. The sign indicates the direction of change, with \code{tree.1} considered as the baseline.
#' The magnitude of the change is reflected by the value. Nodes that are white are d-separated
#' from the evidence. This function requires the \code{Rgraphviz} package.
#'
#' @param tree.1 a \code{\linkS4class{ClusterTree}}
#' @param tree.2 a \code{\linkS4class{ClusterTree}}
#' @param fontsize font size for the node labels
#' @param pbar \code{logical(1)} whether to show a progress bar
#' @param plotting \code{logical(1)} whether to output a plot
#' @param epsilon \code{numeric(1)} the KL divergence is undefined if certain states of a discrete variable
#' have probabilities of 0. In this case, a small positive number epsilon is assigned as their probabilities for calculating
#' the divergence. The probabilities of other states are shrunk proportionally to ensure they sum to 1.
#' @return a plot of the Bayesian network
#' @return a \code{vector} of signed symmetric Kullback-Leibler divergence
#'
#' @import RColorBrewer
#'
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @importFrom fields image.plot
#' @importFrom grDevices colorRampPalette
#' @importFrom graphics par
#'
#' @examples
#' \dontrun{
#' library("Rgraphviz")
#' data(toytree)
#' tree.post <- AbsorbEvidence(toytree, c("Nr1i3"), list(1))
#' PlotCGBN(tree.1=toytree, tree.2=tree.post)
#' }
#' @export

PlotCGBN <- function(tree.1, tree.2, fontsize=NULL, pbar=FALSE, plotting=TRUE, epsilon = 10^-6) {

  # tree.1 <- tree.init.p; tree.2 <- tree.post; fontsize=NULL; pbar=TRUE; plotting=TRUE;

  dag <- tree.1@graph$dag
  node.class <- tree.1@node.class

  absorbed.1 <- tree.1@absorbed.variables
  absorbed.2 <- tree.2@absorbed.variables

  absorbed <- union(absorbed.1, absorbed.2)
  var.inter <- intersect(absorbed.1, absorbed.2)
  var.1 <- setdiff(absorbed.1, var.inter)
  var.2 <- setdiff(absorbed.2, var.inter)
  vars <- absorbed

  node.names <- tree.1@node
  active.vars <- setdiff(node.names, vars)
  disc.all <- names(node.class)[node.class]
  cont.all <- names(node.class)[!node.class]
  disc.active <- intersect(disc.all, active.vars)
  cont.active <- intersect(cont.all, active.vars)
  all.active <- c(disc.active, cont.active)

  klds.0 <- c()
  sys <- Sys.info()[1]

  if(pbar){
    if(sys=="Windows") {
      pb <- winProgressBar(title = "Computing posterior", min = 0,
                           max = length(all.active), width = 300)
    } else {
      pb <- txtProgressBar(min = 0, max = length(all.active), style = 3)
    }
  }

  for (i in 1:length(all.active)){
    if(pbar){
      if(sys=="Windows") {
        setWinProgressBar(pb, i, title=paste("Computing posterior for ", all.active[i], ": ",
                                             round((i-1)/length(all.active)*100, 0), "% complete"))
      } else {
        setTxtProgressBar(pb, i)
      }
    }
    post.1 <- Marginals(tree.1, all.active[i])
    post.2 <- Marginals(tree.2, all.active[i])
    klds.0[i] <- SymmetricKLD(post.1[[1]][[1]], post.2[[1]][[1]],
                              discrete = node.class[all.active[i]],
                              epsilon = epsilon)
  }

  names(klds.0) <- all.active
  klds <- abs(klds.0)

  if(pbar){close(pb)}

  if (!plotting){
    return(klds.0)
  }

  nAttrs <- list()
  node.shape <- c(rep("circle", length(cont.all)), rep("box", length(disc.all)))
  names(node.shape) <- c(cont.all, disc.all)
  nAttrs$shape <- node.shape

  if(!is.null(fontsize)){
    nAttrs$fontsize <- rep(fontsize, length(node.names))
    names(nAttrs$fontsize) <- node.names
  }

  max.kl <- max(klds)
  min.kl <- -max(klds)

  if (max.kl==0) {
    fill.post <- rep("white", length(klds.0))
  } else {
    pseudo <- c(klds.0, -klds.0)/max.kl
    # cl.kl <- (klds-min.kl)/(max.kl-min.kl)
    pseudo <- sign(pseudo)*abs(pseudo)^0.5
    rbPal <- colorRampPalette(c('dodgerblue','white','red'))
    fill.post <- rbPal(100)[as.numeric(cut(pseudo, breaks = 100))]
    fill.post <- fill.post[1:length(klds.0)]
  }

  names(fill.post) <- all.active

  # fill.evn <- rep("green", length(vars))
  # names(fill.evn) <- vars

  fill.1 <- rep("khaki", length(vars))
  names(fill.1) <- var.1
  fill.2 <- rep("orange", length(vars))
  names(fill.2) <- var.2
  fill.inter <- rep("gray", length(vars))
  names(fill.inter) <- var.inter

  nAttrs$fillcolor <- c(fill.1, fill.2, fill.inter, fill.post)

  if(length(cont.active)>0){
    par(oma=c(0,0,0,4))
    # Rgraphviz::plot(dag, nodeAttrs=nAttrs, main="")
    graph_plot <- Rgraphviz::layoutGraph(dag, nodeAttrs=nAttrs)
    Rgraphviz::renderGraph(graph_plot)
    par(oma=c(0,0,0,0.5))
    color.bar(colorRampPalette(c("dodgerblue", "white", "red"))(1000),
              -signif(max.kl,1), sym=TRUE)
  } else {
    par(oma=c(0,0,0,4))
    # Rgraphviz::plot(dag, nodeAttrs=nAttrs, main="")
    graph_plot <- Rgraphviz::layoutGraph(dag, nodeAttrs=nAttrs)
    Rgraphviz::renderGraph(graph_plot)
    par(oma=c(0,0,0,0.5))
    color.bar(colorRampPalette(c("white", "red"))(1000),
              min=0, max=signif(max.kl,1), sym=FALSE)
  }

  return(klds.0)
}

####### function for plotting the colorbar

color.bar <- function(lut, min, max=-min, nticks=11, ticks=seq(min, max, len=nticks),
                      title='', sym=TRUE) {

  scale = (length(lut)-1)/(max-min)
  n.half <- length(lut)/2
  cl <- c()

  for (i in 1:(length(lut)-1)) {
    if (sym) {
      j <- n.half+1+sign(i-n.half)*(abs(i-n.half)/n.half)^0.5*n.half
    } else {
      j <- 1+(i/length(lut))^0.5*length(lut)
    }
    cl[i] <- lut[j]
  }

  image.plot(legend.only=TRUE, col=cl,
             zlim=c(min(ticks, na.rm=T), max(ticks, na.rm=T)), add=TRUE,
             horizontal=FALSE, legend.shrink=0.3, legend.cex=0.7, legend.lab=title)
}
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/G1_PlotCGBN.R
#' Plot the marginal distributions
#'
#' Plot the marginal distributions.
#'
#' @details Plot the marginal distributions. Marginals of discrete variables are plotted as
#' bar plots, while those of continuous variables as density plots.
#'
#' @param marginals the marginal distributions returned by \code{Marginals} for plotting
#' @param groups names of the marginals to be shown on plots
#'
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550.
#'
#' @importFrom graphics lines legend barplot plot.default
#' @examples
#' data(toytree)
#' marg <- Marginals(toytree, c("Neu1", "Nr1i3", "chr1_42.65", "Spgl1"))
#' PlotMarginals(marginals=marg, groups=NULL)
#'
#' @seealso \code{\link{Marginals}}
#'
#' @export

PlotMarginals <- function(marginals, groups=NULL) {

  nms <- names(marginals$marginals)
  discrete.nodes <- nms[marginals$types]
  continuous.nodes <- nms[!marginals$types]
  posteriors <- marginals$marginals

  group.disc <- NULL
  group.cont <- NULL

  if(!is.null(groups)) {
    if(length(groups)!=length(posteriors)) {
      warning("Group and marginal lengths do not match.")
      groups <- NULL
    } else {
      group.disc <- groups[which(marginals$types)]
      group.cont <- groups[which(!marginals$types)]
    }
  }

  if(length(discrete.nodes)==0){
    PlotPosteriorContinuous(posteriors, groups=group.cont)
  }

  if(length(continuous.nodes)==0){
    par(mfrow=c(1,length(discrete.nodes)))
    for (i in 1:length(discrete.nodes)) {
      this.node <- discrete.nodes[i]
      PlotPosteriorDiscrete(posteriors[i], group=group.disc[i])
    }
    par(mfrow=c(1,1))
  }

  if(length(discrete.nodes)!=0 & length(continuous.nodes)!=0){
    par(mfrow=c(1,length(discrete.nodes)+1))
    PlotPosteriorContinuous(posteriors[continuous.nodes], groups=group.cont)
    for (i in 1:length(discrete.nodes)) {
      this.node <- discrete.nodes[i]
      PlotPosteriorDiscrete(posteriors[this.node], group=group.disc[i])
    }
    par(mfrow=c(1,1))
  }
}
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/G2_PlotMarginals.R
PlotPosteriorContinuous <- function(posteriors, main="", groups=NULL, col=NULL) {

  n <- length(posteriors)
  xys <- PlotPosteriorXY(posteriors)

  if(is.null(groups)) {
    groups <- names(posteriors)
  }

  if(length(groups)!=n){
    groups <- names(posteriors)
    warning("Number of groups does not match number of marginals.")
  }

  x <- c()
  y <- c()
  for(i in 1:n){
    x <- c(x, xys[[i]][[1]])
    y <- c(y, xys[[i]][[2]])
  }

  x.up <- max(x)
  x.low <- min(x)
  y.up <- max(y)
  y.low <- min(y)

  if(is.null(col)) {
    colors <- 1:n
  } else {
    colors <- col
  }

  # series <- names(posteriors)
  graphics::plot.default(xys[[1]][[1]], xys[[1]][[2]],
                         xlim=c(x.low, x.up), ylim=c(y.low, y.up),
                         type="l", main=main, xlab="", ylab="Density", col=colors)
  if(n>1) {
    for(i in 2:n) {
      lines(xys[[i]][[1]], xys[[i]][[2]], col=colors[i])
    }
  }
  legend("topright", legend=groups, fill=colors, bty="n")
}

###########################################
## Generate data for plotting
###########################################

PlotPosteriorXY <- function(posteriors) {

  n <- length(posteriors)
  result <- vector("list", n)

  for (j in 1:n){
    msd <- lapply(posteriors, MeanSD)[[j]]
    lower <- msd[1]-5*msd[2]
    upper <- msd[1]+5*msd[2]
    x <- seq(lower, upper, by=0.001)
    y <- rep(0, length(x))
    post.df <- posteriors[[j]]
    for (i in 1:nrow(post.df)){
      # post.df columns are (prob, mu, sd); the third column already holds the
      # standard deviation, so it is passed to dnorm directly (the earlier
      # sqrt() here double-transformed it)
      y <- y + post.df[i,1]*dnorm(x, mean=post.df[i,2], sd=post.df[i,3])
    }
    this.xy <- list(x,y)
    result[[j]] <- this.xy
  }

  names(result) <- names(posteriors)
  return(result)
}

###########################################
## Means and SDs of Mixture Gaussians
###########################################

MeanSD <- function(df) {
  Mean <- sum(df$prob*df$mu)
  SD <- sqrt( sum(df$prob*df$sd^2) + sum(df$prob*df$mu^2) - (sum(df$prob*df$mu))^2 )
  return(c(Mean=Mean, SD=SD))
}
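###########################################
## Numeric check sketch (illustrative only)
###########################################

## A quick check of the MeanSD() moment formulas against simulation, using a
## made-up two-component mixture in the (prob, mu, sd) format used above.
# mix <- data.frame(prob = c(0.3, 0.7), mu = c(-1, 2), sd = c(1, 0.5))
# MeanSD(mix)
# comp <- sample(1:2, 1e5, replace = TRUE, prob = mix$prob)
# x <- rnorm(1e5, mean = mix$mu[comp], sd = mix$sd[comp])
# c(mean(x), sd(x))  # should be close to MeanSD(mix)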
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/G3_PlotContinuous.R
PlotPosteriorDiscrete <- function(posteriors, group=NULL) {

  if(is.null(group)){
    nms <- names(posteriors)
  } else {
    nms <- group
  }

  ncl <- length(posteriors[[1]])
  if (ncl<3) ncl <- 3

  barplot(t(t(posteriors[[1]])), width=0.5, space=0.2, main=nms,
          col=brewer.pal(ncl, "Blues"), xlab=names(posteriors),
          beside=TRUE, names.arg=names(posteriors[[1]]))
}
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/G4_PlotDiscrete.R
#' Plot the cluster tree
#'
#' Plot the structure of a \code{\linkS4class{ClusterTree}} object
#'
#' @details Plot the structure of a \code{\linkS4class{ClusterTree}} object, with the nodes labeled by the corresponding
#' elimination node. The circles represent continuous clusters, while the boxes represent discrete clusters.
#' This function requires the \code{Rgraphviz} package.
#' @param tree a \code{\linkS4class{ClusterTree}} object
#' @param color node color
#'
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @examples
#'
#' \dontrun{
#' library("Rgraphviz")
#' data(toytree)
#' PlotTree(toytree)
#' }
#'
#' @export

PlotTree <- function(tree, color="gray90") {

  tree.graph <- tree@graph$tree
  cs.names <- tree@cluster
  cluster.type <- tree@cluster.class

  nAttrs <- list()
  node.shape <- c()
  node.shape[cluster.type] <- "box"
  node.shape[!cluster.type] <- "circle"
  names(node.shape) <- cs.names
  nAttrs$shape <- node.shape

  nAttrs$fontsize <- rep(16, length(cs.names))
  # nAttrs$height <- rep(1, length(cs.names))
  # nAttrs$width <- rep(2, length(cs.names))
  names(nAttrs$fontsize) <- cs.names
  # names(nAttrs$height) <- cs.names
  # names(nAttrs$width) <- cs.names

  fill.color <- rep(color, length(cs.names))
  names(fill.color) <- cs.names
  nAttrs$fillcolor <- fill.color

  # Rgraphviz::plot(tree.graph, nodeAttrs=nAttrs, main="")
  graph_plot <- Rgraphviz::layoutGraph(tree.graph, nodeAttrs=nAttrs)
  Rgraphviz::renderGraph(graph_plot)
}
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/G5_PlotTree.R
#' Launch the BayesNetBP Shiny App
#'
#' @details The function \code{runBayesNetApp} launches the \code{Shiny App} accompanying
#' this package. The app loads the \code{toytree} example by default and allows users
#' to load a customized \code{\linkS4class{ClusterTree}} object. In order to use this feature, a
#' \code{\linkS4class{ClusterTree}} object should be built, propagated and named \code{tree.init.p}, and
#' then saved as a \code{.RDATA} file. This file can be read in by the app. \cr
#'
#' The console of the \code{BayesNetBP} Shiny App comprises three panels. The first
#' panel controls model loading, visualization and subnetwork selection. The \code{Fit} function fits
#' the entire graph in the window. The \code{Fit Selected} function fits the selected subnetwork to the window.
#' The user can subset the network for visualization. The \code{Expand} function can trace the one-hop neighbors of
#' selected nodes in a stepwise manner.
#' After selecting the desired node sets, the user can subset the graph with the \code{Subset} function.
#' \cr
#'
#' The second panel is used for absorption of fixed and hard evidence.
#' The user can add multiple pieces of evidence to a list and absorb them into the model simultaneously.
#' Marginals of other nodes can be queried as
#' density or bar plots, depending on node type. If a set of evidence has been absorbed, the marginals both
#' before and after absorption are returned to facilitate comparison. To query the marginals, the user can
#' select the node of interest in the graph, and then click \code{Marginal of Selected}. The \code{Shift in Marginals}
#' function computes the signed and symmetric Kullback-Leibler divergence for all applicable nodes
#' in the network, and colors the nodes by their divergence and direction of change. \cr
#'
#' The function for systematic assessment of variable marginal shifts is provided in the third panel.
#' It allows the user to specify which node absorbs the spectrum of evidence (choose it in the select menu and
#' click \code{Select Observed}), and which nodes' divergences to compute (select them in the menu and
#' click \code{Add to Plot}).
#' Alternatively, the user can use the \code{Add All} function to put all applicable nodes on the plotting list.
#' The result is visualized in an interactive plot. The \code{Min}, \code{Max} and \code{Step} controls set the range
#' of evidence values to be absorbed.
#'
#' @param launch.browser \code{logical(1)} whether to launch the App in the browser
#'
#' @author Han Yu
#'
#' @references Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @examples
#'
#' \dontrun{
#' # load or install required packages to run the App
#' library("shiny")
#' library("googleVis")
#' library("devtools")
#' devtools::install_github("cytoscape/cyjShiny")
#' library("cyjShiny")
#' # run the App in the browser
#' runBayesNetApp(launch.browser=TRUE)
#' }
#'
#' @importFrom igraph igraph.from.graphNEL as_edgelist
#'
#' @export

runBayesNetApp <- function(launch.browser=TRUE) {

  appDir <- system.file("BayesNetApp", package = "BayesNetBP")
  if (appDir == "") {
    stop("Could not find BayesNetApp directory. Try re-installing `BayesNetBP`.",
         call. = FALSE)
  }

  data(toytree, envir = environment())
  shiny::runApp(appDir, launch.browser=launch.browser)
}
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/H1_runBayesNetApp.R
#' A simulated data set from the Chest Clinic example
#'
#' Simulated data from the Chest Clinic example (also known as the Asia example)
#' from Lauritzen and Spiegelhalter, 1988.
#'
#' @name chest
#' @usage
#'
#' data(chest)
#'
#' @format The data set \code{chest} contains two objects:
#' \describe{
#'  \item{\code{data}}{a \code{data.frame} object of 10000 observations and 8 discrete variables.}
#'  \item{\code{dag}}{a \code{graphNEL} object specifying the network structure.}
#' }
#'
#' @docType data
#' @references Lauritzen and Spiegelhalter (1988) Local Computations with Probabilities on Graphical Structures and
#' their Application to Expert Systems (with Discussion). J. Roy. Stat. Soc. 50, p. 157-224. \cr
#' \cr
#' Dethlefsen, C., & Hojsgaard, S. (2005). A common platform for graphical models in R: The gRbase package.
#' Journal of Statistical Software, 14(17), 1-12.
#'
NULL
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/R1_data_chest.R
#' Saccharomyces Cerevisiae eQTL data from Kruglyak et al. (2005)
#'
#' eQTL data from 112 F1 segregants from a cross between BY4716 and
#' RM11-1a strains of \emph{Saccharomyces Cerevisiae}.
#'
#' @details
#' The \code{yeast} dataset is a subset of the widely studied yeast expression
#' dataset comprising 112 F1 segregants from a cross between the BY4716 and RM11-1a
#' strains of \emph{Saccharomyces Cerevisiae}. The original dataset consists of
#' expression values reported as log2(sample/BY reference) for 6216 genes.
#' The data can be accessed in Gene Expression Omnibus (GEO) by accession number (GSE1990).
#' After linkage analysis and filtering based on location and significance of QTL,
#' a final set of 38 genes and their corresponding 12 SNP markers were identified and
#' included in the yeast dataset. The gene expression values are discretized around
#' the median and have two states, 1 (above or equal to the median) and -1 (below the median).
#' There are two genotype states: 1 or 2. Thus the final dataset is a data frame of 112 observations
#' (genotypes) of 12 variables (SNP markers) and normalized gene expression of 38 variables (genes).
#'
#' @format The data set \code{yeast} is a data frame of 112 observations of 50 variables: genotype
#' data (genotype states at 12 SNP markers) and phenotype data (normalized and discretized
#' expression values of 38 genes). Both genotypes and phenotypes are of class \code{factor}.
#' @usage data(yeast)
#' @name yeast
#' @docType data
#'
#' @references
#' Brem RB, Kruglyak L. The landscape of genetic complexity across 5,700 gene expression traits in yeast.
#' Proc Natl Acad Sci U S A 2005 Feb 1;102(5):1572-7.\cr
#' \cr
#' Brem RB, Storey JD, Whittle J, Kruglyak L. Genetic interactions between polymorphisms that affect gene
#' expression in yeast. Nature 2005 Aug 4;436(7051):701-3.
#'
NULL
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/R2_data_yeast.R
#' A ClusterTree Example of the Liver Model
#'
#' A propagated \code{\linkS4class{ClusterTree}} object named \code{toytree}, obtained from
#' the \code{\link{liver}} QTL data.
#'
#' @format The data set contains a propagated \code{\linkS4class{ClusterTree}} object \code{toytree},
#' which is ready for evidence absorption and making queries.
#'
#' @usage data(toytree)
#' @name toytree
#' @docType data
#'
NULL
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/R3_data_toytree.R
#' Mus Musculus HDL QTL data from Leduc et al. (2012)
#'
#' Liver QTL data obtained from an F2 intercross between inbred MRL/MpJ and SM/J
#' strains of mice.
#'
#' @name liver
#' @usage
#' data(liver)
#' @format The data set \code{liver} contains three objects: the data, a learned Bayesian network structure,
#' and a \code{vector} specifying node type. The fields are described as follows:
#'
#' \describe{
#'  \item{\code{data}}{a \code{data.frame} object that contains 280 samples (rows) and 15 variables: genotype data
#'  (genotype states at 5 SNP markers) and phenotype data (HDL levels and normalized expression values of 10 genes).
#'  Three of these phenotypes are dichotomized, including Cyp2b10, Spgl1 and HDL. Genotypes and dichotomized phenotypes
#'  are of class \code{factor} and continuous phenotypes are of class \code{numeric}.}
#'  \item{\code{dag}}{a \code{graphNEL} object, which is the network structure learned by the \code{qtlnet} package.}
#'  \item{\code{node.class}}{a named \code{vector} of \code{logical} values indicating whether each node is discrete.}
#' }
#'
#' @docType data
#'
#' @references Leduc MS, Blair RH, Verdugo RA, Tsaih SW, Walsh K, Churchill GA, Paigen B. (2012).
#' "Using bioinformatics and systems genetics to dissect HDL-cholesterol genetics in an MRL/MpJ
#' x SM/J intercross." J Lipid Res., 6, 1163-75.
NULL
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/R4_data_liver.R
#' A ClusterTree Example of the Emission Model
#'
#' A propagated \code{\linkS4class{ClusterTree}} object named \code{emission}. This model contains nine variables, including
#' three discrete ones: Filter State (Fs), Waste Type (W), Burning Regimen (B); and six continuous ones: Metals in Waste (Min),
#' Metals Emission (Mout), Filter Efficiency (E), Dust Emission (D), CO2 Concentration in Emission (C), Light Penetrability (L).
#'
#' @references Lauritzen, Steffen L., and Frank Jensen. Stable local computation with conditional Gaussian distributions.
#' Statistics and Computing 11.2 (2001): 191-203.
#'
#' @format The data set contains a propagated \code{\linkS4class{ClusterTree}} object \code{emission},
#' ready for evidence absorption and making queries.
#'
#' @usage data(emission)
#' @name emission
#' @docType data
#'
NULL
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/R5_data_emission.R
#' A simulated data set from the Emission example
#'
#' Simulated data from the Emission example (also known as the Waste Incinerator example).
#'
#' @format The data set \code{emission1000} contains two objects:
#' \describe{
#'  \item{\code{data}}{a \code{data.frame} object of 1000 observations of 3 discrete and 6 continuous variables.}
#'  \item{\code{dag}}{a \code{graphNEL} object specifying the network structure.}
#' }
#'
#' @usage data(emission1000)
#' @name emission1000
#' @docType data
#'
#' @references Lauritzen, S. L., & Jensen, F. (2001).
#' Stable local computation with conditional Gaussian distributions. Statistics and Computing, 11(2), 191-203.
#'
NULL
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/R6_data_emission1000.R
setClass("LPPotential", slots = list(head = "character", tail = "character", config = "matrix", beta = "matrix", # each row for a configuration # each column for a tail variable const = "numeric", variance = "numeric" ) )
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/S4_1_LPPotential.R
#' An S4 class of the cluster tree.
#'
#' @description The \code{ClusterTree} object is the computational object for belief propagation.
#' @slot cluster A \code{vector} storing the names of the clusters in the cluster tree.
#' @slot node A \code{vector} storing the names of the nodes in the Bayesian network.
#' @slot graph A \code{list} of two graphNEL objects: \code{$dag} stores the graph of the Bayesian network,
#' \code{$tree} stores the graph of the cluster tree.
#' @slot member A named \code{list} of the node cluster membership.
#' @slot parent A named \code{vector} indicating the parent node of a given cluster in the cluster tree.
#' @slot cluster.class A named \code{vector} of logical values indicating whether a cluster is continuous or discrete.
#' @slot node.class A named \code{vector} of logical values indicating whether a node is continuous or discrete.
#' @slot assignment A named \code{list} indicating the assignment of discrete nodes to discrete clusters.
#' @slot propagated A \code{logical} value indicating whether the discrete compartment has been propagated.
#'
#' @slot cpt A named \code{list} of the conditional probability tables.
#' @slot jpt A named \code{list} of the joint distribution tables.
#' @slot lppotential A named \code{list} of the linear predictor potentials assigned to each cluster in the lppotential slots.
#' @slot postbag A named \code{list} of the linear predictor potentials assigned to each cluster in the postbag slots.
#' @slot activeflag A named \code{vector} of logical values indicating whether a continuous cluster is active.
#' @slot absorbed.variables A \code{vector} of characters indicating variables observed with hard evidence.
#' @slot absorbed.values A \code{list} indicating the values of the variables observed with hard evidence.
#' @slot absorbed.soft.variables A \code{vector} of characters indicating variables observed with soft or likelihood evidence.
#' @slot absorbed.soft.values A \code{list} of the likelihoods of the soft or likelihood evidence.

setClass("ClusterTree",
         slots = list(cluster = "character",
                      node = "character",
                      graph = "list",
                      member = "list",
                      parent = "character",
                      cluster.class = "logical",
                      node.class = "logical",
                      assignment = "list",
                      propagated = "logical",
                      cpt = "list",
                      jpt = "list",
                      lppotential = "list",
                      postbag = "list",
                      activeflag = "logical",
                      absorbed.variables = "character",
                      absorbed.values = "list",
                      absorbed.soft.variables = "character",
                      absorbed.soft.values = "list"
                      )
         )
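## A quick inspection sketch (assumes the bundled, propagated toytree object):
# data(toytree)
# class(toytree)            # "ClusterTree"
# toytree@propagated        # TRUE for a propagated tree
# head(toytree@cluster)     # cluster names, labeled by elimination nodes
# head(toytree@node.class)  # TRUE = discrete node, FALSE = continuous node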
## ---- end of file: BayesNetBP/R/S4_2_ClusterTree.R ----
library(shiny)
library(cyjShiny)
library(BayesNetBP)
library(googleVis)
library(igraph)

options(shiny.maxRequestSize = 300*1024^2)

data("toytree")
# `tree` and `subtree` are objects of class ClusterTree
tree <- toytree

## Helper functions ##################

MeanSD <- function(df) {
  # mixture mean and SD via the law of total variance
  Mean <- sum(df$prob*df$mu)
  SD <- sqrt( sum(df$prob*df$sd^2) + sum(df$prob*df$mu^2) - (sum(df$prob*df$mu))^2 )
  return(c(Mean=Mean, SD=SD))
}

#************************************************************

ui <- fluidPage(
  titlePanel("BayesNetBP"),
  fluidRow(
    column(4,
           h4("Load Model"),
           # actionButton("compile", label = "Compile"),
           fileInput("file1", "Select your file"),
           actionButton("update", label = "Update"),
           actionButton("sg_fit", label = "Fit"),
           actionButton("sg_fitsel", label = "Fit Selected"),
           h4("Subgraph"),
           actionButton("sg_exp", label = "Expand"),
           actionButton("sg_sub", label = "Subset"),
           actionButton("sg_reset", label = "Reset"),
           h4("Set Evidence"),
           selectInput("var",
                       label = "Variable to set evidence",
                       choices = sort(tree@node)
           )
    ),
    # column(2,),
    column(4,
           h4("Evidence value"),
           selectInput("getvalue",
                       label = "Select value (discrete)",
                       choices = c()
           ),
           textInput("evidence", "Enter value (continuous)", value = "",
                     width = NULL, placeholder = NULL),
           h4("Observe and Query"),
           actionButton("add", label = "Add evidence"),
           actionButton("clear", label = "Clear"),
           actionButton("absorb", label = "Absorb"),
           helpText("Evidence to absorb:"),
           verbatimTextOutput("added"),
           actionButton("marg", label = "Marginal of Selected"),
           actionButton("post", label = "Shift in marginals"),
           actionButton("reset", label = "Reset")
           # # selectInput("abvar",
           # #             label = "Choose the observed variable",
           # #             choices = tree@node),
           #
           # actionButton("addplot", label = "Add to plot list"),
           # actionButton("addall", label = "Add all"),
           # actionButton("clearpvar", label = "Clear"),
           # actionButton("plotkld", label = "Plot"),
           #
           # helpText("Variables to plot:"),
           # verbatimTextOutput("addplot"),
           #
           # sliderInput("range", "Range:",
           #             min = -10, max = 10, value = c(-3,3)),
           # sliderInput("increment", "Step:",
           #             min = 0, max = 1, value = 0.5, step= 0.1)
    ),
    column(4,
           h4("Effects of a spectrum of evidence"),
           selectInput("abvar",
                       label = "Choose the variable",
                       choices = names(tree@node.class)[tree@node.class]),
           #selectInput("var2",
           #            label = "Choose variables to plot",
           #            choices = tree.init.p$nodes),
           actionButton("sel_obvar", label = "Select observed"),
           actionButton("addplot", label = "Add to plot"),
           actionButton("addall", label = "Add all"),
           actionButton("clearpvar", label = "Clear"),
           actionButton("plotkld", label = "Plot"),
           helpText("Observed variable:"),
           verbatimTextOutput("obvar"),
           helpText("Variables to plot:"),
           verbatimTextOutput("addplot"),
           #sliderInput("range", "Range:", min = -10, max = 10, value = c(-3,3)),
           #sliderInput("increment", "Step:", min = 0, max = 1, value = 0.5, step= 0.1)
           #numericInput("kld_min", "Min", -10, min = NA, max = NA, step = NA, width = 4),
           #numericInput("kld_max", "Max", 10, min = NA, max = NA, step = NA, width = 4),
           #numericInput("kld_step", "Step", 1, min = NA, max = NA, step = NA, width = 4)
           div(style="display: inline-block; vertical-align:top; width: 90px;", numericInput("kld_min", "Min", -10)),
           div(style="display: inline-block; vertical-align:top; width: 90px;", numericInput("kld_max", "Max", 10)),
           div(style="display: inline-block; vertical-align:top; width: 70px;", numericInput("kld_step", "Step", 1))
    ),
    # graph panels, main graph and subgraph
    fluidRow(
      column(8,
             cyjShinyOutput('cyjShiny')
             # h4("View"),
      ),
      column(4,
             htmlOutput("gtable"),
             htmlOutput("plot3")
      ),
      ## spectrum
      column(4,
             htmlOutput("plot4")
      )
      #########
    ),
    # extra white space at the bottom
    mainPanel(
      h3(textOutput("whitespace"))
    )
  )
)

#************************************************************

server <- function(input, output, session) {

  v <- reactiveValues(absorb = FALSE,
                      absorbed = rep(0, length(tree@node)),
                      reset = FALSE,
                      post = FALSE,
                      marg = FALSE,
                      plotkld = FALSE,
                      plotmarg = FALSE,
                      tree.init = tree,
                      tree.post = tree,
                      vars = c(),
                      values = list(),
                      pvars = c(),
                      df.list = list(data.frame(), FALSE),
                      df.kld = list(),
                      newNodes = c()
  )

  tbl.nodes <- data.frame(id = sort(tree@node),
                          type = c(1, 0)[tree@node.class[sort(tree@node)] + 1],
                          kld = 0,
                          absorbed = "n",
                          stringsAsFactors = FALSE)

  elist <- as_edgelist(igraph.from.graphNEL(tree@graph$dag))
  tbl.edges <- data.frame(source = elist[,1],
                          target = elist[,2],
                          # constant edge attribute required by dataFramesToJSON
                          interaction = rep("phosphorylates", nrow(elist)),
                          stringsAsFactors = FALSE)

  graph.json <- dataFramesToJSON(tbl.edges, tbl.nodes)
  output$cyjShiny <- renderCyjShiny(
    cyjShiny(graph = graph.json, layoutName = "dagre", styleFile = "biologicalStyle.js")
  )

  # event observation
  observeEvent(input$update, {
    if (!is.null(input$file1)) {
      tree <- get(load(input$file1$datapath))
      # validate the uploaded object (the original checked the bundled
      # `toytree` instead, which always passed)
      if (is(tree, "ClusterTree")) {
        tbl.nodes <- data.frame(id = sort(tree@node),
                                type = c(1, 0)[tree@node.class[sort(tree@node)] + 1],
                                kld = 0,
                                absorbed = "n",
                                stringsAsFactors = FALSE)
        elist <- as_edgelist(igraph.from.graphNEL(tree@graph$dag))
        tbl.edges <- data.frame(source = elist[,1],
                                target = elist[,2],
                                interaction = rep("phosphorylates", nrow(elist)),
                                stringsAsFactors = FALSE)
        graph.json <- dataFramesToJSON(tbl.edges, tbl.nodes)
        ###
        output$cyjShiny <- renderCyjShiny(
          cyjShiny(graph = graph.json, layoutName = "dagre", styleFile = "biologicalStyle.js")
        )
        v$tree.init = tree
        v$tree.post = tree
        updateSelectInput(session, "var",
                          label = "Select variable to set evidence",
                          choices = sort(v$tree.init@node),
                          selected = sort(v$tree.init@node)[1]
        )
        updateSelectInput(session, "abvar",
                          label = "Choose the variable",
                          choices = names(tree@node.class)[tree@node.class],
                          selected = sort(v$tree.init@node)[1]
        )
        # reset all variables
        v$absorbed = rep(0, length(tree@node))
        v$vars = c()
        v$values = list()
        v$df.list = list(data.frame(), FALSE)
        # v$df.kld = list()
        # v$pvars = c()
        v$plotmarg <- FALSE
      }
      # probably remove/change this later
      else {
        print("Invalid file")
      }
    }
  })

  ##############################################################

  observeEvent(input$sg_exp, {
    selectFirstNeighbors(session)
  })

  observeEvent(input$sg_sub, {
    invertSelection(session)
    hideSelection(session)
    invertSelection(session)
  })

  observeEvent(input$sg_reset, {
    showAll(session)
  })

  observeEvent(input$sg_fit, {
    fit(session, padding = 20)
  })

  observeEvent(input$sg_fitsel, {
    fitSelected(session, padding = 20)
  })

  ## Select nodes ################################

  #observeEvent(input$getSelectedNodes, {
  #  v$vars <- c(v$vars, getSelectedNodes(session))
  #  getSelectedNodes(session)
  #})

  #observeEvent(input$click, ignoreInit=TRUE, {
  #  output$selectedNodesDisplay <- renderText({" "})
  #  getSelectedNodes(session)
  #})

  observeEvent(input$selectedNodes, {
    # getSelectedNodes(session)
    v$newNodes <- input$selectedNodes
    output$selectedNodesDisplay <- renderText({paste(v$newNodes)})
    # print(v$newNodes)
    this.var <- input$selectedNodes[1]
    this.mar.init.temp <- Marginals(v$tree.init, this.var)
    this.mar.post.temp <- Marginals(v$tree.post, this.var)
    this.mar.init <- this.mar.init.temp[[1]][[1]]
    this.mar.post <- this.mar.post.temp[[1]][[1]]

    if (this.mar.init.temp$types[1]) {
      # discrete node: tabulate the marginal before and after absorption
      v$df.list <- list(data.frame(var = names(this.mar.init),
                                   Before = this.mar.init,
                                   After = this.mar.post), TRUE)
    } else {
      # continuous node: plot normal approximations of the mixture marginals
      msd1 <- MeanSD(this.mar.init)
      msd2 <- MeanSD(this.mar.post)
      x.min <- min(msd1[1] - 3*msd1[2], msd2[1] - 3*msd2[2])
      x.max <- max(msd1[1] + 3*msd1[2], msd2[1] + 3*msd2[2])
      x.vec <- seq(x.min, x.max, length.out = 1000)
      v$df.list <- list(data.frame(x = x.vec,
                                   Before = dnorm(x.vec, msd1[1], msd1[2]),
                                   After = dnorm(x.vec, msd2[1], msd2[2])), FALSE)
    }
  })

  ##############################################################

  output$added <- renderPrint({
    if (length(v$vars)==0) return("")
    evid <- ""
    for (i in 1:length(v$vars)) {
      this.evid <- paste0(v$vars[i], "=", v$values[i], "; ")
      evid <- paste0(evid, this.evid)
    }
    return(evid)
  })

  #output$clickedNode = renderPrint({
  #  paste(v$newNodes)
  #})

  ## Add evidence ############################################

  observeEvent(input$add, {
    node.class <- tree@node.class
    if (input$var %in% v$vars) {
      k <- which(v$vars==input$var)
      if (node.class[input$var]) {
        v$values[k] <- input$getvalue
      } else {
        v$values[k] <- as.numeric(input$evidence)
      }
    } else {
      v$vars <- c(v$vars, input$var)
      if (node.class[input$var]) {
        v$values[length(v$values)+1] <- input$getvalue
      } else {
        v$values[length(v$values)+1] <- as.numeric(input$evidence)
      }
    }
  })

  observeEvent(input$clear, {
    v$vars <- c()
    v$values <- list()
  })

  ##############################################

  observe({
    x <- input$var
    values <- GetValue(v$tree.post, x, message=FALSE)
    # Can use character(0) to remove all choices
    if (is.null(values)) values <- character(0)
    # Can also set the label and select items
    updateSelectInput(session, "getvalue",
                      label = "Select value (discrete)",
                      choices = values,
                      selected = values[1]
    )
  })

  ## Absorb evidence #################

  observeEvent(input$absorb, {
    #v$absorb <- TRUE
    #v$reset <- FALSE
    text.in <- input$evidence
    text.sep <- strsplit(text.in, split=",")[[1]]
    vars <- v$vars
    values <- v$values
    if (length(vars)>0) {
      v$tree.post <- AbsorbEvidence(v$tree.init, vars, values)
      # v$absorbed[v$vars] <- 1
      setNodeAttributes(session, "absorbed", vars, rep("y", length(vars)))
    }
  })

  ## Shift in marginals ##########

  observeEvent(input$post, {
    #v$reset <- TRUE
    v$absorb <- FALSE
    #v$vars <- c()
    #v$values <- c()
    v$post <- TRUE
    name <- v$tree.init@node
    klds <- PlotCGBN(v$tree.init, v$tree.post, fontsize = 30, plotting = FALSE, pbar = TRUE)
    # cls <- color.generator(klds)
    klds <- klds/(max(abs(klds)))
    kld.nodes <- names(klds)
    names(klds) <- NULL
    setNodeAttributes(session, "kld", kld.nodes, klds)
  })

  ## Get marginal ###################

  observeEvent(input$marg, {
    v$plotmarg <- TRUE
    getSelectedNodes(session)
    # if(length(v$pre_sub)==0) return()
    #this.var <- input$selectedNodes
    #print(this.var)
    #print(Marginals(v$tree.post, this.var)[[1]][[1]])
    # print(this.mar)
    # v$df.list[[1]] <- data.frame(var = names(this.mar), value = this.mar)
  })

  observeEvent(input$reset, {
    v$vars <- c()
    v$values <- c()
    v$tree.post <- v$tree.init
    setNodeAttributes(session, "kld", v$tree.init@node, rep(0, length(v$tree.init@node)))
  })

  ############

  output$plot3 <- renderGvis({
    if (v$plotmarg) {
      if (ncol(v$df.list[[1]]) == 0) return()
      ##
      if (v$df.list[[2]]) {
        glplot <- gvisBarChart(v$df.list[[1]], options=list(width=400, height=400))
      } else {
        glplot <- gvisLineChart(v$df.list[[1]], options=list(width=400, height=400))
      }
      return(glplot)
    }
  })

  output$gtable <- renderGvis({
    if (v$plotmarg) {
      if (v$df.list[[2]]) {
        gtable <- gvisTable(v$df.list[[1]])
      } else {
        gtable <- NULL
      }
      return(gtable)
    }
  })

  #########

  observeEvent(input$sel_obvar, {
    v$obvar <- input$abvar
  })

  output$obvar <- renderPrint({
    if (length(v$obvar)==0) return("")
    return(v$obvar)
  })

  observeEvent(input$addplot, {
    v$pvars <- c(v$pvars, input$abvar)
  })

  observeEvent(input$clearpvar, {
    v$pvars <- c()
  })

  observeEvent(input$addall, {
    v$pvars <- setdiff(v$tree.init@node, v$obvar)
  })

  observeEvent(input$plotkld, {
    if (length(setdiff(v$pvars, v$obvar)) > 0) {
      v$plotkld <- TRUE
      df <- ComputeKLDs(tree = v$tree.init, var0 = v$obvar,
                        vars = setdiff(v$pvars, v$obvar),
                        seq = seq(input$kld_min, input$kld_max, input$kld_step),
                        pbar = TRUE)
      v$df.kld <- list(df)
    }
  })

  output$addplot <- renderPrint({
    if (length(v$pvars)==0) return("")
    return(paste0(v$pvars, collapse=","))
  })

  output$plot4 <- renderGvis({
    if (v$plotkld) {
      Line <- gvisLineChart(v$df.kld[[1]], options=list(width=500, height=500))
      return(Line)
    }
  })
}

# Create Shiny app ----
shinyApp(ui = ui, server = server)
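## Hedged usage sketch (added for illustration): launching the app from an
## installed copy of BayesNetBP. The "BayesNetApp" directory name matches the
## file path recorded below; whether the package also exports a wrapper such
## as runBayesNetApp() is not shown here, so the direct runApp() call is used.
if (interactive()) {
  shiny::runApp(system.file("BayesNetApp", package = "BayesNetBP"))
}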
## ---- end of file: BayesNetBP/inst/BayesNetApp/app.R ----
#' Determine the sample size for a Bayesian two-stage trial design
#' of ordinal endpoints without the proportional odds assumption
#'
#' @description
#'
#' Obtain the estimated sample size based on a user-specified type I
#' error, power and effect size defined by the odds ratios between
#' the treatment and control groups, without the proportional
#' odds (PO) assumption.
#'
#'
#' @param nmax the maximum sample size searched over to reach the desired power
#' @param or_alt effect size to be detected (under H_1) in
#' terms of odds ratios
#' @param pro_ctr distribution of clinical categories for the
#' control group
#' @param U the desirability of each outcome level
#' @param alpha the desired type I error rate to be controlled
#' @param power the desired power to be achieved
#' @param ntrial the number of simulated trials
#' @param method whether the statistical test for interim/final analysis is Bayesian or
#' Frequentist. method = "Frequentist" for the Frequentist approach; method = "Bayesian"
#' for the Bayesian approach
#'
#'
#' @details
#' A grid search is used to calibrate the futility and superiority cutoffs so that
#' the desired type I error rate is guaranteed. The sample-size search starts at
#' 50 subjects for the control and treatment groups at each stage and increases
#' in increments of 25 up to nmax.
#'
#' For the parameter estimation section we have two options, selected via the
#' method argument: (i) method = "Frequentist", (ii) method = "Bayesian".
#' If method = "Frequentist", parameters are estimated via the package ordinal,
#' which is based on frequentist methods, while if method = "Bayesian", parameters
#' are estimated through a Bayesian model.
#'
#' Specifically, the numerical utilities U reflect the desirability of each outcome
#' level. To elicit them, in our example we first set the utility of the best level
#' to 100 and of the worst level to 0, and then asked physicians to specify
#' numerical values for the intermediate levels that reflect their desirability
#' relative to the best and worst levels.
#'
#' Please note, in our example, the argument ntrial = 5 is used for time-saving
#' purposes only.
#'
#'
#'
#' @return ss_npo() returns the recommended sample size for each
#' of the two groups for the interim and final stages, assuming 1:1
#' equal randomization for the two groups at each stage, together with the
#' corresponding power.
#'
#' @export
#'
#' @examples
#' set.seed(123)
#' ss_npo(nmax = 200, or_alt = c(1.6,1.5,1.5,1.4,1.4),
#'        pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13), U = c(100,80,65,25,10,0),
#'        alpha = 0.05, power = 0.8, ntrial = 5, method = "Frequentist")

#----------------------------------------------------------------------------
## sample size calculator
ss_npo = function(nmax, or_alt, pro_ctr, U, alpha, power, ntrial, method){

  N = 200
  # under null, calculate thresholds
  cf_grid = 0.2 #seq(0.5, 0.7, by=0.1)
  threshold_grid = seq(0.8, 0.95, by=0.05)

  or_null = rep(1, length(pro_ctr)-1)
  or.mat = matrix(rep(or_null, ntrial), nrow = ntrial, ncol = length(or_null),
                  byrow = TRUE)

  output = c()
  for (cf in cf_grid){
    for (threshold in threshold_grid){
      out = multiple_trial_npo(or.mat, sd = 0.2, pro_ctr, U, n = N,
                               cf = cf, threshold = threshold, method = method)
      rr = c(cf, threshold, out)
      output = rbind(output, rr)
      colnames(output) = c("cf", "threshold", "PET(%)", "alpha", "avgss")
      results = as.data.frame(output)
    }
  }
  index = min(which(abs(results$alpha-alpha)==min(abs(results$alpha-alpha))))
  vec = c(results[index, c(1,2,4)])
  thrsh = c(vec$cf, vec$threshold)
  names(thrsh) = c("futility", "superiority")

  # calculate power
  or.mat = matrix(rep(or_alt, ntrial), nrow = ntrial, ncol = length(or_alt),
                  byrow = TRUE)
  n_grid = seq(50, nmax, by = 25)
  output = c()
  for (n in n_grid){
    out = multiple_trial_npo(or.mat, sd = 0.2, pro_ctr, U, n = n,
                             cf = vec$cf, threshold = vec$threshold,
                             method = method)
    rr = c(n, out)
    output = rbind(output, rr)
    colnames(output) = c("samplesize", "PET(%)", "Power(%)", "avgss")
  }
  results = list()
  index = min(which(output[,3] > power))
  results$total_sample_size_for_each_group = output[index, 1]
  results$power = output[index, 3]*100
  results$threshold = thrsh
  results$typeIerror = round(vec$alpha, digits = 3)
  return(results)
}
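## Hedged usage sketch (added for illustration; arguments mirror the example
## above). The returned list is read field by field; field names follow the
## code of ss_npo().
if (interactive()) {
  set.seed(123)
  res = ss_npo(nmax = 200, or_alt = c(1.6,1.5,1.5,1.4,1.4),
               pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13),
               U = c(100,80,65,25,10,0),
               alpha = 0.05, power = 0.8, ntrial = 5, method = "Frequentist")
  res$total_sample_size_for_each_group  # per-group, per-stage sample size
  res$power                             # achieved power (%)
  res$threshold                         # calibrated futility/superiority cutoffs
}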
## ---- end of file: BayesOrdDesign/R/Bayes_Ord_Design_NPO.R ----
#' Determine the sample size for a Bayesian two-stage trial design
#' of ordinal endpoints with the proportional odds assumption
#'
#' @description
#' Obtain the estimated sample size based on a user-specified type I
#' error, power and effect size defined by the odds ratio between
#' the treatment and control groups, under the proportional
#' odds (PO) assumption.
#'
#' @param or_alt effect size to be detected (under H_1)
#' in terms of odds ratio
#' @param pro_ctr distribution of clinical categories for the
#' control group
#' @param alpha the desired type I error rate to be controlled
#' @param power the desired power to be achieved
#' @param nmax the maximum sample size searched over to reach the desired power
#' @param ntrial the number of simulated trials
#' @param method whether the statistical test for interim/final analysis is Bayesian or
#' Frequentist. method = "Frequentist" for the Frequentist approach; method = "Bayesian"
#' for the Bayesian approach
#'
#'
#' @return ss_po() returns the recommended sample size for each of
#' the two groups for the interim and final stages, assuming 1:1 equal
#' randomization for the two groups at each stage, together with the
#' corresponding power.
#' @export
#' @details
#' A grid search is used to calibrate the futility and superiority cutoffs so that
#' the desired type I error rate is guaranteed. The sample-size search starts at
#' 50 subjects for the control and treatment groups at each stage and increases
#' in increments of 25 up to nmax.
#'
#' For the parameter estimation section we have two options, selected via the
#' method argument: (i) method = "Frequentist", (ii) method = "Bayesian".
#' If method = "Frequentist", parameters are estimated via the package ordinal,
#' which is based on frequentist methods, while if method = "Bayesian", parameters
#' are estimated through a Bayesian model.
#'
#' Please note, in our example, the argument ntrial = 5 is used for time-saving
#' purposes only.
#'
#'
#' @examples
#' ss_po(or_alt = 1.5, pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13), alpha = 0.05,
#'       power = 0.8, nmax = 100, ntrial = 5, method = "Frequentist")
#'

## sample size calculator
ss_po = function(or_alt, pro_ctr, alpha, power, nmax, ntrial, method){

  N = 200 # maximum sample size
  # under null, calculate thresholds
  cf_grid = 0.2 #seq(0.6, 0.7, by=0.1)
  threshold_grid = seq(0.99, 0.999, by=0.001)

  log_or = rnorm(ntrial, log(1), sd = 0.2)
  or = exp(log_or)
  or.mat = matrix(rep(or, each = length(pro_ctr)-1, times = 1),
                  ncol = length(pro_ctr)-1, byrow = TRUE)

  output = c()
  for (cf in cf_grid){
    for (threshold in threshold_grid){
      out = multiple_trial_po(sim_runs = ntrial, or.mat, pro_ctr = pro_ctr,
                              n = N, cf = cf, threshold = threshold,
                              method = method)
      rr = c(cf, threshold, out)
      output = rbind(output, rr)
      colnames(output) = c("cf", "threshold", "PET(%)", "alpha", "avgss")
      results = as.data.frame(output)
    }
  }
  index = min(which(abs(results$alpha-alpha)==min(abs(results$alpha-alpha))))
  vec = c(results[index, c(1,2,4)])
  thrsh = c(vec$cf, vec$threshold)
  names(thrsh) = c("futility", "superiority")

  output = c()
  log_or = rnorm(ntrial, log(or_alt), sd = 0.2)
  or = exp(log_or)
  or.mat = matrix(rep(or, each = length(pro_ctr)-1, times = 1),
                  ncol = length(pro_ctr)-1, byrow = TRUE)
  n_grid = seq(50, nmax, by = 25)
  for (n in n_grid){
    out = multiple_trial_po(sim_runs = ntrial, or.mat, pro_ctr = pro_ctr, n,
                            cf = vec$cf, threshold = vec$threshold,
                            method = method)
    rr = c(n, out)
    output = rbind(output, rr)
    colnames(output) = c("sample size", "PET(%)", "Power(%)", "avgss")
  }
  results = list()
  index = min(which(output[,3] >= power))
  results$total_sample_size_for_each_group = output[index, 1]
  results$power = 100*output[index, 3]
  results$threshold = thrsh
  results$typeIerror = vec$alpha
  return(results)
}
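## Hedged illustration (added): each simulated trial perturbs the working
## odds ratio on the log scale, or_i = exp(N(log(or), 0.2^2)), so the rows of
## or.mat above are trial-specific effect sizes rather than one fixed value.
if (interactive()) {
  set.seed(1)
  exp(rnorm(5, mean = log(1.5), sd = 0.2))  # five trial-level odds ratios around 1.5
}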
## ---- end of file: BayesOrdDesign/R/Bayes_Ord_Design_PO.R ----
#' Determine the sample size for a Bayesian two-stage trial design for
#' ordinal endpoints based on the switch model
#'
#' @description
#' When there is insufficient information to determine which of the two
#' models (PO or NPO) is more appropriate, the PO/NPO switch model-based design
#' is used to obtain the estimated sample size based on a user-specified type I
#' error, power and expected effect.
#'
#' @returns ss_switch() returns the recommended sample size for each group at
#' every interim look, under the assumptions that the sample size in the control
#' arm is the same as in the treatment arm and that the sample size at each
#' interim look is the same.
#' @export
#' @param alpha the desired type I error rate to be controlled
#' @param power the desired power to be achieved
#' @param n_po sample size for the treatment and control groups, at each stage,
#' based on the PO model
#' @param n_npo sample size for the treatment and control groups, at each stage,
#' based on the NPO model
#' @param or_alt expected treatment efficacy effect size to be
#' detected (under H_1) in terms of odds ratios
#' @param pro_ctr distribution of clinical categories for the
#' control group
#' @param U the desirability of each outcome level
#' @param ntrial the number of simulated trials
#' @param method whether the statistical test for interim/final analysis is Bayesian or
#' Frequentist. method = "Frequentist" for the Frequentist approach; method = "Bayesian"
#' for the Bayesian approach
#' @param n_range the additional sample size searched for each arm at each stage
#' beyond n_po and n_npo
#'
#' @details
#' A grid search is used to calibrate the futility and superiority cutoffs so that
#' the desired type I error rate is guaranteed. The sample-size search then runs
#' from n_po (respectively n_npo) up to n_po + n_range (respectively
#' n_npo + n_range) in increments of 10.
#'
#' For the parameter estimation section we have two options, selected via the
#' method argument: (i) method = "Frequentist", (ii) method = "Bayesian".
#' If method = "Frequentist", parameters are estimated via the package ordinal,
#' which is based on frequentist methods, while if method = "Bayesian", parameters
#' are estimated through a Bayesian model.
#'
#' Specifically, the numerical utilities U reflect the desirability of each outcome
#' level. To elicit them, in our example we first set the utility of the best level
#' to 100 and of the worst level to 0, and then asked physicians to specify
#' numerical values for the intermediate levels that reflect their desirability
#' relative to the best and worst levels.
#'
#' The arguments n_po and n_npo are the estimated sample sizes for the treatment
#' and control groups at each stage based on the PO model and the NPO model,
#' respectively. Users can obtain them through the functions ss_po and ss_npo,
#' as sketched after this function.
#'
#'
#' @examples
#' \donttest{
#' ss_switch(alpha = 0.05, power = 0.8, n_po = 475, n_npo = 75, n_range = 10,
#'           or_alt = c(1.5,1.5,1.5,1.5,1.5), pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13),
#'           U = c(100,80,65,25,10,0), ntrial = 5, method = "Frequentist")
#' }
#'

## sample size calculator
ss_switch = function(alpha, power, n_po, n_npo, or_alt, pro_ctr, U, ntrial,
                     method, n_range){

  # under null, calculate thresholds
  or = rep(1, length(pro_ctr)-1)
  cf_grid = 0.2 #seq(0.7, 0.7, by=0.05)
  threshold_grid = seq(0.99, 0.99, by=0.01)

  output = c()
  for (cf in cf_grid){
    for (threshold in threshold_grid){
      # the noise-adding step is integrated in the function
      out = multiple_trial_switch(or, sim_runs = ntrial, sd = 0.2, pro_ctr,
                                  n_po = n_po, n_npo = n_npo, U,
                                  cf, threshold, method = method)
      rr = c(cf, threshold, out)
      output = rbind(output, rr)
      colnames(output)[1:5] = c("cf", "threshold", "PET(%)", "alpha", "Avg SS")
      results = as.data.frame(output)
    }
  }
  index = min(which(abs(results$alpha-alpha)==min(abs(results$alpha-alpha))))
  vec = c(results[index, c(1,2,4)])
  thrsh = c(vec$cf, vec$threshold)
  names(thrsh) = c("futility", "superiority")

  # calculate power, using the user-supplied effect size or_alt
  po = seq(n_po, n_po+n_range, by = 10)
  npo = seq(n_npo, n_npo+n_range, by = 10)
  ngrid = cbind(po, npo)

  output = c()
  for (i in 1:dim(ngrid)[1]){
    out = multiple_trial_switch(or_alt, sim_runs = ntrial, sd = 0.2,
                                pro_ctr = pro_ctr,
                                n_po = ngrid[i,1], n_npo = ngrid[i,2], U,
                                cf = vec$cf, threshold = vec$threshold,
                                method = method)
    output = as.data.frame(rbind(output, out))
    colnames(output) = c("PET(%)", "Power(%)", "Avg SS", "PO(%)", "NPO(%)")
  }

  results = list()
  index = min(which(output[,2] > power))
  results$total_sample_size_for_each_group = output[index, 3]
  results$power = output[index, 2]*100
  results$threshold = thrsh
  results$typeIerror = round(vec$alpha, digits = 3)
  model_sele = output[index, c(4,5)]
  names(model_sele) = c("PO(%)", "NPO(%)")
  results$model_selection = model_sele
  return(results)
}
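## Hedged usage sketch (added for illustration): as the documentation above
## notes, n_po and n_npo are meant to come from ss_po() and ss_npo().
if (interactive()) {
  pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13)
  U = c(100,80,65,25,10,0)
  n_po = ss_po(or_alt = 1.5, pro_ctr = pro_ctr, alpha = 0.05, power = 0.8,
               nmax = 100, ntrial = 5,
               method = "Frequentist")$total_sample_size_for_each_group
  n_npo = ss_npo(nmax = 200, or_alt = c(1.6,1.5,1.5,1.4,1.4), pro_ctr = pro_ctr,
                 U = U, alpha = 0.05, power = 0.8, ntrial = 5,
                 method = "Frequentist")$total_sample_size_for_each_group
  ss_switch(alpha = 0.05, power = 0.8, n_po = n_po, n_npo = n_npo, n_range = 10,
            or_alt = c(1.5,1.5,1.5,1.5,1.5), pro_ctr = pro_ctr, U = U,
            ntrial = 5, method = "Frequentist")
}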
## ---- end of file: BayesOrdDesign/R/Bayes_Ord_Design_Switch.R ----
#' Bayesian ordinal regression analysis: estimate the regression coefficients
#' of the treatment variable, with and without the proportional odds assumption
#'
#' @description
#' Bayesian ordinal regression based on the cumulative likelihood function.
#' Estimates the regression coefficients of the treatment variable, with or
#' without the proportional odds assumption.
#'
#' @param formula a formula expression as for regression models, of the form response ~ predictors.
#' The response should be a factor (preferably an ordered factor), which will be interpreted as an
#' ordinal response with levels ordered as in the factor.
#' @param data a data frame in which to interpret the variables occurring in the formula.
#' @param structure the data structure, i.e., structure = "PO" or structure = "NPO".
#' @param U the desirability of each outcome level
#'
#' @details
#' This function estimates the coefficients and threshold coefficients.
#' Specifically, the numerical utilities U reflect the desirability of each outcome
#' level. To elicit them, in our example we first set the utility of the best level
#' to 100 and of the worst level to 0, and then asked physicians to specify
#' numerical values for the intermediate levels that reflect their desirability
#' relative to the best and worst levels.
#'
#' @return Bayes_ord() returns the regression coefficients, including: (1) estimated coefficients
#' (2) threshold coefficients
#'
#' @export
#'
#' @examples
#' \donttest{
#' ### Example One: PO data structure
#' fm1 = Bayes_ord(response~treatment, example.data, "PO")
#'
#' ### Example Two: NPO data structure
#' fm2 = Bayes_ord(response~treatment, example.data, "NPO", U = c(100,80,65,25,10,0))
#' }
Bayes_ord = function(formula, data, structure, U){

  output = list()
  if (missing(U)) U = NULL

  if (structure == "NPO" & length(U)==0) {
    stop("U (utility) must be specified when structure is NPO.")
  }
  if (structure == "PO" & is.numeric(U)) {
    stop("U should not be specified when structure is PO.")
  }

  if (structure == "PO"){
    results = jags_po_model(data)
    x = all.vars(formula)[1]
    C = nlevels(data[, match(x, colnames(data))])

    # summary statistics
    coef = results$EST[1, c(1,2)]
    coef_z = coef[1]/coef[2]
    pvalue = 2*pnorm(-abs(coef_z))  # two-sided p-value
    bet = c(coef, coef_z, pvalue)
    names(bet) = c("Estimate", "Std.Error", "z value", "Pr(>|z|)")

    thr_coef = results$EST[2:C, c(1,2)]
    thr_coef_z = thr_coef[,1]/thr_coef[,2]
    gam = cbind(thr_coef, thr_coef_z)
    colnames(gam) = c("Estimate", "Std.Error", "z value")

    names_gamma = c()
    for (i in 1:length(thr_coef_z)){
      names_gamma = c(names_gamma, paste0(i, "|", i+1))
    }
    rownames(gam) = c(names_gamma)

    output$Coefficients = round(bet, digits = 3)
    output$`Threshold coefficients` = round(gam, digits = 3)

  } else if (structure == "NPO"){
    results = jags_npo_model(data, U)
    x = all.vars(formula)[1]
    C = nlevels(data[, match(x, colnames(data))])

    # summary statistics
    coef = results$EST[1:(length(U)-1), c(1,2)]
    coef_z = coef[,1]/coef[,2]
    pvalue = 2*pnorm(-abs(coef_z))  # two-sided p-value
    bet = cbind(coef, coef_z, pvalue)
    colnames(bet) = c("Estimate", "Std.Error", "z value", "Pr(>|z|)")

    thr_coef = results$EST[C:((C-1)*2), c(1,2)]
    thr_coef_z = thr_coef[,1]/thr_coef[,2]
    gam = cbind(thr_coef, thr_coef_z)
    colnames(gam) = c("Estimate", "Std.Error", "z value")

    names_gamma = c()
    names_beta = c()
    for (i in 1:length(thr_coef_z)){
      names_beta = c(names_beta, paste0(i, "|", i+1, ".treatment"))
      names_gamma = c(names_gamma, paste0(i, "|", i+1))
    }
    rownames(gam) = c(names_gamma)
    rownames(bet) = c(names_beta)

    output$Coefficients = round(bet, digits = 3)
    output$`Threshold coefficients` = round(gam, digits = 3)
  }
  return(output)
}
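## Hedged usage sketch (added for illustration): reading the fitted tables
## returned by Bayes_ord(); element names follow the code above.
if (interactive()) {
  fm1 = Bayes_ord(response ~ treatment, example.data, "PO")
  fm1$Coefficients              # treatment effect on the log-odds scale
  fm1$`Threshold coefficients`  # ordered cutpoints gamma_1 < ... < gamma_{C-1}
}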
## ---- end of file: BayesOrdDesign/R/Bayes_ord.R ----
#' Generate operating characteristics for a Bayesian two-stage trial design
#' of ordinal endpoints without the proportional odds assumption
#'
#' @description
#' Obtain the operating characteristics (OC) of the Bayesian two-stage trial
#' design with ordinal endpoints when the proportional odds assumption is
#' violated.
#'
#' @param alpha the desired type I error to be controlled
#' @param pro_ctr distribution of clinical categories for the
#' control group
#' @param U the desirability of each outcome level
#' @param fixed_ss fixed sample size when simulating the OC for various effect
#' sizes
#' @param ors a user-defined matrix; each row denotes a scenario, and
#' the number of columns depends on the number of outcome scales.
#' @param nmax the maximum sample size when simulating the OC for different sample
#' sizes; the increment is 50 and the initial sample size is 50 for each arm at each stage.
#' @param fixed_es fixed effect size when simulating the OC for various sample
#' sizes
#' @param ntrial the number of simulated trials
#' @param method whether the statistical test for interim/final analysis is Bayesian or
#' Frequentist. method = "Frequentist" for the Frequentist approach; method = "Bayesian"
#' for the Bayesian approach
#'
#' @details
#' A grid search is used to calibrate the futility and superiority cutoffs so that
#' the desired type I error rate is guaranteed. The sample-size sequence starts at
#' 50 subjects for the control and treatment groups at each stage, with a default
#' increment of 50, up to nmax.
#'
#' For the parameter estimation section we have two options, selected via the
#' method argument: (i) method = "Frequentist", (ii) method = "Bayesian".
#' If method = "Frequentist", parameters are estimated via the package ordinal,
#' which is based on frequentist methods, while if method = "Bayesian", parameters
#' are estimated through a Bayesian model.
#'
#' Specifically, the numerical utilities U reflect the desirability of each outcome
#' level. To elicit them, in our example we first set the utility of the best level
#' to 100 and of the worst level to 0, and then asked physicians to specify
#' numerical values for the intermediate levels that reflect their desirability
#' relative to the best and worst levels.
#'
#' The function provides two types of operating characteristics via simulation.
#' If the user specifies ors and fixed_ss, the function calculates the design's power
#' in terms of effect size. If the user specifies nmax and fixed_es, the function
#' calculates the design's power in terms of sample size, where nmax is the upper
#' limit of the sample size for the treatment and control groups at each stage and
#' the lower limit is 50.
#'
#' Please note, in our example, the argument ntrial = 5 is used for time-saving
#' purposes only.
#'
#'
#'
#' @return get_oc_NPO() returns the operating characteristics of the design as a
#' table, including (1) the user-defined value, either sample size or effect size
#' (2) the corresponding power (3) the average sample size
#' @export
#' @import rjmcmc
#' @import stats
#' @importFrom ggplot2 ggplot
#' @importFrom ordinal clm
#' @import superdiag
#' @import rjags
#' @import R2jags
#' @import gsDesign
#' @import schoolmath
#' @importFrom madness det
#' @importFrom graphics matplot
#' @importFrom stats predict.lm
#' @importFrom methods is
#'
#' @examples
#'
#' ors = matrix(c(1.5,1.5,1,1,1,1.5,1.5,1.1,1.1,1.1), nrow=2, ncol=5, byrow=TRUE)
#'
#' get_oc_NPO(alpha = 0.05, pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13),
#'            U = c(100,80,65,25,10,0), fixed_ss = 200, ors, ntrial = 5,
#'            method = "Frequentist")
#'
#' set.seed(123)
#' get_oc_NPO(alpha = 0.05, pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13),
#'            U = c(100,80,65,25,10,0), nmax = 100, fixed_es = c(1.5,1.3,1,1,1),
#'            ntrial = 5, method = "Frequentist")
#'
get_oc_NPO = function(alpha, pro_ctr, U, fixed_ss, ors, nmax, fixed_es,
                      ntrial, method){

  N = 100
  if (missing(nmax)) nmax = NULL
  if (missing(ors)) ors = NULL
  if (missing(fixed_es)) fixed_es = NULL
  if (missing(fixed_ss)) fixed_ss = NULL

  if (is.numeric(nmax) & is.numeric(ors)) {
    stop("nmax and ors can not be specified at the same time.")
  }
  if (is.numeric(fixed_es) & is.numeric(fixed_ss)) {
    stop("fixed_es and fixed_ss can not be specified at the same time.")
  }

  or = rep(1, length(pro_ctr)-1)
  or.mat = matrix(rep(or, ntrial), nrow = ntrial, ncol = length(or), byrow = TRUE)

  # calculate threshold through grid search
  cf_grid = 0.2 #seq(0.5, 0.7, by=0.1)
  threshold_grid = seq(0.8, 0.95, by=0.05)

  output = c()
  for (cf in cf_grid){
    for (threshold in threshold_grid){
      out = multiple_trial_npo(or.mat, sd = 0.2, pro_ctr, U, n = N,
                               cf = cf, threshold = threshold, method = method)
      rr = c(cf, threshold, out)
      output = rbind(output, rr)
      colnames(output) = c("cf", "threshold", "PET(%)", "alpha", "Avg SS")
      results = as.data.frame(output)
    }
  }
  index = min(which(abs(results$alpha-alpha)==min(abs(results$alpha-alpha))))
  vec = c(results[index, c(1,2,4)])
  thrsh = c(vec$cf, vec$threshold)
  names(thrsh) = c("futility", "superiority")

  output = c()
  if (is.numeric(fixed_es) & is.numeric(nmax)){
    or.mat = matrix(rep(fixed_es, ntrial), nrow = ntrial,
                    ncol = length(fixed_es), byrow = TRUE)
    n_grid = seq(50, nmax, by = 50)
    for (n in n_grid){
      out = multiple_trial_npo(or.mat, sd = 0.2, pro_ctr, U, n = n,
                               cf = vec$cf, threshold = vec$threshold,
                               method = method)
      rr = c(n, out)
      output = rbind(output, rr)
      colnames(output) = c("Sample Size", "PET(%)", "Power(%)", "Avg SS")
    }
  } else if (is.numeric(fixed_ss) & is.numeric(ors)){
    for (i in 1:dim(ors)[1]){
      or = ors[i,]
      p2 = pro_trt_cal(or, pro_ctr)
      dif_utility = round(mean_u(U, pro_ctr, p2)[3], digits = 2)
      or.mat = matrix(rep(or, ntrial), nrow = ntrial, ncol = length(or),
                      byrow = TRUE)
      out = multiple_trial_npo(or.mat, sd = 0.2, pro_ctr, U, n = fixed_ss,
                               cf = vec$cf, threshold = vec$threshold,
                               method = method)
      rr = c(dif_utility, out)
      output = rbind(output, rr)
      colnames(output) = c("Effect Size", "PET(%)", "Power(%)", "Avg SS")
    }
  }
  output[,3] = output[,3]*100
  rownames(output) = paste0("Scenario ", 1:dim(output)[1])
  results = list()
  results$design = output
  results$threshold = thrsh
  results$typeIerror = round(vec$alpha, digits = 3)
  return(results)
}
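## Hedged illustration (added): the "Effect Size" column reported above is the
## difference in mean utility between the arms, computed by mean_u() from the
## utilities U and the two category distributions.
if (interactive()) {
  U = c(100,80,65,25,10,0)
  pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13)
  p_trt = pro_trt_cal(c(1.5,1.5,1,1,1), pro_ctr)
  mean_u(U, pro_ctr, p_trt)[3]  # mean-utility gain of treatment over control
}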
## ---- end of file: BayesOrdDesign/R/OC_func_NPO.R ----
#' Generate operating characteristics for a Bayesian two-stage trial design
#' of ordinal endpoints with the proportional odds assumption
#'
#' @description
#' Obtain the operating characteristics (OC) of the Bayesian two-stage trial
#' design of ordinal endpoints under the proportional odds assumption.
#'
#' @param alpha the desired type I error rate to be controlled
#' @param pro_ctr distribution of clinical categories for the
#' control group
#' @param nmax the maximum sample size for the operating characteristics
#' @param fixed_es fixed effect size when simulating the OC for various sample
#' sizes
#' @param ormax the maximum effect size for the OC
#' @param fixed_ss fixed sample size when simulating the OC for various effect
#' sizes
#' @param ntrial the number of simulated trials
#' @param method whether the statistical test for interim/final analysis is Bayesian or
#' Frequentist. method = "Frequentist" for the Frequentist approach; method = "Bayesian"
#' for the Bayesian approach
#'
#'
#' @details
#' A grid search is used to calibrate the futility and superiority cutoffs so that
#' the desired type I error rate is guaranteed. The sample-size sequence starts at
#' 50 subjects for the control and treatment groups at each stage, with a default
#' increment of 50, up to nmax.
#'
#' For the parameter estimation section we have two options, selected via the
#' method argument: (i) method = "Frequentist", (ii) method = "Bayesian".
#' If method = "Frequentist", parameters are estimated via the package ordinal,
#' which is based on frequentist methods, while if method = "Bayesian", parameters
#' are estimated through a Bayesian model.
#'
#' Two types of operating characteristics can be implemented through this function.
#'
#' Please note, in our example, the argument ntrial = 5 is used for time-saving
#' purposes only.
#'
#' @return get_oc_PO() returns the operating characteristics of the design as a
#' table, including: (1) the user-defined value, either sample size or effect size
#' (2) the corresponding power (3) the average sample size
#' @export
#'
#'
#' @examples
#'
#' get_oc_PO(alpha = 0.05, pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13),
#'           ormax = 1.5, fixed_ss = 150,
#'           ntrial = 5, method = "Frequentist")
#'
#'
#' get_oc_PO(alpha = 0.05, pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13),
#'           nmax = 200, fixed_es = 1.5,
#'           ntrial = 5, method = "Frequentist")
#'
get_oc_PO = function(alpha, pro_ctr, nmax, fixed_es, ormax, fixed_ss,
                     ntrial, method){

  N = 200
  if (missing(nmax)) nmax = NULL
  if (missing(ormax)) ormax = NULL
  if (missing(fixed_es)) fixed_es = NULL
  if (missing(fixed_ss)) fixed_ss = NULL

  if (is.numeric(nmax) & is.numeric(ormax)) {
    stop("nmax and ormax can not be specified at the same time.")
  }
  if (is.numeric(fixed_es) & is.numeric(fixed_ss)) {
    stop("fixed_es and fixed_ss can not be specified at the same time.")
  }

  output = c()
  # search cutoff points
  cf_grid = 0.2 #seq(0.6, 0.7, by=0.1)
  threshold_grid = seq(0.99, 0.999, by=0.001)

  log_or = rnorm(ntrial, log(1), sd = 0.2)
  or = exp(log_or)
  or.mat = matrix(rep(or, each = length(pro_ctr)-1, times = 1),
                  ncol = length(pro_ctr)-1, byrow = TRUE)

  for (cf in cf_grid){
    for (threshold in threshold_grid){
      out = multiple_trial_po(sim_runs = ntrial, or.mat, pro_ctr = pro_ctr,
                              n = N, cf = cf, threshold = threshold,
                              method = method)
      rr = c(cf, threshold, out)
      output = rbind(output, rr)
      colnames(output) = c("cf", "threshold", "PET(%)", "alpha", "Avg SS")
      results = as.data.frame(output)
    }
  }
  index = min(which(abs(results$alpha-alpha)==min(abs(results$alpha-alpha))))
  vec = c(results[index, c(1,2,4)])
  thrsh = c(vec$cf, vec$threshold)
  names(thrsh) = c("futility", "superiority")

  output = c()
  if (is.numeric(fixed_es) & is.numeric(nmax)){
    log_or = rnorm(ntrial, log(fixed_es), sd = 0.2)
    or = exp(log_or)
    or.mat = matrix(rep(or, each = length(pro_ctr)-1, times = 1),
                    ncol = length(pro_ctr)-1, byrow = TRUE)
    n_grid = seq(50, nmax, by = 50)
    for (n in n_grid){
      out = multiple_trial_po(sim_runs = ntrial, or.mat, pro_ctr = pro_ctr, n,
                              cf = vec$cf, threshold = vec$threshold,
                              method = method)
      rr = c(2*n, out)
      output = rbind(output, rr)
      colnames(output) = c("Sample Size", "PET(%)", "Power(%)", "Avg SS")
    }
  } else if (is.numeric(fixed_ss) & is.numeric(ormax)){
    or_seq = seq(1, ormax, by = 0.5)
    for (i in 1:length(or_seq)){
      or = or_seq[i]
      log_or = rnorm(ntrial, log(or), sd = 0.2)
      or.mat = matrix(rep(exp(log_or), each = length(pro_ctr)-1, times = 1),
                      ncol = length(pro_ctr)-1, byrow = TRUE)
      prob = multiple_trial_po(sim_runs = ntrial, or.mat, pro_ctr = pro_ctr,
                               n = fixed_ss, cf = vec$cf,
                               threshold = vec$threshold, method = method)
      rr = c(or, prob)
      output = rbind(output, rr)
      colnames(output) = c("Effect Size", "PET(%)", "Power(%)", "Avg SS")
    }
  }
  output[,3] = output[,3]*100
  rownames(output) = paste0("Scenario ", 1:dim(output)[1])
  results = list()
  results$design = output
  results$threshold = thrsh
  results$typeIerror = round(vec$alpha, digits = 3)
  return(results)
}
## ---- end of file: BayesOrdDesign/R/OC_func_PO.R ----
#' Generate operating characteristics for a Bayesian two-stage trial design
#' of ordinal endpoints based on the PO/NPO switch model
#'
#' @description
#' Obtain the operating characteristics (OC) of the Bayesian two-stage trial
#' design of ordinal endpoints based on the PO/NPO switch model, for use when
#' it is unclear whether the proportional odds assumption holds.
#'
#' @param alpha the desired type I error to be controlled.
#' @param pro_ctr distribution of clinical categories for the
#' control group.
#' @param U the desirability of each outcome level.
#' @param ors a user-defined matrix; each row denotes a scenario, and
#' the number of columns depends on the number of outcome scales.
#' @param n_range the additional sample size searched for each arm at each stage
#' beyond n_po and n_npo.
#' @param fixed_es fixed effect size when simulating the OC for various sample
#' sizes.
#' @param n_po sample size for the treatment and control groups, at each stage,
#' based on the PO model.
#' @param n_npo sample size for the treatment and control groups, at each stage,
#' based on the NPO model.
#' @param ntrial the number of simulated trials.
#' @param method whether the statistical test for interim/final analysis is Bayesian or
#' Frequentist. method = "Frequentist" for the Frequentist approach; method = "Bayesian"
#' for the Bayesian approach.
#'
#' @details
#' A grid search is used to calibrate the futility and superiority cutoffs so that
#' the desired type I error rate is guaranteed. The sample-size search runs from
#' n_po (respectively n_npo) upward in increments of 10.
#'
#' For the parameter estimation section we have two options, selected via the
#' method argument: (i) method = "Frequentist", (ii) method = "Bayesian".
#' If method = "Frequentist", parameters are estimated via the package ordinal,
#' which is based on frequentist methods, while if method = "Bayesian", parameters
#' are estimated through a Bayesian model.
#'
#' Specifically, the numerical utilities U reflect the desirability of each outcome
#' level. To elicit them, in our example we first set the utility of the best level
#' to 100 and of the worst level to 0, and then asked physicians to specify
#' numerical values for the intermediate levels that reflect their desirability
#' relative to the best and worst levels.
#'
#' The function provides two types of operating characteristics via simulation.
#' If the user specifies ors, the function calculates the design's power in terms
#' of effect size. If the user specifies n_range and fixed_es, the function
#' calculates the design's power in terms of sample size, where n_range is the
#' additional amount searched beyond n_po and n_npo for the treatment and control
#' groups at each stage, with a default increment of 10.
#'
#' The arguments n_po and n_npo are the estimated sample sizes for the treatment
#' and control groups at each stage based on the PO model and the NPO model,
#' respectively. Users can obtain them through the functions ss_po and ss_npo.
#'
#'
#' @return get_oc_Switch() returns the operating characteristics of the design as a
#' table, including (1) the user-defined value, either sample size or effect size
#' (2) the corresponding power (3) the average sample size
#' @export
#'
#' @examples
#'
#' \donttest{
#'
#' get_oc_Switch(alpha = 0.05, pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13),
#'               U = c(100,80,65,25,10,0), n_range = 10, fixed_es = c(1.5,1.5,1,1,1),
#'               n_po = 475, n_npo = 75, ntrial = 5, method = "Frequentist")
#'
#'
#' or2 = matrix(rep(seq(1,1.3, by=0.1), times=1, each=3), ncol = 3, byrow = TRUE)
#' or1 = matrix(rep(1.5, dim(or2)[1]*2), ncol = 2, byrow = TRUE)
#' ors = cbind(or1, or2)
#'
#' get_oc_Switch(alpha = 0.05, pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13),
#'               U = c(100,80,65,25,10,0), ors, n_po = 475, n_npo = 75,
#'               ntrial = 5, method = "Frequentist")
#' }
get_oc_Switch = function(alpha, pro_ctr, U, ors, n_range, fixed_es, n_po, n_npo,
                         ntrial, method){

  if (missing(n_range)) n_range = NULL
  if (missing(ors)) ors = NULL
  if (missing(fixed_es)) fixed_es = NULL

  if (is.numeric(n_range) & is.numeric(ors)) {
    stop("n_range and ors can not be specified at the same time.")
  }
  #if (is.numeric(fixed_es) & is.numeric(fixed_ss)) {
  #  stop("fixed_es and fixed_ss can not be specified at the same time.")
  #}

  # grid search
  or = rep(1, length(pro_ctr)-1)
  cf_grid = seq(0.7, 0.7, by = 0.1)
  threshold_grid = seq(0.9, 0.9, by = 0.1)

  output = c()
  for (cf in cf_grid){
    for (threshold in threshold_grid){
      out = multiple_trial_switch(or, sim_runs = ntrial, sd = 0.2, pro_ctr,
                                  n_po = n_po, n_npo = n_npo, U,
                                  cf, threshold, method = method)
      rr = c(cf, threshold, out)
      output = rbind(output, rr)
      colnames(output) = c("cf", "threshold", "PET(%)", "alpha", "Avg SS",
                           "PO(%)", "NPO(%)")
      results = as.data.frame(output)
    }
  }
  index = min(which(abs(results$alpha-alpha)==min(abs(results$alpha-alpha))))
  vec = c(results[index, c(1,2,4)])
  thrsh = c(vec$cf, vec$threshold)
  names(thrsh) = c("futility", "superiority")

  output = c()
  if (is.numeric(fixed_es) & is.numeric(n_range)){
    po = seq(n_po, n_po+n_range, by = 10)
    npo = seq(n_npo, n_npo+n_range, by = 10)
    ngrid = cbind(po, npo)
    for (i in 1:dim(ngrid)[1]){
      out = multiple_trial_switch(or = fixed_es, sim_runs = ntrial, sd = 0.2,
                                  pro_ctr,
                                  n_po = ngrid[i,1], n_npo = ngrid[i,2], U,
                                  #cf =0.6, threshold = 0.9,
                                  cf = vec$cf, threshold = vec$threshold,
                                  method = method)
      output = rbind(output, out)
      colnames(output) = c("PET(%)", "Power(%)", "Avg SS", "PO(%)", "NPO(%)")
    }
    output[,2] = output[,2]*100
  } else if (is.numeric(ors)){
    for (i in 1:dim(ors)[1]){
      or = ors[i,]
      p2 = pro_trt_cal(or, pro_ctr)
      dif_utility = round(mean_u(U, pro_ctr, p2)[3], digits = 2)
      out = multiple_trial_switch(or, sim_runs = ntrial, sd = 0.2, pro_ctr,
                                  n_po, n_npo, U,
                                  cf = vec$cf, threshold = vec$threshold,
                                  method = method)
      rr = c(dif_utility, out)
      output = rbind(output, rr)
      colnames(output) = c("Effect Size", "PET(%)", "Power(%)", "Avg SS",
                           "PO(%)", "NPO(%)")
    }
    output[,3] = output[,3]*100
  }
  output = as.data.frame(output)
  rownames(output) = paste0("Scenario ", 1:dim(output)[1])
  results = list()
  results$design = round(output, digits = 2)
  results$threshold = thrsh
  results$typeIerror = round(vec$alpha, digits = 2)
  #results$model_selection = c(output$`PO(%)`, output$`NPO(%)`)
  return(results)
}
## ---- end of file: BayesOrdDesign/R/OC_func_Switch.R ----
#' Clinical ordinal endpoints and treatment assignments for 200 patients
#'
#' A dataset containing the ordinal outcomes and corresponding groups.
#'
#' @format A data frame with 200 rows and 2 variables:
#' \describe{
#'   \item{response}{outcome}
#'   \item{treatment}{0 denotes control, 1 denotes treatment}
#'   ...
#' }
'example.data'
## ---- end of file: BayesOrdDesign/R/data.R ----
### data generator ###

pro_trt_cal = function(or, pro_ctr){
  # or:      vector of cumulative odds ratios (treatment vs control), one per cutpoint
  # pro_ctr: category probabilities for the control group
  pro_trt_sum = c()
  for (i in 1:length(or)){
    #pro_trt_sum[i] = 1/((1-sum(pro_ctr[1:i]))/(sum(pro_ctr[1:i])*or[i])+1)
    pro_trt_sum[i] = or[i]*sum(pro_ctr[1:i])/(1-sum(pro_ctr[1:i])+or[i]*sum(pro_ctr[1:i]))
  }
  pro_trt_sum_new = c(pro_trt_sum, 1)
  pro_trt = c(pro_trt_sum[1], diff(pro_trt_sum_new))
  return(pro_trt)
}

data_gene_npo = function(or, sd, pro_ctr, N){
  # or:      vector of cumulative odds ratios
  # sd:      SD of the log odds-ratio perturbation added per cutpoint
  # pro_ctr: category probabilities for the control group
  # N:       sample size per arm
  L = length(or)+1
  #x = 0
  # resample perturbed odds ratios until all treatment-arm probabilities
  # are nonnegative
  while (TRUE){
    log_or = unlist(lapply(or, function(x) rnorm(1, mean = log(x), sd = sd)))
    OR = exp(log_or)
    pro_trt = pro_trt_cal(OR, pro_ctr)
    s = sum(schoolmath::is.negative(pro_trt))
    #x = sum(x,1)
    if (s <= 0){break}
  }
  table = rbind(pro_ctr, pro_trt)
  rownames(table) = c("control", "treatment")
  count = round(table*N)
  treatment = c(rep("0", sum(count[1,])), rep("1", sum(count[2,])))
  response = c(rep(1:L, count[1,]), rep(1:L, count[2,]))
  data = data.frame(response, treatment)
  data$treatment = as.factor(data$treatment)
  data$response = as.factor(data$response)
  #output = list(pro_trt, OR, x)
  #names(output) = c('proportion_treatment', 'data', "count")
  #return(output)
  return(data)
}
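## Hedged illustration (added): pro_trt_cal() inverts the cumulative
## odds-ratio relation OR_i = odds(P(Y<=i | trt)) / odds(P(Y<=i | ctr)), so
## the recovered treatment-arm probabilities sum to 1 and reproduce the
## supplied odds ratios when they are compatible with pro_ctr.
if (interactive()) {
  pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13)
  p_trt = pro_trt_cal(c(1.6,1.5,1.5,1.4,1.4), pro_ctr)
  sum(p_trt)  # should be 1
  # the first cumulative odds ratio is recovered:
  (p_trt[1]/(1-p_trt[1])) / (pro_ctr[1]/(1-pro_ctr[1]))  # ~1.6
}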
## ---- end of file: BayesOrdDesign/R/data_generator_npo.R ----
pro_trt_cal = function(or, pro_ctr){
  # or:      vector of cumulative odds ratios (treatment vs control), one per cutpoint
  # pro_ctr: category probabilities for the control group
  pro_trt_sum = c()
  for (i in 1:length(or)){
    #pro_trt_sum[i] = 1/((1-sum(pro_ctr[1:i]))/(sum(pro_ctr[1:i])*or[i])+1)
    pro_trt_sum[i] = or[i]*sum(pro_ctr[1:i])/(1-sum(pro_ctr[1:i])+or[i]*sum(pro_ctr[1:i]))
  }
  pro_trt_sum_new = c(pro_trt_sum, 1)
  pro_trt = c(pro_trt_sum[1], diff(pro_trt_sum_new))
  return(pro_trt)
}

data_gene = function(or, sd, pro_ctr, N){
  # or:      vector of cumulative odds ratios
  # sd:      unused by this generator (kept for a signature parallel to data_gene_npo)
  # pro_ctr: category probabilities for the control group
  # N:       sample size per arm
  L = length(or)+1
  pro_trt = pro_trt_cal(or, pro_ctr)
  table = rbind(pro_ctr, pro_trt)
  rownames(table) = c("control", "treatment")
  count = round(table*N)
  treatment = c(rep("0", sum(count[1,])), rep("1", sum(count[2,])))
  response = c(rep(1:L, count[1,]), rep(1:L, count[2,]))
  data = data.frame(response, treatment)
  data$treatment = as.factor(data$treatment)
  data$response = as.factor(data$response)
  #output = list(pro_trt, data)
  #names(output) = c('proportion_treatment', 'data')
  return(data)
}
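## Hedged illustration (added): generating a PO dataset and tabulating it.
## data_gene() rounds expected counts, so each arm has roughly N subjects.
if (interactive()) {
  pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13)
  dat = data_gene(or = rep(1.5, 5), sd = 0.2, pro_ctr = pro_ctr, N = 100)
  table(dat$treatment, dat$response)  # counts per arm and outcome level
}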
## ---- end of file: BayesOrdDesign/R/data_simulator.R ----
#---------------------------------------------------------------------------- #single trial single_trial_po = function(or, pro_ctr, n, cf, threshold, method){ C = length(pro_ctr) #or = rep(or, C-1) dat_I = data_gene(or, sd, pro_ctr, n) N = n # Pick Bayesian or Frequentest method if (method == "Bayesian"){ results = jags_po_model(dat_I) post_draws_I = results$Samples[[1]][,1] pr_trt_futility_I = mean(post_draws_I < 0) }else{ fit.prior = ordinal::clm(response ~ treatment, data=dat_I, threshold = "flexible") mean = as.numeric(fit.prior$beta) sd = sqrt(fit.prior$vcov[C, C]) samples_draws_I = rnorm(10000, mean, sd) pr_trt_futility_I = mean(samples_draws_I < 0) } stop_for_futility = (pr_trt_futility_I <= cf) if(stop_for_futility){ result_string = "early stop due to the futility" }else{ #get the rest of data into trial #dat_II = data_gene(or, sd, pro_ctr, n) dat_II = dat_I N = N + n #combine two stages data dat = rbind(dat_I, dat_II) if (method == "Bayesian"){ results = jags_po_model(dat) post_draws_II = results$Samples[[1]][,1] pr_trt_futility_II = mean(post_draws_II < 0) }else{ fit.prior = ordinal::clm(response ~ treatment, data=dat, threshold = "flexible") mean = as.numeric(fit.prior$beta) sd = sqrt(fit.prior$vcov[C, C]) samples_draws_II = rnorm(10000, mean, sd) pr_trt_futility_II = mean(samples_draws_II < 0) } if(pr_trt_futility_II >= threshold) {result_string = "success" }else if(pr_trt_futility_II < threshold) {result_string = "failure"} } output = c(result_string, N) return(output) } #---------------------------------------------------------------------------- #multiple trial multiple_trial_po = function(sim_runs, or, pro_ctr, n, cf, threshold, method){ preout = apply(or, MARGIN = 1, FUN=single_trial_po, pro_ctr=pro_ctr, n=n, cf=cf, threshold=threshold, method = method) prob = round(mean(preout[1,] == "success"), digits = 4) prob_et = round(100*mean(preout[1,] == "early stop due to the futility"), digits = 2) avgss = round(mean(as.numeric(preout[2,])), digits = 0) return(c(prob_et, prob, avgss)) } #---------------------------------------------------------------------------- #single trial single_trial_npo = function(or, sd, pro_ctr, U, n, cf, threshold, method){ C = length(pro_ctr) dat_I = data_gene_npo(or, sd, pro_ctr, n) N = n fit.error = c() if (method == "Bayesian"){ results = jags_npo_model(dat_I, U) pr_trt_futility_I = results$PPUtilities }else if(method == "Frequentist"){ fit = ordinal::clm(response ~ treatment, nominal = ~ treatment, data=dat_I, threshold = "flexible") message = tryCatch(ordinal::clm(response ~ treatment, nominal = ~ treatment, data=dat_I, threshold = "flexible"), error=function(e) e, warning=function(w) w) #utility function u = utility_func(fit, U, C) #obtain utility from proportions pr_trt_futility_I = mean(u[2,] > u[1,]) if(is(message,"warning")){ fit.error = fit warnings_I = "Yes" }else{warnings_I = "No"} } if(is.na(pr_trt_futility_I)){ stop_for_futility = TRUE warnings_I = "No pr_trt_futility_I" }else{stop_for_futility = (pr_trt_futility_I <= cf)} # check if warning message exists warnings_II = "No" if(stop_for_futility){ result_string = "early stop due to the futility" }else{ #get the rest of data into trial dat_II = dat_I N = N + n #combine two stages data dat = rbind(dat_I, dat_II) if (method == "Bayesian"){ results = jags_npo_model(dat, U) pr_trt_futility_II = results$PPUtilities }else if(method == "Frequentist"){ message = tryCatch(ordinal::clm(response ~ treatment, nominal = ~ treatment, data=dat, threshold = "flexible"), error=function(e) e, 
warning=function(w) w) fit = ordinal::clm(response ~ treatment, nominal = ~ treatment, data=dat, threshold = "flexible") #utility function u = utility_func(fit, U, C) #obtain utility from proportions pr_trt_futility_II = mean(u[2,] > u[1,]) # check if warning message exists if(is(message,"warning")) { warnings_II = "Yes" }else{warnings_II = "No"} } if(pr_trt_futility_II >= threshold) { result_string = "success" } else if(pr_trt_futility_II < threshold) { result_string = "failure" } } #return(fit.error) return(c(result_string, N, warnings_I, warnings_II)) } #---------------------------------------------------------------------------- #multiple trial multiple_trial_npo = function(or.mat, sd, pro_ctr, U, n, cf, threshold, method){ preout = apply(or.mat, MARGIN = 1, FUN=single_trial_npo, sd = sd, pro_ctr=pro_ctr, U=U, n=n, cf=cf, threshold = threshold, method = method) # remove those results with warning messages preout_filter = preout[,which(preout[3,] == "No" & preout[4,] == "No")] prob = round(mean(preout_filter[1,] == "success"),digits = 4) prob_et = round(100*mean(preout_filter[1,] == "early stop due to the futility"),digits = 2) avgss = round(mean(as.numeric(preout_filter[2,])),digits = 0) return(c(prob_et, prob, avgss)) } # 3rd design - switch model # 3rd design - switch model single_trial_switch = function(or, sd, pro_ctr, n_po, n_npo, U, cf, threshold, alpha, power, method){ model_sele = NA; #choose larger sample size for stage 1 if(n_po >= n_npo){ n = n_po }else{n = n_npo} C = length(pro_ctr) if(all(or == or[1])){ dat_I = data_gene(or, sd, pro_ctr, n) }else{ dat_I = data_gene_npo(or, sd, pro_ctr, n) } N = n #1. Stage I select PO or NPO through MCMC result_rjmcmc = rjmcmc_inter(or, sd, pro_ctr, n, U) prob = result_rjmcmc$result$`Posterior Model Probabilities` #print(prob) #2. Estimate parameters based on the model selection result (PO/NPO) if(prob[1] > prob[2]){ model_sele = 'PO' results = jags_po_model(dat_I) post_draws_I = results$Samples[[1]][,1] pr_trt_futility_I = mean(post_draws_I < 0) }else{ model_sele = 'NPO' results = jags_npo_model(dat_I, U) pr_trt_futility_I = results$PPUtilities } stop_for_futility = (pr_trt_futility_I <= cf) warnings = "No" #3. Decide if we go/no go to stage 2 if(stop_for_futility){ result_string = "early stop due to the futility" }else{ #4. 
Re-estimate sample size based on the model selection result if(prob[1] > prob[2]){ dat_II = data_gene(or, sd, pro_ctr, n_po) N = N + n_po dat = rbind(dat_I, dat_II) if(method == "Bayesian"){ results = jags_po_model(dat) post_draws_II = results$Samples[[1]][,1] pr_trt_futility_II = mean(post_draws_II < 0) }else if(method == "Frequentist"){ fit.prior = ordinal::clm(response ~ treatment, data=dat, threshold = "flexible") mean.fit = as.numeric(fit.prior$beta) sd.fit = sqrt(fit.prior$vcov[C, C]) samples_draws_II = rnorm(10000, mean.fit, sd.fit) pr_trt_futility_II = mean(samples_draws_II < 0) } } if(prob[2] > prob[1]){ dat_II = dat_I #dat_II = data_gene_npo(or, sd, pro_ctr, n_npo) N = N + n_npo dat = rbind(dat_I, dat_II) if(method == "Bayesian"){ results = jags_npo_model(dat, U) pr_trt_futility_II = results$PPUtilities }else if(method == "Frequentist"){ message = tryCatch(ordinal::clm(response ~ treatment, nominal = ~ treatment, data=dat, threshold = "flexible"), error=function(e) e, warning=function(w) w) fit = ordinal::clm(response ~ treatment, nominal = ~ treatment, data=dat, threshold = "flexible") #utility function u = utility_func(fit, U, C) #obtain utility from proportions pr_trt_futility_II = mean(u[2,] > u[1,]) # check if warning message exists if(is(message,"warning")) { warnings = "Yes" }else{warnings = "No"} } } if(pr_trt_futility_II >= threshold){ result_string = "success" }else if(pr_trt_futility_II < threshold) {result_string = "failure"} } output = c(result_string, N, warnings, model_sele) return(output) } multiple_trial_switch = function(or, sim_runs, sd, pro_ctr, n_po, n_npo, U, cf, threshold, method){ #nmax: maximum sample size used for type I error control #n: sample size for first stage if(all(or == or[1])){ #random generate or ors = exp(rnorm(sim_runs, mean = log(or[1]), sd = sd)) or.mat = matrix(rep(ors, times = 1, each=length(or)), nrow = sim_runs, ncol = length(or), byrow = TRUE) }else{ #directly put into func or.mat = matrix(rep(or, sim_runs), nrow = sim_runs, ncol = length(or), byrow = TRUE) } preout = apply(or.mat, MARGIN = 1, FUN = single_trial_switch, sd=sd, pro_ctr, n_po, n_npo, U, cf = cf, threshold=threshold, method = method) preout_filter = preout[,which(preout[3,] == "No")] prob = mean(preout_filter[1,] == "success") prob_et = 100*mean(preout_filter[1,] == "early stop due to the futility") avgss = mean(as.numeric(preout_filter[2,])) #m = table(factor(preout_filter[4,], levels = c('PO', 'NPO')))/dim(preout_filter[,which(preout_filter[1,]!= 'early stop due to the futility')])[2]*100 m = table(factor(preout_filter[4,], levels = c('PO', 'NPO')))/dim(preout_filter)[2]*100 return(c(prob_et, prob, avgss, m)) } single_trial_switch_new = function(or, sd, pro_ctr, n, U, cf, threshold, alpha, power, method){ model_sele = NA; C = length(pro_ctr) if(all(or == or[1])){ dat_I = data_gene(or, sd, pro_ctr, n) }else{ dat_I = data_gene_npo(or, sd, pro_ctr, n) } N = n #1. Stage I select PO or NPO through MCMC result_rjmcmc = rjmcmc_inter(or, sd, pro_ctr, n, U) prob = result_rjmcmc$result$`Posterior Model Probabilities` #print(prob) #2. Estimate parameters based on the model selection result (PO/NPO) if(prob[1] > prob[2]){ model_sele = 'PO' results = jags_po_model(dat_I) post_draws_I = results$Samples[[1]][,1] pr_trt_futility_I = mean(post_draws_I < 0) }else{ model_sele = 'NPO' results = jags_npo_model(dat_I, U) pr_trt_futility_I = results$PPUtilities } stop_for_futility = (pr_trt_futility_I <= cf) warnings = "No" #3. 
  #3. Decide whether to proceed to stage 2
  if(stop_for_futility){
    result_string = "early stop due to the futility"
  }else{
    #4. Re-estimate sample size based on the model selection result
    if(prob[1] > prob[2]){
      dat_II = data_gene(or, sd, pro_ctr, n)
      N = N + n
      dat = rbind(dat_I, dat_II)
      if(method == "Bayesian"){
        results = jags_po_model(dat)
        post_draws_II = results$Samples[[1]][,1]
        pr_trt_futility_II = mean(post_draws_II < 0)
      }else if(method == "Frequentist"){
        fit.prior = ordinal::clm(response ~ treatment, data = dat, threshold = "flexible")
        mean.fit = as.numeric(fit.prior$beta)
        sd.fit = sqrt(fit.prior$vcov[C, C])
        samples_draws_II = rnorm(10000, mean.fit, sd.fit)
        pr_trt_futility_II = mean(samples_draws_II < 0)
      }
    }
    if(prob[2] > prob[1]){
      dat_II = dat_I
      #dat_II = data_gene_npo(or, sd, pro_ctr, n_npo)
      N = N + n
      dat = rbind(dat_I, dat_II)
      if(method == "Bayesian"){
        results = jags_npo_model(dat, U)
        pr_trt_futility_II = results$PPUtilities
      }else if(method == "Frequentist"){
        message = tryCatch(ordinal::clm(response ~ treatment, nominal = ~ treatment,
                                        data = dat, threshold = "flexible"),
                           error = function(e) e,
                           warning = function(w) w)
        fit = ordinal::clm(response ~ treatment, nominal = ~ treatment,
                           data = dat, threshold = "flexible")
        # utility function
        u = utility_func(fit, U, C)
        # obtain utility from proportions
        pr_trt_futility_II = mean(u[2,] > u[1,])
        # check if a warning message exists
        if(is(message, "warning")){
          warnings = "Yes"
        }else{warnings = "No"}
      }
    }
    if(pr_trt_futility_II >= threshold){
      result_string = "success"
    }else if(pr_trt_futility_II < threshold){result_string = "failure"}
  }

  output = c(result_string, N, warnings, model_sele)
  return(output)
}


multiple_trial_switch_new = function(or, sim_runs, sd, pro_ctr, n, U, cf,
                                     threshold, method){
  #nmax: maximum sample size used for type I error control
  #n: sample size for first stage
  if(all(or == or[1])){
    # randomly generate odds ratios around the common value
    ors = exp(rnorm(sim_runs, mean = log(or[1]), sd = sd))
    or.mat = matrix(rep(ors, times = 1, each = length(or)), nrow = sim_runs,
                    ncol = length(or), byrow = TRUE)
  }else{
    # use the supplied odds ratios directly
    or.mat = matrix(rep(or, sim_runs), nrow = sim_runs, ncol = length(or),
                    byrow = TRUE)
  }

  # NOTE: fixed to call single_trial_switch_new; the original called
  # single_trial_switch, whose signature (n_po, n_npo) does not match the
  # arguments supplied here.
  preout = apply(or.mat, MARGIN = 1, FUN = single_trial_switch_new, sd = sd,
                 pro_ctr, n, U, cf = cf, threshold = threshold, method = method)
  preout_filter = preout[, which(preout[3,] == "No")]

  prob = round(mean(preout_filter[1,] == "success"), digits = 4)
  prob_et = round(100*mean(preout_filter[1,] == "early stop due to the futility"), digits = 2)
  avgss = round(mean(as.numeric(preout_filter[2,])), digits = 0)
  m = table(factor(preout_filter[4,], levels = c('PO', 'NPO')))/
    dim(preout_filter[, which(preout_filter[1,] != 'early stop due to the futility')])[2]*100

  return(c(prob_et, prob, avgss, m))
}


mean_u = function(U, p1, p2){
  u1 = t(U)%*%p1
  u2 = t(U)%*%p2
  dif = u2 - u1
  return(c(u1, u2, dif))
}
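## Illustrative sketch of the switch-design driver (not run; computationally
## heavy, requires JAGS). The argument values below are assumptions for
## demonstration only: pro_ctr and U mirror the example in rjmcmc_func(),
## while cf and threshold are hypothetical design cutoffs.
# multiple_trial_switch(or = rep(1.4, 5), sim_runs = 100, sd = 0.2,
#                       pro_ctr = c(0.58, 0.05, 0.17, 0.03, 0.04, 0.13),
#                       n_po = 100, n_npo = 120,
#                       U = c(100, 80, 65, 25, 10, 0),
#                       cf = 0.1, threshold = 0.9, method = "Bayesian")
# returns c(early-stopping %, power or type I error, average sample size,
#           % of trials selecting PO, % of trials selecting NPO)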
/scratch/gouwar.j/cran-all/cranData/BayesOrdDesign/R/funcs.R
jags_npo_model = function(dat, U){

  dat.ord <- dat
  dat.ord$treatment <- factor(dat.ord$treatment)
  dat.ord$response <- factor(dat.ord$response)

  #fit.prior <- clm(response ~ treatment, nominal = ~ treatment,
  #                 data=dat.ord, threshold = "flexible")
  #print(fit.prior)

  modelstring = "
  model {
    for (c in 1:(C-1)){
      logit(Q[1,c]) <- gamma[c]
      logit(Q[2,c]) <- gamma[c] - delta[c]
    }
    for (j in 1:2){
      prob[j,1] <- Q[j,1]
      for (c in 2:(C-1)){
        prob[j,c] <- Q[j,c] - Q[j,(c-1)]
      }
      prob[j,C] <- 1 - Q[j,(C-1)]
    }
    for (i in 1:N){
      y[i] ~ dcat(prob[x[i],])
    }
    # Prior
    for (c in 1:(C-1)){
      gamma.star[c] ~ dnorm(0, 0.1)
      delta[c] ~ dnorm(b[c], sd.prior[c])
    }
    gamma[1:(C-1)] <- sort(gamma.star)
    # Mean Utilities
    for (j in 1:2){
      u[j] <- inprod(U[1:C], prob[j,1:C])
    }
  }"

  x <- as.numeric(dat.ord$treatment)
  y <- as.numeric(dat.ord$response)
  data <- list(x = x, y = y, N = length(x),
               C = length(unique(dat.ord$response)), U = U)
  #hyper <- list(b = -as.numeric(fit.prior$alpha.mat[2, 1:(data$C-1)]),
  #              sd.prior = as.vector(sqrt(diag(as.matrix(fit.prior$vcov))[data$C:dim(fit.prior$vcov)[1]]))
  #)
  C = length(unique(dat.ord$response))
  hyper <- list(b = rep(0, (C-1)), sd.prior = rep(0.1, (C-1)))
  init = list(delta = rep(0, (C-1)), gamma.star = rnorm(C-1))

  model = rjags::jags.model(textConnection(modelstring),
                            data = append(data, hyper),
                            n.chains = 1, inits = init, n.adapt = 500)
  update(model, n.iter = 2000)
  output = rjags::coda.samples(model = model,
                               variable.names = c("gamma", "delta", "u"),
                               n.iter = 20000, thin = 1)

  ess = coda::effectiveSize(output)
  estimation = summary(output)$statistics
  samples = as.matrix(output)
  # P(mean utility of treatment > mean utility of control); u[2] and u[1]
  # are the last two columns of the sample matrix
  pp.u = mean(samples[, dim(samples)[2]] > samples[, (dim(samples)[2]-1)])

  SummaryOutput = list(estimation, ess, output, pp.u)
  names(SummaryOutput) = c("EST", "ESS", "Samples", "PPUtilities")
  return(SummaryOutput)
}
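## Sketch of a call (not run; requires JAGS and the rjags package). The toy
## data below is an assumption for illustration only.
# dat <- data.frame(treatment = rep(0:1, each = 50),
#                   response  = sample(1:6, 100, replace = TRUE))
# out <- jags_npo_model(dat, U = c(100, 80, 65, 25, 10, 0))
# out$PPUtilities   # posterior P(mean utility, treatment > control)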
/scratch/gouwar.j/cran-all/cranData/BayesOrdDesign/R/jags_npo.R
jags_po_model = function(dat){

  dat.ord <- dat
  dat.ord$treatment <- factor(dat.ord$treatment)
  dat.ord$response <- factor(dat.ord$response)

  fit.prior <- ordinal::clm(response ~ treatment, data = dat.ord,
                            threshold = "flexible")

  modelstring = "
  model {
    for (i in 1:N){
      mu[i] <- x[i]*delta
      logit(Q[i,1]) <- gamma[1] - mu[i]
      prob[i,1] <- Q[i,1]
      for (c in 2:(C-1)){
        logit(Q[i,c]) <- gamma[c] - mu[i]
        prob[i,c] <- Q[i,c] - Q[i,(c-1)]
      }
      prob[i,C] <- 1 - Q[i,(C-1)]
      y[i] ~ dcat(prob[i,1:C])
    }
    for (c in 1:(C-1)){
      gamma.star[c] ~ dnorm(0, 0.1)
    }
    gamma[1:(C-1)] <- sort(gamma.star)
    for (j in 1:4){
      z[j] ~ dnorm(0, 1/(sigma^2))
    }
    delta ~ dnorm(b, sd.prior)
    alpha ~ dnorm(0, 1)
  }"

  x <- as.numeric(dat.ord$treatment) - 1
  y <- as.numeric(dat.ord$response)
  data <- list(x = x, y = y, N = length(x),
               C = length(unique(dat.ord$response)))
  hyper <- list(#gamma1.prior = as.numeric(fit.prior$coefficients[1]),
                #gamma5.prior = as.numeric(fit.prior$coefficients[5]),
                b = as.numeric(fit.prior$beta),
                sd.prior = sqrt(fit.prior$vcov[data$C, data$C]),
                sigma = 1.5)
  init = list(gamma.star = rnorm(data$C-1), delta = rnorm(1), z = rnorm(4))

  model = rjags::jags.model(textConnection(modelstring),
                            data = append(data, hyper),
                            n.chains = 3, inits = init, n.adapt = 500)
  update(model, n.iter = 2000)
  output = rjags::coda.samples(model = model,
                               variable.names = c("gamma", "delta", "z"),
                               n.iter = 20000, thin = 1)

  ess = coda::effectiveSize(output)
  estimation = summary(output)$statistics

  SummaryOutput = list(estimation, ess, output)
  names(SummaryOutput) = c("EST", "ESS", "Samples")
  return(SummaryOutput)
}
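## Sketch of a call (not run; requires JAGS). The toy data is assumed for
## illustration; treatment is recoded internally so that x = 0/1. Because
## coda orders monitored variables alphabetically, column 1 of the sample
## matrix holds delta, which is how the design functions above consume it.
# dat <- data.frame(treatment = rep(0:1, each = 50),
#                   response  = sample(1:6, 100, replace = TRUE))
# out <- jags_po_model(dat)
# delta_draws <- out$Samples[[1]][, 1]
# mean(delta_draws < 0)   # futility-type posterior probability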
/scratch/gouwar.j/cran-all/cranData/BayesOrdDesign/R/jags_po.R
#' @import rjmcmc
#' @import stats
#' @importFrom ggplot2 ggplot
#' @importFrom ordinal clm
#' @import superdiag
#' @import rjags
#' @import R2jags
#' @import EnvStats
#' @import gsDesign
#' @import schoolmath
#' @importFrom madness det
#' @importFrom graphics matplot
#' @importFrom stats predict.lm
/scratch/gouwar.j/cran-all/cranData/BayesOrdDesign/R/packages.R
#' Perform reversible-jump MCMC post-processing to select the appropriate model between the
#' proportional odds (PO) model and the non-proportional odds (NPO) model
#'
#' @description Performs Bayesian multi-model inference, estimating posterior
#' model probabilities for the 2 candidate models.
#' @param g1 specifies the bijection from the universal parameter psi to the PO model parameter set
#' @param ginv1 specifies the bijection from the PO model parameter set to psi. It is the inverse transformation of g1.
#' @param g2 specifies the bijection from the universal parameter psi to the NPO model parameter set
#' @param ginv2 specifies the bijection from the NPO model parameter set to psi. It is the inverse transformation of g2.
#' @param or_alt effect size to be detected (under H_1)
#' in terms of odds ratios
#' @param sd the standard error
#' @param pro_ctr distribution of clinical categories for the
#' control group
#' @param n sample size for each group and each interim look
#' @param U the desirability of each outcome level
#' @return rjmcmc_func() returns the selection probabilities for the PO and NPO models
#' @export
#'
#' @examples
#' \donttest{
#'
#' g1 = function(psi){
#'   w = sum(psi[6:10])/5
#'   theta = c(psi[1], psi[2], psi[3], psi[4], psi[5],
#'             w, w-psi[7], w-psi[8], w-psi[9], w-psi[10])
#'   return(theta)
#' }
#'
#' ginv1 = function(theta){
#'
#'   w = sum(theta[6:10])
#'   psi = c(theta[1], theta[2], theta[3], theta[4], theta[5],
#'           w, theta[6]-theta[7], theta[6]-theta[8],
#'           theta[6]-theta[9], theta[6]-theta[10])
#'   return(psi)
#' }
#'
#'
#' g2 = function(psi){
#'   theta = psi
#'   return(theta)
#' }
#' ginv2 = function(theta){
#'   psi = theta
#'   return(psi)
#' }
#'
#' out = rjmcmc_func(g1, ginv1, g2, ginv2, or_alt = c(1.4,1.4,1.4,1.4,1.4), sd = 0.2,
#'                   pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13),
#'                   n = 100, U = c(100,80,65,25,10,0))
#' }
rjmcmc_func = function(g1, ginv1, g2, ginv2, or_alt, sd, pro_ctr, n, U){

  dat = data_gene(or_alt, sd, pro_ctr, n)

  L = length(unique(dat$response))        # count of outcome levels
  C = 2*(length(unique(dat$response))-1)  # count of parameters and thresholds

  po_fit = jags_po_model(dat)
  npo_fit = jags_npo_model(dat, U)

  C1 = as.matrix(po_fit$Samples)
  draw1 = function(){
    r = C1[sample(dim(C1)[1], 1, replace=T), ]
    r = r[c(2:L, 1, (L+1):dim(C1)[2])]
  }
  C2 = as.matrix(npo_fit$Samples)
  draw2 = function(){
    r = C2[sample(dim(C2)[1], 1, replace=T), c(-11,-12)]
    r = r[c(L:(dim(C2)[2]-2), 1:(L-1))]
  }

  x = as.numeric(dat$treatment)-1
  y = as.numeric(dat$response)

  # Likelihood PO
  L1 = function(theta){
    C = length(unique(y))
    N = nrow(dat)
    prob = c()
    for (i in 1:N){
      if (y[i] == 1){prob[i] = plogis(theta[y[i]] - theta[L]*x[i])
      }else if(y[i] == C){
        prob[i] = 1 - plogis(theta[y[i]-1] - theta[L]*x[i])
      }else{
        prob[i] = plogis(theta[y[i]] - theta[L]*x[i]) -
          plogis(theta[y[i]-1] - theta[L]*x[i])
      }
    }
    ll = sum(log(prob), na.rm = TRUE)
    return(ll)
  }

  # Likelihood NPO
  L2 = function(theta){
    C = length(unique(y))
    N = nrow(dat)
    prob = c()
    for (i in 1:N){
      if (y[i] == 1){prob[i] = plogis(theta[y[i]] - theta[y[i]+5]*x[i])
      }else if(y[i] == C){
        prob[i] = 1 - plogis(theta[y[i]-1] - theta[y[i]+5-1]*x[i])
      }else{
        prob[i] = plogis(theta[y[i]] - theta[y[i]+5]*x[i]) -
          plogis(theta[y[i]-1] - theta[y[i]+5-1]*x[i])
      }
    }
    ll = sum(log(prob), na.rm = TRUE)
    return(ll)
  }

  fit.prior.po = ordinal::clm(response ~ treatment, data = dat, threshold = "flexible")
  fit.prior.npo = ordinal::clm(response ~ treatment, nominal = ~ treatment,
                               data = dat, threshold = "flexible")

  # Prior PO
  p.prior1 = function(theta){
    sum(dnorm(theta[1:(L-1)], 0, 1, log = TRUE)) +
      dnorm(theta[L], as.numeric(fit.prior.po$beta),
            sqrt(fit.prior.po$vcov[L, L]), log = TRUE)
  }

  # Prior NPO
  p.prior2 = function(theta){
    sum(dnorm(theta[1:(L-1)], 0, 1, log = TRUE)) +
      sum(dnorm(theta[L:C], 0, 1, log = TRUE))
  }

  sigma = 1.5

  goals_post = rjmcmc::rjmcmcpost(post.draw = list(draw1, draw2),
                                  g = list(g1, g2),
                                  ginv = list(ginv1, ginv2),
                                  likelihood = list(L1, L2),
                                  param.prior = list(p.prior1, p.prior2),
                                  model.prior = c(0.5, 0.5),
                                  chainlength = 1e3, save.all = TRUE)

  prob = goals_post$result$`Posterior Model Probabilities`
  cat("The probability of choosing PO model is ", prob[1],
      " while the probability of choosing NPO model is ", prob[2], ".", sep = "")

  return(goals_post)
}
/scratch/gouwar.j/cran-all/cranData/BayesOrdDesign/R/rjmcmc.R
rjmcmc_inter = function(or_alt, sd, pro_ctr, n, U){

  if(all(or_alt == or_alt[1])){
    dat = data_gene(or_alt, sd, pro_ctr, n)
  }else{
    dat = data_gene_npo(or_alt, sd, pro_ctr, n)
  }

  L = length(unique(dat$response))        # count of outcome levels
  C = 2*(length(unique(dat$response))-1)  # count of parameters and thresholds

  po_fit = jags_po_model(dat)
  npo_fit = jags_npo_model(dat, U)

  C1 = as.matrix(po_fit$Samples)
  draw1 = function(){
    r = C1[sample(dim(C1)[1], 1, replace=T), ]
    #r = r[c(2:6,1,7:10)]
    r = r[c(2:L, 1, (L+1):dim(C1)[2])]
  }
  C2 = as.matrix(npo_fit$Samples)
  draw2 = function(){
    r = C2[sample(dim(C2)[1], 1, replace=T), c(-11,-12)]
    #r = r[c(6:10,1:5)]
    r = r[c(L:(dim(C2)[2]-2), 1:(L-1))]
  }

  # bijective functions (note: the explicit indices below assume a
  # six-level outcome, i.e. L = 6 and C = 10)
  g1 = function(psi){
    w = sum(psi[L:C])/(C-L+1)
    theta = c(psi[1], psi[2], psi[3], psi[4], psi[5],
              w, w-psi[7], w-psi[8], w-psi[9], w-psi[10])
    return(theta)
  }
  ginv1 = function(theta){
    w = sum(theta[L:C])
    psi = c(theta[1], theta[2], theta[3], theta[4], theta[5],
            w, theta[6]-theta[7], theta[6]-theta[8],
            theta[6]-theta[9], theta[6]-theta[10])
    return(psi)
  }
  g2 = function(psi){
    theta = psi
    return(theta)
  }
  ginv2 = function(theta){
    psi = theta
    return(psi)
  }

  x = as.numeric(dat$treatment)-1
  y = as.numeric(dat$response)

  # Likelihood PO
  L1 = function(theta){
    C = length(unique(y))
    N = nrow(dat)
    prob = c()
    for (i in 1:N){
      if (y[i] == 1){prob[i] = plogis(theta[y[i]] - theta[6]*x[i])
      }else if(y[i] == C){
        prob[i] = 1 - plogis(theta[y[i]-1] - theta[6]*x[i])
      }else{
        prob[i] = plogis(theta[y[i]] - theta[6]*x[i]) -
          plogis(theta[y[i]-1] - theta[6]*x[i])
      }
    }
    ll = sum(log(prob), na.rm = TRUE)
    return(ll)
  }

  # Likelihood NPO
  L2 = function(theta){
    C = length(unique(y))
    N = nrow(dat)
    prob = c()
    for (i in 1:N){
      if (y[i] == 1){prob[i] = plogis(theta[y[i]] - theta[y[i]+5]*x[i])
      }else if(y[i] == C){
        prob[i] = 1 - plogis(theta[y[i]-1] - theta[y[i]+5-1]*x[i])
      }else{
        prob[i] = plogis(theta[y[i]] - theta[y[i]+5]*x[i]) -
          plogis(theta[y[i]-1] - theta[y[i]+5-1]*x[i])
      }
    }
    ll = sum(log(prob), na.rm = TRUE)
    return(ll)
  }

  fit.prior.po = ordinal::clm(response ~ treatment, data = dat, threshold = "flexible")
  fit.prior.npo = ordinal::clm(response ~ treatment, nominal = ~ treatment,
                               data = dat, threshold = "flexible")

  # Prior PO
  p.prior1 = function(theta){
    sum(dnorm(theta[1:(L-1)], 0, 1, log = TRUE)) +
      #+ dnorm(theta[6], 0, 1, log = TRUE)
      dnorm(theta[L], as.numeric(fit.prior.po$beta),
            sqrt(fit.prior.po$vcov[L, L]), log = TRUE)
    #+ sum(dnorm(theta[7:10], 0, sigma, log = TRUE))
  }

  # Prior NPO
  p.prior2 = function(theta){
    sum(dnorm(theta[1:(L-1)], 0, 1, log = TRUE)) +
      sum(dnorm(theta[L:10], 0, 1, log = TRUE))
    #+ sum(dnorm(theta[6:10], -as.numeric(fit.prior.npo$alpha.mat[2,]),
    #            as.vector(sqrt(diag(as.matrix(fit.prior.npo$vcov))[L:dim(fit.prior.npo$vcov)[1]])),
    #            log = TRUE))
  }

  sigma = 1.5

  goals_post = rjmcmc::rjmcmcpost(post.draw = list(draw1, draw2),
                                  g = list(g1, g2),
                                  ginv = list(ginv1, ginv2),
                                  likelihood = list(L1, L2),
                                  param.prior = list(p.prior1, p.prior2),
                                  model.prior = c(0.5, 0.5),
                                  chainlength = 1e3, save.all = TRUE)

  prob = goals_post$result$`Posterior Model Probabilities`
  cat("The probability of choosing PO model is ", prob[1],
      " while the probability of choosing NPO model is ", prob[2], ".", sep = "")

  return(goals_post)
}
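## Sketch of a stage-I call (not run; requires JAGS). The arguments mirror
## the rjmcmc_func() example; a six-level outcome is assumed.
# fit = rjmcmc_inter(or_alt = rep(1.4, 5), sd = 0.2,
#                    pro_ctr = c(0.58, 0.05, 0.17, 0.03, 0.04, 0.13),
#                    n = 100, U = c(100, 80, 65, 25, 10, 0))
# fit$result$`Posterior Model Probabilities`   # c(P(PO), P(NPO))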
/scratch/gouwar.j/cran-all/cranData/BayesOrdDesign/R/rjmcmc_internal.R
utility_func = function(fit.prior, Uscore, C){

  # delta: threshold-specific treatment effects from the nominal part of the clm fit
  delta = -as.numeric(fit.prior$alpha.mat[2, 1:(C-1)])
  delta.sd = as.vector(sqrt(diag(as.matrix(fit.prior$vcov))[C:dim(fit.prior$vcov)[1]]))
  # gamma: thresholds
  gamma = as.numeric(fit.prior$alpha.mat[1, 1:(C-1)])
  gamma.sd = as.vector(sqrt(diag(as.matrix(fit.prior$vcov))[1:(C-1)]))

  d = rbind(cbind(delta, delta.sd), cbind(gamma, gamma.sd))
  samples = apply(d, 1, function(x) rnorm(100, mean = x[1], sd = abs(x[2])))

  u = matrix(NA, nrow = 2, ncol = dim(samples)[1])
  # use posterior samples to calculate proportions
  for (i in 1:dim(samples)[1]){
    Q = matrix(NA, nrow = 2, ncol = C-1)
    prob = matrix(NA, nrow = 2, ncol = C)
    sample = samples[i,]
    for (c in 1:(C-1)){
      Q[1,c] = plogis(sample[c+5])
      Q[2,c] = plogis(sample[c+5] - sample[c])
    }
    for (j in 1:2){
      prob[j,1] <- Q[j,1]
      for (c in 2:(C-1)){
        prob[j,c] <- Q[j,c] - Q[j,(c-1)]
      }
      prob[j,C] <- 1 - Q[j,(C-1)]
    }
    u[,i] = prob %*% Uscore
  }
  return(u)
}
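## Illustrative use (not run). Note that the index sample[c+5] hardcodes a
## six-level outcome (C = 6, five thresholds); the fit and data below are
## assumptions for demonstration.
# fit <- ordinal::clm(response ~ treatment, nominal = ~ treatment,
#                     data = dat, threshold = "flexible")
# u <- utility_func(fit, Uscore = c(100, 80, 65, 25, 10, 0), C = 6)
# mean(u[2, ] > u[1, ])   # P(mean utility, treatment > control)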
/scratch/gouwar.j/cran-all/cranData/BayesOrdDesign/R/utility_function.R
## usethis namespace: start
#' @importFrom Rcpp sourceCpp
#' @useDynLib BayesPPD,.registration = TRUE
## usethis namespace: end
NULL

#' Bayesian sample size determination using the power and normalized power prior for generalized linear models
#'
#' The \pkg{BayesPPD} (Bayesian Power Prior Design) package provides two categories of functions:
#' functions for Bayesian power/type I error calculation and functions for model fitting.
#' Supported distributions include normal, binary (Bernoulli/binomial), Poisson and exponential.
#' The power parameter \eqn{a_0} can be fixed or modeled as random using a normalized power prior.
#'
#' @details
#' Following Chen et al. (2011), for two group models (i.e., treatment and control group with no covariates), denote the parameter for the treatment group by \eqn{\mu_t}
#' and the parameter for the control group by \eqn{\mu_c}. Suppose there are \eqn{K} historical datasets \eqn{D_0 = (D_{01},\cdots, D_{0K})'}. We consider the following normalized power prior
#' for \eqn{\mu_c} given multiple historical datasets \eqn{D_0}
#' \deqn{\pi(\mu_c|D_0,a_0) = \frac{1}{C(a_0)}\prod_{k=1}^K \left[L(\mu_c|D_{0k})^{a_{0k}}\right]\pi_0(\mu_c)}
#' where \eqn{a_0 = (a_{01},\cdots,a_{0K})'}, \eqn{0\le a_{0k} \le 1} for \eqn{k=1,\cdots,K}, \eqn{L(\mu_c|D_{0k})} is the historical data likelihood,
#' \eqn{\pi_0(\mu_c)} is an initial prior, and \eqn{C(a_0)=\int \prod_{k=1}^K [L(\mu_c|D_{0k})^{a_{0k}}]\pi_0(\mu_c)d\mu_c}. When \eqn{a_0} is fixed,
#' the normalized power prior is equivalent to the power prior
#' \deqn{\pi(\mu_c|D_0,a_0) = \prod_{k=1}^K \left[L(\mu_c|D_{0k})^{a_{0k}}\right]\pi_0(\mu_c).}
#' By default, the power/type I error calculation algorithm assumes the null and alternative hypotheses are given by
#' \deqn{H_0: \mu_t - \mu_c \ge \delta} and \deqn{H_1: \mu_t - \mu_c < \delta,} where \eqn{\delta} is a prespecified constant. To test hypotheses of
#' the opposite direction, i.e., \eqn{H_0: \mu_t - \mu_c \le \delta} and \eqn{H_1: \mu_t - \mu_c > \delta}, one can set the parameter \code{nullspace.ineq} to "<".
#' To determine the Bayesian sample size, we estimate the quantity
#' \deqn{\beta_{sj}^{(n)}=E_s[I\{P(\mu_t-\mu_c<\delta|y^{(n)}, \pi^{(f)})\ge \gamma\}]}
#' where \eqn{\gamma > 0} is a prespecified posterior probability threshold for rejecting the null hypothesis (e.g., \eqn{0.975}), the probability is computed with respect to the posterior distribution given the data
#' \eqn{y^{(n)}} and the fitting prior \eqn{\pi^{(f)}}, and the expectation is taken with respect to the marginal distribution of \eqn{y^{(n)}}
#' defined based on the sampling prior \eqn{\pi^{(s)}(\theta)}, where \eqn{\theta=(\mu_t, \mu_c, \eta)} and \eqn{\eta} denotes any nuisance parameter in the model.
#' Let \eqn{\Theta_0} and \eqn{\Theta_1} denote the parameter spaces corresponding to \eqn{H_0} and \eqn{H_1}.
#' Let \eqn{\pi_0^{(s)}(\theta)} denote a sampling prior that puts mass in the null region, i.e., \eqn{\theta \subset \Theta_0}.
#' Let \eqn{\pi_1^{(s)}(\theta)} denote a sampling prior that puts mass in the alternative region, i.e., \eqn{\theta \subset \Theta_1}.
#' Then \eqn{\beta_{s0}^{(n)}} corresponding to \eqn{\pi^{(s)}(\theta)=\pi_0^{(s)}(\theta)} is a Bayesian type I error,
#' while \eqn{\beta_{s1}^{(n)}} corresponding to \eqn{\pi^{(s)}(\theta)=\pi_1^{(s)}(\theta)} is a Bayesian power.
#' We compute \eqn{n_{\alpha_0} = \min\{n: \beta_{s0}^{(n)} \le \alpha_0\}} and \eqn{n_{\alpha_1} = \min\{n: \beta_{s1}^{(n)} \ge 1-\alpha_1\}}.
#' Then the Bayesian sample size is max\eqn{\{n_{\alpha_0}, n_{\alpha_1}\}}. Choosing \eqn{\alpha_0=0.05} and \eqn{\alpha_1=0.2}
#' guarantees that the Bayesian type I error rate is at most \eqn{0.05} and the Bayesian power is at least \eqn{0.8}.
#'
#' To compute \eqn{\beta_{sj}^{(n)}}, the following algorithm is used:
#' \describe{
#' \item{Step 1:}{Generate \eqn{\theta \sim \pi_j^{(s)}(\theta)}}
#' \item{Step 2:}{Generate \eqn{y^{(n)} \sim f(y^{(n)}|\theta)}}
#' \item{Step 3:}{Compute \eqn{P(\mu_t < \mu_c + \delta|y^{(n)}, \pi^{(f)})}}
#' \item{Step 4:}{Check whether \eqn{P(\mu_t < \mu_c + \delta|y^{(n)}, \pi^{(f)}) \ge \gamma}}
#' \item{Step 5:}{Repeat Steps 1-4 \eqn{N} times}
#' \item{Step 6:}{Compute the proportion of times that \eqn{P(\mu_t < \mu_c + \delta|y^{(n)}, \pi^{(f)}) \ge \gamma} is true out of the \eqn{N} simulated datasets, which gives an estimate of \eqn{\beta_{sj}^{(n)}}.}
#' }
#'
#' For positive continuous data assumed to follow an exponential distribution, the hypotheses are given by
#' \deqn{H_0: \mu_t/\mu_c \ge \delta} and \deqn{H_1: \mu_t/\mu_c < \delta,} where \eqn{\mu_t} and \eqn{\mu_c} are the hazards for the treatment and the control group, respectively.
#' The definition of \eqn{\beta_{sj}^{(n)}} and the algorithm change accordingly.
#'
#' If there are covariates to adjust for, we assume the first column of the covariate matrix is the treatment indicator,
#' and the corresponding parameter is \eqn{\beta_1}, which, for example, corresponds to a difference in means for the linear regression model and a log hazard ratio for the exponential regression model.
#' The hypotheses are given by
#' \deqn{H_0: \beta_1 \ge \delta} and \deqn{H_1: \beta_1 < \delta.}
#' The definition of \eqn{\beta_{sj}^{(n)}} and the algorithm change accordingly.
#'
#' By default, the package assumes the historical data is
#' composed of control group subjects only. If the user wants to use historical data to inform the treatment effect, one can set \code{borrow.treat=TRUE}
#' and include the treatment indicator in the historical covariate matrix.
#'
#' This implementation of the method does not assume any particular distribution for the sampling priors.
#' The user is allowed to specify a vector or matrix of samples for \eqn{\theta} (matrix if \eqn{\theta} is of dimension >1) from any distribution, and the algorithm samples with replacement
#' from the vector or matrix at each iteration of data simulation. In order to accurately approximate a joint distribution
#' for multiple parameters, the number of iterations should be large (e.g., 10,000).
#'
#' Gibbs sampling is used for normally distributed data. Slice sampling is used for all other data distributions.
#' For two group models with fixed \eqn{a_0},
#' numerical integration using the \pkg{RcppNumerical} package is used.
#'
#' @references Chen, Ming-Hui, et al. "Bayesian design of noninferiority trials for medical devices using historical data." Biometrics 67.3 (2011): 1163-1170.
#' @docType package
#' @name BayesPPD-package
NULL
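## A minimal R sketch of Steps 1-6 above for Bernoulli two-group data with a
## single historical dataset and fixed a0, using beta conjugacy. This is an
## illustration only, NOT the package's C++ implementation; every setting
## below (n0, y0, a0, the point-mass sampling prior, delta, gamma) is an
## assumed value for demonstration.
beta_sj_sketch <- function(n, n0 = 100, y0 = 30, a0 = 0.5,
                           p_t = 0.3, p_c = 0.3,   # degenerate sampling prior
                           delta = 0, gamma = 0.95, N = 1000, M = 5000){
  rejections <- logical(N)
  for (s in 1:N){
    # Step 1: theta ~ sampling prior (a point mass here)
    # Step 2: generate current data y^(n)
    y_t <- rbinom(1, n, p_t)
    y_c <- rbinom(1, n, p_c)
    # Step 3: posterior draws under the power prior with a Beta(1,1) initial
    # prior; the historical likelihood enters with exponent a0
    pt_post <- rbeta(M, 1 + y_t, 1 + n - y_t)
    pc_post <- rbeta(M, 1 + a0*y0 + y_c, 1 + a0*(n0 - y0) + n - y_c)
    # Step 4: check whether P(mu_t - mu_c < delta | data) >= gamma
    rejections[s] <- mean(pt_post - pc_post < delta) >= gamma
  }
  # Steps 5-6: the rejection proportion estimates beta_sj
  mean(rejections)
}
# beta_sj_sketch(n = 50)  # ~ type I error here, since p_t = p_c puts mass in H0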
/scratch/gouwar.j/cran-all/cranData/BayesPPD/R/BayesPPD-package.R
# The following comments appeared in the source code for
# Wang YB, Chen MH, Kuo L, Lewis PO (2018). "A New Monte Carlo Method for
# Estimating Marginal Likelihoods." Bayesian Analysis, 13(2), 311-333.

library(stats)

logpowerprior <- function(mcmc, a0, historical, data.type, data.link, init_var){

  beta = mcmc
  lp = 0

  # add independent normal priors for beta
  for(i in 1:length(beta)){
    lp = lp + dnorm(beta[i], mean = 0, sd = sqrt(init_var[i]), log = TRUE)
  }

  for(i in 1:length(historical)){
    dat = historical[[i]]
    y_h = dat[["y0"]]
    x_h = dat[["x0"]]
    x_h = cbind(1, x_h)
    if (data.type == "Bernoulli") {n_h = rep(1, length(y_h))}
    if (data.type == "Binomial") {n_h = dat[["n0"]]}
    a0_i = a0[i]

    mean = x_h %*% beta
    if (data.link == "Logistic") { mean = exp(mean) / (1 + exp(mean)) }
    if (data.link == "Probit") { mean = pnorm(mean, 0.0, 1.0) }
    if (data.link == "Log") { mean = exp(mean) }
    if (data.link == "Identity-Positive") {
      for (j in 1:length(mean)) { mean[j] = max(mean[j], 0.00001) }
    }
    if (data.link == "Identity-Probability") {
      for (j in 1:length(mean)) { mean[j] = min(max(mean[j], 0.00001), 0.99999) }
    }
    if (data.link == "Complementary Log-Log") { mean = 1 - exp(-exp(mean)) }
    #mean = min(max(mean, 10^(-4)), 0.9999)

    if (data.type == "Bernoulli" | data.type == "Binomial"){
      lp = lp + a0_i * sum(y_h * log(mean) + (n_h - y_h) * log1p(-mean))
    }
    if (data.type == "Poisson") { lp = lp + a0_i * sum(y_h * log(mean) - mean) }
    if (data.type == "Exponential") { lp = lp + a0_i * sum(log(mean) - y_h * mean) }
  }
  return(lp)
}

# Purpose: floating-point control for summing all "ratios"
# Input: "ratios" is a vector saving all log(q(\theta^*_k)/q(\theta_t)) for PWK
# Output: sum of "ratios" divided by the MC/MCMC sample size, in log scale
denominator_control <- function(ratios) {
  tot <- length(ratios)
  b <- ratios[!is.na(ratios)]
  est_d_r <- 0
  b.max <- max(b)
  est_d_r <- log(sum(exp(b - b.max))) + b.max - log(tot)
  return(est_d_r)
}

# Purpose: forming the rings for the estimation of c_0
# Input: "r" denotes the maximum radius to form the working parameter space,
#        and "nslice" is the number of partition subsets
# Output: a matrix recording the interval of each ring,
#         the posterior kernel (log-scale) of the representative point in each ring,
#         and the volume of each ring
LOR_partition_pp <- function(r, nslice, mcmc, a0, historical, data.type, data.link, init_var){

  interval <- seq(0, r, length = (nslice + 1))
  rings <- cbind(interval[-(nslice + 1)], interval[-1])

  P <- ncol(mcmc)
  reprp <- apply(rings, 1, mean)/sqrt(P)  # scaled by the square root of the number of parameters
  sds <- apply(mcmc, 2, sd)
  means <- apply(mcmc, 2, mean)
  partjo1 <- log(prod(sds))

  kreprp <- rep(NA, nslice)
  for (i in 1:nslice){
    rpp <- means + sds*reprp[i]
    kreprp[i] <- 0
    kreprp[i] <- kreprp[i] + logpowerprior(rpp, a0, historical, data.type, data.link, init_var)  # not plugging in actual mcmc
    kreprp[i] <- kreprp[i] + partjo1
  }
  rings <- cbind(rings, kreprp)

  rarea <- pi^(P/2)*interval^P/gamma(P/2 + 1)  # volume of a P-ball of each radius: pi^(P/2) r^P / gamma(P/2+1)
  rvol <- log(rarea[-1] - rarea[-(nslice + 1)]) + kreprp
  rings <- cbind(rings, rvol)
  return(rings)
}
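## Quick illustration of denominator_control(): it is a log-mean-exp, which
## stays finite where the naive computation underflows.
# ratios <- c(-1000, -1001, -1002)
# denominator_control(ratios)   # approx -1000.69, computed stably
# log(mean(exp(ratios)))        # -Inf, since exp(-1000) underflows to 0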
/scratch/gouwar.j/cran-all/cranData/BayesPPD/R/PWK_helper.R
# Some of the following comments appeared in the source code for
# Wang YB, Chen MH, Kuo L, Lewis PO (2018). "A New Monte Carlo Method for
# Estimating Marginal Likelihoods." Bayesian Analysis, 13(2), 311-333.

calc_marg_l <- function(a0, mcmc, historical, data.type, data.link, init_var){

  sds <- apply(mcmc, 2, stats::sd)
  means <- apply(mcmc, 2, mean)

  # Standardize MCMC samples
  mcmc_scaled <- scale(mcmc, center = TRUE, scale = TRUE)

  # Determine r ("rcover") and K ("ncutslice") for PWK
  rcover <- sqrt(qchisq(0.95, df = ncol(mcmc)))
  ncutslice <- 100

  c_est_pp <- 0

  # Forming the partition subsets for the PWK estimation of c_0
  rings_pp <- LOR_partition_pp(r = rcover, nslice = ncutslice, mcmc = mcmc,
                               a0 = a0, historical, data.type, data.link, init_var)

  # Estimate c_0
  t1 <- dim(mcmc_scaled)[1]
  val <- rep(NA, t1)
  r_ <- sqrt(apply(mcmc_scaled^2, 1, sum))  # norm of mcmc samples
  kreprp <- rep(NA, t1)

  for (j in 1:t1){
    ring_position <- which(r_[j] >= rings_pp[,1] & r_[j] < rings_pp[,2])
    if (sum(ring_position) > 0){  # phi_t is in A_k
      partjo1 <- log(prod(sds))   # log of the Jacobian
      kreprp[j] <- 0
      kreprp[j] <- kreprp[j] + logpowerprior(mcmc[j,], a0, historical, data.type, data.link, init_var) + partjo1
      val[j] <- rings_pp[ring_position, 3] - kreprp[j]
    }
  }

  lcons <- max(rings_pp[, 4])
  tcubevol <- log(sum(exp(rings_pp[,4] - lcons))) + lcons
  den <- denominator_control(val)

  c_est_pp <- NA
  c_est_pp <- tcubevol - den
  return(c_est_pp)  # log(c0)
}

# This function computes the normalizing constant for a given a_0 value.
calc_a0_func <- function(a0, historical, data.type, data.link, init_var,
                         lower_limits, upper_limits, slice_widths, nMC, nBI){

  dCurrent <- FALSE
  historical2 <- list()
  for(i in 1:length(historical)){
    l <- historical[[i]]
    l[["a0"]] = a0[i]
    historical2[[i]] = l
  }
  y <- rep(0, 1)
  x <- matrix(0, nrow = 1, ncol = 1)
  n <- rep(0, 1)

  samples <- glm_fixed_a0(data.type, data.link, y, n, x, FALSE, historical2,
                          init_var, lower_limits, upper_limits, slice_widths,
                          nMC, nBI, dCurrent)
  marg_l <- calc_marg_l(a0, samples, historical, data.type, data.link, init_var)
  return(marg_l)
}

#' Function for approximating the normalizing constant for generalized linear models with random a0
#'
#' @description This function returns a vector of coefficients that defines a function \eqn{f(a_0)} that approximates the normalizing constant for generalized linear models with random \eqn{a_0}.
#' The user should input the values returned to \code{\link{glm.random.a0}} or \code{\link{power.glm.random.a0}}.
#'
#' @param grid Matrix of potential values for \eqn{a_0}, where the number of columns should equal the number of historical datasets. Note that the algorithm may fail if some grid values are close to zero. See \emph{Details} below.
#' @param historical List of historical dataset(s). Each historical dataset is stored in a list which contains two \emph{named} elements: \code{y0} and \code{x0}.
#' \itemize{
#' \item \code{y0} is a vector of responses.
#' \item \code{x0} is a matrix of covariates.
#' }
#' For binomial data, an additional element \code{n0} is required.
#' \itemize{
#' \item \code{n0} is a vector of integers specifying the number of subjects who have a particular value of the covariate vector.
#' }
#' @param data.type Character string specifying the type of response. The options are "Bernoulli", "Binomial", "Poisson" and "Exponential".
#'
#' @inheritParams power.glm.fixed.a0
#' @inheritParams glm.random.a0
#'
#' @details
#'
#' This function performs the following steps:
#'
#' \enumerate{
#'
#' \item Suppose there are K historical datasets.
#' The user inputs a grid of M rows and K columns of potential values for \eqn{a_0}. For example, one can choose the vector \code{v = c(0.1, 0.25, 0.5, 0.75, 1)}
#' and use \code{expand.grid(a0_1=v, a0_2=v, a0_3=v)} when \eqn{K=3} to get a grid with \eqn{M=5^3=125} rows and 3 columns. If there are more than three historical datasets, the dimension of \code{v} can be reduced
#' to limit the size of the grid. A large grid will increase runtime.
#' \item For each row of \eqn{a_0} values in the grid, obtain \eqn{M} samples for \eqn{\beta} from the power prior associated with the current values of \eqn{a_0} using the slice sampler.
#' \item For each of the M sets of posterior samples, execute the PWK algorithm (Wang et al., 2018) to estimate the log of the normalizing constant \eqn{d_1,...,d_M} for the normalized power prior.
#' \item At this point, one has a dataset with outcomes \eqn{d_1,...,d_M} and predictors corresponding to the rows of the \eqn{a_0} grid matrix. A polynomial regression is applied to estimate a function \eqn{d=f(a0)}.
#' The degree of the polynomial regression is determined by the algorithm to ensure \eqn{R^2 > 0.99}.
#' \item The vector of coefficients from the polynomial regression model is returned by the function, which the user must input into \code{\link{glm.random.a0}} or \code{\link{power.glm.random.a0}}.
#'
#' }
#'
#' When a row of the \code{grid} contains elements that are close to zero, the resulting power prior will be flat and estimates of normalizing constants may be inaccurate.
#' Therefore, it is recommended that \code{grid} values should be at least 0.05.
#'
#' If one encounters the error message "some coefficients are not defined because of singularities",
#' it could be due to the following factors: number of \code{grid} rows too large or too small, insufficient sample size of the historical data, insufficient number of iterations for the slice sampler,
#' or near-zero \code{grid} values.
#'
#' Note that due to computational intensity, the \code{normalizing.constant} function has not been evaluated for accuracy for high dimensional \eqn{\beta} (e.g., dimension > 10) or high dimensional \eqn{a_0} (e.g., dimension > 5).
#'
#' @return Vector of coefficients for \eqn{a_0} that defines a function \eqn{f(a_0)} that approximates the normalizing constant, necessary for functions \code{\link{glm.random.a0}} and \code{\link{power.glm.random.a0}}.
#' The length of the vector is equal to 1+K*L where K is the number of historical datasets and L is the degree of the polynomial regression determined by the algorithm.
#' @references Wang, Yu-Bo; Chen, Ming-Hui; Kuo, Lynn; Lewis, Paul O. A New Monte Carlo Method for Estimating Marginal Likelihoods. Bayesian Anal. 13 (2018), no. 2, 311--333.
#' @seealso \code{\link{glm.random.a0}} and \code{\link{power.glm.random.a0}}
#' @examples
#'
#' data.type <- "Bernoulli"
#' data.link <- "Logistic"
#' data.size <- 50
#'
#' # Simulate two historical datasets
#' p <- 1
#' set.seed(111)
#' x1 <- matrix(rnorm(p*data.size),ncol=p,nrow=data.size)
#' set.seed(222)
#' x2 <- matrix(rnorm(p*data.size),ncol=p,nrow=data.size)
#' beta <- c(1,2)
#' mean1 <- exp(x1*beta)/(1+exp(x1*beta))
#' mean2 <- exp(x2*beta)/(1+exp(x2*beta))
#' historical <- list(list(y0=rbinom(data.size,size=1,prob=mean1),x0=x1),
#'                    list(y0=rbinom(data.size, size=1, prob=mean2),x0=x2))
#'
#' # Create grid of possible values of a0 with two columns corresponding to a0_1 and a0_2
#' g <- c(0.1, 0.25, 0.5, 0.75, 1)
#' grid <- expand.grid(a0_1=g, a0_2=g)
#'
#' nMC <- 100 # nMC should be larger in practice
#' nBI <- 50
#' result <- normalizing.constant(grid=grid, historical=historical,
#'                                data.type=data.type, data.link=data.link,
#'                                nMC=nMC, nBI=nBI)
#' @importFrom stats sd qchisq pnorm dnorm lm
#' @export
normalizing.constant <- function(grid, historical, data.type, data.link,
                                 prior.beta.var = rep(10, 50),
                                 lower.limits = rep(-100, 50),
                                 upper.limits = rep(100, 50),
                                 slice.widths = rep(1, 50),
                                 nMC = 10000, nBI = 250){

  # NaN can result when elements of a0 are too close to zero
  d <- apply(grid, 1, calc_a0_func, historical, data.type, data.link,
             prior.beta.var, lower.limits, upper.limits, slice.widths, nMC, nBI)

  m <- as.matrix(grid)
  r2 <- 0
  degree <- 0
  while(r2 < 0.99){
    degree = degree + 1
    # build polynomial terms of m up to the current degree
    mat <- matrix(0, nrow = nrow(m), ncol = ncol(m)*degree)
    for(i in 1:degree){
      mat[, ((i-1)*ncol(m)+1):(ncol(m)*i)] <- m^i
    }
    fit <- lm(d ~ mat)
    r2 <- summary(fit)$r.squared
    if(NA %in% fit$coefficients){
      stop("Some coefficients are not defined because of singularities. Potential causes include number of grid rows too large or too small, insufficient sample size of the historical data, insufficient number of iterations for the slice sampler, or near-zero grid values.")
    }
  }
  result <- fit$coefficients
  #return(list("coef"=result, "logc"=d))
  return(result)
}
/scratch/gouwar.j/cran-all/cranData/BayesPPD/R/PWK_main.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

two_grp_fixed_a0 <- function(dType, y_c, n_c, historical, b_01, b_02) {
    .Call(`_BayesPPD_two_grp_fixed_a0`, dType, y_c, n_c, historical, b_01, b_02)
}

power_two_grp_fixed_a0 <- function(dType, n_t, n_c, historical, ns, p_t_prior_samps, p_c_prior_samps, b_t1, b_t2, b_01, b_02, delta, gamma, N, upper_inf) {
    .Call(`_BayesPPD_power_two_grp_fixed_a0`, dType, n_t, n_c, historical, ns, p_t_prior_samps, p_c_prior_samps, b_t1, b_t2, b_01, b_02, delta, gamma, N, upper_inf)
}

two_grp_fixed_a0_normal <- function(y_c, n_c, v, historical, nMC, nBI) {
    .Call(`_BayesPPD_two_grp_fixed_a0_normal`, y_c, n_c, v, historical, nMC, nBI)
}

power_two_grp_fixed_a0_normal <- function(n_t, n_c, historical, ns, mu_t_prior_samps, mu_c_prior_samps, var_t_prior_samps, var_c_prior_samps, delta, gamma, nMC, nBI, N) {
    .Call(`_BayesPPD_power_two_grp_fixed_a0_normal`, n_t, n_c, historical, ns, mu_t_prior_samps, mu_c_prior_samps, var_t_prior_samps, var_c_prior_samps, delta, gamma, nMC, nBI, N)
}

glm_fixed_a0 <- function(dType0, dLink0, y0, n0, x0, borrow_treat0, historical0, init_var0, lower_limits0, upper_limits0, slice_widths0, nMC, nBI, dCurrent0) {
    .Call(`_BayesPPD_glm_fixed_a0`, dType0, dLink0, y0, n0, x0, borrow_treat0, historical0, init_var0, lower_limits0, upper_limits0, slice_widths0, nMC, nBI, dCurrent0)
}

glm_fixed_a0_normal <- function(y, x, borrow_treat, historical, nMC, nBI) {
    .Call(`_BayesPPD_glm_fixed_a0_normal`, y, x, borrow_treat, historical, nMC, nBI)
}

power_glm_fixed_a0 <- function(dType0, dLink0, n_total, n0, prob_treat0, borrow_treat0, historical0, ns, x_samps, beta_c_prior_samps, var_prior_samps, lower_limits0, upper_limits0, slice_widths0, delta, gamma, nMC, nBI, N, dCurrent0) {
    .Call(`_BayesPPD_power_glm_fixed_a0`, dType0, dLink0, n_total, n0, prob_treat0, borrow_treat0, historical0, ns, x_samps, beta_c_prior_samps, var_prior_samps, lower_limits0, upper_limits0, slice_widths0, delta, gamma, nMC, nBI, N, dCurrent0)
}

power_glm_fixed_a0_approx <- function(dType0, n_total, prob_treat, borrow_treat, historical0, ns, x_samps, beta_c_prior_samps, var_prior_samps, delta, gamma, nNR, tol, N) {
    .Call(`_BayesPPD_power_glm_fixed_a0_approx`, dType0, n_total, prob_treat, borrow_treat, historical0, ns, x_samps, beta_c_prior_samps, var_prior_samps, delta, gamma, nNR, tol, N)
}

two_grp_random_a0 <- function(dType0, y0, n0, historical0, b_010, b_020, c_10, c_20, lower_limits0, upper_limits0, slice_widths0, nMC, nBI) {
    .Call(`_BayesPPD_two_grp_random_a0`, dType0, y0, n0, historical0, b_010, b_020, c_10, c_20, lower_limits0, upper_limits0, slice_widths0, nMC, nBI)
}

two_grp_random_a0_normal <- function(y0, n0, v0, historical0, c_10, c_20, lower_limits0, upper_limits0, slice_widths0, nMC, nBI) {
    .Call(`_BayesPPD_two_grp_random_a0_normal`, y0, n0, v0, historical0, c_10, c_20, lower_limits0, upper_limits0, slice_widths0, nMC, nBI)
}

glm_random_a0_normal <- function(y_normal0, x_normal0, borrow_treat0, historical_normal0, c_10, c_20, lower_limits0, upper_limits0, slice_widths0, nMC, nBI) {
    .Call(`_BayesPPD_glm_random_a0_normal`, y_normal0, x_normal0, borrow_treat0, historical_normal0, c_10, c_20, lower_limits0, upper_limits0, slice_widths0, nMC, nBI)
}

power_two_grp_random_a0 <- function(dType0, n_t, n0, historical0, ns, mu_t_prior_samps, mu_c_prior_samps, var_t_prior_samps, var_c_prior_samps, b_t1, b_t2, b_010, b_020, c_10, c_20, lower_limits0, upper_limits0, slice_widths0, delta, gamma, nMC, nBI, N) {
    .Call(`_BayesPPD_power_two_grp_random_a0`, dType0, n_t, n0, historical0, ns, mu_t_prior_samps, mu_c_prior_samps, var_t_prior_samps, var_c_prior_samps, b_t1, b_t2, b_010, b_020, c_10, c_20, lower_limits0, upper_limits0, slice_widths0, delta, gamma, nMC, nBI, N)
}

power_glm_random_a0_normal <- function(n_total, prob_treat0, borrow_treat0, historical0, ns, beta_c_prior_samps, var_prior_samps, c_10, c_20, lower_limits0, upper_limits0, slice_widths0, delta, gamma, nMC, nBI, N) {
    .Call(`_BayesPPD_power_glm_random_a0_normal`, n_total, prob_treat0, borrow_treat0, historical0, ns, beta_c_prior_samps, var_prior_samps, c_10, c_20, lower_limits0, upper_limits0, slice_widths0, delta, gamma, nMC, nBI, N)
}

glm_random_a0 <- function(dType0, dLink0, y0, n0, x0, borrow_treat0, historical0, init_var0, c_10, c_20, coef0, lower_limits0, upper_limits0, slice_widths0, nMC, nBI) {
    .Call(`_BayesPPD_glm_random_a0`, dType0, dLink0, y0, n0, x0, borrow_treat0, historical0, init_var0, c_10, c_20, coef0, lower_limits0, upper_limits0, slice_widths0, nMC, nBI)
}

power_glm_random_a0 <- function(dType0, dLink0, n_total, n0, prob_treat0, borrow_treat0, historical0, ns, beta_c_prior_samps, init_var0, c_10, c_20, coef0, lower_limits0, upper_limits0, slice_widths0, delta, gamma, nMC, nBI, N) {
    .Call(`_BayesPPD_power_glm_random_a0`, dType0, dLink0, n_total, n0, prob_treat0, borrow_treat0, historical0, ns, beta_c_prior_samps, init_var0, c_10, c_20, coef0, lower_limits0, upper_limits0, slice_widths0, delta, gamma, nMC, nBI, N)
}
/scratch/gouwar.j/cran-all/cranData/BayesPPD/R/RcppExports.R
#' AIDS Clinical Trial ACTG019 (1990).
#'
#' A dataset containing the ACTG019 clinical trial placebo group data (1990) in adults with asymptomatic HIV.
#'
#' @format A data frame with 404 rows and 4 variables:
#' \describe{
#' \item{outcome}{binary variable with 1 indicating death, development of AIDS or ARC, and 0 otherwise}
#' \item{age}{patient age in years}
#' \item{race}{binary variable with 1 indicating white and 0 otherwise}
#' \item{T4count}{CD4 cell count (cell count per cubic millimetre of serum)}
#' }
#' @source Chen, Ming-Hui, et al. "Prior Elicitation, Variable Selection and Bayesian Computation for Logistic Regression Models." Journal of the Royal Statistical Society. Series B, vol. 61, no. 1, 1999, pp. 223-242.
"actg019"

#' AIDS Clinical Trial ACTG036 (1991).
#'
#' A dataset containing the ACTG036 clinical trial data (1991) comparing zidovudine (AZT) with a placebo in asymptomatic patients with hereditary coagulation disorders and HIV infection.
#' The ACTG036 trial had the same response variable and covariates as the ACTG019 study. The ACTG019 data can be used as a historical dataset.
#'
#' @format A data frame with 183 rows and 5 variables:
#' \describe{
#' \item{outcome}{binary variable with 1 indicating death, development of AIDS or ARC, and 0 otherwise}
#' \item{treat}{binary variable with 1 indicating zidovudine (AZT) treatment and 0 indicating placebo}
#' \item{age}{patient age in years}
#' \item{race}{binary variable with 1 indicating white and 0 otherwise}
#' \item{T4count}{CD4 cell count (cell count per cubic millimetre of serum)}
#' }
#' @source Chen, Ming-Hui, et al. "Prior Elicitation, Variable Selection and Bayesian Computation for Logistic Regression Models." Journal of the Royal Statistical Society. Series B, vol. 61, no. 1, 1999, pp. 223-242.
"actg036"
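## Illustrative workflow (assumed, not run): the ACTG019 placebo data can
## serve as historical information for an ACTG036 analysis, as noted above.
## The a0 = 0.5 discounting value is hypothetical.
# data(actg019); data(actg036)
# historical <- list(list(y0 = actg019$outcome,
#                         x0 = as.matrix(actg019[, c("age", "race", "T4count")]),
#                         a0 = 0.5))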
/scratch/gouwar.j/cran-all/cranData/BayesPPD/R/data.R
### glm, fixed a0 #####

#' Model fitting for generalized linear models with fixed a0
#'
#' @description Model fitting using power priors for generalized linear models with fixed \eqn{a_0}
#'
#' @param y Vector of responses.
#' @param x Matrix of covariates. The first column should be the treatment indicator with 1 indicating treatment group. The number of rows should equal the length of the response vector \code{y}.
#' @param n (For binomial data only) vector of integers specifying the number of subjects who have a particular value of the covariate vector. If the data is binary and all covariates are discrete, collapsing Bernoulli data into a binomial structure can make the slice sampler much faster.
#' The length of \code{n} should be equal to the number of rows of \code{x}.
#' @param historical (Optional) list of historical dataset(s). Each historical dataset is stored in a list which contains three \emph{named} elements: \code{y0}, \code{x0} and \code{a0}.
#' \itemize{
#' \item \code{y0} is a vector of responses.
#' \item \code{x0} is a matrix of covariates. If \code{borrow.treat} is FALSE (the default), \code{x0} should NOT have the treatment indicator. Apart from missing the treatment indicator, \code{x0} should have the same set of covariates in the same order as \code{x}.
#' If \code{borrow.treat} is TRUE, \code{x0} should have the same set of covariates in the same order as \code{x}, where the first column of \code{x0} must be the treatment indicator.
#' \item \code{a0} is a number between 0 and 1 indicating the discounting parameter value for that historical dataset.
#' }
#' For binomial data, an additional element \code{n0} is required.
#' \itemize{
#' \item \code{n0} is a vector of integers specifying the number of subjects who have a particular value of the covariate vector.
#' The length of \code{n0} should be equal to the number of rows of \code{x0}.
#' }
#' @param current.data Logical value indicating whether current data is included. The default is TRUE. If FALSE, only historical data is included in the analysis,
#' and the posterior samples can be used as a discrete approximation to the sampling prior in \code{\link{power.glm.fixed.a0}}.
#' @param prior.beta.var Only applies if current.data = FALSE. If no current data is provided, the initial priors used for \eqn{\beta} are i.i.d. normal distributions with mean zero and variance equal to \code{prior.beta.var}.
#' The length of the vector should be equal to the length of \eqn{\beta}. The default variance is 10.
#'
#' @inheritParams power.glm.fixed.a0
#'
#' @details If \code{data.type} is "Normal", the response \eqn{y_i} is assumed to follow \eqn{N(x_i'\beta, \tau^{-1})} where \eqn{x_i} is the vector of covariates for subject \eqn{i}.
#' Each historical dataset \eqn{D_{0k}} is assumed to have a different precision parameter \eqn{\tau_k}.
#' The initial prior for \eqn{\tau} is the Jeffreys prior, \eqn{\tau^{-1}}, and the initial prior for \eqn{\tau_k} is \eqn{\tau_k^{-1}}.
#' The initial prior for \eqn{\beta} is the uniform improper prior. Posterior samples are obtained through Gibbs sampling.
#'
#' For all other data types, posterior samples are obtained through slice sampling.
#' The default lower limits for the parameters are -100. The default upper limits
#' for the parameters are 100. The default slice widths for the parameters are 1.
#' The defaults may not be appropriate for all situations, and the user can specify the appropriate limits
#' and slice width for each parameter.
#'
#' When \code{current.data} is set to FALSE, only historical data is included in the analysis,
#' and the posterior samples can be used as a discrete approximation to the sampling prior in \code{\link{power.glm.fixed.a0}}.
#'
#' @return The function returns a S3 object with a \code{summary} method. If \code{data.type} is "Normal", posterior samples of \eqn{\beta}, \eqn{\tau} and \eqn{\tau_k}'s (if historical data is given) are returned.
#' For all other data types, a matrix of posterior samples of \eqn{\beta} is returned. The first column contains posterior samples of the intercept.
#' The second column contains posterior samples of \eqn{\beta_1}, the parameter for the treatment indicator.
#' @references Neal, Radford M. Slice sampling. Ann. Statist. 31 (2003), no. 3, 705--767.
#' @seealso \code{\link{power.glm.fixed.a0}}
#' @examples
#' data.type <- "Bernoulli"
#' data.link <- "Logistic"
#'
#' # Simulate current data
#' set.seed(1)
#' p <- 3
#' n_total <- 100
#' y <- rbinom(n_total,size=1,prob=0.6)
#' # The first column of x is the treatment indicator.
#' x <- cbind(rbinom(n_total,size=1,prob=0.5),
#'            matrix(rnorm(p*n_total),ncol=p,nrow=n_total))
#'
#' # Simulate two historical datasets
#' # Note that x0 does not have the treatment indicator
#' historical <- list(list(y0=rbinom(n_total,size=1,prob=0.2),
#'                         x0=matrix(rnorm(p*n_total),ncol=p,nrow=n_total), a0=0.2),
#'                    list(y0=rbinom(n_total, size=1, prob=0.5),
#'                         x0=matrix(rnorm(p*n_total),ncol=p,nrow=n_total), a0=0.3))
#'
#' # Set parameters of the slice sampler
#' lower.limits <- rep(-100, 5) # The dimension is the number of columns of x plus 1 (intercept)
#' upper.limits <- rep(100, 5)
#' slice.widths <- rep(1, 5)
#'
#' nMC <- 1000 # nMC should be larger in practice
#' nBI <- 250
#' result <- glm.fixed.a0(data.type=data.type, data.link=data.link, y=y, x=x, historical=historical,
#'                        lower.limits=lower.limits, upper.limits=upper.limits,
#'                        slice.widths=slice.widths, nMC=nMC, nBI=nBI)
#'
#' summary(result)
#'
#' @export
glm.fixed.a0 <- function(data.type, data.link, y=0, x=matrix(), n=1, borrow.treat=FALSE,
                         historical=list(), lower.limits=rep(-100, 50),
                         upper.limits=rep(100, 50), slice.widths=rep(1, 50),
                         nMC=10000, nBI=250, current.data=TRUE,
                         prior.beta.var = rep(10, 50)) {

  if(is.na(x[1,1])){
    x <- historical[[1]]$x0
  }
  if(is.null(colnames(x))){
    colnames(x) <- paste0("X", 1:ncol(x))
  }

  if(data.type == "Normal"){
    result <- glm_fixed_a0_normal(y, x, borrow.treat, historical, nMC, nBI)
    colnames(result$`posterior samples of beta`) <- c("intercept", colnames(x))
  }else{
    result <- glm_fixed_a0(data.type, data.link, y, n, x, borrow.treat, historical,
                           prior.beta.var, lower.limits, upper.limits, slice.widths,
                           nMC, nBI, current.data)
    colnames(result) <- c("intercept", colnames(x))
  }

  out <- list(posterior.samples=result, data.type=data.type)
  structure(out, class=c("glmfixed"))
}

#' @importFrom stats quantile
#' @export
summary.glmfixed <- function(object, ...)
{
  r <- object$posterior.samples

  if(object$data.type=="Normal"){
    # beta
    betas <- r$`posterior samples of beta`
    m <- apply(betas, 2, mean)
    std <- apply(betas, 2, sd)
    q1 <- apply(betas, 2, quantile, probs=0.025)
    q2 <- apply(betas, 2, quantile, probs=0.975)
    output1 <- cbind("mean"=m,"sd"=std,"2.5%"=q1,"97.5%"=q2)
    output1 <- round(output1, digits=3)
    # tau
    taus <- r$`posterior samples of tau`
    output2 <- cbind("mean"=mean(taus),"sd"=sd(taus),
                     "2.5%"=quantile(taus,probs=0.025),
                     "97.5%"=quantile(taus,probs=0.975))
    rownames(output2) <- "tau"
    output2 <- round(output2, digits=3)
    # tau0
    if(length(r)==2){
      output3 <- NULL
    }else{
      tau0s <- r$`posterior samples of tau_0`
      m <- apply(tau0s, 2, mean)
      std <- apply(tau0s, 2, sd)
      q1 <- apply(tau0s, 2, quantile, probs=0.025)
      q2 <- apply(tau0s, 2, quantile, probs=0.975)
      output3 <- cbind("mean"=m,"sd"=std,"2.5%"=q1,"97.5%"=q2)
      rownames(output3) <- paste0("tau0_", 1:nrow(output3))
      output3 <- round(output3, digits=3)
    }
    out <- rbind(output1, output2, output3)
    out
  }else{
    m <- apply(r, 2, mean)
    std <- apply(r, 2, sd)
    q1 <- apply(r, 2, quantile, probs=0.025)
    q2 <- apply(r, 2, quantile, probs=0.975)
    output <- cbind("mean"=m,"sd"=std,"2.5%"=q1,"97.5%"=q2)
    round(output, digits=3)
  }
}

#' Power/type I error calculation for generalized linear models with fixed a0
#'
#' @description Power/type I error calculation for generalized linear models with fixed \eqn{a_0} using power priors
#'
#' @param data.type Character string specifying the type of response. The options are "Normal", "Bernoulli", "Binomial", "Poisson" and "Exponential".
#' @param data.link Character string specifying the link function. The options are "Logistic", "Probit", "Log", "Identity-Positive", "Identity-Probability" and "Complementary Log-Log". Does not apply if \code{data.type} is "Normal".
#' @param n (For binomial data only) vector of integers specifying the number of subjects who have a particular value of the covariate vector. If the data is binary and all covariates are discrete, collapsing Bernoulli data into a binomial structure can make the slice sampler much faster.
#' The sum of \code{n} should be equal to \code{data.size}. The length of \code{n} should be equal to the number of rows of \code{x0}.
#' @param borrow.treat Logical value indicating whether the historical information is used to inform the treatment effect parameter. The default value is FALSE. If TRUE, the first column of the historical covariate matrix must be the treatment indicator.
#' If FALSE, the historical covariate matrix must NOT have the treatment indicator, since the historical data is assumed to be from the control group only.
#' @param treat.assign.prob Probability of being assigned to the treatment group. The default value is 0.5. Only applies if \code{borrow.treat=FALSE}.
#' @param historical (Optional) list of historical dataset(s). Each historical dataset is stored in a list which contains three \emph{named} elements: \code{y0}, \code{x0} and \code{a0}.
#' \itemize{
#' \item \code{y0} is a vector of responses.
#' \item \code{x0} is a matrix of covariates. If \code{borrow.treat} is FALSE (the default), \code{x0} should NOT have the treatment indicator.
#' If \code{borrow.treat} is TRUE, the first column of \code{x0} must be the treatment indicator.
#' \item \code{a0} is a number between 0 and 1 indicating the discounting parameter value for that historical dataset.
#' }
#' For binomial data, an additional element \code{n0} is required.
#' \itemize{
#' \item \code{n0} is a vector of integers specifying the number of subjects who have a particular value of the covariate vector.
#' The length of \code{n0} should be equal to the number of rows of \code{x0}.
#' }
#' @param nullspace.ineq Character string specifying the inequality of the null hypothesis. The options are ">" and "<". If ">" is specified, the null hypothesis is \eqn{H_0}: \eqn{\beta_1} \eqn{\ge} \eqn{\delta}. If "<" is specified, the null hypothesis is \eqn{H_0}: \eqn{\beta_1} \eqn{\le} \eqn{\delta}. The default choice is ">".
#' @param x.samples (Only applies when there is no historical dataset) matrix of possible values of covariates from which covariate vectors are sampled with replacement.
#' @param samp.prior.beta Matrix of possible values of \eqn{\beta} to sample (with replacement) from. Each row is a possible \eqn{\beta} vector (a realization from the sampling prior for \eqn{\beta}), where the first element is the coefficient for the intercept and the second element is the coefficient for the treatment indicator.
#' The length of the vector should be equal to the total number of parameters. If P is the number of columns of \code{x0} in \code{historical}, the total number of parameters is P+2 if \code{borrow.treat=FALSE}, and is P+1 if \code{borrow.treat=TRUE}.
#' @param samp.prior.var Vector of possible values of \eqn{\sigma^2} to sample (with replacement) from. Only applies if \code{data.type} is "Normal". The vector contains realizations from the sampling prior (e.g. inverse-gamma distribution) for \eqn{\sigma^2}.
#' @param data.size Sample size of the simulated datasets.
#' @param lower.limits Vector of lower limits for parameters to be used by the slice sampler. The length of the vector should be equal to the total number of parameters, i.e. P+1 where P is the number of covariates. The default is -100 for all parameters (may not be appropriate for all situations). Does not apply if \code{data.type} is "Normal".
#' @param upper.limits Vector of upper limits for parameters to be used by the slice sampler. The length of the vector should be equal to the total number of parameters, i.e. P+1 where P is the number of covariates. The default is 100 for all parameters (may not be appropriate for all situations). Does not apply if \code{data.type} is "Normal".
#' @param slice.widths Vector of initial slice widths for parameters to be used by the slice sampler. The length of the vector should be equal to the total number of parameters, i.e. P+1 where P is the number of covariates. The default is 1 for all parameters (may not be appropriate for all situations). Does not apply if \code{data.type} is "Normal".
#' @param nMC Number of iterations (excluding burn-in samples) for the slice sampler or Gibbs sampler. The default is 10,000.
#' @param nBI Number of burn-in samples for the slice sampler or Gibbs sampler. The default is 250.
#' @param delta Prespecified constant that defines the boundary of the null hypothesis. The default is zero.
#' @param gamma Posterior probability threshold for rejecting the null. The null hypothesis is rejected if the posterior probability is greater than \code{gamma}. The default is 0.95.
#' @param N Number of simulated datasets to generate. The default is 10,000.
#' @param approximate Logical value indicating whether the approximation method based on asymptotic theory is used. The default is FALSE. If TRUE, an approximation method based on the Newton-Raphson algorithm (assuming canonical links) is used.
#' This feature helps users quickly obtain a rough estimate of the sample size required for the desired level of power or type I error rate.
#' @param nNR (Only applies if \code{approximate=TRUE}) number of iterations of the Newton-Raphson algorithm. The default value is 10,000.
#' @param tol (Only applies if \code{approximate=TRUE}) absolute tolerance of the Newton-Raphson algorithm. The default value is 0.00001.
#'
#' @details If historical datasets are provided, the algorithm samples with replacement from the historical covariates to construct the simulated datasets.
#' Otherwise, the algorithm samples with replacement from \code{x.samples}. One of the arguments \code{historical} and \code{x.samples} must be provided.
#'
#' The sampling prior for the treatment parameter can be generated from a normal distribution (see examples).
#' For example, suppose one wants to compute the power for the hypotheses \eqn{H_0: \beta_1 \ge 0} and \eqn{H_1: \beta_1 < 0.}
#' To approximate the sampling prior for \eqn{\beta_1}, one can simply sample from a normal distribution with negative mean,
#' so that the mass of the prior falls in the alternative space. Conversely, to compute the type I error rate, one can
#' sample from a normal distribution with positive mean, so that the mass of the prior falls in the null space.
#' The sampling prior for the other parameters can be generated by using the \code{glm.fixed.a0} function with \code{current.data} set to FALSE.
#' The posterior samples based on only the historical data can be used as a discrete approximation to the sampling prior.
#'
#' \code{samp.prior.var} is necessary for generating normally distributed data.
#'
#' If \code{data.type} is "Normal", the response \eqn{y_i} is assumed to follow \eqn{N(x_i'\beta, \tau^{-1})} where \eqn{x_i} is the vector of covariates for subject \eqn{i}.
#' Each historical dataset \eqn{D_{0k}} is assumed to have a different precision parameter \eqn{\tau_k}.
#' The initial prior for \eqn{\tau} is the Jeffreys prior, \eqn{\tau^{-1}}, and the initial prior for \eqn{\tau_k} is \eqn{\tau_k^{-1}}.
#' The initial prior for \eqn{\beta} is the uniform improper prior. Posterior samples are obtained through Gibbs sampling.
#'
#' For all other data types, posterior samples are obtained through slice sampling.
#' The default lower limits for the parameters are -100. The default upper limits
#' for the parameters are 100. The default slice widths for the parameters are 1.
#' The defaults may not be appropriate for all situations, and the user can specify the appropriate limits
#' and slice width for each parameter.
#'
#' If a sampling prior with support in the null space is used, the value returned is a Bayesian type I error rate.
#' If a sampling prior with support in the alternative space is used, the value returned is a Bayesian power.
#'
#' Because running \code{power.glm.fixed.a0()} and \code{power.glm.random.a0()} is potentially time-consuming,
#' an approximation method based on asymptotic theory has been implemented for the model with fixed \eqn{a_0}.
#' In order to attain the exact sample size needed for the desired power, the user can start with the approximation
#' to get a rough estimate of the sample size required, using \code{power.glm.fixed.a0()} with \code{approximate=TRUE}.
#'
#' @return The function returns an S3 object with a \code{summary} method. Power or type I error is returned, depending on the sampling prior used.
#' The posterior probabilities of the alternative hypothesis are returned.
#' The average posterior mean of \eqn{\beta} and its corresponding bias are returned.
#' If \code{data.type} is "Normal", average posterior means of \eqn{\tau} and \eqn{\tau_k}'s (if historical data is given) are also returned.
#' The first column of \eqn{\beta} contains posterior samples of the intercept. The second column contains posterior samples of \eqn{\beta_1}, the parameter for the treatment indicator.
#' @references Chen, Ming-Hui, et al. "Bayesian design of noninferiority trials for medical devices using historical data." Biometrics 67.3 (2011): 1163-1170.
#'
#' Neal, Radford M. Slice sampling. Ann. Statist. 31 (2003), no. 3, 705--767.
#'
#' @seealso \code{\link{glm.fixed.a0}}
#' @examples
#'
#' data.type <- "Bernoulli"
#' data.link <- "Logistic"
#' data.size <- 100
#'
#' # Simulate two historical datasets
#' p <- 3
#' historical <- list(list(y0=rbinom(data.size,size=1,prob=0.2),
#'                         x0=matrix(rnorm(p*data.size),ncol=p,nrow=data.size), a0=0.2),
#'                    list(y0=rbinom(data.size, size=1, prob=0.5),
#'                         x0=matrix(rnorm(p*data.size),ncol=p,nrow=data.size), a0=0.3))
#'
#' # Generate sampling priors
#'
#' # The null hypothesis here is H0: beta_1 >= 0. To calculate power,
#' # we can provide samples of beta_1 such that the mass of beta_1 < 0.
#' # To calculate type I error, we can provide samples of beta_1 such that
#' # the mass of beta_1 >= 0.
#' samp.prior.beta1 <- rnorm(100, mean=-3, sd=1)
#' # Here, mass is put on the alternative region, so power is calculated.
#' samp.prior.beta <- cbind(rnorm(100), samp.prior.beta1, matrix(rnorm(100*p), 100, p))
#'
#' nMC <- 100 # nMC should be larger in practice
#' nBI <- 50
#' N <- 5 # N should be larger in practice
#' result <- power.glm.fixed.a0(data.type=data.type, data.link=data.link,
#'                              data.size=data.size, historical=historical,
#'                              samp.prior.beta=samp.prior.beta,
#'                              delta=0, nMC=nMC, nBI=nBI, N=N)
#' summary(result)
#'
#' @export
power.glm.fixed.a0 <- function(data.type, data.link="", data.size, n=1, borrow.treat=FALSE,
                               treat.assign.prob=0.5, historical=list(), nullspace.ineq=">",
                               x.samples=matrix(), samp.prior.beta, samp.prior.var=0,
                               lower.limits=rep(-100, 50), upper.limits=rep(100, 50),
                               slice.widths=rep(1, 50), delta=0, gamma=0.95,
                               nMC=10000, nBI=250, N=10000, approximate=FALSE, nNR=10000, tol=0.00001) {

  if(approximate==TRUE){
    return(power_glm_fixed_a0_approx(data.type, data.size, treat.assign.prob, borrow.treat,
                                     historical, nullspace.ineq, x.samples, samp.prior.beta,
                                     samp.prior.var, delta, gamma, nNR, tol, N))
  }else{

    if(length(historical)!=0){
      x <- historical[[1]]$x0
      if(is.null(colnames(x))){
        colnames(x) <- paste0("X", 1:ncol(x))
      }
    }else{
      # When no historical data is given, covariate names are taken from x.samples.
      if(is.null(colnames(x.samples))){
        colnames(x.samples) <- paste0("X", 1:ncol(x.samples))
      }
      x <- x.samples
    }

    out <- power_glm_fixed_a0(data.type, data.link, data.size, n, treat.assign.prob, borrow.treat,
                              historical, nullspace.ineq, x.samples, samp.prior.beta, samp.prior.var,
                              lower.limits, upper.limits, slice.widths,
                              delta, gamma, nMC, nBI, N, TRUE)

    if(borrow.treat==FALSE){
      rownames(out$`average posterior mean of beta`) <- c("intercept","treatment (beta_1)",colnames(x))
    }else{
      rownames(out$`average posterior mean of beta`) <- c("intercept", paste(colnames(x)[1],"(beta_1)"),colnames(x)[-1])
    }

    structure(out, class=c("powerglm"))
  }
}

#' @export
summary.powerglm <- function(object, ...)
{
  r <- round(object$`power/type I error`, digits=3)
  # beta
  betas <- object$`average posterior mean of beta`
  bias <- object$`bias of the average posterior mean of beta`
  postprob <- mean(object[[2]])
  output1 <- cbind(betas, bias)
  colnames(output1) <- c("average posterior mean","bias")
  output1 <- round(output1, digits=3)
  print(output1)
  cat("The power/type I error rate is ", r, ".\n")
  cat("The average of the", names(object[2]), "is", round(postprob, 3), ".")
}
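## The @details of power.glm.fixed.a0() above describe building samp.prior.beta
## by combining a sampling prior for the treatment effect with posterior samples
## based on the historical data alone. The hedged sketch below (wrapped in
## `if (FALSE)` so it is never run at package load) illustrates that workflow;
## it assumes glm.fixed.a0() accepts current.data=FALSE and that, for
## non-normal data, its posterior.samples element is the matrix of beta draws
## (as summary.glmfixed() above suggests). Object names are illustrative only.
if (FALSE) {
  p <- 3
  data.size <- 100
  historical <- list(list(y0=rbinom(data.size, size=1, prob=0.2),
                          x0=matrix(rnorm(p*data.size), ncol=p, nrow=data.size), a0=0.2))
  # Fit the model to the historical data alone (no current data)
  fit.hist <- glm.fixed.a0(data.type="Bernoulli", data.link="Logistic",
                           historical=historical, nMC=1000, nBI=100,
                           current.data=FALSE)
  beta.hist <- fit.hist$posterior.samples  # columns: intercept, covariates
  # Treatment-effect sampling prior with mass in the alternative space
  samp.prior.beta1 <- rnorm(nrow(beta.hist), mean=-3, sd=1)
  # Columns: intercept, treatment, covariates (borrow.treat=FALSE, so P+2 total)
  samp.prior.beta <- cbind(beta.hist[, 1], samp.prior.beta1, beta.hist[, -1])
}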
## file: BayesPPD/R/glm_fixed.R
### glm, random a0 #####

#' Model fitting for generalized linear models with random a0
#'
#' @description Model fitting using normalized power priors for generalized linear models with random \eqn{a_0}
#'
#' @param historical List of historical dataset(s). Each historical dataset is stored in a list which contains two \emph{named} elements: \code{y0} and \code{x0}.
#' \itemize{
#' \item \code{y0} is a vector of responses.
#' \item \code{x0} is a matrix of covariates. If \code{borrow.treat} is FALSE (the default), \code{x0} should NOT have the treatment indicator. Apart from missing the treatment indicator, \code{x0} should have the same set of covariates in the same order as \code{x}.
#' If \code{borrow.treat} is TRUE, \code{x0} should have the same set of covariates in the same order as \code{x}, where the first column of \code{x0} must be the treatment indicator.
#' }
#' For binomial data, an additional element \code{n0} is required.
#' \itemize{
#' \item \code{n0} is a vector of integers specifying the number of subjects who have a particular value of the covariate vector.
#' The length of \code{n0} should be equal to the number of rows of \code{x0}.
#' }
#' @param prior.beta.var Vector of variances of the independent normal initial priors on \eqn{\beta} with mean zero. The length of the vector should be equal to the length of \eqn{\beta}. The default variance is 10.
#' @param lower.limits Vector of lower limits for parameters to be used by the slice sampler. If \code{data.type} is "Normal", slice sampling is used for \eqn{a_0}, and the length of the vector should be equal to the number of historical datasets.
#' For all other data types, slice sampling is used for \eqn{\beta} and \eqn{a_0}. The first P+1 elements apply to the sampling of \eqn{\beta} and the rest apply to the sampling of \eqn{a_0}.
#' The length of the vector should be equal to the sum of the total number of parameters (i.e. P+1 where P is the number of covariates) and the number of historical datasets.
#' The default is -100 for \eqn{\beta} and 0 for \eqn{a_0} (may not be appropriate for all situations).
#' @param upper.limits Vector of upper limits for parameters to be used by the slice sampler. If \code{data.type} is "Normal", slice sampling is used for \eqn{a_0}, and the length of the vector should be equal to the number of historical datasets.
#' For all other data types, slice sampling is used for \eqn{\beta} and \eqn{a_0}. The first P+1 elements apply to the sampling of \eqn{\beta} and the rest apply to the sampling of \eqn{a_0}.
#' The length of the vector should be equal to the sum of the total number of parameters (i.e. P+1 where P is the number of covariates) and the number of historical datasets.
#' The default is 100 for \eqn{\beta} and 1 for \eqn{a_0} (may not be appropriate for all situations).
#' @param slice.widths Vector of initial slice widths used by the slice sampler. If \code{data.type} is "Normal", slice sampling is used for \eqn{a_0}, and the length of the vector should be equal to the number of historical datasets.
#' For all other data types, slice sampling is used for \eqn{\beta} and \eqn{a_0}. The first P+1 elements apply to the sampling of \eqn{\beta} and the rest apply to the sampling of \eqn{a_0}.
#' The length of the vector should be equal to the sum of the total number of parameters (i.e. P+1 where P is the number of covariates) and the number of historical datasets.
#' The default is 0.1 for all parameters (may not be appropriate for all situations).
#' @param a0.coefficients Vector of coefficients for \eqn{a_0} returned by the function \code{\link{normalizing.constant}}. This is necessary for estimating the normalizing constant for the normalized power prior. Does not apply if \code{data.type} is "Normal".
#'
#' @inheritParams power.glm.fixed.a0
#' @inheritParams glm.fixed.a0
#' @inheritParams two.grp.random.a0
#'
#' @details
#' The user should use the function \code{\link{normalizing.constant}} to obtain \code{a0.coefficients} (does not apply if \code{data.type} is "Normal").
#'
#' If \code{data.type} is "Normal", the response \eqn{y_i} is assumed to follow \eqn{N(x_i'\beta, \tau^{-1})} where \eqn{x_i} is the vector of covariates for subject \eqn{i}.
#' Historical datasets are assumed to have the same precision parameter as the current dataset for computational simplicity.
#' The initial prior for \eqn{\tau} is the Jeffreys prior, \eqn{\tau^{-1}}.
#' Independent normal priors with mean zero and variance \code{prior.beta.var} are used for \eqn{\beta} to ensure the propriety of the normalized power prior. Posterior samples for \eqn{\beta} and \eqn{\tau} are obtained through Gibbs sampling.
#' Independent beta(\code{prior.a0.shape1}, \code{prior.a0.shape2}) priors are used for \eqn{a_0}. Posterior samples for \eqn{a_0} are obtained through slice sampling.
#'
#' For all other data types, posterior samples are obtained through slice sampling.
#' The default lower limits are -100 for \eqn{\beta} and 0 for \eqn{a_0}. The default upper limits
#' for the parameters are 100 for \eqn{\beta} and 1 for \eqn{a_0}. The default slice widths for the parameters are 0.1.
#' The defaults may not be appropriate for all situations, and the user can specify the appropriate limits
#' and slice width for each parameter.
#'
#'
#' @return The function returns an S3 object with a \code{summary} method. If \code{data.type} is "Normal", posterior samples of \eqn{\beta}, \eqn{\tau} and \eqn{a_0} are returned.
#' For all other data types, posterior samples of \eqn{\beta} and \eqn{a_0} are returned.
#' The first column of the matrix of posterior samples of \eqn{\beta} contains posterior samples of the intercept.
#' The second column contains posterior samples of \eqn{\beta_1}, the parameter for the treatment indicator.
#' @references Neal, Radford M. Slice sampling. Ann. Statist. 31 (2003), no. 3, 705--767.
#' @seealso \code{\link{normalizing.constant}} and \code{\link{power.glm.random.a0}}
#' @examples
#'
#' data.type <- "Bernoulli"
#' data.link <- "Logistic"
#'
#' # Simulate current data
#' set.seed(1)
#' p <- 3
#' n_total <- 100
#' y <- rbinom(n_total,size=1,prob=0.6)
#' # The first column of x is the treatment indicator.
#' x <- cbind(rbinom(n_total,size=1,prob=0.5),
#'            matrix(rnorm(p*n_total),ncol=p,nrow=n_total))
#'
#' # Simulate two historical datasets
#' # Note that x0 does not have the treatment indicator
#' historical <- list(list(y0=rbinom(n_total,size=1,prob=0.2),
#'                         x0=matrix(rnorm(p*n_total),ncol=p,nrow=n_total)),
#'                    list(y0=rbinom(n_total, size=1, prob=0.5),
#'                         x0=matrix(rnorm(p*n_total),ncol=p,nrow=n_total)))
#'
#' # Please see function "normalizing.constant" for how to obtain a0.coefficients
#' # Here, suppose one-degree polynomial regression is chosen by the "normalizing.constant"
#' # function. The coefficients are obtained for the intercept, a0_1 and a0_2.
#' a0.coefficients <- c(1, 0.5, -1)
#'
#' # Set parameters of the slice sampler
#' # The dimension is the number of columns of x plus 1 (intercept)
#' # plus the number of historical datasets
#' lower.limits <- c(rep(-100, 5), rep(0, 2))
#' upper.limits <- c(rep(100, 5), rep(1, 2))
#' slice.widths <- rep(0.1, 7)
#'
#' nMC <- 500 # nMC should be larger in practice
#' nBI <- 100
#' result <- glm.random.a0(data.type=data.type, data.link=data.link, y=y, x=x,
#'                         historical=historical, a0.coefficients=a0.coefficients,
#'                         lower.limits=lower.limits, upper.limits=upper.limits,
#'                         slice.widths=slice.widths, nMC=nMC, nBI=nBI)
#' summary(result)
#'
#' @export
glm.random.a0 <- function(data.type, data.link, y, x, n=1, borrow.treat=FALSE, historical,
                          prior.beta.var=rep(10,50), prior.a0.shape1=rep(1,10), prior.a0.shape2=rep(1,10),
                          a0.coefficients, lower.limits=NULL, upper.limits=NULL,
                          slice.widths=rep(0.1, 50), nMC=10000, nBI=250) {

  if(is.null(colnames(x))){
    colnames(x) <- paste0("X", 1:ncol(x))
  }

  if(data.type == "Normal"){
    if(is.null(lower.limits)){ lower.limits = rep(0,length(historical)) }
    if(is.null(upper.limits)){ upper.limits = rep(1,length(historical)) }
    result <- glm_random_a0_normal(y, x, borrow.treat, historical, prior.a0.shape1, prior.a0.shape2,
                                   lower.limits, upper.limits, slice.widths, nMC, nBI)
  }else{
    if(is.null(lower.limits)){ lower.limits = c(rep(-100, ncol(x)+1), rep(0,length(historical))) }
    if(is.null(upper.limits)){ upper.limits = c(rep(100, ncol(x)+1), rep(1,length(historical))) }
    result <- glm_random_a0(data.type, data.link, y, n, x, borrow.treat, historical,
                            prior.beta.var, prior.a0.shape1, prior.a0.shape2, a0.coefficients,
                            lower.limits, upper.limits, slice.widths, nMC, nBI)
  }

  colnames(result$`posterior samples of beta`) <- c("intercept",colnames(x))

  out <- list(posterior.samples=result, data.type=data.type, x=x)
  structure(out, class=c("glmrandom"))
}

#' @importFrom stats quantile
#' @export
summary.glmrandom <- function(object, ...)
{
  r <- object$posterior.samples

  # beta
  betas <- r$`posterior samples of beta`
  m <- apply(betas, 2, mean)
  std <- apply(betas, 2, sd)
  q1 <- apply(betas, 2, quantile, probs=0.025)
  q2 <- apply(betas, 2, quantile, probs=0.975)
  output1 <- cbind("mean"=m,"sd"=std,"2.5%"=q1,"97.5%"=q2)
  if(is.null(colnames(object$x))){
    colnames(object$x) <- paste0("V", 1:ncol(object$x))
  }
  rownames(output1) <- c("intercept",colnames(object$x))
  output1 <- round(output1, digits=3)

  # tau
  if(object$data.type=="Normal"){
    taus <- r$`posterior samples of tau`
    output2 <- cbind("mean"=mean(taus),"sd"=sd(taus),"2.5%"=quantile(taus,probs=0.025),"97.5%"=quantile(taus,probs=0.975))
    rownames(output2) <- "tau"
    output2 <- round(output2, digits=3)
  }else{
    output2 <- NULL
  }

  # a0
  a0s <- r$`posterior samples of a0`
  m <- apply(a0s, 2, mean)
  std <- apply(a0s, 2, sd)
  q1 <- apply(a0s, 2, quantile, probs=0.025)
  q2 <- apply(a0s, 2, quantile, probs=0.975)
  output3 <- cbind("mean"=m,"sd"=std,"2.5%"=q1,"97.5%"=q2)
  rownames(output3) <- paste0("a0_", 1:nrow(output3))
  output3 <- round(output3, digits=3)

  out <- rbind(output1, output2, output3)
  out
}

#' Power/type I error calculation for generalized linear models with random a0
#'
#' @description Power/type I error calculation using normalized power priors for generalized linear models with random \eqn{a_0}
#'
#' @param historical List of historical dataset(s). Each historical dataset is stored in a list which contains two \emph{named} elements: \code{y0} and \code{x0}.
#' \itemize{
#' \item \code{y0} is a vector of responses.
#' \item \code{x0} is a matrix of covariates. If \code{borrow.treat} is FALSE (the default), \code{x0} should NOT have the treatment indicator.
#' If \code{borrow.treat} is TRUE, the first column of \code{x0} must be the treatment indicator.
#' }
#' For binomial data, an additional element \code{n0} is required.
#' \itemize{
#' \item \code{n0} is a vector of integers specifying the number of subjects who have a particular value of the covariate vector.
#' The length of \code{n0} should be equal to the number of rows of \code{x0}.
#' }
#' @inheritParams glm.random.a0
#' @inheritParams power.glm.fixed.a0
#' @inheritParams two.grp.random.a0
#' @details The user should use the function \code{\link{normalizing.constant}} to obtain \code{a0.coefficients} (does not apply if \code{data.type} is "Normal").
#'
#' The sampling prior for the treatment parameter can be generated from a normal distribution (see examples).
#' For example, suppose one wants to compute the power for the hypotheses \eqn{H_0: \beta_1 \ge 0} and \eqn{H_1: \beta_1 < 0.}
#' To approximate the sampling prior for \eqn{\beta_1}, one can simply sample from a normal distribution with negative mean,
#' so that the mass of the prior falls in the alternative space. Conversely, to compute the type I error rate, one can
#' sample from a normal distribution with positive mean, so that the mass of the prior falls in the null space.
#' The sampling prior for the other parameters can be generated by using the \code{glm.fixed.a0} function with \code{current.data} set to FALSE.
#' The posterior samples based on only the historical data can be used as a discrete approximation to the sampling prior.
#'
#' \code{samp.prior.var} is necessary for generating normally distributed data.
#'
#' If \code{data.type} is "Normal", the response \eqn{y_i} is assumed to follow \eqn{N(x_i'\beta, \tau^{-1})} where \eqn{x_i} is the vector of covariates for subject \eqn{i}.
#' Historical datasets are assumed to have the same precision parameter as the current dataset for computational simplicity.
#' The initial prior for \eqn{\tau} is the Jeffreys prior, \eqn{\tau^{-1}}.
#' Independent normal priors with mean zero and variance \code{prior.beta.var} are used for \eqn{\beta} to ensure the propriety of the normalized power prior. Posterior samples for \eqn{\beta} and \eqn{\tau} are obtained through Gibbs sampling.
#' Independent beta(\code{prior.a0.shape1}, \code{prior.a0.shape2}) priors are used for \eqn{a_0}. Posterior samples for \eqn{a_0} are obtained through slice sampling.
#'
#' For all other data types, posterior samples are obtained through slice sampling.
#' The default lower limits are -100 for \eqn{\beta} and 0 for \eqn{a_0}. The default upper limits
#' for the parameters are 100 for \eqn{\beta} and 1 for \eqn{a_0}. The default slice widths for the parameters are 0.1.
#' The defaults may not be appropriate for all situations, and the user can specify the appropriate limits
#' and slice width for each parameter.
#'
#' If a sampling prior with support in the null space is used, the value returned is a Bayesian type I error rate.
#' If a sampling prior with support in the alternative space is used, the value returned is a Bayesian power.
#'
#' Because running \code{power.glm.fixed.a0()} and \code{power.glm.random.a0()} is potentially time-consuming,
#' an approximation method based on asymptotic theory has been implemented for the model with fixed \eqn{a_0}.
#' In order to attain the exact sample size needed for the desired power, the user can start with the approximation
#' to get a rough estimate of the sample size required, using \code{power.glm.fixed.a0()} with \code{approximate=TRUE}.
#'
#' @return The function returns an S3 object with a \code{summary} method. Power or type I error is returned, depending on the sampling prior used.
#' The posterior probabilities of the alternative hypothesis are returned.
#' The average posterior mean of \eqn{\beta} and its corresponding bias are returned.
#' The average posterior mean of \eqn{a_0} is returned.
#' If \code{data.type} is "Normal", the average posterior mean of \eqn{\tau} is also returned.
#' The first element of the average posterior means of \eqn{\beta} is the average posterior mean of the intercept.
#' The second element is the average posterior mean of \eqn{\beta_1}, the parameter for the treatment indicator.
#'
#' @references Chen, Ming-Hui, et al. "Bayesian design of noninferiority trials for medical devices using historical data." Biometrics 67.3 (2011): 1163-1170.
#'
#' Neal, Radford M. Slice sampling. Ann. Statist. 31 (2003), no. 3, 705--767.
#' @seealso \code{\link{normalizing.constant}} and \code{\link{glm.random.a0}}
#' @examples
#'
#' data.type <- "Bernoulli"
#' data.link <- "Logistic"
#' data.size <- 100
#'
#' # Simulate two historical datasets
#' p <- 3
#' historical <- list(list(y0=rbinom(data.size,size=1,prob=0.2),
#'                         x0=matrix(rnorm(p*data.size),ncol=p,nrow=data.size)),
#'                    list(y0=rbinom(data.size, size=1, prob=0.5),
#'                         x0=matrix(rnorm(p*data.size),ncol=p,nrow=data.size)))
#'
#' # Generate sampling priors
#'
#' # The null hypothesis here is H0: beta_1 >= 0. To calculate power,
#' # we can provide samples of beta_1 such that the mass of beta_1 < 0.
#' # To calculate type I error, we can provide samples of beta_1 such that
#' # the mass of beta_1 >= 0.
#' samp.prior.beta1 <- rnorm(100, mean=-3, sd=1)
#' # Here, mass is put on the alternative region, so power is calculated.
#' samp.prior.beta <- cbind(rnorm(100), samp.prior.beta1, matrix(rnorm(100*p), 100, p))
#'
#' # Please see function "normalizing.constant" for how to obtain a0.coefficients
#' # Here, suppose one-degree polynomial regression is chosen by the "normalizing.constant"
#' # function. The coefficients are obtained for the intercept, a0_1 and a0_2.
#' a0.coefficients <- c(1, 0.5, -1)
#'
#' nMC <- 100 # nMC should be larger in practice
#' nBI <- 50
#' N <- 3 # N should be larger in practice
#' result <- power.glm.random.a0(data.type=data.type, data.link=data.link,
#'                               data.size=data.size, historical=historical,
#'                               samp.prior.beta=samp.prior.beta, a0.coefficients=a0.coefficients,
#'                               delta=0, nMC=nMC, nBI=nBI, N=N)
#' summary(result)
#'
#' @export
power.glm.random.a0 <- function(data.type, data.link, data.size, n=1, treat.assign.prob=0.5,
                                borrow.treat=FALSE, historical, nullspace.ineq=">",
                                samp.prior.beta, samp.prior.var,
                                prior.beta.var=rep(10,50), prior.a0.shape1=rep(1,10), prior.a0.shape2=rep(1,10),
                                a0.coefficients, lower.limits=NULL, upper.limits=NULL,
                                slice.widths=rep(0.1, 50), delta=0, gamma=0.95,
                                nMC=10000, nBI=250, N=10000) {

  x <- historical[[1]]$x0
  if(is.null(colnames(x))){
    colnames(x) <- paste0("X", 1:ncol(x))
  }

  if(data.type == "Normal"){
    if(is.null(lower.limits)){ lower.limits = rep(0,length(historical)) }
    if(is.null(upper.limits)){ upper.limits = rep(1,length(historical)) }
    out <- power_glm_random_a0_normal(data.size, treat.assign.prob, borrow.treat, historical, nullspace.ineq,
                                      samp.prior.beta, samp.prior.var, prior.a0.shape1, prior.a0.shape2,
                                      lower.limits, upper.limits, slice.widths,
                                      delta, gamma, nMC, nBI, N)
  }else{
    if(is.null(lower.limits)){ lower.limits = c(rep(-100, ncol(samp.prior.beta)), rep(0,length(historical))) }
    if(is.null(upper.limits)){ upper.limits = c(rep(100, ncol(samp.prior.beta)), rep(1,length(historical))) }
    out <- power_glm_random_a0(data.type, data.link, data.size, n, treat.assign.prob, borrow.treat,
                               historical, nullspace.ineq, samp.prior.beta, prior.beta.var,
                               prior.a0.shape1, prior.a0.shape2, a0.coefficients,
                               lower.limits, upper.limits, slice.widths,
                               delta, gamma, nMC, nBI, N)
  }

  if(borrow.treat==FALSE){
    rownames(out$`average posterior mean of beta`) <- c("intercept","treatment (beta_1)",colnames(x))
  }else{
    rownames(out$`average posterior mean of beta`) <- c("intercept", paste(colnames(x)[1],"(beta_1)"),colnames(x)[-1])
  }

  structure(out, class=c("powerglm"))
}
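## The examples above take a0.coefficients as given. A hedged sketch of that
## preliminary step (wrapped in `if (FALSE)` so it is never run at package
## load): it assumes normalizing.constant() accepts a grid of candidate a0
## values with one column per historical dataset and returns the fitted
## regression coefficients used to approximate the normalizing constant; the
## grid values below are illustrative only.
if (FALSE) {
  p <- 3; n_total <- 100
  historical <- list(list(y0=rbinom(n_total, size=1, prob=0.2),
                          x0=matrix(rnorm(p*n_total), ncol=p, nrow=n_total)),
                     list(y0=rbinom(n_total, size=1, prob=0.5),
                          x0=matrix(rnorm(p*n_total), ncol=p, nrow=n_total)))
  # Candidate a0 values, one column per historical dataset
  grid <- as.matrix(expand.grid(a0_1=c(0.1, 0.5, 0.9), a0_2=c(0.1, 0.5, 0.9)))
  a0.coefficients <- normalizing.constant(grid=grid, historical=historical,
                                          data.type="Bernoulli", data.link="Logistic")
}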
## file: BayesPPD/R/glm_random.R
### two group, fixed a0 #####

#' Model fitting for two groups (treatment and control group, no covariates) with fixed a0
#'
#' @description Model fitting using power priors for two groups (treatment and control group, no covariates) with fixed \eqn{a_0}
#'
#' @param historical (Optional) matrix of historical dataset(s). If \code{data.type} is "Normal", \code{historical} is a matrix with four columns:
#' \itemize{
#' \item The first column contains the sum of responses for the control group.
#' \item The second column contains the sample size of the control group.
#' \item The third column contains the sample variance of responses for the control group.
#' \item The fourth column contains the discounting parameter value \eqn{a_0} (between 0 and 1).
#' }
#' For all other data types, \code{historical} is a matrix with three columns:
#' \itemize{
#' \item The first column contains the sum of responses for the control group.
#' \item The second column contains the sample size of the control group.
#' \item The third column contains the discounting parameter value \eqn{a_0} (between 0 and 1).
#' }
#' Each row represents a historical dataset.
#' @param nMC (For normal data only) number of iterations (excluding burn-in samples) for the Gibbs sampler. The default is 10,000.
#' @param nBI (For normal data only) number of burn-in samples for the Gibbs sampler. The default is 250.
#' @inheritParams two.grp.random.a0
#' @inheritParams power.two.grp.fixed.a0
#'
#' @details The power prior is applied on the data of the control group only.
#' Therefore, only summaries of the responses of the control group need to be entered.
#'
#' If \code{data.type} is "Bernoulli", "Poisson" or "Exponential", a single response from the treatment group is assumed to follow Bern(\eqn{\mu_t}), Pois(\eqn{\mu_t}) or Exp(rate=\eqn{\mu_t}), respectively,
#' where \eqn{\mu_t} is the mean of responses for the treatment group. The distributional assumptions for the control group data are analogous.
#'
#' If \code{data.type} is "Bernoulli", the initial prior for \eqn{\mu_t} is beta(\code{prior.mu.t.shape1}, \code{prior.mu.t.shape2}).
#' If \code{data.type} is "Poisson", the initial prior for \eqn{\mu_t} is Gamma(\code{prior.mu.t.shape1}, rate=\code{prior.mu.t.shape2}).
#' If \code{data.type} is "Exponential", the initial prior for \eqn{\mu_t} is Gamma(\code{prior.mu.t.shape1}, rate=\code{prior.mu.t.shape2}).
#' The initial priors used for the control group data are analogous.
#'
#' If \code{data.type} is "Normal", the responses are assumed to follow \eqn{N(\mu_c, \tau^{-1})} where \eqn{\mu_c} is the mean of responses for the control group
#' and \eqn{\tau} is the precision parameter. Each historical dataset \eqn{D_{0k}} is assumed to have a different precision parameter \eqn{\tau_k}.
#' The initial prior for \eqn{\tau} is the Jeffreys prior, \eqn{\tau^{-1}}, and the initial prior for \eqn{\tau_k} is \eqn{\tau_k^{-1}}. The initial prior for \eqn{\mu_c} is the uniform improper prior.
#' Posterior samples are obtained through Gibbs sampling.
#'
#' @return The function returns an S3 object with a \code{summary} method. If \code{data.type} is "Normal", posterior samples of \eqn{\mu_c}, \eqn{\tau} and \eqn{\tau_k}'s (if historical data is given) are returned
#' in the list item named \code{posterior.params}.
#' For all other data types, two scalars, \eqn{c_1} and \eqn{c_2}, are returned in the list item named \code{posterior.params}, representing the two parameters of the posterior distribution of \eqn{\mu_c}.
#' For Bernoulli responses, the posterior distribution of \eqn{\mu_c} is beta(\eqn{c_1}, \eqn{c_2}).
#' For Poisson responses, the posterior distribution of \eqn{\mu_c} is Gamma(\eqn{c_1}, \eqn{c_2}) where \eqn{c_2} is the rate parameter.
#' For exponential responses, the posterior distribution of \eqn{\mu_c} is Gamma(\eqn{c_1}, \eqn{c_2}) where \eqn{c_2} is the rate parameter.
#' @references Chen, Ming-Hui, et al. "Bayesian design of noninferiority trials for medical devices using historical data." Biometrics 67.3 (2011): 1163-1170.
#' @seealso \code{\link{power.two.grp.fixed.a0}}
#' @examples
#' data.type <- "Bernoulli"
#' y.c <- 70
#' n.c <- 100
#'
#' # Simulate three historical datasets
#' historical <- matrix(0, ncol=3, nrow=3)
#' historical[1,] <- c(70, 100, 0.3)
#' historical[2,] <- c(60, 100, 0.5)
#' historical[3,] <- c(50, 100, 0.7)
#'
#' set.seed(1)
#' result <- two.grp.fixed.a0(data.type=data.type, y.c=y.c, n.c=n.c, historical=historical)
#' summary(result)
#' @export
two.grp.fixed.a0 <- function(data.type, y.c, n.c, v.c, historical=matrix(0,1,4),
                             prior.mu.c.shape1=1, prior.mu.c.shape2=1, nMC=10000, nBI=250) {

  if(data.type == "Normal"){
    result <- two_grp_fixed_a0_normal(y.c, n.c, v.c, historical, nMC, nBI)
  }else{
    result <- two_grp_fixed_a0(data.type, y.c, n.c, historical, prior.mu.c.shape1, prior.mu.c.shape2)
  }
  out <- list(posterior.params=result, data.type=data.type)
  structure(out, class=c("tgfixed"))
}

#' @importFrom stats quantile
#' @export
summary.tgfixed <- function(object, ...)
{
  r <- object$posterior.params

  if(object$data.type=="Normal"){
    # mu_c
    mu_c <- r$`posterior samples of mu_c`
    output1 <- cbind("mean"=mean(mu_c),"sd"=sd(mu_c),"2.5%"=quantile(mu_c,probs=0.025),"97.5%"=quantile(mu_c,probs=0.975))
    rownames(output1) <- "mu_c"
    output1 <- round(output1, digits=3)
    # tau
    taus <- r$`posterior samples of tau`
    output2 <- cbind("mean"=mean(taus),"sd"=sd(taus),"2.5%"=quantile(taus,probs=0.025),"97.5%"=quantile(taus,probs=0.975))
    rownames(output2) <- "tau"
    output2 <- round(output2, digits=3)
    # tau0
    if(length(r)==2){
      output3 <- NULL
    }else{
      tau0s <- r$`posterior samples of tau_0`
      m <- apply(tau0s, 2, mean)
      std <- apply(tau0s, 2, sd)
      q1 <- apply(tau0s, 2, quantile, probs=0.025)
      q2 <- apply(tau0s, 2, quantile, probs=0.975)
      output3 <- cbind("mean"=m,"sd"=std,"2.5%"=q1,"97.5%"=q2)
      rownames(output3) <- paste0("tau0_", 1:nrow(output3))
      output3 <- round(output3, digits=3)
    }
    out <- rbind(output1, output2, output3)
    out
  }else{
    c1 <- round(r[1],3)
    c2 <- round(r[2],3)
    if(object$data.type=="Bernoulli"){
      cat("The posterior distribution for mu_c is beta(",c1,", ",c2,").")}
    if(object$data.type=="Poisson"){
      cat("The posterior distribution for mu_c is gamma(",c1,", rate=",c2,").")}
    if(object$data.type=="Exponential"){
      cat("The posterior distribution for mu_c is gamma(",c1,", rate=",c2,").")}
  }
}

#' Power/type I error calculation for data with two groups (treatment and control group, no covariates) with fixed a0
#'
#' @description Power/type I error calculation for data with two groups (treatment and control group, no covariates) with fixed \eqn{a_0} using power priors
#' @param data.type Character string specifying the type of response. The options are "Normal", "Bernoulli", "Poisson" and "Exponential".
#' @param n.t Sample size of the treatment group for the simulated datasets.
#' @param n.c Sample size of the control group for the simulated datasets.
#' @param historical (Optional) matrix of historical dataset(s).
#' If \code{data.type} is "Normal", \code{historical} is a matrix with four columns:
#' \itemize{
#' \item The first column contains the sum of responses for the control group.
#' \item The second column contains the sample size of the control group.
#' \item The third column contains the sample variance of responses for the control group.
#' \item The fourth column contains the discounting parameter value \eqn{a_0} (between 0 and 1).
#' }
#' For all other data types, \code{historical} is a matrix with three columns:
#' \itemize{
#' \item The first column contains the sum of responses for the control group.
#' \item The second column contains the sample size of the control group.
#' \item The third column contains the discounting parameter value \eqn{a_0} (between 0 and 1).
#' }
#' Each row represents a historical dataset.
#' @param nullspace.ineq Character string specifying the inequality of the null hypothesis. The options are ">" and "<". If ">" is specified, the null hypothesis (for non-exponential data) is \eqn{H_0}: \eqn{\mu_t} - \eqn{\mu_c} \eqn{\ge} \eqn{\delta}. If "<" is specified, the null hypothesis is \eqn{H_0}: \eqn{\mu_t} - \eqn{\mu_c} \eqn{\le} \eqn{\delta}. The default choice is ">".
#' @param samp.prior.mu.t Vector of possible values of \eqn{\mu_t} to sample (with replacement) from. The vector contains realizations from the sampling prior (e.g. normal distribution) for \eqn{\mu_t}.
#' @param samp.prior.mu.c Vector of possible values of \eqn{\mu_c} to sample (with replacement) from. The vector contains realizations from the sampling prior (e.g. normal distribution) for \eqn{\mu_c}.
#' @param samp.prior.var.t Vector of possible values of \eqn{\sigma^2_t} to sample (with replacement) from. Only applies if \code{data.type} is "Normal". The vector contains realizations from the sampling prior (e.g. inverse-gamma distribution) for \eqn{\sigma^2_t}.
#' @param samp.prior.var.c Vector of possible values of \eqn{\sigma^2_c} to sample (with replacement) from. Only applies if \code{data.type} is "Normal". The vector contains realizations from the sampling prior (e.g. inverse-gamma distribution) for \eqn{\sigma^2_c}.
#' @param prior.mu.t.shape1 First hyperparameter of the initial prior for \eqn{\mu_t}. The default is 1. Does not apply if \code{data.type} is "Normal".
#' @param prior.mu.t.shape2 Second hyperparameter of the initial prior for \eqn{\mu_t}. The default is 1. Does not apply if \code{data.type} is "Normal".
#' @param prior.mu.c.shape1 First hyperparameter of the initial prior for \eqn{\mu_c}. The default is 1. Does not apply if \code{data.type} is "Normal".
#' @param prior.mu.c.shape2 Second hyperparameter of the initial prior for \eqn{\mu_c}. The default is 1. Does not apply if \code{data.type} is "Normal".
#' @inheritParams power.glm.fixed.a0
#'
#'
#' @details If \code{data.type} is "Bernoulli", "Poisson" or "Exponential", a single response from the treatment group is assumed to follow Bern(\eqn{\mu_t}), Pois(\eqn{\mu_t}) or Exp(rate=\eqn{\mu_t}), respectively,
#' where \eqn{\mu_t} is the mean of responses for the treatment group. If \code{data.type} is "Normal", a single response from the treatment group is assumed to follow \eqn{N(\mu_t, \tau^{-1})}
#' where \eqn{\tau} is the precision parameter.
#' The distributional assumptions for the control group data are analogous.
#'
#' \code{samp.prior.mu.t} and \code{samp.prior.mu.c} can be generated from the sampling priors (see example).
#'
#' If \code{data.type} is "Bernoulli", the initial prior for \eqn{\mu_t} is
#' beta(\code{prior.mu.t.shape1}, \code{prior.mu.t.shape2}).
#' If \code{data.type} is "Poisson", the initial prior for \eqn{\mu_t} is
#' Gamma(\code{prior.mu.t.shape1}, rate=\code{prior.mu.t.shape2}).
#' If \code{data.type} is "Exponential", the initial prior for \eqn{\mu_t} is
#' Gamma(\code{prior.mu.t.shape1}, rate=\code{prior.mu.t.shape2}).
#' The initial priors used for the control group data are analogous.
#'
#' If \code{data.type} is "Normal", each historical dataset \eqn{D_{0k}} is assumed to have a different precision parameter \eqn{\tau_k}.
#' The initial prior for \eqn{\tau} is the Jeffreys prior, \eqn{\tau^{-1}}, and the initial prior for \eqn{\tau_k} is \eqn{\tau_k^{-1}}.
#' The initial prior for \eqn{\mu_c} is the uniform improper prior.
#'
#' If a sampling prior with support in the null space is used, the value returned is a Bayesian type I error rate.
#' If a sampling prior with support in the alternative space is used, the value returned is a Bayesian power.
#'
#' If \code{data.type} is "Normal", Gibbs sampling is used for model fitting. For all other data types,
#' numerical integration is used for model fitting.
#'
#' @return The function returns an S3 object with a \code{summary} method. Power or type I error is returned, depending on the sampling prior used.
#' The posterior probabilities of the alternative hypothesis are returned.
#' Average posterior means of \eqn{\mu_t} and \eqn{\mu_c} and their corresponding biases are returned.
#' If \code{data.type} is "Normal", average posterior means of \eqn{\tau} and \eqn{\tau_k}'s (if historical data is given) are also returned.
#' @references Yixuan Qiu, Sreekumar Balan, Matt Beall, Mark Sauder, Naoaki Okazaki and Thomas Hahn (2019). RcppNumerical: 'Rcpp' Integration for Numerical Computing Libraries. R package version 0.4-0. https://CRAN.R-project.org/package=RcppNumerical
#'
#' Chen, Ming-Hui, et al. "Bayesian design of noninferiority trials for medical devices using historical data." Biometrics 67.3 (2011): 1163-1170.
#' @seealso \code{\link{two.grp.fixed.a0}}
#' @examples
#' data.type <- "Bernoulli"
#' n.t <- 100
#' n.c <- 100
#'
#' # Simulate three historical datasets
#' historical <- matrix(0, ncol=3, nrow=3)
#' historical[1,] <- c(70, 100, 0.3)
#' historical[2,] <- c(60, 100, 0.5)
#' historical[3,] <- c(50, 100, 0.7)
#'
#' # Generate sampling priors
#' set.seed(1)
#' b_st1 <- b_st2 <- 1
#' b_sc1 <- b_sc2 <- 1
#' samp.prior.mu.t <- rbeta(50000, b_st1, b_st2)
#' samp.prior.mu.c <- rbeta(50000, b_sc1, b_sc2)
#' # The null hypothesis here is H0: mu_t - mu_c >= 0. To calculate power,
#' # we can provide samples of mu.t and mu.c such that the mass of mu_t - mu_c < 0.
#' # To calculate type I error, we can provide samples of mu.t and mu.c such that
#' # the mass of mu_t - mu_c >= 0.
#' sub_ind <- which(samp.prior.mu.t < samp.prior.mu.c)
#' # Here, mass is put on the alternative region, so power is calculated.
#' samp.prior.mu.t <- samp.prior.mu.t[sub_ind]
#' samp.prior.mu.c <- samp.prior.mu.c[sub_ind]
#'
#' N <- 1000 # N should be larger in practice
#' result <- power.two.grp.fixed.a0(data.type=data.type, n.t=n.t, n.c=n.c, historical=historical,
#'                                  samp.prior.mu.t=samp.prior.mu.t, samp.prior.mu.c=samp.prior.mu.c,
#'                                  delta=0, N=N)
#' summary(result)
#' @export
power.two.grp.fixed.a0 <- function(data.type, n.t, n.c, historical=matrix(0,1,4), nullspace.ineq=">",
                                   samp.prior.mu.t, samp.prior.mu.c,
                                   samp.prior.var.t, samp.prior.var.c,
                                   prior.mu.t.shape1=1, prior.mu.t.shape2=1,
                                   prior.mu.c.shape1=1, prior.mu.c.shape2=1,
                                   delta=0, gamma=0.95, nMC=10000, nBI=250, N=10000) {

  if(data.type == "Normal"){
    out <- power_two_grp_fixed_a0_normal(n.t, n.c, historical, nullspace.ineq,
                                         samp.prior.mu.t, samp.prior.mu.c,
                                         samp.prior.var.t, samp.prior.var.c,
                                         delta, gamma, nMC, nBI, N)
  }else{
    out <- power_two_grp_fixed_a0(data.type, n.t, n.c, historical, nullspace.ineq,
                                  samp.prior.mu.t, samp.prior.mu.c,
                                  prior.mu.t.shape1, prior.mu.t.shape2,
                                  prior.mu.c.shape1, prior.mu.c.shape2,
                                  delta, gamma, N, Inf)
  }
  structure(out, class=c("powertg"))
}

#' @export
summary.powertg <- function(object, ...)
{
  r <- round(object$`power/type I error`, digits=3)
  # posterior means of mu_t and mu_c
  mus <- c(object$`average posterior mean of mu_t`, object$`average posterior mean of mu_c`)
  bias <- c(object$`bias of the average posterior mean of mu_t`, object$`bias of the average posterior mean of mu_c`)
  postprob <- mean(object[[2]])
  output1 <- cbind(mus, bias)
  colnames(output1) <- c("average posterior mean","bias")
  rownames(output1) <- c("mu_t","mu_c")
  output1 <- round(output1, digits=3)
  print(output1)
  cat("The power/type I error rate is ", r, ".\n")
  cat("The average of the", names(object[2]), "is", round(postprob, 3), ".")
}
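## For non-normal data, two.grp.fixed.a0() returns the two parameters of the
## closed-form posterior of mu_c (see summary.tgfixed above). A hedged sketch
## (wrapped in `if (FALSE)` so it is never run at package load) of turning
## those parameters into posterior summaries for Bernoulli data; it assumes
## posterior.params holds c(c1, c2) in that order, as summary.tgfixed() does.
if (FALSE) {
  historical <- matrix(c(70, 100, 0.3,
                         60, 100, 0.5,
                         50, 100, 0.7), ncol=3, byrow=TRUE)
  fit <- two.grp.fixed.a0(data.type="Bernoulli", y.c=70, n.c=100, historical=historical)
  c1 <- fit$posterior.params[1]
  c2 <- fit$posterior.params[2]
  # Posterior mean and 95% equal-tailed credible interval of mu_c ~ beta(c1, c2)
  c(mean = c1/(c1+c2),
    `2.5%` = qbeta(0.025, c1, c2),
    `97.5%` = qbeta(0.975, c1, c2))
}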
## file: BayesPPD/R/two_grp_fixed.R
### two group, random a0 #####

#' Model fitting for two groups (treatment and control group, no covariates) with random a0
#'
#' @description Model fitting using normalized power priors for two groups (treatment and control group, no covariates) with random \eqn{a_0}
#'
#' @param y.c Sum of responses for the control group.
#' @param n.c Sample size of the control group.
#' @param v.c (For normal data only) sample variance of responses for the control group.
#' @param historical Matrix of historical dataset(s). If \code{data.type} is "Normal", \code{historical} is a matrix with three columns:
#' \itemize{
#' \item The first column contains the sum of responses for the control group.
#' \item The second column contains the sample size of the control group.
#' \item The third column contains the sample variance of responses for the control group.
#' }
#' For all other data types, \code{historical} is a matrix with two columns:
#' \itemize{
#' \item The first column contains the sum of responses for the control group.
#' \item The second column contains the sample size of the control group.
#' }
#' Each row represents a historical dataset.
#' @param lower.limits Vector of lower limits for parameters to be used by the slice sampler. The length of the vector should be equal to the number of historical datasets. The default is 0 for all parameters (may not be appropriate for all situations).
#' @param upper.limits Vector of upper limits for parameters to be used by the slice sampler. The length of the vector should be equal to the number of historical datasets. The default is 1 for all parameters (may not be appropriate for all situations).
#' @param slice.widths Vector of initial slice widths used by the slice sampler. The length of the vector should be equal to the number of historical datasets. The default is 0.1 for all parameters (may not be appropriate for all situations).
#' @param prior.a0.shape1 Vector of the first shape parameters of the independent beta priors for \eqn{a_0}. The length of the vector should be equal to the number of historical datasets. The default is a vector of ones.
#' @param prior.a0.shape2 Vector of the second shape parameters of the independent beta priors for \eqn{a_0}. The length of the vector should be equal to the number of historical datasets. The default is a vector of ones.
#'
#' @inheritParams power.two.grp.fixed.a0
#' @inheritParams power.glm.fixed.a0
#'
#' @details If \code{data.type} is "Bernoulli", "Poisson" or "Exponential", a single response from the treatment group is assumed to follow Bern(\eqn{\mu_t}), Pois(\eqn{\mu_t}) or Exp(rate=\eqn{\mu_t}), respectively,
#' where \eqn{\mu_t} is the mean of responses for the treatment group. If \code{data.type} is "Normal", a single response from the treatment group is assumed to follow \eqn{N(\mu_t, \tau^{-1})}
#' where \eqn{\tau} is the precision parameter.
#' The distributional assumptions for the control group data are analogous.
#'
#' If \code{data.type} is "Bernoulli", the initial prior for \eqn{\mu_t} is beta(\code{prior.mu.t.shape1}, \code{prior.mu.t.shape2}).
#' If \code{data.type} is "Poisson", the initial prior for \eqn{\mu_t} is Gamma(\code{prior.mu.t.shape1}, rate=\code{prior.mu.t.shape2}).
#' If \code{data.type} is "Exponential", the initial prior for \eqn{\mu_t} is Gamma(\code{prior.mu.t.shape1}, rate=\code{prior.mu.t.shape2}).
#' The initial priors used for the control group data are analogous.
#'
#' If \code{data.type} is "Normal", historical datasets are assumed to have the same precision parameter \eqn{\tau} as the current dataset for computational simplicity.
#' The initial prior for \eqn{\tau} is the Jeffreys prior, \eqn{\tau^{-1}}. The initial prior for \eqn{\mu_c} is the uniform improper prior.
#' Posterior samples of \eqn{\mu_c} and \eqn{\tau} are obtained through Gibbs sampling.
#'
#' Independent beta(\code{prior.a0.shape1}, \code{prior.a0.shape2}) priors are used for \eqn{a_0}. Posterior samples of \eqn{a_0} are obtained through slice sampling. The default lower limits for the parameters are 0. The default upper limits
#' for the parameters are 1. The default slice widths for the parameters are 0.1.
#' The defaults may not be appropriate for all situations, and the user can specify the appropriate limits
#' and slice width for each parameter.
#'
#' @return The function returns an S3 object with a \code{summary} method. If \code{data.type} is "Normal", posterior samples of \eqn{\mu_c}, \eqn{\tau} and \eqn{a_0} are returned.
#' For all other data types, posterior samples of \eqn{\mu_c} and \eqn{a_0} are returned. If there are \eqn{K} historical datasets,
#' then \eqn{a_0 = (a_{01},\cdots,a_{0K})}.
#'
#' @references Neal, Radford M. Slice sampling. Ann. Statist. 31 (2003), no. 3, 705--767.
#' @seealso \code{\link{power.two.grp.random.a0}}
#' @examples
#'
#' data.type <- "Bernoulli"
#' y.c <- 70
#' n.c <- 100
#'
#' # Simulate three historical datasets
#' historical <- matrix(0, ncol=2, nrow=3)
#' historical[1,] <- c(70, 100)
#' historical[2,] <- c(60, 100)
#' historical[3,] <- c(50, 100)
#'
#' # Set parameters of the slice sampler
#' lower.limits <- rep(0, 3) # The dimension is the number of historical datasets
#' upper.limits <- rep(1, 3)
#' slice.widths <- rep(0.1, 3)
#'
#' set.seed(1)
#' result <- two.grp.random.a0(data.type=data.type, y.c=y.c, n.c=n.c, historical=historical,
#'                             lower.limits=lower.limits, upper.limits=upper.limits,
#'                             slice.widths=slice.widths, nMC=10000, nBI=250)
#' summary(result)
#' @export
two.grp.random.a0 <- function(data.type, y.c, n.c, v.c, historical,
                              prior.mu.c.shape1=1, prior.mu.c.shape2=1,
                              prior.a0.shape1=rep(1,10), prior.a0.shape2=rep(1,10),
                              lower.limits=rep(0, 10), upper.limits=rep(1, 10),
                              slice.widths=rep(0.1, 10), nMC=10000, nBI=250) {

  if(data.type == "Normal"){
    result <- two_grp_random_a0_normal(y.c, n.c, v.c, historical, prior.a0.shape1, prior.a0.shape2,
                                       lower.limits, upper.limits, slice.widths, nMC, nBI)
  }else{
    result <- two_grp_random_a0(data.type, y.c, n.c, historical, prior.mu.c.shape1, prior.mu.c.shape2,
                                prior.a0.shape1, prior.a0.shape2,
                                lower.limits, upper.limits, slice.widths, nMC, nBI)
  }
  out <- list(posterior.samples=result, data.type=data.type)
  structure(out, class=c("tgrandom"))
}

#' @importFrom stats quantile
#' @export
summary.tgrandom <- function(object, ...)
{
  r <- object$posterior.samples

  # mu_c
  mu_c <- r$`posterior samples of mu_c`
  output1 <- cbind("mean"=mean(mu_c),"sd"=sd(mu_c),"2.5%"=quantile(mu_c,probs=0.025),"97.5%"=quantile(mu_c,probs=0.975))
  rownames(output1) <- "mu_c"
  output1 <- round(output1, digits=3)

  # tau
  if(object$data.type=="Normal"){
    taus <- r$`posterior samples of tau`
    output2 <- cbind("mean"=mean(taus),"sd"=sd(taus),"2.5%"=quantile(taus,probs=0.025),"97.5%"=quantile(taus,probs=0.975))
    rownames(output2) <- "tau"
    output2 <- round(output2, digits=3)
  }else{
    output2 <- NULL
  }

  # a0
  a0s <- r$`posterior samples of a0`
  m <- apply(a0s, 2, mean)
  std <- apply(a0s, 2, sd)
  q1 <- apply(a0s, 2, quantile, probs=0.025)
  q2 <- apply(a0s, 2, quantile, probs=0.975)
  output3 <- cbind("mean"=m,"sd"=std,"2.5%"=q1,"97.5%"=q2)
  rownames(output3) <- paste0("a0_", 1:nrow(output3))
  output3 <- round(output3, digits=3)

  out <- rbind(output1, output2, output3)
  out
}

#' Power/type I error calculation for two groups (treatment and control group, no covariates) with random a0
#'
#' @description Power/type I error calculation using normalized power priors for two groups (treatment and control group, no covariates) with random \eqn{a_0}
#'
#' @param n.t Sample size of the treatment group for the simulated datasets.
#' @param n.c Sample size of the control group for the simulated datasets.
#' @inheritParams two.grp.random.a0
#' @inheritParams power.two.grp.fixed.a0
#' @inheritParams power.glm.fixed.a0
#'
#' @details If \code{data.type} is "Bernoulli", "Poisson" or "Exponential", a single response from the treatment group is assumed to follow Bern(\eqn{\mu_t}), Pois(\eqn{\mu_t}) or Exp(rate=\eqn{\mu_t}), respectively,
#' where \eqn{\mu_t} is the mean of responses for the treatment group. If \code{data.type} is "Normal", a single response from the treatment group is assumed to follow \eqn{N(\mu_t, \tau^{-1})}
#' where \eqn{\tau} is the precision parameter.
#' The distributional assumptions for the control group data are analogous.
#'
#' \code{samp.prior.mu.t} and \code{samp.prior.mu.c} can be generated from the sampling priors (see example).
#'
#' If \code{data.type} is "Bernoulli", the initial prior for \eqn{\mu_t} is beta(\code{prior.mu.t.shape1}, \code{prior.mu.t.shape2}).
#' If \code{data.type} is "Poisson", the initial prior for \eqn{\mu_t} is Gamma(\code{prior.mu.t.shape1}, rate=\code{prior.mu.t.shape2}).
#' If \code{data.type} is "Exponential", the initial prior for \eqn{\mu_t} is Gamma(\code{prior.mu.t.shape1}, rate=\code{prior.mu.t.shape2}).
#' The initial priors used for the control group data are analogous.
#'
#' If \code{data.type} is "Normal", historical datasets are assumed to have the same precision parameter as the current dataset for computational simplicity.
#' The initial prior for \eqn{\tau} is the Jeffreys prior, \eqn{\tau^{-1}}. The initial prior for \eqn{\mu_c} is the uniform improper prior.
#' Posterior samples of \eqn{\mu_c} and \eqn{\tau} are obtained through Gibbs sampling.
#'
#' Independent beta(\code{prior.a0.shape1}, \code{prior.a0.shape2}) priors are used for \eqn{a_0}. Posterior samples of \eqn{a_0} are obtained through slice sampling. The default lower limits for the parameters are 0. The default upper limits
#' for the parameters are 1. The default slice widths for the parameters are 0.1.
#' The defaults may not be appropriate for all situations, and the user can specify the appropriate limits
#' and slice width for each parameter.
#'
#' If a sampling prior with support in the null space is used, the value returned is a Bayesian type I error rate.
#' If a sampling prior with support in the alternative space is used, the value returned is a Bayesian power.
#'
#' @return The function returns an S3 object with a \code{summary} method. Power or type I error is returned, depending on the sampling prior used.
#' The posterior probabilities of the alternative hypothesis are returned.
#' Average posterior means of \eqn{\mu_t} and \eqn{\mu_c} and their corresponding biases are returned.
#' The average posterior mean of \eqn{a_0} is returned.
#' If \code{data.type} is "Normal", the average posterior mean of \eqn{\tau} is also returned.
#' @references Chen, Ming-Hui, et al. "Bayesian design of noninferiority trials for medical devices using historical data." Biometrics 67.3 (2011): 1163-1170.
#'
#' Neal, Radford M. Slice sampling. Ann. Statist. 31 (2003), no. 3, 705--767.
#'
#' @seealso \code{\link{two.grp.random.a0}}
#' @examples
#'
#' data.type <- "Bernoulli"
#' n.t <- 100
#' n.c <- 100
#'
#' # Simulate three historical datasets
#' historical <- matrix(0, ncol=2, nrow=3)
#' historical[1,] <- c(70, 100)
#' historical[2,] <- c(60, 100)
#' historical[3,] <- c(50, 100)
#'
#' # Generate sampling priors
#' set.seed(1)
#' b_st1 <- b_st2 <- 1
#' b_sc1 <- b_sc2 <- 1
#' samp.prior.mu.t <- rbeta(50000, b_st1, b_st2)
#' samp.prior.mu.c <- rbeta(50000, b_sc1, b_sc2)
#' # The null hypothesis here is H0: mu_t - mu_c >= 0. To calculate power,
#' # we can provide samples of mu.t and mu.c such that the mass of mu_t - mu_c < 0.
#' # To calculate type I error, we can provide samples of mu.t and mu.c such that
#' # the mass of mu_t - mu_c >= 0.
#' sub_ind <- which(samp.prior.mu.t < samp.prior.mu.c)
#' # Here, mass is put on the alternative region, so power is calculated.
#' samp.prior.mu.t <- samp.prior.mu.t[sub_ind]
#' samp.prior.mu.c <- samp.prior.mu.c[sub_ind]
#'
#' N <- 10 # N should be larger in practice
#' result <- power.two.grp.random.a0(data.type=data.type, n.t=n.t, n.c=n.c, historical=historical,
#'                                   samp.prior.mu.t=samp.prior.mu.t, samp.prior.mu.c=samp.prior.mu.c,
#'                                   delta=0, nMC=10000, nBI=250, N=N)
#' summary(result)
#' @export
power.two.grp.random.a0 <- function(data.type, n.t, n.c, historical, nullspace.ineq=">",
                                    samp.prior.mu.t, samp.prior.mu.c,
                                    samp.prior.var.t=0, samp.prior.var.c=0,
                                    prior.mu.t.shape1=1, prior.mu.t.shape2=1,
                                    prior.mu.c.shape1=1, prior.mu.c.shape2=1,
                                    prior.a0.shape1=rep(1,10), prior.a0.shape2=rep(1,10),
                                    lower.limits=rep(0, 10), upper.limits=rep(1, 10), slice.widths=rep(0.1, 10),
                                    delta=0, gamma=0.95, nMC=10000, nBI=250, N=10000) {

  out <- power_two_grp_random_a0(data.type, n.t, n.c, historical, nullspace.ineq,
                                 samp.prior.mu.t, samp.prior.mu.c, samp.prior.var.t, samp.prior.var.c,
                                 prior.mu.t.shape1, prior.mu.t.shape2, prior.mu.c.shape1, prior.mu.c.shape2,
                                 prior.a0.shape1, prior.a0.shape2,
                                 lower.limits, upper.limits, slice.widths,
                                 delta, gamma, nMC, nBI, N)
  structure(out, class=c("powertg"))
}
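## A hedged sketch (wrapped in `if (FALSE)` so it is never run at package
## load) of inspecting how strongly each historical dataset is borrowed after
## fitting with two.grp.random.a0(); it assumes the returned element is named
## "posterior samples of a0", matching summary.tgrandom() above.
if (FALSE) {
  historical <- matrix(c(70, 100,
                         60, 100,
                         50, 100), ncol=2, byrow=TRUE)
  fit <- two.grp.random.a0(data.type="Bernoulli", y.c=70, n.c=100, historical=historical)
  a0s <- fit$posterior.samples$`posterior samples of a0`
  colMeans(a0s)  # posterior mean discounting weight for each historical dataset
}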
## file: BayesPPD/R/two_grp_random.R
## ---- include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ---- echo=FALSE-----------------------------------------------------------------
library(kableExtra)
df <- data.frame(Cat = c("Historical Trial 1", "Historical Trial 2"),
                 Sad = c("8.2% (44/535)", "10.9% (33/304)"))
kable(df, col.names = c("", "% TLF (# of failure/sample size)"), escape = F,
      caption = "Summary of the historical data.") %>%
  kable_styling(latex_options = "hold_position")

## ---- eval=TRUE------------------------------------------------------------------
historical <- matrix(0, ncol=3, nrow=2)
historical[1,] <- c(44, 535, 0.3)
historical[2,] <- c(33, 304, 0.3)

## ---- eval=TRUE------------------------------------------------------------------
library(BayesPPD)
set.seed(1)
n.t_vals <- seq(from=600, to=1000, by=50)
powers <- NULL
for(i in 1:length(n.t_vals)){
  n.t <- n.t_vals[i]
  results <- power.two.grp.fixed.a0(data.type="Bernoulli",
                                    n.t=n.t, n.c=round(n.t/3), historical=historical,
                                    samp.prior.mu.t=0.092, samp.prior.mu.c=0.092,
                                    prior.mu.t.shape1=0.0001, prior.mu.t.shape2=0.0001,
                                    prior.mu.c.shape1=0.0001, prior.mu.c.shape2=0.0001,
                                    delta=0.041, N=10000)
  power <- results$`power/type I error`
  powers <- c(powers, power)
}
powers

## ---- eval=TRUE------------------------------------------------------------------
library(ggplot2)
df <- data.frame(sample_size=n.t_vals, power=powers)
# Map the "power" column of df (not the global "powers" vector) to the y axis.
ggplot(data=df, aes(x=sample_size, y=power)) +
  geom_smooth(method = lm, formula = y ~ x, se = FALSE) +
  geom_point() +
  xlab("Sample Size") + ylab("Power")

## ---- eval=TRUE------------------------------------------------------------------
TIEs <- NULL
for(i in 1:length(n.t_vals)){
  n.t <- n.t_vals[i]
  results <- power.two.grp.fixed.a0(data.type="Bernoulli",
                                    n.t=n.t, n.c=round(n.t/3), historical=historical,
                                    samp.prior.mu.t=0.092+0.041, samp.prior.mu.c=0.092,
                                    prior.mu.t.shape1=0.0001, prior.mu.t.shape2=0.0001,
                                    prior.mu.c.shape1=0.0001, prior.mu.c.shape2=0.0001,
                                    delta=0.041, N=10000)
  TIE <- results$`power/type I error`
  TIEs <- c(TIEs, TIE)
}
TIEs

## ---- eval=FALSE-------------------------------------------------------------------
# historical <- matrix(0, ncol=2, nrow=2)
# historical[1,] <- c(44, 535)
# historical[2,] <- c(33, 304)

## ---- eval=FALSE-------------------------------------------------------------------
# n.t <- 750
# results <- power.two.grp.random.a0(data.type="Bernoulli",
#                                    n.t=n.t, n.c=round(n.t/3), historical=historical,
#                                    samp.prior.mu.t=0.092, samp.prior.mu.c=0.092,
#                                    prior.mu.t.shape1=0.0001, prior.mu.t.shape2=0.0001,
#                                    prior.mu.c.shape1=0.0001, prior.mu.c.shape2=0.0001,
#                                    prior.a0.shape1=1, prior.a0.shape2=1,
#                                    delta=0.041, gamma=0.95,
#                                    nMC=10000, nBI=250, N=10000)
# summary(results)

## ---- eval=TRUE--------------------------------------------------------------------
data.type <- "Normal"
n.t <- 100
n.c <- 100

# Simulate three historical datasets
K <- 3
historical <- matrix(0, ncol=4, nrow=K)
# The columns are the sum of the responses, the sample size, the sample variance and a_0
historical[1,] <- c(50, 50, 1, 0.3)
historical[2,] <- c(30, 50, 1, 0.5)
historical[3,] <- c(20, 50, 1, 0.7)

## ---- eval=TRUE---------------------------------------------------------------------
# Generate sampling priors
set.seed(1)
samp.prior.mu.t <- rnorm(50000)
samp.prior.mu.c <- rnorm(50000)
sub_ind <- which(samp.prior.mu.t < samp.prior.mu.c)
# Here, mass is put on the alternative region, so power is calculated.
samp.prior.mu.t <- samp.prior.mu.t[sub_ind] samp.prior.mu.c <- samp.prior.mu.c[sub_ind] samp.prior.var.t <- rgamma(100, 1, 1) samp.prior.var.c <- rgamma(100, 1, 1) ## ---- eval=TRUE--------------------------------------------------------------- set.seed(1) results <- power.two.grp.fixed.a0(data.type=data.type, n.t=n.t, n.c=n.t, historical=historical, samp.prior.mu.t=samp.prior.mu.t, samp.prior.mu.c=samp.prior.mu.c, samp.prior.var.t=samp.prior.var.t, samp.prior.var.c=samp.prior.var.c, delta=0, nMC=10000, nBI=250, N=100) summary(results) ## ---- eval=TRUE--------------------------------------------------------------- # Generate sampling priors set.seed(1) samp.prior.mu.t <- rnorm(50000) samp.prior.mu.c <- rnorm(50000) sub_ind <- which(samp.prior.mu.t >= samp.prior.mu.c) # Here, mass is put on the null region, so type I error rate is calculated. samp.prior.mu.t <- samp.prior.mu.t[sub_ind] samp.prior.mu.c <- samp.prior.mu.c[sub_ind] set.seed(1) results <- power.two.grp.fixed.a0(data.type=data.type, n.t=n.t, n.c=n.t, historical=historical, samp.prior.mu.t=samp.prior.mu.t, samp.prior.mu.c=samp.prior.mu.c, samp.prior.var.t=samp.prior.var.t, samp.prior.var.c=samp.prior.var.c, delta=0, nMC=10000, nBI=250, N=100) summary(results) ## ---- eval=TRUE--------------------------------------------------------------- data.type <- "Normal" n.t <- 100 n.c <- 100 # Simulate three historical datasets K <- 3 historical <- matrix(0, ncol=3, nrow=K) # The columns are the sum of the responses, the sample size, and the sample variance historical[1,] <- c(50, 50, 1) historical[2,] <- c(30, 50, 1) historical[3,] <- c(20, 50, 1) ## ---- eval=TRUE--------------------------------------------------------------- # Generate sampling priors set.seed(1) samp.prior.mu.t <- rnorm(50000) samp.prior.mu.c <- rnorm(50000) sub_ind <- which(samp.prior.mu.t < samp.prior.mu.c) # Here, mass is put on the alternative region, so power is calculated. samp.prior.mu.t <- samp.prior.mu.t[sub_ind] samp.prior.mu.c <- samp.prior.mu.c[sub_ind] samp.prior.var.t <- rgamma(100, 1, 1) samp.prior.var.c <- rgamma(100, 1, 1) ## ---- eval=TRUE--------------------------------------------------------------- set.seed(1) results <- power.two.grp.random.a0(data.type=data.type, n.t=n.t, n.c=n.t, historical=historical, samp.prior.mu.t=samp.prior.mu.t, samp.prior.mu.c=samp.prior.mu.c, samp.prior.var.t=samp.prior.var.t, samp.prior.var.c=samp.prior.var.c, delta=0, nMC=10000, nBI=250, N=100) summary(results) results$`average posterior means of a0` results$`average posterior mean of tau` ## ---- eval=TRUE--------------------------------------------------------------- # Generate sampling priors set.seed(1) samp.prior.mu.t <- rnorm(50000) samp.prior.mu.c <- rnorm(50000) sub_ind <- which(samp.prior.mu.t >= samp.prior.mu.c) # Here, mass is put on the null region, so type I error rate is calculated. samp.prior.mu.t <- samp.prior.mu.t[sub_ind] samp.prior.mu.c <- samp.prior.mu.c[sub_ind] set.seed(1) results <- power.two.grp.random.a0(data.type=data.type, n.t=n.t, n.c=n.t, historical=historical, samp.prior.mu.t=samp.prior.mu.t, samp.prior.mu.c=samp.prior.mu.c, samp.prior.var.t=samp.prior.var.t, samp.prior.var.c=samp.prior.var.c, delta=0, nMC=10000, nBI=250, N=100) summary(results)
/scratch/gouwar.j/cran-all/cranData/BayesPPD/inst/doc/bayesppd-vignette.R
---
title: "Bayesian Sample Size Determination for Two Group Models (Binary and Normal Outcomes)"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{bayesppd-vignette}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
header-includes:
- \usepackage{caption}
---

For two group models (i.e., treatment and control group with no covariates), we denote the parameter for the treatment group by $\mu_t$ and the parameter for the control group by $\mu_c$. 
The default null and alternative hypotheses are given by
$$H_0: \mu_t - \mu_c \ge \delta$$ and $$H_1: \mu_t - \mu_c < \delta,$$ where $\delta$ is a prespecified constant.

We use the following definition of Bayesian power / type I error rate. Let $\Theta_0$ and $\Theta_1$ denote the parameter spaces corresponding to $H_0$ and $H_1$. Let $y^{(n)}$ denote the simulated current data associated with a sample size of $n$ and let $\theta=(\mu_t, \mu_c, \tau_c)$ denote the model parameters. Let $\pi^{(s)}(\theta)$ denote the sampling prior and let $\pi^{(f)}(\theta)$ denote the fitting prior. The sampling prior is used to generate the hypothetical data while the fitting prior is used to fit the model after the data are generated. Let $\pi_0^{(s)}(\theta)$ denote a sampling prior that only puts mass in the null region, i.e., $\theta \in \Theta_0$. Let $\pi_1^{(s)}(\theta)$ denote a sampling prior that only puts mass in the alternative region, i.e., $\theta \in \Theta_1$. To determine Bayesian sample size, we estimate the quantity $$\beta_{sj}^{(n)}=E_s[I\{P(\mu_t-\mu_c<\delta|y^{(n)}, \pi^{(f)})\ge \gamma\}]$$ where $j=0$ or $1$, corresponding to the expectation taken with respect to $\pi_0^{(s)}(\theta)$ or $\pi_1^{(s)}(\theta)$. The constant $\gamma > 0$ is a prespecified posterior probability threshold for rejecting the null hypothesis (e.g., $0.975$). The probability is computed with respect to the posterior distribution given the simulated data $y^{(n)}$ and the fitting prior $\pi^{(f)}(\theta)$, and the expectation is taken with respect to the marginal distribution of $y^{(n)}$ defined based on the sampling prior $\pi^{(s)}(\theta)$. Then $\beta_{s0}^{(n)}$, corresponding to $\pi^{(s)}(\theta)=\pi_0^{(s)}(\theta)$, is the Bayesian type I error rate, while $\beta_{s1}^{(n)}$, corresponding to $\pi^{(s)}(\theta)=\pi_1^{(s)}(\theta)$, is the Bayesian power.

# 1. Two Group Cases with Binary Outcomes

We first demonstrate a model for binary outcomes for treatment and control groups with no covariates. We consider the non-inferiority design application of Chen et al. (2011). The goal was to design a trial comparing a new generation of drug-eluting stent (DES) ("test device") with the first generation of DES ("control device"). The primary endpoint is the 12-month Target Lesion Failure (TLF) (binary). Historical information can be borrowed from two previously conducted trials involving the first generation of DES. The table below summarizes the historical data. 
```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

```{r, echo=FALSE}
library(kableExtra)
df <- data.frame(Cat = c("Historical Trial 1", "Historical Trial 2"),
                 Sad = c("8.2% (44/535)", "10.9% (33/304)"))
kable(df, col.names = c("", "% TLF (# of failure/sample size)"), escape = F,
      caption = "Summary of the historical data.") %>%
  kable_styling(latex_options = "hold_position")
```

Let $\textbf{y}_t^{(n_t)}=(y_{t1},\cdots, y_{tn_t})$ and $\textbf{y}_c^{(n_c)}=(y_{c1},\cdots, y_{cn_c})$ denote the responses from the current trial for the test device and the control device, respectively. The total sample size is $n=n_t+n_c$. We assume the $i$-th observation from the test group $y_{ti}$ follows Bern($\mu_t$), and the $i$-th observation from the control group $y_{ci}$ follows Bern($\mu_c$). We will illustrate Bayesian sample size determination (SSD) incorporating historical data using the power prior with fixed $a_0$ and the normalized power prior with $a_0$ modeled as random.

The hypotheses for non-inferiority testing are $$H_0: \mu_t - \mu_c \ge \delta$$ and $$H_1: \mu_t - \mu_c < \delta,$$ where $\delta$ is a prespecified non-inferiority margin. We set $\delta=4.1\%$.

We choose beta$(10^{-4}, 10^{-4})$ for the initial prior for $\mu_c$, which performs similarly to the uniform improper initial prior for $\log\left(\frac{\mu_c}{1-\mu_c}\right)$ used in Chen et al. (2011) in terms of operating characteristics. Power is computed under the assumption that $\mu_t=\mu_c$ and the type I error rate is computed under the assumption that ${\mu_t=\mu_c+\delta}$. For sampling priors, a point mass prior at $\mu_c = 9.2\%$ is used for $\pi^{(s)}(\mu_c)$, where $9.2\%$ is the pooled proportion for the two historical control datasets, and a point mass prior at $\mu_t = \mu_c$ is used for $\pi^{(s)}(\mu_t)$. For all computations, we use $N=10,000$, $\frac{n_t}{n_c} = 3$, and $\gamma=0.95$.

## 1.1 Power Prior with Fixed $a_0$

When $a_0$ is fixed, the *historical* matrix is defined where each row represents a historical dataset, and the three columns represent, respectively, the sum of responses, the sample size and $a_0$ of the historical control data. We use $a_{01}=a_{02}=0.3$.

```{r, eval=TRUE}
historical <- matrix(0, ncol=3, nrow=2)
historical[1,] <- c(44, 535, 0.3)
historical[2,] <- c(33, 304, 0.3)
```

We consider $n_t$ values ranging from $600$ to $1000$ to achieve the desired power of $0.8$. Since point mass sampling priors are used for $\mu_t$ and $\mu_c$, *samp.prior.mu.t* and *samp.prior.mu.c* are both scalars. For Bernoulli outcomes, beta initial priors are used for $\mu_t$ and $\mu_c$, with hyperparameters specified by *prior.mu.t.shape1*, *prior.mu.t.shape2*, *prior.mu.c.shape1* and *prior.mu.c.shape2*.

```{r, eval=TRUE}
library(BayesPPD)
set.seed(1)
n.t_vals <- seq(from=600, to=1000, by=50)
powers <- NULL
for(i in 1:length(n.t_vals)){
  n.t <- n.t_vals[i]
  results <- power.two.grp.fixed.a0(data.type="Bernoulli", n.t=n.t, n.c=round(n.t/3),
                                    historical=historical,
                                    samp.prior.mu.t=0.092, samp.prior.mu.c=0.092,
                                    prior.mu.t.shape1=0.0001, prior.mu.t.shape2=0.0001,
                                    prior.mu.c.shape1=0.0001, prior.mu.c.shape2=0.0001,
                                    delta=0.041, N=10000)
  power <- results$`power/type I error`
  powers <- c(powers, power)
}
powers
```

We can see that a sample size of $650$ is required to achieve a power of at least $0.8$. A power curve is plotted below for sample sizes ranging from $600$ to $1000$. 
```{r, eval=TRUE} library(ggplot2) df <- data.frame(sample_size=n.t_vals, power=powers) ggplot(data=df, aes(x=sample_size, y=powers)) + geom_smooth(method = lm, formula = y ~ x, se = FALSE) + geom_point() + xlab("Sample Size") + ylab("Power") ``` We then compute the type I error rate for these sample sizes. Since the type I error rate is computed under the assumption that ${\mu_t=\mu_c+\delta}$, we use a point mass at $\mu_c = 9.2\%$ for the sampling prior for $\mu_c$, and a point mass at $\mu_t = 9.2\% + 4.1\%$ for the sampling prior for $\mu_t$. The following type I error rate calculations match the results given in Table 2 of Chen et al. (2011). ```{r, eval=TRUE} TIEs <- NULL for(i in 1:length(n.t_vals)){ n.t <- n.t_vals[i] results <- power.two.grp.fixed.a0(data.type="Bernoulli", n.t=n.t, n.c=round(n.t/3), historical=historical, samp.prior.mu.t=0.092+0.041, samp.prior.mu.c=0.092, prior.mu.t.shape1=0.0001, prior.mu.t.shape2=0.0001, prior.mu.c.shape1=0.0001,prior.mu.c.shape2=0.0001, delta=0.041, N=10000) TIE <- results$`power/type I error` TIEs <- c(TIEs, TIE) } TIEs ``` ## 1.2 Normalized Power Prior ($a_0$ Modeled as Random) When $a_0$ is modeled as random, the normalized power prior is used and the priors for $a_{01}$ and $a_{02}$ are beta(1,1), as in Chen et al. (2011). We run 10,000 iterations of the slice sampler. We use the default settings for the upper limits, lower limits and slice widths for $a_{01}$ and $a_{02}$. The same initial priors and sampling priors are used as in the fixed $a_0$ case. When $a_0$ is modeled as random, the *historical* matrix is defined where each row represents a historical dataset, and the two columns represent the sum of the responses and the sample size, respectively. ```{r, eval=FALSE} historical <- matrix(0, ncol=2, nrow=2) historical[1,] <- c(44, 535) historical[2,] <- c(33, 304) ``` The code below computes the power when $n_t=750$. ```{r, eval=FALSE} n.t <- 750 results <- power.two.grp.random.a0(data.type="Bernoulli", n.t=n.t, n.c=round(n.t/3),historical=historical, samp.prior.mu.t=0.092, samp.prior.mu.c=0.092, prior.mu.t.shape1=0.0001, prior.mu.t.shape2=0.0001, prior.mu.c.shape1=0.0001,prior.mu.c.shape2=0.0001, prior.a0.shape1=1,prior.a0.shape2=1, delta=0.041, gamma=0.95, nMC=10000, nBI=250, N=10000) summary(results) ``` # 2. Two Group Cases with Normally Distributed Outcomes We now demonstrate a model for normally distributed outcomes for treatment and control groups with no covariates. We use simulated data for this example. We assume the $i$-th observation from the treatment group $y_{ti}$ follows N($\mu_t$, $\tau^{-1}$) and the $i$-th observation from the control group $y_{ci}$ follows N($\mu_c$, $\tau^{-1}$), where $\tau$ is the precision parameter for the current data. The null hypothesis is $H_0: \mu_t - \mu_c \ge \delta$. We set $\delta=0$. We assume the treatment group sample size ($n_t$) and the control group sample size ($n_c$) are both $100$. ## 2.1 Power Prior with Fixed $a_0$ First, we assume $a_0$ is fixed. We simulate three historical datasets. For normally distributed data, the *historical* matrix is defined where each row represents a historical dataset, and the four columns represent the sum of the responses, the sample size, the sample variance and $a_0$, respectively. 
```{r, eval=TRUE}
data.type <- "Normal"
n.t <- 100
n.c <- 100

# Simulate three historical datasets
K <- 3
historical <- matrix(0, ncol=4, nrow=K)
# The columns are the sum of the responses, the sample size, the sample variance and a_0
historical[1,] <- c(50, 50, 1, 0.3)
historical[2,] <- c(30, 50, 1, 0.5)
historical[3,] <- c(20, 50, 1, 0.7)
```

To calculate power, we can provide sampling priors for $\mu_t$ and $\mu_c$ such that the mass is placed on $\mu_t - \mu_c < 0$. We generate the sampling prior for the variance parameter from a Gamma(1, 1) distribution.

```{r, eval=TRUE}
# Generate sampling priors
set.seed(1)
samp.prior.mu.t <- rnorm(50000)
samp.prior.mu.c <- rnorm(50000)
sub_ind <- which(samp.prior.mu.t < samp.prior.mu.c)
# Here, mass is put on the alternative region, so power is calculated.
samp.prior.mu.t <- samp.prior.mu.t[sub_ind]
samp.prior.mu.c <- samp.prior.mu.c[sub_ind]
samp.prior.var.t <- rgamma(100, 1, 1)
samp.prior.var.c <- rgamma(100, 1, 1)
```

We run $10,000$ iterations of the Gibbs sampler for $N=100$ simulated datasets. Note that $N$ should be larger in practice.

```{r, eval=TRUE}
set.seed(1)
results <- power.two.grp.fixed.a0(data.type=data.type, n.t=n.t, n.c=n.t, historical=historical,
                                  samp.prior.mu.t=samp.prior.mu.t, samp.prior.mu.c=samp.prior.mu.c,
                                  samp.prior.var.t=samp.prior.var.t, samp.prior.var.c=samp.prior.var.c,
                                  delta=0, nMC=10000, nBI=250, N=100)
summary(results)
```

Next, to calculate the type I error rate, we can provide sampling priors for $\mu_t$ and $\mu_c$ such that the mass is placed on $\mu_t - \mu_c \ge 0$.

```{r, eval=TRUE}
# Generate sampling priors
set.seed(1)
samp.prior.mu.t <- rnorm(50000)
samp.prior.mu.c <- rnorm(50000)
sub_ind <- which(samp.prior.mu.t >= samp.prior.mu.c)
# Here, mass is put on the null region, so type I error rate is calculated.
samp.prior.mu.t <- samp.prior.mu.t[sub_ind]
samp.prior.mu.c <- samp.prior.mu.c[sub_ind]

set.seed(1)
results <- power.two.grp.fixed.a0(data.type=data.type, n.t=n.t, n.c=n.t, historical=historical,
                                  samp.prior.mu.t=samp.prior.mu.t, samp.prior.mu.c=samp.prior.mu.c,
                                  samp.prior.var.t=samp.prior.var.t, samp.prior.var.c=samp.prior.var.c,
                                  delta=0, nMC=10000, nBI=250, N=100)
summary(results)
```

## 2.2 Normalized Power Prior ($a_0$ Modeled as Random)

Next, we model $a_0$ as random with the normalized power prior. We simulate three historical datasets. Here, the *historical* matrix is defined where each row represents a historical dataset, and the three columns represent the sum of the responses, the sample size, and the sample variance, respectively.

```{r, eval=TRUE}
data.type <- "Normal"
n.t <- 100
n.c <- 100

# Simulate three historical datasets
K <- 3
historical <- matrix(0, ncol=3, nrow=K)
# The columns are the sum of the responses, the sample size, and the sample variance
historical[1,] <- c(50, 50, 1)
historical[2,] <- c(30, 50, 1)
historical[3,] <- c(20, 50, 1)
```

To calculate power, we can provide sampling priors for $\mu_t$ and $\mu_c$ such that the mass is placed on $\mu_t - \mu_c < 0$. We generate the sampling prior for the variance parameter from a Gamma(1, 1) distribution.

```{r, eval=TRUE}
# Generate sampling priors
set.seed(1)
samp.prior.mu.t <- rnorm(50000)
samp.prior.mu.c <- rnorm(50000)
sub_ind <- which(samp.prior.mu.t < samp.prior.mu.c)
# Here, mass is put on the alternative region, so power is calculated.
samp.prior.mu.t <- samp.prior.mu.t[sub_ind]
samp.prior.mu.c <- samp.prior.mu.c[sub_ind]
samp.prior.var.t <- rgamma(100, 1, 1)
samp.prior.var.c <- rgamma(100, 1, 1)
```

We use the default prior on $a_0$, the uniform prior. 
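A non-uniform beta prior on $a_0$ can be specified instead through the *prior.a0.shape1* and *prior.a0.shape2* arguments. The chunk below is a sketch (not run) assuming beta(2, 2) priors, one for each of the three historical datasets.

```{r, eval=FALSE}
# Sketch: beta(2, 2) priors on a_01, a_02 and a_03, one per historical dataset
results <- power.two.grp.random.a0(data.type=data.type, n.t=n.t, n.c=n.t, historical=historical,
                                   samp.prior.mu.t=samp.prior.mu.t, samp.prior.mu.c=samp.prior.mu.c,
                                   samp.prior.var.t=samp.prior.var.t, samp.prior.var.c=samp.prior.var.c,
                                   prior.a0.shape1=rep(2,3), prior.a0.shape2=rep(2,3),
                                   delta=0, nMC=10000, nBI=250, N=100)
```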
The average posterior means of $a_0$ and $\tau$ are also returned below. We run $10,000$ iterations of the Gibbs sampler (for $\mu_c$) and the slice sampler (for $a_0$) for $N=100$ simulated datasets. Note that $N$ should be larger in practice.

```{r, eval=TRUE}
set.seed(1)
results <- power.two.grp.random.a0(data.type=data.type, n.t=n.t, n.c=n.t, historical=historical,
                                   samp.prior.mu.t=samp.prior.mu.t, samp.prior.mu.c=samp.prior.mu.c,
                                   samp.prior.var.t=samp.prior.var.t, samp.prior.var.c=samp.prior.var.c,
                                   delta=0, nMC=10000, nBI=250, N=100)
summary(results)
results$`average posterior means of a0`
results$`average posterior mean of tau`
```

Next, to calculate the type I error rate, we can provide sampling priors for $\mu_t$ and $\mu_c$ such that the mass is placed on $\mu_t - \mu_c \ge 0$.

```{r, eval=TRUE}
# Generate sampling priors
set.seed(1)
samp.prior.mu.t <- rnorm(50000)
samp.prior.mu.c <- rnorm(50000)
sub_ind <- which(samp.prior.mu.t >= samp.prior.mu.c)
# Here, mass is put on the null region, so type I error rate is calculated.
samp.prior.mu.t <- samp.prior.mu.t[sub_ind]
samp.prior.mu.c <- samp.prior.mu.c[sub_ind]

set.seed(1)
results <- power.two.grp.random.a0(data.type=data.type, n.t=n.t, n.c=n.t, historical=historical,
                                   samp.prior.mu.t=samp.prior.mu.t, samp.prior.mu.c=samp.prior.mu.c,
                                   samp.prior.var.t=samp.prior.var.t, samp.prior.var.c=samp.prior.var.c,
                                   delta=0, nMC=10000, nBI=250, N=100)
summary(results)
```
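The estimated power or type I error rate and the underlying posterior probabilities can also be extracted directly from the returned object. The sketch below is not run; the element names are assumed to follow those used by the fixed-$a_0$ functions and may differ by version.

```{r, eval=FALSE}
results$`power/type I error`
# Posterior probabilities of the alternative hypothesis across the N simulated datasets
probs <- results$`posterior probabilities`
mean(probs >= 0.95)  # should reproduce the estimate above at gamma = 0.95
```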
/scratch/gouwar.j/cran-all/cranData/BayesPPD/inst/doc/bayesppd-vignette.Rmd
## usethis namespace: start
#' @importFrom Rcpp evalCpp
#' @useDynLib BayesPPDSurv,.registration = TRUE
## usethis namespace: end
NULL

#' Bayesian sample size determination using the power and normalized power prior for survival data
#'
#' The \pkg{BayesPPDSurv} (Bayesian Power Prior Design for Survival Data) package provides two categories of functions:
#' functions for Bayesian power/type I error calculation and functions for model fitting.
#'
#' @references Ibrahim, J. G., Chen, M.-H. and Sinha, D. (2001). Bayesian Survival Analysis. New York: Springer Science & Business Media.
#'
#' Psioda, M. A. and Ibrahim, J. G. (2019). Bayesian clinical trial design using historical data that inform the treatment effect. Biostatistics 20, 400–415.
#'
#' Shen, Y., Psioda, M. A., and Ibrahim, J. G. (2023). BayesPPD: an R package for Bayesian sample size determination using the power and normalized power prior for generalized linear models. The R Journal, 14(4).
#' @docType package
#' @name BayesPPDSurv-package
NULL
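# A brief, non-exhaustive orientation (see the individual help pages):
#   power/type I error calculation: power.phm.fixed.a0(), power.phm.random.a0()
#   model fitting:                  phm.fixed.a0()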
/scratch/gouwar.j/cran-all/cranData/BayesPPDSurv/R/BayesPPDSurv-package.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 phm_fixed_a0 <- function(curr_tables0, hist_tables0, a0_vec0, n_intervals0, shared_blh0, P0, prior_beta0, prior_beta_mu0, prior_beta_sigma0, prior_lambda0, prior_lambda_hp10, prior_lambda_hp20, prior_lambdah_hp10, prior_lambdah_hp20, lower_limits0, upper_limits0, slice_widths0, nMC, nBI, dCurrent0) { .Call(`_BayesPPDSurv_phm_fixed_a0`, curr_tables0, hist_tables0, a0_vec0, n_intervals0, shared_blh0, P0, prior_beta0, prior_beta_mu0, prior_beta_sigma0, prior_lambda0, prior_lambda_hp10, prior_lambda_hp20, prior_lambdah_hp10, prior_lambdah_hp20, lower_limits0, upper_limits0, slice_widths0, nMC, nBI, dCurrent0) } phm_random_a0 <- function(curr_tables0, hist_tables0, n_intervals0, P0, prior_beta_mvn0, prior_lambda_hp10, prior_lambda_hp20, lower_limits0, upper_limits0, slice_widths0, nMC, nBI) { .Call(`_BayesPPDSurv_phm_random_a0`, curr_tables0, hist_tables0, n_intervals0, P0, prior_beta_mvn0, prior_lambda_hp10, prior_lambda_hp20, lower_limits0, upper_limits0, slice_widths0, nMC, nBI) } npp_beta <- function(L, hist_tables0, n_intervals0, P0, prior_a0_shape1, prior_a0_shape2, prior_beta_mu0, prior_beta_sigma0, prior_lambdah_hp10, prior_lambdah_hp20, lower_limits0, upper_limits0, slice_widths0, nBI) { .Call(`_BayesPPDSurv_npp_beta`, L, hist_tables0, n_intervals0, P0, prior_a0_shape1, prior_a0_shape2, prior_beta_mu0, prior_beta_sigma0, prior_lambdah_hp10, prior_lambdah_hp20, lower_limits0, upper_limits0, slice_widths0, nBI) }
/scratch/gouwar.j/cran-all/cranData/BayesPPDSurv/R/RcppExports.R
utils::globalVariables(c("r", "ev", "interval", "rtime", "evtime")) # function for creating stratum-specific time intervals so that all intervals have the same number of events #' @importFrom stats quantile create_intervals <- function(time, event, S, historical, n.intervals){ df <- data.frame(time=time, event=event, S=S) df <- df[order(df[,"time"],decreasing=FALSE),] unique_strat <- sort(unique(S)) # loop through the strata interval_list <- list() for(k in 1:length(unique_strat)){ df_s <- df %>% filter(S==unique_strat[k]) ev_time <- df_s$time[df_s$event==1] for(i in 1:length(historical)){ data <- historical[[i]] df_data <- data.frame(time=data[["time"]], event=data[["event"]], S=data[["S"]]) df_h <- df_data %>% filter(S==unique_strat[k]) ev_time <- c(ev_time, df_h$time[df_h$event==1]) } intervals <- quantile(ev_time, probs=seq(from=0,to=1,length.out=n.intervals[k]+1)) intervals[1] <- 0 intervals[n.intervals[k]+1] <- Inf interval_list[[k]] <- intervals } return(interval_list) } # function for creating stratum-specific time intervals using only historical data # used in the function for design, and when there is no current data #' @importFrom stats quantile create_intervals_historical <- function(historical, n.intervals){ df <- historical[[1]] S <- df[["S"]] unique_strat <- sort(unique(S)) # loop through the strata interval_list <- list() for(k in 1:length(unique_strat)){ ev_time <- NULL if(length(historical) != 0){ for(i in 1:length(historical)){ data <- historical[[i]] df_data <- data.frame(time=data[["time"]], event=data[["event"]], S=data[["S"]]) df_h <- df_data %>% filter(S==unique_strat[k]) ev_time <- c(ev_time, df_h$time[df_h$event==1]) } } intervals <- quantile(ev_time, probs=seq(from=0,to=1,length.out=n.intervals[k]+1)) intervals[1] <- 0 intervals[n.intervals[k]+1] <- Inf interval_list[[k]] <- intervals } return(interval_list) } create_tables <- function(time, event, X, S, change.points){ X <- as.matrix(X) P <- ncol(X) dffull <- data.frame(time=time, event=event, X=X, S=S) dffull <- dffull[order(dffull[,"time"],decreasing=FALSE),] list_tbs <- list() unique_strat <- sort(unique(S)) for(k in 1:length(unique_strat)){ df <- dffull %>% filter(S==unique_strat[k]) get_group <- df %>% group_by(across(starts_with("X"))) %>% summarise(count=n()) cov_group <- get_group[,-ncol(get_group)] rtime_table1 <- evtime_table1 <- NULL intervals <- change.points[[k]] for(j in 2:length(intervals)){ # table for risk times int <- intervals[j] - intervals[j-1] df$r <- ifelse(df$time > intervals[j], int, df$time-intervals[j-1]) df$r[df$r < 0] <- 0 tab <- df %>% group_by(across(starts_with("X"))) %>% summarise(risk_time=sum(r)) rtime_table1 <- cbind(rtime_table1, tab$risk_time) #df$ev <- ifelse(df$time>=intervals[j-1] & df$time<intervals[j] & df$event==1, 1, 0) df$ev <- ifelse(df$time>intervals[j-1] & df$time<=intervals[j] & df$event==1, 1, 0) tab <- df %>% group_by(across(starts_with("X"))) %>% summarise(events_sum=sum(ev)) evtime_table1 <- cbind(evtime_table1, tab$events_sum) } colnames(rtime_table1) <- 1:(ncol(rtime_table1)) colnames(evtime_table1) <- 1:(ncol(evtime_table1)) # table for events rtime_table <- cbind(cov_group, as.data.frame(rtime_table1)) rtime_long <- gather(rtime_table, interval, rtime, (P+1):(P+ncol(rtime_table1))) evtime_table <- cbind(cov_group, as.data.frame(evtime_table1)) evtime_long <- gather(evtime_table, interval, evtime, (P+1):(P+ncol(evtime_table1))) rtime_long$evt <- evtime_long$evtime table <- rtime_long %>% filter(rtime !=0) table$interval <- as.integer(table$interval) 
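    # store the collapsed risk-time / event-count table for this stratum as a matrix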
    list_tbs[[k]] <- as.matrix(table)
  }
  return(list_tbs)
}

# function for creating current and historical tables
collapse_data <- function(time, event, X, S, historical, n.intervals, change.points, dCurrent){

  # create a list of intervals for the strata
  #change.points <- create_intervals(time, event, S, historical, n.intervals)

  # create tables for current data
  if(dCurrent==TRUE){
    curr_tables <- suppressMessages(create_tables(time, event, X, S, change.points))
  }

  # create tables for historical data
  hist_tables <- list()
  if(length(historical)!=0){
    for(i in 1:length(historical)){
      data <- historical[[i]]
      tabs <- suppressMessages(create_tables(data[["time"]],data[["event"]],data[["X"]],S=data[["S"]],change.points))
      hist_tables <- c(hist_tables, list(tabs))
    }
  }

  if(dCurrent==FALSE){
    result <- list(hist_tables=hist_tables)
  }else if(length(historical)==0){
    result <- list(curr_tables=curr_tables)
  }else{
    result <- list(curr_tables=curr_tables, hist_tables=hist_tables)
  }
  return(result)
}

#' Power/type I error calculation for the proportional hazards model with piecewise constant hazard and fixed a0
#'
#' @description Power/type I error calculation using power priors for the proportional hazards model with piecewise constant hazard and fixed \eqn{a_0}
#'
#' @param historical List of historical dataset(s). Each historical dataset is stored in a list which contains four \emph{named} elements: \code{time}, \code{event}, \code{X} and \code{S}.
#' \itemize{
#' \item \code{time} is a vector of follow up times.
#' \item \code{event} is a vector of status indicators. Normally 0=alive and 1=dead.
#' \item \code{X} is a matrix of covariates. The first column must be the treatment indicator.
#' \item \code{S} is a vector of integers, where each integer represents the stratum that the subject belongs to. For example, if there are three strata, S can take values 1, 2 or 3.
#' }
#' @param a0 Vector containing numbers between 0 and 1 indicating the discounting parameter value for each historical dataset. The length of the vector should be equal to the length of \code{historical}.
#' @param n.subjects Number of subjects enrolled.
#' @param n.events Number of events at which the trial will stop.
#' @param n.intervals Vector of integers, indicating the number of intervals for the baseline hazards for each stratum. The length of the vector should be equal to the total number of strata.
#' @param change.points List of vectors. Each vector in the list contains the change points for the baseline hazards for each stratum. The length of the list should be equal to the total number of strata.
#' For a given stratum, if there is only one interval, then \code{change.points} should be \code{NULL} for that stratum.
#' These change points are used for data generation. The change points used during model fitting are assigned by default so that the same number of events are observed in all the intervals in the pooled historical and generated current data.
#' @param shared.blh Logical value indicating whether baseline hazard parameters are shared between the current and historical data. If TRUE, baseline hazard parameters are shared. The default value is FALSE.
#' @param samp.prior.beta Matrix of possible values of \eqn{\beta} to sample (with replacement) from. Each row is a possible \eqn{\beta} vector (a realization from the sampling prior for \eqn{\beta}).
#' @param samp.prior.lambda List of matrices, where each matrix represents the sampling prior for the baseline hazards for each stratum.
The number of columns of each matrix should be equal to the number of intervals for that stratum. #' @param x.samples (Only applies when there is no historical dataset) matrix of possible values of covariates from which covariate vectors are sampled with replacement. #' @param s.samples (Only applies when there is no historical dataset) vector of possible values of the stratum index from which the stratum indices are sampled with replacement. #' @param dist.enroll Distribution for enrollment times. The choices are "Uniform" or "Exponential". #' @param param.enroll Parameter for the distribution of enrollment times. If \code{dist.enroll} is "Uniform", the enrollment times follow Unif(0, \code{param.enroll}). If \code{dist.enroll} is "Exponential", #' the enrollment times follow Exponential(rate=\code{param.enroll}). #' @param rand.prob Randomization probability for the treated group. The default value is 0.5. #' @param prob.drop Probability of subjects dropping out of the study (non-administrative censoring). The default value is zero. #' @param param.drop Parameter for dropout time simulations. The dropout times follow Unif(0, \code{param.drop}). The default value is zero. #' @param dist.csr Distribution for (administrative) censorship times. The choices are "Uniform", "Constant" and "Exponential". The default choice is "Constant". #' @param param.csr Parameter for the (administrative) censorship times. If \code{dist.csr} is "Uniform", the censorship times follow Unif(0, \code{param.csr}). #' If \code{dist.csr} is "Constant", the censorship times of all subjects are equal to \code{param.csr}. #' If \code{dist.csr} is "Exponential", the censorship times follow Exponential(rate=\code{param.csr}). #' The default value is 10^4. #' @param min.follow.up Minimum amount of time for which subjects are followed up. The default value is zero. #' @param max.follow.up Maximum amount of time for which subjects are followed up. The default value is 10^4. #' @param prior.beta Prior used for \eqn{\beta}. The choices are "Uniform" and "Normal". If \code{prior.beta} is "Uniform", the uniform improper prior is used. #' If \code{prior.beta} is "Normal", independent normal priors are used for each element of \eqn{\beta}. The default choice is "Normal". #' @param prior.beta.mean (Only applies if \code{prior.beta} is "Normal") vector of means of the normal prior on \eqn{\beta}. The default value is zero for all the elements of \eqn{\beta}. #' @param prior.beta.sd (Only applies if \code{prior.beta} is "Normal") vector of standard deviations of the normal prior on \eqn{\beta}. The default value is 10^3 for all the elements of \eqn{\beta}. #' @param prior.lambda Prior used for \eqn{\lambda}. The choices are "Gamma", "Log-normal" and "Improper". The default choice is "Gamma". #' #' If \code{prior.lambda} is "Gamma", then the prior on the first element of \eqn{\lambda} is #' #' Gamma(shape=\code{prior.lambda.hp1[1]}, rate=\code{prior.lambda.hp2[1]}). #' #' If \code{prior.lambda} is "Log-normal", then the prior on the first element of \eqn{\lambda} is Log-normal(mean=\code{prior.lambda.hp1[1]}, sd=\code{prior.lambda.hp2[1]}). #' #' If \code{prior.lambda} is "Improper", then the prior on each element of \eqn{\lambda} is the improper prior \eqn{\lambda^{-1}}. #' @param prior.lambda.hp1 (Only applies if \code{prior.lambda} is "Gamma" or "Log-normal") Vector of first hyperparameters of the prior on \eqn{\lambda}. 
#' The length of the vector should be equal to the dimension of \eqn{\lambda}, i.e., the total number of intervals for all strata. The default value is 10^(-5) for all the elements of \eqn{\lambda}.
#' @param prior.lambda.hp2 (Only applies if \code{prior.lambda} is "Gamma" or "Log-normal") Vector of second hyperparameters of the prior on \eqn{\lambda}.
#' The length of the vector should be equal to the dimension of \eqn{\lambda}, i.e., the total number of intervals for all strata. The default value is 10^(-5) for all the elements of \eqn{\lambda}.
#' @param prior.lambda0.hp1 (Only applies if \code{shared.blh} is FALSE and if \code{prior.lambda} is "Gamma" or "Log-normal") Vector of first hyperparameters of the prior on \eqn{\lambda_0}.
#' We assume the same distribution choice for the prior for \eqn{\lambda_0} and \eqn{\lambda}.
#' The length of the vector should be equal to the dimension of \eqn{\lambda_0}, i.e., the total number of intervals for all strata. The default value is 10^(-5) for all the elements of \eqn{\lambda_0}.
#' @param prior.lambda0.hp2 (Only applies if \code{shared.blh} is FALSE and if \code{prior.lambda} is "Gamma" or "Log-normal") Vector of second hyperparameters of the prior on \eqn{\lambda_0}.
#' We assume the same distribution choice for the prior for \eqn{\lambda_0} and \eqn{\lambda}.
#' The length of the vector should be equal to the dimension of \eqn{\lambda_0}, i.e., the total number of intervals for all strata. The default value is 10^(-5) for all the elements of \eqn{\lambda_0}.
#' @param lower.limits Vector of lower limits for parameters (\eqn{\beta}, \eqn{\lambda}, and \eqn{\lambda_0}, in this order) to be used by the slice sampler. The length of the vector should be equal to the total number of parameters. The default is -100 for \eqn{\beta} and 0 for \eqn{\lambda} and \eqn{\lambda_0} (may not be appropriate for all situations).
#' @param upper.limits Vector of upper limits for parameters (\eqn{\beta}, \eqn{\lambda}, and \eqn{\lambda_0}, in this order) to be used by the slice sampler. The length of the vector should be equal to the total number of parameters. The default is 100 for all parameters (may not be appropriate for all situations).
#' @param slice.widths Vector of initial slice widths for parameters (\eqn{\beta}, \eqn{\lambda}, and \eqn{\lambda_0}, in this order) to be used by the slice sampler. The length of the vector should be equal to the total number of parameters. The default is 0.1 for all parameters (may not be appropriate for all situations).
#' @param nMC Number of iterations (excluding burn-in samples) for the slice sampler. The default is 10,000.
#' @param nBI Number of burn-in samples for the slice sampler. The default is 250.
#' @param delta Prespecified constant that defines the boundary of the null hypothesis. The default is zero.
#' @param nullspace.ineq Character string specifying the inequality of the null hypothesis. The options are ">" and "<". If ">" is specified, the null hypothesis is \eqn{H_0}: \eqn{\beta_1} \eqn{\ge} \eqn{\delta}. If "<" is specified, the null hypothesis is \eqn{H_0}: \eqn{\beta_1} \eqn{\le} \eqn{\delta}. The default choice is ">".
#' @param gamma Posterior probability threshold for rejecting the null. The null hypothesis is rejected if the posterior probability is greater than \code{gamma}. The default is 0.95.
#' @param N Number of simulated datasets to generate. The default is 10,000.
#'
#' @details The proportional hazards model with piecewise constant hazard is implemented.
#' We let \eqn{\beta} denote the vector of regression coefficients. We assume the first column of the covariate matrix is the treatment indicator,
#' and the corresponding parameter is \eqn{\beta_1}. The baseline hazards of the current data are denoted by \eqn{\lambda}.
#' The baseline hazards of the historical data are denoted by \eqn{\lambda_0}.
#' If the baseline hazards are shared between the historical and current data, then \eqn{\lambda_0}=\eqn{\lambda}.
#'
#' To perform sample size determination, we test the hypotheses
#' \deqn{H_0: \beta_1 \ge \delta} and \deqn{H_1: \beta_1 < \delta.}
#'
#' If historical datasets are provided, the algorithm samples with replacement from the historical covariates to construct the simulated datasets.
#' Otherwise, the algorithm samples with replacement from \code{x.samples}. One of the arguments \code{historical} and \code{x.samples} must be provided.
#'
#' The sampling prior for the treatment parameter can be generated from a normal distribution (see examples).
#' For example, suppose one wants to compute the power for the hypotheses \eqn{H_0: \beta_1 \ge 0} and \eqn{H_1: \beta_1 < 0.}
#' To approximate the sampling prior for \eqn{\beta_1}, one can simply sample from a normal distribution with negative mean,
#' so that the mass of the prior falls in the alternative space. Conversely, to compute the type I error rate, one can
#' sample from a normal distribution with positive mean, so that the mass of the prior falls in the null space.
#'
#' The sampling prior for the other parameters (\eqn{\beta_2}, ..., \eqn{\beta_p}, \eqn{\lambda} and \eqn{\lambda_0}) can be generated from the posterior based on the historical data.
#' This can be achieved by the function \link{phm.fixed.a0}
#' with \code{current.data} set to \code{FALSE} (see the vignette).
#'
#' Posterior samples are obtained through slice sampling.
#' The default lower limits are -100 for \eqn{\beta} and 0 for \eqn{\lambda} and \eqn{\lambda_0}. The default upper limits
#' for the parameters are 100. The default slice widths for the parameters are 0.1.
#' The defaults may not be appropriate for all situations, and the user can specify the appropriate limits
#' and slice width for each parameter.
#'
#' If a sampling prior with support in the null space is used, the value returned is a Bayesian type I error rate.
#' If a sampling prior with support in the alternative space is used, the value returned is a Bayesian power.
#'
#'
#' @return Power or type I error is returned, depending on the sampling prior used.
#' The posterior probabilities of the alternative hypothesis are returned.
#' The average posterior means of \eqn{\beta}, \eqn{\lambda} and \eqn{\lambda_0}
#' (if the baseline hazard parameters are not shared) are also returned.
#' @references Ibrahim, J. G., Chen, M.-H. and Sinha, D. (2001). Bayesian Survival Analysis. New York: Springer Science & Business Media.
#'
#' Psioda, M. A. and Ibrahim, J. G. (2019). Bayesian clinical trial design using historical data that inform the treatment effect. Biostatistics 20, 400–415.
#'
#' Shen, Y., Psioda, M. A., and Ibrahim, J. G. (2023). BayesPPD: an R package for Bayesian sample size determination using the power and normalized power prior for generalized linear models. The R Journal, 14(4).
#' @seealso \code{\link{phm.fixed.a0}} #' @examples #' #' #' # Simulate two historical datasets #' set.seed(1) #' n <- 100 #' P <- 4 #' time1 <- round(rexp(n, rate=0.5),1) #' event1 <- rep(1,n) #' X1 <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P) #' S1 <- c(rep(1,n/2),rep(2,n/2)) #' time2 <- round(rexp(n, rate=0.7),1) #' event2 <- rep(1,n) #' X2 <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P) #' S2 <- c(rep(1,n/2),rep(2,n/2)) #' historical <- list(list(time=time1, event=event1, X=X1, S=S1), #' list(time=time2, event=event2, X=X2, S=S2)) #' #' # a0 is 0.3 for the first historical dataset and 0.6 for the second #' a0 <- c(0.3, 0.6) #' #' n.subjects <- 100 #' n.events <- 30 #' #' # We choose three intervals for the first stratum and two intervals for the second stratum #' n.intervals <- c(3,2) #' change.points <- list(c(1,2),1) #' #' #' # Generate sampling priors #' #' # The null hypothesis here is H0: beta_1 >= 0. To calculate power, #' # we can provide samples of beta_1 such that the mass of beta_1 < 0. #' # To calculate type I error, we can provide samples of beta_1 such that #' # the mass of beta_1 >= 0. #' samp.prior.beta1 <- rnorm(100, mean=-1, sd=1) #' # Here, mass is put on the alternative region, so power is calculated. #' samp.prior.beta <- cbind(samp.prior.beta1, matrix(rnorm(100*(P-1)), 100, P-1)) #' #' # Point mass sampling priors are used for lambda #' lambda_strat1 <- matrix(c(0.5, 0.5, 0.5), nrow=1) #' lambda_strat2 <- matrix(c(0.7, 0.7), nrow=1) #' samp.prior.lambda <- list(lambda_strat1, lambda_strat2) #' #' #' nMC <- 100 # nMC should be larger in practice #' nBI <- 50 #' N <- 5 # N should be larger in practice #' #' result <- power.phm.fixed.a0(historical=historical, a0=a0, n.subjects=n.subjects, #' n.events=n.events, n.intervals=n.intervals, #' change.points=change.points, #' samp.prior.beta=samp.prior.beta, #' samp.prior.lambda=samp.prior.lambda, #' dist.enroll="Uniform", param.enroll=0.5, #' nMC=nMC, nBI=nBI, delta=0, nullspace.ineq=">", N=N) #' result$`power/type I error` #' result$`average posterior mean of beta` #' result$`average posterior mean of lambda` #' result$`average posterior mean of lambda0` #' #' #' #' @export #' @import dplyr tidyr #' @importFrom stats runif rexp rbinom power.phm.fixed.a0 <- function(historical, a0, n.subjects, n.events, n.intervals, change.points, shared.blh=FALSE, samp.prior.beta, samp.prior.lambda, # list of matrices x.samples=matrix(), s.samples=NULL, # these two are matrices dist.enroll, param.enroll, rand.prob=0.5, prob.drop=0, param.drop=0, dist.csr="Constant", param.csr=10000, min.follow.up=0, max.follow.up=10000, prior.beta="Normal", prior.beta.mean=rep(0,50), prior.beta.sd=rep(1000,50), prior.lambda="Gamma", prior.lambda.hp1=rep(10^(-5),50), prior.lambda.hp2=rep(10^(-5),50), prior.lambda0.hp1=rep(10^(-5),50), prior.lambda0.hp2=rep(10^(-5),50), lower.limits=NULL, upper.limits=rep(100, 50), slice.widths=rep(0.1, 50), nMC=10000, nBI=250, delta=0, nullspace.ineq=">", gamma=0.95, N=10000){ # add zero and infinity to change.points change.points.new <- list() for(i in 1:length(n.intervals)){ if(n.intervals[i]==1){ l1 <- c(0, Inf) }else{ l <- change.points[[i]] l1 <- unique(c(0, l, Inf)) } change.points.new[[i]] <- l1 } # build matrix of covariates to sample from if(length(historical)==0){ P <- ncol(x.samples) }else{ P <- ncol(historical[[1]]$X) } if(length(historical)==0){ x_sim <- x.samples; # dimension P-1 }else{ x_sim <- matrix(NA, nrow=0, ncol=P-1) for(k in 1:length(historical)){ dat <- historical[[k]] x_h <- as.matrix(dat$X[,-1]) 
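      # stack the historical covariates (treatment column dropped) into the pool
      # from which current-trial covariate vectors are later resampled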
x_sim <- rbind(x_sim, x_h) } } # build matrix of strata to sample from if(length(historical)==0){ s_sim <- s.samples; # dimension 1 }else{ s_sim <- NULL for(k in 1:length(historical)){ dat <- historical[[k]] s_h <- dat$S s_sim <- c(s_sim, s_h) } } # repeat N times posterior_probs <- NULL # save the posterior means of beta and lambda beta_mean <- matrix(NA, nrow=N, ncol=P) lambda_sum <- list() for(j in 1:length(n.intervals)){ lambda_sum[[j]] <- rep(0,n.intervals[j]) } # save posterior means of lambda0 if(shared.blh==FALSE){ lambda0_sum <- list() for(j in 1:length(n.intervals)){ lambda0_sum[[j]] <- rep(0,n.intervals[j]) } } for(l in 1:N){ # print(l) # sample stratum variable ind <- sample(1:length(s_sim), n.subjects, replace = TRUE) s <- s_sim[ind] # sample beta ind <- sample(1:nrow(samp.prior.beta), 1, replace = TRUE) beta <- samp.prior.beta[ind,] # sample lambda list_lambda <- list() for(j in 1:length(unique(s))){ lams <- samp.prior.lambda[[j]] ind <- sample(1:nrow(lams), 1, replace = TRUE) list_lambda <- c(list_lambda, list(lams[ind,])) } # sample enrollment times if(dist.enroll=="Uniform"){ r <- runif(n.subjects,0,param.enroll) }else{ r <- rexp(n.subjects,rate=param.enroll) } # sample covariates and treatment variable ind <- sample(1:nrow(x_sim), n.subjects, replace = TRUE) x <- x_sim[ind,] z <- rbinom(n.subjects,size=1,rand.prob) x <- cbind(trt=z, x) phi <- exp(x%*%beta) # simulate event time ti eventtimes <- NULL for(i in 1:n.subjects){ strat <- s[i] lambda_s <- list_lambda[[strat]] st <- change.points.new[[strat]] st_1 <- st[-length(st)] k <- 1 while(TRUE){ theta_k <- phi[i]*lambda_s[k] t_tilde <- rexp(1,rate=theta_k) + st_1[k] if(k > length(st_1) | t_tilde <= st[k+1]){ break } k <- k + 1 } eventtimes <- c(eventtimes, t_tilde) } # sample (administrative) censorship times if(dist.csr=="Uniform"){ ctime <- runif(n.subjects,0,param.csr) }else if(dist.csr=="Constant"){ ctime <- rep(param.csr, n.subjects) }else{ ctime <- rexp(n.subjects,rate=param.csr) } y <- ifelse(eventtimes <= ctime, eventtimes, ctime) nu <- ifelse(eventtimes <= ctime, 1, 0) # simulate dropouts # simulate a dropout time; if dropout time < y, the person is a dropout; # otherwise the person has an event or is administratively censored num_drops <- round(n.subjects * prob.drop, 0) if(num_drops > 0){ drop_ind <- sample(1:n.subjects, size=num_drops, replace=FALSE) droptime <- runif(num_drops, 0, param.drop) obstime <- ifelse(y[drop_ind] <= droptime, y[drop_ind], droptime) bool_drop <- ifelse(y[drop_ind] <= droptime, 0, 1) nu[drop_ind][bool_drop==1] <- 0 y[drop_ind] <- obstime } e <- r+y complete_data <- data.frame(X=x, S=s, enrtimes=r, eventtimes=eventtimes, ctime=ctime, y=y, nu=nu, t_elps=e) # create original data df_events <- complete_data[complete_data$nu==1,] df_events1 <- df_events[order(df_events$t_elps),] if(nrow(df_events1) < n.events){ stoptime <- max.follow.up }else{ stoptime <- df_events1[n.events, "t_elps"] if(stoptime < min.follow.up){ stoptime <- min.follow.up } } finaldf <- complete_data[complete_data$enrtimes < stoptime,] finaldf$new_y <- ifelse(finaldf$t_elps > stoptime, stoptime-finaldf$enrtimes, finaldf$y) finaldf$new_nu <- ifelse(finaldf$t_elps > stoptime, 0, finaldf$nu) # create tables # choose change points so that there are equal number of events in the intervals in pooled current and historical data change.points.analysis <- create_intervals(time=finaldf$new_y, event=finaldf$new_nu, S=finaldf$S, historical=historical, n.intervals=n.intervals) tables <- collapse_data(time=finaldf$new_y, 
event=finaldf$new_nu, X=finaldf[,1:ncol(x)], S=finaldf$S, historical=historical, n.intervals=n.intervals, change.points=change.points.analysis, dCurrent=TRUE) t1 <- tables[["curr_tables"]] t2 <- tables[["hist_tables"]] if(is.null(lower.limits)){ lower.limits = c(rep(-100,P), rep(0,2*sum(n.intervals))) } samples <- phm_fixed_a0(t1, t2, a0, n.intervals, shared.blh, P, prior.beta, prior.beta.mean, prior.beta.sd, prior.lambda, prior.lambda.hp1, prior.lambda.hp2, prior.lambda0.hp1, prior.lambda0.hp2, lower.limits, upper.limits, slice.widths, nMC, nBI, TRUE) # compute probability of success beta1 <- samples$beta_samples[,1] if(nullspace.ineq == ">"){ pr <- sum(beta1 < delta) / length(beta1) }else{ pr <- sum(beta1 > delta) / length(beta1) } posterior_probs <- c(posterior_probs, pr) # average posterior mean of beta beta_mean[l,] <- colMeans(samples$beta_samples) # sum of posterior mean of lambda list_lambda <- samples$lambda_samples list_lambda_mean <- list() for(j in 1:length(list_lambda)){ list_lambda_mean[[j]] <- colMeans(list_lambda[[j]]) } for(j in 1:length(list_lambda)){ lambda_sum[[j]] <- lambda_sum[[j]]+list_lambda_mean[[j]] } # sum of posterior mean of lambda0 if(shared.blh==FALSE){ list_lambda0 <- samples$lambda0_samples list_lambda0_mean <- list() for(j in 1:length(list_lambda0)){ list_lambda0_mean[[j]] <- colMeans(list_lambda0[[j]]) } for(j in 1:length(list_lambda0)){ lambda0_sum[[j]] <- lambda0_sum[[j]]+list_lambda0_mean[[j]] } } } # average of posterior mean of lambda for(j in 1:length(n.intervals)){ lambda_sum[[j]] <- lambda_sum[[j]]/N } # average of posterior mean of lambda0 if(shared.blh==FALSE){ for(j in 1:length(n.intervals)){ lambda0_sum[[j]] <- lambda0_sum[[j]]/N } } power <- mean(posterior_probs >= gamma) if(shared.blh==FALSE){ return(list(#"simulated dataset"=simdf, "power/type I error"=power, "posterior probabilities"=posterior_probs, "average posterior mean of beta"=colMeans(beta_mean), "average posterior mean of lambda"=lambda_sum, "average posterior mean of lambda0"=lambda0_sum)) }else{ return(list(#"simulated dataset"=simdf, "power/type I error"=power, "posterior probabilities"=posterior_probs, "average posterior mean of beta"=colMeans(beta_mean), "average posterior mean of lambda"=lambda_sum)) } } #' Model fitting for the proportional hazards model with piecewise constant hazard and fixed a0 #' #' @description Model fitting using power priors for the proportional hazards model with piecewise constant hazard and fixed \eqn{a_0} #' @param time Vector of follow up times. #' @param event Vector of status indicators. Normally 0=alive and 1=dead. #' @param X Matrix of covariates. The first column must be the treatment indicator. #' @param S Vector of integers, where each integer represents the stratum that the subject belongs to. For example, if there are three strata, S can take values 1, 2 or 3. #' @param change.points List of vectors. Each vector in the list contains the change points for the baseline hazards for each stratum. The length of the list should be equal to the total number of strata. #' For a given stratum, if there is only one interval, then \code{change.points} should be \code{NULL} for that stratum. #' By default, we assign the change points so that the same number of events are observed in all the intervals in the pooled current and historical data. #' @param current.data Logical value indicating whether current data is included. The default is TRUE. 
If FALSE, only historical data is included in the analysis,
#' and the posterior samples can be used as a discrete approximation to the sampling prior in
#' \code{\link{power.phm.fixed.a0}} and \code{\link{power.phm.random.a0}}.
#' @inheritParams power.phm.fixed.a0
#'
#' @details The proportional hazards model with piecewise constant hazard is implemented.
#' We denote the vector of regression coefficients by \eqn{\beta}. We assume the first column of the covariate matrix is the treatment indicator,
#' and the corresponding parameter is \eqn{\beta_1}. The baseline hazards of the current data are denoted by \eqn{\lambda}.
#' The baseline hazards of the historical data are denoted by \eqn{\lambda_0}.
#' If the baseline hazards are shared between the historical and current data, then \eqn{\lambda_0}=\eqn{\lambda}.
#'
#' Posterior samples are obtained through slice sampling.
#' The default lower limits are -100 for \eqn{\beta} and 0 for \eqn{\lambda} and \eqn{\lambda_0}. The default upper limits
#' for the parameters are 100. The default slice widths for the parameters are 0.1.
#' The defaults may not be appropriate for all situations, and the user can specify the appropriate limits
#' and slice width for each parameter.
#'
#' @references Ibrahim, J. G., Chen, M.-H. and Sinha, D. (2001). Bayesian Survival Analysis. New York: Springer Science & Business Media.
#'
#' Psioda, M. A. and Ibrahim, J. G. (2019). Bayesian clinical trial design using historical data that inform the treatment effect. Biostatistics 20, 400–415.
#'
#' Shen, Y., Psioda, M. A., and Ibrahim, J. G. (2023). BayesPPD: an R package for Bayesian sample size determination using the power and normalized power prior for generalized linear models. The R Journal, 14(4).
#'
#'
#' @return Posterior samples of \eqn{\beta}, \eqn{\lambda} and \eqn{\lambda_0} (if baseline hazards are not shared between the current and historical data) are returned.
#' @seealso \code{\link{power.phm.fixed.a0}} #' @examples #' set.seed(1) #' # Simulate current data #' n <- 50 #' P <- 4 #' time <- round(rexp(n, rate=0.5),1) #' event <- rep(1,n) #' X <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P) #' S <- c(rep(1,n/2),rep(2,n/2)) #' #' # Simulate two historical datasets #' n <- 100 #' time1 <- round(rexp(n, rate=0.5),1) #' event1 <- rep(1,n) #' X1 <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P) #' S1 <- c(rep(1,n/2),rep(2,n/2)) #' time2 <- round(rexp(n, rate=0.7),1) #' event2 <- rep(1,n) #' X2 <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P) #' S2 <- c(rep(1,n/2),rep(2,n/2)) #' historical <- list(list(time=time1, event=event1, X=X1, S=S1), #' list(time=time2, event=event2, X=X2, S=S2)) #' #' # a0 is 0.3 for the first historical dataset and 0.6 for the second #' a0 <- c(0.3, 0.6) #' #' #' # We choose three intervals for the first stratum and two intervals for the second stratum #' n.intervals <- c(3,2) #' change.points <- list(c(1,2), 2) #' #' #' nMC <- 1000 # nMC should be larger in practice #' nBI <- 50 #' #' result <- phm.fixed.a0(time=time, event=event, X=X, S=S, #' historical=historical, a0=a0, n.intervals=n.intervals, #' change.points=change.points, nMC=nMC, nBI=nBI) #' #' # posterior mean of beta #' colMeans(result$beta_samples) #' # posterior mean of baseline hazards for stratum 1 #' colMeans(result$lambda_samples[[1]]) #' # posterior mean of baseline hazards for stratum 2 #' colMeans(result$lambda_samples[[2]]) #' # posterior mean of historical baseline hazards for stratum 1 #' colMeans(result$lambda0_samples[[1]]) #' # posterior mean of historical baseline hazards for stratum 2 #' colMeans(result$lambda0_samples[[2]]) #' #' #' #' #' @export #' @import dplyr tidyr phm.fixed.a0 <- function(time=NULL, event=NULL, X=NULL, S=NULL, historical, a0, n.intervals, change.points=NULL, shared.blh=FALSE, prior.beta="Normal", prior.beta.mean=rep(0,50), prior.beta.sd=rep(1000,50), prior.lambda="Gamma", prior.lambda.hp1=rep(10^(-5),50), prior.lambda.hp2=rep(10^(-5),50), prior.lambda0.hp1=rep(10^(-5),50), prior.lambda0.hp2=rep(10^(-5),50), lower.limits=NULL, upper.limits=rep(100, 50), slice.widths=rep(0.1, 50), current.data=TRUE, nMC=10000, nBI=250){ # add zero and infinity to change.points change.points.new <- list() if(is.null(change.points)){ if(current.data==TRUE){ change.points.new <- create_intervals(time, event, S, historical, n.intervals) }else{ change.points.new <- create_intervals_historical(historical, n.intervals) } }else{ for(i in 1:length(n.intervals)){ if(n.intervals[i]==1){ l1 <- c(0, Inf) }else{ l <- change.points[[i]] l1 <- unique(c(0, l, Inf)) } change.points.new[[i]] <- l1 } } # create tables tables <- collapse_data(time=time, event=event, X=X, S=S, historical=historical, n.intervals=n.intervals, change.points=change.points.new, dCurrent=current.data) if(current.data==TRUE){ t1 <- tables[["curr_tables"]]}else{ t1 <- NULL} t2 <- tables[["hist_tables"]] if(length(historical)==0){ P <- ncol(X) }else{ P <- ncol(historical[[1]]$X) } if(is.null(lower.limits)){ lower.limits = c(rep(-100,P), rep(0,2*sum(n.intervals))) } samples <- phm_fixed_a0(t1, t2, a0, n.intervals, shared.blh, P, prior.beta, prior.beta.mean, prior.beta.sd, prior.lambda, prior.lambda.hp1, prior.lambda.hp2, prior.lambda0.hp1, prior.lambda0.hp2, lower.limits, upper.limits, slice.widths, nMC, nBI, current.data) return(samples) }
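## A minimal usage sketch (kept inside `if (FALSE)` so it never runs when this
## file is sourced) of the workflow described in the details above: fit the
## historical data alone with current.data=FALSE, then recycle the posterior
## draws as a discrete sampling prior for power.phm.fixed.a0(). The objects
## `historical`, `a0`, `n.intervals` and `change.points` are assumed to be set
## up as in the examples, and the component names of the returned fit are
## assumed to match those of the current-data fit.
if (FALSE) {
  hist.fit <- phm.fixed.a0(historical=historical, a0=a0,
                           n.intervals=n.intervals, change.points=change.points,
                           current.data=FALSE, nMC=100, nBI=50)
  # treatment effect: normal sampling prior with mass in the alternative space
  samp.prior.beta1 <- rnorm(100, mean=-1, sd=1)
  # remaining coefficients: recycle the historical posterior draws
  samp.prior.beta <- cbind(samp.prior.beta1, hist.fit$beta_samples[1:100, -1])
  # baseline hazards: historical posterior draws, one matrix per stratum
  samp.prior.lambda <- lapply(hist.fit$lambda0_samples,
                              function(m) m[1:100, , drop=FALSE])
}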
utils::globalVariables(c("r", "ev", "interval", "rtime", "evtime")) #' Approximating the normalized power prior for \eqn{\beta} for the proportional hazards model with piecewise constant hazard and random a0 #' #' @description Approximation of the normalized power prior for \eqn{\beta} for the proportional hazards model with piecewise constant hazard and random \eqn{a_0}. #' The function returns discrete samples of \eqn{\beta} from the normalized power prior, and the user can use any mixture of multivariate normal distributions as an #' approximation for the normalized power prior for \eqn{\beta}. #' This function is used to produce \code{prior.beta.mvn} in the function \code{\link{power.phm.random.a0}}. #' @param change.points List of vectors. Each vector in the list contains the change points for the baseline hazards for each stratum. The length of the list should be equal to the total number of strata. #' For a given stratum, if there is only one interval, then \code{change.points} should be \code{NULL} for that stratum. #' By default, we assign the change points so that the same number of events are observed in all the intervals in the historical data. #' @param prior.a0.shape1 Vector of the first shape parameters of the independent beta priors for \eqn{a_0}. The length of the vector should be equal to the number of historical datasets. The default is a vector of one's. #' @param prior.a0.shape2 Vector of the second shape parameters of the independent beta priors for \eqn{a_0}. The length of the vector should be equal to the number of historical datasets. The default is a vector of one's. #' @param prior.beta.mean Vector of means of the normal initial prior on \eqn{\beta}. The default value is zero for all the elements of \eqn{\beta}. #' @param prior.beta.sd Vector of standard deviations of the normal initial prior on \eqn{\beta}. The default value is 10^3 for all the elements of \eqn{\beta}. #' @param prior.lambda0.hp1 Vector of first hyperparameters of the Gamma initial prior on \eqn{\lambda_0}. #' The length of the vector should be equal to the dimension of \eqn{\lambda_0}, i.e., the total number of intervals for all strata. The default value is 10^(-5) for all the elements of \eqn{\lambda_0}. #' @param prior.lambda0.hp2 Vector of second hyperparameters of the Gamma initial prior on \eqn{\lambda_0}. #' The length of the vector should be equal to the dimension of \eqn{\lambda_0}, i.e., the total number of intervals for all strata. The default value is 10^(-5) for all the elements of \eqn{\lambda_0}. #' @param lower.limits Vector of lower limits for \eqn{\beta} to be used by the slice sampler. The length of the vector should be equal to the length of \eqn{\beta}. The default is -100 for all the elements of \eqn{\beta} (may not be appropriate for all situations). #' @param upper.limits Vector of upper limits for \eqn{\beta} to be used by the slice sampler. The length of the vector should be equal to the length of \eqn{\beta}. The default is 100 for all the elements of \eqn{\beta} (may not be appropriate for all situations). #' @param slice.widths Vector of initial slice widths for \eqn{\beta} to be used by the slice sampler. The length of the vector should be equal to the total number of parameters. The default is 0.1 for all the elements of \eqn{\beta} (may not be appropriate for all situations). #' #' @inheritParams power.phm.fixed.a0 #' @details #' #' This function is used to produce \code{prior.beta.mvn} in the function \code{\link{power.phm.random.a0}}. 
It approximates the normalized power prior for \eqn{\beta} when \eqn{a_0} is modeled as random.
#' The function returns discrete samples of \eqn{\beta} from the normalized power prior, and the user can use any mixture of multivariate normal distributions as an
#' approximation for the normalized power prior for \eqn{\beta}.
#'
#' Baseline hazard parameters for the
#' current and historical data are NOT shared.
#' The baseline hazards of the historical data are denoted by \eqn{\lambda_0}. We assume Gamma initial priors for \eqn{\lambda_0}
#' and independent normal initial priors for \eqn{\beta}.
#'
#' Posterior samples are obtained through slice sampling.
#' The default lower limits are -100 for \eqn{\beta}. The default upper limits
#' for the parameters are 100. The default slice widths for the parameters are 0.1.
#' The defaults may not be appropriate for all situations, and the user can specify the appropriate limits
#' and slice width for each parameter.
#'
#' @return Samples of \eqn{\beta} (approximating the normalized power prior) are returned.
#' @references Ibrahim, J. G., Chen, M.-H. and Sinha, D. (2001). Bayesian Survival Analysis. New York: Springer Science & Business Media.
#'
#' Psioda, M. A. and Ibrahim, J. G. (2019). Bayesian clinical trial design using historical data that inform the treatment effect. Biostatistics 20, 400–415.
#'
#' Shen, Y., Psioda, M. A., and Ibrahim, J. G. (2023). BayesPPD: an R package for Bayesian sample size determination using the power and normalized power prior for generalized linear models. The R Journal, 14(4).
#'
#' @seealso \code{\link{phm.random.a0}} and \code{\link{power.phm.random.a0}}
#' @examples
#'
#' # Simulate two historical datasets
#' n <- 100
#' P <- 4
#' time1 <- round(rexp(n, rate=0.5),1)
#' event1 <- rep(1,n)
#' X1 <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P)
#' S1 <- c(rep(1,n/2),rep(2,n/2))
#' time2 <- round(rexp(n, rate=0.7),1)
#' event2 <- rep(1,n)
#' X2 <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P)
#' S2 <- c(rep(1,n/2),rep(2,n/2))
#' historical <- list(list(time=time1, event=event1, X=X1, S=S1),
#'                    list(time=time2, event=event2, X=X2, S=S2))
#'
#' # We choose three intervals for the first stratum and two intervals for the second stratum
#' n.intervals <- c(3,2)
#' change.points <- list(c(1,2), 2)
#'
#' # Get samples from the approximate normalized power prior for beta
#' nMC <- 100 # nMC should be larger in practice
#' nBI <- 50
#' prior.beta <- approximate.prior.beta(historical, n.intervals, change.points=change.points,
#'                                      prior.a0.shape1=c(1,1), prior.a0.shape2=c(1,1),
#'                                      nMC=nMC, nBI=nBI)
#' prior_beta_mu=colMeans(prior.beta)
#' prior_beta_sigma=cov(prior.beta)
#'
#' # Approximate the discrete samples with a single multivariate normal with weight one.
#' # The user can use any mixture of multivariate normal distributions as an
#' # approximation for the normalized power prior for beta.
#' prior.beta.mvn <- list(list(prior_beta_mu, prior_beta_sigma, 1)) #' # prior.beta.mvn is a parameter for phm.random.a0() and power.phm.random.a0() #' #' #' #' @export approximate.prior.beta <- function(historical, n.intervals, change.points=NULL, prior.a0.shape1=rep(1,10), prior.a0.shape2=rep(1,10), prior.beta.mean=rep(0,50), prior.beta.sd=rep(1000,50), prior.lambda0.hp1=rep(10^(-5),50), prior.lambda0.hp2=rep(10^(-5),50), lower.limits=rep(-100, 50), upper.limits=rep(100, 50), slice.widths=rep(0.1, 50), nMC=10000, nBI=250){ # add zero and infinity to change.points change.points.new <- list() if(is.null(change.points)){ change.points.new <- create_intervals_historical(historical, n.intervals) }else{ for(i in 1:length(n.intervals)){ if(n.intervals[i]==1){ l1 <- c(0, Inf) }else{ l <- change.points[[i]] l1 <- unique(c(0, l, Inf)) } change.points.new[[i]] <- l1 } } tables <- collapse_data(historical=historical, n.intervals=n.intervals, change.points=change.points.new, dCurrent=FALSE) t0 <- tables[["hist_tables"]] P <- ncol(historical[[1]]$X) samples <- npp_beta(nMC, t0, n.intervals, P, prior.a0.shape1, prior.a0.shape2, prior.beta.mean, prior.beta.sd, prior.lambda0.hp1, prior.lambda0.hp2, lower.limits, upper.limits, slice.widths, nBI) res <- samples$beta_matrix return(res) } #' Power/type I error calculation for the proportional hazards model with piecewise constant hazard and random a0 #' #' @description Power/type I error calculation using the normalized power prior for the proportional hazards model with piecewise constant hazard and random \eqn{a_0} #' #' @param prior.beta.mvn List of vectors of multivariate normal approximations of the normalized power prior for \eqn{\beta}. Each vector has three elements, #' the mean vector, the covariance matrix and the weight of the multivariate normal distribution. The normalized power prior for \eqn{\beta} #' is approximated by the weighted mixture of the multivariate normal distributions provided. By default, a single multivariate normal distribution is assumed. #' The user can use the \code{\link{approximate.prior.beta}} function to obtain samples of \eqn{\beta} from the normalized power prior, and use any mixture of multivariate normals to approximate #' the normalized power prior for \eqn{\beta}. #' @param prior.lambda.hp1 Vector of first hyperparameters of the Gamma initial prior on \eqn{\lambda}. #' The length of the vector should be equal to the dimension of \eqn{\lambda}, i.e., the total number of intervals for all strata. The default value is 10^(-5) for all the elements of \eqn{\lambda}. #' @param prior.lambda.hp2 Vector of second hyperparameters of the Gamma initial prior on \eqn{\lambda}. #' The length of the vector should be equal to the dimension of \eqn{\lambda}, i.e., the total number of intervals for all strata. The default value is 10^(-5) for all the elements of \eqn{\lambda}. #' @param lower.limits Vector of lower limits for parameters (\eqn{\beta} and \eqn{\lambda}, in this order) to be used by the slice sampler. The length of the vector should be equal to the total number of parameters. The default is -100 for \eqn{\beta} and 0 for \eqn{\lambda} (may not be appropriate for all situations). #' @param upper.limits Vector of upper limits for parameters (\eqn{\beta} and \eqn{\lambda}, in this order) to be used by the slice sampler. The length of the vector should be equal to the total number of parameters. The default is 100 for all parameters (may not be appropriate for all situations). 
#' @param slice.widths Vector of initial slice widths for parameters (\eqn{\beta} and \eqn{\lambda}, in this order) to be used by the slice sampler. The length of the vector should be equal to the total number of parameters. The default is 0.1 for all parameters (may not be appropriate for all situations). #' #' @inheritParams power.phm.fixed.a0 #' @inheritParams approximate.prior.beta #' #' @details The proportional hazards model with piecewise constant hazard is implemented. #' We assume \eqn{\beta} is the regression coefficients. We assume the first column of the covariate matrix is the treatment indicator, #' and the corresponding parameter is \eqn{\beta_1}. Here \eqn{a_0} is modeled as random with a normalized power prior. #' #' The normalized power prior for \eqn{\beta} is approximated by a weighted mixture of multivariate normal distributions provided in \code{prior.beta.mvn}. #' The user can use the \code{\link{approximate.prior.beta}} function to obtain samples of \eqn{\beta} from the normalized power prior, and use any mixture of multivariate normals to approximate #' the normalized power prior for \eqn{\beta}. By default, a single multivariate normal distribution is assumed. #' #' Baseline hazard parameters for the #' current and historical data are NOT shared. The baseline hazards of the current data are denoted by \eqn{\lambda}. #' The baseline hazards of the historical data are denoted by \eqn{\lambda_0}. We assume Gamma initial priors for #' \eqn{\lambda} and \eqn{\lambda_0}. #' #' To perform sample size determination, we test the hypotheses #' \deqn{H_0: \beta_1 \ge \delta} and \deqn{H_1: \beta_1 < \delta.} #' #' The sampling prior for the treatment parameter can be generated from a normal distribution (see examples). #' For example, suppose one wants to compute the power for the hypotheses \eqn{H_0: \beta_1 \ge 0} and \eqn{H_1: \beta_1 < 0.} #' To approximate the sampling prior for \eqn{\beta_1}, one can simply sample from a normal distribution with negative mean, #' so that the mass of the prior falls in the alternative space. Conversely, to compute the type I error rate, one can #' sample from a normal distribution with positive mean, so that the mass of the prior falls in the null space. #' #' The sampling prior for the other parameters (\eqn{\beta_2}, ..., \eqn{\beta_p} and \eqn{\lambda}) can be generated from the posterior based on the historical data. #' This can be achieved by the function \link{phm.fixed.a0} #' with \code{current.data} set to \code{FALSE} (see the vignette). #' #' Posterior samples are obtained through slice sampling. #' The default lower limits are -100 for \eqn{\beta} and 0 for \eqn{\lambda}. The default upper limits #' for the parameters are 100. The default slice widths for the parameters are 0.1. #' The defaults may not be appropriate for all situations, and the user can specify the appropriate limits #' and slice width for each parameter. #' #' If a sampling prior with support in the null space is used, the value returned is a Bayesian type I error rate. #' If a sampling prior with support in the alternative space is used, the value returned is a Bayesian power. #' #' @return Power or type I error is returned, depending on the sampling prior used. #' The posterior probabilities of the alternative hypothesis are returned. #' The average posterior means of \eqn{\beta} and \eqn{\lambda} are also returned. #' @references Ibrahim, J. G., Chen, M.-H. and Sinha, D. (2001). Bayesian Survival Analysis. New York: Springer Science & Business Media. 
#' #' Psioda, M. A. and Ibrahim, J. G. (2019). Bayesian clinical trial design using historical data that inform the treatment effect. Biostatistics 20, 400–415. #' #' Shen, Y., Psioda, M. A., and Joseph, J. G. (2023). BayesPPD: an R package for Bayesian sample size determination using the power and normalized power prior for generalized linear models. The R Journal, 14(4). #' @seealso \code{\link{phm.random.a0}} and \code{\link{approximate.prior.beta}} #' @examples #' #' #' # Simulate two historical datasets #' set.seed(1) #' n <- 100 #' P <- 4 #' time1 <- round(rexp(n, rate=0.5),1) #' event1 <- rep(1,n) #' X1 <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P) #' S1 <- c(rep(1,n/2),rep(2,n/2)) #' time2 <- round(rexp(n, rate=0.7),1) #' event2 <- rep(1,n) #' X2 <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P) #' S2 <- c(rep(1,n/2),rep(2,n/2)) #' historical <- list(list(time=time1, event=event1, X=X1, S=S1), #' list(time=time2, event=event2, X=X2, S=S2)) #' #' n.subjects <- 100 #' n.events <- 30 #' #' # We choose three intervals for the first stratum and two intervals for the second stratum #' n.intervals <- c(3,2) #' change.points <- list(c(1,2),1) #' #' # Generate sampling priors #' #' # The null hypothesis here is H0: beta_1 >= 0. To calculate power, #' # we can provide samples of beta_1 such that the mass of beta_1 < 0. #' # To calculate type I error, we can provide samples of beta_1 such that #' # the mass of beta_1 >= 0. #' samp.prior.beta1 <- rnorm(100, mean=-1, sd=1) #' # Here, mass is put on the alternative region, so power is calculated. #' samp.prior.beta <- cbind(samp.prior.beta1, matrix(rnorm(100*(P-1)), 100, P-1)) #' #' # Point mass sampling priors are used for lambda #' lambda_strat1 <- matrix(c(0.5, 0.5, 0.5), nrow=1) #' lambda_strat2 <- matrix(c(0.7, 0.7), nrow=1) #' samp.prior.lambda <- list(lambda_strat1, lambda_strat2) #' #' #' nMC <- 50 # nMC should be larger in practice #' nBI <- 50 #' N <- 5 # N should be larger in practice #' #' result <- power.phm.random.a0(historical=historical, n.subjects=n.subjects, #' n.events=n.events, n.intervals=n.intervals, #' change.points=change.points, #' samp.prior.beta=samp.prior.beta, #' samp.prior.lambda=samp.prior.lambda, #' prior.a0.shape1 = c(1,1), prior.a0.shape2 = c(1,1), #' dist.enroll="Uniform", param.enroll=0.5, #' nMC=nMC, nBI=nBI, delta=0, nullspace.ineq=">", N=N) #' result$`power/type I error` #' result$`average posterior mean of beta` #' result$`average posterior mean of lambda` #' #' #' #' @export #' @import dplyr tidyr #' @importFrom stats cov runif rexp rbinom power.phm.random.a0 <- function(historical, n.subjects, n.events, n.intervals, change.points, samp.prior.beta, samp.prior.lambda, # list of matrices dist.enroll, param.enroll, rand.prob=0.5, prob.drop=0, param.drop=0, dist.csr="Constant", param.csr=10000, min.follow.up=0, max.follow.up=10000, prior.beta.mvn=NULL, prior.a0.shape1=rep(1,10), prior.a0.shape2=rep(1,10), prior.lambda.hp1=rep(10^(-5),50), prior.lambda.hp2=rep(10^(-5),50), lower.limits=NULL, upper.limits=rep(100, 50), slice.widths=rep(0.1, 50), nMC=10000, nBI=250, delta=0, nullspace.ineq=">", gamma=0.95, N=10000){ # add zero and infinity to change.points change.points.new <- list() for(i in 1:length(n.intervals)){ if(n.intervals[i]==1){ l1 <- c(0, Inf) }else{ l <- change.points[[i]] l1 <- unique(c(0, l, Inf)) } change.points.new[[i]] <- l1 } # if prior.beta.mvn is NULL, make its default value a single multivariate normal if(is.null(prior.beta.mvn)){ prior.beta <- approximate.prior.beta(historical, n.intervals, 
change.points=change.points.new, prior.a0.shape1=prior.a0.shape1, prior.a0.shape2=prior.a0.shape2, nMC=nMC, nBI=nBI) prior_beta_mu=colMeans(prior.beta) prior_beta_sigma=cov(prior.beta) # sigma is standard deviation prior.beta.mvn <- list(list(prior_beta_mu, prior_beta_sigma, 1)) } # build matrix of covariates to sample from P <- ncol(historical[[1]]$X) x_sim <- matrix(NA, nrow=0, ncol=P-1) for(k in 1:length(historical)){ dat <- historical[[k]] x_h <- as.matrix(dat$X[,-1]) x_sim <- rbind(x_sim, x_h) } # build matrix of strata to sample from s_sim <- NULL for(k in 1:length(historical)){ dat <- historical[[k]] s_h <- dat$S s_sim <- c(s_sim, s_h) } # repeat N times posterior_probs <- NULL # save the posterior means of beta and lambda beta_mean <- matrix(NA, nrow=N, ncol=P) lambda_sum <- list() for(j in 1:length(n.intervals)){ lambda_sum[[j]] <- rep(0,n.intervals[j]) } for(l in 1:N){ # print(l) # sample stratum variable ind <- sample(1:length(s_sim), n.subjects, replace = TRUE) s <- s_sim[ind] # sample beta ind <- sample(1:nrow(samp.prior.beta), 1, replace = TRUE) beta <- samp.prior.beta[ind,] # sample lambda list_lambda <- list() for(j in 1:length(unique(s))){ lams <- samp.prior.lambda[[j]] ind <- sample(1:nrow(lams), 1, replace = TRUE) list_lambda <- c(list_lambda, list(lams[ind,])) } # sample enrollment times if(dist.enroll=="Uniform"){ r <- runif(n.subjects,0,param.enroll) }else{ r <- rexp(n.subjects,rate=param.enroll) } # sample covariates and treatment variable ind <- sample(1:nrow(x_sim), n.subjects, replace = TRUE) x <- x_sim[ind,] z <- rbinom(n.subjects,size=1,rand.prob) x <- cbind(trt=z, x) phi <- exp(x%*%beta) # simulate event time ti eventtimes <- NULL for(i in 1:n.subjects){ strat <- s[i] lambda_s <- list_lambda[[strat]] st <- change.points.new[[strat]] st_1 <- st[-length(st)] k <- 1 while(TRUE){ theta_k <- phi[i]*lambda_s[k] t_tilde <- rexp(1,rate=theta_k) + st_1[k] if(k > length(st_1) | t_tilde <= st[k+1]){ break } k <- k + 1 } eventtimes <- c(eventtimes, t_tilde) } # sample (administrative) censorship times if(dist.csr=="Uniform"){ ctime <- runif(n.subjects,0,param.csr) }else if(dist.csr=="Constant"){ ctime <- rep(param.csr, n.subjects) }else{ ctime <- rexp(n.subjects,rate=param.csr) } y <- ifelse(eventtimes <= ctime, eventtimes, ctime) nu <- ifelse(eventtimes <= ctime, 1, 0) # simulate dropouts # simulate a dropout time; if dropout time < y, the person is a dropout; # otherwise the person has an event or is administratively censored num_drops <- round(n.subjects * prob.drop, 0) if(num_drops > 0){ drop_ind <- sample(1:n.subjects, size=num_drops, replace=FALSE) droptime <- runif(num_drops, 0, param.drop) obstime <- ifelse(y[drop_ind] <= droptime, y[drop_ind], droptime) bool_drop <- ifelse(y[drop_ind] <= droptime, 0, 1) nu[drop_ind][bool_drop==1] <- 0 y[drop_ind] <- obstime } e <- r+y complete_data <- data.frame(X=x, S=s, enrtimes=r, eventtimes=eventtimes, ctime=ctime, y=y, nu=nu, t_elps=e) # create original data df_events <- complete_data[complete_data$nu==1,] df_events1 <- df_events[order(df_events$t_elps),] if(nrow(df_events1) < n.events){ stoptime <- max.follow.up }else{ stoptime <- df_events1[n.events, "t_elps"] if(stoptime < min.follow.up){ stoptime <- min.follow.up } } finaldf <- complete_data[complete_data$enrtimes < stoptime,] finaldf$new_y <- ifelse(finaldf$t_elps > stoptime, stoptime-finaldf$enrtimes, finaldf$y) finaldf$new_nu <- ifelse(finaldf$t_elps > stoptime, 0, finaldf$nu) # create tables # choose change points so that there are equal number of events in the 
intervals in pooled current and historical data change.points.analysis <- create_intervals(time=finaldf$new_y, event=finaldf$new_nu, S=finaldf$S, historical=historical, n.intervals=n.intervals) tables <- collapse_data(time=finaldf$new_y, event=finaldf$new_nu, X=finaldf[,1:ncol(x)], S=finaldf$S, historical=historical, n.intervals=n.intervals, change.points=change.points.analysis, dCurrent=TRUE) t1 <- tables[["curr_tables"]] t2 <- tables[["hist_tables"]] #print(t1) #print(t2) if(is.null(lower.limits)){ lower.limits = c(rep(-100,P), rep(0,2*sum(n.intervals))) } samples <- phm_random_a0(t1, t2, n.intervals, P, prior.beta.mvn, prior.lambda.hp1, prior.lambda.hp2, lower.limits, upper.limits, slice.widths, nMC, nBI) # compute probability of success beta1 <- samples$beta_samples[,1] if(nullspace.ineq == ">"){ pr <- sum(beta1 < delta) / length(beta1) }else{ pr <- sum(beta1 > delta) / length(beta1) } posterior_probs <- c(posterior_probs, pr) # average posterior mean of beta beta_mean[l,] <- colMeans(samples$beta_samples) # sum of posterior mean of lambda list_lambda <- samples$lambda_samples list_lambda_mean <- list() for(j in 1:length(list_lambda)){ list_lambda_mean[[j]] <- colMeans(list_lambda[[j]]) } for(j in 1:length(list_lambda)){ lambda_sum[[j]] <- lambda_sum[[j]]+list_lambda_mean[[j]] } } # average of posterior mean of lambda for(j in 1:length(n.intervals)){ lambda_sum[[j]] <- lambda_sum[[j]]/N } power <- mean(posterior_probs >= gamma) return(list(#"simulated dataset"=simdf, "power/type I error"=power, "posterior probabilities"=posterior_probs, "average posterior mean of beta"=colMeans(beta_mean), "average posterior mean of lambda"=lambda_sum)) } #' Model fitting for the proportional hazards model with piecewise constant hazard and random a0 #' #' @description Model fitting using the normalized power prior for the proportional hazards model with piecewise constant hazard and random \eqn{a_0} #' @param change.points List of vectors. Each vector in the list contains the change points for the baseline hazards for each stratum. The length of the list should be equal to the total number of strata. #' For a given stratum, if there is only one interval, then \code{change.points} should be \code{NULL} for that stratum. #' By default, we assign the change points so that the same number of events are observed in all the intervals in the pooled current and historical data. #' #' @inheritParams power.phm.random.a0 #' @inheritParams phm.fixed.a0 #' #' @details The proportional hazards model with piecewise constant hazard is implemented. #' We assume \eqn{\beta} is the regression coefficients. We assume the first column of the covariate matrix is the treatment indicator, #' and the corresponding parameter is \eqn{\beta_1}. Here \eqn{a_0} is modeled as random with a normalized power prior. #' #' The normalized power prior for \eqn{\beta} is approximated by a weighted mixture of multivariate normal distributions provided in \code{prior.beta.mvn}. #' The user can use the \code{\link{approximate.prior.beta}} function to obtain samples of \eqn{\beta} from the normalized power prior, and use any mixture of multivariate normals to approximate #' the normalized power prior for \eqn{\beta}. By default, a single multivariate normal distribution is assumed. #' #' Posterior samples are obtained through slice sampling. #' The default lower limits are -100 for \eqn{\beta} and 0 for \eqn{\lambda}. The default upper limits #' for the parameters are 100. The default slice widths for the parameters are 0.1. 
#' The defaults may not be appropriate for all situations, and the user can specify the appropriate limits #' and slice width for each parameter. #' #' #' @return Posterior samples of \eqn{\beta} and \eqn{\lambda} are returned. #' @references Ibrahim, J. G., Chen, M.-H. and Sinha, D. (2001). Bayesian Survival Analysis. New York: Springer Science & Business Media. #' #' Psioda, M. A. and Ibrahim, J. G. (2019). Bayesian clinical trial design using historical data that inform the treatment effect. Biostatistics 20, 400–415. #' #' Shen, Y., Psioda, M. A., and Joseph, J. G. (2023). BayesPPD: an R package for Bayesian sample size determination using the power and normalized power prior for generalized linear models. The R Journal, 14(4). #' @seealso \code{\link{power.phm.random.a0}} and \code{\link{approximate.prior.beta}} #' @examples #' #' #' #' set.seed(1) #' # Simulate current data #' n <- 50 #' P <- 4 #' time <- round(rexp(n, rate=0.5),1) #' event <- rep(1,n) #' X <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P) #' S <- c(rep(1,n/2),rep(2,n/2)) #' #' # Simulate two historical datasets #' n <- 100 #' time1 <- round(rexp(n, rate=0.5),1) #' event1 <- rep(1,n) #' X1 <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P) #' S1 <- c(rep(1,n/2),rep(2,n/2)) #' time2 <- round(rexp(n, rate=0.7),1) #' event2 <- rep(1,n) #' X2 <- matrix(rbinom(n*P,prob=0.5,size=1), ncol=P) #' S2 <- c(rep(1,n/2),rep(2,n/2)) #' historical <- list(list(time=time1, event=event1, X=X1, S=S1), #' list(time=time2, event=event2, X=X2, S=S2)) #' #' # We choose three intervals for the first stratum and two intervals for the second stratum #' n.intervals <- c(3,2) #' change.points <- list(c(1,2), 2) #' #' # Get samples from the approximate normalized power prior for beta #' nMC <- 100 # nMC should be larger in practice #' nBI <- 50 #' prior.beta <- approximate.prior.beta(historical, n.intervals, change.points=change.points, #' prior.a0.shape1=c(1,1), prior.a0.shape2=c(1,1), #' nMC=nMC, nBI=nBI) #' prior_beta_mu=colMeans(prior.beta) #' prior_beta_sigma=cov(prior.beta) #' #' # Aprroximate the discrete sames with a single multivariate normal with weight one #' prior.beta.mvn <- list(list(prior_beta_mu, prior_beta_sigma, 1)) #' #' result <- phm.random.a0(time=time, event=event, X=X, S=S, #' historical=historical, n.intervals=n.intervals, #' change.points=change.points, #' prior.beta.mvn=prior.beta.mvn, #' nMC=nMC, nBI=nBI) #' #' # posterior mean of beta #' colMeans(result$beta_samples) #' # posterior mean of baseline hazards for stratum 1 #' colMeans(result$lambda_samples[[1]]) #' # posterior mean of baseline hazards for stratum 2 #' colMeans(result$lambda_samples[[2]]) #' #' #' #' @export #' @import dplyr tidyr #' @importFrom stats cov phm.random.a0 <- function(time, event, X, S, historical, n.intervals, change.points=NULL, prior.beta.mvn=NULL, prior.lambda.hp1=rep(10^(-5),50), prior.lambda.hp2=rep(10^(-5),50), prior.a0.shape1=rep(1,10), prior.a0.shape2=rep(1,10), lower.limits=NULL, upper.limits=rep(100, 50), slice.widths=rep(0.1, 50), nMC=10000, nBI=250){ # add zero and infinity to change.points change.points.new <- list() if(is.null(change.points)){ change.points.new <- create_intervals(time, event, S, historical, n.intervals) }else{ for(i in 1:length(n.intervals)){ if(n.intervals[i]==1){ l1 <- c(0, Inf) }else{ l <- change.points[[i]] l1 <- unique(c(0, l, Inf)) } change.points.new[[i]] <- l1 } } # create tables tables <- collapse_data(time=time, event=event, X=X, S=S, historical=historical, n.intervals=n.intervals, 
change.points=change.points.new, dCurrent=TRUE)
t1 <- tables[["curr_tables"]]
t2 <- tables[["hist_tables"]]

P <- ncol(X)

if(is.null(lower.limits)){
lower.limits = c(rep(-100,P), rep(0,sum(n.intervals)))
}

# if prior.beta.mvn is NULL, make its default value a single multivariate normal
if(is.null(prior.beta.mvn)){
prior.beta <- approximate.prior.beta(historical, n.intervals, change.points=change.points.new,
                                     prior.a0.shape1=prior.a0.shape1, prior.a0.shape2=prior.a0.shape2,
                                     nMC=nMC, nBI=nBI)
prior_beta_mu=colMeans(prior.beta)
prior_beta_sigma=cov(prior.beta) # covariance matrix of the discrete prior samples
prior.beta.mvn <- list(list(prior_beta_mu, prior_beta_sigma, 1))
}

samples <- phm_random_a0(t1, t2, n.intervals, P, prior.beta.mvn,
                         prior.lambda.hp1, prior.lambda.hp2,
                         lower.limits, upper.limits, slice.widths, nMC, nBI)

return(samples)
}
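## A minimal sketch (guarded by `if (FALSE)` so it never runs when this file is
## sourced) of the mixture approximation mentioned in the details above:
## instead of the default single multivariate normal, approximate the
## normalized power prior for beta by a two-component mixture. The 50/50 split
## of the draws and the equal weights are illustrative choices only;
## `historical`, `n.intervals`, `change.points` and the current data objects
## are assumed to exist as in the examples.
if (FALSE) {
  draws <- approximate.prior.beta(historical, n.intervals,
                                  change.points=change.points,
                                  nMC=1000, nBI=100)
  half <- seq_len(nrow(draws) %/% 2)
  comp1 <- draws[half, , drop=FALSE]
  comp2 <- draws[-half, , drop=FALSE]
  # each component: list(mean vector, covariance matrix, weight); weights sum to 1
  prior.beta.mvn <- list(list(colMeans(comp1), cov(comp1), 0.5),
                         list(colMeans(comp2), cov(comp2), 0.5))
  fit <- phm.random.a0(time=time, event=event, X=X, S=S,
                       historical=historical, n.intervals=n.intervals,
                       change.points=change.points,
                       prior.beta.mvn=prior.beta.mvn, nMC=1000, nBI=100)
}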
#' PiecewiseBayesSelect
#' @param Y1 Vector containing event times (or right-censoring times)
#' @param I1 Vector containing event indicators (1 if the event occurred for a patient, 0 otherwise)
#' @param X Matrix of patient covariates; the last inc columns are left out of the selection procedure
#' @param hyperparameters Vector containing 11 hyperparameters and starting values. In order they are: psi - the swap rate of the SVSS algorithm.
#' c - parameter involved in the Sigma matrix for selection. z1a, z1b - beta hyperparameters on the probability of inclusion for the hazard function.
#' a1, b1 - hyperparameters on sigma_lambda.
#' clam1 - spatial dependency of the baseline hazard (between 0 and 1) for the hazard function.
#' alpha1 - the parameter for the number of split points in the hazard (must be a whole number).
#' J1max - maximum number of split points allowed (must be a whole number).
#' J1 - starting number of split points. cl1 - tuning parameter for the log baseline hazard height sampler.
#' @param beta1start Starting values for beta1
#' @param B Number of iterations
#' @param inc Number of variables left out of selection
#' @param Path Where to save posterior samples
#' @param burn Fraction of the posterior sample to discard as burn-in (burn*B must be a whole number)
#'@import graphics
#'@import stats
#'@import mvtnorm
#'@import utils
#' @examples
#' ##Randomly Generate Survival Data
#' ####Generates random patient times, indicators and covariates.
#' n=100
#' Y1=runif(n,0,100)
#' I1=rbinom(n,1,.5)
#' library(mvtnorm)
#' X=rmvnorm(n,rep(0,13),diag(13))
#' ####Read in Hyperparameters
#' ##Swap Rate
#' psi=.5
#' c=20
#' ###Eta Beta function probabilities
#' z1a=.4
#' z1b=1.6
#' ####Hierarchical lam params
#' ###Sigma^2 lambda hyperparameters
#' a1=.7
#' b1=.7
#' ##Spacing dependence c in [0,1]
#' clam1=1
#' #####NumSplit
#' alpha1=3
#' J1max=10
#' ####Split Point Starting Value ###
#' J1=3
#' ##Tuning parameter for lambda
#' cl1=.25
#' ###Beta Starting Values
#' beta1start=c(0,0,-1,0,0,0,1,1,1,1,1,-1,-1)
#' hyper=c(psi,c,z1a,z1b,a1,b1,clam1,alpha1,J1max,J1,cl1)
#' ###Number of iterations and output location
#' B=200
#' Path=tempdir()
#' inc=2
#' burn=.4
#' PiecewiseBayesSelect(Y1,I1,X,hyper,beta1start,B,inc,Path,burn)
#' @export
PiecewiseBayesSelect=function(Y1,I1,X,hyperparameters,beta1start,B,inc,Path,burn){
if(inc%%1>0){
cat("inc must be a natural number")
}else{
####Hyperparameters##
##Swap Rate
psi=hyperparameters[1]
##
c=hyperparameters[2]
###Eta Beta function probabilities
z1a=hyperparameters[3]
z1b=hyperparameters[4]
####Hierarchical lam params
###Siglam
a1=hyperparameters[5]
b1=hyperparameters[6]
##Spacing dependence c in [0,1]
clam1=hyperparameters[7]
##Lamsampler params
#####NumSplit
alpha1=hyperparameters[8]
J1max=hyperparameters[9]
####Split Points###
J1=hyperparameters[10]
cl1=hyperparameters[11]
p1=ncol(X)-inc
n=length(Y1)
#####In program
###Make Acceptance Matrices
###Beta/Eta###
beta1=matrix(rep(1,B*(p1+inc)),nrow=B)
eta1=matrix(rep(1,B*p1),nrow=B)
####Frailty Matrix###
###
Mulam1=rep(0,B)
Siglam1=rep(1,B)
###Make Eta1Start
beta1[1,]=beta1start
##
eta1start=rep(1,p1)
for(i in 1:p1){
if(beta1start[i]==0){
eta1start[i]=0
}
}
eta1[1,]=eta1start
m1 = max(Y1[I1==1])+.001
####Acceptance Matrices
Acceptlam1=matrix(rep(NA,B*(J1max+1)),nrow=B)
accepts1=rep(0,B)
Indmix1=rep(0,B)
sum1=rep(0,B)
split1=rep(0,B)
Indcond1=matrix(rep(NA,p1*B),nrow=B)
###Tracks the current sampler stage; needed by both the inc>1 and inc==1 branches
iter=c(0,0)
#########################S Matrices!!!
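###Row b of s1 holds the current split-point grid (padded with NA out to
###J1max+2 columns); row b of lam1 holds the log baseline hazard height for
###each interval of that grid.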
#Reset up lam and S1 matrices s1=matrix(rep(NA,B*(J1max+2)),nrow=B) s1[1,1:(J1+2)]=sort(seq(0,m1,length.out = J1+2)) lam1=matrix(rep(NA,B*(J1max+1)),nrow=B) lam1[1,1:(J1+1)]=rep(0,J1+1) ###Acceptance split1=rep(0,B) IndB1=rep(0,B) ###Death IndD1=rep(0,B) Indeta1=rep(0,B) Ind1s=rep(0,B) n=length(Y1) G1=J1+1 ##### LK1L=function(Y1,I1,X,Beta1,s1,lam1){ LOGBH=0 et1=X%*%Beta1 for(k in 1:G1){ Del=pmax(0,pmin(Y1,s1[k+1])-s1[k]) LOGBH=LOGBH-sum(Del*exp(lam1[k])*exp(et1)) zu=Y1<=s1[k+1] zl=Y1>s1[k] LOGBH=LOGBH+sum(zu*zl*I1)*lam1[k] } return(LOGBH) } ###Haz 2 ### ##### LK1=function(Y1,I1,X,Beta1,s1,lam1){ LOGBH=0 et1=X%*%Beta1 for(k in 1:G1){ Del=pmax(0,pmin(Y1,s1[k+1])-s1[k]) LOGBH=LOGBH-sum(Del*exp(lam1[k])*exp(et1)) } LOGBH=LOGBH+sum(I1*et1) return(LOGBH) } ###Haz 2 if(inc>1){ cat("More than One Variable Included", " ") ###Set Up Additional Acceptance Matrix IncCond1=matrix(rep(0,B*inc),nrow=B) iter=c(0,0) ##Sampler for(b in 2:B){ if(b%%10000==0){cat(b, "iterations",date(), " ")}else{ if(b%%5000==0){cat(b, " iterations ")}} U=runif(1,0,1) iter[1]="etabeta1" ###eta1,beta1 eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] if(sum(eta1[b-1,])==0|sum(eta1[b-1,])==p1){ if(sum(eta1[b-1,])==0){ ###Add Automatically iter[2]="Add" Ind=sample(1:p1,1) eta1[b,Ind]=1 includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] spotnew=rep(0,length(includednew)) for(k in 1:length(includednew)){if(Ind==includednew[k]){spotnew[k]=k}} spot2=max(spotnew) ###Make sigma matrices## Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))]) #### V1 = Sigmanew[spot2,spot2] V2 = as.matrix(Sigmanew[-spot2,-spot2]) V12 = as.matrix(Sigmanew[spot2,-spot2]) thetab=beta1[b-1,c(includednew, (p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot2]) meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) ################## beta1[b,Ind]=rnorm(1,meannew,varnew) dn=log(dnorm(beta1[b,Ind],meannew,varnew)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo+dn + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) if(is.finite(alphab1)==FALSE){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{ if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } } if(sum(eta1[b-1,])==p1){ ###Delete Automatically Ind=sample(1:p1,1) iter[2]="delete" eta1[b,Ind]=0 beta1[b,Ind]=0 includedold=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==1){includedold[k]=k}} includedold=includedold[includedold != 0] spotold=rep(0,length(includedold)) for(k in 1:length(includedold)){if(includedold[k]==Ind){spotold[k]=k}} spot1=max(spotold) ###Make sigma matrices## Sigmaold=c*solve(t(X[,c(includedold,(p1+1):(p1+inc))])%*%X[,c(includedold,(p1+1):(p1+inc))]) ###Old density### V1 = Sigmaold[spot1,spot1] V2 = as.matrix(Sigmaold[-spot1,-spot1]) V12 = as.matrix(Sigmaold[spot1,-spot1]) thetab=beta1[b-1,c(includedold,(p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot1]) meanold = t(V12)%*%solve(V2)%*%thetano varold = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) do=log(dnorm(beta1[b-1,Ind],meanold,varold)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo-do + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) 
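###Reject the move outright when the log acceptance ratio is not finite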
if(is.finite(alphab1)==FALSE){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{ if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} }} }else{ U=runif(1,0,1) if(U<psi){ ###Swapper includedold=rep(0,p1) iter[2]="swap" for(k in 1:p1){if(eta1[b-1,k]==1){includedold[k]=k}} includedold=includedold[includedold != 0] ones=includedold zeros=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==0){zeros[k]=k}} zeros=zeros[zeros != 0] ###Sample swap indices### if(length(ones)==1){ Indone=ones}else{ Indone=sample(ones,1)} if(length(zeros)==1){Indzero=zeros}else{ Indzero=sample(zeros,1)} ####Change Beta/eta eta1[b,Indone]=0 eta1[b,Indzero]=1 includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] spotold=rep(0,length(includedold)) for(k in 1:length(includedold)){if(Indone==includedold[k]){spotold[k]=k}} spot1=max(spotold) spotnew=rep(0,length(includednew)) for(k in 1:length(includednew)){if(Indzero==includednew[k]){spotnew[k]=k}} spot2=max(spotnew) ###Make sigma matrices## Sigmaold=c*solve(t(X[,c(includedold,(p1+1):(p1+inc))])%*%X[,c(includedold,(p1+1):(p1+inc))]) Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))]) ###Generate new vector## beta1[b,Indone]=0 ##meannew,varnew## V1 = Sigmanew[spot2,spot2] V2 = as.matrix(Sigmanew[-spot2,-spot2]) V12 = as.matrix(Sigmanew[spot2,-spot2]) thetab=beta1[b-1,c(includednew,(p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot2]) meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) ################## beta1[b,Indzero]=rnorm(1,meannew,varnew) dn=log(dnorm(beta1[b,Indzero],meannew,varnew)) ###Old density### V1 = Sigmaold[spot1,spot1] V2 = as.matrix(Sigmaold[-spot1,-spot1]) V12 = as.matrix(Sigmaold[spot1,-spot1]) thetab=beta1[b-1,c(includedold,(p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot1]) meanold = t(V12)%*%solve(V2)%*%thetano varold = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) do=log(dnorm(beta1[b-1,Indone],meanold,varold)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo+dn-do U=log(runif(1,0,1)) if(is.finite(alphab1)==FALSE){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{ if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } }else{ ###Add/Delete Ind=sample(1:p1,1) if(eta1[b-1,Ind]==1){ ##delete## iter[2]="delete" eta1[b,Ind]=0 beta1[b,Ind]=0 includedold=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==1){includedold[k]=k}} includedold=includedold[includedold != 0] spotold=rep(0,length(includedold)) for(k in 1:length(includedold)){if(Ind==includedold[k]){spotold[k]=k}} spot1=max(spotold) ###Make sigma matrices## Sigmaold=c*solve(t(X[,c(includedold,(p1+1):(p1+inc))])%*%X[,c(includedold,(p1+1):(p1+inc))]) ###Old density### V1 = Sigmaold[spot1,spot1] V2 = as.matrix(Sigmaold[-spot1,-spot1]) V12 = as.matrix(Sigmaold[spot1,-spot1]) thetab=beta1[b-1,c(includedold,(p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot1]) meanold = t(V12)%*%solve(V2)%*%thetano varold = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) do=log(dnorm(beta1[b-1,Ind],meanold,varold)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo-do + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) if(is.finite(alphab1)==FALSE){ eta1[b,]=eta1[b-1,] 
beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{ if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } }else{ ###Add### eta1[b,Ind]=1 iter[2]="add" includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] spotnew=rep(0,length(includednew)) for(k in 1:length(includednew)){if(Ind==includednew[k]){spotnew[k]=k}} spot2=max(spotnew) ###Make sigma matrices## Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))]) #### V1 = Sigmanew[spot2,spot2] V2 = as.matrix(Sigmanew[-spot2,-spot2]) V12 = as.matrix(Sigmanew[spot2,-spot2]) thetab=beta1[b-1,c(includednew,(p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot2]) meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) ################## beta1[b,Ind]=rnorm(1,meannew,varnew) dn=log(dnorm(beta1[b,Ind],meannew,varnew)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo+dn + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) if(is.finite(alphab1)==FALSE){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{ if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } } }} iter[1]="Beta1" iter[2]="Included" if(sum(eta1[b,])==0){ ##Sample Included Sigmanew= c*solve(t(X[,(p1+1):(p1+inc)])%*%X[,(p1+1):(p1+inc)]) zeta1n=beta1[b,(p1+1):(p1+inc)] for(k in 1:inc){ zeta1=zeta1n V1 = Sigmanew[k,k] V2 = as.matrix(Sigmanew[-k,-k]) V12 = as.matrix(Sigmanew[k,-k]) thetano=zeta1[-k] meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) zeta1[k]=rnorm(1,meannew,varnew) dn=log(dnorm(zeta1[k],meannew,varnew)) ###density old do=log(dnorm(zeta1n[k],meannew,varnew)) beta=beta1[b,] beta[(p1+1):(p1+inc)]=zeta1 Likeo=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta,s1[b-1,],lam1[b-1,]) alphab1m=Liken-Likeo+dn -do U=log(runif(1,0,1)) if(is.finite(alphab1m)==FALSE){ IncCond1[b,k]=0 }else{ if(U>alphab1m){ IncCond1[b,k]=0 }else{IncCond1[b,k]=1 beta1[b,]=beta zeta1n=zeta1 }} ##End Inc Sampler } }else{ includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] zeta1n=beta1[b,c(includednew,(p1+1):(p1+inc))] ###Make sigma matrices## Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))]) #### p=length(includednew)+inc ####Update All included variables for(k in (length(includednew)+1):(length(includednew)+inc)){ zeta1=zeta1n V1 = Sigmanew[k,k] V2 = as.matrix(Sigmanew[-k,-k]) V12 = as.matrix(Sigmanew[k,-k]) thetano = as.matrix(zeta1[-k]) meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) ################## zeta1[k]=rnorm(1,meannew,varnew) dn=log(dnorm(zeta1[k],meannew,varnew)) ###density old do=log(dnorm(beta1[b,(p1+k-length(includednew))],meannew,varnew)) ######Accept reject### Likeo=LK1(Y1,I1,X,c(beta1[b,1:p1],zeta1n[(length(zeta1n)-inc+1):length(zeta1n)]), s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,c(beta1[b,1:p1],zeta1[(length(zeta1n)-inc+1):length(zeta1n)]), s1[b-1,],lam1[b-1,]) alphab1s=Liken-Likeo+dn -do U=log(runif(1,0,1)) if(is.finite(alphab1s)==FALSE){ IncCond1[b,(k-p1)]=0 }else{ if(U>alphab1s){ IncCond1[b,(k-p1)]=0 }else{IncCond1[b,(k-p1)]=1 zeta1n=zeta1 beta1[b,]=c(beta1[b,1:p1],zeta1[(length(zeta1)-inc+1):length(zeta1)]) } } } ###End included sampler### } 
#####Conditional Sampler for Included!### if(sum(eta1[b,])>0){ iter[2]="Conditional Inclusion" ##Jointly Update nonzero betas zeta1=beta1[b,] zeta1=zeta1[zeta1!=0] zeta1n=zeta1 Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))]) ############### #### for(k in 1:length(includednew)){ V1 = Sigmanew[k,k] V2 = as.matrix(Sigmanew[-k,-k]) V12 = as.matrix(Sigmanew[k,-k]) thetab=beta1[b,c(includednew,(p1+1):(p1+inc))] thetano = as.matrix(thetab[-k]) meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) ################## zeta1n[k]=rnorm(1,meannew,varnew) dn=log(dnorm(zeta1n[k],meannew,varnew)) ###density old do=log(dnorm(zeta1[k],meannew,varnew)) beta=beta1[b,] beta[c(includednew,(p1+1):(p1+inc))]=zeta1n Likeo=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta,s1[b-1,],lam1[b-1,]) alphab1m=Liken-Likeo+dn -do U=log(runif(1,0,1)) if(is.finite(alphab1m)==FALSE){ Indcond1[b,k]=0 }else{ if(U>alphab1m){ Indcond1[b,includednew[k]]=0 zeta1n[k]=zeta1[k] }else{Indcond1[b,includednew[k]]=1 beta1[b,]=beta zeta1[k]=zeta1n[k] }} } ##Jointly Update nonzero betas iter[2]="mixing" zeta1n=beta1[b,] Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))]) zeta1n[c(includednew,(p1+1):(p1+inc))]=rmvnorm(1,rep(0,length(includednew)+inc),Sigmanew) beta=beta1[b,] beta=beta[beta!=0] dn=log(dmvnorm(zeta1n[c(includednew,(p1+1):(p1+inc))],rep(0,length(includednew)+inc),Sigmanew)) ###density old do=log(dmvnorm(beta,rep(0,length(includednew)+inc),Sigmanew)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,zeta1n, s1[b-1,],lam1[b-1,]) alphamix1=Liken-Likeo+dn -do U=log(runif(1,0,1)) if(is.finite(alphamix1)==FALSE){ Indmix1[b]=0 }else{ if(U>alphamix1){ Indmix1[b]=0 }else{Indmix1[b]=1 beta1[b,]=zeta1n }} }else{ ##Jointly Update nonzero betas iter[2]="mixing No eta" zeta1n=beta1[b,] Sigmanew=c*solve(t(X[,(p1+1):(p1+inc)])%*%X[,(p1+1):(p1+inc)]) zeta1n[(p1+1):(p1+inc)]=rmvnorm(1,rep(0,inc),Sigmanew) beta=beta1[b,] beta=beta[beta!=0] dn=log(dmvnorm(zeta1n[(p1+1):(p1+inc)],rep(0,inc),Sigmanew)) ###density old do=log(dmvnorm(beta,rep(0,inc),Sigmanew)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,zeta1n,s1[b-1,],lam1[b-1,]) alphamix1=Liken-Likeo+dn -do U=log(runif(1,0,1)) if(is.finite(alphamix1)==FALSE){ Indmix1[b]=0}else{ if(U>alphamix1){ Indmix1[b]=0 }else{Indmix1[b]=1 beta1[b,]=zeta1n }} } S1=s1[b-1,] S1=S1[!is.na(S1)] L1=lam1[b-1,] L1=as.matrix(L1[!is.na(L1)]) ############################################ #####Start LogBH Samplers################### ############################################ ####Lam1#### iter[1]="LogBH1" iter[2]="matrixsetup" W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) length1=rep(0,J1+1) for(j in 1:length(length1)){ length1[j]=s1[b-1,j+1]-s1[b-1,j] } if(J1<2){ if(J1==1){ W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) SigLam1=solve(diag(J1+1)-W1)%*%Q1 }else{ Q1=as.matrix(2/(m1)) SigLam1=Q1 } }else{ for(j in 2:J1){ W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1]) W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1]) Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1]) } Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) 
Q1[1,1]=2/(2*length1[1]+length1[2])
W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2])
W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1])
SigLam1=solve(diag(J1+1)-W1)%*%Q1
}
iter[2]="Mu"
##Lambda1 Hierarchical Sampler
##Mulam
if(J1>0){
Mulam1[b]=rnorm(1,(t(as.matrix(rep(1,J1+1)))%*%solve(SigLam1)%*%L1)/(t(as.matrix(rep(1,J1+1)))%*%solve(SigLam1)%*%as.matrix(rep(1,J1+1))),sqrt(Siglam1[b-1]/(t(as.matrix(rep(1,J1+1)))%*%solve(SigLam1)%*%as.matrix(rep(1,J1+1)))))
Siglam1[b]=1/rgamma(1,a1+(J1+1)/2,b1+.5*(t(as.matrix(rep(Mulam1[b],J1+1))-L1)%*%solve(SigLam1)%*%(as.matrix(rep(Mulam1[b],J1+1))-L1)))
##Siglam
iter[2]="Sigma"
}else{
Mulam1[b]=rnorm(1,lam1[b-1,1],sqrt(Siglam1[b-1]))
Siglam1[b]=1/rgamma(1,a1+1/2,b1+.5*(Mulam1[b]-lam1[b-1,1])^2)
}
#lambda1
iter[2]="lam1"
lam1[b,]=lam1[b-1,]
#######
for(m in 1:(J1+1)){
lam=lam1[b,]
lam=lam[is.na(lam)==FALSE]
lambda=lam
lam[m]=lambda[m]+runif(1,-cl1,cl1)
if(J1==0){
do=log(dnorm(lambda[m],Mulam1[b],sqrt(Siglam1[b])))
dn=log(dnorm(lam[m],Mulam1[b],sqrt(Siglam1[b])))
}else{
#do=-(t(as.matrix(lambda)-as.matrix(rep(Mulam1[b],J1+1)))%*%solve(SigLam1)%*%(as.matrix(lambda)-as.matrix(rep(Mulam1[b],J1+1))))/(2*Siglam1[b])
#dn=-(t(as.matrix(lam)-as.matrix(rep(Mulam1[b],J1+1)))%*%solve(SigLam1)%*%(as.matrix(lam)-as.matrix(rep(Mulam1[b],J1+1))))/(2*Siglam1[b])
###log prior densities of the current (do) and proposed (dn) log hazard heights
do=dmvnorm(lambda,rep(Mulam1[b],J1+1),Siglam1[b]*SigLam1,log=TRUE)
dn=dmvnorm(lam,rep(Mulam1[b],J1+1),Siglam1[b]*SigLam1,log=TRUE)
}
Likeo=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b-1,],lam1[b,])
Liken=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b-1,],lam)
U=log(runif(1,0,1))
alphalam=Liken-Likeo+dn-do
if(is.nan(alphalam)==TRUE){
lam1[b,m]=lam1[b-1,m]
Acceptlam1[b,m]=0
}else{
if(U<alphalam){
Acceptlam1[b,m]=1
lam1[b,m]=lam[m]
}else{Acceptlam1[b,m]=0}
}
}
#####################################################
###################################################
iter[1]="Haz1"
iter[2]="Birth"
###Random Perturbation###
U1=runif(1,0,1)
#####
s=s1[b-1,]
s=s[!is.na(s)]
if(length(s)<J1max){
Birth=runif(1,0,m1)
s1[b,1:(J1+3)]=sort(c(s,Birth))
for(k in 2:(J1+2)){
if(Birth>s1[b-1,k-1] & Birth<s1[b-1,k]){
Ind=k-1
}
}
lam=rep(0,J1+2)
if(Ind==1 | Ind==J1+1){
if(Ind==1){
lam[Ind]=lam1[b,Ind] - ((s1[b-1,Ind+1]-Birth)/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
lam[Ind+1]=lam1[b,Ind] + ((Birth-s1[b-1,Ind])/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
lam[(Ind+2):length(lam)]=lam1[b,(Ind+1):(J1+1)]
}else{
lam[Ind]=lam1[b,Ind] - ((s1[b-1,Ind+1]-Birth)/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
lam[Ind+1]=lam1[b,Ind] + ((Birth-s1[b-1,Ind])/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
lam[1:(Ind-1)]=lam1[b,1:(Ind-1)]
}
}else{
lam[Ind]=lam1[b,Ind] - ((s1[b-1,Ind+1]-Birth)/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
lam[Ind+1]=lam1[b,Ind] + ((Birth-s1[b-1,Ind])/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
lam[1:(Ind-1)]=lam1[b,1:(Ind-1)]
lam[(Ind+2):length(lam)]=lam1[b,(Ind+1):(J1+1)]
}
lam=lam[!is.na(lam)]
lambda=lam1[b,]
lambda=lambda[!is.na(lambda)]
Lo=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b-1,],lam1[b,])
if(J1>0){
do=log(dpois(J1,alpha1))+log(dmvnorm(lambda,rep(Mulam1[b],length(lambda)),SigLam1*Siglam1[b]))
}else{
do=log(dpois(J1,alpha1))+log(dnorm(lambda,Mulam1[b],Siglam1[b]))
}
prior=((2*J1+3)*(2*J1+2)*(Birth-s1[b-1,Ind])*(s1[b-1,Ind+1]-Birth))/((m1^2)*(s1[b-1,Ind+1]-s1[b-1,Ind]))
G1=G1+1
J1=J1+1
Ln=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b,],lam)
##Make SigLam1
W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1)
Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1)
length1=diff(s1[b,])
if(J1<2){
if(J1==1){
W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 }else{ SigLam1n=2/m1 } }else{ for(j in 2:J1){ W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1]) W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1]) Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1]) } Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 } dn=log(dpois(J1,alpha1))+log(dmvnorm(lam,rep(Mulam1[b],length(lam)),Siglam1[b]*SigLam1n)) alpha=Ln-Lo+dn-do-log(U1*(1-U1)) + log(prior) if(is.nan(alpha)==TRUE){ IndB1[b]=0 s1[b,]=s1[b-1,] J1=J1-1 G1=G1-1 }else{ U=log(runif(1,0,1)) if(U<alpha){ IndB1[b]=1 lam1[b,1:(J1+1)]=lam }else{ s1[b,]=s1[b-1,] IndB1[b]=0 J1=J1-1 G1=G1-1 } } }else{ s1[b,]=s1[b-1,] IndB1[b]=0 } ######################################################### ###################Death Sampler######################### ########################################################## iter[2]="Death" U1=runif(1,0,1) if(J1==0){ IndD1[b]=0 s1[b,]=s1[b-1,] }else{ if(J1==1){ Ind=2 }else{ Ind=sample(2:(J1+1),1) } s=s1[b,] s=s[-Ind] lam=lam1[b,] lambda=lam[!is.na(lam)] lam=lam[!is.na(lam)] lam=lam[-Ind] lam[Ind-1]=((s1[b,Ind]-s1[b,Ind-1])*lam1[b,Ind-1]+(s1[b,Ind+1]-s1[b,Ind])*lam1[b,Ind])/(s1[b,Ind+1]-s1[b,Ind-1]) ############################################# ####Sets up SigLam1 matrix for old density### ############################################# W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) length1=diff(s1[b,]) if(J1<2){ if(J1==1){ W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) SigLam1=solve(diag(J1+1)-W1)%*%Q1 do=log(dpois(J1,alpha1))+log(dmvnorm(lambda,rep(Mulam1[b],length(lambda)),SigLam1*Siglam1[b])) }else{ do=log(dpois(J1,alpha1))+log(dnorm(lambda,Mulam1[b],Siglam1[b])) } }else{ for(j in 2:J1){ W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1]) W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1]) Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1]) } Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) SigLam1=solve(diag(J1+1)-W1)%*%Q1 do=log(dpois(J1,alpha1))+log(dmvnorm(lambda,rep(Mulam1[b],length(lambda)),SigLam1*Siglam1[b])) } ############################################# ############################################# Lo=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b,],lam1[b,]) prior=((m1^2)*(s1[b,Ind+1]-s1[b,Ind-1]))/((2*J1+1)*(2*J1)*(s1[b,Ind]-s1[b,Ind-1])*(s1[b,Ind+1]-s1[b,Ind])) G1=G1-1 J1=J1-1 Ln=LK1L(Y1,I1,X,as.matrix(beta1[b,]), s,lam) ###Make siglam matrix W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) length1=rep(0,J1+1) for(j in 1:length(length1)){ length1[j]=s[j+1]-s[j] } if(J1<2){ if(J1==1){ W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) 
W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 dn=log(dpois(J1,alpha1))+log(dmvnorm(lam,rep(Mulam1[b],length(lam)),SigLam1n*Siglam1[b])) }else{ SigLam1n=2/m1 dn=log(dpois(J1,alpha1))+log(dnorm(lam,Mulam1[b],Siglam1[b])) } }else{ for(j in 2:J1){ W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1]) W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1]) Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1]) } Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 dn=log(dpois(J1,alpha1))+log(dmvnorm(lam,rep(Mulam1[b],length(lam)),SigLam1n*Siglam1[b])) } #### alpha=Ln-Lo+dn-do+log(prior)+log(U1*(1-U1)) if(is.nan(alpha)==TRUE){ IndD1[b]=0 J1=J1+1 G1=G1+1 }else{ U=log(runif(1,0,1)) iter[2]="AcceptRejDeath" if(U<alpha){ s1[b,]=c(s,NA) IndD1[b]=1 lam1[b,1:(J1+1)]=lam lam1[b,(J1+2):J1max]=rep(NA,J1max-J1-1) }else{ IndD1[b]=0 J1=J1+1 G1=G1+1 } } ####End else } ## split1[b]=J1 ## sum1[b]=sum(eta1[b,]) } ################End Samplers cat(c,z1a,z1b," ", " ", " ", "Posterior Inclusion Probabilities after half Burnin", " ", "Hazard 1", " ", colMeans(eta1[(B*burn+1):B,])*100, " ", "IndEta",mean(Indeta1[(B*burn+1):B])*100," ","IndMix",mean(Indmix1[(B*burn+1):B])*100," ", "Included Acceptance", " ", "Haz1", " ", colMeans(IncCond1[(B*burn+1):B,])*100, " ", colMeans(Indcond1[(B*burn+1):B,],na.rm=TRUE)*100," ","Survival"," ","IndDeath",mean(IndD1[(B*burn+1):B])*100," ","IndBirth",mean(IndB1[(B*burn+1):B])*100," ","Lambda"," ", "Lam1", colMeans(Acceptlam1[(B*burn+1):B,],na.rm=TRUE)*100) Path1= paste0(Path,"/IncCond1.txt") write.table(IncCond1[(burn*B+1):B,], Path1, sep="\t") } if(inc==1){ cat("One Variable Included") Ind1s=rep(0,B) for(b in 2:B){ if(b%%10000==0){cat(b, "iterations",date(), " ")}else{ if(b%%5000==0){cat(b, " iterations ")}} U=runif(1,0,1) iter[1]="etabeta1" ###eta1,beta1 eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] if(sum(eta1[b-1,])==0|sum(eta1[b-1,])==p1){ if(sum(eta1[b-1,])==0){ ###Add Automatically iter[2]="Add" Ind=sample(1:p1,1) eta1[b,Ind]=1 includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] spotnew=rep(0,length(includednew)) for(k in 1:length(includednew)){if(Ind==includednew[k]){spotnew[k]=k}} spot2=max(spotnew) ###Make sigma matrices## Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))]) #### V1 = Sigmanew[spot2,spot2] V2 = as.matrix(Sigmanew[-spot2,-spot2]) V12 = as.matrix(Sigmanew[spot2,-spot2]) thetab=beta1[b-1,c(includednew, (p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot2]) meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) ################## beta1[b,Ind]=rnorm(1,meannew,varnew) dn=log(dnorm(beta1[b,Ind],meannew,varnew)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo+dn + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) if(is.finite(alphab1)==FALSE){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{ if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] 
Indeta1[b]=0 }else{Indeta1[b]=1} } } if(sum(eta1[b-1,])==p1){ ###Delete Automatically Ind=sample(1:p1,1) iter[2]="delete" eta1[b,Ind]=0 beta1[b,Ind]=0 includedold=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==1){includedold[k]=k}} includedold=includedold[includedold != 0] spotold=rep(0,length(includedold)) for(k in 1:length(includedold)){if(includedold[k]==Ind){spotold[k]=k}} spot1=max(spotold) ###Make sigma matrices## Sigmaold=c*solve(t(X[,c(includedold,(p1+1):(p1+inc))])%*%X[,c(includedold,(p1+1):(p1+inc))]) ###Old density### V1 = Sigmaold[spot1,spot1] V2 = as.matrix(Sigmaold[-spot1,-spot1]) V12 = as.matrix(Sigmaold[spot1,-spot1]) thetab=beta1[b-1,c(includedold,(p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot1]) meanold = t(V12)%*%solve(V2)%*%thetano varold = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) do=log(dnorm(beta1[b-1,Ind],meanold,varold)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo-do + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) if(is.finite(alphab1)==FALSE){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{ if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} }} }else{ U=runif(1,0,1) if(U<psi){ ###Swapper includedold=rep(0,p1) iter[2]="swap" for(k in 1:p1){if(eta1[b-1,k]==1){includedold[k]=k}} includedold=includedold[includedold != 0] ones=includedold zeros=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==0){zeros[k]=k}} zeros=zeros[zeros != 0] ###Sample swap indices### if(length(ones)==1){ Indone=ones}else{ Indone=sample(ones,1)} if(length(zeros)==1){Indzero=zeros}else{ Indzero=sample(zeros,1)} ####Change Beta/eta eta1[b,Indone]=0 eta1[b,Indzero]=1 includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] spotold=rep(0,length(includedold)) for(k in 1:length(includedold)){if(Indone==includedold[k]){spotold[k]=k}} spot1=max(spotold) spotnew=rep(0,length(includednew)) for(k in 1:length(includednew)){if(Indzero==includednew[k]){spotnew[k]=k}} spot2=max(spotnew) ###Make sigma matrices## Sigmaold=c*solve(t(X[,c(includedold,(p1+1):(p1+inc))])%*%X[,c(includedold,(p1+1):(p1+inc))]) Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))]) ###Generate new vector## beta1[b,Indone]=0 ##meannew,varnew## V1 = Sigmanew[spot2,spot2] V2 = as.matrix(Sigmanew[-spot2,-spot2]) V12 = as.matrix(Sigmanew[spot2,-spot2]) thetab=beta1[b-1,c(includednew,(p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot2]) meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) ################## beta1[b,Indzero]=rnorm(1,meannew,varnew) dn=log(dnorm(beta1[b,Indzero],meannew,varnew)) ###Old density### V1 = Sigmaold[spot1,spot1] V2 = as.matrix(Sigmaold[-spot1,-spot1]) V12 = as.matrix(Sigmaold[spot1,-spot1]) thetab=beta1[b-1,c(includedold,(p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot1]) meanold = t(V12)%*%solve(V2)%*%thetano varold = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) do=log(dnorm(beta1[b-1,Indone],meanold,varold)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo+dn-do U=log(runif(1,0,1)) if(is.finite(alphab1)==FALSE){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{ if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } }else{ ###Add/Delete 
Ind=sample(1:p1,1) if(eta1[b-1,Ind]==1){ ##delete## iter[2]="delete" eta1[b,Ind]=0 beta1[b,Ind]=0 includedold=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==1){includedold[k]=k}} includedold=includedold[includedold != 0] spotold=rep(0,length(includedold)) for(k in 1:length(includedold)){if(Ind==includedold[k]){spotold[k]=k}} spot1=max(spotold) ###Make sigma matrices## Sigmaold=c*solve(t(X[,c(includedold,(p1+1):(p1+inc))])%*%X[,c(includedold,(p1+1):(p1+inc))]) ###Old density### V1 = Sigmaold[spot1,spot1] V2 = as.matrix(Sigmaold[-spot1,-spot1]) V12 = as.matrix(Sigmaold[spot1,-spot1]) thetab=beta1[b-1,c(includedold,(p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot1]) meanold = t(V12)%*%solve(V2)%*%thetano varold = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) do=log(dnorm(beta1[b-1,Ind],meanold,varold)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo-do + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) if(is.finite(alphab1)==FALSE){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{ if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } }else{ ###Add### eta1[b,Ind]=1 iter[2]="add" includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] spotnew=rep(0,length(includednew)) for(k in 1:length(includednew)){if(Ind==includednew[k]){spotnew[k]=k}} spot2=max(spotnew) ###Make sigma matrices## Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))]) #### V1 = Sigmanew[spot2,spot2] V2 = as.matrix(Sigmanew[-spot2,-spot2]) V12 = as.matrix(Sigmanew[spot2,-spot2]) thetab=beta1[b-1,c(includednew,(p1+1):(p1+inc))] thetano = as.matrix(thetab[-spot2]) meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) ################## beta1[b,Ind]=rnorm(1,meannew,varnew) dn=log(dnorm(beta1[b,Ind],meannew,varnew)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo+dn + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) if(is.finite(alphab1)==FALSE){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{ if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } } }} ###End SVSS ###INCLUDED SAMPLERS iter[1]="Beta1" iter[2]="Included" if(sum(eta1[b,])==0){ ##Sample Included Sigmanew= c*solve(t(X[,(p1+1):(p1+inc)])%*%X[,(p1+1):(p1+inc)]) zeta1n=beta1[b,(p1+1):(p1+inc)] meannew=0 varnew = sqrt(Sigmanew) zeta1=rnorm(1,meannew,varnew) dn=log(dnorm(zeta1,meannew,varnew)) ###density old do=log(dnorm(zeta1n,meannew,varnew)) beta=beta1[b,] beta[(p1+1):(p1+inc)]=zeta1 Likeo=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta,s1[b-1,],lam1[b-1,]) alphab1m=Liken-Likeo+dn -do U=log(runif(1,0,1)) if(is.finite(alphab1m)==FALSE){ Ind1s[b]=0 }else{ if(U>alphab1m){ Ind1s[b]=0 }else{Ind1s[b]=1 beta1[b,]=beta zeta1n=zeta1 }} ##End Inc Sampler }else{ includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] zeta1n=beta1[b,c(includednew,(p1+1):(p1+inc))] ###Make sigma matrices## Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))]) #### p=length(includednew)+inc ####Update All included variables for(k in 
(length(includednew)+1):(length(includednew)+inc)){
  zeta1=zeta1n
  V1 = Sigmanew[k,k]
  V2 = as.matrix(Sigmanew[-k,-k])
  V12 = as.matrix(Sigmanew[k,-k])
  thetano = as.matrix(zeta1[-k])
  meannew = t(V12)%*%solve(V2)%*%thetano
  varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12)
  ##################
  zeta1[k]=rnorm(1,meannew,varnew)
  dn=log(dnorm(zeta1[k],meannew,varnew))
  ###density old
  do=log(dnorm(beta1[b,(p1+k-length(includednew))],meannew,varnew))
  ######Accept reject###
  Likeo=LK1(Y1,I1,X,c(beta1[b,1:p1],zeta1n[(length(zeta1n)-inc+1):length(zeta1n)]),s1[b-1,],lam1[b-1,])
  Liken=LK1(Y1,I1,X,c(beta1[b,1:p1],zeta1[(length(zeta1n)-inc+1):length(zeta1n)]),s1[b-1,],lam1[b-1,])
  alphab1s=Liken-Likeo+dn-do
  U=log(runif(1,0,1))
  if(is.finite(alphab1s)==FALSE){
    Ind1s[b]=0
  }else{
    if(U>alphab1s){
      Ind1s[b]=0
    }else{Ind1s[b]=1
      zeta1n=zeta1
      beta1[b,]=c(beta1[b,1:p1],zeta1[(length(zeta1)-inc+1):length(zeta1)])
    }
  }
}
###End included sampler###
}
#####Conditional Sampler for Included!###
if(sum(eta1[b,])>0){
  iter[2]="Conditional Inclusion"
  ##Jointly Update nonzero betas
  zeta1=beta1[b,]
  zeta1=zeta1[zeta1!=0]
  zeta1n=zeta1
  Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))])
  ###############
  ####
  for(k in 1:length(includednew)){
    V1 = Sigmanew[k,k]
    V2 = as.matrix(Sigmanew[-k,-k])
    V12 = as.matrix(Sigmanew[k,-k])
    thetab=beta1[b,c(includednew,(p1+1):(p1+inc))]
    thetano = as.matrix(thetab[-k])
    meannew = t(V12)%*%solve(V2)%*%thetano
    varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12)
    ##################
    zeta1n[k]=rnorm(1,meannew,varnew)
    dn=log(dnorm(zeta1n[k],meannew,varnew))
    ###density old
    do=log(dnorm(zeta1[k],meannew,varnew))
    beta=beta1[b,]
    beta[c(includednew,(p1+1):(p1+inc))]=zeta1n
    Likeo=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,])
    Liken=LK1(Y1,I1,X,beta,s1[b-1,],lam1[b-1,])
    alphab1m=Liken-Likeo+dn-do
    U=log(runif(1,0,1))
    if(is.finite(alphab1m)==FALSE){
      Indcond1[b,includednew[k]]=0
    }else{
      if(U>alphab1m){
        Indcond1[b,includednew[k]]=0
        zeta1n[k]=zeta1[k]
      }else{Indcond1[b,includednew[k]]=1
        beta1[b,]=beta
        zeta1[k]=zeta1n[k]
      }}
  }
  ##Jointly Update nonzero betas
  iter[2]="mixing"
  zeta1n=beta1[b,]
  Sigmanew=c*solve(t(X[,c(includednew,(p1+1):(p1+inc))])%*%X[,c(includednew,(p1+1):(p1+inc))])
  zeta1n[c(includednew,(p1+1):(p1+inc))]=rmvnorm(1,rep(0,length(includednew)+inc),Sigmanew)
  beta=beta1[b,]
  beta=beta[beta!=0]
  dn=log(dmvnorm(zeta1n[c(includednew,(p1+1):(p1+inc))],rep(0,length(includednew)+inc),Sigmanew))
  ###density old
  do=log(dmvnorm(beta,rep(0,length(includednew)+inc),Sigmanew))
  ######Accept reject###
  Likeo=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,])
  Liken=LK1(Y1,I1,X,zeta1n,s1[b-1,],lam1[b-1,])
  alphamix1=Liken-Likeo+dn-do
  U=log(runif(1,0,1))
  if(is.finite(alphamix1)==FALSE){
    Indmix1[b]=0
  }else{
    if(U>alphamix1){
      Indmix1[b]=0
    }else{Indmix1[b]=1
      beta1[b,]=zeta1n
    }}
}
S1=s1[b-1,]
S1=S1[!is.na(S1)]
L1=lam1[b-1,]
L1=as.matrix(L1[!is.na(L1)])
iter[1]="LogBH1"
iter[2]="matrixsetup"
W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1)
Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1)
length1=rep(0,J1+1)
for(j in 1:length(length1)){
  length1[j]=s1[b-1,j+1]-s1[b-1,j]
}
if(J1<2){
  if(J1==1){
    W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2])
    W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1])
    Q1[1,1]=2/(2*length1[1]+length1[2])
    Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1])
    SigLam1=solve(diag(J1+1)-W1)%*%Q1
  }else{
    Q1=as.matrix(2/(m1))
    SigLam1=Q1
  }
}else{
  for(j in 2:J1){
    W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1])
    W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1])
    Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1])
  }
  Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1])
  Q1[1,1]=2/(2*length1[1]+length1[2])
  W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2])
  W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1])
  SigLam1=solve(diag(J1+1)-W1)%*%Q1
}
iter[2]="Mu"
##Lambda1 Hierarchical Sampler
##Mulam
if(J1>0){
  Mulam1[b]=rnorm(1,(t(as.matrix(rep(1,J1+1)))%*%solve(SigLam1)%*%L1)/(t(as.matrix(rep(1,J1+1)))%*%solve(SigLam1)%*%as.matrix(rep(1,J1+1))),sqrt(Siglam1[b-1]/(t(as.matrix(rep(1,J1+1)))%*%solve(SigLam1)%*%as.matrix(rep(1,J1+1)))))
  Siglam1[b]=1/rgamma(1,a1+(J1+1)/2,b1+.5*(t(as.matrix(rep(Mulam1[b],J1+1))-L1)%*%solve(SigLam1)%*%(as.matrix(rep(Mulam1[b],J1+1))-L1)))
  ##Siglam
  iter[2]="Sigma"
}else{
  Mulam1[b]=rnorm(1,lam1[b-1,1],sqrt(Siglam1[b-1]))
  Siglam1[b]=1/rgamma(1,a1+1/2,b1+.5*(Mulam1[b]-lam1[b-1,1])^2)
}
#if(is.finite(Mulam1[b])==FALSE){stop("Adjust Hierarchical Hyper-Parameters")}
#if(is.finite(Siglam1[b])==FALSE){stop("Adjust Hierarchical Hyper-Parameters")}
#lambda1
iter[2]="lam1"
lam1[b,]=lam1[b-1,]
#######
for(m in 1:(J1+1)){
  lam=lam1[b,]
  lam=lam[is.na(lam)==FALSE]
  lambda=lam
  lam[m]=lambda[m]+runif(1,-cl1,cl1)
  if(J1==0){
    do=log(dnorm(lambda[m],Mulam1[b],sqrt(Siglam1[b])))
    dn=log(dnorm(lam[m],Mulam1[b],sqrt(Siglam1[b])))
  }else{
    #do=-(t(as.matrix(lambda)-as.matrix(rep(Mulam1[b],J1+1)))%*%solve(SigLam1)%*%(as.matrix(lambda)-as.matrix(rep(Mulam1[b],J1+1))))/(2*Siglam1[b])
    #dn=-(t(as.matrix(lam)-as.matrix(rep(Mulam1[b],J1+1)))%*%solve(SigLam1)%*%(as.matrix(lam)-as.matrix(rep(Mulam1[b],J1+1))))/(2*Siglam1[b])
    ##log prior densities of the current (do) and proposed (dn) log-hazard vectors
    do=log(dmvnorm(lambda,rep(Mulam1[b],J1+1),Siglam1[b]*SigLam1))
    dn=log(dmvnorm(lam,rep(Mulam1[b],J1+1),Siglam1[b]*SigLam1))
  }
  Likeo=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b-1,],lam1[b,])
  Liken=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b-1,],lam)
  U=log(runif(1,0,1))
  alphalam=Liken-Likeo+dn-do
  if(is.nan(alphalam)==TRUE){
    lam1[b,m]=lam1[b-1,m]
    Acceptlam1[b,m]=0
  }else{
    if(U<alphalam){
      Acceptlam1[b,m]=1
      lam1[b,m]=lam[m]
    }else{Acceptlam1[b,m]=0}
  }
}
#####################################################
###################################################
iter[1]="Haz1"
iter[2]="Birth"
###Random Perturbation###
U1=runif(1,0,1)
#####
s=s1[b-1,]
s=s[!is.na(s)]
if(length(s)<J1max){
  Birth=runif(1,0,m1)
  s1[b,1:(J1+3)]=sort(c(s,Birth))
  for(k in 2:(J1+2)){
    if(Birth>s1[b-1,k-1] & Birth<s1[b-1,k]){
      Ind=k-1
    }
  }
  lam=rep(0,J1+2)
  if(Ind==1 | Ind==J1+1){
    if(Ind==1){
      lam[Ind]=lam1[b,Ind] - ((s1[b-1,Ind+1]-Birth)/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
      lam[Ind+1]=lam1[b,Ind] + ((Birth-s1[b-1,Ind])/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
      lam[(Ind+2):length(lam)]=lam1[b,(Ind+1):(J1+1)]
    }else{
      lam[Ind]=lam1[b,Ind] - ((s1[b-1,Ind+1]-Birth)/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
      lam[Ind+1]=lam1[b,Ind] + ((Birth-s1[b-1,Ind])/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
      lam[1:(Ind-1)]=lam1[b,1:(Ind-1)]
    }
  }else{
    lam[Ind]=lam1[b,Ind] - ((s1[b-1,Ind+1]-Birth)/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
    lam[Ind+1]=lam1[b,Ind] + ((Birth-s1[b-1,Ind])/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
    lam[1:(Ind-1)]=lam1[b,1:(Ind-1)]
    lam[(Ind+2):length(lam)]=lam1[b,(Ind+1):(J1+1)]
  }
  lam=lam[!is.na(lam)]
  lambda=lam1[b,]
  lambda=lambda[!is.na(lambda)]
  Lo=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b-1,],lam1[b,])
  if(J1>0){
    do=log(dpois(J1,alpha1))+log(dmvnorm(lambda,rep(Mulam1[b],length(lambda)),SigLam1*Siglam1[b]))
prior=((2*J1+3)*(2*J1+2)*(Birth-s1[b-1,Ind])*(s1[b-1,Ind+1]-Birth))/((m1^2)*(s1[b-1,Ind+1]-s1[b-1,Ind])) G1=G1+1 J1=J1+1 Ln=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b,],lam) ##Make SigLam1 W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) length1=diff(s1[b,]) if(J1<2){ if(J1==1){ W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 }else{ SigLam1n=2/m1 } }else{ for(j in 2:J1){ W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1]) W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1]) Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1]) } Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 } dn=log(dpois(J1,alpha1))+log(dmvnorm(lam,rep(Mulam1[b],length(lam)),Siglam1[b]*SigLam1n)) alpha=Ln-Lo+dn-do-log(U1*(1-U1)) + log(prior) if(is.nan(alpha)==TRUE){ IndB1[b]=0 s1[b,]=s1[b-1,] J1=J1-1 G1=G1-1 }else{ U=log(runif(1,0,1)) if(U<alpha){ IndB1[b]=1 lam1[b,1:(J1+1)]=lam }else{ s1[b,]=s1[b-1,] IndB1[b]=0 J1=J1-1 G1=G1-1 } } }else{ s1[b,]=s1[b-1,] IndB1[b]=0 } ######################################################### ###################Death Sampler######################### ########################################################## iter[2]="Death" U1=runif(1,0,1) if(J1==0){ IndD1[b]=0 s1[b,]=s1[b-1,] }else{ if(J1==1){ Ind=2 }else{ Ind=sample(2:(J1+1),1) } s=s1[b,] s=s[-Ind] lam=lam1[b,] lambda=lam[!is.na(lam)] lam=lam[!is.na(lam)] lam=lam[-Ind] lam[Ind-1]=((s1[b,Ind]-s1[b,Ind-1])*lam1[b,Ind-1]+(s1[b,Ind+1]-s1[b,Ind])*lam1[b,Ind])/(s1[b,Ind+1]-s1[b,Ind-1]) ############################################# ####Sets up SigLam1 matrix for old density### ############################################# W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) length1=diff(s1[b,]) if(J1<2){ if(J1==1){ W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) SigLam1=solve(diag(J1+1)-W1)%*%Q1 do=log(dpois(J1,alpha1))+log(dmvnorm(lambda,rep(Mulam1[b],length(lambda)),SigLam1*Siglam1[b])) }else{ do=log(dpois(J1,alpha1))+log(dnorm(lambda,Mulam1[b],Siglam1[b])) } }else{ for(j in 2:J1){ W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1]) W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1]) Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1]) } Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) SigLam1=solve(diag(J1+1)-W1)%*%Q1 do=log(dpois(J1,alpha1))+log(dmvnorm(lambda,rep(Mulam1[b],length(lambda)),SigLam1*Siglam1[b])) } ############################################# ############################################# Lo=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b,],lam1[b,]) prior=((m1^2)*(s1[b,Ind+1]-s1[b,Ind-1]))/((2*J1+1)*(2*J1)*(s1[b,Ind]-s1[b,Ind-1])*(s1[b,Ind+1]-s1[b,Ind])) G1=G1-1 J1=J1-1 
Ln=LK1L(Y1,I1,X,as.matrix(beta1[b,]), s,lam) ###Make siglam matrix W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) length1=rep(0,J1+1) for(j in 1:length(length1)){ length1[j]=s[j+1]-s[j] } if(J1<2){ if(J1==1){ W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 dn=log(dpois(J1,alpha1))+log(dmvnorm(lam,rep(Mulam1[b],length(lam)),SigLam1n*Siglam1[b])) }else{ SigLam1n=2/m1 dn=log(dpois(J1,alpha1))+log(dnorm(lam,Mulam1[b],Siglam1[b])) } }else{ for(j in 2:J1){ W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1]) W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1]) Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1]) } Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 dn=log(dpois(J1,alpha1))+log(dmvnorm(lam,rep(Mulam1[b],length(lam)),SigLam1n*Siglam1[b])) } #### alpha=Ln-Lo+dn-do+log(prior)+log(U1*(1-U1)) if(is.nan(alpha)==TRUE){ IndD1[b]=0 J1=J1+1 G1=G1+1 }else{ U=log(runif(1,0,1)) iter[2]="AcceptRejDeath" if(U<alpha){ s1[b,]=c(s,NA) IndD1[b]=1 lam1[b,1:(J1+1)]=lam lam1[b,(J1+2):J1max]=rep(NA,J1max-J1-1) }else{ IndD1[b]=0 J1=J1+1 G1=G1+1 } } ####End else } ## split1[b]=J1 sum1[b]=sum(eta1[b,]) } ################End Samplers cat(c,z1a,z1b," ", " ", " ", "Posterior Inclusion Probabilities after half Burnin", " ", "Hazard 1", " ", colMeans(eta1[(B*burn+1):B,])*100, " ", "IndEta",mean(Indeta1[(B*burn+1):B])*100," ","IndMix",mean(Indmix1[(B*burn+1):B])*100," ", "Included Acceptance", " ", "Haz1", " ", mean(Ind1s[(B*burn+1):B])*100, " ", colMeans(Indcond1[(B*burn+1):B,],na.rm=TRUE)*100," ","Survival"," ","IndDeath",mean(IndD1[(B*burn+1):B])*100," ","IndBirth",mean(IndB1[(B*burn+1):B])*100," ","Lambda"," ", "Lam1", colMeans(Acceptlam1[(B*burn+1):B,],na.rm=TRUE)*100) Path1= paste0(Path,"/Ind1s.txt") write.table(Ind1s[(burn*B+1):B], Path1, sep="\t") par(mfrow=c(2,1)) plot(1:B,sum1,type="l",xlab="",ylab="Haz: # Included", main="Traceplot: # Included") plot(1:B,split1,type="l",xlab="",ylab="Haz: Split #", main="Traceplot: # Split points") } ###If 0 inc if(inc==0){ cat("No Variables Included") for(b in 2:B){ if(b%%10000==0){cat(b, "iterations",date(), " ")}else{ if(b%%5000==0){cat(b, " iterations ")}} ###eta1,beta1 eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] if(sum(eta1[b-1,])==0|sum(eta1[b-1,])==p1){ if(sum(eta1[b-1,])==0){ ###Add Automatically Ind=sample(1:p1,1) eta1[b,Ind]=1 includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] spotnew=rep(0,length(includednew)) for(k in 1:length(includednew)){if(Ind==includednew[k]){spotnew[k]=k}} spot2=max(spotnew) ###Make sigma matrices## Sigmanew=c*solve(t(X[,includednew])%*%X[,includednew]) #### meannew = 0 varnew = sqrt(Sigmanew) ################## beta1[b,Ind]=rnorm(1,meannew,varnew) dn=log(dnorm(beta1[b,Ind],meannew,varnew)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo+dn + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) 
if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } if(sum(eta1[b-1,])==p1){ ###Delete Automatically Ind=sample(1:p1,1) eta1[b,Ind]=0 beta1[b,Ind]=0 includedold=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==1){includedold[k]=k}} includedold=includedold[includedold != 0] spotold=rep(0,length(includedold)) for(k in 1:length(includedold)){if(includedold[k]==Ind){spotold[k]=k}} spot1=max(spotold) ###Make sigma matrices## Sigmaold=c*solve(t(X[,includedold])%*%X[,includedold]) ###Old density### V1 = Sigmaold[spot1,spot1] V2 = as.matrix(Sigmaold[-spot1,-spot1]) V12 = as.matrix(Sigmaold[spot1,-spot1]) thetab=beta1[b-1,includedold] thetano = as.matrix(thetab[-spot1]) meanold = t(V12)%*%solve(V2)%*%thetano varold = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) do=log(dnorm(beta1[b-1,Ind],meanold,varold)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo-do + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } }else{ U=runif(1,0,1) if(U<psi){ if(sum(eta1[b-1,])==1){ includedold=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==1){includedold[k]=k}} includedold=includedold[includedold != 0] ones=includedold zeros=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==0){zeros[k]=k}} zeros=zeros[zeros != 0] ###Sample swap indices### Indone=ones Indzero=sample(zeros,1) ####Change Beta/eta eta1[b,Indone]=0 eta1[b,Indzero]=1 beta1[b,Indone]=0 ## Sigmaold=c*solve(t(X[,Indone])%*%X[,Indone]) Sigmanew=c*solve(t(X[,Indzero])%*%X[,Indzero]) meannew = 0 varnew = sqrt(Sigmanew) ################## beta1[b,Indzero]=rnorm(1,meannew,varnew) dn=log(dnorm(beta1[b,Indzero],meannew,varnew)) ###Old density### meanold = 0 varold = sqrt(Sigmaold) do=log(dnorm(beta1[b-1,Indone],meanold,varold)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo+dn-do U=log(runif(1,0,1)) if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} }else{ ###Swapper includedold=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==1){includedold[k]=k}} includedold=includedold[includedold != 0] ones=includedold zeros=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==0){zeros[k]=k}} zeros=zeros[zeros != 0] ###Sample swap indices### Indone=sample(ones,1) Indzero=sample(zeros,1) ####Change Beta/eta eta1[b,Indone]=0 eta1[b,Indzero]=1 includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] spotold=rep(0,length(includedold)) for(k in 1:length(includedold)){if(Indone==includedold[k]){spotold[k]=k}} spot1=max(spotold) spotnew=rep(0,length(includednew)) for(k in 1:length(includednew)){if(Indzero==includednew[k]){spotnew[k]=k}} spot2=max(spotnew) ###Make sigma matrices## Sigmaold=c*solve(t(X[,includedold])%*%X[,includedold]) Sigmanew=c*solve(t(X[,includednew])%*%X[,includednew]) ###Generate new vector## beta1[b,Indone]=0 ##meannew,varnew## V1 = Sigmanew[spot2,spot2] V2 = as.matrix(Sigmanew[-spot2,-spot2]) V12 = as.matrix(Sigmanew[spot2,-spot2]) thetab=beta1[b-1,includednew] thetano = as.matrix(thetab[-spot2]) meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) ################## beta1[b,Indzero]=rnorm(1,meannew,varnew) dn=log(dnorm(beta1[b,Indzero],meannew,varnew)) ###Old density### V1 = Sigmaold[spot1,spot1] 
V2 = as.matrix(Sigmaold[-spot1,-spot1]) V12 = as.matrix(Sigmaold[spot1,-spot1]) thetab=beta1[b-1,includedold] thetano = as.matrix(thetab[-spot1]) meanold = t(V12)%*%solve(V2)%*%thetano varold = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) do=log(dnorm(beta1[b-1,Indone],meanold,varold)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo+dn-do U=log(runif(1,0,1)) if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } }else{ ###Add/Delete Ind=sample(1:p1,1) if(eta1[b-1,Ind]==1){ ##delete## if(sum(eta1[b-1,])==1){ eta1[b,Ind]=0 beta1[b,Ind]=0 includedold=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==1){includedold[k]=k}} includedold=includedold[includedold != 0] spotold=rep(0,length(includedold)) for(k in 1:length(includedold)){if(Ind==includedold[k]){spotold[k]=k}} spot1=max(spotold) ###Make sigma matrices## Sigmaold=c*solve(t(X[,includedold])%*%X[,includedold]) ###Old density### V1 = Sigmaold[spot1,spot1] V2 = as.matrix(Sigmaold[-spot1,-spot1]) V12 = as.matrix(Sigmaold[spot1,-spot1]) thetab=beta1[b-1,includedold] thetano = as.matrix(thetab[-spot1]) meanold = 0 varold = sqrt(Sigmaold) do=log(dnorm(beta1[b-1,Ind],meanold,varold)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo-do + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} }else{ eta1[b,Ind]=0 beta1[b,Ind]=0 includedold=rep(0,p1) for(k in 1:p1){if(eta1[b-1,k]==1){includedold[k]=k}} includedold=includedold[includedold != 0] spotold=rep(0,length(includedold)) for(k in 1:length(includedold)){if(Ind==includedold[k]){spotold[k]=k}} spot1=max(spotold) ###Make sigma matrices## Sigmaold=c*solve(t(X[,includedold])%*%X[,includedold]) ###Old density### V1 = Sigmaold[spot1,spot1] V2 = as.matrix(Sigmaold[-spot1,-spot1]) V12 = as.matrix(Sigmaold[spot1,-spot1]) thetab=beta1[b-1,includedold] thetano = as.matrix(thetab[-spot1]) meanold = t(V12)%*%solve(V2)%*%thetano varold = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) do=log(dnorm(beta1[b-1,Ind],meanold,varold)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo-do + log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } }else{ ###Add### eta1[b,Ind]=1 includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] spotnew=rep(0,length(includednew)) for(k in 1:length(includednew)){if(Ind==includednew[k]){spotnew[k]=k}} spot2=max(spotnew) ###Make sigma matrices## Sigmanew=c*solve(t(X[,includednew])%*%X[,includednew]) #### V1 = Sigmanew[spot2,spot2] V2 = as.matrix(Sigmanew[-spot2,-spot2]) V12 = as.matrix(Sigmanew[spot2,-spot2]) thetab=beta1[b-1,includednew] thetano = as.matrix(thetab[-spot2]) meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) ################## beta1[b,Ind]=rnorm(1,meannew,varnew) dn=log(dnorm(beta1[b,Ind],meannew,varnew)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b-1,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) alphab1=Liken-Likeo+dn + 
log(beta(sum(eta1[b,])+z1a,p1-sum(eta1[b,])+z1b)) - log(beta(sum(eta1[b-1,])+z1a,p1-sum(eta1[b-1,])+z1b)) U=log(runif(1,0,1)) if(U>alphab1){ eta1[b,]=eta1[b-1,] beta1[b,]=beta1[b-1,] Indeta1[b]=0 }else{Indeta1[b]=1} } } } ##End Eta Beta includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] if(sum(eta1[b,])>0){ if(sum(eta1[b,])==1){ iter[2]="Conditional Inclusion" includednew=rep(0,p1) for(k in 1:p1){if(eta1[b,k]==1){includednew[k]=k}} includednew=includednew[includednew != 0] Sigmanew=c*solve(t(X[,includednew])%*%X[,includednew]) meannew = 0 varnew = sqrt(Sigmanew) beta=beta1[b,] ################## beta[includednew]=rnorm(1,meannew,varnew) dn=log(dnorm(beta[includednew],meannew,varnew)) ###density old do=log(dnorm(beta1[b,includednew],meannew,varnew)) Likeo=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta,s1[b-1,],lam1[b-1,]) alphab1m=Liken-Likeo+dn -do U=log(runif(1,0,1)) if(is.finite(alphab1m)==FALSE){ Indcond1[b,includednew]=0 }else{ if(U>alphab1m){ Indcond1[b,includednew]=0 }else{Indcond1[b,includednew]=1 beta1[b,]=beta }} }else{ iter[2]="Conditional Inclusion" ##Jointly Update nonzero betas zeta1=beta1[b,] zeta1=zeta1[zeta1!=0] zeta1n=zeta1 Sigmanew=c*solve(t(X[,includednew])%*%X[,includednew]) ############### #### for(k in 1:length(includednew)){ V1 = Sigmanew[k,k] V2 = as.matrix(Sigmanew[-k,-k]) V12 = as.matrix(Sigmanew[k,-k]) thetab=beta1[b,includednew] thetano = as.matrix(thetab[-k]) meannew = t(V12)%*%solve(V2)%*%thetano varnew = sqrt(V1 - t(V12)%*%solve(V2)%*%V12) ################## zeta1n[k]=rnorm(1,meannew,varnew) dn=log(dnorm(zeta1n[k],meannew,varnew)) ###density old do=log(dnorm(zeta1[k],meannew,varnew)) beta=beta1[b,] beta[includednew]=zeta1n Likeo=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,beta,s1[b-1,],lam1[b-1,]) alphab1m=Liken-Likeo+dn -do U=log(runif(1,0,1)) if(is.finite(alphab1m)==FALSE){ Indcond1[b,includednew[k]]=0 }else{ if(U>alphab1m){ Indcond1[b,includednew[k]]=0 zeta1n[k]=zeta1[k] }else{Indcond1[b,includednew[k]]=1 beta1[b,]=beta zeta1[k]=zeta1n[k] }} } ##Jointly Update nonzero betas iter[2]="mixing" zeta1n=beta1[b,] Sigmanew=c*solve(t(X[,includednew])%*%X[,includednew]) zeta1n[includednew]=rmvnorm(1,rep(0,length(includednew)),Sigmanew) beta=beta1[b,] beta=beta[beta!=0] dn=log(dmvnorm(zeta1n[includednew],rep(0,length(includednew)),Sigmanew)) ###density old do=log(dmvnorm(beta,rep(0,length(includednew)),Sigmanew)) ######Accept reject### Likeo=LK1(Y1,I1,X,beta1[b,],s1[b-1,],lam1[b-1,]) Liken=LK1(Y1,I1,X,zeta1n,s1[b-1,],lam1[b-1,]) alphamix1=Liken-Likeo+dn -do U=log(runif(1,0,1)) if(is.finite(alphamix1)==FALSE){ Indmix1[b]=0 }else{ if(U>alphamix1){ Indmix1[b]=0 }else{Indmix1[b]=1 beta1[b,]=zeta1n }} } } S1=s1[b-1,] S1=S1[!is.na(S1)] L1=lam1[b-1,] L1=as.matrix(L1[!is.na(L1)]) ############################################ #####Start LogBH Samplers################### ############################################ ####Lam1#### iter[1]="LogBH1" iter[2]="matrixsetup" W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) length1=rep(0,J1+1) for(j in 1:length(length1)){ length1[j]=s1[b-1,j+1]-s1[b-1,j] } if(J1<2){ if(J1==1){ W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) SigLam1=solve(diag(J1+1)-W1)%*%Q1 }else{ Q1=as.matrix(2/(m1)) SigLam1=Q1 } }else{ for(j in 2:J1){ 
    W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1])
    W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1])
    Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1])
  }
  Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1])
  Q1[1,1]=2/(2*length1[1]+length1[2])
  W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2])
  W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1])
  SigLam1=solve(diag(J1+1)-W1)%*%Q1
}
iter[2]="Mu"
##Lambda1 Hierarchical Sampler
##Mulam
if(J1>0){
  Mulam1[b]=rnorm(1,(t(as.matrix(rep(1,J1+1)))%*%solve(SigLam1)%*%L1)/(t(as.matrix(rep(1,J1+1)))%*%solve(SigLam1)%*%as.matrix(rep(1,J1+1))),sqrt(Siglam1[b-1]/(t(as.matrix(rep(1,J1+1)))%*%solve(SigLam1)%*%as.matrix(rep(1,J1+1)))))
  Siglam1[b]=1/rgamma(1,a1+(J1+1)/2,b1+.5*(t(as.matrix(rep(Mulam1[b],J1+1))-L1)%*%solve(SigLam1)%*%(as.matrix(rep(Mulam1[b],J1+1))-L1)))
  ##Siglam
  iter[2]="Sigma"
}else{
  Mulam1[b]=rnorm(1,lam1[b-1,1],sqrt(Siglam1[b-1]))
  Siglam1[b]=1/rgamma(1,a1+1/2,b1+.5*(Mulam1[b]-lam1[b-1,1])^2)
}
#if(is.finite(Mulam1[b])==FALSE){stop("Adjust Hierarchical Hyper-Parameters")}
#if(is.finite(Siglam1[b])==FALSE){stop("Adjust Hierarchical Hyper-Parameters")}
#lambda1
iter[2]="lam1"
lam1[b,]=lam1[b-1,]
#######
for(m in 1:(J1+1)){
  lam=lam1[b,]
  lam=lam[is.na(lam)==FALSE]
  lambda=lam
  lam[m]=lambda[m]+runif(1,-cl1,cl1)
  if(J1==0){
    do=log(dnorm(lambda[m],Mulam1[b],sqrt(Siglam1[b])))
    dn=log(dnorm(lam[m],Mulam1[b],sqrt(Siglam1[b])))
  }else{
    #do=-(t(as.matrix(lambda)-as.matrix(rep(Mulam1[b],J1+1)))%*%solve(SigLam1)%*%(as.matrix(lambda)-as.matrix(rep(Mulam1[b],J1+1))))/(2*Siglam1[b])
    #dn=-(t(as.matrix(lam)-as.matrix(rep(Mulam1[b],J1+1)))%*%solve(SigLam1)%*%(as.matrix(lam)-as.matrix(rep(Mulam1[b],J1+1))))/(2*Siglam1[b])
    ##log prior densities of the current (do) and proposed (dn) log-hazard vectors
    do=log(dmvnorm(lambda,rep(Mulam1[b],J1+1),Siglam1[b]*SigLam1))
    dn=log(dmvnorm(lam,rep(Mulam1[b],J1+1),Siglam1[b]*SigLam1))
  }
  Likeo=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b-1,],lam1[b,])
  Liken=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b-1,],lam)
  U=log(runif(1,0,1))
  alphalam=Liken-Likeo+dn-do
  if(is.nan(alphalam)==TRUE){
    lam1[b,m]=lam1[b-1,m]
    Acceptlam1[b,m]=0
  }else{
    if(U<alphalam){
      Acceptlam1[b,m]=1
      lam1[b,m]=lam[m]
    }else{Acceptlam1[b,m]=0}
  }
}
#############################################
###################################################
iter[1]="Haz1"
iter[2]="Birth"
###Random Perturbation###
U1=runif(1,0,1)
#####
s=s1[b-1,]
s=s[!is.na(s)]
if(length(s)<J1max){
  Birth=runif(1,0,m1)
  s1[b,1:(J1+3)]=sort(c(s,Birth))
  for(k in 2:(J1+2)){
    if(Birth>s1[b-1,k-1] & Birth<s1[b-1,k]){
      Ind=k-1
    }
  }
  lam=rep(0,J1+2)
  if(Ind==1 | Ind==J1+1){
    if(Ind==1){
      lam[Ind]=lam1[b,Ind] - ((s1[b-1,Ind+1]-Birth)/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
      lam[Ind+1]=lam1[b,Ind] + ((Birth-s1[b-1,Ind])/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
      lam[(Ind+2):length(lam)]=lam1[b,(Ind+1):(J1+1)]
    }else{
      lam[Ind]=lam1[b,Ind] - ((s1[b-1,Ind+1]-Birth)/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
      lam[Ind+1]=lam1[b,Ind] + ((Birth-s1[b-1,Ind])/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
      lam[1:(Ind-1)]=lam1[b,1:(Ind-1)]
    }
  }else{
    lam[Ind]=lam1[b,Ind] - ((s1[b-1,Ind+1]-Birth)/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
    lam[Ind+1]=lam1[b,Ind] + ((Birth-s1[b-1,Ind])/(s1[b-1,Ind+1]-s1[b-1,Ind]))*log((1-U1)/U1)
    lam[1:(Ind-1)]=lam1[b,1:(Ind-1)]
    lam[(Ind+2):length(lam)]=lam1[b,(Ind+1):(J1+1)]
  }
  lam=lam[!is.na(lam)]
  lambda=lam1[b,]
  lambda=lambda[!is.na(lambda)]
  Lo=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b-1,],lam1[b,])
  if(J1>0){
    do=log(dpois(J1,alpha1))+log(dmvnorm(lambda,rep(Mulam1[b],length(lambda)),SigLam1*Siglam1[b]))
}else{ do=log(dpois(J1,alpha1))+log(dnorm(lambda,Mulam1[b],Siglam1[b])) } prior=((2*J1+3)*(2*J1+2)*(Birth-s1[b-1,Ind])*(s1[b-1,Ind+1]-Birth))/((m1^2)*(s1[b-1,Ind+1]-s1[b-1,Ind])) G1=G1+1 J1=J1+1 Ln=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b,],lam) ##Make SigLam1 W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) length1=diff(s1[b,]) if(J1<2){ if(J1==1){ W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 }else{ SigLam1n=2/m1 } }else{ for(j in 2:J1){ W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1]) W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1]) Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1]) } Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 } dn=log(dpois(J1,alpha1))+log(dmvnorm(lam,rep(Mulam1[b],length(lam)),Siglam1[b]*SigLam1n)) alpha=Ln-Lo+dn-do-log(U1*(1-U1)) + log(prior) if(is.nan(alpha)==TRUE){ IndB1[b]=0 s1[b,]=s1[b-1,] J1=J1-1 G1=G1-1 }else{ U=log(runif(1,0,1)) if(U<alpha){ IndB1[b]=1 lam1[b,1:(J1+1)]=lam }else{ s1[b,]=s1[b-1,] IndB1[b]=0 J1=J1-1 G1=G1-1 } } }else{ s1[b,]=s1[b-1,] IndB1[b]=0 } ######################################################### ###################Death Sampler######################### ########################################################## iter[2]="Death" U1=runif(1,0,1) if(J1==0){ IndD1[b]=0 s1[b,]=s1[b-1,] }else{ if(J1==1){ Ind=2 }else{ Ind=sample(2:(J1+1),1) } s=s1[b,] s=s[-Ind] lam=lam1[b,] lambda=lam[!is.na(lam)] lam=lam[!is.na(lam)] lam=lam[-Ind] lam[Ind-1]=((s1[b,Ind]-s1[b,Ind-1])*lam1[b,Ind-1]+(s1[b,Ind+1]-s1[b,Ind])*lam1[b,Ind])/(s1[b,Ind+1]-s1[b,Ind-1]) ############################################# ####Sets up SigLam1 matrix for old density### ############################################# W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) length1=diff(s1[b,]) if(J1<2){ if(J1==1){ W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) SigLam1=solve(diag(J1+1)-W1)%*%Q1 do=log(dpois(J1,alpha1))+log(dmvnorm(lambda,rep(Mulam1[b],length(lambda)),SigLam1*Siglam1[b])) }else{ do=log(dpois(J1,alpha1))+log(dnorm(lambda,Mulam1[b],Siglam1[b])) } }else{ for(j in 2:J1){ W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1]) W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1]) Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1]) } Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) SigLam1=solve(diag(J1+1)-W1)%*%Q1 do=log(dpois(J1,alpha1))+log(dmvnorm(lambda,rep(Mulam1[b],length(lambda)),SigLam1*Siglam1[b])) } ############################################# ############################################# Lo=LK1L(Y1,I1,X,as.matrix(beta1[b,]),s1[b,],lam1[b,]) 
prior=((m1^2)*(s1[b,Ind+1]-s1[b,Ind-1]))/((2*J1+1)*(2*J1)*(s1[b,Ind]-s1[b,Ind-1])*(s1[b,Ind+1]-s1[b,Ind])) G1=G1-1 J1=J1-1 Ln=LK1L(Y1,I1,X,as.matrix(beta1[b,]), s,lam) ###Make siglam matrix W1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) Q1=matrix(rep(0,(J1+1)*(J1+1)),nrow=J1+1) length1=rep(0,J1+1) for(j in 1:length(length1)){ length1[j]=s[j+1]-s[j] } if(J1<2){ if(J1==1){ W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 dn=log(dpois(J1,alpha1))+log(dmvnorm(lam,rep(Mulam1[b],length(lam)),SigLam1n*Siglam1[b])) }else{ SigLam1n=2/m1 dn=log(dpois(J1,alpha1))+log(dnorm(lam,Mulam1[b],Siglam1[b])) } }else{ for(j in 2:J1){ W1[j,j-1]=(clam1*(length1[j]+length1[j-1]))/(length1[j-1]+2*length1[j]+length1[j+1]) W1[j,j+1]=(clam1*(length1[j]+length1[j+1]))/(length1[j-1]+2*length1[j]+length1[j+1]) Q1[j,j]=2/(length1[j-1]+2*length1[j]+length1[j+1]) } Q1[J1+1,J1+1]=2/(length1[J1]+2*length1[J1+1]) Q1[1,1]=2/(2*length1[1]+length1[2]) W1[1,2]=(clam1*(length1[1]+length1[2]))/(2*length1[1]+length1[2]) W1[J1+1,J1]=(clam1*(length1[J1+1]+length1[J1]))/(length1[J1]+2*length1[J1+1]) SigLam1n=solve(diag(J1+1)-W1)%*%Q1 dn=log(dpois(J1,alpha1))+log(dmvnorm(lam,rep(Mulam1[b],length(lam)),SigLam1n*Siglam1[b])) } #### alpha=Ln-Lo+dn-do+log(prior)+log(U1*(1-U1)) if(is.nan(alpha)==TRUE){ IndD1[b]=0 J1=J1+1 G1=G1+1 }else{ U=log(runif(1,0,1)) iter[2]="AcceptRejDeath" if(U<alpha){ s1[b,]=c(s,NA) IndD1[b]=1 lam1[b,1:(J1+1)]=lam lam1[b,(J1+2):J1max]=rep(NA,J1max-J1-1) }else{ IndD1[b]=0 J1=J1+1 G1=G1+1 } } ####End else } ## ####################### #####End of Death sampler ###################### split1[b]=J1 ## sum1[b]=sum(eta1[b,]) ##End Sampler } ###End of Sampler ################End Samplers cat(c,z1a,z1b," ", " ", " ", "Posterior Inclusion Probabilities after half Burnin", " ", "Hazard", " ", colMeans(eta1[(B*burn+1):B,])*100, " ", "IndEta",mean(Indeta1[(B*burn+1):B])*100," ","IndMix",mean(Indmix1[(B*burn+1):B])*100," ", "Included Acceptance", " ", "Hazard", " ", " ", colMeans(Indcond1[(B*burn+1):B,],na.rm=TRUE)*100," ","Survival"," ","IndDeath",mean(IndD1[(B*burn+1):B])*100," ","IndBirth",mean(IndB1[(B*burn+1):B])*100," ","Lambda"," ", "Lam1", colMeans(Acceptlam1[(B*burn+1):B,],na.rm=TRUE)*100 ) ##End } ###Return Values Path1= paste0(Path,"/beta1.txt") write.table(beta1[(burn*B+1):B,], Path1, sep="\t") Path1= paste0(Path,"/eta1.txt") write.table(eta1[(burn*B+1):B,], Path1, sep="\t") Path1= paste0(Path,"/lam1.txt") write.table(lam1[(burn*B+1):B,], Path1, sep="\t") Path1= paste0(Path,"/s1.txt") write.table(s1[(burn*B+1):B,], Path1, sep="\t") Path1= paste0(Path,"/sum1.txt") write.table(sum1[(burn*B+1):B], Path1, sep="\t") Path1= paste0(Path,"/split1.txt") write.table(split1[(burn*B+1):B], Path1, sep="\t") Path1= paste0(Path,"/siglam1.txt") write.table(Siglam1[(burn*B+1):B], Path1, sep="\t") Path1= paste0(Path,"/mulam1.txt") write.table(Mulam1[(burn*B+1):B], Path1, sep="\t") Path1= paste0(Path,"/Indeta1.txt") write.table(Indeta1[(burn*B+1):B], Path1, sep="\t") Path1= paste0(Path,"/IndD1.txt") write.table(IndD1[(burn*B+1):B], Path1, sep="\t") Path1= paste0(Path,"/IndB1.txt") write.table(IndB1[(burn*B+1):B], Path1, sep="\t") Path1= paste0(Path,"/Acceptlam1.txt") write.table(Acceptlam1[(burn*B+1):B,], Path1, sep="\t") Path1= paste0(Path,"/Indmix1.txt") write.table(Indmix1[(burn*B+1):B], Path1, sep="\t") par(mfrow=c(2,1)) 
plot(1:B,sum1,type="l",xlab="",ylab=" # Included", main="Traceplot: # Included")
plot(1:B,split1,type="l",xlab="",ylab="Hazard: Split #", main="Traceplot: # Split points")
}
}
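
## Overview of the sampler above: for each MCMC iteration b the code cycles
## through (i) SVSS add/delete/swap moves on the inclusion indicators eta1,
## (ii) conditional and joint ("mixing") Metropolis updates of the nonzero
## coefficients beta1, (iii) Gibbs updates of the hierarchical mean and
## variance (Mulam1, Siglam1) of the piecewise log-baseline hazard,
## (iv) Metropolis updates of the log-hazard heights lam1, and
## (v) reversible-jump birth/death moves on the split points s1, before
## posterior summaries are written to Path.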
/scratch/gouwar.j/cran-all/cranData/BayesPieceHazSelect/R/PiecewiseBayesSelect.R
#' @title Deprecated functions in package \pkg{BayesPostEst}.
#' @description The functions listed below are deprecated and will be defunct in
#'   the near future. When possible, alternative functions with similar
#'   functionality are also mentioned. Help pages for deprecated functions are
#'   available at \code{help("-deprecated")}.
#' @name BayesPostEst-deprecated
#' @keywords internal
NULL
/scratch/gouwar.j/cran-all/cranData/BayesPostEst/R/BayesPostEst-deprecated.R
#' BayesPostEst Overview
#'
#' This package currently has nine main functions that can be used to generate
#' and plot postestimation quantities after estimating Bayesian regression models using MCMC.
#' The package combines functions written originally for Johannes Karreth's workshop on
#' Bayesian modeling at the ICPSR Summer program. Currently BayesPostEst focuses mostly on
#' generalized linear regression models for binary outcomes (logistic and probit regression).
#' The vignette for this package has a walk-through of each function in action.
#' Please refer to that to get an overview of all the functions, or visit the
#' documentation for a specific function of your choice. Johannes Karreth's website
#' (http://www.jkarreth.net) also has resources for getting started with Bayesian
#' analysis, fitting models, and presenting results.
#'
#' @section Main Functions:
#' \itemize{
#' \item \code{mcmcAveProb()}
#' \item \code{mcmcObsProb()}
#' \item \code{mcmcFD()}
#' \item \code{mcmcMargEff()}
#' \item \code{mcmcRocPrc()}
#' \item \code{mcmcRocPrcGen()}
#' \item \code{mcmcTab()}
#' \item \code{mcmcReg()}
#' \item \code{plot.mcmcFD()}
#' }
#'
#' @docType package
#' @name BayesPostEst
NULL
#> NULL

#' @importFrom rlang .data
NULL

#' @importFrom stats median pnorm model.matrix quantile
#'   sd variable.names plogis
NULL

#' @importFrom ggplot2 ggplot geom_rect xlab ylab geom_vline scale_x_continuous
#'   geom_text geom_bar facet_wrap scale_x_discrete scale_y_continuous aes
NULL

#' @importFrom dplyr summarize group_by tibble
NULL

#' @importFrom tidyr gather
NULL

#' @importFrom ggridges stat_density_ridges
NULL

#' @importFrom reshape2 melt
NULL

#' @importFrom caTools trapz
NULL

#' @importFrom coda as.mcmc HPDinterval
NULL

#' @importFrom texreg createTexreg texreg htmlreg
NULL

#' @importFrom utils getFromNamespace
NULL

#' @importFrom ROCR prediction performance
NULL
#> NULL
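
## Quick-start sketch (an illustration only, not part of the exported
## documentation; see the vignette for the full walk-through), assuming the
## bundled example model `jags_logit`:
##   data("jags_logit")
##   mcmcTab(jags_logit, ci = c(0.025, 0.975))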
/scratch/gouwar.j/cran-all/cranData/BayesPostEst/R/BayesPostEst.R
#' Fitted JAGS logit model #' #' A fitted JAGS logit model generated with [R2jags::jags()]. See the example #' code below for how it was created. Used in examples and for testing. #' #' @format A class "rjags" object created by [R2jags::jags()] #' #' @examples #' \donttest{ #' if (interactive()) { #' data("sim_data") #' #' ## formatting the data for jags #' datjags <- as.list(sim_data) #' datjags$N <- length(datjags$Y) #' #' ## creating jags model #' model <- function() { #' #' for(i in 1:N){ #' Y[i] ~ dbern(p[i]) ## Bernoulli distribution of y_i #' logit(p[i]) <- mu[i] ## Logit link function #' mu[i] <- b[1] + #' b[2] * X1[i] + #' b[3] * X2[i] #' } #' #' for(j in 1:3){ #' b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity #' } #' #' } #' #' params <- c("b") #' inits1 <- list("b" = rep(0, 3)) #' inits2 <- list("b" = rep(0, 3)) #' inits <- list(inits1, inits2) #' #' ## fitting the model with R2jags #' set.seed(123) #' jags_logit <- R2jags::jags(data = datjags, inits = inits, #' parameters.to.save = params, n.chains = 2, #' n.iter = 2000, n.burnin = 1000, model.file = model) #' #' } #' } #' #' @docType data "jags_logit" #' Fitted JAGS probit model #' #' A fitted JAGS probit model generated with [R2jags::jags()]. See the example #' code below for how it was created. Used in examples and for testing. #' #' @format A class "rjags" object created by [R2jags::jags()] #' #' @examples #' \donttest{ #' if (interactive()) { #' data("sim_data") #' #' ## formatting the data for jags #' datjags <- as.list(sim_data) #' datjags$N <- length(datjags$Y) #' #' ## creating jags model #' model <- function() { #' #' for(i in 1:N){ #' Y[i] ~ dbern(p[i]) ## Bernoulli distribution of y_i #' probit(p[i]) <- mu[i] ## Update with probit link function #' mu[i] <- b[1] + #' b[2] * X1[i] + #' b[3] * X2[i] #' } #' #' for(j in 1:3){ #' b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity #' } #' #' } #' #' params <- c("b") #' inits1 <- list("b" = rep(0, 3)) #' inits2 <- list("b" = rep(0, 3)) #' inits <- list(inits1, inits2) #' #' ## fitting the model with R2jags #' set.seed(123) #' jags_probit <- R2jags::jags(data = datjags, inits = inits, #' parameters.to.save = params, n.chains = 2, #' n.iter = 2000, n.burnin = 1000, model.file = model) #' #' } #' } #' #' @docType data "jags_probit" #' Fitted JAGS interactive linear model #' #' A fitted JAGS linear model with interaction term generated with #' [R2jags::jags()]. See the example code below for how it was created. Used #' in examples and for testing. 
#'
#' @format A class "rjags" object created by [R2jags::jags()]
#'
#' @examples
#' \donttest{
#' if (interactive()) {
#' data("sim_data_interactive")
#'
#' ## formatting the data for jags
#' datjags <- as.list(sim_data_interactive)
#' datjags$N <- length(datjags$Y)
#'
#' ## creating jags model
#' model <- function() {
#'
#'   for(i in 1:N){
#'     Y[i] ~ dnorm(mu[i], sigma) ## Normal distribution of y_i (dnorm in JAGS takes a precision)
#'
#'     mu[i] <- b[1] +
#'       b[2] * X1[i] +
#'       b[3] * X2[i] +
#'       b[4] * X1[i] * X2[i]
#'
#'   }
#'
#'   for(j in 1:4){
#'     b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity
#'   }
#'
#'   sigma ~ dexp(1)
#'
#' }
#'
#' params <- c("b")
#' inits1 <- list("b" = rep(0, 4))
#' inits2 <- list("b" = rep(0, 4))
#' inits <- list(inits1, inits2)
#'
#' ## fitting the model with R2jags
#' set.seed(123)
#' jags_interactive <- R2jags::jags(data = datjags, inits = inits,
#'                                  parameters.to.save = params, n.chains = 2,
#'                                  n.iter = 2000, n.burnin = 1000,
#'                                  model.file = model)
#'
#' }
#' }
#'
#' @docType data
"jags_interactive"

# jags_interactive_cat ----------------------------------------------------

#' Fitted JAGS interactive linear model with categorical moderator
#'
#' A fitted JAGS linear model with interaction term generated with
#' [R2jags::jags()]. See the example code below for how it was created. Used
#' in examples and for testing.
#'
#' @format A class "rjags" object created by [R2jags::jags()]
#'
#' @examples
#' \donttest{
#' if (interactive()) {
#' data("sim_data_interactive_cat")
#'
#' ## formatting the data for jags
#' datjags <- as.list(sim_data_interactive_cat)
#' datjags$N <- length(datjags$Y)
#'
#' ## creating jags model
#' model <- function() {
#'
#'   for(i in 1:N){
#'     Y[i] ~ dnorm(mu[i], sigma) ## Normal distribution of y_i (dnorm in JAGS takes a precision)
#'
#'     mu[i] <- b[1] +
#'       b[2] * X1[i] +
#'       b[3] * X3[i] +
#'       b[4] * X1[i] * X3[i]
#'
#'   }
#'
#'   for(j in 1:4){
#'     b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity
#'   }
#'
#'   sigma ~ dexp(1)
#'
#' }
#'
#' params <- c("b")
#' inits1 <- list("b" = rep(0, 4))
#' inits2 <- list("b" = rep(0, 4))
#' inits <- list(inits1, inits2)
#'
#' ## fitting the model with R2jags
#' set.seed(123)
#' jags_interactive_cat <- R2jags::jags(data = datjags, inits = inits,
#'                                      parameters.to.save = params, n.chains = 2,
#'                                      n.iter = 2000, n.burnin = 1000,
#'                                      model.file = model)
#'
#' }
#' }
#'
#' @docType data
"jags_interactive_cat"

#' Simulated data for examples
#'
#' Simulated data to fit example models against
#'
#' @format a data.frame
#'
#' @examples
#' ## simulating data
#' set.seed(123456)
#' b0 <- 0.2 # true value for the intercept
#' b1 <- 0.5 # true value for first beta
#' b2 <- 0.7 # true value for second beta
#' n <- 500 # sample size
#' X1 <- runif(n, -1, 1)
#' X2 <- runif(n, -1, 1)
#' Z <- b0 + b1 * X1 + b2 * X2
#' pr <- 1 / (1 + exp(-Z)) # inv logit function
#' Y <- rbinom(n, 1, pr)
#' sim_data <- data.frame(cbind(X1, X2, Y))
"sim_data"

#' Simulated data for examples
#'
#' Simulated data to fit example models against
#'
#' @format a data.frame
#'
#' @examples
#' set.seed(123456)
#' b0 <- 0.2 # true value for the intercept
#' b1 <- 0.5 # true value for first beta
#' b2 <- 0.7 # true value for second beta
#' b3 <- -0.3 # true value for the interaction beta
#' n <- 500 # sample size
#' X1 <- runif(n, -1, 1)
#' X2 <- runif(n, -1, 1)
#' Z_interactive <- b0 + b1 * X1 + b2 * X2 + b3 * (X1 * X2)
#' Y_interactive <- rnorm(n, Z_interactive, 1)
#' sim_data_interactive <- data.frame(cbind(X1, X2, Y = Y_interactive))
"sim_data_interactive"

#' Simulated data for examples
#'
#' Simulated data to fit example models against
#'
#' @format a data.frame
#'
#' @examples
#' set.seed(123456)
#' b0 <- 0.2 # true value for the intercept
#' b1 <- 0.5 # true value for first beta
#' b2 <- 0.7 # true value for second beta
#' b3 <- -0.3 # true value for the interaction beta
#' n <- 500 # sample size
#' X1 <- runif(n, -1, 1)
#' X3 <- rbinom(n, 5, .23)
#' Z_interactive_cat <- b0 + b1 * X1 + b2 * X3 + b3 * (X1 * X3)
#' Y_interactive_cat <- rnorm(n, Z_interactive_cat, 1)
#' sim_data_interactive_cat <- data.frame(cbind(X1, X3, Y = Y_interactive_cat))
"sim_data_interactive_cat"
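## Illustrative sketch (not part of the original source): the objects
## documented above are lazy-loaded data sets. The $BUGSoutput$sims.matrix
## slot below assumes the usual structure of an R2jags fit.
if (FALSE) {
  data("jags_logit", package = "BayesPostEst")
  draws <- jags_logit$BUGSoutput$sims.matrix # posterior draws, one column per parameter
  head(draws)
}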
## end of BayesPostEst/R/data.R
#'This function calculates predicted probabilities for "average" cases after a Bayesian #'logit or probit model. For an explanation of predicted probabilities for "average" cases, #'see e.g. King, Tomz & Wittenberg (2000, American Journal of Political Science 44(2): 347-361) #'@title Predicted Probabilities using Bayesian MCMC estimates for the "Average" Case #'@description This function calculates predicted probabilities for "average" cases after #'a Bayesian logit or probit model. As "average" cases, this function calculates the median #'value of each predictor. For an explanation of predicted probabilities for #'"average" cases, see e.g. King, Tomz & Wittenberg (2000, American Journal of #'Political Science 44(2): 347-361). #'@param modelmatrix model matrix, including intercept (if the intercept is among the #'parameters estimated in the model). Create with model.matrix(formula, data). #'Note: the order of columns in the model matrix must correspond to the order of columns #'in the matrix of posterior draws in the \code{mcmcout} argument. See the \code{mcmcout} #'argument for more. #'@param mcmcout posterior distributions of all logit coefficients, #'in matrix form. This can be created from rstan, MCMCpack, R2jags, etc. and transformed #'into a matrix using the function as.mcmc() from the coda package for \code{jags} class #'objects, as.matrix() from base R for \code{mcmc}, \code{mcmc.list}, \code{stanreg}, and #'\code{stanfit} class objects, and \code{object$sims.matrix} for \code{bugs} class objects. #'Note: the order of columns in this matrix must correspond to the order of columns #'in the model matrix. One can do this by examining the posterior distribution matrix and sorting the #'variables in the order of this matrix when creating the model matrix. A useful function for sorting #'column names containing both characters and numbers as #'you create the matrix of posterior distributions is \code{mixedsort()} from the gtools package. #'@param xcol column number of the posterior draws (\code{mcmcout}) and model matrices #'that corresponds to the explanatory variable for which to calculate associated Pr(y = 1). #'Note that the columns in these matrices must match. #'@param xrange name of the vector with the range of relevant values of the #'explanatory variable for which to calculate associated Pr(y = 1). #'@param xinterest semi-optional argument. Name of the explanatory variable for which #'to calculate associated Pr(y = 1). If \code{xcol} is supplied, this is not needed. #'If both are supplied, the function defaults to \code{xcol} and this argument is ignored. #'@param link type of generalized linear model; a character vector set to \code{"logit"} #'(default) or \code{"probit"}. #'@param ci the bounds of the credible interval. Default is \code{c(0.025, 0.975)} for the 95\% #'credible interval. #'@param fullsims logical indicator of whether full object (based on all MCMC draws #'rather than their average) will be returned. Default is \code{FALSE}. Note: The longer #'\code{xrange} is, the larger the full output will be if \code{TRUE} is selected. #'@references King, Gary, Michael Tomz, and Jason Wittenberg. 2000. “Making the Most #'of Statistical Analyses: Improving Interpretation and Presentation.” American Journal #'of Political Science 44 (2): 347–61. 
http://www.jstor.org/stable/2669316
#'@return if \code{fullsims = FALSE} (default), a tibble with 4 columns:
#'\itemize{
#'\item x: value of variable of interest, drawn from \code{xrange}
#'\item median_pp: median predicted Pr(y = 1) when variable of interest is set to x,
#'holding all other predictors to average (median) values
#'\item lower_pp: lower bound of credible interval of predicted probability at given x
#'\item upper_pp: upper bound of credible interval of predicted probability at given x
#'}
#'if \code{fullsims = TRUE}, a tibble with 3 columns:
#'\itemize{
#'\item Iteration: number of the posterior draw
#'\item x: value of variable of interest, drawn from \code{xrange}
#'\item pp: predicted Pr(y = 1) for the given posterior draw when the variable of
#'interest is set to x, holding all other predictors to average (median) values
#'}
#'@examples
#' \dontshow{.old_wd <- setwd(tempdir())}
#' \donttest{
#' if (interactive()) {
#' ## simulating data
#' set.seed(123)
#' b0 <- 0.2 # true value for the intercept
#' b1 <- 0.5 # true value for first beta
#' b2 <- 0.7 # true value for second beta
#' n <- 500 # sample size
#' X1 <- runif(n, -1, 1)
#' X2 <- runif(n, -1, 1)
#' Z <- b0 + b1 * X1 + b2 * X2
#' pr <- 1 / (1 + exp(-Z)) # inv logit function
#' Y <- rbinom(n, 1, pr)
#' df <- data.frame(cbind(X1, X2, Y))
#'
#' ## formatting the data for jags
#' datjags <- as.list(df)
#' datjags$N <- length(datjags$Y)
#'
#' ## creating jags model
#' model <- function() {
#'
#'   for(i in 1:N){
#'     Y[i] ~ dbern(p[i]) ## Bernoulli distribution of y_i
#'     logit(p[i]) <- mu[i] ## Logit link function
#'     mu[i] <- b[1] +
#'       b[2] * X1[i] +
#'       b[3] * X2[i]
#'   }
#'
#'   for(j in 1:3){
#'     b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity
#'   }
#'
#' }
#'
#' params <- c("b")
#' inits1 <- list("b" = rep(0, 3))
#' inits2 <- list("b" = rep(0, 3))
#' inits <- list(inits1, inits2)
#'
#' ## fitting the model with R2jags
#' library(R2jags)
#' set.seed(123)
#' fit <- jags(data = datjags, inits = inits,
#'             parameters.to.save = params, n.chains = 2, n.iter = 2000,
#'             n.burnin = 1000, model.file = model)
#'
#' ### average value approach
#' library(coda)
#' xmat <- model.matrix(Y ~ X1 + X2, data = df)
#' mcmc <- as.mcmc(fit)
#' mcmc_mat <- as.matrix(mcmc)[, 1:ncol(xmat)]
#' X1_sim <- seq(from = min(datjags$X1),
#'               to = max(datjags$X1),
#'               length.out = 10)
#' ave_prob <- mcmcAveProb(modelmatrix = xmat,
#'                         mcmcout = mcmc_mat,
#'                         xrange = X1_sim,
#'                         xcol = 2)
#' }
#' }
#'
#' \dontshow{setwd(.old_wd)}
#'@export
#'
mcmcAveProb <- function(modelmatrix,
                        mcmcout,
                        xcol,
                        xrange,
                        xinterest,
                        link = "logit",
                        ci = c(0.025, 0.975),
                        fullsims = FALSE){

  # checking arguments
  if(missing(xcol) & missing(xinterest)) {
    stop("Please enter a column number or name of your variable of interest.")
  }
  if(!missing(xcol) & !missing(xinterest)) {
    message("Both xcol and xinterest were supplied by user. Function defaults to xcol")
  }
  if(!missing(xinterest)) {
    if(!(xinterest %in% variable.names(modelmatrix)))
      stop("Variable name does not match any in the matrix. Please enter another.")
  }
  if(missing(modelmatrix) | missing(mcmcout) | missing(xrange)) {
    stop("Please enter modelmatrix, mcmcout, and xrange arguments")
  }

  X <- matrix(rep(apply(X = modelmatrix, MARGIN = 2, FUN = function(x) median(x)),
                  times = length(xrange)),
              nrow = length(xrange),
              byrow = TRUE)
  colnames(X) <- variable.names(modelmatrix)

  if(!missing(xcol)) {
    X[, xcol] <- xrange
  } else {
    # exact name match, so that e.g. "X1" cannot also overwrite "X10" or "X1:X2"
    X[, colnames(X) == xinterest] <- xrange
  }

  if(link == "logit"){
    pp <- plogis(t(X %*% t(mcmcout)))
  }
  if(link == "probit"){
    pp <- pnorm(t(X %*% t(mcmcout)))
  }

  colnames(pp) <- as.character(xrange)
  longFrame <- reshape2::melt(pp)

  pp_dat <- dplyr::summarize(dplyr::group_by(longFrame, .data$Var2),
                             median_pp = quantile(.data$value, probs = 0.5),
                             lower_pp = quantile(.data$value, probs = ci[1]),
                             upper_pp = quantile(.data$value, probs = ci[2]))
  names(pp_dat) <- c("x", "median_pp", "lower_pp", "upper_pp")

  if(fullsims == FALSE){
    return(pp_dat) # pp_dat summarizes longFrame across posterior draws
  }

  if(fullsims == TRUE){
    names(longFrame) <- c("Iteration", "x", "pp")
    return(longFrame)
  }
}
## end of BayesPostEst/R/mcmcAveProb.R
#' Coefficient Plots for MCMC Output #' #' Coefficient plots for MCMC output using \code{ggplot2} #' #' @param mod Bayesian model object generated by R2jags, rjags, R2WinBUGS, R2OpenBUGS, #' MCMCpack, rstan, rstanarm, and brms. #' @param pars a scalar or vector of the parameters you wish to include in the table. #' By default, \code{mcmcCoefPlot} includes all parameters saved in a model object. If a #' model has lots of samples and lots of saved parameters, not explicitly specifying #' a limited number of parameters to include via \code{pars} may take a long time #' or produce an unreadable plot. \code{pars} can either be a vector with the #' specific parameters to be included in the table e.g. \code{pars = c("beta[1]", #' "beta[2]", "beta[3]")}, or they can be partial names that will be matched using #' regular expressions e.g. \code{pars = "beta"} if \code{regex = TRUE}. Both of #' these will include \code{beta[1]}, \code{beta[2]}, and \code{beta[3]} in the #' plot. If \code{pars} is left blank, \code{mcmcCoefPlot} will exclude auxiliary #' parameters such as \code{deviance} from JAGS or \code{lp__} from Stan. #' @param pointest a character indicating whether to use the mean or median for #' point estimates in the table. #' @param ci a scalar indicating the confidence level of the uncertainty intervals. #' @param hpdi a logical indicating whether to use highest posterior density intervals #' or equal tailed credible intervals to capture uncertainty; default \code{FALSE}. #' @param sort logical indicating whether to sort the point estimates to produce #' a caterpillar or dot plot; default \code{FALSE}. #' @param plot logical indicating whether to return a \code{ggplot} object or the #' underlying tidy DataFrame; default \code{TRUE}. #' @param regex use regular expression matching with \code{pars}? #' #' @return a \code{ggplot} object or a tidy DataFrame. 
#' #' @author Rob Williams, \email{[email protected]} #' #' @examples #' \dontshow{.old_wd <- setwd(tempdir())} #' \donttest{ #' if (interactive()) { #' ## simulating data #' set.seed(123456) #' b0 <- 0.2 # true value for the intercept #' b1 <- 0.5 # true value for first beta #' b2 <- 0.7 # true value for second beta #' n <- 500 # sample size #' X1 <- runif(n, -1, 1) #' X2 <- runif(n, -1, 1) #' Z <- b0 + b1 * X1 + b2 * X2 #' pr <- 1 / (1 + exp(-Z)) # inv logit function #' Y <- rbinom(n, 1, pr) #' df <- data.frame(cbind(X1, X2, Y)) #' #' ## formatting the data for jags #' datjags <- as.list(df) #' datjags$N <- length(datjags$Y) #' #' ## creating jags model #' model <- function() { #' #' for(i in 1:N){ #' Y[i] ~ dbern(p[i]) ## Bernoulli distribution of y_i #' logit(p[i]) <- mu[i] ## Logit link function #' mu[i] <- b[1] + #' b[2] * X1[i] + #' b[3] * X2[i] #' } #' #' for(j in 1:3){ #' b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity #' } #' } #' #' params <- c("b") #' inits1 <- list("b" = rep(0, 3)) #' inits2 <- list("b" = rep(0, 3)) #' inits <- list(inits1, inits2) #' #' ## fitting the model with R2jags #' set.seed(123) #' fit <- R2jags::jags(data = datjags, inits = inits, #' parameters.to.save = params, n.chains = 2, n.iter = 2000, #' n.burnin = 1000, model.file = model) #' #' ## generating coefficient plot with all non-auxiliary parameters #' mcmcCoefPlot(fit) #' } #' } #' #' \dontshow{setwd(.old_wd)} #' @export mcmcCoefPlot <- function(mod, pars = NULL, pointest = 'mean', ci = .95, hpdi = FALSE, sort = FALSE, plot = TRUE, regex = FALSE) { ## pull in unexported functions from other packages ## other options for future versions might include lifting this and adding authors as copr holders runjags.as.mcmc.list.runjags = getFromNamespace("as.mcmc.list.runjags", "runjags") if (inherits(mod, what = c("jags", "rjags"))) { samps <- as.matrix(coda::as.mcmc(mod)) } if (inherits(mod, what = "bugs")) { samps <- mod$sims.matrix } if (inherits(mod, what = "runjags")) { samps <- as.matrix(runjags.as.mcmc.list.runjags(mod)) } if (inherits(mod, what = c("mcmc", "mcmc.list", "stanfit", "stanreg", "brmsfit"))) { samps <- as.matrix(mod) } if (is.null(pars)) { samps <- samps[, !grepl(pattern = 'deviance|lp__', x = colnames(samps))] } else if (regex) { samps <- samps[, grepl(pattern = paste(pars, collapse = '|'), x = colnames(samps))] } else { samps <- matrix(samps[, pars], nrow = nrow(samps), byrow = FALSE, dimnames = list(NULL, pars)) } if (hpdi == FALSE) { samps_ci <- t(apply(samps, 2, quantile, probs = c(.5 - ci/2, .5 + ci/2))) } else if (hpdi == TRUE) { samps_ci <- coda::HPDinterval(coda::as.mcmc(samps), prob = ci) } else { stop("hpdi must be either true or false") } if (pointest == 'mean') { samps_pe <- apply(samps, 2, mean) } else if (pointest == 'median') { samps_pe <- apply(samps, 2, median) } else { stop("pointest must be either 'mean' or 'median'") } coefs <- data.frame(pe = samps_pe, samps_ci) if (sort) { coefs$variable <- factor(rownames(coefs), levels = rev(rownames(coefs)[order(coefs$pe, decreasing = TRUE)])) } else { coefs$variable <- factor(rownames(coefs), levels = rev(rownames(coefs))) } colnames(coefs)[2:3] <- c('lo', 'hi') ## return coefficient plot or underlying dataframe if (!plot) { coefs } else { ggplot2::ggplot(coefs, ggplot2::aes(x = .data$variable, y = .data$pe, ymin = .data$lo, ymax = .data$hi)) + ggplot2::geom_hline(yintercept = 0, lty = 2) + ggplot2::geom_pointrange() + ggplot2::coord_flip() + ggplot2::labs(x = '', y = '') } }
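## Sketch: with plot = FALSE the tidy data frame (columns pe, lo, hi,
## variable) can feed a fully customized ggplot. 'fit' is assumed to be the
## jags model from the example above.
if (FALSE) {
  coef_df <- mcmcCoefPlot(fit, pars = "b", regex = TRUE, plot = FALSE)
  ggplot2::ggplot(coef_df, ggplot2::aes(x = variable, y = pe, ymin = lo, ymax = hi)) +
    ggplot2::geom_pointrange() +
    ggplot2::coord_flip()
}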
## end of BayesPostEst/R/mcmcCoefPlot.R
#'@title First Differences of a Bayesian Logit or Probit model #'@description R function to calculate first differences after a Bayesian logit or probit model. #'First differences are a method to summarize effects across covariates. This quantity represents #'the difference in predicted probabilities for each covariate for cases with low and high values #'of the respective covariate. For each of these differences, all other variables are held constant #'at their median. For more, see Long (1997, Sage Publications) and King, Tomz, and Wittenberg (2000, #'American Journal of Political Science 44(2): 347-361). #'@param modelmatrix model matrix, including intercept (if the intercept is among the #'parameters estimated in the model). Create with model.matrix(formula, data). #'Note: the order of columns in the model matrix must correspond to the order of columns #'in the matrix of posterior draws in the \code{mcmcout} argument. See the \code{mcmcout} #'argument for more. #'@param mcmcout posterior distributions of all logit coefficients, #'in matrix form. This can be created from rstan, MCMCpack, R2jags, etc. and transformed #'into a matrix using the function as.mcmc() from the coda package for \code{jags} class #'objects, as.matrix() from base R for \code{mcmc}, \code{mcmc.list}, \code{stanreg}, and #'\code{stanfit} class objects, and \code{object$sims.matrix} for \code{bugs} class objects. #'Note: the order of columns in this matrix must correspond to the order of columns #'in the model matrix. One can do this by examining the posterior distribution matrix and sorting the #'variables in the order of this matrix when creating the model matrix. A useful function for sorting #'column names containing both characters and numbers as #'you create the matrix of posterior distributions is \code{mixedsort()} from the gtools package. #'@param link type of generalized linear model; a character vector set to \code{"logit"} (default) #'or \code{"probit"}. #'@param ci the bounds of the credible interval. Default is \code{c(0.025, 0.975)} for the 95\% #'credible interval. #'@param percentiles values of each predictor for which the difference in Pr(y = 1) #'is to be calculated. Default is \code{c(0.25, 0.75)}, which will calculate the difference #'between Pr(y = 1) for the 25th percentile and 75th percentile of the predictor. For binary #'predictors, the function automatically calculates the difference between Pr(y = 1) #'for x = 0 and x = 1. #'@param fullsims logical indicator of whether full object (based on all MCMC draws #'rather than their average) will be returned. Default is \code{FALSE}. #'@references #'\itemize{ #'\item King, Gary, Michael Tomz, and Jason Wittenberg. 2000. “Making the Most of Statistical #'Analyses: Improving Interpretation and Presentation.” American Journal of Political Science #'44 (2): 347–61. http://www.jstor.org/stable/2669316 #'\item Long, J. Scott. 1997. Regression Models for Categorical and Limited Dependent Variables. #'Thousand Oaks: Sage Publications #'} #'@return An object of class \code{mcmcFD}. If \code{fullsims = FALSE} (default), #'a data frame with five columns: #'\itemize{ #'\item median_fd: median first difference #'\item lower_fd: lower bound of credible interval of the first difference #'\item upper_fd: upper bound of credible interval of the first difference #'\item VarName: name of the variable as found in \code{modelmatrix} #'\item VarID: identifier of the variable, based on the order of columns in #'\code{modelmatrix} and \code{mcmcout}. 
Can be adjusted for plotting
#'}
#'If \code{fullsims = TRUE}, a matrix with as many columns as predictors in the model. Each row
#'is the first difference for that variable based on one set of posterior draws. Column names are taken
#'from the column names of \code{modelmatrix}.
#'
#'@examples
#' \dontshow{.old_wd <- setwd(tempdir())}
#' \donttest{
#' if (interactive()) {
#' ## simulating data
#' set.seed(1234)
#' b0 <- 0.2 # true value for the intercept
#' b1 <- 0.5 # true value for first beta
#' b2 <- 0.7 # true value for second beta
#' n <- 500 # sample size
#' X1 <- runif(n, -1, 1)
#' X2 <- runif(n, -1, 1)
#' Z <- b0 + b1 * X1 + b2 * X2
#' pr <- 1 / (1 + exp(-Z)) # inv logit function
#' Y <- rbinom(n, 1, pr)
#' df <- data.frame(cbind(X1, X2, Y))
#'
#' ## formatting the data for jags
#' datjags <- as.list(df)
#' datjags$N <- length(datjags$Y)
#'
#' ## creating jags model
#' model <- function() {
#'
#'   for(i in 1:N){
#'     Y[i] ~ dbern(p[i]) ## Bernoulli distribution of y_i
#'     logit(p[i]) <- mu[i] ## Logit link function
#'     mu[i] <- b[1] +
#'       b[2] * X1[i] +
#'       b[3] * X2[i]
#'   }
#'
#'   for(j in 1:3){
#'     b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity
#'   }
#'
#' }
#'
#' params <- c("b")
#' inits1 <- list("b" = rep(0, 3))
#' inits2 <- list("b" = rep(0, 3))
#' inits <- list(inits1, inits2)
#'
#' ## fitting the model with R2jags
#' set.seed(123)
#' fit <- R2jags::jags(data = datjags, inits = inits,
#'                     parameters.to.save = params, n.chains = 2, n.iter = 2000,
#'                     n.burnin = 1000, model.file = model)
#'
#' ## running function with logit
#' xmat <- model.matrix(Y ~ X1 + X2, data = df)
#' mcmc <- coda::as.mcmc(fit)
#' mcmc_mat <- as.matrix(mcmc)[, 1:ncol(xmat)]
#' object <- mcmcFD(modelmatrix = xmat,
#'                  mcmcout = mcmc_mat)
#' object
#' }
#' }
#'
#' \dontshow{setwd(.old_wd)}
#'@export
#'
mcmcFD <- function(modelmatrix,
                   mcmcout,
                   link = "logit",
                   ci = c(0.025, 0.975),
                   percentiles = c(0.25, 0.75),
                   fullsims = FALSE) {

  if(missing(modelmatrix) | missing(mcmcout)){
    stop("Please enter required arguments.")
  }

  fdmat <- matrix(NA, ncol = 3, nrow = ncol(modelmatrix) - 1)
  colnames(fdmat) <- c("median_fd", "lower_fd", "upper_fd")
  rownames(fdmat) <- colnames(modelmatrix)[-1]

  fdfull <- matrix(NA, ncol = ncol(modelmatrix) - 1, nrow = nrow(mcmcout))
  colnames(fdfull) <- colnames(modelmatrix)[-1]

  for (i in 2:ncol(modelmatrix)){

    X <- matrix(rep(apply(X = modelmatrix, MARGIN = 2, FUN = function(x) median(x)),
                    times = 2),
                nrow = 2,
                byrow = TRUE)

    # for binary predictors coded 0/1, compare Pr(y = 1) at x = 0 and x = 1;
    # otherwise use the requested percentiles. (An elementwise ifelse() here
    # could silently mix the two branches for binary variables not coded 0/1.)
    if (length(unique(modelmatrix[, i])) == 2 &&
        all(range(modelmatrix[, i]) == c(0, 1))) {
      X[, i] <- c(0, 1)
    } else {
      X[, i] <- quantile(modelmatrix[, i], probs = percentiles)
    }

    if(link == "logit") {
      pp <- plogis(t(X %*% t(mcmcout)))
    } else if (link == "probit") {
      pp <- pnorm(t(X %*% t(mcmcout)))
    } else {
      stop("Please enter valid link argument.")
    }

    fd <- pp[, 2] - pp[, 1]

    fdmat[i-1, 1] <- quantile(fd, probs = c(0.5))
    fdmat[i-1, 2] <- quantile(fd, probs = c(ci[1]))
    fdmat[i-1, 3] <- quantile(fd, probs = c(ci[2]))

    fdfull[, i-1] <- fd
  }

  fddat <- as.data.frame(fdmat)
  fddat$VarName <- rownames(fdmat)
  fddat$VarID <- row(fdmat)[, 1]

  if (fullsims) {
    return(structure(fdfull, fullsims = TRUE, class = c("mcmcFD", "matrix")))
  } else {
    return(structure(fddat, fullsims = FALSE, class = c("mcmcFD", "data.frame")))
  }
}

#'@export
print.mcmcFD <- function(x, ...)
{
  if (attr(x, "fullsims")) {
    print.table(x)
  } else {
    print.data.frame(x)
  }
}

#'@title Plot Method for First Differences from MCMC output
#'@description The \code{plot} method for first differences generated from MCMC
#'output by \code{\link{mcmcFD}}. For more on this method, see Long
#'(1997, Sage Publications), and King, Tomz, and Wittenberg (2000, American
#'Journal of Political Science 44(2): 347-361). For a description of this type
#'of plot, see Figure 1 in Karreth (2018, International Interactions 44(3): 463-90).
#'@param x Output generated from \code{mcmcFD(..., fullsims = TRUE)}.
#'@param ROPE defaults to NULL. If not NULL, a numeric vector of length two,
#'defining the Region of Practical Equivalence around 0. See Kruschke (2013, Journal of
#'Experimental Psychology 143(2): 573-603) for more on the ROPE.
#'@param ... optional arguments to \code{\link[ggplot2]{theme}} from \code{\link[ggplot2:ggplot2-package]{ggplot2}}.
#'@references
#'\itemize{
#'\item Karreth, Johannes. 2018. “The Economic Leverage of International Organizations in Interstate Disputes.”
#'International Interactions 44 (3): 463-90. https://doi.org/10.1080/03050629.2018.1389728.
#'\item King, Gary, Michael Tomz, and Jason Wittenberg. 2000. “Making the Most of Statistical
#'Analyses: Improving Interpretation and Presentation.” American Journal of Political Science
#'44 (2): 347–61. http://www.jstor.org/stable/2669316.
#'\item Kruschke, John K. 2013. “Bayesian Estimation Supersedes the T-Test.” Journal of
#'Experimental Psychology: General 142 (2): 573–603. https://doi.org/10.1037/a0029146.
#'\item Long, J. Scott. 1997. Regression Models for Categorical and Limited Dependent Variables.
#'Thousand Oaks: Sage Publications.
#'}
#'@return a density plot of the differences in probabilities. The plot is made with ggplot2 and can be
#'passed on as an object to customize. Annotated numbers show the percent of posterior draws with
#'the same sign as the median estimate (if \code{ROPE = NULL}) or on the same side of the
#'ROPE as the median estimate (if \code{ROPE} is specified).
#'
#'@seealso \code{\link{mcmcFD}}
#'
#'@method plot mcmcFD
#'
#'@examples
#' \dontshow{.old_wd <- setwd(tempdir())}
#' \donttest{
#' if (interactive()) {
#' ## simulating data
#' set.seed(1234)
#' b0 <- 0.2 # true value for the intercept
#' b1 <- 0.5 # true value for first beta
#' b2 <- 0.7 # true value for second beta
#' n <- 500 # sample size
#' X1 <- runif(n, -1, 1)
#' X2 <- runif(n, -1, 1)
#' Z <- b0 + b1 * X1 + b2 * X2
#' pr <- 1 / (1 + exp(-Z)) # inv logit function
#' Y <- rbinom(n, 1, pr)
#' df <- data.frame(cbind(X1, X2, Y))
#'
#' ## formatting the data for jags
#' datjags <- as.list(df)
#' datjags$N <- length(datjags$Y)
#'
#' ## creating jags model
#' model <- function() {
#'
#'   for(i in 1:N){
#'     Y[i] ~ dbern(p[i]) ## Bernoulli distribution of y_i
#'     logit(p[i]) <- mu[i] ## Logit link function
#'     mu[i] <- b[1] +
#'       b[2] * X1[i] +
#'       b[3] * X2[i]
#'   }
#'
#'   for(j in 1:3){
#'     b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity
#'   }
#'
#' }
#'
#' params <- c("b")
#' inits1 <- list("b" = rep(0, 3))
#' inits2 <- list("b" = rep(0, 3))
#' inits <- list(inits1, inits2)
#'
#' ## fitting the model with R2jags
#' set.seed(123)
#' fit <- R2jags::jags(data = datjags, inits = inits,
#'                     parameters.to.save = params, n.chains = 2, n.iter = 2000,
#'                     n.burnin = 1000, model.file = model)
#'
#' ## preparing data for mcmcFD()
#' xmat <- model.matrix(Y ~ X1 + X2, data = df)
#' mcmc <- coda::as.mcmc(fit)
#' mcmc_mat <- as.matrix(mcmc)[, 1:ncol(xmat)]
#'
#' ## plotting with the plot() method
#' full <- mcmcFD(modelmatrix = xmat,
#'                mcmcout = mcmc_mat,
#'                fullsims = TRUE)
#' plot(full)
#'
#' }
#' }
#'
#' \dontshow{setwd(.old_wd)}
#'
#' @export
#'
plot.mcmcFD <- function(x, ROPE = NULL, ...) {

  if (!attr(x, "fullsims")) {
    stop("full simulations must be used to plot posterior distribution")
  }

  ROPE <- check_ROPE_argument(ROPE)

  ## multiply by 100 for percentage point change in output; the x axis is
  ## therefore already on the percentage-point scale and needs no further
  ## rescaling in the plot below
  x <- x * 100

  # convert x to long data frame
  fd_dat <- tidyr::gather(as.data.frame(x))

  # create first plot
  if(!is.null(ROPE)) {
    fd_plot <- ggplot2::ggplot(data = fd_dat,
                               ggplot2::aes(x = .data$value, y = .data$key)) +
      ggplot2::geom_rect(xmin = ROPE[1], xmax = ROPE[2], ymin = 0, ymax = Inf,
                         fill = "black") +
      ggridges::stat_density_ridges(quantile_lines = TRUE,
                                    quantiles = c(0.025, 0.5, 0.975),
                                    vline_color = "white") +
      ggplot2::xlab("Percentage point change in Pr(y = 1)") +
      ggplot2::ylab("")

    # calculate area left/right of ROPE
    fd_outROPE <- apply(x, 2,
                        function(x) ifelse(median(x) < 0,
                                           sum(x < ROPE[1]) / length(x),
                                           sum(x > ROPE[2]) / length(x)))
    fd_annotate <- data.frame(xpos = apply(x, 2,
                                           function(x) ifelse(median(x) < 0,
                                                              quantile(x, probs = 0.01) - 0.02,
                                                              quantile(x, probs = 0.99) + 0.02)),
                              ypos = as.factor(colnames(x)),
                              outROPE = paste(round(fd_outROPE * 100, digits = 1),
                                              "%", sep = ""))

    # final plot
    fd_plot <- fd_plot +
      ggplot2::geom_text(data = fd_annotate,
                         ggplot2::aes(x = .data$xpos, y = .data$ypos,
                                      label = .data$outROPE),
                         color = "black", nudge_y = 0.1, size = 4) +
      ggplot2::theme(...)
  } else {

    fd_plot <- ggplot2::ggplot(data = fd_dat,
                               ggplot2::aes(x = .data$value, y = .data$key)) +
      ggplot2::geom_vline(xintercept = 0) +
      ggridges::stat_density_ridges(quantile_lines = TRUE,
                                    quantiles = c(0.025, 0.5, 0.975),
                                    vline_color = "white") +
      ggplot2::xlab("Percentage point change in Pr(y = 1)") +
      ggplot2::ylab("")

    # calculate area left/right of 0
    fd_out0 <- apply(x, 2,
                     function(x) ifelse(median(x) < 0,
                                        sum(x < 0) / length(x),
                                        sum(x > 0) / length(x)))
    fd_annotate <- data.frame(xpos = apply(x, 2,
                                           function(x) ifelse(median(x) < 0,
                                                              quantile(x, probs = 0.01) - 0.02,
                                                              quantile(x, probs = 0.99) + 0.02)),
                              ypos = as.factor(colnames(x)),
                              out0 = paste(round(fd_out0 * 100, digits = 1),
                                           "%", sep = ""))

    # final plot
    fd_plot <- fd_plot +
      ggplot2::geom_text(data = fd_annotate,
                         ggplot2::aes(x = .data$xpos, y = .data$ypos,
                                      label = .data$out0),
                         color = "black", nudge_y = 0.1, size = 4) +
      ggplot2::theme(...)
  }

  fd_plot
}

## mcmcFDplot - deprecated

#'@title Plot First Differences from MCMC output
#'@description R function to plot first differences generated from MCMC output.
#'For more on this method, see the documentation for \code{mcmcFD()}, Long (1997,
#'Sage Publications), and King, Tomz, and Wittenberg (2000, American Journal
#'of Political Science 44(2): 347-361). For a description of this type of plot,
#'see Figure 1 in Karreth (2018, International Interactions 44(3): 463-90).
#'@param fdfull Output generated from \code{mcmcFD(..., fullsims = TRUE)}.
#'@param ROPE defaults to NULL. If not NULL, a numeric vector of length two,
#'defining the Region of Practical Equivalence around 0. See Kruschke (2013, Journal of
#'Experimental Psychology 143(2): 573-603) for more on the ROPE.
#'@references
#'\itemize{
#'\item Karreth, Johannes. 2018. “The Economic Leverage of International Organizations in Interstate Disputes.”
#'International Interactions 44 (3): 463-90. https://doi.org/10.1080/03050629.2018.1389728.
#'\item King, Gary, Michael Tomz, and Jason Wittenberg. 2000. “Making the Most of Statistical
#'Analyses: Improving Interpretation and Presentation.” American Journal of Political Science
#'44 (2): 347–61. http://www.jstor.org/stable/2669316.
#'\item Kruschke, John K. 2013. “Bayesian Estimation Supersedes the T-Test.” Journal of
#'Experimental Psychology: General 142 (2): 573–603. https://doi.org/10.1037/a0029146.
#'\item Long, J. Scott. 1997. Regression Models for Categorical and Limited Dependent Variables.
#'Thousand Oaks: Sage Publications.
#'}
#'@return a density plot of the differences in probabilities. The plot is made with ggplot2 and can be
#'passed on as an object to customize. Annotated numbers show the percent of posterior draws with
#'the same sign as the median estimate (if \code{ROPE = NULL}) or on the same side of the
#'ROPE as the median estimate (if \code{ROPE} is specified).
#' #'@examples #' \dontshow{.old_wd <- setwd(tempdir())} #' \donttest{ #' if (interactive()) { #' ## simulating data #' set.seed(1234) #' b0 <- 0.2 # true value for the intercept #' b1 <- 0.5 # true value for first beta #' b2 <- 0.7 # true value for second beta #' n <- 500 # sample size #' X1 <- runif(n, -1, 1) #' X2 <- runif(n, -1, 1) #' Z <- b0 + b1 * X1 + b2 * X2 #' pr <- 1 / (1 + exp(-Z)) # inv logit function #' Y <- rbinom(n, 1, pr) #' df <- data.frame(cbind(X1, X2, Y)) #' #' ## formatting the data for jags #' datjags <- as.list(df) #' datjags$N <- length(datjags$Y) #' #' ## creating jags model #' model <- function() { #' #' for(i in 1:N){ #' Y[i] ~ dbern(p[i]) ## Bernoulli distribution of y_i #' logit(p[i]) <- mu[i] ## Logit link function #' mu[i] <- b[1] + #' b[2] * X1[i] + #' b[3] * X2[i] #' } #' #' for(j in 1:3){ #' b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity #' } #' #' } #' #' params <- c("b") #' inits1 <- list("b" = rep(0, 3)) #' inits2 <- list("b" = rep(0, 3)) #' inits <- list(inits1, inits2) #' #' ## fitting the model with R2jags #' set.seed(123) #' fit <- R2jags::jags(data = datjags, inits = inits, #' parameters.to.save = params, n.chains = 2, n.iter = 2000, #' n.burnin = 1000, model.file = model) #' #' ## preparing data for mcmcFD() #' xmat <- model.matrix(Y ~ X1 + X2, data = df) #' mcmc <- coda::as.mcmc(fit) #' mcmc_mat <- as.matrix(mcmc)[, 1:ncol(xmat)] #' #' ## plotting with mcmcFDplot() #' full <- mcmcFD(modelmatrix = xmat, #' mcmcout = mcmc_mat, #' fullsims = TRUE) #' # suppress deprecated warning for R check #' suppressWarnings(mcmcFDplot(full)) #' #' } #' } #' #' \dontshow{setwd(.old_wd)} #' #' @name mcmcFDplot-deprecated #' @usage mcmcFDplot(fdfull, ROPE = NULL) #' @seealso \code{\link{BayesPostEst-deprecated}} #' @seealso \code{\link{mcmcFD}} #' @keywords internal #' #' @rdname BayesPostEst-deprecated #' @section \code{mcmcFDplot}: #' For \code{mcmcFDplot}, use \code{\link{plot.mcmcFD}}. 
#'
#' @export
#'
mcmcFDplot <- function(fdfull, ROPE = NULL){

  .Deprecated("plot.mcmcFD", package = "BayesPostEst")

  ROPE <- check_ROPE_argument(ROPE)

  ## multiply by 100 for percentage point change in output; the x axis is
  ## therefore already on the percentage-point scale and needs no further
  ## rescaling in the plots below
  fdfull <- fdfull * 100

  fd_dat <- tidyr::gather(as.data.frame(fdfull))

  # create first plot
  if(!is.null(ROPE)) {
    fd_plot <- ggplot2::ggplot(data = fd_dat,
                               ggplot2::aes(x = .data$value, y = .data$key)) +
      ggplot2::geom_rect(xmin = ROPE[1], xmax = ROPE[2], ymin = 0, ymax = Inf,
                         fill = "black") +
      ggridges::stat_density_ridges(quantile_lines = TRUE,
                                    quantiles = c(0.025, 0.5, 0.975),
                                    vline_color = "white") +
      ggplot2::xlab("Percentage point change in Pr(y = 1)") +
      ggplot2::ylab("")

    # calculate area left/right of ROPE
    fd_outROPE <- apply(fdfull, 2,
                        function(x) ifelse(median(x) < 0,
                                           sum(x < ROPE[1]) / length(x),
                                           sum(x > ROPE[2]) / length(x)))
    fd_annotate <- data.frame(xpos = apply(fdfull, 2,
                                           function(x) ifelse(median(x) < 0,
                                                              quantile(x, probs = 0.01) - 0.02,
                                                              quantile(x, probs = 0.99) + 0.02)),
                              ypos = as.factor(colnames(fdfull)),
                              outROPE = paste(round(fd_outROPE * 100, digits = 1),
                                              "%", sep = ""))

    # final plot
    fd_plot <- fd_plot +
      ggplot2::geom_text(data = fd_annotate,
                         ggplot2::aes(x = .data$xpos, y = .data$ypos,
                                      label = .data$outROPE),
                         color = "black", nudge_y = 0.1, size = 4)

  } else {

    fd_plot <- ggplot2::ggplot(data = fd_dat,
                               ggplot2::aes(x = .data$value, y = .data$key)) +
      ggplot2::geom_vline(xintercept = 0) +
      ggridges::stat_density_ridges(quantile_lines = TRUE,
                                    quantiles = c(0.025, 0.5, 0.975),
                                    vline_color = "white") +
      ggplot2::xlab("Percentage point change in Pr(y = 1)") +
      ggplot2::ylab("")

    # calculate area left/right of 0
    fd_out0 <- apply(fdfull, 2,
                     function(x) ifelse(median(x) < 0,
                                        sum(x < 0) / length(x),
                                        sum(x > 0) / length(x)))
    fd_annotate <- data.frame(xpos = apply(fdfull, 2,
                                           function(x) ifelse(median(x) < 0,
                                                              quantile(x, probs = 0.01) - 0.02,
                                                              quantile(x, probs = 0.99) + 0.02)),
                              ypos = as.factor(colnames(fdfull)),
                              out0 = paste(round(fd_out0 * 100, digits = 1),
                                           "%", sep = ""))

    # final plot
    fd_plot <- fd_plot +
      ggplot2::geom_text(data = fd_annotate,
                         ggplot2::aes(x = .data$xpos, y = .data$ypos,
                                      label = .data$out0),
                         color = "black", nudge_y = 0.1, size = 4)
  }

  fd_plot
}
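## Sketch: a ROPE of +/- 1 percentage point around zero for the plot method.
## As the code above is written, the ROPE bounds are compared against first
## differences that have already been rescaled to percentage points, so the
## bounds here are assumed to be on that scale. 'xmat' and 'mcmc_mat' are
## taken from the examples above.
if (FALSE) {
  full <- mcmcFD(modelmatrix = xmat, mcmcout = mcmc_mat, fullsims = TRUE)
  plot(full, ROPE = c(-1, 1))
}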
## end of BayesPostEst/R/mcmcFD.R
#' Marginal Effects Plots for MCMC Output
#'
#' Marginal effects plots for MCMC output using \code{ggplot2}
#'
#' @param mod Bayesian model object generated by R2jags, rjags, R2WinBUGS, R2OpenBUGS,
#' MCMCpack, rstan, rstanarm, and brms.
#' @param main a character with the name of the parameter of interest in the
#' interaction term.
#' @param int a character with the name of the moderating parameter in the
#' interaction term.
#' @param moderator a vector of values that the moderating parameter takes on
#' in the data.
#' @param pointest a character indicating whether to use the mean or median for
#' point estimates in the plot.
#' @param seq a numeric giving the number of moderator values used to generate
#' the marginal effects plot.
#' @param ci a scalar indicating the confidence level of the uncertainty intervals.
#' @param hpdi a logical indicating whether to use highest posterior density intervals
#' or equal tailed credible intervals to capture uncertainty.
#' @param plot logical indicating whether to return a \code{ggplot} object or the
#' underlying tidy data frame. By default, \code{mcmcMargEff} returns a line and
#' ribbon plot for continuous variables, and a dot and line plot for factor
#' variables and discrete variables with fewer than 25 unique values.
#' @param xlab character giving x axis label if \code{plot = TRUE}, default \code{"Moderator"}
#' @param ylab character giving y axis label if \code{plot = TRUE}, default \code{"Marginal Effect"}
#'
#' @return a \code{ggplot} object or a tidy data frame.
#'
#' @author Rob Williams, \email{[email protected]}
#'
#' @examples
#' \dontshow{.old_wd <- setwd(tempdir())}
#' \donttest{
#' if (interactive()) {
#' ## simulating data
#' set.seed(123456)
#' b0 <- 0.2 # true value for the intercept
#' b1 <- 0.5 # true value for first beta
#' b2 <- 0.7 # true value for second beta
#' n <- 500 # sample size
#' X1 <- runif(n, -1, 1)
#' X2 <- runif(n, -1, 1)
#' Z <- b0 + b1 * X1 + b2 * X2
#'
#' ## linear model data
#' Y_linear <- rnorm(n, Z, 1)
#' df <- data.frame(cbind(X1, X2, Y = Y_linear))
#'
#' ## formatting the data for jags
#' datjags <- as.list(df)
#' datjags$N <- length(datjags$Y)
#'
#' ## creating jags model
#' model <- function() {
#'
#'   for(i in 1:N){
#'     Y[i] ~ dnorm(mu[i], sigma) ## Normal distribution of y_i (dnorm in JAGS takes a precision)
#'
#'     mu[i] <- b[1] +
#'       b[2] * X1[i] +
#'       b[3] * X2[i] +
#'       b[4] * X1[i] * X2[i]
#'
#'   }
#'
#'   for(j in 1:4){
#'     b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity
#'   }
#'
#'   sigma ~ dexp(1)
#'
#' }
#'
#' params <- c("b")
#' inits1 <- list("b" = rep(0, 4))
#' inits2 <- list("b" = rep(0, 4))
#' inits <- list(inits1, inits2)
#'
#' ## fitting the model with R2jags
#' set.seed(123)
#' fit <- R2jags::jags(data = datjags, inits = inits,
#'                     parameters.to.save = params, n.chains = 2, n.iter = 2000,
#'                     n.burnin = 1000, model.file = model)
#'
#' mcmcMargEff(mod = fit,
#'             main = 'b[2]',
#'             int = 'b[4]',
#'             moderator = df$X2, # moderator values from the data used to fit the model
#'             plot = TRUE)
#' }
#' }
#'
#' \dontshow{setwd(.old_wd)}
#' @export
#'
mcmcMargEff <- function(mod, main, int, moderator, pointest = 'mean', seq = 100,
                        ci = .95, hpdi = FALSE, plot = TRUE,
                        xlab = 'Moderator', ylab = 'Marginal Effect') {

  ## pull in unexported functions from other packages
  ## other options for future versions might include lifting this and adding authors as copr holders
  runjags.as.mcmc.list.runjags = getFromNamespace("as.mcmc.list.runjags", "runjags")
  coda.hpdinterval.mcmc = getFromNamespace("HPDinterval.mcmc", "coda")

  if (inherits(mod, what = c("jags", "rjags"))) {
samps <- as.matrix(coda::as.mcmc(mod)) } if (inherits(mod, what = "bugs")) { samps <- mod$sims.matrix } if (inherits(mod, what = "runjags")) { samps <- as.matrix(runjags.as.mcmc.list.runjags(mod)) } if (inherits(mod, what = c("mcmc", "mcmc.list", "stanfit", "stanreg", "brmsfit"))) { samps <- as.matrix(mod) } samps <- samps[, c(main, int)] ## expand moderating variable to range of values if(!is.factor(moderator) & all(unique(moderator) %% 1 != 0)) { mod_range <- seq(min(moderator), max(moderator), length.out = seq) categorical <- F } else if ((is.factor(moderator) | all(unique(moderator) %% 1 == 0)) & length(unique(moderator)) >= 25) { mod_range <- seq(min(moderator), max(moderator), length.out = seq) categorical <- F } else if (is.factor(moderator) | all(unique(moderator) %% 1 == 0)) { mod_range <- sort(unique(moderator)) seq = length(mod_range) categorical <- T } ## compute marginal effect for each sample marg <- rep(samps[, 1], seq) + samps[, 2] %o% mod_range if (pointest == 'mean') { marg_pe <- apply(marg, 2, mean) } else if (pointest == 'median') { marg_pe <- apply(marg, 2, median) } else { stop("pointest must be either 'mean' or 'median'") } ## calculate marginal effect for mean if (!hpdi) { marg_ci<- t(apply(marg, 2, quantile, probs = c(.5 - ci/2, .5 + ci/2))) } else if (hpdi) { marg_ci <- t(apply(marg, 2, coda.hpdinterval.mcmc, prob = ci)) } else { stop("hpdi must be either true or false") } ## create dataframe for plotting marg_gg <- data.frame(mod = mod_range, pe = marg_pe, lo = marg_ci[, 1], hi = marg_ci[, 2]) ## return marginal effects plot or underlying dataframe if (!plot) { marg_gg } else { if (categorical) { ggplot2::ggplot(data = marg_gg, ggplot2::aes(x = .data$mod, y = .data$pe, ymin = .data$lo, ymax = .data$hi)) + ggplot2::geom_linerange() + ggplot2::geom_hline(yintercept = 0, lty = 2, color = 'gray40', lwd = .5) + ggplot2::geom_point() + ggplot2::labs(x = xlab, y = ylab) } else { ggplot2::ggplot(data = marg_gg, ggplot2::aes(x = .data$mod, y = .data$pe, ymin = .data$lo, ymax = .data$hi)) + ggplot2::geom_ribbon(alpha = .25) + ggplot2::geom_hline(yintercept = 0, lty = 2, color = 'gray40', lwd = .5) + ggplot2::geom_line() + ggplot2::labs(x = xlab, y = ylab) } } }
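## Sketch of the quantity mcmcMargEff() computes: for a linear predictor
## b1*x + b2*z + b3*x*z, the marginal effect of x at moderator value z is
## b1 + b3*z for every posterior draw. 'samps' here is a hypothetical
## draws-by-parameters matrix with columns named as in the example model.
if (FALSE) {
  z_seq <- seq(-1, 1, length.out = 5)
  me_draws <- samps[, "b[2]"] + samps[, "b[4]"] %o% z_seq # n_draws x 5 matrix
  colMeans(me_draws)                                      # point estimates over z
}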
## end of BayesPostEst/R/mcmcMargEff.R
#'This function calculates predicted probabilities for #'"observed" cases after a Bayesian logit or probit model #'following Hanmer and Kalkan (2013, American Journal of #'Political Science 57(1): 263-277) #'@title Predicted Probabilities using Bayesian MCMC estimates for the Average of Observed Cases #'@description Implements R function to calculate the predicted probabilities #'for "observed" cases after a Bayesian logit or probit model, following #'Hanmer & Kalkan (2013) (2013, American Journal of Political Science 57(1): 263-277). #'@param modelmatrix model matrix, including intercept (if the intercept is among the #'parameters estimated in the model). Create with model.matrix(formula, data). #'Note: the order of columns in the model matrix must correspond to the order of columns #'in the matrix of posterior draws in the \code{mcmcout} argument. See the \code{mcmcout} #'argument for more. #'@param mcmcout posterior distributions of all logit coefficients, #'in matrix form. This can be created from rstan, MCMCpack, R2jags, etc. and transformed #'into a matrix using the function as.mcmc() from the coda package for \code{jags} class #'objects, as.matrix() from base R for \code{mcmc}, \code{mcmc.list}, \code{stanreg}, and #'\code{stanfit} class objects, and \code{object$sims.matrix} for \code{bugs} class objects. #'Note: the order of columns in this matrix must correspond to the order of columns #'in the model matrix. One can do this by examining the posterior distribution matrix and sorting the #'variables in the order of this matrix when creating the model matrix. A useful function for sorting #'column names containing both characters and numbers as #'you create the matrix of posterior distributions is \code{mixedsort()} from the gtools package. #'@param xcol column number of the posterior draws (\code{mcmcout}) and model matrices #'that corresponds to the explanatory variable for which to calculate associated Pr(y = 1). #'Note that the columns in these matrices must match. #'@param xrange name of the vector with the range of relevant values of the #'explanatory variable for which to calculate associated Pr(y = 1). #'@param xinterest semi-optional argument. Name of the explanatory variable for which #'to calculate associated Pr(y = 1). If \code{xcol} is supplied, this is not needed. #'If both are supplied, the function defaults to \code{xcol} and this argument is ignored. #'@param link type of generalized linear model; a character vector set to \code{"logit"} (default) #'or \code{"probit"}. #'@param ci the bounds of the credible interval. Default is \code{c(0.025, 0.975)} for the 95\% #'credible interval. #'@param fullsims logical indicator of whether full object (based on all MCMC draws #'rather than their average) will be returned. Default is \code{FALSE}. Note: The longer #'\code{xrange} is, the larger the full output will be if \code{TRUE} is selected. #'@references Hanmer, Michael J., & Ozan Kalkan, K. (2013). Behind the curve: Clarifying #'the best approach to calculating predicted probabilities and marginal effects from #'limited dependent variable models. American Journal of Political Science, 57(1), #'263-277. 
https://doi.org/10.1111/j.1540-5907.2012.00602.x #'@return if \code{fullsims = FALSE} (default), a tibble with 4 columns: #'\itemize{ #'\item x: value of variable of interest, drawn from \code{xrange} #'\item median_pp: median predicted Pr(y = 1) when variable of interest is set to x #'\item lower_pp: lower bound of credible interval of predicted probability at given x #'\item upper_pp: upper bound of credible interval of predicted probability at given x #'} #'if \code{fullsims = TRUE}, a tibble with 3 columns: #'\itemize{ #'\item Iteration: number of the posterior draw #'\item x: value of variable of interest, drawn from \code{xrange} #'\item pp: average predicted Pr(y = 1) of all observed cases when variable of interest is set to x #'} #'@examples #' \dontshow{.old_wd <- setwd(tempdir())} #' \donttest{ #' if (interactive()) { #' ## simulating data #' set.seed(12345) #' b0 <- 0.2 # true value for the intercept #' b1 <- 0.5 # true value for first beta #' b2 <- 0.7 # true value for second beta #' n <- 500 # sample size #' X1 <- runif(n, -1, 1) #' X2 <- runif(n, -1, 1) #' Z <- b0 + b1 * X1 + b2 * X2 #' pr <- 1 / (1 + exp(-Z)) # inv logit function #' Y <- rbinom(n, 1, pr) #' df <- data.frame(cbind(X1, X2, Y)) #' #' ## formatting the data for jags #' datjags <- as.list(df) #' datjags$N <- length(datjags$Y) #' #' ## creating jags model #' model <- function() { #' #' for(i in 1:N){ #' Y[i] ~ dbern(p[i]) ## Bernoulli distribution of y_i #' logit(p[i]) <- mu[i] ## Logit link function #' mu[i] <- b[1] + #' b[2] * X1[i] + #' b[3] * X2[i] #' } #' #' for(j in 1:3){ #' b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity #' } #' #'} #' #' params <- c("b") #' inits1 <- list("b" = rep(0, 3)) #' inits2 <- list("b" = rep(0, 3)) #' inits <- list(inits1, inits2) #' #' ## fitting the model with R2jags #' library(R2jags) #' set.seed(123) #' fit <- jags(data = datjags, inits = inits, #' parameters.to.save = params, n.chains = 2, n.iter = 2000, #' n.burnin = 1000, model.file = model) #' #' ### observed value approach #' library(coda) #' xmat <- model.matrix(Y ~ X1 + X2, data = df) #' mcmc <- as.mcmc(fit) #' mcmc_mat <- as.matrix(mcmc)[, 1:ncol(xmat)] #' X1_sim <- seq(from = min(datjags$X1), #' to = max(datjags$X1), #' length.out = 10) #' obs_prob <- mcmcObsProb(modelmatrix = xmat, #' mcmcout = mcmc_mat, #' xrange = X1_sim, #' xcol = 2) #' } #' } #' #' \dontshow{setwd(.old_wd)} #'@export #' mcmcObsProb <- function(modelmatrix, mcmcout, xcol, xrange, xinterest, link = "logit", ci = c(0.025, 0.975), fullsims = FALSE){ # checking arguments if(missing(xcol) & missing(xinterest)) { stop("Please enter a column number or name of your variable of interest)") } if(!missing(xcol) & !missing(xinterest)) { message("Both xcol and xinterest were supplied by user. Function defaults to xcol") } if(!missing(xinterest)) { if(!(xinterest %in% variable.names(modelmatrix))) stop("Variable name does not match any in the matrix. 
Please enter another.") } X <- matrix(rep(t(modelmatrix), length(xrange)), ncol = ncol(modelmatrix), byrow = TRUE ) colnames(X) <- variable.names(modelmatrix) if(!missing(xcol)) { X[, xcol] <- sort(rep(xrange, times = nrow(X) / length(xrange))) } else { X[ , grepl( xinterest , variable.names( X ) ) ] <- sort(rep(xrange, times = nrow(X) / length(xrange))) } if(link == "logit"){ pp <- plogis(t(X %*% t(mcmcout))) } if(link == "probit"){ pp <- pnorm(t(X %*% t(mcmcout))) } # emptry matrix for PPs pp_mat <- matrix(NA, nrow = nrow(mcmcout), ncol = length(xrange)) # indices pp_mat_lowerindex <- 1 + (0:(length(xrange) - 1) * nrow(modelmatrix)) pp_mat_upperindex <- nrow(modelmatrix) + (0:(length(xrange) - 1) * nrow(modelmatrix)) # fill matrix with PPs, one for each value of the predictor of interest for(i in 1:length(xrange)){ pp_mat[, i] <- apply(X = pp[, c(pp_mat_lowerindex[i]:pp_mat_upperindex[i])], MARGIN = 1, FUN = function(x) mean(x)) } median_pp <- apply(X = pp_mat, MARGIN = 2, function(x) quantile(x, probs = c(0.5))) lower_pp <- apply(X = pp_mat, MARGIN = 2, function(x) quantile(x, probs = ci[1])) upper_pp <- apply(X = pp_mat, MARGIN = 2, function(x) quantile(x, probs = ci[2])) pp_dat <- dplyr::tibble(x = xrange, median_pp = median_pp, lower_pp = lower_pp, upper_pp = upper_pp) if(fullsims == FALSE){ return(pp_dat) # pp_dat was created by summarizing longFrame } if(fullsims == TRUE){ longFrame <- reshape2::melt(pp_mat, id.vars = .data$Var2) names(longFrame) <- c("Iteration", "x", "pp") return(longFrame) } }
## end of BayesPostEst/R/mcmcObsProb.R
#' @title LaTeX or HTML regression tables for MCMC Output #' @description This function creates LaTeX or HTML regression tables for MCMC Output using #' the \code{\link[texreg]{texreg}} function from the \code{\link[texreg:texreg-package]{texreg}} R package. #' @param mod Bayesian model object generated by R2jags, rjags, R2WinBUGS, R2OpenBUGS, #' MCMCpack, rstan, rstanarm, and brms, or a list of model objects of the same class. #' @param pars a scalar or vector of the parameters you wish to include in the table. #' By default, \code{mcmcReg} includes all parameters saved in a model object. If a #' model has lots of samples and lots of saved parameters, not explicitly specifying #' a limited number of parameters to include via \code{pars} may take a long time. #' \code{pars} can either be a vector with the specific parameters to be included #' in the table e.g. \code{pars = c("beta[1]", "beta[2]", "beta[3]")}, or they can #' be partial names that will be matched using regular expressions e.g. #' \code{pars = "beta"} if \code{regex = TRUE}. Both of these will include #' \code{beta[1]}, \code{beta[2]}, and \code{beta[3]} in the table. When #' combining models with different parameters in one table, this argument also #' accepts a list the length of the number of models. #' @param pointest a character indicating whether to use the mean or median for #' point estimates in the table. #' @param ci a scalar indicating the confidence level of the uncertainty intervals. #' @param hpdi a logical indicating whether to use highest posterior density #' intervals instead of equal tailed credible intervals to capture uncertainty #' (default \code{FALSE}). #' @param sd a logical indicating whether to report the standard deviation of #' posterior distributions instead of an uncertainty interval #' (default \code{FALSE}). If \code{TRUE}, overrides \code{ci}, \code{hpdi}, and #' \code{pr}. #' @param pr a logical indicating whether to report the probability that a #' coefficient is in the same direction as the point estimate for that #' coefficient (default \code{FALSE}). If \code{TRUE}, overrides \code{ci} and #' \code{hpdi}. #' @param coefnames an optional vector or list of vectors containing parameter #' names for each model. If there are multiple models, the list must have the same #' number of elements as there are models, and the vector of names in each list #' element must match the number of parameters. If not supplied, the function #' will use the parameter names in the model object(s). Note that this replaces #' the standard \code{custom.coef.names} argument in \code{\link[texreg]{texreg}} #' because there is no \code{extract} method for MCMC model objects, and many #' MCMC model objects do not have unique parameter names. #' @param gof a named list of goodness of fit statistics, or a list of such lists. #' @param gofnames an optional vector or list of vectors containing #' goodness of fit statistic names for each model. Like \code{coefnames} in this function #' (which replaces the \code{custom.coef.names} argument in \code{\link[texreg]{texreg}}), #' \code{gofnames} replaces the standard \code{custom.gof.names} argument in #' \code{\link[texreg]{texreg}}. If #' there are multiple models, the list must have the same number of elements as #' there are models, and the vector of names in each list element must match the #' number of goodness of fit statistics. #' @param format a character indicating \code{latex} or \code{html} output. 
#' @param file optional file name to write table to file instead of printing to #' console. #' @param regex use regular expression matching with \code{pars}? #' @param ... optional arguments to \code{\link[texreg]{texreg}}. #' #' @details If using \code{custom.coef.map} with more than one model, you should rename #' the parameters in the model objects to ensure that different parameters with the #' same subscript are not conflated by \code{texreg} e.g. \code{beta[1]} could represent age #' in one model and income in another, and \code{texreg} would combine the two if you #' do not rename \code{beta[1]} to more informative names in the model objects. #' #' If \code{mod} is a \code{brmsfit} object or list of \code{brmsfit} objects, note that the #' default \code{brms} names for coefficients are \code{b_Intercept} and \code{b}, so both of #' these should be included in \code{par} if you wish to include the intercept in the #' table. #' #' @return A formatted regression table in LaTeX or HTML format. #' #' @author Rob Williams, \email{[email protected]} #' #' @examples #' \dontshow{.old_wd <- setwd(tempdir())} #' \donttest{ #' if (interactive()) { #' ## simulating data #' set.seed(123456) #' b0 <- 0.2 # true value for the intercept #' b1 <- 0.5 # true value for first beta #' b2 <- 0.7 # true value for second beta #' n <- 500 # sample size #' X1 <- runif(n, -1, 1) #' X2 <- runif(n, -1, 1) #' Z <- b0 + b1 * X1 + b2 * X2 #' pr <- 1 / (1 + exp(-Z)) # inv logit function #' Y <- rbinom(n, 1, pr) #' df <- data.frame(cbind(X1, X2, Y)) #' #' ## formatting the data for jags #' datjags <- as.list(df) #' datjags$N <- length(datjags$Y) #' #' ## creating jags model #' model <- function() { #' #' for(i in 1:N){ #' Y[i] ~ dbern(p[i]) ## Bernoulli distribution of y_i #' logit(p[i]) <- mu[i] ## Logit link function #' mu[i] <- b[1] + #' b[2] * X1[i] + #' b[3] * X2[i] #' } #' #' for(j in 1:3){ #' b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity #' } #' #' } #' #' params <- c("b") #' inits1 <- list("b" = rep(0, 3)) #' inits2 <- list("b" = rep(0, 3)) #' inits <- list(inits1, inits2) #' #' ## fitting the model with R2jags #' set.seed(123) #' fit <- R2jags::jags(data = datjags, inits = inits, #' parameters.to.save = params, #' n.chains = 2, #' n.iter = 2000, n.burnin = 1000, #' model.file = model) #' #' ## generating regression table with all parameters #' mcmcReg(fit) #' #' ## generating regression table with only betas and custom coefficent names #' mcmcReg(fit, pars = c('b'), coefnames = c('Variable 1', #' 'Variable 2', #' 'Variable 3'), #' regex = TRUE) #' ## generating regression tables with all betas and custom names #' mcmcReg(fit, coefnames = c('Variable 1', 'Variable 2', #' 'Variable 3', 'deviance')) #' } #' } #' #' \dontshow{setwd(.old_wd)} #' @export #' mcmcReg <- function(mod, pars = NULL, pointest = 'mean', ci = .95, hpdi = FALSE, sd = FALSE, pr = FALSE, coefnames = NULL, gof = numeric(0), gofnames = character(0), format = 'latex', file, regex = FALSE, ...) 
{ ## pull in unexported functions from other packages ## other options for future versions might include lifting this and adding authors as copr holders runjags.as.mcmc.list.runjags = getFromNamespace("as.mcmc.list.runjags", "runjags") ## if only one model object, coerce to a list if (all(class(mod) != 'list')) mod <- list(mod) ## check for heterogeneous model objects if (length(unique(lapply(mod, class))) > 1) stop('More than one object class supplied to argument "mod"') ## if only one custom coefficient names vector, coerce to a list if (!is.null(coefnames) & !is.list(coefnames)) coefnames <- list(coefnames) ## if only one parameter vector, coerce to a list if (class(pars) != 'list' & !is.null(pars)) pars <- list(pars) ## if only one gof statistic scalar or vector, coerce to a list if (class(gof) != 'list') gof <- list(rep(gof, times = length(mod))) ## if only one gof statistic name scalar or vector, coerce to a list if (class(gofnames) != 'list') gofnames <- list(gofnames) ## extract samples and variable names from jags or rjags objects if (lapply(mod, inherits, what = c('jags', 'rjags'))[[1]]) { ## extract posterior samples from list of model objects samps <- lapply(mod, function(x) as.matrix(coda::as.mcmc(x))) } ## extract samples and variable names from bugs object if (lapply(mod, inherits, what = 'bugs')[[1]]) { ## extract posterior samples from list of model objects samps <- lapply(mod, function(x) x$sims.matrix) } ## extract samples and variable names from runjags object if (lapply(mod, inherits, what = 'runjags')[[1]]) { samps <- lapply(mod, function(x) as.matrix(runjags.as.mcmc.list.runjags(x))) } ## extract samples and variable names from remaining objects if (lapply(mod, inherits, what = c("mcmc", "mcmc.list", "stanfit", "stanreg", "brmsfit"))[[1]]) { samps <- lapply(mod, function(x) as.matrix(x)) } ## limit samples to supplied parameters if (regex & !is.null(pars)) { samps <- mapply(function(x, y) x[, grepl(x = colnames(x), pattern = paste(y, collapse = '|'))], samps, pars, SIMPLIFY = FALSE) } else if (!is.null(pars)) { samps <- mapply(function(x, y) matrix(x[, y], nrow = nrow(x), dimnames = list(NULL, y)), samps, pars, SIMPLIFY = FALSE) } ## calculate point estimate of posterior density samps_pe <- lapply(samps, function(x) apply(as.matrix(x), 2, get(pointest))) ## calculate uncertainty interval for or standard deviation if (sd == TRUE) { samps_sd <- lapply(samps, function(x) apply(as.matrix(x), 2, sd)) } else if (pr == TRUE) { samps_sd <- lapply(samps, function(x) apply(as.matrix(x), 2, function(y) mean(sign(y) == sign(mean(y))))) } else if (hpdi == FALSE) { samps_ci <- lapply(samps, function(x) apply(as.matrix(x), 2, quantile, probs = c(.5 - ci/2, .5 + ci/2))) } else { samps_ci <- lapply(samps, function(x) t(coda::HPDinterval(coda::as.mcmc(x), prob = ci))) } ## if coefficent names supplied, replace names from model object(s) if (regex & is.null(coefnames)) { coefnames <- mapply(function(x, y) colnames(x)[grepl(x = colnames(x), pattern = paste(y, collapse = '|'))], samps, pars, SIMPLIFY = FALSE) } else if (is.null(coefnames)) { coefnames <- lapply(samps, colnames) } ## if (length(mod) != length(coefnames)) { stop('number of models does not match number of custom coefficient vectors') } ## create list of texreg object(s) with point estimates and interval if (sd == TRUE | pr == TRUE) { tr_list <- mapply(function(v, w, x, y, z) texreg::createTexreg(coef.names = v, coef = w, se = x, gof = y, gof.names = z), coefnames, samps_pe, samps_sd, gof, gofnames) } else { tr_list <- 
mapply(function(v, w, x, y, z) texreg::createTexreg(coef.names = v, coef = w, ci.low = x[1, ], ci.up = x[2, ], gof = y, gof.names = z), coefnames, samps_pe, samps_ci, gof, gofnames) } ## create LaTeX output if (grepl('tex$', format)) { ## create LaTeX code if (sd == TRUE) { tr <- texreg::texreg(l = tr_list, stars = NULL, ...) } else if (pr == TRUE) { tr <- texreg::texreg(l = tr_list, stars = NULL, ...) tr <- gsub('\\$\\(|\\)\\$', '$', tr) } else { tr <- texreg::texreg(l = tr_list, ...) ## replace confidence w/ credible or highest posterior density in texreg output if (hpdi == FALSE) { tr <- sub('outside the confidence interval', paste('outside ', ci * 100 ,'\\\\% credible interval', sep = ''), tr) } else { tr <- sub('outside the confidence interval', paste('outside ', ci * 100 ,'\\\\% highest posterior density interval', sep = ''), tr) } } ## return LaTeX code to console or write to file if (missing(file)) { return(tr) } else { ## remove newline at start of LaTeX code tr <- sub('^\\n', '', tr) tex_file <- file(paste(sub('\\.tex$', '', file), 'tex', sep = '.')) writeLines(tr, tex_file, sep = '') close(tex_file) } } ## create HTML output if (format == 'html') { if (sd == TRUE) { hr <- texreg::htmlreg(l = tr_list, stars = NULL, ...) } else if (pr == TRUE) { hr <- texreg::htmlreg(l = tr_list, stars = NULL, ...) hr <- gsub('>\\(([0-9]\\.[0-9]{2})\\)<', '>\\1<', hr) } else { hr <- texreg::htmlreg(l = tr_list, ...) ## replace confidence w/ credible or highest posterior density in texreg output if (hpdi == FALSE) { hr <- sub('outside the confidence interval', paste('outside ', ci * 100, '% credible interval', sep = ''), hr) } else { hr <- sub('outside the confidence interval', paste('outside ', ci * 100, '% highest posterior density interval', sep = ''), hr) } } ## return html code to console or write to file if (missing(file)) { return(hr) } else { html_file <- file(paste(sub('\\.html$', '', file), 'html', sep = '.')) writeLines(hr, html_file, sep = '') close(html_file) } } }
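## Sketch: adding a goodness-of-fit row and writing the table to disk. The
## DIC slot assumes the usual R2jags object structure; 'fit' is the example
## model from above.
if (FALSE) {
  mcmcReg(fit, pars = "b", regex = TRUE,
          gof = fit$BUGSoutput$DIC, gofnames = "DIC",
          format = "latex", file = "table1") # writes table1.tex
}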
## end of file: BayesPostEst/R/mcmcReg.R
# # Methods for class "mcmcRocPrc", generated by mcmcRocPrc() # #' @rdname mcmcRocPrc #' #' @export print.mcmcRocPrc <- function(x, ...) { auc_roc <- x$area_under_roc auc_prc <- x$area_under_prc has_curves <- !is.null(x$roc_dat) has_sims <- length(auc_roc) > 1 if (!has_sims) { roc_msg <- sprintf("%.3f", round(auc_roc, 3)) prc_msg <- sprintf("%.3f", round(auc_prc, 3)) } else { roc_msg <- sprintf("%.3f [80%%: %.3f - %.3f]", round(mean(auc_roc), 3), round(quantile(auc_roc, 0.1), 3), round(quantile(auc_roc, 0.9), 3)) prc_msg <- sprintf("%.3f [80%%: %.3f - %.3f]", round(mean(auc_prc), 3), round(quantile(auc_prc, 0.1), 3), round(quantile(auc_prc, 0.9), 3)) } cat("mcmcRocPrc object\n") cat(sprintf("curves: %s; fullsims: %s\n", has_curves, has_sims)) cat(sprintf("AUC-ROC: %s\n", roc_msg)) cat(sprintf("AUC-PR: %s\n", prc_msg)) invisible(x) } #' @rdname mcmcRocPrc #' #' @param n plot method: if `fullsims = TRUE`, how many sample curves to draw? #' @param alpha plot method: alpha value for plotting sampled curves; between 0 and 1 #' #' @export plot.mcmcRocPrc <- function(x, n = 40, alpha = .5, ...) { stopifnot( "Use mcmcRocPrc(..., curves = TRUE) to generate data for plots" = (!is.null(x$roc_dat)), "alpha must be between 0 and 1" = (alpha >= 0 & alpha <= 1), "n must be > 0" = (n > 0) ) obj<- x fullsims <- length(obj$roc_dat) > 1 if (!fullsims) { graphics::par(mfrow = c(1, 2)) plot(obj$roc_dat[[1]], type = "s", xlab = "FPR", ylab = "TPR") graphics::abline(a = 0, b = 1, lty = 3, col = "gray50") prc_dat <- obj$prc_dat[[1]] # use first non-NaN y-value for y[1] prc_dat$y[1] <- prc_dat$y[2] plot(prc_dat, type = "l", xlab = "TPR", ylab = "Precision", ylim = c(0, 1)) graphics::abline(a = attr(x, "y_pos_rate"), b = 0, lty = 3, col = "gray50") } else { graphics::par(mfrow = c(1, 2)) roc_dat <- obj$roc_dat x <- lapply(roc_dat, `[[`, 1) x <- do.call(cbind, x) colnames(x) <- paste0("sim", 1:ncol(x)) y <- lapply(roc_dat, `[[`, 2) y <- do.call(cbind, y) colnames(y) <- paste0("sim", 1:ncol(y)) xavg <- rowMeans(x) yavg <- rowMeans(y) plot(xavg, yavg, type = "n", xlab = "FPR", ylab = "TPR") samples <- sample(1:ncol(x), n) for (i in samples) { graphics::lines( x[, i], y[, i], type = "s", col = grDevices::rgb(127, 127, 127, alpha = alpha*255, maxColorValue = 255) ) } graphics::lines(xavg, yavg, type = "s") # PRC # The elements of prc_dat have different lengths, unlike roc_dat, so we # have to do the central curve differently. prc_dat <- obj$prc_dat x <- lapply(prc_dat, `[[`, 1) y <- lapply(prc_dat, `[[`, 2) # Instead of combining the list of curve coordinates from each sample into # two x and y matrices, we can first make a point cloud with all curve # points from all samples, and then average the y values at all distinct # x coordinates. The x-axis plots recall (TPR), which will only have as # many distinct values as there are positives in the data, so this does # not lose any information about the x coordinates. 
point_cloud <- data.frame( x = unlist(x), y = unlist(y) ) point_cloud <- stats::aggregate(point_cloud[, "y", drop = FALSE], # factor implicitly encodes distinct values only, # since they will get the same labels by = list(x = as.factor(point_cloud$x)), FUN = mean) point_cloud$x <- as.numeric(as.character(point_cloud$x)) xavg <- point_cloud$x yavg <- point_cloud$y plot(xavg, yavg, type = "n", xlab = "TPR", ylab = "Precision", ylim = c(0, 1)) samples <- sample(1:length(prc_dat), n) for (i in samples) { graphics::lines( x[[i]], y[[i]], col = grDevices::rgb(127, 127, 127, alpha = alpha*255, maxColorValue = 255) ) } graphics::lines(xavg, yavg) } invisible(x) } #' @rdname mcmcRocPrc #' #' @param row.names see [base::as.data.frame()] #' @param optional see [base::as.data.frame()] #' @param what which information to extract and convert to a data frame? #' #' @export as.data.frame.mcmcRocPrc <- function(x, row.names = NULL, optional = FALSE, what = c("auc", "roc", "prc"), ...) { what <- match.arg(what) if (what=="auc") { # all 4 output types have AUC, so this should work across the board return(as.data.frame(x[c("area_under_roc", "area_under_prc")])) } else if (what %in% c("roc", "prc")) { if (what=="roc") element <- "roc_dat" else element <- "prc_dat" # if curves was FALSE, there will be no curve data... if (is.null(x[[element]])) { stop("No curve data; use mcmcRocPrc(..., curves = TRUE)") } # Otherwise, there will be either one set of coordinates if mcmcmRegPrc() # was called with fullsims = FALSE, or else N_sims curve data sets. # If the latter, we can return a long data frame with an identifying # "sim" column to delineate the sim sets. To ensure consistency in output, # also add this column when fullsims = FALSE. # averaged, single coordinate set if (length(x[[element]])==1L) { return(data.frame(sim = 1L, x[[element]][[1]])) } # full sims # add a unique ID to each coordinate set outlist <- x[[element]] outlist <- Map(cbind, sim = (1:length(outlist)), outlist) # combine into long data frame outdf <- do.call(rbind, outlist) return(outdf) } stop("Developer error (I should not be here): please file an issue on GitHub") # nocov }
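## Usage sketch (not run) for the methods above, assuming `fit_sum` is a
## "mcmcRocPrc" object created with curves = TRUE (see ?mcmcRocPrc):
# print(fit_sum)                              # AUC-ROC / AUC-PR summary
# plot(fit_sum)                               # ROC and PR curves side by side
# head(as.data.frame(fit_sum, what = "roc"))  # curve coordinates, long format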
## end of file: BayesPostEst/R/mcmcRocPrc-methods.R
#
# This file contains the mcmcRocPrc() S3 generic, which constructs objects
# of class "mcmcRocPrc". For methods for this class, see mcmcRocPrc-methods.R
# S3 methods for the mcmcRocPrc() generic handle different types of input
# e.g. "rjags" input produced by R2jags.
#

#' ROC and Precision-Recall Curves using Bayesian MCMC estimates
#'
#' Generate ROC and Precision-Recall curves after fitting a Bayesian logit or
#' probit regression using [rstan::stan()], [rstanarm::stan_glm()],
#' [R2jags::jags()], [R2WinBUGS::bugs()], [MCMCpack::MCMClogit()], or other
#' functions that provide samples from a posterior density.
#'
#' @param object A fitted binary choice model, e.g. "rjags" object
#' (see [R2jags::jags()]), or a `[N, iter]` matrix of predicted probabilities.
#' @param curves logical indicator of whether or not to return values to plot
#' the ROC or Precision-Recall curves. If set to `FALSE` (default),
#' results are returned as a list without the extra values.
#' @param fullsims logical indicator of whether full object (based on all MCMC
#' draws rather than their average) will be returned. Default is `FALSE`.
#' Note: If `TRUE` is chosen, the function takes notably longer to execute.
#' @param yvec A `numeric(N)` vector of observed outcomes.
#' @param yname (`character(1)`)\cr
#' The name of the dependent variable, should match the variable name in the
#' JAGS data object.
#' @param xnames ([base::character()])\cr
#' A character vector of the independent variable names, should match the
#' corresponding names in the JAGS data object.
#' @param posterior_samples a "mcmc" object with the posterior samples
#' @param ... Used by methods
#' @param x a `mcmcRocPrc()` object
#'
#' @details If only the average AUC-ROC and PR are of interest, setting
#' `curves = FALSE` and `fullsims = FALSE` can greatly speed up calculation
#' time. The curve data (`curves = TRUE`) is needed for plotting. The plot
#' method will always plot both the ROC and PR curves, but the underlying
#' data can easily be extracted from the output for your own plotting;
#' see the documentation of the value returned below.
#'
#' The default method works with a matrix of predicted probabilities and the
#' vector of observed outcomes as input. Other methods accommodate some of the
#' common Bayesian modeling packages like rstan (which returns class "stanfit"),
#' rstanarm ("stanreg"), R2jags ("jags"), R2WinBUGS ("bugs"), and
#' MCMCpack ("mcmc"). Even if a package-specific method is not implemented,
#' the default method can always be used as a fallback by manually calculating
#' the matrix of predicted probabilities for each posterior sample.
#'
#' Note that MCMCpack returns generic "mcmc" output that is annotated with
#' some additional information as attributes, including the original function
#' call. There is no inherent way to distinguish any other kind of "mcmc"
#' object from one generated by a proper MCMCpack modeling function, but as a
#' basic precaution, `mcmcRocPrc()` will check the saved call and return an
#' error if the function called was not `MCMClogit()` or `MCMCprobit()`.
#' This behavior can be suppressed by setting `force = TRUE`.
#'
#' @references Beger, Andreas. 2016. “Precision-Recall Curves.” Available at
#' \doi{10.2139/ssrn.2765419}
#'
#' @return Returns a list with length 2 or 4, depending on the "curves"
#' and "fullsims" argument values:
#'
#' - "area_under_roc": `numeric()`; either length 1 if `fullsims = FALSE`, or
#'   one value for each posterior sample otherwise
#' - "area_under_prc": `numeric()`; either length 1 if `fullsims = FALSE`, or
#'   one value for each posterior sample otherwise
#' - "prc_dat": only if `curves = TRUE`; a list with length 1 if
#'   `fullsims = FALSE`, longer otherwise
#' - "roc_dat": only if `curves = TRUE`; a list with length 1 if
#'   `fullsims = FALSE`, longer otherwise
#'
#' @examples
#' \donttest{
#' if (interactive()) {
#' # load simulated data and fitted model (see ?sim_data and ?jags_logit)
#' data("jags_logit")
#'
#' # using mcmcRocPrc
#' fit_sum <- mcmcRocPrc(jags_logit,
#'                       yname = "Y",
#'                       xnames = c("X1", "X2"),
#'                       curves = TRUE,
#'                       fullsims = FALSE)
#' fit_sum
#' plot(fit_sum)
#'
#' # Equivalently, we can calculate the matrix of predicted probabilities
#' # ourselves; using the example from ?jags_logit:
#' library(R2jags)
#'
#' data("sim_data")
#' yvec <- sim_data$Y
#' xmat <- sim_data[, c("X1", "X2")]
#'
#' # add intercept to the X data
#' xmat <- as.matrix(cbind(Intercept = 1L, xmat))
#'
#' beta <- as.matrix(as.mcmc(jags_logit))[, c("b[1]", "b[2]", "b[3]")]
#' pred_mat <- plogis(xmat %*% t(beta))
#'
#' # the matrix of predictions has rows matching the number of rows in the data;
#' # the columns are the predictions for each of the 2,000 posterior samples
#' nrow(sim_data)
#' dim(pred_mat)
#'
#' # now we can call mcmcRocPrc; the default method works with the matrix
#' # of predictions and vector of outcomes as input
#' mcmcRocPrc(object = pred_mat, curves = TRUE, fullsims = FALSE, yvec = yvec)
#' }
#' }
#'
#' @export
#' @md
mcmcRocPrc <- function(object, curves = FALSE, fullsims = FALSE, ...) {
  UseMethod("mcmcRocPrc", object)
}

#' Constructor for mcmcRocPrc objects
#'
#' This function actually does the heavy lifting once we have a matrix of
#' predicted probabilities from a model, plus the vector of observed outcomes.
#' The reason to have it here in a single function is that we don't replicate
#' it in each function that accommodates a JAGS, BUGS, RStan, etc. object.
#'
#' @param pred_prob a `\[N, iter\]` matrix of predicted probabilities
#' @param yvec a `numeric(N)` vector of observed outcomes
#' @param curves include curve data in output?
#' @param fullsims keep the output for all posterior samples, rather than
#'   collapsing them into a single summary?
#'
#' @md
#' @keywords internal
new_mcmcRocPrc <- function(pred_prob, yvec, curves, fullsims) {
  stopifnot(
    "number of predictions and observed outcomes do not match" = nrow(pred_prob)==length(yvec),
    "yvec must be 0 or 1" = all(yvec %in% c(0L, 1L)),
    "pred_prob must be in the interval [0, 1]" = all(pred_prob >= 0 & pred_prob <= 1)
  )

  # pred_prob is a [N, iter] matrix, i.e.
  # each column contains the predictions from one set of posterior samples

  # if not using fullsims, summarize across columns
  if (isFALSE(fullsims)) {
    pred_prob <- as.matrix(apply(pred_prob, MARGIN = 1, median))
  }
  pred_prob <- as.data.frame(pred_prob)

  curve_data <- lapply(pred_prob, yy = yvec, FUN = function(x, yy) {
    prc_data <- compute_pr(yvec = yy, pvec = x)
    roc_data <- compute_roc(yvec = yy, pvec = x)
    list(
      prc_dat = prc_data,
      roc_dat = roc_data
    )
  })
  prc_dat <- lapply(curve_data, `[[`, "prc_dat")
  roc_dat <- lapply(curve_data, `[[`, "roc_dat")

  # Compute AUC-ROC values
  v_auc_roc <- sapply(roc_dat, function(xy) {
    caTools::trapz(xy$x, xy$y)
  })
  v_auc_pr <- sapply(prc_dat, function(xy) {
    xy <- subset(xy, !is.nan(xy$y))
    caTools::trapz(xy$x, xy$y)
  })

  # Recreate original output formats
  if (curves & fullsims) {
    out <- list(
      area_under_roc = v_auc_roc,
      area_under_prc = v_auc_pr,
      prc_dat = prc_dat,
      roc_dat = roc_dat
    )
  }
  if (curves & !fullsims) {
    out <- list(
      area_under_roc = v_auc_roc,
      area_under_prc = v_auc_pr,
      prc_dat = prc_dat[1],
      roc_dat = roc_dat[1]
    )
  }
  if (!curves & !fullsims) {
    out <- list(
      area_under_roc = v_auc_roc[[1]],
      area_under_prc = v_auc_pr[[1]]
    )
  }
  if (!curves & fullsims) {
    out <- data.frame(
      area_under_roc = v_auc_roc,
      area_under_prc = v_auc_pr
    )
  }

  structure(
    out,
    y_pos_rate = mean(yvec),
    class = "mcmcRocPrc"
  )
}

#' @rdname mcmcRocPrc
#'
#' @md
#' @export
mcmcRocPrc.default <- function(object, curves, fullsims, yvec, ...) {
  pred_prob <- object
  stopifnot(
    "mcmcRocPrc.default requires 'matrix' like input" = inherits(pred_prob, "matrix")
  )
  new_mcmcRocPrc(pred_prob, yvec, curves, fullsims)
}

# Under the hood ROC/PRC calculations -------------------------------------

#' Compute ROC and PR curve points
#'
#' Faster replacements for calculating ROC and PR curve data than with
#' [ROCR::prediction()] and [ROCR::performance()]
#'
#' @details Replacements to use instead of a combination of [ROCR::prediction()]
#' and [ROCR::performance()] to calculate ROC and PR curves. These functions are
#' about 10 to 20 times faster when using [mcmcRocPrc()] with `curves = TRUE`
#' and/or `fullsims = TRUE`.
#'
#' See this [issue on GH (ShanaScogin/BayesPostEst#25)](https://github.com/ShanaScogin/BayesPostEst/issues/25) for more general details.
#'
#' And [here is a note](https://github.com/andybega/BayesPostEst/blob/f1da23b9db86461d4f9c671d9393265dd10578c5/tests/profile-mcmcRocPrc.md) with specific performance benchmarks, compared to the
#' old approach relying on ROCR.
#'
#' @keywords internal
#' @md
compute_roc <- function(yvec, pvec) {
  porder <- order(pvec, decreasing = TRUE)
  yvecs <- yvec[porder]
  pvecs <- pvec[porder]
  p <- sum(yvecs)
  n <- length(yvecs) - p
  tp <- cumsum(yvecs)
  tpr <- tp/p
  fp <- 1:length(yvecs) - tp
  fpr <- fp/n

  dup_pred <- rev(duplicated(pvecs))
  dup_stats <- duplicated(tpr) & duplicated(fpr)
  dups <- dup_pred | dup_stats

  fpr <- c(0, fpr[!dups])
  tpr <- c(0, tpr[!dups])

  roc_data <- data.frame(x = fpr, y = tpr)
  roc_data
}

#' @rdname compute_roc
#' @aliases compute_pr
compute_pr <- function(yvec, pvec) {
  porder <- order(pvec, decreasing = TRUE)
  yvecs <- yvec[porder]
  pvecs <- pvec[porder]
  p <- sum(yvecs)
  n <- length(yvecs) - p
  tp <- cumsum(yvecs)
  tpr <- tp/p
  pp <- 1:length(yvecs)
  prec <- tp/pp

  dup_pred <- rev(duplicated(pvecs))
  dup_stats <- duplicated(tpr) & duplicated(prec)
  dups <- dup_pred | dup_stats

  prec <- c(NaN, prec[!dups])
  tpr <- c(0, tpr[!dups])

  prc_data <- data.frame(x = tpr, y = prec)
  prc_data
}

# auc_roc and auc_pr are not really used, but keep around just in case
auc_roc <- function(obs, pred) {
  values <- compute_roc(obs, pred)
  caTools::trapz(values$x, values$y)
}

auc_pr <- function(obs, pred) {
  values <- compute_pr(obs, pred)
  caTools::trapz(values$x, values$y)
}

# JAGS-like input (rjags, R2jags, runjags) --------------------------------

#' @rdname mcmcRocPrc
#'
#' @export
mcmcRocPrc.jags <- function(object, curves = FALSE, fullsims = FALSE, yname, xnames, posterior_samples, ...) {
  stopifnot(
    inherits(posterior_samples, c("mcmc", "mcmc.list"))
  )

  link_logit <- any(grepl("logit", object$model()))
  link_probit <- any(grepl("probit", object$model()))
  if (isFALSE(link_logit | link_probit)) {
    stop("Could not identify model link function")
  }

  mdl_data <- object$data()
  stopifnot(all(xnames %in% names(mdl_data)))
  stopifnot(all(yname %in% names(mdl_data)))

  # add intercept by default, maybe revisit this
  xdata <- as.matrix(cbind(X0 = 1L, as.data.frame(mdl_data[xnames])))
  yvec <- mdl_data[[yname]]

  pardraws <- as.matrix(posterior_samples)
  # this is not very robust, assumes pars are 'b[x]'
  # for both this and the intercept addition above, maybe a more robust solution
  # down the road would be to dig into the object$model$model() string
  betadraws <- pardraws[, c(sprintf("b[%s]", 1:ncol(xdata)))]

  if(isTRUE(link_logit)) {
    pred_prob <- plogis(xdata %*% t(betadraws))
  } else if (isTRUE(link_probit)) {
    pred_prob <- pnorm(xdata %*% t(betadraws))
  }

  new_mcmcRocPrc(pred_prob = pred_prob, yvec = yvec, curves = curves, fullsims = fullsims)
}

#' @rdname mcmcRocPrc
#'
#' @export
mcmcRocPrc.rjags <- function(object, curves = FALSE, fullsims = FALSE, yname, xnames, ...) {
  if (!requireNamespace("R2jags", quietly = TRUE)) {
    stop("Package \"R2jags\" is needed for this function to work. Please install it.", call. = FALSE)  # nocov
  }

  jags_object <- object$model
  pardraws <- coda::as.mcmc(object)

  # pass it on to the "jags" method
  mcmcRocPrc(object = jags_object, curves = curves, fullsims = fullsims, yname = yname,
             xnames = xnames, posterior_samples = pardraws, ...)
}

#' @rdname mcmcRocPrc
#'
#' @export
mcmcRocPrc.runjags <- function(object, curves = FALSE, fullsims = FALSE, yname, xnames, ...) {
  jags_object <- runjags::as.jags(object, quiet = TRUE)
  # as.mcmc.runjags will issue a warning when converting multiple chains
  # because it combines them
  pardraws <- suppressWarnings(coda::as.mcmc(object))

  # pass it on to the "jags" method
  mcmcRocPrc(object = jags_object, curves = curves, fullsims = fullsims, yname = yname,
             xnames = xnames, posterior_samples = pardraws, ...)
}

# STAN-like input (rstan, rstanarm, brms) ---------------------------------

#' @rdname mcmcRocPrc
#'
#' @param data the data that was used in the `stan(data = ?, ...)` call
#'
#' @export
mcmcRocPrc.stanfit <- function(object, curves = FALSE, fullsims = FALSE, data, xnames, yname, ...) {
  if (!requireNamespace("rstan", quietly = TRUE)) {
    stop("Package \"rstan\" is needed for this function to work. Please install it.", call. = FALSE)  # nocov
  }

  if (!is_binary_model(object)) {
    stop("the input model does not seem to be a binary choice model; if this is a mistake please file an issue at https://github.com/ShanaScogin/BayesPostEst/issues/")
  }
  link_type <- identify_link_function(object)
  if (is.na(link_type)) {
    stop("could not identify model link function; please file an issue at https://github.com/ShanaScogin/BayesPostEst/issues/")
  }

  mdl_data <- data
  stopifnot(all(xnames %in% names(mdl_data)))
  stopifnot(all(yname %in% names(mdl_data)))

  # add intercept by default, maybe revisit this
  xdata <- as.matrix(cbind(X0 = 1L, as.data.frame(mdl_data[xnames])))
  yvec <- mdl_data[[yname]]

  pardraws <- as.matrix(object)
  # this is not very robust, assumes pars are 'b[x]'
  betadraws <- pardraws[, c(sprintf("b[%s]", 1:ncol(xdata)))]

  if(link_type=="logit") {
    pred_prob <- plogis(xdata %*% t(betadraws))
  } else if (link_type=="probit") {
    pred_prob <- pnorm(xdata %*% t(betadraws))
  }

  new_mcmcRocPrc(pred_prob = pred_prob, yvec = yvec, curves = curves, fullsims = fullsims)
}

#' Try to identify if a stanfit model is a binary choice model
#'
#' @param obj stanfit object
#'
#' @keywords internal
is_binary_model <- function(obj) {
  stopifnot(inherits(obj, "stanfit"))
  model_string <- rstan::get_stancode(obj)
  grepl("bernoulli", model_string)
}

#' Try to identify link function
#'
#' @param obj stanfit object
#'
#' @return Either "logit" or "probit"; if neither can be identified the function
#' will return `NA_character_`.
#'
#' @keywords internal
identify_link_function <- function(obj) {
  stopifnot(inherits(obj, "stanfit"))
  model_string <- rstan::get_stancode(obj)
  if (grepl("logit", model_string)) return("logit")
  if (grepl("Phi", model_string)) return("probit")
  NA_character_
}

#' @rdname mcmcRocPrc
#'
#' @export
mcmcRocPrc.stanreg <- function(object, curves = FALSE, fullsims = FALSE, ...) {
  if (!requireNamespace("rstanarm", quietly = TRUE)) {
    stop("Package \"rstanarm\" is needed for this function to work. Please install it.", call. = FALSE)  # nocov
  }

  if (!stats::family(object)$family=="binomial") {
    stop("the input model does not seem to be a binary choice model; should be like 'obj <- stan_glm(family = binomial(), ...)'")
  }

  pred_prob <- rstanarm::posterior_linpred(object, transform = TRUE)
  # posterior_linpred returns a matrix in which data cases are columns, and
  # MCMC samples are rows; we need to transpose this so that columns are samples
  pred_prob <- t(pred_prob)
  yvec <- unname(object$y)

  new_mcmcRocPrc(pred_prob = pred_prob, yvec = yvec, curves = curves, fullsims = fullsims)
}

#' @rdname mcmcRocPrc
#'
#' @export
mcmcRocPrc.brmsfit <- function(object, curves = FALSE, fullsims = FALSE, ...) {
  if (!requireNamespace("brms", quietly = TRUE)) {
    stop("Package \"brms\" is needed for this function to work. Please install it.", call.
         = FALSE)  # nocov
  }

  if (!stats::family(object)$family=="bernoulli") {
    stop("the input model does not seem to be a binary choice model; should be like 'obj <- brm(family = bernoulli(), ...)'")
  }

  pred_prob <- brms::posterior_epred(object)
  # posterior_epred returns a matrix in which data cases are columns, and
  # MCMC samples are rows; we need to transpose this so that columns are samples
  pred_prob <- t(pred_prob)
  yvec <- stats::model.response(stats::model.frame(object))

  new_mcmcRocPrc(pred_prob = pred_prob, yvec = yvec, curves = curves, fullsims = fullsims)
}

# Other input types (MCMCpack, ...) ---------------------------------------

#' @rdname mcmcRocPrc
#'
#' @export
mcmcRocPrc.bugs <- function(object, curves = FALSE, fullsims = FALSE, data, xnames, yname, type = c("logit", "probit"), ...) {
  link_type <- match.arg(type)

  mdl_data <- data
  stopifnot(
    all(xnames %in% names(mdl_data)),
    all(yname %in% names(mdl_data))
  )

  # add intercept by default, maybe revisit this
  xdata <- as.matrix(cbind(X0 = 1L, as.data.frame(mdl_data[xnames])))
  yvec <- mdl_data[[yname]]

  sm <- object$sims.matrix
  # Drop "deviance" column
  betadraws <- sm[, !colnames(sm) %in% "deviance"]

  if(link_type=="logit") {
    pred_prob <- plogis(xdata %*% t(betadraws))
  } else if (link_type=="probit") {
    pred_prob <- pnorm(xdata %*% t(betadraws))
  }

  new_mcmcRocPrc(pred_prob = pred_prob, yvec = yvec, curves = curves, fullsims = fullsims)
}

#' @rdname mcmcRocPrc
#'
#' @param type "logit" or "probit"
#' @param force for MCMCpack models, suppress warning if the model does not
#' appear to be a binary choice model?
#'
#' @export
mcmcRocPrc.mcmc <- function(object, curves = FALSE, fullsims = FALSE, data, xnames, yname, type = c("logit", "probit"), force = FALSE, ...) {
  if (!force) {
    if (is.null(attr(object, "call"))) {
      stop("object does not have a 'call' attribute; was it generated with a MCMCpack function?")
    } else {
      func <- as.character(attr(object, "call"))[1]
      if (!func %in% c("MCMClogit", "MCMCprobit")) {
        stop("object does not appear to have been fitted using MCMCpack::MCMClogit() or MCMCprobit(); mcmcRocPrc only properly works for those functions. To be safe, consider manually calculating the matrix of predicted probabilities.")
      }
    }
  }

  link_type <- match.arg(type)

  mdl_data <- data
  stopifnot(
    all(xnames %in% names(mdl_data)),
    all(yname %in% names(mdl_data))
  )

  # add intercept by default, maybe revisit this
  xdata <- as.matrix(cbind(X0 = 1L, as.data.frame(mdl_data[xnames])))
  yvec <- mdl_data[[yname]]

  betadraws <- as.matrix(object)

  if(link_type=="logit") {
    pred_prob <- plogis(xdata %*% t(betadraws))
  } else if (link_type=="probit") {
    pred_prob <- pnorm(xdata %*% t(betadraws))
  }

  new_mcmcRocPrc(pred_prob = pred_prob, yvec = yvec, curves = curves, fullsims = fullsims)
}
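## Sanity-check sketch (not run) for the internal curve helpers; the toy
## vectors are made up for illustration:
# yvec <- c(0, 0, 1, 0, 1, 1)
# pvec <- c(0.1, 0.2, 0.3, 0.4, 0.7, 0.9)
# roc <- compute_roc(yvec, pvec)
# prc <- compute_pr(yvec, pvec)
# caTools::trapz(roc$x, roc$y)  # AUC-ROC; 8 of 9 pairs concordant, so 8/9 ~ 0.889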
## end of file: BayesPostEst/R/mcmcRocPrc.R
#'This function generates ROC and precision-recall curves
#'after fitting a Bayesian logit or probit model.
#'@title ROC and Precision-Recall Curves using Bayesian MCMC estimates generalized
#'@description This function generates ROC and Precision-Recall curves
#'after fitting a Bayesian logit or probit regression. For fast calculation
#'from an "rjags" object, use \code{\link{mcmcRocPrc}}.
#'@param modelmatrix model matrix, including intercept (if the intercept is among the
#'parameters estimated in the model). Create with model.matrix(formula, data).
#'Note: the order of columns in the model matrix must correspond to the order of columns
#'in the matrix of posterior draws in the \code{mcmcout} argument. See the \code{mcmcout}
#'argument for more and Beger (2016) for background.
#'@param mcmcout posterior distributions of all logit coefficients,
#'in matrix form. This can be created from rstan, MCMCpack, R2jags, etc. and transformed
#'into a matrix using the function as.mcmc() from the coda package for \code{jags} class
#'objects, as.matrix() from base R for \code{mcmc}, \code{mcmc.list}, \code{stanreg}, and
#'\code{stanfit} class objects, and \code{object$sims.matrix} for \code{bugs} class objects.
#'Note: the order of columns in this matrix must correspond to the order of columns
#'in the model matrix. One can do this by examining the posterior distribution matrix and sorting the
#'variables in the order of this matrix when creating the model matrix. A useful function for sorting
#'column names containing both characters and numbers as
#'you create the matrix of posterior distributions is \code{mixedsort()} from the gtools package.
#'@param modelframe model frame in matrix form. Can be created using
#'as.matrix(model.frame(formula, data))
#'@param curves logical indicator of whether or not to return values to plot the ROC or Precision-Recall
#'curves. If set to \code{FALSE} (default), results are returned as a list without the extra
#'values.
#'@param link type of generalized linear model; a character vector set to \code{"logit"} (default) or \code{"probit"}.
#'@param fullsims logical indicator of whether full object (based on all MCMC draws
#'rather than their average) will be returned. Default is \code{FALSE}. Note: If \code{TRUE}
#'is chosen, the function takes notably longer to execute.
#'@references Beger, Andreas. 2016. “Precision-Recall Curves.” Available at SSRN:
#'https://ssrn.com/Abstract=2765419. http://dx.doi.org/10.2139/ssrn.2765419.
#'@return This function returns a list with 4 elements: #'\itemize{ #'\item area_under_roc: area under ROC curve (scalar) #'\item area_under_prc: area under precision-recall curve (scalar) #'\item prc_dat: data to plot precision-recall curve (data frame) #'\item roc_dat: data to plot ROC curve (data frame) #'} #' #'@examples #' \dontshow{.old_wd <- setwd(tempdir())} #' \donttest{ #' if (interactive()) { #' # simulating data #' #' set.seed(123456) #' b0 <- 0.2 # true value for the intercept #' b1 <- 0.5 # true value for first beta #' b2 <- 0.7 # true value for second beta #' n <- 500 # sample size #' X1 <- runif(n, -1, 1) #' X2 <- runif(n, -1, 1) #' Z <- b0 + b1 * X1 + b2 * X2 #' pr <- 1 / (1 + exp(-Z)) # inv logit function #' Y <- rbinom(n, 1, pr) #' df <- data.frame(cbind(X1, X2, Y)) #' #' # formatting the data for jags #' datjags <- as.list(df) #' datjags$N <- length(datjags$Y) #' #' # creating jags model #' model <- function() { #' #' for(i in 1:N){ #' Y[i] ~ dbern(p[i]) ## Bernoulli distribution of y_i #' logit(p[i]) <- mu[i] ## Logit link function #' mu[i] <- b[1] + #' b[2] * X1[i] + #' b[3] * X2[i] #' } #' #' for(j in 1:3){ #' b[j] ~ dnorm(0, 0.001) ## Use a coefficient vector for simplicity #' } #' #' } #' #' params <- c("b") #' inits1 <- list("b" = rep(0, 3)) #' inits2 <- list("b" = rep(0, 3)) #' inits <- list(inits1, inits2) #' #' ## fitting the model with R2jags #' set.seed(123) #' fit <- R2jags::jags(data = datjags, inits = inits, #' parameters.to.save = params, n.chains = 2, n.iter = 2000, #' n.burnin = 1000, model.file = model) #' #' # processing the data #' mm <- model.matrix(Y ~ X1 + X2, data = df) #' xframe <- as.matrix(model.frame(Y ~ X1 + X2, data = df)) #' mcmc <- coda::as.mcmc(fit) #' mcmc_mat <- as.matrix(mcmc)[, 1:ncol(xframe)] #' #' # using mcmcRocPrcGen #' fit_sum <- mcmcRocPrcGen(modelmatrix = mm, #' modelframe = xframe, #' mcmcout = mcmc_mat, #' curves = TRUE, #' fullsims = FALSE) #' } #' } #' #' \dontshow{setwd(.old_wd)} #'@export mcmcRocPrcGen <- function(modelmatrix, mcmcout, modelframe, curves = FALSE, link = "logit", fullsims = FALSE){ if(link == "logit") { pred_prob <- plogis(t(modelmatrix %*% t(mcmcout))) } else if (link == "probit") { pred_prob <- pnorm(t(modelmatrix %*% t(mcmcout))) } else { stop("Please enter a valid link argument") } if(missing(modelmatrix) | missing(mcmcout) | missing(modelframe)) { "Please enter the required arguments" } if(fullsims == FALSE){ y_pred <- apply(X = pred_prob, MARGIN = 2, FUN = function(x) median(x)) # Observed y and x pred_obs <- data.frame(y_pred = y_pred, y_obs = modelframe[, 1]) auc_roc <- function(obs, pred) { pred <- prediction(pred, obs) auc <- performance(pred, "auc")@y.values[[1]] return(auc) } auc_pr <- function(obs, pred) { xx.df <- prediction(pred, obs) perf <- performance(xx.df, "prec", "rec") xy <- data.frame(recall = [email protected][[1]], precision = [email protected][[1]]) # take out division by 0 for lowest threshold xy <- subset(xy, !is.nan(xy$precision)) res <- caTools::trapz(xy$recall, xy$precision) res } area_under_roc <- auc_roc(obs = pred_obs$y_obs, pred = pred_obs$y_pred) area_under_prc <- auc_pr(obs = pred_obs$y_obs, pred = pred_obs$y_pred) if(curves == FALSE){ # Results as a list results <- list() results$area_under_roc <- area_under_roc results$area_under_prc <- area_under_prc return(results) } if(curves == TRUE){ prediction_obj <- prediction(predictions = pred_obs$y_pred, labels = pred_obs$y_obs) prc_performance_obj <- performance(prediction.obj = prediction_obj, measure = "prec", x.measure 
= "rec") prc_dat <- data.frame(x = [email protected], y = [email protected]) names(prc_dat) <- c("x", "y") roc_performance_obj <- performance(prediction.obj = prediction_obj, measure = "tpr", x.measure = "fpr") roc_dat <- data.frame(x = [email protected], y = [email protected]) names(roc_dat) <- c("x", "y") # Results as a list results <- list() results$area_under_roc <- area_under_roc results$area_under_prc <- area_under_prc results$prc_dat <- prc_dat results$roc_dat <- roc_dat return(results) } } if(fullsims == TRUE){ RocPrcOneDraw <- function(pred_prob_vector){ # run this function over each row (iteration) of the pred_prob matrix # y_pred <- apply(X = pred_prob, MARGIN = 2, FUN = function(x) median(x)) # Observed y and x pred_obs <- data.frame(y_pred = pred_prob_vector, y_obs = modelframe[, 1]) auc_roc <- function(obs, pred) { pred <- prediction(pred, obs) auc <- performance(pred, "auc")@y.values[[1]] return(auc) } auc_pr <- function(obs, pred) { xx.df <- prediction(pred, obs) perf <- performance(xx.df, "prec", "rec") xy <- data.frame(recall = [email protected][[1]], precision = [email protected][[1]]) # take out division by 0 for lowest threshold xy <- subset(xy, !is.nan(xy$precision)) res <- caTools::trapz(xy$recall, xy$precision) res } area_under_roc <- auc_roc(obs = pred_obs$y_obs, pred = pred_obs$y_pred) area_under_prc <- auc_pr(obs = pred_obs$y_obs, pred = pred_obs$y_pred) if(curves == FALSE){ # Results as a list one_result <- c(area_under_roc, area_under_prc) return(one_result) } if(curves == TRUE){ prediction_obj <- prediction(predictions = pred_obs$y_pred, labels = pred_obs$y_obs) prc_performance_obj <- performance(prediction.obj = prediction_obj, measure = "prec", x.measure = "rec") prc_dat <- data.frame(x = [email protected], y = [email protected]) names(prc_dat) <- c("x", "y") roc_performance_obj <- performance(prediction.obj = prediction_obj, measure = "tpr", x.measure = "fpr") roc_dat <- data.frame(x = [email protected], y = [email protected]) names(roc_dat) <- c("x", "y") # Results as a list one_result <- list() one_result$area_under_roc <- area_under_roc one_result$area_under_prc <- area_under_prc one_result$prc_dat <- prc_dat one_result$roc_dat <- roc_dat return(one_result) } } if(curves == FALSE){ all_results <- matrix(nrow = nrow(pred_prob), ncol = 2) for(i in 1:nrow(pred_prob)){ all_results[i, ] <- RocPrcOneDraw(pred_prob[i, ]) } all_results <- as.data.frame(all_results) names(all_results) <- c("area_under_roc", "area_under_prc") } if(curves == TRUE){ all_results <- list() for(i in 1:nrow(pred_prob)){ all_results[[i]] <- RocPrcOneDraw(pred_prob[i, ]) } } return(all_results) } }
## end of file: BayesPostEst/R/mcmcRocPrcGen.R
#' Summarize Bayesian MCMC Output #' #' R function for summarizing MCMC output in a regression-style table. #' #' @param sims Bayesian model object generated by R2jags, rjags, R2WinBUGS, #' R2OpenBUGS, MCMCpack, rstan, and rstanarm. #' @param ci desired level for credible intervals; defaults to c(0.025, 0.975). #' @param pars character vector of parameters to be printed; defaults to \code{NULL} #' (all parameters are printed). If not \code{NULL}, the user can either specify the exact names of #' parameters to be printed (e.g. \code{c("alpha", "beta1", "beta2")}) or part of a name #' so that all parameters containing that name will be printed (e.g. \code{"beta"} will print \code{beta1}, \code{beta2}, etc.). #' @param Pr print percent of posterior draws with same sign as median; defaults to \code{FALSE}. #' @param ROPE defaults to \code{NULL}. If not \code{NULL}, a vector of two values defining the region of #' practical equivalence ("ROPE"); returns \% of posterior draws to the left/right of ROPE. For this quantity #' to be meaningful, all parameters must be on the same scale (e.g. standardized coefficients #' or first differences). See Kruschke (2013, Journal of Experimental #' Psychology 143(2): 573-603) for more on the ROPE. #' @param regex use regular expression matching with \code{pars}? #' #' @references Kruschke, John K. 2013. “Bayesian Estimation Supersedes the T-Test.” Journal of #' Experimental Psychology: General 142 (2): 573–603. https://doi.org/10.1037/a0029146. #' #' @return a data frame containing MCMC summary statistics. #' #' @examples #' \dontshow{.old_wd <- setwd(tempdir())} #' \donttest{ #' if (interactive()) { #' data("jags_logit") #' #' ## printing out table #' object <- mcmcTab(jags_logit, #' ci = c(0.025, 0.975), #' pars = NULL, #' Pr = FALSE, #' ROPE = NULL) #' object #' } #' } #' #' \dontshow{setwd(.old_wd)} #' @export mcmcTab <- function(sims, ci = c(0.025, 0.975), pars = NULL, Pr = FALSE, ROPE = NULL, regex = FALSE) { if (inherits(sims, what = c("jags", "rjags"))) { sims <- as.matrix(coda::as.mcmc(sims)) } if (inherits(sims, what = "bugs")) { sims <- sims$sims.matrix } if (inherits(sims, what = c("mcmc", "mcmc.list", "stanfit", "stanreg", "brmsfit"))) { sims <- as.matrix(sims) } ROPE <- check_ROPE_argument(ROPE) if (is.null(pars)) { dat <- sims } else if (regex) { dat <- sims[, grepl(x = colnames(sims), pattern = paste(pars, collapse = "|"))] } else { dat <- matrix(sims[, pars], nrow = nrow(sims), byrow = FALSE, dimnames = list(NULL, pars)) } dat_wide <- t(dat) mcmctab <- apply(dat_wide, 1, function(x) c(Median = round(median(x), digits = 3), # Posterior median SD = round(sd(x), digits = 3), # Posterior SD Lower = as.numeric(round(quantile(x, probs = ci[1]), digits = 3)), # Lower CI of posterior Upper = as.numeric(round(quantile(x, probs = ci[2]), digits = 3)), # Upper CI of posterior Pr = round(ifelse(median(x) > 0, length(x[x > 0]) / length(x), length(x[x < 0]) / length(x)), digits = 3) # % of posterior draws with same sign as median )) if(Pr == FALSE){ mcmctab <- apply(dat_wide, 1, function(x) c(Median = round(median(x), digits = 3), # Posterior median SD = round(sd(x), digits = 3), # Posterior SD Lower = as.numeric(round(quantile(x, probs = ci[1]), digits = 3)), # Lower CI of posterior Upper = as.numeric(round(quantile(x, probs = ci[2]), digits = 3)))) } if(!is.null(ROPE)){ message("This table contains an estimate for parameter values outside of the region of practical equivalence (ROPE). 
For this quantity to be meaningful, all parameters must be on the same scale (e.g. standardized coefficients or first differences).") mcmctab <- apply(dat_wide, 1, function(x) c(Median = round(median(x), digits = 3), # Posterior median SD = round(sd(x), digits = 3), # Posterior SD Lower = as.numeric(round(quantile(x, probs = ci[1]), digits = 3)), # Lower CI of posterior Upper = as.numeric(round(quantile(x, probs = ci[2]), digits = 3)), PrOutROPE = round(ifelse(median(x) > 0, length(x[x > ROPE[2]]) / length(x), length(x[x < ROPE[1]]) / length(x)), digits = 3))) } # return(t(mcmctab)) out_dat <- data.frame("Variable" = colnames(mcmctab), t(mcmctab), row.names = NULL, stringsAsFactors = TRUE) # check this, new with R 4.0.0 # recommended if sort order used # in the string to factor conversion # does not matter return(out_dat) }
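## Usage sketch (not run): mcmcTab() also accepts raw posterior draws coerced
## to "mcmc"; the matrix below is simulated purely for illustration:
# post <- coda::as.mcmc(matrix(rnorm(4000), ncol = 4,
#                              dimnames = list(NULL, paste0("b", 1:4))))
# mcmcTab(post, Pr = TRUE)
# mcmcTab(post, ROPE = c(-0.1, 0.1))  # note: ROPE output replaces the Pr column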
## end of file: BayesPostEst/R/mcmcTab.R
check_ROPE_argument <- function(x) { if (is.null(x)) return(invisible(x)) valid <- is.numeric(x) && length(x)==2 && (x[2] >= x[1]) if (!valid) { stop("Invalid ROPE argument; must be a length 2 numeric vector like 'c(0, 1)'") } invisible(x) }
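## Behavior sketch (not run):
# check_ROPE_argument(NULL)           # no ROPE requested; returns NULL invisibly
# check_ROPE_argument(c(-0.1, 0.1))   # valid; returned invisibly
# check_ROPE_argument(c(0.1, -0.1))   # error: not a valid length-2 low/high pair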
## end of file: BayesPostEst/R/utils.R
## ----setup, include = FALSE--------------------------------------------------- options(rmarkdown.html_vignette.check_title = FALSE) pkgs <- c("R2jags", "rjags", "MCMCpack", "rstan", "rstanarm", "ggplot2", "ggridges") if (!all(sapply(pkgs, require, quietly = TRUE, character.only = TRUE))) { knitr::opts_chunk$set( eval = FALSE, comment = "#>") } else { knitr::opts_chunk$set( collapse = TRUE, echo = TRUE, message = FALSE, warning = FALSE, out.width = "90%", fig.align = "center", fig.width = 8, fig.height = 8, comment = "#>" ) } ## ----eval=FALSE--------------------------------------------------------------- # install.packages("BayesPostEst") ## ----eval=FALSE--------------------------------------------------------------- # library("devtools") # install_github("ShanaScogin/BayesPostEst") ## ----------------------------------------------------------------------------- library("BayesPostEst") ## ----------------------------------------------------------------------------- df <- carData::Cowles ## ----------------------------------------------------------------------------- df$female <- (as.numeric(df$sex) - 2) * (-1) df$volunteer <- as.numeric(df$volunteer) - 1 df$extraversion <- (df$extraversion - mean(df$extraversion)) / (2 * sd(df$extraversion)) df$neuroticism <- (df$neuroticism - mean(df$neuroticism)) / (2 * sd(df$neuroticism)) ## ----------------------------------------------------------------------------- dl <- as.list(df[, c("volunteer", "female", "neuroticism", "extraversion")]) dl$N <- nrow(df) ## ----------------------------------------------------------------------------- mod.jags <- paste(" model { for (i in 1:N){ volunteer[i] ~ dbern(p[i]) logit(p[i]) <- mu[i] mu[i] <- b[1] + b[2] * female[i] + b[3] * neuroticism[i] + b[4] * extraversion[i] } for(j in 1:4){ b[j] ~ dnorm(0, 0.1) } } ") writeLines(mod.jags, "mod.jags") ## ----------------------------------------------------------------------------- params.jags <- c("b") inits1.jags <- list("b" = rep(0, 4)) inits.jags <- list(inits1.jags, inits1.jags, inits1.jags, inits1.jags) ## ----------------------------------------------------------------------------- library("R2jags") set.seed(123) fit.jags <- jags(data = dl, inits = inits.jags, parameters.to.save = params.jags, n.chains = 4, n.iter = 2000, n.burnin = 1000, model.file = "mod.jags") ## ----------------------------------------------------------------------------- library("rjags") mod.rjags <- jags.model(file = "mod.jags", data = dl, inits = inits.jags, n.chains = 4, n.adapt = 1000) fit.rjags <- coda.samples(model = mod.rjags, variable.names = params.jags, n.iter = 2000) ## ----------------------------------------------------------------------------- library("MCMCpack") fit.MCMCpack <- MCMClogit(volunteer ~ female + neuroticism + extraversion, data = df, burning = 1000, mcmc = 2000, seed = 123, b0 = 0, B0 = 0.1) ## ---- eval=FALSE-------------------------------------------------------------- # mod.stan <- paste(" # data { # int<lower=0> N; # int<lower=0,upper=1> volunteer[N]; # vector[N] female; # vector[N] neuroticism; # vector[N] extraversion; # } # parameters { # vector[4] b; # } # model { # volunteer ~ bernoulli_logit(b[1] + b[2] * female + b[3] * neuroticism + b[4] * extraversion); # for(i in 1:4){ # b[i] ~ normal(0, 3); # } # } # ") # writeLines(mod.stan, "mod.stan") ## ---- eval=FALSE-------------------------------------------------------------- # library("rstan") # rstan_options(auto_write = TRUE) # options(mc.cores = 2) ## ---- 
eval=FALSE-------------------------------------------------------------- # fit.stan <- stan(file = "mod.stan", # data = dl, # pars = c("b"), # chains = 4, # iter = 2000, # seed = 123) ## ---- results = 'hide'-------------------------------------------------------- library("rstanarm") fit.rstanarm <- stan_glm(volunteer ~ female + neuroticism + extraversion, data = df, family = binomial(link = "logit"), prior = normal(0, 3), prior_intercept = normal(0, 3), chains = 4, iter = 2000, seed = 123) ## ----------------------------------------------------------------------------- mcmcTab(fit.jags) ## ----------------------------------------------------------------------------- mcmcTab(fit.rjags) ## ----------------------------------------------------------------------------- mcmcTab(fit.MCMCpack) ## ---- eval=FALSE-------------------------------------------------------------- # mcmcTab(fit.stan) ## ----------------------------------------------------------------------------- mcmcTab(fit.rstanarm) ## ----------------------------------------------------------------------------- mcmcTab(fit.jags, Pr = TRUE) ## ----------------------------------------------------------------------------- mcmcTab(fit.jags, pars = c("b[2]", "b[3]", "b[4]"), ROPE = c(-0.1, 0.1)) ## ---- results = 'asis'-------------------------------------------------------- mcmcReg(fit.jags, format = 'html', doctype = F) ## ---- results = 'asis'-------------------------------------------------------- mcmcReg(fit.jags, pars = 'b', format = 'html', regex = T, doctype = F) ## ---- results = 'asis'-------------------------------------------------------- mcmcReg(fit.jags, pars = c('b\\[1\\]', 'b\\[3\\]', 'b\\[4\\]'), format = 'html', regex = T, doctype = F) ## ---- results = 'asis'-------------------------------------------------------- mcmcReg(fit.jags, pars = c('b', 'dev'), format = 'html', regex = T, doctype = F) ## ---- results = 'asis'-------------------------------------------------------- mcmcReg(fit.jags, pars = 'b', coefnames = c('(Constant)', 'Female', 'Neuroticism', 'Extraversion'), format = 'html', regex = T, doctype = F) ## ---- results = 'asis'-------------------------------------------------------- mcmcReg(fit.jags, pars = 'b', custom.coef.map = list('b[1]' = '(Constant)', 'b[2]' = 'Female', 'b[3]' = 'Nueroticism', 'b[4]' = 'Extraversion'), format = 'html', regex = T, doctype = F) ## ---- results = 'asis'-------------------------------------------------------- mcmcReg(fit.jags, custom.coef.map = list('b[2]' = 'Female', 'b[4]' = 'Extraversion', 'b[1]' = '(Constant)'), format = 'html', doctype = F) ## ---- results = 'asis', eval=FALSE-------------------------------------------- # mcmcReg(list(fit.stan, fit.stan), format = 'html', doctype = F) ## ---- error = T, results = 'asis', eval=FALSE--------------------------------- # mcmcReg(list(fit.jags, fit.stan), format = 'html', doctype = F) ## ---- results = 'asis'-------------------------------------------------------- mcmcReg(list(fit.rstanarm, fit.rstanarm), pars = list(c('female', 'extraversion'), 'neuroticism'), format = 'html', doctype = F) ## ---- results = 'asis'-------------------------------------------------------- mcmcReg(fit.rstanarm, custom.model.names = 'Binary Outcome', format = 'html', doctype = F) ## ----------------------------------------------------------------------------- mcmcmat.jags <- as.matrix(coda::as.mcmc(fit.jags)) mcmcmat.MCMCpack <- as.matrix(fit.MCMCpack) mcmcmat.rstanarm <- as.matrix(fit.rstanarm) ## ---- 
eval=FALSE-------------------------------------------------------------- # mcmcmat.stan <- as.matrix(fit.stan) ## ----------------------------------------------------------------------------- mm <- model.matrix(volunteer ~ female + neuroticism + extraversion, data = df) ## ----------------------------------------------------------------------------- aveprob.female.jags <- mcmcAveProb(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], xcol = 2, xrange = c(0, 1), link = "logit", ci = c(0.025, 0.975), fullsims = TRUE) ## ----------------------------------------------------------------------------- library("ggplot2") library("ggridges") ggplot(data = aveprob.female.jags, aes(y = factor(x), x = pp)) + stat_density_ridges(quantile_lines = TRUE, quantiles = c(0.025, 0.5, 0.975), vline_color = "white") + scale_y_discrete(labels = c("Male", "Female")) + ylab("") + xlab("Estimated probability of volunteering") + labs(title = "Probability based on average-case approach") + theme_minimal() ## ----------------------------------------------------------------------------- aveprob.extra.jags <- mcmcAveProb(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], xcol = 4, xrange = seq(min(df$extraversion), max(df$extraversion), length.out = 20), link = "logit", ci = c(0.025, 0.975), fullsims = FALSE) ## ----------------------------------------------------------------------------- ggplot(data = aveprob.extra.jags, aes(x = x, y = median_pp)) + geom_ribbon(aes(ymin = lower_pp, ymax = upper_pp), fill = "gray") + geom_line() + xlab("Extraversion") + ylab("Estimated probability of volunteering") + ylim(0, 1) + labs(title = "Probability based on average-case approach") + theme_minimal() ## ----------------------------------------------------------------------------- obsprob.female.jags <- mcmcObsProb(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], xcol = 2, xrange = c(0, 1), link = "logit", ci = c(0.025, 0.975), fullsims = TRUE) ## ----------------------------------------------------------------------------- ggplot(data = obsprob.female.jags, aes(y = factor(x), x = pp)) + stat_density_ridges(quantile_lines = TRUE, quantiles = c(0.025, 0.5, 0.975), vline_color = "white") + scale_y_discrete(labels = c("Male", "Female")) + ylab("") + xlab("Estimated probability of volunteering") + labs(title = "Probability based on observed-case approach") + theme_minimal() ## ----------------------------------------------------------------------------- obsprob.extra.jags <- mcmcObsProb(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], xcol = 4, xrange = seq(min(df$extraversion), max(df$extraversion), length.out = 20), link = "logit", ci = c(0.025, 0.975), fullsims = FALSE) ## ----------------------------------------------------------------------------- ggplot(data = obsprob.extra.jags, aes(x = x, y = median_pp)) + geom_ribbon(aes(ymin = lower_pp, ymax = upper_pp), fill = "gray") + geom_line() + xlab("Extraversion") + ylab("Estimated probability of volunteering") + ylim(0, 1) + labs(title = "Probability based on observed-case approach") + theme_minimal() ## ----------------------------------------------------------------------------- fdfull.jags <- mcmcFD(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], link = "logit", ci = c(0.025, 0.975), fullsims = TRUE) summary(fdfull.jags) ## ----------------------------------------------------------------------------- fdsum.jags <- mcmcFD(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], link = "logit", ci = c(0.025, 0.975), fullsims = FALSE) fdsum.jags ## 
----------------------------------------------------------------------------- ggplot(data = fdsum.jags, aes(x = median_fd, y = VarName)) + geom_point() + geom_segment(aes(x = lower_fd, xend = upper_fd, yend = VarName)) + geom_vline(xintercept = 0) + xlab("Change in Pr(Volunteering)") + ylab("") + theme_minimal() ## ----------------------------------------------------------------------------- plot(fdfull.jags, ROPE = c(-0.01, 0.01)) ## ----------------------------------------------------------------------------- p <- plot(fdfull.jags, ROPE = c(-0.01, 0.01)) p + labs(title = "First differences") + ggridges::theme_ridges() ## ----------------------------------------------------------------------------- fitstats <- mcmcRocPrc(object = fit.jags, yname = "volunteer", xnames = c("female", "neuroticism", "extraversion"), curves = TRUE, fullsims = FALSE) ## ----------------------------------------------------------------------------- fitstats$area_under_roc ## ----------------------------------------------------------------------------- fitstats$area_under_prc ## ----------------------------------------------------------------------------- ggplot(data = as.data.frame(fitstats, what = "roc"), aes(x = x, y = y)) + geom_line() + geom_abline(intercept = 0, slope = 1, color = "gray") + labs(title = "ROC curve") + xlab("1 - Specificity") + ylab("Sensitivity") + theme_minimal() ## ----------------------------------------------------------------------------- ggplot(data = as.data.frame(fitstats, what = "prc"), aes(x = x, y = y)) + geom_line() + labs(title = "Precision-Recall curve") + xlab("Recall") + ylab("Precision") + theme_minimal() ## ----------------------------------------------------------------------------- fitstats.fullsims <- mcmcRocPrc(object = fit.jags, yname = "volunteer", xnames = c("female", "neuroticism", "extraversion"), curves = FALSE, fullsims = TRUE) ## ----------------------------------------------------------------------------- ggplot(as.data.frame(fitstats.fullsims), aes(x = area_under_roc)) + geom_density() + labs(title = "Area under the ROC curve") + theme_minimal() ## ----------------------------------------------------------------------------- ggplot(as.data.frame(fitstats.fullsims), aes(x = area_under_prc)) + geom_density() + labs(title = "Area under the Precision-Recall curve") + theme_minimal() ## ----echo=FALSE, results='hide', message=FALSE-------------------------------- rm(mod.jags) rm(mod.stan) rm(mod.rds)
## end of file: BayesPostEst/inst/doc/getting_started.R
--- title: "Using the BayesPostEst package" author: - "Johannes Karreth" - "Shana Scogin" - "Rob Williams" - "Andreas Beger" date: "`r Sys.Date()`" output: rmarkdown::html_vignette bibliography: "references.bib" vignette: > %\VignetteIndexEntry{getting_started} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, include = FALSE} options(rmarkdown.html_vignette.check_title = FALSE) pkgs <- c("R2jags", "rjags", "MCMCpack", "rstan", "rstanarm", "ggplot2", "ggridges") if (!all(sapply(pkgs, require, quietly = TRUE, character.only = TRUE))) { knitr::opts_chunk$set( eval = FALSE, comment = "#>") } else { knitr::opts_chunk$set( collapse = TRUE, echo = TRUE, message = FALSE, warning = FALSE, out.width = "90%", fig.align = "center", fig.width = 8, fig.height = 8, comment = "#>" ) } ``` # Introduction BayesPostEst contains functions to generate postestimation quantities after estimating Bayesian regression models. The package was inspired by a set of functions written originally for [Johannes Karreth](http://www.jkarreth.net)'s workshop on Bayesian modeling at the [ICPSR Summer program](https://www.icpsr.umich.edu/web/pages/sumprog/). It has grown to include new functions (see `mcmcReg`) and will continue to grow to support Bayesian postestimation. For now, the package focuses mostly on generalized linear regression models for binary outcomes (logistic and probit regression). More details on the package philosophy, its functions, and related packages can be found in @ScoginEtal2019. # Installation To install the latest release on CRAN: ```{r eval=FALSE} install.packages("BayesPostEst") ``` The latest development version on GitHub can be installed with: ```{r eval=FALSE} library("devtools") install_github("ShanaScogin/BayesPostEst") ``` Once you have installed the package, you can access it by calling: ```{r} library("BayesPostEst") ``` After the package is loaded, check out the `?BayesPostEst` to see a help file. # General setup Most functions in this package work with posterior distributions of parameters. These distributions need to be converted into a matrix. All functions in the package do this automatically for posterior draws generated by JAGS, BUGS, MCMCpack, rstan, and rstanarm. For posterior draws generated by other tools, users must convert these objects into a matrix, where rows represent iterations and columns represent parameters. # Example data This vignette uses the `Cowles` dataset [@CowlesDavis1987] from the carData package [@carData2018]. ```{r} df <- carData::Cowles ``` This data frame contains information on 1421 individuals in the following variables: - neuroticism: scale from Eysenck personality inventory. - extraversion: scale from Eysenck personality inventory. - sex: a factor with levels: female; male. - volunteer: volunteering, a factor with levels: no; yes. This is the outcome variable for the running example in this vignette. Before proceeding, we convert the two factor variables `sex` and `volunteer` into numeric variables. We also means-center and standardize the two continuous variables by dividing each by two standard deviations [@Gelman2007]. 
```{r}
df$female <- (as.numeric(df$sex) - 2) * (-1)
df$volunteer <- as.numeric(df$volunteer) - 1
df$extraversion <- (df$extraversion - mean(df$extraversion)) / (2 * sd(df$extraversion))
df$neuroticism <- (df$neuroticism - mean(df$neuroticism)) / (2 * sd(df$neuroticism))
```

We estimate a Bayesian generalized linear model with the inverse logit link function, where

$$
Pr(\text{Volunteering}_i) = \text{logit}^{-1}(\beta_1 + \beta_2 \text{Female}_i + \beta_3 \text{Neuroticism}_i + \beta_4 \text{Extraversion}_i)
$$

`BayesPostEst` functions accommodate GLM estimates for both logit and probit link functions. The examples proceed with the logit link function. If we had estimated a probit regression, the corresponding argument `link` in relevant function calls would need to be set to `link = "probit"`. Otherwise, it is set to `link = "logit"` by default.

# Model estimation

To use `BayesPostEst`, we first estimate a Bayesian regression model. This vignette demonstrates five tools for doing so: JAGS (via the [R2jags](https://cran.r-project.org/package=R2jags) and [rjags](https://cran.r-project.org/package=rjags) packages), [MCMCpack](https://cran.r-project.org/package=MCMCpack), and the two Stan interfaces [rstan](https://cran.r-project.org/package=rstan) and [rstanarm](https://cran.r-project.org/package=rstanarm).

## JAGS

First, we prepare the data for JAGS [@jags2017]. Users need to combine all variables into a list and specify any other elements, in this case N, the number of observations.

```{r}
dl <- as.list(df[, c("volunteer", "female", "neuroticism", "extraversion")])
dl$N <- nrow(df)
```

We then write the JAGS model into the working directory.

```{r}
mod.jags <- paste("
model {
  for (i in 1:N){
    volunteer[i] ~ dbern(p[i])
    logit(p[i]) <- mu[i]
    mu[i] <- b[1] + b[2] * female[i] + b[3] * neuroticism[i] + b[4] * extraversion[i]
  }
  for(j in 1:4){
    b[j] ~ dnorm(0, 0.1)
  }
}
")
writeLines(mod.jags, "mod.jags")
```

We then define the parameters for which we wish to retain posterior distributions and provide starting values.

```{r}
params.jags <- c("b")
inits1.jags <- list("b" = rep(0, 4))
inits.jags <- list(inits1.jags, inits1.jags, inits1.jags, inits1.jags)
```

Now, fit the model using the R2jags package.

```{r}
library("R2jags")
set.seed(123)
fit.jags <- jags(data = dl,
                 inits = inits.jags,
                 parameters.to.save = params.jags,
                 n.chains = 4,
                 n.iter = 2000,
                 n.burnin = 1000,
                 model.file = "mod.jags")
```

The same data and model can be used to fit the model using the rjags package:

```{r}
library("rjags")
mod.rjags <- jags.model(file = "mod.jags",
                        data = dl,
                        inits = inits.jags,
                        n.chains = 4,
                        n.adapt = 1000)
fit.rjags <- coda.samples(model = mod.rjags,
                          variable.names = params.jags,
                          n.iter = 2000)
```

## MCMCpack

We estimate the same model using MCMCpack [@MCMCpack].

```{r}
library("MCMCpack")
fit.MCMCpack <- MCMClogit(volunteer ~ female + neuroticism + extraversion,
                          data = df,
                          burnin = 1000,
                          mcmc = 2000,
                          seed = 123,
                          b0 = 0,
                          B0 = 0.1)
```

## RStan

We write the same model in Stan language.

```{r, eval=FALSE}
mod.stan <- paste("
data {
  int<lower=0> N;
  int<lower=0,upper=1> volunteer[N];
  vector[N] female;
  vector[N] neuroticism;
  vector[N] extraversion;
}
parameters {
  vector[4] b;
}
model {
  volunteer ~ bernoulli_logit(b[1] + b[2] * female + b[3] * neuroticism + b[4] * extraversion);
  for(i in 1:4){
    b[i] ~ normal(0, 3);
  }
}
")
writeLines(mod.stan, "mod.stan")
```

We then load rstan [@rstan2019]...

```{r, eval=FALSE}
library("rstan")
rstan_options(auto_write = TRUE)
options(mc.cores = 2)
```

...
and estimate the model, re-using the data in list format created for JAGS earlier. ```{r, eval=FALSE} fit.stan <- stan(file = "mod.stan", data = dl, pars = c("b"), chains = 4, iter = 2000, seed = 123) ``` ## rstanarm Lastly, we use the rstanarm interface [@rstanarm2019] to estimate the same model again. ```{r, results = 'hide'} library("rstanarm") fit.rstanarm <- stan_glm(volunteer ~ female + neuroticism + extraversion, data = df, family = binomial(link = "logit"), prior = normal(0, 3), prior_intercept = normal(0, 3), chains = 4, iter = 2000, seed = 123) ``` # Tables of regression coefficients and other parameters BayesPostEst contains functions to generate regression tables from objects created by the following packages: R2jags, runjags, rjags, R2WinBUGS, MCMCpack, rstan, rstanarm, and brms. This includes the following object classes: `jags`, `rjags`, `bugs`, `mcmc`, `mcmc.list`, `stanreg`, `stanfit`, `brmsfit`. The package contains two different functions to produce regression tables: - `mcmcTab` - `mcmcReg` Each has its own advantages which we discuss in depth below. ## Bayesian regression tables `mcmcTab` generates a table summarizing the posterior distributions of all parameters contained in the model object. This table can then be used to summarize parameter quantities. By default, `mcmcTab` generates a dataframe with one row per parameter and columns containing the median, standard deviation, and 95% credible interval of each parameter's posterior distribution. ```{r} mcmcTab(fit.jags) ``` ```{r} mcmcTab(fit.rjags) ``` ```{r} mcmcTab(fit.MCMCpack) ``` ```{r, eval=FALSE} mcmcTab(fit.stan) ``` ```{r} mcmcTab(fit.rstanarm) ``` ### Proportion of positive/negative draws Users can add a column to the table that calculates the percent of posterior draws that have the same sign as the median of the posterior distribution. ```{r} mcmcTab(fit.jags, Pr = TRUE) ``` ### ROPE Users can also define a "region of practical equivalence" (ROPE; @Kruschke2013; @Kruschke2018). This region is a band of values around 0 that are "practically equivalent" to 0 or no effect. For this to be useful, all parameters (e.g. regression coefficients) must be on the same scale because mcmcTab accepts only one definition of ROPE for all parameters. Users can standardize regression coefficients to achieve this. Because we standardized variables earlier, the coefficients (except the intercept) are on a similar scale and we define the ROPE to be between -0.1 and 0.1. ```{r} mcmcTab(fit.jags, pars = c("b[2]", "b[3]", "b[4]"), ROPE = c(-0.1, 0.1)) ``` ## Conventional regression tables The `mcmcReg` function serves as an interface to `texreg` and produces more polished and publication-ready tables than `mcmcTab`. `mcmcReg` writes tables in HTML or LaTeX format. `mcmcReg` can produce tables with multiple models with each model in a column and supports flexible renaming of parameters. However, these tables are more similar to standard frequentist regression tables, so they do not have a way to incorporate the percent of posterior draws that have the same sign as the median of the posterior distribution or a ROPE like `mcmcTab` is able to. Uncertainty intervals can be either standard credible intervals or highest posterior density intervals [@Kruschke2015] using the `hpdi` argument, and their level can be set with the `ci` argument (default 95%). Separately calculated goodness of fit statistics can be included with the `gof` argument. 
```{r, results = 'asis'}
mcmcReg(fit.jags, format = 'html', doctype = F)
```

### Limiting output

`mcmcReg` supports limiting the parameters included in the table via the `pars` argument. By default, all parameters saved in the model object will be included. In the case of `fit.jags`, this includes the deviance estimate. If we wish to exclude it, we can specify `pars = 'b'`, which will capture `b[1]`-`b[4]` using regular expression matching.

```{r, results = 'asis'}
mcmcReg(fit.jags, pars = 'b', format = 'html', regex = T, doctype = F)
```

If we only wish to exclude the intercept, we can do this by explicitly specifying the parameters we wish to include as a vector. Note that in this example we have to escape the `[]`s in `pars` because brackets are reserved characters in regular expressions.

```{r, results = 'asis'}
mcmcReg(fit.jags, pars = c('b\\[2\\]', 'b\\[3\\]', 'b\\[4\\]'),
  format = 'html', regex = T, doctype = F)
```

`mcmcReg` also supports partial regular expression matching of multiple parameter family names, as demonstrated below.

```{r, results = 'asis'}
mcmcReg(fit.jags, pars = c('b', 'dev'), format = 'html', regex = T, doctype = F)
```

### Custom coefficient names

`mcmcReg` supports custom coefficient names for publication-ready tables. The simplest option is via the `coefnames` argument. Note that the number of parameters and the number of custom coefficient names must match, so it is a good idea to use `pars` in tandem with `coefnames`.

```{r, results = 'asis'}
mcmcReg(fit.jags, pars = 'b',
  coefnames = c('(Constant)', 'Female', 'Neuroticism', 'Extraversion'),
  format = 'html', regex = T, doctype = F)
```

A more flexible way to include custom coefficient names is via the `custom.coef.map` argument, which accepts a named list, with names as parameter names in the model and values as the custom coefficient names.

```{r, results = 'asis'}
mcmcReg(fit.jags, pars = 'b',
  custom.coef.map = list('b[1]' = '(Constant)',
    'b[2]' = 'Female',
    'b[3]' = 'Neuroticism',
    'b[4]' = 'Extraversion'),
  format = 'html', regex = T, doctype = F)
```

The advantage of `custom.coef.map` is that it can flexibly reorder and omit coefficients from the table based on their positions within the list. Notice in the code below that deviance does not have to be included in `pars` because its absence from `custom.coef.map` omits it from the resulting table.

```{r, results = 'asis'}
mcmcReg(fit.jags,
  custom.coef.map = list('b[2]' = 'Female',
    'b[4]' = 'Extraversion',
    'b[1]' = '(Constant)'),
  format = 'html', doctype = F)
```

However, it is important to remember that `mcmcReg` will look for the parameter names in the model object, so be sure to inspect it for the correct parameter names. This is important because `stan_glm` will produce a model object with variable names instead of indexed parameter names.

### Multiple models

`mcmcReg` accepts multiple model objects and will produce a table with one model per column. To produce a table from multiple models, pass a list of models as the `mod` argument to `mcmcReg`.

```{r, results = 'asis', eval=FALSE}
mcmcReg(list(fit.stan, fit.stan), format = 'html', doctype = F)
```

Note, however, that all model objects must be of the same class, so it is *not* possible to generate a table from a `jags` object and a `stanfit` object.

```{r, error = T, results = 'asis', eval=FALSE}
mcmcReg(list(fit.jags, fit.stan), format = 'html', doctype = F)
```

When including multiple models, supplying scalars or vectors to arguments will result in them being applied to each model equally.
Treating models differentially is possible by supplying a list of scalars or vectors instead.

```{r, results = 'asis'}
mcmcReg(list(fit.rstanarm, fit.rstanarm),
  pars = list(c('female', 'extraversion'), 'neuroticism'),
  format = 'html', doctype = F)
```

### `texreg` arguments

Although `custom.coef.map` is not an argument to `mcmcReg`, it works because `mcmcReg` supports all standard `texreg` arguments (a few have been overridden, but they are explicit arguments to `mcmcReg`). This introduces a high level of control over the output of `mcmcReg`, as e.g. models can be renamed.

```{r, results = 'asis'}
mcmcReg(fit.rstanarm, custom.model.names = 'Binary Outcome', format = 'html', doctype = F)
```

# Predicted probabilities

## `mcmcAveProb`

To evaluate the relationship between covariates and a binary outcome, this function calculates the predicted probability ($Pr(y = 1)$) at pre-defined values of one covariate of interest ($x$), while all other covariates are held at a "typical" value. This follows suggestions outlined in @KingEtal2000 and elsewhere, which are commonly adopted by users of GLMs. The `mcmcAveProb` function by default calculates the median value of all covariates other than $x$ as "typical" values.

Before moving on, we show how to create a matrix of posterior draws of coefficients to pass on to these functions. Eventually, each function will contain code similar to the first section of `mcmcTab` to do this as part of the function.

```{r}
mcmcmat.jags <- as.matrix(coda::as.mcmc(fit.jags))

mcmcmat.MCMCpack <- as.matrix(fit.MCMCpack)

mcmcmat.rstanarm <- as.matrix(fit.rstanarm)
```

```{r, eval=FALSE}
mcmcmat.stan <- as.matrix(fit.stan)
```

Next, we generate the model matrix to pass on to the function. A model matrix contains as many columns as estimated regression coefficients. The first column is a vector of 1s (corresponding to the intercept); the remaining columns are the observed values of covariates in the model. **Note: the order of columns in the model matrix must correspond to the order of columns in the matrix of posterior draws.**

```{r}
mm <- model.matrix(volunteer ~ female + neuroticism + extraversion, data = df)
```

We can now generate predicted probabilities for different values of a covariate of interest.

### Sex

First, we generate full posterior distributions of the predicted probability of volunteering for a typical female and a typical male. In this function and `mcmcObsProb`, users specify the range of $x$ (here 0 and 1) as well as the column number of $x$ in both the matrix of posterior draws and the model matrix.

```{r}
aveprob.female.jags <- mcmcAveProb(modelmatrix = mm,
  mcmcout = mcmcmat.jags[, 1:ncol(mm)],
  xcol = 2,
  xrange = c(0, 1),
  link = "logit",
  ci = c(0.025, 0.975),
  fullsims = TRUE)
```

Users can then visualize this posterior distribution using the ggplot2 and `ggridges` packages.

```{r}
library("ggplot2")
library("ggridges")
ggplot(data = aveprob.female.jags,
  aes(y = factor(x), x = pp)) +
  stat_density_ridges(quantile_lines = TRUE,
    quantiles = c(0.025, 0.5, 0.975), vline_color = "white") +
  scale_y_discrete(labels = c("Male", "Female")) +
  ylab("") +
  xlab("Estimated probability of volunteering") +
  labs(title = "Probability based on average-case approach") +
  theme_minimal()
```

### Extraversion

For continuous variables of interest, users may want to set `fullsims = FALSE` to obtain the median predicted probability along the range of $x$ as well as a lower and upper bound of choice (here, the 95% credible interval).
```{r}
aveprob.extra.jags <- mcmcAveProb(modelmatrix = mm,
  mcmcout = mcmcmat.jags[, 1:ncol(mm)],
  xcol = 4,
  xrange = seq(min(df$extraversion), max(df$extraversion), length.out = 20),
  link = "logit",
  ci = c(0.025, 0.975),
  fullsims = FALSE)
```

Users can then plot the resulting probabilities using any plotting functions, such as ggplot2.

```{r}
ggplot(data = aveprob.extra.jags,
  aes(x = x, y = median_pp)) +
  geom_ribbon(aes(ymin = lower_pp, ymax = upper_pp), fill = "gray") +
  geom_line() +
  xlab("Extraversion") +
  ylab("Estimated probability of volunteering") +
  ylim(0, 1) +
  labs(title = "Probability based on average-case approach") +
  theme_minimal()
```

## `mcmcObsProb`

As an alternative to probabilities for "typical" cases, @HanmerKalkan2013 suggest calculating predicted probabilities for all observed cases and then deriving an "average effect". In their words, the goal of this postestimation "is to obtain an estimate of the average effect in the population ... rather than seeking to understand the effect for the average case."

### Sex

We first calculate the average "effect" of sex on volunteering, again generating a full posterior distribution. Again, `xcol` represents the position of the covariate of interest, and `xrange` specifies the values for which $Pr(y = 1)$ is to be calculated.

```{r}
obsprob.female.jags <- mcmcObsProb(modelmatrix = mm,
  mcmcout = mcmcmat.jags[, 1:ncol(mm)],
  xcol = 2,
  xrange = c(0, 1),
  link = "logit",
  ci = c(0.025, 0.975),
  fullsims = TRUE)
```

Users can again plot the resulting densities.

```{r}
ggplot(data = obsprob.female.jags,
  aes(y = factor(x), x = pp)) +
  stat_density_ridges(quantile_lines = TRUE,
    quantiles = c(0.025, 0.5, 0.975), vline_color = "white") +
  scale_y_discrete(labels = c("Male", "Female")) +
  ylab("") +
  xlab("Estimated probability of volunteering") +
  labs(title = "Probability based on observed-case approach") +
  theme_minimal()
```

### Extraversion

For this continuous predictor, we use `fullsims = FALSE`.

```{r}
obsprob.extra.jags <- mcmcObsProb(modelmatrix = mm,
  mcmcout = mcmcmat.jags[, 1:ncol(mm)],
  xcol = 4,
  xrange = seq(min(df$extraversion), max(df$extraversion), length.out = 20),
  link = "logit",
  ci = c(0.025, 0.975),
  fullsims = FALSE)
```

We then plot the resulting probabilities across observed cases.

```{r}
ggplot(data = obsprob.extra.jags,
  aes(x = x, y = median_pp)) +
  geom_ribbon(aes(ymin = lower_pp, ymax = upper_pp), fill = "gray") +
  geom_line() +
  xlab("Extraversion") +
  ylab("Estimated probability of volunteering") +
  ylim(0, 1) +
  labs(title = "Probability based on observed-case approach") +
  theme_minimal()
```

# First differences

## `mcmcFD`

To summarize typical effects across covariates, we generate "first differences" [@Long1997; @KingEtal2000]. This quantity represents, for each covariate, the difference in predicted probabilities for cases with low and high values of the respective covariate. For each of these differences, all other variables are held constant at their median.

```{r}
fdfull.jags <- mcmcFD(modelmatrix = mm,
  mcmcout = mcmcmat.jags[, 1:ncol(mm)],
  link = "logit",
  ci = c(0.025, 0.975),
  fullsims = TRUE)
summary(fdfull.jags)
```

The posterior distribution can be summarized as above, or users can directly obtain a summary when setting `fullsims` to FALSE.

```{r}
fdsum.jags <- mcmcFD(modelmatrix = mm,
  mcmcout = mcmcmat.jags[, 1:ncol(mm)],
  link = "logit",
  ci = c(0.025, 0.975),
  fullsims = FALSE)
fdsum.jags
```

Users can plot the median and credible intervals of the summary of the first differences.
```{r}
ggplot(data = fdsum.jags,
  aes(x = median_fd, y = VarName)) +
  geom_point() +
  geom_segment(aes(x = lower_fd, xend = upper_fd, yend = VarName)) +
  geom_vline(xintercept = 0) +
  xlab("Change in Pr(Volunteering)") +
  ylab("") +
  theme_minimal()
```

## Plotting `mcmcFD` objects

To make use of the full posterior distribution of first differences, we provide a dedicated plotting method, `plot.mcmcFD`, which returns a ggplot2 object that can be further customized. The function is modeled after Figure 1 in @Karreth2018. Users can specify a region of practical equivalence and print the percent of posterior draws to the right or left of the ROPE. If ROPE is not specified, the figure automatically prints the percent of posterior draws to the left or right of 0.

```{r}
plot(fdfull.jags, ROPE = c(-0.01, 0.01))
```

The user can further customize the plot.

```{r}
p <- plot(fdfull.jags, ROPE = c(-0.01, 0.01))
p + labs(title = "First differences") +
  ggridges::theme_ridges()
```

# Model fit

## `mcmcRocPrc`

One way to assess model fit is to calculate the area under the Receiver Operating Characteristic (ROC) and Precision-Recall curves. A short description of these curves and their utility for model assessment is provided in @Beger2016. The `mcmcRocPrc` function produces an object with four elements: the area under the ROC curve, the area under the PR curve, and two dataframes to plot each curve. When `fullsims` is set to `FALSE`, the elements represent the median of the posterior distribution of each quantity.

`mcmcRocPrc` currently requires an "rjags" object (a model fitted in R2jags) as input. Future package versions will generalize this input to allow for model objects fit with any of the other packages used in BayesPostEst.

<!-- Because each of these measures relies on comparing the observed $y$ to $Pr(y = 1)$, the function requires both the posterior distribution of all regression coefficients as well as a model frame. This model frame contains all variables used to estimate the model, with the outcome variable in the first column and all other variables following thereafter. -->

```{r}
fitstats <- mcmcRocPrc(object = fit.jags,
  yname = "volunteer",
  xnames = c("female", "neuroticism", "extraversion"),
  curves = TRUE,
  fullsims = FALSE)
```

Users can then print the area under each curve:

```{r}
fitstats$area_under_roc
```

```{r}
fitstats$area_under_prc
```

Users can also plot the ROC curve...

```{r}
ggplot(data = as.data.frame(fitstats, what = "roc"), aes(x = x, y = y)) +
  geom_line() +
  geom_abline(intercept = 0, slope = 1, color = "gray") +
  labs(title = "ROC curve") +
  xlab("1 - Specificity") +
  ylab("Sensitivity") +
  theme_minimal()
```

... as well as the precision-recall curve.

```{r}
ggplot(data = as.data.frame(fitstats, what = "prc"), aes(x = x, y = y)) +
  geom_line() +
  labs(title = "Precision-Recall curve") +
  xlab("Recall") +
  ylab("Precision") +
  theme_minimal()
```

To plot the posterior distribution of the area under the curves, users set the `fullsims` argument to `TRUE`. Unless a user wishes to plot credible intervals around the ROC and PR curves themselves, we recommend keeping `curves` at `FALSE` to avoid long computation time.

```{r}
fitstats.fullsims <- mcmcRocPrc(object = fit.jags,
  yname = "volunteer",
  xnames = c("female", "neuroticism", "extraversion"),
  curves = FALSE,
  fullsims = TRUE)
```

We can then plot the posterior density of the area under each curve.
```{r}
ggplot(as.data.frame(fitstats.fullsims), aes(x = area_under_roc)) +
  geom_density() +
  labs(title = "Area under the ROC curve") +
  theme_minimal()
```

```{r}
ggplot(as.data.frame(fitstats.fullsims), aes(x = area_under_prc)) +
  geom_density() +
  labs(title = "Area under the Precision-Recall curve") +
  theme_minimal()
```

```{r echo=FALSE, results='hide', message=FALSE}
rm(mod.jags)
rm(mod.stan)
```

# References
/scratch/gouwar.j/cran-all/cranData/BayesPostEst/inst/doc/getting_started.Rmd
--- title: "Using the BayesPostEst package" author: - "Johannes Karreth" - "Shana Scogin" - "Rob Williams" - "Andreas Beger" date: "`r Sys.Date()`" output: rmarkdown::html_vignette bibliography: "references.bib" vignette: > %\VignetteIndexEntry{getting_started} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, include = FALSE} options(rmarkdown.html_vignette.check_title = FALSE) pkgs <- c("R2jags", "rjags", "MCMCpack", "rstan", "rstanarm", "ggplot2", "ggridges") if (!all(sapply(pkgs, require, quietly = TRUE, character.only = TRUE))) { knitr::opts_chunk$set( eval = FALSE, comment = "#>") } else { knitr::opts_chunk$set( collapse = TRUE, echo = TRUE, message = FALSE, warning = FALSE, out.width = "90%", fig.align = "center", fig.width = 8, fig.height = 8, comment = "#>" ) } ``` # Introduction BayesPostEst contains functions to generate postestimation quantities after estimating Bayesian regression models. The package was inspired by a set of functions written originally for [Johannes Karreth](http://www.jkarreth.net)'s workshop on Bayesian modeling at the [ICPSR Summer program](https://www.icpsr.umich.edu/web/pages/sumprog/). It has grown to include new functions (see `mcmcReg`) and will continue to grow to support Bayesian postestimation. For now, the package focuses mostly on generalized linear regression models for binary outcomes (logistic and probit regression). More details on the package philosophy, its functions, and related packages can be found in @ScoginEtal2019. # Installation To install the latest release on CRAN: ```{r eval=FALSE} install.packages("BayesPostEst") ``` The latest development version on GitHub can be installed with: ```{r eval=FALSE} library("devtools") install_github("ShanaScogin/BayesPostEst") ``` Once you have installed the package, you can access it by calling: ```{r} library("BayesPostEst") ``` After the package is loaded, check out the `?BayesPostEst` to see a help file. # General setup Most functions in this package work with posterior distributions of parameters. These distributions need to be converted into a matrix. All functions in the package do this automatically for posterior draws generated by JAGS, BUGS, MCMCpack, rstan, and rstanarm. For posterior draws generated by other tools, users must convert these objects into a matrix, where rows represent iterations and columns represent parameters. # Example data This vignette uses the `Cowles` dataset [@CowlesDavis1987] from the carData package [@carData2018]. ```{r} df <- carData::Cowles ``` This data frame contains information on 1421 individuals in the following variables: - neuroticism: scale from Eysenck personality inventory. - extraversion: scale from Eysenck personality inventory. - sex: a factor with levels: female; male. - volunteer: volunteering, a factor with levels: no; yes. This is the outcome variable for the running example in this vignette. Before proceeding, we convert the two factor variables `sex` and `volunteer` into numeric variables. We also means-center and standardize the two continuous variables by dividing each by two standard deviations [@Gelman2007]. 
```{r} df$female <- (as.numeric(df$sex) - 2) * (-1) df$volunteer <- as.numeric(df$volunteer) - 1 df$extraversion <- (df$extraversion - mean(df$extraversion)) / (2 * sd(df$extraversion)) df$neuroticism <- (df$neuroticism - mean(df$neuroticism)) / (2 * sd(df$neuroticism)) ``` We estimate a Bayesian generalized linear model with the inverse logit link function, where $$ Pr(\text{Volunteering}_i) = \text{logit}^{-1}(\beta_1 + \beta_2 \text{Female}_i + \beta_3 \text{Neuroticism}_i + \beta_4 \text{Extraversion}_i) $$ `BayesPostEst` functions accommodate GLM estimates for both logit and probit link functions. The examples proceed with the logit link function. If we had estimated a probit regression, the corresponding argument `link` in relevant function calls would need to be set to `link = "probit"`. Otherwise, it is set to `link = "logit"` by default. # Model estimation To use `BayesPostEst`, we first estimate a Bayesian regression model. This vignette demonstrates five tools for doing so: JAGS (via the [R2jags](https://cran.r-project.org/package=R2jags) and [rjags](https://cran.r-project.org/package=rjags) packages), [MCMCpack](https://cran.r-project.org/package=MCMCpack), and the two Stan interfaces [rstan](https://cran.r-project.org/package=rstan) and [rstanarm](https://cran.r-project.org/package=rstanarm). ## JAGS First, we prepare the data for JAGS [@jags2017]. Users need to combine all variables into a list and specify any other elements, like in this case N, the number of observations. ```{r} dl <- as.list(df[, c("volunteer", "female", "neuroticism", "extraversion")]) dl$N <- nrow(df) ``` We then write the JAGS model into the working directory. ```{r} mod.jags <- paste(" model { for (i in 1:N){ volunteer[i] ~ dbern(p[i]) logit(p[i]) <- mu[i] mu[i] <- b[1] + b[2] * female[i] + b[3] * neuroticism[i] + b[4] * extraversion[i] } for(j in 1:4){ b[j] ~ dnorm(0, 0.1) } } ") writeLines(mod.jags, "mod.jags") ``` We then define the parameters for which we wish to retain posterior distributions and provide starting values. ```{r} params.jags <- c("b") inits1.jags <- list("b" = rep(0, 4)) inits.jags <- list(inits1.jags, inits1.jags, inits1.jags, inits1.jags) ``` Now, fit the model using the R2jags package. ```{r} library("R2jags") set.seed(123) fit.jags <- jags(data = dl, inits = inits.jags, parameters.to.save = params.jags, n.chains = 4, n.iter = 2000, n.burnin = 1000, model.file = "mod.jags") ``` The same data and model can be used to fit the model using the rjags package: ```{r} library("rjags") mod.rjags <- jags.model(file = "mod.jags", data = dl, inits = inits.jags, n.chains = 4, n.adapt = 1000) fit.rjags <- coda.samples(model = mod.rjags, variable.names = params.jags, n.iter = 2000) ``` ## MCMCpack We estimate the same model using MCMCpack [@MCMCpack]. ```{r} library("MCMCpack") fit.MCMCpack <- MCMClogit(volunteer ~ female + neuroticism + extraversion, data = df, burning = 1000, mcmc = 2000, seed = 123, b0 = 0, B0 = 0.1) ``` ## RStan We write the same model in Stan language. ```{r, eval=FALSE} mod.stan <- paste(" data { int<lower=0> N; int<lower=0,upper=1> volunteer[N]; vector[N] female; vector[N] neuroticism; vector[N] extraversion; } parameters { vector[4] b; } model { volunteer ~ bernoulli_logit(b[1] + b[2] * female + b[3] * neuroticism + b[4] * extraversion); for(i in 1:4){ b[i] ~ normal(0, 3); } } ") writeLines(mod.stan, "mod.stan") ``` We then load rstan [@rstan2019]... ```{r, eval=FALSE} library("rstan") rstan_options(auto_write = TRUE) options(mc.cores = 2) ``` ... 
and estimate the model, re-using the data in list format created for JAGS earlier. ```{r, eval=FALSE} fit.stan <- stan(file = "mod.stan", data = dl, pars = c("b"), chains = 4, iter = 2000, seed = 123) ``` ## rstanarm Lastly, we use the rstanarm interface [@rstanarm2019] to estimate the same model again. ```{r, results = 'hide'} library("rstanarm") fit.rstanarm <- stan_glm(volunteer ~ female + neuroticism + extraversion, data = df, family = binomial(link = "logit"), prior = normal(0, 3), prior_intercept = normal(0, 3), chains = 4, iter = 2000, seed = 123) ``` # Tables of regression coefficients and other parameters BayesPostEst contains functions to generate regression tables from objects created by the following packages: R2jags, runjags, rjags, R2WinBUGS, MCMCpack, rstan, rstanarm, and brms. This includes the following object classes: `jags`, `rjags`, `bugs`, `mcmc`, `mcmc.list`, `stanreg`, `stanfit`, `brmsfit`. The package contains two different functions to produce regression tables: - `mcmcTab` - `mcmcReg` Each has its own advantages which we discuss in depth below. ## Bayesian regression tables `mcmcTab` generates a table summarizing the posterior distributions of all parameters contained in the model object. This table can then be used to summarize parameter quantities. By default, `mcmcTab` generates a dataframe with one row per parameter and columns containing the median, standard deviation, and 95% credible interval of each parameter's posterior distribution. ```{r} mcmcTab(fit.jags) ``` ```{r} mcmcTab(fit.rjags) ``` ```{r} mcmcTab(fit.MCMCpack) ``` ```{r, eval=FALSE} mcmcTab(fit.stan) ``` ```{r} mcmcTab(fit.rstanarm) ``` ### Proportion of positive/negative draws Users can add a column to the table that calculates the percent of posterior draws that have the same sign as the median of the posterior distribution. ```{r} mcmcTab(fit.jags, Pr = TRUE) ``` ### ROPE Users can also define a "region of practical equivalence" (ROPE; @Kruschke2013; @Kruschke2018). This region is a band of values around 0 that are "practically equivalent" to 0 or no effect. For this to be useful, all parameters (e.g. regression coefficients) must be on the same scale because mcmcTab accepts only one definition of ROPE for all parameters. Users can standardize regression coefficients to achieve this. Because we standardized variables earlier, the coefficients (except the intercept) are on a similar scale and we define the ROPE to be between -0.1 and 0.1. ```{r} mcmcTab(fit.jags, pars = c("b[2]", "b[3]", "b[4]"), ROPE = c(-0.1, 0.1)) ``` ## Conventional regression tables The `mcmcReg` function serves as an interface to `texreg` and produces more polished and publication-ready tables than `mcmcTab`. `mcmcReg` writes tables in HTML or LaTeX format. `mcmcReg` can produce tables with multiple models with each model in a column and supports flexible renaming of parameters. However, these tables are more similar to standard frequentist regression tables, so they do not have a way to incorporate the percent of posterior draws that have the same sign as the median of the posterior distribution or a ROPE like `mcmcTab` is able to. Uncertainty intervals can be either standard credible intervals or highest posterior density intervals [@Kruschke2015] using the `hpdi` argument, and their level can be set with the `ci` argument (default 95%). Separately calculated goodness of fit statistics can be included with the `gof` argument. 
```{r, results = 'asis'} mcmcReg(fit.jags, format = 'html', doctype = F) ``` ### Limiting output `mcmcReg` supports limiting the parameters included in the table via the `pars` argument. By default, all parameters saved in the model object will be included. In the case of `fit.jags`, this include the deviance estimate. If we wish to exclude it, we can specify `pars = 'b'` which will capture `b[1]`-`b[4]` using regular expression matching. ```{r, results = 'asis'} mcmcReg(fit.jags, pars = 'b', format = 'html', regex = T, doctype = F) ``` If we only wish to exclude the intercept, we can do this by explicitly specifying the parameters we wish to include as a vector. Note that in this example we have to escape the `[]`s in `pars` because they are a reserved character in regular expressions. ```{r, results = 'asis'} mcmcReg(fit.jags, pars = c('b\\[1\\]', 'b\\[3\\]', 'b\\[4\\]'), format = 'html', regex = T, doctype = F) ``` `mcmcReg` also supports partial regular expression matching of multiple parameter family names as demonstrated below. ```{r, results = 'asis'} mcmcReg(fit.jags, pars = c('b', 'dev'), format = 'html', regex = T, doctype = F) ``` ### Custom coefficient names `mcmcReg` supports custom coefficient names to support publication-ready tables. The simplest option is via the `coefnames` argument. Note that the number of parameters and the number of custom coefficient names must match, so it is a good idea to use `pars` in tandem with `coefnames`. ```{r, results = 'asis'} mcmcReg(fit.jags, pars = 'b', coefnames = c('(Constant)', 'Female', 'Neuroticism', 'Extraversion'), format = 'html', regex = T, doctype = F) ``` A more flexible way to include custom coefficient names is via the `custom.coef.map` argument, which accepts a named list, with names as parameter names in the model and values as the custom coefficient names. ```{r, results = 'asis'} mcmcReg(fit.jags, pars = 'b', custom.coef.map = list('b[1]' = '(Constant)', 'b[2]' = 'Female', 'b[3]' = 'Nueroticism', 'b[4]' = 'Extraversion'), format = 'html', regex = T, doctype = F) ``` The advantage of `custom.coef.map` is that it can flexibly reorder and omit coefficients from the table based on their positions within the list. Notice in the code below that deviance does not have to be included in `pars` because its absence from `custom.coef.map` omits it from the resulting table. ```{r, results = 'asis'} mcmcReg(fit.jags, custom.coef.map = list('b[2]' = 'Female', 'b[4]' = 'Extraversion', 'b[1]' = '(Constant)'), format = 'html', doctype = F) ``` However, it is important to remember that `mcmcReg` will look for the parameter names in the model object, so be sure to inspect it for the correct parameter names. This is important because `stan_glm` will produce a model object with variable names instead of indexed parameter names. ### Multiple models `mcmcReg` accepts multiple model objects and will produce a table with one model per column. To produce a table from multiple models, pass a list of models as the `mod` argument to `mcmcReg`. ```{r, results = 'asis', eval=FALSE} mcmcReg(list(fit.stan, fit.stan), format = 'html', doctype = F) ``` Note, however, that all model objects must be of the same class, so it is *not* possible to generate a table from a `jags` object and a `stanfit` object. ```{r, error = T, results = 'asis', eval=FALSE} mcmcReg(list(fit.jags, fit.stan), format = 'html', doctype = F) ``` When including multiple models, supplying scalars or vectors to arguments will result in them being applied to each model equally. 
Treating models differentially is possible by supplying a list of scalars or vectors instead. ```{r, results = 'asis'} mcmcReg(list(fit.rstanarm, fit.rstanarm), pars = list(c('female', 'extraversion'), 'neuroticism'), format = 'html', doctype = F) ``` ### `texreg` arguments Although `custom.coef.map` is not an argument to `mcmcReg`, it works because `mcmcReg` supports all standard `texreg` arguments (a few have been overridden, but they are explicit arguments to `mcmcReg`). This introduces a high level of control over the output of `mcmcReg`, as e.g. models can be renamed. ```{r, results = 'asis'} mcmcReg(fit.rstanarm, custom.model.names = 'Binary Outcome', format = 'html', doctype = F) ``` # Predicted probabilities ## `mcmcAveProb` To evaluate the relationship between covariates and a binary outcome, this function calculates the predicted probability ($Pr(y = 1)$) at pre-defined values of one covariate of interest ($x$), while all other covariates are held at a "typical" value. This follows suggestions outlined in @KingEtal2000 and elsewhere, which are commonly adopted by users of GLMs. The `mcmcAveProb` function by default calculates the median value of all covariates other than $x$ as "typical" values. Before moving on, we show how create a matrix of posterior draws of coefficients to pass onto these functions. Eventually, each function will contain code similar to the first section of `mcmcTab` to do this as part of the function. ```{r} mcmcmat.jags <- as.matrix(coda::as.mcmc(fit.jags)) mcmcmat.MCMCpack <- as.matrix(fit.MCMCpack) mcmcmat.rstanarm <- as.matrix(fit.rstanarm) ``` ```{r, eval=FALSE} mcmcmat.stan <- as.matrix(fit.stan) ``` Next, we generate the model matrix to pass on to the function. A model matrix contains as many columns as estimated regression coefficients. The first column is a vector of 1s (corresponding to the intercept); the remaining columns are the observed values of covariates in the model. **Note: the order of columns in the model matrix must correspond to the order of columns in the matrix of posterior draws.** ```{r} mm <- model.matrix(volunteer ~ female + neuroticism + extraversion, data = df) ``` We can now generate predicted probabilities for different values of a covariate of interest. ### Sex First, we generate full posterior distributions of the predicted probability of volunteering for a typical female and a typical male. In this function and `mcmcObsProb`, users specify the range of $x$ (here 0 and 1) as well as the number of the column of $x$ in the matrix of posterior draws as well as the model matrix. ```{r} aveprob.female.jags <- mcmcAveProb(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], xcol = 2, xrange = c(0, 1), link = "logit", ci = c(0.025, 0.975), fullsims = TRUE) ``` Users can then visualize this posterior distribution using the ggplot2 and `ggridges` packages. ```{r} library("ggplot2") library("ggridges") ggplot(data = aveprob.female.jags, aes(y = factor(x), x = pp)) + stat_density_ridges(quantile_lines = TRUE, quantiles = c(0.025, 0.5, 0.975), vline_color = "white") + scale_y_discrete(labels = c("Male", "Female")) + ylab("") + xlab("Estimated probability of volunteering") + labs(title = "Probability based on average-case approach") + theme_minimal() ``` ### Extraversion For continuous variables of interest, users may want to set `fullsims = FALSE` to obtain the median predicted probability along the range of $x$ as well as a lower and upper bound of choice (here, the 95% credible interval). 
```{r} aveprob.extra.jags <- mcmcAveProb(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], xcol = 4, xrange = seq(min(df$extraversion), max(df$extraversion), length.out = 20), link = "logit", ci = c(0.025, 0.975), fullsims = FALSE) ``` Users can then plot the resulting probabilities using any plotting functions, such as ggplot2. ```{r} ggplot(data = aveprob.extra.jags, aes(x = x, y = median_pp)) + geom_ribbon(aes(ymin = lower_pp, ymax = upper_pp), fill = "gray") + geom_line() + xlab("Extraversion") + ylab("Estimated probability of volunteering") + ylim(0, 1) + labs(title = "Probability based on average-case approach") + theme_minimal() ``` ## `mcmcObsProb` As an alternative to probabilities for "typical" cases, @HanmerKalkan2013 suggest to calculate predicted probabilities for all observed cases and then derive an "average effect". In their words, the goal of this postestimation "is to obtain an estimate of the average effect in the population ... rather than seeking to understand the effect for the average case." ### Sex We first calculate the average "effect" of sex on volunteering, again generating a full posterior distribution. Again, `xcol` represents the position of the covariate of interest, and `xrange` specifies the values for which $Pr(y = 1)$ is to be calculated. ```{r} obsprob.female.jags <- mcmcObsProb(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], xcol = 2, xrange = c(0, 1), link = "logit", ci = c(0.025, 0.975), fullsims = TRUE) ``` Users can again plot the resulting densities. ```{r} ggplot(data = obsprob.female.jags, aes(y = factor(x), x = pp)) + stat_density_ridges(quantile_lines = TRUE, quantiles = c(0.025, 0.5, 0.975), vline_color = "white") + scale_y_discrete(labels = c("Male", "Female")) + ylab("") + xlab("Estimated probability of volunteering") + labs(title = "Probability based on observed-case approach") + theme_minimal() ``` ### Extraversion For this continuous predictor, we use `fullsims = FALSE`. ```{r} obsprob.extra.jags <- mcmcObsProb(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], xcol = 4, xrange = seq(min(df$extraversion), max(df$extraversion), length.out = 20), link = "logit", ci = c(0.025, 0.975), fullsims = FALSE) ``` We then plot the resulting probabilities across observed cases. ```{r} ggplot(data = obsprob.extra.jags, aes(x = x, y = median_pp)) + geom_ribbon(aes(ymin = lower_pp, ymax = upper_pp), fill = "gray") + geom_line() + xlab("Extraversion") + ylab("Estimated probability of volunteering") + ylim(0, 1) + labs(title = "Probability based on observed-case approach") + theme_minimal() ``` # First differences ## `mcmcFD` To summarize typical effects across covariates, we generate "first differences" (@Long1997, @KingEtal2000). This quantity represents, for each covariate, the difference in predicted probabilities for cases with low and high values of the respective covariate. For each of these differences, all other variables are held constant at their median. ```{r} fdfull.jags <- mcmcFD(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], link = "logit", ci = c(0.025, 0.975), fullsims = TRUE) summary(fdfull.jags) ``` The posterior distribution can be summarized as above, or users can directly obtain a summary when setting `fullsims` to FALSE. ```{r} fdsum.jags <- mcmcFD(modelmatrix = mm, mcmcout = mcmcmat.jags[, 1:ncol(mm)], link = "logit", ci = c(0.025, 0.975), fullsims = FALSE) fdsum.jags ``` Users can plot the median and credible intervals of the summary of the first differences. 
```{r} ggplot(data = fdsum.jags, aes(x = median_fd, y = VarName)) + geom_point() + geom_segment(aes(x = lower_fd, xend = upper_fd, yend = VarName)) + geom_vline(xintercept = 0) + xlab("Change in Pr(Volunteering)") + ylab("") + theme_minimal() ``` ## Plotting `mcmcFD` objects To make use of the full posterior distribution of first differences, we provide a dedicated plotting method, `plot.mcmcFD`, which returns a ggplot2 object that can be further customized. The function is modeled after Figure 1 in @Karreth2018. Users can specify a region of practical equivalence and print the percent of posterior draws to the right or left of the ROPE. If ROPE is not specified, the figure automatically prints the percent of posterior draws to the left or right of 0. ```{r} plot(fdfull.jags, ROPE = c(-0.01, 0.01)) ``` The user can further customize the plot. ```{r} p <- plot(fdfull.jags, ROPE = c(-0.01, 0.01)) p + labs(title = "First differences") + ggridges::theme_ridges() ``` # Model fit ## `mcmcRocPrc` One way to assess model fit is to calculate the area under the Receiver Operating Characteristic (ROC) and Precision-Recall curves. A short description of these curves and their utility for model assessment is provided in @Beger2016. The `mcmcRocPrc` function produces an object with four elements: the area under the ROC curve, the area under the PR curve, and two dataframes to plot each curve. When `fullsims` is set to `FALSE`, the elements represent the median of the posterior distribution of each quantity. `mcmcRocPrc` currently requires an "rjags" object (a model fitted in R2jags) as input. Future package versions will generalize this input to allow for model objects fit with any of the other packages used in BayesPostEst. <!-- Because each of these measures relies on comparing the observed $y$ to $Pr(y = 1)$, the function requires both the posterior distribution of all regression coefficients as well as a model frame. This model frame contains all variables used to estimate the model, with the outcome variable in the first column and all other variables following thereafter. --> ```{r} fitstats <- mcmcRocPrc(object = fit.jags, yname = "volunteer", xnames = c("female", "neuroticism", "extraversion"), curves = TRUE, fullsims = FALSE) ``` Users can then print the area under the each curve: ```{r} fitstats$area_under_roc ``` ```{r} fitstats$area_under_prc ``` Users can also plot the ROC curve... ```{r} ggplot(data = as.data.frame(fitstats, what = "roc"), aes(x = x, y = y)) + geom_line() + geom_abline(intercept = 0, slope = 1, color = "gray") + labs(title = "ROC curve") + xlab("1 - Specificity") + ylab("Sensitivity") + theme_minimal() ``` ... as well as the precision-recall curve. ```{r} ggplot(data = as.data.frame(fitstats, what = "prc"), aes(x = x, y = y)) + geom_line() + labs(title = "Precision-Recall curve") + xlab("Recall") + ylab("Precision") + theme_minimal() ``` To plot the posterior distribution of the area under the curves, users set the `fullsims` argument to `TRUE`. Unless a user wishes to plot credible intervals around the ROC and PR curves themselves, we recommend keeping `curves` at `FALSE` to avoid long computation time. ```{r} fitstats.fullsims <- mcmcRocPrc(object = fit.jags, yname = "volunteer", xnames = c("female", "neuroticism", "extraversion"), curves = FALSE, fullsims = TRUE) ``` We can then plot the posterior density of the area under each curve. 
```{r} ggplot(as.data.frame(fitstats.fullsims), aes(x = area_under_roc)) + geom_density() + labs(title = "Area under the ROC curve") + theme_minimal() ``` ```{r} ggplot(as.data.frame(fitstats.fullsims), aes(x = area_under_prc)) + geom_density() + labs(title = "Area under the Precision-Recall curve") + theme_minimal() ``` ```{r echo=FALSE, results='hide', message=FALSE} rm(mod.jags) rm(mod.stan) rm(mod.rds) ``` # References
/scratch/gouwar.j/cran-all/cranData/BayesPostEst/vignettes/getting_started.Rmd
# *********************************************************************************************************************
# Rcpp wrapper functions which all return a list with $cusum and $proj and $cpt
# exception: wildBinSeg only returns a changepoint vector
# all functions rescale variance like Inspect so that threshold can be used from compute.threshold()
# *********************************************************************************************************************

#' @useDynLib BayesProject

# not exported: rescale each series to unit noise variance, using the
# median absolute deviation of the first differences as the scale estimate
rescale.variance <- function(x) {
  p <- nrow(x)
  for (j in 1:p) {
    scale <- mad(diff(x[j, ]))/sqrt(2)
    x[j, ] <- x[j, ]/scale
  }
  return(x)
}

# not exported: simulate a p x n dataset with a single change of size vartheta
# at time z, shared by the first k series (only the defaults shape=3 and
# noise=0, i.e. Gaussian noise, are exercised in this file)
single.change <- function(n, p, k, z, vartheta, sigma = 1, shape = 3, noise = 0) {
  mu <- matrix(0, p, n)
  if (shape == 3) {
    theta = (1:k)^(-1/2)
    theta = theta/norm(theta, type="2") * sqrt(k) * vartheta
  }
  if (noise != -1) {
    mu[1:k, (z + 1):n] <- theta
  }
  if (noise <= 0) {
    W = matrix(rnorm(p * n), p, n) * sigma
  }
  x <- mu + W
  return(list(x=x, mu=mu))
}

# not exported
# function to calibrate the changepoint detection threshold; requires a function
# "cusumFct" which computes cusum(x) with a single parameter for the dataset x
compute.threshold <- function(n, p, cusumFct, nrep=100) {
  cusum.stats <- sapply(1:nrep, function(i) {
    max(cusumFct(single.change(n, p, 1, n - 1, 0)$x))
  })
  max(cusum.stats)
}

# not exported
# grid of candidate changepoint locations: thin 1:maxTau so that the ratio of
# segment lengths between consecutive grid points stays above alpha
cusum.spacing <- function(n, maxTau, alpha=0.6) {
  # candidate grid points t, true changepoint tau
  # case tau <= t
  t <- maxTau
  v <- t
  for(tau in maxTau:1) {
    if(tau/t < alpha) {
      t <- tau
      v <- c(t, v)
    }
  }
  # case t <= tau
  t <- 1
  w <- t
  for(tau in 1:maxTau) {
    if((n-tau)/(n-t) < alpha) {
      t <- tau
      w <- c(w, t)
    }
  }
  return(sort(union(v, w)))
}

# not exported: univariate CUSUM transform; entry t equals
# sqrt(t*(n-t)/n) * (mean(x[(t+1):n]) - mean(x[1:t]))
vector.cusum.transform <- function(x) {
  n <- length(x)
  leftsums <- cumsum(x)
  rightsums <- leftsums[n] - leftsums
  t <- 1:(n - 1)
  return( (rightsums[t]/(n - t) - leftsums[t]/t) * sqrt(t*(n - t)/n) )
}

#' Cpp implementation of the Bayesian projection algorithm to detect single multivariate changepoints.
#'
#' Detects one multivariate changepoint in a dataset using the fast projection direction algorithm of Hahn et al. (2019).
#' Only the dataset is required as the first parameter.
#' The testing threshold ("threshold"), the number of timepoints to calculate a projection ("nTimePoints") and the regularisation parameter ("K") are chosen automatically if not supplied.
#'
#' @param x A \eqn{p \times n} matrix representing \eqn{p} data series having \eqn{n} observations each.
#' @param threshold The testing threshold to detect the single changepoint. If missing, the parameter will be calibrated automatically.
#' @param nTimePoints The number of equidistant timepoints at which the projection direction is calculated. If no value (NULL) is given, timepoints are chosen automatically.
#' @param K The regularisation parameter for the Bayesian projection direction. Default is \eqn{1/\sqrt{2}}.
#' @param rescale.var A boolean flag to indicate if the variance should be rescaled before detecting a changepoint. Default is TRUE.
#'
#' @importFrom Rdpack reprompt
#' @references Hahn, G., Fearnhead, P., Eckley, I.A. (2020). Fast computation of a projection direction for multivariate changepoint detection. Stat Comput.
#'
#' @examples
#' library(BayesProject)
#' data(testdata)
#' res <- bayes(testdata, nTimePoints=100)
#' print(res$cpt)
#'
#' @export
bayes <- function(x, threshold, nTimePoints=NULL, K=1/sqrt(2), rescale.var=TRUE) {
  if(rescale.var) x <- rescale.variance(x)
  p <- nrow(x)
  n <- ncol(x)
  if(is.null(nTimePoints)) {
    timePoints <- cusum.spacing(n, n-1)
  } else {
    timePoints <- floor(seq(1, n-1, length.out=nTimePoints))
  }
  if(missing(threshold)) threshold <- compute.threshold(n=n, p=p, cusumFct=function(d) bayes_cpt(d, timePoints, K)[-(1:p)])
  v <- bayes_cpt(x, timePoints, K)
  proj <- v[1:p]
  # cusum <- v[-(1:p)]
  fullcusum <- abs(vector.cusum.transform(proj %*% x))
  return(list(
    cusum=fullcusum,
    proj=proj,
    cpt=ifelse(max(fullcusum)>threshold, which.max(fullcusum)[1], NA)
  ))
}

#' Cpp implementation of sum-cusum and max-cusum for single changepoint detection.
#'
#' Detects one multivariate changepoint in a dataset using the sum-cusum or max-cusum technique.
#' Only the dataset is required as the first parameter.
#' The testing threshold ("threshold") is chosen automatically if missing.
#' The parameter "sum_cusum" (default TRUE) indicates if sum-cusum or max-cusum is used.
#'
#' @param x A \eqn{p \times n} matrix representing \eqn{p} data series having \eqn{n} observations each.
#' @param threshold The testing threshold to detect the single changepoint. If missing, the parameter will be calibrated automatically.
#' @param sum_cusum A boolean flag to indicate if sum cusum (sum_cusum=T) or max cusum (sum_cusum=F) is used. Default is TRUE.
#' @param rescale.var A boolean flag to indicate if the variance should be rescaled before detecting a changepoint. Default is TRUE.
#'
#' @importFrom Rdpack reprompt
#' @references Hahn, G., Fearnhead, P., Eckley, I.A. (2020). Fast computation of a projection direction for multivariate changepoint detection. Stat Comput.
#'
#' @examples
#' library(BayesProject)
#' data(testdata)
#' resSumCusum <- summaxcusum(testdata, sum_cusum=TRUE)
#' print(resSumCusum$cpt)
#' resMaxCusum <- summaxcusum(testdata, sum_cusum=FALSE)
#' print(resMaxCusum$cpt)
#'
#' @export
summaxcusum <- function(x, threshold, sum_cusum=TRUE, rescale.var=TRUE) {
  if(rescale.var) x <- rescale.variance(x)
  p <- nrow(x)
  n <- ncol(x)
  if(missing(threshold)) threshold <- compute.threshold(n=n, p=p, cusumFct=function(d) sum_max_cusum(d, sum_cusum)[-(1:p)])
  v <- sum_max_cusum(x, sum_cusum)
  proj <- v[1:p]
  cusum <- v[-(1:p)]
  return(list(
    cusum=cusum,
    proj=proj,
    cpt=ifelse(max(cusum)>threshold, which.max(cusum)[1], NA)
  ))
}

#' Wild Binary Segmentation Wrapper for the functions "bayes" and "summaxcusum".
#'
#' Detects multivariate changepoints in a dataset using the Wild Binary Segmentation framework of Fryzlewicz (2014).
#' The dataset is supplied as the first parameter.
#' The second parameter is a calibrated function "cusumFct(x)" which takes a multivariate data matrix \eqn{x} and returns a cusum vector for it.
#' The threshold is supplied with parameter "threshold", parameter "m" specifies the number of random WBS intervals on each recursion level,
#' and "minwindow" is the minimal window size up to which the dataset is further divided recursively to find more changepoints.
#'
#' @param x A \eqn{p \times n} matrix representing \eqn{p} data series having \eqn{n} observations each.
#' @param cusumFct A calibrated function which returns a cusum vector for a dataset supplied as its single input parameter. Note that rescaling of the variance should be deactivated inside "cusumFct".
#' When using the function "bayes" as in the example below, it is advised to set the threshold to e.g. zero in order to deactivate the time-consuming (and unnecessary) threshold computation inside "bayes".
#' @param threshold The testing threshold to detect the single changepoint. The threshold must be specified.
#' @param m The number of random WBS intervals on each recursion level.
#' @param minwindow The minimal window size up to which the dataset is further divided recursively to find more changepoints.
#' @param rescale.var A boolean flag to indicate if the variance should be rescaled before detecting a changepoint. Default is TRUE.
#'
#' @importFrom Rdpack reprompt
#' @references Fryzlewicz, P. (2014). Wild binary segmentation for multiple change-point detection. Ann Statist, 42(6):2243--2281.
#'
#' @examples
#' library(BayesProject)
#' data(testdata)
#' bayes_cusum <- function(x) bayes(x, threshold=0, rescale.var=FALSE)$cusum
#' res <- wildBinSeg(testdata, cusumFct=bayes_cusum, threshold=1)
#' print(res)
#'
#' @export
wildBinSeg <- function(x, cusumFct, threshold, m=100, minwindow=10, rescale.var=TRUE) {
  if(rescale.var) x <- rescale.variance(x)
  if(!is.matrix(x)) x <- matrix(x, nrow=1)
  n <- ncol(x)

  wildbinseg <- function(s, e) {
    if(s > e-minwindow) return(NULL)
    # draw m random intervals [M, M2] of length at least minwindow within [s, e]
    if(s < (e-minwindow)) M <- sample(s:(e-minwindow), size=m, replace=T) else M <- rep(s, m)
    M2 <- sapply(M, function(i) ifelse((i+minwindow) < e, sample((i+minwindow):e, size=1), e))
    M <- cbind(M, M2)
    colnames(M) <- NULL
    # compute maximal cusum statistic for each interval
    allcusum <- sapply(1:m, function(i) max(cusumFct(x[, M[i,1]:M[i,2]])))
    # changepoint found? -- determine the wbs interval in which the changepoint was found, and then its exact position
    if(max(allcusum, na.rm=T) > threshold) {
      int <- which.max(allcusum)
      cpt <- M[int,1]-1 + which.max(cusumFct(x[, M[int,1]:M[int,2]]))
      return(c(wildbinseg(s, cpt), cpt, wildbinseg(cpt+1, e)))
    } else return(NULL)
  }

  wildbinseg(1, n)
}
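# Illustrative sketch (not run; our own example, not part of the package API):
# pairing compute.threshold() with wildBinSeg(). The helper name "bayes.cusum"
# is ours; it wraps bayes() with threshold=0 and rescale.var=FALSE, as advised
# in the wildBinSeg documentation above, for a p x n data matrix x.
# bayes.cusum <- function(d) bayes(d, threshold=0, rescale.var=FALSE)$cusum
# thr <- compute.threshold(n=ncol(x), p=nrow(x), cusumFct=bayes.cusum)
# cpts <- wildBinSeg(x, cusumFct=bayes.cusum, threshold=thr)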
/scratch/gouwar.j/cran-all/cranData/BayesProject/R/Bayes.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

bayes_vhat <- function(x, timePoints, K = 1.0/sqrt(2.0)) {
    .Call('_BayesProject_bayes_vhat', PACKAGE = 'BayesProject', x, timePoints, K)
}

bayes_cpt <- function(x, timePoints, K = 1.0/sqrt(2.0)) {
    .Call('_BayesProject_bayes_cpt', PACKAGE = 'BayesProject', x, timePoints, K)
}

sum_max_cusum <- function(x, sum_cusum = TRUE) {
    .Call('_BayesProject_sum_max_cusum', PACKAGE = 'BayesProject', x, sum_cusum)
}
/scratch/gouwar.j/cran-all/cranData/BayesProject/R/RcppExports.R
#' Simulated test data.
#'
#' @description A dataset containing time series for 100 variates with 1000 data points for each variate.
#' The dataset contains 5 changepoints with each one being shared independently by 10 variates.
#' The observations prior to each changepoint are IID Gaussian, distributed with unit variance and random mean drawn from N(0,1) Gaussians.
#' The mean after each changepoint, for each variable that is selected to have a change in mean, is changed by size=1 at each changepoint location with the sign of change chosen uniformly at random.
#'
#' @name testdata
#'
#' @docType data
#'
#' @keywords dataset
#'
#' @usage data(testdata)
#'
#' @rdname testdata-data
#'
#' @format A matrix with 100 rows and 2000 columns.
#'
#' @references Hahn, G., Fearnhead, P., Eckley, I.A. (2020). Fast computation of a projection direction for multivariate changepoint detection. Stat Comput.
#'
NULL
/scratch/gouwar.j/cran-all/cranData/BayesProject/R/testdata.R
#' AR(1) correlation matrix
#'
#' Generate a correlation matrix for an AR(1) model
#'
#' @param n size of matrix
#' @param rho correlation, between -1 and 1
#' @return \eqn{n\times n} AR(1) correlation matrix
#' @section Details:
#' The correlation matrix is created as
#' \deqn{ \left(\begin{array}{ccccc}
#' 1 & \rho & \rho^2 & \cdots & \rho^{n-1}\\
#' \rho & 1 & \rho & \cdots & \rho^{n-2}\\
#' \rho^2 & \rho & 1 & \cdots & \rho^{n-3}\\
#' \vdots & \vdots & \vdots & \ddots & \vdots\\
#' \rho^{n-1} & \rho^{n-2} & \rho^{n-3} & \cdots & 1
#' \end{array}\right)}{ (non-Latex version) }
#' @examples
#' AR1.cor(5, 0.5)
AR1.cor = function(n, rho)
{
  # exponent[i, j] = |i - j|, so rho^exponent yields the AR(1) correlations
  exponent <- abs(matrix(1:n - 1, nrow = n, ncol = n, byrow = TRUE) - (1:n - 1))
  rho^exponent
}
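# Quick check (not run; our own illustration, not part of the package):
# the first row of AR1.cor(n, rho) should equal rho^(0:(n-1)).
# R <- AR1.cor(4, 0.3)
# all.equal(R[1, ], 0.3^(0:3))  # expected: TRUE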
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/AR1.cor.r
#' Perform MCMC algorithm to generate the posterior samples for longitudinal ordinal data
#'
#' This function is used to generate the posterior samples using an MCMC algorithm from the
#' cumulative probit model with the hypersphere decomposition applied to model the correlation structure
#' in the serial dependence of repeated responses.
#'
#' @param fixed a two-sided linear formula object to describe fixed-effects with the response on the left of
#' a \samp{~} operator and the terms separated by \samp{+} or \samp{*} operators, on the right.
#' The specification \code{first*second} indicates the cross of \code{first} and \code{second}.
#' This is the same as \code{first + second + first:second}.
#' @param data an optional data frame containing the variables named in \samp{fixed} and \samp{random}.
#' It requires an ``integer'' variable named \samp{id} to denote the identifications of subjects.
#' @param random a one-sided linear formula object to describe random-effects with the terms separated by
#' \samp{+} or \samp{*} operators on the right of a \samp{~} operator.
#' @param Robustness logical. If 'TRUE' the distribution of random effects is assumed to be \cr
#' t-distribution; otherwise normal distribution.
#' @param na.action a function that indicates what should happen when the data contain NA's.
#' The default action (\samp{na.omit}, inherited from the \samp{factory fresh} value of \cr
#' \samp{getOption("na.action")}) strips any observations with any missing values in any variables.
#' @param subset an optional expression indicating the subset of the rows of \samp{data} that should be used in the fit.
#' This can be a logical vector, or a numeric vector indicating which observation numbers are to be included,
#' or a character vector of the row names to be included. All observations are included by default.
#' @param HS.model a specification of the correlation structure in the HSD model:
#' \itemize{
#' \item \code{HS.model = ~0} denotes independence, that is, \eqn{R_i} is an identity matrix,
#' \item \code{HS.model = ~IndTime1+}\eqn{\cdots}\code{+IndTimer} denotes an AR(r) correlation structure,
#' \item \code{HS.model = ~DiffTime1+}\eqn{\cdots}\code{+DiffTimer} denotes a correlation structure related to the \eqn{r}th order
#' of time difference.
#' }
#' @param hyper.params specify the values of the hyperparameters in the priors.
#' @param num.of.iter an integer to specify the total number of iterations; default is 20000.
#' @param Interactive logical. 'TRUE' if the program is run interactively, so that a progress bar is shown, and 'FALSE' otherwise.
#' @return a list of posterior samples, parameter estimates, AIC, BIC, CIC, DIC, MPL, RJR, predicted values,
#' and the acceptance rates in MH.
#'
#' @note Only one correlation model, either HSD (\samp{HS.model}) or ARMA (\samp{arma.order}), should be specified in the function.
#' We'll provide the reference for details of the model and the algorithm for performing
#' model estimation whenever the manuscript is accepted.
#'
#' @author Kuo-Jung Lee <[email protected]>
#' @references{
#' \insertRef{Lee:etal:2021}{BayesRGMM}
#'
#' \insertRef{Lee:etal:2020}{BayesRGMM}
#'
#'}
#'
#' @examples
#' \dontrun{
#' library(BayesRGMM)
#' rm(list=ls(all=TRUE))
#'
#' Fixed.Effs = c(-0.1, 0.1, -0.1) #c(-0.8, -0.3, 1.8, -0.4)
#' P = length(Fixed.Effs)
#' q = 1 #number of random effects
#' T = 7 #time points
#' N = 100 #number of subjects
#' Num.of.Cats = 3 #in KBLEE simulation studies, please fix it.
#' num.of.iter = 1000 #number of iterations
#'
#' HSD.para = c(-0.9, -0.6) #the parameters in HSD model
#' a = length(HSD.para)
#' w = array(runif(T*T*a), c(T, T, a)) #design matrix in HSD model
#'
#' for(time.diff in 1:a)
#'  w[, , time.diff] = 1*(as.matrix(dist(1:T, 1:T, method="manhattan")) == time.diff)
#'
#' x = array(0, c(T, P, N))
#' for(i in 1:N){
#'     #x[,, i] = t(rmvnorm(P, rep(0, T), AR1.cor(T, Cor.in.DesignMat)))
#'     x[, 1, i] = 1:T
#'     x[, 2, i] = rbinom(1, 1, 0.5)
#'     x[, 3, i] = x[, 1, i]*x[, 2, i]
#' }
#'
#' DesignMat = x
#'
#' #Generate data with the HSD model
#'
#' #MAR
#' CPREM.sim.data = SimulatedDataGenerator.CumulativeProbit(
#'  Num.of.Obs = N, Num.of.TimePoints = T, Num.of.Cats = Num.of.Cats,
#'  Fixed.Effs = Fixed.Effs, Random.Effs = list(Sigma = 0.5*diag(1), df=3),
#'  DesignMat = DesignMat, Missing = list(Missing.Mechanism = 2,
#'  MissingRegCoefs=c(-0.7, -0.2, -0.1)),
#'  HSD.DesignMat.para = list(HSD.para = HSD.para, DesignMat = w))
#'
#' print(table(CPREM.sim.data$sim.data$y))
#' print(CPREM.sim.data$classes)
#'
#' BCP.output = BayesCumulativeProbitHSD(
#'  fixed = as.formula(paste("y~", paste0("x", 1:P, collapse="+"))),
#'  data=CPREM.sim.data$sim.data, random = ~ 1, Robustness = TRUE,
#'  subset = NULL, na.action='na.exclude', HS.model = ~IndTime1+IndTime2,
#'  hyper.params=NULL, num.of.iter=num.of.iter, Interactive=0)
#'
#' BCP.Est.output = BayesRobustProbitSummary(BCP.output)
#' }
BayesCumulativeProbitHSD = function(fixed, data, random, Robustness, subset, na.action, HS.model,
    hyper.params, num.of.iter, Interactive)
{
    # process data: response, fixed and random effects matrices.
    cl <- match.call()
    mf <- match.call(expand.dots = FALSE)
    m <- match(c("fixed", "data", "subset", "na.action"), names(mf), 0L)
    mf <- mf[c(1L, m)]
    mf$drop.unused.levels <- TRUE
    mf[[1L]] <- quote(model.frame)
    names(mf)[2] = "formula"

    fixed.eff = all.vars.character(fixed[-2])$m[[2]]
    #fixed.eff.intercept.included = !any(grepl("-1", fixed.eff))

    random.eff = all.vars.character(random)$m[[2]]
    HS.model.cov = all.vars.character(HS.model)$m[[2]]
    #cat("HS.model.cov = ", HS.model.cov, "\n")

    TimeOrder = sort(gsub("IndTime", "", HS.model.cov[HS.model.cov %in% paste0("IndTime", 1:10)]))
    #cat("TimeOrder = ", TimeOrder, "\n")
    DiffTime = sort(gsub("DiffTime", "", HS.model.cov[HS.model.cov %in% paste0("DiffTime", 1:10)]))
    #cat("DiffTime = ", DiffTime, "\n")

    #cat("HS.model = \n")
    #print(as.formula(HS.model))

    interaction.terms = attr(terms.formula(as.formula(HS.model)), "term.labels")
    #cat("HS.model = ", interaction.terms, "\n")

    mf2 = eval(mf, parent.frame())
    Terms = attr(mf2, "terms")
    fixed.eff = colnames(model.matrix(Terms, mf2))
    fixed.eff = fixed.eff[-1]

    mf[[2L]] = update(fixed, as.formula(paste("~.+", paste(random.eff, collapse="+"))))
    mf[[2L]] = update(mf[[2L]], ~.+id)

    mf <- eval(mf, parent.frame())
    m.design.mat <- attr(mf, "terms")
    #cat("mfixed.design.mat = \n")
    #print(mfixed.design.mat)

    yy <- model.response(mf, "numeric")
    xx <- model.matrix(m.design.mat, mf)

    #fixed.eff = attr(terms.formula(fixed), "term.labels")
    #if(fixed.eff.intercept.included)
    #   fixed.eff = c("(Intercept)", fixed.eff)
    #cat("fixed.eff = ", fixed.eff, "\n")

    random.eff[random.eff=="1"] = "(Intercept)"
    #cat("random.eff = ", random.eff, "\n")

    x.fixed = xx[, colnames(xx)%in%fixed.eff, drop=FALSE]
    z.random = xx[, colnames(xx)%in%random.eff, drop=FALSE]
    id = xx[, colnames(xx)%in%"id"]

    p = dim(x.fixed)[2]
    q = dim(z.random)[2]
    N = length(table(id))
    T = range(table(id))[2]
    a = length(interaction.terms)
    #cat("a = ", a, "\n")
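    # The block below builds the T x T x N x a design array "u" for the HSD
    # correlation model: IndTime terms yield indicator matrices of a fixed
    # time lag |t-s|, DiffTime terms yield powers of the absolute time
    # difference, and the remaining main effects and interactions yield
    # subject-specific covariate matrices or elementwise products thereof.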
u = NULL delta.num = 0 # for HSD model TimeOrder = unique(TimeOrder) DiffTime = unique(DiffTime) if(a>0){ u = array(0, c(T, T, N, a)) if(length(TimeOrder)>0){ for(t in 1:length(TimeOrder)) u[,,1:N, t][as.matrix(dist(1:T, method="euclidean", diag = TRUE, upper = TRUE))==t] = 1 delta.num = delta.num + length(TimeOrder) } #cat("length(DiffTime) = ", length(DiffTime)>0, "\n") if(length(DiffTime)>0){ for(t in 1:length(DiffTime)) u[,,1:N, delta.num+t] = (as.matrix(dist(1:T, method="euclidean", diag = TRUE, upper = TRUE)))^t delta.num = delta.num + length(DiffTime) } main.terms = interaction.terms[-grep("Time",interaction.terms)] int.terms = interaction.terms[grep(":",interaction.terms)] #cat("main = ", main.terms, "\n") #cat("int.terms = ", int.terms, "\n") if(length(main.terms)>0){ #cat("=============== 1 ============\n") for(t in 1:length(main.terms)){ uu = data[, names(data)%in%c("id", main.terms[t]), drop=FALSE] HSD.cov = unique(uu)[, , drop=FALSE] HSD.cov = HSD.cov[HSD.cov$id%in%id, ] HSD.cov = as.matrix(HSD.cov[, -1]) for(sub in 1:N) u[,,sub, delta.num+t] = matrix(HSD.cov[sub], T, T) } delta.num = delta.num + length(main.terms) } if(length(int.terms)>0){ #cat("=============== 2 ============\n") for(t in 1:length(int.terms)){ #cat("int.terms[t]= ", int.terms[t], "\n") int.terms.tmp = strsplit(int.terms[t], ":")[[1]] #cat("int.terms.tmp = ", int.terms.tmp, "\n") int.terms.tmp.IndTime.TF = int.terms.tmp %in% paste0("IndTime", 1:10) int.terms.tmp.DiffTime.TF = int.terms.tmp %in% paste0("DiffTime", 1:10) int.terms.tmp.IndTime = int.terms.tmp[int.terms.tmp %in% paste0("IndTime", 1:10)] int.terms.tmp.DiffTime = int.terms.tmp[int.terms.tmp %in% paste0("DiffTime", 1:10)] #cat("any(int.terms.tmp.IndTime) = ", length(int.terms.tmp.IndTime), "\n") #cat("any(int.terms.tmp.DiffTime) = ", length(int.terms.tmp.DiffTime), "\n") if(length(int.terms.tmp.IndTime)>0){ #cat("=============== 3 ============\n") #cat("int.terms.tmp.IndTime = ", int.terms.tmp.IndTime, "\n") IndTime.tmp = as.numeric(gsub("\\D", "", int.terms.tmp.IndTime)) #as.numeric(int.terms.tmp.IndTime) #as.numeric(gsub("IndTime", "", int.terms.tmp.IndTime)) #cat("IndTime.tmp = ", IndTime.tmp, "\n") HSD.cov.tmp = int.terms.tmp[!int.terms.tmp.IndTime.TF] #cat("HSD.cov.tmp = ", HSD.cov.tmp, "\n") uu.tmp = matrix(0, T, T) uu.tmp[as.matrix(dist(1:T, method="euclidean", diag = TRUE, upper = TRUE))==IndTime.tmp] = 1 uu = data[, names(data)%in%c("id", HSD.cov.tmp), drop=FALSE] uu = uu[complete.cases(uu), ] #cat("uu = \n") #print(uu) HSD.cov = unique(uu)[, , drop=FALSE] #cat("HSD.cov 1= \n") #print(HSD.cov) HSD.cov = HSD.cov[HSD.cov$id%in%id, ] HSD.cov = as.matrix(HSD.cov[, -1]) #cat("HSD.cov 2= \n") #print(HSD.cov) for(sub in 1:N){ u[,, sub, delta.num+1] = uu.tmp*matrix(HSD.cov[sub], T, T) } delta.num = delta.num + 1 } else if(length(int.terms.tmp.DiffTime)>0){ #cat("=============== 4 ============\n") DiffTime.tmp = as.numeric(gsub("DiffTime", "", int.terms.tmp.DiffTime)) HSD.cov.tmp = int.terms.tmp[!int.terms.tmp.DiffTime.TF] uu.tmp = matrix(0, T, T) uu.tmp[as.matrix(dist(1:T, method="euclidean", diag = TRUE, upper = TRUE))==DiffTime.tmp] = 1 uu = data[, names(data)%in%c("id", HSD.cov.tmp), drop=FALSE] HSD.cov = unique(uu)[, , drop=FALSE] HSD.cov = HSD.cov[HSD.cov$id%in%id, ] HSD.cov = as.matrix(HSD.cov[, -1]) for(sub in 1:N) u[,, sub, delta.num + 1] = uu.tmp*matrix(HSD.cov[sub], T, T) delta.num = delta.num + 1 } else{ #cat("=============== 5 ============\n") #cat("int.terms.tmp = ", int.terms.tmp, "\n") uu = data[, names(data)%in%c("id", 
int.terms.tmp), drop=FALSE]
                    HSD.cov1 = unique(uu)[, 1, drop=FALSE]
                    #print(head(HSD.cov1))
                    HSD.cov2 = unique(uu)[, 2, drop=FALSE]
                    #print(head(HSD.cov2))
                    HSD.cov = cbind(HSD.cov1, HSD.cov2)
                    HSD.cov = HSD.cov[HSD.cov$id%in%id, ]
                    #print(head(HSD.cov))
                    HSD.cov = as.matrix(HSD.cov[, -1])
                    for(sub in 1:N)
                        u[,, sub, delta.num + 1] = matrix(prod(HSD.cov[sub, ]), T, T)
                }
            }
        }
    }
    #print(dim(u))
    #cat("delta.num = ", delta.num, "\n")
    #cat("=============== 6 ============\n")
    if(a != delta.num)
        stop("Something went wrong when assigning the design matrix in the HSD model.\n")
    if(any(HS.model.cov==1)){
        a = a+1
        u = abind(array(1, c(T, T, N)), u, along=4)
    }
    uu = u
    #cat("dim(u) = ", dim(u), "\n")
    #cat("u = \n")
    if(a>0)
        dim(u) = c(T, T, N*a)

    TimePointsAvailable = as.vector(table(id))

    y = matrix(NA, T, N)
    x = array(0, c(T, p, N)) #intercept,
    z = array(0, c(T, length(random.eff), N)) #intercept,

    id.index = unique(id)
    for(i in 1:N){
        y[1:TimePointsAvailable[i], i] = yy[id==id.index[i]]
        x[1:TimePointsAvailable[i], , i] = as.matrix(x.fixed[id==id.index[i], ])
        z[1:TimePointsAvailable[i], , i] = as.matrix(z.random[id==id.index[i], ], drop=FALSE)
    }

    # Default values for hyperparameters
    #sigma2.alpha = 0.1
    #sigma2.beta = 1
    #sigma2.delta = 1
    #v.gamma = 5
    #InvWishart.df = 5
    #InvWishart.Lambda = diag(q)

    sigma2.alpha = ifelse(is.null(hyper.params$sigma2.alpha), 0.1, hyper.params$sigma2.alpha)
    sigma2.beta = ifelse(is.null(hyper.params$sigma2.beta), 1, hyper.params$sigma2.beta)
    sigma2.delta = ifelse(is.null(hyper.params$sigma2.delta), 1, hyper.params$sigma2.delta)
    v.gamma = ifelse(is.null(hyper.params$v.gamma), 5, hyper.params$v.gamma)
    InvWishart.df = ifelse(is.null(hyper.params$InvWishart.df), 5, hyper.params$InvWishart.df)
    InvWishart.Lambda = if(is.null(hyper.params$InvWishart.Lambda)) diag(q) else hyper.params$InvWishart.Lambda

    UpdateYstar = TRUE
    UpdateAlpha = TRUE
    UpdateRandomEffect = TRUE
    UpdateBeta = TRUE
    UpdateSigma = TRUE
    UpdateNu = TRUE
    UpdateDelta = ifelse(is.null(u), FALSE, TRUE)

    Num.of.Cats = length(unique(na.omit(c(y))))
    y.star.ini = matrix(0, T, N)
    alpha.ini = c(-Inf, seq(-5, 5, length = Num.of.Cats-1), Inf)
    #print(alpha.ini)
    y = y-min(y, na.rm=TRUE)+1 # to make the categorical variable begin with 1
    #y[is.na(y)] = 1000 # missing values specified by 1000
    y[!is.finite(y)] = 1000
    #y[is.nan(y)] = 1000
    #print(head(is.finite(y)))
    #print(head(y))
    for(i in 1:Num.of.Cats)
        y.star.ini[y%in%i] = rtnorm(sum(y%in%i), lower=alpha.ini[i], upper=alpha.ini[i+1])
    #print(head(y.star.ini))

    b.ini = NULL
    Sigma = diag(q)
    for(i in 1:N)
        b.ini = cbind(b.ini, t(rmvnorm(1, rep(0, q), Sigma)))
    nu.ini = rgamma(N, 5, 5)
    beta.ini = matrix(rep(0, p), ncol=1)
    Sigma.ini = as.matrix(rWishart(1,q,diag(q))[,,1])
    delta.ini = rep(0, a) #runif(a, -1, 1)

    Data = list(Y = y, X = x, Z=z, U = u, TimePointsAvailable = TimePointsAvailable)
    InitialValues = list(y.star = y.star.ini, alpha = alpha.ini, b = b.ini, nu = nu.ini, beta = beta.ini, Sigma = Sigma.ini, delta = delta.ini)
    HyperPara = list(sigma2.beta = sigma2.beta, sigma2.delta=sigma2.delta, sigma2.alpha=sigma2.alpha, v.gamma = v.gamma, InvWishart.df = InvWishart.df, InvWishart.Lambda=InvWishart.Lambda)
    UpdatePara = list(UpdateYstar = UpdateYstar, UpdateAlpha = UpdateAlpha, UpdateRandomEffect = UpdateRandomEffect, UpdateNu = UpdateNu, UpdateBeta = UpdateBeta, UpdateSigma = UpdateSigma, UpdateDelta = UpdateDelta)
    TuningPara = list(TuningDelta = 0.01)

    if(1){
        start.time <- Sys.time()
        PosteriorSamplesCP = CumulativeProbitMCMC(num.of.iter, Data, Robustness, InitialValues, HyperPara, UpdatePara, TuningPara, Interactive)
        end.time
<- Sys.time() #cat("\nCall:\n", printCall(cl), "\n\n", sep = "") cat("\nData Descriptives:\n") cat("Longitudinal Data Information:") cat("\nNumber of Observations: ", sum(TimePointsAvailable), "\tNumber of Covariates: ", p-1) cat("\nNumber of subjects:", N, "\n\n") out <- list(Posterior.Samples = PosteriorSamplesCP, Fixed.Effects.Names = fixed.eff, Random.Effects.Names = random.eff, Response = y, Fixed.Effects.Mat = x, Random.Effects.Mat = z, HS.model.Mat = uu, call = cl, Num.of.Iter = num.of.iter) #class(out) out } }
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/BayesCumulativeProbitHSD.r
BayesProbitARMA = function(fixed, data, random, Robustness, subset, na.action, arma.order, hyper.params, num.of.iter, Interactive) { # process data: reponse, fixed and random effects matrices. cl <- match.call() mf <- match.call(expand.dots = FALSE) m <- match(c("fixed", "data", "subset", "na.action"), names(mf), 0L) mf <- mf[c(1L, m)] mf$drop.unused.levels <- TRUE mf[[1L]] <- quote(model.frame) names(mf)[2] = "formula" fixed.eff = all.vars.character(fixed[-2])$m[[2]] #cat("fixed.eff = ", fixed.eff, "\n") fixed.eff.intercept.included = !any(grepl("-1", fixed.eff)) random.eff = all.vars.character(random)$m[[2]] #cat("random.eff = ", random.eff, "\n") mf[[2L]] = update(fixed, as.formula(paste("~.+", paste(random.eff, collapse="+") ))) mf[[2L]] = update(mf[[2L]], ~.+id) mf <- eval(mf, parent.frame()) m.design.mat <- attr(mf, "terms") #cat("mfixed.design.mat = \n") #print(mfixed.design.mat) yy <- model.response(mf, "numeric") #model.response(mf, "numeric") xx <- model.matrix(m.design.mat, mf) fixed.eff = attr(terms.formula(fixed), "term.labels") if(fixed.eff.intercept.included) fixed.eff = c("(Intercept)", fixed.eff) random.eff[random.eff=="1"] = "(Intercept)" #cat("random.eff = ", random.eff, "\n") x.fixed = xx[, colnames(xx)%in%fixed.eff, drop=FALSE] z.random = xx[, colnames(xx)%in%random.eff, drop=FALSE] id = xx[, colnames(xx)%in%"id"] p = dim(x.fixed)[2] q = dim(z.random)[2] N = length(table(id)) T = range(table(id))[2] TimePointsAvailable = as.vector(table(id)) y = matrix(NA, T, N) x = array(0, c(T, p, N)) #intercept, z = array(0, c(T, length(random.eff), N)) #intercept, for(i in 1:N){ y[1:TimePointsAvailable[i], i] = yy[id==i] x[1:TimePointsAvailable[i], , i] = as.matrix(x.fixed[id==i, ]) z[1:TimePointsAvailable[i], , i] = as.matrix(z.random[id==i, ], drop=FALSE) } AR.order = arma.order[1] MA.order = arma.order[2] SinglePhiPsi = 1 #0: different phi's and psi's 1: same for every subject. 
if(length(hyper.params)==0){ sigma2.beta = 1 v.gamma = 1 InvWishart.df = 1 InvWishart.Lambda = diag(q) } else{ sigma2.beta = hyper.params$sigma2.beta v.gamma = hyper.params$v.gamma InvWishart.df = hyper.params$InvWishart.df InvWishart.Lambda = hyper.params$InvWishart.Lambda } UpdateYstar = TRUE UpdateRandomEffect = TRUE UpdateBeta = TRUE UpdateSigma = TRUE UpdateNu = TRUE UpdatePhi = (AR.order>0) UpdatePsi = (MA.order>0) y.star.ini = matrix(0, T, N) if(UpdateYstar){ y.star.ini[y%in%1] = rtnorm(sum(y%in%1), lower=0, upper=Inf) y.star.ini[y%in%0] = rtnorm(sum(y%in%0), lower=-Inf, upper=0) } b.ini = NULL Sigma = diag(q) for(i in 1:N) b.ini = cbind(b.ini, t(rmvnorm(1, rep(0, q), Sigma))) nu.ini = rgamma(N, 5, 5) beta.ini = matrix(rnorm(p), ncol=1) Sigma.ini = as.matrix(rWishart(1,q,diag(q))[,,1]) phi.ini = matrix(0, AR.order, N) psi.ini = matrix(0, MA.order, N) if(AR.order==0) phi.ini[]=0 if(MA.order==0) psi.ini[]=0 ARMAorder = c(AR.order, MA.order) Data = list(Y = y, X = x, Z=z, TimePointsAvailable = TimePointsAvailable) InitialValues = list(y.star = y.star.ini, b = b.ini, nu = nu.ini, beta = beta.ini , Sigma = Sigma.ini, phi = phi.ini, psi = psi.ini) HyperPara = list(sigma2.beta = sigma2.beta, v.gamma = v.gamma, InvWishart.df = InvWishart.df, InvWishart.Lambda=InvWishart.Lambda) UpdatePara = list(UpdateYstar = UpdateYstar, UpdateRandomEffect = UpdateRandomEffect, UpdateNu = UpdateNu, UpdateBeta = UpdateBeta, UpdateSigma = UpdateSigma, UpdatePhi = UpdatePhi, UpdatePsi = UpdatePsi, SinglePhiPsi=SinglePhiPsi) TuningPara = list(TuningPhi = 0.05, TuningPsi = 0.05) #cat("\nCall:\n", printCall(cl), "\n\n", sep = "") #cat("Data Descriptives:\n") #cat("Longitudinal Process\t\tEvent Process") #cat("\nNumber of Observations: ", sum(TimePointsAvailable), "\tNumber of Covariates: ", p-1) #cat("\nNumber of subjects:", N, "\n\n") cat("\nData Descriptives:\n") cat("Longitudinal Data Information:") cat("\nNumber of Observations: ", sum(TimePointsAvailable), "\tNumber of Covariates: ", p-1) cat("\nNumber of subjects:", N, "\n\n") PosteriorSamplesARMA = ProbitMCMCARMAKB(num.of.iter, Data, Robustness, InitialValues, HyperPara, UpdatePara, TuningPara, ARMAorder, Interactive) out <- list(Posterior.Samples = PosteriorSamplesARMA, Fixed.Effects.Names = fixed.eff, Random.Effects.Names = random.eff, Response = y, Fixed.Effects.Mat = x, Random.Effects.Mat = z, call = cl, Num.of.Iter = num.of.iter) #class(out) out }
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/BayesProbitARMA.r
BayesProbitARMA.Summary = function(object, digits = max(1L, getOption("digits") - 4L))
{
    post.samples = object$Posterior.Samples$PosteriorSamples
    para.names = names(post.samples)
    beta.est = matrix(unlist(apply(post.samples$beta.samples, 1, bm)), ncol = 2, byrow=TRUE)
    colnames(beta.est) = c("PostMean", "StErr")
    rownames(beta.est) = object$Fixed.Effects.Names
    beta.CI = apply(post.samples$beta.samples, 1, quantile, c(0.025, 0.975))
    beta.est.CI = as.data.frame(cbind(beta.est, t(beta.CI)))
    beta.est.CI = format(beta.est.CI, digits = digits)
    #cat("\nCoefficients:\n\n")
    #print(beta.est.CI)

    arma.order = as.numeric(gsub("\\D", "", object$call[[which(names(object$call)=="arma.order")]]))[1:2]
    ar.order = arma.order[1]
    ma.order = arma.order[2]
    arma.est = NULL
    if(sum(ar.order, ma.order)>0){
        if(ar.order>0){
            phi.est = matrix(unlist(apply(post.samples$phi.samples[1:ar.order,1 ,,drop=F], c(1,2), bm)), nrow=ar.order, byrow=T)
            phi.CI = unlist(apply(post.samples$phi.samples[1:ar.order,1 ,,drop=F], c(1,2), quantile, c(0.025, 0.975)))
            phi.CI = t(adrop(phi.CI, drop=3))
            colnames(phi.est) = c("PostMean", "StErr")
            rownames(phi.est) = paste("phi", 1:ar.order)
            phi.est.CI = cbind(phi.est, phi.CI)
            arma.est = rbind(arma.est, phi.est.CI)
        }
        if(ma.order>0){
            psi.est = matrix(unlist(apply(post.samples$psi.samples[1:ma.order,1 ,,drop=F], c(1,2), bm)), nrow=ma.order, byrow=T)
            psi.CI = unlist(apply(post.samples$psi.samples[1:ma.order,1 ,,drop=F], c(1,2), quantile, c(0.025, 0.975)))
            psi.CI = t(adrop(psi.CI, drop=3))
            colnames(psi.est) = c("PostMean", "StErr")
            rownames(psi.est) = paste("psi", 1:ma.order)
            psi.est.CI = as.data.frame(cbind(psi.est, psi.CI))
            arma.est = rbind(arma.est, psi.est.CI)
        }
        arma.est = as.data.frame(arma.est)
        arma.est = format(arma.est, digits = digits)
        #cat("\n\nARMA parameters:\n\n")
        #print(arma.est)
    }
    info = object$Posterior.Samples$PosteriorEstimates
    model.info = data.frame(logL = info$logL, AIC = info$AIC, BIC = info$BIC, CIC = info$CIC, DIC = info$DIC, MPL = info$MPL, RJR = info$RJR, ACC = info$ACC, row.names = "")

    random.cov = signif(object$Posterior.Samples$PosteriorEstimates$Sigma.mean, digits = digits)
    if(length(object$Random.Effects.Names)==1)
        dimnames(random.cov) = list(object$Random.Effects.Names, "Variance")
    if(length(object$Random.Effects.Names)>1)
        dimnames(random.cov) = list(object$Random.Effects.Names, object$Random.Effects.Names)
    #cat("\n\nRandom effect: \n")
    #print(random.cov)

    model.info = format(model.info, digits = digits)
    #cat("\nModel Information:\n")
    #print(model.info)

    ARMA.result = list(beta.est.CI = beta.est.CI, arma.est = arma.est, model.info = model.info, random.cov = random.cov)
    ARMA.result
}
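
# A minimal sketch of the summary computations above (illustrative only; it is
# assumed that `bm` returns the batch-mean estimate of the posterior mean and its
# Monte Carlo standard error for one parameter's chain). For a p x num.of.iter
# matrix of posterior draws, point estimates and equal-tailed 95% credible
# intervals are obtained row by row:
# draws = matrix(rnorm(2*1000), nrow = 2)                  # hypothetical draws
# post.mean = apply(draws, 1, mean)                        # posterior means
# post.CI = t(apply(draws, 1, quantile, c(0.025, 0.975)))  # 95% credible intervals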
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/BayesProbitARMASummary.r
BayesProbitHSD = function(fixed, data, random, Robustness, subset, na.action, HS.model, hyper.params, num.of.iter, Interactive) { # process data: reponse, fixed and random effects matrices. cl <- match.call() mf <- match.call(expand.dots = FALSE) m <- match(c("fixed", "data", "subset", "na.action"), names(mf), 0L) mf <- mf[c(1L, m)] mf$drop.unused.levels <- TRUE mf[[1L]] <- quote(model.frame) names(mf)[2] = "formula" fixed.eff = all.vars.character(fixed[-2])$m[[2]] fixed.eff.intercept.included = !any(grepl("-1", fixed.eff)) random.eff = all.vars.character(random)$m[[2]] HS.model.cov = all.vars.character(HS.model)$m[[2]] #cat("HS.model.cov = ", HS.model.cov, "\n") TimeOrder = sort(gsub("IndTime", "", HS.model.cov[HS.model.cov %in% paste0("IndTime", 1:10)])) #cat("TimeOrder = ", TimeOrder, "\n") DiffTime = sort(gsub("DiffTime", "", HS.model.cov[HS.model.cov %in% paste0("DiffTime", 1:10)])) #cat("DiffTime = ", DiffTime, "\n") #cat("HS.model = \n") #print(as.formula(HS.model)) interaction.terms = attr(terms.formula(as.formula(HS.model)), "term.labels") #cat("HS.model = ", interaction.terms, "\n") mf[[2L]] = update(fixed, as.formula(paste("~.+", paste(random.eff, collapse="+") ))) mf[[2L]] = update(mf[[2L]], ~.+id) mf <- eval(mf, parent.frame()) m.design.mat <- attr(mf, "terms") #cat("mfixed.design.mat = \n") #print(mfixed.design.mat) yy <- model.response(mf, "numeric") #model.response(mf, "numeric") xx <- model.matrix(m.design.mat, mf) fixed.eff = attr(terms.formula(fixed), "term.labels") if(fixed.eff.intercept.included) fixed.eff = c("(Intercept)", fixed.eff) #cat("fixed.eff = ", fixed.eff, "\n") random.eff[random.eff=="1"] = "(Intercept)" #cat("random.eff = ", random.eff, "\n") x.fixed = xx[, colnames(xx)%in%fixed.eff, drop=FALSE] z.random = xx[, colnames(xx)%in%random.eff, drop=FALSE] id = xx[, colnames(xx)%in%"id"] p = dim(x.fixed)[2] q = dim(z.random)[2] N = length(table(id)) T = range(table(id))[2] a = length(interaction.terms) #cat("a = ", a, "\n") u = NULL delta.num = 0 # for HSD model TimeOrder = unique(TimeOrder) DiffTime = unique(DiffTime) if(a>0){ u = array(0, c(T, T, N, a)) if(length(TimeOrder)>0){ for(t in 1:length(TimeOrder)) u[,,1:N, t][as.matrix(dist(1:T, method="euclidean", diag = TRUE, upper = TRUE))==t] = 1 delta.num = delta.num + length(TimeOrder) } #cat("length(DiffTime) = ", length(DiffTime)>0, "\n") if(length(DiffTime)>0){ for(t in 1:length(DiffTime)) u[,,1:N, delta.num+t] = (as.matrix(dist(1:T, method="euclidean", diag = TRUE, upper = TRUE)))^t delta.num = delta.num + length(DiffTime) } main.terms = interaction.terms[-grep("Time",interaction.terms)] int.terms = interaction.terms[grep(":",interaction.terms)] #cat("main = ", main.terms, "\n") #cat("int.terms = ", int.terms, "\n") if(length(main.terms)>0){ #cat("=============== 1 ============\n") for(t in 1:length(main.terms)){ uu = data[, names(data)%in%c("id", main.terms[t]), drop=FALSE] HSD.cov = unique(uu)[, , drop=FALSE] HSD.cov = HSD.cov[HSD.cov$id%in%id, ] HSD.cov = as.matrix(HSD.cov[, -1]) for(sub in 1:N) u[,,sub, delta.num+t] = matrix(HSD.cov[sub], T, T) } delta.num = delta.num + length(main.terms) } if(length(int.terms)>0){ #cat("=============== 2 ============\n") for(t in 1:length(int.terms)){ #cat("int.terms[t]= ", int.terms[t], "\n") int.terms.tmp = strsplit(int.terms[t], ":")[[1]] #cat("int.terms.tmp = ", int.terms.tmp, "\n") int.terms.tmp.IndTime.TF = int.terms.tmp %in% paste0("IndTime", 1:10) int.terms.tmp.DiffTime.TF = int.terms.tmp %in% paste0("DiffTime", 1:10) int.terms.tmp.IndTime = 
int.terms.tmp[int.terms.tmp %in% paste0("IndTime", 1:10)]
                int.terms.tmp.DiffTime = int.terms.tmp[int.terms.tmp %in% paste0("DiffTime", 1:10)]
                #cat("any(int.terms.tmp.IndTime) = ", length(int.terms.tmp.IndTime), "\n")
                #cat("any(int.terms.tmp.DiffTime) = ", length(int.terms.tmp.DiffTime), "\n")
                if(length(int.terms.tmp.IndTime)>0){
                    #cat("=============== 3 ============\n")
                    #cat("int.terms.tmp.IndTime = ", int.terms.tmp.IndTime, "\n")
                    IndTime.tmp = as.numeric(gsub("\\D", "", int.terms.tmp.IndTime)) #as.numeric(int.terms.tmp.IndTime) #as.numeric(gsub("IndTime", "", int.terms.tmp.IndTime))
                    #cat("IndTime.tmp = ", IndTime.tmp, "\n")
                    HSD.cov.tmp = int.terms.tmp[!int.terms.tmp.IndTime.TF]
                    #cat("HSD.cov.tmp = ", HSD.cov.tmp, "\n")
                    uu.tmp = matrix(0, T, T)
                    uu.tmp[as.matrix(dist(1:T, method="euclidean", diag = TRUE, upper = TRUE))==IndTime.tmp] = 1
                    uu = data[, names(data)%in%c("id", HSD.cov.tmp), drop=FALSE]
                    uu = uu[complete.cases(uu), ]
                    #cat("uu = \n")
                    #print(uu)
                    HSD.cov = unique(uu)[, , drop=FALSE]
                    #cat("HSD.cov 1= \n")
                    #print(HSD.cov)
                    HSD.cov = HSD.cov[HSD.cov$id%in%id, ]
                    HSD.cov = as.matrix(HSD.cov[, -1])
                    #cat("HSD.cov 2= \n")
                    #print(HSD.cov)
                    for(sub in 1:N){
                        u[,, sub, delta.num+1] = uu.tmp*matrix(HSD.cov[sub], T, T)
                    }
                    delta.num = delta.num + 1
                } else if(length(int.terms.tmp.DiffTime)>0){
                    #cat("=============== 4 ============\n")
                    DiffTime.tmp = as.numeric(gsub("DiffTime", "", int.terms.tmp.DiffTime))
                    HSD.cov.tmp = int.terms.tmp[!int.terms.tmp.DiffTime.TF]
                    uu.tmp = matrix(0, T, T)
                    uu.tmp[as.matrix(dist(1:T, method="euclidean", diag = TRUE, upper = TRUE))==DiffTime.tmp] = 1
                    uu = data[, names(data)%in%c("id", HSD.cov.tmp), drop=FALSE]
                    HSD.cov = unique(uu)[, , drop=FALSE]
                    HSD.cov = HSD.cov[HSD.cov$id%in%id, ]
                    HSD.cov = as.matrix(HSD.cov[, -1])
                    for(sub in 1:N)
                        u[,, sub, delta.num + 1] = uu.tmp*matrix(HSD.cov[sub], T, T)
                    delta.num = delta.num + 1
                } else{
                    #cat("=============== 5 ============\n")
                    #cat("int.terms.tmp = ", int.terms.tmp, "\n")
                    uu = data[, names(data)%in%c("id", int.terms.tmp), drop=FALSE]
                    HSD.cov1 = unique(uu)[, 1, drop=FALSE]
                    #print(head(HSD.cov1))
                    HSD.cov2 = unique(uu)[, 2, drop=FALSE]
                    #print(head(HSD.cov2))
                    HSD.cov = cbind(HSD.cov1, HSD.cov2)
                    HSD.cov = HSD.cov[HSD.cov$id%in%id, ]
                    #print(head(HSD.cov))
                    HSD.cov = as.matrix(HSD.cov[, -1])
                    for(sub in 1:N)
                        u[,, sub, delta.num + 1] = matrix(prod(HSD.cov[sub, ]), T, T)
                }
            }
        }
    }
    #print(dim(u))
    #cat("delta.num = ", delta.num, "\n")
    #cat("=============== 6 ============\n")
    if(a != delta.num)
        stop("Something went wrong when assigning the design matrix in the HSD model.\n")
    if(any(HS.model.cov==1)){
        a = a+1
        u = abind(array(1, c(T, T, N)), u, along=4)
    }
    uu = u
    #cat("dim(u) = ", dim(u), "\n")
    #cat("u = \n")
    if(a>0)
        dim(u) = c(T, T, N*a)

    TimePointsAvailable = as.vector(table(id))

    y = matrix(NA, T, N)
    x = array(0, c(T, p, N)) #intercept,
    z = array(0, c(T, length(random.eff), N)) #intercept,

    for(i in 1:N){
        y[1:TimePointsAvailable[i], i] = yy[id==i]
        x[1:TimePointsAvailable[i], , i] = as.matrix(x.fixed[id==i, ])
        z[1:TimePointsAvailable[i], , i] = as.matrix(z.random[id==i, ], drop=FALSE)
    }

    if(length(hyper.params)==0){
        sigma2.beta = 1
        sigma2.delta = rep(1, length(a))
        v.gamma = 5
        InvWishart.df = 5
        InvWishart.Lambda = diag(q)
    } else{
        sigma2.beta = hyper.params$sigma2.beta
        sigma2.delta = hyper.params$sigma2.delta
        v.gamma = hyper.params$v.gamma
        InvWishart.df = hyper.params$InvWishart.df
        InvWishart.Lambda = hyper.params$InvWishart.Lambda
    }

    UpdateYstar = TRUE
    UpdateRandomEffect = TRUE
    UpdateBeta = TRUE
    UpdateSigma = TRUE
    UpdateNu = TRUE
    UpdateDelta = ifelse(is.null(u), FALSE, TRUE)
y.star.ini = matrix(0, T, N) if(UpdateYstar){ y.star.ini[y%in%1] = rtnorm(sum(y%in%1), lower=0, upper=Inf) y.star.ini[y%in%0] = rtnorm(sum(y%in%0), lower=-Inf, upper=0) } b.ini = NULL Sigma = diag(q) for(i in 1:N) b.ini = cbind(b.ini, t(rmvnorm(1, rep(0, q), Sigma))) nu.ini = rgamma(N, 5, 5) beta.ini = matrix(rep(0, p), ncol=1) Sigma.ini = as.matrix(rWishart(1,q,diag(q))[,,1]) delta.ini = rep(0, a)#runif(a, -1, 1) Data = list(Y = y, X = x, Z=z, U = u, TimePointsAvailable = TimePointsAvailable) InitialValues = list(y.star = y.star.ini, b = b.ini, nu = nu.ini, beta = beta.ini , Sigma = Sigma.ini, delta = delta.ini) HyperPara = list(sigma2.beta = sigma2.beta, sigma2.delta=sigma2.delta, v.gamma = v.gamma, InvWishart.df = InvWishart.df, InvWishart.Lambda=InvWishart.Lambda) UpdatePara = list(UpdateYstar = UpdateYstar, UpdateRandomEffect = UpdateRandomEffect, UpdateNu = UpdateNu, UpdateBeta = UpdateBeta, UpdateSigma = UpdateSigma, UpdateDelta = UpdateDelta) TuningPara = list(TuningDelta = 0.01) if(1){ start.time <- Sys.time() PosteriorSamplesHSD = ProbitMCMCHSD(num.of.iter, Data, Robustness, InitialValues, HyperPara, UpdatePara, TuningPara, Interactive) end.time <- Sys.time() #cat("\nCall:\n", printCall(cl), "\n\n", sep = "") cat("\nData Descriptives:\n") cat("Longitudinal Data Information:") cat("\nNumber of Observations: ", sum(TimePointsAvailable), "\tNumber of Covariates: ", p-1) cat("\nNumber of subjects:", N, "\n\n") out <- list(Posterior.Samples = PosteriorSamplesHSD, Fixed.Effects.Names = fixed.eff, Random.Effects.Names = random.eff, Response = y, Fixed.Effects.Mat = x, Random.Effects.Mat = z, HS.model.Mat = uu, call = cl, Num.of.Iter = num.of.iter) #class(out) out } }
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/BayesProbitHSD.r
BayesProbitHSD.Summary = function(object, digits = max(1L, getOption("digits") - 4L)) { post.samples = object$Posterior.Samples$PosteriorSamples para.names = names(post.samples) beta.est = matrix(unlist(apply(post.samples$beta.samples, 1, bm)), ncol = 2, byrow=TRUE) colnames(beta.est) = c("PostMean", "StErr") rownames(beta.est) = object$Fixed.Effects.Names beta.CI = apply(post.samples$beta.samples, 1, quantile, c(0.025, 0.975)) beta.est.CI = as.data.frame(cbind(beta.est, t(beta.CI))) if(!is.null(post.samples$alpha.samples)){ alpha.est = matrix(unlist(apply(post.samples$alpha.samples, 1, bm)), ncol = 2, byrow=TRUE) alpha.CI = t(apply(post.samples$alpha.samples, 1, quantile, c(0.025, 0.975))) alpha.est = alpha.est[-c(1, nrow(alpha.est)), ] alpha.CI = alpha.CI[-c(1, nrow(alpha.CI)), ] colnames(alpha.est) = c("PostMean", "StErr") rownames(alpha.est) = paste0("alpha", 1:nrow(alpha.est)) alpha.est.CI = as.data.frame(cbind(alpha.est, alpha.CI)) beta.est.CI = rbind(beta.est.CI, alpha.est.CI) } beta.est.CI = format(beta.est.CI, digits = digits) #cat("\nCoefficients:\n") #print(beta.est.CI) cl = object$call #print(cl) HDS.cov.all = all.vars.character(cl[match("HS.model", names(cl))])$m[[2]] HSD.cov = attr(terms.formula(as.formula(cl$HS.model)), "term.labels") #all.vars.character(cl[match("HS.model", names(cl))])$m[[2]] #cat("HSD.cov = ", HSD.cov, "\n") if(length(HSD.cov)>0) { if(any(HDS.cov.all %in% "1")) HSD.cov = c("(Intercept)", HSD.cov) delta.est = matrix(unlist(apply(post.samples$delta.samples, 1, bm)), ncol = 2, byrow=TRUE) delta.CI = apply(post.samples$delta.samples, 1, quantile, c(0.025, 0.975)) colnames(delta.est) = c("PostMean", "StErr") rownames(delta.est) = HSD.cov delta.est.CI = as.data.frame(cbind(delta.est, t(delta.CI))) delta.est.CI = format(delta.est.CI, digits = digits) #cat("\nParameters in HSD model:\n") #print(delta.est.CI) } else delta.est.CI = NULL random.cov = signif(object$Posterior.Samples$PosteriorEstimates$Sigma.mean, digits = digits) if(length(object$Random.Effects.Names)==1) dimnames(random.cov) = list(object$Random.Effects.Names, "Variance") if(length(object$Random.Effects.Names)>1) dimnames(random.cov) = list(object$Random.Effects.Names, object$Random.Effects.Names) #cat("\n\nRandom effect: \n") #print(random.cov) info = object$Posterior.Samples$PosteriorEstimates model.info = data.frame(logL = info$logL, AIC = info$AIC, BIC = info$BIC, CIC = info$CIC, DIC = info$DIC, MPL =info$MPL, RJR = info$RJR, ACC = info$ACC, row.names = "") model.info = format(model.info, , digits = digits) #cat("\nModel Information:\n") #print(model.info) if(!is.null(delta.est.CI)){ if(nrow(delta.est.CI)>1) Ri = CorrMat.HSD(object$HS.model.Mat[,,1,], as.numeric(delta.est.CI[, 1])) else Ri = CorrMat.HSD(adrop(object$HS.model.Mat[,,1,, drop=FALSE], drop=3), as.numeric(delta.est.CI[, 1])) Ri = formatC(Ri, digits = digits, format = "f", mode = "real") #cat("\n\nEstimate of Ri: \n") #print(Ri, quote = FALSE) HSD.result = list(beta.est.CI = beta.est.CI, delta.est.CI = delta.est.CI, model.info = model.info, random.cov = random.cov, Ri = Ri) } else HSD.result = list(beta.est.CI = beta.est.CI, delta.est.CI = delta.est.CI, model.info = model.info, random.cov = random.cov) HSD.result #HSD.result }
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/BayesProbitHSDSummary.r
#' Perform MCMC algorithm to generate the posterior samples
#'
#' This function is used to generate the posterior samples using an MCMC algorithm for the probit model
#' with either the hypersphere decomposition or ARMA models applied to model the correlation structure
#' in the serial dependence
#' of repeated responses.
#'
#' @param fixed a two-sided linear formula object to describe fixed-effects with the response on the left of
#' a \samp{~} operator and the terms separated by \samp{+} or \samp{*} operators, on the right.
#' The specification \code{first*second} indicates the cross of \code{first} and \code{second}.
#' This is the same as \code{first + second + first:second}.
#' @param data an optional data frame containing the variables named in \samp{fixed} and \samp{random}.
#' It requires an ``integer'' variable named by \samp{id} to denote the identifications of subjects.
#' @param random a one-sided linear formula object to describe random-effects with the terms separated by
#' \samp{+} or \samp{*} operators on the right of a \samp{~} operator.
#' @param Robustness logical. If 'TRUE' the distribution of random effects is assumed to be \cr
#' t-distribution; otherwise normal distribution.
#' @param na.action a function that indicates what should happen when the data contain NA’s.
#' The default action (\samp{na.omit}, inherited from the \samp{factory fresh} value of \cr
#' \samp{getOption("na.action")}) strips any observations with any missing values in any variables.
#' @param subset an optional expression indicating the subset of the rows of \samp{data} that should be used in the fit.
#' This can be a logical vector, or a numeric vector indicating which observation numbers are to be included,
#' or a character vector of the row names to be included. All observations are included by default.
#' @param HS.model a specification of the correlation structure in HSD model:
#' \itemize{
#' \item \code{HS.model = ~0} denotes independence, that is, \eqn{R_i} is an identity matrix,
#' \item \code{HS.model = ~IndTime+}\eqn{\cdots}\code{+IndTimer} denotes AR(r) correlation structure,
#' \item \code{HS.model = ~DiffTime1+}\eqn{\cdots}\code{+DiffTimer} denotes correlation structure related to \eqn{r}th order
#' of time difference.
#' }
#' @param arma.order a specification of the order in an ARMA model: the two integer components (p, q) are the AR order and the MA order.
#' @param hyper.params specifies the values of the hyperparameters in the priors.
#' @param num.of.iter an integer to specify the total number of iterations; default is 20000.
#' @param Interactive logical. 'TRUE' if the program is run interactively, so that a progress bar is shown; 'FALSE' otherwise.
#'
#' @return a list of posterior samples, parameter estimates, AIC, BIC, CIC, DIC, MPL, RJR, predicted values,
#' and the acceptance rates in MH is returned.
#'
#' @note Only one model, either HSD (\samp{HS.model}) or ARMA (\samp{arma.order}), should be specified in the function.
#' We'll provide the reference for details of the model and the algorithm for performing
#' model estimation whenever the manuscript is accepted.
#' #' @author Kuo-Jung Lee <[email protected]> #' @references{ #' \insertRef{Lee:etal:2021}{BayesRGMM} #' #' \insertRef{Lee:etal:2020}{BayesRGMM} #' #'} #' #' @examples #' \dontrun{ #' library(BayesRGMM) #' rm(list=ls(all=TRUE)) #' Fixed.Effs = c(-0.2, -0.3, 0.8, -0.4) #c(-0.2,-0.8, 1.0, -1.2) #' P = length(Fixed.Effs) #' q = 1 #number of random effects #' T = 5 #time points #' N = 100 #number of subjects #' num.of.iter = 100 #number of iterations #' HSD.para = c(-0.5, -0.3) #the parameters in HSD model #' a = length(HSD.para) #' w = array(runif(T*T*a), c(T, T, a)) #design matrix in HSD model #' #' for(time.diff in 1:a) #' w[, , time.diff] = 1*(as.matrix(dist(1:T, 1:T, method="manhattan")) #' ==time.diff) #' #' #Generate a data with HSD model #' HSD.sim.data = SimulatedDataGenerator( #' Num.of.Obs = N, Num.of.TimePoints = T, Fixed.Effs = Fixed.Effs, #' Random.Effs = list(Sigma = 0.5*diag(1), df=3), #' Cor.in.DesignMat = 0., Missing = list(Missing.Mechanism = 2, #' RegCoefs = c(-1.5, 1.2)), Cor.Str = "HSD", #' HSD.DesignMat.para = list(HSD.para = HSD.para, DesignMat = w)) #' #' hyper.params = list( #' sigma2.beta = 1, #' sigma2.delta = 1, #' v.gamma = 5, #' InvWishart.df = 5, #' InvWishart.Lambda = diag(q) ) #' #' HSD.output = BayesRobustProbit( #' fixed = as.formula(paste("y~-1+", paste0("x", 1:P, collapse="+"))), #' data=HSD.sim.data$sim.data, random = ~ 1, Robustness=TRUE, #' HS.model = ~IndTime1+IndTime2, subset = NULL, na.action='na.exclude', #' hyper.params = hyper.params, num.of.iter = num.of.iter, #' Interactive=0) #' } BayesRobustProbit = function(fixed, data, random, Robustness=TRUE, subset=NULL, na.action='na.exclude', arma.order=NULL, HS.model=NULL, hyper.params = NULL, num.of.iter=20000, Interactive=FALSE) { #cat("\nCall:\n", printCall(match.call()), "\n\n", sep = "") if(length(arma.order)==0 && length(HS.model)==0) stop("Please specify at least one model for the correlation structure!!") if(length(arma.order)!=0 && length(HS.model)!=0) stop("Please specify only one model for the correlation structure!!") if( length(unique(na.omit(data[, names(data)==all.vars(fixed)[1]])))>2 ){ output = do.call("BayesCumulativeProbitHSD", list(fixed=fixed, data=data, random=random, Robustness=Robustness, subset=subset, na.action=na.action, HS.model=HS.model, hyper.params = hyper.params, num.of.iter=num.of.iter, Interactive = Interactive)) } else{ if(length(arma.order)>0) output = do.call("BayesProbitARMA", list(fixed=fixed, data=data, random=random, Robustness=Robustness, subset=subset, na.action=na.action, arma.order=arma.order, hyper.params = hyper.params, num.of.iter=num.of.iter, Interactive = Interactive)) if(length(HS.model)>0) output = do.call("BayesProbitHSD", list(fixed=fixed, data=data, random=random, Robustness=Robustness, subset=subset, na.action=na.action, HS.model=HS.model, hyper.params = hyper.params, num.of.iter=num.of.iter, Interactive = Interactive)) #output = BayesProbitHSD(fixed, data, random, HS.model, subset=NULL, na.action, num.of.iter) } output$call$data = deparse(substitute(data)) #print(output$call$data) output }
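
# A minimal usage note (hypothetical calls, not executed here): BayesRobustProbit()
# accepts exactly one correlation model per fit and dispatches on the response:
# a response with more than two categories is sent to BayesCumulativeProbitHSD(),
# while a binary response goes to BayesProbitARMA() or BayesProbitHSD().
# BayesRobustProbit(fixed = y ~ x1, data = dat, random = ~1, arma.order = c(1, 1))  # ARMA correlation
# BayesRobustProbit(fixed = y ~ x1, data = dat, random = ~1, HS.model = ~IndTime1)  # HSD correlation
# Supplying both arma.order and HS.model (or neither) stops with an error.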
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/BayesRobustProbit.r
#' To summarizes model estimation outcomes #' #' It provides basic posterior summary statistics such as the posterior point and #' confidence interval estimates of parameters and the values of information criterion statistics for model comparison. #' #' @param object output from the function \code{BayesRobustProbit}. #' @param digits rounds the values in its first argument to the specified #' number of significant digits. #' @return a list of posterior summary statistics and corresponding model information #' #' @examples #' \dontrun{ #' library(BayesRGMM) #' rm(list=ls(all=TRUE)) #' Fixed.Effs = c(-0.2, -0.3, 0.8, -0.4) #c(-0.2,-0.8, 1.0, -1.2) #' P = length(Fixed.Effs) #' q = 1 #number of random effects #' T = 5 #time points #' N = 100 #number of subjects #' num.of.iter = 100 #number of iterations #' HSD.para = c(-0.5, -0.3) #the parameters in HSD model #' a = length(HSD.para) #' w = array(runif(T*T*a), c(T, T, a)) #design matrix in HSD model #' #' for(time.diff in 1:a) #' w[, , time.diff] = 1*(as.matrix(dist(1:T, 1:T, method="manhattan")) #' == time.diff) #' #' #Generate a data with HSD model #' HSD.sim.data = SimulatedDataGenerator(Num.of.Obs = N, Num.of.TimePoints = T, #' Fixed.Effs = Fixed.Effs, Random.Effs = list(Sigma = 0.5*diag(1), df=3), #' Cor.in.DesignMat = 0., Missing = list(Missing.Mechanism = 2, #' RegCoefs = c(-1.5, 1.2)), Cor.Str = "HSD", #' HSD.DesignMat.para = list(HSD.para = HSD.para, DesignMat = w)) #' #' hyper.params = list( #' sigma2.beta = 1, #' sigma2.delta = 1, #' v.gamma = 5, #' InvWishart.df = 5, #' InvWishart.Lambda = diag(q) ) #' #' HSD.output = BayesRobustProbit( #' fixed = as.formula(paste("y~-1+", paste0("x", 1:P, collapse="+"))), #' data=HSD.sim.data$sim.data, random = ~ 1, Robustness=TRUE, #' HS.model = ~IndTime1+IndTime2, subset = NULL, na.action='na.exclude', #' hyper.params = hyper.params, num.of.iter = num.of.iter, Interactive =0) #' #' BayesRobustProbitSummary(HSD.output) #' } BayesRobustProbitSummary = function(object, digits = max(1L, getOption("digits") - 4L)) { if(object$call[1]!="BayesProbitHSD()" & object$call[1]!="BayesProbitARMA()" & object$call[1]!="BayesCumulativeProbitHSD()") stop("Please input the correct object!!") if(object$call[1] == "BayesProbitHSD()") output = BayesProbitHSD.Summary(object, digits = digits) if(object$call[1] == "BayesProbitARMA()") output = BayesProbitARMA.Summary(object, digits = digits) if(object$call[1] == "BayesCumulativeProbitHSD()") output = BayesProbitHSD.Summary(object, digits = digits) output }
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/BayesRobustProbitSummary.r
#' To compute the correlation matrix in terms of hypersphere decomposition approach
#'
#' The correlation matrix is reparameterized via hyperspherical coordinates with angle parameters for \cr
#' trigonometric functions, and the angle parameters are referred to as hypersphere (HS) parameters.
#' In order to obtain an unconstrained estimation of the angle parameters and to reduce the number of
#' parameters for facilitating the computation,
#' we model the correlation structures of the responses in terms of generalized linear models.
#' @param w a design matrix used to model the HS parameters as functions of subject-specific covariates.
#' @param delta an \eqn{a \times 1} vector of unknown parameters used to model the HS parameters.
#' @return a correlation matrix
#'
#' @author Kuo-Jung Lee <[email protected]>
#' @references{
#' \insertRef{Zhang:etal:2015}{BayesRGMM}
#'}
#' @examples
#' \dontrun{
#' library(BayesRGMM)
#' rm(list=ls(all=TRUE))
#' T = 5 #time points
#' HSD.para = c(-0.5, -0.3) #the parameters in HSD model
#' a = length(HSD.para)
#' w = array(runif(T*T*a), c(T, T, a)) #design matrix in HSD model
#' signif(CorrMat.HSD(w, HSD.para), 4)
#' }
CorrMat.HSD = function(w, delta)
{
    T = dim(w)[1]
    # angles: the linear predictor w'delta is mapped into (0, pi) by a scaled logistic
    F.tmp = matrix(0, T, T)
    for(l in 1:T)
        for(m in 1:T)
            F.tmp[l, m] = sum(w[l, m, ]*delta)
    F.tmp = exp(F.tmp)*pi/(1+exp(F.tmp))

    # lower-triangular factor F with unit-length rows, built from the angles
    F = matrix(0, T, T)
    F[1, 1] = 1
    for(l in 2:T)
        F[l, 1] = cos(F.tmp[l, 1])
    for(m in 2:(T-1))
        for(l in (m+1):T)
            F[l, m] = cos(F.tmp[l, m]) * prod(sin(F.tmp[l, 1:(m-1)]))
    for(m in 2:T)
        F[m, m] = prod(sin(F.tmp[m, 1:(m-1)]))
    # Ri = F F' is then a correlation matrix (unit diagonal, positive semi-definite)
    Ri = F%*%t(F)
    Ri
}
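
# A minimal sanity check (illustrative, not part of the package API): by
# construction Ri = F %*% t(F) with unit-length rows of F, so the result is a
# valid correlation matrix with unit diagonal and nonnegative eigenvalues.
# T = 5
# HSD.para = c(-0.5, -0.3)
# w = array(runif(T*T*length(HSD.para)), c(T, T, length(HSD.para)))
# Ri = CorrMat.HSD(w, HSD.para)
# all.equal(diag(Ri), rep(1, T))   # unit diagonal
# min(eigen(Ri)$values) > -1e-8    # positive semi-definite up to rounding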
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/CorrMat.HSD.r
#' The German socioeconomic panel study data
#'
#' The German socioeconomic panel study data was taken from the first twelve annual waves (1984 through 1995)
#' of the German Socioeconomic Panel (GSOEP) which surveys a representative sample of East and West German households.
#' The data provide detailed information on the utilization of health care facilities, characteristics of current
#' employment, and the insurance schemes under which individuals are covered. We consider the sample of individuals
#' aged 25 through 65 from the West German subsample and of German nationality.
#' The sample contained 3691 male and 3689 female individuals which make up a sample of 14,243 male and 13,794 female
#' person-year observations.
#'
#' @docType data
#'
#' @usage data(GSPS)
#'
#' @format A data frame with 27326 rows and 25 variables
#' \describe{
#' \item{id}{ person - identification number}
#' \item{female}{ female = 1; male = 0}
#' \item{year}{ calendar year of the observation}
#' \item{age}{ age in years}
#' \item{hsat}{ health satisfaction, coded 0 (low) - 10 (high)}
#' \item{handdum}{ handicapped = 1; otherwise = 0}
#' \item{handper}{ degree of handicap in percent (0 - 100)}
#' \item{hhninc}{ household nominal monthly net income in German marks / 1000}
#' \item{hhkids}{ children under age 16 in the household = 1; otherwise = 0}
#' \item{educ}{ years of schooling}
#' \item{married}{ married = 1; otherwise = 0}
#' \item{haupts}{ highest schooling degree is Hauptschul degree = 1; otherwise = 0}
#' \item{reals}{ highest schooling degree is Realschul degree = 1; otherwise = 0}
#' \item{fachhs}{ highest schooling degree is Polytechnical degree = 1; otherwise = 0}
#' \item{abitur}{ highest schooling degree is Abitur = 1; otherwise = 0}
#' \item{univ}{ highest schooling degree is university degree = 1; otherwise = 0}
#' \item{working}{ employed = 1; otherwise = 0}
#' \item{bluec}{ blue collar employee = 1; otherwise = 0}
#' \item{whitec}{ white collar employee = 1; otherwise = 0}
#' \item{self}{ self employed = 1; otherwise = 0}
#' \item{beamt}{ civil servant = 1; otherwise = 0}
#' \item{docvis}{ number of doctor visits in last three months}
#' \item{hospvis}{ number of hospital visits in last calendar year}
#' \item{public}{ insured in public health insurance = 1; otherwise = 0}
#' \item{addon}{ insured by add-on insurance = 1; otherwise = 0}
#' }
#'
#' @keywords datasets
#'
#' @references{
#' \insertRef{Riphahn:etal:2003}{BayesRGMM}
#'}
#' @source \href{http://qed.econ.queensu.ca/jae/2003-v18.4/riphahn-wambach-million/}{JAE Archive}
"GSPS"
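
# A minimal usage sketch (assuming the package is installed; not executed here):
# library(BayesRGMM)
# data(GSPS)
# str(GSPS)           # 27326 person-year observations on 25 variables
# table(GSPS$female)  # person-year observations by sex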
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/GSPS.r
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

ProbitMCMCHSD <- function(i_Num_of_iterations, list_Data, logic_Robust, list_InitialValues, list_HyperPara, list_UpdatePara, list_TuningPara, logic_Interactive) {
    .Call(`_BayesRGMM_ProbitMCMCHSD`, i_Num_of_iterations, list_Data, logic_Robust, list_InitialValues, list_HyperPara, list_UpdatePara, list_TuningPara, logic_Interactive)
}

ProbitMCMCARMAKB <- function(i_Num_of_iterations, list_Data, logic_Robust, list_InitialValues, list_HyperPara, list_UpdatePara, list_TuningPara, ARMA_Order, logic_Interactive) {
    .Call(`_BayesRGMM_ProbitMCMCARMAKB`, i_Num_of_iterations, list_Data, logic_Robust, list_InitialValues, list_HyperPara, list_UpdatePara, list_TuningPara, ARMA_Order, logic_Interactive)
}

CumulativeProbitMCMC <- function(i_Num_of_iterations, list_Data, logic_Robust, list_InitialValues, list_HyperPara, list_UpdatePara, list_TuningPara, logic_Interactive) {
    .Call(`_BayesRGMM_CumulativeProbitMCMC`, i_Num_of_iterations, list_Data, logic_Robust, list_InitialValues, list_HyperPara, list_UpdatePara, list_TuningPara, logic_Interactive)
}
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/RcppExports.R
#' Simulating longitudinal ordinal data with HSD correlation structures.
#'
#' This function is used to simulate data for the cumulative probit mixed-effects model with HSD correlation structures.
#'
#' @param Num.of.Obs the number of subjects to be simulated.
#' @param Num.of.TimePoints the maximum number of time points among all subjects.
#' @param Num.of.Cats the number of categories.
#' @param Fixed.Effs a vector of regression coefficients.
#' @param Random.Effs a list of covariance matrix and the degree of freedom, \cr
#' e.g., \code{list(Sigma = 0.5*diag(1), df=3)}.
#' @param DesignMat a design matrix.
#' @param Missing a list specifying the missing mechanism of observations: 0: data is complete, 1: missing completely at random, 2: missing at random related to responses, and 3: missing at random related to covariates; together with the corresponding regression coefficients (weights) on the previously observed values (either responses or covariates), e.g., \code{Missing = list(Missing.Mechanism = 3, RegCoefs = c(0.4, 1.2, -2.1))}.
#' @param HSD.DesignMat.para the list of parameters in HSD correlation structure, \cr
#' e.g., \code{HSD.DesignMat.para = list(HSD.para = HSD.para, DesignMat = w)}.
#'
#' @return a list containing the following components:
#' \describe{
#' \item{sim.data}{The simulated response variables \eqn{y}, covariates \eqn{x}'s, and subject identification \samp{id}.}
#' \item{beta.true}{The given values of fixed effects.}
#' \item{classes}{The intervals of classes.}
#' \item{HSD.para}{The given values of parameters in HSD model.}
#' }
#' @examples
#' \dontrun{
#' library(BayesRGMM)
#' rm(list=ls(all=TRUE))
#'
#' Fixed.Effs = c(-0.1, 0.1, -0.1)
#' P = length(Fixed.Effs)
#' q = 1 #number of random effects
#' T = 7 #time points
#' N = 100 #number of subjects
#' Num.of.Cats = 3 #number of categories
#' num.of.iter = 1000 #number of iterations
#'
#' HSD.para = c(-0.9, -0.6) #the parameters in HSD model
#' a = length(HSD.para)
#' w = array(runif(T*T*a), c(T, T, a)) #design matrix in HSD model
#'
#' for(time.diff in 1:a)
#' w[, , time.diff] = 1*(as.matrix(dist(1:T, 1:T, method="manhattan"))
#' ==time.diff)
#'
#' x = array(0, c(T, P, N))
#' for(i in 1:N){
#' x[, 1, i] = 1:T
#' x[, 2, i] = rbinom(1, 1, 0.5)
#' x[, 3, i] = x[, 1, i]*x[, 2, i]
#' }
#'
#' DesignMat = x
#'
#' #MAR
#' CPREM.sim.data = SimulatedDataGenerator.CumulativeProbit(
#' Num.of.Obs = N, Num.of.TimePoints = T, Num.of.Cats = Num.of.Cats,
#' Fixed.Effs = Fixed.Effs, Random.Effs = list(Sigma = 0.5*diag(1), df=3),
#' DesignMat = DesignMat, Missing = list(Missing.Mechanism = 2,
#' MissingRegCoefs=c(-0.7, -0.2, -0.1)),
#' HSD.DesignMat.para = list(HSD.para = HSD.para, DesignMat = w))
#'
#' print(table(CPREM.sim.data$sim.data$y))
#' print(CPREM.sim.data$classes)
#' }
SimulatedDataGenerator.CumulativeProbit = function(Num.of.Obs, Num.of.TimePoints, Num.of.Cats, Fixed.Effs,
    Random.Effs, DesignMat, Missing, HSD.DesignMat.para)
{
    P = Num.of.Fixed.Effs = length(Fixed.Effs)
    q = Num.of.Random.Effs = dim(Random.Effs$Sigma)[1]
    T = Num.of.TimePoints
    N = Num.of.Obs
    MissingMechanism = Missing$Missing.Mechanism #0: complete, 1: MCAR, 2: MAR with Y, 3: MAR with X
    if(MissingMechanism == 1)
        MissingRegCoefs = Missing$Probs # probs or weights
    if(MissingMechanism == 2)
        MissingRegCoefs = Missing$RegCoefs

    beta.true = matrix(Fixed.Effs, ncol=1) #matrix(rnorm(P, 0, 1), ncol=1) #matrix(c(-1.2,-0.3, 0.8, -0.4), ncol=1)

    x = array(0, c(T, P, N))
    z = array(0, c(T, q, N))
    y.star = matrix(0, T, N)

    df = Random.Effs$df
    nu = rgamma(N, df/2, df/2) #rep(1, N) #
    b =
NULL Sigma = Random.Effs$Sigma#*diag(q)#0.5*diag(q) for(i in 1:N) b = cbind(b, t(rmvnorm(1, rep(0, q), Sigma /nu[i]))) #=====================================================================================================# HSD.para = HSD.DesignMat.para$HSD.para a = length(HSD.para) w = HSD.DesignMat.para$DesignMat #array(runif(T*T*a), c(T, T, a)) Ri=CorrMat.HSD(w, HSD.para) x = DesignMat if(dim(x)[2] != length(beta.true)) stop("Check the size of design matrix and regressors!") for(i in 1:N){ #x[,, i] = t(rmvnorm(P, rep(0, T), AR1.cor(T, Cor.in.DesignMat))) #x[, 1, ] = 1 #x[, 2, i] = rbinom(1, 1, 0.5) z[, 1, ] = 1 y.star[, i] = x[,, i] %*% beta.true + z[, , i]%*%b[, i, drop=F] + t(rmvnorm(1, rep(0, T), Ri)) } cut.points = quantile(y.star, seq(0, 1, length=Num.of.Cats+1)) #seq(range(y.star)[1], range(y.star)[2], length = Num.of.Cats+1) #quantile(yy, seq(0, 1, length=4)) y = matrix(as.numeric(cut(y.star, cut.points, include.lowest=TRUE)), dim(y.star)) #alpha.ini = c(-Inf, tail(head(cut.points, -1), -1), Inf) #alpha.ini = c(-Inf, seq(-5, 5, length = Num.of.Cats-1), Inf) #=====================================================================================================# sim.data = as.data.frame(cbind(c(y), adply(x, 3))) colnames(sim.data) = c("y", "id", paste0("x", 1:P)) sim.data$id = as.numeric(sim.data$id) #=====================================================================================================# TimePointsAvailable = rep(T, N) if(MissingMechanism==1) TimePointsAvailable = sample(1:T, N, replace = TRUE, prob = MissingRegCoefs)#c(423, 208, 263, 1416))# rep(T, N) if(MissingMechanism==2){ TimePointsAvailable.MAR = TimePointsAvailable post.time = length(MissingRegCoefs) for(i in 1:N){ for(t1 in 1:TimePointsAvailable[i]){ if(t1>post.time){ eta = sum(MissingRegCoefs*y[(t1-1):(t1-post.time), i])#MissingRegCoefs[1] + MissingRegCoefs[2]*y[t1-1,i] pdrop = exp(eta)/(1+exp(eta)) #cat("MissingRegCoefs = ", MissingRegCoefs, "\teta = ", eta, "\tpdrop=", pdrop, "\n") if(runif(1)<pdrop) TimePointsAvailable.MAR[i]<-t1-1 } } } TimePointsAvailable = TimePointsAvailable.MAR } #print(TimePointsAvailable) for(i in 1:N) sim.data[sim.data$id==i, ][(1:T)>TimePointsAvailable[i], names(sim.data)!="id"] = NA simdata = list(sim.data = sim.data, beta.true = beta.true, classes = cut.points, HSD.para = HSD.para) invisible(simdata) }
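
# A minimal sketch of the MAR dropout rule implemented above (illustrative only;
# the numbers are made up): with weights MissingRegCoefs on the most recent
# observed responses, the dropout probability at time t is logistic in
# eta = sum(MissingRegCoefs * y[previous times]).
# MissingRegCoefs = c(-0.7, -0.2, -0.1)  # weights on the last three responses
# y.prev = c(2, 1, 3)                    # hypothetical previous categories
# eta = sum(MissingRegCoefs * y.prev)
# pdrop = exp(eta)/(1 + exp(eta))        # subject drops out at time t with this probability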
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/SimulatedDataGenerator.CumulativeProbit.r
#' Generate simulated data with either ARMA or MCD correlation structures.
#'
#' This function is used to generate simulated data for simulation studies with ARMA and MCD correlation structures.
#'
#' @param Num.of.Obs the number of subjects to be simulated.
#' @param Num.of.TimePoints the maximum number of time points among all subjects.
#' @param Fixed.Effs a vector of regression coefficients.
#' @param Random.Effs a list of covariance matrix and the degree of freedom, \cr
#' e.g., \code{list(Sigma = 0.5*diag(1), df=3)}.
#' @param Cor.in.DesignMat the correlation of covariates in the design matrix.
#' @param Missing a list specifying the missing mechanism of observations: 0: data is complete, 1: missing completely at random, 2: missing at random related to responses, and 3: missing at random related to covariates; together with the corresponding regression coefficients (weights) on the previously observed values (either responses or covariates), e.g., \code{Missing = list(Missing.Mechanism = 3, RegCoefs = c(0.4, 1.2, -2.1))}.
#' @param Cor.Str the model for correlation structure, ``ARMA'' or ``HSD''.
#' @param HSD.DesignMat.para if \code{Cor.Str="HSD"}, you need to specify the list of parameters in HSD correlation structure, \cr
#' e.g., \code{HSD.DesignMat.para = list(HSD.para = HSD.para, DesignMat = w)}.
#' @param ARMA.para if \code{Cor.Str="ARMA"}, you need to specify the list of parameters in ARMA correlation structure, e.g., \code{ARMA.para = list(AR.para=0.1, MA.para=0.2)}.
#'
#' @return a list containing the following components:
#' \describe{
#' \item{sim.data}{The simulated response variables \eqn{y}, covariates \eqn{x}'s, and subject identification \samp{id}.}
#' \item{beta.true}{The given values of fixed effects.}
#' \item{ARMA.para}{The given values of parameters in ARMA model.}
#' \item{HSD.para}{The given values of parameters in HSD model.}
#' }
#' @examples
#' \dontrun{
#' library(BayesRGMM)
#' rm(list=ls(all=TRUE))
#' Fixed.Effs = c(-0.2, -0.3, 0.8, -0.4)
#' P = length(Fixed.Effs)
#' q = 1 #number of random effects
#' T = 5 #time points
#' N = 100 #number of subjects
#' num.of.iter = 100 #number of iterations
#' HSD.para = c(-0.5, -0.3) #the parameters in HSD model
#' a = length(HSD.para)
#' w = array(runif(T*T*a), c(T, T, a)) #design matrix in HSD model
#'
#' for(time.diff in 1:a)
#' w[, , time.diff] = 1*(as.matrix(dist(1:T, 1:T, method="manhattan"))
#' ==time.diff)
#'
#' #Generate a data with HSD model
#' HSD.sim.data = SimulatedDataGenerator(Num.of.Obs = N, Num.of.TimePoints = T,
#' Fixed.Effs = Fixed.Effs, Random.Effs = list(Sigma = 0.5*diag(1), df=3),
#' Cor.in.DesignMat = 0., Missing = list(Missing.Mechanism = 2,
#' RegCoefs = c(-1.5, 1.2)), Cor.Str = "HSD",
#' HSD.DesignMat.para = list(HSD.para = HSD.para, DesignMat = w))
#'
#' #the proportion of 1's
#' ones = sum(HSD.sim.data$sim.data$y==1, na.rm=T)
#' num.of.obs = sum(!is.na(HSD.sim.data$sim.data$y))
#' print(ones/num.of.obs)
#'
#' #the missing rate in the simulated data
#' print(sum(is.na(HSD.sim.data$sim.data$y)))
#'
#' #===========================================================================#
#' #Generate a data with ARMA model
#' ARMA.sim.data = SimulatedDataGenerator(Num.of.Obs = N, Num.of.TimePoints = T,
#' Fixed.Effs = Fixed.Effs, Random.Effs = list(Sigma = 0.5*diag(1), df=3),
#' Cor.in.DesignMat = 0., list(Missing.Mechanism = 2, RegCoefs = c(-1.5, 1.2)),
#' Cor.Str = "ARMA", ARMA.para=list(AR.para = 0.8))
#' }
SimulatedDataGenerator = function(Num.of.Obs, Num.of.TimePoints, Fixed.Effs, Random.Effs, Cor.in.DesignMat,
Missing, Cor.Str, HSD.DesignMat.para, ARMA.para) { P = Num.of.Fixed.Effs = length(Fixed.Effs) q = Num.of.Random.Effs = dim(Random.Effs$Sigma)[1] T = Num.of.TimePoints N = Num.of.Obs MissingMechanism = Missing$Missing.Mechanism #0: complete, 1: MCAR, 2: MAR with Y, 3:MAR with X if(MissingMechanism == 1) MissingRegCoefs = Missing$Probs if(MissingMechanism == 2) MissingRegCoefs = Missing$RegCoefs beta.true = matrix(Fixed.Effs, ncol=1) #matrix(rnorm(P, 0, 1), ncol=1)#matrix(c(-1.2,-0.3, 0.8, -0.4), ncol=1) x = array(0, c(T, P, N)) z = array(0, c(T, q, N)) y.star = matrix(0, T, N) df = Random.Effs$df nu = rgamma(N, df/2, df/2) #rep(1, N) # b = NULL Sigma = Random.Effs$Sigma#*diag(q)#0.5*diag(q) for(i in 1:N) b = cbind(b, t(rmvnorm(1, rep(0, q), Sigma /nu[i]))) #=====================================================================================================# if(Cor.Str == "HSD"){ #delta = HSD.para #c(-0.5, -0.3) HSD.para = HSD.DesignMat.para$HSD.para a = length(HSD.para) w = HSD.DesignMat.para$DesignMat #array(runif(T*T*a), c(T, T, a)) #for(time.diff in 1:a) # w[, , time.diff] = 1*(as.matrix(dist(1:T, 1:T, method="manhattan")) ==time.diff) Ri=CorrMat.HSD(w, HSD.para) for(i in 1:N){ x[,, i] = t(rmvnorm(P, rep(0, T), AR1.cor(T, Cor.in.DesignMat))) x[, 1, ] = 1 x[, 2, i] = rbinom(1, 1, 0.5) z[, 1, ] = 1 y.star[, i] = x[,, i] %*% beta.true + z[, , i]%*%b[, i, drop=F] + t(rmvnorm(1, rep(0, T), Ri)) } #prob = pnorm(y.star) y = 1*(y.star>0) #apply(prob, 1:2, rbinom, n=1, size=1) } #=====================================================================================================# if(Cor.Str == "ARMA"){ e = matrix(0, T, N) AR.para = ARMA.para$AR.para MA.para = ARMA.para$MA.para AR.order = length(AR.para) MA.order = length(MA.para) if(AR.order>0) phi.true = matrix(rep(AR.para, N), AR.order, N) if(MA.order>0) psi.true = matrix(rep(MA.para, N), MA.order, N) ARMAorder = c(AR.order, MA.order) for(i in 1:N){ x[,, i] = t(rmvnorm(P, rep(0, T), AR1.cor(T, Cor.in.DesignMat))) x[, 1, ] = 1 x[, 2, i] = rbinom(1, 1, 0.5) z[, 1, ] = 1 for(t in 1:T){ if(t<=max(ARMAorder)){ #cat("max ARMA>0\n") e[t, i] = rnorm(1) y.star[t, i] = x[t,, i] %*% beta.true + z[t, , i]%*%b[, i, drop=F] + e[t, i] } else if( (AR.order == 0) & (MA.order == 0) ){ e[t, i] = rnorm(1) y.star[t, i] = x[t,, i] %*% beta.true + z[t, , i]%*%b[, i, drop=F]+ e[t, i] } else if( (AR.order > 0) & (MA.order == 0) ){ #cat("AR>0\n") e[t, i] = rnorm(1) y.star[t, i] = x[t,, i] %*% beta.true + z[t, , i]%*%b[, i, drop=F]+ sum(phi.true[, i]*(y.star[(t-1):(t-AR.order), i]-x[(t-1):(t-AR.order),, i] %*% beta.true)) + + e[t, i] } else if( (AR.order == 0) & (MA.order > 0) ){ #cat("MA>0\n") e[t, i] = rnorm(1) y.star[t, i] = x[t,, i] %*% beta.true + z[t, , i]%*%b[, i, drop=F]+ + sum(psi.true[, i]*e[(t-1):(t-MA.order), i]) + e[t, i] #cat("i = ", i, "\tt = ", t, "\n") #cat("psi.true[, i] = ", psi.true[, i], "\t", "e[(t-1):(t-MA.order), i] = ", e[(t-1):(t-MA.order), i], "\n") #cat("sum(psi.true[, i]*e[(t-1):(t-MA.order), i]) = ", sum(psi.true[, i]*e[(t-1):(t-MA.order), i]), "\n") } else if( (AR.order > 0) & (MA.order > 0) ){ #cat("ARMA>0\n") e[t, i] = rnorm(1) y.star[t, i] = x[t,, i] %*% beta.true + z[t, , i]%*%b[, i, drop=F]+ sum(phi.true[, i]*(y.star[(t-1):(t-AR.order), i]-x[(t-1):(t-AR.order),, i] %*% beta.true)) + sum(psi.true[, i]*e[(t-1):(t-MA.order), i]) + e[t, i] } } } y = 1*(y.star>0) } #=====================================================================================================# sim.data = as.data.frame(cbind(c(y), adply(x, 3))) colnames(sim.data) = 
c("y", "id", paste0("x", 1:P)) sim.data$id = as.numeric(sim.data$id) #=====================================================================================================# TimePointsAvailable = rep(T, N) if(MissingMechanism==1) TimePointsAvailable = sample(1:T, N, replace = TRUE, prob = MissingRegCoefs)#c(423, 208, 263, 1416))# rep(T, N) if(MissingMechanism==2){ TimePointsAvailable.MAR = TimePointsAvailable post.time = length(MissingRegCoefs) for(i in 1:N){ for(t1 in 1:TimePointsAvailable[i]){ if(t1>post.time){ eta = sum(MissingRegCoefs*y[(t1-1):(t1-post.time), i])#MissingRegCoefs[1] + MissingRegCoefs[2]*y[t1-1,i] pdrop = exp(eta)/(1+exp(eta)) #cat("MissingRegCoefs = ", MissingRegCoefs, "\teta = ", eta, "\tpdrop=", pdrop, "\n") if(runif(1)<pdrop) TimePointsAvailable.MAR[i]<-t1-1 } } } TimePointsAvailable = TimePointsAvailable.MAR } if(0){ if(MissingMechanism==3){ TimePointsAvailable.MAR = TimePointsAvailable for(i in 1:N){ for(t1 in 1:TimePointsAvailable[i]){ if(t1>1){ eta <- MissingRegCoefs[1]*x[t1-1,2, i]+MissingRegCoefs[2]*x[t1-1,3, i]+MissingRegCoefs[3]*x[t1-1,4, i] pdrop <-exp(eta)/(1+exp(eta)) print(pdrop) if(runif(1)<pdrop) TimePointsAvailable.MAR[i]<-t1-1 } } } TimePointsAvailable = TimePointsAvailable.MAR } } #print(TimePointsAvailable) for(i in 1:N) sim.data[sim.data$id==i, ][(1:T)>TimePointsAvailable[i], names(sim.data)!="id"] = NA if(Cor.Str == "HSD") simdata = list(sim.data = sim.data, beta.true = beta.true, HSD.para = HSD.para) if(Cor.Str == "ARMA") simdata = list(sim.data = sim.data, beta.true = beta.true, ARMA.para = ARMA.para) invisible(simdata) }
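
# A minimal sketch of the latent AR(1) recursion used above for Cor.Str = "ARMA"
# (illustrative only; all inputs are made up): conditional on the fixed part
# xb_t = x_t' beta and a constant random-effect part zb = z_t' b, the latent
# responses follow y*_t = xb_t + zb + phi*(y*_{t-1} - xb_{t-1}) + e_t, and the
# observed binary outcome is y_t = 1*(y*_t > 0).
# T = 5; phi = 0.8; zb = 0.3
# xb = rnorm(T); e = rnorm(T); y.star = numeric(T)
# y.star[1] = xb[1] + zb + e[1]
# for(t in 2:T) y.star[t] = xb[t] + zb + phi*(y.star[t-1] - xb[t-1]) + e[t]
# y = 1*(y.star > 0)                     # observed binary responses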
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/SimulatedDataGenerator.r
printCall = function (call)
{
    d <- deparse(call)
    if (length(d) <= 3) {
        paste(d, sep = "\n", collapse = "\n")
    }
    else {
        d <- d[1:3]
        d[3] <- paste0(d[3], "...")
        paste(d, sep = "\n", collapse = "\n")
    }
}
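
# A minimal usage sketch (hypothetical call object): printCall() truncates the
# deparsed call to at most three lines so long formulas do not flood the console.
# example.call = quote(BayesRobustProbit(fixed = y ~ x1 + x2, data = dat, random = ~1))
# cat(printCall(example.call), "\n")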
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/R/printCall.r
### R code from vignette source 'BayesRGMM.Rnw'

###################################################
### code chunk number 1: options
###################################################
options(prompt = "R> ", digits = 4, show.signif.stars = FALSE)


###################################################
### code chunk number 2: Simulation 1: MCD Correlation Structure
###################################################
# Simulation study for MCD correlation structure
library(BayesRGMM)
rm(list=ls(all=TRUE))

Fixed.Effs = c(-0.2, -0.3, 0.8, -0.4)
P = length(Fixed.Effs)
q = 1
T = 5
N = 100
num.of.iter = 100

HSD.para = c(-0.5, -0.3)
a = length(HSD.para)
w = array(runif(T*T*a), c(T, T, a))

for(time.diff in 1:a)
    w[, , time.diff] = 1*(as.matrix(dist(1:T, 1:T, method="manhattan")) ==time.diff)

HSD.sim.data = SimulatedDataGenerator(Num.of.Obs = N, Num.of.TimePoints = T,
    Fixed.Effs = Fixed.Effs, Random.Effs = list(Sigma = 0.5*diag(1), df=3),
    Cor.in.DesignMat = 0., Missing = list(Missing.Mechanism = 2,
    RegCoefs = c(-1.5, 1.2)), Cor.Str = "HSD",
    HSD.DesignMat.para = list(HSD.para = HSD.para, DesignMat = w))

hyper.params = list(
    sigma2.beta = 1,
    sigma2.delta = 1,
    v.gamma = 5,
    InvWishart.df = 5,
    InvWishart.Lambda = diag(q) )

HSD.output = BayesRobustProbit(fixed = as.formula(paste("y~-1+",
    paste0("x", 1:P, collapse="+"))), data=HSD.sim.data$sim.data,
    random = ~ 1, HS.model = ~IndTime1+IndTime2, Robustness=TRUE,
    subset = NULL, na.action='na.exclude', hyper.params = hyper.params,
    num.of.iter = num.of.iter, Interactive = FALSE)

original = options(digits = 4)
Model.Estimation = BayesRobustProbitSummary(HSD.output)

cat("\nCoefficients:\n")
print(Model.Estimation$beta.est.CI)

cat("\nParameters in HSD model:\n")
print(Model.Estimation$delta.est.CI)

cat("\nRandom effect: \n")
print(Model.Estimation$random.cov)

cat("\nModel Information:\n")
print(Model.Estimation$model.info)

cat("\nEstimate of Ri: \n")
print(Model.Estimation$Ri, quote = FALSE)

options(original)


###################################################
### code chunk number 3: Simulation 2: ARMA Correlation Structure
###################################################
library(BayesRGMM)
rm(list=ls(all=TRUE))

Fixed.Effs = c(-0.2,-0.8, 1.0, -1.2)
P = length(Fixed.Effs)
q = 1
T = 10
N = 100
num.of.iter = 100

ARMA.sim.data = SimulatedDataGenerator(Num.of.Obs = N, Num.of.TimePoints = T,
    Fixed.Effs = Fixed.Effs, Random.Effs = list(Sigma = 0.5*diag(1), df=3),
    Cor.in.DesignMat = 0., list(Missing.Mechanism = 2, RegCoefs = c(-1.5, 1.2)),
    Cor.Str = "ARMA", ARMA.para=list(AR.para = 0.4, MA.para=0.2))

ARMA.output = BayesRobustProbit(fixed = as.formula(paste("y~-1+",
    paste0("x", 1:P, collapse="+"))), data=ARMA.sim.data$sim.data,
    random = ~ 1, Robustness=TRUE, subset = NULL, na.action='na.exclude',
    arma.order = c(1, 1), num.of.iter = num.of.iter, Interactive = FALSE)

original = options(digits = 4)
Model.Estimation = BayesRobustProbitSummary(ARMA.output)

cat("\nCoefficients:\n")
print(Model.Estimation$beta.est.CI)

cat("\nARMA parameters:\n\n")
print(Model.Estimation$arma.est)

cat("\nRandom effect: \n")
print(Model.Estimation$random.cov)

cat("\nModel Information:\n")
print(Model.Estimation$model.info)

options(original)


###################################################
### code chunk number 4: Simulation: Ordinal outcome
###################################################
library(BayesRGMM)
rm(list=ls(all=TRUE))

Fixed.Effs = c(-0.1, 0.1, -0.1) #c(-0.8, -0.3, 1.8, -0.4) #c(-0.2,-0.8, 1.0, -1.2)
P = length(Fixed.Effs)
q = 1 #number of random effects
T = 7 #time points
N = 100 #number of subjects
Num.of.Cats = 3 #number of categories; fixed at 3 in the simulation studies
num.of.iter = 1000 #number of iterations

HSD.para = c(-0.9, -0.6) #the parameters in HSD model
a = length(HSD.para)
w = array(runif(T*T*a), c(T, T, a)) #design matrix in HSD model

for(time.diff in 1:a)
    w[, , time.diff] = 1*(as.matrix(dist(1:T, 1:T, method="manhattan")) ==time.diff)

x = array(0, c(T, P, N))
for(i in 1:N){
    #x[,, i] = t(rmvnorm(P, rep(0, T), AR1.cor(T, Cor.in.DesignMat)))
    x[, 1, i] = 1:T
    x[, 2, i] = rbinom(1, 1, 0.5)
    x[, 3, i] = x[, 1, i]*x[, 2, i]
}

DesignMat = x

#Generate a data with HSD model
#MAR
CPREM.sim.data = SimulatedDataGenerator.CumulativeProbit(Num.of.Obs = N,
    Num.of.TimePoints = T, Num.of.Cats = Num.of.Cats, Fixed.Effs = Fixed.Effs,
    Random.Effs = list(Sigma = 0.5*diag(1), df=3), DesignMat = DesignMat,
    Missing = list(Missing.Mechanism = 2, MissingRegCoefs=c(-0.7, -0.2, -0.1)),
    HSD.DesignMat.para = list(HSD.para = HSD.para, DesignMat = w))

print(table(CPREM.sim.data$sim.data$y))
print(CPREM.sim.data$classes)

BCP.output = BayesRobustProbit(fixed = as.formula(paste("y~",
    paste0("x", 1:P, collapse="+"))), data=CPREM.sim.data$sim.data,
    random = ~ 1, Robustness = TRUE, subset = NULL, na.action='na.exclude',
    HS.model = ~IndTime1+IndTime2, hyper.params=NULL,
    num.of.iter=num.of.iter, Interactive = FALSE)

BCP.Est.output = BayesRobustProbitSummary(BCP.output)
BCP.Est.output
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/inst/doc/BayesRGMM.R
--- title: "BayesRGMM: Methodology and User Guide" author: "Kuo-Jung Lee at Deaprtment of Statistics and Institute of Data Science, NCKU, Taiwan" date: "2021 12 21" output: bookdown::html_document2: default pdf_document: extra_dependencies: ["bm", "amsmath"] bibliography: BayesRGMM.bib vignette: > %\VignetteIndexEntry{BayesRGMM: Methodology and User Guide} %\VignettePackage{BayesRGMM} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} %\VignetteKeywords{Bayesian, longitudinal, mixed-effect, R, robust} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) # URLs: BayesRGMM <- "https://sites.google.com/view/kuojunglee/r-packages/bayesrgmm" # Note: "https://sites.google.com/view/kuojunglee/r-packages/bayesrgmm" redirects there. ``` # Abstract `BayesRGMM` has the functionality to deal with the incomplete longitudinal studies on binary or ordinal outcomes that are measured repeatedly on subjects over time with drop-outs. Here, we briefly describe the background of methodology and provide an overview of the contents in `BayesRGMM`. # Main Methodology Denote the response vector for the $i$th subject by $\boldsymbol{y}_i=(y_{i1},\cdots,y_{it},\cdots,y_{in_i})'$ where $y_{it}$ is a response at time period $t$ ($i=1,\cdots, N$; $t=1,\cdots,n_i$). Note that the model and associated methodology can be applicable to the unequally spaced times and the distinct number of observations from subject to subject.We assume that the responses on different subjects are independent. Also, we assume that $y_{it}$'s are conditionally independent given a random vector $b_{i}$, and that $y_{it}$'s. For categorical responses, we assume that $y_{it}$ has an exponential family distribution so that generalized linear models (GLMs) can be specified by \begin{eqnarray} &&g\left\{E(y_{it})\right\}=x_{it}^T\beta+z_{it}^Tb_{i}, (\#eq:Probit-1)\\ &&b_i=(b_{i1},\ldots,b_{iq})^T\stackrel{\mbox{indep.}}{\sim} N(0,v_i^{-1}\Sigma), \notag\\ &&v_i \stackrel{\mbox{indep.}}{\sim} \Gamma\left(\nu/2,\nu/2\right), \notag \end{eqnarray} where $\beta$ is a $p\times 1$ unknown mean parameter vectors, $x_{it}$ is a $p\times 1$ corresponding vector of covariates, $z_{it}$ is a $q\times 1$ vector, $0$ is a $n_i\times 1$ zero vector, $\Sigma$ is a $q\times q$ covariance matrix reflecting the subject variations, and $\Gamma(a,b)$ denotes the gamma distribution with shape parameter $a$ and scale parameter $b$. In this paper, we consider the normal and binary responses and the corresponding links are identity and probit, respectively. To employ Markov Chain Monte Carlo algorithm techniques for Bayesian estimates and reduce the computational complexity, we introduce a latent variable latent variable $y_{it}^*$ to associate with the binary or ordinal outcome $y_{it}$ as follows, respectively. (a) Binary outcome: The unobservable latent variable $y_{it}^*$ and the observed binary outcome $y_{it}$ are connected by: \[ y_{it}=\mathbf{I}_{(y_{it}^*>0)}, \quad t = 1, \ldots, n_i, \] where $\mathbf{I}_{A}$ is the indicator of event $A$. Note that $y_{it}$ is 1 or 0 according to the sign of $y_{it}^*$. (b) Ordinal outcome: The atent variable $y_{it}^*$ is associated with each ordinal response $y_{it}$. 
Hence, the probability of $y_{it}=k$ is modeled through the probability of $y_{it}^*$ falling into the interval $(\alpha_{k-1},\alpha_k]$; that is, given the random effect $b_i$,
\begin{eqnarray}\label{model-3}
y_{it}=k \mbox{ if } \alpha_{k-1} < y_{it}^* \leq \alpha_k \mbox{ for } k=1,\ldots, K,
\end{eqnarray}
where $-\infty=\alpha_0<\alpha_1<\cdots<\alpha_K=\infty$. As a consequence, we have the following result:
\begin{eqnarray*}
p(y_{it}=k | b_i)=p(\alpha_{k-1}< y_{it}^* \leq \alpha_{k} | b_i),
\end{eqnarray*}
for $k=1,\ldots,K$.

We assume that the latent variable is associated with the explanatory variables $x_{it}$ and random-effect covariates $z_{it}$, with two different approaches to modeling the correlation of the repeated measures within a subject, described in the next two sections.

## Modified Cholesky Decomposition with Hypersphere Decomposition

We assume
\[
y_{it}^*=x_{it}^T\beta+z_{it}^Tb_i+\epsilon_{it},
\]
where the $\epsilon_{it}$'s are prediction errors with
\[
\boldsymbol{\epsilon}_i=(\epsilon_{i1},\ldots,\epsilon_{in_i})^T \stackrel{indep.}{\sim} N(0,R_i)
\]
for a correlation matrix $R_i$. Then the model \@ref(eq:Probit-1) is equivalent to, for $i=1, \ldots, N$ and $t=1, \ldots, n_i$,
\begin{equation}
\begin{aligned}
y_{it} &=
\begin{cases}
1, & y_{it}^*>0; \\
0, & \mbox{otherwise}.
\end{cases}
\end{aligned}(\#eq:ProbitLatentVariable)
\end{equation}
Let $\boldsymbol{y}_i^* = (y_{i1}^*, \ldots, y_{in_i}^*)'$ and rewrite \@ref(eq:ProbitLatentVariable) in matrix form as
\begin{eqnarray*}
\boldsymbol{y}_i^*=X_i\beta+Z_i b_i +\boldsymbol{\epsilon}_i,
\end{eqnarray*}
where $X_i$ and $Z_i$ are $n_i\times p$ and $n_i\times q$ matrices, respectively, defined as
\begin{eqnarray*}
X_i=\left( \begin{array}{c} x_{i1}^T \\ \vdots \\ x_{in_i}^T \\ \end{array} \right), \quad
Z_i=\left( \begin{array}{c} z_{i1}^T \\ \vdots \\ z_{in_i}^T \\ \end{array} \right).
\end{eqnarray*}

For identifiability, $R_i$ is restricted to be a correlation matrix: besides having diagonal elements equal to 1 and off-diagonal elements between $-1$ and $1$, $R_i$ must be positive definite. Moreover, the number of parameters to be estimated increases quadratically with the dimension of the matrix. In order to keep $R_{i}$ positive definite while alleviating the computational expense, we model the correlation matrix using the hypersphere decomposition (HD) approach [@Zhang:etal:2015]. The correlation matrix $R_i$ is reparameterized via hyperspherical coordinates [@Zhang:etal:2015] through the decomposition
\begin{eqnarray*}
R_i=F_iF_i^T,
\end{eqnarray*}
where $F_i$ is a lower triangular matrix with the $(t, j)$th element $f_{itj}$ given by
\begin{eqnarray*}
f_{itj}=\left\{
\begin{array}{ll}
1, & \hbox{for $t=j=1$;}\\
\cos(\omega_{itj}), & \hbox{for $j=1$, $t=2,\cdots,n_i$;} \\
\cos(\omega_{itj})\prod_{r=1}^{j-1}\sin(\omega_{itr}), & \hbox{for $2\leq j<t\leq n_i$;} \\
\prod_{r=1}^{j-1}\sin(\omega_{itr}), & \hbox{for $t=j;~ j=2,\cdots,n_i$.}
\end{array}
\right.
\end{eqnarray*}
Here the $\omega_{itj}$'s $(\in (0,\pi))$ are angle parameters for the trigonometric functions and are referred to as hypersphere (HS) parameters. As in @Zhang:etal:2015, we model the angles $\omega_{itj}$ instead of modeling the correlation matrix directly, and the modeling can be directly interpreted in terms of the correlations [@Zhang:etal:2015].
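To make the decomposition concrete, the following standalone sketch (the helper `F.from.angles` is ours, not part of the `BayesRGMM` API) builds $F_i$ from a matrix of angles and checks that $R_i=F_iF_i^T$ is a valid correlation matrix:

```{r hd-sketch, eval = FALSE}
# Lower triangle of `omega` holds angles in (0, pi); entry (t, j) is omega_{itj}.
F.from.angles <- function(omega) {
  n <- nrow(omega)
  Fmat <- diag(n)                      # f_{i11} = 1
  for (t in 2:n) {
    Fmat[t, 1] <- cos(omega[t, 1])
    if (t > 2) for (j in 2:(t - 1))
      Fmat[t, j] <- cos(omega[t, j]) * prod(sin(omega[t, 1:(j - 1)]))
    Fmat[t, t] <- prod(sin(omega[t, 1:(t - 1)]))
  }
  Fmat
}

n <- 4
omega <- matrix(pi/3, n, n)            # any angles in (0, pi) work
Fi <- F.from.angles(omega)
Ri <- Fi %*% t(Fi)
all.equal(diag(Ri), rep(1, n))         # unit diagonal
min(eigen(Ri)$values) > 0              # positive definite
```

Because each row of $F_i$ has unit Euclidean norm, $R_i$ automatically has unit diagonal, and positive definiteness follows from $F_i$ being nonsingular.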
In order to obtain an unconstrained estimation of the $\omega_{itj}$'s and to reduce the number of parameters for facilitating the computation, we model the correlation structure of the responses through the generalized linear model
\begin{eqnarray}
&&\log\left(\frac{\omega_{itj}}{\pi-\omega_{itj}}\right)=u_{itj}^T\delta,(\#eq:GARP-IV-1)
\end{eqnarray}
where $\delta$ is an $a \times 1$ vector of unknown parameters used to model the HS parameters. Importantly, the proposed method reduces the model complexity and yields fast-to-execute models without loss of accuracy. In addition, note that the design vector $u_{itj}$ in \@ref(eq:GARP-IV-1) is used to model the HS parameters as functions of subject-specific covariates [@Zhang:etal:2015; @Pan:Mackenzie:2015]. As a result, the design vector is specified in a manner similar to that used in heteroscedastic regression models. For example, including the time lag $|t - j|$ in the design vector $u_{itj}$ specifies higher-lag models. We introduce the priors of the model parameters in Section \@ref(S:BayesianMethod).

## Generalized Autoregressive and Moving-Average Model

In order to give a complete specification of the joint distribution, the latent random vectors $\boldsymbol{y}_{i}^*=(y_{i1}^*,\ldots,y_{in_i}^*)^T$ are taken to be jointly normally distributed according to
\begin{equation}
\begin{aligned}
y_{i1}^*&= x_{i1}^T\beta + \epsilon_{i1}, \\
y_{it}^*&=x_{it}^T\beta+z_{it}^Tb_i+\sum_{j=1}^{u-1}\phi_{ij}(y_{i,t-j}^* - x_{i,t-j}^T\beta)+ \sum_{s=1}^{v-1} \psi_{is}\epsilon_{i,t-s}+\epsilon_{it}, \quad t=2, \ldots, n_i,
\end{aligned}(\#eq:StatModel)
\end{equation}
where the $\phi_{ij}$'s are generalized autoregressive parameters (GARPs) and the $\psi_{is}$'s are generalized moving-average parameters (GMAPs). In addition, the $\epsilon_{it}$'s are prediction errors with
\[
\boldsymbol{\epsilon}_i=(\epsilon_{i1},\ldots,\epsilon_{in_i})^T \stackrel{indep.}{\sim} N(0,I_i),
\]
where $I_i$ is the $n_i\times n_i$ identity matrix. We can rewrite \@ref(eq:StatModel) in matrix form as
\begin{equation*}
\Phi_i (\boldsymbol{y}_i^*-X_i\beta) = Z_i b_i + \Psi_i \boldsymbol{\epsilon}_i,
\end{equation*}
where $X_i$ ($n_i\times p$), $Z_i$ ($n_i\times q$), $\Phi_i$ ($n_i\times n_i$), and $\Psi_i$ ($n_i\times n_i$) are matrices defined as follows:
\begin{eqnarray*}
X_i=\left( \begin{array}{c} x_{i1}^T \\ \vdots \\ x_{in_i}^T \\ \end{array} \right),\quad
Z_i=\left( \begin{array}{c} z_{i1}^T \\ \vdots \\ z_{in_i}^T \\ \end{array} \right)
\end{eqnarray*}
\begin{eqnarray*}
\Phi_i =\left( \begin{array}{cccccc}
1 & 0 & 0 & \ldots & 0&0\\
-\phi_{i1} & 1 & 0 & \ldots &0&0\\
-\phi_{i2} & -\phi_{i1} & 1 & \ldots & 0&0 \\
\vdots & \vdots & \vdots & \ddots & \vdots &\vdots \\
0& \ldots & -\phi_{i,u-2} & \ldots & 1 &0 \\
0 & \ldots &-\phi_{i,u-1} & \ldots & -\phi_{i1} & 1
\end{array} \right), \quad
\Psi_i =\left( \begin{array}{cccccc}
1 & 0 & 0 & \ldots & 0&0\\
\psi_{i1} & 1 & 0 & \ldots &0&0\\
\psi_{i2} & \psi_{i1} & 1 & \ldots & 0&0 \\
\vdots & \vdots & \vdots & \ddots & \vdots &\vdots \\
0& \ldots & \psi_{i,v-2} & \ldots & 1 &0 \\
0 & \ldots &\psi_{i,v-1} & \ldots & \psi_{i1} & 1
\end{array} \right)
\end{eqnarray*}
Note that $\Phi_i$ and $\Psi_i$ uniquely exist and are called the generalized autoregressive parameter matrix (GARPM) and the generalized moving-average parameter matrix (GMAPM), respectively.
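As a small illustration (the helper `band.matrix` is ours and is not how the package represents these matrices internally), $\Phi_i$ and $\Psi_i$ are unit lower-triangular band matrices, and, given $b_i=0$, the implied covariance of $\boldsymbol{y}_i^*$ is $\Phi_i^{-1}\Psi_i\Psi_i^T\Phi_i^{-T}$:

```{r garpm-sketch, eval = FALSE}
# Unit lower-triangular band matrix from lag coefficients; sign = -1
# reproduces the -phi entries of Phi_i, sign = +1 the psi entries of Psi_i.
band.matrix <- function(coefs, n, sign = 1) {
  M <- diag(n)
  for (k in seq_along(coefs)) {        # k-th sub-diagonal
    if (k + 1 > n) break
    for (t in (k + 1):n) M[t, t - k] <- sign * coefs[k]
  }
  M
}

n   <- 5
Phi <- band.matrix(0.4, n, sign = -1)  # GARPM for an ARMA(1, 1)-type model
Psi <- band.matrix(0.2, n, sign = +1)  # GMAPM
V   <- solve(Phi) %*% Psi %*% t(Psi) %*% t(solve(Phi))
round(cov2cor(V), 3)                   # implied correlation of y_i* given b_i = 0
```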
The density of the latent variable $\boldsymbol{y}^*$ conditional on the random effects $\boldsymbol{b}=(b_1, \ldots, b_N)$ is given by
\[
p(\boldsymbol{y}^*|\boldsymbol{b}, \theta) = \prod_{i=1}^N\prod_{t=1}^{n_i} f(y^*_{it}; \mu_{it}, I_i),
\]
where $\theta = (\beta, \nu, \Sigma, \phi, \psi)$ denotes the collection of model parameters, $\mu_{it} = x_{it}^T\beta+z_{it}^Tb_i$, and $f(\cdot)$ is the multivariate normal density function.

# Bayesian Methods {#S:BayesianMethod}

To complete the Bayesian specification of the model, we use proper prior distributions instead of noninformative priors in order to guarantee the propriety of the posterior distribution. The prior distributions for $\beta$ and $\Sigma$ in the model for a binary outcome are given by:
\begin{eqnarray*}
&&\beta \sim N_p(0,\sigma_{\beta}^2 \mathrm{I}), \\
&&\Sigma \sim \mathcal{IW}(\nu_b, \Lambda^{-1}),
\end{eqnarray*}
where $\sigma_{\beta}^2$ and $\sigma_{\delta}^2$ are chosen large so as to be noninformative [@Daniels:Zhao:2003], $\mathrm{I}$ is the identity matrix of dimension matching the parameter, and $\Lambda$ is a positive definite scale matrix. Here $N_m(\mu,\Omega)$ denotes the $m$-variate normal distribution with mean vector $\mu$ and covariance matrix $\Omega$, and $\mathcal{IW}(\nu,\Lambda^{-1})$ denotes the inverse Wishart distribution with $\nu$ degrees of freedom and a symmetric positive definite $q\times q$ scale matrix. The priors for the parameters of the correlation matrix under the two correlation structures are:

(a) MCD: In the case of the modified Cholesky decomposition with hypersphere decomposition, we assume $\delta \sim N_a(0, \sigma_{\delta}^2\mathrm{I})$.

(b) ARMA: In the ARMA correlation structure, noninformative priors are assumed for the temporal parameters $\phi$ in the GARPM and $\psi$ in the GMAPM, with constraints on them to ensure stationarity.

Furthermore, for the ordinal outcome a prior for $\alpha$ is given by
\[
\alpha \sim N_{K-1}(0,\sigma_{\alpha}^2I)\mathbf{I}_{(-\infty<\alpha_1<\cdots <\alpha_{K-1}<\infty)},
\]
where $\sigma_{\alpha}^2$ is prespecified.

# Implementation

The aim of this section is to provide a detailed, step-by-step walk through the simulation studies to highlight the most important features of the package `BayesRGMM` and to show how to extract the most important results. This section can also be read as a user manual that allows readers to run similar analyses of their own with only mild modification of the example code.

The `BayesRGMM` package contains four core functions. The two main functions, `BayesRobustProbit` for binary outcomes and `BayesCumulativeProbitHSD` for ordinal outcomes, carry out the entire MCMC procedure and output the posterior samples and estimates for the model parameters, along with several useful estimated information criterion statistics. Internally, most of the computation is done in compiled `C++` code to reduce the computational time. The user-friendly summary function `BayesRobustProbitSummary`, which summarizes the model estimation outcomes, accompanies `BayesRobustProbit` and `BayesCumulativeProbitHSD`.
It provides basic posterior summary statistics, such as posterior point and credible interval estimates of the parameters and the values of information criterion statistics for model comparison. The functions `SimulatedDataGenerator` and `SimulatedDataGenerator.CumulativeProbit` are used to generate simulated binary and ordinal data, respectively, for simulation studies with ARMA and MCD correlation structures. `CorrMat.HSD` is used to calculate the correlation matrix under the MCD model structure. In this section, we focus primarily on introducing these functions and demonstrate their usage with numerical experiments.

## Simulation Studies

In the simulations, we demonstrate the use of the functions in the `BayesRGMM` package.

### Binary Outcome

We consider a simple random intercept model ($q=1$) with the regression coefficient vector of size $p=4$ given by
\[
\beta = (-0.2,-0.3, 0.8, -0.4)',
\]
where the $x_{it}$'s are independently generated from $N(0, 1)$. In addition, $v_i$ is independently simulated from $\Gamma(3, 3)$, $b_i$ is drawn from $N(0, v_i \times 0.5)$, and $z_{it}=1$ for $i=1, \ldots, n$. That is, the $b_i$'s follow a Student's $t$-distribution with 6 degrees of freedom. We then generate the responses based on \@ref(eq:Probit-1). In addition to the different correlation structures, we also consider data that are missing at random (MAR), since the missingness depends on previously observed responses. The missing-data mechanism is
\begin{align*}
\eta_{it} &= -1.5 \times y_{i,t-1} + 1.2\times y_{i,t-2},
\end{align*}
and the missing probability depends on the $\eta_{it}$'s through
\[
p^{\mbox{miss}}_{it} = \frac{e^{\eta_{it}}}{1+e^{\eta_{it}}}.
\]
Whether the observation for subject $i$ at time point $t$ is missing thus depends on the subject's preceding observed responses.

#### Simulation 1: MCD Correlation Structure

The correlation matrix $R_i$ is created based on the given values
\begin{align}\label{Eq:Order_in_HS}
\delta = (-0.5, -0.3)' \quad \mbox{and} \quad u_{itj} = \left(\mathbf{I}\left\{|t-j|=1\right\}, \mathbf{I}\left\{|t-j|=2\right\}\right)'.
\end{align}

```{r HSD, tidy = TRUE, eval = TRUE}
library(BayesRGMM)
rm(list = ls(all = TRUE))
Fixed.Effs = c(-0.2, -0.3, 0.8, -0.4)
P = length(Fixed.Effs)
q = 1
T = 5
N = 100
num.of.iter = 100
HSD.para = c(-0.5, -0.3)
a = length(HSD.para)
w = array(runif(T*T*a), c(T, T, a))
for(time.diff in 1:a)
    w[, , time.diff] = 1*(as.matrix(dist(1:T, 1:T, method = "manhattan")) == time.diff)
HSD.sim.data = SimulatedDataGenerator(
    Num.of.Obs = N, Num.of.TimePoints = T, Fixed.Effs = Fixed.Effs,
    Random.Effs = list(Sigma = 0.5*diag(1), df = 3), Cor.in.DesignMat = 0.,
    Missing = list(Missing.Mechanism = 2, RegCoefs = c(-1.5, 1.2)),
    Cor.Str = "HSD",
    HSD.DesignMat.para = list(HSD.para = HSD.para, DesignMat = w))
hyper.params = list(
    sigma2.beta = 1,
    sigma2.delta = 1,
    v.gamma = 5,
    InvWishart.df = 5,
    InvWishart.Lambda = diag(q))
HSD.output = BayesRobustProbit(
    fixed = as.formula(paste("y~-1+", paste0("x", 1:P, collapse = "+"))),
    data = HSD.sim.data$sim.data, random = ~ 1, HS.model = ~IndTime1+IndTime2,
    Robustness = TRUE, subset = NULL, na.action = 'na.exclude',
    hyper.params = hyper.params, num.of.iter = num.of.iter, Interactive = FALSE)
original = options(digits = 4)
Model.Estimation = BayesRobustProbitSummary(HSD.output)
cat("\nCoefficients:\n")
print(Model.Estimation$beta.est.CI)
cat("\nParameters in HSD model:\n")
print(Model.Estimation$delta.est.CI)
cat("\nRandom effect: \n")
print(Model.Estimation$random.cov)
cat("\nModel Information:\n")
print(Model.Estimation$model.info)
cat("\nEstimate of Ri: \n")
print(Model.Estimation$Ri, quote = FALSE)
options(original)
```

#### Simulation 2: ARMA Correlation Structure

To model the serial dependence of the repeated measurements, we consider an ARMA(1, 1) correlation structure with
\[
\phi = 0.4 \qquad \mbox{and}\qquad \psi = 0.2.
\]

```{r ARMA, tidy = TRUE, eval = TRUE}
library(BayesRGMM)
rm(list = ls(all = TRUE))
Fixed.Effs = c(-0.2, -0.8, 1.0, -1.2)
P = length(Fixed.Effs)
q = 1
T = 10
N = 100
num.of.iter = 100
ARMA.sim.data = SimulatedDataGenerator(
    Num.of.Obs = N, Num.of.TimePoints = T, Fixed.Effs = Fixed.Effs,
    Random.Effs = list(Sigma = 0.5*diag(1), df = 3), Cor.in.DesignMat = 0.,
    Missing = list(Missing.Mechanism = 2, RegCoefs = c(-1.5, 1.2)),
    Cor.Str = "ARMA", ARMA.para = list(AR.para = 0.4, MA.para = 0.2))
ARMA.output = BayesRobustProbit(
    fixed = as.formula(paste("y~-1+", paste0("x", 1:P, collapse = "+"))),
    data = ARMA.sim.data$sim.data, random = ~ 1, Robustness = TRUE,
    subset = NULL, na.action = 'na.exclude', arma.order = c(1, 1),
    num.of.iter = num.of.iter, Interactive = FALSE)
original = options(digits = 4)
Model.Estimation = BayesRobustProbitSummary(ARMA.output)
cat("\nCoefficients:\n")
print(Model.Estimation$beta.est.CI)
cat("\nARMA parameters:\n\n")
print(Model.Estimation$arma.est)
cat("\nRandom effect: \n")
print(Model.Estimation$random.cov)
cat("\nModel Information:\n")
print(Model.Estimation$model.info)
options(original)
```

### Ordinal Outcome

We consider a simple random intercept model ($q=1$). For $k = 1, 2, 3$ and $t=1,\ldots,n_i$, the model is given by:
\begin{align}
&y_{it}^*=\beta_{1}Time_{it}+\beta_{2}Group_{i}+\beta_{3}Time_{it}\times Group_{i}+b_{i0}+\epsilon_{it},\label{sim-1}\\
&b_{i0}\sim N(0,\sigma_b^2),\label{sim-2}\\
&\epsilon_{i}=(\epsilon_{i1},\ldots,\epsilon_{in_i})^T\sim N(0,R_i),\label{sim-3}
\end{align}
where $Time_{it}\sim N(0,1)$ and $Group_{i}$ equals 0 or 1, with approximately the same sample size in each group. The true parameters in the simulations are:
\begin{eqnarray*}
&&(\beta_{01},\beta_{02})=(-0.5,0.5);~~(\beta_1,\beta_2,\beta_3)=(-0.1,0.1,-0.1);~~ \sigma_b^2=0.2.
\end{eqnarray*}
The model for the correlation matrix $R_i$ is given by
\begin{eqnarray}
\log\left(\frac{\omega_{itj}}{\pi-\omega_{itj}}\right)=\delta_1 1_{(|t-j|=1)}+\delta_2 1_{(|t-j|=2)},
\end{eqnarray}
where $(\delta_1,\delta_2)=(-0.9,-0.6)$. We again consider data that are missing at random (MAR), with a mechanism defined by
\begin{align*}
\eta_{it} &= -0.7\times y_{i,t-1} -0.2\times y_{i,t-2}-0.1\times y_{i,t-3},
\end{align*}
so that the missing probability depends on the $\eta_{it}$'s through
\[
p^{\mbox{miss}}_{it} = \frac{e^{\eta_{it}}}{1+e^{\eta_{it}}}.
\]
Whether the observation for subject $i$ at time point $t$ is missing thus depends on the subject's three preceding observed responses.

```{r Ordinal, tidy = TRUE, eval = TRUE}
library(BayesRGMM)
rm(list = ls(all = TRUE))
Fixed.Effs = c(-0.1, 0.1, -0.1)
P = length(Fixed.Effs)
q = 1               # number of random effects
T = 7               # time points
N = 100             # number of subjects
Num.of.Cats = 3     # fixed at 3 in these (KBLEE) simulation studies
num.of.iter = 1000  # number of iterations
HSD.para = c(-0.9, -0.6)             # the parameters in the HSD model
a = length(HSD.para)
w = array(runif(T*T*a), c(T, T, a))  # design matrix in the HSD model
for(time.diff in 1:a)
    w[, , time.diff] = 1*(as.matrix(dist(1:T, 1:T, method = "manhattan")) == time.diff)
x = array(0, c(T, P, N))
for(i in 1:N){
    x[, 1, i] = 1:T
    x[, 2, i] = rbinom(1, 1, 0.5)
    x[, 3, i] = x[, 1, i]*x[, 2, i]
}
DesignMat = x
# Generate data from the HSD model; missingness depends on observed
# responses (MAR).
CPREM.sim.data = SimulatedDataGenerator.CumulativeProbit(
    Num.of.Obs = N, Num.of.TimePoints = T, Num.of.Cats = Num.of.Cats,
    Fixed.Effs = Fixed.Effs, Random.Effs = list(Sigma = 0.5*diag(1), df = 3),
    DesignMat = DesignMat,
    Missing = list(Missing.Mechanism = 2, MissingRegCoefs = c(-0.7, -0.2, -0.1)),
    HSD.DesignMat.para = list(HSD.para = HSD.para, DesignMat = w))
print(table(CPREM.sim.data$sim.data$y))
print(CPREM.sim.data$classes)
BCP.output = BayesCumulativeProbitHSD(
    fixed = as.formula(paste("y~", paste0("x", 1:P, collapse = "+"))),
    data = CPREM.sim.data$sim.data, random = ~ 1, Robustness = TRUE,
    subset = NULL, na.action = 'na.exclude', HS.model = ~IndTime1+IndTime2,
    hyper.params = NULL, num.of.iter = num.of.iter, Interactive = FALSE)
BCP.Est.output = BayesRobustProbitSummary(BCP.output)
BCP.Est.output
```

We considered two scenarios in the simulations, both to verify the estimation method for the parameters of the proposed model and to examine the robustness of our models relative to models with misspecified correlation matrices.
/scratch/gouwar.j/cran-all/cranData/BayesRGMM/vignettes/BayesRGMM.Rmd
HDIofMCMC = function( sampleVec , credMass=0.95 ) {
  # Computes highest density interval from a sample of representative values,
  # estimated as shortest credible interval.
  # Arguments:
  #   sampleVec
  #     is a vector of representative values from a probability distribution.
  #   credMass
  #     is a scalar between 0 and 1, indicating the mass within the credible
  #     interval that is to be estimated.
  # Value:
  #   HDIlim is a vector containing the limits of the HDI
  sortedPts = sort( sampleVec )
  ciIdxInc = floor( credMass * length( sortedPts ) )
  nCIs = length( sortedPts ) - ciIdxInc
  ciWidth = rep( 0 , nCIs )
  for ( i in 1:nCIs ) {
    ciWidth[ i ] = sortedPts[ i + ciIdxInc ] - sortedPts[ i ]
  }
  HDImin = sortedPts[ which.min( ciWidth ) ]
  HDImax = sortedPts[ which.min( ciWidth ) + ciIdxInc ]
  HDIlim = c( HDImin , HDImax )
  return( HDIlim )
}
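# Illustrative usage (added; not part of Kruschke's original file): for a
# large sample from a symmetric unimodal distribution the HDI matches the
# central interval, e.g. roughly (-1.96, 1.96) for a standard normal, while
# for skewed samples the HDI is shifted toward the mode.
#   set.seed(123)
#   HDIofMCMC( rnorm(1e5) )                            # approx. -1.96  1.96
#   HDIofMCMC( rgamma(1e5, shape = 2) , credMass=0.90 )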
/scratch/gouwar.j/cran-all/cranData/BayesRS/R/HDIofMCMC.R
#' Example Data Set
#'
#' @docType data
#' @keywords dataset
#' @name bayesrsdata
#' @usage bayesrsdata
#' @format A data.frame with 1200 rows and 4 variables
#' @description Example data set used for showing functionality of
#'   \code{\link{modelrun}}. The examples give the code used for simulating
#'   the data set.
#'
#' @example examples/example.data.R
NULL
/scratch/gouwar.j/cran-all/cranData/BayesRS/R/bayesrs-data.R
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
#   http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
#   Build and Reload Package:  'Ctrl + Shift + B'
#   Check Package:             'Ctrl + Shift + E'
#   Test Package:              'Ctrl + Shift + T'

hello <- function() {
  print("Hello, world!")
}
/scratch/gouwar.j/cran-all/cranData/BayesRS/R/hello.R
#' Computes Bayes Factors for hierarchical linear models including continuous predictors using the Savage-Dickey density ratio
#' @title Bayes Factors, Posterior Samples, & DIC
#' @name modelrun
#' @param data a \code{data.frame} object with the data to be fitted in the long format.
#' @param dv \code{string} indicating the dependent variable of the model. Has to be normally distributed.
#' @param dat.str a \code{data.frame} object indicating the hierarchical structure in the model, with column names "iv" and "type" and an arbitrary number of random variables as the remaining column names. iv indicates the name of an independent variable as in \code{data}, type its scale of measurement ("cont" for continuous or "cat" for categorical), and the following entries indicate whether a random effect should be modeled for this variable (1) or not (0). Continuous variables have to be entered before categorical variables. The name for the random variable(s) has to be the same as in \code{data}. A categorical variable with n levels is entered as n - 1 simple codes into the model, with the first level of the variable being the reference category.
#' @param randvar.ia a \code{list} containing n \code{matrix} objects, with n being the number of random variables. In each \code{matrix} the lower triangle can be used to declare the respective two-way interaction as random within the specific random variable. The row and column ordering of independent variables is the same as in \code{dat.str}. When not specified, interactions are only modeled as fixed effects by default.
#' @param corstr a \code{list} containing n \code{matrix} objects, with n being the number of random variables. In each \code{matrix} the lower triangle can be used to assign correlations between predictors (including the intercept) for each random effect. The first row and column in each \code{matrix} object represent the intercept. The following rows and columns represent the independent variables, with the same ordering as in \code{dat.str}. When not specified, no correlations are modeled by default.
#' @param nadapt number of MCMC steps to adapt the sampler (2000 by default).
#' @param nburn number of MCMC steps to burn in the sampler (2000 by default).
#' @param nsteps number of saved MCMC steps in all chains (100'000 by default).
#' @param checkconv indicates that convergence statistics of the main model parameters should be returned in the console and that figures of the chains should be plotted when set to 1 (1 by default).
#' @param mcmc.save.indiv indicates that the chains should be saved in a \code{data.frame} object when set to 1 (0 by default).
#' @param plot.post indicates that the 95 percent highest-density interval of the posterior of the group parameters should be plotted as a figure with the corresponding Bayes Factors when set to 1 (0 by default).
#' @param dic indicates that the deviance information criterion (Spiegelhalter, Best, Carlin, & van der Linde, 2002) should be computed for a given model when set to 1 (0 by default).
#' @param path defines the directory where the model is saved as a .txt file, and the model name. Set to file.path(tempdir(), "model.txt") by default.
#' @details The argument \code{corstr} can be used to model correlations between (a) pairs of predictors and (b) more than two predictors. When both is done within the same random variable, a predictor can only appear in (a) or (b).
#'
#' \code{modelrun} z-standardizes the dependent variable and the continuous independent variables. To obtain the posteriors in the original scale they have to be retransformed.
#' \subsection{Savage Dickey}{
#' Bayes Factors are computed with the Savage-Dickey density ratio. We use the normal approximation (e.g., Wetzels, Raaijmakers, Jakab, & Wagenmakers, 2009) to estimate the density of the posterior.
#' }
#' @return returns a list with components:
#' \itemize{
#'  \item \code{bf}: a \code{data.frame} object with the Bayes Factor estimates of the group parameters (aka fixed effects).
#'  \item \code{mcmcdf}: a \code{data.frame} object with the saved MCMC chains.
#'  \item \code{dic}: DIC of the fitted model.
#' }
#' @author Thalmann, M., Niklaus, M. Part of this package uses code from John Kruschke.
#' @references
#' Spiegelhalter, D. J., Best, N. G., Carlin, B. P., & van der Linde, A. (2002). Bayesian measures of model complexity and fit. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 64(4), 583-639.
#'
#' Wetzels, R., Raaijmakers, J. G. W., Jakab, E., & Wagenmakers, E.-J. (2009). How to quantify support for and against the null hypothesis: A flexible WinBUGS implementation of a default Bayesian t test. Psychonomic Bulletin & Review, 16(4), 752-760. https://doi.org/10.3758/PBR.16.4.752
#' @example examples/example.modelrun.R
#'
#' @importFrom stats dnorm sd update aggregate var density median
#' @importFrom reshape melt
#' @importFrom metRology dt.scaled
#' @importFrom rjags coda.samples load.module jags.model
#' @importFrom coda gelman.diag
#' @importFrom methods show
#' @importFrom grid grid.draw
#' @importFrom ggplot2 ggplot aes geom_point geom_segment coord_flip ggtitle labs theme_bw theme annotate scale_y_continuous element_blank element_text element_line rel aes_string
#' @importFrom graphics plot
#' @importFrom grDevices dev.new
#' @importFrom utils globalVariables
#' @export

modelrun <- function(data, dv, dat.str, randvar.ia = NULL, corstr = NULL,
                     nadapt = NULL, nburn = NULL, nsteps = NULL,
                     checkconv = NULL, mcmc.save.indiv = NULL,
                     plot.post = NULL, dic = NULL, path = NULL){

  if (is.null(nadapt)) nadapt = 2000
  if (is.null(nburn)) nburn = 2000
  if (is.null(nsteps)) nsteps = 100000
  if (is.null(checkconv)) checkconv = 1
  if (is.null(mcmc.save.indiv)) mcmc.save.indiv = 0
  if (is.null(plot.post)) plot.post = 0
  if (is.null(dic)) dic = 0
  if (is.null(path)) path = file.path(tempdir(), "model.txt")

  if (!is.null(randvar.ia)){
    if(nrow(randvar.ia[[1]]) != nrow(dat.str) | ncol(randvar.ia[[1]]) != nrow(dat.str)){
      stop("nr. of independent variables in dat.str and randvar.ia have to match")
    }
  }
  ls <- 0
  ncont <- sum(dat.str$type == "cont")

  # coding for cat vars when levels(cat) > 2
  check <- as.character(dat.str$iv[dat.str$type == "cat"])
  add.vars <- names(dat.str)[3:ncol(dat.str)]
  add.vals <- matrix(dat.str[dat.str$iv %in% check, 3:ncol(dat.str)], ncol = 1)
  if (is.null(corstr)) corstr = rep(list(matrix(0)), length(add.vars))
  if (any(dat.str$type == "cat")){
    nrcat.act <- sum(dat.str$type == "cat")
    tmp <- lapply(X = data, FUN = levels)
    lvls <- tmp[check]
    codevar <- list()
    # change dat.str df when more than 2 levels for factor
    X.cat <- data.frame(matrix(NA, nrow = nrow(data), ncol = 1))
    for (i in 1:length(lvls)){
      ncodes.i <- length(lvls[[i]]) - 1
      add <- ncodes.i/(ncodes.i + 1)
      codevar[[i]] <- matrix(NA, nrow = nrow(data), ncol = ncodes.i)
      name.i <- vector(length = ncodes.i)
      if(ncodes.i == 1){
        for (c in 1:ncodes.i){
          name.i[c] <- paste0(check[i])
          codevar[[i]][,c][data[,check[i]] == levels(data[,check[i]])[c+1]] <- add
          codevar[[i]][,c][is.na(codevar[[i]][,c])] <- -add/ncodes.i
          assign(name.i[c], codevar[[i]][,c])
        }
      } else {
        for (c in 1:ncodes.i){
          name.i[c] <- paste0(check[i], ".spl", c)
          codevar[[i]][,c][data[,check[i]] == levels(data[,check[i]])[c+1]] <- add
          codevar[[i]][,c][is.na(codevar[[i]][,c])] <- -add/ncodes.i
          assign(name.i[c], codevar[[i]][,c])
        }
      }
      # update dat.str df
      if(ncodes.i > 1){
        dat.str <- dat.str[dat.str$iv != names(lvls[i]),]
        dat.str.add <- as.data.frame(cbind(name.i, rep("cat", ncodes.i),
                                           matrix(unlist(rep(unname(add.vals[i,]), ncodes.i)),
                                                  nrow = ncodes.i, byrow = T)))
        names(dat.str.add) <- c("iv", "type", c(add.vars))
        dat.str <- rbind(dat.str, dat.str.add)
      }
      change <- names(dat.str[3:ncol(dat.str)])
      if (length(change) > 1){
        dat.str[,3:ncol(dat.str)] <- lapply(dat.str[,change], as.numeric)
      } else {
        dat.str[,3:ncol(dat.str)] <- as.numeric(dat.str[,change])
      }
      # and update data df
      data <- data[,which(names(data) != check[i])]
      data <- cbind(data, codevar[[i]])
      names(data)[(ncol(data)-ncodes.i+1):ncol(data)] <- paste0("x", name.i)
      X.cat <- cbind(X.cat, data[(ncol(data)-ncodes.i+1):ncol(data)])
    }
    pre.names <- names(X.cat)[2:ncol(X.cat)]
    X.cat <- data.frame(X.cat[,2:ncol(X.cat)])
    names(X.cat) <- pre.names
    dat.str[,3] <- as.numeric(dat.str[,3])

    # change corstr mat when > 2 levels for factor
    if(any(unlist(lapply(corstr, sum)) != 0)){
      ls <- unlist(unname(lapply(lvls, FUN = function(x) length(x))))
      longer <- which(unname(unlist(lapply(lvls, FUN = function(x) length(x) > 2))))
      corstr.tmp <- corstr
      counter <- 0
      for(i.pre in longer){
        counter <- counter + 1
        ind <- 1 + ncont + i.pre
        for(i in 1:(ls[i.pre]-2)){
          stay1 <- 1 + ncont + counter # 1 for intercept
          corstr.tmp <- lapply(corstr.tmp, FUN = function(x) x[1:stay1, 1:stay1])
          corstr.tmp <- lapply(corstr.tmp, FUN = function(x) rbind(x, c(x[stay1, 1:(stay1-1)], x[stay1, (stay1-1)])))
          corstr.tmp <- lapply(corstr.tmp, FUN = function(x) cbind(x, c(x[1:stay1, stay1], x[stay1, stay1])))
          counter <- counter + 1
        }
        if(i.pre < max(longer)){
          remain <- length((ind+1):nrow(corstr.tmp[[1]]))
          tmp <- lapply(corstr, FUN = function(x) matrix(c(x[(ind+1):nrow(x), 1:ind], rep(x[(ind+1):nrow(x), ind], remain)), nrow = 1))
          corstr.tmp <- mapply(rbind, corstr.tmp, tmp, SIMPLIFY = FALSE)
          corstr.tmp <- lapply(corstr.tmp, FUN = function(x) cbind(x, x[,ncol(x)]))
        }
      }
      corstr <- corstr.tmp
    }
    # same for interaction df
    if(any(unlist(lapply(randvar.ia, sum)) != 0)){
      ls <- unlist(unname(lapply(lvls, FUN = function(x) length(x))))
      longer <- which(unname(unlist(lapply(lvls, FUN = function(x) length(x) > 2))))
      randvar.ia.tmp <- randvar.ia
      counter <- 0
      for(i.pre in longer){
        counter <- counter + 1
        ind <- ncont + i.pre
        for(i in 1:(ls[i.pre]-2)){
          stay2 <- ncont + counter # no intercept
          randvar.ia.tmp <- lapply(randvar.ia.tmp, FUN = function(x) x[1:stay2, 1:stay2])
          randvar.ia.tmp <- lapply(randvar.ia.tmp, FUN = function(x) rbind(x, c(x[stay2,])))
          randvar.ia.tmp <- lapply(randvar.ia.tmp, FUN = function(x) cbind(x, c(x[,stay2])))
          counter <- counter + 1
        }
        if(i.pre < max(longer)){
          remain <- length((ind+1):nrow(randvar.ia.tmp[[1]]))
          tmp <- lapply(randvar.ia, FUN = function(x) matrix(c(x[(ind+1):nrow(x), 1:ind], rep(x[(ind+1):nrow(x), counter], remain)), nrow = 1))
          randvar.ia.tmp <- mapply(rbind, randvar.ia.tmp, tmp, SIMPLIFY = FALSE)
          randvar.ia.tmp <- lapply(randvar.ia.tmp, FUN = function(x) cbind(x, x[,ncol(x)]))
        }
      }
      randvar.ia <- randvar.ia.tmp
    }
  }

  if (is.null(randvar.ia)) randvar.ia = replicate(length(names(dat.str[3:ncol(dat.str)])),
                                                  matrix(0, nrow = nrow(dat.str), ncol = nrow(dat.str)),
                                                  simplify = F)
  data <- data[!is.na(data[,dv]),]
  randnames <- names(dat.str[3:ncol(dat.str)])
  contnames <- as.character(dat.str$iv[dat.str$type == "cont"])
  catnames <- as.character(dat.str$iv[dat.str$type == "cat"])
  nrcont <- length(contnames)
  nrcat <- length(catnames)
  f.sub <- function(x){
    z = 0
    if(x != 0){
      for(i in 1:x){ z = z + i }
    }
    return(z)
  }
  nrIA.min <- 0
  if(sum(ls) != 0){
    ls.calc <- matrix(ls, nrow = 1)
    nrIA.min <- sum(unlist(apply((ls.calc-2), MARGIN = 2, FUN = f.sub)))
  }

  # cont as numeric predictors
  contvars = which(names(data) %in% c(as.character(dat.str$iv[dat.str$type == "cont"]), dv))
  data[,contvars] <- mapply(data[,contvars], FUN = as.character)
  data[,contvars] <- mapply(data[,contvars], FUN = as.numeric)
  # id and cat as factors
  catvars = which(names(data) %in% c(names(dat.str[3:ncol(dat.str)]), as.character(dat.str$iv[dat.str$type == "cat"])))
  if (length(catvars) > 1){
    data[,catvars] <- lapply(data[,catvars], as.factor)
  } else {
    data[,catvars] <- as.factor(data[,catvars])
  }
  for (i in randnames){
    levels(data[,i]) <- 1:length(unique(data[,i]))
  }

  # z-standardize ivs and cont dv
  mDV <- mean(data[,dv])
  sdDV <- sd(data[,dv])
  data$z.DV <- (data[,dv]-mDV)/sdDV
  nxs <- nrow(dat.str)
  X <- data.frame(matrix(NA, nrow = nrow(data), ncol = 1))
  for (i in contnames){
    name.m <- paste("m", i, sep = "")
    name.sd <- paste("sd", i, sep = "")
    name.z <- paste("z", i, sep = "")
    assign(name.m, mean(data[,i]))
    assign(name.sd, sd(data[,i]))
    assign(name.z, (data[,i]-mean(data[,i]))/sd(data[,i]))
    z.val <- as.name(paste("z", i, sep = ""))
    x.name <- paste("x", i, sep = "")
    do.call("<-", list(x.name, z.val))
    X <- do.call("cbind", list(X, z.val))
    names(X)[which(i == contnames)+1] <- x.name
    data <- do.call("cbind", list(data, z.val))
  }
  if(exists("X.cat")){
    X <- cbind(X, X.cat)
  }
  X <- as.data.frame(X[,2:ncol(X), drop = FALSE])
  Ndata <- length(data$z.DV)
  dataList = list(
    y = data$z.DV ,
    Ndata = nrow(data)
  )
  name.cyc <- vector()
  size.cyc <- vector()
  for (i in randnames){
    name.cyc[i] <- paste("N", i, "s", sep = '')
    size.cyc[i] <- length(levels(data[,i]))
  }
  for (i in 1:length(randnames)){
    dataList[[name.cyc[i]]] <- size.cyc[i]
    dataList[[randnames[i]]] <- as.numeric(data[,randnames[i]])
  }
  for (i in names(X)){
    dataList[[i]] <- X[,which(i == names(X))]
  }

  # write jags model with required parameters
  parameters <- c(modeltext(dat.str, randvar.ia, corstr, path))
  parms.int <- unique(unlist(parameters[["MU"]]))
  parms.int <- parms.int[!is.na(parms.int)]
  parms.int <- parms.int[!grepl("mu0g", x = parms.int)]
  parms.int <- parms.int[!grepl("mu.corr", x = parms.int)]
  params.mon <- unique(c(unlist(parameters[["MU"]]), unlist(parameters[["SIGMA"]])))
  params.mon <- params.mon[!is.na(params.mon)]
  params.mon <- params.mon[!grepl("mu.corr", x = params.mon)]
  if (any(unlist(corstr) == 1)){
    params.corr <- unique(unlist(parameters[["RHO"]]))
    params.mu.corr <- unique(unlist(parameters[["mu.corr"]]))
    params.mu.corr <- params.mu.corr[!grepl("mu.corr\\[1\\]", x = params.mu.corr)]
    # prepare dfs for inverse wishart
    wishdf <- parameters[["wishdf"]]
    for (i in 1:length(wishdf)){
      name.wish <- paste0("I", i)
      assign(name.wish, wishdf[i])
      dataList[[name.wish]] <- diag(c(1:wishdf[i]))
    }
  } else {
    wishdf <- logical(0)
  }

  #------------------------------------------------------------------------------
  ### RUN THE CHAINS ####
  load.module("dic")
  adaptSteps = nadapt      # Number of steps to "tune" the samplers.
  burnInSteps = nburn      # Number of steps to "burn-in" the samplers.
  nChains = 3              # Number of chains to run.
  numSavedSteps = nsteps   # Total number of steps in chains to save.
  thinSteps = 1            # Number of steps to "thin" (1=keep every step).
  nPerChain = ceiling( ( numSavedSteps * thinSteps ) / nChains ) # Steps per chain.
  add <- NA
  if (mcmc.save.indiv){
    add <- parameters$b.save
  }
  mcmc.save <- c(c(params.mon, "tau"), add)
  if (any(unlist(corstr) == 1)){
    mcmc.save <- c(mcmc.save, params.corr, params.mu.corr)
  }
  mcmc.save <- mcmc.save[!is.na(mcmc.save)]
  # Create, initialize, and adapt the model:
  jagsModel = jags.model( path , data = dataList , n.chains = nChains , n.adapt = adaptSteps )
  # Burn-in:
  cat( "Burning in the MCMC chain...\n" )
  update( jagsModel, n.iter = burnInSteps )
  # The saved MCMC chain:
  cat( "Sampling final MCMC chain...\n" )
  codaSamples = coda.samples(jagsModel , variable.names = c(mcmc.save, "deviance") ,
                             n.iter = nPerChain , thin = thinSteps)
  mcmcChain <- as.matrix(codaSamples)
  mcmcdf <- as.data.frame(mcmcChain)

  #### convergence diagnostics ####
  if (checkconv == 1){
    nparms <- length(params.mon)
    cycle <- ceiling(x = nparms/4)
    # cycle through hypermeans and hypersds
    for (i in 1:cycle){
      ind <- c(params.mon[((i-1)*4+1):(i*4)])
      if (any(is.na(ind))){
        ind <- ind[1:(min(which(is.na(ind)))-1)]
      }
      dev.new()
      plot( codaSamples[,ind])
      show( gelman.diag(codaSamples[,ind], multivariate = F))
    }
    # plot correlations for inspection as well
    if (length(wishdf) > 0){
      for (dfs in 1:length(wishdf)){
        dummy <- matrix(0, nrow = wishdf[dfs], ncol = wishdf[dfs])
        ind <- which(lower.tri(dummy), arr.ind = TRUE)
        options(warn = -1)
        if (nrow(ind) > 4){
          broke <- split(as.data.frame(ind), rep(1:ceiling(nrow(ind)/4), each = 4))
          for (i in 1:length(broke)){
            ind <- broke[[i]]
            dev.new()
            plot(codaSamples[,paste0(params.corr[dfs], "[", ind[,1], ",", ind[,2], "]")])
            show( gelman.diag(codaSamples[,paste0(params.corr[dfs], "[", ind[,1], ",", ind[,2], "]")], multivariate = F))
          }
        } else {
          dev.new()
          plot(codaSamples[,paste0(params.corr[dfs], "[", ind[,1], ",", ind[,2], "]")])
          show( gelman.diag(codaSamples[,paste0(params.corr[dfs], "[", ind[,1], ",", ind[,2], "]")], multivariate = F))
        }
        options(warn = 0)
      }
    }
    # plot mus out of mu.cor df
    if(exists("params.mu.corr")){
      if(length(params.mu.corr) != 0){
        pl.ind <- match(params.mu.corr, names(mcmcdf))
        cycle <- ceiling(x = length(pl.ind)/4)
        for (i in 1:cycle){
          ind <- c(pl.ind[((i-1)*4+1):(i*4)])
          if (any(is.na(ind))){
            ind <- ind[1:(min(which(is.na(ind)))-1)]
          }
          dev.new()
          plot(codaSamples[,ind])
        }
      }
      # and plot mus not in mu.corr df
      if(any(parameters[["pl.ind"]])){
        ind <- which(parameters[["pl.ind"]] == 1)
        show <- c(contnames, catnames)[ind]
        pl.ind <- grep(paste0("\\bmu", show, "\\b", collapse = "|"), names(mcmcdf))
        cycle <- ceiling(x = length(pl.ind)/4)
        if(cycle != 0){
          for (i in 1:cycle){
            ind <- c(pl.ind[((i-1)*4+1):(i*4)])
            if (any(is.na(ind))){
              ind <- ind[1:(min(which(is.na(ind)))-1)]
            }
            plot(codaSamples[,ind])
          }
        }
      }
    }
    if(length(parameters[["pl.nhcl"]]) > 0){
      show <- parameters[["pl.nhcl"]]
      cycle <- ceiling(x = length(show)/4)
      for (i in 1:cycle){
        dummy[(cycle*4-3):(cycle*4)] <- NA
        pl <- show[(cycle*4-3):(cycle*4)]
        pl <- pl[1:(min(which(is.na(pl)))-1)]
        plot(codaSamples[,pl])
      }
    }
  }

  #### computation of bfs, and plotting of hdis and bfs ####
  b_post <- as.data.frame(cbind(mcmcdf[,parms.int]))
  names(b_post) <- parms.int
  scalecont <- sqrt(2)/4
  scalecat <- 1/2
  bf <- NA
  bf.names <- NA

  #### not from correlation structure ####
  if (any(!grepl("mu.corr", parms.int))){
    # dnorm approach to compute bfs
    counter <- 1
    pl.cont <- contnames[parameters[["pl.ind"]][1:nrcont]]
    # check that mus are not in correlation structure
    if (any(!is.na(pl.cont))) {
      for (i in pl.cont){
        if(length(grep(paste0("\\bmu", i, "\\b"), names(b_post), value = TRUE)) > 0){
          bf <- c(bf, dt.scaled(0,1,0,scalecont)/(dnorm(0, mean(b_post[,paste0("mu",i)]), sd(b_post[,paste0("mu",i)]))+1e-300))
          bf.names <- c(bf.names, i)
          counter <- counter + 1
        }
      }
    }
    if (length(parameters[["pl.nhclcont"]]) > 0) {
      for (i in parameters[["pl.nhclcont"]]){
        bf <- c(bf, dt.scaled(0,1,0,scalecont)/(dnorm(0, mean(b_post[,i]), sd(b_post[,i]))+1e-300))
        bf.names <- c(bf.names, i)
        counter <- counter + 1
      }
    }
    pl.cat <- catnames[parameters[["pl.ind"]][(1+nrcont):(nrcat+nrcont)]]
    # check that mus are not in correlation structure
    if (any(!is.na(pl.cat))) {
      for (i in pl.cat){
        if(length(grep(paste0("\\bmu", i, "\\b"), names(b_post), value = TRUE)) > 0){
          bf <- c(bf, dt.scaled(0,1,0,scalecat)/(dnorm(0, mean(b_post[,paste0("mu",i)]), sd(b_post[,paste0("mu",i)]))+1e-300))
          bf.names <- c(bf.names, i)
          counter <- counter + 1
        }
      }
    }
    if (length(parameters[["pl.nhclcat"]]) > 0) {
      for (i in parameters[["pl.nhclcat"]]){
        bf <- c(bf, dt.scaled(0,1,0,scalecat)/(dnorm(0, mean(b_post[,i]), sd(b_post[,i]))+1e-300))
        bf.names <- c(bf.names, i)
        counter <- counter + 1
      }
    }
    if (any(dat.str$type == "cat")){
      nrIA <- (length(catnames)+length(contnames))*(length(catnames)+length(contnames)-1)/2
      nrIA <- nrIA - nrIA.min
    } else {
      nrIA <- (length(catnames)+length(contnames)-1)*(length(catnames)+length(contnames))/2
    }
    if (nrIA > 0) {
      for (k in 1:(nrIA)){
        bf <- c(bf, dt.scaled(0,1,0,scalecat)/(dnorm(0, mean(b_post[,counter]), sd(b_post[,counter]))+1e-300))
        bf.names <- c(bf.names, names(b_post)[counter])
        counter <- counter + 1
      }
    }
    bf <- bf[2:length(bf)]
    bf1 <- prettyNum(bf, digits = 2)
    bf.names <- bf.names[2:length(bf.names)]
    bf.tog1 <- data.frame(bf.names, bf = bf1)
    # plotting of 95% hdis with respective bfs
    plot.bs <- melt(b_post, id.vars = NULL)
    names(plot.bs) <- c("classify", "samples")
    plot.bs$classify <- factor(plot.bs$classify)
    plot.bs$varnames <- plot.bs$classify
    plot.bs$varnames <- factor(plot.bs$varnames)
    pl.post <- plotPostMT_HDImeans2(plot.bs, xlab = "" , ylab = "Parameter Estimate\n", main = "",
                                    ylim = c(min(plot.bs$samples)-.1, max(plot.bs$samples)+.1),
                                    showHDI = 1, colflag = 1, bfs = bf1, bfpos = min(plot.bs$samples))
    if(plot.post == 1){
      dev.new()
      grid.draw(pl.post)
    }
    # n.b. posteriors are in z-transformed space
  }

  #### from correlation structure ####
  if(exists("params.mu.corr")){
    if (any(grepl("mu.corr", params.mu.corr))){
      bf <- NA
      bf.names <- NA
      comp <- NA
      ind <- (1+1):(1+nrcont)
      ind.plot <- NA
      for (i in ind){
        comp <- append(comp, which(grepl(paste0("\\[", i, "\\]"), params.mu.corr)))
      }
      comp <- comp[2:length(comp)]
      if (!is.na(comp[1])) {
        for (i in ind){
          name <- paste0("mu.corr", "\\[", i, "\\]")
          ind.comp <- which(grepl(name, names(mcmcdf)))
          ind.plot <- c(ind.plot, ind.comp)
          bf <- c(bf, dt.scaled(0,1,0,scalecont)/(dnorm(0, mean(mcmcdf[,ind.comp]), sd(mcmcdf[,ind.comp]))+1e-300))
          bf.names <- c(bf.names, parameters[["corrnames"]][i-1])
        }
      }
      comp <- NA
      ind <- (1+nrcont+1):(1+nrcat+nrcont)
      for (i in ind){
        comp <- append(comp, which(grepl(paste0("\\[", i, "\\]"), params.mu.corr)))
      }
      comp <- comp[2:length(comp)]
      if (!is.na(comp[1])) {
        for (i in ind){
          name <- paste0("mu.corr", "\\[", i, "\\]")
          ind.comp <- which(grepl(name, names(mcmcdf)))
          ind.plot <- c(ind.plot, ind.comp)
          bf <- c(bf, dt.scaled(0,1,0,scalecat)/(dnorm(0, mean(mcmcdf[,ind.comp]), sd(mcmcdf[,ind.comp]))+1e-300))
          bf.names <- c(bf.names, parameters[["corrnames"]][i-1])
        }
      }
      bf <- bf[2:length(bf)]
      bf2 <- prettyNum(bf, digits = 2)
      bf.names <- bf.names[2:length(bf.names)]
      bf.tog2 <- data.frame(bf.names, bf = bf2)
      ind.plot <- ind.plot[2:length(ind.plot)]
      # depending on simulation study, de(-comment) the following line of code
      # bf <- log(bf)
      # plotting of 95% hdis with respective bfs
      plot.bs <- melt(mcmcdf[,ind.plot], id.vars = NULL)
      names(plot.bs) <- c("classify", "samples")
      plot.bs$classify <- factor(plot.bs$classify)
      levels(plot.bs$classify) <- bf.names
      plot.bs$varnames <- plot.bs$classify
      plot.bs$varnames <- factor(plot.bs$varnames)
      pl.post <- plotPostMT_HDImeans2(plot.bs, xlab = "" , ylab = "Parameter Estimate\n", main = "",
                                      ylim = c(min(plot.bs$samples)-.1, max(plot.bs$samples)+.1),
                                      showHDI = 1, colflag = 1, bfs = bf2, bfpos = min(plot.bs$samples))
      if(plot.post == 1){
        dev.new()
        grid.draw(pl.post)
      }
      # n.b. posteriors are in z-transformed space
    }
  }

  DIC <- NULL
  if(dic == 1){
    meanDev <- mean(mcmcChain[,"deviance"])
    pD <- 0.5*var(mcmcChain[,"deviance"])
    DIC <- meanDev + pD
  }
  if (exists("bf1") & exists("bf2")){
    return(list(rbind(bf.tog2, bf.tog1), mcmcdf, DIC))
  }
  if (exists("bf1") & !exists("bf2")){
    return(list(bf.tog1, mcmcdf, DIC))
  }
  if (!exists("bf1") & exists("bf2")){
    return(list(bf.tog2, mcmcdf, DIC))
  }
}
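# Illustration (our addition, not part of the package code): the Savage-Dickey
# Bayes factors above divide the prior density at 0 -- a scaled t with df = 1
# via metRology::dt.scaled, i.e. a Cauchy with scale sqrt(2)/4 for continuous
# predictors -- by a normal approximation to the posterior density at 0
# (Wetzels et al., 2009). For a hypothetical vector `post` of posterior
# samples of one standardized group-level effect:
#
#   scalecont <- sqrt(2)/4
#   BF10 <- metRology::dt.scaled(0, df = 1, mean = 0, sd = scalecont) /
#     (dnorm(0, mean(post), sd(post)) + 1e-300)
#
# BF10 > 1 indicates evidence for the effect; the 1e-300 term guards against
# division by zero when the posterior density at 0 underflows.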
/scratch/gouwar.j/cran-all/cranData/BayesRS/R/modelrun.R