# Survival function (sf = 1 - CDF) of the Bell exponentiated Weibull distribution
sBellEW<-function (x, alpha, beta, theta, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-exp(-alpha*x^(beta)))^theta
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (1-exp(-exp(lambda)*(1-exp(-lambda*G))))/(1-(exp(-exp(lambda)+1)))
cdf[log.p == TRUE & lower.tail == TRUE] <- log(1-exp(-exp(lambda)*(1-exp(-lambda*G))))-log(1-(exp(-exp(lambda)+1)))
cdf[log.p == FALSE & lower.tail == FALSE] <- ((exp(-exp(lambda)*(1-exp(-lambda*G))))-(exp(-exp(lambda)+1)))/(1-(exp(-exp(lambda)+1)))
cdf[log.p == TRUE & lower.tail == FALSE] <- log((exp(-exp(lambda)*(1-exp(-lambda*G))))-(exp(-exp(lambda)+1)))-log(1-(exp(-exp(lambda)+1)))
sf<-1-cdf
return(sf)
}
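# A minimal usage sketch (not part of the package sources): the parameter values
# below are illustrative assumptions, chosen only to show how the survival function
# is evaluated over a grid of x values with the default settings
# (log.p = FALSE, lower.tail = TRUE).
x_grid <- seq(0.1, 3, by = 0.5)
sBellEW(x_grid, alpha = 0.5, beta = 1.2, theta = 2, lambda = 0.7)
# returns survival probabilities that decrease towards 0 as x grows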
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sBellEW.R
|
sBellF<-function (x, a, b, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-((1+(x/a)^b))^(-1))
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (1-exp(-exp(lambda)*(1-exp(-lambda*G))))/(1-(exp(-exp(lambda)+1)))
cdf[log.p == TRUE & lower.tail == TRUE] <- log(1-exp(-exp(lambda)*(1-exp(-lambda*G))))-log(1-(exp(-exp(lambda)+1)))
cdf[log.p == FALSE & lower.tail == FALSE] <- ((exp(-exp(lambda)*(1-exp(-lambda*G))))-(exp(-exp(lambda)+1)))/(1-(exp(-exp(lambda)+1)))
cdf[log.p == TRUE & lower.tail == FALSE] <- log((exp(-exp(lambda)*(1-exp(-lambda*G))))-(exp(-exp(lambda)+1)))-log(1-(exp(-exp(lambda)+1)))
sf<-1-cdf
return(sf)
}
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sBellF.R
|
sBellL<-function (x, b, q, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-((1+(x/b)))^(-q))
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (1-exp(-exp(lambda)*(1-exp(-lambda*G))))/(1-(exp(-exp(lambda)+1)))
cdf[log.p == TRUE & lower.tail == TRUE] <- log(1-exp(-exp(lambda)*(1-exp(-lambda*G))))-log(1-(exp(-exp(lambda)+1)))
cdf[log.p == FALSE & lower.tail == FALSE] <- ((exp(-exp(lambda)*(1-exp(-lambda*G))))-(exp(-exp(lambda)+1)))/(1-(exp(-exp(lambda)+1)))
cdf[log.p == TRUE & lower.tail == FALSE] <- log((exp(-exp(lambda)*(1-exp(-lambda*G))))-(exp(-exp(lambda)+1)))-log(1-(exp(-exp(lambda)+1)))
sf<-1-cdf
return(sf)
}
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sBellL.R
|
sBellW<-function (x, alpha, beta, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-exp(-alpha*x^beta))
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (1-exp(-exp(lambda)*(1-exp(-lambda*G))))/(1-(exp(-exp(lambda)+1)))
cdf[log.p == TRUE & lower.tail == TRUE] <- log(1-exp(-exp(lambda)*(1-exp(-lambda*G))))-log(1-(exp(-exp(lambda)+1)))
cdf[log.p == FALSE & lower.tail == FALSE] <- ((exp(-exp(lambda)*(1-exp(-lambda*G))))-(exp(-exp(lambda)+1)))/(1-(exp(-exp(lambda)+1)))
cdf[log.p == TRUE & lower.tail == FALSE] <- log((exp(-exp(lambda)*(1-exp(-lambda*G))))-(exp(-exp(lambda)+1)))-log(1-(exp(-exp(lambda)+1)))
sf<-1-cdf
return(sf)
}
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sBellW.R
|
sCBellB<-function (x, a, b, k, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-((1+(x/a)^b))^(-k))
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (exp(exp(lambda*G)-1)-1)/(exp(exp(lambda)-1)-1)
cdf[log.p == TRUE & lower.tail == TRUE] <- log(exp(exp(lambda*G)-1)-1)-log(exp(exp(lambda)-1)-1)
cdf[log.p == FALSE & lower.tail == FALSE] <- (exp(exp(lambda)-1)-exp(exp(lambda*G)-1))/((exp(exp(lambda)-1)-1))
cdf[log.p == TRUE & lower.tail == FALSE] <- log(exp(exp(lambda)-1)-exp(exp(lambda*G)-1))-log((exp(exp(lambda)-1)-1))
sf<-1-cdf
return(sf)
}
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sCBellB.R
|
sCBellBX<-function (x, a, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-exp(-x^2))^a
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (exp(exp(lambda*G)-1)-1)/(exp(exp(lambda)-1)-1)
cdf[log.p == TRUE & lower.tail == TRUE] <- log(exp(exp(lambda*G)-1)-1)-log(exp(exp(lambda)-1)-1)
cdf[log.p == FALSE & lower.tail == FALSE] <- (exp(exp(lambda)-1)-exp(exp(lambda*G)-1))/((exp(exp(lambda)-1)-1))
cdf[log.p == TRUE & lower.tail == FALSE] <- log(exp(exp(lambda)-1)-exp(exp(lambda*G)-1))-log((exp(exp(lambda)-1)-1))
sf<-1-cdf
return(sf)
}
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sCBellBX.R
|
sCBellE<-function (x, alpha, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-exp(-alpha*x))
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (exp(exp(lambda*G)-1)-1)/(exp(exp(lambda)-1)-1)
cdf[log.p == TRUE & lower.tail == TRUE] <- log(exp(exp(lambda*G)-1)-1)-log(exp(exp(lambda)-1)-1)
cdf[log.p == FALSE & lower.tail == FALSE] <- (exp(exp(lambda)-1)-exp(exp(lambda*G)-1))/((exp(exp(lambda)-1)-1))
cdf[log.p == TRUE & lower.tail == FALSE] <- log(exp(exp(lambda)-1)-exp(exp(lambda*G)-1))-log((exp(exp(lambda)-1)-1))
sf<-1-cdf
return(sf)
}
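# A minimal usage sketch (illustrative values, not part of the package sources) for
# the complementary Bell family with an exponential baseline, G(x) = 1 - exp(-alpha*x).
# The call below evaluates the survival function on a small grid of x values.
x_grid <- c(0.5, 1, 2, 4)
sCBellE(x_grid, alpha = 1, lambda = 0.8)
# survival probabilities, decreasing in x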
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sCBellE.R
|
sCBellEE<-function (x, alpha, beta, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-exp(-alpha*x))^beta
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (exp(exp(lambda*G)-1)-1)/(exp(exp(lambda)-1)-1)
cdf[log.p == TRUE & lower.tail == TRUE] <- log(exp(exp(lambda*G)-1)-1)-log(exp(exp(lambda)-1)-1)
cdf[log.p == FALSE & lower.tail == FALSE] <- (exp(exp(lambda)-1)-exp(exp(lambda*G)-1))/((exp(exp(lambda)-1)-1))
cdf[log.p == TRUE & lower.tail == FALSE] <- log(exp(exp(lambda)-1)-exp(exp(lambda*G)-1))-log((exp(exp(lambda)-1)-1))
sf<-1-cdf
return(sf)
}
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sCBellEE.R
|
sCBellEW<-function (x, alpha, beta, theta, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-exp(-alpha*x^(beta)))^theta
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (exp(exp(lambda*G)-1)-1)/(exp(exp(lambda)-1)-1)
cdf[log.p == TRUE & lower.tail == TRUE] <- log(exp(exp(lambda*G)-1)-1)-log(exp(exp(lambda)-1)-1)
cdf[log.p == FALSE & lower.tail == FALSE] <- (exp(exp(lambda)-1)-exp(exp(lambda*G)-1))/((exp(exp(lambda)-1)-1))
cdf[log.p == TRUE & lower.tail == FALSE] <- log(exp(exp(lambda)-1)-exp(exp(lambda*G)-1))-log((exp(exp(lambda)-1)-1))
sf<-1-cdf
return(sf)
}
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sCBellEW.R
|
sCBellF<-function (x, a, b, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-((1+(x/a)^b))^(-1))
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (exp(exp(lambda*G)-1)-1)/(exp(exp(lambda)-1)-1)
cdf[log.p == TRUE & lower.tail == TRUE] <- log(exp(exp(lambda*G)-1)-1)-log(exp(exp(lambda)-1)-1)
cdf[log.p == FALSE & lower.tail == FALSE] <- (exp(exp(lambda)-1)-exp(exp(lambda*G)-1))/((exp(exp(lambda)-1)-1))
cdf[log.p == TRUE & lower.tail == FALSE] <- log(exp(exp(lambda)-1)-exp(exp(lambda*G)-1))-log((exp(exp(lambda)-1)-1))
sf<-1-cdf
return(sf)
}
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sCBellF.R
|
sCBellL<-function (x, b, q, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-((1+(x/b)))^(-q))
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (exp(exp(lambda*G)-1)-1)/(exp(exp(lambda)-1)-1)
cdf[log.p == TRUE & lower.tail == TRUE] <- log(exp(exp(lambda*G)-1)-1)-log(exp(exp(lambda)-1)-1)
cdf[log.p == FALSE & lower.tail == FALSE] <- (exp(exp(lambda)-1)-exp(exp(lambda*G)-1))/((exp(exp(lambda)-1)-1))
cdf[log.p == TRUE & lower.tail == FALSE] <- log(exp(exp(lambda)-1)-exp(exp(lambda*G)-1))-log((exp(exp(lambda)-1)-1))
sf<-1-cdf
return(sf)
}
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sCBellL.R
|
sCBellW<-function (x, alpha, beta, lambda , log.p = FALSE, lower.tail = TRUE)
{
G=(1-exp(-alpha*x^beta))
cdf <- x
cdf[log.p == FALSE & lower.tail == TRUE] <- (exp(exp(lambda*G)-1)-1)/(exp(exp(lambda)-1)-1)
cdf[log.p == TRUE & lower.tail == TRUE] <- log(exp(exp(lambda*G)-1)-1)-log(exp(exp(lambda)-1)-1)
cdf[log.p == FALSE & lower.tail == FALSE] <- (exp(exp(lambda)-1)-exp(exp(lambda*G)-1))/((exp(exp(lambda)-1)-1))
cdf[log.p == TRUE & lower.tail == FALSE] <- log(exp(exp(lambda)-1)-exp(exp(lambda*G)-1))-log((exp(exp(lambda)-1)-1))
sf<-1-cdf
return(sf)
}
|
/scratch/gouwar.j/cran-all/cranData/BGFD/R/sCBellW.R
|
#' Genotype x Environment models using regression kernel
#'
#' BGGE function fits Bayesian regression for continuous observations through regression kernels
#'
#' @usage BGGE(y, K, XF = NULL, ne, ite = 1000, burn = 200, thin = 3, verbose = FALSE,
#' tol = 1e-10, R2 = 0.5)
#'
#' @param y Vector of data. Should be numeric and NAs are allowed.
#' @param K list A two-level list that specifies the regression kernels (covariance matrices). The first element is the \code{Kernel},
#' which contains the regression kernel itself. The second is the \code{Type}, specifying whether the matrix is either \code{D} (dense) or
#' \code{BD} (block diagonal). Any number of regression kernels or random effects to be fitted can be specified in this list.
#' @param XF matrix Design matrix (\eqn{n \times p}) for fixed effects
#' @param ne vector Number of genotypes in each environment.
#' @param ite numeric Number of iterations.
#' @param burn numeric Number of iterations to be discarded as burn-in.
#' @param thin numeric Thinning interval.
#' @param verbose Should the iteration history be printed on the console? If \code{TRUE} or 1, the history is printed at every iteration;
#' if another number \eqn{n} is chosen, the history is printed every \eqn{n} iterations. The default is \code{FALSE}.
#' @param tol a numeric tolerance level. Eigenvalues lower than \code{tol} are discarded. Default is 1e-10.
#' @param R2 the proportion of variance expected to be explained by the regression.
#'
#' @details
#' The goal is to fit genomic prediction models for continuous outcomes through a Gibbs sampler. BGGE uses a proposal for dimension reduction
#' through an orthogonal transformation of the observed data (y), as well as differential shrinkage induced by the prior variance assigned
#' to the regression parameters. Further details on this approach can be found in Cuevas et al. (2014).
#' The primary genetic model is
#' \deqn{y = g + e}
#' where \eqn{y} is the response, \eqn{g} is the unknown random effect and \eqn{e} is the residual effect.
#' Any number of random effects \eqn{g} can be specified through a list of regression kernels, one per random effect, in the
#' argument \code{K}.
#' The structure of \code{K} is a two-level list, where the first element on the second level is the kernel and the second element defines the
#' type of matrix. There are two types: a matrix is either \code{D} (dense) or \code{BD} (block diagonal). Because a spectral decomposition
#' is performed on the kernels, for block diagonal matrices we take advantage of their structure and decompose the submatrices instead of one large matrix.
#' For example, the regression kernels should have a structure like K = list(list(Kernel = G, Type = "D"), list(Kernel = G, Type = "BD")).
#' Declaring a matrix as block diagonal must be accompanied by the number of subjects in each submatrix of the block diagonal,
#' given in \code{ne}, which allows the submatrices to be extracted. Some genotype by environment models have a block diagonal structure or similar.
#' The genotype x environment deviation matrix in the MDs model (Sousa et al., 2017) has a block diagonal structure.
#' Also, the matrices for environment-specific variance in the MDe models (Sousa et al., 2017), when summed, form a block diagonal structure
#' from which submatrices for each environment can be extracted. If all kernels are of the dense type, \code{ne} is ignored.
#'
#' @return
#' A list with the estimated posterior means of the residual and genetic variance components for each term in the linear model, as well as the predicted genetic values.
#' The sampled chains for these quantities are also returned.
#'
#'
#' @examples
#' # single-environment main genotypic model
#' library(BGLR)
#' data(wheat)
#' X<-wheat.X[1:200,1:600] # Subset of 200 subjects and 600 markers
#' rownames(X) <- 1:200
#' Y<-wheat.Y[1:200,]
#' A<-wheat.A[1:200,1:200] # Pedigree
#'
#' GB<-tcrossprod(X)/ncol(X)
#' K<-list(G = list(Kernel = GB, Type = "D"))
#' y<-Y[,1]
#' fit<-BGGE(y = y,K = K, ne = length(y), ite = 300, burn = 100, thin = 2)
#'
#' # multi-environment main genotypic model
#' Env <- as.factor(c(2,3)) #subset of 2 environments
#' pheno_geno <- data.frame(env = gl(n = 2, k = nrow(Y), labels = Env),
#' GID = gl(n = nrow(Y), k = 1,length = nrow(Y) * length(Env)),
#' value = as.vector(Y[,2:3]))
#'
#' K <- getK(Y = pheno_geno, X = X, kernel = "GB", model = "MM")
#' y <- pheno_geno[,3]
#' fit <- BGGE(y = y, K = K, ne = rep(nrow(Y), length(Env)), ite = 300, burn = 100,thin = 1)
#'
#'
#' @seealso
#' \code{\link[BGLR]{BGLR}}
#'
#' @references
#' Cuevas, J., Perez-Elizalde, S., Soberanis, V., Perez-Rodriguez, P., Gianola, D., & Crossa, J. (2014).
#' Bayesian genomic-enabled prediction as an inverse problem. G3: Genes, Genomes, Genetics, 4(10), 1991-2001.
#'
#' Sousa, M. B., Cuevas, J., Oliveira, E. G. C., Perez-Rodriguez, P., Jarquin, D., Fritsche-Neto, R., Burgueno, J.
#' & Crossa, J. (2017). Genomic-enabled prediction in maize using kernel models with genotype x environment interaction.
#' G3: Genes, Genomes, Genetics, 7(6), 1995-2014.
#'
#'
#' @export
BGGE <- function(y, K, XF = NULL, ne, ite = 1000, burn = 200, thin = 3, verbose = FALSE, tol = 1e-10, R2 = 0.5) {
### PART I - Conditional distribution functions and eigen decomposition ####
# Conditional distribution of transformed genetic effects b (U'u)
#' @import stats
dcondb <- function(n, media, vari) {
sd <- sqrt(vari)
return(rnorm(n, media, sd))
}
# Conditional distribution of compound variance of genetic effects (sigb)
dcondsigb <- function(b, deltav, n, nu, Sc) {
z <- sum(b ^ 2 * deltav)
return(1 / rgamma(1, (n + nu) / 2, (z + nu * Sc) / 2))
}
# Conditional distribution of residual compound variance
dcondsigsq <- function(Aux, n, nu, Sce) {
return(1 / rgamma(1, (n + nu) / 2, crossprod(Aux) / 2 + Sce / 2))
}
# Conditional fixed effects
rmvnor<-function(n,media,sigma){
z<-rnorm(n)
return( media + crossprod(chol(sigma), z))
}
# Function for eigen decomposition
eig <- function(K, tol) {
ei <- eigen(K)
fil <- which(ei$values > tol)
return(list(ei$values[fil], ei$vectors[, fil]))
}
# Set spectral decomposition
setDEC <- function(K, tol, ne){
sK <- vector("list", length = length(K))
typeM <- sapply(K, function(x) x$Type)
if(!all(typeM %in% c("BD", "D")))
stop("Matrix should be of types BD or D")
if(missing(ne)){
if(any(typeM == "BD"))
stop("For type BD, number of subjects in each sub matrices should be provided")
}else{
if(length(ne) <= 1 & any(typeM == "BD"))
stop("ne invalid. For type BD, number of subjects in each sub matrices should be provided")
nsubK <- length(ne)
if(nsubK > 1){
posf <- cumsum(ne)
posi <- cumsum(c(1,ne[-length(ne)]))
}
}
for (i in 1:length(K)) {
if(K[[i]]$Type == "D") {
tmp <- list()
ei <- eig(K[[i]]$Kernel, tol)
tmp$s <- ei[[1]]
tmp$U <- ei[[2]]
tmp$tU <- t(ei[[2]])
tmp$nr <- length(ei[[1]])
tmp$type <- "D"
tmp$pos <- NA
sK[[i]] <- list(tmp)
}
if(K[[i]]$Type == "BD"){
cont <- 0
tmp <- list()
for (j in 1:nsubK){
Ktemp <- K[[i]]$Kernel[(posi[j]:posf[j]), (posi[j]:posf[j])]
ei <- eig(Ktemp, tol)
if(length(ei[[1]]) != 0){
cont <- cont + 1
tmp[[cont]] <- list()
tmp[[cont]]$s <- ei[[1]]
tmp[[cont]]$U <- ei[[2]]
tmp[[cont]]$tU <- t(ei[[2]])
tmp[[cont]]$nr <- length(ei[[1]])
tmp[[cont]]$type <- "BD"
tmp[[cont]]$pos <- c(posi[j], posf[j])
}
}
if(length(tmp) > 1){
sK[[i]] <- tmp
}else{
sK[[i]] <- list(tmp[[1]])
}
}
}
return(sK)
}
# verbose part I
if(as.numeric(verbose) != 0){
cat("Setting parameters...", "\n", "\n")
}
### PART II Preparation for Gibbs sampler ######
# Identification of NA's values
y <- as.numeric(y)
yNA <- is.na(y)
whichNa <- which(yNA)
nNa <- length(whichNa)
mu <- mean(y, na.rm = TRUE)
n <- length(y)
if(nNa > 0){
y[whichNa] <- mu
}
# name of each kernel (important to following procedures)
if(is.null(names(K))){
names(K) <- paste("K", seq(length(K)), sep = "")
}
# initial values of fixed effects
if(!is.null(XF)) {
Bet <- solve(crossprod(XF), crossprod(XF, y))
nBet <- length(Bet)
tXX <- solve(crossprod(XF))
}
# Eigen decomposition for the nk kernels
nk <- length(K)
nr <- numeric(length(K))
typeM <- sapply(K, function(x) x$Type)
if(!all(typeM %in% c("BD", "D")))
stop("Matrix should be of types BD or D")
if(length(ne) == 1 & any(typeM == "BD"))
stop("Type BD should be used only for block diagonal matrices")
Ei <- setDEC(K = K, ne = ne, tol = tol)
# Initial values for Monte Carlo Markov Chains (MCMC)
nCum <- sum(seq(1, ite) %% thin == 0)
chain <- vector("list", length = 3)
names(chain) <- c("mu", "varE", "K")
chain$varE <- numeric(nCum)
chain$mu <- numeric(nCum)
chain$K <- vector("list", length = nk)
names(chain$K) <- names(K)
chain$K[seq(nk)] <- list(list(varU = numeric(nCum)))
cpred <- vector('list', length = nk)
names(cpred) <- names(K)
cpred[seq(nk)] <- list(U = matrix(NA_integer_, nrow = nCum, ncol = n))
nu <- 3
Sce <- (nu + 2) *(1-R2)*var(y, na.rm=TRUE)
Sc<-numeric(length(K))
for( i in 1:length(K)){
Sc[i]<-(nu+2)*R2*var(y,na.rm=T)/mean(diag(K[[i]]$Kernel))
}
tau <- 0.01
u <- list()
for (j in 1:nk) {
u[[j]] <- rnorm(n, 0, 1 / (2 * n))
}
sigsq <- var(y)
sigb <- rep(0.2, nk)
temp <- y - mu
if(!is.null(XF)){
B.mcmc <- matrix(0, nrow = nCum, ncol = nBet)
temp <- temp - XF %*% Bet
}
temp <- temp - Reduce('+', u)
nSel <- 0
i <- 1
### PART III Fitted model with training data ####
# Iterations of Gibbs sampler
while(i <= ite) {
time.init <- proc.time()[3]
# Conditional of mu
temp <- temp + mu
mu <- rnorm(1, mean(temp), sqrt(sigsq/n))
#mu.mcmc[i] <- mu
temp <- temp - mu
# Conditional of fixed effects
if(!is.null(XF)){
temp <- temp + XF %*% Bet
vari <- sigsq * tXX
media <- tXX %*% crossprod(XF, temp)
Bet <- rmvnor(nBet, media, vari)
temp <- temp - XF %*% Bet
}
# Conditionals x Kernel
for (j in 1:nk) {
# Sampling genetic effects
if(typeM[j] =="D"){
temp <- temp + u[[j]]
d <- crossprod(Ei[[j]][[1]]$U, temp)
s <- Ei[[j]][[1]]$s
deltav <- 1 / s
lambda <- sigb[j]
vari <- s * lambda / (1 + s * lambda * tau)
media <- tau * vari * d
nr <- Ei[[j]][[1]]$nr
b <- dcondb(nr, media, vari)
u[[j]] <-crossprod(Ei[[j]][[1]]$tU ,b)
temp <- temp - u[[j]]
}
if(typeM[j] =="BD"){
nsk <- length(Ei[[j]])
if(nsk > 1){
temp <- temp + u[[j]]
d <- NULL
s <- NULL
neiv <- numeric(nsk)
pos <- matrix(NA, ncol = 2, nrow = nsk)
for(k in 1:nsk){
pos[k,] <- Ei[[j]][[k]]$pos
d <- c(d, crossprod(Ei[[j]][[k]]$U, temp[pos[k, 1]:pos[k, 2]]))
neiv[k] <- length(Ei[[j]][[k]]$s)
s <- c(s, Ei[[j]][[k]]$s)
}
deltav <- 1/s
lambda <- sigb[j]
vari <- s*lambda / (1+s*lambda*tau)
media <- tau*vari*d
nr <- length(s)
b <- dcondb(nr, media, vari)
posf <- cumsum(neiv)
posi <- cumsum(c(1, neiv[-length(neiv)]))
utmp <- numeric(n)
for(k in 1:nsk){
utmp[pos[k, 1]:pos[k, 2] ] <- crossprod(Ei[[j]][[k]]$tU, b[posi[k]:posf[k] ])
}
u[[j]] <- utmp
temp <- temp - u[[j]]
}else{
temp <- temp + u[[j]]
pos <- Ei[[j]][[1]]$pos
d <- crossprod(Ei[[j]][[1]]$U, temp[pos[1]:pos[2]])
s <- Ei[[j]][[1]]$s
deltav <- 1/s
lambda <- sigb[j]
vari <- s*lambda/(1+s*lambda*tau)
media <- tau*vari*d
nr <- Ei[[j]][[1]]$nr
b <- dcondb(nr, media, vari)
utmp <- numeric(n)
utmp[pos[1]:pos[2]] <- crossprod(Ei[[j]][[1]]$tU, b)
u[[j]] <- utmp
temp <- temp - u[[j]]
}
}
# Sampling scale hyperparameters and variance of genetic effects
sigb[j] <- dcondsigb(b, deltav, nr, nu, Sc[j])
}
# Sampling residual variance
res <- temp
sigsq <- dcondsigsq(res, n, nu, Sce)
tau <- 1 / sigsq
# Predicting missing values
if(nNa > 0){
uhat <- Reduce('+', u)
if(!is.null(XF)){
aux <- XF[yNA,] %*% Bet
}else{
aux <- 0
}
y[whichNa] <- aux + mu + uhat[whichNa] + rnorm(n = nNa, sd = sqrt(sigsq))
temp[whichNa] <- y[whichNa] - uhat[whichNa] - aux - mu
}
# Store thinned samples in the chain
if(i %% thin == 0){
nSel <- nSel + 1
chain$varE[nSel] <- sigsq
chain$mu[nSel] <- mu
if(!is.null(XF)){
B.mcmc[nSel,] <- Bet
}
for(j in 1:nk){
cpred[[j]][nSel,] <- u[[j]]
chain$K[[j]]$varU[nSel] <- sigb[j]
}
}
# Verbose
if(as.numeric(verbose) != 0 & i %% as.numeric(verbose) == 0){
time.end <- proc.time()[3]
cat("Iter: ", i, "time: ", round(time.end - time.init, 3),"\n")
}
i <- i + 1
}
###### PART IV Output ######
#Sampling
draw <- seq(ite)[seq(ite) %% thin == 0] > burn
mu.est <- mean(chain$mu[draw])
yHat <- mu.est
if (!is.null(XF)){
B <- colMeans(B.mcmc[draw,])
yHat <- yHat + XF %*% B
}
u.est <- sapply(cpred, FUN = function(x) colMeans(x[draw, ]) )
yHat <- yHat + rowSums(u.est)
out <- list()
out$yHat <- yHat
out$varE <- mean(chain$varE[draw])
out$varE.sd <- sd(chain$varE[draw])
out$K <- vector('list', length = nk)
names(out$K) <- names(cpred)
for(i in 1:nk){
out$K[[i]]$u <- colMeans(cpred[[i]][draw, ])
out$K[[i]]$u.sd <- apply(cpred[[i]][draw, ], MARGIN = 2, sd)
out$K[[i]]$varu <- mean(chain$K[[i]]$varU[draw])
out$K[[i]]$varu.sd <- sd(chain$K[[i]]$varU[draw])
}
out$chain <- chain
class(out) <- "BGGE"
return(out)
}
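# A minimal, self-contained sketch (simulated data; this is not one of the package
# examples) showing how a dense main-effect kernel ("D") and a block-diagonal
# genotype-by-environment kernel ("BD") are passed together with 'ne', the number of
# genotypes per environment. The kernel construction mirrors the MDs model structure.
set.seed(1)
ng <- 50; nEnv <- 2; p <- 200
M <- matrix(sample(0:2, ng * p, replace = TRUE), ng, p)  # simulated marker matrix
GB <- tcrossprod(scale(M)) / p                           # genomic relationship matrix
env <- gl(nEnv, ng)                                      # environments, stacked in blocks
gid <- factor(rep(1:ng, nEnv))                           # genotype ids repeated per environment
Zg <- model.matrix(~ gid - 1)                            # genotype incidence matrix
Ze <- model.matrix(~ env - 1)                            # environment incidence matrix
G <- Zg %*% GB %*% t(Zg)                                 # main genotypic effect kernel (dense)
GE <- G * tcrossprod(Ze)                                 # GxE deviation kernel (block diagonal)
y <- rnorm(ng * nEnv)                                    # simulated phenotype
K <- list(G = list(Kernel = G, Type = "D"),
          GE = list(Kernel = GE, Type = "BD"))
fit <- BGGE(y = y, K = K, ne = rep(ng, nEnv), ite = 300, burn = 100, thin = 2)
str(fit$yHat)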
|
/scratch/gouwar.j/cran-all/cranData/BGGE/R/BGGE.R
|
#' Kernel matrix for GE genomic selection models
#'
#' Create kernel matrix for GE genomic prediction models
#'
#' @usage getK(Y, X, kernel = c("GK", "GB"), setKernel = NULL, bandwidth = 1,
#' model = c("SM", "MM", "MDs", "MDe"), quantil = 0.5,
#' intercept.random = FALSE)
#'
#' @param Y \code{data.frame} Phenotypic data with three columns. The first column is a factor for environments,
#' the second column is a factor identifying genotypes, and the third column contains the trait of interest
#' @param X Marker matrix with individuals in rows and markers in columns. Missing markers are not allowed.
#' @param kernel Kernel to be created internally. Methods currently implemented are the Gaussian \code{GK} and the linear \code{GBLUP} kernel
#' @param setKernel \code{matrix} Single kernel matrix in case it is necessary to use a different kernel from \code{GK} or \code{GBLUP}
#' @param bandwidth \code{vector} Bandwidth parameter to create the Gaussian Kernel (GK) matrix. The default for the \code{bandwidth} is 1.
#' Estimation of this parameter can be made using a Bayesian approach as presented in Perez-Elizalde et al. (2015)
#' @param model Specifies the genotype \eqn{\times} environment model to be fitted. The currently supported
#' models are \code{SM}, \code{MM}, \code{MDs} and \code{MDe}. See Details.
#' @param quantil Specifies the quantile to create the Gaussian kernel.
#' @param intercept.random if \code{TRUE}, kernel related to random intercept of genotype is included.
#'
#' @details
#' The aim is to create kernels to fit GE interaction models applied to genomic prediction.
#' Two standard genomic kernels are currently supported:
#' \code{GB} creates a linear kernel resulting from the cross-product of centered and standardized
#' marker genotypes divided by the number of markers \eqn{p}:
#' \deqn{GB = \frac{XX^T}{p}}
#' Another alternative is the Gaussian kernel \code{GK}, given by:
#' \deqn{ GK (x_i, x_{i'}) = exp(\frac{-h d_{ii'}^2}{q(d)})}
#' where \eqn{d_{ii'}^2} is the squared genetic distance between individuals based on markers, scaled
#' by some percentile \eqn{q(d)}, and \eqn{h} is the bandwidth parameter. However,
#' other kernels can be provided through \code{setKernel}. In this case, the arguments \code{X},
#' \code{kernel} and \code{bandwidth} are ignored.
#'
#' Currently, the supported models for GE kernels are:
#' \itemize{
#' \item \code{SM}: is the single-environment main genotypic effect model - It fits the data for a
#' single environment, and only one kernel is produced.
#' \item \code{MM}: is the multi-environment main genotypic effect model - It considers the main
#' random genetic effects across environments. Thus, just one kernel is produced, of order
#' \eqn{n \times n}, related to the main effect across environments.
#' \item \code{MDs}: is the multi-environment single variance genotype x environment deviation
#' model - It is an extension of \code{MM} by adding the random interaction effect of
#' environments with genotype information. Thus, two kernels are created, one related to the
#' main effect across environment, and the second is associated with single genotype by environment effect.
#' \item \code{MDe}: is the multi-environment, environment-specific variance genotype x environment
#' deviation model - It separates the genetic effects into the main genetic
#' effects and the specific genetic effects (for each environment). Thus, one kernel
#' for across environments effect and \eqn{j} kernels are created, one for each
#' environment.
#' }
#' These GE genomic models were compared and named by Sousa et al. (2017) and can be extended by including
#' the kernel related to the random intercept of genotype through \code{intercept.random}.
#'
#' @return
#' This function returns a two-level list, which specifies the kernel and the type of matrix.
#' The latter is a classification according to its structure, i.e.,
#' whether the matrix is dense or block diagonal. For the main effect (\code{G}),
#' the matrix is classified as dense (D). On the other hand, matrices for environment-specific and
#' genotype by environment effects (\code{GE}) are classified as block diagonal (BD). This classification is used
#' as part of the prediction through the BGGE function.
#'
#' @references
#' Jarquin, D., J. Crossa, X. Lacaze, P. Du Cheyron, J. Daucourt, J. Lorgeou, F. Piraux, L. Guerreiro, P. Pérez, M. Calus, J. Burgueño,
#' and G. de los Campos. 2014. A reaction norm model for genomic selection using high-dimensional genomic and
#' environmental data. Theor. Appl. Genet. 127(3): 595-607.
#'
#' Lopez-Cruz, M., J. Crossa, D. Bonnett, S. Dreisigacker, J. Poland, J.-L. Jannink, R.P. Singh, E. Autrique,
#' and G. de los Campos. 2015. Increased prediction accuracy in wheat breeding trials using a marker × environment
#' interaction genomic selection model. G3: Genes, Genomes, Genetics. 5(4): 569-82.
#'
#' Perez-Elizalde, S., J. Cuevas, P. Perez-Rodriguez, and J. Crossa. 2015. Selection of the
#' Bandwidth Parameter in a Bayesian Kernel Regression Model for Genomic-Enabled Prediction.
#' Journal of Agricultural, Biological, and Environmental Statistics (JABES), 20(4):512-532.
#'
#' Sousa, M. B., Cuevas, J., Oliveira, E. G. C., Perez-Rodriguez, P., Jarquin, D., Fritsche-Neto, R., Burgueno, J.
#' & Crossa, J. (2017). Genomic-enabled prediction in maize using kernel models with genotype x environment interaction.
#' G3: Genes, Genomes, Genetics, 7(6), 1995-2014.
#'
#' @examples
#' # create kernel matrix for model MDs using wheat dataset
#' library(BGLR)
#'
#' data(wheat)
#' X <- scale(wheat.X, scale = TRUE, center = TRUE)
#' rownames(X) <- 1:599
#' pheno_geno <- data.frame(env = gl(n = 4, k = 599),
#' GID = gl(n=599, k=1, length = 599*4),
#' value = as.vector(wheat.Y))
#'
#' K <- getK(Y = pheno_geno, X = X, kernel = "GB", model = "MDs")
#'
#'
#'
#' @export
getK <- function(Y, X, kernel = c("GK", "GB"), setKernel = NULL, bandwidth = 1, model = c("SM", "MM", "MDs", "MDe"), quantil = 0.5,
intercept.random = FALSE)
{
#Force to data.frame
Y <- as.data.frame(Y)
Y[colnames(Y)[1:2]] <- lapply(Y[colnames(Y)[1:2]], factor)
subjects <- levels(Y[,2])
env <- levels(Y[,1])
nEnv <- length(env)
# check for repeated genotypes
if(any(table(Y[,c(1:2)]) > 1))
warning("There are repeated genotypes in some environment. They were kept")
switch(model,
'SM' = {
if (nEnv > 1)
stop("Single model choosen, but more than one environment is in the phenotype file")
Zg <- model.matrix(~factor(Y[,2L]) - 1)
},
'Cov' = {
Zg <- model.matrix(~factor(subjects) - 1)
},{
Ze <- model.matrix(~factor(Y[,1L]) - 1)
Zg <- model.matrix(~factor(Y[,2L]) - 1)
})
if(is.null(setKernel)){
if(is.null(rownames(X)))
stop("Genotype names are missing")
if (!all(subjects %in% rownames(X)))
stop("Not all genotypes presents in the phenotypic file are in marker matrix")
X <- X[subjects,]
switch(kernel,
'GB' = {
# case 'G-BLUP'...
ker.tmp <- tcrossprod(X) / ncol(X)
#G <- list(Zg %*% tcrossprod(ker.tmp, Zg))
G <- list(list(Kernel = Zg %*% tcrossprod(ker.tmp, Zg), Type = "D"))
},
'GK' = {
# case 'GK'...
D <- (as.matrix(dist(X))) ^ 2
G <- list()
for(i in 1:length(bandwidth)){
ker.tmp <- exp(-bandwidth[i] * D / quantile(D, quantil))
#G[[i]] <- Zg %*% tcrossprod(ker.tmp, Zg)
G[[i]] <- list(Kernel = Zg %*% tcrossprod(ker.tmp, Zg), Type = "D")
}
},
{
stop("kernel selected is not available. Please choose one method available or make available other kernel through argument K")
})
names(G) <- seq(length(G))
}else{
## check kernels
nullNames <- sapply(setKernel, function(x) any(sapply(dimnames(x), is.null)))
if(any(nullNames))
stop("Genotype names are missing in some kernel")
# Condition to check if all genotype names are compatible
equalNames <- sapply(setKernel, function(x) mapply(function(z, y) all(z %in% y), z=list(subjects), y=dimnames(x)) )
if(!all(equalNames))
stop("Not all genotypes presents in phenotypic file are in the kernel matrix.
Please check dimnames")
K <- lapply(setKernel, function(x) x[subjects, subjects]) # reordering kernel
ker.tmp <- K
#G <- list(Zg %*% tcrossprod(ker.tmp, Zg))
G <- lapply(ker.tmp, function(x) list(Kernel = Zg %*% tcrossprod(x, Zg), Type = "D") )
# Setting names
if(is.null(names(K))){
names(G) <- seq(length(G))
}else{
names(G) <- names(setKernel)
}
}
tmp.names <- names(G)
names(G) <- if(length(G) > 1) paste("G", tmp.names, sep ="_") else "G"
switch(model,
'SM' = {
out <- G
},
'MM' = {
out <- G
},
'MDs' = {
E <- tcrossprod(Ze)
#GE <- Map('*', G, list(E))
GE <- lapply(G, function(x) list(Kernel = x$Kernel * E, Type = "BD"))
names(GE) <- if(length(G) > 1) paste("GE", tmp.names, sep ="_") else "GE"
out <- c(G, GE)
},
'MDe' = {
ZEE <- matrix(data = 0, nrow = nrow(Ze), ncol = ncol(Ze))
out.tmp <- list()
for(j in 1:length(G)){
out.tmp <- c(out.tmp, lapply(1:nEnv, function(i){
ZEE[,i] <- Ze[,i]
ZEEZ <- ZEE %*% t(Ze)
#K3 <- G[[j]] * ZEEZ
K3 <- list(Kernel = G[[j]]$Kernel * ZEEZ, Type = "BD")
return(K3)
}))
}
if(length(G) > 1){
names(out.tmp) <- paste(rep(env, length(G)), rep(tmp.names, each = nEnv), sep = "_" )
}else{
names(out.tmp) <- env
}
out <- c(G, out.tmp)
}, #DEFAULT CASE
{
stop("Model selected is not available ")
})
if(intercept.random){
Gi <- list(Kernel = Zg %*% tcrossprod(diag(length(subjects)), Zg), Type = "D")
out <- c(out, list(Gi = Gi))
}
return(out)
}
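# A minimal, self-contained sketch (simulated data; not one of the package examples)
# showing that a vector can be passed to 'bandwidth' so that one Gaussian kernel is
# built per bandwidth value, each with its own main-effect and GxE kernel under MDs.
set.seed(2)
ng <- 30; p <- 100; nEnv <- 2
X <- matrix(sample(0:2, ng * p, replace = TRUE), ng, p)  # simulated markers
rownames(X) <- 1:ng
pheno <- data.frame(env = gl(nEnv, ng),
                    GID = factor(rep(1:ng, nEnv)),
                    value = rnorm(ng * nEnv))
K_gk <- getK(Y = pheno, X = X, kernel = "GK", bandwidth = c(0.5, 1, 2), model = "MDs")
names(K_gk)  # "G_1" "G_2" "G_3" "GE_1" "GE_2" "GE_3"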
|
/scratch/gouwar.j/cran-all/cranData/BGGE/R/getK.R
|
#' BGGM: Bayesian Gaussian Graphical Models
#'
#'
#' @description The \code{R} package \strong{BGGM} provides tools for making Bayesian inference in
#' Gaussian graphical models (GGM). The methods are organized around two general approaches for
#' Bayesian inference: (1) estimation \insertCite{Williams2019}{BGGM} and (2) hypothesis testing
#' \insertCite{Williams2019_bf}{BGGM}. The key distinction is that the former focuses on either the
#' posterior or posterior predictive distribution, whereas the latter focuses on model comparison
#' with the Bayes factor.
#'
#' The methods in \strong{BGGM} build upon existing algorithms that are well-known in the literature.
#' The central contribution of \strong{BGGM} is to extend those approaches:
#'
#' \enumerate{
#'
#' \item Bayesian estimation with the novel matrix-F prior distribution \insertCite{Mulder2018}{BGGM}.
#'
#' \itemize{
#'
#' \item Estimation \code{\link{estimate}}.
#'
#' }
#'
#'
#' \item Bayesian hypothesis testing with the novel matrix-F prior distribution \insertCite{Mulder2018}{BGGM}.
#'
#' \itemize{
#'
#' \item Exploratory hypothesis testing \code{\link{explore}}.
#'
#' \item Confirmatory hypothesis testing \code{\link{confirm}}.
#'
#' }
#'
#' \item Comparing GGMs \insertCite{williams2020comparing}{BGGM}
#'
#' \itemize{
#'
#' \item Partial correlation differences \code{\link{ggm_compare_estimate}}.
#'
#' \item Posterior predictive check \code{\link{ggm_compare_ppc}}.
#'
#' \item Exploratory hypothesis testing \code{\link{ggm_compare_explore}}.
#'
#' \item Confirmatory hypothesis testing \code{\link{ggm_compare_confirm}}.
#'
#'
#' }
#'
#'
#' \item Extending inference beyond the conditional (in)dependence structure
#'
#' \itemize{
#'
#' \item Predictability with Bayesian variance explained \insertCite{gelman_r2_2019}{BGGM}
#' \code{\link{predictability}}.
#'
#' \item Posterior uncertainty in the partial correlations \code{\link{estimate}}.
#'
#' \item Custom Network Statistics \code{\link{roll_your_own}}.
#'
#'
#' }
#'
#' }
#'
#' Furthermore, the computationally intensive tasks are written in \code{c++} via the \code{R}
#' package \strong{Rcpp} \insertCite{eddelbuettel2011rcpp}{BGGM} and the \code{c++}
#' library \strong{Armadillo} \insertCite{sanderson2016armadillo}{BGGM}. In addition, there are plotting functions
#' for each method, control variables can be included in the model, and there is support for
#' missing values (\code{\link{bggm_missing}}).
#'
#' \bold{Supported Data Types}:
#'
#' \itemize{
#'
#' \item Continuous: The continuous method was described \insertCite{@in @Williams2019_bf;textual}{BGGM}.
#'
#' \item Binary: The binary method builds directly upon \insertCite{@in @talhouk2012efficient;textual}{BGGM},
#' that, in turn, built upon the approaches of \insertCite{lawrence2008bayesian;textual}{BGGM} and
#' \insertCite{webb2008bayesian;textual}{BGGM} (to name a few).
#'
#' \item Ordinal: Ordinal data require sampling thresholds. There are two approaches included in \bold{BGGM}: (1)
#' the customary approach described in \insertCite{@in @albert1993bayesian;textual}{BGGM} (the default) and (2)
#' the 'Cowles' algorithm described in \insertCite{@in @cowles1996accelerating;textual}{BGGM}.
#'
#'
#' \item Mixed: The mixed data (a combination of discrete and continuous) method was introduced
#' \insertCite{@in @hoff2007extending;textual}{BGGM}. This is a semi-parametric copula model
#' (i.e., a copula GGM) based on the ranked likelihood. Note that this can be used for data
#' consisting entirely of ordinal data.
#'
#' }
#'
#' \bold{Additional Features}:
#'
#' The primary focus of \code{BGGM} is Gaussian graphical modeling (the inverse covariance matrix).
#' In addition, there is a suite of useful methods not explicitly for GGMs:
#'
#' \enumerate{
#'
#' \item Bivariate correlations for binary (tetrachoric), ordinal (polychoric), mixed (rank based),
#' and continuous (Pearson's) data \code{\link{zero_order_cors}}.
#'
#' \item Multivariate regression for binary (probit), ordinal (probit),
#' mixed (rank likelihood), and continuous data (\code{\link{estimate}}).
#'
#' \item Multiple regression for binary (probit), ordinal (probit),
#' mixed (rank likelihood), and continuous data (e.g., \code{\link{coef.estimate}}).
#' }
#'
#' \strong{Note on Conditional (In)dependence Models for Latent Data}:
#'
#' All of the data types (besides continuous) model latent data. That is, unobserved
#' (latent) data is assumed to be Gaussian. For example, a tetrachoric correlation
#' (binary data) is a special case of a polychoric correlation (ordinal data).
#' Both capture relations between "theorized normally distributed continuous
#' \strong{latent} variables" (\href{https://en.wikipedia.org/wiki/Polychoric_correlation}{Wikipedia}).
#' In both instances, the corresponding partial correlation between observed variables is conditioned
#' on the remaining variables in the \emph{latent} space. This implies that interpretation
#' is similar to continuous data, but with respect to latent variables. We refer interested users
#' to \insertCite{@page 2364, section 2.2, in @webb2008bayesian;textual}{BGGM}.
#'
#'
#' \strong{High Dimensional Data?}
#'
#' \strong{BGGM} was built specifically for social-behavioral scientists. Of course,
#' the methods can be used by all researchers. However, there is currently \emph{not} support
#' for high-dimensional data (i.e., more variables than observations) that are
#' commonplace in the genetics literature. These data are rare in the social-behavioral sciences.
#' In the future, support for high-dimensional data may be added to \strong{BGGM}.
#'
#' @references
#' \insertAllCited{}
#'
#'
#' @keywords internal
#' @aliases _PACKAGE
#'
#' @name BGGM-package
#'
#' @import Rcpp
#' @importFrom Rcpp sourceCpp
#' @useDynLib BGGM, .registration = TRUE
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/BGGM-package.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
mean_array <- function(x) {
.Call(`_BGGM_mean_array`, x)
}
quantile_type_1 <- function(x, prob) {
.Call(`_BGGM_quantile_type_1`, x, prob)
}
Sigma_i_not_i <- function(x, index) {
.Call(`_BGGM_Sigma_i_not_i`, x, index)
}
select_col <- function(x, index) {
.Call(`_BGGM_select_col`, x, index)
}
select_row <- function(x, index) {
.Call(`_BGGM_select_row`, x, index)
}
remove_row <- function(x, which) {
.Call(`_BGGM_remove_row`, x, which)
}
remove_col <- function(x, index) {
.Call(`_BGGM_remove_col`, x, index)
}
internal_missing_gaussian <- function(Y, Y_missing, Sigma, iter_missing) {
.Call(`_BGGM_internal_missing_gaussian`, Y, Y_missing, Sigma, iter_missing)
}
missing_gaussian <- function(Y, Y_missing, Sigma, iter_missing, progress_impute, store_all, lambda) {
.Call(`_BGGM_missing_gaussian`, Y, Y_missing, Sigma, iter_missing, progress_impute, store_all, lambda)
}
Theta_continuous <- function(Y, iter, delta, epsilon, prior_only, explore, start, progress, impute, Y_missing) {
.Call(`_BGGM_Theta_continuous`, Y, iter, delta, epsilon, prior_only, explore, start, progress, impute, Y_missing)
}
sample_prior <- function(Y, iter, delta, epsilon, prior_only, explore, progress) {
.Call(`_BGGM_sample_prior`, Y, iter, delta, epsilon, prior_only, explore, progress)
}
mv_continuous <- function(Y, X, delta, epsilon, iter, start, progress) {
.Call(`_BGGM_mv_continuous`, Y, X, delta, epsilon, iter, start, progress)
}
trunc_mvn <- function(mu, rinv, z, y, cutpoints) {
.Call(`_BGGM_trunc_mvn`, mu, rinv, z, y, cutpoints)
}
mv_binary <- function(Y, X, delta, epsilon, iter, beta_prior, cutpoints, start, progress) {
.Call(`_BGGM_mv_binary`, Y, X, delta, epsilon, iter, beta_prior, cutpoints, start, progress)
}
mv_ordinal_cowles <- function(Y, X, delta, epsilon, iter, MH) {
.Call(`_BGGM_mv_ordinal_cowles`, Y, X, delta, epsilon, iter, MH)
}
mv_ordinal_albert <- function(Y, X, iter, delta, epsilon, K, start, progress) {
.Call(`_BGGM_mv_ordinal_albert`, Y, X, iter, delta, epsilon, K, start, progress)
}
copula <- function(z0_start, levels, K, Sigma_start, iter, delta, epsilon, idx, progress) {
.Call(`_BGGM_copula`, z0_start, levels, K, Sigma_start, iter, delta, epsilon, idx, progress)
}
pcor_to_cor_internal <- function(x, p) {
.Call(`_BGGM_pcor_to_cor_internal`, x, p)
}
predictability_helper <- function(Y, y, XX, Xy, n, iter) {
.Call(`_BGGM_predictability_helper`, Y, y, XX, Xy, n, iter)
}
beta_helper_fast <- function(XX, Xy, p, iter) {
.Call(`_BGGM_beta_helper_fast`, XX, Xy, p, iter)
}
pred_helper_latent <- function(Y, XX, Xy, quantiles, n, iter) {
.Call(`_BGGM_pred_helper_latent`, Y, XX, Xy, quantiles, n, iter)
}
KL_univariate <- function(var_1, var_2) {
.Call(`_BGGM_KL_univariate`, var_1, var_2)
}
ppc_helper_nodewise_fast <- function(Theta, n1, n2, p) {
.Call(`_BGGM_ppc_helper_nodewise_fast`, Theta, n1, n2, p)
}
KL_divergnece_mvn <- function(Theta_1, Theta_2) {
.Call(`_BGGM_KL_divergnece_mvn`, Theta_1, Theta_2)
}
sum_squares <- function(Rinv_1, Rinv_2) {
.Call(`_BGGM_sum_squares`, Rinv_1, Rinv_2)
}
my_dnorm <- function(x, means, sds) {
.Call(`_BGGM_my_dnorm`, x, means, sds)
}
hamming_distance <- function(Rinv_1, Rinv_2, df1, df2, dens, pcors, BF_cut) {
.Call(`_BGGM_hamming_distance`, Rinv_1, Rinv_2, df1, df2, dens, pcors, BF_cut)
}
correlation <- function(Rinv_1, Rinv_2) {
.Call(`_BGGM_correlation`, Rinv_1, Rinv_2)
}
ppc_helper_fast <- function(Theta, n1, n2, p, BF_cut, dens, ppc_ss, ppc_cors, ppc_hd) {
.Call(`_BGGM_ppc_helper_fast`, Theta, n1, n2, p, BF_cut, dens, ppc_ss, ppc_cors, ppc_hd)
}
mvnrnd <- function(n, mu, Sigma) {
.Call(`_BGGM_mvnrnd`, n, mu, Sigma)
}
var <- function(Y, X, delta, epsilon, beta_prior, iter, start, progress) {
.Call(`_BGGM_var`, Y, X, delta, epsilon, beta_prior, iter, start, progress)
}
hft_algorithm <- function(Sigma, adj, tol, max_iter) {
.Call(`_BGGM_hft_algorithm`, Sigma, adj, tol, max_iter)
}
bic_fast <- function(Theta, S, n, prior_prob) {
.Call(`_BGGM_bic_fast`, Theta, S, n, prior_prob)
}
find_ids <- function(x) {
.Call(`_BGGM_find_ids`, x)
}
search <- function(S, iter, old_bic, start_adj, n, gamma, stop_early, progress) {
.Call(`_BGGM_search`, S, iter, old_bic, start_adj, n, gamma, stop_early, progress)
}
fast_g_matrix_F <- function(Y, adj, mu_samples, cov_samples, iter, p, N, prior_sd, kappa1, progress) {
.Call(`_BGGM_fast_g_matrix_F`, Y, adj, mu_samples, cov_samples, iter, p, N, prior_sd, kappa1, progress)
}
contrained_helper <- function(cors, adj, iter, progress) {
.Call(`_BGGM_contrained_helper`, cors, adj, iter, progress)
}
missing_copula <- function(Y, Y_missing, z0_start, Sigma_start, levels, iter_missing, progress_impute, K, idx, epsilon, delta) {
.Call(`_BGGM_missing_copula`, Y, Y_missing, z0_start, Sigma_start, levels, iter_missing, progress_impute, K, idx, epsilon, delta)
}
missing_copula_data <- function(Y, Y_missing, z0_start, Sigma_start, levels, iter_missing, progress_impute, K, idx, lambda) {
.Call(`_BGGM_missing_copula_data`, Y, Y_missing, z0_start, Sigma_start, levels, iter_missing, progress_impute, K, idx, lambda)
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/RcppExports.R
|
#' GGM: Missing Data
#'
#' Estimation and exploratory hypothesis testing with missing data.
#'
#' @param x An object of class \code{mids}, obtained from \code{\link[mice]{mice}}.
#'
#' @param method Character string. Which method should be used (default set to \code{estimate})? The current
#' options are \code{"estimate"} and \code{"explore"}.
#'
#' @param iter Number of iterations for each imputed dataset (posterior samples; defaults to 2000).
#'
#' @param ... Additional arguments passed to either
#' \code{\link{estimate}} or \code{\link{explore}}.
#'
#' @return An object of class \code{estimate} or \code{explore}.
#' @export
#'
#' @note Currently, \strong{BGGM} is compatible with the package \code{\link[mice]{mice}} for handling
#' the missing data. This is accomplished by fitting a model for each imputed dataset
#' (i.e., more than one to account for uncertainty in the imputation step) and then pooling
#' the estimates.
#'
#' In a future version, an additional option will be added that allows for
#' imputing the missing values during model fitting. This option will be incorporated directly into
#' the \code{\link{estimate}} or \code{\link{explore}} functions, such that \code{bggm_missing} will
#' always support missing data with \code{\link[mice]{mice}}.
#'
#'
#' \strong{Support}:
#'
#' There is limited support for missing data. As of version \code{2.0.0}, it is possible to
#' determine the graphical structure with either \code{\link{estimate}} or \code{\link{explore}}, in addition
#' to plotting the graph with \code{\link{plot.select}}. All data types \emph{are} currently supported.
#'
#' \strong{Memory Warning}:
#' A model is fitted for each imputed dataset. This results in a potentially large object.
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # need this package
#' library(mice, warn.conflicts = FALSE)
#'
#' # data
#' Y <- ptsd[,1:5]
#'
#' # matrix for indices
#' mat <- matrix(0, nrow = 221, ncol = 5)
#'
#' # indices
#' indices <- which(mat == 0, arr.ind = TRUE)
#'
#' # 50 NAs
#' Y[indices[sample(1:nrow(indices), 50),]] <- NA
#'
#' # impute
#' x <- mice(Y, m = 5, print = FALSE)
#'
#' #########################
#' ####### copula #####
#' #########################
#' # rank-based partial correlations
#'
#' # estimate the model
#' fit_est <- bggm_missing(x,
#' method = "estimate",
#' type = "mixed",
#' iter = 250,
#' progress = FALSE)
#'
#' # select edge set
#' E <- select(fit_est)
#'
#' # plot E
#' plt_E <- plot(E)$plt
#'
#' plt_E
#'}
bggm_missing <- function(x, iter = 2000,
method = "estimate", ...){
# check for mice
if(!requireNamespace("mice", quietly = TRUE)) {
stop("Please install the '", "mice", "' package.")
}
# check for abind
if(!requireNamespace("abind", quietly = TRUE)) {
stop("Please install the '", "abind", "' package.")
}
# combine data in long format
data_sets <- mice::complete(x, action = "long")
# number of data sets
n_data_sets <- length(unique(data_sets$.imp))
# remove row id
Y <- data_sets[,-c(2)]
if(method == "explore"){
# fit the models
fits <- lapply(1:n_data_sets, function(x) explore(as.matrix(subset(Y, .imp == x)[,-1]),
iter = iter,
impute = FALSE, ...))
# iterations
iter <- fits[[1]]$iter
# partial correlations
post_start_pcors <- fits[[1]]$post_samp$pcors
# fisher z
post_start_fisher <- fits[[1]]$post_samp$fisher_z
# prior fisher z
prior_start_fisher <- fits[[1]]$prior_samp$fisher_z
# regression (for multivariate)
if(!is.null( fits[[1]]$formula)){
post_start_beta <- fits[[1]]$post_samp$beta
}
# combine the imputations
samps <- for(i in 2:n_data_sets) {
post_start_pcors <- abind::abind(post_start_pcors ,
fits[[i]]$post_samp$pcors[,,])
post_start_fisher <- abind::abind(post_start_fisher,
fits[[i]]$post_samp$fisher_z[,,])
prior_start_fisher <- abind::abind(prior_start_fisher,
fits[[i]]$prior_samp$fisher_z[,,])
# multivariate
if(!is.null(fits[[1]]$formula)){
post_start_beta <- abind::abind(post_start_beta,
fits[[i]]$post_samp$beta[,,])
}
}
# dimensions
dims <- dim(post_start_pcors)
# replace samples
fits[[1]]$post_samp$pcors <- post_start_pcors[,,]
fits[[1]]$post_samp$fisher_z <- post_start_fisher[,,]
fits[[1]]$prior_samp$fisher_z <- prior_start_fisher[,,]
if(!is.null( fits[[1]]$formula)){
fits[[1]]$post_samp$beta <- post_start_beta
}
}
# estimate models
if(method == "estimate"){
# fit the models
fits <- lapply(1:n_data_sets, function(x) estimate(as.matrix(subset(Y, .imp == x)[,-1]),
iter = iter,
impute = FALSE, ...))
iter <- fits[[1]]$iter
post_start_pcors <- fits[[1]]$post_samp$pcors[,,]
post_start_fisher <- fits[[1]]$post_samp$fisher_z[,,]
if(!is.null( fits[[1]]$formula)){
post_start_beta <- fits[[1]]$post_samp$beta
}
samps <- for(i in 2:n_data_sets) {
post_start_pcors <- abind::abind(post_start_pcors,
fits[[i]]$post_samp$pcors[,,])
post_start_fisher <- abind::abind(post_start_fisher,
fits[[i]]$post_samp$fisher_z[,,])
if(!is.null( fits[[1]]$formula)){
post_start_beta <- abind::abind(post_start_beta,
fits[[i]]$post_samp$beta[,,])
}
}
dims <- dim(post_start_pcors)
fits[[1]]$post_samp$pcors <- post_start_pcors
fits[[1]]$post_samp$fisher_z <- post_start_fisher
if(!is.null( fits[[1]]$formula)){
fits[[1]]$post_samp$beta <- post_start_beta
}
}
fit <- fits[[1]]
# total iterations + warmup
fit$iter <- (iter * n_data_sets) + 50
# model
fit
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/bggm_missing.R
|
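# Bayesian model averaging over the graphs visited during the search: for each draw,
# a graph is sampled according to the stored graph probabilities ('probs'), a
# covariance matrix is drawn from the corresponding Wishart posterior, the precision
# matrix is completed under that graph with hft_algorithm(), and the draws are
# averaged. With param = "pcor" the averaged matrix is returned on the partial
# correlation scale; otherwise the averaged precision matrix is returned.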
bma_posterior <- function(object,
param = "pcor",
iter = 5000,
progress = TRUE){
if(!is(object, "ggm_search")){
stop("class not supported. Must be 'ggm_search'")
}
n <- object$n
p <- ncol(object$adj)
I_p <- diag(p)
scatter <- object$S * (n - 1)
approx_marg_ll <- object$approx_marg_ll
graphs <- object$adj_path[,,which(duplicated(approx_marg_ll) == 0)]
graphs <- graphs[,,-1]
graphs_n <- dim(graphs)[3]
probs <- object$probs
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = iter, style = 3)
}
samples <- vapply(1:iter, function(s){
graph_s <- sample(1:graphs_n, 1,replace = FALSE, probs)
Sigma <- solve(rWishart(1, n + p - 1, solve(scatter + I_p * p))[,,1])
Theta <- hft_algorithm(Sigma = Sigma,
graphs[, , graph_s],
tol = 0.0001,
max_iter = 10)$Theta
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, s)
}
if(param == "pcor"){
-cov2cor( Theta ) + diag(2, p)
} else {
Theta
}
}, FUN.VALUE = matrix(0, p, p))
bma_mean <- apply(samples, 1:2, mean)
if( is.null( colnames(scatter) ) ){
colnames(bma_mean) <- 1:p
row.names(bma_mean ) <- 1:p
} else {
colnames(bma_mean) <- colnames(scatter)
row.names(bma_mean) <- colnames(scatter)
}
returned_object <- list(bma_mean = bma_mean,
samples = samples)
class(returned_object) <- c("BGGM",
"bma_posterior")
return(returned_object)
}
print_bma <- function(x,...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Bayesian Model Averaged Graph:\n\n")
print(round(x$bma_mean, 3))
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/bma_posterior.R
|
#' Compute Regression Parameters for \code{estimate} Objects
#'
#' There is a direct correspondence between the inverse covariance matrix and
#' multiple regression \insertCite{kwan2014regression,Stephens1998}{BGGM}. This readily allows
#' for converting the GGM parameters to regression coefficients. All data types are supported.
#'
#'
#' @name coef.estimate
#'
#' @param object An Object of class \code{estimate}
#'
#' @param iter Number of iterations (posterior samples; defaults to the number in the object).
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param ... Currently ignored.
#'
#' @references
#' \insertAllCited{}
#'
#' @return An object of class \code{coef}, containing two lists.
#'
#'
#' \itemize{
#'
#' \item \code{betas} A list of length \emph{p}, each containing a \emph{p} - 1 by \code{iter} matrix of
#' posterior samples
#'
#' \item \code{object} An object of class \code{estimate} (the fitted model).
#' }
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' #########################
#' ### example 1: binary ###
#' #########################
#' # data
#' Y <- women_math[1:500, ]
#'
#' # fit model
#' fit <- estimate(Y, type = "binary",
#' iter = 250,
#' progress = FALSE)
#'
#' # summarize the partial correlations
#' reg <- coef(fit, progress = FALSE)
#'
#' # summary
#' summ <- summary(reg)
#'
#' summ
#'}
#' @export
coef.estimate <- function(object,
iter = NULL,
progress = TRUE,...) {
# check for object class
if(is(object, "estimate") | is(object, "explore")){
# check for default
if(!is(object, "default")){
stop(paste0("\nclass not supported. must be an object\n",
"from either the 'explore' or 'estimate' functions"))
}
# nodes
p <- object$p
# all posterior samples
if(is.null(iter)){
iter <- object$iter
}
# pcor to cor
cors <- pcor_to_cor(object, iter = iter)$R
# betas
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = p, style = 3)
}
betas <- lapply(1:p, function(x) {
beta_p <- .Call("_BGGM_beta_helper_fast",
XX = cors[-x, -x,],
Xy = cors[x, -x,],
p = p - 1,
iter = iter
)$coefs
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, x)
}
beta_p
})
} else {
stop("class not currently supported")
}
# remove samples so
# object does not become huge
object$post_samp <- 0
returned_object <- list(betas = betas, object = object)
class(returned_object) <- c("BGGM", "coef")
returned_object
}
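# A hedged illustration (simulated data; not part of the package) of the correspondence
# noted in the documentation above: for Gaussian data, the regression coefficients of
# node 1 on the remaining nodes equal -Theta[1, -1] / Theta[1, 1], where Theta is the
# precision (inverse covariance) matrix. Assumes the MASS package is available.
set.seed(3)
Y <- MASS::mvrnorm(n = 500, mu = rep(0, 4), Sigma = diag(4) + 0.3)
Yc <- scale(Y, center = TRUE, scale = FALSE)     # center so the identity is exact
Theta <- solve(cov(Yc))                          # precision matrix
beta_from_theta <- -Theta[1, -1] / Theta[1, 1]
beta_from_ols <- coef(lm(Yc[, 1] ~ Yc[, -1] - 1))
round(cbind(beta_from_theta, beta_from_ols), 6)  # the two columns agree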
#' Compute Regression Parameters for \code{explore} Objects
#'
#' There is a direct correspondence between the inverse covariance matrix and
#' multiple regression \insertCite{kwan2014regression,Stephens1998}{BGGM}. This readily allows
#' for converting the GGM parameters to regression coefficients. All data types are supported.
#'
#' @name coef.explore
#'
#' @param object An Object of class \code{explore}.
#'
#' @param iter Number of iterations (posterior samples; defaults to the number in the object).
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param ... Currently ignored.
#'
#' @references
#' \insertAllCited{}
#'
#' @return An object of class \code{coef}, containing two lists.
#'
#' \itemize{
#'
#' \item \code{betas} A list of length \emph{p}, each containing a \emph{p} - 1 by \code{iter} matrix of
#' posterior samples
#'
#' \item \code{object} An object of class \code{explore} (the fitted model).
#' }
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- ptsd[,1:4]
#'
#' ##########################
#' ### example 1: ordinal ###
#' ##########################
#'
#' # fit model (note + 1, due to zeros)
#' fit <- explore(Y + 1,
#' type = "ordinal",
#' iter = 250,
#' progress = FALSE)
#'
#' # summarize the partial correlations
#' reg <- coef(fit, progress = FALSE)
#'
#' # summary
#' summ <- summary(reg)
#'
#' summ
#'}
#' @export
coef.explore <- function(object,
iter = NULL,
progress = TRUE, ...) {
# check for object class
if(is(object, "estimate") | is(object, "explore")){
# check for default
if(!is(object, "default")){
stop(paste0("\nclass not supported. must be an object\n",
"from either the 'explore' or 'estimate' functions"))
}
# nodes
p <- object$p
# all posterior samples
if(is.null(iter)){
iter <- object$iter
}
# pcor to cor
cors <- pcor_to_cor(object, iter = iter)$R
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = p, style = 3)
}
betas <- lapply(1:p, function(x) {
beta_p <- .Call("_BGGM_beta_helper_fast",
XX = cors[-x, -x,],
Xy = cors[x, -x,],
p = p - 1,
iter = iter
)$coefs
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, x)
}
beta_p
})
} else {
stop("class not currently supported")
}
# remove samples so
# object does not become huge
object$post_samp <- 0
returned_object <- list(betas = betas,
object = object)
class(returned_object) <- c("BGGM", "coef")
returned_object
}
print_coef <- function(x,...){
# nodes
p <- length(x$betas)
# column names
cn <- colnames(x$object$Y)
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$object$type, "\n")
cat("Formula:", paste(as.character(x$object$formula),
collapse = " "), "\n")
cat("--- \n")
cat("Call: \n")
print(x$object$call)
cat("--- \n")
cat("Coefficients: \n \n")
if(is.null(cn)){
cn <- 1:p
}
for (i in seq_len(p)) {
# print outcome
cat(paste0(cn[i], ": \n"))
# coefs for node i
coef_i <- data.frame(t(round(colMeans(x$betas[[i]]), 3)))
# predictor names
colnames(coef_i) <- cn[-i]
# print coefs
print(coef_i, row.names = FALSE)
cat("\n")
}
}
#' Summarize \code{coef} Objects
#'
#' Summarize regression parameters with the posterior mean,
#' standard deviation, and credible interval.
#'
#' @param object An object of class \code{coef}.
#'
#' @param cred Numeric. The credible interval width for summarizing the posterior
#' distributions (defaults to 0.95; must be between 0 and 1).
#'
#' @param ... Currently ignored
#'
#' @return A list of length \emph{p} including the
#' summaries for each multiple regression.
#'
#' @note
#'
#' See \code{\link{coef.estimate}} and \code{\link{coef.explore}} for examples.
#'
#' @export
summary.coef <- function(object,
cred = 0.95,
...){
lb <- (1 - cred) / 2
ub <- 1 - lb
p <- object$object$p
post_mean <- t(sapply(1:p, function(x) apply(object$betas[[x]], MARGIN = 2, mean )))
post_sd <- t(sapply(1:p, function(x) apply(object$betas[[x]], MARGIN = 2, sd)))
post_lb <- t(sapply(1:p, function(x) apply(object$betas[[x]], MARGIN = 2, quantile, lb)))
post_ub <- t(sapply(1:p, function(x) apply(object$betas[[x]], MARGIN = 2, quantile, ub)))
res_i <- list()
for(i in 1:p){
res_i[[i]] <- round(data.frame(post_mean = post_mean[i,],
post_sd = post_sd[i,],
post_lb = post_lb[i,],
post_ub = post_ub[i,]), 3)
}
returned_object <- list(summaries = res_i,
object = object)
class(returned_object) <- c("BGGM",
"coef",
"summary.coef")
returned_object
}
print_summary_coef <- function(x,...){
# node names
cn <- colnames(x$object$object$Y)
# nodes
p <- ncol(x$object$object$Y)
# check for column names
if(is.null(cn)) {
cn <- 1:p
}
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$object$object$type, "\n")
cat("Formula:", paste(as.character(x$object$object$formula),
collapse = " "), "\n")
cat("--- \n")
cat("Call: \n")
print(x$object$object$call)
cat("--- \n")
cat("Coefficients: \n \n")
for(i in seq_len(p)){
cat(paste0( cn[i], ": \n"))
summ_i <- x$summaries[[i]]
colnames(summ_i) <- c("Post.mean", "Post.sd", "Cred.lb", "Cred.ub")
print( cbind.data.frame(Node = cn[-i], summ_i), row.names = FALSE)
cat("\n")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/coef.estimate.R
|
#' GGM: Confirmatory Hypothesis Testing
#'
#' @description Confirmatory hypothesis testing in GGMs. Hypotheses are expressed as equality
#' and/or inequality constraints on the partial correlations of interest. Here the focus is \emph{not}
#' on determining the graph (see \code{\link{explore}}) but testing specific hypotheses related to
#' the conditional (in)dependence structure. These methods were introduced in
#' \insertCite{Williams2019_bf;textual}{BGGM}.
#'
#' @param Y Matrix (or data frame) of dimensions \emph{n} (observations) by \emph{p} (variables).
#'
#' @param hypothesis Character string. The hypothesis (or hypotheses) to be tested. See details.
#'
#' @param prior_sd Numeric. Scale of the prior distribution, approximately the standard deviation
#' of a beta distribution (defaults to 0.25).
#'
#' @param formula An object of class \code{\link[stats]{formula}}. This allows for including
#' control variables in the model (e.g., \code{~ gender * education}).
#'
#' @param type Character string. Which type of data for \strong{Y} ? The options include \code{continuous},
#' \code{binary}, \code{ordinal}, or \code{mixed}. See the note for further details.
#'
#' @param mixed_type Numeric vector of length \emph{p}. An indicator for which variables should be treated as ranks.
#' (1 for rank and 0 to assume normality). The default is currently (dev version) to treat all integer variables
#' as ranks when \code{type = "mixed"} and \code{NULL} otherwise. See note for further details.
#'
#' @param iter Number of iterations (posterior samples; defaults to 25,000).
#'
#' @param impute Logical. Should the missing values (\code{NA})
#' be imputed during model fitting (defaults to \code{TRUE}) ?
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param seed An integer for the random seed.
#'
#' @param ... Currently ignored.
#'
#' @references
#' \insertAllCited{}
#'
#' @return The returned object of class \code{confirm} contains a lot of information that
#' is used for printing and plotting the results. For users of \strong{BGGM}, the following
#' are the useful objects:
#'
#' \itemize{
#'
#' \item \code{out_hyp_prob} Posterior hypothesis probabilities.
#'
#' \item \code{info} An object of class \code{BF} from the R package \strong{BFpack}.
#'
#' }
#'
#' @importFrom MASS ginv
#' @importFrom BFpack BF
#' @importFrom stats cov rbeta
#'
#' @details
#' The hypotheses can be written either with the respective column names or numbers.
#' For example, \code{1--2} denotes the relation between the variables in column 1 and 2.
#' Note that these must correspond to the upper triangular elements of the correlation
#' matrix. This is accomplished by ensuring that the first number is smaller than the second number.
#' This also applies when using column names (i.e., in reference to the column number).
#'
#' \strong{One Hypothesis}:
#'
#' To test whether some relations are larger than others, while others
#' are expected to be equal, this can be written as
#'
#'\itemize{
#' \item \code{hyp <- c("1--2 > 1--3 = 1--4 > 0")},
#'}
#'
#' where there is an additional constraint that all effects are expected to be positive.
#' This is then compared to the complement.
#'
#' \strong{More Than One Hypothesis}:
#'
#' The above hypothesis can also be compared to, say, a null model by using ";"
#' to separate the hypotheses, for example,
#'
#' \itemize{
#'
#' \item
#'
#' \code{hyp <- c("1--2 > 1--3 = 1--4 > 0; 1--2 = 1--3 = 1--4 = 0")}.
#'
#'
#' }
#'
#' Any number of hypotheses can be compared this way.
#'
#' \strong{Using "&"}
#'
#' It is also possible to include \code{&}. This allows for testing one constraint \bold{and}
#' another constraint as one hypothesis.
#'
#' \itemize{
#'
#' \item \code{hyp <- c("A1--A2 > A1--A2 & A1--A3 = A1--A3")}
#'
#' }
#'
#' Of course, it is then possible to include additional hypotheses by separating them with ";".
#' Note also that the column names were used in this example (e.g., \code{A1--A2} is the relation
#' between those nodes).
#'
#' \strong{Testing Sums}
#'
#' It might also be interesting to test the sum of partial correlations. For example, that the
#' sum of specific relations is larger than the sum of other relations. This can be written as
#'
#' \itemize{
#'
#' \item \code{hyp <- c("A1--A2 + A1--A3 > A1--A4 + A1--A5;
#' A1--A2 + A1--A3 = A1--A4 + A1--A5")}
#'
#' }
#'
#' \strong{Potential Delays}:
#'
#' There is a chance for a potentially long delay from the time the progress bar finishes
#' to when the function is done running. This occurs when the hypotheses require further
#' sampling to be tested, for example, when grouping relations
#' \code{c("(A1--A2, A1--A3) > (A1--A4, A1--A5)"}. This is not an error.
#'
#'
#' \strong{Controlling for Variables}:
#'
#' When controlling for variables, it is assumed that \code{Y} includes \emph{only}
#' the nodes in the GGM and the control variables. Internally, \code{only} the predictors
#' that are included in \code{formula} are removed from \code{Y}. This is not the behavior of, say,
#' \code{\link{lm}}, but was adopted to ensure users do not have to write out each variable that
#' should be included in the GGM. An example is provided below.
#'
#' \strong{Mixed Type}:
#'
#' The term "mixed" is somewhat of a misnomer, because the method can be used for data including \emph{only}
#' continuous or \emph{only} discrete variables \insertCite{hoff2007extending}{BGGM}. This is based on the
#' ranked likelihood which requires sampling the ranks for each variable (i.e., the data is not merely
#' transformed to ranks). This is computationally expensive when there are many levels. For example,
#' with continuous data, there are as many ranks as data points!
#'
#' The option \code{mixed_type} allows the user to determine which variable should be treated as ranks
#' and the "emprical" distribution is used otherwise. This is accomplished by specifying an indicator
#' vector of length \emph{p}. A one indicates to use the ranks, whereas a zero indicates to "ignore"
#' that variable. By default all integer variables are handled as ranks.
#'
#' \strong{Dealing with Errors}:
#'
#' An error is most likely to arise when \code{type = "ordinal"}. There are two common errors (although still rare):
#'
#' \itemize{
#'
#' \item The first is due to sampling the thresholds, especially when the data is heavily skewed.
#' This can result in an ill-defined matrix. If this occurs, we recommend to first try
#' decreasing \code{prior_sd} (i.e., a more informative prior). If that does not work, then
#' change the data type to \code{type = "mixed"} which then estimates a copula GGM
#' (this method can be used for data containing \strong{only} ordinal variables). This should
#' work without a problem.
#'
#' \item The second is due to how the ordinal data are categorized. For example, if the error states
#' that the index is out of bounds, this indicates that the first category is a zero. This is not allowed, as
#' the first category must be one. This is addressed by adding one (e.g., \code{Y + 1}) to the data matrix.
#'
#' }
#'
#' @note
#'
#' \strong{"Default" Prior}:
#'
#' In Bayesian statistics, a default Bayes factor needs to have several properties. I refer
#' interested users to \insertCite{@section 2.2 in @dablander2020default;textual}{BGGM}. In
#' \insertCite{Williams2019_bf;textual}{BGGM}, some of these properties were investigated (e.g.,
#' model selection consistency). That said, we would not consider this a "default" or "automatic"
#' Bayes factor and thus we encourage users to perform sensitivity analyses by varying the scale of the prior
#' distribution.
#'
#' Furthermore, it is important to note there is no "correct" prior and, also, there is no need
#' to entertain the possibility of a "true" model. Rather, the Bayes factor can be interpreted as
#' which hypothesis best (relative to each other) predicts the observed data
#' \insertCite{@Section 3.2 in @Kass1995}{BGGM}.
#'
#' \strong{Interpretation of Conditional (In)dependence Models for Latent Data}:
#'
#' See \code{\link{BGGM-package}} for details about interpreting GGMs based on latent data
#' (i.e., all data types besides \code{"continuous"}).
#'
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' ##########################
#' ### example 1: cheating ##
#' ##########################
#' # Here a true hypothesis is tested,
#' # which shows the method works nicely
#' # (peeked at partials beforehand)
#'
#' # data
#' Y <- BGGM::bfi[,1:10]
#'
#' hypothesis <- c("A1--A2 < A1--A3 < A1--A4 = A1--A5")
#'
#' # test cheat
#' test_cheat <- confirm(Y = Y,
#' type = "continuous",
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#' # print (probability of nearly 1 !)
#' test_cheat
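#'
#' # a second, hypothetical example (names and constraints are illustrative):
#' # compare the order constraint to a null hypothesis by separating the
#' # hypotheses with ";"
#' hypothesis2 <- c("A1--A2 < A1--A3 < A1--A4 = A1--A5; A1--A2 = A1--A3 = A1--A4 = A1--A5")
#'
#' test_null <- confirm(Y = Y,
#'                      type = "continuous",
#'                      hypothesis = hypothesis2,
#'                      iter = 250,
#'                      progress = FALSE)
#'
#' # posterior hypothesis probabilities
#' test_null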
#' }
#'@export
confirm <- function(Y, hypothesis,
prior_sd = 0.25,
formula = NULL,
type = "continuous",
mixed_type = NULL,
iter = 25000,
progress = TRUE,
impute = TRUE,
seed = 1, ...){
# temporary warning until missing data is fully implemented
if(type != "continuous"){
warning(paste0("imputation during model fitting is\n",
"currently only implemented for 'continuous' data."))
}
# removed per CRAN (8/12/21)
# old <- .Random.seed
set.seed(seed)
priorprob <- 1
# hyperparameter
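  # delta_solve() converts prior_sd (the approximate prior standard deviation
  # of the partial correlations) into the hyperparameter delta; a smaller
  # prior_sd corresponds to a larger delta, i.e., a more informative prior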
delta <- delta_solve(prior_sd)
p <- ncol(Y)
I_p <- diag(p)
# number of edges
pcors <- p*(p-1)*0.5
if(isTRUE(progress)){
message("BGGM: Posterior Sampling")
}
# continuous
if(type == "continuous"){
# no control
if(is.null(formula)){
# nodes
p <- ncol(Y)
if(!impute){
# na omit
Y <- as.matrix(na.omit(Y))
Y_miss <- Y
} else {
Y_miss <- ifelse(is.na(Y), 1, 0)
if(sum(Y_miss) == 0){
impute <- FALSE
}
# impute means
for(i in 1:p){
Y[which(is.na(Y[,i])),i] <- mean(na.omit(Y[,i]))
}
}
# scale Y
Y <- scale(Y, scale = F)
n <- nrow(Y)
start <- solve(cov(Y))
# posterior sample
post_samp <- .Call(
'_BGGM_Theta_continuous',
PACKAGE = 'BGGM',
Y = Y,
iter = iter + 50,
delta = delta,
epsilon = 0.01,
prior_only = 0,
explore = 1,
start = start,
progress = progress,
impute = impute,
Y_miss = Y_miss
)
# control for variables
} else {
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(scale(control_info$Y_groups[[1]], scale = F))
# nodes
p <- ncol(Y)
# observations
n <- nrow(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
start <- solve(cov(Y))
# posterior sample
post_samp <- .Call(
"_BGGM_mv_continuous",
Y = Y,
X = X,
delta = delta,
epsilon = 0.1,
iter = iter + 50,
start = start,
progress = progress
)
} # end control
# binary
} else if (type == "binary"){
# intercept only
if (is.null(formula)) {
# data
Y <- as.matrix(na.omit(Y))
      # observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
X <- matrix(1, n, 1)
start <- solve(cov(Y))
} else {
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
start <- solve(cov(Y))
}
# posterior sample
post_samp <- .Call(
"_BGGM_mv_binary",
Y = Y,
X = X,
delta = delta,
epsilon = 0.01,
iter = iter + 50,
beta_prior = 0.0001,
cutpoints = c(-Inf, 0, Inf),
start = start,
progress = progress
)
# ordinal
} else if(type == "ordinal"){
# intercept only
if(is.null(formula)){
# data
Y <- as.matrix(na.omit(Y))
      # observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# intercept only
X <- matrix(1, n, 1)
# start
start <- solve(cov(Y))
} else {
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
# start
start <- solve(cov(Y))
}
# categories
K <- max(apply(Y, 2, function(x) { length(unique(x)) } ))
# call c ++
post_samp <- .Call(
"_BGGM_mv_ordinal_albert",
Y = Y,
X = X,
iter = iter + 50,
delta = delta,
epsilon = 0.01,
K = K,
start = start,
progress = progress
)
} else if(type == "mixed"){
# no control variables allowed
if (!is.null(formula)) {
warning("formula ignored for mixed data at this time")
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
formula <- NULL
# start
start <- solve(cov(Y))
} else {
Y <- as.matrix(na.omit(Y))
# start
start <- solve(cov(Y))
}
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# default for ranks
if(is.null(mixed_type)) {
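      # default: treat a variable as a rank (idx = 1) only when all of its
      # observed values are integers; otherwise its empirical distribution is used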
idx = colMeans(round(Y) == Y)
idx = ifelse(idx == 1, 1, 0)
# user defined
} else {
idx = mixed_type
}
# rank following hoff (2008)
rank_vars <- rank_helper(Y)
post_samp <- .Call(
"_BGGM_copula",
z0_start = rank_vars$z0_start,
levels = rank_vars$levels,
K = rank_vars$K,
Sigma_start = cov(Y),
iter = iter + 50,
delta = delta,
epsilon = 0.01,
idx = idx,
progress = progress
)
} else {
stop("'type' not supported: must be continuous, binary, ordinal, or mixed.")
}
if(isTRUE(progress)){
message(paste0("BGGM: Prior Sampling "))
}
# sample prior
prior_samp <- .Call(
'_BGGM_sample_prior',
PACKAGE = 'BGGM',
Y = Y,
iter = 25000,
delta = delta,
epsilon = 0.01,
prior_only = 1,
explore = 0,
progress = progress
)$fisher_z
if(isTRUE(progress)){
message("BGGM: Testing Hypotheses")
}
col_names <- numbers2words(1:p)
mat_name <- sapply(col_names, function(x) paste(col_names,x, sep = ""))[upper.tri(I_p)]
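  # extract the Fisher-z transformed partial correlations (upper triangle) for
  # each posterior draw; the first 50 iterations are discarded as burn-in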
posterior_samples <- matrix(post_samp$fisher_z[, , 51:(iter+50)][upper.tri(I_p)],
iter, pcors,
byrow = TRUE)
prior_samples <- matrix(prior_samp[,,][upper.tri(I_p)],
25000,
pcors,
byrow = TRUE)
colnames(posterior_samples) <- mat_name
colnames(prior_samples) <- mat_name
prior_mu <- colMeans(prior_samples)
prior_cov <- cov(prior_samples)
post_mu <- colMeans(posterior_samples)
post_cov <- cov(posterior_samples)
BFprior <- BF(prior_mu,
Sigma = prior_cov,
hypothesis = convert_hyps(hypothesis, Y = Y),
n = 1)
BFpost <- BF(post_mu,
Sigma = post_cov,
hypothesis = convert_hyps(hypothesis, Y = Y),
n = 1)
# number of hypotheses
n_hyps <- nrow(BFpost$BFtable_confirmatory)
# BF against unconstrained
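  # for each hypothesis, the Bayes factor versus the unconstrained model is the
  # ratio of how well the posterior satisfies the constraints (fit) to how well
  # the prior does (complexity), each computed from the normal approximations
  # passed to BFpack::BF() above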
BF_tu <- NA
for(i in seq_len(n_hyps)){
# BF tu
BF_tu[i] <- prod(BFpost$BFtable_confirmatory[i,3:4] / BFprior$BFtable_confirmatory[i,3:4])
}
# posterior hyp probs
out_hyp_prob <- BF_tu*priorprob / sum(BF_tu*priorprob)
# BF matrix
BF_matrix <- matrix(rep(BF_tu, length(BF_tu)),
ncol = length(BF_tu),
byrow = TRUE)
BF_matrix[is.nan(BF_matrix)] <- 0
diag(BF_matrix) <- 1
BF_matrix <- t(BF_matrix) / (BF_matrix)
row.names(BF_matrix) <- row.names( BFpost$BFtable_confirmatory)
colnames(BF_matrix) <- row.names( BFpost$BFtable_confirmatory)
if(isTRUE(progress)){
message("BGGM: Finished")
}
# removed per CRAN (8/12/21)
#.Random.seed <<- old
returned_object <- list(BF_matrix = BF_matrix,
out_hyp_prob = out_hyp_prob,
info = BFpost,
dat = Y,
type = type,
call = match.call(),
hypothesis = hypothesis,
iter = iter, p = p,
delta = delta,
ppd_mean = post_samp$ppd_mean)
class(returned_object) <- c("BGGM", "confirm")
returned_object
}
print_confirm <- function(x, ...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("Type:", x$type , "\n")
cat("--- \n")
cat("Posterior Samples:", x$iter, "\n")
cat("Observations (n):", nrow(x$dat), "\n")
cat("Variables (p):", x$p, "\n")
cat("Delta:", x$delta, "\n")
cat("--- \n")
cat("Call:\n")
print(x$call)
cat("--- \n")
cat("Hypotheses: \n\n")
hyps <- strsplit(x$hypothesis, ";")
n_hyps <- length(hyps[[1]])
x$info$hypotheses[1:n_hyps] <- hyps[[1]]
n_hyps <- length(x$info$hypotheses)
for (h in seq_len(n_hyps)) {
cat(paste0("H", h, ": ", gsub(" ", "", gsub('[\n]', '', x$info$hypotheses[h])), "\n"))
}
cat("--- \n")
cat("Posterior prob: \n\n")
for(h in seq_len(n_hyps)){
cat(paste0("p(H",h,"|data) = ", round(x$out_hyp_prob[h], 3 ) ))
cat("\n")
}
cat("--- \n")
cat('Bayes factor matrix: \n')
print(round(x$BF_matrix, 3))
cat("--- \n")
cat("note: equal hypothesis prior probabilities")
}
#' @title Constrained Posterior Distribution
#'
#' @description Compute the posterior distribution
#' with off-diagonal elements of the precision matrix constrained
#' to zero.
#'
#' @param object An object of class \code{estimate} or \code{explore}
#'
#' @param adj A \code{p} by \code{p} adjacency matrix. The zero entries denote the
#' elements that should be constrained to zero.
#'
#' @param method Character string. Which method should be used ? Defaults to
#' the "direct sampler" (i.e., \code{method = "direct"}) described in
#' \insertCite{@page 122, section 2.4, @lenkoski2013direct;textual}{BGGM}. The other
#' option is a Metropolis-Hastings algorithm (\code{MH}).
#' See details.
#'
#' @param iter Number of iterations (posterior samples; defaults to 5000).
#'
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param ... Currently ignored.
#'
#' @references
#' \insertAllCited{}
#'
#'
#' @return An object of class \code{constrained}, including
#'
#' \itemize{
#'
#' \item \code{precision_mean} The posterior mean for the precision matrix.
#'
#' \item \code{pcor_mean} The posterior mean for the partial correlation matrix.
#'
#' \item \code{precision_samps} A 3d array of dimension \code{p} by \code{p} by \code{iter}
#' including the sampled precision matrices.
#'
#' \item \code{pcor_samps} A 3d array of dimension \code{p} by \code{p} by \code{iter}
#' including sampled partial correlations matrices.
#' }
#'
#'
#' @examples
#' \donttest{
#'
#' # data
#' Y <- bfi[,1:10]
#'
#' # sample posterior
#' fit <- estimate(Y, iter = 100)
#'
#' # select graph
#' sel <- select(fit)
#'
#' # constrained posterior
#' post <- constrained_posterior(object = fit,
#' adj = sel$adj,
#' iter = 100,
#' progress = FALSE)
#'
#' }
#' @export
constrained_posterior <- function(object,
adj,
method = "direct",
iter = 5000,
progress = TRUE,
...){
if (!any(class(object) %in% c("estimate", "explore"))) {
stop("object must be of class 'estimate' or 'explore'")
}
if (object$iter < iter) {
stop("iter exceeds iter in the object")
}
# ensure diagonal is 1
diag(adj) <- 1
# number of nodes
p <- object$p
if(progress){
message(paste0("BGGM: Constrained Posterior"))
}
if(method == "direct"){
    # convert to correlations
cors <- pcor_to_cor(object, iter = iter)
diag(adj) <- 1
Theta_samps <- .Call("_BGGM_contrained_helper",
cors = cors$R,
adj = adj,
iter = iter,
progress = progress)
mh_object <- NULL
}
if(method == "MH"){
prior_sd <- object$prior_sd
Y <- object$Y
p <- object$p
n <- object$n
prec <- precision(object, progress = FALSE)
iter <- object$iter
samples <- matrix(prec$precision[ ,, 1:iter][lower.tri(diag(p), diag = TRUE)],
nrow = iter,
ncol = p*(p-1)*0.5 + p,
byrow = TRUE)
samps1 <- rw_helper(adj = adj,
means_post = colMeans(samples),
cov_post = cov(samples))
# burning in: 500; thin: 20
iter_comb <- iter * 20 + 500
mh_object <- .Call("_BGGM_fast_g_matrix_F",
PACKAGE = "BGGM",
Y = Y,
adj = adj,
mu_samples = samps1$mean_post,
cov_samples = samps1$cov_post,
iter = iter * 20 + 500,
p = p,
N = n,
prior_sd = prior_sd,
kappa1 = 0.05,
                       progress = TRUE)
Theta_samps <- mh_object$Theta_G[,,seq(501, iter_comb, 20)]
mh_object$Theta_G <- NULL
}
if(isTRUE(progress)){
message("BGGM: Finished")
}
# partial correlations
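  # pcor_ij = -theta_ij / sqrt(theta_ii * theta_jj): cov2cor() standardizes the
  # precision matrix, the minus sign flips the off-diagonal elements, and
  # diag(2, p) restores the ones on the diagonal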
pcor_samps <- vapply(1:iter, function(x)
-cov2cor(Theta_samps[,,x]) + diag(2, p),
FUN.VALUE = matrix(0, p, p)
)
Theta_mu <- mean_array(Theta_samps)
pcor_mu <- mean_array(pcor_samps)
colnames(pcor_mu) <- colnames(object$Y)
row.names(pcor_mu) <- colnames( object$Y )
returned_object <- list(
precision_mean = Theta_mu,
pcor_mean = pcor_mu,
precision_samps = Theta_samps,
pcor_samps = pcor_samps,
mh_object = mh_object
)
class(returned_object) <- c("BGGM",
"constrained")
return(returned_object)
}
print_constrained <- function(x, ...){
cat("BGGM: Bayesian Gaussian Graphical Models\n")
cat("Constrained posterior\n")
cat("---\n")
cat("Estimates: \n\n")
print(round(x$pcor_mean, 3))
}
rw_helper <- function(adj, means_post, cov_post){
diag(adj) <- 1
p <- ncol(adj)
selectlower <- lower.tri(diag(p), diag = TRUE)
which0 <- which(adj[selectlower]==0)
which1 <- which(adj[selectlower]==1)
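  # conditional multivariate normal: mean and covariance of the free (nonzero)
  # elements given that the constrained elements are fixed at zero, i.e.,
  # mu_1|0 = mu_1 + S_10 S_00^{-1} (0 - mu_0) and S_1|0 = S_11 - S_10 S_00^{-1} S_01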
means1_post <- c(means_post[which1] +
cov_post[which1,which0] %*%
solve(cov_post[which0,which0]) %*%
(-means_post[which0]))
cov1_post <- cov_post[which1,which1] -
cov_post[which1,which0] %*%
solve(cov_post[which0,which0]) %*%
cov_post[which0,which1]
list(mean_post = means1_post, cov_post = cov1_post)
}
#' MCMC Convergence
#'
#' Monitor convergence of the MCMC algorithms.
#'
#' @param object An object of class \code{estimate} or \code{explore}
#'
#' @param param Character vector. Names of parameters for which to monitor MCMC convergence.
#'
#' @param type Character string. Which type of convergence plot ? The current
#' options are \code{trace} (default) and \code{acf}.
#'
#' @param print_names Logical. Should the parameter names be printed (defaults to \code{FALSE})? This
#' can be used to first determine the parameter names to specify in \code{param}.
#'
#' @return A list of \code{ggplot} objects.
#'
#' @note An overview of MCMC diagnostics can be found \href{https://sbfnk.github.io/mfiidd/mcmc_diagnostics.html}{here}.
#'
#' @importFrom stats acf
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- ptsd[,1:5]
#'
#' #########################
#' ###### continuous #######
#' #########################
#' fit <- estimate(Y, iter = 250,
#' progress = FALSE)
#'
#' # print names first
#' convergence(fit, print_names = TRUE)
#'
#' # trace plots
#' convergence(fit, type = "trace",
#' param = c("B1--B2", "B1--B3"))[[1]]
#'
#' # acf plots
#' convergence(fit, type = "acf",
#' param = c("B1--B2", "B1--B3"))[[1]]
#'}
#' @export
convergence <- function(object,
param = NULL,
type = "trace",
print_names = FALSE){
# posterior samples
samps <- posterior_samples(object)
# print names ?
if(!isFALSE(print_names)){
print(colnames(samps))
} else {
# trace plot
if(type == "trace"){
# number of params
params <- length(param)
plts <- lapply(1:params, function(x){
dat <- as.data.frame( samps[,param[x]])
dat$iteration <- 1:nrow(dat)
ggplot(data = dat,
mapping = aes(x = iteration,
y = dat[,1])) +
geom_line(alpha = 0.75) +
geom_hline(yintercept = mean(dat[,1]),
color = "red")+
ggtitle(param[x]) +
ylab("Estimate")
})
} else if(type == "acf"){
params <- length(param)
plts <- lapply(1:params, function(x) {
dat <- with(acf(samps[,param[x]],
plot = FALSE),
data.frame(lag, acf));
ggplot(data = dat,
mapping = aes(x = lag,
y = acf)) +
geom_hline(aes(yintercept = 0)) +
geom_segment(mapping = aes(xend = lag,
yend = 0)) +
ggtitle(param[x])
})
} else {
stop("type not supported. must be 'trace' or 'acf'")
}
plts
}
}
#' Data: Post-Traumatic Stress Disorder
#'
#' A dataset containing items that measure Post-traumatic stress disorder symptoms \insertCite{armour2017network}{BGGM}.
#' There are 20 variables (\emph{p}) and 221 observations (\emph{n}).
#'
#' \itemize{
#' \item Intrusive Thoughts
#' \item Nightmares
#' \item Flashbacks
#' \item Emotional cue reactivity
#' \item Psychological cue reactivity
#' \item Avoidance of thoughts
#' \item Avoidance of reminders
#' \item Trauma-related amnesia
#' \item Negative beliefs
#' \item Blame of self or others
#' \item Negative trauma-related emotions
#' \item Loss of interest
#' \item Detachment
#' \item Restricted affect
#' \item Irritability/anger
#' \item Self-destructive/reckless behavior
#' \item Hypervigilance
#' \item Exaggerated startle response
#' \item Difficulty concentrating
#' \item Sleep disturbance
#' }
#'
#' @docType data
#'
#' @keywords datasets
#'
#' @name ptsd
#'
#' @usage data("ptsd")
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @format A dataframe with 221 rows and 20 variables
NULL
#' Data: Post-Traumatic Stress Disorder (Sample # 1)
#'
#' A correlation matrix that includes 16 variables. The correlation matrix was estimated from 526
#' individuals \insertCite{fried2018replicability}{BGGM}.
#'
#' \itemize{
#' \item Intrusive Thoughts
#' \item Nightmares
#' \item Flashbacks
#' \item Physiological/psychological reactivity
#' \item Avoidance of thoughts
#' \item Avoidance of situations
#' \item Amnesia
#' \item Disinterest in activities
#' \item Feeling detached
#' \item Emotional numbing
#' \item Foreshortened future
#' \item Sleep problems
#' \item Irritability
#' \item Concentration problems
#' \item Hypervigilance
#' \item Startle response
#' }
#'
#' @docType data
#'
#' @keywords datasets
#'
#' @name ptsd_cor1
#'
#' @examples
#'
#' data(ptsd_cor1)
#'
#' Y <- MASS::mvrnorm(n = 526,
#' mu = rep(0, 16),
#' Sigma = ptsd_cor1,
#' empirical = TRUE)
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @format A correlation matrix with 16 variables
NULL
#' Data: Post-Traumatic Stress Disorder (Sample # 2)
#'
#' A correlation matrix that includes 16 variables. The correlation matrix
#' was estimated from 365 individuals \insertCite{fried2018replicability}{BGGM}.
#'
#' \itemize{
#' \item Intrusive Thoughts
#' \item Nightmares
#' \item Flashbacks
#' \item Physiological/psychological reactivity
#' \item Avoidance of thoughts
#' \item Avoidance of situations
#' \item Amnesia
#' \item Disinterest in activities
#' \item Feeling detached
#' \item Emotional numbing
#' \item Foreshortened future
#' \item Sleep problems
#' \item Irritability
#' \item Concentration problems
#' \item Hypervigilance
#' \item Startle response
#' }
#'
#' @docType data
#'
#' @keywords datasets
#'
#' @name ptsd_cor2
#'
#' @examples
#' data(ptsd_cor2)
#' Y <- MASS::mvrnorm(n = 365,
#' mu = rep(0, 16),
#' Sigma = ptsd_cor2,
#' empirical = TRUE)
#' @references
#'
#' \insertAllCited{}
#'
#' @format A correlation matrix with 16 variables
NULL
#' Data: Post-Traumatic Stress Disorder (Sample # 3)
#'
#' A correlation matrix that includes 16 variables. The correlation matrix
#' was estimated from 926 individuals \insertCite{fried2018replicability}{BGGM}.
#'
#' \itemize{
#'
#' \item Intrusive Thoughts
#' \item Nightmares
#' \item Flashbacks
#' \item Physiological/psychological reactivity
#' \item Avoidance of thoughts
#' \item Avoidance of situations
#' \item Amnesia
#' \item Disinterest in activities
#' \item Feeling detached
#' \item Emotional numbing
#' \item Foreshortened future
#' \item Sleep problems
#' \item Irritability
#' \item Concentration problems
#' \item Hypervigilance
#' \item Startle response
#' }
#'
#' @docType data
#'
#' @keywords datasets
#'
#' @name ptsd_cor3
#'
#' @examples
#' data(ptsd_cor3)
#' Y <- MASS::mvrnorm(n = 926,
#' mu = rep(0, 16),
#' Sigma = ptsd_cor3,
#' empirical = TRUE)
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @format A correlation matrix with 16 variables
#'
NULL
#' Data: Post-Traumatic Stress Disorder (Sample # 4)
#'
#' A correlation matrix that includes 16 variables. The correlation matrix
#' was estimated from 965 individuals \insertCite{fried2018replicability}{BGGM}.
#'
#' \itemize{
#' \item Intrusive Thoughts
#' \item Nightmares
#' \item Flashbacks
#' \item Physiological/psychological reactivity
#' \item Avoidance of thoughts
#' \item Avoidance of situations
#' \item Amnesia
#' \item Disinterest in activities
#' \item Feeling detached
#' \item Emotional numbing
#' \item Foreshortened future
#' \item Sleep problems
#' \item Irritability
#' \item Concentration problems
#' \item Hypervigilance
#' \item Startle response
#' }
#'
#' @docType data
#'
#' @keywords datasets
#'
#' @name ptsd_cor4
#'
#' @examples
#' data(ptsd_cor4)
#' Y <- MASS::mvrnorm(n = 965,
#' mu = rep(0, 16),
#' Sigma = ptsd_cor4,
#' empirical = TRUE)
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @format A correlation matrix with 16 variables
NULL
#' Data: 25 Personality items representing 5 factors
#'
#' This dataset and the corresponding documentation was taken from the \strong{psych} package. We refer users to that
#' package for further details \insertCite{psych}{BGGM}.
#'
#' \itemize{
#' \item \code{A1} Am indifferent to the feelings of others. (q_146)
#' \item \code{A2} Inquire about others' well-being. (q_1162)
#' \item \code{A3} Know how to comfort others. (q_1206)
#' \item \code{A4} Love children. (q_1364)
#' \item \code{A5} Make people feel at ease. (q_1419)
#' \item \code{C1} Am exacting in my work. (q_124)
#' \item \code{C2} Continue until everything is perfect. (q_530)
#' \item \code{C3} Do things according to a plan. (q_619)
#' \item \code{C4} Do things in a half-way manner. (q_626)
#' \item \code{C5} Waste my time. (q_1949)
#' \item \code{E1} Don't talk a lot. (q_712)
#' \item \code{E2} Find it difficult to approach others. (q_901)
#' \item \code{E3} Know how to captivate people. (q_1205)
#' \item \code{E4} Make friends easily. (q_1410)
#' \item \code{E5} Take charge. (q_1768)
#' \item \code{N1} Get angry easily. (q_952)
#' \item \code{N2} Get irritated easily. (q_974)
#' \item \code{N3} Have frequent mood swings. (q_1099)
#' \item \code{N4} Often feel blue. (q_1479)
#' \item \code{N5} Panic easily. (q_1505)
#' \item \code{O1} Am full of ideas. (q_128)
#' \item \code{O2} Avoid difficult reading material. (q_316)
#' \item \code{O3} Carry the conversation to a higher level. (q_492)
#' \item \code{O4} Spend time reflecting on things. (q_1738)
#' \item \code{O5} Will not probe deeply into a subject. (q_1964)
#' \item \code{gender} Males = 1, Females =2
#' \item \code{education} 1 = HS, 2 = finished HS, 3 = some college, 4 = college graduate 5 = graduate degree
#' }
#'
#' @docType data
#'
#' @keywords datasets
#'
#' @name bfi
#'
#' @usage data("bfi")
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @format A data frame with 25 variables and 2800 observations (including missing values)
NULL
#' Data: Contingencies of Self-Worth Scale (CSWS)
#'
#' A dataset containing items from the Contingencies of Self-Worth Scale (CSWS) scale. There are 35 variables and
#' 680 observations
#'
#' \itemize{
#' \item \code{1} When I think I look attractive, I feel good about myself
#' \item \code{2} My self-worth is based on God's love
#' \item \code{3} I feel worthwhile when I perform better than others on a task or skill.
#' \item \code{4} My self-esteem is unrelated to how I feel about the way my body looks.
#' \item \code{5} Doing something I know is wrong makes me lose my self-respect
#' \item \code{6} I don't care if other people have a negative opinion about me.
#' \item \code{7} Knowing that my family members love me makes me feel good about myself.
#' \item \code{8} I feel worthwhile when I have God's love.
#' \item \code{9} I can’t respect myself if others don't respect me.
#' \item \code{10} My self-worth is not influenced by the quality of my relationships with my family members.
#' \item \code{11} Whenever I follow my moral principles, my sense of self-respect gets a boost.
#' \item \code{12} Knowing that I am better than others on a task raises my self-esteem.
#' \item \code{13} My opinion about myself isn't tied to how well I do in school.
#' \item \code{14} I couldn't respect myself if I didn't live up to a moral code.
#' \item \code{15} I don't care what other people think of me.
#' \item \code{16} When my family members are proud of me, my sense of self-worth increases.
#' \item \code{17} My self-esteem is influenced by how attractive I think my face or facial features are.
#' \item \code{18} My self-esteem would suffer if I didn’t have God's love.
#' \item \code{19} Doing well in school gives me a sense of self-respect.
#' \item \code{20} Doing better than others gives me a sense of self-respect.
#' \item \code{21} My sense of self-worth suffers whenever I think I don't look good.
#' \item \code{22} I feel better about myself when I know I'm doing well academically.
#' \item \code{23} What others think of me has no effect on what I think about myself.
#' \item \code{24} When I don’t feel loved by my family, my self-esteem goes down.
#' \item \code{25} My self-worth is affected by how well I do when I am competing with others.
#' \item \code{26} My self-esteem goes up when I feel that God loves me.
#' \item \code{27} My self-esteem is influenced by my academic performance.
#' \item \code{28} My self-esteem would suffer if I did something unethical.
#' \item \code{29} It is important to my self-respect that I have a family that cares about me.
#' \item \code{30} My self-esteem does not depend on whether or not I feel attractive.
#' \item \code{31} When I think that I’m disobeying God, I feel bad about myself.
#' \item \code{32} My self-worth is influenced by how well I do on competitive tasks.
#' \item \code{33} I feel bad about myself whenever my academic performance is lacking.
#' \item \code{34} My self-esteem depends on whether or not I follow my moral/ethical principles.
#' \item \code{35} My self-esteem depends on the opinions others hold of me.
#' \item \code{gender} "M" (male) or "F" (female)
#'
#' }
#'
#' @note There are seven domains
#'
#' FAMILY SUPPORT: items 7, 10, 16, 24, and 29.
#'
#' COMPETITION: items 3, 12, 20, 25, and 32.
#'
#' APPEARANCE: items 1, 4, 17, 21, and 30.
#'
#' GOD'S LOVE: items 2, 8, 18, 26, and 31.
#'
#' ACADEMIC COMPETENCE: items 13, 19, 22, 27, and 33.
#'
#' VIRTUE: items 5, 11, 14, 28, and 34.
#'
#' APPROVAL FROM OTHERS: items: 6, 9, 15, 23, and 35.
#' @docType data
#' @keywords datasets
#' @name csws
#' @usage data("csws")
#' @examples
#' data("csws")
#'
#'
#' @references
#' Briganti, G., Fried, E. I., & Linkowski, P. (2019). Network analysis of Contingencies of Self-Worth
#' Scale in 680 university students. Psychiatry research, 272, 252-257.
#' @format A data frame with 35 variables and 680 observations (7 point Likert scale)
NULL
#' Data: Toronto Alexithymia Scale (TAS)
#'
#' A dataset containing items from the Toronto Alexithymia Scale (TAS). There are 20 variables and
#' 1925 observations
#'
#' \itemize{
#' \item \code{1} I am often confused about what emotion I am feeling
#' \item \code{2} It is difficult for me to find the right words for my feelings
#' \item \code{3} I have physical sensations that even doctors don’t understand
#' \item \code{4} I am able to describe my feelings easily
#' \item \code{5} I prefer to analyze problems rather than just describe them
#' \item \code{6} When I am upset, I don’t know if I am sad, frightened, or angry
#' \item \code{7} I am often puzzled by sensations in my body
#' \item \code{8} I prefer just to let things happen rather than to understand why they turned out that way
#' \item \code{9} I have feelings that I can’t quite identify
#' \item \code{10} Being in touch with emotions is essential
#' \item \code{11} I find it hard to describe how I feel about people
#' \item \code{12} People tell me to describe my feelings more
#' \item \code{13} I don’t know what’s going on inside me
#' \item \code{14} I often don’t know why I am angry
#' \item \code{15} I prefer talking to people about their daily activities rather than their feelings
#' \item \code{16} I prefer to watch “light” entertainment shows rather than psychological dramas
#' \item \code{17} It is difficult for me to reveal my innermost feelings, even to close friends
#' \item \code{18} I can feel close to someone, even in moments of silence
#' \item \code{19} I find examination of my feelings useful in solving personal problems
#' \item \code{20} Looking for hidden meanings in movies or plays distracts from their enjoyment
#' \item \code{gender} "M" (male) or "F" (female)
#'
#' }
#'
#' @note There are three domains
#'
#' Difficulty identifying feelings: items 1, 3, 6, 7, 9, 13, 14
#'
#' Difficulty describing feelings: items 2, 4, 11, 12, 17
#'
#' Externally oriented thinking: items 10, 15, 16, 18, 19
#'
#' @docType data
#' @keywords datasets
#' @name tas
#' @usage data("tas")
#' @examples
#' data("tas")
#'
#' @references
#' Briganti, G., & Linkowski, P. (2019). Network approach to items and domains from
#' the Toronto Alexithymia Scale. Psychological reports.
#' @format A data frame with 20 variables and 1925 observations (5 point Likert scale)
NULL
#' Data: Interpersonal Reactivity Index (IRI)
#'
#' A dataset containing items from the Interpersonal Reactivity Index (IRI; an empathy measure). There are 28 variables and
#' 1973 observations
#'
#' \itemize{
#' \item \code{1} I daydream and fantasize, with some regularity, about things that might happen to me.
#' \item \code{2} I often have tender, concerned feelings for people less fortunate than me.
#' \item \code{3} I sometimes find it difficult to see things from the "other guy's" point of view.
#' \item \code{4} Sometimes I don't feel very sorry for other people when they are having problems.
#' \item \code{5} I really get involved with the feelings of the characters in a novel.
#' \item \code{6} In emergency situations, I feel apprehensive and ill-at-ease.
#' \item \code{7} I am usually objective when I watch a movie or play, and I don't often get completely caught up in it.
#' \item \code{8} I try to look at everybody's side of a disagreement before I make a decision.
#' \item \code{9} When I see someone being taken advantage of, I feel kind of protective towards them.
#' \item \code{10} I sometimes feel helpless when I am in the middle of a very emotional situation.
#' \item \code{11} I sometimes try to understand my friends better
#' by imagining how things look from their perspective
#' \item \code{12} Becoming extremely involved in a good book or movie is somewhat rare for me.
#' \item \code{13} When I see someone get hurt, I tend to remain calm.
#' \item \code{14} Other people's misfortunes do not usually disturb me a great deal.
#' \item \code{15} If I'm sure I'm right about something, I don't waste much
#' time listening to other people's arguments.
#' \item \code{16} After seeing a play or movie, I have felt as though I were one of the characters.
#' \item \code{17} Being in a tense emotional situation scares me.
#' \item \code{18} When I see someone being treated unfairly,
#' I sometimes don't feel very much pity for them.
#' \item \code{19} I am usually pretty effective in dealing with emergencies.
#' \item \code{20} I am often quite touched by things that I see happen.
#' \item \code{21} I believe that there are two sides to every question and try to look at them both.
#' \item \code{22} I would describe myself as a pretty soft-hearted person.
#' \item \code{23} When I watch a good movie, I can very easily put myself in
#' the place of a leading character
#' \item \code{24} I tend to lose control during emergencies.
#' \item \code{25} When I'm upset at someone, I usually try to "put myself in his shoes" for a while.
#' \item \code{26} When I am reading an interesting story or novel, I imagine how I would feel if the
#' events in the story were happening to me.
#' \item \code{27} When I see someone who badly needs help in an emergency, I go to pieces.
#' \item \code{28} Before criticizing somebody, I try to imagine how I would feel if I were in their place.
#' \item \code{gender} "M" (male) or "F" (female)
#'
#' }
#'
#' @note There are four domains
#'
#' Fantasy: items 1, 5, 7, 12, 16, 23, 26
#'
#' Perspective taking: items 3, 8, 11, 15, 21, 25, 28
#'
#' Empathic concern: items 2, 4, 9, 14, 18, 20, 22
#'
#' Personal distress: items 6, 10, 13, 17, 19, 24, 27,
#'
#' @docType data
#' @keywords datasets
#' @name iri
#' @usage data("iri")
#' @examples
#' data("iri")
#'
#' @references
#'Briganti, G., Kempenaers, C., Braun, S., Fried, E. I., & Linkowski, P. (2018). Network analysis of
#'empathy items from the interpersonal reactivity index in 1973
#'young adults. Psychiatry research, 265, 87-92.
#' @format A data frame with 28 variables and 1973 observations (5 point Likert scale)
NULL
#' Data: Resilience Scale of Adults (RSA)
#'
#' A dataset containing items from the Resilience Scale of Adults (RSA). There are 33 items and
#' 675 observations
#'
#' \itemize{
#' \item \code{1} My plans for the future are
#' \item \code{2} When something unforeseen happens
#' \item \code{3} My family understanding of what is important in life is
#' \item \code{4} I feel that my future looks
#' \item \code{5} My goals
#' \item \code{6} I can discuss personal issues with
#' \item \code{7} I feel
#' \item \code{8} I enjoy being
#' \item \code{9} Those who are good at encouraging are
#' \item \code{10} The bonds among my friends
#' \item \code{11} My personal problems
#' \item \code{12} When a family member experiences a crisis/emergency
#' \item \code{13} My family is characterised by
#' \item \code{14} To be flexible in social settings
#' \item \code{15} I get support from
#' \item \code{16} In difficult periods my family
#' \item \code{17} My judgements and decisions
#' \item \code{18} New friendships are something
#' \item \code{19} When needed, I have
#' \item \code{20} I am at my best when I
#' \item \code{21} Meeting new people is
#' \item \code{22} When I am with others
#' \item \code{23} When I start on new things/projects
#' \item \code{24} Facing other people, our family acts
#' \item \code{25} Belief in myself
#' \item \code{26} For me, thinking of good topics of conversation is
#' \item \code{27} My close friends/family members
#' \item \code{28} I am good at
#' \item \code{29} In my family, we like to
#' \item \code{30} Rules and regular routines
#' \item \code{31} In difficult periods I have a tendency to
#' \item \code{32} My goals for the future are
#' \item \code{33} Events in my life that I cannot influence
#' \item \code{gender} "M" (male) or "F" (female)
#'
#' }
#'
#' @note There are 6 domains
#'
#' Planned future: items 1, 4, 5, 32
#'
#' Perception of self: items 2, 11, 17, 25, 31, 33
#'
#' Family cohesion: items 3, 7, 13, 16, 24, 29
#'
#' Social resources: items 6, 9, 10, 12, 15, 19, 27
#'
#' Social Competence: items 8, 14, 18, 21, 22, 26,
#'
#' Structured style: items 23, 28, 30
#'
#' @docType data
#'
#' @keywords datasets
#' @name rsa
#'
#' @usage data("rsa")
#'
#' @examples
#' data("rsa")
#'
#' @references
#' Briganti, G., & Linkowski, P. (2019). Item and domain network structures of the Resilience
#' Scale for Adults in 675 university students. Epidemiology and psychiatric sciences, 1-9.
#' @format A data frame with 33 variables and 675 observations
NULL
#' @title Data: Sachs Network
#'
#' @description Protein expression in human immune system cells
#'
#' @name Sachs
#'
#' @docType data
#'
#' @keywords datasets
#'
#' @usage data("Sachs")
#'
#' @examples
#' data("Sachs")
#'
#' @format A data frame containing 7466 cells (n = 7466) and flow cytometry
#' measurements of 11 (p = 11) phosphorylated proteins and phospholipids
#'
#' @references
#' Sachs, K., Gifford, D., Jaakkola, T., Sorger, P., & Lauffenburger, D. A. (2002).
#' Bayesian network approach to cell signaling pathway modeling. Sci. STKE, 2002(148), pe38-pe38.
NULL
#' Data: Autism and Obsessive Compulsive Disorder
#'
#' A correlation matrix with 17 variables in total (autism: 9; OCD: 8).
#' The sample size was 213.
#'
#'
#' \strong{Autism}:
#'
#' \itemize{
#'
#' \item \code{CI} Circumscribed interests
#' \item \code{UP} Unusual preoccupations
#' \item \code{RO} Repetitive use of objects or interests in parts of objects
#' \item \code{CR} Compulsions and/or rituals
#' \item \code{CI} Unusual sensory interests
#' \item \code{SM} Complex mannerisms or stereotyped body movements
#' \item \code{SU} Stereotyped utterances/delayed echolalia
#' \item \code{NIL} Neologisms and/or idiosyncratic language
#' \item \code{VR} Verbal rituals
#' }
#'
#' \strong{OCD}
#'
#' \itemize{
#' \item \code{CD} Concern with things touched due to dirt/bacteria
#' \item \code{TB} Thoughts of doing something bad around others
#' \item \code{CT} Continual thoughts that do not go away
#' \item \code{HP} Belief that someone/higher power put reoccurring thoughts in their head
#' \item \code{CW} Continual washing
#' \item \code{CCh} Continual checking
#' \item \code{CC} Continual counting/repeating
#' \item \code{RD} Repeatedly do things until it feels good or just right
#'
#' }
#'
#'
#' @docType data
#'
#' @keywords datasets
#' @name asd_ocd
#'
#' @usage data("asd_ocd")
#'
#' @examples
#' data("asd_ocd")
#'
#' # generate continuous
#' Y <- MASS::mvrnorm(n = 213,
#' mu = rep(0, 17),
#' Sigma = asd_ocd,
#' empirical = TRUE)
#'
#'
#' @format A correlation matrix including 17 variables. These data were measured on a 4 level likert scale.
#'
#' @references
#' Jones, P. J., Ma, R., & McNally, R. J. (2019). Bridge centrality:
#' A network approach
#' to understanding comorbidity. Multivariate behavioral research, 1-15.
#'
#' Ruzzano, L., Borsboom, D., & Geurts, H. M. (2015).
#' Repetitive behaviors in autism and obsessive-compulsive
#' disorder: New perspectives from a network analysis.
#' Journal of Autism and Developmental Disorders, 45(1),
#' 192-202. doi:10.1007/s10803-014-2204-9
NULL
#' Data: Depression and Anxiety (Time 1)
#'
#' A data frame containing 403 observations (n = 403) and 16 variables (p = 16) measured on the 4-point
#' likert scale (depression: 9; anxiety: 7).
#'
#' \strong{Depression}:
#'
#' \itemize{
#' \item \code{PHQ1} Little interest or pleasure in doing things?
#' \item \code{PHQ2} Feeling down, depressed, or hopeless?
#' \item \code{PHQ3} Trouble falling or staying asleep, or sleeping too much?
#' \item \code{PHQ4} Feeling tired or having little energy?
#' \item \code{PHQ5} Poor appetite or overeating?
#' \item \code{PHQ6} Feeling bad about yourself — or that you are a failure or have let
#' yourself or your family down?
#' \item \code{PHQ7} Trouble concentrating on things, such as reading the newspaper or
#' watching television?
#' \item \code{PHQ8} Moving or speaking so slowly that other people could have noticed? Or so
#' fidgety or restless that you have been moving a lot more than usual?
#' \item \code{PHQ9} Thoughts that you would be better off dead, or thoughts of hurting yourself
#' in some way?
#' }
#'
#' \strong{Anxiety}
#' \itemize{
#'
#'
#'
#' \item \code{GAD1} Feeling nervous, anxious, or on edge
#' \item \code{GAD2} Not being able to stop or control worrying
#' \item \code{GAD3} Worrying too much about different things
#' \item \code{GAD4} Trouble relaxing
#' \item \code{GAD5} Being so restless that it's hard to sit still
#' \item \code{GAD6} Becoming easily annoyed or irritable
#' \item \code{GAD7} Feeling afraid as if something awful might happen
#' }
#'
#'
#' @docType data
#'
#' @keywords datasets
#' @name depression_anxiety_t1
#'
#' @usage data("depression_anxiety_t1")
#'
#' @format A data frame containing 403 observations (n = 403) and 16 variables (p = 16) measured on the 4-point
#' likert scale.
#'
#' @examples
#' data("depression_anxiety_t1")
#' labels<- c("interest", "down", "sleep",
#' "tired", "appetite", "selfest",
#' "concen", "psychmtr", "suicid",
#' "nervous", "unctrworry", "worrylot",
#' "relax", "restless", "irritable", "awful")
#'
#'
#' @references
#' Forbes, M. K., Baillie, A. J., & Schniering, C. A. (2016). A structural equation modeling
#' analysis of the relationships between depression,anxiety, and sexual problems over time.
#' The Journal of Sex Research, 53(8), 942-954.
#'
#' Forbes, M. K., Wright, A. G., Markon, K. E., & Krueger, R. F. (2019). Quantifying the reliability and replicability of psychopathology network characteristics.
#' Multivariate behavioral research, 1-19.
#'
#' Jones, P. J., Williams, D. R., & McNally, R. J. (2019). Sampling variability is not nonreplication:
#' a Bayesian reanalysis of Forbes, Wright, Markon, & Krueger.
NULL
#' Data: Depression and Anxiety (Time 2)
#'
#' A data frame containing 403 observations (n = 403) and 16 variables (p = 16) measured on the 4-point
#' likert scale (depression: 9; anxiety: 7).
#'
#' \strong{Depression}:
#'
#' \itemize{
#' \item \code{PHQ1} Little interest or pleasure in doing things?
#' \item \code{PHQ2} Feeling down, depressed, or hopeless?
#' \item \code{PHQ3} Trouble falling or staying asleep, or sleeping too much?
#' \item \code{PHQ4} Feeling tired or having little energy?
#' \item \code{PHQ5} Poor appetite or overeating?
#' \item \code{PHQ6} Feeling bad about yourself — or that you are a failure or have let
#' yourself or your family down?
#' \item \code{PHQ7} Trouble concentrating on things, such as reading the newspaper or
#' watching television?
#' \item \code{PHQ8} Moving or speaking so slowly that other people could have noticed? Or so
#' fidgety or restless that you have been moving a lot more than usual?
#' \item \code{PHQ9} Thoughts that you would be better off dead, or thoughts of hurting yourself
#' in some way?
#' }
#'
#' \strong{Anxiety}
#' \itemize{
#'
#'
#'
#' \item \code{GAD1} Feeling nervous, anxious, or on edge
#' \item \code{GAD2} Not being able to stop or control worrying
#' \item \code{GAD3} Worrying too much about different things
#' \item \code{GAD4} Trouble relaxing
#' \item \code{GAD5} Being so restless that it's hard to sit still
#' \item \code{GAD6} Becoming easily annoyed or irritable
#' \item \code{GAD7} Feeling afraid as if something awful might happen
#' }
#'
#'
#' @docType data
#'
#' @keywords datasets
#' @name depression_anxiety_t2
#'
#' @usage data("depression_anxiety_t2")
#'
#' @format A data frame containing 403 observations (n = 403) and 16 variables (p = 16) measured on the 4-point
#' likert scale.
#'
#' @examples
#' data("depression_anxiety_t2")
#' labels<- c("interest", "down", "sleep",
#' "tired", "appetite", "selfest",
#' "concen", "psychmtr", "suicid",
#' "nervous", "unctrworry", "worrylot",
#' "relax", "restless", "irritable", "awful")
#'
#'
#' @references
#' Forbes, M. K., Baillie, A. J., & Schniering, C. A. (2016). A structural equation modeling
#' analysis of the relationships between depression,anxiety, and sexual problems over time.
#' The Journal of Sex Research, 53(8), 942-954.
#'
#' Forbes, M. K., Wright, A. G., Markon, K. E., & Krueger, R. F. (2019). Quantifying the reliability and replicability of psychopathology network characteristics.
#' Multivariate behavioral research, 1-19.
#'
#' Jones, P. J., Williams, D. R., & McNally, R. J. (2019). Sampling variability is not nonreplication:
#' a Bayesian reanalysis of Forbes, Wright, Markon, & Krueger.
NULL
#' Data: Women and Mathematics
#'
#' A data frame containing 1190 observations (n = 1190) and 6 variables (p = 6) measured on the binary scale.
#'
#'\itemize{
#' \item \code{1} Lecture attendance (attend/did not attend)
#' \item \code{2} Gender (male/female)
#' \item \code{3} School type (urban/suburban)
#' \item \code{4} “I will be needing Mathematics in my future work” (agree/disagree)
#' \item \code{5} Subject preference (math/science vs. liberal arts)
#' \item \code{6} Future plans (college/job)
#'}
#'
#' @references
#' \insertAllCited{}
#'
#' @docType data
#'
#' @keywords datasets
#' @name women_math
#'
#' @usage data("women_math")
#'
#' @format A data frame containing 1190 observations (n = 1190) and 6 variables (p = 6) measured on the binary scale
#' \insertCite{fowlkes1988evaluating}{BGGM}. These data have been analyzed in \insertCite{tarantola2004mcmc;textual}{BGGM}
#' and in \insertCite{madigan1994model}{BGGM}. The variable descriptions were copied from (section 5.2 )
#' \insertCite{@section 5.2, @talhouk2012efficient}{BGGM}
#'
#' @examples
#' data("women_math")
NULL
#' @title Data: 1994 General Social Survey
#'
#' @description A data frame containing 1002 rows and 7 variables measured on various scales,
#' including binary and ordered categorical (with varying numbers of categories).
#' There are also missing values in each variable.
#'
#'\itemize{
#' \item \code{Inc} Income of the respondent in 1000s of dollars, binned into 21 ordered categories.
#' \item \code{DEG} Highest degree ever obtained (none, HS, Associates, Bachelors, or Graduate)
#' \item \code{CHILD} Number of children ever had.
#' \item \code{PINC} Financial status of respondent's parents when respondent was 16 (on a 5-point scale).
#' \item \code{PDEG} Maximum of mother's and father's highest degree
#' \item \code{PCHILD} Number of siblings of the respondent plus one
#' \item \code{AGE} Age of the respondent in years.
#'}
#'
#' @references
#' \insertAllCited{}
#'
#' @docType data
#'
#' @keywords datasets
#' @name gss
#'
#' @usage data("gss")
#'
#' @format A data frame containing 1002 rows and 7 variables. The variable descriptions were
#' copied from \insertCite{@section 4, @hoff2007extending;textual}{BGGM}.
#'
#' @examples
#' data("gss")
NULL
#' @title Data: ifit Intensive Longitudinal Data
#'
#' @description A data frame containing 8 variables and nearly 200 observations. There are
#' two subjects, each of whom provided data every day for over 90 days. Six variables are from
#' the PANAS scale (positive and negative affect); the remaining variables are the daily number
#' of steps and the subject id.
#'
#'\itemize{
#' \item \code{id} Subject id
#' \item \code{interested}
#' \item \code{disinterested}
#' \item \code{excited}
#' \item \code{upset}
#' \item \code{strong}
#' \item \code{stressed}
#' \item \code{steps} steps recorded by a Fitbit
#'}
#'
#' @references
#' \insertAllCited{}
#'
#' @docType data
#'
#' @keywords datasets
#' @name ifit
#'
#' @usage data("ifit")
#'
#' @format A data frame containing 197 observations and 8 variables. The data have been used in
#' \insertCite{o2020use}{BGGM} and \insertCite{williams2019bayesian}{BGGM}
#'
#' @examples
#' data("ifit")
NULL
#' @title GGM: Estimation
#'
#' @description Estimate the conditional (in)dependence with either an analytic solution or efficiently
#' sampling from the posterior distribution. These methods were introduced in \insertCite{Williams2019;textual}{BGGM}.
#' The graph is selected with \code{\link{select.estimate}} and then plotted with \code{\link{plot.select}}.
#'
#' @name estimate
#'
#' @param Y Matrix (or data frame) of dimensions \emph{n} (observations) by \emph{p} (variables).
#'
#' @param formula An object of class \code{\link[stats]{formula}}. This allows for including
#' control variables in the model (i.e., \code{~ gender}). See the note for further details.
#'
#' @param type Character string. Which type of data for \code{Y} ? The options include \code{continuous},
#' \code{binary}, \code{ordinal}, or \code{mixed}. Note that mixed can be used for data with only
#' ordinal variables. See the note for further details.
#'
#' @param mixed_type Numeric vector. An indicator of length \emph{p} for which variables should be treated as ranks.
#' (1 for rank and 0 to assume normality). The default is currently to treat all integer variables as ranks
#' when \code{type = "mixed"} and \code{NULL} otherwise. See note for further details.
#'
#' @param analytic Logical. Should the analytic solution be computed (default is \code{FALSE})?
#'
#' @param prior_sd Scale of the prior distribution, approximately the standard deviation of a beta distribution
#' (defaults to 0.25).
#'
#' @param iter Number of iterations (posterior samples; defaults to 5000).
#'
#' @param impute Logical. Should the missing values (\code{NA})
#' be imputed during model fitting (defaults to \code{FALSE}) ?
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param seed An integer for the random seed.
#'
#' @param ... Currently ignored.
#'
#' @references
#' \insertAllCited{}
#'
#'
#' @return The returned object of class \code{estimate} contains a lot of information that
#' is used for printing and plotting the results. For users of \strong{BGGM}, the following
#' are the useful objects:
#'
#' \itemize{
#'
#' \item \code{pcor_mat} Partial correlation matrix (posterior mean).
#'
#' \item \code{post_samp} An object containing the posterior samples.
#'
#' }
#'
#'
#' @details
#'
#' The default is to draw samples from the posterior distribution (\code{analytic = FALSE}). The samples are
#' required for computing edge differences (see \code{\link{ggm_compare_estimate}}), Bayesian R2 introduced in
#' \insertCite{gelman_r2_2019;textual}{BGGM} (see \code{\link{predictability}}), etc. If the goal is
#' to *only* determine the non-zero effects, this can be accomplished by setting \code{analytic = TRUE}.
#' This is particularly useful when a fast solution is needed (see the examples in \code{\link{ggm_compare_ppc}}).
#'
#' \strong{Controlling for Variables}:
#'
#' When controlling for variables, it is assumed that \code{Y} includes \emph{only}
#' the nodes in the GGM and the control variables. Internally, \code{only} the predictors
#' that are included in \code{formula} are removed from \code{Y}. This is not the behavior of, say,
#' \code{\link{lm}}, but was adopted to ensure users do not have to write out each variable that
#' should be included in the GGM. An example is provided below.
#'
#' \strong{Mixed Type}:
#'
#' The term "mixed" is somewhat of a misnomer, because the method can be used for data including \emph{only}
#' continuous or \emph{only} discrete variables. This is based on the ranked likelihood which requires sampling
#' the ranks for each variable (i.e., the data is not merely transformed to ranks). This is computationally
#' expensive when there are many levels. For example, with continuous data, there are as many ranks
#' as data points!
#'
#' The option \code{mixed_type} allows the user to determine which variable should be treated as ranks
#' and the "emprical" distribution is used otherwise \insertCite{hoff2007extending}{BGGM}. This is
#' accomplished by specifying an indicator vector of length \emph{p}. A one indicates to use the ranks,
#' whereas a zero indicates to "ignore" that variable. By default all integer variables are treated as ranks.
#'
#' \strong{Dealing with Errors}:
#'
#' An error is most likely to arise when \code{type = "ordinal"}. There are two common errors (although still rare):
#'
#' \itemize{
#'
#' \item The first is due to sampling the thresholds, especially when the data is heavily skewed.
#' This can result in an ill-defined matrix. If this occurs, we recommend to first try
#' decreasing \code{prior_sd} (i.e., a more informative prior). If that does not work, then
#' change the data type to \code{type = "mixed"} which then estimates a copula GGM
#' (this method can be used for data containing \strong{only} ordinal variables). This should
#' work without a problem.
#'
#' \item The second is due to how the ordinal data are categorized. For example, if the error states
#' that the index is out of bounds, this indicates that the first category is a zero. This is not allowed, as
#' the first category must be one. This is addressed by adding one (e.g., \code{Y + 1}) to the data matrix.
#'
#' }
#'
#' \strong{Imputing Missing Values}:
#'
#' Missing values are imputed with the approach described in \insertCite{hoff2009first;textual}{BGGM}.
#' The basic idea is to impute the missing values with the respective posterior predictive distribution,
#' given the observed data, as the model is being estimated. Note that \code{impute} is ignored
#' when there are no missing values. If set to \code{FALSE}, and there are missing
#' values, list-wise deletion is performed with \code{na.omit}.
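#'
#' A brief sketch (here \code{Y_miss} is a hypothetical data matrix with
#' missing entries):
#'
#' \preformatted{
#' # missing entries are imputed from their posterior predictive distribution
#' fit <- estimate(Y_miss, impute = TRUE, iter = 250)
#' }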
#'
#'
#' @note
#'
#' \strong{Posterior Uncertainty}:
#'
#' A key feature of \bold{BGGM} is that there is a posterior distribution for each partial correlation.
#' This readily allows for visualizing uncertainty in the estimates. This feature works
#' with all data types and is accomplished by plotting the summary of the \code{estimate} object
#' (i.e., \code{plot(summary(fit))}). Several examples are provided below.
#'
#'
#'
#' \strong{Interpretation of Conditional (In)dependence Models for Latent Data}:
#'
#' See \code{\link{BGGM-package}} for details about interpreting GGMs based on latent data
#' (i.e., all data types besides \code{"continuous"}).
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' #########################################
#' ### example 1: continuous and ordinal ###
#' #########################################
#' # data
#' Y <- ptsd
#'
#' # continuous
#'
#' # fit model
#' fit <- estimate(Y, type = "continuous",
#' iter = 250)
#'
#' # summarize the partial correlations
#' summ <- summary(fit)
#'
#' # plot the summary
#' plt_summ <- plot(summary(fit))
#'
#' # select the graph
#' E <- select(fit)
#'
#' # plot the selected graph
#' plt_E <- plot(select(fit))
#'
#'
#' # ordinal
#'
#' # fit model (note + 1, due to zeros)
#' fit <- estimate(Y + 1, type = "ordinal",
#' iter = 250)
#'
#' # summarize the partial correlations
#' summ <- summary(fit)
#'
#' # plot the summary
#' plt <- plot(summary(fit))
#'
#' # select the graph
#' E <- select(fit)
#'
#' # plot the selected graph
#' plt_E <- plot(select(fit))
#'
#' ##################################
#' ## example 2: analytic solution ##
#' ##################################
#' # (only continuous)
#'
#' # data
#' Y <- ptsd
#'
#' # fit model
#' fit <- estimate(Y, analytic = TRUE)
#'
#' # summarize the partial correlations
#' summ <- summary(fit)
#'
#' # plot summary
#' plt_summ <- plot(summary(fit))
#'
#' # select graph
#' E <- select(fit)
#'
#' # plot the selected graph
#' plt_E <- plot(select(fit))
#'
#'}
#'
#' @export
estimate <- function(Y,
formula = NULL,
type = "continuous",
mixed_type = NULL,
analytic = FALSE,
prior_sd = 0.25,
iter = 5000,
impute = FALSE,
progress = TRUE,
seed = 1,
...){
# temporary warning until missing data is fully implemented
if(!type %in% c("continuous", "mixed")){
if(impute){
warning(paste0("imputation during model fitting is\n",
"currently only implemented for 'continuous'
and 'mixed' data."))
}
}
# removed per CRAN (8/12/21)
#old <- .Random.seed
set.seed(seed)
# delta rho ~ beta(delta/2, delta/2)
delta <- delta_solve(prior_sd)
# nodes
p <- ncol(Y)
# sample posterior
if(!analytic){
if(isTRUE(progress)){
message(paste0("BGGM: Posterior Sampling ", ...))
}
# continuous
if(type == "continuous"){
# no control
if(is.null(formula)){
if(!impute){
# na omit
Y <- as.matrix(na.omit(Y))
Y_miss <- Y
} else {
Y_miss <- ifelse(is.na(Y), 1, 0)
if(sum(Y_miss) == 0){
impute <- FALSE
}
# impute means
for(i in 1:p){
Y[which(is.na(Y[,i])),i] <- mean(na.omit(Y[,i]))
}
}
# scale Y
Y <- scale(Y, scale = F)
# design matrix
X <- NULL
# number of observations
n <- nrow(Y)
# starting values
start <- solve(cov(Y))
# posterior sample
post_samp <- .Call(
'_BGGM_Theta_continuous',
PACKAGE = 'BGGM',
Y = Y,
iter = iter + 50,
delta = delta,
epsilon = 0.1,
prior_only = 0,
explore = 1,
start = start,
progress = progress,
impute = impute,
Y_miss = Y_miss
)
# control for variables
} else {
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(scale(control_info$Y_groups[[1]], scale = F))
# nodes
p <- ncol(Y)
# observations
n <- nrow(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
start <- solve(cov(Y))
# posterior sample
post_samp <- .Call(
"_BGGM_mv_continuous",
Y = Y,
X = X,
delta = delta,
epsilon = 0.1,
iter = iter + 50,
start = start,
progress = progress
)
# end control
}
# binary
} else if (type == "binary") {
# intercept only
if (is.null(formula)) {
# data
Y <- as.matrix(na.omit(Y))
# obervations
n <- nrow(Y)
# nodes
p <- ncol(Y)
X <- matrix(1, n, 1)
formula <- ~ 1
start <- solve(cov(Y))
} else {
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
start <- solve(cov(Y))
}
# posterior sample
post_samp <- .Call(
"_BGGM_mv_binary",
Y = Y,
X = X,
delta = delta,
epsilon = 0.1,
iter = iter + 50,
beta_prior = 0.0001,
cutpoints = c(-Inf, 0, Inf),
start = start,
progress = progress
)
# ordinal
} else if(type == "ordinal"){
# intercept only
if(is.null(formula)){
# data
Y <- as.matrix(na.omit(Y))
# obervations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# intercept only
X <- matrix(1, n, 1)
formula <- ~ 1
start <- solve(cov(Y))
} else {
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
start <- solve(cov(Y))
}
# categories
K <- max(apply(Y, 2, function(x) { length(unique(x)) } ))
# call c ++
post_samp <- .Call(
"_BGGM_mv_ordinal_albert",
Y = Y,
X = X,
iter = iter + 50,
delta = delta,
epsilon = 0.1,
K = K,
start = start,
progress = progress
)
} else if(type == "mixed"){
X <- NULL
# no control variables allowed
if(!is.null(formula)){
warning("formula ignored for mixed data at this time")
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
formula <- NULL
}
# default for ranks
if(is.null(mixed_type)) {
idx = rep(1, ncol(Y))
# user defined
} else {
idx = mixed_type
}
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# rank following hoff (2008)
rank_vars <- rank_helper(Y)
if(impute){
Y_missing <- ifelse(is.na(Y), 1, 0)
rank_vars$z0_start[is.na(rank_vars$z0_start)] <- rnorm(sum(Y_missing))
post_samp <- .Call(
"_BGGM_missing_copula",
Y = Y,
Y_missing = Y_missing,
z0_start = rank_vars$z0_start,
Sigma_start = cov(rank_vars$z0_start),
levels = rank_vars$levels,
iter_missing = iter + 50,
progress_impute = TRUE,
K = rank_vars$K,
idx = idx,
epsilon = 0.1,
delta = delta
)
} else {
Y <- na.omit(Y)
post_samp <- .Call(
"_BGGM_copula",
z0_start = rank_vars$z0_start,
levels = rank_vars$levels,
K = rank_vars$K,
Sigma_start = cov(Y),
iter = iter + 50,
delta = delta,
epsilon = 0.1,
idx = idx,
progress = progress
)
}
} else {
stop("'type' not supported: must be continuous, binary, ordinal, or mixed.")
}
if(isTRUE(progress)){
message("BGGM: Finished")
}
pcor_mat <- post_samp$pcor_mat
results <- list(
pcor_mat = pcor_mat,
analytic = analytic,
formula = formula,
post_samp = post_samp,
type = type,
iter = iter,
Y = Y,
X = X,
call = match.call(),
prior_sd = prior_sd,
p = p,
n = n,
ppd_mean = post_samp$ppd_mean
)
# analytic
} else {
if(type != "continuous"){
warning("analytic solution only available for 'type = continuous'")
type <- "continuous"
}
if(!is.null(formula)){
stop("formula note permitted with the analytic solution")
}
Y <- na.omit(Y)
# observations
n <- nrow(Y)
p <- ncol(Y)
formula <- NULL
analytic_fit <- analytic_solve(Y)
results <- list(pcor_mat = analytic_fit$pcor_mat,
analytic_fit = analytic_fit,
analytic = analytic,
formula = formula,
type = type,
iter = iter,
Y = Y,
call = match.call(),
prior_sd = prior_sd,
p = p,
n = n)
} # end analytic
# removed per CRAN (8/12/21)
#.Random.seed <<- old
returned_object <- results
class(returned_object) <- c("BGGM",
"estimate",
"default")
return(returned_object)
}
#' @title Summary method for \code{estimate.default} objects
#'
#' @name summary.estimate
#'
#' @description Summarize the posterior distribution of each partial correlation
#' with the posterior mean and standard deviation.
#'
#'
#' @param object An object of class \code{estimate}
#'
#' @param col_names Logical. Should the summary include the column names (default is \code{TRUE})?
#' Setting to \code{FALSE} includes the column numbers (e.g., \code{1--2}).
#'
#' @param cred Numeric. The credible interval width for summarizing the posterior
#' distributions (defaults to 0.95; must be between 0 and 1).
#'
#' @param ... Currently ignored.
#'
#' @seealso \code{\link{estimate}}
#'
#' @return A dataframe containing the summarized posterior distributions.
#'
#' @examples
#' \donttest{
#' # data
#' Y <- ptsd[,1:5]
#'
#' fit <- estimate(Y, iter = 250,
#' progress = FALSE)
#'
#' summary(fit)
#'
#'}
#'
#' @export
summary.estimate <- function(object,
col_names = TRUE,
cred = 0.95, ...) {
# nodes
p <- object$p
# identity matrix
I_p <- diag(p)
# lower bound
lb <- (1 - cred) / 2
# upper bound
ub <- 1 - lb
# column names
cn <- colnames(object$Y)
if(is.null(cn) | isFALSE(col_names)){
mat_names <- sapply(1:p , function(x) paste(1:p, x, sep = "--"))[upper.tri(I_p)]
} else {
mat_names <- sapply(cn , function(x) paste(cn, x, sep = "--"))[upper.tri(I_p)]
}
if(isFALSE(object$analytic)){
post_mean <- round(object$pcor_mat[upper.tri(I_p)], 3)
post_sd <- round(apply(object$post_samp$pcors[,, 51:(object$iter + 50) ], 1:2, sd), 3)[upper.tri(I_p)]
post_lb <- round(apply( object$post_samp$pcors[,, 51:(object$iter + 50) ], 1:2, quantile, lb), 3)[upper.tri(I_p)]
post_ub <- round(apply( object$post_samp$pcors[,, 51:(object$iter + 50) ], 1:2, quantile, ub), 3)[upper.tri(I_p)]
dat_results <-
data.frame(
relation = mat_names,
post_mean = post_mean,
post_sd = post_sd,
post_lb = post_lb,
post_ub = post_ub
)
colnames(dat_results) <- c(
"Relation",
"Post.mean",
"Post.sd",
"Cred.lb",
"Cred.ub")
} else {
dat_results <-
data.frame(
relation = mat_names,
post_mean = object$pcor_mat[upper.tri(I_p)]
)
colnames(dat_results) <- c(
"Relation",
"Post.mean")
}
returned_object <- list(dat_results = dat_results,
object = object)
class(returned_object) <- c("BGGM", "estimate",
"summary_estimate",
"summary.estimate")
returned_object
}
print_summary_estimate <- function(x, ...) {
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$object$type, "\n")
cat("Analytic:", x$object$analytic, "\n")
cat("Formula:", paste(as.character(x$object$formula), collapse = " "), "\n")
# number of iterations
cat("Posterior Samples:", x$object$iter, "\n")
# number of observations
cat("Observations (n):\n")
# number of variables
cat("Nodes (p):", x$object$p, "\n")
# number of edges
cat("Relations:", .5 * (x$object$p * (x$object$p - 1)), "\n")
cat("--- \n")
cat("Call: \n")
print(x$object$call)
cat("--- \n")
cat("Estimates:\n")
print(x$dat_results, row.names = F)
cat("--- \n")
}
print_estimate <- function(x, ...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$type, "\n")
cat("Analytic:", x$analytic, "\n")
cat("Formula:", paste(as.character(x$formula), collapse = " "), "\n")
# number of iterations
cat("Posterior Samples:", x$iter, "\n")
# number of observations
cat("Observations (n):\n")
# number of variables
cat("Nodes (p):", x$p, "\n")
# number of edges
cat("Relations:", .5 * (x$p * (x$p-1)), "\n")
cat("--- \n")
cat("Call: \n")
print(x$call)
cat("--- \n")
cat("Date:", date(), "\n")
}
#' @title Plot \code{summary.estimate} Objects
#'
#' @description Visualize the posterior distributions for each partial correlation.
#'
#' @name plot.summary.estimate
#'
#' @param x An object of class \code{summary.estimate}
#'
#' @param size Numeric. The size for the points (defaults to \code{2}).
#'
#' @param color Character string. The color for the error bars.
#' (defaults to \code{"black"}).
#'
#' @param width Numeric. The width of error bar ends (defaults to \code{0}).
#'
#' @param ... Currently ignored
#'
#' @seealso \code{\link{estimate}}
#'
#' @return A \code{ggplot} object.
#'
#' @examples
#' \donttest{
#' # data
#' Y <- ptsd[,1:5]
#'
#' fit <- estimate(Y, iter = 250,
#' progress = FALSE)
#'
#'
#' plot(summary(fit))
#'
#' }
#'
#' @export
plot.summary.estimate <- function(x,
color = "black",
size = 2,
width = 0, ...){
dat_temp <- x$dat_results[order(x$dat_results$Post.mean,
decreasing = F), ]
dat_temp$Relation <-
factor(dat_temp$Relation,
levels = dat_temp$Relation,
labels = dat_temp$Relation)
if(isFALSE(x$object$analytic)){
ggplot(dat_temp,
aes(x = Relation,
y = Post.mean)) +
# columns 4 and 5 are Cred.lb and Cred.ub, respectively
geom_errorbar(aes(ymax = dat_temp[, 5],
ymin = dat_temp[, 4]),
width = width,
color = color) +
geom_point(size = size) +
xlab("Index") +
theme(axis.text.x = element_text(
angle = 90,
vjust = 0.5,
hjust = 1
))
} else {
ggplot(dat_temp,
aes(x = Relation,
y = Post.mean)) +
geom_point(size = size) +
xlab("Index") +
theme(axis.text.x = element_text(
angle = 90,
vjust = 0.5,
hjust = 1
))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/estimate.R
|
#' @title GGM: Exploratory Hypothesis Testing
#'
#' @name explore
#'
#' @description Learn the conditional (in)dependence structure with the Bayes factor using the matrix-F
#' prior distribution \insertCite{Mulder2018}{BGGM}. These methods were introduced in
#' \insertCite{Williams2019_bf;textual}{BGGM}. The graph is selected with \code{\link{select.explore}} and
#' then plotted with \code{\link{plot.select}}.
#'
#' @param Y Matrix (or data frame) of dimensions \emph{n} (observations) by \emph{p} (variables).
#'
#' @param formula An object of class \code{\link[stats]{formula}}. This allows for including
#' control variables in the model (i.e., \code{~ gender}).
#'
#' @param type Character string. Which type of data for \code{Y} ? The options include \code{continuous},
#' \code{binary}, \code{ordinal}, or \code{mixed} (semi-parametric copula). See the note for further details.
#'
#' @param mixed_type Numeric vector. An indicator of length p for which variables should be treated as ranks.
#' (1 for rank and 0 to assume normality). The default is to treat all integer variables as ranks
#' when \code{type = "mixed"} and \code{NULL} otherwise. See note for further details.
#'
#' @param analytic Logical. Should the analytic solution be computed (default is \code{FALSE})?
#' (currently not implemented)
#'
#' @param prior_sd Scale of the prior distribution, approximately the standard deviation
#' of a beta distribution (defaults to 0.25).
#'
#' @param iter Number of iterations (posterior samples; defaults to 5000).
#'
#' @param impute Logical. Should the missing values (\code{NA})
#' be imputed during model fitting (defaults to \code{FALSE})?
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param seed An integer for the random seed.
#'
#' @param ... Currently ignored (leave empty).
#'
#' @references
#' \insertAllCited{}
#'
#' @return The returned object of class \code{explore} contains a lot of information that
#' is used for printing and plotting the results. For users of \strong{BGGM}, the following
#' are the useful objects:
#'
#' \itemize{
#'
#' \item \code{pcor_mat} partial correlation matrix (posterior mean).
#'
#' \item \code{post_samp} an object containing the posterior samples.
#'
#' }
#'
#' @details
#'
#' \strong{Controlling for Variables}:
#'
#' When controlling for variables, it is assumed that \code{Y} includes \emph{only}
#' the nodes in the GGM and the control variables. Internally, \emph{only} the predictors
#' that are included in \code{formula} are removed from \code{Y}. This is not the behavior of, say,
#' \code{\link{lm}}, but was adopted to ensure users do not have to write out each variable that
#' should be included in the GGM. An example is provided below.
#'
#' \strong{Mixed Type}:
#'
#' The term "mixed" is somewhat of a misnomer, because the method can be used for data including \emph{only}
#' continuous or \emph{only} discrete variables. This is based on the ranked likelihood which requires sampling
#' the ranks for each variable (i.e., the data is not merely transformed to ranks). This is computationally
#' expensive when there are many levels. For example, with continuous data, there are as many ranks
#' as data points!
#'
#' The option \code{mixed_type} allows the user to determine which variables should be treated as ranks
#' and the "empirical" distribution is used otherwise. This is accomplished by specifying an indicator
#' vector of length \emph{p}. A one indicates to use the ranks, whereas a zero indicates to "ignore"
#' that variable. By default all integer variables are handled as ranks.
#'
#' \strong{Dealing with Errors}:
#'
#' An error is most likely to arise when \code{type = "ordinal"}. There are two common errors (although still rare):
#'
#' \itemize{
#'
#' \item The first is due to sampling the thresholds, especially when the data is heavily skewed.
#' This can result in an ill-defined matrix. If this occurs, we recommend to first try
#' decreasing \code{prior_sd} (i.e., a more informative prior). If that does not work, then
#' change the data type to \code{type = "mixed"} which then estimates a copula GGM
#' (this method can be used for data containing \strong{only} ordinal variables). This should
#' work without a problem.
#'
#' \item The second is due to how the ordinal data are categorized. For example, if the error states
#' that the index is out of bounds, this indicates that the first category is a zero. This is not allowed, as
#' the first category must be one. This is addressed by adding one (e.g., \code{Y + 1}) to the data matrix.
#'
#' }
#'
#'
#' \strong{Imputing Missing Values}:
#'
#' Missing values are imputed with the approach described in \insertCite{hoff2009first;textual}{BGGM}.
#' The basic idea is to impute the missing values with the respective posterior predictive distribution,
#' given the observed data, as the model is being estimated. Note that \code{impute} is ignored
#' when there are no missing values. If set to \code{FALSE}, and there are missing
#' values, list-wise deletion is performed with \code{na.omit}.
#'
#' @note
#'
#' \strong{Posterior Uncertainty}:
#'
#' A key feature of \bold{BGGM} is that there is a posterior distribution for each partial correlation.
#' This readily allows for visualizing uncertainty in the estimates. This feature works
#' with all data types and is accomplished by plotting the summary of the \code{explore} object
#' (i.e., \code{plot(summary(fit))}). Note that in contrast to \code{estimate} (credible intervals),
#' the posterior standard deviation is plotted for \code{explore} objects.
#'
#'
#' \strong{"Default" Prior}:
#'
#' In Bayesian statistics, a default Bayes factor needs to have several properties. I refer
#' interested users to \insertCite{@section 2.2 in @dablander2020default;textual}{BGGM}. In
#' \insertCite{Williams2019_bf;textual}{BGGM}, some of these properties were investigated including
#' model selection consistency. That said, we would not consider this a "default" (or "automatic")
#' Bayes factor and thus we encourage users to perform sensitivity analyses by varying
#' the scale of the prior distribution.
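#'
#' A simple sensitivity check is to refit with different prior scales and compare
#' the results, for example (a rough sketch):
#'
#' \preformatted{
#' fit_narrow <- explore(Y, prior_sd = 0.1, iter = 250)
#' fit_wide   <- explore(Y, prior_sd = 0.5, iter = 250)
#' }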
#'
#' Furthermore, it is important to note there is no "correct" prior and, also, there is no need
#' to entertain the possibility of a "true" model. Rather, the Bayes factor can be interpreted as
#' which hypothesis best (\strong{relative} to each other) predicts the observed data
#' \insertCite{@Section 3.2 in @Kass1995}{BGGM}.
#'
#' \strong{Interpretation of Conditional (In)dependence Models for Latent Data}:
#'
#' See \code{\link{BGGM-package}} for details about interpreting GGMs based on latent data
#' (i.e., all data types besides \code{"continuous"}).
#'
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' ###########################
#' ### example 1: binary ####
#' ###########################
#' Y <- women_math[1:500,]
#'
#' # fit model
#' fit <- explore(Y, type = "binary",
#' iter = 250,
#' progress = FALSE)
#'
#' # summarize the partial correlations
#' summ <- summary(fit)
#'
#' # plot the summary
#' plt_summ <- plot(summary(fit))
#'
#' # select the graph
#' E <- select(fit)
#'
#' # plot the selected graph
#' plt_E <- plot(E)
#'
#' plt_E$plt_alt
#'}
#' @export
explore <- function(Y,
formula = NULL,
type = "continuous",
mixed_type = NULL,
analytic = FALSE,
prior_sd = 0.25,
iter = 5000,
progress = TRUE,
impute = FALSE,
seed = 1, ...){
# temporary warning until missing data is fully implemented
if(!type %in% c("continuous", "mixed")){
if(impute){
warning(paste0("imputation during model fitting is\n",
"currently only implemented for 'continuous'
and 'mixed' data."))
}
}
# removed per CRAN (8/12/21)
#old <- .Random.seed
set.seed(seed)
dot_dot_dot <- list(...)
eps <- 0.01
# delta parameter
delta <- delta_solve(prior_sd)
if(!analytic){
if(isTRUE(progress)){
message(paste0("BGGM: Posterior Sampling ", ...))
}
# continuous
if (type == "continuous") {
# no control
if (is.null(formula)) {
# nodes
p <- ncol(Y)
if(!impute){
# na omit
Y <- as.matrix(na.omit(Y))
Y_miss <- Y
} else {
Y_miss <- ifelse(is.na(Y), 1, 0)
if(sum(Y_miss) == 0){
impute <- FALSE
}
# impute means
for(i in 1:p){
Y[which(is.na(Y[,i])),i] <- mean(na.omit(Y[,i]))
}
}
# scale Y
Y <- scale(Y, scale = F)
# NULL design matrix
X <- NULL
# observations
n <- nrow(Y)
# starting values
start <- solve(cov(Y))
# posterior sample
post_samp <- .Call(
'_BGGM_Theta_continuous',
PACKAGE = 'BGGM',
Y = Y,
iter = iter + 50,
delta = delta,
epsilon = eps,
prior_only = 0,
explore = 1,
start = start,
progress = progress,
impute = impute,
Y_miss = Y_miss
)
# control for variables
} else {
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(scale(control_info$Y_groups[[1]], scale = F))
X <- NULL
# nodes
p <- ncol(Y)
# observations
n <- nrow(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
start <- solve(cov(Y))
# posterior sample
post_samp <- .Call(
"_BGGM_mv_continuous",
Y = Y,
X = X,
delta = delta,
epsilon = eps,
iter = iter + 50,
start = start,
progress = progress
)
} # end control
# binary
} else if (type == "binary") {
# intercept only
if (is.null(formula)) {
# data
Y <- as.matrix(na.omit(Y))
# obervations
n <- nrow(Y)
# nodes
p <- ncol(Y)
X <- matrix(1, n, 1)
formula <- ~ 1
start <- solve(cov(Y))
} else {
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
X <- NULL
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
start <- solve(cov(Y))
}
# posterior sample
post_samp <- .Call(
"_BGGM_mv_binary",
Y = Y,
X = X,
delta = delta,
epsilon = 0.01,
iter = iter + 50,
beta_prior = 0.1,
cutpoints = c(-Inf, 0, Inf),
start = start,
progress = progress
)
# ordinal
} else if (type == "ordinal") {
# intercept only
if(is.null(formula)){
# data
Y <- as.matrix(na.omit(Y))
X <- NULL
# obervations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# intercept only
X <- matrix(1, n, 1)
formula <- ~ 1
# start
start <- solve(cov(Y))
} else {
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
# start
start <- solve(cov(Y))
}
# categories
K <- max(apply(Y, 2, function(x) { length(unique(x)) } ))
# call c ++
post_samp <- .Call(
"_BGGM_mv_ordinal_albert",
Y = Y,
X = X,
iter = iter + 50,
delta = delta,
epsilon = 0.01,
K = K,
start = start,
progress = progress
)
} else if(type == "mixed"){
X <- NULL
# no control variables allowed
if(!is.null(formula)){
warning("formula ignored for mixed data at this time")
control_info <- remove_predictors_helper(list(as.data.frame(Y)),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
formula <- NULL
}
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# default for ranks
if(is.null(mixed_type)) {
idx = rep(1, ncol(Y))
# user defined
} else {
idx = mixed_type
}
# rank following hoff (2008)
rank_vars <- rank_helper(Y)
if(impute){
Y_missing <- ifelse(is.na(Y), 1, 0)
rank_vars$z0_start[is.na(rank_vars$z0_start)] <- rnorm(sum(Y_missing))
post_samp <- .Call(
"_BGGM_missing_copula",
Y = Y,
Y_missing = Y_missing,
z0_start = rank_vars$z0_start,
Sigma_start = cov(rank_vars$z0_start),
levels = rank_vars$levels,
iter_missing = iter + 50,
progress_impute = TRUE,
K = rank_vars$K,
idx = idx,
epsilon = 0.01,
delta = delta
)
} else {
post_samp <- .Call(
"_BGGM_copula",
z0_start = rank_vars$z0_start,
levels = rank_vars$levels,
K = rank_vars$K,
Sigma_start = rank_vars$Sigma_start,
iter = iter + 50,
delta = delta,
epsilon = 0.01,
idx = idx,
progress = progress
)
}
} else {
stop("'type' not supported: must be continuous, binary, ordinal, or mixed.")
}
# matrix dimensions for prior
Y_dummy <- matrix(rnorm( 10 * 3 ),
nrow = 10, ncol = 3)
if(isTRUE(progress)){
message(paste0("BGGM: Prior Sampling ", ...))
}
# sample prior
prior_samp <- .Call('_BGGM_sample_prior',
PACKAGE = 'BGGM',
Y = Y_dummy,
iter = iter + 50,
delta = delta,
epsilon = eps,
prior_only = 1,
explore = 1,
progress = progress)
if(isTRUE(progress)){
message("BGGM: Finished")
}
# # compute post.mean
pcor_mat <- post_samp$pcor_mat
returned_object <- list(
pcor_mat = pcor_mat,
analytic = analytic,
formula = formula,
post_samp = post_samp,
prior_samp = prior_samp,
type = type,
iter = iter,
Y = Y,
call = match.call(),
p = p,
n = n,
X = X,
eps = eps,
ppd_mean = post_samp$ppd_mean
)
} else {
stop("analytic solution not currently available")
}
# removed per CRAN (8/12/21)
#.Random.seed <<- old
class(returned_object) <- c("BGGM",
"explore",
"default")
return(returned_object)
}
#' @title Summary Method for \code{explore.default} Objects
#'
#' @description Summarize the posterior distribution for each partial correlation
#' with the posterior mean and standard deviation.
#'
#' @name summary.explore
#'
#' @param object An object of class \code{explore}.
#'
#' @param col_names Logical. Should the summary include the column names (default is \code{TRUE})?
#' Setting to \code{FALSE} includes the column numbers (e.g., \code{1--2}).
#'
#' @param ... Currently ignored
#'
#' @seealso \code{\link{select.explore}}
#'
#' @return A dataframe containing the summarized posterior distributions.
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' Y <- ptsd[,1:5]
#'
#' fit <- explore(Y, iter = 250,
#' progress = FALSE)
#'
#' summ <- summary(fit)
#'
#' summ
#' }
#' @export
summary.explore <- function(object,
col_names = TRUE, ...) {
# nodes
p <- object$p
# identity matrix
I_p <- diag(p)
# column names
cn <- colnames(object$Y)
if(is.null(cn) | isFALSE(col_names)){
mat_names <- sapply(1:p , function(x) paste(1:p, x, sep = "--"))[upper.tri(I_p)]
} else {
mat_names <- sapply(cn , function(x) paste(cn, x, sep = "--"))[upper.tri(I_p)]
}
if(isFALSE(object$analytic)){
post_mean <- round(object$pcor_mat, 3)[upper.tri(I_p)]
post_sd <- round(apply(object$post_samp$pcors[,, 51:(object$iter + 50) ], 1:2, sd), 3)[upper.tri(I_p)]
dat_results <-
data.frame(
relation = mat_names,
post_mean = post_mean,
post_sd = post_sd
)
colnames(dat_results) <- c(
"Relation",
"Post.mean",
"Post.sd")
} else {
dat_results <-
data.frame(
relation = mat_names,
post_mean = object$pcor_mat[upper.tri(I_p)]
)
colnames(dat_results) <- c(
"Relation",
"Post.mean")
}
returned_object <- list(dat_results = dat_results,
object = object)
class(returned_object) <- c("BGGM", "explore",
"summary_explore",
"summary.explore")
returned_object
}
print_explore <- function(x,...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$type, "\n")
cat("Analytic:", x$analytic, "\n")
cat("Formula:", paste(as.character(x$formula), collapse = " "), "\n")
# number of iterations
cat("Posterior Samples:", x$iter, "\n")
# number of observations
cat("Observations (n):", x$n, "\n")
# number of variables
cat("Nodes (p):", x$p, "\n")
# number of edges
cat("Relations:", .5 * (x$p * (x$p-1)), "\n")
cat("--- \n")
cat("Call: \n")
print(x$call)
cat("--- \n")
cat("Date:", date(), "\n")
}
#' @title Plot \code{summary.explore} Objects
#'
#' @description Visualize the posterior distributions for each partial correlation.
#'
#' @name plot.summary.explore
#'
#'
#' @param x An object of class \code{summary.explore}
#'
#' @param size Numeric. The size for the points (defaults to \code{2}).
#'
#' @param color Character string. The color for the error bars.
#' (defaults to \code{"black"}).
#'
#' @param width Numeric. The width of error bar ends (defaults to \code{0} ).
#'
#' @param ... Currently ignored
#'
#' @return A \code{ggplot} object
#'
#' @seealso \code{\link{explore}}
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' Y <- ptsd[,1:5]
#'
#' fit <- explore(Y, iter = 250,
#' progress = FALSE)
#'
#' plt <- plot(summary(fit))
#'
#' plt
#' }
#' @export
plot.summary.explore <- function(x,
color = "black",
size = 2,
width = 0,
...){
dat_temp <- x$dat_results[order(x$dat_results$Post.mean,
decreasing = F), ]
dat_temp$Relation <-
factor(dat_temp$Relation,
levels = dat_temp$Relation,
labels = dat_temp$Relation)
if(isFALSE(x$object$analytic)){
ggplot(dat_temp,
aes(x = Relation,
y = Post.mean)) +
geom_errorbar(aes(ymax = Post.mean + dat_temp[, 3],
ymin = Post.mean - dat_temp[, 3]),
width = width,
color = color) +
geom_point(size = size) +
xlab("Index") +
theme(axis.text.x = element_text(
angle = 90,
vjust = 0.5,
hjust = 1
))
} else {
ggplot(dat_temp,
aes(x = Relation,
y = Post.mean)) +
geom_point(size = size) +
xlab("Index") +
theme(axis.text.x = element_text(
angle = 90,
vjust = 0.5,
hjust = 1
))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/explore.default.R
|
#' Fisher Z Transformation
#'
#' @description Transform correlations to Fisher's Z
#' @param r correlation (can be a vector)
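#'
#' @details The Fisher transformation is
#' \deqn{z = 0.5 \log((1 + r)/(1 - r))}{z = 0.5 * log((1 + r)/(1 - r))}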
#'
#' @return Fisher Z transformed correlation(s)
#' @export
#'
#' @examples
#' fisher_r_to_z(0.5)
fisher_r_to_z <- function(r){
z <- fisher_z(r)
return(z)
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/fisher_r2z.R
|
#' Fisher Z Back Transformation
#' @description Back transform Fisher's Z to correlations
#' @param z Fisher Z
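#'
#' @details The back-transformation is
#' \deqn{r = (\exp(2z) - 1)/(\exp(2z) + 1)}{r = (exp(2z) - 1)/(exp(2z) + 1)}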
#'
#' @return Correlation(s) (back-transformed)
#' @export
#'
#' @examples
#' fisher_z_to_r(0.5)
fisher_z_to_r <- function(z){
r <- z2r(z)
return(r)
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/fisher_z2r.R
|
#' Generate Ordinal and Binary data
#'
#' Generate Multivariate Ordinal and Binary data.
#'
#' @param n Number of observations (\emph{n}).
#'
#' @param p Number of variables (\emph{p}).
#'
#' @param levels Number of categories (defaults to 2; binary data).
#'
#' @param cor_mat A \emph{p} by \emph{p} matrix including the true correlation structure.
#'
#' @param empirical Logical. If true, \code{cor_mat} specifies the empirical not
#' population covariance matrix.
#'
#' @return A \emph{n} by \emph{p} data matrix.
#'
#' @references
#' \insertAllCited{}
#'
#' @importFrom utils capture.output
#' @note
#'
#' In order to allow users to enjoy the functionality of \bold{BGGM}, we had to make minor changes to the function \code{rmvord_naiv}
#' from the \code{R} package \bold{orddata} \insertCite{orddata}{BGGM}. All rights to, and credit for, the function \code{rmvord_naiv}
#' belong to the authors of that package.
#'
#' This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
#' This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#' A copy of the GNU General Public License is available online.
#'
#' @examples
#' ################################
#' ######### example 1 ############
#' ################################
#'
#' main <- ptsd_cor1[1:5,1:5]
#' p <- ncol(main)
#'
#' pcors <- -(cov2cor(solve(main)) -diag(p))
#' diag(pcors) <- 1
#' pcors <- ifelse(abs(pcors) < 0.05, 0, pcors)
#'
#' inv <- -pcors
#' diag(inv) <- 1
#' cors <- cov2cor( solve(inv))
#'
#' # example data
#' Y <- BGGM::gen_ordinal(n = 500, p = 5,
#' levels = 2,
#' cor_mat = cors,
#' empirical = FALSE)
#'
#'
#'
#' ################################
#' ######### example 2 ############
#' ################################
#' # empirical = TRUE
#'
#' Y <- gen_ordinal(n = 500,
#' p = 16,
#' levels = 5,
#' cor_mat = ptsd_cor1,
#' empirical = TRUE)
#'
#' @export
gen_ordinal <- function(n, p, levels = 2, cor_mat, empirical = FALSE){
ls <- list()
for(i in 1:p){
temp <- table(sample(c(1:levels),
size = n,
replace = T))
ls[[i]] <- as.numeric(temp / sum(temp))
}
junk <- capture.output(data <- rmvord_naiv(n = n, probs = ls,
Cors = cor_mat,
empirical = empirical))
data
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/gen_ordinal.R
|
#' Simulate a Partial Correlation Matrix
#'
#' @param p number of variables (nodes)
#'
#' @param edge_prob connectivity
#'
#' @param lb lower bound for the partial correlations
#'
#' @param ub upper bound for the partial correlations
#'
#' @note The function checks for a valid matrix (positive definite),
#' but sometimes this will still fail. For example, for
#' larger \code{p}, to have large partial correlations this
#' requires a sparse GGM
#' (accomplished by setting \code{edge_prob}
#' to a small value).
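#'
#' For example, a sketch of a larger yet sparse network:
#'
#' \preformatted{
#' # p = 50 nodes with a small connectivity to keep the matrix positive definite
#' true_net <- gen_net(p = 50, edge_prob = 0.05)
#' }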
#'
#' @return A list containing the following:
#'
#' \itemize{
#'
#' \item{\strong{pcors}}: Partial correlation matrix, encoding
#' the conditional (in)dependence structure.
#'
#' \item{\strong{cors}}: Correlation matrix.
#'
#' \item{\strong{adj}}: Adjacency matrix.
#'
#' \item{\strong{trys}}: Number of attempts to obtain a
#' positive definite matrix.
#'
#' }
#'
#' @export
#'
#' @importFrom stats runif
#'
#' @examples
#'
#' true_net <- gen_net(p = 10)
gen_net <- function(p = 20,
edge_prob = 0.3,
lb = 0.05,
ub = 0.3) {
# negative determinant
d <- -1
# number of trys
trys <- 0
# until d is positive
while (d < 0) {
trys <- trys + 1
effects <- p * (p - 1) * 0.5
mat <- matrix(1, p, p)
prob_zero <- 1 - edge_prob
pool <- c(rep(0, effects * prob_zero),
runif(effects * edge_prob, lb, ub))
if (length(pool) != effects) {
pool <- c(0, pool)
}
mat[upper.tri(mat)] <- sample(pool, size = effects)
pcs <- symm_mat(mat)
pcs <- -pcs
diag(pcs) <- -diag(pcs)
d <- det(pcs)
}
cors <- cov2cor(solve(pcs))
inv <- solve(cors)
pcors <- cov2cor(inv) * -1
diag(pcors) <- 1
adj <- ifelse(pcs == 0, 0, 1)
returned_object <- list(
pcors = pcors * adj,
cors = cors,
adj = adj,
trys = trys
)
return(returned_object)
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/gen_pcors.R
|
#' GGM Compare: Exploratory Hypothesis Testing
#'
#' @name ggm_compare_explore
#'
#' @description Compare Gaussian graphical models with exploratory hypothesis testing using the matrix-F prior
#' distribution \insertCite{Mulder2018}{BGGM}. A test for each partial correlation in the model for any number
#' of groups. This provides evidence for the null hypothesis of no difference and the alternative hypothesis
#' of difference. With more than two groups, the test is for \emph{all} groups simultaneously (i.e., the relation
#' is the same or different in all groups). This method was introduced in \insertCite{williams2020comparing;textual}{BGGM}.
#' For confirmatory hypothesis testing see \code{\link{ggm_compare_confirm}}.
#'
#' @param ... At least two matrices (or data frame) of dimensions \emph{n} (observations) by \emph{p} (variables).
#'
#' @param formula An object of class \code{\link[stats]{formula}}. This allows for including
#' control variables in the model (i.e., \code{~ gender}).
#'
#' @param prior_sd Numeric. The scale of the prior distribution (centered at zero), in reference to a beta distribution.
#' The default is 0.20. See note for further details.
#'
#' @param type Character string. Which type of data for \code{Y} ? The options include \code{continuous},
#' \code{binary}, \code{ordinal}, or \code{mixed} (semi-parametric copula). See the note for further details.
#'
#' @param mixed_type Numeric vector. An indicator of length p for which varibles should be treated as ranks.
#' (1 for rank and 0 to assume normality). The default is currently (dev version) to treat all integer variables
#' as ranks when \code{type = "mixed"} and \code{NULL} otherwise. See note for further details.
#'
#' @param analytic Logical. Should the analytic solution be computed (default is \code{FALSE})? See note for details.
#'
#' @param iter Number of iterations (posterior samples; defaults to 5000).
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param seed An integer for the random seed.
#'
#' @references
#' \insertAllCited{}
#'
#'
#' @return The returned object of class \code{ggm_compare_explore} contains a lot of information that
#' is used for printing and plotting the results. For users of \strong{BGGM}, the following
#' are the useful objects:
#'
#' \itemize{
#'
#' \item \code{BF_01} A \emph{p} by \emph{p} matrix including
#' the Bayes factor for the null hypothesis.
#'
#' \item \code{pcor_diff} A \emph{p} by \emph{p} matrix including
#' the difference in partial correlations (only for two groups).
#'
#' \item \code{samp} A list containing the fitted models (of class \code{explore}) for each group.
#'
#' }
#' @details
#'
#' \strong{Controlling for Variables}:
#'
#' When controlling for variables, it is assumed that \code{Y} includes \emph{only}
#' the nodes in the GGM and the control variables. Internally, \emph{only} the predictors
#' that are included in \code{formula} are removed from \code{Y}. This is not the behavior of, say,
#' \code{\link{lm}}, but was adopted to ensure users do not have to write out each variable that
#' should be included in the GGM. An example is provided below.
#'
#' \strong{Mixed Type}:
#'
#' The term "mixed" is somewhat of a misnomer, because the method can be used for data including \emph{only}
#' continuous or \emph{only} discrete variables. This is based on the ranked likelihood which requires sampling
#' the ranks for each variable (i.e., the data is not merely transformed to ranks). This is computationally
#' expensive when there are many levels. For example, with continuous data, there are as many ranks
#' as data points!
#'
#' The option \code{mixed_type} allows the user to determine which variables should be treated as ranks
#' and the "empirical" distribution is used otherwise. This is accomplished by specifying an indicator
#' vector of length \emph{p}. A one indicates to use the ranks, whereas a zero indicates to "ignore"
#' that variable. By default all integer variables are handled as ranks.
#'
#' \strong{Dealing with Errors}:
#'
#' An error is most likely to arise when \code{type = "ordinal"}. There are two common errors (although still rare):
#'
#' \itemize{
#'
#' \item The first is due to sampling the thresholds, especially when the data is heavily skewed.
#' This can result in an ill-defined matrix. If this occurs, we recommend to first try
#' decreasing \code{prior_sd} (i.e., a more informative prior). If that does not work, then
#' change the data type to \code{type = "mixed"} which then estimates a copula GGM
#' (this method can be used for data containing \strong{only} ordinal variables). This should
#' work without a problem.
#'
#' \item The second is due to how the ordinal data are categorized. For example, if the error states
#' that the index is out of bounds, this indicates that the first category is a zero. This is not allowed, as
#' the first category must be one. This is addressed by adding one (e.g., \code{Y + 1}) to the data matrix.
#'
#' }
#'
#' @note
#'
#' \strong{"Default" Prior}:
#'
#' In Bayesian statistics, a default Bayes factor needs to have several properties. I refer
#' interested users to \insertCite{@section 2.2 in @dablander2020default;textual}{BGGM}. In
#' \insertCite{Williams2019_bf;textual}{BGGM}, some of these properties were investigated, such as
#' model selection consistency. That said, we would not consider this a "default" Bayes factor and
#' thus we encourage users to perform sensitivity analyses by varying the scale of the prior
#' distribution.
#'
#' Furthermore, it is important to note there is no "correct" prior and, also, there is no need
#' to entertain the possibility of a "true" model. Rather, the Bayes factor can be interpreted as
#' which hypothesis best (relative to each other) predicts the observed data
#' \insertCite{@Section 3.2 in @Kass1995}{BGGM}.
#'
#' \strong{Interpretation of Conditional (In)dependence Models for Latent Data}:
#'
#' See \code{\link{BGGM-package}} for details about interpreting GGMs based on latent data
#' (i.e., all data types besides \code{"continuous"}).
#'
#'
#' @examples
#'
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- bfi
#'
#' # males and females
#' Ymale <- subset(Y, gender == 1,
#' select = -c(gender,
#' education))[,1:10]
#'
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(gender,
#' education))[,1:10]
#'
#' ##########################
#' ### example 1: ordinal ###
#' ##########################
#'
#' # fit model
#' fit <- ggm_compare_explore(Ymale, Yfemale,
#' type = "ordinal",
#' iter = 250,
#' progress = FALSE)
#' # summary
#' summ <- summary(fit)
#'
#' # edge set
#' E <- select(fit)
#' }
#'
#' @export
ggm_compare_explore <- function(...,
formula = NULL,
type = "continuous",
mixed_type = NULL,
analytic = FALSE,
prior_sd = 0.20,
iter = 5000,
progress = TRUE,
seed = 1){
# combine data
dat_list <- list(...)
# combine data
info <- Y_combine(...)
# groups
groups <- length(info$dat)
delta <- delta_solve(prior_sd)
# check at least two groups
if(groups < 2){
stop("must have (at least) two groups")
}
# sample
if(!analytic){
samp <- lapply(1:groups, function(x) {
# mixed
# message("BGGM: Posterior Sampling ", "(Group ",x ,")")
Y <- dat_list[[x]]
# call estimate
explore(Y, formula = formula,
type = type,
prior_sd = prior_sd,
iter = iter,
mixed_type = mixed_type,
progress = progress,
seed = x,
... = paste0("(Group ", x, ")"))
})
post_samp <- lapply(1:groups, function(x) samp[[x]]$post_samp )
prior_samp <- lapply(1:groups, function(x) samp[[x]]$prior_samp)
# p with predictors removed
p <- samp[[1]]$p
# store pcor diff
pcor_diff <- BF_01_mat <- matrix(0, p, p)
# upper triangular elements
indices <- which(upper.tri(diag(p)), arr.ind = TRUE )
# make contrast matrices
## words for compatability
groups_as_words <- numbers2words(1:groups)
## hypotheses
hyp <- paste(groups_as_words, sep = " ", collapse = "=")
## `framed` hypotheses
framed <- framer(hyp)
## contrast matrices
mats <- create_matrices(framed = framed,
varnames = groups_as_words)
# loop through upper triangular
for(i in seq_len(nrow(indices))){
rho_ij <- indices[i,]
# start
post_group <- post_samp[[1]]$fisher_z[ rho_ij[1], rho_ij[2], (51:(iter + 50))]
prior_group <- prior_samp[[1]]$fisher_z[ 1, 2,]
# combined groups
for(j in 2:(groups)){
post_group <- cbind(post_group, post_samp[[j]]$fisher_z[ rho_ij[1], rho_ij[2], (51:(iter + 50))])
prior_group <- cbind(prior_group, prior_samp[[j]]$fisher_z[1, 2,])
}
# posterior covariance
cov_post <- cov(post_group)
# prior covariance
cov_prior <- cov(prior_group)
# posterior mean
post_mean <- colMeans(post_group)
# tranformed posterior
mu_post <- mats$R_e %*% post_mean
s_post <- mats$R_e %*% cov_post %*% t(mats$R_e)
# transformed prior
mu_prior <- mats$R_e %*% rep(0, groups)
s_prior <- mats$R_e %*% cov_prior %*% t(mats$R_e)
# bayes factor
log_BF <- mvnfast::dmvn(X = t(mats$r_e),
mu = mu_post,
sigma = s_post,
log = TRUE) -
mvnfast::dmvn(X = t(mats$r_e),
mu = mu_prior,
sigma = s_prior,
log = TRUE)
BF_01_mat[ rho_ij[1], rho_ij[2] ] <- exp(log_BF)
if(groups == 2){
pcor_diff[ rho_ij[1], rho_ij[2] ] <- (z2r(post_mean)[1] - z2r(post_mean)[2])
}
}
BF_01 <- symmetric_mat(BF_01_mat)
pcor_diff <- symmetric_mat(pcor_diff)
returned_object <- list(BF_01 = BF_01,
info = info,
iter = iter,
prior_sd = prior_sd,
call = match.call(),
delta = delta,
groups = groups,
pcor_diff = pcor_diff,
samp = samp,
type = type,
p = p)
# analytic solution
} else {
stop("analytic not currently implemented")
}
class(returned_object) <- c("BGGM",
"ggm_compare_explore",
"explore")
returned_object
}
print_summary_ggm_compare_bf <- function(x, ...){
groups <- x$object$groups
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$object$type, "\n")
cat("Formula:", paste(as.character(x$formula), collapse = " "), "\n")
# number of iterations
cat("Posterior Samples:", x$object$iter, "\n")
# number of observations
cat("Observations (n):\n")
groups <- length(x$object$info$dat)
for(i in 1:groups){
cat(" Group", paste( i, ":", sep = "") , x$object$info$dat_info$n[[i]], "\n")
}
# number of variables
cat("Variables (p):", x$object$p, "\n")
# number of edges
cat("Relations:", .5 * (x$object$p * (x$object$p-1)), "\n")
cat("Delta:", x$object$delta, "\n")
cat("--- \n")
cat("Call: \n")
print(x$object$call)
cat("--- \n")
cat("Hypotheses:\n")
cat("H0:", paste0("rho_g", 1:groups, collapse = " = "), "\n")
cat("H1:", paste0("rho_g", 1:groups, collapse = " - "), " = 0\n")
cat("--- \n\n")
print(x$results, right = FALSE, row.names = FALSE)
cat("--- \n")
}
print_ggm_compare_bf <- function(x, ...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$type, "\n")
cat("Formula:", paste(as.character(x$formula), collapse = " "), "\n")
# number of iterations
cat("Posterior Samples:", x$iter, "\n")
# number of observations
cat("Observations (n):\n")
groups <- length(x$info$dat)
for(i in 1:groups){
cat(" Group", paste( i, ":", sep = "") , x$info$dat_info$n[[i]], "\n")
}
# number of variables
cat("Variables (p):", x$p, "\n")
# number of edges
cat("Relations:", .5 * (x$p * (x$p-1)), "\n")
cat("Delta:", x$delta, "\n")
cat("--- \n")
cat("Call: \n")
print(x$call)
cat("--- \n")
cat("Hypotheses:\n")
cat("H0:", paste0("rho_g", 1:groups, collapse = " = "), "\n")
cat("H1:", paste0("rho_g", 1:groups, collapse = " - "), " = 0\n")
cat("--- \n")
cat("Date:", date(), "\n")
}
#' @title Summary Method for \code{ggm_compare_explore} Objects
#'
#' @description Summarize the posterior hypothesis probabilities
#'
#' @name summary.ggm_compare_explore
#'
#' @param object An object of class \code{ggm_compare_explore}.
#'
#' @param col_names Logical. Should the summary include the column names (default is \code{TRUE})?
#' Setting to \code{FALSE} includes the column numbers (e.g., \code{1--2}).
#'
#' @param ... Currently ignored.
#'
#' @return An object of class \code{summary.ggm_compare_explore}
#'
#' @seealso \code{\link{ggm_compare_explore}}
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- bfi
#'
#' # males and females
#' Ymale <- subset(Y, gender == 1,
#' select = -c(gender,
#' education))[,1:10]
#'
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(gender,
#' education))[,1:10]
#'
#' ##########################
#' ### example 1: ordinal ###
#' ##########################
#'
#' # fit model
#' fit <- ggm_compare_explore(Ymale, Yfemale,
#' type = "ordinal",
#' iter = 250,
#' progress = FALSE)
#' # summary
#' summ <- summary(fit)
#'
#' summ
#' }
#' @export
summary.ggm_compare_explore <- function(object,
col_names = TRUE,
...){
# nodes
p <- object$p
# identity matrix
I_p <- diag(p)
# prob null
prob_H0 <- round(object$BF_01 / (object$BF_01 + 1), 3)
# prob h1
prob_H1 <- round(1 - prob_H0, 3)
# column names
cn <- colnames(object$samp[[1]]$Y)
if(is.null(cn)){
mat_names <- sapply(1:p , function(x) paste(1:p, x, sep = "--"))[upper.tri(I_p)]
} else {
mat_names <- sapply(cn , function(x) paste(cn, x, sep = "--"))[upper.tri(I_p)]
}
if(object$groups == 2){
post_mean <- round(object$pcor_diff[upper.tri(I_p)], 3)
post_sd <- round(apply(object$samp[[1]]$post_samp$pcors -
object$samp[[2]]$post_samp$pcors, 1:2, sd)[upper.tri(I_p)], 3)
results <- data.frame(Relation = mat_names,
Post.mean = post_mean,
Post.sd = post_sd,
Pr.H0 = prob_H0[upper.tri(I_p)],
Pr.H1 = prob_H1[upper.tri(I_p)])
} else {
results <- data.frame(Relation = mat_names,
Pr.H0 = prob_H0[upper.tri(I_p)],
Pr.H1 = prob_H1[upper.tri(I_p)])
}
returned_object <- list(results = results,
object = object)
class(returned_object) <- c("BGGM",
"ggm_compare_explore",
"summary.ggm_compare_explore",
"explore")
returned_object
}
#' @title Plot \code{summary.ggm_compare_explore} Objects
#'
#' @description Visualize the posterior hypothesis probabilities.
#'
#' @name plot.summary.ggm_compare_explore
#'
#' @param x An object of class \code{summary.ggm_compare_explore}
#'
#' @param size Numeric. The size of the points (defaults to 2).
#'
#' @param color Character string. The color of the points
#' (defaults to \code{"black"}).
#'
#' @param ... Currently ignored.
#'
#' @return A \code{ggplot} object
#'
#' @seealso \code{\link{ggm_compare_explore}}
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- bfi
#'
#' # males and females
#' Ymale <- subset(Y, gender == 1,
#' select = -c(gender,
#' education))[,1:10]
#'
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(gender,
#' education))[,1:10]
#'
#' ##########################
#' ### example 1: ordinal ###
#' ##########################
#'
#' # fit model
#' fit <- ggm_compare_explore(Ymale, Yfemale,
#' type = "ordinal",
#' iter = 250,
#' progress = FALSE)
#' # summary
#' summ <- summary(fit)
#'
#' plot(summ)
#' }
#' @export
plot.summary.ggm_compare_explore <- function(x,
size = 2,
color = "black", ...){
dat_temp <- x$results[order(x$results$Pr.H1,
decreasing = F), ]
dat_temp$Relation <-
factor(dat_temp$Relation,
levels = dat_temp$Relation,
labels = dat_temp$Relation)
ggplot(dat_temp,
aes(x = Relation,
y = Pr.H1)) +
geom_point(size = size, color = color) +
theme(axis.text.x = element_text(
angle = 90,
vjust = 0.5,
hjust = 1
)) +
coord_flip()
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/ggm_compare_bf.default.R
|
#' GGM Compare: Confirmatory Hypothesis Testing
#'
#' @description Confirmatory hypothesis testing for comparing GGMs. Hypotheses are expressed as equality
#' and/or ineqaulity contraints on the partial correlations of interest. Here the focus is \emph{not}
#' on determining the graph (see \code{\link{explore}}) but testing specific hypotheses related to
#' the conditional (in)dependence structure. These methods were introduced in
#' \insertCite{Williams2019_bf;textual}{BGGM} and in \insertCite{williams2020comparing;textual}{BGGM}
#'
#' @name ggm_compare_confirm
#'
#' @param ... At least two matrices (or data frame) of dimensions \emph{n} (observations) by \emph{p} (nodes).
#'
#' @param hypothesis Character string. The hypothesis (or hypotheses) to be tested. See notes for further details.
#'
#' @param formula An object of class \code{\link[stats]{formula}}. This allows for including
#' control variables in the model (i.e., \code{~ gender}).
#'
#' @param prior_sd Numeric. The scale of the prior distribution (centered at zero),
#' in reference to a beta distribution (defaults to 0.25).
#'
#' @param type Character string. Which type of data for \code{Y} ? The options include \code{continuous},
#' \code{binary}, \code{ordinal}, or \code{mixed}. Note that mixed can be used for data with only
#' ordinal variables. See the note for further details.
#'
#' @param mixed_type Numeric vector. An indicator of length p for which variables should be treated as ranks.
#' (1 for rank and 0 to assume normality). The default is currently (dev version) to treat all integer variables
#' as ranks when \code{type = "mixed"} and \code{NULL} otherwise. See note for further details.
#'
#' @param iter Number of iterations (posterior samples; defaults to 25,000).
#'
#' @param impute Logical. Should the missing values (\code{NA})
#' be imputed during model fitting (defaults to \code{TRUE}) ?
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param seed An integer for the random seed.
#'
#' @references
#' \insertAllCited{}
#'
#' @return The returned object of class \code{confirm} contains a lot of information that
#' is used for printing and plotting the results. For users of \strong{BGGM}, the following
#' are the useful objects:
#'
#' \itemize{
#'
#' \item \code{out_hyp_prob} Posterior hypothesis probabilities.
#'
#' \item \code{info} An object of class \code{BF} from the R package \strong{BFpack}
#' \insertCite{mulder2019bfpack}{BGGM}
#'
#' }
#'
#' @details
#' The hypotheses can be written either with the respective column names or numbers.
#' For example, \code{g1_1--2} denotes the relation between the variables in column 1 and 2 for group 1.
#' The \code{g1_} is required and the only difference from \code{\link{confirm}} (one group).
#' Note that these must correspond to the upper triangular elements of the correlation
#' matrix. This is accomplished by ensuring that the first number is smaller than the second number.
#' This also applies when using column names (i.e,, in reference to the column number).
#'
#'
#' \strong{One Hypothesis}:
#'
#' To test whether a relation is larger in one group, while both are expected
#' to be positive, this can be written as
#'
#' \itemize{
#'
#' \item \code{hyp <- c("g1_1--2 > g2_1--2 > 0")}
#' }
#'
#' This is then compared to the complement.
#'
#' \strong{More Than One Hypothesis}:
#'
#' The above hypothesis can also be compared to, say, a null model by using ";"
#' to separate the hypotheses, for example,
#'
#' \itemize{
#'
#' \item \code{hyp <- c("g1_1--2 > g2_1--2 > 0; g1_1--2 = g2_1--2 = 0")}.
#'
#'}
#'
#' Any number of hypotheses can be compared this way.
#'
#' \strong{Using "&"}
#'
#' It is also possible to include \code{&}. This allows for testing one constraint \bold{and}
#' another constraint as one hypothesis.
#'
#' \itemize{
#'
#' \item \code{hyp <- c("g1_A1--A2 > g2_A1--A2 & g1_A1--A3 = g2_A1--A3")}
#'
#' }
#'
#' Of course, it is then possible to include additional hypotheses by separating them with ";".
#'
#' \strong{Testing Sums}
#'
#' It might also be interesting to test the sum of partial correlations. For example, that the
#' sum of specific relations in one group is larger than the sum in another group.
#'
#' \itemize{
#'
#' \item \code{hyp <- c("g1_A1--A2 + g1_A1--A3 > g2_A1--A2 + g2_A1--A3;
#' g1_A1--A2 + g1_A1--A3 = g2_A1--A2 + g2_A1--A3")}
#'
#' }
#'
#'
#' \strong{Potential Delays}:
#'
#' There is a chance for a potentially long delay from the time the progress bar finishes
#' to when the function is done running. This occurs when the hypotheses require further
#' sampling to be tested, for example, when grouping relations
#' \code{c("(g1_A1--A2, g2_A2--A3) > (g2_A1--A2, g2_A2--A3)"}.
#' This is not an error.
#'
#'
#' \strong{Controlling for Variables}:
#'
#' When controlling for variables, it is assumed that \code{Y} includes \emph{only}
#' the nodes in the GGM and the control variables. Internally, \emph{only} the predictors
#' that are included in \code{formula} are removed from \code{Y}. This is not the behavior of, say,
#' \code{\link{lm}}, but was adopted to ensure users do not have to write out each variable that
#' should be included in the GGM. An example is provided below.
#'
#' \strong{Mixed Type}:
#'
#' The term "mixed" is somewhat of a misnomer, because the method can be used for data including \emph{only}
#' continuous or \emph{only} discrete variables \insertCite{hoff2007extending}{BGGM}. This is based on the
#' ranked likelihood which requires sampling the ranks for each variable (i.e., the data is not merely
#' transformed to ranks). This is computationally expensive when there are many levels. For example,
#' with continuous data, there are as many ranks as data points!
#'
#' The option \code{mixed_type} allows the user to determine which variables should be treated as ranks
#' and the "empirical" distribution is used otherwise. This is accomplished by specifying an indicator
#' vector of length \emph{p}. A one indicates to use the ranks, whereas a zero indicates to "ignore"
#' that variable. By default all integer variables are handled as ranks.
#'
#' \strong{Dealing with Errors}:
#'
#' An error is most likely to arise when \code{type = "ordinal"}. There are two common errors (although still rare):
#'
#' \itemize{
#'
#' \item The first is due to sampling the thresholds, especially when the data is heavily skewed.
#' This can result in an ill-defined matrix. If this occurs, we recommend first trying to
#' decrease \code{prior_sd} (i.e., a more informative prior). If that does not work, then
#' change the data type to \code{type = "mixed"}, which then estimates a copula GGM
#' (this method can be used for data containing \strong{only} ordinal variables). This should
#' work without a problem.
#'
#' \item The second is due to how the ordinal data are categorized. For example, if the error states
#' that the index is out of bounds, this indicates that the first category is a zero. This is not allowed, as
#' the first category must be one. This is addressed by adding one (e.g., \code{Y + 1}) to the data matrix.
#'
#' }
#'
#'
#' \strong{Imputing Missing Values}:
#'
#' Missing values are imputed with the approach described in \insertCite{hoff2009first;textual}{BGGM}.
#' The basic idea is to impute the missing values with the respective posterior predictive distribution,
#' given the observed data, as the model is being estimated. Note that the default is \code{TRUE},
#' but this is ignored when there are no missing values. If set to \code{FALSE}, and there are missing
#' values, list-wise deletion is performed with \code{na.omit}.
#'
#' @note
#'
#' \strong{"Default" Prior}:
#'
#' In Bayesian statistics, a default Bayes factor needs to have several properties. I refer
#' interested users to \insertCite{@section 2.2 in @dablander2020default;textual}{BGGM}. In
#' \insertCite{Williams2019_bf;textual}{BGGM}, some of these properties were investigated (e.g.,
#' model selection consistency). That said, we would not consider this a "default" or "automatic"
#' Bayes factor and thus we encourage users to perform sensitivity analyses by varying the scale of
#' the prior distribution (\code{prior_sd}).
#'
#' Furthermore, it is important to note there is no "correct" prior and, also, there is no need
#' to entertain the possibility of a "true" model. Rather, the Bayes factor can be interpreted as
#' which hypothesis best (relative to each other) predicts the observed data
#' \insertCite{@Section 3.2 in @Kass1995}{BGGM}.
#'
#' \strong{Interpretation of Conditional (In)dependence Models for Latent Data}:
#'
#' See \code{\link{BGGM-package}} for details about interpreting GGMs based on latent data
#' (i.e., all data types besides \code{"continuous"}).
#'
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- bfi
#'
#' ###############################
#' #### example 1: continuous ####
#' ###############################
#'
#' # males
#' Ymale <- subset(Y, gender == 1,
#' select = -c(education,
#' gender))[,1:5]
#'
#'
#' # females
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(education,
#' gender))[,1:5]
#'
#' # exhaustive
#' hypothesis <- c("g1_A1--A2 > g2_A1--A2;
#' g1_A1--A2 < g2_A1--A2;
#' g1_A1--A2 = g2_A1--A2")
#'
#' # test hyp
#' test <- ggm_compare_confirm(Ymale, Yfemale,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#' # print (evidence not strong)
#' test
#'
#' #########################################
#' #### example 2: sensitivity to prior ####
#' #########################################
#' # continued from example 1
#'
#' # decrease prior SD
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' prior_sd = 0.1,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#' # print
#' test
#'
#' # increase prior SD
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' prior_sd = 0.5,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#' # print
#' test
#'
#' ################################
#' #### example 3: mixed data #####
#' ################################
#'
#' hypothesis <- c("g1_A1--A2 > g2_A1--A2;
#' g1_A1--A2 < g2_A1--A2;
#' g1_A1--A2 = g2_A1--A2")
#'
#' # test (iter = 250 for example)
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' type = "mixed",
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#' # print
#' test
#'
#' ##############################
#' ##### example 4: control #####
#' ##############################
#' # control for education
#'
#' # data
#' Y <- bfi
#'
#' # males
#' Ymale <- subset(Y, gender == 1,
#' select = -c(gender))[,c(1:5, 26)]
#'
#' # females
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(gender))[,c(1:5, 26)]
#'
#' # test
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' formula = ~ education,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#' # print
#' test
#'
#'
#' #####################################
#' ##### example 5: many relations #####
#' #####################################
#'
#' # data
#' Y <- bfi
#'
#' hypothesis <- c("g1_A1--A2 > g2_A1--A2 & g1_A1--A3 = g2_A1--A3;
#' g1_A1--A2 = g2_A1--A2 & g1_A1--A3 = g2_A1--A3;
#' g1_A1--A2 = g2_A1--A2 = g1_A1--A3 = g2_A1--A3")
#'
#' Ymale <- subset(Y, gender == 1,
#' select = -c(education,
#' gender))[,1:5]
#'
#'
#' # females
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(education,
#' gender))[,1:5]
#'
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#' # print
#' test
#' }
#' @export
ggm_compare_confirm <- function(...,
hypothesis,
formula = NULL,
type = "continuous",
mixed_type = NULL,
prior_sd = 0.25,
iter = 25000,
impute = TRUE,
progress = TRUE,
seed = 1){
# temporary warning until missing data is fully implemented
if(type != "continuous"){
warning(paste0("imputation during model fitting is\n",
"currently only implemented for 'continuous' data."))
}
# removed per CRAN (8/12/21)
# old <- .Random.seed
set.seed(seed)
# prior prob
priorprob <- 1
# delta parameter
delta <- delta_solve(prior_sd)
# combine data
dat_list <- list(...)
# combine data
info <- Y_combine(...)
# groups
groups <- length(info$dat)
if(type == "continuous"){
if(is.null(formula)){
post_samp <- lapply(1:groups, function(x) {
if(isTRUE(progress)){
message("BGGM: Posterior Sampling ", "(Group ",x ,")")
}
# data
Y <- as.matrix(scale(dat_list[[x]], scale = F))
# nodes
p <- ncol(Y)
if(!impute){
# na omit
Y <- as.matrix(na.omit(Y))
Y_miss <- Y
} else {
Y_miss <- ifelse(is.na(Y), 1, 0)
if(sum(Y_miss) == 0){
impute <- FALSE
}
# impute means
for(i in 1:p){
Y[which(is.na(Y[,i])),i] <- mean(na.omit(Y[,i]))
}
}
start <- solve(cov(Y))
.Call(
'_BGGM_Theta_continuous',
PACKAGE = 'BGGM',
Y = Y,
iter = iter + 50,
delta = delta,
epsilon = 0.01,
prior_only = 0,
explore = 1,
start = start,
progress = progress,
impute = impute,
Y_miss = Y_miss
)
})
# formula
} else {
post_samp <- lapply(1:groups, function(x) {
if(isTRUE(progress)){
message("BGGM: Posterior Sampling ", "(Group ", x ,")")
}
control_info <- remove_predictors_helper(list(as.data.frame(dat_list[[x]])),
formula = formula)
# data
Y <- as.matrix(scale(control_info$Y_groups[[1]], scale = F))
# nodes
p <- ncol(Y)
# observations
n <- nrow(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
start <- solve(cov(Y))
# posterior sample
.Call(
"_BGGM_mv_continuous",
Y = Y,
X = X,
delta = delta,
epsilon = 0.01,
iter = iter + 50,
start = start,
progress = progress
)
})
}
} else if(type == "binary"){
# intercept only
if (is.null(formula)) {
post_samp <- lapply(1:groups, function(x) {
if(isTRUE(progress)){
message("BGGM: Posterior Sampling ", "(Group ",x ,")")
}
# data
Y <- as.matrix(na.omit(dat_list[[x]]))
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
X <- matrix(1, n, 1)
start <- solve(cov(Y))
# posterior sample
.Call(
"_BGGM_mv_binary",
Y = Y,
X = X,
delta = delta,
epsilon = 0.01,
iter = iter + 50,
beta_prior = 0.0001,
cutpoints = c(-Inf, 0, Inf),
start = start,
progress = progress
)
})
} else {
post_samp <- lapply(1:groups, function(x) {
if(isTRUE(progress)){
message("BGGM: Posterior Sampling ", "(Group ",x ,")")
}
control_info <- remove_predictors_helper(list(as.data.frame(dat_list[[x]])),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
start <- solve(cov(Y))
# posterior sample
.Call(
"_BGGM_mv_binary",
Y = Y,
X = X,
delta = delta,
epsilon = 0.01,
iter = iter + 50,
beta_prior = 0.0001,
cutpoints = c(-Inf, 0, Inf),
start = start,
progress = progress
)
})
}
} else if(type == "ordinal"){
if(is.null(formula)){
post_samp <- lapply(1:groups, function(x) {
if(isTRUE(progress)){
message("BGGM: Posterior Sampling ", "(Group ",x ,")")
}
# data
Y <- as.matrix(na.omit(dat_list[[x]]))
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
X <- matrix(1, n, 1)
# categories
K <- max(apply(Y, 2, function(x) { length(unique(x)) } ))
start <- solve(cov(Y))
# posterior sample
# call c ++
.Call(
"_BGGM_mv_ordinal_albert",
Y = Y,
X = X,
iter = iter + 50,
delta = delta,
epsilon = 0.01,
K = K,
start = start,
progress = progress
)
})
} else {
post_samp <- lapply(1:groups, function(x) {
if(isTRUE(progress)){
message("BGGM: Posterior Sampling ", "(Group ",x ,")")
}
control_info <- remove_predictors_helper(list(as.data.frame(dat_list[[x]])),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# model matrix
X <- as.matrix(control_info$model_matrices[[1]])
# categories
K <- max(apply(Y, 2, function(x) { length(unique(x)) } ))
start <- solve(cov(Y))
# posterior sample
# call c ++
.Call(
"_BGGM_mv_ordinal_albert",
Y = Y,
X = X,
iter = iter + 50,
delta = delta,
epsilon = 0.01,
K = K,
start = start,
progress = progress
)
})
}
} else if(type == "mixed") {
if(!is.null(formula)){
warning("formula ignored for mixed data at this time")
post_samp <- lapply(1:groups, function(x) {
if(isTRUE(progress)){
message("BGGM: Posterior Sampling ", "(Group ",x ,")")
}
control_info <- remove_predictors_helper(list(as.data.frame(dat_list[[x]])),
formula = formula)
# data
Y <- as.matrix(control_info$Y_groups[[1]])
Y <- na.omit(Y)
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# default for ranks
if(is.null(mixed_type)) {
idx = colMeans(round(Y) == Y)
idx = ifelse(idx == 1, 1, 0)
# user defined
} else {
idx = mixed_type
}
# rank following hoff (2008)
rank_vars <- rank_helper(Y)
post_samp <- .Call(
"_BGGM_copula",
z0_start = rank_vars$z0_start,
levels = rank_vars$levels,
K = rank_vars$K,
Sigma_start = rank_vars$Sigma_start,
iter = iter + 50,
delta = delta,
epsilon = 0.01,
idx = idx,
progress = progress
)
})
} else {
post_samp <- lapply(1:groups, function(x) {
if(isTRUE(progress)){
message("BGGM: Posterior Sampling ", "(Group ",x ,")")
}
Y <- na.omit(dat_list[[x]])
# observations
n <- nrow(Y)
# nodes
p <- ncol(Y)
# default for ranks
if(is.null(mixed_type)) {
idx = colMeans(round(Y) == Y)
idx = ifelse(idx == 1, 1, 0)
# user defined
} else {
idx = mixed_type
}
# rank following hoff (2008)
rank_vars <- rank_helper(Y)
post_samp <- .Call(
"_BGGM_copula",
z0_start = rank_vars$z0_start,
levels = rank_vars$levels,
K = rank_vars$K,
Sigma_start = rank_vars$Sigma_start,
iter = iter + 50,
delta = delta,
epsilon = 0.01,
idx = idx,
progress = progress
)
})
}
} else {
stop("'type' not supported: must be continuous, binary, ordinal, or mixed.")
}
# sample prior
if(is.null(formula)){
Yprior <- as.matrix(dat_list[[1]])
prior_samp <- lapply(1:groups, function(x) {
if(isTRUE(progress)){
message("BGGM: Prior Sampling ", "(Group ",x ,")")
}
.Call(
'_BGGM_sample_prior',
PACKAGE = 'BGGM',
Y = Yprior,
iter = 25000,
delta = delta,
epsilon = 0.01,
prior_only = 1,
explore = 0,
progress = progress
)$fisher_z
})
} else {
control_info <- remove_predictors_helper(list(as.data.frame(dat_list[[1]])),
formula = formula)
Yprior <- as.matrix(scale(control_info$Y_groups[[1]], scale = F))
prior_samp <- lapply(1:groups, function(x) {
if(isTRUE(progress)){
message("BGGM: Prior Sampling ", "(Group ", x ,")")
}
set.seed(x)
.Call(
'_BGGM_sample_prior',
PACKAGE = 'BGGM',
Y = Yprior,
iter = 25000,
delta = delta,
epsilon = 0.01,
prior_only = 1,
explore = 0,
progress = progress
)$fisher_z
})
}
# nodes
p <- ncol(Yprior)
# number of pcors
pcors <- 0.5 * (p * (p - 1))
# identity matrix
I_p <- diag(p)
# colnames: post samples
col_names <- numbers2words(1:p)
mat_names <- lapply(1:groups, function(x) paste0("g", numbers2words(x),
sapply(col_names, function(x) paste(col_names, x, sep = ""))[upper.tri(I_p)]))
# posterior start group (one)
post_group <- matrix(post_samp[[1]]$fisher_z[, , 51:(iter + 50)][upper.tri(I_p)],
iter, pcors, byrow = TRUE)
# prior start group (one)
prior_group <- matrix(prior_samp[[1]][ , ,][upper.tri(I_p)],
nrow = 25000,
ncol = pcors,
byrow = TRUE)
# post group
for(j in 2:(groups)){
post_group <- cbind(post_group,
matrix(post_samp[[j]]$fisher_z[, , 51:(iter+50)][upper.tri(I_p)],
nrow = iter, ncol = pcors,
byrow = TRUE))
prior_group <- cbind(prior_group,
matrix(prior_samp[[j]][ , ,][upper.tri(I_p)], 25000, pcors, byrow = TRUE))
}
posterior_samples <- post_group
colnames(posterior_samples) <- unlist(mat_names)
prior_samples <- prior_group
colnames(prior_samples) <- unlist(mat_names)
prior_mu <- colMeans(prior_samples)
prior_cov <- cov(prior_samples)
post_mu <- colMeans(posterior_samples)
post_cov <- cov(posterior_samples)
BFprior <- BF(prior_mu,
Sigma = prior_cov,
hypothesis = group_hyp_helper(hypothesis, x = info$dat[[1]]),
n = 1)
BFpost <- BF(post_mu,
Sigma = post_cov,
hypothesis = group_hyp_helper(hypothesis, x = info$dat[[1]]),
n = 1)
# number of hypotheses
n_hyps <- nrow(BFpost$BFtable_confirmatory)
# BF against unconstrained
BF_tu <- NA
for (i in seq_len(n_hyps)) {
# BF tu
BF_tu[i] <-
prod(BFpost$BFtable_confirmatory[i, 3:4] / BFprior$BFtable_confirmatory[i, 3:4])
}
# posterior hyp probs
out_hyp_prob <- (BF_tu * priorprob) / sum(BF_tu * priorprob)
# BF matrix
BF_matrix <- matrix(rep(BF_tu, length(BF_tu)),
ncol = length(BF_tu),
byrow = TRUE)
BF_matrix[is.nan(BF_matrix)] <- 0
diag(BF_matrix) <- 1
BF_matrix <- t(BF_matrix) / (BF_matrix)
row.names(BF_matrix) <- row.names(BFpost$BFtable_confirmatory)
colnames(BF_matrix) <- row.names(BFpost$BFtable_confirmatory)
if(isTRUE(progress)){
message("BGGM: Finished")
}
returned_object <- list(
BF_matrix = BF_matrix,
out_hyp_prob = out_hyp_prob,
info = BFpost,
groups = groups,
info_dat = info,
type = type,
call = match.call(),
hypothesis = hypothesis,
iter = iter,
p = p,
posterior_samples = posterior_samples,
post_group = post_group,
delta = delta,
formula = formula,
dat_list = dat_list,
post_samp = post_samp
)
# removed per CRAN (8/12/21)
#.Random.seed <<- old
class(returned_object) <- c("BGGM",
"confirm",
"ggm_compare_confirm")
returned_object
}
print_ggm_confirm <- function(x, ...){
groups <- x$groups
info <- x$info_dat
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("Type:", x$type , "\n")
cat("--- \n")
cat("Posterior Samples:", x$iter, "\n")
for(i in 1:groups){
cat(" Group", paste( i, ":", sep = "") , info$dat_info$n[[i]], "\n")
}
# number of variables
cat("Variables (p):", x$p, "\n")
# number of edges
cat("Relations:", .5 * (x$p * (x$p-1)), "\n")
cat("Delta:", x$delta, "\n")
cat("--- \n")
cat("Call:\n")
print(x$call)
cat("--- \n")
cat("Hypotheses: \n\n")
hyps <- strsplit(x$hypothesis, ";")
n_hyps <- length(hyps[[1]])
x$info$hypotheses[1:n_hyps] <- hyps[[1]]
n_hyps <- length(x$info$hypotheses)
for (h in seq_len(n_hyps)) {
cat(paste0("H", h, ": ", gsub(" ", "", gsub('[\n]', '', x$info$hypotheses[h])), "\n"))
}
cat("--- \n")
cat("Posterior prob: \n\n")
for(h in seq_len(n_hyps)){
cat(paste0("p(H",h,"|data) = ", round(x$out_hyp_prob[h], 3 ) ))
cat("\n")
}
cat("--- \n")
cat('Bayes factor matrix: \n')
print(round(x$BF_matrix, 3))
cat("--- \n")
cat("note: equal hypothesis prior probabilities")
}
#' @title Plot \code{confirm} objects
#'
#' @description Plot the posterior hypothesis probabilities as a pie chart, with
#' each slice corresponding to the probability of a given hypothesis.
#'
#' @param x An object of class \code{confirm}
#'
#' @param ... Currently ignored.
#'
#' @return A \code{ggplot} object.
#'
#'
#' @examples
#'
#' \donttest{
#'
#' #####################################
#' ##### example 1: many relations #####
#' #####################################
#'
#' # data
#' Y <- bfi
#'
#' hypothesis <- c("g1_A1--A2 > g2_A1--A2 & g1_A1--A3 = g2_A1--A3;
#' g1_A1--A2 = g2_A1--A2 & g1_A1--A3 = g2_A1--A3;
#' g1_A1--A2 = g2_A1--A2 = g1_A1--A3 = g2_A1--A3")
#'
#' Ymale <- subset(Y, gender == 1,
#' select = -c(education,
#' gender))[,1:5]
#'
#'
#' # females
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(education,
#' gender))[,1:5]
#'
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#'
#' # plot
#' plot(test)
#' }
#' @export
plot.confirm <- function(x, ...){
probs <- x$out_hyp_prob
hyps_names <- paste0("p(H", 1:length(probs), "|data) = ", round(probs, 3))
df <- data.frame(hyps_names = hyps_names,
hyps = probs)
plt <- ggplot(df, aes(x="",
y = probs,
fill = hyps_names))+
geom_bar(width = 1, stat = "identity") +
coord_polar("y") +
theme_minimal() +
theme(axis.text = element_blank(),
axis.ticks = element_blank(),
panel.grid = element_blank()) +
scale_fill_discrete("Posterior Prob") +
ylab("") +
xlab("")
plt
}
# ---- ggm_compare_confirm.R ends here ----
#' GGM Compare: Estimate
#'
#' @name ggm_compare_estimate
#'
#' @description Compare partial correlations that are estimated from any number of groups. This method works for
#' continuous, binary, ordinal, and mixed data (a combination of categorical and continuous variables).
#' The approach (i.e., a difference between posterior distributions) was
#' described in \insertCite{Williams2019;textual}{BGGM}.
#'
#' @param ... Matrices (or data frames) of dimensions \emph{n} (observations) by \emph{p} (variables).
#' Requires at least two.
#'
#' @param formula An object of class \code{\link[stats]{formula}}. This allows for including
#' control variables in the model (i.e., \code{~ gender}). See the note for further details.
#'
#'
#' @param prior_sd The scale of the prior distribution (centered at zero), in reference to a beta distribution
#' (defaults to 0.50).
#' See note for further details.
#'
#' @param type Character string. Which type of data for \strong{Y} ? The options include \code{continuous},
#' \code{binary}, \code{ordinal}, or \code{mixed}. See the note for further details.
#'
#' @param mixed_type Numeric vector. An indicator of length \emph{p} for which variables should be treated as ranks
#' (1 for rank and 0 to use the 'empirical' or observed distribution). The default is currently to treat all integer variables
#' as ranks when \code{type = "mixed"} and \code{NULL} otherwise. See note for further details.
#'
#' @param iter Number of iterations (posterior samples; defaults to 5000).
#'
#' @param impute Logical. Should the missing values (\code{NA})
#' be imputed during model fitting (defaults to \code{TRUE}) ?
#'
#' @param analytic Logical. Should the analytic solution be computed (default is \code{FALSE})? This is only available
#' for continuous data. Note that if \code{type = "mixed"} and \code{analytic = TRUE}, the data will
#' automatically be treated as continuous.
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param seed An integer for the random seed.
#'
#' @references
#' \insertAllCited{}
#'
#' @return
#' A list of class \code{ggm_compare_estimate} containing:
#' \itemize{
#' \item \code{pcor_diffs} partial correlation differences (posterior distribution)
#' \item \code{p} number of variables
#' \item \code{info} list containing information about each group (e.g., sample size, etc.)
#' \item \code{iter} number of posterior samples
#' \item \code{call} \code{match.call}
#' }
#'
#' @details
#' This function can be used to compare the partial correlations for any number of groups.
#' This is accomplished with pairwise comparisons for each relation. In the case of three groups,
#' for example, group 1 and group 2 are compared, then group 1 and group 3 are compared, and then
#' group 2 and group 3 are compared. There is a full distribution for each difference that can be
#' summarized (i.e., \code{\link{summary.ggm_compare_estimate}}) and then visualized
#' (i.e., \code{\link{plot.summary.ggm_compare_estimate}}). The graph of differences is selected with
#' \code{\link{select.ggm_compare_estimate}}.
#'
#'
#' \strong{Controlling for Variables}:
#'
#' When controlling for variables, it is assumed that \code{Y} includes \emph{only}
#' the nodes in the GGM and the control variables. Internally, \code{only} the predictors
#' that are included in \code{formula} are removed from \code{Y}. This is not the behavior of, say,
#' \code{\link{lm}}, but was adopted to ensure users do not have to write out each variable that
#' should be included in the GGM. An example is provided below.
#'
#' \strong{Mixed Type}:
#'
#' The term "mixed" is somewhat of a misnomer, because the method can be used for data including \emph{only}
#' continuous or \emph{only} discrete variables. This is based on the ranked likelihood which requires sampling
#' the ranks for each variable (i.e., the data is not merely transformed to ranks). This is computationally
#' expensive when there are many levels. For example, with continuous data, there are as many ranks
#' as data points!
#'
#' The option \code{mixed_type} allows the user to determine which variables should be treated as ranks
#' and the "empirical" distribution is used otherwise. This is accomplished by specifying an indicator
#' vector of length \emph{p}. A one indicates to use the ranks, whereas a zero indicates to "ignore"
#' that variable. By default all integer variables are handled as ranks.
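#'
#' For instance, if the first three of five columns are Likert-type items and the rest are
#' continuous, one possibility is \code{mixed_type = c(1, 1, 1, 0, 0)} (an illustrative
#' setting; the positions correspond to the columns of \code{Y}).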
#'
#' \strong{Dealing with Errors}:
#'
#' An error is most likely to arise when \code{type = "ordinal"}. There are two common errors (although still rare):
#'
#' \itemize{
#'
#' \item The first is due to sampling the thresholds, especially when the data is heavily skewed.
#' This can result in an ill-defined matrix. If this occurs, we recommend first trying to
#' decrease \code{prior_sd} (i.e., a more informative prior). If that does not work, then
#' change the data type to \code{type = "mixed"}, which then estimates a copula GGM
#' (this method can be used for data containing \strong{only} ordinal variables). This should
#' work without a problem.
#'
#' \item The second is due to how the ordinal data are categorized. For example, if the error states
#' that the index is out of bounds, this indicates that the first category is a zero. This is not allowed, as
#' the first category must be one. This is addressed by adding one (e.g., \code{Y + 1}) to the data matrix.
#'
#' }
#'
#' \strong{Imputing Missing Values}:
#'
#' Missing values are imputed with the approach described in \insertCite{hoff2009first;textual}{BGGM}.
#' The basic idea is to impute the missing values with the respective posterior predictive distribution,
#' given the observed data, as the model is being estimated. Note that the default is \code{TRUE},
#' but this is ignored when there are no missing values. If set to \code{FALSE}, and there are missing
#' values, list-wise deletion is performed with \code{na.omit}.
#'
#'
#' @note
#'
#' \strong{Mixed Data}:
#'
#' The mixed data approach was introduced \insertCite{@in @hoff2007extending;textual}{BGGM}
#' (our paper describing an extension to Bayesian hypothesis testing is forthcoming).
#' This is a semi-parametric copula model based on the ranked likelihood. This is computationally
#' expensive when treating continuous data as ranks. The current default is to treat only integer data as ranks.
#' This should of course be adjusted for continuous data that is skewed. This can be accomplished with the
#' argument \code{mixed_type}. A \code{1} in the numeric vector of length \emph{p} indicates to treat that
#' respective node as a rank (corresponding to the column number) and a zero indicates to use the observed
#' (or "empirical") data.
#'
#'
#' It is also important to note that \code{type = "mixed"} is not restricted to mixed data (containing a combination of
#' categorical and continuous): all the nodes can be ordinal or continuous (but again this will take some time).
#'
#'
#' \strong{Interpretation of Conditional (In)dependence Models for Latent Data}:
#'
#' See \code{\link{BGGM-package}} for details about interpreting GGMs based on latent data
#' (i.e., all data types besides \code{"continuous"}).
#'
#'
#'
#' \strong{Additional GGM Compare Methods}
#'
#' Bayesian hypothesis testing is implemented in \code{\link{ggm_compare_explore}} and
#' \code{\link{ggm_compare_confirm}} \insertCite{Williams2019_bf}{BGGM}. The latter allows for confirmatory
#' hypothesis testing. An approach based on a posterior predictive check is implemented in \code{\link{ggm_compare_ppc}}
#' \insertCite{williams2020comparing}{BGGM}. This provides a 'global' test for comparing the entire GGM and a 'nodewise'
#' test for comparing each variable in the network \insertCite{Williams2019;textual}{BGGM}.
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- bfi
#'
#' # males and females
#' Ymale <- subset(Y, gender == 1,
#' select = -c(gender,
#' education))[,1:10]
#'
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(gender,
#' education))[,1:10]
#'
#' # fit model
#' fit <- ggm_compare_estimate(Ymale, Yfemale,
#' type = "ordinal",
#' iter = 250,
#' prior_sd = 0.25,
#' progress = FALSE)
#'
#' ###########################
#' ### example 2: analytic ###
#' ###########################
#' # only continuous
#'
#' # fit model
#' fit <- ggm_compare_estimate(Ymale, Yfemale,
#' analytic = TRUE)
#'
#' # summary
#' summ <- summary(fit)
#'
#' # plot summary
#' plt_summ <- plot(summary(fit))
#'
#' # select
#' E <- select(fit)
#'
#' # plot select
#' plt_E <- plot(select(fit))
#'
#' }
#'
#' @export
ggm_compare_estimate <- function(...,
formula = NULL,
type = "continuous",
mixed_type = NULL,
analytic = FALSE,
prior_sd = 0.50,
iter = 5000,
impute = TRUE,
progress = TRUE,
seed = 1){
# temporary warning until missing data is fully implemented
if(type != "continuous"){
warning(paste0("imputation during model fitting is\n",
"currently only implemented for 'continuous' data."))
}
# combine data
dat_list <- list(...)
# combine data
info <- Y_combine(...)
# number of groups
groups <- length(info$dat)
# number of comparisons
comparisons <- nrow(info$pairwise)
# delta rho ~ beta(delta/2, delta/2)
delta <- delta_solve(prior_sd)
if(groups < 2){
stop("must have (at least) two groups")
}
# sample
if(!analytic){
post_samp <- lapply(1:groups, function(x) {
Y <- dat_list[[x]]
# call estimate
estimate(Y, formula = formula,
type = type,
prior_sd = prior_sd,
iter = iter,
mixed_type = mixed_type,
seed = x,
progress = progress,
impute = impute,
... = paste0("(Group ", x, ")"))
})
# number of variables
p <- post_samp[[1]]$p
# compute difference
diff <- lapply(1:comparisons, function(x) {
contrast <- info$pairwise[x, ]
post_samp[[contrast[[1]]]]$post_samp$pcors[, , 51:(iter + 50)] - post_samp[[contrast[[2]]]]$post_samp$pcors[, , 51:(iter + 50)]
})
# name posterior (differences) array
names(diff) <- sapply(1:comparisons, function(x)
paste("Y_g",
info$pairwise[x, ],
sep = "",
collapse = " - "))
# pcor_mats
pcor_mats <- lapply(1:length(diff), function(x) {
round(apply(diff[[x]], 1:2, mean), 3)
})
# name pcor_mats
names(pcor_mats) <- names(diff)
# returned object
returned_object <- list(
pcor_mats = pcor_mats,
diff = diff,
p = p,
info = info,
iter = iter,
analytic = analytic,
type = type,
formula = formula,
call = match.call(),
post_samp= post_samp
)
# analytic
} else {
if(type != "continuous"){
warning("analytic solution only available for 'type = continuous'")
type <- "continuous"
}
formula <- NULL
z_stat <- lapply(1:comparisons, function(x) {
contrast <- info$pairwise[x, ]
g1 <- analytic_solve(info$dat[[contrast[[1]]]])
g2 <- analytic_solve(info$dat[[contrast[[2]]]])
z_stat <-
abs((g1$inv_map - g2$inv_map) / sqrt(g1$inv_var + g2$inv_var))
})
diff <- lapply(1:comparisons, function(x) {
contrast <- info$pairwise[x, ]
g1 <- analytic_solve(info$dat[[contrast[[1]]]])
g2 <- analytic_solve(info$dat[[contrast[[2]]]])
(g1$pcor_mat - g2$pcor_mat)
})
names(diff) <- sapply(1:comparisons, function(x)
paste("Y_g",
info$pairwise[x, ],
sep = "",
collapse = " - "))
names(z_stat) <-
sapply(1:comparisons, function(x)
paste("Y_g",
info$pairwise[x, ],
sep = "",
collapse = " - "))
p <- ncol(diff[[1]])
returned_object <- list(
z_stat = z_stat,
diff = diff,
p = p,
info = info,
iter = iter,
type = type,
analytic = analytic,
call = match.call()
)
}
class(returned_object) <- c("BGGM",
"ggm_compare_estimate",
"estimate")
returned_object
}
#' @title Summary method for \code{ggm_compare_estimate} objects
#'
#' @description Summarize the posterior distribution of each partial correlation
#' difference with the posterior mean and standard deviation.
#'
#' @name summary.ggm_compare_estimate
#'
#' @param object An object of class \code{ggm_compare_estimate}.
#'
#' @param col_names Logical. Should the summary include the column names (default is \code{TRUE})?
#' Setting to \code{FALSE} includes the column numbers (e.g., \code{1--2}).
#'
#' @param cred Numeric. The credible interval width for summarizing the posterior
#' distributions (defaults to 0.95; must be between 0 and 1).
#'
#' @param ... Currently ignored.
#'
#' @seealso \code{\link{ggm_compare_estimate}}
#'
#' @return A list containing the summarized posterior distributions.
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#' # data
#' Y <- bfi
#'
#' # males and females
#' Ymale <- subset(Y, gender == 1,
#' select = -c(gender,
#' education))[,1:5]
#'
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(gender,
#' education))[,1:5]
#'
#' # fit model
#' fit <- ggm_compare_estimate(Ymale, Yfemale,
#' type = "ordinal",
#' iter = 250,
#' prior_sd = 0.25,
#' progress = FALSE)
#'
#' summary(fit)
#' }
#' @export
summary.ggm_compare_estimate <- function(object,
col_names = TRUE,
cred = 0.95,...) {
# nodes
p <- object$p
# identity matrix
I_p <- diag(p)
# lower bound
lb <- (1 - cred) / 2
# upper bound
ub <- 1 - lb
# relation names
name_mat <- matrix(0, p, p)
# number of comparisons
comparisons <- length(names(object$diff))
# column names
cn <- colnames(object$post_samp[[1]]$Y)
if(is.null(cn) | isFALSE(col_names)){
mat_names <- sapply(1:p , function(x) paste(1:p, x, sep = "--"))[upper.tri(I_p)]
} else {
mat_names <- sapply(cn , function(x) paste(cn, x, sep = "--"))[upper.tri(I_p)]
}
dat_results <- list()
# summary for comparison i
for(i in seq_len(comparisons)){
if(isFALSE(object$analytic )){
post_mean <- round(apply(object$diff[[i]], 1:2, mean), 3)[upper.tri(I_p)]
post_sd <- round(apply( object$diff[[i]], 1:2, sd), 3)[upper.tri(I_p)]
post_lb <- round(apply( object$diff[[i]], 1:2, quantile, lb), 3)[upper.tri(I_p)]
post_ub <- round(apply( object$diff[[i]], 1:2, quantile, ub), 3)[upper.tri(I_p)]
results_i <-
data.frame(
relation = mat_names,
post_mean = post_mean,
post_sd = post_sd,
post_lb = post_lb,
post_ub = post_ub
)
colnames(results_i) <- c(
"Relation",
"Post.mean",
"Post.sd",
"Cred.lb",
"Cred.ub"
)
} else {
post_mean <- round(object$diff[[i]][upper.tri(I_p)], 3)
results_i <-
data.frame(
relation = mat_names,
post_mean = post_mean
)
colnames(results_i) <- c(
"Relation",
"Post.mean"
)
}
dat_results[[i]] <- results_i
}
returned_object <- list(dat_results = dat_results,
object = object)
class(returned_object) <- c("BGGM",
"summary", "summary.ggm_compare_estimate",
"ggm_compare_estimate",
"estimate")
returned_object
}
# print ggm compare
print_ggm_compare <- function(x, ...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$type, "\n")
cat("Analytic:", x$analytic, "\n")
cat("Formula:", paste(as.character(x$formula), collapse = " "), "\n")
# number of iterations
cat("Posterior Samples:", x$iter, "\n")
# number of observations
cat("Observations (n):\n")
groups <- length(x$info$dat)
for(i in 1:groups){
cat(" Group", paste( i, ":", sep = "") , x$info$dat_info$n[[i]], "\n")
}
# number of variables
cat("Nodes (p):", x$p, "\n")
# number of edges
cat("Relations:", .5 * (x$p * (x$p-1)), "\n")
cat("--- \n")
cat("Call: \n")
print(x$call)
cat("--- \n")
cat("Date:", date(), "\n")
}
# print summary
print_summary_ggm_estimate_compare <- function(x,...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$object$type, "\n")
cat("Analytic:", x$object$analytic, "\n")
cat("Formula:", paste(as.character(x$object$formula), collapse = " "), "\n")
# number of iterations
cat("Posterior Samples:", x$object$iter, "\n")
# number of observations
cat("Observations (n):\n")
groups <- length(x$object$info$dat)
for (i in 1:groups) {
cat(" Group",
paste(i, ":", sep = "") ,
x$object$info$dat_info$n[[i]],
"\n")
}
# number of variables
cat("Nodes (p):", x$object$p, "\n")
# number of edges
cat("Relations:", .5 * (x$object$p * (x$object$p - 1)), "\n")
cat("--- \n")
cat("Call: \n")
print(x$object$call)
cat("--- \n")
cat("Estimates:\n")
for (i in 1:nrow(x$object$info$pairwise)) {
cat("\n", names(x$object$pcors_diffs[[i]]), "\n")
print(x$dat_results[[i]], right = FALSE, row.names = FALSE,...)
}
cat("--- \n")
}
#' Plot \code{summary.ggm_compare_estimate} Objects
#'
#' @description Visualize the posterior distribution differences.
#'
#' @param x An object of class \code{ggm_compare_estimate}.
#'
#' @param size Numeric. The size of the points (defaults to 2).
#'
#' @param color Character string. The color of the points
#' (defaults to \code{"black"}).
#'
#' @param width Numeric. The width of error bar ends (defaults to \code{0}).
#'
#' @param ... Currently ignored.
#'
#' @return An object of class \code{ggplot}
#'
#' @seealso \code{\link{ggm_compare_estimate}}
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#' # data
#' Y <- bfi
#'
#' # males and females
#' Ymale <- subset(Y, gender == 1,
#' select = -c(gender,
#' education))[,1:5]
#'
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(gender,
#' education))[,1:5]
#'
#' # fit model
#' fit <- ggm_compare_estimate(Ymale, Yfemale,
#' type = "ordinal",
#' iter = 250,
#' prior_sd = 0.25,
#' progress = FALSE)
#'
#' plot(summary(fit))
#' }
#'
#' @export
plot.summary.ggm_compare_estimate <- function(x, color = "black",
size = 2,
width = 0, ...){
n_plt <- length(x$dat_results)
# plots
lapply(1:n_plt, function(i){
dat_temp <- x$dat_results[[i]][order(x$dat_results[[i]]$Post.mean,
decreasing = F), ]
dat_temp$Relation <-
factor(dat_temp$Relation,
levels = dat_temp$Relation,
labels = dat_temp$Relation)
plt <- ggplot(dat_temp,
aes(x = Relation,
y = Post.mean)) +
geom_point(size = size) +
xlab("Index") +
theme(axis.text.x = element_text(
angle = 90,
vjust = 0.5,
hjust = 1
)) +
ggtitle(paste(names(x$object$diff)[i]))
if(isFALSE( x$object$analytic)){
plt <- plt + geom_errorbar(aes(ymax = dat_temp[, 4],
ymin = dat_temp[, 5]),
width = width,
color = color)
}
plt
})
}
# ---- ggm_compare_estimate.default.R ends here ----
#' @title GGM Compare: Posterior Predictive Check
#'
#' @description
#' Compare GGMs with a posterior predicitve check \insertCite{gelman1996posterior}{BGGM}.
#' This method was introduced in \insertCite{williams2020comparing;textual}{BGGM}. Currently,
#' there is a \code{global} (the entire GGM) and a \code{nodewise} test. The default
#' is to compare GGMs with respect to the posterior predictive distribution of Kullback
#' Leibler divergence and the sum of squared errors. It is also possible to compare the
#' GGMs with a user defined test-statistic.
#'
#' @name ggm_compare_ppc
#'
#'
#' @param ... At least two matrices (or data frames) of dimensions \emph{n} (observations) by \emph{p} (variables).
#'
#' @param test Which test should be performed (defaults to \code{"global"}) ? The options include
#' \code{global} and \code{nodewise}.
#'
#'
#' @param iter Number of replicated datasets used to construct the predictive distribution
#' (defaults to 5000).
#'
#' @param FUN An optional function for comparing GGMs that returns a number. See \strong{Details}.
#'
#' @param custom_obs Number corresponding to the observed score for comparing the GGMs. This is
#' required if a function is provided in \code{FUN}. See \strong{Details}.
#'
#' @param loss Logical. If a function is provided, is the measure a "loss function"
#' (i.e., a large score is a bad thing)? This determines how the \emph{p}-value
#' is computed. See \strong{Details}.
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @details
#'
#' The \code{FUN} argument allows for a user defined test-statistic (the measure used to compare the GGMs).
#' The function must include only two arguments, each of which corresponds to a dataset. For example,
#' \code{f <- function(Yg1, Yg2)}, where each Y is a dataset of dimensions \emph{n} by \emph{p}. The
#' groups are then compared within the function, returning a single number. An example is provided below.
#'
#' Further, when using a custom function, care must be taken when specifying the argument \code{loss}.
#' We recommend visualizing the results with \code{plot} to ensure the \emph{p}-value was computed
#' in the right direction.
#'
#' @note
#'
#' \strong{Interpretation}:
#'
#' The primary test-statistic is symmetric KL-divergence that is termed Jensen-Shannon divergence (JSD).
#' This is in essence a likelihood ratio that provides the "distance" between two multivariate normal
#' distributions. The basic idea is to (1) compute the posterior predictive distribution, assuming group equality
#' (the null model). This provides the error that we would expect to see under the null model; (2) compute
#' JSD for the observed groups; and (3) compare the observed JSD to the posterior predictive distribution,
#' from which a posterior predictive \emph{p}-value is computed.
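#'
#' In terms of the computation used in this function, the symmetric divergence for two
#' groups \eqn{g_1} and \eqn{g_2} is
#' \deqn{0.5 \cdot KL(g_1 || g_2) + 0.5 \cdot KL(g_2 || g_1),}
#' where \eqn{KL} denotes the Kullback-Leibler divergence between the two (multivariate
#' normal) distributions.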
#'
#' For the \code{global} check, the sum of squared error is also provided.
#' This is computed from the partial correlation matrices and it is analogous
#' to the strength test in \insertCite{van2017comparing;textual}{BGGM}. The \code{nodewise}
#' test compares the posterior predictive distribution for each node. This is based on the correspondence
#' between the inverse covariance matrix and multiple regression \insertCite{kwan2014regression,Stephens1998}{BGGM}.
#'
#' If the null model is \code{not} rejected, note that this does \code{not} provide evidence for equality!
#' Further, if the null model is rejected, this means that the assumption of group equality is not tenable--the
#' groups are different.
#'
#' \strong{Alternative Methods}:
#'
#' There are several methods in \strong{BGGM} for comparing groups. See
#' \code{\link{ggm_compare_estimate}} (posterior differences for the
#' partial correlations), \code{\link{ggm_compare_explore}} (exploratory hypothesis testing),
#' and \code{\link{ggm_compare_confirm}} (confirmatory hypothesis testing).
#'
#'
#' @return The returned object of class \code{ggm_compare_ppc} contains a lot of information that
#' is used for printing and plotting the results. For users of \strong{BGGM}, the following
#' are the useful objects:
#'
#' \code{test = "global"}
#'
#' \itemize{
#'
#' \item \code{ppp_jsd} posterior predictive \emph{p}-values (JSD).
#'
#' \item \code{ppp_sse} posterior predictive \emph{p}-values (SSE).
#'
#' \item \code{predictive_jsd} list containing the posterior predictive distributions (JSD).
#'
#' \item \code{predictive_sse} list containing the posterior predictive distributions (SSE).
#'
#' \item \code{obs_jsd} list containing the observed error (JSD).
#'
#' \item \code{obs_sse} list containing the observed error (SSE).
#'
#'}
#'
#'
#' \code{test = "nodewise"}
#'
#' \itemize{
#'
#' \item \code{ppp_jsd} posterior predictive \emph{p}-values (JSD).
#'
#' \item \code{predictive_jsd} list containing the posterior predictive distributions (JSD).
#'
#' \item \code{obs_jsd} list containing the observed error (JSD).
#'
#' }
#'
#' \code{FUN = f()}
#'
#' \itemize{
#'
#' \item \code{ppp_custom} posterior predictive \emph{p}-values (custom).
#'
#' \item \code{predictive_custom} posterior predictive distributions (custom).
#'
#' \item \code{obs_custom} observed error (custom).
#'
#' }
#'
#'
#' @examples
#'
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- bfi
#'
#' #############################
#' ######### global ############
#' #############################
#'
#'
#' # males
#' Ym <- subset(Y, gender == 1,
#' select = - c(gender, education))
#'
#' # females
#'
#' Yf <- subset(Y, gender == 2,
#' select = - c(gender, education))
#'
#'
#' global_test <- ggm_compare_ppc(Ym, Yf,
#' iter = 250)
#'
#' global_test
#'
#'
#' #############################
#' ###### custom function ######
#' #############################
#' # example 1
#'
#' # maximum difference van Borkulo et al. (2017)
#'
#' f <- function(Yg1, Yg2){
#'
#' # remove NA
#' x <- na.omit(Yg1)
#' y <- na.omit(Yg2)
#'
#' # nodes
#' p <- ncol(Yg1)
#'
#' # identity matrix
#' I_p <- diag(p)
#'
#' # partial correlations
#'
#' pcor_1 <- -(cov2cor(solve(cor(x))) - I_p)
#' pcor_2 <- -(cov2cor(solve(cor(y))) - I_p)
#'
#' # max difference
#' max(abs((pcor_1[upper.tri(I_p)] - pcor_2[upper.tri(I_p)])))
#'
#' }
#'
#' # observed difference
#' obs <- f(Ym, Yf)
#'
#' global_max <- ggm_compare_ppc(Ym, Yf,
#' iter = 250,
#' FUN = f,
#' custom_obs = obs,
#' progress = FALSE)
#'
#' global_max
#'
#'
#' # example 2
#' # Hamming distance (squared error for adjacency)
#'
#' f <- function(Yg1, Yg2){
#'
#' # remove NA
#' x <- na.omit(Yg1)
#' y <- na.omit(Yg2)
#'
#' # nodes
#' p <- ncol(x)
#'
#' # identity matrix
#' I_p <- diag(p)
#'
#' fit1 <- estimate(x, analytic = TRUE)
#' fit2 <- estimate(y, analytic = TRUE)
#'
#' sel1 <- select(fit1)
#' sel2 <- select(fit2)
#'
#' sum((sel1$adj[upper.tri(I_p)] - sel2$adj[upper.tri(I_p)])^2)
#'
#'}
#'
#' # observed difference
#' obs <- f(Ym, Yf)
#'
#' global_hd <- ggm_compare_ppc(Ym, Yf,
#' iter = 250,
#' FUN = f,
#' custom_obs = obs,
#' progress = FALSE)
#'
#' global_hd
#'
#'
#' #############################
#' ######## nodewise ##########
#' #############################
#'
#' nodewise <- ggm_compare_ppc(Ym, Yf, iter = 250,
#' test = "nodewise")
#'
#' nodewise
#'
#' }
#'
#' @export
ggm_compare_ppc <- function(...,
test = "global",
iter = 5000,
FUN = NULL,
custom_obs = NULL,
loss = TRUE,
progress = TRUE
){
# data information
info <- Y_combine(...)
# number of groups
groups <- length(info$dat)
if (groups < 2) {
stop("must have (at least) two groups")
}
n_total <- sum(info$dat_info$n)
Y_G <- scale(do.call(rbind, info$dat), scale = T)
# inverse scatter matrix
S_G <- solve(t(Y_G) %*% Y_G)
# M_0 posterior (group equality)
post <- rWishart(iter, n_total - 1, S_G)
p <- info$dat_info$p[1]
if(is.null(FUN)){
custom <- FALSE
if (test == "global") {
# jsd = symmetric KLD
predictive_jsd <- list()
# strength = sum of squares
predictive_ss <- list()
# observed error
obs_jsd <- list()
obs_ss <- list()
nms <- list()
for (i in 1:nrow(info$pairwise)) {
message(paste0("BGGM: Predictive Check ", "(Contrast ", i ,")"))
n1 <- info$dat_info$n[info$pairwise[i, 1]]
n2 <- info$dat_info$n[info$pairwise[i, 2]]
pp_check <- .Call(
"_BGGM_ppc_helper_fast",
PACKAGE = "BGGM",
Theta = post,
n1 = n1,
n2 = n2,
p = p,
BF_cut = 3,
dens = 1,
ppc_ss = TRUE,
ppc_cors = FALSE,
ppc_hd = FALSE
)
predictive_jsd[[i]] <- pp_check$kl
predictive_ss[[i]] <- pp_check$ss
# data set 2
y1 <- info$dat[info$pairwise[i, 1]][[1]]
# data set 2
y2 <- info$dat[info$pairwise[i, 2]][[1]]
# observed jsd
obs_jsd[[i]] <-
0.5 * KL(unbiased_cov(y1), unbiased_cov(y2)) +
0.5 * KL(unbiased_cov(y2), unbiased_cov(y1))
# observed sum of squared error
obs_ss[[i]] <- sum((cov2cor(solve(cor(y1))) - cov2cor(solve(cor(y2)))) ^ 2) * 0.5
# names
nms[[i]] <-
paste("Yg",
info$pairwise[i, ],
sep = "",
collapse = " vs ")
}
message("BGGM: Finished")
# results jsd
results_jsd <- do.call(cbind.data.frame, predictive_jsd)
# results ss
results_ss <- do.call(cbind.data.frame, predictive_ss)
# posterior predictive p-value
ppp_jsd <- sapply(1:nrow(info$pairwise), function(x)
mean(na.omit(results_jsd[, x]) > obs_jsd[[x]]))
ppp_ss <- sapply(1:nrow(info$pairwise), function(x)
mean(na.omit(results_ss[, x]) > obs_ss[[x]]))
returned_object <- list(
ppp_jsd = ppp_jsd,
ppp_sse = ppp_ss,
obs_jsd = obs_jsd,
obs_sse = obs_ss,
info = info,
names = nms,
iter = iter,
test = test,
call = match.call(),
predictive_jsd = predictive_jsd,
predictive_sse = predictive_ss,
custom = custom
)
} else if (test == "nodewise") {
predictive_jsd <- list()
obs_jsd <- list()
nms <- list()
for (i in 1:nrow(info$pairwise)) {
message(paste0("BGGM: Predictive Check ", "(Contrast ", i ,")"))
n1 <- info$dat_info$n[info$pairwise[i, 1]]
n2 <- info$dat_info$n[info$pairwise[i, 2]]
pp_check <- .Call(
"_BGGM_ppc_helper_nodewise_fast",
PACKAGE = "BGGM",
Theta = post,
n1 = n1,
n2 = n2,
p = p
)
predictive_jsd[[i]] <- pp_check$kl
}
message("BGGM: Finished")
for (i in 1:nrow(info$pairwise)) {
y1 <- info$dat[info$pairwise[i, 1]][[1]]
y2 <- info$dat[info$pairwise[i, 2]][[1]]
obs <- lapply(1:p, function(x) node_jsd_help(x, y1, y2))
nms[[i]] <-
paste("Yg",
info$pairwise[i, ],
sep = "",
collapse = " vs ")
obs_jsd[[i]] <- obs
}
pvalue <- list()
for (i in 1:nrow(info$pairwise)) {
obs_i <- obs_jsd[[i]]
ppc_i <- predictive_jsd[[i]]
pvalues <-
sapply(1:info$dat_info$p[1], function(x)
mean(ppc_i[, x] > obs_i[x]))
pvalue[[i]] <- pvalues
}
returned_object <- list(
ppp_jsd = pvalue,
obs_jsd = obs_jsd,
predictive_jsd = predictive_jsd,
info = info,
names = nms,
iter = iter,
test = test,
call = match.call(),
custom = custom
)
}
} else {
custom <- TRUE
if(groups > 2){
stop("only two groups allowed for custom functions")
}
# check for mvnfast
if(!requireNamespace("mvnfast", quietly = TRUE)) {
stop("Please install the '", "mvnfast", "' package.")
}
if(is.null(custom_obs)){
stop("observed test-statistic is required when using a customing function.")
}
# group one
n1 <- info$dat_info$n[1]
# group two
n2 <- info$dat_info$n[2]
# progress bar
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = iter, style = 3)
}
# predictive check
pp_check <- sapply(1:iter, function(x){
# correlation matrix
cors <- cov2cor(solve(post[,,x]))
# Yrep1
Yrep1 <- mvnfast::rmvn(n1, rep(0, p), cors)
# Yrep2
Yrep2 <- mvnfast::rmvn(n2, rep(0, p), cors)
# custom ppc
ppc <- FUN(Yrep1, Yrep2)
# update progress bar
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, x)
}
ppc
})
if(isTRUE(loss)){
ppp <- mean(pp_check > custom_obs)
} else {
ppp <- mean(pp_check < custom_obs)
}
nms <- paste("Yg",
info$pairwise[1, ],
sep = "",
collapse = " vs ")
returned_object <- list(
ppp_custom = ppp,
predictive_custom = pp_check,
info = info,
names = nms,
iter = iter,
test = test,
call = match.call(),
custom = custom,
custom_obs = custom_obs
)
}
class(returned_object) <- c("BGGM",
"estimate",
"ggm_compare_ppc")
return(returned_object)
}
print_ggm_compare_ppc <- function(x, ...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
if(x$test == "nodewise"){
cat("Test: Nodewise Predictive Check \n")
} else{
cat("Test: Global Predictive Check \n")
}
p <- x$info$dat_info$p[1]
cat("Posterior Samples:", x$iter, "\n")
groups <- length(x$info$dat)
for (i in 1:groups) {
cat(" Group",
paste(i, ":", sep = "") ,
x$info$dat_info$n[[i]],
"\n")
}
cat("Nodes: ", p, "\n")
cat("Relations:", .5 * (p * (p-1)), "\n")
cat("--- \n")
cat("Call: \n")
print(x$call)
cat("--- \n")
if(x$test == "global"){
if(isFALSE(x$custom)){
cat("Symmetric KL divergence (JSD): \n \n")
results <- data.frame(
contrast = do.call(rbind, x$names),
JSD.obs = round(do.call(rbind, x$obs_jsd), 3),
p_value = round(x$ppp_jsd, 3)
)
print(results, row.names = F)
cat("--- \n \n")
cat("Sum of Squared Error: \n \n")
results <- data.frame(
contrast = do.call(rbind, x$names),
SSE.obs = round(do.call(rbind, x$obs_sse), 3),
p.value = round(x$ppp_sse, 3)
)
print(results, row.names = F)
cat("--- \n")
cat("note:\n")
cat("JSD is Jensen-Shannon divergence \n")
} else {
cat("Custom: \n \n")
results <- data.frame(
contrast = do.call(rbind, list(x$names)),
custom.obs = round(x$custom_obs, 3) ,
p.value = round(x$ppp_custom, 3)
)
print(results, row.names = F)
cat("--- \n")
}
} else {
cat("Symmetric KL divergence (JSD): \n \n")
results <- list()
for (i in 1:length(x$obs_jsd)) {
results[[i]] <-
data.frame(
Node = 1:p ,
JSD.obs = round(do.call(rbind, x$obs_jsd[[i]]), 3),
p_value = unlist(x$ppp_jsd[[i]])
)
names(results)[[i]] <- x$names[[i]]
}
for(i in 1:length(x$obs_jsd)){
cat(do.call(rbind, x$names)[[i]], "\n")
print(results[[i]], row.names = F)
cat("--- \n\n")
}
cat("note:\n")
cat("JSD is Jensen-Shannon divergence \n")
}
}
#' @title Plot \code{ggm_compare_ppc} Objects
#'
#' @description Plot the predictive check with \code{\link[ggridges]{ggridges}}
#'
#' @param x An object of class \code{ggm_compare_ppc}
#'
#' @param critical Numeric. The 'significance' level
#' (defaults to \code{0.05}).
#'
#' @param col_noncritical Character string. Fill color for the non-critical region
#' (defaults to \code{"#84e184A0"}).
#'
#' @param col_critical Character string. Fill color for the critical region
#' (defaults to \code{"red"}).
#'
#' @param point_size Numeric. The point size for the observed score
#' (defaults to \code{2}).
#'
#' @param ... Currently ignored.
#'
#' @return An object (or list of objects) of class \code{ggplot}.
#'
#' @importFrom ggridges stat_density_ridges
#'
#' @note
#' See
#' \href{https://CRAN.R-project.org/package=ggridges/vignettes/introduction.html}{ggridges} for
#' many examples.
#'
#' @seealso \code{\link{ggm_compare_ppc}}
#'
#' @examples
#' \donttest{
#' # data
#' Y <- bfi
#'
#' #############################
#' ######### global ############
#' #############################
#' # males
#' Ym <- subset(Y, gender == 1,
#' select = - c(gender, education))
#'
#' # females
#'
#' Yf <- subset(Y, gender == 2,
#' select = - c(gender, education))
#'
#'
#' global_test <- ggm_compare_ppc(Ym, Yf,
#' iter = 250,
#' progress = FALSE)
#'
#' plot(global_test)
#' }
#' @export
plot.ggm_compare_ppc <- function(x,
critical = 0.05,
col_noncritical = "#84e184A0",
col_critical = "red",
point_size = 2, ...){
if(x$test == "global"){
if(isFALSE( x$custom)) {
# number of contrasts
k <- length(x$ppp_jsd)
jsd <- unlist(x$predictive_jsd)
sse <- unlist(x$predictive_sse)
dat_jsd <- data.frame(ppc = jsd,
contrast = rep(gsub(
x = x$names,
pattern = "_",
replacement = ""
),
each = x$iter))
dat_obs_jsd <- data.frame(
contrast = gsub(
x = x$names,
pattern = "_",
replacement = ""
),
ppc = unlist(x$obs_jsd)
)
dat_sse <- data.frame(ppc = sse,
contrast = rep(gsub(
x = x$names,
pattern = "_",
replacement = ""
),
each = x$iter))
dat_obs_sse <- data.frame(
contrast = gsub(
x = x$names,
pattern = "_",
replacement = ""
),
ppc = unlist(x$obs_sse)
)
plot_jsd <- ggplot(dat_jsd, aes(
x = ppc,
y = contrast,
fill = factor(..quantile..)
)) +
stat_density_ridges(
geom = "density_ridges_gradient",
calc_ecdf = TRUE,
alpha = 0.5,
quantiles = c(0.025, 1 - (critical))
) +
scale_fill_manual(values = c(col_noncritical,
col_noncritical,
col_critical)) +
theme(legend.position = "none") +
xlab("Predictive Check") +
ylab("Contrast") +
geom_point(
inherit.aes = F,
data = dat_obs_jsd,
aes(x = ppc,
y = contrast),
size = point_size
) +
scale_y_discrete(limits = rev(levels(dat_obs_jsd$contrast))) +
ggtitle("Symmetric KL-Divergence")
plot_sse <- ggplot(dat_sse, aes(
x = ppc,
y = contrast,
fill = factor(..quantile..)
)) +
stat_density_ridges(
geom = "density_ridges_gradient",
calc_ecdf = TRUE,
alpha = 0.5,
quantiles = c(0.025, 1 - (critical))
) +
scale_fill_manual(values = c(col_noncritical,
col_noncritical,
col_critical)) +
theme(legend.position = "none") +
xlab("Predictive Check") +
ylab("Contrast") +
geom_point(
inherit.aes = F,
data = dat_obs_sse,
aes(x = ppc,
y = contrast),
size = point_size
) +
scale_y_discrete(limits = rev(levels(dat_obs_sse$contrast))) +
ggtitle("Sum of Squared Error")
list(plot_sse = plot_sse, plot_jsd = plot_jsd)
} else {
k <- length(x$ppp_custom)
custom <- unlist(x$predictive_custom)
dat_custom <- data.frame(ppc = custom,
contrast = rep(gsub(
x = x$names,
pattern = "_",
replacement = ""
),
each = x$iter))
dat_obs_custom <- data.frame(
contrast = gsub(
x = x$names,
pattern = "_",
replacement = ""
),
ppc = unlist(x$custom_obs)
)
plot_custom <- ggplot(dat_custom, aes(
x = ppc,
y = contrast,
fill = factor(..quantile..)
)) +
stat_density_ridges(
geom = "density_ridges_gradient",
calc_ecdf = TRUE,
alpha = 0.5,
quantiles = c(0.025, 1 - (critical))
) +
scale_fill_manual(values = c(col_noncritical,
col_noncritical,
col_critical)) +
theme(legend.position = "none") +
xlab("Predictive Check") +
ylab("Contrast") +
geom_point(
inherit.aes = F,
data = dat_obs_custom,
aes(x = ppc,
y = contrast),
size = point_size
) +
scale_y_discrete(limits = rev(levels(dat_obs_custom$contrast))) +
ggtitle("Custom")
list(plot_custom = plot_custom)
} # end of global
} else {
plt <- list()
k <- length(x$names)
for(i in 1:k){
dat_obs <- data.frame(ppc = unlist(x$obs_jsd[[i]]),
node = 1:x$info$dat_info$p[1])
test <- reshape::melt(x$predictive_jsd[[i]])
test$node <- factor(test$X2,
levels = rev(1:x$info$dat_info$p[1]),
labels = rev(1:x$info$dat_info$p[1]))
dat_obs$node <- factor(dat_obs$node,
levels = rev(1:x$info$dat_info$p[1]),
labels = rev(1:x$info$dat_info$p[1]))
suppressWarnings(
test$value <- log(test$value)
)
check_inf <- which(is.infinite(test$value))
test$value[check_inf] <- NA
test <- na.omit(test)
plt[[i]] <- ggplot(test, aes(x = value,
y = node,
fill = factor(..quantile..))) +
stat_density_ridges(geom = "density_ridges_gradient",
rel_min_height = 0.01,
calc_ecdf = TRUE,
quantiles = c(0.025, 1 - (critical))) +
scale_fill_manual( values = c(col_noncritical,
col_noncritical,
col_critical)) +
geom_point(data = dat_obs,
inherit.aes = F,
aes(x = log(ppc),
y = node),
size = point_size) +
theme(legend.position = "none") +
xlab("Predictive Check") +
ylab("Node") +
ggtitle(gsub(x = x$names[[i]],
pattern = "_",
replacement = ""),
subtitle = "Symmteric KL-Divergence (log scale)")
}
plt
}
}
# ---- ggm_compare_ppc.default.R ends here ----
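# internal helper (not exported): stochastic (MC3-type) search over graph
# structures, returning the most probable graph and, when bma_mean = TRUE,
# a Bayesian model averaged precision/partial correlation matrix.
# A minimal usage sketch (illustrative only; Y is assumed to be a numeric
# data matrix):
#
#   fit <- ggm_search(Y, iter = 5000, prior_prob = 0.1)
#   fit$pcor_bma   # model averaged partial correlations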
ggm_search <- function(x, n = NULL,
method = "mc3",
prior_prob = 0.1,
iter = 5000,
stop_early = 1000,
bma_mean = TRUE,
seed = 1,
progress = TRUE, ...){
set.seed(seed)
if (base::isSymmetric(as.matrix(x))) {
S <- x
} else {
S <- cor(x)
n <- nrow(x)
}
p <- ncol(S)
if(method == "mc3"){
if(is.null(stop_early)){
stop_early <- iter
}
pcors <- -cov2cor( solve(S) ) + diag(2, p)
# test full vs missing one edge
BF_01 <- exp(-0.5 * (tstat(r = pcors, n = n, p - 2)^2 - log(n)))
BF_10 <- 1/BF_01
adj_start <- ifelse(BF_10 > 1, 1, 0)
old <- hft_algorithm(Sigma = S,
adj = adj_start,
tol = 1e-10,
max_iter = 100)
bic_old <- bic_fast(Theta = old$Theta,
S = S,
n = n,
prior_prob = prior_prob)
if(isTRUE(progress)){
message(paste0("BGGM: Sampling Graphs"))
}
fit <- .Call('_BGGM_search',
PACKAGE = 'BGGM',
S = S,
iter = iter,
old_bic = bic_old,
start_adj = adj_start,
n = n,
gamma = prior_prob,
stop_early = stop_early,
progress = progress)
if(isTRUE(progress)){
message("BGGM: Finished")
}
# accepted
acc <- fit$acc
# first matrix (starting values)
fit$adj[,,1] <- adj_start
# approximate marginal likelihood
approx_marg_ll <- fit$bics
# starting bic
approx_marg_ll[1] <- bic_old
if(!is.null(stop_early)){
approx_marg_ll <- approx_marg_ll[which(approx_marg_ll != 0)]
fit$adj <- fit$adj[,, which(approx_marg_ll != 0)]
}
adj_path <- fit$adj
selected <- which.min(approx_marg_ll)
if(acc == 0){
adj <- fit$adj[,,1]
} else {
adj <- fit$adj[,,selected]
}
# BFs vs mpm
delta <- approx_marg_ll - min(approx_marg_ll)
probs <- exp(-0.5 * delta) / sum( exp(-0.5 * delta) )
Theta_map <- hft_algorithm(
Sigma = S,
adj = adj,
tol = 1e-10,
max_iter = 100
)
pcor_adj <- -cov2cor(Theta_map$Theta) + diag(2, p)
}
if(bma_mean & acc > 0){
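# Bayesian model averaging: weight each unique visited graph by its
# approximate posterior probability (derived from the BIC-type scores)
# and average the corresponding constrained precision matrices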
graph_ids <- which(duplicated(approx_marg_ll) == 0)[-1]
delta <- (approx_marg_ll[graph_ids] - min(approx_marg_ll[graph_ids])) * (6 / (2*sqrt(2*n)))
probs <- exp(- 0.5 * delta) / sum(exp(- 0.5 * delta))
graphs <- adj_path[,,graph_ids]
Theta_bma <- lapply(1:length(probs), function(x){
hft_algorithm(Sigma = S,
adj = graphs[,,x],
tol =1e-10,
max_iter = 1000)$Theta * probs[x]
})
Theta_bma <- Reduce("+", Theta_bma)
pcor_bma <- -cov2cor(Theta_bma) + diag(2, p)
} else {
Theta_bma <- NULL
pcor_bma <- NULL
}
returned_object <- list(pcor_adj = pcor_adj,
Theta_map = Theta_map,
Theta_bma = Theta_bma,
pcor_bma = pcor_bma,
adj = adj,
adj_start = adj_start,
probs = probs,
approx_marg_ll = approx_marg_ll,
selected = selected,
BF_start = BF_10,
adj_path = adj_path,
acc = acc,
S = S,
n = n)
#rm(.Random.seed, envir=.GlobalEnv)
class(returned_object) <- c("BGGM",
"ggm_search")
return( returned_object )
}
print_ggm_search <- function(x, ...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
if(x$acc == 0){
mat <- x$pcor_adj
p <- ncol(mat)
if(is.null( colnames(x$S))){
colnames(mat) <- 1:p
row.names(mat) <- 1:p
} else {
colnames(mat) <- colnames(x$S)
row.names(mat) <- colnames(x$S)
}
cat("Most Probable Graph:\n\n")
print(round(mat, 3))
} else {
mat <- x$pcor_bma
p <- ncol(mat)
if(is.null( colnames(x$S))){
colnames(mat) <- 1:p
row.names(mat) <- 1:p
} else {
colnames(mat) <- colnames(x$S)
row.names(mat) <- colnames(x$S)
}
cat("Bayesian Model Averaged Graph:\n\n")
print(round(mat, 3))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/ggm_search.R
|
#' @importFrom stats coef cov2cor var dnorm lm
#' na.omit pnorm quantile rWishart runif dnorm rnorm
#' sd qnorm residuals fitted density weighted.mean
#' @importFrom utils combn
#' @importFrom graphics plot
#' @importFrom Rdpack reprompt
#' @importFrom MASS ginv
#' @import ggplot2
#' @importFrom stats model.matrix terms cor
tstat <- function(r, n, k){
r * sqrt(((n - 2 - k) / (1-r^2)))
}
hyp_converter <- function (x) {
hyp_converted <- x
extract_numbers <-strsplit( gsub("[^\\d]+", "",
hyp_converted, perl = TRUE),
split = "" )[[1]]
extract_numbers <- extract_numbers[unlist(extract_numbers) >= 1]
words <- NA
for (i in 1:length(extract_numbers)) {
temp <- noquote(extract_numbers[i])
words[i] <- numbers2words(as.numeric(temp))
hyp_converted <- sub(temp, numbers2words(as.numeric(temp)),
hyp_converted)
}
list(hyp_converted = hyp_converted, words = words)
}
remove_predictors_helper <- function(Y_groups, formula){
# number of groups
groups <- length(Y_groups)
# Y groups
Y_groups <- lapply(seq_len(groups), function(x) na.omit(Y_groups[[x]]) )
model_matrices <- lapply(seq_len(groups) , function(x) {
stats::model.matrix(formula, Y_groups[[x]])
})
# model matrix terms
mm_terms <- attr(terms(formula), "term.labels")
if(length(mm_terms) == 0){
Y_groups <- Y_groups
} else {
Y_groups <- lapply(seq_len(groups), function(x){
# check for factors
factor_pred <- which(paste0("as.factor(", colnames(Y_groups[[x]]), ")") %in% mm_terms)
# check for scaled
scale_pred <- which(paste0("scale(", colnames(Y_groups[[x]]), ")") %in% mm_terms)
# check for non factors
cont_pred <- which(colnames(Y_groups[[x]]) %in% mm_terms)
# remove predictors
Y_groups[[x]][,-c(factor_pred, cont_pred, scale_pred)]
})
}
list(Y_groups = Y_groups, model_matrices = model_matrices)
}
binary_latent_helper <- function(x){
# obervations
n <- nrow(x)
# variables
p <- ncol(x)
# thresholds
thresholds <- c(-Inf, 0, Inf)
# latent data
latent_data <- sapply(1:p, function(z) qnorm(runif(n, min = pnorm(thresholds[x[,z]],
mean = 0,
sd = 1),
max = pnorm(thresholds[x[,z]+1],
mean = 0,
sd = 1)),
mean = 0, sd = 1))
# latent data (sd = 1)
latent_data <- scale(latent_data)
# latent data
latent_data
}
ordinal_latent_helper <- function(x, thresholds){
# observations
n <- nrow(x)
# variables
p <- ncol(x)
# mean of thresholds
thresholds <- t(sapply(1:p, function(x) colMeans(thresholds[,,x])))
latent_data <- sapply(1:p, function(z) qnorm(runif(n, min = pnorm(thresholds[z, x[,z]],
mean = 0,
sd = 1),
max = pnorm(thresholds[z, x[,z]+1],
mean = 0,
sd = 1)),
mean = 0, sd = 1))
# latent data
latent_data <- scale(latent_data)
# latent data
latent_data
}
rank_helper <- function(Y){
# adapted from hoff (2008). See documentation.
p <- ncol(Y)
levels <- apply(Y, 2, function(x) {match(x, sort(unique(x)))})
K <- apply(levels, 2, max, na.rm = TRUE)
ranks <- apply(Y, 2, rank, ties.method = "max", na.last = "keep")
n_complete <- apply(!is.na(ranks), 2, sum)
U <- t(t(ranks)/(n_complete + 1))
Z <- qnorm(U)
S <- cov(Z)
list(K = K,
levels = levels,
Sigma_start = S,
z0_start = Z)
}
group_hyp_helper <- function(hypothesis, x){
hyp <- gsub(hyp_converter(convert_hyps(hypothesis = hypothesis, cbind(x)))$hyp_converted,
pattern = "_", replacement = "")
hyp
}
# convert hypothesis to words
convert_hyps <- function(hypothesis, Y){
p <- ncol(Y)
col_names <- numbers2words(1:p)
mat_name_num <- sapply(1:p, function(x) paste0(1:p, "--", x, sep = ""))
mat_name_n2w <- sapply(col_names, function(x) paste(col_names,x, sep = ""))
mat_name_actual <- sapply(colnames(Y), function(x) paste0(colnames(Y), "--", x, sep = ""))
n_off_diag <- p *( p - 1)*0.5
where <- lapply(1:n_off_diag, function(x) grep(mat_name_actual[upper.tri(diag(p))][x], hypothesis ))
where <- which(where == 1)
if(any(where)){
for (i in seq_along(where)) {
hypothesis <- gsub(mat_name_actual[upper.tri(diag(p))][where[i]],
replacement = mat_name_n2w[upper.tri(diag(p))][where[i]], x = hypothesis )
}
} else if (!any(where)){
where <- lapply(1:n_off_diag, function(x) grep(mat_name_num[upper.tri(diag(p))][x], hypothesis ))
where <- which(where == 1)
for (i in seq_along(where)) {
hypothesis <- gsub(mat_name_num[upper.tri(diag(p))][where[i]],
replacement = mat_name_n2w[upper.tri(diag(p))][where[i]], x = hypothesis )
}
} else {
stop("error in edge specification\n
Hints:\n
1) check that the first is smaller than the second (denoting the upper-triangular elements)-e.g., 1--2 and not 2--1\n
2) alternatively, the variable might not exist--e.g., p = 10 but 1--11 (11 > 10)")
}
hypothesis
}
prior_helper_2 <- function(p, delta , epsilon){
k <- p
nu <- 1/epsilon
parcorMat <- corMat <- ThetaMat <- SigmaMat <- array(0, dim=c(1e4 , k , k))
for(s in 1:dim(parcorMat)[1]){
Psi <- rWishart(1,df = nu, Sigma = 1/nu*diag(k))[,,1]
Sigma <- rWishart(1,df=k-1+delta, Sigma=solve(Psi))[,,1]
Theta <- solve(Sigma)
ThetaMat[s,,] <- Theta
SigmaMat[s,,] <- Sigma
parcorMat[s,,] <- -diag(1/sqrt(diag(Theta)))%*%Theta%*%diag(1/sqrt(diag(Theta)))
}
parcorMat
}
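# Usage sketch for prior_helper_2() (not run): draws from the prior induced on
# the partial correlations for a given delta and epsilon. The values below are
# illustrative only; note that 10,000 matrices are sampled, so this can take a
# moment.
#
# prior_draws <- prior_helper_2(p = 5, delta = 10, epsilon = 0.01)
# hist(prior_draws[, 1, 2], main = "Implied prior for one partial correlation")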
print_summary_explore <- function(x,...){
summary(x$dat_results, summarize = TRUE)
}
print_post_pred <- function(x,...){
if(length(x) != 2){
class(x) <- ""
x <- round(x, 3)
print(x)
} else {
cat("'summary = FALSE' not printed. See object contents")
}
}
print_map <- function(x,...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Method: Maximum A Posteriori \n")
cat("--- \n")
print(x$pcor)
}
print_fitted <- function(x,...){
if(length(x) == 2){
cat("'summary = FALSE' not printed. See object contents")
} else {
class(x) <- ""
x <- round(x, 3)
print(x)
}
}
print_predict <- function(x,...){
if(length(x) == 2){
cat("'summary = FALSE' not printed. See object contents")
} else {
class(x) <- ""
x <- round(x, 3)
print(x)
}
}
post_prob <- function(data){
p1 <- sum(data>0)/length(data)
p2 <- sum(data<0)/length(data)
p<- 1 - min(p1,p2)
return(p)
}
convert_colnames <- function(hyp, Y){
names_temp <- unlist(strsplit( strsplit(hyp, " ")[[1]], "--"))
names_temp <- paste(names_temp, collapse = " ")
names_temp <- unique(strsplit(gsub("[^[:alnum:] ]", "", names_temp), " +")[[1]])
if(any(names_temp == "0")){
names_temp <- names_temp[-which(names_temp == "0" )]
}
if(!all(names_temp %in% colnames(Y))){
stop("node names not found in the data")
}
for(i in 1:length(names_temp)){
id <- which(names_temp[[i]] == colnames(Y))
hyp <- gsub(x = hyp, pattern = names_temp[[i]], replacement = id)
}
hyp
}
compare_predict_helper <- function(x, ci_width){
post_mean <- mean(x)
post_sd <- stats::sd(x)
low <- (1 - ci_width) / 2
up <- 1 - low
interval <- t(stats::quantile(x, c(low, up)))
summ <- round(cbind.data.frame(post_mean = post_mean,
post_sd = post_sd,
interval), 3)
}
# delta give prior_sd
delta_solve = function(x){
(x^2)^-1 - 1
}
# fisher z to r
z2r <- function (z) {
(exp(2 * z) - 1)/(1 + exp(2 * z))
}
# lower triangle of matrix
get_lower_tri<-function(cormat){
cormat[upper.tri(cormat)] <- NA
return(cormat)
}
# analytic solution
analytic_solve <- function(X){
# sample size
n <- nrow(X)
# variables
p <- ncol(X)
  # centered and scaled data matrix
X <- scale(X, scale = T)
# scatter matrix
S <- t(X) %*% X
# degrees of freedom
df = p
# map estimate
inv_map <- solve(S + diag(10^-5, p)) * (n + df - p - 1)
  # posterior variance
inv_var <- (n + df + 1) * (solve(S + diag(0.1^5, p) )^2 + tcrossprod(diag(solve(S + diag(0.1^5, p)))))
# inverse correlation matrix
inv_cor <- diag( 1 / sqrt((diag(inv_map)))) %*% inv_map %*% diag( 1 / sqrt((diag(inv_map))) )
pcor_mat <- -(inv_cor - diag(p))
list(inv_map = inv_map,
inv_var = inv_var,
pcor_mat = pcor_mat)
}
rope_helper <- function(x, rope){
mean(- rope < x & x < rope )
}
ci_helper <- function(x, ci_width){
low <- (1 - ci_width) / 2
up <- 1 - low
interval <- stats::quantile(x, c(low, up))
as.numeric(ifelse(interval[1] < 0 & interval[2] > 0, 0, 1))
}
Mo_risk_help_node <- function(x, post, n1, n2, p){
inv_mat <- post[,,x]
Y_rep1 <- mvnfast::rmvn(n = n1, mu = rep(0, p), sigma = stats::cov2cor(solve(inv_mat)))
Y_rep2 <- mvnfast::rmvn(n = n2, mu = rep(0, p), sigma = stats::cov2cor(solve(inv_mat)))
jsd_node <- unlist(lapply(1:ncol(Y_rep1), function(z) node_jsd_help(z, Y_rep1, Y_rep2)))
jsd_node
}
node_jsd_help <- function(x, Y_rep1, Y_rep2){
Y_rep1 <- scale(Y_rep1)
Y_rep2 <- scale(Y_rep2)
pred1 <- Y_rep1[,-x] %*% beta_helper(Y_rep1, x)
pred2 <- Y_rep2[,-x] %*% beta_helper(Y_rep2, x)
jsd_node <- (kl_func(stats::var(pred1), stats::var(pred2)) +
kl_func(stats::var(pred2), stats::var(pred1))) * .5
jsd_node
}
beta_helper <- function(x, which_one){
y <- x[,which_one]
X <- x[,-which_one]
fit <- lm(y ~ 0 + X)
coef(fit)
}
R2_helper <- function(ypred, y, ci_width) {
low <- (1 - ci_width) / 2
up <- 1 - low
e <- -1 * sweep(ypred, 2, y)
var_ypred <- apply(ypred, 1, stats::var)
var_e <- apply(e, 1, stats::var)
r2 <- unlist(var_ypred / (var_ypred + var_e))
ci <- quantile(r2, prob = c(low, up) )
mu_r2 <- mean(r2)
sd_r2 <- stats::sd(r2)
summary_r2 <- c(post_mean = mu_r2, post_sd = sd_r2, ci)
list(summary_r2 = summary_r2, R2 = r2)
}
MSE_helper <- function(ypred, y, ci_width){
low <- (1 - ci_width) / 2
up <- 1 - low
mse <- apply(ypred, MARGIN = 1, function(x){mean((x - y)^2)})
ci <- quantile(mse, prob = c(low, up) )
mu_mse <- mean(mse)
sd_mse <- stats::sd(mse)
summary_mse <- c(post_mean = mu_mse, post_sd = sd_mse, ci)
list(summary_mse = summary_mse, MSE = mse)
}
name_helper <- function(x){
x <- gsub("[A-z].*,", replacement = "", x)
col_names <- gsub("[]]", "", x)
col_names
}
error_helper <- function(ypred, y, ci_width, measure, sigmas = NULL) {
low <- (1 - ci_width) / 2
up <- 1 - low
all_residual <- sweep(ypred, 2, y)
if(measure == "mse"){
out <- rowMeans(all_residual^2)
}
if(measure == "mae"){
out <- rowMeans(abs(all_residual))
}
if(measure == "kl"){
out <- kl_func(stats::var(y), sigmas^2)
}
ci <- quantile(out, prob = c(low, up) )
mu_out <- mean(out)
sd_out <- stats::sd(out)
summary <- c(post_mean = mu_out, post_sd = sd_out, ci)
list(summary = summary, error = out)
}
kl_func <- function(sigma_1, sigma_2){
log(sqrt(sigma_2) / sqrt(sigma_1)) + (sigma_1 / (2 * sigma_2)) - .5
}
ppc_helper <- function(x, inv_g1, inv_cov, n, p){
inv_mat <- matrix(0, p , p)
inv_mat[,] <- as.numeric(inv_cov[x,])
y_rep <- mvnfast::rmvn(n, mu = rep(0, p),sigma = solve(inv_mat))
S_rep <- t(y_rep) %*% y_rep
theta_rep <- (n - 1) * solve(S_rep)
KLD <- KL(Theta = inv_g1, hatTheta = theta_rep)
JSD <- 0.5 * KL(Theta = inv_g1, hatTheta = theta_rep) + 0.5 * KL(hatTheta = theta_rep, Theta = inv_g1)
QL <- QL(Theta = inv_g1, hatTheta = theta_rep)
  FL <- sum((stats::cov2cor(inv_g1) * -1 - stats::cov2cor(theta_rep) * -1)^2)
  list(KLD = KLD, JSD = JSD, QL = QL, FL = FL)
}
contrast_helper <- function(x){
temp <- unlist(regmatches(x, gregexpr("[[:digit:]]+", x)))
paste("Y", temp, sep = "_g", collapse = "_vs_")
}
KL = function(Theta,hatTheta){
# Kuismin, M., & Sillanpaa, M. J. (2016). Use of Wishart prior and simple extensions for
# sparse precision matrix estimation. PloS one, 11(2), e0148171.
p = ncol(Theta)
invTheta = solve(Theta,diag(1,p))
kl = 0.5 * (sum(diag(invTheta%*%hatTheta)) - log(det(invTheta%*%hatTheta)) - p)
return(kl)
}
QL = function(Theta,hatTheta){
# Kuismin, M., & Sillanpaa, M. J. (2016). Use of Wishart prior
# and simple extensions for
# sparse precision matrix estimation. PloS one, 11(2), e0148171.
p = ncol(Theta)
I = diag(1,p)
invTheta = solve(Theta,I)
osa = sum(diag(invTheta%*%hatTheta - I))
tulos = osa^2
return(tulos)
}
unbiased_cov <- function(x){
x <- scale(x)
n <- nrow(x) - 1
mle_cov <- n^-1 * t(x) %*% x
stats::cov2cor(solve(mle_cov))
}
Mo_risk_help <- function(x, post, n1, n2, p){
inv_mat <- post[,,x]
Y_rep1 <- mvnfast::rmvn(n = n1, mu = rep(0, p), sigma = stats::cov2cor(solve(inv_mat)))
Y_rep2 <- mvnfast::rmvn(n = n2, mu = rep(0, p), sigma = stats::cov2cor(solve(inv_mat)))
jsd <- 0.5 * KL(unbiased_cov(Y_rep1), unbiased_cov(Y_rep2)) +
0.5 * KL(unbiased_cov(Y_rep2), unbiased_cov(Y_rep1))
jsd
}
Y_combine <- function(...){
dat <- list(...)
dat <- lapply(1:length(dat), function(x) na.omit(dat[[x]]))
dat_info <- lapply(1:length(dat), function(x) {
p <- ncol(dat[[x]])
n <- nrow(dat[[x]])
data.frame(p = p, n = n)
})
list(dat = dat, dat_info = do.call(rbind, dat_info),
pairwise = t(combn(1:length(dat), 2)))
}
approx_sd <- function(r, n, k){
sqrt((1-r^2)/(n - k - 2))
}
positive_helper <- function(pcor, post_sd, BF_null){
dens_greater <- (1 - pnorm(0, pcor, post_sd)) * 2
BF_null * dens_greater
}
negative_helper <- function(pcor, post_sd, BF_null){
dens_less <- pnorm(0, pcor, post_sd) * 2
BF_null * dens_less
}
exhaustive_helper <- function(BF_null, BF_positive, BF_negative){
c(BF_null, BF_positive, BF_negative) / sum(BF_null, BF_positive, BF_negative)
}
symmetric_mat <- function(x){
x[lower.tri(x)] <- t(x)[lower.tri(x)]
x
}
colnames_helper <- function(x, col_names){
colnames(x) <- col_names
}
sampling_helper = function(X, nu, delta, n_samples){
X <- as.matrix(X)
# number of variables
p <- ncol(X)
# number of observations
n <- nrow(X)
# number of partial correlations
pcors <- (p * (p - 1)) / 2
# names for the partial correlations
col_names <- numbers2words(1:p)
mat_name <- matrix(unlist(lapply(col_names, function(x) paste(col_names,x, sep = ""))), p , p)
mat_name_up <- mat_name[upper.tri(mat_name)]
mat_name_low <- mat_name[lower.tri(mat_name)]
# center the data
Xhat <- X - rep(1,n)%*%t(apply(X,2,mean))
# scatter matrix
S <- t(Xhat)%*%Xhat
# storage
pcor_store_up <- pcor_store_low <- prior_store_up <- prior_store_low <- matrix(NA, nrow = n_samples, ncol = pcors)
inv_cov_store <- array(NA, c(p, p, n_samples))
# initial values
Psi <- b_inv <- sigma_inv <- diag(p)
for(i in 1:n_samples){
# draw from posterior
post <- post_helper(S = S, n = n,
nu = nu, p = p,
delta = delta,
Psi = Psi,
sigma_inv = sigma_inv)
# store partials
pcor_store_up[i,] <- post$pcors_post_up
pcor_store_low[i,] <- post$pcors_post_low
# store the inverse
inv_cov_store[,,i] <- post$sigma_inv
# draw from prior and store
prior_samps <- prior_helper(delta = delta, p = p)
prior_store_up[i,] <- prior_samps$pcors_prior_up
    prior_store_low[i, ] <- prior_samps$pcors_prior_low
# Psi
Psi <- post$Psi
sigma_inv <- post$sigma_inv
}
# transform posterior samples
fisher_z_post_up <- apply(pcor_store_up, 2, fisher_z)
fisher_z_post_low <- apply(pcor_store_low, 2, fisher_z)
fisher_z_prior_up <- apply(prior_store_up, 2, fisher_z)
fisher_z_prior_low <- apply(prior_store_low, 2, fisher_z)
colnames(fisher_z_prior_up) <- mat_name_up
colnames(fisher_z_post_up) <- mat_name_up
colnames(pcor_store_up) <- mat_name_up
colnames(fisher_z_prior_low) <- mat_name_low
colnames(fisher_z_post_low) <- mat_name_low
colnames(pcor_store_low) <- mat_name_low
# returned list
list(fisher_z_post = cbind(fisher_z_post_up, fisher_z_post_low),
pcor_post = cbind(pcor_store_up, pcor_store_low),
inv_cov_post = inv_cov_store,
pcor_prior = cbind(prior_store_up, prior_store_low),
fisher_z_prior =cbind(fisher_z_prior_up, fisher_z_prior_low))
}
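# Usage sketch for sampling_helper() (not run): a small synthetic example.
# The data, sample size, and hyperparameter values below are illustrative only.
#
# Y <- MASS::mvrnorm(n = 100, mu = rep(0, 5), Sigma = diag(5))
# samps <- sampling_helper(X = Y, nu = 1000, delta = 10, n_samples = 500)
# colMeans(samps$pcor_post)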
prior_helper <- function(p, delta){
I_p <- diag(p)
nu <- 1/0.001
Psi <- rWishart(1, df = nu, I_p * 0.001)[,,1]
sigma_inv_prior <- solve(rWishart(1, df = p - 1 + delta, solve(Psi))[,,1])
pcor_mat_prior <- - diag(1/sqrt(diag(sigma_inv_prior)))%*%sigma_inv_prior%*%diag(1/sqrt(diag(sigma_inv_prior)))
pcors_prior_up <- pcor_mat_prior[upper.tri(pcor_mat_prior)]
pcors_prior_low <- pcor_mat_prior[lower.tri(pcor_mat_prior)]
list(pcors_prior_up = pcors_prior_up,
pcors_prior_low = pcors_prior_low)
}
post_helper <- function(S, n, nu, p, delta, Psi, b_inv, sigma_inv){
nu = 1 / 0.001
delta = delta
B <- diag(p) * 0.001
nuMP <- delta + delta - 1
deltaMP <- nu - p + 1
BMP <- solve(B)
BMPinv <- solve(BMP)
# Psi
Psi <- rWishart(1, nuMP + deltaMP + p - 1, solve(sigma_inv + BMPinv, tol = 1e-20))[,,1]
# precision matrix
sigma_inv <- rWishart(1, (deltaMP + p - 1) + (n - 1), solve(Psi+S, tol = 1e-20))[,,1]
# partial correlation matrix
pcor_mat <- - diag(1/sqrt(diag(sigma_inv)))%*%sigma_inv%*%diag(1/sqrt(diag(sigma_inv)))
pcors_post_up = pcor_mat[upper.tri(pcor_mat)]
pcors_post_low = pcor_mat[lower.tri(pcor_mat)]
# returned list
list(pcors_post_up = pcors_post_up,
pcors_post_low = pcors_post_low, sigma_inv = sigma_inv, Psi = Psi)
}
fisher_z <- function(rho){
.5 * log(( 1 + rho )/ ( 1 - rho ))
}
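# Note (a minimal check): fisher_z() and z2r() (defined above) are inverses,
# e.g., z2r(fisher_z(0.3)) returns 0.3.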
sd_helper <- function(post_samples, prior_at_zero){
prior_at_zero / dnorm(0, mean(post_samples), stats::sd(post_samples))
}
pcor_name_helper <- function(x){
keep_vars <- unlist(strsplit(gsub("[^[:alnum:] ]", "", x), " +"))
keep_vars
}
framer <- function(x){
pos_comparisons <- unlist(gregexpr("[<>=]", x))
leftside <- rep(NA, length(pos_comparisons) + 1)
rightside <- rep(NA, length(pos_comparisons) + 1)
pos1 <- c(-1, pos_comparisons)
pos2 <- c(pos_comparisons, nchar(x) + 1)
for(i in seq_along(pos1)){
leftside[i] <- substring(x, pos1[i] + 1, pos1[i+1] - 1)
rightside[i] <- substring(x, pos2[i] + 1, pos2[i+1] - 1)
}
leftside <- leftside[-length(leftside)]
rightside <- rightside[-length(rightside)]
comparisons <- substring(x, pos_comparisons, pos_comparisons)
data.frame(left = leftside,
comp = comparisons,
right = rightside,
stringsAsFactors = FALSE)
}
create_matrices <- function(framed, varnames) {
k <- length(varnames)
if (any(grepl(",", framed$left)) ||
any(grepl(",", framed$right))) {
if (nrow(framed) > 1) {
for (r in 1:(nrow(framed) - 1)) {
      if (isTRUE(all.equal(framed$right[r], framed$left[r + 1]))) {
if (substring(framed$right[r], 1, 1) == "(") {
framed$right[r] <- sub("),.+", ")", framed$right[r])
framed$left[r + 1] <- sub(".+),", "", framed$left[r + 1])
} else{
framed$right[r] <- sub(",.+", "", framed$right[r])
framed$left[r + 1] <-
sub("[^,]+,", "", framed$left[r + 1])
}
}
}
}
commas_left <- framed$left[grep(",", framed$left)]
commas_right <- framed$right[grep(",", framed$right)]
if (isTRUE(any(!grepl("\\(.+)", commas_left))) ||
isTRUE(any(!grepl("\\(.+)", commas_right))) ||
isTRUE(any(grepl(").+", commas_left))) ||
isTRUE(any(grepl(").+", commas_right))) ||
isTRUE(any(grepl(".+\\(", commas_left))) ||
isTRUE(any(grepl(".+\\(", commas_right)))) {
stop("Incorrect hypothesis syntax or extra character, check specification")
}
framed$left <- gsub("[()]", "", framed$left)
framed$right <- gsub("[()]", "", framed$right)
commas <-
unique(c(grep(",", framed$left), grep(",", framed$right)))
if (length(commas) > 0) {
multiples <- vector("list", length = length(commas))
for (r in seq_along(commas)) {
several <- framed[commas, ][r,]
if (several$comp == "=") {
several <- c(several$left, several$right)
separate <- unlist(strsplit(several, split = ","))
if (any(grepl("^$", several)))
stop("Misplaced comma in hypothesis")
converted_equality <- paste(separate, collapse = "=")
multiples[[r]] <- framer(converted_equality)
} else{
leftvars <- unlist(strsplit(several$left, split = ","))
rightvars <- unlist(strsplit(several$right, split = ","))
if (any(grepl("^$", leftvars)) ||
any(grepl("^$", rightvars)))
stop("Misplaced comma in hypothesis")
left <-
rep(leftvars, length.out = length(rightvars) * length(leftvars))
right <- rep(rightvars, each = length(leftvars))
comp <- rep(several$comp, length(left))
multiples[[r]] <-
data.frame(
left = left,
comp = comp,
right = right,
stringsAsFactors = FALSE
)
}
}
framed <- framed[-commas, ]
multiples <- do.call(rbind, multiples)
framed <- rbind(multiples, framed)
}
}
equality <- framed[framed$comp == "=", ]
inequality <- framed[!framed$comp == "=", ]
#****Equality part string-to-matrix
if (nrow(equality) == 0) {
R_e <- r_e <- NULL
} else{
outcomes <- suppressWarnings(apply(equality[,-2], 2, as.numeric))
outcomes <- matrix(outcomes, ncol = 2, byrow = FALSE)
if (any(rowSums(is.na(outcomes)) == 0))
stop("Value compared with value rather than variable, e.g., '2 = 2', check hypotheses")
rows <- which(rowSums(is.na(outcomes)) < 2)
specified <- t(outcomes[rows, ])
specified <- specified[!is.na(specified)]
r_e <- ifelse(rowSums(is.na(outcomes)) == 2, 0, specified)
r_e <- matrix(r_e)
var_locations <-
apply(equality[,-2], 2, function(x)
ifelse(x %in% varnames, match(x, varnames), 0))
var_locations <- matrix(var_locations, ncol = 2)
R_e <-
matrix(rep(0, nrow(equality) * length(varnames)), ncol = length(varnames))
for (i in seq_along(r_e)) {
if (!all(var_locations[i,] > 0)) {
R_e[i, var_locations[i, ]] <- 1
} else{
R_e[i, var_locations[i, ]] <- c(1,-1)
}
}
}
#****Inequality part string-to-matrix
if (nrow(inequality) == 0) {
R_i <- r_i <- NULL
} else{
outcomes <- suppressWarnings(apply(inequality[,-2], 2, as.numeric))
outcomes <- matrix(outcomes, ncol = 2, byrow = FALSE)
if (any(rowSums(is.na(outcomes)) == 0))
stop("Value compared with value rather than variable, e.g., '2 > 2', check hypotheses")
cols <- which(rowSums(is.na(outcomes)) < 2)
specified <- t(outcomes[cols, ])
specified <- specified[!is.na(specified)]
r_i <- ifelse(rowSums(is.na(outcomes)) == 2, 0, specified)
r_i <- matrix(r_i)
leq <- which(inequality$comp == "<")
var_locations <-
apply(inequality[,-2], 2, function(x)
ifelse(x %in% varnames, match(x, varnames), 0))
var_locations <- matrix(var_locations, ncol = 2)
R_i <-
matrix(rep(0, nrow(inequality) * length(varnames)), ncol = length(varnames))
for (i in seq_along(r_i)) {
if (!all(var_locations[i,] > 0)) {
if (var_locations[i, 1] == 0) {
if (i %in% leq) {
value <- 1
} else{
r_i[i] <- r_i[i] * -1
value <- -1
}
} else{
if (i %in% leq) {
r_i[i] <- r_i[i] * -1
value <- -1
} else{
value <- 1
}
}
R_i[i, var_locations[i, ]] <- value
} else{
value <- if (i %in% leq)
c(-1, 1)
else
c(1,-1)
R_i[i, var_locations[i, ]] <- value
}
}
}
#3)check comparisons----------------
if (is.null(R_i)) {
comparisons <- "only equality"
} else if (is.null(R_e)) {
comparisons <- "only inequality"
} else{
comparisons <- "both comparisons"
}
#set prior mean
R_ei <- rbind(R_e, R_i)
r_ei <- rbind(r_e, r_i)
Rr_ei <- cbind(R_ei, r_ei)
# beta_zero <- MASS::ginv(R_ei) %*% r_ei
if (nrow(Rr_ei) > 1) {
# rref_ei <- pracma::rref(Rr_ei)
nonzero <- rref_ei[, k + 1] != 0
if (max(nonzero) > 0) {
row1 <- max(which(nonzero == T))
if (sum(abs(rref_ei[row1, 1:k])) == 0) {
stop("Default prior mean cannot be constructed from constraints.")
}
}
}
# beta_zero = beta_zero,
list(
R_i = R_i,
r_i = r_i,
R_e = R_e,
r_e = r_e,
R_ei = R_ei,
Rr_ei = Rr_ei,
r_ei = r_ei,
comparisons = comparisons
)
}
word2num <- function(word){
wsplit <- strsplit(tolower(word)," ")[[1]]
one_digits <- list(zero=0, one=1, two=2, three=3, four=4, five=5,
six=6, seven=7, eight=8, nine=9)
teens <- list(eleven=11, twelve=12, thirteen=13, fourteen=14, fifteen=15,
sixteen=16, seventeen=17, eighteen=18, nineteen=19)
ten_digits <- list(ten=10, twenty=20, thirty=30, forty=40, fifty=50,
sixty=60, seventy=70, eighty=80, ninety=90)
doubles <- c(teens,ten_digits)
out <- 0
i <- 1
while(i <= length(wsplit)){
j <- 1
if(i==1 && wsplit[i]=="hundred")
temp <- 100
else if(i==1 && wsplit[i]=="thousand")
temp <- 1000
else if(wsplit[i] %in% names(one_digits))
temp <- as.numeric(one_digits[wsplit[i]])
else if(wsplit[i] %in% names(teens))
temp <- as.numeric(teens[wsplit[i]])
else if(wsplit[i] %in% names(ten_digits))
temp <- (as.numeric(ten_digits[wsplit[i]]))
if(i < length(wsplit) && wsplit[i+1]=="hundred"){
if(i>1 && wsplit[i-1] %in% c("hundred","thousand"))
out <- out + 100*temp
else
out <- 100*(out + temp)
j <- 2
}
else if(i < length(wsplit) && wsplit[i+1]=="thousand"){
if(i>1 && wsplit[i-1] %in% c("hundred","thousand"))
out <- out + 1000*temp
else
out <- 1000*(out + temp)
j <- 2
}
else if(i < length(wsplit) && wsplit[i+1] %in% names(doubles)){
temp <- temp*100
out <- out + temp
}
else{
out <- out + temp
}
i <- i + j
}
return(list(word,out))
}
numbers2words <- function(x){
## Function by John Fox found here:
## http://tolstoy.newcastle.edu.au/R/help/05/04/2715.html
## Tweaks by AJH to add commas and "and"
helper <- function(x){
digits <- rev(strsplit(as.character(x), "")[[1]])
nDigits <- length(digits)
if (nDigits == 1) as.vector(ones[digits])
else if (nDigits == 2)
if (x <= 19) as.vector(teens[digits[1]])
else trim(paste(tens[digits[2]],
Recall(as.numeric(digits[1]))))
else if (nDigits == 3) trim(paste(ones[digits[3]], "hundred and",
Recall(makeNumber(digits[2:1]))))
else {
nSuffix <- ((nDigits + 2) %/% 3) - 1
if (nSuffix > length(suffixes)) stop(paste(x, "is too large!"))
trim(paste(Recall(makeNumber(digits[
nDigits:(3*nSuffix + 1)])),
suffixes[nSuffix],"," ,
Recall(makeNumber(digits[(3*nSuffix):1]))))
}
}
trim <- function(text){
#Tidy leading/trailing whitespace, space before comma
text=gsub("^\ ", "", gsub("\ *$", "", gsub("\ ,",",",text)))
#Clear any trailing " and"
text=gsub(" and$","",text)
#Clear any trailing comma
gsub("\ *,$","",text)
}
makeNumber <- function(...) as.numeric(paste(..., collapse=""))
#Disable scientific notation
opts <- options(scipen=100)
on.exit(options(opts))
ones <- c("", "one", "two", "three", "four", "five", "six", "seven",
"eight", "nine")
names(ones) <- 0:9
teens <- c("ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
"sixteen", " seventeen", "eighteen", "nineteen")
names(teens) <- 0:9
tens <- c("twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty",
"ninety")
names(tens) <- 2:9
x <- round(x)
suffixes <- c("thousand", "million", "billion", "trillion")
if (length(x) > 1) return(trim(sapply(x, helper)))
helper(x)
}
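# Usage sketch (not run): numbers2words() and word2num() convert between
# integers and their English spellings, which is how column indices are
# encoded in the hypothesis strings, e.g.,
#
# numbers2words(23)          # "twenty three"
# word2num("twenty three")   # list("twenty three", 23)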
# make names for inverse
samps_inv_helper <- function(x, p){
inv <- paste("cov_inv", paste(paste("[", paste( 1:p, x, sep = ","),
sep = ""), "]", sep = ""), sep = "")
inv
}
# make names for partials
samps_pcor_helper <- function(x, p){
pcors <- paste("pcors", paste(paste("[", paste( 1:p, x, sep = ","),
sep = ""), "]", sep = ""), sep = "")
pcors
}
performance <- function(Estimate, True){
True <- as.matrix(True)
Estimate <- as.matrix(Estimate)
# True Negative
TN <- ifelse(True[upper.tri(True)] == 0 & Estimate[upper.tri(Estimate)] == 0, 1, 0); TN <- sum(TN)
# False Positive
FP <- ifelse(True[upper.tri(True)] == 0 & Estimate[upper.tri(Estimate)] != 0, 1, 0); FP <- sum(FP)
# True Positive
TP <- ifelse(True[upper.tri(True)] != 0 & Estimate[upper.tri(Estimate)] != 0, 1, 0); TP <- sum(TP)
# False Negatives
FN <- ifelse(True[upper.tri(True)] != 0 & Estimate[upper.tri(Estimate)] == 0, 1, 0); FN <- sum(FN)
Specificity <- TN/(TN + FP)
Sensitivity <- TP/(TP + FN)
Precision <- TP/(TP + FP)
Recall <- TP / (TP + FN)
F1_score <- 2 * ((Precision * Recall) / (Precision + Recall))
MCC <- (TP*TN - FP*FN)/sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
results <- c(Specificity,
Sensitivity,
Precision,
Recall,
F1_score,
MCC)
results_name <- c("Specificity",
"Sensitivity",
"Precision",
"Recall",
"F1_score",
"MCC")
results <- cbind.data.frame(measure = results_name, score = results)
list(results = results)
}
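# Usage sketch for performance() (not run): compare an estimated adjacency
# matrix against the true one. Here the 'estimate' is a hypothetical
# thresholded version of the true partial correlations (illustrative only),
# generated with gen_pcors(), defined later in this file.
#
# net <- gen_pcors(p = 10, edge_prob = 0.3)
# est <- ifelse(abs(net$pcors) > 0.1, 1, 0)
# performance(Estimate = est, True = net$adj)$results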
################
# taken and modified from the package orddata
# under the GPL-2 licence
rmvord_naiv <- function(n, probs, Cors, empirical) {
q = length(probs)
categ_probs = 0
cumul_probs = list(0)
quant_probs = list(0)
means = 0
vars = 0
var.wt = function(x, w) {
m = weighted.mean(x = x, w = w)
sum((x[1:length(x)] - m)^2 * w[1:length(x)])
}
for (i in 1:q) {
categ_probs[i] = length(probs[[i]])
cumul_probs[[i]] = cumsum(1:categ_probs[i]/10^12 + probs[[i]])
cumul_probs[[i]][categ_probs[i]] = 1
quant_probs[[i]] = qnorm(p = cumul_probs[[i]], mean = 0,
sd = 1)
}
retval = MASS::mvrnorm(n = n, mu = rep(0,q),
Sigma = Cors,
empirical = empirical)
for (i in 1:q) {
retval[, i] = cut(x = retval[, i], breaks = c(-1/0, quant_probs[[i]]),
right = FALSE)
}
retval
}
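# Usage sketch for rmvord_naiv() (not run): simulate two ordinal items with
# three categories each and a latent correlation of 0.4 (values illustrative).
#
# probs <- list(c(0.3, 0.4, 0.3), c(0.2, 0.5, 0.3))
# R <- matrix(c(1, 0.4, 0.4, 1), 2, 2)
# Y_ord <- rmvord_naiv(n = 100, probs = probs, Cors = R, empirical = FALSE)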
csws_labels <- ifelse(1:35 %in% c(7,10,16,24,29),
"Family Support",
ifelse(1:35 %in% c(3,12,20,25,35),
"Competition",
ifelse(1:35 %in% c(1,4,17,21,30),
"Appearence",
ifelse(1:35%in%c(2,8,18,26,31),
"God's Love",
ifelse(1:35 %in% c(13, 19, 22, 27, 33),
"Academic Competence",
ifelse(1:35 %in% c(5, 11, 14, 28, 34),
"Virtue", "Approval From Others"))))))
tas_labels <- ifelse(1:20 %in% c(1,3,6,7,9,13,14),
"Difficulty\nIdentifying Feelings",
ifelse(1:20 %in% c(2,4,11,12,17),
"Difficulty\nDescribing Feelings",
"Externally\nOriented Feelings"))
iri_labels <- ifelse(1:28 %in% c(3, 8, 11, 15, 21, 25, 28),
"Perspective Taking",
ifelse(1:28 %in% c(2, 4, 9, 14, 18, 20, 22),
"Empathic Concern",
ifelse(1:28 %in% c(1, 5, 7, 12, 16, 23, 26), "Fantasy",
"Personal Distress")))
rsa_labels <- ifelse(1:33 %in% c(1, 4, 5, 32),
"Planned Future",
ifelse(1:33 %in% c(2, 11, 17, 25, 31, 33),
"Perception of Self",
ifelse(1:33 %in% c(3, 7, 13, 16, 24, 29),
"Family Cohesion",
ifelse(1:33 %in% c(6, 9, 10, 12, 15, 19, 27),
"Social Resources",
ifelse(1:33 %in% c(8, 14, 18, 21, 22, 26),
"Social Competence", "Structured Style")))))
globalVariables(c('Y1','Y2',
'X1', 'X2',
'contrast',
'..quantile..',
'value',
'node',
'BF',
'Edge',
'Estimate',
'selected',
'probability',
'cred.lb',
'sig',
'hyp',
'label',
'color',
'fit',
'post_mean',
'Error',
'density', 'Node',
'Post.mean',
'L1', 'lag', 'acf',
'iteration',
'.imp',
'estimate',
'rref_ei', 'explore',
'print_coef',
'print_confirm',
'print_estimate',
'print_explore',
'print_ggm_compare',
'print_ggm_compare_bf',
'print_ggm_compare_ppc',
'print_ggm_confirm',
'print_roll_your_own',
'print_select_explore',
'print_select_ggm_compare_estimate',
'print_summary_coef',
'print_summary_estimate',
'print_summary_ggm_compare_bf',
'print_summary_ggm_estimate_compare',
'print_summary_metric',
'print_summary_select_explore',
'..', 'ppc', 'rope', 'y',
'Relation', 'Pr.H1',
"Y_missing",
"na_indices",
'Y'))
gen_pcors <-
function (p = 20,
edge_prob = 0.3,
lb = 0.05,
ub = 0.3) {
d <- -1
trys <- 0
while (d < 0) {
trys <- trys + 1
effects <- p * (p - 1) * 0.5
mat <- matrix(1, p, p)
prob_zero <- 1 - edge_prob
pool <- c(rep(0, effects * prob_zero),
runif(effects *
edge_prob, lb, ub))
if (length(pool) != effects) {
pool <- c(0, pool)
}
mat[upper.tri(mat)] <- sample(pool, size = effects)
pcs <- symmetric_mat(mat)
pcs <- -pcs
diag(pcs) <- -diag(pcs)
d <- det(pcs)
}
cors <- cov2cor(solve(pcs))
inv <- solve(cors)
pcors <- cov2cor(inv) * -1
diag(pcors) <- 1
adj <- ifelse(pcs == 0, 0, 1)
returned_object <- list(
pcors = pcors,
cors = cors,
trys = trys,
pcs = pcs,
adj = adj
)
returned_object
}
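# Usage sketch for gen_pcors() (not run): generate a sparse network with 10
# nodes and roughly 30% nonzero partial correlations between 0.05 and 0.3.
#
# net <- gen_pcors(p = 10, edge_prob = 0.3, lb = 0.05, ub = 0.3)
# net$adj               # adjacency matrix
# round(net$pcors, 2)   # partial correlations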
symm_mat <- function (x) {
x[lower.tri(x)] <- t(x)[lower.tri(x)]
x
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/helpers.R
|
#' Maximum A Posteriori Precision Matrix
#'
#'
#' @param Y Matrix (or data frame) of dimensions \emph{n} (observations) by \emph{p} (variables).
#'
#' @return An object of class \code{map}, including the precision matrix,
#' partial correlation matrix, and regression parameters.
#'
#' @export
#'
#' @examples
#' Y <- BGGM::bfi[, 1:5]
#'
#' # map
#' map <- map(Y)
#' map
map <- function(Y){
Y <- na.omit(Y)
p <- ncol(Y)
fit <- analytic_solve(Y)
map <- fit$inv_map
pcor <- fit$pcor_mat
betas <- lapply(1:p, function(z) -1 * (map[z,-z] / map[z,z]) )
betas <- do.call(rbind, betas)
returned_object <- list(precision = round(map, 3),
pcor = round(pcor, 3),
betas = betas,
dat = Y)
class(returned_object) <- c("BGGM",
"estimate",
"map")
return(returned_object)
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/map.R
|
#' @title Obtain Imputed Datasets
#'
#' @description Impute missing values, assuming a multivariate normal distribution, with the posterior
#' predictive distribution. For binary, ordinal, and mixed (a combination of discrete and continuous)
#' data, the values are first imputed for the latent data and then converted to the original scale.
#'
#' @param Y Matrix (or data frame) of dimensions \emph{n} (observations) by \emph{p} (variables).
#'
#' @param type Character string. Which type of data for \code{Y} ? The options include \code{continuous},
#' \code{binary}, \code{ordinal}, or \code{mixed}. Note that mixed can be used for data with only
#' ordinal variables. See the note for further details.
#'
#' @param lambda Numeric. A regularization parameter, which defaults to p + 2. A larger value results
#' in more shrinkage.
#'
#' @param mixed_type Numeric vector. An indicator of length \emph{p} for which variables should be treated as ranks.
#' (1 for rank and 0 to assume the observed marginal distribution).
#' The default is currently to treat all integer variables as ranks when
#' \code{type = "mixed"} and \code{NULL} otherwise. See note for further details.
#'
#' @param iter Number of iterations (posterior samples; defaults to 1000).
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#'
#' @references
#' \insertAllCited{}
#'
#' @return An object of class \code{mvn_imputation}:
#'
#'\itemize{
#'
#'\item \code{imputed_datasets} An array including the imputed datasets.
#'
#'}
#'
#' @details
#' Missing values are imputed with the approach described in \insertCite{hoff2009first;textual}{BGGM}.
#' The basic idea is to impute the missing values with the respective posterior pedictive distribution,
#' given the observed data, as the model is being estimated. Note that the default is \code{TRUE},
#' but this ignored when there are no missing values. If set to \code{FALSE}, and there are missing
#' values, list-wise deletion is performed with \code{na.omit}.
#'
#' @examples
#' \donttest{
#' # obs
#' n <- 5000
#'
#' # n missing
#' n_missing <- 1000
#'
#' # variables
#' p <- 16
#'
#' # data
#' Y <- MASS::mvrnorm(n, rep(0, p), ptsd_cor1)
#'
#' # for checking
#' Ymain <- Y
#'
#' # all possible indices
#' indices <- which(matrix(0, n, p) == 0,
#' arr.ind = TRUE)
#'
#' # random sample of 1000 missing values
#' na_indices <- indices[sample(5:nrow(indices),
#' size = n_missing,
#' replace = FALSE),]
#'
#' # fill with NA
#' Y[na_indices] <- NA
#'
#' # missing = 1
#' Y_miss <- ifelse(is.na(Y), 1, 0)
#'
#' # true values (to check)
#' true <- unlist(sapply(1:p, function(x)
#' Ymain[which(Y_miss[,x] == 1),x] ))
#'
#' # impute (without a progress bar)
#' fit_missing <- impute_data(Y, progress = FALSE, iter = 250)
#'
#' # impute (with a progress bar)
#' fit_missing <- impute_data(Y,
#'                            progress = TRUE,
#'                            iter = 250)
#'
#' }
#' @export
impute_data <- function(Y,
type = "continuous",
lambda = NULL,
mixed_type = NULL,
iter = 1000,
progress = TRUE){
if(!type %in% c("continuous", "mixed")){
stop(paste0("currently only 'continuous' and 'mixed' data are supported."))
}
p <- ncol(Y)
if(is.null(lambda)){
lambda <- p + 2
}
if(is.null(mixed_type)){
idx <- rep(1, p)
} else {
idx <- mixed_type
}
Y_miss <- ifelse(is.na(Y), 1, 0)
if(sum(Y_miss) == 0){
stop("no missing values detected")
}
missing_location <- unlist(sapply(1:p, function(x) paste0(which(Y_miss[,x] ==1), "--", x)))
if(isTRUE(progress)){
message(paste0("BGGM: Imputing"))
}
if(type == "continuous"){
# impute means
for(i in 1:p){
Y[which(is.na(Y[,i])) ,i] <- mean(na.omit(Y[,i]))
}
fit <-.Call(
"_BGGM_missing_gaussian",
Y = as.matrix(Y),
Y_miss = as.matrix(Y_miss),
Sigma = cov(Y),
iter_missing = iter,
progress = progress,
store_all = TRUE,
lambda = lambda
)
names(fit) <- "imputed_datasets"
} else if(type == "mixed"){
rank_help <- rank_helper(Y)
    rank_help$levels[is.na(Y)] <- NA
    rank_help$z0_start[is.na(rank_help$z0_start)] <- rnorm(sum(Y_miss))
fit <- .Call(
"_BGGM_missing_copula_data",
Y = Y,
Y_missing = Y_miss,
z0_start = rank_help$z0_start,
Sigma_start = cov(rank_help$z0_start),
levels = rank_help$levels,
iter_missing = iter,
      progress_impute = progress,
K = rank_help$K,
idx = idx,
lambda = lambda
)
names(fit) <- "imputed_datasets"
}
if(isTRUE(progress)){
message("BGGM: Finished")
}
returned_object <- fit
class(returned_object) <- c("BGGM", "mvn_imputation")
returned_object
}
print_mvn_impute <- function(x, ...) {
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Multivariate Normal Imputation\n")
cat("--- \n")
cat(date())
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/mvn_imputation.R
|
#' @title Compute Correlations from the Partial Correlations
#'
#' @description Convert the partial correlation matrices into correlation matrices. To our knowledge,
#' this is the only Bayesian
#' implementation in \code{R} that can estimate Pearson's, tetrachoric (binary), polychoric
#' (ordinal with more than two categories), and rank-based correlation coefficients.
#'
#'
#' @name pcor_to_cor
#'
#' @param object An object of class \code{estimate} or \code{explore}
#'
#' @param iter numeric. How many iterations (i.e., posterior samples) should be used ?
#' The default uses all of the samples, but note that this can take a long
#' time with large matrices.
#'
#' @return
#'
#' \itemize{
#'
#' \item \code{R} An array including the correlation matrices
#' (of dimensions \emph{p} by \emph{p} by \emph{iter})
#'
#' \item \code{R_mean} Posterior mean of the correlations (of dimensions \emph{p} by \emph{p})
#' }
#'
#' @note
#' The 'default' prior distributions are specified for partial correlations in particular. This
#' means that the implied prior distribution will not be the same for the correlations.
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- BGGM::ptsd
#'
#' #########################
#' ###### continuous #######
#' #########################
#'
#' # estimate the model
#' fit <- estimate(Y, iter = 250,
#' progress = FALSE)
#'
#' # compute correlations
#' cors <- pcor_to_cor(fit)
#'
#'
#' #########################
#' ###### ordinal #########
#' #########################
#'
#' # first level must be 1 !
#' Y <- Y + 1
#'
#' # estimate the model
#' fit <- estimate(Y, type = "ordinal",
#' iter = 250,
#' progress = FALSE)
#'
#' # compute correlations
#' cors <- pcor_to_cor(fit)
#'
#'
#' #########################
#' ####### mixed ######
#' #########################
#'
#' # rank based correlations
#'
#' # estimate the model
#' fit <- estimate(Y, type = "mixed",
#' iter = 250,
#' progress = FALSE)
#'
#' # compute correlations
#' cors <- pcor_to_cor(fit)
#'}
#'
#' @export
pcor_to_cor <- function(object, iter = NULL){
if(!is(object, "default")){
stop("class not supported. Must but an 'estimate' or 'explore' object.")
}
post_samps <- -object$post_samp$pcors
dims <- dim(post_samps)
if(!is.null(iter)){
if((dims[3] - 50) < iter){
warning("Iterations do not exist (too large). Using all iterations in the object.")
iter <- dims[3] - 50
}
} else {
iter <- dims[3] - 50
}
p <- dims[1]
object <- post_samps[, , -c(1:50)]
# call c ++ for speed
returned_object <- .Call(
"_BGGM_pcor_to_cor_internal",
PACKAGE = "BGGM",
x = object,
p = p
)
returned_object
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/pcor_2_cor.BGGM.R
|
#' @title Extract the Partial Correlation Matrix
#'
#' @description Extract the partial correlation matrix (posterior mean)
#' from \code{\link{estimate}}, \code{\link{explore}}, \code{\link{ggm_compare_estimate}},
#' and \code{\link{ggm_compare_explore}} objects. It is also possible to extract the
#' partial correlation differences for \code{\link{ggm_compare_estimate}} and
#' \code{\link{ggm_compare_explore}} objects.
#'
#' @param object A model estimated with \strong{BGGM}. All classes are supported, assuming
#' there is matrix to be extracted.
#'
#' @param difference Logical. Should the difference be returned (defaults to \code{FALSE}) ? Note
#' that this assumes there is a difference (e.g., an object of class \code{ggm_compare_estimate})
#' and ignored otherwise.
#'
#' @param ... Currently ignored.
#'
#' @return The estimated partial correlation matrix.
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- ptsd[,1:5] + 1
#'
#' # ordinal
#' fit <- estimate(Y, type = "ordinal",
#' iter = 250,
#' progress = FALSE)
#'
#' pcor_mat(fit)
#' }
#' @export
pcor_mat <- function(object, difference = FALSE, ...){
# estimate default
if(all(c("estimate", "default") %in% class(object))){
cn <- colnames(object$Y)
if(is.null(cn)) {
cn <- 1:object$p
}
colnames(object$pcor_mat) <- cn
rownames(object$pcor_mat) <- cn
pcor_mat <- round(object$pcor_mat, 3)
# return
pcor_mat
# explore default
} else if(all(c("explore", "default") %in% class(object))){
cn <- colnames(object$Y)
if(is.null(cn)) {
cn <- 1:object$p
}
colnames(object$pcor_mat) <- cn
rownames(object$pcor_mat) <- cn
pcor_mat <- round(object$pcor_mat, 3)
pcor_mat
# ggm compare estimate
} else if(is(object, "ggm_compare_estimate")){
# analytic is false
if(isFALSE(object$analytic)){
cn <- colnames(object$post_samp$Y)
comparisons <- length(object$pcor_mats)
if(is.null(cn)) {
cn <- 1:object$p
}
# difference ?
if(isTRUE(difference)){
# name matrices rows and columns
for(i in seq_len(comparisons)){
colnames(object$pcor_mats[[i]]) <- cn
rownames(object$pcor_mats[[i]]) <- cn
}
pcor_mat <- object$pcor_mats
} else {
pcor_mat <- list()
for(i in seq_len(comparisons)){
pcor_mat[[i]] <- round(object$post_samp[[i]]$pcor_mat, 3)
}
# name for clarity
names(pcor_mat) <- paste0("Y_g", seq_len(comparisons))
}
# return
pcor_mat
# analytic
} else {
# difference
if(isTRUE(difference)){
# names
cn <- colnames(object$info$dat[[1]])
# comparisons
comparisons <- length(object$diff)
# name matrices rows and columns
for(i in seq_len(comparisons)){
colnames(object$diff[[i]]) <- cn
rownames(object$diff[[i]]) <- cn
}
pcor_mat <- lapply(seq_len(comparisons), function(x) {
round(object$diff[[x]], 3)
})
names(pcor_mat) <- names(object$diff)
pcor_mat
# no difference
} else {
# groups
groups <- length(object$info$dat)
pcor_mat <- lapply(seq_len(groups), function(x) {
round(analytic_solve( object$info$dat[[x]])$pcor_mat, 3)
})
# names
names(pcor_mat) <- paste0("Y_g", seq_len(groups))
# return
pcor_mat
}
} # end analytic
} else if(is(object, "ggm_compare_explore")){
cn <- colnames(object$info$dat[[1]])
if(is.null(cn)){
cn <- 1:object$p
}
if(isTRUE(difference)){
if(object$groups > 2){
stop("difference only available with two groups. see 'estimate'.")
}
# pcor mat
pcor_mat <- round(object$pcor_diff, 3)
# names
colnames(pcor_mat) <- cn
rownames(pcor_mat) <- cn
# return
pcor_mat
} else {
# groups
groups <- length(object$info$dat)
pcor_mat <- lapply(seq_len(groups), function(x) {
round(analytic_solve( object$info$dat[[x]])$pcor_mat, 3)
})
# names
names(pcor_mat) <- paste0("Y_g", seq_len(groups))
# pcor mat
pcor_mat
}
} else {
stop("partial correlation matrix not found.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/pcor_mat.R
|
#' Network Plot for \code{select} Objects
#'
#' Visualize the conditional (in)dependence structure.
#'
#' @param x An object of class \code{select}.
#'
#' @param layout Character string. Which graph layout (defaults is \code{circle}) ?
#' See \link[sna]{gplot.layout}.
#'
#' @param pos_col Character string. Color for the positive edges (defaults to \code{"#009E73"}, a green).
#'
#' @param neg_col Character string. Color for the negative edges (defaults to \code{"#D55E00"}, a red).
#'
#' @param node_size Numeric. The size of the nodes (defaults to \code{10}).
#'
#' @param edge_magnify Numeric. A value that is multiplied by the edge weights. This increases (> 1) or
#'   decreases (< 1) the line widths (defaults to 1).
#'
#' @param groups A character string of length \emph{p} (the number of nodes in the model).
#' This indicates groups of nodes that should be the same color
#' (e.g., "clusters" or "communities").
#'
#' @param palette A character string specifying the palette for the \code{groups}
#' (default is \code{Set3}). See \href{http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/}{palette options here}.
#'
#' @param ... Additional options passed to \link[GGally]{ggnet2}
#'
#' @importFrom GGally ggnet2
#'
#' @importFrom network network.vertex.names<- set.edge.value set.edge.attribute %e% %v%<- network
#'
#' @importFrom sna gplot.layout.circle
#'
#' @return An object (or list of objects) of class \code{ggplot}
#' that can then be further customized.
#'
#' @note
#' A more extensive example of a custom plot is
#' provided \href{https://donaldrwilliams.github.io/BGGM/articles/netplot.html}{here}
#'
#' @examples
#' \donttest{
#' #########################
#' ### example 1: one ggm ##
#' #########################
#'
#' # data
#' Y <- bfi[,1:25]
#'
#' # estimate
#' fit <- estimate(Y, iter = 250,
#' progress = FALSE)
#'
#' # "communities"
#' comm <- substring(colnames(Y), 1, 1)
#'
#' # edge set
#' E <- select(fit)
#'
#' # plot edge set
#' plt_E <- plot(E, edge_magnify = 5,
#' palette = "Set1",
#' groups = comm)
#'
#'
#' #############################
#' ### example 2: ggm compare ##
#' #############################
#' # compare males vs. females
#'
#' # data
#' Y <- bfi[,1:26]
#'
#' Ym <- subset(Y, gender == 1,
#' select = -gender)
#'
#' Yf <- subset(Y, gender == 2,
#' select = -gender)
#'
#' # estimate
#' fit <- ggm_compare_estimate(Ym, Yf, iter = 250,
#' progress = FALSE)
#'
#' # "communities"
#' comm <- substring(colnames(Ym), 1, 1)
#'
#' # edge set
#' E <- select(fit)
#'
#' # plot edge set
#' plt_E <- plot(E, edge_magnify = 5,
#' palette = "Set1",
#' groups = comm)
#'
#'
#'}
#'
#' @export
plot.select <- function(x,
layout = "circle",
pos_col = "#009E73",
neg_col = "#D55E00",
node_size = 10,
edge_magnify = 1,
groups = NULL,
palette = "Set3",
...){
# select estimate
if(is(x, "select.estimate")){
cn <- colnames(x$object$Y)
p <- ncol(x$pcor_adj)
diag(x$pcor_adj) <- 0
net <- network::network(x$pcor_adj)
if(is.null(cn) ) {
cn <- 1:p
}
# edge weights
network::set.edge.value(x = net, attrname = "weights",
value = x$pcor_adj)
# edge weights absolute
network::set.edge.value(x = net, attrname = "abs_weights",
value = abs(x$pcor_adj) * edge_magnify)
# edge colors
network::set.edge.attribute(x = net, attrname = "edge_color",
value = ifelse(net %e% "weights" < 0,
neg_col,
pos_col))
e <- abs(as.numeric(x$pcor_adj))
if(is.null(groups)){
plt <- ggnet2(net, edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.size = 1,
mode = layout, ...) +
geom_point(color = "black",
size = node_size+1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
} else {
net %v% "group" <- groups
suppressMessages(
plt <- ggnet2(net, edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.color = "group",
node.size = 1,
mode = layout,
...) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
}
list(plt = plt)
# end: select.estimate
} else if (is(x, "select.explore")){
if(x$alternative == "two.sided" |
x$alternative == "greater" |
x$alternative == "less"){
cn <- colnames(x$object$Y)
p <- ncol(x$pcor_mat_zero)
diag(x$pcor_mat_zero) <- 0
if(x$alternative == "two.sided"){
Adj_alt <- x$Adj_10
Adj_null <- x$Adj_01
}
if(x$alternative == "greater" |
x$alternative == "less"){
warning(paste0("interpret the conditional indepedence structure cautiously, as the Bayes factor\n",
"is a measure of 'relative' evidence. In this case, ",
x$alternative, " than zero was compared\n",
"to a null model. This does not consider the opposite direction."
))
Adj_alt <- x$Adj_20
Adj_null <- x$Adj_02
}
ambiguous <- matrix(1, p, p) - diag(p) - Adj_alt - Adj_null
net_alt <- network::network(x$pcor_mat_zero)
net_null <- network::network(Adj_null)
net_ambigous <- network::network(ambiguous)
if(is.null(cn) ) {
cn <- 1:p
}
# edge weights
network::set.edge.value(x = net_alt, attrname = "weights",
value = x$pcor_mat_zero)
# edge weights absolute
network::set.edge.value(x = net_alt, attrname = "abs_weights",
value = abs(x$pcor_mat_zero) * edge_magnify)
# edge colors
network::set.edge.attribute(x = net_alt, attrname = "edge_color",
value = ifelse(net_alt %e% "weights" < 0,
neg_col,
pos_col))
e <- abs(as.numeric( x$pcor_mat_zero))
if(is.null(groups)){
plt_alt <- ggnet2(
net_alt,
edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.size = 1,
mode = layout
) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
plt_null <- ggnet2(net_null,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
plt_ambiguous <- ggnet2(net_ambigous,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
list(plt_alt = plt_alt,
plt_null = plt_null,
plt_ambiguous = plt_ambiguous)
} else {
net_alt %v% "group" <- groups
net_null %v% "group" <- groups
net_ambigous %v% "group" <- groups
suppressMessages(
plt_alt <- ggnet2(net_alt, edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.color = "group",
node.size = 1,
mode = layout) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
suppressMessages(
plt_null <- ggnet2(net_null,
node.size = 1,
node.color = "group",
mode = layout) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
suppressMessages(
plt_ambiguous <- ggnet2(net_ambigous,
node.size = 1,
node.color = "group",
mode = layout) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
list(H1_plt = plt_alt,
H0_plt = plt_null,
ambiguous_plt = plt_ambiguous)
} # end groups
} else if(x$alternative == "exhaustive"){
cn <- colnames(x$object$Y)
p <- ncol(x$pcor_mat)
diag(x$pcor_mat) <- 0
ambiguous <- ifelse((x$neg_mat + x$pos_mat + x$null_mat) == 0, 1, 0)
net_pos <- network::network(x$pos_mat * x$pcor_mat)
net_neg <- network::network(x$neg_mat * x$pcor_mat)
net_null <- network::network(x$null_mat)
net_ambigous <- network::network(ambiguous)
if(is.null(cn) ) {
cn <- 1:p
}
# positive
# edge weights
network::set.edge.value(x = net_pos, attrname = "weights",
value = x$pos_mat * x$pcor_mat)
# edge weights absolute
network::set.edge.value(x = net_pos, attrname = "abs_weights",
value = abs(x$pos_mat * x$pcor_mat) * edge_magnify)
# edge colors
network::set.edge.attribute(x = net_pos, attrname = "edge_color",
value = ifelse(net_pos %e% "weights" < 0,
neg_col,
pos_col))
# negative
# edge weights
network::set.edge.value(x = net_neg, attrname = "weights",
value = x$neg_mat * x$pcor_mat)
# edge weights absolute
network::set.edge.value(x = net_neg, attrname = "abs_weights",
value = abs(x$neg_mat * x$pcor_mat ) * edge_magnify)
# edge colors
network::set.edge.attribute(x = net_neg, attrname = "edge_color",
value = ifelse(net_neg %e% "weights" < 0,
neg_col,
pos_col))
if(is.null(groups)){
e <- abs(as.numeric( x$pcor_mat * x$pos_mat))
plt_pos <- ggnet2(
net_pos,
edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.size = 1,
mode = layout
) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
e <- abs(as.numeric( x$pcor_mat * x$neg_mat))
plt_neg <- ggnet2(net_neg,
node.size = 1,
edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
plt_null <- ggnet2(net_null,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
plt_ambiguous <- ggnet2(net_ambigous,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
list(plt_pos = plt_pos,
plt_neg = plt_neg,
plt_null = plt_null,
plt_ambiguous = plt_ambiguous)
} else {
net_pos %v% "group" <- groups
net_neg %v% "group" <- groups
net_null %v% "group" <- groups
net_ambigous %v% "group" <- groups
e <- abs(as.numeric( x$pcor_mat * x$pos_mat))
suppressMessages(
plt_pos <- ggnet2(
net_pos,
edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.size = 1,
node.color = "group",
mode = layout
) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
e <- abs(as.numeric( x$pcor_mat * x$neg_mat))
suppressMessages(
plt_neg <- ggnet2(net_neg,
node.size = 1,
edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.color = "group",
mode = layout) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
suppressMessages(
plt_null <- ggnet2(net_null,
node.size = 1,
mode = layout,
node.color = "group") +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
suppressMessages(
plt_ambiguous <- ggnet2(net_ambigous,
node.size = 1,
mode = layout,
node.color = "group") +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
list(plt_pos = plt_pos,
plt_neg = plt_neg,
plt_null = plt_null,
plt_ambiguous = plt_ambiguous)
} # end groups
}
} else if(is(x, "select.ggm_compare_estimate")){
cn <- colnames(x$object$info$dat[[1]])
p <- ncol(x$pcor_adj[[1]])
comparisons <- length(x$pcor_adj)
if(is.null(cn) ) {
cn <- 1:p
}
lapply(1:comparisons, function(z){
net <- network::network(x$pcor_adj[[z]])
# edge weights
network::set.edge.value(x = net, attrname = "weights",
value = x$pcor_adj[[z]])
# edge weights absolute
network::set.edge.value(x = net, attrname = "abs_weights",
value = abs(x$pcor_adj[[z]]) * edge_magnify)
# edge colors
network::set.edge.attribute(x = net, attrname = "edge_color",
value = ifelse(net %e% "weights" < 0,
neg_col,
pos_col))
diag(x$pcor_adj[[z]]) <- 0
e <- abs(as.numeric(x$pcor_adj[[z]]))
if(is.null(groups)){
ggnet2(net, edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size+1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn) +
ggtitle(names(x$object$diff)[z])
} else {
net %v% "group" <- groups
suppressMessages(
ggnet2(net, edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.color = "group",
node.size = 1,
mode = layout) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn) +
ggtitle(names(x$object$diff[z]))
)
}
})
} else if(is(x, "select.ggm_compare_bf")){
if(x$post_prob == 0.50){
cn <- colnames(x$object$info$dat[[1]])
p <- ncol(x$pcor_adj[[1]])
if(is.null(cn) ){
cn <- 1:p
}
if(length(x$info$dat) == 2){
net_alt <- network::network(x$adj_10 * x$pcor_mat)
net_null <- network::network(x$adj_01)
# edge weights
network::set.edge.value(x = net_alt, attrname = "weights",
value = x$adj_10 * x$pcor_mat)
# edge weights absolute
network::set.edge.value(x = net_alt, attrname = "abs_weights",
value = abs(x$adj_10 * x$pcor_mat) * edge_magnify)
# edge colors
network::set.edge.attribute(x = net_alt, attrname = "edge_color",
value = ifelse(net_alt %e% "weights" < 0,
neg_col,
pos_col))
if(is.null(groups)){
e <- abs(as.numeric( x$pcor_mat * x$adj_10))
plt_alt <- ggnet2(
net_alt,
edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.size = 1,
mode = layout,...
) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
plt_null <- ggnet2(net_null,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
list(plt_alt = plt_alt,
plt_null = plt_null)
} else {
net_alt %v% "group" <- groups
net_null %v% "group" <- groups
e <- abs(as.numeric( x$pcor_mat * x$adj_10))
suppressMessages(
plt_alt <- ggnet2(
net_alt,
edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.color = "group",
node.size = 1,
mode = layout
) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
suppressMessages(
plt_null <- ggnet2(net_null,
node.size = 1,
mode = layout,
node.color = "group") +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
list(plt_alt = plt_alt,
plt_null = plt_null)
} # end clusters
} else {
net_alt <- network::network(x$adj_10)
net_null <- network::network(x$adj_01)
if(is.null(groups)){
plt_alt <- ggnet2(net_alt,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
plt_null <- ggnet2(net_null,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
list(plt_alt = plt_alt,
plt_null = plt_null)
} else {
net_alt %v% "group" <- groups
net_null %v% "group" <- groups
suppressMessages(
plt_alt <- ggnet2(net_alt,
node.size = 1,
mode = layout,
node.color = "group") +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
suppressMessages(
plt_null <- ggnet2(net_null,
node.size = 1,
mode = layout,
node.color = "group") +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
list(plt_alt = plt_alt,
plt_null = plt_null)
} # end of clusters
} # end two groups
# more than 0.50
} else {
cn <- colnames(x$object$info$dat[[1]])
p <- ncol(x$BF_10)
if(is.null(cn) ){
cn <- 1:p
}
if(length(x$info$dat) == 2){
Adj_alt <- x$adj_10
Adj_null <- x$adj_01
ambiguous <- matrix(1, p, p) - diag(p) - Adj_alt - Adj_null
net_alt <- network::network(x$pcor_mat_10 * Adj_alt)
net_null <- network::network(Adj_null)
net_ambigous <- network(ambiguous)
# edge weights
network::set.edge.value(x = net_alt, attrname = "weights",
value = x$pcor_mat_10 * Adj_alt)
# edge weights absolute
network::set.edge.value(x = net_alt, attrname = "abs_weights",
value = abs(x$pcor_mat_10 * Adj_alt) * edge_magnify)
# edge colors
network::set.edge.attribute(x = net_alt, attrname = "edge_color",
value = ifelse(net_alt %e% "weights" < 0,
neg_col,
pos_col))
e <- abs(as.numeric( x$pcor_mat_10 * x$adj_10))
if(is.null(groups)){
plt_alt <- ggnet2(
net_alt,
edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.size = 1,
mode = layout
) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
plt_null <- ggnet2(net_null,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
plt_ambiguous <- ggnet2(net_ambigous,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
list(plt_alt = plt_alt,
plt_null = plt_null,
plt_ambiguous = plt_ambiguous)
} else {
net_alt %v% "group" <- groups
net_null %v% "group" <- groups
net_ambigous %v% "group" <- groups
suppressMessages(
plt_alt <- ggnet2(net_alt, edge.alpha = e[e != 0] / max(e),
edge.size = "abs_weights",
edge.color = "edge_color",
node.color = "group",
node.size = 1,
mode = layout) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
suppressMessages(
plt_null <- ggnet2(net_null,
node.size = 1,
node.color = "group",
mode = layout) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
suppressMessages(
plt_ambiguous <- ggnet2(net_ambigous,
node.size = 1,
node.color = "group",
mode = layout) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
list(H1_plt = plt_alt,
H0_plt = plt_null,
ambiguous_plt = plt_ambiguous)
} # end cluster
# end: two groups
} else {
Adj_alt <- x$adj_10
Adj_null <- x$adj_01
ambiguous <- matrix(1, p, p) - diag(p) - Adj_alt - Adj_null
net_alt <- network::network(Adj_alt)
net_null <- network::network(Adj_null)
net_ambigous <- network::network(ambiguous)
if(is.null(groups)){
plt_alt <- ggnet2(net_alt,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
plt_null <- ggnet2(net_null,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
plt_ambiguous <- ggnet2(net_ambigous,
node.size = 1,
mode = layout) +
geom_point(color = "black",
size = node_size + 1) +
geom_point(size = node_size, color = "white") +
guides(color = FALSE) +
geom_text(label = cn)
list(plt_alt = plt_alt,
plt_null = plt_null,
plt_ambiguous = plt_ambiguous)
} else {
net_alt %v% "group" <- groups
net_null %v% "group" <- groups
net_ambigous %v% "group" <- groups
suppressMessages(
plt_alt <- ggnet2(net_alt,
node.size = 1,
node.color = "group",
mode = layout) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
suppressMessages(
plt_null <- ggnet2(net_null,
node.size = 1,
node.color = "group",
mode = layout) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
suppressMessages(
plt_ambiguous <- ggnet2(net_ambigous,
node.size = 1,
node.color = "group",
mode = layout) +
geom_point(aes(color = groups),
size = node_size + 2,
alpha = 0.2) +
geom_point(aes(color = groups),
size = node_size,
alpha = 1) +
guides(colour = guide_legend(override.aes = list(size=node_size))) +
theme(legend.title = element_blank()) +
scale_color_brewer(palette = palette) +
geom_text(label = cn)
)
list(H1_plt = plt_alt,
H0_plt = plt_null,
ambiguous_plt = plt_ambiguous)
} # end cluster
} # end: more than 2 groups
} # end not 0.50.
} # end select.ggm_compare_bf
else {
stop("object class not currently supported")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/plot.select.R
|
#' @title Plot: Prior Distribution
#'
#' @description Visualize the implied prior distribution for the partial correlations. This is
#' particularly useful for the Bayesian hypothesis testing methods.
#'
#' @name plot_prior
#'
#' @param prior_sd Scale of the prior distribution, approximately the standard deviation
#' of a beta distribution (defaults to 0.2).
#'
#' @param iter Number of iterations (prior samples; defaults to 5000).
#'
#' @return A \code{ggplot} object.
#'
#' @examples
#' # note: iter = 250 for demonstrative purposes
#'
#' plot_prior(prior_sd = 0.25, iter = 250)
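#'
#' # a wider prior, for comparison (same call, larger prior_sd)
#' plot_prior(prior_sd = 0.5, iter = 250)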
#' @export
plot_prior <- function(prior_sd = 0.2, iter = 5000){
# matrix dimensions for prior
Y_dummy <- matrix(rnorm(10 * 3),
nrow = 10, ncol = 3)
delta <- delta_solve(prior_sd)
# sample prior
prior_samp <- .Call('_BGGM_sample_prior',
PACKAGE = 'BGGM',
Y = Y_dummy,
iter = iter,
delta = delta,
epsilon = 0.01,
prior_only = 1,
explore = 1,
progress = FALSE
)
qplot(prior_samp$pcors[1,2,], geom = "density") +
xlab("Implied Prior Distribution")
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/plot_prior.R
|
#' Posterior Predictive Distribution
#'
#' @description Draw samples from the posterior predictive distribution.
#'
#' @param object An object of class \code{estimate} or \code{explore}
#'
#' @param iter Numeric. Number of samples from the predictive distribution
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE})
#'
#' @return A 3D array containing the predicted datasets
#'
#' @note Currently only implemented for \code{type = "mixed"}, \code{type = "ordinal"},
#' and \code{type = "binary"}. Note that the term "mixed" can be misleading, in that it can
#' be used with, say, only ordinal data. If a fitted \code{type} is not yet supported here,
#' re-estimate the model with \code{type = "mixed"} until all data types are supported.
#'
#' @export
#'
#' @examples
#' \donttest{
#' Y <- gss
#'
#' fit <- estimate(as.matrix(Y),
#' impute = TRUE,
#' iter = 150, type = "mixed")
#'
#' yrep <- posterior_predict(fit, iter = 100)
#' }
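#'
#' # A brief follow-up sketch (uses the objects created above): compare the
#' # observed node means to the posterior predictive means.
#' \donttest{
#' ppc_means <- apply(yrep, c(2, 3), mean)
#' round(cbind(observed  = colMeans(as.matrix(Y), na.rm = TRUE),
#'             predicted = rowMeans(ppc_means)), 2)
#' }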
posterior_predict <- function(object,
iter = 1000,
progress = TRUE){
if(!any(class(object) %in% c("estimate", "explore"))) {
stop("object must be of class 'estimate' or 'explore'.")
}
if(!object$type %in% c("binary", "mixed", "ordinal")){
stop("type must be 'mixed' or 'binary'")
}
Y <- object$Y
cors <- pcor_to_cor(object, iter = iter)
cors <- cors$R
n <- object$n
p <- object$p
predicted <- array(0, c(n, p, iter))
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = iter, style = 3)
}
if(object$type == "mixed"){
for(s in 1:iter){
cors_s <- cors[,,s]
ypred_s <- mvnrnd(n, rep(0, p), cors_s)
predicted[,,s] <- sapply(1:p, function(j) {
quantile(na.omit(Y[, j]), pnorm(ypred_s[, j], 0,
sqrt(cors_s[j, j])),
na.rm = TRUE, type = 1)
}
)
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, s)
}
}
} else if(object$type == "binary"){
betas <- t(object$post_samp$beta[,,-c(1:50)])
for(s in 1:iter){
cors_s <- cors[,,s]
yrep_s <- mvnrnd(n = n, betas[s,], cors_s)
predicted[,,s] <- apply(yrep_s, 2, function(x) {ifelse(x > 0, 1, 0) })
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, s)
}
}
} else if(object$type == "ordinal"){
betas <- t(object$post_samp$beta[,,-c(1:50)])
thresh <- object$post_samp$thresh[-c(1:50),,]
K <- ncol(thresh) - 1
temp <- matrix(0, n, p)
# nasty loop
# todo: write in c++
for(s in 1:iter){
cors_s <- cors[,,s]
yrep_s <- mvnrnd(n = n, betas[s,], cors_s)
for(j in 1:p){
# loop over observations ('obs' avoids shadowing the sample size n)
for(obs in 1:n){
for(i in 1:K){
if(yrep_s[obs, j] > thresh[s, i, j] & yrep_s[obs, j] < thresh[s, i + 1, j]){
temp[obs, j] <- i
}
}
}
}
predicted[,,s] <- temp
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, s)
}
}
}
dimnames(predicted)[[2]] <- colnames(Y)
class(predicted) <- c("array", "posterior_predict")
return(predicted)
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/posterior_predict.R
|
#' Extract Posterior Samples
#'
#' @description Extract posterior samples for all parameters.
#'
#' @param object an object of class \code{estimate} or \code{explore}.
#'
#' @param ... currently ignored.
#'
#' @return A matrix of posterior samples for the partial correlation. Note that if controlling for
#' variables (e.g., formula \code{~ age}), the matrix also includes the coefficients from each
#' multivariate regression.
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' ########################################
#' ### example 1: control with formula ###
#' ########################################
#' # (the following works with all data types)
#'
#' # controlling for gender
#' Y <- bfi
#'
#' # to control for only gender
#' # (remove education)
#' Y <- subset(Y, select = - education)
#'
#' # fit model
#' fit <- estimate(Y, formula = ~ gender,
#' iter = 250)
#'
#' # note regression coefficients
#' samps <- posterior_samples(fit)
#'
#' hist(samps[,1])
#' }
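#'
#' # A short sketch (uses the fit above): summarize only the partial
#' # correlation columns (their names contain "--"), leaving out the
#' # regression coefficients.
#' \donttest{
#' pcor_cols <- grep("--", colnames(samps))
#' round(t(apply(samps[, pcor_cols, drop = FALSE], 2, quantile,
#'               probs = c(0.025, 0.5, 0.975))), 3)
#' }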
#'
#' @export
posterior_samples <- function(object, ...){
if(is(object, "estimate") | is(object, "explore")) {
if(!is(object, "default")){
stop("object most be from 'estimate' or 'explore'")
}
# nodes
p <- object$p
# total partials
pcors_total <- p * (p - 1) * 0.5
# identity matrix
I_p <- diag(p)
# iterations
iter <- object$iter
# pcor samples
pcor_samples <-
matrix(
object$post_samp$pcors[, , 51:(iter + 50)][upper.tri(I_p)],
nrow = iter,
ncol = pcors_total,
byrow = TRUE
)
# column names
cn <- colnames(object$Y)
if(is.null(cn)){
col_names <- sapply(1:p, function(x) paste(1:p, x, sep = "--"))[upper.tri(I_p)]
} else {
col_names <- sapply(cn, function(x) paste(cn, x, sep = "--"))[upper.tri(I_p)]
}
colnames(pcor_samples) <- col_names
posterior_samples <- pcor_samples
if(!is.null(object$formula)){
# intercept only
if(ncol(object$X) == 1){
beta_terms <- "(Intercept)"
} else {
# predictors
beta_terms <- colnames(object$X)
}
# number of terms
n_beta_terms <- length(beta_terms)
# posterior samples
beta_samples <- object$post_samp$beta
if(is.null(cn)){
col_names <- 1:p
} else {
col_names <- cn
}
beta_start <- matrix(beta_samples[1:n_beta_terms,1, 51:(iter+50)],
nrow = iter, n_beta_terms, byrow = TRUE)
colnames(beta_start) <- paste0(col_names[1], "_", beta_terms)
for(i in 2:p){
# beta next
beta_i <- matrix(beta_samples[1:n_beta_terms, i, 51:(iter+50)],
nrow = iter,
n_beta_terms,
byrow = TRUE)
# colnames
colnames(beta_i) <- paste0(col_names[i], "_", beta_terms)
# beta combine
beta_start <- cbind(beta_start, beta_i)
}
posterior_samples <- cbind(posterior_samples, beta_start)
}
} else if (is(object, "var_estimate")) {
if(!is(object, "default")){
stop("object most be from 'var_estimate'")
}
# nodes
p <- object$p
# total partials
pcors_total <- p * (p - 1) * 0.5
# identity matrix
I_p <- diag(p)
# iterations
iter <- object$iter
# pcor samples
pcor_samples <-
matrix(
object$fit$pcors[, , 51:(iter + 50)][upper.tri(I_p)],
nrow = iter,
ncol = pcors_total,
byrow = TRUE
)
# column names
cn <- colnames(object$Y)
if(is.null(cn)){
col_names <- sapply(1:p, function(x) paste(1:p, x, sep = "--"))[upper.tri(I_p)]
} else {
col_names <- sapply(cn, function(x) paste(cn, x, sep = "--"))[upper.tri(I_p)]
}
colnames(pcor_samples) <- col_names
posterior_samples <- pcor_samples
n_beta_terms <- nrow(object$beta_mu)
beta_samples <- object$fit$beta
col_names <- colnames(object$Y)
beta_terms <- colnames(object$X)
beta_start <- matrix(beta_samples[1:n_beta_terms,1, 51:(iter+50)],
nrow = iter, n_beta_terms, byrow = TRUE)
colnames(beta_start) <- paste0(col_names[1], "_", beta_terms)
for(i in 2:p){
# beta next
beta_i <- matrix(beta_samples[1:n_beta_terms, i, 51:(iter+50)],
nrow = iter,
n_beta_terms,
byrow = TRUE)
# colnames
colnames(beta_i) <- paste0(col_names[i], "_", beta_terms)
# beta combine
beta_start <- cbind(beta_start, beta_i)
}
posterior_samples <- cbind(posterior_samples, beta_start)
} else {
stop("object class not currently supported")
}
return(posterior_samples)
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/posterior_samples.R
|
#' @title Partial Correlation Sum
#'
#' @name pcor_sum
#'
#' @description Compute and test partial correlation sums either within or between GGMs
#' (e.g., different groups), resulting in a posterior distribution.
#'
#' @param ... An object of class \code{estimate}. This can be either one or two fitted objects.
#'
#' @param relations Character string. Which partial correlations should be summed?
#'
#' @param iter Number of iterations (posterior samples; defaults to the number in the object).
#'
#' @return An object of class \code{pcor_sum}, including the sum and possibly the difference for
#' two sums.
#'
#' @details
#' Some care must be taken when writing the string for \code{relations}. Below are several examples
#'
#' \strong{Just a Sum}:
#' Perhaps a sum is of interest, and not necessarily the difference of two sums. This can be written as
#'
#' \itemize{
#' \item \code{relations <- c("A1--A2 + A1--A3 + A1--A4")}
#' }
#'
#' which will sum those relations.
#'
#' \strong{Comparing Sums}:
#' When comparing sums, each must be separated by "\code{;}". For example,
#'
#' \itemize{
#' \item \code{relations <- c("A1--A2 + A1--A3; A1--A2 + A1--A4")}
#' }
#'
#' which will sum both and compute the difference. Note that there cannot be more than two sums, such
#' that \code{c("A1--A2 + A1--A3; A1--A2 + A1--A4; A1--A2 + A1--A5")} will result in an error.
#'
#' \strong{Comparing Groups}:
#'
#' When more than one fitted object is supplied, it is assumed that the groups
#' should be compared for the same sum. Hence, in this case, only the sum needs to be written.
#'
#' \itemize{
#' \item \code{relations <- c("A1--A2 + A1--A3 + A1--A4")}
#' }
#'
#' The above results in that sum being computed for each group and then compared.
#'
#' @export
#'
#' @examples
#' \donttest{
#' # data
#' Y <- bfi
#'
#' # males
#' Y_males <- subset(Y, gender == 1, select = -c(education, gender))[,1:5]
#'
#' # females
#' Y_females <- subset(Y, gender == 2, select = -c(education, gender))[,1:5]
#'
#' # males
#' fit_males <- estimate(Y_males, seed = 1,
#' progress = FALSE)
#'
#' # fit females
#' fit_females <- estimate(Y_females, seed = 2,
#' progress = FALSE)
#'
#'
#' sums <- pcor_sum(fit_males,
#' fit_females,
#' relations = "A1--A2 + A1--A3")
#' # print
#' sums
#'
#' # plot difference
#' plot(sums)[[3]]
#' }
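#'
#' # A hedged sketch (uses fit_males from above): two sums within a single
#' # group, separated by ";", which also returns their difference.
#' \donttest{
#' within_sums <- pcor_sum(fit_males,
#'                         relations = "A1--A2 + A1--A3; A1--A4 + A1--A5")
#' within_sums
#' }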
pcor_sum <- function(..., iter = NULL, relations){
# collect ...
collect_objects <- list(...)
# number of groups
groups <- length(collect_objects)
# partial_sum_i
partial_sum_i <- list()
if(is.null(iter)){
iter <- collect_objects[[1]]$iter
}
# separate to count
count_sums <- strsplit(relations, "\\;")[[1]]
# how many sums ?
n_sums <- length(count_sums)
remove_space <- gsub("[[:space:]]", "", count_sums)
remove_plus <- gsub("\\+", replacement = " ", remove_space)
each_sum <- strsplit(remove_plus, split = "[[:space:]]")
if (n_sums > 2) {
stop("there is only support for 'at most' two sums")
}
# start one group
if (groups == 1) {
if(!all(c("estimate", "default") %in% class(collect_objects[[1]]))){
stop("the object must be of class 'estimate'")
}
# posterior samples
samps <- posterior_samples(collect_objects[[1]])[1:iter,]
if (n_sums == 1) {
# sum
sums <- lapply(1:1, function(x) {
sum_i <- eval(parse(text = paste0("samps[,'",
each_sum[[x]], "']",
collapse = "+")))
})
# assign name for printing
names(sums) <- remove_space
diff <- NULL
# start 2 sums
} else {
sums <- lapply(1:2, function(x) {
sum_i <- eval(parse(text = paste0(
"samps[,'",
each_sum[[x]], "']",
collapse = "+"
)))
})
diff <- sums[[1]] - sums[[2]]
names(sums) <- remove_space
}
} else if (groups == 2) {
if (!all(c("estimate", "default") %in% class(collect_objects[[1]]))) {
stop("the object must be of class 'estimate'")
}
if (!all(c("estimate", "default") %in% class(collect_objects[[2]]))) {
stop("the object must be of class 'estimate'")
}
if (n_sums > 1) {
stop("only one sum can be specified when there are two groups")
}
sums <- lapply(1:2, function(g) {
samps <- posterior_samples(collect_objects[[g]])[1:iter, ]
sapply(1:1, function(x) {
eval(parse(text = paste0(
"samps[,'",
each_sum[[x]], "']",
collapse = "+"
)))
})
})
names(sums) <- paste0("g", 1:2, ": ", remove_space)
diff <- sums[[1]] - sums[[2]]
} else{
stop("too many groups. only two is currently support")
}
partial_sum_i <- list(post_diff = diff,
post_sums = sums,
n_sums = n_sums,
iter = iter)
returned_object <- partial_sum_i
class(returned_object) <- c("BGGM", "pcor_sum")
return(returned_object)
}
print_pcor_sum <- function(x, cred = 0.95, row_names = TRUE){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Network Stats: Posterior Sum\n")
cat("Posterior Samples:", x$iter, "\n")
cat("--- \n")
cat("Estimates \n\n")
# lower bound
lb <- (1 - cred) / 2
# upper bound
ub <- 1 - lb
if(is.null(x$post_diff)){
cat("Sum:", "\n")
res <- round(
data.frame(Post.mean = mean(x$post_sums[[1]]),
Post.sd = sd(x$post_sums[[1]]),
Cred.lb = quantile(x$post_sums[[1]], probs = lb),
Cred.ub = quantile(x$post_sums[[1]], probs = ub)
), 3)
if(isTRUE(row_names)){
rownames(res) <- names(x$post_sums)
} else {
rownames(res) <- NULL
}
print(res, row.names = row_names)
} else {
cat("Sum:", "\n")
dat_i <- list()
for(i in 1:2){
dat_i[[i]] <- round(
data.frame(Post.mean = mean(x$post_sums[[i]]),
Post.sd = sd(x$post_sums[[i]]),
Cred.lb = quantile(x$post_sums[[i]], probs = lb),
Cred.ub = quantile(x$post_sums[[i]], probs = ub)
), 3)
}
diff_res <- round(
data.frame(Post.mean = mean(x$post_diff),
Post.sd = sd(x$post_diff),
Cred.lb = quantile(x$post_diff, probs = lb),
Cred.ub = quantile(x$post_diff, probs = ub),
Prob.greater = mean(x$post_diff > 0),
Prob.less = mean(x$post_diff < 0)
), 3)
res <- do.call(rbind.data.frame, dat_i)
if(isTRUE(row_names)){
rownames(res) <- names(x$post_sums)
} else {
rownames(res) <- NULL
}
rownames(diff_res) <- NULL
print(res, row.names = row_names)
cat("--- \n\n")
cat("Difference:\n")
cat(paste(names(x$post_sums)[1]), "-", paste(names(x$post_sums)[2]), "\n\n")
print(diff_res, row.names = FALSE)
cat("--- \n")
}
}
#' @title Plot \code{pcor_sum} Object
#'
#' @name plot.pcor_sum
#'
#' @param x An object of class \code{pcor_sum}
#'
#' @param fill Character string. What fill for the histogram
#' (defaults to colorblind "pink")?
#'
#' @param ... Currently ignored.
#'
#' @return A list of \code{ggplot} objects
#'
#' @export
#'
#' @note For examples, see \code{\link{pcor_sum}}.
#'
#' @seealso pcor_sum
plot.pcor_sum <- function(x,
fill = "#CC79A7",
...){
if(is.null( x$post_diff)){
g1 <- ggplot(data.frame(x = x$post_sums[[1]]),
aes(x = x)) +
geom_histogram(color = "white",
fill = fill) +
xlab(names(x$post_sums)[1])
if(length( x$post_sums) == 2){
g2 <- ggplot(data.frame(x = x$post_sums[[2]]),
aes(x = x)) +
geom_histogram(color = "white",
fill = fill) +
xlab(names(x$post_sums)[2])
list(g1 = g1, g2 = g2)
} else {
list(g1 = g1)
}
} else {
g1 <- ggplot(data.frame(x = x$post_sums[[1]]),
aes(x = x)) +
geom_histogram(color = "white",
fill = fill) +
xlab(names(x$post_sums)[1])
g2 <- ggplot(data.frame(x = x$post_sums[[2]]),
aes(x = x)) +
geom_histogram(color = "white",
fill = fill) +
xlab(names(x$post_sums)[2])
diff <- ggplot(data.frame(x = x$post_diff),
aes(x = x)) +
geom_histogram(color = "white",
fill = fill) +
xlab("Difference")
suppressWarnings( list(g1 = g1, g2 = g2, diff = diff))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/posterior_sum.R
|
#' @title Precision Matrix Posterior Distribution
#'
#' @description Transform the sampled correlation matrices to
#' precision matrices (i.e., inverse covariance matrices).
#'
#' @param object An object of class \code{estimate}.
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @note The estimated precision matrix is the inverse of the \strong{correlation} matrix.
#'
#' @return
#'
#' \itemize{
#'
#' \item \code{precision_mean} The mean of the precision matrix (\code{p} by \code{p} matrix).
#'
#' \item \code{precision} 3d array of dimensions \code{p} by \code{p} by \code{iter}
#' including \strong{unconstrained} (i.e., from the full graph)
#' precision matrices.
#'
#' }
#'
#' @examples
#' \donttest{
#' # data
#' Y <- ptsd
#'
#' # fit model
#' fit <- estimate(Y)
#'
#' # precision matrix
#' Theta <- precision(fit)
#'
#' }
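#'
#' # A small follow-up (uses Theta from above): because the precision matrix
#' # is the inverse of the correlation matrix, its diagonal is 1 / (1 - R2)
#' # for each node, so predictability can be read off directly.
#' \donttest{
#' round(1 - 1 / diag(Theta$precision_mean), 3)
#' }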
#'
#' @export
precision <- function(object,
progress = TRUE){
if(is(object,"estimate") & is(object,"default")){
iter <- object$iter
p <- object$p
cors <- pcor_to_cor(object)$R
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = iter, style = 3)
}
precision <- vapply(1:iter, function(s){
Theta <- solve(cors[,,s])
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, s)
}
Theta
}, FUN.VALUE = matrix(0, p, p))
} else {
stop("class not currently supported")
}
precision_mean = apply(precision, 1:2, mean)
returned_object <- list(precision_mean = precision_mean,
precision = precision)
class(returned_object) <- c("BGGM",
"precision")
return(returned_object)
}
print_precision <- function(x,...) {
mat <- x$precision_mean
p <- ncol(mat)
colnames(mat) <- 1:p
row.names(mat) <- 1:p
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Estimate:\n\n")
print(round(mat, 3))
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/precision.R
|
#' Predicted Probabilities
#'
#' Compute the predicted probabilities for discrete data, with the possibility
#' of conditional predictive probabilities (i.e., at fixed values of other nodes)
#'
#' @param object An object of class \code{posterior_predict}
#'
#' @param outcome Character string. Node for which the probabilities are computed.
#'
#' @param Y Matrix (or data frame) of dimensions \emph{n} (observations) by \emph{p} (variables).
#' This must include the column names.
#'
#' @param ... Compute conditional probabilities by specifying a column name in \code{Y}
#' (besides the \code{outcome}) and a fixed value. This can include
#' any number of nodes. See example below. Leave this blank to compute
#' unconditional probabilities for \code{outcome}.
#'
#' @return A list containing a matrix with the computed probabilities
#' (a row for each predictive sample and a column for each category).
#'
#'
#' @note There are no checks that the conditional probability exists, i.e., suppose
#' you wish to condition on, say, B3 = 2 and B4 = 1, yet there is no instance in
#' which B3 is 2 AND B4 is 1. This will result in an uninformative error.
#'
#' @export
#'
#' @examples
#' \donttest{
#' Y <- ptsd
#' fit <- estimate(as.matrix(Y), iter = 150, type = "mixed")
#'
#' pred <- posterior_predict(fit, iter = 100)
#'
#' prob <- predicted_probability(pred,
#' Y = Y,
#' outcome = "B3",
#' B4 = 0,
#' B5 = 0)
#'
#' }
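#'
#' # A brief sketch (uses prob from above): average over the predictive
#' # draws to obtain a single probability for each category of B3.
#' \donttest{
#' round(colMeans(prob$collect, na.rm = TRUE), 3)
#' }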
predicted_probability <- function(object, outcome, Y, ...){
# note: test is for checking the subsetting of the 3d arrays
unique_values <- sort(unique(Y[,outcome]))
K <- length(unique_values)
iter <- dim(object)[3]
collect <- matrix(0, iter, K)
if(!is(object, "posterior_predict")){
stop("must be of class 'posterior_predict'")
}
dots <- list(...)
test <- list()
if(length(dots) == 0){
for(i in 1:iter){
collect[i,] <- sapply(1:K, function(x) sum(object[,outcome,i] == unique_values[x]) )
}
} else {
text_eval <- sapply(1:length(dots), function(x) {
paste("sub_set[,", x, "] == ", dots[[x]])
})
for(i in 1:iter){
sub_set <- as.matrix(object[,names(dots),i])
conditional <- eval(parse(text =
paste("object[which(", paste(unlist(text_eval),
collapse = " & "), "),,", i,"]")
))
test[[i]] <- conditional
collect[i, ] <- sapply(1:K, function(x) sum(conditional[,outcome]
== unique_values[x]))
}
}
collect <- t(apply(collect, 1,function(x){x / sum(x)}))
colnames(collect) <- unique_values
returned_object <- list(collect = collect, sub_sets = test)
return(returned_object)
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/pred_prob.R
|
#' Model Predictions for \code{estimate} Objects
#'
#' @name predict.estimate
#'
#' @param object object of class \code{estimate}
#'
#' @param iter number of posterior samples (defaults to all in the object).
#'
#' @param cred credible interval used for summarizing
#'
#' @param newdata an optional data frame for obtaining predictions (e.g., on test data)
#'
#' @param summary summarize the posterior samples (defaults to \code{TRUE}).
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param ... currently ignored
#'
#' @return \code{summary = TRUE}: 3D array of dimensions n (observations),
#' 4 (posterior summary),
#' p (number of nodes). \code{summary = FALSE}:
#' list containing predictions for each variable
#'
#'
#' @examples
#' \donttest{
#' # # data
#' Y <- ptsd
#'
#' fit <- estimate(Y, iter = 250,
#' progress = FALSE)
#'
#' pred <- predict(fit,
#' progress = FALSE)
#' }
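#'
#' # A short follow-up (uses pred from above): posterior summary of the
#' # predicted values for the first node.
#' \donttest{
#' head(pred[, , 1])
#' }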
#'
#' @export
predict.estimate <- function(object,
newdata = NULL,
summary = TRUE,
cred = 0.95,
iter = NULL,
progress = TRUE,
...){
# lower bound
lb <- (1 - cred) / 2
# upper bound
ub <- 1 - lb
if(is.null(iter)){
iter <- object$iter
}
# correlations
cors <- pcor_to_cor(object, iter = iter)$R
if(object$type == "continuous") {
if(is.null(newdata)){
# data matrix
Y <- object$Y
# nodes
p <- ncol(Y)
# observations
n <- nrow(Y)
} else {
# scale
Y <- scale(newdata, scale = FALSE)
# nodes
p <- ncol(Y)
# observations
n <- nrow(Y)
}
} else{
stop("type not currently supported. must be continuous")
}
if(object$p != p){
stop(paste0("the number of nodes in the newdata does",
"not match the number of nodes in the object"))
}
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = p, style = 3)
}
# yhats
yhats <- lapply(1:p, function(x) {
yhat_p <- .Call("_BGGM_pred_helper_latent",
Y = Y[,-x],
XX = cors[-x, -x,],
Xy = cors[x, -x,],
quantiles = c(lb, ub),
n = n,
iter = iter
)
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, x)
}
yhat_p
})
# node names
cn <- colnames(object$Y)
# check for column names
if(is.null(cn)) {
cn <- 1:p
}
fitted_array <- array(0, dim = c(n, 4, p))
dimnames(fitted_array)[[2]] <- c("Post.mean",
"Post.sd",
"Cred.lb",
"Cred.ub")
dimnames(fitted_array)[[3]] <- cn
if(isTRUE(summary)){
for(i in 1:p){
fitted_array[,,i] <- cbind(t(as.matrix(yhats[[i]]$yhat_mean)),
t(as.matrix(yhats[[i]]$yhat_sd)),
t(yhats[[i]]$yhat_quantiles))
}
} else {
fitted_array <- array(0, dim = c(iter, n, p))
dimnames(fitted_array)[[3]] <- cn
for(i in 1:p){
fitted_array[,,i] <- as.matrix(yhats[[i]]$yhat)
}
}
return(fitted_array)
}
#' Model Predictions for \code{explore} Objects
#'
#' @name predict.explore
#'
#' @param object object of class \code{explore}
#'
#' @param iter number of posterior samples (defaults to all in the object).
#'
#' @param cred credible interval used for summarizing
#'
#' @param newdata an optional data frame for obtaining predictions (e.g., on test data)
#'
#' @param summary summarize the posterior samples (defaults to \code{TRUE}).
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param ... currently ignored
#'
#' @return \code{summary = TRUE}: 3D array of dimensions n (observations),
#' 4 (posterior summary),
#' p (number of nodes). \code{summary = FALSE}:
#' list containing predictions for each variable
#'
#' @examples
#' \donttest{
#' # data
#' Y <- ptsd
#'
#' # fit model
#' fit <- explore(Y, iter = 250,
#' progress = FALSE)
#'
#' # predict
#' pred <- predict(fit,
#' progress = FALSE)
#'
#' }
#' @export
predict.explore <- function(object,
newdata = NULL,
summary = TRUE,
cred = 0.95,
iter = NULL,
progress = TRUE,
...){
# lower bound
lb <- (1 - cred) / 2
# upper bound
ub <- 1 - lb
if(is.null(iter)){
iter <- object$iter
}
# correlations
cors <- pcor_to_cor(object, iter = iter)$R
if(object$type == "continuous") {
if(is.null(newdata)){
# data matrix
Y <- object$Y
# nodes
p <- ncol(Y)
# observations
n <- nrow(Y)
} else {
# scale
Y <- scale(newdata, scale = FALSE)
# nodes
p <- ncol(Y)
# observations
n <- nrow(Y)
}
} else{
stop("type not currently supported. must be continuous")
}
if(object$p != p){
stop(paste0("the number of nodes in the newdata does",
"not match the number of nodes in the object"))
}
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = p, style = 3)
}
# yhats
yhats <- lapply(1:p, function(x) {
yhat_p <- .Call("_BGGM_pred_helper_latent",
Y = Y[,-x],
XX = cors[-x, -x,],
Xy = cors[x, -x,],
quantiles = c(lb, ub),
n = n,
iter = iter
)
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, x)
}
yhat_p
})
# node names
cn <- colnames(object$Y)
# check for column names
if(is.null(cn)) {
cn <- 1:p
}
fitted_array <- array(0, dim = c(n, 4, p))
dimnames(fitted_array)[[2]] <- c("Post.mean",
"Post.sd",
"Cred.lb",
"Cred.ub")
dimnames(fitted_array)[[3]] <- cn
if(isTRUE(summary)){
for(i in 1:p){
fitted_array[,,i] <- cbind(t(as.matrix(yhats[[i]]$yhat_mean)),
t(as.matrix(yhats[[i]]$yhat_sd)),
t(yhats[[i]]$yhat_quantiles))
}
} else {
fitted_array <- array(0, dim = c(iter, n, p))
dimnames(fitted_array)[[3]] <- cn
for(i in 1:p){
fitted_array[,,i] <- as.matrix(yhats[[i]]$yhat)
}
}
return(fitted_array)
}
#' Model Predictions for \code{var_estimate} Objects
#'
#' @name predict.var_estimate
#'
#' @param object object of class \code{var_estimate}
#'
#' @param summary summarize the posterior samples (defaults to \code{TRUE}).
#'
#' @param cred credible interval used for summarizing
#'
#' @param iter number of posterior samples (defaults to all in the object).
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param ... Currently ignored
#'
#' @return The predicted values for each regression model.
#'
#' @examples
#' \donttest{
#' # data
#' Y <- subset(ifit, id == 1)[,-1]
#'
#' # fit model with alias (var_estimate also works)
#' fit <- var_estimate(Y, progress = FALSE)
#'
#' # fitted values
#' pred <- predict(fit, progress = FALSE)
#'
#' # predicted values (1st outcome)
#' pred[,,1]
#'
#' }
#' @export
predict.var_estimate <- function(object,
summary = TRUE,
cred = 0.95,
iter = NULL,
progress = TRUE,
...){
# lower bound
lb <- (1 - cred) / 2
# upper bound
ub <- 1 - lb
# data matrix
X <- object$X
n <- nrow(X)
if(is.null(iter)){
iter <- object$iter
}
p <- object$p
post_names <- sapply(1:p, function(x) paste0(
colnames(object$Y)[x], "_", colnames(object$X))
)
post_samps <- posterior_samples(object)
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = p, style = 3)
}
yhats <- lapply(1:p, function(x){
yhat_p <- post_samps[, post_names[,x]] %*% t(X)
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, x)
}
yhat_p
})
if(isTRUE(summary)){
fitted_array <- array(0, dim = c(n, 4, p))
dimnames(fitted_array)[[2]] <- c("Post.mean",
"Post.sd",
"Cred.lb",
"Cred.ub")
dimnames(fitted_array)[[3]] <- colnames(object$Y)
for(i in 1:p){
fitted_array[,,i] <- cbind(colMeans(yhats[[i]]),
apply(yhats[[i]], 2, sd),
apply(yhats[[i]], 2, quantile, lb),
apply(yhats[[i]], 2, quantile, ub)
)
}
} else {
fitted_array <- array(0, dim = c(iter, n, p))
dimnames(fitted_array)[[3]] <- colnames(object$Y)
for(i in 1:p){
fitted_array[,,i] <- t(as.matrix(yhats[[i]]))
}
}
return(fitted_array)
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/predict.estimate.R
|
#' Predictability: Bayesian Variance Explained (R2)
#'
#' @name predictability
#'
#' @description Compute nodewise predictability or Bayesian variance explained \insertCite{@R2 @gelman_r2_2019}{BGGM}.
#' In the context of GGMs, this method was described in \insertCite{Williams2019;textual}{BGGM}.
#'
#'
#' @param object object of class \code{estimate} or \code{explore}
#'
#' @param select logical. Should the graph be selected ? The default is currently \code{FALSE}.
#'
#' @param cred numeric. credible interval between 0 and 1 (default is 0.95) that is used for selecting the graph.
#'
#' @param BF_cut numeric. evidentiary threshold (default is 3).
#'
#' @param iter Integer. Iterations (posterior samples) used for computing R2.
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param ... currently ignored.
#'
#' @return An object of classes \code{predictability} and \code{metric}, including
#'
#' \itemize{
#'
#' \item \code{scores} A list containing the posterior samples of R2. There is one element
#'
#' for each node.
#'
#' }
#'
#' @note
#'
#'
#' \strong{Binary and Ordinal Data}:
#'
#' R2 is computed from the latent data.
#'
#'
#' \strong{Mixed Data}:
#'
#' The mixed data approach is somewhat ad-hoc \insertCite{@see for example p. 277 in @hoff2007extending;textual}{BGGM}. This
#' is because uncertainty in the ranks is not incorporated, which means that variance explained is computed from
#' the 'empirical' \emph{CDF}.
#'
#' \strong{Model Selection}:
#'
#' Currently the default is to include all nodes in the model when computing R2. This can be changed (i.e., \code{select = TRUE}), which
#' then sets those edges not detected to zero. This is accomplished by subsetting the correlation matrix according to each neighborhood
#' of relations.
#'
#' @references
#' \insertAllCited{}
#'
#' @examples
#' \donttest{
#'
#' # data
#' Y <- ptsd[,1:5]
#'
#' fit <- estimate(Y, iter = 250, progress = FALSE)
#'
#' r2 <- predictability(fit, select = TRUE,
#' iter = 250, progress = FALSE)
#'
#' # summary
#' r2
#' }
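#'
#' # A brief follow-up (uses r2 from above): the same summary with a
#' # narrower credible interval.
#' \donttest{
#' summary(r2, cred = 0.90)
#' }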
#' @export
predictability <- function(object,
select = FALSE,
cred = 0.95,
BF_cut = 3,
iter = NULL,
progress = TRUE,
...){
if(is(object, "var_estimate")){
if(isTRUE(select)){
warning("'select' not implemented for 'var_estimate' objects.")
}
object$type <- "continuous"
Y <- object$Y
X <- object$X
p <- ncol(Y)
n <- nrow(Y)
if(is.null(iter)){
iter <- object$iter
}
post_names <- sapply(1:p, function(x) paste0(
colnames(object$Y)[x], "_", colnames(object$X))
)
post_samps <- posterior_samples(object)
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = p, style = 3)
}
r2 <- lapply(1:p, function(z) {
yhat_p <- post_samps[, post_names[,z]] %*% t(X)
res_sd <- apply(sweep(yhat_p, MARGIN = 2, STATS = Y[,z]), 1, sd)
yhat_var <- apply(yhat_p, 1, sd)^2
r2_p <- sapply(1:iter, function(s) {
yhat_var[s] / sd(rnorm(n, as.numeric(yhat_p[s,]), res_sd[s]))^2
})
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, z)
}
r2_p
})
} else {
if(object$type == "continuous"){
Y <- as.matrix(scale(object$Y))
} else if(object$type == "binary"){
Y <- binary_latent_helper(object$Y+1)
} else if(object$type == "ordinal"){
# latent data
Y <- ordinal_latent_helper(object$Y, object$post_samp$thresh)
} else {
# latent data
Y <- rank_helper(object$Y)$z0_start
}
# nodes
p <- ncol(Y)
# observations
n <- nrow(Y)
if(is.null(iter)){
iter <- object$iter
}
# correlations
cors <- pcor_to_cor(object)$R[,,1:iter]
# not conditional on selected model
if(isFALSE(select)){
# progress bar
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = p, style = 3)
}
r2 <- lapply(1:p, function(x) {
# computed from selected model
r2_p <- .Call(
"_BGGM_predictability_helper",
Y[, -x],
y = Y[, x],
XX = cors[-x,-x, ],
Xy = cors[x, -x, ],
n = n,
iter = iter
)$r2
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, x)
}
r2_p
})
} else {
if(is(object, "estimate") & is(object, "default")){
# select model
sel <- select(object, cred = cred)
# adjacency
adj <- sel$adj
} else if(is(object, "explore") & is(object, "default")){
sel <- select(object, BF_cut = BF_cut)
adj <- sel$Adj_10
}
# progress bar
if (isTRUE(progress)) {
pb <- utils::txtProgressBar(min = 0, max = p, style = 3)
}
# R2
r2 <- lapply(1:p, function(x) {
# non selected return zero
if(sum(adj[x,]) == 0 ){
r2_p <- 0
r2_p
# a neighborhood exists
} else {
# neighborhood
selected <- which(adj[x,] == 1)
# check length 1 (return correlation)
if(length(selected) == 1){
r2_p <- cors[x, selected,]
r2_p
# more than one relation: call c++
} else {
# computed from selected model
r2_p <- .Call(
"_BGGM_predictability_helper",
Y[, selected],
y = Y[, x],
XX = cors[selected, selected, ],
Xy = cors[x, selected, ],
n = n,
iter = iter
)$r2
}
}
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, x)
}
r2_p
})
}
}
# R2
scores <- lapply(1:p, function(x) {
r2_new <- r2[[x]]
if(length(r2_new) > 0){
r2_new[r2_new > 0]
}
})
# returned object
returned_object <- list(scores = scores,
type = "post.pred",
metric = "bayes_R2",
cred = cred,
BF_cut = BF_cut,
data_type = object$type,
Y = Y)
class(returned_object) <- c("BGGM",
"predictability",
"metric",
"R2",
"estimate")
return(returned_object)
}
#' Summary Method for \code{predictability} Objects
#'
#' @param object An object of class \code{predictability}.
#'
#' @param cred Numeric. The credible interval width for summarizing the posterior
#' distributions (defaults to 0.95; must be between 0 and 1).
#'
#' @param ... Currently ignored
#'
#' @examples
#' \donttest{
#' Y <- ptsd[,1:5]
#'
#' fit <- explore(Y, iter = 250,
#' progress = FALSE)
#'
#' r2 <- predictability(fit, iter = 250,
#' progress = FALSE)
#'
#' summary(r2)
#'
#' }
#'
#' @export
summary.predictability <- function(object, cred = 0.95, ...){
lb <- (1 - cred) / 2
ub <- 1 - lb
p <- length(object$scores)
# identity matrix
I_p <- diag(p)
# column names
cn <- colnames(object$Y)
if(is.null(cn)){
mat_names <- 1:p
} else {
mat_names <- cn
}
iter <- length(object$scores[[1]])
dat_summ <- data.frame(Node = mat_names,
Post.mean = round(sapply(object$scores, mean), 3) ,
Post.sd = round(sapply(object$scores, sd),3),
Cred = round(t(sapply(object$scores,
quantile,
c(lb, ub))), 3))
dat_summ[is.na(dat_summ)] <- 0
returned_object <- list(summary = dat_summ,
metric = object$metric,
type = object$type,
iter = iter,
data_type = object$data_type,
cred = cred)
class(returned_object) <- c("BGGM",
"predictability",
"metric",
"estimate",
"summary",
"data.frame"
)
returned_object
}
print_summary_metric <- function(x, digits = 2,...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
if(x$metric == "bayes_R2"){
cat("Metric:", "Bayes R2\n")
} else if(x$metric == "bayes_R2_diff"){
cat("Metric:", "Bayes R2 Difference \n")
} else {
cat("Metric:", x$metric, "\n")
}
cat("Type:", x$data_type, "\n")
# cat("Credible Interval:", x$cred, "\n")
cat("--- \n")
cat("Estimates:\n\n")
dat <- x$summary
colnames(dat) <- c(colnames(dat)[1:3], "Cred.lb", "Cred.ub")
print(as.data.frame(dat),
row.names = FALSE)
}
#' Plot \code{predictability} Objects
#'
#' @param x An object of class \code{predictability}
#'
#' @param type Character string. Which type of plot ? The options
#' are \code{"error_bar"} or \code{"ridgeline"} (defaults to \code{"error_bar"}).
#'
#' @param cred Numeric. The credible interval width for summarizing the posterior
#' distributions (defaults to 0.95; must be between 0 and 1).
#'
#' @param width Numeric. The width of error bar ends (defaults to \code{0})
#' for \code{type = "error_bar"}.
#'
##' @param size Numeric. The size for the points (defaults to \code{2})
##' for \code{type = "error_bar"}.
#'
#' @param color Character string. What color for the point (\code{type = "error_bar"}) or
#' tail region (\code{type = "ridgeline"} ) ? Defaults to \code{"blue"}.
#'
#' @param alpha Numeric. Transparency of the ridges
#'
#' @param scale Numeric. This controls the overlap of densities
#' for \code{type = "ridgeline"} (defaults to 1).
#'
#' @param ... Currently ignored.
#'
#' @return An object of class \code{ggplot}.
#'
#' @importFrom reshape melt
#'
#' @importFrom ggridges stat_density_ridges
#'
#'
#' @examples
#' \donttest{
#' Y <- ptsd[,1:5]
#'
#' fit <- explore(Y, iter = 250,
#' progress = FALSE)
#'
#' r2 <- predictability(fit, iter = 250,
#' progress = FALSE)
#'
#' plot(r2)
#'
#'}
#' @export
plot.predictability <- function(x, type = "error_bar",
cred = 0.95,
alpha = 0.5,
scale = 1,
width = 0,
size = 1,
color = "blue",
...){
if(type == "error_bar"){
# summary
summ <- summary(x, cred = cred)
# temporary dat
temp <- summ$summary
if(x$metric == "bayes_R2" | x$metric == "bayes_R2_diff"){
temp$Post.mean <- ifelse(is.nan(temp$Post.mean), 0, temp$Post.mean)
# add ordered levels
temp$Node <- factor(temp$Node,
levels = temp$Node[(order(temp$Post.mean))],
labels = temp$Node[(order(temp$Post.mean))])
} else {
# add ordered levels
temp$Node <- factor(temp$Node,
levels = (order(temp$Post.mean)),
labels =(order(temp$Post.mean)))
}
# plot
plt <- ggplot(temp, aes(x = Node,
y = Post.mean))
if(x$metric == "bayes_R2_diff"){
plt <- plt + annotate("rect", xmin = -Inf,
xmax = Inf, ymin = -rope,
ymax =rope,
alpha = .1)
}
plt <- plt + geom_errorbar(aes(ymin = temp[,4],
ymax = temp[,5]),
width = width) +
geom_point(size = size,
color = color) +
coord_flip() +
xlab("Node") +
ylab(x$metric)
} else if (type == "ridgeline"){
# intervals
lb <- (1 - cred) / 2
ub <- 1 - lb
summ <- summary(x)
dat <- reshape::melt(x$scores)
dat$L1 <- as.factor(dat$L1)
dat$L1 <- factor(dat$L1,
labels = (order(tapply(dat$value,
dat$L1,
mean))),
levels = (order(tapply(dat$value,
dat$L1,
mean))))
color <- grDevices::adjustcolor(color,
alpha.f = alpha)
plt <- ggplot(dat, aes(x = as.numeric(value),
y = as.factor(L1),
fill=factor(stat(quantile)))) +
stat_density_ridges(rel_min_height = 0.01,
geom = "density_ridges_gradient",
calc_ecdf = TRUE,
quantiles = c(lb, ub),
scale = scale) +
scale_fill_manual(name = "Probability",
values = c(color,
"#A6A6A680",
color)) +
theme(legend.position = "none") +
ylab("Node") +
xlab(x$metric) +
scale_y_discrete(labels = summ$summary$Node[order(summ$summary$Post.mean)])
} else {
stop("type not supported. must be 'error_bar' or 'ridgeline'.")
}
plt
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/predictability.R
|
#' @name print.BGGM
#' @title Print method for \code{BGGM} objects
#'
#' @description Mainly used to avoid a plethora of different print
#' functions that overcrowded the documentation in previous versions
#' of \strong{BGGM}.
#'
#' @param x An object of class \code{BGGM}
#' @param ... currently ignored
#'
#' @importFrom methods is
#' @export
print.BGGM <- function(x, ...) {
if(is(x, "prior_var")){
print_prior_var(x, ...)
}
if(is(x, "prior_ggm")){
print_prior_ggm(x, ...)
}
# print estimate methods
if(is(x, "mvn_imputation")){
print_mvn_impute(x, ...)
}
if(is(x, "var_estimate")){
if(is(x, "default")){
print_var_estimate(x, ...)
}
if(is(x, "summary.var_estimate")){
print_summary_var_estimate(x, ...)
}
if(is(x, "select.var_estimate")){
print_select_var_estimate(x, ...)
}
} # end var_estimate
if(is(x, "pcor_sum")){
print_pcor_sum(x, ...)
}
if(is(x, "regression_summary")){
print_regression_summary(x,...)
}
if(is(x, "estimate")) {
if(is(x, "default")) {
print_estimate(x, ...)
} else if (is(x, "fitted")) {
print_fitted(x, ...)
} else if (is(x, "predict")) {
print_predict(x, ...)
} else if (is(x, "map")) {
print_map(x, ...)
} else if (is(x, "coef")) {
print_coef(x, ...)
} else if (is(x, "ggm_compare_estimate")) {
if (is(x, "summary")) {
print_summary_ggm_estimate_compare(x, ...)
} else {
print_ggm_compare(x, ...)
}
} else if (is(x, "ggm_compare_ppc")) {
print_ggm_compare_ppc(x, ...)
} else if (is(x, "metric")) {
if (is(x, "summary")) {
print_summary_metric(x)
} else {
print_summary_metric(summary(x, ...))
}
# end metric
} else if (is(x, "summary.estimate")) {
print_summary_estimate(x)
} else if (is(x, "post.pred")) {
print_post_pred(x, ...)
} else if (is(x, "select.estimate")) {
print_select_estimate(x, ...)
} else if (is(x, "select.ggm_compare_estimate")) {
print_select_ggm_compare_estimate(x, ...)
}
}
# explore methods
if (is(x, "explore")) {
if(is(x, "default")){
print_explore(x, ...)
}
if(is(x, "summary_explore")){
print_summary_estimate(x,...)
}
if (is(x, "select.explore")) {
if (is(x, "summary")) {
print_summary_select_explore(x, ...)
} else {
print_select_explore(x, ...)
}
}
if (is(x, "select.ggm_compare_bf")) {
print_select_ggm_compare_bf(x, ...)
}
if (is(x, "ggm_compare_explore")) {
if (is(x, "summary.ggm_compare_explore")) {
print_summary_ggm_compare_bf(x)
} else {
print_ggm_compare_bf(x, ...)
}
}
} # end of explore
if (is(x, "confirm")) {
if (is(x, "ggm_compare_confirm")) {
print_ggm_confirm(x , ...)
} else {
print_confirm(x, ...)
}
} # end confirm
# coefficients
if( is(x, "coef") ){
if( is(x, "summary.coef") ){
print_summary_coef(x, ...)
} else {
print_coef(x,...)
}
}
if (is(x, "roll_your_own")) {
print_roll_your_own(x, ...)
}
if (is(x, "ggm_search")) {
print_ggm_search(x, ...)
}
if(is(x, "precision")){
print_precision(x, ...)
}
if(is(x, "bma_posterior")){
print_bma(x,...)
}
if(is(x, "constrained")){
print_constrained(x, ...)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/print_BGGM.R
|
#' Prior Belief Gaussian Graphical Model
#'
#' @description Incorporate prior information into the estimation of the
#' conditional dependence structure. This prior information is expressed as
#' the prior odds that each relation should be included in the graph.
#'
#' @param Y Matrix (or data frame) of dimensions \emph{n} (observations) by
#' \emph{p} (variables/nodes).
#'
#' @param prior_ggm Matrix of dimensions \emph{p} by \emph{p}, encoding the prior
#' odds for including each relation in the graph (see '\code{Details}')
#'
#' @param post_odds_cut Numeric. Threshold for including an edge (defaults to 3).
#' Note \code{post_odds} refers to posterior odds.
#'
#' @param ... Additional arguments passed to \code{\link{explore}}.
#'
#'
#' @details Technically, the prior odds are not for including an edge in the graph,
#' but for p(H1)/p(H0), where H1 captures the hypothesized edge size and H0 is the
#' null model \insertCite{@see Williams2019_bf}{BGGM}. Accordingly, setting an
#' entry in \code{prior_ggm} to, say, 10, encodes a prior belief that H1 is 10 times
#' more likely than H0. Further, setting an entry in \code{prior_ggm} to 1 results
#' in equal prior odds (the default in \code{\link{select.explore}}).
#'
#'
#' @return An object including:
#'
#' \itemize{
#'
#' \item{\strong{adj}}: Adjacency matrix
#'
#' \item{\strong{post_prob}}: Posterior probability for the
#' alternative hypothesis.
#'
#' }
#'
#' @examples
#' \donttest{
#' # Assume perfect prior information
#' # synthetic ggm
#' p <- 20
#' main <- gen_net()
#'
#' # prior odds 10:1, assuming graph is known
#' prior_ggm <- ifelse(main$adj == 1, 10, 1)
#'
#' # generate data
#' y <- MASS::mvrnorm(n = 200,
#' mu = rep(0, 20),
#' Sigma = main$cors)
#'
#' # prior est
#' prior_est <- prior_belief_ggm(Y = y,
#' prior_ggm = prior_ggm,
#' progress = FALSE)
#'
#' # check scores
#' BGGM:::performance(Estimate = prior_est$adj,
#' True = main$adj)
#'
#' # default in BGGM
#' default_est <- select(explore(y, progress = FALSE))
#'
#' # check scores
#' BGGM:::performance(Estimate = default_est$Adj_10,
#' True = main$adj)
#'
#' }
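#'
#' # The underlying computation, in brief: each Bayes factor (BF_10) is
#' # multiplied by the corresponding prior odds, and the posterior
#' # probability follows from the posterior odds. A plain numeric
#' # illustration (not from a fitted model):
#' post_odds <- 5 * 10         # BF_10 = 5, prior odds = 10
#' post_odds / (1 + post_odds) # posterior probability for H1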
#' @export
prior_belief_ggm <- function(Y,
prior_ggm,
post_odds_cut = 3,
...){
if (any(dim(prior_ggm) != ncol(Y))) {
stop("prior_ggm must be a square matrix")
}
check_symmetric <-
all.equal(prior_ggm[lower.tri(prior_ggm)],
t(prior_ggm)[lower.tri(prior_ggm)])
if (isFALSE(check_symmetric)){
stop("prior_ggm must be symmetric")
}
if(any(prior_ggm == 0)){
stop("zeros are not allowed in prior_ggm")
}
fit <- explore(Y, ...)
sel <- select(fit)
post_odds <- sel$BF_10 * prior_ggm
adj <- ifelse(post_odds > post_odds_cut, 1, 0)
post_prob <- post_odds / (1 + post_odds)
returned_object <- list(adj = adj,
post_prob = post_prob)
class(returned_object) <- c("BGGM", "prior_ggm")
return(returned_object)
}
print_prior_ggm <- function(x, ...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("Prior Belief Gaussian Graphical Model\n")
cat("--- \n")
cat("Date:", date(), "\n")
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/prior_belief_ggm.R
|
#' Prior Belief Graphical VAR
#'
#' @param Y Matrix (or data frame) of dimensions \emph{n}
#' (observations) by \emph{p} (variables/nodes).
#'
#' @param prior_temporal Matrix of dimensions \emph{p} by \emph{p},
#' encoding the prior odds for including each relation
#' in the temporal graph (see '\code{Details}'). If null
#' a matrix of 1's is used, resulting in equal prior odds.
#'
#' @param post_odds_cut Numeric. Threshold for including an edge (defaults to 3).
#' Note \code{post_odds} refers to posterior odds.
#'
#' @param est_ggm Logical. Should the contemporaneous network be estimated
#' (defaults to \code{TRUE})?
#'
#' @param prior_ggm Matrix of dimensions \emph{p} by \emph{p}, encoding the prior
#' odds for including each relation in the graph
#' (see '\code{Details}'). If null a matrix of 1's is used,
#' resulting in equal prior odds.
#'
#' @param progress Logical. Should a progress bar be included
#' (defaults to \code{TRUE}) ?
#'
#' @param ... Additional arguments passed to \code{\link{explore}}. Ignored
#' if \code{prior_ggm = FALSE}.
#'
#' @details Technically, the prior odds are not for including an edge in the graph,
#' but for p(H1)/p(H0), where H1 captures the hypothesized edge size and H0 is the
#' null model \insertCite{@see Williams2019_bf}{BGGM}. Accordingly, setting an
#' entry in \code{prior_ggm} to, say, 10, encodes a prior belief that H1 is 10 times
#' more likely than H0. Further, setting an entry in \code{prior_ggm} or
#' \code{prior_temporal} to 1 results in equal prior odds
#' (the default in \code{\link{select.explore}}).
#'
#' @note
#' The returned matrices are formatted with the rows indicating
#' the outcome and the columns the predictor. Hence, adj_temporal[1,4] is the temporal
#' relation of node 4 predicting node 1. This follows the convention of the
#' \strong{vars} package (i.e., \code{Acoef}).
#'
#' Further, in order to compute the Bayes factor the data is
#' standardized (mean = 0 and standard deviation = 1).
#'
#' @return An object including (\code{est_ggm = FALSE}):
#'
#' \itemize{
#'
#' \item{\strong{adj}}: Adjacency matrix
#'
#' \item{\strong{post_prob}}: Posterior probability for the
#' alternative hypothesis.
#'
#' }
#'
#' An object including (\code{est_ggm = TRUE}):
#' \itemize{
#'
#' \item{\strong{adj_temporal}}: Adjacency matrix for the temporal network.
#'
#' \item{\strong{post_prob_temporal}}: Posterior probability for the
#' alternative hypothesis (temporal edge)
#'
#' \item{\strong{adj_ggm}}: Adjacency matrix for the contemporaneous
#' network (ggm).
#'
#' \item{\strong{post_prob_ggm}}: Posterior probability for the
#' alternative hypothesis (contemporaneous edge)
#' }
#'
#'
#' @export
#' @importFrom stats lm residuals
#' @importFrom BFpack BF
#' @examples
#' \donttest{
#' # affect data from 1 person
#' # (real data)
#' y <- na.omit(subset(ifit, id == 1)[,2:7])
#' p <- ncol(y)
#'
#' # random prior graph
#' # (dont do this in practice!!)
#' prior_var = matrix(sample(c(1,10),
#' size = p^2, replace = TRUE),
#' nrow = p, ncol = p)
#'
#' # fit model
#' fit <- prior_belief_var(y,
#' prior_temporal = prior_var,
#' post_odds_cut = 3)
#'}
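#'
#' # A quick look at the output (uses fit from above): adjacency matrices
#' # for the temporal and contemporaneous graphs.
#' \donttest{
#' fit$adj_temporal
#' fit$adj_ggm
#' }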
prior_belief_var <- function(Y,
prior_temporal = NULL,
post_odds_cut = 3,
est_ggm = TRUE,
prior_ggm = NULL,
progress = TRUE, ...){
y <- Y
prior_var <- prior_temporal
p <- ncol(y)
n <- nrow(y)
y <- scale(y)
colnames(y) <- NULL
y_t <- as.matrix(y[-1,,drop=FALSE])
y_t_1 <- as.matrix(y[-nrow(y),,drop=FALSE])
colnames(y_t) <- 1:p
BF_mat <- matrix(data = 0,
nrow = p,
ncol = p)
coef_mat <- BF_mat
if(est_ggm) {
resid_mat <- matrix(data = 0,
nrow = n - 1,
ncol = p)
}
if(is.null(prior_var)) {
prior_var <- matrix(1, p, p)
}
if(is.null(prior_ggm)){
prior_ggm <- matrix(1, p, p)
}
if(any(prior_ggm == 0)){
stop("zeros are not allowed in prior_ggm")
}
if(any(prior_var == 0)){
stop("zeros are not allowed in prior_ggm")
}
message("testing temporal relations")
if(progress){
pb <- utils::txtProgressBar(min = 0,
max = p,
style = 3)
}
for(i in 1:p) {
fit_i <- lm(y_t[, i] ~ 0 + y_t_1)
coef_mat[i,] <- coef(fit_i)
if(est_ggm){
resid_mat[,i] <- residuals(fit_i)
}
BF_10 <-
lapply(names(coef(fit_i)), function(x) {
1 / BF(fit_i, hypothesis = paste0(x, "=0"))$BFmatrix_confirmatory[1, 2]
})
BF_mat[i, ] <- unlist(BF_10)
if(progress){
utils::setTxtProgressBar(pb, i)
}
}
if (est_ggm) {
message("\n\ntesting contemporanenous relations")
fit_ggm <- prior_belief_ggm(Y = resid_mat,
prior_ggm = prior_ggm,
post_odds_cut = post_odds_cut,
...)
}
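# posterior odds = Bayes factor x prior odds; an edge is included when the
# posterior odds exceed post_odds_cut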
post_odds <- BF_mat * prior_var
adj <- ifelse(post_odds > post_odds_cut, 1, 0)
post_prob <- post_odds/(1 + post_odds)
if(est_ggm) {
returned_object <- list(
adj_temporal = adj,
post_prob_temporal = post_prob,
adj_ggm = fit_ggm$adj,
post_prob_ggm = fit_ggm$post_prob,
coef_mat = coef_mat
)
} else {
returned_object <- list(adj = adj,
post_prob = post_prob,
coef_mat = coef_mat)
}
class(returned_object) <- c("BGGM", "prior_var")
return(returned_object)
}
print_prior_var <- function(x, ...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("Prior Belief Graphical VAR\n")
cat("--- \n")
cat("Date:", date(), "\n")
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/prior_belief_var.R
|
#' Summary Method for Multivariate or Univariate Regression
#'
#' @param object An object of class \code{estimate}
#'
#' @param cred Numeric. The credible interval width for summarizing the posterior
#' distributions (defaults to 0.95; must be between 0 and 1).
#'
#' @param ... Currently ignored
#'
#' @return A list of length \emph{p} including the
#' summaries for each regression.
#'
#' @export
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- bfi
#'
#' Y <- subset(Y, select = c("E5", "N5",
#' "gender", "education"))
#'
#'
#' fit_mv_ordinal <- estimate(Y, formula = ~ gender + as.factor(education),
#' type = "ordinal",
#' iter = 250,
#' progress = FALSE)
#'
#' regression_summary(fit_mv_ordinal)
#'}
regression_summary <- function(object, cred = 0.95, ...){
if(!all(c("estimate", "default") %in% class(object))){
stop("class not supported. must be an estimate object")
}
lb <- (1-cred)/2
ub <- 1 - lb
iter <- object$iter
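# drop the first 50 posterior samples (warm-up)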
beta <- object$post_samp$beta[,,51:(iter + 50)]
dims <- dim(beta)[1:2]
post_mean <- apply(beta, 1:2, mean)
post_sd <- apply(beta, 1:2, sd)
post_lb <- apply(beta, 1:2, quantile, lb)
post_ub <- apply(beta, 1:2, quantile, ub)
outcomes <- dims[2]
summ <- list()
for(i in 1:outcomes){
summ[[i]] <- round(data.frame(Post.mean = post_mean[,i],
Post.sd = post_sd[,i],
Cred.lb = post_lb[,i],
Cred.ub = post_ub[,i] ), 3)
rownames( summ[[i]]) <- colnames(object$X)
}
# check colnames
cn <- colnames(object$Y)
if(is.null(cn)){
cn <- 1:outcomes
}
# colnames
names(summ) <- cn
# correlation
cors <- pcor_to_cor(object)$R
# residual correlation mean
cor_mean <- apply(cors, 1:2, mean)
colnames(cor_mean) <- cn
rownames(cor_mean) <- cn
object$post_samp <- NULL
returned_object <- list(reg_summary = summ,
resid_cor = cor_mean,
object = object)
class(returned_object) <- c("BGGM",
"regression_summary")
returned_object
}
print_regression_summary <- function(x, ...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$object$type, "\n")
cat("Formula:", paste(as.character(x$object$formula), collapse = " "), "\n")
cat("--- \n")
outcomes <- length(x$reg_summary)
cat("Coefficients: \n \n")
for(i in 1:outcomes){
cat(names(x$reg_summary)[i], "\n")
print(x$reg_summary[[i]])
cat("--- \n")
}
cat("Residual Correlation Matrix: \n")
print(round(x$resid_cor, 3))
cat("--- \n")
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/regression_summary.R
|
#' Compute Custom Network Statistics
#'
#' This function allows for computing custom network statistics for
#' weighted adjacency matrices (partial correlations). The statistics are computed for
#' each of the sampled matrices, resulting in a distribution.
#'
#' @param object An object of class \code{estimate}.
#'
#' @param FUN A custom function for computing the statistic. The first argument must be
#' a partial correlation matrix.
#'
#' @param iter Number of iterations (posterior samples; defaults to the number in the object).
#'
#' @param select Logical. Should the graph be selected ? The default is currently \code{FALSE}.
#'
#' @param cred Numeric. Credible interval between 0 and 1 (default is 0.95) that is used for selecting the graph.
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param ... Arguments passed to the function.
#'
#' @return An object defined by \code{FUN}.
#'
#' @details
#'
#' The user has complete control of this function. Hence, care must be taken as to what \code{FUN}
#' returns and in what format. The function should return a single number (one for the entire GGM)
#' or a vector (one for each node). This ensures that the print and \code{\link{plot.roll_your_own}}
#' will work.
#'
#'
#' When \code{select = TRUE}, the graph is selected and then the network statistics are computed based on
#' the weighted adjacency matrix. This is accomplished internally by multiplying each of the sampled
#' partial correlation matrices by the adjacency matrix.
#'
#' @examples
#' \donttest{
#' ####################################
#' ###### example 1: assortment #######
#' ####################################
#' # assortment
#' library(assortnet)
#'
#' Y <- BGGM::bfi[,1:10]
#' membership <- c(rep("a", 5), rep("c", 5))
#'
#' # fit model
#' fit <- estimate(Y = Y, iter = 250,
#' progress = FALSE)
#'
#' # membership
#' membership <- c(rep("a", 5), rep("c", 5))
#'
#' # define function
#' f <- function(x,...){
#' assortment.discrete(x, ...)$r
#'}
#'
#'
#' net_stat <- roll_your_own(object = fit,
#' FUN = f,
#' types = membership,
#' weighted = TRUE,
#' SE = FALSE, M = 1,
#' progress = FALSE)
#'
#' # print
#' net_stat
#'
#'
#' ############################################
#' ###### example 2: expected influence #######
#' ############################################
#' # expected influence from this package
#' library(networktools)
#'
#' # data
#' Y <- depression
#'
#' # fit model
#' fit <- estimate(Y = Y, iter = 250)
#'
#' # define function
#' f <- function(x,...){
#' expectedInf(x,...)$step1
#' }
#'
#' # compute
#' net_stat <- roll_your_own(object = fit,
#' FUN = f,
#' progress = FALSE)
#'
#' #######################################
#' ### example 3: mixed data & bridge ####
#' #######################################
#' # bridge from this package
#' library(networktools)
#'
#' # data
#' Y <- ptsd[,1:7]
#'
#' fit <- estimate(Y,
#' type = "mixed",
#' iter = 250)
#'
#' # clusters
#' communities <- substring(colnames(Y), 1, 1)
#'
#' # function is slow
#' f <- function(x, ...){
#' bridge(x, ...)$`Bridge Strength`
#' }
#'
#' net_stat <- roll_your_own(fit,
#' FUN = f,
#' select = TRUE,
#' communities = communities,
#' progress = FALSE)
#'
#' }
#'
#' @export
roll_your_own <- function(object,
FUN,
iter = NULL,
select = FALSE,
cred = 0.95,
progress = TRUE,
...){
if(! all( c("estimate", "default") %in% class(object)) ){
stop("class must be 'estimate'")
}
if(!isFALSE(select)){
sel <- select(object, cred = cred)
adj <- sel$adj
} else {
p <- ncol(object$pcor_mat)
adj <- matrix(1, p, p)
}
if(is.null(iter)){
iter <- object$iter
}
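# partial correlation samples (the first 50 draws are discarded as warm-up)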
pcors <- object$post_samp$pcors[, , 51:(iter + 50)]
if(isTRUE(progress)){
pb <- utils::txtProgressBar(min = 0, max = iter, style = 3)
}
results <- sapply(1:iter, function(x) {
pcors_s <- pcors[, , x] * adj
est <- FUN(pcors_s, ...)
if(isTRUE(progress)){
utils::setTxtProgressBar(pb, x)
}
est
})
returned_object <- list(results = results, iter = iter)
class(returned_object) <- c("BGGM",
"roll_your_own")
return(returned_object)
}
print_roll_your_own <- function(x, cred = 0.95, ...) {
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Network Stats: Roll Your Own\n")
cat("Posterior Samples:", x$iter, "\n")
cat("--- \n")
cat("Estimates: \n\n")
lb <- (1-cred) / 2
ub <- 1 - lb
dims <- dim(x$results)
if(is.null(dims)){
mu <- mean(x$results)
scale <- sd(x$results)
res <- data.frame(Post.mean = round(mean(x$results), 3),
Post.sd = round(sd(x$results), 3),
Cred.lb = round(quantile(x$results, probs = lb), 3),
Cred.ub = round(quantile(x$results, probs = ub), 3) )
} else {
mu <- apply( x$results, 1, mean)
p <- length(mu)
scale <- apply( x$results, 1, sd)
ci_lb <- apply( x$results, 1, quantile, lb)
ci_ub <- apply( x$results, 1, quantile, ub)
res<- data.frame(Node = 1:p,
Post.mean = round(mu, 3),
Post.sd = round(scale, 3),
Cred.lb = round(ci_lb, 3),
Cred.ub = round(ci_ub, 3))
}
print(res, row.names = FALSE)
cat("--- \n")
}
#' Plot \code{roll_your_own} Objects
#'
#' @name plot.roll_your_own
#'
#' @param x An object of class \code{roll_your_own}
#'
#' @param fill Character string specifying the color for the ridges.
#'
#' @param alpha Numeric. Transparency of the ridges.
#'
#' @param ... Currently ignored
#'
#' @return An object of class \code{ggplot}
#'
#' @importFrom ggridges geom_density_ridges
#'
#' @examples
#' \donttest{
#' ####################################
#' ###### example 1: assortment #######
#' ####################################
#' # assortment
#' library(assortnet)
#'
#' Y <- BGGM::bfi[,1:10]
#' membership <- c(rep("a", 5), rep("c", 5))
#'
#' # fit model
#' fit <- estimate(Y = Y, iter = 250,
#' progress = FALSE)
#'
#' # membership
#' membership <- c(rep("a", 5), rep("c", 5))
#'
#' # define function
#' f <- function(x,...){
#' assortment.discrete(x, ...)$r
#'}
#'
#' net_stat <- roll_your_own(object = fit,
#' FUN = f,
#' types = membership,
#' weighted = TRUE,
#' SE = FALSE, M = 1,
#' progress = FALSE)
#'
#' # plot
#' plot(net_stat)
#'
#' }
#' @export
plot.roll_your_own <- function(x, fill = "#CC79A7", alpha = 0.5, ...){
dims <- dim(x$results)
if(is.null(dims)){
dat <- data.frame(x= x$results, y = 1)
plt <- ggplot(dat, aes(x = x,
y = as.factor(y))) +
geom_density_ridges(fill = fill,
alpha = alpha)
} else {
dat <- reshape::melt(t(x$results))
mus <- rowMeans(x$results)
dat$order <- factor(dat$X2, levels = unique(dat$X2)[order(mus)],
labels = unique(dat$X2)[order(mus)] )
zeros <- which(with(dat, tapply(value, X2, sum)) == 0)
if(length(zeros) > 0){
zeros_dat <- data.frame(X1 = 1, X2= names(zeros), value = 0, order = names(zeros))
# drop all rows for the zero relations in one step
# (avoids mis-indexing when there is only a single such relation)
dat_new <- subset(dat, !(X2 %in% names(zeros)))
dat <- rbind.data.frame(dat_new, zeros_dat)
}
plt <- ggplot(dat, aes(x = value,
group = as.factor(order),
y = as.factor(order))) +
geom_density_ridges(fill = fill,
alpha = alpha,
rel_min_height = 0.001) +
ylab("")
}
plt
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/roll_your_own.R
|
#' Graph Selection for \code{var.estimate} Object
#'
#' @param object An object of class \code{VAR.estimate}.
#'
#' @param cred Numeric. The credible interval width for selecting the graph
#' (defaults to 0.95; must be between 0 and 1).
#'
#' @param alternative A character string specifying the alternative hypothesis. It
#' must be one of "two.sided" (default), "greater" or "less".
#' See note for further details.
#'
#' @param ... Currently ignored.
#'
#' @return An object of class \code{select.var_estimate}, including
#'
#' \itemize{
#'
#' \item \code{pcor_adj} Adjacency matrix for the partial correlations.
#'
#' \item \code{beta_adj} Adjacency matrix for the regression coefficients.
#'
#' \item \code{pcor_weighted_adj} Weighted adjacency matrix for the partial correlations.
#'
#' \item \code{beta_weighted_adj} Weighted adjacency matrix for the regression coefficients.
#'
#' \item \code{pcor_mu} Partial correlation matrix (posterior mean).
#'
#' \item \code{beta_mu} A matrix including the regression coefficients (posterior mean).
#'
#' }
#'
#'
#' @examples
#' \donttest{
#' # data
#' Y <- subset(ifit, id == 1)[,-1]
#'
#' # fit model with alias (var_estimate also works)
#' fit <- var_estimate(Y, progress = FALSE)
#'
#' # select graphs
#' select(fit, cred = 0.95)
#'
#' }
#' @export
select.var_estimate <- function(object,
cred = 0.95,
alternative = "two.sided"
,...){
pcors <- object$fit$pcors[,,51:(object$iter +50)]
pcor_mat <- apply(pcors, 1:2, mean)
beta <- object$fit$beta[,,51:(object$iter +50)]
beta_mat <- apply(beta, 1:2, mean)
if(alternative == "two.sided"){
lb <- (1 - cred) / 2
ub <- 1 - lb
pcor_adj <- ifelse(apply(pcors, 1:2, quantile, lb) < 0 &
apply(pcors, 1:2, quantile, ub) > 0, 0, 1)
beta_adj <- ifelse(apply(beta, 1:2, quantile, lb) < 0 &
apply(beta, 1:2, quantile, ub) > 0, 0, 1)
} else if(alternative == "greater") {
lb <- (1 - cred)
pcor_adj <- ifelse(apply(pcors, 1:2, quantile, lb) > 0, 1, 0)
beta_adj <- ifelse(apply(beta, 1:2, quantile, lb) > 0, 1, 0)
} else {
ub <- cred
pcor_adj <- ifelse(apply(pcors, 1:2, quantile, ub) < 0, 1, 0)
beta_adj <- ifelse(apply(beta, 1:2, quantile, ub) < 0, 1, 0)
}
beta_weighted_adj <- beta_adj * beta_mat
pcor_weighted_adj <- pcor_adj * pcor_mat
returned_object <- list(
pcor_adj = pcor_adj,
beta_adj = beta_adj,
beta_weighted_adj = beta_weighted_adj,
pcor_weighted_adj = pcor_weighted_adj,
beta_mu = beta_mat,
pcor_mu = pcor_mat,
alternative = alternative,
cred = cred,
object = object
)
class(returned_object) <- c("BGGM",
"select.var_estimate",
"var_estimate",
"select")
return(returned_object)
}
print_select_var_estimate <- function(x, ...){
object <- x
p <- ncol(object$pcor_adj)
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Vector Autoregressive Model (VAR) \n")
cat("--- \n")
cat("Posterior Samples:", object$object$iter, "\n")
cat("Credible Interval:",
gsub("*0.","", formatC( round(object$cred, 4), format='f', digits=2)),
"% \n")
cat("--- \n")
cat("Call: \n")
print(object$object$call)
cat("--- \n")
cat("Partial Correlations: \n\n")
colnames(object$pcor_weighted_adj) <- colnames(object$object$Y)
row.names(object$pcor_weighted_adj) <- colnames(object$object$Y)
print(round(object$pcor_weighted_adj, 3))
cat("--- \n")
cat("Coefficients: \n\n")
colnames(object$beta_weighted_adj) <- colnames(object$object$Y)
row.names(object$beta_weighted_adj) <- colnames(object$object$X)
print(round(object$beta_weighted_adj, 3))
cat("--- \n")
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/select.VAR_estimate.R
|
#' @title Graph Selection for \code{estimate} Objects
#'
#' @description Provides the selected graph based on credible intervals for
#' the partial correlations that did not contain zero
#' \insertCite{Williams2019}{BGGM}.
#'
#' @name select.estimate
#'
#' @param object An object of class \code{estimate.default}.
#'
#' @param cred Numeric. The credible interval width for selecting the graph
#' (defaults to 0.95; must be between 0 and 1).
#'
#' @param alternative A character string specifying the alternative hypothesis. It
#' must be one of "two.sided" (default), "greater" or "less".
#' See note for further details.
#' @param ... Currently ignored.
#'
#' @references
#' \insertAllCited{}
#'
#' @return The returned object of class \code{select.estimate} contains a lot of information that
#' is used for printing and plotting the results. For users of \strong{BGGM}, the following
#' are the useful objects:
#'
#' \itemize{
#'
#' \item \code{pcor_adj} Selected partial correlation matrix (weighted adjacency).
#' \item \code{adj} Adjacency matrix for the selected edges
#' \item \code{object} An object of class \code{estimate} (the fitted model).
#'
#' }
#'
#' @seealso \code{\link{estimate}} and \code{\link{ggm_compare_estimate}} for several examples.
#'
#'
#' @details
#'
#' This package was built for the social-behavioral sciences in particular. In these applications, there is
#' strong theory that expects \emph{all} effects to be positive. This is known as a "positive manifold" and
#' this notion has a rich tradition in psychometrics. Hence, this can be incorporated into the graph with
#' \code{alternative = "greater"}. This results in the estimated structure including only positive edges.
#'
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- bfi[,1:10]
#'
#' # estimate
#' fit <- estimate(Y, iter = 250,
#' progress = FALSE)
#'
#'
#' # select edge set
#' E <- select(fit)
#'
#' }
#'
#' @export
select.estimate <- function(object,
cred = 0.95,
alternative = "two.sided",
...){
if(isFALSE(object$analytic)){
pcors <- object$post_samp$pcors[,,51:(object$iter +50)]
if(alternative == "two.sided"){
lb <- (1 - cred) / 2
ub <- 1 - lb
adj <- ifelse(apply(pcors, 1:2, quantile, lb) < 0 &
apply(pcors, 1:2, quantile, ub) > 0, 0, 1)
} else if(alternative == "greater") {
lb <- (1 - cred)
adj <- ifelse(apply(pcors, 1:2, quantile, lb) > 0, 1, 0)
} else {
ub <- cred
adj <- ifelse(apply(pcors, 1:2, quantile, ub) < 0, 1, 0)
}
# analytic
} else {
if(alternative == "two.sided"){
lb <- (1 - cred) / 2
ub <- 1 - lb
z_stat <- abs(object$analytic_fit$inv_map / sqrt(object$analytic_fit$inv_var))
adj <- ifelse(z_stat > qnorm(ub), 1, 0)
} else if (alternative == "greater") {
ub <- 1 - cred
z_stat <- (-object$analytic_fit$inv_map) / sqrt(object$analytic_fit$inv_var)
adj <- ifelse(z_stat > qnorm(ub, lower.tail = FALSE), 1, 0)
} else if(alternative == "less"){
ub <- 1 - cred
z_stat <- (object$analytic_fit$inv_map) / sqrt(object$analytic_fit$inv_var)
adj <- ifelse(z_stat > qnorm(ub, lower.tail = FALSE), 1, 0)
}
}
pcor_adj <- adj * object$pcor_mat
returned_object <- list(
pcor_adj = pcor_adj,
adj = adj,
alternative = alternative,
cred = cred,
object = object
)
class(returned_object) <- c("BGGM",
"select.estimate",
"estimate",
"select")
returned_object
}
#' @title S3 \code{select} method
#' @name select
#' @description S3 select method
#' @param object object of class \code{estimate} or\code{explore}
#' @param ... not currently used
#' @return \code{select} works with the following methods:
#' \itemize{
#' \item \code{\link{select.estimate}}
#' \item \code{\link{select.explore}}
#' \item \code{\link{select.ggm_compare_estimate}}
#' }
#' @export
select <- function(object,...){
UseMethod("select", object)
}
print_select_estimate <- function(x, ...){
object <- x
p <- ncol(object$pcor_adj)
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", object$object$type, "\n")
cat("Analytic:", object$object$analytic, "\n")
cat("Formula:", paste(as.character(x$formula), collapse = " "), "\n")
cat("Posterior Samples:", object$object$iter, "\n")
cat("Credible Interval:", gsub("*0.","", formatC( round(object$cred, 4), format='f', digits=2)), "% \n")
cat("--- \n")
cat("Call: \n")
print(object$object$call)
cat("--- \n")
cat("Selected:\n\n")
mat <- object$pcor_adj
colnames(mat) <- 1:p
row.names(mat) <- 1:p
print(round(mat, 3))
cat("--- \n")
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/select.estimate.R
|
#' @title Graph selection for \code{explore} Objects
#'
#' @description Provides the selected graph based on the Bayes factor
#' \insertCite{Williams2019_bf}{BGGM}.
#'
#' @name select.explore
#'
#' @param object An object of class \code{explore.default}
#'
#' @param BF_cut Numeric. Threshold for including an edge (defaults to 3).
#'
#' @param alternative A character string specifying the alternative hypothesis. It
#' must be one of "two.sided" (default), "greater", "less",
#' or "exhaustive". See note for further details.
#'
#' @param ... Currently ignored.
#'
#' @references
#' \insertAllCited{}
#'
#' @details Exhaustive provides the posterior hypothesis probabilities for
#' a positive, negative, or null relation \insertCite{@see Table 3 in @Williams2019_bf}{BGGM}.
#'
#' @note Care must be taken with the options \code{alternative = "less"} and
#' \code{alternative = "greater"}. This is because the full parameter space is not included,
#' such, for \code{alternative = "greater"}, there can be evidence for the "null" when
#' the relation is negative. This inference is correct: the null model better predicted
#' the data than the positive model. But note this is relative and does \strong{not}
#' provide absolute evidence for the null hypothesis.
#'
#' @return The returned object of class \code{select.explore} contains a lot of information that
#' is used for printing and plotting the results. For users of \strong{BGGM}, the following
#' are the useful objects:
#'
#'
#' \code{alternative = "two.sided"}
#'
#' \itemize{
#'
#' \item \code{pcor_mat_zero} Selected partial correlation matrix (weighted adjacency).
#'
#' \item \code{pcor_mat} Partial correlation matrix (posterior mean).
#'
#' \item \code{Adj_10} Adjacency matrix for the selected edges.
#'
#' \item \code{Adj_01} Adjacency matrix for which there was
#' evidence for the null hypothesis.
#' }
#'
#' \code{alternative = "greater"} and \code{"less"}
#'
#' \itemize{
#'
#' \item \code{pcor_mat_zero} Selected partial correlation matrix (weighted adjacency).
#'
#' \item \code{pcor_mat} Partial correlation matrix (posterior mean).
#'
#' \item \code{Adj_20} Adjacency matrix for the selected edges.
#'
#' \item \code{Adj_02} Adjacency matrix for which there was
#' evidence for the null hypothesis (see note).
#' }
#'
#' \code{alternative = "exhaustive"}
#'
#' \itemize{
#'
#' \item \code{post_prob} A data frame that includes the posterior hypothesis probabilities.
#'
#' \item \code{neg_mat} Adjacency matrix for which there was evidence for negative edges.
#'
#' \item \code{pos_mat} Adjacency matrix for which there was evidence for positive edges.
#'
#' \item \code{null_mat} Adjacency matrix for which there was
#' evidence for the null hypothesis (see note).
#'
#' \item \code{pcor_mat} Partial correlation matrix (posterior mean). The weighted adjacency
#' matrices can be computed by multiplying \code{pcor_mat} with an adjacency matrix.
#'
#' }
#'
#' @seealso \code{\link{explore}} and \code{\link{ggm_compare_explore}} for several examples.
#'
#' @examples
#'
#' \donttest{
#' #################
#' ### example 1 ###
#' #################
#'
#' # data
#' Y <- bfi[,1:10]
#'
#' # fit model
#' fit <- explore(Y, progress = FALSE)
#'
#' # edge set
#' E <- select(fit,
#' alternative = "exhaustive")
#'
#' }
#' @export
select.explore <- function(object,
BF_cut = 3,
alternative = "two.sided",
...){
# rename
x <- object
# hyp probability
hyp_prob <- BF_cut / (BF_cut + 1)
# posterior samples
post_samp <- x$post_samp
# prior samples
prior_samp <- x$prior_samp
# two sided testing
if(alternative == "two.sided"){
# posterior
post_sd <- apply(post_samp$fisher_z[,,(51:x$iter)], 1:2, sd)
post_mean <- apply(post_samp$fisher_z[,,(51:x$iter)], 1:2, mean)
# x$pcor_mat
post_dens <- dnorm(0, post_mean, post_sd)
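# posterior density at zero for each relation (Fisher-z scale),
# used below in the Savage-Dickey density ratio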
# prior
prior_sd <- apply(prior_samp$fisher_z[,,(51:x$iter)], 1:2, sd)
prior_dens <- dnorm(0, 0, mean(prior_sd[upper.tri(diag(3))]))
# BF
BF_10_mat <- prior_dens / post_dens
BF_01_mat <- 1 / BF_10_mat
diag(BF_01_mat) <- 0
diag(BF_10_mat) <- 0
# selected: alternative
Adj_10 <- ifelse(BF_10_mat > BF_cut, 1, 0)
# selected: null
Adj_01 <- ifelse(BF_10_mat < 1 / BF_cut, 1, 0)
diag(Adj_01) <- 0
diag(Adj_10) <- 0
# returned object
returned_object = list(pcor_mat_zero = post_mean * Adj_10,
pcor_mat = round(post_mean, 3),
pcor_sd = round(post_sd, 3),
Adj_10 = Adj_10,
Adj_01 = Adj_01,
BF_10 = BF_10_mat,
BF_01 = BF_01_mat,
BF_cut = BF_cut,
alternative = alternative,
call = match.call(),
type = x$type,
formula = x$formula,
analytic = x$analytic,
object = object
)
# one sided greater
} else if(alternative == "greater"){
# posterior
post_sd <- apply(post_samp$fisher_z[,,(51:x$iter)], 1:2, sd)
post_mean <- apply(post_samp$fisher_z[,,(51:x$iter)], 1:2, mean)
#x$pcor_mat
post_dens <- dnorm(0, post_mean, post_sd )
# prior
prior_sd <- apply(prior_samp$fisher_z[,,(51:x$iter)], 1:2, sd)
prior_dens <- dnorm(0, 0, mean(prior_sd[upper.tri(diag(3))]))
# BF (two sided)
BF_10_mat <- prior_dens / post_dens
# BF one sided
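# (the two-sided BF is scaled by twice the posterior probability of a
# positive relation; the factor of 2 reflects a prior symmetric about zero)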
BF_20_mat <- BF_10_mat * ((1 - pnorm(0, post_mean, post_sd)) * 2)
# BF null
BF_02_mat <- 1 / BF_20_mat
diag(BF_02_mat) <- 0
diag(BF_20_mat) <- 0
# selected edges (alternative)
Adj_20 <- ifelse(BF_20_mat > BF_cut, 1, 0)
# selected edges (null)
Adj_02 <- ifelse(BF_02_mat > BF_cut, 1, 0)
diag(Adj_02) <- 0
diag(Adj_20) <- 0
# returned object
returned_object = list(
pcor_mat_zero = post_mean * Adj_20,
pcor_mat = round(post_mean, 3),
pcor_sd = round(post_sd, 3),
Adj_20 = Adj_20,
Adj_02 = Adj_02,
BF_20 = BF_20_mat,
BF_02 = BF_02_mat,
BF_cut = BF_cut,
alternative = alternative,
call = match.call(),
type = x$type,
formula = x$formula,
analytic = x$analytic,
object = object
)
# one side less
} else if(alternative == "less") {
# posterior
post_sd <- apply(post_samp$fisher_z[,,(51:x$iter)], 1:2, sd)
post_mean <- x$pcor_mat
post_dens <- dnorm(0, post_mean, post_sd )
# prior
prior_sd <- apply(prior_samp$fisher_z[,,(51:x$iter)], 1:2, sd)
prior_dens <- dnorm(0, 0, mean(prior_sd))
# BF (two sided)
BF_10_mat <- prior_dens / post_dens
# BF one sided
BF_20_mat <- BF_10_mat * (pnorm(0, post_mean, post_sd) * 2)
# BF null
BF_02_mat <- 1 / BF_20_mat
diag(BF_02_mat) <- 0
diag(BF_20_mat) <- 0
# selected edges (alternative)
Adj_20 <- ifelse(BF_20_mat > BF_cut, 1, 0)
# selected edges (null)
Adj_02 <- ifelse(BF_02_mat > BF_cut, 1, 0)
diag(Adj_02) <- 0
diag(Adj_20) <- 0
# returned object
returned_object = list(
pcor_mat_zero = post_mean * Adj_20,
pcor_mat = round(post_mean, 3),
pcor_sd = round(post_sd, 3),
Adj_20 = Adj_20,
Adj_02 = Adj_02,
BF_20 = BF_20_mat,
BF_02 = BF_02_mat,
BF_cut = BF_cut,
alternative = alternative,
call = match.call(),
type = x$type,
formula = x$formula,
analytic = x$analytic,
object = object
)
# exhaustive testing
} else if (alternative == "exhaustive")
if(alternative == "exhaustive"){
if(is.null(hyp_prob)){
stop("posterior probability must be specificed \n for exhaustive hypothesis testing")
}
# column names
cn <- colnames(x$Y)
p <- ncol(x$pcor_mat)
I_p <- diag(p)
if(is.null(cn)){
mat_names <- sapply(1:p , function(z) paste(1:p, z, sep = "--"))[upper.tri(I_p)]
} else {
mat_names <- sapply(cn , function(z) paste(cn, z, sep = "--"))[upper.tri(I_p)]
}
# posterior
post_sd <- apply(post_samp$fisher_z[,,(51:x$iter)], 1:2, sd)
post_mean <- x$pcor_mat
post_dens <- dnorm(0, post_mean, post_sd)
# prior
prior_sd <- apply(prior_samp$fisher_z[,,(51:x$iter)], 1:2, sd)
prior_dens <- dnorm(0, 0, mean(prior_sd))
# BF (two sided)
BF_10_mat <- prior_dens / post_dens
# BF less
BF_less <- BF_10_mat * (pnorm(0, post_mean, post_sd) * 2)
BF_greater <- BF_10_mat * ((1 - pnorm(0, post_mean, post_sd)) * 2)
# BF null
BF_null <- 1 / BF_10_mat
prob_null <- BF_null / (BF_null + BF_greater + BF_less)
prob_greater <- BF_greater / (BF_null + BF_greater + BF_less)
prob_less <- BF_less / (BF_null + BF_greater + BF_less)
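# note: the three posterior hypothesis probabilities sum to one for each relation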
prob_mat <- prob_null + prob_greater + prob_less
prob_dat = data.frame(edge = mat_names,
prob_zero = prob_null[upper.tri(prob_null)],
prob_greater = prob_greater[upper.tri(prob_greater)],
prob_less = prob_less[upper.tri(prob_less)])
# no rownames
row.names(prob_dat) <- c()
# selected (null)
null_mat <- ifelse(prob_null > hyp_prob, 1, 0)
# selected (positive)
pos_mat <- ifelse(prob_greater > hyp_prob, 1, 0)
# selected (negative)
neg_mat <- ifelse(prob_less > hyp_prob, 1, 0)
# negative edges
returned_object <- list(
post_prob = prob_dat,
neg_mat = neg_mat,
pos_mat = pos_mat,
null_mat = null_mat,
alternative = alternative,
pcor_mat = round(post_mean, 3),
pcor_sd = round(post_sd, 3),
call = match.call(),
prob = hyp_prob,
type = x$type,
formula = x$formula,
analytic = x$analytic,
object = object
)
} else {
stop("alternative not supported. see documentation")
}
class(returned_object) <- c("BGGM",
"select.explore",
"explore",
"select")
returned_object
}
print_select_explore <- function(x,
...){
p <- ncol(x$pcor_mat_zero)
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$type, "\n")
cat("Analytic:", x$analytic, "\n")
cat("Formula:", paste(as.character(x$formula), collapse = " "), "\n")
cat("Alternative:", x$alternative, "\n")
if(x$alternative == "two.sided"){
cat("Bayes Factor:", x$BF_cut, "\n")
}
cat("--- \n")
cat("Call:\n")
print(x$call)
cat("--- \n")
cat("Hypotheses: \n")
if(x$alternative == "two.sided"){
cat("H0: rho = 0\nH1: rho != 0", "\n")
cat("--- \n")
colnames(x$Adj_10) <- 1:p
row.names(x$Adj_10) <- 1:p
colnames( x$pcor_mat_zero) <- 1:p
row.names(x$pcor_mat_zero) <- 1:p
cat("Partial Correlations:\n\n")
print(round(x$pcor_mat_zero, 2))
cat("--- \n")
cat("Adjacency:\n\n")
print(x$Adj_10)
cat("--- \n")
} else if (x$alternative == "greater"){
cat("H0: rho = 0\nH1: rho > 0", "\n")
cat("--- \n")
colnames(x$Adj_20) <- 1:p
row.names(x$Adj_20) <- 1:p
colnames( x$pcor_mat_zero) <- 1:p
row.names(x$pcor_mat_zero) <- 1:p
cat("Partial Correlations:\n\n")
print(round(x$pcor_mat_zero, 2))
cat("--- \n")
cat("Adjacency:\n\n")
print(x$Adj_20)
cat("--- \n")
} else if (x$alternative == "less"){
cat("H0: rho = 0\nH1: rho < 0", "\n")
cat("--- \n")
colnames(x$Adj_20) <- 1:p
row.names(x$Adj_20) <- 1:p
colnames( x$pcor_mat_zero) <- 1:p
row.names(x$pcor_mat_zero) <- 1:p
cat("Partial Correlations:\n\n")
print(round(x$pcor_mat_zero, 2))
cat("--- \n")
cat("Adjacency:\n\n")
print(x$Adj_20)
cat("--- \n")
} else {
cat("H0: rho = 0\nH1: rho > 0\nH2: rho < 0", "\n")
cat("--- \n")
cat("Summary:\n\n")
dat <- x$post_prob
dat$prob_zero <- round(dat$prob_zero, 3)
dat$prob_greater <- round(dat$prob_greater, 3)
dat$prob_less <- round(dat$prob_less, 3)
colnames(dat) <- c("Relation", "Pr.H0", "Pr.H1", "Pr.H2")
print(dat, row.names = FALSE, right = FALSE)
cat("--- \n")
}
}
#' @title Summary Method for \code{select.explore} Objects
#'
#' @name summary.select.explore
#'
#' @param object object of class \code{select.explore}.
#'
#' @param col_names Logical.
#'
#' @param ... Currently ignored.
#'
#' @examples
#' \donttest{
#' # data
#' Y <- bfi[,1:10]
#'
#' # fit model
#' fit <- explore(Y, iter = 250,
#' progress = FALSE)
#'
#' # edge set
#' E <- select(fit,
#' alternative = "exhaustive")
#'
#' summary(E)
#'
#' }
#' @return a data frame including the posterior mean, standard deviation,
#' and posterior hypothesis probabilities for each relation.
#' @export
summary.select.explore <- function(object,
col_names = TRUE,
...){
x <- object
p <- ncol(x$pcor_mat)
I_p <- diag(p)
# column names
cn <- colnames(object$object$Y)
if(!isTRUE(col_names) | is.null(cn)){
mat_names <- sapply(1:p , function(x) paste(1:p, x, sep = "--"))[upper.tri(I_p)]
} else {
mat_names <- sapply(cn , function(x) paste(cn, x, sep = "--"))[upper.tri(I_p)]
}
if(x$alternative == "two.sided"){
post_mean <- x$pcor_mat[upper.tri(x$pcor_mat)]
post_sd <- x$pcor_sd[upper.tri(x$pcor_sd)]
prob_H1 <- x$BF_10[upper.tri(x$BF_10)] / (x$BF_10[upper.tri(x$BF_10)] + 1)
prob_H0 <- 1 - prob_H1
summ <- data.frame(
Relation = mat_names,
Post.mean = post_mean,
Post.sd = post_sd,
Pr.H0 = round(prob_H0, 3),
Pr.H1 = round(prob_H1, 3)
)
} else if (x$alternative == "greater"){
post_mean <- x$pcor_mat[upper.tri(x$pcor_mat)]
post_sd <- x$pcor_sd[upper.tri(x$pcor_sd)]
prob_H1 <- x$BF_20[upper.tri(x$BF_20)] / (x$BF_20[upper.tri(x$BF_20)] + 1)
prob_H0 <- 1 - prob_H1
summ <- data.frame(
Relation = mat_names,
Post.mean = post_mean,
Post.sd = post_sd,
Pr.H0 = round(prob_H0, 3),
Pr.H1 = round(prob_H1, 3)
)
} else if (x$alternative == "less" | x$alternative == "greater"){
post_mean <- x$pcor_mat[upper.tri(x$pcor_mat)]
post_sd <- x$pcor_sd[upper.tri(x$pcor_sd)]
prob_H1 <- x$BF_20[upper.tri(x$BF_20)] / (x$BF_20[upper.tri(x$BF_20)] + 1)
prob_H0 <- 1 - prob_H1
summ <- data.frame(
Relation = mat_names,
Post.mean = post_mean,
Post.sd = post_sd,
Pr.H0 = round(prob_H0, 3),
Pr.H1 = round(prob_H1, 3)
)
} else {
summ <- cbind.data.frame( x$post_prob[,1],
x$pcor_mat[upper.tri(x$pcor_mat)],
x$pcor_sd[upper.tri(x$pcor_sd)],
round(x$post_prob[,2:4], 3))
colnames(summ) <- c("Relation",
"Post.mean",
"Post.sd",
"Pr.H0",
"Pr.H1",
"Pr.H2")
}
returned_object <- list(summary = summ, object = object)
class(returned_object) <- c("BGGM", "summary.select.explore",
"explore", "select.explore",
"summary")
returned_object
}
print_summary_select_explore <- function(x,...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$object$type, "\n")
cat("Alternative:", x$object$alternative, "\n")
cat("--- \n")
cat("Call:\n")
print(x$object$call)
cat("--- \n")
cat("Hypotheses: \n")
if(x$object$alternative == "two.sided"){
cat("H0: rho = 0\nH1: rho != 0", "\n")
} else if (x$object$alternative == "greater"){
cat("H0: rho = 0\nH1: rho > 0", "\n")
} else if (x$object$alternative == "less"){
cat("H0: rho = 0\nH1: rho < 0", "\n")
} else {
cat("H0: rho = 0\nH1: rho > 0\nH2: rho < 0", "\n")
}
cat("--- \n\n")
print(x$summary, right = FALSE, row.names = FALSE)
}
#' @title Plot \code{summary.select.explore} Objects
#'
#' @name plot.summary.select.explore
#'
#' @description Visualize the posterior hypothesis probabilities.
#'
#' @param x An object of class \code{summary.select.explore}
#'
#' @param size Numeric. The size for the points (defaults to 2).
#'
#' @param color Character string. The Color for the points
#'
#' @param ... Currently ignored
#'
#' @return A \code{ggplot} object
#'
#' @examples
#' \donttest{
#' # data
#' Y <- bfi[,1:10]
#'
#' # fit model
#' fit <- explore(Y, iter = 250,
#' progress = FALSE)
#'
#' # edge set
#' E <- select(fit,
#' alternative = "exhaustive")
#'
#' plot(summary(E))
#'
#' }
#' @export
plot.summary.select.explore <- function(x,
size = 2,
color = "black",
...){
dat_temp <- x$summary[order(x$summary$Pr.H1,
decreasing = F), ]
dat_temp$Relation <-
factor(dat_temp$Relation,
levels = dat_temp$Relation,
labels = dat_temp$Relation)
ggplot(dat_temp,
aes(x = Relation,
y = Pr.H1)) +
geom_point(size = size, color = color) +
theme(axis.text.x = element_text(
angle = 90,
vjust = 0.5,
hjust = 1
))
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/select.explore.R
|
#' @title Graph selection for \code{ggm_compare_explore} Objects
#'
#' @description Provides the selected graph (of differences) based on the Bayes factor
#' \insertCite{williams2020comparing}{BGGM}.
#'
#' @param object An object of class \code{ggm_compare_explore}.
#'
#' @param BF_cut Numeric. Threshold for including an edge (defaults to 3).
#'
#' @param ... Currently ignored.
#'
#' @return The returned object of class \code{select.ggm_compare_explore} contains
#' a lot of information that is used for printing and plotting the results.
#' For users of \strong{BGGM}, the following are the useful objects:
#'
#'
#' \itemize{
#'
#' \item \code{adj_10} Adjacency matrix for which there was evidence for a difference.
#'
#' \item \code{adj_01} Adjacency matrix for which there was evidence for a null relation.
#'
#' \item \code{pcor_mat_10} Selected partial correlation matrix (weighted adjacency; only for two groups).
#'
#' }
#'
#' @seealso \code{\link{explore}} and \code{\link{ggm_compare_explore}} for several examples.
#'
#' @examples
#' \donttest{
#'
#' ##################
#' ### example 1: ###
#' ##################
#' # data
#' Y <- bfi
#'
#' # males and females
#' Ymale <- subset(Y, gender == 1,
#' select = -c(gender,
#' education))[,1:10]
#'
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(gender,
#' education))[,1:10]
#'
#' # fit model
#' fit <- ggm_compare_explore(Ymale, Yfemale,
#' iter = 250,
#' type = "continuous",
#' progress = FALSE)
#'
#'
#' E <- select(fit, BF_cut = 3)
#'
#' }
#'
#' @export
select.ggm_compare_explore <- function(object,
BF_cut = 3,
...){
# change to x
x <- object
if(is.null(BF_cut)){
BF_cut <- (post_prob) / (1 - post_prob)
} else {
BF_cut <- BF_cut
}
# post
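# posterior probability implied by the Bayes factor threshold
# (e.g., BF_cut = 3 corresponds to 0.75)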
post_prob <- BF_cut / (BF_cut + 1)
# BF
BF_10 <- 1 / x$BF_01
# BF mat diagonal
diag(BF_10) <- 0
adj_10 <- ifelse(BF_10 > BF_cut, 1, 0)
adj_01 <- ifelse(x$BF_01 > BF_cut, 1, 0)
BF_01_adj <- adj_01 * x$BF_01
BF_10_adj <- adj_10 * BF_10
pcor_mat <- matrix(0, x$p, x$p)
if(x$groups == 2){
pcor_mat <- adj_10 * x$pcor_diff
}
returned_object <- list(BF_10 = BF_10,
BF_01 = x$BF_01,
BF_01_adj = BF_01_adj,
BF_10_adj = BF_10_adj,
adj_10 = adj_10,
adj_01 = adj_01,
call = match.call(),
p = ncol(BF_10),
iter = x$iter,
info = x$info,
post_prob = post_prob,
BF = BF_cut,
pcor_mat_10 = pcor_mat,
object = object)
class(returned_object) <- c("BGGM",
"explore",
"select",
"select.ggm_compare_bf")
returned_object
}
print_select_ggm_compare_bf <- function(x,...){
groups <- x$object$groups
p <- x$p
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", x$object$type, "\n")
# number of iterations
cat("Posterior Samples:", x$object$iter, "\n")
# number of observations
cat("Observations (n):\n")
groups <- length(x$object$info$dat)
for(i in 1:groups){
cat(" Group", paste( i, ":", sep = "") , x$object$info$dat_info$n[[i]], "\n")
}
# number of variables
cat("Variables (p):", x$object$p, "\n")
# number of edges
cat("Relations:", .5 * (x$object$p * (x$object$p-1)), "\n")
cat("Delta:", x$object$delta, "\n")
cat("--- \n")
cat("Call: \n")
print(x$object$call)
cat("--- \n")
cat("Hypotheses:\n")
cat("H0:", paste0("rho_g", 1:groups, collapse = " = "), "\n")
cat("H1:", paste0("rho_g", 1:groups, collapse = " - "), " = 0\n")
cat("--- \n\n")
if(groups ==2){
cat("Partial Correlations:\n\n")
colnames(x$pcor_mat_10) <- 1:p
row.names(x$pcor_mat_10) <- 1:p
print(round(x$pcor_mat_10, 2))
cat("--- \n")
}
cat("Adjacency:\n\n")
colnames(x$adj_10) <- 1:p
row.names(x$adj_10) <- 1:p
print(round(x$adj_10, 2))
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/select.ggm_compare_bf.R
|
#' @title Graph Selection for \code{ggm_compare_estimate} Objects
#' @description Provides the selected graph (of differences) based on credible intervals for
#' the partial correlations that did not contain zero
#' \insertCite{Williams2019}{BGGM}.
#'
#' @name select.ggm_compare_estimate
#'
#' @param object An object of class \code{estimate.default}.
#'
#' @param cred Numeric. The credible interval width for selecting the graph
#' (defaults to 0.95; must be between 0 and 1).
#'
#' @param ... not currently used
#'
#' @return The returned object of class \code{select.ggm_compare_estimate} contains a lot of information that
#' is used for printing and plotting the results. For users of \strong{BGGM}, the following
#' are the useful objects:
#'
#'
#' \itemize{
#'
#' \item \code{mean_diff} A list of matrices for each group comparison (partial correlation differences).
#'
#' \item \code{pcor_adj} A list of weighted adjacency matrices for each group comparison.
#'
#' \item \code{adj} A list of adjacency matrices for each group comparison.
#'
#' }
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#' ##################
#' ### example 1: ###
#' ##################
#' # data
#' Y <- bfi
#'
#' # males and females
#' Ymale <- subset(Y, gender == 1,
#' select = -c(gender,
#' education))
#'
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(gender,
#' education))
#'
#' # fit model
#' fit <- ggm_compare_estimate(Ymale, Yfemale,
#' type = "continuous",
#' iter = 250,
#' progress = FALSE)
#'
#'
#' E <- select(fit)
#'
#' }
#' @export
select.ggm_compare_estimate <- function(object,
cred = 0.95,
...) {
# rope removed, but will be added in a minor release
rope = NULL
# number of contrasts
contrasts <- nrow(object$info$pairwise)
if(is.null(rope)){
if(isFALSE(object$analytic)){
lb <- (1 - cred) / 2
ub <- 1 - lb
mean_diff <- lapply(1:contrasts, function(x){
apply(object$diff[[x]] , 1:2, mean)
})
adj <- lapply(1:contrasts, function(x){
ifelse(apply(object$diff[[x]], 1:2, quantile, lb) < 0 &
apply(object$diff[[x]], 1:2, quantile, ub) > 0, 0, 1)
})
pcor_adj <- lapply(1:contrasts, function(x){
mean_diff[[x]] * adj[[x]]
})
returned_object <- list(mean_diff = mean_diff,
pcor_adj = pcor_adj,
adj = adj,
call = match.call(),
object = object,
rope = rope,
cred = cred)
} else {
# analytic
critical <- abs(qnorm((1 - cred) / 2))
adj <- lapply(1:contrasts, function(x) {
ifelse(object$z_stat[[x]] > critical, 1, 0)
})
pcor_adj <- lapply(1:contrasts, function(x){
object$diff[[x]] * adj[[x]]
})
returned_object <- list(adj = adj,
pcor_adj = pcor_adj,
call = match.call(),
object = object,
rope = rope,
cred = cred)
} # end analytic
} else { # for rope. future direction
stop("rope is not currently implemented.")
}
class(returned_object) <- c("BGGM",
"select.ggm_compare_estimate",
"estimate", "select")
return(returned_object)
}
print_select_ggm_compare_estimate <- function(x,...){
object <- x
comparisons <- length(object$pcor_adj)
p <- ncol(object$pcor_adj[[1]])
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Type:", object$object$type, "\n")
cat("Analytic:", object$object$analytic, "\n")
cat("Posterior Samples:", object$object$iter, "\n")
cat("Credible Interval:", gsub("*0.","", formatC( round(object$cred, 4), format='f', digits=2)), "% \n")
cat("--- \n")
cat("Call: \n")
print(object$object$call)
cat("--- \n")
cat("Selected:\n\n")
for(i in 1:comparisons){
cat(names(object$object$diff)[i], "\n")
mat <- object$pcor_adj[[i]]
colnames(mat) <- 1:p
row.names(mat) <- 1:p
print(round(mat, 3))
cat("--- \n\n")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/select.ggm_compare_estimate.R
|
#' VAR: Estimation
#'
#' @description Estimate VAR(1) models by efficiently sampling from the posterior distribution. This
#' provides two graphical structures: (1) a network of undirected relations (the GGM, controlling for the
#' lagged predictors) and (2) a network of directed relations (the lagged coefficients). Note that
#' in the graphical modeling literature, this model is also known as a time series chain graphical model
#' \insertCite{abegaz2013sparse}{BGGM}.
#'
#' @name var_estimate
#'
#' @param Y Matrix (or data frame) of dimensions \emph{n} (observations) by \emph{p} (variables).
#'
#' @param rho_sd Numeric. Scale of the prior distribution for the partial correlations,
#' approximately the standard deviation of a beta distribution
#' (defaults to 0.50).
#'
#' @param beta_sd Numeric. Standard deviation of the prior distribution for the regression coefficients
#' (defaults to 1). The prior is by default centered at zero and follows a normal distribution
#' \insertCite{@Equation 9, @sinay2014bayesian}{BGGM}
#'
#' @param iter Number of iterations (posterior samples; defaults to 5000).
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param seed An integer for the random seed (defaults to 1).
#'
#' @param ... Currently ignored.
#'
#' @details Each time series in \code{Y} is standardized (mean = 0; standard deviation = 1).
#'
#' @note
#' \strong{Regularization}:
#'
#' A Bayesian ridge regression can be fitted by decreasing \code{beta_sd}
#' (e.g., \code{beta_sd = 0.25}). This could be advantageous for forecasting
#' (out-of-sample prediction) in particular.
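#' For instance, \code{var_estimate(Y, beta_sd = 0.25)} places a tighter,
#' ridge-like prior on the lagged coefficients.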
#'
#'
#' @references
#' \insertAllCited{}
#'
#' @return An object of class \code{var_estimate} containing a lot of information that is
#' used for printing and plotting the results. For users of \strong{BGGM}, the following are the
#' useful objects:
#'
#' \itemize{
#'
#' \item \code{beta_mu} A matrix including the regression coefficients (posterior mean).
#'
#' \item \code{pcor_mu} Partial correlation matrix (posterior mean).
#'
#' \item \code{fit} A list including the posterior samples.
#'
#' }
#'
#' @examples
#' \donttest{
#' # data
#' Y <- subset(ifit, id == 1)[,-1]
#'
#' # use alias (var_estimate also works)
#' fit <- var_estimate(Y, progress = FALSE)
#'
#' fit
#'
#' }
#' @export
var_estimate <- function(Y, rho_sd = 0.50,
beta_sd = 1,
iter = 5000,
progress = TRUE,
seed = 1,
...) {
# removed per CRAN (8/12/21)
#old <- .Random.seed
set.seed(seed)
Y <- scale(na.omit(Y))
# number of nodes
p <- ncol(Y)
# number of obs
n <- nrow(Y)
# Y lagged: add NA
Y_lag <- rbind(NA, Y)
# Y lagged: column names
colnames(Y_lag) <- paste0(colnames(Y), ".l1")
# combine all data
Y_all <- na.omit(cbind.data.frame(rbind(Y, NA), Y_lag))
# nodes in GGM
Y <- as.matrix(Y_all[,1:p])
# predictors (lagged effects)
X <- as.matrix(Y_all[,(p+1):(p*2)])
# delta: rho ~ beta(delta/2, delta/2)
delta <- delta_solve(rho_sd)
# prior variance
beta_var <- beta_sd^2
if(isTRUE(progress)){
message(paste0("BGGM: Posterior Sampling "))
}
fit <-.Call(
"_BGGM_var",
Y = as.matrix(Y),
X = as.matrix(X),
delta = delta,
epsilon = 0.001,
beta_prior = diag(p) * (1 / beta_var),
iter = iter + 50,
start = solve(cor(Y)),
progress = progress
)
if(isTRUE(progress)){
message("BGGM: Finished")
}
pcor_mu <- round(
apply(fit$pcors[,,51:(iter + 50)], 1:2, mean),
digits = 3)
beta_mu <- round(
apply(fit$beta[,,51:(iter + 50)], 1:2, mean),
digits = 3)
colnames(pcor_mu) <- colnames(Y)
rownames(pcor_mu) <- colnames(Y)
colnames(beta_mu) <- colnames(Y)
row.names(beta_mu) <- colnames(X)
returned_object <- list(fit = fit,
iter = iter,
beta_mu = beta_mu,
pcor_mu = pcor_mu,
p = p,
n = n,
Y = Y,
X = X,
call = match.call())
# removed per CRAN (8/12/21)
#.Random.seed <<- old
class(returned_object) <- c("BGGM",
"var_estimate",
"default")
return(returned_object)
}
print_var_estimate <- function(x, ...){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Vector Autoregressive Model (VAR) \n")
cat("--- \n")
# number of iterations
cat("Posterior Samples:", x$iter, "\n")
# number of observations
cat("Observations (n):", x$n,"\n")
# number of variables
cat("Nodes (p):", x$p, "\n")
# number of edges
cat("--- \n")
cat("Call: \n")
print(x$call)
cat("--- \n")
cat("Partial Correlations: \n\n")
print(x$pcor_mu)
cat("--- \n")
cat("Coefficients: \n\n")
print(x$beta_mu)
cat("--- \n")
cat("Date:", date(), "\n")
}
#' @title Summary Method for \code{var_estimate} Objects
#'
#' @name summary.var_estimate
#'
#' @description Summarize the posterior distribution of each partial correlation
#' and regression coefficient with the posterior mean, standard deviation, and
#' credible intervals.
#'
#' @param object An object of class \code{var_estimate}
#'
#' @param cred Numeric. The credible interval width for summarizing the posterior
#' distributions (defaults to 0.95; must be between 0 and 1).
#'
#' @param ... Currently ignored.
#'
#' @seealso \code{\link{var_estimate}}
#'
#' @return A list containing the summarized posterior distributions,
#' including both the partial correlations and the regression coefficients.
#'
#' \itemize{
#'
#' \item \code{pcor_results} A data frame including the summarized partial correlations
#'
#' \item \code{beta_results} A list containing the summarized regression coefficients (one
#' data frame for each outcome)
#' }
#'
#' @examples
#' \donttest{
#' # data
#' Y <- subset(ifit, id == 1)[,-1]
#'
#' # fit model with alias (var_estimate also works)
#' fit <- var_estimate(Y, progress = FALSE)
#'
#' # summary ('pcor')
#' print(
#' summary(fit, cred = 0.95),
#' param = "pcor",
#' )
#'
#'
#' # summary ('beta')
#' print(
#' summary(fit, cred = 0.95),
#' param = "beta",
#' )
#'
#' }
#' @export
summary.var_estimate <- function(object,
cred = 0.95,
...){
# nodes
p <- object$p
# identity matrix
I_p <- diag(p)
# lower bound
lb <- (1 - cred) / 2
# upper bound
ub <- 1 - lb
# column names
cn <- colnames(object$Y)
if(is.null(cn)){
mat_names <- sapply(1:p , function(x) paste(1:p, x, sep = "--"))[upper.tri(I_p)]
} else {
mat_names <- sapply(cn , function(x) paste(cn, x, sep = "--"))[upper.tri(I_p)]
}
pcor_mean <- round(
apply(object$fit$pcors[, , 51:(object$iter + 50)], 1:2, mean),
3)[upper.tri(I_p)]
pcor_sd <- round(
apply(object$fit$pcors[,, 51:(object$iter + 50) ], 1:2, sd),
digits = 3)[upper.tri(I_p)]
pcor_lb <- round(
apply( object$fit$pcors[,, 51:(object$iter + 50) ], 1:2, quantile, lb),
digits = 3)[upper.tri(I_p)]
pcor_ub <- round(
apply(object$fit$pcors[,, 51:(object$iter + 50) ], 1:2, quantile, ub),
digits = 3)[upper.tri(I_p)]
beta_mean <- round(
apply(object$fit$beta[,, 51:(object$iter + 50) ], 1:2, mean),
digits = 3)
beta_sd <- round(
apply(object$fit$beta[,, 51:(object$iter + 50) ], 1:2, sd),
digits = 3)
beta_lb <- round(
apply( object$fit$beta[,, 51:(object$iter + 50) ], 1:2, quantile, lb),
digits = 3)
beta_ub <- round(
apply(object$fit$beta[,, 51:(object$iter + 50) ], 1:2, quantile, ub),
digits = 3)
pcor_results <-
data.frame(
relation = mat_names,
post_mean = pcor_mean,
post_sd = pcor_sd,
post_lb = pcor_lb,
post_ub = pcor_ub
)
colnames(pcor_results) <- c(
"Relation",
"Post.mean",
"Post.sd",
"Cred.lb",
"Cred.ub")
beta_results <-
lapply(1:p, function (x) {
res_p <- data.frame(
relation = colnames(object$X),
post_mean = beta_mean[, x],
post_sd = beta_sd[, x],
post_lb = beta_lb[, x],
post_ub = beta_ub[, x]
)
colnames(res_p) <- c("Relation",
"Post.mean",
"Post.sd",
"Cred.lb",
"Cred.ub")
res_p
})
names(beta_results) <- colnames(object$Y)
returned_object <- list(pcor_results = pcor_results,
beta_results = beta_results)
class(returned_object) <- c("BGGM",
"var_estimate",
"summary.var_estimate")
return(returned_object)
}
print_summary_var_estimate <- function(x, param = "all", ...){
p <- nrow(x$beta_results[[1]])
cn <- gsub("\\..*","" , x$beta_results[[1]]$Relation)
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Vector Autoregressive Model (VAR) \n")
cat("--- \n")
if(param == "all" | param == "pcor"){
cat("Partial Correlations: \n\n")
print(x$pcor_results, row.names = FALSE)
cat("--- \n\n")
}
if(param == "all" | param == "beta") {
cat("Coefficients: \n\n")
for (i in seq_len(p)) {
# print outcome
cat(paste0(cn[i], " \n\n"))
# coefs for node i
coef_i <- x$beta_results[[i]]
# predictor names
# colnames(coef_i) <- cn[-i]
# print coefs
print(coef_i, row.names = FALSE)
cat("---\n")
}
}
}
#' Plot \code{summary.var_estimate} Objects
#'
#' @description Visualize the posterior distributions of each partial correlation and
#' regression coefficient.
#'
#' @param x An object of class \code{summary.var_estimate}
#'
#' @param color Character string. The color for the error bars.
#' (defaults to \code{"black"}).
#'
#' @param size Numeric. The size for the points (defaults to \code{2}).
#'
#' @param width Numeric. The width of error bar ends (defaults to \code{0}).
#'
#' @param param Character string. Which parameters should be plotted ? The options
#' are \code{pcor}, \code{beta}, or \code{all} (default).
#'
#' @param order Logical. Should the relations be ordered by size (defaults to \code{TRUE}) ?
#'
#' @param ... Currently ignored
#'
#' @return A list of \code{ggplot} objects.
#'
#' @examples
#' \donttest{
#'
#' # data
#' Y <- subset(ifit, id == 1)[,-1]
#'
#' # fit model with alias (var_estimate also works)
#' fit <- var_estimate(Y, progress = FALSE)
#'
#' plts <- plot(summary(fit))
#' plts$pcor_plt
#' }
#'
#' @export
plot.summary.var_estimate <- function(x,
color = "black",
size = 2,
width = 0,
param = "all",
order = TRUE,
...){
if(param == "all" |
param == "pcor"){
dat_temp <- x$pcor_results
if(isTRUE(order)){
dat_temp <- dat_temp[order(dat_temp$Post.mean,
decreasing = FALSE), ]
}
dat_temp$Relation <-
factor(dat_temp$Relation,
levels = dat_temp$Relation,
labels = dat_temp$Relation)
pcor_plt <- ggplot(dat_temp,
aes(x = Relation,
y = Post.mean)) +
geom_errorbar(aes(ymax = dat_temp[, 4],
ymin = dat_temp[, 5]),
width = width,
color = color) +
geom_point(size = size) +
xlab("Index") +
theme(axis.text.x = element_text(
angle = 90,
vjust = 0.5,
hjust = 1
)) +
ggtitle("Partial Correlations")
if(param == "pcor"){
beta_plt <- NULL
}
}
if (param == "all" | param == "beta") {
cn <- names(x$beta_results)
p <- nrow(x$beta_results[[1]])
beta_plt <- lapply(1:p, function(i) {
dat_temp <- x$beta_results[[i]]
if(isTRUE(order)){
dat_temp <- dat_temp[order(dat_temp$Post.mean,
decreasing = FALSE),]
}
dat_temp$Relation <-
factor(dat_temp$Relation,
levels = dat_temp$Relation,
labels = dat_temp$Relation)
ggplot(dat_temp,
aes(x = Relation,
y = Post.mean)) +
geom_errorbar(aes(ymax = dat_temp[, 4],
ymin = dat_temp[, 5]),
width = width,
color = color) +
geom_point(size = size) +
xlab("Index") +
theme(axis.text.x = element_text(
angle = 90,
vjust = 0.5,
hjust = 1
)) +
ggtitle(paste0("Coefficients: ", cn[i]))
})
names(beta_plt) <- cn
if (param == "beta") {
pcor_plt <- NULL
}
}
list(pcor_plt = pcor_plt, beta_plt = beta_plt)
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/var_estimate.R
|
#' Extract the Weighted Adjacency Matrix
#'
#' @description Extract the weighted adjacency matrix (posterior mean) from
#' \code{\link{estimate}}, \code{\link{explore}}, \code{\link{ggm_compare_estimate}},
#' and \code{\link{ggm_compare_explore}} objects.
#'
#' @param object A model estimated with \strong{BGGM}. All classes are supported, assuming
#' there is matrix to be extracted.
#'
#' @param ... Currently ignored.
#'
#' @return The weighted adjacency matrix (partial correlation matrix with zeros).
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#' Y <- bfi[,1:5]
#'
#' # estimate
#' fit <- estimate(Y, iter = 250,
#' progress = FALSE)
#'
#' # select graph
#' E <- select(fit)
#'
#' # extract weighted adj matrix
#' weighted_adj_mat(E)
#'
#' }
#' @export
weighted_adj_mat <- function(object, ...){
if(all(c("select.estimate", "estimate") %in% class(object))){
weighted_adj_mat <- round(object$pcor_adj, 3)
weighted_adj_mat
} else if(all(c("select.estimate", "estimate") %in% class(object))){
weighted_adj_mat <- round(object$pcor_mat_zero, 3)
weighted_adj_mat
} else if(all(c("select.ggm_compare_estimate", "estimate") %in% class(object))){
contrasts <- length(object$pcor_adj)
weighted_adj_mat <- lapply(1:contrasts, function(x) round(object$pcor_adj[[x]], 3))
names(weighted_adj_mat) <- names(object$object$pcor_mats)
weighted_adj_mat
} else if(c("select.ggm_compare_bf") %in% class(object)){
if(object$object$groups > 2){
stop("weigthed adjacency only available for two groups")
}
weighted_adj_mat <- round(object$pcor_mat_10, 3)
weighted_adj_mat
} else {
stop("weighted adjacency matrix not found.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/weighted_adj_mat.R
|
#' @title Zero-Order Correlations
#'
#' @description Estimate zero-order correlations for any type of data. Note zero-order refers to the fact that
#' no variables are controlled for (i.e., bivariate correlations). To our knowledge, this is the only Bayesian
#' implementation in \code{R} that can estiamte Pearson's, tetrachoric (binary), polychoric
#' (ordinal with more than two cateogries), and rank based correlation coefficients.
#'
#' @name zero_order_cors
#'
#' @param Y Matrix (or data frame) of dimensions \emph{n} (observations) by \emph{p} (variables).
#' @param type Character string. Which type of data for \code{Y} ? The options include \code{continuous},
#' \code{binary}, \code{ordinal}, or \code{mixed}. See the note for further details.
#'
#' @param mixed_type Numeric vector. An indicator of length \emph{p} for which variables should be treated as ranks.
#' (1 for rank and 0 to assume normality). The default is currently to treat all integer variables as ranks
#' when \code{type = "mixed"} and \code{NULL} otherwise. See note for further details.
#'
#' @param iter Number of iterations (posterior samples; defaults to 5000).
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @return
#'
#' \itemize{
#'
#' \item \code{R} An array including the correlation matrices
#' (of dimensions \emph{p} by \emph{p} by \emph{iter})
#'
#' \item \code{R_mean} Posterior mean of the correlations (of dimensions \emph{p} by \emph{p})
#' }
#'
#' @details
#'
#' \strong{Mixed Type}:
#'
#' The term "mixed" is somewhat of a misnomer, because the method can be used for data including \emph{only}
#' continuous or \emph{only} discrete variables. This is based on the ranked likelihood which requires sampling
#' the ranks for each variable (i.e., the data is not merely transformed to ranks). This is computationally
#' expensive when there are many levels. For example, with continuous data, there are as many ranks
#' as data points!
#'
#' The option \code{mixed_type} allows the user to determine which variable should be treated as ranks
#' and the "emprical" distribution is used otherwise \insertCite{hoff2007extending}{BGGM}. This is
#' accomplished by specifying an indicator vector of length \emph{p}. A one indicates to use the ranks,
#' whereas a zero indicates to "ignore" that variable. By default all integer variables are treated as ranks.
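#' For instance (a minimal illustration with three variables),
#' \code{mixed_type = c(1, 0, 1)} treats the first and third variables as ranks,
#' but not the second.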
#'
#' \strong{Dealing with Errors}:
#'
#' An error is most likely to arise when \code{type = "ordinal"}. There are two common errors (although still rare):
#'
#' \itemize{
#'
#' \item The first is due to sampling the thresholds, especially when the data is heavily skewed.
#' This can result in an ill-defined matrix. If this occurs, we recommend first trying
#' to decrease \code{prior_sd} (i.e., a more informative prior). If that does not work, then
#' change the data type to \code{type = mixed} which then estimates a copula GGM
#' (this method can be used for data containing \strong{only} ordinal variable). This should
#' work without a problem.
#'
#' \item The second is due to how the ordinal data are categorized. For example, if the error states
#' that the index is out of bounds, this indicates that the first category is a zero. This is not allowed, as
#' the first category must be one. This is addressed by adding one (e.g., \code{Y + 1}) to the data matrix.
#'
#' }
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' Y <- ptsd[,1:3]
#'
#' #################################
#' ####### example 1: Pearson's ####
#' #################################
#'
#' fit <- zero_order_cors(Y, type = "continuous",
#' iter = 250,
#' progress = FALSE)
#'
#'
#' #################################
#' ###### example 2: polychoric ####
#' #################################
#'
#' fit <- zero_order_cors(Y+1, type = "ordinal",
#' iter = 250,
#' progress = FALSE)
#'
#'
#' ###########################
#' ##### example 3: rank #####
#' ###########################
#'
#' fit <- zero_order_cors(Y+1, type = "mixed",
#' iter = 250,
#' progress = FALSE)
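#'
#' # (hypothetical indicator) rank only the first two variables via mixed_type;
#' # a zero means that variable is assumed approximately normal
#' fit <- zero_order_cors(Y+1, type = "mixed",
#'                        mixed_type = c(1, 1, 0),
#'                        iter = 250,
#'                        progress = FALSE)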
#'
#' ############################
#' ## example 4: tetrachoric ##
#' ############################
#'
#' # binary data
#' Y <- women_math[,1:3]
#'
#' fit <- zero_order_cors(Y, type = "binary",
#' iter = 250,
#' progress = FALSE)
#'
#' }
#' @export
zero_order_cors <- function(Y, type = "continuous",
iter = 5000,
mixed_type = NULL,
progress = TRUE){
fit <- estimate(Y,
type = type,
iter = iter,
mixed_type = mixed_type,
progress = progress)
cors <- pcor_to_cor(fit)
return(cors)
}
|
/scratch/gouwar.j/cran-all/cranData/BGGM/R/zero_order.R
|
---
title: "Controlling for Variables"
author: "Donny Williams"
date: "5/25/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Controlling for Variables}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Introduction
This vignette describes how to control for variables. This is a new feature to **BGGM** (version `2.0.0`).
# Example 1: Multivariate Regression
When controlling for variables, a multivariate regression is fitted in **BGGM**. In fact, a GGM can be understood as a multivariate regression with intercept-only models (i.e., no predictors beyond the intercept).
## Notes about Implementation
**BGGM** does not use the typical approach for multivariate regression in `R`. This avoids having to
write out each outcome variable, of which there are typically many in a GGM. In **BGGM**, it is assumed
that the data matrix includes only the variables to be included in the GGM and the control variables.
### Correct
Suppose that we want to control for education level, with five variables included in the
GGM.
```r
# data
Y <- bfi[,c(1:5, 27)]
# head
head(Y)
#> A1 A2 A3 A4 A5 education
#> 61617 2 4 3 4 4 NA
#> 61618 2 4 5 2 5 NA
#> 61620 5 4 5 4 4 NA
#> 61621 4 4 6 5 5 NA
#> 61622 2 3 3 4 5 NA
#> 61623 6 6 5 6 5 3
```
Notice that `Y` includes **only** the five variables and `education`.
## Fit Model
This model can then be fitted with
```
fit <- explore(Y, formula = ~ as.factor(education))
```
To show this is indeed a multivariate regression, here are the summarized regression coefficients for the first
outcome.
```
summ_coef <- regression_summary(fit)
# outcome one
summ_coef$reg_summary[[1]]
#> Post.mean Post.sd Cred.lb Cred.ub
#> (Intercept) 0.256 0.095 0.072 0.442
#> as.factor(education)2 0.073 0.128 -0.177 0.323
#> as.factor(education)3 -0.202 0.104 -0.405 -0.001
#> as.factor(education)4 -0.462 0.119 -0.691 -0.233
#> as.factor(education)5 -0.578 0.117 -0.815 -0.346
```
And here are the coefficients from `lm` (a univariate regression for `A1`)
```
round(
cbind(
# summary: coef and se
summary( lm(scale(A1, scale = F) ~ as.factor(education), data = Y))$coefficients[,1:2],
# confidence interval
confint( lm(scale(A1, scale = F) ~ as.factor(education), data = Y))
), 3)
#> Estimate Std. Error 2.5 % 97.5 %
#> (Intercept) 0.256 0.093 0.073 0.438
#> as.factor(education)2 0.072 0.125 -0.172 0.316
#> as.factor(education)3 -0.203 0.101 -0.401 -0.004
#> as.factor(education)4 -0.461 0.116 -0.690 -0.233
#> as.factor(education)5 -0.578 0.115 -0.804 -0.351
```
The estimates are very (very) similar.
## Summary
Note that all the other functions work just the same. For example, the relations controlling for education
are summarized with
```
summary(fit)
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Type: continuous
#> Analytic: FALSE
#> Formula: ~ as.factor(education)
#> Posterior Samples: 5000
#> Observations (n):
#> Nodes (p): 5
#> Relations: 10
#> ---
#> Call:
#> estimate(Y = Y, formula = ~as.factor(education))
#> ---
#> Estimates:
#> Relation Post.mean Post.sd Cred.lb Cred.ub
#> A1--A2 -0.239 0.020 -0.278 -0.200
#> A1--A3 -0.109 0.020 -0.150 -0.070
#> A2--A3 0.276 0.019 0.239 0.312
#> A1--A4 -0.013 0.021 -0.055 0.026
#> A2--A4 0.156 0.020 0.117 0.196
#> A3--A4 0.173 0.020 0.134 0.214
#> A1--A5 -0.010 0.020 -0.050 0.029
#> A2--A5 0.150 0.020 0.111 0.189
#> A3--A5 0.358 0.018 0.322 0.392
#> A4--A5 0.121 0.020 0.082 0.159
#> ---
```
### Incorrect
Now if we wanted to control for education, but also had gender in `Y`, this would be incorrect
```
Y <- bfi[,c(1:5, 26:27)]
head(Y)
#> A1 A2 A3 A4 A5 gender education
#> 61617 2 4 3 4 4 1 NA
#> 61618 2 4 5 2 5 2 NA
#> 61620 5 4 5 4 4 2 NA
#> 61621 4 4 6 5 5 2 NA
#> 61622 2 3 3 4 5 1 NA
#> 61623 6 6 5 6 5 2 3
```
In this case, with `estimate(Y, formula = ~ as.factor(education))`, the GGM would also include `gender`
(six variables instead of the desired five). This is because all variables not included in `formula` are included in the GGM. This was adopted in **BGGM** to save the user from having to write out each outcome.
This differs from `lm`, where each outcome needs to be written out, for example `cbind(A1, A2, A3, A4, A5) ~ as.factor(education)`. This is quite cumbersome for a model that includes many nodes.
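To control for education only, `gender` would first need to be dropped from `Y`. A minimal sketch:

```r
# keep only the five items and education
Y <- bfi[, c(1:5, 27)]

# then fit as before
fit <- estimate(Y, formula = ~ as.factor(education))
```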
# Example 2: Multivariate Probit
The above data is ordinal. In this case, it is possible to fit a multivariate probit model. This is also the approach for binary data in **BGGM**. This is implemented with
```
fit <- estimate(Y, formula = ~ as.factor(education),
type = "ordinal", iter = 1000)
```
Note that the multivariate probit models can also be summarized with `regression_summary`.
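For example (output omitted):

```
summ_coef <- regression_summary(fit)

# probit coefficients for the first outcome
summ_coef$reg_summary[[1]]
```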
# Example 3: Gaussian Copula Graphical Model
This final example fits a Gaussian copula graphical model that can be used for mixed data. In this case,
`formula` is not used and instead all of the variables are included in the GGM.
## Fit Model
This model is estimated with
```
# data
Y <- na.omit(bfi[,c(1:5, 27)])
# fit type = "mixed"
fit <- estimate(Y, type = "mixed", iter = 1000)
# summary
summary(fit)
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Type: mixed
#> Analytic: FALSE
#> Formula:
#> Posterior Samples: 1000
#> Observations (n):
#> Nodes (p): 6
#> Relations: 15
#> ---
#> Call:
#> estimate(Y = Y, type = "mixed", iter = 1000)
#> ---
#> Estimates:
#> Relation Post.mean Post.sd Cred.lb Cred.ub
#> A1--A2 -0.217 0.048 -0.294 -0.114
#> A1--A3 -0.063 0.027 -0.113 -0.011
#> A2--A3 0.364 0.023 0.317 0.410
#> A1--A4 0.116 0.038 0.048 0.192
#> A2--A4 0.241 0.031 0.182 0.303
#> A3--A4 0.228 0.026 0.174 0.275
#> A1--A5 0.057 0.031 0.003 0.120
#> A2--A5 0.186 0.027 0.135 0.241
#> A3--A5 0.438 0.019 0.399 0.474
#> A4--A5 0.151 0.025 0.103 0.199
#> A1--education -0.016 0.069 -0.125 0.119
#> A2--education 0.063 0.049 -0.016 0.162
#> A3--education 0.049 0.025 0.002 0.099
#> A4--education 0.053 0.026 0.005 0.105
#> A5--education 0.072 0.024 0.024 0.120
#> ---
```
Here it is clear that education is included in the model, as the relations with the other nodes are included in the output.
## Select Graph
The graph is selected with
```
select(fit)
```
# Note
It is possible to control for variables with all methods in **BGGM**, including when comparing groups, Bayesian hypothesis testing, etc.
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/control.Rmd
|
## ---- eval = FALSE, message=FALSE---------------------------------------------
# # need the developmental version
# if (!requireNamespace("remotes")) {
# install.packages("remotes")
# }
#
# # install from github
# remotes::install_github("donaldRwilliams/BGGM")
# library(BGGM)
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/hyp_3_ways.R
|
---
title: "Three Ways to Test the Same Hypothesis"
author: "Donny Williams"
date: "5/23/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Three Ways to Test the Same Hypothesis}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Introduction
On a Facebook methods group, there was a question about testing hypotheses in networks. In
the comments, it was suggested that **BGGM** could be used to test the hypothesis. And it turns
out that **BGGM** really shines for testing expectations [see for example @rodriguez2020formalizing].
In this vignette, I demonstrate three ways to go about testing the same hypothesis, which is
essentially testing for a difference in the **sum** of partial correlations between groups.
### R package
```{r, eval = FALSE, message=FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
```
### Data
For demonstrative purposes, I use the `bfi` data and test the hypotheses in males and females
```
# data
Y <- bfi
# males
Y_males <- subset(Y, gender == 1, select = -c(education, gender))[,1:5]
# females
Y_females <- subset(Y, gender == 2, select = -c(education, gender))[,1:5]
```
# Approach 1: Posterior Difference
The first approach is rather straightforward, with the caveat that the method needs to be
implemented by the user. Note that I could certainly implement this in **BGGM**, assuming there
is enough interest. Please make a feature request [here](https://github.com/donaldRwilliams/BGGM/issues).
## Hypothesis
The hypothesis was that a sum of relations was larger in one group, for example,
$$
\begin{align}
\mathcal{H}_0: (\rho^{male}_{A1--A2}\; + \; \rho^{male}_{A1--A3}) = (\rho^{female}_{A1--A2}\; + \; \rho^{female}_{A1--A3}) \\
\mathcal{H}_1: (\rho^{male}_{A1--A2}\; + \; \rho^{male}_{A1--A3}) > (\rho^{female}_{A1--A2}\; + \; \rho^{female}_{A1--A3})
\end{align}
$$
Note that the hypothesis is related to the sum of relations, which is readily tested in **BGGM**.
## Fit Models
The first step is to estimate the model for each group
```r
# fit female
fit_female <- estimate(Y_females, seed = 2)
# fit males
fit_male <- estimate(Y_males, seed = 1)
```
For this example, I used the default, which is to assume the data are Gaussian. This can be changed with `type = ` set to either `binary`, `ordinal`, or `mixed`.
## Extract the Samples
The next step is to extract the posterior samples for each relation
```r
post_male <- posterior_samples(fit_male)[,c("A1--A2", "A1--A3")]
post_female <- posterior_samples(fit_female)[,c("A1--A2", "A1--A3")]
```
Note that the column names reflect the upper-triangular elements of the
partial correlation matrix. Hence, the first name (e.g.,`A1`) must be located before
the second name (e.g., `A2`) in the data matrix. This can be understood in reference
to the column numbers: `1--2` is correct whereas `2--1` will result in an error.
## Sum and Compute Difference
The next step is to sum the relations and compute the difference
```r
# sum males
sum_male <- rowSums(post_male)
# sum females
sum_female <- rowSums(post_female)
# difference
diff <- sum_male - sum_female
```
which can then be plotted
```r
# three column
par(mfrow=c(1,3))
# male sum
hist(sum_male)
# female sum
hist(sum_female)
# difference
hist(diff)
```

## Posterior Probability
Next compute the posterior probability the sum is larger in males than females
```r
# posterior prob
mean(sum_male > sum_female)
#> 0.737
```
and then the credible interval for the difference
```
quantile(diff, probs = c(0.025, 0.975))
#> 2.5% 97.5%
#> -0.06498586 0.12481253
```
# Approach 2: Predictive Check
The next approach is based on a posterior predictive check. The hypothesis is essentially the same as above, but for the predictive distribution, that is,
$$
\begin{align}
\mathcal{H}_0: (\rho^{male^{yrep}}_{A1--A2}\; + \; \rho^{male^{yrep}}_{A1--A3}) = (\rho^{female^{yrep}}_{A1--A2}\; + \; \rho^{female^{yrep}}_{A1--A3}) \\
\mathcal{H}_1: (\rho^{male^{yrep}}_{A1--A2}\; + \; \rho^{male^{yrep}}_{A1--A3}) > (\rho^{female^{yrep}}_{A1--A2}\; + \; \rho^{female^{yrep}}_{A1--A3})
\end{align}
$$
where the only difference is $yrep$. See more details [here](https://donaldrwilliams.github.io/BGGM/articles/ppc_custom.html).
## Define Function
The first step is to define a function to compute the difference in sums
```r
# colnames
cn <- colnames(Y_males)
# function
f <- function(Yg1, Yg2){
# data
Yg1 <- na.omit(Yg1)
Yg2 <- na.omit(Yg2)
# estimate partials
fit1 <- pcor_mat(estimate(Yg1, analytic = TRUE))
fit2 <- pcor_mat(estimate(Yg2, analytic = TRUE))
# names (not needed)
colnames(fit1) <- cn
rownames(fit1) <- cn
colnames(fit2) <- cn
rownames(fit2) <- cn
# take sum
sum1 <- fit1["A1", "A2"] + fit1["A1", "A3"]
sum2 <- fit2["A1", "A2"] + fit2["A1", "A3"]
# difference
sum1 - sum2
}
```
Note that the function takes two data matrices and then returns a single value.
Also, the default in **BGGM** does not require a custom function
(only needs the data from each group).
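For instance, a minimal sketch of the default check (no custom function), which uses the package's built-in test-statistics:

```r
# default check: only the data from each group is needed
ppc_default <- ggm_compare_ppc(Y_males, Y_females, iter = 250)
```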
## Predictive Check
The next step is to compute the observed difference and then perform the check.
```r
# observed
obs <- f(Y_males, Y_females)
# check
ppc <- ggm_compare_ppc(Y_males, Y_females,
iter = 250,
FUN = f,
custom_obs = obs)
# print
ppc
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Test: Global Predictive Check
#> Posterior Samples: 250
#> Group 1: 896
#> Group 2: 1813
#> Nodes: 5
#> Relations: 10
#> ---
#> Call:
#> ggm_compare_ppc(Y_males, Y_females, iter = 250, FUN = f, custom_obs = obs)
#> ---
#> Custom:
#>
#> contrast custom.obs p.value
#> Yg1 vs Yg2 0.029 0.264
#> ---
```
Note this requires the user to determine $\alpha$.
## Plot
The check can also be plotted
```r
plot(ppc)
```

where the red is the critical region.
# Approach 3: Bayesian Hypothesis Testing
The above approaches cannot provide evidence that the sum is equal. In other words, just because there was
not a difference, this does not provide evidence for equality. The Bayes factor methods allow for formally
assessing the equality model, that is,
$$
\begin{align}
\mathcal{H}_1&: (\rho^{male}_{A1--A2}\; + \; \rho^{male}_{A1--A3}) > (\rho^{female}_{A1--A2}\; + \; \rho^{female}_{A1--A3}) \\
\mathcal{H}_2&: (\rho^{male}_{A1--A2}\; + \; \rho^{male}_{A1--A3}) = (\rho^{female}_{A1--A2}\; + \; \rho^{female}_{A1--A3}) \\
\mathcal{H}_3&: \text{not} \; \mathcal{H}_1 \; \text{or} \; \mathcal{H}_2
\end{align}
$$
where $\mathcal{H}_3$ is the complement and can be understood as neither the first nor the second hypothesis.
## Test Hypothesis
The hypothesis is easily translated to `R` code
```r
hyp <- c("g1_A1--A2 + g1_A1--A3 > g2_A1--A2 + g2_A1--A3;
g1_A1--A2 + g1_A1--A3 = g2_A1--A2 + g2_A1--A3")
```
Note that `g1` indicates the group and `;` separates the hypotheses. I again assume the data are Gaussian
(although this can be changed to `type = "ordinal"` or `type = "mixed"`; see [here](https://donaldrwilliams.github.io/BGGM/reference/ggm_compare_confirm.html))
```r
test <- ggm_compare_confirm(Y_males, Y_females,
hypothesis = hyp)
# print
test
#> BGGM: Bayesian Gaussian Graphical Models
#> Type: continuous
#> ---
#> Posterior Samples: 25000
#> Group 1: 896
#> Group 2: 1813
#> Variables (p): 5
#> Relations: 10
#> Delta: 15
#> ---
#> Call:
#> ggm_compare_confirm(Y_males, Y_females, hypothesis = hyp)
#> ---
#> Hypotheses:
#>
#> H1: g1_A1--A2+g1_A1--A3>g2_A1--A2+g2_A1--A3
#> H2: g1_A1--A2+g1_A1--A3=g2_A1--A2+g2_A1--A3
#> H3: complement
#> ---
#> Posterior prob:
#>
#> p(H1|data) = 0.13
#> p(H2|data) = 0.825
#> p(H3|data) = 0.046
#> ---
#> Bayes factor matrix:
#> H1 H2 H3
#> H1 1.000 0.158 2.853
#> H2 6.349 1.000 18.113
#> H3 0.351 0.055 1.000
#> ---
#> note: equal hypothesis prior probabilities
```
Note the posterior hypothesis probability for the equality model is 0.825. The Bayes factor matrix then divides those values, for example, $BF_{21}$ indicates the data were about 6 times more likely under $\mathcal{H}_2$ than $\mathcal{H}_1$.
## Plot Hypothesis
The hypothesis can be plotted
```r
plot(test)
```

### Sensitivity Analysis
It is also important to check the robustness. Here the width of the prior distribution is decreased
```r
test <- ggm_compare_confirm(Y_males, Y_females,
hypothesis = hyp,
prior_sd = 0.15)
# print
test$out_hyp_prob
#> 0.18523406 0.74906147 0.06570447
```
which results in a probability of 0.75 for $\mathcal{H}_2$ ($BF_{21} = 4.04$).
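Because the hypotheses have equal prior probabilities, that Bayes factor is simply the ratio of the posterior probabilities:

```r
# BF_21 as the ratio of posterior hypothesis probabilities
0.74906147 / 0.18523406
#> 4.04
```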
# Conclusion
Three approaches for testing the same hypothesis were demonstrated in this vignette. This highlights that any hypothesis can be tested in **BGGM** and in several ways.
# References
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/hyp_3_ways.Rmd
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/in_tandem.R
|
---
title: "In Tandem: Confirmatory and Exploratory Testing"
author: "Donny Williams"
date: "5/23/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{In Tandem: Confirmatory and Exploratory Testing}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
The blog post, "Tutorial: Bayesian Testing of Central Structures in Psychological Networks," is hosted on a different website.
# <i class="fas fa-external-link-square-alt"></i> [External Link](https://josue.rbind.io/post/tutorial-bayesian-testing/)
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/in_tandem.Rmd
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/installation.R
|
---
title: "Troubleshoot"
author: "Donny Williams"
date: "5/20/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Troubleshoot}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
# OSX
## Error 1: Missing a Fortran Compiler
The most common error seems to be the following (or something similar).
```
E> ld: warning: directory not found for option '-L/usr/local/gfortran/lib'
E> ld: library not found for -lgfortran
E> clang: error: linker command failed with exit code 1 (use -v to see invocation)
```
This indicates that the Fortran compiler is missing. It can be installed [here](https://github.com/fxcoudert/gfortran-for-macOS/releases/tag/8.2?fbclid=IwAR2SyHWB2BzFcY7bpHYW8VzNvlDsy4Gw1QxUMueXB3H0fXicCWoMbE7Ypac) (the `.dmg` file).
## Error 2: Outdated R and/or R-studio
Typically the above has solved the issue. If not, then an additional error could be
```
Error: (converted from warning) Setting LC_CTYPE failed, using "C"
```
This was solved by updating both R and R-studio. More information can be found [here](https://stackoverflow.com/questions/9689104/installing-r-on-mac-warning-messages-setting-lc-ctype-failed-using-c?fbclid=IwAR0DSaPeWOvRyfIsCx4Tjvz9-jZUh2ySXQIHnzqwbqL2_idfPlFF3j6mOe8).
## Error 3: Xcode missing
If that does not work, then perhaps `Xcode` is missing. This can be installed at the "Mac App Store".
## GitHub Issues
The following is a link to an issue on GitHub for troubleshooting installation of **BGGM** on OSX.
* [https://github.com/donaldRwilliams/BGGM/issues/26](https://github.com/donaldRwilliams/BGGM/issues/26) (closed)
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/installation.Rmd
|
## ---- eval = FALSE, message=FALSE---------------------------------------------
# # need the developmental version
# if (!requireNamespace("remotes")) {
# install.packages("remotes")
# }
#
# # install from github
# remotes::install_github("donaldRwilliams/BGGM")
# library(BGGM)
## ---- echo=FALSE, message=FALSE-----------------------------------------------
library(BGGM)
## ---- eval=FALSE--------------------------------------------------------------
# # data
# Y <- ptsd[,1:10]
#
# # fit model
# # + 1 makes first category a 1
# fit <- estimate(Y + 1, type = "ordinal")
## ---- eval=FALSE--------------------------------------------------------------
# convergence(fit, print_names = TRUE)
#
# #> [1] "B1--B2" "B1--B3" "B2--B3" "B1--B4" "B2--B4" "B3--B4" "B1--B5"
# #> [8] "B2--B5" "B3--B5" "B4--B5" "B1--C1" "B2--C1" "B3--C1" "B4--C1"
# #> [15] "B5--C1" "B1--C2" "B2--C2" "B3--C2" "B4--C2" "B5--C2" "C1--C2"
# #> [22] "B1--D1" "B2--D1" "B3--D1" "B4--D1" "B5--D1" "C1--D1" "C2--D1"
# #> [29] "B1--D2" "B2--D2" "B3--D2" "B4--D2" "B5--D2" "C1--D2" "C2--D2"
# #> [36] "D1--D2" "B1--D3" "B2--D3" "B3--D3" "B4--D3" "B5--D3" "C1--D3"
# #> [43] "C2--D3" "D1--D3" "D2--D3" "B1_(Intercept)" "B2_(Intercept)" "B3_(Intercept)" "B4_(Intercept)"
# #> [50] "B5_(Intercept)" "C1_(Intercept)" "C2_(Intercept)" "D1_(Intercept)" "D2_(Intercept)" "D3_(Intercept)"
## ---- eval=FALSE--------------------------------------------------------------
# convergence(fit, param = "B1--B2", type = "acf")
## ---- eval=FALSE--------------------------------------------------------------
# # sim time series
# ts.sim <- arima.sim(list(order = c(1,1,0), ar = 0.7), n = 200)
#
# acf(ts.sim)
## ---- eval=FALSE--------------------------------------------------------------
# # extract samples
# samps <- fit$post_samp$pcors
#
# # iterations
# iter <- fit$iter
#
# # thinning interval
# thin <- 5
#
# # save every 5th (add 50 which is the burnin)
# new_iter <- length(seq(1,to = iter + 50 , by = thin))
#
# # replace (add 50 which is the burnin)
# fit$post_samp$pcors <- samps[,,seq(1,to = iter + 50, by = thin)]
#
# # replace iter
# fit$iter <- new_iter - 50
#
# # check thinned
# convergence(fit, param = "B1--B2", type = "acf")
## ---- eval=FALSE--------------------------------------------------------------
# convergence(fit, param = "B1--B2", type = "trace")
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/mcmc_diagnostics.R
|
---
title: "MCMC Diagnostics"
author: "Donny Williams"
date: "5/20/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{MCMC Diagnostics}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Introduction
The algorithms in **BGGM** are based on Gibbs samplers. In the context of
covariance matrix estimation, as opposed to, say, hierarchical models,
this allows for efficiently sampling the posterior distribution. Furthermore, in all samplers
the empirical covariance matrix is used as the starting value, which reduces
the length of the burn-in (or warm-up). Still, it is important to monitor convergence. See [here](https://sbfnk.github.io/mfiidd/mcmc_diagnostics.html) for an introduction to MCMC diagnostics.
### R packages
```{r, eval = FALSE, message=FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
```
```{r, echo=FALSE, message=FALSE}
library(BGGM)
```
# ACF plot
This first example includes an "acf" plot that looks at the autocorrelation of the samples. In general,
we do not want the samples to be strongly correlated with the previous samples (or lags $k$).
I am not sure there are general guidelines, but typically we do not want "auto correlation...for higher values of k, [because] this indicates a high degree of correlation between our samples and slow mixing" [source](https://sbfnk.github.io/mfiidd/mcmc_diagnostics.html).
Here is an example for ordinal data.
```{r, eval=FALSE}
# data
Y <- ptsd[,1:10]
# fit model
# + 1 makes first category a 1
fit <- estimate(Y + 1, type = "ordinal")
```
To check the convergence of a partial correlation, we need the parameter name. These are printed as follows
```{r, eval=FALSE}
convergence(fit, print_names = TRUE)
#> [1] "B1--B2" "B1--B3" "B2--B3" "B1--B4" "B2--B4" "B3--B4" "B1--B5"
#> [8] "B2--B5" "B3--B5" "B4--B5" "B1--C1" "B2--C1" "B3--C1" "B4--C1"
#> [15] "B5--C1" "B1--C2" "B2--C2" "B3--C2" "B4--C2" "B5--C2" "C1--C2"
#> [22] "B1--D1" "B2--D1" "B3--D1" "B4--D1" "B5--D1" "C1--D1" "C2--D1"
#> [29] "B1--D2" "B2--D2" "B3--D2" "B4--D2" "B5--D2" "C1--D2" "C2--D2"
#> [36] "D1--D2" "B1--D3" "B2--D3" "B3--D3" "B4--D3" "B5--D3" "C1--D3"
#> [43] "C2--D3" "D1--D3" "D2--D3" "B1_(Intercept)" "B2_(Intercept)" "B3_(Intercept)" "B4_(Intercept)"
#> [50] "B5_(Intercept)" "C1_(Intercept)" "C2_(Intercept)" "D1_(Intercept)" "D2_(Intercept)" "D3_(Intercept)"
```
Note the `(Intercept)`, which reflects the fact that the ordinal approach is a multivariate probit model with only intercepts.
The next step is to make the plot
```{r, eval=FALSE}
convergence(fit, param = "B1--B2", type = "acf")
```

The argument `param` can take any number of parameters and a plot will be made for each (e.g., `param = c("B1--B2", "B1--B3")`). In this case, the autocorrelations look acceptable and actually really good (note the drop to zero). A problematic `acf` plot would have the black lines start at `1.0`
and perhaps never go below `0.20`.
To make this clear, I simulated time series data taking the code from [here](https://stat.ethz.ch/R-manual/R-devel/library/stats/html/arima.sim.html)
```{r, eval=FALSE}
# sim time series
ts.sim <- arima.sim(list(order = c(1,1,0), ar = 0.7), n = 200)
acf(ts.sim)
```

This would be considered problematic. If this occurs, one solution could be to thin the samples manually
```{r, eval=FALSE}
# extract samples
samps <- fit$post_samp$pcors
# iterations
iter <- fit$iter
# thinning interval
thin <- 5
# save every 5th (add 50 which is the burnin)
new_iter <- length(seq(1,to = iter + 50 , by = thin))
# replace (add 50 which is the burnin)
fit$post_samp$pcors <- samps[,,seq(1,to = iter + 50, by = thin)]
# replace iter
fit$iter <- new_iter - 50
# check thinned
convergence(fit, param = "B1--B2", type = "acf")
```
or perhaps just running the model for more iterations (e.g., increasing `iter` in `estimate`). The above is quite convoluted, but note that convergence should not typically be an issue. And it might come in handy to know that the samples can be replaced and the other functions
in **BGGM** will still work with the object `fit`.
# Trace plot
The next example is a trace plot. Here we are looking for good "mixing".
```{r, eval=FALSE}
convergence(fit, param = "B1--B2", type = "trace")
```

Admittedly the term "mixing" is vague. But in general the plot should look like this example,
where there is no place that the chain is "stuck". See [here](https://stats.stackexchange.com/questions/311151/evaluation-of-mcmc-samples) for
problematic trace plots.
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/mcmc_diagnostics.Rmd
|
## ---- eval = FALSE, message=FALSE---------------------------------------------
# # need the developmental version
# if (!requireNamespace("remotes")) {
# install.packages("remotes")
# }
#
# # install from github
# remotes::install_github("donaldRwilliams/BGGM")
# library(BGGM)
# library(cowplot)
## ---- echo=FALSE, message=FALSE-----------------------------------------------
library(BGGM)
## ---- eval=FALSE--------------------------------------------------------------
# # data
# Y <- bfi[,1:25]
#
# # fit model
# fit <- estimate(Y)
## ---- eval=FALSE--------------------------------------------------------------
# # select the edge set
# E <- select(fit,
# cred = 0.95,
# alternative = "two.sided")
## ---- eval=FALSE--------------------------------------------------------------
# plot(E)
## ---- eval=FALSE--------------------------------------------------------------
# plot(E,
# # enlarge edges
# edge_magnify = 5,
# # cluster nodes
# groups = comm,
# # change layout
# layout = "random")$plt +
# # add custom labels
# scale_color_brewer(breaks = c("A",
# "C",
# "E",
# "N",
# "O"),
# labels = c("Agreeableness", "Conscientiousness",
# "Extraversion", "Neuroticism",
# "Opennness"),
# palette = "Set2")
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/netplot.R
|
---
title: "Network Plots"
author: "Donny Williams"
date: "5/20/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Network Plots}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Introduction
This vignette shows how to make network plots.
### R packages
```{r, eval = FALSE, message=FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
library(cowplot)
```
```{r, echo=FALSE, message=FALSE}
library(BGGM)
```
# Estimate
For the estimate methods, it is currently only possible to detect non-zero relations and
the others are set to zero (no connection in the graph). In a future release, it will be possible
to define a region of equivalence to directly assess null values. Hence, it is important to note that nodes
not connected are not necessarily conditionally independent (absence of evidence is not evidence of absence).
## Fit Model
In this example, I use the `bfi` data, which consists of 25 variables measuring different aspects of personality.
```{r, eval=FALSE}
# data
Y <- bfi[,1:25]
# fit model
fit <- estimate(Y)
```
## Select Graph
The next step is to select the graph, that is, those relations for which the credible interval excludes zero
```{r, eval=FALSE}
# select the edge set
E <- select(fit,
cred = 0.95,
alternative = "two.sided")
```
`alternative` can be changed to, say, `"greater"`, which would then perform a one-sided hypothesis
test for positive relations. This is ideal for many applications in psychology, because often
**all** relations are expected to be positive.
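For example, a minimal sketch of the one-sided version:

```r
# one-sided test for positive relations
E_pos <- select(fit,
                cred = 0.95,
                alternative = "greater")
```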
## Plot Graph
Here is the basic plot. This works for any object from `select` (e.g., comparing groups).
```{r, eval=FALSE}
plot(E)
```

### Customize Plot
The above is a `ggplot` that can be further customized. Here is an example.
```r
# extract communities
comm <- substring(colnames(Y), 1, 1)
plot(E,
# enlarge edges
edge_magnify = 5,
# cluster nodes
groups = comm,
# change layout
layout = "circle")$plt +
# add custom labels
scale_color_brewer(breaks = c("A",
"C",
"E",
"N",
"O"),
labels = c("Agreeableness", "Conscientiousness",
"Extraversion", "Neuroticism",
"Opennness"),
palette = "Set2")
```

The `edge_magnify` is a value that is multiplied by the edges, `groups` allows for grouping the
variables (e.g., those thought to belong to the same "community" will be the same color), and the
`scale_color_brewer` is from the package `ggplot2` (`palette` controls the color of the `groups`).
By default the edge colors are from a colorblind palette. This can be changed in `plot` with
the arguments `pos_col` (the color for positive edges) and `pos_neg` (the color for negative edges).
This is just scratching the surface of possibilities, as essentially any change
can be made to the plot. There is lots of support for making nice plots readily available
online.
#### Layout
It is also possible to change the layout. This is done with the **sna** package, which is linked in the documentation for `plot.select` in **BGGM**. Here is an example using `layout = "random"`
```{r, eval=FALSE}
plot(E,
# enlarge edges
edge_magnify = 5,
# cluster nodes
groups = comm,
# change layout
layout = "random")$plt +
# add custom labels
scale_color_brewer(breaks = c("A",
"C",
"E",
"N",
"O"),
labels = c("Agreeableness", "Conscientiousness",
"Extraversion", "Neuroticism",
"Opennness"),
palette = "Set2")
```

# Bayesian Hypothesis Testing
The Bayesian hypothesis testing methods offer several advantages, for example, that
evidence for the null hypothesis of conditional independence is formally evaluated.
As a result, the `explore` method in **BGGM** provides plots for both the conditional
dependence and independence structure, in addition to a plot for which the evidence was
ambiguous.
To highlight this advantage, `ptsd` data is used that has a relatively small sample size.
```r
# fit model
fit <- explore(Y)
E <- select(fit, BF_cut = 3)
```
Then plot the results. Note that there are three plots, so the package **cowplot** is used
to combine them into one plot.
```r
plts <- plot(E,
edge_magnify = 5,
groups = comm)
plot_grid(
plts$H1_plt +
ggtitle("Conditional Dependence") +
theme(legend.position = "none"),
plts$H0_plt +
ggtitle("Conditional Independence") +
theme(legend.position = "none"),
plts$ambiguous_plt +
ggtitle("Ambiguous"),
nrow = 1,
rel_widths = c(1, 1, 1.1)
)
```

As can be seen, there is no evidence for conditional independence for any of the relations. And
the ambiguous network makes clear there is large uncertainty as to what might or might not be the "true" network structure. This basic idea of having three adjacency matrices was proposed in @Williams2019_bf.
# Note
**BGGM** provides a publication-ready plot, but it is also limited compared to **qgraph**
[@epskamp2012qgraph]. The one advantage of **BGGM** is that all plots are `ggplots`,
which then allows for combining them rather easily. An example is included in another
vignette that shows how to combine several plots made with various methods in **BGGM**.
# References
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/netplot.Rmd
|
## ---- eval = FALSE------------------------------------------------------------
# # need the developmental version
# if (!requireNamespace("remotes")) {
# install.packages("remotes")
# }
#
# # install from github
# remotes::install_github("donaldRwilliams/BGGM")
## ---- warning =FALSE, message=FALSE-------------------------------------------
# need these packages
library(BGGM)
library(ggplot2)
library(assortnet)
library(networktools)
# data
Y <- ptsd[,1:7]
## ---- message=FALSE, warning=FALSE, eval=FALSE--------------------------------
# library(BGGM)
#
# # copula ggm
# fit <- estimate(Y, type = "mixed", iter = 1000)
## -----------------------------------------------------------------------------
# define function
f <- function(x,...){
networktools::expectedInf(x,...)$step1
}
## ---- eval = FALSE, message=FALSE, results='hide'-----------------------------
# # iter = 250 for demonstrative purposes
# # (but note even 1000 iters takes less than 1 second)
# # compute
# net_stat <- roll_your_own(object = fit,
# FUN = f,
# select = FALSE,
# iter = 250)
# # print
# net_stat
#
# #> BGGM: Bayesian Gaussian Graphical Models
# #> ---
# #> Network Stats: Roll Your Own
# #> Posterior Samples: 250
# #> ---
# #> Estimates:
# #>
# #> Node Post.mean Post.sd Cred.lb Cred.ub
# #> 1 0.701 0.099 0.508 0.871
# #> 2 0.912 0.113 0.722 1.179
# #> 3 0.985 0.112 0.742 1.199
# #> 4 1.056 0.105 0.851 1.247
# #> 5 1.056 0.116 0.862 1.288
# #> 6 0.491 0.092 0.329 0.679
# #> 7 0.698 0.098 0.521 0.878
# #> ---
## ---- eval = FALSE, results='hide'--------------------------------------------
# net_stat <- roll_your_own(object = fit,
# FUN = f,
# select = TRUE,
# iter = 250)
#
# # print
# net_stat
#
# #> BGGM: Bayesian Gaussian Graphical Models
# #> ---
# #> Network Stats: Roll Your Own
# #> Posterior Samples: 250
# #> ---
# #> Estimates:
# #>
# #> Node Post.mean Post.sd Cred.lb Cred.ub
# #> 1 0.636 0.136 0.386 0.874
# #> 2 0.792 0.113 0.580 0.996
# #> 3 0.777 0.122 0.544 1.001
# #> 4 0.910 0.121 0.667 1.143
# #> 5 0.525 0.104 0.331 0.727
# #> 6 0.484 0.110 0.270 0.686
# #> 7 0.247 0.081 0.088 0.412
# #> ---
## ---- message=FALSE, eval=FALSE-----------------------------------------------
# plot(net_stat)
## ---- eval = FALSE, message=FALSE, results='hide'-----------------------------
# # clusters
# communities <- substring(colnames(Y), 1, 1)
#
# # function is slow
# f <- function(x, ...){
# networktools::bridge(x, ...)$`Bridge Strength`
# }
#
#
# # compute
# net_stat <- roll_your_own(object = fit,
# FUN = f,
# communities = communities,
# iter = 250)
#
# # print
# net_stat
#
# #> BGGM: Bayesian Gaussian Graphical Models
# #> ---
# #> Network Stats: Roll Your Own
# #> Posterior Samples: 250
# #> ---
# #> Estimates:
# #>
# #> Node Post.mean Post.sd Cred.lb Cred.ub
# #> 1 0.162 0.082 0.035 0.347
# #> 2 0.250 0.113 0.061 0.501
# #> 3 0.180 0.104 0.049 0.480
# #> 4 0.280 0.098 0.090 0.480
# #> 5 0.375 0.093 0.196 0.558
# #> 6 0.617 0.166 0.339 1.002
# #> 7 0.628 0.166 0.400 1.025
# #> ---
## ---- message = FALSE, eval=FALSE---------------------------------------------
# plot(net_stat,
# fill = "lightblue") +
# ggtitle("Bridge Strength") +
# xlab("Score")
## ---- eval = FALSE, message=FALSE, results='hide'-----------------------------
# # clusters
# communities <- substring(colnames(Y), 1, 1)
#
# # define function
# f <- function(x,...){
# assortnet::assortment.discrete(x, ...)$r
# }
#
# net_stat <- roll_your_own(object = fit,
# FUN = f,
# types = communities,
# weighted = TRUE,
# SE = FALSE, M = 1,
# iter = 250)
#
# # print
# net_stat
#
# #> BGGM: Bayesian Gaussian Graphical Models
# #> ---
# #> Network Stats: Roll Your Own
# #> Posterior Samples: 250
# #> ---
# #> Estimates:
# #>
# #> Post.mean Post.sd Cred.lb Cred.ub
# #> 0.261 0.124 -0.01 0.469
# #> ---
## ---- eval=FALSE--------------------------------------------------------------
# hist(net_stat$results, main = "Assortment")
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/netstat_custom.R
|
---
title: "Custom Network Statistics"
author: "Donny Williams"
date: "5/19/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Custom Network Statistics}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Background
This vignette describes a new feature to **BGGM** (`2.0.0`) that allows for
computing custom network statistics (e.g., centrality). The new function is
called `roll_your_own` and it was suggested by a user of **BGGM** ([see feature request here](https://github.com/donaldRwilliams/BGGM/issues/12)).
# Basic Idea
The basic idea is to compute the chosen network statistic for each of the sampled partial
correlation matrices, resulting in a distribution. All that is required is to define a function
that takes either a partial correlation matrix or a weighted adjacency matrix
(the partial correlation matrix with the non-selected relations set to zero) as the first argument.
Several examples are provided below.
### R packages
```{r, eval = FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
```
### Data
In all examples, a subset of `ptsd` data is used. The subset includes two of the "communities" of
symptoms [details for these data can be found in @armour2017network]. The data are ordinal (5-level Likert).
```{r, warning =FALSE, message=FALSE}
# need these packages
library(BGGM)
library(ggplot2)
library(assortnet)
library(networktools)
# data
Y <- ptsd[,1:7]
```
### Fit Model
For these data, the GGM is estimated with a semi-parametric copula [@hoff2007extending].
In **BGGM**, this is implemented with `type = "mixed"`, which is kind of a misnomer because the data do not
have to be "mixed" (consisting of continuous and discrete variables).
Note that the model is fitted only once which highlights that only the posterior samples
are needed to compute any network statistic.
```{r, message=FALSE, warning=FALSE, eval=FALSE}
library(BGGM)
# copula ggm
fit <- estimate(Y, type = "mixed", iter = 1000)
```
# Examples
## Expected Influence
The first example computes expected influence [@robinaugh2016identifying]. The first step is to define a function
```{r}
# define function
f <- function(x,...){
networktools::expectedInf(x,...)$step1
}
```
Note that `x` takes the matrix which is then passed to `expectedInf`. The `...` allows for
passing additional arguments to the `expectedInf` function. An example is provided below.
With the function defined, the next step is to compute the network statistic.
```{r, eval = FALSE, message=FALSE, results='hide'}
# iter = 250 for demonstrative purposes
# (but note even 1000 iters takes less than 1 second)
# compute
net_stat <- roll_your_own(object = fit,
FUN = f,
select = FALSE,
iter = 250)
# print
net_stat
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Network Stats: Roll Your Own
#> Posterior Samples: 250
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> 1 0.701 0.099 0.508 0.871
#> 2 0.912 0.113 0.722 1.179
#> 3 0.985 0.112 0.742 1.199
#> 4 1.056 0.105 0.851 1.247
#> 5 1.056 0.116 0.862 1.288
#> 6 0.491 0.092 0.329 0.679
#> 7 0.698 0.098 0.521 0.878
#> ---
```
The option `select = FALSE` indicates to compute the statistics from the partial correlation matrices (nothing set to zero). This can be changed with `select = TRUE`. Internally, each of the sampled
partial correlation matrices is multiplied by the adjacency matrix.
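A minimal sketch of that idea for a single posterior draw (`roll_your_own` does this for every sampled matrix; the indexing assumes the samples are stored as in the MCMC diagnostics vignette, with the first 50 draws as burn-in):

```{r, eval=FALSE}
# adjacency matrix (0/1) from the selected graph
adj <- select(fit)$adj

# one post burn-in draw of the partial correlation matrix
pcor_s <- fit$post_samp$pcors[, , 51]

# zero out the non-selected relations, then compute the statistic
f(pcor_s * adj)
```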
```{r, eval = FALSE, results='hide'}
net_stat <- roll_your_own(object = fit,
FUN = f,
select = TRUE,
iter = 250)
# print
net_stat
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Network Stats: Roll Your Own
#> Posterior Samples: 250
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> 1 0.636 0.136 0.386 0.874
#> 2 0.792 0.113 0.580 0.996
#> 3 0.777 0.122 0.544 1.001
#> 4 0.910 0.121 0.667 1.143
#> 5 0.525 0.104 0.331 0.727
#> 6 0.484 0.110 0.270 0.686
#> 7 0.247 0.081 0.088 0.412
#> ---
```
The results are then plotted with
```{r, message=FALSE, eval=FALSE}
plot(net_stat)
```

## Bridge Strength
The next example computes bridge strength [@jones2019bridge]. This requires the user to define clusters or "communities".
```{r, eval = FALSE, message=FALSE, results='hide'}
# clusters
communities <- substring(colnames(Y), 1, 1)
# function is slow
f <- function(x, ...){
networktools::bridge(x, ...)$`Bridge Strength`
}
# compute
net_stat <- roll_your_own(object = fit,
FUN = f,
communities = communities,
iter = 250)
# print
net_stat
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Network Stats: Roll Your Own
#> Posterior Samples: 250
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> 1 0.162 0.082 0.035 0.347
#> 2 0.250 0.113 0.061 0.501
#> 3 0.180 0.104 0.049 0.480
#> 4 0.280 0.098 0.090 0.480
#> 5 0.375 0.093 0.196 0.558
#> 6 0.617 0.166 0.339 1.002
#> 7 0.628 0.166 0.400 1.025
#> ---
```
Notice `communities`. This is passed to `...` in the function `f`, which, in turn, is passed to the function `bridge`. Any number of arguments can be passed this way.
The results can then be plotted and further customized (the returned object is a `ggplot`)
```{r, message = FALSE, eval=FALSE}
plot(net_stat,
fill = "lightblue") +
ggtitle("Bridge Strength") +
xlab("Score")
```

## Assortment
The next example computes assortment [@newman2003mixing].
```{r, eval = FALSE, message=FALSE, results='hide'}
# clusters
communities <- substring(colnames(Y), 1, 1)
# define function
f <- function(x,...){
assortnet::assortment.discrete(x, ...)$r
}
net_stat <- roll_your_own(object = fit,
FUN = f,
types = communities,
weighted = TRUE,
SE = FALSE, M = 1,
iter = 250)
# print
net_stat
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Network Stats: Roll Your Own
#> Posterior Samples: 250
#> ---
#> Estimates:
#>
#> Post.mean Post.sd Cred.lb Cred.ub
#> 0.261 0.124 -0.01 0.469
#> ---
```
This example demonstrates that `...` can take several arguments. The results are stored in the `net_stat` object. They can be accessed with
```{r, eval=FALSE}
hist(net_stat$results, main = "Assortment")
```

# Note
The function `roll_your_own` is expecting the custom function to return either a single number or a number for each node. This ensures all the printing and plotting functions work. However, you could return anything you want and then access the results to plot, summarize, etc.
# References
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/netstat_custom.Rmd
|
## ---- eval = FALSE------------------------------------------------------------
# # need the developmental version
# if (!requireNamespace("remotes")) {
# install.packages("remotes")
# }
#
# # install from github
# remotes::install_github("donaldRwilliams/BGGM")
#
## ---- warning =FALSE, message=FALSE-------------------------------------------
# need these packages
library(BGGM)
library(ggplot2)
library(assortnet)
library(networktools)
library(MASS)
# group 1
Yg1 <- MASS::mvrnorm(n = 926,
mu = rep(0, 16),
Sigma = ptsd_cor3,
empirical = TRUE)
# group 2
Yg2 <- MASS::mvrnorm(n = 956,
mu = rep(0, 16),
Sigma = ptsd_cor4,
empirical = TRUE)
## -----------------------------------------------------------------------------
f <- function(Yg1, Yg2){
# number of nodes
p <- ncol(Yg1)
# index of off-diagonal
indices <- upper.tri( diag(p))
# group 1:
# fit model
g1_fit <- estimate(Yg1, analytic = TRUE)
# pcors
g1_pcors <- pcor_mat(g1_fit)[indices]
# group 2
# fit model
g2_fit <- estimate(Yg2, analytic = TRUE)
# pcors
g2_pcors <- pcor_mat(g2_fit)[indices]
# test-statistic
cor(g1_pcors, g2_pcors)
}
## -----------------------------------------------------------------------------
obs <- f(Yg1, Yg2)
# observed
obs
## ---- message=FALSE, results='hide'-------------------------------------------
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000,
loss = FALSE)
## -----------------------------------------------------------------------------
ppc
## ---- eval=FALSE--------------------------------------------------------------
# plot(ppc)
## ---- echo=FALSE, message=FALSE, warning=FALSE--------------------------------
plot(ppc, col_critical = "lightblue",
col_noncritical = "lightblue")[[1]] +
xlab("Predictive Correlation")
## -----------------------------------------------------------------------------
f <- function(Yg1, Yg2){
# nodes
p <- ncol(Yg1)
# index of off-diagonal
indices <- upper.tri( diag(p))
# fit models
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
# select graphs
sel1 <- BGGM::select(fit1)
sel2 <- BGGM::select(fit2)
# hamming distance
sum((sel1$adj[indices] - sel2$adj[indices]) ^ 2)
}
## -----------------------------------------------------------------------------
obs <- f(Yg1, Yg2)
# observed
obs
## ---- message=FALSE, results='hide'-------------------------------------------
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
## -----------------------------------------------------------------------------
ppc
## ---- message=FALSE, warning=FALSE--------------------------------------------
plot(ppc)
## -----------------------------------------------------------------------------
f <- function(Yg1, Yg2){
# nodes
p <- ncol(Yg1)
# index of off-diagonal
indices <- upper.tri( diag(p))
# fit models
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
pcor1 <- BGGM::pcor_mat(fit1)
pcor2 <- BGGM::pcor_mat(fit2)
# CDM for partial correlations
# note: numerator is the trace; denominator is the Frobenius norm
1 - (sum(diag(pcor1 %*% pcor2)) / (norm(pcor1, type = "f") * norm(pcor2, type = "f")))
}
## -----------------------------------------------------------------------------
obs <- f(Yg1, Yg2)
# observed
obs
## ---- message=FALSE, results='hide'-------------------------------------------
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
## -----------------------------------------------------------------------------
ppc
## -----------------------------------------------------------------------------
hist(ppc$predictive_custom,
xlim = c(0, obs),
main = "Partial Correlation Matrix Distance")
abline(v = obs)
## -----------------------------------------------------------------------------
# clusters based on DSM-5
comms <- c(
rep("A", 4),
rep("B", 7),
rep("C", 5)
)
f <- function(Yg1, Yg2){
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
pcor1 <- BGGM::pcor_mat(fit1)
pcor2 <- BGGM::pcor_mat(fit2)
assort1 <- assortnet::assortment.discrete(pcor1, types = comms,
weighted = TRUE,
SE = FALSE, M = 1)$r
assort2 <- assortnet::assortment.discrete(pcor2, types = comms,
weighted = TRUE,
SE = FALSE, M = 1)$r
(assort1 - assort2)
}
## -----------------------------------------------------------------------------
obs <- f(Yg1, Yg2)
# observed
obs
## ---- message=FALSE, results='hide'-------------------------------------------
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
## -----------------------------------------------------------------------------
ppc
## -----------------------------------------------------------------------------
plot(ppc)
## -----------------------------------------------------------------------------
f <- function(Yg1, Yg2){
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
pcor1 <- BGGM::pcor_mat(fit1)
pcor2 <- BGGM::pcor_mat(fit2)
ei1 <- networktools::expectedInf(pcor1)$step1
ei2 <- networktools::expectedInf(pcor2)$step1
sum((ei1 - ei2)^2)
}
## -----------------------------------------------------------------------------
obs <- f(Yg1, Yg2)
# observed
obs
## ---- message=FALSE, results='hide'-------------------------------------------
ppc <- BGGM:::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
## -----------------------------------------------------------------------------
ppc
## -----------------------------------------------------------------------------
hist(ppc$predictive_custom,
xlim = c(0, obs),
main = "Expected Influence\n Sum of Squared Error")
abline(v = obs)
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/ppc_custom.R
|
---
title: "Custom Network Comparisons"
author: "Donny Williams"
date: "5/19/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Custom Network Comparisons}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Background
It is quite common to have partial correlation networks (GGMs) for various subgroups,
say, males and females, a control and treatment group, or perhaps several educational
levels. In this case, it is important to not only determine whether the groups are
different, but actually compare the groups in a way that answers a specific question
of interest.
To date, most `R` packages provide a few ways to compare groups, including **BGGM** (version `1.0.0`).
In version `2.0.0`, however, **BGGM** includes a new feature for the function `ggm_compare_ppc` that enables
users to **compare networks in any way they want**.
# Basic Idea
The technical details of the approach are described in [@williams2020comparing]. The basic idea is to
1. Draw samples from the posterior distribution, assuming the groups are equal (i.e., the "null" model).
2. Generate the posterior **predictive** distribution for the chosen test-statistic (how the groups
are being compared)
+ This can be understood as what we would expect to observe in the future
(e.g., in replication), assuming the groups were in fact equal.
3. Compute the test-statistic for the observed groups.
4. Then compare the observed test-statistic to the predictive distribution
(what is expected under the "null" model).
+ If the observed test-statistic is larger than expected under the model assuming group equality, this
suggests that the groups are different.
In **BGGM**, the default is to compare the groups with respect to (symmetric) Kullback-Leibler
divergence (i.e., "distance" between multivariate normal distributions) and the sum of
squared error (for the partial correlation matrix). This was shown to be quite powerful in @williams2020comparing, while also having a
low false positive rate.
In the following, the focus is on defining custom functions
and using them with `ggm_compare_ppc`. In all examples, post-traumatic stress disorder
networks are compared [@fried2018replicability].
### R packages
```{r, eval = FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
```
### Data
Only the correlation matrices are available. Hence, multivariate normal data is generated with that *exact*
correlation structure via the `R` package **MASS**.
```{r, warning =FALSE, message=FALSE}
# need these packages
library(BGGM)
library(ggplot2)
library(assortnet)
library(networktools)
library(MASS)
# group 1
Yg1 <- MASS::mvrnorm(n = 926,
mu = rep(0, 16),
Sigma = ptsd_cor3,
empirical = TRUE)
# group 2
Yg2 <- MASS::mvrnorm(n = 956,
mu = rep(0, 16),
Sigma = ptsd_cor4,
empirical = TRUE)
```
# Illustrative Examples
## Correlation
This first example looks at the correlation between partial correlations of the two networks. Note that
it could be that two networks have what is considered a large correlation. However, the question here is,
assuming the groups are equal, just how large should the correlation be? This is needed to interpret
the observed test-statistic.
### Step 1: Define Custom Function
The first step is to define a custom function that takes two data matrices and the output
is the chosen test-statistic (in this case a correlation)
```{r}
f <- function(Yg1, Yg2){
# number of nodes
p <- ncol(Yg1)
# index of off-diagonal
indices <- upper.tri( diag(p))
# group 1:
# fit model
g1_fit <- estimate(Yg1, analytic = TRUE)
# pcors
g1_pcors <- pcor_mat(g1_fit)[indices]
# group 2
# fit model
g2_fit <- estimate(Yg2, analytic = TRUE)
# pcors
g2_pcors <- pcor_mat(g2_fit)[indices]
# test-statistic
cor(g1_pcors, g2_pcors)
}
```
### Step 2: Compute the Observed Score
The next step is to compute the observed test-statistic, that is, the correlation between the partial correlations.
```{r}
obs <- f(Yg1, Yg2)
# observed
obs
```
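To build intuition, here is a heavily simplified sketch of the check using `f` and `obs`. It ignores posterior uncertainty by plugging in the pooled sample correlation matrix, so it is **not** the sampler used by `ggm_compare_ppc`:

```{r, eval=FALSE}
# "null" model: pool the groups, assuming they are equal
Y_pool <- rbind(Yg1, Yg2)
R_pool <- cor(Y_pool)

# generate replicated groups under equality and recompute the statistic
pred <- replicate(250, {
  rep1 <- MASS::mvrnorm(nrow(Yg1), mu = rep(0, ncol(Y_pool)), Sigma = R_pool)
  rep2 <- MASS::mvrnorm(nrow(Yg2), mu = rep(0, ncol(Y_pool)), Sigma = R_pool)
  f(rep1, rep2)
})

# rough analogue of the p-value reported below (loss = FALSE)
mean(pred <= obs)
```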
### Step 3: Predictive Check
With the function, `f`, and the observed scores, `obs`, in hand, what is left is the predictive check
```{r, message=FALSE, results='hide'}
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000,
loss = FALSE)
```
Note that `loss = FALSE` controls how the p-value is computed. It is an indicator of whether the test-statistic is
a "loss" (a bad thing). In this case, a large correlation is a good thing so it is set to `FALSE`. The results
can then be printed
```{r}
ppc
```
which shows the posterior predictive p-value is zero. This indicates that the observed correlation is lower than
the entire predictive distribution (the distribution of correlations for future data, assuming group equality)
and finally plot the results
```{r, eval=FALSE}
plot(ppc)
```
```{r, echo=FALSE, message=FALSE, warning=FALSE}
plot(ppc, col_critical = "lightblue",
col_noncritical = "lightblue")[[1]] +
xlab("Predictive Correlation")
```
The density is the predictive distribution for the correlation. Recall that this is the correlation that we would expect, given the groups were actually the same, and the black point is the observed correlation. In this case, it seems quite clear that the "null model" is inadequate--the groups are apparently quite different.
## Hamming Distance
The next example is [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance), which, in this case, is the squared error for the adjacency matrices. It seems reasonable to think of this as a test for
different network structures or patterns of zeros and ones.
### Step 1: Define Custom Function
The first step is to define a custom function that takes two data matrices and the output
is the chosen test-statistic (in this case Hamming distance)
```{r}
f <- function(Yg1, Yg2){
# nodes
p <- ncol(Yg1)
# index of off-diagonal
indices <- upper.tri( diag(p))
# fit models
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
# select graphs
sel1 <- BGGM::select(fit1)
sel2 <- BGGM::select(fit2)
# hamming distance
sum((sel1$adj[indices] - sel2$adj[indices]) ^ 2)
}
```
### Step 2: Compute the Observed Score
The next step is to compute the observed test-statistic, that is, the Hamming distance between adjacency matrices
```{r}
obs <- f(Yg1, Yg2)
# observed
obs
```
### Step 3: Predictive Check
With the function, `f`, and the observed scores, `obs`, in hand, what is left is the predictive check
```{r, message=FALSE, results='hide'}
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
```
The results can then be printed
```{r}
ppc
```
And then plot the results
```{r, message=FALSE, warning=FALSE}
plot(ppc)
```
This result is intriguing. Whereas the correlation looked at the relation between the partial correlations, here there seems to be evidence
that the adjacency matrices are different (perhaps suggesting that the conditional independence structure is different).
## Partial Correlation Matrix Distance
There might also be interest in the so-called correlation matrix distance [@herdin2005correlation]. This is also easily tested, in this case for the partial correlation matrix.
### Step 1: Define Custom Function
```{r}
f <- function(Yg1, Yg2){
# nodes
p <- ncol(Yg1)
# index of off-diagonal
indices <- upper.tri( diag(p))
# fit models
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
pcor1 <- BGGM::pcor_mat(fit1)
pcor2 <- BGGM::pcor_mat(fit2)
# CDM for partial correlations
# note: numerator is the trace; denominator is the Frobenius norm
1 - (sum(diag(pcor1 %*% pcor2)) / (norm(pcor1, type = "f") * norm(pcor2, type = "f")))
}
```
### Step 2: Compute the Observed Score
The next step is to compute the observed test-statistic, that is, the Partial Correlation Matrix Distance
```{r}
obs <- f(Yg1, Yg2)
# observed
obs
```
### Step 3: Predictive Check
With the function, `f`, and the observed scores, `obs`, in hand, what is left is the predictive check
```{r, message=FALSE, results='hide'}
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
```
The results can then be printed
```{r}
ppc
```
which again provides a p-value of zero.
Note that the object `ppc` includes the predictive samples, which allows for user-defined plots (in the event something custom is desired).
```{r}
hist(ppc$predictive_custom,
xlim = c(0, obs),
main = "Partial Correlation Matrix Distance")
abline(v = obs)
```
Note that the vertical line is the observed value, which again makes it clear that the distance is quite surprising,
assuming the null model were true.
## Assortment
This next example is assortment [@newman2003mixing], which is a measure related
to clustering in a network. Here the test is for a difference in assortment,
computed by taking the difference in assortment (as defined in the function below) for each draw
from the predictive distribution.
### Step 1: Define Custom Function
```{r}
# clusters based on DSM-5
comms <- c(
rep("A", 4),
rep("B", 7),
rep("C", 5)
)
f <- function(Yg1, Yg2){
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
pcor1 <- BGGM::pcor_mat(fit1)
pcor2 <- BGGM::pcor_mat(fit2)
assort1 <- assortnet::assortment.discrete(pcor1, types = comms,
weighted = TRUE,
SE = FALSE, M = 1)$r
assort2 <- assortnet::assortment.discrete(pcor2, types = comms,
weighted = TRUE,
SE = FALSE, M = 1)$r
(assort1 - assort2)
}
```
### Step 2: Compute the Observed Score
The next step is to compute the observed test-statistic, that is, assortment for the two groups
```{r}
obs <- f(Yg1, Yg2)
# observed
obs
```
### Step 3: Predictive Check
With the function, `f`, and the observed score, `obs`, in hand, the next step is the predictive check
```{r, message=FALSE, results='hide'}
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
```
The results can then be printed
```{r}
ppc
```
and plotted
```{r}
plot(ppc)
```
which shows that the clustering in the data appears to be different (given the observed value exceeds
the entire predictive distribution).
## Expected Influence
This last example looks at the expected influence for the network [@robinaugh2016identifying]. In this case, the sum of squared error is the test statistic. This is computed from the squared error for each
draw from the predictive distribution.
### Step 1: Define Custom Function
```{r}
f <- function(Yg1, Yg2){
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
pcor1 <- BGGM::pcor_mat(fit1)
pcor2 <- BGGM::pcor_mat(fit2)
ei1 <- networktools::expectedInf(pcor1)$step1
ei2 <- networktools::expectedInf(pcor2)$step1
sum((ei1 - ei2)^2)
}
```
### Step 2: Compute the Observed Score
The next step is to compute the observed test-statistic, that is, the sum of squared error
for expected influence
```{r}
obs <- f(Yg1, Yg2)
# observed
obs
```
### Step 3: Predictive Check
With the function, `f`, and the observed scores, `obs`, in hand, what is left is the predictive check
```{r, message=FALSE, results='hide'}
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
```
The results can then be printed
```{r}
ppc
```
and plotted
```{r}
hist(ppc$predictive_custom,
xlim = c(0, obs),
main = "Expected Influence\n Sum of Squared Error")
abline(v = obs)
```
which again shows the sum of squared error for expected influence far exceeds what would be expected, assuming
the null model were true.
# Two Notes of Caution
1. Note that only the defaults in **BGGM** have been shown to have nominal error rates. However, there is a proof that suggests the error rate cannot be larger than $2\alpha$ [@meng1994posterior], and, further, a predictive check is typically below $\alpha$ [i.e., a tendency to be conservative, @gelman2013two].
2. Failing to reject the null model does not indicate the groups are the same! To test for
equality see `ggm_compare_explore` and `ggm_compare_confirm`.
# Conclusion
These examples certainly open the door for tailoring network comparisons to answer specific research questions.
# References
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/ppc_custom.Rmd
|
## ---- eval = FALSE, message=FALSE---------------------------------------------
# # need the developmental version
# if (!requireNamespace("remotes")) {
# install.packages("remotes")
# }
#
# # install from github
# remotes::install_github("donaldRwilliams/BGGM")
# library(BGGM)
## ---- eval=FALSE--------------------------------------------------------------
# # binary data
# Y <- women_math
#
# # fit model
# fit <- estimate(Y, type = "binary")
## ---- eval=FALSE--------------------------------------------------------------
# r2 <- predictability(fit)
#
# # print
# r2
#
# #> BGGM: Bayesian Gaussian Graphical Models
# #> ---
# #> Metric: Bayes R2
# #> Type: binary
# #> ---
# #> Estimates:
# #>
# #> Node Post.mean Post.sd Cred.lb Cred.ub
# #> 1 0.016 0.012 0.002 0.046
# #> 2 0.103 0.023 0.064 0.150
# #> 3 0.155 0.030 0.092 0.210
# #> 4 0.160 0.021 0.118 0.201
# #> 5 0.162 0.022 0.118 0.202
# #> 6 0.157 0.028 0.097 0.208
# #> ---
## ---- message=FALSE, eval=FALSE-----------------------------------------------
# plot(r2,
# type = "error_bar",
# size = 4,
# cred = 0.90)
## ---- message=FALSE, eval=FALSE-----------------------------------------------
# plot(r2,
# type = "ridgeline",
# cred = 0.50)
## ---- eval=FALSE--------------------------------------------------------------
# Y <- ptsd
#
# fit <- estimate(Y + 1, type = "ordinal")
## ---- eval=FALSE--------------------------------------------------------------
# r2 <- predictability(fit)
#
# # print
# r2
#
# #> BGGM: Bayesian Gaussian Graphical Models
# #> ---
# #> Metric: Bayes R2
# #> Type: ordinal
# #> ---
# #> Estimates:
# #>
# #> Node Post.mean Post.sd Cred.lb Cred.ub
# #> 1 0.487 0.049 0.394 0.585
# #> 2 0.497 0.047 0.412 0.592
# #> 3 0.509 0.047 0.423 0.605
# #> 4 0.524 0.049 0.441 0.633
# #> 5 0.495 0.047 0.409 0.583
# #> 6 0.297 0.043 0.217 0.379
# #> 7 0.395 0.045 0.314 0.491
# #> 8 0.250 0.042 0.173 0.336
# #> 9 0.440 0.048 0.358 0.545
# #> 10 0.417 0.044 0.337 0.508
# #> 11 0.549 0.048 0.463 0.648
# #> 12 0.508 0.048 0.423 0.607
# #> 13 0.504 0.047 0.421 0.600
# #> 14 0.485 0.043 0.411 0.568
# #> 15 0.442 0.045 0.355 0.528
# #> 16 0.332 0.039 0.257 0.414
# #> 17 0.331 0.045 0.259 0.436
# #> 18 0.423 0.044 0.345 0.510
# #> 19 0.438 0.044 0.354 0.525
# #> 20 0.362 0.043 0.285 0.454
# #> ---
## ---- eval=FALSE--------------------------------------------------------------
# plot(r2)
## ---- eval=FALSE--------------------------------------------------------------
# # fit model
# fit <- estimate(Y)
#
# # predictability
# r2 <- predictability(fit)
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/predictability.R
|
---
title: "Predictability: Binary, Ordinal, and Continuous"
author: "Donny Williams"
date: "5/20/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Predictability: Binary, Ordinal, and Continuous}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Background
This vignette describes a new feature to **BGGM** (`2.0.0`) that allows for
computing network predictability for binary and ordinal data. Currently
the available option is Bayesian $R^2$ [@gelman_r2_2019].
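As a rough sketch of the idea in @gelman_r2_2019, for each posterior draw the Bayesian $R^2$ for a node is the variance of its predicted values divided by that variance plus the residual variance,

$$
R^2 = \frac{\text{Var}(\hat{y})}{\text{Var}(\hat{y}) + \text{Var}_{\text{res}}},
$$

which yields a posterior distribution of $R^2$ that is then summarized (e.g., posterior mean and credible interval).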
### R packages
```{r, eval = FALSE, message=FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
```
# Binary
The first example looks at binary data, consisting of 1190 observations and 6 variables. The data are called `women_math` and the variable descriptions are provided in **BGGM**.
The model is estimated with
```{r, eval=FALSE}
# binary data
Y <- women_math
# fit model
fit <- estimate(Y, type = "binary")
```
and then predictability is computed
```{r, eval=FALSE}
r2 <- predictability(fit)
# print
r2
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Metric: Bayes R2
#> Type: binary
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> 1 0.016 0.012 0.002 0.046
#> 2 0.103 0.023 0.064 0.150
#> 3 0.155 0.030 0.092 0.210
#> 4 0.160 0.021 0.118 0.201
#> 5 0.162 0.022 0.118 0.202
#> 6 0.157 0.028 0.097 0.208
#> ---
```
There are then two options for plotting. The first is with error bars, denoting the credible interval (i.e., `cred`),
```{r, message=FALSE, eval=FALSE}
plot(r2,
type = "error_bar",
size = 4,
cred = 0.90)
```

and the second is with a ridgeline plot
```{r, message=FALSE, eval=FALSE}
plot(r2,
type = "ridgeline",
cred = 0.50)
```

# Ordinal
In the following, the `ptsd` data is used (5-level Likert). The variable descriptions are provided in **BGGM**. This is based on the polychoric partial correlations, with $R^2$ computed from the corresponding correlations (due to the correspondence between the correlation matrix and multiple regression).
```{r, eval=FALSE}
Y <- ptsd
fit <- estimate(Y + 1, type = "ordinal")
```
The only change is switching `type` from `"binary"` to `"ordinal"`. One important
point is the `+ 1`. This is required because, for the ordinal approach, the first
category must be 1 (in `ptsd` the first category is coded as 0).
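A quick way to verify the coding (a minimal sketch) is to check the smallest observed value, which should be 0 for `ptsd` before adding 1:

```r
# smallest observed category (0 for ptsd, hence the + 1 above)
min(ptsd, na.rm = TRUE)
```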
```{r, eval=FALSE}
r2 <- predictability(fit)
# print
r2
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Metric: Bayes R2
#> Type: ordinal
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> 1 0.487 0.049 0.394 0.585
#> 2 0.497 0.047 0.412 0.592
#> 3 0.509 0.047 0.423 0.605
#> 4 0.524 0.049 0.441 0.633
#> 5 0.495 0.047 0.409 0.583
#> 6 0.297 0.043 0.217 0.379
#> 7 0.395 0.045 0.314 0.491
#> 8 0.250 0.042 0.173 0.336
#> 9 0.440 0.048 0.358 0.545
#> 10 0.417 0.044 0.337 0.508
#> 11 0.549 0.048 0.463 0.648
#> 12 0.508 0.048 0.423 0.607
#> 13 0.504 0.047 0.421 0.600
#> 14 0.485 0.043 0.411 0.568
#> 15 0.442 0.045 0.355 0.528
#> 16 0.332 0.039 0.257 0.414
#> 17 0.331 0.045 0.259 0.436
#> 18 0.423 0.044 0.345 0.510
#> 19 0.438 0.044 0.354 0.525
#> 20 0.362 0.043 0.285 0.454
#> ---
```
Here is the `error_bar` plot.
```{r, eval=FALSE}
plot(r2)
```

Note that the plot object is a `ggplot`, which allows for further customization (e.g., adding the variable names, a title, etc.).
# Continuous
It is quite common to compute predictability assuming that the data are Gaussian. In the context of Bayesian GGMs, this was introduced in [@Williams2019]. This can also be implemented in **BGGM**.
```{r, eval=FALSE}
# fit model
fit <- estimate(Y)
# predictability
r2 <- predictability(fit)
```
`type` is not specified here, which indicates that `continuous` (the default) is used.
# Note
$R^2$ for binary and ordinal data is computed for the underlying latent variables. This is also the case
when `type = "mixed"` (a semi-parametric copula). In future releases, there will be support for predicting
the variables on the observed scale.
# References
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/predictability.Rmd
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ---- eval = FALSE, message=FALSE---------------------------------------------
# # need the developmental version
# if (!requireNamespace("remotes")) {
# install.packages("remotes")
# }
#
# # install from github
# remotes::install_github("donaldRwilliams/BGGM")
# library(BGGM)
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/test_sum.R
|
---
title: "Testing Sums"
author: "Donny Williams"
date: "5/25/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Testing Sums}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
# Introduction
This is a follow-up to the vignette ["Three Ways to Test the Same Hypothesis"](https://donaldrwilliams.github.io/BGGM/articles/hyp_3_ways.html). A
new feature, `pcor_sum`, was added to **BGGM** that allows for testing partial correlation sums.
This differs from the Bayes factor approach ("Approach #3"), in that only the posterior
distribution is used to determine whether there is a difference in the sums.
### R package
```{r, eval = FALSE, message=FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
```
# One Group
This first example looks at one group, where a sum is tested within the same `ptsd` network. I focus on the
relations between the re-experiencing (`B`) and avoidance (`C`) communities. In particular, the sum of relations between the "Intrusion" (five nodes) community and the "Avoidance" (two nodes) community is tested.
## Sum to String
For the avoidance symptom "avoidance of thoughts" `C1`, this can be written in `R` code with
```
# ptsd
Y <- ptsd
# paste together sums
paste0(colnames(Y)[1:5], "--C1", collapse = " + ")
#> "B1--C1 + B2--C1 + B3--C1 + B4--C1 + B5--C1"
```
whereas, for the avoidance symptom "avoidance of reminders" (`C2`), this is written as
```
paste0(colnames(Y)[1:5], "--C2", collapse = " + ")
#> "B1--C2 + B2--C2 + B3--C2 + B4--C2 + B5--C2"
```
Note that typically this would have to be written out. `paste0` was used in this case to
avoid typing out all of the relations.
## Fit Model
Here an ordinal GGM is fitted
```
fit <- estimate(Y+1, type = "ordinal", iter = 1000)
```
where the `+1` changes the first category from 0 to 1 (required).
## Test Sums
The next step is to use the `pcor_sum` function. First, I combine the sums into one string separated with `;`.
```
# sum 1
sum1 <- paste0(colnames(Y)[1:5], "--C1", collapse = " + ")
# sum 2
sum2 <- paste0(colnames(Y)[1:5], "--C2", collapse = " + ")
# paste together
sums <- paste(sum1, sum2, sep = ";")
# print
sums
#> "B1--C1 + B2--C1 + B3--C1 + B4--C1 + B5--C1;B1--C2 + B2--C2 + B3--C2 + B4--C2 + B5--C2"
```
Next `pcor_sum` is used
```
test_sum <- pcor_sum(fit, relations = sums)
# print
test_sum
# BGGM: Bayesian Gaussian Graphical Models
# ---
# Network Stats: Posterior Sum
# Posterior Samples: 1000
# ---
# Estimates
#
# Sum:
# Post.mean Post.sd Cred.lb Cred.ub
# B1--C1+B2--C1+B3--C1+B4--C1+B5--C1 0.215 0.096 0.034 0.404
# B1--C2+B2--C2+B3--C2+B4--C2+B5--C2 0.334 0.097 0.145 0.514
# ---
#
# Difference:
# B1--C1+B2--C1+B3--C1+B4--C1+B5--C1 - B1--C2+B2--C2+B3--C2+B4--C2+B5--C2
#
# Post.mean Post.sd Cred.lb Cred.ub Prob.greater Prob.less
# -0.119 0.145 -0.409 0.173 0.205 0.795
# ---
```
`Prob.greater` is the posterior probability that the first sum is larger than the second sum.
## Plot Results
The object `test_sum` can then be plotted. Note this returns three plots, but only the difference is shown here
```
plot(test_sum)$diff
```

The histogram is not very smooth in this case because `iter = 1000`, but this of course can be changed.
# Two Groups
This next example is for two groups. The data are called `bfi` and they are in the **BGGM** package. I compare a sum of two relations for questions measuring agreeableness in males and females. The relations tested are as follows
## Sum to String
```r
sums <- c("A3--A4 + A4--A5")
```
where `A3` is "know how to comfort others", `A4` is "love children", and `A5` is "make people feel at ease".
## Fit Models
The next step is to fit the models
```r
# data
Y <- bfi
# males
Y_males <- subset(Y, gender == 1, select = -c(education, gender))[,1:5]
# females
Y_females <- subset(Y, gender == 2, select = -c(education, gender))[,1:5]
fit_female <- estimate(Y_females, seed = 2)
# fit males
fit_male <- estimate(Y_males, seed = 1)
```
## Test Sums
Then test the sum
```r
test_sum <- pcor_sum(fit_female, fit_male, relations = sums)
# print
test_sum
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Network Stats: Posterior Sum
#> Posterior Samples: 5000
#> ---
#> Estimates
#>
#> Sum:
#> Post.mean Post.sd Cred.lb Cred.ub
#> g1: A3--A4+A4--A5 0.292 0.026 0.241 0.342
#> g2: A3--A4+A4--A5 0.305 0.036 0.234 0.375
#> ---
#>
#> Difference:
#> g1: A3--A4+A4--A5 - g2: A3--A4+A4--A5
#>
#> Post.mean Post.sd Cred.lb Cred.ub Prob.greater Prob.less
#> -0.014 0.045 -0.1 0.074 0.386 0.614
#> ---
```
## Sanity Check
For a kind of sanity check, here is the sum for the male group obtained from the point estimates.
```r
pcor_mat(fit_male)["A3", "A4"] + pcor_mat(fit_male)["A4", "A5"]
#> 0.305
```
This matches the output.
# Notes
By default, the print function for `pcor_sum` provides 95 % credible intervals. This can be changed by
using the print function directly; for example, `print(test_sum, cred = 0.99)` provides
99 % credible intervals.
Currently, this function only supports sums, due to this being of interest for the psychological network
literature in particular. This can be extended to accommodate multiplication, subtraction,
testing values other than zero, etc. Please make a feature request at either
[github](https://github.com/donaldRwilliams/BGGM/issues) or [BGGM-users group](https://groups.google.com/forum/#!forum/bggm-users).
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/test_sum.Rmd
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/var_model.R
|
---
title: "Graphical VAR"
author: "Donny Williams"
date: "6/04/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Graphical VAR}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
# Introduction
This vignette describes the implementation of a "graphical VAR" in `BGGM`. In `BGGM`, this is fitted as a multivariate regression. The key innovation is a novel prior distribution for the residual covariance matrix. There are a variety of much cooler names than a *mere* "multivariate regression", including "VAR" (vector autoregressive model) and "TSCGM" (time series chain graphical model).
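As a sketch of the lag-1 model fitted below, each node at time $t$ is regressed on all nodes at time $t - 1$, and the network (partial correlations) comes from the residual covariance matrix:

$$
\mathbf{y}_t = \mathbf{B}\, \mathbf{y}_{t-1} + \boldsymbol{\epsilon}_t, \quad \boldsymbol{\epsilon}_t \sim \mathcal{N}(\mathbf{0}, \boldsymbol{\Sigma}),
$$

where $\mathbf{B}$ holds the (temporal) regression coefficients and $\boldsymbol{\Sigma}$ is the residual covariance matrix.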
## R package
```
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
# for comparison
library(vars)
# for plotting
library(qgraph)
# combine plots
library(cowplot)
```
## Data
I use data from the ifit (fit bit) study. The data were gathered over 100 consecutive days on a variety of variables, including the PANAS scale (positive and negative affect) and the number of steps each day. `BGGM` includes a subset of variables for two individuals.
```r
# data
Y <- subset(ifit, id == 1)[,-1]
# first 3 rows
head(Y, n = 3)
#> interested disinterested excited upset strong stressed steps
#> 72 10 50 2 50 16 7805
#> 75 6 75 0 76 0 18248
#> 36 58 38 5 45 1 12139
```
# Estimation
The methods in **BGGM** are organized around Bayesian "estimation" and "hypothesis testing".
This is to reach a broader audience, as the former is more similar to classical
methods (those more familiar to researchers).
## Fit Model
With the data in hand, the model is fitted as follows
```
# fit model
fit <- var_estimate(Y, beta_sd = 10)
```
Note that `beta_sd` is the standard deviation of the prior distribution for the regression coefficients. A smaller value, say, `beta_sd = 0.25`, results in a Bayesian ridge regression. Note also that this model, including 5000 draws from the posterior, was estimated in less than 1 second.
The results can then be printed
```r
# print
fit
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Vector Autoregressive Model (VAR)
#> ---
#> Posterior Samples: 5000
#> Observations (n): 94
#> Nodes (p): 7
#> ---
#> Call:
#> var_estimate(Y = Y, beta_sd = 10)
#> ---
#> Partial Correlations:
#>
#> interested disinterested excited upset strong stressed steps
#> interested 0.000 -0.170 0.388 -0.217 0.313 0.268 0.089
#> disinterested -0.170 0.000 -0.172 -0.029 0.094 0.160 -0.078
#> excited 0.388 -0.172 0.000 -0.126 0.500 -0.161 -0.016
#> upset -0.217 -0.029 -0.126 0.000 0.118 0.350 -0.039
#> strong 0.313 0.094 0.500 0.118 0.000 -0.010 0.176
#> stressed 0.268 0.160 -0.161 0.350 -0.010 0.000 -0.038
#> steps 0.089 -0.078 -0.016 -0.039 0.176 -0.038 0.000
#> ---
#> Coefficients:
#>
#> interested disinterested excited upset strong stressed steps
#> interested.l1 0.230 -0.009 0.182 -0.102 0.178 0.018 0.113
#> disinterested.l1 -0.051 -0.007 0.056 -0.019 0.049 0.091 -0.023
#> excited.l1 -0.088 -0.196 0.003 0.057 -0.093 0.092 0.106
#> upset.l1 -0.155 0.262 -0.097 0.435 0.057 0.324 -0.091
#> strong.l1 0.026 0.182 0.026 0.048 0.189 -0.073 -0.196
#> stressed.l1 -0.021 -0.014 -0.033 -0.048 -0.079 0.152 0.133
#> steps.l1 -0.157 0.180 -0.211 0.155 -0.092 0.209 0.042
#> ---
#> Date: Thu Jun 04 08:54:04 2020
```
Note that the coefficients are comparable, given each variable has been standardized (e.g., the predictors
and the outcome are standardized). `BGGM` does not compute the partial directed correlation (PDC) by default (as in **graphicalVAR**). This is because the standardized effects can readily be tested with the Bayes factor, both across and within each model, whereas this does not seem straightforward for the PDC (which requires a transformation).
### Compare to Classical
Here are the estimates from the `vars` package
```r
t(round(
vars::Bcoef(
vars:::VAR(scale(na.omit(Y)), type = "none")),
digits = 3)
)
#> interested disinterested excited upset strong stressed steps
#> interested.l1 0.229 -0.012 0.184 -0.100 0.180 0.015 0.112
#> disinterested.l1 -0.050 -0.006 0.057 -0.019 0.050 0.092 -0.022
#> excited.l1 -0.088 -0.193 0.002 0.056 -0.091 0.093 0.106
#> upset.l1 -0.155 0.260 -0.096 0.436 0.058 0.321 -0.092
#> strong.l1 0.027 0.182 0.025 0.047 0.188 -0.073 -0.192
#> stressed.l1 -0.021 -0.012 -0.033 -0.046 -0.077 0.152 0.133
#> steps.l1 -0.157 0.183 -0.210 0.153 -0.093 0.207 0.041
```
Recall that the "estimation" methods are similar to, in this case, ordinary least squares. The graphical structure in `BGGM` is determined with credible intervals, which will be quite similar to using confidence
intervals. Hence for those researchers unfamiliar with Bayesian methods the "estimation" methods are perhaps
a nice place to start.
## Summarize Model
The model can also be summarized with
```r
print(
summary(fit, cred = 0.95),
param = "pcor"
)
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Vector Autoregressive Model (VAR)
#> ---
#> Partial Correlations:
#>
#> Relation Post.mean Post.sd Cred.lb Cred.ub
#> interested--disinterested -0.170 0.108 -0.382 0.044
#> interested--excited 0.388 0.085 0.219 0.546
#> disinterested--excited -0.172 0.104 -0.369 0.049
#> interested--upset -0.217 0.106 -0.417 0.000
#> disinterested--upset -0.029 0.101 -0.239 0.161
#> excited--upset -0.126 0.098 -0.315 0.066
#> interested--strong 0.313 0.090 0.135 0.480
#> disinterested--strong 0.094 0.112 -0.120 0.318
#> excited--strong 0.500 0.078 0.337 0.645
#> upset--strong 0.118 0.109 -0.100 0.325
#> interested--stressed 0.268 0.102 0.058 0.460
#> disinterested--stressed 0.160 0.100 -0.049 0.351
#> excited--stressed -0.161 0.099 -0.358 0.031
#> upset--stressed 0.350 0.091 0.166 0.519
#> strong--stressed -0.010 0.107 -0.212 0.201
#> interested--steps 0.089 0.108 -0.123 0.297
#> disinterested--steps -0.078 0.108 -0.284 0.125
#> excited--steps -0.016 0.100 -0.207 0.182
#> upset--steps -0.039 0.107 -0.245 0.178
#> strong--steps 0.176 0.101 -0.024 0.364
#> stressed--steps -0.038 0.108 -0.236 0.193
#> ---
```
The coefficients can also be printed by changing `param` to either `all` or `beta`. The summary can also be plotted. Here are the coefficients
```r
plts <- plot(summary(fit, cred = 0.95))
cowplot::plot_grid(
cowplot::plot_grid(
plts$beta_plt$interested,
plts$beta_plt$disinterested,
plts$beta_plt$excited,
nrow = 1),
cowplot::plot_grid(
plts$beta_plt$upset,
plts$beta_plt$strong,
plts$beta_plt$stressed,
nrow = 1
),
nrow = 2)
```

There is a plot for the partial correlations in the object `plts`.
## Select Graph
The graphs are selected with
```r
sel <- select(fit, cred = 0.95)

# print
sel
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Vector Autoregressive Model (VAR)
#> ---
#> Posterior Samples: 5000
#> Credible Interval: 95 %
#> ---
#> Call:
#> var_estimate(Y = Y, beta_sd = 10)
#> ---
#> Partial Correlations:
#>
#> interested disinterested excited upset strong stressed steps
#> interested 0.000 0 0.388 -0.217 0.313 0.268 0
#> disinterested 0.000 0 0.000 0.000 0.000 0.000 0
#> excited 0.388 0 0.000 0.000 0.500 0.000 0
#> upset -0.217 0 0.000 0.000 0.000 0.350 0
#> strong 0.313 0 0.500 0.000 0.000 0.000 0
#> stressed 0.268 0 0.000 0.350 0.000 0.000 0
#> steps 0.000 0 0.000 0.000 0.000 0.000 0
#> ---
#> Coefficients:
#>
#> interested disinterested excited upset strong stressed steps
#> interested.l1 0 0.000 0 0.000 0 0.000 0
#> disinterested.l1 0 0.000 0 0.000 0 0.000 0
#> excited.l1 0 0.000 0 0.000 0 0.000 0
#> upset.l1 0 0.262 0 0.435 0 0.324 0
#> strong.l1 0 0.000 0 0.000 0 0.000 0
#> stressed.l1 0 0.000 0 0.000 0 0.000 0
#> steps.l1 0 0.000 0 0.000 0 0.209 0
#> ---
```
# Plot Graph
For plotting, I use the **qgraph** package.
```r
par(mfrow=c(1,2))
qgraph::qgraph(sel$pcor_weighted_adj, title = "Partials")
qgraph::qgraph(sel$beta_weighted_adj, title = "Coefficients")
```

# Predictability
Finally, it is also possible to compute predictability, in this case Bayesian $R^2$
```r
r2 <- predictability(fit)
# print
r2
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Metric: Bayes R2
#> Type: continuous
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> interested 0.144 0.057 0.050 0.271
#> disinterested 0.166 0.061 0.060 0.302
#> excited 0.127 0.054 0.039 0.250
#> upset 0.220 0.070 0.093 0.368
#> strong 0.116 0.051 0.035 0.232
#> stressed 0.227 0.069 0.102 0.373
#> steps 0.105 0.047 0.032 0.210
```
The object `r2` can also be plotted
```r
plot(r2, type = "ridgeline")
```

# Explore
Bayesian (exploratory) testing to come...
# Confirm
Bayesian (confirmatory) testing to come...
|
/scratch/gouwar.j/cran-all/cranData/BGGM/inst/doc/var_model.Rmd
|
---
title: "Controlling for Variables"
author: "Donny Williams"
date: "5/25/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Controlling for Variables}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Introduction
This vignette describes how to control for variables. This is a new feature to **BGGM** (version `2.0.0`).
# Example 1: Multivariate Regression
When controlling for variables, a multivariate regression is fitted in **BGGM**. In fact, a GGM can be understood as a multivariate regression with intercept-only models (i.e., no predictors).
## Notes about Implementation
**BGGM** does not use the typical approach for multivariate regression in `R`. This avoids having to
write out each outcome variable, of which there are typically many in a GGM. In **BGGM**, it is assumed
that the data matrix includes only the variables to be included in the GGM and the control variables.
### Correct
Suppose that we want to control for education level, with five variables included in the
GGM.
```r
# data
Y <- bfi[,c(1:5, 27)]
# head
head(Y)
#> A1 A2 A3 A4 A5 education
#> 61617 2 4 3 4 4 NA
#> 61618 2 4 5 2 5 NA
#> 61620 5 4 5 4 4 NA
#> 61621 4 4 6 5 5 NA
#> 61622 2 3 3 4 5 NA
#> 61623 6 6 5 6 5 3
```
Notice that `Y` includes **only** the five variables and `education`.
## Fit Model
This model can then be fitted with
```
fit <- estimate(Y, formula = ~ as.factor(education))
```
To show this is indeed a multivariate regression, here are the summarized regression coefficients for the first
outcome.
```
summ_coef <- regression_summary(fit)
# outcome one
summ_coef$reg_summary[[1]]
#> Post.mean Post.sd Cred.lb Cred.ub
#> (Intercept) 0.256 0.095 0.072 0.442
#> as.factor(education)2 0.073 0.128 -0.177 0.323
#> as.factor(education)3 -0.202 0.104 -0.405 -0.001
#> as.factor(education)4 -0.462 0.119 -0.691 -0.233
#> as.factor(education)5 -0.578 0.117 -0.815 -0.346
```
And here are the coefficients from `lm` (a univariate regression for `A1`)
```
round(
cbind(
# summary: coef and se
summary( lm(scale(A1, scale = F) ~ as.factor(education), data = Y))$coefficients[,1:2],
# confidence interval
confint( lm(scale(A1, scale = F) ~ as.factor(education), data = Y))
), 3)
#> Estimate Std. Error 2.5 % 97.5 %
#> (Intercept) 0.256 0.093 0.073 0.438
#> as.factor(education)2 0.072 0.125 -0.172 0.316
#> as.factor(education)3 -0.203 0.101 -0.401 -0.004
#> as.factor(education)4 -0.461 0.116 -0.690 -0.233
#> as.factor(education)5 -0.578 0.115 -0.804 -0.351
```
The estimates are very (very) similar.
## Summary
Note that all the other functions work just the same. For example, the relations controlling for education
are summarized with
```
summary(fit)
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Type: continuous
#> Analytic: FALSE
#> Formula: ~ as.factor(education)
#> Posterior Samples: 5000
#> Observations (n):
#> Nodes (p): 5
#> Relations: 10
#> ---
#> Call:
#> estimate(Y = Y, formula = ~as.factor(education))
#> ---
#> Estimates:
#> Relation Post.mean Post.sd Cred.lb Cred.ub
#> A1--A2 -0.239 0.020 -0.278 -0.200
#> A1--A3 -0.109 0.020 -0.150 -0.070
#> A2--A3 0.276 0.019 0.239 0.312
#> A1--A4 -0.013 0.021 -0.055 0.026
#> A2--A4 0.156 0.020 0.117 0.196
#> A3--A4 0.173 0.020 0.134 0.214
#> A1--A5 -0.010 0.020 -0.050 0.029
#> A2--A5 0.150 0.020 0.111 0.189
#> A3--A5 0.358 0.018 0.322 0.392
#> A4--A5 0.121 0.020 0.082 0.159
#> ---
```
### Incorrect
Now if we wanted to control for education, but also had gender in `Y`, this would be incorrect
```
Y <- bfi[,c(1:5, 26:27)]
head(Y)
#> A1 A2 A3 A4 A5 gender education
#> 61617 2 4 3 4 4 1 NA
#> 61618 2 4 5 2 5 2 NA
#> 61620 5 4 5 4 4 2 NA
#> 61621 4 4 6 5 5 2 NA
#> 61622 2 3 3 4 5 1 NA
#> 61623 6 6 5 6 5 2 3
```
In this case, with `estimate(Y, formula = ~ as.factor(education))`, the GGM would also include `gender`
(six variables instead of the desired five). This is because all variables not included in `formula` are included in the GGM. This was adopted in **BGGM** to save the user from having to write out each outcome.
This differs from `lm`, where each outcome needs to be written out, for example `cbind(A1, A2, A3, A4, A5) ~ as.factor(education)`. This is quite cumbersome for a model that includes many nodes.
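A minimal sketch of one way to handle this (assuming `gender` should not appear in the model at all) is to drop the column before fitting:

```r
# drop gender so that only the five items and education remain
Y <- subset(Y, select = -gender)

# control for education as before
fit <- estimate(Y, formula = ~ as.factor(education))
```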
# Example 2: Multivariate Probit
The above data are ordinal. In this case, it is possible to fit a multivariate probit model. This is also the approach for binary data in **BGGM**. This is implemented with
```
fit <- estimate(Y, formula = ~ as.factor(education),
type = "ordinal", iter = 1000)
```
Note that the multivariate probit models can also be summarized with `regression_summary`.
# Example 3: Gaussian Copula Graphical Model
This final example fits a Gaussian copula graphical model that can be used for mixed data. In this case,
`formula` is not used and instead all of the variables are included in the GGM.
## Fit Model
This model is estimated with
```
# data
Y <- na.omit(bfi[,c(1:5, 27)])
# fit type = "mixed"
fit <- estimate(Y, type = "mixed", iter = 1000)
# summary
summary(fit)
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Type: mixed
#> Analytic: FALSE
#> Formula:
#> Posterior Samples: 1000
#> Observations (n):
#> Nodes (p): 6
#> Relations: 15
#> ---
#> Call:
#> estimate(Y = Y, type = "mixed", iter = 1000)
#> ---
#> Estimates:
#> Relation Post.mean Post.sd Cred.lb Cred.ub
#> A1--A2 -0.217 0.048 -0.294 -0.114
#> A1--A3 -0.063 0.027 -0.113 -0.011
#> A2--A3 0.364 0.023 0.317 0.410
#> A1--A4 0.116 0.038 0.048 0.192
#> A2--A4 0.241 0.031 0.182 0.303
#> A3--A4 0.228 0.026 0.174 0.275
#> A1--A5 0.057 0.031 0.003 0.120
#> A2--A5 0.186 0.027 0.135 0.241
#> A3--A5 0.438 0.019 0.399 0.474
#> A4--A5 0.151 0.025 0.103 0.199
#> A1--education -0.016 0.069 -0.125 0.119
#> A2--education 0.063 0.049 -0.016 0.162
#> A3--education 0.049 0.025 0.002 0.099
#> A4--education 0.053 0.026 0.005 0.105
#> A5--education 0.072 0.024 0.024 0.120
#> ---
```
Here it is clear that education is included in the model, as the relations with the other nodes are included in the output.
## Select Graph
The graph is selected with
```
select(fit)
```
# Note
It is possible to control for variables with all methods in **BGGM**, including when comparing groups, Bayesian hypothesis testing, etc.
|
/scratch/gouwar.j/cran-all/cranData/BGGM/vignettes/control.Rmd
|
---
title: "Three Ways to Test the Same Hypothesis"
author: "Donny Williams"
date: "5/23/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Three Ways to Test the Same Hypothesis}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Introduction
On a Facebook methods group, there was a question about testing hypotheses in networks. In
the comments, it was suggested that **BGGM** could be used to test the hypothesis. And it turns
out that **BGGM** really shines for testing expectations [see for example @rodriguez2020formalizing].
In this vignette, I demonstrate three ways to go about testing the same hypothesis, which is
essentially testing for a difference in the **sum** of partial correlations between groups.
### R package
```{r, eval = FALSE, message=FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
```
### Data
For demonstrative purposes, I use the `bfi` data and test the hypotheses in males and females
```
# data
Y <- bfi
# males
Y_males <- subset(Y, gender == 1, select = -c(education, gender))[,1:5]
# females
Y_females <- subset(Y, gender == 2, select = -c(education, gender))[,1:5]
```
# Approach 1: Posterior Difference
The first approach is rather straightforward, with the caveat that the method needs to be
implemented by the user. Note that I could certainly implement this in **BGGM**, assuming there
is enough interest. Please make a feature request [here](https://github.com/donaldRwilliams/BGGM/issues).
## Hypothesis
The hypothesis was that a sum of relations was larger in one group, for example,
$$
\begin{align}
\mathcal{H}_0: (\rho^{male}_{A1--A2}\; + \; \rho^{male}_{A1--A3}) = (\rho^{female}_{A1--A2}\; + \; \rho^{female}_{A1--A3}) \\
\mathcal{H}_1: (\rho^{male}_{A1--A2}\; + \; \rho^{male}_{A1--A3}) > (\rho^{female}_{A1--A2}\; + \; \rho^{female}_{A1--A3})
\end{align}
$$
Note that the hypothesis is related to the sum of relations, which is readily tested in **BGGM**.
## Fit Models
The first step is to estimate the model for each group
```r
# fit female
fit_female <- estimate(Y_females, seed = 2)
# fit males
fit_male <- estimate(Y_males, seed = 1)
```
For this example, I used the default, which is to assume the data are Gaussian. This can be changed with `type =` set to `"binary"`, `"ordinal"`, or `"mixed"`.
## Extract the Samples
The next step is to extract the posterior samples for each relation
```r
post_male <- posterior_samples(fit_male)[,c("A1--A2", "A1--A3")]
post_female <- posterior_samples(fit_female)[,c("A1--A2", "A1--A3")]
```
Note that the column names reflect the upper-triangular elements of the
partial correlation matrix. Hence, the first name (e.g., `A1`) must be located before
the second name (e.g., `A2`) in the data matrix. This can be understood in reference
to the column numbers: `1--2` is correct whereas `2--1` will result in an error.
## Sum and Compute Difference
The next step is to sum the relations and compute the difference
```r
# sum males
sum_male <- rowSums(post_male)
# sum females
sum_female <- rowSums(post_female)
# difference
diff <- sum_male - sum_female
```
which can then be plotted
```r
# three column
par(mfrow=c(1,3))
# male sum
hist(sum_male)
# female sum
hist(sum_female)
# difference
hist(diff)
```

## Posterior Probability
Next compute the posterior probability the sum is larger in males than females
```r
# posterior prob
mean(sum_male > sum_female)
#> 0.737
```
and then the credible interval for the difference
```
quantile(diff, probs = c(0.025, 0.975))
#> 2.5% 97.5%
#> -0.06498586 0.12481253
```
# Approach 2: Predictive Check
The next approach is based on a posterior predictive check. The hypothesis is essentially the same as above, but for the predictive distribution, that is,
$$
\begin{align}
\mathcal{H}_0: (\rho^{male^{yrep}}_{A1--A2}\; + \; \rho^{male^{yrep}}_{A1--A3}) = (\rho^{female^{yrep}}_{A1--A2}\; + \; \rho^{female^{yrep}}_{A1--A3}) \\
\mathcal{H}_1: (\rho^{male^{yrep}}_{A1--A2}\; + \; \rho^{male^{yrep}}_{A1--A3}) > (\rho^{female^{yrep}}_{A1--A2}\; + \; \rho^{female^{yrep}}_{A1--A3})
\end{align}
$$
where the only difference is $yrep$. See more details [here](https://donaldrwilliams.github.io/BGGM/articles/ppc_custom.html).
## Define Function
The first step is to define a function to compute the difference in sums
```r
# colnames
cn <- colnames(Y_males)
# function
f <- function(Yg1, Yg2){
# data
Yg1 <- na.omit(Yg1)
Yg2 <- na.omit(Yg2)
# estimate partials
fit1 <- pcor_mat(estimate(Yg1, analytic = TRUE))
fit2 <- pcor_mat(estimate(Yg2, analytic = TRUE))
# names (not needed)
colnames(fit1) <- cn
rownames(fit1) <- cn
colnames(fit2) <- cn
rownames(fit2) <- cn
# take sum
sum1 <- fit1["A1", "A2"] + fit1["A1", "A3"]
sum2 <- fit2["A1", "A2"] + fit2["A1", "A3"]
# difference
sum1 - sum2
}
```
Note that the function takes two data matrices and then returns a single value.
Also, the default in **BGGM** does not require a custom function
(only needs the data from each group).
## Predictive Check
The next step is to compute the observed difference and then perform the check.
```r
# observed
obs <- f(Y_males, Y_females)
# check
ppc <- ggm_compare_ppc(Y_males, Y_females,
iter = 250,
FUN = f,
custom_obs = obs)
# print
ppc
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Test: Global Predictive Check
#> Posterior Samples: 250
#> Group 1: 896
#> Group 2: 1813
#> Nodes: 5
#> Relations: 10
#> ---
#> Call:
#> ggm_compare_ppc(Y_males, Y_females, iter = 250, FUN = f, custom_obs = obs)
#> ---
#> Custom:
#>
#> contrast custom.obs p.value
#> Yg1 vs Yg2 0.029 0.264
#> ---
```
Note this requires the user to determine $\alpha$.
## Plot
The check can also be plotted
```r
plot(ppc)
```

where the red is the critical region.
# Approach 3: Bayesian Hypothesis Testing
The above approaches cannot provide evidence that the sum is equal. In other words, just because there was
not a difference, this does not provide evidence for equality. The Bayes factor methods allow for formally
assessing the equality model, that is,
$$
\begin{align}
\mathcal{H}_1&: (\rho^{male}_{A1--A2}\; + \; \rho^{male}_{A1--A3}) > (\rho^{female}_{A1--A2}\; + \; \rho^{female}_{A1--A3}) \\
\mathcal{H}_2&: (\rho^{male}_{A1--A2}\; + \; \rho^{male}_{A1--A3}) = (\rho^{female}_{A1--A2}\; + \; \rho^{female}_{A1--A3}) \\
\mathcal{H}_3&: \text{not} \; \mathcal{H}_1 \; \text{or} \; \mathcal{H}_2
\end{align}
$$
where $\mathcal{H}_3$ is the complement and can be understood as neither the first nor the second hypothesis.
## Test Hypothesis
The hypothesis is easily translated to `R` code
```r
hyp <- c("g1_A1--A2 + g1_A1--A3 > g2_A1--A2 + g2_A1--A3;
g1_A1--A2 + g1_A1--A3 = g2_A1--A2 + g2_A1--A3")
```
Note the `g1` indicates the group and `;` separates the hypotheses. I again assume the data is Gaussian
(although this can be changed to `type = "ordinal"` or `type = "mixed"`; see [here](https://donaldrwilliams.github.io/BGGM/reference/ggm_compare_confirm.html))
```r
test <- ggm_compare_confirm(Y_males, Y_females,
hypothesis = hyp)
# print
test
#> BGGM: Bayesian Gaussian Graphical Models
#> Type: continuous
#> ---
#> Posterior Samples: 25000
#> Group 1: 896
#> Group 2: 1813
#> Variables (p): 5
#> Relations: 10
#> Delta: 15
#> ---
#> Call:
#> ggm_compare_confirm(Y_males, Y_females, hypothesis = hyp)
#> ---
#> Hypotheses:
#>
#> H1: g1_A1--A2+g1_A1--A3>g2_A1--A2+g2_A1--A3
#> H2: g1_A1--A2+g1_A1--A3=g2_A1--A2+g2_A1--A3
#> H3: complement
#> ---
#> Posterior prob:
#>
#> p(H1|data) = 0.13
#> p(H2|data) = 0.825
#> p(H3|data) = 0.046
#> ---
#> Bayes factor matrix:
#> H1 H2 H3
#> H1 1.000 0.158 2.853
#> H2 6.349 1.000 18.113
#> H3 0.351 0.055 1.000
#> ---
#> note: equal hypothesis prior probabilities
```
Note the posterior hypothesis probability for the equality model is 0.825. The Bayes factor matrix then divides those values, for example, $BF_{21}$ indicates the data were about 6 times more likely under $\mathcal{H}_2$ than $\mathcal{H}_1$.
## Plot Hypothesis
The hypothesis can be plotted
```r
plot(test)
```

### Sensitivity Analysis
It is also important to check the robustness. Here the width of the prior distribution is decreased
```r
test <- ggm_compare_confirm(Y_males, Y_females,
hypothesis = hyp,
prior_sd = 0.15)
# print
test$out_hyp_prob
#> 0.18523406 0.74906147 0.06570447
```
which results in a probability of 0.75 for $\mathcal{H}_2$ ($BF_{21} = 4.04$).
# Conclusion
Three approaches for testing the same hypothesis were demonstrated in this vignette. This highlights that any hypothesis can be tested in **BGGM** and in several ways.
# References
|
/scratch/gouwar.j/cran-all/cranData/BGGM/vignettes/hyp_3_ways.Rmd
|
---
title: "In Tandem: Confirmatory and Exploratory Testing"
author: "Donny Williams"
date: "5/23/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{In Tandem: Confirmatory and Exploratory Testing}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
The blog post, "Tutorial: Bayesian Testing of Central Structures in Psychological Networks," is hosted on a different website.
# <i class="fas fa-external-link-square-alt"></i> [External Link](https://josue.rbind.io/post/tutorial-bayesian-testing/)
|
/scratch/gouwar.j/cran-all/cranData/BGGM/vignettes/in_tandem.Rmd
|
---
title: "Troubleshoot"
author: "Donny Williams"
date: "5/20/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Troubleshoot}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
# OSX
## Error 1: Missing a Fortran Compiler
The most common error seems to be the following (or similar).
```
E> ld: warning: directory not found for option '-L/usr/local/gfortran/lib'
E> ld: library not found for -lgfortran
E> clang: error: linker command failed with exit code 1 (use -v to see invocation)
```
This indicates that the Fortran compiler is missing. It can be installed [here](https://github.com/fxcoudert/gfortran-for-macOS/releases/tag/8.2?fbclid=IwAR2SyHWB2BzFcY7bpHYW8VzNvlDsy4Gw1QxUMueXB3H0fXicCWoMbE7Ypac) (the `.dmg` file).
## Error 2: Outdated R and/or R-studio
Typically the above has solved the issue. If not, then an additional error could be
```
Error: (converted from warning) Setting LC_CTYPE failed, using "C"
```
This was solved by updating both R and R-studio. More information can be found [here](https://stackoverflow.com/questions/9689104/installing-r-on-mac-warning-messages-setting-lc-ctype-failed-using-c?fbclid=IwAR0DSaPeWOvRyfIsCx4Tjvz9-jZUh2ySXQIHnzqwbqL2_idfPlFF3j6mOe8).
## Error 3: Xcode missing
If that does not work, then perhaps `Xcode` is missing. This can be installed at the "Mac App Store".
## GitHub Issues
The following are links to issues on github for troubleshooting installation of **BGGM** on OSX.
* [https://github.com/donaldRwilliams/BGGM/issues/26](https://github.com/donaldRwilliams/BGGM/issues/26)(closed)
|
/scratch/gouwar.j/cran-all/cranData/BGGM/vignettes/installation.Rmd
|
---
title: "MCMC Diagnostics"
author: "Donny Williams"
date: "5/20/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{MCMC Diagnostics}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Introduction
The algorithms in **BGGM** are based on Gibbs samplers. In the context of
covariance matrix estimation, as opposed to, say, hierarchical models,
this allows for efficiently sampling the posterior distribution. Furthermore, in all samplers
the empirical covariance matrix is used as the starting value, which reduces
the length of the burn-in (or warm-up). Still, it is important to monitor convergence. See [here](https://sbfnk.github.io/mfiidd/mcmc_diagnostics.html) for an introduction to MCMC diagnostics.
### R packages
```{r, eval = FALSE, message=FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
```
```{r, echo=FALSE, message=FALSE}
library(BGGM)
```
# ACF plot
This first example includes an "acf" plot that looks at the autocorrelation of the samples. In general,
we do not want the samples to be strongly correlated with the previous samples (or lags $k$).
I am not sure there are general guidelines, but typically we do not want "auto correlation...for higher values of k, [because] this indicates a high degree of correlation between our samples and slow mixing" [source](https://sbfnk.github.io/mfiidd/mcmc_diagnostics.html).
Here is an example for ordinal data.
```{r, eval=FALSE}
# data
Y <- ptsd[,1:10]
# fit model
# + 1 makes first category a 1
fit <- estimate(Y + 1, type = "ordinal")
```
To check the convergence of a partial correlation, we need the parameter name. These are printed as follows
```{r, eval=FALSE}
convergence(fit, print_names = TRUE)
#> [1] "B1--B2" "B1--B3" "B2--B3" "B1--B4" "B2--B4" "B3--B4" "B1--B5"
#> [8] "B2--B5" "B3--B5" "B4--B5" "B1--C1" "B2--C1" "B3--C1" "B4--C1"
#> [15] "B5--C1" "B1--C2" "B2--C2" "B3--C2" "B4--C2" "B5--C2" "C1--C2"
#> [22] "B1--D1" "B2--D1" "B3--D1" "B4--D1" "B5--D1" "C1--D1" "C2--D1"
#> [29] "B1--D2" "B2--D2" "B3--D2" "B4--D2" "B5--D2" "C1--D2" "C2--D2"
#> [36] "D1--D2" "B1--D3" "B2--D3" "B3--D3" "B4--D3" "B5--D3" "C1--D3"
#> [43] "C2--D3" "D1--D3" "D2--D3" "B1_(Intercept)" "B2_(Intercept)" "B3_(Intercept)" "B4_(Intercept)"
#> [50] "B5_(Intercept)" "C1_(Intercept)" "C2_(Intercept)" "D1_(Intercept)" "D2_(Intercept)" "D3_(Intercept)"
```
Note the `(Intercept)`, which reflects the fact that the ordinal approach is a multivariate probit model with only intercepts.
The next step is to make the plot
```{r, eval=FALSE}
convergence(fit, param = "B1--B2", type = "acf")
```

The argument `param` can take any number of parameters and a plot will be made for each (e.g., `param = c("B1--B2", "B1--B3")`). In this case, the autocorrelation looks acceptable and actually really good (note the drop to zero). A problematic `acf` plot would have the black lines start at `1.0`
and perhaps never go below `0.20`.
To make this clear, I simulated time series data taking the code from [here](https://stat.ethz.ch/R-manual/R-devel/library/stats/html/arima.sim.html)
```{r, eval=FALSE}
# sim time series
ts.sim <- arima.sim(list(order = c(1,1,0), ar = 0.7), n = 200)
acf(ts.sim)
```

This would be considered problematic. If this occurs, one solution could be to thin the samples manually
```{r, eval=FALSE}
# extract samples
samps <- fit$post_samp$pcors
# iterations
iter <- fit$iter
# thinning interval
thin <- 5
# save every 5th (add 50 which is the burnin)
new_iter <- length(seq(1,to = iter + 50 , by = thin))
# replace (add 50 which is the burnin)
fit$post_samp$pcors <- samps[,,seq(1,to = iter + 50, by = thin)]
# replace iter
fit$iter <- new_iter - 50
# check thinned
convergence(fit, param = "B1--B2", type = "acf")
```
or perhaps just running the model for more iterations (e.g., increasing `iter` in `estimate`). The above is quite convoluted, but note that convergence should not typically be an issue. It might also come in handy to know that the samples can be replaced and the other functions
in **BGGM** will still work with the object `fit`.
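For example, a minimal sketch of simply drawing more posterior samples instead of thinning:

```r
# refit with more posterior samples
fit <- estimate(Y + 1, type = "ordinal", iter = 10000)

# re-check the autocorrelation
convergence(fit, param = "B1--B2", type = "acf")
```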
# Trace plot
The next example is a trace plot. Here we are looking for good "mixing".
```{r, eval=FALSE}
convergence(fit, param = "B1--B2", type = "trace")
```

Admittedly the term "mixing" is vague. But in general the plot should look like this example,
where there is no place that the chain is "stuck". See [here](https://stats.stackexchange.com/questions/311151/evaluation-of-mcmc-samples) for
problematic trace plots.
|
/scratch/gouwar.j/cran-all/cranData/BGGM/vignettes/mcmc_diagnostics.Rmd
|
---
title: "Network Plots"
author: "Donny Williams"
date: "5/20/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Network Plots}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Introduction
This vignette shows how to make network plots.
### R packages
```{r, eval = FALSE, message=FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
library(cowplot)
```
```{r, echo=FALSE, message=FALSE}
library(BGGM)
```
# Estimate
For the estimate methods, it is currently only possible to detect non-zero relations, and
the others are set to zero (no connection in the graph). In a future release, it will be possible
to define a region of equivalence to directly assess null values. Hence, it is important to note that nodes
not connected are not necessarily conditionally independent (absence of evidence is not evidence of absence).
## Fit Model
In this example, I use the `bfi` data, which consists of 25 variables measuring different aspects of personality.
```{r, eval=FALSE}
# data
Y <- bfi[,1:25]
# fit model
fit <- estimate(Y)
```
## Select Graph
The next step is to select the graph, that is, those relations for which the credible interval excludes zero
```{r, eval=FALSE}
# select the edge set
E <- select(fit,
cred = 0.95,
alternative = "two.sided")
```
`alternative` can be changed to, say, `"greater"`, which would then perform a one-sided hypothesis
test for positive relations. This is ideal for many applications in psychology, because often
**all** relations are expected to be positive.
## Plot Graph
Here is the basic plot. This works for any object from `select` (e.g., comparing groups).
```{r, eval=FALSE}
plot(E)
```

### Customize Plot
The above is a `ggplot` that can be further customized. Here is an example.
```r
# extract communities
comm <- substring(colnames(Y), 1, 1)
plot(E,
# enlarge edges
edge_magnify = 5,
# cluster nodes
groups = comm,
# change layout
layout = "circle")$plt +
# add custom labels
scale_color_brewer(breaks = c("A",
"C",
"E",
"N",
"O"),
labels = c("Agreeableness", "Conscientiousness",
"Extraversion", "Neuroticism",
"Opennness"),
palette = "Set2")
```

The `edge_magnify` is a value that is multiplied by the edges, `groups` allows for grouping the
variables (e.g., those thought to belong to the same "community" will be the same color), and
`scale_color_brewer` is from the package `ggplot2` (`palette` controls the colors of the `groups`).
By default the edge colors are from a colorblind palette. This can be changed in `plot` with
the arguments `pos_col` (the color for positive edges) and `neg_col` (the color for negative edges).
This is just scratching the surface of possibilities, as essentially any change
can be made to the plot. There is lots of support for making nice plots readily available
online.
#### Layout
It is also possible to change the layout. This is done with the **sna** package, which is linked in the documentation for `plot.select` in **BGGM**. Here is an example using `layout = "random"`
```{r, eval=FALSE}
plot(E,
# enlarge edges
edge_magnify = 5,
# cluster nodes
groups = comm,
# change layout
layout = "random")$plt +
# add custom labels
scale_color_brewer(breaks = c("A",
"C",
"E",
"N",
"O"),
labels = c("Agreeableness", "Conscientiousness",
"Extraversion", "Neuroticism",
"Opennness"),
palette = "Set2")
```

# Bayesian Hypothesis Testing
The Bayesian hypothesis testing methods offer several advantages, for example, that
evidence for the null hypothesis of conditional independence is formally evaluated.
As a result, the `explore` method in **BGGM** provides plots for both the conditional
dependence and independence structure, in addition to a plot for which the evidence was
ambiguous.
To highlight this advantage, `ptsd` data is used that has a relatively small sample size.
```r
# data
Y <- ptsd
# communities (first letter of column names)
comm <- substring(colnames(Y), 1, 1)
# fit model
fit <- explore(Y)
E <- select(fit, BF_cut = 3)
```
Then plot the results. Note that there are three plots, so the package **cowplot** is used
to combine them into one plot.
```r
plts <- plot(E,
edge_magnify = 5,
groups = comm)
plot_grid(
plts$H1_plt +
ggtitle("Conditional Dependence") +
theme(legend.position = "none"),
plts$H0_plt +
ggtitle("Conditional Independence") +
theme(legend.position = "none"),
plts$ambiguous_plt +
ggtitle("Ambiguous"),
nrow = 1,
rel_widths = c(1, 1, 1.1)
)
```

As can be seen, there is no evidence for conditional independence for any of the relations. And
the ambiguous network makes clear there is large uncertainty as to what might or might not be the "true" network structure. This basic idea of having three adjacency matrices was proposed in @Williams2019_bf.
# Note
**BGGM** provides a publication-ready plot, but it is also limited compared to **qgraph**
[@epskamp2012qgraph]. The one advantage of **BGGM** is that all plots are `ggplot` objects,
which then allows for combining them rather easily. An example is included in another
vignette that shows how to combine several plots made with various methods in **BGGM**.
# References
|
/scratch/gouwar.j/cran-all/cranData/BGGM/vignettes/netplot.Rmd
|
---
title: "Custom Network Statistics"
author: "Donny Williams"
date: "5/19/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Custom Network Statistics}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Background
This vignette describes a new feature to **BGGM** (`2.0.0`) that allows for
computing custom network statistics (e.g., centrality). The new function is
called `roll_your_own` and it was suggested by a user of **BGGM** ([see feature request here](https://github.com/donaldRwilliams/BGGM/issues/12)).
# Basic Idea
The basic idea is to compute the chosen network statistic for each of the sampled partial
correlation matrices, resulting in a distribution. All that is required is to define a function
that takes either a partial correlation matrix or a weighted adjacency matrix
(the partial correlation matrix with the non-selected relations set to zero) as the first argument.
Several examples are provided below.
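For instance, here is a minimal sketch of such a function (a hypothetical strength-like statistic, the sum of absolute partial correlations for each node); any function of this form can be passed to `roll_your_own`:

```r
# x is a partial correlation (or weighted adjacency) matrix;
# the function returns one value per node
f_strength <- function(x, ...){
  colSums(abs(x))
}
```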
### R packages
```{r, eval = FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
```
### Data
In all examples, a subset of `ptsd` data is used. The subset includes two of the "communities" of
symptoms [details for these data can be found in @armour2017network]. The data are ordinal (5-level Likert).
```{r, warning =FALSE, message=FALSE}
# need these packages
library(BGGM)
library(ggplot2)
library(assortnet)
library(networktools)
# data
Y <- ptsd[,1:7]
```
### Fit Model
For these data, the GGM is estimated with a semi-parametric copula [@hoff2007extending].
In **BGGM**, this is implemented with `type = "mixed"`, which is kind of a misnomer because the data do not
have to be "mixed" (consisting of continuous and discrete variables).
Note that the model is fitted only once which highlights that only the posterior samples
are needed to compute any network statistic.
```{r, message=FALSE, warning=FALSE, eval=FALSE}
library(BGGM)
# copula ggm
fit <- estimate(Y, type = "mixed", iter = 1000)
```
# Examples
## Expected Influence
The first example computes expected influence [@robinaugh2016identifying]. The first step is to define a function
```{r}
# define function
f <- function(x,...){
networktools::expectedInf(x,...)$step1
}
```
Note that `x` takes the matrix which is then passed to `expectedInf`. The `...` allows for
passing additional arguments to the `expectedInf` function. An example is provided below.
With the function defined, the next step is to compute the network statistic.
```{r, eval = FALSE, message=FALSE, results='hide'}
# iter = 250 for demonstrative purposes
# (but note even 1000 iters takes less than 1 second)
# compute
net_stat <- roll_your_own(object = fit,
FUN = f,
select = FALSE,
iter = 250)
# print
net_stat
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Network Stats: Roll Your Own
#> Posterior Samples: 250
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> 1 0.701 0.099 0.508 0.871
#> 2 0.912 0.113 0.722 1.179
#> 3 0.985 0.112 0.742 1.199
#> 4 1.056 0.105 0.851 1.247
#> 5 1.056 0.116 0.862 1.288
#> 6 0.491 0.092 0.329 0.679
#> 7 0.698 0.098 0.521 0.878
#> ---
```
The option `select = FALSE` indicates that the statistics are computed from the partial correlation matrices (with nothing set to zero). This can be changed with `select = TRUE`. Internally, each of the sampled
partial correlation matrices is then multiplied by the adjacency matrix.
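Conceptually, that multiplication amounts to the following sketch (the matrices below are hypothetical stand-ins, not the internal **BGGM** code):
```r
# one sampled partial correlation matrix (stand-in values)
pcor_s <- matrix(c(0, 0.20, -0.10,
                   0.20, 0, 0.05,
                  -0.10, 0.05, 0), nrow = 3)
# 0/1 adjacency matrix from the selected graph (stand-in)
adj <- matrix(c(0, 1, 1,
                1, 0, 0,
                1, 0, 0), nrow = 3)
# the matrix that is passed to FUN when select = TRUE
pcor_s * adj
```
Re-running with `select = TRUE`: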
```{r, eval = FALSE, results='hide'}
net_stat <- roll_your_own(object = fit,
FUN = f,
select = TRUE,
iter = 250)
# print
net_stat
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Network Stats: Roll Your Own
#> Posterior Samples: 250
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> 1 0.636 0.136 0.386 0.874
#> 2 0.792 0.113 0.580 0.996
#> 3 0.777 0.122 0.544 1.001
#> 4 0.910 0.121 0.667 1.143
#> 5 0.525 0.104 0.331 0.727
#> 6 0.484 0.110 0.270 0.686
#> 7 0.247 0.081 0.088 0.412
#> ---
```
The results are then plotted with
```{r, message=FALSE, eval=FALSE}
plot(net_stat)
```

## Bridge Strength
The next example computes bridge strength [@jones2019bridge]. This requires the user to define clusters or "communities".
```{r, eval = FALSE, message=FALSE, results='hide'}
# clusters
communities <- substring(colnames(Y), 1, 1)
# function is slow
f <- function(x, ...){
networktools::bridge(x, ...)$`Bridge Strength`
}
# compute
net_stat <- roll_your_own(object = fit,
FUN = f,
communities = communities,
iter = 250)
# print
net_stat
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Network Stats: Roll Your Own
#> Posterior Samples: 250
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> 1 0.162 0.082 0.035 0.347
#> 2 0.250 0.113 0.061 0.501
#> 3 0.180 0.104 0.049 0.480
#> 4 0.280 0.098 0.090 0.480
#> 5 0.375 0.093 0.196 0.558
#> 6 0.617 0.166 0.339 1.002
#> 7 0.628 0.166 0.400 1.025
#> ---
```
Notice `communities`. This is passed to `...` in the function `f`, which, in turn, is passed to the function `bridge`. Any number of arguments can be passed this way.
This can then be plotted and further customized (the returned object is a `ggplot`)
```{r, message = FALSE, eval=FALSE}
plot(net_stat,
fill = "lightblue") +
ggtitle("Bridge Strength") +
xlab("Score")
```

## Assortment
The next example computes assortment [@newman2003mixing].
```{r, eval = FALSE, message=FALSE, results='hide'}
# clusters
communities <- substring(colnames(Y), 1, 1)
# define function
f <- function(x,...){
assortnet::assortment.discrete(x, ...)$r
}
net_stat <- roll_your_own(object = fit,
FUN = f,
types = communities,
weighted = TRUE,
SE = FALSE, M = 1,
iter = 250)
# print
net_stat
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Network Stats: Roll Your Own
#> Posterior Samples: 250
#> ---
#> Estimates:
#>
#> Post.mean Post.sd Cred.lb Cred.ub
#> 0.261 0.124 -0.01 0.469
#> ---
```
This example demonstrates that `...` can take several arguments. The results are stored in the `net_stat` object. They can be accessed with
```{r, eval=FALSE}
hist(net_stat$results, main = "Assortment")
```

# Note
The function `roll_your_own` is expecting the custom function to return either a single number or a number for each node. This ensures all the printing and plotting functions work. However, you could return anything you want and then access the results to plot, summarize, etc.
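Because the raw posterior samples are stored in `net_stat$results` (used with `hist()` above), any custom summary can be computed directly. A hedged sketch:
```r
# sketch: custom summaries from the stored posterior samples
quantile(net_stat$results, probs = c(0.05, 0.5, 0.95))
```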
# References
|
/scratch/gouwar.j/cran-all/cranData/BGGM/vignettes/netstat_custom.Rmd
|
---
title: "Custom Network Comparisons"
author: "Donny Williams"
date: "5/19/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Custom Network Comparisons}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Background
It is quite common to have partial correlation networks (GGMs) for various subgroups,
say, males and females, a control and treatment group, or perhaps several educational
levels. In this case, it is important to not only determine whether the groups are
different, but actually compare the groups in a way that answers a specific question
of interest.
To date, most `R` packages provide a few ways to compare groups, including **BGGM** (version `1.0.0`).
In version `2.0.0`, however, **BGGM** includes a new feature for the function `ggm_compare_ppc` that enables
users to **compare networks in any way they want**.
# Basic Idea
The technical details of the approach are described in [@williams2020comparing]. The basic idea is to
1. Draw samples from the posterior distribution, assuming the groups are equal (i.e., the "null" model).
2. Generate the posterior **predictive** distribution for the chosen test-statistic (how the groups
are being compared)
+ This can be understood as what we would expect to observe in the future
(e.g., in replication), assuming the groups were in fact equal.
3. Compute the test-statistic for the observed groups.
4. Then compare the observed test-statistic to the predictive distribution
(what is expected under the "null" model).
+ If the observed test-statistic (e.g., error) is larger than what is expected under the model assuming group equality, this
suggests that the groups are different.
In **BGGM**, the default is to compare the groups with respect to (symmetric) Kullback-Leibler
divergence (i.e., "distance" between multivariate normal distributions) and the sum of
squared error (for the partial correlation matrix). This was shown to be quite powerful in @williams2020comparing, while also having a
low false positive rate.
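To make the logic concrete, here is a minimal sketch of the check with stand-in objects (these are illustrative values, not the **BGGM** internals):
```r
# T_rep: test-statistics computed on data simulated under the "null" model
# T_obs: the test-statistic for the observed groups
T_rep <- rnorm(1000, mean = 0, sd = 1)  # stand-in predictive distribution
T_obs <- 3                              # stand-in observed value
# posterior predictive p-value (when larger values indicate worse fit)
mean(T_rep >= T_obs)
```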
In the following, the focus is on defining custom functions
and using them with `ggm_compare_ppc`. In all examples, post-traumatic stress disorder
networks are compared [@fried2018replicability].
### R packages
```{r, eval = FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
```
### Data
Only the correlation matrices are available. Hence, multivariate normal data are generated with that *exact*
correlation structure via the `R` package **MASS**.
```{r, warning =FALSE, message=FALSE}
# need these packages
library(BGGM)
library(ggplot2)
library(assortnet)
library(networktools)
library(MASS)
# group 1
Yg1 <- MASS::mvrnorm(n = 926,
mu = rep(0, 16),
Sigma = ptsd_cor3,
empirical = TRUE)
# group 2
Yg2 <- MASS::mvrnorm(n = 956,
mu = rep(0, 16),
Sigma = ptsd_cor4,
empirical = TRUE)
```
# Illustrative Examples
## Correlation
This first example looks at the correlation between the partial correlations of the two networks. Note that
two networks could have what is considered a large correlation. However, the question here is,
assuming the groups are equal, just how large should the correlation be? This is needed to interpret
the observed test-statistic.
### Step 1: Define Custom Function
The first step is to define a custom function that takes two data matrices and the output
is the chosen test-statistic (in this case a correlation)
```{r}
f <- function(Yg1, Yg2){
# number of nodes
p <- ncol(Yg1)
# index of off-diagonal
indices <- upper.tri( diag(p))
# group 1:
# fit model
g1_fit <- estimate(Yg1, analytic = TRUE)
# pcors
g1_pcors <- pcor_mat(g1_fit)[indices]
# group 2
# fit model
g2_fit <- estimate(Yg2, analytic = TRUE)
# pcors
g2_pcors <- pcor_mat(g2_fit)[indices]
# test-statistic
cor(g1_pcors, g2_pcors)
}
```
### Step 2: Compute the Observed Score
The next step is to compute the observed test-statistic, that is, the correlation between the partial correlations.
```{r}
obs <- f(Yg1, Yg2)
# observed
obs
```
### Step 3: Predictive Check
With the function, `f`, and the observed scores, `obs`, in hand, what is left is the predictive check
```{r, message=FALSE, results='hide'}
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000,
loss = FALSE)
```
Note that `loss = FALSE` controls how the p-value is computed. It is an indicator of whether the test-statistic is
a "loss" (a bad thing). In this case, a large correlation is a good thing so it is set to `FALSE`. The results
can then be printed
```{r}
ppc
```
which shows the posterior predictive p-value is zero. This indicates that the observed correlation is lower than
the entire predictive distribution (the distribution of correlations for future data, assuming group equality)
and finally plot the results
```{r, eval=FALSE}
plot(ppc)
```
```{r, echo=FALSE, message=FALSE, warning=FALSE}
plot(ppc, col_critical = "lightblue",
col_noncritical = "lightblue")[[1]] +
xlab("Predictive Correlation")
```
The density is the predictive distribution for the correlation. Recall that this is the correlation that we would expect, given the groups were actually the same, and the black point is the observed correlation. In this case, it seems quite clear that the "null model" is inadequate--the groups are apparently quite different.
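As a sanity check, the same p-value can presumably be recovered directly from the predictive samples stored in the returned object. A sketch, assuming the one-sided comparison used when `loss = FALSE`:
```r
# proportion of predictive correlations at or below the observed correlation
mean(ppc$predictive_custom <= obs)
```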
## Hamming Distance
The next example is [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance), which, in this case, is the squared error for the adjacency matrices. It seems reasonable to think of this as a test for
different network structures or patterns of zeros and ones.
### Step 1: Define Custom Function
The first step is to define a custom function that takes two data matrices and the output
is the chosen test-statistic (in this case Hamming distance)
```{r}
f <- function(Yg1, Yg2){
# nodes
p <- ncol(Yg1)
# index of off-diagonal
indices <- upper.tri( diag(p))
# fit models
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
# select graphs
sel1 <- BGGM::select(fit1)
sel2 <- BGGM::select(fit2)
# hamming distance
sum((sel1$adj[indices] - sel2$adj[indices]) ^ 2)
}
```
### Step 2: Compute the Observed Score
The next step is to compute the observed test-statistic, that is, the Hamming distance between adjacency matrices
```{r}
obs <- f(Yg1, Yg2)
# observed
obs
```
### Step 3: Predictive Check
With the function, `f`, and the observed scores, `obs`, in hand, what is left is the predictive check
```{r, message=FALSE, results='hide'}
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
```
The results can then be printed
```{r}
ppc
```
And then plot the results
```{r, message=FALSE, warning=FALSE}
plot(ppc)
```
This result is intriguing. Whereas the correlation looked at the relation between the partial correlations, here there seems to be evidence
that the adjacency matrices are different (perhaps suggesting that the conditional independence structure is different).
## Partial Correlation Matrix Distance
There might also be interest in the so-called correlation matrix distance [@herdin2005correlation]. This is also easily tested, in this case for the partial correlation matrix.
### Step 1: Define Custom Function
```{r}
f <- function(Yg1, Yg2){
# nodes
p <- ncol(Yg1)
# index of off-diagonal
indices <- upper.tri( diag(p))
# fit models
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
pcor1 <- BGGM::pcor_mat(fit1)
pcor2 <- BGGM::pcor_mat(fit2)
# CDM for partial correlations
# note: numerator is the trace; denominator is the Frobenius norm
1 - (sum(diag(pcor1 %*% pcor2)) / (norm(pcor1, type = "f") * norm(pcor2, type = "f")))
}
```
### Step 2: Compute the Observed Score
The next step is to compute the observed test-statistic, that is, the Partial Correlation Matrix Distance
```{r}
obs <- f(Yg1, Yg2)
# observed
obs
```
### Step 3: Predictive Check
With the function, `f`, and the observed scores, `obs`, in hand, what is left is the predictive check
```{r, message=FALSE, results='hide'}
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
```
The results can then be printed
```{r}
ppc
```
which again provides a p-value of zero.
Note that the object `ppc` includes the predictive samples that allows for user defined plots (in the event something custom is desired).
```{r}
hist(ppc$predictive_custom,
xlim = c(0, obs),
main = "Partial Correlation Matrix Distance")
abline(v = obs)
```
Note that the vertical line is the observed value, which again makes it clear that the distance is quite surprising,
assuming the null model were true.
## Assortment
This next example is assortment [@newman2003mixing], which is a measure related
to clustering in a network. Here the test is for a difference in assortment.
This is computed by taking the difference (absolute value) for each draw
from the predictive distribution.
### Step 1: Define Custom Function
```{r}
# clusters based on DSM-5
comms <- c(
rep("A", 4),
rep("B", 7),
rep("C", 5)
)
f <- function(Yg1, Yg2){
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
pcor1 <- BGGM::pcor_mat(fit1)
pcor2 <- BGGM::pcor_mat(fit2)
assort1 <- assortnet::assortment.discrete(pcor1, types = comms,
weighted = TRUE,
SE = FALSE, M = 1)$r
assort2 <- assortnet::assortment.discrete(pcor2, types = comms,
weighted = TRUE,
SE = FALSE, M = 1)$r
(assort1 - assort2)
}
```
### Step 2: Compute the Observed Score
The next step is to compute the observed test-statistic, that is, assortment for the two groups
```{r}
obs <- f(Yg1, Yg2)
# observed
obs
```
### Step 3: Predictive Check
With the function, `f`, and the observed score, `obs`, in hand, the next step is the predictive check
```{r, message=FALSE, results='hide'}
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
```
The results can then be printed
```{r}
ppc
```
and plotted
```{r}
plot(ppc)
```
which shows that the clustering in the data appears to be different (given the observed value exceeds
the entire predictive distribution).
## Expected Influence
This last example looks at the expected influence for the network [@robinaugh2016identifying]. In this case, the sum of squared error is the test statistic. This is computed from the squared error for each
draw from the predictive distribution.
### Step 1: Define Custom Function
```{r}
f <- function(Yg1, Yg2){
fit1 <- BGGM::estimate(Yg1, analytic = TRUE)
fit2 <- BGGM::estimate(Yg2, analytic = TRUE)
pcor1 <- BGGM::pcor_mat(fit1)
pcor2 <- BGGM::pcor_mat(fit2)
ei1 <- networktools::expectedInf(pcor1)$step1
ei2 <- networktools::expectedInf(pcor2)$step1
sum((ei1 - ei2)^2)
}
```
### Step 2: Compute the Observed Score
The next step is to compute the observed test-statistic, that is, the sum of squared error
for expected influence
```{r}
obs <- f(Yg1, Yg2)
# observed
obs
```
### Step 3: Predictive Check
With the function, `f`, and the observed scores, `obs`, in hand, what is left is the predictive check
```{r, message=FALSE, results='hide'}
ppc <- BGGM::ggm_compare_ppc(Yg1, Yg2,
FUN = f,
custom_obs = obs,
iter = 1000)
```
The results can then be printed
```{r}
ppc
```
and plotted
```{r}
hist(ppc$predictive_custom,
xlim = c(0, obs),
main = "Expected Influence\n Sum of Squared Error")
abline(v = obs)
```
which again shows the sum of squared error for expected influence far exceeds what would be expected, assuming
the null model were true.
# Two Notes of Caution
1. Note that only the defaults in **BGGM** have been shown to have nominal error rates. However, there is a proof that suggests the error rate cannot be larger than $2\alpha$ [@meng1994posterior], and, further, a predictive check is typically below $\alpha$ [i.e., a tendency to be conservative, @gelman2013two].
2. Failing to reject the null model does not indicate the groups are the same! To test for
equality see `ggm_compare_explore` and `ggm_compare_confirm`.
# Conclusion
These examples certainly open the door for tailoring network comparison to answer specific research questions.
# References
|
/scratch/gouwar.j/cran-all/cranData/BGGM/vignettes/ppc_custom.Rmd
|
---
title: "Predictability: Binary, Ordinal, and Continuous"
author: "Donny Williams"
date: "5/20/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Predictability: Binary, Ordinal, and Continuous}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Background
This vignette describes a new feature to **BGGM** (`2.0.0`) that allows for
computing network predictability for binary and ordinal data. Currently
the available option is Bayesian $R^2$ [@gelman_r2_2019].
### R packages
```{r, eval = FALSE, message=FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
```
# Binary
The first example looks at binary data, consisting of 1190 observations and 6 variables. The data are called `women_math` and the variable descriptions are provided in **BGGM**.
The model is estimated with
```{r, eval=FALSE}
# binary data
Y <- women_math
# fit model
fit <- estimate(Y, type = "binary")
```
and then predictability is computed
```{r, eval=FALSE}
r2 <- predictability(fit)
# print
r2
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Metric: Bayes R2
#> Type: binary
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> 1 0.016 0.012 0.002 0.046
#> 2 0.103 0.023 0.064 0.150
#> 3 0.155 0.030 0.092 0.210
#> 4 0.160 0.021 0.118 0.201
#> 5 0.162 0.022 0.118 0.202
#> 6 0.157 0.028 0.097 0.208
#> ---
```
There are then two options for plotting. The first is with error bars, denoting the credible interval (i.e., `cred`),
```{r, message=FALSE, eval=FALSE}
plot(r2,
type = "error_bar",
size = 4,
cred = 0.90)
```

and the second is with a ridgeline plot
```{r, message=FALSE, eval=FALSE}
plot(r2,
type = "ridgeline",
cred = 0.50)
```

# Ordinal
In the following, the `ptsd` data are used (5-level Likert). The variable descriptions are provided in **BGGM**. This is based on the polychoric partial correlations, with $R^2$ computed from the corresponding correlations (due to the correspondence between the correlation matrix and multiple regression).
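One way to see this correspondence (a sketch, assuming standardized variables): with correlation matrix $\mathbf{R}$, the variance explained for node $j$ is $R^2_j = 1 - 1/[\mathbf{R}^{-1}]_{jj}$, so the diagonal of the inverse correlation matrix already carries the information needed for predictability.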
```{r, eval=FALSE}
Y <- ptsd
fit <- estimate(Y + 1, type = "ordinal")
```
The only change is switching `type` from `"binary"` to `"ordinal"`. One important
point is the `+ 1`. This is required because, for the ordinal approach, the first
category must be 1 (in `ptsd` the first category is coded as 0).
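A quick sketch to verify the recoding:
```r
# sketch: after adding 1, the smallest observed category should be 1 for each column
apply(ptsd + 1, 2, min)
```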
```{r, eval=FALSE}
r2 <- predictability(fit)
# print
r2
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Metric: Bayes R2
#> Type: ordinal
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> 1 0.487 0.049 0.394 0.585
#> 2 0.497 0.047 0.412 0.592
#> 3 0.509 0.047 0.423 0.605
#> 4 0.524 0.049 0.441 0.633
#> 5 0.495 0.047 0.409 0.583
#> 6 0.297 0.043 0.217 0.379
#> 7 0.395 0.045 0.314 0.491
#> 8 0.250 0.042 0.173 0.336
#> 9 0.440 0.048 0.358 0.545
#> 10 0.417 0.044 0.337 0.508
#> 11 0.549 0.048 0.463 0.648
#> 12 0.508 0.048 0.423 0.607
#> 13 0.504 0.047 0.421 0.600
#> 14 0.485 0.043 0.411 0.568
#> 15 0.442 0.045 0.355 0.528
#> 16 0.332 0.039 0.257 0.414
#> 17 0.331 0.045 0.259 0.436
#> 18 0.423 0.044 0.345 0.510
#> 19 0.438 0.044 0.354 0.525
#> 20 0.362 0.043 0.285 0.454
#> ---
```
Here is the `error_bar` plot.
```{r, eval=FALSE}
plot(r2)
```

Note that the plot object is a `ggplot`, which allows for further customization (e.g., adding the variable names, a title, etc.).
# Continuous
It is quite common to compute predictability assuming that the data are Gaussian. In the context of Bayesian GGMs, this was introduced in [@Williams2019]. This can also be implemented in **BGGM**.
```{r, eval=FALSE}
# fit model
fit <- estimate(Y)
# predictability
r2 <- predictability(fit)
```
`type` is missing which indicates that `continuous` is the default.
# Note
$R^2$ for binary and ordinal data is computed for the underlying latent variables. This is also the case
when `type = "mixed"` (a semi-parametric copula). In future releases, there will be support for predicting
the variables on the observed scale.
# References
|
/scratch/gouwar.j/cran-all/cranData/BGGM/vignettes/predictability.Rmd
|
---
title: "Testing Sums"
author: "Donny Williams"
date: "5/25/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Testing Sums}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
# Introduction
This is a follow-up to the vignette ["Three Ways to Test the Same Hypothesis"](https://donaldrwilliams.github.io/BGGM/articles/hyp_3_ways.html). A
new feature, `pcor_sum`, was added to **BGGM** that allows for testing partial correlation sums.
This differs from the Bayes factor approach ("Approach #3"), in that only the posterior
distribution is used to determine whether there is a difference in the sums.
### R package
```{r, eval = FALSE, message=FALSE}
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
```
# One Group
This first example looks at one group, where a sum is tested within the same ptsd network. I focus on the
relations between the re-experiencing (`B`) and avoidance (`C`) communities. In particular, the sum of relations between the "Intrusion" (5 nodes) community and the "Avoidance" (two nodes) community is tested.
## Sum to String
For the avoidance symptom "avoidance of thoughts" `C1`, this can be written in `R` code with
```
# ptsd
Y <- ptsd
# paste together sums
paste0(colnames(Y)[1:5], "--C1", collapse = " + ")
#> "B1--C1 + B2--C1 + B3--C1 + B4--C1 + B5--C1"
```
whereas, for the avoidance symptom "avoidance of reminders" (`C2`), this is written as
```
paste0(colnames(Y)[1:5], "--C2", collapse = " + ")
#> "B1--C2 + B2--C2 + B3--C2 + B4--C2 + B5--C2"
```
Note that typically this would have to be written out. `paste0` was used in this case to
avoid typing out all of the relations.
## Fit Model
Here an ordinal GGM is fitted
```
fit <- estimate(Y+1, type = "ordinal", iter = 1000)
```
where the `+1` changes the first category from 0 to 1 (required).
## Test Sums
The next step is to use the `pcor_sum` function. First, I combine the sums into one string separated with `;`.
```
# sum 1
sum1 <- paste0(colnames(Y)[1:5], "--C1", collapse = " + ")
# sum 2
sum2 <- paste0(colnames(Y)[1:5], "--C2", collapse = " + ")
# paste together
sums <- paste(sum1, sum2, sep = ";")
# print
sums
#> "B1--C1 + B2--C1 + B3--C1 + B4--C1 + B5--C1;B1--C2 + B2--C2 + B3--C2 + B4--C2 + B5--C2"
```
Next `pcor_sum` is used
```
test_sum <- pcor_sum(fit, relations = sums)
# print
test_sum
# BGGM: Bayesian Gaussian Graphical Models
# ---
# Network Stats: Posterior Sum
# Posterior Samples: 1000
# ---
# Estimates
#
# Sum:
# Post.mean Post.sd Cred.lb Cred.ub
# B1--C1+B2--C1+B3--C1+B4--C1+B5--C1 0.215 0.096 0.034 0.404
# B1--C2+B2--C2+B3--C2+B4--C2+B5--C2 0.334 0.097 0.145 0.514
# ---
#
# Difference:
# B1--C1+B2--C1+B3--C1+B4--C1+B5--C1 - B1--C2+B2--C2+B3--C2+B4--C2+B5--C2
#
# Post.mean Post.sd Cred.lb Cred.ub Prob.greater Prob.less
# -0.119 0.145 -0.409 0.173 0.205 0.795
# ---
```
`Prob.greater` is the posterior probability that the first sum is larger than the second sum.
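Conceptually, `Prob.greater` is just the proportion of posterior draws in which the first sum exceeds the second. A sketch with hypothetical stand-in draws (using the posterior means and standard deviations printed above), not the `pcor_sum` internals:
```r
sum1_draws <- rnorm(1000, mean = 0.215, sd = 0.096)
sum2_draws <- rnorm(1000, mean = 0.334, sd = 0.097)
mean(sum1_draws > sum2_draws)  # roughly 0.20, close to Prob.greater above
```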
## Plot Results
The object `test_sum` can then be plotted. Note this returns three plots, but only the difference is shown here
```
plot(test_sum)$diff
```

The histogram is not very smooth in this case because `iter = 1000`, but this of course can be changed.
# Two Groups
This next example is for two groups. The data are called `bfi` and they are in the **BGGM** package. I compare a sum of two relations for questions measuring agreeableness in males and females. The relations tested are as follows
## Sum to String
```r
sums <- c("A3--A4 + A4--A5")
```
where `A3` is "know how to comfort others", `A4` is "love children", and `A5` is "make people feel at ease".
## Fit Models
The next step is to fit the models
```r
# data
Y <- bfi
# males
Y_males <- subset(Y, gender == 1, select = -c(education, gender))[,1:5]
# females
Y_females <- subset(Y, gender == 2, select = -c(education, gender))[,1:5]
fit_female <- estimate(Y_females, seed = 2)
# fit males
fit_male <- estimate(Y_males, seed = 1)
```
## Test Sums
Then test the sum
```r
test_sum <- pcor_sum(fit_female, fit_male, relations = sums)
# print
test_sum
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Network Stats: Posterior Sum
#> Posterior Samples: 5000
#> ---
#> Estimates
#>
#> Sum:
#> Post.mean Post.sd Cred.lb Cred.ub
#> g1: A3--A4+A4--A5 0.292 0.026 0.241 0.342
#> g2: A3--A4+A4--A5 0.305 0.036 0.234 0.375
#> ---
#>
#> Difference:
#> g1: A3--A4+A4--A5 - g2: A3--A4+A4--A5
#>
#> Post.mean Post.sd Cred.lb Cred.ub Prob.greater Prob.less
#> -0.014 0.045 -0.1 0.074 0.386 0.614
#> ---
```
## Sanity Check
For a kind of sanity check, here is the sum for the male group obtained from the point estimates.
```r
pcor_mat(fit_male)["A3", "A4"] + pcor_mat(fit_male)["A4", "A5"]
#> 0.305
```
This matches the output.
# Notes
By default, the print function for `pcor_sum` provides 95 % credible intervals. This can be changed by
directly using the print function, for example `print(test_sum, cred = 0.99)`, provides
99 % credible intervals.
Currently, this function only supports sums, due to this being of interest for the psychological network
literature in particular. This can be extended to accommodate multiplication, subtraction,
testing values other than zero, etc. Please make a feature request at either
[github](https://github.com/donaldRwilliams/BGGM/issues) or [BGGM-users group](https://groups.google.com/forum/#!forum/bggm-users).
|
/scratch/gouwar.j/cran-all/cranData/BGGM/vignettes/test_sum.Rmd
|
---
title: "Graphical VAR"
author: "Donny Williams"
date: "6/04/2020"
bibliography: ../inst/REFERENCES.bib
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{Graphical VAR}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
# Introduction
This vignette describes the implementation of a "graphical VAR" in `BGGM`. In `BGGM`, this is fitted as a multivariate regression. The key innovation is a novel prior distribution for the residual covariance matrix. There are a variety of much cooler names than a *mere* "multivariate regression", including "VAR" (vector autoregressive model) and "TSCGM" (time series chain graphical model).
## R package
```
# need the developmental version
if (!requireNamespace("remotes")) {
install.packages("remotes")
}
# install from github
remotes::install_github("donaldRwilliams/BGGM")
library(BGGM)
# for comparison
library(vars)
# for plotting
library(qgraph)
# combine plots
library(cowplot)
```
## Data
I use data from the ifit (Fitbit) study. The data were gathered over 100 consecutive days on a variety of variables, including the PANAS scale (positive and negative affect) and the number of steps each day. `BGGM` includes a subset of variables for two individuals.
```r
# data
Y <- subset(ifit, id == 1)[,-1]
# first 3 rows
head(Y, n = 3)
#> interested disinterested excited upset strong stressed steps
#> 72 10 50 2 50 16 7805
#> 75 6 75 0 76 0 18248
#> 36 58 38 5 45 1 12139
```
# Estimation
The methods in **BGGM** are organized around Bayesian "estimation" and "hypothesis testing".
This is to reach a broader audience, as the former is more similar to classical
methods (those more familiar to researchers).
## Fit Model
With the data in hand, the model is fitted as follows
```
# fit model
fit <- var_estimate(Y, beta_sd = 10)
```
Note that `beta_sd` is the standard deviation of the prior distribution for the regression coefficients. A smaller value, say, `beta_sd = 0.25`, results in a Bayesian ridge regression. Note also that this model, including 5000 draws from the posterior, was estimated in less than 1 second.
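As a quick sketch (not used in the rest of this vignette), stronger shrinkage could be obtained with a smaller prior standard deviation:
```r
# sketch: a smaller prior SD induces stronger, ridge-like shrinkage
fit_shrunk <- var_estimate(Y, beta_sd = 0.25)
```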
The results can then be printed
```r
# print
fit
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Vector Autoregressive Model (VAR)
#> ---
#> Posterior Samples: 5000
#> Observations (n): 94
#> Nodes (p): 7
#> ---
#> Call:
#> var_estimate(Y = Y, beta_sd = 10)
#> ---
#> Partial Correlations:
#>
#> interested disinterested excited upset strong stressed steps
#> interested 0.000 -0.170 0.388 -0.217 0.313 0.268 0.089
#> disinterested -0.170 0.000 -0.172 -0.029 0.094 0.160 -0.078
#> excited 0.388 -0.172 0.000 -0.126 0.500 -0.161 -0.016
#> upset -0.217 -0.029 -0.126 0.000 0.118 0.350 -0.039
#> strong 0.313 0.094 0.500 0.118 0.000 -0.010 0.176
#> stressed 0.268 0.160 -0.161 0.350 -0.010 0.000 -0.038
#> steps 0.089 -0.078 -0.016 -0.039 0.176 -0.038 0.000
#> ---
#> Coefficients:
#>
#> interested disinterested excited upset strong stressed steps
#> interested.l1 0.230 -0.009 0.182 -0.102 0.178 0.018 0.113
#> disinterested.l1 -0.051 -0.007 0.056 -0.019 0.049 0.091 -0.023
#> excited.l1 -0.088 -0.196 0.003 0.057 -0.093 0.092 0.106
#> upset.l1 -0.155 0.262 -0.097 0.435 0.057 0.324 -0.091
#> strong.l1 0.026 0.182 0.026 0.048 0.189 -0.073 -0.196
#> stressed.l1 -0.021 -0.014 -0.033 -0.048 -0.079 0.152 0.133
#> steps.l1 -0.157 0.180 -0.211 0.155 -0.092 0.209 0.042
#> ---
#> Date: Thu Jun 04 08:54:04 2020
```
Note that the coefficients are comparable, given each variable has been standardized (e.g., the predictors
and the outcome are standardized). `BGGM` does not compute the partial directed correlation (PDC) by default (as in **graphicalVAR**). This is because the standardized effects can readily be tested with the Bayes factor, both across and within each model, whereas this does not seem straightforward for the PDC (which requires a transformation).
### Compare to Classical
Here are the estimates from the `vars` package
```r
t(round(
vars::Bcoef(
vars::VAR(scale(na.omit(Y)), type = "none")),
digits = 3)
)
#> interested disinterested excited upset strong stressed steps
#> interested.l1 0.229 -0.012 0.184 -0.100 0.180 0.015 0.112
#> disinterested.l1 -0.050 -0.006 0.057 -0.019 0.050 0.092 -0.022
#> excited.l1 -0.088 -0.193 0.002 0.056 -0.091 0.093 0.106
#> upset.l1 -0.155 0.260 -0.096 0.436 0.058 0.321 -0.092
#> strong.l1 0.027 0.182 0.025 0.047 0.188 -0.073 -0.192
#> stressed.l1 -0.021 -0.012 -0.033 -0.046 -0.077 0.152 0.133
#> steps.l1 -0.157 0.183 -0.210 0.153 -0.093 0.207 0.041
```
Recall that the "estimation" methods are similar to, in this case, ordinary least squares. The graphical structure in `BGGM` is determined with credible intervals, which will be quite similar to using confidence
intervals. Hence for those researchers unfamiliar with Bayesian methods the "estimation" methods are perhaps
a nice place to start.
## Summarize Model
The model can also be summarized with
```r
print(
summary(fit, cred = 0.95),
param = "pcor"
)
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Vector Autoregressive Model (VAR)
#> ---
#> Partial Correlations:
#>
#> Relation Post.mean Post.sd Cred.lb Cred.ub
#> interested--disinterested -0.170 0.108 -0.382 0.044
#> interested--excited 0.388 0.085 0.219 0.546
#> disinterested--excited -0.172 0.104 -0.369 0.049
#> interested--upset -0.217 0.106 -0.417 0.000
#> disinterested--upset -0.029 0.101 -0.239 0.161
#> excited--upset -0.126 0.098 -0.315 0.066
#> interested--strong 0.313 0.090 0.135 0.480
#> disinterested--strong 0.094 0.112 -0.120 0.318
#> excited--strong 0.500 0.078 0.337 0.645
#> upset--strong 0.118 0.109 -0.100 0.325
#> interested--stressed 0.268 0.102 0.058 0.460
#> disinterested--stressed 0.160 0.100 -0.049 0.351
#> excited--stressed -0.161 0.099 -0.358 0.031
#> upset--stressed 0.350 0.091 0.166 0.519
#> strong--stressed -0.010 0.107 -0.212 0.201
#> interested--steps 0.089 0.108 -0.123 0.297
#> disinterested--steps -0.078 0.108 -0.284 0.125
#> excited--steps -0.016 0.100 -0.207 0.182
#> upset--steps -0.039 0.107 -0.245 0.178
#> strong--steps 0.176 0.101 -0.024 0.364
#> stressed--steps -0.038 0.108 -0.236 0.193
#> ---
```
The coefficients can also be printed by changing `param` to either `all` or `beta`. The summary can also be plotted. Here are the coefficients
```r
plts <- plot(summary(fit, cred = 0.95))
cowplot::plot_grid(
cowplot::plot_grid(
plts$beta_plt$interested,
plts$beta_plt$disinterested,
plts$beta_plt$excited,
nrow = 1),
cowplot::plot_grid(
plts$beta_plt$upset,
plts$beta_plt$strong,
plts$beta_plt$stressed,
nrow = 1
),
nrow = 2)
```

There is a plot for the partial correlations in the object `plts`.
## Select Graph
The graphs are selected with
```r
sel <- select(fit, cred = 0.95)
# print
sel
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Vector Autoregressive Model (VAR)
#> ---
#> Posterior Samples: 5000
#> Credible Interval: 95 %
#> ---
#> Call:
#> var_estimate(Y = Y, beta_sd = 10)
#> ---
#> Partial Correlations:
#>
#> interested disinterested excited upset strong stressed steps
#> interested 0.000 0 0.388 -0.217 0.313 0.268 0
#> disinterested 0.000 0 0.000 0.000 0.000 0.000 0
#> excited 0.388 0 0.000 0.000 0.500 0.000 0
#> upset -0.217 0 0.000 0.000 0.000 0.350 0
#> strong 0.313 0 0.500 0.000 0.000 0.000 0
#> stressed 0.268 0 0.000 0.350 0.000 0.000 0
#> steps 0.000 0 0.000 0.000 0.000 0.000 0
#> ---
#> Coefficients:
#>
#> interested disinterested excited upset strong stressed steps
#> interested.l1 0 0.000 0 0.000 0 0.000 0
#> disinterested.l1 0 0.000 0 0.000 0 0.000 0
#> excited.l1 0 0.000 0 0.000 0 0.000 0
#> upset.l1 0 0.262 0 0.435 0 0.324 0
#> strong.l1 0 0.000 0 0.000 0 0.000 0
#> stressed.l1 0 0.000 0 0.000 0 0.000 0
#> steps.l1 0 0.000 0 0.000 0 0.209 0
#> ---
```
# Plot Graph
For plotting, I use the **qgraph** package.
```r
par(mfrow=c(1,2))
qgraph::qgraph(sel$pcor_weighted_adj, title = "Partials")
qgraph::qgraph(sel$beta_weighted_adj, title = "Coefficients")
```

# Predictability
Finally, it is also possible to compute predictability, in this case Bayesian $R^2$
```r
r2 <- predictability(fit)
# print
r2
#> BGGM: Bayesian Gaussian Graphical Models
#> ---
#> Metric: Bayes R2
#> Type: continuous
#> ---
#> Estimates:
#>
#> Node Post.mean Post.sd Cred.lb Cred.ub
#> interested 0.144 0.057 0.050 0.271
#> disinterested 0.166 0.061 0.060 0.302
#> excited 0.127 0.054 0.039 0.250
#> upset 0.220 0.070 0.093 0.368
#> strong 0.116 0.051 0.035 0.232
#> stressed 0.227 0.069 0.102 0.373
#> steps 0.105 0.047 0.032 0.210
```
The object `r2` can also be plotted
```r
plot(r2, type = "ridgeline")
```

# Explore
Bayesian (exploratory) testing to come...
# Confirm
Bayesian (confirmatory) testing to come...
# Note
|
/scratch/gouwar.j/cran-all/cranData/BGGM/vignettes/var_model.Rmd
|
#This function creates an incidence matrix that will be included in the
#linear term of the model
#Arguments: LT, Linear term, an object of the class "formula" that also includes
#optionally a data.frame to obtain the information
#It returns the incidence matrix
set.X=function(LT)
{
flag=TRUE
n_elements=length(LT)
i=0
while(i<=n_elements & flag)
{
i=i+1;
if(is(LT[[i]],"formula"))
{
flag=FALSE
rhs=LT[[i]]
if(is.null(LT$data))
{
mf = model.frame(formula=rhs)
}else{
mf = model.frame(formula=rhs,data=LT$data)
}
X = model.matrix(attr(mf, "terms"), data=mf)
Xint = match("(Intercept)", colnames(X), nomatch=0L)
if(Xint > 0L) X = X[, -Xint, drop=FALSE]
}
}
if(flag) stop("Unable to build incidence matrix, wrong formula or data")
return(X)
}
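#Example usage (sketch, not run; myData and the predictors x1, x2 are hypothetical):
#LT <- list(~ x1 + x2, data = myData)
#X <- set.X(LT)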
## Fixed Effects ##################################################################
#Function for initializing regression coefficients for Fixed effects.
#All the arguments are defined in the function BGLR
setLT.Fixed=function(LT,n,j,y,weights,nLT,saveAt,rmExistingFiles,groups,nGroups)
{
if(is.null(LT$X)) LT$X=set.X(LT)
LT$X=as.matrix(LT$X)
LT$p=ncol(LT$X)
LT$colNames=colnames(LT$X)
if(any(is.na(LT$X)))
{
stop("LP ",j," has NAs in X")
}
if(nrow(LT$X)!=n)
{
stop("Number of rows of LP ",j," not equal to the number of phenotypes")
}
#weight inputs if necessary
LT$X=sweep(LT$X,1L,weights,FUN="*") #weights
if(!is.null(groups))
{
x2=matrix(NA,nrow=nGroups,ncol=ncol(LT$X))
for(g in 1:nGroups)
{
x2[g,]=apply(LT$X[groups==g,,drop=FALSE],2L,function(x) sum(x^2)) #the sum of the square of each of the columns for each group
}
LT$x2=x2;
}else{
LT$x2=apply(LT$X,2L,function(x) sum(x^2)) #the sum of the square of each of the columns
}
#Objects for saving posterior means from MCMC
LT$b=rep(0,LT$p)
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
fname=paste(saveAt,LT$Name,"_b.dat",sep="")
LT$NamefileOut=fname;
if(rmExistingFiles)
{
unlink(fname)
}
LT$fileOut=file(description=fname,open="w")
tmp=LT$colNames
write(tmp, ncolumns = LT$p, file = LT$fileOut, append = TRUE)
LT$X=as.vector(LT$X)
LT$x2=as.vector(LT$x2)
LT$varB=1e10
return(LT)
}
## Gaussian Regression ############################################################
#Function for initializing regression coefficients for Ridge Regression.
#All the arguments are defined in the function BGLR
setLT.BRR=function(LT,y,n,j,weights,nLT,R2,saveAt,rmExistingFiles,groups,nGroups,verbose,thin,nIter,burnIn,lower_tri){ #*#
#Check inputs
if(is.null(LT$X)) LT$X=set.X(LT)
LT$X=as.matrix(LT$X)
LT$p=ncol(LT$X)
LT$colNames=colnames(LT$X)
if(any(is.na(LT$X)))
{
stop("LP ",j," has NAs in X")
}
if(nrow(LT$X)!=n)
{
stop("Number of rows of LP ",j," not equal to the number of phenotypes")
}
#Weight inputs if necessary
LT$X=sweep(LT$X,1L,weights,FUN="*") #weights
if(!is.null(groups))
{
x2=matrix(NA,nrow=nGroups,ncol=ncol(LT$X))
for(g in 1:nGroups)
{
x2[g,]=apply(LT$X[groups==g,,drop=FALSE],2L,function(x) sum(x^2)) #the sum of the square of each of the columns for each group
}
LT$x2=x2;
}else{
LT$x2=apply(LT$X,2L,function(x) sum(x^2)) #the sum of the square of each of the columns
}
sumMeanXSq = sum((apply(LT$X,2L,mean))^2)
#Default df for the prior assigned to the variance of marker effects
if(is.null(LT$df0))
{
LT$df0=5
if(verbose)
{
message("Degree of freedom of LP ",j," set to default value (",LT$df0,")")
}
}
if(is.null(LT$R2))
{
LT$R2=R2/nLT
}
#Default scale parameter for the prior assigned to the variance of marker effects
if(is.null(LT$S0))
{
if(LT$df0<=0) stop("df0>0 in BRR in order to set S0")
LT$MSx=sum(LT$x2)/n-sumMeanXSq
LT$S0=((var(y,na.rm=TRUE)*LT$R2)/(LT$MSx))*(LT$df0+2)
if(verbose)
{
message("Scale parameter of LP ",j," set to default value (",LT$S0,")")
}
}
#Objects for saving posterior means from MCMC
LT$b=rep(0,LT$p)
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
LT$varB=LT$S0/(LT$df0+2)
LT$post_varB=0
LT$post_varB2=0
fname=paste(saveAt,LT$Name,"_varB.dat",sep="");
if(rmExistingFiles)
{
unlink(fname)
}
LT$NamefileOut=fname
LT$fileOut=file(description=fname,open="w")
if(is.null(LT$lower_tri)) LT$lower_tri=FALSE;
if(LT$lower_tri)
{
message("You have provided a lower triangular matrix for LP ", j)
message("Checking dimmensions...")
if(ncol(LT$X)==nrow(LT$X))
{
message("Ok.")
LT$X=LT$X[lower.tri(LT$X,diag=TRUE)]
}
}else{
LT$X=as.vector(LT$X)
}
LT$x2=as.vector(LT$x2)
#*#
if(is.null(LT$saveEffects)){LT$saveEffects=FALSE}
if(LT$saveEffects){
if(is.null(LT$storageMode)){LT$storageMode="double"}
if(!LT$storageMode%in%c("single","double")) {
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
if(is.null(LT$thin)){ LT$thin=thin }
fname=paste(saveAt,LT$Name,"_b.bin",sep="")
if(rmExistingFiles){ unlink(fname) }
LT$fileEffects=file(fname,open='wb')
nRow=floor((nIter-burnIn)/LT$thin)
writeBin(object=c(nRow,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
return(LT)
}
#Ridge regression using sets of markers
#This is just a Ridge Regression set-specific variances,
#LT has an extra attribute: sets
setLT.BRR_sets=function(LT,y,n,j,weights,nLT,R2,saveAt,rmExistingFiles,verbose,thin,nIter,burnIn)
{
#Check the inputs
if(is.null(LT$X)) LT$X=set.X(LT)
LT$X=as.matrix(LT$X)
LT$p=ncol(LT$X)
LT$colNames=colnames(LT$X)
if(is.null(LT$sets)) stop("Argument sets (a vector grouping effects into sets) is required in BRR_sets");
if(length(LT$sets)!=LT$p){ stop("The length of sets must be equal to the number of predictors") }
LT$sets<-as.integer(factor(LT$sets,ordered=TRUE,levels=unique(LT$sets)))
LT$n_sets=length(unique(LT$sets))
if(LT$n_sets>=LT$p){ stop("The number of sets is greater than or equal to the number of effects!") }
if(any(is.na(LT$X))){
stop("LP ",j," has NAs in X")
}
if(nrow(LT$X)!=n){
stop("Number of rows of LP ",j," not equal to the number of phenotypes")
}
#Weight inputs if necessary
LT$X=sweep(LT$X,1L,weights,FUN="*") #weights
LT$x2=apply(LT$X,2L,function(x) sum(x^2)) #the sum of the square of each of the columns
sumMeanXSq = sum((apply(LT$X,2L,mean))^2)
if(is.null(LT$df0)){
LT$df0=5
if(verbose){
message("Degree of freedom of LP ",j," set to default value (",LT$df0,")")
}
}
if(is.null(LT$R2)){
LT$R2=R2/nLT
}
if(is.null(LT$S0)) {
if(LT$df0<=0) stop("df0 must be greater than 0")
LT$MSx=sum(LT$x2)/n-sumMeanXSq
LT$S0=((var(y,na.rm=TRUE)*LT$R2)/(LT$MSx))*(LT$df0+2)
if(verbose){
message("Scale parameter of LP ",j," set to default value (",LT$S0,")")
}
}
LT$DF1=table(LT$sets)+LT$df0
LT$b=rep(0,LT$p)
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
LT$varB=rep(LT$S0/(LT$df0+2),LT$p)
LT$varSets=rep(0,LT$n_sets)
LT$post_varSets=rep(0,LT$n_sets)
LT$post_varSets2<-rep(0,LT$n_sets)
LT$post_varB=rep(0 ,LT$p)
LT$post_varB2=rep(0,LT$p)
fname=paste(saveAt,LT$Name,"_varB.dat",sep="");
if(rmExistingFiles){
unlink(fname)
}
LT$NamefileOut=fname
LT$fileOut=file(description=fname,open="w")
LT$X=as.vector(LT$X)
if(is.null(LT$saveEffects)){LT$saveEffects=FALSE}
if(LT$saveEffects){
if(is.null(LT$storageMode)){LT$storageMode="double"}
if(!LT$storageMode%in%c("single","double")) {
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
if(is.null(LT$thin)){ LT$thin=thin }
fname=paste(saveAt,LT$Name,"_b.bin",sep="")
if(rmExistingFiles){ unlink(fname) }
LT$fileEffects=file(fname,open='wb')
nRow=floor((nIter-burnIn)/LT$thin)
writeBin(object=c(nRow,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}
return(LT)
}
## Bayesian LASSO ############################################################
## The well known Bayesian LASSO (Park and Casella, 2008) and
## de los Campos et al (2009).
# This functions simply sets hyper-parameters for quantities involved in BL regression
setLT.BL=function(LT,y,n,j,weights,nLT,R2,saveAt,rmExistingFiles,verbose,thin,nIter,burnIn)
{
#Check the inputs
if(is.null(LT$minAbsBeta)) LT$minAbsBeta=1e-9
if(is.null(LT$X)) LT$X=set.X(LT)
LT$X=as.matrix(LT$X)
LT$p=ncol(LT$X)
LT$colNames=colnames(LT$X)
if(any(is.na(LT$X)))
{
stop("LP ",j," has NAs in X")
}
if(nrow(LT$X)!=n)
{
stop("Number of rows of LP ",j," not equal to the number of phenotypes")
}
#Weight inputs if necessary
LT$X=sweep(LT$X,1L,weights,FUN="*") #weights
LT$x2=apply(LT$X,2L,function(x) sum(x^2)) #the sum of the square of each of the columns
sumMeanXSq = sum((apply(LT$X,2L,mean))^2)
LT$MSx=sum(LT$x2)/n-sumMeanXSq
# Prior
if(is.null(LT$R2))
{
LT$R2=R2/nLT
}
# Setting default value of lambda
if(!is.null(LT$lambda))
{
if(LT$lambda<0)
{
stop("lambda should be positive")
}
}
if(is.null(LT$lambda))
{
LT$lambda2=2*(1-R2)/(LT$R2)*LT$MSx
LT$lambda=sqrt(LT$lambda2)
if(verbose)
{
message("Initial value of lambda in LP ",j," was set to default value (",LT$lambda,")")
}
}else{
if(LT$lambda<0) stop("lambda should be positive");
LT$lambda2=LT$lambda^2
}
# Checking lambda-type
if(is.null(LT$type))
{
LT$type="gamma"
if(verbose)
{
message("By default, the prior density of lambda^2 in the LP ",j," was set to gamma")
}
}else{
if(!LT$type%in%c("gamma","beta","FIXED")) stop("The prior for lambda^2 should be gamma, beta or a point of mass (i.e., fixed lambda)")
}
if(LT$type=="gamma")
{
if(is.null(LT$shape))
{
LT$shape=1.1
if(verbose)
{
message("shape parameter in LP ",j," was missing and was set to ",LT$shape)
}
}
if(is.null(LT$rate))
{
LT$rate=(LT$shape-1)/LT$lambda2
if(verbose)
{
message("rate parameter in LP ",j," was missing and was set to ",LT$rate)
}
}
}
if(LT$type=="beta")
{
if(is.null(LT$probIn))
{
LT$probIn=0.5
if(verbose)
{
message("probIn in LP ",j," was missing and was set to ",LT$probIn)
}
}
if(is.null(LT$counts))
{
LT$counts=2
if(verbose)
{
message("Counts in LP ",j," was missing and was set to ",LT$counts)
}
}
LT$shape1=LT$probIn*LT$counts;
LT$shape2=(1-LT$probIn)*LT$counts;
if(is.null(LT$max))
{
LT$max=10*LT$lambda
if(verbose)
{
message("max parameter in LP ",j," was missing and was set to ",LT$max)
}
}
}
#Objects to storing information for MCMC iterations
LT$b=rep(0,LT$p)
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
tmp=((var(y,na.rm=TRUE)*R2/nLT)/(LT$MSx))
LT$tau2=rep(tmp,LT$p)
LT$post_tau2=0
LT$post_lambda=0
fname=paste(saveAt,LT$Name,"_lambda.dat",sep="");
if(rmExistingFiles)
{
unlink(fname)
}
LT$NamefileOut=fname
LT$fileOut=file(description=fname,open="w")
LT$X=as.vector(LT$X)
#*#
if(is.null(LT$saveEffects)){LT$saveEffects=FALSE}
if(LT$saveEffects){
if(is.null(LT$storageMode)){LT$storageMode="double"}
if(!LT$storageMode%in%c("single","double")) {
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
if(is.null(LT$thin)){ LT$thin=thin }
fname=paste(saveAt,LT$Name,"_b.bin",sep="")
if(rmExistingFiles){ unlink(fname) }
LT$fileEffects=file(fname,open='wb')
nRow=floor((nIter-burnIn)/LT$thin)
writeBin(object=c(nRow,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
return(LT)
}
#Reproducing kernel Hilbert spaces
#This function simply sets hyperparameters and prepares inputs
#for Reproducing Kernel Hilbert Spaces. The algorithm used here is
#Fully described in de los Campos et al (2010).
setLT.RKHS=function(LT,y,n,j,weights,saveAt,R2,nLT,rmExistingFiles,verbose)
{
#Checking inputs
if(is.null(LT$V))
{
if(is.null(LT$K)) stop("Kernel for linear term ",j, " was not provided, specify it with list(K=?,model='RKHS'), where ? is the kernel matrix")
if(!is.matrix(LT$K)) stop("Kernel for linear term ",j, " should be a matrix, the kernel provided is of class ", class(LT$K))
LT$K = as.matrix(LT$K)
if(nrow(LT$K)!=ncol(LT$K)) stop("Kernel for linear term ",j, " is not a square matrix")
#This code was rewritten to speed up computations
#T = diag(weights)
#LT$K = T %*% LT$K %*% T
#Weight kernels
#for(i in 1:nrow(LT$K))
#{
# #j can not be used as subindex because its value is overwritten
# for(m in i:ncol(LT$K))
# {
# LT$K[i,m]=LT$K[i,m]*weights[i]*weights[m];
# LT$K[m,i]=LT$K[i,m]
# }
#}
#Added January 10/2020
#This is faster than the for loop
LT$K=sweep(sweep(LT$K,1L,weights,"*"),2L,weights,"*")
tmp =eigen(LT$K,symmetric=TRUE)
LT$V =tmp$vectors
LT$d =tmp$values
rm(tmp)
}else{
if(any(weights!=1))
{
warning("Eigen decomposition for LT",j," was provided and the model involves weights. Note: You should have weighted the kernel before computing eigen(K)")
}
}
#Defaul value for tolD
#Only those eigenvectors whose eigenvalues> tolD are kept.
if (is.null(LT$tolD))
{
LT$tolD = 1e-10
if(verbose)
{
message("Default value of minimum eigenvalue in LP ",j," was set to ",LT$tolD)
}
}
#Removing elements whose eigenvalues < tolD
tmp= LT$d > LT$tolD
LT$levelsU = sum(tmp)
LT$d = LT$d[tmp]
LT$V = LT$V[, tmp]
#Default degrees of freedom and scale parameter associated with the variance component for marker effect
if (is.null(LT$df0))
{
LT$df0 = 5
if(verbose)
{
message("default value of df0 in LP ",j," was missing and was set to ",LT$df0)
}
}
if(is.null(LT$R2))
{
LT$R2=R2/nLT
}
if (is.null(LT$S0))
{
if(LT$df0<=0) stop("df0>0 in RKHS in order to set S0");
LT$S0=((var(y,na.rm=TRUE)*LT$R2)/(mean(LT$d)))*(LT$df0+2)
if(verbose)
{
message("default value of S0 in LP ",j," was missing and was set to ",LT$S0)
}
}
LT$u=rep(0,nrow(LT$V))
LT$varU=LT$S0/(LT$df0+2)
LT$uStar=rep(0, LT$levelsU)
#Output files
fname=paste(saveAt,LT$Name,"_varU.dat",sep="")
LT$NamefileOut=fname;
if(rmExistingFiles)
{
unlink(fname)
}
#Objects for storing information for MCMC iterations
LT$fileOut=file(description=fname,open="w")
LT$post_varU=0
LT$post_varU2=0
LT$post_uStar = rep(0, LT$levelsU)
LT$post_u = rep(0, nrow(LT$V))
LT$post_u2 = rep(0,nrow(LT$V))
#return object
return(LT)
}
###Bayes B and C########################################################################################################################################
setLT.BayesBandC=function(LT,y,n,j,weights,saveAt,R2,nLT,rmExistingFiles, groups, nGroups,verbose,thin,nIter,burnIn)
{
model=LT$model
if(is.null(LT$X)) LT$X=set.X(LT)
#Be sure that your X is a matrix
LT$X=as.matrix(LT$X)
LT$p=ncol(LT$X)
LT$colNames=colnames(LT$X)
#Weight inputs if necessary
LT$X=sweep(LT$X,1L,weights,FUN="*") #weights
if(!is.null(groups))
{
x2=matrix(NA,nrow=nGroups,ncol=ncol(LT$X))
for(g in 1:nGroups)
{
x2[g,]=apply(LT$X[groups==g,,drop=FALSE],2L,function(x) sum(x^2)) #the sum of the square of each of the columns for each group
}
LT$x2=x2;
}else{
LT$x2=apply(LT$X,2L,function(x) sum(x^2)) #the sum of the square of each of the columns
}
sumMeanXSq = sum((apply(LT$X,2L,mean))^2)
LT$MSx=sum(LT$x2)/n-sumMeanXSq
if(any(is.na(LT$X))){ stop("LP ",j," has NAs in X") }
if(nrow(LT$X)!=n){ stop("Number of rows of LP ",j," not equal to the number of phenotypes") }
if(is.null(LT$R2))
{
LT$R2=R2/nLT
if(verbose)
{
message("R2 in LP ",j," was missing and was set to ",LT$R2)
}
}
#Default value for the degrees of freedom associated with the distribution assigned to the variance
#of marker effects
if(is.null(LT$df0))
{
LT$df0= 5
if(verbose)
{
message("DF in LP ",j," was missing and was set to ",LT$df0)
}
}
#Default value for a marker being "in" the model
if(is.null(LT$probIn))
{
LT$probIn=0.5
if(verbose)
{
message("probIn in LP ",j," was missing and was set to ",LT$probIn)
}
}
#Default value for prior counts
if(is.null(LT$counts))
{
LT$counts=10
if(verbose)
{
message("Counts in LP ",j," was missing and was set to ",LT$counts)
}
}
LT$countsIn=LT$counts * LT$probIn
LT$countsOut=LT$counts - LT$countsIn
#Default value for the scale parameter associated with the distribution assigned to the variance of
#marker effects
if(is.null(LT$S0))
{
if(LT$df0<=0) stop("df0>0 in ",model," in order to set S0");
LT$S0=var(y, na.rm = TRUE)*LT$R2/(LT$MSx)*(LT$df0+2)/LT$probIn
if(verbose)
{
message("Scale parameter in LP ",j," was missing and was set to ",LT$S0)
}
}
LT$b=rep(0, LT$p)
LT$d=rbinom(n = LT$p, size = 1, prob = LT$probIn)
if(model=="BayesB")
{
if(is.null(LT$shape0))
{
LT$shape0=1.1
}
if(is.null(LT$rate0)){
LT$rate0=(LT$shape0-1)/LT$S0
}
LT$S=LT$S0
LT$varB=rep(LT$S0/(LT$df0+2),LT$p)
fname=paste(saveAt,LT$Name,"_parBayesB.dat",sep="")
}else{
LT$varB = LT$S0
fname=paste(saveAt,LT$Name,"_parBayesC.dat",sep="")
}
LT$X=as.vector(LT$X)
LT$x2=as.vector(LT$x2)
if(rmExistingFiles)
{
unlink(fname)
}
LT$fileOut=file(description=fname,open="w")
LT$NamefileOut=fname;
if(model=="BayesB")
{
tmp=c('probIn','scale')
write(tmp, ncolumns = LT$p, file = LT$fileOut, append = TRUE)
}
#Objects for storing MCMC information
LT$post_varB=0
LT$post_varB2=0
LT$post_d=0
LT$post_probIn=0
LT$post_probIn2=0
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
if(model=="BayesB")
{
LT$post_S=0
LT$post_S2=0
}
#*#
if(is.null(LT$saveEffects)){LT$saveEffects=FALSE}
if(LT$saveEffects){
if(is.null(LT$storageMode)){LT$storageMode="double"}
if(!LT$storageMode%in%c("single","double")) {
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
if(is.null(LT$thin)){ LT$thin=thin }
fname=paste(saveAt,LT$Name,"_b.bin",sep="")
if(rmExistingFiles){ unlink(fname) }
LT$fileEffects=file(fname,open='wb')
nRow=floor((nIter-burnIn)/LT$thin)
writeBin(object=c(nRow,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
#return object
return(LT)
}
#Bayes A, Meuwissen et al. (2001).
#Prediction of Total Genetic Value Using Genome-Wide Dense Marker Maps
#Genetics 157: 1819-1829, Modified so that the Scale parameter is estimated from data (a gamma prior is assigned)
setLT.BayesA=function(LT,y,n,j,weights,saveAt,R2,nLT,rmExistingFiles,verbose,thin,nIter,burnIn)
{
#Checking inputs
if(is.null(LT$X)) LT$X=set.X(LT)
LT$X=as.matrix(LT$X)
LT$p=ncol(LT$X)
LT$colNames=colnames(LT$X)
#Weight inputs if necessary
LT$X=sweep(LT$X,1L,weights,FUN="*") #weights
LT$x2=apply(LT$X,2L,function(x) sum(x^2)) #the sum of the square of each of the columns
sumMeanXSq = sum((apply(LT$X,2L,mean))^2)
LT$MSx=sum(LT$x2)/n-sumMeanXSq
#Default degrees of freedom for the prior assigned to the variance of markers
if(is.null(LT$df0))
{
LT$df0 = 5
if(verbose)
{
message("DF in LP ",j," was missing and was set to ",LT$df0)
}
}
if(is.null(LT$R2))
{
LT$R2=R2/nLT
if(verbose)
{
message("R2 in LP ",j," was missing and was set to ",LT$R2)
}
}
#Default scale parameter for the prior assigned to the variance of markers
if(is.null(LT$S0))
{
if(LT$df0<=0) stop("df0>0 in BayesA in order to set S0")
LT$S0 = var(y, na.rm = TRUE)*LT$R2/(LT$MSx)*(LT$df0+2)
if(verbose)
{
message("Scale parameter in LP ",j," was missing and was set to ",LT$S0)
}
}
# Improvement: Treat Scale as random, assign a gamma density
if(is.null(LT$shape0))
{
LT$shape0=1.1
}
if(is.null(LT$rate0))
{
LT$rate0=(LT$shape0-1)/LT$S0
}
LT$S=LT$S0
LT$b=rep(0,LT$p)
LT$varB=rep(LT$S0/(LT$df0+2),LT$p)
# Add one file when S0 is treated as random.
fname=paste(saveAt,LT$Name,"_ScaleBayesA.dat",sep="")
if(rmExistingFiles)
{
unlink(fname)
}
LT$fileOut=file(description=fname,open="w")
LT$NamefileOut=fname;
LT$X=as.vector(LT$X)
#Objects for storing information generated during MCMC iterations
LT$post_varB=0
LT$post_varB2=0
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
LT$post_S=0
LT$post_S2=0
#*#
if(is.null(LT$saveEffects)){LT$saveEffects=FALSE}
if(LT$saveEffects){
if(is.null(LT$storageMode)){LT$storageMode="double"}
if(!LT$storageMode%in%c("single","double")) {
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
if(is.null(LT$thin)){ LT$thin=thin }
fname=paste(saveAt,LT$Name,"_b.bin",sep="")
if(rmExistingFiles){ unlink(fname) }
LT$fileEffects=file(fname,open='wb')
nRow=floor((nIter-burnIn)/LT$thin)
writeBin(object=c(nRow,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
#return object
return(LT)
}
##################################################################################################
#Just the welcome function that will appear every time that your run the program
welcome=function()
{
message("\n");
message("#--------------------------------------------------------------------#");
message("# _\\\\|//_ #");
message("# (` o-o ') BGLR v1.1.1 #");
message("#------ooO-(_)-Ooo---------------------------------------------------#");
message("# Bayesian Generalized Linear Regression #");
message("# Gustavo de los Campos, [email protected] #");
message("# .oooO Oooo. Paulino Perez-Rodriguez, [email protected] #");
message("# ( ) ( ) December, 2023 #");
message("#_____\\ (_______) /_________________________________________________ #");
message("# \\_) (_/ #");
message("# #");
message("#------------------------------------------------------------------- #");
message("\n");
}
##################################################################################################
##################################################################################################
#The density of a scaled inverted chi-squared distribution
#df: degrees of freedom, S: Scale parameter
dScaledInvChisq=function (x, df, S)
{
tmp = dchisq(S/x, df = df)/(x^2)
return(tmp)
}
##################################################################################################
#The density function for lambda
#Density function for Regularization parameter in Bayesian LASSO
#Rate: rate parameter, shape: the value for the shape parameter
dLambda=function (rate, shape, lambda)
{
tmp = dgamma(x = I(lambda^2), rate = rate, shape = shape) * 2 * lambda
return(tmp)
}
##################################################################################################
#Metropolis sampler for lambda in the Bayesian LASSO
metropLambda=function (tau2, lambda, shape1 = 1.2, shape2 = 1.2, max = 200, ncp = 0)
{
lambda2 = lambda^2
l2_new = rgamma(rate = sum(tau2)/2, shape = length(tau2),
n = 1)
l_new = sqrt(l2_new)
logP_old = sum(dexp(x = tau2, log = TRUE, rate = (lambda2/2))) +
dbeta(x = lambda/max, log = TRUE, shape1 = shape1, shape2 = shape2) -
dgamma(shape = sum(tau2)/2, rate = length(tau2), x = (2/lambda2),
log = TRUE)
logP_new = sum(dexp(x = tau2, log = TRUE, rate = (l2_new/2))) +
dbeta(x = l_new/max, log = TRUE, shape1 = shape1, shape2 = shape2) -
dgamma(shape = sum(tau2)/2, rate = length(tau2), x = (2/l2_new),
log = TRUE)
accept = (logP_new - logP_old) > log(runif(1))
if (accept) {
lambda = l_new
}
return(lambda)
}
##################################################################################################
#Startup function
#this function is executed once the library is loaded
.onAttach = function(library, pkg)
{
if(interactive())
{
packageStartupMessage("# Gustavo de los Campos & Paulino Perez-Rodriguez")
packageStartupMessage("# Support provided by the U.S., National Institutes of Health (NIH)")
packageStartupMessage("# (Grant: R01GM101219, NIGMS)")
packageStartupMessage("# and by the International Maize and Wheat Improvement Center (CIMMyT).")
packageStartupMessage("# Type 'help(BGLR)' for summary information")
}
invisible()
}
##################################################################################################
#rtrun draws from a truncated univariate normal distribution using the inverse CDF algorithm
#Arguments:
#mu: mean
#sigma: standard deviation
#a: lower bound
#b: upper bound
#NOTES: 1) This routine was taken from the bayesm package, December 18, 2012
#       2) The inputs are not checked;
#          it is assumed that they are ok.
#rtrun=function (mu, sigma, a, b)
#{
# FA = pnorm(((a - mu)/sigma))
# FB = pnorm(((b - mu)/sigma))
# return(mu + sigma * qnorm(runif(length(mu)) * (FB - FA) + FA))
#}
#Using the rtruncnorm function in the truncnorm package
rtrun=function(mu,sigma,a,b)
{
n=max(c(length(mu),length(sigma),length(a),length(b)))
rtruncnorm(n,a,b,mu,sigma)
}
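#Illustrative use (not run; the parameter values below are arbitrary assumptions):
#set.seed(1)
#rtrun(mu=rep(0,5),sigma=1,a=rep(-1,5),b=rep(1,5)) #5 draws from N(0,1) truncated to (-1,1)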
#Extract the values of z such that y[i]=j
#z,y vectors, j integer
#extract=function(z,y,j) subset(as.data.frame(z,y),subset=(y==j))
extract=function(z,y,j) z[y==j]
#This routine was adapted from rinvGauss function from S-Plus
# Random variates from inverse Gaussian distribution
# Reference:
# Chhikara and Folks, The Inverse Gaussian Distribution,
# Marcel Dekker, 1989, page 53.
# GKS 15 Jan 98
#n: Number of samples
#nu: nu parameter
#lambda: lambda parameter
rinvGauss=function(n, nu, lambda)
{
if(any(nu<=0)) stop("nu must be positive")
if(any(lambda<=0)) stop("lambda must be positive")
if(length(n)>1) n = length(n)
if(length(nu)>1 && length(nu)!=n) nu = rep(nu,length=n)
if(length(lambda)>1 && length(lambda)!=n) lambda = rep(lambda,length=n)
tmp = rnorm(n)
y2 = tmp*tmp
u = runif(n)
r1 = nu/(2*lambda) * (2*lambda + nu*y2 - sqrt(4*lambda*nu*y2 + nu*nu*y2*y2))
r2 = nu*nu/r1
ifelse(u < nu/(nu+r1), r1, r2)
}
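#Illustrative check (not run; the nu and lambda values below are arbitrary assumptions):
#set.seed(1)
#mean(rinvGauss(n=1e5,nu=2,lambda=5)) #the sample mean should be close to nu=2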
#log-likelihood for ordinal data
#y: response vector
#yHat: predicted response vector, yHat=X%*%beta
#threshold: vector of thresholds (the first and last entries are -Inf and Inf)
loglik_ordinal=function(y,yHat,threshold)
{
sum=0
n=length(y)
for(i in 1:n)
{
sum=sum + log(pnorm(threshold[y[i] + 1]-yHat[i])-pnorm(threshold[y[i]]-yHat[i]))
}
return(sum)
}
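#Illustrative use (not run; toy values; as in the sampler, the first and last thresholds are -Inf and Inf):
#y=c(1,2,2); yHat=c(-0.2,0.5,1.1); threshold=c(-Inf,0,Inf)
#loglik_ordinal(y=y,yHat=yHat,threshold=threshold)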
##################################################################################################
#Arguments:
#y: data vector, NAs allowed
#response_type: can be "gaussian" or "ordinal"
#ETA: The linear predictor
#nIter: Number of MCMC iterations
#burnIn: burnIn
#thin: thin
#saveAt: string, where to save the information
#S0: Scale parameter for the prior assigned to varE
#df0: Degrees of freedom for the prior assigned to varE
#weights: optional vector of weights, one per observation
#R2: prior proportion of variance of y attributed to the linear predictor (used to set default hyper-parameters)
#Note: The function was designed to work with gaussian responses; some changes were made to deal with binary and ordinal responses
#To add new method:
#(a) create setLT,
#(b) add it to the switch statement,
#(c) add code to update parameters in the Gibbs sampler,
#(d) add code to save samples
#(e) add code to compute posterior means
#(f) Test:
#(f1) Test simple example without hyper-parameters, evaluate how
# default values were set
#(f2) Check posterior means and files
#(f3) Test simple example with some hyper-parameters given and
# some set by default
#(f4) Run an example with a few missing values, compare to BLR
# example, check: (i) residual variance, (ii) plot of effects, (iii) plot
# of predictions in trn, (iv) plot of prediction in tst.
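#Illustrative call of the function defined below (not run; simulated data and arbitrary settings):
#set.seed(1)
#X=matrix(rnorm(100*50),nrow=100); y=as.vector(X%*%rnorm(50,sd=0.1)+rnorm(100))
#fm=BGLR(y=y,ETA=list(mrk=list(X=X,model="BRR")),nIter=1200,burnIn=200,verbose=FALSE)
#fm$yHat; fm$varE; fm$ETA$mrk$varB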
BGLR=function (y, response_type = "gaussian", a = NULL, b = NULL,
ETA = NULL, nIter = 1500, burnIn = 500, thin = 5, saveAt = "",
S0 = NULL, df0 = 5, R2 = 0.5, weights = NULL,
verbose = TRUE, rmExistingFiles = TRUE, groups=NULL)
{
if(verbose)
{
welcome()
}
IDs=names(y)
if (!(response_type %in% c("gaussian", "ordinal"))) stop("Only gaussian and ordinal responses are allowed")
if (saveAt == "") {
saveAt = paste(getwd(), "/", sep = "")
}
y=as.vector(y)
y0=y
a = as.vector(a)
b = as.vector(b)
n = length(y)
nGroups=1
if(!is.null(groups))
{
groups<-as.character(groups) #Groups as character and then as factor to avoid dummy levels
groups<-as.factor(groups)
#Number of records by group
countGroups=table(groups)
nGroups=length(countGroups)
groupLabels=names(countGroups)
groups=as.integer(groups)
ggg=as.integer(groups-1); #In C we begin to count in 0
if(sum(countGroups)!=n) stop("length of groups and y differs, NA's not allowed in groups");
}
if(response_type=="ordinal")
{
y=factor(y,ordered=TRUE)
lev=levels(y)
nclass=length(lev)
if(nclass==n) stop("The number of classes in y must be smaller than the number of observations");
y=as.integer(y)
z=y
fname = paste(saveAt, "thresholds.dat", sep = "")
fileOutThresholds = file(description = fname, open = "w")
}
if (is.null(weights))
{
weights = rep(1, n)
}
if(!is.null(groups))
{
sumW2=tapply(weights^2,groups,"sum")
}else{
sumW2 = sum(weights^2)
}
nSums = 0
whichNa = which(is.na(y))
nNa = length(whichNa)
Censored = FALSE
if (response_type == "gaussian")
{
if ((!is.null(a)) | (!is.null(b)))
{
Censored = TRUE
if ((length(a) != n) | (length(b) != n)) stop("y, a and b must have the same dimension")
if (any(weights != 1)) stop("Weights are only implemented for Gausian uncensored responses")
}
mu = weighted.mean(x = y, w = weights, na.rm = TRUE)
}
post_mu = 0
post_mu2 = 0
fname = paste(saveAt, "mu.dat", sep = "")
if (rmExistingFiles)
{
unlink(fname)
}
else {
message("Note: samples will be appended to existing files")
}
fileOutMu = file(description = fname, open = "w")
if (response_type == "ordinal") {
if(verbose){message("Prior for residual is not necessary, if you provided it, it will be ignored")}
if (any(weights != 1)) stop("Weights are not supported")
countsZ=table(z)
if (nclass <= 1) stop("Data vector y has only ", nclass, " differente values, it should have at least 2 different values")
threshold=qnorm(p=c(0,cumsum(as.vector(countsZ)/n)))
y = rtrun(mu =0, sigma = 1, a = threshold[z], b = threshold[ (z + 1)])
mu=0
#posterior for thresholds
post_threshold = 0
post_threshold2 = 0
post_prob=matrix(nrow=n,ncol=nclass,0)
post_prob2=post_prob
}
post_logLik = 0
# yStar & yHat
yStar = y * weights
yHat = mu * weights
if (nNa > 0) {
yStar[whichNa] = yHat[whichNa]
}
post_yHat = rep(0, n)
post_yHat2 = rep(0, n)
# residual and residual variance
e = (yStar - yHat)
varE = var(e, na.rm = TRUE) * (1 - R2)
if (is.null(S0)) {
S0 = varE * (df0 + 2)
}
if(!is.null(groups))
{
varE=rep(varE/nGroups,nGroups)
names(varE)=groupLabels
}
sdE = sqrt(varE)
post_varE = 0
post_varE2 = 0
#File for storing sample for varE
fname = paste(saveAt, "varE.dat", sep = "")
if (rmExistingFiles) {
unlink(fname)
}
fileOutVarE = file(description = fname, open = "w")
nLT = ifelse(is.null(ETA), 0, length(ETA))
#Setting the linear terms
if (nLT > 0) {
if(is.null(names(ETA)))
{
names(ETA)<-rep("",nLT)
}
for (i in 1:nLT) {
if(names(ETA)[i]=="")
{
ETA[[i]]$Name=paste("ETA_",i,sep="")
}else{
ETA[[i]]$Name=paste("ETA_",names(ETA)[i],sep="")
}
if (!(ETA[[i]]$model %in% c("FIXED", "BRR", "BL", "BayesA", "BayesB","BayesC", "RKHS","BRR_sets")))
{
stop("Error in ETA[[", i, "]]", " model ", ETA[[i]]$model, " not implemented (note: evaluation is case sensitive)")
}
if(!is.null(groups))
{
if(!(ETA[[i]]$model %in% c("BRR","FIXED","BayesB","BayesC"))) stop("Error in ETA[[", i, "]]", " model ", ETA[[i]]$model, " not implemented for groups")
}
ETA[[i]] = switch(ETA[[i]]$model,
FIXED = setLT.Fixed(LT = ETA[[i]], n = n, j = i, weights = weights, y = y, nLT = nLT, saveAt = saveAt, rmExistingFiles = rmExistingFiles,groups=groups,nGroups=nGroups),
BRR = setLT.BRR(LT = ETA[[i]], n = n, j = i, weights = weights, y = y, nLT = nLT, R2 = R2, saveAt = saveAt, rmExistingFiles = rmExistingFiles,groups=groups,nGroups=nGroups,verbose=verbose,thin=thin,nIter=nIter,burnIn=burnIn,lower_tri=ETA[[i]]$lower_tri),#*#
BL = setLT.BL(LT = ETA[[i]], n = n, j = i, weights = weights, y = y, nLT = nLT, R2 = R2, saveAt = saveAt, rmExistingFiles = rmExistingFiles,verbose=verbose,thin=thin,nIter=nIter,burnIn=burnIn),
RKHS = setLT.RKHS(LT = ETA[[i]], n = n, j = i, weights = weights, y = y, nLT = nLT, R2 = R2, saveAt = saveAt, rmExistingFiles = rmExistingFiles,verbose=verbose),
BayesC = setLT.BayesBandC(LT = ETA[[i]], n = n, j = i, weights = weights, y = y, nLT = nLT, R2 = R2, saveAt = saveAt, rmExistingFiles = rmExistingFiles,groups=groups,nGroups=nGroups,verbose=verbose,thin=thin,nIter=nIter,burnIn=burnIn),
BayesA = setLT.BayesA(LT = ETA[[i]], n = n, j = i, weights = weights, y = y, nLT = nLT, R2 = R2, saveAt = saveAt, rmExistingFiles = rmExistingFiles,verbose=verbose,thin=thin,nIter=nIter,burnIn=burnIn),
BayesB = setLT.BayesBandC(LT = ETA[[i]], n = n, j = i, weights = weights, y = y, nLT = nLT, R2 = R2, saveAt = saveAt, rmExistingFiles = rmExistingFiles,groups=groups,nGroups=nGroups,verbose=verbose,thin=thin,nIter=nIter,burnIn=burnIn),
BRR_sets = setLT.BRR_sets(LT = ETA[[i]], n = n, j = i, weights = weights, y = y, nLT = nLT, R2 = R2, saveAt = saveAt, rmExistingFiles = rmExistingFiles,verbose=verbose,thin=thin,nIter=nIter,burnIn=burnIn)
)
}
}
# Gibbs sampler
time = proc.time()[3]
for (i in 1:nIter) {
# intercept
if(!is.null(groups))
{
e = e + weights * mu
varEexpanded=varE[groups]
#rhs = sum(tapply(e*weights,groups,"sum")/varE)
rhs = as.numeric(crossprod(e/varEexpanded,weights));
C = sum(sumW2/varE)
sol = rhs/C
mu = rnorm(n = 1, sd = sqrt(1/C)) + sol;
}else{
e = e + weights * mu
rhs = sum(weights * e)/varE
C = sumW2/varE
sol = rhs/C
mu = rnorm(n = 1, sd = sqrt(1/C)) + sol
}
if (response_type == "ordinal") {
mu=0
}
e = e - weights * mu
#deltaSS and deltadf for updating varE
deltaSS = 0
deltadf = 0
if (nLT > 0) {
for (j in 1:nLT) {
## Fixed effects ####################################################################
if (ETA[[j]]$model == "FIXED") {
#cat("varB=",ETA[[j]]$varB,"\n");
varBj = rep(ETA[[j]]$varB, ETA[[j]]$p)
if(!is.null(groups)){
ans = .Call("sample_beta_groups", n, ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b,
e, varBj, varE, 1e-9,ggg,nGroups)
}else{
ans = .Call("sample_beta", n, ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b,
e, varBj, varE, 1e-9)
}
ETA[[j]]$b = ans[[1]]
e = ans[[2]]
}#End of fixed effects
## Ridge Regression ##################################################################
if (ETA[[j]]$model == "BRR") {
varBj = rep(ETA[[j]]$varB, ETA[[j]]$p)
if(!is.null(groups))
{
ans = .Call("sample_beta_groups",n, ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b,
e, varBj, varE, 1e-9,ggg,nGroups)
}else{
if(!(ETA[[j]]$lower_tri))
{
ans = .Call("sample_beta", n, ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b,
e, varBj, varE, 1e-9)
}else{
ans = .Call("sample_beta_lower_tri", n, ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b,
e, ETA[[j]]$varB, varE, 1e-9)
}
}
ETA[[j]]$b = ans[[1]]
e = ans[[2]]
DF = ETA[[j]]$df0 + ETA[[j]]$p
SS = sum(ETA[[j]]$b^2) + ETA[[j]]$S0
ETA[[j]]$varB = SS/rchisq(df = DF, n = 1)
}# END BRR
if(ETA[[j]]$model=="BRR_sets"){
ans = .Call("sample_beta", n, ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b,
e, ETA[[j]]$varB, varE, 1e-9)
ETA[[j]]$b = ans[[1]]
e = ans[[2]]
SS=tapply(X=ETA[[j]]$b^2,INDEX=ETA[[j]]$sets,FUN=sum)+ETA[[j]]$S0
ETA[[j]]$varSets=SS/rchisq(df=ETA[[j]]$DF1,n=ETA[[j]]$n_sets)
ETA[[j]]$varB=ETA[[j]]$varSets[ETA[[j]]$sets]
}
## Bayesian LASSO ####################################################################
if (ETA[[j]]$model == "BL") {
varBj = ETA[[j]]$tau2 * varE
ans = .Call("sample_beta", n, ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b,
e, varBj, varE, ETA[[j]]$minAbsBeta)
ETA[[j]]$b = ans[[1]]
e = ans[[2]]
nu = sqrt(varE) * ETA[[j]]$lambda/abs(ETA[[j]]$b)
tmp = NULL
try(tmp <- rinvGauss(n = ETA[[j]]$p, nu = nu, lambda = ETA[[j]]$lambda2))
if (!is.null(tmp) && !any(tmp<0)) {
if (!any(is.na(sqrt(tmp)))) {
ETA[[j]]$tau2 = 1/tmp
}
else {
warning(paste("tau2 was not updated in iteration",i, "due to numeric problems with beta\n",sep=" "),immediate. = TRUE)
}
}
else {
warning(paste("tau2 was not updated in iteration",i,"due to numeric problems with beta\n",sep=" "),immediate. = TRUE)
}
#Update lambda
if (ETA[[j]]$type == "gamma") {
rate = sum(ETA[[j]]$tau2)/2 + ETA[[j]]$rate
shape = ETA[[j]]$p + ETA[[j]]$shape
ETA[[j]]$lambda2 = rgamma(rate = rate, shape = shape, n = 1)
if (!is.na(ETA[[j]]$lambda2)) {
ETA[[j]]$lambda = sqrt(ETA[[j]]$lambda2)
}
else {
warning(paste("lambda was not updated in iteration",i, "due to numeric problems with beta\n",sep=" "),immediate. = TRUE)
}
}
if (ETA[[j]]$type == "beta") {
ETA[[j]]$lambda = metropLambda(tau2 = ETA[[j]]$tau2,
lambda = ETA[[j]]$lambda, shape1 = ETA[[j]]$shape1, shape2 = ETA[[j]]$shape2,
max = ETA[[j]]$max)
ETA[[j]]$lambda2 = ETA[[j]]$lambda^2
}
deltaSS = deltaSS + sum((ETA[[j]]$b/sqrt(ETA[[j]]$tau2))^2)
deltadf = deltadf + ETA[[j]]$p
}#END BL
## RKHS ####################################################################
if (ETA[[j]]$model == "RKHS") {
#error
e = e + ETA[[j]]$u
rhs = crossprod(ETA[[j]]$V, e)/varE
varU = ETA[[j]]$varU * ETA[[j]]$d
C = as.numeric(1/varU + 1/varE)
SD = 1/sqrt(C)
sol = rhs/C
tmp = rnorm(n = ETA[[j]]$levelsU, mean = sol, sd = SD)
ETA[[j]]$uStar = tmp
ETA[[j]]$u = as.vector(ETA[[j]]$V %*% tmp)
#update error
e = e - ETA[[j]]$u
#update the variance
tmp = ETA[[j]]$uStar/sqrt(ETA[[j]]$d)
SS = as.numeric(crossprod(tmp)) + ETA[[j]]$S0
DF = ETA[[j]]$levelsU + ETA[[j]]$df0
ETA[[j]]$varU = SS/rchisq(n = 1, df = DF)
}#END RKHS
## BayesA ##############################################################################
if (ETA[[j]]$model == "BayesA") {
varBj = ETA[[j]]$varB
ans = .Call("sample_beta", n, ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b,
e, varBj, varE, 1e-9)
ETA[[j]]$b = ans[[1]]
e = ans[[2]]
#Update variances
SS = ETA[[j]]$S + ETA[[j]]$b^2
DF = ETA[[j]]$df0 + 1
ETA[[j]]$varB = SS/rchisq(n = ETA[[j]]$p, df = DF)
tmpShape=ETA[[j]]$p*ETA[[j]]$df0/2+ETA[[j]]$shape0
tmpRate=sum(1/ETA[[j]]$varB)/2+ETA[[j]]$rate0
ETA[[j]]$S=rgamma(shape=tmpShape,rate=tmpRate,n=1)
}#End BayesA
#BayesB and BayesC
if(ETA[[j]]$model %in% c("BayesB","BayesC"))
{
#Update marker effects
mrkIn=ETA[[j]]$d==1
pIn=sum(mrkIn)
if(ETA[[j]]$model=="BayesB")
{
if(!is.null(groups))
{
ans=.Call("sample_beta_BB_BCp_groups",n,ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b, ETA[[j]]$d, e, ETA[[j]]$varB, varE, 1e-9, ETA[[j]]$probIn,ggg,nGroups);
}else{
ans=.Call("sample_beta_BB_BCp",n,ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b, ETA[[j]]$d, e, ETA[[j]]$varB, varE, 1e-9, ETA[[j]]$probIn);
}
}else{
if(!is.null(groups))
{
ans=.Call("sample_beta_BB_BCp_groups",n,ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b, ETA[[j]]$d, e, rep(ETA[[j]]$varB,ETA[[j]]$p), varE, 1e-9, ETA[[j]]$probIn,ggg,nGroups);
}else{
ans=.Call("sample_beta_BB_BCp",n,ETA[[j]]$p, ETA[[j]]$X, ETA[[j]]$x2, ETA[[j]]$b, ETA[[j]]$d, e, rep(ETA[[j]]$varB,ETA[[j]]$p), varE, 1e-9, ETA[[j]]$probIn);
}
}
ETA[[j]]$d=ans[[1]]
e=ans[[2]]
ETA[[j]]$b=ans[[3]]
#Update the variance component associated with the markers
if(ETA[[j]]$model=="BayesB")
{
SS = ETA[[j]]$b^2 + ETA[[j]]$S
DF = ETA[[j]]$df0+1
ETA[[j]]$varB = SS/rchisq(df=DF, n = ETA[[j]]$p)
# Update scale
tmpShape=ETA[[j]]$p*ETA[[j]]$df0/2+ETA[[j]]$shape0
tmpRate=sum(1/ETA[[j]]$varB)/2+ETA[[j]]$rate0
ETA[[j]]$S=rgamma(shape=tmpShape,rate=tmpRate,n=1)
}else{
SS = sum(ETA[[j]]$b^2) + ETA[[j]]$S0
DF = ETA[[j]]$df0 + ETA[[j]]$p
ETA[[j]]$varB = SS/rchisq(df = DF, n = 1)
}
mrkIn = sum(ETA[[j]]$d)
ETA[[j]]$probIn = rbeta(shape1 = (mrkIn + ETA[[j]]$countsIn + 1),
shape2 = (ETA[[j]]$p - mrkIn + ETA[[j]]$countsOut + 1), n = 1)
}
}#Loop for
}#nLT
# yHat
yHat = yStar - e
#4#
# residual variance # missing values
if (response_type == "gaussian") {
if(!is.null(groups))
{
for(g in 1:nGroups)
{
SS=sum(e[groups==g]^2)+ S0 + deltaSS
DF=countGroups[g]+df0+deltadf
varE[g]=SS/rchisq(n=1,df=DF)
}
}else{
SS = sum(e * e) + S0 + deltaSS
DF = n + df0 + deltadf
varE = SS/rchisq(n = 1, df = DF)
}
sdE = sqrt(varE)
if (nNa > 0) {
if (Censored) {
if(!is.null(groups))
{
#FIXME: Double check this; I was testing it and it seems ok
sdEexpanded=sdE[groups]
yStar[whichNa] = rtrun(mu = yHat[whichNa], a = a[whichNa], b = b[whichNa], sigma = sdEexpanded)
}else{
yStar[whichNa] = rtrun(mu = yHat[whichNa], a = a[whichNa], b = b[whichNa], sigma = sdE)
}
}
else{
if(!is.null(groups))
{
#FIXME: Double check this; I was testing it and it seems ok
sdEexpanded=sdE[groups]
yStar[whichNa] = yHat[whichNa] + rnorm(n = nNa, sd = sdEexpanded)
}else{
yStar[whichNa] = yHat[whichNa] + rnorm(n = nNa, sd = sdE)
}
}
e[whichNa] = yStar[whichNa] - yHat[whichNa]
}
}else{ #ordinal
varE = 1
sdE = 1
#Update yStar, this is the latent variable
if(nNa==0){
yStar=rtrun(mu = yHat, sigma = 1, a = threshold[z], b = threshold[(z + 1)])
}else{
yStar[-whichNa]=rtrun(mu = yHat[-whichNa], sigma = 1, a = threshold[z[-whichNa]], b = threshold[(z[-whichNa] + 1)])
yStar[whichNa]=yHat[whichNa] + rnorm(n = nNa, sd = sdE)
}
#Update thresholds
if(nNa==0){
for (m in 2:nclass) {
lo = max(max(extract(yStar, z, m - 1)), threshold[m - 1])
hi = min(min(extract(yStar, z, m)), threshold[m + 1])
threshold[m] = runif(1, lo, hi)
}
}else{
for (m in 2:nclass) {
tmpY=yStar[-whichNa]
tmpZ=z[-whichNa]
lo = max(max(extract(tmpY, tmpZ, m - 1)), threshold[m - 1])
hi = min(min(extract(tmpY, tmpZ, m)), threshold[m + 1])
threshold[m] = runif(1, lo, hi)
}
}
#Update error
e = yStar - yHat
}
# Saving samples and computing running means
if ((i%%thin == 0)) {
if (nLT > 0) {
for (j in 1:nLT) {
if (ETA[[j]]$model == "FIXED") {
write(ETA[[j]]$b,ncolumns=ETA[[j]]$p, file = ETA[[j]]$fileOut, append = TRUE)
}
if (ETA[[j]]$model == "BRR") {
write(ETA[[j]]$varB, file = ETA[[j]]$fileOut, append = TRUE)
}
if (ETA[[j]]$model == "BRR_sets") {
write(ETA[[j]]$varSets, ncolumns=ETA[[j]]$n_sets,file = ETA[[j]]$fileOut, append = TRUE)
}
if (ETA[[j]]$model == "BL") {
write(ETA[[j]]$lambda, file = ETA[[j]]$fileOut, append = TRUE)
}
if (ETA[[j]]$model == "RKHS") {
write(ETA[[j]]$varU, file = ETA[[j]]$fileOut, append = TRUE)
}
if (ETA[[j]]$model == "BayesC") {
tmp = c(ETA[[j]]$probIn, ETA[[j]]$varB)
write(tmp, ncolumns = 2, file = ETA[[j]]$fileOut, append = TRUE)
}
if (ETA[[j]]$model == "BayesA") {
tmp=ETA[[j]]$S
write(tmp, ncolumns = 1, file = ETA[[j]]$fileOut, append = TRUE)
}
if(ETA[[j]]$model=="BayesB")
{
tmp=c(ETA[[j]]$probIn,ETA[[j]]$S)
write(tmp, ncolumns = 2, file = ETA[[j]]$fileOut, append = TRUE)
}
}
}
#Output files
write(x = mu, file = fileOutMu, append = TRUE)
write(x = varE, ncolumns=nGroups,file = fileOutVarE, append = TRUE)
if (response_type == "ordinal") {
write(x=threshold[2:nclass],ncolumns=nclass-1,file=fileOutThresholds,append=TRUE)
}
if (i > burnIn) {
nSums = nSums + 1
k = (nSums - 1)/(nSums)
if (nLT > 0) {
for (j in 1:nLT) {
if (ETA[[j]]$model == "FIXED") {
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
}
if (ETA[[j]]$model == "BRR") {
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
ETA[[j]]$post_varB = ETA[[j]]$post_varB * k + (ETA[[j]]$varB)/nSums
ETA[[j]]$post_varB2 = ETA[[j]]$post_varB2 * k + (ETA[[j]]$varB^2)/nSums
if(ETA[[j]]$saveEffects&&(i%%ETA[[j]]$thin)==0){
writeBin(object=ETA[[j]]$b,con=ETA[[j]]$fileEffects,size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}#*#
}
if (ETA[[j]]$model == "BRR_sets") {
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
ETA[[j]]$post_varB = ETA[[j]]$post_varB * k + (ETA[[j]]$varB)/nSums
ETA[[j]]$post_varB2 = ETA[[j]]$post_varB2 * k + (ETA[[j]]$varB^2)/nSums
ETA[[j]]$post_varSets<-ETA[[j]]$post_varSets*k+ETA[[j]]$varSets/nSums
ETA[[j]]$post_varSets2<-ETA[[j]]$post_varSets2*k+(ETA[[j]]$varSets^2)/nSums
if(ETA[[j]]$saveEffects&&(i%%ETA[[j]]$thin)==0){
writeBin(object=ETA[[j]]$b,con=ETA[[j]]$fileEffects,size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}#*#
}
if (ETA[[j]]$model == "BL") {
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
ETA[[j]]$post_tau2 = ETA[[j]]$post_tau2 * k + (ETA[[j]]$tau2)/nSums
ETA[[j]]$post_lambda = ETA[[j]]$post_lambda * k + (ETA[[j]]$lambda)/nSums
if(ETA[[j]]$saveEffects&&(i%%ETA[[j]]$thin)==0){
writeBin(object=ETA[[j]]$b,con=ETA[[j]]$fileEffects,size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}#*#
}
if (ETA[[j]]$model == "RKHS") {
ETA[[j]]$post_varU = ETA[[j]]$post_varU * k + ETA[[j]]$varU/nSums
ETA[[j]]$post_varU2 = ETA[[j]]$post_varU2 * k + (ETA[[j]]$varU^2)/nSums
ETA[[j]]$post_uStar = ETA[[j]]$post_uStar * k + ETA[[j]]$uStar/nSums
ETA[[j]]$post_u = ETA[[j]]$post_u * k + ETA[[j]]$u/nSums
ETA[[j]]$post_u2 = ETA[[j]]$post_u2 * k + (ETA[[j]]$u^2)/nSums
}
if (ETA[[j]]$model == "BayesC") {
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
ETA[[j]]$post_varB = ETA[[j]]$post_varB * k + (ETA[[j]]$varB)/nSums
ETA[[j]]$post_varB2 = ETA[[j]]$post_varB2 * k + (ETA[[j]]$varB^2)/nSums
ETA[[j]]$post_d = ETA[[j]]$post_d * k + (ETA[[j]]$d)/nSums
ETA[[j]]$post_probIn = ETA[[j]]$post_probIn * k + (ETA[[j]]$probIn)/nSums
ETA[[j]]$post_probIn2 = ETA[[j]]$post_probIn2 * k + (ETA[[j]]$probIn^2)/nSums
if(ETA[[j]]$saveEffects&&(i%%ETA[[j]]$thin)==0){
writeBin(object=ETA[[j]]$b*ETA[[j]]$d,con=ETA[[j]]$fileEffects,size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}#*#
}
if (ETA[[j]]$model == "BayesA") {
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
ETA[[j]]$post_varB = ETA[[j]]$post_varB * k + (ETA[[j]]$varB)/nSums
ETA[[j]]$post_varB2 = ETA[[j]]$post_varB2 * k + (ETA[[j]]$varB^2)/nSums
ETA[[j]]$post_S = ETA[[j]]$post_S * k + (ETA[[j]]$S)/nSums
ETA[[j]]$post_S2 = ETA[[j]]$post_S2 * k + (ETA[[j]]$S^2)/nSums
if(ETA[[j]]$saveEffects&&(i%%ETA[[j]]$thin)==0){
writeBin(object=ETA[[j]]$b,con=ETA[[j]]$fileEffects,size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}#*#
}
if(ETA[[j]]$model=="BayesB")
{
ETA[[j]]$post_b=ETA[[j]]$post_b*k+ETA[[j]]$b/nSums
ETA[[j]]$post_b2=ETA[[j]]$post_b2*k+(ETA[[j]]$b^2)/nSums
ETA[[j]]$post_varB=ETA[[j]]$post_varB*k+(ETA[[j]]$varB)/nSums
ETA[[j]]$post_varB2=ETA[[j]]$post_varB2*k+(ETA[[j]]$varB^2)/nSums
ETA[[j]]$post_d = ETA[[j]]$post_d * k + (ETA[[j]]$d)/nSums
ETA[[j]]$post_probIn = ETA[[j]]$post_probIn * k + (ETA[[j]]$probIn)/nSums
ETA[[j]]$post_probIn2 = ETA[[j]]$post_probIn2 * k + (ETA[[j]]$probIn^2)/nSums
ETA[[j]]$post_S = ETA[[j]]$post_S * k + (ETA[[j]]$S)/nSums
ETA[[j]]$post_S2 = ETA[[j]]$post_S2 * k + (ETA[[j]]$S^2)/nSums
if(ETA[[j]]$saveEffects&&(i%%ETA[[j]]$thin)==0){
writeBin(object=ETA[[j]]$b*ETA[[j]]$d,con=ETA[[j]]$fileEffects,size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}#*#
}
}
}
post_mu = post_mu * k + mu/nSums
post_mu2 = post_mu2 * k + (mu^2)/nSums
post_yHat = post_yHat * k + yHat/nSums
post_yHat2 = post_yHat2 * k + (yHat^2)/nSums
post_varE = post_varE * k + varE/nSums
post_varE2 = post_varE2 * k + (varE^2)/nSums
if (response_type == "ordinal") {
post_threshold = post_threshold * k + threshold/nSums
post_threshold2 = post_threshold2 * k + (threshold^2)/nSums
TMP=matrix(nrow=n,ncol=nclass,0)
TMP[,1]=pnorm(threshold[2]-yHat)
if(nclass>2){
for(m in 2:(nclass-1)){
TMP[,m]=pnorm(threshold[(m+1)]-yHat)-rowSums(as.matrix(TMP[,1:(m-1)]))
}
}
TMP[,nclass]=1-rowSums(TMP)
post_prob=post_prob*k+TMP/nSums
post_prob2=post_prob2*k+(TMP^2)/nSums
if(nNa==0){
logLik=loglik_ordinal(z,yHat,threshold)
}else{
logLik=loglik_ordinal(z[-whichNa],yHat[-whichNa],threshold)
}
}
if(response_type == "gaussian") {
tmpE = e/weights
if(!is.null(groups)){
tmpSD=rep(NA,n)
for(g in 1:nGroups)
{
index=(groups==g)
tmpSD[index]=sqrt(varE[g])/weights[index]
}
}else{
tmpSD = sqrt(varE)/weights
}
if (nNa > 0) {
tmpE = tmpE[-whichNa]
tmpSD = tmpSD[-whichNa]
}
logLik = sum(dnorm(tmpE, sd = tmpSD, log = TRUE))
}#end gaussian
post_logLik = post_logLik * k + logLik/nSums
}
}#end of saving samples and computing running means
if (verbose) {
message("---------------------------------------")
tmp = proc.time()[3]
message("Iter=",i," Time/Iter=",round(tmp-time,3))
#message("VarE=",round(varE,3))
#In the case of variance by groups
message("varE=",paste(round(varE,3),collapse=", "))
time = tmp
}
}#end of Gibbs sampler
#Closing files
close(fileOutVarE)
close(fileOutMu)
if(response_type == "ordinal") close(fileOutThresholds)
if (nLT > 0) {
for (i in 1:nLT) {
if (!is.null(ETA[[i]]$fileOut)) {
flush(ETA[[i]]$fileOut)
close(ETA[[i]]$fileOut)
ETA[[i]]$fileOut = NULL
}
if(!is.null(ETA[[i]]$fileEffects)){
flush(ETA[[i]]$fileEffects)
close(ETA[[i]]$fileEffects)
if(!is.null(ETA[[i]]$compressEffects)&&ETA[[i]]$compressEffects==TRUE){
compressFile(paste0(saveAt,ETA[[i]]$Name,"_b.bin"))
}
ETA[[i]]$fileEffects = NULL
}
}
}
#return goodies
out = list(y = y0, a=a,b=b,whichNa = whichNa, saveAt = saveAt, nIter = nIter,
burnIn = burnIn, thin = thin,
weights = weights, verbose = verbose,
response_type = response_type, df0 = df0, S0 = S0)
out$yHat = post_yHat
names(out$yHat)=IDs
names(out$y)=IDs
out$SD.yHat = sqrt(post_yHat2 - (post_yHat^2))
out$mu = post_mu
out$SD.mu = sqrt(post_mu2 - post_mu^2)
out$varE = post_varE
out$SD.varE = sqrt(post_varE2 - post_varE^2)
#goodness of fit
out$fit = list()
if(response_type=="gaussian")
{
tmpE = (yStar - post_yHat)/weights
if(!is.null(groups))
{
tmpSD=rep(NA,n)
for(g in 1:nGroups)
{
index=(groups==g)
tmpSD[index]=sqrt(varE[g])/weights[index]
}
}else{
tmpSD = sqrt(post_varE)/weights
}
if (nNa > 0) {
tmpE = tmpE[-whichNa]
tmpSD = tmpSD[-whichNa]
}
out$fit$logLikAtPostMean = sum(dnorm(tmpE, sd = tmpSD, log = TRUE))
if (Censored) {
cdfA = pnorm(q = a[whichNa], sd = sqrt(post_varE), mean = post_yHat[whichNa])
cdfB = pnorm(q = b[whichNa], sd = sqrt(post_varE), mean = post_yHat[whichNa])
out$fit$logLikAtPostMean = out$fit$logLikAtPostMean + sum(log(cdfB - cdfA))
}
}
if(response_type=="ordinal")
{
out$probs=post_prob
out$SD.probs=sqrt(post_prob2-post_prob^2)
colnames(out$probs)=lev
colnames(out$SD.probs)=lev
out$threshold = post_threshold[-c(1, nclass + 1)]
out$SD.threshold = sqrt(post_threshold2 - post_threshold^2)[-c(1, nclass + 1)]
#out$fit$logLikAtPostMean = loglik_ordinal(y,post_yHat,post_threshold)#*#
tmp=0
for(i in 1:nclass){
tmp=tmp+sum(ifelse(y0==lev[i],log(out$probs[,i]),0))
}
out$fit$logLikAtPostMean=tmp
out$levels=lev
out$nlevels=nclass
}
out$fit$postMeanLogLik = post_logLik
out$fit$pD = -2 * (post_logLik - out$fit$logLikAtPostMean)
out$fit$DIC = out$fit$pD - 2 * post_logLik
# Renaming/removing objects in ETA and appending names
if (nLT > 0) {
for (i in 1:nLT) {
if (ETA[[i]]$model != "RKHS") {
ETA[[i]]$b = ETA[[i]]$post_b
ETA[[i]]$SD.b = sqrt(ETA[[i]]$post_b2 - ETA[[i]]$post_b^2)
names(ETA[[i]]$b)=ETA[[i]]$colNames
names(ETA[[i]]$SD.b)=ETA[[i]]$colNames
tmp = which(names(ETA[[i]]) %in% c("post_b", "post_b2","X","x2"))
ETA[[i]] = ETA[[i]][-tmp]
}
if(ETA[[i]]$model=="RKHS")
{
ETA[[i]]$SD.u=sqrt(ETA[[i]]$post_u2 - ETA[[i]]$post_u^2)
ETA[[i]]$u=ETA[[i]]$post_u
ETA[[i]]$uStar=ETA[[i]]$post_uStar
ETA[[i]]$varU=ETA[[i]]$post_varU
ETA[[i]]$SD.varU=sqrt(ETA[[i]]$post_varU2 - ETA[[i]]$post_varU^2)
tmp=which(names(ETA[[i]])%in%c("post_varU","post_varU2","post_uStar","post_u","post_u2"))
ETA[[i]]=ETA[[i]][-tmp]
}
if (ETA[[i]]$model %in% c("BRR","BRR_sets", "BayesA", "BayesC","BayesB")) {
ETA[[i]]$varB = ETA[[i]]$post_varB
ETA[[i]]$SD.varB = sqrt(ETA[[i]]$post_varB2 - (ETA[[i]]$post_varB^2))
tmp = which(names(ETA[[i]]) %in% c("post_varB", "post_varB2"))
ETA[[i]] = ETA[[i]][-tmp]
}
if(ETA[[i]]$model=="BRR_sets"){
ETA[[i]]$varSets=ETA[[i]]$post_varSets
ETA[[i]]$SD.varSets=sqrt(ETA[[i]]$post_varSets2-(ETA[[i]]$post_varSets^2))
tmp<-which(names(ETA[[i]])%in%c("post_varSets","post_varSets2"))
ETA[[i]]=ETA[[i]][-tmp]
}
if(ETA[[i]]$model %in% c("BayesB","BayesC"))
{
ETA[[i]]$d=ETA[[i]]$post_d
ETA[[i]]$probIn=ETA[[i]]$post_probIn
ETA[[i]]$SD.probIn=sqrt(ETA[[i]]$post_probIn2 - (ETA[[i]]$post_probIn^2))
tmp = which(names(ETA[[i]]) %in% c("post_d", "post_probIn","post_probIn2"))
ETA[[i]] = ETA[[i]][-tmp]
}
if(ETA[[i]]$model %in% c("BayesA","BayesB"))
{
ETA[[i]]$S=ETA[[i]]$post_S
ETA[[i]]$SD.S=sqrt( ETA[[i]]$post_S2 - (ETA[[i]]$post_S^2))
tmp=which(names(ETA[[i]])%in%c("post_S","post_S2"))
ETA[[i]]=ETA[[i]][-tmp]
}
if(ETA[[i]]$model=="BL")
{
ETA[[i]]$tau2=ETA[[i]]$post_tau2
ETA[[i]]$lambda=ETA[[i]]$post_lambda
tmp = which(names(ETA[[i]]) %in% c("post_tau2", "post_lambda","lambda2"))
ETA[[i]] = ETA[[i]][-tmp]
}
}
out$ETA = ETA
}
class(out) = "BGLR"
return(out)
}
#This function will be a wrapper for BGLR
#the idea is to maintain the compatibility with the function BLR in
#the package BLR that was released in 2010, updated in 2011 and 2012
#NOTE: thin2 parameter is missing in BGLR, so it will be removed
BLR=function (y, XF = NULL, XR = NULL, XL = NULL, GF = list(ID = NULL,
A = NULL), prior = NULL, nIter = 1100, burnIn = 100, thin = 10,
thin2 = 1e+10, saveAt = "", minAbsBeta = 1e-09, weights = NULL)
{
ETA = NULL
ETA = list()
nLT = 0
message("This implementation is a simplified interface for the more general")
message("function BGLR, we keep it for backward compatibility with our package BLR")
warning("thin2 parameter is not used any more and will be deleted in next releases\n",immediate. = TRUE);
message("Setting parameters for BGLR...")
if (is.null(prior)) {
message("===============================================================")
message("No prior was provided, BGLR will be running with improper priors.")
message("===============================================================")
prior = list(varE = list(S = NULL, df = 1), varBR = list(S = 0,
df = 0), varU = list(S = 0, df = 0), lambda = list(shape = 0,
rate = 0, type = "random", value = 50))
}
if (!is.null(XF)) {
nLT = nLT + 1
ETA[[nLT]] = list(X = XF, model = "FIXED")
}
if (!is.null(XR)) {
nLT = nLT + 1
ETA[[nLT]] = list(X = XR, model = "BRR", df0 = prior$varBR$df,
S0 = prior$varBR$S)
}
if (!is.null(XL)) {
nLT = nLT + 1
if (prior$lambda$type == "random") {
if (is.null(prior$lambda$rate)) {
message("Setting prior for lambda^2 to beta")
prior$lambda$type = "beta"
prior$lambda$shape = NULL
prior$lambda$rate = NULL
}
else {
message("Setting prior for lambda^2 to gamma")
prior$lambda$type = "gamma"
prior$lambda$max = NULL
prior$lambda$shape1 = NULL
prior$lambda$shape2 = NULL
}
}
ETA[[nLT]] = list(X = XL, model = "BL", type = prior$lambda$type,
rate = prior$lambda$rate, shape = prior$lambda$shape,
max = prior$lambda$max, shape1 = prior$lambda$shape1,
shape2 = prior$lambda$shape2, lambda = prior$lambda$value,
minAbsBeta=minAbsBeta)
}
#NOTE: In the original BLR, IDs are used to build the incidence matrix Z from A,
#and the model y=Zu+e, u~MN(0,varU*A), is then fitted using the Cholesky factorization of A.
#The algorithm used here is different (orthogonal variables)
#and the IDs may no longer be necessary
if (!is.null(GF[[1]])) {
nLT = nLT + 1
ETA[[nLT]] = list(K = GF$A, model = "RKHS", df0 = prior$varU$df,
S0 = prior$varU$S)
warning("IDs are not used any more and will be deleted in next releases...\n",immediate. = TRUE)
}
message("Finish setting parameters for BGLR")
message("Fitting model using BGLR...")
out = BGLR(y = y, ETA = ETA, df0 = prior$varE$df, S0 = prior$varE$S,
nIter = nIter, burnIn = burnIn, thin = thin, saveAt = saveAt,
weights = weights)
#Backward compatibility with BLR
if (nLT > 0) {
for (j in 1:nLT) {
if (ETA[[j]]$model == "FIXED") {
out$bF = out$ETA[[j]]$b
out$SD.bF = out$ETA[[j]]$SD.b
}
if (ETA[[j]]$model == "BL") {
out$bL = out$ETA[[j]]$b
out$SD.bL = out$ETA[[j]]$SD.b
out$lambda = out$ETA[[j]]$lambda
}
if (ETA[[j]]$model == "BRR") {
out$bR = out$ETA[[j]]$b
out$SD.bR = out$ETA[[j]]$SD.b
out$varBR = out$ETA[[j]]$varB
out$SD.bR = out$ETA[[j]]$SD.varB
}
if (ETA[[j]]$model == "RKHS") {
out$u = out$ETA[[j]]$u
out$SD.u =out$ETA[[j]]$SD.u
out$varU = out$ETA[[j]]$varU
}
}
}
out$ETA = NULL
class(out) = "BLR"
return(out)
}
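#Illustrative backward-compatible call (not run; simulated data and arbitrary settings):
#set.seed(1)
#XL=matrix(rnorm(100*50),nrow=100); y=as.vector(XL%*%rnorm(50,sd=0.1)+rnorm(100))
#fmBLR=BLR(y=y,XL=XL,nIter=1200,burnIn=200)
#fmBLR$bL; fmBLR$lambda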
##################################################################################################
# End of file: /scratch/gouwar.j/cran-all/cranData/BGLR/R/BGLR.R
##################################################################################################
#Bayes A, Meuwissen et al. (2001).
#Prediction of Total Genetic Value Using Genome-Wide Dense Marker Maps
#Genetics 157: 1819-1829. Modified so that the Scale parameter is estimated from the data (a gamma prior is assigned)
setLT.BayesA.Cross=function(prior,y,j,p,idColumns,sumVarX,R2,nLT,verbose,
saveAt,rmExistingFiles,thin,nIter,burnIn)
{
#Just a copy of values provided by user
LT=list()
LT$Name=prior$Name
LT$R2=prior$R2
LT$df0=prior$df0
LT$S0=prior$S0
LT$shape0=prior$shape0
LT$rate0=prior$rate0
LT$p=p
LT$idColumns=idColumns
LT$saveEffects=prior$saveEffects
LT$storageMode=prior$storageMode
LT$MSx=sumVarX
if(is.null(LT$R2))
{
LT$R2=R2/nLT
if(verbose)
{
message("R2 in LP ",j, " was missing and was set to ",LT$R2)
}
}
#Default value for the degrees of freedom associated with the distribution
#assigned to the variance of betas
if(is.null(LT$df0))
{
LT$df0= 5
if(verbose)
{
message("DF in LP ",j," was missing and was set to ",LT$df0)
}
}
#Default value for the scale parameter associated with the distribution
#assigned to the variance of betas
if(is.null(LT$S0))
{
if(LT$df0<=0) stop("df0>0 in ",LT$model," in order to set S0");
LT$S0=var(y, na.rm = TRUE)*LT$R2/(LT$MSx)*(LT$df0+2)
if(verbose)
{
message("Scale parameter in LP ",j," was missing and was set to ",LT$S0)
}
}
# Improvement: Treat the Scale as random, assign a gamma density
message("Scale parameter is treated as random, a gamma prior has been assigned")
if(is.null(LT$shape0))
{
LT$shape0=1.1
message("shape parameter for the Scale in LP ",j, " was missing and was set to ",LT$shape0)
}
if(is.null(LT$rate0))
{
LT$rate0=(LT$shape0-1)/LT$S0
message("rate parameter for the Scale in LP ",j, " was missing and was set to ",LT$rate0)
}
LT$S=LT$S0
LT$b=rep(0,LT$p)
LT$varB=rep(LT$S0/(LT$df0+2),LT$p)
# Add one file when S0 is treated as random.
fname=paste(saveAt,LT$Name,"_ScaleBayesA.dat",sep="")
if(rmExistingFiles)
{
unlink(fname)
}
LT$fileOut=file(description=fname,open="w")
LT$NamefileOut=fname;
#Objects for saving posterior means for MCMC
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
LT$post_varB=0
LT$post_varB2=0
LT$post_S=0
LT$post_S2=0
#*#
if(is.null(LT$saveEffects)){LT$saveEffects=FALSE}
if(LT$saveEffects){
if(is.null(LT$storageMode)){LT$storageMode="double"}
if(!LT$storageMode%in%c("single","double")) {
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
if(is.null(LT$thin)){ LT$thin=thin }
fname=paste(saveAt,LT$Name,"_b.bin",sep="")
if(rmExistingFiles){ unlink(fname) }
LT$fileEffects=file(fname,open='wb')
nRow=floor((nIter-burnIn)/LT$thin)
writeBin(object=c(nRow,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
return(LT)
}
##########################################################################################
#Set linear term for BayesB
##########################################################################################
setLT.BayesB.Cross=function(prior,y,j,p,idColumns,sumVarX,R2,nLT,verbose,
saveAt,rmExistingFiles,thin,nIter,burnIn)
{
#Just a copy of values provided by user
LT=list()
LT$Name=prior$Name
LT$R2=prior$R2
LT$df0=prior$df0
LT$rate0=prior$rate0
LT$shape0=prior$shape0
LT$probIn=prior$probIn
LT$counts=prior$counts
LT$p=p
LT$idColumns=idColumns
LT$saveEffects=prior$saveEffects
LT$storageMode=prior$storageMode
LT$MSx=sumVarX
if(is.null(LT$R2))
{
LT$R2=R2/nLT
if(verbose)
{
message("R2 in LP ",j, " was missing and was set to ",LT$R2)
}
}
#Default value for the degrees of freedom associated with the distribution
#assigned to the variance of betas
if(is.null(LT$df0))
{
LT$df0= 5
if(verbose)
{
message("DF in LP ",j," was missing and was set to ",LT$df0)
}
}
#Default value for a predictor being "in" the model
if(is.null(LT$probIn))
{
LT$probIn=0.5
if(verbose)
{
message("probIn in LP ",j," was missing and was set to ",LT$probIn)
}
}
#Default value for prior counts
if(is.null(LT$counts))
{
LT$counts=10
if(verbose)
{
message("Counts in LP ",j," was missing and was set to ",LT$counts)
}
}
LT$countsIn=LT$counts * LT$probIn
LT$countsOut=LT$counts - LT$countsIn
#Set the initial value for S
if(LT$df0<=0) stop("df0>0 in ",LT$model," in order to set S")
LT$S=var(y, na.rm = TRUE)*LT$R2/(LT$MSx)*(LT$df0+2)/LT$probIn
if(is.null(LT$shape0))
{
LT$shape0=1.1
message("shape0 in LP ",j," was missing and was set to ",LT$shape0)
}
if(is.null(LT$rate0))
{
LT$rate0=(LT$shape0-1)/LT$S
message("rate0 in LP ",j," was missing and was set to ",LT$rate0)
}
LT$a=rep(0, LT$p)
LT$d=rbinom(n = LT$p, size = 1, prob = LT$probIn)
LT$b=LT$a*LT$d #b=a*d, for compatibility with BGLR we use b instead of beta in linear terms
LT$varB = rep(LT$S/(LT$df0+2),LT$p)
fname=paste(saveAt,LT$Name,"_parBayesB.dat",sep="")
if(rmExistingFiles)
{
unlink(fname)
}
LT$fileOut=file(description=fname,open="w")
LT$NamefileOut=fname
tmp=c('probIn','scale')
write(tmp, ncolumns = 2, file = LT$fileOut, append = TRUE)
#Objects for saving posterior means for MCMC
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
LT$post_varB=0
LT$post_varB2=0
LT$post_d=0
LT$post_probIn=0
LT$post_probIn2=0
LT$post_S=0
LT$post_S2=0
#*#
if(is.null(LT$saveEffects)){LT$saveEffects=FALSE}
if(LT$saveEffects){
if(is.null(LT$storageMode)){LT$storageMode="double"}
if(!LT$storageMode%in%c("single","double")) {
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
if(is.null(LT$thin)){ LT$thin=thin }
fname=paste(saveAt,LT$Name,"_b.bin",sep="")
if(rmExistingFiles){ unlink(fname) }
LT$fileEffects=file(fname,open='wb')
nRow=floor((nIter-burnIn)/LT$thin)
writeBin(object=c(nRow,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
return(LT)
}
##########################################################################################
#Set linear term for BayesC
##########################################################################################
setLT.BayesC.Cross=function(prior,y,j,p,idColumns,sumVarX,R2,nLT,verbose,
saveAt,rmExistingFiles,thin,nIter,burnIn)
{
#Just a copy of values provided by user
LT=list()
LT$Name=prior$Name
LT$R2=prior$R2
LT$df0=prior$df0
LT$S0=prior$S0
LT$probIn=prior$probIn
LT$counts=prior$counts
LT$p=p
LT$idColumns=idColumns
LT$saveEffects=prior$saveEffects
LT$storageMode=prior$storageMode
LT$MSx=sumVarX
if(is.null(LT$R2))
{
LT$R2=R2/nLT
if(verbose)
{
message("R2 in LP ",j, " was missing and was set to ",LT$R2)
}
}
#Default value for the degrees of freedom associated with the distribution
#assigned to the variance of betas
if(is.null(LT$df0))
{
LT$df0= 5
if(verbose)
{
message("DF in LP ",j," was missing and was set to ",LT$df0)
}
}
#Default value for a predictor being "in" the model
if(is.null(LT$probIn))
{
LT$probIn=0.5
if(verbose)
{
message("probIn in LP ",j," was missing and was set to ",LT$probIn)
}
}
#Default value for prior counts
if(is.null(LT$counts))
{
LT$counts=10
if(verbose)
{
message("Counts in LP ",j," was missing and was set to ",LT$counts)
}
}
LT$countsIn=LT$counts * LT$probIn
LT$countsOut=LT$counts - LT$countsIn
#Default value for the scale parameter associated with the distribution
#assigned to the variance of betas
if(is.null(LT$S0))
{
if(LT$df0<=0) stop("df0>0 in ",LT$model," in order to set S0");
LT$S0=var(y, na.rm = TRUE)*LT$R2/(LT$MSx)*(LT$df0+2)/LT$probIn
if(verbose)
{
message("Scale parameter in LP ",j," was missing and was set to ",LT$S0)
}
}
LT$a=rep(0, LT$p)
LT$d=rbinom(n = LT$p, size = 1, prob = LT$probIn)
LT$b=LT$a*LT$d #b=a*d, for compatibility with BGLR we use b instead of beta in linear terms
LT$varB = LT$S0
fname=paste(saveAt,LT$Name,"_parBayesC.dat",sep="")
if(rmExistingFiles)
{
unlink(fname)
}
LT$fileOut=file(description=fname,open="w")
LT$NamefileOut=fname
tmp=c('probIn','varB')
write(tmp, ncolumns = 2, file = LT$fileOut, append = TRUE)
#Objects for saving posterior means for MCMC
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
LT$post_varB=0
LT$post_varB2=0
LT$post_d=0
LT$post_probIn=0
LT$post_probIn2=0
#*#
if(is.null(LT$saveEffects)){LT$saveEffects=FALSE}
if(LT$saveEffects){
if(is.null(LT$storageMode)){LT$storageMode="double"}
if(!LT$storageMode%in%c("single","double")) {
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
if(is.null(LT$thin)){ LT$thin=thin }
fname=paste(saveAt,LT$Name,"_b.bin",sep="")
if(rmExistingFiles){ unlink(fname) }
LT$fileEffects=file(fname,open='wb')
nRow=floor((nIter-burnIn)/LT$thin)
writeBin(object=c(nRow,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
return(LT)
}
##########################################################################################
#Set linear term for SSVS
#George, E. I. and McCulloch, R. E. 1993. Variable selection via Gibbs Sampling,
#Journal of the American Statistical Association, 88(423): 881-889
##########################################################################################
#Evaluates the logarithm of p(c|else)
LogCondc=function(c,varB,b,d,shape1,shape2)
{
p=length(b)
bs=b[d!=1]
-1/(2*varB*c^2)*sum(bs^2)+(shape1-1)*log(c)+(shape2-1)*log(1-c)-(p-sum(d))*log(c)
}
#Metropolis sampler for c|else
metropc=function(c,varB,b,d,shape1,shape2)
{
flag=TRUE
while(flag)
{
c_new=c+rnorm(1,0,sd=0.05)
if(c_new>0 & c_new<1)
{
flag=FALSE
}
}
logP_old = LogCondc(c,varB,b,d,shape1,shape2)
logP_new = LogCondc(c_new,varB,b,d,shape1,shape2)
accept = (logP_new - logP_old) > log(runif(1))
if (accept) {
return(c_new)
}else{
return(c)
}
}
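#Illustrative use (not run; toy values; shape1 and shape2 correspond to the default ccountsIn/ccountsOut):
#set.seed(1)
#b=rnorm(20,sd=0.01); d=rbinom(20,size=1,prob=0.5)
#metropc(c=0.1,varB=0.05,b=b,d=d,shape1=10,shape2=990) #one Metropolis update of c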
setLT.SSVS.Cross=function(prior,y,j,p,idColumns,sumVarX,R2,nLT,verbose,
saveAt,rmExistingFiles,thin,nIter,burnIn)
{
#Just a copy of values provided by user
LT=list()
LT$Name=prior$Name
LT$cprobIn=prior$cprobIn
LT$ccounts=prior$ccounts
LT$R2=prior$R2
LT$df0=prior$df0
LT$S0=prior$S0
LT$probIn=prior$probIn
LT$counts=prior$counts
LT$p=p
LT$idColumns=idColumns
LT$saveEffects=prior$saveEffects
LT$storageMode=prior$storageMode
LT$MSx=sumVarX
if(is.null(LT$cprobIn))
{
LT$cprobIn=1/100
if(verbose)
{
message("cprobIn in LP ",j," was missing and was set to ",LT$cprobIn)
}
}
if(is.null(LT$ccounts))
{
LT$ccounts=1E3
if(verbose)
{
message("ccounts in LP ",j," was missing and was set to ",LT$ccounts)
}
}
LT$ccountsIn=LT$cprobIn*LT$ccounts #Shape1
LT$ccountsOut=(1-LT$cprobIn)*LT$ccounts #Shape2
LT$c=rbeta(n=1,LT$ccountsIn,LT$ccountsOut)
if(is.null(LT$R2))
{
LT$R2=R2/nLT
if(verbose)
{
message("R2 in LP ",j, " was missing and was set to ",LT$R2)
}
}
#Default value for the degrees of freedom associated with the distribution
#assigned to the variance of betas
if(is.null(LT$df0))
{
LT$df0= 5
if(verbose)
{
message("DF in LP ",j," was missing and was set to ",LT$df0)
}
}
#Default value for a predictor being "in" the model
if(is.null(LT$probIn))
{
LT$probIn=0.5
if(verbose)
{
message("probIn in LP ",j," was missing and was set to ",LT$probIn)
}
}
#Default value for prior counts
if(is.null(LT$counts))
{
LT$counts=10
if(verbose)
{
message("Counts in LP ",j," was missing and was set to ",LT$counts)
}
}
LT$countsIn=LT$counts * LT$probIn
LT$countsOut=LT$counts - LT$countsIn
#Default value for the scale parameter associated with the distribution
#assigned to the variance of betas
if(is.null(LT$S0))
{
if(LT$df0<=0) stop("df0>0 in ",LT$model," in order to set S0");
LT$S0=var(y, na.rm = TRUE)*LT$R2/(LT$MSx)*(LT$df0+2)/LT$probIn
if(verbose)
{
message("Scale parameter in LP ",j," was missing and was set to ",LT$S0)
}
}
LT$b=rep(0, LT$p)
LT$d=rbinom(n = LT$p, size = 1, prob = LT$probIn)
LT$a=rep(1, LT$p) #a=1 if d==1 and a=c if d==0, here the values are all set to 1 initially
LT$varB = LT$S0
fname=paste(saveAt,LT$Name,"_parSSVS.dat",sep="")
if(rmExistingFiles)
{
unlink(fname)
}
LT$fileOut=file(description=fname,open="w")
LT$NamefileOut=fname
tmp=c('probIn','varB','c')
write(tmp, ncolumns = 3, file = LT$fileOut, append = TRUE)
#Objects for saving posterior means for MCMC
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
LT$post_varB=0
LT$post_varB2=0
LT$post_d=0
LT$post_probIn=0
LT$post_probIn2=0
LT$post_c=0
LT$post_c2=0
#*#
if(is.null(LT$saveEffects)){LT$saveEffects=FALSE}
if(LT$saveEffects){
if(is.null(LT$storageMode)){LT$storageMode="double"}
if(!LT$storageMode%in%c("single","double")) {
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
if(is.null(LT$thin)){ LT$thin=thin }
fname=paste(saveAt,LT$Name,"_b.bin",sep="")
if(rmExistingFiles){ unlink(fname) }
LT$fileEffects=file(fname,open='wb')
nRow=floor((nIter-burnIn)/LT$thin)
writeBin(object=c(nRow,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
return(LT)
}
##########################################################################################
#Set linear term for Bayesian Ridge Regression
##########################################################################################
setLT.BRR.Cross=function(prior,y,j,p,idColumns,sumVarX,R2,nLT,verbose,
saveAt,rmExistingFiles,thin,nIter,burnIn)
{
#Just a copy of values provided by user
LT=list()
LT$Name=prior$Name
LT$R2=prior$R2
LT$df0=prior$df0
LT$S0=prior$S0
LT$p=p
LT$idColumns=idColumns
LT$saveEffects=prior$saveEffects
LT$storageMode=prior$storageMode
LT$MSx=sumVarX
if(is.null(LT$R2))
{
LT$R2=R2/nLT
if(verbose)
{
message("R2 in LP ",j, " was missing and was set to ",LT$R2)
}
}
#Default value for the degrees of freedom associated with the distribution
#assigned to the variance of betas
if(is.null(LT$df0))
{
LT$df0=5
if(verbose)
{
message("DF in LP ",j," was missing and was set to ",LT$df0)
}
}
#Default value for the scale parameter associated with the distribution
#assigned to the variance of betas
if(is.null(LT$S0))
{
if(LT$df0<=0) stop("df0>0 in ",LT$model," in order to set S0");
LT$S0=var(y, na.rm = TRUE)*LT$R2/(LT$MSx)*(LT$df0+2)
if(verbose)
{
message("Scale parameter in LP ",j," was missing and was set to ",LT$S0)
}
}
LT$b=rep(0, LT$p)
LT$varB = LT$S0/(LT$df0+2)
fname=paste(saveAt,LT$Name,"_varB.dat",sep="");
if(rmExistingFiles)
{
unlink(fname)
}
LT$NamefileOut=fname
LT$fileOut=file(description=fname,open="w")
#Objects for saving posterior means for MCMC
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
LT$post_varB=0
LT$post_varB2=0
#*#
if(is.null(LT$saveEffects)){LT$saveEffects=FALSE}
if(LT$saveEffects){
if(is.null(LT$storageMode)){LT$storageMode="double"}
if(!LT$storageMode%in%c("single","double")) {
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
if(is.null(LT$thin)){ LT$thin=thin }
fname=paste(saveAt,LT$Name,"_b.bin",sep="")
if(rmExistingFiles){ unlink(fname) }
LT$fileEffects=file(fname,open='wb')
nRow=floor((nIter-burnIn)/LT$thin)
writeBin(object=c(nRow,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
return(LT)
}
##########################################################################################
#FIXED Effects (Ridge Regression with huge variance for regression coefficients,
#which effectively leads to a flat prior)
##########################################################################################
setLT.Fixed.Cross=function(p,idColumns,Name,saveAt,rmExistingFiles)
{
#Just a copy of values provided by user
LT=list()
LT$Name=Name
LT$p=p
LT$idColumns=idColumns
LT$b=rep(0, LT$p)
LT$varB = 1e10
if(LT$Name=="___INTERCEPT___")
{
#Dirty hack to save samples from the intercept as "mu.dat"
#when the intercept is included automatically as a fixed effect in routine BLRXy
fname=paste(saveAt,"mu.dat",sep="")
}else{
fname=paste(saveAt,LT$Name,"_b.dat",sep="")
}
LT$NamefileOut=fname
if(rmExistingFiles)
{
unlink(fname)
}
LT$fileOut=file(description=fname,open="w")
#Objects for saving posterior means for MCMC
LT$post_b=rep(0,LT$p)
LT$post_b2=rep(0,LT$p)
return(LT)
}
BLRCross=function(y,XX,Xy,nIter=1500,burnIn=500,
thin=5,R2=0.5,
S0=NULL,df0=5,
priors=NULL,
idPriors=NULL,
verbose=TRUE,
saveAt="",
rmExistingFiles = TRUE)
{
if(verbose)
{
welcome()
}
#Check burnIn and thin
if(burnIn>=nIter)
{
burnIn=as.integer(nIter/2)
message("burnIn was set to ",burnIn, " because burnIn can not be bigger than nIter")
}
#Assuming all effects are zero
RSS=sum(y^2)
n=length(y)
p=ncol(XX)
#Number of predictors in each group
nCols=table(idPriors)
if(p!=sum(nCols)) stop("The number of columns in X'X is different to the number of elements in idPriors\n")
varY=var(y,na.rm=TRUE)
varE=varY*(1-R2)
if(is.null(S0))
{
S0=varE*(df0+2)
message("S0 was missing and was set to ",S0)
}
if(is.null(priors)) stop("priors can not be NULL\n")
if(!is.list(priors)) stop("priors should be a list\n")
nLT = length(priors)
if(!(nLT>0)) stop("priors should have at least one component\n")
if(is.null(names(priors)))
{
names(priors)=rep("",nLT)
}
#Setting the linear terms
#Create an empty list
ETA=list()
#Loop over the components for the linear terms and set up
#hyperparameters
for(j in 1:nLT)
{
diagonal=XX[1 + 0L:(p - 1L) * (p + 1)]
sumVarX=sum(diagonal[idPriors==j])/n
idColumns=which(idPriors==j)
if(!(priors[[j]]$model %in% c("FIXED", "BRR", "BayesA", "BayesB","BayesC","SSVS")))
{
stop("Error in priors[[", j, "]]", " model ", priors[[j]]$model, " not implemented (note: evaluation is case sensitive)")
}
if(names(priors)[j]=="")
{
priors[[j]]$Name=paste("ETA_",j,sep="")
}else{
if(names(priors)[j]!="___INTERCEPT___")
{
#Dirty hack to avoid changing Name, see setLT.Fixed.Cross
priors[[j]]$Name=paste("ETA_",names(priors)[j],sep="")
}
}
ETA[[j]]=switch(priors[[j]]$model,
BayesA=setLT.BayesA.Cross(prior=priors[[j]],y=y,j=j,p=nCols[j],
idColumns=idColumns,sumVarX=sumVarX,
R2=R2,nLT=nLT,verbose=verbose,
saveAt=saveAt,
rmExistingFiles=rmExistingFiles,
thin=thin,
nIter=nIter,
burnIn=burnIn),
BayesB=setLT.BayesB.Cross(prior=priors[[j]],y=y,j=j,p=nCols[j],
idColumns=idColumns,sumVarX=sumVarX,
R2=R2,nLT=nLT,verbose=verbose,
saveAt=saveAt,
rmExistingFiles=rmExistingFiles,
thin=thin,
nIter=nIter,
burnIn=burnIn),
BayesC=setLT.BayesC.Cross(prior=priors[[j]],y=y,j=j,p=nCols[j],
idColumns=idColumns,sumVarX=sumVarX,
R2=R2,nLT=nLT,verbose=verbose,
saveAt=saveAt,
rmExistingFiles=rmExistingFiles,
thin=thin,
nIter=nIter,
burnIn=burnIn),
BRR=setLT.BRR.Cross(prior=priors[[j]],y=y,j=j,p=nCols[j],
idColumns=idColumns,sumVarX=sumVarX,
R2=R2,nLT=nLT,verbose=verbose,
saveAt=saveAt,
rmExistingFiles=rmExistingFiles,
thin=thin,
nIter=nIter,
burnIn=burnIn),
FIXED=setLT.Fixed.Cross(p=nCols[j],idColumns=idColumns,
Name=priors[[j]]$Name,
saveAt=saveAt,
rmExistingFiles=rmExistingFiles),
SSVS=setLT.SSVS.Cross(prior=priors[[j]],y=y,j=j,p=nCols[j],
idColumns=idColumns,sumVarX=sumVarX,
R2=R2,nLT=nLT,verbose=verbose,
saveAt=saveAt,
rmExistingFiles=rmExistingFiles,
thin=thin,
nIter=nIter,
burnIn=burnIn)
)
}
#File for storing sample for varE
fname = paste(saveAt, "varE.dat", sep = "")
if(rmExistingFiles) {
unlink(fname)
}
fileOutVarE = file(description = fname, open = "w")
post_varE = 0
post_varE2 = 0
#Gibbs sampler
#Loop over iterations
nSums=0 #For running means
#VERY IMPORTANT DO NOT CHANGE THIS LINE IN THE INITIALIZATION
beta=rep(0,p)
for(i in 1:nIter)
{
start=proc.time()[3]
#Loop over linear predictors
for(j in 1:nLT)
{
if(priors[[j]]$model=="BayesA")
{
ans=.Call("sampler_others",p, XX, Xy, ETA[[j]]$idColumns, ETA[[j]]$p, beta,
rep(ETA[[j]]$varB,nCols[j]), varE,RSS)
beta=ans[[1]]
ETA[[j]]$b=beta[ETA[[j]]$idColumns]
RSS=ans[[2]]
#Update variances
SS = ETA[[j]]$S + ETA[[j]]$b^2
DF = ETA[[j]]$df0 + 1
ETA[[j]]$varB = SS/rchisq(n = ETA[[j]]$p, df = DF)
tmpShape=ETA[[j]]$p*ETA[[j]]$df0/2+ETA[[j]]$shape0
tmpRate=sum(1/ETA[[j]]$varB)/2+ETA[[j]]$rate0
ETA[[j]]$S=rgamma(shape=tmpShape,rate=tmpRate,n=1)
}#End of BayesA
if(priors[[j]]$model=="BayesB")
{
ans=.Call("sampler_DiracSS",p, XX, Xy, ETA[[j]]$idColumns, ETA[[j]]$p, ETA[[j]]$a, beta,
ETA[[j]]$d, ETA[[j]]$varB, varE, ETA[[j]]$probIn,RSS)
ETA[[j]]$a=ans[[1]]
ETA[[j]]$d=ans[[2]]
beta=ans[[3]]
ETA[[j]]$b=beta[ETA[[j]]$idColumns]
RSS=ans[[4]]
#Sampling hyper-parameters
SS=sum((ETA[[j]]$a)^2)+ETA[[j]]$S
DF=ETA[[j]]$df0+1
ETA[[j]]$varB=SS/rchisq(n=ETA[[j]]$p,df=DF)
#Update the scale
tmpShape=ETA[[j]]$p*ETA[[j]]$df0/2+ETA[[j]]$shape0
tmpRate=sum(1/ETA[[j]]$varB)/2+ETA[[j]]$rate0
ETA[[j]]$S=rgamma(shape=tmpShape,rate=tmpRate,n=1)
#Update inclusion probabilities
mrkIn = sum(ETA[[j]]$d)
ETA[[j]]$probIn=rbeta(shape1 = (mrkIn + ETA[[j]]$countsIn + 1),
shape2 = (ETA[[j]]$p - mrkIn + ETA[[j]]$countsOut + 1), n = 1)
}#End of BayesB
#BayesC case
if(priors[[j]]$model=="BayesC")
{
ans=.Call("sampler_DiracSS",p, XX, Xy, ETA[[j]]$idColumns, ETA[[j]]$p, ETA[[j]]$a, beta,
ETA[[j]]$d, rep(ETA[[j]]$varB,nCols[j]), varE, ETA[[j]]$probIn,RSS)
ETA[[j]]$a=ans[[1]]
ETA[[j]]$d=ans[[2]]
beta=ans[[3]]
ETA[[j]]$b=beta[ETA[[j]]$idColumns]
RSS=ans[[4]]
#Sampling hyper-parameters
S=sum((ETA[[j]]$a)^2)+ETA[[j]]$S0
ETA[[j]]$varB=S/rchisq(n=1,df=ETA[[j]]$df0+nCols[j])
mrkIn = sum(ETA[[j]]$d)
ETA[[j]]$probIn=rbeta(shape1 = (mrkIn + ETA[[j]]$countsIn + 1),
shape2 = (ETA[[j]]$p - mrkIn + ETA[[j]]$countsOut + 1), n = 1)
} #End of BayesC
if(priors[[j]]$model=="SSVS")
{
ans=.Call("sampler_ACSS", p, XX, Xy, ETA[[j]]$idColumns, ETA[[j]]$p, ETA[[j]]$a, beta,
ETA[[j]]$d, rep(ETA[[j]]$varB, nCols[j]), varE, ETA[[j]]$probIn,
RSS, ETA[[j]]$c)
ETA[[j]]$a=ans[[1]]
ETA[[j]]$d=ans[[2]]
beta=ans[[3]]
ETA[[j]]$b=beta[ETA[[j]]$idColumns]
RSS=ans[[4]]
#Sampling hyper-parameters
S=sum((ETA[[j]]$b/ETA[[j]]$a)^2)+ETA[[j]]$S0
ETA[[j]]$varB=S/rchisq(n=1,df=ETA[[j]]$df0+nCols[j])
mrkIn = sum(ETA[[j]]$d)
ETA[[j]]$probIn=rbeta(shape1 = (mrkIn + ETA[[j]]$countsIn + 1),
shape2 = (ETA[[j]]$p - mrkIn + ETA[[j]]$countsOut + 1), n = 1)
#Update c
ETA[[j]]$c=metropc(ETA[[j]]$c,ETA[[j]]$varB,ETA[[j]]$b,ETA[[j]]$d,ETA[[j]]$ccountsIn,ETA[[j]]$ccountsOut)
#message("c=",ETA[[j]]$c)
} #End of SSVS
#BRR case
if(priors[[j]]$model=="BRR")
{
ans=.Call("sampler_others",p, XX, Xy, ETA[[j]]$idColumns, ETA[[j]]$p, beta,
rep(ETA[[j]]$varB,nCols[j]), varE,RSS)
beta=ans[[1]]
ETA[[j]]$b=beta[ETA[[j]]$idColumns]
RSS=ans[[2]]
#Sampling hyper-parameters
DF = ETA[[j]]$df0 + ETA[[j]]$p
SS = sum(ETA[[j]]$b^2) + ETA[[j]]$S0
ETA[[j]]$varB = SS/rchisq(df = DF, n = 1)
} #End of BRR
if(priors[[j]]$model=="FIXED")
{
ans=.Call("sampler_others",p, XX, Xy, ETA[[j]]$idColumns, ETA[[j]]$p, beta,
rep(ETA[[j]]$varB,nCols[j]), varE,RSS)
beta=ans[[1]]
#print(beta)
ETA[[j]]$b=beta[ETA[[j]]$idColumns]
RSS=ans[[2]]
#message("RSS=",RSS)
} #End of FIXED
}
#Update residual
varE=(RSS+S0)/rchisq(n=1,df=n+df0)
#Saving samples and computing running means
if(i%%thin==0)
{
for(j in 1:nLT)
{
if(priors[[j]]$model == "BayesA")
{
tmp=ETA[[j]]$S
write(tmp, ncolumns = 1, file = ETA[[j]]$fileOut, append = TRUE)
}
if(priors[[j]]$model == "BayesB")
{
tmp=c(ETA[[j]]$probIn,ETA[[j]]$S)
write(tmp, ncolumns = 2, file = ETA[[j]]$fileOut, append = TRUE)
}
if(priors[[j]]$model == "BayesC")
{
tmp = c(ETA[[j]]$probIn, ETA[[j]]$varB)
write(tmp, ncolumns = 2, file = ETA[[j]]$fileOut, append = TRUE)
}
if(priors[[j]]$model == "SSVS")
{
tmp = c(ETA[[j]]$probIn, ETA[[j]]$varB, ETA[[j]]$c)
write(tmp, ncolumns = 3, file = ETA[[j]]$fileOut, append = TRUE)
}
if (priors[[j]]$model == "BRR")
{
write(ETA[[j]]$varB, file = ETA[[j]]$fileOut, append = TRUE)
}
if (priors[[j]]$model == "FIXED") {
write(ETA[[j]]$b,ncolumns=ETA[[j]]$p, file = ETA[[j]]$fileOut, append = TRUE)
}
}
#Write output file for varE
write(x = varE, ncolumns=1,file = fileOutVarE, append = TRUE)
if(i>burnIn)
{
nSums = nSums + 1
k = (nSums - 1)/(nSums)
for(j in 1:nLT)
{
#BayesA
if(priors[[j]]$model=="BayesA")
{
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
ETA[[j]]$post_varB = ETA[[j]]$post_varB * k + (ETA[[j]]$varB)/nSums
ETA[[j]]$post_varB2 = ETA[[j]]$post_varB2 * k + (ETA[[j]]$varB^2)/nSums
ETA[[j]]$post_S = ETA[[j]]$post_S * k + (ETA[[j]]$S)/nSums
ETA[[j]]$post_S2 = ETA[[j]]$post_S2 * k + (ETA[[j]]$S^2)/nSums
if(ETA[[j]]$saveEffects&&(i%%ETA[[j]]$thin)==0){
writeBin(object=ETA[[j]]$b,con=ETA[[j]]$fileEffects,size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}#*#
}
#BayesB case
if(priors[[j]]$model=="BayesB")
{
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
ETA[[j]]$post_varB = ETA[[j]]$post_varB * k + (ETA[[j]]$varB)/nSums
ETA[[j]]$post_varB2 = ETA[[j]]$post_varB2 * k + (ETA[[j]]$varB^2)/nSums
ETA[[j]]$post_d = ETA[[j]]$post_d * k + (ETA[[j]]$d)/nSums
ETA[[j]]$post_probIn = ETA[[j]]$post_probIn * k + (ETA[[j]]$probIn)/nSums
ETA[[j]]$post_probIn2 = ETA[[j]]$post_probIn2 * k + (ETA[[j]]$probIn^2)/nSums
ETA[[j]]$post_S = ETA[[j]]$post_S * k + (ETA[[j]]$S)/nSums
ETA[[j]]$post_S2 = ETA[[j]]$post_S2 * k + (ETA[[j]]$S^2)/nSums
if(ETA[[j]]$saveEffects&&(i%%ETA[[j]]$thin)==0){
writeBin(object=ETA[[j]]$b,con=ETA[[j]]$fileEffects,size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}#*#
}
#BayesC case
if(priors[[j]]$model=="BayesC")
{
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
ETA[[j]]$post_varB = ETA[[j]]$post_varB * k + (ETA[[j]]$varB)/nSums
ETA[[j]]$post_varB2 = ETA[[j]]$post_varB2 * k + (ETA[[j]]$varB^2)/nSums
ETA[[j]]$post_d = ETA[[j]]$post_d * k + (ETA[[j]]$d)/nSums
ETA[[j]]$post_probIn = ETA[[j]]$post_probIn * k + (ETA[[j]]$probIn)/nSums
ETA[[j]]$post_probIn2 = ETA[[j]]$post_probIn2 * k + (ETA[[j]]$probIn^2)/nSums
if(ETA[[j]]$saveEffects&&(i%%ETA[[j]]$thin)==0){
writeBin(object=ETA[[j]]$b,con=ETA[[j]]$fileEffects,size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}#*#
}
#SSVS case
if(priors[[j]]$model=="SSVS")
{
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
ETA[[j]]$post_varB = ETA[[j]]$post_varB * k + (ETA[[j]]$varB)/nSums
ETA[[j]]$post_varB2 = ETA[[j]]$post_varB2 * k + (ETA[[j]]$varB^2)/nSums
ETA[[j]]$post_d = ETA[[j]]$post_d * k + (ETA[[j]]$d)/nSums
ETA[[j]]$post_probIn = ETA[[j]]$post_probIn * k + (ETA[[j]]$probIn)/nSums
ETA[[j]]$post_probIn2 = ETA[[j]]$post_probIn2 * k + (ETA[[j]]$probIn^2)/nSums
ETA[[j]]$post_c = ETA[[j]]$post_c * k + ETA[[j]]$c/nSums
ETA[[j]]$post_c2 = ETA[[j]]$post_c2 * k + (ETA[[j]]$c^2)/nSums
if(ETA[[j]]$saveEffects&&(i%%ETA[[j]]$thin)==0){
writeBin(object=ETA[[j]]$b,con=ETA[[j]]$fileEffects,size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}#*#
}
#BRR case
if(priors[[j]]$model=="BRR")
{
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
ETA[[j]]$post_varB = ETA[[j]]$post_varB * k + (ETA[[j]]$varB)/nSums
ETA[[j]]$post_varB2 = ETA[[j]]$post_varB2 * k + (ETA[[j]]$varB^2)/nSums
if(ETA[[j]]$saveEffects&&(i%%ETA[[j]]$thin)==0){
writeBin(object=ETA[[j]]$b,con=ETA[[j]]$fileEffects,size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}#*#
}
#FIXED case
if(priors[[j]]$model=="FIXED")
{
ETA[[j]]$post_b = ETA[[j]]$post_b * k + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 = ETA[[j]]$post_b2 * k + (ETA[[j]]$b^2)/nSums
}
}
post_varE = post_varE * k + varE/nSums
post_varE2 = post_varE2 * k + (varE^2)/nSums
}
}#End if loop for checking that i is a multiple of thin
end=proc.time()[3]
if(verbose)
{
message("Iter=",i," Time/Iter=",round(end-start,3))
message("varE=",round(varE,4))
}
}#End of loop for Gibbs sampler
#close output files
close(fileOutVarE)
for (j in 1:nLT)
{
if (!is.null(ETA[[j]]$fileOut))
{
flush(ETA[[j]]$fileOut)
close(ETA[[j]]$fileOut)
ETA[[j]]$fileOut = NULL
}
if(!is.null(ETA[[j]]$fileEffects))
{
flush(ETA[[j]]$fileEffects)
close(ETA[[j]]$fileEffects)
ETA[[j]]$fileEffects = NULL
}
}
#Return the goodies
out=list()
#Renaming/removing objects in ETA ...
for(j in 1:nLT)
{
ETA[[j]]$b = ETA[[j]]$post_b
ETA[[j]]$SD.b = sqrt(ETA[[j]]$post_b2 - ETA[[j]]$post_b^2)
ETA[[j]]$varB = ETA[[j]]$post_varB
ETA[[j]]$SD.varB = sqrt(ETA[[j]]$post_varB2 - (ETA[[j]]$post_varB^2))
tmp = which(names(ETA[[j]]) %in% c("post_b", "post_b2","post_varB", "post_varB2"))
ETA[[j]] = ETA[[j]][-tmp]
if(priors[[j]]$model%in%c("BayesB","BayesC","SSVS"))
{
ETA[[j]]$d=ETA[[j]]$post_d
ETA[[j]]$probIn=ETA[[j]]$post_probIn
ETA[[j]]$SD.probIn=sqrt(ETA[[j]]$post_probIn2 - (ETA[[j]]$post_probIn^2))
tmp = which(names(ETA[[j]]) %in% c("a","post_d", "post_probIn","post_probIn2"))
ETA[[j]] = ETA[[j]][-tmp]
}
if(priors[[j]]$model=="SSVS")
{
ETA[[j]]$c=ETA[[j]]$post_c
ETA[[j]]$SD.c=sqrt(ETA[[j]]$post_c2 - (ETA[[j]]$post_c^2))
tmp = which(names(ETA[[j]]) %in% c("post_c", "post_c2"))
ETA[[j]]=ETA[[j]][-tmp]
}
}
out$ETA=ETA
out$varE = post_varE
out$SD.varE = sqrt(post_varE2 - post_varE^2)
class(out)="BLRCross"
return(out)
}
BLRXy<-function(y, intercept=TRUE, ETA,
nIter = 1500, burnIn = 500, thin = 5,
S0 = NULL, df0 = 5, R2 = 0.5,
verbose = TRUE, saveAt="",rmExistingFiles = TRUE)
{
nLT <- ifelse(is.null(ETA), 0, length(ETA))
if(!(nLT>0)) stop("ETA should have at least one component\n")
if(is.null(names(ETA)))
{
names(ETA)<-rep("",nLT)
}
#Check NA's
if(any(is.na(y))) stop("NA's not allowed in this implementation")
n<-length(y)
ps<-rep(NA,nLT)
ns<-rep(NA,nLT)
#Check supported models, number of columns in each element of the list
for(j in 1:nLT)
{
if(!(ETA[[j]]$model %in% c("FIXED", "BRR", "BayesA", "BayesB","BayesC","SSVS")))
{
stop("Error in ETA[[", j, "]]", " model ", ETA[[j]]$model, " not implemented (note: evaluation is case sensitive)")
}
if(!is.null(ETA[[j]]$X))
{
if(is.matrix(ETA[[j]]$X))
{
ps[j]<-ncol(ETA[[j]]$X)
ns[j]<-nrow(ETA[[j]]$X)
}else{
stop("Error in ETA[[", j, "]], X should be a matrix")
}
}else{
stop("Error in ETA[[", j, "]], X is NULL")
}
}#End of checking inputs
if(any(is.na(ps))) stop("Check that every X in ETA is a matrix at least one column and ",n," rows")
if(any(is.na(ns))) stop("Check that every X in ETA is a matrix at least one column and ",n," rows")
if(any(n!=ns)) stop("Every X in ETA must have the same number of rows")
low<-cumsum(ps)-ps+1
up<-cumsum(ps)
X<-matrix(NA,nrow=n,ncol=sum(ps))
idPriors<-rep(NA,sum(ps))
priors<-list()
for(j in 1:nLT)
{
X[1:n,c(low[j]:up[j])]<-ETA[[j]]$X
idPriors[c(low[j]:up[j])]<-j
priors[[j]]<-ETA[[j]]
priors[[j]]$X<-NULL
}
tmp<-names(ETA)
#Dirty hack
if(intercept)
{
X<-cbind(X,1)
idPriors<-c(idPriors,nLT+1)
priors[[nLT+1]]<-list(model="FIXED")
priors[[nLT+1]]$Name="___INTERCEPT___"
tmp<-c(tmp,"___INTERCEPT___")
}
names(priors)<-tmp
#Crossprod
XX<-crossprod(X)
Xy<-as.vector(crossprod(X,y))
out<-BLRCross(y=y,XX=XX,Xy=Xy,nIter=nIter,burnIn=burnIn,
thin=thin,R2=R2,
S0=S0,df0=df0,
priors=priors,
idPriors=idPriors,
verbose=verbose,
saveAt=saveAt,
rmExistingFiles = rmExistingFiles)
if(intercept)
{
out$mu<-out$ETA[[nLT+1]]$b
out$SD.mu<-out$ETA[[nLT+1]]$SD.b
out$ETA[[nLT+1]]<-NULL
}
#Compute yHat
yHat<-rep(0,n)
for(j in 1:nLT)
{
yHat<-yHat+as.vector(ETA[[j]]$X%*%out$ETA[[j]]$b)
}
if(intercept)
{
yHat<-yHat+out$mu
}
out$yHat<-yHat
return(out)
}
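#Minimal usage sketch for BLRXy (a wrapper that builds XX and Xy and calls BLRCross);
#all objects below are simulated and purely illustrative, not part of the package:
# n<-500; p<-100
# X<-matrix(rnorm(n*p),nrow=n,ncol=p)
# y<-as.vector(X%*%rnorm(p,sd=0.1)+rnorm(n))
# fm<-BLRXy(y=y,ETA=list(mrk=list(X=X,model="BayesB")),nIter=1200,burnIn=200)
# plot(y,fm$yHat)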
|
/scratch/gouwar.j/cran-all/cranData/BGLR/R/BLRCross.R
|
#Auxiliary functions
#Converts a logical vector to decimal representation
#e.g. x=c(TRUE,FALSE) means that we have the binary number "10", which in decimal is 2
logicalToDec<-function(x)
{
sum(x * 2^(rev(seq_along(x)) - 1))
}
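#Quick sketch of logicalToDec (values follow from the bit-weight formula above):
# logicalToDec(c(TRUE,FALSE)) #binary "10" -> 2
# logicalToDec(c(TRUE,FALSE,TRUE)) #binary "101" -> 5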
#generating pseudo-random sequences from Wishart distribution
#v Degrees of freedom (scalar).
#S Inverse scale matrix pxp.
#Function included in the MCMCpack, version 1.4-5
rwish<-function(v, S)
{
if (!is.matrix(S))
S <- matrix(S)
if (nrow(S) != ncol(S))
{
stop(message="S not square in rwish().\n")
}
if (v < nrow(S))
{
stop(message="v is less than the dimension of S in rwish().\n")
}
p <- nrow(S)
CC <- chol(S)
Z <- matrix(0, p, p)
diag(Z) <- sqrt(rchisq(p, v:(v-p+1)))
if(p > 1)
{
pseq <- 1:(p-1)
Z[rep(p*pseq, pseq) + unlist(lapply(pseq, seq))] <- rnorm(p*(p-1)/2)
}
return(crossprod(Z %*% CC))
}
#generating pseudo-random sequences from inverse Wishart distribution
#v Degrees of freedom (scalar).
#S Inverse scale matrix p x p
#Function included in the MCMCpack, version 1.4-5
riwish<-function(v, S)
{
return(solve(rwish(v,solve(S))))
}
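#Usage sketch for rwish/riwish (single random draws, shown only to illustrate the interface):
# S<-diag(2)
# rwish(v=5,S=S) #one 2 x 2 draw from the Wishart distribution
# riwish(v=5,S=S) #one 2 x 2 draw from the inverse Wishart distribution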
#Extract Lower Triangular Elements from a Symmetric Matrix.
#The elements are stored in column major order, it does not check for symmetry.
#Function included in the MCMCpack, version 1.4-5
vech<-function (x)
{
x <- as.matrix(x)
if (dim(x)[1] != dim(x)[2])
{
stop("Non-square matrix passed to vech().\n")
}
output <- x[lower.tri(x, diag = TRUE)]
dim(output) <- NULL
return(output)
}
#Takes vector x and returns an nrow times nrow symmetric matrix,
#this will recycle the elements of x as needed to fill the matrix.
#The number of rows is computed automatically if not given.
#Function included in the MCMCpack, version 1.4-5
xpnd <-function (x, nrow = NULL)
{
dim(x) <- NULL
if(is.null(nrow)) nrow <- (-1 + sqrt(1 + 8 * length(x))) / 2
output <- matrix(0, nrow, nrow)
output[lower.tri(output, diag = TRUE)] <- x
hold <- output
hold[upper.tri(hold, diag=TRUE)] <- 0
output <- output + t(hold)
return(output)
}
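#Round-trip sketch: vech() stacks the lower triangle, xpnd() rebuilds the symmetric matrix
# A<-matrix(c(4,1,1,3),nrow=2)
# v<-vech(A) #c(4,1,3)
# xpnd(v) #recovers A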
#UNstructured covariance matrix
#Cov, list
#traits, integer
#mo, Mode
setCov.UN<-function(Cov,traits,j,mo,saveAt)
{
message("UNstructured covariance matrix")
if(is.null(Cov$df0))
{
Cov$df0<-traits+1
message("df0 was set to ",Cov$df0)
}
if(is.null(Cov$S0))
{
#Cov$S0<-diag(traits)
#message("S0 set to an identity matrix")
Cov$S0<-mo*(Cov$df0+traits+1)
message("S0 set to ")
print(Cov$S0)
}
#Omega=cov(b)
Cov$Omega<-riwish(v=Cov$df0,S=Cov$S0)
Cov$Omegainv<-solve(Cov$Omega)
#Objects for saving posterior means for MCMC
Cov$post_Omega<-matrix(0,nrow=traits,ncol=traits)
Cov$post_Omega2<-matrix(0,nrow=traits,ncol=traits)
#Output files
Cov$fName_Omega<-paste(saveAt,"Omega_",j,".dat",sep="")
Cov$f_Omega<-file(description=Cov$fName_Omega,open="w")
return(Cov)
}
#DIAGonal covariance matrix
#Cov, list
#traits, integer
#mo, Mode
setCov.DIAG<-function(Cov,traits,j,mo,saveAt)
{
message("DIAGonal covariance matrix")
if(is.null(Cov$df0))
{
Cov$df0<-rep(traits+1,traits)
message("df0 set to ",traits+1," for all the traits")
}
if(is.null(Cov$S0))
{
#Cov$S0 <- rep(1,traits)
#message("S0 was set to 1 for all the traits")
Cov$S0<-mo*(Cov$df0+2)
message("S0 was set to: ")
print(Cov$S0)
}
Cov$Omega<-matrix(0,nrow=traits,ncol=traits)
for(k in 1:traits)
{
Cov$Omega[k,k]<-Cov$S0[k]/rchisq(n=1,df=Cov$df0[k])
}
Cov$Omegainv<-solve(Cov$Omega)
#Objects for saving posterior means for MCMC
Cov$post_Omega<-matrix(0,nrow=traits,ncol=traits)
Cov$post_Omega2<-matrix(0,nrow=traits,ncol=traits)
#Output files
Cov$fName_Omega<-paste(saveAt,"Omega_",j,".dat",sep="")
Cov$f_Omega<-file(description=Cov$fName_Omega,open="w")
return(Cov)
}
#RECursive covariance matrix
setCov.REC<-function(Cov,traits,j,mo,saveAt)
{
message("RECursive covariance matrix")
if(is.null(Cov$df0))
{
Cov$df0<-rep(traits+1,traits)
message("df0 set to ", traits+1, " for all the traits")
}
if(is.null(Cov$S0))
{
#Cov$S0 <- rep(1,traits)
#message("S0 was set to 1 for all the traits")
Cov$S0<-mo*(Cov$df0+2)
message("S0 was set to: ")
print(Cov$S0)
}
if(is.null(Cov$var))
{
Cov$var<-100
message("var was set to 100")
}
if(is.null(Cov$M)) stop("M can not be null")
if(!is.logical(Cov$M)) stop("M must be logical matrix (with entries being TRUE/FALSE)")
if(!is.matrix(Cov$M)) stop("M must be a matrix")
if(nrow(Cov$M)!=ncol(Cov$M)) stop("M must be a square matrix")
if(nrow(Cov$M)!=traits) stop("M must have ", traits, " rows and columns")
if(any(diag(Cov$M)==TRUE)) stop("All diagonal entries of M must be set to FALSE")
Cov$M[upper.tri(Cov$M)]<-FALSE
Cov$W<-matrix(0,nrow=traits,ncol=traits)
Cov$PSI<-rep(NA,traits)
for(k in 1:traits)
{
Cov$PSI[k]<-Cov$S0[k]/rchisq(n=1,df=Cov$df0[k])
}
#Omega=cov(b)
Cov$Omega<-riwish(v=traits,S=diag(Cov$S0))
Cov$Omegainv<-solve(Cov$Omega)
#Objects for saving posterior means for MCMC
Cov$post_Omega<-matrix(0,nrow=traits,ncol=traits)
Cov$post_Omega2<-matrix(0,nrow=traits,ncol=traits)
Cov$post_W<-matrix(0,nrow=traits,ncol=traits)
Cov$post_W2<-matrix(0,nrow=traits,ncol=traits)
Cov$post_PSI<-rep(0,traits)
Cov$post_PSI2<-rep(0,traits)
#Output files
Cov$fName_W<-paste(saveAt,"W_",j,".dat",sep="")
Cov$fName_PSI<-paste(saveAt,"PSI_",j,".dat",sep="")
Cov$f_W<-file(description=Cov$fName_W,open="w")
Cov$f_PSI<-file(description=Cov$fName_PSI,open="w")
return(Cov)
}
#FA covariance matrix
setCov.FA<-function(Cov,traits,nD,j,mo,saveAt)
{
message("FA covariance matrix")
if(is.null(Cov$df0))
{
Cov$df0<-rep(traits+1,traits)
message("df0 set to ",traits+1," for all the traits")
}
if(is.null(Cov$S0))
{
#Cov$S0 <- rep(1/100,traits)
#message("S0 was set to 1/100 for all the traits")
Cov$S0<-mo*(Cov$df0+2)
message("S0 was set to: ")
print(Cov$S0)
}
if(is.null(Cov$var))
{
Cov$var<-100
message("var was set to 100")
}
if(is.null(Cov$M)) stop("M can not be null")
if(!is.logical(Cov$M)) stop("M must be logical matrix (with entries being TRUE/FALSE)")
if(!is.matrix(Cov$M)) stop("M must be a matrix")
if(nrow(Cov$M)!=traits) stop("M must have ", traits, " rows")
if(ncol(Cov$M)>traits) stop("Number of columns of M must be smaller than ", traits)
Cov$nF<-ncol(Cov$M)
Cov$nD<-nD
#Omega=cov(b)
Cov$Omega<-riwish(v=traits,S=diag(Cov$S0))
sdU <- sqrt(diag(Cov$Omega))
FA <- factanal(covmat = Cov$Omega, factors = Cov$nF)
Cov$W <- matrix(nrow = traits, ncol = Cov$nF, 0)
Cov$W[Cov$M] <- (diag(sdU) %*% FA$loadings)[Cov$M]
Cov$PSI <- (sdU^2) * FA$uniquenesses + 1e-04
Cov$Omega <- tcrossprod(Cov$W) + diag(Cov$PSI)
Cov$Omegainv<-solve(Cov$Omega)
Cov$F <- matrix(nrow = nD, ncol = Cov$nF, 0)
#Objects for saving posterior means for MCMC
Cov$post_Omega<-matrix(0,nrow=traits,ncol=traits)
Cov$post_Omega2<-matrix(0,nrow=traits,ncol=traits)
Cov$post_W<-matrix(0,nrow=traits,ncol=Cov$nF)
Cov$post_W2<-matrix(0,nrow=traits,ncol=Cov$nF)
Cov$post_PSI<-rep(0,traits)
Cov$post_PSI2<-rep(0,traits)
#Output files
Cov$fName_W<-paste(saveAt,"W_",j,".dat",sep="")
Cov$fName_PSI<-paste(saveAt,"PSI_",j,".dat",sep="")
Cov$f_W<-file(description=Cov$fName_W,open="w")
Cov$f_PSI<-file(description=Cov$fName_PSI,open="w")
return(Cov)
}
#Set linear term for DiracSS_mt
setLT.DiracSS_mt<-function(LT,traits,j,Sy,nLT,R2,saveAt,nRow)
{
message("Setting linear term ",j)
#Inclusion probabilities
if(is.null(LT$inclusionProb))
{
LT$inclusionProb<-list(probIn=rep(0.5,traits),
counts=rep(2,traits))
message("probIn set to 0.5 for all the traits")
message("counts set to 2 for all the traits")
}else{
if(is.null(LT$inclusionProb$probIn))
{
LT$inclusionProb$probIn<-rep(0.5,traits)
message("probIn set to 0.5 for all the traits")
}
if(is.null(LT$inclusionProb$counts))
{
LT$inclusionProb$counts<-rep(2,traits)
message("counts set to 2 for all the traits")
}
}
#Compute countsIn and countsOut
LT$inclusionProb$countsIn <- LT$inclusionProb$counts * LT$inclusionProb$probIn
LT$inclusionProb$countsOut <- LT$inclusionProb$counts - LT$inclusionProb$countsIn
#X
if(is.null(LT$X)) stop("X can not be NULL\n")
if(!is.matrix(LT$X)) stop("X must be a matrix\n")
if(any(is.na(LT$X))) stop("X has NAs\n")
LT$x2<-as.vector(colSums(LT$X^2))
sumMeanXSq<-sum((apply(LT$X,2L,mean))^2)
MSx<-sum(LT$x2)/nrow(LT$X)-sumMeanXSq
message("MSx=",MSx)
#Initialize b, d, beta, beta=b#d
LT$p<-ncol(LT$X)
LT$b<-matrix(0,nrow=LT$p,ncol=traits)
LT$d<-matrix(1,nrow=LT$p,ncol=traits)
LT$beta<-LT$b*LT$d
#Distribution for Omega, which is related to (b_1j,b_2j,...,b_tj)
#j=1,...,p, where p is the number of columns of X
#t the number of traits
if(is.null(LT$Cov))
{
#Cov is null
LT$Cov<-list()
LT$Cov$type<-"UN"
}else{
#Cov is not null
if(is.null(LT$Cov$type))
{
LT$Cov$type<-"UN"
}else{
if(!(LT$Cov$type %in% c("UN","REC","FA","DIAG")))
{
stop("Error '", LT$Cov$type, "' not implemented (note: evaluation is case sensitive)")
}
}
}
#Beyond this point Cov is not NULL and we already know the covariance structure
#Select appropriate covariance structure
LT$Cov<-switch(LT$Cov$type,
UN=setCov.UN(Cov=LT$Cov,traits=traits,j=j,mo=(R2/nLT)*Sy/MSx,saveAt=saveAt),
DIAG=setCov.DIAG(Cov=LT$Cov,traits=traits,j=j,mo=(R2/nLT)*diag(Sy)/MSx,saveAt),
REC=setCov.REC(Cov=LT$Cov,traits=traits,j=j,mo=(R2/nLT)*diag(Sy)/MSx,saveAt=saveAt),
FA=setCov.FA(Cov=LT$Cov,traits=traits,nD=LT$p,j=j,mo=(R2/nLT)*diag(Sy)/MSx,saveAt=saveAt)
)
#It is not working very well when probIn is small
#Dm<-diag(1/LT$inclusionProb$probIn)
#LT$Cov<-switch(LT$Cov$type,
# UN=setCov.UN(Cov=LT$Cov,traits=traits,mo=(R2/nLT)*Dm%*%Sy%*%Dm/MSx),
# DIAG=setCov.DIAG(Cov=LT$Cov,traits=traits,mo=(R2/nLT)*Dm%*%diag(Sy)%*%Dm/MSx),
# REC=setCov.REC(Cov=LT$Cov,traits=traits,j=j,mo=(R2/nLT)*Dm%*%diag(Sy)%*%Dm/MSx,saveAt=saveAt),
# FA=setCov.FA(Cov=LT$Cov,traits=traits,nD=LT$p,j=j,mo=(R2/nLT)*Dm%*%diag(Sy)%*%Dm/MSx,saveAt=saveAt)
# )
#Add a new object to compute covariance between entries of
#beta=b*d with MCMC output, Sigma=Cov(beta,beta'), beta is
#a vector with marker effects for one locus, dimension 1*traits
LT$Cov$Sigma=matrix(0,nrow=traits,ncol=traits)
LT$Cov$post_Sigma=matrix(0,nrow=traits,ncol=traits)
LT$Cov$post_Sigma2=matrix(0,nrow=traits,ncol=traits)
#Objects for saving posterior means for MCMC
LT$post_b<-matrix(0,nrow=LT$p,ncol=traits)
LT$post_b2<-matrix(0,nrow=LT$p,ncol=traits)
LT$post_d<-matrix(0,nrow=LT$p,ncol=traits)
LT$post_d2<-matrix(0,nrow=LT$p,ncol=traits)
LT$post_beta<-matrix(0,nrow=LT$p,ncol=traits)
LT$post_beta2<-matrix(0,nrow=LT$p,ncol=traits)
#Files to save binary files with betas
if(is.null(LT$saveEffects))
{
LT$saveEffects<-FALSE
}
if(LT$saveEffects)
{
if(is.null(LT$storageMode))
{
LT$storageMode<-"double"
}
if(!LT$storageMode%in%c("single","double"))
{
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
fname<-paste(saveAt,LT$Name,"_beta.bin",sep="")
LT$fileEffects<-file(fname,open='wb')
writeBin(object=c(nRow,traits,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
#NEW,
#perhaps we need to remove this; it is only useful for computing the genomic
#relationship matrix when using the Cheng (2018) method.
#Files to save indicator variables
#binary files are saved in single mode (4 bytes for each number)
if(is.null(LT$saveIndicators))
{
LT$saveIndicators<-FALSE
}
if(LT$saveIndicators)
{
fname2<-paste(saveAt,LT$Name,"_d.bin",sep="")
LT$fileIndicators<-file(fname2,open='wb')
#nrow, traits and p stored as single
writeBin(object=c(nRow,traits,LT$p),con=LT$fileIndicators,size=4)
}
return(LT)
}
#Set linear term for Ridge Regression
setLT.BRR_mt<-function(LT,traits,j,Sy,nLT,R2,saveAt,nRow)
{
message("Setting linear term ",j)
#X
if(is.null(LT$X)) stop("X can not be NULL\n")
if(!is.matrix(LT$X)) stop("X must be a matrix\n")
if(any(is.na(LT$X))) stop("X has NAs\n")
LT$x2<-as.vector(colSums(LT$X^2))
sumMeanXSq<-sum((apply(LT$X,2L,mean))^2)
MSx<-sum(LT$x2)/nrow(LT$X)-sumMeanXSq
message("MSx=",MSx)
#Initialize beta
LT$p<-ncol(LT$X)
LT$beta<-matrix(0,nrow=LT$p,ncol=traits)
#Distribution for Omega, which is related to (b_1j,b_2j,...,b_tj)
#j=1,...,p, where p is the number of columns of X
#t the number of traits
if(is.null(LT$Cov))
{
#Cov is null
LT$Cov<-list()
LT$Cov$type<-"UN"
}else{
#Cov is not null
if(is.null(LT$Cov$type))
{
LT$Cov$type<-"UN"
}else{
if(!(LT$Cov$type %in% c("UN","REC","FA","DIAG")))
{
stop("Error '", LT$Cov$type, "' not implemented (note: evaluation is case sensitive)")
}
}
}
#Beyond this point Cov is not NULL and we already know the covariance structure
#Select appropriate covariance structure
LT$Cov<-switch(LT$Cov$type,
UN=setCov.UN(Cov=LT$Cov,traits=traits,j=j,mo=(R2/nLT)*Sy/MSx,saveAt=saveAt),
DIAG=setCov.DIAG(Cov=LT$Cov,traits=traits,j=j,mo=(R2/nLT)*diag(Sy)/MSx,saveAt=saveAt),
REC=setCov.REC(Cov=LT$Cov,traits=traits,j=j,mo=(R2/nLT)*diag(Sy)/MSx,saveAt=saveAt),
FA=setCov.FA(Cov=LT$Cov,traits=traits,nD=LT$p,j=j,mo=(R2/nLT)*diag(Sy)/MSx,saveAt=saveAt)
)
#Objects for saving posterior means for MCMC
LT$post_beta<-matrix(0,nrow=LT$p,ncol=traits)
LT$post_beta2<-matrix(0,nrow=LT$p,ncol=traits)
#Files to save binary files with betas
if(is.null(LT$saveEffects))
{
LT$saveEffects<-FALSE
}
if(LT$saveEffects)
{
if(is.null(LT$storageMode))
{
LT$storageMode<-"double"
}
if(!LT$storageMode%in%c("single","double"))
{
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
fname<-paste(saveAt,LT$Name,"_beta.bin",sep="")
LT$fileEffects<-file(fname,open='wb')
writeBin(object=c(nRow,traits,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
return(LT)
}
#Set linear term for u_t ~ N(0, \sigma^2_r K)
#Note also (u_1',...,u_t') ~ N(0, G_0 x K), where x represents the Kronecker product
#Internally we represent this using the eigen-value decomposition,
#using as incidence matrix X=Gamma*Lambda^{1/2}, where K=Gamma*Lambda*Gamma'
setLT.RKHS_mt<-function(LT,traits,j,Sy,nLT,R2,saveAt)
{
if(is.null(LT$EVD) & is.null(LT$K))
{
text<-"Either variance co-variance matrix K or its eigen-value decomposition\n"
text<-paste(text,"must be provided for linear term ",j,"\n")
text<-paste(text,"To specify the variance covariance matrix K use:\n")
text<-paste(text,"list(K=?,model='RKHS'), where ? is the user defined (between subjects) co-variance matrix\n")
text<-paste(text,"To specify the eigen-value decomposition for K use:\n")
text<-paste(text,"list(EVD=?,model='RKHS'), where ? is the output from eigen function for a user defined (between subjects) co-variance matrix\n")
stop(text)
}
if((!is.null(LT$K)) & (!is.null(LT$EVD)))
{
message("Variance covariance matrix K and its eigen-value decomposition for linear term ",j, " was provided")
message("ONLY EVD will be used")
LT$K<-NULL
}
if((!is.null(LT$K)) & is.null(LT$EVD))
{
message("Checking variance co-variance matrix K for linear term ",j)
if(nrow(LT$K)!=ncol(LT$K)) stop("variance covariance matrix must be square")
LT$EVD <- eigen(LT$K,symmetric=TRUE)
message("Ok")
}
if(is.null(LT$K) & (!is.null(LT$EVD)))
{
message("Checking EVD provided for linear term ",j)
if(!is.matrix(LT$EVD$vectors)) stop("eigen-vectors must be a matrix\n")
if(nrow(LT$EVD$vectors)!=ncol(LT$EVD$vectors)) stop("eigen-vectors must be a square matrix\n")
if(!is.numeric(LT$EVD$values)) stop("eigen-values must be a numeric vector\n")
message("Ok")
}
keep <- LT$EVD$values>1e-10
LT$EVD$vectors <- LT$EVD$vectors[,keep]
LT$EVD$values <- LT$EVD$values[keep]
#X=Gamma*Lambda^{1/2}
LT$X<-sweep(x=LT$EVD$vectors,MARGIN=2,STATS=sqrt(LT$EVD$values),FUN="*")
#We do not save effects in RKHS
LT$saveEffects<-FALSE
LT<-setLT.BRR_mt(LT=LT,traits=traits,j=j,Sy=Sy,nLT=nLT,R2=R2,saveAt=saveAt,nRow=0)
return(LT)
}
#Set linear term for Fixed effects
#Modified by Gustavo to support saving fixed effects, April 13, 2022
setLT.FIXED_mt<-function(LT,traits,j,saveAt,nRow)
{
message("Setting linear term ",j)
if(is.null(LT$common))
{
LT$common<-TRUE
message("matrix of fixed effects X is the same for all the traits,")
message("so the same effects are assumed for all the traits")
}else{
if(LT$common)
{
message("matrix for fixed effects X is the same for all the traits,")
message("so the same effects are assumed for all the traits")
}else{
message("each trait has a different matrix for fixed X_t, we assume")
message("X=[X_1,...,X_t], k=1,...,t (traits)")
}
}
#X
if(is.null(LT$X)) stop("X can not be NULL\n")
if(!is.matrix(LT$X)) stop("X must be a matrix\n")
if(any(is.na(LT$X))) stop("X has NAs\n")
#Omega
LT$Cov<-list()
LT$Cov$Omega<-diag(rep(1E6,traits))
LT$Cov$Omegainv<-solve(LT$Cov$Omega)
if(LT$common)
{
#check rank
if(qr(LT$X)$rank<ncol(LT$X)) stop("X is rank deficient")
LT$x2<-as.vector(colSums(LT$X^2))
#Initialize beta
LT$p<-ncol(LT$X)
LT$beta<-matrix(0,nrow=LT$p,ncol=traits)
#Objects for saving posterior means for MCMC
LT$post_beta<-matrix(0,nrow=LT$p,ncol=traits)
LT$post_beta2<-matrix(0,nrow=LT$p,ncol=traits)
}else{
#Number of columns in each X=[X_1,...,X_t], k=1,...,t (traits)
LT$p<-floor(ncol(LT$X)/traits)
if(traits*LT$p!=ncol(LT$X)) stop("Check the number of columns in X")
LT$upper<-LT$p*c(1:traits)
LT$lower<-LT$upper+1-LT$p
#Check the rank of each matrix
for(k in 1:traits)
{
if(qr(LT$X[,c(LT$lower[k]:LT$upper[k])])$rank<LT$p) stop("X_",k, " is rank deficient")
}
#Do not move this code out of here!!!
#It appears to be repeated but that is not the case
LT$x2<-as.vector(colSums(LT$X^2))
LT$beta<-matrix(0,nrow=LT$p,ncol=traits)
#Objects for saving posterior means for MCMC
LT$post_beta<-matrix(0,nrow=LT$p,ncol=traits)
LT$post_beta2<-matrix(0,nrow=LT$p,ncol=traits)
}
#*#
#Files to save binary files with betas
if(is.null(LT$saveEffects))
{
LT$saveEffects<-FALSE
}
if(LT$saveEffects)
{
if(is.null(LT$storageMode))
{
LT$storageMode<-"double"
}
if(!LT$storageMode%in%c("single","double"))
{
stop("storageMode of LP ",j," can either be 'single' or 'double' (default)")
}
fname<-paste(saveAt,LT$Name,"_beta.bin",sep="")
LT$fileEffects<-file(fname,open='wb')
writeBin(object=c(nRow,traits,LT$p),con=LT$fileEffects,size=ifelse(LT$storageMode=="single",4,8))
}#*#
return(LT)
}
#Initialize residual covariance structure
setResCov<-function(resCov,traits,error,Sy,R2,saveAt)
{
message("Initializing resCov")
resCov$R<-var(error)/2
resCov$Rinv<-solve(resCov$R)
if(is.null(resCov$type))
{
resCov$type<-"UN"
message("Modelling R as UNstructured")
}else{
if(!(resCov$type %in% c("UN","DIAG","REC","FA")))
{
stop("Error '", resCov$type, "' not implemented (note: evaluation is case sensitive)")
}
}
if(resCov$type=="UN")
{
message("Setting hyperparameters for UNstructured R")
if(is.null(resCov$df0))
{
resCov$df0<-5
message("df0 was set to ", resCov$df0)
}
if(is.null(resCov$S0))
{
#resCov$S0 <- diag(traits)
resCov$S0<-(1-R2)*Sy*(resCov$df0+traits+1)
message("S0 was set to ")
print(resCov$S0)
}
}
if(resCov$type=="DIAG")
{
message("Setting hyperparameters for DIAG R")
if(is.null(resCov$df0))
{
resCov$df0<-rep(5,traits)
message("df0 set to 5 for all the traits")
}
if(is.null(resCov$S0))
{
#resCov$S0 <- rep(1,traits)
#message("S0 was set to 1 for all the traits")
resCov$S0<-(1-R2)*diag(Sy)*(resCov$df0+2)
message("S0 was set to ")
print(resCov$S0)
}
}
if(resCov$type=="REC")
{
message("Setting hyperparameters for REC R")
if(is.null(resCov$M)) stop("M can not be null")
if(!is.logical(resCov$M)) stop("M must be logical matrix (with entries being TRUE/FALSE)")
if(!is.matrix(resCov$M)) stop("M must be a matrix")
if(nrow(resCov$M)!=ncol(resCov$M)) stop("M must be a square matrix")
if(nrow(resCov$M)!=traits) stop("M must have ", traits, " rows and columns")
if(any(diag(resCov$M)==TRUE)) stop("All diagonal entries of M must be set to FALSE")
resCov$M[upper.tri(resCov$M)]<-FALSE
if(is.null(resCov$df0))
{
resCov$df0<-rep(5,traits)
message("df0 set to 5 for all the traits")
}
if(is.null(resCov$S0))
{
#resCov$S0 <- rep(1,traits)
#message("S0 was set to 1 for all the traits")
resCov$S0<-(1-R2)*diag(Sy)*(resCov$df0+2)
message("S0 was set to ")
print(resCov$S0)
}
if(is.null(resCov$var))
{
resCov$var<-100
message("var was set to 100")
}
resCov$W<-matrix(0,nrow=traits,ncol=traits)
resCov$PSI<-rep(NA,traits)
for(k in 1:traits)
{
resCov$PSI[k]<-resCov$S0[k]/rchisq(n=1,df=resCov$df0[k])
}
#Objects for saving posterior means for MCMC
resCov$post_W<-matrix(0,nrow=traits,ncol=traits)
resCov$post_W2<-matrix(0,nrow=traits,ncol=traits)
resCov$post_PSI<-rep(0,traits)
resCov$post_PSI2<-rep(0,traits)
#Output files
resCov$fName_W<-paste(saveAt,"W_R.dat",sep="")
resCov$fName_PSI<-paste(saveAt,"PSI_R.dat",sep="")
resCov$f_W<-file(description=resCov$fName_W,open="w")
resCov$f_PSI<-file(description=resCov$fName_PSI,open="w")
}
if(resCov$type=="FA")
{
message("Setting hyperparameters for FA R")
if(is.null(resCov$M)) stop("M can not be null")
if(!is.logical(resCov$M)) stop("M must be logical matrix (with entries being TRUE/FALSE)")
if(!is.matrix(resCov$M)) stop("M must be a matrix")
if(nrow(resCov$M)!=traits) stop("M must have ", traits, " rows")
if(ncol(resCov$M)>traits) stop("Number of columns of M must be smaller than ", traits)
resCov$nF<-ncol(resCov$M)
if(is.null(resCov$df0))
{
resCov$df0<-rep(5,traits)
message("df0 set to 5 for all the traits")
}
if(is.null(resCov$S0))
{
#resCov$S0 <- rep(1/100,traits)
#message("S0 was set to 1/100 for all the traits")
resCov$S0<-(1-R2)*diag(Sy)*(resCov$df0+2)
message("S0 was set to ")
print(resCov$S0)
}
if(is.null(resCov$var))
{
resCov$var<-100
message("var was set to 100")
}
sdU <- sqrt(diag(resCov$R))
FA <- factanal(covmat = resCov$R, factors = resCov$nF)
resCov$W <- matrix(nrow = traits, ncol = resCov$nF, 0)
resCov$W[resCov$M] <- (diag(sdU) %*% FA$loadings)[resCov$M]
resCov$PSI <- (sdU^2) * FA$uniquenesses + 1e-04
resCov$R <- tcrossprod(resCov$W) + diag(resCov$PSI)
resCov$Rinv<-solve(resCov$R)
resCov$F <- matrix(nrow = nrow(error), ncol = resCov$nF, 0)
#Objects for saving posterior means for MCMC
resCov$post_W<-matrix(0,nrow=traits,ncol=resCov$nF)
resCov$post_W2<-matrix(0,nrow=traits,ncol=resCov$nF)
resCov$post_PSI<-rep(0,traits)
resCov$post_PSI2<-rep(0,traits)
#Output files
resCov$fName_W<-paste(saveAt,"W_R.dat",sep="")
resCov$fName_PSI<-paste(saveAt,"PSI_R.dat",sep="")
resCov$f_W<-file(description=resCov$fName_W,open="w")
resCov$f_PSI<-file(description=resCov$fName_PSI,open="w")
}
#Objects for saving posterior means for MCMC
resCov$post_R<-matrix(0,nrow=traits,ncol=traits)
resCov$post_R2<-matrix(0,nrow=traits,ncol=traits)
resCov$fName_R<-paste(saveAt,"R.dat",sep="")
resCov$f_R<-file(description=resCov$fName_R,open="w")
message("Done")
return(resCov)
}
#Evaluates
#partial Loglikelihood for complete and partially observed records
#-0.5 * n * log(det(R)) - 0.5 * sum (error_i' R^{-1} error_i)
#error a matrix with errors, R residual variance covariance matrix
partialLogLik<-function(error,R)
{
if(is.matrix(error))
{
n<-nrow(error)
Linv<-solve(chol(R))
ans<- -0.5*n*log(det(R)) - 0.5 * sum(crossprod(t(error), Linv)^2)
}else{
stop("error must be a matrix")
}
return(ans)
}
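#Sketch: with R equal to the identity the determinant term vanishes and the value
#reduces to -0.5*sum(error^2), which gives a quick sanity check of the function:
# e<-matrix(rnorm(10),ncol=2)
# partialLogLik(error=e,R=diag(2)) #equals -0.5*sum(e^2)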
#Computes the Deviance Information Criterion and Effective Number of Parameters
#parameters:
#y.back: matrix of dimension n x traits, NA's for missing values
#ETAHat the posterior mean of the conditional expectation
#meanLogLik the posterior mean of logLik
#cte = -(n_complete_observed + n_partially_observed)*traits/2*log(2*pi)
getDIC<-function(y.back, ETAHat, meanLogLik, cte, complete_records, R,
missings=FALSE,
missing_records=NULL,
Dpatterns=NULL,
Upatterns=NULL,
dUpatterns=NULL,
dAllMissings=NULL)
{
error <- y.back-ETAHat
logLikAtPostMean <- cte
LogLikAtPostMean <- logLikAtPostMean + partialLogLik(error[complete_records,,drop=FALSE],R)
if(missings)
{
for (q in 1:length(dUpatterns))
{
#Some traits observed
if(dUpatterns[q]!=dAllMissings)
{
#1=missing, 2=observed
S22<-R[!Upatterns[q,],!Upatterns[q,],drop=FALSE]
index<-missing_records[Dpatterns==dUpatterns[q]]
#logLik
logLikAtPostMean <- logLikAtPostMean + partialLogLik(error[index,!Upatterns[q,],drop=FALSE],S22)
}
}
}
pD <- -2 * (meanLogLik - logLikAtPostMean)
DIC <- pD - 2 * meanLogLik
return(list(pD=pD,DIC=DIC,logLikAtPostMean=logLikAtPostMean))
}
#Recursive structures for variance covariance matrix
#Model: U=UB+D (ONLY RECURSIVE ALLOWED!) Current sample of random effects ('data')
#M a traits x traits matrix with TRUE/FALSE indicating position of non-null
#recursive effects (FALSE in diagonal!) PSI px1 the variance of the orthogonal
#shocks ...
#Function taken from MTM package
sample_G0_REC <- function(U, M, PSI, traits, priorVar = 100,
df0 = rep(0, traits),S0 = rep(0, traits))
{
B <- matrix(nrow = traits, ncol = traits, 0)
for (i in 1:traits)
{
dimX <- sum(M[i, ])
if (dimX > 0) {
tmpX <- U[, M[i, ]]
tmpY <- U[, i]
C <- crossprod(tmpX)/PSI[i] + 1/priorVar
CInv <- chol2inv(chol(C))
rhs <- crossprod(tmpX, tmpY)/PSI[i]
sol <- crossprod(CInv, rhs)
L <- chol(CInv)
shock <- crossprod(L, rnorm(dimX))
tmpB <- as.numeric(sol + shock)
B[i, M[i, ]] <- tmpB
uStar <- tmpY - matrix(tmpX, ncol = dimX) %*% (tmpB)
SS <- as.numeric(crossprod(uStar)) + S0[i]
df <- nrow(U) + df0[i]
PSI[i] <- SS/rchisq(n = 1, df = df)
}else{
SS <- as.numeric(crossprod(U[, i])) + S0[i]
df <- nrow(U) + df0[i]
PSI[i] <- SS/rchisq(n = 1, df = df)
}
}
tmp <- solve(diag(traits) - B)
G <- tmp %*% diag(PSI) %*% t(tmp)
out <- list(B = B, PSI = PSI, G = G)
return(out)
}
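#Illustrative sketch (simulated inputs, not package data): one recursive effect of
#trait 1 on trait 2, with unit starting values for the shock variances
# U<-matrix(rnorm(200),ncol=2)
# M<-matrix(FALSE,2,2); M[2,1]<-TRUE
# sample_G0_REC(U=U,M=M,PSI=c(1,1),traits=2)$G #one sampled 2 x 2 covariance matrix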
#Sampler for FA
#Model U=BF+D
#G_0 = B B' + PSI
#B is a matrix of loadings (regressions of the original random effects into common factors)
#F is a matrix of common factors
#M a logical matrix of the same dimensions that F, with TRUE for the loadings that the
#user wants to estimate
#PSI is a diagonal matrix whose non-null entries give the variances of factors that
#are trait-specific
#nF number of common factors, is equal to the number of columns of B
#nD: number of rows of U
#df0: degrees of freedom associated to the prior of PSI
#S0: scale parameter associated to the prior for PSI
#priorVar: prior variance if the Gaussian prior assigned to the unknown loadings
#Function taken from MTM package
sample_G0_FA <- function(U, F, M, B, PSI, traits, nF, nD,
df0 = rep(1, traits), S0 = rep(1/100,traits), priorVar = 100)
{
## sampling common factors LOOP OVER FACTORS
for (i in 1:nF)
{
tmpY <- U - F[, -i] %*% matrix((B[, -i]), ncol = traits)
rhs <- tmpY %*% matrix(B[, i]/PSI, ncol = 1)
CInv <- 1/(sum((B[, i]^2)/PSI) + 1)
sol <- CInv * rhs
SD <- sqrt(CInv)
F[, i] <- rnorm(n = nD, sd = SD, mean = sol)
}
# sampling loadings LOOP OVER TRAITS LOOP OVER FACTORS
for (i in 1:traits)
{
for (j in 1:nF)
{
if (M[i, j])
{
tmpY <- U[, i] - F[, -j] %*% matrix(B[i, -j], ncol = 1)
CInv <- 1/as.numeric(crossprod(F[, j])/PSI[i] + 1/priorVar)
rhs <- as.numeric(crossprod(F[, j], tmpY)/PSI[i])
sol <- CInv * rhs
SD <- sqrt(CInv)
B[i, j] <- rnorm(n = 1, mean = sol, sd = SD)
}
}
D <- U[, i] - F %*% B[i, ]
df <- df0[i] + nD
SS <- S0[i] + crossprod(D)
PSI[i] <- SS/rchisq(df = df, n = 1)
}
if (nF > 1)
{
B <- varimax(B)$loadings[]
}
G <- tcrossprod(B) + diag(PSI)
out <- list(F = F, PSI = PSI, B = B, G=G)
return(out)
}
#Gibbs sampler for DIAG covariance matrices
#Function taken from MTM package
sample_G0_DIAG <- function(U, traits = ncol(U), n = nrow(U),
df0 = rep(0, traits),
S0 = diag(0, traits))
{
G <- matrix(nrow = traits, ncol = traits, 0)
## loop over traits, sampling each variance from its scaled inverse chi-squared full conditional
for (i in 1:traits)
{
tmp_SS <- sum(U[, i]^2) + S0[i]
tmp_df <- n + df0[i]
G[i, i] <- tmp_SS/rchisq(df = tmp_df, n = 1)
}
return(G)
}
#Sample the vector mu (intercept)
#The prior for mu is non informative
#For trait k, y_k = mu_k * 1 + eta_j + e_j
#ystar_k = y_k - eta_j = mu_k * 1 + e_j
#mu | else ~ MN(m,R/n), where m is the vector of sample means obtained from ystar
#Arguments:
#ystar: matrix, individuals in rows, traits in columns
#R: residual covariance matrix
#n: number of rows of y
#traits: number of traits
sample_mu <- function(ystar, R, n, traits)
{
sol <- colMeans(ystar)
L <- chol(R)/sqrt(n)
mu <- as.vector(crossprod(L, rnorm(n=traits,mean=0,sd=1)) + sol)
return(mu)
}
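#Sketch (one random draw, interface only): the draw is centered at colMeans(ystar)
# ys<-matrix(rnorm(200),ncol=2)
# sample_mu(ystar=ys,R=diag(2),n=nrow(ys),traits=2)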
# Function to read effects saved by Multitrait when ETA[[j]]$saveEffects=TRUE
# It returns a 3D array, with dim=c(nRow,p,traits)
# nRow number of MCMC samples saved,
# p number of predictors
# traits number of traits
readBinMatMultitrait<-function(filename,storageMode="double")
{
if(!storageMode%in%c("single","double")){
stop("storageMode can either be 'single' or 'double' (default)")
}
fileIn<-gzfile(filename,open='rb')
nRow<-readBin(fileIn,n=1,what=numeric(),size=ifelse(storageMode=="single",4,8))
traits<-readBin(fileIn,n=1,what=numeric(),size=ifelse(storageMode=="single",4,8))
p<-readBin(fileIn,n=1,what=numeric(),size=ifelse(storageMode=="single",4,8))
Beta<-array(data=NA,dim=c(nRow,p,traits))
for(j in 1:nRow)
{
for(k in 1:traits)
{
Beta[j,,k]<-readBin(fileIn,n=p,what=numeric(),size=ifelse(storageMode=="single",4,8))
}
}
close(fileIn)
return(Beta)
}
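#Usage sketch, assuming Multitrait() was run with ETA[[j]]$saveEffects=TRUE and the
#default naming so that a file such as "ETA_1_beta.bin" exists (the file name depends
#on saveAt and on the name given to the linear term):
# B<-readBinMatMultitrait("ETA_1_beta.bin")
# dim(B) #c(nRow, p, traits): saved samples x predictors x traits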
#Get genetic co-variance matrix
#Internal function, used by getGCovar
getG0i<-function(Z,Bi)
{
U<-Z%*%Bi
G0i<-cov(U)
return(G0i[row(G0i)>=col(G0i)])
}
#Genetic co-variance matrix using MCMC samples
#Lehermeier et al., 2017.
#Genomic Variance Estimates: With or without Disequilibrium Covariances?
#J Anim Breed Genet, 134(3):232-241.
#Arguments:
#X: matrix of covariates
#B: samples for regression coefficients, 3D array, with dim=c(nRow,p,traits)
# nRow number of MCMC samples saved,
# p number of predictors
# traits number of traits
getGCovar<-function(X,B)
{
q<-dim(B)[3]
G<-t(apply(FUN=getG0i,X=B,Z=X,MARGIN=1))
return(G)
}
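#Sketch combining the two helpers above (object names are illustrative):
# B<-readBinMatMultitrait("ETA_1_beta.bin") #MCMC samples of marker effects
# G<-getGCovar(X=X,B=B) #one row per sample, columns are vech of the genetic covariance matrix
# colMeans(G) #posterior means of the stacked lower triangle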
#Function to fit multi-trait model
#Arguments:
#y: A matrix of dimension n * t, where t is the number of traits, NAs allowed.
#ETA: Linear predictor, a two level list used to specify the linear predictors.
#intercept: Logical, if TRUE an intercept is included, if FALSE it is not.
#resCov: List to specify the prior for the residual covariance matrix (R).
#nIter, burnIn, thin: Number of iterations, burnIn and thin.
#verbose: logical, TRUE/FALSE to print iteration history
Multitrait<-function(y,
ETA,
intercept=TRUE,
resCov = list(df0=5,S0=NULL,type="UN"),
R2=0.5,
nIter=1000,burnIn=500,thin=10,
saveAt="",verbose=TRUE)
{
#Check inputs
if(!is.matrix(y)) stop("y must be a matrix\n")
traits<-ncol(y)
if(traits<2) stop("y must hava at least 2 columns\n")
n<-nrow(y)
#Compute sample variance covariance matrix
Sy<-cov(y,use="complete.obs")
#Deep copy of y, DO NOT REPLACE with y.back<-y, it does not work, both objects
#share the same memory address
y.back<-matrix(NA,nrow=n,ncol=traits)
y.back<-y[]
#Now check if there are missing values
complete_records <- which(complete.cases(y))
n_complete_observed <- length(complete_records)
n_partially_observed <- 0 #Just initial value
n_complete_missing <- 0 #Just initial value
#Note:
#n=n_complete_observed + n_partially_observed + n_complete_missing
missings<-any(is.na(y))
if(missings)
{
missing_records<-which(!complete.cases(y))
#patterns of missing records, should be a matrix
patterns<-is.na(y[missing_records,])
#Case when there is only one missing record
if(!is.matrix(patterns))
{
patterns<-matrix(patterns,ncol=ncol(y))
}
Dpatterns<-apply(patterns,1,logicalToDec) #patterns in decimal
Upatterns<-unique(patterns,drop=FALSE) #Unique patterns
dUpatterns<-apply(Upatterns,1,logicalToDec) #decimal of unique patterns
dAllMissings<-logicalToDec(rep(TRUE,traits)) #decimal representation of a record
#with all missing values
m<-colMeans(y,na.rm=TRUE)
for(k in 1:traits)
{
tmp<-is.na(y[,k])
y[tmp,k]<-m[k]
rm(tmp)
}
#Partially observed records
n_partially_observed <- sum(Dpatterns!=dAllMissings)
#Complete missing
n_complete_missing <- sum(Dpatterns==dAllMissings)
}else{
missing_records <- NULL
Dpatterns <- NULL
Upatterns <- NULL
dUpatterns <- NULL
dAllMissings <- NULL
}
#For likelihood
cte<- -(n_complete_observed + n_partially_observed)*traits/2*log(2*pi)
if(intercept)
{
mu<-colMeans(y)
post_mu<-rep(0,traits)
post_mu2<-rep(0,traits)
f_mu<-file(description=paste(saveAt,"mu.dat",sep=""),open="w")
}
#Setting the linear terms
nLT <- ifelse(is.null(ETA), 0, length(ETA))
if(nLT<1) stop("Provide at least a linear predictor in ETA\n")
#Names of linear terms
if(is.null(names(ETA)))
{
names(ETA)<-rep("",nLT)
}
nRow<-nIter/thin
if(nRow<1) stop("Check nIter, thin\n")
for(j in 1:nLT)
{
if(names(ETA)[j]=="")
{
ETA[[j]]$Name=paste("ETA_",j,sep="")
}else{
ETA[[j]]$Name=paste("ETA_",names(ETA)[j],sep="")
}
if(!(ETA[[j]]$model %in% c("SpikeSlab","BRR","RKHS","FIXED")))
{
stop("Error in ETA[[", j, "]]", " model ", ETA[[j]]$model, " not implemented (note: evaluation is case sensitive)")
}
ETA[[j]]<-switch(ETA[[j]]$model,
SpikeSlab=setLT.DiracSS_mt(LT=ETA[[j]],traits=traits,j=j,Sy=Sy,nLT=nLT,R2=R2,saveAt=saveAt,nRow=nRow),
BRR=setLT.BRR_mt(LT=ETA[[j]],traits=traits,j=j,Sy=Sy,nLT=nLT,R2=R2,saveAt=saveAt,nRow=nRow),
RKHS=setLT.RKHS_mt(LT=ETA[[j]],traits=traits,j=j,Sy=Sy,nLT=nLT,R2=R2,saveAt=saveAt),
FIXED=setLT.FIXED_mt(LT=ETA[[j]],traits=traits,j=j,saveAt=saveAt,nRow=nRow))
} #End of cycle for setting linear terms
#error initialization assuming beta=0 in all the entries
#Deep copy of y, DO NOT REPLACE with error<-y, it does not work, both objects
#share the same memory address
error<-matrix(NA,nrow=n,ncol=traits)
error<-y[]
if(intercept)
{
error<-sweep(x=error,MARGIN=2,STATS=mu,FUN="-")
}
#Initialization of variance covariance matrix for errors (R)
resCov<-setResCov(resCov=resCov,traits=traits,error=error,Sy=Sy,R2=R2,saveAt=saveAt)
ETAHat<-matrix(0,nrow=n,ncol=traits)
ETAHat2<-matrix(0,nrow=n,ncol=traits)
post_logLik<-0
#Objects for running means
nSums<-0
#Iterations
for(iter in 1:nIter)
{
start<-proc.time()[3]
logLik <- cte
if(intercept)
{
error<-sweep(x=error,MARGIN=2,STATS=mu,FUN="+")
mu<-sample_mu(ystar=error, R=resCov$R, n=n, traits=traits)
error<-sweep(x=error,MARGIN=2,STATS=mu,FUN="-")
#print(mu)
}
for(j in 1:nLT)
{
#SpikeSlab
if(ETA[[j]]$model=="SpikeSlab")
{
# for(k in 1:traits)
# {
# #cat("k=",k,"\n")
#
# S11 <- ETA[[j]]$Cov$Omega[k,k,drop=FALSE]
# S22 <- ETA[[j]]$Cov$Omega[-k,-k,drop=FALSE]
# S12 <- ETA[[j]]$Cov$Omega[k,-k,drop=FALSE]
# tmp12 <- as.vector(S12%*%solve(S22))
# tmp11 <- tmp12%*%t(S12)
# sigma2 <- as.numeric(S11-tmp11)
#
# logPriorOdds<-log(ETA[[j]]$inclusionProb$probIn[k]/(1-ETA[[j]]$inclusionProb$probIn[k]))
#
# #b, d, beta and error are overwritten with this .Call
#
# .Call("sampler_DiracSS_mt", k, logPriorOdds, n, ETA[[j]]$p,
# traits, resCov$Rinv, ETA[[j]]$X, error,
# ETA[[j]]$beta,
# ETA[[j]]$b,
# ETA[[j]]$d,
# ETA[[j]]$x2,
# tmp12,
# sigma2,
# ETA[[j]]$Cov$Omegainv[k,-k],
# ETA[[j]]$Cov$Omegainv[k,k])
#
# #Sampling inclusion probabilities | else
# mrkIn <- sum(ETA[[j]]$d[,k])
# shape1 <- mrkIn + ETA[[j]]$inclusionProb$countsIn[k]
# shape2 <- ETA[[j]]$p - mrkIn + ETA[[j]]$inclusionProb$countsOut[k]
# ETA[[j]]$inclusionProb$probIn[k]=rbeta(shape1 = shape1,
# shape2 = shape2,
# n = 1)
#
# }#End of loop for traits
#### BEGIN NEW code
logPriorOdds<-log(ETA[[j]]$inclusionProb$probIn/(1-ETA[[j]]$inclusionProb$probIn))
.Call("sampler_DiracSS_mt_v2",logPriorOdds,n, ETA[[j]]$p,
traits,resCov$Rinv, ETA[[j]]$X, error,
ETA[[j]]$beta,
ETA[[j]]$b,
ETA[[j]]$d,
ETA[[j]]$x2,
ETA[[j]]$Cov$Omega,
ETA[[j]]$Cov$Omegainv)
for(k in 1:traits)
{
#Sampling inclusion probabilities | else
mrkIn <- sum(ETA[[j]]$d[,k])
shape1 <- mrkIn + ETA[[j]]$inclusionProb$countsIn[k]
shape2 <- ETA[[j]]$p - mrkIn + ETA[[j]]$inclusionProb$countsOut[k]
ETA[[j]]$inclusionProb$probIn[k]=rbeta(shape1 = shape1,
shape2 = shape2,
n = 1)
}
### END NEW code
#Sampling from Omega | else
if(ETA[[j]]$Cov$type=="UN")
{
S4<-crossprod(ETA[[j]]$b)
ETA[[j]]$Cov$Omega<-riwish(v=ETA[[j]]$Cov$df0+traits+ETA[[j]]$p,
S=S4+ETA[[j]]$Cov$S0)
ETA[[j]]$Cov$Omegainv<-solve(ETA[[j]]$Cov$Omega)
}
if(ETA[[j]]$Cov$type=="DIAG")
{
ETA[[j]]$Cov$Omega<-sample_G0_DIAG(U=ETA[[j]]$b, traits=traits,
n=nrow(ETA[[j]]$b),
df0=ETA[[j]]$Cov$df0,
S0=ETA[[j]]$Cov$S0)
ETA[[j]]$Cov$Omegainv<-solve(ETA[[j]]$Cov$Omega)
}
if(ETA[[j]]$Cov$type=="REC")
{
tmp<-sample_G0_REC(U=ETA[[j]]$b, M=ETA[[j]]$Cov$M,
PSI=ETA[[j]]$Cov$PSI,
traits=traits,
priorVar = ETA[[j]]$Cov$var,
df0 = ETA[[j]]$Cov$df0,
S0 = ETA[[j]]$Cov$S0)
ETA[[j]]$Cov$Omega<-tmp$G
ETA[[j]]$Cov$Omegainv<-solve(ETA[[j]]$Cov$Omega)
ETA[[j]]$Cov$W<-tmp$B
ETA[[j]]$Cov$PSI<-tmp$PSI
rm(tmp)
}
if(ETA[[j]]$Cov$type=="FA")
{
tmp<-sample_G0_FA(U=ETA[[j]]$b, F=ETA[[j]]$Cov$F, M=ETA[[j]]$Cov$M,
B=ETA[[j]]$Cov$W, PSI=ETA[[j]]$Cov$PSI,
traits=traits, nF=ETA[[j]]$Cov$nF,
nD=ETA[[j]]$Cov$nD, df0 = ETA[[j]]$Cov$df0,
S0 = ETA[[j]]$Cov$S0,
priorVar = ETA[[j]]$Cov$var)
ETA[[j]]$Cov$F<-tmp$F
ETA[[j]]$Cov$PSI<-tmp$PSI
ETA[[j]]$Cov$W<-tmp$B
ETA[[j]]$Cov$Omega<-tmp$G
ETA[[j]]$Cov$Omegainv<-solve(ETA[[j]]$Cov$Omega)
rm(tmp)
}
#Compute the variance-covariance matrix for beta=b*d
ETA[[j]]$Cov$Sigma<-cov(ETA[[j]]$beta)
} #End of SpikeSlab
if(ETA[[j]]$model%in%c("BRR","RKHS"))
{
# for(k in 1:traits)
# {
# #cat("k=",k,"\n")
#
# #beta and error are overwritten with this .Call
# .Call("sampler_BRR_mt", k, n, ETA[[j]]$p,
# traits, resCov$Rinv, ETA[[j]]$X, error,
# ETA[[j]]$beta,
# ETA[[j]]$x2,
# ETA[[j]]$Cov$Omegainv[k,-k],
# ETA[[j]]$Cov$Omegainv[k,k])
#
# }#End of loop for traits
### BEGIN NEW code
.Call("sampler_BRR_mt_v2", n, ETA[[j]]$p,
traits, resCov$Rinv, ETA[[j]]$X, error,
ETA[[j]]$beta,
ETA[[j]]$x2,
ETA[[j]]$Cov$Omegainv)
### END NEW code
#Sampling from Omega | else
if(ETA[[j]]$Cov$type=="UN")
{
S4<-crossprod(ETA[[j]]$beta)
ETA[[j]]$Cov$Omega<-riwish(v=ETA[[j]]$Cov$df0+traits+ETA[[j]]$p,
S=S4+ETA[[j]]$Cov$S0)
ETA[[j]]$Cov$Omegainv<-solve(ETA[[j]]$Cov$Omega)
}#End of UN
if(ETA[[j]]$Cov$type=="DIAG")
{
ETA[[j]]$Cov$Omega<-sample_G0_DIAG(U=ETA[[j]]$beta, traits=traits,
n=nrow(ETA[[j]]$beta),
df0=ETA[[j]]$Cov$df0,
S0=ETA[[j]]$Cov$S0)
ETA[[j]]$Cov$Omegainv<-solve(ETA[[j]]$Cov$Omega)
}#End of DIAG
if(ETA[[j]]$Cov$type=="REC")
{
tmp<-sample_G0_REC(U=ETA[[j]]$beta, M=ETA[[j]]$Cov$M,
PSI=ETA[[j]]$Cov$PSI,
traits=traits,
priorVar = ETA[[j]]$Cov$var,
df0 = ETA[[j]]$Cov$df0,
S0 = ETA[[j]]$Cov$S0)
ETA[[j]]$Cov$Omega<-tmp$G
ETA[[j]]$Cov$Omegainv<-solve(ETA[[j]]$Cov$Omega)
ETA[[j]]$Cov$W<-tmp$B
ETA[[j]]$Cov$PSI<-tmp$PSI
rm(tmp)
}#End of REC
if(ETA[[j]]$Cov$type=="FA")
{
tmp<-sample_G0_FA(U=ETA[[j]]$beta, F=ETA[[j]]$Cov$F, M=ETA[[j]]$Cov$M,
B=ETA[[j]]$Cov$W, PSI=ETA[[j]]$Cov$PSI,
traits=traits, nF=ETA[[j]]$Cov$nF,
nD=ETA[[j]]$Cov$nD, df0 = ETA[[j]]$Cov$df0,
S0 = ETA[[j]]$Cov$S0,
priorVar = ETA[[j]]$Cov$var)
ETA[[j]]$Cov$F<-tmp$F
ETA[[j]]$Cov$PSI<-tmp$PSI
ETA[[j]]$Cov$W<-tmp$B
ETA[[j]]$Cov$Omega<-tmp$G
ETA[[j]]$Cov$Omegainv<-solve(ETA[[j]]$Cov$Omega)
rm(tmp)
}#End of FA
}#End of BRR and RKHS
if(ETA[[j]]$model=="FIXED")
{
if(ETA[[j]]$common)
{
for(k in 1:traits)
{
#beta and error are overwritten with this .Call
.Call("sampler_BRR_mt", k, n, ETA[[j]]$p,
traits, resCov$Rinv, ETA[[j]]$X, error,
ETA[[j]]$beta,
ETA[[j]]$x2,
ETA[[j]]$Cov$Omegainv[k,-k],
ETA[[j]]$Cov$Omegainv[k,k])
}#End of loop for traits
}else{
low<-ETA[[j]]$lower
up<-ETA[[j]]$upper
for(k in 1:traits)
{
#Exactly the same routine as before, but instead of X, with
#X=[X_1,...,X_t], k=1,...,t (traits), we pass as
#argument X_k, and the corresponding sum of squares of columns
Xk<-ETA[[j]]$X[,c(low[k]:up[k]),drop=FALSE]
x2k<-ETA[[j]]$x2[c(low[k]:up[k])]
#beta and error are overwritten with this .Call
.Call("sampler_BRR_mt", k, n, ETA[[j]]$p,
traits, resCov$Rinv, Xk, error,
ETA[[j]]$beta,
x2k,
ETA[[j]]$Cov$Omegainv[k,-k],
ETA[[j]]$Cov$Omegainv[k,k])
}#End of loop for traits
}
} #End of FIXED
} #End of loop linear terms
#Sampling from R
if(resCov$type=="UN")
{
CP<-crossprod(error)
resCov$R<-riwish(v=resCov$df0+n,S=CP+resCov$S0)
}
if(resCov$type=="DIAG")
{
resCov$R<-sample_G0_DIAG(U=error, traits=traits, n=nrow(error),
df0=resCov$df0, S0=resCov$S0)
}
if(resCov$type=="REC")
{
tmp<-sample_G0_REC(U=error,M=resCov$M, PSI=resCov$PSI,
traits=traits, priorVar = resCov$var,
df0 = resCov$df0,
S0 = resCov$S0)
resCov$R<-tmp$G
resCov$W<-tmp$B
resCov$PSI<-tmp$PSI
rm(tmp)
}
if(resCov$type=="FA")
{
tmp<-sample_G0_FA(U=error, F=resCov$F, M=resCov$M,
B=resCov$W, PSI=resCov$PSI,
traits=traits, nF=resCov$nF, nD=nrow(error),
df0 = resCov$df0,
S0 = resCov$S0,
priorVar = resCov$var)
resCov$F<-tmp$F
resCov$PSI<-tmp$PSI
resCov$W<-tmp$B
resCov$R<-tmp$G
}
resCov$Rinv<-solve(resCov$R)
#Impute missing values
#Linear predictor
lp <- y-error
#Log likelihood for complete cases
logLik <- logLik + partialLogLik(error[complete_records,,drop=FALSE],resCov$R)
if(missings)
{
for (q in 1:length(dUpatterns))
{
#Some traits observed
if(dUpatterns[q]!=dAllMissings)
{
#1=missing, 2=observed
S11<-resCov$R[Upatterns[q,],Upatterns[q,],drop=FALSE]
S12<-resCov$R[Upatterns[q,],!Upatterns[q,],drop=FALSE]
S21<-resCov$R[!Upatterns[q,],Upatterns[q,],drop=FALSE]
S22<-resCov$R[!Upatterns[q,],!Upatterns[q,],drop=FALSE]
tmp12<-S12%*%solve(S22)
index<-missing_records[Dpatterns==dUpatterns[q]]
#logLik
logLik <- logLik + partialLogLik(error[index,!Upatterns[q,],drop=FALSE],S22)
mu1<-lp[index,Upatterns[q,],drop=FALSE]
mu2<-lp[index,!Upatterns[q,],drop=FALSE]
Sigma<-S11-tmp12%*%S21
tmp3<-tmp12%*%t(y[index,!Upatterns[q,],drop=FALSE]-mu2)
tmp3<-t(tmp3)
mean<-mu1+tmp3
#Impute y and overwrite the value
y[index,Upatterns[q,]]<-mean+mvrnorm(n=length(index),mu=rep(0,nrow(Sigma)),Sigma=Sigma)
}else{
#Observations for all traits are missing for some records
index<-missing_records[Dpatterns==dUpatterns[q]]
#predict y and overwrite the value
y[index,]<-lp[index,]+mvrnorm(n=length(index),mu=rep(0,nrow(resCov$R)),Sigma=resCov$R)
}
}
#Update residuals
for(k in 1:traits)
{
index<-missing_records[patterns[,k]]
error[index,k] <- y[index,k]-lp[index,k]
}
}
#Saving files
if(iter%%thin==0)
{
#mu
if(intercept)
{
write(mu,ncolumns=length(mu),file=f_mu,append=TRUE,sep=" ")
}
#resCov
tmp <- vech(resCov$R)
write(tmp, ncolumns = length(tmp), file = resCov$f_R, append = TRUE, sep = " ")
rm(tmp)
if(resCov$type%in%c("REC","FA"))
{
if (sum(resCov$M) > 0)
{
tmp <- resCov$W[resCov$M]
write(tmp, ncolumns = length(tmp), file = resCov$f_W, append = TRUE,
sep = " ")
rm(tmp)
}
write(resCov$PSI, ncolumns = length(resCov$PSI), file = resCov$f_PSI,
append = TRUE, sep = " ")
}
for(j in 1:nLT)
{
if(ETA[[j]]$model%in%c("SpikeSlab","BRR","RKHS"))
{
if(ETA[[j]]$Cov$type%in%c("REC","FA"))
{
if (sum(ETA[[j]]$Cov$M) > 0)
{
tmp <- ETA[[j]]$Cov$W[ETA[[j]]$Cov$M]
write(tmp, ncolumns = length(tmp), file = ETA[[j]]$Cov$f_W,
append = TRUE, sep = " ")
rm(tmp)
}
write(ETA[[j]]$Cov$PSI, ncolumns = length(ETA[[j]]$Cov$PSI),
file = ETA[[j]]$Cov$f_PSI, append = TRUE, sep = " ")
}#End REC and FA
if(ETA[[j]]$Cov$type%in%c("UN","DIAG"))
{
tmp <- vech(ETA[[j]]$Cov$Omega)
write(tmp, ncolumns = length(tmp), file = ETA[[j]]$Cov$f_Omega, append = TRUE, sep = " ")
rm(tmp)
}
}#End of SpikeSlab, BRR and RKHS
}#End for
#Saving beta effects and indicator variables
for(j in 1:nLT)
{
if(ETA[[j]]$model%in%c("SpikeSlab","BRR","FIXED"))
{
if(ETA[[j]]$saveEffects)
{
writeBin(object=as.vector(ETA[[j]]$beta),
con=ETA[[j]]$fileEffects,
size=ifelse(ETA[[j]]$storageMode=="single",4,8))
}
if(ETA[[j]]$model=="SpikeSlab")
{
if(ETA[[j]]$saveIndicators)
{
#entries are saved in "single" mode
writeBin(object=as.vector(ETA[[j]]$d),
con=ETA[[j]]$fileIndicators,
size=4)
}
}
}
}
}#End of saving files
#Running means
if((iter>burnIn) & (iter%%thin==0))
{
nSums<-nSums + 1
fraction <- (nSums - 1)/nSums
#mu
if(intercept)
{
post_mu<-post_mu * fraction + mu/nSums
post_mu2<-post_mu2 * fraction + mu^2/nSums
}
#Predictions
ETAHat<-ETAHat * fraction + lp/nSums
ETAHat2<-ETAHat2 * fraction + lp^2/nSums
#Residual
resCov$post_R <- resCov$post_R * fraction + resCov$R/nSums
resCov$post_R2<- resCov$post_R2 * fraction + resCov$R^2/nSums
if(resCov$type%in%c("REC","FA"))
{
resCov$post_W <- resCov$post_W * fraction + resCov$W/nSums
resCov$post_W2 <- resCov$post_W2 * fraction + resCov$W^2/nSums
resCov$post_PSI <- resCov$post_PSI * fraction + resCov$PSI/nSums
resCov$post_PSI2 <- resCov$post_PSI2 * fraction + resCov$PSI^2/nSums
}
#Likelihood
post_logLik<- post_logLik * fraction + logLik/nSums
for(j in 1:nLT)
{
#All the models, post_beta and post_beta2
ETA[[j]]$post_beta <- ETA[[j]]$post_beta * fraction + ETA[[j]]$beta/nSums
ETA[[j]]$post_beta2 <- ETA[[j]]$post_beta2 * fraction + ETA[[j]]$beta^2/nSums
#post_b, post_b2, post_d, post_d2
if(ETA[[j]]$model=="SpikeSlab")
{
ETA[[j]]$post_b <- ETA[[j]]$post_b * fraction + ETA[[j]]$b/nSums
ETA[[j]]$post_b2 <- ETA[[j]]$post_b2 * fraction + ETA[[j]]$b^2/nSums
ETA[[j]]$post_d <- ETA[[j]]$post_d * fraction + ETA[[j]]$d/nSums
ETA[[j]]$post_d2 <- ETA[[j]]$post_d2 * fraction + ETA[[j]]$d^2/nSums
}
if(ETA[[j]]$model%in%c("SpikeSlab","BRR","RKHS"))
{
#post_Omega, post_Omega2
ETA[[j]]$Cov$post_Omega <- ETA[[j]]$Cov$post_Omega * fraction + ETA[[j]]$Cov$Omega/nSums
ETA[[j]]$Cov$post_Omega2 <- ETA[[j]]$Cov$post_Omega2 * fraction + ETA[[j]]$Cov$Omega^2/nSums
if(ETA[[j]]$model%in%"SpikeSlab")
{
ETA[[j]]$Cov$post_Sigma <- ETA[[j]]$Cov$post_Sigma * fraction + ETA[[j]]$Cov$Sigma/nSums
ETA[[j]]$Cov$post_Sigma2 <- ETA[[j]]$Cov$post_Sigma2 * fraction + ETA[[j]]$Cov$Sigma^2/nSums
}
if(ETA[[j]]$Cov$type%in%c("REC","FA"))
{
ETA[[j]]$Cov$post_W <- ETA[[j]]$Cov$post_W * fraction + ETA[[j]]$Cov$W/nSums
ETA[[j]]$Cov$post_W2 <- ETA[[j]]$Cov$post_W2 * fraction + ETA[[j]]$Cov$W^2/nSums
ETA[[j]]$Cov$post_PSI <- ETA[[j]]$Cov$post_PSI * fraction + ETA[[j]]$Cov$PSI/nSums
ETA[[j]]$Cov$post_PSI2 <- ETA[[j]]$Cov$post_PSI2 * fraction + ETA[[j]]$Cov$PSI^2/nSums
}
}
}#End for linear terms
}
end<-proc.time()[3]
if(verbose)
{
cat(paste("Iter: ", iter, "time: ", round(end - start, 4)," s \n"))
}
}#End of loop for iterations
if(intercept)
{
mu<-post_mu
SD.mu<-sqrt(post_mu2-post_mu^2)
}
#Renaming/removing objects in ETA
for(j in 1:nLT)
{
#beta and SD.beta
if(ETA[[j]]$model%in%c("SpikeSlab","BRR","FIXED"))
{
ETA[[j]]$beta<-ETA[[j]]$post_beta
ETA[[j]]$SD.beta<-sqrt(ETA[[j]]$post_beta2-ETA[[j]]$post_beta^2)
}
#u, FIXME: SD.u missing
if(ETA[[j]]$model=="RKHS")
{
ETA[[j]]$u<-ETA[[j]]$X%*%ETA[[j]]$post_beta
}
#post_b, post_b2, post_d, post_d2
if(ETA[[j]]$model=="SpikeSlab")
{
ETA[[j]]$b<-ETA[[j]]$post_b
ETA[[j]]$SD.b<-sqrt(ETA[[j]]$post_b2-ETA[[j]]$post_b^2)
ETA[[j]]$d<-ETA[[j]]$post_d
ETA[[j]]$SD.d<-sqrt(ETA[[j]]$post_d2-ETA[[j]]$post_d^2)
}
if(ETA[[j]]$model%in%c("SpikeSlab","BRR","RKHS"))
{
#Omega
ETA[[j]]$Cov$Omega<-ETA[[j]]$Cov$post_Omega
ETA[[j]]$Cov$SD.Omega<-sqrt(ETA[[j]]$Cov$post_Omega2-ETA[[j]]$Cov$post_Omega^2)
if(ETA[[j]]$model=="SpikeSlab")
{
ETA[[j]]$Cov$Sigma<-ETA[[j]]$Cov$post_Sigma
ETA[[j]]$Cov$SD.Sigma<-sqrt(ETA[[j]]$Cov$post_Sigma2-ETA[[j]]$Cov$post_Sigma^2)
}
if(ETA[[j]]$Cov$type%in%c("REC","FA"))
{
ETA[[j]]$Cov$W<-ETA[[j]]$Cov$post_W
ETA[[j]]$Cov$SD.W<-sqrt(ETA[[j]]$Cov$post_W2-ETA[[j]]$Cov$post_W^2)
ETA[[j]]$Cov$PSI<-ETA[[j]]$Cov$post_PSI
ETA[[j]]$Cov$SD.PSI<-sqrt(ETA[[j]]$Cov$post_PSI2-ETA[[j]]$Cov$post_PSI^2)
}
tmp<-which(names(ETA[[j]]$Cov)%in%c("post_Omega","post_Omega2","Omegainv",
"post_W","post_W2","post_PSI","post_PSI2",
"F",
"post_Sigma","post_Sigma2"))
ETA[[j]]$Cov<-ETA[[j]]$Cov[-tmp]
rm(tmp)
}
#Deep cleaning!!
tmp<-which(names(ETA[[j]])%in%c("X","x2","post_beta","post_beta2",
"post_b","post_b2","post_d","post_d2"))
ETA[[j]]<-ETA[[j]][-tmp]
rm(tmp)
}#End for linear terms
#resCov
resCov$R<-resCov$post_R
resCov$SD.R<-sqrt(resCov$post_R2-resCov$post_R^2)
if(resCov$type%in%c("REC","FA"))
{
resCov$W<-resCov$post_W
resCov$SD.W<-sqrt(resCov$post_W2-resCov$post_W^2)
resCov$PSI<-resCov$post_PSI
resCov$SD.PSI<-sqrt(resCov$post_PSI2-resCov$post_PSI^2)
close(resCov$f_W)
resCov$f_W<-NULL
close(resCov$f_PSI)
resCov$f_PSI<-NULL
}
#Deep cleaning!
tmp<-which(names(resCov)%in%c("post_R","post_R2","Rinv","post_W","post_W2",
"post_PSI","post_PSI2","F"))
resCov<-resCov[-tmp]
rm(tmp)
#Closing files
if(intercept)
{
close(f_mu)
f_mu<-NULL
}
close(resCov$f_R)
resCov$f_R<-NULL
#Covariance matrices
for(j in 1:nLT)
{
if(ETA[[j]]$model%in%c("SpikeSlab","BRR","RKHS"))
{
if(ETA[[j]]$Cov$type%in%c("REC","FA"))
{
close(ETA[[j]]$Cov$f_W)
ETA[[j]]$Cov$f_W<-NULL
close(ETA[[j]]$Cov$f_PSI)
ETA[[j]]$Cov$f_PSI<-NULL
}#End of if REC, FA
if(ETA[[j]]$Cov$type%in%c("UN","DIAG"))
{
close(ETA[[j]]$Cov$f_Omega)
ETA[[j]]$Cov$f_Omega<-NULL
}
}#End of if SpikeSlab, BRR, RKHS
}#End of for
#Effect files & indicators files
for(j in 1:nLT)
{
if(!is.null(ETA[[j]]$fileEffects))
{
flush(ETA[[j]]$fileEffects)
close(ETA[[j]]$fileEffects)
ETA[[j]]$fileEffects<-NULL
if(!is.null(ETA[[j]]$compressEffects) && ETA[[j]]$compressEffects)
{
message("Compressing binary file for effects for term ", j)
compressFile(paste(saveAt,ETA[[j]]$Name,"_beta.bin",sep=""))
message("Done")
}
}
if(!is.null(ETA[[j]]$fileIndicators))
{
flush(ETA[[j]]$fileIndicators)
close(ETA[[j]]$fileIndicators)
ETA[[j]]$fileIndicators<-NULL
#Compress file by default to save a lot of space
message("Compressing binary file for indicators for term ", j)
compressFile(paste(saveAt,ETA[[j]]$Name,"_d.bin",sep=""))
message("Done")
}
}
SD.ETAHat<-sqrt(ETAHat2-ETAHat^2)
#Fit
fit<-getDIC(y.back, ETAHat, post_logLik, cte, complete_records, resCov$R,
missings, missing_records, Dpatterns, Upatterns,
dUpatterns,dAllMissings)
fit$postMeanLogLik<-post_logLik
#Return the goodies
out<-list(ETA=ETA,resCov=resCov,ETAHat=ETAHat,SD.ETAHat=SD.ETAHat,
fit=fit)
if(intercept)
{
out$mu<-mu
out$SD.mu<-SD.mu
}
if(missings)
{
out$missing_records<-missing_records
out$patterns<-patterns
}
return(out)
}
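#Minimal usage sketch for Multitrait; the wheat data set ships with BGLR, but the
#settings below (number of iterations, etc.) are arbitrary and chosen only for speed:
# data(wheat)
# Y<-wheat.Y #multiple traits
# X<-wheat.X #marker matrix (centering/scaling is optional)
# fm<-Multitrait(y=Y,ETA=list(mrk=list(X=X,model="BRR")),nIter=1000,burnIn=500)
# fm$resCov$R #posterior mean of the residual covariance matrix
# fm$ETA$mrk$Cov$Omega #posterior mean of the marker-effect covariance matrix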
|
/scratch/gouwar.j/cran-all/cranData/BGLR/R/Multitrait.R
|
# file BGLR/methods.R
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 or 3 of the License
# (at your option).
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#BGLR: A Statistical Package for Whole-Genome Regression & Prediction
#Authors: Gustavo de los Campos & Paulino Perez Rodriguez
#Birmingham, Alabama, 2013, 2014
#DO NOT REPLACE cat here since these are S3 methods
#use cat() when making the print.* functions for S3 objects
summary.BGLR=function(object,...)
{
if(!inherits(object, "BGLR")) stop("This function only works for objects of class `BGLR'")
tmp<-paste('--------------------> Summary of data & model <--------------------')
cat(tmp,'\n\n')
tmp<-paste(' Number of phenotypes=', sum(!is.na(object$y)))
cat(tmp,'\n')
cat(' Min (TRN)= ', min(object$y,na.rm=TRUE),'\n')
cat(' Max (TRN)= ', max(object$y,na.rm=TRUE),'\n')
cat(' Variance of phenotypes (TRN)=', round(var(object$y,na.rm=TRUE),4),'\n')
cat(' Residual variance=',round(object$varE,4),'\n')
n<-length(object$y)
if(any(is.na(object$y)))
{
tst<-which(is.na(object$y))
cat(' N-TRN=',n-length(tst), ' N-TST=',length(tst),'\n')
cat(' Correlation TRN=',round(cor(object$y[-tst],object$yHat[-tst]),4),'\n')
}else{
cat(' N-TRN=',n,' N-TST=0', '\n\n')
}
cat('\n')
cat(' -- Linear Predictor -- \n')
cat('\n')
cat(' Intercept included by default\n')
for(k in 1:length(object$ETA))
{
if(object$ETA[[k]]$model=="FIXED")
{
if(!is.null(names(object$ETA)[k])){
cat(" Coefficientes in ETA[",k,"] (",names(object$ETA)[k],") were asigned a flat prior\n")
}else{
cat(" Coefficientes in ETA[",k,"] (no-name) are asigned a flat prior\n")
}
}else{
if(object$ETA[[k]]$model=="RKHS")
{
if(!is.null(names(object$ETA)[k])){
cat(" Coefficientes in ETA[",k,"] (",names(object$ETA)[k],") were assumed to be normally distributed with zero mean and \n covariance (or its eigendecoposition) provided by user \n")
}else{
cat(" Coefficientes in ETA[",k,"] (no-name) were assumed to be normally distributed with zero mean and \n covariance (or its eigendecoposition) provided by user \n")
}
}else{
if(!is.null(names(object$ETA)[k])){
cat(" Coefficientes in ETA[",k,"] (",names(object$ETA)[k],") modeled as in ", object$ETA[[k]]$model,"\n")
}else{
cat(" Coefficientes in ETA[",k,"] (no-name) modeled as in ", object$ETA[[k]]$model,"\n")
}
}
}
}
cat('\n------------------------------------------------------------------\n');
}
residuals.BGLR=function(object,...)
{
if(!inherits(object, "BGLR")) stop("This function only works for objects of class `BGLR'")
object$y-object$yHat
}
predict.BGLR=function(object,newdata,...)
{
if (!inherits(object, "BGLR")) stop("This function only works for objects of class `BGLR'")
object$yHat
}
#effects.BGLR=function(object,...)
#{
# if(!inherits(object, "BGLR")) stop("This function only works for objects of class `BGLR'")
# object$ETA
#}
plot.BGLR=function(x,...)
{
### Check that object is compatible
if(!inherits(x, "BGLR")) stop("This function only works for objects of class `BGLR'")
limits<-range(c(x$y,x$yHat),na.rm=TRUE)
plot(x$y,x$yHat,main="Training",xlim=limits,ylim=limits,xlab='Response',ylab='Prediction');
abline(a=0,b=1,lty=3)
}
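#A minimal usage sketch for these S3 methods (hypothetical objects: `fm` denotes a model
#fitted with BGLR(), and `y` and `X` are a phenotype vector and an incidence matrix that
#are not defined here):
# fm<-BGLR(y=y,ETA=list(list(X=X,model='BRR')),nIter=1000,burnIn=500)
# summary(fm)          # data, model and training-fit summary
# e<-residuals(fm)     # y - yHat
# yHat<-predict(fm)    # fitted/predicted values (newdata is ignored)
# plot(fm)             # observed vs. predicted with a 45-degree reference line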
|
/scratch/gouwar.j/cran-all/cranData/BGLR/R/methods.R
|
readBinMat=function(filename,byrow=TRUE,storageMode="double"){
## Function to read effects saved by BGLR when ETA[[j]]$saveEffects=TRUE
## Also works with files generated by ETA[[j]]$compressEffects=TRUE
if(!storageMode%in%c("single","double")){
stop("storageMode can either be 'single' or 'double' (default)")
}
fileIn=gzfile(filename,open='rb')
n=readBin(fileIn,n=1,what=numeric(),size=ifelse(storageMode=="single",4,8))
p=readBin(fileIn,n=1,what=numeric(),size=ifelse(storageMode=="single",4,8))
tmp=readBin(fileIn,n=(n*p),what=numeric(),size=ifelse(storageMode=="single",4,8))
X=matrix(data=tmp,nrow=n,ncol=p,byrow=byrow)
close(fileIn)
return(X)
}
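#Usage sketch (the binary file name below is an assumption; the actual name depends on the
#saveAt prefix and on the position of the term in ETA, typically of the form 'ETA_1_b.bin'
#for the first term fitted with saveEffects=TRUE):
# fm<-BGLR(y=y,ETA=list(list(X=X,model='BayesB',saveEffects=TRUE)),nIter=1200,burnIn=200)
# B<-readBinMat('ETA_1_b.bin')   # one row per stored sample, one column per marker
# dim(B)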
writeBinMat=function(x,file,byrow=TRUE,storageMode="double"){
 ## Writes a matrix in the binary layout expected by readBinMat
 if(!storageMode%in%c("single","double")){
	stop("storageMode can either be 'single' or 'double' (default)")
 }
 n=nrow(x)
 p=ncol(x)
 ## Store the entries row-wise or column-wise so that readBinMat(...,byrow=byrow) recovers x
 x=if(byrow) as.vector(t(x)) else as.vector(x)
 fileOut<-file(file,open='wb')
 writeBin(object=as.numeric(n),con=fileOut,size=ifelse(storageMode=="single",4,8))
 writeBin(object=as.numeric(p),con=fileOut,size=ifelse(storageMode=="single",4,8))
 writeBin(object=as.numeric(x),con=fileOut,size=ifelse(storageMode=="single",4,8))
 close(fileOut)
}
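#Round-trip sketch (a minimal self-check that writeBinMat and readBinMat agree):
# B<-matrix(rnorm(20),nrow=4,ncol=5)
# f<-tempfile(fileext='.bin')
# writeBinMat(B,file=f)
# B2<-readBinMat(f)
# all.equal(B,B2,check.attributes=FALSE)   # TRUE, up to storage precision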
getVariances<-function(X,B,sets,verbose=TRUE)
{
nSets=length(unique(sets))
n=nrow(X)
setLabels=unique(sets)
nIter=nrow(B)
VAR=matrix(nrow=nIter,ncol=(nSets+1),NA)
colnames(VAR)<-c(setLabels,'total')
XList=list()
for(i in 1:nSets)
{
index<-sets==setLabels[i]
XList[[i]]<-list(index=index,X=X[,index,drop=F])
}
rm(X)
for(i in 1:nIter)
{
yHat<-rep(0,n)
for(j in 1:nSets)
{
uHat<-XList[[j]]$X%*%as.vector(B[i,XList[[j]]$index])
VAR[i,j]<-var(uHat)
yHat=yHat+uHat
}
if(verbose){ cat(' Working iteration',i,'(out of',nIter,')\n')}
VAR[i,(nSets+1)]<-var(yHat)
}
return(VAR)
}
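#Usage sketch (hypothetical inputs: B holds effect samples read with readBinMat, X is the
#matching incidence matrix, and `sets` is a grouping vector with one entry per column of X,
#e.g. chromosome labels; none of these objects are defined here):
# B<-readBinMat('ETA_1_b.bin')
# VAR<-getVariances(X=X,B=B,sets=chromosome,verbose=FALSE)
# colMeans(VAR)   # posterior means of the variance explained by each set and in total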
#
#Routines for Plink support
#http://pngu.mgh.harvard.edu/~purcell/plink/
#
#The documentation for the C functions can be found in src/util_plink.c
#This function will read a bed file (binary file for genotypes in plink format)
#Arguments:
#bed_file: Name for bed file
#bim_file: Name for bim file
#fam_file: Name for fam file
#na.strings: Missing value indicators
#verbose: Logical, if TRUE it will print information generated when reading the bed file.
#It returns a list with 3 components, n: number of individuals, p: number of markers,
#x: integer vector of length n*p with genotypic information.
#see demo/read_bed.R for an example
read_bed=function(bed_file,bim_file,fam_file,na.strings=c("0","-9"),verbose=FALSE)
{
#Extended map file (this gives the number of snps)
bim=read.table(bim_file, comment.char="", as.is=TRUE, na.strings=na.strings)
snps=as.character(bim[,2])
#First 6 columns of ped file (this gives the number of individuals)
fam=read.table(fam_file, comment.char="", as.is=TRUE, na.strings=na.strings)
n=nrow(fam)
p=length(snps)
out=rep(0,n*p)
if(verbose)
{
verbose=1
}else
{
verbose=0
}
bed_file=path.expand(bed_file)
out=.C("read_bed_",bed_file,as.integer(n),as.integer(p),as.integer(out),as.integer(verbose))[[4]]
return(list(n=n,p=p,x=out))
}
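#Usage sketch (hypothetical file names; the bed, bim and fam files must describe the same
#data set; see demo/read_bed.R for a full example):
# out<-read_bed(bed_file='geno.bed',bim_file='geno.bim',fam_file='geno.fam')
# str(out)   # list with n (individuals), p (markers) and x (integer vector of length n*p)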
#This function will read a ped file
#Note: It assumes that the missing value is 0
#It returns a list with 3 components, n: number of individuals, p: number of markers,
#x: integer vector of length n*p with genotypic information.
#see demo/read_ped.R for an example
read_ped=function(ped_file)
{
ped_file=path.expand(ped_file)
out=.Call("read_ped_",ped_file)
return(out)
}
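#Usage sketch (hypothetical file name; see demo/read_ped.R for a full example):
# out<-read_ped(ped_file='geno.ped')
# str(out)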
#This function will write a bed file (binary file for genotypes in plink format)
#x: vector with genotypic information
#n: number of individuals
#p: number of markers
#bed_file: Output file in bed format
#See demo/write_bed.R for an example
write_bed=function(x,n,p,bed_file)
{
#Check inputs
if(!is.numeric(x)) stop("x should be a numeric vector with integer codes");
if(min(x)<0) stop("Supported codes are 0,1,2,3");
if(max(x)>3) stop("Supported codes are 0,1,2,3");
if(n<=0) stop("n should be bigger than 0");
if(p<=0) stop("p should be bigger than 0");
if(length(x)!=n*p) stop("length of x is not equal to n*p");
#Function call
bed_file=path.expand(bed_file)
.C("write_bed_", bed_file, as.integer(n), as.integer(p), as.integer(x))
}
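#Usage sketch (writes a small simulated genotype vector; see demo/write_bed.R for a full
#example):
# n<-10; p<-5
# x<-sample(0:2,size=n*p,replace=TRUE)
# write_bed(x=x,n=n,p=p,bed_file=file.path(tempdir(),'toy.bed'))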
compressFile=function(pathname,bufferSize=1e6,keep=FALSE){
conIn=file(pathname,"rb")
conOut=gzfile(paste0(pathname,".gz"),"wb")
while(length(bytesRead<-readBin(conIn,"raw",bufferSize))>0){
writeBin(bytesRead,conOut)
}
close(conIn)
if(!keep){
unlink(pathname)
}
close(conOut)
}
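#Usage sketch (compresses a file in place; the original is removed unless keep=TRUE, and
#readBinMat can open the resulting '.gz' file directly because it uses gzfile):
# f<-file.path(tempdir(),'effects.bin')
# writeBinMat(matrix(rnorm(100),nrow=10),file=f)
# compressFile(f)
# B<-readBinMat(paste0(f,'.gz'))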
# Compute Bayesian FDR
BFDR=function(prob){
origNames=names(prob)
names(prob)=1:length(prob)
prob=sort(prob,decreasing=TRUE)
bfdr=1-cumsum(prob)/1:length(prob)
# Recover the original order and names
bfdr=bfdr[order(as.integer(names(prob)))]
names(bfdr)=origNames
return(bfdr)
}
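#Usage sketch (hypothetical: `prob` should hold posterior inclusion probabilities, one per
#marker, e.g. the $d component reported for variable-selection models such as BayesB/BayesC;
#that component name is an assumption here, not a documented guarantee):
# bfdr<-BFDR(prob=fm$ETA[[1]]$d)
# sum(bfdr<=0.05)   # markers selected at an estimated Bayesian FDR of 5%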
|
/scratch/gouwar.j/cran-all/cranData/BGLR/R/utils.R
|
rm(list=ls())
setwd(tempdir())
data(wheat)
n<-599 # should be <= 599
p<-300 # should be <= 1279=ncol(X)
nQTL<-30 # should be <= p
X<-wheat.X[1:n,1:p]
## Centering and standardization
for(i in 1:p)
{
X[,i]<-(X[,i]-mean(X[,i]))/sd(X[,i])
}
# Simulation
b0<-rep(0,p)
whichQTL<-sample(1:p,size=nQTL,replace=FALSE)
b0[whichQTL]<-rnorm(length(whichQTL),
sd=sqrt(1/length(whichQTL)))
signal<-as.vector(X%*%b0)
error<-rnorm(n=n,sd=sqrt(0.5))
y<-signal +error
nIter=500;
burnIn=100;
thin=3;
saveAt='';
S0=NULL;
weights=NULL;
R2=0.5;
ETA<-list(list(X=X,model='BayesA'))
fit_BA=BGLR(y=y,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2)
plot(fit_BA$yHat,y)
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/BA.R
|
rm(list=ls())
setwd(tempdir())
data(wheat)
n<-599 # should be <= 599
p<-1279 # should be <= 1279=ncol(X)
nQTL<-30 # should be <= p
X<-wheat.X[1:n,1:p]
## Centering and standardization
for(i in 1:p)
{
X[,i]<-(X[,i]-mean(X[,i]))/sd(X[,i])
}
# Simulation
b0<-rep(0,p)
whichQTL<-sample(1:p,size=nQTL,replace=FALSE)
b0[whichQTL]<-rnorm(length(whichQTL),
sd=sqrt(1/length(whichQTL)))
signal<-as.vector(X%*%b0)
error<-rnorm(n=n,sd=sqrt(0.5))
y<-signal +error
nIter=5000;
burnIn=2500;
thin=10;
saveAt='';
S0=NULL;
weights=NULL;
R2=0.5;
ETA<-list(list(X=X,model='BayesB',probIn=0.05))
fit_BB=BGLR(y=y,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2)
plot(fit_BB$yHat,y)
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/BB.R
|