binomial.lerouxCARMCMC <- function(Y, failures, trials, offset, X.standardised, W, rho, fix.rho, K, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
dat <- cbind(Y, failures)
mod.glm <- glm(dat~X.standardised-1, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
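## Empirical starting values: clip the raw proportions away from 0 and 1 so the
## logit transform below is finite, and scale the residual sd down by a factor
## of 5 to give the random effects phi a modest initial dispersion.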
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
tau2 <- var(phi) / 10
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
lp <- as.numeric(X.standardised %*% beta) + phi + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
Y.DA <- Y
failures.DA <- trials - Y.DA
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,6)
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the determinant
if(!fix.rho)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
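## Under the intrinsic model (rho = 1) the CAR precision matrix is singular with
## rank K minus the number of disconnected islands, so the inverse-gamma shape
## for tau2 uses (K - n.islands) in place of K.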
if(rho==1) tau2.posterior.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
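## Data augmentation: any missing responses are imputed from their binomial
## full conditional given the current fitted probabilities, so the remaining
## updates can condition on a complete data vector.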
if(n.miss>0)
{
Y.DA[which.miss==0] <- rbinom(n=n.miss, size=trials[which.miss==0], prob=prob[which.miss==0])
failures.DA <- trials - Y.DA
}else
{}
####################
## Sample from beta
####################
offset.temp <- phi + offset
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, K, p, beta, offset.temp, Y.DA, failures.DA, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, K, p, beta, offset.temp, Y.DA, failures.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
####################
## Sample from phi
####################
beta.offset <- X.standardised %*% beta + offset
temp1 <- binomialcarupdateRW(Wtriplet=W.triplet, Wbegfin=W.begfin, Wtripletsum=W.triplet.sum, nsites=K, phi=phi, tau2=tau2, y=Y.DA, failures=failures.DA, phi_tune=proposal.sd.phi, rho=rho, offset=beta.offset)
phi <- temp1[[1]]
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K
##################
## Sample from tau2
##################
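## Conjugate update: with prior tau2 ~ IG(a, b) and phi ~ N(0, tau2 * Q(rho)^(-1)),
## the full conditional is IG(a + K/2, b + phi' Q(rho) phi / 2); quadform() is
## assumed here to supply the quadratic form term on that scale.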
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, rho)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
##################
## Sample from rho
##################
if(!fix.rho)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q - temp2 / tau2
logprob.proposal <- det.Q.proposal - temp3 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
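## The truncated normal proposal on [0, 1] is not symmetric, so the Hastings
## correction above is the log ratio of proposal densities
## q(rho | proposal.rho) / q(proposal.rho | rho).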
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- det.Q.proposal
accept[5] <- accept[5] + 1
}else
{
}
accept[6] <- accept[6] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- as.numeric(X.standardised %*% beta) + phi + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.tau2[ele, ] <- tau2
if(!fix.rho) samples.rho[ele, ] <- rho
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{
}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
if(!fix.rho)
{
proposal.sd.rho <- common.accceptrates2(accept[5:6], proposal.sd.rho, 40, 50, 0.5)
}
accept <- c(0,0,0,0,0,0)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
if(fix.rho) samples.rho=NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
#### Source file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.lerouxCARMCMC.R
binomial.localisedCAR <- function(formula, data=NULL, trials, G, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, prior.delta = NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame.localised(formula, data, "binomial", trials)
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check and format the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials has missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials has non-numeric values.", call.=FALSE)
int.check <- K-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials has non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials has zero or negative values.", call.=FALSE)
failures <- trials - Y
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### Format and check the number of clusters G
if(length(G)!=1) stop("G is the wrong length.", call.=FALSE)
if(!is.numeric(G)) stop("G is not numeric.", call.=FALSE)
if(G<=1) stop("G is less than 2.", call.=FALSE)
if(G!=round(G)) stop("G is not an integer.", call.=FALSE)
if(floor(G/2)==ceiling(G/2))
{
Gstar <- G/2
}else
{
Gstar <- (G+1)/2
}
#### Priors
if(p>0)
{
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
}else
{
prior.mean.beta <- NULL
prior.var.beta <- NULL
}
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
common.prior.var.check(prior.tau2)
if(is.null(prior.delta)) prior.delta <- 10
if(length(prior.delta)!=1) stop("the prior value for delta is the wrong length.", call.=FALSE)
if(!is.numeric(prior.delta)) stop("the prior value for delta is not numeric.", call.=FALSE)
if(sum(is.na(prior.delta))!=0) stop("the prior value for delta has missing values.", call.=FALSE)
if(prior.delta<=0) stop("the prior value for delta is not positive.", call.=FALSE)
#### Compute the blocking structure for beta
if(p>0)
{
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
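## The subtraction of 1 converts the block indices to zero-based form, as they
## are passed to the underlying C++ updating functions.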
}else
{
n.beta.block <- NULL
list.block <- NULL
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.localisedCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, G=G, Gstar=Gstar, W=W, K=K, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.delta=prior.delta, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.localisedCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, G=G, Gstar=Gstar, W=W, K=K, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.delta=prior.delta, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.localisedCARMCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, G=G, Gstar=Gstar, W=W, K=K, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.delta=prior.delta, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.phi <- 100 * results$accept[1] / results$accept[2]
accept.lambda <- 100 * results$accept[3] / results$accept[4]
accept.delta <- 100 * results$accept[5] / results$accept[6]
accept.tau2 <- 100
if(p>0)
{
accept.beta <- 100 * results$accept[7] / results$accept[8]
accept.final <- c(accept.beta, accept.lambda, accept.delta, accept.phi, accept.tau2)
names(accept.final) <- c("beta", "lambda", "delta", "phi", "tau2")
}else
{
accept.final <- c(accept.lambda, accept.delta, accept.phi, accept.tau2)
names(accept.final) <- c("lambda", "delta", "phi", "tau2")
}
## Compute the model fit criterion
mean.phi <- apply(results$samples.phi, 2, mean)
mean.Z <- round(apply(results$samples.Z,2,mean),0)
mean.lambda <- apply(results$samples.lambda,2,mean)
if(p>0)
{
mean.beta <- apply(results$samples.beta, 2, mean)
regression.vec <- as.numeric(X.standardised %*% mean.beta)
}else
{
regression.vec <- rep(0,K)
}
mean.logit <- mean.lambda[mean.Z] + mean.phi + regression.vec + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE))
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
if(p>0)
{
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), lambda=mcmc(results$samples.lambda), Z=mcmc(results$samples.Z), tau2=mcmc(results$samples.tau2), delta=mcmc(results$samples.delta), fitted=mcmc(results$samples.fitted), Y=NA)
}else
{
samples <- list(phi=mcmc(results$samples.phi), lambda=mcmc(results$samples.lambda), Z=mcmc(results$samples.Z), tau2=mcmc(results$samples.tau2), delta=mcmc(results$samples.delta), fitted=mcmc(results$samples.fitted), Y=NA)
}
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.hyper[2, 1:3] <- c(mean(samples$delta), quantile(samples$delta, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.delta, effectiveSize(samples$delta), geweke.diag(samples$delta)$z)
summary.lambda <- t(rbind(apply(samples$lambda, 2, mean), apply(samples$lambda, 2, quantile, c(0.025, 0.975))))
summary.lambda <- cbind(summary.lambda, rep(n.keep, G), rep(accept.lambda, G), effectiveSize(samples$lambda), geweke.diag(samples$lambda)$z)
Z.used <- as.numeric(names(table(samples$Z)))
summary.lambda <- summary.lambda[Z.used, ]
if(p>0)
{
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
summary.results <- rbind(summary.beta, summary.lambda, summary.hyper)
row.names(summary.results)[(p+1):nrow(summary.results)] <- c(paste("lambda", Z.used, sep=""), "tau2", "delta")
colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
summary.results <- rbind(summary.lambda, summary.hyper)
row.names(summary.results) <- c(paste("lambda", Z.used, sep=""), "tau2", "delta")
colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.phi <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.lambda <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.delta <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
accept.tau2 <- 100
if(p>0)
{
accept.beta <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8])
accept.final <- c(accept.beta, accept.lambda, accept.delta, accept.phi, accept.tau2)
names(accept.final) <- c("beta", "lambda", "delta", "phi", "tau2")
}else
{
accept.final <- c(accept.lambda, accept.delta, accept.phi, accept.tau2)
names(accept.final) <- c("lambda", "delta", "phi", "tau2")
}
## Extract the samples into separate lists
if(p>0) samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]])
samples.Z.list <- lapply(results, function(l) l[["samples.Z"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
## Convert the samples into separate matrix objects
if(p>0) samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list)
samples.Z.matrix <- do.call(what=rbind, args=samples.Z.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.Z <- round(apply(samples.Z.matrix,2,mean),0)
mean.lambda <- apply(samples.lambda.matrix,2,mean)
if(p>0)
{
mean.beta <- apply(samples.beta.matrix, 2, mean)
regression.vec <- as.numeric(X.standardised %*% mean.beta)
}else
{
regression.vec <- rep(0,K)
}
mean.logit <- mean.lambda[mean.Z] + mean.phi + regression.vec + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE))
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
if(p>0)
{
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
}else
{}
## Create MCMC objects
if(p>0) beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
delta.temp <- samples.delta.list
lambda.temp <- samples.lambda.list
Z.temp <- samples.Z.list
tau2.temp <- samples.tau2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
for(j in 1:n.chains)
{
if(p>0) beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
delta.temp[[j]] <- mcmc(samples.delta.list[[j]])
lambda.temp[[j]] <- mcmc(samples.lambda.list[[j]])
Z.temp[[j]] <- mcmc(samples.Z.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
}
if(p>0) beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
delta.mcmc <- as.mcmc.list(delta.temp)
Z.mcmc <- as.mcmc.list(Z.temp)
lambda.mcmc <- as.mcmc.list(lambda.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
if(p>0)
{
samples <- list(beta=beta.mcmc, phi=phi.mcmc, lambda=lambda.mcmc, Z=Z.mcmc, tau2=tau2.mcmc, delta=delta.mcmc, fitted=fitted.mcmc, Y=NA)
}else
{
samples <- list(phi=phi.mcmc, lambda=lambda.mcmc, Z=Z.mcmc, tau2=tau2.mcmc, delta=delta.mcmc, fitted=fitted.mcmc, Y=NA)
}
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.hyper[2, 1:3] <- c(mean(samples.delta.matrix), quantile(samples.delta.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.delta, effectiveSize(delta.mcmc), gelman.diag(delta.mcmc)$psrf[ ,2])
summary.lambda <- t(rbind(apply(samples.lambda.matrix, 2, mean), apply(samples.lambda.matrix, 2, quantile, c(0.025, 0.975))))
summary.lambda <- cbind(summary.lambda, rep(n.keep, G), rep(accept.lambda, G), effectiveSize(lambda.mcmc), gelman.diag(lambda.mcmc)$psrf[ ,2])
Z.used <- as.numeric(names(table(samples.Z.matrix)))
summary.lambda <- summary.lambda[Z.used, ]
if(p>0)
{
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
summary.results <- rbind(summary.beta, summary.lambda, summary.hyper)
row.names(summary.results)[(p+1):nrow(summary.results)] <- c(paste("lambda", Z.used, sep=""), "tau2", "delta")
colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
summary.results <- rbind(summary.lambda, summary.hyper)
row.names(summary.results) <- c(paste("lambda", Z.used, sep=""), "tau2", "delta")
colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Binomial (logit link function", "\nRandom effects model - Localised CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=mean.Z, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
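#### A minimal usage sketch (hypothetical simulated data, shown for illustration
#### only; the localised model replaces the intercept with the cluster means
#### lambda, so the formula here carries no covariates):
## K <- 16^2
## grid <- expand.grid(x = 1:16, y = 1:16)
## distance <- as.matrix(dist(grid))
## W <- array(0, c(K, K))
## W[distance == 1] <- 1                        # rook-neighbour binary W matrix
## trials <- rep(50, K)
## prob <- rep(c(0.3, 0.6), each = K / 2)       # two latent risk clusters
## Y <- rbinom(n = K, size = trials, prob = prob)
## model <- binomial.localisedCAR(formula = Y ~ 1, trials = trials, G = 2, W = W,
##                                burnin = 2000, n.sample = 10000)
## model$summary.results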
#### Source file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.localisedCAR.R
binomial.localisedCARMCMC <- function(Y, failures, trials, offset, X.standardised, G, Gstar, W, K, p, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, prior.delta, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
if(p==0)
{
regression.vec <- rep(0, K)
beta <- NA
}else
{
mod.glm <- glm(cbind(Y, failures)~X.standardised, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients[-1]
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))[-1]
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
regression.vec <- X.standardised %*% beta
}
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - regression.vec - offset
clust <- kmeans(res.temp,G)
lambda <- clust$centers[order(clust$centers)]
Z <- rep(1, K)
for(j in 2:G)
{
Z[clust$cluster==order(clust$centers)[j]] <- j
}
delta <- runif(1,1, min(2, prior.delta))
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd = res.sd)
for(i in 1:G)
{
phi[which(Z==i)] <- phi[which(Z==i)] - mean(phi[which(Z==i)])
}
tau2 <- var(phi) / 10
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
lp <- lambda[Z] + phi + regression.vec + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.phi <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.Z <- array(NA, c(n.keep, K))
samples.lambda <- array(NA, c(n.keep, G))
samples.delta <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
#### Metropolis quantities
if(p>0)
{
samples.beta <- array(NA, c(n.keep, p))
accept <- rep(0,8)
proposal.sd.beta <- 0.01
}else
{
accept <- rep(0,6)
}
proposal.sd.phi <- 0.1
proposal.sd.delta <- 0.1
proposal.sd.lambda <- 0.01
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
###################
## Sample from beta
###################
if(p>0)
{
offset.temp <- phi + offset + lambda[Z]
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, K, p, beta, offset.temp, Y, failures, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, K, p, beta, offset.temp, Y, failures, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[7] <- accept[7] + temp[[2]]
accept[8] <- accept[8] + n.beta.block
regression.vec <- X.standardised %*% beta
}else
{}
##################
## Sample from phi
##################
phi.offset <- regression.vec + offset + lambda[Z]
temp1 <- binomialcarupdateRW(Wtriplet=W.triplet, Wbegfin=W.begfin, Wtripletsum=W.triplet.sum, nsites=K, phi=phi, tau2=tau2, y=Y, failures=failures, phi_tune=proposal.sd.phi, rho=1, offset=phi.offset)
phi <- temp1[[1]]
for(i in 1:G)
{
phi[which(Z==i)] <- phi[which(Z==i)] - mean(phi[which(Z==i)])
}
accept[1] <- accept[1] + temp1[[2]]
accept[2] <- accept[2] + K
##################
## Sample from tau2
##################
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, 1)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
#####################
## Sample from lambda
#####################
proposal.extend <- c(-1000, lambda, 1000)
lambda.extend <- c(-1000, lambda, 1000)
for(i in 1:G)
{
proposal.extend[(i+1)] <- rtruncnorm(n=1, a=proposal.extend[i], b=proposal.extend[(i+2)], mean=lambda[i], sd=proposal.sd.lambda)
}
proposal <- proposal.extend[2:(G+1)]
lp.current <- lambda[Z] + phi + regression.vec + offset
lp.proposal <- proposal[Z] + phi + regression.vec + offset
prob.current <- exp(lp.current) / (1 + exp(lp.current))
prob.proposal <- exp(lp.proposal) / (1 + exp(lp.proposal))
prob1 <- sum(Y * (log(prob.proposal) - log(prob.current)) + failures * (log(1-prob.proposal) - log(1-prob.current)))
prob <- exp(prob1)
if(prob > runif(1))
{
lambda <- proposal
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
################
## Sample from Z
################
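## Independence proposal for the class labels: new labels are drawn uniformly on
## 1:G for all K areas at once, 'prior' is the log ratio of the penalty terms
## exp(-delta * (Z - Gstar)^2), and each area's proposal is accepted or rejected
## separately below.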
Z.proposal <- sample(1:G, size=K, replace=TRUE)
prior <- delta * ((Z - Gstar)^2 - (Z.proposal-Gstar)^2)
lp.current <- lambda[Z] + phi + regression.vec + offset
lp.proposal <- lambda[Z.proposal] + phi + regression.vec + offset
prob.current <- exp(lp.current) / (1 + exp(lp.current))
prob.proposal <- exp(lp.proposal) / (1 + exp(lp.proposal))
like <- Y * (log(prob.proposal) - log(prob.current)) + failures * (log(1-prob.proposal) - log(1-prob.current))
prob <- exp(like + prior)
test <- prob> runif(K)
Z[test] <- Z.proposal[test]
####################
## Sample from delta
####################
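## Metropolis-Hastings update for delta on [1, prior.delta]: prob1 and prob2
## together form the log ratio of the Z prior f(Z | delta) under the current and
## proposed values, including its normalising constant summed over the G labels;
## the truncated normal Hastings correction mirrors the rho update above.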
proposal.delta <- rtruncnorm(n=1, a=1, b=prior.delta, mean=delta, sd=proposal.sd.delta)
prob1 <- sum((Z-Gstar)^2) * (delta - proposal.delta)
prob2 <- K * log(sum(exp(-delta *(1:G - Gstar)^2))) - K * log(sum(exp(-proposal.delta *(1:G - Gstar)^2)))
hastings <- log(dtruncnorm(x=delta, a=1, b=prior.delta, mean=proposal.delta, sd=proposal.sd.delta)) - log(dtruncnorm(x=proposal.delta, a=1, b=prior.delta, mean=delta, sd=proposal.sd.delta))
prob <- exp(prob1 + prob2 + hastings)
if(prob > runif(1))
{
delta <- proposal.delta
accept[5] <- accept[5] + 1
}else
{
}
accept[6] <- accept[6] + 1
#########################
## Calculate the deviance
#########################
lp <- lambda[Z] + phi + regression.vec + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.phi[ele, ] <- phi
samples.lambda[ele, ] <- lambda
samples.tau2[ele, ] <- tau2
samples.Z[ele, ] <- Z
samples.delta[ele, ] <- delta
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(p>0) samples.beta[ele, ] <- beta
}else
{
}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
if(p>0)
{
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
proposal.sd.lambda <- common.accceptrates1(accept[3:4], proposal.sd.lambda, 20, 40)
proposal.sd.delta <- common.accceptrates2(accept[5:6], proposal.sd.delta, 40, 50, prior.delta/6)
accept <- rep(0,8)
}else
{
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
proposal.sd.lambda <- common.accceptrates1(accept[3:4], proposal.sd.lambda, 20, 40)
proposal.sd.delta <- common.accceptrates2(accept[5:6], proposal.sd.delta, 40, 50, prior.delta/6)
accept <- rep(0,6)
}
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(p>0)
{
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.Z=samples.Z, samples.lambda=samples.lambda, samples.tau2=samples.tau2, samples.delta=samples.delta, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
accept=accept)
}else
{
chain.results <- list(samples.phi=samples.phi, samples.Z=samples.Z, samples.lambda=samples.lambda, samples.tau2=samples.tau2, samples.delta=samples.delta, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
accept=accept)
}
#### Return the results
return(chain.results)
}
#### Source file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.localisedCARMCMC.R
binomial.multilevelCAR <- function(formula, data=NULL, trials, W, ind.area, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "binomial")
n <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
K <- length(unique(ind.area))
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check and format the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials has missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials has non-numeric values.", call.=FALSE)
int.check <- n-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials has non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials has zero or negative values.", call.=FALSE)
failures <- trials - Y
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### rho
if(is.null(rho))
{
rho <- runif(1)
fix.rho <- FALSE
}else
{
fix.rho <- TRUE
}
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
#### Checks and formatting for ind.area
if(!is.vector(ind.area)) stop("ind.area is not a vector.", call.=FALSE)
if(sum(ceiling(ind.area)==floor(ind.area))!=n) stop("ind.area does not have all integer values.", call.=FALSE)
if(min(ind.area)!=1) stop("the minimum value in ind.area is not 1.", call.=FALSE)
if(max(ind.area)!=K) stop("the maximum value in ind.area is not equal to the number of spatial areal units.", call.=FALSE)
if(length(table(ind.area))!=K) stop("the number of unique areas in ind.area does not equal K.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.multilevelCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, ind.area=ind.area, rho=rho, fix.rho=fix.rho, n=n, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.multilevelCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, ind.area=ind.area, rho=rho, fix.rho=fix.rho, n=n, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.multilevelCARMCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, ind.area=ind.area, rho=rho, fix.rho=fix.rho, n=n, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.phi <- 100 * results$accept[3] / results$accept[4]
accept.tau2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * results$accept[5] / results$accept[6]
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "tau2")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.phi <- apply(results$samples.phi, 2, mean)
mean.phi.extend <- mean.phi[ind.area]
mean.logit <- as.numeric(X.standardised %*% mean.beta) + mean.phi.extend + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), rho=mcmc(results$samples.rho), tau2=mcmc(results$samples.tau2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
if(!fix.rho)
{
summary.hyper[2, 1:3] <- c(mean(samples$rho), quantile(samples$rho, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.rho, effectiveSize(samples$rho), geweke.diag(samples$rho)$z)
}else
{
summary.hyper[2, 1:3] <- c(rho, rho, rho)
summary.hyper[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.phi <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.tau2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "tau2")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.phi.extend <- mean.phi[ind.area]
mean.logit <- as.numeric(X.standardised %*% mean.beta) + mean.phi.extend + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
rho.temp <- samples.rho.list
tau2.temp <- samples.tau2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
rho.temp[[j]] <- mcmc(samples.rho.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
rho.mcmc <- as.mcmc.list(rho.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rho.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
if(!fix.rho)
{
summary.hyper[2, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.rho, effectiveSize(rho.mcmc), gelman.diag(rho.mcmc)$psrf[ ,2])
}else
{
summary.hyper[2, 1:3] <- c(rho, rho, rho)
summary.hyper[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Binomial (logit link function)", "\nRandom effects model - Multilevel Leroux CAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
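#### A minimal usage sketch (hypothetical data, for illustration only): n
#### individual-level observations nested within K areas, with ind.area mapping
#### each individual to its areal unit. Reusing a binary lattice W and K as in
#### the localised sketch above:
## n.each <- 5                                  # individuals per area
## ind.area <- rep(1:K, each = n.each)
## n <- K * n.each
## x <- rnorm(n)
## trials <- rep(20, n)
## prob <- exp(0.1 + 0.3 * x) / (1 + exp(0.1 + 0.3 * x))
## Y <- rbinom(n = n, size = trials, prob = prob)
## model <- binomial.multilevelCAR(formula = Y ~ x, trials = trials, W = W,
##                                 ind.area = ind.area, burnin = 2000, n.sample = 10000)
## model$summary.results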
#### Source file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.multilevelCAR.R
binomial.multilevelCARMCMC <- function(Y, failures, trials, offset, X.standardised, W, ind.area, rho, fix.rho, n, K, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
dat <- cbind(Y, failures)
mod.glm <- glm(dat~X.standardised-1, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
phi.extend <- phi[ind.area]
tau2 <- var(phi) / 10
lp <- as.numeric(X.standardised %*% beta) + phi.extend + offset
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
prob <- exp(lp) / (1 + exp(lp))
Y.DA <- Y
failures.DA <- trials - Y.DA
########################################
#### Set up the MCMC model run quantities
#########################################
#### Ind.area parts
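## ind.area maps each of the n individual-level observations to one of the K
## areas; the per-area index sets and counts are cached once here so the
## multilevel phi update does not recompute them at every iteration.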
ind.area.list <- as.list(rep(0,K))
n.individual <- rep(0,K)
n.individual.miss <- rep(0,K)
for(r in 1:K)
{
ind.area.list[[r]] <- which(ind.area==r)
n.individual[r] <- length(ind.area.list[[r]])
n.individual.miss[r] <- sum(which.miss[ind.area.list[[r]]])
}
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, n))
samples.fitted <- array(NA, c(n.keep, n))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,6)
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the spatial determinant
if(!fix.rho)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.posterior.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rbinom(n=n.miss, size=trials[which.miss==0], prob=prob[which.miss==0])
failures.DA <- trials - Y.DA
}else
{}
####################
## Sample from beta
####################
offset.temp <- phi.extend + offset
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, n, p, beta, offset.temp, Y.DA, failures.DA, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, n, p, beta, offset.temp, Y.DA, failures.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
####################
## Sample from phi
####################
beta.offset <- X.standardised %*% beta + offset
temp1 <- binomialcarmultilevelupdate(Wtriplet=W.triplet, Wbegfin=W.begfin, Wtripletsum=W.triplet.sum, ind_area_list=ind.area.list, n_individual=n.individual, nsites=K, phi=phi, tau2=tau2, y=Y.DA, failures=failures.DA, phi_tune=proposal.sd.phi, rho=rho, offset=beta.offset)
phi <- temp1[[1]]
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K
phi.extend <- phi[ind.area]
##################
## Sample from tau2
##################
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, rho)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
##################
## Sample from rho
##################
if(!fix.rho)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q - temp2 / tau2
logprob.proposal <- det.Q.proposal - temp3 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- det.Q.proposal
accept[5] <- accept[5] + 1
}else
{}
accept[6] <- accept[6] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- as.numeric(X.standardised %*% beta) + phi.extend + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.tau2[ele, ] <- tau2
if(!fix.rho) samples.rho[ele, ] <- rho
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
if(!fix.rho) proposal.sd.rho <- common.accceptrates2(accept[5:6], proposal.sd.rho, 40, 50, 0.5)
accept <- rep(0,6)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
if(fix.rho) samples.rho=NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
#### Source file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.multilevelCARMCMC.R
#### This file has a list of common functions in alphabetical order. These functions include:
# common.accceptrates1 - update proposal variance for a MH step based on having no max limit on the proposal var.
# common.accceptrates2 - update proposal variance for a MH step based on having a max limit on the proposal var.
# common.betablock - Create the blocking structure for beta.
# common.betatransform - back transform the regression parameters to the original scale.
# common.burnin.nsample.thin.check - check the burnin, n.sample, thin arguments.
# common.frame - check the frame argument.
# common.frame.localised - check the frame argument for the localised model.
# common.modelfit - compute the model fit criteria.
# common.prior.beta.check - Check the prior entered for beta.
# common.prior.var.check - check the prior entered for variance parameters.
# common.prior.varmat.check - check the prior entered for variance matrix parameters.
# common.verbose - check the verbose argument.
# common.Wcheckformat - check the W matrix.
# common.Wcheckformat.disimilarity - check the W matrix for the dissimilarity model.
#### Acceptance rates - no maximum limit on the proposal sd
common.accceptrates1 <- function(accept, sd, min, max)
{
#### Update the proposal standard deviations
rate <- 100 * accept[1] / accept[2]
if(rate > max)
{
sd <- sd + 0.1 * sd
}else if(rate < min)
{
sd <- sd - 0.1 * sd
}else
{
}
return(sd)
}
#### Acceptance rates - maximum limit on the proposal sd
common.accceptrates2 <- function(accept, sd, min, max, sd.max)
{
#### Update the proposal standard deviations
rate <- 100 * accept[1] / accept[2]
if(rate > max)
{
sd <- sd + 0.1 * sd
sd[which(sd>sd.max)] <- sd.max
}else if(rate < min)
{
sd <- sd - 0.1 * sd
}else
{
}
return(sd)
}
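#### Example (an illustrative sketch, wrapped in if(FALSE) so it never runs when
#### this file is sourced): the tuning rule keeps the empirical acceptance rate
#### inside [min, max] by scaling the proposal sd up or down by 10%.
if(FALSE)
{
## 60 acceptances out of 100 proposals exceeds the 50% upper target,
## so the proposal sd is inflated by 10% from 0.1 to 0.11.
common.accceptrates1(accept=c(60, 100), sd=0.1, min=40, max=50)
## The capped version would return 0.48 * 1.1 = 0.528, but it is truncated at sd.max = 0.5.
common.accceptrates2(accept=c(60, 100), sd=0.48, min=40, max=50, sd.max=0.5)
}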
#### Beta blocking
common.betablock <- function(p, blocksize.beta=NULL)
{
## Compute the blocking structure for beta
if(is.null(blocksize.beta)) blocksize.beta <- 5
if(blocksize.beta >= p)
{
n.beta.block <- 1
beta.beg <- 1
beta.fin <- p
}else
{
n.standard <- 1 + floor((p-blocksize.beta) / blocksize.beta)
remainder <- p - n.standard * blocksize.beta
if(remainder==0)
{
beta.beg <- c(1,seq((blocksize.beta+1), p, blocksize.beta))
beta.fin <- seq(blocksize.beta, p, blocksize.beta)
n.beta.block <- length(beta.beg)
}else
{
beta.beg <- c(1, seq((blocksize.beta+1), p, blocksize.beta))
beta.fin <- c(seq((blocksize.beta), p, blocksize.beta), p)
n.beta.block <- length(beta.beg)
}
}
return(list(beta.beg, beta.fin, n.beta.block))
}
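#### Example (an illustrative sketch, wrapped in if(FALSE) so it never runs when
#### this file is sourced): blocking p=12 coefficients with the default block
#### size of 5 yields three blocks covering indices 1-5, 6-10 and 11-12.
if(FALSE)
{
blocks <- common.betablock(p=12)
## blocks[[1]] = c(1, 6, 11)  - block start indices
## blocks[[2]] = c(5, 10, 12) - block end indices
## blocks[[3]] = 3            - the number of blocks
}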
#### beta back transform samples
common.betatransform <- function(samples.beta, X.indicator, X.mean, X.sd, p, localised)
{
#### Back transform the beta values
#### Slightly different code depending on whether the localised model is used
samples.beta.orig <- samples.beta
number.cts <- sum(X.indicator==1)
if(localised)
{
#### Localised model
if(number.cts>0)
{
for(r in 1:p)
{
if(X.indicator[r]==1)
{
samples.beta.orig[ ,r] <- samples.beta[ ,r] / X.sd[r]
}else
{
}
}
}else
{
}
}else
{
#### Not the localised model
if(number.cts>0)
{
for(r in 1:p)
{
if(X.indicator[r]==1)
{
samples.beta.orig[ ,r] <- samples.beta[ ,r] / X.sd[r]
}else if(X.indicator[r]==2 & p>1)
{
X.transformed <- which(X.indicator==1)
samples.temp <- as.matrix(samples.beta[ ,X.transformed])
for(s in 1:length(X.transformed))
{
samples.temp[ ,s] <- samples.temp[ ,s] * X.mean[X.transformed[s]] / X.sd[X.transformed[s]]
}
intercept.adjustment <- apply(samples.temp, 1,sum)
samples.beta.orig[ ,r] <- samples.beta[ ,r] - intercept.adjustment
}else
{
}
}
}else
{
}
}
#### Return the transformed samples
return(samples.beta.orig)
}
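#### Example (an illustrative sketch, wrapped in if(FALSE) so it never runs when
#### this file is sourced): if a covariate was standardised as (x - m) / s then
#### beta.std * (x - m) / s = (beta.std / s) * x - beta.std * m / s, so the
#### original-scale slope is beta.std / s and the intercept absorbs -beta.std * m / s.
if(FALSE)
{
samples <- cbind(rnorm(10, mean=2), rnorm(10, mean=3))  ## columns: intercept, slope
X.indicator <- c(2, 1)   ## 2 = intercept, 1 = standardised continuous covariate
X.mean <- c(0, 5)
X.sd <- c(1, 2)
back <- common.betatransform(samples, X.indicator, X.mean, X.sd, p=2, localised=FALSE)
## back[ ,2] = samples[ ,2] / 2 and back[ ,1] = samples[ ,1] - samples[ ,2] * 5 / 2
}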
#### Check MCMC arguments
common.burnin.nsample.thin.check <- function(burnin, n.sample, thin)
{
#### Check for valid arguments for the burnin, n.sample and thin arguments
if(is.null(burnin)) stop("the burnin argument is missing", call.=FALSE)
if(is.null(n.sample)) stop("the n.sample argument is missing", call.=FALSE)
if(!is.numeric(burnin)) stop("burn-in is not a number", call.=FALSE)
if(!is.numeric(n.sample)) stop("n.sample is not a number", call.=FALSE)
if(!is.numeric(thin)) stop("thin is not a number", call.=FALSE)
if(n.sample <= 0) stop("n.sample is less than or equal to zero.", call.=FALSE)
if(burnin < 0) stop("burn-in is less than zero.", call.=FALSE)
if(thin <= 0) stop("thin is less than or equal to zero.", call.=FALSE)
if(n.sample <= burnin) stop("Burn-in is greater than n.sample.", call.=FALSE)
if(n.sample <= thin) stop("thin is greater than n.sample.", call.=FALSE)
if(burnin!=round(burnin)) stop("burnin is not an integer.", call.=FALSE)
if(n.sample!=round(n.sample)) stop("n.sample is not an integer.", call.=FALSE)
if(thin!=round(thin)) stop("thin is not an integer.", call.=FALSE)
}
#### Read in and format the frame argument
common.frame <- function(formula, data, family)
{
#### Overall formula object
frame <- try(suppressWarnings(model.frame(formula, data=data, na.action=na.pass)), silent=TRUE)
if(class(frame)[1]=="try-error") stop("the formula inputted contains an error, e.g. the variables may be different lengths.", call.=FALSE)
#### Design matrix
## Create the matrix
X <- try(suppressWarnings(model.matrix(object=attr(frame, "terms"), data=frame)), silent=TRUE)
if(class(X)[1]=="try-error") stop("the covariate matrix contains inappropriate values.", call.=FALSE)
if(sum(is.na(X))>0) stop("the covariate matrix contains missing 'NA' values.", call.=FALSE)
n <- nrow(X)
p <- ncol(X)
## Check for linearly related columns
cor.X <- suppressWarnings(cor(X))
diag(cor.X) <- 0
if(max(cor.X, na.rm=TRUE)==1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(min(cor.X, na.rm=TRUE)==-1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(p>1)
{
if(sort(apply(X, 2, sd))[2]==0) stop("the covariate matrix has two intercept terms.", call.=FALSE)
}else
{
}
## Standardise the matrix
X.standardised <- X
X.sd <- apply(X, 2, sd)
X.mean <- apply(X, 2, mean)
X.indicator <- rep(NA, p) # To determine which parameter estimates to transform back
for(j in 1:p)
{
if(length(table(X[ ,j]))>2)
{
X.indicator[j] <- 1
X.standardised[ ,j] <- (X[ ,j] - mean(X[ ,j])) / sd(X[ ,j])
}else if(length(table(X[ ,j]))==1)
{
X.indicator[j] <- 2
}else
{
X.indicator[j] <- 0
}
}
#### Response variable
## Create the response
Y <- model.response(frame)
J <- length(Y) / n
which.miss <- matrix(as.numeric(!is.na(Y)), nrow=n, ncol=J)
if(J==1) which.miss <- as.numeric(which.miss)
n.miss <- n*J - sum(which.miss)
## Check for errors
if(family=="binomial")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- n*J - n.miss - sum(ceiling(Y)==floor(Y), na.rm=TRUE)
if(int.check > 0) stop("the response variable has non-integer values.", call.=FALSE)
if(min(Y, na.rm=TRUE)<0) stop("the response variable has negative values.", call.=FALSE)
}else if(family=="gaussian")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
}else if(family=="poisson")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- n*J - n.miss - sum(ceiling(Y)==floor(Y), na.rm=TRUE)
if(int.check > 0) stop("the response variable has non-integer values.", call.=FALSE)
if(min(Y, na.rm=TRUE)<0) stop("the response variable has negative values.", call.=FALSE)
}else if(family=="multinomial")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- n*J - n.miss - sum(ceiling(Y)==floor(Y), na.rm=TRUE)
if(int.check > 0) stop("the response variable has non-integer values.", call.=FALSE)
if(min(Y, na.rm=TRUE)<0) stop("the response variable has negative values.", call.=FALSE)
}else
{}
#### Offset variable
offset <- try(model.offset(frame), silent=TRUE)
if(class(offset)[1]=="try-error") stop("the offset is not numeric.", call.=FALSE)
if(family=="multinomial")
{
if(is.null(offset)) offset <- array(0,c(n, (J-1)))
}else
{
if(is.null(offset)) offset <- array(0,c(n, J))
}
if(sum(is.na(offset))>0) stop("the offset has missing 'NA' values.", call.=FALSE)
if(!is.numeric(offset)) stop("the offset variable has non-numeric values.", call.=FALSE)
#### Return the values needed
results <- list(n=n, p=p, X=X, X.standardised=X.standardised, X.sd=X.sd, X.mean=X.mean, X.indicator=X.indicator,
offset=offset, Y=Y, which.miss=which.miss, n.miss=n.miss)
return(results)
}
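#### Example (an illustrative sketch, wrapped in if(FALSE) so it never runs when
#### this file is sourced): the X.indicator vector codes each design matrix
#### column as 2 (intercept, constant), 1 (continuous, standardised) or 0
#### (two-level / binary, left unstandardised).
if(FALSE)
{
dat <- data.frame(y=rnorm(100), x1=rnorm(100), x2=rbinom(n=100, size=1, prob=0.5))
frame.results <- common.frame(formula=y~x1+x2, data=dat, family="gaussian")
frame.results$X.indicator
## c(2, 1, 0) - the intercept, the standardised continuous x1, the binary x2
}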
#### Read in and format the frame argument from the localised model
common.frame.localised <- function(formula, data, family, trials)
{
#### Overall formula object
frame <- try(suppressWarnings(model.frame(formula, data=data, na.action=na.pass)), silent=TRUE)
if(class(frame)[1]=="try-error") stop("the formula inputted contains an error, e.g. the variables may be different lengths.", call.=FALSE)
#### Response variable
## Create the response
Y <- model.response(frame)
n <- length(Y)
## Check for errors
if(family=="binomial")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- n - sum(ceiling(Y)==floor(Y), na.rm=TRUE)
if(int.check > 0) stop("the respons variable has non-integer values.", call.=FALSE)
if(min(Y, na.rm=TRUE)<0) stop("the response variable has negative values.", call.=FALSE)
}else if(family=="gaussian")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
}else
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- n - sum(ceiling(Y)==floor(Y), na.rm=TRUE)
if(int.check > 0) stop("the response variable has non-integer values.", call.=FALSE)
if(min(Y, na.rm=TRUE)<0) stop("the response variable has negative values.", call.=FALSE)
}
#### Offset variable
offset <- try(model.offset(frame), silent=TRUE)
if(class(offset)[1]=="try-error") stop("the offset is not numeric.", call.=FALSE)
if(is.null(offset)) offset <- rep(0,n)
if(sum(is.na(offset))>0) stop("the offset has missing 'NA' values.", call.=FALSE)
if(!is.numeric(offset)) stop("the offset variable has non-numeric values.", call.=FALSE)
#### Design matrix - Create and then adapt to remove the intercept term
X <- try(suppressWarnings(model.matrix(object=attr(frame, "terms"), data=frame)), silent=TRUE)
if(class(X)[1]=="try-error") stop("the covariate matrix contains inappropriate values.", call.=FALSE)
if(sum(is.na(X))>0) stop("the covariate matrix contains missing 'NA' values.", call.=FALSE)
ptemp <- ncol(X)
if(ptemp==1)
{
X <- NULL
X.standardised <- NULL
X.sd <- NULL
X.mean <- NULL
X.indicator <- NULL
regression.vec <- rep(0, n)
p <- 0
beta <- NA
}else
{
## Check for linearly related columns
cor.X <- suppressWarnings(cor(X))
diag(cor.X) <- 0
if(max(cor.X, na.rm=TRUE)==1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(min(cor.X, na.rm=TRUE)==-1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(sort(apply(X, 2, sd))[2]==0) stop("the covariate matrix has two intercept terms.", call.=FALSE)
## Remove the intercept term
int.which <- which(apply(X,2,sd)==0)
colnames.X <- colnames(X)
X <- as.matrix(X[ ,-int.which])
colnames(X) <- colnames.X[-int.which]
p <- ncol(X)
## Standardise X
X.standardised <- X
X.sd <- apply(X, 2, sd)
X.mean <- apply(X, 2, mean)
X.indicator <- rep(NA, p) # To determine which parameter estimates to transform back
for(j in 1:p)
{
if(length(table(X[ ,j]))>2)
{
X.indicator[j] <- 1
X.standardised[ ,j] <- (X[ ,j] - mean(X[ ,j])) / sd(X[ ,j])
}else
{
X.indicator[j] <- 0
}
}
## Compute a starting value for beta
if(family=="binomial")
{
failures <- trials - Y
mod.glm <- glm(cbind(Y, failures)~X.standardised, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients[-1]
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))[-1]
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
regression.vec <- X.standardised %*% beta
}else
{
mod.glm <- glm(Y~X.standardised, offset=offset, family="quasipoisson")
beta.mean <- mod.glm$coefficients[-1]
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))[-1]
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
regression.vec <- X.standardised %*% beta
}
}
#### Return the values needed
results <- list(n=n, p=p, X=X, X.standardised=X.standardised, X.sd=X.sd, X.mean=X.mean, X.indicator=X.indicator,
offset=offset, Y=Y, regression.vec=regression.vec, beta=beta)
return(results)
}
#### Compute the DIC, WAIC, LMPL and loglikelihood
common.modelfit <- function(samples.loglike, deviance.fitted)
{
#### WAIC
p.w <- sum(apply(samples.loglike,2, var), na.rm=TRUE)
mean.like <- apply(exp(samples.loglike),2,mean)
mean.min <- min(mean.like[mean.like>0])
mean.like[mean.like==0] <- mean.min
lppd <- sum(log(mean.like), na.rm=TRUE)
WAIC <- -2 * (lppd - p.w)
#### Compute the Conditional Predictive Ordinate
CPO <- 1/apply(exp(-samples.loglike), 2, mean)
mean.min <- min(CPO[CPO>0])
CPO[CPO==0] <- mean.min
LMPL <- sum(log(CPO), na.rm=TRUE)
#### DIC
mean.deviance <- -2 * sum(samples.loglike, na.rm=TRUE) / nrow(samples.loglike)
p.d <- mean.deviance - deviance.fitted
DIC <- deviance.fitted + 2 * p.d
#### loglikelihood
loglike <- -0.5 * deviance.fitted
#### Model fit criteria
modelfit <- c(DIC, p.d, WAIC, p.w, LMPL, loglike)
names(modelfit) <- c("DIC", "p.d", "WAIC", "p.w", "LMPL", "loglikelihood")
return(modelfit)
}
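#### Example (an illustrative sketch with made-up inputs, wrapped in if(FALSE)):
#### the criteria use the standard definitions, WAIC = -2 * (lppd - p.w) and
#### DIC = D(posterior mean fit) + 2 * p.d, with p.d equal to the mean posterior
#### deviance minus the deviance at the posterior mean fit. Here samples.loglike
#### is a hypothetical 500 x 20 matrix of pointwise log-likelihood values.
if(FALSE)
{
samples.loglike <- matrix(dnorm(x=rnorm(10000), log=TRUE), nrow=500, ncol=20)
deviance.fitted <- -2 * sum(dnorm(x=rep(0, 20), log=TRUE))
common.modelfit(samples.loglike, deviance.fitted)
## returns the named vector (DIC, p.d, WAIC, p.w, LMPL, loglikelihood)
}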
#### Check beta prior arguments
common.prior.beta.check <- function(prior.mean.beta, prior.var.beta, p)
{
## Checks
if(length(prior.mean.beta)!=p) stop("the vector of prior means for beta is the wrong length.", call.=FALSE)
if(!is.numeric(prior.mean.beta)) stop("the vector of prior means for beta is not numeric.", call.=FALSE)
if(sum(is.na(prior.mean.beta))!=0) stop("the vector of prior means for beta has missing values.", call.=FALSE)
if(length(prior.var.beta)!=p) stop("the vector of prior variances for beta is the wrong length.", call.=FALSE)
if(!is.numeric(prior.var.beta)) stop("the vector of prior variances for beta is not numeric.", call.=FALSE)
if(sum(is.na(prior.var.beta))!=0) stop("the vector of prior variances for beta has missing values.", call.=FALSE)
if(min(prior.var.beta) <=0) stop("the vector of prior variances has elements less than zero", call.=FALSE)
}
#### Check variance prior arguments
common.prior.var.check <- function(prior.var)
{
## Checks
if(length(prior.var)!=2) stop("the prior values for a variance parameter are the wrong length.", call.=FALSE)
if(!is.numeric(prior.var)) stop("the prior values for a variance parameter are not numeric.", call.=FALSE)
if(sum(is.na(prior.var))!=0) stop("the prior values for a variance parameter have missing values.", call.=FALSE)
}
#### Check variance matrix prior arguments
common.prior.varmat.check <- function(prior.varmat, J)
{
if(nrow(prior.varmat)!=J) stop("prior.Sigma.scale is the wrong dimension.", call.=FALSE)
if(ncol(prior.varmat)!=J) stop("prior.Sigma.scale is the wrong dimension.", call.=FALSE)
if(!is.numeric(prior.varmat)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE)
if(sum(is.na(prior.varmat))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE)
}
#### Check the verbose option
common.verbose <- function(verbose)
{
if(is.null(verbose)) verbose <- TRUE
if(!is.logical(verbose)) stop("the verbose option is not logical.", call.=FALSE)
if(verbose)
{
cat("Setting up the model.\n")
a<-proc.time()
}else{
a <- 1
}
return(a)
}
#### Check the W matrix
common.Wcheckformat <- function(W)
{
#### Check W is a matrix of the correct dimension
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
n <- nrow(W)
if(ncol(W)!= n) stop("W is not a square matrix.", call.=FALSE)
#### Check validity of the inputted W matrix
if(sum(is.na(W))>0) stop("W has missing 'NA' values.", call.=FALSE)
if(!is.numeric(W)) stop("W has non-numeric values.", call.=FALSE)
if(min(W)<0) stop("W has negative elements.", call.=FALSE)
if(sum(W!=t(W))>0) stop("W is not symmetric.", call.=FALSE)
if(min(apply(W, 1, sum))==0) stop("W has some areas with no neighbours (one of the row sums equals zero).", call.=FALSE)
#### Create the triplet form
ids <- which(W > 0, arr.ind = T)
W.triplet <- cbind(ids, W[ids])
W.triplet <- W.triplet[ ,c(2,1,3)]
n.triplet <- nrow(W.triplet)
W.triplet.sum <- tapply(W.triplet[ ,3], W.triplet[ ,1], sum)
n.neighbours <- tapply(W.triplet[ ,3], W.triplet[ ,1], length)
#### Create the start and finish points for W updating
W.begfin <- cbind(c(1, cumsum(n.neighbours[-n])+1), cumsum(n.neighbours))
#### Return the critical quantities
results <- list(W=W, W.triplet=W.triplet, n.triplet=n.triplet, W.triplet.sum=W.triplet.sum, n.neighbours=n.neighbours, W.begfin=W.begfin, n=n)
return(results)
}
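#### Example (an illustrative sketch, wrapped in if(FALSE) so it never runs when
#### this file is sourced): for three areas in a line (1-2 and 2-3 neighbours)
#### the triplet form lists each non-zero W entry as (row, column, weight).
if(FALSE)
{
W <- matrix(c(0,1,0, 1,0,1, 0,1,0), nrow=3, byrow=TRUE)
W.quants <- common.Wcheckformat(W)
W.quants$W.triplet      ## rows (1,2,1), (2,1,1), (2,3,1), (3,2,1)
W.quants$n.neighbours   ## c(1, 2, 1)
W.quants$W.begfin       ## rows (1,1), (2,3), (4,4) - each area's triplet rows
}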
#### Check the W matrix - Dissimilarity model
common.Wcheckformat.disimilarity <- function(W)
{
#### Check validity of the inputted W matrix
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
n <- nrow(W)
if(ncol(W)!= n) stop("W is not a square matrix.", call.=FALSE)
if(sum(is.na(W))>0) stop("W has missing 'NA' values.", call.=FALSE)
if(!is.numeric(W)) stop("W has non-numeric values.", call.=FALSE)
if(min(W)<0) stop("W has negative elements.", call.=FALSE)
if(sum(W!=t(W))>0) stop("W is not symmetric.", call.=FALSE)
if(sum(as.numeric(W)==0) + sum(as.numeric(W)==1) - n^2 !=0) stop("W has non-binary elements.", call.=FALSE)
if(min(apply(W, 1, sum))==0) stop("W has some areas with no neighbours (one of the row sums equals zero).", call.=FALSE)
## Ensure the W matrix is symmetric
W <- (W + t(W)) / 2
n.neighbours <- apply(W, 2, sum)
spam.W <- as.spam(W)
#### Create the triplet form
ids <- which(W > 0, arr.ind = T)
W.triplet <- cbind(ids, W[ids])
W.triplet <- W.triplet[ ,c(2,1,3)]
n.triplet <- nrow(W.triplet)
W.triplet.sum <- tapply(W.triplet[ ,3], W.triplet[ ,1], sum)
n.neighbours <- tapply(W.triplet[ ,3], W.triplet[ ,1], length)
#### Create the start and finish points for W updating
W.begfin <- cbind(c(1, cumsum(n.neighbours[-n])+1), cumsum(n.neighbours))
#### Return the critical quantities
results <- list(W=W, W.triplet=W.triplet, n.triplet=n.triplet, W.triplet.sum=W.triplet.sum, n.neighbours=n.neighbours, W.begfin=W.begfin, spam.W=spam.W, n=n)
return(results)
}
#### End of file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/common.functions.R
fitted.CARBayes <- function(object,...)
{
#### Return the fitted values
return(object$fitted.values)
}
#### End of file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/fitted.CARBayes.R
gaussian.MVlerouxCAR <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
J <- ncol(Y)
N.all <- K * J
#### Create a missing list
if(n.miss>0)
{
miss.locator <- array(NA, c(n.miss, 2))
colnames(miss.locator) <- c("row", "column")
locations <- which(t(which.miss)==0)
miss.locator[ ,1] <- ceiling(locations/J)
miss.locator[ ,2] <- locations - (miss.locator[ ,1]-1) * J
}else
{
miss.locator <- NA
}
#### W matrix
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
if(nrow(W)!= K) stop("W does not have the same number of rows as there are spatial areas in the data.", call.=FALSE)
#### rho
if(is.null(rho))
{
rho <- runif(1)
fix.rho <- FALSE
}else
{
fix.rho <- TRUE
}
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.Sigma.df)) prior.Sigma.df <- 2
if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- rep(100000, J)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
if(!is.numeric(prior.Sigma.scale)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE)
if(sum(is.na(prior.Sigma.scale))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE)
common.prior.var.check(prior.nu2)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- gaussian.MVlerouxCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, miss.locator=miss.locator, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- gaussian.MVlerouxCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, miss.locator=miss.locator, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=gaussian.MVlerouxCARMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, miss.locator=miss.locator, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100
accept.nu2 <- 100
accept.phi <- 100 * results$accept[1] / results$accept[2]
accept.Sigma <- 100
if(!fix.rho)
{
accept.rho <- 100 * results$accept[3] / results$accept[4]
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.nu2, accept.rho, accept.Sigma)
names(accept.final) <- c("beta", "phi", "nu2", "rho", "Sigma")
## Compute the model fit criterion
mean.beta <- matrix(apply(results$samples.beta, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=J, byrow=T)
fitted.mean <- X.standardised %*% mean.beta + mean.phi + offset
nu2.mean <- apply(results$samples.nu2,2,mean)
deviance.fitted <- -2 * sum(dnorm(as.numeric(t(Y)), mean = as.numeric(t(fitted.mean)), sd = sqrt(nu2.mean[rep(1:J,K)]), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- matrix(apply(results$samples.fitted, 2, mean), nrow=K, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
nu.mat <- matrix(rep(sqrt(nu2.mean), K), nrow=K, byrow=T)
pearson.residuals <- response.residuals / nu.mat
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- results$samples.beta
for(r in 1:J)
{
samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(results$samples.beta[ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), nu2=mcmc(results$samples.nu2), Sigma=results$samples.Sigma, rho=mcmc(results$samples.rho), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, J*p), rep(accept.beta,J*p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c((2*J+1) ,7))
## nu2
summary.hyper[1:J, 1:3] <- t(rbind(apply(samples$nu2, 2, mean), apply(samples$nu2, 2, quantile, c(0.025, 0.975))))
summary.hyper[1:J, 4] <- rep(n.keep, J)
summary.hyper[1:J, 5] <- rep(100, J)
summary.hyper[1:J, 6] <- apply(samples$nu2, 2, effectiveSize)
summary.hyper[1:J, 7] <- geweke.diag(samples$nu2)$z
## Sigma
summary.hyper[(J+1):(2*J), 1] <- diag(apply(samples$Sigma, c(2,3), mean))
summary.hyper[(J+1):(2*J), 2] <- diag(apply(samples$Sigma, c(2,3), quantile, c(0.025)))
summary.hyper[(J+1):(2*J), 3] <- diag(apply(samples$Sigma, c(2,3), quantile, c(0.975)))
summary.hyper[(J+1):(2*J), 4] <- rep(n.keep, J)
summary.hyper[(J+1):(2*J), 5] <- rep(100, J)
summary.hyper[(J+1):(2*J), 6] <- diag(apply(samples$Sigma, c(2,3), effectiveSize))
for(r in 1:J)
{
summary.hyper[(r+J), 7] <- geweke.diag(samples$Sigma[ ,r,r])$z
}
if(!fix.rho)
{
summary.hyper[(2*J+1), 1:3] <- c(mean(samples$rho), quantile(samples$rho, c(0.025, 0.975)))
summary.hyper[(2*J+1), 4:7] <- c(n.keep, accept.rho, effectiveSize(samples$rho), geweke.diag(samples$rho)$z)
}else
{
summary.hyper[(2*J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(2*J+1), 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("nu2.",J), 1:J, sep=""), paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.beta <- 100
accept.nu2 <- 100
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.phi <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.Sigma <- 100
if(!fix.rho)
{
accept.rho <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.nu2, accept.rho, accept.Sigma)
names(accept.final) <- c("beta", "phi", "nu2", "rho", "Sigma")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.nu2.list <- lapply(results, function(l) l[["samples.nu2"]])
samples.Sigma.list <- lapply(results, function(l) l[["samples.Sigma"]])
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.nu2.matrix <- do.call(what=rbind, args=samples.nu2.list)
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- matrix(apply(samples.beta.matrix, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=J, byrow=T)
fitted.mean <- X.standardised %*% mean.beta + mean.phi + offset
nu2.mean <- apply(samples.nu2.matrix,2,mean)
deviance.fitted <- -2 * sum(dnorm(as.numeric(t(Y)), mean = as.numeric(t(fitted.mean)), sd = sqrt(nu2.mean[rep(1:J,K)]), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- matrix(apply(samples.fitted.matrix, 2, mean), nrow=K, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
nu.mat <- matrix(rep(sqrt(nu2.mean), K), nrow=K, byrow=T)
pearson.residuals <- response.residuals / nu.mat
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
for(r in 1:J)
{
samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)], X.indicator, X.mean, X.sd, p, FALSE)
}
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
rho.temp <- samples.rho.list
nu2.temp <- samples.nu2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
rho.temp[[j]] <- mcmc(samples.rho.list[[j]])
nu2.temp[[j]] <- mcmc(samples.nu2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
rho.mcmc <- as.mcmc.list(rho.temp)
nu2.mcmc <- as.mcmc.list(nu2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rho.mcmc, nu2=nu2.mcmc, Sigma=samples.Sigma.list, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, J*p), rep(accept.beta,J*p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Category ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c((2*J+1) ,7))
## nu2
summary.hyper[1:J, 1:3] <- t(rbind(apply(samples.nu2.matrix, 2, mean), apply(samples.nu2.matrix, 2, quantile, c(0.025, 0.975))))
summary.hyper[1:J, 4] <- rep(n.keep, J)
summary.hyper[1:J, 5] <- rep(accept.nu2, J)
summary.hyper[1:J, 6] <- effectiveSize(nu2.mcmc)
summary.hyper[1:J, 7] <- gelman.diag(nu2.mcmc)$psrf[ ,2]
## Sigma
summary.hyper[(J+1):(2*J), 4] <- rep(n.keep, J)
summary.hyper[(J+1):(2*J), 5] <- rep(accept.Sigma, J)
for(r in 1:J)
{
test.vec <- samples.Sigma.list[[1]][ , r, r]
test.list <- as.list(rep(NA, n.chains))
test.list[[1]] <- mcmc(samples.Sigma.list[[1]][ , r, r])
for(i in 2:n.chains)
{
test.vec <- c(test.vec, samples.Sigma.list[[i]][ , r, r])
test.list[[i]] <- mcmc(samples.Sigma.list[[i]][ , r, r])
}
test.mcmc <- as.mcmc.list(test.list)
summary.hyper[r+J,1] <- mean(test.vec)
summary.hyper[r+J,2:3] <- quantile(test.vec, c(0.025, 0.975))
summary.hyper[r+J,6] <- effectiveSize(test.mcmc)
summary.hyper[r+J,7] <- gelman.diag(test.mcmc)$psrf[ ,2]
}
if(!fix.rho)
{
summary.hyper[(2*J+1), 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[(2*J+1), 4:7] <- c(n.keep, accept.rho, effectiveSize(rho.mcmc), gelman.diag(rho.mcmc)$psrf[ ,2])
}else
{
summary.hyper[(2*J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(2*J+1), 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("nu2.",J), 1:J, sep=""), paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nRandom effects model - Leroux MCAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
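#### Example (an illustrative usage sketch with hypothetical objects: a data
#### frame 'dat' containing outcome columns y1, y2 and covariates x1, x2, plus a
#### binary K x K neighbourhood matrix 'W'; wrapped in if(FALSE) so it never runs):
if(FALSE)
{
model <- gaussian.MVlerouxCAR(formula=cbind(y1,y2)~x1+x2, data=dat, W=W,
                              burnin=20000, n.sample=120000, thin=10,
                              n.chains=3, n.cores=3)
model$summary.results    ## posterior summaries for beta, nu2, Sigma and rho
model$modelfit           ## DIC, WAIC, LMPL and related criteria
}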
#### End of file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/gaussian.MVlerouxCAR.R
gaussian.MVlerouxCARMCMC <- function(Y, offset, X.standardised, W, rho, fix.rho, K, p, J, N.all, which.miss, n.miss, miss.locator, burnin, n.sample, thin, prior.mean.beta, prior.var.beta, prior.nu2, prior.Sigma.df, prior.Sigma.scale, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
# library(MCMCpack)
##########################################
#### Generate the initial parameter values
##########################################
beta <- array(NA, c(p, J))
nu2 <- rep(NA, J)
for(i in 1:J)
{
mod.glm <- lm(Y[ ,i]~X.standardised-1, offset=offset[ ,i])
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.unscaled)) * summary(mod.glm)$sigma
beta[ ,i] <- rnorm(n=p, mean=beta.mean, sd=beta.sd)
nu2[i] <- runif(1, var(mod.glm$residuals)*0.5, var(mod.glm$residuals)*2)
}
res.temp <- Y - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi.vec <- rnorm(n=N.all, mean=0, sd=res.sd)
phi <- matrix(phi.vec, nrow=K, byrow=TRUE)
Sigma <- cov(phi)
Sigma.inv <- solve(Sigma)
Sigma.a <- rep(1, J)
####################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
regression <- X.standardised %*% beta
fitted <- regression + phi + offset
Y.DA <- Y
###############################
#### Set up the MCMC quantities
###############################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, J*p))
samples.nu2 <- array(NA, c(n.keep, J))
samples.phi <- array(NA, c(n.keep, N.all))
samples.Sigma <- array(NA, c(n.keep, J, J))
samples.Sigma.a <- array(NA, c(n.keep, J))
if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, N.all))
samples.fitted <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,4)
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
nu2.posterior.shape <- prior.nu2[1] + 0.5 * K
Sigma.post.df <- prior.Sigma.df + K + J - 1
Sigma.a.post.shape <- (prior.Sigma.df + J) / 2
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
Wstar <- diag(apply(W,1,sum)) - W
Q <- rho * Wstar + diag(rep(1-rho,K))
#### Create the determinant
if(!fix.rho)
{
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
islands.all <- rep(islands,J)
n.islands <- max(W.islands$nc)
if(rho==1) Sigma.post.df <- prior.Sigma.df + K + J - 1 - n.islands
#### Specify vector variants
Y.vec <- as.numeric(t(Y))
#### Beta update quantities
data.precision <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
nu.mat <- matrix(rep(sqrt(nu2), K), nrow=K, byrow=T)
Y.DA[miss.locator] <- rnorm(n=n.miss, mean=fitted[miss.locator], sd=nu.mat[miss.locator])
}else
{}
###################
## Sample from beta
###################
for(r in 1:J)
{
fc.precision <- prior.precision.beta + data.precision / nu2[r]
fc.var <- solve(fc.precision)
fc.temp1 <- t(((Y.DA[, r] - phi[ , r] - offset[ , r]) %*% X.standardised) / nu2[r]) + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% fc.temp1
chol.var <- t(chol(fc.var))
beta[ ,r] <- fc.mean + chol.var %*% rnorm(p)
}
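## Note: the loop above is an exact Gibbs step. With a Gaussian likelihood and a
## Gaussian prior, the full conditional for each outcome's beta vector is
## multivariate normal with precision prior.precision.beta + X'X / nu2[r], so it
## is sampled directly via a Cholesky factor of the full conditional variance.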
regression <- X.standardised %*% beta
##################
## Sample from nu2
##################
fitted.current <- regression + phi + offset
nu2.posterior.scale <- prior.nu2[2] + 0.5 * apply((Y.DA - fitted.current)^2, 2, sum)
nu2 <- 1 / rgamma(J, nu2.posterior.shape, scale=(1/nu2.posterior.scale))
##################
## Sample from phi
##################
den.offset <- rho * W.triplet.sum + 1 - rho
phi.offset <- Y.DA - regression - offset
Chol.Sigma <- t(chol(proposal.sd.phi*Sigma))
z.mat <- matrix(rnorm(n=N.all, mean=0, sd=1), nrow=J, ncol=K)
innovations <- t(Chol.Sigma %*% z.mat)
temp1 <- gaussianmcarupdateRW(W.triplet, W.begfin, K, J, phi, phi.offset, den.offset, Sigma.inv, rho, nu2, proposal.sd.phi, innovations)
phi <- temp1[[1]]
for(r in 1:J)
{
phi[ ,r] <- phi[ ,r] - mean(phi[ ,r])
}
accept[1] <- accept[1] + temp1[[2]]
accept[2] <- accept[2] + K
####################
## Sample from Sigma
####################
Sigma.post.scale <- 2 * prior.Sigma.df * diag(1 / Sigma.a) + t(phi) %*% Q %*% phi
Sigma <- riwish(Sigma.post.df, Sigma.post.scale)
Sigma.inv <- solve(Sigma)
######################
## Sample from Sigma.a
######################
Sigma.a.posterior.scale <- prior.Sigma.df * diag(Sigma.inv) + 1 / prior.Sigma.scale^2
Sigma.a <- 1 / rgamma(J, Sigma.a.post.shape, scale=(1/Sigma.a.posterior.scale))
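## Note: the two updates above form a hierarchical prior on Sigma. Sampling
## Sigma from an inverse-Wishart whose scale matrix involves diag(1/Sigma.a),
## and then each Sigma.a element from an inverse-gamma, is the usual
## augmentation (cf. Huang and Wand, 2013) that induces heavier-tailed,
## approximately half-t marginal priors on the standard deviations in Sigma.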
##################
## Sample from rho
##################
if(!fix.rho)
{
## Propose a new value
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
Q.prop <- proposal.rho * Wstar + diag(rep(1-proposal.rho), K)
det.Q.prop <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
## Compute the acceptance rate
logprob.current <- 0.5 * J * det.Q - 0.5 * sum(diag(t(phi) %*% Q %*% phi %*% Sigma.inv))
logprob.proposal <- 0.5 * J * det.Q.prop - 0.5 * sum(diag(t(phi) %*% Q.prop %*% phi %*% Sigma.inv))
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- det.Q.prop
Q <- Q.prop
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
}else
{}
#########################
## Calculate the deviance
#########################
fitted <- regression + phi + offset
loglike <- dnorm(x=as.numeric(t(Y)), mean=as.numeric(t(fitted)), sd=rep(sqrt(nu2), K), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- as.numeric(beta)
samples.nu2[ele, ] <- nu2
samples.phi[ele, ] <- as.numeric(t(phi))
samples.Sigma[ele, , ] <- Sigma
samples.Sigma.a[ele, ] <- Sigma.a
if(!fix.rho) samples.rho[ele, ] <- rho
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- as.numeric(t(fitted))
if(n.miss>0) samples.Y[ele, ] <- Y.DA[miss.locator]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
if(!fix.rho)
{
proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5)
}
accept <- c(0,0,0,0)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho) samples.rho <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.nu2=samples.nu2, samples.Sigma=samples.Sigma, samples.Sigma.a=samples.Sigma.a, samples.rho=samples.rho, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
#### End of file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/gaussian.MVlerouxCARMCMC.R
gaussian.RAB <- function(formula, data=NULL, W, V, nlambda, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
which.present <- which(!is.na(Y))
n.miss <- frame.results$n.miss
if(p==0) stop("The model (via the formula object) must at least have an intercept term.", call.=FALSE)
#### Ancillary data
if(!is.numeric(V)) stop("The ancillary data V is not a vector.", call.=FALSE)
if(length(V) != K) stop("The ancillary data V is not the same length as the remaining data.", call.=FALSE)
if(sum(is.na(V))>0) stop("The ancillary data V has missing 'NA' values.", call.=FALSE)
if(!is.numeric(V)) stop("The ancillary data V has non-numeric values.", call.=FALSE)
#### Neighbourhood matrix W
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
if(ncol(W)!= nrow(W)) stop("W is not a square matrix.", call.=FALSE)
if(sum(is.na(W))>0) stop("W has missing 'NA' values.", call.=FALSE)
if(!is.numeric(W)) stop("W has non-numeric values.", call.=FALSE)
if(min(W)<0) stop("W has negative elements.", call.=FALSE)
if(sum(W!=t(W))>0) stop("W is not symmetric.", call.=FALSE)
if(min(apply(W, 1, sum))==0) stop("W has some areas with no neighbours (one of the row sums equals zero).", call.=FALSE)
#### Create the shortest path matrix
graph.W <- graph.adjacency(W, mode="undirected")
graph.dist <- shortest.paths(graph.W)
#####################################################
#### Create the basis functions and the data elements
#####################################################
#### Create the three sets of basis functions
B.anisotropic.exp <- basiscomputeexponential(D=graph.dist, nrows=K, ncols=K, Z=V, startcol=1)
B.anisotropic.inv <- basiscomputeinverse(D=graph.dist, nrows=K, ncols=K, Z=V, startcol=1)
B.anisotropic.linear <- basiscomputelinear(D=graph.dist, nrows=K, ncols=K, Z=V, startcol=1)
#### Combine with the covariate matrix if needed
X.anisotropic.exp <- cbind(X, B.anisotropic.exp)
X.anisotropic.inv <- cbind(X, B.anisotropic.inv)
X.anisotropic.linear <- cbind(X, B.anisotropic.linear)
#### Remove an intercept term if it is present
if(var(X.anisotropic.exp[ ,1])==0)
{
X.anisotropic.exp <- X.anisotropic.exp[ ,-1]
X.anisotropic.inv <- X.anisotropic.inv[ ,-1]
X.anisotropic.linear <- X.anisotropic.linear[ ,-1]
p <- p-1
}else
{}
#### Remove rows with missing values for model fitting
Y.train <- Y[which.present]
offset.train <- offset[which.present]
K.train <- length(Y.train)
X.anisotropic.exp.train <- X.anisotropic.exp[which.present, ]
X.anisotropic.inv.train <- X.anisotropic.inv[which.present, ]
X.anisotropic.linear.train <- X.anisotropic.linear[which.present, ]
W.train <- W[which.present, which.present]
W.list.train <- mat2listw(W.train, style="B")
########################################
#### Fit the models and make predictions
########################################
#### Update the user on the functions progress
if(verbose) cat("Fitting the model.")
#### Fit the models with the 3 different types of basis functions
penfac <- c(rep(0, p), rep(1,K))
mod.ridge.exp <- glmnet(x=X.anisotropic.exp.train, y=Y.train, offset=offset.train, alpha=0, nlambda=nlambda, penalty.factor = penfac, family = "gaussian", intercept=TRUE, standardize=FALSE)
mod.ridge.inv <- glmnet(x=X.anisotropic.inv.train, y=Y.train, offset=offset.train, alpha=0, nlambda=nlambda, penalty.factor = penfac, family = "gaussian", intercept=TRUE, standardize=FALSE)
mod.ridge.linear <- glmnet(x=X.anisotropic.linear.train, y=Y.train, offset=offset.train, alpha=0, nlambda=nlambda, penalty.factor = penfac, family = "gaussian", intercept=TRUE, standardize=FALSE)
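#### Note: alpha=0 gives a pure ridge penalty, and penalty.factor=penfac sets
#### the penalty to zero for the p covariate effects while penalising only the
#### K basis function coefficients, so the spatial basis is shrunk but the
#### covariates are left essentially unpenalised. Below, moran.mc() is called
#### with nsim=1 purely to extract the observed Moran's I statistic for the
#### residuals; the permutation p-value is not used.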
#### Compute the level of residual spatial autocorrelation for each model and lambda value
## Exponential model
fits.exp <- predict(object=mod.ridge.exp, newx=X.anisotropic.exp.train, newoffset=offset.train)
m <- ncol(fits.exp)
results.exp <- data.frame(lambda=mod.ridge.exp$lambda, I=rep(NA, m))
for(j in 1:m)
{
resids <- Y.train - fits.exp[ ,j]
results.exp$I[j] <- moran.mc(x=resids, listw=W.list.train, zero.policy = TRUE, nsim=1)$statistic
}
row.exp <- which(abs(results.exp$I)==min(abs(results.exp$I)))[1]
moran.exp <- results.exp$I[row.exp]
## Inverse model
fits.inv <- predict(object=mod.ridge.inv, newx=X.anisotropic.inv.train, newoffset=offset.train)
m <- ncol(fits.inv)
results.inv <- data.frame(lambda=mod.ridge.inv$lambda, I=rep(NA, m))
for(j in 1:m)
{
resids <- Y.train - fits.inv[ ,j]
results.inv$I[j] <- moran.mc(x=resids, listw=W.list.train, zero.policy = TRUE, nsim=1)$statistic
}
row.inv <- which(abs(results.inv$I)==min(abs(results.inv$I)))[1]
moran.inv <- results.inv$I[row.inv]
## Linear model
fits.linear <- predict(object=mod.ridge.linear, newx=X.anisotropic.linear.train, newoffset=offset.train)
m <- ncol(fits.linear)
results.linear <- data.frame(lambda=mod.ridge.linear$lambda, I=rep(NA, m))
for(j in 1:m)
{
resids <- Y.train - fits.linear[ ,j]
results.linear$I[j] <- moran.mc(x=resids, listw=W.list.train, zero.policy = TRUE, nsim=1)$statistic
}
row.linear <- which(abs(results.linear$I)==min(abs(results.linear$I)))[1]
moran.linear <- results.linear$I[row.linear]
#### Choose the final model
moran.all <- abs(c(moran.exp, moran.inv, moran.linear))
model <- which(moran.all == min(moran.all))[1]
if(model==1)
{
model.string <- c("Likelihood model - Gaussian (identity link function)", "Spatial structure model - Anistropic exponential distance-decay basis functions")
model <- mod.ridge.exp
row <- row.exp
X.final <- X.anisotropic.exp
X.final.train <- X.final[which.present, ]
lambda.hat <- results.exp$lambda[row] * K.train
I <- results.exp$I[row]
}else if(model==2)
{
model.string <- c("Likelihood model - Gaussian (identity link function)", "Spatial structure model - Anistropic inverse distance-decay basis functions")
model <- mod.ridge.inv
row <- row.inv
X.final <- X.anisotropic.inv
X.final.train <- X.final[which.present, ]
lambda.hat <- results.inv$lambda[row] * K.train
I <- results.inv$I[row]
}else if(model==3)
{
model.string <- c("Likelihood model - Gaussian (identity link function)", "Spatial structure model - Anistropic linear distance-decay basis functions")
model <- mod.ridge.linear
row <- row.linear
X.final <- X.anisotropic.linear
X.final.train <- X.final[which.present, ]
lambda.hat <- results.linear$lambda[row] * K.train
I <- results.linear$I[row]
}else{}
#### Compute the parameter estimates for beta and sigma^2
beta.hat <- c(model$a0[row], model$beta[ ,row])
X.extend.train <- cbind(rep(1, K.train), X.final.train)
fit.train <- as.numeric(X.extend.train %*% beta.hat + offset.train)
D <- diag(c(rep(0, p+1), rep(1, K)))
XtX <- t(X.extend.train) %*% X.extend.train
XtXpluspen <- XtX + lambda.hat * D
XtXpluspen.inv <- solve(XtXpluspen)
H <- X.extend.train %*% XtXpluspen.inv %*% t(X.extend.train)
df.res <- K.train - sum(diag(H))
sigma2.hat <- sum((Y.train - fit.train)^2) / (df.res)
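#### Note: H above is the ridge hat matrix, so sum(diag(H)) is the effective
#### number of parameters and df.res the residual degrees of freedom, making
#### sigma2.hat a residual variance estimate with a ridge-adjusted denominator.
#### The earlier lambda.hat = lambda * K.train rescaling reflects glmnet's
#### convention of dividing the loss by the sample size.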
#####################################
#### Summarise and return the results
#####################################
#### Update the user on the progress
if(verbose) cat("\nSummarising results.\n")
#### Compute the final fitted / predicted values and residuals
fitted.values <- as.numeric(beta.hat[1] + X.final %*% beta.hat[-1] + offset)
response.residuals <- Y - fitted.values
pearson.residuals <- response.residuals /sqrt(sigma2.hat)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
#### Format the final X matrix returned
X.extend <- cbind(rep(1, K), X.final)
colnames(X.extend)[1] <- "(Intercept)"
colnames(X.extend)[(p+2):(p+K+1)] <- paste("Basis function", 1:K, sep=" ")
#######################
#### Return the results
#######################
results <- list(beta.hat=beta.hat, sigma2.hat=sigma2.hat, lambda.hat=lambda.hat, I=I, fitted.values=fitted.values, residuals=residuals, formula=formula, model.string=model.string, X=X.extend, model=model)
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
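#### Example (an illustrative usage sketch with hypothetical objects: a data
#### frame 'dat' with outcome y and covariates x1, x2, a binary K x K
#### neighbourhood matrix 'W' and a length-K ancillary vector 'V'; wrapped in
#### if(FALSE) so it never runs when this file is sourced):
if(FALSE)
{
model <- gaussian.RAB(formula=y~x1+x2, data=dat, W=W, V=V, nlambda=100)
model$model.string   ## which distance-decay basis minimised residual Moran's I
model$I              ## the residual Moran's I for the chosen model
}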
#### End of file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/gaussian.RAB.R
gaussian.dissimilarityCAR <- function(formula, data=NULL, W, Z, W.binary=TRUE, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.tau2=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Dissimilarity metric matrix
if(!is.list(Z)) stop("Z is not a list object.", call.=FALSE)
if(sum(is.na(as.numeric(lapply(Z, sum, na.rm=FALSE))))>0) stop("Z contains missing 'NA' values.", call.=FALSE)
q <- length(Z)
if(sum(as.numeric(lapply(Z,nrow))==K) <q) stop("Z contains matrices of the wrong size.", call.=FALSE)
if(sum(as.numeric(lapply(Z,ncol))==K) <q) stop("Z contains matrices of the wrong size.", call.=FALSE)
if(min(as.numeric(lapply(Z,min)))<0) stop("Z contains negative values.", call.=FALSE)
if(!is.logical(W.binary)) stop("W.binary is not TRUE or FALSE.", call.=FALSE)
if(length(W.binary)!=1) stop("W.binary has the wrong length.", call.=FALSE)
if(W.binary)
{
alpha.max <- rep(NA,q)
alpha.threshold <- rep(NA,q)
for(k in 1:q)
{
Z.crit <- quantile(as.numeric(Z[[k]])[as.numeric(Z[[k]])!=0], 0.5)
alpha.max[k] <- -log(0.5) / Z.crit
alpha.threshold[k] <- -log(0.5) / max(Z[[k]])
}
}else
{
alpha.max <- rep(50, q)
}
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
common.prior.var.check(prior.nu2)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- gaussian.dissimilarityCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, Z=Z, W.binary=W.binary, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, alpha.max=alpha.max, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- gaussian.dissimilarityCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, Z=Z, W.binary=W.binary, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, alpha.max=alpha.max, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=gaussian.dissimilarityCARMCMC, Y=Y, offset=offset, X.standardised=X.standardised, Z=Z, W.binary=W.binary, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, alpha.max=alpha.max, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.alpha <- 100 * results$accept[1] / results$accept[2]
accept.beta <- 100
accept.phi <- 100
accept.tau2 <- 100
accept.nu2 <- 100
accept.final <- c(accept.beta, accept.phi, accept.nu2, accept.tau2, accept.alpha)
names(accept.final) <- c("beta", "phi", "nu2", "tau2", "alpha")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.phi <- apply(results$samples.phi, 2, mean)
mean.nu2 <- mean(results$samples.nu2)
fitted.mean <- X.standardised %*% mean.beta + mean.phi + offset
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(mean.nu2),K), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(mean.nu2)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), alpha=mcmc(results$samples.alpha), tau2=mcmc(results$samples.tau2), nu2=mcmc(results$samples.nu2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.alpha <- t(rbind(apply(samples$alpha, 2, mean), apply(samples$alpha, 2, quantile, c(0.025, 0.975))))
summary.alpha <- cbind(summary.alpha, rep(n.keep, q), rep(accept.alpha,q), effectiveSize(samples$alpha), geweke.diag(samples$alpha)$z)
if(!is.null(names(Z)))
{
rownames(summary.alpha) <- names(Z)
}else
{
names.Z <- rep(NA,q)
for(j in 1:q)
{
names.Z[j] <- paste("Z[[",j, "]]", sep="")
}
rownames(summary.alpha) <- names.Z
}
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$nu2), quantile(samples$nu2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.nu2, effectiveSize(samples$nu2), geweke.diag(samples$nu2)$z)
summary.hyper[2, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.results <- rbind(summary.beta, summary.hyper, summary.alpha)
if(W.binary)
{
alpha.min <- c(rep(NA, (p+2)), alpha.threshold)
summary.results <- cbind(summary.results, alpha.min)
}else
{}
rownames(summary.results)[(p+1):(p+2)] <- c("nu2", "tau2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
if(W.binary) summary.results[ , 8] <- round(summary.results[ , 8], 4)
#### Create the posterior medians for the neighbourhood matrix W
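## For each neighbouring pair (W[i,j]==1), every retained MCMC draw of alpha
## implies a weight w[i,j] = exp(-sum_k alpha[k] * Z[[k]][i,j]). In the binary
## model these weights are thresholded at 0.5, the posterior median of the
## resulting indicators gives the estimated W, and the proportion of draws
## with indicator zero gives the posterior boundary probability.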
W.posterior <- array(NA, c(K,K))
if(W.binary)
{
W.border.prob <- array(NA, c(K,K))
}else
{
W.border.prob <- NA
}
for(i in 1:K)
{
for(j in 1:K)
{
if(W[i,j]==1)
{
z.temp <- sapply(Z, function(z) z[i, j])
w.temp <- exp(-samples$alpha %*% z.temp)
if(W.binary)
{
w.posterior <- as.numeric(w.temp>=0.5)
W.posterior[i,j] <- ceiling(median(w.posterior))
W.border.prob[i,j] <- (1 - sum(w.posterior) / length(w.posterior))
}else
{
W.posterior[i,j] <- median(w.temp)
}
}else
{
}
}
}
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.alpha <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.beta <- 100
accept.phi <- 100
accept.tau2 <- 100
accept.nu2 <- 100
accept.final <- c(accept.beta, accept.phi, accept.nu2, accept.tau2, accept.alpha)
names(accept.final) <- c("beta", "phi", "nu2", "tau2", "alpha")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.nu2.list <- lapply(results, function(l) l[["samples.nu2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.nu2.matrix <- do.call(what=rbind, args=samples.nu2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.nu2 <- mean(samples.nu2.matrix)
fitted.mean <- X.standardised %*% mean.beta + mean.phi + offset
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(mean.nu2),K), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(mean.nu2)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
alpha.temp <- samples.alpha.list
tau2.temp <- samples.tau2.list
nu2.temp <- samples.nu2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
alpha.temp[[j]] <- mcmc(samples.alpha.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
nu2.temp[[j]] <- mcmc(samples.nu2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
alpha.mcmc <- as.mcmc.list(alpha.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
nu2.mcmc <- as.mcmc.list(nu2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, alpha=alpha.mcmc, tau2=tau2.mcmc, nu2=nu2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.alpha <- t(rbind(apply(samples.alpha.matrix, 2, mean), apply(samples.alpha.matrix, 2, quantile, c(0.025, 0.975))))
summary.alpha <- cbind(summary.alpha, rep(n.keep, q), rep(accept.alpha,q), effectiveSize(alpha.mcmc), gelman.diag(alpha.mcmc)$psrf[ ,2])
if(!is.null(names(Z)))
{
rownames(summary.alpha) <- names(Z)
}else
{
names.Z <- rep(NA,q)
for(j in 1:q)
{
names.Z[j] <- paste("Z[[",j, "]]", sep="")
}
rownames(summary.alpha) <- names.Z
}
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.nu2.matrix), quantile(samples.nu2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.nu2, effectiveSize(nu2.mcmc), gelman.diag(nu2.mcmc)$psrf[ ,2])
summary.hyper[2, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.results <- rbind(summary.beta, summary.hyper, summary.alpha)
if(W.binary)
{
alpha.min <- c(rep(NA, (p+2)), alpha.threshold)
summary.results <- cbind(summary.results, alpha.min)
}else
{}
rownames(summary.results)[(p+1):(p+2)] <- c("nu2", "tau2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
if(W.binary) summary.results[ , 8] <- round(summary.results[ , 8], 4)
#### Create the posterior medians for the neighbourhood matrix W
W.posterior <- array(NA, c(K,K))
if(W.binary)
{
W.border.prob <- array(NA, c(K,K))
}else
{
W.border.prob <- NA
}
for(i in 1:K)
{
for(j in 1:K)
{
if(W[i,j]==1)
{
z.temp <- sapply(Z, function(z) z[i, j])
w.temp <- exp(-samples.alpha.matrix %*% z.temp)
if(W.binary)
{
w.posterior <- as.numeric(w.temp>=0.5)
W.posterior[i,j] <- ceiling(median(w.posterior))
W.border.prob[i,j] <- (1 - sum(w.posterior) / length(w.posterior))
}else
{
W.posterior[i,j] <- median(w.temp)
}
}else
{
}
}
}
}
###################################
#### Compile and return the results
###################################
## Generate the dissimilarity equation
if(q==1)
{
dis.eq <- rownames(summary.results)[nrow(summary.results)]
}else
{
dis.eq <- paste(rownames(summary.alpha), "+")
len <- length(dis.eq)
dis.eq[len] <- substr(dis.eq[len], 1, nchar(dis.eq[len])-1)
}
if(W.binary)
{
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nRandom effects model - Binary dissimilarity CAR", "\nDissimilarity metrics - ", dis.eq, "\n")
}else
{
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nRandom effects model - Non-binary dissimilarity CAR", "\nDissimilarity metrics - ", dis.eq, "\n")
}
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=list(W.posterior=W.posterior, W.border.prob=W.border.prob), formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
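## Illustrative usage sketch: a minimal, hypothetical example of fitting this
## model. These internals are normally reached through the exported
## S.CARdissimilarity() wrapper with family="gaussian"; the data, W and Z
## objects below are simulated purely for illustration.
if(FALSE)
{
library(CARBayes)
K <- 100
grid <- expand.grid(x=1:10, y=1:10)
distance <- as.matrix(dist(grid))
W <- array(0, c(K, K))
W[distance==1] <- 1                                  # rook neighbours on a 10 x 10 lattice
Z <- list(Z1=as.matrix(dist(rnorm(K))))              # one non-negative dissimilarity metric
Y <- rnorm(n=K, mean=2 + 0.1 * grid$x, sd=1)
model <- S.CARdissimilarity(formula=Y~1, family="gaussian", W=W, Z=Z, W.binary=TRUE, burnin=2000, n.sample=10000)
model$summary.results
}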
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/gaussian.dissimilarityCAR.R
|
gaussian.dissimilarityCARMCMC <- function(Y, offset, X.standardised, Z, W.binary, W, K, p, q, which.miss, n.miss, burnin, n.sample, thin, prior.mean.beta, prior.var.beta, prior.tau2, prior.nu2, alpha.max, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
# library(spam)
#
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
mod.glm <- lm(Y~X.standardised-1, offset=offset)
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.unscaled)) * summary(mod.glm)$sigma
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
res.temp <- Y - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
tau2 <- var(phi) / 10
nu2 <- tau2
alpha <- runif(n=q, min=rep(0,q), max=alpha.max/(2+q))
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- as.numeric(X.standardised %*% beta) + phi + offset
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.nu2 <- array(NA, c(n.keep, 1))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.alpha <- array(NA, c(n.keep, q))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
## Metropolis quantities
accept <- c(0,0)
proposal.sd.alpha <- 0.02 * alpha.max
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
nu2.posterior.shape <- prior.nu2[1] + 0.5*K
#### Beta update quantities
data.precision.beta <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat.disimilarity(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
spam.W <- W.quants$spam.W
#### Create the Z triplet form
Z.triplet <- array(NA, c(n.triplet, q))
for(i in 1:n.triplet)
{
row <- W.triplet[i,1]
col <- W.triplet[i,2]
for(j in 1:q)
{
Z.triplet[i,j] <- Z[[j]][row, col]
}
}
if(W.binary)
{
W.triplet[ ,3] <- as.numeric(exp(-Z.triplet %*% alpha)>=0.5)
}else
{
W.triplet[ ,3] <- as.numeric(exp(-Z.triplet %*% alpha))
}
W.triplet.sum <- tapply(W.triplet[ ,3], W.triplet[ ,1], sum)
spam.W@entries <- W.triplet[ ,3]
spam.Wprop <- spam.W
W.tripletprop <- W.triplet
#### Create the matrix form of Q
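## Q = rho * (diag(rowSums(W)) - W) + (1 - rho) * I is the precision matrix of
## phi (up to the 1/tau2 scaling), held in sparse spam form with rho fixed at
## 0.99. Writing Q = R'R for the upper-triangular Cholesky factor R gives
## log det(Q) = 2 * sum(log(diag(R))), so det.Q below stores half the log
## determinant.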
rho <- 0.99
Q <- -rho * spam.W
diag(Q) <- rho * rowSums(spam.W) + 1-rho
det.Q <- sum(log(diag(chol.spam(Q))))
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
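## Missing responses are imputed from their full conditional, which for the
## Gaussian likelihood is simply N(fitted, nu2) at the current parameter values.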
if(n.miss>0)
{
Y.DA[which.miss==0] <- rnorm(n=n.miss, mean=fitted[which.miss==0], sd=sqrt(nu2))
}else
{}
####################
## Sample from beta
####################
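## Conjugate Gaussian full conditional: beta | . ~ N(V %*% b, V), where
## V = solve(prior.precision.beta + X'X / nu2) and
## b = X'(Y - offset - phi) / nu2 + prior.precision.beta %*% prior.mean.beta.
## The draw uses the lower-triangular Cholesky factor of V.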
fc.precision <- prior.precision.beta + data.precision.beta / nu2
fc.var <- solve(fc.precision)
beta.offset <- as.numeric(Y.DA - offset - phi)
beta.offset2 <- t(X.standardised) %*% beta.offset / nu2 + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% beta.offset2
chol.var <- t(chol(fc.var))
beta <- fc.mean + chol.var %*% rnorm(p)
##################
## Sample from nu2
##################
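## Conjugate update: nu2 | . ~ Inverse-Gamma(prior.nu2[1] + K/2,
## prior.nu2[2] + 0.5 * RSS), implemented by drawing the matching Gamma
## variate and inverting it.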
fitted.current <- as.numeric(X.standardised %*% beta) + phi + offset
nu2.posterior.scale <- prior.nu2[2] + 0.5 * sum((Y.DA - fitted.current)^2)
nu2 <- 1 / rgamma(1, nu2.posterior.shape, scale=(1/nu2.posterior.scale))
####################
## Sample from phi
####################
offset.phi <- (Y.DA - as.numeric(X.standardised %*% beta) - offset) / nu2
phi <- gaussiancarupdate(Wtriplet=W.triplet, Wbegfin=W.begfin, W.triplet.sum, nsites=K, phi=phi, tau2=tau2, rho=rho, nu2=nu2, offset=offset.phi)
phi <- phi - mean(phi)
##################
## Sample from tau2
##################
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, rho)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
######################
#### Sample from alpha
######################
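## alpha is updated by a Metropolis-Hastings random walk. Each element is
## proposed from a normal distribution truncated to [0, alpha.max[r]]; the
## truncation makes the proposal asymmetric, hence the Hastings correction
## below. The acceptance ratio compares the determinant and quadratic form
## terms of the Gaussian CAR prior for phi under the current and proposed
## weight matrices W(alpha).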
## Propose a value
proposal.alpha <- alpha
for(r in 1:q)
{
proposal.alpha[r] <- rtruncnorm(n=1, a=0, b=alpha.max[r], mean=alpha[r], sd=proposal.sd.alpha[r])
}
## Create the proposal values for W and Q
if(W.binary)
{
W.tripletprop[ ,3] <- as.numeric(exp(-Z.triplet %*% proposal.alpha)>=0.5)
}else
{
W.tripletprop[ ,3] <- as.numeric(exp(-Z.triplet %*% proposal.alpha))
}
W.triplet.sum.prop <- tapply(W.tripletprop[ ,3], W.tripletprop[ ,1], sum)
spam.Wprop@entries <- W.tripletprop[ ,3]
Qprop <- -rho * spam.Wprop
diag(Qprop) <- rho * rowSums(spam.Wprop) + 1-rho
det.Qprop <- sum(log(diag(chol.spam(Qprop))))
temp3 <- quadform(W.tripletprop, W.triplet.sum.prop, n.triplet, K, phi, phi, rho)
#### Calculate the acceptance probability
logprob.current <- det.Q - temp2 / tau2
logprob.proposal <- det.Qprop - temp3 / tau2
hastings <- sum(log(dtruncnorm(x=alpha, a=rep(0,q), b=alpha.max, mean=proposal.alpha, sd=proposal.sd.alpha)) - log(dtruncnorm(x=proposal.alpha, a=rep(0,q), b=alpha.max, mean=alpha, sd=proposal.sd.alpha)))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposed value
if(prob > runif(1))
{
alpha <- proposal.alpha
det.Q <- det.Qprop
W.triplet[ ,3] <- W.tripletprop[ ,3]
W.triplet.sum <- W.triplet.sum.prop
accept[1] <- accept[1] + 1
}else
{}
accept[2] <- accept[2] + 1
#########################
## Calculate the deviance
#########################
fitted <- as.numeric(X.standardised %*% beta) + phi + offset
loglike <- dnorm(Y, mean = fitted, sd = rep(sqrt(nu2),K), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.nu2[ele, ] <- nu2
samples.tau2[ele, ] <- tau2
samples.alpha[ele, ] <- alpha
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
proposal.sd.alpha <- common.accceptrates2(accept[1:2], proposal.sd.alpha, 40, 50, alpha.max/4)
accept <- c(0,0)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.nu2=samples.nu2, samples.alpha=samples.alpha, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/gaussian.dissimilarityCARMCMC.R
|
gaussian.glm <- function(formula, data=NULL, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.nu2)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- gaussian.glmMCMC(Y=Y, offset=offset, X.standardised=X.standardised, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- gaussian.glmMCMC(Y=Y, offset=offset, X.standardised=X.standardised, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=gaussian.glmMCMC, Y=Y, offset=offset, X.standardised=X.standardised, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.final <- rep(100, 2)
names(accept.final) <- c("beta", "nu2")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
fitted.mean <- X.standardised %*% mean.beta + offset
nu2.mean <- mean(results$samples.nu2)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),K), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), nu2=mcmc(results$samples.nu2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(100,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(1 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$nu2), quantile(samples$nu2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, 100, effectiveSize(samples$nu2), geweke.diag(samples$nu2)$z)
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[nrow(summary.results)] <- c("nu2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.final <- rep(100, 2)
names(accept.final) <- c("beta", "nu2")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.nu2.list <- lapply(results, function(l) l[["samples.nu2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.nu2.matrix <- do.call(what=rbind, args=samples.nu2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
fitted.mean <- X.standardised %*% mean.beta + offset
nu2.mean <- mean(samples.nu2.matrix)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),K), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
nu2.temp <- samples.nu2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
nu2.temp[[j]] <- mcmc(samples.nu2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
nu2.mcmc <- as.mcmc.list(nu2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, nu2=nu2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(100,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(1 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.nu2.matrix), quantile(samples.nu2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, 100, effectiveSize(nu2.mcmc), gelman.diag(nu2.mcmc)$psrf[ ,2])
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[nrow(summary.results)] <- c("nu2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nRandom effects model - None\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
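## Illustrative usage sketch: a minimal, hypothetical example. This internal
## function is normally reached through the exported S.glm() wrapper with
## family="gaussian"; the data below are simulated purely for illustration.
if(FALSE)
{
library(CARBayes)
K <- 200
x <- rnorm(K)
Y <- 2 + 0.5 * x + rnorm(K)
model <- S.glm(formula=Y~x, family="gaussian", burnin=1000, n.sample=5000)
model$summary.results
}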
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/gaussian.glm.R
|
gaussian.glmMCMC <- function(Y, offset, X.standardised, K, p, which.miss, n.miss, burnin, n.sample, thin, prior.mean.beta, prior.var.beta, prior.nu2, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
mod.glm <- lm(Y~X.standardised-1, offset=offset)
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.unscaled)) * summary(mod.glm)$sigma
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
res.temp <- Y - X.standardised %*% beta.mean - offset
nu2 <- var(as.numeric(res.temp), na.rm=TRUE)
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- as.numeric(X.standardised %*% beta) + offset
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Metropolis quantities
nu2.posterior.shape <- prior.nu2[1] + 0.5*K
#### Beta update quantities
data.precision.beta <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.nu2 <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rnorm(n=n.miss, mean=fitted[which.miss==0], sd=sqrt(nu2))
}else
{}
####################
## Sample from beta
####################
fc.precision <- prior.precision.beta + data.precision.beta / nu2
fc.var <- solve(fc.precision)
beta.offset <- as.numeric(Y.DA - offset)
beta.offset2 <- t(X.standardised) %*% beta.offset / nu2 + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% beta.offset2
chol.var <- t(chol(fc.var))
beta <- fc.mean + chol.var %*% rnorm(p)
##################
## Sample from nu2
##################
fitted.current <- as.numeric(X.standardised %*% beta) + offset
nu2.posterior.scale <- prior.nu2[2] + 0.5 * sum((Y.DA - fitted.current)^2)
nu2 <- 1 / rgamma(1, nu2.posterior.shape, scale=(1/nu2.posterior.scale))
#########################
## Calculate the deviance
#########################
fitted <- as.numeric(X.standardised %*% beta) + offset
loglike <- dnorm(Y, mean = fitted, sd = rep(sqrt(nu2),K), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.nu2[ele, ] <- nu2
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
chain.results <- list(samples.beta=samples.beta, samples.nu2=samples.nu2, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/gaussian.glmMCMC.R
|
gaussian.lerouxCAR <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.tau2=NULL, rho=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### rho
if(is.null(rho))
{
rho <- runif(1)
fix.rho <- FALSE
}else
{
fix.rho <- TRUE
}
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
common.prior.var.check(prior.nu2)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- gaussian.lerouxCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- gaussian.lerouxCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=gaussian.lerouxCARMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100
accept.phi <- 100
accept.tau2 <- 100
accept.nu2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * results$accept[1] / results$accept[2]
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.nu2, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "nu2", "tau2")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.phi <- apply(results$samples.phi, 2, mean)
fitted.mean <- X.standardised %*% mean.beta + mean.phi + offset
mean.nu2 <- mean(results$samples.nu2)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(mean.nu2),K), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(mean.nu2)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), rho=mcmc(results$samples.rho), tau2=mcmc(results$samples.tau2), nu2=mcmc(results$samples.nu2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(3 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$nu2), quantile(samples$nu2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.nu2, effectiveSize(samples$nu2), geweke.diag(samples$nu2)$z)
summary.hyper[2, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
if(!fix.rho)
{
summary.hyper[3, 1:3] <- c(mean(samples$rho), quantile(samples$rho, c(0.025, 0.975)))
summary.hyper[3, 4:7] <- c(n.keep, accept.rho, effectiveSize(samples$rho), geweke.diag(samples$rho)$z)
}else
{
summary.hyper[3, 1:3] <- c(rho, rho, rho)
summary.hyper[3, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-2):nrow(summary.results)] <- c("nu2", "tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100
accept.phi <- 100
accept.tau2 <- 100
accept.nu2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.nu2, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "nu2", "tau2")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.nu2.list <- lapply(results, function(l) l[["samples.nu2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.nu2.matrix <- do.call(what=rbind, args=samples.nu2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.phi <- apply(samples.phi.matrix, 2, mean)
fitted.mean <- X.standardised %*% mean.beta + mean.phi + offset
mean.nu2 <- mean(samples.nu2.matrix)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(mean.nu2),K), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(mean.nu2)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
rho.temp <- samples.rho.list
tau2.temp <- samples.tau2.list
nu2.temp <- samples.nu2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
rho.temp[[j]] <- mcmc(samples.rho.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
nu2.temp[[j]] <- mcmc(samples.nu2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
rho.mcmc <- as.mcmc.list(rho.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
nu2.mcmc <- as.mcmc.list(nu2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rho.mcmc, tau2=tau2.mcmc, nu2=nu2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(3 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.nu2.matrix), quantile(samples.nu2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.nu2, effectiveSize(nu2.mcmc), gelman.diag(nu2.mcmc)$psrf[ ,2])
summary.hyper[2, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
if(!fix.rho)
{
summary.hyper[3, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[3, 4:7] <- c(n.keep, accept.rho, effectiveSize(rho.mcmc), gelman.diag(rho.mcmc)$psrf[ ,2])
}else
{
summary.hyper[3, 1:3] <- c(rho, rho, rho)
summary.hyper[3, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-2):nrow(summary.results)] <- c("nu2", "tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nRandom effects model - Leroux CAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
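## Illustrative usage sketch: a minimal, hypothetical example on a simulated
## lattice. This internal function is normally reached through the exported
## S.CARleroux() wrapper with family="gaussian"; leaving rho=NULL (as here)
## estimates the spatial dependence parameter, while fixing rho=1 gives the
## intrinsic CAR model.
if(FALSE)
{
library(CARBayes)
K <- 100
grid <- expand.grid(x=1:10, y=1:10)
distance <- as.matrix(dist(grid))
W <- array(0, c(K, K))
W[distance==1] <- 1
Y <- rnorm(n=K, mean=2 + 0.1 * grid$x, sd=1)
model <- S.CARleroux(formula=Y~1, family="gaussian", W=W, burnin=2000, n.sample=10000)
model$summary.results
}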
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/gaussian.lerouxCAR.R
|
gaussian.lerouxCARMCMC <- function(Y, offset, X.standardised, W, rho, fix.rho, K, p, which.miss, n.miss, burnin, n.sample, thin, prior.mean.beta, prior.var.beta, prior.tau2, prior.nu2, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
#
#
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
mod.glm <- lm(Y~X.standardised-1, offset=offset)
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.unscaled)) * summary(mod.glm)$sigma
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
res.temp <- Y - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
tau2 <- var(phi) / 10
nu2 <- tau2
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- as.numeric(X.standardised %*% beta) + phi + offset
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.nu2 <- array(NA, c(n.keep, 1))
if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,2)
proposal.sd.rho <- 0.02
tau2.posterior.shape <- prior.tau2[1] + 0.5*K
nu2.posterior.shape <- prior.nu2[1] + 0.5*K
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the determinant
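## Q(rho) = rho * (diag(rowSums(W)) - W) + (1 - rho) * I shares eigenvectors
## with Wstar = diag(rowSums(W)) - W, so its eigenvalues are
## rho * lambda_i + (1 - rho) for the eigenvalues lambda_i of Wstar. Computing
## the lambda_i once here makes the half log determinant
## 0.5 * sum(log(rho * Wstar.val + (1 - rho))) cheap to re-evaluate for every
## proposed rho.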
if(!fix.rho)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.posterior.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Beta update quantities
data.precision.beta <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rnorm(n=n.miss, mean=fitted[which.miss==0], sd=sqrt(nu2))
}else
{}
####################
## Sample from beta
####################
fc.precision <- prior.precision.beta + data.precision.beta / nu2
fc.var <- solve(fc.precision)
beta.offset <- as.numeric(Y.DA - offset - phi)
beta.offset2 <- t(X.standardised) %*% beta.offset / nu2 + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% beta.offset2
chol.var <- t(chol(fc.var))
beta <- fc.mean + chol.var %*% rnorm(p)
##################
## Sample from nu2
##################
fitted.current <- as.numeric(X.standardised %*% beta) + phi + offset
nu2.posterior.scale <- prior.nu2[2] + 0.5 * sum((Y.DA - fitted.current)^2)
nu2 <- 1 / rgamma(1, nu2.posterior.shape, scale=(1/nu2.posterior.scale))
####################
## Sample from phi
####################
offset.phi <- (Y.DA - as.numeric(X.standardised %*% beta) - offset) / nu2
phi <- gaussiancarupdate(Wtriplet=W.triplet, Wbegfin=W.begfin, W.triplet.sum, nsites=K, phi=phi, tau2=tau2, rho=rho, nu2=nu2, offset=offset.phi)
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
##################
## Sample from tau2
##################
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, rho)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
##################
## Sample from rho
##################
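## rho is updated by a Metropolis-Hastings random walk with a normal proposal
## truncated to [0, 1]; the Hastings correction accounts for the asymmetry,
## and the half log determinant is recomputed from the precomputed
## eigenvalues rather than by a fresh matrix factorisation.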
if(!fix.rho)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q - temp2 / tau2
logprob.proposal <- det.Q.proposal - temp3 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- det.Q.proposal
accept[1] <- accept[1] + 1
}else
{
}
accept[2] <- accept[2] + 1
}else
{}
#########################
## Calculate the deviance
#########################
fitted <- as.numeric(X.standardised %*% beta) + phi + offset
loglike <- dnorm(Y, mean = fitted, sd = rep(sqrt(nu2),K), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.nu2[ele, ] <- nu2
samples.tau2[ele, ] <- tau2
if(!fix.rho) samples.rho[ele, ] <- rho
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{
}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(!fix.rho)
{
proposal.sd.rho <- common.accceptrates2(accept[1:2], proposal.sd.rho, 40, 50, 0.5)
}
accept <- c(0,0)
}else
{
}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
if(fix.rho) samples.rho=NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.nu2=samples.nu2, samples.rho=samples.rho, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
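## Illustrative numerical check (hypothetical sketch): for a toy W, the
## eigenvalue identity used above matches the log determinant of Q(rho)
## computed directly.
if(FALSE)
{
K <- 5
W <- matrix(0, K, K)
W[abs(row(W) - col(W))==1] <- 1                      # path graph on 5 nodes
Wstar <- diag(rowSums(W)) - W
rho <- 0.4
Q <- rho * Wstar + (1 - rho) * diag(K)
lambda <- eigen(Wstar)$values
c(0.5 * sum(log(rho * lambda + (1 - rho))), 0.5 * as.numeric(determinant(Q)$modulus))
}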
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/gaussian.lerouxCARMCMC.R
|
gaussian.multilevelCAR <- function(formula, data=NULL, W, ind.area, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.tau2=NULL, rho=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
n <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
K <- length(unique(ind.area))
#### rho
if(is.null(rho))
{
rho <- runif(1)
fix.rho <- FALSE
}else
{
fix.rho <- TRUE
}
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
#### Checks and formatting for ind.area
if(!is.vector(ind.area)) stop("ind.area is not a vector.", call.=FALSE)
if(sum(ceiling(ind.area)==floor(ind.area))!=n) stop("ind.area does not have all integer values.", call.=FALSE)
if(min(ind.area)!=1) stop("the minimum value in ind.area is not 1.", call.=FALSE)
if(max(ind.area)!=K) stop("the maximum value in ind.area is not equal to the number of spatial areal units.", call.=FALSE)
if(length(table(ind.area))!=K) stop("the number of unique areas in ind.area does not equal K.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
common.prior.var.check(prior.nu2)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- gaussian.multilevelCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, ind.area=ind.area, rho=rho, fix.rho=fix.rho, n=n, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- gaussian.multilevelCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, ind.area=ind.area, rho=rho, fix.rho=fix.rho, n=n, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=gaussian.multilevelCARMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, ind.area=ind.area, rho=rho, fix.rho=fix.rho, n=n, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100
accept.phi <- 100
accept.tau2 <- 100
accept.nu2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * results$accept[1] / results$accept[2]
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.nu2, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "nu2", "tau2")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.phi <- apply(results$samples.phi, 2, mean)
mean.phi.extend <- mean.phi[ind.area]
fitted.mean <- X.standardised %*% mean.beta + mean.phi.extend + offset
mean.nu2 <- mean(results$samples.nu2)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(mean.nu2),n), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(mean.nu2)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), rho=mcmc(results$samples.rho), tau2=mcmc(results$samples.tau2), nu2=mcmc(results$samples.nu2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(3 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$nu2), quantile(samples$nu2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.nu2, effectiveSize(samples$nu2), geweke.diag(samples$nu2)$z)
summary.hyper[2, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
if(!fix.rho)
{
summary.hyper[3, 1:3] <- c(mean(samples$rho), quantile(samples$rho, c(0.025, 0.975)))
summary.hyper[3, 4:7] <- c(n.keep, accept.rho, effectiveSize(samples$rho), geweke.diag(samples$rho)$z)
}else
{
summary.hyper[3, 1:3] <- c(rho, rho, rho)
summary.hyper[3, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-2):nrow(summary.results)] <- c("nu2", "tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100
accept.phi <- 100
accept.tau2 <- 100
accept.nu2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.nu2, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "nu2", "tau2")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.nu2.list <- lapply(results, function(l) l[["samples.nu2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.nu2.matrix <- do.call(what=rbind, args=samples.nu2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.phi.extend <- mean.phi[ind.area]
fitted.mean <- X.standardised %*% mean.beta + mean.phi.extend + offset
mean.nu2 <- mean(samples.nu2.matrix)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(mean.nu2),K), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(mean.nu2)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
rho.temp <- samples.rho.list
tau2.temp <- samples.tau2.list
nu2.temp <- samples.nu2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
rho.temp[[j]] <- mcmc(samples.rho.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
nu2.temp[[j]] <- mcmc(samples.nu2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
rho.mcmc <- as.mcmc.list(rho.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
nu2.mcmc <- as.mcmc.list(nu2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rho.mcmc, tau2=tau2.mcmc, nu2=nu2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(3 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.nu2.matrix), quantile(samples.nu2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.nu2, effectiveSize(nu2.mcmc), gelman.diag(nu2.mcmc)$psrf[ ,2])
summary.hyper[2, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
if(!fix.rho)
{
summary.hyper[3, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[3, 4:7] <- c(n.keep, accept.rho, effectiveSize(rho.mcmc), gelman.diag(rho.mcmc)$psrf[ ,2])
}else
{
summary.hyper[3, 1:3] <- c(rho, rho, rho)
summary.hyper[3, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-2):nrow(summary.results)] <- c("nu2", "tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nRandom effects model - Multilevel Leroux CAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
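#### Illustrative usage (not part of the package source): a minimal sketch of
#### fitting this multilevel Gaussian Leroux CAR model. In released versions of
#### CARBayes this function is reached via the exported S.CARmultilevel()
#### wrapper with family="gaussian"; the simulated data below (a 4 by 4 grid of
#### K=16 areas with m=5 individuals each) are hypothetical.
# K <- 16; m <- 5; n <- K * m
# ind.area <- rep(1:K, each=m)                       ## area membership of each individual
# grid <- expand.grid(x=1:4, y=1:4)                  ## rook adjacency on the grid
# W <- 1 * (as.matrix(dist(grid, method="manhattan")) == 1)
# x1 <- rnorm(n)
# phi <- rnorm(K, sd=0.5)                            ## true area-level effects
# Y <- 1 + 2 * x1 + phi[ind.area] + rnorm(n)
# dat <- data.frame(Y, x1)
# model <- S.CARmultilevel(formula=Y~x1, family="gaussian", data=dat, W=W,
#                          ind.area=ind.area, burnin=2000, n.sample=12000)
# model$summary.results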
#### Source file: CARBayes/R/gaussian.multilevelCAR.R
gaussian.multilevelCARMCMC <- function(Y, offset, X.standardised, W, ind.area, rho, fix.rho, n, K, p, which.miss, n.miss, burnin, n.sample, thin, prior.mean.beta, prior.var.beta, prior.nu2, prior.tau2, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
mod.glm <- lm(Y~X.standardised-1, offset=offset)
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.unscaled)) * summary(mod.glm)$sigma
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
res.temp <- Y - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
phi.extend <- phi[ind.area]
tau2 <- var(phi) / 10
nu2 <- tau2
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- as.numeric(X.standardised %*% beta) + phi.extend + offset
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Ind.area parts
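#### For each of the K areas, record which individuals it contains, how many
#### there are, and (despite the .miss suffix) how many of those responses are
#### observed, since which.miss is 1 for observed cells and 0 for missing ones;
#### these counts feed the multilevel random effect update below.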
ind.area.list <- as.list(rep(0,K))
n.individual <- rep(0,K)
n.individual.miss <- rep(0,K)
for(r in 1:K)
{
ind.area.list[[r]] <- which(ind.area==r)
n.individual[r] <- length(ind.area.list[[r]])
n.individual.miss[r] <- sum(which.miss[ind.area.list[[r]]])
}
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.nu2 <- array(NA, c(n.keep, 1))
samples.tau2 <- array(NA, c(n.keep, 1))
if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, n))
samples.fitted <- array(NA, c(n.keep, n))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,2)
proposal.sd.rho <- 0.02
tau2.posterior.shape <- prior.tau2[1] + 0.5*K
nu2.posterior.shape <- prior.nu2[1] + 0.5*n
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the spatial determinant
if(!fix.rho)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.posterior.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Beta update quantities
data.precision.beta <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rnorm(n=n.miss, mean=fitted[which.miss==0], sd=sqrt(nu2))
}else
{}
####################
## Sample from beta
####################
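## beta has a conjugate Gaussian full conditional here because the likelihood
## is Gaussian: the full conditional precision combines the prior precision
## with X'X/nu2, and a draw is taken as fc.mean + L z, where L is the lower
## Cholesky factor of fc.var and z is a standard normal vector.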
fc.precision <- prior.precision.beta + data.precision.beta / nu2
fc.var <- solve(fc.precision)
beta.offset <- as.numeric(Y.DA - offset - phi.extend)
beta.offset2 <- t(X.standardised) %*% beta.offset / nu2 + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% beta.offset2
chol.var <- t(chol(fc.var))
beta <- fc.mean + chol.var %*% rnorm(p)
##################
## Sample from nu2
##################
fitted.current <- as.numeric(X.standardised %*% beta) + phi.extend + offset
nu2.posterior.scale <- prior.nu2[2] + 0.5 * sum((Y.DA - fitted.current)^2)
nu2 <- 1 / rgamma(1, nu2.posterior.shape, scale=(1/nu2.posterior.scale))
####################
## Sample from phi
####################
offset.phi <- (Y.DA - as.numeric(X.standardised %*% beta) - offset) / nu2
offset.phi2 <- tapply(offset.phi, ind.area, sum, na.rm=T)
phi <- gaussiancarmultilevelupdate(Wtriplet=W.triplet, Wbegfin=W.begfin, W.triplet.sum, n_individual=n.individual, nsites=K, phi=phi, tau2=tau2, rho=rho, nu2=nu2, offset=offset.phi2)
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
phi.extend <- phi[ind.area]
##################
## Sample from tau2
##################
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, rho)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
##################
## Sample from rho
##################
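## rho has no closed-form full conditional, so it is updated by random walk
## Metropolis-Hastings: a value is proposed from a normal truncated to [0,1],
## the CAR prior density of phi is compared under the current and proposed
## values (quadratic form plus 0.5 * log-determinant), and the hastings term
## corrects for the asymmetry of the truncated normal proposal.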
if(!fix.rho)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q - temp2 / tau2
logprob.proposal <- det.Q.proposal - temp3 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- det.Q.proposal
accept[1] <- accept[1] + 1
}else
{}
accept[2] <- accept[2] + 1
}else
{}
#########################
## Calculate the deviance
#########################
fitted <- as.numeric(X.standardised %*% beta) + phi.extend + offset
loglike <- dnorm(Y, mean = fitted, sd = rep(sqrt(nu2),n), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.nu2[ele, ] <- nu2
samples.tau2[ele, ] <- tau2
if(!fix.rho) samples.rho[ele, ] <- rho
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
#######################################
#### Update the acceptance rate for rho
#######################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(!fix.rho)
{
proposal.sd.rho <- common.accceptrates2(accept[1:2], proposal.sd.rho, 40, 50, 0.5)
}
accept <- c(0,0)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
if(fix.rho) samples.rho=NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.nu2=samples.nu2, samples.rho=samples.rho, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
#### Source file: CARBayes/R/gaussian.multilevelCARMCMC.R
highlight.borders <- function(border.locations, sfdata)
{
#### This function takes in an n by n matrix where values of zero represent borders to be highlighted
######################################
#### Identify the borders to highlight
######################################
border.temp <- border.locations
border.temp[upper.tri(border.temp)] <- NA
boundary.list <- which(border.temp==0, arr.ind=TRUE)
boundary.dat <- data.frame(area1=boundary.list[ ,1], area2=boundary.list[ ,2])
M <- nrow(boundary.dat)
################################################
#### Add the geometry to the boundary data frame
################################################
boundary.dat$geometry <- rep(NA, M)
for(j in 1:M)
{
intersect.all <- st_intersection(sfdata$geometry[boundary.list[j, ]])
intersect.type <- sapply(intersect.all, class)
intersect.final <- intersect.all[intersect.type[2, ] %in% c("LINESTRING", "MULTILINESTRING", "GEOMETRYCOLLECTION")]
if(length(intersect.final)>0) boundary.dat$geometry[j] <- intersect.final
}
#boundary.dat2 <- boundary.dat[which(!is.na(boundary.dat$geometry)), ]
boundary.final <- st_as_sf(x=boundary.dat)
############################
#### Return the final object
############################
return(boundary.final)
}
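#### Illustrative usage (not part of the package source): a hedged sketch. The
#### objects border.locations (a K by K matrix whose zero entries mark the
#### boundaries to highlight, e.g. taken from the localised.structure element
#### of a fitted localised model) and sf.dat (an sf object with one polygon per
#### area, in the same order as the rows of border.locations) are hypothetical.
# borders <- highlight.borders(border.locations=border.locations, sfdata=sf.dat)
# plot(sf.dat$geometry)
# plot(borders$geometry, add=TRUE, col="red", lwd=2)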
#### Source file: CARBayes/R/highlight.borders.R
logLik.CARBayes <- function(object,...)
{
#### Return the log likelihood
return(object$modelfit[6])
}
#### Source file: CARBayes/R/logLik.CARBayes.R
model.matrix.CARBayes <- function(object,...)
{
#### Return the model matrix
return(object$X)
}
#### Source file: CARBayes/R/model.matrix.CARBayes.R
multinomial.MVlerouxCAR <- function(formula, data=NULL, trials, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "multinomial")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
J <- ncol(Y)
N.all <- K * J
N.re <- K * (J-1)
#### If only one element in Y is missing then fix it as we know the total number of trials
which.miss.row <- J-apply(which.miss,1,sum)
which.miss.1 <- which(which.miss.row==1)
if(length(which.miss.1)>0)
{
for(r in 1:length(which.miss.1))
{
which.miss[which.miss.1[r], is.na(Y[which.miss.1[r], ])] <- 1
Y[which.miss.1[r], is.na(Y[which.miss.1[r], ])] <- trials[which.miss.1[r]] - sum(Y[which.miss.1[r], ], na.rm=T)
}
n.miss <- sum(is.na(Y))
which.miss.row <- J-apply(which.miss,1,sum)
}else
{}
const.like <- lfactorial(trials[which.miss.row==0]) - apply(lfactorial(Y[which.miss.row==0, ]),1,sum)
K.present <- sum(which.miss.row==0)
#### Determine which rows have missing values
if(n.miss>0) which.miss.row2 <- which(which.miss.row>0)
#### Check and format the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
int.check <- K-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials have non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
diffs <- apply(Y, 1, sum, na.rm=T) - trials
if(max(diffs)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### W matrix
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
if(ceiling(N.all/K)!= floor(N.all/K)) stop("The number of data points divided by the number of rows in W is not a whole number.", call.=FALSE)
#### rho
if(is.null(rho))
{
rho <- runif(1)
fix.rho <- FALSE
}else
{
fix.rho <- TRUE
}
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.Sigma.df)) prior.Sigma.df <- 2
if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- rep(100000, (J-1))
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
if(!is.numeric(prior.Sigma.scale)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE)
if(sum(is.na(prior.Sigma.scale))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p, 5)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- multinomial.MVlerouxCARMCMC(Y=Y, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, N.re=N.re, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- multinomial.MVlerouxCARMCMC(Y=Y, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, N.re=N.re, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=multinomial.MVlerouxCARMCMC, Y=Y, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, N.re=N.re, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * sum(results$accept.beta[1:(J-1)]) / sum(results$accept.beta[(J:(2*(J-1)))])
accept.phi <- 100 * results$accept[1] / results$accept[2]
accept.Sigma <- 100
if(!fix.rho)
{
accept.rho <- 100 * results$accept[3] / results$accept[4]
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.Sigma)
names(accept.final) <- c("beta", "phi", "rho", "Sigma")
## Compute the model fit criterion
mean.beta <- matrix(apply(results$samples.beta, 2, mean), nrow=p, ncol=(J-1), byrow=F)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=(J-1), byrow=T)
mean.logit <- X.standardised %*% mean.beta + mean.phi + offset
mean.logit <- cbind(rep(0,K), mean.logit)
mean.prob <- exp(mean.logit) / apply(exp(mean.logit),1,sum)
deviance.fitted <- -2* sum(const.like + apply(Y[which.miss.row==0, ] * log(mean.prob[which.miss.row==0, ]),1,sum))
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- matrix(apply(results$samples.fitted, 2, mean), nrow=K, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
var.y <- fitted.values * (1-fitted.values / trials)
pearson.residuals <- response.residuals / sqrt(var.y)
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- results$samples.beta
for(r in 1:(J-1))
{
samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(results$samples.beta[ ,((r-1)*p+1):(r*p)], X.indicator, X.mean, X.sd, p, FALSE)
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), Sigma=results$samples.Sigma, rho=mcmc(results$samples.rho), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, (J-1)*p), rep(accept.beta,(J-1)*p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
col.name <- rep(NA, p*(J-1))
if(is.null(colnames(Y)))
{
for(r in 1:(J-1))
{
col.name[((r-1)*p+1):(r*p)] <- paste("Category ", r+1, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:(J-1))
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[(r+1)], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(J ,7))
summary.hyper[1:(J-1), 1] <- diag(apply(samples$Sigma, c(2,3), mean))
summary.hyper[1:(J-1), 2] <- diag(apply(samples$Sigma, c(2,3), quantile, c(0.025)))
summary.hyper[1:(J-1), 3] <- diag(apply(samples$Sigma, c(2,3), quantile, c(0.975)))
summary.hyper[1:(J-1), 4] <- n.keep
summary.hyper[1:(J-1), 5] <- accept.Sigma
summary.hyper[1:(J-1), 6] <- diag(apply(samples$Sigma, c(2,3), effectiveSize))
for(r in 1:(J-1))
{
summary.hyper[r, 7] <- geweke.diag(samples$Sigma[ ,r,r])$z
}
if(!fix.rho)
{
summary.hyper[J, 1:3] <- c(mean(samples$rho), quantile(samples$rho, c(0.025, 0.975)))
summary.hyper[J, 4:7] <- c(n.keep, accept.rho, effectiveSize(samples$rho), geweke.diag(samples$rho)$z)
}else
{
summary.hyper[J, 1:3] <- c(rho, rho, rho)
summary.hyper[J, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(((J-1)*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",(J-1)), 1:(J-1), 1:(J-1), sep=""), "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept.beta"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1:(J-1)]) / sum(accept.temp2[ ,(J:(2*(J-1)))])
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.phi <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.Sigma <- 100
if(!fix.rho)
{
accept.rho <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.Sigma)
names(accept.final) <- c("beta", "phi", "rho", "Sigma")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.Sigma.list <- lapply(results, function(l) l[["samples.Sigma"]])
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- matrix(apply(samples.beta.matrix, 2, mean), nrow=p, ncol=(J-1), byrow=F)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=(J-1), byrow=T)
mean.logit <- X.standardised %*% mean.beta + mean.phi + offset
mean.logit <- cbind(rep(0,K), mean.logit)
mean.prob <- exp(mean.logit) / apply(exp(mean.logit),1,sum)
deviance.fitted <- -2* sum(const.like + apply(Y[which.miss.row==0, ] * log(mean.prob[which.miss.row==0, ]),1,sum))
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- matrix(apply(samples.fitted.matrix, 2, mean), nrow=K, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
var.y <- fitted.values * (1-fitted.values / trials)
pearson.residuals <- response.residuals / sqrt(var.y)
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
for(r in 1:(J-1))
{
samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)], X.indicator, X.mean, X.sd, p, FALSE)
}
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
rho.temp <- samples.rho.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
rho.temp[[j]] <- mcmc(samples.rho.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
rho.mcmc <- as.mcmc.list(rho.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rho.mcmc, Sigma=samples.Sigma.list, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, (J-1)*p), rep(accept.beta,(J-1)*p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
col.name <- rep(NA, p*(J-1))
if(is.null(colnames(Y)))
{
for(r in 1:(J-1))
{
col.name[((r-1)*p+1):(r*p)] <- paste("Category ", r+1, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:(J-1))
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[(r+1)], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(J ,7))
summary.hyper[1:(J-1), 4] <- rep(n.keep, J-1)
summary.hyper[1:(J-1), 5] <- rep(accept.Sigma, J-1)
for(r in 1:(J-1))
{
test.vec <- samples.Sigma.list[[1]][ , r, r]
test.list <- as.list(rep(NA, n.chains))
test.list[[1]] <- mcmc(samples.Sigma.list[[1]][ , r, r])
for(i in 2:n.chains)
{
test.vec <- c(test.vec, samples.Sigma.list[[i]][ , r, r])
test.list[[i]] <- mcmc(samples.Sigma.list[[i]][ , r, r])
}
test.mcmc <- as.mcmc.list(test.list)
summary.hyper[r,1] <- mean(test.vec)
summary.hyper[r,2:3] <- quantile(test.vec, c(0.025, 0.975))
summary.hyper[r,6] <- effectiveSize(test.mcmc)
summary.hyper[r,7] <- gelman.diag(test.mcmc)$psrf[ ,2]
}
if(!fix.rho)
{
summary.hyper[J, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[J, 4:7] <- c(n.keep, accept.rho, effectiveSize(rho.mcmc), gelman.diag(rho.mcmc)$psrf[ ,2])
}else
{
summary.hyper[J, 1:3] <- c(rho, rho, rho)
summary.hyper[J, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(((J-1)*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",(J-1)), 1:(J-1), 1:(J-1), sep=""), "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Multinomial (logit link function)", "\nRandom effects model - Leroux MCAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
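#### Illustrative usage (not part of the package source): a minimal sketch based
#### on the signature above. The objects Y.mat (a K by J matrix of category
#### counts), dat and W are hypothetical; in released versions of CARBayes this
#### model appears to be reached via the exported MVS.CARleroux() wrapper with
#### family="multinomial".
# model <- multinomial.MVlerouxCAR(formula=Y.mat~x1+x2, data=dat,
#                                  trials=rowSums(Y.mat), W=W,
#                                  burnin=10000, n.sample=60000)
# model$summary.results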
#### Source file: CARBayes/R/multinomial.MVlerouxCAR.R
multinomial.MVlerouxCARMCMC <- function(Y, trials, offset, X.standardised, W, rho, fix.rho, K, p, J, N.all, N.re, which.miss, n.miss, burnin, n.sample, thin, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.Sigma.df, prior.Sigma.scale, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
# library(MCMCpack)
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
beta <- array(NA, c(p, (J-1)))
for(i in 2:J)
{
mod.glm <- glm(cbind(Y[ ,i], trials - Y[ ,i])~X.standardised-1, offset=offset[ ,(i-1)], family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta[ ,(i-1)] <- rnorm(n=p, mean=beta.mean, sd=beta.sd)
}
regression <- X.standardised %*% beta
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat[ ,-1] / theta.hat[ ,1]) - offset - regression
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi.vec <- rnorm(n=N.re, mean=0, sd=res.sd)
phi <- matrix(phi.vec, nrow=K, byrow=TRUE)
Sigma <- cov(phi)
Sigma.inv <- solve(Sigma)
Sigma.a <- rep(1, (J-1))
####################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
regression <- X.standardised %*% beta
Y.DA <- Y
#### If only one element in Y is missing then fix it as we know the total number of trials
which.miss.row <- J-apply(which.miss,1,sum)
which.miss.1 <- which(which.miss.row==1)
if(length(which.miss.1)>0)
{
for(r in 1:length(which.miss.1))
{
which.miss[which.miss.1[r], is.na(Y[which.miss.1[r], ])] <- 1
Y[which.miss.1[r], is.na(Y[which.miss.1[r], ])] <- trials[which.miss.1[r]] - sum(Y[which.miss.1[r], ], na.rm=T)
}
n.miss <- sum(is.na(Y))
which.miss.row <- J-apply(which.miss,1,sum)
}else
{}
const.like <- lfactorial(trials[which.miss.row==0]) - apply(lfactorial(Y[which.miss.row==0, ]),1,sum)
K.present <- sum(which.miss.row==0)
#### Determine which rows have missing values
if(n.miss>0) which.miss.row2 <- which(which.miss.row>0)
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, (J-1)*p))
samples.phi <- array(NA, c(n.keep, N.re))
samples.Sigma <- array(NA, c(n.keep, (J-1), (J-1)))
samples.Sigma.a <- array(NA, c(n.keep, (J-1)))
if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, K.present))
samples.fitted <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept.beta <- rep(0,2*(J-1))
proposal.sd.beta <- rep(0.01, (J-1))
accept <- rep(0,4)
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
Sigma.post.df <- prior.Sigma.df + K + J - 2
Sigma.a.post.shape <- (prior.Sigma.df + J-1) / 2
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
Wstar <- diag(apply(W,1,sum)) - W
Q <- rho * Wstar + diag(rep(1-rho,K))
#### Create the determinant
if(!fix.rho)
{
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
islands.all <- rep(islands,J)
n.islands <- max(W.islands$nc)
if(rho==1) Sigma.post.df <- prior.Sigma.df + K + J - 2 - n.islands
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
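## Missing cells in Y are imputed from their full conditionals: a fully
## missing row is drawn from a multinomial with the current fitted
## probabilities, while a partially missing row allocates the unaccounted-for
## trials among its missing categories using renormalised probabilities.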
if(n.miss>0)
{
for(g in 1:length(which.miss.row2))
{
## Determine which row (area) of Y to update
row <- which.miss.row2[g]
## Compute the vector of probabilities for that row
lp <- c(0, regression[row, ] + phi[row, ] + offset[row, ])
prob <- exp(lp) / sum(exp(lp))
## Do the multinomial data augmentation
if(which.miss.row[row]==J)
{
## All the Ys are missing
Y.DA[row, ] <- as.numeric(rmultinom(n=1, size=trials[row], prob=prob))
}else
{
## Not all the Ys are missing
## Re-normalise the probabilities
prob[!is.na(Y[row, ])] <- 0
prob <- prob / sum(prob)
temp <- as.numeric(rmultinom(n=1, size=trials[row]-sum(Y[row, ], na.rm=T), prob=prob))
Y.DA[row, which.miss[row, ]==0] <- temp[which.miss[row, ]==0]
}
}
}else
{}
###################
## Sample from beta
###################
offset.temp <- phi + offset
for(r in 1:(J-1))
{
temp <- multinomialbetaupdateRW(X.standardised, K, J, p, r, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block, rep(0, K))
beta[ ,r] <- temp[[1]][ ,r]
accept.beta[r] <- accept.beta[r] + temp[[2]]
accept.beta[(r+J-1)] <- accept.beta[(r+J-1)] + n.beta.block
}
regression <- X.standardised %*% beta
##################
## Sample from phi
##################
den.offset <- rho * W.triplet.sum + 1 - rho
phi.offset <- regression + offset
Chol.Sigma <- t(chol(proposal.sd.phi*Sigma))
z.mat <- matrix(rnorm(n=N.all-K, mean=0, sd=1), nrow=J-1, ncol=K)
innovations <- t(Chol.Sigma %*% z.mat)
temp1 <- multinomialmcarupdateRW(W.triplet, W.begfin, K, J, phi, Y.DA, phi.offset, den.offset, Sigma.inv, rho, proposal.sd.phi, innovations)
phi <- temp1[[1]]
for(r in 1:(J-1))
{
phi[ ,r] <- phi[ ,r] - mean(phi[ ,r])
}
accept[1] <- accept[1] + temp1[[2]]
accept[2] <- accept[2] + K
####################
## Sample from Sigma
####################
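## Sigma has a conjugate inverse-Wishart full conditional given phi and the
## auxiliary Sigma.a scales. Together with the Sigma.a update below, this
## appears to follow the hierarchical inverse-Wishart construction of Huang
## and Wand (2013), which induces half-t priors on the standard deviations.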
Sigma.post.scale <- 2 * prior.Sigma.df * diag(1 / Sigma.a) + t(phi) %*% Q %*% phi
Sigma <- riwish(Sigma.post.df, Sigma.post.scale)
Sigma.inv <- solve(Sigma)
######################
## Sample from Sigma.a
######################
Sigma.a.posterior.scale <- prior.Sigma.df * diag(Sigma.inv) + 1 / prior.Sigma.scale^2
Sigma.a <- 1 / rgamma((J-1), Sigma.a.post.shape, scale=(1/Sigma.a.posterior.scale))
##################
## Sample from rho
##################
if(!fix.rho)
{
## Propose a new value
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
Q.prop <- proposal.rho * Wstar + diag(rep(1-proposal.rho, K))
det.Q.prop <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
## Compute the acceptance rate
logprob.current <- 0.5 * (J-1) * det.Q - 0.5 * sum(diag(t(phi) %*% Q %*% phi %*% Sigma.inv))
logprob.proposal <- 0.5 * (J-1) * det.Q.prop - 0.5 * sum(diag(t(phi) %*% Q.prop %*% phi %*% Sigma.inv))
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- det.Q.prop
Q <- Q.prop
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
}else
{}
#########################
## Calculate the deviance
#########################
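## Category 1 is the baseline: a column of zeros is prepended to the linear
## predictors of the other J-1 categories, so prob is the usual
## baseline-category multinomial logit transformation of lp.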
lp <- regression + phi + offset
lp <- cbind(rep(0,K), lp)
prob <- exp(lp) / apply(exp(lp),1,sum)
fitted <- prob * trials
loglike <- const.like + apply(Y[which.miss.row==0, ] * log(prob[which.miss.row==0, ]),1,sum)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- as.numeric(beta)
samples.phi[ele, ] <- as.numeric(t(phi))
samples.Sigma[ele, , ] <- Sigma
samples.Sigma.a[ele, ] <- Sigma.a
if(!fix.rho) samples.rho[ele, ] <- rho
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- as.numeric(t(fitted))
if(n.miss>0) samples.Y[ele, ] <- t(Y.DA)[is.na(t(Y))]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
for(r in 1:(J-1))
{
if(p>2)
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J-1))], proposal.sd.beta[r], 40, 50)
}else
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J-1))], proposal.sd.beta[r], 30, 40)
}
}
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
if(!fix.rho)
{
proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5)
}
accept <- c(0,0,0,0)
accept.beta <- rep(0,2*(J-1))
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### Close the progress bar if used
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
if(fix.rho) samples.rho=NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.Sigma=samples.Sigma, samples.Sigma.a=samples.Sigma.a, samples.rho=samples.rho, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept, accept.beta=accept.beta)
#### Return the results
return(chain.results)
}
#### Source file: CARBayes/R/multinomial.MVlerouxCARMCMC.R
multinomial.glm <- function(formula, data=NULL, trials, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "multinomial")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
J <- ncol(Y)
N.all <- K * J
#### Check and format the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
int.check <- K-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials have non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
diffs <- apply(Y, 1, sum, na.rm=T) - trials
if(max(diffs)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### If only one element in Y is missing then fix it as we know the total number of trials
which.miss.row <- J-apply(which.miss,1,sum)
which.miss.1 <- which(which.miss.row==1)
if(length(which.miss.1)>0)
{
for(r in 1:length(which.miss.1))
{
which.miss[which.miss.1[r], is.na(Y[which.miss.1[r], ])] <- 1
Y[which.miss.1[r], is.na(Y[which.miss.1[r], ])] <- trials[which.miss.1[r]] - sum(Y[which.miss.1[r], ], na.rm=T)
}
n.miss <- sum(is.na(Y))
which.miss.row <- J-apply(which.miss,1,sum)
}else
{}
const.like <- lfactorial(trials[which.miss.row==0]) - apply(lfactorial(Y[which.miss.row==0, ]),1,sum)
K.present <- sum(which.miss.row==0)
if(n.miss>0) which.miss.row2 <- which(which.miss.row>0)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p, 5)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- multinomial.glmMCMC(Y=Y, trials=trials, offset=offset, X.standardised=X.standardised, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- multinomial.glmMCMC(Y=Y, trials=trials, offset=offset, X.standardised=X.standardised, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=multinomial.glmMCMC, Y=Y, trials=trials, offset=offset, X.standardised=X.standardised, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * sum(results$accept.beta[1:(J-1)]) / sum(results$accept.beta[(J:(2*(J-1)))])
accept.final <- accept.beta
names(accept.final) <- c("beta")
## Compute the model fit criterion
mean.beta <- matrix(apply(results$samples.beta, 2, mean), nrow=p, ncol=(J-1), byrow=F)
mean.logit <- X.standardised %*% mean.beta + offset
mean.logit <- cbind(rep(0,K), mean.logit)
mean.prob <- exp(mean.logit) / apply(exp(mean.logit),1,sum)
deviance.fitted <- -2* sum(const.like + apply(Y[which.miss.row==0, ] * log(mean.prob[which.miss.row==0, ]),1,sum))
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- matrix(apply(results$samples.fitted, 2, mean), nrow=K, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
var.y <- fitted.values * (1-fitted.values / trials)
pearson.residuals <- response.residuals / sqrt(var.y)
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- results$samples.beta
for(r in 1:(J-1))
{
samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(results$samples.beta[ ,((r-1)*p+1):(r*p)], X.indicator, X.mean, X.sd, p, FALSE)
}
samples <- list(beta=mcmc(samples.beta.orig), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, (J-1)*p), rep(accept.beta,(J-1)*p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
col.name <- rep(NA, p*(J-1))
if(is.null(colnames(Y)))
{
for(r in 1:(J-1))
{
col.name[((r-1)*p+1):(r*p)] <- paste("Category ", r+1, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:(J-1))
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[(r+1)], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.results <- summary.beta
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept.beta"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1:(J-1)]) / sum(accept.temp2[ ,(J:(2*(J-1)))])
accept.final <- c(accept.beta)
names(accept.final) <- c("beta")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- matrix(apply(samples.beta.matrix, 2, mean), nrow=p, ncol=(J-1), byrow=F)
mean.logit <- X.standardised %*% mean.beta + offset
mean.logit <- cbind(rep(0,K), mean.logit)
mean.prob <- exp(mean.logit) / apply(exp(mean.logit),1,sum)
deviance.fitted <- -2* sum(const.like + apply(Y[which.miss.row==0, ] * log(mean.prob[which.miss.row==0, ]),1,sum))
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- matrix(apply(samples.fitted.matrix, 2, mean), nrow=K, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
var.y <- fitted.values * (1-fitted.values / trials)
pearson.residuals <- response.residuals / sqrt(var.y)
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
for(r in 1:(J-1))
{
samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)], X.indicator, X.mean, X.sd, p, FALSE)
}
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, (J-1)*p), rep(accept.beta,(J-1)*p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
col.name <- rep(NA, p*(J-1))
if(is.null(colnames(Y)))
{
for(r in 1:(J-1))
{
col.name[((r-1)*p+1):(r*p)] <- paste("Category ", r+1, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:(J-1))
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[(r+1)], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.results <- summary.beta
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Multinomial (logit link function)", "\nRandom effects model - None\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
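#### Illustrative usage (not part of the package source): the covariate-only
#### multinomial model, reusing the hypothetical Y.mat and dat objects sketched
#### for the spatial version above; in released versions this appears to
#### correspond to the exported S.glm() wrapper with family="multinomial".
# model <- multinomial.glm(formula=Y.mat~x1+x2, data=dat, trials=rowSums(Y.mat),
#                          burnin=10000, n.sample=60000)
# model$summary.results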
#### Source file: CARBayes/R/multinomial.glm.R
multinomial.glmMCMC <- function(Y, trials, offset, X.standardised, K, p, J, N.all, which.miss, n.miss, burnin, n.sample, thin, n.beta.block, list.block, prior.mean.beta, prior.var.beta, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
beta <- array(NA, c(p, (J-1)))
for(i in 2:J)
{
mod.glm <- glm(cbind(Y[ ,i], trials - Y[ ,i])~X.standardised-1, offset=offset[ ,(i-1)], family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta[ ,(i-1)] <- rnorm(n=p, mean=beta.mean, sd=beta.sd)
}
####################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
regression <- X.standardised %*% beta
Y.DA <- Y
#### If only one element in Y is missing then fix it as we know the total number of trials
which.miss.row <- J-apply(which.miss,1,sum)
which.miss.1 <- which(which.miss.row==1)
if(length(which.miss.1)>0)
{
for(r in 1:length(which.miss.1))
{
which.miss[which.miss.1[r], is.na(Y[which.miss.1[r], ])] <- 1
Y[which.miss.1[r], is.na(Y[which.miss.1[r], ])] <- trials[which.miss.1[r]] - sum(Y[which.miss.1[r], ], na.rm=T)
}
n.miss <- sum(is.na(Y))
which.miss.row <- J-apply(which.miss,1,sum)
}else
{}
const.like <- lfactorial(trials[which.miss.row==0]) - apply(lfactorial(Y[which.miss.row==0, ]),1,sum)
K.present <- sum(which.miss.row==0)
#### Determine which rows have missing values
if(n.miss>0) which.miss.row2 <- which(which.miss.row>0)
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, (J-1)*p))
samples.loglike <- array(NA, c(n.keep, K.present))
samples.fitted <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept.beta <- rep(0,2*(J-1))
proposal.sd.beta <- rep(0.01, (J-1))
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
for(g in 1:length(which.miss.row2))
{
## Determine which row (area) of Y to update
row <- which.miss.row2[g]
## Compute the vector of probabilities for that row
lp <- c(0, regression[row, ] + offset[row, ])
prob <- exp(lp) / sum(exp(lp))
## Do the multinomial data augmentation
if(which.miss.row[row]==J)
{
## All the Ys are missing
Y.DA[row, ] <- as.numeric(rmultinom(n=1, size=trials[row], prob=prob))
}else
{
## Not all the Ys are missing
## Re-normalise the probabilities
prob[!is.na(Y[row, ])] <- 0
prob <- prob / sum(prob)
temp <- as.numeric(rmultinom(n=1, size=trials[row]-sum(Y[row, ], na.rm=T), prob=prob))
Y.DA[row, which.miss[row, ]==0] <- temp[which.miss[row, ]==0]
}
}
}else
{}
###################
## Sample from beta
###################
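## Each non-baseline category r = 1, ..., J-1 gets its own blocked random-walk
## Metropolis update; accept.beta stores the acceptances in elements 1:(J-1)
## and the attempted block counts in elements J:(2*(J-1)).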
for(r in 1:(J-1))
{
temp <- multinomialbetaupdateRW(X.standardised, K, J, p, r, beta, offset, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block, rep(0, K))
beta[ ,r] <- temp[[1]][ ,r]
accept.beta[r] <- accept.beta[r] + temp[[2]]
accept.beta[(r+J-1)] <- accept.beta[(r+J-1)] + n.beta.block
}
regression <- X.standardised %*% beta
#########################
## Calculate the deviance
#########################
lp <- regression + offset
lp <- cbind(rep(0,K), lp)
prob <- exp(lp) / apply(exp(lp),1,sum)
fitted <- prob * trials
loglike <- const.like + apply(Y[which.miss.row==0, ] * log(prob[which.miss.row==0, ]),1,sum)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- as.numeric(beta)
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- as.numeric(t(fitted))
if(n.miss>0) samples.Y[ele, ] <- t(Y.DA)[is.na(t(Y))]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
for(r in 1:(J-1))
{
if(p>2)
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J-1))], proposal.sd.beta[r], 40, 50)
}else
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J-1))], proposal.sd.beta[r], 30, 40)
}
}
accept.beta <- rep(0,2*(J-1))
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### Close the progress bar if used
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
chain.results <- list(samples.beta=samples.beta, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept.beta=accept.beta)
#### Return the results
return(chain.results)
}
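#### Illustrative check (not run) of the baseline-category logit used above:
#### category 1 is the reference, so its linear predictor is fixed at 0 and the
#### remaining J-1 categories carry regression effects. The numbers are made up.
# lp <- c(0, 0.5, -0.2)                # linear predictors for J = 3 categories
# prob <- exp(lp) / sum(exp(lp))       # softmax giving the category probabilities
# rmultinom(n=1, size=20, prob=prob)   # one draw, as in the data augmentation step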
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/multinomial.glmMCMC.R
|
poisson.MVlerouxCAR <- function(formula, data=NULL, W, burnin, n.sample, n.chains=1, n.cores=1, thin=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "poisson")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
J <- ncol(Y)
N.all <- K * J
#### Create a missing list
if(n.miss>0)
{
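## which(t(which.miss)==0) indexes the transposed matrix in column-major order,
## i.e. the original Y in row-major order, so each location maps back to
## (row, column) = (ceiling(loc/J), loc - (row-1)*J).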
miss.locator <- array(NA, c(n.miss, 2))
colnames(miss.locator) <- c("row", "column")
locations <- which(t(which.miss)==0)
miss.locator[ ,1] <- ceiling(locations/J)
miss.locator[ ,2] <- locations - (miss.locator[ ,1]-1) * J
}else
{}
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### W matrix
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
if(nrow(W)!= K) stop("The number of rows in W does not match the number of spatial units.", call.=FALSE)
#### rho
if(is.null(rho))
{
rho <- runif(1)
fix.rho <- FALSE
}else
{
fix.rho <- TRUE
}
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.Sigma.df)) prior.Sigma.df <- 2
if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- rep(100000, J)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
if(!is.numeric(prior.Sigma.scale)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE)
if(sum(is.na(prior.Sigma.scale))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- poisson.MVlerouxCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, miss.locator=miss.locator, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, MALA=MALA, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- poisson.MVlerouxCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, miss.locator=miss.locator, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, MALA=MALA, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=poisson.MVlerouxCARMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, miss.locator=miss.locator, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, MALA=MALA, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * sum(results$accept.beta[1:J]) / sum(results$accept.beta[(J+1):(2*J)])
accept.phi <- 100 * results$accept[1] / results$accept[2]
accept.Sigma <- 100
if(!fix.rho)
{
accept.rho <- 100 * results$accept[3] / results$accept[4]
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.Sigma)
names(accept.final) <- c("beta", "phi", "rho", "Sigma")
## Compute the model fit criterion
mean.beta <- matrix(apply(results$samples.beta, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=J, byrow=T)
fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset)
deviance.fitted <- -2 * sum(dpois(x=as.numeric(t(Y)), lambda=as.numeric(t(fitted.mean)), log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- matrix(apply(results$samples.fitted, 2, mean), nrow=K, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
pearson.residuals <- response.residuals / sqrt(fitted.values)
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- results$samples.beta
for(r in 1:J)
{
samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(results$samples.beta[ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), Sigma=results$samples.Sigma, rho=mcmc(results$samples.rho), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, J*p), rep(accept.beta,J*p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c((J+1) ,7))
summary.hyper[1:J, 1] <- diag(apply(samples$Sigma, c(2,3), mean))
summary.hyper[1:J, 2] <- diag(apply(samples$Sigma, c(2,3), quantile, c(0.025)))
summary.hyper[1:J, 3] <- diag(apply(samples$Sigma, c(2,3), quantile, c(0.975)))
summary.hyper[1:J, 4] <- n.keep
summary.hyper[1:J, 5] <- accept.Sigma
summary.hyper[1:J, 6] <- diag(apply(samples$Sigma, c(2,3), effectiveSize))
for(r in 1:J)
{
summary.hyper[r, 7] <- geweke.diag(samples$Sigma[ ,r,r])$z
}
if(!fix.rho)
{
summary.hyper[(J+1), 1:3] <- c(mean(samples$rho), quantile(samples$rho, c(0.025, 0.975)))
summary.hyper[(J+1), 4:7] <- c(n.keep, accept.rho, effectiveSize(samples$rho), geweke.diag(samples$rho)$z)
}else
{
summary.hyper[(J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(J+1), 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept.beta"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1:J]) / sum(accept.temp2[ ,(J+1):(2*J)])
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.phi <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.Sigma <- 100
if(!fix.rho)
{
accept.rho <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.Sigma)
names(accept.final) <- c("beta", "phi", "rho", "Sigma")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.Sigma.list <- lapply(results, function(l) l[["samples.Sigma"]])
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- matrix(apply(samples.beta.matrix, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=J, byrow=T)
fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset)
deviance.fitted <- -2 * sum(dpois(x=as.numeric(t(Y)), lambda=as.numeric(t(fitted.mean)), log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- matrix(apply(samples.fitted.matrix, 2, mean), nrow=K, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
pearson.residuals <- response.residuals / sqrt(fitted.values)
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
for(r in 1:J)
{
samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)], X.indicator, X.mean, X.sd, p, FALSE)
}
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
rho.temp <- samples.rho.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
rho.temp[[j]] <- mcmc(samples.rho.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
rho.mcmc <- as.mcmc.list(rho.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rho.mcmc, Sigma=samples.Sigma.list, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, J*p), rep(accept.beta,J*p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Category ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c((J+1) ,7))
summary.hyper[1:J, 4] <- rep(n.keep, J)
summary.hyper[1:J, 5] <- rep(accept.Sigma, J)
for(r in 1:J)
{
test.vec <- samples.Sigma.list[[1]][ , r, r]
test.list <- as.list(rep(NA, n.chains))
test.list[[1]] <- mcmc(samples.Sigma.list[[1]][ , r, r])
for(i in 2:n.chains)
{
test.vec <- c(test.vec, samples.Sigma.list[[i]][ , r, r])
test.list[[i]] <- mcmc(samples.Sigma.list[[i]][ , r, r])
}
test.mcmc <- as.mcmc.list(test.list)
summary.hyper[r,1] <- mean(test.vec)
summary.hyper[r,2:3] <- quantile(test.vec, c(0.025, 0.975))
summary.hyper[r,6] <- effectiveSize(test.mcmc)
summary.hyper[r,7] <- gelman.diag(test.mcmc)$psrf[ ,2]
}
if(!fix.rho)
{
summary.hyper[(J+1), 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[(J+1), 4:7] <- c(n.keep, accept.rho, effectiveSize(rho.mcmc), gelman.diag(rho.mcmc)$psrf[ ,2])
}else
{
summary.hyper[(J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(J+1), 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Poisson (log link function)", "\nRandom effects model - Leroux MCAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
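#### A minimal usage sketch (not run), assuming the exported wrapper
#### MVS.CARleroux() dispatches to this function when family="poisson".
#### Y here is a K x J matrix of counts and W a binary K x K adjacency matrix;
#### the argument values are illustrative only.
# model <- MVS.CARleroux(formula=Y~x1, family="poisson", data=dat, W=W,
#                        burnin=20000, n.sample=120000, thin=10,
#                        n.chains=3, n.cores=3)
# model$summary.results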
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.MVlerouxCAR.R
|
poisson.MVlerouxCARMCMC <- function(Y, offset, X.standardised, W, rho, fix.rho, K, p, J, N.all, which.miss, n.miss, miss.locator, burnin, n.sample, thin, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.Sigma.df, prior.Sigma.scale, MALA, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
# library(MCMCpack)
##########################################
#### Generate the initial parameter values
##########################################
beta <- array(NA, c(p, J))
for(i in 1:J)
{
mod.glm <- glm(Y[ ,i]~X.standardised-1, offset=offset[ ,i], family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta[ ,i] <- rnorm(n=p, mean=beta.mean, sd=beta.sd)
}
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
res.temp <- log.Y - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi.vec <- rnorm(n=N.all, mean=0, sd=res.sd)
phi <- matrix(phi.vec, nrow=K, byrow=TRUE)
Sigma <- cov(phi)
Sigma.inv <- solve(Sigma)
Sigma.a <- rep(1, J)
regression <- X.standardised %*% beta
####################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- exp(X.standardised %*% beta + phi + offset)
Y.DA <- Y
###############################
#### Set up the MCMC quantities
###############################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, J*p))
samples.phi <- array(NA, c(n.keep, N.all))
samples.Sigma <- array(NA, c(n.keep, J, J))
samples.Sigma.a <- array(NA, c(n.keep, J))
if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, N.all))
samples.fitted <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,4)
accept.beta <- rep(0,2*J)
proposal.sd.beta <- rep(0.01, J)
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
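#### The degrees of freedom and shape below, together with the Sigma.a updates
#### in the chain, match the marginally noninformative hierarchical
#### inverse-Wishart prior of Huang and Wand (2013), in which Sigma | Sigma.a
#### is inverse-Wishart and each Sigma.a[j] is inverse-gamma.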
Sigma.post.df <- prior.Sigma.df + K + J - 1
Sigma.a.post.shape <- (prior.Sigma.df + J) / 2
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
Wstar <- diag(apply(W,1,sum)) - W
Q <- rho * Wstar + diag(rep(1-rho,K))
#### Create the determinant
if(!fix.rho)
{
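## Q = rho*Wstar + (1-rho)*I shares its eigenvectors with Wstar, so log(det(Q))
## reduces to the sum of log(rho*lambda_i + 1 - rho) over the eigenvalues
## lambda_i of Wstar, computed once here and reused for every rho update.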
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
islands.all <- rep(islands,J)
n.islands <- max(W.islands$nc)
if(rho==1) Sigma.post.df <- prior.Sigma.df + K + J - 1 - n.islands
#### Specify vector variants
Y.vec <- as.numeric(t(Y))
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[miss.locator] <- rpois(n=n.miss, lambda=fitted[miss.locator])
}else
{}
###################
## Sample from beta
###################
offset.temp <- phi + offset
for(r in 1:J)
{
if(MALA)
{
temp <- poissonbetaupdateMALA(X.standardised, K, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
}else
{
temp <- poissonbetaupdateRW(X.standardised, K, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
}
beta[ ,r] <- temp[[1]]
accept.beta[r] <- accept.beta[r] + temp[[2]]
accept.beta[(r+J)] <- accept.beta[(r+J)] + n.beta.block
}
regression <- X.standardised %*% beta
##################
## Sample from phi
##################
den.offset <- rho * W.triplet.sum + 1 - rho
phi.offset <- regression + offset
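## Joint proposal for each area's J-vector of random effects: correlated
## innovations are drawn as t(chol(proposal.sd.phi * Sigma)) %*% z, so each row
## of innovations is N(0, proposal.sd.phi * Sigma) and the random walk matches
## the current between-outcome covariance.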
Chol.Sigma <- t(chol(proposal.sd.phi*Sigma))
z.mat <- matrix(rnorm(n=N.all, mean=0, sd=1), nrow=J, ncol=K)
innovations <- t(Chol.Sigma %*% z.mat)
temp1 <- poissonmcarupdateRW(W.triplet, W.begfin, K, J, phi, Y.DA, phi.offset, den.offset, Sigma.inv, rho, proposal.sd.phi, innovations)
phi <- temp1[[1]]
for(r in 1:J)
{
phi[ ,r] <- phi[ ,r] - mean(phi[ ,r])
}
accept[1] <- accept[1] + temp1[[2]]
accept[2] <- accept[2] + K
####################
## Sample from Sigma
####################
Sigma.post.scale <- 2 * prior.Sigma.df * diag(1 / Sigma.a) + t(phi) %*% Q %*% phi
Sigma <- riwish(Sigma.post.df, Sigma.post.scale)
Sigma.inv <- solve(Sigma)
######################
## Sample from Sigma.a
######################
Sigma.a.posterior.scale <- prior.Sigma.df * diag(Sigma.inv) + 1 / prior.Sigma.scale^2
Sigma.a <- 1 / rgamma(J, Sigma.a.post.shape, scale=(1/Sigma.a.posterior.scale))
##################
## Sample from rho
##################
if(!fix.rho)
{
## Propose a new value
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
Q.prop <- proposal.rho * Wstar + diag(rep(1-proposal.rho, K))
det.Q.prop <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
## Compute the acceptance rate
logprob.current <- 0.5 * J * det.Q - 0.5 * sum(diag(t(phi) %*% Q %*% phi %*% Sigma.inv))
logprob.proposal <- 0.5 * J * det.Q.prop - 0.5 * sum(diag(t(phi) %*% Q.prop %*% phi %*% Sigma.inv))
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- det.Q.prop
Q <- Q.prop
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
}else
{}
#########################
## Calculate the deviance
#########################
fitted <- exp(regression + phi + offset)
loglike <- dpois(x=as.numeric(t(Y)), lambda=as.numeric(t(fitted)), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- as.numeric(beta)
samples.phi[ele, ] <- as.numeric(t(phi))
samples.Sigma[ele, , ] <- Sigma
samples.Sigma.a[ele, ] <- Sigma.a
if(!fix.rho) samples.rho[ele, ] <- rho
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- as.numeric(t(fitted))
if(n.miss>0) samples.Y[ele, ] <- Y.DA[miss.locator]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
for(r in 1:J)
{
if(p>2)
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 40, 50)
}else
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 30, 40)
}
}
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
if(!fix.rho)
{
proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5)
}
accept <- c(0,0,0,0)
accept.beta <- rep(0,2*J)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
if(fix.rho) samples.rho=NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.Sigma=samples.Sigma, samples.Sigma.a=samples.Sigma.a, samples.rho=samples.rho, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept, accept.beta=accept.beta)
#### Return the results
return(chain.results)
}
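#### Illustrative stand-alone version (not run) of the truncated-normal
#### random-walk step used for rho above; the Hastings correction accounts for
#### the asymmetry that truncation to [0, 1] introduces. Values are made up.
# library(truncnorm)
# rho <- 0.8; sd.rho <- 0.02
# rho.prop <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=sd.rho)
# hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=rho.prop, sd=sd.rho)) -
#             log(dtruncnorm(x=rho.prop, a=0, b=1, mean=rho, sd=sd.rho))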
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.MVlerouxCARMCMC.R
|
poisson.RAB <- function(formula, data=NULL, W, V, nlambda, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
which.present <- which(!is.na(Y))
n.miss <- frame.results$n.miss
if(p==0) stop("The model (via the formula object) must at least have an intercept term.", call.=FALSE)
#### Ancillary data
if(!is.numeric(V)) stop("The ancillary data V is not a vector.", call.=FALSE)
if(length(V) != K) stop("The ancillary data V is not the same length as the remaining data.", call.=FALSE)
if(sum(is.na(V))>0) stop("The ancillary data V has missing 'NA' values.", call.=FALSE)
if(!is.numeric(V)) stop("The ancillary data V has non-numeric values.", call.=FALSE)
#### Neighbourhood matrix W
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
if(ncol(W)!= nrow(W)) stop("W is not a square matrix.", call.=FALSE)
if(sum(is.na(W))>0) stop("W has missing 'NA' values.", call.=FALSE)
if(!is.numeric(W)) stop("W has non-numeric values.", call.=FALSE)
if(min(W)<0) stop("W has negative elements.", call.=FALSE)
if(sum(W!=t(W))>0) stop("W is not symmetric.", call.=FALSE)
if(min(apply(W, 1, sum))==0) stop("W has some areas with no neighbours (one of the row sums equals zero).", call.=FALSE)
#### Create the shortest path matrix
graph.W <- graph.adjacency(W, mode="undirected")
graph.dist <- shortest.paths(graph.W)
#####################################################
#### Create the basis functions and the data elements
#####################################################
#### Create the three sets of basis functions
B.anisotropic.exp <- basiscomputeexponential(D=graph.dist, nrows=K, ncols=K, Z=V, startcol=1)
B.anisotropic.inv <- basiscomputeinverse(D=graph.dist, nrows=K, ncols=K, Z=V, startcol=1)
B.anisotropic.linear <- basiscomputelinear(D=graph.dist, nrows=K, ncols=K, Z=V, startcol=1)
#### Combine with the covariate matrix if needed
X.anisotropic.exp <- cbind(X, B.anisotropic.exp)
X.anisotropic.inv <- cbind(X, B.anisotropic.inv)
X.anisotropic.linear <- cbind(X, B.anisotropic.linear)
#### Remove an intercept term if it is present
if(var(X.anisotropic.exp[ ,1])==0)
{
X.anisotropic.exp <- X.anisotropic.exp[ ,-1]
X.anisotropic.inv <- X.anisotropic.inv[ ,-1]
X.anisotropic.linear <- X.anisotropic.linear[ ,-1]
p <- p-1
}else
{}
#### Remove rows with missing values for model fitting
Y.train <- Y[which.present]
offset.train <- offset[which.present]
K.train <- length(Y.train)
X.anisotropic.exp.train <- X.anisotropic.exp[which.present, ]
X.anisotropic.inv.train <- X.anisotropic.inv[which.present, ]
X.anisotropic.linear.train <- X.anisotropic.linear[which.present, ]
W.train <- W[which.present, which.present]
W.list.train <- mat2listw(W.train, style="B")
########################################
#### Fit the models and make predictions
########################################
#### Update the user on the functions progress
if(verbose) cat("Fitting the model.")
#### Fit the models with the 3 different types of basis functions
penfac <- c(rep(0, p), rep(1,K))
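## penalty.factor exempts the p covariates from shrinkage (factor 0) while
## penalising the K basis-function coefficients (factor 1); alpha=0 makes the
## glmnet fit a pure ridge regression.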
mod.ridge.exp <- glmnet(x=X.anisotropic.exp.train, y=Y.train, offset=offset.train, alpha=0, nlambda=nlambda, penalty.factor = penfac, family = "poisson", intercept=TRUE, standardize=FALSE)
mod.ridge.inv <- glmnet(x=X.anisotropic.inv.train, y=Y.train, offset=offset.train, alpha=0, nlambda=nlambda, penalty.factor = penfac, family = "poisson", intercept=TRUE, standardize=FALSE)
mod.ridge.linear <- glmnet(x=X.anisotropic.linear.train, y=Y.train, offset=offset.train, alpha=0, nlambda=nlambda, penalty.factor = penfac, family = "poisson", intercept=TRUE, standardize=FALSE)
#### Compute the level of residual spatial autocorrelation for each model and lambda value
## Remove 0s from Y for computing residual Moran's I
Ytemp <- Y.train
Ytemp[Y.train==0] <- 0.1
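## For each candidate lambda the Moran's I statistic of the log-scale working
## residuals is computed, and the lambda whose residuals are closest to spatial
## independence (|I| nearest zero) is selected for each basis type.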
## Exponential model
fits.lp.exp <- predict(object=mod.ridge.exp, newx=X.anisotropic.exp.train, newoffset=offset.train)
m <- ncol(fits.lp.exp)
results.exp <- data.frame(lambda=mod.ridge.exp$lambda, I=rep(NA, m))
for(j in 1:m)
{
resids <- log(Ytemp) - fits.lp.exp[ ,j]
results.exp$I[j] <- moran.mc(x=resids, listw=W.list.train, zero.policy = TRUE, nsim=1)$statistic
}
row.exp <- which(abs(results.exp$I)==min(abs(results.exp$I)))
moran.exp <- results.exp$I[row.exp]
## Inverse model
fits.lp.inv <- predict(object=mod.ridge.inv, newx=X.anisotropic.inv.train, newoffset=offset.train)
m <- ncol(fits.lp.inv)
results.inv <- data.frame(lambda=mod.ridge.inv$lambda, I=rep(NA, m))
for(j in 1:m)
{
resids <- log(Ytemp) - fits.lp.inv[ ,j]
results.inv$I[j] <- moran.mc(x=resids, listw=W.list.train, zero.policy = TRUE, nsim=1)$statistic
}
row.inv <- which(abs(results.inv$I)==min(abs(results.inv$I)))
moran.inv <- results.inv$I[row.inv]
## Linear model
fits.lp.linear <- predict(object=mod.ridge.linear, newx=X.anisotropic.linear.train, newoffset=offset.train)
m <- ncol(fits.lp.linear)
results.linear <- data.frame(lambda=mod.ridge.linear$lambda, I=rep(NA, m))
for(j in 1:m)
{
resids <- log(Ytemp) - fits.lp.linear[ ,j]
results.linear$I[j] <- moran.mc(x=resids, listw=W.list.train, zero.policy = TRUE, nsim=1)$statistic
}
row.linear <- which(abs(results.linear$I)==min(abs(results.linear$I)))
moran.linear <- results.linear$I[row.linear]
#### Choose the final model
moran.all <- abs(c(moran.exp, moran.inv, moran.linear))
model <- which(moran.all == min(moran.all))[1]
if(model==1)
{
model.string <- c("Likelihood model - Poisson (log link function)", "Spatial structure model - Anistropic exponential distance-decay basis functions")
model <- mod.ridge.exp
row <- row.exp
X.final <- X.anisotropic.exp
X.final.train <- X.final[which.present, ]
lambda.hat <- results.exp$lambda[row] * K.train
I <- results.exp$I[row]
}else if(model==2)
{
model.string <- c("Likelihood model - Poisson (log link function)", "Spatial structure model - Anistropic inverse distance-decay basis functions")
model <- mod.ridge.inv
row <- row.inv
X.final <- X.anisotropic.inv
X.final.train <- X.final[which.present, ]
lambda.hat <- results.inv$lambda[row] * K.train
I <- results.inv$I[row]
}else if(model==3)
{
model.string <- c("Likelihood model - Poisson (log link function)", "Spatial structure model - Anistropic linear distance-decay basis functions")
model <- mod.ridge.linear
row <- row.linear
X.final <- X.anisotropic.linear
X.final.train <- X.final[which.present, ]
lambda.hat <- results.linear$lambda[row] * K.train
I <- results.linear$I[row]
}else{}
#### Compute the parameter estimate for beta
beta.hat <- c(model$a0[row], model$beta[ ,row])
#####################################
#### Summarise and return the results
#####################################
#### Update the user on the progress
if(verbose) cat("\nSummarising results.\n")
#### Compute the final fitted / predicted values and residuals
lp.all <- as.numeric(beta.hat[1] + X.final %*% beta.hat[-1] + offset)
fitted.values <- exp(lp.all)
response.residuals <- Y - fitted.values
pearson.residuals <- response.residuals / sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
#### Format the final X matrix returned
X.extend <- cbind(rep(1, K), X.final)
colnames(X.extend)[1] <- "(Intercept)"
colnames(X.extend)[(p+2):(p+K+1)] <- paste("Basis function", 1:K, sep=" ")
#######################
#### Return the results
#######################
results <- list(beta.hat=beta.hat, sigma2.hat=NA, lambda.hat=lambda.hat, I=I, fitted.values=fitted.values, residuals=residuals, formula=formula, model.string=model.string, X=X.extend, model=model)
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
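#### A minimal usage sketch (not run), assuming the exported wrapper S.RAB()
#### dispatches here when family="poisson". V is a K-vector of ancillary data
#### driving the anisotropic basis functions; names and values are illustrative.
# model <- S.RAB(formula=Y~x1, family="poisson", data=dat, W=W, V=V,
#                nlambda=100, verbose=TRUE)
# model$model.string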
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.RAB.R
|
poisson.bymCAR <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, prior.sigma2=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "poisson")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.sigma2)) prior.sigma2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
common.prior.var.check(prior.sigma2)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- poisson.bymCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- poisson.bymCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=poisson.bymCARMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.phi <- 100 * results$accept[3] / results$accept[4]
accept.theta <- 100 * results$accept[5] / results$accept[6]
accept.tau2 <- 100
accept.sigma2 <- 100
accept.final <- c(accept.beta, accept.phi, accept.theta, accept.tau2, accept.sigma2)
names(accept.final) <- c("beta", "phi", "theta", "tau2", "sigma2")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.psi <- apply(results$samples.psi, 2, mean)
fitted.mean <- exp(X.standardised %*% mean.beta + mean.psi + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), psi=mcmc(results$samples.psi), tau2=mcmc(results$samples.tau2), sigma2=mcmc(results$samples.sigma2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.hyper[2, 1:3] <- c(mean(samples$sigma2), quantile(samples$sigma2, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.sigma2, effectiveSize(samples$sigma2), geweke.diag(samples$sigma2)$z)
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "sigma2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.phi <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.theta <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
accept.tau2 <- 100
accept.sigma2 <- 100
accept.final <- c(accept.beta, accept.phi, accept.theta, accept.tau2, accept.sigma2)
names(accept.final) <- c("beta", "phi", "theta", "tau2", "sigma2")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.psi.list <- lapply(results, function(l) l[["samples.psi"]])
samples.sigma2.list <- lapply(results, function(l) l[["samples.sigma2"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.psi.matrix <- do.call(what=rbind, args=samples.psi.list)
samples.sigma2.matrix <- do.call(what=rbind, args=samples.sigma2.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.psi <- apply(samples.psi.matrix, 2, mean)
fitted.mean <- exp(X.standardised %*% mean.beta + mean.psi + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
psi.temp <- samples.psi.list
sigma2.temp <- samples.sigma2.list
tau2.temp <- samples.tau2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
psi.temp[[j]] <- mcmc(samples.psi.list[[j]])
sigma2.temp[[j]] <- mcmc(samples.sigma2.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
psi.mcmc <- as.mcmc.list(psi.temp)
sigma2.mcmc <- as.mcmc.list(sigma2.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, psi=psi.mcmc, tau2=tau2.mcmc, sigma2=sigma2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.hyper[2, 1:3] <- c(mean(samples.sigma2.matrix), quantile(samples.sigma2.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.sigma2, effectiveSize(sigma2.mcmc), gelman.diag(sigma2.mcmc)$psrf[ ,2])
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "sigma2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Poisson (log link function)", "\nRandom effects model - BYM CAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
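#### A minimal usage sketch (not run), assuming the exported wrapper S.CARbym()
#### dispatches to this function when family="poisson". The expected counts
#### enter through the offset; names and values are illustrative only.
# model <- S.CARbym(formula=observed~x1+offset(log(expected)), family="poisson",
#                   data=dat, W=W, burnin=20000, n.sample=120000, thin=10)
# model$summary.results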
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.bymCAR.R
|
poisson.bymCARMCMC <- function(Y, offset, X.standardised, W, K, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, prior.sigma2, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
#
#
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
res.temp <- log.Y - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
tau2 <- var(phi) / 10
theta <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
sigma2 <- var(theta) / 10
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- exp(as.numeric(X.standardised %*% beta) + phi + theta + offset)
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.re <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.sigma2 <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
## Metropolis quantities
accept <- rep(0,6)
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
proposal.sd.theta <- 0.1
sigma2.posterior.shape <- prior.sigma2[1] + 0.5 * K
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
tau2.posterior.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0])
}else
{}
####################
## Sample from beta
####################
offset.temp <- phi + offset + theta
if(MALA)
{
temp <- poissonbetaupdateMALA(X.standardised, K, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(X.standardised, K, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
####################
## Sample from phi
####################
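## The BYM spatial surface uses the intrinsic CAR prior, obtained by calling
## the Leroux full-conditional update with rho fixed at 1; identifiability is
## then imposed by mean-centring phi within the first island below.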
beta.offset <- X.standardised %*% beta + theta + offset
temp1 <- poissoncarupdateRW(Wtriplet=W.triplet, Wbegfin=W.begfin, W.triplet.sum, nsites=K, phi=phi, tau2=tau2, y=Y.DA, phi_tune=proposal.sd.phi, rho=1, offset=beta.offset)
phi <- temp1[[1]]
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K
####################
## Sample from theta
####################
beta.offset <- as.numeric(X.standardised %*% beta) + phi + offset
temp2 <- poissonindepupdateRW(nsites=K, theta=theta, sigma2=sigma2, y=Y.DA, theta_tune=proposal.sd.theta, offset=beta.offset)
theta <- temp2[[1]]
theta <- theta - mean(theta)
accept[5] <- accept[5] + temp2[[2]]
accept[6] <- accept[6] + K
###################
## Sample from tau2
###################
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, 1)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
#####################
## Sample from sigma2
#####################
sigma2.posterior.scale <- prior.sigma2[2] + 0.5*sum(theta^2)
sigma2 <- 1 / rgamma(1, sigma2.posterior.shape, scale=(1/sigma2.posterior.scale))
#########################
## Calculate the deviance
#########################
lp <- as.numeric(X.standardised %*% beta) + phi + theta + offset
fitted <- exp(lp)
loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.re[ele, ] <- phi + theta
samples.tau2[ele, ] <- tau2
samples.sigma2[ele, ] <- sigma2
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{
}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
proposal.sd.theta <- common.accceptrates1(accept[5:6], proposal.sd.theta, 40, 50)
accept <- c(0,0,0,0,0,0)
}else
{
}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
chain.results <- list(samples.beta=samples.beta, samples.psi=samples.re, samples.tau2=samples.tau2, samples.sigma2=samples.sigma2, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
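#### Illustrative stand-alone version (not run) of the conjugate Gibbs step
#### used for tau2 above, writing the quadratic form that quadform() evaluates
#### explicitly. Q here is the intrinsic CAR precision diag(rowSums(W)) - W,
#### and the object names are made up for the sketch.
# Q <- diag(rowSums(W)) - W
# tau2.shape <- prior.tau2[1] + 0.5 * (K - n.islands)
# tau2.scale <- prior.tau2[2] + 0.5 * as.numeric(t(phi) %*% Q %*% phi)
# tau2 <- 1 / rgamma(1, tau2.shape, scale=1/tau2.scale)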
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.bymCARMCMC.R
|
poisson.dissimilarityCAR <- function(formula, data=NULL, W, Z, W.binary=TRUE, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "poisson")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Dissimilarity metric matrix
if(!is.list(Z)) stop("Z is not a list object.", call.=FALSE)
if(sum(is.na(as.numeric(lapply(Z, sum, na.rm=FALSE))))>0) stop("Z contains missing 'NA' values.", call.=FALSE)
q <- length(Z)
if(sum(as.numeric(lapply(Z,nrow))==K) <q) stop("Z contains matrices of the wrong size.", call.=FALSE)
if(sum(as.numeric(lapply(Z,ncol))==K) <q) stop("Z contains matrices of the wrong size.", call.=FALSE)
if(min(as.numeric(lapply(Z,min)))<0) stop("Z contains negative values.", call.=FALSE)
if(!is.logical(W.binary)) stop("W.binary is not TRUE or FALSE.", call.=FALSE)
if(length(W.binary)!=1) stop("W.binary has the wrong length.", call.=FALSE)
if(W.binary)
{
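## With binary W a border between areas is removed when its weight
## exp(-sum_q alpha_q * z) falls below 1/2, so alpha = -log(0.5)/z is the value
## at which a dissimilarity of z just cuts a border: alpha.threshold (from the
## maximum z) is the smallest alpha able to remove any border, while alpha.max
## (from the median non-zero z) bounds the support of each alpha_q.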
alpha.max <- rep(NA,q)
alpha.threshold <- rep(NA,q)
for(k in 1:q)
{
Z.crit <- quantile(as.numeric(Z[[k]])[as.numeric(Z[[k]])!=0], 0.5)
alpha.max[k] <- -log(0.5) / Z.crit
alpha.threshold[k] <- -log(0.5) / max(Z[[k]])
}
}else
{
alpha.max <- rep(50, q)
}
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- poisson.dissimilarityCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, Z=Z, W.binary=W.binary, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, alpha.max=alpha.max, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- poisson.dissimilarityCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, Z=Z, W.binary=W.binary, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, alpha.max=alpha.max, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=poisson.dissimilarityCARMCMC, Y=Y, offset=offset, X.standardised=X.standardised, Z=Z, W.binary=W.binary, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, alpha.max=alpha.max, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.phi <- 100 * results$accept[3] / results$accept[4]
accept.tau2 <- 100
accept.alpha <- 100 * results$accept[5] / results$accept[6]
accept.final <- c(accept.beta, accept.phi, accept.tau2, accept.alpha)
names(accept.final) <- c("beta", "phi", "tau2", "alpha")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.phi <- apply(results$samples.phi, 2, mean)
fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), alpha=mcmc(results$samples.alpha), tau2=mcmc(results$samples.tau2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.alpha <- t(rbind(apply(samples$alpha, 2, mean), apply(samples$alpha, 2, quantile, c(0.025, 0.975))))
summary.alpha <- cbind(summary.alpha, rep(n.keep, q), rep(accept.alpha,q), effectiveSize(samples$alpha), geweke.diag(samples$alpha)$z)
if(!is.null(names(Z)))
{
rownames(summary.alpha) <- names(Z)
}else
{
names.Z <- rep(NA,q)
for(j in 1:q)
{
names.Z[j] <- paste("Z[[",j, "]]", sep="")
}
rownames(summary.alpha) <- names.Z
}
summary.hyper <- array(NA, c(1 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.results <- rbind(summary.beta, summary.hyper, summary.alpha)
if(W.binary)
{
alpha.min <- c(rep(NA, (p+1)), alpha.threshold)
summary.results <- cbind(summary.results, alpha.min)
}else
{}
rownames(summary.results)[(p+1)] <- c("tau2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
if(W.binary) summary.results[ , 8] <- round(summary.results[ , 8], 4)
#### Create the posterior medians for the neighbourhood matrix W
W.posterior <- array(NA, c(K,K))
if(W.binary)
{
W.border.prob <- array(NA, c(K,K))
}else
{
W.border.prob <- NA
}
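#### For each pair with W[i,j] = 1 the sampled weight is w_ij = exp(-alpha' z_ij). In the binary model
#### W.posterior[i,j] is 1 if at least half of the posterior samples give w_ij >= 0.5, and
#### W.border.prob[i,j] is the posterior probability that w_ij < 0.5, i.e. of a boundary between areas
#### i and j; in the non-binary model only the posterior median weight is stored.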
for(i in 1:K)
{
for(j in 1:K)
{
if(W[i,j]==1)
{
z.temp <- NA
for(k in 1:q)
{
z.temp <- c(z.temp, Z[[k]][i,j])
}
z.temp <- z.temp[-1]
w.temp <- exp(-samples$alpha %*% z.temp)
if(W.binary)
{
w.posterior <- as.numeric(w.temp>=0.5)
W.posterior[i,j] <- ceiling(median(w.posterior))
W.border.prob[i,j] <- (1 - sum(w.posterior) / length(w.posterior))
}else
{
W.posterior[i,j] <- median(w.temp)
}
}else
{
}
}
}
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.phi <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.tau2 <- 100
accept.alpha <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
accept.final <- c(accept.beta, accept.phi, accept.tau2, accept.alpha)
names(accept.final) <- c("beta", "phi", "tau2", "alpha")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.phi <- apply(samples.phi.matrix, 2, mean)
fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
alpha.temp <- samples.alpha.list
tau2.temp <- samples.tau2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
alpha.temp[[j]] <- mcmc(samples.alpha.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
alpha.mcmc <- as.mcmc.list(alpha.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, alpha=alpha.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.alpha <- t(rbind(apply(samples.alpha.matrix, 2, mean), apply(samples.alpha.matrix, 2, quantile, c(0.025, 0.975))))
summary.alpha <- cbind(summary.alpha, rep(n.keep, q), rep(accept.alpha,q), effectiveSize(alpha.mcmc), gelman.diag(alpha.mcmc)$psrf[ ,2])
if(!is.null(names(Z)))
{
rownames(summary.alpha) <- names(Z)
}else
{
names.Z <- rep(NA,q)
for(j in 1:q)
{
names.Z[j] <- paste("Z[[",j, "]]", sep="")
}
rownames(summary.alpha) <- names.Z
}
summary.hyper <- array(NA, c(1 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.results <- rbind(summary.beta, summary.hyper, summary.alpha)
if(W.binary)
{
alpha.min <- c(rep(NA, (p+1)), alpha.threshold)
summary.results <- cbind(summary.results, alpha.min)
}else
{}
rownames(summary.results)[(p+1)] <- c("tau2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
if(W.binary) summary.results[ , 8] <- round(summary.results[ , 8], 4)
#### Create the posterior medians for the neighbourhood matrix W
W.posterior <- array(NA, c(K,K))
if(W.binary)
{
W.border.prob <- array(NA, c(K,K))
}else
{
W.border.prob <- NA
}
for(i in 1:K)
{
for(j in 1:K)
{
if(W[i,j]==1)
{
z.temp <- NA
for(k in 1:q)
{
z.temp <- c(z.temp, Z[[k]][i,j])
}
z.temp <- z.temp[-1]
w.temp <- exp(-samples.alpha.matrix %*% z.temp)
if(W.binary)
{
w.posterior <- as.numeric(w.temp>=0.5)
W.posterior[i,j] <- ceiling(median(w.posterior))
W.border.prob[i,j] <- (1 - sum(w.posterior) / length(w.posterior))
}else
{
W.posterior[i,j] <- median(w.temp)
}
}else
{
}
}
}
}
###################################
#### Compile and return the results
###################################
## Generate the dissimilarity equation
if(q==1)
{
dis.eq <- rownames(summary.results)[nrow(summary.results)]
}else
{
dis.eq <- paste(rownames(summary.alpha), "+")
len <- length(dis.eq)
dis.eq[len] <- substr(dis.eq[len],1,nchar(dis.eq[len])-1)
}
if(W.binary)
{
model.string <- c("Likelihood model - Poisson (log link function)", "\nRandom effects model - Binary dissimilarity CAR", "\nDissimilarity metrics - ", dis.eq, "\n")
}else
{
model.string <- c("Likelihood model - Poisson (log link function)", "\nRandom effects model - Non-binary dissimilarity CAR", "\nDissimilarity metrics - ", dis.eq, "\n")
}
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=list(W.posterior=W.posterior, W.border.prob=W.border.prob), formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
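#### A minimal usage sketch (not run): simulated counts on a 10 x 10 lattice, with W built via spdep
#### and a single hypothetical dissimilarity metric; all object names below are illustrative only.
## library(CARBayes); library(spdep)
## W <- nb2mat(cell2nb(10, 10), style="B")
## K <- nrow(W)
## Z <- list(as.matrix(dist(runif(K))))    # one non-negative K x K dissimilarity matrix
## Y <- rpois(K, lambda=10)
## model <- poisson.dissimilarityCAR(Y ~ 1, W=W, Z=Z, burnin=10000, n.sample=30000)
## model$summary.results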
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.dissimilarityCAR.R
|
poisson.dissimilarityCARMCMC <- function(Y, offset, X.standardised, Z, W.binary, W, K, p, q, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, alpha.max, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
# library(spam)
#
#
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
res.temp <- log.Y - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
tau2 <- var(phi) / 10
alpha <- runif(n=q, min=rep(0,q), max=alpha.max/(2+q))
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- exp(as.numeric(X.standardised %*% beta) + phi + offset)
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.alpha <- array(NA, c(n.keep, q))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
## Metropolis quantities
accept <- rep(0,6)
proposal.sd.alpha <- 0.02 * alpha.max
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat.disimilarity(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
spam.W <- W.quants$spam.W
#### Create the Z triplet form
Z.triplet <- array(NA, c(n.triplet, q))
for(i in 1:n.triplet)
{
row <- W.triplet[i,1]
col <- W.triplet[i,2]
for(j in 1:q)
{
Z.triplet[i,j] <- Z[[j]][row, col]
}
}
if(W.binary)
{
W.triplet[ ,3] <- as.numeric(exp(-Z.triplet %*% alpha)>=0.5)
}else
{
W.triplet[ ,3] <- as.numeric(exp(-Z.triplet %*% alpha))
}
W.triplet.sum <- tapply(W.triplet[ ,3], W.triplet[ ,1], sum)
spam.W@entries <- W.triplet[ ,3]
spam.Wprop <- spam.W
W.tripletprop <- W.triplet
#### Create the matrix form of Q
rho <- 0.99
Q <- -rho * spam.W
diag(Q) <- rho * rowSums(spam.W) + 1-rho
det.Q <- sum(log(diag(chol.spam(Q))))
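#### Note: with Q = R'R from the Cholesky factorisation, log|Q| = 2 * sum(log(diag(R))), so det.Q here
#### stores 0.5 * log|Q|, consistent with the eigenvalue form 0.5 * sum(log(rho * lambda + (1 - rho)))
#### used for the Leroux model elsewhere in the package.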
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0])
}else
{}
####################
## Sample from beta
####################
offset.temp <- phi + offset
if(MALA)
{
temp <- poissonbetaupdateMALA(X.standardised, K, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(X.standardised, K, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
####################
## Sample from phi
####################
beta.offset <- as.numeric(X.standardised %*% beta) + offset
temp1 <- poissoncarupdateRW(Wtriplet=W.triplet, Wbegfin=W.begfin, W.triplet.sum, nsites=K, phi=phi, tau2=tau2, y=Y.DA, phi_tune=proposal.sd.phi, rho=rho, offset=beta.offset)
phi <- temp1[[1]]
phi <- phi - mean(phi)
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K
##################
## Sample from tau2
##################
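## The inverse-gamma prior is conjugate: assuming quadform() returns the half quadratic form
## 0.5 * phi' Q phi, the full conditional is tau2 ~ IG(shape = prior.tau2[1] + K/2,
## scale = prior.tau2[2] + 0.5 * phi' Q phi), sampled directly as a Gibbs step (hence 100% acceptance).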
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, rho)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
######################
#### Sample from alpha
######################
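## The full conditional for alpha is proportional to |Q(alpha)|^{1/2} * exp(-phi' Q(alpha) phi / (2 * tau2))
## on [0, alpha.max]; det.Q and quadform() are assumed to already carry the 1/2 factors, so the Metropolis
## ratio below compares det.Q - (quadratic form) / tau2 between the current and proposed values, with a
## Hastings correction for the asymmetric truncated normal proposal.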
## Propose a value
proposal.alpha <- alpha
for(r in 1:q)
{
proposal.alpha[r] <- rtruncnorm(n=1, a=0, b=alpha.max[r], mean=alpha[r], sd=proposal.sd.alpha[r])
}
## Create the proposal values for W and Q
if(W.binary)
{
W.tripletprop[ ,3] <- as.numeric(exp(-Z.triplet %*% proposal.alpha)>=0.5)
}else
{
W.tripletprop[ ,3] <- as.numeric(exp(-Z.triplet %*% proposal.alpha))
}
W.triplet.sum.prop <- tapply(W.tripletprop[ ,3], W.tripletprop[ ,1], sum)
spam.Wprop@entries <- W.tripletprop[ ,3]
Qprop <- -rho * spam.Wprop
diag(Qprop) <- rho * rowSums(spam.Wprop) + 1-rho
det.Qprop <- sum(log(diag(chol.spam(Qprop))))
temp3 <- quadform(W.tripletprop, W.triplet.sum.prop, n.triplet, K, phi, phi, rho)
#### Calculate the acceptance probability
logprob.current <- det.Q - temp2 / tau2
logprob.proposal <- det.Qprop - temp3 / tau2
hastings <- sum(log(dtruncnorm(x=alpha, a=rep(0,q), b=alpha.max, mean=proposal.alpha, sd=proposal.sd.alpha)) - log(dtruncnorm(x=proposal.alpha, a=rep(0,q), b=alpha.max, mean=alpha, sd=proposal.sd.alpha)))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposed value
if(prob > runif(1))
{
alpha <- proposal.alpha
det.Q <- det.Qprop
W.triplet[ ,3] <- W.tripletprop[ ,3]
W.triplet.sum <- W.triplet.sum.prop
accept[5] <- accept[5] + 1
}else
{
}
accept[6] <- accept[6] + 1
#########################
## Calculate the deviance
#########################
lp <- as.numeric(X.standardised %*% beta) + phi + offset
fitted <- exp(lp)
loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.tau2[ele, ] <- tau2
samples.alpha[ele, ] <- alpha
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
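## During burn-in only, every 100th iteration the proposal standard deviations are rescaled (presumably
## up when acceptance exceeds the upper bound and down when below the lower bound), targeting acceptance
## rates of 40-50% (30-40% for beta when p <= 2), after which the counters are reset.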
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
proposal.sd.alpha <- common.accceptrates2(accept[5:6], proposal.sd.alpha, 40, 50, alpha.max/4)
accept <- c(0,0,0,0,0,0)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.alpha=samples.alpha, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.dissimilarityCARMCMC.R
|
poisson.glm <- function(formula, data=NULL, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "poisson")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- poisson.glmMCMC(Y=Y, offset=offset, X.standardised=X.standardised, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- poisson.glmMCMC(Y=Y, offset=offset, X.standardised=X.standardised, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=poisson.glmMCMC, Y=Y, offset=offset, X.standardised=X.standardised, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.final <- c(accept.beta)
names(accept.final) <- c("beta")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
fitted.mean <- exp(X.standardised %*% mean.beta + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.results <- summary.beta
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.final <- c(accept.beta)
names(accept.final) <- c("beta")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
fitted.mean <- exp(X.standardised %*% mean.beta + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.results <- summary.beta
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Poisson (log link function)", "\nRandom effects model - None\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
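#### A minimal usage sketch (not run); the covariate x and the coefficient values are illustrative only.
## x <- rnorm(100)
## Y <- rpois(100, lambda=exp(0.1 + 0.2 * x))
## model <- poisson.glm(Y ~ x, burnin=10000, n.sample=30000)
## model$summary.results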
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.glm.R
|
poisson.glmMCMC <- function(Y, offset, X.standardised, K, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- 10 * sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- exp(as.numeric(X.standardised %*% beta) + offset)
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,2)
proposal.sd.beta <- 0.01
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
for(j in 1:n.sample)
{
######################################
#### Sample from Y - data augmentation
######################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0])
}else
{}
#####################
#### Sample from beta
#####################
offset.temp <- offset
if(MALA)
{
temp <- poissonbetaupdateMALA(X.standardised, K, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(X.standardised, K, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
#########################
## Calculate the deviance
#########################
lp <- as.numeric(X.standardised %*% beta) + offset
fitted <- exp(lp)
loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <-Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
accept <- rep(0,2)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### Close the progress bar if used
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
chain.results <- list(samples.beta=samples.beta, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.glmMCMC.R
|
poisson.lerouxCAR <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "poisson")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### rho
if(is.null(rho))
{
rho <- runif(1)
fix.rho <- FALSE
}else
{
fix.rho <- TRUE
}
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- poisson.lerouxCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- poisson.lerouxCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=poisson.lerouxCARMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.phi <- 100 * results$accept[3] / results$accept[4]
accept.tau2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * results$accept[5] / results$accept[6]
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "tau2")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.phi <- apply(results$samples.phi, 2, mean)
fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), rho=mcmc(results$samples.rho), tau2=mcmc(results$samples.tau2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
if(!fix.rho)
{
summary.hyper[2, 1:3] <- c(mean(samples$rho), quantile(samples$rho, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.rho, effectiveSize(samples$rho), geweke.diag(samples$rho)$z)
}else
{
summary.hyper[2, 1:3] <- c(rho, rho, rho)
summary.hyper[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.phi <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.tau2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "tau2")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.phi <- apply(samples.phi.matrix, 2, mean)
fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
rho.temp <- samples.rho.list
tau2.temp <- samples.tau2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
rho.temp[[j]] <- mcmc(samples.rho.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
rho.mcmc <- as.mcmc.list(rho.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rho.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
if(!fix.rho)
{
summary.hyper[2, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.rho, effectiveSize(rho.mcmc), gelman.diag(rho.mcmc)$psrf[ ,2])
}else
{
summary.hyper[2, 1:3] <- c(rho, rho, rho)
summary.hyper[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Poisson (log link function)", "\nRandom effects model - Leroux CAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
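#### A minimal usage sketch (not run), reusing a binary W built with spdep as above; leaving rho=NULL
#### means it is estimated from the data, while e.g. rho=1 fixes the intrinsic CAR model.
## model <- poisson.lerouxCAR(Y ~ 1, W=W, burnin=10000, n.sample=30000)
## model$summary.results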
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.lerouxCAR.R
|
poisson.lerouxCARMCMC <- function(Y, offset, X.standardised, W, rho, fix.rho, K, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayes.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#
#
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
res.temp <- log.Y - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
tau2 <- var(phi) / 10
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- exp(as.numeric(X.standardised %*% beta) + offset + phi)
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,6)
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the determinant
if(!fix.rho)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.posterior.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
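#### When rho = 1 (the intrinsic CAR model) the precision matrix Q is singular, with one zero eigenvalue
#### per connected component (island) of W, so the effective dimension of phi is K - n.islands and the
#### posterior shape parameter for tau2 is reduced accordingly.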
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0])
}else
{}
####################
## Sample from beta
####################
offset.temp <- phi + offset
if(MALA)
{
temp <- poissonbetaupdateMALA(X.standardised, K, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(X.standardised, K, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
regression <- X.standardised %*% beta
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
####################
## Sample from phi
####################
beta.offset <- regression + offset
temp1 <- poissoncarupdateRW(Wtriplet=W.triplet, Wbegfin=W.begfin, W.triplet.sum, nsites=K, phi=phi, tau2=tau2, y=Y.DA, phi_tune=proposal.sd.phi, rho=rho, offset=beta.offset)
phi <- temp1[[1]]
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K
##################
## Sample from tau2
##################
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, rho)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
##################
## Sample from rho
##################
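## The full conditional for rho is proportional to |Q(rho)|^{1/2} * exp(-phi' Q(rho) phi / (2 * tau2)) on
## [0, 1]; the identity log|Q(rho)| = sum(log(rho * lambda_i + (1 - rho))), with lambda_i the eigenvalues
## of diag(rowSums(W)) - W, makes the determinant cheap to recompute for each proposal.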
if(!fix.rho)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q - temp2 / tau2
logprob.proposal <- det.Q.proposal - temp3 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- det.Q.proposal
accept[5] <- accept[5] + 1
}else
{
}
accept[6] <- accept[6] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- as.numeric(regression) + phi + offset
fitted <- exp(lp)
loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.tau2[ele, ] <- tau2
if(!fix.rho) samples.rho[ele, ] <- rho
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{
}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
if(!fix.rho) proposal.sd.rho <- common.accceptrates2(accept[5:6], proposal.sd.rho, 40, 50, 0.5)
accept <- c(0,0,0,0,0,0)
}else
{
}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho) samples.rho <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.lerouxCARMCMC.R
|
poisson.localisedCAR <- function(formula, data=NULL, G, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, prior.delta = NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame.localised(formula, data, "poisson", trials=NA)
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Format and check the number of clusters G
if(length(G)!=1) stop("G is the wrong length.", call.=FALSE)
if(!is.numeric(G)) stop("G is not numeric.", call.=FALSE)
if(G<=1) stop("G is less than 2.", call.=FALSE)
if(G!=round(G)) stop("G is not an integer.", call.=FALSE)
if(floor(G/2)==ceiling(G/2))
{
Gstar <- G/2
}else
{
Gstar <- (G+1)/2
}
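#### Gstar indexes the middle of the G ordered intercept classes; it is presumably used to centre the
#### penalty that the parameter delta places on the cluster indicators Z in the localised model.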
#### Priors
if(p>0)
{
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
}else
{
prior.mean.beta <- NULL
prior.var.beta <- NULL
}
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
common.prior.var.check(prior.tau2)
if(is.null(prior.delta)) prior.delta <- 10
if(length(prior.delta)!=1) stop("the prior value for delta is the wrong length.", call.=FALSE)
if(!is.numeric(prior.delta)) stop("the prior value for delta is not numeric.", call.=FALSE)
if(sum(is.na(prior.delta))!=0) stop("the prior value for delta has missing values.", call.=FALSE)
if(prior.delta<=0) stop("the prior value for delta is not positive.", call.=FALSE)
#### Compute the blocking structure for beta
if(p>0)
{
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
}else
{
n.beta.block <- NULL
list.block <- NULL
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- poisson.localisedCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, G=G, Gstar=Gstar, W=W, K=K, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.delta=prior.delta, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- poisson.localisedCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, G=G, Gstar=Gstar, W=W, K=K, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.delta=prior.delta, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=poisson.localisedCARMCMC, Y=Y, offset=offset, X.standardised=X.standardised, G=G, Gstar=Gstar, W=W, K=K, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.delta=prior.delta, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.phi <- 100 * results$accept[1] / results$accept[2]
accept.lambda <- 100 * results$accept[3] / results$accept[4]
accept.delta <- 100 * results$accept[5] / results$accept[6]
accept.tau2 <- 100
if(p>0)
{
accept.beta <- 100 * results$accept[7] / results$accept[8]
accept.final <- c(accept.beta, accept.lambda, accept.delta, accept.phi, accept.tau2)
names(accept.final) <- c("beta", "lambda", "delta", "phi", "tau2")
}else
{
accept.final <- c(accept.lambda, accept.delta, accept.phi, accept.tau2)
names(accept.final) <- c("lambda", "delta", "phi", "tau2")
}
## Compute the model fit criterion
mean.phi <- apply(results$samples.phi, 2, mean)
mean.Z <- round(apply(results$samples.Z,2,mean),0)
mean.lambda <- apply(results$samples.lambda,2,mean)
if(p>0)
{
mean.beta <- apply(results$samples.beta, 2, mean)
regression.vec <- as.numeric(X.standardised %*% mean.beta)
}else
{
regression.vec <- rep(0,K)
}
fitted.mean <- exp(regression.vec + mean.lambda[mean.Z] + mean.phi + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE))
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
if(p>0)
{
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), lambda=mcmc(results$samples.lambda), Z=mcmc(results$samples.Z), tau2=mcmc(results$samples.tau2), delta=mcmc(results$samples.delta), fitted=mcmc(results$samples.fitted), Y=NA)
}else
{
samples <- list(phi=mcmc(results$samples.phi), lambda=mcmc(results$samples.lambda), Z=mcmc(results$samples.Z), tau2=mcmc(results$samples.tau2), delta=mcmc(results$samples.delta), fitted=mcmc(results$samples.fitted), Y=NA)
}
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.hyper[2, 1:3] <- c(mean(samples$delta), quantile(samples$delta, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.delta, effectiveSize(samples$delta), geweke.diag(samples$delta)$z)
summary.lambda <- t(rbind(apply(samples$lambda, 2, mean), apply(samples$lambda, 2, quantile, c(0.025, 0.975))))
summary.lambda <- cbind(summary.lambda, rep(n.keep, G), rep(accept.lambda, G), effectiveSize(samples$lambda), geweke.diag(samples$lambda)$z)
Z.used <- as.numeric(names(table(samples$Z)))
summary.lambda <- summary.lambda[Z.used, ]
if(p>0)
{
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
summary.results <- rbind(summary.beta, summary.lambda, summary.hyper)
row.names(summary.results)[(p+1):nrow(summary.results)] <- c(paste("lambda", Z.used, sep=""), "tau2", "delta")
colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
summary.results <- rbind(summary.lambda, summary.hyper)
row.names(summary.results) <- c(paste("lambda", Z.used, sep=""), "tau2", "delta")
colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.phi <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.lambda <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.delta <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
accept.tau2 <- 100
if(p>0)
{
accept.beta <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8])
accept.final <- c(accept.beta, accept.lambda, accept.delta, accept.phi, accept.tau2)
names(accept.final) <- c("beta", "lambda", "delta", "phi", "tau2")
}else
{
accept.final <- c(accept.lambda, accept.delta, accept.phi, accept.tau2)
names(accept.final) <- c("lambda", "delta", "phi", "tau2")
}
## Extract the samples into separate lists
if(p>0) samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]])
samples.Z.list <- lapply(results, function(l) l[["samples.Z"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
## Convert the samples into separate matrix objects
if(p>0) samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list)
samples.Z.matrix <- do.call(what=rbind, args=samples.Z.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.Z <- round(apply(samples.Z.matrix,2,mean),0)
mean.lambda <- apply(samples.lambda.matrix,2,mean)
if(p>0)
{
mean.beta <- apply(samples.beta.matrix, 2, mean)
regression.vec <- as.numeric(X.standardised %*% mean.beta)
}else
{
regression.vec <- rep(0,K)
}
fitted.mean <- exp(regression.vec + mean.lambda[mean.Z] + mean.phi + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE))
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
if(p>0)
{
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
}else
{}
## Create MCMC objects
if(p>0) beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
delta.temp <- samples.delta.list
lambda.temp <- samples.lambda.list
Z.temp <- samples.Z.list
tau2.temp <- samples.tau2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
for(j in 1:n.chains)
{
if(p>0) beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
delta.temp[[j]] <- mcmc(samples.delta.list[[j]])
lambda.temp[[j]] <- mcmc(samples.lambda.list[[j]])
Z.temp[[j]] <- mcmc(samples.Z.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
}
if(p>0) beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
delta.mcmc <- as.mcmc.list(delta.temp)
Z.mcmc <- as.mcmc.list(Z.temp)
lambda.mcmc <- as.mcmc.list(lambda.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
if(p>0)
{
samples <- list(beta=beta.mcmc, phi=phi.mcmc, lambda=lambda.mcmc, Z=Z.mcmc, tau2=tau2.mcmc, delta=delta.mcmc, fitted=fitted.mcmc, Y=NA)
}else
{
samples <- list(phi=phi.mcmc, lambda=lambda.mcmc, Z=Z.mcmc, tau2=tau2.mcmc, delta=delta.mcmc, fitted=fitted.mcmc, Y=NA)
}
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.hyper[2, 1:3] <- c(mean(samples.delta.matrix), quantile(samples.delta.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.delta, effectiveSize(delta.mcmc), gelman.diag(delta.mcmc)$psrf[ ,2])
summary.lambda <- t(rbind(apply(samples.lambda.matrix, 2, mean), apply(samples.lambda.matrix, 2, quantile, c(0.025, 0.975))))
summary.lambda <- cbind(summary.lambda, rep(n.keep, G), rep(accept.lambda, G), effectiveSize(lambda.mcmc), gelman.diag(lambda.mcmc)$psrf[ ,2])
Z.used <- as.numeric(names(table(samples.Z.matrix)))
summary.lambda <- summary.lambda[Z.used, ]
if(p>0)
{
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
summary.results <- rbind(summary.beta, summary.lambda, summary.hyper)
row.names(summary.results)[(p+1):nrow(summary.results)] <- c(paste("lambda", Z.used, sep=""), "tau2", "delta")
colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
summary.results <- rbind(summary.lambda, summary.hyper)
row.names(summary.results) <- c(paste("lambda", Z.used, sep=""), "tau2", "delta")
colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Poisson (log link function)", "\nRandom effects model - Localised CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=mean.Z, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
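#### Illustrative usage (not run): users typically reach this function through the
#### exported wrapper S.CARlocalised() with family="poisson". A minimal sketch,
#### assuming a K x K binary neighbourhood matrix W and a data frame dat with
#### columns observed and expected:
# model <- S.CARlocalised(formula=observed~offset(log(expected)), family="poisson",
#                         data=dat, G=3, W=W, burnin=20000, n.sample=120000, thin=10)
# model$summary.results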
#### End of file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.localisedCAR.R
poisson.localisedCARMCMC <- function(Y, offset, X.standardised, G, Gstar, W, K, p, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, prior.delta, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
#
#
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
if(p==0)
{
regression.vec <- rep(0, K)
beta <- NA
}else
{
mod.glm <- glm(Y~X.standardised, offset=offset, family="quasipoisson")
beta.mean <- mod.glm$coefficients[-1]
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))[-1]
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
regression.vec <- X.standardised %*% beta
}
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
res.temp <- log.Y - regression.vec - offset
clust <- kmeans(res.temp,G)
lambda <- clust$centers[order(clust$centers)]
Z <- rep(1, K)
for(j in 2:G)
{
Z[clust$cluster==order(clust$centers)[j]] <- j
}
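#### lambda and Z are initialised from a K-means clustering of the approximate
#### log-scale residuals, with cluster labels reordered so that lambda is increasing.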
delta <- runif(1,1, min(2, prior.delta))
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd = res.sd)
for(i in 1:G)
{
phi[which(Z==i)] <- phi[which(Z==i)] - mean(phi[which(Z==i)])
}
tau2 <- var(phi) / 10
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
lp <- lambda[Z] + phi + regression.vec + offset
fitted <- exp(lp)
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.phi <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.Z <- array(NA, c(n.keep, K))
samples.lambda <- array(NA, c(n.keep, G))
samples.delta <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
#### Metropolis quantities
if(p>0)
{
samples.beta <- array(NA, c(n.keep, p))
accept <- rep(0,8)
proposal.sd.beta <- 0.01
}else
{
accept <- rep(0,6)
}
proposal.sd.phi <- 0.1
proposal.sd.delta <- 0.1
proposal.sd.lambda <- 0.01
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
###################
## Sample from beta
###################
if(p>0)
{
offset.temp <- phi + offset + lambda[Z]
if(MALA)
{
temp <- poissonbetaupdateMALA(X.standardised, K, p, beta, offset.temp, Y, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(X.standardised, K, p, beta, offset.temp, Y, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[7] <- accept[7] + temp[[2]]
accept[8] <- accept[8] + n.beta.block
regression.vec <- X.standardised %*% beta
}else
{}
##################
## Sample from phi
##################
phi.offset <- regression.vec + offset + lambda[Z]
temp1 <- poissoncarupdateRW(Wtriplet=W.triplet, Wbegfin=W.begfin, W.triplet.sum, nsites=K, phi=phi, tau2=tau2, y=Y, phi_tune=proposal.sd.phi, rho=1, offset=phi.offset)
phi <- temp1[[1]]
for(i in 1:G)
{
phi[which(Z==i)] <- phi[which(Z==i)] - mean(phi[which(Z==i)])
}
accept[1] <- accept[1] + temp1[[2]]
accept[2] <- accept[2] + K
###################
## Sample from tau2
###################
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, 1)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
#####################
## Sample from lambda
#####################
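## lambda is updated as a single block: each ordered cluster mean receives a
## truncated normal proposal bounded by its neighbouring means, with the
## +/-1000 padding supplying finite bounds for the first and last means.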
proposal.extend <- c(-1000, lambda, 1000)
lambda.extend <- c(-1000, lambda, 1000)
for(i in 1:G)
{
proposal.extend[(i+1)] <- rtruncnorm(n=1, a=proposal.extend[i], b=proposal.extend[(i+2)], mean=lambda[i], sd=proposal.sd.lambda)
}
proposal <- proposal.extend[2:(G+1)]
lp.current <- lambda[Z] + phi + regression.vec + offset
lp.proposal <- proposal[Z] + phi + regression.vec + offset
prob1 <- sum((exp(lp.current) - exp(lp.proposal)))
prob2 <- sum(Y * (lp.proposal - lp.current))
prob <- exp(prob1 + prob2)
if(prob > runif(1))
{
lambda <- proposal
accept[3] <- accept[3] + 1
}else
{
}
accept[4] <- accept[4] + 1
################
## Sample from Z
################
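## Each area receives an independent uniform proposal over 1:G, accepted with
## a probability combining the Poisson likelihood ratio with the penalty
## prior on squared distance from the central class Gstar.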
Z.offset <- phi + offset + regression.vec
Z.proposal <- sample(1:G, size=K, replace=TRUE)
prior <- delta * ((Z - Gstar)^2 - (Z.proposal-Gstar)^2)
like <- exp(Z.offset) * (exp(lambda[Z]) - exp(lambda[Z.proposal])) + Y * (lambda[Z.proposal] - lambda[Z])
prob <- exp(like + prior)
test <- prob> runif(K)
Z[test] <- Z.proposal[test]
####################
## Sample from delta
####################
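## delta has a truncated normal random walk on [1, prior.delta]; prob2 is the
## log-ratio of the Z-prior normalising constants, which depend on delta.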
proposal.delta <- rtruncnorm(n=1, a=1, b=prior.delta, mean=delta, sd=proposal.sd.delta)
prob1 <- sum((Z-Gstar)^2) * (delta - proposal.delta)
prob2 <- K * log(sum(exp(-delta *(1:G - Gstar)^2))) - K * log(sum(exp(-proposal.delta *(1:G - Gstar)^2)))
hastings <- log(dtruncnorm(x=delta, a=1, b=prior.delta, mean=proposal.delta, sd=proposal.sd.delta)) - log(dtruncnorm(x=proposal.delta, a=1, b=prior.delta, mean=delta, sd=proposal.sd.delta))
prob <- exp(prob1 + prob2 + hastings)
if(prob > runif(1))
{
delta <- proposal.delta
accept[5] <- accept[5] + 1
}else
{
}
accept[6] <- accept[6] + 1
#########################
## Calculate the deviance
#########################
lp <- lambda[Z] + phi + regression.vec + offset
fitted <- exp(lp)
loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.phi[ele, ] <- phi
samples.lambda[ele, ] <- lambda
samples.tau2[ele, ] <- tau2
samples.Z[ele, ] <- Z
samples.delta[ele, ] <- delta
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(p>0) samples.beta[ele, ] <- beta
}else
{
}
########################################
## Self-tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
if(p>0)
{
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
proposal.sd.lambda <- common.accceptrates1(accept[3:4], proposal.sd.lambda, 20, 40)
proposal.sd.delta <- common.accceptrates2(accept[5:6], proposal.sd.delta, 40, 50, prior.delta/6)
accept <- rep(0,8)
}else
{
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
proposal.sd.lambda <- common.accceptrates1(accept[3:4], proposal.sd.lambda, 20, 40)
proposal.sd.delta <- common.accceptrates2(accept[5:6], proposal.sd.delta, 40, 50, prior.delta/6)
accept <- rep(0,6)
}
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(p>0)
{
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.Z=samples.Z, samples.lambda=samples.lambda, samples.tau2=samples.tau2, samples.delta=samples.delta, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
accept=accept)
}else
{
chain.results <- list(samples.phi=samples.phi, samples.Z=samples.Z, samples.lambda=samples.lambda, samples.tau2=samples.tau2, samples.delta=samples.delta, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
accept=accept)
}
#### Return the results
return(chain.results)
}
#### End of file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.localisedCARMCMC.R
poisson.multilevelCAR <- function(formula, data=NULL, W, ind.area, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "poisson")
n <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
K <- length(unique(ind.area))
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### rho
if(is.null(rho))
{
rho <- runif(1)
fix.rho <- FALSE
}else
{
fix.rho <- TRUE
}
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
#### Checks and formatting for ind.area
if(!is.vector(ind.area)) stop("ind.area is not a vector.", call.=FALSE)
if(sum(ceiling(ind.area)==floor(ind.area))!=n) stop("ind.area does not have all integer values.", call.=FALSE)
if(min(ind.area)!=1) stop("the minimum value in ind.area is not 1.", call.=FALSE)
if(max(ind.area)!=K) stop("the maximum value in ind.area is not equal to the number of spatial areal units.", call.=FALSE)
if(length(table(ind.area))!=K) stop("the number of unique areas in ind.area does not equal K.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- poisson.multilevelCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, ind.area=ind.area, rho=rho, fix.rho=fix.rho, n=n, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- poisson.multilevelCARMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, ind.area=ind.area, rho=rho, fix.rho=fix.rho, n=n, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=poisson.multilevelCARMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, ind.area=ind.area, rho=rho, fix.rho=fix.rho, n=n, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.phi <- 100 * results$accept[3] / results$accept[4]
accept.tau2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * results$accept[5] / results$accept[6]
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "tau2")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.phi <- apply(results$samples.phi, 2, mean)
mean.phi.extend <- mean.phi[ind.area]
fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi.extend + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), rho=mcmc(results$samples.rho), tau2=mcmc(results$samples.tau2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
if(!fix.rho)
{
summary.hyper[2, 1:3] <- c(mean(samples$rho), quantile(samples$rho, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.rho, effectiveSize(samples$rho), geweke.diag(samples$rho)$z)
}else
{
summary.hyper[2, 1:3] <- c(rho, rho, rho)
summary.hyper[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.phi <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.tau2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "tau2")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.phi.extend <- mean.phi[ind.area]
fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi.extend + offset)
deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
rho.temp <- samples.rho.list
tau2.temp <- samples.tau2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
rho.temp[[j]] <- mcmc(samples.rho.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
rho.mcmc <- as.mcmc.list(rho.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rho.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
if(!fix.rho)
{
summary.hyper[2, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.rho, effectiveSize(rho.mcmc), gelman.diag(rho.mcmc)$psrf[ ,2])
}else
{
summary.hyper[2, 1:3] <- c(rho, rho, rho)
summary.hyper[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Poisson (log link function)", "\nRandom effects model - Multilevel Leroux CAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
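#### Illustrative usage (not run): this model is normally fitted through the
#### exported wrapper S.CARmultilevel() with family="poisson". A minimal sketch,
#### assuming individual-level data dat and an area index vector ind.area with
#### values in 1..K matching the rows of W:
# model <- S.CARmultilevel(formula=Y~x1+x2, family="poisson", data=dat, W=W,
#                          ind.area=ind.area, burnin=20000, n.sample=120000)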
#### End of file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.multilevelCAR.R
poisson.multilevelCARMCMC <- function(Y, offset, X.standardised, W, ind.area, rho, fix.rho, n, K, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
#
#
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
res.temp <- log.Y - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
phi.extend <- phi[ind.area]
tau2 <- var(phi) / 10
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- exp(as.numeric(X.standardised %*% beta) + phi.extend + offset)
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Ind.area parts
ind.area.list <- as.list(rep(0,K))
n.individual <- rep(0,K)
n.individual.miss <- rep(0,K)
for(r in 1:K)
{
ind.area.list[[r]] <- which(ind.area==r)
n.individual[r] <- length(ind.area.list[[r]])
n.individual.miss[r] <- sum(which.miss[ind.area.list[[r]]])
}
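#### ind.area.list[[r]] holds the row indices of the individuals in area r,
#### n.individual[r] their number, and (despite its name) n.individual.miss[r]
#### the number of those individuals with an observed response.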
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, n))
samples.fitted <- array(NA, c(n.keep, n))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,6)
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the spatial determinant
if(!fix.rho)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.posterior.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0])
}else
{}
####################
## Sample from beta
####################
offset.temp <- phi.extend + offset
if(MALA)
{
temp <- poissonbetaupdateMALA(X.standardised, n, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(X.standardised, n, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
####################
## Sample from phi
####################
beta.offset <- X.standardised %*% beta + offset
temp1 <- poissoncarmultilevelupdate(Wtriplet=W.triplet, Wbegfin=W.begfin, Wtripletsum=W.triplet.sum, ind_area_list=ind.area.list, n_individual=n.individual, nsites=K, phi=phi, tau2=tau2, y=Y.DA, phi_tune=proposal.sd.phi, rho=rho, offset=beta.offset)
phi <- temp1[[1]]
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K
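## Expand the updated area-level effects back to the individual level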
phi.extend <- phi[ind.area]
##################
## Sample from tau2
##################
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, rho)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
##################
## Sample from rho
##################
if(!fix.rho)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q - temp2 / tau2
logprob.proposal <- det.Q.proposal - temp3 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- det.Q.proposal
accept[5] <- accept[5] + 1
}else
{
}
accept[6] <- accept[6] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- as.numeric(X.standardised %*% beta) + phi.extend + offset
fitted <- exp(lp)
loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.tau2[ele, ] <- tau2
if(!fix.rho) samples.rho[ele, ] <- rho
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{
}
########################################
## Self-tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
if(!fix.rho) proposal.sd.rho <- common.accceptrates2(accept[5:6], proposal.sd.rho, 40, 50, 0.5)
accept <- rep(0,6)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho) samples.rho <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
#### End of file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/poisson.multilevelCARMCMC.R
print.CARBayes <- function(x,...)
{
if(is.list(x$localised.structure))
{
#### Print out the model fitted
cat("\n#################\n")
cat("#### Model fitted\n")
cat("#################\n")
cat(x$model)
cat("Regression equation - ")
print(x$formula)
cat("\n")
cat("\n#################\n")
cat("#### MCMC details\n")
cat("#################\n")
cat("Total number of post burnin and thinned MCMC samples generated - ")
cat(x$mcmc.info[1])
cat("\n")
cat("Number of MCMC chains used - ")
cat(x$mcmc.info[5])
cat("\n")
cat("Length of the burnin period used for each chain - ")
cat(x$mcmc.info[3])
cat("\n")
cat("Amount of thinning used - ")
cat(x$mcmc.info[4])
cat("\n")
#### Print out the results
cat("\n############\n")
cat("#### Results\n")
cat("############\n")
cat("Posterior quantities and DIC\n\n")
print(x$summary.results[ ,-c(4,5)])
cat("\nDIC = ", x$modelfit[1], " ", "p.d = ", x$modelfit[2], " ", "LMPL = ", round(x$modelfit[5],2),"\n")
if(length(x$localised.structure[[2]])>1)
{
cat("\nThe number of stepchanges identified in the random effect surface\n")
temp <- x$localised.structure[[1]][!is.na(x$localised.structure[[1]])]
tab <- array(NA, c(1,2))
tab[1, ] <- c(sum(temp)/2, length(temp)/2- sum(temp)/2)
colnames(tab) <- c("no stepchange", "stepchange")
print(tab)
}else
{}
}else if(is.numeric(x$localised.structure))
{
#### Print out the model fitted
cat("\n#################\n")
cat("#### Model fitted\n")
cat("#################\n")
cat(x$model)
cat("Regression equation - ")
print(x$formula)
cat("\n")
cat("\n#################\n")
cat("#### MCMC details\n")
cat("#################\n")
cat("Total number of post burnin and thinned MCMC samples generated - ")
cat(x$mcmc.info[1])
cat("\n")
cat("Number of MCMC chains used - ")
cat(x$mcmc.info[5])
cat("\n")
cat("Length of the burnin period used for each chain - ")
cat(x$mcmc.info[3])
cat("\n")
cat("Amount of thinning used - ")
cat(x$mcmc.info[4])
cat("\n")
#### Print out the results
cat("\n############\n")
cat("#### Results\n")
cat("############\n")
cat("Posterior quantities and DIC\n\n")
print(x$summary.results[ ,-c(4,5)])
cat("\nDIC = ", x$modelfit[1], " ", "p.d = ", x$modelfit[2], " ", "LMPL = ", round(x$modelfit[5],2),"\n")
cat("\nNumber of clusters with the number of data points in each one\n")
print(table(paste("group", x$localised.structure, sep="")))
}else
{
#### Print out the model fitted
cat("\n#################\n")
cat("#### Model fitted\n")
cat("#################\n")
cat(x$model)
if(!is.list(x$formula))
{
cat("Regression equation - ")
print(x$formula)
}else
{
cat("Regression equation - ")
print(x$formula[[1]])
cat("Zero probability equation - ")
print(x$formula[[2]])
}
cat("\n")
cat("\n#################\n")
cat("#### MCMC details\n")
cat("#################\n")
cat("Total number of post burnin and thinned MCMC samples generated - ")
cat(x$mcmc.info[1])
cat("\n")
cat("Number of MCMC chains used - ")
cat(x$mcmc.info[5])
cat("\n")
cat("Length of the burnin period used for each chain - ")
cat(x$mcmc.info[3])
cat("\n")
cat("Amount of thinning used - ")
cat(x$mcmc.info[4])
cat("\n")
#### Print out the results
cat("\n############\n")
cat("#### Results\n")
cat("############\n")
cat("Posterior quantities and DIC\n\n")
print(x$summary.results[ ,-c(4,5)])
cat("\nDIC = ", x$modelfit[1], " ", "p.d = ", x$modelfit[2], " ", "LMPL = ", round(x$modelfit[5],2),"\n")
}
return(invisible(x))
}
#### End of file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/print.CARBayes.R
residuals.CARBayes <- function(object, type="pearson", ...)
{
residuals <- object$residuals
#### Both the multivariate models (which return lists) and the univariate models
#### (which return data frames) store the residuals under the elements "response"
#### and "pearson", so a single extraction covers both cases.
    if(type=="response")
    {
    return(residuals$response)
    }else if(type=="pearson")
    {
    return(residuals$pearson)
    }else
    {
    stop("type must be one of 'response' or 'pearson'.", call.=FALSE)
    }
}
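#### Illustrative usage (not run), assuming model is a fitted "CARBayes" object:
# residuals(model, type="pearson")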
#### End of file: /scratch/gouwar.j/cran-all/cranData/CARBayes/R/residuals.CARBayes.R
zip.bymCAR <- function(formula, formula.omega, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, prior.sigma2=NULL, prior.mean.delta=NULL, prior.var.delta=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "poisson")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Frame object for the omega model
## Create the matrix
frame.omega <- try(suppressWarnings(model.frame(formula.omega, data=data, na.action=na.pass)), silent=TRUE)
if(class(frame.omega)[1]=="try-error") stop("the formula.omega inputted contains an error.", call.=FALSE)
V <- try(suppressWarnings(model.matrix(object=attr(frame.omega, "terms"), data=frame.omega)), silent=TRUE)
if(class(V)[1]=="try-error") stop("the covariate matrix for the zero probabilities contains inappropriate values.", call.=FALSE)
if(length(V)==0)
{
V <- matrix(rep(1, K), nrow=K, ncol=1, byrow=FALSE)
}else
{}
if(sum(is.na(V))>0) stop("the covariate matrix for the zero probabilities contains missing 'NA' values.", call.=FALSE)
if(nrow(V)!=nrow(X)) stop("the two covariate matrices do not have the same number of rows.", call.=FALSE)
q <- ncol(V)
## Check for linearly related columns
cor.V <- suppressWarnings(cor(V))
diag(cor.V) <- 0
if(max(cor.V, na.rm=TRUE)==1) stop("the covariate matrix for the zero probabilities has two exactly linearly related columns.", call.=FALSE)
if(min(cor.V, na.rm=TRUE)==-1) stop("the covariate matrix for the zero probabilities has two exactly linearly related columns.", call.=FALSE)
if(q>1)
{
if(sort(apply(V, 2, sd))[2]==0) stop("the covariate matrix for the zero probabilities has two intercept terms.", call.=FALSE)
}else
{}
## Standardise the matrix
V.standardised <- V
V.sd <- apply(V, 2, sd)
V.mean <- apply(V, 2, mean)
V.indicator <- rep(NA, q) # To determine which parameter estimates to transform back
for(j in 1:q)
{
if(length(table(V[ ,j]))>2)
{
V.indicator[j] <- 1
V.standardised[ ,j] <- (V[ ,j] - mean(V[ ,j])) / sd(V[ ,j])
}else if(length(table(V[ ,j]))==1)
{
V.indicator[j] <- 2
}else
{
V.indicator[j] <- 0
}
}
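## V.indicator codes each column: 1 = continuous (standardised above),
## 2 = constant (e.g. the intercept), 0 = binary (left unstandardised).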
## Check for an offset term
offset.omega <- try(model.offset(frame.omega), silent=TRUE)
if(class(offset.omega)[1]=="try-error") stop("the offset for the probability of being a zero is not numeric.", call.=FALSE)
if(is.null(offset.omega)) offset.omega <- rep(0,K)
if(sum(is.na(offset.omega))>0) stop("the offset for the probability of being a zero has missing 'NA' values.", call.=FALSE)
if(!is.numeric(offset.omega)) stop("the offset for the probability of being a zero has non-numeric values.", call.=FALSE)
#### Set up which elements are zero
which.zero <- which(Y==0)
n.zero <- length(which.zero)
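#### Only observed zeros can belong to the structural-zero component of the ZIP
#### model, so their indices are precomputed once here.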
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.sigma2)) prior.sigma2 <- c(1, 0.01)
if(is.null(prior.mean.delta)) prior.mean.delta <- rep(0, q)
if(is.null(prior.var.delta)) prior.var.delta <- rep(100000, q)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
common.prior.var.check(prior.sigma2)
common.prior.beta.check(prior.mean.delta, prior.var.delta, q)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
## Compute the blocking structure for delta
block.temp <- common.betablock(q)
delta.beg <- block.temp[[1]]
delta.fin <- block.temp[[2]]
n.delta.block <- block.temp[[3]]
list.block.delta <- as.list(rep(NA, n.delta.block*2))
for(r in 1:n.delta.block)
{
list.block.delta[[r]] <- delta.beg[r]:delta.fin[r]-1
list.block.delta[[r+n.delta.block]] <- length(list.block.delta[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- zip.bymCARMCMC(Y=Y, offset=offset, offset.omega=offset.omega, X.standardised=X.standardised, V.standardised=V.standardised, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, which.zero=which.zero, n.zero=n.zero, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, n.delta.block=n.delta.block, list.block.delta=list.block.delta, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- zip.bymCARMCMC(Y=Y, offset=offset, offset.omega=offset.omega, X.standardised=X.standardised, V.standardised=V.standardised, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, which.zero=which.zero, n.zero=n.zero, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, n.delta.block=n.delta.block, list.block.delta=list.block.delta, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=zip.bymCARMCMC, Y=Y, offset=offset, offset.omega=offset.omega, X.standardised=X.standardised, V.standardised=V.standardised, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, which.zero=which.zero, n.zero=n.zero, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, n.delta.block=n.delta.block, list.block.delta=list.block.delta, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.delta <- 100 * results$accept[7] / results$accept[8]
accept.phi <- 100 * results$accept[3] / results$accept[4]
accept.theta <- 100 * results$accept[5] / results$accept[6]
accept.tau2 <- 100
accept.sigma2 <- 100
accept.final <- c(accept.beta, accept.phi, accept.theta, accept.tau2, accept.sigma2, accept.delta)
names(accept.final) <- c("beta", "phi", "theta", "tau2", "sigma2", "delta")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.psi <- apply(results$samples.psi, 2, mean)
mean.lp <- X.standardised %*% mean.beta + mean.psi + offset
mean.fitted <- exp(mean.lp)
mean.Z <- round(apply(results$samples.Z,2,mean))
mean.delta <- apply(results$samples.delta, 2, mean)
mean.omega <- exp(V.standardised %*% mean.delta + offset.omega) / (1+exp(V.standardised %*% mean.delta + offset.omega))
temp <- rep(0,K)
temp[mean.Z==1] <- log(mean.omega[mean.Z==1])
mean.deviance.all <- temp + (1-mean.Z) * (log(1-mean.omega) + dpois(x=as.numeric(Y), lambda=mean.fitted, log=TRUE))
deviance.fitted <- -2 * sum(mean.deviance.all, na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
var.y <- fitted.values + (1 - mean.omega) * mean.omega * mean.fitted^2
pearson.residuals <- response.residuals /sqrt(var.y)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples.delta.orig <- common.betatransform(results$samples.delta, V.indicator, V.mean, V.sd, q, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), psi=mcmc(results$samples.psi), tau2=mcmc(results$samples.tau2), sigma2=mcmc(results$samples.sigma2), delta=mcmc(samples.delta.orig), Z=mcmc(results$samples.Z), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.delta <- t(rbind(apply(samples$delta, 2, mean), apply(samples$delta, 2, quantile, c(0.025, 0.975))))
summary.delta <- cbind(summary.delta, rep(n.keep, q), rep(accept.delta,q), effectiveSize(samples$delta), geweke.diag(samples$delta)$z)
for(i in 1:q)
{
rownames(summary.delta)[i] <- paste("omega - ", colnames(V)[i])
}
colnames(summary.delta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.hyper[2, 1:3] <- c(mean(samples$sigma2), quantile(samples$sigma2, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.sigma2, effectiveSize(samples$sigma2), geweke.diag(samples$sigma2)$z)
summary.results <- rbind(summary.beta, summary.delta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "sigma2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.phi <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.delta <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8])
accept.theta <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
accept.tau2 <- 100
accept.sigma2 <- 100
accept.final <- c(accept.beta, accept.phi, accept.theta, accept.tau2, accept.sigma2, accept.delta)
names(accept.final) <- c("beta", "phi", "theta", "tau2", "sigma2", "delta")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.psi.list <- lapply(results, function(l) l[["samples.psi"]])
samples.sigma2.list <- lapply(results, function(l) l[["samples.sigma2"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.Z.list <- lapply(results, function(l) l[["samples.Z"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.psi.matrix <- do.call(what=rbind, args=samples.psi.list)
samples.sigma2.matrix <- do.call(what=rbind, args=samples.sigma2.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
samples.Z.matrix <- do.call(what=rbind, args=samples.Z.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.psi <- apply(samples.psi.matrix, 2, mean)
mean.lp <- X.standardised %*% mean.beta + mean.psi + offset
mean.fitted <- exp(mean.lp)
mean.Z <- round(apply(samples.Z.matrix,2,mean))
mean.delta <- apply(samples.delta.matrix, 2, mean)
mean.omega <- exp(V.standardised %*% mean.delta + offset.omega) / (1+exp(V.standardised %*% mean.delta + offset.omega))
temp <- rep(0,K)
temp[mean.Z==1] <- log(mean.omega[mean.Z==1])
mean.deviance.all <- temp + (1-mean.Z) * (log(1-mean.omega) + dpois(x=as.numeric(Y), lambda=mean.fitted, log=T))
deviance.fitted <- -2 * sum(mean.deviance.all, na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
var.y <- fitted.values + (1 - mean.omega) * mean.omega * mean.fitted^2
pearson.residuals <- response.residuals /sqrt(var.y)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
samples.delta.list[[j]] <- common.betatransform(samples.delta.list[[j]], V.indicator, V.mean, V.sd, q, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
psi.temp <- samples.psi.list
sigma2.temp <- samples.sigma2.list
tau2.temp <- samples.tau2.list
delta.temp <- samples.delta.list
Z.temp <- samples.Z.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
psi.temp[[j]] <- mcmc(samples.psi.list[[j]])
sigma2.temp[[j]] <- mcmc(samples.sigma2.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
delta.temp[[j]] <- mcmc(samples.delta.list[[j]])
Z.temp[[j]] <- mcmc(samples.Z.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
psi.mcmc <- as.mcmc.list(psi.temp)
sigma2.mcmc <- as.mcmc.list(sigma2.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
delta.mcmc <- as.mcmc.list(delta.temp)
Z.mcmc <- as.mcmc.list(Z.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, psi=psi.mcmc, sigma2=sigma2.mcmc, tau2=tau2.mcmc, delta=delta.mcmc, Z=Z.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.delta <- t(rbind(apply(samples.delta.matrix, 2, mean), apply(samples.delta.matrix, 2, quantile, c(0.025, 0.975))))
summary.delta <- cbind(summary.delta, rep(n.keep, q), rep(accept.delta,q), effectiveSize(delta.mcmc), gelman.diag(delta.mcmc)$psrf[ ,2])
for(i in 1:q)
{
rownames(summary.delta)[i] <- paste("omega - ", colnames(V)[i])
}
    colnames(summary.delta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.hyper[2, 1:3] <- c(mean(samples.sigma2.matrix), quantile(samples.sigma2.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.sigma2, effectiveSize(sigma2.mcmc), gelman.diag(sigma2.mcmc)$psrf[ ,2])
summary.results <- rbind(summary.beta, summary.delta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "sigma2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Zero-Inflated Poisson (log link function)", "\nRandom effects model - BYM CAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=c(formula, formula.omega), model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
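

## A minimal usage sketch for zip.bymCAR() on a simulated square lattice.
## The wrapper name, grid size, true parameter values and MCMC run lengths
## below are illustrative assumptions, not part of the package.
example.zip.bymCAR <- function()
{
    set.seed(1)
    n.side <- 10
    K <- n.side^2
    grid <- expand.grid(1:n.side, 1:n.side)
    distance <- as.matrix(dist(grid))
    W <- array(0, c(K, K))
    W[distance==1] <- 1                              # rook adjacency
    x1 <- rnorm(K)
    lambda <- exp(0.5 + 0.3 * x1)                    # Poisson means
    Z <- rbinom(n=K, size=1, prob=0.2)               # structural zeros
    Y <- rpois(n=K, lambda=lambda) * (1 - Z)
    dat <- data.frame(Y=Y, x1=x1)
    model <- zip.bymCAR(formula=Y~x1, formula.omega=~1, data=dat, W=W,
                        burnin=2000, n.sample=12000, thin=10)
    model$summary.results
}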
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/zip.bymCAR.R
|
zip.bymCARMCMC <- function(Y, offset, offset.omega, X.standardised, V.standardised, W, K, p, q, which.miss, n.miss, which.zero, n.zero, burnin, n.sample, thin, MALA, n.beta.block, list.block, n.delta.block, list.block.delta, prior.mean.beta, prior.var.beta, prior.mean.delta, prior.var.delta, prior.tau2, prior.sigma2, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
#
#
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
res.temp <- log.Y - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
tau2 <- var(phi) / 10
theta <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
sigma2 <- var(theta) / 10
Y.zero <- rep(0,K)
Y.zero[which.zero] <- 1
mod.glm2 <- glm(Y.zero~V.standardised-1, offset=offset.omega, family="binomial")
delta.mean <- mod.glm2$coefficients
delta.sd <- sqrt(diag(summary(mod.glm2)$cov.scaled))
delta <- rnorm(n=length(delta.mean), mean=delta.mean, sd=delta.sd)
omega <- exp(V.standardised %*% delta+offset.omega) / (1+exp(V.standardised %*% delta+offset.omega))
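    ## P(Z=1 | Y=0) = omega / (omega + (1 - omega) * exp(-lambda)): the
    ## conditional probability that an observed zero is a structural zero
    ## rather than a chance Poisson zero, with lambda = exp(X %*% beta + offset)
    ## at initialisation.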
prob.pointmass <- omega[which.zero] / (omega[which.zero]+(1-omega[which.zero])*exp(-exp(as.matrix(X.standardised[which.zero, ]) %*% beta + offset[which.zero])))
Z <- rep(0, K)
Z[which.zero] <- rbinom(n=n.zero, size=1, prob=prob.pointmass)
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- exp(as.numeric(X.standardised %*% beta) + phi + theta + offset)
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.re <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.sigma2 <- array(NA, c(n.keep, 1))
samples.delta <- array(NA, c(n.keep, q))
samples.Z <- array(NA, c(n.keep, K))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
## Metropolis quantities
accept <- rep(0,8)
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
proposal.sd.theta <- 0.1
proposal.sd.delta <- 0.01
sigma2.posterior.shape <- prior.sigma2[1] + 0.5 * K
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
tau2.posterior.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0]) * (1-Z[which.miss==0])
}else
{}
which.zero <- which(Y.DA==0)
n.zero <- length(which.zero)
###################################
#### Update Z via data augmentation
###################################
prob.pointmass <- omega[which.zero] / (omega[which.zero] + (1 - omega[which.zero]) * exp(-exp(as.matrix(X.standardised[which.zero, ]) %*% beta + offset[which.zero] + phi[which.zero] + theta[which.zero])))
Z <- rep(0, K)
Z[which.zero] <- rbinom(n=n.zero, size=1, prob=prob.pointmass)
####################
## Sample from beta
####################
Z.zero <- which(Z==0)
offset.temp <- phi[Z.zero] + offset[Z.zero] + theta[Z.zero]
if(MALA)
{
temp <- poissonbetaupdateMALA(as.matrix(X.standardised[Z.zero, ]), length(Z.zero), p, beta, offset.temp, Y.DA[Z.zero], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(as.matrix(X.standardised[Z.zero, ]), length(Z.zero), p, beta, offset.temp, Y.DA[Z.zero], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
####################
## Sample from phi
####################
beta.offset <- X.standardised %*% beta + theta + offset
temp1 <- zipcarupdateRW(Wtriplet=W.triplet, Wbegfin=W.begfin, W.triplet.sum, nsites=K, phi=phi, tau2=tau2, y=Y.DA, phi_tune=proposal.sd.phi, rho=1, offset=beta.offset, 1-Z)
phi <- temp1[[1]]
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + sum(Z==0)
####################
## Sample from theta
####################
beta.offset <- as.numeric(X.standardised %*% beta) + phi + offset
temp2 <- zipindepupdateRW(nsites=K, theta=theta, sigma2=sigma2, y=Y.DA, theta_tune=proposal.sd.theta, offset=beta.offset, 1-Z)
theta <- temp2[[1]]
theta <- theta - mean(theta)
accept[5] <- accept[5] + temp2[[2]]
accept[6] <- accept[6] + sum(Z==0)
###################
## Sample from tau2
###################
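    ## Conjugate full conditional: tau2 | phi ~ Inverse-Gamma with shape
    ## prior.tau2[1] + (K - n.islands)/2 and scale prior.tau2[2] + phi' Q phi / 2,
    ## where Q is the intrinsic CAR precision (rho=1 here) and quadform() is
    ## assumed to return the half quadratic form.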
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, 1)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
#####################
## Sample from sigma2
#####################
sigma2.posterior.scale <- prior.sigma2[2] + 0.5*sum(theta^2)
sigma2 <- 1 / rgamma(1, sigma2.posterior.shape, scale=(1/sigma2.posterior.scale))
######################
#### Sample from delta
######################
offset.temp <- offset.omega
if(MALA)
{
temp <- binomialbetaupdateMALA(V.standardised, K, q, delta, offset.temp, Z, 1-Z, rep(1,K), prior.mean.delta, prior.var.delta, n.delta.block, proposal.sd.delta, list.block.delta)
}else
{
temp <- binomialbetaupdateRW(V.standardised, K, q, delta, offset.temp, Z, 1-Z, prior.mean.delta, prior.var.delta, n.delta.block, proposal.sd.delta, list.block.delta)
}
delta <- temp[[1]]
accept[7] <- accept[7] + temp[[2]]
accept[8] <- accept[8] + n.delta.block
omega <- exp(V.standardised %*% delta+offset.omega) / (1+exp(V.standardised %*% delta+offset.omega))
#########################
## Calculate the deviance
#########################
lp <- as.numeric(X.standardised %*% beta) + phi + theta + offset
fitted <- exp(lp)
fitted.zip <- fitted * (1-omega)
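    ## Complete-data ZIP log-likelihood given Z: log(omega) for structural
    ## zeros (Z=1) and log(1 - omega) plus the Poisson log-density otherwise.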
temp <- rep(0,K)
temp[Z==1] <- log(omega[Z==1])
loglike <- temp + (1-Z) * (log(1-omega) + dpois(x=as.numeric(Y), lambda=fitted, log=T))
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.re[ele, ] <- phi + theta
samples.tau2[ele, ] <- tau2
samples.sigma2[ele, ] <- sigma2
samples.delta[ele, ] <- delta
samples.Z[ele, ] <- Z
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted.zip
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{
}
########################################
    ## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
if(q>2)
{
proposal.sd.delta <- common.accceptrates1(accept[7:8], proposal.sd.delta, 40, 50)
}else
{
proposal.sd.delta <- common.accceptrates1(accept[7:8], proposal.sd.delta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
proposal.sd.theta <- common.accceptrates1(accept[5:6], proposal.sd.theta, 40, 50)
accept <- c(0,0,0,0,0,0,0,0)
}else
{
}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
    if(n.miss==0) samples.Y <- NA
chain.results <- list(samples.beta=samples.beta, samples.psi=samples.re, samples.tau2=samples.tau2, samples.sigma2=samples.sigma2, samples.delta=samples.delta, samples.Z=samples.Z, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/zip.bymCARMCMC.R
|
zip.glm <- function(formula, formula.omega, data=NULL, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.mean.delta=NULL, prior.var.delta=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object for mean model
frame.results <- common.frame(formula, data, "poisson")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Frame object for the omega model
## Create the matrix
frame.omega <- try(suppressWarnings(model.frame(formula.omega, data=data, na.action=na.pass)), silent=TRUE)
if(class(frame.omega)[1]=="try-error") stop("the formula.omega inputted contains an error.", call.=FALSE)
V <- try(suppressWarnings(model.matrix(object=attr(frame.omega, "terms"), data=frame.omega)), silent=TRUE)
if(class(V)[1]=="try-error") stop("the covariate matrix for the zero probabilities contains inappropriate values.", call.=FALSE)
if(length(V)==0)
{
V <- matrix(rep(1, K), nrow=K, ncol=1, byrow=FALSE)
}else
{}
if(sum(is.na(V))>0) stop("the covariate matrix for the zero probabilities contains missing 'NA' values.", call.=FALSE)
    if(nrow(V)!=nrow(X)) stop("the two covariate matrices don't have the same number of rows.", call.=FALSE)
q <- ncol(V)
## Check for linearly related columns
cor.V <- suppressWarnings(cor(V))
diag(cor.V) <- 0
if(max(cor.V, na.rm=TRUE)==1) stop("the covariate matrix for the zero probabilities has two exactly linearly related columns.", call.=FALSE)
if(min(cor.V, na.rm=TRUE)==-1) stop("the covariate matrix for the zero probabilities has two exactly linearly related columns.", call.=FALSE)
if(q>1)
{
if(sort(apply(V, 2, sd))[2]==0) stop("the covariate matrix for the zero probabilities has two intercept terms.", call.=FALSE)
}else
{}
## Standardise the matrix
V.standardised <- V
V.sd <- apply(V, 2, sd)
V.mean <- apply(V, 2, mean)
V.indicator <- rep(NA, q) # To determine which parameter estimates to transform back
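    ## Coding: 1 = continuous column (standardised below), 2 = constant column
    ## (e.g. the intercept, left alone), 0 = binary column (left alone).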
for(j in 1:q)
{
if(length(table(V[ ,j]))>2)
{
V.indicator[j] <- 1
V.standardised[ ,j] <- (V[ ,j] - mean(V[ ,j])) / sd(V[ ,j])
}else if(length(table(V[ ,j]))==1)
{
V.indicator[j] <- 2
}else
{
V.indicator[j] <- 0
}
}
## Check for an offset term
offset.omega <- try(model.offset(frame.omega), silent=TRUE)
if(class(offset.omega)[1]=="try-error") stop("the offset for the probability of being a zero is not numeric.", call.=FALSE)
if(is.null(offset.omega)) offset.omega <- rep(0,K)
if(sum(is.na(offset.omega))>0) stop("the offset for the probability of being a zero has missing 'NA' values.", call.=FALSE)
    if(!is.numeric(offset.omega)) stop("the offset for the probability of being a zero has non-numeric values.", call.=FALSE)
#### Set up which elements are zero
which.zero <- which(Y==0)
n.zero <- length(which.zero)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
if(is.null(prior.mean.delta)) prior.mean.delta <- rep(0, q)
if(is.null(prior.var.delta)) prior.var.delta <- rep(100000, q)
common.prior.beta.check(prior.mean.delta, prior.var.delta, q)
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
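    ## list.block holds, for each block r, the zero-indexed column positions
    ## (elements 1 to n.beta.block, hence the -1) followed by the block
    ## lengths (elements n.beta.block+1 to 2*n.beta.block), the layout
    ## assumed by the C++ updating functions.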
## Compute the blocking structure for delta
block.temp <- common.betablock(q)
delta.beg <- block.temp[[1]]
delta.fin <- block.temp[[2]]
n.delta.block <- block.temp[[3]]
list.block.delta <- as.list(rep(NA, n.delta.block*2))
for(r in 1:n.delta.block)
{
list.block.delta[[r]] <- delta.beg[r]:delta.fin[r]-1
list.block.delta[[r+n.delta.block]] <- length(list.block.delta[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- zip.glmMCMC(Y=Y, offset=offset, offset.omega=offset.omega, X.standardised=X.standardised, V.standardised=V.standardised, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, which.zero=which.zero, n.zero=n.zero, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, n.delta.block=n.delta.block, list.block.delta=list.block.delta, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- zip.glmMCMC(Y=Y, offset=offset, offset.omega=offset.omega, X.standardised=X.standardised, V.standardised=V.standardised, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, which.zero=which.zero, n.zero=n.zero, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, n.delta.block=n.delta.block, list.block.delta=list.block.delta, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=zip.glmMCMC, Y=Y, offset=offset, offset.omega=offset.omega, X.standardised=X.standardised, V.standardised=V.standardised, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, which.zero=which.zero, n.zero=n.zero, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, n.delta.block=n.delta.block, list.block.delta=list.block.delta, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.delta <- 100 * results$accept[3] / results$accept[4]
accept.final <- c(accept.beta, accept.delta)
names(accept.final) <- c("beta", "delta")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.lp <- X.standardised %*% mean.beta + offset
mean.fitted <- exp(mean.lp)
mean.Z <- round(apply(results$samples.Z,2,mean))
mean.delta <- apply(results$samples.delta, 2, mean)
mean.omega <- exp(V.standardised %*% mean.delta + offset.omega) / (1+exp(V.standardised %*% mean.delta + offset.omega))
temp <- rep(0,K)
temp[mean.Z==1] <- log(mean.omega[mean.Z==1])
mean.deviance.all <- temp + (1-mean.Z) * (log(1-mean.omega) + dpois(x=as.numeric(Y), lambda=mean.fitted, log=T))
deviance.fitted <- -2 * sum(mean.deviance.all, na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
    ## Standardise by the ZIP variance mu + omega*(1-omega)*lambda^2, as in the spatial ZIP models
    var.y <- fitted.values + (1 - mean.omega) * mean.omega * mean.fitted^2
    pearson.residuals <- response.residuals / sqrt(var.y)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples.delta.orig <- common.betatransform(results$samples.delta, V.indicator, V.mean, V.sd, q, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), delta=mcmc(samples.delta.orig), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.delta <- t(rbind(apply(samples$delta, 2, mean), apply(samples$delta, 2, quantile, c(0.025, 0.975))))
summary.delta <- cbind(summary.delta, rep(n.keep, q), rep(accept.delta,q), effectiveSize(samples.delta.orig), geweke.diag(samples.delta.orig)$z)
for(i in 1:q)
{
rownames(summary.delta)[i] <- paste("omega - ", colnames(V)[i])
}
colnames(summary.delta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.results <- rbind(summary.beta, summary.delta)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.delta <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.final <- c(accept.beta, accept.delta)
names(accept.final) <- c("beta", "delta")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.Z.list <- lapply(results, function(l) l[["samples.Z"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
samples.Z.matrix <- do.call(what=rbind, args=samples.Z.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.lp <- X.standardised %*% mean.beta + offset
mean.fitted <- exp(mean.lp)
mean.Z <- round(apply(samples.Z.matrix,2,mean))
mean.delta <- apply(samples.delta.matrix, 2, mean)
mean.omega <- exp(V.standardised %*% mean.delta + offset.omega) / (1+exp(V.standardised %*% mean.delta + offset.omega))
temp <- rep(0,K)
temp[mean.Z==1] <- log(mean.omega[mean.Z==1])
mean.deviance.all <- temp + (1-mean.Z) * (log(1-mean.omega) + dpois(x=as.numeric(Y), lambda=mean.fitted, log=T))
deviance.fitted <- -2 * sum(mean.deviance.all, na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
    ## Standardise by the ZIP variance mu + omega*(1-omega)*lambda^2, as in the spatial ZIP models
    var.y <- fitted.values + (1 - mean.omega) * mean.omega * mean.fitted^2
    pearson.residuals <- response.residuals / sqrt(var.y)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
samples.delta.list[[j]] <- common.betatransform(samples.delta.list[[j]], V.indicator, V.mean, V.sd, q, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
delta.temp <- samples.delta.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
delta.temp[[j]] <- mcmc(samples.delta.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
delta.mcmc <- as.mcmc.list(delta.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, delta=delta.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.delta <- t(rbind(apply(samples.delta.matrix, 2, mean), apply(samples.delta.matrix, 2, quantile, c(0.025, 0.975))))
summary.delta <- cbind(summary.delta, rep(n.keep, q), rep(accept.delta,q), effectiveSize(delta.mcmc), gelman.diag(delta.mcmc)$psrf[ ,2])
for(i in 1:q)
{
rownames(summary.delta)[i] <- paste("omega - ", colnames(V)[i])
}
    colnames(summary.delta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.results <- rbind(summary.beta, summary.delta)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Zero-Inflated Poisson (log link function)", "\nRandom effects model - None\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=c(formula, formula.omega), model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
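

## A minimal usage sketch for zip.glm() with covariates in both the mean
## and zero-probability models. The wrapper name, simulated data and run
## lengths are illustrative assumptions, not part of the package.
example.zip.glm <- function()
{
    set.seed(1)
    K <- 200
    x1 <- rnorm(K)
    omega <- exp(-1 + 0.5 * x1) / (1 + exp(-1 + 0.5 * x1))
    Z <- rbinom(n=K, size=1, prob=omega)             # structural zeros
    Y <- rpois(n=K, lambda=exp(1 + 0.5 * x1)) * (1 - Z)
    dat <- data.frame(Y=Y, x1=x1)
    model <- zip.glm(formula=Y~x1, formula.omega=~x1, data=dat,
                     burnin=2000, n.sample=12000, thin=5)
    model$summary.results
}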
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/zip.glm.R
|
zip.glmMCMC <- function(Y, offset, offset.omega, X.standardised, V.standardised, K, p, q, which.miss, n.miss, which.zero, n.zero, burnin, n.sample, thin, MALA, n.beta.block, list.block, n.delta.block, list.block.delta, prior.mean.beta, prior.var.beta, prior.mean.delta, prior.var.delta, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
mod.glm <- glm(Y[Y>0]~X.standardised[Y>0, ]-1, offset=offset[Y>0], family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
Y.zero <- rep(0,K)
Y.zero[which.zero] <- 1
mod.glm2 <- glm(Y.zero~V.standardised-1, offset=offset.omega, family="binomial")
delta.mean <- mod.glm2$coefficients
delta.sd <- sqrt(diag(summary(mod.glm2)$cov.scaled))
delta <- rnorm(n=length(delta.mean), mean=delta.mean, sd=delta.sd)
omega <- exp(V.standardised %*% delta+offset.omega) / (1+exp(V.standardised %*% delta+offset.omega))
prob.pointmass <- omega[which.zero] / (omega[which.zero]+(1-omega[which.zero])*exp(-exp(as.matrix(X.standardised[which.zero, ]) %*% beta + offset[which.zero])))
Z <- rep(0, K)
Z[which.zero] <- rbinom(n=n.zero, size=1, prob=prob.pointmass)
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- exp(as.numeric(X.standardised %*% beta) + offset)
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.delta <- array(NA, c(n.keep, q))
samples.Z <- array(NA, c(n.keep, K))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,4)
proposal.sd.beta <- 0.01
proposal.sd.delta <- 0.01
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0]) * (1-Z[which.miss==0])
}else
{}
which.zero <- which(Y.DA==0)
n.zero <- length(which.zero)
###################################
#### Update Z via data augmentation
###################################
prob.pointmass <- omega[which.zero] / (omega[which.zero] + (1 - omega[which.zero]) * exp(-exp(as.matrix(X.standardised[which.zero, ]) %*% beta + offset[which.zero])))
Z <- rep(0, K)
Z[which.zero] <- rbinom(n=n.zero, size=1, prob=prob.pointmass)
####################
## Sample from beta
####################
Z.zero <- which(Z==0)
offset.temp <- offset[Z.zero]
if(MALA)
{
temp <- poissonbetaupdateMALA(as.matrix(X.standardised[Z.zero, ]), length(Z.zero), p, beta, offset.temp, Y.DA[Z.zero], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(as.matrix(X.standardised[Z.zero, ]), length(Z.zero), p, beta, offset.temp, Y.DA[Z.zero], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
######################
#### Sample from delta
######################
offset.temp <- offset.omega
if(MALA)
{
temp <- binomialbetaupdateMALA(V.standardised, K, q, delta, offset.temp, Z, 1-Z, rep(1,K), prior.mean.delta, prior.var.delta, n.delta.block, proposal.sd.delta, list.block.delta)
}else
{
temp <- binomialbetaupdateRW(V.standardised, K, q, delta, offset.temp, Z, 1-Z, prior.mean.delta, prior.var.delta, n.delta.block, proposal.sd.delta, list.block.delta)
}
delta <- temp[[1]]
accept[3] <- accept[3] + temp[[2]]
accept[4] <- accept[4] + n.delta.block
omega <- exp(V.standardised %*% delta+offset.omega) / (1+exp(V.standardised %*% delta+offset.omega))
#########################
## Calculate the deviance
#########################
lp <- as.numeric(X.standardised %*% beta) + offset
fitted <- exp(lp)
fitted.zip <- fitted * (1-omega)
temp <- rep(0,K)
temp[Z==1] <- log(omega[Z==1])
loglike <- temp + (1-Z) * (log(1-omega) + dpois(x=as.numeric(Y), lambda=fitted, log=T))
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.delta[ele, ] <- delta
samples.Z[ele, ] <- Z
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted.zip
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
    ## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
## beta
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
## delta
if(q>2)
{
proposal.sd.delta <- common.accceptrates1(accept[3:4], proposal.sd.delta, 40, 50)
}else
{
proposal.sd.delta <- common.accceptrates1(accept[3:4], proposal.sd.delta, 30, 40)
}
accept <- rep(0,4)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
    if(n.miss==0) samples.Y <- NA
chain.results <- list(samples.beta=samples.beta, samples.delta=samples.delta, samples.Z=samples.Z, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/zip.glmMCMC.R
|
zip.lerouxCAR <- function(formula, formula.omega, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, prior.mean.delta=NULL, prior.var.delta=NULL, rho=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object for mean model
frame.results <- common.frame(formula, data, "poisson")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Frame object for the omega model
## Create the matrix
frame.omega <- try(suppressWarnings(model.frame(formula.omega, data=data, na.action=na.pass)), silent=TRUE)
if(class(frame.omega)[1]=="try-error") stop("the formula.omega inputted contains an error.", call.=FALSE)
V <- try(suppressWarnings(model.matrix(object=attr(frame.omega, "terms"), data=frame.omega)), silent=TRUE)
if(class(V)[1]=="try-error") stop("the covariate matrix for the zero probabilities contains inappropriate values.", call.=FALSE)
if(length(V)==0)
{
V <- matrix(rep(1, K), nrow=K, ncol=1, byrow=FALSE)
}else
{}
if(sum(is.na(V))>0) stop("the covariate matrix for the zero probabilities contains missing 'NA' values.", call.=FALSE)
    if(nrow(V)!=nrow(X)) stop("the two covariate matrices don't have the same number of rows.", call.=FALSE)
q <- ncol(V)
## Check for linearly related columns
cor.V <- suppressWarnings(cor(V))
diag(cor.V) <- 0
if(max(cor.V, na.rm=TRUE)==1) stop("the covariate matrix for the zero probabilities has two exactly linearly related columns.", call.=FALSE)
if(min(cor.V, na.rm=TRUE)==-1) stop("the covariate matrix for the zero probabilities has two exactly linearly related columns.", call.=FALSE)
if(q>1)
{
if(sort(apply(V, 2, sd))[2]==0) stop("the covariate matrix for the zero probabilities has two intercept terms.", call.=FALSE)
}else
{}
## Standardise the matrix
V.standardised <- V
V.sd <- apply(V, 2, sd)
V.mean <- apply(V, 2, mean)
V.indicator <- rep(NA, q) # To determine which parameter estimates to transform back
for(j in 1:q)
{
if(length(table(V[ ,j]))>2)
{
V.indicator[j] <- 1
V.standardised[ ,j] <- (V[ ,j] - mean(V[ ,j])) / sd(V[ ,j])
}else if(length(table(V[ ,j]))==1)
{
V.indicator[j] <- 2
}else
{
V.indicator[j] <- 0
}
}
## Check for an offset term
offset.omega <- try(model.offset(frame.omega), silent=TRUE)
if(class(offset.omega)[1]=="try-error") stop("the offset for the probability of being a zero is not numeric.", call.=FALSE)
if(is.null(offset.omega)) offset.omega <- rep(0,K)
if(sum(is.na(offset.omega))>0) stop("the offset for the probability of being a zero has missing 'NA' values.", call.=FALSE)
    if(!is.numeric(offset.omega)) stop("the offset for the probability of being a zero has non-numeric values.", call.=FALSE)
#### Set up which elements are zero
which.zero <- which(Y==0)
n.zero <- length(which.zero)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### rho
if(is.null(rho))
{
rho <- runif(1)
fix.rho <- FALSE
}else
{
fix.rho <- TRUE
}
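    ## rho=NULL (the default) gives a uniform starting value and rho is then
    ## updated within the MCMC algorithm; a supplied numeric value is held
    ## fixed throughout the run.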
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
if(is.null(prior.mean.delta)) prior.mean.delta <- rep(0, q)
if(is.null(prior.var.delta)) prior.var.delta <- rep(100000, q)
common.prior.beta.check(prior.mean.delta, prior.var.delta, q)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
common.prior.var.check(prior.tau2)
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
## Compute the blocking structure for delta
block.temp <- common.betablock(q)
delta.beg <- block.temp[[1]]
delta.fin <- block.temp[[2]]
n.delta.block <- block.temp[[3]]
list.block.delta <- as.list(rep(NA, n.delta.block*2))
for(r in 1:n.delta.block)
{
list.block.delta[[r]] <- delta.beg[r]:delta.fin[r]-1
list.block.delta[[r+n.delta.block]] <- length(list.block.delta[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- zip.lerouxCARMCMC(Y=Y, offset=offset, offset.omega=offset.omega, X.standardised=X.standardised, V.standardised=V.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, which.zero=which.zero, n.zero=n.zero, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, n.delta.block=n.delta.block, list.block.delta=list.block.delta, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- zip.lerouxCARMCMC(Y=Y, offset=offset, offset.omega=offset.omega, X.standardised=X.standardised, V.standardised=V.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, which.zero=which.zero, n.zero=n.zero, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, n.delta.block=n.delta.block, list.block.delta=list.block.delta, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=zip.lerouxCARMCMC, Y=Y, offset=offset, offset.omega=offset.omega, X.standardised=X.standardised, V.standardised=V.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, which.zero=which.zero, n.zero=n.zero, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, n.delta.block=n.delta.block, list.block.delta=list.block.delta, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.delta <- 100 * results$accept[7] / results$accept[8]
accept.phi <- 100 * results$accept[3] / results$accept[4]
accept.tau2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * results$accept[5] / results$accept[6]
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.tau2, accept.delta)
names(accept.final) <- c("beta", "phi", "rho", "tau2", "delta")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.phi <- apply(results$samples.phi, 2, mean)
mean.lp <- X.standardised %*% mean.beta + mean.phi + offset
mean.fitted <- exp(mean.lp)
mean.Z <- round(apply(results$samples.Z,2,mean))
mean.delta <- apply(results$samples.delta, 2, mean)
mean.omega <- exp(V.standardised %*% mean.delta + offset.omega) / (1+exp(V.standardised %*% mean.delta + offset.omega))
temp <- rep(0,K)
temp[mean.Z==1] <- log(mean.omega[mean.Z==1])
mean.deviance.all <- temp + (1-mean.Z) * (log(1-mean.omega) + dpois(x=as.numeric(Y), lambda=mean.fitted, log=T))
deviance.fitted <- -2 * sum(mean.deviance.all, na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
    ## Standardise by the ZIP variance mu + omega*(1-omega)*lambda^2, as in the BYM ZIP model
    var.y <- fitted.values + (1 - mean.omega) * mean.omega * mean.fitted^2
    pearson.residuals <- response.residuals / sqrt(var.y)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples.delta.orig <- common.betatransform(results$samples.delta, V.indicator, V.mean, V.sd, q, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), rho=mcmc(results$samples.rho), tau2=mcmc(results$samples.tau2), delta=mcmc(samples.delta.orig), Z=mcmc(results$samples.Z), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.delta <- t(rbind(apply(samples$delta, 2, mean), apply(samples$delta, 2, quantile, c(0.025, 0.975))))
summary.delta <- cbind(summary.delta, rep(n.keep, q), rep(accept.delta,q), effectiveSize(samples$delta), geweke.diag(samples$delta)$z)
for(i in 1:q)
{
rownames(summary.delta)[i] <- paste("omega - ", colnames(V)[i])
}
colnames(summary.delta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
if(!fix.rho)
{
summary.hyper[2, 1:3] <- c(mean(samples$rho), quantile(samples$rho, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.rho, effectiveSize(samples$rho), geweke.diag(samples$rho)$z)
}else
{
summary.hyper[2, 1:3] <- c(rho, rho, rho)
summary.hyper[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.delta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.phi <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.delta <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8])
accept.tau2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.tau2, accept.delta)
names(accept.final) <- c("beta", "phi", "rho", "tau2", "delta")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.Z.list <- lapply(results, function(l) l[["samples.Z"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
samples.Z.matrix <- do.call(what=rbind, args=samples.Z.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.lp <- X.standardised %*% mean.beta + mean.phi + offset
mean.fitted <- exp(mean.lp)
mean.Z <- round(apply(samples.Z.matrix,2,mean))
mean.delta <- apply(samples.delta.matrix, 2, mean)
mean.omega <- exp(V.standardised %*% mean.delta + offset.omega) / (1+exp(V.standardised %*% mean.delta + offset.omega))
temp <- rep(0,K)
temp[mean.Z==1] <- log(mean.omega[mean.Z==1])
mean.deviance.all <- temp + (1-mean.Z) * (log(1-mean.omega) + dpois(x=as.numeric(Y), lambda=mean.fitted, log=T))
deviance.fitted <- -2 * sum(mean.deviance.all, na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
    ## Standardise by the ZIP variance mu + omega*(1-omega)*lambda^2, as in the BYM ZIP model
    var.y <- fitted.values + (1 - mean.omega) * mean.omega * mean.fitted^2
    pearson.residuals <- response.residuals / sqrt(var.y)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
samples.delta.list[[j]] <- common.betatransform(samples.delta.list[[j]], V.indicator, V.mean, V.sd, q, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
rho.temp <- samples.rho.list
tau2.temp <- samples.tau2.list
delta.temp <- samples.delta.list
Z.temp <- samples.Z.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
rho.temp[[j]] <- mcmc(samples.rho.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
delta.temp[[j]] <- mcmc(samples.delta.list[[j]])
Z.temp[[j]] <- mcmc(samples.Z.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
rho.mcmc <- as.mcmc.list(rho.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
delta.mcmc <- as.mcmc.list(delta.temp)
Z.mcmc <- as.mcmc.list(Z.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rho.mcmc, tau2=tau2.mcmc, delta=delta.mcmc, Z=Z.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.delta <- t(rbind(apply(samples.delta.matrix, 2, mean), apply(samples.delta.matrix, 2, quantile, c(0.025, 0.975))))
summary.delta <- cbind(summary.delta, rep(n.keep, q), rep(accept.delta,q), effectiveSize(delta.mcmc), gelman.diag(delta.mcmc)$psrf[ ,2])
for(i in 1:q)
{
rownames(summary.delta)[i] <- paste("omega - ", colnames(V)[i])
}
    colnames(summary.delta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
if(!fix.rho)
{
summary.hyper[2, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.rho, effectiveSize(rho.mcmc), gelman.diag(rho.mcmc)$psrf[ ,2])
}else
{
summary.hyper[2, 1:3] <- c(rho, rho, rho)
summary.hyper[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.delta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Zero-Inflated Poisson (log link function)", "\nRandom effects model - Leroux CAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=c(formula, formula.omega), model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
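

## A minimal usage sketch for zip.lerouxCAR() on a simulated square lattice;
## leaving rho=NULL (the default) means the spatial dependence parameter is
## estimated. The wrapper name, lattice and run lengths are illustrative
## assumptions, not part of the package.
example.zip.lerouxCAR <- function()
{
    set.seed(1)
    n.side <- 12
    K <- n.side^2
    grid <- expand.grid(1:n.side, 1:n.side)
    distance <- as.matrix(dist(grid))
    W <- array(0, c(K, K))
    W[distance==1] <- 1                              # rook adjacency
    x1 <- rnorm(K)
    Y <- rpois(n=K, lambda=exp(0.5 + 0.3 * x1)) * rbinom(n=K, size=1, prob=0.8)
    dat <- data.frame(Y=Y, x1=x1)
    model <- zip.lerouxCAR(formula=Y~x1, formula.omega=~1, data=dat, W=W,
                           burnin=2000, n.sample=12000, thin=10)
    model$summary.results
}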
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/zip.lerouxCAR.R
|
zip.lerouxCARMCMC <- function(Y, offset, offset.omega, X.standardised, V.standardised, W, rho, fix.rho, K, p, q, which.miss, n.miss, which.zero, n.zero, burnin, n.sample, thin, MALA, n.beta.block, list.block, n.delta.block, list.block.delta, prior.mean.beta, prior.var.beta, prior.mean.delta, prior.var.delta, prior.tau2, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
#
#
##########################################
#### Generate the initial parameter values
##########################################
#### Initial parameter values
mod.glm <- glm(Y[Y>0]~X.standardised[Y>0, ]-1, offset=offset[Y>0], family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
res.temp <- log.Y - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
tau2 <- var(phi) / 10
Y.zero <- rep(0,K)
Y.zero[which.zero] <- 1
mod.glm2 <- glm(Y.zero~V.standardised-1, offset=offset.omega, family="binomial")
delta.mean <- mod.glm2$coefficients
delta.sd <- sqrt(diag(summary(mod.glm2)$cov.scaled))
delta <- rnorm(n=length(delta.mean), mean=delta.mean, sd=delta.sd)
omega <- exp(V.standardised %*% delta+offset.omega) / (1+exp(V.standardised %*% delta+offset.omega))
prob.pointmass <- omega[which.zero] / (omega[which.zero]+(1-omega[which.zero])*exp(-exp(as.matrix(X.standardised[which.zero, ]) %*% beta + offset[which.zero])))
Z <- rep(0, K)
Z[which.zero] <- rbinom(n=n.zero, size=1, prob=prob.pointmass)
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
fitted <- exp(as.numeric(X.standardised %*% beta) + offset + phi)
Y.DA <- Y
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
samples.delta <- array(NA, c(n.keep, q))
samples.Z <- array(NA, c(n.keep, K))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,8)
proposal.sd.beta <- 0.01
proposal.sd.delta <- 0.01
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the determinant
if(!fix.rho)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.posterior.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
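## Missing responses are imputed from the current model state: a structural
## zero (Z=1) forces the imputed Y to 0, otherwise Y is drawn from the
## Poisson component with its current fitted mean.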
if(n.miss>0)
{
Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0]) * (1-Z[which.miss==0])
}else
{}
which.zero <- which(Y.DA==0)
n.zero <- length(which.zero)
###################################
#### Update Z via data augmentation
###################################
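## For an observed zero, P(Z_k=1 | Y_k=0) = omega_k / (omega_k + (1-omega_k) * exp(-mu_k)),
## where mu_k is the Poisson mean; non-zero observations cannot be structural zeros.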
prob.pointmass <- omega[which.zero] / (omega[which.zero] + (1 - omega[which.zero]) * exp(-exp(as.matrix(X.standardised[which.zero, ]) %*% beta + offset[which.zero] + phi[which.zero])))
Z <- rep(0, K)
Z[which.zero] <- rbinom(n=n.zero, size=1, prob=prob.pointmass)
####################
## Sample from beta
####################
Z.zero <- which(Z==0)
offset.temp <- offset[Z.zero] + phi[Z.zero]
if(MALA)
{
temp <- poissonbetaupdateMALA(as.matrix(X.standardised[Z.zero, ]), length(Z.zero), p, beta, offset.temp, Y.DA[Z.zero], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(as.matrix(X.standardised[Z.zero, ]), length(Z.zero), p, beta, offset.temp, Y.DA[Z.zero], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
regression <- X.standardised %*% beta
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
####################
## Sample from phi
####################
beta.offset <- regression + offset
temp1 <- zipcarupdateRW(Wtriplet=W.triplet, Wbegfin=W.begfin, W.triplet.sum, nsites=K, phi=phi, tau2=tau2, y=Y.DA, phi_tune=proposal.sd.phi, rho=rho, offset=beta.offset, 1-Z)
phi <- temp1[[1]]
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + sum(Z==0)
##################
## Sample from tau2
##################
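## Gibbs step: tau2 has an Inverse-Gamma full conditional with shape
## tau2.posterior.shape and scale prior.tau2[2] plus the quadratic form in
## phi (with precision Q(rho)) returned by quadform().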
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, rho)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
##################
## Sample from rho
##################
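## Random-walk Metropolis step on [0,1] using a truncated Normal proposal;
## the 'hastings' term corrects for the asymmetry of the truncated proposal.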
if(!fix.rho)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q - temp2 / tau2
logprob.proposal <- det.Q.proposal - temp3 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- det.Q.proposal
accept[5] <- accept[5] + 1
}else
{
}
accept[6] <- accept[6] + 1
}else
{}
######################
#### Sample from delta
######################
offset.temp <- offset.omega
if(MALA)
{
temp <- binomialbetaupdateMALA(V.standardised, K, q, delta, offset.temp, Z, 1-Z, rep(1,K), prior.mean.delta, prior.var.delta, n.delta.block, proposal.sd.delta, list.block.delta)
}else
{
temp <- binomialbetaupdateRW(V.standardised, K, q, delta, offset.temp, Z, 1-Z, prior.mean.delta, prior.var.delta, n.delta.block, proposal.sd.delta, list.block.delta)
}
delta <- temp[[1]]
accept[7] <- accept[7] + temp[[2]]
accept[8] <- accept[8] + n.delta.block
omega <- exp(V.standardised %*% delta+offset.omega) / (1+exp(V.standardised %*% delta+offset.omega))
#########################
## Calculate the deviance
#########################
lp <- as.numeric(regression) + phi + offset
fitted <- exp(lp)
fitted.zip <- fitted * (1-omega)
temp <- rep(0,K)
temp[Z==1] <- log(omega[Z==1])
loglike <- temp + (1-Z) * (log(1-omega) + dpois(x=as.numeric(Y), lambda=fitted, log=T))
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.tau2[ele, ] <- tau2
if(!fix.rho) samples.rho[ele, ] <- rho
samples.delta[ele, ] <- delta
samples.Z[ele, ] <- Z
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted.zip
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
## beta
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
## delta
if(q>2)
{
proposal.sd.delta <- common.accceptrates1(accept[7:8], proposal.sd.delta, 40, 50)
}else
{
proposal.sd.delta <- common.accceptrates1(accept[7:8], proposal.sd.delta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
if(!fix.rho) proposal.sd.rho <- common.accceptrates2(accept[5:6], proposal.sd.rho, 40, 50, 0.5)
accept <- rep(0,8)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
if(fix.rho) samples.rho=NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.delta=samples.delta, samples.Z=samples.Z, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
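#### Illustrative sketch (not package code, toy values assumed): the
#### zero-inflated Poisson log-likelihood computed in the loop above
#### decomposes as Z*log(omega) + (1-Z)*(log(1-omega) + dpois(Y, mu, log=TRUE)).
Y.toy     <- c(0, 0, 3, 1)
Z.toy     <- c(1, 0, 0, 0)
omega.toy <- c(0.4, 0.4, 0.1, 0.1)
mu.toy    <- c(2.0, 2.0, 2.5, 1.5)
loglike.toy <- Z.toy * log(omega.toy) +
    (1 - Z.toy) * (log(1 - omega.toy) + dpois(Y.toy, lambda=mu.toy, log=TRUE))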
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/zip.lerouxCARMCMC.R
|
### R code from vignette source 'CARBayes.Rnw'
###################################################
### code chunk number 1: CARBayes.Rnw:63-64
###################################################
options(prompt = "R> ")
###################################################
### code chunk number 2: CARBayes.Rnw:423-427
###################################################
library(CARBayesdata)
library(sf)
data(pricedata)
data(GGHB.IZ)
###################################################
### code chunk number 3: CARBayes.Rnw:433-435
###################################################
head(pricedata)
head(GGHB.IZ)
###################################################
### code chunk number 4: CARBayes.Rnw:461-464
###################################################
library(dplyr)
pricedata <- pricedata %>% mutate(logprice = log(price))
head(pricedata)
###################################################
### code chunk number 5: CARBayes.Rnw:470-472
###################################################
library(GGally)
ggpairs(data = pricedata, columns = c(8, 3:7))
###################################################
### code chunk number 6: CARBayes.Rnw:485-486
###################################################
pricedata.sf <- merge(x=GGHB.IZ, y=pricedata, by="IZ", all.x=FALSE)
###################################################
### code chunk number 7: CARBayes.Rnw:494-496
###################################################
pricedata.sf <- st_transform(x=pricedata.sf,
crs='+proj=longlat +datum=WGS84 +no_defs')
###################################################
### code chunk number 8: CARBayes.Rnw:501-507
###################################################
library(mapview)
library(RColorBrewer)
map1 <- mapview(pricedata.sf, zcol = "price", col.regions=brewer.pal(9, "YlOrRd"),
alpha.regions=0.6, layer.name="Price", lwd=0.5, col="grey90",
homebutton=FALSE)
removeMapJunk(map1, junk = c("zoomControl", "layersControl"))
###################################################
### code chunk number 9: CARBayes.Rnw:523-526
###################################################
form <- logprice~crime+rooms+sales+factor(type) + driveshop
model <- lm(formula=form, data=pricedata.sf)
summary(model)
###################################################
### code chunk number 10: CARBayes.Rnw:531-535
###################################################
library(spdep)
W.nb <- poly2nb(pricedata.sf, row.names = pricedata.sf$IZ)
W.list <- nb2listw(W.nb, style="B")
moran.mc(x=residuals(model), listw=W.list, nsim=1000)
###################################################
### code chunk number 11: CARBayes.Rnw:545-546
###################################################
W <- nb2mat(W.nb, style="B")
###################################################
### code chunk number 12: CARBayes.Rnw:703-707
###################################################
library(CARBayesdata)
library(sf)
data(respiratorydata)
data(GGHB.IZ)
###################################################
### code chunk number 13: CARBayes.Rnw:713-715
###################################################
head(respiratorydata)
head(GGHB.IZ)
###################################################
### code chunk number 14: CARBayes.Rnw:721-723
###################################################
respiratorydata.sf <- merge(x=GGHB.IZ, y=respiratorydata, by="IZ", all.x=FALSE)
head(respiratorydata.sf)
###################################################
### code chunk number 15: CARBayes.Rnw:728-730
###################################################
respiratorydata.sf <- st_transform(x=respiratorydata.sf,
crs='+proj=longlat +datum=WGS84 +no_defs')
###################################################
### code chunk number 16: CARBayes.Rnw:737-743
###################################################
library(mapview)
library(RColorBrewer)
map2 <- mapview(respiratorydata.sf, zcol = "SMR", col.regions=brewer.pal(9, "YlOrRd"),
alpha.regions=0.6, layer.name="SMR", lwd=0.5, col="grey90",
homebutton=FALSE)
removeMapJunk(map2, junk = c("zoomControl", "layersControl"))
###################################################
### code chunk number 17: CARBayes.Rnw:758-760
###################################################
W.nb <- poly2nb(respiratorydata.sf, row.names = respiratorydata.sf$IZ)
W <- nb2mat(W.nb, style="B")
###################################################
### code chunk number 18: CARBayes.Rnw:775-777
###################################################
income <- respiratorydata.sf$incomedep
Z.incomedep <- as.matrix(dist(income, diag=TRUE, upper=TRUE))
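###################################################
### Hedged sketch (not extracted from the vignette): the dissimilarity
### matrix Z.incomedep is typically passed to S.CARdissimilarity() via its
### Z argument; the column names 'observed' and 'expected' are assumed here.
###################################################
## model.dis <- S.CARdissimilarity(formula=observed~offset(log(expected)),
##                                 family="poisson", data=respiratorydata.sf,
##                                 W=W, Z=list(Z.incomedep=Z.incomedep),
##                                 W.binary=TRUE, burnin=20000, n.sample=120000)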
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/inst/doc/CARBayes.R
|
MVST.CARar <- function(formula, family, data=NULL, trials=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, AR=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
## This is a wrapper function for the following six functions.
## binomial.MVCARar1
## gaussian.MVCARar1
## poisson.MVCARar1
## binomial.MVCARar2
## gaussian.MVCARar2
## poisson.MVCARar2
if(is.null(family)) stop("the 'family' argument is missing.", call.=FALSE)
if(is.null(AR)) stop("the 'AR' argument is missing, please specify 1 for an AR(1) model and 2 for an AR(2) model.", call.=FALSE)
#### Run the appropriate model according to the family argument and ar argument
if(family=="binomial" & AR==1)
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified.", call.=FALSE)
model <- binomial.MVCARar1(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else if(family=="binomial" & AR==2)
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified.", call.=FALSE)
model <- binomial.MVCARar2(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else if(family=="gaussian" & AR==1)
{
model <- gaussian.MVCARar1(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, rho.S=rho.S, rho.T=rho.T, verbose=verbose)
}else if(family=="gaussian" & AR==2)
{
model <- gaussian.MVCARar2(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, rho.S=rho.S, rho.T=rho.T, verbose=verbose)
}else if(family=="poisson" & AR==1)
{
model <- poisson.MVCARar1(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else if(family=="poisson" & AR==2)
{
model <- poisson.MVCARar2(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else
{
stop("the 'family' arugment is not one of `binomial', `gaussian' or `poisson' or the 'AR' argument is not '1' or '2'.", call.=FALSE)
}
return(model)
}
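## Hedged usage sketch (assumed objects 'dat' and 'W.mat'): a multivariate
## spatio-temporal Poisson model with AR(1) temporal dependence fitted via
## this wrapper.
## model <- MVST.CARar(formula=Y~x1+x2, family="poisson", data=dat, W=W.mat,
##                     burnin=20000, n.sample=100000, thin=10, AR=1)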
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/MVST.CARar.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
linpredcompute <- function(X, nsites, p, beta, offset) {
.Call(`_CARBayesST_linpredcompute`, X, nsites, p, beta, offset)
}
quadform <- function(Wtriplet, Wtripletsum, n_triplet, nsites, phi, theta, rho) {
.Call(`_CARBayesST_quadform`, Wtriplet, Wtripletsum, n_triplet, nsites, phi, theta, rho)
}
gammaquadformcompute <- function(Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho) {
.Call(`_CARBayesST_gammaquadformcompute`, Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho)
}
alphaquadformcompute <- function(Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho, tau2) {
.Call(`_CARBayesST_alphaquadformcompute`, Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho, tau2)
}
tauquadformcompute <- function(Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho, gamma) {
.Call(`_CARBayesST_tauquadformcompute`, Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho, gamma)
}
tauquadformcomputear2 <- function(Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho, alpha1, alpha2) {
.Call(`_CARBayesST_tauquadformcomputear2`, Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho, alpha1, alpha2)
}
poissonbetaupdateMALA <- function(X, nsites, p, beta, offset, y, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list) {
.Call(`_CARBayesST_poissonbetaupdateMALA`, X, nsites, p, beta, offset, y, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list)
}
poissonbetaupdateRW <- function(X, nsites, p, beta, offset, y, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list) {
.Call(`_CARBayesST_poissonbetaupdateRW`, X, nsites, p, beta, offset, y, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list)
}
binomialbetaupdateMALA <- function(X, nsites, p, beta, offset, y, failures, trials, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list) {
.Call(`_CARBayesST_binomialbetaupdateMALA`, X, nsites, p, beta, offset, y, failures, trials, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list)
}
binomialbetaupdateRW <- function(X, nsites, p, beta, offset, y, failures, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list) {
.Call(`_CARBayesST_binomialbetaupdateRW`, X, nsites, p, beta, offset, y, failures, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list)
}
poissoncarupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, y, phi_tune, rho, offset, ntime, mult_offset) {
.Call(`_CARBayesST_poissoncarupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, y, phi_tune, rho, offset, ntime, mult_offset)
}
poissonindepupdateRW <- function(nsites, theta, tau2, y, theta_tune, offset) {
.Call(`_CARBayesST_poissonindepupdateRW`, nsites, theta, tau2, y, theta_tune, offset)
}
binomialindepupdateRW <- function(nsites, theta, tau2, y, failures, theta_tune, offset) {
.Call(`_CARBayesST_binomialindepupdateRW`, nsites, theta, tau2, y, failures, theta_tune, offset)
}
binomialcarupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, y, failures, phi_tune, rho, offset, ntime, mult_offset) {
.Call(`_CARBayesST_binomialcarupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, y, failures, phi_tune, rho, offset, ntime, mult_offset)
}
gaussiancarupdate <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, nu2, offset, rho, ntime) {
.Call(`_CARBayesST_gaussiancarupdate`, Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, nu2, offset, rho, ntime)
}
poissonar1carupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, gamma, rho, ymat, phi_tune, offset, denoffset) {
.Call(`_CARBayesST_poissonar1carupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, gamma, rho, ymat, phi_tune, offset, denoffset)
}
poissonar2carupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, alpha1, alpha2, rho, ymat, phi_tune, offset, denoffset) {
.Call(`_CARBayesST_poissonar2carupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, alpha1, alpha2, rho, ymat, phi_tune, offset, denoffset)
}
binomialar1carupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, gamma, rho, ymat, failuresmat, phi_tune, offset, denoffset) {
.Call(`_CARBayesST_binomialar1carupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, gamma, rho, ymat, failuresmat, phi_tune, offset, denoffset)
}
binomialar2carupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, alpha1, alpha2, rho, ymat, failuresmat, phi_tune, offset, denoffset) {
.Call(`_CARBayesST_binomialar2carupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, alpha1, alpha2, rho, ymat, failuresmat, phi_tune, offset, denoffset)
}
gaussianar1carupdate <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, nu2, gamma, rho, offset, denoffset) {
.Call(`_CARBayesST_gaussianar1carupdate`, Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, nu2, gamma, rho, offset, denoffset)
}
gaussianar2carupdate <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, nu2, alpha1, alpha2, rho, offset, denoffset) {
.Call(`_CARBayesST_gaussianar2carupdate`, Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, tau2, nu2, alpha1, alpha2, rho, offset, denoffset)
}
binomialmvar1carupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha, rho, Sigmainv, ymat, failuresmat, innovations, offset, denoffset) {
.Call(`_CARBayesST_binomialmvar1carupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha, rho, Sigmainv, ymat, failuresmat, innovations, offset, denoffset)
}
binomialmvar2carupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha1, alpha2, rho, Sigmainv, ymat, failuresmat, innovations, offset, denoffset) {
.Call(`_CARBayesST_binomialmvar2carupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha1, alpha2, rho, Sigmainv, ymat, failuresmat, innovations, offset, denoffset)
}
poissonmvar1carupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha, rho, Sigmainv, ymat, innovations, offset, denoffset) {
.Call(`_CARBayesST_poissonmvar1carupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha, rho, Sigmainv, ymat, innovations, offset, denoffset)
}
poissonmvar2carupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha1, alpha2, rho, Sigmainv, ymat, innovations, offset, denoffset) {
.Call(`_CARBayesST_poissonmvar2carupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha1, alpha2, rho, Sigmainv, ymat, innovations, offset, denoffset)
}
gaussianmvar1carupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha, rho, Sigmainv, nu2, innovations, offset, denoffset) {
.Call(`_CARBayesST_gaussianmvar1carupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha, rho, Sigmainv, nu2, innovations, offset, denoffset)
}
gaussianmvar2carupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha1, alpha2, rho, Sigmainv, nu2, innovations, offset, denoffset) {
.Call(`_CARBayesST_gaussianmvar2carupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsite, ntime, nvar, phi, alpha1, alpha2, rho, Sigmainv, nu2, innovations, offset, denoffset)
}
MVSTquadformcompute <- function(Wtriplet, Wtripletsum, n_triplet, den_offset, nsite, nvar, phit, phij, rho, Sigmainv) {
.Call(`_CARBayesST_MVSTquadformcompute`, Wtriplet, Wtripletsum, n_triplet, den_offset, nsite, nvar, phit, phij, rho, Sigmainv)
}
MVSTrhoTAR1compute <- function(Wtriplet, Wtripletsum, n_triplet, den_offset, nsite, ntime, nvar, phi, rho, Sigmainv) {
.Call(`_CARBayesST_MVSTrhoTAR1compute`, Wtriplet, Wtripletsum, n_triplet, den_offset, nsite, ntime, nvar, phi, rho, Sigmainv)
}
MVSTrhoTAR2compute <- function(Wtriplet, Wtripletsum, n_triplet, den_offset, nsite, ntime, nvar, phi, rho, Sigmainv) {
.Call(`_CARBayesST_MVSTrhoTAR2compute`, Wtriplet, Wtripletsum, n_triplet, den_offset, nsite, ntime, nvar, phi, rho, Sigmainv)
}
MVSTrhoSAR1compute <- function(Wtriplet, Wtripletsum, n_triplet, den_offset, nsite, ntime, nvar, phi, rho, alpha, Sigmainv) {
.Call(`_CARBayesST_MVSTrhoSAR1compute`, Wtriplet, Wtripletsum, n_triplet, den_offset, nsite, ntime, nvar, phi, rho, alpha, Sigmainv)
}
MVSTrhoSAR2compute <- function(Wtriplet, Wtripletsum, n_triplet, den_offset, nsite, ntime, nvar, phi, rho, alpha1, alpha2, Sigmainv) {
.Call(`_CARBayesST_MVSTrhoSAR2compute`, Wtriplet, Wtripletsum, n_triplet, den_offset, nsite, ntime, nvar, phi, rho, alpha1, alpha2, Sigmainv)
}
qform <- function(Qtrip, phi) {
.Call(`_CARBayesST_qform`, Qtrip, phi)
}
qform_asym <- function(Qtrip, phi1, phi2) {
.Call(`_CARBayesST_qform_asym`, Qtrip, phi1, phi2)
}
qformSPACETIME <- function(Qtrip, phi, ntime, nsite) {
.Call(`_CARBayesST_qformSPACETIME`, Qtrip, phi, ntime, nsite)
}
SPTICARphiGaussian <- function(W, nsites, ntimes, phi, nneighbours, tau, lik_var, y, alpha, XB) {
.Call(`_CARBayesST_SPTICARphiGaussian`, W, nsites, ntimes, phi, nneighbours, tau, lik_var, y, alpha, XB)
}
qform_difference_ST <- function(Qtrip, Qtime, phi, nsites) {
.Call(`_CARBayesST_qform_difference_ST`, Qtrip, Qtime, phi, nsites)
}
qform_ST <- function(Qspace, Qtime, phi, nsites) {
.Call(`_CARBayesST_qform_ST`, Qspace, Qtime, phi, nsites)
}
qform_ST_asym <- function(Qspace, Qtime, phi1, phi2, nsites) {
.Call(`_CARBayesST_qform_ST_asym`, Qspace, Qtime, phi1, phi2, nsites)
}
update_Qtime <- function(Qtime, alpha, rowNumberLastDiag) {
.Call(`_CARBayesST_update_Qtime`, Qtime, alpha, rowNumberLastDiag)
}
updatetriplets_rho <- function(trips, nsites, rho_old, rho_new, fixedridge) {
.Call(`_CARBayesST_updatetriplets_rho`, trips, nsites, rho_old, rho_new, fixedridge)
}
updatetripList2 <- function(trips, vold, vnew, nedges, nsites, block, block_length, rho, fixedridge) {
.Call(`_CARBayesST_updatetripList2`, trips, vold, vnew, nedges, nsites, block, block_length, rho, fixedridge)
}
SPTICARphiBinomial <- function(W, nsites, ntimes, phi, nneighbours, tau, y, alpha, XB, phiVarb_tune, trials) {
.Call(`_CARBayesST_SPTICARphiBinomial`, W, nsites, ntimes, phi, nneighbours, tau, y, alpha, XB, phiVarb_tune, trials)
}
SPTICARphiVarb <- function(W, nsites, ntimes, phiVarb, nneighbours, tau, y, E, phiVarb_tune, alpha, XB) {
.Call(`_CARBayesST_SPTICARphiVarb`, W, nsites, ntimes, phiVarb, nneighbours, tau, y, E, phiVarb_tune, alpha, XB)
}
Zupdatesqbin <- function(Z, Offset, Y, delta, lambda, nsites, ntime, G, SS, prioroffset, Gstar, failures) {
.Call(`_CARBayesST_Zupdatesqbin`, Z, Offset, Y, delta, lambda, nsites, ntime, G, SS, prioroffset, Gstar, failures)
}
Zupdatesqpoi <- function(Z, Offset, Y, delta, lambda, nsites, ntime, G, SS, prioroffset, Gstar) {
.Call(`_CARBayesST_Zupdatesqpoi`, Z, Offset, Y, delta, lambda, nsites, ntime, G, SS, prioroffset, Gstar)
}
Zupdatesqgau <- function(Z, Offset, delta, lambda, nsites, ntime, G, SS, prioroffset, Gstar, nu2) {
.Call(`_CARBayesST_Zupdatesqgau`, Z, Offset, delta, lambda, nsites, ntime, G, SS, prioroffset, Gstar, nu2)
}
tau2compute <- function(tau2, temp, tau2_shape, prior_tau2, N) {
.Call(`_CARBayesST_tau2compute`, tau2, temp, tau2_shape, prior_tau2, N)
}
rhoquadformcompute <- function(Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho, tau2) {
.Call(`_CARBayesST_rhoquadformcompute`, Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho, tau2)
}
binomialsrecarupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, rho, ymat, failuresmat, phi_tune, offset, denoffset, tau2) {
.Call(`_CARBayesST_binomialsrecarupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, rho, ymat, failuresmat, phi_tune, offset, denoffset, tau2)
}
poissonsrecarupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, rho, ymat, phi_tune, offset, denoffset, tau2) {
.Call(`_CARBayesST_poissonsrecarupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsites, ntime, phi, rho, ymat, phi_tune, offset, denoffset, tau2)
}
tauquadformcompute2 <- function(Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho) {
.Call(`_CARBayesST_tauquadformcompute2`, Wtriplet, Wtripletsum, n_triplet, nsites, ntime, phi, rho)
}
tempupdate <- function(Nchains, dt) {
.Call(`_CARBayesST_tempupdate`, Nchains, dt)
}
matcomp <- function(X, beta, prop, p, Nchains) {
.Call(`_CARBayesST_matcomp`, X, beta, prop, p, Nchains)
}
offsetcompute <- function(w, gamma, time, Nchains, nsites, Ntrends, begin) {
.Call(`_CARBayesST_offsetcompute`, w, gamma, time, Nchains, nsites, Ntrends, begin)
}
matN <- function(x, nsites, Nchains) {
.Call(`_CARBayesST_matN`, x, nsites, Nchains)
}
linpredcomputeNchains <- function(X, nsites, p, beta, Nchains) {
.Call(`_CARBayesST_linpredcomputeNchains`, X, nsites, p, beta, Nchains)
}
gammaproposal <- function(Nchains, gamma, gamma_tune, prior_vargamma, Wareas, trend, knots) {
.Call(`_CARBayesST_gammaproposal`, Nchains, gamma, gamma_tune, prior_vargamma, Wareas, trend, knots)
}
lambdaupdate <- function(Nchains, temp) {
.Call(`_CARBayesST_lambdaupdate`, Nchains, temp)
}
tau2quadform <- function(Wtriplet, Wtripletsum, n_triplet, nsites, phi, theta, rho, Nchains) {
.Call(`_CARBayesST_tau2quadform`, Wtriplet, Wtripletsum, n_triplet, nsites, phi, theta, rho, Nchains)
}
tau2computeNchains <- function(temp, tau2_shape, prior_tau2, Nchains) {
.Call(`_CARBayesST_tau2computeNchains`, temp, tau2_shape, prior_tau2, Nchains)
}
rhoquadformcomputeNchains <- function(Wtriplet, Wtripletsum, n_triplet, nsites, Nchains, phi, rho, tau2) {
.Call(`_CARBayesST_rhoquadformcomputeNchains`, Wtriplet, Wtripletsum, n_triplet, nsites, Nchains, phi, rho, tau2)
}
Qdet <- function(Nchains, rho, Wstar_val) {
.Call(`_CARBayesST_Qdet`, Nchains, rho, Wstar_val)
}
poissondevfit <- function(y, fitted, nsites, Nchains) {
.Call(`_CARBayesST_poissondevfit`, y, fitted, nsites, Nchains)
}
poissonbetablockupdate <- function(nsites, beta, betaprop, lp_beta, lp_betaprop, offset, y, prior_meanbeta, prior_varbeta, Nchains, temps, p) {
.Call(`_CARBayesST_poissonbetablockupdate`, nsites, beta, betaprop, lp_beta, lp_betaprop, offset, y, prior_meanbeta, prior_varbeta, Nchains, temps, p)
}
poissongammaupdate <- function(nsites, gamma, proposal, offset, offset_proposal, y, prior_meangamma, prior_vargamma, Nchains, temps) {
.Call(`_CARBayesST_poissongammaupdate`, nsites, gamma, proposal, offset, offset_proposal, y, prior_meangamma, prior_vargamma, Nchains, temps)
}
poissonwupdate <- function(nsites, ntimes, w, offset, offset_proposal, w_proposal, y, lambda, Nchains, temps, begin, regbegin, Ntrends) {
.Call(`_CARBayesST_poissonwupdate`, nsites, ntimes, w, offset, offset_proposal, w_proposal, y, lambda, Nchains, temps, begin, regbegin, Ntrends)
}
poissonphiupdate <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, ntimes, phi, offset, y, tau2, rho, Nchains, temps, phi_tune, regbegin) {
.Call(`_CARBayesST_poissonphiupdate`, Wtriplet, Wbegfin, Wtripletsum, nsites, ntimes, phi, offset, y, tau2, rho, Nchains, temps, phi_tune, regbegin)
}
poissoncouplingAllupdate <- function(nsites, K, p, w, offset, beta, gamma, lambda, phi, rho, tau2, Wtripletsum, Wtriplet, Wbegfin, y, prior_meanbeta, prior_varbeta, prior_meantrends, prior_vartrends, prior_lambda, prior_tau2, swap, temps, begin, Ntrends, TrendSel) {
.Call(`_CARBayesST_poissoncouplingAllupdate`, nsites, K, p, w, offset, beta, gamma, lambda, phi, rho, tau2, Wtripletsum, Wtriplet, Wbegfin, y, prior_meanbeta, prior_varbeta, prior_meantrends, prior_vartrends, prior_lambda, prior_tau2, swap, temps, begin, Ntrends, TrendSel)
}
binomialdevfit <- function(y, trials, probs, nsites, Nchains) {
.Call(`_CARBayesST_binomialdevfit`, y, trials, probs, nsites, Nchains)
}
binomialbetablockupdate <- function(nsites, beta, betaprop, lp_beta, lp_betaprop, offset, y, failures, prior_meanbeta, prior_varbeta, Nchains, temps, p) {
.Call(`_CARBayesST_binomialbetablockupdate`, nsites, beta, betaprop, lp_beta, lp_betaprop, offset, y, failures, prior_meanbeta, prior_varbeta, Nchains, temps, p)
}
binomialgammaupdate <- function(nsites, gamma, proposal, offset, offset_proposal, y, failures, prior_meangamma, prior_vargamma, Nchains, temps) {
.Call(`_CARBayesST_binomialgammaupdate`, nsites, gamma, proposal, offset, offset_proposal, y, failures, prior_meangamma, prior_vargamma, Nchains, temps)
}
binomialwupdate <- function(nsites, ntimes, w, offset, offset_proposal, w_proposal, y, failures, lambda, Nchains, temps, begin, regbegin, Ntrends) {
.Call(`_CARBayesST_binomialwupdate`, nsites, ntimes, w, offset, offset_proposal, w_proposal, y, failures, lambda, Nchains, temps, begin, regbegin, Ntrends)
}
binomialphiupdate <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, ntimes, phi, offset, y, failures, tau2, rho, Nchains, temps, phi_tune, regbegin) {
.Call(`_CARBayesST_binomialphiupdate`, Wtriplet, Wbegfin, Wtripletsum, nsites, ntimes, phi, offset, y, failures, tau2, rho, Nchains, temps, phi_tune, regbegin)
}
binomialcouplingAllupdate <- function(nsites, K, p, w, offset, beta, gamma, lambda, phi, rho, tau2, Wtripletsum, Wtriplet, Wbegfin, y, failures, prior_meanbeta, prior_varbeta, prior_meantrends, prior_vartrends, prior_lambda, prior_tau2, swap, temps, begin, Ntrends, TrendSel) {
.Call(`_CARBayesST_binomialcouplingAllupdate`, nsites, K, p, w, offset, beta, gamma, lambda, phi, rho, tau2, Wtripletsum, Wtriplet, Wbegfin, y, failures, prior_meanbeta, prior_varbeta, prior_meantrends, prior_vartrends, prior_lambda, prior_tau2, swap, temps, begin, Ntrends, TrendSel)
}
optimise_graph <- function(adj, data, add = FALSE, remove = TRUE, remove_first = FALSE) {
.Call(`_CARBayesST_optimise_graph`, adj, data, add, remove, remove_first)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/RcppExports.R
|
ST.CARadaptive <- function(formula, family, data=NULL, trials=NULL, W, burnin, n.sample, thin=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.tau2=NULL, rho = NULL, epsilon = 0, MALA=TRUE, verbose=TRUE)
{
## This is a wrapper function for the following three functions.
## binomial.CARadaptive
## gaussian.CARadaptive
## poisson.CARadaptive
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
#### Run the appropriate model according to the family argument
if(family=="binomial")
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
model <- binomial.CARadaptive(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho = rho, epsilon = epsilon, MALA=MALA, verbose=verbose)
}else if(family=="gaussian")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- gaussian.CARadaptive(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, rho = rho, epsilon = epsilon, verbose=verbose)
}else if(family=="poisson")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- poisson.CARadaptive(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho = rho, epsilon = epsilon, MALA=MALA, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial', `gaussian' or `poisson'.", call.=FALSE)
}
return(model)
}
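## Hedged usage sketch (assumed objects 'dat', 'trials.vec' and 'W.mat'); rho
## can be fixed or left NULL to be estimated, and epsilon is the diagonal
## ridge added to the precision matrix.
## model <- ST.CARadaptive(formula=Y~x1, family="binomial", data=dat,
##                         trials=trials.vec, W=W.mat, burnin=20000,
##                         n.sample=100000, thin=10, rho=NULL, epsilon=0)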
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/ST.CARadaptive.R
|
ST.CARanova <- function(formula, family, data=NULL, trials=NULL, W, interaction=TRUE, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.tau2=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
## This is a wrapper function for the following three functions.
## binomial.CARanova
## gaussian.CARanova
## poisson.CARanova
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
#### Run the appropriate model according to the family argument
if(family=="binomial")
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
model <- binomial.CARanova(formula=formula, data=data, trials=trials, W=W, interaction=interaction, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else if(family=="gaussian")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- gaussian.CARanova(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, rho.S=rho.S, rho.T=rho.T, verbose=verbose)
}else if(family=="poisson")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- poisson.CARanova(formula=formula, data=data, W=W, interaction=interaction, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial', `gaussian' or `poisson'.", call.=FALSE)
}
return(model)
}
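## Hedged usage sketch (assumed objects 'dat' and 'W.mat'); interaction=TRUE
## includes the space-time interaction random effect in the ANOVA decomposition.
## model <- ST.CARanova(formula=Y~x1, family="poisson", data=dat, W=W.mat,
##                      interaction=TRUE, burnin=20000, n.sample=100000)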
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/ST.CARanova.R
|
ST.CARar <- function(formula, family, data=NULL, trials=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.tau2=NULL, AR=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
## This is a wrapper function for the following six functions.
## binomial.CARar1
## gaussian.CARar1
## poisson.CARar1
## binomial.CARar2
## gaussian.CARar2
## poisson.CARar2
if(is.null(family)) stop("the 'family' argument is missing.", call.=FALSE)
if(is.null(AR)) stop("the 'AR' argument is missing, please specify 1 for an AR(1) model and 2 for an AR(2) model.", call.=FALSE)
#### Run the appropriate model according to the family argument and ar argument
if(family=="binomial" & AR==1)
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified.", call.=FALSE)
model <- binomial.CARar1(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else if(family=="binomial" & AR==2)
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified.", call.=FALSE)
model <- binomial.CARar2(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else if(family=="gaussian" & AR==1)
{
model <- gaussian.CARar1(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, rho.S=rho.S, rho.T=rho.T, verbose=verbose)
}else if(family=="gaussian" & AR==2)
{
model <- gaussian.CARar2(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, rho.S=rho.S, rho.T=rho.T, verbose=verbose)
}else if(family=="poisson" & AR==1)
{
model <- poisson.CARar1(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else if(family=="poisson" & AR==2)
{
model <- poisson.CARar2(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else
{
stop("the 'family' arugment is not one of `binomial', `gaussian' or `poisson' or the 'AR' argument is not '1' or '2'.", call.=FALSE)
}
return(model)
}
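## Hedged usage sketch (assumed objects 'dat' and 'W.mat'); AR selects the
## temporal autoregressive order (1 or 2).
## model <- ST.CARar(formula=Y~x1, family="gaussian", data=dat, W=W.mat,
##                   burnin=20000, n.sample=100000, AR=1)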
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/ST.CARar.R
|
ST.CARclustrends <- function(formula, family, data=NULL, trials=NULL, W, burnin, n.sample, thin=1, trends=NULL, changepoint=NULL, knots=NULL, prior.mean.beta=NULL, prior.var.beta=NULL, prior.mean.gamma=NULL, prior.var.gamma=NULL, prior.lambda=NULL, prior.tau2=NULL, Nchains=4, verbose=TRUE)
{
## This is a wrapper function for the following two functions.
## binomial.CARclustrends
## poisson.CARclustrends
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
#### Run the appropriate model according to the family argument
if(family=="binomial")
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
model <- binomial.CARclustrends(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, trends=trends, changepoint=changepoint, knots=knots,
prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.gamma=prior.mean.gamma, prior.var.gamma=prior.var.gamma,
                                        prior.lambda=prior.lambda, prior.tau2=prior.tau2, Nchains=Nchains, verbose=verbose)
}else if(family=="poisson")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- poisson.CARclustrends(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, trends=trends, changepoint=changepoint, knots=knots,
prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.gamma=prior.mean.gamma, prior.var.gamma=prior.var.gamma,
                                        prior.lambda=prior.lambda, prior.tau2=prior.tau2, Nchains=Nchains, verbose=verbose)
}else
{
stop("the family argument is not one of `binomial' or `poisson'.", call.=FALSE)
}
return(model)
}
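## Hedged usage sketch (assumed objects 'dat' and 'W.mat'); the 'trends'
## argument names the candidate temporal trend shapes, e.g. constant ("Constant")
## and linearly increasing/decreasing ("LI"/"LD") - labels assumed from the
## package documentation.
## model <- ST.CARclustrends(formula=Y~1, family="poisson", data=dat, W=W.mat,
##                           burnin=20000, n.sample=100000, thin=10,
##                           trends=c("Constant", "LI", "LD"), Nchains=4)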
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/ST.CARclustrends.R
|
ST.CARlinear <- function(formula, family, data=NULL, trials=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.mean.alpha=NULL, prior.var.alpha=NULL, prior.nu2=NULL, prior.tau2=NULL, rho.slo=NULL, rho.int=NULL, MALA=TRUE, verbose=TRUE)
{
## This is a wrapper function for the following three functions.
## binomial.CARlinear
## gaussian.CARlinear
## poisson.CARlinear
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
#### Run the appropriate model according to the family argument
if(family=="binomial")
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
model <- binomial.CARlinear(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.tau2=prior.tau2, rho.slo=rho.slo, rho.int=rho.int, MALA=MALA, verbose=verbose)
}else if(family=="gaussian")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- gaussian.CARlinear(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.nu2=prior.nu2, prior.tau2=prior.tau2, rho.slo=rho.slo, rho.int=rho.int, verbose=verbose)
}else if(family=="poisson")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- poisson.CARlinear(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.tau2=prior.tau2, rho.slo=rho.slo, rho.int=rho.int, MALA=MALA, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial', `gaussian' or `poisson'.", call.=FALSE)
}
return(model)
}
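## Hedged usage sketch (assumed objects 'dat' and 'W.mat'); rho.slo and rho.int
## control the spatial dependence of the area-specific slopes and intercepts.
## model <- ST.CARlinear(formula=Y~x1, family="poisson", data=dat, W=W.mat,
##                       burnin=20000, n.sample=100000)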
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/ST.CARlinear.R
|
ST.CARlocalised <- function(formula, family, data=NULL, G, trials=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.delta=NULL, prior.tau2=NULL, MALA=TRUE, verbose=TRUE)
{
## This is a wrapper function for the following two functions.
## binomial.CARlocalised
## poisson.CARlocalised
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
#### Run the appropriate model according to the family argument
if(family=="binomial")
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
model <- binomial.CARlocalised(formula=formula, data=data, G=G, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.delta=prior.delta, prior.tau2=prior.tau2, MALA=MALA, verbose=verbose)
}else if(family=="poisson")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- poisson.CARlocalised(formula=formula, data=data, G=G, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.delta=prior.delta, prior.tau2=prior.tau2, MALA=MALA, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial' or `poisson'.", call.=FALSE)
}
return(model)
}
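## Hedged usage sketch (assumed objects 'dat' and 'W.mat'); G is the maximum
## number of risk classes in the localised structure.
## model <- ST.CARlocalised(formula=Y~1, family="poisson", data=dat, G=3,
##                          W=W.mat, burnin=20000, n.sample=100000)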
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/ST.CARlocalised.R
|
ST.CARsepspatial <- function(formula, family, data=NULL, trials=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
## This is a wrapper function for the following two functions.
## binomial.CARsepspatial
## poisson.CARsepspatial
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
#### Run the appropriate model according to the family argument
if(family=="binomial")
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
model <- binomial.CARsepspatial(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else if(family=="poisson")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- poisson.CARsepspatial(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho.S=rho.S, rho.T=rho.T, MALA=MALA, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial' or `poisson'.", call.=FALSE)
}
return(model)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/ST.CARsepspatial.R
|
W.estimate <- function(W, spdata, add=FALSE, remove=TRUE, remove_first=FALSE)
{
#### This function is a wrapper for the C++ function that optimises the neighbourhood matrix W to a given data set.
###################################################
#### Perform checks to prevent inappropriate inputs
###################################################
if(class(W)[1]!="matrix") stop("W is not a matrix.", call.=FALSE)
if(sum(is.na(W))>0) stop("W contains NA values.", call.=FALSE)
if(nrow(W)!=ncol(W)) stop("W is not a square matrix.", call.=FALSE)
if(length(names(table(W)))!=2) stop("W contains more than two distinct values.", call.=FALSE)
if(sum(as.numeric(names(table(W))) != c(0,1))>0) stop("W contains elements other than 0s and 1s.", call.=FALSE)
if(sum(diag(W))>0) stop("The diagonal of W contains non-zero elements.", call.=FALSE)
if(length(spdata)!=nrow(W)) stop("W and spdata do not have matching dimensions.", call.=FALSE)
if(!is.numeric(spdata)) stop("spdata is not a numeric vector.", call.=FALSE)
if(sum(is.na(spdata))>0) stop("spdata contains NA values.", call.=FALSE)
if(sum(spdata==Inf)>0) stop("spdata contains Inf values.", call.=FALSE)
if(sum(spdata==-Inf)>0) stop("spdata contains -Inf values.", call.=FALSE)
######################################
#### Optimise the neighbourhood matrix
######################################
#### Optimise W
K <- nrow(W)
W.temp <- optimise_graph(W, spdata, add=add, remove=remove, remove_first=remove_first)
#### Create the matrix version
W.est <- array(NA, c(K,K))
for(k in 1:K)
{
W.est[k, ] <- W.temp[[k]]
}
#### Return the result
return(W.est)
}
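#### Illustrative sketch (toy 2 x 2 lattice, assumed data): edges joining
#### units with very different data values are candidates for removal by
#### the underlying optimise_graph() routine.
W.toy <- matrix(c(0,1,1,0,
                  1,0,0,1,
                  1,0,0,1,
                  0,1,1,0), nrow=4, byrow=TRUE)
spdata.toy <- c(0.1, 0.2, 1.9, 2.0)
## W.hat <- W.estimate(W.toy, spdata.toy)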
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/W.estimate.R
|
binomial.CARadaptive <- function(formula, data = NULL, trials, W, burnin, n.sample, thin = 1, prior.mean.beta = NULL, prior.var.beta = NULL, prior.tau2 = NULL, rho = NULL, epsilon = 0, MALA=TRUE, verbose = TRUE)
{
#### Verbose
a <- common.verbose(verbose)
blocksize.beta <- 5
blocksize.v <- 10
z <- which(W > 0, arr.ind = T)
locs <- z[which(z[,1] < z[,2]), ]
char.locs <- paste(locs[,1], ".", locs[,2], sep = "")
n.edges <- nrow(locs)
logit <- function(p) log(p/(1-p))
inv_logit <- function(v) 1/(1+exp(-v))
# interpret the formula
frame <- try(suppressWarnings(model.frame(formula, data = data, na.action=na.pass)), silent=TRUE)
if(inherits(frame, "try-error")) stop("the formula inputted contains an error, e.g. the variables may be of different lengths.", call.=FALSE)
X <- try(suppressWarnings(model.matrix(object=attr(frame, "terms"), data=frame)), silent=TRUE)
if(sum(is.na(X))>0) stop("the covariate matrix contains missing 'NA' values.", call.=FALSE)
# get summaries of the model matrix
p <- ncol(X)
y <- model.response(frame)
which.miss <- as.numeric(!is.na(y))
n.sites <- as.integer(nrow(W))
n.time <- as.integer(length(y)/n.sites)
k <- as.integer(round(n.sites*n.time, 0))
#### Check and specify the priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
# identify and error check the offset term, if it exists.
offset <- try(model.offset(frame), silent=TRUE)
if(is.null(offset)) offset <- rep(0,(n.time * n.sites))
if(sum(is.na(offset))>0) stop("the offset has missing 'NA' values.", call.=FALSE)
if(!is.numeric(offset)) stop("the offset variable has non-numeric values.", call.=FALSE)
#### Format and check the MCMC quantities
common.burnin.nsample.thin.check(burnin, n.sample, thin)
## Check for linearly related columns
cor.X <- suppressWarnings(cor(X))
diag(cor.X) <- 0
if(max(cor.X, na.rm=TRUE)==1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(min(cor.X, na.rm=TRUE)==-1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(p>1)
{
if(sort(apply(X, 2, sd))[2]==0) stop("the covariate matrix has two intercept terms.", call.=FALSE)
}else
{
}
# check trials and y values
if(sum(is.na(trials))>0) stop("the numbers of trials has missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials has non-numeric values.", call.=FALSE)
int.check <- k-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials has non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials has zero or negative values.", call.=FALSE)
if(sum(is.na(y))>0) stop("the response has missing 'NA' values.", call.=FALSE)
if(!is.numeric(y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- k-sum(ceiling(y)==floor(y))
if(int.check > 0) stop("the respons variable has non-integer values.", call.=FALSE)
if(min(y)<0) stop("the response variable has negative values.", call.=FALSE)
if(sum(y>trials)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
y <- as.numeric(y)
failures <- trials - y
## Standardise the model matrix,
X.standardised <- X
X.sd <- apply(X, 2, sd)
X.mean <- apply(X, 2, mean)
X.indicator <- rep(NA, p) # To determine which parameter estimates to transform back
for(j in 1:p){
if(length(table(X[ ,j])) > 2){
X.indicator[j] <- 1
X.standardised[ ,j] <- (X[ ,j] - mean(X[ ,j])) / sd(X[ ,j])
}else if(length(table(X[ ,j]))==1){
X.indicator[j] <- 2
}else{
X.indicator[j] <- 0
}
}
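# X.indicator codes: 1 = continuous covariate (standardised above),
# 2 = constant column (e.g. the intercept), 0 = binary covariate (unchanged);
# this is used later to transform the regression estimates back to the original scale.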
# based on the blocksize.v provided create lists with relevant bits for untransformed edge parameter update
if(is.numeric(blocksize.v)){
## Compute the blocking structure for v
fromto <- seq(0, n.edges, by = blocksize.v)
fromto[1] <- 0
if(!n.edges %in% fromto) fromto <- c(fromto, n.edges)
n.blocks <- length(fromto) - 1
blockinds <- vector("list", length = n.blocks)
for(i in 1:n.blocks) blockinds[[i]] <- (fromto[i] + 1):fromto[i + 1]
}
# propose starting values for the adjacency elements (very close to 1)
# current temporary version of the adjacency is W_current
v <- logit(rtruncnorm(n.edges, mean = 0.999, sd = 0.001, a = 0, b=1))
v_15 <- v - 15
vqform_current <- sum(v_15^2)
W_current <- W
W_current[locs][1:n.edges] <- inv_logit(v)
W_current[locs[,2:1]][1:n.edges] <- inv_logit(v)
# given the temporary adjacency, construct a temporary (Q.space) and proposal (Q.space.prop)
# for the prior ICAR precision for phi. Associated with these is the triplet form tripList.
# get the cholesky of Q.space, and its determinant
# if rho is not fixed, then ridge must be fixed
rhofix <- rho
rho <- ifelse(!is.null(rhofix), rhofix, 0.99)
fixedridge <- epsilon
if(rho==1) fixedridge <- 0.0001
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
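## 'fixedridge' adds a small constant to the diagonal of the spatial precision
## so that Q remains nonsingular (needed when rho = 1, the intrinsic CAR case).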
tripList <- vector("list", length = 2)
tripList[[1]] <- cbind(1:nrow(W_current), 1:nrow(W_current), rowSums(W_current) + fixedridge)
tripList[[2]] <- cbind(rbind(locs, locs[,2:1]), -rep(inv_logit(v), 2))
Q.space.trip <- rbind(tripList[[1]], tripList[[2]])
Q.space.trip <- updatetriplets_rho(trips = Q.space.trip, nsites = n.sites, rho_old = 1, rho_new = rho, fixedridge = fixedridge)
Q.space <- Q.space.prop <- spam(list(i = Q.space.trip[,1], j = Q.space.trip[,2], Q.space.trip[,3]))
chol.Q.space <- chol.spam(Q.space)
Q.space.det.old <- n.time*2*determinant(chol.Q.space, logarithm = T)$modulus
# propose an initial value for alpha, the temporal correlation parameter
# using alpha, create initial temporal precision matrices Q.time
alpha <- 1
if(n.time > 1){
# this bit constructs Q.time, temporal precision, its determinant, and triplet form
Q.block <- as.spam(crossprod(diff(diag(n.time))))
Q.block[1,1] <- Q.block[1,1] + 1
Dg <- diag.spam(diag.spam(Q.block))
R <- Q.block - Dg
Dtime <- diag.spam( c(rep(1,nrow(Q.block)-1), 0))
Dg <- Dg - Dtime
Q.time <- Dg + Dtime*alpha^2+ R*alpha
Q.time[n.time,n.time] <- 1
Q.det <- determinant(Q.time, logarithm = T)
detTime <- as.numeric(0.5*n.sites*(Q.det$m)*(Q.det$s))
Q.time.trip <- Reduce("cbind", triplet(Q.time))
} else {
# if n.time == 1, detTime equals 1 and Q.time is just a 1 x 1 matrix.
Q.time <- 1
detTime <- 1
Q.time.trip <- matrix(rep(1, 3), ncol = 3)
}
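# For illustration: with n.time = 3 the construction above yields the AR(1)
# style temporal precision matrix (writing a = alpha)
#   Q.time = | 1+a^2   -a     0 |
#            |  -a    1+a^2  -a |
#            |   0     -a     1 |
# so consecutive time points are conditionally dependent with strength alpha.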
# MCMC parameter starting values
phi_tune <- 0.5
W.tune <- 1
rho.tune <- 0.1
tau_v <- 200
prior.max.tau <- 1000
increment <- 0
glm_mod <- glm(y/trials ~-1+X.standardised, family = "quasibinomial", weights = trials, offset = offset)
beta.mean <- glm_mod$coefficients
beta.sd <- sqrt(diag(summary(glm_mod)$cov.scaled))
beta_par <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
theta.hat <- y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta_par - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=k, mean=0, sd = res.sd)
tau <- var(phi)/10
phiQphi <- qform_ST(Qspace = Q.space.trip, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
XB <- X.standardised %*% beta_par
tau_v.shape <- (n.edges/2) + prior.tau2[1]
tau_phi_shape <- (n.sites*n.time/2) + prior.tau2[1]
# general MCMC housekeeping
n.save <- ifelse(thin == 1, (n.sample - burnin), (n.sample - burnin) / thin)
accept <- rep(0, 8)
# storage of parameters in the MCMC,
samples.beta <- array(NA, c(n.save, p))
samples.phi <- array(NA, c(n.save, n.sites * n.time))
samples.tau2 <- samples.vtau2 <- samples.alpha <- samples.rho <- matrix(0, n.save, 1)
samples.v <- matrix(0, nrow = n.save, ncol = n.edges)
samples.fit <- array(NA, c(n.save, n.sites * n.time))
samples.loglike <- array(NA, c(n.save, n.sites*n.time))
# turn off spam check options to speed things up (a bit)
options(spam.cholsymmetrycheck = FALSE)
options(spam.cholpivotcheck = FALSE)
options(spam.safemode = c(F, F, F))
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
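# Note that beta.beg[r]:beta.fin[r]-1 parses as (beta.beg[r]:beta.fin[r]) - 1
# because ':' binds more tightly than '-', so each block holds the zero-based
# indices expected by the compiled (C++) updating functions.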
proposal.sd.beta <- 0.01
proposal.corr.beta <- solve(t(X.standardised) %*% X.standardised)
chol.proposal.corr.beta <- chol(proposal.corr.beta)
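# (X'X)^{-1} is proportional to the covariance matrix of the maximum
# likelihood estimate of beta, so its Cholesky factor provides a natural
# correlation structure for generating correlated multivariate proposals
# for the regression parameters.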
# the perm ordering is used to map the @entries slot ordering to the ordering used when 'triplet' is called
perm <- order(Q.space.trip[,1], Q.space.trip[,2])
diags.space <- which(Q.space.trip[perm,1] == Q.space.trip[perm,2])
if(n.time > 1) diag.time <- Reduce("cbind", triplet(diag.spam(n.time - 1)))
time.last.diag <- which((Q.time.trip[,1] == Q.time.trip[,2]) & (Q.time.trip[,1] == n.time))
lastblock <- (k - n.sites+1):k
firstblock <- 1:n.sites
## Start timer
n.keep <- floor((n.sample - burnin)/thin)
if(verbose){
cat("Generating", n.keep, "post burnin and thinned (if requested) samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points <- round((1:100/100)*n.sample)
} else percentage.points <- round((1:100/100)*n.sample)
# -------------------------------------------------------------------------------------------
# START THE MCMC SAMPLING
# -------------------------------------------------------------------------------------------
for(j in 1:n.sample){
# START ITERATING, ONLY SAVE EVERY thin-th POST-BURNIN ITERATION
save.iter <- j > burnin && ((j %% thin == 0) | thin == 0)
if(save.iter) increment <- increment+1
# update ALPHA
if(n.time > 1){
phifirst <- phi[-firstblock]
philast <- phi[-lastblock]
philastQphilast <- qform_ST(Qspace = Q.space.trip, Qtime = diag.time, phi = philast, nsites = n.sites)
phifirstQphilast <- qform_ST_asym(Qspace = Q.space.trip, Qtime = diag.time, phi1 = phifirst, phi2 = philast, nsites = n.sites)
mu_alpha <- phifirstQphilast/philastQphilast
mu_sigmasq <- tau/philastQphilast
alpha <- rtruncnorm(n=1, a=10^-5, b=1 - 10^-5, mean=mu_alpha, sd = sqrt(mu_sigmasq))
Q.time.trip <- update_Qtime(Q.time.trip, alpha, time.last.diag - 1)
phiQphi <- qform_ST(Qspace = Q.space.trip, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
detTime <- determinant(Q.time, logarithm = TRUE)
detTime <- (detTime$m)*(detTime$s)
}
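# The update of alpha above is effectively a Gibbs step: alpha is drawn
# directly from a truncated normal with generalised-least-squares type mean
# phifirstQphilast / philastQphilast and variance tau / philastQphilast,
# so no accept/reject decision is required (hence accept.alpha is later
# reported as 100).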
# Gibbs update of tau_v
tau_scale <- vqform_current/2 + prior.tau2[2]
tau_v <- 1/rtrunc(n=1, spec="gamma", a=0.000001, b=Inf, shape=tau_v.shape, scale=(1/tau_scale))
#tau_v <- 1/rtgamma(n=1, shape=tau_v.shape, scale=tau_scale, min=0.000001, max=Inf)
v.proposal <- rtruncnorm(n = n.edges, a=-15, b=15, mean = v, sd = W.tune)
for(i in 1:n.blocks){
# propose new v for the i^th block
vnew <- v
block_inds <- blockinds[[i]]
vnew[block_inds] <- v.proposal[block_inds]
# update the spatial precision matrix using c++ loop.
# This is efficient because changes are only made where vnew differs from v
tripUpdate <- updatetripList2(Q.space.trip, vold = v, vnew = vnew, nedges = n.edges,
nsites = n.sites, block = block_inds,
block_length = length(block_inds), fixedridge = fixedridge, rho = rho)
Q.space.trip.prop <- tripUpdate[[1]]
Q.space.trip.diff <- tripUpdate[[2]]
# combine the result back into triplet matrix (Q.space.trip.prop), and spam matrix (Q.space.prop)
Q.space.prop@entries <- Q.space.trip.prop[perm,3]
# acceptance ratio requires calculation of phi'Q_prop phi - phi'Q phi.
# do this quickly by taking the difference between old and new triplets and working out the
# difference directly. Much faster than working out quadratic forms separately.
Q.space.trip.diff[, 3]<- Q.space.trip[, 3] - Q.space.trip.prop[,3]
phiQphi_phiQphiNew <- qform_difference_ST(Qtrip = Q.space.trip.diff, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
# update the cholesky of the precision matrix & calculate the determinant
chol.Q.space.prop <- update(chol.Q.space, x = Q.space.prop)
detSpace <- 2*determinant(chol.Q.space.prop, logarithm = T)$modulus
Q.space.det.prop <- n.sites*detTime + n.time*detSpace
v_15_prop <- vnew - 15
vqform_prop <- sum(v_15_prop^2)
acceptance <- exp(0.5*(Q.space.det.prop - Q.space.det.old) + (1/(2*tau))*(phiQphi_phiQphiNew)
+ 0.5*(1/tau_v)*(vqform_current - vqform_prop))
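# On the log scale this acceptance ratio has three parts: the change in the
# log determinant of the precision matrix, the change in the quadratic form
# phi' Q phi scaled by 1/(2*tau), and the change in the N(15, tau_v) prior
# contribution for the edge parameters v.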
accept[8] <- accept[8] + (1/n.blocks)
if(runif(1) <= acceptance){
vqform_current <- vqform_prop
v <- vnew
accept[7] <- accept[7] + (1/n.blocks)
Q.space.det.old <- Q.space.det.prop
Q.space.trip <- Q.space.trip.prop
chol.Q.space <- chol.Q.space.prop
Q.space <- Q.space.prop
}
}
# update BETA
offset.temp <- offset + as.numeric(phi)
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, k, p, beta_par, offset.temp, y, failures, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, k, p, beta_par, offset.temp, y, failures, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta_par <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
XB <- X.standardised %*% beta_par
# update PHI using one at a time M-H sampling
nneighbours <- diag.spam(Q.space)
W_current <- diag(nneighbours) - as.matrix(Q.space)
phi_update <- SPTICARphiBinomial(W = W_current, nsites = n.sites, ntimes = n.time, phi = phi,
nneighbours = nneighbours, tau = tau, y = y,
phiVarb_tune = phi_tune, trials = trials,
alpha = alpha, XB = XB + offset)
phi <- phi_update[[2]]
phi <- phi - mean(phi)
accept[3] <- accept[3] + phi_update[[1]][2]
accept[4] <- accept[4] + k
# update rho, the spatial leroux parameter
if(!is.null(rhofix)){
proposal.rho <- rhofix
} else {
proposal.rho <- rtruncnorm(n = 1, a=0, b=1, mean = rho, sd = rho.tune)
}
Q.space.trip.prop <- updatetriplets_rho(trips = Q.space.trip, nsites = n.sites, rho_old = rho, rho_new = proposal.rho, fixedridge = fixedridge)
Q.space.prop@entries <- Q.space.trip.prop[perm,3]
Q.space.trip.diff[, 3] <- Q.space.trip[, 3] - Q.space.trip.prop[,3]
phiQphi_phiQphiNew <- qform_difference_ST(Qtrip = Q.space.trip.diff, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
# update the cholesky of the precision matrix & calculate the determinant
chol.Q.space.prop <- update(chol.Q.space, x = Q.space.prop)
detSpace <- 2*determinant(chol.Q.space.prop, logarithm = T)$modulus
Q.space.det.prop <- n.sites*detTime + n.time*detSpace
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=rho.tune)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=rho.tune))
acceptance <- exp(0.5*(Q.space.det.prop - Q.space.det.old) + (1/(2*tau))*(phiQphi_phiQphiNew) + hastings)
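# The truncated normal proposal is not symmetric near the boundaries of
# [0, 1], so the log Hastings correction above (the proposal density
# evaluated in both directions) must be included in the acceptance ratio.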
accept[6] <- accept[6] + 1
if(runif(1) <= acceptance){
accept[5] <- accept[5] + 1
Q.space.det.old <- Q.space.det.prop
Q.space.trip <- Q.space.trip.prop
chol.Q.space <- chol.Q.space.prop
Q.space <- Q.space.prop
rho <- proposal.rho
}
# Gibbs update TAU using the gamma distribution
phiQphi <- qform_ST(Qspace = Q.space.trip, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
tau_scale <- phiQphi/2 + prior.tau2[2]
tau <- 1/rtrunc(n=1, spec="gamma", a=0.000001, b=Inf, shape=tau_phi_shape, scale=(1/tau_scale))
#tau <- 1/rtgamma(n=1, shape=tau_phi_shape, scale=tau_scale, min=0.000001, max=Inf)
# calculate the deviance
lp <- as.vector(XB) + phi + offset
prob <- exp(lp) / (1+exp(lp))
fitted <- prob*trials
loglike <- dbinom(x=y, size=trials, prob=prob, log=TRUE)
# save samples if past burnin
if(save.iter){
samples.beta[increment,] <- beta_par
samples.phi[increment,] <- phi
samples.fit[increment, ] <- fitted
samples.tau2[increment,] <- tau
samples.vtau2[increment,] <- tau_v
samples.v[increment,] <- v
samples.alpha[increment,] <- alpha
samples.rho[increment,] <- rho
samples.loglike[increment, ] <- loglike
}
# adjust the acceptance rate if required
if(j %% 100 == 0 & j < burnin){
accept.beta <- 100 * accept[1] / accept[2]
accept.phi <- 100 * accept[3] / accept[4]
accept.w <- 100 * accept[7] / accept[8]
if(is.null(rhofix))
{
accept.rho <- 100 * accept[5] / accept[6]
}else
{
accept.rho <- 45
}
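# When rho is fixed there are no rho updates to monitor, so accept.rho is
# set to a dummy value inside the (40, 50) target band, which leaves the
# rho.tune proposal standard deviation unchanged below.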
#### beta tuning parameter
if(accept.beta > 50)
{
proposal.sd.beta <- proposal.sd.beta + 0.1 * proposal.sd.beta
}else if(accept.beta < 40)
{
proposal.sd.beta <- proposal.sd.beta - 0.1 * proposal.sd.beta
}else
{
}
#### phi tuning parameter
if(accept.phi > 50)
{
phi_tune <- phi_tune + 0.1 * phi_tune
}else if(accept.phi < 40)
{
phi_tune <- phi_tune - 0.1 * phi_tune
}else
{
}
#### w tuning parameter
if(accept.w > 40)
{
W.tune <- W.tune + 0.1 * W.tune
}else if(accept.w < 20)
{
W.tune <- W.tune - 0.1 * W.tune
}else
{
}
#### rho tuning parameter
if(accept.rho > 50)
{
rho.tune <- min(rho.tune + 0.1 * rho.tune, 0.5)
}else if(accept.rho < 40)
{
rho.tune <- rho.tune - 0.1 * rho.tune
}else
{
}
accept <- accept*0
}else
{}
# print progress to the console
if(j %in% percentage.points & verbose) setTxtProgressBar(progressBar, j/n.sample)
}
# end timer
if(verbose)
{
cat("\nSummarising results.")
close(progressBar)
}else
{}
###################################
#### Summarise and save the results
###################################
## Compute the acceptance rates
accept.beta <- 100 * accept[1] / accept[2]
accept.phi <- 100 * accept[3] / accept[4]
accept.rho <- 100 * accept[5] / accept[6]
accept.w <- 100 * accept[7] / accept[8]
accept.alpha <- 100
if(!is.null(rhofix))
{
accept.final <- c(accept.beta, accept.phi, accept.w)
names(accept.final) <- c("beta", "phi", "w")
}else
{
accept.final <- c(accept.beta, accept.phi, accept.rho,accept.w)
names(accept.final) <- c("beta", "phi", "rho", "w")
}
#### Compute the fitted deviance
mean.beta <- apply(samples.beta, 2, mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow = n.sites, ncol = n.time, byrow=FALSE)
mean.phi <- matrix(apply(samples.phi, 2, mean), nrow = n.sites, ncol = n.time)
offset.mat <- matrix(offset, nrow = n.sites, ncol = n.time, byrow=FALSE)
lp.mean <- as.numeric(offset.mat + mean.phi + regression.mat)
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
deviance.fitted <- -2 * sum(dbinom(x=y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
#### Model fit criteria
modelfit <- common.modelfit(samples.loglike, deviance.fitted)
#### Create the fitted values and residuals
fitted.values <- apply(samples.fit, 2, mean)
response.residuals <- as.numeric(y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
#### Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
#### Create a summary object
samples.beta.orig <- mcmc(samples.beta.orig)
summary.beta <- t(rbind(apply(samples.beta.orig, 2, mean), apply(samples.beta.orig, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.save, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(4, 7))
summary.hyper[1,1:3] <- c(mean(samples.tau2), quantile(samples.tau2, c(0.025, 0.975)))
summary.hyper[2,1:3] <- c(mean(samples.rho), quantile(samples.rho, c(0.025, 0.975)))
summary.hyper[3,1:3] <- c(mean(samples.alpha), quantile(samples.alpha, c(0.025, 0.975)))
summary.hyper[4,1:3] <- c(mean(samples.vtau2), quantile(samples.vtau2, c(0.025, 0.975)))
rownames(summary.hyper) <- c("tau2", "rho.S", "rho.T", "tau2.w")
summary.hyper[1, 4:7] <- c(n.save, 100, effectiveSize(mcmc(samples.tau2)), geweke.diag(mcmc(samples.tau2))$z)
summary.hyper[2, 4:7] <- c(n.save, accept.rho, effectiveSize(mcmc(samples.rho)), geweke.diag(mcmc(samples.rho))$z)
summary.hyper[3, 4:7] <- c(n.save, accept.alpha, effectiveSize(mcmc(samples.alpha)), geweke.diag(mcmc(samples.alpha))$z)
summary.hyper[4, 4:7] <- c(n.save, 100, effectiveSize(mcmc(samples.vtau2)), geweke.diag(mcmc(samples.vtau2))$z)
if(!is.null(rhofix))
{
summary.hyper[2, ] <- c(rep(rhofix, 3),rep(NA, 4))
}
summary.results <- rbind(summary.beta, summary.hyper)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
# convert v back to w, summarise and create a 'fitted' adjacency matrix
samples.w <- inv_logit(samples.v)
colnames(samples.w) <- char.locs
get_prop_thresh <- function(v, thresh) as.numeric(mean(v < thresh) >= 0.99)
bdry99 <- apply(samples.w, 2, get_prop_thresh, thresh = 0.5)
bdryMN <- apply(samples.w, 2, mean)
Wmn <- W99 <- matrix(NA, nrow = n.sites, ncol = n.sites)
W99[locs] <- bdry99
W99[locs[ ,c(2,1)]] <- bdry99
Wmn[locs] <- bdryMN
Wmn[locs[ ,c(2,1)]] <- bdryMN
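# W99[i, j] = 1 flags edges where at least 99% of the posterior samples of
# w_ij lie below 0.5, i.e. strong evidence that the edge has been removed
# (a boundary in the spatial surface); Wmn holds the posterior mean weights.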
#### Compile and return the results
model.string <- c("Likelihood model - Binomial (logit link function)",
"\nLatent structure model - Adaptive autoregressive order 1 CAR model\n")
samples.tau2all <- cbind(samples.tau2, samples.vtau2)
colnames(samples.tau2all) <- c("tau2", "tau2.w")
if(is.null(rhofix))
{
samples.rhoext <- cbind(samples.rho, samples.alpha)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}else
{
samples.rhoext <- cbind(samples.alpha)
colnames(samples.rhoext) <- c("rho.T")
}
samples <- list(beta = mcmc(samples.beta.orig), phi = mcmc(samples.phi), rho = mcmc(samples.rhoext),
tau2 = mcmc(samples.tau2all), w = mcmc(samples.w), fitted = mcmc(samples.fit))
localised.structure <- list(Wmedian = Wmn, W99 = W99)
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=localised.structure, formula=formula, model=model.string, X=X)
class(results) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARadaptive.R
|
binomial.CARanova <- function(formula, data=NULL, trials, W, interaction=TRUE, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "binomial")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials has missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials has non-numeric values.", call.=FALSE)
int.check <- N.all-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials has non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials has zero or negative values.", call.=FALSE)
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
failures <- trials - Y
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
lambda <- runif(1)
fix.rho.T <- FALSE
}else
{
lambda <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(lambda)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(lambda<0 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
if(lambda>1 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
#### Checks on the interaction flag
if(sum(interaction==c(TRUE, FALSE)) != 1) stop("interaction must be either TRUE or FALSE.", call.=FALSE)
if(length(interaction) != 1) stop("interaction must be of length 1.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.CARanovaMCMC(Y=Y, trials=trials, failures=failures, offset=offset, X.standardised=X.standardised, W=W, interaction=interaction, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.CARanovaMCMC(Y=Y, trials=trials, failures=failures, offset=offset, X.standardised=X.standardised, W=W, interaction=interaction, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.CARanovaMCMC, Y=Y, trials=trials, failures=failures, offset=offset, X.standardised=X.standardised, W=W, interaction=interaction, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "phi", "delta", "gamma", "rho.S", "rho.T")
accept.final[1] <- 100 * results$accept[1] / results$accept[2]
accept.final[2] <- 100 * results$accept[3] / results$accept[4]
accept.final[3] <- 100 * results$accept[5] / results$accept[6]
if(interaction) accept.final[4] <- 100 * results$accept[7] / results$accept[8]
if(!fix.rho.S) accept.final[5] <- 100 * results$accept[9] / results$accept[10]
if(!fix.rho.T) accept.final[6] <- 100 * results$accept[11] / results$accept[12]
## Compute the fitted deviance
mean.phi <- apply(results$samples.phi, 2, mean)
mean.delta <- apply(results$samples.delta, 2, mean)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K)
mean.delta.mat <- matrix(rep(mean.delta, K), byrow=T, nrow=K)
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
if(interaction)
{
mean.gamma <- apply(results$samples.gamma, 2,mean)
mean.gamma.mat <- matrix(mean.gamma, byrow=F, nrow=K)
lp.mean <- as.numeric(offset.mat + regression.mat + mean.phi.mat + mean.delta.mat + mean.gamma.mat)
}else
{
lp.mean <- as.numeric(offset.mat + regression.mat + mean.phi.mat + mean.delta.mat)
}
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.lambda
names(samples.rhoext) <- "rho.T"
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
names(samples.rhoext) <- "rho.S"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.lambda)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}
if(interaction)
{
colnames(results$samples.tau2) <- c("tau2.S", "tau2.T", "tau2.I")
}else
{
colnames(results$samples.tau2) <- c("tau2.S", "tau2.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), delta=mcmc(results$samples.delta), gamma=mcmc(results$samples.gamma), tau2=mcmc(results$samples.tau2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
n.tau2 <- ncol(results$samples.tau2)
summary.tau2 <- cbind(apply(results$samples.tau2, 2, mean), t(apply(results$samples.tau2, 2, quantile, c(0.025, 0.975))), rep(n.keep, n.tau2), rep(100, n.tau2),
effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
if(interaction)
{
rownames(summary.tau2) <- c("tau2.S", "tau2.T", "tau2.I")
}else
{
rownames(summary.tau2) <- c("tau2.S", "tau2.T")
}
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
summary.rho[2, 1:3] <- c(mean(results$samples.lambda), quantile(results$samples.lambda, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.lambda), geweke.diag(results$samples.lambda)$z)
}else
{
summary.rho[2, 1:3] <- c(lambda, lambda, lambda)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.tau2, summary.rho)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "phi", "delta", "gamma", "rho.S", "rho.T")
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.final[2] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.final[3] <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
if(interaction) accept.final[4] <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8])
if(!fix.rho.S) accept.final[5] <- 100 * sum(accept.temp2[ ,9]) / sum(accept.temp2[ ,10])
if(!fix.rho.T) accept.final[6] <- 100 * sum(accept.temp2[ ,11]) / sum(accept.temp2[ ,12])
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
if(interaction)
{
samples.gamma.list <- lapply(results, function(l) l[["samples.gamma"]])
samples.gamma.matrix <- do.call(what=rbind, args=samples.gamma.list)
}
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]])
samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list)
}
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.delta <- apply(samples.delta.matrix, 2, mean)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K)
mean.delta.mat <- matrix(rep(mean.delta, K), byrow=T, nrow=K)
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
if(interaction)
{
mean.gamma <- apply(samples.gamma.matrix, 2,mean)
mean.gamma.mat <- matrix(mean.gamma, byrow=F, nrow=K)
lp.mean <- as.numeric(offset.mat + regression.mat + mean.phi.mat + mean.delta.mat + mean.gamma.mat)
}else
{
lp.mean <- as.numeric(offset.mat + regression.mat + mean.phi.mat + mean.delta.mat)
}
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
delta.mcmc <- mcmc.list(lapply(samples.delta.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(interaction)
{
for(j in 1:n.chains)
{
colnames(samples.tau2.list[[j]]) <- c("tau2.S", "tau2.T", "tau2.I")
}
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
gamma.mcmc <- mcmc.list(lapply(samples.gamma.list, mcmc))
}else
{
for(j in 1:n.chains)
{
colnames(samples.tau2.list[[j]]) <- c("tau2.S", "tau2.T")
}
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
gamma.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.lambda.list[[j]]) <- c("rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.lambda.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.lambda.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, delta=delta.mcmc, gamma=gamma.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
n.tau2 <- ncol(samples.tau2.matrix)
summary.tau2 <- cbind(apply(samples.tau2.matrix, 2, mean), t(apply(samples.tau2.matrix, 2, quantile, c(0.025, 0.975))), rep(n.keep, n.tau2), rep(100, n.tau2),
effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
if(interaction)
{
rownames(summary.tau2) <- c("tau2.S", "tau2.T", "tau2.I")
}else
{
rownames(summary.tau2) <- c("tau2.S", "tau2.T")
}
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.lambda.list, mcmc))
summary.rho[2, 1:3] <- c(mean(samples.lambda.matrix), quantile(samples.lambda.matrix, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[2, 1:3] <- c(lambda, lambda, lambda)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.tau2, summary.rho)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
if(interaction)
{
model.string <- c("Likelihood model - binomial (logit link function)", "\nLatent structure model - spatial and temporal main effects and an interaction\n")
}else
{
model.string <- c("Likelihood model - binomial (logit link function)", "\nLatent structure model - spatial and temporal main effects\n")
}
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARanova.R
|
binomial.CARanovaMCMC <- function(Y, trials, failures, offset, X.standardised, W, interaction, rho, lambda, fix.rho.S, fix.rho.T, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
dat <- cbind(Y, failures)
mod.glm <- glm(dat~X.standardised-1, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=0, sd = res.sd)
delta <- rnorm(n=N, mean=0, sd = res.sd)
tau2.phi <- var(phi)/10
tau2.delta <- var(delta)/10
if(interaction)
{
gamma <- rnorm(n=N.all, mean=0, sd = res.sd)
tau2.gamma <- var(gamma)/10
}else
{}
#### Matrix versions
Y.DA <- Y
failures.DA <- failures
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
trials.mat <- matrix(trials, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
delta.mat <- matrix(rep(delta, K), byrow=T, nrow=K)
if(interaction)
{
gamma.mat <- matrix(gamma, byrow=F, nrow=K)
}else
{
gamma.mat <- matrix(rep(0, N.all), byrow=F, nrow=K)
}
lp <- as.numeric(offset.mat + regression.mat + phi.mat + delta.mat + gamma.mat)
prob <- exp(lp) / (1 + exp(lp))
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.delta <- array(NA, c(n.keep, N))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.lambda <- array(NA, c(n.keep, 1))
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
if(interaction)
{
samples.gamma <- array(NA, c(n.keep, N.all))
samples.tau2 <- array(NA, c(n.keep, 3))
colnames(samples.tau2) <- c("tau2.phi", "tau2.delta", "tau2.gamma")
}else
{
samples.tau2 <- array(NA, c(n.keep, 2))
colnames(samples.tau2) <- c("tau2.phi", "tau2.delta")
}
#### Specify the Metropolis quantities
accept <- rep(0,12)
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
proposal.sd.delta <- 0.1
proposal.sd.rho <- 0.02
proposal.sd.lambda <- 0.02
tau2.phi.shape <- prior.tau2[1] + K/2
tau2.delta.shape <- prior.tau2[1] + N/2
if(interaction)
{
proposal.sd.gamma <- 0.1
tau2.gamma.shape <- prior.tau2[1] + N*K/2
}else
{
}
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Spatial determinant
if(!fix.rho.S)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q.W <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Temporal neighbourhood matrix
D <-array(0, c(N,N))
for(i in 1:N)
{
for(j in 1:N)
{
if(abs((i-j))==1) D[i,j] <- 1
}
}
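# For illustration: with N = 4 time points this double loop produces the
# tridiagonal temporal adjacency matrix
#   D = | 0 1 0 0 |
#       | 1 0 1 0 |
#       | 0 1 0 1 |
#       | 0 0 1 0 |
# so each time point neighbours the points immediately before and after it.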
#### Temporal triplet object
D.triplet <- c(NA, NA, NA)
for(i in 1:N)
{
for(j in 1:N)
{
if(D[i,j]>0)
{
D.triplet <- rbind(D.triplet, c(i,j, D[i,j]))
}else{}
}
}
D.triplet <- D.triplet[-1, ]
D.n.triplet <- nrow(D.triplet)
D.triplet.sum <- tapply(D.triplet[ ,3], D.triplet[ ,1], sum)
D.neighbours <- tapply(D.triplet[ ,3], D.triplet[ ,1], length)
#### Temporal begfin argument
D.begfin <- array(NA, c(N, 2))
temp <- 1
for(i in 1:N)
{
D.begfin[i, ] <- c(temp, (temp + D.neighbours[i]-1))
temp <- temp + D.neighbours[i]
}
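# Each row of D.begfin records the first and last rows of D.triplet belonging
# to one time point. Continuing the N = 4 example, the neighbour counts are
# (1, 2, 2, 1), so D.begfin is rbind(c(1,1), c(2,3), c(4,5), c(6,6)).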
#### Temporal determinant
if(!fix.rho.T)
{
Dstar <- diag(apply(D,1,sum)) - D
Dstar.eigen <- eigen(Dstar)
Dstar.val <- Dstar.eigen$values
det.Q.D <- 0.5 * sum(log((lambda * Dstar.val + (1-lambda))))
}else
{}
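# Under the Leroux prior the temporal precision is
# Q(lambda) = lambda * (diag(rowSums(D)) - D) + (1 - lambda) * I, whose
# eigenvalues are lambda * Dstar.val + (1 - lambda). Hence det.Q.D stores
# half the log determinant (as required by the Gaussian density), computed
# from the pre-computed eigenvalues of Dstar without refactorising the
# matrix each time lambda is updated.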
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.phi.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
if(lambda==1) tau2.delta.shape <- prior.tau2[1] + 0.5 * (N-1)
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rbinom(n=n.miss, size=trials[which.miss==0], prob=prob[which.miss==0])
failures.DA <- trials - Y.DA
}else
{}
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
failures.DA.mat <- matrix(failures.DA, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from beta
####################
offset.temp <- offset + as.numeric(phi.mat) + as.numeric(delta.mat) + as.numeric(gamma.mat)
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y.DA, failures.DA, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y.DA, failures.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from phi
####################
phi.offset <- offset.mat + regression.mat + delta.mat + gamma.mat
temp1 <- binomialcarupdateRW(W.triplet, W.begfin, W.triplet.sum, K, phi, tau2.phi,Y.DA.mat, failures.DA.mat, proposal.sd.phi, rho, phi.offset, N, rep(1,N))
phi <- temp1[[1]]
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
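# Centring phi imposes a sum-to-zero constraint so that phi is identifiable
# alongside the intercept. When rho = 1 the intrinsic CAR prior only
# identifies phi up to a constant within each connected component (island)
# of W, so here the constraint is instead applied within the first island.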
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K
####################
## Sample from delta
####################
delta.offset <- t(offset.mat + regression.mat + phi.mat + gamma.mat)
temp2 <- binomialcarupdateRW(D.triplet, D.begfin, D.triplet.sum, N, delta, tau2.delta, t(Y.DA.mat), t(failures.DA.mat), proposal.sd.delta, lambda, delta.offset, K, rep(1,K))
delta <- temp2[[1]]
delta <- delta - mean(delta)
delta.mat <- matrix(rep(delta, K), byrow=T, nrow=K)
accept[5] <- accept[5] + temp2[[2]]
accept[6] <- accept[6] + N
if(interaction)
{
####################
## Sample from gamma
####################
gamma.offset <- offset.mat + regression.mat + phi.mat + delta.mat
gamma.offset.vec <- as.numeric(gamma.offset)
temp5 <- binomialindepupdateRW(N.all, gamma, tau2.gamma, Y.DA, failures.DA, proposal.sd.gamma, gamma.offset.vec)
gamma <- temp5[[1]]
gamma <- gamma - mean(gamma)
gamma.mat <- matrix(gamma, byrow=F, nrow=K)
accept[7] <- accept[7] + temp5[[2]]
accept[8] <- accept[8] + N * K
#########################
## Sample from tau2.gamma
#########################
tau2.gamma.scale <- prior.tau2[2] + sum(gamma.mat^2)/2
tau2.gamma <- 1 / rgamma(1, tau2.gamma.shape, scale=(1/tau2.gamma.scale))
}else
{}
#######################
## Sample from tau2.phi
#######################
temp2.phi <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, rho)
tau2.phi.scale <- temp2.phi + prior.tau2[2]
tau2.phi <- 1 / rgamma(1, tau2.phi.shape, scale=(1/tau2.phi.scale))
#########################
## Sample from tau2.delta
#########################
temp2.delta <- quadform(D.triplet, D.triplet.sum, D.n.triplet, N, delta, delta, lambda)
tau2.delta.scale <- temp2.delta + prior.tau2[2]
tau2.delta <- 1 / rgamma(1, tau2.delta.shape, scale=(1/tau2.delta.scale))
##################
## Sample from rho
##################
if(!fix.rho.S)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q.W - temp2.phi / tau2.phi
logprob.proposal <- det.Q.proposal - temp3 / tau2.phi
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.W <- det.Q.proposal
accept[9] <- accept[9] + 1
}else
{}
accept[10] <- accept[10] + 1
}else
{}
#####################
## Sample from lambda
#####################
if(!fix.rho.T)
{
proposal.lambda <- rtruncnorm(n=1, a=0, b=1, mean=lambda, sd=proposal.sd.lambda)
temp3 <- quadform(D.triplet, D.triplet.sum, D.n.triplet, N, delta, delta, proposal.lambda)
det.Q.proposal <- 0.5 * sum(log((proposal.lambda * Dstar.val + (1-proposal.lambda))))
logprob.current <- det.Q.D - temp2.delta / tau2.delta
logprob.proposal <- det.Q.proposal - temp3 / tau2.delta
hastings <- log(dtruncnorm(x=lambda, a=0, b=1, mean=proposal.lambda, sd=proposal.sd.lambda)) - log(dtruncnorm(x=proposal.lambda, a=0, b=1, mean=lambda, sd=proposal.sd.lambda))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
lambda <- proposal.lambda
det.Q.D <- det.Q.proposal
accept[11] <- accept[11] + 1
}else
{}
accept[12] <- accept[12] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- as.numeric(offset.mat + regression.mat + phi.mat + delta.mat + gamma.mat)
prob <- exp(lp) / (1+exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.delta[ele, ] <- delta
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.lambda[ele, ] <- lambda
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
if(interaction)
{
samples.gamma[ele, ] <- gamma
samples.tau2[ele, ] <- c(tau2.phi, tau2.delta, tau2.gamma)
}else
{
samples.tau2[ele, ] <- c(tau2.phi, tau2.delta)
}
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
proposal.sd.delta <- common.accceptrates1(accept[5:6], proposal.sd.delta, 40, 50)
if(interaction) proposal.sd.gamma <- common.accceptrates1(accept[7:8], proposal.sd.gamma, 40, 50)
if(!fix.rho.S) proposal.sd.rho <- common.accceptrates2(accept[9:10], proposal.sd.rho, 40, 50, 0.5)
if(!fix.rho.T) proposal.sd.lambda <- common.accceptrates2(accept[11:12], proposal.sd.lambda, 40, 50, 0.5)
accept <- rep(0,12)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.lambda <- NA
if(!interaction) samples.gamma <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.delta=samples.delta, samples.gamma=samples.gamma, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.lambda=samples.lambda, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARanovaMCMC.R
|
binomial.CARar1 <- function(formula, data=NULL, trials, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "binomial")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials has missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials has non-numeric values.", call.=FALSE)
int.check <- N.all-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials has non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials has zero or negative values.", call.=FALSE)
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
failures <- trials - Y
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
gamma <- runif(1)
fix.rho.T <- FALSE
}else
{
gamma <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(gamma)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(gamma<0 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
if(gamma>1 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.CARar1MCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, gamma=gamma, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.CARar1MCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, gamma=gamma, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.CARar1MCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, gamma=gamma, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 4)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T")
accept.final[1] <- 100 * results$accept[1] / results$accept[2]
accept.final[2] <- 100 * results$accept[3] / results$accept[4]
if(!fix.rho.S) accept.final[3] <- 100 * results$accept[5] / results$accept[6]
if(!fix.rho.T) accept.final[4] <- 100
## Compute the fitted deviance
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=N)
lp.mean <- as.numeric(offset.mat + mean.phi + regression.mat)
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.gamma
names(samples.rhoext) <- "rho.T"
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
names(samples.rhoext) <- "rho.S"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.gamma)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), tau2=mcmc(results$samples.tau2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.tau2 <- c(mean(results$samples.tau2), quantile(results$samples.tau2, c(0.025, 0.975)), n.keep, 100,
effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
summary.rho[2, 1:3] <- c(mean(results$samples.gamma), quantile(results$samples.gamma, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.gamma), geweke.diag(results$samples.gamma)$z)
}else
{
summary.rho[2, 1:3] <- c(gamma, gamma, gamma)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.tau2, summary.rho)
rownames(summary.results)[(p+1)] <- "tau2"
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 4)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T")
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.final[2] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
if(!fix.rho.T) accept.final[4] <- 100
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.gamma.list <- lapply(results, function(l) l[["samples.gamma"]])
samples.gamma.matrix <- do.call(what=rbind, args=samples.gamma.list)
}
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=N)
lp.mean <- as.numeric(offset.mat + mean.phi + regression.mat)
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.gamma.list[[j]]) <- c("rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.gamma.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.gamma.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
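## gelman.diag() compares between-chain and within-chain variability; the
## upper 95% limit of the potential scale reduction factor (PSRF) is reported,
## with values close to 1 indicating that the chains have mixed.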
summary.tau2 <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)), n.keep, 100,
effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.gamma.list, mcmc))
summary.rho[2, 1:3] <- c(mean(samples.gamma.matrix), quantile(samples.gamma.matrix, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[2, 1:3] <- c(gamma, gamma, gamma)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.tau2, summary.rho)
rownames(summary.results)[(p+1)] <- "tau2"
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - binomial (logit link function)", "\nLatent structure model - Autoregressive order 1 CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
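## Illustrative usage sketch only, wrapped in if(FALSE) so it is never run
## when this file is sourced. 'dat', 'n.trials' and 'W.mat' are hypothetical
## user objects, and the call assumes binomial.CARar1 takes the same arguments
## as binomial.CARar2 later in this package.
if(FALSE)
{
model <- binomial.CARar1(formula=Y~x1+x2, data=dat, trials=n.trials, W=W.mat,
                         burnin=20000, n.sample=100000, thin=10, n.chains=2,
                         n.cores=2)
model$summary.results
model$modelfit
}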
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARar1.R
|
binomial.CARar1MCMC <- function(Y, failures, trials, offset, X.standardised, W, rho, gamma, fix.rho.S, fix.rho.T, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
dat <- cbind(Y, failures)
mod.glm <- glm(dat~X.standardised-1, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=N.all, mean=0, sd = res.sd)
tau2 <- var(phi)/10
#### Specify matrix quantities
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
trials.mat <- matrix(trials, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
lp <- as.numeric(offset.mat + regression.mat + phi.mat)
prob <- exp(lp) / (1+exp(lp))
Y.DA <- Y
failures.DA <- failures
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, N.all))
samples.tau2 <- array(NA, c(n.keep, 1))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.gamma <- array(NA, c(n.keep, 1))
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Specify the Metropolis quantities
accept <- rep(0,6)
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.05
proposal.sd.beta <- 0.01
tau2.shape <- prior.tau2[1] + N.all/2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the determinant
if(!fix.rho.S)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q.W <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
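## When rho=1 (intrinsic CAR) and/or gamma=1 (random walk in time) the joint
## prior precision matrix for phi is rank deficient, so the shape parameter of
## the tau2 full conditional is adjusted below to the effective degrees of
## freedom.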
if(rho==1 & gamma==1)
{
tau2.shape <- prior.tau2[1] + prior.tau2[1] + ((N-1) * (K-n.islands))/2
}else if(rho==1)
{
tau2.shape <- prior.tau2[1] + prior.tau2[1] + (N * (K-n.islands))/2
}else if(gamma==1)
{
tau2.shape <- prior.tau2[1] + prior.tau2[1] + ((N-1) * K)/2
}else
{}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rbinom(n=n.miss, size=trials[which.miss==0], prob=prob[which.miss==0])
failures.DA <- trials - Y.DA
}else
{}
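## Imputing the missing responses from their binomial predictive distribution
## given the current parameter values is a standard data augmentation step,
## valid when the missingness is (conditionally) at random.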
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
failures.DA.mat <- matrix(failures.DA, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from beta
####################
offset.temp <- as.numeric(offset.mat + phi.mat)
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y.DA, failures.DA, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y.DA, failures.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from phi
####################
phi.offset <- offset.mat + regression.mat
den.offset <- rho * W.triplet.sum + 1 - rho
temp1 <- binomialar1carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, tau2, gamma, rho, Y.DA.mat, failures.DA.mat, proposal.sd.phi, phi.offset, den.offset)
phi.temp <- temp1[[1]]
phi <- as.numeric(phi.temp) - mean(as.numeric(phi.temp))
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K*N
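## The random effects follow the multivariate AR(1) process
## phi_t | phi_(t-1) ~ N(gamma * phi_(t-1), tau2 * Q(rho, W)^(-1)), where
## Q(rho, W) = rho * (diag(rowSums(W)) - W) + (1 - rho) * I. Each element is
## updated by a Metropolis random walk, and the full vector is mean centred
## for identifiability with the intercept.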
####################
## Sample from gamma
####################
if(!fix.rho.T)
{
temp2 <- gammaquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho)
mean.gamma <- temp2[[1]] / temp2[[2]]
sd.gamma <- sqrt(tau2 / temp2[[2]])
gamma <- rtruncnorm(n=1, a=0, b=1, mean=mean.gamma, sd=sd.gamma)
}else
{}
####################
## Sample from tau2
####################
temp3 <- tauquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho, gamma)
tau2.scale <- temp3 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.shape, scale=(1/tau2.scale))
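## tau2 has a conjugate inverse-gamma full conditional: rgamma() with
## scale = 1/tau2.scale draws X ~ Gamma(shape=tau2.shape, rate=tau2.scale),
## so 1/X ~ Inverse-Gamma(tau2.shape, tau2.scale).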
##################
## Sample from rho
##################
if(!fix.rho.S)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp4 <- tauquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, proposal.rho, gamma)
det.Q.W.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- N * det.Q.W - temp3 / tau2
logprob.proposal <- N * det.Q.W.proposal - temp4 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.W <- det.Q.W.proposal
accept[5] <- accept[5] + 1
}else
{}
accept[6] <- accept[6] + 1
}else
{}
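## The truncated normal proposal on [0,1] is not symmetric in (rho,
## proposal.rho), so the Hastings correction above is required; the target
## log-density uses N * det.Q.W = (N/2) * log|Q(rho)| because the same spatial
## precision matrix applies at each of the N time periods.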
#########################
## Calculate the deviance
#########################
lp <- as.numeric(offset.mat + regression.mat + phi.mat)
prob <- exp(lp) / (1+exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- as.numeric(phi)
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.gamma[ele, ] <- gamma
samples.tau2[ele, ] <- tau2
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
if(!fix.rho.S) proposal.sd.rho <- common.accceptrates2(accept[5:6], proposal.sd.rho, 40, 50, 0.5)
accept <- rep(0,6)
}else
{}
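## The proposal standard deviations are re-tuned every 100 iterations towards
## the target acceptance windows above (30-50%), but only during the burnin
## period, so the retained post-burnin samples come from a fixed-kernel,
## valid Markov chain.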
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.gamma <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.gamma=samples.gamma, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARar1MCMC.R
|
binomial.CARar2 <- function(formula, data=NULL, trials, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "binomial")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check the trials argument
if(sum(is.na(trials))>0) stop("the number of trials has missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the number of trials has non-numeric values.", call.=FALSE)
int.check <- N.all-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the number of trials has non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the number of trials has zero or negative values.", call.=FALSE)
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has values larger than the number of trials.", call.=FALSE)
failures <- trials - Y
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
alpha <- c(runif(1), runif(1))
fix.rho.T <- FALSE
}else
{
alpha <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(alpha)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(length(alpha)!=2) stop("rho.T is fixed but is not of length 2.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.CARar2MCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.CARar2MCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.CARar2MCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
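## clusterCall() evaluates the MCMC function once on each of the n.cores
## workers, so one chain is returned per core; when verbose=TRUE the workers
## write their progress to CARBayesSTprogress.txt rather than the console.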
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 4)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T")
accept.final[1] <- 100 * results$accept[1] / results$accept[2]
accept.final[2] <- 100 * results$accept[3] / results$accept[4]
if(!fix.rho.S) accept.final[3] <- 100 * results$accept[5] / results$accept[6]
if(!fix.rho.T) accept.final[4] <- 100
## Compute the fitted deviance
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=N)
lp.mean <- as.numeric(offset.mat + mean.phi + regression.mat)
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.alpha
colnames(samples.rhoext) <- c("rho1.T", "rho2.T")
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
colnames(samples.rhoext) <- "rho.S"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.alpha)
colnames(samples.rhoext) <- c("rho.S", "rho1.T", "rho2.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), tau2=mcmc(results$samples.tau2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.tau2 <- c(mean(results$samples.tau2), quantile(results$samples.tau2, c(0.025, 0.975)), n.keep, 100,
effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.rho <- array(NA, c(3,7))
row.names(summary.rho) <- c("rho.S", "rho1.T", "rho2.T")
if(!fix.rho.S)
{
summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
summary.rho[2, 1:3] <- c(mean(results$samples.alpha[ ,1]), quantile(results$samples.alpha[ ,1], c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.alpha[ ,1]), geweke.diag(results$samples.alpha[ ,1])$z)
summary.rho[3, 1:3] <- c(mean(results$samples.alpha[ ,2]), quantile(results$samples.alpha[ ,2], c(0.025, 0.975)))
summary.rho[3, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.alpha[ ,2]), geweke.diag(results$samples.alpha[ ,2])$z)
}else
{
summary.rho[2, 1:3] <- c(alpha[1], alpha[1], alpha[1])
summary.rho[2, 4:7] <- rep(NA, 4)
summary.rho[3, 1:3] <- c(alpha[2], alpha[2], alpha[2])
summary.rho[3, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.tau2, summary.rho)
rownames(summary.results)[(p+1)] <- "tau2"
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 4)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T")
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.final[2] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
if(!fix.rho.T) accept.final[4] <- 100
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]])
samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list)
}
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=N)
lp.mean <- as.numeric(offset.mat + mean.phi + regression.mat)
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.alpha.list[[j]]) <- c("rho1.T", "rho2.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.alpha.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho1.T", "rho2.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.tau2 <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)), n.keep, 100,
effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.rho <- array(NA, c(3,7))
row.names(summary.rho) <- c("rho.S", "rho1.T", "rho2.T")
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.alpha.list, mcmc))
summary.rho[2, 1:3] <- c(mean(samples.alpha.matrix[ ,1]), quantile(samples.alpha.matrix[ ,1], c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp[ ,1]), gelman.diag(temp[ ,1])$psrf[ ,2])
summary.rho[3, 1:3] <- c(mean(samples.alpha.matrix[ ,2]), quantile(samples.alpha.matrix[ ,2], c(0.025, 0.975)))
summary.rho[3, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp[ ,2]), gelman.diag(temp[ ,2])$psrf[ ,2])
}else
{
summary.rho[2, 1:3] <- c(alpha[1], alpha[1], alpha[1])
summary.rho[2, 4:7] <- rep(NA, 4)
summary.rho[3, 1:3] <- c(alpha[2], alpha[2], alpha[2])
summary.rho[3, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.tau2, summary.rho)
rownames(summary.results)[(p+1)] <- "tau2"
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - binomial (logit link function)", "\nLatent structure model - Autoregressive order 2 CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARar2.R
|
binomial.CARar2MCMC <- function(Y, failures, trials, offset, X.standardised, W, rho, alpha, fix.rho.S, fix.rho.T, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#library(MASS)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
dat <- cbind(Y, failures)
mod.glm <- glm(dat~X.standardised-1, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=N.all, mean=0, sd = res.sd)
tau2 <- var(phi)/10
#### Specify matrix quantities
Y.DA <- Y
failures.DA <- failures
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
trials.mat <- matrix(trials, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
lp <- as.numeric(offset.mat + regression.mat + phi.mat)
prob <- exp(lp) / (1+exp(lp))
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, N.all))
samples.tau2 <- array(NA, c(n.keep, 1))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.alpha <- array(NA, c(n.keep, 2))
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Specify the Metropolis quantities
accept <- rep(0,6)
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.05
proposal.sd.beta <- 0.01
tau2.shape <- prior.tau2[1] + N.all/2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the determinant
if(!fix.rho.S)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q.W <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1 & alpha[1]==2 & alpha[2]==-1)
{
tau2.shape <- prior.tau2[1] + prior.tau2[1] + ((N-2) * (K-n.islands))/2
}else if(rho==1)
{
tau2.shape <- prior.tau2[1] + prior.tau2[1] + (N * (K-n.islands))/2
}else if(alpha[1]==2 & alpha[2]==-1)
{
tau2.shape <- prior.tau2[1] + prior.tau2[1] + ((N-2) * K)/2
}else
{}
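## alpha = (2, -1) corresponds to a second-order random walk in time, which
## removes two degrees of freedom per spatial unit, hence the (N-2) terms
## above.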
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rbinom(n=n.miss, size=trials[which.miss==0], prob=prob[which.miss==0])
failures.DA <- trials - Y.DA
}else
{}
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
failures.DA.mat <- matrix(failures.DA, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from beta
####################
offset.temp <- as.numeric(offset.mat + phi.mat)
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y.DA, failures.DA, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y.DA, failures.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from phi
####################
phi.offset <- offset.mat + regression.mat
den.offset <- rho * W.triplet.sum + 1 - rho
temp1 <- binomialar2carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, tau2, alpha[1], alpha[2], rho, Y.DA.mat, failures.DA.mat, proposal.sd.phi, phi.offset, den.offset)
phi.temp <- temp1[[1]]
phi <- as.numeric(phi.temp) - mean(as.numeric(phi.temp))
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K*N
####################
## Sample from alpha
####################
if(!fix.rho.T)
{
#### Construct the quadratic forms
temp2 <- alphaquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho, tau2)
#### Construct the precision matrix
alpha.prec <- array(c(temp2[[1]], temp2[[3]], temp2[[3]], temp2[[2]]), c(2,2))
alpha.var <- solve(alpha.prec)
#### Construct the mean vector
U2 <- (temp2[[1]] * temp2[[5]] - temp2[[3]] * temp2[[4]]) / (temp2[[2]] * temp2[[1]] - temp2[[3]]^2)
U1 <- (1 / temp2[[3]]) * (temp2[[5]] - temp2[[2]] * U2)
alpha.mean <- c(U1, U2)
alpha <- mvrnorm(n=1, mu=alpha.mean, Sigma=alpha.var)
}else
{}
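## (alpha[1], alpha[2]) have a bivariate Gaussian full conditional under the
## AR(2) prior phi_t ~ N(alpha1 * phi_(t-1) + alpha2 * phi_(t-2),
## tau2 * Q(rho, W)^(-1)); alpha.prec and alpha.mean are the precision matrix
## and mean of that full conditional, assembled from the quadratic form
## components returned by alphaquadformcompute().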
####################
## Sample from tau2
####################
temp3 <- tauquadformcomputear2(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho, alpha[1], alpha[2])
tau2.scale <- temp3 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.shape, scale=(1/tau2.scale))
##################
## Sample from rho
##################
if(!fix.rho.S)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp4 <- tauquadformcomputear2(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, proposal.rho, alpha[1], alpha[2])
det.Q.W.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- N * det.Q.W - temp3 / tau2
logprob.proposal <- N * det.Q.W.proposal - temp4 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.W <- det.Q.W.proposal
accept[5] <- accept[5] + 1
}else
{
}
accept[6] <- accept[6] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- as.numeric(offset.mat + regression.mat + phi.mat)
prob <- exp(lp) / (1+exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- as.numeric(phi)
samples.tau2[ele, ] <- tau2
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.alpha[ele, ] <- alpha
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
if(!fix.rho.S) proposal.sd.rho <- common.accceptrates2(accept[5:6], proposal.sd.rho, 40, 50, 0.5)
accept <- rep(0,6)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.alpha <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.alpha=samples.alpha, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARar2MCMC.R
|
#--------------------------------------------------------------------------------------------------------------------------------------------------------
# Bayesian hierarchical mixed-effects model for clustering areas based on disease risk trends (Binomial)
#--------------------------------------------------------------------------------------------------------------------------------------------------------
binomial.CARclustrends <- function(formula, data=NULL, trials, W, burnin, n.sample, thin=1, trends=NULL, changepoint=NULL, knots=NULL,
prior.mean.beta=NULL, prior.var.beta=NULL, prior.mean.gamma=NULL, prior.var.gamma=NULL,
prior.lambda=NULL, prior.tau2=NULL, Nchains=4, verbose=TRUE)
{
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check on the verbose option
#------------------------------------------------------------------------------------------------------------------------------------------------------
a <- common.verbose(verbose)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check trends vector
#------------------------------------------------------------------------------------------------------------------------------------------------------
All.Trends <- c("Constant", "LD", "LI", "CP", "CT", "MD", "MI")
Total.trends <- length(All.Trends) - 2 # minus 2 because LD/MD and LI/MI are mutually exclusive pairs
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check that a trend vector has been given
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(is.null(trends)) stop("At least two trends, with one being the constant trend, have to be given.", call.=FALSE)
trends <- unique(trends)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check that the constant trend is selected
#------------------------------------------------------------------------------------------------------------------------------------------------------
if((All.Trends[1] %in% trends) & length(trends) == 1 | !(All.Trends[1] %in% trends))
{
stop("The constant trend has to be selected alongside at least one other trend.", call.=FALSE)
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check to see if correct trends inputted
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(!all(trends %in% All.Trends)) stop("Incorrect trend selected.", call.=FALSE)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check that LI and MI are not both included
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(all(c("LI", "MI") %in% trends)) stop("Select only one of LI or MI as the increasing trend.", call.=FALSE)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check that LD and MD are not both included
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(all(c("LD", "MD") %in% trends)) stop("Select only one of LD or MD as the decreasing trend.", call.=FALSE)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check that the changepoint is included and within the given time period
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(any(c("CP", "CT") %in% trends) & is.null(changepoint)) stop("A changepoint needs to be included for the changepoint trends (CP, CT).", call.=FALSE)
if(any(c("CP", "CT") %in% trends) & length(changepoint) != 1) stop("The changepoint should be a scalar.", call.=FALSE)
if(any(c("CP", "CT") %in% trends) & !is.null(changepoint))
{
if(changepoint < 1) stop("The changepoint should be positive.", call.=FALSE)
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check the number of knots for the monotonic trends
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(any(c("MD", "MI") %in% trends) & is.null(knots)) stop("The number of knots has to be chosen for the monotonic trends (MD, MI).", call.=FALSE)
if(any(c("MD", "MI") %in% trends) & length(knots) != 1) stop("The number of knots should be a scalar.", call.=FALSE)
if(any(c("MD", "MI") %in% trends) & !is.null(knots))
{
if(knots < 1) stop("The number of knots should be positive.", call.=FALSE)
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# The number of trends selected (the constant trend is always one of them)
#------------------------------------------------------------------------------------------------------------------------------------------------------
N.trends <- length(trends)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Set the number of knots to 0 if monotonic trends are not included
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(!any(c("MD", "MI") %in% trends)) knots <- 0
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Track positions of each of the possible trends
#------------------------------------------------------------------------------------------------------------------------------------------------------
Trends.pos <- c("Constant", "LD", "LI", rep("CP", 2), rep("CT", 2), rep("MD", knots + 1), rep("MI", knots + 1))
Trends.pos.numeric <- c(1, 2, 3, rep(4, 2), rep(5, 2), rep(6, knots + 1), rep(7, knots + 1))
Trends.pos.num <- length(Trends.pos)
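# Example layout for knots = 1: Trends.pos = c("Constant", "LD", "LI", "CP",
# "CP", "CT", "CT", "MD", "MD", "MI", "MI"), i.e. each changepoint trend
# carries two gamma parameters and each monotonic trend carries knots + 1.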
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Track positions of the chosen trends
#------------------------------------------------------------------------------------------------------------------------------------------------------
Trends.id <- which(Trends.pos %in% trends)
Trends.sel <- length(Trends.pos[Trends.id])
Trends.id <- Trends.id[Trends.id != 1]
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Vector for the number of gamma parameters associated with each of the trends
#------------------------------------------------------------------------------------------------------------------------------------------------------
params.trends <- c(0, 1, 1, rep(1, 2), rep(1, 2), rep(1, knots + 1), rep(1, knots + 1))
Total.params.trends <- sum(params.trends)
params.selected <- sum(params.trends[Trends.id])
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Matrix containing tracking positions of associated gamma parameters
#------------------------------------------------------------------------------------------------------------------------------------------------------
Trend.pairs <- matrix(c(1, 0,
2, 0,
3, 0,
4, 5,
6, 7), ncol = 2, byrow = TRUE)
rownames(Trend.pairs) <- c("Constant", "LD", "LI", "CP", "CT")
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Include corresponding information for the monotonic trends
#------------------------------------------------------------------------------------------------------------------------------------------------------
col.knots1 <- seq(from = max(Trend.pairs) + 1, to = max(Trend.pairs) + (2 * (knots + 1)), by = 1)
col.knots2 <- c(0, rep(-1, knots), 0, rep(-1, knots))
col.knots2[which(col.knots2 == 0)] <- col.knots1[which(col.knots2 == 0)]
row.knots <- matrix(c(col.knots1, col.knots2), ncol = 2)
rownames(row.knots) <- c(rep("MD", knots + 1), rep("MI", knots + 1))
row.knots <- row.knots[which(rownames(row.knots) %in% trends), ]
Trend.pairs <- rbind(Trend.pairs, row.knots)
Trend.pairs <- Trend.pairs[which(rownames(Trend.pairs) %in% trends), ]
n.sel <- nrow(Trend.pairs)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Update tracking positions for the selected gamma parameters
#------------------------------------------------------------------------------------------------------------------------------------------------------
Trend.pairs.update <- Trend.pairs
CP.check <- 1
for(i in 1:n.sel)
{
if(Trend.pairs[i, 2] == 0)
{
Trend.pairs.update[i, 1] <- i
}else if(Trend.pairs[i, 2] > 0)
{
if(rownames(Trend.pairs)[i] %in% c("CP", "CT"))
{
Trend.pairs.update[i, 1] <- Trend.pairs.update[(i-1), 1] + CP.check
Trend.pairs.update[i, 2] <- Trend.pairs.update[i, 1] + 1
CP.check <- CP.check + 1
}else if(rownames(Trend.pairs)[i] %in% c("MD", "MI"))
{
if(Trend.pairs.update[(i-1), 2] > 0)
{
Mono.check <- 2
}else
{
Mono.check <- 1
}
Trend.pairs.update[i, ] <- Trend.pairs.update[(i-1), 1] + Mono.check
}
}else if(Trend.pairs[i, 2] < 0)
{
Trend.pairs.update[i, 1] <- Trend.pairs.update[(i-1), 1] + 1
}
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Track positions of the gamma parameters selected by the given trends
#------------------------------------------------------------------------------------------------------------------------------------------------------
Trend.names <- rownames(Trend.pairs.update)
gamma.pos <- rep(0, Trends.pos.num)
pos.gamma <- unique(Trend.pairs.update[which(Trend.pairs %in% Trends.id)])
gamma.pos[Trends.id] <- pos.gamma[order(pos.gamma)]
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check the number of MCMC chains is >= 2
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(Nchains <= 1) stop("the number of chains has to be greater than or equal to 2.", call.=FALSE)
if(Nchains %% 1 != 0) stop("the number of chains needs to be an integer.", call.=FALSE)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Format the arguments and check for errors
#------------------------------------------------------------------------------------------------------------------------------------------------------
frame.results <- common.frame(formula, data, "binomial")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
failures <- trials - Y
if(p>1) stop("No covariates are allowed in this model due to identifiability issues.", call.=FALSE)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Spatial quantities
#------------------------------------------------------------------------------------------------------------------------------------------------------
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
W.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check that the changepoint is within the time period (this check needs N, which is only available once K has been computed above)
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(any(c("CP", "CT") %in% trends) & !is.null(changepoint))
{
if(!(changepoint >= 1 & changepoint <= N)) stop("The changepoint needs to be within the time period.", call.=FALSE)
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check that the number of knots is not greater than the number of time points
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(any(c("MD", "MI") %in% trends) & !is.null(knots))
{
if(knots > N) stop("The number of knots cannot be greater than the number of time points.", call.=FALSE)
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Check and specify the priors
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.mean.gamma)) prior.mean.gamma <- rep(0, params.selected)
if(is.null(prior.var.gamma)) prior.var.gamma <- rep(100000, params.selected)
prior.mean.trends <- rep(0, Trends.pos.num)
prior.mean.trends[Trends.id] <- prior.mean.gamma
prior.var.trends <- rep(1000, Trends.pos.num)
prior.var.trends[Trends.id] <- prior.var.gamma
if(is.null(prior.lambda)) prior.lambda <- rep(1, N.trends)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
if(length(prior.mean.gamma) != params.selected) stop("the prior mean for gamma is the wrong length.", call.=FALSE)
if(!is.numeric(prior.mean.gamma)) stop("the prior mean for gamma is not numeric.", call.=FALSE)
if(sum(is.na(prior.mean.gamma)) != 0) stop("the prior mean for gamma is missing.", call.=FALSE)
if(prior.mean.trends[2] > 0) stop("the prior mean for the LD trend should be non-positive.", call.=FALSE)
if(prior.mean.trends[3] < 0) stop("the prior mean for the LI trend should be non-negative.", call.=FALSE)
if(prior.mean.trends[4] < 0) stop("the prior mean for the increase in CP trend should be non-negative.", call.=FALSE)
if(prior.mean.trends[5] > 0) stop("the prior mean for the decrease in CP trend should be non-positive.", call.=FALSE)
if(prior.mean.trends[6] > 0) stop("the prior mean for the decrease in CT trend should be non-positive.", call.=FALSE)
if(prior.mean.trends[7] < 0) stop("the prior mean for the increase in CT trend should be non-negative.", call.=FALSE)
if(any(prior.mean.trends[8:(8 + knots)] > 0)) stop("the prior mean for the MD trend should be non-positive.", call.=FALSE)
if(any(prior.mean.trends[(8 + knots + 1):((8 + knots + 1) + knots)] < 0)) stop("the prior mean for the MI trend should be non-negative.", call.=FALSE)
if(length(prior.var.gamma)!= params.selected) stop("the prior variance for gamma is the wrong length.", call.=FALSE)
if(!is.numeric(prior.var.gamma)) stop("the prior variance for gamma is not numeric.", call.=FALSE)
if(sum(is.na(prior.var.gamma))!= 0) stop("the prior variance for gamma is missing.", call.=FALSE)
if(min(prior.var.gamma) <= 0) stop("the prior variance for gamma has values that are not positive.", call.=FALSE)
if(length(prior.lambda) != N.trends) stop("the prior value for lambda is the wrong length.", call.=FALSE)
if(!is.numeric(prior.lambda)) stop("the prior value for lambda is not numeric.", call.=FALSE)
if(sum(is.na(prior.lambda)) != 0) stop("the prior value for lambda has missing values.", call.=FALSE)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Specify the initial parameter values
#------------------------------------------------------------------------------------------------------------------------------------------------------
dat <- cbind(Y, failures)
beta <- glm(dat~X.standardised-1, offset=offset, family=binomial(link="logit"))$coefficients
beta <- matrix(beta, nrow = p, ncol = Nchains)
proposal.corr.beta <- solve(t(X.standardised) %*% X.standardised)
chol.proposal.corr.beta <- chol(proposal.corr.beta)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Different initial beta values for each chain
#------------------------------------------------------------------------------------------------------------------------------------------------------
proposal.sd.beta <- rep(0.1, Nchains)
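# matcomp (a C++ helper) is assumed to return beta plus correlated Gaussian noise scaled by
# proposal.sd.beta and the Cholesky factor of (X'X)^(-1), giving each chain a different
# starting value with the same correlation structure as the GLM estimate.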
beta <- matcomp(chol.proposal.corr.beta, beta, proposal.sd.beta, p, Nchains)
gamma <- array(0, c(Trends.sel, Nchains))
for (i in Trends.id)
{
if(i == 2 | i == 5 | i == 6 | (i %in% 8:(8 + knots)))
{
gamma[gamma.pos[i], ] <- rtruncnorm(n=Nchains, b = 0, mean = 0, sd = 0.1)
}else if (i == 3 | i == 4 | i == 7 | (i %in% (8 + knots + 1):(8 + knots + 1 + knots)))
{
gamma[gamma.pos[i], ] <- rtruncnorm(n=Nchains, a = 0, mean = 0, sd = 0.1)
}
}
gamma.mat <- array(0, c(N.all, Trends.sel, Nchains))
for (i in Trends.id)
{
gamma.mat[,gamma.pos[i],] <- matN(gamma[gamma.pos[i], ], N.all, Nchains)
}
tau2 <- runif(Nchains, 0, 1)
rho <- runif(Nchains, 0, 1)
lambda <- t(rdirichlet(Nchains, prior.lambda))
w <- array(NA, c(K, N.trends, Nchains))
phi <- array(NA, c(K, Nchains))
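# Each chain allocates every area to one of the N.trends candidate trends via a single
# multinomial(1, lambda) draw, and starts the spatial effects phi close to zero.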
for (i in 1:Nchains)
{
w[, , i] <- t(rmultinom(K, 1, lambda[, i]))
phi[, i] <- rnorm(K, mean = 0, sd = 0.01)
}
kronN <- rep(1, N)
phimat <- kronecker(kronN, phi)
wmat <- kronecker(kronN, w)
w.chain.mat <- matrix(aperm(w, c(1, 3, 2)), nrow = K * Nchains, ncol = N.trends)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Compute the blocking structure for covariate beta's
#------------------------------------------------------------------------------------------------------------------------------------------------------
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
#------------------------------------------------------------------------------------------------------------------------------------------------------
# MCMC quantities - burnin, n.sample, thin
#------------------------------------------------------------------------------------------------------------------------------------------------------
common.burnin.nsample.thin.check(burnin, n.sample, thin)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Set up matrices to store samples
#------------------------------------------------------------------------------------------------------------------------------------------------------
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p, Nchains))
samples.gamma <- array(NA, c(Trends.sel, n.keep, 1, Nchains))
samples.w <- array(NA, c(n.keep, K, N.trends, Nchains))
samples.lambda <- array(NA, c(n.keep, N.trends, Nchains))
samples.tau2 <- array(NA, c(n.keep, 1, Nchains))
samples.rho <- array(NA, c(n.keep, 1, Nchains))
samples.phi <- array(NA, c(n.keep, K, Nchains))
samples.fitted <- array(NA, c(n.keep, N.all, Nchains))
samples.like <- array(NA, c(n.keep, N.all, Nchains))
samples.deviance <- array(NA, c(n.keep, 1, Nchains))
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Specify the Metropolis quantities
#------------------------------------------------------------------------------------------------------------------------------------------------------
accept.all <- rep(0, 2 * (Trends.sel + 1) * Nchains)
accept <- accept.all
begin.accept <- seq(from = 1, to = length(accept), by = 2)
end.accept <- begin.accept + 1
accept.blocks.num <- array(begin.accept, c(Nchains, 2))
accept.blocks.den <- array(end.accept, c(Nchains, 2))
accept.weight <- matrix(0, nrow = K, ncol = 2 * Nchains)
accept.w.all <- matrix(0, nrow = K, ncol = 2 * Nchains)
accept.phis <- matrix(0, nrow = K, ncol = 2 * Nchains)
accept.phis.all <- matrix(0, nrow = K, ncol = 2 * Nchains)
accept.gammas <- matrix(0, nrow = Trends.sel, ncol = 2 * Nchains)
accept.gammas.all <- matrix(0, nrow = Trends.sel, ncol = 2 * Nchains)
accept.couple <- rep(0, 2)
couples <- accept.couple
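# Conjugacy for tau2: with phi ~ N(0, tau2 * Q(rho)^(-1)) the full conditional is inverse-gamma
# with shape prior.tau2[1] + K/2 and scale prior.tau2[2] + 0.5 * phi' Q(rho) phi.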
tau2.shape <- prior.tau2[1] + K/2
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Create the determinant
#------------------------------------------------------------------------------------------------------------------------------------------------------
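# For the Leroux precision Q(rho) = rho * Wstar + (1 - rho) * I, log|Q(rho)|^(1/2) =
# 0.5 * sum(log(rho * Wstar.val + (1 - rho))), which Qdet is assumed to evaluate for each
# chain's rho; the eigenvalues of Wstar therefore only need computing once.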
Wstar <- diag(apply(W, 1, sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q.W <- Qdet(Nchains, rho, Wstar.val)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Specify quantities that do not change
#------------------------------------------------------------------------------------------------------------------------------------------------------
Y.mat <- matrix(Y, nrow=K, ncol=N, byrow=FALSE)
failures.mat <- matrix(failures, nrow=K, ncol=N, byrow=FALSE)
offset.mat <- matrix(offset, nrow=N.all, ncol=Nchains)
tp <- rep(1:N, each=K)
tp.mat <- matrix(tp, nrow=K, ncol=N)
tp.mat.trends <- array(tp.mat, c(K, N, Trends.sel))
tp.mat.trends <- aperm(tp.mat.trends, c(1, 3, 2))
tpmat <- array(tp, c(N.all, Trends.sel, Nchains))
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Update the matrix corresponding to time given trends CP/CT or MD/MI
#------------------------------------------------------------------------------------------------------------------------------------------------------
any.CP <- any(Trend.pairs[, 2] != 0)
track.pos <- which(Trend.pairs[, 2] != 0)
neg.pos <- which(Trend.pairs.update < 0, arr.ind = TRUE)
Trend.pairs.update[neg.pos[, 1], 2] <- Trend.pairs.update[neg.pos[, 1], 1]
track.add <- which(Trend.pairs[, 2] > 0)
if(any(names(track.pos) %in% c("MD", "MI")))
{
track.0 <- track.pos[which(names(track.pos) %in% c("MD", "MI"))]
track.0 <- track.0[which(track.0 %in% track.add)]
track.add <- track.pos[-which(track.pos %in% track.0)]
track.add <- Trend.pairs.update[track.add, 2]
}else
{
track.add <- Trend.pairs.update[track.add, 2]
}
if(any.CP)
{
tp.pos <- Trend.pairs.update[track.pos, 2]
if(any(names(tp.pos) %in% c("CP", "CT")))
{
tp.CP <- tp.pos[names(tp.pos) %in% c("CP", "CT")]
tpmat[, tp.CP, ] <- tpmat[, tp.CP, ] - changepoint
tpmat[tpmat < 0] <- 0
tp.mat.trends[, tp.CP, ] <- tp.mat.trends[, tp.CP, ] - changepoint
tp.mat.trends[tp.mat.trends < 0] <- 0
}
if(any(names(tp.pos) %in% c("MD", "MI")))
{
tp.CP <- tp.pos[names(tp.pos) %in% c("MD", "MI")]
k.space <- seq(from = 1, to = N, length = knots + 2)
k.space <- round(k.space[-c(1, (knots+2))], digits = 1)
if(all(c("MD", "MI") %in% names(tp.pos)))
{
kmat.col <- 2 * knots
}else
{
kmat.col <- knots
}
kmat <- matrix(k.space, nrow = N.all, ncol = kmat.col, byrow = TRUE)
kmat.Nchains <- array(kmat, dim = c(N.all, kmat.col, Nchains))
kmat.N <- matrix(k.space, nrow = K, ncol = kmat.col, byrow = TRUE)
kmat.N <- array(kmat.N, dim = c(K, kmat.col, N))
if(all(c("MD", "MI") %in% trends))
{
tp.pos.0 <- rep(NA, 2)
tp.pos.0[1] <- which(names(tp.CP) == "MD")[1]
tp.pos.0[2] <- which(names(tp.CP) == "MI")[1]
} else if("MD" %in% trends & !("MI" %in% trends))
{
tp.pos.0 <- which(names(tp.CP) == "MD")[1]
} else if("MI" %in% trends & !("MD" %in% trends))
{
tp.pos.0 <- which(names(tp.CP) == "MI")[1]
}
tp.pos.row <- tp.CP[tp.pos.0]
tpmat[, tp.pos.row, ] <- tpmat[, tp.pos.row, ] / N
tpmat[, tp.CP[-tp.pos.0], ] <- ((tpmat[, tp.CP[-tp.pos.0], ] - kmat.Nchains)^3) / N^3
tpmat[tpmat < 0] <- 0
kmax <- apply(tpmat[, tp.CP[-tp.pos.0], ], 2, max)
kmax.mat <- matrix(kmax, nrow = N.all, ncol = kmat.col, byrow = TRUE)
kmax.Nchains <- array(kmax.mat, dim = c(N.all, kmat.col, Nchains))
kmax.N <- matrix(kmax, nrow = K, ncol = kmat.col, byrow = TRUE)
kmax.N <- array(kmax.N, dim = c(K, kmat.col, N))
tpmat[, tp.CP[-tp.pos.0], ] <- tpmat[, tp.CP[-tp.pos.0], ] / kmax.Nchains
tp.mat.trends[, tp.pos.row, ] <- tp.mat.trends[, tp.pos.row, ] / N
tp.mat.trends[, tp.CP[-tp.pos.0], ] <- ((tp.mat.trends[, tp.CP[-tp.pos.0], ] - kmat.N)^3) / N^3
tp.mat.trends[tp.mat.trends < 0] <- 0
tp.mat.trends[, tp.CP[-tp.pos.0], ] <- tp.mat.trends[, tp.CP[-tp.pos.0], ] / kmax.N
}
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Keep track of the additional positions of the selected gamma parameters of the CP/CT and MD/MI trends
#------------------------------------------------------------------------------------------------------------------------------------------------------
Trends.chosen.names <- c("Constant", unique(Trends.pos[Trends.id]))
New.trend.pos <- rep(NA, length(track.add))
if(length(track.add) != 0)
{
for(i in 1:length(track.add))
{
New.trend.pos[i] <- which(Trends.chosen.names %in% names(track.add)[i])
}
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# tempering temperatures
#------------------------------------------------------------------------------------------------------------------------------------------------------
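# tempupdate is assumed to return a geometric temperature ladder, temps[c] = d.t^(c - 1), so
# chain 1 targets the untempered posterior and later chains target progressively flattened versions.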
d.t <- Nchains / (Nchains + 4)
temps <- tempupdate(Nchains, d.t)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# proposal standard deviations for M-H moves
#------------------------------------------------------------------------------------------------------------------------------------------------------
proposal.sd.gamma <- matrix(0.1, nrow = Trends.sel, ncol = Nchains)
proposal.sd.phi <- matrix(0.1, nrow = K, ncol = Nchains)
proposal.sd.rho <- rep(0.01, Nchains)
max.proposal.sd.rho <- 0.1
min.proposal.sd.rho <- 0.001
#------------------------------------------------------------------------------------------------------------------------------------------------------
# begin/end of chains for use in c++ functions due to using arrays
#------------------------------------------------------------------------------------------------------------------------------------------------------
begin.chain <- seq(from = 1, to = K * Nchains, by = K)
begin.chainN <- seq(from = 1, to = N.all * Nchains, by = N.all)
beg.reg.chain <- seq(from = 1, to = N.all, by = K)
log1 <- log(1)
N.all.trends <- N.all * Trends.sel
if(any.CP)
{
wmat.extend <- array(0, c(N.all, Trends.sel, Nchains))
wmat.extend[, -track.add, ] <- wmat
wmat.extend[, track.add, ] <- wmat[, New.trend.pos, ]
}else
{
wmat.extend <- wmat
}
beg.trend <- seq(from = 1, to = N.all.trends, by = N.all)
wmat.ar <- matrix(wmat.extend, nrow = N.all.trends, ncol = Nchains)
gamma.mat.ar <- matrix(gamma.mat, nrow = N.all.trends, ncol = Nchains)
tpmat.ar <- matrix(tpmat, nrow = N.all.trends, ncol = Nchains)
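# offsetcompute (a C++ helper) is assumed to return, for each chain, the N.all-vector of trend
# contributions sum_s w[, s] * gamma[, s] * f_s(t), i.e. the row sums of wmat.ar * gamma.mat.ar * tpmat.ar.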
trends.part <- offsetcompute(wmat.ar, gamma.mat.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Run the Bayesian model
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Start timer
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(verbose)
{
cat("Generating", n.keep, "post burnin and thinned (if requested) samples\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
for(j in 1:n.sample)
{
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Sample from beta
#------------------------------------------------------------------------------------------------------------------------------------------------------
proposal <- matcomp(chol.proposal.corr.beta, beta, proposal.sd.beta, p, Nchains)
proposal.beta <- beta
offset.temp <- offset.mat + trends.part + phimat
for(r in 1:n.beta.block)
{
proposal.beta[beta.beg[r]:beta.fin[r], ] <- proposal[beta.beg[r]:beta.fin[r], ]
beta.linpred <- linpredcomputeNchains(X.standardised, N.all, p, beta, Nchains)
proposal.linpred <- linpredcomputeNchains(X.standardised, N.all, p, proposal.beta, Nchains)
prob <- binomialbetablockupdate(N.all, beta, proposal.beta, beta.linpred, proposal.linpred, offset.temp, Y, failures, prior.mean.beta,
prior.var.beta, Nchains, temps, p)
accept.beta.chain <- prob > runif(Nchains)
beta[beta.beg[r]:beta.fin[r], accept.beta.chain] <- proposal.beta[beta.beg[r]:beta.fin[r], accept.beta.chain]
accept[accept.blocks.num[, 1]] <- accept[accept.blocks.num[, 1]] + as.numeric(accept.beta.chain)
proposal.beta[beta.beg[r]:beta.fin[r], !accept.beta.chain] <- beta[beta.beg[r]:beta.fin[r], !accept.beta.chain]
}
accept[accept.blocks.den[, 1]] <- accept[accept.blocks.den[, 1]] + n.beta.block
regression.mat <- linpredcomputeNchains(X.standardised, N.all, p, beta, Nchains)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Sample trend gamma's
#------------------------------------------------------------------------------------------------------------------------------------------------------
W.areas <- apply(w, c(2, 3), sum)
offset.temp <- offset.mat + regression.mat + trends.part + phimat
for (i in Trends.id)
{
gamma.proposal <- gammaproposal(Nchains, gamma[gamma.pos[i], ], proposal.sd.gamma[gamma.pos[i], ], prior.var.trends[i],
W.areas[which(Trends.chosen.names %in% Trends.pos[i])[1], ], i, knots)
gamma.mat.proposal <- gamma.mat
gamma.mat.proposal[, gamma.pos[i], ] <- matN(gamma.proposal, N.all, Nchains)
gamma.mat.proposal.ar <- matrix(gamma.mat.proposal, nrow = N.all.trends, ncol = Nchains)
trends.proposal <- offsetcompute(wmat.ar, gamma.mat.proposal.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend)
offset.proposal <- offset.mat + regression.mat + trends.proposal + phimat
gamma.list <- binomialgammaupdate(N.all, gamma[gamma.pos[i], ], gamma.proposal, offset.temp, offset.proposal, Y, failures,
prior.mean.trends[i], prior.var.trends[i], Nchains, temps)
if(!all(gamma.list[[2]] == 0))
{
gamma[gamma.pos[i], ] <- gamma.list[[1]]
gamma.mat[, gamma.pos[i], ] <- matN(gamma[gamma.pos[i], ], N.all, Nchains)
gamma.mat.ar <- matrix(gamma.mat, nrow = N.all.trends, ncol = Nchains)
trends.part <- offsetcompute(wmat.ar, gamma.mat.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend)
offset.temp <- offset.mat + regression.mat + trends.part + phimat
accept.gammas[gamma.pos[i], accept.blocks.num[, 1]] <- accept.gammas[gamma.pos[i], accept.blocks.num[, 1]] + gamma.list[[2]]
}
accept.gammas[gamma.pos[i], accept.blocks.den[, 1]] <- accept.gammas[gamma.pos[i], accept.blocks.den[, 1]] + 1
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Sample from w
#------------------------------------------------------------------------------------------------------------------------------------------------------
w.perm <- matrix(aperm(w, c(1, 3, 2)), nrow = K*Nchains, ncol = N.trends)
w.props <- sample(N.trends)
while (all(w.props == 1:N.trends))
{
w.props <- sample(N.trends)
}
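# Label-switching proposal: w.props is a uniformly drawn non-identity permutation of the
# N.trends columns, so every area is offered a different trend label; binomialwupdate then
# appears to accept or reject the relabelling area by area.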
w.proposal <- w.perm[, w.props]
w.proposal.array <- array(w.proposal, c(K, Nchains, N.trends))
w.proposal.array <- aperm(w.proposal.array, c(1, 3, 2))
w.proposal.array <- kronecker(kronN, w.proposal.array)
if(any.CP)
{
wmat.extend.proposal <- array(0, c(N.all, Trends.sel, Nchains))
wmat.extend.proposal[, -track.add, ] <- w.proposal.array
wmat.extend.proposal[, track.add, ] <- w.proposal.array[, New.trend.pos, ]
}else
{
wmat.extend.proposal <- w.proposal.array
}
w.proposal.ar <- matrix(wmat.extend.proposal, nrow = N.all.trends, ncol = Nchains)
trends.proposal <- offsetcompute(w.proposal.ar, gamma.mat.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend)
offset.proposal <- offset.mat + regression.mat + trends.proposal + phimat
w.list <- binomialwupdate(K, N, w.perm, offset.temp, offset.proposal, w.proposal, Y.mat, failures.mat, lambda, Nchains, temps, begin.chain,
beg.reg.chain, N.trends)
if(!all(w.list[[2]] == 0))
{
w <- w.list[[1]]
w.array <- array(w, c(K, Nchains, N.trends))
w <- aperm(w.array, c(1, 3, 2))
wmat <- kronecker(kronN, w)
if(any.CP)
{
wmat.extend <- array(0, c(N.all, Trends.sel, Nchains))
wmat.extend[, -track.add, ] <- wmat
wmat.extend[, track.add, ] <- wmat[, New.trend.pos, ]
}else
{
wmat.extend <- wmat
}
wmat.ar <- matrix(wmat.extend, nrow = N.all.trends, ncol = Nchains)
trends.part <- offsetcompute(wmat.ar, gamma.mat.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend)
w.chain.mat <- matrix(aperm(w, c(1, 3, 2)), nrow = K * Nchains, ncol = N.trends)
accept.weight[, accept.blocks.num[, 1]] <- accept.weight[, accept.blocks.num[, 1]] + w.list[[2]]
}
accept.weight[, accept.blocks.den[, 1]] <- accept.weight[, accept.blocks.den[, 1]] + 1
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Sample from lambda
#------------------------------------------------------------------------------------------------------------------------------------------------------
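# Multinomial-Dirichlet conjugacy: lambda | w ~ Dirichlet(prior.lambda + trend allocation counts),
# which lambdaupdate is assumed to sample directly for each chain.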
lambda.temp <- prior.lambda + apply(w, c(2, 3), sum)
lambda <- lambdaupdate(Nchains, lambda.temp)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Sample from phi
#------------------------------------------------------------------------------------------------------------------------------------------------------
offset.temp <- offset.mat + regression.mat + trends.part
phi.list <- binomialphiupdate(W.triplet, W.begfin, W.triplet.sum, K, N, phi, offset.temp, Y.mat, failures.mat, tau2, rho, Nchains,
temps, proposal.sd.phi, beg.reg.chain)
if(!all(phi.list[[2]] == 0))
{
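# Mean-centre the updated phi so the random effects satisfy a sum-to-zero constraint, keeping
# them identifiable separately from the trend intercepts.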
phi.means <- apply(phi.list[[1]], 2, mean)
phi <- phi.list[[1]] - matrix(phi.means, nrow = K, ncol = Nchains, byrow = TRUE)
phimat <- kronecker(kronN, phi)
accept.phis[, accept.blocks.num[, 1]] <- accept.phis[, accept.blocks.num[, 1]] + phi.list[[2]]
}
accept.phis[, accept.blocks.den[, 1]] <- accept.phis[, accept.blocks.den[, 1]] + 1
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Samples from tau2
#------------------------------------------------------------------------------------------------------------------------------------------------------
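# Gibbs step for tau2: tau2quadform is assumed to return the quadratic form phi' Q(rho) phi for
# each chain, and tau2computeNchains to draw from the resulting inverse-gamma full conditional
# with shape tau2.shape and scale prior.tau2[2] + 0.5 * quadratic form.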
tau2.temp <- tau2quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, rho, Nchains)
tau2 <- tau2computeNchains(tau2.temp, tau2.shape, prior.tau2[2], Nchains)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Samples from rho
#------------------------------------------------------------------------------------------------------------------------------------------------------
rho.temp1 <- rhoquadformcomputeNchains(W.triplet, W.triplet.sum, W.n.triplet, K, Nchains, phi, rho, tau2)
proposal.rho <- suppressWarnings(rtruncnorm(n = Nchains, a = 0, b = 0.99, mean = rho, sd = proposal.sd.rho))
rho.temp2 <- rhoquadformcomputeNchains(W.triplet, W.triplet.sum, W.n.triplet, K, Nchains, phi, proposal.rho, tau2)
det.Q.W.proposal <- Qdet(Nchains, proposal.rho, Wstar.val)
logprob.current <- det.Q.W - rho.temp1
logprob.proposal <- det.Q.W.proposal - rho.temp2
prob <- exp((logprob.proposal - logprob.current) * temps) # raised to temperature levels of each chain
accept.rho.chain <- prob > runif(Nchains)
rho[accept.rho.chain] <- proposal.rho[accept.rho.chain]
det.Q.W[accept.rho.chain] <- det.Q.W.proposal[accept.rho.chain]
accept[accept.blocks.num[, 2]] <- accept[accept.blocks.num[, 2]] + as.numeric(accept.rho.chain)
accept[accept.blocks.den[, 2]] <- accept[accept.blocks.den[, 2]] + 1
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Metropolis coupling
#------------------------------------------------------------------------------------------------------------------------------------------------------
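# MC^3 move: propose exchanging the full parameter states of two randomly chosen chains;
# binomialcouplingAllupdate (a C++ helper) is assumed to return 1 when the tempered posterior
# ratio accepts the swap, in which case all chain-specific quantities are swapped below.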
swap <- sample(1:Nchains, 2)
offset.temp <- offset.mat + regression.mat + trends.part + phimat
accept.swap <- binomialcouplingAllupdate(N.all, K, p, w.chain.mat, offset.temp, beta, gamma, lambda, phi, rho, tau2, W.triplet.sum, W.triplet,
W.begfin, Y, failures, prior.mean.beta, prior.var.beta, prior.mean.trends, prior.var.trends,
prior.lambda, prior.tau2, swap, temps, begin.chain, N.trends, Trends.sel)
if(accept.swap == 1)
{
rev.swap <- rev(swap)
beta[, rev.swap] <- beta[, swap]
regression.mat[, rev.swap] <- regression.mat[, swap]
proposal.sd.beta[rev.swap] <- proposal.sd.beta[swap]
gamma[, rev.swap] <- gamma[, swap]
gamma.mat[, , rev.swap] <- gamma.mat[, , swap]
gamma.mat.ar <- matrix(gamma.mat, nrow = N.all.trends, ncol = Nchains)
proposal.sd.gamma[, rev.swap] <- proposal.sd.gamma[, swap]
lambda[, rev.swap] <- lambda[, swap]
w[, , rev.swap] <- w[, , swap]
wmat[, , rev.swap] <- wmat[, , swap]
if(any.CP)
{
wmat.extend <- array(0, c(N.all, Trends.sel, Nchains))
wmat.extend[, -track.add, ] <- wmat
wmat.extend[, track.add, ] <- wmat[, New.trend.pos, ]
}else
{
wmat.extend <- wmat
}
w.chain.mat <- matrix(aperm(w, c(1, 3, 2)), nrow = K * Nchains, ncol = N.trends)
wmat.ar <- matrix(wmat.extend, nrow = N.all.trends, ncol = Nchains)
phi[, rev.swap] <- phi[, swap]
proposal.sd.phi[, rev.swap] <- proposal.sd.phi[, swap]
phimat[, rev.swap] <- phimat[, swap]
tau2[rev.swap] <- tau2[swap]
rho[rev.swap] <- rho[swap]
proposal.sd.rho[rev.swap] <- proposal.sd.rho[swap]
det.Q.W[rev.swap] <- det.Q.W[swap]
trends.part <- offsetcompute(wmat.ar, gamma.mat.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend)
offset.temp <- offset.mat + regression.mat + trends.part + phimat
}else
{}
accept.couple[1] <- accept.couple[1] + accept.swap
accept.couple[2] <- accept.couple[2] + 1
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Update temperatures
#------------------------------------------------------------------------------------------------------------------------------------------------------
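# Adapt the ladder spacing d.t every 10 iterations so the chain-swap acceptance rate stays
# roughly between 20% and 30%: decrease d.t (spreading the temperatures) when swaps are too
# frequent, and increase it towards 0.99 when they are too rare.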
if(j%%10==0)
{
MC3.accept <- 100 * accept.couple[1] / accept.couple[2]
if(MC3.accept > 30)
{
d.t <- max(runif(1, d.t * 0.8, d.t), 0.1)
temps <- tempupdate(Nchains, d.t)
}else if(MC3.accept < 20)
{
d.t <- min(runif(1, d.t, d.t * 1.2), 0.99)
temps <- tempupdate(Nchains, d.t)
}else
{}
}else
{}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Calculate the deviance
#------------------------------------------------------------------------------------------------------------------------------------------------------
bin.probs <- exp(offset.temp) / (1 + exp(offset.temp))
fitted <- trials * bin.probs
dev.like <- binomialdevfit(Y, trials, bin.probs, N.all, Nchains)
deviance <- dev.like[[1]]
like <- dev.like[[2]]
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Save the results
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele,,] <- beta
samples.gamma[,ele,,] <- gamma
samples.w[ele,,,] <- w
samples.lambda[ele,,] <- lambda
samples.tau2[ele,,] <- tau2
samples.rho[ele,,] <- rho
samples.phi[ele,,] <- phi
samples.deviance[ele,,] <- deviance
samples.fitted[ele,,] <- fitted
samples.like[ele,,] <- like
}else
{
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Self tune the acceptance probabilities
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Determine the acceptance probabilities
#------------------------------------------------------------------------------------------------------------------------------------------------------
accept.beta <- 100 * accept[accept.blocks.num[,1]] / accept[accept.blocks.den[,1]]
accept.gamma <- 100 * accept.gammas[,accept.blocks.num[,1]] / accept.gammas[,accept.blocks.den[,1]]
accept.gamma[1,] <- 0
accept.gammas.all <- accept.gammas.all + accept.gammas
accept.rho <- 100 * accept[accept.blocks.num[,2]] / accept[accept.blocks.den[,2]]
accept.w <- 100 * accept.weight[,accept.blocks.num[,1]] / accept.weight[,accept.blocks.den[,1]]
accept.w.all <- accept.w.all + accept.weight
accept.phi <- 100 * accept.phis[,accept.blocks.num[,1]] / accept.phis[,accept.blocks.den[,1]]
accept.phis.all <- accept.phis.all + accept.phis
accept.all <- accept.all + accept
accept <- rep(0, 2 * (Trends.sel + 1) * Nchains)
accept.weight <- matrix(0, nrow=K, ncol=2*Nchains)
accept.phis <- matrix(0, nrow=K, ncol=2*Nchains)
accept.gammas <- matrix(0, nrow=Trends.sel, ncol=2*Nchains)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# beta tuning parameter
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(any(accept.beta > 50))
{
proposal.sd.beta[which(accept.beta > 50)] <- 2 * proposal.sd.beta[which(accept.beta > 50)]
}else if(any(accept.beta < 40))
{
proposal.sd.beta[which(accept.beta < 40)] <- 0.5 * proposal.sd.beta[which(accept.beta < 40)]
}else
{
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# gamma tuning parameter
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(any(accept.gamma > 50))
{
proposal.sd.gamma[which(accept.gamma > 50)] <- 2 * proposal.sd.gamma[which(accept.gamma > 50)]
}else if(any(accept.gamma < 40))
{
proposal.sd.gamma[which(accept.gamma < 40)] <- 0.5 * proposal.sd.gamma[which(accept.gamma < 40)]
}else
{
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# rho tuning parameter
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(any(accept.rho > 50))
{
proposal.sd.rho[which(accept.rho > 50)] <- 2 * proposal.sd.rho[which(accept.rho > 50)]
if(any(proposal.sd.rho > max.proposal.sd.rho))
{
proposal.sd.rho[which(proposal.sd.rho > max.proposal.sd.rho)] <- max.proposal.sd.rho
}else
{
}
}else if(any(accept.rho < 40))
{
proposal.sd.rho[which(accept.rho < 40)] <- 0.5 * proposal.sd.rho[which(accept.rho < 40)]
if(any(proposal.sd.rho < min.proposal.sd.rho))
{
proposal.sd.rho[which(proposal.sd.rho < min.proposal.sd.rho)] <- min.proposal.sd.rho
}else
{
}
}else
{
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# phi tuning parameter
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(any(accept.phi > 50))
{
proposal.sd.phi[which(accept.phi > 50)] <- 2 * proposal.sd.phi[which(accept.phi > 50)]
}else if(any(accept.phi < 40))
{
proposal.sd.phi[which(accept.phi < 40)] <- 0.5 * proposal.sd.phi[which(accept.phi < 40)]
}else
{
}
}else
{}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Print progress to the console
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# End timer
#------------------------------------------------------------------------------------------------------------------------------------------------------
if(verbose)
{
cat("\nSummarising results")
close(progressBar)
}else
{}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Summarise and save the results
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Select untempered chain for inference
#------------------------------------------------------------------------------------------------------------------------------------------------------
chain.sel <- 1
p.d <- DIC <- LMPL <- NA
fitted.values <- residuals <- rep(NA, N.all)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Watanabe-Akaike Information Criterion (WAIC)
#------------------------------------------------------------------------------------------------------------------------------------------------------
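# LPPD = sum_j log( mean_s p(Y_j | theta^(s)) ) is the log pointwise predictive density and
# p.w = sum_j var_s( log p(Y_j | theta^(s)) ) the effective number of parameters, so WAIC = -2 * (LPPD - p.w).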
LPPD <- sum(log(apply(samples.like[,,chain.sel],2,mean)), na.rm=TRUE)
p.w <- sum(apply(log(samples.like[,,chain.sel]),2,var), na.rm=TRUE)
WAIC <- -2 * (LPPD - p.w)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Compute information criterion (DIC, DIC3, WAIC)
#------------------------------------------------------------------------------------------------------------------------------------------------------
mode.w <- matrix(0, nrow=K, ncol=N.trends)
Wsum <- apply(samples.w[,,,chain.sel],c(2,3),sum)
Wtrend <- which(Wsum == rowMaxs(Wsum), arr.ind=TRUE)
for (i in 1:K)
{
mode.w[Wtrend[i,1], Wtrend[i,2]] <- 1
}
mode.w <- array(mode.w, c(K,N.trends,N))
mode.beta <- rep(NA, p)
if(p == 1)
{
mode.beta <- density(samples.beta[,,chain.sel])
mode.beta <- mean(mode.beta$x[which(mode.beta$y==max(mode.beta$y))])
}else
{
for(i in 1:p)
{
betamode <- density(samples.beta[,i,chain.sel])
mode.beta[i] <- mean(betamode$x[which(betamode$y==max(betamode$y))])
}
}
reg.mat <- matrix(X.standardised %*% mode.beta, nrow=K, ncol=N, byrow=FALSE)
gamma.mat <- array(0, c(K,Trends.sel,N))
for(i in Trends.id)
{
gamma.dens <- density(samples.gamma[gamma.pos[i],,,chain.sel])
gamma.mean <- mean(gamma.dens$x[which(gamma.dens$y==max(gamma.dens$y))])
gamma.mat[,gamma.pos[i],] <- matN(rep(gamma.mean, N),K,N)
}
mode.phi <- rep(NA,K)
for(i in 1:K)
{
phimode <- density(samples.phi[,i,chain.sel])
mode.phi[i] <- mean(phimode$x[which(phimode$y==max(phimode$y))])
}
phi.mat <- matN(rep(mode.phi,N),K,N)
wmat.extend <- array(0, c(K,Trends.sel,N))
wmat.extend[,-track.add,] <- mode.w
wmat.extend[,track.add,] <- mode.w[,New.trend.pos,]
trends.part <- apply(wmat.extend * (gamma.mat * tp.mat.trends),c(1,3),sum)
offset.temp <- as.numeric(offset.mat[,chain.sel] + reg.mat + trends.part + phi.mat)
mode.prob <- exp(offset.temp) / (1 + exp(offset.temp))
fitted.mode <- trials * mode.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mode.prob, log=TRUE))
p.d <- mean(samples.deviance[,,chain.sel]) - deviance.fitted
DIC <- 2 * mean(samples.deviance[,,chain.sel]) - deviance.fitted
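# p.d is the mean posterior deviance minus the deviance at the (kernel density) posterior mode,
# so DIC = 2 * mean(deviance) - deviance.fitted = mean(deviance) + p.d.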
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Compute the LMPL
#------------------------------------------------------------------------------------------------------------------------------------------------------
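# CPO_j = 1 / E[ 1 / f(Y_j | theta) ] is the harmonic-mean estimator of the conditional
# predictive ordinate for observation j, and LMPL = sum_j log(CPO_j); larger values indicate better predictive fit.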
CPO <- rep(NA, N.all)
for(j in 1:N.all)
{
CPO[j] <- 1/mean((1 / dbinom(x=Y[j], size=trials[j], prob=(samples.fitted[,j,chain.sel] / trials[j]))))
}
LMPL <- sum(log(CPO))
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Create the Fitted values
#------------------------------------------------------------------------------------------------------------------------------------------------------
fitted.values <- apply(samples.fitted[,,chain.sel],2,mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mode.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Transform the parameters back to the original covariate scale
#------------------------------------------------------------------------------------------------------------------------------------------------------
samples.beta.orig <- common.betatransform(samples.beta[,,chain.sel], X.indicator, X.mean, X.sd, p, FALSE)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Compute the acceptance rates
#------------------------------------------------------------------------------------------------------------------------------------------------------
accept.beta <- 100 * accept.all[accept.blocks.num[,1]] / accept.all[accept.blocks.den[,1]]
accept.beta <- accept.beta[chain.sel]
accept.gammas <- 100 * accept.gammas.all[,accept.blocks.num[,1]] / accept.gammas.all[,accept.blocks.den[,1]]
accept.gammas <- accept.gammas[,chain.sel]
accept.rho <- 100 * accept.all[accept.blocks.num[,2]] / accept.all[accept.blocks.den[,2]]
accept.rho <- accept.rho[chain.sel]
accept.phis <- 100 * accept.phis.all[, accept.blocks.num[,1]] / accept.phis.all[,accept.blocks.den[,1]]
accept.phis <- accept.phis[,chain.sel]
coupled <- 100*accept.couple[1]/accept.couple[2]
accept.final <- c(accept.beta, accept.gammas[-1], accept.rho, mean(accept.phis), coupled)
names(accept.final) <- c("beta", paste("gamma.", Trends.pos[Trends.id], sep=""), "rho", "phi", "coupled")
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Create a summary object
#------------------------------------------------------------------------------------------------------------------------------------------------------
samples.beta.orig <- mcmc(samples.beta.orig)
mode.beta.orig <- rep(NA, p)
HPD.beta.orig <- matrix(NA, nrow=2, ncol=p)
if(p == 1)
{
mode.beta.orig <- density(samples.beta.orig)
mode.beta.orig <- mean(mode.beta.orig$x[which(mode.beta.orig$y==max(mode.beta.orig$y))])
HPD.beta.orig[1,] <- HPDinterval(samples.beta.orig, prob=0.95)[1]
HPD.beta.orig[2,] <- HPDinterval(samples.beta.orig, prob=0.95)[2]
summary.beta <- t(c(mode.beta.orig, HPD.beta.orig[1,], HPD.beta.orig[2,]))
}else
{
summary.beta <- matrix(NA, nrow=p, ncol=3)
for(i in 1:p)
{
origbetamode <- density(samples.beta.orig[,i])
mode.beta.orig[i] <- mean(origbetamode$x[which(origbetamode$y==max(origbetamode$y))])
HPD.beta.orig[1,i] <- HPDinterval(samples.beta.orig[,i], prob=0.95)[1]
HPD.beta.orig[2,i] <- HPDinterval(samples.beta.orig[,i], prob=0.95)[2]
summary.beta[i,1] <- mode.beta.orig[i]
summary.beta[i,2] <- HPD.beta.orig[1,i]
summary.beta[i,3] <- HPD.beta.orig[2,i]
}
}
summary.beta <- cbind(summary.beta, rep(n.keep,p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.gamma <- matrix(NA, nrow=Trends.sel, ncol=7)
for(i in Trends.id)
{
summary.gamma[gamma.pos[i],1] <- unique(as.numeric(gamma.mat[, gamma.pos[i],]))
summary.gamma[gamma.pos[i],2:3] <- HPDinterval(mcmc(samples.gamma[gamma.pos[i],,,chain.sel]), prob=0.95)
summary.gamma[gamma.pos[i],4] <- rep(n.keep,1)
summary.gamma[gamma.pos[i],5] <- accept.gammas[gamma.pos[i]]
summary.gamma[gamma.pos[i],6] <- effectiveSize(samples.gamma[gamma.pos[i],,,chain.sel])
summary.gamma[gamma.pos[i],7] <- geweke.diag(samples.gamma[gamma.pos[i],,,chain.sel])$z
}
colnames(summary.gamma) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
rownames(summary.gamma) <- c("gamma.constant", paste("gamma.", Trends.pos[Trends.id], sep=""))
summary.gamma <- summary.gamma[-1,]
if(Trends.sel==2)
{
summary.gamma <- matrix(summary.gamma, nrow=1, ncol=7)
colnames(summary.gamma) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
rownames(summary.gamma) <- paste("gamma.", Trends.pos[Trends.id], sep = "")
}
summary.lambda <- matrix(NA, nrow=N.trends, ncol=7)
for(i in 1:N.trends)
{
lambda.dens <- density(samples.lambda[,i,chain.sel])
lambda.mean <- mean(lambda.dens$x[which(lambda.dens$y==max(lambda.dens$y))])
summary.lambda[i,1] <- lambda.mean
summary.lambda[i,2:3] <- HPDinterval(mcmc(samples.lambda[,i,chain.sel]), prob=0.95)
summary.lambda[i,4] <- rep(n.keep,1)
summary.lambda[i,5] <- rep(100,1)
summary.lambda[i,6] <- effectiveSize(samples.lambda[,i,chain.sel])
summary.lambda[i,7] <- geweke.diag(samples.lambda[,i,chain.sel])$z
}
colnames(summary.lambda) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
rownames(summary.lambda) <- paste("lambda.", All.Trends[which(All.Trends %in% trends)], sep = "")
mode.tau2 <- density(samples.tau2[,,chain.sel])
mode.tau2 <- mean(mode.tau2$x[which(mode.tau2$y==max(mode.tau2$y))])
summary.tau2 <- t(c(mode.tau2, HPDinterval(mcmc(samples.tau2[,,chain.sel]), prob=0.95)[1],
HPDinterval(mcmc(samples.tau2[,,chain.sel]), prob=0.95)[2]))
summary.tau2 <- cbind(summary.tau2, rep(n.keep, 1), rep(100,1), effectiveSize(samples.tau2[,,chain.sel]),
geweke.diag(samples.tau2[,,chain.sel])$z)
colnames(summary.tau2) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
rownames(summary.tau2) <- c("tau2")
mode.rho <- density(samples.rho[,,chain.sel])
mode.rho <- mean(mode.rho$x[which(mode.rho$y==max(mode.rho$y))])
summary.rho <- t(c(mode.rho, HPDinterval(mcmc(samples.rho[,,chain.sel]), prob=0.95)[1],
HPDinterval(mcmc(samples.rho[,,chain.sel]), prob=0.95)[2]))
summary.rho <- cbind(summary.rho, rep(n.keep, 1), rep(accept.rho,1), effectiveSize(samples.rho[,,chain.sel]),
geweke.diag(samples.rho[,,chain.sel])$z)
colnames(summary.rho) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
rownames(summary.rho) <- c("rho")
summary.results <- rbind(summary.beta, summary.gamma, summary.lambda, summary.tau2, summary.rho)
summary.results[,1:3] <- round(summary.results[,1:3],4)
summary.results[,4:7] <- round(summary.results[,4:7],1)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Allocated trends for each location
#------------------------------------------------------------------------------------------------------------------------------------------------------
trends <- apply(samples.w[,,,chain.sel], c(2,3), sum)
trend.probs <- trends / n.keep
trends <- which(trends==rowMaxs(trends), arr.ind=TRUE)
trends <- trends[order(trends[,1]),]
trends[ ,2] <- Trends.chosen.names[trends[ ,2]]
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Compile and return the results
#------------------------------------------------------------------------------------------------------------------------------------------------------
loglike <- -0.5 * deviance.fitted
modelfit <- c(DIC[chain.sel], p.d[chain.sel], WAIC[chain.sel], p.w[chain.sel], LMPL[chain.sel], loglike)
names(modelfit) <- c("DIC", "p.d", "WAIC", "p.w", "LMPL", "loglikelihood")
samples <- list(beta=mcmc(t(matrix(samples.beta.orig, ncol=n.keep))), gamma=mcmc(t(matrix(samples.gamma[-1,,,chain.sel], ncol=n.keep))), lambda=mcmc(samples.lambda[,,chain.sel]),
tau2=mcmc(as.matrix(samples.tau2[,,chain.sel])), rho=mcmc(as.matrix(samples.rho[,,chain.sel])), w=samples.w[,,,chain.sel],
phi=mcmc(samples.phi[,,chain.sel]), fitted=mcmc(samples.fitted[,,chain.sel]))
model.string <- c("Likelihood model - binomial (logit link function)", "\nLatent structure model - spatial main effects and an area clustered trend\n")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit,
accept=accept.final, localised.structure=list(trends=trends[ ,-1], trend.probs=trend.probs), formula=formula, model=model.string, X=X)
class(results) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat(" finished in ", round(b[3]-a[3], 1), "seconds")
}else
{}
return(results)
}
#------------------------------------------------------------------------------------------------------------------------------------------------------
# End of file: CARBayesST/R/binomial.CARclustrends.R
#------------------------------------------------------------------------------------------------------------------------------------------------------
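# A minimal usage sketch for binomial.CARlinear below; dat, trials.vec and W are hypothetical
# objects that the user supplies:
# model <- binomial.CARlinear(formula=Y~x1+x2, data=dat, trials=trials.vec, W=W,
#                             burnin=20000, n.sample=120000, thin=10)
# model$summary.results
#------------------------------------------------------------------------------------------------------------------------------------------------------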
binomial.CARlinear <- function(formula, data=NULL, trials, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.mean.alpha=NULL, prior.var.alpha=NULL, prior.tau2=NULL, rho.slo=NULL, rho.int=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "binomial")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
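#### The linear time covariate is centred and scaled by N below, so it runs over roughly
#### (-0.5, 0.5); alpha is the common slope and delta the area-specific deviations from it.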
time <-(1:N - mean(1:N))/N
time.mat <- matrix(rep(time, K), byrow=TRUE, nrow=K)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials has missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials has non-numeric values.", call.=FALSE)
int.check <- N.all-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials has non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials has zero or negative values.", call.=FALSE)
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
failures <- trials - Y
#### Check on the rho arguments
if(is.null(rho.int))
{
rho <- runif(1)
fix.rho.int <- FALSE
}else
{
rho <- rho.int
fix.rho.int <- TRUE
}
if(!is.numeric(rho)) stop("rho.int is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.int is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.int is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.slo))
{
lambda <- runif(1)
fix.rho.slo <- FALSE
}else
{
lambda <- rho.slo
fix.rho.slo <- TRUE
}
if(!is.numeric(lambda)) stop("rho.slo is fixed but is not numeric.", call.=FALSE)
if(lambda<0 ) stop("rho.slo is outside the range [0, 1].", call.=FALSE)
if(lambda>1 ) stop("rho.slo is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.mean.alpha)) prior.mean.alpha <- rep(0, 1)
if(is.null(prior.var.alpha)) prior.var.alpha <- rep(100000, 1)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
if(length(prior.mean.alpha)!=1) stop("the prior mean for alpha is the wrong length.", call.=FALSE)
if(!is.numeric(prior.mean.alpha)) stop("the prior mean for alpha is not numeric.", call.=FALSE)
if(sum(is.na(prior.mean.alpha))!=0) stop("the prior mean for alpha has missing values.", call.=FALSE)
if(length(prior.var.alpha)!=1) stop("the prior variance for alpha is the wrong length.", call.=FALSE)
if(!is.numeric(prior.var.alpha)) stop("the prior variance for alpha is not numeric.", call.=FALSE)
if(sum(is.na(prior.var.alpha))!=0) stop("the prior variance for alpha has missing values.", call.=FALSE)
if(min(prior.var.alpha) <=0) stop("the prior variance for alpha has elements that are not positive.", call.=FALSE)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
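#### The subtraction of 1 in the block indices converts them to the zero-based indexing used
#### by the C++ updating functions.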
for(r in 1:n.beta.block)
{
list.block[[r]] <- (beta.beg[r]:beta.fin[r]) - 1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.CARlinearMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.int=fix.rho.int, fix.rho.slo=fix.rho.slo, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.CARlinearMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.int=fix.rho.int, fix.rho.slo=fix.rho.slo, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.CARlinearMCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.int=fix.rho.int, fix.rho.slo=fix.rho.slo, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "alpha", "phi", "delta", "rho.int", "rho.slo")
accept.final[1] <- 100 * results$accept[1] / results$accept[2]
accept.final[2] <- 100 * results$accept[3] / results$accept[4]
accept.final[3] <- 100 * results$accept[5] / results$accept[6]
accept.final[4] <- 100 * results$accept[7] / results$accept[8]
if(!fix.rho.int) accept.final[5] <- 100 * results$accept[9] / results$accept[10]
if(!fix.rho.slo) accept.final[6] <- 100 * results$accept[11] / results$accept[12]
## Compute the fitted deviance
mean.phi <- apply(results$samples.phi, 2, mean)
mean.delta <- apply(results$samples.delta, 2, mean)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K)
delta.time.mat <- apply(time.mat, 2, "*", mean.delta)
mean.alpha <- mean(results$samples.alpha)
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
lp.mean <- as.numeric(offset.mat + regression.mat + mean.phi.mat + delta.time.mat + mean.alpha * time.mat)
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
## Create the samples object
if(fix.rho.int & fix.rho.slo)
{
samples.rhoext <- NA
}else if(fix.rho.int & !fix.rho.slo)
{
samples.rhoext <- results$samples.lambda
names(samples.rhoext) <- "rho.slo"
}else if(!fix.rho.int & fix.rho.slo)
{
samples.rhoext <- results$samples.rho
names(samples.rhoext) <- "rho.int"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.lambda)
colnames(samples.rhoext) <- c("rho.int", "rho.slo")
}
colnames(results$samples.tau2) <- c("tau2.int", "tau2.slo")
samples <- list(beta=mcmc(samples.beta.orig), alpha=mcmc(results$samples.alpha), phi=mcmc(results$samples.phi), delta=mcmc(results$samples.delta), tau2=mcmc(results$samples.tau2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.tau2 <- cbind(apply(results$samples.tau2, 2, mean), t(apply(results$samples.tau2, 2, quantile, c(0.025, 0.975))), rep(n.keep, 2), rep(100, 2),
effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.alpha <- c(mean(results$samples.alpha), quantile(results$samples.alpha, c(0.025, 0.975)), n.keep, accept.final[names(accept.final)=="alpha"],
effectiveSize(samples$alpha), geweke.diag(samples$alpha)$z)
summary.combine <- rbind(summary.alpha, summary.tau2)
rownames(summary.combine)[1] <- "alpha"
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.int", "rho.slo")
if(!fix.rho.int)
{
summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.int"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.slo)
{
summary.rho[2, 1:3] <- c(mean(results$samples.lambda), quantile(results$samples.lambda, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.slo"], effectiveSize(results$samples.lambda), geweke.diag(results$samples.lambda)$z)
}else
{
summary.rho[2, 1:3] <- c(lambda, lambda, lambda)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.combine, summary.rho)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "alpha", "phi", "delta", "rho.int", "rho.slo")
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.final[2] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.final[3] <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
accept.final[4] <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8])
if(!fix.rho.int) accept.final[5] <- 100 * sum(accept.temp2[ ,9]) / sum(accept.temp2[ ,10])
if(!fix.rho.slo) accept.final[6] <- 100 * sum(accept.temp2[ ,11]) / sum(accept.temp2[ ,12])
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]])
samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list)
if(!fix.rho.int)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.slo)
{
samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]])
samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list)
}
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.delta <- apply(samples.delta.matrix, 2, mean)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=FALSE, nrow=K)
delta.time.mat <- apply(time.mat, 2, "*", mean.delta)
mean.alpha <- mean(samples.alpha.matrix)
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
lp.mean <- as.numeric(offset.mat + regression.mat + mean.phi.mat + delta.time.mat + mean.alpha * time.mat)
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
alpha.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
delta.mcmc <- mcmc.list(lapply(samples.delta.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
for(j in 1:n.chains)
{
colnames(samples.tau2.list[[j]]) <- c("tau2.int", "tau2.slo")
}
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.int & fix.rho.slo)
{
rhoext.mcmc <- NA
}else if(fix.rho.int & !fix.rho.slo)
{
for(j in 1:n.chains)
{
colnames(samples.lambda.list[[j]]) <- c("rho.slo")
}
rhoext.mcmc <- mcmc.list(lapply(samples.lambda.list, mcmc))
}else if(!fix.rho.int & fix.rho.slo)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.int")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.lambda.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.int", "rho.slo")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, alpha=alpha.mcmc, phi=phi.mcmc, delta=delta.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.tau2 <- cbind(apply(samples.tau2.matrix, 2, mean), t(apply(samples.tau2.matrix, 2, quantile, c(0.025, 0.975))), rep(n.keep, 2), rep(100, 2),
effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.alpha <- c(mean(samples.alpha.matrix), quantile(samples.alpha.matrix, c(0.025, 0.975)), n.keep, accept.final[names(accept.final)=="alpha"],
effectiveSize(alpha.mcmc), gelman.diag(alpha.mcmc)$psrf[ ,2])
summary.combine <- rbind(summary.alpha, summary.tau2)
rownames(summary.combine)[1] <- "alpha"
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.int", "rho.slo")
if(!fix.rho.int)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.int"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.slo)
{
temp <- mcmc.list(lapply(samples.lambda.list, mcmc))
summary.rho[2, 1:3] <- c(mean(samples.lambda.matrix), quantile(samples.lambda.matrix, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.slo"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[2, 1:3] <- c(lambda, lambda, lambda)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.combine, summary.rho)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Binomial (logit link function)", "\nLatent structure model - Spatially autocorrelated linear time trends\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARlinear.R
|
binomial.CARlinearMCMC <- function(Y, failures, trials, offset, X.standardised, W, rho, lambda, fix.rho.int, fix.rho.slo, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.mean.alpha, prior.var.alpha, prior.tau2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
time <- (1:N - mean(1:N))/N
time.all <- kronecker(time, rep(1,K))
dat <- cbind(Y, failures)
mod.glm <- glm(dat~X.standardised-1 + time.all, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
temp <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
beta <- temp[1:p]
alpha <- temp[(p+1)]
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - as.numeric(X.standardised %*% beta) - time.all * alpha - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=0, sd = res.sd)
delta <- rnorm(n=K, mean=0, sd = res.sd)
tau2.phi <- var(phi)/10
tau2.delta <- var(delta)/10
#### Matrix versions
Y.DA <- Y
failures.DA <- failures
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
trials.mat <- matrix(trials, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(rep(phi, N), byrow=FALSE, nrow=K)
time.mat <- matrix(rep(time, K), byrow=TRUE, nrow=K)
delta.time.mat <- apply(time.mat, 2, "*", delta)
lp <- as.numeric(offset.mat + regression.mat + phi.mat + delta.time.mat + alpha * time.mat)
prob <- exp(lp) / (1 + exp(lp))
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.alpha <- array(NA, c(n.keep, 1))
samples.phi <- array(NA, c(n.keep, K))
samples.delta <- array(NA, c(n.keep, K))
if(!fix.rho.int) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.slo) samples.lambda <- array(NA, c(n.keep, 1))
samples.tau2 <- array(NA, c(n.keep, 2))
colnames(samples.tau2) <- c("tau2.int", "tau2.slo")
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Specify the Metropolis quantities
accept <- rep(0,12)
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
proposal.sd.delta <- 0.1
proposal.sd.alpha <- 0.1
proposal.sd.rho <- 0.02
proposal.sd.lambda <- 0.02
tau2.phi.shape <- prior.tau2[1] + K/2
tau2.delta.shape <- prior.tau2[1] + K/2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the determinant
if(!fix.rho.int | !fix.rho.slo)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
}else
{}
if(!fix.rho.int) det.Q.rho <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
if(!fix.rho.slo) det.Q.lambda <- 0.5 * sum(log((lambda * Wstar.val + (1-lambda))))
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.phi.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
if(lambda==1) tau2.delta.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
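## Missing responses are imputed from their binomial full conditionals given the
## current fitted probabilities, so the remaining updates can treat Y.DA as complete data.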
if(n.miss>0)
{
Y.DA[which.miss==0] <- rbinom(n=n.miss, size=trials[which.miss==0], prob=prob[which.miss==0])
failures.DA <- trials - Y.DA
}else
{}
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
failures.DA.mat <- matrix(failures.DA, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from beta
####################
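## beta is updated in blocks using either the MALA or random walk Metropolis C++ updater;
## offset.temp collects every other component of the linear predictor.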
offset.temp <- offset + as.numeric(phi.mat) + as.numeric(delta.time.mat) + as.numeric(alpha * time.mat)
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y.DA, failures.DA, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y.DA, failures.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from alpha
####################
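## Random walk Metropolis step for the overall trend alpha: prob1 is the log ratio of the
## Gaussian priors and prob2 the log ratio of the binomial likelihoods. The Gaussian
## proposal is symmetric, so no Hastings correction is needed.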
proposal.alpha <- rnorm(n=1, mean=alpha, sd=proposal.sd.alpha)
prob1 <- 0.5 * (alpha - prior.mean.alpha)^2 / prior.var.alpha - 0.5 * (proposal.alpha - prior.mean.alpha)^2 / prior.var.alpha
lp.current <- offset + as.numeric(regression.mat) + as.numeric(phi.mat) + as.numeric(delta.time.mat) + as.numeric(alpha * time.mat)
lp.proposal <- offset + as.numeric(regression.mat) + as.numeric(phi.mat) + as.numeric(delta.time.mat) + as.numeric(proposal.alpha * time.mat)
p.current <- exp(lp.current) / (1 + exp(lp.current))
p.proposal <- exp(lp.proposal) / (1 + exp(lp.proposal))
like.current <- Y.DA * log(p.current) + failures.DA * log(1-p.current)
like.proposal <- Y.DA * log(p.proposal) + failures.DA * log(1-p.proposal)
prob2 <- sum(like.proposal - like.current, na.rm=TRUE)
prob <- exp(prob1 + prob2)
if(prob > runif(1))
{
alpha <- proposal.alpha
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
####################
## Sample from phi
####################
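## The spatial intercepts phi are updated element-wise by random walk Metropolis and then
## centred for identifiability (over all areas when rho < 1, and within the first island when rho = 1).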
phi.offset <- offset.mat + regression.mat + delta.time.mat + alpha * time.mat
temp1 <- binomialcarupdateRW(W.triplet, W.begfin, W.triplet.sum, K, phi, tau2.phi,Y.DA.mat, failures.DA.mat, proposal.sd.phi, rho, phi.offset, N, rep(1,N))
phi <- temp1[[1]]
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
phi.mat <- matrix(rep(phi, N), byrow=FALSE, nrow=K)
accept[5] <- accept[5] + temp1[[2]]
accept[6] <- accept[6] + K
####################
## Sample from delta
####################
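## The area-specific slopes delta are updated in the same way, with the centring applied analogously.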
delta.offset <- offset.mat + regression.mat + phi.mat + alpha * time.mat
temp2 <- binomialcarupdateRW(W.triplet, W.begfin, W.triplet.sum, K, delta, tau2.delta,Y.DA.mat, failures.DA.mat, proposal.sd.delta, lambda, delta.offset, N, time)
delta <- temp2[[1]]
if(lambda <1)
{
delta <- delta - mean(delta)
}else
{
delta[which(islands==1)] <- delta[which(islands==1)] - mean(delta[which(islands==1)])
}
delta.time.mat <- apply(time.mat, 2, "*", delta)
accept[7] <- accept[7] + temp2[[2]]
accept[8] <- accept[8] + K
#######################
## Sample from tau2.phi
#######################
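## tau2.phi has a conjugate inverse-gamma full conditional: the shape is prior.tau2[1] + K/2
## and the scale adds the CAR quadratic form in phi to prior.tau2[2].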
temp2.phi <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, rho)
tau2.phi.scale <- temp2.phi + prior.tau2[2]
tau2.phi <- 1 / rgamma(1, tau2.phi.shape, scale=(1/tau2.phi.scale))
#########################
## Sample from tau2.delta
#########################
temp2.delta <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, delta, delta, lambda)
tau2.delta.scale <- temp2.delta + prior.tau2[2]
tau2.delta <- 1 / rgamma(1, tau2.delta.shape, scale=(1/tau2.delta.scale))
##################
## Sample from rho
##################
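## rho controls the spatial dependence of phi and is updated by Metropolis-Hastings using a
## truncated Gaussian proposal on [0,1]; det.Q.rho is the half log-determinant of the CAR
## precision matrix, computed cheaply from the pre-computed eigenvalues Wstar.val.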
if(!fix.rho.int)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q.rho - temp2.phi / tau2.phi
logprob.proposal <- det.Q.proposal - temp3 / tau2.phi
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.rho <- det.Q.proposal
accept[9] <- accept[9] + 1
}else
{
}
accept[10] <- accept[10] + 1
}else
{}
#####################
## Sample from lambda
#####################
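## lambda plays the same role for the slope surface delta and is updated analogously.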
if(!fix.rho.slo)
{
proposal.lambda <- rtruncnorm(n=1, a=0, b=1, mean=lambda, sd=proposal.sd.lambda)
temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, delta, delta, proposal.lambda)
det.Q.proposal <- 0.5 * sum(log((proposal.lambda * Wstar.val + (1-proposal.lambda))))
logprob.current <- det.Q.lambda - temp2.delta / tau2.delta
logprob.proposal <- det.Q.proposal - temp3 / tau2.delta
hastings <- log(dtruncnorm(x=lambda, a=0, b=1, mean=proposal.lambda, sd=proposal.sd.lambda)) - log(dtruncnorm(x=proposal.lambda, a=0, b=1, mean=lambda, sd=proposal.sd.lambda))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
lambda <- proposal.lambda
det.Q.lambda <- det.Q.proposal
accept[11] <- accept[11] + 1
}else
{
}
accept[12] <- accept[12] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- as.numeric(offset.mat + regression.mat + phi.mat + delta.time.mat + alpha * time.mat)
prob <- exp(lp) / (1+exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.delta[ele, ] <- delta
samples.alpha[ele, ] <- alpha
if(!fix.rho.int) samples.rho[ele, ] <- rho
if(!fix.rho.slo) samples.lambda[ele, ] <- lambda
samples.tau2[ele, ] <- c(tau2.phi, tau2.delta)
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
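## Every 100th iteration during the burn-in the proposal standard deviations are rescaled
## so that the acceptance rates move towards the target ranges given below.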
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.alpha <- common.accceptrates1(accept[3:4], proposal.sd.alpha, 30, 40)
proposal.sd.phi <- common.accceptrates1(accept[5:6], proposal.sd.phi, 40, 50)
proposal.sd.delta <- common.accceptrates1(accept[7:8], proposal.sd.delta, 40, 50)
if(!fix.rho.int) proposal.sd.rho <- common.accceptrates2(accept[9:10], proposal.sd.rho, 40, 50, 0.5)
if(!fix.rho.slo) proposal.sd.lambda <- common.accceptrates2(accept[11:12], proposal.sd.lambda, 40, 50, 0.5)
accept <- rep(0,12)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.int) samples.rho <- NA
if(fix.rho.slo) samples.lambda <- NA
chain.results <- list(samples.beta=samples.beta, samples.alpha=samples.alpha, samples.phi=samples.phi, samples.delta=samples.delta, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.lambda=samples.lambda, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARlinearMCMC.R
|
binomial.CARlocalised <- function(formula, data=NULL, G, trials, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.delta=NULL, prior.tau2=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame.localised(formula, data, "binomial")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- as.numeric(!is.na(Y))
n.miss <- N.all - sum(which.miss)
if(n.miss>0) stop("the response has missing 'NA' values.", call.=FALSE)
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
int.check <- N.all-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials have non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
failures <- trials - Y
#### Format and check the number of clusters G
if(length(G)!=1) stop("G is the wrong length.", call.=FALSE)
if(!is.numeric(G)) stop("G is not numeric.", call.=FALSE)
if(G<=1) stop("G is less than 2.", call.=FALSE)
if(G!=round(G)) stop("G is not an integer.", call.=FALSE)
if(floor(G/2)==ceiling(G/2))
{
Gstar <- G/2
}else
{
Gstar <- (G+1)/2
}
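## Gstar indexes the middle risk class; the allocation prior penalises squared deviations
## of each Z from Gstar and from its temporal neighbour (see the delta update in the MCMC function).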
#### Priors
if(!is.null(X.standardised))
{
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
}else
{}
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.var.check(prior.tau2)
if(is.null(prior.delta)) prior.delta <- 10
if(length(prior.delta)!=1) stop("the prior value for delta is the wrong length.", call.=FALSE)
if(!is.numeric(prior.delta)) stop("the prior value for delta is not numeric.", call.=FALSE)
if(sum(is.na(prior.delta))!=0) stop("the prior value for delta has missing values.", call.=FALSE)
if(prior.delta<=0) stop("the prior value for delta is not positive.", call.=FALSE)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.CARlocalisedMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, G=G, Gstar=Gstar, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.delta=prior.delta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.CARlocalisedMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, G=G, Gstar=Gstar, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.delta=prior.delta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.CARlocalisedMCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, G=G, Gstar=Gstar, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.delta=prior.delta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.lambda <- 100 * results$accept[1] / results$accept[2]
accept.delta <- 100 * results$accept[3] / results$accept[4]
accept.phi <- 100 * results$accept[5] / results$accept[6]
accept.gamma <- 100
if(!is.null(X.standardised))
{
accept.beta <- 100 * results$accept[7] / results$accept[8]
accept.final <- c(accept.beta, accept.lambda, accept.delta, accept.phi, accept.gamma)
names(accept.final) <- c("beta", "lambda", "delta", "phi", "rho.T")
}else
{
accept.final <- c(accept.lambda, accept.delta, accept.phi, accept.gamma)
names(accept.final) <- c("lambda", "delta", "phi", "rho.T")
}
## Compute the fitted deviance
mean.Z <- round(apply(results$samples.Z,2,mean), 0)
mean.lambda <- apply(results$samples.lambda, 2, mean)
mean.mu <- matrix(mean.lambda[mean.Z], nrow=K, ncol=N, byrow=FALSE)
if(!is.null(X.standardised))
{
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
}else
{
regression.mat <- matrix(0, nrow=K, ncol=N, byrow=FALSE)
}
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, byrow=FALSE)
lp.mean <- as.numeric(mean.mu + offset.mat + mean.phi + regression.mat)
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE))
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
if(!is.null(X.standardised))
{
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
}else
{
samples.beta.orig <- NA
}
## Create the samples object
samples <- list(beta=mcmc(samples.beta.orig), lambda=mcmc(results$samples.lambda), Z=mcmc(results$samples.Z), delta=mcmc(results$samples.delta), phi = mcmc(results$samples.phi), tau2=mcmc(results$samples.tau2), rho.T=mcmc(results$samples.gamma), fitted=mcmc(results$samples.fitted))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.hyper <- array(NA, c(3, 7))
summary.hyper[1,1:3] <- c(mean(results$samples.delta), quantile(results$samples.delta, c(0.025, 0.975)))
summary.hyper[2,1:3] <- c(mean(results$samples.tau2), quantile(results$samples.tau2, c(0.025, 0.975)))
summary.hyper[3,1:3] <- c(mean(results$samples.gamma), quantile(results$samples.gamma, c(0.025, 0.975)))
rownames(summary.hyper) <- c("delta", "tau2", "rho.T")
summary.hyper[1, 4:7] <- c(n.keep, accept.delta, effectiveSize(mcmc(results$samples.delta)), geweke.diag(mcmc(results$samples.delta))$z)
summary.hyper[2, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(results$samples.tau2)), geweke.diag(mcmc(results$samples.tau2))$z)
summary.hyper[3, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(results$samples.gamma)), geweke.diag(mcmc(results$samples.gamma))$z)
summary.lambda <- t(rbind(apply(results$samples.lambda, 2, mean), apply(results$samples.lambda, 2, quantile, c(0.025, 0.975))))
summary.lambda <- cbind(summary.lambda, rep(n.keep, G), rep(accept.lambda, G), effectiveSize(mcmc(results$samples.lambda)), geweke.diag(mcmc(results$samples.lambda))$z)
summary.lambda <- matrix(summary.lambda, ncol=7)
rownames(summary.lambda) <- paste("lambda", 1:G, sep="")
if(!is.null(X.standardised))
{
samples.beta.orig <- mcmc(samples.beta.orig)
summary.beta <- t(rbind(apply(samples.beta.orig, 2, mean), apply(samples.beta.orig, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.results <- rbind(summary.beta, summary.lambda, summary.hyper)
}else
{
summary.results <- rbind(summary.lambda, summary.hyper)
}
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.lambda <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.delta <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.phi <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
accept.gamma <- 100
if(!is.null(X.standardised))
{
accept.beta <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8])
accept.final <- c(accept.beta, accept.lambda, accept.delta, accept.phi, accept.gamma)
names(accept.final) <- c("beta", "lambda", "delta", "phi", "rho.T")
}else
{
accept.final <- c(accept.lambda, accept.delta, accept.phi, accept.gamma)
names(accept.final) <- c("lambda", "delta", "phi", "rho.T")
}
## Extract the samples into separate matrix and list objects
if(!is.null(X.standardised))
{
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
}else
{}
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.Z.list <- lapply(results, function(l) l[["samples.Z"]])
samples.Z.matrix <- do.call(what=rbind, args=samples.Z.list)
samples.gamma.list <- lapply(results, function(l) l[["samples.gamma"]])
samples.gamma.matrix <- do.call(what=rbind, args=samples.gamma.list)
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]])
samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list)
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the fitted deviance
mean.Z <- round(apply(samples.Z.matrix,2,mean), 0)
mean.lambda <- apply(samples.lambda.matrix, 2, mean)
mean.mu <- matrix(mean.lambda[mean.Z], nrow=K, ncol=N, byrow=FALSE)
if(!is.null(X.standardised))
{
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
}else
{
regression.mat <- matrix(0, nrow=K, ncol=N, byrow=FALSE)
}
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, byrow=FALSE)
lp.mean <- as.numeric(mean.mu + offset.mat + mean.phi + regression.mat)
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE))
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
if(!is.null(X.standardised))
{
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
}else
{
beta.mcmc <- mcmc(NA)
}
## Create MCMC objects
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
Z.mcmc <- mcmc.list(lapply(samples.Z.list, mcmc))
gamma.mcmc <- mcmc.list(lapply(samples.gamma.list, mcmc))
delta.mcmc <- mcmc.list(lapply(samples.delta.list, mcmc))
lambda.mcmc <- mcmc.list(lapply(samples.lambda.list, mcmc))
samples <- list(beta=beta.mcmc, phi=phi.mcmc, Z=Z.mcmc, rho.T=gamma.mcmc, lambda=lambda.mcmc, delta=delta.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc)
## create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.hyper <- array(NA, c(3, 7))
summary.hyper[1,1:3] <- c(mean(samples.delta.matrix), quantile(samples.delta.matrix, c(0.025, 0.975)))
summary.hyper[2,1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[3,1:3] <- c(mean(samples.gamma.matrix), quantile(samples.gamma.matrix, c(0.025, 0.975)))
rownames(summary.hyper) <- c("delta", "tau2", "rho.T")
summary.hyper[1, 4:7] <- c(n.keep, accept.delta, effectiveSize(delta.mcmc), gelman.diag(delta.mcmc)$psrf[ ,2])
summary.hyper[2, 4:7] <- c(n.keep, 100, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.hyper[3, 4:7] <- c(n.keep, 100, effectiveSize(gamma.mcmc), gelman.diag(gamma.mcmc)$psrf[ ,2])
summary.lambda <- t(rbind(apply(samples.lambda.matrix, 2, mean), apply(samples.lambda.matrix, 2, quantile, c(0.025, 0.975))))
summary.lambda <- cbind(summary.lambda, rep(n.keep, G), rep(accept.lambda, G), effectiveSize(lambda.mcmc), gelman.diag(lambda.mcmc)$psrf[ ,2])
summary.lambda <- matrix(summary.lambda, ncol=7)
rownames(summary.lambda) <- paste("lambda", 1:G, sep="")
if(!is.null(X.standardised))
{
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.results <- rbind(summary.beta, summary.lambda, summary.hyper)
}else
{
summary.results <- rbind(summary.lambda, summary.hyper)
}
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Binomial (logit link function)", "\nLatent structure model - Localised autoregressive order 1 CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=mean.Z, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARlocalised.R
|
binomial.CARlocalisedMCMC <- function(Y, failures, trials, offset, X.standardised, W, G, Gstar, K, N, N.all, p, burnin, n.sample, thin, MALA, prior.mean.beta, prior.var.beta, prior.delta, prior.tau2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Compute the blocking structure for beta
if(!is.null(X.standardised))
{
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- (beta.beg[r]:beta.fin[r]) - 1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
}else
{}
#### Compute a starting value for beta
if(!is.null(X.standardised))
{
dat <- cbind(Y, failures)
mod.glm <- glm(dat~X.standardised-1, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
regression.vec <- X.standardised %*% beta
}else
{
regression.vec <- rep(0, N.all)
}
#### Generate the initial parameter values
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - regression.vec - offset
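## Initialise the cluster means (lambda) and allocations (Z) by k-means clustering of the
## logit-scale working residuals, with the centres sorted so that lambda is increasing.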
clust <- kmeans(res.temp,G)
lambda <- clust$centers[order(clust$centers)]
lambda.mat <- matrix(rep(lambda, N), nrow=N, byrow=TRUE)
Z <- rep(1, N.all)
for(j in 2:G)
{
Z[clust$cluster==order(clust$centers)[j]] <- j
}
Z.mat <- matrix(Z, nrow=K, ncol=N, byrow=FALSE)
mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE)
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi.mat <- matrix(rnorm(n=N.all, mean=0, sd = res.sd), nrow=K, byrow=FALSE)
phi <- as.numeric(phi.mat)
tau2 <- var(phi)/10
gamma <- runif(1)
delta <- runif(1, 1, min(2, prior.delta))
#### Specify matrix quantities
Y.mat <- matrix(Y, nrow=K, ncol=N, byrow=FALSE)
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(regression.vec, nrow=K, ncol=N, byrow=FALSE)
failures.mat <- matrix(failures, nrow=K, ncol=N, byrow=FALSE)
trials.mat <- matrix(trials, nrow=K, ncol=N, byrow=FALSE)
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.Z <- array(NA, c(n.keep, N.all))
samples.lambda <- array(NA, c(n.keep, G))
samples.delta <- array(NA, c(n.keep, 1))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.gamma <- array(NA, c(n.keep, 1))
samples.phi <- array(NA, c(n.keep, N.all))
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
#### Specify the Metropolis quantities
if(!is.null(X.standardised))
{
samples.beta <- array(NA, c(n.keep, p))
accept <- rep(0,8)
proposal.corr.beta <- solve(t(X.standardised) %*% X.standardised)
chol.proposal.corr.beta <- chol(proposal.corr.beta)
proposal.sd.beta <- 0.01
}else
{
accept <- rep(0,6)
}
proposal.sd.lambda <- 0.1
proposal.sd.delta <- 0.1
proposal.sd.phi <- 0.1
Y.extend <- matrix(rep(Y, G), byrow=FALSE, ncol=G)
delta.update <- matrix(rep(1:G, N.all-K), ncol=G, byrow=TRUE)
tau2.posterior.shape <- prior.tau2[1] + N * (K-1) /2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################
## Sample from beta
####################
if(!is.null(X.standardised))
{
offset.temp <- offset + as.numeric(mu) + as.numeric(phi.mat)
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y, failures, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y, failures, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[7] <- accept[7] + temp[[2]]
accept[8] <- accept[8] + n.beta.block
regression.vec <- X.standardised %*% beta
regression.mat <- matrix(regression.vec, nrow=K, ncol=N, byrow=FALSE)
}else
{}
#######################
#### Sample from lambda
#######################
#### Propose a new value
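## Each cluster mean is proposed from a Gaussian truncated between its current neighbours,
## which preserves the ordering lambda_1 < ... < lambda_G.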
proposal.extend <- c(-100, lambda, 100)
for(r in 1:G)
{
proposal.extend[(r+1)] <- rtruncnorm(n=1, a=proposal.extend[r], b=proposal.extend[(r+2)], mean=proposal.extend[(r+1)], sd=proposal.sd.lambda)
}
proposal <- proposal.extend[-c(1, (G+2))]
#### Compute the data likelihood
lp.current <- lambda[Z] + offset + as.numeric(regression.mat) + as.numeric(phi.mat)
lp.proposal <- proposal[Z] + offset + as.numeric(regression.mat) + as.numeric(phi.mat)
p.current <- exp(lp.current) / (1 + exp(lp.current))
p.proposal <- exp(lp.proposal) / (1 + exp(lp.proposal))
like.current <- Y * log(p.current) + failures * log(1 - p.current)
like.proposal <- Y * log(p.proposal) + failures * log(1 - p.proposal)
prob <- exp(sum(like.proposal - like.current))
if(prob > runif(1))
{
lambda <- proposal
lambda.mat <- matrix(rep(lambda, N), nrow=N, byrow=TRUE)
mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE)
accept[1] <- accept[1] + 1
}else
{}
accept[2] <- accept[2] + 1
##################
#### Sample from Z
##################
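## The allocations Z are updated via the C++ helper Zupdatesqbin; prior.offset pre-computes
## the log normalising constants of the allocation prior for each possible class.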
prior.offset <- rep(NA, G)
for(r in 1:G)
{
prior.offset[r] <- log(sum(exp(-delta * ((1:G - r)^2 + (1:G - Gstar)^2))))
}
mu.offset <- offset.mat + regression.mat + phi.mat
Z.mat <- Zupdatesqbin(Z=Z.mat, Offset=mu.offset, Y=Y.mat, delta=delta, lambda=lambda, nsites=K, ntime=N, G=G, SS=1:G, prioroffset=prior.offset, Gstar=Gstar, failures=failures.mat)
Z <- as.numeric(Z.mat)
mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE)
######################
#### Sample from delta
######################
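## The clustering smoothness parameter delta is updated by Metropolis-Hastings with a
## Gaussian proposal truncated to [1, prior.delta]; the Hastings ratio below corrects for
## the asymmetry of the truncated proposal.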
proposal.delta <- rtruncnorm(n=1, a=1, b=prior.delta, mean=delta, sd=proposal.sd.delta)
sum.delta1 <- sum((Z - Gstar)^2)
sum.delta2 <- sum((Z.mat[ ,-1] - Z.mat[ ,-N])^2)
current.fc1 <- -delta * (sum.delta1 + sum.delta2) - K * log(sum(exp(-delta * (1:G - Gstar)^2)))
proposal.fc1 <- -proposal.delta * (sum.delta1 + sum.delta2) - K * log(sum(exp(-proposal.delta * (1:G - Gstar)^2)))
Z.temp <- matrix(rep(as.numeric(Z.mat[ ,-N]),G), ncol=G, byrow=FALSE)
Z.temp2 <- (delta.update - Z.temp)^2 + (delta.update - Gstar)^2
current.fc <- current.fc1 - sum(log(apply(exp(-delta * Z.temp2),1,sum)))
proposal.fc <- proposal.fc1 - sum(log(apply(exp(-proposal.delta * Z.temp2),1,sum)))
hastings <- log(dtruncnorm(x=delta, a=1, b=prior.delta, mean=proposal.delta, sd=proposal.sd.delta)) - log(dtruncnorm(x=proposal.delta, a=1, b=prior.delta, mean=delta, sd=proposal.sd.delta))
prob <- exp(proposal.fc - current.fc + hastings)
if(prob > runif(1))
{
delta <- proposal.delta
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
####################
#### Sample from phi
####################
phi.offset <- mu + offset.mat + regression.mat
temp1 <- binomialar1carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, tau2, gamma, 1, Y.mat, failures.mat, proposal.sd.phi, phi.offset, W.triplet.sum)
phi.temp <- temp1[[1]]
phi <- as.numeric(phi.temp)
for(i in 1:G)
{
phi[which(Z==i)] <- phi[which(Z==i)] - mean(phi[which(Z==i)])
}
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
accept[5] <- accept[5] + temp1[[2]]
accept[6] <- accept[6] + K*N
####################
## Sample from gamma
####################
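## gamma (the temporal AR(1) dependence parameter) has a Gaussian full conditional based on
## the quadratic form in phi, truncated to [0,1], and so is updated by a direct Gibbs draw.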
temp2 <- gammaquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, 1)
mean.gamma <- temp2[[1]] / temp2[[2]]
sd.gamma <- sqrt(tau2 / temp2[[2]])
gamma <- rtruncnorm(n=1, a=0, b=1, mean=mean.gamma, sd=sd.gamma)
####################
## Sample from tau2
####################
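## tau2 has a conjugate inverse-gamma full conditional based on the space-time quadratic form in phi.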
temp3 <- tauquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, 1, gamma)
tau2.posterior.scale <- temp3 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
#########################
## Calculate the deviance
#########################
lp <- as.numeric(mu + offset.mat + regression.mat + phi.mat)
prob <- exp(lp) / (1+exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.delta[ele, ] <- delta
samples.lambda[ele, ] <- lambda
samples.Z[ele, ] <- Z
samples.phi[ele, ] <- as.numeric(phi.mat)
samples.tau2[ele, ] <- tau2
samples.gamma[ele, ] <- gamma
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(!is.null(X.standardised)) samples.beta[ele, ] <- beta
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
if(!is.null(X.standardised))
{
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[5:6], proposal.sd.phi, 40, 50)
proposal.sd.lambda <- common.accceptrates2(accept[1:2], proposal.sd.lambda, 20, 40, 10)
proposal.sd.delta <- common.accceptrates2(accept[3:4], proposal.sd.delta, 40, 50, prior.delta/6)
accept <- rep(0,8)
}else
{
proposal.sd.phi <- common.accceptrates1(accept[5:6], proposal.sd.phi, 40, 50)
proposal.sd.lambda <- common.accceptrates2(accept[1:2], proposal.sd.lambda, 20, 40, 10)
proposal.sd.delta <- common.accceptrates2(accept[3:4], proposal.sd.delta, 40, 50, prior.delta/6)
accept <- rep(0,6)
}
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(is.null(X.standardised)) samples.beta <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.Z=samples.Z, samples.lambda=samples.lambda, samples.tau2=samples.tau2, samples.delta=samples.delta, samples.gamma=samples.gamma, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARlocalisedMCMC.R
|
binomial.CARsepspatial <- function(formula, data=NULL, trials, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "binomial")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
failures <- trials - Y
n.miss <- frame.results$n.miss
if(n.miss>0) stop("the response has missing 'NA' values.", call.=FALSE)
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
## Check for errors
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
if(sum(Y>trials)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
lambda <- runif(1)
fix.rho.T <- FALSE
}else
{
lambda <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(lambda)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(lambda<0 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
if(lambda>1 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
#### Compute the blocking structure for beta
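## Covariate effects are updated jointly in blocks; list.block stores zero-based column
## indices for the C++ updating functions.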
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- (beta.beg[r]:beta.fin[r]) - 1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.CARsepspatialMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.CARsepspatialMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.CARsepspatialMCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 5)
names(accept.final) <- c("beta", "phi", "delta", "rho.S", "rho.T")
accept.final[1] <- 100 * results$accept[1] / results$accept[2]
accept.final[2] <- 100 * results$accept[3] / results$accept[4]
accept.final[3] <- 100 * results$accept[7] / results$accept[8]
if(!fix.rho.S) accept.final[4] <- 100 * results$accept[5] / results$accept[6]
if(!fix.rho.T) accept.final[5] <- 100 * results$accept[9] / results$accept[10]
## Compute the fitted deviance
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=N)
mean.delta <- apply(results$samples.delta,2,mean)
delta.mat <- matrix(mean.delta, nrow=K, ncol=N, byrow=TRUE)
lp.mean <- as.numeric(offset.mat + mean.phi + regression.mat + delta.mat)
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.lambda
names(samples.rhoext) <- "rho.T"
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
names(samples.rhoext) <- "rho.S"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.lambda)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), delta=mcmc(results$samples.delta), tau2=mcmc(results$samples.tau2), tau2.T=mcmc(results$samples.sig2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(3 + N, 7))
for (tt in 1:N)
{
summary.hyper[tt,1:3] <- c(mean(results$samples.tau2[, tt]), quantile(results$samples.tau2[, tt], c(0.025, 0.975)))
summary.hyper[tt, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(results$samples.tau2[, tt])), geweke.diag(mcmc(results$samples.tau2[, tt]))$z)
}
summary.hyper[N+1,1:3] <- c(mean(results$samples.sig2), quantile(results$samples.sig2, c(0.025, 0.975)))
summary.hyper[N+1, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(results$samples.sig2)), geweke.diag(mcmc(results$samples.sig2))$z)
if(!fix.rho.S)
{
summary.hyper[N+2, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.hyper[N+2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.hyper[N+2, 1:3] <- c(rho, rho, rho)
summary.hyper[N+2, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
summary.hyper[N+3, 1:3] <- c(mean(results$samples.lambda), quantile(results$samples.lambda, c(0.025, 0.975)))
summary.hyper[N+3, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.lambda), geweke.diag(results$samples.lambda)$z)
}else
{
summary.hyper[N+3, 1:3] <- c(lambda, lambda, lambda)
summary.hyper[N+3, 4:7] <- rep(NA, 4)
}
rownames(summary.hyper) <- c(paste("tau2.", c(1:N), sep = ""), "tau2.T", "rho.S","rho.T")
summary.results <- rbind(summary.beta, summary.hyper)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 5)
names(accept.final) <- c("beta", "phi", "delta", "rho.S", "rho.T")
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.final[2] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.final[3] <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8])
if(!fix.rho.S) accept.final[4] <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
if(!fix.rho.T) accept.final[5] <- 100 * sum(accept.temp2[ ,9]) / sum(accept.temp2[ ,10])
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]])
samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list)
}
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.sig2.list <- lapply(results, function(l) l[["samples.sig2"]])
samples.sig2.matrix <- do.call(what=rbind, args=samples.sig2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the fitted deviance
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=N)
mean.delta <- apply(samples.delta.matrix,2,mean)
delta.mat <- matrix(mean.delta, nrow=K, ncol=N, byrow=TRUE)
lp.mean <- as.numeric(offset.mat + mean.phi + regression.mat + delta.mat)
mean.prob <- exp(lp.mean) / (1 + exp(lp.mean))
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm = T)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
delta.mcmc <- mcmc.list(lapply(samples.delta.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
sig2.mcmc <- mcmc.list(lapply(samples.sig2.list, mcmc))
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.lambda.list[[j]]) <- c("rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.lambda.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.lambda.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, delta=delta.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, tau2.T=sig2.mcmc, fitted=fitted.mcmc)
## create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(3 + N, 7))
for (tt in 1:N)
{
summary.hyper[tt,1:3] <- c(mean(samples.tau2.matrix[, tt]), quantile(samples.tau2.matrix[, tt], c(0.025, 0.975)))
summary.hyper[tt, 4:7] <- c(n.keep, 100, effectiveSize(tau2.mcmc[ ,tt]), gelman.diag(tau2.mcmc[ ,tt])$psrf[ ,2])
}
summary.hyper[N+1,1:3] <- c(mean(samples.sig2.matrix), quantile(samples.sig2.matrix, c(0.025, 0.975)))
summary.hyper[N+1, 4:7] <- c(n.keep, 100, effectiveSize(sig2.mcmc), gelman.diag(sig2.mcmc)$psrf[ ,2])
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.hyper[N+2, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[N+2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.hyper[N+2, 1:3] <- c(rho, rho, rho)
summary.hyper[N+2, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.lambda.list, mcmc))
summary.hyper[N+3, 1:3] <- c(mean(samples.lambda.matrix), quantile(samples.lambda.matrix, c(0.025, 0.975)))
summary.hyper[N+3, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.hyper[N+3, 1:3] <- c(lambda, lambda, lambda)
summary.hyper[N+3, 4:7] <- rep(NA, 4)
}
rownames(summary.hyper) <- c(paste("tau2.", c(1:N), sep = ""), "tau2.T", "rho.S","rho.T")
summary.results <- rbind(summary.beta, summary.hyper)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Binomial (logit link function)", "\nLatent structure model - An overall time trend with temporal specific spatial effects\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
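## Illustrative usage (not run) - a minimal sketch only. This engine is
## presumably reached via the package's exported ST.CARsepspatial() front
## end; the data objects dat, trials.vec and W.sim below are hypothetical.
# model <- ST.CARsepspatial(formula=Y~x1, family="binomial", data=dat,
#                           trials=trials.vec, W=W.sim, burnin=10000,
#                           n.sample=50000, thin=10, n.chains=2, n.cores=2)
# model$summary.results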
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARsepspatial.R
|
binomial.CARsepspatialMCMC <- function(Y, failures, trials, offset, X.standardised, W, rho, lambda, fix.rho.S, fix.rho.T, K, N, N.all, p, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
dat <- cbind(Y, failures)
mod.glm <- glm(dat~X.standardised-1, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=N.all, mean=0, sd = res.sd)
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
delta <- rnorm(n=N, mean=0, sd = res.sd)
tau2 <- apply(phi.mat, 2, var) / 10
sig2 <- var(delta)/10
#### Matrix versions
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
Y.mat <- matrix(Y, nrow=K, ncol=N, byrow=FALSE)
trials.mat <- matrix(trials, nrow=K, ncol=N, byrow=FALSE)
failures.mat <- matrix(failures, nrow=K, ncol=N, byrow=FALSE)
delta.mat <- matrix(delta, nrow=K, ncol=N, byrow=TRUE)
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, N.all))
samples.tau2 <- array(NA, c(n.keep, N))
samples.sig2 <- array(NA, c(n.keep, 1))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.lambda <- array(NA, c(n.keep, 1))
samples.delta <- array(NA, c(n.keep, N))
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
#### Specify the Metropolis quantities
accept <- rep(0,10)
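## The accept vector stores (accepted, attempted) counts in pairs:
## [1:2]=beta, [3:4]=phi, [5:6]=rho, [7:8]=delta, [9:10]=lambda. The
## calling function converts these totals into percentage acceptance rates.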
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.05
proposal.sd.beta <- 0.01
proposal.sd.delta <- 0.05
proposal.sd.lambda <- 0.02
tau2.shape <- prior.tau2[1] + K/2
sig2.shape <- prior.tau2[1] + N/2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Spatial determinant
if(!fix.rho.S)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q.W <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Temporal neighbourhood quantities
D <- array(0, c(N,N))
for(i in 1:N)
{
for(j in 1:N)
{
if(abs((i-j))==1) D[i,j] <- 1
}
}
D.triplet <- c(NA, NA, NA)
for(i in 1:N)
{
for(j in 1:N)
{
if(D[i,j]>0)
{
D.triplet <- rbind(D.triplet, c(i,j, D[i,j]))
}else{}
}
}
D.triplet <- D.triplet[-1, ]
D.n.triplet <- nrow(D.triplet)
D.triplet.sum <- tapply(D.triplet[ ,3], D.triplet[ ,1], sum)
D.neighbours <- tapply(D.triplet[ ,3], D.triplet[ ,1], length)
D.begfin <- array(NA, c(N, 2))
temp <- 1
for(i in 1:N)
{
D.begfin[i, ] <- c(temp, (temp + D.neighbours[i]-1))
temp <- temp + D.neighbours[i]
}
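## For illustration, with N=4 time periods the loops above produce the
## tridiagonal first-order random walk adjacency matrix
##     0 1 0 0
##     1 0 1 0
##     0 1 0 1
##     0 0 1 0
## whose non-zero entries are stored in D.triplet as (row, column, value)
## rows, e.g. (1,2,1), (2,1,1), (2,3,1), and so on.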
if(!fix.rho.T)
{
Dstar <- diag(apply(D,1,sum)) - D
Dstar.eigen <- eigen(Dstar)
Dstar.val <- Dstar.eigen$values
det.Q.D <- 0.5 * sum(log((lambda * Dstar.val + (1-lambda))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
n.island1 <- length(which(islands==1))
if(rho==1) tau2.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
if(lambda==1) sig2.shape <- prior.tau2[1] + 0.5 * (N-1)
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
###################
## Sample from beta
###################
offset.temp <- as.numeric(offset.mat + phi.mat + delta.mat)
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y, failures, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y, failures, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from phi
####################
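## The full conditionals for phi are updated in compiled code, and each
## time period's spatial surface is then mean-centred so that phi remains
## identifiable from the covariates and the temporal effects delta.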
phi.offset <- offset.mat + regression.mat + delta.mat
den.offset <- rho * W.triplet.sum + 1 - rho
temp1 <- binomialsrecarupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, rho, Y.mat, failures.mat, proposal.sd.phi, phi.offset, den.offset, tau2)
phi.temp <- temp1[[1]]
phi.mean <- apply(phi.temp,2,mean)
if(rho<1)
{
phi <- as.numeric(phi.temp) - kronecker(phi.mean, rep(1,K))
}else
{
phi.temp[which(islands==1), ] <- phi.temp[which(islands==1), ] - matrix(kronecker(phi.mean, rep(1,n.island1)), ncol=N, byrow=F)
phi <- as.numeric(phi.temp)
}
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + N.all
#####################
## Samples from delta
#####################
delta.offset <- t(offset.mat + phi.mat + regression.mat)
temp2 <- binomialcarupdateRW(D.triplet, D.begfin, D.triplet.sum, N, delta, sig2, t(Y.mat), t(failures.mat), proposal.sd.delta, lambda, delta.offset, K, rep(1,K))
delta <- temp2[[1]]
delta <- delta - mean(delta)
delta.mat <- matrix(rep(delta, K), byrow=T, nrow=K)
accept[7] <- accept[7] + temp2[[2]]
accept[8] <- accept[8] + N
####################
## Samples from tau2
####################
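## Gibbs step: conditional on phi, each time period's tau2 has an inverse
## gamma full conditional whose shape is tau2.shape and whose scale comes
## from the CAR quadratic form computed in compiled code below; sig2 is
## updated analogously from the quadratic form of delta.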
tau2.temp <- tauquadformcompute2(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho)
tau2 <- tau2compute(tau2, tau2.temp, tau2.shape, prior.tau2[2], N)
####################
## Samples from sig2
####################
temp2.delta <- quadform(D.triplet, D.triplet.sum, D.n.triplet, N, delta, delta, lambda)
sig2.scale <- temp2.delta + prior.tau2[2]
sig2 <- 1 / rgamma(1, sig2.shape, scale=(1/sig2.scale))
##################
## Sample from rho
##################
if(!fix.rho.S)
{
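## Metropolis-Hastings step for rho on (0,1) using a truncated normal
## random walk proposal. The truncation makes the proposal asymmetric, so
## the hastings term below (log ratio of the two truncated normal
## densities) is needed for detailed balance.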
temp3 <- rhoquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho, tau2)
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp4 <- rhoquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, proposal.rho, tau2)
det.Q.W.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- N * det.Q.W - temp3
logprob.proposal <- N * det.Q.W.proposal - temp4
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.W <- det.Q.W.proposal
accept[5] <- accept[5] + 1
}else
{
}
accept[6] <- accept[6] + 1
}else
{}
#####################
## Sample from lambda
#####################
if(!fix.rho.T)
{
proposal.lambda <- rtruncnorm(n=1, a=0, b=1, mean=lambda, sd=proposal.sd.lambda)
temp3 <- quadform(D.triplet, D.triplet.sum, D.n.triplet, N, delta, delta, proposal.lambda)
det.Q.proposal <- 0.5 * sum(log((proposal.lambda * Dstar.val + (1-proposal.lambda))))
logprob.current <- det.Q.D - temp2.delta / sig2
logprob.proposal <- det.Q.proposal - temp3 / sig2
hastings <- log(dtruncnorm(x=lambda, a=0, b=1, mean=proposal.lambda, sd=proposal.sd.lambda)) - log(dtruncnorm(x=proposal.lambda, a=0, b=1, mean=lambda, sd=proposal.sd.lambda))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
lambda <- proposal.lambda
det.Q.D <- det.Q.proposal
accept[9] <- accept[9] + 1
}else
{
}
accept[10] <- accept[10] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- as.numeric(offset.mat + regression.mat + phi.mat + delta.mat)
prob <- exp(lp) / (1+exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- as.numeric(phi)
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.lambda[ele, ] <- lambda
samples.tau2[ele, ] <- tau2
samples.sig2[ele, ] <- sig2
samples.delta[ele, ] <- delta
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
}else
{
}
########################################
## Self tune the acceptance probabilities
########################################
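## During burnin, every 100th iteration rescales the proposal standard
## deviations so that the acceptance rates fall in the target ranges below
## (e.g. 40-50%). Tuning stops at the end of burnin, so the retained
## samples come from a time-homogeneous Markov chain.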
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
proposal.sd.delta <- common.accceptrates1(accept[7:8], proposal.sd.delta, 40, 50)
if(!fix.rho.S) proposal.sd.rho <- common.accceptrates2(accept[5:6], proposal.sd.rho, 40, 50, 0.5)
if(!fix.rho.T) proposal.sd.lambda <- common.accceptrates2(accept[9:10], proposal.sd.lambda, 40, 50, 0.5)
accept <- rep(0,10)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.lambda <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.delta=samples.delta, samples.lambda=samples.lambda, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.sig2=samples.sig2, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.CARsepspatialMCMC.R
|
binomial.MVCARar1 <- function(formula, data=NULL, trials, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Frame object
frame.results <- common.frame.MVST(formula, data, "binomial")
NK <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
N.all <- length(Y)
J <- ncol(Y)
which.miss <- frame.results$which.miss
n.miss <- N.all - sum(which.miss)
#### W matrix
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- NK / K
if(ceiling(N)!= floor(N)) stop("The number of data points in Y divided by the number of rows in W is not a whole number.", call.=FALSE)
#### Create a missing list
if(n.miss>0)
{
miss.locator <- array(NA, c(n.miss, 2))
colnames(miss.locator) <- c("row", "column")
locations <- which(which.miss==0)
miss.locator[ ,1] <- ceiling(locations/J)
miss.locator[ ,2] <- locations - (miss.locator[ ,1]-1) * J
}else
{
miss.locator <- NA
}
#### Check and format the trials argument
if(ncol(trials)!=J) stop("trials has the wrong number of columns.", call.=FALSE)
if(nrow(trials)!=NK) stop("trials has the wrong number of rows.", call.=FALSE)
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
int.check <- N.all-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials have non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
failures <- trials - Y
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
alpha <- runif(1)
fix.rho.T <- FALSE
}else
{
alpha <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(alpha)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(length(alpha)!=1) stop("rho.T is fixed but is not of length 1.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.Sigma.df)) prior.Sigma.df <- 2
if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- rep(100000, J)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
if(!is.numeric(prior.Sigma.scale)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE)
if(sum(is.na(prior.Sigma.scale))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
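## Note the -1 above: the block indices are shifted to 0-based indexing,
## presumably because they are passed to the compiled (Rcpp) beta updating
## functions.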
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.MVCARar1MCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.MVCARar1MCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.MVCARar1MCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 5)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "Sigma")
accept.final[1] <- 100 * sum(results$accept.beta[1:J]) / sum(results$accept.beta[(J+1):(2*J)])
accept.final[2] <- 100 * results$accept[1] / results$accept[2]
if(!fix.rho.S) accept.final[3] <- 100 * results$accept[3] / results$accept[4]
if(!fix.rho.T) accept.final[4] <- 100
accept.final[5] <- 100
## Compute the fitted deviance
mean.beta <- matrix(apply(results$samples.beta, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=NK, ncol=J, byrow=T)
mean.logit <- X.standardised %*% mean.beta + mean.phi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=as.numeric(t(Y)), size=as.numeric(t(trials)), prob=as.numeric(t(mean.prob)), log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- matrix(apply(results$samples.fitted, 2, mean), nrow=NK, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale
samples.beta.orig <- results$samples.beta
for(r in 1:J)
{
samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(results$samples.beta[ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
}
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.alpha
colnames(samples.rhoext) <- c("rho.T")
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
colnames(samples.rhoext) <- c("rho.S")
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.alpha)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), Sigma=results$samples.Sigma, rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.orig, 2, mean), apply(samples.beta.orig, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c((J+2) ,7))
for(r in 1:J)
{
summary.hyper[r, 1] <- mean(results$samples.Sigma[ ,r,r])
summary.hyper[r, 2:3] <- quantile(results$samples.Sigma[ ,r,r], c(0.025, 0.975))
summary.hyper[r, 4] <- n.keep
summary.hyper[r, 5] <- 100
summary.hyper[r, 6] <- effectiveSize(results$samples.Sigma[ ,r,r])
summary.hyper[r, 7] <- geweke.diag(results$samples.Sigma[ ,r,r])$z
}
if(!fix.rho.S)
{
summary.hyper[(J+1), 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.hyper[(J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"])
summary.hyper[(J+1), 6:7] <- c(effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.hyper[(J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(J+1), 4:5] <- rep(NA, 2)
summary.hyper[(J+1), 6:7] <- rep(NA, 2)
}
if(!fix.rho.T)
{
summary.hyper[(J+2), 1:3] <- c(mean(results$samples.alpha), quantile(results$samples.alpha, c(0.025, 0.975)))
summary.hyper[(J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(J+2), 6:7] <- c(effectiveSize(results$samples.alpha), geweke.diag(results$samples.alpha)$z)
}else
{
summary.hyper[(J+2), 1:3] <- c(alpha, alpha, alpha)
summary.hyper[(J+2), 4:5] <- rep(NA, 2)
summary.hyper[(J+2), 6:7] <- rep(NA, 2)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho.T")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 5)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "Sigma")
accept.final[5] <- 100
accept.temp <- lapply(results, function(l) l[["accept.beta"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1] <- 100 * sum(accept.temp2[ ,1:J]) / sum(accept.temp2[ ,(J+1):(2*J)])
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[2] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
if(!fix.rho.T) accept.final[4] <- 100
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]])
samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list)
}
samples.Sigma.list <- lapply(results, function(l) l[["samples.Sigma"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.beta <- matrix(apply(samples.beta.matrix, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=NK, ncol=J, byrow=T)
mean.logit <- X.standardised %*% mean.beta + mean.phi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=as.numeric(t(Y)), size=as.numeric(t(trials)), prob=as.numeric(t(mean.prob)), log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- matrix(apply(samples.fitted.matrix, 2, mean), nrow=NK, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
for(r in 1:J)
{
samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta.list[[j]][ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
}
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.alpha.list[[j]]) <- c("rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.alpha.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rhoext.mcmc, Sigma=samples.Sigma.list, fitted=fitted.mcmc, Y=Y.mcmc)
## create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c((J+2) ,7))
for(r in 1:J)
{
temp <- NA
temp2 <- as.list(rep(NA, n.chains))
for(v in 1:n.chains)
{
temp <- c(temp, samples.Sigma.list[[v]][ ,r,r])
temp2[[v]] <- mcmc(samples.Sigma.list[[v]][ ,r,r])
}
temp <- temp[-1]
summary.hyper[r, 1] <- mean(temp)
summary.hyper[r, 2:3] <- quantile(temp, c(0.025, 0.975))
summary.hyper[r, 4] <- n.keep
summary.hyper[r, 5] <- 100
summary.hyper[r, 6] <- effectiveSize(mcmc.list(temp2))
summary.hyper[r, 7] <- gelman.diag(mcmc.list(temp2))$psrf[ ,2]
}
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.hyper[(J+1), 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[(J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"])
summary.hyper[(J+1), 6:7] <- c(effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.hyper[(J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(J+1), 4:5] <- rep(NA, 2)
summary.hyper[(J+1), 6:7] <- rep(NA, 2)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.alpha.list, mcmc))
summary.hyper[(J+2), 1:3] <- c(mean(samples.alpha.matrix), quantile(samples.alpha.matrix, c(0.025, 0.975)))
summary.hyper[(J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(J+2), 6:7] <- c(effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.hyper[(J+2), 1:3] <- c(alpha, alpha, alpha)
summary.hyper[(J+2), 4:5] <- rep(NA, 2)
summary.hyper[(J+2), 6:7] <- rep(NA, 2)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho.T")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Binomial (logit link function)", "\nRandom effects model - Multivariate Autoregressive order 1 CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
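## Illustrative usage (not run) - a minimal sketch only, built from the
## signature above; dat, trials.mat and W.sim are hypothetical objects and
## Y inside dat would be a (K*N) x J matrix response.
# model <- binomial.MVCARar1(formula=Y~x1, data=dat, trials=trials.mat,
#                            W=W.sim, burnin=2000, n.sample=10000, thin=4,
#                            rho.S=NULL, rho.T=NULL, MALA=FALSE)
# model$summary.results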
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.MVCARar1.R
|
binomial.MVCARar1MCMC <- function(Y, failures, trials, offset, X.standardised, W, rho, alpha, fix.rho.S, fix.rho.T, K, N, NK, J, N.all, p, miss.locator, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.Sigma.df, prior.Sigma.scale, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#library(MCMCpack)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
beta <- array(NA, c(p, J))
for(i in 1:J)
{
mod.glm <- glm(cbind(Y[ ,i], failures[ ,i])~X.standardised-1, offset=offset[ ,i], family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta[ ,i] <- rnorm(n=p, mean=beta.mean, sd=beta.sd)
}
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta - offset
phi <- res.temp
phi[is.na(phi)] <- rnorm(n=sum(is.na(phi)), mean=0, sd=sd(res.temp, na.rm=T))
Sigma <- cov(phi)
Sigma.inv <- solve(Sigma)
Sigma.a <- rep(1, J)
regression <- X.standardised %*% beta
lp <- regression + phi + offset
prob <- exp(lp) / (1 + exp(lp))
Y.DA <- Y
failures.DA <- failures
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, J*p))
samples.phi <- array(NA, c(n.keep, N.all))
samples.Sigma <- array(NA, c(n.keep, J, J))
samples.Sigma.a <- array(NA, c(n.keep, J))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.alpha <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, N.all))
samples.fitted <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,4)
accept.beta <- rep(0,2*J)
proposal.sd.beta <- rep(0.01, J)
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
Sigma.post.df <- prior.Sigma.df + J - 1 + K * N
Sigma.a.post.shape <- (prior.Sigma.df + J) / 2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
Wstar <- diag(apply(W,1,sum)) - W
Q <- rho * Wstar + diag(rep(1-rho,K))
#### Create the determinant
if(!fix.rho.S)
{
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1 & alpha==1)
{
Sigma.post.df <- prior.Sigma.df + ((N-1) * (K-n.islands)) + J - 1
}else if(rho==1)
{
Sigma.post.df <- prior.Sigma.df + (N * (K-n.islands)) + J - 1
}else if(alpha==1)
{
Sigma.post.df <- prior.Sigma.df + ((N-1) * K) + J - 1
}else
{}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
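## Data augmentation: missing responses are imputed from the binomial
## predictive distribution given the current fitted probabilities, so the
## rest of the iteration can treat Y.DA as fully observed.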
if(n.miss>0)
{
Y.DA[miss.locator] <- rbinom(n=n.miss, size=trials[miss.locator], prob=prob[miss.locator])
failures.DA <- trials - Y.DA
}else
{}
###################
## Sample from beta
###################
offset.temp <- phi + offset
for(r in 1:J)
{
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, NK, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], failures.DA[ ,r], trials[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, NK, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], failures.DA[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
}
beta[ ,r] <- temp[[1]]
accept.beta[r] <- accept.beta[r] + temp[[2]]
accept.beta[(r+J)] <- accept.beta[(r+J)] + n.beta.block
}
regression <- X.standardised %*% beta
##################
## Sample from phi
##################
#### Create the offset elements
den.offset <- rho * W.triplet.sum + 1 - rho
phi.offset <- regression + offset
#### Create the random draws to create the proposal distribution
Chol.Sigma <- t(chol(proposal.sd.phi*Sigma))
z.mat <- matrix(rnorm(n=N.all, mean=0, sd=1), nrow=J, ncol=NK)
innovations <- t(Chol.Sigma %*% z.mat)
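## The proposal for each area's J-vector of random effects is formed by
## pre-multiplying independent standard normal draws by the Cholesky
## factor of proposal.sd.phi * Sigma, giving correlated innovations whose
## covariance tracks the current between-outcome covariance Sigma.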
#### Update the elements of phi
temp1 <- binomialmvar1carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, J, phi, alpha, rho, Sigma.inv, Y.DA, failures.DA, innovations, phi.offset, den.offset)
phi <- temp1[[1]]
for(r in 1:J)
{
phi[ ,r] <- phi[ ,r] - mean(phi[ ,r])
}
accept[1] <- accept[1] + temp1[[2]]
accept[2] <- accept[2] + NK
####################
## Sample from Sigma
####################
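## Gibbs step: Sigma has an inverse-Wishart full conditional whose scale
## combines the term 2 * prior.Sigma.df * diag(1/Sigma.a) with the
## quadratic forms of the AR(1) innovations phi_t - alpha * phi_{t-1}
## under the spatial precision Q. Together with the Sigma.a update below
## this matches the marginally noninformative prior construction of Huang
## and Wand (2013); riwish() is from MCMCpack.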
Sigma.post.scale <- 2 * prior.Sigma.df * diag(1 / Sigma.a) + t(phi[1:K, ]) %*% Q %*% phi[1:K, ]
for(t in 2:N)
{
phit <- phi[((t-1)*K+1):(t*K), ]
phitminus1 <- phi[((t-2)*K+1):((t-1)*K), ]
temp1 <- phit - alpha * phitminus1
Sigma.post.scale <- Sigma.post.scale + t(temp1) %*% Q %*% temp1
}
Sigma <- riwish(Sigma.post.df, Sigma.post.scale)
Sigma.inv <- solve(Sigma)
######################
## Sample from Sigma.a
######################
Sigma.a.posterior.scale <- prior.Sigma.df * diag(Sigma.inv) + 1 / prior.Sigma.scale^2
Sigma.a <- 1 / rgamma(J, Sigma.a.post.shape, scale=(1/Sigma.a.posterior.scale))
######################
#### Sample from alpha
######################
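## Gibbs step: conditional on the other parameters the temporal AR(1)
## coefficient alpha has a Gaussian full conditional with mean num/denom
## and variance 1/denom, where num and denom are quadratic form terms
## computed in compiled code.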
if(!fix.rho.T)
{
temp <- MVSTrhoTAR1compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, Sigma.inv)
num <- temp[[1]]
denom <- temp[[2]]
alpha <- rnorm(n=1, mean = (num / denom), sd=sqrt(1 / denom))
}else
{}
##################
## Sample from rho
##################
if(!fix.rho.S)
{
## Propose a new value
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
proposal.Q <- proposal.rho * Wstar + diag(rep(1-proposal.rho), K)
proposal.det.Q <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
proposal.den.offset <- proposal.rho * W.triplet.sum + 1 - proposal.rho
## Compute the quadratic forms based on current and proposed values of rho
temp1.QF <- MVSTrhoSAR1compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, alpha, Sigma.inv)
temp2.QF <- MVSTrhoSAR1compute(W.triplet, W.triplet.sum, n.triplet, proposal.den.offset, K, N, J, phi, proposal.rho, alpha, Sigma.inv)
## Compute the acceptance rate
logprob.current <- 0.5 * J * N * det.Q - 0.5 * temp1.QF
logprob.proposal <- 0.5 * J * N * proposal.det.Q - 0.5 * temp2.QF
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- proposal.det.Q
Q <- proposal.Q
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- regression + phi + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=as.numeric(t(Y)), size=as.numeric(t(trials)), prob=as.numeric(t(prob)), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- as.numeric(beta)
samples.phi[ele, ] <- as.numeric(t(phi))
samples.Sigma[ele, , ] <- Sigma
samples.Sigma.a[ele, ] <- Sigma.a
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.alpha[ele, ] <- alpha
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- as.numeric(t(fitted))
if(n.miss>0) samples.Y[ele, ] <- Y.DA[miss.locator]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
for(r in 1:J)
{
if(p>2)
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 40, 50)
}else
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 30, 40)
}
}
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
if(!fix.rho.S)
{
proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5)
}
accept <- c(0,0,0,0)
accept.beta <- rep(0,2*J)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.alpha <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.Sigma=samples.Sigma, samples.Sigma.a=samples.Sigma.a, samples.rho=samples.rho, samples.alpha=samples.alpha, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept, accept.beta=accept.beta)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.MVCARar1MCMC.R
|
binomial.MVCARar2 <- function(formula, data=NULL, trials, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Frame object
frame.results <- common.frame.MVST(formula, data, "binomial")
NK <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
N.all <- length(Y)
J <- ncol(Y)
which.miss <- frame.results$which.miss
n.miss <- N.all - sum(which.miss)
#### W matrix
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- NK / K
if(ceiling(N)!= floor(N)) stop("The number of data points in Y divided by the number of rows in W is not a whole number.", call.=FALSE)
#### Create a missing list
if(n.miss>0)
{
miss.locator <- array(NA, c(n.miss, 2))
colnames(miss.locator) <- c("row", "column")
locations <- which(which.miss==0)
miss.locator[ ,1] <- ceiling(locations/J)
miss.locator[ ,2] <- locations - (miss.locator[ ,1]-1) * J
}else
{
miss.locator <- NA
}
#### Check and format the trials argument
if(ncol(trials)!=J) stop("trials has the wrong number of columns.", call.=FALSE)
if(nrow(trials)!=NK) stop("trials has the wrong number of rows.", call.=FALSE)
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
int.check <- N.all-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials have non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
failures <- trials - Y
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
alpha <- c(runif(1), runif(1))
fix.rho.T <- FALSE
}else
{
alpha <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(alpha)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(length(alpha)!=2) stop("rho.T is fixed but is not of length 2.", call.=FALSE)
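## The AR(2) variant has two temporal dependence parameters, so a fixed
## rho.T must supply both the lag-1 and lag-2 values; they are reported
## later as rho1.T and rho2.T.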
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.Sigma.df)) prior.Sigma.df <- 2
if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- rep(100000, J)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
if(!is.numeric(prior.Sigma.scale)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE)
if(sum(is.na(prior.Sigma.scale))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.MVCARar2MCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.MVCARar2MCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.MVCARar2MCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 5)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "Sigma")
accept.final[1] <- 100 * sum(results$accept.beta[1:J]) / sum(results$accept.beta[(J+1):(2*J)])
accept.final[2] <- 100 * results$accept[1] / results$accept[2]
if(!fix.rho.S) accept.final[3] <- 100 * results$accept[3] / results$accept[4]
if(!fix.rho.T) accept.final[4] <- 100
accept.final[5] <- 100
## Compute the fitted deviance
mean.beta <- matrix(apply(results$samples.beta, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=NK, ncol=J, byrow=T)
mean.logit <- X.standardised %*% mean.beta + mean.phi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=as.numeric(t(Y)), size=as.numeric(t(trials)), prob=as.numeric(t(mean.prob)), log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- matrix(apply(results$samples.fitted, 2, mean), nrow=NK, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale
samples.beta.orig <- results$samples.beta
for(r in 1:J)
{
samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(results$samples.beta[ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
}
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.alpha
colnames(samples.rhoext) <- c("rho1.T", "rho2.T")
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
colnames(samples.rhoext) <- c("rho.S")
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.alpha)
colnames(samples.rhoext) <- c("rho.S", "rho1.T", "rho2.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), Sigma=results$samples.Sigma, rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.orig, 2, mean), apply(samples.beta.orig, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c((J+3) ,7))
for(r in 1:J)
{
summary.hyper[r, 1] <- mean(results$samples.Sigma[ ,r,r])
summary.hyper[r, 2:3] <- quantile(results$samples.Sigma[ ,r,r], c(0.025, 0.975))
summary.hyper[r, 4] <- n.keep
summary.hyper[r, 5] <- 100
summary.hyper[r, 6] <- effectiveSize(results$samples.Sigma[ ,r,r])
summary.hyper[r, 7] <- geweke.diag(results$samples.Sigma[ ,r,r])$z
}
if(!fix.rho.S)
{
summary.hyper[(J+1), 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.hyper[(J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"])
summary.hyper[(J+1), 6:7] <- c(effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.hyper[(J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(J+1), 4:5] <- rep(NA, 2)
summary.hyper[(J+1), 6:7] <- rep(NA, 2)
}
if(!fix.rho.T)
{
summary.hyper[(J+2), 1:3] <- c(mean(results$samples.alpha[ ,1]), quantile(results$samples.alpha[ ,1], c(0.025, 0.975)))
summary.hyper[(J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(J+2), 6:7] <- c(effectiveSize(results$samples.alpha[ ,1]), geweke.diag(results$samples.alpha[ ,1])$z)
summary.hyper[(J+3), 1:3] <- c(mean(results$samples.alpha[ ,2]), quantile(results$samples.alpha[ ,2], c(0.025, 0.975)))
summary.hyper[(J+3), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(J+3), 6:7] <- c(effectiveSize(results$samples.alpha[ ,2]), geweke.diag(results$samples.alpha[ ,2])$z)
}else
{
summary.hyper[(J+2), 1:3] <- c(alpha[1], alpha[1], alpha[1])
summary.hyper[(J+2), 4:5] <- rep(NA, 2)
summary.hyper[(J+2), 6:7] <- rep(NA, 2)
summary.hyper[(J+3), 1:3] <- c(alpha[2], alpha[2], alpha[2])
summary.hyper[(J+3), 4:5] <- rep(NA, 2)
summary.hyper[(J+3), 6:7] <- rep(NA, 2)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho1.T", "rho2.T")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 5)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "Sigma")
accept.final[5] <- 100
accept.temp <- lapply(results, function(l) l[["accept.beta"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1] <- 100 * sum(accept.temp2[ ,1:J]) / sum(accept.temp2[ ,(J+1):(2*J)])
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[2] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
if(!fix.rho.T) accept.final[4] <- 100
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]])
samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list)
}
samples.Sigma.list <- lapply(results, function(l) l[["samples.Sigma"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.beta <- matrix(apply(samples.beta.matrix, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=NK, ncol=J, byrow=T)
mean.logit <- X.standardised %*% mean.beta + mean.phi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=as.numeric(t(Y)), size=as.numeric(t(trials)), prob=as.numeric(t(mean.prob)), log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- matrix(apply(samples.fitted.matrix, 2, mean), nrow=NK, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
for(r in 1:J)
{
samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta.list[[j]][ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
}
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.alpha.list[[j]]) <- c("rho1.T", "rho2.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.alpha.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho1.T", "rho2.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rhoext.mcmc, Sigma=samples.Sigma.list, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c((J+3) ,7))
for(r in 1:J)
{
temp <- NA
temp2 <- as.list(rep(NA, n.chains))
for(v in 1:n.chains)
{
temp <- c(temp, samples.Sigma.list[[v]][ ,r,r])
temp2[[v]] <- mcmc(samples.Sigma.list[[v]][ ,r,r])
}
temp <- temp[-1]
summary.hyper[r, 1] <- mean(temp)
summary.hyper[r, 2:3] <- quantile(temp, c(0.025, 0.975))
summary.hyper[r, 4] <- n.keep
summary.hyper[r, 5] <- 100
summary.hyper[r, 6] <- effectiveSize(mcmc.list(temp2))
summary.hyper[r, 7] <- gelman.diag(mcmc.list(temp2))$psrf[ ,2]
}
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.hyper[(J+1), 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[(J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"])
summary.hyper[(J+1), 6:7] <- c(effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.hyper[(J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(J+1), 4:5] <- rep(NA, 2)
summary.hyper[(J+1), 6:7] <- rep(NA, 2)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.alpha.list, mcmc))
summary.hyper[(J+2), 1:3] <- c(mean(samples.alpha.matrix[ ,1]), quantile(samples.alpha.matrix[ ,1], c(0.025, 0.975)))
summary.hyper[(J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(J+2), 6:7] <- c(effectiveSize(temp)[1], gelman.diag(temp)$psrf[ ,2][1])
summary.hyper[(J+3), 1:3] <- c(mean(samples.alpha.matrix[ ,2]), quantile(samples.alpha.matrix[ ,2], c(0.025, 0.975)))
summary.hyper[(J+3), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(J+3), 6:7] <- c(effectiveSize(temp)[2], gelman.diag(temp)$psrf[ ,2][2])
}else
{
summary.hyper[(J+2), 1:3] <- c(alpha[1], alpha[1], alpha[1])
summary.hyper[(J+2), 4:5] <- rep(NA, 2)
summary.hyper[(J+2), 6:7] <- rep(NA, 2)
summary.hyper[(J+3), 1:3] <- c(alpha[2], alpha[2], alpha[2])
summary.hyper[(J+3), 4:5] <- rep(NA, 2)
summary.hyper[(J+3), 6:7] <- rep(NA, 2)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho1.T", "rho2.T")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Binomial (logit link function)", "\nRandom effects model - Multivariate Autoregressive order 2 CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
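#### Illustrative sketch (editorial addition, not package code). The summary
#### columns above are built with coda; the simulated chains below stand in
#### for the real MCMC output and show how the posterior mean, quantiles,
#### effective sample size and PSRF columns are computed from multiple chains.
library(coda)
set.seed(1)
chains <- mcmc.list(lapply(1:3, function(i) mcmc(matrix(rnorm(2000), ncol=2))))
samples.matrix <- do.call(what=rbind, args=lapply(chains, as.matrix))
summary.toy <- t(rbind(apply(samples.matrix, 2, mean), apply(samples.matrix, 2, quantile, c(0.025, 0.975))))
summary.toy <- cbind(summary.toy, effectiveSize(chains), gelman.diag(chains)$psrf[ ,2])
colnames(summary.toy) <- c("Mean", "2.5%", "97.5%", "n.effective", "PSRF (upper 95% CI)")
round(summary.toy, 3)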
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.MVCARar2.R
|
binomial.MVCARar2MCMC <- function(Y, failures, trials, offset, X.standardised, W, rho, alpha, fix.rho.S, fix.rho.T, K, N, NK, J, N.all, p, miss.locator, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.Sigma.df, prior.Sigma.scale, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#library(MCMCpack)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
beta <- array(NA, c(p, J))
for(i in 1:J)
{
mod.glm <- glm(cbind(Y[ ,i], failures[ ,i])~X.standardised-1, offset=offset[ ,i], family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta[ ,i] <- rnorm(n=p, mean=beta.mean, sd=beta.sd)
}
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta - offset
phi <- res.temp
phi[is.na(phi)] <- rnorm(n=sum(is.na(phi)), mean=0, sd=sd(res.temp, na.rm=T))
Sigma <- cov(phi)
Sigma.inv <- solve(Sigma)
Sigma.a <- rep(1, J)
regression <- X.standardised %*% beta
lp <- regression + phi + offset
prob <- exp(lp) / (1 + exp(lp))
Y.DA <- Y
failures.DA <- failures
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, J*p))
samples.phi <- array(NA, c(n.keep, N.all))
samples.Sigma <- array(NA, c(n.keep, J, J))
samples.Sigma.a <- array(NA, c(n.keep, J))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.alpha <- array(NA, c(n.keep, 2))
samples.loglike <- array(NA, c(n.keep, N.all))
samples.fitted <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,4)
accept.beta <- rep(0,2*J)
proposal.sd.beta <- rep(0.01, J)
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
Sigma.post.df <- prior.Sigma.df + J - 1 + K * N
Sigma.a.post.shape <- (prior.Sigma.df + J) / 2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
Wstar <- diag(apply(W,1,sum)) - W
Q <- rho * Wstar + diag(rep(1-rho,K))
#### Create the determinant
if(!fix.rho.S)
{
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1 & alpha[1]==2 & alpha[2]==-1)
{
Sigma.post.df <- prior.Sigma.df + ((N-2) * (K-n.islands)) + J - 1
}else if(rho==1)
{
Sigma.post.df <- prior.Sigma.df + (N * (K-n.islands)) + J - 1
}else if(alpha[1]==2 & alpha[2]==-1)
{
Sigma.post.df <- prior.Sigma.df + ((N-2) * K) + J - 1
}else
{}
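#### Note: alpha = (2, -1) corresponds to a second order random walk in time
#### and rho = 1 to the intrinsic CAR model in space; both are rank deficient,
#### which is why the posterior degrees of freedom are reduced in these cases.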
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[miss.locator] <- rbinom(n=n.miss, size=trials[miss.locator], prob=prob[miss.locator])
failures.DA <- trials - Y.DA
}else
{}
###################
## Sample from beta
###################
offset.temp <- phi + offset
for(r in 1:J)
{
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, NK, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], failures.DA[ ,r], trials[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, NK, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], failures.DA[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
}
beta[ ,r] <- temp[[1]]
accept.beta[r] <- accept.beta[r] + temp[[2]]
accept.beta[(r+J)] <- accept.beta[(r+J)] + n.beta.block
}
regression <- X.standardised %*% beta
##################
## Sample from phi
##################
#### Create the offset elements
den.offset <- rho * W.triplet.sum + 1 - rho
phi.offset <- regression + offset
#### Create the random draws to create the proposal distribution
Chol.Sigma <- t(chol(proposal.sd.phi*Sigma))
z.mat <- matrix(rnorm(n=N.all, mean=0, sd=1), nrow=J, ncol=NK)
innovations <- t(Chol.Sigma %*% z.mat)
#### Update the elements of phi
temp1 <- binomialmvar2carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, J, phi, alpha[1], alpha[2], rho, Sigma.inv, Y.DA, failures.DA, innovations, phi.offset, den.offset)
phi <- temp1[[1]]
for(r in 1:J)
{
phi[ ,r] <- phi[ ,r] - mean(phi[ ,r])
}
accept[1] <- accept[1] + temp1[[2]]
accept[2] <- accept[2] + NK
####################
## Sample from Sigma
####################
Sigma.post.scale <- 2 * prior.Sigma.df * diag(1 / Sigma.a) + t(phi[1:K, ]) %*% Q %*% phi[1:K, ] + t(phi[(K+1):(2*K), ]) %*% Q %*% phi[(K+1):(2*K), ]
for(t in 3:N)
{
phit <- phi[((t-1)*K+1):(t*K), ]
phitminus1 <- phi[((t-2)*K+1):((t-1)*K), ]
phitminus2 <- phi[((t-3)*K+1):((t-2)*K), ]
temp1 <- phit - alpha[1] * phitminus1 - alpha[2] * phitminus2
Sigma.post.scale <- Sigma.post.scale + t(temp1) %*% Q %*% temp1
}
Sigma <- riwish(Sigma.post.df, Sigma.post.scale)
Sigma.inv <- solve(Sigma)
######################
## Sample from Sigma.a
######################
Sigma.a.posterior.scale <- prior.Sigma.df * diag(Sigma.inv) + 1 / prior.Sigma.scale^2
Sigma.a <- 1 / rgamma(J, Sigma.a.post.shape, scale=(1/Sigma.a.posterior.scale))
######################
#### Sample from alpha
######################
if(!fix.rho.T)
{
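## The full conditional for alpha = (alpha1, alpha2) is bivariate Gaussian:
## temp[[1]] - temp[[3]] supply the elements of the 2x2 precision matrix and
## temp[[4]] - temp[[5]] the linear term, so the mean below solves the
## corresponding 2x2 linear system (alpha.mean[2] via Cramer's rule).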
temp <- MVSTrhoTAR2compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, Sigma.inv)
alpha.precision <- matrix(c(temp[[1]], temp[[2]], temp[[2]], temp[[3]]), nrow=2, ncol=2)
alpha.var <- solve(alpha.precision)
alpha.mean <- rep(NA, 2)
alpha.mean[2] <- (temp[[1]] * temp[[5]] - temp[[2]] * temp[[4]]) / (temp[[1]] * temp[[3]] - temp[[2]]^2)
alpha.mean[1] <- (temp[[5]] - temp[[3]] * alpha.mean[2]) / temp[[2]]
alpha <- mvrnorm(n=1, mu=alpha.mean, Sigma=alpha.var)
}else
{}
##################
## Sample from rho
##################
if(!fix.rho.S)
{
## Propose a new value
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
proposal.Q <- proposal.rho * Wstar + diag(rep(1-proposal.rho, K))
proposal.det.Q <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
proposal.den.offset <- proposal.rho * W.triplet.sum + 1 - proposal.rho
## Compute the quadratic forms based on current and proposed values of rho
temp1.QF <- MVSTrhoSAR2compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, alpha[1], alpha[2], Sigma.inv)
temp2.QF <- MVSTrhoSAR2compute(W.triplet, W.triplet.sum, n.triplet, proposal.den.offset, K, N, J, phi, proposal.rho, alpha[1], alpha[2], Sigma.inv)
## Compute the acceptance rate
logprob.current <- 0.5 * J * N * det.Q - 0.5 * temp1.QF
logprob.proposal <- 0.5 * J * N * proposal.det.Q - 0.5 * temp2.QF
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- proposal.det.Q
Q <- proposal.Q
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- regression + phi + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=as.numeric(t(Y)), size=as.numeric(t(trials)), prob=as.numeric(t(prob)), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- as.numeric(beta)
samples.phi[ele, ] <- as.numeric(t(phi))
samples.Sigma[ele, , ] <- Sigma
samples.Sigma.a[ele, ] <- Sigma.a
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.alpha[ele, ] <- alpha
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- as.numeric(t(fitted))
if(n.miss>0) samples.Y[ele, ] <- Y.DA[miss.locator]
}else
{}
########################################
## Self tune the acceptance probabilties
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
for(r in 1:J)
{
if(p>2)
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 40, 50)
}else
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 30, 40)
}
}
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
if(!fix.rho.S)
{
proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5)
}
accept <- c(0,0,0,0)
accept.beta <- rep(0,2*J)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.alpha <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.Sigma=samples.Sigma, samples.Sigma.a=samples.Sigma.a, samples.rho=samples.rho, samples.alpha=samples.alpha, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept, accept.beta=accept.beta)
#### Return the results
return(chain.results)
}
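#### Illustrative sketch (editorial addition, not package code): the
#### inverse-Wishart scale matrix in the Sigma update above accumulates the
#### spatially weighted quadratic forms of the AR(2) innovations
#### phi_t - alpha[1] * phi_{t-1} - alpha[2] * phi_{t-2}. Toy dimensions are
#### used below and Q is replaced by the identity matrix for simplicity.
set.seed(1)
K <- 4; N <- 5; J <- 2
alpha <- c(0.8, 0.1)
phi <- matrix(rnorm(K * N * J), nrow = K * N, ncol = J)
Q <- diag(K)
scale.mat <- t(phi[1:K, ]) %*% Q %*% phi[1:K, ] + t(phi[(K+1):(2*K), ]) %*% Q %*% phi[(K+1):(2*K), ]
for(t in 3:N)
{
innov <- phi[((t-1)*K+1):(t*K), ] - alpha[1] * phi[((t-2)*K+1):((t-1)*K), ] - alpha[2] * phi[((t-3)*K+1):((t-2)*K), ]
scale.mat <- scale.mat + t(innov) %*% Q %*% innov
}
scale.mat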
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/binomial.MVCARar2MCMC.R
|
coef.CARBayesST <- function(object,...)
{
#### Return the estimated regression coefficient
if(is.null(nrow(object$samples$beta)))
{
return(NULL)
}else
{
beta <- apply(object$samples$beta, 2, mean)
names(beta) <- rownames(object$summary.results)[1:length(beta)]
return(beta)
}
}
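#### Usage sketch (editorial addition; 'model', 'dat' and 'W' are hypothetical
#### objects): the posterior mean coefficients of any fitted model are
#### extracted with coef(), e.g.
## model <- ST.CARar(formula=Y~x, family="poisson", data=dat, W=W,
## burnin=2000, n.sample=22000, AR=2)
## coef(model)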
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/coef.CARBayesST.R
|
#### This file has a list of the common functions used by the models, in alphabetical order. These functions include:
# common.accceptrates1 - update the proposal variance for an MH step with no upper limit on the proposal sd.
# common.accceptrates2 - update the proposal variance for an MH step with an upper limit on the proposal sd.
# common.betablock - create the blocking structure for beta.
# common.betatransform - back transform the regression parameters to the original scale.
# common.burnin.nsample.thin.check - check the burnin, n.sample and thin arguments.
# common.frame - check the frame argument.
# common.frame.MVST - check the frame argument for the multivariate spatio-temporal models.
# common.frame.localised - check the frame argument for the localised model.
# common.modelfit - compute the model fit criteria.
# prior.beta.check - check the prior entered for beta.
# prior.var.check - check the prior entered for variance parameters.
# common.prior.varmat.check - check the prior entered for variance matrix parameters.
# common.verbose - check the verbose argument.
# common.Wcheckformat.leroux - check the W matrix for the Leroux model.
#### Acceptance rates - no maximum limit on the proposal sd
common.accceptrates1 <- function(accept, sd, min, max)
{
#### Update the proposal standard deviations
rate <- 100 * accept[1] / accept[2]
if(rate > max)
{
sd <- sd + 0.1 * sd
}else if(rate < min)
{
sd <- sd - 0.1 * sd
}else
{
}
return(sd)
}
#### Acceptance rates - maximum limit on the proposal sd
common.accceptrates2 <- function(accept, sd, min, max, sd.max)
{
#### Update the proposal standard deviations
rate <- 100 * accept[1] / accept[2]
if(rate > max)
{
sd <- min(sd + 0.1 * sd, sd.max)
}else if(rate < min)
{
sd <- sd - 0.1 * sd
}else
{
}
return(sd)
}
#### Beta blocking
common.betablock <- function(p)
{
## Compute the blocking structure for beta
blocksize.beta <- 5
if(blocksize.beta >= p)
{
n.beta.block <- 1
beta.beg <- 1
beta.fin <- p
}else
{
n.standard <- 1 + floor((p-blocksize.beta) / blocksize.beta)
remainder <- p - n.standard * blocksize.beta
if(remainder==0)
{
beta.beg <- c(1,seq((blocksize.beta+1), p, blocksize.beta))
beta.fin <- seq(blocksize.beta, p, blocksize.beta)
n.beta.block <- length(beta.beg)
}else
{
beta.beg <- c(1, seq((blocksize.beta+1), p, blocksize.beta))
beta.fin <- c(seq((blocksize.beta), p, blocksize.beta), p)
n.beta.block <- length(beta.beg)
}
}
return(list(beta.beg, beta.fin, n.beta.block))
}
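#### Illustrative sketch (editorial addition, not package code): with p = 12
#### covariates and the fixed block size of 5, the returned structure is
#### beg = c(1, 6, 11), fin = c(5, 10, 12) and n.beta.block = 3, i.e. the
#### update blocks 1:5, 6:10 and 11:12.
## common.betablock(12)   # list(c(1, 6, 11), c(5, 10, 12), 3)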
#### beta back transform samples
common.betatransform <- function(samples.beta, X.indicator, X.mean, X.sd, p, localised)
{
#### Back transform the beta values
#### Slightly different code depending on whether the localised model is used
samples.beta.orig <- samples.beta
number.cts <- sum(X.indicator==1)
if(localised)
{
#### Localised model
if(number.cts>0)
{
for(r in 1:p)
{
if(X.indicator[r]==1)
{
samples.beta.orig[ ,r] <- samples.beta[ ,r] / X.sd[r]
}else
{
}
}
}else
{
}
}else
{
#### Not the localised model
if(number.cts>0)
{
for(r in 1:p)
{
if(X.indicator[r]==1)
{
samples.beta.orig[ ,r] <- samples.beta[ ,r] / X.sd[r]
}else if(X.indicator[r]==2 & p>1)
{
X.transformed <- which(X.indicator==1)
samples.temp <- as.matrix(samples.beta[ ,X.transformed])
for(s in 1:length(X.transformed))
{
samples.temp[ ,s] <- samples.temp[ ,s] * X.mean[X.transformed[s]] / X.sd[X.transformed[s]]
}
intercept.adjustment <- apply(samples.temp, 1,sum)
samples.beta.orig[ ,r] <- samples.beta[ ,r] - intercept.adjustment
}else
{
}
}
}else
{
}
}
#### Return the transformed samples
return(samples.beta.orig)
}
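#### Illustrative sketch (editorial addition, not package code): for a
#### standardised covariate x.std = (x - mean(x)) / sd(x), the slope back
#### transforms as b1 = b1.std / sd(x) and the intercept as
#### b0 = b0.std - b1.std * mean(x) / sd(x), which is the intercept.adjustment
#### applied above. A quick numerical check:
set.seed(1)
x <- rnorm(n=10, mean=5, sd=2)
x.std <- (x - mean(x)) / sd(x)
b.std <- c(1.5, 0.7)
b.orig <- c(b.std[1] - b.std[2] * mean(x) / sd(x), b.std[2] / sd(x))
all.equal(b.std[1] + b.std[2] * x.std, b.orig[1] + b.orig[2] * x)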
#### Check MCMC arguments
common.burnin.nsample.thin.check <- function(burnin, n.sample, thin)
{
#### Check for valid arguments for the burnin, n.sample and thin arguments
if(is.null(burnin)) stop("the burnin argument is missing", call.=FALSE)
if(is.null(n.sample)) stop("the n.sample argument is missing", call.=FALSE)
if(!is.numeric(burnin)) stop("burn-in is not a number", call.=FALSE)
if(!is.numeric(n.sample)) stop("n.sample is not a number", call.=FALSE)
if(!is.numeric(thin)) stop("thin is not a number", call.=FALSE)
if(n.sample <= 0) stop("n.sample is less than or equal to zero.", call.=FALSE)
if(burnin < 0) stop("burn-in is less than zero.", call.=FALSE)
if(thin <= 0) stop("thin is less than or equal to zero.", call.=FALSE)
if(n.sample <= burnin) stop("Burn-in is greater than n.sample.", call.=FALSE)
if(n.sample <= thin) stop("thin is greater than n.sample.", call.=FALSE)
if(burnin!=round(burnin)) stop("burnin is not an integer.", call.=FALSE)
if(n.sample!=round(n.sample)) stop("n.sample is not an integer.", call.=FALSE)
if(thin!=round(thin)) stop("thin is not an integer.", call.=FALSE)
}
#### Read in and format the frame argument
common.frame <- function(formula, data, family)
{
#### Overall formula object
frame <- try(suppressWarnings(model.frame(formula, data=data, na.action=na.pass)), silent=TRUE)
if(inherits(frame, "try-error")) stop("the formula inputted contains an error, e.g the variables may be different lengths.", call.=FALSE)
#### Design matrix
## Create the matrix
X <- try(suppressWarnings(model.matrix(object=attr(frame, "terms"), data=frame)), silent=TRUE)
if(sum(is.na(X))>0) stop("the covariate matrix contains missing 'NA' values.", call.=FALSE)
n <- nrow(X)
p <- ncol(X)
## Check for linearly related columns
cor.X <- suppressWarnings(cor(X))
diag(cor.X) <- 0
if(max(cor.X, na.rm=TRUE)==1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(min(cor.X, na.rm=TRUE)==-1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(p>1)
{
if(sort(apply(X, 2, sd))[2]==0) stop("the covariate matrix has two intercept terms.", call.=FALSE)
}else
{
}
## Standardise the matrix
X.standardised <- X
X.sd <- apply(X, 2, sd)
X.mean <- apply(X, 2, mean)
X.indicator <- rep(NA, p) # To determine which parameter estimates to transform back
for(j in 1:p)
{
if(length(table(X[ ,j]))>2)
{
X.indicator[j] <- 1
X.standardised[ ,j] <- (X[ ,j] - mean(X[ ,j])) / sd(X[ ,j])
}else if(length(table(X[ ,j]))==1)
{
X.indicator[j] <- 2
}else
{
X.indicator[j] <- 0
}
}
#### Offset variable
offset <- try(model.offset(frame), silent=TRUE)
if(is.null(offset)) offset <- rep(0,n)
if(sum(is.na(offset))>0) stop("the offset has missing 'NA' values.", call.=FALSE)
if(!is.numeric(offset)) stop("the offset variable has non-numeric values.", call.=FALSE)
#### Response variable
## Create the response
Y <- model.response(frame)
which.miss <- as.numeric(!is.na(Y))
n.miss <- n - sum(which.miss)
## Check for errors
if(family=="binomial")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- n - n.miss - sum(ceiling(Y)==floor(Y), na.rm=TRUE)
if(int.check > 0) stop("the respons variable has non-integer values.", call.=FALSE)
if(min(Y, na.rm=TRUE)<0) stop("the response variable has negative values.", call.=FALSE)
}else if(family=="gaussian")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE) }else
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- n - n.miss - sum(ceiling(Y)==floor(Y), na.rm=TRUE)
if(int.check > 0) stop("the response variable has non-integer values.", call.=FALSE)
if(min(Y, na.rm=TRUE)<0) stop("the response variable has negative values.", call.=FALSE)
}
#### Return the values needed
results <- list(n=n, p=p, X=X, X.standardised=X.standardised, X.sd=X.sd, X.mean=X.mean, X.indicator=X.indicator,
offset=offset, Y=Y, which.miss=which.miss, n.miss=n.miss)
return(results)
}
#### Read in and format the frame argument
common.frame.MVST <- function(formula, data, family)
{
#### Overall formula object
frame <- try(suppressWarnings(model.frame(formula, data=data, na.action=na.pass)), silent=TRUE)
if(inherits(frame, "try-error")) stop("the formula inputted contains an error, e.g the variables may be different lengths.", call.=FALSE)
#### Design matrix
## Create the matrix
X <- try(suppressWarnings(model.matrix(object=attr(frame, "terms"), data=frame)), silent=TRUE)
if(sum(is.na(X))>0) stop("the covariate matrix contains missing 'NA' values.", call.=FALSE)
n <- nrow(X)
p <- ncol(X)
## Check for linearly related columns
cor.X <- suppressWarnings(cor(X))
diag(cor.X) <- 0
if(max(cor.X, na.rm=TRUE)==1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(min(cor.X, na.rm=TRUE)==-1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(p>1)
{
if(sort(apply(X, 2, sd))[2]==0) stop("the covariate matrix has two intercept terms.", call.=FALSE)
}else
{
}
## Standardise the matrix
X.standardised <- X
X.sd <- apply(X, 2, sd)
X.mean <- apply(X, 2, mean)
X.indicator <- rep(NA, p) # To determine which parameter estimates to transform back
for(j in 1:p)
{
if(length(table(X[ ,j]))>2)
{
X.indicator[j] <- 1
X.standardised[ ,j] <- (X[ ,j] - mean(X[ ,j])) / sd(X[ ,j])
}else if(length(table(X[ ,j]))==1)
{
X.indicator[j] <- 2
}else
{
X.indicator[j] <- 0
}
}
#### Response variable
## Create the response
Y <- model.response(frame)
J <- ncol(Y)
which.miss <- as.numeric(!is.na(t(Y)))
n.miss <- n*J - sum(which.miss)
#### Offset variable
offset <- try(model.offset(frame), silent=TRUE)
if(is.null(offset)) offset <- array(0, c(n, J))
if(sum(is.na(offset))>0) stop("the offset has missing 'NA' values.", call.=FALSE)
if(!is.numeric(offset)) stop("the offset variable has non-numeric values.", call.=FALSE)
## Check for errors
if(family=="binomial")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- n*J - n.miss - sum(ceiling(Y)==floor(Y), na.rm=TRUE)
if(int.check > 0) stop("the respons variable has non-integer values.", call.=FALSE)
if(min(Y, na.rm=TRUE)<0) stop("the response variable has negative values.", call.=FALSE)
}else if(family=="gaussian")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
}else
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- n*J - n.miss - sum(ceiling(Y)==floor(Y), na.rm=TRUE)
if(int.check > 0) stop("the response variable has non-integer values.", call.=FALSE)
if(min(Y, na.rm=TRUE)<0) stop("the response variable has negative values.", call.=FALSE)
}
#### Return the values needed
results <- list(n=n, p=p, X=X, X.standardised=X.standardised, X.sd=X.sd, X.mean=X.mean, X.indicator=X.indicator,
offset=offset, Y=Y, which.miss=which.miss, n.miss=n.miss)
return(results)
}
#### Read in and format the frame argument from the localised model
common.frame.localised <- function(formula, data, family)
{
#### Overall formula object
frame <- try(suppressWarnings(model.frame(formula, data=data, na.action=na.pass)), silent=TRUE)
if(inherits(frame, "try-error")) stop("the formula inputted contains an error, e.g the variables may be different lengths.", call.=FALSE)
#### Response variable
## Create the response
Y <- model.response(frame)
n <- length(Y)
## Check for errors
if(family=="binomial")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- n - sum(ceiling(Y)==floor(Y), na.rm=TRUE)
if(int.check > 0) stop("the respons variable has non-integer values.", call.=FALSE)
if(min(Y, na.rm=TRUE)<0) stop("the response variable has negative values.", call.=FALSE)
}else if(family=="gaussian")
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
}else
{
if(!is.numeric(Y)) stop("the response variable has non-numeric values.", call.=FALSE)
int.check <- n - sum(ceiling(Y)==floor(Y), na.rm=TRUE)
if(int.check > 0) stop("the response variable has non-integer values.", call.=FALSE)
if(min(Y, na.rm=TRUE)<0) stop("the response variable has negative values.", call.=FALSE)
}
#### Offset variable
offset <- try(model.offset(frame), silent=TRUE)
if(is.null(offset)) offset <- rep(0,n)
if(sum(is.na(offset))>0) stop("the offset has missing 'NA' values.", call.=FALSE)
if(!is.numeric(offset)) stop("the offset variable has non-numeric values.", call.=FALSE)
#### Design matrix - Create and then adapt to remove the intercept term
X <- try(suppressWarnings(model.matrix(object=attr(frame, "terms"), data=frame)), silent=TRUE)
if(sum(is.na(X))>0) stop("the covariate matrix contains missing 'NA' values.", call.=FALSE)
ptemp <- ncol(X)
if(ptemp==1)
{
X <- NULL
X.standardised <- NULL
X.sd <- NULL
X.mean <- NULL
X.indicator <- NULL
regression.vec <- rep(0, n)
p <- 0
}else
{
## Check for linearly related columns
cor.X <- suppressWarnings(cor(X))
diag(cor.X) <- 0
if(max(cor.X, na.rm=TRUE)==1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(min(cor.X, na.rm=TRUE)==-1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(sort(apply(X, 2, sd))[2]==0) stop("the covariate matrix has two intercept terms.", call.=FALSE)
## Remove the intercept term
int.which <- which(apply(X,2,sd)==0)
colnames.X <- colnames(X)
X <- as.matrix(X[ ,-int.which])
colnames(X) <- colnames.X[-int.which]
p <- ncol(X)
## Standardise X
X.standardised <- X
X.sd <- apply(X, 2, sd)
X.mean <- apply(X, 2, mean)
X.indicator <- rep(NA, p) # To determine which parameter estimates to transform back
for(j in 1:p)
{
if(length(table(X[ ,j]))>2)
{
X.indicator[j] <- 1
X.standardised[ ,j] <- (X[ ,j] - mean(X[ ,j])) / sd(X[ ,j])
}else
{
X.indicator[j] <- 0
}
}
}
#### Return the values needed
results <- list(n=n, p=p, X=X, X.standardised=X.standardised, X.sd=X.sd, X.mean=X.mean, X.indicator=X.indicator,
offset=offset, Y=Y)
return(results)
}
# Compute the DIC, WAIC, LMPL and loglikelihood
common.modelfit <- function(samples.loglike, deviance.fitted)
{
#### WAIC
p.w <- sum(apply(samples.loglike,2, var), na.rm=TRUE)
mean.like <- apply(exp(samples.loglike),2,mean)
mean.min <- min(mean.like[mean.like>0])
mean.like[mean.like==0] <- mean.min
lppd <- sum(log(mean.like), na.rm=TRUE)
WAIC <- -2 * (lppd - p.w)
#### Compute the Conditional Predictive Ordinate
CPO <- 1/apply(exp(-samples.loglike), 2, mean)
mean.min <- min(CPO[CPO>0])
CPO[CPO==0] <- mean.min
LMPL <- sum(log(CPO), na.rm=TRUE)
#### DIC
mean.deviance <- -2 * sum(samples.loglike, na.rm=TRUE) / nrow(samples.loglike)
p.d <- mean.deviance - deviance.fitted
DIC <- deviance.fitted + 2 * p.d
#### loglikelihood
loglike <- -0.5 * deviance.fitted
#### Model fit criteria
modelfit <- c(DIC, p.d, WAIC, p.w, LMPL, loglike)
names(modelfit) <- c("DIC", "p.d", "WAIC", "p.w", "LMPL", "loglikelihood")
return(modelfit)
}
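#### Illustrative sketch (editorial addition, not package code): computing the
#### fit criteria from a toy matrix of pointwise log-likelihood samples, with
#### rows indexing MCMC iterations and columns indexing data points.
set.seed(1)
y <- rpois(n=20, lambda=5)
theta <- matrix(rgamma(n=100*20, shape=25, rate=5), nrow=100, ncol=20)
ymat <- matrix(y, nrow=100, ncol=20, byrow=TRUE)
samples.loglike.toy <- matrix(dpois(x=ymat, lambda=theta, log=TRUE), nrow=100, ncol=20)
deviance.fitted.toy <- -2 * sum(dpois(x=y, lambda=apply(theta, 2, mean), log=TRUE))
common.modelfit(samples.loglike.toy, deviance.fitted.toy)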
#### Check beta prior arguments
prior.beta.check <- function(prior.mean.beta, prior.var.beta, p)
{
## Checks
if(length(prior.mean.beta)!=p) stop("the vector of prior means for beta is the wrong length.", call.=FALSE)
if(!is.numeric(prior.mean.beta)) stop("the vector of prior means for beta is not numeric.", call.=FALSE)
if(sum(is.na(prior.mean.beta))!=0) stop("the vector of prior means for beta has missing values.", call.=FALSE)
if(length(prior.var.beta)!=p) stop("the vector of prior variances for beta is the wrong length.", call.=FALSE)
if(!is.numeric(prior.var.beta)) stop("the vector of prior variances for beta is not numeric.", call.=FALSE)
if(sum(is.na(prior.var.beta))!=0) stop("the vector of prior variances for beta has missing values.", call.=FALSE)
if(min(prior.var.beta) <=0) stop("the vector of prior variances has elements less than zero", call.=FALSE)
}
#### Check variance prior arguments
prior.var.check <- function(prior.var)
{
## Checks
if(length(prior.var)!=2) stop("the prior values for a variance parameter are the wrong length.", call.=FALSE)
if(!is.numeric(prior.var)) stop("the prior values for a variance parameter are not numeric.", call.=FALSE)
if(sum(is.na(prior.var))!=0) stop("the prior values for a variance parameter have missing values.", call.=FALSE)
}
#### Check variance matrix prior arguments
common.prior.varmat.check <- function(prior.varmat, J)
{
if(nrow(prior.varmat)!=J) stop("prior.Sigma.scale is the wrong dimension.", call.=FALSE)
if(ncol(prior.varmat)!=J) stop("prior.Sigma.scale is the wrong dimension.", call.=FALSE)
if(!is.numeric(prior.varmat)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE)
if(sum(is.na(prior.varmat))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE)
}
#### Check the verbose option
common.verbose <- function(verbose)
{
if(is.null(verbose)) verbose=TRUE
if(!is.logical(verbose)) stop("the verbose option is not logical.", call.=FALSE)
if(verbose)
{
cat("Setting up the model.\n")
a<-proc.time()
}else{
a <- 1
}
return(a)
}
#### Check the W matrix - Leroux model
common.Wcheckformat.leroux <- function(W)
{
#### Check W is a matrix of the correct dimension
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
n <- nrow(W)
if(ncol(W)!= n) stop("W is not a square matrix.", call.=FALSE)
#### Check validity of inputed W matrix
if(sum(is.na(W))>0) stop("W has missing 'NA' values.", call.=FALSE)
if(!is.numeric(W)) stop("W has non-numeric values.", call.=FALSE)
if(min(W)<0) stop("W has negative elements.", call.=FALSE)
if(sum(W!=t(W))>0) stop("W is not symmetric.", call.=FALSE)
if(min(apply(W, 1, sum))==0) stop("W has some areas with no neighbours (one of the row sums equals zero).", call.=FALSE)
#### Create the triplet form
W.triplet <- c(NA, NA, NA)
for(i in 1:n)
{
for(j in 1:n)
{
if(W[i,j]>0)
{
W.triplet <- rbind(W.triplet, c(i,j, W[i,j]))
}else{}
}
}
W.triplet <- W.triplet[-1, ]
n.triplet <- nrow(W.triplet)
W.triplet.sum <- tapply(W.triplet[ ,3], W.triplet[ ,1], sum)
n.neighbours <- tapply(W.triplet[ ,3], W.triplet[ ,1], length)
#### Create the start and finish points for W updating
W.begfin <- array(NA, c(n, 2))
temp <- 1
for(i in 1:n)
{
W.begfin[i, ] <- c(temp, (temp + n.neighbours[i]-1))
temp <- temp + n.neighbours[i]
}
#### Return the critical quantities
results <- list(W=W, W.triplet=W.triplet, n.triplet=n.triplet, W.triplet.sum=W.triplet.sum, n.neighbours=n.neighbours, W.begfin=W.begfin, n=n)
return(results)
}
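#### Illustrative sketch (editorial addition, not package code): for the
#### 3-area path graph 1 - 2 - 3, i.e.
## W.toy <- matrix(c(0,1,0, 1,0,1, 0,1,0), nrow=3)
## common.Wcheckformat.leroux(W.toy)
#### the triplet rows are (1,2,1), (2,1,1), (2,3,1) and (3,2,1), with
#### W.triplet.sum = c(1, 2, 1) and n.neighbours = c(1, 2, 1).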
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/common.functions.R
|
fitted.CARBayesST <- function(object,...)
{
#### Return the fitted values
return(object$fitted.values)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/fitted.CARBayesST.R
|
gaussian.CARadaptive <- function(formula, data = NULL, W, burnin, n.sample, thin = 1, prior.mean.beta = NULL, prior.var.beta = NULL, prior.tau2 = NULL, prior.nu2 = NULL, rho = NULL, epsilon = 0, verbose = TRUE)
{
#### Verbose
a <- common.verbose(verbose)
blocksize.beta <- 5
blocksize.v <- 10
z <- which(W > 0, arr.ind = T)
locs <- z[which(z[,1] < z[,2]), ]
char.locs <- paste(locs[,1], ".", locs[,2], sep = "")
n.edges <- nrow(locs)
logit <- function(p) log(p/(1-p))
inv_logit <- function(v) 1/(1+exp(-v))
# interpret the formula
frame <- try(suppressWarnings(model.frame(formula, data = data, na.action=na.pass)), silent=TRUE)
if(inherits(frame, "try-error")) stop("the formula inputted contains an error, e.g the variables may be different lengths.", call.=FALSE)
X <- try(suppressWarnings(model.matrix(object=attr(frame, "terms"), data=frame)), silent=TRUE)
if(sum(is.na(X))>0) stop("the covariate matrix contains missing 'NA' values.", call.=FALSE)
# get summaries of the model matrix
p <- ncol(X)
y <- model.response(frame)
n.sites <- as.integer(nrow(W))
n.time <- as.integer(length(y)/n.sites)
k <- as.integer(round(n.sites*n.time, 0))
# check y, the model response
if(sum(is.na(y))>0) stop("the response has missing 'NA' values.", call.=FALSE)
if(!is.numeric(y)) stop("the response variable has non-numeric values.", call.=FALSE)
#### Check and specify the priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
prior.var.check(prior.nu2)
prior.prec.beta <- diag.spam(1/prior.var.beta, nrow = length(prior.var.beta))
# identify and error check the offset term, if it exists.
offset <- try(model.offset(frame), silent=TRUE)
if(is.null(offset)) offset <- rep(0,(n.time * n.sites))
if(sum(is.na(offset))>0) stop("the offset has missing 'NA' values.", call.=FALSE)
if(!is.numeric(offset)) stop("the offset variable has non-numeric values.", call.=FALSE)
#### Format and check the MCMC quantities
common.burnin.nsample.thin.check(burnin, n.sample, thin)
## Check for linearly related columns
cor.X <- suppressWarnings(cor(X))
diag(cor.X) <- 0
if(max(cor.X, na.rm=TRUE)==1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(min(cor.X, na.rm=TRUE)==-1) stop("the covariate matrix has two exactly linearly related columns.", call.=FALSE)
if(p>1)
{
if(sort(apply(X, 2, sd))[2]==0) stop("the covariate matrix has two intercept terms.", call.=FALSE)
}else
{
}
## Standardise the model matrix,
X.standardised <- X
X.sd <- apply(X, 2, sd)
X.mean <- apply(X, 2, mean)
X.indicator <- rep(NA, p) # To determine which parameter estimates to transform back
for(j in 1:p){
if(length(table(X[ ,j])) > 2){
X.indicator[j] <- 1
X.standardised[ ,j] <- (X[ ,j] - mean(X[ ,j])) / sd(X[ ,j])
}else if(length(table(X[ ,j]))==1){
X.indicator[j] <- 2
}else{
X.indicator[j] <- 0
}
}
# based on the blocksize.v provided, create lists with the relevant bits for the untransformed edge parameter update
if(is.numeric(blocksize.v)){
## Compute the blocking structure for v
fromto <- seq(0, n.edges, by = blocksize.v)
fromto[1] <- 0
if(!n.edges %in% fromto) fromto <- c(fromto, n.edges)
n.blocks <- length(fromto) - 1
blockinds <- vector("list", length = n.blocks)
for(i in 1:n.blocks) blockinds[[i]] <- (fromto[i] + 1):fromto[i + 1]
}
# propose starting values for the adjacency elements (very close to 1)
# current temporary version of the adjacency is W_current
v <- logit(rtruncnorm(n.edges, mean = 0.999, sd = 0.001, a = 0, b=1))
v_15 <- v - 15
vqform_current <- sum(v_15^2)
W_current <- W
W_current[locs][1:n.edges] <- inv_logit(v)
W_current[locs[,2:1]][1:n.edges] <- inv_logit(v)
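# note: the prior for each v is N(15, tau_v) (see vqform_current above), and
# inv_logit(15) is essentially 1, so a priori every edge weight is shrunk
# towards 1, i.e. towards retaining the edge in the adjacency structure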
# given the temporary adjacency, construct a temporary (Q.space) and proposal (Q.space.prop)
# for the prior ICAR precision for phi. Associated with these is the triplet form tripList.
# get the cholesky of Q.space, and its determinant
# if rho is not fixed, then ridge must be fixed
rhofix <- rho
rho <- ifelse(!is.null(rhofix), rhofix, 0.99)
fixedridge <- epsilon
if(rho==1) fixedridge <- 0.0001
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
tripList <- vector("list", length = 2)
tripList[[1]] <- cbind(1:nrow(W_current), 1:nrow(W_current), rowSums(W_current) + fixedridge)
tripList[[2]] <- cbind(rbind(locs, locs[,2:1]), -rep(inv_logit(v), 2))
Q.space.trip <- rbind(tripList[[1]], tripList[[2]])
Q.space.trip <- updatetriplets_rho(trips = Q.space.trip, nsites = n.sites, rho_old = 1, rho_new = rho, fixedridge = fixedridge)
Q.space <- Q.space.prop <- spam(list(i = Q.space.trip[,1], j = Q.space.trip[,2], Q.space.trip[,3]))
chol.Q.space <- chol.spam(Q.space)
Q.space.det.old <- n.time*2*determinant(chol.Q.space, logarithm = T)$modulus
# propose an initial value for alpha, the temporal correlation parameter
# using alpha, create initial temporal precision matrices Q.time
alpha <- 1
if(n.time > 1){
# this bit constructs Q.time, temporal precision, its determinant, and triplet form
Q.block <- as.spam(crossprod(diff(diag(n.time))))
Q.block[1,1] <- Q.block[1,1] + 1
Dg <- diag.spam(diag.spam(Q.block))
R <- Q.block - Dg
Dtime <- diag.spam( c(rep(1,nrow(Q.block)-1), 0))
Dg <- Dg - Dtime
Q.time <- Dg + Dtime*alpha^2+ R*alpha
Q.time[n.time,n.time] <- 1
Q.det <- determinant(Q.time, logarithm = T)
detTime <- as.numeric(0.5*n.sites*(Q.det$m)*(Q.det$s))
Q.time.trip <- Reduce("cbind", triplet(Q.time))
} else {
# if n.time == 1, detTime equals 1 and Q.time is just a 1 times 1 matrix.
Q.time <- 1
detTime <- 1
Q.time.trip <- matrix(rep(1, 3), ncol = 3)
}
# MCMC parameter starting values
W.tune <- 1
rho.tune <- 0.1
tau_v <- 200
prior.max.tau <- 1000
increment <- 0
mod.glm <- glm(y~X.standardised-1, offset=offset)
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta_par <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
res.temp <- y - X.standardised %*% beta_par - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=k, mean=0, sd = res.sd)
tau <- var(phi)/10
nu2 <- var(phi)/10
phiQphi <- qform_ST(Qspace = Q.space.trip, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
XB <- X.standardised %*% beta_par
tau_v.shape <- (n.edges/2) + prior.tau2[1]
tau_phi_shape <- (n.sites*n.time/2) + prior.tau2[1]
# general MCMC housekeeping
n.save <- floor((n.sample - burnin) / thin)
accept <- rep(0, 8)
# storage of parameters in the MCMC,
samples.beta <- array(NA, c(n.save, p))
samples.phi <- array(NA, c(n.save, n.sites * n.time))
samples.tau2 <- samples.vtau2 <- samples.alpha <- samples.nu2 <- samples.rho <- matrix(0, n.save, 1)
samples.v <- matrix(0, nrow = n.save, ncol = n.edges)
samples.fit <- array(NA, c(n.save, n.sites * n.time))
samples.loglike <- array(NA, c(n.save, n.sites*n.time))
# turn off spam check options to speed things up (a bit)
options(spam.cholsymmetrycheck = FALSE)
options(spam.cholpivotcheck = FALSE)
options(spam.safemode = c(F, F, F))
## Compute the blocking structure for beta
if(blocksize.beta >= p){
n.beta.block <- 1
beta.beg <- 1
beta.fin <- p
} else {
n.standard <- 1 + floor((p-blocksize.beta) / blocksize.beta)
remainder <- p - n.standard * blocksize.beta
if(remainder==0){
beta.beg <- c(1,seq((blocksize.beta+1), p, blocksize.beta))
beta.fin <- seq(blocksize.beta, p, blocksize.beta)
n.beta.block <- length(beta.beg)
} else {
beta.beg <- c(1, seq((blocksize.beta+1), p, blocksize.beta))
beta.fin <- c(seq((blocksize.beta), p, blocksize.beta), p)
n.beta.block <- length(beta.beg)
}
}
corr.beta <- t(X.standardised) %*% X.standardised
corr.beta.inv <- solve(t(X.standardised) %*% X.standardised)
chol.corr.beta <- chol(corr.beta.inv)
# the perm ordering is used to map the @entries slot ordering to the ordering used when 'triplet' is called
perm <- order(Q.space.trip[,1], Q.space.trip[,2])
diags.space <- which(Q.space.trip[perm,1] == Q.space.trip[perm,2])
if(n.time > 1) diag.time <- Reduce("cbind", triplet(diag.spam(n.time - 1)))
time.last.diag <- which((Q.time.trip[,1] == Q.time.trip[,2]) & (Q.time.trip[,1] == n.time))
lastblock <- (k - n.sites+1):k
firstblock <- 1:n.sites
## Start timer
n.keep <- floor((n.sample - burnin)/thin)
if(verbose){
cat("Generating", n.keep, "post burnin and thinned (if requested) samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points <- round((1:100/100)*n.sample)
} else percentage.points <- round((1:100/100)*n.sample)
# -------------------------------------------------------------------------------------------
# START THE MCMC SAMPLING
# -------------------------------------------------------------------------------------------
for(j in 1:n.sample){
# START ITERATING, ONLY SAVE thin^th ITERATION
save.iter <- (j > burnin) && (j %% thin == 0)
if(save.iter) increment <- increment+1
# update ALPHA
if(n.time > 1){
phifirst <- phi[-firstblock]
philast <- phi[-lastblock]
philastQphilast <- qform_ST(Qspace = Q.space.trip, Qtime = diag.time, phi = philast, nsites = n.sites)
phifirstQphilast <- qform_ST_asym(Qspace = Q.space.trip, Qtime = diag.time, phi1 = phifirst, phi2 = philast, nsites = n.sites)
mu_alpha <- phifirstQphilast/philastQphilast
mu_sigmasq <- tau/philastQphilast
alpha <- rtruncnorm(n=1, a=10^-5, b=1 - 10^-5, mean=mu_alpha, sd = sqrt(mu_sigmasq))
Q.time.trip <- update_Qtime(Q.time.trip, alpha, time.last.diag - 1)
phiQphi <- qform_ST(Qspace = Q.space.trip, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
detTime <- determinant(Q.time, logarithm = TRUE)
detTime <- (detTime$m)*(detTime$s)
}
# Gibbs update of tau_v
tau_scale <- vqform_current/2 + prior.tau2[2]
tau_v <- 1/rtrunc(n=1, spec="gamma", a=0.000001, b=Inf, shape=tau_v.shape, scale=(1/tau_scale))
#tau_v <- 1/rtgamma(n=1, shape=tau_v.shape, scale=tau_scale, min=0.000001, max=Inf)
v.proposal <- rtruncnorm(n = n.edges, a=-15, b=15, mean = v, sd = W.tune)
for(i in 1:n.blocks){
# propose new v for the i^th block
vnew <- v
block_inds <- blockinds[[i]]
vnew[block_inds] <- v.proposal[block_inds]
# update the spatial precision matrix using c++ loop.
# This is efficient because changes are only made where vnew differs from v
# combine the result back into triplet matrix (Q.space.trip.prop), and spam matrix (Q.space.prop)
tripUpdate <- updatetripList2(Q.space.trip, vold = v, vnew = vnew, nedges = n.edges,
nsites = n.sites, block = block_inds,
block_length = length(block_inds), fixedridge = fixedridge, rho = rho)
Q.space.trip.prop <- tripUpdate[[1]]
Q.space.trip.diff <- tripUpdate[[2]]
# combine the result back into triplet matrix (Q.space.trip.prop), and spam matrix (Q.space.prop)
Q.space.prop@entries <- Q.space.trip.prop[perm,3]
# acceptance ratio requires calculation of phi'Q_prop phi - phi'Q phi.
# do this quickly by taking the difference between old and new triplets and working out the
# difference directly. Much faster than working out quadratic forms seperately.
Q.space.trip.diff[, 3]<- Q.space.trip[, 3] - Q.space.trip.prop[,3]
phiQphi_phiQphiNew <- qform_difference_ST(Qtrip = Q.space.trip.diff, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
# update the cholesky of the precision matrix & calculate the determinant
chol.Q.space.prop <- update(chol.Q.space, x = Q.space.prop)
detSpace <- 2*determinant(chol.Q.space.prop, logarithm = T)$modulus
Q.space.det.prop <- n.sites*detTime + n.time*detSpace
v_15_prop <- vnew - 15
vqform_prop <- sum(v_15_prop^2)
acceptance <- exp(0.5*(Q.space.det.prop - Q.space.det.old) + (1/(2*tau))*(phiQphi_phiQphiNew)
+ 0.5*(1/tau_v)*(vqform_current - vqform_prop))
accept[8] <- accept[8] + (1/n.blocks)
if(runif(1) <= acceptance){
vqform_current <- vqform_prop
v <- vnew
accept[7] <- accept[7] + (1/n.blocks)
Q.space.det.old <- Q.space.det.prop
Q.space.trip <- Q.space.trip.prop
chol.Q.space <- chol.Q.space.prop
Q.space <- Q.space.prop
}
}
# Gibbs update beta
var_mat_full_con <- solve(((corr.beta/nu2) + prior.prec.beta))
mean_full_con <- ((((y - phi - offset)/nu2) %*% X.standardised) + (prior.mean.beta/prior.var.beta)) %*% var_mat_full_con
beta_par <- mvrnorm(n = 1, mu = mean_full_con, Sigma = var_mat_full_con)
XB <- X.standardised %*% beta_par
# Gibbs update of the variance of the normal likelihood
lik_comp <- sum( (y - XB - phi - offset)^2 )/2
nu2 <- 1 / rgamma(1, (prior.nu2[1] + 0.5*k), scale=(1/(prior.nu2[2] + lik_comp)))
# update PHI using one at a time M-H sampling
nneighbours <- diag.spam(Q.space)
W_current <- diag(nneighbours) - as.matrix(Q.space)
phi_update <- SPTICARphiGaussian(W = W_current, nsites = n.sites, ntimes = n.time,
phi = phi, nneighbours = nneighbours,
tau = tau, y = as.vector(y - offset),
alpha = alpha, XB = XB, lik_var = nu2)
phi <- phi_update[[1]]
phi <- phi - mean(phi)
# update rho, the spatial leroux parameter
if(!is.null(rhofix)){
proposal.rho <- rhofix
} else {
proposal.rho <- rtruncnorm(n = 1, a=0, b=1, mean = rho, sd = rho.tune)
}
Q.space.trip.prop <- updatetriplets_rho(trips = Q.space.trip, nsites = n.sites, rho_old = rho, rho_new = proposal.rho, fixedridge = fixedridge)
Q.space.prop@entries <- Q.space.trip.prop[perm,3]
Q.space.trip.diff[, 3] <- Q.space.trip[, 3] - Q.space.trip.prop[,3]
phiQphi_phiQphiNew <- qform_difference_ST(Qtrip = Q.space.trip.diff, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
# update the cholesky of the precision matrix & calculate the determinant
chol.Q.space.prop <- update(chol.Q.space, x = Q.space.prop)
detSpace <- 2*determinant(chol.Q.space.prop, logarithm = T)$modulus
Q.space.det.prop <- n.sites*detTime + n.time*detSpace
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=rho.tune)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=rho.tune))
acceptance <- exp(0.5*(Q.space.det.prop - Q.space.det.old) + (1/(2*tau))*(phiQphi_phiQphiNew) + hastings)
accept[6] <- accept[6] + 1
if(runif(1) <= acceptance){
accept[5] <- accept[5] + 1
Q.space.det.old <- Q.space.det.prop
Q.space.trip <- Q.space.trip.prop
chol.Q.space <- chol.Q.space.prop
Q.space <- Q.space.prop
rho <- proposal.rho
}
# Gibbs update TAU using the gamma distribution
phiQphi <- qform_ST(Qspace = Q.space.trip, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
tau_scale <- phiQphi/2 + prior.tau2[2]
tau <- 1/rtrunc(n=1, spec="gamma", a=0.000001, b=Inf, shape=tau_phi_shape, scale=(1/tau_scale))
#tau <- 1/rtgamma(n=1, shape=tau_phi_shape, scale=tau_scale, min=0.000001, max=Inf)
# calculate the deviance
fitted <- as.numeric(X.standardised %*% beta_par) + phi + offset
loglike <- dnorm(y, mean = fitted, sd = rep(sqrt(nu2), k), log=TRUE)
# save samples if past burnin
if(save.iter){
samples.beta[increment,] <- beta_par
samples.phi[increment,] <- phi
samples.fit[increment, ] <- fitted
samples.tau2[increment,] <- tau
samples.vtau2[increment,] <- tau_v
samples.v[increment,] <- v
samples.alpha[increment,] <- alpha
samples.rho[increment,] <- rho
samples.nu2[increment,] <- nu2
samples.loglike[increment, ] <- loglike
}
# adjust the acceptance rate if required
if(j %% 100 == 0 & j < burnin){
accept.w <- 100 * accept[7] / accept[8]
if(is.null(rhofix))
{
accept.rho <- 100 * accept[5] / accept[6]
}else
{
accept.rho <- 45
}
#### w tuning parameter
if(accept.w > 40)
{
W.tune <- W.tune + 0.1 * W.tune
}else if(accept.w < 20)
{
W.tune <- W.tune - 0.1 * W.tune
}else
{
}
#### rho tuning parameter
if(accept.rho > 50)
{
rho.tune <- min(rho.tune + 0.1 * rho.tune, 0.5)
}else if(accept.rho < 40)
{
rho.tune <- rho.tune - 0.1 * rho.tune
}else
{
}
accept <- accept*0
}
# print progress to the console
if(j %in% percentage.points & verbose) setTxtProgressBar(progressBar, j/n.sample)
}
# end timer
if(verbose)
{
cat("\nSummarising results.")
close(progressBar)
}else
{}
###################################
#### Summarise and save the results
###################################
## Compute the acceptance rates
accept.beta <- 100
accept.phi <- 100
accept.rho <- 100 * accept[5] / accept[6]
accept.w <- 100 * accept[7] / accept[8]
accept.alpha <- 100
if(!is.null(rhofix))
{
accept.final <- c(accept.beta, accept.phi, accept.w)
names(accept.final) <- c("beta", "phi", "w")
}else
{
accept.final <- c(accept.beta, accept.phi, accept.rho,accept.w)
names(accept.final) <- c("beta", "phi", "rho", "w")
}
#### Compute the fitted deviance
mean.beta <- apply(samples.beta, 2, mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow = n.sites, ncol = n.time, byrow=FALSE)
mean.phi <- matrix(apply(samples.phi, 2, mean), nrow = n.sites, ncol = n.time)
mean.nu2 <- mean(samples.nu2)
offset.mat <- matrix(offset, nrow = n.sites, ncol = n.time, byrow=FALSE)
fitted.mean <- as.numeric(mean.phi + regression.mat + offset.mat)
deviance.fitted <- -2 * sum(dnorm(y, mean = fitted.mean, sd = rep(sqrt(mean.nu2), k), log = TRUE))
#### Model fit criteria
modelfit <- common.modelfit(samples.loglike, deviance.fitted)
#### Create the fitted values and residuals
fitted.values <- apply(samples.fit, 2, mean)
response.residuals <- as.numeric(y) - fitted.values
pearson.residuals <- response.residuals /sqrt(mean.nu2)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
#### Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
#### Create a summary object
samples.beta.orig <- mcmc(samples.beta.orig)
summary.beta <- t(rbind(apply(samples.beta.orig, 2, mean), apply(samples.beta.orig, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.save, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(5, 7))
summary.hyper[1,1:3] <- c(mean(samples.nu2), quantile(samples.nu2, c(0.025, 0.975)))
summary.hyper[2,1:3] <- c(mean(samples.tau2), quantile(samples.tau2, c(0.025, 0.975)))
summary.hyper[3,1:3] <- c(mean(samples.rho), quantile(samples.rho, c(0.025, 0.975)))
summary.hyper[4,1:3] <- c(mean(samples.alpha), quantile(samples.alpha, c(0.025, 0.975)))
summary.hyper[5,1:3] <- c(mean(samples.vtau2), quantile(samples.vtau2, c(0.025, 0.975)))
rownames(summary.hyper) <- c("nu2", "tau2", "rho.S", "rho.T", "tau2.w")
summary.hyper[1, 4:7] <- c(n.save, 100, effectiveSize(mcmc(samples.nu2)), geweke.diag(mcmc(samples.nu2))$z)
summary.hyper[2, 4:7] <- c(n.save, 100, effectiveSize(mcmc(samples.tau2)), geweke.diag(mcmc(samples.tau2))$z)
summary.hyper[3, 4:7] <- c(n.save, accept.rho, effectiveSize(mcmc(samples.rho)), geweke.diag(mcmc(samples.rho))$z)
summary.hyper[4, 4:7] <- c(n.save, accept.alpha, effectiveSize(mcmc(samples.alpha)), geweke.diag(mcmc(samples.alpha))$z)
summary.hyper[5, 4:7] <- c(n.save, 100, effectiveSize(mcmc(samples.vtau2)), geweke.diag(mcmc(samples.vtau2))$z)
if(!is.null(rhofix))
{
summary.hyper[3, ] <- c(rep(rhofix, 3),rep(NA, 4))
}
summary.results <- rbind(summary.beta, summary.hyper)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
# convert v back to w, summarise and create a 'fitted' adjacency matrix
samples.w <- inv_logit(samples.v)
colnames(samples.w) <- char.locs
get_prop_thresh <- function(v, thresh) as.numeric(!((sum(v < thresh)/length(v)) < 0.99))
bdry99 <- apply(samples.w, 2, get_prop_thresh, thresh = 0.5)
bdryMN <- apply(samples.w, 2, mean)
Wmn <- W99 <- matrix(NA, nrow = n.sites, ncol = n.sites)
W99[locs] <- bdry99
W99[locs[ ,c(2,1)]] <- bdry99
Wmn[locs] <- bdryMN
Wmn[locs[ ,c(2,1)]] <- bdryMN
#### Compile and return the results
model.string <- c("Likelihood model - Gaussian (identity link function)",
"\nLatent structure model - Adaptive autoregressive order 1 CAR model\n")
samples.tau2all <- cbind(samples.tau2, samples.vtau2)
colnames(samples.tau2all) <- c("tau2", "tau2.w")
if(is.null(rhofix))
{
samples.rhoext <- cbind(samples.rho, samples.alpha)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}else
{
samples.rhoext <- cbind(samples.alpha)
names(samples.rhoext) <- c("rho.T")
}
samples <- list(beta = mcmc(samples.beta.orig), phi = mcmc(samples.phi), rho = mcmc(samples.rhoext),
tau2 = mcmc(samples.tau2all), nu2 = mcmc(samples.nu2), w = mcmc(samples.w), fitted = mcmc(samples.fit))
localised.structure <- list(Wmedian = Wmn, W99 = W99)
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=localised.structure, formula=formula, model=model.string, X=X)
class(results) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
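#### Usage sketch (editorial addition; 'dat' and 'W' are hypothetical objects):
#### the adaptive model is fitted through the ST.CARadaptive wrapper, e.g.
## model <- ST.CARadaptive(formula=y~x, family="gaussian", data=dat, W=W,
## burnin=2000, n.sample=22000)
## model$localised.structure$W99   # 1 if P(w_ij < 0.5) >= 0.99, i.e. a boundary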
#### End of file: R/gaussian.CARadaptive.R
gaussian.CARanova <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.tau2=NULL, rho.S=NULL, rho.T=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
lambda <- runif(1)
fix.rho.T <- FALSE
}else
{
lambda <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(lambda)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(lambda<0 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
if(lambda>1 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
prior.var.check(prior.nu2)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- gaussian.CARanovaMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, interaction=interaction, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- gaussian.CARanovaMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, interaction=interaction, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=gaussian.CARanovaMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, interaction=interaction, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 5)
names(accept.final) <- c("beta", "phi", "delta", "rho.S", "rho.T")
accept.final[1:3] <- 100
if(!fix.rho.S) accept.final[4] <- 100 * results$accept[1] / results$accept[2]
if(!fix.rho.T) accept.final[5] <- 100 * results$accept[3] / results$accept[4]
## Compute the fitted deviance
mean.phi <- apply(results$samples.phi, 2, mean)
mean.delta <- apply(results$samples.delta, 2, mean)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K)
mean.delta.mat <- matrix(rep(mean.delta, K), byrow=T, nrow=K)
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
fitted.mean <- as.numeric(offset.mat + regression.mat + mean.phi.mat + mean.delta.mat)
nu2.mean <- mean(results$samples.nu2)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),N.all), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.lambda
colnames(samples.rhoext) <- "rho.T"
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
colnames(samples.rhoext) <- "rho.S"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.lambda)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}
colnames(results$samples.tau2) <- c("tau2.S", "tau2.T")
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), delta=mcmc(results$samples.delta), tau2=mcmc(results$samples.tau2), nu2=mcmc(results$samples.nu2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
n.tau2 <- ncol(results$samples.tau2)
summary.tau2 <- cbind(apply(results$samples.tau2, 2, mean), t(apply(results$samples.tau2, 2, quantile, c(0.025, 0.975))), rep(n.keep, n.tau2), rep(100, n.tau2),
effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.nu2 <- c(mean(results$samples.nu2), quantile(results$samples.nu2, c(0.025, 0.975)), n.keep, 100,
effectiveSize(samples$nu2), geweke.diag(samples$nu2)$z)
summary.var <- rbind(summary.nu2, summary.tau2)
rownames(summary.var) <- c("nu2", "tau2.S", "tau2.T")
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
summary.rho[2, 1:3] <- c(mean(results$samples.lambda), quantile(results$samples.lambda, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.lambda), geweke.diag(results$samples.lambda)$z)
}else
{
summary.rho[2, 1:3] <- c(lambda, lambda, lambda)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.var, summary.rho)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 5)
names(accept.final) <- c("beta", "phi", "delta", "rho.S", "rho.T")
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1:3] <- 100
if(!fix.rho.S) accept.final[4] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
if(!fix.rho.T) accept.final[5] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
samples.nu2.list <- lapply(results, function(l) l[["samples.nu2"]])
samples.nu2.matrix <- do.call(what=rbind, args=samples.nu2.list)
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]])
samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list)
}
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.delta <- apply(samples.delta.matrix, 2, mean)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K)
mean.delta.mat <- matrix(rep(mean.delta, K), byrow=T, nrow=K)
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
fitted.mean <- as.numeric(offset.mat + regression.mat + mean.phi.mat + mean.delta.mat)
nu2.mean <- mean(samples.nu2.matrix)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),N.all), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
nu2.mcmc <- mcmc.list(lapply(samples.nu2.list, mcmc))
delta.mcmc <- mcmc.list(lapply(samples.delta.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
for(j in 1:n.chains)
{
colnames(samples.tau2.list[[j]]) <- c("tau2.S", "tau2.T")
}
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.lambda.list[[j]]) <- c("rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.lambda.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.lambda.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, delta=delta.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, nu2=nu2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
n.tau2 <- ncol(samples.tau2.matrix)
summary.tau2 <- cbind(apply(samples.tau2.matrix, 2, mean), t(apply(samples.tau2.matrix, 2, quantile, c(0.025, 0.975))), rep(n.keep, n.tau2), rep(100, n.tau2),
effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.nu2 <- c(mean(samples.nu2.matrix), quantile(samples.nu2.matrix, c(0.025, 0.975)), n.keep, 100,
effectiveSize(nu2.mcmc), gelman.diag(nu2.mcmc)$psrf[ ,2])
summary.var <- rbind(summary.nu2, summary.tau2)
rownames(summary.var) <- c("nu2", "tau2.S", "tau2.T")
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.lambda.list, mcmc))
summary.rho[2, 1:3] <- c(mean(samples.lambda.matrix), quantile(samples.lambda.matrix, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[2, 1:3] <- c(lambda, lambda, lambda)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.var, summary.rho)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nLatent structure model - spatial and temporal main effects\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
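## A minimal usage sketch (not run): 'dat' and 'W.mat' below are hypothetical
## objects - a data frame of K*N space-time observations ordered with the K
## spatial units nested within each of the N time periods, and a binary K x K
## neighbourhood matrix - and the MCMC settings are illustrative only.
## fit <- gaussian.CARanova(formula=y~x1+x2, data=dat, W=W.mat,
##                          burnin=20000, n.sample=120000, thin=10)
## fit$summary.results    # posterior summaries for beta, nu2, tau2.S, tau2.T,
##                        # rho.S and rho.T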
#### End of file: R/gaussian.CARanova.R
gaussian.CARanovaMCMC <- function(Y, offset, X.standardised, W, interaction, rho, lambda, fix.rho.S, fix.rho.T, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, prior.mean.beta, prior.var.beta, prior.tau2, prior.nu2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
mod.glm <- glm(Y~X.standardised-1, offset=offset)
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
res.temp <- Y - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=0, sd = res.sd)
delta <- rnorm(n=N, mean=0, sd = res.sd)
tau2.phi <- var(phi)/10
tau2.delta <- var(delta)/10
nu2 <- runif(1, 0, res.sd)
#### Matrix versions of quantities
Y.DA <- Y
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
delta.mat <- matrix(rep(delta, K), byrow=T, nrow=K)
fitted <- as.numeric(offset.mat + regression.mat + phi.mat + delta.mat)
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.delta <- array(NA, c(n.keep, N))
samples.nu2 <- array(NA, c(n.keep, 1))
samples.tau2 <- array(NA, c(n.keep, 2))
colnames(samples.tau2) <- c("tau2.phi", "tau2.delta")
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.lambda <- array(NA, c(n.keep, 1))
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Specify the Metropolis quantities
accept <- rep(0,4)
proposal.sd.rho <- 0.02
proposal.sd.lambda <- 0.02
tau2.phi.shape <- prior.tau2[1] + K/2
tau2.delta.shape <- prior.tau2[1] + N/2
nu2.shape <- prior.nu2[1] + N*K/2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Spatial determinant
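## Half log-determinant of the Leroux precision Q(rho) = rho*(diag(W1) - W)
## + (1-rho)*I. Its eigenvalues are rho*e_i + (1-rho), where e_i are the
## eigenvalues of diag(W1) - W, so the eigendecomposition is computed once and
## the half log-determinant is a cheap sum for any proposed rho.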
if(!fix.rho.S)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q.W <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Temporal neighbourhood matrix
D <-array(0, c(N,N))
for(i in 1:N)
{
for(j in 1:N)
{
if(abs((i-j))==1) D[i,j] <- 1
}
}
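## D is the tridiagonal adjacency matrix of a first order random walk in time;
## for example with N=4:
##     0 1 0 0
##     1 0 1 0
##     0 1 0 1
##     0 0 1 0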
#### Temporal triplet object
D.triplet <- c(NA, NA, NA)
for(i in 1:N)
{
for(j in 1:N)
{
if(D[i,j]>0)
{
D.triplet <- rbind(D.triplet, c(i,j, D[i,j]))
}else{}
}
}
D.triplet <- D.triplet[-1, ]
D.n.triplet <- nrow(D.triplet)
D.triplet.sum <- tapply(D.triplet[ ,3], D.triplet[ ,1], sum)
D.neighbours <- tapply(D.triplet[ ,3], D.triplet[ ,1], length)
#### Temporal begfin argument
D.begfin <- array(NA, c(N, 2))
temp <- 1
for(i in 1:N)
{
D.begfin[i, ] <- c(temp, (temp + D.neighbours[i]-1))
temp <- temp + D.neighbours[i]
}
#### Temporal determinant
if(!fix.rho.T)
{
Dstar <- diag(apply(D,1,sum)) - D
Dstar.eigen <- eigen(Dstar)
Dstar.val <- Dstar.eigen$values
det.Q.D <- 0.5 * sum(log((lambda * Dstar.val + (1-lambda))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
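## When rho=1 (an intrinsic spatial CAR) the prior precision matrix for phi is
## singular with rank K - n.islands, and when lambda=1 the temporal random
## walk precision has rank N - 1, so the Gibbs shape parameters below use
## these ranks rather than the full dimensions.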
if(rho==1) tau2.phi.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
if(lambda==1) tau2.delta.shape <- prior.tau2[1] + 0.5 * (N-1)
#### Beta update quantities
data.precision.beta <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
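## Missing observations are imputed from their current full conditional
## N(fitted, nu2), so that the remaining Gibbs updates can treat the response
## as complete.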
if(n.miss>0)
{
Y.DA[which.miss==0] <- rnorm(n=n.miss, mean=fitted[which.miss==0], sd=sqrt(nu2))
}else
{}
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
##################
## Sample from nu2
##################
nu2.offset <- as.numeric(Y.DA.mat - offset.mat - regression.mat - phi.mat - delta.mat)
nu2.scale <- prior.nu2[2] + sum(nu2.offset^2)/2
nu2 <- 1 / rgamma(1, nu2.shape, scale=(1/nu2.scale))
####################
## Sample from beta
####################
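## beta has a conjugate Gaussian full conditional: posterior precision
## prior.precision.beta + X'X/nu2 and posterior mean fc.var %*%
## (X'(Y* - other terms)/nu2 + prior.precision.beta %*% prior.mean.beta),
## sampled exactly via the Cholesky factor of fc.var.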
fc.precision <- prior.precision.beta + data.precision.beta / nu2
fc.var <- solve(fc.precision)
beta.offset <- as.numeric(Y.DA.mat - offset.mat - phi.mat - delta.mat)
beta.offset2 <- t(X.standardised) %*% beta.offset / nu2 + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% beta.offset2
chol.var <- t(chol(fc.var))
beta <- fc.mean + chol.var %*% rnorm(p)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from phi
####################
phi.offset <- Y.DA.mat - offset.mat - regression.mat - delta.mat
phi.offset2 <- apply(phi.offset,1, sum, na.rm=TRUE)
temp1 <- gaussiancarupdate(W.triplet, W.begfin, W.triplet.sum, K, phi, tau2.phi, nu2, phi.offset2, rho, N)
phi <- temp1
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
####################
## Sample from delta
####################
delta.offset <- Y.DA.mat - offset.mat - regression.mat - phi.mat
delta.offset2 <- apply(delta.offset,2, sum, na.rm=TRUE)
temp2 <- gaussiancarupdate(D.triplet, D.begfin, D.triplet.sum, N, delta, tau2.delta, nu2, delta.offset2, lambda, K)
delta <- temp2
delta <- delta - mean(delta)
delta.mat <- matrix(rep(delta, K), byrow=T, nrow=K)
#######################
## Sample from tau2.phi
#######################
temp2.phi <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, rho)
tau2.phi.scale <- temp2.phi + prior.tau2[2]
tau2.phi <- 1 / rgamma(1, tau2.phi.shape, scale=(1/tau2.phi.scale))
#########################
## Sample from tau2.delta
#########################
temp2.delta <- quadform(D.triplet, D.triplet.sum, D.n.triplet, N, delta, delta, lambda)
tau2.delta.scale <- temp2.delta + prior.tau2[2]
tau2.delta <- 1 / rgamma(1, tau2.delta.shape, scale=(1/tau2.delta.scale))
##################
## Sample from rho
##################
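## rho is updated by a random walk Metropolis-Hastings step on [0, 1] using a
## truncated Gaussian proposal; the hastings term corrects for the asymmetry
## of the truncated proposal, and accept[1:2] track the acceptance rate used
## for self-tuning below.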
if(!fix.rho.S)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q.W - temp2.phi / tau2.phi
logprob.proposal <- det.Q.proposal - temp3 / tau2.phi
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.W <- det.Q.proposal
accept[1] <- accept[1] + 1
}else
{}
accept[2] <- accept[2] + 1
}else
{}
#####################
## Sample from lambda
#####################
if(!fix.rho.T)
{
proposal.lambda <- rtruncnorm(n=1, a=0, b=1, mean=lambda, sd=proposal.sd.lambda)
temp3 <- quadform(D.triplet, D.triplet.sum, D.n.triplet, N, delta, delta, proposal.lambda)
det.Q.proposal <- 0.5 * sum(log((proposal.lambda * Dstar.val + (1-proposal.lambda))))
logprob.current <- det.Q.D - temp2.delta / tau2.delta
logprob.proposal <- det.Q.proposal - temp3 / tau2.delta
hastings <- log(dtruncnorm(x=lambda, a=0, b=1, mean=proposal.lambda, sd=proposal.sd.lambda)) - log(dtruncnorm(x=proposal.lambda, a=0, b=1, mean=lambda, sd=proposal.sd.lambda))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
lambda <- proposal.lambda
det.Q.D <- det.Q.proposal
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
}else
{}
#########################
## Calculate the deviance
#########################
fitted <- as.numeric(offset.mat + regression.mat + phi.mat + delta.mat)
loglike <- dnorm(Y, mean = fitted, sd = rep(sqrt(nu2),N.all), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.delta[ele, ] <- delta
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.lambda[ele, ] <- lambda
samples.nu2[ele, ] <- nu2
samples.fitted[ele, ] <- fitted
samples.tau2[ele, ] <- c(tau2.phi, tau2.delta)
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
if(!fix.rho.S) proposal.sd.rho <- common.accceptrates2(accept[1:2], proposal.sd.rho, 40, 50, 0.5)
if(!fix.rho.T) proposal.sd.lambda <- common.accceptrates2(accept[3:4], proposal.sd.lambda, 40, 50, 0.5)
accept <- rep(0,4)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.lambda <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.delta=samples.delta, samples.tau2=samples.tau2, samples.nu2=samples.nu2, samples.rho=samples.rho, samples.lambda=samples.lambda, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
#### End of file: R/gaussian.CARanovaMCMC.R
gaussian.CARar1 <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, prior.nu2=NULL, rho.S=NULL, rho.T=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
gamma <- runif(1)
fix.rho.T <- FALSE
}else
{
gamma <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(gamma)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(gamma<0 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
if(gamma>1 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
prior.var.check(prior.nu2)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- gaussian.CARar1MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, gamma=gamma, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- gaussian.CARar1MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, gamma=gamma, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=gaussian.CARar1MCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, gamma=gamma, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 4)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T")
accept.final[1] <- 100
accept.final[2] <- 100
if(!fix.rho.S) accept.final[3] <- 100 * results$accept[1] / results$accept[2]
if(!fix.rho.T) accept.final[4] <- 100
## Compute the fitted deviance
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=N)
fitted.mean <- as.numeric(offset.mat + mean.phi + regression.mat)
nu2.mean <- mean(results$samples.nu2)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),N.all), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals / sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.gamma
colnames(samples.rhoext) <- "rho.T"
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
colnames(samples.rhoext) <- "rho.S"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.gamma)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), tau2=mcmc(results$samples.tau2), nu2=mcmc(results$samples.nu2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.tau2 <- c(mean(results$samples.tau2), quantile(results$samples.tau2, c(0.025, 0.975)), n.keep, 100,
effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.nu2 <- c(mean(results$samples.nu2), quantile(results$samples.nu2, c(0.025, 0.975)), n.keep, 100,
effectiveSize(samples$nu2), geweke.diag(samples$nu2)$z)
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
summary.rho[2, 1:3] <- c(mean(results$samples.gamma), quantile(results$samples.gamma, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.gamma), geweke.diag(results$samples.gamma)$z)
}else
{
summary.rho[2, 1:3] <- c(gamma, gamma, gamma)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.nu2, summary.tau2, summary.rho)
rownames(summary.results)[(p+1):(p+2)] <- c("nu2", "tau2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 4)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T")
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1] <- 100
accept.final[2] <- 100
if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
if(!fix.rho.T) accept.final[4] <- 100
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.gamma.list <- lapply(results, function(l) l[["samples.gamma"]])
samples.gamma.matrix <- do.call(what=rbind, args=samples.gamma.list)
}
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.nu2.list <- lapply(results, function(l) l[["samples.nu2"]])
samples.nu2.matrix <- do.call(what=rbind, args=samples.nu2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=N)
fitted.mean <- as.numeric(offset.mat + mean.phi + regression.mat)
nu2.mean <- mean(samples.nu2.matrix)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),N.all), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals / sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
nu2.mcmc <- mcmc.list(lapply(samples.nu2.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.gamma.list[[j]]) <- c("rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.gamma.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.gamma.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, nu2=nu2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.tau2 <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)), n.keep, 100,
effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.nu2 <- c(mean(samples.nu2.matrix), quantile(samples.nu2.matrix, c(0.025, 0.975)), n.keep, 100,
effectiveSize(nu2.mcmc), gelman.diag(nu2.mcmc)$psrf[ ,2])
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.gamma.list, mcmc))
summary.rho[2, 1:3] <- c(mean(samples.gamma.matrix), quantile(samples.gamma.matrix, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[2, 1:3] <- c(gamma, gamma, gamma)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.nu2, summary.tau2, summary.rho)
rownames(summary.results)[(p+1):(p+2)] <- c("nu2", "tau2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nLatent structure model - Autoregressive order 1 CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
#### End of file: R/gaussian.CARar1.R
gaussian.CARar1MCMC <- function(Y, offset, X.standardised, W, rho, gamma, fix.rho.S, fix.rho.T, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, prior.mean.beta, prior.var.beta, prior.tau2, prior.nu2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
mod.glm <- glm(Y~X.standardised-1, offset=offset)
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
res.temp <- Y - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=N.all, mean=0, sd = res.sd)
tau2 <- var(phi)/10
nu2 <- runif(1, 0, res.sd)
#### Matrix versions of quantities
Y.DA <- Y
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
fitted <- as.numeric(offset.mat + regression.mat + phi.mat)
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, N.all))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.nu2 <- array(NA, c(n.keep, 1))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.gamma <- array(NA, c(n.keep, 1))
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Specify the Metropolis quantities
accept <- rep(0,2)
proposal.sd.rho <- 0.05
tau2.shape <- prior.tau2[1] + N.all/2
nu2.shape <- prior.nu2[1] + N.all/2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the determinant
if(!fix.rho.S)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q.W <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
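## Rank corrections for the improper limiting cases: rho=1 gives an intrinsic
## spatial CAR of rank K - n.islands, gamma=1 gives a temporal random walk of
## rank N - 1, and the Gibbs shape for tau2 below uses the rank of the joint
## space-time prior precision in place of the full dimension N*K.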
if(rho==1 & gamma==1)
{
tau2.shape <- prior.tau2[1] + ((N-1) * (K-n.islands))/2
}else if(rho==1)
{
tau2.shape <- prior.tau2[1] + (N * (K-n.islands))/2
}else if(gamma==1)
{
tau2.shape <- prior.tau2[1] + ((N-1) * K)/2
}else
{}
#### Beta update quantities
data.precision.beta <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rnorm(n=n.miss, mean=fitted[which.miss==0], sd=sqrt(nu2))
}else
{}
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
##################
## Sample from nu2
##################
nu2.offset <- as.numeric(Y.DA.mat - offset.mat - regression.mat - phi.mat)
nu2.scale <- prior.nu2[2] + sum(nu2.offset^2)/2
nu2 <- 1 / rgamma(1, nu2.shape, scale=(1/nu2.scale))
####################
## Sample from beta
####################
fc.precision <- prior.precision.beta + data.precision.beta / nu2
fc.var <- solve(fc.precision)
beta.offset <- as.numeric(Y.DA.mat - offset.mat - phi.mat)
beta.offset2 <- t(X.standardised) %*% beta.offset / nu2 + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% beta.offset2
chol.var <- t(chol(fc.var))
beta <- fc.mean + chol.var %*% rnorm(p)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from phi
####################
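## phi is updated as a K x N matrix from its AR(1) CAR full conditionals and
## then centred, as the overall level of phi is confounded with the intercept.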
phi.offset <- Y.DA.mat - offset.mat - regression.mat
den.offset <- rho * W.triplet.sum + 1 - rho
phi.temp <- gaussianar1carupdate(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, tau2, nu2, gamma, rho, phi.offset, den.offset)
phi <- as.numeric(phi.temp) - mean(as.numeric(phi.temp))
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from gamma
####################
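## gamma has a Gaussian full conditional (a weighted regression of phi_t on
## phi_{t-1} under the spatial precision), restricted here to [0, 1];
## gammaquadformcompute() returns the two quadratic form terms giving its
## mean and variance.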
if(!fix.rho.T)
{
temp2 <- gammaquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho)
mean.gamma <- temp2[[1]] / temp2[[2]]
sd.gamma <- sqrt(tau2 / temp2[[2]])
gamma <- rtruncnorm(n=1, a=0, b=1, mean=mean.gamma, sd=sd.gamma)
}else
{}
####################
## Sample from tau2
####################
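## tau2 has an inverse-gamma full conditional; rgamma() with scale =
## 1/tau2.scale draws a Gamma(shape, rate=tau2.scale) variable, whose
## reciprocal is the required inverse-gamma sample.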
temp3 <- tauquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho, gamma)
tau2.scale <- temp3 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.shape, scale=(1/tau2.scale))
##################
## Sample from rho
##################
if(!fix.rho.S)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp4 <- tauquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, proposal.rho, gamma)
det.Q.W.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- N * det.Q.W - temp3 / tau2
logprob.proposal <- N * det.Q.W.proposal - temp4 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.W <- det.Q.W.proposal
accept[1] <- accept[1] + 1
}else
{}
accept[2] <- accept[2] + 1
}else
{}
#########################
## Calculate the deviance
#########################
fitted <- as.numeric(offset.mat + regression.mat + phi.mat)
loglike <- dnorm(Y, mean = fitted, sd = rep(sqrt(nu2),N.all), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- as.numeric(phi)
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.gamma[ele, ] <- gamma
samples.tau2[ele, ] <- tau2
samples.nu2[ele, ] <- nu2
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
if(!fix.rho.S) proposal.sd.rho <- common.accceptrates2(accept[1:2], proposal.sd.rho, 40, 50, 0.5)
accept <- rep(0,2)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.gamma <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.nu2=samples.nu2, samples.rho=samples.rho, samples.gamma=samples.gamma, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
#### End of file: R/gaussian.CARar1MCMC.R
gaussian.CARar2 <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.tau2=NULL, rho.S=NULL, rho.T=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
alpha <- c(runif(1), runif(1))
fix.rho.T <- FALSE
}else
{
alpha <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(alpha)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(length(alpha)!=2) stop("rho.T is fixed but is not of length 2.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
prior.var.check(prior.nu2)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- gaussian.CARar2MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- gaussian.CARar2MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=gaussian.CARar2MCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 4)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T")
accept.final[1] <- 100
accept.final[2] <- 100
if(!fix.rho.S) accept.final[3] <- 100 * results$accept[1] / results$accept[2]
if(!fix.rho.T) accept.final[4] <- 100
## Compute the fitted deviance
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=N)
fitted.mean <- as.numeric(offset.mat + mean.phi + regression.mat)
nu2.mean <- mean(results$samples.nu2)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),N.all), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals / sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.alpha
colnames(samples.rhoext) <- c("rho1.T", "rho2.T")
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
colnames(samples.rhoext) <- "rho.S"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.alpha)
colnames(samples.rhoext) <- c("rho.S", "rho1.T", "rho2.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), tau2=mcmc(results$samples.tau2), nu2=mcmc(results$samples.nu2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.tau2 <- c(mean(results$samples.tau2), quantile(results$samples.tau2, c(0.025, 0.975)), n.keep, 100,
effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.nu2 <- c(mean(results$samples.nu2), quantile(results$samples.nu2, c(0.025, 0.975)), n.keep, 100,
effectiveSize(samples$nu2), geweke.diag(samples$nu2)$z)
summary.rho <- array(NA, c(3,7))
row.names(summary.rho) <- c("rho.S", "rho1.T", "rho2.T")
if(!fix.rho.S)
{
summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
summary.rho[2, 1:3] <- c(mean(results$samples.alpha[ ,1]), quantile(results$samples.alpha[ ,1], c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.alpha[ ,1]), geweke.diag(results$samples.alpha[ ,1])$z)
summary.rho[3, 1:3] <- c(mean(results$samples.alpha[ ,2]), quantile(results$samples.alpha[ ,2], c(0.025, 0.975)))
summary.rho[3, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.alpha[ ,2]), geweke.diag(results$samples.alpha[ ,2])$z)
}else
{
summary.rho[2, 1:3] <- c(alpha[1], alpha[1], alpha[1])
summary.rho[2, 4:7] <- rep(NA, 4)
summary.rho[3, 1:3] <- c(alpha[2], alpha[2], alpha[2])
summary.rho[3, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.nu2, summary.tau2, summary.rho)
rownames(summary.results)[(p+1):(p+2)] <- c("nu2", "tau2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 4)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T")
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1] <- 100
accept.final[2] <- 100
if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
if(!fix.rho.T) accept.final[4] <- 100
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]])
samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list)
}
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.nu2.list <- lapply(results, function(l) l[["samples.nu2"]])
samples.nu2.matrix <- do.call(what=rbind, args=samples.nu2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=N)
fitted.mean <- as.numeric(offset.mat + mean.phi + regression.mat)
nu2.mean <- mean(samples.nu2.matrix)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),N.all), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals / sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
nu2.mcmc <- mcmc.list(lapply(samples.nu2.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.alpha.list[[j]]) <- c("rho1.T", "rho2.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.alpha.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho1.T", "rho2.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, nu2=nu2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.tau2 <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)), n.keep, 100,
effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.nu2 <- c(mean(samples.nu2.matrix), quantile(samples.nu2.matrix, c(0.025, 0.975)), n.keep, 100,
effectiveSize(nu2.mcmc), gelman.diag(nu2.mcmc)$psrf[ ,2])
summary.rho <- array(NA, c(3,7))
row.names(summary.rho) <- c("rho.S", "rho1.T", "rho2.T")
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.alpha.list, mcmc))
summary.rho[2, 1:3] <- c(mean(samples.alpha.matrix[ ,1]), quantile(samples.alpha.matrix[ ,1], c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp[ ,1]), gelman.diag(temp[ ,1])$psrf[ ,2])
summary.rho[3, 1:3] <- c(mean(samples.alpha.matrix[ ,2]), quantile(samples.alpha.matrix[ ,2], c(0.025, 0.975)))
summary.rho[3, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp[ ,2]), gelman.diag(temp[ ,2])$psrf[ ,2])
}else
{
summary.rho[2, 1:3] <- c(alpha[1], alpha[1], alpha[1])
summary.rho[2, 4:7] <- rep(NA, 4)
summary.rho[3, 1:3] <- c(alpha[2], alpha[2], alpha[2])
summary.rho[3, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.nu2, summary.tau2, summary.rho)
rownames(summary.results)[(p+1):(p+2)] <- c("nu2", "tau2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nLatent structure model - Autoregressive order 2 CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
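#### Illustrative usage: a minimal sketch, not part of the package source. It
#### assumes the exported front end ST.CARar() with AR=2 dispatches to the
#### Gaussian AR(2) model above; the 5 x 5 grid, covariate and response below
#### are simulated purely for demonstration.
library(CARBayesST)
set.seed(1)
K <- 25; N <- 8                                 # 25 areas observed at 8 time points
W <- matrix(0, K, K)                            # rook adjacency on a 5 x 5 grid
for(i in 1:K)
{
for(j in 1:K)
{
if(abs(i-j)==1 & (i-1)%/%5 == (j-1)%/%5) W[i,j] <- 1    # horizontal neighbours
if(abs(i-j)==5) W[i,j] <- 1                             # vertical neighbours
}
}
x <- rnorm(K*N)
Y <- 2 + 0.5 * x + rnorm(K*N, sd=0.2)           # ordered with all K areas at time 1 first
model <- ST.CARar(Y ~ x, family="gaussian", W=W, AR=2,
burnin=2000, n.sample=12000, thin=2, verbose=FALSE)
model$summary.results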
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/gaussian.CARar2.R
|
gaussian.CARar2MCMC <- function(Y, offset, X.standardised, W, rho, alpha, fix.rho.S, fix.rho.T, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, prior.mean.beta, prior.var.beta, prior.nu2, prior.tau2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#library(MASS)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
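#### Strategy: fit a non-spatial GLM first, then jitter its estimated
#### coefficients by their standard errors so that different chains start from
#### overdispersed values; phi, tau2 and nu2 are initialised from the scaled
#### GLM residuals.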
mod.glm <- glm(Y~X.standardised-1, offset=offset)
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
res.temp <- Y - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=N.all, mean=0, sd = res.sd)
tau2 <- var(phi)/10
nu2 <- runif(1, 0, res.sd)
#### Matrix versions of quantities
Y.DA <- Y
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
fitted <- as.numeric(offset.mat + regression.mat + phi.mat)
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, N.all))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.nu2 <- array(NA, c(n.keep, 1))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.alpha <- array(NA, c(n.keep, 2))
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Specify the Metropolis quantities
accept <- rep(0,2)
proposal.sd.rho <- 0.05
tau2.shape <- prior.tau2[1] + N.all/2
nu2.shape <- prior.nu2[1] + N.all/2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the determinant
if(!fix.rho.S)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q.W <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1 & alpha[1]==2 & alpha[2]==-1)
{
tau2.shape <- prior.tau2[1] + ((N-2) * (K-n.islands))/2
}else if(rho==1)
{
tau2.shape <- prior.tau2[1] + (N * (K-n.islands))/2
}else if(alpha[1]==2 & alpha[2]==-1)
{
tau2.shape <- prior.tau2[1] + ((N-2) * K)/2
}else
{}
#### Beta update quantities
data.precision.beta <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rnorm(n=n.miss, mean=fitted[which.miss==0], sd=sqrt(nu2))
}else
{}
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
##################
## Sample from nu2
##################
nu2.offset <- as.numeric(Y.DA.mat - offset.mat - regression.mat - phi.mat)
nu2.scale <- prior.nu2[2] + sum(nu2.offset^2)/2
nu2 <- 1 / rgamma(1, nu2.shape, scale=(1/nu2.scale))
####################
## Sample from beta
####################
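## Conjugate Gaussian full conditional: with precision Q = prior.precision +
## X'X / nu2, beta | . ~ N(Q^{-1} b, Q^{-1}), where b = X'(Y - offset - phi) /
## nu2 + prior.precision %*% prior.mean.beta; the draw uses the Cholesky
## factor of the full conditional variance.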
fc.precision <- prior.precision.beta + data.precision.beta / nu2
fc.var <- solve(fc.precision)
beta.offset <- as.numeric(Y.DA.mat - offset.mat - phi.mat)
beta.offset2 <- t(X.standardised) %*% beta.offset / nu2 + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% beta.offset2
chol.var <- t(chol(fc.var))
beta <- fc.mean + chol.var %*% rnorm(p)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from phi
####################
phi.offset <- Y.DA.mat - offset.mat - regression.mat
den.offset <- rho * W.triplet.sum + 1 - rho
phi.temp <- gaussianar2carupdate(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, tau2, nu2, alpha[1], alpha[2], rho, phi.offset, den.offset)
phi <- as.numeric(phi.temp) - mean(as.numeric(phi.temp))
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from alpha
####################
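## The AR(2) coefficients (alpha[1], alpha[2]) have a bivariate Gaussian full
## conditional; alphaquadformcompute() returns the quadratic forms defining
## its 2 x 2 precision matrix and mean, and mvrnorm() from MASS draws the
## pair jointly.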
if(!fix.rho.T)
{
#### Construct the quadratic forms
temp2 <- alphaquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho, tau2)
#### Construct the precision matrix
alpha.prec <- array(c(temp2[[1]], temp2[[3]], temp2[[3]], temp2[[2]]), c(2,2))
alpha.var <- solve(alpha.prec)
#### Construct the mean vector
U2 <- (temp2[[1]] * temp2[[5]] - temp2[[3]] * temp2[[4]]) / (temp2[[2]] * temp2[[1]] - temp2[[3]]^2)
U1 <- (1 / temp2[[3]]) * (temp2[[5]] - temp2[[2]] * U2)
alpha.mean <- c(U1, U2)
alpha <- mvrnorm(n=1, mu=alpha.mean, Sigma=alpha.var)
}else
{}
####################
## Sample from tau2
####################
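## Conjugate update: tau2 | . is inverse-gamma with the shape fixed above and
## scale equal to prior.tau2[2] plus the CAR quadratic form in phi, drawn via
## the 1/rgamma() idiom.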
temp3 <- tauquadformcomputear2(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho, alpha[1], alpha[2])
tau2.scale <- temp3 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.shape, scale=(1/tau2.scale))
##################
## Sample from rho
##################
if(!fix.rho.S)
{
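#### Random-walk Metropolis-Hastings step for rho on [0, 1] with a truncated
#### Gaussian proposal; the hastings term corrects for the proposal's
#### asymmetry, and the log-determinant is recomputed cheaply from the cached
#### eigenvalues of Wstar.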
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp4 <- tauquadformcomputear2(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, proposal.rho, alpha[1], alpha[2])
det.Q.W.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- N * det.Q.W - temp3 / tau2
logprob.proposal <- N * det.Q.W.proposal - temp4 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.W <- det.Q.W.proposal
accept[1] <- accept[1] + 1
}else
{}
accept[2] <- accept[2] + 1
}else
{}
#########################
## Calculate the deviance
#########################
fitted <- as.numeric(offset.mat + regression.mat + phi.mat)
loglike <- dnorm(Y, mean = fitted, sd = rep(sqrt(nu2),N.all), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- as.numeric(phi)
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.alpha[ele, ] <- alpha
samples.tau2[ele, ] <- tau2
samples.nu2[ele, ] <- nu2
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
if(!fix.rho.S) proposal.sd.rho <- common.accceptrates2(accept[1:2], proposal.sd.rho, 40, 50, 0.5)
accept <- rep(0,2)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.alpha <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.nu2=samples.nu2, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.alpha=samples.alpha, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
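#### A minimal, self-contained sketch (not package code) verifying the Leroux
#### determinant identity used above: for Q(rho) = rho*Wstar + (1-rho)*I with
#### Wstar = diag(rowSums(W)) - W, log|Q(rho)| = sum(log(rho*lambda_i + 1-rho)),
#### where lambda_i are the eigenvalues of Wstar; the sampler stores half this
#### value, matching the |Q|^{1/2} term of the Gaussian density. The 4-unit
#### chain graph below is an assumed toy example.
W <- matrix(0, 4, 4)
W[cbind(1:3, 2:4)] <- 1
W <- W + t(W)
Wstar <- diag(rowSums(W)) - W
rho <- 0.7
Q <- rho * Wstar + (1 - rho) * diag(4)
log.det.direct <- as.numeric(determinant(Q, logarithm=TRUE)$modulus)
log.det.eigen <- sum(log(rho * eigen(Wstar)$values + (1 - rho)))
all.equal(log.det.direct, log.det.eigen)    # TRUE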
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/gaussian.CARar2MCMC.R
|
gaussian.CARlinear <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.mean.alpha=NULL, prior.var.alpha=NULL, prior.nu2=NULL, prior.tau2=NULL, rho.slo=NULL, rho.int=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
time <- (1:N - mean(1:N))/N
time.mat <- matrix(rep(time, K), byrow=TRUE, nrow=K)
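#### The time covariate is centred and scaled to roughly [-0.5, 0.5], which
#### helps reduce posterior correlation between the intercept and the overall
#### temporal slope alpha.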
#### Check on the rho arguments
if(is.null(rho.int))
{
rho <- runif(1)
fix.rho.int <- FALSE
}else
{
rho <- rho.int
fix.rho.int <- TRUE
}
if(!is.numeric(rho)) stop("rho.int is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.int is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.int is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.slo))
{
lambda <- runif(1)
fix.rho.slo <- FALSE
}else
{
lambda <- rho.slo
fix.rho.slo <- TRUE
}
if(!is.numeric(lambda)) stop("rho.slo is fixed but is not numeric.", call.=FALSE)
if(lambda<0 ) stop("rho.slo is outside the range [0, 1].", call.=FALSE)
if(lambda>1 ) stop("rho.slo is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
if(is.null(prior.mean.alpha)) prior.mean.alpha <- rep(0, 1)
if(is.null(prior.var.alpha)) prior.var.alpha <- rep(100000, 1)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
prior.var.check(prior.nu2)
if(length(prior.mean.alpha)!=1) stop("the prior mean for alpha is the wrong length.", call.=FALSE)
if(!is.numeric(prior.mean.alpha)) stop("the prior mean for alpha is not numeric.", call.=FALSE)
if(sum(is.na(prior.mean.alpha))!=0) stop("the prior mean for alpha has missing values.", call.=FALSE)
if(length(prior.var.alpha)!=1) stop("the prior variance for alpha is the wrong length.", call.=FALSE)
if(!is.numeric(prior.var.alpha)) stop("the prior variance for alpha is not numeric.", call.=FALSE)
if(sum(is.na(prior.var.alpha))!=0) stop("the prior variance for alpha has missing values.", call.=FALSE)
if(min(prior.var.alpha) <=0) stop("the prior variance for alpha has elements less than zero.", call.=FALSE)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- gaussian.CARlinearMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.int=fix.rho.int, fix.rho.slo=fix.rho.slo, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- gaussian.CARlinearMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.int=fix.rho.int, fix.rho.slo=fix.rho.slo, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=gaussian.CARlinearMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.int=fix.rho.int, fix.rho.slo=fix.rho.slo, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.tau2=prior.tau2, prior.nu2=prior.nu2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### End timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "alpha", "phi", "delta", "rho.int", "rho.slo")
accept.final[1:4] <- 100
if(!fix.rho.int) accept.final[5] <- 100 * results$accept[1] / results$accept[2]
if(!fix.rho.slo) accept.final[6] <- 100 * results$accept[3] / results$accept[4]
## Compute the fitted deviance
mean.phi <- apply(results$samples.phi, 2, mean)
mean.delta <- apply(results$samples.delta, 2, mean)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K)
delta.time.mat <- apply(time.mat, 2, "*", mean.delta)
mean.alpha <- mean(results$samples.alpha)
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
nu2.mean <- mean(results$samples.nu2)
fitted.mean <- offset.mat + regression.mat + mean.phi.mat + delta.time.mat + mean.alpha * time.mat
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),N.all), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
## Create the samples object
if(fix.rho.int & fix.rho.slo)
{
samples.rhoext <- NA
}else if(fix.rho.int & !fix.rho.slo)
{
samples.rhoext <- results$samples.lambda
names(samples.rhoext) <- "rho.slo"
}else if(!fix.rho.int & fix.rho.slo)
{
samples.rhoext <- results$samples.rho
names(samples.rhoext) <- "rho.int"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.lambda)
colnames(samples.rhoext) <- c("rho.int", "rho.slo")
}
colnames(results$samples.tau2) <- c("tau2.int", "tau2.slo")
samples <- list(beta=mcmc(samples.beta.orig), alpha=mcmc(results$samples.alpha), phi=mcmc(results$samples.phi), delta=mcmc(results$samples.delta), tau2=mcmc(results$samples.tau2), nu2=mcmc(results$samples.nu2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.tau2 <- cbind(apply(results$samples.tau2, 2, mean), t(apply(results$samples.tau2, 2, quantile, c(0.025, 0.975))), rep(n.keep, 2), rep(100, 2),
effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.alpha <- c(mean(results$samples.alpha), quantile(results$samples.alpha, c(0.025, 0.975)), n.keep, accept.final[names(accept.final)=="alpha"],
effectiveSize(samples$alpha), geweke.diag(samples$alpha)$z)
summary.nu2 <- c(mean(results$samples.nu2), quantile(results$samples.nu2, c(0.025, 0.975)), n.keep, 100,
effectiveSize(samples$nu2), geweke.diag(samples$nu2)$z)
summary.combine <- rbind(summary.alpha, summary.nu2, summary.tau2)
rownames(summary.combine) <- c("alpha", "nu2", "tau2.int", "tau2.slo")
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.int", "rho.slo")
if(!fix.rho.int)
{
summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.int"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.slo)
{
summary.rho[2, 1:3] <- c(mean(results$samples.lambda), quantile(results$samples.lambda, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.slo"], effectiveSize(results$samples.lambda), geweke.diag(results$samples.lambda)$z)
}else
{
summary.rho[2, 1:3] <- c(lambda, lambda, lambda)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.combine, summary.rho)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "alpha", "phi", "delta", "rho.int", "rho.slo")
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1:4] <- 100
if(!fix.rho.int) accept.final[5] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
if(!fix.rho.slo) accept.final[6] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]])
samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list)
if(!fix.rho.int)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.slo)
{
samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]])
samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list)
}
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.nu2.list <- lapply(results, function(l) l[["samples.nu2"]])
samples.nu2.matrix <- do.call(what=rbind, args=samples.nu2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.delta <- apply(samples.delta.matrix, 2, mean)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K)
delta.time.mat <- apply(time.mat, 2, "*", mean.delta)
mean.alpha <- mean(samples.alpha.matrix)
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
nu2.mean <- mean(samples.nu2.matrix)
fitted.mean <- offset.mat + regression.mat + mean.phi.mat + delta.time.mat + mean.alpha * time.mat
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),N.all), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
alpha.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
delta.mcmc <- mcmc.list(lapply(samples.delta.list, mcmc))
nu2.mcmc <- mcmc.list(lapply(samples.nu2.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
for(j in 1:n.chains)
{
colnames(samples.tau2.list[[j]]) <- c("tau2.int", "tau2.slo")
}
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.int & fix.rho.slo)
{
rhoext.mcmc <- NA
}else if(fix.rho.int & !fix.rho.slo)
{
for(j in 1:n.chains)
{
colnames(samples.lambda.list[[j]]) <- c("rho.slo")
}
rhoext.mcmc <- mcmc.list(lapply(samples.lambda.list, mcmc))
}else if(!fix.rho.int & fix.rho.slo)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.int")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.lambda.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.int", "rho.slo")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, alpha=alpha.mcmc, phi=phi.mcmc, delta=delta.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, nu2=nu2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.tau2 <- cbind(apply(samples.tau2.matrix, 2, mean), t(apply(samples.tau2.matrix, 2, quantile, c(0.025, 0.975))), rep(n.keep, 2), rep(100, 2),
effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.alpha <- c(mean(samples.alpha.matrix), quantile(samples.alpha.matrix, c(0.025, 0.975)), n.keep, accept.final[names(accept.final)=="alpha"],
effectiveSize(alpha.mcmc), gelman.diag(alpha.mcmc)$psrf[ ,2])
summary.nu2 <- c(mean(samples.nu2.matrix), quantile(samples.nu2.matrix, c(0.025, 0.975)), n.keep, 100,
effectiveSize(nu2.mcmc), gelman.diag(nu2.mcmc)$psrf[ ,2])
summary.combine <- rbind(summary.alpha, summary.nu2, summary.tau2)
rownames(summary.combine) <- c("alpha", "nu2", "tau2.int", "tau2.slo")
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.int", "rho.slo")
if(!fix.rho.int)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.int"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.slo)
{
temp <- mcmc.list(lapply(samples.lambda.list, mcmc))
summary.rho[2, 1:3] <- c(mean(samples.lambda.matrix), quantile(samples.lambda.matrix, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.slo"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[2, 1:3] <- c(lambda, lambda, lambda)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.combine, summary.rho)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nLatent structure model - Spatially autocorrelated linear time trends\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
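#### Illustrative usage: a minimal sketch, not part of the package source. It
#### assumes the exported front end ST.CARlinear() dispatches to the Gaussian
#### model above; the chain-graph W and the simulated data are purely for
#### demonstration.
library(CARBayesST)
set.seed(1)
K <- 30; N <- 6                                  # 30 areas observed at 6 time points
W <- matrix(0, K, K)
W[cbind(1:(K-1), 2:K)] <- 1                      # chain graph: area k neighbours k+1
W <- W + t(W)
x <- rnorm(K*N)
trend <- rep(seq(-0.5, 0.5, length.out=N), each=K)   # common linear time trend
Y <- 1 + 0.3 * x + trend + rnorm(K*N, sd=0.1)
model <- ST.CARlinear(Y ~ x, family="gaussian", W=W,
burnin=2000, n.sample=12000, thin=2, verbose=FALSE)
model$summary.results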
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/gaussian.CARlinear.R
|
gaussian.CARlinearMCMC <- function(Y, offset, X.standardised, W, rho, lambda, fix.rho.int, fix.rho.slo, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, prior.mean.beta, prior.var.beta, prior.mean.alpha, prior.var.alpha, prior.tau2, prior.nu2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
time <- (1:N - mean(1:N))/N
time.all <- kronecker(time, rep(1,K))
mod.glm <- glm(Y~X.standardised-1 + time.all, offset=offset)
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
temp <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
beta <- temp[1:p]
alpha <- temp[(p+1)]
res.temp <- Y - as.numeric(X.standardised %*% beta) - time.all * alpha - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=0, sd = res.sd)
delta <- rnorm(n=K, mean=0, sd = res.sd)
tau2.phi <- var(phi)/10
tau2.delta <- var(delta)/10
nu2 <- runif(1, 0, res.sd)
#### Specify matrix quantities
Y.DA <- Y
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
time.mat <- matrix(rep(time, K), byrow=TRUE, nrow=K)
delta.time.mat <- apply(time.mat, 2, "*", delta)
alpha.offset1 <- sum(time.mat^2)
fitted <- as.numeric(offset.mat + regression.mat + phi.mat + delta.time.mat + alpha * time.mat)
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.alpha <- array(NA, c(n.keep, 1))
samples.phi <- array(NA, c(n.keep, K))
samples.delta <- array(NA, c(n.keep, K))
if(!fix.rho.int) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.slo) samples.lambda <- array(NA, c(n.keep, 1))
samples.nu2 <- array(NA, c(n.keep, 1))
samples.tau2 <- array(NA, c(n.keep, 2))
colnames(samples.tau2) <- c("tau2.int", "tau2.slo")
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Specify the Metropolis quantities
accept <- rep(0,4)
proposal.sd.rho <- 0.02
proposal.sd.lambda <- 0.02
nu2.shape <- prior.nu2[1] + N*K/2
tau2.phi.shape <- prior.tau2[1] + K/2
tau2.delta.shape <- prior.tau2[1] + K/2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the determinant
if(!fix.rho.int | !fix.rho.slo)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
}else
{}
if(!fix.rho.int) det.Q.rho <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
if(!fix.rho.slo) det.Q.lambda <- 0.5 * sum(log((lambda * Wstar.val + (1-lambda))))
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.phi.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
if(lambda==1) tau2.delta.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Beta update quantities
data.precision.beta <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rnorm(n=n.miss, mean=fitted[which.miss==0], sd=sqrt(nu2))
}else
{}
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
##################
## Sample from nu2
##################
nu2.offset <- as.numeric(Y.DA.mat - offset.mat - regression.mat - phi.mat - delta.time.mat - alpha * time.mat)
nu2.scale <- prior.nu2[2] + sum(nu2.offset^2)/2
nu2 <- 1 / rgamma(1, nu2.shape, scale=(1/nu2.scale))
####################
## Sample from beta
####################
fc.precision <- prior.precision.beta + data.precision.beta / nu2
fc.var <- solve(fc.precision)
beta.offset <- as.numeric(Y.DA.mat - offset.mat - phi.mat - delta.time.mat - alpha * time.mat)
beta.offset2 <- t(X.standardised) %*% beta.offset / nu2 + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% beta.offset2
chol.var <- t(chol(fc.var))
beta <- fc.mean + chol.var %*% rnorm(p)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from alpha
####################
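## Scalar Gaussian full conditional for the overall slope alpha: the precision
## is 1/prior.var.alpha + sum(time.mat^2)/nu2 (the latter precomputed as
## alpha.offset1), and the mean combines the data term with the prior in the
## usual conjugate way.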
fc.var <- 1 / (1 / prior.var.alpha + alpha.offset1 / nu2)
alpha.offset <- (Y.DA.mat - offset.mat - regression.mat - phi.mat - delta.time.mat) * time.mat
alpha.offset2 <- sum(alpha.offset, na.rm=TRUE) / nu2
fc.mean <- fc.var * (alpha.offset2 + prior.mean.alpha / prior.var.alpha)
alpha <- rnorm(n=1, mean=fc.mean, sd=sqrt(fc.var))
####################
## Sample from phi
####################
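## gaussiancarupdate() is the compiled Rcpp routine that Gibbs-samples each
## phi[k] from its univariate Leroux CAR full conditional; the subsequent
## mean-centring (by island when rho = 1) imposes the sum-to-zero constraint
## that keeps the intercept identifiable.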
phi.offset <- Y.DA.mat - offset.mat - regression.mat - delta.time.mat - alpha * time.mat
phi.offset2 <- apply(phi.offset,1, sum, na.rm=TRUE)
temp1 <- gaussiancarupdate(W.triplet, W.begfin, W.triplet.sum, K, phi, tau2.phi, nu2, phi.offset2, rho, N)
phi <- temp1
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
####################
## Sample from delta
####################
delta.offset <- (Y.DA.mat - offset.mat - regression.mat - phi.mat - alpha * time.mat) * time.mat
delta.offset2 <- apply(delta.offset,1, sum, na.rm=TRUE)
temp2 <- gaussiancarupdate(W.triplet, W.begfin, W.triplet.sum, K, delta, tau2.delta, nu2, delta.offset2, lambda, sum(time^2))
delta <- temp2
if(lambda <1)
{
delta <- delta - mean(delta)
}else
{
delta[which(islands==1)] <- delta[which(islands==1)] - mean(delta[which(islands==1)])
}
delta.time.mat <- apply(time.mat, 2, "*", delta)
#######################
## Sample from tau2.phi
#######################
temp2.phi <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, rho)
tau2.phi.scale <- temp2.phi + prior.tau2[2]
tau2.phi <- 1 / rgamma(1, tau2.phi.shape, scale=(1/tau2.phi.scale))
#########################
## Sample from tau2.delta
#########################
temp2.delta <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, delta, delta, lambda)
tau2.delta.scale <- temp2.delta + prior.tau2[2]
tau2.delta <- 1 / rgamma(1, tau2.delta.shape, scale=(1/tau2.delta.scale))
##################
## Sample from rho
##################
if(!fix.rho.int)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q.rho - temp2.phi / tau2.phi
logprob.proposal <- det.Q.proposal - temp3 / tau2.phi
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.rho <- det.Q.proposal
accept[1] <- accept[1] + 1
}else
{}
accept[2] <- accept[2] + 1
}else
{}
#####################
## Sample from lambda
#####################
if(!fix.rho.slo)
{
proposal.lambda <- rtruncnorm(n=1, a=0, b=1, mean=lambda, sd=proposal.sd.lambda)
temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, delta, delta, proposal.lambda)
det.Q.proposal <- 0.5 * sum(log((proposal.lambda * Wstar.val + (1-proposal.lambda))))
logprob.current <- det.Q.lambda - temp2.delta / tau2.delta
logprob.proposal <- det.Q.proposal - temp3 / tau2.delta
hastings <- log(dtruncnorm(x=lambda, a=0, b=1, mean=proposal.lambda, sd=proposal.sd.lambda)) - log(dtruncnorm(x=proposal.lambda, a=0, b=1, mean=lambda, sd=proposal.sd.lambda))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
lambda <- proposal.lambda
det.Q.lambda <- det.Q.proposal
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
}else
{}
#########################
## Calculate the deviance
#########################
fitted <- as.numeric(offset.mat + regression.mat + phi.mat + delta.time.mat + alpha * time.mat)
loglike <- dnorm(Y, mean = fitted, sd = rep(sqrt(nu2),N.all), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.delta[ele, ] <- delta
samples.alpha[ele, ] <- alpha
if(!fix.rho.int) samples.rho[ele, ] <- rho
if(!fix.rho.slo) samples.lambda[ele, ] <- lambda
samples.nu2[ele, ] <- nu2
samples.tau2[ele, ] <- c(tau2.phi, tau2.delta)
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
if(!fix.rho.int) proposal.sd.rho <- common.accceptrates2(accept[1:2], proposal.sd.rho, 40, 50, 0.5)
if(!fix.rho.slo) proposal.sd.lambda <- common.accceptrates2(accept[3:4], proposal.sd.lambda, 40, 50, 0.5)
accept <- rep(0,4)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.int) samples.rho <- NA
if(fix.rho.slo) samples.lambda <- NA
chain.results <- list(samples.beta=samples.beta, samples.alpha=samples.alpha, samples.phi=samples.phi, samples.delta=samples.delta, samples.tau2=samples.tau2, samples.nu2=samples.nu2, samples.rho=samples.rho, samples.lambda=samples.lambda, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
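#### A minimal, self-contained sketch (not package code) of the inverse-gamma
#### sampling idiom used throughout: if X ~ Gamma(shape, rate=b), drawn here as
#### rgamma(., shape, scale=1/b), then 1/X ~ Inverse-Gamma(shape, b). The Monte
#### Carlo mean below should be close to the theoretical inverse-gamma mean
#### b/(shape-1).
set.seed(1)
shape <- 3; b <- 2
draws <- 1 / rgamma(100000, shape, scale=(1/b))
c(empirical=mean(draws), theoretical=b/(shape-1))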
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/gaussian.CARlinearMCMC.R
|
gaussian.MVCARar1 <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho.S=NULL, rho.T=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame.MVST(formula, data, "gaussian")
NK <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
N.all <- length(Y)
J <- ncol(Y)
which.miss <- frame.results$which.miss
n.miss <- N.all - sum(which.miss)
#### W matrix
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- NK / K
if(ceiling(N)!= floor(N)) stop("The number of data points in Y divided by the number of rows in W is not a whole number.", call.=FALSE)
#### Create a missing list
if(n.miss>0)
{
miss.locator <- array(NA, c(n.miss, 2))
colnames(miss.locator) <- c("row", "column")
locations <- which(which.miss==0)
miss.locator[ ,1] <- ceiling(locations/J)
miss.locator[ ,2] <- locations - (miss.locator[ ,1]-1) * J
}else
{
miss.locator <- NA
}
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
alpha <- runif(1)
fix.rho.T <- FALSE
}else
{
alpha <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(alpha)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(length(alpha)!=1) stop("rho.T is fixed but is not of length 1.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.Sigma.df)) prior.Sigma.df <- 2
if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- rep(100000, J)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.nu2)
if(!is.numeric(prior.Sigma.scale)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE)
if(sum(is.na(prior.Sigma.scale))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- gaussian.MVCARar1MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- gaussian.MVCARar1MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=gaussian.MVCARar1MCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### End timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "nu2", "Sigma")
accept.final[1] <- 100
accept.final[2] <- 100 * results$accept[1] / results$accept[2]
if(!fix.rho.S) accept.final[3] <- 100 * results$accept[3] / results$accept[4]
if(!fix.rho.T) accept.final[4] <- 100
accept.final[5] <- 100
accept.final[6] <- 100
## Compute the fitted deviance
mean.beta <- matrix(apply(results$samples.beta, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=NK, ncol=J, byrow=T)
fitted.mean <- X.standardised %*% mean.beta + mean.phi + offset
nu2.mean <- apply(results$samples.nu2,2,mean)
deviance.fitted <- -2 * sum(dnorm(as.numeric(t(Y)), mean = as.numeric(t(fitted.mean)), sd=rep(sqrt(nu2.mean), K*N), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- matrix(apply(results$samples.fitted, 2, mean), nrow=NK, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
nu.mat <- matrix(rep(sqrt(nu2.mean), N*K), nrow=N*K, byrow=T)
pearson.residuals <- response.residuals / nu.mat
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale
samples.beta.orig <- results$samples.beta
for(r in 1:J)
{
samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(results$samples.beta[ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
}
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.alpha
colnames(samples.rhoext) <- c("rho.T")
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
names(samples.rhoext) <- "rho.S"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.alpha)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), nu2=mcmc(results$samples.nu2), Sigma=results$samples.Sigma, rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.orig, 2, mean), apply(samples.beta.orig, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c((2*J+2) ,7))
for(r in 1:J)
{
summary.hyper[r, 1] <- mean(results$samples.nu2[ ,r])
summary.hyper[r, 2:3] <- quantile(results$samples.nu2[ ,r], c(0.025, 0.975))
summary.hyper[r, 4] <- n.keep
summary.hyper[r, 5] <- 100
summary.hyper[r, 6] <- effectiveSize(results$samples.nu2[ ,r])
summary.hyper[r, 7] <- geweke.diag(results$samples.nu2[ ,r])$z
summary.hyper[r+J, 1] <- mean(results$samples.Sigma[ ,r,r])
summary.hyper[r+J, 2:3] <- quantile(results$samples.Sigma[ ,r,r], c(0.025, 0.975))
summary.hyper[r+J, 4] <- n.keep
summary.hyper[r+J, 5] <- 100
summary.hyper[r+J, 6] <- effectiveSize(results$samples.Sigma[ ,r,r])
summary.hyper[r+J, 7] <- geweke.diag(results$samples.Sigma[ ,r,r])$z
}
if(!fix.rho.S)
{
summary.hyper[(2*J+1), 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.hyper[(2*J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"])
summary.hyper[(2*J+1), 6:7] <- c(effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.hyper[(2*J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(2*J+1), 4:5] <- rep(NA, 2)
summary.hyper[(2*J+1), 6:7] <- rep(NA, 2)
}
if(!fix.rho.T)
{
summary.hyper[(2*J+2), 1:3] <- c(mean(results$samples.alpha), quantile(results$samples.alpha, c(0.025, 0.975)))
summary.hyper[(2*J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(2*J+2), 6:7] <- c(effectiveSize(results$samples.alpha), geweke.diag(results$samples.alpha)$z)
}else
{
summary.hyper[(2*J+2), 1:3] <- c(alpha, alpha, alpha)
summary.hyper[(2*J+2), 4:5] <- rep(NA, 2)
summary.hyper[(2*J+2), 6:7] <- rep(NA, 2)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("nu2",J), 1:J, sep=""), paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho.T")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "nu2", "Sigma")
accept.final[1] <- 100
accept.final[5] <- 100
accept.final[6] <- 100
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[2] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
if(!fix.rho.T) accept.final[4] <- 100
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.nu2.list <- lapply(results, function(l) l[["samples.nu2"]])
samples.nu2.matrix <- do.call(what=rbind, args=samples.nu2.list)
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]])
samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list)
}
samples.Sigma.list <- lapply(results, function(l) l[["samples.Sigma"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.beta <- matrix(apply(samples.beta.matrix, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=NK, ncol=J, byrow=T)
fitted.mean <- X.standardised %*% mean.beta + mean.phi + offset
nu2.mean <- apply(samples.nu2.matrix,2,mean)
deviance.fitted <- -2 * sum(dnorm(as.numeric(t(Y)), mean = as.numeric(t(fitted.mean)), sd=rep(sqrt(nu2.mean), K*N), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- matrix(apply(samples.fitted.matrix, 2, mean), nrow=NK, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
nu.mat <- matrix(rep(sqrt(nu2.mean), N*K), nrow=N*K, byrow=T)
pearson.residuals <- response.residuals / nu.mat
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
for(r in 1:J)
{
samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta.list[[j]][ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
}
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
nu2.mcmc <- mcmc.list(lapply(samples.nu2.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.alpha.list[[j]]) <- c("rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.alpha.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, nu2=nu2.mcmc, rho=rhoext.mcmc, Sigma=samples.Sigma.list, fitted=fitted.mcmc, Y=Y.mcmc)
## create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c((2*J+2) ,7))
for(r in 1:J)
{
temp <- NA
temp2 <- as.list(rep(NA, n.chains))
for(v in 1:n.chains)
{
temp <- c(temp, samples.Sigma.list[[v]][ ,r,r])
temp2[[v]] <- mcmc(samples.Sigma.list[[v]][ ,r,r])
}
temp <- temp[-1]
summary.hyper[r, 1] <- mean(samples.nu2.matrix[ ,r])
summary.hyper[r, 2:3] <- quantile(samples.nu2.matrix[ ,r], c(0.025, 0.975))
summary.hyper[r, 4] <- n.keep
summary.hyper[r, 5] <- 100
summary.hyper[r, 6] <- effectiveSize(nu2.mcmc)[r]
summary.hyper[r, 7] <- (gelman.diag(nu2.mcmc)$psrf[ ,2])[r]
summary.hyper[r+J, 1] <- mean(temp)
summary.hyper[r+J, 2:3] <- quantile(temp, c(0.025, 0.975))
summary.hyper[r+J, 4] <- n.keep
summary.hyper[r+J, 5] <- 100
summary.hyper[r+J, 6] <- effectiveSize(mcmc.list(temp2))
summary.hyper[r+J, 7] <- gelman.diag(mcmc.list(temp2))$psrf[ ,2]
}
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.hyper[(2*J+1), 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[(2*J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"])
summary.hyper[(2*J+1), 6:7] <- c(effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.hyper[(2*J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(2*J+1), 4:5] <- rep(NA, 2)
summary.hyper[(2*J+1), 6:7] <- rep(NA, 2)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.alpha.list, mcmc))
summary.hyper[(2*J+2), 1:3] <- c(mean(samples.alpha.matrix), quantile(samples.alpha.matrix, c(0.025, 0.975)))
summary.hyper[(2*J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(2*J+2), 6:7] <- c(effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.hyper[(2*J+2), 1:3] <- c(alpha, alpha, alpha)
summary.hyper[(2*J+2), 4:5] <- rep(NA, 2)
summary.hyper[(2*J+2), 6:7] <- rep(NA, 2)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("nu2",J), 1:J, sep=""), paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho.T")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nRandom effects model - Multivariate Autoregressive order 1 CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
#### End of file: R/gaussian.MVCARar1.R
gaussian.MVCARar1MCMC <- function(Y, offset, X.standardised, W, rho, alpha, fix.rho.S, fix.rho.T, K, N, NK, J, N.all, p, miss.locator, n.miss, burnin, n.sample, thin, prior.mean.beta, prior.var.beta, prior.nu2, prior.Sigma.df, prior.Sigma.scale, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#library(MCMCpack)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
beta <- array(NA, c(p, J))
nu2 <- rep(NA, J)
for(i in 1:J)
{
mod.glm <- lm(Y[ ,i]~X.standardised-1, offset=offset[ ,i])
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.unscaled)) * summary(mod.glm)$sigma
beta[ ,i] <- rnorm(n=p, mean=beta.mean, sd=beta.sd)
nu2[i] <- runif(1, var(mod.glm$residuals)*0.5, var(mod.glm$residuals))
}
res.temp <- Y - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi.vec <- rnorm(n=N.all, mean=0, sd=res.sd)
phi <- matrix(phi.vec, ncol=J, byrow=TRUE)
Sigma <- cov(phi)
Sigma.inv <- solve(Sigma)
Sigma.a <- rep(1, J)
regression <- X.standardised %*% beta
fitted <- regression + phi + offset
Y.DA <- Y
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, J*p))
samples.nu2 <- array(NA, c(n.keep, J))
samples.phi <- array(NA, c(n.keep, N.all))
samples.Sigma <- array(NA, c(n.keep, J, J))
samples.Sigma.a <- array(NA, c(n.keep, J))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.alpha <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, N.all))
samples.fitted <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,4)
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
Sigma.post.df <- prior.Sigma.df + J - 1 + K * N
Sigma.a.post.shape <- (prior.Sigma.df + J) / 2
nu2.posterior.shape <- prior.nu2[1] + 0.5 * K * N
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
Wstar <- diag(apply(W,1,sum)) - W
Q <- rho * Wstar + diag(rep(1-rho,K))
#### Create the determinant
if(!fix.rho.S)
{
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- sum(log((rho * Wstar.val + (1-rho))))
}else
{}
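#### Note: for the Leroux precision Q(rho) = rho * Wstar + (1 - rho) * I, the
#### determinant satisfies det(Q(rho)) = prod(rho * lambda_i + (1 - rho)), where
#### lambda_i are the eigenvalues of Wstar. Precomputing Wstar.val therefore
#### reduces each log-determinant evaluation in the rho update to an O(K) sum
#### instead of a fresh O(K^3) factorisation.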
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1 & alpha==1)
{
Sigma.post.df <- prior.Sigma.df + ((N-1) * (K-n.islands)) + J - 1
}else if(rho==1)
{
Sigma.post.df <- prior.Sigma.df + (N * (K-n.islands)) + J - 1
}else if(alpha==1)
{
Sigma.post.df <- prior.Sigma.df + ((N-1) * K) + J - 1
}else
{}
#### Beta update quantities
data.precision <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[miss.locator] <- rnorm(n=n.miss, mean=fitted[miss.locator], sd=sqrt(nu2[miss.locator[ ,2]]))
}else
{}
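#### Missing responses are imputed from their Gaussian full conditionals at the
#### current parameter values: mean equal to the fitted value and variance nu2
#### for the corresponding outcome (miss.locator[ ,2] indexes the outcome column).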
##################
## Sample from nu2
##################
fitted.current <- regression + phi + offset
nu2.posterior.scale <- prior.nu2[2] + 0.5 * apply((Y.DA - fitted.current)^2, 2, sum)
nu2 <- 1 / rgamma(J, nu2.posterior.shape, scale=(1/nu2.posterior.scale))
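#### Each nu2[r] has an inverse-gamma full conditional with shape
#### prior.nu2[1] + (K * N)/2 and scale prior.nu2[2] + 0.5 * RSS_r; it is drawn
#### here by sampling a gamma variate with rate nu2.posterior.scale (passed via
#### scale = 1/rate) and taking its reciprocal.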
###################
## Sample from beta
###################
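#### Conjugate update: the full conditional for each outcome's beta vector is
#### Gaussian with precision prior.precision.beta + X'X / nu2[r] and mean
#### solve(precision) %*% (X'(Y[ ,r] - phi[ ,r] - offset[ ,r]) / nu2[r] +
#### prior.precision.beta %*% prior.mean.beta), so it is sampled exactly via a
#### Cholesky factor of the conditional variance.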
for(r in 1:J)
{
fc.precision <- prior.precision.beta + data.precision / nu2[r]
fc.var <- solve(fc.precision)
fc.temp1 <- t(((Y.DA[, r] - phi[ , r] - offset[ , r]) %*% X.standardised) / nu2[r]) + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% fc.temp1
chol.var <- t(chol(fc.var))
beta[ ,r] <- fc.mean + chol.var %*% rnorm(p)
}
regression <- X.standardised %*% beta
##################
## Sample from phi
##################
#### Create the offset elements
den.offset <- rho * W.triplet.sum + 1 - rho
phi.offset <- Y.DA - regression - offset
#### Create the random draws to create the proposal distribution
Chol.Sigma <- t(chol(proposal.sd.phi*Sigma))
z.mat <- matrix(rnorm(n=N.all, mean=0, sd=1), nrow=J, ncol=NK)
innovations <- t(Chol.Sigma %*% z.mat)
#### Update the elements of phi
temp1 <- gaussianmvar1carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, J, phi, alpha, rho, Sigma.inv, nu2, innovations, phi.offset, den.offset)
phi <- temp1[[1]]
for(r in 1:J)
{
phi[ ,r] <- phi[ ,r] - mean(phi[ ,r])
}
accept[1] <- accept[1] + temp1[[2]]
accept[2] <- accept[2] + NK
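#### The phi update is a joint random-walk Metropolis step: each site's J-vector
#### is perturbed by correlated innovations with covariance proposal.sd.phi *
#### Sigma, and the accepted effects are re-centred to sum to zero within each
#### outcome so their levels remain identifiable against the intercepts.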
####################
## Sample from Sigma
####################
Sigma.post.scale <- 2 * prior.Sigma.df * diag(1 / Sigma.a) + t(phi[1:K, ]) %*% Q %*% phi[1:K, ]
for(t in 2:N)
{
phit <- phi[((t-1)*K+1):(t*K), ]
phitminus1 <- phi[((t-2)*K+1):((t-1)*K), ]
temp1 <- phit - alpha * phitminus1
Sigma.post.scale <- Sigma.post.scale + t(temp1) %*% Q %*% temp1
}
Sigma <- riwish(Sigma.post.df, Sigma.post.scale)
Sigma.inv <- solve(Sigma)
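#### Sigma is conjugate given phi: each AR(1) increment phi_t - alpha * phi_{t-1}
#### (with the t = 1 slice entering directly) contributes the spatial quadratic
#### form t(e) %*% Q %*% e to the inverse-Wishart scale, added to the
#### 2 * prior.Sigma.df * diag(1 / Sigma.a) term from the hierarchical prior.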
######################
## Sample from Sigma.a
######################
Sigma.a.posterior.scale <- prior.Sigma.df * diag(Sigma.inv) + 1 / prior.Sigma.scale^2
Sigma.a <- 1 / rgamma(J, Sigma.a.post.shape, scale=(1/Sigma.a.posterior.scale))
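#### Sigma.a holds the auxiliary scale parameters of the hierarchical
#### inverse-Wishart prior on Sigma (this matches the construction of Huang and
#### Wand, 2013, which induces half-t marginals on the standard deviations);
#### each element has a conjugate inverse-gamma full conditional.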
######################
#### Sample from alpha
######################
if(!fix.rho.T)
{
temp <- MVSTrhoTAR1compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, Sigma.inv)
num <- temp[[1]]
denom <- temp[[2]]
alpha <- rnorm(n=1, mean = (num / denom), sd=sqrt(1 / denom))
}else
{}
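#### alpha is conditionally Gaussian: the AR(1) prior is quadratic in alpha, so
#### MVSTrhoTAR1compute returns the linear (num) and quadratic (denom)
#### coefficients and alpha is drawn directly from N(num / denom, 1 / denom)
#### without a Metropolis step.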
##################
## Sample from rho
##################
if(!fix.rho.S)
{
## Propose a new value
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
proposal.Q <- proposal.rho * Wstar + diag(rep(1-proposal.rho), K)
proposal.det.Q <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
proposal.den.offset <- proposal.rho * W.triplet.sum + 1 - proposal.rho
## Compute the quadratic forms based on current and proposed values of rho
temp1.QF <- MVSTrhoSAR1compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, alpha, Sigma.inv)
temp2.QF <- MVSTrhoSAR1compute(W.triplet, W.triplet.sum, n.triplet, proposal.den.offset, K, N, J, phi, proposal.rho, alpha, Sigma.inv)
## Compute the acceptance rate
logprob.current <- 0.5 * J * N * det.Q - 0.5 * temp1.QF
logprob.proposal <- 0.5 * J * N * proposal.det.Q - 0.5 * temp2.QF
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- proposal.det.Q
Q <- proposal.Q
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
}else
{}
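#### rho is updated by random-walk Metropolis on [0, 1] with a truncated normal
#### proposal; the hastings term corrects for the asymmetry the truncation
#### induces near the boundaries, and accept[3:4] feed the adaptive tuning of
#### proposal.sd.rho during the burnin period.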
#########################
## Calculate the deviance
#########################
fitted <- regression + phi + offset
loglike <- dnorm(x=as.numeric(t(Y)), mean=as.numeric(t(fitted)), sd=rep(sqrt(nu2), K*N), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- as.numeric(beta)
samples.nu2[ele, ] <- nu2
samples.phi[ele, ] <- as.numeric(t(phi))
samples.Sigma[ele, , ] <- Sigma
samples.Sigma.a[ele, ] <- Sigma.a
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.alpha[ele, ] <- alpha
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- as.numeric(t(fitted))
if(n.miss>0) samples.Y[ele, ] <- Y.DA[miss.locator]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
if(!fix.rho.S)
{
proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5)
}
accept <- c(0,0,0,0)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.alpha <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.nu2=samples.nu2, samples.Sigma=samples.Sigma, samples.Sigma.a=samples.Sigma.a, samples.rho=samples.rho, samples.alpha=samples.alpha, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
#### End of file: R/gaussian.MVCARar1MCMC.R
gaussian.MVCARar2 <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho.S=NULL, rho.T=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame.MVST(formula, data, "gaussian")
NK <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
N.all <- length(Y)
J <- ncol(Y)
which.miss <- frame.results$which.miss
n.miss <- N.all - sum(which.miss)
#### W matrix
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- NK / K
if(ceiling(N)!= floor(N)) stop("The number of data points in Y divided by the number of rows in W is not a whole number.", call.=FALSE)
#### Create a missing list
if(n.miss>0)
{
miss.locator <- array(NA, c(n.miss, 2))
colnames(miss.locator) <- c("row", "column")
locations <- which(which.miss==0)
miss.locator[ ,1] <- ceiling(locations/J)
miss.locator[ ,2] <- locations - (miss.locator[ ,1]-1) * J
}else
{
miss.locator <- NA
}
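#### which.miss indexes the elements of t(Y) stacked as a single vector (the J
#### outcomes for each space-time unit are adjacent), so element 'loc' maps to
#### row ceiling(loc / J) and column loc - (row - 1) * J of Y. For example, with
#### J = 2 outcomes, loc = 5 corresponds to row 3, column 1.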
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
alpha <- c(runif(1), runif(1))
fix.rho.T <- FALSE
}else
{
alpha <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(alpha)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(length(alpha)!=2) stop("rho.T is fixed but is not of length 2.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.Sigma.df)) prior.Sigma.df <- 2
if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- rep(100000, J)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
if(!is.numeric(prior.Sigma.scale)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE)
if(sum(is.na(prior.Sigma.scale))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE)
prior.var.check(prior.nu2)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- gaussian.MVCARar2MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- gaussian.MVCARar2MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=gaussian.MVCARar2MCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "nu2", "Sigma")
accept.final[1] <- 100
accept.final[2] <- 100 * results$accept[1] / results$accept[2]
if(!fix.rho.S) accept.final[3] <- 100 * results$accept[3] / results$accept[4]
if(!fix.rho.T) accept.final[4] <- 100
accept.final[5] <- 100
accept.final[6] <- 100
## Compute the fitted deviance
mean.beta <- matrix(apply(results$samples.beta, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=NK, ncol=J, byrow=T)
fitted.mean <- X.standardised %*% mean.beta + mean.phi + offset
nu2.mean <- apply(results$samples.nu2,2,mean)
deviance.fitted <- -2 * sum(dnorm(as.numeric(t(Y)), mean = as.numeric(t(fitted.mean)), sd=rep(sqrt(nu2.mean), K*N), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- matrix(apply(results$samples.fitted, 2, mean), nrow=NK, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
nu.mat <- matrix(rep(sqrt(nu2.mean), N*K), nrow=N*K, byrow=T)
pearson.residuals <- response.residuals / nu.mat
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale
samples.beta.orig <- results$samples.beta
for(r in 1:J)
{
samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(results$samples.beta[ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
}
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.alpha
colnames(samples.rhoext) <- c("rho1.T", "rho2.T")
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
colnames(samples.rhoext) <- "rho.S"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.alpha)
colnames(samples.rhoext) <- c("rho.S", "rho1.T", "rho2.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), nu2=mcmc(results$samples.nu2), Sigma=results$samples.Sigma, rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.orig, 2, mean), apply(samples.beta.orig, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c((2*J+3) ,7))
for(r in 1:J)
{
summary.hyper[r, 1] <- mean(results$samples.nu2[ ,r])
summary.hyper[r, 2:3] <- quantile(results$samples.nu2[ ,r], c(0.025, 0.975))
summary.hyper[r, 4] <- n.keep
summary.hyper[r, 5] <- 100
summary.hyper[r, 6] <- effectiveSize(results$samples.nu2[ ,r])
summary.hyper[r, 7] <- geweke.diag(results$samples.nu2[ ,r])$z
summary.hyper[r+J, 1] <- mean(results$samples.Sigma[ ,r,r])
summary.hyper[r+J, 2:3] <- quantile(results$samples.Sigma[ ,r,r], c(0.025, 0.975))
summary.hyper[r+J, 4] <- n.keep
summary.hyper[r+J, 5] <- 100
summary.hyper[r+J, 6] <- effectiveSize(results$samples.Sigma[ ,r,r])
summary.hyper[r+J, 7] <- geweke.diag(results$samples.Sigma[ ,r,r])$z
}
if(!fix.rho.S)
{
summary.hyper[(2*J+1), 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.hyper[(2*J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"])
summary.hyper[(2*J+1), 6:7] <- c(effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.hyper[(2*J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(2*J+1), 4:5] <- rep(NA, 2)
summary.hyper[(2*J+1), 6:7] <- rep(NA, 2)
}
if(!fix.rho.T)
{
summary.hyper[(2*J+2), 1:3] <- c(mean(results$samples.alpha[ ,1]), quantile(results$samples.alpha[ ,1], c(0.025, 0.975)))
summary.hyper[(2*J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(2*J+2), 6:7] <- c(effectiveSize(results$samples.alpha[ ,1]), geweke.diag(results$samples.alpha[ ,1])$z)
summary.hyper[(2*J+3), 1:3] <- c(mean(results$samples.alpha[ ,2]), quantile(results$samples.alpha[ ,2], c(0.025, 0.975)))
summary.hyper[(2*J+3), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(2*J+3), 6:7] <- c(effectiveSize(results$samples.alpha[ ,2]), geweke.diag(results$samples.alpha[ ,2])$z)
}else
{
summary.hyper[(2*J+2), 1:3] <- c(alpha[1], alpha[1], alpha[1])
summary.hyper[(2*J+2), 4:5] <- rep(NA, 2)
summary.hyper[(2*J+2), 6:7] <- rep(NA, 2)
summary.hyper[(2*J+3), 1:3] <- c(alpha[2], alpha[2], alpha[2])
summary.hyper[(2*J+3), 4:5] <- rep(NA, 2)
summary.hyper[(2*J+3), 6:7] <- rep(NA, 2)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("nu2",J), 1:J, sep=""), paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho1.T", "rho2.T")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "nu2", "Sigma")
accept.final[1] <- 100
accept.final[5] <- 100
accept.final[6] <- 100
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[2] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
if(!fix.rho.T) accept.final[4] <- 100
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.nu2.list <- lapply(results, function(l) l[["samples.nu2"]])
samples.nu2.matrix <- do.call(what=rbind, args=samples.nu2.list)
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]])
samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list)
}
samples.Sigma.list <- lapply(results, function(l) l[["samples.Sigma"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.beta <- matrix(apply(samples.beta.matrix, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=NK, ncol=J, byrow=T)
fitted.mean <- X.standardised %*% mean.beta + mean.phi + offset
nu2.mean <- apply(samples.nu2.matrix,2,mean)
deviance.fitted <- -2 * sum(dnorm(as.numeric(t(Y)), mean = as.numeric(t(fitted.mean)), sd=rep(sqrt(nu2.mean), K*N), log = TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- matrix(apply(samples.fitted.matrix, 2, mean), nrow=NK, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
nu.mat <- matrix(rep(sqrt(nu2.mean), N*K), nrow=N*K, byrow=T)
pearson.residuals <- response.residuals / nu.mat
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
for(r in 1:J)
{
samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta.list[[j]][ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
}
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
nu2.mcmc <- mcmc.list(lapply(samples.nu2.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.alpha.list[[j]]) <- c("rho1.T", "rho2.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.alpha.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho1.T", "rho2.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, nu2=nu2.mcmc, rho=rhoext.mcmc, Sigma=samples.Sigma.list, fitted=fitted.mcmc, Y=Y.mcmc)
## create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c((2*J+3) ,7))
for(r in 1:J)
{
temp <- NA
temp2 <- as.list(rep(NA, n.chains))
for(v in 1:n.chains)
{
temp <- c(temp, samples.Sigma.list[[v]][ ,r,r])
temp2[[v]] <- mcmc(samples.Sigma.list[[v]][ ,r,r])
}
temp <- temp[-1]
summary.hyper[r, 1] <- mean(samples.nu2.matrix[ ,r])
summary.hyper[r, 2:3] <- quantile(samples.nu2.matrix[ ,r], c(0.025, 0.975))
summary.hyper[r, 4] <- n.keep
summary.hyper[r, 5] <- 100
summary.hyper[r, 6] <- effectiveSize(nu2.mcmc)[r]
summary.hyper[r, 7] <- (gelman.diag(nu2.mcmc)$psrf[ ,2])[r]
summary.hyper[r+J, 1] <- mean(temp)
summary.hyper[r+J, 2:3] <- quantile(temp, c(0.025, 0.975))
summary.hyper[r+J, 4] <- n.keep
summary.hyper[r+J, 5] <- 100
summary.hyper[r+J, 6] <- effectiveSize(mcmc.list(temp2))
summary.hyper[r+J, 7] <- gelman.diag(mcmc.list(temp2))$psrf[ ,2]
}
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.hyper[(2*J+1), 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[(2*J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"])
summary.hyper[(2*J+1), 6:7] <- c(effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.hyper[(2*J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(2*J+1), 4:5] <- rep(NA, 2)
summary.hyper[(2*J+1), 6:7] <- rep(NA, 2)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.alpha.list, mcmc))
summary.hyper[(2*J+2), 1:3] <- c(mean(samples.alpha.matrix[ ,1]), quantile(samples.alpha.matrix[ ,1], c(0.025, 0.975)))
summary.hyper[(2*J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(2*J+2), 6:7] <- c(effectiveSize(temp)[1], gelman.diag(temp)$psrf[ ,2][1])
summary.hyper[(2*J+3), 1:3] <- c(mean(samples.alpha.matrix[ ,2]), quantile(samples.alpha.matrix[ ,2], c(0.025, 0.975)))
summary.hyper[(2*J+3), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"])
summary.hyper[(2*J+3), 6:7] <- c(effectiveSize(temp)[2], gelman.diag(temp)$psrf[ ,2][2])
}else
{
summary.hyper[(2*J+2), 1:3] <- c(alpha[1], alpha[1], alpha[1])
summary.hyper[(2*J+2), 4:5] <- rep(NA, 2)
summary.hyper[(2*J+2), 6:7] <- rep(NA, 2)
summary.hyper[(2*J+3), 1:3] <- c(alpha[2], alpha[2], alpha[2])
summary.hyper[(2*J+3), 4:5] <- rep(NA, 2)
summary.hyper[(2*J+3), 6:7] <- rep(NA, 2)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("nu2",J), 1:J, sep=""), paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho1.T", "rho2.T")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nRandom effects model - Multivariate Autoregressive order 2 CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
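#### Hypothetical usage sketch (not part of the package sources): this internal
#### function is normally reached through the exported wrapper ST.CARar()
#### (family = "gaussian" and, in recent package versions, AR = 2), but it can
#### be called directly. All object names below (grid, D, W, Y, mod) are
#### illustrative only.
# K <- 25; N <- 10; J <- 2
# grid <- expand.grid(x = 1:5, y = 1:5)      # 5 x 5 lattice of areas
# D <- as.matrix(dist(grid))
# W <- ifelse(D == 1, 1, 0)                  # binary rook-neighbour adjacency
# Y <- matrix(rnorm(K * N * J), ncol = J)    # rows: all K areas at time 1, then time 2, ...
# mod <- gaussian.MVCARar2(Y ~ 1, W = W, burnin = 200, n.sample = 1000)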
#### End of file: R/gaussian.MVCARar2.R
gaussian.MVCARar2MCMC <- function(Y, offset, X.standardised, W, rho, alpha, fix.rho.S, fix.rho.T, K, N, NK, J, N.all, p, miss.locator, n.miss, burnin, n.sample, thin, prior.mean.beta, prior.var.beta, prior.nu2, prior.Sigma.df, prior.Sigma.scale, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#library(MCMCpack)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
beta <- array(NA, c(p, J))
nu2 <- rep(NA, J)
for(i in 1:J)
{
mod.glm <- lm(Y[ ,i]~X.standardised-1, offset=offset[ ,i])
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.unscaled)) * summary(mod.glm)$sigma
beta[ ,i] <- rnorm(n=p, mean=beta.mean, sd=beta.sd)
nu2[i] <- runif(1, var(mod.glm$residuals)*0.5, var(mod.glm$residuals))
}
res.temp <- Y - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi.vec <- rnorm(n=N.all, mean=0, sd=res.sd)
phi <- matrix(phi.vec, ncol=J, byrow=TRUE)
Sigma <- cov(phi)
Sigma.inv <- solve(Sigma)
Sigma.a <- rep(1, J)
regression <- X.standardised %*% beta
fitted <- regression + phi + offset
Y.DA <- Y
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, J*p))
samples.nu2 <- array(NA, c(n.keep, J))
samples.phi <- array(NA, c(n.keep, N.all))
samples.Sigma <- array(NA, c(n.keep, J, J))
samples.Sigma.a <- array(NA, c(n.keep, J))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.alpha <- array(NA, c(n.keep, 2))
samples.loglike <- array(NA, c(n.keep, N.all))
samples.fitted <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,4)
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
Sigma.post.df <- prior.Sigma.df + J - 1 + K * N
Sigma.a.post.shape <- (prior.Sigma.df + J) / 2
nu2.posterior.shape <- prior.nu2[1] + 0.5 * K * N
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
Wstar <- diag(apply(W,1,sum)) - W
Q <- rho * Wstar + diag(rep(1-rho,K))
#### Create the determinant
if(!fix.rho.S)
{
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1 & alpha[1]==2 & alpha[2]==-1)
{
Sigma.post.df <- prior.Sigma.df + ((N-2) * (K-n.islands)) + J - 1
}else if(rho==1)
{
Sigma.post.df <- prior.Sigma.df + (N * (K-n.islands)) + J - 1
}else if(alpha[1]==2 & alpha[2]==-1)
{
Sigma.post.df <- prior.Sigma.df + ((N-2) * K) + J - 1
}else
{}
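#### Degrees-of-freedom corrections for the intrinsic limits: rho = 1 gives an
#### intrinsic CAR in space (losing one unit per disconnected island), while
#### (alpha[1], alpha[2]) = (2, -1) makes the temporal prior a second-order
#### random walk (losing two time slices), so the inverse-Wishart degrees of
#### freedom are reduced to match the rank of the implied prior precision.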
#### Beta update quantities
data.precision <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[miss.locator] <- rnorm(n=n.miss, mean=fitted[miss.locator], sd=sqrt(nu2[miss.locator[ ,2]]))
}else
{}
##################
## Sample from nu2
##################
fitted.current <- regression + phi + offset
nu2.posterior.scale <- prior.nu2[2] + 0.5 * apply((Y.DA - fitted.current)^2, 2, sum)
nu2 <- 1 / rgamma(J, nu2.posterior.shape, scale=(1/nu2.posterior.scale))
###################
## Sample from beta
###################
for(r in 1:J)
{
fc.precision <- prior.precision.beta + data.precision / nu2[r]
fc.var <- solve(fc.precision)
fc.temp1 <- t(((Y.DA[, r] - phi[ , r] - offset[ , r]) %*% X.standardised) / nu2[r]) + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% fc.temp1
chol.var <- t(chol(fc.var))
beta[ ,r] <- fc.mean + chol.var %*% rnorm(p)
}
regression <- X.standardised %*% beta
##################
## Sample from phi
##################
#### Create the offset elements
den.offset <- rho * W.triplet.sum + 1 - rho
phi.offset <- Y.DA - regression - offset
#### Create the random draws to create the proposal distribution
Chol.Sigma <- t(chol(proposal.sd.phi*Sigma))
z.mat <- matrix(rnorm(n=N.all, mean=0, sd=1), nrow=J, ncol=NK)
innovations <- t(Chol.Sigma %*% z.mat)
#### Update the elements of phi
temp1 <- gaussianmvar2carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, J, phi, alpha[1], alpha[2], rho, Sigma.inv, nu2, innovations, phi.offset, den.offset)
phi <- temp1[[1]]
for(r in 1:J)
{
phi[ ,r] <- phi[ ,r] - mean(phi[ ,r])
}
accept[1] <- accept[1] + temp1[[2]]
accept[2] <- accept[2] + NK
####################
## Sample from Sigma
####################
Sigma.post.scale <- 2 * prior.Sigma.df * diag(1 / Sigma.a) + t(phi[1:K, ]) %*% Q %*% phi[1:K, ] + t(phi[(K+1):(2*K), ]) %*% Q %*% phi[(K+1):(2*K), ]
for(t in 3:N)
{
phit <- phi[((t-1)*K+1):(t*K), ]
phitminus1 <- phi[((t-2)*K+1):((t-1)*K), ]
phitminus2 <- phi[((t-3)*K+1):((t-2)*K), ]
temp1 <- phit - alpha[1] * phitminus1 - alpha[2] * phitminus2
Sigma.post.scale <- Sigma.post.scale + t(temp1) %*% Q %*% temp1
}
Sigma <- riwish(Sigma.post.df, Sigma.post.scale)
Sigma.inv <- solve(Sigma)
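#### As in the AR(1) sampler Sigma is conjugate, but for AR(2) the first two
#### time slices enter the inverse-Wishart scale directly and the remaining
#### increments are phi_t - alpha[1] * phi_{t-1} - alpha[2] * phi_{t-2}.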
######################
## Sample from Sigma.a
######################
Sigma.a.posterior.scale <- prior.Sigma.df * diag(Sigma.inv) + 1 / prior.Sigma.scale^2
Sigma.a <- 1 / rgamma(J, Sigma.a.post.shape, scale=(1/Sigma.a.posterior.scale))
######################
#### Sample from alpha
######################
if(!fix.rho.T)
{
temp <- MVSTrhoTAR2compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, Sigma.inv)
alpha.precision <- matrix(c(temp[[1]], temp[[2]], temp[[2]], temp[[3]]), nrow=2, ncol=2)
alpha.var <- solve(alpha.precision)
alpha.mean <- rep(NA, 2)
alpha.mean[2] <- (temp[[1]] * temp[[5]] - temp[[2]] * temp[[4]]) / (temp[[1]] * temp[[3]] - temp[[2]]^2)
alpha.mean[1] <- (temp[[5]] - temp[[3]] * alpha.mean[2]) / temp[[2]]
alpha <- mvrnorm(n=1, mu=alpha.mean, Sigma=alpha.var)
}else
{}
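#### For AR(2) the pair (alpha[1], alpha[2]) is jointly Gaussian:
#### MVSTrhoTAR2compute returns the three distinct elements of the 2 x 2
#### conditional precision and the right-hand side of the normal equations,
#### which are solved for the conditional mean before drawing the pair with
#### mvrnorm().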
##################
## Sample from rho
##################
if(!fix.rho.S)
{
## Propose a new value
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
proposal.Q <- proposal.rho * Wstar + diag(rep(1-proposal.rho), K)
proposal.det.Q <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
proposal.den.offset <- proposal.rho * W.triplet.sum + 1 - proposal.rho
## Compute the quadratic forms based on current and proposed values of rho
temp1.QF <- MVSTrhoSAR2compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, alpha[1], alpha[2], Sigma.inv)
temp2.QF <- MVSTrhoSAR2compute(W.triplet, W.triplet.sum, n.triplet, proposal.den.offset, K, N, J, phi, proposal.rho, alpha[1], alpha[2], Sigma.inv)
## Compute the acceptance rate
logprob.current <- 0.5 * J * N * det.Q - 0.5 * temp1.QF
logprob.proposal <- 0.5 * J * N * proposal.det.Q - 0.5 * temp2.QF
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- proposal.det.Q
Q <- proposal.Q
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
}else
{}
#########################
## Calculate the deviance
#########################
fitted <- regression + phi + offset
loglike <- dnorm(x=as.numeric(t(Y)), mean=as.numeric(t(fitted)), sd=rep(sqrt(nu2), K*N), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- as.numeric(beta)
samples.nu2[ele, ] <- nu2
samples.phi[ele, ] <- as.numeric(t(phi))
samples.Sigma[ele, , ] <- Sigma
samples.Sigma.a[ele, ] <- Sigma.a
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.alpha[ele, ] <- alpha
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- as.numeric(t(fitted))
if(n.miss>0) samples.Y[ele, ] <- Y.DA[miss.locator]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
if(!fix.rho.S)
{
proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5)
}
accept <- c(0,0,0,0)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.alpha <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.nu2=samples.nu2, samples.Sigma=samples.Sigma, samples.Sigma.a=samples.Sigma.a, samples.rho=samples.rho, samples.alpha=samples.alpha, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
#### End of file: R/gaussian.MVCARar2MCMC.R
logLik.CARBayesST <- function(object,...)
{
#### Return the log likelihood
return(object$modelfit[6])
}
#### End of file: R/logLik.CARBayesST.R
model.matrix.CARBayesST <- function(object,...)
{
#### Return the model matrix
return(object$X)
}
#### End of file: R/model.matrix.CARBayesST.R
poisson.CARadaptive <- function(formula, data = NULL, W, burnin, n.sample, thin = 1, prior.mean.beta = NULL, prior.var.beta = NULL, prior.tau2 = NULL, rho = NULL, epsilon = 0, MALA=TRUE, verbose = TRUE)
{
#### Verbose
a <- common.verbose(verbose)
blocksize.beta <- 5
blocksize.v <- 10
z <- which(W > 0, arr.ind = T)
locs <- z[which(z[,1] < z[,2]), ]
char.locs <- paste(locs[,1], ".", locs[,2], sep = "")
n.edges <- nrow(locs)
logit <- function(p) log(p/(1-p))
inv_logit <- function(v) 1/(1+exp(-v))
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
# interpret the formula
frame <- try(suppressWarnings(model.frame(formula, data = data, na.action=na.pass)), silent=TRUE)
if(inherits(frame, "try-error")) stop("the formula inputted contains an error, e.g. the variables may be different lengths.", call.=FALSE)
X <- try(suppressWarnings(model.matrix(object=attr(frame, "terms"), data=frame)), silent=TRUE)
if(sum(is.na(X))>0) stop("the covariate matrix contains missing 'NA' values.", call.=FALSE)
# get summaries of the model matrix
p <- ncol(X)
y <- model.response(frame)
which.miss <- as.numeric(!is.na(y))
n.sites <- as.integer(nrow(W))
n.time <- as.integer(length(y)/n.sites)
k <- as.integer(round(n.sites*n.time, 0))
#### Check and specify the priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
# identify and error check the offset term, if it exists.
offset <- try(model.offset(frame), silent=TRUE)
if(is.null(offset)) offset <- rep(0,(n.time * n.sites))
if(sum(is.na(offset))>0) stop("the offset has missing 'NA' values.", call.=FALSE)
if(!is.numeric(offset)) stop("the offset variable has non-numeric values.", call.=FALSE)
#### Format and check the MCMC quantities
common.burnin.nsample.thin.check(burnin, n.sample, thin)
## Standardise the model matrix,
X.standardised <- X
X.sd <- apply(X, 2, sd)
X.mean <- apply(X, 2, mean)
X.indicator <- rep(NA, p) # To determine which parameter estimates to transform back
for(j in 1:p){
if(length(table(X[ ,j])) > 2){
X.indicator[j] <- 1
X.standardised[ ,j] <- (X[ ,j] - mean(X[ ,j])) / sd(X[ ,j])
}else if(length(table(X[ ,j]))==1){
X.indicator[j] <- 2
}else{
X.indicator[j] <- 0
}
}
# based on the blocksize.v provided, create lists with the relevant indices for the untransformed edge parameter update
if(is.numeric(blocksize.v)){
## Compute the blocking structure for v
fromto <- seq(0, n.edges, by = blocksize.v)
fromto[1] <- 0
if(!n.edges %in% fromto) fromto <- c(fromto, n.edges)
n.blocks <- length(fromto) - 1
blockinds <- vector("list", length = n.blocks)
for(i in 1:n.blocks) blockinds[[i]] <- (fromto[i] + 1):fromto[i + 1]
}
# propose starting values for the adjacency elements (very close to 1)
# current temporary version of the adjacency is W_current
v <- logit(rtruncnorm(n.edges, mean = 0.999, sd = 0.001, a = 0, b=1))
v_15 <- v - 15
vqform_current <- sum(v_15^2)
W_current <- W
W_current[locs][1:n.edges] <- inv_logit(v)
W_current[locs[,2:1]][1:n.edges] <- inv_logit(v)
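# Each edge weight is w_ij = inv_logit(v_ij), so it lies in (0, 1). The v's are
# initialised near logit(0.999), and the (v - 15)^2 penalty appearing in the
# acceptance ratio corresponds to a N(15, tau_v) prior pulling weights towards
# inv_logit(15), i.e. approximately 1: edges stay 'on' unless the data support
# a boundary (w_ij near 0) between adjacent areas.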
# given the temporary adjacency, construct current (Q.space) and proposal (Q.space.prop)
# versions of the prior ICAR precision for phi, together with their triplet form tripList.
# Get the Cholesky factor of Q.space and its determinant.
# When rho = 1 the precision is singular, so a small fixed ridge is added to the diagonal.
rhofix <- rho
rho <- ifelse(!is.null(rhofix), rhofix, 0.99)
fixedridge <- epsilon
if(rho==1) fixedridge <- 0.0001
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
tripList <- vector("list", length = 2)
tripList[[1]] <- cbind(1:nrow(W_current), 1:nrow(W_current), rowSums(W_current) + fixedridge)
tripList[[2]] <- cbind(rbind(locs, locs[,2:1]), -rep(inv_logit(v), 2))
Q.space.trip <- rbind(tripList[[1]], tripList[[2]])
Q.space.trip <- updatetriplets_rho(trips = Q.space.trip, nsites = n.sites, rho_old = 1, rho_new = rho, fixedridge = fixedridge)
Q.space <- Q.space.prop <- spam(list(i = Q.space.trip[,1], j = Q.space.trip[,2], Q.space.trip[,3]))
chol.Q.space <- chol.spam(Q.space)
Q.space.det.old <- n.time*2*determinant(chol.Q.space, logarithm = T)$modulus
# propose an initial value for alpha, the temporal correlation parameter
# using alpha, create initial temporal precision matrices Q.time
alpha <- 1
if(n.time > 1){
# this bit constructs Q.time, temporal precision, its determinant, and triplet form
Q.block <- as.spam(crossprod(diff(diag(n.time))))
Q.block[1,1] <- Q.block[1,1] + 1
Dg <- diag.spam(diag.spam(Q.block))
R <- Q.block - Dg
Dtime <- diag.spam( c(rep(1,nrow(Q.block)-1), 0))
Dg <- Dg - Dtime
Q.time <- Dg + Dtime*alpha^2+ R*alpha
Q.time[n.time,n.time] <- 1
Q.det <- determinant(Q.time, logarithm = T)
detTime <- as.numeric(0.5*n.sites*(Q.det$m)*(Q.det$s))
Q.time.trip <- Reduce("cbind", triplet(Q.time))
} else {
# if n.time == 1, then Q is Q.space, and detTime is just replaced with 1.
Q.time <- 1
detTime <- 1
Q.time.trip <- matrix(rep(1, 3), ncol = 3)
}
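# Q.time is the AR(1) temporal precision matrix: crossprod(diff(diag(n.time)))
# gives the random-walk structure, which is split into diagonal (Dg, Dtime) and
# off-diagonal (R) parts so the matrix can be assembled cheaply as
# Dg + Dtime*alpha^2 + R*alpha for the current value of alpha.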
# MCMC parameter starting values
phi_tune <- 0.5
W.tune <- 1
rho.tune <- 0.1
tau_v <- 200
prior.max.tau <- 1000
increment <- 0
glm_mod <- glm(y ~-1+X.standardised, family = "quasipoisson", offset = offset)
beta.mean <- glm_mod$coefficients
beta.sd <- sqrt(diag(summary(glm_mod)$cov.scaled))
beta_par <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
log.y <- log(y)
log.y[y==0] <- -0.1
res.temp <- log.y - X.standardised %*% beta_par - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(k, mean=0, sd=res.sd)
tau <- var(phi)/10
phiQphi <- qform_ST(Qspace = Q.space.trip, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
XB <- X.standardised %*% beta_par
tau_v.shape <- (n.edges/2) + prior.tau2[1]
tau_phi_shape <- (n.sites*n.time/2) + prior.tau2[1]
# general MCMC housekeeping
n.save <- ifelse(thin == 1, (n.sample - burnin), (n.sample - burnin) / thin)
accept <- rep(0, 8)
# storage of parameters in the MCMC
samples.beta <- array(NA, c(n.save, p))
samples.phi <- array(NA, c(n.save, n.sites * n.time))
samples.tau2 <- samples.vtau2 <- samples.alpha <- samples.rho <- matrix(0, n.save, 1)
samples.v <- matrix(0, ncol = n.edges, nrow = n.save)
samples.fit <- array(NA, c(n.save, n.sites * n.time))
samples.loglike <- array(NA, c(n.save, n.sites*n.time))
# turn off spam check options to speed things up (a bit)
options(spam.cholsymmetrycheck = FALSE)
options(spam.cholpivotcheck = FALSE)
options(spam.safemode = c(F, F, F))
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
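# list.block holds, for each of the n.beta.block blocks, the zero-based column
# indices (for the C++ updater) followed by the block length, so beta is
# updated in Metropolis blocks rather than one coefficient at a time.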
proposal.sd.beta <- 0.01
proposal.corr.beta <- solve(t(X.standardised) %*% X.standardised)
chol.proposal.corr.beta <- chol(proposal.corr.beta)
# the perm ordering is used to map the @entries slot ordering to the ordering used when 'triplet' is called
perm <- order(Q.space.trip[,1], Q.space.trip[,2])
diags.space <- which(Q.space.trip[perm,1] == Q.space.trip[perm,2])
if(n.time > 1) diag.time <- Reduce("cbind", triplet(diag.spam(n.time - 1)))
time.last.diag <- which((Q.time.trip[,1] == Q.time.trip[,2]) & (Q.time.trip[,1] == n.time))
lastblock <- (k - n.sites+1):k
firstblock <- 1:n.sites
## Start timer
n.keep <- floor((n.sample - burnin)/thin)
if(verbose){
cat("Generating", n.keep, "post burnin and thinned (if requested) samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points <- round((1:100/100)*n.sample)
} else percentage.points <- round((1:100/100)*n.sample)
# -------------------------------------------------------------------------------------------
# START THE MCMC SAMPLING
# -------------------------------------------------------------------------------------------
for(j in 1:n.sample){
# START ITERATING; ONLY SAVE EVERY thin-th ITERATION AFTER BURNIN
save.iter <- (j > burnin) && (j %% thin == 0)
if(save.iter) increment <- increment+1
# update ALPHA
if(n.time > 1){
phifirst <- phi[-firstblock]
philast <- phi[-lastblock]
philastQphilast <- qform_ST(Qspace = Q.space.trip, Qtime = diag.time, phi = philast, nsites = n.sites)
phifirstQphilast <- qform_ST_asym(Qspace = Q.space.trip, Qtime = diag.time, phi1 = phifirst, phi2 = philast, nsites = n.sites)
mu_alpha <- phifirstQphilast/philastQphilast
mu_sigmasq <- tau/philastQphilast
alpha <- rtruncnorm(n=1, a=10^-5, b=1 - 10^-5, mean=mu_alpha, sd = sqrt(mu_sigmasq))
Q.time.trip <- update_Qtime(Q.time.trip, alpha, time.last.diag - 1)
phiQphi <- qform_ST(Qspace = Q.space.trip, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
detTime <- determinant(Q.time, logarithm = TRUE)
detTime <- (detTime$m)*(detTime$s)
}
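# alpha has a conditionally Gaussian full conditional: writing the temporal AR
# term as phi_t = alpha * phi_{t-1} + noise, the mean is the generalised least
# squares ratio phifirst' Q philast / philast' Q philast and the variance is
# tau over the same quadratic form, truncated here to (0, 1).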
# Gibbs update of tau_v
tau_scale <- vqform_current/2 + prior.tau2[2]
tau_v <- 1/rtrunc(n=1, spec="gamma", a=0.000001, b=Inf, shape=tau_v.shape, scale=(1/tau_scale))
#tau_v <- 1/rtgamma(n=1, shape=tau_v.shape, scale=tau_scale, min=0.000001, max=Inf)
v.proposal <- rtruncnorm(n = n.edges, a=-15, b=15, mean = v, sd = W.tune)
for(i in 1:n.blocks){
# propose new v for the i^th block
vnew <- v
block_inds <- blockinds[[i]]
vnew[block_inds] <- v.proposal[block_inds]
# update the spatial precision matrix using a C++ loop.
# This is efficient because changes are only made where vnew differs from v.
tripUpdate <- updatetripList2(Q.space.trip, vold = v, vnew = vnew, nedges = n.edges,
nsites = n.sites, block = block_inds,
block_length = length(block_inds), fixedridge = fixedridge, rho = rho)
Q.space.trip.prop <- tripUpdate[[1]]
Q.space.trip.diff <- tripUpdate[[2]]
# combine the result back into triplet matrix (Q.space.trip.prop), and spam matrix (Q.space.prop)
Q.space.prop@entries <- Q.space.trip.prop[perm,3]
# acceptance ratio requires calculation of phi'Q_prop phi - phi'Q phi.
# Do this quickly by taking the difference between the old and new triplets and
# working out the quadratic form of the difference directly. Much faster than
# computing the two quadratic forms separately.
Q.space.trip.diff[, 3]<- Q.space.trip[, 3] - Q.space.trip.prop[,3]
phiQphi_phiQphiNew <- qform_difference_ST(Qtrip = Q.space.trip.diff, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
# update the cholesky of the precision matrix & calculate the determinant
chol.Q.space.prop <- update(chol.Q.space, x = Q.space.prop)
detSpace <- 2*determinant(chol.Q.space.prop, logarithm = T)$modulus
Q.space.det.prop <- n.sites*detTime + n.time*detSpace
v_15_prop <- vnew - 15
vqform_prop <- sum(v_15_prop^2)
acceptance <- exp(0.5*(Q.space.det.prop - Q.space.det.old) + (1/(2*tau))*(phiQphi_phiQphiNew)
+ 0.5*(1/tau_v)*(vqform_current - vqform_prop))
accept[8] <- accept[8] + (1/n.blocks)
if(runif(1) <= acceptance){
vqform_current <- vqform_prop
v <- vnew
accept[7] <- accept[7] + (1/n.blocks)
Q.space.det.old <- Q.space.det.prop
Q.space.trip <- Q.space.trip.prop
chol.Q.space <- chol.Q.space.prop
Q.space <- Q.space.prop
}
}
# update BETA
offset.temp <- offset + as.numeric(phi)
if(MALA)
{
temp <- poissonbetaupdateMALA(X.standardised, k, p, beta_par, offset.temp, y, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(X.standardised, k, p, beta_par, offset.temp, y, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta_par <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
XB <- X.standardised %*% beta_par
# update PHI using one at a time M-H sampling
nneighbours <- diag.spam(Q.space)
W_current <- diag(nneighbours) - as.matrix(Q.space)
phi_update <- SPTICARphiVarb(W = W_current, nsites = n.sites, ntimes = n.time, phiVarb = phi,
nneighbours = nneighbours, tau = tau, y = y, E = offset,
phiVarb_tune = phi_tune, alpha = alpha, XB = XB)
phi <- phi_update[[2]]
phi <- phi - mean(phi)
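    # mean-centre phi to impose the sum-to-zero identifiability constraint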
accept[3] <- accept[3] + phi_update[[1]][2]
accept[4] <- accept[4] + k
# update rho, the spatial leroux parameter
if(!is.null(rhofix)){
proposal.rho <- rhofix
} else {
proposal.rho <- rtruncnorm(n = 1, a=0, b=1, mean = rho, sd = rho.tune)
}
Q.space.trip.prop <- updatetriplets_rho(trips = Q.space.trip, nsites = n.sites, rho_old = rho, rho_new = proposal.rho, fixedridge = fixedridge)
Q.space.prop@entries <- Q.space.trip.prop[perm,3]
Q.space.trip.diff[, 3] <- Q.space.trip[, 3] - Q.space.trip.prop[,3]
phiQphi_phiQphiNew <- qform_difference_ST(Qtrip = Q.space.trip.diff, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
# update the cholesky of the precision matrix & calculate the determinant
chol.Q.space.prop <- update(chol.Q.space, x = Q.space.prop)
detSpace <- 2*determinant(chol.Q.space.prop, logarithm = T)$modulus
Q.space.det.prop <- n.sites*detTime + n.time*detSpace
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=rho.tune)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=rho.tune))
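    # Hastings correction for the asymmetry of the truncated-normal proposal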
acceptance <- exp(0.5*(Q.space.det.prop - Q.space.det.old) + (1/(2*tau))*(phiQphi_phiQphiNew) + hastings)
accept[6] <- accept[6] + 1
if(runif(1) <= acceptance){
accept[5] <- accept[5] + 1
Q.space.det.old <- Q.space.det.prop
Q.space.trip <- Q.space.trip.prop
chol.Q.space <- chol.Q.space.prop
Q.space <- Q.space.prop
rho <- proposal.rho
}
# Gibbs update TAU using the gamma distribution
phiQphi <- qform_ST(Qspace = Q.space.trip, Qtime = Q.time.trip, phi = phi, nsites = n.sites)
tau_scale <- phiQphi/2 + prior.tau2[2]
tau <- 1/rtrunc(n=1, spec="gamma", a=0.000001, b=Inf, shape=tau_phi_shape, scale=(1/tau_scale))
#tau <- 1/rtgamma(n=1, shape=tau_phi_shape, scale=tau_scale, min=0.000001, max=Inf)
# calculate the deviance and fitted values
fitted <- exp(as.vector(XB) + phi + offset)
loglike <- dpois(x=as.numeric(y), lambda=fitted, log=TRUE)
# save samples if past burnin
if(save.iter){
samples.beta[increment,] <- beta_par
samples.phi[increment,] <- phi
samples.fit[increment, ] <- fitted
samples.tau2[increment,] <- tau
samples.vtau2[increment,] <- tau_v
samples.v[increment,] <- v
samples.alpha[increment,] <- alpha
samples.rho[increment,] <- rho
samples.loglike[increment, ] <- loglike
}
# adjust the acceptance rate if required
if(j %% 100 == 0 & j < burnin){
accept.beta <- 100 * accept[1] / accept[2]
accept.phi <- 100 * accept[3] / accept[4]
accept.w <- 100 * accept[7] / accept[8]
if(is.null(rhofix))
{
accept.rho <- 100 * accept[5] / accept[6]
}else
{
accept.rho <- 45
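      # dummy rate inside the (40, 50) no-change band so rho.tune is not adjusted
      # when rho is fixed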
}
#### beta tuning parameter
if(accept.beta > 50)
{
proposal.sd.beta <- proposal.sd.beta + 0.1 * proposal.sd.beta
}else if(accept.beta < 40)
{
proposal.sd.beta <- proposal.sd.beta - 0.1 * proposal.sd.beta
}else
{
}
#### phi tuning parameter
if(accept.phi > 50)
{
phi_tune <- phi_tune + 0.1 * phi_tune
}else if(accept.phi < 40)
{
phi_tune <- phi_tune - 0.1 * phi_tune
}else
{
}
#### w tuning parameter
if(accept.w > 40)
{
W.tune <- W.tune + 0.1 * W.tune
}else if(accept.w < 20)
{
W.tune <- W.tune - 0.1 * W.tune
}else
{
}
#### rho tuning parameter
if(accept.rho > 50)
{
rho.tune <- min(rho.tune + 0.1 * rho.tune, 0.5)
}else if(accept.rho < 40)
{
rho.tune <- rho.tune - 0.1 * rho.tune
}else
{
}
accept <- accept*0
}else
{}
# print progress to the console
if(j %in% percentage.points & verbose) setTxtProgressBar(progressBar, j/n.sample)
}
# end timer
if(verbose)
{
cat("\nSummarising results.")
close(progressBar)
}else
{}
###################################
#### Summarise and save the results
###################################
## Compute the acceptance rates
accept.beta <- 100 * accept[1] / accept[2]
accept.phi <- 100 * accept[3] / accept[4]
accept.rho <- 100 * accept[5] / accept[6]
accept.w <- 100 * accept[7] / accept[8]
accept.alpha <- 100
if(!is.null(rhofix))
{
accept.final <- c(accept.beta, accept.phi, accept.w)
names(accept.final) <- c("beta", "phi", "w")
}else
{
accept.final <- c(accept.beta, accept.phi, accept.rho,accept.w)
names(accept.final) <- c("beta", "phi", "rho", "w")
}
#### Compute the fitted deviance
mean.beta <- apply(samples.beta, 2, mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow = n.sites, ncol = n.time, byrow=FALSE)
mean.phi <- matrix(apply(samples.phi, 2, mean), nrow = n.sites, ncol = n.time)
offset.mat <- matrix(offset, nrow = n.sites, ncol = n.time, byrow=FALSE)
fitted.mean <- as.numeric(exp(offset.mat + mean.phi + regression.mat))
deviance.fitted <- -2 * sum(dpois(x=as.numeric(y), lambda=fitted.mean, log=TRUE))
#### Model fit criteria
modelfit <- common.modelfit(samples.loglike, deviance.fitted)
#### Create the fitted values and residuals
fitted.values <- apply(samples.fit, 2, mean)
response.residuals <- as.numeric(y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
  #### Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
#### Create a summary object
samples.beta.orig <- mcmc(samples.beta.orig)
summary.beta <- t(rbind(apply(samples.beta.orig, 2, mean), apply(samples.beta.orig, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.save, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(4, 7))
summary.hyper[1,1:3] <- c(mean(samples.tau2), quantile(samples.tau2, c(0.025, 0.975)))
summary.hyper[2,1:3] <- c(mean(samples.rho), quantile(samples.rho, c(0.025, 0.975)))
summary.hyper[3,1:3] <- c(mean(samples.alpha), quantile(samples.alpha, c(0.025, 0.975)))
summary.hyper[4,1:3] <- c(mean(samples.vtau2), quantile(samples.vtau2, c(0.025, 0.975)))
rownames(summary.hyper) <- c("tau2", "rho.S", "rho.T", "tau2.w")
summary.hyper[1, 4:7] <- c(n.save, 100, effectiveSize(mcmc(samples.tau2)), geweke.diag(mcmc(samples.tau2))$z)
summary.hyper[2, 4:7] <- c(n.save, accept.rho, effectiveSize(mcmc(samples.rho)), geweke.diag(mcmc(samples.rho))$z)
summary.hyper[3, 4:7] <- c(n.save, accept.alpha, effectiveSize(mcmc(samples.alpha)), geweke.diag(mcmc(samples.alpha))$z)
summary.hyper[4, 4:7] <- c(n.save, 100, effectiveSize(mcmc(samples.vtau2)), geweke.diag(mcmc(samples.vtau2))$z)
if(!is.null(rhofix))
{
summary.hyper[2, ] <- c(rep(rhofix, 3),rep(NA, 4))
}
summary.results <- rbind(summary.beta, summary.hyper)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
# convert v back to w, summarise and create a 'fitted' adjacency matrix
samples.w <- inv_logit(samples.v)
colnames(samples.w) <- char.locs
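  # get_prop_thresh returns 1 when at least 99% of the posterior samples of an
  # edge weight w fall below thresh, flagging that edge as confidently removed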
get_prop_thresh <- function(v, thresh) as.numeric(!((sum(v < thresh)/length(v)) < 0.99))
bdry99 <- apply(samples.w, 2, get_prop_thresh, thresh = 0.5)
bdryMN <- apply(samples.w, 2, mean)
Wmn <- W99 <- matrix(NA, nrow = n.sites, ncol = n.sites)
W99[locs] <- bdry99
W99[locs[ ,c(2,1)]] <- bdry99
Wmn[locs] <- bdryMN
Wmn[locs[ ,c(2,1)]] <- bdryMN
#### Compile and return the results
model.string <- c("Likelihood model - Poisson (log link function)",
"\nLatent structure model - Adaptive autoregressive order 1 CAR model\n")
samples.tau2all <- cbind(samples.tau2, samples.vtau2)
colnames(samples.tau2all) <- c("tau2", "tau2.w")
if(is.null(rhofix))
{
samples.rhoext <- cbind(samples.rho, samples.alpha)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}else
{
samples.rhoext <- cbind(samples.alpha)
    colnames(samples.rhoext) <- c("rho.T")
}
samples <- list(beta = mcmc(samples.beta.orig), phi = mcmc(samples.phi), rho = mcmc(samples.rhoext),
tau2 = mcmc(samples.tau2all), w = mcmc(samples.w), fitted = mcmc(samples.fit))
localised.structure <- list(Wmedian = Wmn, W99 = W99)
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=localised.structure, formula=formula, model=model.string, X=X)
class(results) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
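## Hedged note (not part of the original source): the returned CARBayesST object
## from this adaptive model also carries the estimated adjacency information, e.g.
# mod$localised.structure$W99       # 0/1 matrix flagging confidently removed edges
# mod$localised.structure$Wmedian   # posterior mean weight for each edge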
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.CARadaptive.R
|
poisson.CARanova <- function(formula, data=NULL, W, interaction=TRUE, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "poisson")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
lambda <- runif(1)
fix.rho.T <- FALSE
}else
{
lambda <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(lambda)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(lambda<0 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
if(lambda>1 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
#### Checks on the interaction flag
if(sum(interaction==c(TRUE, FALSE)) != 1) stop("interaction must be either TRUE or FALSE.", call.=FALSE)
if(length(interaction) != 1) stop("interaction must be of length 1.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
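  # list.block holds, for each block, a 0-based index vector followed by its length;
  # note R's ':' binds more tightly than '-', so beta.beg[r]:beta.fin[r]-1 shifts
  # the whole block down by one for the C++ beta-updating routines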
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- poisson.CARanovaMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, interaction=interaction, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- poisson.CARanovaMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, interaction=interaction, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=poisson.CARanovaMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, interaction=interaction, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "phi", "delta", "gamma", "rho.S", "rho.T")
accept.final[1] <- 100 * results$accept[1] / results$accept[2]
accept.final[2] <- 100 * results$accept[3] / results$accept[4]
accept.final[3] <- 100 * results$accept[5] / results$accept[6]
if(interaction) accept.final[4] <- 100 * results$accept[7] / results$accept[8]
if(!fix.rho.S) accept.final[5] <- 100 * results$accept[9] / results$accept[10]
if(!fix.rho.T) accept.final[6] <- 100 * results$accept[11] / results$accept[12]
## Compute the fitted deviance
mean.phi <- apply(results$samples.phi, 2, mean)
mean.delta <- apply(results$samples.delta, 2, mean)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K)
mean.delta.mat <- matrix(rep(mean.delta, K), byrow=T, nrow=K)
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
if(interaction)
{
mean.gamma <- apply(results$samples.gamma, 2,mean)
mean.gamma.mat <- matrix(mean.gamma, byrow=F, nrow=K)
fitted.mean <- as.numeric(exp(offset.mat + regression.mat + mean.phi.mat + mean.delta.mat + mean.gamma.mat))
}else
{
fitted.mean <- as.numeric(exp(offset.mat + regression.mat + mean.phi.mat + mean.delta.mat))
}
deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
    ## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.lambda
        colnames(samples.rhoext) <- "rho.T"
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
        colnames(samples.rhoext) <- "rho.S"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.lambda)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}
if(interaction)
{
colnames(results$samples.tau2) <- c("tau2.S", "tau2.T", "tau2.I")
}else
{
colnames(results$samples.tau2) <- c("tau2.S", "tau2.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), delta=mcmc(results$samples.delta), gamma=mcmc(results$samples.gamma), tau2=mcmc(results$samples.tau2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
n.tau2 <- ncol(results$samples.tau2)
summary.tau2 <- cbind(apply(results$samples.tau2, 2, mean), t(apply(results$samples.tau2, 2, quantile, c(0.025, 0.975))), rep(n.keep, n.tau2), rep(100, n.tau2),
effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
if(interaction)
{
rownames(summary.tau2) <- c("tau2.S", "tau2.T", "tau2.I")
}else
{
rownames(summary.tau2) <- c("tau2.S", "tau2.T")
}
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
summary.rho[2, 1:3] <- c(mean(results$samples.lambda), quantile(results$samples.lambda, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.lambda), geweke.diag(results$samples.lambda)$z)
}else
{
summary.rho[2, 1:3] <- c(lambda, lambda, lambda)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.tau2, summary.rho)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 6)
names(accept.final) <- c("beta", "phi", "delta", "gamma", "rho.S", "rho.T")
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.final[2] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.final[3] <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
if(interaction) accept.final[4] <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8])
if(!fix.rho.S) accept.final[5] <- 100 * sum(accept.temp2[ ,9]) / sum(accept.temp2[ ,10])
if(!fix.rho.T) accept.final[6] <- 100 * sum(accept.temp2[ ,11]) / sum(accept.temp2[ ,12])
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.delta.list <- lapply(results, function(l) l[["samples.delta"]])
samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list)
if(interaction)
{
samples.gamma.list <- lapply(results, function(l) l[["samples.gamma"]])
samples.gamma.matrix <- do.call(what=rbind, args=samples.gamma.list)
}
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]])
samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list)
}
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.delta <- apply(samples.delta.matrix, 2, mean)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K)
mean.delta.mat <- matrix(rep(mean.delta, K), byrow=T, nrow=K)
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
if(interaction)
{
mean.gamma <- apply(samples.gamma.matrix, 2,mean)
mean.gamma.mat <- matrix(mean.gamma, byrow=F, nrow=K)
fitted.mean <- as.numeric(exp(offset.mat + regression.mat + mean.phi.mat + mean.delta.mat + mean.gamma.mat))
}else
{
fitted.mean <- as.numeric(exp(offset.mat + regression.mat + mean.phi.mat + mean.delta.mat))
}
deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
delta.mcmc <- mcmc.list(lapply(samples.delta.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(interaction)
{
for(j in 1:n.chains)
{
colnames(samples.tau2.list[[j]]) <- c("tau2.S", "tau2.T", "tau2.I")
}
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
gamma.mcmc <- mcmc.list(lapply(samples.gamma.list, mcmc))
}else
{
for(j in 1:n.chains)
{
colnames(samples.tau2.list[[j]]) <- c("tau2.S", "tau2.T")
}
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
gamma.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.lambda.list[[j]]) <- c("rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.lambda.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.lambda.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, delta=delta.mcmc, gamma=gamma.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
n.tau2 <- ncol(samples.tau2.matrix)
summary.tau2 <- cbind(apply(samples.tau2.matrix, 2, mean), t(apply(samples.tau2.matrix, 2, quantile, c(0.025, 0.975))), rep(n.keep, n.tau2), rep(100, n.tau2),
effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
if(interaction)
{
rownames(summary.tau2) <- c("tau2.S", "tau2.T", "tau2.I")
}else
{
rownames(summary.tau2) <- c("tau2.S", "tau2.T")
}
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.lambda.list, mcmc))
summary.rho[2, 1:3] <- c(mean(samples.lambda.matrix), quantile(samples.lambda.matrix, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[2, 1:3] <- c(lambda, lambda, lambda)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.tau2, summary.rho)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
if(interaction)
{
model.string <- c("Likelihood model - Poisson (log link function)", "\nLatent structure model - spatial and temporal main effects and an interaction\n")
}else
{
model.string <- c("Likelihood model - Poisson (log link function)", "\nLatent structure model - spatial and temporal main effects\n")
}
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
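## A hedged usage sketch (not part of the original source): poisson.CARanova() is
## normally reached via the user-facing ST.CARanova() wrapper, but the signature
## above implies a direct call of the following form, where 'dat' is a hypothetical
## data frame ordered with all K spatial units at time 1 first, and W is a K x K
## binary neighbourhood matrix:
# mod <- poisson.CARanova(formula=Y~x1+x2, data=dat, W=W, interaction=TRUE,
#                         burnin=20000, n.sample=100000, thin=10, n.chains=2)
# mod$summary.results    # posterior summaries for beta, tau2, rho.S and rho.T
# mod$modelfit           # model fit criteria computed by common.modelfit()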
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.CARanova.R
|
poisson.CARanovaMCMC <- function(Y, offset, X.standardised, W, interaction, rho, lambda, fix.rho.S, fix.rho.T, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
res.temp <- log.Y - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=0, sd = res.sd)
delta <- rnorm(n=N, mean=0, sd = res.sd)
tau2.phi <- var(phi)/10
tau2.delta <- var(delta)/10
if(interaction)
{
gamma <- rnorm(n=N.all, mean=0, sd = res.sd)
tau2.gamma <- var(gamma)/10
}else
{}
#### Matrix versions
Y.DA <- Y
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
delta.mat <- matrix(rep(delta, K), byrow=T, nrow=K)
if(interaction)
{
gamma.mat <- matrix(gamma, byrow=F, nrow=K)
}else
{
gamma.mat <- matrix(rep(0, N.all), byrow=F, nrow=K)
}
fitted <- exp(as.numeric(offset.mat + regression.mat + phi.mat + delta.mat + gamma.mat))
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.delta <- array(NA, c(n.keep, N))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.lambda <- array(NA, c(n.keep, 1))
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
if(interaction)
{
samples.gamma <- array(NA, c(n.keep, N.all))
samples.tau2 <- array(NA, c(n.keep, 3))
colnames(samples.tau2) <- c("tau2.phi", "tau2.delta", "tau2.gamma")
}else
{
samples.tau2 <- array(NA, c(n.keep, 2))
colnames(samples.tau2) <- c("tau2.phi", "tau2.delta")
}
#### Specify the Metropolis quantities
accept <- rep(0,12)
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
proposal.sd.delta <- 0.1
proposal.sd.rho <- 0.02
proposal.sd.lambda <- 0.02
tau2.phi.shape <- prior.tau2[1] + K/2
tau2.delta.shape <- prior.tau2[1] + N/2
if(interaction)
{
proposal.sd.gamma <- 0.1
tau2.gamma.shape <- prior.tau2[1] + N*K/2
}else
{
}
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Spatial determinant
if(!fix.rho.S)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q.W <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Temporal neighbourhood matrix
D <-array(0, c(N,N))
for(i in 1:N)
{
for(j in 1:N)
{
if(abs((i-j))==1) D[i,j] <- 1
}
}
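    # equivalently, D <- 1*(abs(outer(1:N, 1:N, "-")) == 1) builds the same
    # first-order temporal adjacency matrix without the double loop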
#### Temporal triplet object
D.triplet <- c(NA, NA, NA)
for(i in 1:N)
{
for(j in 1:N)
{
if(D[i,j]>0)
{
D.triplet <- rbind(D.triplet, c(i,j, D[i,j]))
}else{}
}
}
D.triplet <- D.triplet[-1, ]
D.n.triplet <- nrow(D.triplet)
D.triplet.sum <- tapply(D.triplet[ ,3], D.triplet[ ,1], sum)
D.neighbours <- tapply(D.triplet[ ,3], D.triplet[ ,1], length)
#### Temporal begfin argument
D.begfin <- array(NA, c(N, 2))
temp <- 1
for(i in 1:N)
{
D.begfin[i, ] <- c(temp, (temp + D.neighbours[i]-1))
temp <- temp + D.neighbours[i]
}
#### Temporal determinant
if(!fix.rho.T)
{
Dstar <- diag(apply(D,1,sum)) - D
Dstar.eigen <- eigen(Dstar)
Dstar.val <- Dstar.eigen$values
det.Q.D <- 0.5 * sum(log((lambda * Dstar.val + (1-lambda))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.phi.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
if(lambda==1) tau2.delta.shape <- prior.tau2[1] + 0.5 * (N-1)
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0])
}else
{}
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
###################
## Sample from beta
###################
offset.temp <- offset + as.numeric(phi.mat) + as.numeric(delta.mat) + as.numeric(gamma.mat)
if(MALA)
{
temp <- poissonbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from phi
####################
phi.offset <- offset.mat + regression.mat + delta.mat + gamma.mat
temp1 <- poissoncarupdateRW(W.triplet, W.begfin, W.triplet.sum, K, phi, tau2.phi, Y.DA.mat, proposal.sd.phi, rho, phi.offset, N, rep(1,N))
phi <- temp1[[1]]
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K
####################
## Sample from delta
####################
delta.offset <- t(offset.mat + regression.mat + phi.mat + gamma.mat)
temp2 <- poissoncarupdateRW(D.triplet, D.begfin, D.triplet.sum, N, delta, tau2.delta, t(Y.DA.mat), proposal.sd.delta, lambda, delta.offset, K, rep(1,K))
delta <- temp2[[1]]
delta <- delta - mean(delta)
delta.mat <- matrix(rep(delta, K), byrow=T, nrow=K)
accept[5] <- accept[5] + temp2[[2]]
accept[6] <- accept[6] + N
if(interaction)
{
####################
## Sample from gamma
####################
gamma.offset <- offset.mat + regression.mat + phi.mat + delta.mat
gamma.offset.vec <- as.numeric(gamma.offset)
temp5 <- poissonindepupdateRW(N.all, gamma, tau2.gamma, Y.DA, proposal.sd.gamma, gamma.offset.vec)
gamma <- temp5[[1]]
gamma <- gamma - mean(gamma)
gamma.mat <- matrix(gamma, byrow=F, nrow=K)
accept[7] <- accept[7] + temp5[[2]]
accept[8] <- accept[8] + N * K
#########################
## Sample from tau2.gamma
#########################
tau2.gamma.scale <- prior.tau2[2] + sum(gamma.mat^2)/2
tau2.gamma <- 1 / rgamma(1, tau2.gamma.shape, scale=(1/tau2.gamma.scale))
}else
{}
#######################
## Sample from tau2.phi
#######################
temp2.phi <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, rho)
tau2.phi.scale <- temp2.phi + prior.tau2[2]
tau2.phi <- 1 / rgamma(1, tau2.phi.shape, scale=(1/tau2.phi.scale))
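    # conjugate Gibbs step: 1/tau2.phi ~ Gamma(tau2.phi.shape, rate=tau2.phi.scale),
    # i.e. an inverse-gamma full conditional for tau2.phi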
#########################
## Sample from tau2.delta
#########################
temp2.delta <- quadform(D.triplet, D.triplet.sum, D.n.triplet, N, delta, delta, lambda)
tau2.delta.scale <- temp2.delta + prior.tau2[2]
tau2.delta <- 1 / rgamma(1, tau2.delta.shape, scale=(1/tau2.delta.scale))
##################
## Sample from rho
##################
if(!fix.rho.S)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q.W - temp2.phi / tau2.phi
logprob.proposal <- det.Q.proposal - temp3 / tau2.phi
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.W <- det.Q.proposal
accept[9] <- accept[9] + 1
}else
{
}
accept[10] <- accept[10] + 1
}else
{}
#####################
## Sample from lambda
#####################
if(!fix.rho.T)
{
proposal.lambda <- rtruncnorm(n=1, a=0, b=1, mean=lambda, sd=proposal.sd.lambda)
temp3 <- quadform(D.triplet, D.triplet.sum, D.n.triplet, N, delta, delta, proposal.lambda)
det.Q.proposal <- 0.5 * sum(log((proposal.lambda * Dstar.val + (1-proposal.lambda))))
logprob.current <- det.Q.D - temp2.delta / tau2.delta
logprob.proposal <- det.Q.proposal - temp3 / tau2.delta
hastings <- log(dtruncnorm(x=lambda, a=0, b=1, mean=proposal.lambda, sd=proposal.sd.lambda)) - log(dtruncnorm(x=proposal.lambda, a=0, b=1, mean=lambda, sd=proposal.sd.lambda))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
lambda <- proposal.lambda
det.Q.D <- det.Q.proposal
accept[11] <- accept[11] + 1
}else
{
}
accept[12] <- accept[12] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- as.numeric(offset.mat + regression.mat + phi.mat + delta.mat + gamma.mat)
fitted <- exp(lp)
loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.delta[ele, ] <- delta
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.lambda[ele, ] <- lambda
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
if(interaction)
{
samples.gamma[ele, ] <- gamma
samples.tau2[ele, ] <- c(tau2.phi, tau2.delta, tau2.gamma)
}else
{
samples.tau2[ele, ] <- c(tau2.phi, tau2.delta)
}
}else
{}
########################################
    ## Self tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
proposal.sd.delta <- common.accceptrates1(accept[5:6], proposal.sd.delta, 40, 50)
if(interaction) proposal.sd.gamma <- common.accceptrates1(accept[7:8], proposal.sd.gamma, 40, 50)
if(!fix.rho.S) proposal.sd.rho <- common.accceptrates2(accept[9:10], proposal.sd.rho, 40, 50, 0.5)
if(!fix.rho.T) proposal.sd.lambda <- common.accceptrates2(accept[11:12], proposal.sd.lambda, 40, 50, 0.5)
accept <- rep(0,12)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.lambda <- NA
if(!interaction) samples.gamma <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.delta=samples.delta, samples.gamma=samples.gamma, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.lambda=samples.lambda, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.CARanovaMCMC.R
|
poisson.CARar1 <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "poisson")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check on the rho arguments
if(is.null(rho.S))
{
rho <- runif(1)
fix.rho.S <- FALSE
}else
{
rho <- rho.S
fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.T))
{
gamma <- runif(1)
fix.rho.T <- FALSE
}else
{
gamma <- rho.T
fix.rho.T <- TRUE
}
if(!is.numeric(gamma)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(gamma<0 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
if(gamma>1 ) stop("rho.T is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- poisson.CARar1MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, gamma=gamma, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- poisson.CARar1MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, gamma=gamma, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=poisson.CARar1MCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, gamma=gamma, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
#### If n.chains==1
## Compute the acceptance rates
accept.final <- rep(NA, 4)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T")
accept.final[1] <- 100 * results$accept[1] / results$accept[2]
accept.final[2] <- 100 * results$accept[3] / results$accept[4]
if(!fix.rho.S) accept.final[3] <- 100 * results$accept[5] / results$accept[6]
if(!fix.rho.T) accept.final[4] <- 100
## Compute the fitted deviance
mean.beta <- apply(results$samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=N)
fitted.mean <- as.numeric(exp(offset.mat + mean.phi + regression.mat))
deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
    ## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
## Create the samples object
if(fix.rho.S & fix.rho.T)
{
samples.rhoext <- NA
}else if(fix.rho.S & !fix.rho.T)
{
samples.rhoext <- results$samples.gamma
        colnames(samples.rhoext) <- "rho.T"
}else if(!fix.rho.S & fix.rho.T)
{
samples.rhoext <- results$samples.rho
        colnames(samples.rhoext) <- "rho.S"
}else
{
samples.rhoext <- cbind(results$samples.rho, results$samples.gamma)
colnames(samples.rhoext) <- c("rho.S", "rho.T")
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), tau2=mcmc(results$samples.tau2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.tau2 <- c(mean(results$samples.tau2), quantile(results$samples.tau2, c(0.025, 0.975)), n.keep, 100,
effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z)
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
summary.rho[2, 1:3] <- c(mean(results$samples.gamma), quantile(results$samples.gamma, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.gamma), geweke.diag(results$samples.gamma)$z)
}else
{
summary.rho[2, 1:3] <- c(gamma, gamma, gamma)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.tau2, summary.rho)
rownames(summary.results)[(p+1)] <- "tau2"
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
#### If n.chains > 1
## Compute the acceptance rates
accept.final <- rep(NA, 4)
names(accept.final) <- c("beta", "phi", "rho.S", "rho.T")
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.final[1] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.final[2] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
if(!fix.rho.T) accept.final[4] <- 100
## Extract the samples into separate matrix and list objects
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
if(!fix.rho.S)
{
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
}
if(!fix.rho.T)
{
samples.gamma.list <- lapply(results, function(l) l[["samples.gamma"]])
samples.gamma.matrix <- do.call(what=rbind, args=samples.gamma.list)
}
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Compute the fitted deviance
mean.beta <- apply(samples.beta.matrix,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=N)
fitted.mean <- as.numeric(exp(offset.mat + mean.phi + regression.mat))
deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Transform the parameters back to the original covariate scale.
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc))
phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc))
fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc))
tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc))
if(n.miss>0)
{
Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc))
}else
{
Y.mcmc <- NA
}
if(fix.rho.S & fix.rho.T)
{
rhoext.mcmc <- NA
}else if(fix.rho.S & !fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.gamma.list[[j]]) <- c("rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(samples.gamma.list, mcmc))
}else if(!fix.rho.S & fix.rho.T)
{
for(j in 1:n.chains)
{
colnames(samples.rho.list[[j]]) <- c("rho.S")
}
rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc))
}else
{
rho.temp <- as.list(rep(NA, n.chains))
for(j in 1:n.chains)
{
rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.gamma.list[[j]])
colnames(rho.temp[[j]]) <- c("rho.S", "rho.T")
}
rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc))
}
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## create a summary object
n.keep <- floor((n.sample - burnin)/thin) * n.chains
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.tau2 <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)), n.keep, 100,
effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.rho <- array(NA, c(2,7))
row.names(summary.rho) <- c("rho.S", "rho.T")
if(!fix.rho.S)
{
temp <- mcmc.list(lapply(samples.rho.list, mcmc))
summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[1, 1:3] <- c(rho, rho, rho)
summary.rho[1, 4:7] <- rep(NA, 4)
}
if(!fix.rho.T)
{
temp <- mcmc.list(lapply(samples.gamma.list, mcmc))
summary.rho[2, 1:3] <- c(mean(samples.gamma.matrix), quantile(samples.gamma.matrix, c(0.025, 0.975)))
summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2])
}else
{
summary.rho[2, 1:3] <- c(gamma, gamma, gamma)
summary.rho[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.tau2, summary.rho)
rownames(summary.results)[(p+1)] <- "tau2"
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Poisson (log link function)", "\nLatent structure model - Autoregressive order 1 CAR model\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results.final) <- "CARBayesST"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results.final)
}
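## A hedged usage sketch (not part of the original source), reusing the
## hypothetical 'dat' and W objects described for poisson.CARanova() above:
# mod <- poisson.CARar1(formula=Y~x1+x2, data=dat, W=W,
#                       burnin=20000, n.sample=100000, thin=10)
# mod$samples$rho    # joint posterior samples of rho.S and rho.T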
|
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.CARar1.R
|
poisson.CARar1MCMC <- function(Y, offset, X.standardised, W, rho, gamma, fix.rho.S, fix.rho.T, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain)
{
#Rcpp::sourceCpp("src/CARBayesST.cpp")
#source("R/common.functions.R")
#library(spdep)
#library(truncnorm)
#
#
############################################
#### Set up the key elements before sampling
############################################
#### Generate the initial parameter values
mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
res.temp <- log.Y - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=N.all, mean=0, sd = res.sd)
tau2 <- var(phi)/10
#### Specify matrix quantities
Y.DA <- Y
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
fitted <- exp(as.numeric(offset.mat + regression.mat + phi.mat))
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, N.all))
samples.tau2 <- array(NA, c(n.keep, 1))
if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.T) samples.gamma <- array(NA, c(n.keep, 1))
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Specify the Metropolis quantities
accept <- rep(0,6)
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.05
proposal.sd.beta <- 0.01
tau2.shape <- prior.tau2[1] + N.all/2
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Create the determinant
if(!fix.rho.S)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q.W <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
}else
{}
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
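#### When rho = 1 the intrinsic CAR prior is singular, losing one degree of
#### freedom per disconnected component (island) of W; when gamma = 1 the
#### temporal random walk loses one time period. The posterior shape
#### parameter for tau2 is reduced accordingly below.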
if(rho==1 & gamma==1)
{
tau2.shape <- prior.tau2[1] + ((N-1) * (K-n.islands))/2
}else if(rho==1)
{
tau2.shape <- prior.tau2[1] + (N * (K-n.islands))/2
}else if(gamma==1)
{
tau2.shape <- prior.tau2[1] + ((N-1) * K)/2
}else
{}
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
##############################
#### Generate the MCMC samples
##############################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
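## Missing observations are imputed from their posterior predictive
## distribution given the current parameter values, i.e. Poisson(fitted).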
Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0])
}else
{}
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from beta
####################
offset.temp <- as.numeric(offset.mat + phi.mat)
if(MALA)
{
temp <- poissonbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from phi
####################
phi.offset <- offset.mat + regression.mat
den.offset <- rho * W.triplet.sum + 1 - rho
temp1 <- poissonar1carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, tau2, gamma, rho, Y.DA.mat, proposal.sd.phi, phi.offset, den.offset)
phi.temp <- temp1[[1]]
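## Centre the sampled random effects so they sum to zero, keeping them
## identifiable from the intercept term in the linear predictor.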
phi <- as.numeric(phi.temp) - mean(as.numeric(phi.temp))
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K*N
####################
## Sample from gamma
####################
if(!fix.rho.T)
{
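## The prior for phi is quadratic in (phi_t - gamma * phi_{t-1}), so the full
## conditional for gamma is Gaussian with mean and variance given by the two
## quadratic-form components returned below, truncated to [0, 1].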
temp2 <- gammaquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho)
mean.gamma <- temp2[[1]] / temp2[[2]]
sd.gamma <- sqrt(tau2 / temp2[[2]])
gamma <- rtruncnorm(n=1, a=0, b=1, mean=mean.gamma, sd=sd.gamma)
}else
{}
####################
## Sample from tau2
####################
temp3 <- tauquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho, gamma)
tau2.scale <- temp3 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.shape, scale=(1/tau2.scale))
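## The draw above samples the precision 1/tau2 from its conjugate Gamma full
## conditional (scale = 1/rate), then inverts it, giving
## tau2 ~ Inverse-Gamma(tau2.shape, tau2.scale).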
##################
## Sample from rho
##################
if(!fix.rho.S)
{
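## rho is updated by Metropolis-Hastings with a Gaussian random walk truncated
## to [0, 1]. The proposal is asymmetric near the boundaries, so the Hastings
## correction q(rho | proposal) / q(proposal | rho) is included in the
## acceptance probability below.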
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp4 <- tauquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, proposal.rho, gamma)
det.Q.W.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- N * det.Q.W - temp3 / tau2
logprob.proposal <- N * det.Q.W.proposal - temp4 / tau2
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.W <- det.Q.W.proposal
accept[5] <- accept[5] + 1
}else
{
}
accept[6] <- accept[6] + 1
}else
{}
#########################
## Calculate the deviance
#########################
lp <- as.numeric(offset.mat + regression.mat + phi.mat)
fitted <- exp(lp)
loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- as.numeric(phi)
samples.tau2[ele, ] <- tau2
if(!fix.rho.S) samples.rho[ele, ] <- rho
if(!fix.rho.T) samples.gamma[ele, ] <- gamma
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self-tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
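#### Every 100 iterations during burn-in, each proposal sd is scaled up or
#### down to push the empirical acceptance rate into its target band
#### (30-40% or 40-50% here), after which the counters are reset.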
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
if(!fix.rho.S) proposal.sd.rho <- common.accceptrates2(accept[5:6], proposal.sd.rho, 40, 50, 0.5)
accept <- rep(0,6)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.gamma <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.gamma=samples.gamma, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
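#### A minimal standalone sketch (an editor's addition, not CARBayesST code)
#### that numerically checks the eigenvalue identity behind det.Q.W on a
#### hypothetical 3-unit path graph (W.toy, rho.toy are illustrative names).
#### Wrapped in if(FALSE) so sourcing this file has no side effects; run the
#### body interactively to verify.
if(FALSE)
{
W.toy <- matrix(c(0,1,0, 1,0,1, 0,1,0), nrow=3)   # adjacency of a path graph
rho.toy <- 0.7
Wstar.toy <- diag(rowSums(W.toy)) - W.toy          # graph Laplacian D - W
lambda.toy <- eigen(Wstar.toy)$values
Q.toy <- rho.toy * Wstar.toy + (1 - rho.toy) * diag(3)
direct <- as.numeric(determinant(Q.toy, logarithm=TRUE)$modulus)
via.eigen <- sum(log(rho.toy * lambda.toy + (1 - rho.toy)))
all.equal(direct, via.eigen)                       # TRUE: the identities agree
}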
#### [End of file: /scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.CARar1MCMC.R]