library(BART)
B <- getOption('mc.cores', 1)
figures = getOption('figures', default='NONE')
##simulate from Friedman's five-dimensional test function
##Friedman JH. Multivariate adaptive regression splines
##(with discussion and a rejoinder by the author).
##Annals of Statistics 1991; 19:1-67.
f = function(x) #only the first 5 matter
sin(pi*x[ , 1]*x[ , 2]) + 2*(x[ , 3]-.5)^2+x[ , 4]+0.5*x[ , 5]-1.5
sigma = 1.0 #y = f(x) + sigma*z where z~N(0, 1)
k = 50 #number of covariates
thin = 10
ndpost = 1000
nskip = 100
par(mfrow=c(2, 2))
for(n in c(200, 1000, 5000)) {
set.seed(12)
x.train=matrix(runif(n*k), n, k)
Ey.train = f(x.train)
y.train=(Ey.train+sigma*rnorm(n)>0)*1
##run BART with B cores in parallel
mc.train = mc.pbart(x.train, y.train, mc.cores=B, keepevery=thin,
seed=99, ndpost=ndpost, nskip=nskip)
x <- x.train
x4 <- seq(0, 1, length.out=10)
for(i in 1:10) {
x[ , 4] <- x4[i]
if(i==1) x.test <- x
else x.test <- rbind(x.test, x)
}
##run predict with B cores in parallel
mc.test <- predict(mc.train, newdata=x.test, mc.cores=B)
##create Friedman's partial dependence function for x4
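## x.test above stacks 10 copies of x.train, one per value of x4; block i of
## columns in prob.test corresponds to x4[i], so averaging each row (draw)
## over the n subjects, then averaging over draws, gives the partial
## dependence estimate at x4[i]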
pred <- matrix(nrow=ndpost, ncol=10)
for(i in 1:10) {
h <- (i-1)*n+1:n
pred[ , i] <- apply(mc.test$prob.test[ , h], 1, mean)
##pred[ , i] <- apply(pnorm(mc.test[ , h]), 1, mean)
}
pred <- apply(pred, 2, mean)
plot(x4, qnorm(pred), xlab='x4', ylab='partial dependence function', type='l')
geweke <- gewekediag(mc.train$yhat.train)
i <- floor(seq(1, n, length.out=10))
auto.corr <- acf(mc.train$yhat.train[ , i], plot=FALSE)
max.lag <- max(auto.corr$lag[ , 1, 1])
j <- seq(-0.5, 0.4, length.out=10)
for(h in 1:10) {
if(h==1)
plot(1:max.lag+j[h], auto.corr$acf[1+(1:max.lag), h, h],
type='h', xlim=c(0, max.lag+1), ylim=c(-1, 1),
ylab='acf', xlab='lag')
else
lines(1:max.lag+j[h], auto.corr$acf[1+(1:max.lag), h, h],
type='h', col=h)
}
for(j in 1:10) {
if(j==1)
plot(pnorm(mc.train$yhat.train[ , i[j]]),
type='l', ylim=c(0, 1),
sub=paste0('N:', n, ', k:', k),
ylab=expression(Phi(f(x))), xlab='m')
else
lines(pnorm(mc.train$yhat.train[ , i[j]]),
type='l', col=j)
}
j <- -10^(log10(n)-1)
plot(geweke$z, pch='.', cex=2, ylab='z', xlab='i',
sub=paste0('N:', n, ', k:', k),
xlim=c(j, n), ylim=c(-5, 5))
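## reference lines below are two-sided standard normal critical values,
## i.e., qnorm(1-c(0.05, 0.01, 0.001, 1e-4, 1e-5)/2) =
## 1.96, 2.576, 3.291, 3.891, 4.417 for levels 0.95 through 0.99999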
lines(1:n, rep(-1.96, n), type='l', col=6)
lines(1:n, rep(+1.96, n), type='l', col=6)
lines(1:n, rep(-2.576, n), type='l', col=5)
lines(1:n, rep(+2.576, n), type='l', col=5)
lines(1:n, rep(-3.291, n), type='l', col=4)
lines(1:n, rep(+3.291, n), type='l', col=4)
lines(1:n, rep(-3.891, n), type='l', col=3)
lines(1:n, rep(+3.891, n), type='l', col=3)
lines(1:n, rep(-4.417, n), type='l', col=2)
lines(1:n, rep(+4.417, n), type='l', col=2)
text(c(1, 1), c(-1.96, 1.96), pos=2, cex=0.6, labels='0.95')
text(c(1, 1), c(-2.576, 2.576), pos=2, cex=0.6, labels='0.99')
text(c(1, 1), c(-3.291, 3.291), pos=2, cex=0.6, labels='0.999')
text(c(1, 1), c(-3.891, 3.891), pos=2, cex=0.6, labels='0.9999')
text(c(1, 1), c(-4.417, 4.417), pos=2, cex=0.6, labels='0.99999')
if(figures!='NONE')
dev.copy2pdf(file=paste(figures, paste0('geweke-pbart2-', n, '.pdf'),
sep='/'))
}
par(mfrow=c(1, 1))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/geweke.pbart2.R
library(BART)
##simulate from Friedman's five-dimensional test function
##Friedman JH. Multivariate adaptive regression splines
##(with discussion and a rejoinder by the author).
##Annals of Statistics 1991; 19:1-67.
f = function(x) #only the first 5 matter
sin(pi*x[ , 1]*x[ , 2]) + 2*(x[ , 3]-.5)^2+x[ , 4]+0.5*x[ , 5]-1.5
sigma = 1.0 #y = f(x) + sigma*z where z~N(0, 1)
k = 50 #number of covariates
thin = 10
ndpost = 1000
nskip = 100
C = 8
m <- 10
n <- 10000
set.seed(12)
x.train=matrix(runif(n*k), n, k)
Ey.train = f(x.train)
y.train=(Ey.train+sigma*rnorm(n)>0)*1
table(y.train)/n
x <- x.train
x4 <- seq(0, 1, length.out=10)
for(i in 1:10) {
x[ , 4] <- x4[i]
if(i==1) x.test <- x
else x.test <- rbind(x.test, x)
}
j <- n/m
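## the n observations are split into m shards of j=n/m each; BART is fit to
## each shard separately and the posterior draws are stacked below so that
## the convergence diagnostics reflect the combined chains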
##run BART with C cores in parallel
for(i in 1:m) {
h <- (i-1)*j+1:j
post = mc.pbart(x.train[h, ], y.train[h], x.test,
mc.cores=C, keepevery=thin,
seed=99, ndpost=ndpost, nskip=nskip)
if(i==1) {
mc.train <- post
mc.train$yhat.test <- list(1:10)
for(l in 1:10) {
mc.train$yhat.test[[l]] <- post$yhat.test[ , (l-1)*n+1:n]
}
}
else {
mc.train$yhat.train <- cbind(mc.train$yhat.train, post$yhat.train)
for(l in 1:10) {
mc.train$yhat.test[[l]] <-
rbind(mc.train$yhat.test[[l]], post$yhat.test[ , (l-1)*n+1:n])
}
}
}
par(mfrow=c(2, 2))
pred <- matrix(nrow=10000, ncol=10)
for(i in 1:10) pred[ , i] <- apply(pnorm(mc.train$yhat.test[[i]]), 1, mean)
pred <- apply(pred, 2, mean)
plot(x4, qnorm(pred), xlab='x4', ylab='partial dependence function', type='l')
geweke <- gewekediag(mc.train$yhat.train)
i <- floor(seq(1, n, length.out=10))
auto.corr <- acf(mc.train$yhat.train[ , i], plot=FALSE)
max.lag <- max(auto.corr$lag[ , 1, 1])
j <- seq(-0.5, 0.4, length.out=10)
for(h in 1:10) {
if(h==1)
plot(1:max.lag+j[h], auto.corr$acf[1+(1:max.lag), h, h],
type='h', xlim=c(0, max.lag+1), ylim=c(-1, 1),
ylab='acf', xlab='lag')
else
lines(1:max.lag+j[h], auto.corr$acf[1+(1:max.lag), h, h],
type='h', col=h)
}
for(j in 1:10) {
if(j==1)
plot(pnorm(mc.train$yhat.train[ , i[j]]),
type='l', ylim=c(0, 1),
sub=paste0('N:', n, ', k:', k),
ylab=expression(Phi(f(x))), xlab='m')
else
lines(pnorm(mc.train$yhat.train[ , i[j]]),
type='l', col=j)
}
j <- -10^(log10(n)-1)
plot(geweke$z, pch='.', cex=2, ylab='z', xlab='i',
sub=paste0('N:', n, ', k:', k),
xlim=c(j, n), ylim=c(-5, 5))
lines(1:n, rep(-1.96, n), type='l', col=6)
lines(1:n, rep(+1.96, n), type='l', col=6)
lines(1:n, rep(-2.576, n), type='l', col=5)
lines(1:n, rep(+2.576, n), type='l', col=5)
lines(1:n, rep(-3.291, n), type='l', col=4)
lines(1:n, rep(+3.291, n), type='l', col=4)
lines(1:n, rep(-3.891, n), type='l', col=3)
lines(1:n, rep(+3.891, n), type='l', col=3)
lines(1:n, rep(-4.417, n), type='l', col=2)
lines(1:n, rep(+4.417, n), type='l', col=2)
text(c(1, 1), c(-1.96, 1.96), pos=2, cex=0.6, labels='0.95')
text(c(1, 1), c(-2.576, 2.576), pos=2, cex=0.6, labels='0.99')
text(c(1, 1), c(-3.291, 3.291), pos=2, cex=0.6, labels='0.999')
text(c(1, 1), c(-3.891, 3.891), pos=2, cex=0.6, labels='0.9999')
text(c(1, 1), c(-4.417, 4.417), pos=2, cex=0.6, labels='0.99999')
par(mfrow=c(1, 1))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/geweke.pbart3.R
library(BART)
## simulate recurrent events data set with Exponential proportional intensity
N <- 250
K <- 60
NK <- N*K
C <- 8
set.seed(-1)
x <- matrix(nrow=NK, ncol=23)
dimnames(x)[[2]] <- c('t', 'v', 'N', paste0('x', 1:20))
b <- c(1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.5, 0, 0, 0, 0, 0, 0, 0, 0, 0)
N. <- double(NK)
y <- integer(NK)
cum <- double(NK)
k <- 1
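## simulation loop: each of N subjects is followed over K intervals; columns
## 1:3 of x hold t (interval), time since the last event (t-v), and the
## running event count N., followed by 20 covariates; an event in each
## interval is drawn with probability pexp(30, alpha), and cum accumulates
## the true cumulative intensity Lambda(t, x)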
for(i in 1:N) {
v <- 0
x[k, 4:13] <- runif(10)
x[k, 14:23] <- rbinom(10, 1, 0.5)
for(j in 1:K) {
x[k, 1:3] <- c(j, j-v, N.[k])
if(j>1) x[k, 4:23] <- x[k-1, 4:23]
alpha <- 0.0001*exp(sum(b*x[k, 4:23])+sqrt(N.[k]))
cum[k] <- pexp(30, alpha)
y[k] <- rbinom(1, 1, cum[k])
if(y[k]==1) v <- j
if(j>1) cum[k] <- cum[k-1]+cum[k]
if(j<K) N.[k+1] <- N.[k]+y[k]
k <- k+1
}
}
table(x[K*(1:N), 3])
table(x[K*(1:N), 3])/N
for(i in 1:N) {
j <- (i-1)*K+1:K
if(i==1) plot(1:K, cum[j], type='l',
xlab='t', ylab=expression(Lambda(t, x)),
sub='Proportional Setting', ylim=c(0, 20))
else lines(1:K, cum[j], col=i)
}
post <- mc.recur.bart(x.train=x, y.train=y, x.test=x, nskip=1000,
keepevery=100, seed=99, mc.cores=C,
sparse=TRUE, augment=TRUE)
for(i in 1:N) {
j <- (i-1)*K+1:K
if(i==1) plot(1:K, post$cum.test.mean[j], type='l',
xlab='t', ylab=expression(Lambda(t, x)),
sub='Proportional Setting', ylim=c(0, 20))
else lines(1:K, post$cum.test.mean[j], col=i)
}
print(cor(cum, post$cum.test.mean)^2)
plot(cum, post$cum.test.mean, pch='.',
xlim=c(0, 20), ylim=c(0, 20),
xlab='True', ylab='Estimated', sub='Proportional Setting')
abline(0, 1)
## convergence diagnostics
par(mfrow=c(2, 2))
## select 10 values of x1 for Friedman's partial dependence function
M <- nrow(post$yhat.test)
m <- 10
x1 <- seq(0, 1, length.out=m)
pred <- as.list(1:m)
Fpdf <- as.list(1:m)
y. <- 0
for(i in 1:m) {
x.test <- x
x.test[ , 4] <- x1[i]
if(length(pred[[i]])==1) {
pred[[i]] <- predict(post, newdata=x.test, mc.cores=C)
Fpdf[[i]] <- apply(pnorm(pred[[i]]$yhat.test), 1, mean)
}
y.[i] <- mean(Fpdf[[i]])
}
plot(x1, y., type='l',
xlab=expression(x[1]), ylab='partial dependence function')
# select 10 subject X time points to summarize
i <- floor(seq(1, NK, length.out=m))
j <- seq(-0.5, 0.4, length.out=m)
for(h in 1:m) {
auto.corr <- acf(post$yhat.test[ , i[h]], plot=FALSE)
if(h==1) {
max.lag <- max(auto.corr$lag[ , 1, 1])
plot(1:max.lag+j[h], auto.corr$acf[1+(1:max.lag), 1, 1],
type='h', xlim=c(0, max.lag+1), ylim=c(-1, 1),
ylab='auto-correlation', xlab='lag')
}
else
lines(1:max.lag+j[h], auto.corr$acf[1+(1:max.lag), 1, 1],
type='h', col=h)
}
for(j in 1:m) {
##if(j==1) plot(pnorm(post$yhat.test[ , i[j]]), ylim=c(0, 0.16),
if(j==1) plot(post$yhat.test[ , i[j]], ylim=c(-3, -1),
type='l', ylab='f(x)', xlab='m')
else lines(post$yhat.test[ , i[j]], type='l', col=j)
}
## select 10 subjects uniformly spread out over the data set
h <- seq(1, N*K, floor(N/m)*K)
j <- 1
for(i in h) {
z <- gewekediag(post$yhat.test[ , (i-1)+1:K])$z
y <- max(c(4, abs(z)))
## plot the z scores vs. time for each patient
if(i==1) plot(post$times, z, ylim=c(-y, y), type='l',
xlab='t', ylab='z')
else lines(post$times, z, type='l', col=j)
j <- j+1
}
## add two-sided alpha=0.05 critical value lines
lines(post$times, rep(-1.96, K), type='l', lty=2)
lines(post$times, rep( 1.96, K), type='l', lty=2)
par(mfrow=c(1, 1))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/geweke.recur.bart.R
library(BART)
##simulate from Friedman's five-dimensional test function
##Friedman JH. Multivariate adaptive regression splines
##(with discussion and a rejoinder by the author).
##Annals of Statistics 1991; 19:1-67.
f = function(x) #only the first 5 matter
sin(pi*x[ , 1]*x[ , 2]) + 2*(x[ , 3]-.5)^2+x[ , 4]+0.5*x[ , 5]-1.5
sigma = 1.0 #y = f(x) + sigma*z where z~N(0, 1)
k = 50 #number of covariates
thin = 25
ndpost = 2500
nskip = 100
C = 10
m = 10
n = 10000
set.seed(12)
x.train=matrix(runif(n*k), n, k)
Ey.train = f(x.train)
y.train=(Ey.train+sigma*rnorm(n)>0)*1
table(y.train)/n
x <- x.train
x4 <- seq(0, 1, length.out=m)
for(i in 1:m) {
x[ , 4] <- x4[i]
if(i==1) x.test <- x
else x.test <- rbind(x.test, x)
}
post = rs.pbart(x.train, y.train, x.test=x.test,
C=C, mc.cores=8, keepevery=thin,
seed=99, ndpost=ndpost, nskip=nskip)
str(post)
par(mfrow=c(2, 2))
M <- nrow(post$yhat.test)
pred <- matrix(nrow=M, ncol=10)
for(i in 1:m) {
h <- (i-1)*n+1:n
pred[ , i] <- apply(pnorm(post$yhat.test[ , h]), 1, mean)
}
pred <- apply(pred, 2, mean)
plot(x4, qnorm(pred), xlab=expression(x[4]),
ylab='partial dependence function', type='l')
i <- floor(seq(1, n, length.out=10))
j <- seq(-0.5, 0.4, length.out=10)
for(h in 1:10) {
auto.corr <- acf(post$yhat.shard[ , i[h]], plot=FALSE)
if(h==1) {
max.lag <- max(auto.corr$lag[ , 1, 1])
plot(1:max.lag+j[h], auto.corr$acf[1+(1:max.lag), 1, 1],
type='h', xlim=c(0, max.lag+1), ylim=c(-1, 1),
ylab='auto-correlation', xlab='lag')
}
else
lines(1:max.lag+j[h], auto.corr$acf[1+(1:max.lag), 1, 1],
type='h', col=h)
}
for(j in 1:10) {
if(j==1)
plot(pnorm(post$yhat.shard[ , i[j]]),
type='l', ylim=c(0, 1),
sub=paste0('N:', n, ', k:', k),
ylab=expression(Phi(f(x))), xlab='m')
else
lines(pnorm(post$yhat.shard[ , i[j]]),
type='l', col=j)
}
geweke <- gewekediag(post$yhat.shard)
j <- -10^(log10(n)-1)
plot(geweke$z, pch='.', cex=2, ylab='z', xlab='i',
sub=paste0('N:', n, ', k:', k),
xlim=c(j, n), ylim=c(-5, 5))
lines(1:n, rep(-1.96, n), type='l', col=6)
lines(1:n, rep(+1.96, n), type='l', col=6)
lines(1:n, rep(-2.576, n), type='l', col=5)
lines(1:n, rep(+2.576, n), type='l', col=5)
lines(1:n, rep(-3.291, n), type='l', col=4)
lines(1:n, rep(+3.291, n), type='l', col=4)
lines(1:n, rep(-3.891, n), type='l', col=3)
lines(1:n, rep(+3.891, n), type='l', col=3)
lines(1:n, rep(-4.417, n), type='l', col=2)
lines(1:n, rep(+4.417, n), type='l', col=2)
text(c(1, 1), c(-1.96, 1.96), pos=2, cex=0.6, labels='0.95')
text(c(1, 1), c(-2.576, 2.576), pos=2, cex=0.6, labels='0.99')
text(c(1, 1), c(-3.291, 3.291), pos=2, cex=0.6, labels='0.999')
text(c(1, 1), c(-3.891, 3.891), pos=2, cex=0.6, labels='0.9999')
text(c(1, 1), c(-4.417, 4.417), pos=2, cex=0.6, labels='0.99999')
par(mfrow=c(1, 1))
##dev.copy2pdf(file='geweke.rs.pbart.pdf')
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/geweke.rs.pbart.R
library(BART)
f <- function(x) ## adapted Friedman's five-dimensional test function
3+sin(pi*x[ , 1]*x[ , 2])-2*(x[ , 3]-0.5)^2+x[ , 4]-0.5*x[ , 5]
N <- 100
A <- 1/155
P <- 20 #number of covariates
set.seed(12)
x.train <- matrix(runif(N*P), nrow=N, ncol=P)
x.test <- matrix(runif(N*P), nrow=N, ncol=P)
T <- ceiling(rweibull(N, shape=2, scale=exp(f(x.train))))
C <- ceiling(rexp(N, A))
delta <- 1*(T<C)
times <- delta*T+(1-delta)*C
table(delta)/N
C = 8
##run BART with C cores in parallel
post = mc.surv.bart(x.train, times=times, delta=delta, mc.cores=C, seed=99,
keepevery=50)
x <- x.train
x4 <- seq(0, 1, length.out=10)
for(i in 1:10) {
x[ , 4] <- x4[i]
if(i==1) x.test <- x
else x.test <- rbind(x.test, x)
}
pre = surv.pre.bart(times=times, delta=delta,
x.train=x.train, x.test=x.test)
##run predict with C cores in parallel
pred <- predict(post, newdata=pre$tx.test, mc.cores=C)
K <- pred$K
##create Friedman's partial dependence function for x4
surv <- list(1:10)
for(i in 1:10) {
surv[[i]] <- matrix(nrow=1000, ncol=K)
for(j in 1:K) {
h <- (i-1)*N*K+seq(j, N*K, by=K)
surv[[i]][ , j] <- apply(pred$surv.test[ , h], 1, mean)
}
surv[[i]] <- apply(surv[[i]], 2, mean)
}
for(i in 1:10) {
if(i==1) plot(c(0, pre$times), c(1, surv[[i]]), type='s',
xlim=c(0, 50), ylim=0:1, xlab='t', ylab='S(t, x)')
else lines(c(0, pre$times), c(1, surv[[i]]), type='s', col=i)
j <- min(which(surv[[i]]<0.5))
text(pre$times[j], 0.5, paste(round(x4[i], digits=1)), col=i, pos=2)
}
## acf plots for 10 subjects
k <- floor(seq(1, N, length.out=10))
j. <- seq(-0.5, 0.4, length.out=10)
for(j in 1:K) {
for(i in 1:10) {
h <- (k[i]-1)*K+j
auto.corr <- acf(pred$yhat.test[ , h], plot=FALSE)
max.lag <- max(auto.corr$lag[ , 1, 1])
if(i==1)
plot(1:max.lag+j.[i], auto.corr$acf[1+(1:max.lag), 1, 1],
type='h', xlim=c(0, max.lag+1), ylim=c(-1, 1),
sub=paste0('t=', pre$times[j]), ylab='acf', xlab='lag')
else
lines(1:max.lag+j.[i], auto.corr$acf[1+(1:max.lag), 1, 1],
type='h', col=i)
}
Sys.sleep(1)
}
## trace plots for 10 subjects
k <- floor(seq(1, N, length.out=10))
for(j in 1:K) {
for(i in 1:10) {
h <- (k[i]-1)*K+j
if(i==1)
plot(pred$yhat.test[ , h], type='l',
ylim=c(-4, 0), sub=paste0('t=', pre$times[j]),
ylab=expression(Phi(f(x))), xlab='m')
else
lines(pred$yhat.test[ , h], type='l', col=i)
}
Sys.sleep(1)
}
## Geweke plot for 10 subjects
k <- floor(seq(1, N, length.out=10))
geweke <- list(1:10)
for(i in 1:10) {
h <- (k[i]-1)*K+1:K
geweke[[i]] <- gewekediag(pred$yhat.test[ , h])
}
max.t <- max(pre$times)
min.t <- -max.t/10
for(i in 1:10) {
if(i==1) {
plot(pre$times, geweke[[i]]$z, type='l',
ylab='z', xlab='t', ylim=c(-5, 5), xlim=c(min.t, max.t))
lines(pre$times, rep(-1.96, K), type='l', col=6)
lines(pre$times, rep(+1.96, K), type='l', col=6)
lines(pre$times, rep(-2.576, K), type='l', col=5)
lines(pre$times, rep(+2.576, K), type='l', col=5)
lines(pre$times, rep(-3.291, K), type='l', col=4)
lines(pre$times, rep(+3.291, K), type='l', col=4)
lines(pre$times, rep(-3.891, K), type='l', col=3)
lines(pre$times, rep(+3.891, K), type='l', col=3)
lines(pre$times, rep(-4.417, K), type='l', col=2)
lines(pre$times, rep(+4.417, K), type='l', col=2)
text(c(0, 0), c(-1.96, 1.96), pos=2, cex=0.6, labels='0.95')
text(c(0, 0), c(-2.576, 2.576), pos=2, cex=0.6, labels='0.99')
text(c(0, 0), c(-3.291, 3.291), pos=2, cex=0.6, labels='0.999')
text(c(0, 0), c(-3.891, 3.891), pos=2, cex=0.6, labels='0.9999')
text(c(0, 0), c(-4.417, 4.417), pos=2, cex=0.6, labels='0.99999')
}
else lines(pre$times, geweke[[i]]$z, type='l')
}
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/geweke.surv.bart.R
library(BART)
N = 500
P = 1 #number of covariates
C = 8
SD.y=10
M = 201
set.seed(12)
x.train=matrix(runif(N*P, -2, 2), N, P)
x.test=matrix(seq(-2, 2, length.out=M), M, P)
Ey.train = x.train[ , 1]^3
y.train=rnorm(N, Ey.train, SD.y)
post <- as.list(1:4)
post[[1]] = mc.gbart(x.train, y.train, x.test, mc.cores=C, seed=99)
info <- c(0, N, 4*N)
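## informative prior via data augmentation: post[[1]] uses the data alone,
## while post[[2]] and post[[3]] prepend M=N and M=4*N pseudo-observations
## simulated from the true curve x^3 before refitting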
for(i in 2:3) {
M=info[i]
x.info=matrix(runif(M*P, -2, 2), M, P)
Ey.info = x.info[ , 1]^3
y.info=c(rnorm(M, Ey.info, SD.y), y.train)
x.info=rbind(x.info, x.train)
post[[i]] = mc.gbart(x.info, y.info, x.test, mc.cores=C, seed=99)
}
## plot(x.test[ , 1], x.test[ , 1]^3, type='l', xlab='x', ylab='y')
## legend(-2, 8, lty=rep(1:0, 4), legend=c('Truth', ' ', 'No prior', ' ',
## 'Equiv. N', ' ', 'Equiv. 4N', ' '),
## col=rep(1:4, each=2), cex=0.8, bty='n')
## legend(-0.75, 8, lty=rep(0, 8),
## legend=c(expression(italic(R)^2), ' ', '0.828', ' ',
## '0.858', ' ', '0.881', ' '),
## col=rep(1:4, each=2), cex=0.8, bty='n')
## legend(0, 8, lty=rep(0, 8), legend=c('MSE', ' ', '1.93', ' ',
## '1.73', ' ', '1.24', ' '),
## col=rep(1:4, each=2), cex=0.8, bty='n')
## for(i in 1:3) {
## print(cor(x.test[ , 1]^3, post[[i]]$yhat.test.mean)^2)
## lines(x.test[ , 1], post[[i]]$yhat.test.mean, col=i+1)
## }
## dev.copy2pdf(file='inform-alt.pdf', height=4, width=4)
for(i in 1:3)
print(mean(((x.test[ , 1]^3)-post[[i]]$yhat.test.mean)^2))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/inform.alt.gbart.R
library(BART)
N = 500
P = 1 #number of covariates
C = 8
SD.y=10
M = 201
set.seed(12)
x.train=matrix(runif(N*P, -2, 2), N, P)
x.test=matrix(seq(-2, 2, length.out=M), M, P)
Ey.train = x.train[ , 1]^3
y.train=rnorm(N, Ey.train, SD.y)
post <- as.list(1:4)
post[[1]] = mc.gbart(x.train, y.train, x.test, mc.cores=C, seed=99)
info <- c(0, N, 4*N)
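## null augmentation for comparison with inform.alt.gbart.R: the M
## pseudo-responses are simulated from the true curve but paired with
## freshly drawn x values unrelated to them, so the added data carry no
## information about f(x)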
for(i in 2:3) {
M=info[i]
x.info=matrix(runif(M*P, -2, 2), M, P)
Ey.info = x.info[ , 1]^3
y.info=c(rnorm(M, Ey.info, SD.y), y.train)
x.info=rbind(matrix(runif(M*P, -2, 2), M, P), x.train)
post[[i]] = mc.gbart(x.info, y.info, x.test, mc.cores=C, seed=99)
}
## plot(x.test[ , 1], x.test[ , 1]^3, type='l', xlab='x', ylab='y')
## legend(-2, 8, lty=rep(1:0, 4), legend=c('Truth', ' ', 'No prior', ' ',
## 'Equiv. N', ' ', 'Equiv. 4N', ' '),
## col=rep(1:4, each=2), cex=0.8, bty='n')
## legend(-0.75, 8, lty=rep(0, 8),
## legend=c(expression(italic(R)^2), ' ', '0.828', ' ',
## '0.551', ' ', '0.248', ' '),
## col=rep(1:4, each=2), cex=0.8, bty='n')
## legend(0, 8, lty=rep(0, 8), legend=c('MSE', ' ', '1.93', ' ',
## '4.31', ' ', '7.17', ' '),
## col=rep(1:4, each=2), cex=0.8, bty='n')
## for(i in 1:3) {
## print(cor(x.test[ , 1]^3, post[[i]]$yhat.test.mean)^2)
## lines(x.test[ , 1], post[[i]]$yhat.test.mean, col=i+1)
## }
## dev.copy2pdf(file='inform-null.pdf', height=4, width=4)
for(i in 1:3)
print(mean(((x.test[ , 1]^3)-post[[i]]$yhat.test.mean)^2))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/inform.null.gbart.R
library(BART)
data(leukemia)
leukemia$TD=ceiling(leukemia$TD/30)
leukemia$TB=ceiling(leukemia$TB/30)
leukemia$TA=ceiling(leukemia$TA/30)
leukemia$TC=ceiling(leukemia$TC/30)
leukemia$TP=ceiling(leukemia$TP/30)
leukemia$X7=ceiling(leukemia$X7/30)
N=137
events=unique(sort(c(leukemia$TD, leukemia$TB)))
K=length(events)
T=events[K]
## the following covariates appear to be important
## G, TB, R, TA, A, TC, C, TP, P, X2, X8
pick=c(1, 3, 5, 7, 8, 9, 10, 11, 12, 14, 20)
x.train3=as.matrix(leukemia[ , -c(2, 4, 6)])
P=ncol(x.train3)
L=32
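## x.test3 enumerates all L = 2^5 = 32 settings of R, A, C, P (each 0/1) and
## X2 (20 or 40), applying each setting to all N subjects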
x.test3=matrix(nrow=L*N, ncol=P)
dimnames(x.test3)=dimnames(x.train3)
k=1
for(R in 0:1)
for(A in 0:1)
for(C in 0:1)
for(P in 0:1)
for(X2 in c(20, 40)) {
h=(k-1)*N+1:N
x.test3[h, ]=x.train3
x.test3[h, 'TB']=R*8+(1-R)*T
x.test3[h, 'R']=R
x.test3[h, 'TA']=A*1+(1-A)*T
x.test3[h, 'A']=A
x.test3[h, 'TC']=C*5+(1-C)*T
x.test3[h, 'C']=C
x.test3[h, 'TP']=P*1+(1-P)*T
x.test3[h, 'P']=P
x.test3[h, 'X2']=X2
k=k+1
}
post3=mc.surv.bart(x.train=x.train3, times=leukemia$TD, delta=leukemia$D,
events=events, ztimes=c(2, 4, 6, 8), zdelta=c(3, 5, 7, 9),
sparse=TRUE, mc.cores=8, seed=99)
state3=surv.pre.bart(leukemia$TD, leukemia$D, x.train3, x.test3,
events=events,
ztimes=c(2, 4, 6, 8), zdelta=c(3, 5, 7, 9))
## post3=mc.surv.bart(state3$tx.train, state3$y.train,
## x.test=state3$tx.train,
## sparse=TRUE,
## mc.cores=8, seed=99)
x.train2=x.train3[ , -(2:3)]
x.test2=x.test3[ , -(2:3)]
post2=mc.surv.bart(x.train=x.train2, times=leukemia$TB, delta=leukemia$R,
events=events, ztimes=c(2, 4, 6), zdelta=c(3, 5, 7),
sparse=TRUE, mc.cores=8, seed=99)
state2=surv.pre.bart(leukemia$TB, leukemia$R, x.train2, x.test2,
events=events, ztimes=c(2, 4, 6), zdelta=c(3, 5, 7))
## post2=mc.surv.bart(state2$tx.train, state2$y.train,
## x.test=state2$tx.train,
## sparse=TRUE,
## mc.cores=8, seed=99)
##pdf(file='leuk.pdf')
par(mfrow=c(2, 2))
m=0
for(l in 1:L) {
h=(l-1)*N*K+1:(N*K)
for(G in 1:5) {
if(G==1) {
state3$tx.test[h, 'G']=1
state3$tx.test[h, 'X8']=0
} else if(G %in% 2:3) {
state3$tx.test[h, 'G']=2
state3$tx.test[h, 'X8']=G-2
} else if(G %in% 4:5) {
state3$tx.test[h, 'G']=3
state3$tx.test[h, 'X8']=G-4
}
state2$tx.test[h, 'G']=state3$tx.test[h, 'G']
state2$tx.test[h, 'X8']=state3$tx.test[h, 'X8']
pred3=predict(post3, state3$tx.test[h, ], mc.cores=8)
pred2=predict(post2, state2$tx.test[h, ], mc.cores=8)
i=(l-1)*N+1
R=x.test3[i, 'R']
string=paste0(' R=', R, ' A=', x.test3[i, 'A'],
' C=', x.test3[i, 'C'], ' P=', x.test3[i, 'P'],
' X2=', x.test3[i, 'X2'])
state0.mean=double(K)
state1.mean=double(K)
for(j in 1:K) {
k=seq(j, N*K, by=K)
state0.mean[j]=mean(apply(pred3$surv.test[ , k], 1, mean))
state1.mean[j]=mean(apply(pred2$surv.test[ , k]*
pred3$surv.test[ , k], 1, mean))
## state2.mean[j]=mean(apply((1-pred2$surv.test[ , k])*
## pred3$surv.test[ , k], 1, mean))
}
if(R==1) state1.mean[8:K]=0
if(G==1)
plot(c(0, pred3$times), c(1, state0.mean), type='s', lwd=2, lty=G,
ylim=0:1, xlab='t (months)', ylab='P(t, state)', main=string)
else
lines(c(0, pred3$times), c(1, state0.mean), type='s', lwd=2, lty=G)
lines(c(0, pred3$times), c(1, state1.mean), lty=G,
type='s', lwd=2, col=2)
lines(c(0, pred3$times), c(0, state0.mean-state1.mean), lty=G,
type='s', lwd=2, col=4)
if((l%%4)==0) {
legend('topright', col=c(1, 2, 4), lty=1, lwd=2,
legend=c('alive', 'remission', 'relapsed'))
}
else if((l%%2)==0) {
legend('topright', lty=1:5, lwd=2,
legend=c('G=1 X8=0', 'G=2 X8=0', 'G=2 X8=1',
'G=3 X8=0', 'G=3 X8=1'))
}
m=m+1
if((m%%20)==0) dev.copy2pdf(file=paste0('leuk', m/20, '.pdf'))
}
}
##dev.off()
par(mfrow=c(1, 1))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/leuk.R
library(BART)
B <- getOption('mc.cores', 1)
figures = getOption('figures', default='NONE')
data(transplant)
pfit <- survfit(Surv(futime, event) ~ abo, transplant)
# competing risks for type O
plot(pfit[4,], xscale=7, xmax=735, col=1:3, lwd=2, ylim=c(0, 1),
xlab='t (weeks)', ylab='Aalen-Johansen (AJ) CI(t)')
legend(450, .4, c("Death", "Transplant", "Withdrawal"), col=1:3, lwd=2)
## plot(pfit[4,], xscale=30.5, xmax=735, col=1:3, lwd=2, ylim=c(0, 1),
## xlab='t (months)', ylab='Aalen-Johansen (AJ) CI(t)')
## legend(450, .4, c("Death", "Transplant", "Withdrawal"), col=1:3, lwd=2)
delta <- (as.numeric(transplant$event)-1)
## recode so that delta=1 is cause of interest; delta=2 otherwise
delta[delta==1] <- 4
delta[delta==2] <- 1
delta[delta>1] <- 2
table(delta, transplant$event)
times <- pmax(1, ceiling(transplant$futime/7)) ## weeks
##times <- pmax(1, ceiling(transplant$futime/30.5)) ## months
table(times)
typeO <- 1*(transplant$abo=='O')
typeA <- 1*(transplant$abo=='A')
typeB <- 1*(transplant$abo=='B')
typeAB <- 1*(transplant$abo=='AB')
table(typeA, typeO)
x.train <- cbind(typeO, typeA, typeB, typeAB)
x.test <- cbind(1, 0, 0, 0)
dimnames(x.test)[[2]] <- dimnames(x.train)[[2]]
## run one long MCMC chain in one process
## set.seed(99)
## post <- crisk.bart(x.train=x.train, times=times, delta=delta, x.test=x.test)
## in the interest of time, consider speeding it up by parallel processing
## run "mc.cores" number of shorter MCMC chains in parallel processes
post <- mc.crisk.bart(x.train=x.train, times=times, delta=delta,
x.test=x.test, seed=99, mc.cores=B)
K <- post$K
typeO.cif.mean <- apply(post$cif.test, 2, mean)
typeO.cif.025 <- apply(post$cif.test, 2, quantile, probs=0.025)
typeO.cif.975 <- apply(post$cif.test, 2, quantile, probs=0.975)
plot(pfit[4,], xscale=7, xmax=735, col=1:3, lwd=2, ylim=c(0, 0.8),
xlab='t (weeks)', ylab='CI(t)')
points(c(0, post$times)*7, c(0, typeO.cif.mean), col=4, type='s', lwd=2)
points(c(0, post$times)*7, c(0, typeO.cif.025), col=4, type='s', lwd=2, lty=2)
points(c(0, post$times)*7, c(0, typeO.cif.975), col=4, type='s', lwd=2, lty=2)
legend(450, .4, c("Transplant(BART)", "Transplant(AJ)",
"Death(AJ)", "Withdrawal(AJ)"),
col=c(4, 2, 1, 3), lwd=2)
if(figures!='NONE')
dev.copy2pdf(file=paste(figures, 'liver-BART.pdf', sep='/'))
## plot(pfit[4,], xscale=30.5, xmax=735, col=1:3, lwd=2, ylim=c(0, 0.8),
## xlab='t (months)', ylab='CI(t)')
## points(c(0, post$times)*30.5, c(0, typeO.cif.mean), col=4, type='s', lwd=2)
## points(c(0, post$times)*30.5, c(0, typeO.cif.025), col=4, type='s', lwd=2, lty=2)
## points(c(0, post$times)*30.5, c(0, typeO.cif.975), col=4, type='s', lwd=2, lty=2)
## legend(450, .4, c("Transplant(BART)", "Transplant(AJ)",
## "Death(AJ)", "Withdrawal(AJ)"),
## col=c(4, 2, 1, 3), lwd=2)
## check <- predict(post, newdata=post$tx.test, newdata2=post$tx.test2,
## mc.cores=B)
## print(c(post$surv.test.mean[1], check$surv.test.mean[1],
## post$surv.test.mean[1]-check$surv.test.mean[1]), digits=22)
## print(all(round(post$surv.test.mean, digits=9)==
## round(check$surv.test.mean, digits=9)))
## print(c(post$cif.test.mean[1], check$cif.test.mean[1],
## post$cif.test.mean[1]-check$cif.test.mean[1]), digits=22)
## print(all(round(post$cif.test.mean, digits=9)==
## round(check$cif.test.mean, digits=9)))
## print(c(post$cif.test2.mean[1], check$cif.test2.mean[1],
## post$cif.test2.mean[1]-check$cif.test2.mean[1]), digits=22)
## print(all(round(post$cif.test2.mean, digits=9)==
## round(check$cif.test2.mean, digits=9)))
## typeO.cif.mean <- apply(check$cif.test, 2, mean)
## typeO.cif.025 <- apply(check$cif.test, 2, quantile, probs=0.025)
## typeO.cif.975 <- apply(check$cif.test, 2, quantile, probs=0.975)
## plot(pfit[4,], xscale=7, xmax=735, col=1:3, lwd=2, ylim=c(0, 0.8),
## xlab='t (weeks)', ylab='CI(t)')
## points(c(0, post$times)*7, c(0, typeO.cif.mean), col=4, type='s', lwd=2)
## points(c(0, post$times)*7, c(0, typeO.cif.025), col=4, type='s', lwd=2, lty=2)
## points(c(0, post$times)*7, c(0, typeO.cif.975), col=4, type='s', lwd=2, lty=2)
## legend(450, .4, c("Transplant(BART)", "Transplant(AJ)",
## "Death(AJ)", "Withdrawal(AJ)"),
## col=c(4, 2, 1, 3), lwd=2)
## cor(post$cif.test.mean, check$cif.test.mean)
## plot(post$cif.test.mean, check$cif.test.mean)
## abline(a=0, b=1)
## cor(post$cif.test2.mean, check$cif.test2.mean)
## plot(post$cif.test2.mean, check$cif.test2.mean)
## abline(a=0, b=1)
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/liver.crisk.bart.R
library(BART)
data(transplant)
pfit <- survfit(Surv(futime, event) ~ abo, transplant)
# competing risks for type O
plot(pfit[4,], xscale=7, xmax=735, col=1:3, lwd=2, ylim=c(0, 1),
xlab='t (weeks)', ylab='Aalen-Johansen (AJ) CI(t)')
legend(450, .4, c("Death", "Transplant", "Withdrawal"), col=1:3, lwd=2)
## plot(pfit[4,], xscale=30.5, xmax=735, col=1:3, lwd=2, ylim=c(0, 1),
## xlab='t (months)', ylab='Aalen-Johansen (AJ) CI(t)')
## legend(450, .4, c("Death", "Transplant", "Withdrawal"), col=1:3, lwd=2)
delta <- (as.numeric(transplant$event)-1)
## recode so that delta=1 is cause of interest; delta=2 otherwise
delta[delta==1] <- 4
delta[delta==2] <- 1
delta[delta>1] <- 2
table(delta, transplant$event)
times <- pmax(1, ceiling(transplant$futime/7)) ## weeks
##times <- pmax(1, ceiling(transplant$futime/30.5)) ## months
table(times)
typeO <- 1*(transplant$abo=='O')
typeA <- 1*(transplant$abo=='A')
typeB <- 1*(transplant$abo=='B')
typeAB <- 1*(transplant$abo=='AB')
table(typeA, typeO)
x.train <- cbind(typeO, typeA, typeB, typeAB)
x.test <- cbind(1, 0, 0, 0)
dimnames(x.test)[[2]] <- dimnames(x.train)[[2]]
## run one long MCMC chain in one process
## set.seed(99)
## post <- crisk2.bart(x.train=x.train, times=times, delta=delta, x.test=x.test)
## in the interest of time, consider speeding it up by parallel processing
## run "mc.cores" number of shorter MCMC chains in parallel processes
post <- mc.crisk2.bart(x.train=x.train, times=times, delta=delta,
x.test=x.test, seed=99, mc.cores=8)
K <- post$K
typeO.cif.mean <- apply(post$cif.test, 2, mean)
typeO.cif.025 <- apply(post$cif.test, 2, quantile, probs=0.025)
typeO.cif.975 <- apply(post$cif.test, 2, quantile, probs=0.975)
plot(pfit[4,], xscale=7, xmax=735, col=1:3, lwd=2, ylim=c(0, 0.8),
xlab='t (weeks)', ylab='CI(t)')
points(c(0, post$times)*7, c(0, typeO.cif.mean), col=4, type='s', lwd=2)
points(c(0, post$times)*7, c(0, typeO.cif.025), col=4, type='s', lwd=2, lty=2)
points(c(0, post$times)*7, c(0, typeO.cif.975), col=4, type='s', lwd=2, lty=2)
legend(450, .4, c("Transplant(BART)", "Transplant(AJ)",
"Death(AJ)", "Withdrawal(AJ)"),
col=c(4, 2, 1, 3), lwd=2)
##dev.copy2pdf(file='../vignettes/figures/liver-BART.pdf')
## plot(pfit[4,], xscale=30.5, xmax=735, col=1:3, lwd=2, ylim=c(0, 0.8),
## xlab='t (months)', ylab='CI(t)')
## points(c(0, post$times)*30.5, c(0, typeO.cif.mean), col=4, type='s', lwd=2)
## points(c(0, post$times)*30.5, c(0, typeO.cif.025), col=4, type='s', lwd=2, lty=2)
## points(c(0, post$times)*30.5, c(0, typeO.cif.975), col=4, type='s', lwd=2, lty=2)
## legend(450, .4, c("Transplant(BART)", "Transplant(AJ)",
## "Death(AJ)", "Withdrawal(AJ)"),
## col=c(4, 2, 1, 3), lwd=2)
check <- predict(post, newdata=post$tx.test, newdata2=post$tx.test2,
mc.cores=8)
print(c(post$surv.test.mean[1], check$surv.test.mean[1],
post$surv.test.mean[1]-check$surv.test.mean[1]), digits=22)
print(all(round(post$surv.test.mean, digits=9)==
round(check$surv.test.mean, digits=9)))
print(c(post$cif.test.mean[1], check$cif.test.mean[1],
post$cif.test.mean[1]-check$cif.test.mean[1]), digits=22)
print(all(round(post$cif.test.mean, digits=9)==
round(check$cif.test.mean, digits=9)))
print(c(post$cif.test2.mean[1], check$cif.test2.mean[1],
post$cif.test2.mean[1]-check$cif.test2.mean[1]), digits=22)
print(all(round(post$cif.test2.mean, digits=9)==
round(check$cif.test2.mean, digits=9)))
typeO.cif.mean <- apply(check$cif.test, 2, mean)
typeO.cif.025 <- apply(check$cif.test, 2, quantile, probs=0.025)
typeO.cif.975 <- apply(check$cif.test, 2, quantile, probs=0.975)
plot(pfit[4,], xscale=7, xmax=735, col=1:3, lwd=2, ylim=c(0, 0.8),
xlab='t (weeks)', ylab='CI(t)')
points(c(0, post$times)*7, c(0, typeO.cif.mean), col=4, type='s', lwd=2)
points(c(0, post$times)*7, c(0, typeO.cif.025), col=4, type='s', lwd=2, lty=2)
points(c(0, post$times)*7, c(0, typeO.cif.975), col=4, type='s', lwd=2, lty=2)
legend(450, .4, c("Transplant(BART)", "Transplant(AJ)",
"Death(AJ)", "Withdrawal(AJ)"),
col=c(4, 2, 1, 3), lwd=2)
cor(post$cif.test.mean, check$cif.test.mean)
plot(post$cif.test.mean, check$cif.test.mean)
abline(a=0, b=1)
cor(post$cif.test2.mean, check$cif.test2.mean)
plot(post$cif.test2.mean, check$cif.test2.mean)
abline(a=0, b=1)
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/liver.crisk2.bart.R
library(BART)
data(transplant)
pfit <- survfit(Surv(futime, event) ~ abo, transplant)
# competing risks for type O
plot(pfit[4,], xscale=7, xmax=735, col=1:3, lwd=2, ylim=c(0, 1),
xlab='t (weeks)', ylab='Aalen-Johansen (AJ) CI(t)')
legend(450, .4, c("Death", "Transplant", "Withdrawal"), col=1:3, lwd=2)
## plot(pfit[4,], xscale=30.5, xmax=735, col=1:3, lwd=2, ylim=c(0, 1),
## xlab='t (months)', ylab='Aalen-Johansen (AJ) CI(t)')
## legend(450, .4, c("Death", "Transplant", "Withdrawal"), col=1:3, lwd=2)
delta <- (as.numeric(transplant$event)-1)
## recode so that delta=1 is cause of interest; delta=2 otherwise
delta[delta==1] <- 4
delta[delta==2] <- 1
delta[delta>1] <- 2
table(delta, transplant$event)
times <- pmax(1, transplant$futime/7) ## weeks
##times <- pmax(1, ceiling(transplant$futime/30.5)) ## months
typeO <- 1*(transplant$abo=='O')
typeA <- 1*(transplant$abo=='A')
typeB <- 1*(transplant$abo=='B')
typeAB <- 1*(transplant$abo=='AB')
table(typeA, typeO)
x.train <- cbind(typeO, typeA, typeB, typeAB)
x.test <- cbind(1, 0, 0, 0)
dimnames(x.test)[[2]] <- dimnames(x.train)[[2]]
## pre <- crisk.pre.bart(x.train=x.train, times=times, delta=delta,
## x.test=x.test, K=50)
## run one long MCMC chain in one process
## set.seed(99)
## post <- crisk.bart(x.train=x.train, times=times, delta=delta, x.test=x.test)
## in the interest of time, consider speeding it up by parallel processing
## run "mc.cores" number of shorter MCMC chains in parallel processes
post <- mc.crisk.bart(x.train=x.train, times=times, delta=delta, x.test=x.test,
K=50, seed=99, mc.cores=8)
K <- post$K
typeO.cif.mean <- apply(post$cif.test, 2, mean)
typeO.cif.025 <- apply(post$cif.test, 2, quantile, probs=0.025)
typeO.cif.975 <- apply(post$cif.test, 2, quantile, probs=0.975)
plot(pfit[4,], xscale=7, xmax=735, col=1:3, lwd=2, ylim=c(0, 0.8),
xlab='t (weeks)', ylab='CI(t)')
points(c(0, post$times)*7, c(0, typeO.cif.mean), col=4, type='s', lwd=2)
points(c(0, post$times)*7, c(0, typeO.cif.025), col=4, type='s', lwd=2, lty=2)
points(c(0, post$times)*7, c(0, typeO.cif.975), col=4, type='s', lwd=2, lty=2)
legend(450, .4, c("Transplant(BART)", "Transplant(AJ)",
"Death(AJ)", "Withdrawal(AJ)"),
col=c(4, 2, 1, 3), lwd=2)
## plot(pfit[4,], xscale=30.5, xmax=735, col=1:3, lwd=2, ylim=c(0, 0.8),
## xlab='t (months)', ylab='CI(t)')
## points(c(0, post$times)*30.5, c(0, typeO.cif.mean), col=4, type='s', lwd=2)
## points(c(0, post$times)*30.5, c(0, typeO.cif.025), col=4, type='s', lwd=2, lty=2)
## points(c(0, post$times)*30.5, c(0, typeO.cif.975), col=4, type='s', lwd=2, lty=2)
## legend(450, .4, c("Transplant(BART)", "Transplant(AJ)",
## "Death(AJ)", "Withdrawal(AJ)"),
## col=c(4, 2, 1, 3), lwd=2)
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/liver50.crisk.bart.R
library(BART)
B <- getOption('mc.cores', 1)
figures = getOption('figures', default='NONE')
## load survival package for the advanced lung cancer example
data(lung)
N <- length(lung$status)
table(lung$ph.karno, lung$pat.karno)
## if physician's KPS unavailable, then use the patient's
h <- which(is.na(lung$ph.karno))
lung$ph.karno[h] <- lung$pat.karno[h]
times <- lung$time
delta <- lung$status-1 ##lung$status: 1=censored, 2=dead
##delta: 0=censored, 1=dead
## this study reports time in days rather than weeks or months
## coarsening from days to weeks or months will reduce the computational burden
##times <- ceiling(times/30)
times <- ceiling(times/7) ## weeks
##table(times)
table(delta)
## matrix of observed covariates
x.train <- cbind(lung$sex, lung$age, lung$ph.karno)
## lung$sex: Male=1 Female=2
## lung$age: Age in years
## lung$ph.karno: Karnofsky performance score (dead=0:normal=100:by=10)
## rated by physician
dimnames(x.train)[[2]] <- c('M(1):F(2)', 'age(39:82)', 'ph.karno(50:100:10)')
table(x.train[ , 1])
summary(x.train[ , 2])
table(x.train[ , 3])
## run one long MCMC chain in one process
## set.seed(99)
## post <- surv.bart(x.train=x.train, times=times, delta=delta, x.test=x.test)
## in the interest of time, consider speeding it up by parallel processing
## run "mc.cores" number of shorter MCMC chains in parallel processes
post <- mc.surv.bart(x.train=x.train, times=times, delta=delta,
mc.cores=B, seed=99)##(, K=50)
pre <- surv.pre.bart(times=times, delta=delta, x.train=x.train,
x.test=x.train)##(, K=50)
K <- pre$K
M <- post$ndpost
pre$tx.test <- rbind(pre$tx.test, pre$tx.test)
pre$tx.test[ , 2] <- c(rep(1, N*K), rep(2, N*K))
## sex pushed to col 2, since time is always in col 1
pred <- predict(post, newdata=pre$tx.test, mc.cores=B)
pd <- matrix(nrow=M, ncol=2*K)
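## pd[ , j] and pd[ , j+K]: S(t_j, x) averaged over the N subjects with sex
## set to male and female respectively (Friedman's partial dependence)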
for(j in 1:K) {
h <- seq(j, N*K, by=K)
pd[ , j] <- apply(pred$surv.test[ , h], 1, mean)
pd[ , j+K] <- apply(pred$surv.test[ , h+N*K], 1, mean)
}
pd.mu <- apply(pd, 2, mean)
pd.025 <- apply(pd, 2, quantile, probs=0.025)
pd.975 <- apply(pd, 2, quantile, probs=0.975)
males <- 1:K
females <- males+K
plot(c(0, pre$times), c(1, pd.mu[males]), type='s', col='blue',
ylim=0:1, ylab='S(t, x)', xlab='t (weeks)')
## main=paste('Advanced Lung Cancer ex. (BART::lung)',
## "Friedman's partial dependence function",
## 'Male (blue) vs. Female (red)', sep='\n'))
lines(c(0, pre$times), c(1, pd.025[males]), col='blue', type='s', lty=2)
lines(c(0, pre$times), c(1, pd.975[males]), col='blue', type='s', lty=2)
lines(c(0, pre$times), c(1, pd.mu[females]), col='red', type='s')
lines(c(0, pre$times), c(1, pd.025[females]), col='red', type='s', lty=2)
lines(c(0, pre$times), c(1, pd.975[females]), col='red', type='s', lty=2)
if(figures!='NONE')
dev.copy2pdf(file=paste(figures, 'lung.pdf', sep='/'))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/lung.surv.bart.R
library(BART)
B <- getOption('mc.cores', 1)
figures = getOption('figures', default='NONE')
## load survival package for the advanced lung cancer example
data(lung)
N <- length(lung$status)
table(lung$ph.karno, lung$pat.karno)
## if physician's KPS unavailable, then use the patient's
h <- which(is.na(lung$ph.karno))
lung$ph.karno[h] <- lung$pat.karno[h]
times <- lung$time
delta <- lung$status-1 ##lung$status: 1=censored, 2=dead
##delta: 0=censored, 1=dead
## this study reports time in days rather than weeks or months
## coarsening from days to weeks or months will reduce the computational burden
##times <- ceiling(times/30)
times <- ceiling(times/7) ## weeks
##table(times)
table(delta)
## matrix of observed covariates
x.train <- cbind(lung$sex, lung$age, lung$ph.karno)
## lung$sex: Male=1 Female=2
## lung$age: Age in years
## lung$ph.karno: Karnofsky performance score (dead=0:normal=100:by=10)
## rated by physician
dimnames(x.train)[[2]] <- c('M(1):F(2)', 'age(39:82)', 'ph.karno(50:100:10)')
table(x.train[ , 1])
summary(x.train[ , 2])
table(x.train[ , 3])
## run one long MCMC chain in one process
## set.seed(99)
## post <- surv.bart(x.train=x.train, times=times, delta=delta, x.test=x.test)
## in the interest of time, consider speeding it up by parallel processing
## run "mc.cores" number of shorter MCMC chains in parallel processes
post <- mc.surv.bart(x.train=x.train, times=times, delta=delta,
mc.cores=B, seed=99, K=100)
pre <- surv.pre.bart(times=times, delta=delta, x.train=x.train,
x.test=x.train, K=100)
K <- pre$K
M <- post$ndpost
NK <- N*K
pre$tx.test <- rbind(pre$tx.test, pre$tx.test)
pre$tx.test[ , 2] <- c(rep(1, N*K), rep(2, N*K))
## sex pushed to col 2, since time is always in col 1
pred <- predict(post, newdata=pre$tx.test, mc.cores=B)
for(i in seq(1, N, by=5)) {
##for(i in 1:N) {
h=(i-1)*K+1:K
if(i==1)
plot(c(0, pre$times), c(1, pred$surv.test.mean[h]),
type='s', col=4, lty=2,
ylim=0:1, ylab='S(t, x)', xlab='t (weeks)')
else lines(c(0, pre$times), c(1, pred$surv.test.mean[h]),
type='s', col=4, lty=2)
lines(c(0, pre$times), c(1, pred$surv.test.mean[h+NK]),
type='s', col=2, lty=3)
}
if(figures!='NONE')
dev.copy2pdf(file=paste(figures, 'lung-ice.pdf', sep='/'))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/lung.surv.ice.R
library(BART)
## load survival package for the advanced lung cancer example
data(lung)
N <- length(lung$status)
table(lung$ph.karno, lung$pat.karno)
## if physician's KPS unavailable, then use the patient's
h <- which(is.na(lung$ph.karno))
lung$ph.karno[h] <- lung$pat.karno[h]
times <- lung$time
delta <- lung$status-1 ##lung$status: 1=censored, 2=dead
##delta: 0=censored, 1=dead
## this study reports time in days rather than weeks or months
## coarsening from days to weeks or months will reduce the computational burden
##times <- ceiling(times/30)
##times <- ceiling(times/7) ## weeks
##table(times)
table(delta)
## matrix of observed covariates
x.train <- cbind(lung$sex, lung$age, lung$ph.karno)
## lung$sex: Male=1 Female=2
## lung$age: Age in years
## lung$ph.karno: Karnofsky performance score (dead=0:normal=100:by=10)
## rated by physician
dimnames(x.train)[[2]] <- c('M(1):F(2)', 'age(39:82)', 'ph.karno(50:100:10)')
table(x.train[ , 1])
summary(x.train[ , 2])
table(x.train[ , 3])
## run one long MCMC chain in one process
## set.seed(99)
## post <- surv.bart(x.train=x.train, times=times, delta=delta, x.test=x.test)
## in the interest of time, consider speeding it up by parallel processing
## run "mc.cores" number of shorter MCMC chains in parallel processes
post <- mc.surv.bart(x.train=x.train, times=times, delta=delta,
K=50, type='lbart', mc.cores=8, seed=99)
pre <- surv.pre.bart(times=times, delta=delta, x.train=x.train,
x.test=x.train, K=50)
K <- pre$K
M <- nrow(post$yhat.train)
pre$tx.test <- rbind(pre$tx.test, pre$tx.test)
pre$tx.test[ , 2] <- c(rep(1, N*K), rep(2, N*K))
## sex pushed to col 2, since time is always in col 1
pred <- predict(post, newdata=pre$tx.test, mc.cores=8)
pd <- matrix(nrow=M, ncol=2*K)
for(j in 1:K) {
h <- seq(j, N*K, by=K)
pd[ , j] <- apply(pred$surv.test[ , h], 1, mean)
pd[ , j+K] <- apply(pred$surv.test[ , h+N*K], 1, mean)
}
pd.mu <- apply(pd, 2, mean)
pd.025 <- apply(pd, 2, quantile, probs=0.025)
pd.975 <- apply(pd, 2, quantile, probs=0.975)
males <- 1:K
females <- males+K
par(mfrow=c(2, 1))
plot(c(0, pre$times), c(1, pd.mu[males]), type='s', col='blue',
ylim=0:1, ylab='S(t, x)', xlab='t (days)', ##xlab='t (weeks)',
main=paste('Advanced Lung Cancer ex. (BART::lung)',
"Friedman's partial dependence function",
'Top: Logistic BART, Bottom: Probit BART',
sep='\n'))
lines(c(0, pre$times), c(1, pd.025[males]), col='blue', type='s', lty=2)
lines(c(0, pre$times), c(1, pd.975[males]), col='blue', type='s', lty=2)
lines(c(0, pre$times), c(1, pd.mu[females]), col='red', type='s')
lines(c(0, pre$times), c(1, pd.025[females]), col='red', type='s', lty=2)
lines(c(0, pre$times), c(1, pd.975[females]), col='red', type='s', lty=2)
source(system.file('demo/lung.surv.bart.R', package='BART'))
par(mfrow=c(1, 1))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/lung.surv.lbart.R
library(BART)
set.seed(12)
N=50
P=3
x.train=matrix(runif(N*P, -1, 1), nrow=N, ncol=P)
y=x.train[ , 1]^3
x.miss=matrix(1*(runif(N*P)<0.05), nrow=N, ncol=P)
x.train=x.train*(1-x.miss)
x.train[x.train==0]=NA
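## roughly 5% of the covariate values were set to NA above: this demo
## illustrates that gbart can be run with missing values in x.train
## (they are handled internally) and compares the in-sample and test fits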
post=gbart(x.train, y, x.train)
summary(post$yhat.train.mean)
summary(post$yhat.test.mean)
plot(post$yhat.train.mean, post$yhat.test.mean)
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/missing.gbart.R
library(BART)
B <- getOption('mc.cores', 1)
figures = getOption('figures', default='NONE')
data(arq)
str(arq)
arth <- as.matrix(arq)
N <- length(arth[ , 'riagendr'])
table(arth[ , 'riagendr'])
summary(arth[ , 'bmxbmi'])
post1 <- mc.pbart(x.train=arth[ , 5:10], y.train=arth[ , 4],
mc.cores=B, seed=99)
post2 <- mc.pbart(x.train=arth[ , 5:10], y.train=arth[ , 3],
mc.cores=B, seed=99)
bmxbmi <- seq(15, 45, by=5)
H <- length(bmxbmi)
for(i in 1:2)
for(j in 1:H) {
x. <- arth[ , 5:10]
x.[ , 'riagendr'] <- i
x.[ , 'bmxbmi'] <- bmxbmi[j]
if(i==1 && j==1) x.test <- x.
else x.test <- rbind(x.test, x.)
}
table(x.test[ , 'riagendr'])
table(x.test[ , 'bmxbmi'])
pred1 <- predict(post1, newdata=x.test, mc.cores=B)
pred2 <- predict(post2, newdata=x.test, mc.cores=B)
M <- nrow(pred1$prob.test)
##Friedman's partial dependence function
pd1 <- matrix(nrow=M, ncol=H)
pd2 <- matrix(nrow=M, ncol=H)
par(mfrow=c(1, 2))
for(j in 1:H) {
h <- (j-1)*N
pd1[ , j] <- apply(pred1$prob.test[ , h+1:N], 1, mean)
h <- h+N*H
pd2[ , j] <- apply(pred1$prob.test[ , h+1:N], 1, mean)
}
pd1.mean <- apply(pd1, 2, mean)
pd2.mean <- apply(pd2, 2, mean)
pd1.025 <- apply(pd1, 2, quantile, probs=0.025)
pd2.025 <- apply(pd2, 2, quantile, probs=0.025)
pd1.975 <- apply(pd1, 2, quantile, probs=0.975)
pd2.975 <- apply(pd2, 2, quantile, probs=0.975)
plot(bmxbmi, pd1.mean, type='l', col='blue',
ylim=0:1, xlab='BMI', ylab=expression(p(x)),
sub='Low-back pain: M(blue) vs. F(red)')
##sub='Low-back/buttock pain: M(blue) vs. F(red)')
lines(bmxbmi, pd1.025, type='l', col='blue', lty=2)
lines(bmxbmi, pd1.975, type='l', col='blue', lty=2)
lines(bmxbmi, pd2.mean, type='l', col='red')
lines(bmxbmi, pd2.025, type='l', col='red', lty=2)
lines(bmxbmi, pd2.975, type='l', col='red', lty=2)
lines(bmxbmi, rep(0, H), type='l')
lines(bmxbmi, rep(1, H), type='l')
for(j in 1:H) {
h <- (j-1)*N
pd1[ , j] <- apply(pred2$prob.test[ , h+1:N], 1, mean)
h <- h+N*H
pd2[ , j] <- apply(pred2$prob.test[ , h+1:N], 1, mean)
}
pd1.mean <- apply(pd1, 2, mean)
pd2.mean <- apply(pd2, 2, mean)
pd1.025 <- apply(pd1, 2, quantile, probs=0.025)
pd2.025 <- apply(pd2, 2, quantile, probs=0.025)
pd1.975 <- apply(pd1, 2, quantile, probs=0.975)
pd2.975 <- apply(pd2, 2, quantile, probs=0.975)
plot(bmxbmi, pd1.mean, type='l', col='blue',
ylim=0:1, xlab='BMI', ylab=expression(p(x)),
sub='Neck pain: M(blue) vs. F(red)')
lines(bmxbmi, pd1.025, type='l', col='blue', lty=2)
lines(bmxbmi, pd1.975, type='l', col='blue', lty=2)
lines(bmxbmi, pd2.mean, type='l', col='red')
lines(bmxbmi, pd2.025, type='l', col='red', lty=2)
lines(bmxbmi, pd2.975, type='l', col='red', lty=2)
lines(bmxbmi, rep(0, H), type='l')
lines(bmxbmi, rep(1, H), type='l')
par(mfrow=c(1, 1))
if(figures!='NONE')
dev.copy2pdf(file=paste(figures, 'chronic-pain1.pdf', sep='/'))
##dev.copy2pdf(file='../vignettes/figures/chronic-pain1.pdf')
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/nhanes.pbart1.R
library(BART)
B <- getOption('mc.cores', 1)
figures = getOption('figures', default='NONE')
data(arq)
str(arq)
arth <- as.matrix(arq)
N <- length(arth[ , 'riagendr'])
table(arth[ , 'riagendr'])
summary(arth[ , 'bmxbmi'])
post1 <- mc.pbart(x.train=arth[ , 5:10], y.train=arth[ , 4],
mc.cores=B, seed=99)
post2 <- mc.pbart(x.train=arth[ , 5:10], y.train=arth[ , 3],
mc.cores=B, seed=99)
bmxbmi <- seq(15, 45, by=5)
H <- length(bmxbmi)
for(i in 1:2)
for(j in 1:H) {
x. <- arth[ , 5:10]
x.[ , 'riagendr'] <- i
x.[ , 'bmxbmi'] <- bmxbmi[j]
if(i==1 && j==1) x.test <- x.
else x.test <- rbind(x.test, x.)
}
table(x.test[ , 'riagendr'])
table(x.test[ , 'bmxbmi'])
pred1 <- predict(post1, newdata=x.test, mc.cores=B)
pred2 <- predict(post2, newdata=x.test, mc.cores=B)
M <- nrow(pred1$prob.test)
##Friedman's partial dependence function
pd1 <- matrix(nrow=M, ncol=H)
pd2 <- matrix(nrow=M, ncol=H)
k <- (H+2)*N ## baseline: 25 BMI for women
##k <- 2*N ## baseline: 25 BMI for men
for(j in 1:H) {
h <- (H+j-1)*N ## women
##h <- (j-1)*N ## men
pd1[ , j] <- apply(pred1$prob.test[ , h+1:N]-
pred1$prob.test[ , k+1:N], 1, mean)
pd2[ , j] <- apply(pred2$prob.test[ , h+1:N]-
pred2$prob.test[ , k+1:N], 1, mean)
}
pd1.mean <- apply(pd1, 2, mean)
pd2.mean <- apply(pd2, 2, mean)
pd1.025 <- apply(pd1, 2, quantile, probs=0.025)
pd2.025 <- apply(pd2, 2, quantile, probs=0.025)
pd1.975 <- apply(pd1, 2, quantile, probs=0.975)
pd2.975 <- apply(pd2, 2, quantile, probs=0.975)
par(mfrow=c(1, 2))
plot(bmxbmi, pd1.mean, type='l', col='blue',
ylim=c(-0.2, 0.2),
## ylim=c(min(pd1.025, pd2.025, -pd1.975, -pd2.975),
## max(-pd1.025, -pd2.025, pd1.975, pd2.975)),
xlab='BMI', ylab=expression(p(x)-p(25)),
sub='Chronic pain: low-back(blue)')
##sub='Chronic pain: low-back/buttock(blue)')
lines(bmxbmi, pd1.025, type='l', col='blue', lty=2)
lines(bmxbmi, pd1.975, type='l', col='blue', lty=2)
lines(bmxbmi, rep(0, H))
plot(bmxbmi, pd2.mean, type='l', col='red',
ylim=c(-0.2, 0.2),
## ylim=c(min(pd1.025, pd2.025, -pd1.975, -pd2.975),
## max(-pd1.025, -pd2.025, pd1.975, pd2.975)),
xlab='BMI', ylab=expression(p(x)-p(25)),
sub='Chronic pain: neck(red)')
lines(bmxbmi, pd2.025, type='l', col='red', lty=2)
lines(bmxbmi, pd2.975, type='l', col='red', lty=2)
lines(bmxbmi, rep(0, H))
par(mfrow=c(1, 1))
if(figures!='NONE')
dev.copy2pdf(file=paste(figures, 'chronic-pain2.pdf', sep='/'))
##dev.copy2pdf(file='../vignettes/figures/chronic-pain2.pdf')
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/nhanes.pbart2.R
library(BART)
library(MASS)
##options(figures='../vignettes/figures')
B <- getOption('mc.cores', 1)
figures = getOption('figures', default='NONE')
y = Boston$medv # median value
x.train = as.matrix(cbind(Boston[ , -c(5, 14)], Boston[ , 5]))
dimnames(x.train)[[2]][13] = 'nox'
N=length(y) ## total sample size
post = mc.gbart(x.train, y, mc.cores=B, seed=99)
L=41
x=seq(min(x.train[ , 13]), max(x.train[ , 13]), length.out=L)
x.test = cbind(x.train[ , -13], x[1])
dimnames(x.test)[[2]][13]='nox' ## x.test is a matrix, so set the column name via dimnames
for(j in 2:L)
x.test = rbind(x.test, cbind(x.train[ , -13], x[j]))
pred = predict(post, x.test, mc.cores=B)
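## Friedman's partial dependence for nox: x.test stacks L copies of the
## training data, each with nox fixed at grid value x[j]; partial[ , j]
## averages the predictions over the N training rows for that grid value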
partial = matrix(nrow=1000, ncol=L)
for(j in 1:L) {
h=(j-1)*N+1:N
partial[ , j] = apply(pred[ , h], 1, mean)
}
plot(x, apply(partial, 2, mean), type='l', lwd=2,
##xlab='nox', ylab='medv',
xlab='nox: Nitrogen Oxides air pollution',
ylab='medv: median home value (in thousands)',
ylim=c(0, 50))
lines(x, apply(partial, 2, quantile, probs=0.025), lty=2, lwd=2)
lines(x, apply(partial, 2, quantile, probs=0.975), lty=2, lwd=2)
abline(h=c(0, 50), col='gray')
## uncomment for an ICE plot
## for(i in seq(1, N, by=10))
## lines(x, apply(pred[ , seq(i, N*L, by=N)], 2, mean), type='l',
## lty=3, col='green')
## model similar to that presented in Harrison and Rubinfeld (1978)
fit=lm(log(y)~I(rm^2)+age+I(log(dis))+I(log(rad))+tax+ptratio+
I((black-0.63)^2)+I(log(lstat))+crim+zn+indus+chas+
I((nox-0.55)^2), data=Boston)
summary(fit)
lines(x, mean(y)*exp((x-0.55)^2*fit$coefficients[14]), lty=3, col='red', lwd=2)
if(figures!='NONE')
dev.copy2pdf(file=paste(figures, 'nox.pdf', sep='/'))
if(figures!='NONE')
dev.copy2eps(file=paste(figures, 'nox.eps', sep='/'))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/nox.R
library(BART)
## simulate recurrent events data set with Exponential nonproportional intensity
N <- 250
K <- 60
NK <- N*K
C <- 8
set.seed(-1)
x <- matrix(nrow=NK, ncol=23)
dimnames(x)[[2]] <- c('t', 'v', 'N', paste0('x', 1:20))
b <- c(1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.5, 0, 0, 0, 0, 0, 0, 0, 0, 0)
N. <- double(NK)
y <- integer(NK)
cum <- double(NK)
k <- 1
for(i in 1:N) {
v <- 0
x[k, 4:13] <- runif(10)
x[k, 14:23] <- rbinom(10, 1, 0.5)
for(j in 1:K) {
x[k, 1:3] <- c(j, j-v, N.[k])
if(j>1) x[k, 4:23] <- x[k-1, 4:23]
alpha <- 0.0001*exp(sum(b*x[k, 4:23])*2*(N.[k]+1)/sqrt(j))
cum[k] <- pexp(30, alpha)
y[k] <- rbinom(1, 1, cum[k])
if(y[k]==1) v <- j
if(j>1) cum[k] <- cum[k-1]+cum[k]
if(j<K) N.[k+1] <- N.[k]+y[k]
k <- k+1
}
}
table(x[K*(1:N), 3])
table(x[K*(1:N), 3])/N
post <- mc.recur.bart(x.train=x, y.train=y, x.test=x,
nskip=1000, keepevery=100,
sparse=TRUE, mc.cores=C, seed=99)
print(cor(cum, post$cum.test.mean)^2)
par(mfrow=c(1, 2))
for(i in 1:N) {
j <- (i-1)*K+1:K
if(i==1) plot(1:K, cum[j], type='l',
xlab='t', ylab=expression(Lambda(t, x)),
sub='Known Values', ylim=c(0, 20))
else lines(1:K, cum[j], col=i)
}
for(i in 1:N) {
j <- (i-1)*K+1:K
if(i==1) plot(1:K, post$cum.test.mean[j], type='l',
xlab='t', ylab=expression(Lambda(t, x)),
sub='Estimates', ylim=c(0, 20))
else lines(1:K, post$cum.test.mean[j], col=i)
}
par(mfrow=c(1, 1))
dev.copy2pdf(file='np-recur-bart.pdf')
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/np.recur.bart.R
library(BART)
## replication script for JSS article
## directory to create graphics files in, if any
## if option not specified, then default assumed to be 'NONE'
options(figures='.')
## options(figures='../vignettes/figures')
## for single-threading, specify one core
## Windows lacks forking (and generally lacks OpenMP)
## so single-threading only
## for multi-threading, specify the number of cores
## a Unix-like OS provides forking for multi-threading
## (and often OpenMP is available as well)
if(.Platform$OS.type=='unix') {
## there are diminishing returns so often 8 cores is sufficient
options(mc.cores=min(8, parallel::detectCores()))
} else {
options(mc.cores=1)
}
## uncomment these options to compare multiple threading
## with a single thread as a double-check of the package
## due to random seed/stream progression the results will
## NOT be identical, but they should be very comparable
## options(mc.cores=1, figures='../vignettes/single')
## Section 3, The Boston Housing Data including Figures 1-6
source(system.file('demo/boston.R', package='BART'))
## Section 4.2, Probit BART Example: Chronic Pain and Obesity
## Figure 7
source(system.file('demo/nhanes.pbart1.R', package='BART'))
## Figure 8
source(system.file('demo/nhanes.pbart2.R', package='BART'))
## Section 4.4, Multinomial BART Example: Alligator Food Preference
## Figure 9
source(system.file('demo/alligator.R', package='BART'))
## Section 4.5, Convergence Diagnostics for Binary and Categorical Outcomes
## Figures 10-12
source(system.file('demo/geweke.pbart2.R', package='BART'))
## Section 4.6, BART and Variable Selection
## Figure 13
source(system.file('demo/sparse.pbart.R', package='BART'))
## Section 5.1, Survival Analysis with BART Example: Advanced Lung Cancer
## Figure 14
source(system.file('demo/lung.surv.bart.R', package='BART'))
## Section 5.3, Competing Risks with BART Example: Liver Transplants
## Figure 15
source(system.file('demo/liver.crisk.bart.R', package='BART'))
## Section 5.4, Recurrent Events with BART Example: Bladder Tumors}
## Figures 16-18
source(system.file('demo/bladder.recur.bart.R', package='BART'))
figures = getOption('figures', default='NONE')
## Figure 19
library(Rcpp)
dbetapr = function (x, shape1, shape2, scale = 1, log = FALSE)
cpp_dbetapr(x, shape1, shape2, scale, log[1L])
sourceCpp(code='
#include <Rcpp.h>
#define GETV(x, i) x[i % x.length()] // wrapped indexing of vector
// [[Rcpp::plugins(cpp11)]]
using std::pow;
using std::sqrt;
using std::abs;
using std::exp;
using std::log;
using std::floor;
using std::ceil;
using Rcpp::NumericVector;
using std::log1p;
/*
* Beta prime distribution
* Values:
* x > 0
* Parameters:
* alpha > 0
* beta > 0
* sigma > 0
*/
inline double logpdf_betapr(double x, double alpha, double beta,
double sigma, bool& throw_warning) {
#ifdef IEEE_754
if (ISNAN(x) || ISNAN(alpha) || ISNAN(beta) || ISNAN(sigma))
return x+alpha+beta+sigma;
#endif
if (alpha <= 0.0 || beta <= 0.0 || sigma <= 0.0) {
throw_warning = true;
return NAN;
}
if (x <= 0.0 || !R_FINITE(x))
return R_NegInf;
double z = x / sigma;
// pow(z, alpha-1.0) * pow(z+1.0, -alpha-beta) / R::beta(alpha, beta) / sigma;
return log(z) * (alpha-1.0) + log1p(z) * (-alpha-beta) -
R::lbeta(alpha, beta) - log(sigma);
}
// [[Rcpp::export]]
NumericVector cpp_dbetapr(
const NumericVector& x,
const NumericVector& alpha,
const NumericVector& beta,
const NumericVector& sigma,
const bool& log_prob = false
) {
if (std::min({x.length(), alpha.length(),
beta.length(), sigma.length()}) < 1) {
return NumericVector(0);
}
int Nmax = std::max({
x.length(),
alpha.length(),
beta.length(),
sigma.length()
});
NumericVector p(Nmax);
bool throw_warning = false;
for (int i = 0; i < Nmax; i++)
p[i] = logpdf_betapr(GETV(x, i), GETV(alpha, i),
GETV(beta, i), GETV(sigma, i),
throw_warning);
if (!log_prob)
p = Rcpp::exp(p);
if (throw_warning)
Rcpp::warning("NaNs produced");
return p;
}
')
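## optional sanity check: for shape1=shape2=scale=1 the beta prime density at
## x=1 is (1+x)^-2 = 0.25, so dbetapr(1, 1, 1) should return 0.25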
x=seq(0, 5, length.out=1001)
plot(x, dbetapr(x, 0.5, 1, 0.5), col='gray', type='l', lty=4, lwd=2,
log='y', xlab=expression(italic(x)),
ylab=expression(italic(log(f(x, a, b, rho/P)))))
lines(x, dbetapr(x, 0.5, 1, 1), col='blue', lty=3, lwd=2)
lines(x, dbetapr(x, 1, 1, 0.5), col='red', lty=2, lwd=2)
lines(x, dbetapr(x, 1, 1, 1), lwd=2)
legend('topright', col=c('black', 'red', 'blue', 'gray'), lwd=2, lty=1:4,
legend=c(expression(italic(log(f(x, 1, 1, 1)))),
expression(italic(log(f(x, 1, 1, 0.5)))),
expression(italic(log(f(x, 0.5, 1, 1)))),
expression(italic(log(f(x, 0.5, 1, 0.5))))))
if(figures!='NONE')
dev.copy2pdf(file=paste(figures, 'sparse-beta-prime.pdf', sep='/'))
##Figure 20
plot(0, 1, type='h', lwd=3,
xlim=c(-0.5, 5.5), ylim=c(0, 1),
ylab='Proportionate length of chain processing time', xlab='Chains')
for(i in 1:5) lines(i, 0.28, type='h', lwd=3, col=2)
lines(c(-0.25, 5.25), c(0.1, 0.1), type='l', lty=2)
text(-0.35, 0.1, labels='b')
if(figures!='NONE')
dev.copy2pdf(file=paste(figures, 'parallel.pdf', sep='/'))
##Figure 21
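## Amdahl's law: with B CPUs and a serial fraction b of the work, the
## attainable speedup is 1/(b + (1-b)/B), computed below for b=0.025 and 0.1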
C <- 1:64
for(b in c(0.025, 0.1)) {
amdahl <- 1/(b+(1-b)/C)
if(b==0.025)
plot(C, amdahl, type='l', col=2,
xlim=c(1, 80), ylim=c(0, 30), log='x',
xlab='B: number of CPU', ylab='Gain')
else
lines(C, amdahl, type='l')
text(78, amdahl[64], labels=paste0(b))
}
if(figures!='NONE')
dev.copy2pdf(file=paste(figures, 'amdahl.pdf', sep='/'))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/replication.R
library(BART)
N = 1000
P = 5 #number of covariates
C = 8
set.seed(12)
x.train=matrix(runif(N*P, -2, 2), N, P)
Ey.train = x.train[ , 1]^3
y.train=rnorm(N, Ey.train, 3)
##run BART with C cores in parallel
post.est = mc.gbart(x.train, y.train, mc.cores=C, seed=99,
sigest=3)
post.fix = mc.gbart(x.train, y.train, mc.cores=C, seed=99,
lambda=0, sigest=3)
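## with lambda=0, sigma is treated as known and held fixed at sigest, whereas
## post.est samples sigma; the plot and correlation below compare the two fits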
plot(post.est$yhat.train.mean, post.fix$yhat.train.mean)
print(cor(post.est$yhat.train.mean, post.fix$yhat.train.mean))
## end of /scratch/gouwar.j/cran-all/cranData/BART/demo/sigma.known.gbart.R
library(BART)
##simulate from Friedman's five-dimensional test function
##Friedman JH. Multivariate adaptive regression splines
##(with discussion and a rejoinder by the author).
##Annals of Statistics 1991; 19:1-67.
f = function(x) #only the first 5 matter
sin(pi*x[ , 1]*x[ , 2]) + 2*(x[ , 3]-.5)^2+x[ , 4]+0.5*x[ , 5]-1.5
sigma = 1.0 #y = f(x) + sigma*z where z~N(0, 1)
P = 100 #number of covariates
C = 8
thin <- c(10, 50, 250)
par(mfrow=c(3, 1))
post <- as.list(1:3)
for(i in 1:3) {
N <- 10^(1+i)
set.seed(12)
x.train=matrix(runif(N*P), N, P)
Ey.train = f(x.train)
y.train=((Ey.train+sigma*rnorm(N))>0)*1
post[[i]] = mc.lbart(x.train, y.train, mc.cores=C,
keepevery=1, sparse=TRUE, seed=99)
plot(post[[i]]$varprob.mean, col=c(rep(2, 5), rep(1, P-5)),
main=paste0('N:', N, ', P:', P, ', thin:', thin[i]),
ylab='Selection Probability', ylim=c(0, 0.2),
pch=1+45*(post[[i]]$varprob.mean <= 1/P))
lines(c(0, 100), c(1/P, 1/P))
table(1+45*(post[[i]]$varprob.mean <= 1/P))
}
par(mfrow=c(1, 1))
##dev.copy2pdf(file='sparse-lbart.pdf')
| /scratch/gouwar.j/cran-all/cranData/BART/demo/sparse.lbart.R |
library(BART)
B <- getOption('mc.cores', 1)
figures = getOption('figures', default='NONE')
##simulate from Friedman's five-dimensional test function
##Friedman JH. Multivariate adaptive regression splines
##(with discussion and a rejoinder by the author).
##Annals of Statistics 1991; 19:1-67.
f = function(x) #only the first 5 matter
sin(pi*x[ , 1]*x[ , 2]) + 2*(x[ , 3]-.5)^2+x[ , 4]+0.5*x[ , 5]-1.5
sigma = 1.0 #y = f(x) + sigma*z where z~N(0, 1)
P = 100 #number of covariates
thin <- c(10, 10, 10)
n <- c(200, 1000, 5000)
post <- as.list(1:3)
for(i in 1:3) {
N <- n[i]
set.seed(12)
x.train=matrix(runif(N*P), N, P)
Ey.train = f(x.train)
y.train=((Ey.train+sigma*rnorm(N))>0)*1
post[[i]] = mc.pbart(x.train, y.train, mc.cores=B,
keepevery=thin[i], sparse=TRUE, seed=99)
}
par(mfrow=c(3, 1))
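## Plotting note: under the sparse (DART) prior each covariate gets a posterior
## mean selection probability (varprob.mean).  Variables above the 1/P reference
## line are drawn as open circles (pch=1) and treated as selected; the rest are
## drawn as dots (pch=46).  The first five covariates (plotted in red) are the
## truly active ones in Friedman's test function.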
for(i in 1:3) {
N <- n[i]
plot(post[[i]]$varprob.mean, col=c(rep(2, 5), rep(1, P-5)),
main=paste0('N:', N, ', P:', P, ', thin:', thin[i]),
ylab='Selection Probability', ylim=c(0, 0.3),
pch=1+45*(post[[i]]$varprob.mean <= 1/P))
lines(c(0, 100), c(1/P, 1/P))
table(1+45*(post[[i]]$varprob.mean <= 1/P))
}
par(mfrow=c(1, 1))
if(figures!='NONE')
dev.copy2pdf(file=paste(figures, 'sparse-pbart.pdf', sep='/'))
| /scratch/gouwar.j/cran-all/cranData/BART/demo/sparse.pbart.R |
library(BART)
##simulate from Friedman's five-dimensional test function
##Friedman JH. Multivariate adaptive regression splines
##(with discussion and a rejoinder by the author).
##Annals of Statistics 1991; 19:1-67.
f = function(x) #only the first 5 matter
sin(pi*x[ , 1]*x[ , 2]) + 2*(x[ , 3]-.5)^2+x[ , 4]+0.5*x[ , 5]-1.5
sigma = 1.0 #y = f(x) + sigma*z where z~N(0, 1)
k = 100 #number of covariates
ndpost = 1000
nskip = 100
C = 8
par(mfrow=c(3, 2))
post <- as.list(1:6)
for(i in 1:3) {
n <- 10^(1+i)
set.seed(12)
x.train=matrix(runif(n*k), n, k)
Ey.train = f(x.train)
y.train=Ey.train+sigma*rnorm(n)
for(j in c(TRUE, FALSE)) {
h <- (i-1)*2+j+1
post[[h]] = mc.wbart(x.train, y.train, mc.cores=C, sparse=TRUE,
augment=j, seed=99, ndpost=ndpost, nskip=nskip)
plot(post[[h]]$varprob.mean, col=c(rep(2, 5), rep(1, k-5)),
main=paste0('N:', n, ', P:', k, ', Assumption:', c(2.2, 2.1)[j+1]),
##sub=expression(-1.5+sin(pi*x[1]*x[2]) + 2*(x[3]-.5)^2+x[4]+0.5*x[5]),
ylab='Selection Probability', ylim=0:1)
lines(c(0, 100), c(1/k, 1/k))
}
}
par(mfrow=c(1, 1))
##dev.copy2pdf(file='sparse.wbart.pdf')
## check1=pwbart(x.train, post[[6]]$treedraws, post[[6]]$mu)
## plot(apply(check1, 2, mean), post[[6]]$yhat.train.mean)
## check2=pwbart(x.train, post[[6]]$treedraws, post[[6]]$mu, dodraws=FALSE)
## plot(check2, post[[6]]$yhat.train.mean)
## check1=mc.pwbart(x.train, post[[6]]$treedraws, post[[6]]$mu, mc.cores=C)
## plot(apply(check1, 2, mean), post[[6]]$yhat.train.mean)
## check2=mc.pwbart(x.train, post[[6]]$treedraws, post[[6]]$mu, mc.cores=C,
## dodraws=FALSE)
## plot(check2, post[[6]]$yhat.train.mean)
| /scratch/gouwar.j/cran-all/cranData/BART/demo/sparse.wbart.R |
library(BART)
T <- 1
MU <- 0
set.seed(12)
lambda <- draw_lambda_i(1, MU)
rtnorm(1, MU, sqrt(lambda), T)
##rtnorm(MU, T, sqrt(lambda))
set.seed(12)
N <- 10000
lambda <- draw_lambda_i(1, MU)
y <- rtnorm(N, MU, sqrt(lambda), T)
##y <- rtnorm(MU, T, sqrt(lambda))
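## Editorial note: draw_lambda_i draws the mixing scale lambda in the
## scale-mixture-of-normals representation of the logistic distribution (as used
## by lbart).  The intent of this demo is that, after mixing over lambda, the
## truncated normal draws y should follow a logistic distribution truncated at T,
## which the density overlay below compares against.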
for(i in 2:N) {
lambda[i] <- draw_lambda_i(lambda[i-1], MU)
##y[i] <- rtnorm(MU, T, sqrt(lambda[i]))
}
x <- seq(T, T+2, length.out=1000)
plot(x, dlogis(x, MU, 1)/plogis(T, MU, 1, lower.tail=FALSE),
lty=2, type='l',
ylab=expression(Logistic(MU, 1)))
lines(density(y))
abline(v=T)
##dev.copy2pdf(file='test.draw_lambda_i.pdf')
| /scratch/gouwar.j/cran-all/cranData/BART/demo/test.draw_lambda_i.R |
library(BART)
N <- 1
A <- 3
SHAPE <- 5
RATE <- 0.5
set.seed(12)
rtgamma(N, SHAPE, RATE, A)
set.seed(12)
rtgamma(N, SHAPE, RATE, A)
set.seed(12)
N <- 10000
y <- 0
y <- rtgamma(N, SHAPE, RATE, A)
##for(i in 1:N) y[i] <- rtgamma(SHAPE, RATE, A)
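## The check below overlays the exact Gamma(SHAPE, RATE) density renormalized to
## (A, Inf) (dashed) against a kernel density estimate of the truncated draws
## (red); the two curves should agree if rtgamma samples correctly.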
x <- seq(A, 4*A, length.out=1000)
plot(x, dgamma(x, SHAPE, RATE)/pgamma(A, SHAPE, RATE, lower.tail=FALSE),
lty=2, type='l', ylim=c(0, 1),
ylab=expression(Gam(SHAPE, RATE)))
lines(density(y, from=A), col='red')
abline(v=A)
dev.copy2pdf(file='test.rtgamma.pdf')
| /scratch/gouwar.j/cran-all/cranData/BART/demo/test.rtgamma.R |
library(BART)
N <- 1
T <- 8
MU <- 5
SD <- 0.5
set.seed(12)
rtnorm(N, MU, SD, T)
set.seed(12)
rtnorm(N, MU, SD, T)
set.seed(12)
N <- 10000
y <- rtnorm(N, MU, SD, T)
x <- seq(T, T+2*SD, length.out=1000)
plot(x, dnorm(x, MU, SD)/pnorm(T, MU, SD, lower.tail=FALSE),
lty=2, type='l',
ylab=expression(N(MU, SD^2)))
lines(density(y, from=T))
abline(v=T)
##dev.copy2pdf(file='test.rtnorm.pdf')
| /scratch/gouwar.j/cran-all/cranData/BART/demo/test.rtnorm.R |
library(BART)
set.seed(12)
N=500
P=501
X=matrix(runif(N*P, -1, 1), nrow=N, ncol=P)
dimnames(X)[[2]]=paste0('x', 1:P)
y=rnorm(N, (X[ , 1]^3)+(X[ , 2]^3)+(X[ , 3]^3)+(X[ , 4]^3)+(X[ , 5]^3))
T=exp(y)
C=rexp(N, 0.65)
delta=(T<C)*1
table(delta)/N
times=T*delta+C*(1-delta)
check=srstepwise(X, times, delta)
print(check)
| /scratch/gouwar.j/cran-all/cranData/BART/demo/test.srstepwise.R |
library(BART)
f = function(x) #only the first 5 matter
sin(pi*x[ , 1]*x[ , 2]) + 2*x[ , 3]*x[ , 4]^2 + x[ , 5]
N = 1000
sigma = 1.0 #y = f(x) + sigma*z where z~N(0, 1)
P = 10 #number of covariates
V = diag(P)
V[5, 6] = 0.8
V[6, 5] = 0.8
L <- chol(V)
set.seed(12)
x.train=matrix(rnorm(N*P), N, P) %*% L
dimnames(x.train)[[2]] <- paste0('x', 1:P)
round(cor(x.train), digits=2)
Ey.train = f(x.train)
y.train=((Ey.train+sigma*rnorm(N))>0)*1
table(y.train)
set.seed(21)
post = pbart(x.train, y.train, sparse=TRUE)
post$varprob.mean>1/P
##write(post$treedraws$trees, 'trees.pbart.txt')
tc <- textConnection(post$treedraws$tree)
trees <- read.table(file=tc, fill=TRUE,
row.names=NULL, header=FALSE,
col.names=c('node', 'var', 'cut', 'leaf'))
close(tc)
m <- 1 ## MCMC samples
M <- trees$node[1]
n <- 0 ## trees
H <- trees$var[1]
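## The 'branch' matrix accumulates, across all posterior tree draws, how often
## each pair of variables is used to split along the same root-to-node path;
## pairs that frequently co-occur on a branch are candidates for interactions
## (in this demo the true signal involves x1:x2 and x3:x4, while x5 and x6 are
## merely correlated).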
branch <- matrix(0, nrow=P, ncol=P)
dimnames(branch)[[1]] <- paste0('x', 1:P)
dimnames(branch)[[2]] <- paste0('x', 1:P)
L <- nrow(trees)
for(l in 2:L) {
if(is.na(trees$leaf[l])) {
n <- n+1
if(n>H) {
n <- 1
m <- m+1
}
C <- trees$node[l] ## nodes in tree
B <- (C-1)/2 ## branches in tree
i <- 0
j <- 0
if(C>1) vars <- integer(C)
branch. <- 0*branch
}
else if(B>1) {
i <- i+1
h <- trees$node[l]
if(i<C) {
t <- floor(log2(h))
k <- h-2^t
if(trees$node[l+1]==(2^(t+1)+2*k)) {
vars[h] <- trees$var[l]+1
j <- j+1
if(j>B) stop('Too many branches')
}
}
else {
for(h. in (C-1):2) {
h <- h.
j <- vars[h]
if(j!=0)
for(t in (floor(log2(h))-1):0) {
if((h%%2)==0) k <- (h-2^(t+1))/2
else k <- (h-2^(t+1)-1)/2
h <- 2^t+k
i <- vars[h]
if(i!=j) branch.[min(i, j), max(i, j)] <- 1
vars[h] <- 0
}
}
branch <- branch+branch.
}
}
}
C <- sum(c(branch))
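## Convert counts to proportions: the upper triangle keeps the raw co-occurrence
## counts, and the lower triangle (filled in below) divides each count by the
## total C, so entries below the diagonal are proportions of within-branch pairs.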
for(i in 1:(P-1))
for(j in (i+1):P)
if(i!=j) branch[j, i] <- branch[i, j]/C
round(branch, digits=2)
| /scratch/gouwar.j/cran-all/cranData/BART/demo/trees.pbart.R |
#' @title BAS: Bayesian Model Averaging using Bayesian Adaptive Sampling
#'
#' @description Implementation of Bayesian Model Averaging in linear models using stochastic or
#' deterministic sampling without replacement from posterior distributions.
#' Prior distributions on coefficients are of the form of Zellner's g-prior or
#' mixtures of g-priors. Options include the Zellner-Siow Cauchy Priors, the
#' Liang et al hyper-g priors, Local and Global Empirical Bayes estimates of g,
#' and other default model selection criteria such as AIC and BIC. Sampling
#' probabilities may be updated based on the sampled models.
#'
#' @docType package
#' @name BAS
#'
#'
#' @author Merlise Clyde, \cr Maintainer: Merlise Clyde <clyde@@stat.duke.edu>
#' @seealso \code{\link{bas.lm}} \code{\link{bas.glm}}
#
#' @examples
#' data("Hald")
#' hald.gprior = bas.lm(Y ~ ., data=Hald, alpha=13, prior="g-prior")
#'
#' # more complete demos
#'
#' demo(BAS.hald)
#' \dontrun{
#' demo(BAS.USCrime)
#' }
#'
#' @references Clyde, M. Ghosh, J. and Littman, M. (2010) Bayesian Adaptive
#' Sampling for Variable Selection and Model Averaging. Journal of
#' Computational Graphics and Statistics. 20:80-101 \cr
#' \doi{10.1198/jcgs.2010.09049}
#'
#' Clyde, M. and George, E. I. (2004) Model uncertainty. Statist. Sci., 19,
#' 81-94. \cr \doi{10.1214/088342304000000035}
#'
#' Clyde, M. (1999) Bayesian Model Averaging and Model Search Strategies (with
#' discussion). In Bayesian Statistics 6. J.M. Bernardo, A.P. Dawid, J.O.
#' Berger, and A.F.M. Smith eds. Oxford University Press, pages 157-185.
#'
#' Li, Y. and Clyde, M. (2018) Mixtures of g-priors in Generalized Linear
#' Models. Journal of the American Statistical Association, 113:524, 1828-1845 \doi{10.1080/01621459.2018.1469992}
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. and Berger, J.O. (2008) Mixtures
#' of g-priors for Bayesian Variable Selection. Journal of the American
#' Statistical Association. 103:410-423. \cr
#'
#' \doi{10.1198/016214507000001337}
#'
#' @keywords package regression
#' @import stats
#' @import graphics
#' @import grDevices
#'
#' @useDynLib BAS, .registration=TRUE, .fixes="C_"
#' @family bas methods
#'
NULL
| /scratch/gouwar.j/cran-all/cranData/BAS/R/BAS-package.R |
#' Find the global Empirical Bayes estimates for BMA
#'
#' Finds the global Empirical Bayes estimates of g in Zellner's g-prior and
#' model probabilities
#'
#' Uses the EM algorithm in Liang et al to estimate the type II MLE of g in
#' Zellner's g prior
#'
#' @aliases EB.global EB.global.bas
#' @param object A 'bas' object created by \code{\link{bas}}
#' @param tol tolerance for estimating g
#' @param g.0 initial value for g
#' @param max.iterations Maximum number of iterations for the EM algorithm
#' @return An object of class 'bas' using Zellner's g prior with an estimate of
#' g based on all models
#' @author Merlise Clyde \email{clyde@@stat.duke.edu}
#' @seealso \code{\link{bas}}, \code{\link{update}}
#' @references Liang, F., Paulo, R., Molina, G., Clyde, M. and Berger, J.O.
#' (2008) Mixtures of g-priors for Bayesian Variable Selection. Journal of the
#' American Statistical Association. 103:410-423. \cr
#' \doi{10.1198/016214507000001337}
#' @keywords regression
#' @examples
#'
#' library(MASS)
#' data(UScrime)
#' UScrime[,-2] = log(UScrime[,-2])
#' # EB local uses a different g within each model
#' crime.EBL = bas.lm(y ~ ., data=UScrime, n.models=2^15,
#' prior="EB-local", initprobs= "eplogp")
#' # use a common (global) estimate of g
#' crime.EBG = EB.global(crime.EBL)
#'
#'
#' @rdname EB.global
#' @family coef priors
#' @export
EB.global = function(object, tol= .1, g.0=NULL, max.iterations=100) {
n = object$n
SSY = var(object$Y)*(n-1)
SSE <- (1.0 - object$R2)*SSY
SSR = SSY - SSE
p = object$size - 1
R2 = object$R2
prior = object$priorprobs
postmodelprob <- function(R2, p, n, g, prior=1) {
logmarg <- .5*((n - 1 - p)*log(1 + g) - (n-1)*log( 1 + g*(1 - R2)))
logmarg[p == 0] = 0
modelprob <- exp(logmarg - max(logmarg))
modelprob <- modelprob*prior/sum(modelprob*prior)
if (any(is.na(modelprob))) warning("NA's in modelprobs") # nocov
return(modelprob)
}
best = sort.list(-object$logmarg)[1]
sbest = min(object$shrinkage[best], .99)
if (is.null(g.0)) g.0 = sbest/(1 - sbest)
tau.0 = g.0 + 1
phi = (n - 1)/(SSY - (g.0/(1 + g.0))*SSR)
post.prob = postmodelprob(object$R2,p, n, max(tau.0 - 1, 0), prior)
tau.0 = sum(post.prob*SSR*phi)/(sum(post.prob*p))
tau = tau.0 - 2*tol
it = 0
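  ## EM iterations: the E-step recomputes posterior model probabilities given the
  ## current g (tau = g + 1); the M-step updates tau as the posterior-weighted
  ## ratio of phi*SSR to model size.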
  while (abs(tau - tau.0) > tol & it < max.iterations) { # iterate until convergence or max.iterations
g = max(tau - 1, 0)
phi = (n - 1)/(SSY - (g/(1 + g))*SSR)
post.prob = postmodelprob(object$R2,p, n, g, prior)
tau.0 = tau
tau = sum(post.prob*SSR*phi)/(sum(post.prob*p))
it = it + 1
}
g = max(tau -1, 0)
logmarg = .5*((n -1 - p)*log(1 + g) - (n-1)*log( 1 + g*(1 - R2)))
logmarg[p == 0] = 0
postprobs = postmodelprob(object$R2,p, n, g, prior)
which = which.matrix(object$which, object$n.var)
object$probne0 = as.vector(postprobs %*% which)
object$postprobs=postprobs
object$g = g
object$logmarg = logmarg
object$shrinkage = object$shrinkage*0 + g/(1 + g)
object$method = "EB-global"
return(object)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/EB_global.R |
#' Coerce a BAS list object into a matrix.
#'
#' Models, coefficients, and standard errors in objects of class 'bas' are
#' represented as a list of lists to reduce storage by omitting the zero
#' entries. These functions coerce the list object to a matrix and fill in the
#' zeros to facilitate other computations.
#'
#' \code{list2matrix.bas(x, which)} is equivalent to
#' \code{list2matrix.which(x)}, however, the latter uses sapply rather than a
#' loop.
#' \code{list2matrix.which} and \code{which.matrix} both coerce
#' \code{x$which} into a matrix.
#' @aliases list2matrix
#' @param x a 'bas' object
#' @param what name of bas list to coerce
#' @param which.models a vector of indices used to extract a subset
#' @return a matrix representation of \code{x$what}, with number of rows equal
#' to the length of which.models or total number of models and number of
#' columns \code{x$n.vars}
#' @author Merlise Clyde \email{clyde@@duke.edu}
#' @seealso \code{\link{bas}}
#' @keywords regression
#'
#'
#' @examples
#'
#' data(Hald)
#' hald.bic <- bas.lm(Y ~ ., data=Hald, prior="BIC",
#' initprobs= "eplogp")
#' coef <- list2matrix.bas(hald.bic, "mle") # extract all coefficients
#' se <- list2matrix.bas(hald.bic, "mle.se")
#' models <- list2matrix.which(hald.bic) #matrix of model indicators
#' models <- which.matrix(hald.bic$which, hald.bic$n.vars) #matrix of model indicators
#'
#' @rdname list2matrix
#' @family as.matrix methods
#' @export
list2matrix.bas <- function(x, what, which.models = NULL) {
namesx <- x$namesx
if (is.null(which.models)) which.models <- 1:x$n.models
listobj <- x[[what]][which.models]
which <- x$which[which.models]
n.models <- length(which.models)
p <- length(namesx)
mat <- matrix(0, nrow = n.models, ncol = p)
for (i in 1:n.models) {
mat[i, which[[i]] + 1] <- listobj[[i]]
}
colnames(mat) <- namesx
return(mat)
}
#' Coerce a BAS list object into a matrix.
#'
#' Models, coefficients, and standard errors in objects of class 'bas' are
#' represented as a list of lists to reduce storage by omitting the zero
#' entries. These functions coerce the list object to a matrix and fill in the
#' zeros to facilitate other computations.
#'
#' \code{list2matrix.bas(x, which)} is equivalent to
#' \code{list2matrix.which(x)}, however, the latter uses sapply rather than a
#' loop.
#' \code{list2matrix.which} and \code{which.matrix} both coerce
#' \code{x$which} into a matrix.
#'
#' @param x a 'bas' object
#' @param which.models a vector of indices used to extract a subset
#' @return a matrix representation of \code{x$what}, with number of rows equal
#' to the length of which.models or total number of models and number of
#' columns \code{x$n.vars}
#' @author Merlise Clyde \email{clyde@@duke.edu}
#' @seealso \code{\link{bas}}
#' @keywords regression
#'
#'
#' @examples
#'
#' data(Hald)
#' Hald.bic <- bas.lm(Y ~ ., data=Hald, prior="BIC", initprobs="eplogp")
#' coef <- list2matrix.bas(Hald.bic, "mle") # extract all ols coefficients
#' se <- list2matrix.bas(Hald.bic, "mle.se")
#' models <- list2matrix.which(Hald.bic) #matrix of model indicators
#' models <- which.matrix(Hald.bic$which, Hald.bic$n.vars) #matrix of model indicators
#'
#' @rdname list2matrix.which
#' @family as.matrix methods
#' @export
list2matrix.which <- function(x, which.models = NULL) {
namesx <- x$namesx
listobj <- x$which
if (!is.null(which.models)) {
listobj <- listobj[which.models]
}
p <- length(namesx)
mat <- t(sapply(
listobj,
function(x, dimp) {
xx <- rep(0, dimp)
xx[x + 1] <- 1
xx
},
p
))
colnames(mat) <- namesx
mat
}
#' Coerce a BAS list object of models into a matrix.
#'
#' This function coerces the list object of models to a matrix and fill in the
#' zeros to facilitate other computations.
#'
#' \code{which.matrix} coerces
#' \code{x$which} into a matrix.
#'
#' @aliases which.matrix
#' @param which a 'bas' model object \code{x$which}
#' @param n.vars the total number of predictors, \code{x$n.vars}
#' @return a matrix representation of \code{x$which}, with number of rows equal
#' to the length of which.models or total number of models and number of
#' columns \code{x$n.vars}
#' @author Merlise Clyde \email{clyde@@duke.edu}
#' @seealso \code{\link{bas}}
#' @keywords regression
#'
#'
#' @examples
#'
#' data(Hald)
#' Hald.bic <- bas.lm(Y ~ ., data=Hald, prior="BIC", initprobs="eplogp")
#' # matrix of model indicators
#' models <- which.matrix(Hald.bic$which, Hald.bic$n.vars)
#'
#' @rdname which.matrix
#' @family as.matrix methods
#' @export
which.matrix <- function(which, n.vars) {
mat <- t(sapply(
which,
function(x, dimp) {
xx <- rep(0, dimp)
xx[x + 1] <- 1
xx
},
n.vars
))
mat
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/as_matrix.R |
normalize.initprobs.glm <- function(initprobs, glm.obj) {
p <- dim(glm.obj$x)[2]
if (!is.numeric(initprobs)) {
initprobs <- switch(initprobs,
"eplogp" = eplogprob(glm.obj),
"uniform" = c(1.0, rep(.5, p - 1)),
"Uniform" = c(1.0, rep(.5, p - 1))
)
}
if (length(initprobs) == (p - 1)) {
initprobs <- c(1.0, initprobs)
}
if (length(initprobs) != p) {
stop(paste("length of initprobs is not", p))
}
if (initprobs[1] < 1.0 | initprobs[1] > 1.0) initprobs[1] <- 1.0
# intercept is always included otherwise we get a segmentation
# fault (relax later)
prob <- as.numeric(initprobs)
pval <- summary(glm.obj)$coefficients[, 4]
if (any(is.na(pval))) {
    warning(paste("warning: full model is rank deficient; use caution when interpreting results."))
# prob[is.na(pval)] <- 0.0
}
return(prob)
}
#' Bayesian Adaptive Sampling Without Replacement for Variable Selection in
#' Generalized Linear Models
#'
#' Sample with or without replacement from a posterior distribution on GLMs
#'
#' BAS provides several search algorithms to find high probability models for
#' use in Bayesian Model Averaging or Bayesian model selection. For p less than
#' 20-25, BAS can enumerate all models depending on memory availability, for
#' larger p, BAS samples without replacement using random or deterministic
#' sampling. The Bayesian Adaptive Sampling algorithm of Clyde, Ghosh, Littman
#' (2010) samples models without replacement using the initial sampling
#' probabilities, and will optionally update the sampling probabilities every
#' "update" models using the estimated marginal inclusion probabilities. BAS
#' uses different methods to obtain the \code{initprobs}, which may impact the
#' results in high-dimensional problems. The deterministic sampler provides a
#' list of the top models in order of an approximation of independence using
#' the provided \code{initprobs}. This may be effective after running the
#' other algorithms to identify high probability models and works well if the
#' correlations of variables are small to modest. The priors on coefficients
#' are mixtures of g-priors that provide approximations to the power prior.
#'
#' @param formula generalized linear model formula for the full model with all
#' predictors, Y ~ X. All code assumes that an intercept will be included in
#' each model.
#' @param family a description of the error distribution and link function for
#' exponential family; currently only `binomial()` with the logistic link and
#' `poisson()` and `Gamma()` with the log link are available.
#' @param data data frame
#' @param weights optional vector of weights to be used in the fitting process.
#' May be missing in which case weights are 1.
#' @param subset subset of data used in fitting
#' @param contrasts an optional list. See the contrasts.arg of `model.matrix.default()`.
#' @param offset a priori known component to be included in the linear
#' predictor; by default 0.
#' @param na.action a function which indicates what should happen when the data
#' contain NAs. The default is "na.omit".
#' @param n.models number of unique models to keep. If NULL, BAS will attempt
#' to enumerate unless p > 35 or method="MCMC". For any of methods using MCMC
#' algorithms that sample with replacement, sampling will stop when the number
#' of iterations exceeds the min of 'n.models' or 'MCMC.iterations' and on exit
#' 'n.models' is updated to reflect the unique number of models that have been
#' sampled.
#' @param betaprior Prior on coefficients for model coefficients (except
#' intercept). Options include
#' \code{\link{g.prior}},
#' \code{\link{CCH}},
#' \code{\link{robust}},
#' \code{\link{intrinsic}},
#' \code{\link{beta.prime}},
#' \code{\link{EB.local}},
#' \code{\link{AIC}}, and
#' \code{\link{BIC}}.
#' @param modelprior Family of prior distribution on the models. Choices
#' include \code{\link{uniform}}, \code{\link{Bernoulli}},
#' \code{\link{beta.binomial}}, truncated Beta-Binomial,
#' \code{\link{tr.beta.binomial}}, and truncated power family
#' \code{\link{tr.power.prior}}.
#' @param initprobs vector of length p with the initial inclusion probabilities
#' used for sampling without replacement (the intercept will be included with
#' probability one and does not need to be added here) or a character string
#' giving the method used to construct the sampling probabilities if "Uniform"
#' each predictor variable is equally likely to be sampled (equivalent to
#' random sampling without replacement). If "eplogp", use the
#' \code{\link{eplogprob}} function to approximate the Bayes factor using
#' p-values to find initial marginal inclusion probabilities and sample
#' without replacement using these inclusion probabilities, which may be
#' updated using estimates of the marginal inclusion probabilities. "eplogp"
#' assumes that MLEs from the full model exist; for problems where that is not
#' the case or 'p' is large, initial sampling probabilities may be obtained
#' using \code{\link{eplogprob.marg}} which fits a model to each predictor
#' separately. To run a Markov Chain to provide initial
#' estimates of marginal inclusion probabilities, use method="MCMC+BAS" below.
#' While the initprobs are not used in sampling for method="MCMC", this
#' determines the order of the variables in the lookup table and affects memory
#' allocation in large problems where enumeration is not feasible. For
#' variables that should always be included set the corresponding initprobs to
#' 1, to override the `modelprior` or use `include.always` to force these variables
#' to always be included in the model.
#' @param include.always A formula with terms that should always be included
#' in the model with probability one. By default this is `~ 1` meaning that the
#' intercept is always included.
#' This will also override any of the values in `initprobs`
#' above by setting them to 1.
#' @param method A character variable indicating which sampling method to use:
#' method="BAS" uses Bayesian Adaptive Sampling (without replacement) using the
#' sampling probabilities given in initprobs and updates using the marginal
#' inclusion probabilities to direct the search/sample; method="MCMC" combines
#' a random walk Metropolis Hastings (as in MC3 of Raftery et al 1997) with a
#' random swap of a variable included with a variable that is currently
#' excluded (see Clyde, Ghosh, and Littman (2010) for details);
#' method="MCMC+BAS" runs an initial MCMC as above to calculate marginal
#' inclusion probabilities and then samples without replacement as in BAS;
#' method = "deterministic" runs a deterministic sampling using the initial
#' probabilities (no updating); this is recommended for fast enumeration or if a
#' model of independence is a good approximation to the joint posterior
#' distribution of the model indicators. For BAS, the sampling probabilities
#' can be updated as more models are sampled. (see 'update' below). We
#' recommend "MCMC+BAS" or "MCMC" for high dimensional problems.
#' @param update number of iterations between potential updates of the sampling
#' probabilities in the "BAS" method. If NULL do not update, otherwise the
#' algorithm will update using the marginal inclusion probabilities as they
#' change while sampling takes place. For large model spaces, updating is
#' recommended. If the model space will be enumerated, leave at the default.
#' @param bestmodel optional binary vector representing a model to initialize
#' the sampling. If NULL sampling starts with the null model
#' @param prob.rw For any of the MCMC methods, probability of using the
#' random-walk proposal; otherwise use a random "flip" move to propose a new
#' model.
#' @param MCMC.iterations Number of models to sample when using any of the MCMC
#' options; should be greater than 'n.models'. By default 10*n.models.
#' @param thin For "MCMC", thin the MCMC chain every "thin" iterations; default
#' is no thinning. For large p, thinning can be used to significantly reduce memory
#' requirements as models and associated summaries are saved only every thin
#' iterations. For thin = p, the model and associated output are recorded
#' every p iterations, similar to the Gibbs sampler in SSVS.
#' @param control a list of parameters that control convergence in the fitting
#' process. See the documentation for \code{glm.control()}
#' @param laplace logical variable for whether to use a Laplace approximate for
#' integration with respect to g to obtain the marginal likelihood. If FALSE
#' the Cephes library is used which may be inaccurate for large n or large
#' values of the Wald Chisquared statistic.
#' @param renormalize logical variable for whether posterior probabilities
#' should be based on renormalizing marginal likelihoods times prior
#' probabilities or use Monte Carlo frequencies. Applies only to MCMC sampling.
#' @param force.heredity Logical variable to force all levels of a factor to be
#' included together and to include higher order interactions only if lower
#' order terms are included. Currently only supported with `method='MCMC'`
#' and `method='BAS'` (experimental) on non-Solaris platforms.
#' Default is FALSE.
#' @param bigmem Logical variable to indicate that there is access to
#' large amounts of memory (physical or virtual) for enumeration
#' with large model spaces, e.g. > 2^25.
#' @return \code{bas.glm} returns an object of class \code{basglm}
#'
#' An object of class \code{basglm} is a list containing at least the following
#' components:
#'
#' \item{postprobs}{the posterior probabilities of the models selected}
#' \item{priorprobs}{the prior probabilities of the models selected}
#' \item{logmarg}{values of the log of the marginal likelihood for the models}
#' \item{n.vars}{total number of independent variables in the full model,
#' including the intercept}
#' \item{size}{the number of independent variables in
#' each of the models, includes the intercept}
#' \item{which}{a list of lists
#' with one list per model with variables that are included in the model}
#' \item{probne0}{the posterior probability that each variable is non-zero}
#' \item{mle}{list of lists with one list per model giving the GLM
#' estimate of each (nonzero) coefficient for each model.}
#' \item{mle.se}{list of
#' lists with one list per model giving the GLM standard error of each
#' coefficient for each model}
#' \item{deviance}{the GLM deviance for each model}
#' \item{modelprior}{the prior distribution on models that created the BMA
#' object}
#' \item{Q}{the Q statistic for each model used in the marginal
#' likelihood approximation}
#' \item{Y}{response}
#' \item{X}{matrix of predictors}
#' \item{family}{family object from the original call}
#' \item{betaprior}{family object for prior on coefficients, including
#' hyperparameters}
#' \item{modelprior}{family object for prior on the models}
#' \item{include.always}{indices of variables that are forced into the model}
#' @author Merlise Clyde (\email{clyde@@duke.edu}), Quanli Wang and Yingbo
#' Li
#' @references Li, Y. and Clyde, M. (2018) Mixtures of g-priors in Generalized
#' Linear Models.
#' Journal of the American Statistical Association. 113:1828-1845 \cr
#' \doi{10.1080/01621459.2018.1469992} \cr
#' Clyde, M. Ghosh, J. and Littman, M. (2010) Bayesian Adaptive Sampling for
#' Variable Selection and Model Averaging. Journal of Computational Graphics
#' and Statistics. 20:80-101 \cr
#' \doi{10.1198/jcgs.2010.09049} \cr
#' Raftery, A.E, Madigan, D. and Hoeting, J.A. (1997) Bayesian Model Averaging
#' for Linear Regression Models. Journal of the American Statistical
#' Association.
#' @keywords GLM regression
#' @examples
#'
#' library(MASS)
#' data(Pima.tr)
#'
#'
#' # enumeration with default method="BAS"
#' pima.cch = bas.glm(type ~ ., data=Pima.tr, n.models= 2^7,
#' method="BAS",
#' betaprior=CCH(a=1, b=532/2, s=0), family=binomial(),
#' modelprior=beta.binomial(1,1))
#'
#' summary(pima.cch)
#' image(pima.cch)
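#'
#' # added illustration: use include.always to force the predictor 'age'
#' # (a column of Pima.tr) into every sampled model
#' pima.always = bas.glm(type ~ ., data=Pima.tr, n.models= 2^7,
#'                include.always = ~ age,
#'                betaprior=CCH(a=1, b=532/2, s=0), family=binomial(),
#'                modelprior=beta.binomial(1,1))
#' summary(pima.always)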
#'
#' # Note MCMC.iterations are set to 2500 for illustration purposes due to time
#' # limitations for running examples on CRAN servers.
#' # Please check convergence diagnostics and run longer in practice
#'
#' pima.robust = bas.glm(type ~ ., data=Pima.tr, n.models= 2^7,
#' method="MCMC", MCMC.iterations=2500,
#' betaprior=robust(), family=binomial(),
#' modelprior=beta.binomial(1,1))
#'
#' pima.BIC = bas.glm(type ~ ., data=Pima.tr, n.models= 2^7,
#' method="BAS+MCMC", MCMC.iterations=2500,
#' betaprior=bic.prior(), family=binomial(),
#' modelprior=uniform())
#' # Poisson example
#' if(requireNamespace("glmbb", quietly=TRUE)) {
#' data(crabs, package='glmbb')
#' #short run for illustration
#' crabs.bas = bas.glm(satell ~ color*spine*width + weight, data=crabs,
#' family=poisson(),
#' betaprior=EB.local(), modelprior=uniform(),
#' method='MCMC', n.models=2^10, MCMC.iterations=2500,
#' prob.rw=.95)
#'
#' # Gamma example
#' if(requireNamespace("faraway", quietly=TRUE)) {
#' data(wafer, package='faraway')
#'
#' wafer_bas = bas.glm(resist~ ., data=wafer, include.always = ~ .,
#' betaprior = bic.prior() ,
#' family = Gamma(link = "log"))
#' }
#' }
#' @concept BMA
#' @concept variable selection
#' @family BMA functions
#' @rdname bas.glm
#' @export
bas.glm <- function(formula, family = binomial(link = "logit"),
data, weights, subset, contrasts=NULL, offset, na.action = "na.omit",
n.models = NULL,
betaprior = CCH(alpha = .5, beta = as.numeric(nrow(data)), s = 0),
modelprior = beta.binomial(1, 1),
initprobs = "Uniform",
include.always = ~1,
method = "MCMC",
update = NULL,
bestmodel = NULL,
prob.rw = 0.5,
MCMC.iterations = NULL, thin = 1,
control = glm.control(), laplace = FALSE, renormalize = FALSE,
force.heredity = FALSE,
bigmem = FALSE) {
num.updates <- 10
call <- match.call()
if (is.character(family)) {
family <- get(family, mode = "function", envir = parent.frame())
}
if (is.function(family)) {
family <- family()
}
if (!(family$family %in% c("binomial", "poisson", "Gamma"))) {
stop(paste("family ", family$family, "not implemented"))
}
if (missing(data)) {
data <- environment(formula)
}
if (!inherits(modelprior, "prior")) stop("modelprior should be an object of class prior, uniform(), beta.binomial(), etc")
# browser()
mfall <- match.call(expand.dots = FALSE)
m <- match(c(
"formula", "data", "subset", "weights", "na.action",
"etastart", "mustart", "offset"
), names(mfall), 0L)
mf <- mfall[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- quote(stats::model.frame)
mf <- eval(mf, parent.frame())
n.NA <- length(attr(mf, "na.action"))
if (n.NA > 0) {
warning(paste(
"dropping ", as.character(n.NA),
"rows due to missing data"
))
}
Y <- model.response(mf, type = "any")
mt <- attr(mf, "terms")
X <- model.matrix(mt, mf, contrasts)
# X = model.matrix(formula, mf)
# Y = glm.obj$y
# X = glm.obj$x
namesx <- dimnames(X)[[2]]
namesx[1] <- "Intercept"
p <- dim(X)[2]
nobs <- dim(X)[1]
if (nobs == 0) {stop("Sample size is zero; check data and subset arguments")}
# weights = as.vector(model.weights(mf))
weights <- as.vector(model.weights(mf))
if (is.null(weights)) {
weights <- rep(1, nobs)
}
offset <- model.offset(mf)
if (is.null(offset)) offset <- rep(0, nobs)
Y <- glm(Y ~ 1, family = family, weights = weights,
offset = offset, y = T)$y
if (!is.numeric(initprobs)) {
  if (nobs <= p && initprobs == "eplogp") {
    stop(
      "Full model is not full rank so cannot use the eplogp bound to create starting sampling probabilities; perhaps use 'marg-eplogp' for fitting marginal models\n"
    )
}
initprobs <- switch(
initprobs,
"eplogp" = eplogprob(glm(Y ~ X - 1,
family = family, weights = weights,
offset = offset)),
"marg-eplogp" = eplogprob.marg(Y, X),
"uniform" = c(1.0, rep(.5, p - 1)),
"Uniform" = c(1.0, rep(.5, p - 1))
)
}
if (length(initprobs) == (p - 1)) {
initprobs <- c(1.0, initprobs)
}
# set up variables to always include
keep <- 1
if ("include.always" %in% names(mfall)) {
minc <- match(c("include.always", "data", "subset"), names(mfall), 0L)
mfinc <- mfall[c(1L, minc)]
mfinc$drop.unused.levels <- TRUE
names(mfinc)[2] <- "formula"
mfinc[[1L]] <- quote(stats::model.frame)
mfinc <- eval(mfinc, parent.frame())
mtinc <- attr(mfinc, "terms")
X.always <- model.matrix(mtinc, mfinc, contrasts)
keep <- c(1L, match(colnames(X.always)[-1], colnames(X)))
initprobs[keep] <- 1.0
if (ncol(X.always) == ncol(X)) {
# just one model with all variables forced in
# use method='BAS" as deterministic and MCMC fail in this context
method <- "BAS"
}
}
parents <- matrix(1, 1, 1)
if (method == "deterministic" | method == "MCMC+BAS" ) force.heredity <- FALSE # not working yet
if (force.heredity) {
parents <- make.parents.of.interactions(mf, data)
# check to see if really necessary
if (sum(parents) == nrow(parents)) {
parents <- matrix(1, 1, 1)
force.heredity <- FALSE
}
}
prob <- normalize.initprobs.lm(initprobs, p)
if (is.null(bestmodel)) {
# bestmodel = as.integer(initprobs)
bestmodel <- c(1, rep(0, p - 1))
}
bestmodel[keep] <- 1
if (force.heredity) {
update <- NULL # do not update tree FIXME LATER
if (prob.heredity(bestmodel, parents) == 0) {
warning("bestmodel violates heredity conditions; resetting to null model. Please check include.always and bestmodel")
bestmodel <- c(1, rep(0, p - 1))
}
# initprobs <- c(1, seq(.95, .55, length = (p - 1))) # keep same order
}
bestmodel <- as.integer(bestmodel)
if (is.null(n.models)) {
n.models <- as.integer(min(2^p, 2^19))
}
n.models <- as.integer(normalize.n.models(n.models, p, prob, method, bigmem))
modelprior <- normalize.modelprior(modelprior, p)
if (is.null(MCMC.iterations)) {
MCMC.iterations <- max(10000, (n.models * 10))
}
MCMC.iterations = as.integer(MCMC.iterations)
Burnin.iterations <- as.integer(MCMC.iterations)
modeldim <- as.integer(rep(0, n.models))
#print(MCMC.iterations)
if (is.null(update)) {
if (force.heredity) { # do not update tree for BAS
update <- n.models + 1}
else {
if (n.models == 2^(p - 1)) {
update <- n.models + 1
} else {
(update <- n.models / num.updates)
}}
}
Yvec <- as.numeric(Y)
# check on priors
  if (!inherits(betaprior, "prior")) stop("prior on coefficients must be an object of type 'prior'")
loglik_null <- as.numeric(-0.5 * glm(Y ~ 1,
weights = weights,
offset = offset,
family = eval(call$family)
)$null.deviance)
betaprior$hyper.parameters$loglik_null <- loglik_null
# browser()
if (betaprior$family == "BIC" & is.null(betaprior$n)) {
betaprior <- bic.prior(as.numeric(nobs))
}
if (betaprior$family == "hyper-g/n" & is.null(betaprior$n)) {
betaprior$hyper.parameters$theta <- 1 / nobs
betaprior$n <- nobs
}
if (betaprior$family == "robust" & is.null(betaprior$n)) betaprior <- robust(as.numeric(nobs))
if (betaprior$family == "intrinsic" & is.null(betaprior$n)) {
betaprior$hyper.parameters$n <- as.numeric(nobs)
}
if (betaprior$family == "betaprime" & is.null(betaprior$hyper.parameters$n)) {
betaprior$hyper.parameters$n <- as.numeric(nobs)
}
#print(MCMC.iterations)
result <- switch(method,
"MCMC" = .Call(C_glm_mcmc,
Y = Yvec, X = X,
Roffset = as.numeric(offset),
Rweights = as.numeric(weights),
Rprobinit = prob,
Rmodeldim = modeldim,
modelprior = modelprior,
betaprior = betaprior,
Rbestmodel = bestmodel,
plocal = as.numeric(1.0 - prob.rw),
BURNIN_Iterations = as.integer(MCMC.iterations),
Rthin = as.integer(thin),
family = family, Rcontrol = control,
Rlaplace = as.integer(laplace),
Rparents = parents
),
"BAS" = .Call(C_glm_sampleworep,
Y = Yvec, X = X,
Roffset = as.numeric(offset),
Rweights = as.numeric(weights),
Rprobinit = prob,
Rmodeldim = modeldim,
modelprior = modelprior,
betaprior = betaprior,
Rbestmodel = bestmodel,
plocal = as.numeric(1.0 - prob.rw),
family = family, Rcontrol = control,
Rupdate = as.integer(update),
Rlaplace = as.integer(laplace),
Rparents = parents
),
"MCMC+BAS" = .Call(C_glm_mcmcbas,
Y = Yvec,
X = X,
Roffset = as.numeric(offset),
Rweights = as.numeric(weights),
Rprobinit = prob,
Rmodeldim = modeldim,
modelprior = modelprior,
betaprior = betaprior,
Rbestmodel = bestmodel,
plocal = as.numeric(1.0 - prob.rw),
BURNIN_Iterations = as.integer(MCMC.iterations),
family = family, Rcontrol = control,
Rupdate = as.integer(update), Rlaplace = as.integer(laplace),
Rparents = parents
),
"deterministic" = .Call(C_glm_deterministic,
Y = Yvec, X = X,
Roffset = as.numeric(offset),
Rweights = as.numeric(weights),
Rprobinit = prob,
Rmodeldim = modeldim,
modelprior = modelprior,
betaprior = betaprior,
family = family,
Rcontrol = control,
Rlaplace = as.integer(laplace)
)
)
result$namesx <- namesx
result$n <- length(Yvec)
result$modelprior <- modelprior
result$probne0[keep] <- 1.0
result$probne0.RN <- result$probne0
result$postprobs.RN <- result$postprobs
result$family <- family
result$betaprior <- betaprior
result$modelprior <- modelprior
result$n.models <- length(result$postprobs)
result$include.always <- keep
# if (method == "MCMC") result$n.models = result$n.Unique
df <- rep(nobs - 1, result$n.models)
if (betaprior$class == "IC") df <- df - result$size + 1
result$df <- df
result$R2 <- .R2.glm.bas(result$deviance, result$size, call)
result$n.vars <- p
result$Y <- Yvec
result$X <- X
result$call <- call
result$terms <- mt
result$contrasts <- attr(X, "contrasts")
result$xlevels <- .getXlevels(mt, mf)
result$model <- mf
# drop null model if it is present
if (betaprior$family == "Jeffreys" & (min(result$size) == 1)) result <- .drop.null.bas(result)
# github issue #74. drop models with zero prior probability
if (any(result$priorprobs == 0)) {
drop.models = result$priorprobs != 0
result$mle = result$mle[drop.models]
result$mle.se = result$mle.se[drop.models]
result$mse = result$mse[drop.models]
result$which = result$which[drop.models]
result$freq = result$freq[drop.models]
result$shrinkage = result$shrinkage[drop.models]
result$R2 = result$R2[drop.models]
result$logmarg = result$logmarg[drop.models]
result$df = result$df[drop.models]
result$size = result$size[drop.models]
result$Q = result$Q[drop.models]
result$rank = result$rank[drop.models]
result$sampleprobs = result$sampleprobs[drop.models]
result$postprobs = result$postprobs[subset = drop.models]
result$priorprobs = result$priorprobs[subset = drop.models]
result$n.models = length(result$postprobs)
}
if (method == "MCMC") {
result$postprobs.MCMC <- result$freq / sum(result$freq)
if (!renormalize) {
result$probne0 <- result$probne0.MCMC
result$postprobs <- result$postprobs.MCMC
}
}
class(result) <- c("basglm", "bas")
return(result)
}
# Drop the null model when using the Jeffreys prior
.drop.null.bas <- function(object) {
n.models <- object$n.models
p <- object$size
drop <- (1:n.models)[p == 1]
logmarg <- object$logmarg[-drop]
prior <- object$priorprobs[-drop]
postprobs <- .renormalize.postprobs(logmarg, log(prior))
which <- which.matrix(object$which[-drop], object$n.var)
object$probne0 <- as.vector(postprobs %*% which)
object$postprobs <- postprobs
method <- eval(object$call$method)
if (method == "MCMC+BAS" | method == "MCMC") {
object$freq <- object$freq[-drop]
object$probne0.MCMC <- as.vector(object$freq %*% which)/sum(object$freq)
}
object$priorprobs <- prior
if (!is.null(object$sampleprobs)) object$sampleprobs <- object$sampleprobs[-drop]
object$which <- object$which[-drop]
object$logmarg <- logmarg
object$deviance <- object$deviance[-drop]
object$intercept <- object$intercept[-drop]
object$size <- object$size[-drop]
object$Q <- object$Q[-drop]
object$R2 <- object$R2[-drop]
object$mle <- object$mle[-drop]
object$mle.se <- object$mle.se[-drop]
object$shrinkage <- object$shrinkage[-drop]
object$n.models <- n.models - 1
object$df <- object$df[-drop]
return(object)
}
.renormalize.postprobs <- function(logmarg, logprior) {
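  # renormalize on the log scale, subtracting the maximum before exponentiating
  # for numerical stability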
probs <- logmarg + logprior
probs <- exp(probs - max(probs))
probs <- probs / sum(probs)
return(probs)
}
.R2.glm.bas <- function(deviance, size, call) {
n.models <- length(deviance)
  null.model <- (1:n.models)[size == 1]
  if (length(null.model) == 0) { # integer(0) when the null model was not among the sampled models
null.deviance <- glm(eval(call$formula),
data = eval(call$data),
family = eval(call$family)
)$null.deviance
}
else {
null.deviance <- deviance[null.model]
}
R2 <- 1 - deviance / null.deviance
return(R2)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/bas_glm.R |
normalize.initprobs.lm <- function(initprobs, p) {
if (length(initprobs) != p) {
stop(paste(
"length of initprobs is", length(initprobs),
"is not same as dimensions of X", p
))
}
if (initprobs[1] < 1.0 | initprobs[1] > 1.0) initprobs[1] <- 1.0
# intercept is always included otherwise we get a segmentation
# fault (relax later)
prob <- as.numeric(initprobs)
prob[initprobs < 0.0] = 0.0
prob[initprobs > 1.0] = 1.0
# if (!is.null(lm.obj)) {
# pval = summary(lm.obj)$coefficients[,4]
# if (any(is.na(pval))) {
# print(paste("warning full model is rank deficient."))
# }}
#
return(prob)
}
normalize.modelprior <- function(modelprior, p) {
if (modelprior$family == "Bernoulli") {
if (length(modelprior$hyper.parameters) == 1) {
modelprior$hyper.parameters <- c(1, rep(modelprior$hyper.parameters, p - 1))
}
if (length(modelprior$hyper.parameters) == (p - 1)) {
modelprior$hyper.parameters <- c(1,
modelprior$hyper.parameters)
}
if (length(modelprior$hyper.parameters) != p) {
stop(" Number of probabilities in Bernoulli family is not equal to the number of variables or 1")
}
}
return(modelprior)
}
normalize.n.models <- function(n.models, p, initprobs, method, bigmem) {
if (is.null(n.models)) {
p = max(30, p)
n.models <- 2^(p - 1)
}
if (n.models > 2^(p - 1)) n.models <- 2^(p - 1)
deg <- sum(initprobs >= 1) + sum(initprobs <= 0)
if (deg > 1 & n.models == 2^(p - 1)) {
n.models <- 2^(p - deg)
}
if (n.models > 2^25 && !bigmem && !(method == "MCMC")) {
stop(paste0("Number of requested models to sample is ",
n.models,
"; rerun with bigmem=TRUE or using method='MCMC'"
))
}
return(n.models)
}
#' Bayesian Adaptive Sampling for Bayesian Model Averaging and Variable Selection in
#' Linear Models
#'
#' Sample without replacement from a posterior distribution on models
#'
#' BAS provides several algorithms to sample from posterior distributions
#' of models for
#' use in Bayesian Model Averaging or Bayesian variable selection. For p less than
#' 20-25, BAS can enumerate all models depending on memory availability. As BAS saves all
#' models, MLEs, standard errors, log marginal likelihoods, and prior and posterior probabilities,
#' memory requirements grow linearly with M*p where M is the number of models
#' and p is the number of predictors. For example, enumeration with p=21 and 2,097,152 models takes just under
#' 2 Gigabytes on a 64 bit machine to store all summaries that would be needed for model averaging.
#' (A future version will likely include an option to not store all summaries if
#' users do not plan on using model averaging or model selection on Best Predictive models.)
#' For larger p, BAS samples without replacement using random or deterministic
#' sampling. The Bayesian Adaptive Sampling algorithm of Clyde, Ghosh, Littman
#' (2010) samples models without replacement using the initial sampling
#' probabilities, and will optionally update the sampling probabilities every
#' "update" models using the estimated marginal inclusion probabilities. BAS
#' uses different methods to obtain the \code{initprobs}, which may impact the
#' results in high-dimensional problems. The deterministic sampler provides a
#' list of the top models in order of an approximation of independence using
#' the provided \code{initprobs}. This may be effective after running the
#' other algorithms to identify high probability models and works well if the
#' correlations of variables are small to modest.
#' We recommend "MCMC" for
#' problems where enumeration is not feasible (memory or time constrained)
#' or even modest p if the number of
#' models sampled is not close to the number of possible models and/or there are significant
#' correlations among the predictors as the bias in estimates of inclusion
#' probabilities from "BAS" or "MCMC+BAS" may be large relative to the reduced
#' variability from using the normalized model probabilities as shown in Clyde and Ghosh, 2012.
#' Diagnostic plots with MCMC can be used to assess convergence.
#' For large problems we recommend thinning with MCMC to reduce memory requirements.
#' The priors on coefficients
#' include Zellner's g-prior, the Hyper-g prior (Liang et al 2008), the
#' Zellner-Siow Cauchy prior, and Empirical Bayes (local and global) g-priors. AIC
#' and BIC are also included, while a range of priors on the model space are available.
#'
#' @aliases bas bas.lm
#' @param formula linear model formula for the full model with all predictors,
#' Y ~ X. All code assumes that an intercept will be included in each model
#' and that the X's will be centered.
#' @param data a data frame. Factors will be converted to numerical vectors
#' using `model.matrix`.
#' @param subset an optional vector specifying a subset of observations to be
#' used in the fitting process.
#' @param weights an optional vector of weights to be used in the fitting
#' process. Should be NULL or a numeric vector. If non-NULL, Bayes estimates
#' are obtained assuming that \eqn{Y_i \sim N(x^T_i\beta, \sigma^2/w_i)}.
#' @param contrasts an optional list. See the contrasts.arg of `model.matrix.default()`.
#' @param na.action a function which indicates what should happen when the data
#' contain NAs. The default is "na.omit".
#' @param n.models number of models to sample either without replacement
#' (method="BAS" or "MCMC+BAS") or with replacement (method="MCMC"). If NULL,
#' BAS with method="BAS" will try to enumerate all 2^p models. If enumeration
#' is not possible (memory or time) then a value should be supplied which
#' controls the number of sampled models using 'n.models'. With method="MCMC",
#' sampling will stop once the min(n.models, MCMC.iterations) occurs so
#' MCMC.iterations should be significantly larger than n.models in order to explore the model space.
#' On exit for method= "MCMC" this is the number of unique models that have
#' been sampled with counts stored in the output as "freq".
#' @param prior prior distribution for regression coefficients. Choices
#' include
#' \itemize{
#' \item "AIC"
#' \item "BIC"
#' \item "g-prior", Zellner's g prior where `g` is specified using the argument `alpha`
#' \item "JZS" Jeffreys-Zellner-Siow prior which uses the Jeffreys
#' prior on sigma and the Zellner-Siow Cauchy prior on the coefficients.
#' The optional parameter `alpha` can be used to control
#' the squared scale of the prior, where the default is alpha=1. The value of
#' `alpha` corresponds to rscale^2 in the BayesFactor package of Morey.
#' This uses QUADMATH for numerical integration of g.
#' \item "ZS-null", a Laplace approximation to the 'JZS' prior
#' for integration of g. alpha = 1 only. We recommend
#' using 'JZS' for accuracy and compatibility
#' with the BayesFactor package, although it is
#' slower.
#' \item "ZS-full" (to be deprecated)
#' \item "hyper-g", a mixture of g-priors where the prior on
#' g/(1+g) is a Beta(1, alpha/2) as in Liang et al (2008). This
#' uses the Cephes library for evaluation of the marginal
#' likelihoods and may be numerically unstable for
#' large n or R2 close to 1. Default choice of alpha is 3.
#' \item "hyper-g-laplace", Same as above but using a Laplace
#' approximation to integrate over the prior on g.
#' \item "hyper-g-n", a mixture of g-priors that where
#' u = g/n and u ~ Beta(1, alpha/2) to provide consistency
#' when the null model is true.
#' \item "EB-local", use the MLE of g from the marginal likelihood
#' within each model
#' \item "EB-global" uses an EM algorithm to find a common or
#' global estimate of g, averaged over all models. When it is not possible to
#' enumerate all models, the EM algorithm uses only the
#' models sampled under EB-local.
#' }
#' @param alpha optional hyperparameter in g-prior or hyper g-prior. For
#' Zellner's g-prior, alpha = g; for the Liang et al hyper-g or hyper-g-n
#' method, the recommended choice of alpha is between 2 and 4 (2 < alpha < 4), with alpha
#' = 3 the default. For the Zellner-Siow prior alpha = 1 by default, but can be used
#' to modify the rate parameter in the gamma prior on g,
#' \deqn{1/g \sim G(1/2, n*\alpha/2)} so that
#' \deqn{\beta \sim C(0, \sigma^2 \alpha (X'X/n)^{-1})}.
#' @param modelprior A function for a family of prior distributions on the models. Choices
#' include \code{\link{uniform}}, \code{\link{Bernoulli}}, or
#' \code{\link{beta.binomial}}, \code{\link{tr.beta.binomial}},
#' (with truncation) \code{\link{tr.poisson}} (a truncated Poisson), and
#' \code{\link{tr.power.prior}} (a truncated power family),
#' with the default being a
#' \code{beta.binomial(1,1)}. Truncated versions are useful for p > n.
#' @param initprobs Vector of length p or a character string specifying which
#' method is used to create the vector. This is used to order variables for
#' sampling all methods for potentially more efficient storage while sampling
#' and provides the initial inclusion probabilities used for sampling without
#' replacement with method="BAS". Options for the character string giving the
#' method are: "Uniform" or "uniform" where each predictor variable is equally
#' likely to be sampled (equivalent to random sampling without replacement);
#' "eplogp" uses the \code{\link{eplogprob}} function to approximate the Bayes
#' factor from p-values from the full model to find initial marginal inclusion
#' probabilities; "marg-eplogp" uses\code{\link{eplogprob.marg}} function to
#' approximate the Bayes factor from p-values from the full model each simple
#' linear regression. To run a Markov Chain to provide initial estimates of
#' marginal inclusion probabilities for "BAS", use method="MCMC+BAS" below.
#' While the initprobs are not used in sampling for method="MCMC", this
#' determines the order of the variables in the lookup table and affects memory
#' allocation in large problems where enumeration is not feasible. For
#' variables that should always be included set the corresponding initprobs to
#' 1, to override the `modelprior` or use `include.always` to force these variables
#' to always be included in the model.
#' @param include.always A formula with terms that should always be included
#' in the model with probability one. By default this is `~ 1` meaning that the
#' intercept is always included. This will also override any of the values in `initprobs`
#' above by setting them to 1.
#' @param method A character variable indicating which sampling method to use:
#' \itemize{
#' \item "deterministic" uses the "top k" algorithm described in Ghosh and Clyde (2011)
#' to sample models in order of approximate probability under conditional independence
#' using the "initprobs". This is the most efficient algorithm for enumeration.
#' \item "BAS" uses Bayesian Adaptive Sampling (without replacement) using the
#' sampling probabilities given in initprobs under a model of conditional independence.
#' These can be updated based on estimates of the marginal inclusion probabilities.
#' \item "MCMC" samples with
#' replacement via a MCMC algorithm that combines the birth/death random walk
#' in Hoeting et al (1997) of MC3 with a random swap move to interchange a
#' variable in the model with one currently excluded as described in Clyde,
#' Ghosh and Littman (2010).
#' \item "MCMC+BAS" runs an initial MCMC to
#' calculate marginal inclusion probabilities and then samples without
#' replacement as in BAS. For BAS, the sampling probabilities can be updated
#' as more models are sampled. (see update below).
#' }
#' @param update number of iterations between potential updates of the sampling
#' probabilities for method "BAS" or "MCMC+BAS". If NULL do not update, otherwise the
#' algorithm will update using the marginal inclusion probabilities as they
#' change while sampling takes place. For large model spaces, updating is
#' recommended. If the model space will be enumerated, leave at the default.
#' @param bestmodel optional binary vector representing a model to initialize
#' the sampling. If NULL sampling starts with the null model
#' @param prob.local A future option to allow sampling of models "near" the
#' median probability model. Not used at this time.
#' @param prob.rw For any of the MCMC methods, probability of using the
#' random-walk Metropolis proposal; otherwise use a random "flip" move
#' to propose swapping a variable that is excluded with a variable in the model.
#' @param MCMC.iterations Number of iterations for the MCMC sampler; the
#' default is n.models*10 if not set by the user.
#' @param lambda Parameter in the AMCMC algorithm (deprecated).
#' @param delta truncation parameter to prevent sampling probabilities from
#' degenerating to 0 or 1 prior to enumeration for sampling without replacement.
#' @param thin For "MCMC" or "MCMC+BAS", thin the MCMC chain every "thin" iterations; default is no
#' thinning. For large p, thinning can be used to significantly reduce memory
#' requirements as models and associated summaries are saved only every thin iterations. For thin = p, the model and associated output are recorded every p iterations,
#' similar to the Gibbs sampler in SSVS.
#' @param renormalize For MCMC sampling, should posterior probabilities be
#' based on renormalizing the marginal likelihoods times prior probabilities
#' (TRUE) or frequencies from MCMC. The latter are unbiased in long runs,
#' while the former may have less variability. May be compared via the
#' diagnostic plot function \code{\link{diagnostics}}.
#' See details in Clyde and Ghosh (2012).
#' @param force.heredity Logical variable to force all levels of a factor to be
#' included together and to include higher order interactions only if lower
#' order terms are included. Currently supported with `method='MCMC'`
#' and experimentally with `method='BAS'` on non-Solaris platforms.
#' Default is FALSE.
#' @param pivot Logical variable to allow pivoting of columns when obtaining the
#' OLS estimates of a model so that models that are not full rank can be fit.
#' Defaults to TRUE.
#' Currently coefficients that are not estimable are set to zero. Use caution with
#' interpreting BMA estimates of parameters.
#' @param tol 1e-7 as default; used in determining rank of X^TX in the Cholesky
#' decomposition with pivoting.
#' @param bigmem Logical variable to indicate that there is access to
#' large amounts of memory (physical or virtual) for enumeration
#' with large model spaces, e.g. > 2^25.
#'
#' @return \code{bas} returns an object of class \code{bas}
#'
#' An object of class \code{BAS} is a list containing at least the following
#' components:
#'
#' \item{postprob}{the posterior probabilities of the models selected}
#' \item{priorprobs}{the prior probabilities of the models selected}
#' \item{namesx}{the names of the variables}
#' \item{R2}{R2 values for the
#' models}
#' \item{logmarg}{values of the log of the marginal likelihood for the
#' models. This is equivalent to the log Bayes Factor for comparing
#' each model to a base model with intercept only.}
#' \item{n.vars}{total number of independent variables in the full
#' model, including the intercept}
#' \item{size}{the number of independent
#' variables in each of the models, includes the intercept}
#' \item{rank}{the rank of the design matrix; if `pivot = FALSE`, this is the same as size
#' as no checking of rank is conducted.}
#' \item{which}{a list
#' of lists with one list per model with variables that are included in the
#' model}
#' \item{probne0}{the posterior probability that each variable is
#' non-zero computed using the renormalized marginal likelihoods of sampled
#' models. This may be biased if the number of sampled models is much smaller
#' than the total number of models. Unbiased estimates may be obtained using
#' method "MCMC".}
#' \item{mle}{list of lists with one list per model giving the
#' MLE (OLS) estimate of each (nonzero) coefficient for each model. NOTE: The
#' intercept is the mean of Y as each column of X has been centered by
#' subtracting its mean.}
#' \item{mle.se}{list of lists with one list per model
#' giving the MLE (OLS) standard error of each coefficient for each model}
#' \item{prior}{the name of the prior that created the BMA object}
#' \item{alpha}{value of hyperparameter in coefficient prior used to create the BMA
#' object. }
#' \item{modelprior}{the prior distribution on models that created the
#' BMA object}
#' \item{Y}{response}
#' \item{X}{matrix of predictors}
#' \item{mean.x}{vector of means for each column of X (used in
#' \code{\link{predict.bas}})}
#' \item{include.always}{indices of variables that are forced into the model}
#'
#' The function \code{\link{summary.bas}}, is used to print a summary of the
#' results. The function \code{\link{plot.bas}} is used to plot posterior
#' distributions for the coefficients and \code{\link{image.bas}} provides an
#' image of the distribution over models. Posterior summaries of coefficients
#' can be extracted using \code{\link{coefficients.bas}}. Fitted values and
#' predictions can be obtained using the S3 functions \code{\link{fitted.bas}}
#' and \code{\link{predict.bas}}. BAS objects may be updated to use a
#' different prior (without rerunning the sampler) using the function
#' \code{\link{update.bas}}. For MCMC sampling \code{\link{diagnostics}} can be used
#' to assess whether the MCMC has run long enough so that the posterior probabilities
#' are stable. For more details see the associated demos and vignette.
#' @author Merlise Clyde (\email{clyde@@duke.edu}) and Michael Littman
#' @seealso \code{\link{summary.bas}}, \code{\link{coefficients.bas}},
#' \code{\link{print.bas}}, \code{\link{predict.bas}}, \code{\link{fitted.bas}}
#' \code{\link{plot.bas}}, \code{\link{image.bas}}, \code{\link{eplogprob}},
#' \code{\link{update.bas}}
#' @references Clyde, M. Ghosh, J. and Littman, M. (2010) Bayesian Adaptive
#' Sampling for Variable Selection and Model Averaging. Journal of
#' Computational and Graphical Statistics. 20:80-101 \cr
#' \doi{10.1198/jcgs.2010.09049}
#'
#' Clyde, M. and Ghosh. J. (2012) Finite population estimators in stochastic search variable selection.
#' Biometrika, 99 (4), 981-988. \doi{10.1093/biomet/ass040}
#'
#' Clyde, M. and George, E. I. (2004) Model Uncertainty. Statist. Sci., 19,
#' 81-94. \cr \doi{10.1214/088342304000000035}
#'
#' Clyde, M. (1999) Bayesian Model Averaging and Model Search Strategies (with
#' discussion). In Bayesian Statistics 6. J.M. Bernardo, A.P. Dawid, J.O.
#' Berger, and A.F.M. Smith eds. Oxford University Press, pages 157-185.
#'
#' Hoeting, J. A., Madigan, D., Raftery, A. E. and Volinsky, C. T. (1999)
#' Bayesian model averaging: a tutorial (with discussion). Statist. Sci., 14,
#' 382-401. \cr
#' \doi{10.1214/ss/1009212519}
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. and Berger, J.O. (2008) Mixtures
#' of g-priors for Bayesian Variable Selection. Journal of the American
#' Statistical Association. 103:410-423. \cr
#' \doi{10.1198/016214507000001337}
#'
#' Zellner, A. (1986) On assessing prior distributions and Bayesian regression
#' analysis with g-prior distributions. In Bayesian Inference and Decision
#' Techniques: Essays in Honor of Bruno de Finetti, pp. 233-243.
#' North-Holland/Elsevier.
#'
#' Zellner, A. and Siow, A. (1980) Posterior odds ratios for selected
#' regression hypotheses. In Bayesian Statistics: Proceedings of the First
#' International Meeting held in Valencia (Spain), pp. 585-603.
#'
#' Rouder, J. N., Speckman, P. L., Sun, D., Morey, R. D., and Iverson, G.
#' (2009). Bayesian t-tests for accepting and rejecting the null hypothesis.
#' Psychonomic Bulletin & Review, 16, 225-237
#'
#' Rouder, J. N., Morey, R. D., Speckman, P. L., Province, J. M., (2012)
#' Default Bayes Factors for ANOVA Designs. Journal of Mathematical Psychology.
#' 56. p. 356-374.
#' @keywords regression
#' @family bas methods
#' @examples
#'
#' library(MASS)
#' data(UScrime)
#'
#' # pivot=FALSE is faster, but should only be used in full rank case
#' # default is pivot = TRUE
#' crime.bic <- bas.lm(log(y) ~ log(M) + So + log(Ed) +
#' log(Po1) + log(Po2) +
#' log(LF) + log(M.F) + log(Pop) + log(NW) +
#' log(U1) + log(U2) + log(GDP) + log(Ineq) +
#' log(Prob) + log(Time),
#' data = UScrime, n.models = 2^15, prior = "BIC",
#' modelprior = beta.binomial(1, 1),
#' initprobs = "eplogp", pivot = FALSE
#' )
#'
#'
#' # use MCMC rather than enumeration
#' crime.mcmc <- bas.lm(log(y) ~ log(M) + So + log(Ed) +
#' log(Po1) + log(Po2) +
#' log(LF) + log(M.F) + log(Pop) + log(NW) +
#' log(U1) + log(U2) + log(GDP) + log(Ineq) +
#' log(Prob) + log(Time),
#' data = UScrime,
#' method = "MCMC",
#' MCMC.iterations = 20000, prior = "BIC",
#' modelprior = beta.binomial(1, 1),
#' initprobs = "eplogp", pivot = FALSE
#' )
#'
#' summary(crime.bic)
#' plot(crime.bic)
#' image(crime.bic, subset = -1)
#'
#' # example with two-way interactions and hierarchical constraints
#' data(ToothGrowth)
#' ToothGrowth$dose <- factor(ToothGrowth$dose)
#' levels(ToothGrowth$dose) <- c("Low", "Medium", "High")
#' TG.bas <- bas.lm(len ~ supp * dose,
#' data = ToothGrowth,
#' modelprior = uniform(), method = "BAS",
#' force.heredity = TRUE
#' )
#' summary(TG.bas)
#' image(TG.bas)
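#'
#' # keep a term in every model with include.always (a one-sided formula);
#' # here log(Po1) is forced into all sampled models
#' crime.always <- bas.lm(log(y) ~ log(M) + So + log(Ed) + log(Po1) + log(Prob),
#'   data = UScrime, prior = "BIC",
#'   modelprior = beta.binomial(1, 1),
#'   include.always = ~ log(Po1)
#' )
#' crime.always$include.always # indices of the always-included columns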
#'
#'
#' # don't run the following due to time limits on CRAN
#'
#' \dontrun{
#'
#' # example with a non-full-rank case
#'
#' loc <- system.file("testdata", package = "BAS")
#' d <- read.csv(paste(loc, "JASP-testdata.csv", sep = "/"))
#' fullModelFormula <- as.formula("contNormal ~ contGamma * contExpon +
#' contGamma * contcor1 + contExpon * contcor1")
#'
#' # should trigger a warning (default is to use pivoting, so use pivot=FALSE
#' # only for full rank case)
#'
#' out = bas.lm(fullModelFormula,
#' data = d,
#' alpha = 0.125316,
#' prior = "JZS",
#' weights = facFifty, force.heredity = FALSE, pivot = FALSE)
#'
#'
#' # use pivot = TRUE to fit non-full rank case (default)
#' # This is slower but safer
#'
#' out = bas.lm(fullModelFormula,
#' data = d,
#' alpha = 0.125316,
#' prior = "JZS",
#' weights = facFifty, force.heredity = FALSE, pivot = TRUE)
#' }
#' # more complete demo's
#' demo(BAS.hald)
#' \dontrun{
#' demo(BAS.USCrime)
#' }
#'
#' @rdname bas.lm
#' @keywords regression
#' @family BAS methods
#' @concept BMA
#' @concept variable selection
#' @export
bas.lm <- function(formula,
data,
subset,
weights,
contrasts=NULL,
na.action = "na.omit",
n.models = NULL,
prior = "ZS-null",
alpha = NULL,
modelprior = beta.binomial(1, 1),
initprobs = "Uniform",
include.always = ~1,
method = "BAS",
update = NULL,
bestmodel = NULL,
prob.local = 0.0,
prob.rw = 0.5,
MCMC.iterations = NULL,
lambda = NULL,
delta = 0.025,
thin = 1,
renormalize = FALSE,
force.heredity = FALSE,
pivot = TRUE,
tol = 1e-7,
bigmem = FALSE) {
num.updates <- 10
call <- match.call()
priormethods <- c(
"g-prior",
"hyper-g",
"hyper-g-laplace",
"hyper-g-n",
"AIC",
"BIC",
"ZS-null",
"ZS-full",
"EB-local",
"EB-global",
"JZS"
)
if (is.null(prior) | !(prior %in% priormethods)) {
stop(paste(
"prior ",
prior,
"is not one of ",
paste(priormethods, collapse = ", ")
))
}
if (!(method %in% c("BAS", "deterministic", "MCMC", "MCMC+BAS"))) {
stop(paste("No available sampling method:", method))
}
# from lm
mfall <- match.call(expand.dots = FALSE)
m <- match(
c(
"formula", "data", "subset", "weights", "na.action",
"offset"
),
names(mfall),
0L
)
mf <- mfall[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- quote(stats::model.frame)
mf <- eval(mf, parent.frame())
# data = model.frame(formula, data, na.action=na.action, weights=weights)
n.NA <- length(attr(mf, "na.action"))
if (n.NA > 0) {
warning(paste(
"dropping ", as.character(n.NA),
"rows due to missing data"
))
}
Y <- model.response(mf, "numeric")
mt <- attr(mf, "terms")
X <- model.matrix(mt, mf, contrasts)
# X = model.matrix(formula, mf)
Xorg <- X
namesx <- dimnames(X)[[2]]
namesx[1] <- "Intercept"
n <- dim(X)[1]
if (n == 0) {stop("Sample size is zero; check data and subset arguments")}
weights <- as.vector(model.weights(mf))
if (is.null(weights)) {
weights <- rep(1, n)
}
if (length(weights) != n) {
stop(simpleError(paste(
"weights are of length ", length(weights), "not of length ", n
)))
}
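  # center the non-intercept columns at their weighted means; under this
  # parameterization the intercept is estimated by the (weighted) mean of Y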
mean.x <- apply(X[, -1, drop = F], 2, weighted.mean, w = weights)
ones <- X[, 1]
X <- cbind(ones, sweep(X[, -1, drop = FALSE], 2, mean.x))
p <- dim(X)[2] # with intercept
if (!inherits(modelprior, "prior") ) stop("modelprior should be an object of class prior, uniform(), beta.binomial(), etc")
if (n <= p) {
if (modelprior$family == "Uniform" ||
modelprior$family == "Bernoulli") {
warning(
"Uniform prior (Bernoulli) distribution on the Model Space are not recommended for p > n; please consider using tr.beta.binomial or power.prior instead"
)
}
}
if (!is.numeric(initprobs)) {
if (n <= p && initprobs == "eplogp") {
stop(
"Full model is not full rank so cannot use the eplogp bound to create starting sampling probabilities, perhpas use 'marg-eplogp' for fiting marginal models\n"
)
}
initprobs <- switch(
initprobs,
"eplogp" = eplogprob(lm(Y ~ X - 1)),
"marg-eplogp" = eplogprob.marg(Y, X),
"uniform" = c(1.0, rep(.5, p - 1)),
"Uniform" = c(1.0, rep(.5, p - 1))
)
}
if (length(initprobs) == (p - 1)) {
initprobs <- c(1.0, initprobs)
}
keep <- 1
# set up variables to always include
if ("include.always" %in% names(mfall)) {
minc <-
match(c("include.always", "data", "subset"), names(mfall), 0L)
mfinc <- mfall[c(1L, minc)]
mfinc$drop.unused.levels <- TRUE
names(mfinc)[2] <- "formula"
mfinc[[1L]] <- quote(stats::model.frame)
mfinc <- eval(mfinc, parent.frame())
mtinc <- attr(mfinc, "terms")
X.always <- model.matrix(mtinc, mfinc, contrasts)
keep <- c(1L, match(colnames(X.always)[-1], colnames(X)))
initprobs[keep] <- 1.0
if (ncol(X.always) == ncol(X)) {
# just one model with all variables forced in
      # use method='BAS' as deterministic and MCMC fail in this context
method <- "BAS"
}
}
if (is.null(n.models)) {
n.models <- min(2^p, 2^19)
}
if (is.null(MCMC.iterations)) {
MCMC.iterations <- as.integer(n.models * 10)
}
Burnin.iterations <- as.integer(MCMC.iterations)
if (is.null(lambda)) {
lambda <- 1.0
}
int <- TRUE # assume that an intercept is always included
method.num <- switch(
prior,
"g-prior" = 0,
"hyper-g" = 1,
"EB-local" = 2,
"BIC" = 3,
"ZS-null" = 4,
"ZS-full" = 5,
"hyper-g-laplace" = 6,
"AIC" = 7,
"EB-global" = 2,
"hyper-g-n" = 8,
"JZS" = 9
)
if (is.null(alpha)) {
alpha <- switch(
prior,
"g-prior" = n,
"hyper-g" = 3,
"EB-local" = 2,
"BIC" = n,
"ZS-null" = 1,
"ZS-full" = n,
"hyper-g-laplace" = 3,
"AIC" = 0,
"EB-global" = 2,
"hyper-g-n" = 3,
"JZS" = 1,
NULL
)
}
# start nocov
# shouldn't be able to get here
if (is.null(alpha)) {
stop("Error in BAS code, please report on GitHub")
}
# end nocov
parents <- matrix(1, 1, 1)
if (method == "MCMC+BAS" |
method == "deterministic" ) {
force.heredity <- FALSE
} # does not work with updating the tree
if (force.heredity) {
parents <- make.parents.of.interactions(mf, data)
# check to see if really necessary
if (sum(parents) == nrow(parents)) {
parents <- matrix(1, 1, 1)
force.heredity <- FALSE
}
}
prob <- normalize.initprobs.lm(initprobs, p)
if (is.null(bestmodel)) {
bestmodel = as.integer(prob)
}
bestmodel[keep] <- 1
if (force.heredity) {
update <- NULL # do not update tree FIXME LATER
if (prob.heredity(bestmodel, parents) == 0) {
warning("bestmodel violates heredity conditions; resetting to null model")
bestmodel <- c(1, rep(0, p - 1))
}
# initprobs=c(1, seq(.95, .55, length=(p-1) ))
}
bestmodel = as.integer(bestmodel)
n.models <- normalize.n.models(n.models, p, prob,
method, bigmem)
# print(n.models)
modelprior <- normalize.modelprior(modelprior, p)
if (is.null(update)) {
if (force.heredity) { # do not update tree for BAS
update <- n.models + 1}
else {
if (n.models == 2^(p - 1)) {
update <- n.models + 1
} else {
(update <- n.models / num.updates)
}}
}
modelindex <- as.list(1:n.models)
Yvec <- as.numeric(Y)
modeldim <- as.integer(rep(0, n.models))
n.models <- as.integer(n.models)
# sampleprobs = as.double(rep(0.0, n.models))
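  # run the selected sampler over the model space: "BAS" (sampling without
  # replacement), "MCMC", "MCMC+BAS", or "deterministic" enumeration,
  # all implemented in compiled C code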
result <- switch(
method,
"BAS" = .Call(
C_sampleworep_new,
Yvec,
X,
sqrt(weights),
prob,
modeldim,
incint = as.integer(int),
alpha = as.numeric(alpha),
method = as.integer(method.num),
modelprior = modelprior,
update = as.integer(update),
Rbestmodel = as.integer(bestmodel),
plocal = as.numeric(prob.local),
Rparents = parents,
Rpivot = pivot,
Rtol = tol,
PACKAGE = "BAS"
),
"MCMC+BAS" = .Call(
C_mcmcbas,
Yvec,
X,
sqrt(weights),
prob,
modeldim,
incint = as.integer(int),
alpha = as.numeric(alpha),
method = as.integer(method.num),
modelprior = modelprior,
update = as.integer(update),
Rbestmodel = as.integer(bestmodel),
plocal = as.numeric(1.0 - prob.rw),
as.integer(Burnin.iterations),
as.integer(MCMC.iterations),
as.numeric(lambda),
as.numeric(delta),
Rthin = as.integer(thin),
Rparents = parents,
Rpivot = pivot,
Rtol = tol
),
"MCMC" = .Call(
C_mcmc_new,
Yvec,
X,
sqrt(weights),
prob,
modeldim,
incint = as.integer(int),
alpha = as.numeric(alpha),
method = as.integer(method.num),
modelprior = modelprior,
update = as.integer(update),
Rbestmodel = as.integer(bestmodel),
plocal = as.numeric(1.0 - prob.rw),
as.integer(Burnin.iterations),
as.integer(MCMC.iterations),
as.numeric(lambda),
as.numeric(delta),
Rthin = as.integer(thin),
Rparents = parents,
Rpivot = pivot,
Rtol = tol
),
"deterministic" = .Call(
C_deterministic,
Yvec,
X,
sqrt(weights),
prob,
modeldim,
incint = as.integer(int),
alpha = as.numeric(alpha),
method = as.integer(method.num),
modelprior = modelprior,
Rpivot = pivot,
Rtol = tol
)
)
result$rank_deficient <- FALSE
if (any(is.na(result$logmarg))) {
warning(
"log marginals and posterior probabilities contain NA's. Consider re-running with the option `pivot=TRUE` if there are models that are not full rank"
)
result$rank_deficient <- TRUE
}
if (any(result$rank != result$size)) {
result$rank_deficient <- TRUE
}
result$n.models <- length(result$postprobs)
result$namesx <- namesx
result$n <- length(Yvec)
result$prior <- prior
result$modelprior <- modelprior
result$alpha <- alpha
result$probne0.RN <- result$probne0
result$postprobs.RN <- result$postprobs
result$include.always <- keep
# github issue #74. drop models with zero prior probability
if (any(result$priorprobs == 0)) {
drop.models = result$priorprobs != 0
result$mle = result$mle[drop.models]
result$mle.se = result$mle.se[drop.models]
result$mse = result$mse[drop.models]
result$which = result$which[drop.models]
result$freq = result$freq[drop.models]
result$shrinkage = result$shrinkage[drop.models]
result$R2 = result$R2[drop.models]
result$logmarg = result$logmarg[drop.models]
result$size = result$size[drop.models]
result$rank = result$rank[drop.models]
result$sampleprobs = result$sampleprobs[drop.models]
result$postprobs = result$postprobs[subset = drop.models]
result$priorprobs = result$priorprobs[subset = drop.models]
result$n.models = length(result$postprobs)
}
if (method == "MCMC" || method == "MCMC_new") {
result$n.models <- result$n.Unique
result$postprobs.MCMC <- result$freq / sum(result$freq)
if (!renormalize) {
result$probne0 <- result$probne0.MCMC
result$postprobs <- result$postprobs.MCMC
}
}
df <- rep(n - 1, result$n.models)
if (prior == "AIC" |
prior == "BIC" | prior == "IC") {
df <- df - result$rank + 1
}
result$df <- df
result$n.vars <- p
result$Y <- Yvec
result$X <- Xorg
result$mean.x <- mean.x
result$call <- call
result$contrasts <- attr(X, "contrasts")
result$xlevels <- .getXlevels(mt, mf)
result$terms <- mt
result$model <- mf
class(result) <- c("bas")
if (prior == "EB-global") {
result <- EB.global(result)
}
return(result)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/bas_lm.R |
#' Empirical Bayes Prior Distribution for Coefficients in BMA Model
#'
#' Creates an object representing the EB prior for BAS GLM.
#'
#' Creates a structure used for \code{\link{bas.glm}}.
#'
#' @aliases EB EB.local
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{CCH}} and \code{\link{bas.glm}}
#' @examples
#' EB.local()
#' @rdname EB.local
#' @family beta priors
#' @export
EB.local <- function() {
structure(list(family = "EB-local", class = "EB", hyper.parameters = list(local = TRUE)),
class = "prior"
)
}
#' Generalized g-Prior Distribution for Coefficients in BMA Models
#'
#' Creates an object representing the CCH mixture of g-priors on coefficients
#' for BAS .
#'
#' Creates a structure used for \code{\link{bas.glm}}.
#'
#' @param alpha a scalar > 0, recommended alpha=.5 (betaprime) or 1 for CCH.
#' The hyper.g(alpha) is equivalent to CCH(alpha -2, 2, 0). Liang et al
#' recommended values in the range 2 < alpha_h <= 4
#' @param beta a scalar > 0. The value is not updated by the data; beta should
#' be a function of n for consistency under the null model. The hyper-g
#' corresponds to b = 2
#' @param s a scalar, recommended s=0
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise A Clyde
#' @seealso \code{\link{IC.prior}}, \code{\link{bic.prior}},
#' \code{\link{bas.glm}}
#' @examples
#' CCH(alpha = .5, beta = 100, s = 0)
#' @rdname CCH
#' @family beta priors
#' @export
#'
#'
CCH <- function(alpha, beta, s = 0) {
# if (beta == 2 & alpha == 2 & s == 0) {
# structure(list(family="Truncated-Gamma", class="TCCH", hyper.parameters=NULL),
# class="prior")}
# else {
structure(list(
family = "CCH", class = "TCCH",
hyper.parameters = list(alpha = alpha, beta = beta, s = s)
),
class = "prior"
)
# }
}
#' Generalized tCCH g-Prior Distribution for Coefficients in BMA Models
#'
#' Creates an object representing the tCCH mixture of g-priors on coefficients
#' for BAS.
#'
#' Creates a structure used for \code{\link{bas.glm}}.
#'
#' @param alpha a scalar > 0, recommended alpha=.5 (betaprime) or 1.
#' @param beta a scalar > 0. The value is not updated by the data; beta should
#' be a function of n for consistency under the null model.
#' @param s a scalar, recommended s=0 a priori
#' @param r r arbitrary; in the hyper-g-n prior sets r = (alpha + 2)
#' @param v 0 < v
#' @param theta theta > 1
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{CCH}}, \code{\link{robust}}, \code{\link{hyper.g}},
#' \code{\link{hyper.g.n}}, \code{\link{bas.glm}}
#' @examples
#' n <- 500
#' tCCH(alpha = 1, beta = 2, s = 0, r = 1.5, v = 1, theta = 1 / n)
#' @rdname tCCH
#' @family beta priors
#' @export
tCCH <- function(alpha = 1, beta = 2, s = 0, r = 3 / 2, v = 1, theta = 1) {
# if (beta == 2 & alpha == 2 & s == 0) {
# structure(list(family="Truncated-Gamma", class="TCCH", hyper.parameters=NULL),
# class="prior")}
# else {
structure(list(
family = "tCCH", class = "TCCH",
hyper.parameters = list(
alpha = alpha, beta = beta, s = s,
r = r, v = v, theta = theta
)
),
class = "prior"
)
# }
}
#' Intrinsic Prior Distribution for Coefficients in BMA Models
#'
#' Creates an object representing the intrinsic prior on g, a special case of
#' the tCCH mixture of g-priors on coefficients for BAS.
#'
#' Creates a structure used for \code{\link{bas.glm}}.
#'
#' @param n the sample size; if NULL, the value derived from the data in the
#' call to `bas.glm` will be used.
#' @return returns an object of class "prior", with the family "intrinsic" of
#' class "TCCH" and hyperparameters alpha = 1, beta = 1, s = 0, r = 1, n = n
#' for the tCCH prior where theta in the tCCH prior is determined by the model
#' size and sample size.
#' @author Merlise A Clyde
#' @seealso \code{\link{tCCH}}, \code{\link{robust}}, \code{\link{hyper.g}},
#' \code{\link{hyper.g.n}}, \code{\link{bas.glm}}
#' @references Womack, A., Novelo,L.L., Casella, G. (2014). "Inference From
#' Intrinsic Bayes' Procedures Under Model Selection and Uncertainty". Journal
#' of the American Statistical Association. 109:1040-1053.
#' \doi{10.1080/01621459.2014.880348}
#'
#' @examples
#' n <- 500
#' tCCH(alpha = 1, beta = 2, s = 0, r = 1.5, v = 1, theta = 1 / n)
#' @rdname intrinsic
#' @family beta priors
#' @export
intrinsic <- function(n = NULL) {
# if (beta == 2 & alpha == 2 & s == 0) {
# structure(list(family="Truncated-Gamma", class="TCCH", hyper.parameters=NULL),
# class="prior")}
# else {
structure(list(
family = "intrinsic", class = "TCCH",
hyper.parameters = list(alpha = 1.0, beta = 1.0, s = 0.0, r = 1.0, n = as.numeric(n))
),
class = "prior"
)
# }
}
#' Generalized hyper-g/n Prior Distribution for g for mixtures of g-priors on
#' Coefficients in BMA Models
#'
#' Creates an object representing the hyper-g/n mixture of g-priors on
#' coefficients for BAS. This is a special case of the tCCH prior
#'
#' Creates a structure used for \code{\link{bas.glm}}. This is a special case
#' of the \code{\link{tCCH}}, where \code{hyper.g.n(alpha=3, n)} is equivalent
#' to \code{ tCCH(alpha=1, beta=2, s=0, r=1.5, v = 1, theta=1/n) }
#'
#' @param alpha a scalar > 0, recommended 2 < alpha <= 3
#' @param n The sample size; if NULL, the value derived from the data in the
#' call to `bas.glm` will be used.
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{tCCH}}, \code{\link{robust}}, \code{\link{hyper.g}},
#' \code{\link{CCH}}, \code{\link{bas.glm}}
#' @examples
#' n <- 500
#' hyper.g.n(alpha = 3, n = n)
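#'
#' # the tCCH special case noted in Details: the stored hyperparameters agree
#' hyper.g.n(alpha = 3, n = n)$hyper.parameters
#' tCCH(alpha = 1, beta = 2, s = 0, r = 1.5, v = 1, theta = 1 / n)$hyper.parameters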
#' @rdname hyper.g.n
#' @family beta priors
#' @export
hyper.g.n <- function(alpha = 3, n = NULL) {
# if (beta == 2 & alpha == 2 & s == 0) {
# structure(list(family="Truncated-Gamma", class="TCCH", hyper.parameters=NULL),
# class="prior")}
# else {
structure(list(
family = "hyper-g/n", class = "TCCH",
hyper.parameters = list(
alpha = alpha - 2, beta = 2, s = 0,
r = alpha / 2, v = 1, theta = 1 / n
), n = n
),
class = "prior"
)
# }
}
#' Jeffreys Prior Distribution for $g$ for Mixtures of g-Priors for
#' Coefficients in BMA Models
#'
#' Creates an object representing the Jeffrey's Prior on g mixture of g-priors
#' on coefficients for BAS. This is equivalent to a limiting version of the
#' CCH(a, 2, 0) with a = 0 or the hyper-g(a = 2) and is an improper prior. As
#' $g$ does not appear in the Null Model, Bayes Factors and model probabilities
#' are not well-defined because of arbitrary normalizing constants, and for
#' this reason the null model is excluded and the same constants are used
#' across other models.
#'
#' Creates a structure used for \code{\link{bas.glm}}.
#'
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{CCH}} \code{\link{bas.glm}}
#' @examples
#' Jeffreys()
#' @rdname Jeffreys
#' @family beta priors
#' @export
Jeffreys <- function() {
structure(list(
family = "Jeffreys", class = "TCCH",
hyper.parameters = list(alpha = 0, beta = 2, s = 0.0)
),
class = "prior"
)
}
#' Hyper-g-Prior Distribution for Coefficients in BMA Models
#'
#' Creates an object representing the hyper-g mixture of g-priors on
#' coefficients for BAS.
#'
#' Creates a structure used for \code{\link{bas.glm}}.
#'
#' @param alpha a scalar > 0. The hyper.g(alpha) is equivalent to CCH(alpha -2,
#' 2, 0). Liang et al recommended values in the range 2 < alpha_h <= 3
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{CCH}} \code{\link{bas.glm}}
#' @examples
#' hyper.g(alpha = 3)
#' @rdname hyper.g
#' @family beta priors
#' @export
hyper.g <- function(alpha = 3.0) {
if (alpha <= 2) {
stop("alpha must be greater than 2 in hyper.g prior")
}
else {
structure(list(
family = "CCH", class = "TCCH",
hyper.parameters = list(alpha = alpha - 2.0, beta = 2, s = 0.0)
),
class = "prior"
)
}
}
#' Generalized g-Prior Distribution for Coefficients in BMA Models
#'
#' Creates an object representing the Truncated Gamma (tCCH) mixture of
#' g-priors on coefficients for BAS, where u = 1/(1+g) has a Gamma distribution
#' supported on (0, 1].
#'
#' Creates a structure used for \code{\link{bas.glm}}.
#'
#' @param alpha a scalar > 0, recommended alpha=.5 (betaprime) or 1. alpha=2
#' corresponds to the uniform prior on the shrinkage factor.
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{CCH}} \code{\link{bas.glm}}
#' @examples
#'
#' TG(alpha = 2)
#' CCH(alpha = 2, beta = 100, s = 0)
#' @family beta priors
#' @export
TG <- function(alpha = 2) {
structure(list(
family = "TG", class = "TCCH",
hyper.parameters = list(alpha = alpha, beta = 2.0, s = 0.0)
),
class = "prior"
)
}
#' Beta-Prime Prior Distribution for Coefficients in BMA Model
#'
#' Creates an object representing the Beta-Prime prior that is mixture of
#' g-priors on coefficients for BAS.
#'
#' Creates a structure used for \code{\link{bas.glm}}.
#'
#' @param n the sample size; if NULL, the value derived from the data in the
#' call to `bas.glm` will be used.
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{CCH}}
#' @examples
#' beta.prime(n = 100)
#' @rdname beta.prime
#' @family beta priors
#' @export
beta.prime <- function(n = NULL) {
if (is.integer(n)) n = as.numeric(n)
structure(list(
family = "betaprime", class = "TCCH",
hyper.parameters = list(n = n, alpha = .5)
),
class = "prior"
)
}
#' Robust-Prior Distribution for Coefficients in BMA Model
#'
#' Creates an object representing the robust prior of Bayarri et al (2012) that
#' is mixture of g-priors on coefficients for BAS.
#'
#' Creates a prior structure used for \code{\link{bas.glm}}.
#'
#' @param n the sample size.
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{CCH}} and \code{\link{bas.glm}}
#' @examples
#' robust(100)
#' @rdname robust
#' @family beta priors
#' @export
robust <- function(n = NULL) {
structure(list(
family = "robust", class = "TCCH",
hyper.parameters = list(n = n)
),
class = "prior"
)
}
#' @family prior functions
#' @export
bic.prior <- function(n = NULL) {
if (is.null(n)) {
penalty <- NULL
} else {
penalty <- log(n)
}
structure(list(
family = "BIC", class = "IC",
hyper.parameters = list(penalty = penalty, n = as.numeric(n)),
hyper = penalty
),
class = "prior"
)
}
#' @family beta priors
#' @export
aic.prior <- function() {
structure(list(
family = "AIC", class = "IC", hyper.parameters = list(penalty = 2),
hyper = 2
),
class = "prior"
)
}
#' Information Criterion Families of Prior Distribution for Coefficients in BMA
#' Models
#'
#' Creates an object representing the prior distribution on coefficients for
#' BAS.
#'
#' The log marginal likelihood is approximated as -2*(deviance +
#' penalty*dimension). Allows alternatives to AIC (penalty = 2) and BIC
#' (penalty = log(n)). For BIC, the argument may be missing, in which case the
#' sample size is determined from the call to `bas.glm` and used to determine
#' the penalty.
#'
#' @aliases IC.prior aic.prior AIC.prior bic.prior BIC.prior
#' @param penalty a scalar used in the penalized loglikelihood of the form
#' penalty*dimension
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{g.prior}}
#' @examples
#' IC.prior(2)
#' aic.prior()
#' bic.prior(100)
#' @family beta priors
#' @export
IC.prior <- function(penalty) {
  if (is.integer(penalty)) penalty = as.numeric(penalty)
structure(list(
family = "IC", class = "IC", hyper = penalty,
hyper.parameters = list(penalty = penalty)
),
class = "prior"
)
}
#' Families of G-Prior Distribution for Coefficients in BMA Models
#'
#' Creates an object representing the g-prior distribution on coefficients for
#' BAS.
#'
#' Creates a structure used for BAS.
#'
#' @param g a scalar used in the covariance of Zellner's g-prior, Cov(beta) =
#' sigma^2 g (X'X)^-1
#'
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{IC.prior}}
#' @examples
#' g.prior(100)
#' @family beta priors
#' @export
g.prior <- function(g) {
if (is.integer(g)) g = as.numeric(g)
structure(list(
family = "g.prior", g = g, class = "g-prior",
hyper = as.numeric(g),
hyper.parameters = list(g = g)
),
class = "prior"
)
}
#' Test based Bayes Factors for BMA Models
#'
#' Creates an object representing the prior distribution on coefficients for
#' BAS that corresponds to the test-based Bayes Factors.
#'
#' Creates a prior object structure used for BAS in `bas.glm`.
#'
#' @param g a scalar used in the covariance of Zellner's g-prior, Cov(beta) =
#' sigma^2 g (X'X)^-
#'
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{g.prior}}, \code{\link{bas.glm}}
#' @examples
#'
#' testBF.prior(100)
#' library(MASS)
#' data(Pima.tr)
#'
#' # use g = n
#' bas.glm(type ~ .,
#' data = Pima.tr, family = binomial(),
#' betaprior = testBF.prior(nrow(Pima.tr)),
#' modelprior = uniform(), method = "BAS"
#' )
#' @family beta priors
#' @export
testBF.prior <- function(g) {
structure(list(
family = "testBF.prior", g = as.numeric(g), class = "g-prior",
hyper = as.numeric(g),
hyper.parameters = list(g = as.numeric(g), loglik_null = NULL)
),
class = "prior"
)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/beta_priors.R |
#' Compound Confluent hypergeometric function of two variables
#'
#' Compute the Confluent Hypergeometric function of two variables, also know as
#' a Horn hypergeometric function or Humbert's hypergeometric used in Gordy
#' (1998) with integral representation:
#'
#' phi_1(a,b,c,x,y) = [Gamma(c)/(Gamma(a) Gamma(c-a))] Int_0^1
#' t^(a-1) (1 - t)^(c-a-1) (1 - yt)^(-b) exp(x t) dt
#' \url{https://en.wikipedia.org/wiki/Humbert_series} Note that Gordy's
#' arguments for x and y are reversed in the reference above.
#'
#' The original `phi1` function in `BAS` was based on `C` code provided by
#' Gordy; that implementation returns NA's
#' when x is greater than `log(.Machine$double.xmax)/2`. A more
#' stable method for calculating the `phi1` function using R's `integrate`
#' was suggested by Daniel Heemann and is now an option whenever $x$ is too
#' large. For calculating Bayes factors that use the `phi1` function we
#' recommend using the `log=TRUE` option to compute log Bayes factors.
#'
#'
#' @param a a > 0
#' @param b arbitrary
#' @param c c > 0
#' @param x x > 0
#' @param y y > 0
#' @param log logical indicating whether to return phi1 on the log scale
#' @author Merlise Clyde (\email{clyde@@duke.edu})
#' @author Daniel Heemann (\email{df.heemann@@gmail.com})
#' @references Gordy 1998
#' @keywords math
#' @examples
#'
#' # special cases
#' # phi1(a, b, c, x=0, y) is the same as 2F1(b, a; c, y)
#' phi1(1, 2, 1.5, 0, 1 / 100, log=FALSE)
#' hypergeometric2F1(2, 1, 1.5, 1 / 100, log = FALSE)
#'
#' # phi1(a,0,c,x,y) is the same as 1F1(a,c,x)
#' phi1(1, 0, 1.5, 3, 1 / 100)
#' hypergeometric1F1(1, 1.5, 3, log = FALSE)
#'
#' # use direct integration
#' phi1(1, 2, 1.5, 1000, 0, log=TRUE)
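#'
#' # sketch of the integrate()-based approach mentioned in Details; `phi1.num`
#' # is only an illustration here and is not part of the package
#' phi1.num <- function(a, b, c, x, y) {
#'   f <- function(t) exp((a - 1) * log(t) + (c - a - 1) * log1p(-t) -
#'                          b * log1p(-y * t) + x * t - lbeta(a, c - a))
#'   integrate(f, 0, 1)$value
#' }
#' # agrees with the 1F1 special case above:
#' phi1.num(1, 0, 1.5, 3, 1 / 100)
#' hypergeometric1F1(1, 1.5, 3, log = FALSE)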
#' @rdname phi1
#' @family special functions
#' @export
#'
#'
phi1 <- function(a, b, c, x, y, log=FALSE) {
# phi_1(a,b,c,x,y) =
# Int_0^1 t^(a-1) (1 - t)^(c-a-1) (1 - y t)^(-b) exp(x t) dt/Beta(a, c-a)
na <- length(a)
nb <- length(b)
nc <- length(c)
nx <- length(x)
ny <- length(y)
# if (any(y < 0 | y >= 1) )stop("y is outside of [0, 1)")
# if (any(x < 0)) stop("x must be >= 0")
ns = c(na,nb, nc, nx, ny)
n = max(ns)
if ((n > 1) && (mean(ns) != n)) {
stop("length of inputs are not the same")
}
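  # rescale so that exp(x * t) stays within double-precision range for large x;
  # div and scale are passed to the compiled routine below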
MV = log(.Machine$double.xmax)/2
MX = max(x)
div = ceiling(MX/MV)
scale = 1/exp(max(0, (MX - MV)/div))
out <- rep(0, n)
ans <- .C(C_phi1,
as.numeric(a),
as.numeric(b),
as.numeric(c),
as.numeric(x),
as.numeric(y),
as.integer(div),
as.numeric(scale),
out = as.numeric(out), as.integer(n))$out
if (!log) ans=exp(ans)
return(ans)
}
#' Truncated Compound Confluent Hypergeometric function
#'
#' Compute the Truncated Confluent Hypergeometric function from Li and Clyde
#' (2018) which is the normalizing constant in the tcch density of Gordy
#' (1998) with integral representation:
#'
#' tr.cch(a,b,r,s,v,k) = Int_0^1/v
#' u^(a-1) (1 - vu)^(b -1) (k + (1 - k)vu)^(-r) exp(-s u) du
#'
#' This uses a more
#' stable method for calculating the normalizing constant using R's `integrate`
#' function rather than the version in Gordy 1998. For calculating Bayes factors
#' that use the `trCCH` function we
#' recommend using the `log=TRUE` option to compute log Bayes factors.
#'
#'
#' @param a a > 0
#' @param b b > 0
#' @param r r >= 0
#' @param s arbitrary
#' @param v 0 < v
#' @param k arbitrary
#' @param log logical indicating whether to return values on the log scale;
#' useful for Bayes Factor calculations
#' @author Merlise Clyde (\email{clyde@@duke.edu})
#' @references Gordy 1998 Li & Clyde 2018
#' @keywords math
#' @aliases trunc.CCH
#' @examples
#'
#' # special cases
#' # trCCH(a, b, r, s=0, v = 1, k) is the same as
#' # 2F1(a, r, a + b, 1 - 1/k)*beta(a, b)/k^r
#'
#' k = 10; a = 1.5; b = 2; r = 2;
#' trCCH(a, b, r, s=0, v = 1, k=k) *k^r/beta(a,b)
#' hypergeometric2F1(a, r, a + b, 1 - 1/k, log = FALSE)
#'
#' # trCCH(a,b,0,s,1,1) is the same as
#' # beta(a, b) 1F1(a, a + b, -s, log=FALSE)
#' s = 3; r = 0; v = 1; k = 1
#' beta(a, b)*hypergeometric1F1(a, a+b, -s, log = FALSE)
#' trCCH(a, b, r, s, v, k)
#'
#' # Equivalence with the Phi1 function
#' a = 1.5; b = 3; k = 1.25; s = 400; r = 2; v = 1;
#'
#' phi1(a, r, a + b, -s, 1 - 1/k, log=FALSE)*(k^-r)*gamma(a)*gamma(b)/gamma(a+b)
#' trCCH(a,b,r,s,v,k)
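#'
#' # direct numerical check of the integral definition in Details
#' # (illustration only, with small values so integrate() is well behaved)
#' a = 1.5; b = 2; r = 1; s = 2; v = 1; k = 2
#' f = function(u) u^(a - 1) * (1 - v * u)^(b - 1) * (k + (1 - k) * v * u)^(-r) * exp(-s * u)
#' integrate(f, 0, 1 / v)$value
#' trCCH(a, b, r, s, v, k)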
#' @rdname trCCH
#' @family special functions
#' @export
trCCH <- function(a, b, r, s, v, k, log=FALSE) {
  # trCCH(a,b,r,s,v,k) = Int_0^{1/v}
  # u^(a-1) (1 - vu)^(b-1) (k + (1 - k)vu)^(-r) exp(-s u) du
na <- length(a)
nb <- length(b)
nr <- length(r)
ns <- length(s)
nv <- length(v)
nk <- length(k)
# if (any(v <= 0 | v > 1) )stop("v is outside of (0, 1]")
# if (any(s < 0)) stop("s must be >= 0")
ns = c(na,nb, nr, ns, nv, nk)
n = max(ns)
if ((n > 1) && (mean(ns) != n)) {
stop("length of inputs are not the same")
}
out <- rep(0, n)
ans <- .C(C_tcch,
as.numeric(a),
as.numeric(b),
as.numeric(r),
as.numeric(s),
as.numeric(v),
as.numeric(k),
out = as.numeric(out), as.integer(n))$out
if (!log) ans=exp(ans)
return(ans)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/cch.R |
#' Coefficients of a Bayesian Model Average object
#'
#' Extract conditional posterior means and standard deviations, marginal
#' posterior means and standard deviations, posterior probabilities, and
#' marginal inclusions probabilities under Bayesian Model Averaging from an
#' object of class 'bas'
#'
#' Calculates posterior means and (approximate) standard deviations of the
#' regression coefficients under Bayesian Model averaging using g-priors and
#' mixtures of g-priors. Print returns overall summaries. For fully Bayesian
#' methods that place a prior on g, the posterior standard deviations do not
#' take into account full uncertainty regarding g. Will be updated in future
#' releases.
#'
#' @aliases coef.bas coef coefficients coefficients.bas print.coef.bas
#' @param object object of class 'bas' created by BAS
#' @param x object of class 'coef.bas' to print
#' @param n.models Number of top models to report in the printed summary, for
#' coef the default is to use all models. To extract summaries for the Highest
#' Probability Model, use n.models=1 or estimator="HPM".
#' @param estimator return summaries for a selected model, rather than using
#' BMA. Options are 'HPM' (highest posterior probability model) ,'MPM' (median
#' probability model), and 'BMA'
#' @param digits number of significant digits to print
#' @param ... other optional arguments
#' @return \code{coefficients} returns an object of class coef.bas with the
#' following:
#' \item{conditionalmeans}{a matrix with conditional posterior means
#' for each model}
#' \item{conditionalsd}{ standard deviations for each model }
#' \item{postmean}{marginal posterior means of each regression coefficient
#' using BMA}
#' \item{postsd}{marginal posterior standard deviations using BMA}
#' \item{probne0}{vector of posterior inclusion probabilities, the marginal
#' probability that each coefficient is non-zero}
#' @note With highly correlated variables, marginal summaries may not be
#' representative of the joint distribution. Use \code{\link{plot.coef.bas}} to
#' view distributions. The value reported for the intercept is
#' under the centered parameterization. Under the Gaussian error
#' model it will be centered at the sample mean of Y.
#' @author Merlise Clyde \email{clyde@@duke.edu}
#' @seealso \code{\link{bas}}, \code{\link{confint.coef.bas}}
#' @references Liang, F., Paulo, R., Molina, G., Clyde, M. and Berger, J.O.
#' (2005) Mixtures of g-priors for Bayesian Variable Selection. Journal of the
#' American Statistical Association. 103:410-423. \cr
#' \doi{10.1198/016214507000001337}
#' @keywords regression
#' @examples
#'
#' data("Hald")
#' hald.gprior = bas.lm(Y~ ., data=Hald, n.models=2^4, alpha=13,
#' prior="ZS-null", initprobs="Uniform", update=10)
#' coef.hald.gprior = coefficients(hald.gprior)
#' coef.hald.gprior
#' plot(coef.hald.gprior)
#' confint(coef.hald.gprior)
#'
#' #Estimation under Median Probability Model
#' coef.hald.gprior = coefficients(hald.gprior, estimator="MPM")
#' coef.hald.gprior
#' plot(coef.hald.gprior)
#' plot(confint(coef.hald.gprior))
#'
#'
#' coef.hald.gprior = coefficients(hald.gprior, estimator="HPM")
#' coef.hald.gprior
#' plot(coef.hald.gprior)
#' confint(coef.hald.gprior)
#'
#' # To add estimation under Best Predictive Model
#'
#'
#' @rdname coef
#' @family bas methods
#' @export
coef.bas <- function(object, n.models, estimator = "BMA", ...) {
if (estimator == "BPM") {
stop("Extracting coefficients for the BPM is not implemented yet")
}
if (estimator == "MPM") {
nvar <- object$n.vars - 1
bestmodel <- (0:nvar)[object$probne0 > .5]
best <- 1
models <- rep(0, nvar + 1)
models[bestmodel + 1] <- 1
if (sum(models) > 1) {
# fix for issue #39 and #56
modelform = as.formula(eval(object$call$formula, parent.frame()))
environment(modelform) = environment()
data = eval(object$call$data)
weights = eval(object$call$weights)
object <- bas.lm(
formula=modelform,
data = data,
weights = weights,
n.models = 1,
alpha = object$g,
initprobs = object$probne0,
prior = object$prior,
modelprior = object$modelprior,
update = NULL,
bestmodel = models,
prob.local = 0.0
)
}
}
postprobs <- object$postprobs
if (estimator == "MPM" | estimator == "HPM") {
n.models <- 1
}
if (missing(n.models)) {
n.models <- length(postprobs)
}
topm <- order(-postprobs)[1:n.models]
postprobs <- postprobs[topm] / sum(postprobs[topm])
shrinkage <- object$shrinkage[topm]
conditionalmeans <- list2matrix.bas(object, "mle")[topm, , drop = F]
conditionalmeans[, -1] <- sweep(conditionalmeans[, -1, drop = F], 1,
shrinkage,
FUN = "*"
)
postmean <- as.vector(postprobs %*% conditionalmeans)
# workaround for issue #65
if (inherits(object, "basglm")) {
object$prior = object$betaprior$class
}
conditionalsd <- list2matrix.bas(object, "mle.se")[topm, , drop = F]
if (!(object$prior == "AIC" | object$prior == "BIC" | object$prior == "IC")) {
conditionalsd[, -1] <- sweep(conditionalsd[, -1, drop = F], 1,
sqrt(shrinkage),
FUN = "*"
)
}
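  # BMA posterior SD via the law of total variance over models:
  # Var(beta) = E_m[Var(beta | m)] + Var_m(E[beta | m])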
postsd <- sqrt(postprobs %*% conditionalsd^2 +
postprobs %*% ((sweep(conditionalmeans, 2, postmean, FUN = "-")
)^2))
postsd <- as.vector(postsd)
if (is.null(object$df[topm])) { # nocov start
df <- rep(object$n, length(postprobs))
if (object$prior == "BIC" | object$prior == "AIC" | object$prior == "IC") {
df <- df - object$rank
} else {
df <- df - 1
}
# nocov end
} else {
df <- object$df[topm]
}
out <- list(
postmean = postmean,
postsd = postsd,
probne0 = object$probne0,
conditionalmeans = conditionalmeans,
conditionalsd = conditionalsd,
namesx = object$namesx,
postprobs = postprobs,
n.vars = object$n.vars,
n.models = n.models,
df = df,
estimator = estimator
)
class(out) <- "coef.bas"
return(out)
}
#' Print coefficients generated from coef.bas
#' @rdname coef
#' @aliases print.coef.bas
#' @family bas coefs
#' @method print coef.bas
#' @export
print.coef.bas <- function(x,
digits = max(3, getOption("digits") - 3), ...) {
out <- cbind(x$postmean, x$postsd, x$probne0)
dimnames(out) <- list(x$namesx, c("post mean", "post SD", "post p(B != 0)"))
cat("\n Marginal Posterior Summaries of Coefficients: \n")
cat("\n Using ", x$estimator, "\n")
cat("\n Based on the top ", x$n.models, "models \n")
print.default(format(out, digits = digits),
print.gap = 2,
quote = FALSE, ...
)
invisible()
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/coefficients.R |
#' Compute Credible Intervals for BAS regression coefficients from BAS objects
#'
#' Uses Monte Carlo simulations using posterior means and standard deviations
#' of coefficients to generate draws from the posterior distributions and
#' returns highest posterior density (HPD) credible intervals. If the number
#' of models equals one, then use the t distribution to find intervals. These
#' currently condition on the estimate of $g$.
#'
#' @aliases confint.coef.bas confint
#' @param object a coef.bas object
#' @param parm a specification of which parameters are to be given credible
#' intervals, either a vector of numbers or a vector of names. If missing, all
#' parameters are considered.
#' @param level the probability coverage required
#' @param nsim number of Monte Carlo draws from the posterior distribution.
#' Used when number of models is greater than 1.
#' @param ... other arguments to passed; none currently
#' @return A matrix (or vector) with columns giving lower and upper HPD
#' credible limits for each parameter. These will be labeled as 1-level)/2 and
#' 1 - (1-level)/2 in percent (by default 2.5 and 97.5).
#' @note For mixture of g-priors these are approximate. This uses Monte Carlo
#' sampling so results may be subject to Monte Carlo variation and larger values
#' of nsim may be needed to reduce variability.
#' @author Merlise A Clyde
#' @keywords regression
#' @examples
#'
#'
#' data("Hald")
#' hald_gprior <- bas.lm(Y~ ., data=Hald, alpha=13,
#' prior="g-prior")
#' coef_hald <- coef(hald_gprior)
#' confint(coef_hald)
#' confint(coef_hald, approx=FALSE, nsim=5000)
#' # extract just the coefficient of X4
#' confint(coef_hald, parm="X4")
#'
#'
#' @rdname confint.coef
#' @family CI methods
#' @family bas methods
#' @method confint coef.bas
#' @export
confint.coef.bas <- function(object, parm, level = 0.95, nsim = 10000, ...) {
n.models <- length(object$postprob)
if (missing(parm)) parm <- 1:object$n.vars
if (!is.numeric(parm)) parm <- which(object$namesx %in% parm)
if (n.models > 1) {
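    # BMA case: sample model indices according to their posterior probabilities,
    # draw coefficients from each sampled model's conditional Student-t, and
    # report the HPD interval of the Monte Carlo sample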
models <- sample(1:n.models, size = nsim, prob = object$postprobs, replace = TRUE)
means <- object$conditionalmeans[models, parm]
sd <- object$conditionalsd[models, parm]
df <- object$df
if (length(df) == length(object$postprobs)) df <- object$df[models]
    betas <- matrix(rt(nsim * length(parm), df = df),
nrow = nsim, ncol = length(parm), byrow = FALSE
)
betas <- betas * sd + means
ci <- .HPDinterval(betas, prob = level)
}
else {
df <- sum(object$postprobs * object$df)
means <- object$postmean[parm]
sd <- object$postsd[parm]
tq <- -qt((1 - level) / 2, df = df)
ci <- cbind(means - tq * sd, means + tq * sd)
}
ci <- cbind(ci, object$postmean[parm])
attr(ci, "Probability") <- level
attr(ci, "class") <- "confint.bas"
lower <- paste0(as.character(round(100 * (1 - level) / 2, 4)), "%")
upper <- paste0(as.character(round(100 * (1 + level) / 2, 4)), "%")
colnames(ci) <- c(lower, upper, "beta")
rownames(ci) <- object$namesx[parm]
return(ci)
}
#' Compute Credible (Bayesian Confidence) Intervals for a BAS predict object
#'
#' Compute credible intervals for in-sample or out of sample prediction or for
#' the regression function
#'
#' This constructs approximate 95 percent Highest Posterior Density intervals
#' for 'pred.bas' objects. If the estimator is based on model selection, the
#' intervals use a Student t distribution using the estimate of g. If the
#' estimator is based on BMA, then nsim draws from the mixture of Student t
#' distributions are obtained with the HPD interval obtained from the Monte
#' Carlo draws. %% ~~ If necessary, more details than the description above ~~
#'
#' @param object an object created by \code{\link{predict.bas}}
#' @param parm character variable, "mean" or "pred". If missing parm='pred'.
#' @param level the nominal level of the (point-wise) credible interval
#' @param nsim number of Monte Carlo simulations for sampling methods with BMA
#' @param ... optional arguments to pass on to next function call; none at this
#' time.
#' @return a matrix with lower and upper level * 100 percent credible intervals
#' for either the mean of the regression function or predicted values.
#' @author Merlise A Clyde
#' @seealso \code{\link{predict.bas}}
#' @keywords regression
#' @examples
#'
#' data("Hald")
#' hald.gprior = bas.lm(Y~ ., data=Hald, alpha=13, prior="g-prior")
#' hald.pred = predict(hald.gprior, estimator="BPM", predict=FALSE, se.fit=TRUE)
#' confint(hald.pred, parm="mean")
#' confint(hald.pred) #default
#' hald.pred = predict(hald.gprior, estimator="BMA", predict=FALSE, se.fit=TRUE)
#' confint(hald.pred)
#'
#'
#' @rdname confint.pred
#' @family bas methods
#' @family CI methods
#' @method confint pred.bas
#' @export
confint.pred.bas <- function(object, parm, level = 0.95, nsim = 10000, ...) {
if (missing(parm)) parm <- "pred"
if (parm == "pred") {
sd <- object$se.pred
} else {
sd <- object$se.fit
}
if (is.null(sd)) {
warning("object does not have fitted or prediction standard deviations")
return()
}
if (object$estimator == "BMA") {
n.models <- length(object$postprob)
models <- sample(1:n.models, size = nsim, prob = object$postprobs, replace = TRUE)
means <- object$Ypred[models, ]
df <- object$df[models]
# browser()
sd <- sd[models, ]
npred <- length(object$fit)
pred <- matrix(rt(nsim * npred, df = df),
nrow = nsim, ncol = npred, byrow = FALSE
)
pred <- pred * sd + means
ci <- .HPDinterval(pred, prob = level)
}
else {
df <- object$df
means <- object$fit
tq <- -qt((1 - level) / 2, df = df)
ci <- cbind(means - tq * sd, means + tq * sd)
}
ci <- cbind(ci, object$fit)
attr(ci, "Probability") <- level
attr(ci, "class") <- "confint.bas"
lower <- paste0(as.character(round(100 * (1 - level) / 2, 4)), "%")
upper <- paste0(as.character(round(100 * (1 + level) / 2, 4)), "%")
colnames(ci) <- c(lower, upper, parm)
rownames(ci) <- object$namesx[parm]
return(ci)
}
#' Plot Bayesian Confidence Intervals
#'
#' Function takes the the output of functions that return credible intervals
#' from BAS objects, and creates a plot of the posterior mean with segments
#' representing the credible interval.
#'
#' This function takes the HPD intervals or credible intervals created by
#' \code{\link{confint.coef.bas}} or \code{\link{confint.pred.bas}} from BAS
#' objects, and creates a plot of the posterior mean with segments representing
#' the credible interval. BAS tries to return HPD intervals, and under model
#' averaging these may not be symmetric. %% ~~ If necessary, more details than
#' the description above ~~
#'
#' @param x the output from \code{\link{confint.coef.bas}} or
#' \code{\link{confint.pred.bas}} containing credible intervals and estimates.
#' @param horizontal orientation of the plot
#' @param ... optional graphical arguments to pass on to plot
#' @return A plot of the credible intervals.
#' @author Merlise A Clyde
#' @seealso \code{\link{confint.coef.bas}}, \code{\link{confint.pred.bas}},
#' \code{\link{coef.bas}}, \code{\link{predict.bas}}, \code{link{bas.lm}}
#' @keywords regression bayesian
#' @examples
#'
#' data(Hald)
#' hald.ZS = bas.lm(Y ~ ., data=Hald, prior="ZS-null", modelprior=uniform())
#' hald.coef = confint(coef(hald.ZS), parm=2:5)
#' plot(hald.coef)
#' plot(hald.coef, horizontal=TRUE)
#' plot(confint(predict(hald.ZS, se.fit=TRUE), parm="mean"))
#'
#' @rdname plot.confint
#' @method plot confint.bas
#' @family bas methods
#' @family CI methods
#' @export
plot.confint.bas <- function(x, horizontal = FALSE, ...) {
ci <- x # x is there for generic plot function
namesx <- rownames(ci)
y <- ci[, 3]
x <- 1:nrow(ci)
xlim <- range(x) + c(-0.5, 0.2)
ylim <- range(pretty(ci))
xlab <- "case"
type <- colnames(ci)[3]
if (type == "beta") {
ylab <- bquote(beta)
xlab <- "coefficient"
}
else {
if (type == "mean") {
ylab <- bquote(mu)
} else {
ylab <- "predicted values"
}
}
not.deg <- ci[, 1] != ci[, 2]
par(mar = c(4.5, 5, 1, 1), las = 1)
if (!horizontal) {
plot(y, pch = 16, xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab, xaxt = "n", bty = "n", ...)
axis(1, at = x, labels = namesx, tick = FALSE, ...)
abline(h = 0, lty = 3, ...)
arrows(x[not.deg], ci[not.deg, 1], x[not.deg], ci[not.deg, 2], code = 3, angle = 90, length = 0.05, ...)
}
### horizontal layout:
else {
plot(
x = y, y = x, pch = 16, xlim = ylim, ylim = xlim,
xlab = ylab, ylab = "", yaxt = "n", bty = "n", ...
)
axis(2, at = x, labels = namesx, tick = FALSE, ...)
abline(v = 0, lty = 3, ...)
arrows(ci[not.deg, 1], x[not.deg], ci[not.deg, 2], x[not.deg], code = 3, angle = 90, length = 0.05, ...)
}
return()
}
.HPDinterval <- function(obj, prob = 0.95, ...) {
# from library coda but used here so that library
# does not have to be loaded
obj <- as.matrix(obj)
vals <- apply(obj, 2, sort)
if (!is.matrix(vals)) {
stop("obj must have nsamp > 1")
}
nsamp <- nrow(vals)
npar <- ncol(vals)
gap <- max(1, min(nsamp - 1, round(nsamp * prob)))
init <- 1:(nsamp - gap)
inds <- apply(vals[init + gap, , drop = FALSE] - vals[init,
,
drop = FALSE
], 2, which.min)
ans <- cbind(vals[cbind(inds, 1:npar)], vals[cbind(inds +
gap, 1:npar)])
lower <- as.character(round(100 * (1 - prob) / 2, 2))
upper <- as.character(round(100 * (prob + 1) / 2, 2))
dimnames(ans) <- list(colnames(obj), c(lower, upper))
attr(ans, "Probability") <- gap / nsamp
ans
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/confint.R |
#' Summaries for Out of Sample Prediction
#'
#' Compute average prediction error from out of sample predictions
#'
#'
#' @param pred fitted or predicted value from the output from
#' \code{\link{predict.bas}}
#' @param ytrue vector of left out response values
#' @param score function used to summarize error rate. Either "squared-error",
#' or "miss-class"
#' @return For squared error, the average prediction error for the Bayesian
#' estimator is error = sqrt(sum((ytrue - yhat)^2)/npred), while for binary data the
#' misclassification rate is more appropriate.
#' @author Merlise Clyde \email{clyde@@duke.edu}
#' @seealso \code{\link{predict.bas}}
#' @keywords regression
#' @examples
#'
#' \dontrun{
#' library(foreign)
#' cognitive <- read.dta("https://www.stat.columbia.edu/~gelman/arm/examples/child.iq/kidiq.dta")
#' cognitive$mom_work <- as.numeric(cognitive$mom_work > 1)
#' cognitive$mom_hs <- as.numeric(cognitive$mom_hs > 0)
#' colnames(cognitive) <- c("kid_score", "hs", "iq", "work", "age")
#'
#' set.seed(42)
#' n <- nrow(cognitive)
#' test <- sample(1:n, size = round(.20 * n), replace = FALSE)
#' testdata <- cognitive[test, ]
#' traindata <- cognitive[-test, ]
#' cog_train <- bas.lm(kid_score ~ ., prior = "BIC", modelprior = uniform(), data = traindata)
#' yhat <- predict(cog_train, newdata = testdata, estimator = "BMA", se = F)
#' cv.summary.bas(yhat$fit, testdata$kid_score)
#' }
#' @rdname cv.summary.bas
#' @export
cv.summary.bas <- function(pred, ytrue, score = "squared-error") {
if (length(pred) != length(ytrue)) {
stop("predicted values and observed values are not the same length")
}
if (!(score %in% c("squared-error", "miss-class"))) {
stop(paste(
"score ", score,
"not implemented; please check spelling"
))
}
if (score == "miss-class") {
pred.class <- ifelse(pred < .5, 0, 1)
confusion <- table(pred.class, ytrue)
error <- (sum(confusion) - sum(diag(confusion))) / sum(confusion)
}
else {
error <- sqrt(sum((pred - ytrue)^2) / length(ytrue))
}
return(error)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/cv_summary.R |
#' Bodyfat Data
#'
#' Lists estimates of the percentage of body fat determined by underwater
#' weighing and various body circumference measurements for 252 men. Accurate
#' measurement of body fat is inconvenient/costly and it is desirable to have
#' easy methods of estimating body fat that are not inconvenient/costly.
#'
#' A variety of popular health books suggest that the readers assess their
#' health, at least in part, by estimating their percentage of body fat. In
#' Bailey (1994), for instance, the reader can estimate body fat from tables
#' using their age and various skin-fold measurements obtained by using a
#' caliper. Other texts give predictive equations for body fat using body
#' circumference measurements (e.g. abdominal circumference) and/or skin-fold
#' measurements. See, for instance, Behnke and Wilmore (1974), pp. 66-67;
#' Wilmore (1976), p. 247; or Katch and McArdle (1977), pp. 120-132).
#'
#' Percentage of body fat for an individual can be estimated once body density
#' has been determined. Folks (e.g. Siri (1956)) assume that the body consists
#' of two components - lean body tissue and fat tissue. Letting
#'
#' D = Body Density (gm/cm^3) A = proportion of lean body tissue B = proportion
#' of fat tissue (A+B=1) a = density of lean body tissue (gm/cm^3) b = density
#' of fat tissue (gm/cm^3)
#'
#' we have D = 1/[(A/a) + (B/b)] and solving for B we find B = (1/D)*[ab/(a-b)]
#' - [b/(a-b)].
#'
#' Using the estimates a=1.10 gm/cm^3 and b=0.90 gm/cm^3 (see Katch and McArdle
#' (1977), p. 111 or Wilmore (1976), p. 123) we come up with "Siri's equation":
#'
#' Percentage of Body Fat (i.e. 100*B) = 495/D - 450.
#'
#' Volume, and hence body density, can be accurately measured a variety of
#' ways. The technique of underwater weighing "computes body volume as the
#' difference between body weight measured in air and weight measured during
#' water submersion. In other words, body volume is equal to the loss of weight
#' in water with the appropriate temperature correction for the water's
#' density" (Katch and McArdle (1977), p. 113). Using this technique,
#'
#' Body Density = WA/[(WA-WW)/c.f. - LV]
#'
#' where WA = Weight in air (kg) WW = Weight in water (kg) c.f. = Water
#' correction factor (=1 at 39.2 deg F as one-gram of water occupies exactly
#' one cm^3 at this temperature, =.997 at 76-78 deg F) LV = Residual Lung
#' Volume (liters)
#'
#' (Katch and McArdle (1977), p. 115). Other methods of determining body volume
#' are given in Behnke and Wilmore (1974), p. 22 ff.
#'
#' Measurement standards are apparently those listed in Behnke and Wilmore
#' (1974), pp. 45-48 where, for instance, the abdomen circumference is measured
#' "laterally, at the level of the iliac crests, and anteriorly, at the
#' umbilicus".)
#'
#' @name bodyfat
#' @aliases Bodyfat bodyfat
#' @docType data
#' @format A data frame with 252 observations on the following 15 variables.
#' \describe{ \item{Density}{a numeric vector for the density
#' determined from underwater weighing} \item{Bodyfat}{percent body fat
#' from Siri's (1956) equation} \item{Age}{age of individual in years}
#' \item{Weight}{weight of the individual in pounds}
#' \item{Height}{height of individual in inches}
#' \item{Neck}{neck circumference in centimeters (cm)}
#' \item{Chest}{chest circumference (cm)}
#' \item{Abdomen}{abdomen circumference (cm)} \item{Hip}{hip
#' circumference (cm)} \item{Thigh}{thigh circumference (cm)}
#' \item{Knee}{knee circumference (cm)} \item{Ankle}{ankle
#' circumference (cm)} \item{Biceps}{bicep (extended) circumference
#' (cm)} \item{Forearm}{forearm circumference (cm)}
#' \item{Wrist}{wrist circumference (cm)} }
#' @references Bailey, Covert (1994). Smart Exercise: Burning Fat, Getting Fit,
#' Houghton-Mifflin Co., Boston, pp. 179-186.
#'
#' Behnke, A.R. and Wilmore, J.H. (1974). Evaluation and Regulation of Body
#' Build and Composition, Prentice-Hall, Englewood Cliffs, N.J.
#'
#' Siri, W.E. (1956), "Gross composition of the body", in Advances in
#' Biological and Medical Physics, vol. IV, edited by J.H. Lawrence and C.A.
#' Tobias, Academic Press, Inc., New York.
#'
#' Katch, Frank and McArdle, William (1977). Nutrition, Weight Control, and
#' Exercise, Houghton Mifflin Co., Boston.
#'
#' Wilmore, Jack (1976). Athletic Training and Physical Fitness: Physiological
#' Principles of the Conditioning Process, Allyn and Bacon, Inc., Boston.
#' @source These data are used to produce the predictive equations for lean
#' body weight given in the abstract "Generalized body composition prediction
#' equation for men using simple measurement techniques", K.W. Penrose, A.G.
#' Nelson, A.G. Fisher, FACSM, Human Performance Research Center, Brigham Young
#' University, Provo, Utah 84602 as listed in _Medicine and Science in Sports
#' and Exercise_, vol. 17, no. 2, April 1985, p. 189. (The predictive equations
#' were obtained from the first 143 of the 252 cases in the dataset).
#' The data were generously supplied by Dr. A. Garth Fisher who gave permission
#' to freely distribute the data and use for non-commercial purposes.
#' @keywords datasets
#' @examples
#'
#' data(bodyfat)
#' bodyfat.bas = bas.lm(Bodyfat ~ Abdomen, data=bodyfat, prior="ZS-null")
#' summary(bodyfat.bas)
#' plot(Bodyfat ~ Abdomen, data=bodyfat, xlab="abdomen circumference (cm)")
#' betas = coef(bodyfat.bas)$postmean # in the current version the intercept estimate is centered at ybar
#' betas[1] = betas[1] - betas[2]*bodyfat.bas$mean.x
#' abline(betas)
#' abline(coef(lm(Bodyfat ~ Abdomen, data=bodyfat)), col=2, lty=2)
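#'
#' # sanity check (illustration): Bodyfat was derived from Density via Siri's
#' # equation, so 495/Density - 450 should closely track the recorded values
#' summary(495/bodyfat$Density - 450 - bodyfat$Bodyfat)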
#'
NULL
#' Climate Data
#' @name climate
#' @docType data
#' @format Scientists are interested in the Earth's temperature change since the last
#' glacial maximum, about 20,000 years ago. The first study to estimate the
#' temperature change was published in 1980, and estimated a change of -1.5 degrees
#' C, +/- 1.2 degrees C in tropical sea surface temperatures.
#' The negative value means that the Earth was colder then than now.
#' Since 1980 there have been many other studies.
#' \code{climate} is a dataset with 63 measurements on 5 variables:
#' \describe{\item{\emph{deltaT}}{ the response variable, which is the change in temperature
#' in degrees Celsius;}
#' \item{\emph{sdev}}{a standard deviation for the calculated \emph{deltaT};}
#' \item{\emph{proxy}}{a number 1-8 reflecting which type of measurement system was used to derive
#' deltaT. Some proxies can be used over land, others over water.
#' The proxies are coded as\cr
#' 1 "Mg/Ca" \cr
#' 2 "alkenone" \cr
#' 3 "Faunal" \cr
#' 4 "Sr/Ca" \cr
#' 5 "del 180" \cr
#' 6 "Ice Core" \cr
#' 7 "Pollen" \cr
#' 8 "Noble Gas" \cr
#'}
#'\item{\emph{T/M}}{an indicator of whether it was a terrestrial or marine study (T/M),
#' which is coded as 0 for Terrestrial, 1 for Marine;}
#'\item{ \emph{latitude}}{the latitude where the data were collected.}}
#' @source Data provided originally by Michael Lavine
NULL
#' Hald Data
#'
#' The Hald data have been used in many books and papers to illustrate variable
#' selection. The data relate to an engineering application that was concerned
#' with the effect of the composition of cement on heat evolved during
#' hardening. The response variable \emph{Y} is the \emph{heat evolved} in a
#' cement mix. The four explanatory variables are ingredients of the mix, X1:
#' \emph{tricalcium aluminate}, X2: \emph{tricalcium silicate}, X3:
#' \emph{tetracalcium alumino ferrite}, X4: \emph{dicalcium silicate}. An
#' important feature of these data is that the variables X1 and X3 are highly
#' correlated, as well as the variables X2 and X4. Thus we should expect any
#' subset of (X1,X2,X3,X4) that includes one variable from a highly correlated
#' pair to do as well as any subset that also includes the other member.
#'
#'
#' @name Hald
#' @aliases Hald hald
#' @docType data
#' @format \code{hald} is a dataframe with 13 observations and 5 variables
#' (columns),
#'
#' Y: Heat evolved per gram of cement (in calories) \cr
#' X1: Amount of tricalcium aluminate \cr
#' X2: Amount of tricalcium silicate \cr
#' X3: Amount of tetracalcium alumino ferrite \cr
#' X4: Amount of dicalcium silicate
#' @source Wood, H., Steinour, H.H., and Starke, H.R. (1932). "Effect of
#' Composition of Portland cement on Heat Evolved During Hardening", Industrial
#' and Engineering Chemistry, 24, 1207-1214.
#' @keywords datasets
NULL
#' Protein Activity Data
#'
#' This data set includes several predictors of protein activity from an
#' experiment run at Glaxo.
#'
#'
#' @name protein
#' @docType data
#' @format \code{protein} is a dataframe with 96 observations on 8 predictor
#' variables and 4 measurements of protein activity: \tabular{llll}{ [,1] \tab buf \tab factor \tab
#' Buffer \cr [,2] \tab pH \tab numeric \tab \cr [,3] \tab NaCl \tab numeric
#' \tab \cr [,4] \tab con \tab numeric \tab protein concentration\cr [,5] \tab
#' ra \tab factor \tab reducing agent\cr [,6] \tab det \tab factor \tab
#' detergent\cr [,7] \tab MgCl2 \tab numeric\tab \cr [,8] \tab temp \tab
#' numeric\tab (temperature)\cr [,9] \tab prot.act1 \tab numeric\tab \cr [,10]
#' \tab prot.act2 \tab numeric\tab \cr [,11] \tab prot.act3 \tab numeric\tab
#' \cr [,12] \tab prot.act4 \tab numeric\tab protein activity }
#' @source Clyde, M. A. and Parmigiani, G. (1998), Protein Construct Storage:
#' Bayesian Variable Selection and Prediction with Mixtures, Journal of
#' Biopharmaceutical Statistics, 8, 431-443
#' @keywords datasets
NULL
| /scratch/gouwar.j/cran-all/cranData/BAS/R/data.R |
#' BAS MCMC diagnostic plot
#'
#' Function to help assess convergence of MCMC sampling for bas objects.
#'
#' BAS calculates posterior model probabilities in two ways when method="MCMC".
#' The first is using the relative Monte Carlo frequencies of sampled models.
#' The second is to renormalize the marginal likelihood times prior
#' probabilities over the sampled models. If the Markov chain has converged,
#' these two quantities should be the same and fall on a 1-1 line. If not,
#' running longer may be required. If the chain has not converged, the Monte
#' Carlo frequencies may have less bias, although they may exhibit more
#' variability on repeated runs.
#'
#' @param obj an object created by bas.lm or bas.glm
#' @param type type of diagnostic plot. If "pip" the marginal inclusion
#' probabilities are used, while if "model", plot posterior model probabilities
#' @param ... additional graphics parameters to be passed to plot
#' @return a plot of the marginal inclusion probabilities (pip) estimated by
#' MCMC against those based on the renormalized marginal likelihoods times
#' prior probabilities, or the corresponding posterior model probabilities.
#' @author Merlise Clyde (\email{[email protected]})
#' @examples
#'
#' library(MASS)
#' data(UScrime)
#' UScrime[, -2] <- log(UScrime[, -2])
#' crime.ZS <- bas.lm(y ~ .,
#' data = UScrime,
#' prior = "ZS-null",
#' modelprior = uniform(),
#' method = "MCMC",
#' MCMC.iter = 1000
#' ) # short run for the example
#' diagnostics(crime.ZS)
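#' # posterior model probabilities can be compared in the same way
#' diagnostics(crime.ZS, type = "model")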
#' @family bas methods
#' @export
diagnostics <- function(obj, type = c("pip", "model"), ...) {
if (obj$call$method == "MCMC") {
for (i in 1:length(type)) {
if (type[i] == "pip") {
plot(obj$probne0.RN, obj$probne0.MCMC,
xlab = "pip (renormalized)",
ylab = "pip (MCMC)", xlim = c(0, 1), ylim = c(0, 1),
main = "Convergence Plot: Posterior Inclusion Probabilities",
...
)
abline(0, 1)
}
else {
ax.lim <- range(pretty(c(obj$postprobs.RN, obj$postprobs.MCMC)))
plot(obj$postprobs.RN, obj$postprobs.MCMC,
xlab = "p(M | Y) (renormalized)",
ylab = "p(M | Y) (MCMC)", xlim = ax.lim, ylim = ax.lim,
main = "Convergence Plot: Posterior Model Probabilities",
...
)
abline(0, 1)
}
}
}
else {
stop("Diagnostic plots are only availble using method='MCMC'
for sampling with bas. Please rerun.")
}
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/diagnostics.R |
#' eplogprob - Compute approximate marginal inclusion probabilities from
#' pvalues
#'
#' \code{eplogprob} calculates approximate marginal posterior inclusion
#' probabilities from p-values computed from a linear model using a lower bound
#' approximation to Bayes factors. Used to obtain initial inclusion
#' probabilities for sampling using Bayesian Adaptive Sampling \code{bas.lm}
#'
#' Sellke, Bayarri and Berger (2001) provide a simple calibration of p-values
#'
#' BF(p) = -e p log(p)
#'
#' which provide a lower bound to a Bayes factor for comparing H0: beta = 0
#' versus H1: beta not equal to 0, when the p-value p is less than 1/e. Using
#' equal prior odds on the hypotheses H0 and H1, the approximate marginal
#' posterior inclusion probability
#'
#' p(beta != 0 | data ) = 1/(1 + BF(p))
#'
#' When p > 1/e, we set the marginal inclusion probability to 0.5 or the value
#' given by \code{thresh}.
#'
#' @param lm.obj a linear model object
#' @param thresh the value of the inclusion probability used when the p-value >
#' 1/exp(1), where the lower bound approximation is not valid.
#' @param max maximum value of the inclusion probability; used for the
#' \code{bas.lm} function to keep initial inclusion probabilities away from 1.
#' @param int If the Intercept is included in the linear model, set the
#' marginal inclusion probability corresponding to the intercept to 1
#' @return \code{eplogprob} returns a vector of marginal posterior inclusion
#' probabilities for each of the variables in the linear model. If int = TRUE,
#' then the inclusion probability for the intercept is set to 1. If the model
#' is not full rank, variables that are linearly dependent base on the QR
#' factorization will have NA for their p-values. In bas.lm, where the
#' probabilities are used for sampling, the inclusion probability is set to 0.
#' @author Merlise Clyde \email{clyde@@stat.duke.edu}
#' @seealso \code{\link{bas}}
#' @references Sellke, Thomas, Bayarri, M. J., and Berger, James O. (2001),
#' ``Calibration of p-values for testing precise null hypotheses'', The
#' American Statistician, 55, 62-71.
#' @keywords regression
#' @examples
#'
#' library(MASS)
#' data(UScrime)
#' UScrime[,-2] = log(UScrime[,-2])
#' eplogprob(lm(y ~ ., data=UScrime))
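#'
#' # the Sellke et al. lower-bound calibration for a single p-value
#' # (illustration with p = 0.05)
#' p = 0.05
#' 1/(1 - exp(1)*p*log(p))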
#'
#'
#' @export
eplogprob = function(lm.obj, thresh=.5, max = 0.99, int=TRUE) {
pval = summary(lm.obj)$coefficients[,4]
if (length(lm.obj$coefficients) != length(pval)) {
stop("Full model is not full rank, use `initprobs='marg-eplogp'` instead\n")
}
else {
prob = 1/(1 - exp(1)*pval*log(pval))
prob[pval > 1/exp(1)] = thresh
prob[prob > max] = max
if (int) prob[1] = 1.0
}
return(prob)
}
#' eplogprob.marg - Compute approximate marginal inclusion probabilities from
#' pvalues
#'
#' \code{eplogprob.marg} calculates approximate marginal posterior inclusion
#' probabilities from p-values computed from a series of simple linear
#' regression models using a lower bound approximation to Bayes factors. Used
#' to order variables and if appropriate obtain initial inclusion probabilities
#' for sampling using Bayesian Adaptive Sampling \code{bas.lm}
#'
#' Sellke, Bayarri and Berger (2001) provide a simple calibration of p-values
#'
#' BF(p) = -e p log(p)
#'
#' which provide a lower bound to a Bayes factor for comparing H0: beta = 0
#' versus H1: beta not equal to 0, when the p-value p is less than 1/e. Using
#' equal prior odds on the hypotheses H0 and H1, the approximate marginal
#' posterior inclusion probability
#'
#' p(beta != 0 | data ) = 1/(1 + BF(p))
#'
#' When p > 1/e, we set the marginal inclusion probability to 0.5 or the value
#' given by \code{thresh}. For eplogprob.marg, the marginal p-values are
#' obtained using statistics from the p simple linear regressions
#'
#' P(F > (n-2) R2/(1 - R2)), where F ~ F(1, n-2) and R2 is the square of the
#' correlation coefficient between y and X_j.
#'
#' @param Y response variable
#' @param X design matrix with a column of ones for the intercept
#' @param thresh the value of the inclusion probability used when the p-value >
#' 1/exp(1), where the lower bound approximation is not valid.
#' @param max maximum value of the inclusion probability; used for the
#' \code{bas.lm} function to keep initial inclusion probabilities away from 1.
#' @param int If the Intercept is included in the linear model, set the
#' marginal inclusion probability corresponding to the intercept to 1
#' @return \code{eplogprob.marg} returns a vector of marginal posterior
#' inclusion probabilities for each of the variables in the linear model. If
#' int = TRUE, then the inclusion probability for the intercept is set to 1.
#' @author Merlise Clyde \email{clyde@@stat.duke.edu}
#' @seealso \code{\link{bas}}
#' @references Sellke, Thomas, Bayarri, M. J., and Berger, James O. (2001),
#' ``Calibration of p-values for testing precise null hypotheses'', The
#' American Statistician, 55, 62-71.
#' @keywords regression
#' @examples
#'
#' library(MASS)
#' data(UScrime)
#' UScrime[,-2] = log(UScrime[,-2])
#' eplogprob.marg(UScrime$y, model.matrix(y ~ ., data=UScrime))
#' @export
eplogprob.marg = function(Y,X, thresh=.5, max = 0.99, int=TRUE) {
# browser()
R2 = apply(X[,-1], 2, FUN=function(x, y=Y) {cor(x,y)^2})
n = length(Y)
Fstat = (n-2)*R2/(1 - R2)
pval = 1 - pf(Fstat, 1, n-2)
prob = 1/(1 - exp(1)*pval*log(pval))
prob[pval > 1/exp(1)] = thresh
prob[pval < 10^-16] = max
prob[prob > max] = max
if (int) prob = c(1.0, prob)
return(prob)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/eplogprob.R |
#' Fitting Generalized Linear Models and Bayesian marginal likelihood
#' evaluation
#'
#' A version of glm.fit rewritten in C; also returns marginal likelihoods for
#' Bayesian model comparison
#'
#' C version of glm-fit. For different prior choices returns, marginal
#' likelihood of model using a Laplace approximation.
#' @rdname bayesglm.fit
#' @param x design matrix
#' @param y response
#' @param weights optional vector of weights to be used in the fitting process.
#' Should be NULL or a numeric vector.
#' @param start starting value for coefficients in the linear predictor
#' @param etastart starting values for the linear predictor
#' @param mustart starting values for the vectors of means
#' @param offset a priori known component to be included in the linear
#' predictor
#' @param family a description of the error distribution and link function for
#' exponential family; currently only binomial(), poisson(), and Gamma() with canonical
#' links are implemented.
#' @param coefprior function specifying prior distribution on coefficients with
#' optional hyperparameters leading to marginal likelihood calculations;
#' options include \code{bic.prior()},\code{ aic.prior()}, and
#' \code{ic.prior()}
#' @param control a list of parameters that control convergence in the fitting
#' process. See the documentation for \code{glm.control()}
#' @param intercept should an intercept be included in the null model?
#' @return \item{coefficients}{MLEs} \item{se}{Standard errors of coefficients
#' based on the sqrt of the diagonal of the inverse information matrix}
#' \item{mu}{fitted mean} \item{rank}{numeric rank of the fitted linear model}
#' \item{deviance}{minus twice the log likelihood evaluated at the MLEs}
#' \item{g}{value of g in g-priors} \item{shrinkage}{shrinkage factor for
#' coefficients in linear predictor} \item{RegSS}{quadratic form
#' beta'I(beta)beta used in shrinkage} \item{logmarglik}{the log marginal or
#' integrated log likelihood (up to a constant)}
#' @author Merlise Clyde translated the \code{\link{glm.fit}} from R base into
#' C using the .Call interface
#' @seealso \code{\link{bic.prior}}
#' @references \code{\link{glm}}
#' @keywords regression GLM
#' @examples
#' data(Pima.tr, package="MASS")
#' Y <- as.numeric(Pima.tr$type) - 1
#' X <- cbind(1, as.matrix(Pima.tr[,1:7]))
#' out <- bayesglm.fit(X, Y, family=binomial(),coefprior=bic.prior(n=length(Y)))
#' out$coef
#' out$se
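#' # log marginal likelihood (up to a constant) under the BIC prior
#' out$logmarglik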
#' # using built in function
#' glm(type ~ ., family=binomial(), data=Pima.tr)
#'
#' @export
#'
bayesglm.fit <-
function(x, y, weights = rep(1, nobs), start = NULL, etastart = NULL,
mustart = NULL, offset = rep(0, nobs), family = binomial(),
coefprior = bic.prior(nobs),
control = glm.control(), intercept = TRUE) {
# if (!is.double(y)) stop("y must be a numeric vector/matrix")
# if (!is.double(x)) stop("x must be a numeric vector/matrix")
x <- as.matrix(x)
storage.mode(x) <- "double"
    ynames <- if (is.matrix(y)) rownames(y) else names(y)
if (is.matrix(y)) storage.mode(y) <- "double"
conv <- FALSE
nobs <- NROW(y)
nvars <- ncol(x)
EMPTY <- nvars == 0
if (is.null(weights)) weights <- rep.int(1, nobs)
if (is.null(offset)) offset <- rep.int(0, nobs)
eval(family$initialize)
if (coefprior$family == "BIC" & is.null(coefprior$hyper)) coefprior$hyper = as.numeric(nobs)
newfit <- .Call(C_glm_fit,
RX = x, RY = y,
family = family, Roffset = offset,
Rweights = weights,
Rpriorcoef = coefprior, Rcontrol = control
)
return(newfit)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/glm_fit.R |
#' Confluent hypergeometric1F1 function
#'
#' Compute the Confluent Hypergeometric function: 1F1(a, b, c) =
#' Gamma(b)/(Gamma(b-a)Gamma(a)) Int_0^1 t^(a-1) (1 - t)^(b-a-1) exp(c t) dt
#'
#'
#' @param a arbitrary
#' @param b Must be greater 0
#' @param c arbitrary
#' @param laplace The default is to use the Cephes library; for large a or c
#' this may return NA, Inf, or negative values, in which case you should use
#' the Laplace approximation.
#' @param log if TRUE, return log(1F1)
#' @author Merlise Clyde (\email{clyde@@stat.duke.edu})
#' @references Cephes library hyp1f1.c
#' @keywords math
#' @examples
#' hypergeometric1F1(11.14756, 0.5, 0.00175097)
#'
#'
#' @rdname hypergeometric1F1
#' @family special functions
#' @export
hypergeometric1F1 = function(a,b,c, laplace=FALSE, log=TRUE) {
n = length(a);
out = rep(0, n);
ans = .C(C_hypergeometric1F1, as.numeric(a), as.numeric(b), as.numeric(c), out=as.numeric(out), as.integer(n),
as.integer(rep(laplace, n)))$out
if (!log) ans = exp(ans)
return(ans)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/hypergeometric1F1.R |
#' Gaussian hypergeometric2F1 function
#'
#' Compute the Gaussian Hypergeometric2F1 function: 2F1(a, b; c; z) =
#' Gamma(c)/(Gamma(b)Gamma(c-b)) Int_0^1 t^(b-1) (1 - t)^(c-b-1) (1 - t z)^(-a) dt
#'
#' The default is to use the routine hyp2f1.c from the Cephes library. If that
#' return a negative value or Inf, one should try method="Laplace" which is
#' based on the Laplace approximation as described in Liang et al JASA 2008.
#' This is used in the hyper-g prior to calculate marginal likelihoods.
#'
#' @param a arbitrary
#' @param b Must be greater 0
#' @param c Must be greater than b if |z| < 1, and c > b + a if z = 1
#' @param z |z| <= 1
#' @param method The default is to use the Cephes library routine. This
#' sometimes is unstable for large a or z near one returning Inf or negative
#' values. In this case, try method="Laplace", which use a Laplace
#' approximation for tau = exp(t/(1-t)).
#' @param log if TRUE, return log(2F1)
#' @return if log=T returns the log of the 2F1 function; otherwise the 2F1
#' function.
#' @author Merlise Clyde (\email{clyde@@duke.edu})
#' @references Cephes library hyp2f1.c
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. and Berger, J.O. (2008) Mixtures
#' of g-priors for Bayesian Variable Selection. Journal of the American
#' Statistical Association. 103:410-423. \cr
#' \doi{10.1198/016214507000001337}
#' @keywords math
#' @examples
#' hypergeometric2F1(12, 1, 2, .65)
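#' # the Laplace approximation may be tried when the Cephes routine is unstable
#' # for large a or z near one (values may differ slightly)
#' hypergeometric2F1(12, 1, 2, .65, method = "Laplace")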
#' @rdname hypergeometric2F1
#' @family special functions
#' @export
hypergeometric2F1 <- function(a, b, c, z, method = "Cephes", log = TRUE) {
out <- 1.0
if (c < b | b < 0) {
warning("Must have c > b > 0 in 2F1 function for integral to converge")
return(Inf)
}
if (abs(z) > 1) {
warning("integral in 2F1 diverges")
return(Inf)
}
if (z == 1.0 & c - b - a <= 0) {
ans <- Inf
} else {
if (method == "Laplace") {
ans <- .C(C_logHyperGauss2F1, as.numeric(a), as.numeric(b), as.numeric(c), as.numeric(z),
out = as.numeric(out))$out
if (!log) ans <- exp(ans)
}
else {
ans <- .C(C_hypergeometric2F1, as.numeric(a), as.numeric(b), as.numeric(c), as.numeric(z),
out = as.numeric(out))$out
if (is.na(ans)) {
warning("2F1 from Cephes routine returned NaN; try Laplace approximation")
return(NA)
}
else {
if (ans < 0) {
warning("2F1 from Cephes library is negative; try Laplace approximation") # nocov
}
if (log) ans <- log(ans)
}
}
}
return(ans)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/hypergeometric2F1.R |
#' Images of models used in Bayesian model averaging
#'
#' Creates an image of the models selected using \code{\link{bas}}.
#'
#' Creates an image of the model space sampled using \code{\link{bas}}. If a
#' subset of the top models are plotted, then probabilities are renormalized
#' over the subset.
#'
#'
#' @aliases image.bas image
#' @param x A BMA object of type 'bas' created by BAS
#' @param top.models Number of the top ranked models to plot
#' @param intensity Logical variable, when TRUE image intensity is proportional
#' to the probability or log(probability) of the model, when FALSE, intensity
#' is binary indicating just presence (light) or absence (dark) of a variable.
#' @param prob Logical variable for whether the area in the image for each
#' model should be proportional to the posterior probability (or log
#' probability) of the model (TRUE) or with equal area (FALSE).
#' @param log Logical variable indicating whether the intensities should be
#' based on log posterior odds (TRUE) or posterior probabilities (FALSE). The
#' log of the posterior odds is for comparing the each model to the worst model
#' in the top.models.
#' @param rotate Should the image of models be rotated so that models are on
#' the y-axis and variables are on the x-axis (TRUE)
#' @param color The color scheme for image intensities. The value "rainbow"
#' uses the rainbow palette. The value "blackandwhite" produces a black and
#' white image (greyscale image)
#' @param subset indices of variables to include/exclude in plot
#' @param drop.always.included logical variable to drop variables that are
#' always forced into the model. FALSE by default.
#' @param offset numeric value to add to intensity
#' @param digits number of digits in posterior probabilities to keep
#' @param vlas las parameter for placing variable names; see par
#' @param plas las parameter for posterior probability axis
#' @param rlas las parameter for model ranks
#' @param ... Other parameters to be passed to the \code{image} and \code{axis}
#' functions.
#' @note The suggestion to allow the area of models to be proportional to the
#' posterior probability is due to Thomas Lumley.
#' @author Merlise Clyde \email{clyde@@stat.duke.edu}
#' @seealso \code{\link{bas}}
#' @references Clyde, M. (1999) Bayesian Model Averaging and Model Search
#' Strategies (with discussion). In Bayesian Statistics 6. J.M. Bernardo, A.P.
#' Dawid, J.O. Berger, and A.F.M. Smith eds. Oxford University Press, pages
#' 157-185.
#' @keywords regression
#' @examples
#'
#' require(graphics)
#' data("Hald")
#' hald.ZSprior <- bas.lm(Y ~ ., data = Hald, prior = "ZS-null")
#' image(hald.ZSprior, drop.always.included = TRUE) # drop the intercept
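#' # an equal-area version without rotation, using a greyscale palette
#' image(hald.ZSprior, prob = FALSE, rotate = FALSE, color = "blackandwhite",
#'       drop.always.included = TRUE)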
#' @rdname image.bas
#' @family bas methods
#' @family bas plots
#' @method image bas
#' @export
image.bas <- function(x, top.models = 20, intensity = TRUE, prob = TRUE, log = TRUE, rotate = TRUE, color = "rainbow", subset = NULL, drop.always.included = FALSE,
offset = .75, digits = 3, vlas = 2, plas = 0, rlas = 0, ...) {
postprob <- x$postprobs
top.models <- min(top.models, x$n.models)
best <- order(-x$postprobs)[1:top.models]
postprob <- postprob[best] / sum(postprob[best])
which.mat <- list2matrix.which(x, best)
nvar <- ncol(which.mat)
if (is.null(subset)) subset <- 1:nvar
if (drop.always.included) {
keep <- x$include.always
if (is.null(keep)) keep <- 1 # nocov
subset <- subset[!subset %in% keep]
if (length(subset) == 0) stop("no models in subset to show; modify subset or drop.always.included")
}
which.mat <- which.mat[, subset, drop = FALSE]
nvar <- ncol(which.mat)
namesx <- x$namesx[subset]
scale <- postprob
prob.lab <- "Posterior Probability"
if (log) {
scale <- log(postprob) - min(log(postprob))
prob.lab <- "Log Posterior Odds"
# fix problem when scale has duplicate zeros
zeros <- which(scale == 0.0)
nzeros <- length(zeros)
if (nzeros > 1) {
scale[zeros] <- seq(scale[zeros[1] - 1], 0.0, length = nzeros) / 1000 # nocov
}
}
if (intensity) which.mat <- sweep(which.mat, 1, scale + offset, "*")
if (rotate) scale <- rev(scale)
if (prob) {
m.scale <- cumsum(c(0, scale))
} else {
m.scale <- seq(0, top.models)
}
mat <- (m.scale[-1] + m.scale[-(top.models + 1)]) / 2
colors <- switch(color,
"rainbow" = c("black", rainbow(top.models + 1, start = .75, end = .05)),
"blackandwhite" = gray(seq(0, 1, length = top.models))
)
par.old <- par()$mar
if (rotate) {
par(mar = c(6, 6, 3, 5) + .1)
image(0:nvar, mat, t(which.mat[top.models:1, , drop = FALSE]),
xaxt = "n", yaxt = "n",
ylab = "",
xlab = "",
zlim = c(0, max(which.mat)),
col = colors, ...
)
axis(2, at = mat, labels = round(scale, digits = digits), las = plas, ...)
axis(4, at = mat, labels = top.models:1, las = rlas, ...)
mtext("Model Rank", side = 4, line = 3, las = 0)
mtext(prob.lab, side = 2, line = 4, las = 0)
axis(1, at = (1:nvar - .5), labels = namesx, las = vlas, ...)
}
else {
par(mar = c(6, 8, 6, 2) + .1)
image(mat, 0:nvar, which.mat[, nvar:1, drop = FALSE],
xaxt = "n", yaxt = "n",
xlab = "",
ylab = "",
zlim = c(0, max(which.mat)),
col = colors, ...
)
axis(1, at = mat, labels = round(scale, digits = digits), las = plas, ...)
axis(3, at = mat, labels = 1:top.models, las = rlas, ...)
mtext("Model Rank", side = 3, line = 3)
mtext(prob.lab, side = 1, line = 4)
axis(2, at = (1:nvar - .5), labels = rev(namesx), las = vlas, ...)
}
box()
par(mar = par.old)
invisible()
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/image.R |
make.parents.of.interactions <-
function(mf, data) {
modelterms <- terms(mf, data = data)
termnamesX <- attr(modelterms, "term.labels")
p <- length(termnamesX)
interactions <- grep(":", termnamesX)
parents <- diag(p)
colnames(parents) <- termnamesX
rownames(parents) <- termnamesX
    for (j in interactions) {
      # split the interaction into its main effects and enumerate all of the
      # lower order terms (parents) that can be formed from them
      main <- unlist(strsplit(termnamesX[j], ":", fixed = TRUE))

      parents.of.term <- main
      for (i in 2:length(main)) {
        parents.of.term <- c(
          parents.of.term,
          utils::combn(main, i, FUN = paste0, collapse = ":")
        )
      }
      parents[j, parents.of.term] <- 1
    }
X <- model.matrix(modelterms, data = data)
loc <- attr(X, "assign")[-1] # drop intercept
parents <- parents[loc, loc]
parents <- rbind(0, parents)
parents <- cbind(0, parents)
parents[1, 1] <- 1
rownames(parents) <- colnames(X)
colnames(parents) <- colnames(X)
return(parents)
}
# model.matrix(mf, data)
# attr( , "assign") has where terms are located
# mp = BAS:::make.parents.of.interactions(mf, df)
prob.heredity <- function(model, parents, prob = .5) {
got.parents <- apply(parents, 1,
FUN = function(x) {
all(as.logical(model[as.logical(x)]))
}
)
model.prob <- 0
if (all(model == got.parents)) {
model.prob <- exp(
sum(model * log(prob) + (1 - model) * log(1.0 - prob))
)
}
return(model.prob)
}
check.heredity <- function(model, parents, prob = .5) {
# p = length(model) # model has no intercept, while parents does
# parents = parents[2:p, 2:p]
got.parents <- apply(parents, 1,
FUN = function(x) {
all(as.logical(model[as.logical(x)]))
}
)
# browser()
all(model == got.parents)
}
#' Post processing function to force constraints on interaction inclusion bas BMA objects
#'
#' This function takes the output of a bas object and allows higher
#' order interactions to be included only if their parent
#' lower order interactions terms are in the model, by
#' assigning zero prior probability, and hence posterior
#' probability, to models that do include their respective
#' parents.
#'
#' @param object a bas linear model or generalized linear model object
#' @param prior.prob prior probability that a term is included conditional on parents being included
#' @return a bas object with updated models, coefficients and summaries obtained removing all models with zero prior and posterior probabilities.
#' @note Currently prior probabilities are computed using conditional Bernoulli distributions, i.e. P(gamma_j = 1 | Parents(gamma_j) = 1) = prior.prob. This is not very efficient for models with a large number of levels. Future updates will force this at the time of sampling.
#' @author Merlise A Clyde
#' @keywords regression
#' @examples
#'
#' data("chickwts")
#' bas.chk <- bas.lm(weight ~ feed, data = chickwts)
#' # summary(bas.chk) # 2^5 = 32 models
#' bas.chk.int <- force.heredity.bas(bas.chk)
#' # summary(bas.chk.int) # two models now
#'
#'
#' data(Hald)
#' bas.hald <- bas.lm(Y ~ .^2, data = Hald)
#' bas.hald.int <- force.heredity.bas(bas.hald)
#' image(bas.hald.int)
#'
#' image(bas.hald.int)
#'
#' # two-way interactions
#' data(ToothGrowth)
#' ToothGrowth$dose <- factor(ToothGrowth$dose)
#' levels(ToothGrowth$dose) <- c("Low", "Medium", "High")
#' TG.bas <- bas.lm(len ~ supp * dose, data = ToothGrowth, modelprior = uniform())
#' TG.bas.int <- force.heredity.bas(TG.bas)
#' image(TG.bas.int)
#' @family bas methods
#' @export
force.heredity.bas <- function(object, prior.prob = .5) {
parents <- make.parents.of.interactions(
mf = eval(object$call$formula, parent.frame()),
data = eval(object$call$data, parent.frame())
)
which <- which.matrix(object$which, object$n.vars)
keep <- apply(which, 1,
FUN = function(x) {
check.heredity(model = x, parents = parents)
}
)
# priorprobs = apply(which, 1,
# FUN=function(x) {prob.heredity(model=x, parents=parents)}
# )
# keep = priorprobs > 0.0
object$n.models <- sum(keep)
object$sampleprobs <- object$sampleprobs[keep] # if method=MCMC ??? reweight
object$which <- object$which[keep]
object$priorprobs <- object$priorprobs[keep] / sum(object$priorprobs[keep])
# wts = priorprobs[keep]/object$priorprobs[keep] #importance weights
wts <- 1
method <- object$call$method
if (!is.null(method)) {
if (method == "MCMC" || method == "MCMC_new") {
object$freq <- object$freq[keep]
# object$postprobs.MCMC = object$freq[keep]*wts
object$postprobs.MCMC <- object$freq[keep]
object$postprobs.MCMC <- object$postprobs.MCMC / sum(object$postprobs.MCMC)
object$probne0.MCMC <- as.vector(object$postprobs.MCMC %*% which[keep, ])
}
}
object$logmarg <- object$logmarg[keep]
object$shrinkage <- object$shrinkage[keep]
postprobs.RN <- exp(object$logmarg - min(object$logmarg)) * object$priorprobs
object$postprobs.RN <- postprobs.RN / sum(postprobs.RN)
# browser()
object$probne0.RN <- as.vector(object$postprobs.RN %*% which[keep, ])
object$postprobs <- object$postprobs[keep] * wts / sum(object$postprobs[keep] * wts)
object$probne0 <- as.vector(object$postprobs %*% which[keep, ])
object$mle <- object$mle[keep]
object$mle.se <- object$mle.se[keep]
object$mse <- object$mse[keep]
object$size <- object$size[keep]
object$R2 <- object$R2[keep]
object$df <- object$df[keep]
return(object)
}
# data(Hald)
# bas.hald = bas.lm(Y ~ .^2, data=Hald)
# hald.models = which.matrix(bas.hald$which, n.vars=bas.hald$n.vars)
# par.Hald = make.parents.of.interactions(Y ~ .^2, data=Hald)
# prior = apply(hald.models, 1,
# FUN=function(x) {prob.hereditary(model=x, parents=par.Hald$parents)})
# .prob.heredity(hald.models[1,], par.Hald$parents)
# force_heredity.bas(bas.hald)
| /scratch/gouwar.j/cran-all/cranData/BAS/R/interactions.R |
#' Uniform Prior Distribution for Models
#'
#' Creates an object representing the prior distribution on models for BAS.
#'
#' The Uniform prior distribution is a commonly used prior in BMA, and is a
#' special case of the independent Bernoulli prior with probs=.5. The implied
#' prior distribution on model size is binomial(p, .5).
#'
#' @aliases uniform Uniform
#' @return returns an object of class "prior", with the family name Uniform.
#' @author Merlise Clyde
#' @seealso \code{\link{bas.lm}},
#' \code{\link{beta.binomial}},\code{\link{Bernoulli}},
#' @examples
#' uniform()
#' @rdname uniform
#' @family priors modelpriors
#' @export
uniform <- function() {
structure(list(family = "Uniform", hyper.parameters = .5), class = "prior")
}
#' Independent Bernoulli Prior Distribution for Models
#'
#' Creates an object representing the prior distribution on models for BAS.
#'
#' The independent Bernoulli prior distribution is a commonly used prior in
#' BMA, with the Uniform distribution a special case with probs=.5. If all
#' indicator variables have a independent Bernoulli distributions with common
#' probability probs, the distribution on model size binomial(p, probs)
#' distribution.
#'
#' @aliases bernoulli Bernoulli
#' @param probs a scalar or vector of prior inclusion probabilities. If a
#' scalar, the value is replicated for all variables and a 1 is added for the
#' intercept. BAS checks to see if the length is equal to the dimension of the
#' parameter vector for the full model and adds a 1 to include the intercept.
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{bas.lm}},
#' \code{\link{beta.binomial}},\code{\link{uniform} }
#' @examples
#' Bernoulli(.9)
#' @family priors modelpriors
#' @export
#' @rdname Bernoulli
Bernoulli <- function(probs = 0.5) {
if (length(probs) == 1) {
if (probs == .5) {
structure(list(family = "Uniform", hyper.parameters = .5), class = "prior")
} else {
structure(list(family = "Bernoulli", hyper.parameters = probs), class = "prior")
}
}
else {
structure(list(family = "Bernoulli", hyper.parameters = probs), class = "prior")
}
}
#' Beta-Binomial Prior Distribution for Models
#'
#' Creates an object representing the prior distribution on models for BAS.
#'
#' The beta-binomial distribution on model size is obtained by assigning each
#' variable inclusion indicator independent Bernoulli distributions with
#' probability w, and then giving w a beta(alpha,beta) distribution.
#' Marginalizing over w leads to the distribution on model size having the
#' beta-binomial distribution. The default hyperparameters lead to a uniform
#' distribution over model size.
#'
#' @aliases beta.binomial Beta.Binomial
#' @param alpha parameter in the beta prior distribution
#' @param beta parameter in the beta prior distribution
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{bas.lm}}, \code{\link{Bernoulli}},\code{\link{uniform}}
#' @examples
#' beta.binomial(1, 10)
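#'
#' # sketch of the implied prior on model size for p = 5 candidate predictors:
#' # marginalizing over w gives P(size = k) proportional to
#' # choose(p, k) * beta(alpha + k, beta + p - k) / beta(alpha, beta)
#' p = 5; k = 0:p
#' round(choose(p, k) * beta(1 + k, 10 + p - k) / beta(1, 10), 4)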
#' @family priors modelpriors
#' @rdname beta.binomial
#' @export
beta.binomial <- function(alpha = 1.0, beta = 1.0) {
structure(list(family = "Beta-Binomial", hyper.parameters = c(alpha, beta)),
class = "prior"
)
}
#' Truncated Beta-Binomial Prior Distribution for Models
#'
#' Creates an object representing the prior distribution on models for BAS
#' using a truncated Beta-Binomial Distribution on the Model Size
#'
#' The beta-binomial distribution on model size is obtained by assigning each
#' variable inclusion indicator independent Bernoulli distributions with
#' probability w, and then giving w a beta(alpha,beta) distribution.
#' Marginalizing over w leads to the number of included
#' predictors having a beta-binomial distribution. The default hyperparameters
#' lead to a uniform distribution over model size. The Truncated version
#' assigns zero probability to all models of size > trunc.
#'
#' @aliases tr.beta.binomial tr.Beta.Binomial
#' @param alpha parameter in the beta prior distribution
#' @param beta parameter in the beta prior distribution
#' @param trunc parameter that determines truncation in the distribution i.e.
#' P(M; alpha, beta, trunc) = 0 if M > trunc.
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{bas.lm}}, \code{\link{Bernoulli}},\code{\link{uniform}}
#' @examples
#'
#' tr.beta.binomial(1, 10, 5)
#' library(MASS)
#' data(UScrime)
#' UScrime[, -2] <- log(UScrime[, -2])
#' crime.bic <- bas.lm(y ~ .,
#' data = UScrime, n.models = 2^15, prior = "BIC",
#' modelprior = tr.beta.binomial(1, 1, 8),
#' initprobs = "eplogp"
#' )
#' @family priors modelpriors
#' @rdname tr.beta.binomial
#' @export
tr.beta.binomial <- function(alpha = 1.0, beta = 1.0, trunc) {
structure(list(family = "Trunc-Beta-Binomial", hyper.parameters = c(alpha, beta, trunc)),
class = "prior"
)
}
#' Truncated Power Prior Distribution for Models
#'
#' Creates an object representing the prior distribution on models for BAS
#' using a truncated power prior distribution on the model indicators, where
#' the prior probability of a model gamma is proportional to p^(-kappa |gamma|)
#' and gamma is the vector of model indicators
#' Under the power prior, the prior probability of a model gamma is
#' proportional to p^(-kappa |gamma|), so that larger values of kappa place
#' more prior mass on smaller models. The Truncated version
#' assigns zero probability to all models of size > trunc.
#'
#' @aliases tr.power.prior tr.Power.Prior
#' @param kappa parameter in the prior distribution that controls sparsity
#' @param trunc parameter that determines truncation in the distribution i.e.
#' P(gamma; alpha, beta, trunc) = 0 if |gamma| > trunc.
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{bas.lm}}, \code{\link{Bernoulli}},\code{\link{uniform}}
#' @examples
#'
#' tr.power.prior(2, 8)
#' library(MASS)
#' data(UScrime)
#' UScrime[, -2] <- log(UScrime[, -2])
#' crime.bic <- bas.lm(y ~ .,
#' data = UScrime, n.models = 2^15, prior = "BIC",
#' modelprior = tr.power.prior(2, 8),
#' initprobs = "eplogp"
#' )
#' @family priors modelpriors
#' @rdname tr.power.prior
#' @export
tr.power.prior <- function(kappa = 2, trunc) {
structure(list(family = "Trunc-Power-Prior", hyper.parameters = c(kappa, trunc)),
class = "prior"
)
}
#' Truncated Poisson Prior Distribution for Models
#'
#' Creates an object representing the prior distribution on models for BAS
#' using a truncated Poisson Distribution on the Model Size
#'
#' The Poisson prior distribution on model size is obtained by assigning each
#' variable inclusion indicator independent Bernoulli distributions with
#' probability w, and then taking a limit as p goes to infinity and w goes to
#' zero, such that p*w converges to lambda. The Truncated version assigns zero
#' probability to all models of size M > trunc.
#'
#' @aliases tr.Poisson tr.poisson
#' @param lambda parameter in the Poisson distribution representing expected
#' model size with infinite predictors
#' @param trunc parameter that determines truncation in the distribution i.e.
#' P(M; lambda, trunc) = 0 if M > trunc
#' @return returns an object of class "prior", with the family and
#' hyperparameters.
#' @author Merlise Clyde
#' @seealso \code{\link{bas.lm}}, \code{\link{Bernoulli}},\code{\link{uniform}}
#' @examples
#' tr.poisson(10, 50)
#' @family priors modelpriors
#' @rdname tr.poisson
#' @export
tr.poisson <- function(lambda, trunc) {
structure(list(family = "Trunc-Poisson", hyper.parameters = c(lambda, trunc)),
class = "prior"
)
}
#' Independent Bernoulli prior on models that with constraints for
#' model hierarchy induced by interactions
#' @param pi Bernoulli probability that term is included
#' @param parents matrix of terms and parents with indicators of which terms
#' are parents for each term
#' @family priors modelpriors
#' @rdname Bernoulli.heredity
#' @note Not implemented yet for use with bas.lm or bas.glm
#' @export
Bernoulli.heredity <- function(pi = 0.5, parents) {
structure(list(
family = "Bernoulli.Constrained",
hyper.parameters = c(hyper.parameters = pi, parents = parents)
),
class = "prior"
)
stop("not implemented fully yet")
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/model_priors.R |
########## Functions for Chaloner and Brant ###############
#' Bayesian Outlier Detection
#'
#' Calculate the posterior probability that the absolute value of
#' the error exceeds k standard deviations,
#' P(|epsilon_j| > k sigma | data)
#' under the model Y = X B + epsilon,
#' with epsilon ~ N(0, sigma^2 I)
#' based on the paper
#' by Chaloner & Brant Biometrika (1988). Either k or the prior
#' probability of there being no outliers must be provided.
#' This only uses the reference prior p(B, sigma) = 1;
#' other priors and model averaging to come.
#'
#' @param lmobj An object of class `lm`
#' @param k number of standard deviations used in calculating
#' probability of an individual case being an outlier,
#' P(|error| > k sigma | data)
#' @param prior.prob The prior probability of there being no
#' outliers in the sample of size n
#' @return Returns a list of three items:
#' \item{e}{residuals}
#' \item{hat}{leverage values}
#' \item{prob.outlier}{posterior probabilities of a point being an outlier}
#' \item{prior.prob}{prior probability of a point being an outlier}
#' @references Chaloner & Brant (1988)
#' A Bayesian Approach to Outlier Detection and Residual Analysis
#' Biometrika (1988) 75, 651-659
#' @examples
#' data("stackloss")
#' stack.lm <- lm(stack.loss ~ ., data = stackloss)
#' stack.outliers <- Bayes.outlier(stack.lm, k = 3)
#' plot(stack.outliers$prob.outlier, type = "h", ylab = "Posterior Probability")
#' # adjust for sample size for calculating prior prob that a
#' # a case is an outlier
#' stack.outliers <- Bayes.outlier(stack.lm, prior.prob = 0.95)
#' # cases where posterior probability exceeds prior probability
#' which(stack.outliers$prob.outlier > stack.outliers$prior.prob)
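#' # the k implied by a 0.95 prior probability of no outliers in n cases
#' # (an illustration of the internal calibration used above)
#' n = nrow(stackloss)
#' qnorm(.5 + .5 * 0.95^(1/n))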
#' @export
Bayes.outlier <- function(lmobj, k, prior.prob) {
e <- residuals(lmobj)
h <- hatvalues(lmobj)
alpha <- (lmobj$df.residual) / 2
rate <- (lmobj$df.residual * (summary(lmobj)$sigma)^2) / 2
n <- length(e)
if (missing(k) & missing(prior.prob)) {
stop("please provide either k or the prior probability of no outliers")
}
else {
if (missing(k)) k <- qnorm(.5 + .5 * (prior.prob^(1 / n)))
}
pr <- rep(0, n)
for (i in 1:n) {
pr[i] <- integrate(
outlier.prob,
lower = 0,
upper = Inf,
ehat = e[i],
hii = h[i],
alpha = alpha,
rate = rate,
nsd = k
)$value
}
return(list(
e = e,
hat = h,
prob.outlier = pr,
prior.prob = pnorm(-k) * 2
))
}
# integrand for P(|epsilon_i| > nsd * sigma | data): the probability that the
# error exceeds nsd standard deviations given the precision phi, weighted by
# the gamma posterior density of phi (Chaloner & Brant, 1988)
outlier.prob <- function(phi, ehat, hii, alpha, rate, nsd) {
z1 <- (nsd - ehat * sqrt(phi)) / sqrt(hii)
z2 <- (-nsd - ehat * sqrt(phi)) / sqrt(hii)
pr.phi <- (1 - pnorm(z1) + pnorm(z2)) * dgamma(phi, shape = alpha, rate = rate)
return(pr.phi)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/outliers.R |
#' Plot Diagnostics for an BAS Object
#'
#' Four plots (selectable by 'which') are currently available: a plot of
#' residuals against fitted values, Cumulative Model Probabilities, log
#' marginal likelihoods versus model dimension, and marginal inclusion
#' probabilities.
#'
#' This provides a panel of 4 plots: the first is a plot of the residuals
#' versus fitted values under BMA. The second is a plot of the cumulative
#' posterior probabilities of the sampled models; if the model space cannot be enumerated then
#' this provides some indication of whether the probabilities are leveling off.
#' The third is a plot of log marginal likelihood versus model dimension and
#' the fourth plot shows the posterior marginal inclusion probabilities.
#'
#' @param x \code{bas} BMA object result of 'bas'
#' @param which if a subset of the plots is required, specify a subset of the
#' numbers '1:4'
#' @param caption captions to appear above the plots
#' @param panel panel function. The useful alternative to 'points',
#' 'panel.smooth' can be chosen by 'add.smooth = TRUE'
#' @param sub.caption common title-above figures if there are multiple; used as
#' 'sub' (s.'title') otherwise. If 'NULL', as by default, a possible shortened
#' version of \code{deparse(x$call)} is used
#' @param main title to each plot-in addition to the above 'caption'
#' @param ask logical; if 'TRUE', the user is asked before each plot, see
#' 'par(ask=.)'
#' @param col.in color for the included variables
#' @param col.ex color for the excluded variables
#' @param col.pch color for points in panels 1-3
#' @param cex.lab graphics parameter to control size of variable names
#' @param ... other parameters to be passed through to plotting functions
#' @param id.n number of points to be labeled in each plot, starting with the
#' most extreme
#' @param labels.id vector of labels, from which the labels for extreme points
#' will be chosen. 'NULL' uses observation numbers
#' @param cex.id magnification of point labels.
#' @param add.smooth logical indicating if a smoother should be added to most
#' plots; see also 'panel' above
#' @param label.pos positioning of labels, for the left half and right half of
#' the graph respectively, for plots 1-4
#' @param subset indices of variables to include/exclude in plot of marginal posterior
#' inclusion probabilities (NULL).
#' @param drop.always.included logical variable to drop marginal posterior inclusion
#' probabilities
#' for variables that are always forced into the model. FALSE by default.
#' @author Merlise Clyde, based on plot.lm by John Maindonald and Martin
#' Maechler
#' @seealso \code{\link{plot.coef.bas}} and \code{\link{image.bas}}.
#' @keywords regression
#' @examples
#'
#' data(Hald)
#' hald.gprior = bas.lm(Y~ ., data=Hald, prior="g-prior", alpha=13,
#' modelprior=beta.binomial(1,1),
#' initprobs="eplogp")
#'
#' plot(hald.gprior)
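#'
#' # show only the marginal inclusion probabilities (panel 4)
#' plot(hald.gprior, which = 4, ask = FALSE)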
#'
#'
#' @rdname plot
#' @family bas plots
#' @export
plot.bas = function (x,
which = c(1:4),
caption = c(
"Residuals vs Fitted",
"Model Probabilities",
"Model Complexity",
"Inclusion Probabilities"
),
panel = if (add.smooth)
panel.smooth
else
points,
sub.caption = NULL,
main = "",
ask = prod(par("mfcol")) < length(which) && dev.interactive(),
col.in = 2,
col.ex = 1,
col.pch = 1,
cex.lab = 1,
...,
id.n = 3,
labels.id = NULL,
cex.id = 0.75,
add.smooth = getOption("add.smooth"),
label.pos = c(4, 2),
subset = NULL,
drop.always.included = FALSE)
{
if (!inherits(x, "bas"))
stop("use only with \"bas\" objects") # nocov
if (!is.numeric(which) || any(which < 1) || any(which > 4))
stop("'which' must be in 1:4")
show <- rep(FALSE, 4)
show[which] <- TRUE
iid <- 1:id.n
if (show[1]) {
yhat = fitted(x, estimator = "BMA")
r = x$Y - yhat
n <- length(r)
if (id.n > 0) {
if (is.null(labels.id))
labels.id <- paste(1:n)
show.r <- sort.list(abs(r), decreasing = TRUE)[iid]
}
}
text.id <- function(x, y, ind, adj.x = TRUE) {
labpos <- if (adj.x)
label.pos[1 + as.numeric(x > mean(range(x)))]
else
3
text(
x,
y,
labels.id[ind],
cex = cex.id,
xpd = TRUE,
pos = labpos,
offset = 0.25
)
}
if (any(show[2:3])) {
show.m = sort.list(x$logmarg, decreasing = TRUE)[iid]
label.m = paste(1:x$n.models)
}
if (is.null(sub.caption)) {
cal <- x$call
if (!is.na(m.f <- match("formula", names(cal)))) {
cal <- cal[c(1, m.f)]
names(cal)[2] <- ""
}
cc <- deparse(cal, 80)
nc <- nchar(cc[1])
abbr <- length(cc) > 1 || nc > 75
sub.caption <- if (abbr)
paste(substr(cc[1], 1, min(75, nc)), "...")
else
cc[1]
}
one.fig <- prod(par("mfcol")) == 1
if (ask) {
op <- par(ask = TRUE)
on.exit(par(op))
}
if (show[1]) {
ylim <- range(r, na.rm = TRUE)
if (id.n > 0)
ylim <- extendrange(r = ylim, f = 0.08)
plot(
yhat,
r,
xlab = "Predictions under BMA",
ylab = "Residuals",
main = main,
ylim = ylim,
type = "n",
col = col.pch,
...
)
panel(yhat, r, ...)
if (one.fig)
title(sub = sub.caption, ...)
mtext(caption[1], 3, 0.25)
if (id.n > 0) {
y.id <- r[show.r]
y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ") / 3
text.id(yhat[show.r], y.id, show.r)
}
abline(h = 0, lty = 3, col = "gray")
}
if (show[2]) {
cum.prob = cumsum(x$postprobs)
m.index = 1:x$n.models
ylim <- range(cum.prob, na.rm = TRUE)
ylim[2] <- ylim[2] + diff(ylim) * 0.075
plot(
m.index,
cum.prob,
xlab = "Model Search Order",
ylab = "Cumulative Probability",
type = "n",
col = col.pch,
...
)
panel(m.index, cum.prob)
if (one.fig)
title(sub = sub.caption, ...)
mtext(caption[2], 3, 0.25)
#if (id.n > 0)
# text.id(m.index[show.m], cum.prob[show.m], show.m)
}
if (show[3]) {
logmarg = x$logmarg
dim = x$size
ylim <- range(logmarg, na.rm = TRUE)
plot(
dim,
logmarg,
xlab = "Model Dimension",
ylab = "log(Marginal)",
main = main,
ylim = ylim,
col = col.pch,
...
)
if (one.fig)
title(sub = sub.caption, ...)
mtext(caption[3], 3, 0.25)
if (id.n > 0)
text.id(dim[show.m], logmarg[show.m], show.m)
}
if (show[4]) {
if (is.null(subset))
subset = 1:x$n.vars
if (drop.always.included) {
keep = x$include.always
if (is.null(keep))
keep = 1
subset = subset[!subset %in% keep]
if (length(subset) == 0)
stop("no models in subset to show; modify subset or drop.always.included")
}
probne0 = x$probne0[subset]
nvars = length(subset)
variables = 1:nvars
ylim <- c(0, 1)
colors = rep(0, nvars)
colors[probne0 > .5] = col.in
colors[probne0 <= .5] = col.ex
plot(
variables,
probne0,
xlab = "",
ylab = "Marginal Inclusion Probability",
xaxt = "n",
main = main,
type = "h",
col = colors,
ylim = ylim,
...
)
if (one.fig)
title(sub = sub.caption, ...)
mtext(
x$namesx[subset],
side = 1,
line = 0.25,
at = variables,
las = 2,
cex = cex.lab,
...
)
mtext(caption[4], 3, 0.25)
#if (id.n > 0)
# text.id(dim[show.m], logmarg[show.m], show.m)
}
if (!one.fig && par("oma")[3] >= 1) {
mtext(sub.caption, outer = TRUE, cex = 1.25)
}
invisible()
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/plot.R |
#' Plots the posterior distributions of coefficients derived from Bayesian
#' model averaging
#'
#' Displays plots of the posterior distributions of the coefficients generated
#' by Bayesian model averaging over linear regression.
#'
#' Produces plots of the posterior distributions of the coefficients under
#' model averaging. The posterior probability that the coefficient is zero is
#' represented by a solid line at zero, with height equal to the probability.
#' The nonzero part of the distribution is scaled so that the maximum height is
#' equal to the probability that the coefficient is nonzero.
#'
#' The parameter \code{e} specifies the range over which the distributions are
#' to be graphed by specifying the tail probabilities that dictate the range to
#' plot over.
#'
#' @param x object of class coef.bas
#' @param e optional numeric value specifying the range over which the
#' distributions are to be graphed.
#' @param subset optional numerical vector specifying which variables to graph
#' (including the intercept)
#' @param ask Prompt for next plot
#' @param ... other parameters to be passed to \code{plot} and \code{lines}
#' @note For mixtures of g-priors, uncertainty in g is not incorporated at this
#' time, thus results are approximate
#' @author based on function \code{plot.bic} by Ian Painter in package BMA;
#' adapted for 'bas' class by Merlise Clyde \email{clyde@@stat.duke.edu}
#' @seealso \code{ \link{coef.bas}}
#' @references Hoeting, J.A., Raftery, A.E. and Madigan, D. (1996). A method
#' for simultaneous variable selection and outlier identification in linear
#' regression. Computational Statistics and Data Analysis, 22, 251-270.
#' @keywords regression
#' @examples
#'
#' \dontrun{library(MASS)
#' data(UScrime)
#' UScrime[,-2] <- log(UScrime[,-2])
#' crime_bic <- bas.lm(y ~ ., data=UScrime, n.models=2^15, prior="BIC")
#' plot(coefficients(crime_bic), ask=TRUE)
#' }
#'
#' @rdname plot.coef
#' @family bas plots
#' @export
plot.coef.bas <- function(x, e = 1e-04, subset = 1:x$n.vars, ask = TRUE, ...) {
plotvar <- function(prob0, mixprobs, df, means, sds, name,
e = 1e-04, nsteps = 500, ...) {
if (prob0 == 1 | length(means) == 0) { # nocov start
xlower <- -0
xupper <- 0
xmax <- 1 # nocov end
}
else {
qmin <- min(qnorm(e / 2, means, sds))
qmax <- max(qnorm(1 - e / 2, means, sds))
xlower <- min(qmin, 0)
xupper <- max(0, qmax)
}
xx <- seq(xlower, xupper, length.out = nsteps)
yy <- rep(0, times = length(xx))
maxyy <- 1
if (prob0 < 1 & length(sds) > 0) {
yy <- mixprobs %*% apply(matrix(xx, ncol = 1), 1,
FUN = function(x, d, m, s) {
dt(x = (x - m) / s, df = d) / s
},
d = df, m = means, s = sds
)
maxyy <- max(yy)
}
ymax <- max(prob0, 1 - prob0)
plot(c(xlower, xupper), c(0, ymax),
type = "n",
xlab = "", ylab = "", main = name, ...
)
lines(c(0, 0), c(0, prob0), lty = 1, lwd = 3, ...)
lines(xx, (1 - prob0) * yy / maxyy, lty = 1, lwd = 1, ...)
invisible()
}
if (ask) {
op <- par(ask = TRUE)
on.exit(par(op))
}
df <- x$df
for (i in subset) {
sel <- x$conditionalmeans[, i] != 0
prob0 <- 1 - x$probne0[i]
mixprobs <- x$postprobs[sel] / (1.0 - prob0)
means <- x$conditionalmeans[sel, i, drop = TRUE]
sds <- x$conditionalsd[sel, i, drop = TRUE]
name <- x$namesx[i]
df.sel <- df[sel]
plotvar(prob0, mixprobs, df.sel, means, sds, name, e = e, ...)
}
invisible()
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/plot_coef.R |
#' Prediction Method for an Object of Class basglm
#' @description Predictions under model averaging from a BMA (BAS) object for GLMs
#' under different loss functions.
#' @aliases predict.basglm
#' @param object An object of class "basglm", created by \code{bas.glm}
#' @param newdata dataframe, new matrix or vector of data for predictions. May
#' include a column for the intercept or just the predictor variables. If a
#' dataframe, the variables are extracted using model.matrix using the call
#' that created 'object'. May be missing in which case the data used for
#' fitting will be used for prediction.
#' @param se.fit indicator for whether to compute se of fitted and predicted
#' values
#' @param type Type of predictions required. The default "response" is on the scale of the
#' response variable, with the alternative being on the linear predictor
#' scale, `type ='link'`. Thus for a default binomial model
#' `type = 'response'` gives
#' the predicted probabilities, while with `'link'`, the estimates
#' are of log-odds (probabilities on logit scale).
#' @param top A scalar integer M. If supplied, calculate results using the subset of the top M models
#' based on posterior probabilities.
#' @param estimator estimator used for predictions. Currently supported
#' options include: \cr 'HPM' the highest probability model \cr 'BMA' Bayesian
#' model averaging, using optionally only the 'top' models \cr 'MPM' the median
#' probability model of Barbieri and Berger. \cr 'BPM' the model that is
#' closest to BMA predictions under squared error loss. BMA may be computed
#' using only the 'top' models if supplied
#' @param na.action function determining what should be done with missing values in newdata.
#' The default is to predict NA.
#' @param ... optional extra arguments
#' @return a list of
#' \item{fit}{predictions using BMA or other estimators}
#' \item{Ypred}{matrix of predictions under model(s)}
#' \item{postprobs}{renormalized probabilities of
#' the top models}
#' \item{best}{index of top models included}
#' @details This function first calls the predict method for class bas
#' (linear models) to form predictions on the linear predictor
#' scale for `BMA`, `HPM`, `MPM` etc. If the estimator is `BMA`
#' and `type='response'` then the
#' inverse link is applied to fitted values for type equal `'link'`
#' and model averaging takes place in the `response` scale. Thus applying
#' the inverse link to BMA estimate with `type = 'link'` is
#' not equal to the fitted values for `type = 'response'` under
#' BMA due to the nonlinear transformation under the inverse link.
#'
#' @author Merlise Clyde
#' @seealso \code{\link{bas.glm}}, \code{\link{predict.bas}},
#' \code{\link{fitted.bas}}
#' @keywords regression
#' @examples
#'
#'
#' data(Pima.tr, package="MASS")
#' data(Pima.te, package="MASS")
#' Pima.bas = bas.glm(type ~ ., data=Pima.tr, n.models= 2^7, method="BAS",
#' betaprior=CCH(a=1, b=nrow(Pima.tr)/2, s=0), family=binomial(),
#' modelprior=uniform())
#' pred = predict(Pima.bas, newdata=Pima.te, top=1) # Highest Probability model
#' cv.summary.bas(pred$fit, Pima.te$type, score="miss-class")
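#' # predictions on the linear predictor (logit) scale rather than probabilities
#' pred.link = predict(Pima.bas, newdata=Pima.te, type="link", top=1)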
#'
#' @rdname predict.basglm
#' @family predict methods
#' @family bas methods
#' @export
predict.basglm <- function(object,
newdata,
se.fit = FALSE,
type = c("response", "link"),
top = NULL,
estimator = "BMA",
na.action = na.pass,
...) {
# browser()
if (estimator == "HPM") {
top <- 1
}
# get predictions on linear predictor scale
pred <- predict.bas(
object,
newdata = newdata,
se.fit = se.fit,
top = top,
estimator = estimator,
na.action = na.action,
...
)
if (length(type) > 1) {
type <- type[1]
}
#
# if type is 'link' do not need to do anything; just return
# pred at end
#
if (type == "response") {
model.specs <- attributes(pred$fit)
if (estimator == "BMA") {
Ypred <- apply(
pred$Ypred,
1,
FUN = function(x) {
eval(object$family)$linkinv(x)
}
)
if (length(pred$postprobs) > 1) {
fit <- as.vector(Ypred %*% pred$postprobs)
} else {
fit <- as.vector(Ypred)
}
}
else {
fit <- eval(object$family)$linkinv(pred$fit)
}
attributes(fit) <- model.specs
# replace predictions
#
pred$fit <- fit
if (se.fit) {
se.fit <- pred$se.fit * abs(eval(object$family)$mu.eta(pred$fit))
se.pred <- pred$se.pred * abs(eval(object$family)$mu.eta(pred$fit))
pred$se.fit <- se.fit
pred$se.pred <- se.pred
}
}
return(pred)
}
#' Prediction Method for an object of class BAS
#'
#' Predictions under model averaging or other estimators from a BMA object of
#' class inheriting from 'bas'.
#'
#' Use BMA and/or model selection to form predictions using the top highest
#' probability models.
#'
#' @aliases predict.bas predict
#' @param object An object of class BAS, created by \code{bas}
#' @param newdata dataframe for predictions. If missing, then use the dataframe
#' used for fitting for obtaining fitted and predicted values.
#' @param se.fit indicator for whether to compute se of fitted and predicted
#' values
#' @param type Type of predictions required. "link", which is on the scale of
#' the linear predictor, is currently the only option for linear models; for the
#' normal model this is equivalent to type='response'.
#' @param top a scalar integer M. If supplied, subset the top M models, based
#' on posterior probabilities for model predictions and BMA.
#' @param estimator estimator used for predictions. Currently supported
#' options include: \cr 'HPM' the highest probability model \cr 'BMA' Bayesian
#' model averaging, using optionally only the 'top' models \cr 'MPM' the median
#' probability model of Barbieri and Berger. \cr 'BPM' the model that is
#' closest to BMA predictions under squared error loss. BMA may be computed
#' using only the 'top' models if supplied
#' @param na.action function determining what should be done with missing values in newdata.
#' The default is to predict NA.
#' @param ... optional extra arguments
#' @return a list of
#' \item{fit}{fitted values based on the selected estimator}
#' \item{Ybma}{predictions using BMA, the same as fit for non-BMA methods for
#' compatibility; will be deprecated}
#' \item{Ypred}{matrix of predictions under
#' each model for BMA}
#' \item{se.fit}{se of fitted values; in the case of BMA
#' this will be a matrix}
#' \item{se.pred}{se for predicted values; in the case
#' of BMA this will be a matrix}
#' \item{se.bma.fit}{vector of posterior sd under
#' BMA for posterior mean of the regression function.
#' This will be NULL if estimator is not 'BMA'}
#' \item{se.bma.pred}{vector of posterior sd under BMA
#' for posterior predictive values. this will be NULL if estimator is not
#' 'BMA'}
#' \item{best}{index of top models included}
#' \item{bestmodels}{subset of
#' bestmodels used for fitting or prediction}
#' \item{best.vars}{names of variables in the selected model; for estimator='BMA' this contains the names of all variables}
#' \item{df}{scalar or vector of
#' degrees of freedom for models}
#' \item{estimator}{estimator upon which 'fit'
#' is based.}
#' @author Merlise Clyde
#' @seealso \code{\link{bas}}, \code{\link{fitted.bas}},
#' \code{\link{confint.pred.bas}}, \code{\link{variable.names.pred.bas}}
#' @keywords regression
#' @examples
#'
#' data("Hald")
#' hald.gprior = bas.lm(Y ~ ., data=Hald, alpha=13, prior="g-prior")
#'
#' predict(hald.gprior, newdata=Hald, estimator="BPM", se.fit=TRUE)
#' # same as fitted
#' fitted(hald.gprior,estimator="BPM")
#' # default is BMA and estimation of mean vector
#' hald.bma = predict(hald.gprior, top=5, se.fit=TRUE)
#' confint(hald.bma)
#'
#' hald.bpm = predict(hald.gprior, newdata=Hald[1,],
#' se.fit=TRUE,
#' estimator="BPM")
#' confint(hald.bpm)
#' # extract variables
#' variable.names(hald.bpm)
#'
#' hald.hpm = predict(hald.gprior, newdata=Hald[1,],
#' se.fit=TRUE,
#' estimator="HPM")
#' confint(hald.hpm)
#' variable.names(hald.hpm)
#'
#' hald.mpm = predict(hald.gprior, newdata=Hald[1,],
#' se.fit=TRUE,
#' estimator="MPM")
#' confint(hald.mpm)
#' variable.names(hald.mpm)
#'
#' @rdname predict.bas
#' @family predict methods
#' @family bas methods
#' @export
predict.bas <- function(object,
newdata,
se.fit = FALSE,
type = "link",
top = NULL,
estimator = "BMA",
na.action = na.pass,
...) {
if (!(estimator %in% c("BMA", "HPM", "MPM", "BPM"))) {
stop("Estimator must be one of 'BMA', 'BPM', 'HPM', or 'MPM'.")
}
tt <- terms(object)
if (missing(newdata) || is.null(newdata)) {
newdata <- object$X
insample <- TRUE
}
else {
if (is.data.frame(newdata)) {
# newdata = model.matrix(eval(object$call$formula), newdata)
Terms <- delete.response(tt)
m <- model.frame(Terms,
newdata,
na.action = na.action,
xlev = object$xlevels
)
newdata <- model.matrix(Terms, m,
contrasts.arg = object$contrasts
)
insample <- FALSE
}
else {
stop("use of newdata as a vector is depricated,
please supply newdata as a dataframe")
# if (is.vector(newdata)) newdata=matrix(newdata, nrow=1)
}
}
# browser()
n <- nrow(newdata)
if (ncol(newdata) == object$n.vars) {
newdata <- newdata[, -1, drop = FALSE]
} # drop intercept
if (ncol(newdata) != (object$n.vars - 1)) {
stop("Dimension of newdata does not match orginal model")
}
if (!is.null(object$mean.x)) {
newdata <- sweep(newdata, 2, object$mean.x)
}
df <- object$df
if (estimator == "MPM") {
nvar <- object$n.vars - 1
bestmodel <- (0:nvar)[object$probne0 > .5]
newX <- cbind(1, newdata)
best <- 1
models <- rep(0, nvar + 1)
models[bestmodel + 1] <- 1
if (sum(models) > 1) {
if (is.null(eval(object$call$weights))) {
object <- bas.lm(
eval(object$call$formula),
data = eval(object$call$data, parent.frame()),
n.models = 1,
alpha = object$g,
initprobs = object$probne0,
prior = object$prior,
modelprior = object$modelprior,
update = NULL,
bestmodel = models,
prob.local = .0
)
}
else {
object <- bas.lm(
eval(object$call$formula),
data = eval(object$call$data, parent.frame()),
weights = eval(object$call$weights),
n.models = 1,
alpha = object$g,
initprobs = object$probne0,
prior = object$prior,
modelprior = object$modelprior,
update = NULL,
bestmodel = models,
prob.local = .0
)
}
best <- which.max(object$postprobs)
fit <-
as.vector(newX[, object$which[[best]] + 1, drop = FALSE] %*% object$mle[[best]]) * object$shrinkage[[best]]
fit <- fit + (1 - object$shrinkage[[best]]) * (object$mle[[best]])[1]
df <- df[best]
}
else {
      fit <- rep(1, nrow(newX)) * as.numeric(object$mle[object$size == 1])
}
models <- bestmodel
attributes(fit) <- list(
model = models,
best = best,
estimator = estimator
)
Ybma <- fit
Ypred <- NULL
postprobs <- NULL
best <- NULL
df <- object$n - 1
}
else {
if (estimator == "HPM") {
top <- 1
}
postprobs <- object$postprobs
best <- order(-postprobs)
if (!is.null(top)) {
best <- best[1:top]
}
models <- object$which[best]
beta <- object$mle[best]
gg <- object$shrinkage[best]
intercept <- object$intercept[best]
postprobs <- postprobs[best]
postprobs <- postprobs / sum(postprobs)
M <- length(postprobs)
Ypred <- matrix(0, M, n)
# lm case
if (is.null(intercept)) {
for (i in 1:M) {
beta.m <- beta[[i]]
model.m <- models[[i]]
Ypred[i, ] <-
(newdata[, model.m[-1], drop = FALSE] %*% beta.m[-1]) * gg[i] + beta.m[1]
}
}
else {
for (i in 1:M) {
beta.m <- beta[[i]]
model.m <- models[[i]]
Ypred[i, ] <-
(newdata[, model.m[-1], drop = FALSE] %*% beta.m[-1]) * gg[i] + intercept[i]
}
}
df <- df[best]
Ybma <- t(Ypred) %*% postprobs
fit <- as.vector(Ybma)
if (estimator == "HPM") {
models <- unlist(object$which[best])
attributes(fit) <- list(
model = models,
best = best,
estimator = estimator
)
}
if (estimator == "BPM") {
# browser()
dis <- apply(
sweep(Ypred, 2, Ybma),
1,
FUN = function(x) {
x[is.na(x)] <- 0 # ignore NA's in finding closest model
sum(x^2)
}
)
bestBPM <- which.min(dis)
fit <- as.vector(Ypred[bestBPM, ])
models <- unlist(object$which[best[bestBPM]])
best <- best[bestBPM]
df <- df[best]
attributes(fit) <- list(
model = models,
best = best,
estimator = estimator
)
}
}
# browser()
se <- list(
se.fit = NULL,
se.pred = NULL,
se.bma.fit = NULL,
se.bma.pred = NULL
)
if (se.fit) {
if (estimator != "BMA") {
se <- .se.fit(fit, newdata, object, insample)
}
else {
se <- .se.bma(
Ybma, newdata, Ypred, best, object,
insample
)
}
}
best.vars <- object$namesx # BMA case
if (!is.list(models)) {
best.vars <- object$namesx[models + 1]
}
out <- list(
fit = fit,
Ybma = Ybma,
Ypred = Ypred,
postprobs = postprobs,
se.fit = se$se.fit,
se.pred = se$se.pred,
se.bma.fit = se$se.bma.fit,
se.bma.pred = se$se.bma.pred,
df = df,
best = best,
bestmodel = models,
best.vars = best.vars,
estimator = estimator
)
class(out) <- "pred.bas"
return(out)
}
#' Fitted values for a BAS BMA objects
#'
#' Calculate fitted values for a BAS BMA object
#'
#' Calculates fitted values at observed design matrix using either the highest
#' probability model, 'HPM', the posterior mean (under BMA) 'BMA', the median
#' probability model 'MPM' or the best predictive model 'BPM'. The median
#' probability model is defined by including variables where the marginal
#' inclusion probability is greater than or equal to 1/2. For estimator="BMA", the
#' weighted average may be based on using a subset of the highest probability
#' models if an optional argument is given for top. By default BMA uses all
#' sampled models, which may take a while to compute if the number of variables
#' or number of models is large. The 'BPM' is found by computing the squared
#' distance between the vector of fitted values for a model and the fitted values
#' under BMA and returning the model with the smallest distance. In the presence
#' of multicollinearity this may be quite different from the MPM; with extreme
#' collinearity the MPM may drop relevant predictors.
#'
#' @aliases fitted.bas fitted
#' @param object An object of class 'bas' as created by \code{\link{bas}}
#' @param type type equals "response" or "link" in the case of GLMs (default is 'link')
#' @param estimator estimator type of fitted value to return. Default is to use
#' BMA with all models. Options include \cr 'HPM' the highest probability model
#' \cr 'BMA' Bayesian model averaging, using optionally only the 'top' models
#' \cr 'MPM' the median probability model of Barbieri and Berger. 'BPM' the
#' model that is closest to BMA predictions under squared error loss
#' @param top optional argument specifying that the 'top' models will be used
#' in constructing the BMA prediction, if NULL all models will be used. If
#' top=1, then this is equivalent to 'HPM'
#' @param na.action function determining what should be done with missing values in newdata. The default is to predict NA.
#' @param ... optional arguments, not used currently
#' @return A vector of length n of fitted values.
#' @author Merlise Clyde \email{[email protected]}
#' @seealso \code{\link{predict.bas}} \code{\link{predict.basglm}}
#' @references Barbieri, M. and Berger, J.O. (2004) Optimal predictive model
#' selection. Annals of Statistics. 32, 870-897. \cr
#' \url{https://projecteuclid.org/euclid.aos/1085408489&url=/UI/1.0/Summarize/euclid.aos/1085408489}
#'
#' Clyde, M. Ghosh, J. and Littman, M. (2010) Bayesian Adaptive Sampling for
#' Variable Selection and Model Averaging. Journal of Computational Graphics
#' and Statistics. 20:80-101 \cr
#' \doi{10.1198/jcgs.2010.09049}
#' @keywords regression
#' @examples
#'
#' data(Hald)
#' hald.gprior = bas.lm(Y~ ., data=Hald, prior="ZS-null", initprobs="Uniform")
#' plot(Hald$Y, fitted(hald.gprior, estimator="HPM"))
#' plot(Hald$Y, fitted(hald.gprior, estimator="BMA", top=3))
#' plot(Hald$Y, fitted(hald.gprior, estimator="MPM"))
#' plot(Hald$Y, fitted(hald.gprior, estimator="BPM"))
#'
#' @rdname fitted
#' @family bas methods
#' @family predict methods
#' @export
fitted.bas <- function(object,
type = "link",
estimator = "BMA",
top = NULL,
na.action = na.pass,
...) {
nmodels <- length(object$which)
X <- object$X
if (is.null(top)) {
top <- nmodels
}
if (estimator == "HPM") {
yhat <- predict(
object,
newdata = NULL,
top = 1,
estimator = "HPM", type = type,
na.action = na.action
)$fit
}
if (estimator == "BMA") {
yhat <- predict(
object,
newdata = NULL,
top = top,
estimator = "BMA", type = type,
na.action = na.action
)$fit
}
if (estimator == "MPM") {
yhat <- predict(
object,
newdata = NULL,
top = top,
estimator = "MPM", type = type,
na.action = na.action
)$fit
}
if (estimator == "BPM") {
yhat <- predict(
object,
newdata = NULL,
top = top,
estimator = "BPM", type = type,
na.action = na.action
)$fit
}
return(as.vector(yhat))
}
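# Internal helper (not exported): posterior standard errors of the fitted
# values and predictions under a single selected model ('HPM', 'MPM' or 'BPM').
# The quadratic form x_i' (X'X)^{-1} x_i is obtained from hat() for in-sample
# points, or via the QR decomposition of the centered design for new data,
# scaled by the shrinkage factor and combined with the Bayesian residual
# variance estimate.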
.se.fit <- function(yhat, X, object, insample) {
n <- object$n
model <- attr(yhat, "model")
best <- attr(yhat, "best")
df <- object$df[best]
shrinkage <- object$shrinkage[best]
if (insample) {
xiXTXxiT <- hat(object$X[, model + 1]) - 1 / n
} else {
X <- cbind(1, X[, model[-1], drop = FALSE])
oldX <- (sweep(object$X[, -1, drop = FALSE], 2, object$mean.x))[, model[-1]]
# browser()
XRinv <- X %*% solve(qr.R(qr(cbind(1, oldX))))
xiXTXxiT <- apply(XRinv^2, 1, sum) - 1 / n
}
scale_fit <- 1 / n + object$shrinkage[best] * xiXTXxiT
  if (is.null(object$family)) {
    family <- gaussian()
  } else {
    family <- object$family
  }
if (eval(family)$family == "gaussian") {
ssy <- var(object$Y) * (n - 1)
bayes_mse <- ssy * (1 - shrinkage * object$R2[best]) / df
}
else {
bayes_mse <- 1
} # ToDo add overdispersion
se.fit <- sqrt(bayes_mse * scale_fit)
se.pred <- sqrt(bayes_mse * (1 + scale_fit))
return(list(
se.fit = se.fit,
se.pred = se.pred,
residual.scale = sqrt(bayes_mse)
))
}
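# Internal helper (not exported): posterior standard errors under model
# averaging. For each of the top models it computes the model-specific
# variances for the mean and for prediction, then combines the posterior
# probability weighted (expected) within-model variance with the between-model
# variance of the fitted values, i.e. the law of total variance across models.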
.se.bma <- function(fit, Xnew, Ypred, best, object, insample) {
n <- object$n
df <- object$df[best]
shrinkage <- object$shrinkage[best]
if (insample) {
xiXTXxiT <- sapply(
object$which[best],
FUN = function(model, X) {
n <- nrow(X)
hat(X[, model[-1] + 1]) - 1 / n
},
object$X
)
}
else {
Xnew <- cbind(1, Xnew)
Xold <- cbind(1, sweep(object$X[, -1], 2, object$mean.x))
xiXTXxiT <- sapply(
object$which[best],
FUN = function(model, Xnew, Xold) {
Xnew <- Xnew[, model + 1]
oldX <- Xold[, model + 1]
n <- nrow(Xold)
XRinv <- Xnew %*% solve(qr.R(qr(oldX)))
xiXTXxiT <- apply(XRinv^2, 1, sum) - 1 / n
},
Xnew,
Xold
)
}
ssy <- var(object$Y) * (n - 1)
bayes_mse <- ssy * (1 - shrinkage * object$R2[best]) / df
if (is.vector(xiXTXxiT)) {
xiXTXxiT <- matrix(xiXTXxiT, nrow = 1)
}
scale_fit <- 1 / n + sweep(xiXTXxiT, 2, shrinkage, FUN = "*")
var.fit <- sweep(scale_fit, 2, bayes_mse, FUN = "*")
var.pred <- sweep((1 + scale_fit), 2, bayes_mse, FUN = "*")
postprobs <- object$postprobs[best]
# expected variance
evar.fit <- as.vector(var.fit %*% postprobs)
evar.pred <- as.vector(var.pred %*% postprobs)
# variance of expectations
var.efit <- as.vector(postprobs %*% (sweep(Ypred, 2, fit))^2)
se.fit <- sqrt(evar.fit + var.efit)
se.pred <- sqrt(evar.pred + var.efit)
return(
list(
se.bma.fit = se.fit,
se.bma.pred = se.pred,
se.fit = t(sqrt(var.fit)),
se.pred = t(sqrt(var.pred)),
residual.scale = sqrt(bayes_mse)
)
)
}
#' Extract the variable names for a model from a BAS prediction object
#'
#' @description S3 method for class 'pred.bas'. Simple utility
#' function to extract the variable names of the selected model
#' created by \code{predict} for BAS objects, used to print the names
#' under the estimators 'HPM', 'MPM' or 'BPM'.
#' @param object a BAS object created by \code{predict} from a BAS
#' `bas.lm` or `bas.glm` object
#' @param ... other arguments to pass on
#' @return a character vector with the names of the variables
#' included in the selected model; in the case of 'BMA' this will
#' be all variables
#' @seealso \code{\link{predict.bas}}
#' @method variable.names pred.bas
#' @rdname variable.names.pred.bas
#' @aliases variable.names.pred.bas variable.names
#' @family predict methods
#' @family bas methods
#' @examples
#' data(Hald)
#' hald.gprior = bas.lm(Y~ ., data=Hald, prior="ZS-null", modelprior=uniform())
#' hald.bpm = predict(hald.gprior, newdata=Hald[1,],
#' se.fit=TRUE,
#' estimator="BPM")
#' variable.names(hald.bpm)
#' @export
#'
variable.names.pred.bas <- function(object, ...) {
if (inherits(object, "pred.bas")) {
object$best.vars
}
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/predict.R |
#' Print a Summary of Bayesian Model Averaging objects from BAS
#'
#' \code{summary} and \code{print} methods for Bayesian model averaging objects
#' created by \code{bas} Bayesian Adaptive Sampling
#'
#' The print methods display a view similar to \code{print.lm}. The summary
#' methods display a view specific to Bayesian model averaging giving the top 5
#' highest probability models represented by their inclusion indicators.
#' Summaries of the models include the Bayes Factor (BF) of each model to the
#' model with the largest marginal likelihood, the posterior probability of the
#' models, R2, dim (which includes the intercept) and the log of the marginal
#' likelihood.
#'
#' @aliases print.bas print
#' @param x object of class 'bas'
#' @param digits optional number specifying the number of digits to display
#' @param ... other parameters to be passed to \code{print.default}
#' @author Merlise Clyde \email{clyde@@stat.duke.edu}
#' @seealso \code{\link{coef.bas}}
#' @keywords print regression
#' @examples
#'
#' library(MASS)
#' data(UScrime)
#' UScrime[, -2] <- log(UScrime[, -2])
#' crime.bic <- bas.lm(y ~ ., data = UScrime, n.models = 2^15, prior = "BIC", initprobs = "eplogp")
#' print(crime.bic)
#' summary(crime.bic)
#' @rdname print.bas
#' @method print bas
#' @export
print.bas <- function(x, digits = max(3L, getOption("digits") - 3L), ...) {
cat("\nCall:\n", paste(deparse(x$call),
sep = "\n",
collapse = "\n"
),
"\n\n",
sep = ""
)
cat("\n Marginal Posterior Inclusion Probabilities: \n")
out <- x$probne0
names(out) <- x$namesx
print.default(format(out, digits = digits),
print.gap = 2L,
quote = FALSE
)
invisible()
}
#' Summaries of Bayesian Model Averaging objects from BAS
#'
#' \code{summary} and \code{print} methods for Bayesian model averaging objects
#' created by \code{bas} Bayesian Adaptive Sampling
#'
#' The print methods display a view similar to \code{print.lm}. The summary
#' methods display a view specific to Bayesian model averaging giving the top 5
#' highest probability models represented by their inclusion indicators.
#' Summaries of the models include the Bayes Factor (BF) of each model to the
#' model with the largest marginal likelihood, the posterior probability of the
#' models, R2, dim (which includes the intercept) and the log of the marginal
#' likelihood.
#'
#' @aliases summary.bas summary
#' @param object object of class 'bas'
#' @param n.models optional number specifying the number of best models to
#' display in summary
#' @param ... other parameters to be passed to \code{summary.default}
#' @author Merlise Clyde \email{clyde@@duke.edu}
#' @seealso \code{\link{coef.bas}}
#' @keywords print regression
#' @examples
#' data(UScrime, package = "MASS")
#' UScrime[, -2] <- log(UScrime[, -2])
#' crime.bic <- bas.lm(y ~ ., data = UScrime, n.models = 2^15, prior = "BIC", initprobs = "eplogp")
#' print(crime.bic)
#' summary(crime.bic)
#' @rdname summary
#' @family bas methods
#' @method summary bas
#' @export
summary.bas <- function(object, n.models = 5, ...) {
best <- order(-object$postprobs)
n.models <- min(n.models, length(best))
best <- best[1:n.models]
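  # for each of the top models collect its inclusion indicators, Bayes factor
  # relative to the model with the largest marginal likelihood, posterior
  # probability, R2, dimension (including the intercept) and log marginal
  # likelihood; transposed below so that models appear as columns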
x <- cbind(
list2matrix.which(object, best),
exp(object$logmarg[best] - max(object$logmarg[best])),
round(object$postprobs[best], 4),
round(object$R2[best], 4),
object$size[best],
object$logmarg[best]
)
x <- t(x)
x <- cbind(NA, x)
x[1:object$n.vars, 1] <- object$probne0
colnames(x) <- c("P(B != 0 | Y)", paste("model", 1:n.models))
rownames(x) <- c(object$namesx, "BF", "PostProbs", "R2", "dim", "logmarg")
return(x)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/summary.R |
#' Update BAS object using a new prior
#'
#' Update a BMA object using a new prior distribution on the coefficients.
#'
#' Recomputes the marginal likelihoods for the new methods for models already
#' sampled in current object.
#'
#' @aliases update update.bas
#' @param object BMA object to update
#' @param newprior Update posterior model probabilities, probne0, shrinkage,
#' logmarg, etc, using prior based on newprior. See \code{\link{bas}} for
#' available methods
#' @param alpha optional new value of hyperparameter in prior for method
#' @param ... optional arguments
#' @return A new object of class BMA
#' @author Merlise Clyde \email{clyde@@stat.duke.edu}
#' @seealso \code{\link{bas}} for available methods and choices of alpha
#' @references Clyde, M. Ghosh, J. and Littman, M. (2010) Bayesian Adaptive
#' Sampling for Variable Selection and Model Averaging. Journal of
#' Computational Graphics and Statistics. 20:80-101 \cr
#' \doi{10.1198/jcgs.2010.09049}
#' @keywords regression
#' @examples
#'
#' \donttest{
#' library(MASS)
#' data(UScrime)
#' UScrime[,-2] <- log(UScrime[,-2])
#' crime.bic <- bas.lm(y ~ ., data=UScrime, n.models=2^10, prior="BIC",initprobs= "eplogp")
#' crime.ebg <- update(crime.bic, newprior="EB-global")
#' crime.zs <- update(crime.bic, newprior="ZS-null")
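#' # sketch: compare log marginal likelihoods under the original and updated priors
#' plot(crime.bic$logmarg, crime.zs$logmarg,
#'      xlab = "log marginal (BIC)", ylab = "log marginal (ZS-null)")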
#' }
#'
#' @rdname update
#' @method update bas
#' @family bas methods
#' @export
update.bas <- function(object, newprior, alpha = NULL, ...) {
method.num <- switch(newprior,
"g-prior" = 0,
"hyper-g" = 1,
"EB-local" = 2,
"BIC" = 3,
"ZS-null" = 4,
"ZS-full" = 5,
"hyper-g-laplace" = 6,
"AIC" = 7,
"EB-global" = 2,
"hyper-g-n" = 8,
"JZS" = 9,
)
if (is.null(alpha) &&
(method.num == 0 || method.num == 1 || method.num == 6 || method.num == 8)) {
stop(paste("Must specify a value of alpha for", newprior))
}
if (is.null(alpha)) alpha <- 0.0
object$alpha <- alpha
if (newprior == "EB-global") {
object <- EB.global(object)
} else {
object$prior <- newprior
SSY <- sum((object$Y - mean(object$Y))^2)
R2Full <- summary(lm(object$Y ~ object$X[, -1]))$r.squared
logmarg <- object$logmarg
shrinkage <- object$shrinkage
tmp <- .C(C_gexpectations_vect,
nmodels = as.integer(length(object$which)),
p = as.integer(object$n.vars), pmodel = as.integer(object$rank),
nobs = as.integer(object$n), R2 = object$R2, alpha = as.double(alpha),
method = as.integer(method.num), RSquareFull = as.double(R2Full), SSY = as.double(SSY),
logmarg = logmarg, shrinkage = shrinkage
)
object$logmarg <- tmp$logmarg
object$shrinkage <- tmp$shrinkage
object$postprobs <- exp(object$logmarg - min(object$logmarg)) * object$priorprobs
object$postprobs <- object$postprobs / sum(object$postprobs)
which <- which.matrix(object$which, object$n.vars)
object$probne0 <- object$postprobs %*% which
}
return(object)
}
| /scratch/gouwar.j/cran-all/cranData/BAS/R/update.R |
library(BAS)
library(MASS)
data(UScrime)
#UScrime[,-2] = log(UScrime[,-2])
crime.bic = bas.lm(log(y) ~ log(M) + So + log(Ed) + log(Po1) + log(Po2)
+ log(LF) + log(M.F) + log(Pop) + log(NW) +
log(U1) + log(U2) + log(GDP) + log(Ineq) + log(Prob)+
log(Time),
data=UScrime, n.models=2^15, prior="BIC",
modelprior=beta.binomial(1,1),
initprobs= "eplogp")
summary(crime.bic)
plot(crime.bic)
image(crime.bic, subset=-1)
# takes a while to run:
# crime.coef = coefficients(crime.bic)
# crime.coef
# par(mfrow=c(3,2))
# plot(crime.coef, ask=FALSE)
# see update
#crime.aic = update(crime.bic, newprior="AIC")
#crime.zs = update(crime.bic, newprior="ZS-null")
#crime.EBG = EB.global.bma(crime.bic)
# same as update(crime.bic, newprior="EB-global")
#image(crime.EBG, subset=-1)
| /scratch/gouwar.j/cran-all/cranData/BAS/demo/BAS.USCrime.R |
library(BAS)
data(Hald)
hald.gprior = bas.lm(Y~ ., data=Hald, prior="g-prior", alpha=13,
modelprior=beta.binomial(1,1),
initprobs="eplogp")
hald.gprior
plot(hald.gprior)
summary(hald.gprior)
image(hald.gprior, subset=-1, vlas=0)
hald.coef = coefficients(hald.gprior)
hald.coef
plot(hald.coef)
predict(hald.gprior, top=5, se.fit=TRUE)
confint(predict(hald.gprior, Hald, estimator="BMA", se.fit=TRUE, top=5), parm="mean")
predict(hald.gprior, estimator="MPM", se.fit=TRUE)
confint(predict(hald.gprior, Hald, estimator="MPM", se.fit=TRUE), parm="mean")
fitted(hald.gprior, estimator="HPM")
hald.gprior = bas.lm(Y~ ., data=Hald, n.models=2^4,
prior="g-prior", alpha=13, modelprior=uniform(),
initprobs="eplogp")
hald.EB = update(hald.gprior, newprior="EB-global")
hald.bic = update(hald.gprior,newprior="BIC")
hald.zs = update(hald.bic, newprior="ZS-null")
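# sketch: compare marginal posterior inclusion probabilities across priors
# (assumes the updated objects above retain the probne0 component)
probne0.compare = rbind(g.prior = as.numeric(hald.gprior$probne0),
                        EB.global = as.numeric(hald.EB$probne0),
                        BIC = as.numeric(hald.bic$probne0),
                        ZS.null = as.numeric(hald.zs$probne0))
colnames(probne0.compare) = hald.gprior$namesx
probne0.compare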
| /scratch/gouwar.j/cran-all/cranData/BAS/demo/BAS.hald.R |
## ----setup, include=FALSE-----------------------------------------------------
# require(knitr)
require(MASS)
require(dplyr)
require(GGally)
## ----install, eval=FALSE------------------------------------------------------
# install.packages("BAS")
## ----devtools, eval=FALSE-----------------------------------------------------
# devtools::install_github("merliseclyde/BAS")
## ----data---------------------------------------------------------------------
data(UScrime, package = "MASS")
## ----transform----------------------------------------------------------------
UScrime[, -2] <- log(UScrime[, -2])
## ----bas----------------------------------------------------------------------
library(BAS)
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null",
modelprior = uniform(), initprobs = "eplogp",
force.heredity = FALSE, pivot = TRUE
)
## ----fig.show='hold'----------------------------------------------------------
plot(crime.ZS, ask = F)
## ----pip, fig.width=5, fig.height=5-------------------------------------------
plot(crime.ZS, which = 4, ask = FALSE, caption = "", sub.caption = "")
## ----print--------------------------------------------------------------------
crime.ZS
## ----summary------------------------------------------------------------------
options(width = 80)
summary(crime.ZS)
## ----image, fig.width=5, fig.height=5-----------------------------------------
image(crime.ZS, rotate = F)
## ----coef---------------------------------------------------------------------
coef.ZS <- coef(crime.ZS)
## ----plot---------------------------------------------------------------------
plot(coef.ZS, subset = c(5:6), ask = F)
## ----confint-coef-------------------------------------------------------------
confint(coef.ZS)
## ----plot-confint, fig.width=7------------------------------------------------
plot(confint(coef.ZS, parm = 2:16))
## ----warning=FALSE, fig.width=7----------------------------------------------
plot(confint(coef(crime.ZS, estimator = "HPM")))
## ----warning=FALSE, fig.width=7, eval=FALSE----------------------------------
# plot(confint(coef(crime.ZS, estimator = "MPM")))
## ----choice of estimator------------------------------------------------------
muhat.BMA <- fitted(crime.ZS, estimator = "BMA")
BMA <- predict(crime.ZS, estimator = "BMA")
# predict has additional slots for fitted values under BMA, predictions under each model
names(BMA)
## ----fig.width=5, fig.height=5------------------------------------------------
par(mar = c(9, 9, 3, 3))
plot(muhat.BMA, BMA$fit,
pch = 16,
xlab = expression(hat(mu[i])), ylab = expression(hat(Y[i]))
)
abline(0, 1)
## ----HPM----------------------------------------------------------------------
HPM <- predict(crime.ZS, estimator = "HPM")
# show the indices of variables in the best model where 0 is the intercept
HPM$bestmodel
## -----------------------------------------------------------------------------
variable.names(HPM)
## ----MPM----------------------------------------------------------------------
MPM <- predict(crime.ZS, estimator = "MPM")
variable.names(MPM)
## ----BPM----------------------------------------------------------------------
BPM <- predict(crime.ZS, estimator = "BPM")
variable.names(BPM)
## ----fig.width=6, fig.height=6------------------------------------------------
GGally::ggpairs(data.frame(
HPM = as.vector(HPM$fit), # this used predict so we need to extract fitted values
MPM = as.vector(MPM$fit), # this used fitted
BPM = as.vector(BPM$fit), # this used fitted
BMA = as.vector(BMA$fit)
)) # this used predict
## ----se, fig.width=7----------------------------------------------------------
BPM <- predict(crime.ZS, estimator = "BPM", se.fit = TRUE)
crime.conf.fit <- confint(BPM, parm = "mean")
crime.conf.pred <- confint(BPM, parm = "pred")
plot(crime.conf.fit)
plot(crime.conf.pred)
## ----pred---------------------------------------------------------------------
new.pred <- predict(crime.ZS, newdata = UScrime, estimator = "MPM")
## -----------------------------------------------------------------------------
system.time(
for (i in 1:10) {
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null", method = "BAS",
modelprior = uniform(), initprobs = "eplogp"
)
}
)
system.time(
for (i in 1:10) {
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null", method = "deterministic",
modelprior = uniform(), initprobs = "eplogp"
)
}
)
## ----MCMC---------------------------------------------------------------------
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null",
modelprior = uniform(),
method = "MCMC"
)
## ----diagnostics--------------------------------------------------------------
diagnostics(crime.ZS, type = "pip", pch = 16)
diagnostics(crime.ZS, type = "model", pch = 16)
## ----biggerMCMC, eval=FALSE---------------------------------------------------
# crime.ZS <- bas.lm(y ~ .,
# data = UScrime,
# prior = "ZS-null",
# modelprior = uniform(),
# method = "MCMC", MCMC.iterations = 10^6
# )
#
# diagnostics(crime.ZS, type="model", pch=16)
## ----add-out------------------------------------------------------------------
data("stackloss")
stackloss <- cbind(stackloss, diag(nrow(stackloss)))
stack.bas <- bas.lm(stack.loss ~ .,
data = stackloss,
method = "MCMC", initprobs = "marg-eplogp",
prior = "ZS-null",
modelprior = tr.poisson(4, 10),
MCMC.iterations = 200000
)
## -----------------------------------------------------------------------------
knitr::kable(as.data.frame(summary(stack.bas)))
## -----------------------------------------------------------------------------
data(ToothGrowth)
ToothGrowth$dose <- factor(ToothGrowth$dose)
levels(ToothGrowth$dose) <- c("Low", "Medium", "High")
## ----fig.width=7--------------------------------------------------------------
TG.bas <- bas.lm(len ~ supp*dose,
data = ToothGrowth,
modelprior = uniform(), method = "BAS"
)
image(TG.bas)
## ----fig.width=7--------------------------------------------------------------
TG.bas <- bas.lm(len ~ supp * dose,
data = ToothGrowth,
modelprior = uniform(), method = "BAS", force.heredity = TRUE
)
image(TG.bas)
## ----force-herid--------------------------------------------------------------
TG.bas <- bas.lm(len ~ supp * dose,
data = ToothGrowth,
modelprior = uniform(), method = "BAS", force.heredity = FALSE
)
TG.herid.bas <- force.heredity.bas(TG.bas)
## ----climate------------------------------------------------------------------
data(climate, package="BAS")
str(climate)
summary(climate)
## ----read-climate-------------------------------------------------------------
library(dplyr)
climate <- filter(climate, proxy != 6) %>%
mutate(proxy = factor(proxy))
## ----wtreg--------------------------------------------------------------------
climate.bas <- bas.lm(deltaT ~ proxy * poly(latitude, 2),
data = climate,
weights = 1 / sdev^2,
prior = "hyper-g-n", alpha = 3.0,
n.models = 2^20,
force.heredity=TRUE,
modelprior = uniform()
)
## ----climate-image, fig.width=7-----------------------------------------------
image(climate.bas, rotate = F)
## ----wtreg-wo-contraint, fig.width=7------------------------------------------
# May take a while to enumerate all 2^20 models
climate.bas <- bas.lm(deltaT ~ proxy * poly(latitude, 2),
data = climate,
weights = 1 / sdev^2,
prior = "hyper-g-n", alpha = 3.0,
n.models = 2^20,
modelprior = uniform(),
force.heredity = FALSE
)
image(climate.bas)
| /scratch/gouwar.j/cran-all/cranData/BAS/inst/doc/BAS-vignette.R |
---
title: "Using the Bayesian Adaptive Sampling (BAS) Package for Bayesian Model Averaging and Variable Selection"
author: "Merlise A Clyde"
date: "`r Sys.Date()`"
r_packages:
- rmarkdown
- dplyr
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using the Bayesian Adaptive Sampling (BAS) Package for Bayesian Model Averaging and Variable Selection}
  %\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
# require(knitr)
require(MASS)
require(dplyr)
require(GGally)
```
The `BAS` package provides easy to use functions to implement Bayesian Model Averaging in linear models and generalized linear models.
Prior distributions on coefficients are
based on Zellner's g-prior or mixtures of g-priors, such as the
Zellner-Siow Cauchy prior or mixtures of g-priors from
[Liang et al (2008)](https://doi.org/10.1198/016214507000001337)
for linear models, as well as other options including AIC, BIC, RIC and Empirical Bayes methods. Extensions to Generalized Linear Models are based on the
mixtures of g-priors in GLMs of
[Li and Clyde (2019)](https://doi.org/10.1080/01621459.2018.1469992) using an
integrated Laplace approximation.
`BAS` uses an adaptive sampling algorithm to sample without replacement from the space of models or MCMC sampling which is recommended for sampling problems with a large number of predictors. See [Clyde, Littman & Ghosh](https://doi.org/10.1198/jcgs.2010.09049) for more details for the sampling algorithms.
## Installing BAS
The stable version can be installed easily in the `R` console like any other package:
```{r install, eval=FALSE}
install.packages("BAS")
```
On the other hand, I welcome everyone to use the most recent version
of the package with quick-fixes, new features and probably new
bugs. To get the latest
development version from [GitHub](https://github.com/merliseclyde),
use the `devtools` package from
[CRAN](https://cran.r-project.org/package=devtools) and enter in `R`:
```{r devtools, eval=FALSE}
devtools::install_github("merliseclyde/BAS")
```
As the package does depend on BLAS and LAPACK, installing from GitHub will require that you have FORTRAN and C compilers on your system.
## Demo
We will use the UScrime data to illustrate some of the commands and functionality.
```{r data}
data(UScrime, package = "MASS")
```
Following other analyses, we will go ahead and log transform all of the variables except column 2, which is the indicator variable of the state being a southern state.
```{r transform}
UScrime[, -2] <- log(UScrime[, -2])
```
To get started, we will use `BAS` with the Zellner-Siow Cauchy prior on the coefficients.
```{r bas}
library(BAS)
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null",
modelprior = uniform(), initprobs = "eplogp",
force.heredity = FALSE, pivot = TRUE
)
```
`BAS` uses a model formula similar to `lm` to specify the full model with all of the potential predictors. Here we are using the shorthand `.` to indicate that all remaining variables in the data frame provided by the `data` argument should be included. Different prior distributions on the regression coefficients may be specified using the `prior` argument, and include
* "BIC"
* "AIC
* "g-prior"
* "hyper-g"
* "hyper-g-laplace"
* "hyper-g-n"
* "JZS"
* "ZS-null"
* "ZS-full"
* "EB-local"
* "EB-global"
where the default is the Zellner-Siow prior, ZS-null, where all Bayes factors are compared to the null model. The newest prior option, "JZS", also corresponds to the Zellner-Siow prior on the coefficients, but uses numerical integration rather than a Laplace approximation to obtain the marginal likelihood of models.
By default, `BAS` will try to enumerate all models if $p < 19$ using the default `method="BAS"`. The prior distribution over the models is a `uniform()` distribution which assigns equal probabilities to all models. The last optional argument `initprobs = eplogp` provides a way to initialize the sampling algorithm and order the variables in the tree structure that represents the model space in BAS. The `eplogp` option uses the Bayes factor calibration of p-values $-e p \log(p)$ to provide an approximation to the marginal inclusion probability that the coefficient of each predictor is zero, using the p-values from the full model. Other options for `initprobs` include
* "marg-eplogp""
* "uniform"
* numeric vector of length p
The option "marg-eplogp" uses p-values from the $p$ simple linear regressions (useful for large p or highly correlated variables).
Since we are enumerating all possible models, these options are not important here, and `method="deterministic"` may be faster if there are no factors or interactions in the model.
## Plots
Some graphical summaries of the output may be obtained by the `plot` function
```{r, fig.show='hold'}
plot(crime.ZS, ask = F)
```
which produces a panel of four plots. The first is a plot of residuals and fitted values under Bayesian Model Averaging. Ideally, if our model assumptions hold, we will not see outliers or non-constant variance. The second plot shows the cumulative probability of the models in the order that they are sampled. This plot indicates that the cumulative probability is leveling off as each additional model adds only a small increment to the cumulative probability, while earlier, there are larger jumps corresponding to discovering a new high probability model. The third plot shows the dimension of each model (the number of regression coefficients including the intercept) versus the log of the marginal likelihood of the model. The last plot shows the marginal posterior inclusion probabilities (pip) for each of the covariates, with marginal pips greater than 0.5 shown in red. The variables with pip > 0.5 correspond to what is known as the median probability model. Variables with high inclusion probabilities are generally important for explaining the data or prediction, but marginal inclusion probabilities may be small if there are predictors that are highly correlated, similar to how p-values may be large in the presence of multicollinearity.
Individual plots may be obtained using the `which` option.
```{r pip, fig.width=5, fig.height=5}
plot(crime.ZS, which = 4, ask = FALSE, caption = "", sub.caption = "")
```
`BAS` has `print` and `summary` methods defined for objects of class `bas`. Typing the objects name
```{r print}
crime.ZS
```
returns a summary of the marginal inclusion probabilities, while the `summary` function
provides
```{r summary}
options(width = 80)
summary(crime.ZS)
```
a list of the top 5 models (in terms of posterior probability) with the zero-one indicators for variable inclusion. The other columns in the summary are the Bayes factor of each model to the highest probability model (hence its Bayes factor is 1), the posterior probabilities of the models, the ordinary $R^2$ of the models, the dimension of the models (number of coefficients including the intercept) and the log marginal likelihood under the selected prior distribution.
## Visualization of the Model Space
To see beyond the first five models, we can represent the collection of the models via an `image` plot. By default this shows the top 20 models.
```{r image, fig.width=5, fig.height=5}
image(crime.ZS, rotate = F)
```
This image has rows that correspond to each of the variables and intercept, with labels for the variables on the y-axis. The x-axis corresponds to the possible models. These are sorted by their posterior probability from best at the left
to worst at the right with the rank on the top x-axis.
Each column represents one of the top 20 models. The variables
that are excluded in a model are shown in black for each column, while
the variables that are included are colored, with the color related to
the log posterior probability.
The color of each column is proportional to the log of the posterior probability (the lower x-axis) of that model. The log posterior probabilities are scaled so that 0 corresponds to the lowest probability model in the top 20, so the values on the axis are log Bayes factors for comparing each model to the lowest probability model among the top 20. Models with the same color have similar log Bayes factors, which allows us to identify clusters of models whose Bayes factor differences are not "worth a bare mention".
This plot indicates that the police expenditure variables for the two years do not enter the model together, which is an indication of the high correlation between the two variables.
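Since the image suggests that `Po1` and `Po2` rarely appear together, a quick check of their correlation (using the log-transformed data from above) illustrates why:
```{r po-corr}
# correlation of the two (log) police expenditure variables
cor(UScrime$Po1, UScrime$Po2)
```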
## Posterior Distributions of Coefficients
To examine the marginal distributions of the two coefficients for the police expenditures, we can extract the coefficients estimates and standard deviations under BMA.
```{r coef}
coef.ZS <- coef(crime.ZS)
```
an optional argument, `n.models` to `coef` will use only the top `n.models` for BMA and may be more computationally efficient for large problems.
Plots of the posterior distributions averaging over all of the models are obtained using the `plot` method for the `bas` coefficient object.
```{r plot}
plot(coef.ZS, subset = c(5:6), ask = F)
```
The vertical bar represents the posterior probability that the coefficient is 0 while
the bell shaped curve represents the density of plausible values from all the models where the coefficient is non-zero. This is scaled so that the height of the density for non-zero values is the probability that the coefficient is non-zero. Omitting the `subset` argument provides all of the marginal distributions.
To obtain credible intervals for coefficients, `BAS` includes a `confint` method to create Highest Posterior Density intervals from the summaries from `coef`.
```{r confint-coef}
confint(coef.ZS)
```
where the third column is the posterior mean.
This uses Monte Carlo sampling to draw from the mixture model over coefficients, where models are sampled according to their posterior probabilities.
We can also plot these via
```{r plot-confint, fig.width=7}
plot(confint(coef.ZS, parm = 2:16))
```
using the `parm` argument to select which coefficients to plot (the intercept is `parm=1`).
For estimation under selection, `BAS` supports additional arguments
via `estimator`. The default is `estimator="BMA"` which uses all models or `n.models`. Other options include estimation under the highest probability model
```{r, warning=FALSE, fig.width=7}
plot(confint(coef(crime.ZS, estimator = "HPM")))
```
or the median probability model
```{r, warning=FALSE, fig.width=7, eval=FALSE}
plot(confint(coef(crime.ZS, estimator = "MPM")))
```
where variables that are excluded have distributions that are point masses at zero under selection.
## Prediction
`BAS` has methods defined to return fitted values, `fitted`, using the observed design matrix and predictions at either the observed data or potentially new values, `predict`, as with `lm`.
```{r choice of estimator}
muhat.BMA <- fitted(crime.ZS, estimator = "BMA")
BMA <- predict(crime.ZS, estimator = "BMA")
# predict has additional slots for fitted values under BMA, predictions under each model
names(BMA)
```
Plotting the two sets of fitted values,
```{r, fig.width=5, fig.height=5}
par(mar = c(9, 9, 3, 3))
plot(muhat.BMA, BMA$fit,
pch = 16,
xlab = expression(hat(mu[i])), ylab = expression(hat(Y[i]))
)
abline(0, 1)
```
we see that they are in perfect agreement. That is always the case, as the posterior mean of the regression mean function at a point $x$ equals the expected posterior predictive value for $Y$ at $x$. This is true not only for estimators such as BMA, but also for the expected values under model selection.
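A quick numerical check of the agreement:
```{r check-agreement}
all.equal(as.vector(muhat.BMA), as.vector(BMA$fit))
```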
### Inference with model selection ###
In addition to using BMA, we can use the posterior means under model selection. This corresponds to a decision rule that combines estimation and selection. `BAS` currently implements the following options
**highest probability model:**
```{r HPM}
HPM <- predict(crime.ZS, estimator = "HPM")
# show the indices of variables in the best model where 0 is the intercept
HPM$bestmodel
```
A little more interpretable version with names:
```{r}
variable.names(HPM)
```
**median probability model:**
```{r MPM}
MPM <- predict(crime.ZS, estimator = "MPM")
variable.names(MPM)
```
This is the model where all predictors have an inclusion probability greater than or equal to 0.5. This coincides with the HPM if the predictors are all mutually orthogonal, and in this case is the best predictive model under squared error loss.
Note that we can also extract the best model from the attribute in the fitted values as well.
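Since the MPM is defined through the marginal inclusion probabilities, the same set of variables can also be read off the `bas` object directly (a small sketch using the `probne0` and `namesx` components):
```{r mpm-pip}
# intercept and variables with marginal inclusion probability >= 0.5
crime.ZS$namesx[crime.ZS$probne0 >= 0.5]
```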
**best predictive model:**
In general, the HPM or MPM are not the best predictive models, which from a Bayesian decision theory perspective would be the model that is closest to BMA predictions under squared error loss.
```{r BPM}
BPM <- predict(crime.ZS, estimator = "BPM")
variable.names(BPM)
```
Let's see how they compare:
```{r, fig.width=6, fig.height=6}
GGally::ggpairs(data.frame(
HPM = as.vector(HPM$fit), # this used predict so we need to extract fitted values
MPM = as.vector(MPM$fit), # this used fitted
BPM = as.vector(BPM$fit), # this used fitted
BMA = as.vector(BMA$fit)
)) # this used predict
```
Using the `se.fit = TRUE` option with `predict` we can also calculate standard deviations for prediction or for the mean and use this as input for the `confint` function for the prediction object.
```{r se, fig.width=7}
BPM <- predict(crime.ZS, estimator = "BPM", se.fit = TRUE)
crime.conf.fit <- confint(BPM, parm = "mean")
crime.conf.pred <- confint(BPM, parm = "pred")
plot(crime.conf.fit)
plot(crime.conf.pred)
```
For prediction at new points, we can supply a new dataframe to the predict function as in `lm`.
```{r pred}
new.pred <- predict(crime.ZS, newdata = UScrime, estimator = "MPM")
```
## Alternative algorithms
`BAS` has several options for sampling from the model space with or without enumeration. The (current) default `method="BAS"` samples models without replacement using estimates of the marginal inclusion probabilities using the algorithm described in [Clyde et al (2011)](https://doi.org/10.1198/jcgs.2010.09049). The initial sampling probabilities provided by `initprobs` are updated based on the sampled models, every `update` iterations.
This can be more efficient in some cases if a large fraction of the model space has been sampled, however, in cases of high correlation and a large number of predictors, this can lead to biased estimates
[Clyde and Ghosh (2012)](https://doi.org/10.1093/biomet/ass040), in which case MCMC is preferred. The `method="MCMC"` is described below and is better for large $p$.
A deterministic sampling scheme is also available for enumeration;
```{r}
system.time(
for (i in 1:10) {
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null", method = "BAS",
modelprior = uniform(), initprobs = "eplogp"
)
}
)
system.time(
for (i in 1:10) {
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null", method = "deterministic",
modelprior = uniform(), initprobs = "eplogp"
)
}
)
```
which is faster for enumeration than the default method="BAS".
## Beyond Enumeration
Many problems are too large to enumerate all possible models. In such cases we may use the `method="BAS"` to sample without replacement or the `method="MCMC"` option to sample models using Markov Chain Monte Carlo sampling to sample models based on their posterior probabilities. For spaces where the number of models greatly exceeds the number of models to sample, the MCMC option is recommended as it provides estimates with low bias compared to the sampling without replacement of BAS (Clyde and Ghosh 2011).
```{r MCMC}
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null",
modelprior = uniform(),
method = "MCMC"
)
```
This will run the MCMC sampler until the number of unique sampled models exceeds `n.models` which is $2^p$ (if $p < 19$) by default or until `MCMC.iterations` has been exceeded, where `MCMC.iterations = n.models*2` by default.
### Estimates of Marginal Posterior Inclusion Probabilities (pip) ###
With MCMC sampling there are two estimates of the marginal inclusion probabilities: `object$probne0` which are obtained by using the re-normalized posterior odds from sampled models to estimate probabilities and the estimates based on Monte Carlo frequencies `object$probs.MCMC`. These should be in close agreement if the MCMC sampler has run for enough iterations.
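Before turning to the built-in diagnostics, the two sets of estimates can be compared directly; a small sketch (not evaluated here), assuming the `probne0` and `probs.MCMC` components described above are present in the MCMC fit:
```{r pip-compare, eval=FALSE}
cbind(renormalized = crime.ZS$probne0, MCMC = crime.ZS$probs.MCMC)
```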
`BAS` includes a diagnostic function to compare the two sets of estimates of posterior inclusion probabilities and posterior model probabilities
```{r diagnostics}
diagnostics(crime.ZS, type = "pip", pch = 16)
diagnostics(crime.ZS, type = "model", pch = 16)
```
In the left hand plot of pips, each point represents one posterior inclusion probability for the 15 variables estimated under the two methods. The two estimators are in pretty close agreement. The plot of the model probabilities suggests that we should use more `MCMC.iterations` if we want more accurate estimates of the posterior model probabilities.
```{r biggerMCMC, eval=FALSE}
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null",
modelprior = uniform(),
method = "MCMC", MCMC.iterations = 10^6
)
diagnostics(crime.ZS, type="model", pch=16)
```
## Outliers
BAS can also be used for exploring mean shift or variance inflation outliers by adding indicator variables for each case being an outlier (the mean is not given by the regression) or not. This is similar to the MC3.REG function in BMA, although here we are using a g-prior or mixture of g-priors for the coefficients for the outlier means.
Using the Stackloss data, we can add an identity matrix to the original dataframe, where each column is an indicator that the ith observation is an outlier.
```{r add-out}
data("stackloss")
stackloss <- cbind(stackloss, diag(nrow(stackloss)))
stack.bas <- bas.lm(stack.loss ~ .,
data = stackloss,
method = "MCMC", initprobs = "marg-eplogp",
prior = "ZS-null",
modelprior = tr.poisson(4, 10),
MCMC.iterations = 200000
)
```
The above call introduces truncated prior distributions on the model space; in this case the number of variables to be included has a Poisson distribution with mean 4 (before truncation), and the truncation point is at 10 (one half of the number of cases, rounded down), so that all models with more than 10 predictors have probability zero. This avoids exploration of models that are not full rank.
Looking at the summaries
```{r}
knitr::kable(as.data.frame(summary(stack.bas)))
```
## Factors and Hierarchical Heredity
BAS now includes constraints for factors so that all terms that represent a factor are either included or excluded together.
To illustrate, we will use the data set `ToothGrowth` and convert `dose` to a factor:
```{r}
data(ToothGrowth)
ToothGrowth$dose <- factor(ToothGrowth$dose)
levels(ToothGrowth$dose) <- c("Low", "Medium", "High")
```
and fit the model of main effects and two way interaction without any constraints:
```{r, fig.width=7}
TG.bas <- bas.lm(len ~ supp*dose,
data = ToothGrowth,
modelprior = uniform(), method = "BAS"
)
image(TG.bas)
```
From the image of the model space, we see that levels of a factor enter or drop from the model independently and that interactions may be included without the main effects. This may lead to more parsimonious models; however, the hypotheses being tested about the coefficients that represent the factor depend on the choice of reference group. To force levels of a factor to enter or leave together we can use `force.heredity = TRUE`.
The `force.heredity` option also forces interactions to be included only if the main effects are also included, or for models with several factors and higher order interactions, the heredity constraint implies that all lower order interactions must be included before adding higher order interactions.
```{r, fig.width=7}
TG.bas <- bas.lm(len ~ supp * dose,
data = ToothGrowth,
modelprior = uniform(), method = "BAS", force.heredity = TRUE
)
image(TG.bas)
```
The `force.heredity` option is set to FALSE for the sampling methods `MCMC+BAS` and `deterministic`. If there are more than 20 predictors and factors, then we recommend using `MCMC` to enforce the constraints. Alternatively,
there is a function, `force.heredity.bas`, to
post-process the output to drop models that violate the hierarchical heredity constraint:
```{r force-herid}
TG.bas <- bas.lm(len ~ supp * dose,
data = ToothGrowth,
modelprior = uniform(), method = "BAS", force.heredity = FALSE
)
TG.herid.bas <- force.heredity.bas(TG.bas)
```
that can be used with those sampling methods.
## Weighted Regression
`BAS` can perform weighted regression by supplying an optional weight vector that is of the same length as the response where the assumption is that the variance of the response is proportional to 1/weights. The g-prior incorporates the
weights in the prior covariance,
$$
\sigma^2 g (X_\gamma^T W X_\gamma)^{-1}
$$
where $X_\gamma$ is the design matrix under model $\gamma$ and $W$ is the $n \times n$ diagonal matrix with the weights on the diagonal.
To illustrate, we will use the climate data, available at the url below
```{r climate}
data(climate, package="BAS")
str(climate)
summary(climate)
```
which includes measurements of changes in temperature (`deltaT`) at various `latitude`s as well as a measure of the accuracy of the measured values `sdev` for 8 different types `proxy` of obtaining measurements. We will use this to explore weighted regression and the option to group terms in factors or from `poly`. For illustration purposes, we will eliminate `proxy == 6` which has only one level as the interactions are not estimable, and then convert `proxy` to a factor.
```{r read-climate}
library(dplyr)
climate <- filter(climate, proxy != 6) %>%
mutate(proxy = factor(proxy))
```
We can fit a weighted regression with `weights = 1/sdev^2` with the following code
```{r wtreg}
climate.bas <- bas.lm(deltaT ~ proxy * poly(latitude, 2),
data = climate,
weights = 1 / sdev^2,
prior = "hyper-g-n", alpha = 3.0,
n.models = 2^20,
force.heredity=TRUE,
modelprior = uniform()
)
```
Examining the image of the top models,
```{r climate-image, fig.width=7}
image(climate.bas, rotate = F)
```
we see that all levels of a factor enter or drop from the model together, as well as the vectors in the design matrix to represent the term `poly`.
Rerunning without the constraint,
```{r wtreg-wo-contraint, fig.width=7}
# May take a while to enumerate all 2^20 models
climate.bas <- bas.lm(deltaT ~ proxy * poly(latitude, 2),
data = climate,
weights = 1 / sdev^2,
prior = "hyper-g-n", alpha = 3.0,
n.models = 2^20,
modelprior = uniform(),
force.heredity = FALSE
)
image(climate.bas)
```
allows one to see which factor levels are different from the reference group.
## Summary
`BAS` includes other prior distributions on coefficients and models, as well as `bas.glm` for fitting Generalized Linear Models. The syntax for `bas.glm` and `bas.lm` are not yet the same, particularly for how some of the priors on coefficients are represented, so please see the documentation for more features and details until this is updated or another vignette is added!
For issues or feature requests please submit via the package's github page
[merliseclyde/BAS](https://github.com/merliseclyde/BAS)
| /scratch/gouwar.j/cran-all/cranData/BAS/inst/doc/BAS-vignette.Rmd |
---
title: "Using the Bayesian Adaptive Sampling (BAS) Package for Bayesian Model Averaging and Variable Selection"
author: "Merlise A Clyde"
date: "`r Sys.Date()`"
r_packages:
- rmarkdown
- dplyr
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using the Bayesian Adaptive Sampling (BAS) Package for Bayesian Model Averaging and Variable Selection}
  %\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
# require(knitr)
require(MASS)
require(dplyr)
require(GGally)
```
The `BAS` package provides easy to use functions to implement Bayesian Model Averaging in linear models and generalized linear models.
Prior distributions on coefficients are
based on Zellner's g-prior or mixtures of g-priors, such as the
Zellner-Siow Cauchy prior or mixtures of g-priors from
[Liang et al (2008)](https://doi.org/10.1198/016214507000001337)
for linear models, as well as other options including AIC, BIC, RIC and Empirical Bayes methods. Extensions to Generalized Linear Models are based on the
mixtures of g-priors in GLMs of
[Li and Clyde (2019)](https://doi.org/10.1080/01621459.2018.1469992) using an
integrated Laplace approximation.
`BAS` uses an adaptive sampling algorithm to sample without replacement from the space of models or MCMC sampling which is recommended for sampling problems with a large number of predictors. See [Clyde, Littman & Ghosh](https://doi.org/10.1198/jcgs.2010.09049) for more details for the sampling algorithms.
## Installing BAS
The stable version can be installed easily in the `R` console like any other package:
```{r install, eval=FALSE}
install.packages("BAS")
```
On the other hand, I welcome everyone to use the most recent version
of the package with quick-fixes, new features and probably new
bugs. To get the latest
development version from [GitHub](https://github.com/merliseclyde),
use the `devtools` package from
[CRAN](https://cran.r-project.org/package=devtools) and enter in `R`:
```{r devtools, eval=FALSE}
devtools::install_github("merliseclyde/BAS")
```
As the package does depend on BLAS and LAPACK, installing from GitHub will require that you have FORTRAN and C compilers on your system.
## Demo
We will use the UScrime data to illustrate some of the commands and functionality.
```{r data}
data(UScrime, package = "MASS")
```
Following other analyses, we will go ahead and log transform all of the variables except column 2, which is the indicator variable of the state being a southern state.
```{r transform}
UScrime[, -2] <- log(UScrime[, -2])
```
To get started, we will use `BAS` with the Zellner-Siow Cauchy prior on the coefficients.
```{r bas}
library(BAS)
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null",
modelprior = uniform(), initprobs = "eplogp",
force.heredity = FALSE, pivot = TRUE
)
```
`BAS` uses a model formula similar to `lm` to specify the full model with all of the potential predictors. Here we are using the shorthand `.` to indicate that all remaining variables in the data frame provided by the `data` argument should be included. Different prior distributions on the regression coefficients may be specified using the `prior` argument, and include
* "BIC"
* "AIC
* "g-prior"
* "hyper-g"
* "hyper-g-laplace"
* "hyper-g-n"
* "JZS"
* "ZS-null"
* "ZS-full"
* "EB-local"
* "EB-global"
where the default is the Zellner-Siow prior, "ZS-null", in which all Bayes factors are compared to the null model. The newest prior option, "JZS", also corresponds to the Zellner-Siow prior on the coefficients, but uses numerical integration rather than a Laplace approximation to obtain the marginal likelihoods of the models.
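For example, a sketch (not run) of the same model using the "JZS" option:
```{r JZS, eval=FALSE}
# Sketch (not run): same model as above, but with the "JZS" prior, which uses
# numerical integration rather than a Laplace approximation.
crime.JZS <- bas.lm(y ~ .,
  data = UScrime,
  prior = "JZS",
  modelprior = uniform()
)
```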
By default, `BAS` will try to enumerate all models if $p < 19$ using the default `method="BAS"`. The prior distribution over the models is a `uniform()` distribution which assigns equal probabilities to all models. The last optional argument `initprobs = eplogp` provides a way to initialize the sampling algorithm and order the variables in the tree structure that represents the model space in BAS. The `eplogp` option uses the Bayes factor calibration of p-values $-e p \log(p)$ to provide an approximation to the marginal inclusion probability that the coefficient of each predictor is zero, using the p-values from the full model. Other options for `initprobs` include
* "marg-eplogp""
* "uniform"
* numeric vector of length p
The option "marg-eplogp" uses p-values from the $p$ simple linear regressions (useful for large p or highly correlated variables).
Since we are enumerating all possible models, these options are not important, and `method="deterministic"` may be faster if there are no factors or interactions in the model.
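For example, a sketch (not run) of supplying `initprobs` as a numeric vector of length $p$ (here $p = 15$ predictors):
```{r initprobs-vec, eval=FALSE}
# Sketch (not run): start the sampler with inclusion probability 0.5 for
# each of the 15 predictors rather than using the eplogp calibration.
crime.init <- bas.lm(y ~ .,
  data = UScrime,
  prior = "ZS-null",
  modelprior = uniform(),
  initprobs = rep(0.5, 15)
)
```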
## Plots
Some graphical summaries of the output may be obtained by the `plot` function
```{r, fig.show='hold'}
plot(crime.ZS, ask = F)
```
which produces a panel of four plots. The first is a plot of residuals and fitted values under Bayesian Model Averaging. Ideally, if our model assumptions hold, we will not see outliers or non-constant variance. The second plot shows the cumulative probability of the models in the order that they are sampled. This plot indicates that the cumulative probability is leveling off as each additional model adds only a small increment to the cumulative probability, while earlier, there are larger jumps corresponding to discovering a new high probability model. The third plot shows the dimension of each model (the number of regression coefficients including the intercept) versus the log of the marginal likelihood of the model. The last plot shows the marginal posterior inclusion probabilities (pip) for each of the covariates, with marginal pips greater than 0.5 shown in red. The variables with pip > 0.5 correspond to what is known as the median probability model. Variables with high inclusion probabilities are generally important for explaining the data or prediction, but marginal inclusion probabilities may be small if there are predictors that are highly correlated, similar to how p-values may be large in the presence of multicollinearity.
Individual plots may be obtained using the `which` option.
```{r pip, fig.width=5, fig.height=5}
plot(crime.ZS, which = 4, ask = FALSE, caption = "", sub.caption = "")
```
`BAS` has `print` and `summary` methods defined for objects of class `bas`. Typing the objects name
```{r print}
crime.ZS
```
returns a summary of the marginal inclusion probabilities, while the `summary` function
provides
```{r summary}
options(width = 80)
summary(crime.ZS)
```
a list of the top 5 models (in terms of posterior probability) with the zero-one indicators for variable inclusion. The other columns in the summary are the Bayes factor of each model to the highest probability model (hence its Bayes factor is 1), the posterior probabilities of the models, the ordinary $R^2$ of the models, the dimension of the models (number of coefficients including the intercept) and the log marginal likelihood under the selected prior distribution.
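The summary may also be coerced to a data frame for further processing or nicer printing, for example:
```{r summary-df, eval=FALSE}
# Sketch (not run): format the model summary as a table.
knitr::kable(as.data.frame(summary(crime.ZS)))
```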
## Visualization of the Model Space
To see beyond the first five models, we can represent the collection of the models via an `image` plot. By default this shows the top 20 models.
```{r image, fig.width=5, fig.height=5}
image(crime.ZS, rotate = F)
```
This image has rows that correspond to each of the variables and intercept, with labels for the variables on the y-axis. The x-axis corresponds to the possible models. These are sorted by their posterior probability from best at the left
to worst at the right with the rank on the top x-axis.
Each column represents one of the 20 models. The variables
that are excluded in a model are shown in black for each column, while
the variables that are included are colored, with the color related to
the log posterior probability.
The color of each column is proportional to the log of the posterior probabilities (the lower x-axis) of that model. The log posterior probabilities are scaled so that 0 corresponds to the lowest probability model in the top 20, so the values on the axis are log Bayes factors for comparing each model to the lowest probability model among the top 20. Models that are the same color have similar log Bayes factors, which allows us to view clusters of models whose Bayes factors differ by amounts that are not "worth a bare mention".
This plot indicates that the police expenditures in the two years do not enter the model together, an indication of the high correlation between the two variables.
## Posterior Distributions of Coefficients
To examine the marginal distributions of the two coefficients for the police expenditures, we can extract the coefficients estimates and standard deviations under BMA.
```{r coef}
coef.ZS <- coef(crime.ZS)
```
An optional argument `n.models` to `coef` will use only the top `n.models` models for BMA, which may be more computationally efficient for large problems.
Plots of the posterior distributions averaging over all of the models are obtained using the `plot` method for the `bas` coefficient object.
```{r plot}
plot(coef.ZS, subset = c(5:6), ask = F)
```
The vertical bar represents the posterior probability that the coefficient is 0, while
the bell-shaped curve represents the density of plausible values from all of the models where the coefficient is non-zero. The density is scaled so that its height equals the probability that the coefficient is non-zero. Omitting the `subset` argument provides all of the marginal distributions.
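As a sketch (not run), the height of the vertical bar can be recovered from the coefficient object, assuming (as in its print method) that the marginal inclusion probabilities are stored in the `probne0` element:
```{r prob-zero, eval=FALSE}
# Sketch (not run): posterior probability that each of the two police
# expenditure coefficients is exactly zero, assuming probne0 holds the
# marginal inclusion probabilities.
round(1 - coef.ZS$probne0[5:6], 3)
```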
To obtain credible intervals for coefficients, `BAS` includes a `confint` method to create Highest Posterior Density intervals from the summaries from `coef`.
```{r confint-coef}
confint(coef.ZS)
```
where the third column is the posterior mean.
This uses Monte Carlo sampling to draw from the mixture model over coefficients, where models are sampled based on their posterior probabilities.
We can also plot these via
```{r plot-confint, fig.width=7}
plot(confint(coef.ZS, parm = 2:16))
```
using the `parm` argument to select which coefficients to plot (the intercept is `parm=1`).
For estimation under selection, `BAS` supports additional arguments
via `estimator`. The default is `estimator="BMA"` which uses all models or `n.models`. Other options include estimation under the highest probability model
```{r, warning=FALSE, fig.width=7}
plot(confint(coef(crime.ZS, estimator = "HPM")))
```
or the median probability model
```{r, warning=FALSE, fig.width=7, eval=FALSE}
plot(confint(coef(crime.ZS, estimator = "MPM")))
```
where variables that are excluded have distributions that are point masses at zero under selection.
## Prediction
`BAS` has methods defined to return fitted values, `fitted`, using the observed design matrix and predictions at either the observed data or potentially new values, `predict`, as with `lm`.
```{r choice of estimator}
muhat.BMA <- fitted(crime.ZS, estimator = "BMA")
BMA <- predict(crime.ZS, estimator = "BMA")
# predict has additional slots for fitted values under BMA, predictions under each model
names(BMA)
```
Plotting the two sets of fitted values,
```{r, fig.width=5, fig.height=5}
par(mar = c(9, 9, 3, 3))
plot(muhat.BMA, BMA$fit,
pch = 16,
xlab = expression(hat(mu[i])), ylab = expression(hat(Y[i]))
)
abline(0, 1)
```
we see that they are in perfect agreement. That is always the case, as the posterior mean for the regression mean function at a point $x$ is the expected posterior predictive value for $Y$ at $x$. This is true not only for estimators such as BMA, but also for the expected values under model selection.
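A quick numerical check of this agreement:
```{r check-fit}
# The two sets of fitted values should agree up to numerical tolerance.
all.equal(as.vector(muhat.BMA), as.vector(BMA$fit))
```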
### Inference with model selection ###
In addition to using BMA, we can use the posterior means under model selection. This corresponds to a decision rule that combines estimation and selection. `BAS` currently implements the following options
**highest probability model:**
```{r HPM}
HPM <- predict(crime.ZS, estimator = "HPM")
# show the indices of variables in the best model where 0 is the intercept
HPM$bestmodel
```
A little more interpretable version with names:
```{r}
variable.names(HPM)
```
**median probability model:**
```{r MPM}
MPM <- predict(crime.ZS, estimator = "MPM")
variable.names(MPM)
```
This is the model where all predictors have an inclusion probability greater than or equal to 0.5. This coincides with the HPM if the predictors are all mutually orthogonal, and in this case is the best predictive model under squared error loss.
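As a sketch (not run), the same set of variables can be recovered by thresholding the marginal inclusion probabilities at 0.5, assuming the `bas` object stores them in `probne0` with names in `namesx`:
```{r mpm-pip, eval=FALSE}
# Sketch (not run): variables (and the intercept) with marginal posterior
# inclusion probability of at least 0.5, i.e. the median probability model.
crime.ZS$namesx[crime.ZS$probne0 >= 0.5]
```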
Note that the best model can also be extracted from the attribute in the fitted values.
**best predictive model:**
In general, the HPM or MPM are not the best predictive models, which from a Bayesian decision theory perspective would be the model that is closest to BMA predictions under squared error loss.
```{r BPM}
BPM <- predict(crime.ZS, estimator = "BPM")
variable.names(BPM)
```
Let's see how they compare:
```{r, fig.width=6, fig.height=6}
GGally::ggpairs(data.frame(
  HPM = as.vector(HPM$fit), # all four of these came from predict,
  MPM = as.vector(MPM$fit), # so we extract the fitted values from
  BPM = as.vector(BPM$fit), # the fit element of each object
  BMA = as.vector(BMA$fit)
))
```
Using the `se.fit = TRUE` option with `predict` we can also calculate standard deviations for prediction or for the mean and use this as input for the `confint` function for the prediction object.
```{r se, fig.width=7}
BPM <- predict(crime.ZS, estimator = "BPM", se.fit = TRUE)
crime.conf.fit <- confint(BPM, parm = "mean")
crime.conf.pred <- confint(BPM, parm = "pred")
plot(crime.conf.fit)
plot(crime.conf.pred)
```
For prediction at new points, we can supply a new dataframe to the predict function as in `lm`.
```{r pred}
new.pred <- predict(crime.ZS, newdata = UScrime, estimator = "MPM")
```
## Alternative algorithms
`BAS` has several options for sampling from the model space with or without enumeration. The (current) default `method="BAS"` samples models without replacement using estimates of the marginal inclusion probabilities using the algorithm described in [Clyde et al (2011)](https://doi.org/10.1198/jcgs.2010.09049). The initial sampling probabilities provided by `initprobs` are updated based on the sampled models, every `update` iterations.
This can be more efficient in some cases if a large fraction of the model space has been sampled; however, in cases of high correlation and a large number of predictors, this can lead to biased estimates
[Clyde and Ghosh (2012)](https://doi.org/10.1093/biomet/ass040), in which case MCMC is preferred. The `method="MCMC"` is described below and is better for large $p$.
A deterministic sampling scheme is also available for enumeration;
```{r}
system.time(
for (i in 1:10) {
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null", method = "BAS",
modelprior = uniform(), initprobs = "eplogp"
)
}
)
system.time(
for (i in 1:10) {
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null", method = "deterministic",
modelprior = uniform(), initprobs = "eplogp"
)
}
)
```
which is faster for enumeration than the default `method="BAS"`.
## Beyond Enumeration
Many problems are too large to enumerate all possible models. In such cases we may use `method="BAS"` to sample without replacement or the `method="MCMC"` option to sample models using Markov chain Monte Carlo sampling based on their posterior probabilities. For spaces where the number of models greatly exceeds the number of models to sample, the MCMC option is recommended as it provides estimates with low bias compared to the sampling without replacement of BAS (Clyde and Ghosh 2012).
```{r MCMC}
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null",
modelprior = uniform(),
method = "MCMC"
)
```
This will run the MCMC sampler until the number of unique sampled models exceeds `n.models` which is $2^p$ (if $p < 19$) by default or until `MCMC.iterations` has been exceeded, where `MCMC.iterations = n.models*2` by default.
### Estimates of Marginal Posterior Inclusion Probabilities (pip) ###
With MCMC sampling there are two estimates of the marginal inclusion probabilities: `object$probne0` which are obtained by using the re-normalized posterior odds from sampled models to estimate probabilities and the estimates based on Monte Carlo frequencies `object$probs.MCMC`. These should be in close agreement if the MCMC sampler has run for enough iterations.
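A quick sketch (not run) of a numerical comparison of the two sets of estimates:
```{r pip-diff, eval=FALSE}
# Sketch (not run): largest absolute difference between the renormalized and
# Monte Carlo estimates of the inclusion probabilities; a small value
# suggests the sampler has run long enough.
max(abs(crime.ZS$probne0 - crime.ZS$probs.MCMC))
```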
`BAS` includes a diagnostic function to compare the two sets of estimates of posterior inclusion probabilities and posterior model probabilities
```{r diagnostics}
diagnostics(crime.ZS, type = "pip", pch = 16)
diagnostics(crime.ZS, type = "model", pch = 16)
```
In the left hand plot of pips, each point represents one posterior inclusion probability for the 15 variables estimated under the two methods. The two estimators are in pretty close agreement. The plot of the model probabilities suggests that we should use more `MCMC.iterations` if we want more accurate estimates of the posterior model probabilities.
```{r biggerMCMC, eval=FALSE}
crime.ZS <- bas.lm(y ~ .,
data = UScrime,
prior = "ZS-null",
modelprior = uniform(),
method = "MCMC", MCMC.iterations = 10^6
)
diagnostics(crime.ZS, type="model", pch=16)
```
## Outliers
BAS can also be used for exploring mean shift or variance inflation outliers by adding indicator variables for each case being an outlier (the mean is not given by the regression) or not. This is similar to the MC3.REG function in BMA, although here we are using a g-prior or mixture of g-priors for the coefficients for the outlier means.
Using the stackloss data, we can add an identity matrix to the original dataframe, where each column is an indicator that the ith case is an outlier.
```{r add-out}
data("stackloss")
stackloss <- cbind(stackloss, diag(nrow(stackloss)))
stack.bas <- bas.lm(stack.loss ~ .,
data = stackloss,
method = "MCMC", initprobs = "marg-eplogp",
prior = "ZS-null",
modelprior = tr.poisson(4, 10),
MCMC.iterations = 200000
)
```
The above call introduces truncated prior distributions on the model space; in this case the number of variables to be included has a Poisson distribution with mean 4 (under no truncation), and the truncation point is at 10, so that all models with more than 10 variables (one half of the number of cases, rounded down) have probability zero. This avoids exploration of models that are not full rank.
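To see what this prior implies about model size, the truncated Poisson probabilities can be computed directly in base R (shown for illustration only; the normalization used internally may differ):
```{r tr-poisson}
# Poisson(4) pmf truncated to models with at most 10 variables, renormalized.
k <- 0:10
prior.size <- dpois(k, lambda = 4) / sum(dpois(k, lambda = 4))
round(setNames(prior.size, k), 3)
```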
Looking at the summaries
```{r}
knitr::kable(as.data.frame(summary(stack.bas)))
```
## Factors and Hierarchical Heredity
BAS now includes constraints for factors so that all terms that represent a factor are either included or excluded together.
To illustrate, we will use the data set `ToothGrowth` and convert `dose` to a factor:
```{r}
data(ToothGrowth)
ToothGrowth$dose <- factor(ToothGrowth$dose)
levels(ToothGrowth$dose) <- c("Low", "Medium", "High")
```
and fit the model of main effects and two way interaction without any constraints:
```{r, fig.width=7}
TG.bas <- bas.lm(len ~ supp*dose,
data = ToothGrowth,
modelprior = uniform(), method = "BAS"
)
image(TG.bas)
```
From the image of the model space, we see that levels of a factor enter or drop from the model independently and that interactions may be included without the main effects. This may lead to more parsimonious models, however, the hypotheses that are being tested about the coefficients that represent the factor depend on the choice for the reference group. To force levels of a factor to enter or leave together we can use `force.heredity = TRUE`.
The `force.heredity` option also forces interactions to be included only if the main effects are also included, or for models with several factors and higher order interactions, the heredity constraint implies that all lower order interactions must be included before adding higher order interactions.
```{r, fig.width=7}
TG.bas <- bas.lm(len ~ supp * dose,
data = ToothGrowth,
modelprior = uniform(), method = "BAS", force.heredity = TRUE
)
image(TG.bas)
```
`force.heredity` is set to `FALSE` for the sampling methods `MCMC+BAS` and `deterministic`. If there are more than 20 predictors and factors, then we recommend using `MCMC` to enforce the constraints. Alternatively,
there is a function, `force.heredity.bas`, to
post-process the output to drop models that violate the hierarchical heredity constraint:
```{r force-herid}
TG.bas <- bas.lm(len ~ supp * dose,
data = ToothGrowth,
modelprior = uniform(), method = "BAS", force.heredity = FALSE
)
TG.herid.bas <- force.heredity.bas(TG.bas)
```
that can be used with those sampling methods.
## Weighted Regression
`BAS` can perform weighted regression by supplying an optional weight vector that is of the same length as the response where the assumption is that the variance of the response is proportional to 1/weights. The g-prior incorporates the
weights in the prior covariance,
$$
\sigma^2 g (X_\gamma^T W X_\gamma)^{-1}
$$
where $X_\gamma$ is the design matrix under model $\gamma$ and $W$ is the $n \times n$ diagonal matrix with the weights on the diagonal.
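As a generic base-R illustration (not specific to `BAS`) of the role of the weights, weighted least squares with weights $w$ is equivalent to ordinary least squares after scaling the response and design by $\sqrt{w}$:
```{r wls-equiv}
# Generic check: WLS with weights w equals OLS on sqrt(w)-scaled data.
set.seed(42)
n <- 50
x <- runif(n)
w <- runif(n, 0.5, 2)
y.sim <- 1 + 2 * x + rnorm(n, sd = 1 / sqrt(w))
fit.wls <- lm(y.sim ~ x, weights = w)
fit.ols <- lm(I(sqrt(w) * y.sim) ~ 0 + I(sqrt(w)) + I(sqrt(w) * x))
all.equal(unname(coef(fit.wls)), unname(coef(fit.ols)))
```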
To illustrate, we will use the climate data included in the `BAS` package
```{r climate}
data(climate, package="BAS")
str(climate)
summary(climate)
```
which includes measurements of changes in temperature (`deltaT`) at various `latitude`s, as well as a measure of the accuracy of the measured values (`sdev`), for 8 different types (`proxy`) of obtaining measurements. We will use this to explore weighted regression and the option to group terms in factors or from `poly`. For illustration purposes, we will eliminate `proxy == 6`, which has only one level as the interactions are not estimable, and then convert `proxy` to a factor.
```{r read-climate}
library(dplyr)
climate <- filter(climate, proxy != 6) %>%
mutate(proxy = factor(proxy))
```
We can fit a weighted regression with `weights = 1/sdev^2` with the following code
```{r wtreg}
climate.bas <- bas.lm(deltaT ~ proxy * poly(latitude, 2),
data = climate,
weights = 1 / sdev^2,
prior = "hyper-g-n", alpha = 3.0,
n.models = 2^20,
force.heredity=TRUE,
modelprior = uniform()
)
```
Examining the image of the top models,
```{r climate-image, fig.width=7}
image(climate.bas, rotate = F)
```
we see that all levels of a factor enter or drop from the model together, as well as the vectors in the design matrix to represent the term `poly`.
Rerunning without the constraint,
```{r wtreg-wo-contraint, fig.width=7}
# May take a while to enumerate all 2^20 models
climate.bas <- bas.lm(deltaT ~ proxy * poly(latitude, 2),
data = climate,
weights = 1 / sdev^2,
prior = "hyper-g-n", alpha = 3.0,
n.models = 2^20,
modelprior = uniform(),
force.heredity = FALSE
)
image(climate.bas)
```
allows one to see which factor levels are different from the reference group.
## Summary
`BAS` includes other prior distributions on coefficients and models, as well as `bas.glm` for fitting Generalized Linear Models. The syntax for `bas.glm` and `bas.lm` are not yet the same, particularly for how some of the priors on coefficients are represented, so please see the documentation for more features and details until this is updated or another vignette is added!
For issues or feature requests please submit via the package's github page
[merliseclyde/BAS](https://github.com/merliseclyde/BAS)
| /scratch/gouwar.j/cran-all/cranData/BAS/vignettes/BAS-vignette.Rmd |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## make basis functions
########################################################################
## makes negative values 0
pos<-function(vec){
#replace(vec,vec<0,0)
(abs(vec)+vec)/2
}
## largest value of basis function, assuming x's in [0,1], used for scaling
const<-function(signs,knots,degree){
cc<-prod(((signs+1)/2 - signs*knots))^degree
if(cc==0)
return(1)
return(cc)
} # since a product, can find for functional & categorical pieces separately, take product
## make basis function (from continuous variables)
makeBasis<-function(signs,vars,knots,datat,degree){
cc<-const(signs,knots,degree)
temp1<-pos(signs*(datat[vars,,drop=F]-knots))^degree # this only works for t(data)...
if(length(vars)==1){
return(c(temp1)/cc)
} else{
temp2<-1
for(pp in 1:length(vars)){ # faster than apply
temp2<-temp2*temp1[pp,]
}
return(temp2/cc)
}
}
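## Illustrative sketch (comments only, not executed): with degree=1,
## signs=c(1,1), vars=c(1,2), and knots=c(0.2,0.7), the call
## makeBasis(c(1,1), c(1,2), c(0.2,0.7), t(X), 1)
## evaluates pos(X[,1]-0.2)*pos(X[,2]-0.7) and divides by
## const(c(1,1), c(0.2,0.7), 1) = 0.8*0.3, so the tensor-product basis
## function attains a maximum of 1 when the columns of X lie in [0,1].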
## make basis function (from categorical variables)
makeBasisCat<-function(vars,sub,data){
temp<-1
for(ii in 1:length(vars)){
temp<-temp*as.numeric(data[,vars[ii]] %in% sub[[ii]])
}
return(temp)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/basis_funcs.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## main BASS function
########################################################################
#' @title Bayesian Adaptive Spline Surfaces (BASS)
#'
#' @description Fits a BASS model using RJMCMC. Optionally uses parallel tempering to improve mixing. Can be used with scalar or functional response. Also can use categorical inputs.
#' @param xx a data frame or matrix of predictors. Categorical predictors should be included as factors.
#' @param y a response vector (scalar response) or matrix (functional response). Note: If \code{sum(y^2)} is large (i.e. \code{1e10}), please center/rescale (and rescale \code{g1} and \code{g2} if necessary).
#' @param maxInt integer for maximum degree of interaction in spline basis functions. Defaults to the number of predictors, which could result in overfitting.
#' @param maxInt.func (functional response only) integer for maximum degree of interaction in spline basis functions describing the functional response.
#' @param maxInt.cat (categorical input only) integer for maximum degree of interaction of categorical inputs.
#' @param xx.func a vector, matrix or data frame of functional variables.
#' @param degree degree of splines. Stability should be examined for anything other than 1.
#' @param maxBasis maximum number of basis functions. This should probably only be altered if you run out of memory.
#' @param npart minimum number of non-zero points in a basis function. If the response is functional, this refers only to the portion of the basis function coming from the non-functional predictors. Defaults to 20 or 0.1 times the number of observations, whichever is smaller.
#' @param npart.func same as npart, but for functional portion of basis function.
#' @param nmcmc number of RJMCMC iterations.
#' @param nburn number of the \code{nmcmc} iterations to disregard.
#' @param thin keep every \code{thin} samples
#' @param g1 shape for IG prior on \eqn{\sigma^2}.
#' @param g2 scale for IG prior on \eqn{\sigma^2}.
#' @param s2.lower lower bound for s2. Turns IG prior for s2 into a truncated IG.
#' @param h1 shape for gamma prior on \eqn{\lambda}.
#' @param h2 rate for gamma prior on \eqn{\lambda}. This is the primary way to control overfitting. A large value of \code{h2} favors fewer basis functions.
#' @param a.tau shape for gamma prior on \eqn{\tau}.
#' @param b.tau rate for gamma prior on \eqn{\tau}. Defaults to one over the number of observations, which centers the prior for the basis function weights on the unit information prior.
#' @param w1 nominal weight for degree of interaction, used in generating candidate basis functions. Should be greater than 0.
#' @param w2 nominal weight for variables, used in generating candidate basis functions. Should be greater than 0.
#' @param beta.prior what type of prior to use for basis coefficients, "g" or "jeffreys"
#' @param temp.ladder temperature ladder used for parallel tempering. The first value should be 1 and the values should increase.
#' @param start.temper when to start tempering (after how many MCMC iterations). Defaults to 1000 or half of burn-in, whichever is smaller.
#' @param curr.list list of starting models (one element for each temperature), could be output from a previous run under the same model setup.
#' @param save.yhat logical; should predictions of training data be saved?
#' @param small logical; if true, returns a smaller object by leaving out \code{curr.list} and other unnecessary objects. Use in combination with \code{save.yhat} to get smaller memory footprint for very large models.
#' @param verbose logical; should progress be displayed?
#' @param ret.str logical; return data and prior structures
#' @details Explores BASS model space by RJMCMC. The BASS model has \deqn{y = f(x) + \epsilon, ~~\epsilon \sim N(0,\sigma^2)} \deqn{f(x) = a_0 + \sum_{m=1}^M a_m B_m(x)} and \eqn{B_m(x)} is a BASS basis function (tensor product of spline basis functions). We use priors \deqn{a \sim N(0,\sigma^2/\tau (B'B)^{-1})} \deqn{M \sim Poisson(\lambda)} as well as the priors mentioned in the arguments above.
#' @return An object of class 'bass'. The other output will only be useful to the advanced user. Rather, users may be interested in prediction and sensitivity analysis, which are obtained by passing the entire object to the predict.bass or sobol functions.
#' @keywords nonparametric regression splines functional data analysis
#' @seealso \link{predict.bass} for prediction and \link{sobol} for sensitivity analysis.
#' @export
#' @import stats
#' @import utils
#' @example inst/examples.R
#'
bass<-function(xx,y,maxInt=3,maxInt.func=3,maxInt.cat=3,xx.func=NULL,degree=1,maxBasis=1000,npart=NULL,npart.func=NULL,nmcmc=10000,nburn=9000,thin=1,g1=0,g2=0,s2.lower=0,h1=10,h2=10,a.tau=.5,b.tau=NULL,w1=5,w2=5,beta.prior='g',temp.ladder=NULL,start.temper=NULL,curr.list=NULL,save.yhat=TRUE,small=FALSE,verbose=TRUE,ret.str=F){
cl<-match.call()
########################################################################
## setup
## check inputs
if(!posInt(maxInt))
stop('invalid maxInt')
if(!posInt(maxInt.func))
stop('invalid maxInt.func')
if(!posInt(maxInt.cat))
stop('invalid maxInt.cat')
#if(!posInt(degree))
# stop('invalid degree')
if(!is.null(npart)){
if(!posInt(npart))
stop('invalid npart')
}
if(!is.null(npart.func)){
if(!posInt(npart.func))
stop('invalid npart.func')
}
if(!is.null(start.temper)){
if(!posInt(start.temper))
stop('invalid start.temper')
}
if(!posInt(nmcmc))
stop('invalid nmcmc')
#if(!posInt(nburn))
# stop('invalid nburn')
if(!posInt(thin))
stop('invalid thin')
if(nburn>=nmcmc)
stop('nmcmc must be greater than nburn')
if(thin>(nmcmc-nburn))
stop('combination of thin, nmcmc and nburn results in no samples')
if(any(c(g1,g2)<0))
stop('g1 and g2 must be greater than or equal to 0')
if(any(c(h1,h2,a.tau,b.tau,w1,w2)<=0))
stop('h1,h2,a.tau,b.tau,w1,w2 must be greater than 0')
if(s2.lower<0)
stop('s2.lower must be >= 0')
## process data
if(any(is.na(xx)) | any(is.na(y)))
stop('Current version does not allow missing data')
y<-as.matrix(y)
xx<-as.data.frame(xx)
dx<-dim(xx)
dxf<-dim(xx.func)
dy<-dim(y)
if(any(dy==1))
y<-c(y)
dy<-dim(y)
if(is.null(dy)){
func<-F
pfunc<-0
if(!is.null(xx.func))
warning('xx.func ignored because there is no functional variable')
if(length(y)!=dx[1])
stop('dimension mismatch between xx and y')
} else {
func<-T
if(is.null(xx.func))
stop('missing xx.func')
xx.func<-as.matrix(xx.func)
dxf<-dim(xx.func)
if(dy[1]!=dx[1]){
y<-t(y)
dy<-dim(y)
}
if(dy[1]!=dx[1])
stop('dimension mismatch between xx and y')
if(dy[2]!=dxf[1])
xx.func<-t(xx.func)
dxf<-dim(xx.func)
if(dy[2]!=dxf[1])
stop('dimension mismatch between xx.func and y')
pfunc<-dxf[2]
range.func<-apply(xx.func,2,range)
xx.func<-apply(xx.func,2,scale_range)
}
if(func){
if(dx[1]==dxf[1]) # this is dangerous because we would automatically correct it if we could tell it was wrong, but can't tell if it is wrong here
warning('Possible dimension problem: make sure rows of y correspond to functional data')
}
des<-T
cx<-sapply(xx,class)
cx.factor<- cx == 'factor'
if(any(cx.factor)){
cat<-T
if(all(cx.factor))
des<-F
xx.des<-as.matrix(xx[,!cx.factor,drop=F])
xx.cat<-xx[,cx.factor,drop=F]
} else{
cat<-F
xx.des<-as.matrix(xx)
xx.cat<-NULL
}
if(des){
range.des<-apply(xx.des,2,range)
xx.des<-apply(xx.des,2,scale_range)
}
des.vars<-which(!cx.factor)
cat.vars<-which(cx.factor)
pdes<-length(des.vars)
pcat<-length(cat.vars)
type<-''
if(des)
type<-paste(type,'des',sep='_')
if(cat)
type<-paste(type,'cat',sep='_')
if(func)
type<-paste(type,'func',sep='_')
# so cases are des, cat, des_cat, des_func, cat_func, des_cat_func
## handle tempering arguements
if(is.null(temp.ladder)){
temp.ladder<-1
}
if(max(temp.ladder)>(dx[1]/2)){
temp.ladder<-temp.ladder[temp.ladder<(dx[1]/2)]
if(length(temp.ladder)==0)
stop('invalid temp.ladder (temperatures too high)')
}
if(min(temp.ladder)!=1)
warning('min(temp.ladder) should equal 1')
ntemps<-length(temp.ladder)
if(ntemps==1){
start.temper<-nmcmc
}
if(is.null(start.temper))
start.temper<-min(1000,ceiling(nburn*.5))
temp.val<-matrix(nrow=nmcmc,ncol=ntemps)
if(any(temp.ladder<=0))
stop('temp.ladder must be greater than 0 (should be greater than 1)')
if(any(temp.ladder<1))
warning('temp.ladder should be greater than 1')
## make a data object
data<-list()
data$y<-y
if(des){
data$xxt.des<-t(xx.des)
data$vars.len.des<-NA
data$xxt.des.unique<-list()
data$unique.ind.des<-list()
for(i in 1:pdes){
data$xxt.des.unique[[i]]<-unique(data$xxt.des[i,])
data$unique.ind.des[[i]]<-which(!duplicated(data$xxt.des[i,])) # gets the first instance of each unique value
data$vars.len.des[i]<-length(data$xxt.des.unique[[i]])
}
}
if(func){
data$xxt.func<-t(xx.func)
data$vars.len.func<-NA
data$xxt.func.unique<-list()
data$unique.ind.func<-list()
for(i in 1:pfunc){
data$xxt.func.unique[[i]]<-unique(data$xxt.func[i,])
data$unique.ind.func[[i]]<-which(!duplicated(data$xxt.func[i,])) # gets the first instance of each unique value
data$vars.len.func[i]<-length(data$xxt.func.unique[[i]])
}
}
if(cat){
data$levels<-lapply(xx.cat,levels)
data$nlevels<-sapply(data$levels,length)
data$xx.cat<-xx.cat
}
data$pdes<-pdes
data$pfunc<-pfunc
data$pcat<-pcat
data$p<-dx[2]
data$ndes<-dx[1]
data$nfunc<-dxf[1]
data$des<-des
data$func<-func
data$cat<-cat
data$n<-prod(data$ndes,data$nfunc)
data$ssy<-sum(data$y^2)
data$death.prob.next<-1/3
data$birth.prob<-1/3
data$birth.prob.last<-1/3
data$death.prob<-1/3
data$itemp.ladder<-1/temp.ladder
## make a prior object
npart.des<-npart
if(is.null(npart.des)){
npart.des<-min(20,.1*data$ndes)
}
if(is.null(npart.func) & func){
npart.func<-min(20,.1*data$nfunc)
}
maxBasis<-min(maxBasis,data$n) # can't have more basis functions than data points
maxInt.des<-min(maxInt,pdes) # can't have more interactions than variables
maxInt.cat<-min(maxInt.cat,pcat)
maxInt.func<-min(maxInt.func,pfunc)
# prior object
prior<-list()
prior$maxInt.des<-maxInt.des
prior$maxInt.cat<-maxInt.cat
prior$maxInt.func<-maxInt.func
prior$q<-degree
prior$npart.des<-npart.des
prior$npart.func<-npart.func
prior$h1<-h1
prior$h2<-h2
prior$g1<-g1
prior$g2<-g2
prior$s2.lower<-s2.lower
prior$a.beta.prec<-a.tau
if(is.null(b.tau)){
prior$b.beta.prec<-2/data$n
} else{
prior$b.beta.prec<-b.tau
}
prior$maxBasis<-maxBasis
prior$minInt<-0
if(des+cat+func==1) # if there is only one part, can't have minInt of 0
prior$minInt<-1
prior$miC<-abs(prior$minInt-1)
prior$beta.gprior.ind<-as.numeric(beta.prior=='g')
prior$beta.jprior.ind<-as.numeric(beta.prior=='jeffreys')
## make an object to store current MCMC state (one for each temperature)
if(is.null(curr.list)){
curr.list<-list()
for(i in 1:ntemps){
curr.list[[i]]<-list()
if(des){
curr.list[[i]]$I.star.des<-rep(w1,prior$maxInt.des+prior$miC)
curr.list[[i]]$I.vec.des<-curr.list[[i]]$I.star.des/sum(curr.list[[i]]$I.star.des)
curr.list[[i]]$z.star.des<-rep(w2,data$pdes)
curr.list[[i]]$z.vec.des<-curr.list[[i]]$z.star.des/sum(curr.list[[i]]$z.star.des)
curr.list[[i]]$des.basis<-matrix(rep(1,data$ndes))
}
if(cat){
curr.list[[i]]$I.star.cat<-rep(w1,prior$maxInt.cat+prior$miC)
curr.list[[i]]$I.vec.cat<-curr.list[[i]]$I.star.cat/sum(curr.list[[i]]$I.star.cat)
curr.list[[i]]$z.star.cat<-rep(w2,data$pcat)
curr.list[[i]]$z.vec.cat<-curr.list[[i]]$z.star.cat/sum(curr.list[[i]]$z.star.cat)
curr.list[[i]]$cat.basis<-matrix(rep(1,data$ndes))
}
if(func){
curr.list[[i]]$I.star.func<-rep(w1,prior$maxInt.func+prior$miC)
curr.list[[i]]$I.vec.func<-curr.list[[i]]$I.star.func/sum(curr.list[[i]]$I.star.func)
curr.list[[i]]$z.star.func<-rep(w2,data$pfunc)
curr.list[[i]]$z.vec.func<-curr.list[[i]]$z.star.func/sum(curr.list[[i]]$z.star.func)
curr.list[[i]]$func.basis<-matrix(rep(1,data$nfunc))
}
if(des & cat)
curr.list[[i]]$dc.basis<-curr.list[[i]]$des.basis*curr.list[[i]]$cat.basis
curr.list[[i]]$s2<-1
curr.list[[i]]$lam<-1
curr.list[[i]]$beta.prec<-1*prior$beta.gprior.ind
curr.list[[i]]$nbasis<-0
curr.list[[i]]$nc<-1
curr.list[[i]]$knots.des<-matrix(numeric(0),ncol=maxInt.des)
curr.list[[i]]$knotInd.des<-matrix(integer(0),ncol=maxInt.des)
curr.list[[i]]$signs.des<-matrix(integer(0),ncol=maxInt.des)
curr.list[[i]]$vars.des<-matrix(integer(0),ncol=maxInt.des)
curr.list[[i]]$n.int.des<-0
curr.list[[i]]$sub.list<-list()
curr.list[[i]]$sub.size<-matrix(integer(0),ncol=maxInt.cat)
curr.list[[i]]$vars.cat<-matrix(integer(0),ncol=maxInt.cat)
curr.list[[i]]$n.int.cat<-0
curr.list[[i]]$knots.func<-matrix(numeric(0),ncol=maxInt.func)
curr.list[[i]]$knotInd.func<-matrix(integer(0),ncol=maxInt.func)
curr.list[[i]]$signs.func<-matrix(integer(0),ncol=maxInt.func)
curr.list[[i]]$vars.func<-matrix(integer(0),ncol=maxInt.func)
curr.list[[i]]$n.int.func<-0
curr.list[[i]]$Xty<-rep(NA,maxBasis+2)
curr.list[[i]]$Xty[1]<-sum(data$y)
curr.list[[i]]$XtX<-matrix(NA,nrow=maxBasis+2,ncol=maxBasis+2)
curr.list[[i]]$XtX[1,1]<-data$n
curr.list[[i]]$R<-chol(curr.list[[i]]$XtX[1,1])
curr.list[[i]]$R.inv.t<-t(solve(curr.list[[i]]$R))
curr.list[[i]]$bhat<-mean(data$y)
curr.list[[i]]$qf<-crossprod(curr.list[[i]]$R%*%curr.list[[i]]$bhat)
curr.list[[i]]$count<-rep(0,3)
curr.list[[i]]$cmod<-F
curr.list[[i]]$step<-NA
curr.list[[i]]$temp.ind<-i
curr.list[[i]]$type<-type
}
}
# define functions according to type. Doing eval parse every time the functions are used is slow.
funcs<-list()
funcs$birth<-eval(parse(text=paste('birth',type,sep='')))
funcs$death<-eval(parse(text=paste('death',type,sep='')))
funcs$change<-eval(parse(text=paste('change',type,sep='')))
funcs$getYhat<-eval(parse(text=paste('getYhat',type,sep='')))
## prepare storage objects for mcmc draws
nmod.max<-(nmcmc-nburn)/thin # max number of models (models don't necessarily change every iteration)
if(des){
signs.des<-knotInd.des<-vars.des<-array(dim=c(nmod.max,maxBasis,maxInt.des)) # truncate when returning at end of function
n.int.des<-matrix(nrow=nmod.max,ncol=maxBasis) # degree of interaction
}
if(cat){
sub.list<-list() # this is big...
sub.size<-vars.cat<-array(dim=c(nmod.max,maxBasis,maxInt.cat))
n.int.cat<-matrix(nrow=nmod.max,ncol=maxBasis)
}
if(func){
signs.func<-knotInd.func<-vars.func<-array(dim=c(nmod.max,maxBasis,maxInt.func))
# arrays use less space, esp integer arrays
n.int.func<-matrix(nrow=nmod.max,ncol=maxBasis)
}
beta<-matrix(nrow=nmod.max,ncol=maxBasis+1) # +1 for intercept, nmcmc-nburn instead of nmod because beta updates every iteration
nbasis<-s2<-lam<-beta.prec<-NA
cmod<-F # indicator for whether we have changed models since last storing
model.lookup<-NA # lookup table between models and mcmc iterations
log.post.cold<-rep(NA,nmcmc) # log posterior for cold chain (the one we care about)
if(save.yhat){
yhat.sum<-0 # if we don't want to store all yhat draws, can still get running average
yhat<-array(dim=c(nmod.max,data$ndes,data$nfunc))
}
# temperature index
cold.chain<-1 # to start, the cold chain is curr.list[[1]]
temp.ind<-1:ntemps # we will change this vector as we swap temperatures
count.swap<-count.swap1000<-count.swap.prop<-rep(0,ntemps-1) # number of swaps between each set of neighbors
swap<-NA # to keep track of swaps
#require(parallel) # for tempering
########################################################################
## MCMC
if(verbose)
cat('MCMC Start',myTimestamp(),'nbasis:',curr.list[[cold.chain]]$nbasis,'\n')
n.models<-keep.sample<-0 # indexes for storage
for(i in 2:nmcmc){
## update model for each temperature - can be parallel
curr.list<-lapply(curr.list,updateMCMC,prior=prior,data=data,funcs=funcs)
#curr.list<-parLapply(cluster,curr.list,updateMCMC)
#curr.list<-parallel::mclapply(curr.list,updateMCMC,prior=prior,data=data,funcs=funcs,mc.preschedule=T,mc.cores=1)
#curr.list<-parLapplyLB(cl,curr.list,updateMCMC,prior=prior,data=data,funcs=funcs)
# TODO: DO SOMETHING LIKE THIS BUT KEEP EVERYTHING SEPARATE ON THE CLUSTER, all we need is lpost, cmod - MPI
## parallel tempering swap
# if(i>start.temper){# & (i%%20==0)){ #only start after a certain point, and only try every 20
# # sample temp.ind.swap from 1:(ntemps-1), then swap with temp.ind.swap+1
# temp.ind.swap1<-sample(1:(ntemps-1),size=1) # corresponds to temperature temp.ladder[temp.ind.swap1]
# temp.ind.swap2<-temp.ind.swap1+1 # always use the neighboring chain on the right
# chain.ind1<-which(temp.ind==temp.ind.swap1) # which chain has temperature temp.ladder[temp.ind.swap1]
# chain.ind2<-which(temp.ind==temp.ind.swap2)
# alpha.swap<-(data$itemp.ladder[temp.ind.swap1]-data$itemp.ladder[temp.ind.swap2])*(curr.list[[chain.ind2]]$lpost-curr.list[[chain.ind1]]$lpost)
# if(is.nan(alpha.swap) | is.na(alpha.swap)){
# alpha.swap<- -9999
# warning('large values of temp.ladder too large')
# }
# count.swap.prop[temp.ind.swap1]<-count.swap.prop[temp.ind.swap1]+1
# #browser()
# if(log(runif(1)) < alpha.swap){
# # swap temperatures
# temp.ind[chain.ind1]<-temp.ind.swap2
# temp.ind[chain.ind2]<-temp.ind.swap1
# curr.list[[chain.ind1]]$temp.ind<-temp.ind.swap2
# curr.list[[chain.ind2]]$temp.ind<-temp.ind.swap1
#
# count.swap[temp.ind.swap1]<-count.swap[temp.ind.swap1]+1
# count.swap1000[temp.ind.swap1]<-count.swap1000[temp.ind.swap1]+1
# swap[i]<-temp.ind.swap1
# if(temp.ind.swap1==1){
# cmod<-T # we changed models
# cold.chain<-chain.ind2 #which(temp.ind==1)
# }
# }
# }
if(i>start.temper){# & (i%%20==0)){ #only start after a certain point, and only try every 20
# sample temp.ind.swap from 1:(ntemps-1), then swap with temp.ind.swap+1
for(dummy in 1:ntemps){
ts<-sort(sample(1:ntemps,size=2))
temp.ind.swap1<-ts[1]#sample(1:(ntemps-1),size=1) # corresponds to temperature temp.ladder[temp.ind.swap1]
temp.ind.swap2<-ts[2]#temp.ind.swap1+1 # always use the neighboring chain on the right
chain.ind1<-which(temp.ind==temp.ind.swap1) # which chain has temperature temp.ladder[temp.ind.swap1]
chain.ind2<-which(temp.ind==temp.ind.swap2)
alpha.swap<-(data$itemp.ladder[temp.ind.swap1]-data$itemp.ladder[temp.ind.swap2])*(curr.list[[chain.ind2]]$lpost-curr.list[[chain.ind1]]$lpost)
if(is.nan(alpha.swap) | is.na(alpha.swap)){
alpha.swap<- -9999
          warning('temp.ladder values may be too large')
}
count.swap.prop[temp.ind.swap1]<-count.swap.prop[temp.ind.swap1]+1
#browser()
if(log(runif(1)) < alpha.swap){
# swap temperatures
temp.ind[chain.ind1]<-temp.ind.swap2
temp.ind[chain.ind2]<-temp.ind.swap1
curr.list[[chain.ind1]]$temp.ind<-temp.ind.swap2
curr.list[[chain.ind2]]$temp.ind<-temp.ind.swap1
count.swap[temp.ind.swap1]<-count.swap[temp.ind.swap1]+1
count.swap1000[temp.ind.swap1]<-count.swap1000[temp.ind.swap1]+1
swap[i]<-temp.ind.swap1
if(temp.ind.swap1==1){
cmod<-T # we changed models
cold.chain<-chain.ind2 #which(temp.ind==1)
}
}
}
}
log.post.cold[i]<-curr.list[[cold.chain]]$lpost
temp.val[i,]<-temp.ind
## write current model if past burnin and model is unique
if((i>nburn) & (((i-nburn)%%thin)==0)){
# these things are updated every time
keep.sample<-keep.sample+1 # indexes samples
nb<-curr.list[[cold.chain]]$nbasis
nbasis[keep.sample]<-nb
beta[keep.sample,1:(nb+1)]<-curr.list[[cold.chain]]$beta
s2[keep.sample]<-curr.list[[cold.chain]]$s2
lam[keep.sample]<-curr.list[[cold.chain]]$lam
beta.prec[keep.sample]<-curr.list[[cold.chain]]$beta.prec
if(save.yhat){
yhat.current<-funcs$getYhat(curr.list[[cold.chain]],nb)
if(func){
yhat[keep.sample,,]<-yhat.current
} else{
yhat[keep.sample,]<-yhat.current
}
yhat.sum<-yhat.sum+yhat.current
}
# save cold chain basis parms if they are different from previous (cmod=T)
if(cmod || curr.list[[cold.chain]]$cmod){ # can I actually get curr.list[[cold.chain]]$cmod easily from the core it is on?
n.models<-n.models+1 # indexes models
if(nb>0){
if(des){
vars.des[n.models,1:nb,]<-as.integer(curr.list[[cold.chain]]$vars.des)
signs.des[n.models,1:nb,]<-as.integer(curr.list[[cold.chain]]$signs.des)
knotInd.des[n.models,1:nb,]<-as.integer(curr.list[[cold.chain]]$knotInd.des)
n.int.des[n.models,1:nb]<-as.integer(curr.list[[cold.chain]]$n.int.des)
}
if(cat){
vars.cat[n.models,1:nb,]<-as.integer(curr.list[[cold.chain]]$vars.cat)
sub.size[n.models,1:nb,]<-as.integer(curr.list[[cold.chain]]$sub.size)
sub.list[[n.models]]<-curr.list[[cold.chain]]$sub.list
n.int.cat[n.models,1:nb]<-as.integer(curr.list[[cold.chain]]$n.int.cat)
}
if(func){
vars.func[n.models,1:nb,]<-as.integer(curr.list[[cold.chain]]$vars.func)
signs.func[n.models,1:nb,]<-as.integer(curr.list[[cold.chain]]$signs.func)
knotInd.func[n.models,1:nb,]<-as.integer(curr.list[[cold.chain]]$knotInd.func)
n.int.func[n.models,1:nb]<-as.integer(curr.list[[cold.chain]]$n.int.func)
}
}
cmod<-F # reset change model indicator after writing current model
curr.list[[cold.chain]]$cmod<-F
}
model.lookup[keep.sample]<-n.models # update lookup table
}
#if(calibrate){
# theta[i,]<-sampleTheta(curr.list[[cold.chain]])
# delta[i,]<-sampleDelta(curr.list[[cold.chain]])
#}
if(verbose & i%%1000==0){
pr<-c('MCMC iteration',i,myTimestamp(),'nbasis:',curr.list[[cold.chain]]$nbasis)
if(i>start.temper)
pr<-c(pr,'tempering acc',round(count.swap1000/1000*(ntemps-1),3)) # swap acceptance rate
#pr<-c(pr,'tempering acc',round(count.swap/count.swap.prop,3)) # swap acceptance rate
cat(pr,'\n')
count.swap1000<-rep(0,ntemps-1)
}
}
########################################################################
## return
out.yhat<-list()
if(save.yhat){
out.yhat<-list(yhat.mean=yhat.sum/nmod.max,yhat=yhat)
}
out.str<-list()
if(ret.str)
out.str<-list(data=data,prior=prior,funcs=funcs)
out<-list(
call=cl,
beta=beta,
s2=s2,
lam=lam,
nbasis=nbasis,
degree=degree,
nmcmc=nmcmc,
nburn=nburn,
thin=thin,
p=data$p,
beta.prec=beta.prec,
y=y,
log.post.cold=log.post.cold,
swap=swap,
count.swap=count.swap,
count.swap.prop=count.swap.prop,
temp.val=temp.val,
temp.ladder=temp.ladder,
n.models=n.models,
model.lookup=model.lookup,
des=des,func=func,cat=cat,type=type,cx=cx
)
if(!small){
out$curr.list<-curr.list # for restarting
}
mb<-max(nbasis)
out.des<-list()
if(des){
out.des<-list(
knotInd.des=knotInd.des[1:n.models,1:mb,,drop=F],
signs.des=signs.des[1:n.models,1:mb,,drop=F],
vars.des=vars.des[1:n.models,1:mb,,drop=F],
n.int.des=n.int.des[1:n.models,1:mb,drop=F],
maxInt.des=maxInt.des,
pdes=pdes,
xx.des=xx.des,range.des=range.des,
unique.ind.des=data$unique.ind.des
)
if(!small){
out.des$des.basis<-curr.list[[cold.chain]]$des.basis
}
}
out.cat<-list()
if(cat){
out.cat<-list(
vars.cat=vars.cat[1:n.models,1:mb,,drop=F],
sub.size=sub.size[1:n.models,1:mb,,drop=F],
sub.list=sub.list,
n.int.cat=n.int.cat[1:n.models,1:mb,drop=F],
maxInt.cat=maxInt.cat,
pcat=pcat,
xx.cat=xx.cat,
nlevels=data$nlevels
)
if(!small){
out.cat$cat.basis<-curr.list[[cold.chain]]$cat.basis
}
}
out.func<-list()
if(func){
out.func<-list(
knotInd.func=knotInd.func[1:n.models,1:mb,,drop=F],
signs.func=signs.func[1:n.models,1:mb,,drop=F],
vars.func=vars.func[1:n.models,1:mb,,drop=F],
n.int.func=n.int.func[1:n.models,1:mb,drop=F],
maxInt.func=maxInt.func,
pfunc=pfunc,
xx.func=xx.func,range.func=range.func,
unique.ind.func=data$unique.ind.func
)
if(!small){
out.func$func.basis=curr.list[[cold.chain]]$func.basis
}
}
#stopCluster(cluster)
ret<-c(out.yhat,out,out.des,out.cat,out.func,out.str)
class(ret)<-'bass'
return(ret)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/bass.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## main BASS function
########################################################################
#' @title Bayesian Adaptive Spline Surfaces (BASS) with PCA decomposition of response
#'
#' @description Decomposes a multivariate or functional response onto a principal component basis and fits a BASS model to each basis coefficient.
#' @param xx a data frame or matrix of predictors with dimension n x p. Categorical predictors should be included as factors.
#' @param y a response matrix (functional response) with dimension n x m.
#' @param dat optional (for more control) list with elements \code{xx} (same as above), \code{y} (same as above), \code{n.pc} (number of principal components used), \code{basis} (principal components with dimension m x \code{n.pc}), \code{newy} (reduced dimension \code{y} with dimension \code{n.pc} x n), \code{trunc.error} (optional truncation error with dimension n x m), \code{y.m} (vector mean removed before PCA with dimension m), \code{y.s} (vector sd scaled before PCA with dimension m). If \code{dat} is specified, \code{xx}, \code{y} and \code{n.pc} do not need to be specified.
#' @param n.pc number of principal components to use
#' @param perc.var optionally specify percent of variance to explain instead of n.pc
#' @param n.cores integer number of cores (threads) to use
#' @param parType either "fork" or "socket". Forking is typically faster, but not compatible with Windows. If \code{n.cores==1}, \code{parType} is ignored.
#' @param center logical whether to subtract the mean before getting the principal components, or else a numeric vector of dimension m for the center to be used
#' @param scale logical whether to divide by the standard deviation before getting the principal components, or else a numeric vector of dimension m for the scale to be used
#' @param ... arguments to be passed to \code{bass} function calls.
#' @details Gets the PCA decomposition of the response \code{y}, and fits a bass model to each PCA basis coefficient, \code{bass(dat$xx,dat$newy[i,],...)} for \code{i in 1 to n.pc}, possibly in parallel.
#' @return An object of class 'bassBasis' with two elements:
#' \item{mod.list}{list (of length \code{n.pc}) of individual bass models}
#' \item{dat}{same as dat above}
#' @keywords nonparametric regression splines functional data analysis
#' @seealso \link{predict.bassBasis} for prediction and \link{sobolBasis} for sensitivity analysis.
#' @export
#' @import utils
#' @example inst/examplesPCA.R
#'
bassPCA<-function(xx=NULL,y=NULL,dat=NULL,n.pc=NULL,perc.var=99,n.cores=1,parType="fork",center=T,scale=F,...){
if(is.null(dat))
dat<-bassPCAsetup(xx,y,n.pc,perc.var,center,scale)
return(bassBasis(dat,n.cores,parType = parType,...))
}
bassPCAsetup<-function(xx,y,n.pc=NULL,perc.var=99,center=T,scale=F){
if(perc.var>100 | perc.var<0)
stop('perc.var must be between 0 and 100')
n<-nrow(xx)
y<-as.matrix(y)
xx<-as.data.frame(xx)
if(nrow(y)==1 | ncol(y)==1)
stop('univariate y: use bass instead of bassPCA')
if(nrow(y)!=nrow(xx))
y<-t(y)
if(nrow(y)!=nrow(xx))
stop('x,y dimension mismatch')
if(ncol(y)==nrow(y))
warning("Caution: because y is square, please ensure that each row of x corresponds to a row of y (and not a column)")
if(!is.null(n.pc)){
if(n.pc>nrow(y))
      warning('n.pc too large, using all PCs instead')
}
if(inherits(center,'logical') & length(center)==1){
y.m<-colMeans(y)
if(!center)
y.m<-rep(0,ncol(y))
} else if(inherits(center,'numeric') & length(center)==ncol(y)){
y.m<-center
} else{
stop("center parameter wrong dimension")
}
if(inherits(scale,'logical') & length(scale)==1){
y.s<-apply(y,2,sd)
if(!scale)
y.s<-rep(1,ncol(y))
} else if(inherits(scale,'numeric') & length(scale)==ncol(y)){
y.s<-scale
} else{
stop("scale parameter wrong dimension")
}
yc<-t(scale(y,center=y.m,scale=y.s)) # maybe could get away with fewer transposes, but could require a lot of refactoring
S<-svd(yc)
if(is.null(n.pc)){
ev<-S$d^2
n.pc<-which(cumsum(ev/sum(ev))*100>perc.var)[1]
}
basis<-S$u[,1:n.pc,drop=F]%*%diag(S$d[1:n.pc],nrow=n.pc) # columns are basis functions
newy<-t(S$v[,1:n.pc,drop=F])
trunc.error<-basis%*%newy - yc
ret<-list(xx=xx,y=y,n.pc=n.pc,basis=basis,newy=newy,trunc.error=trunc.error,y.m=y.m,y.s=y.s,ev=S$d^2)
class(ret)<-'bassPCAsetup'
return(ret)
}
#' @title Bayesian Adaptive Spline Surfaces (BASS) with basis decomposition of response
#'
#' @description Fits a BASS model to basis coefficients under the specified basis.
#' @param dat list that includes elements \code{xx}, \code{n.pc} (number of basis functions), \code{basis} (dimension m x \code{n.pc}), \code{newy} (dimension \code{n.pc} x n), \code{trunc.error} (optional truncation error with dimension n x m), \code{y.m} (vector mean removed before basis decomposition with dimension m), \code{y.s} (vector sd scaled before basis decomposition with dimension m). See the documentation of \code{bassPCA} for more details.
#' @param n.cores integer number of cores (threads) to use
#' @param parType either "fork" or "socket". Forking is typically faster, but not compatible with Windows. If \code{n.cores==1}, \code{parType} is ignored.
#' @param ... arguments to be passed to \code{bass} function calls.
#' @details Under a user defined basis decomposition, fits a bass model to each PCA basis coefficient independently, \code{bass(dat$xx,dat$newy[i,],...)} for \code{i in 1 to n.pc}, possibly in parallel. The basis does not need to be orthogonal, but independent modeling of basis coefficients should be sensible.
#' @return An object of class 'bassBasis' with two elements:
#' \item{mod.list}{list (of length \code{n.pc}) of individual bass models}
#' \item{dat}{same as dat above}
#' @keywords nonparametric regression splines functional data analysis
#' @seealso \link{predict.bassBasis} for prediction and \link{sobolBasis} for sensitivity analysis.
#' @export
#' @import stats
#' @import utils
#' @example inst/examplesPCA.R
bassBasis<-function(dat,n.cores=1,parType='fork',...){
if(n.cores>parallel::detectCores())
warning(paste0("Specified n.cores = ",n.cores,'. Proceeding with n.cores = min(n.cores,dat$n.pc,detectCores()) = ',min(n.cores,dat$n.pc,parallel::detectCores())))
n.cores<-min(n.cores,dat$n.pc,parallel::detectCores())
if(n.cores==1){
mod.list<-lapply(1:dat$n.pc,function(i) bass(dat$xx,dat$newy[i,],...))
} else if(parType=='socket'){
cl <- parallel::makeCluster(n.cores,setup_strategy = "sequential")
mod.list<-parallel::parLapply(cl,1:dat$n.pc,function(i) bass(dat$xx,dat$newy[i,],...))
parallel::stopCluster(cl)
} else if(parType=='fork'){
mod.list<-parallel::mclapply(1:dat$n.pc,function(i) bass(dat$xx,dat$newy[i,],...),mc.cores = n.cores,mc.preschedule = F)
}
ret<-list(mod.list=mod.list,dat=dat)
class(ret)<-'bassBasis'
return(ret)
}
################################################################################################################
## prediction
#' @title BASS Prediction
#'
#' @description Predict function for BASS. Outputs the posterior predictive samples based on the specified MCMC iterations.
#' @param object a fitted model, output from the \code{bassPCA} or \code{bassBasis} function.
#' @param newdata a matrix of new input values at which to predict. The columns should correspond to the same variables used in the \code{bassBasis} or \code{bassPCA} functions.
#' @param mcmc.use a vector indexing which MCMC iterations to use for prediction.
#' @param trunc.error logical, use basis truncation error when predicting?
#' @param nugget logical, use individual \code{bass} nugget variances when predicting?
#' @param n.cores number of cores, though 1 is often the fastest.
#' @param parType either "fork" or "socket". Forking is typically faster, but not compatible with Windows. If \code{n.cores==1}, \code{parType} is ignored.
#' @param ... further arguments passed to or from other methods.
#' @details Prediction combined across \code{bass} models.
#' @return An array with first dimension corresponding to MCMC iteration, second dimension corresponding to the rows of \code{newdata}, and third dimension corresponding to the multivariate/functional response.
#' @seealso \link{bassPCA} and \link{bassBasis} for model fitting and \link{sobolBasis} for sensitivity analysis.
#' @export
#' @examples
#' # See examples in bass documentation.
#'
predict.bassBasis<-function(object,newdata,mcmc.use=NULL,trunc.error=FALSE,nugget=T,n.cores=1,parType="fork",...){
if(is.null(mcmc.use)){ # if null, use all
mcmc.use<-1:((object$mod.list[[1]]$nmcmc-object$mod.list[[1]]$nburn)/object$mod.list[[1]]$thin)
}
if(n.cores==1){
# no parallel
newy.pred<-array(unlist(lapply(1:object$dat$n.pc,function(i) predict1mod(object$mod.list[[i]],newdata,mcmc.use,nugget,...))),dim=c(length(mcmc.use),nrow(newdata),object$dat$n.pc))
#browser()
out<-array(unlist(lapply(1:length(mcmc.use),function(i) predict1mcmc(matrix(newy.pred[i,,],ncol=object$dat$n.pc,nrow=nrow(newdata)),object$dat))),dim=c(length(object$dat$y.m),nrow(newdata),length(mcmc.use)))
} else if(parType=='socket'){
# parLapply (socket)
cl <- parallel::makeCluster(min(n.cores,object$dat$n.pc,parallel::detectCores()),setup_strategy = "sequential") # possibly a faster way to do this, but would need to keep cluster around
parallel::clusterExport(cl,varlist=c("newdata"),envir=environment())
newy.pred<-array(unlist(parallel::parLapply(cl,1:object$dat$n.pc,function(i) predict1mod(object$mod.list[[i]],newdata,mcmc.use,nugget,...))),dim=c(length(mcmc.use),nrow(newdata),object$dat$n.pc))
out<-array(unlist(parallel::parLapply(cl,1:length(mcmc.use),function(i) predict1mcmc(matrix(newy.pred[i,,],ncol=object$dat$n.pc,nrow=nrow(newdata)),object$dat))),dim=c(length(object$dat$y.m),nrow(newdata),length(mcmc.use)))
parallel::stopCluster(cl)
} else if(parType=='fork'){
# mclapply (fork - faster than socket, but not compatible with windows)
newy.pred<-array(unlist(parallel::mclapply(1:object$dat$n.pc,function(i) predict1mod(object$mod.list[[i]],newdata,mcmc.use,nugget,...),mc.cores=n.cores)),dim=c(length(mcmc.use),nrow(newdata),object$dat$n.pc))
out<-array(unlist(parallel::mclapply(1:length(mcmc.use),function(i) predict1mcmc(matrix(newy.pred[i,,],ncol=object$dat$n.pc,nrow=nrow(newdata)),object$dat),mc.cores=n.cores)),dim=c(length(object$dat$y.m),nrow(newdata),length(mcmc.use)))
}
out<-aperm(out,c(3,2,1))
if(trunc.error)
out<-out+array(truncErrSampN(length(mcmc.use)*nrow(newdata),object$dat$trunc.error),dim=c(length(mcmc.use),nrow(newdata),length(object$dat$y.m)))
return(out) # should be nmcmc x npred x nfunc
}
predict1mcmc<-function(mat,dat){
if(is.null(dim(mat)))
mat<-t(mat)
dat$basis%*%t(mat)*dat$y.s + dat$y.m
}
predict1mod<-function(mod,newdata,mcmc.use,nugget,...){
pmat<-predict(mod,newdata,mcmc.use=mcmc.use,...)
if(nugget)
pmat<-pmat+rnorm(length(mcmc.use),0,sqrt(mod$s2[mcmc.use]))
pmat
}
predict_fast.bassBasis<-function(object,newdata,n.cores=1,mcmc.use,trunc.error=FALSE,...){
newy.pred<-array(unlist(parallel::mclapply(1:object$dat$n.pc,function(i) predict1mod_fast(object$mod.list[[i]],newdata,mcmc.use,...),mc.cores=min(n.cores,object$dat$n.pc))),dim=c(length(mcmc.use),nrow(newdata),object$dat$n.pc))
out<-array(unlist(parallel::mclapply(1:length(mcmc.use),function(i) predict1mcmc(newy.pred[i,,],object$dat),mc.cores=min(n.cores,length(mcmc.use)))),dim=c(length(object$dat$y.m),nrow(newdata),length(mcmc.use)))
out<-aperm(out,c(3,2,1))
return(out) # should be nmcmc x npred x nfunc
}
predict1mod_fast<-function(mod,newdata,mcmc.use,...){
pmat<-predict_fast(mod,newdata,mcmc.use=mcmc.use,...)
#pmat<-pmat+rnorm(length(mcmc.use),0,sqrt(mod$s2[mcmc.use]))
pmat
}
truncErrSampN<-function(n,te.mat){
  # Quickly sample n truncation error realizations, vectorized over the
  # multivariate/functional grid. te.mat is the truncation error matrix:
  # nrow is the length of the grid (space-time combinations) and ncol is the
  # number of stored realizations, so columns are sampled with replacement.
  t(te.mat[,sample.int(ncol(te.mat),n,replace=T)])
}
################################################################################################################
## sobol
#' @title BASS Sensitivity Analysis
#'
#' @description Decomposes the variance of the BASS model into variance due to main effects, two way interactions, and so on, similar to the ANOVA decomposition for linear models. Uses the Sobol' decomposition, which can be done analytically for MARS models.
#' @param mod output from the \code{bassBasis} or \code{bassPCA} function.
#' @param int.order an integer indicating the highest order of interactions to include in the Sobol decomposition.
#' @param prior a list with the same number of elements as there are inputs to mod. Each element specifies the prior for the particular input. Each prior is specified as a list with elements \code{dist} (one of \code{c("normal", "student", "uniform")}), \code{trunc} (a vector of dimension 2 indicating the lower and upper truncation bounds, taken to be the data bounds if omitted), and for "normal" or "student" priors, \code{mean} (scalar mean of the Normal/Student, or a vector of means for a mixture of Normals or Students), \code{sd} (scalar standard deviation of the Normal/Student, or a vector of standard deviations for a mixture of Normals or Students), \code{df} (scalar degrees of freedom of the Student, or a vector of degrees of freedom for a mixture of Students), and \code{weights} (a vector of weights that sum to one for the mixture components, or the scalar 1). If unspecified, a uniform is assumed with the same bounds as are represented in the input to mod.
#' @param mcmc.use an integer indicating which MCMC iteration to use for sensitivity analysis. Defaults to the last iteration.
#' @param nind number of Sobol indices to keep (will keep the largest nind).
#' @param n.cores number of cores to use (nearly linear speedup for adding cores).
#' @param parType either "fork" or "socket". Forking is typically faster, but not compatible with Windows. If \code{n.cores==1}, \code{parType} is ignored.
#' @param plot logical; whether to plot results.
#' @param verbose logical; print progress.
#' @details Performs an analytical Sobol' decomposition for each MCMC iteration in \code{mcmc.use} (each corresponds to a MARS model), yielding a posterior distribution of sensitivity indices. Can obtain Sobol' indices as a function of one functional variable.
#' @return An object of class "bassSob": a list with the following elements.
#' \item{S}{an array of sensitivity indices (proportions of variance), with first dimension corresponding to the MCMC iteration used (\code{mcmc.use}), second dimension corresponding to the main effects and interactions labeled in \code{names.ind}, and third dimension corresponding to the multivariate/functional response.}
#' \item{S.var}{the same as \code{S}, but in terms of variance rather than proportion of variance.}
#' \item{Var.tot}{the total variance of the emulator output at each point of the multivariate/functional response.}
#' \item{names.ind}{a vector of names of the main effects and interactions kept (the largest \code{nind}, plus an "other" category for the remainder).}
#'
#' @keywords Sobol decomposition
#' @seealso \link{bassPCA} and \link{bassBasis} for model fitting and \link{predict.bassBasis} for prediction.
#' @export
#' @examples
#' # See examples in bass documentation.
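#' #
#' # A minimal hedged sketch (not run): `mod` is assumed to be a fitted
#' # bassPCA or bassBasis model.
#' \dontrun{
#' sens <- sobolBasis(mod, int.order=2, plot=TRUE)
#' sens$names.ind # main effects and interactions kept
#' }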
sobolBasis<-function(mod,int.order,prior=NULL,mcmc.use=NULL,nind=NULL,n.cores=1,parType='fork',plot=F,verbose=T){
if(is.null(mcmc.use))
mcmc.use<-length(mod$mod.list[[1]]$s2)
bassMod<-mod$mod.list[[1]] # for structuring everything, assuming that model structures are the same for different PCs
pdescat<-sum(bassMod$pdes)+sum(bassMod$pcat) # sums make NULLs 0s
if(is.null(prior))
prior<-list()
if(length(prior)<pdescat){
for(i in (length(prior)+1):pdescat)
prior[[i]]<-list(dist=NA)
}
#browser()
for(i in 1:pdescat){
if(is.null(prior[[i]]))
prior[[i]]<-list(dist=NA)
if(is.na(prior[[i]]$dist)){
prior[[i]]<-list()
prior[[i]]$dist<-'uniform'
#prior[[i]]$trunc<-bassMod$range.des[,i] - not right index when there are categorical vars
}
}
if(bassMod$func){
if(is.null(prior.func)){
prior.func<-list()
for(i in 1:bassMod$pfunc){
prior.func[[i]]<-list()
prior.func[[i]]$dist<-'uniform'
#prior.func[[i]]$trunc<-bassMod$range.func[,i]
}
}
for(i in 1:length(prior.func))
class(prior.func[[i]])<-prior.func[[i]]$dist
}
for(i in 1:length(prior))
class(prior[[i]])<-prior[[i]]$dist # class will be used for integral functions, should be uniform, normal, or student
if(bassMod$cat){
which.cat<-which(bassMod$cx=='factor')
prior.cat<-list()
for(i in 1:length(which.cat)){
prior.cat[i]<-prior[which.cat[i]]
}
prior[which.cat]<-NULL
} else{
prior.cat<-NULL
}
#browser()
if(bassMod$des){
for(i in 1:length(prior)){
if(is.null(prior[[i]]$trunc)){
prior[[i]]$trunc<-c(0,1)
} else{
#browser()
prior[[i]]$trunc<-scale_range(prior[[i]]$trunc,bassMod$range.des[,i])
}
if(prior[[i]]$dist %in% c('normal','student')){
prior[[i]]$mean<-scale_range(prior[[i]]$mean,bassMod$range.des[,i])
prior[[i]]$sd<-prior[[i]]$sd/(bassMod$range.des[2,i]-bassMod$range.des[1,i])
if(prior[[i]]$dist == 'normal'){
prior[[i]]$z<-pnorm((prior[[i]]$trunc[2]-prior[[i]]$mean)/prior[[i]]$sd) - pnorm((prior[[i]]$trunc[1]-prior[[i]]$mean)/prior[[i]]$sd)
} else{
prior[[i]]$z<-pt((prior[[i]]$trunc[2]-prior[[i]]$mean)/prior[[i]]$sd,prior[[i]]$df) - pt((prior[[i]]$trunc[1]-prior[[i]]$mean)/prior[[i]]$sd,prior[[i]]$df)
}
cc<-sum(prior[[i]]$weights*prior[[i]]$z)
prior[[i]]$weights<-prior[[i]]$weights/cc#prior[[i]]$z # change weights with truncation # divide by cc instead to keep the same prior shape
# does the truncation change the distribution shape in the non-truncated regions??
#browser()
}
}
}
#browser()
tl<-list(prior=prior)
pc.mod<-mod$mod.list
pcs<-mod$dat$basis
if(verbose)
cat('Start',timestamp(quiet = T),'\n')
p<-pc.mod[[1]]$p
if(int.order>p){
int.order<-p
warning("int.order > number of inputs, changing to int.order = number of inputs")
}
u.list<-lapply(1:int.order,function(i) combn(1:p,i))
ncombs.vec<-unlist(lapply(u.list,ncol))
ncombs<-sum(ncombs.vec)
nxfunc<-nrow(pcs)
#sob<-matrix(nrow=nxfunc,ncol=ncombs)
sob<-ints<-list()
n.pc<-ncol(pcs)
w0<-unlist(lapply(1:n.pc,function(pc) get.f0(prior,pc.mod,pc,mcmc.use)))
#browser()
f0r2<-(pcs%*%w0)^2
max.nbasis<-max(unlist(lapply(pc.mod,function(x) x$nbasis[mcmc.use])))
C1Basis.array<-array(dim=c(n.pc,p,max.nbasis))
for(i in 1:n.pc){
nb<-pc.mod[[i]]$nbasis[mcmc.use]
mcmc.mod.usei<-pc.mod[[i]]$model.lookup[mcmc.use]
for(j in 1:p){
for(k in 1:nb){
C1Basis.array[i,j,k]<-C1Basis(prior,pc.mod,j,k,i,mcmc.mod.usei)
}
}
#print(i)
}
# browser()
#
# C2Basis.array<-array(dim=c(n.pc,n.pc,p,max.nbasis,max.nbasis))
# for(i1 in 1:n.pc){
# nb1<-pc.mod[[i1]]$nbasis[mcmc.use]
# mcmc.mod.usei1<-pc.mod[[i1]]$model.lookup[mcmc.use]
# for(i2 in 1:n.pc){
# nb2<-pc.mod[[i2]]$nbasis[mcmc.use]
# mcmc.mod.usei2<-pc.mod[[i2]]$model.lookup[mcmc.use]
# for(j in 1:p){
# for(k1 in 1:nb1){
# for(k2 in 1:nb2){
# C2Basis.array[i1,i2,j,k1,k2]<-C2Basis(pc.mod,j,k1,k2,i1,i2,mcmc.mod.usei1,mcmc.mod.usei2) #C2Basis(pc.mod,l,mi,mj,i,j,mcmc.mod.usei,mcmc.mod.usej)
# }
# }
# }
# }
# print(i1)
# }
#browser()
u.list1<-list()
for(i in 1:int.order)
u.list1<-c(u.list1,split(u.list[[i]], col(u.list[[i]])))
#require(parallel)
#browser()
if(verbose)
cat('Integrating',timestamp(quiet = T),'\n')
u.list.temp<-c(list(1:p),u.list1)
if(n.cores==1){
# no parallel
ints1.temp<-lapply(u.list.temp,function(x) func.hat(prior,x,pc.mod,pcs,mcmc.use,f0r2,C1Basis.array))
} else if(parType=='socket'){
# parLapply (socket)
cl <- parallel::makeCluster(min(n.cores,parallel::detectCores()),setup_strategy = "sequential") # possibly a faster way to do this, but would need to keep cluster around
parallel::clusterExport(cl,varlist=c("prior","x","pc.mod","pcs","mcmc.use","f0r2","C1Basis.array"),envir=environment())
ints1.temp<-parallel::parLapply(cl,u.list.temp,function(x) func.hat(prior,x,pc.mod,pcs,mcmc.use,f0r2,C1Basis.array))
parallel::stopCluster(cl)
} else if(parType=='fork'){
# mclapply (fork - faster than socket, but not compatible with windows)
ints1.temp<-parallel::mclapply(u.list.temp,function(x) func.hat(prior,x,pc.mod,pcs,mcmc.use,f0r2,C1Basis.array),mc.cores=n.cores,mc.preschedule = F)
}
V.tot<-ints1.temp[[1]]
ints1<-ints1.temp[-1]
#ints1<-mclapply(u.list1,function(x) func.hat(prior,x,pc.mod,pcs,mcmc.use,f0r2,C1Basis.array),mc.cores=n.cores,mc.preschedule = preschedule)
ints<-list()
ints[[1]]<-do.call(cbind,ints1[1:ncol(u.list[[1]])])
if(int.order>1){
for(i in 2:int.order)
ints[[i]]<-do.call(cbind,ints1[sum(ncombs.vec[1:(i-1)])+1:ncol(u.list[[i]])])
}
# for(i in 1:length(u.list))
# ints[[i]]<-apply(u.list[[i]],2,function(x) func.hat(x,pc.mod,pcs,mcmc.use,f0r2)) # the heavy lifting
sob[[1]]<-ints[[1]]
# matplot(t(apply(sob[[1]],1,cumsum)),type='l')
# matplot(t(apply(sens.func$S.var[1,1:5,],2,cumsum)),type='l',add=T)
#V.tot<-func.hat(prior,1:p,pc.mod,pcs,mcmc.use,f0r2) # need to add this to the above
# plot(V.tot)
# points(apply(sens.func$S.var[1,,],2,sum),col=2)
if(verbose)
cat('Shuffling',timestamp(quiet = T),'\n')
if(length(u.list)>1){
for(i in 2:length(u.list)){
sob[[i]]<-matrix(nrow=nxfunc,ncol=ncol(ints[[i]]))
for(j in 1:ncol(u.list[[i]])){
cc<-rep(0,nxfunc)
for(k in 1:(i-1)){
ind<-which(apply(u.list[[k]],2,function(x) all(x%in%u.list[[i]][,j])))
cc<-cc+(-1)^(i-k)*rowSums(ints[[k]][,ind])
}
sob[[i]][,j]<-ints[[i]][,j]+cc
}
}
}
# sens.func.use<-lapply(strsplit(sens.func$names.ind,'x'),as.numeric)
# sl<-sapply(sens.func.use,length)
# ind.list<-list()
# sob.small<-list()
# for(i in 1:length(u.list)){
# ind.list[[i]]<-NA
# k<-0
# for(j in which(sl==i)){
# k<-k+1
# ind.list[[i]][k]<-which(apply(u.list[[i]],2,function(x) all(x==sens.func.use[[j]])))
# }
# sob.small[[i]]<-sob[[i]][,ind.list[[i]]]
# }
#
# sob.small<-do.call(cbind,sob.small)
# matplot(t(apply(sob.small,1,cumsum)),type='l')
# matplot(t(apply(sens.func$S.var[1,,],2,cumsum)),type='l',add=T)
#browser()
if(is.null(nind))
nind<-ncombs
sob.comb.var<-do.call(cbind,sob)
vv<-colMeans(sob.comb.var)
ord<-order(vv,decreasing = T)
cutoff<-vv[ord[nind]]
if(nind>length(ord))
cutoff<-min(vv)
use<-sort(which(vv>=cutoff))
V.other<-V.tot-rowSums(sob.comb.var[,use])
use<-c(use,ncombs+1)
sob.comb.var<-t(cbind(sob.comb.var,V.other))
sob.comb<-t(t(sob.comb.var)/c(V.tot))
sob.comb.var<-sob.comb.var[use,,drop=F]
sob.comb<-sob.comb[use,,drop=F]
dim(sob.comb)<-c(1,length(use),nxfunc)
dim(sob.comb.var)<-c(1,length(use),nxfunc)
names.ind<-c(unlist(lapply(u.list,function(x) apply(x,2,paste,collapse='x',sep=''))),'other')
names.ind<-names.ind[use]
if(verbose)
cat('Finish',timestamp(quiet = T),'\n')
#browser()
ret<-list(S=sob.comb,S.var=sob.comb.var,Var.tot=V.tot,names.ind=names.ind,xx=seq(0,1,length.out = nxfunc),func=T)
class(ret)<-'bassSob'
if(plot)
plot(ret)
return(ret)
}
################################################################################
## Functions
################################################################################
func.hat<-function(prior,u,pc.mod,pcs,mcmc.use,f0r2,C1Basis.array){ # could speed this up
#browser()
res<-rep(0,nrow(pcs))
n.pc<-length(pc.mod)
for(i in 1:n.pc){
res<-res+pcs[,i]^2*Ccross(prior,pc.mod,i,i,u,mcmc.use,C1Basis.array)
if(i<n.pc){
for(j in (i+1):n.pc){
res<-res+2*pcs[,i]*pcs[,j]*Ccross(prior,pc.mod,i,j,u,mcmc.use,C1Basis.array)
#print(c(i,j))
}
}
}
return(res-f0r2)
}
Ccross<-function(prior,pc.mod,i,j,u,mcmc.use=1,C1Basis.array){ # inner product of main effects from different eof models
p<-pc.mod[[1]]$p
mcmc.mod.usei<-pc.mod[[i]]$model.lookup[mcmc.use]
mcmc.mod.usej<-pc.mod[[j]]$model.lookup[mcmc.use]
Mi<-pc.mod[[i]]$nbasis[mcmc.use]
Mj<-pc.mod[[j]]$nbasis[mcmc.use]
mat<-matrix(nrow=Mi,ncol=Mj)
#CC<-C2Basis.temp<-CCu<-matrix(1,nrow=Mi,ncol=Mj)
a0i<-pc.mod[[i]]$beta[mcmc.use,1]
a0j<-pc.mod[[j]]$beta[mcmc.use,1]
f0i<-get.f0(prior,pc.mod,i,mcmc.use)
f0j<-get.f0(prior,pc.mod,j,mcmc.use)
out<- a0i*a0j + a0i*(f0j-a0j) + a0j*(f0i-a0i)
#browser()
if(Mi>0 & Mj>0){
ai<-pc.mod[[i]]$beta[mcmc.use,1+1:Mi]
aj<-pc.mod[[j]]$beta[mcmc.use,1+1:Mj]
for(mi in 1:Mi){
for(mj in 1:Mj){
temp1<-ai[mi]*aj[mj]
temp2<-temp3<-1
for(l in (1:p)[-u]){
#temp2<-temp2*C1Basis(pc.mod,l,mi,i,mcmc.mod.usei)*C1Basis(pc.mod,l,mj,j,mcmc.mod.usej) # make a C1Basis lookup table instead (this is the bottleneck)
temp2<-temp2*C1Basis.array[i,l,mi]*C1Basis.array[j,l,mj]
#browser()
}
#CC[mi,mj]<-temp2
for(l in u){
temp3<-temp3*C2Basis(prior,pc.mod,l,mi,mj,i,j,mcmc.mod.usei,mcmc.mod.usej) # would be nice to use a lookup table here too, but its too big
}
#C2Basis.temp[mi,mj]<-temp3
#CCu[mi,mj]<-temp4
out<-out+temp1*temp2*temp3#(temp3-1) not -1 since we subtract f0^2 later
#print(out)
#mat[mi,mj]<-temp
#print(c(temp1*temp2*temp3))
}
}
}
#out<-out+ai%*%(CC*C2Basis.temp/CCu)%*%aj
if(length(out)==0)
browser()
return(out)
}
C1Basis<-function(prior,pc.mod,l,m,pc,mcmc.mod.use){ # l = variable, m = basis function, pc = eof index
if(l<=pc.mod[[pc]]$pdes){
int.use.l<-which(pc.mod[[pc]]$vars.des[mcmc.mod.use,m,]==l)
if(length(int.use.l)==0)
return(1)
s<-pc.mod[[pc]]$signs[mcmc.mod.use,m,int.use.l]
t.ind<-pc.mod[[pc]]$knotInd.des[mcmc.mod.use,m,int.use.l]
t<-pc.mod[[pc]]$xx.des[t.ind,l]
q<-pc.mod[[pc]]$degree
#return((1/(q+1)*((s+1)/2-s*t))*s^2)
if(s==0)
return(0)
cc<-const(signs=s,knots=t,degree=q)
if(s==1){
a<-max(prior[[l]]$trunc[1],t)
b<-prior[[l]]$trunc[2]
if(b<t)
return(0)
out<-intabq1(prior[[l]],a,b,t,q)/cc
#return(intabq1(tl$prior[[k]],a,b,t,q)/cc)
} else{
a<-prior[[l]]$trunc[1]
b<-min(prior[[l]]$trunc[2],t)
if(t<a)
return(0)
out<-intabq1(prior[[l]],a,b,t,q)*(-1)^q/cc
#return(intabq1(tl$prior[[k]],a,b,t,q)*(-1)^q/cc)
}
if(out< -1e-15)
browser()
return(out)
} else{
l.cat<-l-pc.mod[[pc]]$pdes # assumes that des vars come before cat vars, which I think we do internally.
int.use.l<-which(pc.mod[[pc]]$vars.cat[mcmc.mod.use,m,]==l.cat)
if(length(int.use.l)==0)
return(1)
lD1<-pc.mod[[pc]]$sub.size[mcmc.mod.use,m,int.use.l]
nlevels<-pc.mod[[pc]]$nlevels[l.cat]
return(lD1/nlevels)
}
}
C2Basis<-function(prior,pc.mod,l,m1,m2,pc1,pc2,mcmc.mod.use1,mcmc.mod.use2){
if(l<=pc.mod[[pc1]]$pdes){ # could do pc1 or pc2, they have the same vars
int.use.l1<-which(pc.mod[[pc1]]$vars.des[mcmc.mod.use1,m1,]==l)
int.use.l2<-which(pc.mod[[pc2]]$vars.des[mcmc.mod.use2,m2,]==l)
if(length(int.use.l1)==0 & length(int.use.l2)==0)
return(1)
if(length(int.use.l1)==0)
return(C1Basis(prior,pc.mod,l,m2,pc2,mcmc.mod.use2))
if(length(int.use.l2)==0)
return(C1Basis(prior,pc.mod,l,m1,pc1,mcmc.mod.use1))
#if(pc1==pc2 & m1==m2)
# return(C1Basis(prior,pc.mod,l,m1,pc1,mcmc.mod.use1)^2) ## is this right??
q<-pc.mod[[pc1]]$degree
s1<-pc.mod[[pc1]]$signs[mcmc.mod.use1,m1,int.use.l1]
s2<-pc.mod[[pc2]]$signs[mcmc.mod.use2,m2,int.use.l2]
t.ind1<-pc.mod[[pc1]]$knotInd.des[mcmc.mod.use1,m1,int.use.l1]
t.ind2<-pc.mod[[pc2]]$knotInd.des[mcmc.mod.use2,m2,int.use.l2]
t1<-pc.mod[[pc1]]$xx.des[t.ind1,l]
t2<-pc.mod[[pc2]]$xx.des[t.ind2,l]
if(t2<t1){
temp<-t1
t1<-t2
t2<-temp
temp<-s1
s1<-s2
s2<-temp
}
#browser()
return(C22Basis(prior[[l]],t1,t2,s1,s2,q,m1,m2,pc1,pc2))
} else{
l.cat<-l-pc.mod[[pc1]]$pdes
int.use.l1<-which(pc.mod[[pc1]]$vars.cat[mcmc.mod.use1,m1,]==l.cat)
int.use.l2<-which(pc.mod[[pc2]]$vars.cat[mcmc.mod.use2,m2,]==l.cat)
if(length(int.use.l1)==0 & length(int.use.l2)==0)
return(1)
if(length(int.use.l1)==0)
return(C1Basis(prior,pc.mod,l,m2,pc2,mcmc.mod.use2))
if(length(int.use.l2)==0)
return(C1Basis(prior,pc.mod,l,m1,pc1,mcmc.mod.use1))
#browser()
sub1<-pc.mod[[pc1]]$sub.list[[mcmc.mod.use1]][[m1]][[int.use.l1]]
sub2<-pc.mod[[pc2]]$sub.list[[mcmc.mod.use2]][[m2]][[int.use.l2]]
if(is.na(sub1[1]) & is.na(sub2[1]))
browser()
nlevels<-pc.mod[[pc1]]$nlevels[l.cat]
return(length(intersect(sub1,sub2))/nlevels)
}
}
C22Basis<-function(prior,t1,t2,s1,s2,q,m1,m2,pc1,pc2){ # t1<t2
cc<-const(signs=c(s1,s2),knots=c(t1,t2),degree=q)
if((s1*s2)==0){
return(0)
}
# if(m1==m2 & pc1==pc2){ #t1=t2, s1=s2 - NOT TRUE, since these could be different eof models
# return(1/(2*q+1)*((s1+1)/2-s1*t1)^(2*q+1)/cc)
# intabq1(prior[[l]],a,b,t,q)/cc
# if(s1==1){
#
# } else{
#
# }
# } else{
if(s1==1){
if(s2==1){
return(intabq2(prior,t2,1,t1,t2,q)/cc)
} else{
return(intabq2(prior,t1,t2,t1,t2,q)*(-1)^q/cc)
}
} else{
if(s2==1){
return(0)
} else{
return(intabq2(prior,0,t1,t1,t2,q)/cc)
}
}
#}
}
get.f0<-function(prior,pc.mod,pc,mcmc.use){ # mcmc.mod.use is mcmc index not model index
mcmc.mod.use<-pc.mod[[pc]]$model.lookup[mcmc.use]
out<-pc.mod[[pc]]$beta[mcmc.use,1] # intercept
if(pc.mod[[pc]]$nbasis[mcmc.use] > 0){
for(m in 1:pc.mod[[pc]]$nbasis[mcmc.use]){
out1<-pc.mod[[pc]]$beta[mcmc.use,1+m]
for(l in 1:pc.mod[[pc]]$p){
out1<-out1*C1Basis(prior,pc.mod,l,m,pc,mcmc.mod.use)
}
out<-out+out1
}
}
return(out)
}
##################################################################################################################################################################
##################################################################################################################################################################
plot_prior<-function(prior,plot=TRUE,n=1000,...){
xx<-seq(prior$trunc[1],prior$trunc[2],length.out=n)
if(prior$dist=='uniform'){
out<-dunif(xx,prior$trunc[1],prior$trunc[2])
z<-1
}
if(prior$dist=='normal'){
out<-0
z<-0
for(i in 1:length(prior$weights)){
zi<-pnorm(prior$trunc[2],prior$mean[i],prior$sd[i]) - pnorm(prior$trunc[1],prior$mean[i],prior$sd[i])
z<-z+zi*prior$weights[i]
out<-out+prior$weights[i]*dnorm(xx,prior$mean[i],prior$sd[i])
}
}
if(prior$dist=='student'){
out<-0
z<-0
for(i in 1:length(prior$weights)){
zi<-pt((prior$trunc[2]-prior$mean[i])/prior$sd[i],prior$df[i]) - pt((prior$trunc[1]-prior$mean[i])/prior$sd[i],prior$df[i])
z<-z+zi*prior$weights[i]
out<-out+prior$weights[i]*(dt((xx-prior$mean[i])/prior$sd[i],prior$df[i])/prior$sd[i])
}
}
if(plot)
plot(xx,out/z,...)
return(cbind(xx,out/z))
}
sample.prior<-function(prior,n){
p<-length(prior)
out<-matrix(nrow=n,ncol=p)
for(i in 1:p){
if(prior[[i]]$dist=='uniform'){
out[,i]<-runif(n,prior[[i]]$trunc[1],prior[[i]]$trunc[2])
} else{
ncomp<-length(prior[[i]]$weights)
comp<-sample(1:ncomp,size=n,prob=prior[[i]]$weights,replace=T)
if(prior[[i]]$dist=='normal')
#out[,i]<-rnorm(n,prior[[i]]$mean[comp],prior[[i]]$sd[comp])
out[,i]<-suppressWarnings(truncdist::rtrunc(n,spec='norm',a=(prior[[i]]$trunc[1]-prior[[i]]$mean[comp])/prior[[i]]$sd[comp],b=(prior[[i]]$trunc[2]-prior[[i]]$mean[comp])/prior[[i]]$sd[comp])*prior[[i]]$sd[comp]+prior[[i]]$mean[comp])
if(prior[[i]]$dist=='student')
out[,i]<-truncdist::rtrunc(n,spec='t',df=prior[[i]]$df[comp],a=(prior[[i]]$trunc[1]-prior[[i]]$mean[comp])/prior[[i]]$sd[comp],b=(prior[[i]]$trunc[2]-prior[[i]]$mean[comp])/prior[[i]]$sd[comp])*prior[[i]]$sd[comp]+prior[[i]]$mean[comp]
}
}
out
}
## (end of file: /scratch/gouwar.j/cran-all/cranData/BASS/R/bassPCA.R)
#' @title Calibrate a bassPCA or bassBasis Model to Data
#'
#' @description Robust modular calibration of a bassPCA or bassBasis emulator using adaptive Metropolis, tempering, and decorrelation steps in an effort to be free of any user-required tuning.
#' @param y vector of calibration data
#' @param mod an emulator of class bassBasis, whose predictions should match y (i.e., predictions from mod should be the same length as y)
#' @param type one of c(1,2). 1 indicates a model that uses independent truncation error variance, no measurement error correlation, and discrepancy on a basis, while type 2 indicates a model that uses a full truncation error covariance matrix, a full measurement error correlation matrix, a fixed full discrepancy covariance matrix, and a fixed discrepancy mean. 1 is for situations where computational efficiency is important (because y is dense), while 2 is only for cases where y is a short vector.
#' @param sd.est vector of prior estimates of measurement error standard deviation
#' @param s2.df vector of degrees of freedom for measurement error sd prior estimates
#' @param s2.ind index vector, same length as y, indicating which sd.est goes with which y
#' @param meas.error.cor a fixed correlation matrix for the measurement errors
#' @param bounds a 2xp matrix of bounds for each input parameter, where p is the number of input parameters.
#' @param discrep.mean discrepancy mean (fixed), only used if type=2
#' @param discrep.mat discrepancy covariance (fixed, for type 2) or basis (if not square, for type 1)
#' @param nmcmc number of MCMC iterations.
#' @param temperature.ladder an increasing vector, starting at 1, used for parallel tempering. Geometric spacing is recommended, i.e., (1+delta)^(0:ntemps) where delta is small (typically between 0.05 and 0.2) and ntemps+1 is the number of temperatures.
#' @param decor.step.every integer number of MCMC iterations between decorrelation steps.
#' @param verbose logical, whether to print progress.
#'
#' @details Fits a modular Bayesian calibration model, with \deqn{y = Kw(\theta) + Dv + \epsilon, ~~\epsilon \sim N(0,\sigma^2 R)} \deqn{f(x) = a_0 + \sum_{m=1}^M a_m B_m(x)} and \eqn{B_m(x)} is a BASS basis function (tensor product of spline basis functions). We use priors \deqn{a \sim N(0,\sigma^2/\tau (B'B)^{-1})} \deqn{M \sim Poisson(\lambda)} as well as the priors mentioned in the arguments above.
#' @return A list with posterior samples of the calibration parameters (\code{theta}, including the tempered chains), error variances (\code{s2}), and discrepancy basis coefficients (\code{discrep.vars}), along with acceptance counts (\code{count}, \code{count.decor}), adaptive proposal scales (\code{tau}), the emulator posterior sample indices used (\code{ii}), and the internal state (\code{curr}) and data (\code{dat}) used by the sampler.
#' @seealso \link{predict.bassBasis} for prediction and \link{sobolBasis} for sensitivity analysis.
#' @export
#' @import utils
#' @example inst/examplesPCA.R
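#' @examples
#' # A hedged sketch (not run); `mod`, `y.obs`, and `bnds` are hypothetical
#' # stand-ins (see inst/examplesPCA.R for a complete worked example):
#' \dontrun{
#' cal <- calibrate.bassBasis(y=y.obs, mod=mod, type=1,
#'                            sd.est=0.1, s2.df=5,
#'                            s2.ind=rep(1,length(y.obs)),
#'                            meas.error.cor=diag(length(y.obs)),
#'                            bounds=bnds,
#'                            discrep.mean=rep(0,length(y.obs)),
#'                            discrep.mat=matrix(1,length(y.obs),1),
#'                            nmcmc=5000)
#' }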
#'
calibrate.bassBasis<-function(y,
mod,
type,
sd.est,
s2.df,
s2.ind,
meas.error.cor,
bounds,
discrep.mean,
discrep.mat,
nmcmc=10000,
temperature.ladder=1.05^(0:30),
decor.step.every=100,
verbose=T
){
tl<-temperature.ladder
decor<-decor.step.every
func<-function(mod,xx,ii)
predict(mod,xx,mcmc.use=ii,nugget=F,trunc.error=F)
vars<-do.call(cbind,lapply(mod$mod.list,function(a) a$s2))
basis<-mod$dat$basis
if(type==1)
trunc.error.cov<-diag(diag(cov(t(mod$dat$trunc.error))))
if(type==2)
trunc.error.cov<-cov(t(mod$dat$trunc.error))
rmnorm<-function(mu, S){
mu+c(rnorm(length(mu))%*%chol(S))
}
if(any(s2.df==0)){
ldig.kern<-function(x,a,b)
-log(x+1)
} else{
ldig.kern<-function(x,a,b)
-(a+1)*log(x)-b/x
}
unscale.range<-function(x,r){
x*(r[2]-r[1])+r[1]
}
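  # Tempered inverse-gamma draw used below: an IG(shape, scale) density raised
  # to the power itemper is proportional to an IG(itemper*(shape+1)-1,
  # itemper*scale) density, which is what rigammaTemper samples.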
rigammaTemper<-function(n,shape,scale,itemper){
1/rgamma(n,itemper*(shape+1)-1,rate=itemper*scale)
}
myTimestamp<-function(){
x<-Sys.time()
paste('#--',format(x,"%b %d %X"),'--#')
}
cor2cov<-function(R,S) # https://stats.stackexchange.com/questions/62850/obtaining-covariance-matrix-from-correlation-matrix
outer(S,S) * R
p<-ncol(bounds)
a<-s2.df/2
b<-a*sd.est^2
ns2<-length(unique(s2.ind))
n.s2.ind<-rep(0,ns2)
for(j in 1:ns2)
n.s2.ind[j]<-sum(s2.ind==j)
s2.first.ind<-NA
for(j in 1:ns2)
s2.first.ind[j]<-which(s2.ind==j)[1]
ny<-length(y)
ntemps<-length(tl)
class<-'mult'
nd<-1
if(ncol(discrep.mat)<ny){
class<-'func'
nd<-ncol(discrep.mat)
}
theta<-array(dim=c(nmcmc,ntemps,p))
s2<-array(dim=c(nmcmc,ntemps,ns2))
discrep.vars<-array(dim=c(nmcmc,ntemps,nd))
#temp.ind<-matrix(nrow=nmcmc,ncol=ntemps)
itl<-1/tl # inverse temperature ladder
tran<-function(th){
#th2<-pnorm(th)
for(i in 1:ncol(th)){
th[,i]<-unscale.range(th[,i],bounds[,i])
}
th
}
theta[1,,]<-runif(prod(dim(theta[1,,,drop=F])))
s2[1,,]<-1
discrep.vars[1,,]<-0
my.solve<-function(x){
u <- chol(x)
chol2inv(u)
}
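  # Sherman-Morrison-Woodbury identity and the matching determinant lemma,
  # used by swm() and swm.ldet() below (standard linear algebra results):
  #   (A + U C V)^{-1} = A^{-1} - A^{-1} U (C^{-1} + V A^{-1} U)^{-1} V A^{-1}
  #   log|A + U C V|   = log|C^{-1} + V A^{-1} U| + log|A| + log|C|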
swm<-function(Ainv,U,Cinv,V){ # sherman woodbury morrison (A+UCV)^-1
Ainv - Ainv %*% U %*% my.solve(Cinv + V %*% Ainv %*% U) %*% V %*% Ainv
}
swm.ldet<-function(Ainv,U,Cinv,V,Aldet,Cldet){ # sherman woodbury morrison |A+UCV|
determinant(Cinv + V %*% Ainv %*% U, logarithm=T)$mod + Aldet + Cldet
}
curr<-list()
for(t in 1:ntemps)
curr[[t]]<-list()
dat<-list(y=y,basis=basis,s2.ind=s2.ind)
if(class=='mult'){
dat$trunc.error.cov<-trunc.error.cov
dat$meas.error.cor<-meas.error.cor
dat$discrep.cov<-discrep.mat
dat$discrep.mean<-discrep.mean
for(t in 1:ntemps){
curr[[t]]$s2<-s2[1,t,]
}
lik.cov.inv<-function(dat,curr){#trunc.error.cov,Sigma,discrep.mat,discrep.vars,basis,emu.vars){
Sigma<-cor2cov(dat$meas.error.cor,sqrt(curr$s2[dat$s2.ind]))
mat<-chol(dat$trunc.error.cov+Sigma+dat$discrep.cov+dat$basis%*%diag(curr$emu.vars,mod$dat$n.pc)%*%t(dat$basis))
inv<-chol2inv(mat)
ldet<-2*sum(log(diag(mat)))
return(list(inv=inv, ldet=ldet))
}
llik<-function(dat,curr){#y,pred,discrep,cov.inv,ldet){ # use ldet=0 if it doesn't matter
vec <- dat$y-curr$pred-dat$discrep.mean
-.5*(curr$cov$ldet + t(vec)%*%curr$cov$inv%*%(vec))
}
}
if(class=='func'){
dat$trunc.error.var<-diag(trunc.error.cov) # assumed diagonal
dat$D<-discrep.mat
dat$discrep<-dat$D%*%discrep.vars[1,t,]
dat$discrep.tau<-1
dat$nd<-ncol(dat$D)
for(t in 1:ntemps){
curr[[t]]$s2<-s2[1,t,]
#curr[[t]]$vars.sigma<-curr[[t]]$s2[s2.ind]
curr[[t]]$discrep<-dat$D%*%discrep.vars[1,t,]
}
lik.cov.inv<-function(dat,curr){#trunc.error.cov,Sigma,discrep.mat,discrep.vars,basis,emu.vars){ # Sigma, trunc.error.cov are diagonal
vec<-dat$trunc.error.var+curr$s2[dat$s2.ind]
Ainv<-diag(1/vec)
Aldet<-sum(log(vec))
inv<-swm(Ainv,basis,diag(1/curr$emu.vars),t(basis))
ldet<-swm.ldet(Ainv,basis,diag(1/curr$emu.vars),t(basis),Aldet,sum(log(curr$emu.vars)))
return(list(inv=inv, ldet=ldet))
}
llik<-function(dat,curr){#y,pred,discrep,cov.inv,ldet){ # use ldet=0 if it doesn't matter
vec <- dat$y-curr$pred-curr$discrep
-.5*(curr$cov$ldet + t(vec)%*%curr$cov$inv%*%(vec))
}
}
bigMat.curr<-Sigma.curr<-list()
nmcmc.emu<-nrow(vars)
ii<-NA
ii[1]<-sample(nmcmc.emu,1)
pred.curr<-matrix(func(mod,tran(matrix(theta[1,,],ncol=p)),ii[1]),nrow=ntemps)
for(t in 1:ntemps){
curr[[t]]$pred<-pred.curr[t,]
curr[[t]]$emu.vars<-vars[ii[1],]
}
eps<-1e-13
tau<-rep(-4,ntemps) # scaling
tau.ls2<-rep(0,ntemps)
cc<-2.4^2/p
S<-mu<-cov<-S.ls2<-mu.ls2<-cov.ls2<-list()
for(t in 1:ntemps){
S[[t]]<-diag(p)*1e-6
S.ls2[[t]]<-diag(ns2)*1e-6
}
count<-matrix(0,nrow=ntemps,ncol=ntemps)
count.decor<-matrix(0,nrow=p,ncol=ntemps)
count100<-count.s2<-rep(0,ntemps)
for(i in 2:nmcmc){
theta[i,,]<-theta[i-1,,] # current set at previous (update below)
s2[i,,]<-s2[i-1,,]
########################################################
## update s2
ii[i]<-sample(nmcmc.emu,1)
pred.curr<-matrix(func(mod,tran(matrix(theta[i-1,,],ncol=p)),ii[i]),nrow=ntemps)
for(t in 1:ntemps){
curr[[t]]$pred<-pred.curr[t,]
curr[[t]]$emu.vars<-vars[ii[i],]
curr[[t]]$cov<-lik.cov.inv(dat,curr[[t]])
curr[[t]]$llik<-llik(dat,curr[[t]])
}
if(i==300){ # start adapting
for(t in 1:ntemps){
if(ns2>1){
mu.ls2[[t]]<-colMeans(log(s2[1:(i-1),t,]))
cov.ls2[[t]]<-cov(log(s2[1:(i-1),t,]))
} else{
mu.ls2[[t]]<-matrix(mean(log(s2[1:(i-1),t,])))
cov.ls2[[t]]<-matrix(var(log(s2[1:(i-1),t,])))
}
S.ls2[[t]]<- (cov.ls2[[t]]*cc+diag(eps*cc,ns2))*exp(tau.ls2[t])
}
}
if(i>300){ # adaptation updates
for(t in 1:ntemps){
#browser()
mu.ls2[[t]]<-mu.ls2[[t]]+(log(s2[(i-1),t,])-mu.ls2[[t]])/(i-1)
cov.ls2[[t]]<-(i-2)/(i-1)*cov.ls2[[t]] + (i-2)/(i-1)^2*tcrossprod(log(s2[(i-1),t,])-mu.ls2[[t]])
S.ls2[[t]]<-(cov.ls2[[t]]*cc+diag(eps*cc,ns2))*exp(tau.ls2[t])
}
}
#ls2.cand<-matrix(nrow=ntemps,ncol=ns2)
for(t in 1:ntemps){
ls2.cand<-rmnorm(log(curr[[t]]$s2),S.ls2[[t]]) # generate candidate
cand<-curr[[t]]
cand$s2<-exp(ls2.cand)
cand$cov<-lik.cov.inv(dat,cand)
cand$llik<-llik(dat,cand)
alpha<- itl[t]*(
+ cand$llik + sum(ldig.kern(cand$s2,a,b)) + sum(log(cand$s2))
- curr[[t]]$llik - sum(ldig.kern(curr[[t]]$s2,a,b)) - sum(log(curr[[t]]$s2))
) # log lik + log prior + log jacobian
#if(i>5000)
# browser()
if(log(runif(1))<alpha){
curr[[t]]<-cand
s2[i,t,]<-cand$s2
count.s2[t]<-count.s2[t]+1
}
}
#if(i>5000)
# browser()
########################################################
## update discrep.vars with gibbs step
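    ## (conjugate normal full conditional: with prior v ~ N(0, discrep.tau*I),
    ##  the posterior for the basis coefficients is N(S m, S/itl[t]) with
    ##  S = (I/discrep.tau + D' C^{-1} D)^{-1} and m = D' C^{-1} (y - pred),
    ##  where C is the marginal likelihood covariance stored in curr$cov)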
if(class=='func'){
for(t in 1:ntemps){
discrep.S<-my.solve(diag(dat$nd)/dat$discrep.tau + t(dat$D) %*% curr[[t]]$cov$inv %*% dat$D)
discrep.m<-t(dat$D) %*% curr[[t]]$cov$inv %*% (dat$y-curr[[t]]$pred)
discrep.vars[i,t,]<-c(rmnorm(discrep.S%*%discrep.m, discrep.S/itl[t]))
curr[[t]]$discrep.vars<-discrep.vars[i,t,]
curr[[t]]$discrep<-dat$D%*%discrep.vars[i,t,]
}
}
########################################################
## adaptive block update for theta within each temperature - covariance of previous samples scaled (by exp(tau)) based on acceptance rate for last 100 samples, since tempering makes large gaps
if(i==300){ # start adapting
for(t in 1:ntemps){
mu[[t]]<-colMeans(theta[1:(i-1),t,])
cov[[t]]<-cov(theta[1:(i-1),t,])
S[[t]]<-(cov[[t]]*cc+diag(eps*cc,p))*exp(tau[t])
}
}
if(i>300){ # adaptation updates
for(t in 1:ntemps){
#browser()
mu[[t]]<-mu[[t]]+(theta[(i-1),t,]-mu[[t]])/(i-1)
cov[[t]]<-(i-2)/(i-1)*cov[[t]] + (i-2)/(i-1)^2*tcrossprod(theta[(i-1),t,]-mu[[t]])
S[[t]]<-(cov[[t]]*cc+diag(eps*cc,p))*exp(tau[t])
}
}
theta.cand<-matrix(nrow=ntemps,ncol=p)
for(t in 1:ntemps)
theta.cand[t,]<-rmnorm(theta[i-1,t,],S[[t]]) # generate candidate for each temperature
pred.cand<-matrix(func(mod,tran(matrix(theta.cand,ncol=p)),ii[i]),nrow=ntemps)
# Dinv.cand is the same as Dinv.curr
for(t in 1:ntemps){ # loop over temperatures, do a block MCMC update
cand<-curr[[t]]
cand$pred<-pred.cand[t,]
cand$theta<-theta.cand[t,]
if(any(cand$theta<0 | cand$theta>1))
alpha<- -9999
else{
cand$llik<-llik(dat,cand)
alpha<- itl[t]*(
+ cand$llik
- curr[[t]]$llik
) # log lik + uniform prior
}
if(log(runif(1))<alpha){
curr[[t]]<-cand
theta[i,t,]<-theta.cand[t,]
count[t,t]<-count[t,t]+1
pred.curr[t,]<-pred.cand[t,]
count100[t]<-count100[t]+1
}
}
########################################################
## acceptance-rate based adaptation of covariance scale
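    # (the target of 23 acceptances per 100 proposals corresponds to the
    #  roughly 0.234 acceptance rate that is asymptotically optimal for
    #  multivariate random-walk Metropolis)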
if(i%%100==0){
delta<-min(.1,1/sqrt(i)*5)
for(t in 1:ntemps){
if(count100[t]<23){
tau[t]<-tau[t]-delta
} else if(count100[t]>23){
tau[t]<-tau[t]+delta
}
} # could be vectorized, but probably not expensive
count100<-count100*0
}
########################################################
## decorrelation step for theta, especially to decorrelate the thetas that dont change anything
if(i%%decor==0){ # every so often, do a decorrelation step (use single-site independence sampler)
for(k in 1:p){
theta.cand<-theta[i,,,drop=F] # most up-to-date value
theta.cand[1,,k]<-runif(ntemps) # independence sampler candidate (vectorize over temperatures)
pred.cand<-matrix(func(mod,tran(matrix(theta.cand[1,,],ncol=p)),ii[i]),nrow=ntemps)
for(t in 1:ntemps){
cand<-curr[[t]]
cand$pred<-pred.cand[t,]
cand$theta<-theta.cand[1,t,]
cand$llik<-llik(dat,cand)
alpha<- itl[t]*(
+ cand$llik
- curr[[t]]$llik
) # log lik + uniform prior
if(log(runif(1))<alpha){
curr[[t]]<-cand
theta[i,t,k]<-theta.cand[1,t,k]
count.decor[k,t]<-count.decor[k,t]+1
pred.curr[t,]<-pred.cand[t,]
}
}
}
}
########################################################
## tempering swaps
if(i>1000 & ntemps>1){ # tempering swap
for(dummy in 1:ntemps){ # repeat tempering step a bunch of times
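        # propose swapping the states of two randomly chosen temperatures and
        # accept with probability min(1, exp(alpha)), where alpha is the
        # difference in inverse temperatures times the difference in the
        # chains' log posterior kernels (likelihood and s2 prior terms)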
sw<-sort(sample(ntemps,size=2))
alpha<-(itl[sw[2]]-itl[sw[1]])*(
curr[[sw[1]]]$llik + sum(ldig.kern(curr[[sw[1]]]$s2,a,b)) # plus discrep terms
- curr[[sw[2]]]$llik - sum(ldig.kern(curr[[sw[2]]]$s2,a,b))
)
if(log(runif(1))<alpha){
temp<-theta[i,sw[1],]
theta[i,sw[1],]<-theta[i,sw[2],]
theta[i,sw[2],]<-temp
temp<-s2[i,sw[1],]
s2[i,sw[1],]<-s2[i,sw[2],]
s2[i,sw[2],]<-temp
temp<-discrep.vars[i,sw[1],]
discrep.vars[i,sw[1],]<-discrep.vars[i,sw[2],]
discrep.vars[i,sw[2],]<-temp
count[sw[1],sw[2]]<-count[sw[1],sw[2]]+1
temp<-pred.curr[sw[1],] # not sampling posterior predictive each time, for speed
pred.curr[sw[1],]<-pred.curr[sw[2],]
pred.curr[sw[2],]<-temp
temp<-curr[[sw[1]]]
curr[[sw[1]]]<-curr[[sw[2]]]
curr[[sw[2]]]<-temp
}
}
}
if(verbose & i%%100==0){
pr<-c('MCMC iteration',i,myTimestamp(),'count:',diag(count))
cat(pr,'\n')
}
}
##th2<-pnorm(theta)
#for(ii in 1:p){
# theta[,,ii]<-unscale.range(theta[,,ii],bounds[,ii])
#}
return(list(theta=theta,s2=s2,count=count,count.decor=count.decor,tau=tau,ii=ii,curr=curr,dat=dat,discrep.vars=discrep.vars))
}
#
#
#
# ##################################################################################################################################################################
# ##################################################################################################################################################################
# ## modularized calibration
# rmnorm<-function(mu, S){
# mu+c(rnorm(length(mu))%*%chol(S))
# }
# calibrate<-function(mod,y,sd.est,s2.df,bounds,nmcmc,tl=1,verbose=T,decor=100,pred.ncores=1,pred.parType='fork',trunc.error=F){ # assumes inputs to mod are standardized to (0,1), equal variance for all y values (should change to sim covariance)
# p<-ncol(bounds)
# a<-s2.df/2
# b<-a*sd.est^2
#
# # could allow s2.ind vector, like in python code
# # or could allow s2.mult for a functional setup
#
# ny<-length(y)
# ntemps<-length(tl)
#
# theta<-array(dim=c(nmcmc,ntemps,p))
# s2<-matrix(nrow=nmcmc,ncol=ntemps)
# #temp.ind<-matrix(nrow=nmcmc,ncol=ntemps)
#
# itl<-1/tl # inverse temperature ladder
#
# tran<-function(th){
# #th2<-pnorm(th)
# for(i in 1:ncol(th)){
# th[,i]<-unscale.range(th[,i],bounds[,i])
# }
# th
# }
#
# #browser()
# theta[1,,]<-runif(prod(dim(theta[1,,,drop=F])))
# # pred.curr<-matrix(
# # predict(mod,
# # tran(matrix(theta[1,,],ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget = T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps)
#
# pred.curr<-matrix(mod(tran(matrix(theta[1,,],ncol=p))),nrow=ntemps)
# s2[1,]<-rigammaTemper(ntemps, ny/2+a, b+colSums((t(pred.curr)-y)^2)/2, itl)
#
#
#
# eps<-1e-13
# tau<-rep(-4,ntemps) # scaling
# cc<-2.4^2/p
# S<-mu<-cov<-S.rev<-mu.rev<-cov.rev<-list()
# for(t in 1:ntemps)
# S[[t]]<-diag(p)*1e-6
# count<-matrix(0,nrow=ntemps,ncol=ntemps)
# count.decor<-matrix(0,nrow=p,ncol=ntemps)
# count100<-rep(0,ntemps)
#
#
# for(i in 2:nmcmc){
#
# theta[i,,]<-theta[i-1,,] # current set at previous (update below)
#
# ########################################################
# ## adaptive block update for theta within each temperature - covariance of previous samples scaled (by exp(tau)) based on acceptance rate for last 100 samples, since tempering makes large gaps
#
# if(i==300){ # start adapting
# for(t in 1:ntemps){
# mu[[t]]<-colMeans(theta[1:(i-1),t,])
# cov[[t]]<-cov(theta[1:(i-1),t,])
# S[[t]]<-cov(theta[1:(i-1),t,])*cc+diag(eps*cc,p)
# }
# }
# if(i>300){ # adaptation updates
# for(t in 1:ntemps){
# mu[[t]]<-mu[[t]]+(theta[(i-1),t,]-mu[[t]])/(i-1)
# cov[[t]]<-(i-2)/(i-1)*cov[[t]] + (i-2)/(i-1)^2*tcrossprod(theta[(i-1),t,]-mu[[t]])
# S[[t]]<-(cov[[t]]*cc+diag(eps*cc,p))*exp(tau[t])
# }
# }
#
# # if(i>300 & i<1000){ # start adapting
# # for(t in 1:ntemps){
# # mu[[t]]<-colMeans(theta[1:(i-1),t,])
# # cov[[t]]<-cov(theta[1:(i-1),t,])
# # S[[t]]<-cov[[t]]*cc+diag(eps*cc,p)
# # }
# # }
# # if(i>1000){ # radius-based adaptation
# # for(t in 1:ntemps){
# # #browser()
# # dist<-sqrt(colSums((t(theta[1:(i-2),t,])-theta[i-1,t,])^2))
# # use<-which(dist<=quantile(dist,.15))
# # mu[[t]]<-colMeans(theta[use,t,])
# # cov[[t]]<-cov(theta[use,t,])
# # S[[t]]<-cov[[t]]*cc+diag(eps*cc,p)
# # }
# # }
#
# theta.cand<-matrix(nrow=ntemps,ncol=p)
# for(t in 1:ntemps)
# theta.cand[t,]<-rmnorm(theta[i-1,t,],S[[t]]) # generate candidate for each temperature
#
# # if(i>1000){ # for reversibility when radius-based adapting
# # for(t in 1:ntemps){
# # #browser()
# # dist<-sqrt(colSums((t(theta[1:(i-2),t,])-theta.cand[t,])^2))
# # use<-which(dist<=quantile(dist,.15))
# # mu.rev[[t]]<-colMeans(theta[use,t,])
# # cov.rev[[t]]<-cov(theta[use,t,])
# # S.rev[[t]]<-cov.rev[[t]]*cc+diag(eps*cc,p)
# # }
# # }
#
# # pred.cand<-matrix(
# # predict(mod,
# # tran(matrix(theta.cand,ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget=T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps) # get BASS prediction at each candidate (major speedup by vectorizing across temperatures)
#
# pred.cand<-matrix(mod(tran(matrix(theta.cand,ncol=p))),nrow=ntemps)
#
# for(t in 1:ntemps){ # loop over temperatures, do a block MCMC update
# if(any(theta.cand[t,]<0 | theta.cand[t,]>1))
# alpha<- -9999
# else{
# alpha<- (-.5/s2[i-1,t]*itl[t]*(sum((y-pred.cand[t,])^2)-sum((y-pred.curr[t,])^2)) # posterior
# #-mnormt::dmnorm(theta.cand[t,],theta[i-1,t,],S[[t]],log = T) + mnormt::dmnorm(theta[i-1,t,],theta.cand[t,],S.rev[[t]],log = T) # proposal when using radius-based adapting
# )
# }
# if(log(runif(1))<alpha){
# theta[i,t,]<-theta.cand[t,]
# count[t,t]<-count[t,t]+1
# pred.curr[t,]<-pred.cand[t,]
# count100[t]<-count100[t]+1
# }
# }
#
# ########################################################
# ## acceptance-rate based adaptation of covariance scale
# if(i%%100==0){
# delta<-min(.1,1/sqrt(i)*5)
# for(t in 1:ntemps){
# if(count100[t]<23){
# tau[t]<-tau[t]-delta
# } else if(count100[t]>23){
# tau[t]<-tau[t]+delta
# }
# } # could be vectorized, but probably not expensive
# count100<-count100*0
# }
#
#
# ########################################################
# ## decorrelation step for theta, especially to decorrelate the thetas that dont change anything
#
# if(i%%decor==0){ # every so often, do a decorrelation step (use single-site independence sampler)
# for(k in 1:p){
# theta.cand<-theta[i,,,drop=F] # most up-to-date value
# theta.cand[1,,k]<-runif(ntemps) # independence sampler candidate (vectorize over temperatures)
# # pred.cand<-matrix(
# # predict(mod,
# # tran(matrix(theta.cand[1,,],ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget=T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps)
# pred.cand<-matrix(mod(tran(matrix(theta.cand[1,,],ncol=p))),nrow=ntemps)
# for(t in 1:ntemps){
# alpha<- -.5/s2[i-1,t]*itl[t]*(sum((y-pred.cand[t,])^2)-sum((y-pred.curr[t,])^2)) # could do with colsums, but this is cheap
# if(log(runif(1))<alpha){
# theta[i,t,k]<-theta.cand[1,t,k]
# count.decor[k,t]<-count.decor[k,t]+1
# pred.curr[t,]<-pred.cand[t,]
# }
# }
# }
# }
#
# ########################################################
# ## update s2 with gibbs step
#
# s2[i,]<-rigammaTemper(ntemps,ny/2+a, b+colSums((t(pred.curr)-y)^2)/2, itl) # update error variance
#
# #if(i==10000){
# # browser()
# #}
#
# #pred.curr<-predict(mod,theta[i,,],mcmc.use=sample(ns,size=1),trunc.error=F,nugget=T)[1,,]
#
# ########################################################
# ## tempering swaps
#
# if(i>1000 & ntemps>1){ # tempering swap
# for(dummy in 1:ntemps){ # repeat tempering step a bunch of times
# sw<-sort(sample(ntemps,size=2))
#
# alpha<-(itl[sw[2]]-itl[sw[1]])*(
# -ny/2*log(s2[i,sw[1]]) - .5/s2[i,sw[1]]*sum((y-pred.curr[sw[1],])^2) -(a+1)*log(s2[i,sw[1]])-b/s2[i,sw[1]]
# +ny/2*log(s2[i,sw[2]]) + .5/s2[i,sw[2]]*sum((y-pred.curr[sw[2],])^2) +(a+1)*log(s2[i,sw[2]])+b/s2[i,sw[2]]
# )
#
# if(log(runif(1))<alpha){
# temp<-theta[i,sw[1],]
# theta[i,sw[1],]<-theta[i,sw[2],]
# theta[i,sw[2],]<-temp
# temp<-s2[i,sw[1]]
# s2[i,sw[1]]<-s2[i,sw[2]]
# s2[i,sw[2]]<-temp
# count[sw[1],sw[2]]<-count[sw[1],sw[2]]+1
# temp<-pred.curr[sw[1],] # not sampling posterior predictive each time, for speed
# pred.curr[sw[1],]<-pred.curr[sw[2],]
# pred.curr[sw[2],]<-temp
# }
# }
# }
#
# ########################################################
# ## take a new posterior predictive sample from the emulator (helps not get stuck in a mode from a particularly good sample)
# # pred.curr<-matrix(
# # predict(mod,
# # tran(matrix(theta[i,,],ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget=T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps)
#
# #if(!all(pred.curr == matrix(mod(tran(theta[i,,])),nrow=ntemps)))
# # browser()
#
#
# if(verbose & i%%100==0){
# pr<-c('MCMC iteration',i,myTimestamp(),'count:',diag(count))
# cat(pr,'\n')
# }
# }
#
# #th2<-pnorm(theta)
# for(ii in 1:p){
# theta[,,ii]<-unscale.range(theta[,,ii],bounds[,ii])
# }
# return(list(theta=theta,s2=s2,count=count,count.decor=count.decor,tau=tau))
# }
#
#
#
#
#
# # calibrateIndep<-function(mod,y,a,b,nmcmc,verbose=T){ # assumes inputs to mod are standardized to (0,1), equal variance for all y values (should change to sim covariance)
# # p<-ncol(mod$dat$xx)
# # ny<-length(y)
# # ns<-mod$mod.list[[1]]$nmcmc-mod$mod.list[[1]]$nburn # number of emu mcmc samples
# #
# # theta<-matrix(nrow=nmcmc,ncol=p)
# # s2<-rep(NA,nmcmc)
# #
# # #browser()
# # theta[1,]<-.5
# # pred.curr<-predict(mod,theta[1,,drop=F],mcmc.use=sample(ns,size=1),trunc.error=F)
# # s2[1]<-1/rgamma(1,ny/2+a,b+sum((y-pred.curr)^2))
# #
# # count<-rep(0,p)
# # for(i in 2:nmcmc){
# # s2[i]<-1/rgamma(1,ny/2+a,b+sum((y-pred.curr)^2))
# #
# # theta[i,]<-theta[i-1,]
# #
# # for(j in 1:p){
# # theta.cand<-theta[i,]
# # theta.cand[j]<-runif(1)
# # pred.cand<-predict(mod,t(theta.cand),mcmc.use=sample(ns,size=1),trunc.error=F)
# # alpha<- -.5/s2[i]*(sum((y-pred.cand)^2)-sum((y-pred.curr)^2))
# # if(log(runif(1))<alpha){
# # theta[i,]<-theta.cand
# # count[j]<-count[j]+1
# # }
# # }
# #
# # pred.curr<-predict(mod,theta[i,,drop=F],mcmc.use=sample(ns,size=1),trunc.error=F)
# #
# # if(verbose & i%%100==0){
# # pr<-c('MCMC iteration',i,myTimestamp(),'count:',count)
# # cat(pr,'\n')
# # }
# # }
# #
# # return(list(theta=theta,s2=s2,count=count))
# # }
#
# ldig.kernal<-function(x,a,b)
# (-a-1)*log(x) - b/x
#
# calibrate.probit<-function(mod,y,s2.est,s2.df,bounds,nmcmc,tl=1,verbose=T,decor=100,pred.ncores=1,pred.parType='fork',trunc.error=F){ # assumes inputs to mod are standardized to (0,1), equal variance for all y values (should change to sim covariance)
# # if(class(mod)=='bass'){
# # p<-mod$p
# # ns<-length(mod$s2) # number of emu mcmc samples
# # } else if(class(mod)=='bassBasis'){
# # p<-ncol(mod$dat$xx)
# # ns<-length(mod$mod.list[[1]]$s2) # number of emu mcmc samples
# # }
#
# p<-ncol(bounds)
# a<-s2.df/2
# b<-a*sd.est^2
#
# lpost<-NA
# # could allow s2.ind vector, like in python code
# # or could allow s2.mult for a functional setup
#
# ny<-length(y)
# ntemps<-length(tl)
#
# theta<-array(dim=c(nmcmc,ntemps,p))
# s2<-matrix(nrow=nmcmc,ncol=ntemps)
# #temp.ind<-matrix(nrow=nmcmc,ncol=ntemps)
#
# itl<-1/tl # inverse temperature ladder
#
# tran<-function(th){
# th<-pnorm(th)
# for(i in 1:ncol(th)){
# th[,i]<-unscale.range(th[,i],bounds[,i])
# }
# th
# }
#
# #browser()
# theta[1,,]<-rnorm(prod(dim(theta[1,,,drop=F])))
# # pred.curr<-matrix(
# # predict(mod,
# # tran(matrix(theta[1,,],ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget = T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps)
# pred.curr<-matrix(mod(tran(matrix(theta[1,,],ncol=p))),nrow=ntemps)
# s2[1,]<-1/rgammaTemper(ntemps, ny/2+a, b+colSums((t(pred.curr)-y)^2)/2, itl)
#
#
#
# eps<-1e-13
# tau<-rep(-0,ntemps) # scaling
# cc<-2.4^2/p
# S<-mu<-cov<-S.rev<-mu.rev<-cov.rev<-list()
# for(t in 1:ntemps)
# S[[t]]<-diag(p)*1e-6
# count<-matrix(0,nrow=ntemps,ncol=ntemps)
# count.decor<-matrix(0,nrow=p,ncol=ntemps)
# count100<-rep(0,ntemps)
#
#
# for(i in 2:nmcmc){
#
# theta[i,,]<-theta[i-1,,] # current set at previous (update below)
#
# ########################################################
# ## adaptive block update for theta within each temperature - covariance of previous samples scaled (by exp(tau)) based on acceptance rate for last 100 samples, since tempering makes large gaps
#
# if(i==300){ # start adapting
# for(t in 1:ntemps){
# mu[[t]]<-colMeans(theta[1:(i-1),t,])
# cov[[t]]<-cov(theta[1:(i-1),t,])
# S[[t]]<-cov(theta[1:(i-1),t,])*cc+diag(eps*cc,p)
# }
# }
# if(i>300){ # adaptation updates
# for(t in 1:ntemps){
# mu[[t]]<-mu[[t]]+(theta[(i-1),t,]-mu[[t]])/(i-1)
# cov[[t]]<-(i-2)/(i-1)*cov[[t]] + (i-2)/(i-1)^2*tcrossprod(theta[(i-1),t,]-mu[[t]])
# S[[t]]<-(cov[[t]]*cc+diag(eps*cc,p))*exp(tau[t])
# }
# }
#
# theta.cand<-matrix(nrow=ntemps,ncol=p)
# for(t in 1:ntemps)
# theta.cand[t,]<-rmnorm(theta[i-1,t,],S[[t]]) # generate candidate for each temperature
#
# # pred.cand<-matrix(
# # predict(mod,
# # tran(matrix(theta.cand,ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget=T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps) # get BASS prediction at each candidate (major speedup by vectorizing across temperatures)
# pred.cand<-matrix(mod(tran(matrix(theta.cand,ncol=p))),nrow=ntemps)
#
# for(t in 1:ntemps){ # loop over temperatures, do a block MCMC update
# #alpha<- (-.5/s2[i-1,t]*itl[t]*(sum((y-pred.cand[t,])^2)-sum((y-pred.curr[t,])^2)) + itl[t]*(sum(dnorm(theta.cand[t,],log=T)) - sum(dnorm(theta[i-1,t,],log=T))) )
# alpha<-itl[t]*(
# sum(dnorm(y,pred.cand[t,],sqrt(s2[i-1,t]),log=T))
# -sum(dnorm(y,pred.curr[t,],sqrt(s2[i-1,t]),log=T))
# +sum(dnorm(theta.cand[t,],log=T))
# -sum(dnorm(theta[i-1,t,],log=T))
# )
# if(log(runif(1))<alpha){
# theta[i,t,]<-theta.cand[t,]
# count[t,t]<-count[t,t]+1
# pred.curr[t,]<-pred.cand[t,]
# count100[t]<-count100[t]+1
# }
# }
#
# ########################################################
# ## acceptance-rate based adaptation of covariance scale
# if(i%%100==0){
# delta<-min(.1,1/sqrt(i)*5)
# for(t in 1:ntemps){
# if(count100[t]<23){
# tau[t]<-tau[t]-delta
# } else if(count100[t]>23){
# tau[t]<-tau[t]+delta
# }
# } # could be vectorized, but probably not expensive
# count100<-count100*0
# }
#
#
# ########################################################
# ## decorrelation step for theta, especially to decorrelate the thetas that dont change anything
#
# if(i%%decor==0 & i>1000){ # every so often, do a decorrelation step (use single-site independence sampler)
# for(k in 1:p){
# theta.cand<-theta[i,,,drop=F] # most up-to-date value
# theta.cand[1,,k]<-rnorm(ntemps) # independence sampler candidate (vectorize over temperatures)
# #pred.cand<-matrix(
# # predict(mod,
# # tran(matrix(theta.cand[1,,],ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget=T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps)
# pred.cand<-matrix(mod(tran(matrix(theta.cand[1,,],ncol=p))),nrow=ntemps)
#
# for(t in 1:ntemps){
# #alpha<- -.5/s2[i-1,t]*itl[t]*(sum((y-pred.cand[t,])^2)-sum((y-pred.curr[t,])^2)) + itl[t]*(sum(dnorm(theta.cand[1,t,],log=T)) - sum(dnorm(theta[i,t,],log=T))) # could do with colsums, but this is cheap
# alpha<-itl[t]*(
# sum(dnorm(y,pred.cand[t,],sqrt(s2[i-1,t]),log=T))
# -sum(dnorm(y,pred.curr[t,],sqrt(s2[i-1,t]),log=T))
# +dnorm(theta.cand[1,t,k],log=T)
# -dnorm(theta[i,t,k],log=T)
# )- dnorm(theta.cand[1,t,k],log=T)+ dnorm(theta[i,t,k],log=T)
#
# if(log(runif(1))<alpha){
# #browser()
# theta[i,t,k]<-theta.cand[1,t,k]
# count.decor[k,t]<-count.decor[k,t]+1
# pred.curr[t,]<-pred.cand[t,]
# }
# }
# }
# }
#
# ########################################################
# ## update s2 with gibbs step
#
# #browser()
# s2[i,]<-1/rgammaTemper(ntemps,ny/2+a, b+colSums((t(pred.curr)-y)^2)/2, itl) # update error variance
#
# ########################################################
# ## tempering swaps
#
# if(i>1000 & ntemps>1){ # tempering swap
# #browser()
# for(dummy in 1:ntemps){ # repeat tempering step a bunch of times
# sw<-sort(sample(ntemps,size=2))
#
# # alpha<-(itl[sw[2]]-itl[sw[1]])*(
# # -ny/2*log(s2[i,sw[1]]) - .5/s2[i,sw[1]]*sum((y-pred.curr[sw[1],])^2) -(a+1)*log(s2[i,sw[1]])-b/s2[i,sw[1]]
# # +ny/2*log(s2[i,sw[2]]) + .5/s2[i,sw[2]]*sum((y-pred.curr[sw[2],])^2) +(a+1)*log(s2[i,sw[2]])+b/s2[i,sw[2]]
# # + sum(dnorm(theta[i,sw[1],],log=T)) - sum(dnorm(theta[i,sw[2],],log=T))
# # )
#
# alpha<-(itl[sw[2]]-itl[sw[1]])*(
# sum(dnorm(y,pred.curr[sw[1],],sqrt(s2[i,sw[1]]),log=T))
# -sum(dnorm(y,pred.curr[sw[2],],sqrt(s2[i,sw[2]]),log=T))
# +sum(dnorm(theta[i,sw[1],],log=T))
# -sum(dnorm(theta[i,sw[2],],log=T))
# +ldig.kernal(s2[i,sw[1]],a,b)
# -ldig.kernal(s2[i,sw[2]],a,b)
# )
#
# if(log(runif(1))<alpha){
# if(sw[1]==0 & any(abs(theta[i,sw[2],])>4.5)){
# print('bad')
# }
# #temp<-theta[i,sw[1],]
# #theta[i,sw[1],]<-theta[i,sw[2],]
# #theta[i,sw[2],]<-temp
# theta[i,sw,]<-theta[i,sw[2:1],]
# #temp<-s2[i,sw[1]]
# #s2[i,sw[1]]<-s2[i,sw[2]]
# #s2[i,sw[2]]<-temp
# s2[i,sw]<-s2[i,sw[2:1]]
# count[sw[1],sw[2]]<-count[sw[1],sw[2]]+1
# #temp<-pred.curr[sw[1],] # not sampling posterior predictive each time, for speed
# #pred.curr[sw[1],]<-pred.curr[sw[2],]
# #pred.curr[sw[2],]<-temp
# pred.curr[sw,]<-pred.curr[sw[2:1],]
# }
# }
# }
#
# ########################################################
# ## take a new posterior predictive sample from the emulator (helps not get stuck in a mode from a particularly good sample)
# # pred.curr<-matrix(
# # predict(mod,
# # tran(matrix(theta[i,,],ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget=T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps)
#
# lpost[i]<-(sum(dnorm(y,pred.curr[1,],sqrt(s2[i,1]),log=T))
# +sum(dnorm(theta[i,1,],log=T))
# +ldig.kernal(s2[i,1],a,b)
# )
#
#
# if(verbose & i%%1000==0){
# pr<-c('MCMC iteration',i,myTimestamp(),'count:',diag(count))
# cat(pr,'\n')
# }
# }
#
# #th2<-pnorm(theta)
# #for(ii in 1:p){
# # theta[,,ii]<-unscale.range(pnorm(theta[,,ii]),bounds[,ii])
# #}
# return(list(theta=theta,s2=s2,count=count,count.decor=count.decor,tau=tau,lpost=lpost))
# }
#
#
#
#
#
#
#
# #calibrate.full<-function(mod,y,s2.est,s2.df,bounds,nmcmc,tl=1,verbose=T,decor=100,pred.ncores=1,pred.parType='fork',trunc.error=F){ # assumes inputs to mod are standardized to (0,1), equal variance for all y values (should change to sim covariance)
#
# #}
#
#
# calibrate.naive.cut<-function(mod,y,s2.est,s2.df,bounds,nmcmc,tl=1,verbose=T,decor=100,pred.ncores=1,pred.parType='fork',trunc.error=F,stoch=F){ # assumes inputs to mod are standardized to (0,1), equal variance for all y values (should change to sim covariance)
#
#
# p<-ncol(bounds)
# a<-s2.df/2
# b<-a*sd.est^2
#
# # could allow s2.ind vector, like in python code
# # or could allow s2.mult for a functional setup
#
# ny<-length(y)
# ntemps<-length(tl)
#
# theta<-array(dim=c(nmcmc,ntemps,p))
# s2<-matrix(nrow=nmcmc,ncol=ntemps)
# #temp.ind<-matrix(nrow=nmcmc,ncol=ntemps)
#
# itl<-1/tl # inverse temperature ladder
#
# tran<-function(th){
# #th2<-pnorm(th)
# for(i in 1:ncol(th)){
# th[,i]<-unscale.range(th[,i],bounds[,i])
# }
# th
# }
#
# #browser()
# theta[1,,]<-runif(prod(dim(theta[1,,,drop=F])))
# # pred.curr<-matrix(
# # predict(mod,
# # tran(matrix(theta[1,,],ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget = T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps)
#
# pred.curr<-matrix(mod(tran(matrix(theta[1,,],ncol=p))),nrow=ntemps)
# s2[1,]<-rigammaTemper(ntemps, ny/2+a, b+colSums((t(pred.curr)-y)^2)/2, itl)
#
#
#
# eps<-1e-13
# tau<-rep(-4,ntemps) # scaling
# cc<-2.4^2/p
# S<-mu<-cov<-S.rev<-mu.rev<-cov.rev<-list()
# for(t in 1:ntemps)
# S[[t]]<-diag(p)*1e-6
# count<-matrix(0,nrow=ntemps,ncol=ntemps)
# count.decor<-matrix(0,nrow=p,ncol=ntemps)
# count100<-rep(0,ntemps)
#
#
# for(i in 2:nmcmc){
#
# theta[i,,]<-theta[i-1,,] # current set at previous (update below)
#
# ########################################################
# ## adaptive block update for theta within each temperature - covariance of previous samples scaled (by exp(tau)) based on acceptance rate for last 100 samples, since tempering makes large gaps
#
# if(i==300){ # start adapting
# for(t in 1:ntemps){
# mu[[t]]<-colMeans(theta[1:(i-1),t,])
# cov[[t]]<-cov(theta[1:(i-1),t,])
# S[[t]]<-cov(theta[1:(i-1),t,])*cc+diag(eps*cc,p)
# }
# }
# if(i>300){ # adaptation updates
# for(t in 1:ntemps){
# mu[[t]]<-mu[[t]]+(theta[(i-1),t,]-mu[[t]])/(i-1)
# cov[[t]]<-(i-2)/(i-1)*cov[[t]] + (i-2)/(i-1)^2*tcrossprod(theta[(i-1),t,]-mu[[t]])
# S[[t]]<-(cov[[t]]*cc+diag(eps*cc,p))*exp(tau[t])
# }
# }
#
#
#
# theta.cand<-matrix(nrow=ntemps,ncol=p)
# for(t in 1:ntemps)
# theta.cand[t,]<-rmnorm(theta[i-1,t,],S[[t]]) # generate candidate for each temperature
#
#
# # pred.cand<-matrix(
# # predict(mod,
# # tran(matrix(theta.cand,ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget=T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps) # get BASS prediction at each candidate (major speedup by vectorizing across temperatures)
#
# pred.cand<-matrix(mod(tran(matrix(theta.cand,ncol=p))),nrow=ntemps)
#
# for(t in 1:ntemps){ # loop over temperatures, do a block MCMC update
# if(any(theta.cand[t,]<0 | theta.cand[t,]>1))
# alpha<- -9999
# else{
# alpha<- (-.5/s2[i-1,t]*itl[t]*(sum((y-pred.cand[t,])^2)-sum((y-pred.curr[t,])^2)) # posterior
# #-mnormt::dmnorm(theta.cand[t,],theta[i-1,t,],S[[t]],log = T) + mnormt::dmnorm(theta[i-1,t,],theta.cand[t,],S.rev[[t]],log = T) # proposal when using radius-based adapting
# )
# }
# if(log(runif(1))<alpha){
# theta[i,t,]<-theta.cand[t,]
# count[t,t]<-count[t,t]+1
# pred.curr[t,]<-pred.cand[t,]
# count100[t]<-count100[t]+1
# }
# }
#
# ########################################################
# ## acceptance-rate based adaptation of covariance scale
# if(i%%100==0){
# delta<-min(.1,1/sqrt(i)*5)
# for(t in 1:ntemps){
# if(count100[t]<23){
# tau[t]<-tau[t]-delta
# } else if(count100[t]>23){
# tau[t]<-tau[t]+delta
# }
# } # could be vectorized, but probably not expensive
# count100<-count100*0
# }
#
#
# ########################################################
# ## decorrelation step for theta, especially to decorrelate the thetas that dont change anything
#
# if(i%%decor==0){ # every so often, do a decorrelation step (use single-site independence sampler)
# for(k in 1:p){
# theta.cand<-theta[i,,,drop=F] # most up-to-date value
# theta.cand[1,,k]<-runif(ntemps) # independence sampler candidate (vectorize over temperatures)
# # pred.cand<-matrix(
# # predict(mod,
# # tran(matrix(theta.cand[1,,],ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget=T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps)
# pred.cand<-matrix(mod(tran(matrix(theta.cand[1,,],ncol=p))),nrow=ntemps)
# for(t in 1:ntemps){
# alpha<- -.5/s2[i-1,t]*itl[t]*(sum((y-pred.cand[t,])^2)-sum((y-pred.curr[t,])^2)) # could do with colsums, but this is cheap
# if(log(runif(1))<alpha){
# theta[i,t,k]<-theta.cand[1,t,k]
# count.decor[k,t]<-count.decor[k,t]+1
# pred.curr[t,]<-pred.cand[t,]
# }
# }
# }
# }
#
# ########################################################
# ## update s2 with gibbs step
#
# s2[i,]<-rigammaTemper(ntemps,ny/2+a, b+colSums((t(pred.curr)-y)^2)/2, itl) # update error variance
#
# #pred.curr<-predict(mod,theta[i,,],mcmc.use=sample(ns,size=1),trunc.error=F,nugget=T)[1,,]
#
# ########################################################
# ## tempering swaps
#
# if(i>1000 & ntemps>1){ # tempering swap
# for(dummy in 1:ntemps){ # repeat tempering step a bunch of times
# sw<-sort(sample(ntemps,size=2))
#
# alpha<-(itl[sw[2]]-itl[sw[1]])*(
# -ny/2*log(s2[i,sw[1]]) - .5/s2[i,sw[1]]*sum((y-pred.curr[sw[1],])^2) -(a+1)*log(s2[i,sw[1]])-b/s2[i,sw[1]]
# +ny/2*log(s2[i,sw[2]]) + .5/s2[i,sw[2]]*sum((y-pred.curr[sw[2],])^2) +(a+1)*log(s2[i,sw[2]])+b/s2[i,sw[2]]
# )
#
# if(log(runif(1))<alpha){
# temp<-theta[i,sw[1],]
# theta[i,sw[1],]<-theta[i,sw[2],]
# theta[i,sw[2],]<-temp
# temp<-s2[i,sw[1]]
# s2[i,sw[1]]<-s2[i,sw[2]]
# s2[i,sw[2]]<-temp
# count[sw[1],sw[2]]<-count[sw[1],sw[2]]+1
# temp<-pred.curr[sw[1],] # not sampling posterior predictive each time, for speed
# pred.curr[sw[1],]<-pred.curr[sw[2],]
# pred.curr[sw[2],]<-temp
# }
# }
# }
#
# ########################################################
# ## take a new posterior predictive sample from the emulator (helps not get stuck in a mode from a particularly good sample)
# # pred.curr<-matrix(
# # predict(mod,
# # tran(matrix(theta[i,,],ncol=p)),
# # mcmc.use=sample(ns,size=1),
# # trunc.error=trunc.error,
# # nugget=T,
# # n.cores=pred.ncores,
# # parType = pred.parType),
# # nrow=ntemps)
#
# #if(!all(pred.curr == matrix(mod(tran(theta[i,,])),nrow=ntemps)))
# # browser()
#
#
# if(verbose & i%%100==0){
# pr<-c('MCMC iteration',i,myTimestamp(),'count:',diag(count))
# cat(pr,'\n')
# }
# }
#
# #th2<-pnorm(theta)
# for(ii in 1:p){
# theta[,,ii]<-unscale.range(theta[,,ii],bounds[,ii])
# }
# return(list(theta=theta,s2=s2,count=count,count.decor=count.decor,tau=tau))
#
#
# }
| /scratch/gouwar.j/cran-all/cranData/BASS/R/calib.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
#' @title Print BASS Details
#'
#' @description Print some of the details of a BASS model.
#' @param x a \code{bass} object, returned from \code{bass}.
#' @param ... further arguments passed to or from other methods.
#' @export
#'
print.bass<-function(x,...){
cat("\nCall:\n",deparse(x$call),'\n')
# numeric inputs
p.cat<-sum(x$cx=='factor')
p.num<-x$p-p.cat
p.func<-ncol(x$xx.func)
reps<-nrow(x$xx.des)
func.grid<-nrow(x$xx.func)
if(p.cat==0)
cat("\n Number of variables: ",x$p,sep='')
if(p.cat>0)
cat("\n Number of variables: ",x$p,' (',p.cat,' categorical)',sep='')
cat("\n Sample size: ",reps,sep='')
if(!is.null(p.func)){
cat("\n Number of functional variables:",p.func)
cat("\n Functional grid size:",func.grid)
}
cat('\n\n')
}
#' @title Print BASS Details
#'
#' @description Print some of the details of a BASS model.
#' @param x a \code{bassBasis} object, returned from \code{bassPCA} or \code{bassBasis}.
#' @param ... further arguments passed to or from other methods.
#' @export
#'
print.bassBasis<-function(x,...){
# numeric inputs
p.cat<-sum(x$mod.list[[1]]$cx=='factor')
p.num<-x$mod.list[[1]]$p-p.cat
reps<-nrow(x$mod.list[[1]]$xx.des)
if(p.cat==0)
cat("\n Number of variables: ",x$mod.list[[1]]$p,sep='')
if(p.cat>0)
cat("\n Number of variables: ",x$mod.list[[1]]$p,' (',p.cat,' categorical)',sep='')
cat("\n Sample size: ",reps,sep='')
cat("\n Number of output basis functions: ",length(x$mod.list))
cat('\n\n')
}
#' @title Summarize BASS Details
#'
#' @description Summarize some of the details of a BASS model.
#' @param object a \code{bassBasis} object, returned from \code{bassPCA} or \code{bassBasis}.
#' @param ... further arguments passed to or from other methods.
#' @export
#'
summary.bassBasis<-function(object,...){
x<-object
p.cat<-sum(x$mod.list[[1]]$cx=='factor')
p.num<-x$mod.list[[1]]$p-p.cat
reps<-nrow(x$mod.list[[1]]$xx.des)
if(p.cat==0)
cat("\n Number of variables: ",x$mod.list[[1]]$p,sep='')
if(p.cat>0)
cat("\n Number of variables: ",x$mod.list[[1]]$p,' (',p.cat,' categorical)',sep='')
cat("\n Sample size: ",reps,sep='')
cat("\n Number of output basis functions: ",length(x$mod.list))
cat('\n\n')
}
#' @title Summarize BASS Details
#'
#' @description Summarize some of the details of a BASS model.
#' @param object a \code{bass} object, returned from \code{bass}.
#' @param ... further arguments passed to or from other methods.
#' @export
#'
summary.bass<-function(object,...){
cat("\nCall:\n",deparse(object$call),'\n')
# numeric inputs
p.cat<-sum(object$cx=='factor')
p.num<-object$p-p.cat
p.func<-ncol(object$xx.func)
reps<-nrow(object$xx.des)
func.grid<-nrow(object$xx.func)
if(p.cat==0)
cat("\n Number of variables: ",object$p,sep='')
if(p.cat>0)
cat("\n Number of variables: ",object$p,' (',p.cat,' categorical)',sep='')
cat("\n Sample size: ",reps,sep='')
if(!is.null(p.func)){
cat("\n Number of functional variables:",p.func)
cat("\n Functional grid size:",func.grid)
}
cat("\n\nNumber of basis functions (range):",range(object$nbasis))
cat("\n Posterior mean error variance:",mean(object$s2),'\n\n')
}
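## Usage sketch (wrapped in if(FALSE), so it is never executed): 'mod' is a
## placeholder for a fitted bass object.
if(FALSE){
  print(mod)    # call and data dimensions
  summary(mod)  # also reports the basis-count range and posterior mean error variance
}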
| /scratch/gouwar.j/cran-all/cranData/BASS/R/generics.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## functions used in MCMC
########################################################################
dmwnchBass<-function(z.vec,vars){
alpha<-z.vec[vars]/sum(z.vec[-vars])
j<-length(alpha)
ss<-1 + (-1)^j * 1/(sum(alpha)+1)
for(i in 1:(j-1))
ss <- ss + (-1)^(i) * sum(1/(colSums(combn(alpha,i))+1))
ss
}
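## Illustrative check (wrapped in if(FALSE), so it is never executed): with equal
## weights, dmwnchBass should return the probability of drawing the index set
## `vars` without replacement, here 1/choose(5,2) = 0.1. The commented BiasedUrn
## call mirrors the reference in logProbChangeMod below and assumes that package
## is installed; all inputs are made up for illustration.
if(FALSE){
  z.vec <- rep(1, 5)   # equal selection weights
  vars <- c(1, 3)      # hypothetical selected variables
  dmwnchBass(z.vec, vars)
  # x <- rep(0, 5); x[vars] <- 1
  # BiasedUrn::dMWNCHypergeo(x, rep(1, 5), length(vars), z.vec)
}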
## RJ reversibility term (and prior)
logProbChangeMod<-function(n.int,vars,I.vec,z.vec,p,vars.len,maxInt,miC){
if(n.int==1){ #for acceptance ratio
out<-log(I.vec[n.int+miC])-log(2*p*vars.len[vars]) + #proposal
log(2*p*vars.len[vars])+log(maxInt) # prior
} else{
# perms<-permutations(n.int,n.int,vars)
# sum.perm<-sum(apply(perms,1,function(row){1/prod(1-cumsum(z.vec[row][-n.int]))}))
# lprob.vars.noReplace<-sum(log(z.vec[vars]))+log(sum.perm)
#require(BiasedUrn)
x<-rep(0,p)
x[vars]<-1
#lprob.vars.noReplace<-log(BiasedUrn::dMWNCHypergeo(x,rep(1,p),n.int,z.vec)) - do this in combination with imports: BiasedUrn in DESCRIPTION file, but that has a limit to MAXCOLORS
#lprob.vars.noReplace<-log(dMWNCHypergeo(x,rep(1,p),n.int,z.vec)) #BiasedUrn version
lprob.vars.noReplace<-log(dmwnchBass(z.vec,vars))
out<-log(I.vec[n.int+miC])+lprob.vars.noReplace-n.int*log(2)-sum(log(vars.len[vars])) + # proposal
+n.int*log(2)+sum(log(vars.len[vars]))+lchoose(p,n.int)+log(maxInt) # prior
}
return(out)
}
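## Call sketch (wrapped in if(FALSE), so it is never executed): toy arguments only,
## to illustrate the inputs -- I.vec is a distribution over interaction orders,
## z.vec holds variable weights, and vars.len gives the number of candidate knots
## per variable. Values are arbitrary.
if(FALSE){
  logProbChangeMod(n.int = 2, vars = c(1, 3), I.vec = rep(1/3, 3),
                   z.vec = rep(1, 4), p = 4, vars.len = rep(50, 4),
                   maxInt = 3, miC = 0)
}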
## RJ reversibility term (and prior) for categorical
logProbChangeModCat<-function(n.int,vars,I.vec,z.vec,p,nlevels,sub.size,maxInt,miC){
if(n.int==1){ #for acceptance ratio
out<-log(I.vec[n.int+miC])-log(p*(nlevels[vars]-1))-lchoose(nlevels[vars],sub.size[1:n.int]) + # proposal
log(p*(nlevels[vars]-1))+lchoose(nlevels[vars],sub.size[1:n.int])+log(maxInt) # prior
} else{
x<-rep(0,p)
x[vars]<-1
#lprob.vars.noReplace<-log(dMWNCHypergeo(x,rep(1,p),n.int,z.vec))
lprob.vars.noReplace<-log(dmwnchBass(z.vec,vars))
out<-log(I.vec[n.int+miC])+lprob.vars.noReplace-n.int*sum(log(nlevels[vars]-1))-sum(lchoose(nlevels[vars],sub.size[1:n.int])) + # proposal
n.int*sum(log(nlevels[vars]-1))+sum(lchoose(nlevels[vars],sub.size[1:n.int]))+lchoose(p,n.int)+log(maxInt) # prior
}
if(length(out)>1)
browser()
if(is.na(out))
browser()
return(out)
}
## log posterior
lp<-function(curr,prior,data){
tt<-(
- (curr$s2.rate+prior$g2)/curr$s2
-(data$n/2+1+(curr$nbasis+1)/2 +prior$g1)*log(curr$s2) # changed -g1 to +g1
+ sum(log(abs(diag(curr$R)))) # .5*determinant of XtX
- (curr$nbasis+1)/2*log(2*pi)
+ (prior$h1+curr$nbasis-1)*log(curr$lam) - curr$lam*(prior$h2+1) # curr$nbasis-1 because poisson prior is excluding intercept (for curr$nbasis instead of curr$nbasis+1)
#-lfactorial(curr$nbasis) # added, but maybe cancels with prior
)
if(prior$beta.gprior.ind){
tt<-tt + (prior$a.beta.prec+(curr$nbasis+1)/2-1)*log(curr$beta.prec) - prior$b.beta.prec*curr$beta.prec
}
if(curr$nbasis==0){
return(tt)
}
#priors for basis parameters
if(F){#(data$des){ # should these be involved in tempering??
tt<-tt+(
- sum(curr$n.int.des)*log(2) # signs for each basis function
- sum(lchoose(data$pdes,curr$n.int.des)) # variables for each basis function
- sum(log(data$vars.len.des[na.omit(c(curr$vars.des))])) # knots for each basis function
- curr$nbasis*log(prior$maxInt.des) # degree of interaction for each basis function
)
}
if(F){#(data$cat){
tt<-tt+(
- sum(sapply(1:curr$nbasis,function(i) curr$n.int.cat[i]*sum(log(data$nlevels[na.omit(curr$vars.cat[i,])]-1))))
- sum(sapply(1:curr$nbasis,function(i) sum(lchoose(data$nlevels[na.omit(c(curr$vars.cat[i,]))],curr$sub.size[i,1:curr$n.int.cat[i]]))))
- sum(lchoose(data$pcat,curr$n.int.cat))
- curr$nbasis*log(prior$maxInt.cat)
)
}
if(F){#(data$func){
tt<-tt+(
- sum(curr$n.int.func)*log(2)
- sum(lchoose(data$pfunc,curr$n.int.func))
- sum(log(data$vars.len.func[na.omit(c(curr$vars.func))]))
- curr$nbasis*log(prior$maxInt.func)
)
}
return(tt)
}
## get quadratic form that shows up in RJ acceptance probability
getQf<-function(XtX,Xty){
R<-tryCatch(chol(XtX), error=function(e) matrix(F))
if(R[1,1]){
dr<-diag(R)
if(length(dr)>1){
if(max(dr[-1])/min(dr)>1e3) # TODO: this is a hack, otherwise we get huge variance inflation in beta
return(NULL)
}
bhat<-backsolve(R,forwardsolve(R,Xty,transpose=T,upper.tri=T))
qf<-crossprod(bhat,Xty)# same as sum((R%*%bhat)^2)
return(list(R=R,bhat=bhat,qf=qf))
} else{
return(NULL)
}
}
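## Sanity sketch (wrapped in if(FALSE), so it is never executed): for a
## well-conditioned design, bhat should match the least-squares solution and qf
## should equal crossprod(bhat, Xty). The toy data are made up.
if(FALSE){
  set.seed(1)
  X <- cbind(1, matrix(rnorm(20), 10, 2))
  y <- rnorm(10)
  qf.list <- getQf(crossprod(X), crossprod(X, y))
  cbind(qf.list$bhat, qr.solve(X, y))  # columns should agree
  qf.list$qf
}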
| /scratch/gouwar.j/cran-all/cranData/BASS/R/mcmc_funcs.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## MCMC update
########################################################################
updateMCMC<-function(curr,prior,data,funcs=funcs){
## RJMCMC update
u<-sample(1:3,size=1)
if(curr$nbasis==0){
u<-1 # birth for sure
}
if(curr$nbasis==prior$maxBasis){
u<-sample(2:3,size=1) # no birth
}
#browser()
if(u==1){ # birth
curr<-funcs$birth(curr,prior,data)
} else if(u==2){ # death
curr<-funcs$death(curr,prior,data)
} else{ # change
curr<-funcs$change(curr,prior,data)
}
## Gibbs updates
# beta
curr$beta<-curr$bhat/(1+curr$beta.prec)+curr$R.inv.t%*%rnorm(curr$nc)*sqrt(curr$s2/(1+curr$beta.prec)/data$itemp.ladder[curr$temp.ind])
# lambda
lam.a<-prior$h1+curr$nbasis
lam.b<-prior$h2+1
curr$lam<-rgammaTemper(1,lam.a,lam.b,data$itemp.ladder[curr$temp.ind])
# # s2
# qf2<-crossprod(curr$R%*%curr$beta)
# curr$s2.rate<-(data$ssy + (1+curr$beta.prec)*qf2 - 2*crossprod(curr$beta,curr$Xty[1:curr$nc]))/2
# #cat('True:',sum((getYhat_des_func(curr,curr$nbasis)-data$y)^2)+curr$beta.prec*qf2,curr$s2.rate*2,'\n')
# if(curr$s2.rate<0)
# browser()
# s2.a<-prior$g1+(data$n+curr$nbasis+1)/2 # +1 for intercept
# s2.b<-prior$g2+curr$s2.rate
# if(s2.b<=0){
# prior$g2<-prior$g2+1
# s2.b<-prior$g2+curr$s2.rate
# warning('Increased g2 for numerical stability')
# }
# #curr$s2<-rigammaTemper(1,s2.a,s2.b,data$itemp.ladder[curr$temp.ind])
# curr$s2<-rtigammaTemper(1,s2.a,s2.b,data$itemp.ladder[curr$temp.ind],prior$s2.lower)
# if(is.nan(curr$s2) | is.na(curr$s2)) # major variance inflation, get huge betas from curr$R.inv.t, everything becomes unstable
# browser()
# if(curr$s2==0 | curr$s2>1e10){ # tempering instability, this temperature too small
# #browser()
# curr$s2<-runif(1,0,1e6)
# prior$g2<-prior$g2+1
# warning('High temperature too high...increased g2 for numerical stability')
# }
# t1<-s2.b/(s2.a-1)
# s2 - with beta marginalized out (maybe better stability)
qf2<-crossprod(curr$R%*%curr$beta)
curr$s2.rate<-.5*(data$ssy - crossprod(curr$bhat,curr$Xty[1:curr$nc])/(1+curr$beta.prec))
if(curr$s2.rate<=0)
curr$s2.rate<-.Machine$double.eps
#stop('sum(y^2) too large, please center/rescale y for better stability')
s2.a<-prior$g1+(data$n-prior$beta.jprior.ind)/2
s2.b<-prior$g2+curr$s2.rate
if(s2.b<=0){
prior$g2<-prior$g2+1
s2.b<-prior$g2+curr$s2.rate
warning('Increased g2 for numerical stability')
}
#curr$s2<-rigammaTemper(1,s2.a,s2.b,data$itemp.ladder[curr$temp.ind])
curr$s2<-rtigammaTemper(1,s2.a,s2.b,data$itemp.ladder[curr$temp.ind],prior$s2.lower)
#browser()
  if(is.nan(curr$s2) | is.na(curr$s2)){ # major variance inflation, get huge betas from curr$R.inv.t, everything becomes unstable
    #browser()
  }
  if(curr$s2==0 | curr$s2>1e10){ # tempering instability, this temperature too small
if(curr$s2>(data$ssy/data$n)){
# browser()
curr$s2<-runif(1,0,1e6)
prior$g2<-prior$g2+1
warning('High temperature too high...increased g2 for numerical stability')
}
}
#cat('Compare expected values:',t1,s2.b/(s2.a-1),'\n')
# beta.prec
beta.prec.a<-prior$a.beta.prec+(curr$nbasis+1)/2
beta.prec.b<-prior$b.beta.prec+1/(2*curr$s2)*qf2
curr$beta.prec<-rgammaTemper(1,beta.prec.a,beta.prec.b,data$itemp.ladder[curr$temp.ind])*prior$beta.gprior.ind
## save log posterior
curr$lpost<-lp(curr,prior,data)
return(curr)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/mcmc_update.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## miscellaneous functions
########################################################################
## sample a tempered gamma
rgammaTemper<-function(n,shape,rate,itemper){
rgamma(n,itemper*(shape-1)+1,itemper*rate)
}
## sample a tempered IG
rigammaTemper<-function(n,shape,scale,itemper){
1/rgamma(n,itemper*(shape+1)-1,rate=itemper*scale)
}
## sample a truncated tempered IG
rtigammaTemper<-function(n,shape,scale,itemper,lower){
1/rtgamma(n,1/lower,itemper*(shape+1)-1,rate=itemper*scale)
}
## sample from an upper-truncated gamma
rtgamma<-function(n,upper,shape,rate){
out<-rep(upper,n)
if(pgamma(upper,shape=shape,rate=rate)>0) # if cdf at upper bound is positive, sample, otherwise use upper bound
out<-truncdist::rtrunc(n,'gamma',b=upper,shape=shape,rate=rate)
return(out)
}
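## Quick sketch (wrapped in if(FALSE), so it is never executed): at inverse
## temperature 1 the tempered samplers reduce to ordinary gamma / inverse-gamma
## draws, and rtgamma respects its upper bound. Shape/rate values are arbitrary.
if(FALSE){
  set.seed(1)
  mean(rgammaTemper(1e4, shape = 3, rate = 2, itemper = 1))      # approx 3/2
  mean(1/rigammaTemper(1e4, shape = 3, scale = 2, itemper = 1))  # approx 3/2
  range(rtgamma(1e4, upper = 1, shape = 3, rate = 2))            # bounded above by 1
}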
## scale a vector to be between 0 and 1
scale_range<-function(x,r=NULL){ # x is a vector
if(is.null(r))
r<-range(x)
if((r[2]-r[1])==0)
return(x-r[1])
return((x-r[1])/(r[2]-r[1]))
}
## rescale a vector between 0 and 1 to range r
unscale.range<-function(x,r){
x*(r[2]-r[1])+r[1]
}
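## Round-trip sketch (wrapped in if(FALSE), so it is never executed):
## unscale.range inverts scale_range for a fixed range r; the vector is arbitrary.
if(FALSE){
  x <- c(2, 5, 11)
  r <- range(x)
  all.equal(unscale.range(scale_range(x, r), r), x)  # TRUE
}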
## get yhat under the different scenarios
getYhat_des<-function(curr,nb){
curr$des.basis%*%curr$beta
}
getYhat_cat<-function(curr,nb){
curr$cat.basis%*%curr$beta
}
getYhat_des_cat<-function(curr,nb){
curr$dc.basis%*%curr$beta
}
getYhat_des_func<-function(curr,nb){
tcrossprod(curr$des.basis%*%diag(c(curr$beta),nb+1),curr$func.basis)
}
getYhat_cat_func<-function(curr,nb){
tcrossprod(curr$cat.basis%*%diag(c(curr$beta),nb+1),curr$func.basis)
}
getYhat_des_cat_func<-function(curr,nb){
tcrossprod(curr$dc.basis%*%diag(c(curr$beta),nb+1),curr$func.basis)
}
getYhat_des2<-function(des.basis,beta){
des.basis%*%beta
}
getYhat_des_func2<-function(des.basis,func.basis,beta){
tcrossprod(des.basis%*%diag(beta),func.basis)
}
## for checking inputs
posInt<-function(x){
x==as.integer(x) & x>0
}
## replacement for timestamp(), since that seems to give Rstudio trouble on Windows
myTimestamp<-function(){
x<-Sys.time()
paste('#--',format(x,"%b %d %X"),'--#')
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/misc.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
###############################################################
## plot objects
###############################################################
#' @title BASS Plot Diagnostics
#'
#' @description Generate diagnostic plots for BASS model fit.
#' @param x a \code{bass} object.
#' @param quants quantiles for intervals, if desired. NULL if not desired.
#' @param ... graphical parameters.
#' @details The first two plots are trace plots for diagnosing convergence. The third plot is posterior predicted vs observed, with intervals for predictions. The fourth plot is a histogram of the residuals (of the posterior mean model), with a red curve showing the assumed Normal density (using posterior mean variance). If \code{bass} was run with \code{save.yhat = FALSE}, the third and fourth plots are omitted.
#' @export
#' @import graphics
#' @seealso \link{bass}, \link{predict.bass}, \link{sobol}
#' @examples
#' # See examples in bass documentation.
#'
plot.bass<-function(x,quants=c(.025,.975),...){
if(!inherits(x,'bass'))
stop('x must be an object of class bass')
pred<-T
if(is.null(x$yhat.mean))
pred<-F
op<-par(no.readonly=T)
if(pred)
par(mfrow=c(2,2))
else
par(mfrow=c(1,2))
plot(x$nbasis,type='l',ylab='number of basis functions',xlab='MCMC iteration (post-burn)')
plot(x$s2,type='l',ylab='error variance',xlab='MCMC iteration (post-burn)')
if(pred){
margin<-2
if(x$func)
margin<-2:3
s<-sqrt(x$s2)
if(!is.null(quants)){
qq1<-apply(x$yhat+qnorm(quants[2])*s,margin,quantile,probs=quants[2])
qq2<-apply(x$yhat+qnorm(quants[1])*s,margin,quantile,probs=quants[1])
ylim=range(c(qq1,qq2))
ylab='interval'
} else{
ylim=range(x$yhat.mean)
ylab='mean'
}
plot(x$y,x$yhat.mean,ylim=ylim,ylab=paste('posterior predictive',ylab),xlab='observed',main='Training Fit',type='n',...)
if(!is.null(quants))
segments(x$y,qq1,x$y,qq2,col='lightgrey')
points(x$y,x$yhat.mean)
abline(a=0,b=1,col=2)
hist(x$y-x$yhat.mean,freq=F,main='Posterior mean residuals',xlab='residuals')
curve(dnorm(x,sd=mean(s)),col=2,add=T)
}
par(op)
}
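## Usage sketch (wrapped in if(FALSE), so it is never executed): 'mod' stands in
## for a fitted bass object, as in the examples referenced in the bass documentation.
if(FALSE){
  plot(mod)                 # trace plots, predicted vs observed, residual histogram
  plot(mod, quants = NULL)  # same plots without prediction intervals
}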
#' @title BASS Plot Diagnostics
#'
#' @description Generate diagnostic plots for BASS model fit.
#' @param x a \code{bassBasis} object.
#' @param quants quantiles for intervals, if desired. NULL if not desired.
#' @param pred logical, should predictive performance be plotted?
#' @param ... graphical parameters.
#' @details The first two plots are trace plots for diagnosing convergence. The third plot is posterior predicted vs observed, with intervals for predictions. The fourth plot is a histogram of the residuals (of the posterior mean model). If \code{pred = FALSE}, the third and fourth plots are omitted.
#' @export
#' @import graphics
#' @seealso \link{bassBasis}, \link{bassPCA}, \link{predict.bassBasis}, \link{sobolBasis}
#' @examples
#' # See examples in bassBasis documentation.
#'
plot.bassBasis<-function(x,quants=c(.025,.975),pred=T,...){
if(!inherits(x,'bassBasis'))
stop('x must be an object of class bassBasis')
op<-par(no.readonly=T)
if(pred)
par(mfrow=c(2,2))
else
par(mfrow=c(1,2))
matplot(do.call(cbind,lapply(x$mod.list,function(ii) ii$nbasis)),type='l',ylab='number of basis functions',xlab='MCMC iteration (post-burn)')
matplot(do.call(cbind,lapply(x$mod.list,function(ii) ii$s2)),type='l',ylab='error variance',xlab='MCMC iteration (post-burn)')
if(pred){
pp<-predict(x,x$dat$xx)
mm<-apply(pp,2:3,mean)
if(!is.null(quants)){
qq1<-apply(pp,2:3,quantile,probs=quants[2])
qq2<-apply(pp,2:3,quantile,probs=quants[1])
ylim=range(c(qq1,qq2))
ylab='interval'
} else{
ylim=range(mm,x$dat$y)
ylab='mean'
}
#browser()
plot(x$dat$y,mm,ylim=ylim,xlim=ylim,ylab=paste('posterior predictive',ylab),xlab='observed',main='Training Fit',type='n',...)
if(!is.null(quants))
segments(x$dat$y,qq1,x$dat$y,qq2,col='lightgrey')
points(x$dat$y,mm)
abline(a=0,b=1,col=2)
hist(x$dat$y-mm,freq=F,main='Posterior mean residuals',xlab='residuals')
}
par(op)
}
#' @title Plot BASS sensitivity indices
#'
#' @description Generate plots for sensitivity analysis of BASS.
#' @param x a \code{bassSob} object, returned from \code{sobol}.
#' @param ... graphical parameters.
#' @details If \code{func.var} in the call to \code{sobol} was \code{NULL}, this returns boxplots of sensitivity indices and total sensitivity indices. If there were functional variables, they are labeled with letters alphabetically. For example, in a model with 4 categorical/continuous inputs and 2 functional inputs, the functional inputs are labeled a and b. If \code{func.var} was not \code{NULL}, then posterior mean functional sensitivity indices are plotted, along with the functional partitioned variance. Variables and interactions that are excluded from these plots did not explain any variance.
#' @export
#' @seealso \link{bass}, \link{predict.bass}, \link{sobol}
#' @examples
#' # See examples in bass documentation.
#'
plot.bassSob<-function(x,...){
op<-par(no.readonly=T)
par(mfrow=c(1,2),xpd=T)
if(x$func){
ord<-order(x$xx)
x.mean<-apply(x$S,2:3,mean)
matplot(x$xx[ord],t(apply(x.mean,2,cumsum))[ord,],type='l',xlab='x',ylab='proportion variance',ylim=c(0,1),main='Sensitivity',...)
lab.x<-apply(x.mean,1,which.max)
cs<-rbind(0,apply(x.mean,2,cumsum))
cs.diff<-apply(x.mean,2,function(x) diff(cumsum(c(0,x))))
text(x=x$xx[lab.x],y=cs[cbind(1:length(lab.x),lab.x)] + (cs.diff/2)[cbind(1:length(lab.x),lab.x)],x$names.ind,...)
x.mean.var<-apply(x$S.var,2:3,mean)
matplot(x$xx[ord],t(apply(x.mean.var,2,cumsum))[ord,],type='l',xlab='x',ylab='variance',main='Variance Decomposition',...)
} else{
boxplot(x$S,las=2,ylab='proportion variance',main='Sensitivity',range=0,...)
boxplot(x$T,main='Total Sensitivity',range=0,...)
}
par(op)
}
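## Usage sketch (wrapped in if(FALSE), so it is never executed): 'mod' is a
## placeholder for a fitted bass object; sobol() produces the bassSob object
## that this method plots.
if(FALSE){
  sens <- sobol(mod)
  plot(sens)
}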
| /scratch/gouwar.j/cran-all/cranData/BASS/R/plot.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
###############################################################
## predict methods
###############################################################
scale_range_mat<-function(x,r){
#sweep(sweep(x,2,r[1,]),2,r[2,]-r[1,],FUN='/')
t((t(x)-r[1,])/c(diff(r)))
#(x - matrix(r[1,], dim(x)[1], dim(x)[2], byrow = TRUE))/matrix(diff(r), dim(x)[1], dim(x)[2], byrow = TRUE)
}
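## Equivalence sketch (wrapped in if(FALSE), so it is never executed):
## scale_range_mat should match applying scale_range column by column; the matrix
## and ranges below are arbitrary.
if(FALSE){
  X <- matrix(runif(12, 2, 7), 4, 3)
  r <- apply(X, 2, range)
  all.equal(scale_range_mat(X, r),
            sapply(1:3, function(j) scale_range(X[, j], r[, j])))
}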
#' @title BASS Prediction
#'
#' @description Predict function for BASS. Outputs the posterior predictive samples based on the specified MCMC iterations.
#' @param object a fitted model, output from the \code{bass} function.
#' @param newdata a matrix of new input values at which to predict. The columns should correspond to the same variables used in the \code{bass} function.
#' @param newdata.func a matrix of new values of the functional variable. If none, the same values will be used as in the training data.
#' @param mcmc.use a vector indexing which MCMC iterations to use for prediction.
#' @param verbose logical; should progress be displayed?
#' @param nugget logical; should predictions include random error? If FALSE, predictions are of the mean (no residual error added).
#' @param ... further arguments passed to or from other methods.
#' @details Prediction is made efficient by grouping MCMC iterations that share the same set of basis functions (differing only in their coefficients), so each unique model's basis matrix is built only once.
#' @return If model output is a scalar, this returns a matrix with the same number of rows as \code{newdata} and columns corresponding to the MCMC iterations \code{mcmc.use}. These are samples from the posterior predictive distribution. If model output is functional, this returns an array with first dimension corresponding to MCMC iteration, second dimension corresponding to the rows of \code{newdata}, and third dimension corresponding to the rows of \code{newdata.func}.
#' @seealso \link{bass} for model fitting and \link{sobol} for sensitivity analysis.
#' @export
#' @examples
#' # See examples in bass documentation.
#'
predict.bass<-function(object,newdata,newdata.func=NULL,mcmc.use=NULL,verbose=FALSE,nugget=FALSE,...){
if(is.null(mcmc.use)){ # if null, use all
mcmc.use<-1:((object$nmcmc-object$nburn)/object$thin)
}
if(object$func){
if(is.null(newdata.func))
newdata.func<-object$xx.func
else{
dxf<-dim(newdata.func)
if(is.null(dxf))
newdata.func<-matrix(newdata.func)
for(i in 1:ncol(newdata.func)){
newdata.func[,i]<-scale_range(newdata.func[,i],object$range.func[,i])
}
}
} else{
newdata.func<-t(1) # placeholder
}
tnewdata.func<-t(newdata.func)
dx<-dim(newdata)
if(is.null(dx)){
newdata<-data.frame(newdata)
dx<-dim(newdata)
}
pd<-sum(object$pdes)+sum(object$pcat)
if(dx[2]!=pd){
newdata<-t(newdata)
dx<-dim(newdata)
if(dx[2]!=pd)
stop('number of variables in newdata does not match number used in object')
}
newdata<-as.data.frame(newdata)
cx<-sapply(newdata,class)
cx.factor<- cx == 'factor'
object.cx.factor<- object$cx == 'factor'
#if(!all(cx==object$cx))
# stop('number/order of columns of newdata does not match number/order of inputs used to train object')
if(!all(cx.factor == object.cx.factor))
stop('number/order of columns of newdata does not match number/order of inputs used to train object')
newdata.des<-newdata[,!cx.factor,drop=F]
newdata.cat<-newdata[,cx.factor,drop=F]
if(ncol(newdata.des)>0){
# for(i in 1:ncol(newdata.des)){
# newdata.des[,i]<-scale_range(newdata.des[,i],object$range.des[,i])
# }
# browser()
newdata.des<-scale_range_mat(newdata.des,object$range.des)
}
tnewdata.des<-t(newdata.des)
out<-array(dim=c(length(mcmc.use),nrow(newdata),nrow(newdata.func)))
k<-0
models<-object$model.lookup[mcmc.use]
if(verbose)
cat('Predict Start',myTimestamp(),'Models:',length(unique(models)),'\n')
#func<-eval(parse(text=paste('mult',object$type,sep='')))
func<-get(paste('mult',object$type,sep=''))
mod.ind<-0
for(j in unique(models)){ # loop though models, could be parallel?
mod.ind<-mod.ind+1
mcmc.use.j<-mcmc.use[models==j]
ind<-k+(1:length(mcmc.use.j)) # index for storage
k<-k+length(ind) # used for start of index
out[ind,,]<-func(model=j,mcmc.use.mod=mcmc.use.j,object=object,tnewdata.des=tnewdata.des,newdata.cat=newdata.cat,tnewdata.func=tnewdata.func)
if(verbose & mod.ind%%100==0)
cat('Predict',myTimestamp(),'Model:',mod.ind,'\n')
}
if(nugget)
return(drop(out)+rnorm(n=prod(dim(out)),sd=sqrt(object$s2[mcmc.use])))
return(drop(out))
}
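## Usage sketch (wrapped in if(FALSE), so it is never executed): 'mod' and 'x.test'
## are placeholders for a fitted bass object and an input matrix with the same
## columns used in training.
if(FALSE){
  pred <- predict(mod, x.test, nugget = TRUE)  # samples from the posterior predictive
  pred.mean <- apply(pred, 2, mean)            # posterior predictive mean (scalar-output case)
  # pred.sub <- predict(mod, x.test, mcmc.use = seq(1, 1000, by = 10))  # thinned subset of iterations
}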
predict_fast<-function(object,newdata,newdata.func=NULL,mcmc.use=NULL,verbose=FALSE,...){
if(is.null(mcmc.use)){ # if null, use all
mcmc.use<-1:((object$nmcmc-object$nburn)/object$thin)
}
if(object$func){
if(is.null(newdata.func))
newdata.func<-object$xx.func
else{
dxf<-dim(newdata.func)
if(is.null(dxf))
newdata.func<-matrix(newdata.func)
for(i in 1:ncol(newdata.func)){
newdata.func[,i]<-scale_range(newdata.func[,i],object$range.func[,i])
}
}
} else{
newdata.func<-t(1) # placeholder
}
tnewdata.func<-t(newdata.func)
dx<-dim(newdata)
if(is.null(dx)){
newdata<-data.frame(newdata)
dx<-dim(newdata)
}
pd<-sum(object$pdes)+sum(object$pcat)
if(dx[2]!=pd){
newdata<-t(newdata)
dx<-dim(newdata)
if(dx[2]!=pd)
stop('number of variables in newdata does not match number used in object')
}
newdata<-as.data.frame(newdata)
cx<-sapply(newdata,class)
cx.factor<- cx == 'factor'
if(!all(cx==object$cx))
stop('number/order of columns of newdata does not match number/order of inputs used to train object')
newdata.des<-newdata[,!cx.factor,drop=F]
newdata.cat<-newdata[,cx.factor,drop=F]
#if(ncol(newdata.des)>0){
# for(i in 1:ncol(newdata.des)){
# newdata.des[,i]<-scale_range(newdata.des[,i],object$range.des[,i])
# }
#}
tnewdata.des<-t(newdata.des)
out<-array(dim=c(length(mcmc.use),nrow(newdata),nrow(newdata.func)))
k<-0
models<-object$model.lookup[mcmc.use]
if(verbose)
cat('Predict Start',myTimestamp(),'Models:',length(unique(models)),'\n')
func<-mult_des#eval(parse(text=paste('mult',object$type,sep='')))
mod.ind<-0
for(j in unique(models)){ # loop though models, could be parallel?
mod.ind<-mod.ind+1
mcmc.use.j<-mcmc.use[models==j]
ind<-k+(1:length(mcmc.use.j)) # index for storage
k<-k+length(ind) # used for start of index
out[ind,,]<-func(model=j,mcmc.use.mod=mcmc.use.j,object=object,tnewdata.des=tnewdata.des,newdata.cat=newdata.cat,tnewdata.func=tnewdata.func)
if(verbose & mod.ind%%100==0)
cat('Predict',myTimestamp(),'Model:',mod.ind,'\n')
}
return(drop(out))
}
## make basis functions for model i - continuous portion
makeBasisMatrix<-function(i,nbasis,vars,signs,knots.ind,q,xxt,n.int,xx.train){
n<-ncol(xxt)
tbasis.mat<-matrix(nrow=nbasis+1,ncol=n)
tbasis.mat[1,]<-1
if(nbasis>0){
for(m in 1:nbasis){
if(n.int[i,m]==0){
tbasis.mat[m+1,]<-1
} else{
use<-1:n.int[i,m]
knots<-xx.train[cbind(knots.ind[i,m,use],vars[i,m,use])] # get knots from knots.ind
tbasis.mat[m+1,]<-makeBasis(signs[i,m,use],vars[i,m,use],knots,xxt,q)
}
}
}
return(tbasis.mat)
}
# trying to speed up prediction for large number of basis functions: vectorizing like this doesn't help
# makeBasis_vec<-Vectorize(makeBasis,c('signs','vars','knots'))
#
# makeBasisMatrix<-function(i,nbasis,vars,signs,knots.ind,q,xxt,n.int,xx.train){
# n<-ncol(xxt)
# tbasis.mat<-matrix(nrow=nbasis+1,ncol=n)
# tbasis.mat[1,]<-1
# signs.list<-knots.list<-vars.list<-list()
# if(nbasis>0){
# for(m in 1:nbasis){
# if(n.int[i,m]==0){
# tbasis.mat[m+1,]<-1
# } else{
# use<-1:n.int[i,m]
# knots.list[[m]]<-xx.train[cbind(knots.ind[i,m,use],vars[i,m,use])] # get knots from knots.ind
# signs.list[[m]]<-signs[i,m,use]
# vars.list[[m]]<-vars[i,m,use]
# #tbasis.mat[m+1,]<-makeBasis(signs[i,m,use],vars[i,m,use],knots,xxt,q)
# }
# }
# #browser()
# tbasis.mat<-t(cbind(1,makeBasis_vec(signs.list,vars.list,knots.list,xxt,q)))
# }
# return(tbasis.mat)
# }
## make basis functions for model i - categorical portion
makeBasisMatrixCat<-function(i,nbasis,vars,xx,n.int,sub){
n<-nrow(xx)
tbasis.mat<-matrix(nrow=nbasis+1,ncol=n)
tbasis.mat[1,]<-1
for(m in 1:nbasis){
if(n.int[i,m]==0){
tbasis.mat[m+1,]<-1
} else{
use<-1:n.int[i,m]
tbasis.mat[m+1,]<-makeBasisCat(vars[i,m,use],sub[[i]][[m]],xx)
}
}
return(tbasis.mat)
}
## do multiplication to get yhat under the different scenarios
mult_des<-function(model,mcmc.use.mod,object,tnewdata.des,newdata.cat,tnewdata.func){
M<-object$nbasis[mcmc.use.mod[1]]
#browser()
tmat.des<-makeBasisMatrix(model,M,object$vars,object$signs,object$knotInd,object$degree,tnewdata.des,object$n.int,object$xx.des)
out<-object$beta[mcmc.use.mod,1:(M+1),drop=F]%*%tmat.des
return(out)
}
mult_cat<-function(model,mcmc.use.mod,object,tnewdata.des,newdata.cat,tnewdata.func){
M<-object$nbasis[mcmc.use.mod[1]]
tmat.cat<-makeBasisMatrixCat(model,M,object$vars.cat,newdata.cat,object$n.int.cat,object$sub.list)
out<-object$beta[mcmc.use.mod,1:(M+1),drop=F]%*%tmat.cat
return(out)
}
mult_des_cat<-function(model,mcmc.use.mod,object,tnewdata.des,newdata.cat,tnewdata.func){
M<-object$nbasis[mcmc.use.mod[1]]
tmat.des<-makeBasisMatrix(model,M,object$vars.des,object$signs.des,object$knotInd.des,object$degree,tnewdata.des,object$n.int.des,object$xx.des)
tmat.cat<-makeBasisMatrixCat(model,M,object$vars.cat,newdata.cat,object$n.int.cat,object$sub.list)
out<-object$beta[mcmc.use.mod,1:(M+1),drop=F]%*%(tmat.des*tmat.cat)
return(out)
}
mult_des_func<-function(model,mcmc.use.mod,object,tnewdata.des,newdata.cat,tnewdata.func){
M<-object$nbasis[mcmc.use.mod[1]]
tmat.des<-makeBasisMatrix(model,M,object$vars.des,object$signs.des,object$knotInd.des,object$degree,tnewdata.des,object$n.int.des,object$xx.des)
tmat.func<-makeBasisMatrix(model,M,object$vars.func,object$signs.func,object$knotInd.func,object$degree,tnewdata.func,object$n.int.func,object$xx.func)
out<-array(dim=c(length(mcmc.use.mod),ncol(tnewdata.des),ncol(tnewdata.func)))
for(i in 1:length(mcmc.use.mod)){
out[i,,]<-crossprod(diag(c(object$beta[mcmc.use.mod[i],1:(M+1)]),M+1)%*%tmat.des,tmat.func)
}
return(out)
}
mult_cat_func<-function(model,mcmc.use.mod,object,tnewdata.des,newdata.cat,tnewdata.func){
M<-object$nbasis[mcmc.use.mod[1]]
tmat.cat<-makeBasisMatrixCat(model,M,object$vars.cat,newdata.cat,object$n.int.cat,object$sub.list)
tmat.func<-makeBasisMatrix(model,M,object$vars.func,object$signs.func,object$knotInd.func,object$degree,tnewdata.func,object$n.int.func,object$xx.func)
out<-array(dim=c(length(mcmc.use.mod),nrow(newdata.cat),ncol(tnewdata.func)))
for(i in 1:length(mcmc.use.mod)){
out[i,,]<-crossprod(diag(c(object$beta[mcmc.use.mod[i],1:(M+1)]),M+1)%*%tmat.cat,tmat.func)
}
return(out)
}
mult_des_cat_func<-function(model,mcmc.use.mod,object,tnewdata.des,newdata.cat,tnewdata.func){
M<-object$nbasis[mcmc.use.mod[1]]
tmat.des<-makeBasisMatrix(model,M,object$vars.des,object$signs.des,object$knotInd.des,object$degree,tnewdata.des,object$n.int.des,object$xx.des)
tmat.cat<-makeBasisMatrixCat(model,M,object$vars.cat,newdata.cat,object$n.int.cat,object$sub.list)
tmat.func<-makeBasisMatrix(model,M,object$vars.func,object$signs.func,object$knotInd.func,object$degree,tnewdata.func,object$n.int.func,object$xx.func)
out<-array(dim=c(length(mcmc.use.mod),ncol(tnewdata.des),ncol(tnewdata.func)))
for(i in 1:length(mcmc.use.mod)){
out[i,,]<-crossprod(diag(c(object$beta[mcmc.use.mod[i],1:(M+1)]),M+1)%*%(tmat.des*tmat.cat),tmat.func)
}
return(out)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/predict.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## generate a candidate basis (birth, change)
########################################################################
genCandBasis<-function(minInt,maxInt,I.vec,z.vec,p,xxt,q,xx.unique.ind,vars.len,prior){
# get number of variables in interaction
n.int<-sample(minInt:maxInt,size=1,prob=I.vec)
if(n.int==0)
return(list(basis=rep(1,ncol(xxt)),n.int=n.int,lbmcmp=0))
#return(NULL)
# get signs, vars, knots
signs<-sample(c(-1,1),size=n.int,replace=T)
if(n.int==1){
vars<-sample(1:p,size=1)
#knotInd<-sample.int(vars.len[vars],size=1)
knotInd<-sample(xx.unique.ind[[vars]],size=1)
} else{
vars<-sort(sample(1:p,size=n.int,prob=z.vec,replace=F))
#knotInd<-sapply(vars.len[vars],sample.int,size=1)
knotInd<-sapply(xx.unique.ind[vars],sample,size=1)
}
#browser()
# make basis, get reversibility term
#knots<-sapply(1:n.int,function(nn) xxt.unique[[vars[nn]]][knotInd[nn]])
knots<-xxt[cbind(vars,knotInd)]
basis<-makeBasis(signs,vars,knots,xxt,q)
lpbmcmp<-logProbChangeMod(n.int,vars,I.vec,z.vec,p,vars.len,maxInt,prior$miC)
return(list(basis=basis,n.int=n.int,signs=signs,vars=vars,knotInd=knotInd,knots=knots,lbmcmp=lpbmcmp))
}
genCandBasisCat<-function(minInt,maxInt,I.vec,z.vec,p,xx,nlevels,levels,prior){
# get number of variables in interaction
n.int<-sample(minInt:maxInt,size=1,prob=I.vec)
if(n.int==0)
return(list(basis=rep(1,nrow(xx)),n.int=n.int,lbmcmp=0,sub=list(NA)))
# get vars, subsets
if(n.int==1){
vars<-sample(1:p,size=1)
} else{
vars<-sort(sample(1:p,size=n.int,prob=z.vec,replace=F))
}
sub.size<-NA # for each of vars, number of categories included in subset
sub<-list() # actual subsets
for(ii in 1:n.int){
sub.size[ii]<-sample(1:(nlevels[vars[ii]]-1),size=1) # sample the size of the subset
sub[[ii]]<-sample(levels[[vars[ii]]],size=sub.size[ii]) # sample the subset
}
# make basis, get reversibility term
basis<-makeBasisCat(vars,sub,xx)
lpbmcmp<-logProbChangeModCat(n.int,vars,I.vec,z.vec,p,nlevels,sub.size,maxInt,prior$miC)
if(is.na(lpbmcmp))
browser()
return(list(basis=basis,n.int=n.int,vars=vars,sub.size=sub.size,sub=sub,lbmcmp=lpbmcmp))
}
genBasisChange<-function(curr,basis,int.change,xxt,q,knots,knotInd,signs,vars,xx.unique.ind){
signs[int.change]<-sample(c(-1,1),size=1)
#knotInd[int.change]<-sample.int(vars.len[vars[int.change]],size=1)
knotInd[int.change]<-sample(xx.unique.ind[[vars[int.change]]],size=1)
#knots[int.change]<-xxt.unique[[vars[int.change]]][knotInd[int.change]]
knots[int.change]<-xxt[vars[int.change],knotInd[int.change]]
basis<-makeBasis(signs,vars,knots,xxt,q)
return(list(knots=knots,knotInd=knotInd,signs=signs,basis=basis))
}
genBasisChangeCat<-function(curr,basis,int.change,xx,nlevels,levels,sub.size,sub,vars){
sub.size[int.change]<-sample(1:(nlevels[vars[int.change]]-1),size=1)
sub[[int.change]]<-sample(levels[[vars[int.change]]],size=sub.size[int.change])
basis<-makeBasisCat(vars,sub,xx)
return(list(sub.size=sub.size,sub=sub,basis=basis))
}
########################################################################
## write to curr
########################################################################
addBasis<-function(curr,cand,qf.cand.list,prior){
# basis characteristics
curr$nbasis<-curr$nbasis+1
curr$nc<-curr$nbasis+1
# updates to quantities used elsewhere (XtX & Xty are already updated)
curr$qf<-qf.cand.list$qf
curr$bhat<-qf.cand.list$bhat
curr$R<-qf.cand.list$R
curr$R.inv.t<-backsolve(curr$R,diag(curr$nc))
# diagnostics
curr$cmod<-T
curr$step<-1
curr$count[1]<-curr$count[1]+1
return(curr)
}
addBasisDes<-function(curr,cand,qf.cand.list,prior){
curr$n.int.des[curr$nbasis]<-cand$n.int
fill<-rep(NA,prior$maxInt.des-cand$n.int)
curr$knots.des<-rbind(curr$knots.des,c(cand$knots,fill))
curr$knotInd.des<-rbind(curr$knotInd.des,c(cand$knotInd,fill))
curr$signs.des<-rbind(curr$signs.des,c(cand$signs,fill))
curr$vars.des<-rbind(curr$vars.des,c(cand$vars,fill))
curr$I.star.des[cand$n.int+prior$miC]<-curr$I.star.des[cand$n.int+prior$miC]+1
curr$I.vec.des<-curr$I.star.des/sum(curr$I.star.des)
curr$z.star.des[cand$vars]<-curr$z.star.des[cand$vars]+1
curr$z.vec.des<-curr$z.star.des/sum(curr$z.star.des)
# basis functions
curr$des.basis<-cbind(curr$des.basis,cand$basis)
return(curr)
}
addBasisFunc<-function(curr,cand,qf.cand.list,prior){
curr$n.int.func[curr$nbasis]<-cand$n.int
fill<-rep(NA,prior$maxInt.func-cand$n.int)
curr$knots.func<-rbind(curr$knots.func,c(cand$knots,fill))
curr$knotInd.func<-rbind(curr$knotInd.func,c(cand$knotInd,fill))
curr$signs.func<-rbind(curr$signs.func,c(cand$signs,fill))
curr$vars.func<-rbind(curr$vars.func,c(cand$vars,fill))
curr$I.star.func[cand$n.int+prior$miC]<-curr$I.star.func[cand$n.int+prior$miC]+1
curr$I.vec.func<-curr$I.star.func/sum(curr$I.star.func)
curr$z.star.func[cand$vars]<-curr$z.star.func[cand$vars]+1
curr$z.vec.func<-curr$z.star.func/sum(curr$z.star.func)
# basis functions
curr$func.basis<-cbind(curr$func.basis,cand$basis)
return(curr)
}
addBasisCat<-function(curr,cand,qf.cand.list,prior){
curr$n.int.cat[curr$nbasis]<-cand$n.int
fill<-rep(NA,prior$maxInt.cat-cand$n.int)
curr$sub.size<-rbind(curr$sub.size,c(cand$sub.size,fill))
#browser()
  curr$sub.list[[curr$nbasis]]<-cand$sub # TODO: maybe use a design-matrix representation for factors instead of this list; would avoid the %in% lookups
curr$vars.cat<-rbind(curr$vars.cat,c(cand$vars,fill))
curr$I.star.cat[cand$n.int+prior$miC]<-curr$I.star.cat[cand$n.int+prior$miC]+1
curr$I.vec.cat<-curr$I.star.cat/sum(curr$I.star.cat)
curr$z.star.cat[cand$vars]<-curr$z.star.cat[cand$vars]+1
curr$z.vec.cat<-curr$z.star.cat/sum(curr$z.star.cat)
# basis functions
curr$cat.basis<-cbind(curr$cat.basis,cand$basis)
return(curr)
}
addBasisDC<-function(curr,dc){
curr$dc.basis<-cbind(curr$dc.basis,dc)
return(curr)
}
deleteBasis<-function(curr,basis,ind,qf.cand.list,I.star,I.vec,z.star,z.vec){
# basis characteristics
curr$nbasis<-curr$nbasis-1
curr$nc<-curr$nbasis+1
# updates to quantities used elsewhere
curr$Xty[1:curr$nc]<-curr$Xty[ind]
curr$XtX[1:curr$nc,1:curr$nc]<-curr$XtX[ind,ind]
curr$qf<-qf.cand.list$qf
curr$bhat<-qf.cand.list$bhat
curr$R<-qf.cand.list$R
curr$R.inv.t<-backsolve(curr$R,diag(curr$nc))
# diagnostics
curr$cmod<-T
curr$step<-2
curr$count[2]<-curr$count[2]+1
return(curr)
}
deleteBasisDes<-function(curr,basis,ind,qf.cand.list,I.star,I.vec,z.star,z.vec){
curr$n.int.des<-curr$n.int.des[-basis]
curr$knots.des<-curr$knots.des[-basis,,drop=F]
curr$knotInd.des<-curr$knotInd.des[-basis,,drop=F]
curr$signs.des<-curr$signs.des[-basis,,drop=F]
curr$vars.des<-curr$vars.des[-basis,,drop=F]
curr$I.star.des<-I.star
curr$I.vec.des<-I.vec
curr$z.star.des<-z.star
curr$z.vec.des<-z.vec
# basis functions
curr$des.basis<-curr$des.basis[,-(basis+1),drop=F]
return(curr)
}
deleteBasisFunc<-function(curr,basis,ind,qf.cand.list,I.star,I.vec,z.star,z.vec){
curr$n.int.func<-curr$n.int.func[-basis]
curr$knots.func<-curr$knots.func[-basis,,drop=F]
curr$knotInd.func<-curr$knotInd.func[-basis,,drop=F]
curr$signs.func<-curr$signs.func[-basis,,drop=F]
curr$vars.func<-curr$vars.func[-basis,,drop=F]
curr$I.star.func<-I.star
curr$I.vec.func<-I.vec
curr$z.star.func<-z.star
curr$z.vec.func<-z.vec
# basis functions
curr$func.basis<-curr$func.basis[,-(basis+1),drop=F]
return(curr)
}
deleteBasisCat<-function(curr,basis,ind,qf.cand.list,I.star,I.vec,z.star,z.vec){
curr$n.int.cat<-curr$n.int.cat[-basis]
curr$sub.size<-curr$sub.size[-basis,,drop=F]
curr$sub.list[[basis]]<-NULL
curr$vars.cat<-curr$vars.cat[-basis,,drop=F]
curr$I.star.cat<-I.star
curr$I.vec.cat<-I.vec
curr$z.star.cat<-z.star
curr$z.vec.cat<-z.vec
# basis functions
curr$cat.basis<-curr$cat.basis[,-(basis+1),drop=F]
return(curr)
}
deleteBasisDC<-function(curr,basis.ind){
curr$dc.basis<-curr$dc.basis[,-(basis.ind+1),drop=F]
return(curr)
}
changeBasis<-function(curr,cand,basis,qf.cand.list,XtX.cand,Xty.cand){
# updates to quantities used elsewhere
curr$Xty[basis+1]<-Xty.cand[basis+1]
curr$XtX[1:curr$nc,1:curr$nc]<-XtX.cand
curr$qf<-qf.cand.list$qf
curr$bhat<-qf.cand.list$bhat
curr$R<-qf.cand.list$R
curr$R.inv.t<-backsolve(curr$R,diag(curr$nc))
# diagnostics
curr$cmod<-T
curr$step<-3
curr$count[3]<-curr$count[3]+1
return(curr)
}
changeBasisDes<-function(curr,cand,basis,qf.cand.list,XtX.cand,Xty.cand){
# basis characteristics
curr$knots.des[basis,1:curr$n.int.des[basis]]<-cand$knots
curr$knotInd.des[basis,1:curr$n.int.des[basis]]<-cand$knotInd
curr$signs.des[basis,1:curr$n.int.des[basis]]<-cand$signs
# basis functions
curr$des.basis[,basis+1]<-cand$basis
return(curr)
}
changeBasisFunc<-function(curr,cand,basis,qf.cand.list,XtX.cand,Xty.cand){
# basis characteristics
curr$knots.func[basis,1:curr$n.int.func[basis]]<-cand$knots
curr$knotInd.func[basis,1:curr$n.int.func[basis]]<-cand$knotInd
curr$signs.func[basis,1:curr$n.int.func[basis]]<-cand$signs
# basis functions
curr$func.basis[,basis+1]<-cand$basis
return(curr)
}
changeBasisCat<-function(curr,cand,basis,qf.cand.list,XtX.cand,Xty.cand){
# basis characteristics
curr$sub.size[basis,1:curr$n.int.cat[basis]]<-cand$sub.size
curr$sub.list[[basis]]<-cand$sub
# basis functions
curr$cat.basis[,basis+1]<-cand$basis
return(curr)
}
changeBasisDC<-function(curr,cand,basis){
curr$dc.basis[,basis+1]<-cand
return(curr)
}
# check monotonicity of functional output - for now only with des (no cat) and degree=1
checkMono<-function(tdes.basis.ext,func.basis,bhat){
if(length(bhat)>1)
out<-func.basis%*%diag(c(bhat))%*%tdes.basis.ext
else
out<-func.basis%*%bhat%*%tdes.basis.ext
#browser()
min(diff(out)) >= 0
}
# checkMono<-function(cand.des,cand.func,curr,data){
#
# basis<-makeBasis(cand.des$signs,cand.des$vars,cand.des$knots,data$ext,1)
#
# tdes.basis.ext<-rbind(makeBasisMatrixCurr(curr,data),makeBasis(cand.des$signs,cand.des$vars,cand.des$knots,data$ext,1))
# out<-cbind(curr$func.basis,cand.func$basis)%*%diag(curr$bhat)%*%tdes.basis.ext
# min(apply(out,2,function(x) min(diff(x)))) < 0
# }
checkMono2<-function(curr,data,k){
# make des.basis.ext (n.ext x nbasis) using data$ext
# make func.deriv (nfunc x nbasis) matrix
# func.deriv%*%diag(beta)%*%t(des.basis.ext)
# shortcut: use regular func.basis, get differences
tdes.basis.ext<-makeBasisMatrixCurr(curr,data)
if(length(curr$beta)>1){
out<-curr$func.basis%*%diag(c(curr$beta))%*%tdes.basis.ext
} else{
out<-curr$func.basis%*%curr$beta%*%tdes.basis.ext
}
if(k>10)
browser()
min(diff(out)) >= 0
}
makeBasisMatrixCurr<-function(curr,data){#i,nbasis,vars,signs,knots.ind,q,xxt,n.int,xx.train){
xxt.ext<-data$ext
n<-ncol(xxt.ext)
tbasis.mat<-matrix(nrow=curr$nbasis+1,ncol=n)
tbasis.mat[1,]<-1
if(curr$nbasis>0){
for(m in 1:curr$nbasis){
if(curr$n.int.des[m]==0){
tbasis.mat[m+1,]<-1
} else{
use<-1:curr$n.int.des[m]
#knots<-xx.train[cbind(knots.ind[i,m,use],vars[i,m,use])] # get knots from knots.ind
tbasis.mat[m+1,]<-makeBasis(curr$signs.des[m,use],curr$vars.des[m,use],curr$knots.des[m,use],xxt.ext,1)
}
}
}
return(tbasis.mat)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/rjmcmc_all.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## perform RJMCMC step (birth, death, or change)
########################################################################
birth_cat<-function(curr,prior,data){
cand.cat<-genCandBasisCat(minInt=prior$minInt,maxInt=prior$maxInt.cat,I.vec=curr$I.vec.cat,z.vec=curr$z.vec.cat,p=data$pcat,xx=data$xx.cat,nlevels=data$nlevels,levels=data$levels,prior)
if(sum(cand.cat$basis!=0)<prior$npart.des){
return(curr)
}
ata<-crossprod(cand.cat$basis)
Xta<-crossprod(curr$cat.basis,cand.cat$basis)
aty<-crossprod(cand.cat$basis,data$y)
curr$Xty[curr$nc+1]<-aty
curr$XtX[1:curr$nc,curr$nc+1]<-Xta
curr$XtX[curr$nc+1,curr$nc+1]<-ata
qf.cand.list<-getQf(curr$XtX[1:(curr$nc+1),1:(curr$nc+1)],curr$Xty[1:(curr$nc+1)])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
## calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ log(curr$lam) - log(curr$nc) + log(data$death.prob.next/data$birth.prob)
- cand.cat$lbmcmp + .5*log(curr$beta.prec+prior$beta.jprior.ind) - .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*log(2*pi)#*curr$s2)
+ .5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
## assign new values
if(log(runif(1)) < alpha){
curr<-addBasis(curr,cand.cat,qf.cand.list,prior)
curr<-addBasisCat(curr,cand.cat,qf.cand.list,prior)
}
return(curr)
}
death_cat<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
ind<-(1:curr$nc)[-(basis+1)]
qf.cand.list<-getQf(curr$XtX[ind,ind],curr$Xty[ind])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr) # TODO: not sure why I need this, I shouldn't need it in theory
}
I.star.cat<-curr$I.star.cat
I.star.cat[curr$n.int.cat[basis]]<-I.star.cat[curr$n.int.cat[basis]]-1
I.vec.cat<-I.star.cat/sum(I.star.cat)
z.star.cat<-curr$z.star.cat
z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]<-z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]-1
z.vec.cat<-z.star.cat/sum(z.star.cat)
lpbmcmp<-logProbChangeModCat(curr$n.int.cat[basis],curr$vars.cat[basis,1:curr$n.int.cat[basis]],I.vec.cat,z.vec.cat,data$pcat,data$nlevels,curr$sub.size[basis,],prior$maxInt.cat,prior$miC)
# calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
- log(curr$lam) + log(data$birth.prob.last/data$death.prob)
+ log(curr$nbasis) + lpbmcmp
- .5*log(curr$beta.prec+prior$beta.jprior.ind) + .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
-.5*log(2*pi)#*curr$s2)
+.5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1)) < alpha){
curr<-deleteBasis(curr,basis,ind,qf.cand.list,I.star.cat,I.vec.cat,z.star.cat,z.vec.cat)
curr<-deleteBasisCat(curr,basis,ind,qf.cand.list,I.star.cat,I.vec.cat,z.star.cat,z.vec.cat)
}
return(curr)
}
change_cat<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
int.change<-sample(1:(curr$n.int.cat[basis]),size=1)
use<-1:curr$n.int.cat[basis]
cand.cat<-genBasisChangeCat(curr,basis,int.change,data$xx.cat,data$nlevels,data$levels,curr$sub.size[basis,use],curr$sub.list[[basis]],vars=curr$vars.cat[basis,use])
if(sum(cand.cat$basis!=0)<prior$npart.des){
return(curr)
}
XtX.cand<-curr$XtX[1:curr$nc,1:curr$nc]
XtX.cand[basis+1,]<-XtX.cand[,basis+1]<-crossprod(curr$cat.basis,cand.cat$basis)
XtX.cand[basis+1,basis+1]<-crossprod(cand.cat$basis)
Xty.cand<-curr$Xty[1:curr$nc]
Xty.cand[basis+1]<-crossprod(cand.cat$basis,data$y)
qf.cand.list<-getQf(XtX.cand,Xty.cand)
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
alpha<-data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*sum(log(diag(qf.cand.list$R)))-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1))<alpha){
curr<-changeBasis(curr,cand.cat,basis,qf.cand.list,XtX.cand,Xty.cand)
curr<-changeBasisCat(curr,cand.cat,basis,qf.cand.list,XtX.cand,Xty.cand)
}
return(curr)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/rjmcmc_cat.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## perform RJMCMC step (birth, death, or change)
########################################################################
birth_cat_func<-function(curr,prior,data){
cand.cat<-genCandBasisCat(minInt=prior$minInt,maxInt=prior$maxInt.cat,I.vec=curr$I.vec.cat,z.vec=curr$z.vec.cat,p=data$pcat,xx=data$xx.cat,nlevels=data$nlevels,levels=data$levels,prior)
if(sum(cand.cat$basis!=0)<prior$npart.des)
return(curr)
cand.func<-genCandBasis(minInt=prior$minInt,maxInt=prior$maxInt.func,I.vec=curr$I.vec.func,z.vec=curr$z.vec.func,p=data$pfunc,xxt=data$xxt.func,q=prior$q,xx.unique.ind=data$unique.ind.func,vars.len=data$vars.len.func,prior)
if(sum(cand.func$basis!=0)<prior$npart.func)
return(curr)
if(cand.cat$n.int + cand.func$n.int == 0) # intercept
return(curr)
ata<-crossprod(cand.cat$basis)*crossprod(cand.func$basis)
Xta<-crossprod(curr$cat.basis,cand.cat$basis)*crossprod(curr$func.basis,cand.func$basis)
aty<-tcrossprod(crossprod(cand.cat$basis,data$y),cand.func$basis)
curr$Xty[curr$nc+1]<-aty
curr$XtX[1:curr$nc,curr$nc+1]<-Xta
curr$XtX[curr$nc+1,curr$nc+1]<-ata
qf.cand.list<-getQf(curr$XtX[1:(curr$nc+1),1:(curr$nc+1)],curr$Xty[1:(curr$nc+1)])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
## calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ log(curr$lam) - log(curr$nc) + log(data$death.prob.next/data$birth.prob)
- cand.cat$lbmcmp - cand.func$lbmcmp
+ .5*log(curr$beta.prec+prior$beta.jprior.ind) - .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*log(2*pi)#*curr$s2)
+ .5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
## assign new values
if(log(runif(1)) < alpha){
curr<-addBasis(curr,cand.cat,qf.cand.list,prior)
curr<-addBasisCat(curr,cand.cat,qf.cand.list,prior)
curr<-addBasisFunc(curr,cand.func,qf.cand.list,prior)
# if type has cat and des, want to update curr$dc.basis also
}
return(curr)
}
death_cat_func<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
ind<-(1:curr$nc)[-(basis+1)]
qf.cand.list<-getQf(curr$XtX[ind,ind],curr$Xty[ind])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr) # TODO: not sure why I need this, I shouldn't need it in theory
}
I.star.cat<-curr$I.star.cat
I.star.cat[curr$n.int.cat[basis]+1]<-I.star.cat[curr$n.int.cat[basis]+1]-1
I.vec.cat<-I.star.cat/sum(I.star.cat)
z.star.cat<-curr$z.star.cat
if(curr$n.int.cat[basis]>0)
z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]<-z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]-1
z.vec.cat<-z.star.cat/sum(z.star.cat)
I.star.func<-curr$I.star.func
I.star.func[curr$n.int.func[basis]+1]<-I.star.func[curr$n.int.func[basis]+1]-1
I.vec.func<-I.star.func/sum(I.star.func)
z.star.func<-curr$z.star.func
if(curr$n.int.func[basis]>0)
z.star.func[curr$vars.func[basis,1:curr$n.int.func[basis]]]<-z.star.func[curr$vars.func[basis,1:curr$n.int.func[basis]]]-1
z.vec.func<-z.star.func/sum(z.star.func)
lpbmcmp<-0
if(curr$n.int.cat[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeModCat(curr$n.int.cat[basis],curr$vars.cat[basis,1:curr$n.int.cat[basis]],I.vec.cat,z.vec.cat,data$pcat,data$nlevels,curr$sub.size[basis,],prior$maxInt.cat,prior$miC)
}
if(curr$n.int.func[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeMod(curr$n.int.func[basis],curr$vars.func[basis,1:curr$n.int.func[basis]],I.vec.func,z.vec.func,data$pfunc,data$vars.len.func,prior$maxInt.func,prior$miC)
}
# calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
- log(curr$lam) + log(data$birth.prob.last/data$death.prob)
+ log(curr$nbasis) + lpbmcmp
- .5*log(curr$beta.prec+prior$beta.jprior.ind) + .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
-.5*log(2*pi)#*curr$s2)
+.5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1)) < alpha){
curr<-deleteBasis(curr,basis,ind,qf.cand.list,I.star.cat,I.vec.cat,z.star.cat,z.vec.cat)
curr<-deleteBasisCat(curr,basis,ind,qf.cand.list,I.star.cat,I.vec.cat,z.star.cat,z.vec.cat)
curr<-deleteBasisFunc(curr,basis,ind,qf.cand.list,I.star.func,I.vec.func,z.star.func,z.vec.func)
}
return(curr)
}
change_cat_func<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
type.change<-sample(c('cat','func'),size=1,prob=c(curr$n.int.cat[basis],curr$n.int.func[basis]))
if(type.change=='cat'){
int.change<-sample(1:(curr$n.int.cat[basis]),size=1)
use<-1:curr$n.int.cat[basis]
cand.cat<-genBasisChangeCat(curr,basis,int.change,data$xx.cat,data$nlevels,data$levels,curr$sub.size[basis,use],curr$sub.list[[basis]],vars=curr$vars.cat[basis,use])
cand.func<-list(basis=curr$func.basis[,basis+1])
} else{
int.change<-sample(1:(curr$n.int.func[basis]),size=1)
use<-1:curr$n.int.func[basis]
cand.func<-genBasisChange(curr,basis,int.change,data$xxt.func,prior$q,knots=curr$knots.func[basis,use],knotInd=curr$knotInd.func[basis,use],signs=curr$signs.func[basis,use],vars=curr$vars.func[basis,use],xx.unique.ind=data$unique.ind.func)
cand.cat<-list(basis=curr$cat.basis[,basis+1])
}
if(sum(cand.cat$basis!=0)<prior$npart.des){
return(curr)
}
if(sum(cand.func$basis!=0)<prior$npart.func){
return(curr)
}
XtX.cand<-curr$XtX[1:curr$nc,1:curr$nc]
XtX.cand[basis+1,]<-XtX.cand[,basis+1]<-crossprod(curr$cat.basis,cand.cat$basis)*crossprod(curr$func.basis,cand.func$basis)
XtX.cand[basis+1,basis+1]<-crossprod(cand.cat$basis)*crossprod(cand.func$basis)
Xty.cand<-curr$Xty[1:curr$nc]
Xty.cand[basis+1]<-tcrossprod(crossprod(cand.cat$basis,data$y),cand.func$basis)
qf.cand.list<-getQf(XtX.cand,Xty.cand)
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
alpha<-data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*sum(log(diag(qf.cand.list$R)))-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1))<alpha){
curr<-changeBasis(curr,cand.cat,basis,qf.cand.list,XtX.cand,Xty.cand)
if(type.change=='cat')
curr<-changeBasisCat(curr,cand.cat,basis,qf.cand.list,XtX.cand,Xty.cand)
if(type.change=='func')
curr<-changeBasisFunc(curr,cand.func,basis,qf.cand.list,XtX.cand,Xty.cand)
}
return(curr)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/rjmcmc_cat_func.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## perform RJMCMC step (birth, death, or change)
########################################################################
birth_des<-function(curr,prior,data){
cand.des<-genCandBasis(minInt=prior$minInt,maxInt=prior$maxInt.des,I.vec=curr$I.vec.des,z.vec=curr$z.vec.des,p=data$pdes,xxt=data$xxt.des,q=prior$q,xx.unique.ind=data$unique.ind.des,vars.len=data$vars.len.des,prior)
if(sum(cand.des$basis!=0)<prior$npart.des){
return(curr)
}
ata<-crossprod(cand.des$basis)
Xta<-crossprod(curr$des.basis,cand.des$basis)
aty<-crossprod(cand.des$basis,data$y)
curr$Xty[curr$nc+1]<-aty
curr$XtX[1:curr$nc,curr$nc+1]<-Xta
curr$XtX[curr$nc+1,curr$nc+1]<-ata
qf.cand.list<-getQf(curr$XtX[1:(curr$nc+1),1:(curr$nc+1)],curr$Xty[1:(curr$nc+1)])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
## calculate log acceptance probability
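  ## roughly: tempered sum of the marginal-likelihood (quadratic form) difference,
  ## the Poisson prior ratio on the number of basis functions, the birth/death move
  ## probabilities, the candidate-basis proposal/prior term (lbmcmp), and
  ## normalizing terms from the coefficient prior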
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ log(curr$lam) - log(curr$nc)
+ log(data$death.prob.next/data$birth.prob) - cand.des$lbmcmp
+ .5*log(curr$beta.prec+prior$beta.jprior.ind) - .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*log(2*pi)-.5*log(curr$s2)
+ .5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
## assign new values
if(log(runif(1)) < alpha){
curr<-addBasis(curr,cand.des,qf.cand.list,prior)
curr<-addBasisDes(curr,cand.des,qf.cand.list,prior)
# if type has cat and des, want to update curr$dc.basis also
}
return(curr)
}
death_des<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
ind<-(1:curr$nc)[-(basis+1)]
qf.cand.list<-getQf(curr$XtX[ind,ind],curr$Xty[ind])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr) # TODO: not sure why I need this, I shouldn't need it in theory
}
I.star.des<-curr$I.star.des
I.star.des[curr$n.int.des[basis]]<-I.star.des[curr$n.int.des[basis]]-1
I.vec.des<-I.star.des/sum(I.star.des)
z.star.des<-curr$z.star.des
z.star.des[curr$vars.des[basis,1:curr$n.int.des[basis]]]<-z.star.des[curr$vars.des[basis,1:curr$n.int.des[basis]]]-1
z.vec.des<-z.star.des/sum(z.star.des)
lpbmcmp<-logProbChangeMod(curr$n.int.des[basis],curr$vars.des[basis,1:curr$n.int.des[basis]],I.vec.des,z.vec.des,data$pdes,data$vars.len.des,prior$maxInt.des,prior$miC)
# calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
- log(curr$lam) + log(data$birth.prob.last/data$death.prob)
+ log(curr$nbasis) + lpbmcmp
- .5*log(curr$beta.prec+prior$beta.jprior.ind) + .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
-.5*log(2*pi)+.5*log(curr$s2)
+.5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1)) < alpha){
curr<-deleteBasis(curr,basis,ind,qf.cand.list,I.star.des,I.vec.des,z.star.des,z.vec.des)
curr<-deleteBasisDes(curr,basis,ind,qf.cand.list,I.star.des,I.vec.des,z.star.des,z.vec.des)
}
return(curr)
}
change_des<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
int.change<-sample(1:(curr$n.int.des[basis]),size=1)
use<-1:curr$n.int.des[basis]
cand.des<-genBasisChange(curr,basis,int.change,data$xxt.des,prior$q,knots=curr$knots.des[basis,use],knotInd=curr$knotInd.des[basis,use],signs=curr$signs.des[basis,use],vars=curr$vars.des[basis,use],xx.unique.ind=data$unique.ind.des)
if(sum(cand.des$basis!=0)<prior$npart.des){
return(curr)
}
XtX.cand<-curr$XtX[1:curr$nc,1:curr$nc]
XtX.cand[basis+1,]<-XtX.cand[,basis+1]<-crossprod(curr$des.basis,cand.des$basis)
XtX.cand[basis+1,basis+1]<-crossprod(cand.des$basis)
Xty.cand<-curr$Xty[1:curr$nc]
Xty.cand[basis+1]<-crossprod(cand.des$basis,data$y)
qf.cand.list<-getQf(XtX.cand,Xty.cand)
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
alpha<-data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*sum(log(diag(qf.cand.list$R)))-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1))<alpha){
curr<-changeBasis(curr,cand.des,basis,qf.cand.list,XtX.cand,Xty.cand)
curr<-changeBasisDes(curr,cand.des,basis,qf.cand.list,XtX.cand,Xty.cand)
}
return(curr)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/rjmcmc_des.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## perform RJMCMC step (birth, death, or change)
########################################################################
birth_des_cat<-function(curr,prior,data){
cand.des<-genCandBasis(minInt=prior$minInt,maxInt=prior$maxInt.des,I.vec=curr$I.vec.des,z.vec=curr$z.vec.des,p=data$pdes,xxt=data$xxt.des,q=prior$q,xx.unique.ind=data$unique.ind.des,vars.len=data$vars.len.des,prior)
cand.cat<-genCandBasisCat(minInt=prior$minInt,maxInt=prior$maxInt.cat,I.vec=curr$I.vec.cat,z.vec=curr$z.vec.cat,p=data$pcat,xx=data$xx.cat,nlevels=data$nlevels,levels=data$levels,prior)
if(cand.des$n.int + cand.cat$n.int == 0) # intercept
return(curr)
dc<-cand.des$basis*cand.cat$basis
if(sum(dc!=0)<prior$npart.des){
return(curr)
}
ata<-crossprod(dc)
Xta<-crossprod(curr$dc.basis,dc)
aty<-crossprod(dc,data$y)
curr$Xty[curr$nc+1]<-aty
curr$XtX[1:curr$nc,curr$nc+1]<-Xta
curr$XtX[curr$nc+1,curr$nc+1]<-ata
qf.cand.list<-getQf(curr$XtX[1:(curr$nc+1),1:(curr$nc+1)],curr$Xty[1:(curr$nc+1)])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
## calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ log(curr$lam) - log(curr$nc) + log(data$death.prob.next/data$birth.prob)
- cand.des$lbmcmp - cand.cat$lbmcmp
+ .5*log(curr$beta.prec+prior$beta.jprior.ind) - .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*log(2*pi)#*curr$s2)
+ .5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
## assign new values
if(log(runif(1)) < alpha){
curr<-addBasis(curr,cand.des,qf.cand.list,prior)
curr<-addBasisDes(curr,cand.des,qf.cand.list,prior)
curr<-addBasisCat(curr,cand.cat,qf.cand.list,prior)
curr<-addBasisDC(curr,dc)
# if type has cat and des, want to update curr$dc.basis also
}
return(curr)
}
death_des_cat<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
ind<-(1:curr$nc)[-(basis+1)]
qf.cand.list<-getQf(curr$XtX[ind,ind],curr$Xty[ind])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr) # TODO: not sure why I need this, I shouldn't need it in theory
}
I.star.des<-curr$I.star.des
I.star.des[curr$n.int.des[basis]+1]<-I.star.des[curr$n.int.des[basis]+1]-1
I.vec.des<-I.star.des/sum(I.star.des)
z.star.des<-curr$z.star.des
z.star.des[curr$vars.des[basis,1:curr$n.int.des[basis]]]<-z.star.des[curr$vars.des[basis,1:curr$n.int.des[basis]]]-1
z.vec.des<-z.star.des/sum(z.star.des)
I.star.cat<-curr$I.star.cat
I.star.cat[curr$n.int.cat[basis]+1]<-I.star.cat[curr$n.int.cat[basis]+1]-1
I.vec.cat<-I.star.cat/sum(I.star.cat)
z.star.cat<-curr$z.star.cat
z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]<-z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]-1
z.vec.cat<-z.star.cat/sum(z.star.cat)
lpbmcmp<-0
if(curr$n.int.des[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeMod(curr$n.int.des[basis],curr$vars.des[basis,1:curr$n.int.des[basis]],I.vec.des,z.vec.des,data$pdes,data$vars.len.des,prior$maxInt.des,prior$miC)
}
if(curr$n.int.cat[basis]>0){#n.int,vars,I.vec,z.vec,p,nlevels,sub.size,maxInt
lpbmcmp<-lpbmcmp+logProbChangeModCat(curr$n.int.cat[basis],curr$vars.cat[basis,1:curr$n.int.cat[basis]],I.vec.cat,z.vec.cat,data$pcat,data$nlevels,curr$sub.size[basis,],prior$maxInt.cat,prior$miC)
}
#if(is.na(lpbmcmp))
#browser()
# calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
- log(curr$lam) + log(data$birth.prob.last/data$death.prob)
+ log(curr$nbasis) + lpbmcmp
- .5*log(curr$beta.prec+prior$beta.jprior.ind) + .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
-.5*log(2*pi)#*curr$s2)
+.5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1)) < alpha){
curr<-deleteBasis(curr,basis,ind,qf.cand.list,I.star.des,I.vec.des,z.star.des,z.vec.des)
curr<-deleteBasisDes(curr,basis,ind,qf.cand.list,I.star.des,I.vec.des,z.star.des,z.vec.des)
curr<-deleteBasisCat(curr,basis,ind,qf.cand.list,I.star.cat,I.vec.cat,z.star.cat,z.vec.cat)
curr<-deleteBasisDC(curr,basis)
}
return(curr)
}
change_des_cat<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
type.change<-sample(c('des','cat'),size=1,prob=c(curr$n.int.des[basis],curr$n.int.cat[basis]))
if(type.change=='des'){
int.change<-sample(1:(curr$n.int.des[basis]),size=1)
use<-1:curr$n.int.des[basis]
cand.des<-genBasisChange(curr,basis,int.change,data$xxt.des,prior$q,knots=curr$knots.des[basis,use],knotInd=curr$knotInd.des[basis,use],signs=curr$signs.des[basis,use],vars=curr$vars.des[basis,use],xx.unique.ind=data$unique.ind.des)
dc<-cand.des$basis*curr$cat.basis[,basis+1]
} else{
int.change<-sample(1:(curr$n.int.cat[basis]),size=1)
use<-1:curr$n.int.cat[basis]
#curr,basis,int.change,xx,nlevels,levels,sub.size,sub,vars
cand.cat<-genBasisChangeCat(curr,basis,int.change,data$xx.cat,data$nlevels,data$levels,curr$sub.size[basis,use],curr$sub.list[[basis]],vars=curr$vars.cat[basis,use])
dc<-cand.cat$basis*curr$des.basis[,basis+1]
}
if(sum(dc!=0)<prior$npart.des){
return(curr)
}
XtX.cand<-curr$XtX[1:curr$nc,1:curr$nc]
XtX.cand[basis+1,]<-XtX.cand[,basis+1]<-crossprod(curr$dc.basis,dc)
XtX.cand[basis+1,basis+1]<-crossprod(dc)
Xty.cand<-curr$Xty[1:curr$nc]
Xty.cand[basis+1]<-crossprod(dc,data$y)
qf.cand.list<-getQf(XtX.cand,Xty.cand)
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
alpha<-data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*sum(log(diag(qf.cand.list$R)))-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1))<alpha){
curr<-changeBasis(curr,cand.des,basis,qf.cand.list,XtX.cand,Xty.cand)
curr<-changeBasisDC(curr,dc,basis)
if(type.change=='des')
curr<-changeBasisDes(curr,cand.des,basis,qf.cand.list,XtX.cand,Xty.cand)
if(type.change=='cat')
curr<-changeBasisCat(curr,cand.cat,basis,qf.cand.list,XtX.cand,Xty.cand)
}
return(curr)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/rjmcmc_des_cat.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## perform RJMCMC step (birth, death, or change)
########################################################################
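## Here each basis function is a product of a continuous (des), categorical (cat),
## and functional (func) component, so inner products factor: the Gram-matrix
## updates below multiply the des*cat cross products by the functional ones.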
birth_des_cat_func<-function(curr,prior,data){
cand.des<-genCandBasis(minInt=prior$minInt,maxInt=prior$maxInt.des,I.vec=curr$I.vec.des,z.vec=curr$z.vec.des,p=data$pdes,xxt=data$xxt.des,q=prior$q,xx.unique.ind=data$unique.ind.des,vars.len=data$vars.len.des,prior)
cand.cat<-genCandBasisCat(minInt=prior$minInt,maxInt=prior$maxInt.cat,I.vec=curr$I.vec.cat,z.vec=curr$z.vec.cat,p=data$pcat,xx=data$xx.cat,nlevels=data$nlevels,levels=data$levels,prior)
dc<-cand.des$basis*cand.cat$basis
if(sum(dc!=0)<prior$npart.des)
return(curr)
cand.func<-genCandBasis(minInt=prior$minInt,maxInt=prior$maxInt.func,I.vec=curr$I.vec.func,z.vec=curr$z.vec.func,p=data$pfunc,xxt=data$xxt.func,q=prior$q,xx.unique.ind=data$unique.ind.func,vars.len=data$vars.len.func,prior)
if(sum(cand.func$basis!=0)<prior$npart.func)
return(curr)
if(cand.des$n.int + cand.cat$n.int + cand.func$n.int == 0) # intercept
return(curr)
ata<-crossprod(dc)*crossprod(cand.func$basis)
Xta<-crossprod(curr$dc.basis,dc)*crossprod(curr$func.basis,cand.func$basis)
aty<-tcrossprod(crossprod(dc,data$y),cand.func$basis)
curr$Xty[curr$nc+1]<-aty
curr$XtX[1:curr$nc,curr$nc+1]<-Xta
curr$XtX[curr$nc+1,curr$nc+1]<-ata
qf.cand.list<-getQf(curr$XtX[1:(curr$nc+1),1:(curr$nc+1)],curr$Xty[1:(curr$nc+1)])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
## calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ log(curr$lam) - log(curr$nc) + log(data$death.prob.next/data$birth.prob)
- cand.des$lbmcmp - cand.cat$lbmcmp - cand.func$lbmcmp
+ .5*log(curr$beta.prec+prior$beta.jprior.ind) - .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*log(2*pi)#*curr$s2)
+ .5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
## assign new values
if(log(runif(1)) < alpha){
curr<-addBasis(curr,dc,qf.cand.list,prior)
curr<-addBasisDes(curr,cand.des,qf.cand.list,prior)
curr<-addBasisCat(curr,cand.cat,qf.cand.list,prior)
curr<-addBasisDC(curr,dc)
curr<-addBasisFunc(curr,cand.func,qf.cand.list,prior)
# if type has cat and des, want to update curr$dc.basis also
}
return(curr)
}
death_des_cat_func<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
ind<-(1:curr$nc)[-(basis+1)]
qf.cand.list<-getQf(curr$XtX[ind,ind],curr$Xty[ind])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr) # TODO: not sure why I need this, I shouldn't need it in theory
}
I.star.des<-curr$I.star.des
I.star.des[curr$n.int.des[basis]+1]<-I.star.des[curr$n.int.des[basis]+1]-1
I.vec.des<-I.star.des/sum(I.star.des)
z.star.des<-curr$z.star.des
if(curr$n.int.des[basis]>0)
z.star.des[curr$vars.des[basis,1:curr$n.int.des[basis]]]<-z.star.des[curr$vars.des[basis,1:curr$n.int.des[basis]]]-1
z.vec.des<-z.star.des/sum(z.star.des)
I.star.cat<-curr$I.star.cat
I.star.cat[curr$n.int.cat[basis]+1]<-I.star.cat[curr$n.int.cat[basis]+1]-1
I.vec.cat<-I.star.cat/sum(I.star.cat)
z.star.cat<-curr$z.star.cat
if(curr$n.int.cat[basis]>0)
z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]<-z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]-1
z.vec.cat<-z.star.cat/sum(z.star.cat)
I.star.func<-curr$I.star.func
I.star.func[curr$n.int.func[basis]+1]<-I.star.func[curr$n.int.func[basis]+1]-1
I.vec.func<-I.star.func/sum(I.star.func)
z.star.func<-curr$z.star.func
if(curr$n.int.func[basis]>0)
z.star.func[curr$vars.func[basis,1:curr$n.int.func[basis]]]<-z.star.func[curr$vars.func[basis,1:curr$n.int.func[basis]]]-1
z.vec.func<-z.star.func/sum(z.star.func)
lpbmcmp<-0
if(curr$n.int.des[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeMod(curr$n.int.des[basis],curr$vars.des[basis,1:curr$n.int.des[basis]],I.vec.des,z.vec.des,data$pdes,data$vars.len.des,prior$maxInt.des,prior$miC)
}
if(curr$n.int.cat[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeModCat(curr$n.int.cat[basis],curr$vars.cat[basis,1:curr$n.int.cat[basis]],I.vec.cat,z.vec.cat,data$pcat,data$nlevels,curr$sub.size[basis,],prior$maxInt.cat,prior$miC)
}
if(curr$n.int.func[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeMod(curr$n.int.func[basis],curr$vars.func[basis,1:curr$n.int.func[basis]],I.vec.func,z.vec.func,data$pfunc,data$vars.len.func,prior$maxInt.func,prior$miC)
}
# calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
- log(curr$lam) + log(data$birth.prob.last/data$death.prob)
+ log(curr$nbasis) + lpbmcmp
- .5*log(curr$beta.prec+prior$beta.jprior.ind) + .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
-.5*log(2*pi)#*curr$s2)
+.5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1)) < alpha){
curr<-deleteBasis(curr,basis,ind,qf.cand.list,I.star.des,I.vec.des,z.star.des,z.vec.des)
curr<-deleteBasisDes(curr,basis,ind,qf.cand.list,I.star.des,I.vec.des,z.star.des,z.vec.des)
curr<-deleteBasisCat(curr,basis,ind,qf.cand.list,I.star.cat,I.vec.cat,z.star.cat,z.vec.cat)
curr<-deleteBasisDC(curr,basis)
curr<-deleteBasisFunc(curr,basis,ind,qf.cand.list,I.star.func,I.vec.func,z.star.func,z.vec.func)
}
return(curr)
}
change_des_cat_func<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
type.change<-sample(c('des','cat','func'),size=1,prob=c(curr$n.int.des[basis],curr$n.int.cat[basis],curr$n.int.func[basis]))
if(type.change=='des'){
int.change<-sample(1:(curr$n.int.des[basis]),size=1)
use<-1:curr$n.int.des[basis]
cand.des<-genBasisChange(curr,basis,int.change,data$xxt.des,prior$q,knots=curr$knots.des[basis,use],knotInd=curr$knotInd.des[basis,use],signs=curr$signs.des[basis,use],vars=curr$vars.des[basis,use],xx.unique.ind=data$unique.ind.des)
cand.cat<-list(basis=curr$cat.basis[,basis+1])
cand.func<-list(basis=curr$func.basis[,basis+1])
} else if(type.change=='cat'){
int.change<-sample(1:(curr$n.int.cat[basis]),size=1)
use<-1:curr$n.int.cat[basis]
cand.cat<-genBasisChangeCat(curr,basis,int.change,data$xx.cat,data$nlevels,data$levels,curr$sub.size[basis,use],curr$sub.list[[basis]],vars=curr$vars.cat[basis,use])
cand.des<-list(basis=curr$des.basis[,basis+1])
cand.func<-list(basis=curr$func.basis[,basis+1])
} else{
int.change<-sample(1:(curr$n.int.func[basis]),size=1)
use<-1:curr$n.int.func[basis]
cand.func<-genBasisChange(curr,basis,int.change,data$xxt.func,prior$q,knots=curr$knots.func[basis,use],knotInd=curr$knotInd.func[basis,use],signs=curr$signs.func[basis,use],vars=curr$vars.func[basis,use],xx.unique.ind=data$unique.ind.func)
cand.des<-list(basis=curr$des.basis[,basis+1])
cand.cat<-list(basis=curr$cat.basis[,basis+1])
}
dc<-cand.cat$basis*cand.des$basis
if(sum(dc!=0)<prior$npart.des){
return(curr)
}
if(sum(cand.func$basis!=0)<prior$npart.func){
return(curr)
}
XtX.cand<-curr$XtX[1:curr$nc,1:curr$nc]
XtX.cand[basis+1,]<-XtX.cand[,basis+1]<-crossprod(curr$dc.basis,dc)*crossprod(curr$func.basis,cand.func$basis)
XtX.cand[basis+1,basis+1]<-crossprod(dc)*crossprod(cand.func$basis)
Xty.cand<-curr$Xty[1:curr$nc]
Xty.cand[basis+1]<-tcrossprod(crossprod(dc,data$y),cand.func$basis)
qf.cand.list<-getQf(XtX.cand,Xty.cand)
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
alpha<-data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*sum(log(diag(qf.cand.list$R)))-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1))<alpha){
curr<-changeBasis(curr,cand.des,basis,qf.cand.list,XtX.cand,Xty.cand)
if(type.change=='des'){
curr<-changeBasisDes(curr,cand.des,basis,qf.cand.list,XtX.cand,Xty.cand)
curr<-changeBasisDC(curr,dc,basis)
}
if(type.change=='cat'){
curr<-changeBasisCat(curr,cand.cat,basis,qf.cand.list,XtX.cand,Xty.cand)
curr<-changeBasisDC(curr,dc,basis)
}
if(type.change=='func')
curr<-changeBasisFunc(curr,cand.func,basis,qf.cand.list,XtX.cand,Xty.cand)
}
return(curr)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/rjmcmc_des_cat_func.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## perform RJMCMC step (birth, death, or change)
########################################################################
birth_des_func<-function(curr,prior,data){
cand.des<-genCandBasis(minInt=prior$minInt,maxInt=prior$maxInt.des,I.vec=curr$I.vec.des,z.vec=curr$z.vec.des,p=data$pdes,xxt=data$xxt.des,q=prior$q,xx.unique.ind=data$unique.ind.des,vars.len=data$vars.len.des,prior)
if(sum(cand.des$basis!=0)<prior$npart.des)
return(curr)
cand.func<-genCandBasis(minInt=prior$minInt,maxInt=prior$maxInt.func,I.vec=curr$I.vec.func,z.vec=curr$z.vec.func,p=data$pfunc,xxt=data$xxt.func,q=prior$q,xx.unique.ind=data$unique.ind.func,vars.len=data$vars.len.func,prior)
if(sum(cand.func$basis!=0)<prior$npart.func)
return(curr)
if(cand.des$n.int + cand.func$n.int == 0) # intercept
return(curr)
ata<-crossprod(cand.des$basis)*crossprod(cand.func$basis)
Xta<-crossprod(curr$des.basis,cand.des$basis)*crossprod(curr$func.basis,cand.func$basis)
aty<-tcrossprod(crossprod(cand.des$basis,data$y),cand.func$basis)
curr$Xty[curr$nc+1]<-aty
curr$XtX[1:curr$nc,curr$nc+1]<-Xta
curr$XtX[curr$nc+1,curr$nc+1]<-ata
qf.cand.list<-getQf(curr$XtX[1:(curr$nc+1),1:(curr$nc+1)],curr$Xty[1:(curr$nc+1)])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
## calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ log(curr$lam) - log(curr$nc) + log(data$death.prob.next/data$birth.prob)
- cand.des$lbmcmp - cand.func$lbmcmp
+ .5*log(curr$beta.prec+prior$beta.jprior.ind) - .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*log(2*pi)#*curr$s2)
+ .5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
## assign new values
if(log(runif(1)) < alpha){
curr<-addBasis(curr,cand.des,qf.cand.list,prior)
curr<-addBasisDes(curr,cand.des,qf.cand.list,prior)
curr<-addBasisFunc(curr,cand.func,qf.cand.list,prior)
# if type has cat and des, want to update curr$dc.basis also
}
return(curr)
}
death_des_func<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
ind<-(1:curr$nc)[-(basis+1)]
qf.cand.list<-getQf(curr$XtX[ind,ind],curr$Xty[ind])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr) # TODO: not sure why I need this, I shouldn't need it in theory
}
I.star.des<-curr$I.star.des
I.star.des[curr$n.int.des[basis]+1]<-I.star.des[curr$n.int.des[basis]+1]-1
I.vec.des<-I.star.des/sum(I.star.des)
z.star.des<-curr$z.star.des
if(curr$n.int.des[basis]>0)
z.star.des[curr$vars.des[basis,1:curr$n.int.des[basis]]]<-z.star.des[curr$vars.des[basis,1:curr$n.int.des[basis]]]-1
z.vec.des<-z.star.des/sum(z.star.des)
I.star.func<-curr$I.star.func
I.star.func[curr$n.int.func[basis]+1]<-I.star.func[curr$n.int.func[basis]+1]-1
I.vec.func<-I.star.func/sum(I.star.func)
z.star.func<-curr$z.star.func
if(curr$n.int.func[basis]>0)
z.star.func[curr$vars.func[basis,1:curr$n.int.func[basis]]]<-z.star.func[curr$vars.func[basis,1:curr$n.int.func[basis]]]-1
z.vec.func<-z.star.func/sum(z.star.func)
lpbmcmp<-0
if(curr$n.int.des[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeMod(curr$n.int.des[basis],curr$vars.des[basis,1:curr$n.int.des[basis]],I.vec.des,z.vec.des,data$pdes,data$vars.len.des,prior$maxInt.des,prior$miC)
}
if(curr$n.int.func[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeMod(curr$n.int.func[basis],curr$vars.func[basis,1:curr$n.int.func[basis]],I.vec.func,z.vec.func,data$pfunc,data$vars.len.func,prior$maxInt.func,prior$miC)
}
# calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
- log(curr$lam) + log(data$birth.prob.last/data$death.prob)
+ log(curr$nbasis) + lpbmcmp
- .5*log(curr$beta.prec+prior$beta.jprior.ind) + .5*log(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
-.5*log(2*pi)#*curr$s2)
+.5*sum(log(diag(qf.cand.list$R)))
-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1)) < alpha){
curr<-deleteBasis(curr,basis,ind,qf.cand.list,I.star.des,I.vec.des,z.star.des,z.vec.des)
curr<-deleteBasisDes(curr,basis,ind,qf.cand.list,I.star.des,I.vec.des,z.star.des,z.vec.des)
curr<-deleteBasisFunc(curr,basis,ind,qf.cand.list,I.star.func,I.vec.func,z.star.func,z.vec.func)
}
return(curr)
}
change_des_func<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
type.change<-sample(c('des','func'),size=1,prob=c(curr$n.int.des[basis],curr$n.int.func[basis]))
if(type.change=='des'){
int.change<-sample(1:(curr$n.int.des[basis]),size=1)
use<-1:curr$n.int.des[basis]
cand.des<-genBasisChange(curr,basis,int.change,data$xxt.des,prior$q,knots=curr$knots.des[basis,use],knotInd=curr$knotInd.des[basis,use],signs=curr$signs.des[basis,use],vars=curr$vars.des[basis,use],xx.unique.ind=data$unique.ind.des)
cand.func<-list(basis=curr$func.basis[,basis+1])
} else{
int.change<-sample(1:(curr$n.int.func[basis]),size=1)
use<-1:curr$n.int.func[basis]
cand.func<-genBasisChange(curr,basis,int.change,data$xxt.func,prior$q,knots=curr$knots.func[basis,use],knotInd=curr$knotInd.func[basis,use],signs=curr$signs.func[basis,use],vars=curr$vars.func[basis,use],xx.unique.ind=data$unique.ind.func)
cand.des<-list(basis=curr$des.basis[,basis+1])
}
if(sum(cand.des$basis!=0)<prior$npart.des){
return(curr)
}
if(sum(cand.func$basis!=0)<prior$npart.func){
return(curr)
}
XtX.cand<-curr$XtX[1:curr$nc,1:curr$nc]
XtX.cand[basis+1,]<-XtX.cand[,basis+1]<-crossprod(curr$des.basis,cand.des$basis)*crossprod(curr$func.basis,cand.func$basis)
XtX.cand[basis+1,basis+1]<-crossprod(cand.des$basis)*crossprod(cand.func$basis)
Xty.cand<-curr$Xty[1:curr$nc]
Xty.cand[basis+1]<-tcrossprod(crossprod(cand.des$basis,data$y),cand.func$basis)
qf.cand.list<-getQf(XtX.cand,Xty.cand)
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
alpha<-data$itemp.ladder[curr$temp.ind]*(
.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
+ prior$beta.jprior.ind*(
.5*sum(log(diag(qf.cand.list$R)))-.5*sum(log(diag(curr$R)))
)
)
if(log(runif(1))<alpha){
curr<-changeBasis(curr,cand.des,basis,qf.cand.list,XtX.cand,Xty.cand)
if(type.change=='des')
curr<-changeBasisDes(curr,cand.des,basis,qf.cand.list,XtX.cand,Xty.cand)
if(type.change=='func')
curr<-changeBasisFunc(curr,cand.func,basis,qf.cand.list,XtX.cand,Xty.cand)
}
return(curr)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/rjmcmc_des_func.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
#######################################################
########################################################################
## perform RJMCMC step (birth, death, or change)
########################################################################
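## These variants mirror birth_des, death_des, and change_des (rjmcmc_des.R) but
## omit the beta.jprior.ind terms from the log acceptance ratios, consistent with
## the simpler ridge-type coefficient prior the file name suggests.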
birth_des_ridge<-function(curr,prior,data){
cand.des<-genCandBasis(minInt=prior$minInt,maxInt=prior$maxInt.des,I.vec=curr$I.vec.des,z.vec=curr$z.vec.des,p=data$pdes,xxt=data$xxt.des,q=prior$q,xx.unique.ind=data$unique.ind.des,vars.len=data$vars.len.des,prior)
if(sum(cand.des$basis!=0)<prior$npart.des){
return(curr)
}
ata<-crossprod(cand.des$basis)
Xta<-crossprod(curr$des.basis,cand.des$basis)
aty<-crossprod(cand.des$basis,data$y)
curr$Xty[curr$nc+1]<-aty
curr$XtX[1:curr$nc,curr$nc+1]<-Xta
curr$XtX[curr$nc+1,curr$nc+1]<-ata
qf.cand.list<-getQf(curr$XtX[1:(curr$nc+1),1:(curr$nc+1)],curr$Xty[1:(curr$nc+1)])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
## calculate log acceptance probability
  alpha<- data$itemp.ladder[curr$temp.ind]*(
    .5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
    + log(curr$lam) - log(curr$nc)
    + log(data$death.prob.next/data$birth.prob) - cand.des$lbmcmp
    + .5*log(curr$beta.prec) - .5*log(1+curr$beta.prec)
  )
#cat(- cand.des$lbmcmp,' ')
## assign new values
if(log(runif(1)) < alpha){
curr<-addBasis(curr,cand.des,qf.cand.list,prior)
curr<-addBasisDes(curr,cand.des,qf.cand.list,prior)
# if type has cat and des, want to update curr$dc.basis also
}
return(curr)
}
death_des_ridge<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
ind<-(1:curr$nc)[-(basis+1)]
qf.cand.list<-getQf(curr$XtX[ind,ind],curr$Xty[ind])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr) # TODO: not sure why I need this, I shouldn't need it in theory
}
I.star.des<-curr$I.star.des
I.star.des[curr$n.int.des[basis]]<-I.star.des[curr$n.int.des[basis]]-1
I.vec.des<-I.star.des/sum(I.star.des)
z.star.des<-curr$z.star.des
z.star.des[curr$vars.des[basis,1:curr$n.int.des[basis]]]<-z.star.des[curr$vars.des[basis,1:curr$n.int.des[basis]]]-1
z.vec.des<-z.star.des/sum(z.star.des)
lpbmcmp<-logProbChangeMod(curr$n.int.des[basis],curr$vars.des[basis,1:curr$n.int.des[basis]],I.vec.des,z.vec.des,data$pdes,data$vars.len.des,prior$maxInt.des,prior$miC)
# calculate log acceptance probability
  alpha<- data$itemp.ladder[curr$temp.ind]*(
    .5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
    - log(curr$lam) + log(data$birth.prob.last/data$death.prob)
    + log(curr$nbasis) + lpbmcmp
    - .5*log(curr$beta.prec) + .5*log(1+curr$beta.prec)
  )
if(log(runif(1)) < alpha){
curr<-deleteBasis(curr,basis,ind,qf.cand.list,I.star.des,I.vec.des,z.star.des,z.vec.des)
curr<-deleteBasisDes(curr,basis,ind,qf.cand.list,I.star.des,I.vec.des,z.star.des,z.vec.des)
}
return(curr)
}
change_des_ridge<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
int.change<-sample(1:(curr$n.int.des[basis]),size=1)
use<-1:curr$n.int.des[basis]
cand.des<-genBasisChange(curr,basis,int.change,data$xxt.des,prior$q,knots=curr$knots.des[basis,use],knotInd=curr$knotInd.des[basis,use],signs=curr$signs.des[basis,use],vars=curr$vars.des[basis,use],xx.unique.ind=data$unique.ind.des)
if(sum(cand.des$basis!=0)<prior$npart.des){
return(curr)
}
XtX.cand<-curr$XtX[1:curr$nc,1:curr$nc]
XtX.cand[basis+1,]<-XtX.cand[,basis+1]<-crossprod(curr$des.basis,cand.des$basis)
XtX.cand[basis+1,basis+1]<-crossprod(cand.des$basis)
Xty.cand<-curr$Xty[1:curr$nc]
Xty.cand[basis+1]<-crossprod(cand.des$basis,data$y)
qf.cand.list<-getQf(XtX.cand,Xty.cand)
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
alpha<-data$itemp.ladder[curr$temp.ind]*.5/curr$s2*(qf.cand.list$qf-curr$qf)/(1+curr$beta.prec)
if(log(runif(1))<alpha){
curr<-changeBasis(curr,cand.des,basis,qf.cand.list,XtX.cand,Xty.cand)
curr<-changeBasisDes(curr,cand.des,basis,qf.cand.list,XtX.cand,Xty.cand)
}
return(curr)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/rjmcmc_des_ridge.R |
#######################################################
# Author: Devin Francom, Los Alamos National Laboratory
# Protected under GPL-3 license
# Los Alamos Computer Code release C19031
# github.com/lanl/BASS
# Full copyright in the README.md in the repository
#######################################################
############################################################
## get Sobol decomposition
############################################################
#' @title BASS Sensitivity Analysis
#'
#' @description Decomposes the variance of the BASS model into variance due to main effects, two-way interactions, and so on, similar to the ANOVA decomposition for linear models. Uses the Sobol' decomposition, which can be done analytically for MARS models.
#' @param bassMod a fitted model output from the \code{bass} function.
#' @param prior a list of priors; uniform, truncated mixture of Normals or Ts for continuous; vector of category weights for categorical. Default is uniform over range of data.
#' @param prior.func prior for functional variable. In almost all cases, keep this as the uniform default.
#' @param mcmc.use an integer vector indexing which MCMC iterations to use for sensitivity analysis.
#' @param func.var an integer indicating which functional variable to make sensitivity indices a function of. Disregard if \code{bassMod} is non-functional or if scalar sensitivity indices are desired.
#' @param xx.func.var grid for functional variable specified by \code{func.var}. Disregard if \code{func.var} is not specified. If \code{func.var} is specified and \code{xx.func.var} not specified, the grid used to fit \code{bass} will be used.
#' @param verbose logical; should progress be displayed?
#' @param getEffects logical; should the Sobol' ANOVA decomposition (main effect and interaction functions) be computed?
#' @details Performs an analytical Sobol' decomposition for each MCMC iteration in mcmc.use (each corresponds to a MARS model), yielding a posterior distribution of sensitivity indices. Sobol' indices can also be obtained as a function of one functional variable.
#' @return If non-functional (\code{func.var = NULL}), a list with two elements:
#' \item{S}{a data frame of sensitivity indices with number of rows matching the length of \code{mcmc.use}. The columns are named with a particular main effect or interaction. The values are the proportion of variance in the model that is due to each main effect or interaction.}
#' \item{T}{a data frame of total sensitivity indices with number of rows matching the length of \code{mcmc.use}. The columns are named with a particular variable.}
#' Otherwise, a list with four elements:
#' \item{S}{an array with first dimension corresponding to MCMC samples (same length as \code{mcmc.use}), second dimension corresponding to different main effects and interactions (labeled in \code{names.ind}), and third dimension corresponding to the grid used for the functional variable. The elements of the array are sensitivity indices.}
#' \item{S.var}{same as \code{S}, but scaled in terms of total variance rather than percent of variance.}
#' \item{names.ind}{a vector of names of the main effects and interactions used.}
#' \item{xx}{the grid used for the functional variable.}
#'
#' @keywords Sobol decomposition
#' @seealso \link{bass} for model fitting and \link{predict.bass} for prediction.
#' @import hypergeo
#' @export
#' @examples
#' # See examples in bass documentation.
#'
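#' ## A hypothetical sketch (not run), assuming mod is a fitted bass object:
#' # sens <- sobol(mod, mcmc.use = 1:100)
#' # sens$S          # posterior samples of the sensitivity indices
#' # boxplot(sens$S) # one box per main effect / interaction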
sobol<-function(bassMod,prior=NULL,prior.func=NULL,mcmc.use=NULL,func.var=NULL,xx.func.var=NULL,verbose=TRUE,getEffects=FALSE){
if(!inherits(bassMod,'bass'))
stop('First input needs to be a bass object')
if(bassMod$p==1 & !bassMod$func)
stop('Sobol only used for multiple input models')
if(bassMod$p==1 & sum(func.var)>0)
stop('Cannot decompose the variance in terms of only one variable')
mcmc.use.poss<-1:((bassMod$nmcmc-bassMod$nburn)/bassMod$thin)
if(any(!(mcmc.use%in%mcmc.use.poss))){
mcmc.use<-mcmc.use.poss
warning('disregarding mcmc.use because of bad values')
}
if(any(is.null(mcmc.use))){
mcmc.use<-mcmc.use.poss
}
pdescat<-sum(bassMod$pdes)+sum(bassMod$pcat) # sums make NULLs 0s
if(is.null(prior))
prior<-list()
if(length(prior)<pdescat){
for(i in (length(prior)+1):pdescat)
prior[[i]]<-list(dist=NA)
}
#browser()
for(i in 1:pdescat){
if(is.null(prior[[i]]))
prior[[i]]<-list(dist=NA)
if(is.na(prior[[i]]$dist)){
prior[[i]]<-list()
prior[[i]]$dist<-'uniform'
#prior[[i]]$trunc<-bassMod$range.des[,i] - not right index when there are categorical vars
}
}
if(bassMod$func){
if(is.null(prior.func)){
prior.func<-list()
for(i in 1:bassMod$pfunc){
prior.func[[i]]<-list()
prior.func[[i]]$dist<-'uniform'
#prior.func[[i]]$trunc<-bassMod$range.func[,i]
}
}
for(i in 1:length(prior.func))
class(prior.func[[i]])<-prior.func[[i]]$dist
}
for(i in 1:length(prior))
class(prior[[i]])<-prior[[i]]$dist # class will be used for integral functions, should be uniform, normal, or student
if(bassMod$cat){
which.cat<-which(bassMod$cx=='factor')
prior.cat<-list()
for(i in 1:length(which.cat)){
prior.cat[i]<-prior[which.cat[i]]
}
prior[which.cat]<-NULL
} else{
prior.cat<-NULL
}
#browser()
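  # rescale each continuous prior to the internal [0,1] training scale:
  # truncation bounds, plus mean/sd for normal or student mixture components,
  # whose weights are then renormalized for the truncated mass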
if(bassMod$des){
for(i in 1:length(prior)){
if(is.null(prior[[i]]$trunc)){
prior[[i]]$trunc<-bassMod$range.des[,i]#c(0,1)
}
prior[[i]]$trunc<-scale_range(prior[[i]]$trunc,bassMod$range.des[,i])
if(prior[[i]]$trunc[1]<0 | prior[[i]]$trunc[2]>1)
warning('truncation range larger than training range...it is unwise to ask an emulator to extrapolate.')
#browser()
if(prior[[i]]$dist %in% c('normal','student')){
prior[[i]]$mean<-scale_range(prior[[i]]$mean,bassMod$range.des[,i])
prior[[i]]$sd<-prior[[i]]$sd/(bassMod$range.des[2,i]-bassMod$range.des[1,i])
if(prior[[i]]$dist == 'normal'){
prior[[i]]$z<-pnorm((prior[[i]]$trunc[2]-prior[[i]]$mean)/prior[[i]]$sd) - pnorm((prior[[i]]$trunc[1]-prior[[i]]$mean)/prior[[i]]$sd)
} else{
prior[[i]]$z<-pt((prior[[i]]$trunc[2]-prior[[i]]$mean)/prior[[i]]$sd,prior[[i]]$df) - pt((prior[[i]]$trunc[1]-prior[[i]]$mean)/prior[[i]]$sd,prior[[i]]$df)
}
cc<-sum(prior[[i]]$weights*prior[[i]]$z)
prior[[i]]$weights<-prior[[i]]$weights/cc#prior[[i]]$z # change weights with truncation # divide by cc instead to keep the same prior shape
# does the truncation change the distribution shape in the non-truncated regions??
#browser()
}
}
}
if(bassMod$func){
for(i in 1:length(prior.func)){
if(is.null(prior.func[[i]]$trunc)){
prior.func[[i]]$trunc<-bassMod$range.func[,i]#c(0,1)
}
prior.func[[i]]$trunc<-scale_range(prior.func[[i]]$trunc,bassMod$range.func[,i])
if(prior.func[[i]]$trunc[1]<0 | prior.func[[i]]$trunc[2]>1)
stop('truncation range of functional variable larger than training range...it is unwise to ask an emulator to extrapolate.')
#browser()
if(prior.func[[i]]$dist %in% c('normal','student')){
prior.func[[i]]$mean<-scale_range(prior.func[[i]]$mean,bassMod$range.func[,i])
prior.func[[i]]$sd<-prior.func[[i]]$sd/(bassMod$range.func[2,i]-bassMod$range.func[1,i])
if(prior.func[[i]]$dist == 'normal'){
prior.func[[i]]$z<-pnorm((prior.func[[i]]$trunc[2]-prior.func[[i]]$mean)/prior.func[[i]]$sd) - pnorm((prior.func[[i]]$trunc[1]-prior.func[[i]]$mean)/prior.func[[i]]$sd)
} else{
prior.func[[i]]$z<-pt((prior.func[[i]]$trunc[2]-prior.func[[i]]$mean)/prior.func[[i]]$sd,prior.func[[i]]$df) - pt((prior.func[[i]]$trunc[1]-prior.func[[i]]$mean)/prior.func[[i]]$sd,prior.func[[i]]$df)
}
cc<-sum(prior.func[[i]]$weights*prior.func[[i]]$z)
prior.func[[i]]$weights<-prior.func[[i]]$weights/cc#prior[[i]]$z # change weights with truncation # divide by cc instead to keep the same prior shape
# does the truncation change the distribution shape in the non-truncated regions??
#browser()
}
}
}
# if(bassMod$func){
# for(i in 1:length(prior.func)){
# if(is.null(prior.func[[i]]$trunc)){
# prior.func[[i]]$trunc<-c(0,1)
# } else{
# prior.func[[i]]$trunc<-scale_range(prior.func[[i]]$trunc,bassMod$range.func[,i])
# }
#
# if(prior.func[[i]]$dist %in% c('normal','student')){
# prior.func[[i]]$mean<-scale_range(prior.func[[i]]$mean,bassMod$range.func[,i])
# prior.func[[i]]$sd<-prior.func[[i]]$sd/(bassMod$range.func[2,i]-bassMod$range.func[1,i])
# if(prior.func[[i]]$dist == 'normal'){
# prior.func[[i]]$z<-pnorm((prior.func[[i]]$trunc[2]-prior.func[[i]]$mean)/prior.func[[i]]$sd) - pnorm((prior.func[[i]]$trunc[1]-prior.func[[i]]$mean)/prior.func[[i]]$sd)
# } else{
# prior.func[[i]]$z<-pt((prior.func[[i]]$trunc[2]-prior.func[[i]]$mean)/prior.func[[i]]$sd,prior.func[[i]]$df) - pt((prior.func[[i]]$trunc[1]-prior.func[[i]]$mean)/prior.func[[i]]$sd,prior.func[[i]]$df)
# }
# prior.func[[i]]$weights<-prior.func[[i]]$weights/prior.func[[i]]$z # change weights with truncation
# }
# }
# }
#
#
# prior.func[[func.var]]<-NULL
# prior<-c(prior,prior.func)
# check to see if this worked how we expected, pass prior and prior.cat around
prior<-c(prior,prior.func)
  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(getEffects){
if(bassMod$cat | bassMod$func){
getEffects<-F
warning('getEffects not yet implemented for functional response or categorical inputs.')
}
}
if(is.null(func.var)){
func<-F
} else{
func<-T
if(!bassMod$func){
func<-F
warning('disregarding func.var because bassMod parameter is not functional')
}
}
if(!is.null(xx.func.var) & !func)
warning('disregarding xx.func.var because bassMod parameter is not functional')
if(func){
if(!(func.var%in%(1:ncol(bassMod$xx.func))))
stop('func.var in wrong range of values')
if(is.null(xx.func.var)){
xx.func.var<-bassMod$xx.func[,func.var,drop=F]
} else{
rr<-range(xx.func.var)
if(rr[1]<bassMod$range.func[1,func.var] | rr[2]>bassMod$range.func[2,func.var])
warning(paste('range of func.var in bass function (',bassMod$range.func[1,func.var],',',bassMod$range.func[2,func.var],') is smaller than range of xx.func.var (',rr[1],',',rr[2],'), indicating some extrapolation',sep=''))
xx.func.var<-scale_range(xx.func.var,bassMod$range.func[,func.var])
}
return(sobol_des_func(bassMod=bassMod,mcmc.use=mcmc.use,verbose=verbose,func.var=func.var,xx.func.var=xx.func.var,prior=prior,prior.cat=prior.cat))
} else{
return(sobol_des(bassMod=bassMod,mcmc.use=mcmc.use,verbose=verbose,prior=prior,prior.cat=prior.cat,getEffects=getEffects)) # applies to both des & func as long as functional sobol indices are not desired
}
}
## get sobol indices - no functional
sobol_des<-function(bassMod,mcmc.use,verbose,prior,prior.cat,getEffects){
models<-bassMod$model.lookup[mcmc.use] # only do the heavy lifting once for each model
uniq.models<-unique(models)
nmodels<-length(uniq.models)
maxInt.tot<-sum(bassMod$maxInt.des)+sum(bassMod$maxInt.cat)+sum(bassMod$maxInt.func)
maxBasis<-max(bassMod$nbasis)
q<-bassMod$degree
pdes<-sum(bassMod$pdes)
pcat<-sum(bassMod$pcat)
pfunc<-sum(bassMod$pfunc)
p<-pdes+pcat+pfunc
#prior<-c(prior,prior.func)
################################################
# get combs including functional variables
#browser()
allCombs<-getCombs(bassMod,uniq.models,nmodels,maxBasis,maxInt.tot)
combs<-allCombs$combs # which main effects and interactions included
names.ind<-allCombs$names.ind # which main effects and interactions included
num.ind<-allCombs$num.ind # number of terms in each interaction level
cs.num.ind<-allCombs$cs.num.ind # cumsum of num.ind
################################################
sob<-array(0,dim=c(length(mcmc.use),sum(num.ind)))
var.tot.store<-f0.store<-rep(0,length(mcmc.use))
ngrid<-100
#xx=seq(0,1,length.out=ngrid) # make a different size xx for each variable...to debug
xxt<-lapply(1:p,function(i) t(seq(0,1,length.out=ngrid+i)))
effects<-list()
if(getEffects){
for(iint in 1:min(length(combs),2)){
effects[[iint]]<-array(dim=c(length(mcmc.use),ncol(combs[[iint]]),rep(ngrid,iint)))
} # do above somewhere
}
if(verbose)
cat('Sobol Start',myTimestamp(),'Models:',length(unique(models)),'\n')
i<-1
mod.count<-0
for(mod in uniq.models){ #do this in parallel?
mod.count<-mod.count+1 # number of models
mcmc.use.mod<-mcmc.use[models==mod] # which part of mcmc.use does this correspond to?
mod.ind<-i:(i+length(mcmc.use.mod)-1) # index for which mcmc samples are this model?
M<-bassMod$nbasis[mcmc.use.mod][1] # number of basis functions in this model
if(M>0){
# for each model, m, tl stores everything necessary to get sobol indices
tl<-get_tl(bassMod,mcmc.use.mod,M,mod,p,q,cs.num.ind,combs) # tl initially stores coefs, knots, signs, variables, which combinations included, etc.
tl$prior<-prior
tl$prior.cat<-prior.cat
if(tl$cat)
tl<-add_tlCat(tl,bassMod,mcmc.use.mod,mod) # if there are categorical variables, add the associated info
tl<-add_tl(tl,p) # calculates and adds the C1 and C2 integrals to tl
lens<-apply(tl$Kind,1,function(x) length(na.omit(x))) # number of interactions in each basis function
#browser()
var.tot<-Vu(1:p,tl) # total variance, one for each mcmc iteration (since coefs change each mcmc iteration rather than each model)
# vt<-myCC<-myC2<-matrix(nrow=M,ncol=M)
# for(i in 1:M){
# for(j in 1:M){
# prod1<-prod2<-1
# for(k in 1:p){
# tt1<-C1(k,i,tl)
# tt2<-C1(k,j,tl)
# tt3<-C2(k,i,j,tl)
# # if(!any(na.omit(tl$Kind[i,]==k)))
# # tt1=1
# # if(!any(na.omit(tl$Kind[j,]==k)))
# # tt2=1
# # if(!any(na.omit(intersect(tl$Kind[i,], tl$Kind[j,])==k)))
# # tt3=1
# prod1<-prod1*tt1*tt2
# prod2<-prod2*tt3#~~~~~~~~~~~~~~~~~~~~~~~~~ finish this debugging!!!!
# }
# myCC[i,j]<-prod1
# myC2[i,j]<-prod2
# vt[i,j]=prod2-prod1#prod1*(prod2/prod1-1)
# }
# }
#
# u=1:p
# CCu<-apply(tl$C1.all.prod3[,,u,drop=F],1:2,prod) # product over u's TODO: utilize symmetry
# C2.temp<-apply(tl$C2.all2[,,u,drop=F],1:2,prod)
# mat<-tl$CC*(C2.temp/CCu-1)
# mat[tl$CC==0]<-0
#
# C2.temp[CCu==0]
# CCu[C2.temp==0]
#
# CCu[1,4]
# myCC[1,4]
# tl$CC[1,4]
# C2.temp[1,4]
# myC2[1,4]
#
# VuMat(1:p,tl)[4,1]
# vt[4,1]
# range(VuMat(1:p,tl)-vt)
# (tl$a%*%vt%*%t(tl$a))
# (tl$a%*%mat%*%t(tl$a))
# var.tot
# browser()
vars.used<-unique(unlist(na.omit(c(tl$Kind)))) # which variables are used in this model?
vars.used<-sort(vars.used)
tl$integrals<-matrix(0,nrow=length(mcmc.use.mod),ncol=max(cs.num.ind)) # where we store all the integrals (not normalized by subtraction) - matches dim of sob
## get main effect variances
tl$integrals[,which(combs[[1]]%in%vars.used)]<-apply(t(vars.used),2,Vu,tl=tl) # one for each effect for each mcmc iteration, only done for the used vars, but inserted into the correct column of tl$integrals
sob[mod.ind,1:cs.num.ind[1]]<-tl$integrals[,1:cs.num.ind[1]] # main effects are done
## get interactions
# consider changing lens to n.int
if(max(lens)>1){ # if there are any interactions
for(l in 2:max(lens)){ # must go in order for it to work (tl$integrals is made sequentially)
int.l.ind<-(cs.num.ind[l-1]+1):cs.num.ind[l] # index for which columns of sob will have this level of interaction, and they are names.ind[[l]]
basis.int.l.use<-matrix(nrow=M,ncol=num.ind[l]) # basis.int.l.use[i,j]=1 if interaction combs[[l]][,j] occurs in ith basis function, shows us which interactions (of order l) are used in which basis functions
for(m in 1:M){
#basis.int.l.use[m,]<-unlist(lapply(combs.list[[l]],function(el){prod(el%in%tl$Kind[m,])})) #all the variables in question must be in the basis
basis.int.l.use[m,]<-apply(combs[[l]],2,function(el){prod(el%in%tl$Kind[m,])})
}
use<-colSums(basis.int.l.use)>0 # indicator for which interactions (of order l) are used at all in this model
tl$integrals[,int.l.ind[use]]<-apply(combs[[l]][,use,drop=F],2,Vu,tl=tl) # perform the necessary integration, but only for interactions that are actually used
sob.l<-matrix(0,nrow=length(mcmc.use.mod),ncol=num.ind[l])
for(l2 in (1:num.ind[l])[use]){ # only go through the interactions that are actually in Kind (but still allow for 2-way when actual is 3-way, etc.)
sob.l[,l2]<-VuInt(combs[[l]][,l2],tl) # do the normalizing (subtract out lower order terms to make this orthogonal to them)
}
sob[mod.ind,int.l.ind]<-sob.l # variance explained
}
}
sob[mod.ind,]<-sob[mod.ind,]/var.tot # scale so that percent variance explained
i<-i+length(mcmc.use.mod) # for index
var.tot.store[mod.ind]<-var.tot # store the total variance for this model
#browser()
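    # f0.store holds the overall (integrated) mean of the emulator at each MCMC
    # iteration: the intercept plus each basis coefficient times the product of
    # its univariate basis integrals; this is the centering constant of the
    # Sobol/ANOVA decomposition.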
C1.temp<-(1/(tl$q+1)*((tl$s+1)/2-tl$s*tl$t))*tl$s^2
C1.temp[C1.temp==0]<-1
f0.store[mcmc.use.mod]<-bassMod$beta[mcmc.use.mod,1]+bassMod$beta[mcmc.use.mod,2:(tl$M+1)]%*%matrix(apply(C1.temp,1,prod))
#browser()
main.effects<-T
# if(length(combs[[1]])==0)
# main.effects<-F
two.ints<-F
if(length(combs)>1)
two.ints<-T
if(getEffects){
##################################################################
# main effects - trying to get fi - f0, can leave off the intercepts (a0 in Chen 2004)
f0<-bassMod$beta[mcmc.use.mod,2:(tl$M+1)]%*%matrix(apply(C1.temp,1,prod)) # without a0
if(main.effects){
for(ef in 1:ncol(combs[[1]])){ # go through each effect
effects[[1]][mod.ind,ef,]<- -f0
for(m in 1:tl$M){ # go through each basis function
pp.use<-combs[[1]][,ef] # which variable is this effect
if(tl$s[m,pp.use]!=0){ # if the basis function uses the variable
effects[[1]][mod.ind,ef,]<-effects[[1]][mod.ind,ef,]+tcrossprod(bassMod$beta[mcmc.use.mod,m+1],makeBasis(tl$s[m,pp.use],1,tl$t[m,pp.use],xxt,1)*prod(C1.temp[m,-pp.use]))
} else{ # if the basis function does not use the variable (integrate over all)
effects[[1]][mod.ind,ef,]<-effects[[1]][mod.ind,ef,]+bassMod$beta[mcmc.use.mod,m+1]*prod(C1.temp[m,])
}
}
#matplot(t(effects[[1]][1,,]),type='l')
#matplot(cbind(10*sin(pi*xx)^2/(pi*xx) - a1, 10*sin(pi*xx)^2/(pi*xx) - a1, 20*(xx-.5)^2 - 5/3, 10*xx - 5, 5*xx-5/2),type='l')
#matplot(t(effects[[1]][,1,]),type='l')
}
}
if(two.ints){
## 2-way interactions
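        ## (each surface starts at -f0 and has both main effects swept out, so what
        ## remains is the centered two-way interaction term of the ANOVA decomposition)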
for(ef in 1:ncol(combs[[2]])){
for(kk in 1:length(mcmc.use.mod))
effects[[2]][mod.ind[kk],ef,,]<- -f0[kk]
pp.use<-combs[[2]][,ef]
pp.use1<-which(combs[[1]]%in%combs[[2]][1,ef])
pp.use2<-which(combs[[1]]%in%combs[[2]][2,ef])
effects[[2]][mod.ind,ef,,]<- sweep(effects[[2]][mod.ind,ef,,,drop=F], c(1,2,3), effects[[1]][mod.ind,pp.use1,,drop=F])
effects[[2]][mod.ind,ef,,]<- sweep(effects[[2]][mod.ind,ef,,,drop=F], c(1,2,4), effects[[1]][mod.ind,pp.use2,,drop=F])
#image.plot(effects[[2]][mcmc.use.mod[1],ef,,])
for(m in 1:tl$M){
b1<-makeBasis(tl$s[m,pp.use[1]],1,tl$t[m,pp.use[1]],xxt,1)
b2<-makeBasis(tl$s[m,pp.use[2]],1,tl$t[m,pp.use[2]],xxt,1)
if(all(b1==0))
b1=b1+1
if(all(b2==0))
b2=b2+1
effects[[2]][mod.ind,ef,,]<-effects[[2]][mod.ind,ef,,] + drop(bassMod$beta[mcmc.use.mod,m+1]%o%(tcrossprod(b1,b2)*prod(C1.temp[m,-pp.use])))
}
}
#browser()
#image.plot(effects[[2]][1,1,,],zlim=c(-10,10))
#image.plot(matrix(10*sin(2*pi*xx2[,1]*xx2[,2]),ncol=100))
#xx2<-expand.grid(t(xxt),t(xxt))
#a1<--5/pi*sum( (-4*pi^2)^(1:50)/((2*(1:50))*factorial(2*(1:50))) )
#image.plot(matrix(10*sin(2*pi*xx2[,1]*xx2[,2]) - 10*sin(pi*xx2[,1])^2/(pi*xx2[,1]) - 10*sin(pi*xx2[,2])^2/(pi*xx2[,2]) + a1,nrow=100),zlim=c(-10,10))
}
}
##################################################################
if(verbose & mod.count%%10==0)
cat('Sobol',myTimestamp(),'Model:',mod.count,'\n')
}
}
sob<-as.data.frame(sob)
names(sob)<-unlist(names.ind) # give labels
if(verbose)
cat('Total Sensitivity',myTimestamp(),'\n')
tot<-getTot(combs,sob,names.ind) # get total indices
  ## reorder columns of sob & tot matrices to match original data order
sob.reorder<-NA
sob.reorder[1:length(names.ind[[1]])]<-mixOrd(allCombs$dispNames[[1]])
if(length(names.ind)>1){
for(l in 2:length(names.ind)){
sob.reorder[(cs.num.ind[l-1]+1):(cs.num.ind[l])]<-cs.num.ind[l-1]+mixOrd(allCombs$dispNames[[l]])
}
}
sob<-sob[,sob.reorder,drop=F]
names(sob)<-unlist(allCombs$dispNames)[sob.reorder]
tot<-tot[,sob.reorder[1:length(names.ind[[1]])],drop=F]
names(tot)<-allCombs$dispNames[[1]][sob.reorder[1:length(names.ind[[1]])]]
if(any(sob<0))
browser()
ret<-list(S=sob,T=tot,func=F,var.tot=var.tot.store,f0=f0.store,ints=tl$integrals,prior=prior,effects=effects,names.ind=names.ind)
class(ret)<-'bassSob'
return(ret)
}
## get sobol indices - functional
sobol_des_func<-function(bassMod,mcmc.use,verbose,func.var,xx.func.var,prior,prior.cat){
models<-bassMod$model.lookup[mcmc.use] # only do the heavy lifting once for each model
uniq.models<-unique(models)
nmodels<-length(uniq.models)
maxInt.tot<-sum(bassMod$maxInt.des)+sum(bassMod$maxInt.cat)+sum(bassMod$maxInt.func)
maxBasis<-max(bassMod$nbasis)
q<-bassMod$degree
pdes<-sum(bassMod$pdes)
pcat<-sum(bassMod$pcat)
pfunc<-sum(bassMod$pfunc)
p<-pdes+pcat+pfunc
################################################
# get combs including functional variables
allCombs<-getCombs(bassMod,uniq.models,nmodels,maxBasis,maxInt.tot,func.var)
combs<-allCombs$combs
names.ind<-allCombs$names.ind
num.ind<-allCombs$num.ind
cs.num.ind<-allCombs$cs.num.ind
################################################
sob<-sob2<-array(0,dim=c(length(mcmc.use),sum(num.ind),length(xx.func.var)))
#prior.func[[func.var]]<-NULL
#prior<-c(prior,prior.func)
if(verbose)
cat('Sobol Start',myTimestamp(),'Models:',length(unique(models)),'\n')
i<-1
mod.count<-0
for(mod in uniq.models){ #do this in parallel?
mod.count<-mod.count+1
mcmc.use.mod<-mcmc.use[models==mod] # which part of mcmc.use does this correspond to?
mod.ind<-i:(i+length(mcmc.use.mod)-1)
M<-bassMod$nbasis[mcmc.use.mod][1] # number of basis functions in this model
if(M>0){
tl<-get_tl(bassMod,mcmc.use.mod,M,mod,p,q,cs.num.ind,combs,func.var,xx.func.var)
tl$prior<-prior
tl$prior.cat<-prior.cat
if(tl$cat)
tl<-add_tlCat(tl,bassMod,mcmc.use.mod,mod)
tl<-add_tl(tl,p)
lens<-apply(tl$Kind,1,function(x) length(na.omit(x)))
tl$tfunc.basis<-makeBasisMatrixVar(mod,M,vars=bassMod$vars.func,signs=bassMod$signs.func,knots.ind=bassMod$knotInd.func,q=bassMod$degree,xxt=t(tl$xx),n.int=bassMod$n.int.func,xx.train=bassMod$xx.func,var=func.var)[-1,,drop=F]
var.tot<-Vu_des_func(1:p,tl) # total variance
vars.used<-unique(unlist(na.omit(c(tl$Kind)))) # which variables are used?
vars.used<-sort(vars.used)
tl$integrals<-array(0,dim=c(length(mcmc.use.mod),max(cs.num.ind),length(xx.func.var))) # where we store all the integrals (not normalized by subtraction)
jj=0
for(pp in vars.used){
#jj=jj+1
tl$integrals[,which(combs[[1]]==pp),]<-Vu_des_func(pp,tl)
}
sob[mod.ind,1:cs.num.ind[1],]<-tl$integrals[,1:cs.num.ind[1],]
if(max(lens)>1){ # if there are any interactions
for(l in 2:max(lens)){ # must go in order for it to work (tl$integrals is made sequentially)
int.l.ind<-(cs.num.ind[l-1]+1):cs.num.ind[l]
#basis.int.l.use<-matrix(nrow=M,ncol=length(combs.list[[l]]))
basis.int.l.use<-matrix(nrow=M,ncol=num.ind[l])
for(m in 1:M){
#basis.int.l.use[m,]<-unlist(lapply(combs.list[[l]],function(el){prod(el%in%tl$Kind[m,])})) # all the variables in question must be in the basis
basis.int.l.use[m,]<-apply(combs[[l]],2,function(el){prod(el%in%tl$Kind[m,])})
}
use<-colSums(basis.int.l.use)>0
for(pp in which(use)){
tl$integrals[,int.l.ind[pp],]<-Vu_des_func(combs[[l]][,pp,drop=F],tl) # perform the necessary integration
}
sob.l<-array(0,dim=c(length(mcmc.use.mod),num.ind[l],length(xx.func.var)))
for(l2 in (1:num.ind[l])[use]){ # only go through the interactions that are actually in Kind (but still allow for 2-way when actual is 3-way, etc.)
sob.l[,l2,]<-VuInt_des_func(combs[[l]][,l2],tl) # do the normalizing
}
sob[mod.ind,int.l.ind,]<-sob.l
}
}
kk<-0
for(ii in mod.ind){
kk=kk+1
sob2[ii,,]<-t(t(sob[ii,,])/var.tot[kk,]) # sobol indices
}
i<-i+length(mcmc.use.mod) # for index
if(verbose & mod.count%%10==0)
cat('Sobol',myTimestamp(),'Model:',mod.count,'\n')
}
}
# reorder for display
sob.reorder<-NA
sob.reorder[1:length(names.ind[[1]])]<-mixOrd(allCombs$dispNames[[1]])
if(length(names.ind)>1){
for(l in 2:length(names.ind)){
sob.reorder[(cs.num.ind[l-1]+1):(cs.num.ind[l])]<-cs.num.ind[l-1]+mixOrd(allCombs$dispNames[[l]])
}
}
sob<-sob[,sob.reorder,,drop=F]
sob2<-sob2[,sob.reorder,,drop=F]
#if(any(sob2<0))
# browser()
ret<-list(S=sob2,S.var=sob,names.ind=unlist(allCombs$dispNames)[sob.reorder],xx=unscale.range(tl$xx,bassMod$range.func),func=T,prior=prior)
class(ret)<-'bassSob'
return(ret)
}
# get total sensitivity indices
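# (a variable's total index is its main-effect index plus every interaction
# index whose combination includes that variable, all on the same
# proportion-of-variance scale as sob)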
getTot<-function(combs,sob,names.ind){
vars.use<-unique(unlist(combs))
puse<-length(vars.use)
ncombs<-sapply(combs,ncol)
tot<-sob[,1:ncombs[1]] # start with main effects, then add to them
ncombs[1]<-0
if(length(combs)>1){ # if there are interactions
int.use<-2:length(combs) # this should work because the lower order combs have to be there
for(pp in 1:puse){ # go through variables
for(l in int.use){ # go through interactions
tot[,pp]<-tot[,pp]+rowSums(
sob[,
puse + # after main effects
ncombs[l-1] + # after interactions of order l-1
which(apply(t(combs[[l]]),1,function(r){vars.use[pp]%in%r})) # which combs use this variable
,drop=F]
)
}
}
}
return(tot)
}
########################################################################
## processing functions
########################################################################
## make for only one variable
makeBasisMatrixVar<-function(i,nbasis,vars,signs,knots.ind,q,xxt,n.int,xx.train,var){
n<-ncol(xxt)
tbasis.mat<-matrix(nrow=nbasis+1,ncol=n)
tbasis.mat[1,]<-1
if(nbasis>0){
for(m in 1:nbasis){
if(all(na.omit(vars[i,m,])!=var)){
tbasis.mat[m+1,]<-1
} else{
use<-which(vars[i,m,]==var)
knots<-xx.train[cbind(knots.ind[i,m,use],vars[i,m,use])]
tbasis.mat[m+1,]<-makeBasis(signs[i,m,use],1,knots,xxt,q)
}
}
}
return(tbasis.mat)
}
## get all the variable combinations used in the models, storing in proper structures
getCombs<-function(bassMod,uniq.models,nmodels,maxBasis,maxInt.tot,func.var=NULL){
#browser()
des.labs<-which(bassMod$cx%in%c('numeric','integer'))
cat.labs<-which(bassMod$cx=='factor')
func.labs<-letters[0:sum(bassMod$pfunc)]
labs<-c(des.labs,func.labs,cat.labs) # this is the order things end up in
vf<-bassMod$vars.func[uniq.models,,]
sub<-0
if(!is.null(func.var)){
vf[vf==func.var]<-NA
sub=-1
}
n.un<-array(c(as.integer(bassMod$vars.des[uniq.models,,]),as.integer(vf+sum(bassMod$pdes)),as.integer(bassMod$vars.cat[uniq.models,,]+sum(bassMod$pdes)+sum(bassMod$pfunc))),dim=c(nmodels,maxBasis,maxInt.tot)) # all the variables/interactions used (with non-overlapping variable indices)
#n.un<-apply(n.un,1:2,sort) # this has list properties, n.un[1,] is a list (for the first model) with elements for basis function interactions - something wrong here when there are no NAs
x<-apply(n.un,3,function(x) x)
n.un<-lapply(1:nrow(x),function(i) sort(x[i,]))
n.un<-unique(c(n.un)) # unique combinations
n.un[sapply(n.un,length)==0]<-NULL # get rid of list elements with nothing in them
int.lower<-list() # will hold lower order combinations
for(ii in 1:length(n.un)){
pp<-length(n.un[[ii]])
if(pp==1){
int.lower<-c(int.lower,n.un[[ii]])
} else{
int.lower<-c(int.lower,do.call(c,sapply(1:pp,function(x) combn(n.un[[ii]],x,simplify=F)))) # get all the combinations, add them to the list
}
}
#browser()
int.lower<-lapply(int.lower,as.integer)
n.un<-unique(c(n.un,unique(int.lower))) # this now has all the lower order combinations
ord.intSize<-order(sapply(n.un,length))
n.un<-n.un[ord.intSize] # order by interaction size
int.begin.ind<-NA # a[i] is the index in n.un where ith order interactions begin
for(ii in 1:maxInt.tot)
int.begin.ind[ii]<-which(sapply(n.un,length)==ii)[1]
int.begin.ind[maxInt.tot+1]<-length(n.un)+1 # need the top level to help with indexing later
combs<-names.ind<-dispNames<-dispCombs<-mat<-list()
ints.used<-(1:maxInt.tot)[!is.na(int.begin.ind[-(maxInt.tot+1)])] # which interactions we actually use in the models
int.begin.ind<-na.omit(int.begin.ind) # will help index things
k<-0
for(ii in ints.used){ # go through used interactions
k<-k+1
if(!is.na(int.begin.ind[ii])){ # probably don't need this if anymore
      mat<-do.call(rbind,n.un[int.begin.ind[k]:(int.begin.ind[k+1]-1)]) # get all the interactions of order ii
mat<-mat[do.call(order, as.data.frame(mat)),,drop=F] # sort them (order works the way we want for ordering vectors)
combs[[ii]]<-t(mat) # the combinations of order ii
names.ind[[ii]]<-apply(combs[[ii]],2,paste,collapse='x') # labels for output
dispCombs[[ii]]<-matrix(labs[combs[[ii]]],nrow = nrow(combs[[ii]])) # takes into account categorical and functional labels
dd<-dim(dispCombs[[ii]])
dispCombs[[ii]]<-apply(dispCombs[[ii]],2,mixSort) # sorts numbers and letters the way we want
dim(dispCombs[[ii]])<-dd
dispNames[[ii]]<-apply(dispCombs[[ii]],2,paste,collapse='x') # same as names.ind, but better indexing for display (with functional, categorical variables)
#combs.list[[ii]]<-split(combs[[ii]],rep(1:ncol(combs[[ii]]),each=nrow(combs[[ii]]))) # list version of combs[[ii]]
}
}
num.ind<-sapply(combs,ncol) # num.ind[i] is number of interactions of order i
cs.num.ind<-cumsum(num.ind) # used for indexing
return(list(combs=combs,names.ind=names.ind,num.ind=num.ind,cs.num.ind=cs.num.ind,dispCombs=dispCombs,dispNames=dispNames))
}
## process model information into a temporary list
get_tl<-function(bassMod,mcmc.use.mod,M,mod,p,q,cs.num.ind,combs,func.var=NULL,xx.func.var=NULL){
a<-bassMod$beta[mcmc.use.mod,2:(M+1),drop=F] # basis coefficients excluding intercept
vf<-bassMod$vars.func[mod,1:M,]
pfunc<-sum(bassMod$pfunc)
if(!is.null(func.var)){
vf[vf==func.var]<-NA # if there is a functional variable specified, we make it NA so that we don't integrate over it
pfunc<-pfunc-1 # disregarding the func.var so we don't integrate over it
}
if(bassMod$des){
Kind<-cbind(bassMod$vars.des[mod,1:M,],vf+bassMod$pdes) # Kind[i,] is the variables used in the ith basis function, including relevant functional ones
} else if(bassMod$func){
Kind<-matrix(vf)
} else{
Kind<-NA
}
if(M==1){
Kind<-t(Kind)
}
p.df<-sum(bassMod$pdes)+sum(bassMod$pfunc)
t<-s<-matrix(0,nrow=M,ncol=sum(bassMod$pdes)+sum(bassMod$pfunc))
if(p.df>0){
for(k in 1:M){ # these matrices mimic the output of earth
if(bassMod$des){
n.int.des<-bassMod$n.int.des[mod,k]
knotInd.des<-bassMod$knotInd.des[mod,k,1:n.int.des]
vars.des<-bassMod$vars.des[mod,k,1:n.int.des]
t[k,vars.des]<-bassMod$xx.des[cbind(knotInd.des,vars.des)]
s[k,vars.des]<-bassMod$signs.des[mod,k,1:n.int.des]
}
if(pfunc>0){
if(bassMod$n.int.func[mod,k]>0){
n.int.func<-bassMod$n.int.func[mod,k]
knotInd.func<-bassMod$knotInd.func[mod,k,1:n.int.func]
vars.func<-bassMod$vars.func[mod,k,1:n.int.func]
t[k,vars.func+sum(bassMod$pdes)]<-bassMod$xx.func[cbind(knotInd.func,vars.func)]
s[k,vars.func+sum(bassMod$pdes)]<-bassMod$signs.func[mod,k,1:n.int.func]
}
}
}
}
if(!is.null(func.var)){
s[,bassMod$pdes+func.var]<-t[,bassMod$pdes+func.var]<-0
}
tl<-list(s=s,t=t,q=q,a=a,M=M,Kind=Kind,cs.num.ind=cs.num.ind,combs=combs,xx=xx.func.var,pfunc=sum(bassMod$pfunc),cat=bassMod$cat,pdes=sum(bassMod$pdes)) #temporary list
return(tl)
}
## process model information into a temporary list - categorical part
add_tlCat<-function(tl,bassMod,mcmc.use.mod,mod){
tl$pcat<-bassMod$pcat
tl$sub.cnt<-matrix(0,nrow=tl$M,ncol=tl$pcat)
tl$sub<-list()
for(mm in 1:tl$M){
vars<-na.omit(bassMod$vars.cat[mod,mm,])
tl$sub.cnt[mm,vars]<-bassMod$sub.size[mod,mm,bassMod$vars.cat[mod,mm,]%in%vars]/bassMod$nlevels[vars]
tl$sub[[mm]]<-list()
for(k in 1:tl$pcat){
tl$sub[[mm]][[k]]<-NA
if(k %in% vars)
tl$sub[[mm]][[k]]<-bassMod$sub.list[[mod]][[mm]][[which(vars==k)]]
}
}
p.df<-sum(bassMod$pdes)+sum(tl$pfunc)
if(p.df>0)
tl$Kind<-cbind(tl$Kind,bassMod$vars.cat[mod,1:tl$M,]+sum(bassMod$pdes)+sum(tl$pfunc))
if(p.df==0)
tl$Kind<-bassMod$vars.cat[mod,1:tl$M,]
if(is.null(nrow(tl$Kind)))
tl$Kind<-matrix(tl$Kind)
tl$nlevels<-bassMod$nlevels
return(tl)
}
## process model information into a temporary list - evaluate integrals (from Chen 2004, but vectorized as much as possible)
add_tl<-function(tl,p){
#browser()
p.df<-sum(tl$pdes)+sum(tl$pfunc)
p.use<-p
if(p.df==0){
C1.all.cat<-tl$sub.cnt
C1.all<-C1.all.cat
} else{
#C1.all<-C1(tl)#(1/(tl$q+1)*((tl$s+1)/2-tl$s*tl$t))*tl$s^2 # so I don't need C function anymore
C1.all<-C1All(tl) # C1.all[i,j] is C1 (from Chen, 2004) for variable j in basis function i
if(tl$cat){
C1.all.cat<-tl$sub.cnt
C1.all.cat[C1.all.cat==0]<-1
C1.all<-cbind(C1.all,C1.all.cat)
}
}
#C1.all.temp<-replace(C1.all,which(C1.all==0,arr.ind=T),1) # for products, make 0's into 1's, see Eq 50 of Chen 2004 to understand why (if we didn't the products wouldn't work)
#browser()
C1.all.temp<-C1.all
C1.all.prod<-apply(C1.all.temp,1,prod) # product over basis functions (from 1 to M)
tl$CC<-tcrossprod(C1.all.prod) # Eq 35, this is the product from 1:p (M in their notation) for all combinations of basis functions
C2.all<-C1.all.prod2<-both.ind<-array(0,dim=c(tl$M,tl$M,p.use))
for(ii in 1:tl$M){
for(jj in ii:tl$M){
bb<-intersect(na.omit(tl$Kind[ii,]),na.omit(tl$Kind[jj,])) # variables that basis functions ii and jj have in common
## test
C1.all.prod2[ii,jj,]<-C1.all.prod2[jj,ii,]<-C1.all.temp[ii,]*C1.all.temp[jj,]
##
#if(length(bb)>0){
#C1.all.prod2[ii,jj,]<-C1.all.prod2[jj,ii,]<-C1.all[ii,]*C1.all[jj,] # pairwise products of C1.all, without final product like tl$CC. Nothing is ever multiplied by 0 because of if statement above
#if(ii==11 & jj==11)
# browser()
#both.ind[ii,jj,bb]<-both.ind[jj,ii,bb]<-1
#bb.cat<-bb[bb>p.df]
#bb.des<-bb[bb<=p.df]
#browser()
temp<-rep(0,p.use)
#if(length(bb.des)>0){
if(p.df>0)
temp[1:p.df]<-apply(t(1:p.df),2,C2,m=ii,n=jj,tl=tl)
#}
#if(length(bb.cat)>0){
if(tl$cat){
temp[(p.df+1):p.use]<-apply(t(1:tl$pcat),2,C2Cat,m=ii,n=jj,tl=tl)
#if(temp[(p.df+1):p.use]==0)
#browser()
}
C2.all[ii,jj,]<-C2.all[jj,ii,]<-temp
#}
}
}
tl$C1.all.prod3<-C1.all.prod2
tl$C1.all.prod3[C1.all.prod2==0]<-1 # again, for products...otherwise you have to deal with divide by 0 by putting mat[tl$CC==0]<-0 inside Vu function
tl$C2.all2<-C2.all
#tl$C2.all2[!as.logical(both.ind)]<-1
#tl$C2.all2[tl$C2.all2==0]<-1
#browser()
#if(min(tl$C2.all2)<1e-13)
# browser()
return(tl)
}
## sorting function with mixed numerical and character
mixSort<-function(x){
ind<-is.na(suppressWarnings(as.numeric(x)))
num<-which(!ind)
char<-which(ind)
return(c(sort(as.numeric(x[num])),x[char]))
}
## ordering function with mixed numerical and character
mixOrd<-function(x){
ind<-is.na(suppressWarnings(as.numeric(x)))
ord<-1:length(x)
num<-which(!ind)
char<-which(ind)
return(c(ord[num][order(as.numeric(x[num]))],ord[char]))
}
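## illustrative behaviour (not run): numeric-looking entries are sorted numerically and any
## character entries are appended at the end, e.g.
# mixSort(c("10", "2", "a", "1")) # -> "1" "2" "10" "a"
# mixOrd(c("10", "2", "a", "1")) # -> 4 2 1 3 (ordering indices, characters last)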
########################################################################
## functions for Sobol decomposition - these all use scaling from const function
########################################################################
# C1<-function(tl){ # can have a different one for each variable
# (1/(tl$q+1)*((tl$s+1)/2-tl$s*tl$t))*tl$s^2
# }
# C1All<-function(tl)
# C1(tl)
C1All<-function(tl){
M<-tl$M
puse<-ncol(tl$s)
out<-matrix(nrow=M,ncol=puse)
for(m in 1:M){
for(p in 1:puse){
out[m,p]<-C1(p,m,tl)
}
}
out
}
################################################################################
# integral from a to b of (x-t)^q * prior(x) when q positive integer
intabq1 <- function (prior,a,b,t,q) {
UseMethod("intabq1", prior)
}
intabq1.uniform<-function(prior,a,b,t,q){
1/(q+1)*((b-t)^(q+1)-(a-t)^(q+1)) * 1/(prior$trunc[2]-prior$trunc[1])
#int<-integrate(function(x) (x-t)*dunif(x,prior$trunc[1],prior$trunc[2]),lower=a,upper=b)$value
}
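## quick numerical sanity check (illustrative only, not run by the package): with a uniform(0,1)
## prior the closed form above should agree with direct quadrature, e.g.
# pr <- list(trunc = c(0, 1))
# intabq1.uniform(pr, 0.3, 0.9, 0.2, 1) # closed form, 0.24
# integrate(function(x) (x - 0.2) * dunif(x), lower = 0.3, upper = 0.9)$value # same value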
intabq1.normal<-function(prior,a,b,t,q){
if(q!=1)
stop('degree other than 1 not supported for normal priors')
out<-0
for(k in 1:length(prior$weights)){
zk<-pnorm(b,prior$mean[k],prior$sd[k]) - pnorm(a,prior$mean[k],prior$sd[k])
#tnorm.mean.zk<-prior$mean[k]*zk - prior$sd[k]*(dnorm(b,prior$mean[k],prior$sd[k]) - dnorm(a,prior$mean[k],prior$sd[k]))
ast<-(a-prior$mean[k])/prior$sd[k]
bst<-(b-prior$mean[k])/prior$sd[k]
dnb<-dnorm(bst)
dna<-dnorm(ast)
tnorm.mean.zk<-prior$mean[k]*zk - prior$sd[k]*(dnb - dna)
out<-out+prior$weights[k]*(tnorm.mean.zk - t*zk)
# in parens should match integrate(function(x){(x-t)*dnorm(x,prior$mean[k],prior$sd[k])},lower=a,upper=b)
}
out
}
intabq1.student<-function(prior,a,b,t,q){
if(q!=1)
stop('degree other than 1 not supported for student priors')
out<-0
for(k in 1:length(prior$weights)){
# zk<-pnorm(b,prior$mean[k],prior$sd[k]) - pnorm(a,prior$mean[k],prior$sd[k])
# ast<-(a-prior$mean[k])/prior$sd[k]
# bst<-(b-prior$mean[k])/prior$sd[k]
# dnb<-dnorm(bst)
# dna<-dnorm(ast)
# tnorm.mean.zk<-prior$mean[k]*zk - prior$sd[k]*(dnb - dna)
# out<-out+prior$weights[k]*(tnorm.mean.zk - t*zk)
# in parens should match integrate(function(x){(x-t)*dnorm(x,prior$mean[k],prior$sd[k])},lower=a,upper=b)
#int<-prior$mean[k]+prior$sd[k]*integrate(function(x) (x-t)*dt(x,prior$df[k]),lower=(a-prior$mean[k])/prior$sd[k],upper=(b-prior$mean[k])/prior$sd[k])$value
zk<-pt((b-prior$mean[k])/prior$sd[k],prior$df[k]) - pt((a-prior$mean[k])/prior$sd[k],prior$df[k])
# ast<-(a-prior$mean[k])/prior$sd[k]
# bst<-(b-prior$mean[k])/prior$sd[k]
# dnb<-dt(bst)/prior$sd[k]
# dna<-dt(ast)/prior$sd[k]
# tnorm.mean.zk<-prior$mean[k]*zk - prior$sd[k]*(dnb - dna)
# out<-out+prior$weights[k]*(tnorm.mean.zk - t*zk)
#int<-integrate(function(x) (x-t)*dt((x-prior$mean[k])/prior$sd[k],prior$df[k])/prior$sd[k],lower=a,upper=b)$value
#browser()
int<-intx1Student(b,prior$mean[k],prior$sd[k],prior$df[k],t) - intx1Student(a,prior$mean[k],prior$sd[k],prior$df[k],t)
#integrate(function(x) (x-t)*dt.scaled(x,prior$df[k],prior$mean[k],prior$sd[k]),lower=a,upper=b)
out<-out+prior$weights[k]*int ## TODO: handle truncation, mixture...
}
#browser()
out
}
.S3method("intabq1", "uniform")
.S3method("intabq1", "normal")
.S3method("intabq1", "student")
intx1Student<-function(x,m,s,v,t){
#browser()
# a<-1/2
# b<-(1 + v)/2
# c<-3/2
# xx<- -(m - x)^2/(s^2 *v)
# f21<-gsl::hyperg_2F1(a,c-b,c,1-1/(1-xx))/(1-xx)^a # instead of hyperg_2F1(a,b,c,x)
# if(is.nan(f21))
# f21<-gsl::hyperg_2F1(a,b,c,xx)
temp<-(s^2*v)/(m^2 + s^2*v - 2*m*x + x^2)
-((v/(v + (m - x)^2/s^2))^(v/2) *
sqrt(temp) *
sqrt(1/temp) *
(s^2*v* (sqrt(1/temp) -
(1/temp)^(v/2)) +
(t-m)*(-1 + v)*(-m + x) *
(1/temp)^(v/2) *
robust2f1(1/2,(1 + v)/2,3/2,-(m - x)^2/(s^2 *v)) )) /
(s *(-1 + v)* sqrt(v) *beta(v/2, 1/2))
}
C1<-function(k,m,tl,tq=F){ #k is variable, m is basis function # deals with sign & truncation
t<-tl$t[m,k]
s<-tl$s[m,k]
#browser()
if(s==0)
return(1)
q<-tl$q
cc<-const(signs=s,knots=t,degree=q)
#int<-integrate(function(x) pos(s*(x-t))*dunif(x,tl$prior[[k]]$trunc[1],tl$prior[[k]]$trunc[2]),lower=0,upper=1)$value
#browser()
#return(int/cc)
if(tq){
q<-2*q
cc<-cc^2
}
#browser()
if(s==1){
a<-max(tl$prior[[k]]$trunc[1],t)
b<-tl$prior[[k]]$trunc[2]
if(b<t)
return(0)
out<-intabq1(tl$prior[[k]],a,b,t,q)/cc
#return(intabq1(tl$prior[[k]],a,b,t,q)/cc)
} else{
a<-tl$prior[[k]]$trunc[1]
b<-min(tl$prior[[k]]$trunc[2],t)
if(t<a)
return(0)
out<-intabq1(tl$prior[[k]],a,b,t,q)*(-1)^q/cc
#return(intabq1(tl$prior[[k]],a,b,t,q)*(-1)^q/cc)
}
if(out<0)
browser()
return(out)
}
## refer to francom 2016 paper
pCoef<-function(i,q){
factorial(q)^2*(-1)^i/(factorial(q-i)*factorial(q+1+i))
}
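## illustrative check of pCoef (q = 1 case): the coefficients correspond to the antiderivative
##   F(x) = (1/2)(x-t1)(x-t2)^2 - (1/6)(x-t2)^3,
## and dF/dx = (x-t1)(x-t2), i.e. the integrand handled by intabq2.uniform below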
################################################################################
## integral from a to b of [(x-t1)(x-t2)]^q * prior(x) when q positive integer
intabq2 <- function (prior,a,b,t1,t2,q) {
UseMethod("intabq2", prior)
}
intabq2.uniform<-function(prior,a,b,t1,t2,q){
(sum(pCoef(0:q,q)*(b-t1)^(q-0:q)*(b-t2)^(q+1+0:q)) - sum(pCoef(0:q,q)*(a-t1)^(q-0:q)*(a-t2)^(q+1+0:q))) * 1/(prior$trunc[2]-prior$trunc[1])
#integrate(function(x) (x-t1)*(x-t2)*dunif(x,prior$trunc[1],prior$trunc[2]),lower=a,upper=b)$value
}
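## quick numerical sanity check (illustrative only, not run by the package): with a uniform(0,1)
## prior the closed form above should agree with direct quadrature, e.g.
# pr <- list(trunc = c(0, 1))
# intabq2.uniform(pr, 0.5, 0.9, 0.2, 0.5, 1) # closed form, ~0.0453
# integrate(function(x) (x - 0.2) * (x - 0.5) * dunif(x), lower = 0.5, upper = 0.9)$value # same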
intabq2.normal<-function(prior,a,b,t1,t2,q){
if(q>1)
stop('spline degree >1 not supported yet')
out<-0
for(k in 1:length(prior$weights)){
zk<-pnorm(b,prior$mean[k],prior$sd[k]) - pnorm(a,prior$mean[k],prior$sd[k])
if(zk<.Machine$double.eps)
next
ast<-(a-prior$mean[k])/prior$sd[k]
bst<-(b-prior$mean[k])/prior$sd[k]
dnb<-dnorm(bst)
dna<-dnorm(ast)
tnorm.mean.zk<-prior$mean[k]*zk - prior$sd[k]*(dnb - dna)
tnorm.var.zk<-zk*prior$sd[k]^2*(1 + (ast*dna-bst*dnb)/zk - ((dna-dnb)/zk)^2) + tnorm.mean.zk^2/zk # variance + expectation^2
out<-out+prior$weights[k]*(tnorm.var.zk - (t1+t2)*tnorm.mean.zk + t1*t2*zk)
# in parens should be integrate(function(x){(x-t1)*(x-t2)*dnorm(x,prior$mean[k],prior$sd[k])},lower=a,upper=b)
if(out<0 & abs(out)<1e-12)
out<-0
}
#browser()
out
}
intabq2.student<-function(prior,a,b,t1,t2,q){
out<-0
for(k in 1:length(prior$weights)){
#int<-prior$mean[k]+prior$sd[k]*integrate(function(x) (x-t1)*(x-t2)*dt(x,prior$df[k]),lower=(a-prior$mean[k])/prior$sd[k],upper=(b-prior$mean[k])/prior$sd[k])$value
# browser()
#int<-integrate(function(x) (x-t1)*(x-t2)*dt((x-prior$mean[k])/prior$sd[k],prior$df[k])/prior$sd[k],lower=a,upper=b)$value
int<-intx2Student(b,prior$mean[k],prior$sd[k],prior$df[k],t1,t2) - intx2Student(a,prior$mean[k],prior$sd[k],prior$df[k],t1,t2)
out<-out+prior$weights[k]*int
}
out
}
.S3method("intabq2", "uniform")
.S3method("intabq2", "normal")
.S3method("intabq2", "student")
# robust2f1<-function(a,b,c,x){
# if(abs(x)<1)
# return(gsl::hyperg_2F1(a,b,c,x))
# return(gsl::hyperg_2F1(a,c-b,c,1-1/(1-x))/(1-x)^a)
# }
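## robust2f1 evaluates the Gauss hypergeometric function 2F1(a,b;c;x); for |x| >= 1, where the
## series does not converge directly, it applies what appears to be the Pfaff transformation
## 2F1(a,b;c;x) = (1-x)^(-a) * 2F1(a, c-b; c; x/(x-1)) (note x/(x-1) = 1 - 1/(1-x)), so the
## argument handed to hypergeo::f15.3.8 lies back inside the unit interval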
robust2f1<-function(a,b,c,x){
if(abs(x)<1)
return(hypergeo::f15.3.8(a,b,c,x))
return(hypergeo::f15.3.8(a,c-b,c,1-1/(1-x))/(1-x)^a)
}
intx2Student<-function(x,m,s,v,t1,t2){
#x=b;m=prior$mean[k];s=prior$sd[k];v=prior$df[k]
temp<-(s^2*v)/(m^2 + s^2*v - 2*m*x + x^2)
((v/(v + (m - x)^2/s^2))^(v/2) *
sqrt(temp) *
sqrt(1/temp) *
(-3*(-t1-t2+2*m)*s^2*v* (sqrt(1/temp) -
(1/temp)^(v/2)) +
3*(-t1+m)*(-t2+m)*(-1 + v)*(-m + x) *
(1/temp)^(v/2) *
robust2f1(1/2,(1 + v)/2,3/2,-(m - x)^2/(s^2 *v)) +
(-1+v)*(-m+x)^3*(1/temp)^(v/2) *
robust2f1(3/2,(1 + v)/2,5/2,-(m - x)^2/(s^2 *v)) )) /
(3*s *(-1 + v)* sqrt(v) *beta(v/2, 1/2))
}
## integral of two pieces of tensor that have same variable - deals with sign, truncation
C2<-function(k,m,n,tl){ # k is variable, n & m are basis indices
#browser()
q<-tl$q
t1<-tl$t[n,k]
s1<-tl$s[n,k]
t2<-tl$t[m,k]
s2<-tl$s[m,k]
cc<-const(signs=c(s1,s2),knots=c(t1,t2),degree=q)
# if((s1*s2)==0){
# return(0)
# }
## test
if(s1==0 & s2==0){
#browser()
return(1)
}
if(s1==0 & s2!=0){
return(C1(k,m,tl))
}
if(s1!=0 & s2==0){
return(C1(k,n,tl))
}
##
if(t2<t1){
t1<-tl$t[m,k]
s1<-tl$s[m,k]
t2<-tl$t[n,k]
s2<-tl$s[n,k]
}
#cc1<-const(signs=s1,knots=t1,degree=q)
#cc2<-const(signs=s2,knots=t2,degree=q)
#int<-integrate(function(x) pos(s1*(x-t1))/cc1*pos(s2*(x-t2))/cc2*dunif(x,tl$prior[[k]]$trunc[1],tl$prior[[k]]$trunc[2]),lower=0,upper=1,stop.on.error = F)$value
#browser()
#return(int)
#if(m==n){ #t1=t2, s1=s2 # didn't need this part
#return(1/(2*q+1)*((s1+1)/2-s1*t1)^(2*q+1)/cc)
# browser()
# return(C1(k,m,tl,tq=T)) # this is the same as if you let it run below, maybe faster?
#} #else{
if(s1==1){
if(s2==1){
a<-max(t2,tl$prior[[k]]$trunc[1])
b<-tl$prior[[k]]$trunc[2]
if(a>=b)
return(0)
out<-intabq2(tl$prior[[k]],a,b,t1,t2,q)/cc
#return(intabq2(tl$prior[[k]],a,b,t1,t2,q)/cc)
} else{
a<-max(t1,tl$prior[[k]]$trunc[1])
b<-min(t2,tl$prior[[k]]$trunc[2])
if(a>=b)
return(0)
out<-intabq2(tl$prior[[k]],a,b,t1,t2,q)*(-1)^q/cc
#return(intabq2(tl$prior[[k]],a,b,t1,t2,q)*(-1)^q/cc)
}
} else{
if(s2==1){
return(0)
} else{
a<-tl$prior[[k]]$trunc[1]
b<-min(t1,tl$prior[[k]]$trunc[2])
if(a>=b)
return(0)
out<-intabq2(tl$prior[[k]],a,b,t1,t2,q)/cc
#return(intabq2(tl$prior[[k]],a,b,t1,t2,q)/cc)
}
}
#}
if(abs(out)<.Machine$double.eps)
out<-0
if(out<0)
browser()
return(out)
}
## same as C2, but categorical
C2Cat<-function(k,m,n,tl){ # k is variable (categorical), m & n are basis functions
if(tl$sub.cnt[n,k]==0 & tl$sub.cnt[m,k]==0){
#browser()
return(1)
}
if(tl$sub.cnt[n,k]==0){
#browser()
return(tl$sub.cnt[m,k])
}
if(tl$sub.cnt[m,k]==0)
return(tl$sub.cnt[n,k])
# return(length(na.omit(intersect(tl$sub[[m]][[k]],tl$sub[[n]][[k]])))/tl$nlevels[k])
out<-length((intersect(tl$sub[[m]][[k]],tl$sub[[n]][[k]])))/tl$nlevels[k]
#print(out)
#if(out==0)
# browser()
return(out)
}
## matrix used in sobol main effect variances - where most of the time is spent
VuMat<-function(u,tl){
#browser()
CCu<-apply(tl$C1.all.prod3[,,u,drop=F],1:2,prod) # product over u's TODO: utilize symmetry
C2.temp<-apply(tl$C2.all2[,,u,drop=F],1:2,prod)
mat<-tl$CC*(C2.temp/CCu-1) # Eq 35 of Chen 2004
#mat[tl$CC==0]<-0
return(mat)
}
## sobol main effect variances
Vu<-function(u,tl){
mat<-VuMat(u,tl)
out<-apply(tl$a,1,function(x) t(x)%*%mat%*%x) # Eq 35 of Chen 2004. Should mat have non-negative eigenvalues?
if(any(out<0))
browser()
return(out)
}
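## A sketch of what VuMat/Vu compute, in the notation of Chen et al. (2004), Eq. 35
## (illustrative notation only): writing C1_k(m) and C2_k(m,n) for the one- and two-basis
## integrals over variable k, the variance attributed to the variable set u is the quadratic form
##   V_u = sum_{m,n} a_m a_n [ prod_{k in u} C2_k(m,n) * prod_{k not in u} C1_k(m) C1_k(n)
##                             - prod_{k} C1_k(m) C1_k(n) ]
## i.e. t(a) %*% mat %*% a with mat as built in VuMat above.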
## functional sobol main effect variances
Vu_des_func<-function(u,tl){
mat<-VuMat(u,tl)
nx<-length(tl$xx)
nmodels<-length(tl$a[,1])
out<-matrix(nrow=nmodels,ncol=nx)
for(i in 1:nmodels){
for(j in 1:nx){
tt<-tl$a[i,]*tl$tfunc.basis[,j]
out[i,j]<-t(tt)%*%mat%*%tt # Eq 35 of Chen 2004, not integrating over one of the variables
}
}
if(any(out<0))
browser()
return(out)
}
## sobol interaction variances
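## (sketch: by inclusion-exclusion over the subsets v of u listed in tl$combs, the variance
##  attributed to the interaction u is sum_v (-1)^(|u|-|v|) * integral-term(v); the loop below
##  accumulates exactly that from the precomputed tl$integrals)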
VuInt<-function(u,tl){
add<-0
len<-length(u)
for(l in 1:len){
ind<-((sum(tl$cs.num.ind[l-1])+1):tl$cs.num.ind[l])[apply(tl$combs[[l]],2,function(x) all(x%in%u))] # this gets index for which combs are subsets of u, sum(cs.num.ind[l-1]) makes it 0 when it should be
add<-add+(-1)^(len-l)*rowSums(tl$integrals[,ind,drop=F])
}
add[abs(add)<1.5e-13]<-0
if(any(add<0))
browser()
return(add)
}
## sobol interaction variances - functional
VuInt_des_func<-function(u,tl){
add<-0
len<-length(u)
#browser()
for(l in 1:len){
ind<-((sum(tl$cs.num.ind[l-1])+1):tl$cs.num.ind[l])[apply(tl$combs[[l]],2,function(x) all(x%in%u))] # this gets index for which combs are subsets of u, sum() makes it 0 when it should be
add<-add+(-1)^(len-l)*apply(tl$integrals[,ind,,drop=F],c(1,3),sum)
}
add[abs(add)<1e-13]<-0
if(any(add<0))
browser()
return(add)
}
| /scratch/gouwar.j/cran-all/cranData/BASS/R/sobol_normTmix.R |
\dontrun{
####################################################################################################
### univariate example
####################################################################################################
## simulate data (Friedman function)
f<-function(x){
10*sin(pi*x[,1]*x[,2])+20*(x[,3]-.5)^2+10*x[,4]+5*x[,5]
}
sigma<-1 # noise sd
n<-500 # number of observations
x<-matrix(runif(n*10),n,10) #10 variables, only first 5 matter
y<-rnorm(n,f(x),sigma)
## fit BASS, no tempering
mod<-bass(x,y)
plot(mod)
## fit BASS, tempering
mod<-bass(x,y,temp.ladder=1.3^(0:8),start.temper=1000)
plot(mod)
## prediction
npred<-1000
xpred<-matrix(runif(npred*10),npred,10)
pred<-predict(mod,xpred,verbose=TRUE) # posterior predictive samples
true.y<-f(xpred)
plot(true.y,colMeans(pred),xlab='true values',ylab='posterior predictive means')
abline(a=0,b=1,col=2)
## sensitivity
sens<-sobol(mod)
plot(sens,cex.axis=.5)
####################################################################################################
### functional example
####################################################################################################
## simulate data (Friedman function with first variable as functional)
sigma<-1 # noise sd
n<-500 # number of observations
nfunc<-50 # size of functional variable grid
xfunc<-seq(0,1,length.out=nfunc) # functional grid
x<-matrix(runif(n*9),n,9) # 9 non-functional variables, only first 4 matter
X<-cbind(rep(xfunc,each=n),kronecker(rep(1,nfunc),x)) # to get y
y<-matrix(f(X),nrow=n)+rnorm(n*nfunc,0,sigma)
## fit BASS
mod<-bass(x,y,xx.func=xfunc)
plot(mod)
## prediction
npred<-100
xpred<-matrix(runif(npred*9),npred,9)
Xpred<-cbind(rep(xfunc,each=npred),kronecker(rep(1,nfunc),xpred))
ypred<-matrix(f(Xpred),nrow=npred)
pred<-predict(mod,xpred) # posterior predictive samples (each is a curve)
matplot(ypred,apply(pred,2:3,mean),type='l',xlab='observed',ylab='mean prediction')
abline(a=0,b=1,col=2)
matplot(t(ypred),type='l') # actual
matplot(t(apply(pred,2:3,mean)),type='l') # mean prediction
## sensitivity
sens<-sobol(mod,mcmc.use=1:10) # for speed, only use a few samples
plot(sens) # functional variable labelled "a"
sens.func<-sobol(mod,mcmc.use=1:10,func.var=1)
plot(sens.func)
}
## minimal example for CRAN testing
mod<-bass(1:2,1:2,nmcmc=2,nburn=1) | /scratch/gouwar.j/cran-all/cranData/BASS/inst/examples.R |
\dontrun{
## simulate data (Friedman function)
f<-function(x){
10*sin(pi*x[,1]*x[,2])+20*(x[,3]-.5)^2+10*x[,4]+5*x[,5]
}
## simulate data (Friedman function with first variable as functional)
sigma<-.1 # noise sd
n<-500 # number of observations
nfunc<-50 # size of functional variable grid
xfunc<-seq(0,1,length.out=nfunc) # functional grid
x<-matrix(runif(n*9),n,9) # 9 non-functional variables, only first 4 matter
X<-cbind(rep(xfunc,each=n),kronecker(rep(1,nfunc),x)) # to get y
y<-matrix(f(X),nrow=n)+rnorm(n*nfunc,0,sigma)
## fit BASS
library(parallel)
mod<-bassPCA(x,y,n.pc=5,n.cores=min(5,parallel::detectCores()))
plot(mod$mod.list[[1]])
plot(mod$mod.list[[2]])
plot(mod$mod.list[[3]])
plot(mod$mod.list[[4]])
plot(mod$mod.list[[5]])
hist(mod$dat$trunc.error)
## prediction
npred<-100
xpred<-matrix(runif(npred*9),npred,9)
Xpred<-cbind(rep(xfunc,each=npred),kronecker(rep(1,nfunc),xpred))
ypred<-matrix(f(Xpred),nrow=npred)
pred<-predict(mod,xpred,mcmc.use=1:1000) # posterior predictive samples (each is a curve)
matplot(ypred,apply(pred,2:3,mean),type='l',xlab='observed',ylab='mean prediction')
abline(a=0,b=1,col=2)
matplot(t(ypred),type='l') # actual
matplot(t(apply(pred,2:3,mean)),type='l') # mean prediction
## sensitivity
sens<-sobolBasis(mod,int.order = 2,ncores = max(parallel::detectCores()-2,1),
mcmc.use=1000) # for speed, only use a few samples
plot(sens)
## calibration
x.true<-runif(9,0,1) # what we are trying to learn
yobs<-f(cbind(xfunc,kronecker(rep(1,nfunc),t(x.true)))) +
rnorm(nfunc,0,.1) # calibration data (with measurement error)
plot(yobs)
cal<-calibrate.bassBasis(y=yobs,mod=mod,
discrep.mean=rep(0,nfunc),
discrep.mat=diag(nfunc)[,1:2]*.0000001,
sd.est=.1,
s2.df=50,
s2.ind=rep(1,nfunc),
meas.error.cor=diag(nfunc),
bounds=rbind(rep(0,9),rep(1,9)),
nmcmc=10000,
temperature.ladder=1.05^(0:30),type=1)
nburn<-5000
uu<-seq(nburn,10000,5)
pairs(rbind(cal$theta[uu,1,],x.true),col=c(rep(1,length(uu)),2),ylim=c(0,1),xlim=c(0,1))
pred<-apply(predict(mod,cal$theta[uu,1,],nugget = T,trunc.error = T,
mcmc.use = cal$ii[uu]),3,function(x) diag(x)+rnorm(length(uu),0,sqrt(cal$s2[uu,1,1])))
qq<-apply(pred,2,quantile,probs=c(.025,.975))
matplot(t(qq),col='lightgrey',type='l')
lines(yobs,lwd=3)
}
## minimal example for CRAN testing
mod<-bassPCA(1:10,matrix(1:20,10),n.pc=2,nmcmc=2,nburn=1)
| /scratch/gouwar.j/cran-all/cranData/BASS/inst/examplesPCA.R |
###############################################################
## f1: McKay 1997 function ("Nonparametric variance-based methods of assessing uncertainty importance"), legendre polynomials - continuous and categorical variables
###############################################################
library(BASS)
f<-function(x,t){
#t<-t*4+1
(t==1)*x + (t==2)*.5*(3*x^2-1) + (t==3)*.5*(5*x^3-3*x) + (t==4)*1/8*(35*x^4-30*x^2+3) + (t==5)*1/8*(63*x^5-70*x^3+15*x)
}
n<-500
x<-cbind(runif(n,-1,1),sample(1:5,size=n,replace=T),sample(1:5,size=n,replace=T),runif(n))
xf<-as.data.frame(x)
xf[,2]<-as.factor(xf[,2])
xf[,3]<-as.factor(xf[,3])
y<-f(x[,1],x[,2])
plot(x[,1],y)
bm<-bass(xf,y)
plot(bm)
var(predict(bm,xf,mcmc.use=1))
ss<-BASS::sobol(bm)
mean(ss$var.tot) # answer=3034/17325
plot(ss)
mean(ss$S$'1') # answer=.2
mean(ss$S$'1x2') # answer=.8
ntest<-100
xtest<-cbind(runif(ntest,-1,1),sample(1:5,size=ntest,replace=T),sample(1:5,size=ntest,replace=T),runif(ntest))
xftest<-as.data.frame(xtest)
xftest[,2]<-as.factor(xftest[,2])
xftest[,3]<-as.factor(xftest[,3])
plot(colMeans(predict(bm,xftest)),f(xtest[,1],xtest[,2])); abline(a=0,b=1,col=2)
# save small model
bm<-bass(xf,y,save.yhat = F,nmcmc=10000,nburn=9998,small=T)
ss<-BASS::sobol(bm)
pred<-predict(bm,xftest)
saveRDS(bm,'tests/f1_mod.rda')
saveRDS(ss,'tests/f1_sob.rda')
saveRDS(xftest,'tests/f1_testX.rda')
saveRDS(pred,'tests/f1_testPred.rda')
| /scratch/gouwar.j/cran-all/cranData/BASS/inst/testf1.R |
###############################################################
## f2: modified Friedman (1991) function (Francom et al., 2018), continuous variables
###############################################################
library(BASS)
f<-function(x){
10*sin(2*pi*x[,1]*x[,2])+20*(x[,3]-.5)^2+10*x[,4]+5*x[,5]
}
sigma<-1 # noise sd
n<-500 # number of observations
x<-matrix(runif(n*10),n,10) #10 variables, only first 5 matter
y<-rnorm(n,f(x),sigma)
## fit BASS, no tempering
mod<-bass(x,y)
plot(mod)
## prediction
npred<-100
xpred<-matrix(runif(npred*10),npred,10)
pred<-predict(mod,xpred,verbose=TRUE) # posterior predictive samples
true.y<-f(xpred)
plot(true.y,colMeans(pred),xlab='true values',ylab='posterior predictive means')
abline(a=0,b=1,col=2)
## sensitivity
sens<-sobol(mod)
plot(sens,cex.axis=.5)
# true values
Si<-function(x,j=1:50){sum((-1)^(j-1)*(x^(2*j-1))/((2*j-1)*factorial(2*j-1)))}
Si2pi<-Si(2*pi);Si4pi<-Si(4*pi);a1<--5/pi*sum( (-4*pi^2)^(1:50)/((2*(1:50))*factorial(2*(1:50))) )
a2<-5/3;a3<-5;a4<-5/2;a5<-a2+a3+a4;a6<-a1
f0<-a1+a2+a3+a4
D0<-50-25*Si4pi/(2*pi)+5+125/3+2*(a1*a2+a1*a3+a1*a4+a2*a3+a2*a4+a3*a4) - f0^2
D1<-50/pi*(2*Si2pi-Si4pi)-a1^2
D2<-D1
D3<-5-a2^2
D4<-100/3-a3^2
D5<-25/3-a4^2
D12<-50-25*Si4pi/(2*pi)-100/pi*(2*Si2pi-Si4pi) + a1^2
c(D1,D2,D3,D4,D5,D12)/D0
# save small model
bm<-bass(x,y,save.yhat = F,nmcmc=10000,nburn=9998,small=T)
ss<-BASS::sobol(bm)
pred<-predict(bm,xpred)
saveRDS(bm,'tests/f2_mod.rda')
saveRDS(ss,'tests/f2_sob.rda')
saveRDS(xpred,'tests/f2_testX.rda')
saveRDS(pred,'tests/f2_testPred.rda')
| /scratch/gouwar.j/cran-all/cranData/BASS/inst/testf2.R |
###############################################################
## f3: modified Friedman (1991) function (Francom et al., 2018), continuous variables with functional variable
###############################################################
library(BASS)
f<-function(x){
10*sin(2*pi*x[,1]*x[,2])+20*(x[,3]-.5)^2+10*x[,4]+5*x[,5]
}
sigma<-1 # noise sd
n<-500 # number of observations
nfunc<-50 # size of functional variable grid
xfunc<-seq(0,1,length.out=nfunc) # functional grid
x<-matrix(runif(n*9),n,9) # 9 non-functional variables, only first 4 matter
X<-cbind(rep(xfunc,each=n),kronecker(rep(1,nfunc),x)) # to get y
y<-matrix(f(X),nrow=n)+rnorm(n*nfunc,0,sigma)
## fit BASS
mod<-bass(x,y,xx.func=xfunc)
plot(mod)
## prediction
npred<-100
xpred<-matrix(runif(npred*9),npred,9)
Xpred<-cbind(rep(xfunc,each=npred),kronecker(rep(1,nfunc),xpred))
ypred<-matrix(f(Xpred),nrow=npred)
pred<-predict(mod,xpred) # posterior predictive samples (each is a curve)
matplot(ypred,apply(pred,2:3,mean),type='l',xlab='observed',ylab='mean prediction')
abline(a=0,b=1,col=2)
matplot(t(ypred),type='l') # actual
matplot(t(apply(pred,2:3,mean)),type='l') # mean prediction
## sensitivity
sens<-sobol(mod,mcmc.use=1:10) # for speed, only use a few samples
plot(sens) # functional variable labelled "a"
sens.func<-sobol(mod,mcmc.use=1:10,func.var=1)
plot(sens.func)
# true values
Si<-function(x,j=1:50){sum((-1)^(j-1)*(x^(2*j-1))/((2*j-1)*factorial(2*j-1)))}
Si2pi<-Si(2*pi);Si4pi<-Si(4*pi);a1<--5/pi*sum( (-4*pi^2)^(1:50)/((2*(1:50))*factorial(2*(1:50))) )
a2<-5/3;a3<-5;a4<-5/2;a5<-a2+a3+a4;a6<-a1
f0<-a1+a2+a3+a4
D0<-50-25*Si4pi/(2*pi)+5+125/3+2*(a1*a2+a1*a3+a1*a4+a2*a3+a2*a4+a3*a4) - f0^2
D1<-50/pi*(2*Si2pi-Si4pi)-a1^2
D2<-D1
D3<-5-a2^2
D4<-100/3-a3^2
D5<-25/3-a4^2
D12<-50-25*Si4pi/(2*pi)-100/pi*(2*Si2pi-Si4pi) + a1^2
c(D1,D2,D3,D4,D5,D12)/D0
xx<-xfunc
f0.func<-10*sin(pi*xx)^2/(pi*xx)+a2+a3+a4
D0.func<-50-25*sin(4*pi*xx)/(2*pi*xx)+5+125/3+2*(a2+a3+a4)*10*sin(pi*xx)^2/(pi*xx)+2*a2*a3+2*a2*a4+2*a3*a4-f0.func^2
D2.func<-50-25*sin(4*pi*xx)/(2*pi*xx)-100*sin(pi*xx)^4/(pi^2*xx^2)
D3.func<-rep(D3,nfunc)
D4.func<-rep(D4,nfunc)
D5.func<-rep(D5,nfunc)
Dmat<-cbind(D2.func/D0.func,D3.func/D0.func,D4.func/D0.func,D5.func/D0.func)
S.func<-t(apply(Dmat,1,cumsum))
matplot(S.func,type='l')
# save small model
bm<-bass(x,y,xx.func = xfunc,save.yhat = F,nmcmc=10000,nburn=9998,small=T)
ss<-BASS::sobol(bm)
ss.func<-BASS::sobol(bm,func.var = 1)
pred<-predict(bm,xpred)
saveRDS(bm,'tests/f3_mod.rda')
saveRDS(ss,'tests/f3_sob.rda')
saveRDS(ss.func,'tests/f3_sobFunc.rda')
saveRDS(xpred,'tests/f3_testX.rda')
saveRDS(pred,'tests/f3_testPred.rda')
| /scratch/gouwar.j/cran-all/cranData/BASS/inst/testf3.R |
###############################################################
## f4: modified Friedman (1991) function (Francom et al., 2018), continuous variables with functional variable - PCA approach
###############################################################
library(BASS)
f<-function(x){
10*sin(2*pi*x[,1]*x[,2])+20*(x[,3]-.5)^2+10*x[,4]+5*x[,5]
}
sigma<-1 # noise sd
n<-500 # number of observations
nfunc<-50 # size of functional variable grid
xfunc<-seq(0,1,length.out=nfunc) # functional grid
x<-matrix(runif(n*9),n,9) # 9 non-functional variables, only first 4 matter
X<-cbind(rep(xfunc,each=n),kronecker(rep(1,nfunc),x)) # to get y
y<-matrix(f(X),nrow=n)+rnorm(n*nfunc,0,sigma)
## fit BASS
mod<-bassPCA(x,y,n.pc=5,n.cores=min(5,parallel::detectCores()))
plot(mod)
## prediction
npred<-100
xpred<-matrix(runif(npred*9),npred,9)
Xpred<-cbind(rep(xfunc,each=npred),kronecker(rep(1,nfunc),xpred))
ypred<-matrix(f(Xpred),nrow=npred)
pred<-predict(mod,xpred) # posterior predictive samples (each is a curve)
matplot(ypred,apply(pred,2:3,mean),type='l',xlab='observed',ylab='mean prediction')
abline(a=0,b=1,col=2)
matplot(t(ypred),type='l') # actual
matplot(t(apply(pred,2:3,mean)),type='l') # mean prediction
## sensitivity
sens<-sobolBasis(mod,int.order = 2,n.cores = max(parallel::detectCores()-2,1),mcmc.use=1000)
plot(sens) # functional variable labelled "a"
# true values
Si<-function(x,j=1:50){sum((-1)^(j-1)*(x^(2*j-1))/((2*j-1)*factorial(2*j-1)))}
Si2pi<-Si(2*pi);Si4pi<-Si(4*pi);a1<--5/pi*sum( (-4*pi^2)^(1:50)/((2*(1:50))*factorial(2*(1:50))) )
a2<-5/3;a3<-5;a4<-5/2;a5<-a2+a3+a4;a6<-a1
f0<-a1+a2+a3+a4
D0<-50-25*Si4pi/(2*pi)+5+125/3+2*(a1*a2+a1*a3+a1*a4+a2*a3+a2*a4+a3*a4) - f0^2
D1<-50/pi*(2*Si2pi-Si4pi)-a1^2
D2<-D1
D3<-5-a2^2
D4<-100/3-a3^2
D5<-25/3-a4^2
D12<-50-25*Si4pi/(2*pi)-100/pi*(2*Si2pi-Si4pi) + a1^2
c(D1,D2,D3,D4,D5,D12)/D0
xx<-xfunc
f0.func<-10*sin(pi*xx)^2/(pi*xx)+a2+a3+a4
D0.func<-50-25*sin(4*pi*xx)/(2*pi*xx)+5+125/3+2*(a2+a3+a4)*10*sin(pi*xx)^2/(pi*xx)+2*a2*a3+2*a2*a4+2*a3*a4-f0.func^2
D2.func<-50-25*sin(4*pi*xx)/(2*pi*xx)-100*sin(pi*xx)^4/(pi^2*xx^2)
D3.func<-rep(D3,nfunc)
D4.func<-rep(D4,nfunc)
D5.func<-rep(D5,nfunc)
Dmat<-cbind(D2.func/D0.func,D3.func/D0.func,D4.func/D0.func,D5.func/D0.func)
S.func<-t(apply(Dmat,1,cumsum))
matplot(S.func,type='l')
# save small model
bm<-bassPCA(x,y,save.yhat = F,nmcmc=10000,nburn=9998,n.pc=5,n.cores=min(5,parallel::detectCores()),small=T)
ss<-sobolBasis(bm,int.order = 2,mcmc.use = 1)
pred<-predict(bm,xpred,trunc.error = F,nugget=F)
saveRDS(bm,'tests/f4_mod.rda')
saveRDS(ss,'tests/f4_sob.rda')
saveRDS(xpred,'tests/f4_testX.rda')
saveRDS(pred,'tests/f4_testPred.rda')
| /scratch/gouwar.j/cran-all/cranData/BASS/inst/testf4.R |
###############################################################
## f5: Ishigami function, continuous variables (https://www.sfu.ca/~ssurjano/ishigami.html)
###############################################################
library(BASS)
f<-function(x,a=7,b=.1)
sin(x[,1])*(1+b*x[,3]^4)+a*sin(x[,2])^2
b=.1
a=7
V1=(5+b*pi^4)^2/50
V2=a^2/8
V3=0
V13=8*b^2*pi^8/225
Vt=V1+V2+V3+V13
p1<-c(V1,V2,V3,V13)/Vt
V1=(1+3*b)^2*(exp(2)-1)/exp(2)/2
V2=a^2*(exp(4)-1)^2/exp(8)/8
V3=0
V13=48*b^2*(exp(2)-1)/exp(2)
Vt=V1+V2+V3+V13
p2<-c(V1,V2,V3,V13)/Vt
V1=(pi^2-8)*(5+b*pi^4)^2/(50*pi^2)
V2=a^2/8
V3=64*b^2*pi^6/225
V13=8*b^2*pi^6*(pi^2-8)/225
Vt=V1+V2+V3+V13
p3<-c(V1,V2,V3,V13)/Vt
n<-1000
p<-3
library(lhs)
xst<-maximinLHS(n,p)
low<- -4
high<- 4
rr<-c(low,high)
#f<-function(x)
# x[,1] * x[,2] * x[,3]
x<-do.call(cbind,lapply(1:ncol(xst),function(i) BASS:::unscale.range(xst[,i],rr)))
#x<-rbind(x,as.matrix(expand.grid(rr,rr,rr)))
y<-f(x)
#library(rgl)
#plot3d(cbind(x[,c(1,2)],y))
#pairs(cbind(x,y))
library(BASS)
mod<-bass(x,y)
plot(mod)
prior1<-prior2<-prior3<-list()
for(i in 1:p){
prior1[[i]]<-list(dist='uniform',trunc=c(-pi,pi))
prior2[[i]]<-list(dist='normal',mean=0,sd=1,weights=1,trunc=mod$range.des[,i])
prior3[[i]]<-list(dist='uniform',trunc=c(0,pi))
}
#BASS:::plot_prior(prior1[[1]])
#BASS:::plot_prior(prior2[[1]])
#curve(dnorm(x,0,1)*2.93,col=2,add=T)
BASS:::plot_prior(prior2[[1]])
ss1<-BASS::sobol(mod,prior1)
ss2<-BASS::sobol(mod,prior2)
ss3<-BASS::sobol(mod,prior3)
boxplot(ss1$S,range=0)
points(p1,col=2)
boxplot(ss2$S,range=0)
points(p2,col=2)
boxplot(ss3$S,range=0)
points(p3,col=2)
ntest<-100
xtest<-matrix(runif(ntest*p),ncol=p)
plot(colMeans(predict(mod,xtest)),f(xtest)); abline(a=0,b=1,col=2)
# save small model
bm<-bass(x,y,save.yhat = F,nmcmc=10000,nburn=9998,small=T)
ss1<-BASS::sobol(bm,prior1)
ss2<-BASS::sobol(bm,prior2)
ss3<-BASS::sobol(bm,prior3)
pred<-predict(bm,xtest)
saveRDS(bm,'tests/f5_mod.rda')
saveRDS(ss1,'tests/f5_sob1.rda')
saveRDS(ss2,'tests/f5_sob2.rda')
saveRDS(ss3,'tests/f5_sob3.rda')
saveRDS(xtest,'tests/f5_testX.rda')
saveRDS(pred,'tests/f5_testPred.rda')
saveRDS(prior1,'tests/f5_sobPrior1.rda')
saveRDS(prior2,'tests/f5_sobPrior2.rda')
saveRDS(prior3,'tests/f5_sobPrior3.rda')
| /scratch/gouwar.j/cran-all/cranData/BASS/inst/testf5.R |
#'@title Performs the classification methodology using complex network theory
#'@name classification
#'
#'@description Given two distinct data sets, one of mRNA and one of lncRNA.
#'The classification of the data is done from the structure of the networks formed by the sequences.
#'After this is done classifying with the J48 classifier and randomForest.
#'Can be also created in the current directory a file of type arff called' result 'with all values so that it can be used later.
#'There is also the graphic parameter that when TRUE generates graphs based on the results of each measure.
#'Using the J48 classifier it is possible to generate a tree based on the dataset and then save this tree so that it can be used to predict other RNA sequences
#'
#'@param mRNA Path to the FASTA file containing the mRNA sequences
#'@param lncRNA Path to the FASTA file containing the lncRNA sequences
#'@param word Integer that defines the size of the word to parse. By default the word parameter is set to 3
#'@param step Integer that determines the distance that will be traversed in the sequences for creating a new connection. By default the step parameter is set to 1
#'@param sncRNA Path to the FASTA file containing the sncRNA sequences (optional)
#'@param graphic Logical parameter, TRUE or FALSE, controlling graphics generation. By default graphic is set to FALSE
#'@param classifier Character Parameter. By default the classifier is J48, but the user can choose to use randomForest by configuring as classifier = "RF". The prediction with a model passed by the param load only works with the classifier J48.
#'@param save when set, this parameter saves a .arff file with the results of the features in the current directory and also saves the tree created by the J48 classifier so that it can be used to predict RNA sequences. This parameter sets the file name. No file is created by default
#'@param load When defined, the model file previously saved in the current directory under the name given in this parameter is loaded and used for prediction. No file is loaded by default
#'
#' @return Results with cross-validation or the prediction result
#'
#' @author Eric Augusto Ito
#'
#' @examples
#'\donttest{
#' # Classification - cross validation
#' library(BASiNET)
#' arqSeqMRNA <- system.file("extdata", "sequences2.fasta", package = "BASiNET")
#' arqSeqLNCRNA <- system.file("extdata", "sequences.fasta", package = "BASiNET")
#' classification(mRNA=arqSeqMRNA,lncRNA=arqSeqLNCRNA)
#' classification(mRNA=arqSeqMRNA,lncRNA=arqSeqLNCRNA, save="example") #Save Tree to Predict Sequences
#' # Prediction
#' mRNApredict <- system.file("extdata", "sequences2-predict.fasta", package = "BASiNET")
#' lncRNApredict <- system.file("extdata", "sequences-predict.fasta", package = "BASiNET")
#' modelPredict <- system.file("extdata", "modelPredict.dat", package = "BASiNET")
#' classification(mRNApredict,lncRNApredict,load=modelPredict)
#'}
#' @importFrom Biostrings readBStringSet
#' @importFrom stats predict
#' @import igraph
#' @import RWeka
#' @import randomForest
#' @import rJava
#' @export
classification <- function(mRNA, lncRNA, word=3, step=1, sncRNA, graphic, classifier = c('J48', 'RF'), load, save){
classifier <- match.arg(classifier)
seqMRNA<-readBStringSet(mRNA)
seqLNCRNA<-readBStringSet(lncRNA)
if(!missing(sncRNA)){
seqSNCRNA<-readBStringSet(sncRNA)
numClass<-3
}else{
seqSNCRNA<-NULL
numClass<-2
}
numSeq<-(length(seqMRNA)+length(seqLNCRNA)+length(seqSNCRNA))
averageShortestPathLengths <- matrix(nrow=numSeq,ncol=200)
clusteringCoefficient <- matrix(nrow=numSeq,ncol=200)
standardDeviation <- matrix(nrow=numSeq,ncol=200)
maximum <- matrix(nrow=numSeq,ncol=200)
assortativity<- matrix(nrow=numSeq,ncol=200)
betweenness <- matrix(nrow=numSeq,ncol=200)
degree <- matrix(nrow=numSeq,ncol=200)
minimum <- matrix(nrow=numSeq,ncol=200)
motifs3 <- matrix(nrow=numSeq,ncol=200)
motifs4 <- matrix(nrow=numSeq,ncol=200)
for(k in seq_len(numClass)){
if(k==1){
if(!missing(load)){
message("Analyzing")
}else{
message("Analyzing mRNA from number: ")
}
aux<-0
seq<-seqMRNA
}else{
if(k==2){
if(missing(load)){
message("Analyzing lncRNA from number: ")
}
seq<-seqLNCRNA
aux<-length(seqMRNA)
}else{
if(k==3){
if(missing(load)){
message("Analyzing sncRNA from number: ")
}
seq<-seqSNCRNA
aux<-(length(seqMRNA)+length(seqLNCRNA))
}
}
}
for(x in seq_along(seq)){
if(missing(load)){
message(x)
}
sequence<-strsplit(toString(seq[x]),split='')
sequence<-sequence[[1]]
net<-createNet(word, step, sequence)
limitThreshold<-max(net[])
if(limitThreshold>200){
limitThreshold<-200;
}
vector <- sapply(seq_len(limitThreshold), function(t) {
net<<-threshold(t, net)
measures(net)
})
cidx <- seq_len(ncol(vector))
averageShortestPathLengths[aux + x, cidx] <- vector[1,]
clusteringCoefficient[aux+x, cidx] <- vector[2,]
degree[aux+x,cidx] <- vector[3,]
assortativity[aux+x,cidx] <- vector[4,]
betweenness[aux+x,cidx] <- vector[5,]
standardDeviation[aux+x,cidx] <- vector[6,]
maximum[aux+x,cidx] <- vector[7,]
minimum[aux+x,cidx] <- vector[8,]
motifs3[aux+x,cidx] <- vector[9,]
motifs4[aux+x,cidx] <- vector[10,]
}
}
if(!missing(load)){
load(load)
}
rangeMinMax <- matrix(nrow=10,ncol=6)
rangeMinMax[1,]<-minMax(averageShortestPathLengths,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA), rangeMinMax[1,])
rangeMinMax[2,]<-minMax(clusteringCoefficient,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA), rangeMinMax[2,])
rangeMinMax[3,]<-minMax(standardDeviation,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA), rangeMinMax[3,])
rangeMinMax[4,]<-minMax(maximum,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA), rangeMinMax[4,])
rangeMinMax[5,]<-minMax(assortativity,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA), rangeMinMax[5,])
rangeMinMax[6,]<-minMax(betweenness,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA), rangeMinMax[6,])
rangeMinMax[7,]<-minMax(degree,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA), rangeMinMax[7,])
rangeMinMax[8,]<-minMax(minimum,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA), rangeMinMax[8,])
rangeMinMax[9,]<-minMax(motifs3,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA), rangeMinMax[9,])
rangeMinMax[10,]<-minMax(motifs4,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA), rangeMinMax[10,])
rangeMinMax[is.na(rangeMinMax)]<-0
message("Rescaling values")
numCol<-length(averageShortestPathLengths[1,])
averageShortestPathLengths<-reschedule(averageShortestPathLengths,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA),rangeMinMax[1,])
colnames(averageShortestPathLengths) <- paste('ASPL',seq_len(numCol))
clusteringCoefficient<-reschedule(clusteringCoefficient,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA),rangeMinMax[2,])
colnames(clusteringCoefficient) <- paste('CC', seq_len(numCol))
standardDeviation<-reschedule(standardDeviation,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA),rangeMinMax[3,])
colnames(standardDeviation) <- paste('SD', seq_len(numCol))
maximum<-reschedule(maximum,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA),rangeMinMax[4,])
colnames(maximum) <- paste('MAX', seq_len(numCol))
assortativity<-reschedule(assortativity,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA),rangeMinMax[5,])
colnames(assortativity) <- paste('ASS', seq_len(numCol))
betweenness<-reschedule(betweenness,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA),rangeMinMax[6,])
colnames(betweenness) <- paste('BET', seq_len(numCol))
degree<-reschedule(degree,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA),rangeMinMax[7,])
colnames(degree) <- paste('DEG', seq_len(numCol))
minimum<-reschedule(minimum,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA),rangeMinMax[8,])
colnames(minimum) <- paste('MIN', seq_len(numCol))
motifs3<-reschedule(motifs3,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA),rangeMinMax[9,])
colnames(motifs3) <- paste('MT3', seq_len(numCol))
motifs4<-reschedule(motifs4,length(seqMRNA),length(seqLNCRNA),length(seqSNCRNA),rangeMinMax[10,])
colnames(motifs4) <- paste('MT4', seq_len(numCol))
listMatrix<-list(
averageShortestPathLengths,clusteringCoefficient,
standardDeviation,maximum,assortativity,betweenness,degree,
minimum,motifs3,motifs4)
namesMeasure<-c(
"Average shortest path length", "Cluster Coefficient",
"Standard deviation", "Maximum", "Assortativity",
"Betweenness", "Degree", "Minimal", "Motifs 3", "Motifs 4"
)
if(missing(graphic)||graphic==FALSE){
}else{
if(graphic==TRUE){
for(i in seq_len(10)){
createGraph2D(listMatrix[[i]],length(seqMRNA),length(seqLNCRNA), namesMeasure[i])
}
}
}
message("Creating data frame")
data<-cbind(assortativity, betweenness, averageShortestPathLengths, clusteringCoefficient, degree, minimum, maximum, standardDeviation, motifs3, motifs4)
data<-data.frame(data)
if(missing(load)){
if(numClass==2){
data["CLASS"]<-factor(c("lncRNA"), levels = c("mRNA","lncRNA"));
for(i in seq_along(seqMRNA)){
data$CLASS[i] <- "mRNA"
}
}else{
if(numClass==3){
data["CLASS"]<-factor(c("lncRNA"), levels = c("mRNA","lncRNA","sncRNA"));
data$CLASS[seq_along(seqMRNA)] <- "mRNA"
data$CLASS[(length(seqMRNA)+length(seqLNCRNA)+1):(length(seqMRNA)+length(seqLNCRNA)+length(seqSNCRNA))] <- "sncRNA"
}
}
}
data[is.na(data)] <- 0
if(!missing(save)){
rmcfs::write.arff(data, file = "Result.arff")
message("Result.arff file generated in the current R directory")
}
if(classifier=="J48"){
if(missing(load)){
message("Sorting the data with the J48")
obj <- J48(CLASS ~ ., data = data)
if(!missing(save)){
.jcache(obj$classifier)
save(obj, file=paste(save,".dat",sep=""))
}
result <- evaluate_Weka_classifier(obj, numFolds = 10, complexity = TRUE, seed = 1, class = TRUE)
print(obj)
print(result)
}else{
predict_res <- predict(object = obj, newdata=data)
message("Results")
print(predict_res)
}
}
if(classifier=="RF"){
message("Sorting the data with the Random Forest")
set.seed(1)
rf <- randomForest(data[,seq_along(data[1,])], data[,"CLASS"])
print(rf)
print(getTree(randomForest(data[,-40], data[,5], ntree=10), 3, labelVar=TRUE))
}
if(missing(load)){
return(invisible(result))
}else{
return(invisible(predict_res))
}
} | /scratch/gouwar.j/cran-all/cranData/BASiNET/R/classification.R |
#'@title Creates a two-dimensional graph between a measure and the threshold
#'@name createGraph2D
#'
#'@description For the analysis of each measure, the createGraph2D() function was created in order to visualize the behavior of each measure in relation to the threshold. This function creates a plot (Measure x Threshold) from a matrix; mRNA sequences are drawn in blue and lncRNA sequences in red. When a third class is present, it is drawn in green
#'
#'@param matrix matrix of the measure for the creation of two-dimensional graph
#'@param numSeqMRNA Integer number of mRNA sequences
#'@param numSeqLNCRNA Integer number of lncRNA sequences
#'@param nameMeasure Character Parameter that defines the name of the measure to put in the title of the graph
#'
#'@author Eric Augusto Ito
#'
#'
#'@import igraph
#'@importFrom grDevices dev.new
#'@importFrom graphics lines plot title
createGraph2D <- function(matrix, numSeqMRNA,numSeqLNCRNA, nameMeasure){
dev.new()
somador<-(1/(length(matrix[1,])-1))
threshold<-seq(0,1,somador)
nameFile<-paste(nameMeasure,".png",sep="")
plot(threshold,matrix[1,], type="l", col="blue",xlab="Threshold",ylab=nameMeasure,ylim=c(0,matrix[which.max(matrix)]))
title(main="Two-dimensional graph", col.main="red", font.main=4)
for(i in 2:length(matrix[,1])){
if(i<=numSeqMRNA){
lines(threshold, matrix[i,] , type="l", pch=22, lty=2, col="blue")
}else{
if(i<=(numSeqLNCRNA+numSeqMRNA)){
lines(threshold, matrix[i,] , type="l", pch=22, lty=2, col="red")
}else{
lines(threshold, matrix[i,] , type="l", pch=22, lty=2, col="green")
}
}
}
} | /scratch/gouwar.j/cran-all/cranData/BASiNET/R/createGraph2D.R |
#'@title Creates an undirected graph from a biological sequence
#'@name createNet
#'
#'@description A function that, from a biological sequence, generates an undirected graph whose vertices are words; the word size can be set through the 'word' parameter. The connections between words depend on the 'step' parameter, which indicates the distance traversed to form the next connection
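#'
#'As a rough illustration (with word = 2 and step = 1), the sequence "ACGTACGT" is read as the
#'consecutive word pairs AC-GT, CG-TA, GT-AC and TA-CG, each pair adding one undirected edge;
#'e.g. \code{createNet(2, 1, strsplit("ACGTACGT", "")[[1]])} builds that graph.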
#'
#'@param step It is the integer parameter that decides the step that will be taken to make a new connection
#'@param word This integer parameter decides the size of the word that will be formed
#'@param sequence It is a vector that represents the sequence
#'
#'
#'@return Returns the undirected graph formed from the sequence
#'
#' @author Eric Augusto Ito
#'
#'
createNet <- function(word, step, sequence){
aux<-""
index<-1
position<-0
cont<-length(sequence)
comma<-0
x<-0
k<-1
vector<-c()
while((index-1+(word*2))<cont){
while(x<word){
aux<-paste(aux,sequence[index],sep="");
x<-x+1;
index<-index+1;
}
vector<-c(vector,aux)
aux<-""
x<-0;
while(x<word){
aux<-paste(aux,sequence[index],sep="");
x<-x+1;
index<-index+1;
}
vector<-c(vector,aux)
aux<-""
x<-0;
position<-position+step;
index<-position+1;
}
net<-graph(edges=vector,directed=FALSE)
return(net)
} | /scratch/gouwar.j/cran-all/cranData/BASiNET/R/createNet.R |
#'@title Abstracting Characteristics on Network Structure
#'@name measures
#'
#'@description Given a graph, several measures of the graph structure are extracted and returned as a vector
#'
#'@param graph The complex network that will be measured
#'
#'
#'@return Return a vector with the results of the measurements in order:
#'Average shortest path length, clustering Coefficient, degree, assortativity,
#'betweenness, standard deviation, maximum, minimum, number of motifs
#'size 3 and number of motifs of size 4
#'
#' @author Eric Augusto Ito
#'
#'
#' @import igraph
#' @importFrom stats sd
measures <- function(graph){
measures<-c()
measures<-c(measures,average.path.length(graph,directed=FALSE,unconnected=FALSE))
measures<-c(measures,transitivity(graph,type=c("undirected"),vids=NULL,weights=NULL, isolates=c("NaN","zero")))
measures<-c(measures,mean(degree(graph, v=V(graph), normalized=FALSE)))
measures<-c(measures,assortativity_degree(graph, directed = FALSE))
measures<-c(measures,mean(betweenness(graph, v = V(graph), directed = FALSE, weights = NULL, normalized = FALSE)))
measures<-c(measures,sd(degree(graph, v=V(graph), normalized= FALSE), na.rm = FALSE))
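# note: which.max()/which.min() return the index of the vertex attaining the largest/smallest
# degree (this is what the MAX/MIN entries hold), not the degree value itself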
measures<-c(measures,which.max(degree(graph, v=V(graph), normalized=FALSE)))
measures<-c(measures,which.min(degree(graph, v=V(graph), normalized=FALSE)))
measures<-c(measures,(count_motifs(graph, size = 3)))
measures<-c(measures,(count_motifs(graph, size = 4)))
names(measures)[1]<-"ASPL"
names(measures)[2]<-"CC"
names(measures)[3]<-"DEG"
names(measures)[4]<-"ASS"
names(measures)[5]<-"BET"
names(measures)[6]<-"SD"
names(measures)[7]<-"MAX"
names(measures)[8]<-"MIN"
names(measures)[9]<-"MT3"
names(measures)[10]<-"MT4"
return(measures)
}
| /scratch/gouwar.j/cran-all/cranData/BASiNET/R/measures.R |
#'@title Minimum and maximum
#'@name minMax
#'
#'@description Finds the minimum and maximum values of the results, to be used later for rescaling.
#'
#'@param matrix Array with results numerics
#'@param mRNA Integer number of mRNA sequences
#'@param lncRNA Integer number of lncRNA sequences
#'@param sncRNA Integer number of sncRNA sequences
#'@param rangeMinMax Vector that will be returned with the minimum and maximum values
#'
#'
#'@return Returns the vector with the minimum and maximum values for the scale
#'
#' @author Eric Augusto Ito
#'
#'
minMax <- function(matrix, mRNA, lncRNA, sncRNA, rangeMinMax){
maxMin<-range(matrix[],na.rm = TRUE)
rangeMinMax[2]<-maxMin[2]
rangeMinMax[1]<-maxMin[1]
if(lncRNA!=0){
maxMin<-range(matrix[(mRNA+1):(mRNA+lncRNA),],na.rm = TRUE)
rangeMinMax[4]<-maxMin[2]
rangeMinMax[3]<-maxMin[1]
}
if(sncRNA!=0){
maxMin<-range(matrix[(mRNA+lncRNA+1):(mRNA+lncRNA+sncRNA),],na.rm = TRUE)
rangeMinMax[6]<-maxMin[2]
rangeMinMax[5]<-maxMin[1]
}
return(rangeMinMax)
} | /scratch/gouwar.j/cran-all/cranData/BASiNET/R/minMax.R |
#'@title Rescales the results between values from 0 to 1
#'@name reschedule
#'
#'@description Given the results, the data are rescaled to values between 0 and 1 so that sequence length does not influence the results. The rescaling of the mRNA and lncRNA values is done separately
#'
#'@param matrix Array with results numerics
#'@param mRNA Integer number of mRNA sequences
#'@param lncRNA Integer number of lncRNA sequences
#'@param sncRNA Integer number of sncRNA sequences
#'@param rangeMinMax Vector with the minimum and maximum values for the scale
#'
#'@return Returns the array with the rescaled values
#'
#' @author Eric Augusto Ito
#'
#'
reschedule <- function(matrix, mRNA, lncRNA, sncRNA, rangeMinMax){
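# sketch of the operation: rangeMinMax is indexed as (min, max) pairs for the mRNA, lncRNA and
# sncRNA blocks, and each value is mapped to (value - min) / (max - min), i.e. min-max rescaling to [0, 1]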
for(x in 1:mRNA){
for(y in 1:length(matrix[1,])){
matrix[x,y]<-((matrix[x,y]-rangeMinMax[1])/(rangeMinMax[2]-rangeMinMax[1]))
}
}
if(lncRNA!=0){
for(x in (mRNA+1):(mRNA+lncRNA)){
for(y in 1:length(matrix[1,])){
matrix[x,y]<-((matrix[x,y]-rangeMinMax[3])/(rangeMinMax[4]-rangeMinMax[3]))
}
}
}
if(sncRNA!=0){
for(x in (mRNA+lncRNA+1):(mRNA+lncRNA+sncRNA)){
for(y in 1:length(matrix[1,])){
matrix[x,y]<-((matrix[x,y]-rangeMinMax[5])/(rangeMinMax[6]-rangeMinMax[5]))
}
}
}
return(matrix)
} | /scratch/gouwar.j/cran-all/cranData/BASiNET/R/reschedule.R |
#'@title Applies threshold on the network from a value
#'@name threshold
#'
#'@description Given an integer value x, a cut is applied to the network: edges whose weight is less than x are removed (assigned zero).
#'
#'@param x Integer value that would limit the edges
#'@param net Complex network where the edges will be cut
#'
#'@return Returns the complex network with the cuts already made
#'
#'@author Eric Augusto Ito
#'
#' @import igraph
threshold <- function(x, net){
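# note on usage: classification() calls this successively with x = 1, 2, 3, ... on the same
# network, so zeroing the entries equal to x-1 at each call cumulatively removes every edge
# whose weight (word-pair count) falls below the current threshold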
# tamanho <- length(vetor<-net[1,])
# for(i in 1:tamanho){
# for(k in 1:tamanho){
# if((net[i,k]!=0)&&(net[i,k]<x)){
# net[i,k]<-0
# }
# }
# }
if(x!=1){
matriz<-as_adjacency_matrix(net)
matriz[(matriz==(x-1))]<-0
net<-graph_from_adjacency_matrix(matriz, mode="undirected")
}
return(net)
} | /scratch/gouwar.j/cran-all/cranData/BASiNET/R/threshold.R |
## -----------------------------------------------------------------------------
mRNA <- system.file("extdata", "sequences2.fasta", package = "BASiNET")
lncRNA <- system.file("extdata", "sequences.fasta", package = "BASiNET")
library(BASiNET)
classification(mRNA,lncRNA, save="example")
## ---- out.width = "400px"-----------------------------------------------------
knitr::include_graphics("2d.png")
## -----------------------------------------------------------------------------
mRNApredict <- system.file("extdata", "sequences2-predict.fasta", package = "BASiNET")
lncRNApredict <- system.file("extdata", "sequences-predict.fasta", package = "BASiNET")
modelPredict <- system.file("extdata", "modelPredict.dat", package = "BASiNET")
library(BASiNET)
classification(mRNApredict,lncRNApredict,load=modelPredict)
| /scratch/gouwar.j/cran-all/cranData/BASiNET/inst/doc/BASiNET.R |
---
title: "BASiNET - Classification of RNA sequences using complex network theory"
author: "Eric Augusto Ito"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Classification of mRNA and lncRNA sequences}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
<div style="text-align: justify">
The BASiNET package aims to classify messenger RNA and long non-coding RNA; optionally a third class, such as small non-coding RNA, may also be included. The classification is made from measurements drawn from complex networks: for each RNA sequence a complex network is created. The networks are formed of vertices and edges; the vertices are words whose size can be defined by the parameter 'word'. A thresholding methodology is adopted, so that after each extraction of measures a cut is applied to the network and the measures are extracted again. Finally, all measurements taken from the networks are used for classification using the J48 or Random Forest algorithms. There are four data sets present in the 'BASiNET' package, "sequences", "sequences2", "sequences-predict" and "sequences2-predict", with 11, 10, 11 and 11 sequences respectively. These sequences were taken from the data set used in the article (LI, Aimin; ZHANG, Junying; ZHOU, Zhongyin. PLEK: a tool for predicting long non-coding RNAs and messenger RNAs based on an improved k-mer scheme. BMC Bioinformatics, BioMed Central, 2014). These sequences are used to run the examples. BASiNET was published in (ITO, Eric; KATAHIRA, Isaque; VICENTE, Fábio; PEREIRA, Felipe; LOPES, Fabrício, BASiNET—BiologicAl Sequences NETwork: a case study on coding and non-coding RNAs identification, Nucleic Acids Research, 2018).
</div>
## Installation
<div style="text-align: justify">
To install BASiNET correctly it is necessary to install the dependencies: RWeka, igraph, rJava, randomForest, Biostrings, rmcfs. The Biostrings package is in the BioConductor repository, while the other packages are available on CRAN. The following commands must be executed in R to install the dependencies.

    install.packages("RWeka")
    install.packages("rJava")
    install.packages("igraph")
    install.packages("randomForest")
    install.packages("rmcfs")
    source("https://bioconductor.org/biocLite.R")
    biocLite("Biostrings")

In order for the rJava package to work properly, you must have the Java JDK (https://www.oracle.com/java/technologies/downloads/) and the Java JRE (https://www.java.com/pt-BR/download/manual.jsp) installed.
</div>
## Classification
<div style="text-align: justify">
The "classification" function applies an RNA classification methodology; at the end of its execution, the results of two classification algorithms are shown: J48 and Random Forest.
</div>
Parameters:
<div style="text-align: justify">
- word - Defines the number of nitrogenous bases that form a word. By default the word parameter is set to 3.
- step - Defines the distance that will be traversed in the sequence for the formation of a new connection. By default the step parameter is set to 1.
- mRNA - Path to a FASTA file containing the mRNA sequences.
- lncRNA - Path to a FASTA file containing the lncRNA sequences.
- sncRNA - Path to a FASTA file containing the sncRNA sequences; this parameter is optional.
- graphic - If TRUE, two-dimensional graphs (Threshold x Measure) are generated. By default it is set to FALSE.
- classifier - Character parameter. By default the classifier is J48, but the user can choose to use randomForest by setting classifier = "RF". The prediction with a model passed by the load parameter only works with the J48 classifier.
- load - Name of the .dat file that will be loaded as a model for the prediction of new RNA sequences. By default it is NULL.
- save - Name of the .dat file in which the measurement results will be saved. The generated file can be used in the "load" parameter for the prediction of new data. By default it is NULL.
</div>
<div style="text-align: justify">
Within the BASiNET package there are two sample files, one with mRNA sequences and one with lncRNA sequences. The example below uses these two files.
</div>
Defining parameters:
```{r}
mRNA <- system.file("extdata", "sequences2.fasta", package = "BASiNET")
lncRNA <- system.file("extdata", "sequences.fasta", package = "BASiNET")
library(BASiNET)
classification(mRNA,lncRNA, save="example")
```
<div style="text-align: justify">
After the function completes, the results for J48 and Random Forest are shown. For the example data the results are J48 = 95.2381% correctly classified, Random Forest = 4.76% error.
When graphic = TRUE, 10 two-dimensional graphs are also generated, one for each measure. The blue lines represent the mRNA sequences, the red lines represent the lncRNA sequences, and a third class, when present, is represented by green lines.
</div>
Example of generated graph:
Two-dimensional graph for the Average shortest path length measure
```{r, out.width = "400px"}
knitr::include_graphics("2d.png")
```
## Predict
<div style="text-align: justify">
To predict a set of sequences, two things need to be set up: the FASTA files containing the sequences to be predicted, which are passed in the same way as the training files, and the "load" parameter, which defines the model that will be used to predict the sequences.
</div>
Defining parameters:
```{r}
mRNApredict <- system.file("extdata", "sequences2-predict.fasta", package = "BASiNET")
lncRNApredict <- system.file("extdata", "sequences-predict.fasta", package = "BASiNET")
modelPredict <- system.file("extdata", "modelPredict.dat", package = "BASiNET")
library(BASiNET)
classification(mRNApredict,lncRNApredict,load=modelPredict)
```
| /scratch/gouwar.j/cran-all/cranData/BASiNET/inst/doc/BASiNET.Rmd |
---
title: "BASiNET - Classification of RNA sequences using complex network theory"
author: "Eric Augusto Ito"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Classification of mRNA and lncRNA sequences}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
<div style="text-align: justify">
The BASiNET package aims to classify messenger RNA and long non-coding RNA; optionally a third class, such as small non-coding RNA, may also be included. The classification is made from measurements drawn from complex networks: for each RNA sequence a complex network is created. The networks are formed of vertices and edges; the vertices are words whose size can be defined by the parameter 'word'. A thresholding methodology is adopted, so that after each extraction of measures a cut is applied to the network and the measures are extracted again. Finally, all measurements taken from the networks are used for classification using the J48 or Random Forest algorithms. There are four data sets present in the 'BASiNET' package, "sequences", "sequences2", "sequences-predict" and "sequences2-predict", with 11, 10, 11 and 11 sequences respectively. These sequences were taken from the data set used in the article (LI, Aimin; ZHANG, Junying; ZHOU, Zhongyin. PLEK: a tool for predicting long non-coding RNAs and messenger RNAs based on an improved k-mer scheme. BMC Bioinformatics, BioMed Central, 2014). These sequences are used to run the examples. BASiNET was published in (ITO, Eric; KATAHIRA, Isaque; VICENTE, Fábio; PEREIRA, Felipe; LOPES, Fabrício, BASiNET—BiologicAl Sequences NETwork: a case study on coding and non-coding RNAs identification, Nucleic Acids Research, 2018).
</div>
## Installation
<div style="text-align: justify">
To install BASiNET correctly it is necessary to install its dependencies: RWeka, igraph, rJava, randomForest, Biostrings, rmcfs. The Biostrings package is in the Bioconductor repository, while the other packages are available on CRAN. The following commands must be executed in R for the dependencies to be installed:

    install.packages("RWeka")
    install.packages("rJava")
    install.packages("igraph")
    install.packages("randomForest")
    install.packages("rmcfs")
    source("https://bioconductor.org/biocLite.R")
    biocLite("Biostrings")

In order for the rJava package to work properly, you must have installed the Java JDK (https://www.oracle.com/java/technologies/downloads/) and the Java JRE (https://www.java.com/pt-BR/download/manual.jsp).
</div>
## Classification
<div style="text-align: justify">
The function "classification" applies an RNA classification methodology; at the end of its execution, the results for two classification algorithms are shown: J48 and Random Forest.
</div>
Parameters:
<div style="text-align: justify">
* word - Defines the number of nitrogenous bases that form a word (the size of each vertex). By default the word parameter is set to 3.
* step - Defines the distance that will be traversed in the sequence for the formation of a new connection. By default the step parameter is set to 1.
* mRNA - Directory of a FASTA file containing the mRNA sequences.
* lncRNA - Directory of a FASTA file containing the lncRNA sequences.
* sncRNA - Directory of a FASTA file containing the sncRNA sequences; this parameter is optional.
* graphic - If TRUE, two-dimensional Threshold x Measure graphs are generated. By default it is set to FALSE.
* classifier - Character parameter. By default the classifier is J48, but the user can choose Random Forest by setting classifier = "RF". Prediction with a model passed through the load parameter only works with the J48 classifier.
* load - Name of the .dat file that will be loaded as a model for the prediction of new RNA sequences. NULL by default.
* save - Name of the .dat file in which the measurement results will be saved. The generated file can be used in the "load" parameter for the prediction of new data. NULL by default.
</div>
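For illustration, here is a hedged sketch (not evaluated) of a call that overrides the defaults described above; the FASTA paths are placeholders and should point to your own files:
```{r, eval=FALSE}
# word = 4 builds 4-base vertices, step = 2 uses a distance of two positions
# between new connections, graphic = TRUE draws the Threshold x Measure plots
# and classifier = "RF" switches from J48 to Random Forest.
classification(mRNA = "my_mRNA.fasta", lncRNA = "my_lncRNA.fasta",
               word = 4, step = 2, graphic = TRUE, classifier = "RF")
```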
<div style="text-align: justify">
Within the BASiNET package there are two sample files, one with mRNA sequences and one with lncRNA sequences. The example below uses these two files.
</div>
Defining parameters:
```{r}
mRNA <- system.file("extdata", "sequences2.fasta", package = "BASiNET")
lncRNA <- system.file("extdata", "sequences.fasta", package = "BASiNET")
library(BASiNET)
classification(mRNA,lncRNA, save="example")
```
<div style="text-align: justify">
After the function completes, the results for J48 and Random Forest are shown. For the example data, the results are J48 = 95.2381% accuracy and Random Forest = 4.76% error.
It also generates 10 two-dimensional graphs, one for each measure. Blue lines represent the mRNA sequences, red lines the lncRNA sequences, and, when a third class is given, it is represented by green lines.
</div>
Example of a generated graph:
Two-dimensional graph for the Average Minimum Path measure
```{r, out.width = "400px"}
knitr::include_graphics("2d.png")
```
## Predict
<div style="text-align: justify">
To predict a set of data, two parameters need to be set: the first is "predicting" and the second is "load". The "predicting" parameter sets the directory of the file containing the sequences to be predicted, and the "load" parameter defines the model that will be used to predict them.
</div>
Defining parameters:
```{r}
mRNApredict <- system.file("extdata", "sequences2-predict.fasta", package = "BASiNET")
lncRNApredict <- system.file("extdata", "sequences-predict.fasta", package = "BASiNET")
modelPredict <- system.file("extdata", "modelPredict.dat", package = "BASiNET")
library(BASiNET)
classification(mRNApredict,lncRNApredict,load=modelPredict)
```
| /scratch/gouwar.j/cran-all/cranData/BASiNET/vignettes/BASiNET.Rmd |
#'@title Performs the classification methodology using complex network and
#'entropy theories
#'@name classify
#'
#'@description Given two or three distinct data sets (one of mRNA, one of
#'lncRNA and, optionally, one of sncRNA), the data are classified from the
#'structure of the networks formed by the sequences, which are filtered by an
#'entropy methodology. After the filtering, the classification starts.
#'
#'@param mRNA Directory where the .FASTA file with the mRNA sequences is located
#'@param lncRNA Directory where the .FASTA file with the lncRNA sequences is located
#'@param sncRNA Directory where the .FASTA file with the sncRNA sequences is located
#' (optional)
#'@param trainingResult The result of the training (two or three matrices)
#'@param save_dataframe When set, this parameter saves a .csv file with
#'the features in the current directory. No file is created by default.
#'@param save_model When set, this parameter saves an .Rda file with
#'the model in the current directory. No file is created by default.
#'@param predict_with_model When set, the input sequences are predicted with a
#'previously generated model (the base name of the .Rda file saved through
#'save_model).
#'
#'@return The fitted randomForest model, or the predictions when predict_with_model is used
#'
#'@author Murilo Montanini Breve
#'
#'@examples
#'library(BASiNETEntropy)
#'arqSeqMRNA <- system.file("extdata", "mRNA.fasta",package = "BASiNETEntropy")
#'arqSeqLNCRNA <- system.file("extdata", "ncRNA.fasta", package = "BASiNETEntropy")
#'load(system.file("extdata", "trainingResult.RData", package = "BASiNETEntropy"))
#'r_classify <- classify(mRNA=arqSeqMRNA, lncRNA=arqSeqLNCRNA, trainingResult = trainingResult)
#'@importFrom Biostrings readBStringSet
#'@import igraph
#'@import randomForest
#'@importFrom graphics abline axis legend text
#'@importFrom stats sd predict
#'@importFrom utils write.csv2
#'@export
classify <- function(mRNA,
lncRNA,
sncRNA = NULL,
trainingResult,
save_dataframe = NULL,
save_model = NULL,
predict_with_model = NULL) {
if (missing(trainingResult))
trainingResult <- training(mRNA, lncRNA, sncRNA)
vectorm <- NULL
vectorlnc <- NULL
vectorsnc <- NULL
edgeslistmrna <- NULL
edgeslistlncrna <- NULL
edgeslistsncrna <- NULL
MRNA <- readBStringSet(mRNA)
LNCRNA <- readBStringSet(lncRNA)
if (length(sncRNA)) {
SNCRNA <- readBStringSet(sncRNA)
}
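    # Build one edge list per sequence for each class: t = 1 handles the mRNA
    # file, t = 2 the lncRNA file and t = 3 the (optional) sncRNA file.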
for (t in 1:3) {
if (t == 1) {
message("[INFO] Classifying mRNA:")
seq <- c(MRNA)
}
if (t == 2) {
message("[INFO] Classifying lncRNA:")
seq <- c(LNCRNA)
}
if (t == 3) {
if (length(sncRNA)) {
message("[INFO] Classifying sncRNA:")
seq <- c(SNCRNA)
} else
break
}
for (u in seq_along(seq)) {
sequence <- strsplit(toString(seq[u]), split = '')
sequence <- sequence[[1]]
if (t == 1)
edgeslistmrna[[u]] <- createedges(sequence)
if (t == 2)
edgeslistlncrna[[u]] <- createedges(sequence)
if (t == 3)
edgeslistsncrna[[u]] <- createedges(sequence)
message(u)
}
}
message("[INFO] Filtering the graphs")
sequenciam <- filtering(trainingResult[[1]], edgeslistmrna)
sequencial <- filtering(trainingResult[[2]], edgeslistlncrna)
if (length(sncRNA)) {
sequencias <- filtering(trainingResult[[3]], edgeslistsncrna)
}
message("[INFO] Extracting measurements from graphs")
measures <- NULL
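    # mRNA graphs: for each filtered adjacency matrix, append 10 topological
    # measures to `measures` (average shortest path length, clustering
    # coefficient, mean degree, degree assortativity, mean betweenness, degree
    # standard deviation, the positions of the maximum- and minimum-degree
    # vertices, and the counts of 3- and 4-vertex motifs).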
for(q in seq_along(sequenciam)){
net <- graph_from_adjacency_matrix(sequenciam[[q]], mode = c("undirected"))
measures<-c(measures,average.path.length(net,directed = FALSE,
unconnected=FALSE))
measures<-c(measures,transitivity(net,
type=c("undirected"),
vids=NULL,
weights=NULL,
isolates=c("NaN","zero")))
measures<-c(measures,mean(degree(net, v=V(net),
normalized=FALSE)))
measures<-c(measures,assortativity_degree(net,
directed = FALSE))
measures<-c(measures,mean(betweenness(net,
v = V(net),
directed = FALSE,
weights = NULL,
normalized = FALSE)))
measures<-c(measures,sd(degree(net,
v=V(net),
normalized = FALSE),
na.rm = FALSE))
measures<-c(measures,which.max(degree(net,
v=V(net),
normalized=FALSE)))
measures<-c(measures,which.min(degree(net,
v = V(net),
normalized=FALSE)))
measures<-c(measures,(count_motifs(net, size = 3)))
measures<-c(measures,(count_motifs(net, size = 4)))
}
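    # lncRNA graphs: the same 10 measures, appended after the mRNA values.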
for(q in seq_along(sequencial)){
net<-graph_from_adjacency_matrix(sequencial[[q]],
mode = c("undirected"))
measures<-c(measures,average.path.length(net,
directed = FALSE,
unconnected=FALSE))
measures<-c(measures,transitivity(net,
type = c("undirected"),
vids=NULL,
weights=NULL,
isolates=c("NaN","zero")))
measures<-c(measures,mean(degree(net,
v = V(net),
normalized=FALSE)))
measures<-c(measures,assortativity_degree(net,
directed = FALSE))
measures<-c(measures,mean(betweenness(net,
v = V(net),
directed = FALSE,
weights = NULL,
normalized = FALSE)))
measures<-c(measures,sd(degree(net,
v = V(net),
normalized= FALSE),
na.rm = FALSE))
measures<-c(measures,which.max(degree(net,
v=V(net),
normalized=FALSE)))
measures<-c(measures,which.min(degree(net,
v = V(net),
normalized=FALSE)))
measures<-c(measures,(count_motifs(net, size = 3)))
measures<-c(measures,(count_motifs(net, size = 4)))
}
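    # sncRNA graphs (only when a third class was supplied): the same 10 measures.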
if(length(sncRNA)) {
for(q in seq_along(sequencias)){
net<-graph_from_adjacency_matrix(sequencias[[q]],
mode = c("undirected"))
measures<-c(measures,average.path.length(net,
directed=FALSE,
unconnected=FALSE))
measures<-c(measures,transitivity(net,
type = c("undirected"),
vids=NULL,
weights=NULL,
isolates=c("NaN","zero")))
measures<-c(measures,mean(degree(net,
v = V(net),
normalized=FALSE)))
measures<-c(measures,assortativity_degree(net,
directed = FALSE))
measures<-c(measures,mean(betweenness(net,
v = V(net),
directed = FALSE,
weights = NULL,
normalized = FALSE)))
measures<-c(measures,sd(degree(net,
v = V(net),
normalized= FALSE),
na.rm = FALSE))
measures<-c(measures,which.max(degree(net,
v = V(net),
normalized=FALSE)))
measures<-c(measures,which.min(degree(net,
v = V(net),
normalized=FALSE)))
measures<-c(measures,(count_motifs(net, size = 3)))
measures<-c(measures,(count_motifs(net, size = 4)))
}}
message("[INFO] Building the dataframes")
if (length(sncRNA)) {
data <- creatingDataframe(measures,
tamM = length(sequenciam),
tamLNC = length(sequencial),
tamSNC = length(sequencias))
} else
data <- creatingDataframe(measures,
tamM = length(sequenciam),
tamLNC = length(sequencial))
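    # Column 11 of `data` holds the class label; the remaining 10 columns are
    # the features. Missing values are replaced by 0 before model fitting.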
DF <- data[,-11]
DF[is.na(DF)] <- 0
if (!missing(save_dataframe)) {
write.csv2(data, file = "feature_matrix.csv")
message("feature_matrix.csv file generated in the current R directory")
}
if (missing(predict_with_model)) {
message("[INFO] Sorting with Randomforest")
rf <- randomForest(DF, as.factor(data[, 11]))
print(rf)
if (!missing(save_model)) {
save(rf, file = paste(save_model, ".Rda", sep = ""))
      message(
        paste(save_model, ".Rda", sep = ""),  # was predict_with_model, which is missing in this branch
        " file generated in the current R directory"
      )
}
return(rf)
} else{
message("[INFO] Predicting with the input model")
load(file = paste(predict_with_model, ".Rda", sep = ""))
rf.pred <- predict(rf, DF)
print(rf.pred)
return(rf.pred)
}
}
| /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/R/classify.R |
#'@title Creates an untargeted graph from a biological sequence
#'@name createedges
#'
#'@description A function that, from a biological sequence, generates an
#' undirected graph whose vertices are words; the word size can be set by the
#' 'word' parameter. The connections between words depend on the 'step'
#' parameter, which indicates the next connection to be formed.
#'
#'@param step It is the integer parameter that decides the step that will
#'be taken to make a new connection
#'@param word This integer parameter decides the size of the word that will
#'be formed
#'@param sequence It is a vector that represents the sequence
#'
#'@return Returns the vector of words used to create the edge list
#'@author Murilo Montanini Breve
createedges <- function(sequence, word = 3, step = 1) {
aux <- ""
index <- 1
position <- 0
cont <- length(sequence)
comma <- 0
x <- 0
k <- 1
vector <- c()
while ((index - 1 + (word * 2)) < cont){
while (x < word){
aux <- paste(aux, sequence[index], sep = "")
x <- x + 1
index <- index + 1
}
vector <- c(vector, aux)
aux <- ""
x <- 0
while (x < word){
aux <- paste(aux, sequence[index], sep = "")
x <- x + 1
index <- index + 1
}
vector <- c(vector, aux)
aux <- ""
x <- 0
position <- position + step
index <- position + 1
}
return(vector)
}
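## A minimal usage sketch (not part of the package workflow), assuming the
## defaults word = 3 and step = 1: the returned character vector alternates
## source and target words, so it can be read as an igraph edge list, e.g.
## seq <- strsplit("ATGCGTACGTTAGC", split = "")[[1]]
## edges <- createedges(seq)
## g <- igraph::graph(edges, directed = FALSE)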
| /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/R/createedges.R |
#'@title Creates a feature matrix using complex network topological measures
#'@name creatingDataframe
#'
#'@description A function that creates the feature matrix from the complex
#'network topological measures.
#'
#'@param measures The complex network topological measures
#'@param tamM Number of mRNA sequences
#'@param tamLNC Number of lncRNA sequences
#'@param tamSNC Number of sncRNA sequences (optional)
#'
#'@return Returns the feature matrix in scale 0-1
#'@author Murilo Montanini Breve
creatingDataframe <- function(measures, tamM, tamLNC, tamSNC) {
if (missing(tamSNC)) {
Specie <- NULL
for (i in 1:tamM)
Specie <- c(Specie, "mRNA")
for (i in (tamM + 1):(tamM + tamLNC))
Specie <- c(Specie, "lncRNA")
dataframe <- NULL
dataframe <- matrix(data = measures,
nrow = (tamM + tamLNC),
ncol = 10,
                        byrow = TRUE)
dataframe <- preprocessing(dataframe, tamM, tamLNC)
Species <- matrix(data = Specie,
nrow = (tamM + tamLNC),
ncol = 1,
                      byrow = TRUE)
dataframe <- cbind(dataframe, Species)
rownames(dataframe) <- Specie
colnames(dataframe) <- c("ASPL",
"CC",
"DEG",
"ASS",
"BET",
"SD",
"MAX",
"MIN",
"MT3",
"MT4",
"CLASS")
DF = as.data.frame(dataframe)
}
else{
Specie <- NULL
for (i in 1:tamM)
Specie <- c(Specie, "mRNA")
for (i in (tamM + 1):(tamM + tamLNC))
Specie <- c(Specie, "lncRNA")
for (i in (tamM + tamLNC + 1):(tamM + tamLNC + tamSNC))
Specie <- c(Specie, "sncRNA")
dataframe <- NULL
dataframe <- matrix(data = measures,
nrow = (tamM + tamLNC + tamSNC),
ncol = 10,
                        byrow = TRUE)
dataframe <- preprocessing(dataframe, tamM, tamLNC, tamSNC)
Species <- matrix(data = Specie,
nrow = (tamM + tamLNC + tamSNC),
ncol = 1,
                      byrow = TRUE)
dataframe <- cbind(dataframe, Species)
rownames(dataframe) <- Specie
colnames(dataframe) <- c("ASPL",
"CC",
"DEG",
"ASS",
"BET",
"SD",
"MAX",
"MIN",
"MT3",
"MT4",
"CLASS")
DF = as.data.frame(dataframe)
}
return(DF)
}
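## A minimal sketch of the expected input (hypothetical sizes): `measures` is
## a flat vector with 10 values per sequence, filled mRNA first and then
## lncRNA (then sncRNA). With 5 mRNA and 5 lncRNA graphs, length(measures)
## is 100 and creatingDataframe(measures, tamM = 5, tamLNC = 5) returns a
## 10 x 11 data frame whose last column is the class label.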
| /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/R/creatingDataframe.R |
#'@title Creates an entropy curve
#'@name curveofentropy
#'
#'@description A function that creates an entropy curve from the entropy
#'measures and the threshold returned by 'training'.
#'@param H The entropy measures returned by 'training'
#'@param threshold The threshold returned by 'training'
#'
#'@return Returns an entropy curve
#'@author Murilo Montanini Breve
#'@importFrom graphics abline axis legend text plot
#'@export
curveofentropy <- function(H, threshold) {
H <- unlist(H)
threshold <- unlist(threshold)
plot(H,
main = "Curve of entropy",
ylab = "Sum of Entropies",
xlab = "Edges distribution",
xaxt = "n")
axis(1, xaxp = c(0, 4096, 8), las = 1)
abline(v = threshold, col = "red")
text(print(paste0("T = ", threshold)),
x = threshold,
y = 0,
srt = 90,
adj = c(0,-0.5))
legend("topright",
legend = "Threshold",
pch = "|",
col = "red")
}
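## A hedged usage sketch (the component names below are hypothetical, since
## they depend on the structure returned by `training`):
## tr <- training(mRNA, lncRNA)
## curveofentropy(H = tr$H, threshold = tr$threshold)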
| /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/R/curveofentropy.R |
#'@title Calculates the entropy
#'@name entropy
#'
#'@description A function that calculates the entropy term -x*log2(x) of a probability
#'
#'@param x The probabilities P0 and P1
#'
#'@return Returns the entropy
#'@author Murilo Montanini Breve
entropy <- function(x){
    entrop <- 0
    # Guard against NaN first: comparing NaN with `>` would otherwise raise
    # an error before the NaN check could run.
    if (is.nan(x))
        return(entrop)
    if (x > 0)
        entrop <- -(x * log2(x))
    return(entrop)
}
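## Worked example: entropy(0.25) returns -(0.25 * log2(0.25)) = 0.5;
## entropy(0) and entropy(NaN) both return 0.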
| /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/R/entropy.R |