#This example will run a standard Bayesian LASSO
rm(list=ls())
setwd(tempdir())
library(BGLR)
data(wheat)
set.seed(12345)
varB<-0.5*(1/sum(apply(X=wheat.X,MARGIN=2,FUN=var)))
b0<-rnorm(n=1279,sd=sqrt(varB))
signal<-wheat.X%*%b0
error<-rnorm(599,sd=sqrt(0.5))
y<-100+signal+error
nIter=500;
burnIn=100;
thin=3;
saveAt='';
S0=NULL;
weights=NULL;
R2=0.5;
ETA<-list(list(X=wheat.X,model='BL'))
fit_BL=BGLR(y=y,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2)
plot(fit_BL$yHat,y)
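# (Added sketch) Optionally compare the simulated effects b0 with the posterior
# means of the marker effects; this assumes the fitted object stores them in
# fit_BL$ETA[[1]]$b, as in the other BGLR demos.
plot(b0,fit_BL$ETA[[1]]$b,xlab='Simulated effect',ylab='Estimated effect')
abline(0,1,lty=2)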
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/BL.R
|
#This example will run a Bayesian Ridge Regression (BRR)
rm(list=ls())
setwd(tempdir())
library(BGLR)
data(wheat)
set.seed(12345)
varB<-0.5*(1/sum(apply(X=wheat.X,MARGIN=2,FUN=var)))
b0<-rnorm(n=1279,sd=sqrt(varB))
signal<-wheat.X%*%b0
error<-rnorm(599,sd=sqrt(0.5))
y<-100+signal+error
nIter=500;
burnIn=100;
thin=3;
saveAt='';
S0=NULL;
weights=NULL;
R2=0.5;
ETA<-list(list(X=wheat.X,model='BRR'))
fit_BRR=BGLR(y=y,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2)
plot(fit_BRR$yHat,y)
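# (Added sketch) The posterior means of the variance components can also be
# inspected; fit_BRR$ETA[[1]]$varB and fit_BRR$varE are assumed to hold the
# marker-effect and residual variances, respectively.
fit_BRR$ETA[[1]]$varB
fit_BRR$varE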
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/BRR.R
|
#This example will run a Bayesian Ridge Regression with grouped markers (BRR_sets)
rm(list=ls())
setwd(tempdir())
library(BGLR)
data(mice)
X=scale(mice.X,center=T,scale=F)
QTLs=seq(from=100,to=10000,length=10)
signal=rowSums(X[,QTLs])
signal=signal/sd(signal)
y=signal+rnorm(nrow(X))
mrkGroups=rep(1:2000,each=10)[1:ncol(X)]
fm=BGLR(y=y,ETA=list(list(X=X,model='BRR_sets',sets=mrkGroups))) # Note: method and sets
plot(fm$ETA[[1]]$varB,cex=.1,col=4,type='o')
abline(v=QTLs,col=2,lty=2,lwd=.5)
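# (Added check) The marker sets that actually contain the simulated QTLs should
# show elevated variance estimates; mrkGroups maps markers to sets.
unique(mrkGroups[QTLs])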
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/BRR_sets.R
|
#This example will run a BayesC variable selection model
rm(list=ls())
setwd(tempdir())
library(BGLR)
data(wheat)
n<-599      # should be <= 599 = nrow(wheat.X)
p<-1279     # should be <= 1279 = ncol(wheat.X)
nQTL<-30    # should be <= p
X<-wheat.X[1:n,1:p]
## Centering and standardization
for(i in 1:p)
{
X[,i]<-(X[,i]-mean(X[,i]))/sd(X[,i])
}
# Simulation
b0<-rep(0,p)
whichQTL<-sample(1:p,size=nQTL,replace=FALSE)
b0[whichQTL]<-rnorm(length(whichQTL),
sd=sqrt(1/length(whichQTL)))
signal<-as.vector(X%*%b0)
error<-rnorm(n=n,sd=sqrt(0.5))
y<-signal +error
nIter=5000;
burnIn=2500;
thin=3;
saveAt='';
S0=NULL;
weights=NULL;
R2=0.5;
ETA<-list(list(X=X,model='BayesC'))
fit_BC=BGLR(y=y,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2)
plot(fit_BC$yHat,y)
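# (Added sketch, assuming the BayesC fit exposes per-marker posterior inclusion
# probabilities in fit_BC$ETA[[1]]$d) Markers at the simulated QTL positions
# should tend to have higher inclusion probabilities.
plot(fit_BC$ETA[[1]]$d,cex=.5,col=4,ylab='Prob. of inclusion')
abline(v=whichQTL,col=2,lty=2,lwd=.5)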
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/BayesC.R
|
#Finney, D. J. (1947). The estimation from Individual Records of the Relationship Between Dose and Quantal Response.
#Biometrika, 34, 320-334
#Albert, J., Chib, S. (1993). Bayesian Analysis of Binary and Polychotomous Response Data.
#JASA, 88, 669-679.
rm(list=ls())
setwd(tempdir())
library(BGLR)
y=c(1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,0,1,0,1,0,1,0,1,0,0,1,1,1,0,0,1)
v=c(3.7,3.5,1.25,.75,.8,.7,.6,1.1,.9,.9,.8,.55,.6,1.4,.75,2.3,3.2,.85,1.7,1.8,
0.4,.95,1.35,1.5,1.6,.6,1.8,.95,1.9,1.6,2.7,2.35,1.1,1.1,1.2,.8,.95,.75,1.3)
r=c(.825,1.09,2.5,1.5,3.2,3.5,.75,1.7,.75,.45,.57,2.75,3,2.33,3.75,1.64,1.6,1.415,
1.06,1.8,2,1.36,1.35,1.36,1.78,1.5,1.5,1.9,.95,.4,.75,.03,1.83,2.2,2,3.33,1.9,1.9,1.625)
X=cbind(v,r)
nIter=5000;
burnIn=2500;
thin=10;
saveAt='';
ETA=list(list(X=X,model='FIXED'))
fit_Bernoulli_BGLR=BGLR(y=y,response_type='ordinal',ETA=ETA,nIter=nIter,burnIn=burnIn,
thin=thin,saveAt=saveAt)
fit_Bernoulli_BGLR$ETA[[1]]$b
fit_Bernoulli_BGLR$ETA[[1]]$SD.b
#In our parameterization, mu is set to 0 and it is estimated as a threshold
fit_Bernoulli_BGLR$threshold
fit_Bernoulli_BGLR$SD.threshold
fit_mle=glm(y~v+r,family=binomial(link="probit"))
summary(fit_mle)
#Example of prediction for missing values
rm(list=ls())
setwd(tempdir())
#data
data(wheat)
#libraries
library(pROC)
# extracts phenotypes
#continuous
y=wheat.Y[,1]
#binary
yBin=ifelse(y>0,1,0)
# generates testing dataset
tst=sample(1:599,size=100,replace=FALSE)
yNA=yBin
yNA[tst]=NA
nIter=5000;
burnIn=2500;
thin=10;
saveAt='';
ETA=list(list(X=wheat.X,model='BRR'))
fit_Bernoulli_BGLR=BGLR(y=yNA,response_type='ordinal',ETA=ETA,nIter=nIter,burnIn=burnIn,
thin=thin,saveAt=saveAt)
mean((yBin[tst]-pnorm(fit_Bernoulli_BGLR$yHat[tst]))^2) # mean-sq. error
auc(response=yBin[tst],predictor=fit_Bernoulli_BGLR$yHat[tst])
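# (Added check) A simple confusion matrix at a 0.5 probability threshold for the
# testing set; this only uses base R on objects already created above.
predBin=ifelse(pnorm(fit_Bernoulli_BGLR$yHat[tst])>0.5,1,0)
table(observed=yBin[tst],predicted=predBin)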
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/Bernoulli.R
|
#This example will run an RKHS model
rm(list=ls())
setwd(tempdir())
library(BGLR)
data(wheat)
set.seed(12345)
varB=0.5*(1/sum(apply(X=wheat.X,MARGIN=2,FUN=var)))
b0=rnorm(n=1279,sd=sqrt(varB))
signal=wheat.X%*%b0
error=rnorm(599,sd=sqrt(0.5))
y=100+signal+error
nIter=500;
burnIn=100;
thin=3;
saveAt='';
S0=NULL;
weights=NULL;
R2=0.5;
K=wheat.X%*%t(wheat.X)
ETA=list(list(K=K,model='RKHS'))
fit_RKHS=BGLR(y=y,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2)
plot(fit_RKHS$yHat,y)
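# (Added check) Correlation between observed and fitted values, and the posterior
# mean of the kernel variance component (assumed to be stored in
# fit_RKHS$ETA[[1]]$varU).
cor(fit_RKHS$yHat,y)
fit_RKHS$ETA[[1]]$varU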
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/RKHS.R
|
#This example will run an RKHS model with kernel averaging
rm(list=ls())
setwd(tempdir())
library(BGLR)
data(wheat)
set.seed(12345)
varB=0.5*(1/sum(apply(X=wheat.X,MARGIN=2,FUN=var)))
b0=rnorm(n=1279,sd=sqrt(varB))
signal=wheat.X%*%b0
error=rnorm(599,sd=sqrt(0.5))
y=100+signal+error
nIter=500;
burnIn=100;
thin=3;
saveAt='';
weights=NULL;
R2=0.5;
#Euclidean distance matrix
D=as.matrix(dist(wheat.X,method="euclidean"))
h=quantile(as.vector(D)^2,probs=.05)
K1=exp(-5/h*(as.matrix(D)^2))
K2=exp(-1/h*(as.matrix(D)^2))
K3=exp(-1/5/h*(as.matrix(D)^2))
df=5
S=as.numeric(var(y))/2*(df-2)
ETA=list(list(K=K1,model='RKHS',df0=df,S0=S),list(K=K2,model='RKHS',df0=df,S0=S),list(K=K3,model='RKHS',df0=df,S0=S))
fit_RKHS=BGLR(y=y,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,
saveAt=saveAt,df0=5,S0=NULL,weights=weights,R2=R2)
plot(fit_RKHS$yHat,y)
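# (Added check) With kernel averaging, the relative size of the variance
# components indicates which bandwidth the data favor; each element of ETA is
# assumed to store its variance component in $varU.
unlist(lapply(fit_RKHS$ETA,function(x) x$varU))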
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/RKHS_KA.R
|
rm(list=ls())
setwd(tempdir())
#loading libraries
library(BGLR)
library(survival)
#loading data included in BGLR
data(wheat)
#simulation of data
X=wheat.X[,1:4]
n=nrow(X)
b=c(-2,2,-1,1)
error=rnorm(n)
y=X%*%b+ error
cen=sample(1:n,size=200)
yCen=y
yCen[cen]=NA
# a and b hold the lower and upper censoring bounds passed to BGLR
# (note that this re-uses the name b, overwriting the coefficient vector above)
a=rep(NA,n)
b=rep(NA,n)
a[cen]=y[cen]-runif(min=0,max=1,n=200)
b[cen]=Inf
nIter=6000;
burnIn=1000;
thin=10;
saveAt='';
df0=5
S0=var(y)/2*(df0-2)
weights=NULL;
ETA=list(list(X=X,model='FIXED'))
fm1=BGLR(y=yCen,a=a,b=b,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,
df0=df0,S0=S0,weights=weights)
#fits the model using survreg
event=ifelse(is.na(yCen),0,1)
time=ifelse(is.na(yCen),a,yCen)
surv.object=Surv(time=time,event=event,type='right')
fm2=survreg(surv.object~X, dist="gaussian")
plot(fm1$ETA[[1]]$b~fm2$coeff[-1],pch=19,col=2,cex=1.5,
xlab="survreg()", ylab="BGLR()")
abline(a=0,b=1,lty=2)
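# (Added check) Correlation between the two sets of coefficient estimates
# plotted above.
cor(fm1$ETA[[1]]$b,fm2$coeff[-1])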
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/censored.R
|
#A cheese tasting experiment
#The following data kindly provided by Dr. Graeme Newell were obtained from an
#experiment concerning the effect on taste of various cheese additives.
#The so-called hedonic scale has nine response categories, ranging from
#'strong dislike' (1) to 'excellent taste' (9). In this instance, four additives
#labeled A, B, C and D were tested.
#Here the effects are so great that the qualitative ordering (D, A, C, B) can easily
#be deduced from visual inspection. Nevertheless it is of some interest to check
#whether the models described earlier are capable of describing these differences
#and evaluating the statistical significance of the differences observed
#References
#Albert, J., Chib, S. (1993). Bayesian Analysis of Binary and Polychotomous
#Response Data. JASA, 88, 669-679.
rm(list=ls())
setwd(tempdir())
#polr function
library(MASS)
library(BGLR)
#Function to expand a data.frame using column Freq
#x is a data.frame; it should include a column named Freq
#the function returns another data.frame
expand.dft=function(x, na.strings = "NA", as.is = FALSE, dec = ".")
{
DF=sapply(1:nrow(x), function(i) x[rep(i, each = x$Freq[i]), ],
simplify = FALSE)
DF=subset(do.call("rbind", DF), select = -Freq)
for (i in 1:ncol(DF))
{
DF[[i]]=type.convert(as.character(DF[[i]]),
na.strings = na.strings,
as.is = as.is, dec = dec)
}
DF
}
#Data
Freq=c(0,0,1,7,8,8,19,8,1,6,9,12,11,7,6,1,0,0,1,1,6,8,23,7,5,1,0,0,0,0,1,3,7,14,16,11)
response=gl(9,1,36,labels=c("I","II","III","IV","V","VI","VII","VIII","IX"))
additive=gl(4,9,labels=c("A","B","C","D"))
cheese=data.frame(Freq,response,additive)
cheese
#a)Bayesian model
#Design matrix without intercept and expand it using frequencies
data=expand.dft(as.data.frame(cbind(response,model.matrix(~additive)[,-1],Freq)))
#The response should be ordered
data=data[order(data[,1]),]
#Response
y=as.vector(data[,1])
#Design matrix
X=as.matrix(data[,c(2:4)])
nIter=5000;
burnIn=2500;
thin=10;
saveAt='';
ETA=list(list(X=X,model='FIXED'))
fit_ordinal_BGLR=BGLR(y=y,response_type='ordinal',ETA=ETA,nIter=nIter,burnIn=burnIn,
thin=thin,saveAt=saveAt)
fit_ordinal_BGLR$ETA[[1]]$b
fit_ordinal_BGLR$threshold
fit_ordinal_BGLR$SD.threshold
#b)Frequentist
#polr function
fitted_mle=polr(response ~ additive, weights = Freq, data = cheese,method="probit")
fitted_mle
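# (Added check) Side-by-side comparison of the Bayesian posterior means and the
# maximum-likelihood estimates from polr().
cbind(BGLR=fit_ordinal_BGLR$ETA[[1]]$b,polr=fitted_mle$coefficients)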
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/ordinal.R
|
#Binary PED (bed) file
bed_file = system.file("extdata/sample.bed", package="BGLR")
#Extended map file (this gives the number of snps)
bim_file = system.file("extdata/sample.bim", package="BGLR")
#First 6 columns of ped file (this gives the number of individuals)
fam_file = system.file("extdata/sample.fam", package="BGLR")
out=read_bed(bed_file=bed_file,bim_file=bim_file,fam_file=fam_file,verbose=TRUE)
p=out$p
n=out$n
out=out$x
#Recode snp to 0,1,2 format using allele 1
# 0 --> 0
# 1 --> 1
# 2 --> NA
# 3 --> 2
out[out==2]=NA
out[out==3]=2
X=matrix(out,nrow=p,ncol=n,byrow=TRUE)
X=t(X)
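# (Added check) Quick sanity checks on the recoded genotype matrix: dimensions
# should be n individuals by p markers, and code 2 (originally missing) is now NA.
dim(X)
sum(is.na(X))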
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/read_bed.R
|
ped_file = system.file("extdata/sample.ped", package="BGLR")
out=read_ped(ped_file)
p=out$p
n=out$n
out=out$x
#Recode snp to 0,1,2 format using allele 1
# 0 --> 0
# 1 --> 1
# 2 --> NA
# 3 --> 2
out[out==2]=NA
out[out==3]=2
X=matrix(out,nrow=p,ncol=n,byrow=TRUE)
X=t(X)
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/read_ped.R
|
#Binary PED (bed) file
bed_file = system.file("extdata/sample.bed", package="BGLR")
#Extended map file (this gives the number of snps)
bim_file = system.file("extdata/sample.bim", package="BGLR")
#First 6 columns of ped file (this gives the number of individuals)
fam_file = system.file("extdata/sample.fam", package="BGLR")
out=read_bed(bed_file=bed_file,bim_file=bim_file,fam_file=fam_file,verbose=TRUE)
#Now write the bed file using the internal routine
#Using the xxd utility, compare both files, i.e. extdata/sample.bed and test.bed
new_bed_file="test.bed"
write_bed(x=out$x,n=out$n,p=out$p,bed_file=new_bed_file)
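# (Added check, assuming both files are accessible on disk) A quick size
# comparison; a byte-by-byte check can be done externally with xxd as noted above.
file.size(bed_file)==file.size(new_bed_file)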
|
/scratch/gouwar.j/cran-all/cranData/BGLR/demo/write_bed.R
|
#' BGPHazard: A package for Bayesian nonparametric inference in survival analysis.
#'
#' The BGPHazard package provides three categories of important functions:
#' simulating, diagnostic and result.
#'
#' @section Simulating functions:
#' The simulating functions are used to make posterior inference for the Bayesian
#' semiparametric survival models described by
#' Nieto-Barajas and Walker (2002), Nieto-Barajas (2003) and Nieto-Barajas and Yin (2008).
#'
#' @section Diagnostic functions:
#' The diagnostic functions are used to produce convergence diagnostic plots for the simulated parameters/variables.
#'
#' @section Result functions:
#' The result functions are used to produce plots of the hazard function estimates
#' along with the survival function defined by the model.
#'
#' @docType package
#' @name BGPHazard
#' @importFrom magrittr %>% %<>%
#' @importFrom rlang !! !!!
#' @importFrom stats acf as.formula dgamma dnorm median quantile rbeta rgamma rnorm runif time
#' @importFrom utils data
#'
utils::globalVariables(c("name", "V1", "V2","lower","upper","S^(t)","h.est","surv",".x","value","x","y","times","."))
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BGPHazard-package.R
|
#' BSBHaz posterior samples using Gibbs Sampler
#'
#' \code{BSBHaz} samples posterior observations from the bivariate survival
#' model (BSBHaz model) proposed by Nieto-Barajas & Walker (2007).
#'
#' BSBHaz (Nieto-Barajas & Walker, 2007) is a Bayesian semiparametric model for
#' bivariate survival data. The marginal densities are nonparametric survival
#' models and the joint density is constructed via a mixture. Dependence between
#' failure times is modeled using two frailties, and the dependence between
#' these frailties is modeled with a copula.
#'
#' This command obtains posterior samples from model parameters. The samples
#' from omega, gamma, and theta are obtained using the Metropolis-Hastings
#' algorithm. The proposal distributions are uniform for the three parameters.
#' The parameters \code{omega_d}, \code{gamma_d} and \code{theta_d} modify the
#' intervals from which the uniform proposals are sampled. If these parameters
#' are too large, the acceptance rates will decrease and the chains will get
#' stuck. On the other hand, if these parameters are small, the acceptance rates
#' will be too high and the chains will not explore the posterior support
#' effectively.
#'
#' @param bsb_init An object of class 'BSBinit' created by
#' \code{\link{BSBInit}}.
#' @param iter A positive integer. Number of samples generated by the Gibbs
#' Sampler.
#' @param burn_in A positive integer. Number of iterations that should be
#' discarded as burn in period.
#' @param omega_d A positive double. This parameter defines the interval used in
#' the Metropolis-Hastings algorithm to sample proposals for omega. See
#' details.
#' @param gamma_d A positive double. This parameter defines the interval used in
#' the Metropolis-Hastings algorithm to sample proposals for gamma. See
#' details.
#' @param theta_d A positive double. This parameter defines the interval used in
#' the Metropolis-Hastings algorithm to sample proposals for theta. See
#' details.
#' @param seed Random seed used in sampling.
#'
#' @return An object of class '\code{BSBHaz}' containing the samples from the
#' variables of interest.
#' @export
#'
#' @examples
#' t1 <- survival::Surv(c(1, 2, 3))
#' t2 <- survival::Surv(c(1, 2, 3))
#'
#' init <- BSBInit(t1 = t1, t2 = t2, seed = 0)
#' samples <- BSBHaz(init, iter = 10, omega_d = 2,
#' gamma_d = 10, seed = 10)
BSBHaz <- function(bsb_init,
iter,
burn_in = 0,
omega_d = NULL,
gamma_d = NULL,
theta_d = NULL,
seed = 42){
stopifnot(iter > burn_in)
stopifnot(is.double(omega_d) & omega_d > 0)
stopifnot(is.double(gamma_d) & gamma_d > 0)
stopifnot(is.double(theta_d) & theta_d > 0)
stopifnot(is.double(seed))
if (!inherits(bsb_init, "BSBinit")) {
stop("'bsb_init' must be an object created by BSBInit")
}
t1 <- bsb_init$t1
t2 <- bsb_init$t2
delta1 <- bsb_init$delta1
delta2 <- bsb_init$delta2
omega1 <- bsb_init$omega1
omega2 <- bsb_init$omega2
pred_matrix <- bsb_init$pred_matrix
theta <- bsb_init$theta
t_part <- bsb_init$t_part
lambda1 <- bsb_init$lambda1
lambda2 <- bsb_init$lambda2
omega1 <- pmax(
omega1, cum_h(t1, t_part, lambda1) * exp(pred_matrix %*% theta) + 1e-5
)
omega2 <- pmax(
omega2, cum_h(t2, t_part, lambda2) * exp(pred_matrix %*% theta) + 1e-5
)
y <- bsb_init$y
gamma <- bsb_init$gamma
u1 <- bsb_init$u1
u2 <- bsb_init$u2
alpha <- bsb_init$alpha
beta <- bsb_init$beta
c <- bsb_init$c
int_len <- t_part[[2]] - t_part[[1]]
n_obs <- attr(bsb_init, "individuals")
n_intervals <- attr(bsb_init, "intervals")
has_predictors <- attr(bsb_init, "has_predictors")
x <- list()
for (i in 1:nrow(pred_matrix)) {
x[[i]] <- pred_matrix[i, ]
}
# Outputs
n_sim <- iter - burn_in
omega1_mat <- matrix(rep(0, times = n_sim * n_obs), nrow = n_obs)
omega2_mat <- matrix(rep(0, times = n_sim * n_obs), nrow = n_obs)
lambda1_mat <- matrix(rep(0, times = n_sim * n_intervals), nrow = n_intervals)
lambda2_mat <- matrix(rep(0, times = n_sim * n_intervals), nrow = n_intervals)
gamma_mat <- matrix(rep(0, times = n_sim), nrow = 1)
t1_mat <- matrix(rep(0, times = n_sim * n_obs), nrow = n_obs)
t2_mat <- matrix(rep(0, times = n_sim * n_obs), nrow = n_obs)
s1_mat <- matrix(rep(0, times = n_sim * (n_intervals + 1)), ncol = n_sim)
s2_mat <- matrix(rep(0, times = n_sim * (n_intervals + 1)), ncol = n_sim)
rownames(s1_mat) <- t_part
rownames(s2_mat) <- t_part
if (has_predictors) {
theta_mat <-
matrix(rep(0, times = n_sim * ncol(pred_matrix)), nrow = ncol(pred_matrix))
rownames(theta_mat) <- colnames(pred_matrix)
}
p.bar <- progress::progress_bar$new(
format = "[:bar] :current/:total (:percent)",
total = iter
)
p.bar$tick(0)
t1_current <- t1
t2_current <- t2
# Simulations
set.seed(seed)
for (i in 1:iter) {
p.bar$tick(1)
cum_h1 <- cum_h(t1_current, t_part, lambda1)
cum_h2 <- cum_h(t2_current, t_part, lambda2)
part_count1 <- partition_count(t1_current, t_part)
part_count2 <- partition_count(t2_current, t_part)
part_loc1 <- partition_location(t1_current, t_part)
part_loc2 <- partition_location(t2_current, t_part)
omega1 <-
purrr::pmap_dbl(
list(omega1, y, cum_h1, x),
function(omega, y, cum_h, x) {
sample_omega(omega, y, cum_h, x, theta, gamma, omega_d)
}
)
omega2 <-
purrr::pmap_dbl(
list(omega2, y, cum_h2, x),
function(omega, y, cum_h, x) {
sample_omega(omega, y, cum_h, x, theta, gamma, omega_d)
}
)
y <- purrr::map2_dbl(omega1, omega2, ~sample_y(.x, .y, gamma))
t_part_low1 <- purrr::map_dbl(part_loc1, ~t_part[.])
t_part_low2 <- purrr::map_dbl(part_loc2, ~t_part[.])
u1_1 <- u1
u1_2 <- c(0, u1[1:(length(u1) - 1)])
u2_1 <- u2
u2_2 <- c(0, u2[1:(length(u2) - 1)])
c1 <- rep(c, times = length(lambda1))
c2 <- c(0, c1[1:(length(c1) - 1)])
for (j in seq_along(lambda1)) {
bound1 <- get_min_bound(t1_current,
omega1,
x,
part_loc1,
t_part_low1,
j,
theta,
lambda1,
int_len)
bound2 <- get_min_bound(t2_current,
omega2,
x,
part_loc2,
t_part_low2,
j,
theta,
lambda2,
int_len)
lambda1[[j]] <- sample_lambda(
u1_1[[j]], u1_2[[j]], alpha, beta, c1[[j]], c2[[j]], bound1,
part_count1[[j]]
)
lambda2[[j]] <- sample_lambda(
u2_1[[j]], u2_2[[j]], alpha, beta, c1[[j]], c2[[j]], bound2,
part_count2[[j]]
)
}
index_indicator <- c(rep(1, times = (length(u1) - 1)), 0)
lambda1_lag <- c(lambda1[2:length(lambda1)], 1)
lambda2_lag <- c(lambda2[2:length(lambda2)], 1)
u1 <- purrr::pmap_dbl(
list(lambda1, lambda1_lag, index_indicator),
function(l, l1, index_indicator) {
sample_u(l, l1, alpha, beta, c, index_indicator)
}
)
u2 <- purrr::pmap_dbl(
list(lambda2, lambda2_lag, index_indicator),
function(l, l1, index_indicator) {
sample_u(l, l1, alpha, beta, c, index_indicator)
}
)
gamma <- sample_gamma(gamma, omega1, omega2, y, gamma_d)
if (has_predictors) {
for (j in seq_along(theta)) {
bound1 <- get_min_bound_theta(j, t1_current, omega1, cum_h1, x, theta)
bound2 <- get_min_bound_theta(j, t2_current, omega2, cum_h2, x, theta)
bound <- min(bound1, bound2)
theta[[j]] <- sample_theta(
bound, colSums(pred_matrix)[[j]], theta[[j]], theta_d
)
}
}
t1_current <- purrr::pmap_dbl(
list(t1, t1_current, omega1, delta1, x),
function(t_orig, t_prev, omega, delta, x) {
sample_t(t_orig, t_prev, omega, delta, max(t_part), x, theta, t_part, lambda1)
}
)
t2_current <- purrr::pmap_dbl(
list(t2, t2_current, omega2, delta2, x),
function(t_orig, t_prev, omega, delta, x) {
sample_t(t_orig, t_prev, omega, delta, max(t_part), x, theta, t_part, lambda2)
}
)
# Outputs
if (i > burn_in) {
omega1_mat[, i - burn_in] <- omega1
omega2_mat[, i - burn_in] <- omega2
lambda1_mat[, i - burn_in] <- lambda1
lambda2_mat[, i - burn_in] <- lambda2
gamma_mat[, i - burn_in] <- gamma
t1_mat[, i - burn_in] <- t1_current
t2_mat[, i - burn_in] <- t2_current
s1_mat[, i - burn_in] <- exp(-cum_h(t_part, t_part, lambda1))
s2_mat[, i - burn_in] <- exp(-cum_h(t_part, t_part, lambda2))
if (has_predictors) theta_mat[, i - burn_in] <- theta
}
}
l <- list("omega1" = omega1_mat, "omega2" = omega2_mat,
"lambda1" = lambda1_mat, "lambda2" = lambda2_mat,
"gamma" = gamma_mat, "t1" = t1_mat, "t2" = t2_mat,
"s1" = s1_mat, "s2" = s2_mat)
if (has_predictors) l$theta <- theta_mat
out <- new_BSBHaz(
l,
individuals = as.integer(n_obs),
intervals = as.integer(n_intervals),
has_predictors = has_predictors,
samples = as.integer(iter - burn_in),
int_len = as.double(int_len)
)
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BSBHaz.R
|
#' Initial setup for BSBHaz model
#'
#' \code{BSBInit} creates the necessary data structure for use in
#' \code{\link{BSBHaz}}.
#'
#' This function reads and formats censored bivariate survival data in the
#' following way. If \code{df} is provided, failure times and censoring
#' indicators are assumed to be columns named 't1', 't2', 'delta1', and
#' 'delta2'. Other columns not named 'id' (ignoring case) are taken to be
#' predictors. If \code{df} has no columns 'delta1' or 'delta2', observations
#' are taken as exact.
#'
#' If \code{df} is not provided, then \code{t1} and \code{t2} are expected to be
#' objects of class 'Surv' created by \code{\link[survival]{Surv}} and the model
#' does not use predictors. Only right-censored observations are supported. Either
#' \code{df} or both \code{t1} and \code{t2} must be supplied, but not both. The
#' \code{df} argument comes first for use in pipes.
#'
#' @param df A data frame with columns 't1', 't2', 'delta1', 'delta2'. Any other
#' columns not named 'id' are taken to be predictors. These predictors must be
#' numeric, i.e., \strong{categorical predictors must be one-hot encoded}.
#' @param t1,t2 Objects of class 'Surv' as created by
#' \code{\link[survival]{Surv}}.
#' @param alpha,beta,c Doubles. Parameters for Markov gamma hazard priors.
#' @param part_len A double that gives the length of time partition intervals.
#' @param seed Random seed for variable initialization.
#'
#' @return An object of class '\code{BSBinit}'
#'
#' @examples
#' t1 <- survival::Surv(c(1, 2, 3))
#' t2 <- survival::Surv(c(1, 2, 3))
#'
#' init <- BSBInit(t1 = t1, t2 = t2, seed = 0)
#'
#' @export
BSBInit <- function(df = NULL,
t1 = NULL,
t2 = NULL,
alpha = 0.001,
beta = 0.001,
c = 1000,
part_len = 1,
seed = 42) {
if (!is.null(df)) {
if (!inherits(df, "data.frame")) {
stop("df must be of class 'data.frame'")
}
if (!(is.null(t1) & is.null(t2))) {
stop("Only one of df or t1 and t2 must be supplied")
}
}
if (!is.null(t1)) {
if (!inherits(t1, "Surv")) {
stop("t1 is not of class 'Surv'")
}
if (attributes(t1)$type != "right") {
stop(paste("t1 has censoring of type:", attributes(t1)$type))
}
stopifnot(attributes(t1)$type == "right")
if (!is.null(df)) {
stop("Only one of df or t1 and t2 must be supplied")
}
if (is.null(t2)) {
stop("Missing t2")
}
if (length(t1) != length(t2)) {
stop("t1 and t2 must have the same length")
}
}
if (!is.null(t2)) {
if (!inherits(t2, "Surv")) {
stop("t2 is not of class 'Surv'")
}
if (attributes(t2)$type != "right") {
stop(paste("t2 has censoring of type:", attributes(t2)$type))
}
if (!is.null(df)) {
stop("Only one of df or t1 and t2 must be supplied")
}
if (is.null(t1)) {
stop("Missing t1")
}
if (length(t1) != length(t2)) {
stop("t1 and t2 must have the same length")
}
}
# Handling the 'Surv' objects
if (!is.null(t1)) {
n_obs <- attributes(t1)$dim[[1]]
delta1 <- as.double(t1)[(n_obs + 1):(n_obs * 2)]
t1 <- as.double(t1)[1:n_obs]
delta2 <- as.double(t2)[(n_obs + 1):(n_obs * 2)]
t2 <- as.double(t2)[1:n_obs]
has_predictors <- FALSE
pred_matrix <- matrix(rep(0, times = n_obs), nrow = n_obs)
}
if (!is.null(df)) {
stopifnot(!is.null(df[["t1"]]))
stopifnot(!is.null(df[["t2"]]))
df[["id"]] <- NULL
df[["Id"]] <- NULL
df[["iD"]] <- NULL
df[["ID"]] <- NULL
n_obs <- length(df[["t1"]])
if (is.null(df[["delta1"]])) {
delta1 <- rep(1, times = n_obs)
} else {
stopifnot(is.double(df[["delta1"]]) | is.integer(df[["delta1"]]))
delta1 <- as.double(df[["delta1"]])
}
if (is.null(df[["delta2"]])) {
delta2 <- rep(1, times = n_obs)
} else {
stopifnot(is.double(df[["delta2"]]) | is.integer(df[["delta2"]]))
delta2 <- as.double(df[["delta2"]])
}
stopifnot(is.double(df[["t1"]]) | is.integer(df[["t1"]]))
t1 <- as.double(df[["t1"]])
stopifnot(is.double(df[["t2"]]) | is.integer(df[["t2"]]))
t2 <- as.double(df[["t2"]])
df[["t1"]] <- NULL
df[["t2"]] <- NULL
df[["delta1"]] <- NULL
df[["delta2"]] <- NULL
# Predictors
if (length(names(df)) != 0) {
has_predictors <- TRUE
pred_matrix <- as.matrix(df)
colnames(pred_matrix) <- colnames(df)
} else {
has_predictors <- FALSE
pred_matrix <- matrix(rep(0, times = n_obs), nrow = n_obs)
}
}
# Time partition
max_t <- max(max(t1), max(t2))
t_part <- partition(t = max_t, int_len = part_len)
# Variable initialization
set.seed(seed)
rho <- stats::runif(n = 1)
gamma <- rho / (1 - rho)
omega1 <- stats::rgamma(n = n_obs, shape = 2, rate = 1)
y <- stats::rpois(n = n_obs, lambda = omega1)
omega2 <- stats::rgamma(n = n_obs, shape = 2, rate = 1)
theta <- stats::rnorm(n = ncol(pred_matrix))
n_intervals <- length(t_part) - 1
lambda1 <- stats::rgamma(n = n_intervals, shape = alpha, rate = beta)
lambda2 <- stats::rgamma(n = n_intervals, shape = alpha, rate = beta)
lambda1 <- pmax(lambda1, rep(1e-5, times = n_intervals))
lambda2 <- pmax(lambda2, rep(1e-5, times = n_intervals))
u1 <- stats::rpois(n = n_intervals, lambda = lambda1)
u2 <- stats::rpois(n = n_intervals, lambda = lambda2)
list_out <- list(
"t1" = t1, "t2" = t2, "delta1" = delta1, "delta2" = delta2,
"alpha" = alpha, "beta" = beta, "c" = c,
"gamma" = gamma, "omega1" = omega1, "omega2" = omega2, "y" = y,
"theta" = theta, "pred_matrix" = pred_matrix, "u1" = u1, "u2" = u2,
"lambda1" = lambda1, "lambda2" = lambda2, "t_part" = t_part
)
new_BSBinit(
l = list_out,
individuals = as.integer(n_obs),
intervals = as.integer(n_intervals),
has_predictors = has_predictors
)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BSBInit.R
|
#' Plot diagnostics for BSBHaz model
#'
#' @param bsbhaz An object of class 'BSBHaz' created by
#' \code{\link{BSBHaz}}.
#' @param variable A character indicating which variable to get the plot from.
#' @param type A character indicating if the plot should be a traceplot or plot
#' the ergodic means.
#'
#' @export
#'
#' @examples
#' t1 <- survival::Surv(c(1, 2, 3))
#' t2 <- survival::Surv(c(1, 2, 3))
#'
#' init <- BSBInit(t1 = t1, t2 = t2, seed = 0)
#' samples <- BSBHaz(init, iter = 10, omega_d = 2,
#' gamma_d = 10, seed = 10)
#'
#' BSBPlotDiag(samples, variable = "omega1", type = "traceplot")
#'
#' @importFrom tibble as_tibble
#' @importFrom dplyr mutate
#' @importFrom dplyr group_by
#' @importFrom dplyr ungroup
#' @importFrom dplyr n
#' @importFrom tidyr pivot_longer
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 geom_line
#' @importFrom ggplot2 facet_wrap
#' @importFrom ggplot2 labs
#' @importFrom ggplot2 theme_bw theme
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 element_blank
#' @importFrom magrittr %>%
#' @importFrom stringr str_remove_all
#' @importFrom rlang .data
BSBPlotDiag <- function(bsbhaz,
variable = c("omega1", "omega2", "lambda1",
"lambda2", "gamma", "theta"),
type = c("traceplot", "ergodic_means")) {
stopifnot(inherits(bsbhaz, "BSBHaz"))
variable <- match.arg(variable)
type <- match.arg(type)
p <- switch (type,
traceplot = plot_traceplots(bsbhaz[[variable]]),
ergodic_means = plot_ergodic_means(bsbhaz[[variable]])
)
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BSBPlotDiag.R
|
#' Plot summaries for BSBHaz model
#'
#' @param bsbhaz An object of class 'BSBHaz' created by
#' \code{\link{BSBHaz}}.
#' @param variable A character indicating the variable to plot.
#'
#' @export
#'
#' @examples
#' t1 <- survival::Surv(c(1, 2, 3))
#' t2 <- survival::Surv(c(1, 2, 3))
#'
#' init <- BSBInit(t1 = t1, t2 = t2, seed = 0)
#' samples <- BSBHaz(init, iter = 10, omega_d = 2,
#' gamma_d = 10, seed = 10)
#'
#' BSBPlotSumm(samples, "s1")
#' @importFrom magrittr %>%
#' @importFrom dplyr mutate select
#' @importFrom ggplot2 ggplot aes geom_segment theme_bw theme scale_x_continuous
#' labs element_blank
BSBPlotSumm <- function(bsbhaz,
variable = c("lambda1", "lambda2", "s1", "s2")
) {
stopifnot(inherits(bsbhaz, "BSBHaz"))
variable <- match.arg(variable)
p <- switch (
variable,
lambda1 = plot_hazards(
BSBSumm(bsbhaz, "lambda1"),
attr(bsbhaz, "int_len"),
"lambda1"
),
lambda2 = plot_hazards(
BSBSumm(bsbhaz, "lambda2"),
attr(bsbhaz, "int_len"),
"lambda2"
),
s1 = plot_survival(
BSBSumm(bsbhaz, "s1"),
attr(bsbhaz, "int_len"),
"S1"
),
s2 = plot_survival(
BSBSumm(bsbhaz, "s2"),
attr(bsbhaz, "int_len"),
"S2"
)
)
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BSBPlotSumm.R
|
#' Get posterior summaries for BSBHaz model
#'
#' @param bsbhaz An object of class 'BSBHaz' created by
#' \code{\link{BSBHaz}}.
#' @param variable A character indicating which variable to get summaries from.
#'
#' @return A data frame with posterior sample means and a 95 \% probability
#' interval. For \code{omega1}, \code{omega2}, \code{gamma}, and \code{theta}
#' also includes a column with the acceptance rates for the
#' Metropolis-Hastings algorithm.
#' @export
#'
#' @examples
#' t1 <- survival::Surv(c(1, 2, 3))
#' t2 <- survival::Surv(c(1, 2, 3))
#'
#' init <- BSBInit(t1 = t1, t2 = t2, seed = 0)
#' samples <- BSBHaz(init, iter = 10, omega_d = 2,
#' gamma_d = 10, seed = 10)
#'
#' BSBSumm(samples, variable = "gamma")
#' BSBSumm(samples, variable = "omega1")
#' BSBSumm(samples, variable = "lambda1")
BSBSumm <- function(bsbhaz,
variable = c("omega1", "omega2", "lambda1",
"lambda2", "gamma", "theta",
"s1", "s2")) {
stopifnot(inherits(bsbhaz, "BSBHaz"))
variable <- match.arg(variable)
switch (variable,
omega1 = summaries_omega(bsbhaz$omega1),
omega2 = summaries_omega(bsbhaz$omega2),
lambda1 = summaries_lambda(bsbhaz$lambda1),
lambda2 = summaries_lambda(bsbhaz$lambda2),
theta = summaries_theta(bsbhaz$theta),
gamma = summaries_gamma(bsbhaz$gamma),
s1 = summaries_surv(bsbhaz$s1),
s2 = summaries_surv(bsbhaz$s2)
)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BSBSumm.R
|
#' Markov Beta Model
#'
#' Posterior inference for the Bayesian non-parametric Markov beta model for discrete
#' survival times.
#'
#' Computes the Gibbs sampler given by the full conditional distributions of u
#' and Pi (Nieto-Barajas & Walker, 2002) and arranges the resulting Markov
#' chain into a tibble which can be used to obtain posterior summaries.
#'
#' @param times Numeric positive vector. Failure times.
#' @param delta Logical vector. Status indicator. \code{TRUE} (1) indicates
#' exact lifetime is known, \code{FALSE} (0) indicates that the corresponding
#' failure time is right censored.
#' @param alpha Nonnegative vector. Small entries are recommended in order to
#' specify a non-informative prior distribution.
#' @param beta Nonnegative vector. Small entries are recommended in order to
#' specify a non-informative prior distribution.
#' @param c.r Nonnegative vector. The higher the entries, the higher the
#' correlation of two consecutive failure times.
#' @param a.eps Numeric. Shape parameter for the prior gamma distribution of
#' epsilon when \code{type.c = 4}.
#' @param b.eps Numeric. Scale parameter for the prior gamma distribution of
#' epsilon when \code{type.c = 4}.
#' @param type.c Integer. 1=defines \code{c.r} as a zero-entry vector; 2=lets
#' the user define \code{c.r} freely; 3=assigns \code{c.r} an
#' exponential prior distribution with mean \code{epsilon}; 4=assigns \code{c.r}
#' an exponential hierarchical distribution with mean \code{epsilon} which in turn has a
#' Ga(a.eps, b.eps) distribution.
#' @param epsilon Double. Mean of the exponential distribution assigned to
#' \code{c.r}
#' @param iterations Integer. Number of iterations including the \code{burn.in}
#' and \code{thinning} to be computed for the Markov chain.
#' @param burn.in Integer. Length of the burn-in period for the Markov chain.
#' @param thinning Integer. Factor by which the chain will be thinned. Thinning
#' the Markov chain reduces autocorrelation.
#' @param printtime Logical. If \code{TRUE}, prints out the execution time.
#' @note It is recommended to verify the chain's stationarity. This can be done by
#' checking each partition element individually. See \link{BePlotDiag}.
#' @seealso \link{BePlotDiag}, \link{BePloth}
#' @references - Nieto-Barajas, L. E. & Walker, S. G. (2002). Markov beta and
#' gamma processes for modelling hazard rates. \emph{Scandinavian Journal of
#' Statistics} \strong{29}: 413-424.
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' # data(psych)
#' # timesP <- psych$time
#' # deltaP <- psych$death
#' # BEX1 <- BeMRes(timesP, deltaP, iterations = 3000, burn.in = 300, thinning = 1)
#'
#' ## Example 2
#' # data(gehan)
#' # timesG <- gehan$time[gehan$treat == "control"]
#' # deltaG <- gehan$cens[gehan$treat == "control"]
#' # BEX2 <- BeMRes(timesG, deltaG, type.c = 2, c.r = rep(50, 22))
#'
#'
#'
#' @export BeMRes
BeMRes <-
function(times, delta = rep(1, length(times)), alpha = rep(0.0001, K),
beta = rep(0.0001, K), c.r = rep(0, K-1),
a.eps = 0.1, b.eps = 0.1, type.c = 4,
epsilon = 1, iterations = 2000,
burn.in = floor(iterations * 0.2), thinning = 5, printtime = TRUE) {
tInit <- proc.time()
K <- max(times)
tol = .Machine$double.eps ^ 0.5
if (min(times) < 0) {
stop ("Invalid argument: 'times' must be a nonnegative integer vector.")
}
if (max(abs(times - round(times)) > tol) == 1) {
stop ("Invalid argument: 'times' must be a nonnegative integer vector.")
}
if (min((delta == 0) + (delta == 1 )) == 0) {
stop ("Invalid argument: 'delta' must have 0 - 1 entries.")
}
if (length(times) != length(delta)) {
stop ("Invalid argument: 'times' and 'delta' must have same length.")
}
if (length(alpha) != K || length(beta) != K) {
stop (c("Invalid argument: 'alpha', 'beta', must have length "), K, ("."))
}
if (min(c(alpha, beta)) < 0) {
stop ("Invalid argument: 'alpha' and 'beta' must have nonnegative entries.")
}
if (abs(type.c - round(type.c)) > tol || type.c < 1 || type.c > 4) {
stop ("Invalid argument: 'type.c' must be an integer between 1 and 4.")
}
if (type.c == 2) {
if (length(c.r) != (K - 1)) {
stop (c("Invalid argument: 'c.r' must have length, ", K - 1, "."))
}
if (max(abs(c.r - round(c.r)) > tol) == 1 || min(c.r) < 0) {
stop ("Invalid argument: 'c.r' entries must be positive integers.")
}
}
if (type.c == 1 && sum(abs(c.r)) != 0 ) {
c.r <- rep(0, K - 1)
warning (c("'c.r' redefined as rep(0,", K - 1, ") because type.c = 1."))
}
if ((type.c == 3 || type.c == 4) && epsilon < 0) {
stop ("Invalid argument: 'epsilon' must be nonnegative.")
}
if (iterations <= 0 || abs(iterations - round(iterations)) > tol
|| iterations < 50) {
stop ("Invalid argument: 'iterations' must be an integer greater than 50.")
}
if (burn.in < 0 || abs(burn.in - round(burn.in)) > tol
|| burn.in > iterations * 0.9) {
stop ("Invalid argument: 'burn.in' must be a postitive integer smaller than
iterations = ", iterations * 0.9, ".")
}
if (!inherits(thinning, "numeric")) {
stop ("Invalid argument: 'thinning' must be a logical value.")
}
if (thinning <= 0 || abs(thinning - round(thinning)) > tol
|| thinning > 0.1 * iterations) {
stop ("Invalid argument: 'thinning' must be a postitive integer smaller than
iterations * 0.10 = ", iterations * 0.1, ".")
}
if (printtime != TRUE && printtime != FALSE) {
stop ("Invalid argument: 'printtime' must be a logical value.")
}
nm <- BeNM(times, delta)
n <- nm$n
m <- nm$m
tao <- nm$tao
t.unc <- nm$t.unc
if (type.c == 3 || type.c == 4) {
c.r <- rep(5, (K - 1))
}
if (type.c == 4) {
Epsilon <- rep(NA, iterations)
}
cat(c("Iterating...", "\n"), sep = "")
PI <- matrix(NA, nrow = iterations, ncol = K)
U <- matrix(NA, nrow = iterations, ncol = (K - 1))
C <- matrix(NA, nrow = iterations, ncol = (K - 1))
Pi.r <- rep(0.1, K)
pb <- dplyr::progress_estimated(iterations)
for(j in seq_len(iterations)) {
pb$tick()$print()
u.r <- BeUpdU(alpha, beta, c.r, Pi.r)
Pi.r <- UpdPi(alpha, beta, c.r, u.r, n, m)
if (type.c == 3 || type.c == 4) {
if (type.c == 4) {
epsilon <- rgamma(1, shape = a.eps + K, scale = 1 / (b.eps + sum(c.r)))
}
c.r <- BeUpdC(alpha, beta, Pi.r, u.r, epsilon)
}
PI[j, ] <- Pi.r
U[j, ] <- u.r
C[j, ] <- c.r
if(type.c == 4) Epsilon[j] <- epsilon
}
PI <- PI[seq(burn.in + 1, iterations, thinning), ]
U <- U[seq(burn.in + 1, iterations, thinning), ]
C <- C[seq(burn.in + 1, iterations, thinning), ]
if (type.c == 4) Epsilon <- Epsilon[seq(burn.in + 1, iterations, thinning)]
cat(c("Done.", "\n", "Generating survival function estimates.", "\n"),
sep = "")
rows <- nrow(PI)
s <- seq_len(K)
S <- tibble::as_tibble(do.call(rbind,unname(purrr::map_dfr(purrr::map(tibble::as_tibble(t(PI)) ,~purrr::accumulate(.x,`+`)),~exp(-.x)))))
if (printtime) {
cat(">>> Total processing time (sec.):\n")
print(procTime <- proc.time() - tInit)
}
if(type.c == 4) {
X = tibble::enframe(list(PI = PI,
U = U, C = C, Epsilon = Epsilon))
} else {
X = tibble::enframe(list(PI = PI, U = U, C = C))
}
out <- tibble::enframe(list(times = times, delta = delta, tao = tao, K = K, t.unc = t.unc,
iterations = rows, simulations = X, s = s, S = S))
out
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BeMRes.R
|
BeNM <-
function(times, delta) {
K <- max(times)
tao <- seq(0, K)
t.unc <- sort(times[delta == 1])
n <- cut(t.unc,tao) %>% table %>% as.character %>% readr::parse_integer()
m <- tao[-1] %>% purrr::map_int(~length(times[times > .x]))
out <- list(n = n, m = m, tao = tao, K = K, t.unc = t.unc)
out
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BeNM.R
|
#' Diagnosis plots for PI, U, C and Epsilon
#'
#' Diagnostic plots for hazard rate (PI), latent variable (U), dependence
#' parameter (C) and parameter of the hierarchical model (Epsilon).
#'
#' This function returns a diagnostic plot for the chain of the selected
#' variable. The diagnostics include trace, ergodic mean, autocorrelation
#' function and histogram.
#'
#' @param M Tibble. Contains the output by
#' \code{BeMRes}
#' @param variable Either "PI", "U", "C" or "Epsilon". Variable for which
#' diagnostic plot will be shown.
#' @param pos Positive integer. Position of the selected \code{variable} to be
#' plotted.
#' @seealso \link{BeMRes}
#' @references - Nieto-Barajas, L. E. & Walker, S. G. (2002). Markov beta and
#' gamma processes for modelling hazard rates. \emph{Scandinavian Journal of
#' Statistics} \strong{29}: 413-424.
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' # data(psych)
#' # timesP <- psych$time
#' # deltaP <- psych$death
#' # BEX1 <- BeMRes(timesP, deltaP, iterations = 3000, burn.in = 300, thinning = 1)
#' # BePlotDiag(BEX1, variable = "PI", pos = 2)
#' # BePlotDiag(BEX1, variable = "U", pos = 3)
#'
#' ## Example 2
#' # data(gehan)
#' # timesG <- gehan$time[gehan$treat == "control"]
#' # deltaG <- gehan$cens[gehan$treat == "control"]
#' # BEX2 <- BeMRes(timesG, deltaG, type.c = 2, c.r = rep(50, 22))
#' # BePlotDiag(BEX2, variable = "PI", pos = 5)
#' # BePlotDiag(BEX2, variable = "U", pos = 4)
#'
#'
#'
#' @export BePlotDiag
BePlotDiag <-
function(M, variable = "PI", pos = 1) {
variable <- match.arg(variable,c("U","C","PI","Epsilon"))
K <- M %>% extract("K")
tol = .Machine$double.eps ^ 0.5
if (pos < 0 || pos > K || abs(pos - round(pos)) > tol ) {
stop ("Invalid position.")
}
if ((variable == "C" || variable == "U") && pos > K - 1) {
stop ("Invalid position.")
}
if (!("Epsilon" %in% (M %>% extract("simulations") %>% dplyr::pull(name))) && variable == "Epsilon"){
stop("Plots for 'Epsilon' are not available.")
}
if (variable == "Epsilon" && pos != 1) {
warning("'Epsilon' has only one entry (1). Graphics shown for Epsilon_1.")
pos <- 1
}
MAT <- M %>% extract(c("simulations",variable))
if(variable %in% c("Epsilon")){
pos = 1
MAT <- matrix(MAT, nrow = M %>% extract("iterations"), ncol = 1) %>% tibble::as_tibble()
}
if(variable %in% c("PI","U","C")){
MAT %<>% tibble::as_tibble() %>% dplyr::select(pos) %>% rlang::set_names("V1")
}
var <- switch(variable,
PI = expression(pi),
Epsilon = expression(epsilon),
U = "U",
C = "C")
if(variable %in% c("Epsilon")) title <- "" else{title <- paste0("Position: ", pos)}
a <- MAT %>% ggplot2::ggplot() + ggplot2::geom_line(ggplot2::aes(x=seq_len(nrow(MAT)), y = V1), color = "slateblue4") +
ggplot2::labs(x = "Iteration", y = variable) + ggplot2::ylab(var) + ggplot2::ggtitle("Trace") +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
b <- MAT %>% ggplot2::ggplot() + ggplot2::geom_line(ggplot2::aes(x=seq_len(nrow(MAT)), y = cumsum(V1)/seq_len(nrow(MAT))), color = "slateblue4") +
ggplot2::labs(x = "Iteration", y = variable) + ggplot2::ggtitle("Ergodic mean") +
ggplot2::ylab(var) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
acf.aux <- acf(MAT, plot = F)
c <- cbind(acf.aux$lag, acf.aux$acf) %>% tibble::as_tibble() %>% ggplot2::ggplot() +
ggplot2::geom_segment(ggplot2::aes(x = V1, xend = V1, y = V2, yend = 0)) + ggplot2::labs(x = "Lag", y ="ACF")+
ggplot2::ggtitle("Autocorrelation function") +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
d <- MAT %>% ggplot2::ggplot() + ggplot2::geom_histogram(ggplot2::aes(x = V1), fill = "lightblue", color = "black", bins = 30) +
ggplot2::ggtitle("Histogram") + ggplot2::xlab(var) + ggplot2::ylab("") + #coord_flip() +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
gridExtra::grid.arrange(a,b,c,d, top = title)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BePlotDiag.R
|
#' Plots for the discrete Hazard and Survival Function Estimates
#'
#' Plots the resulting hazard function along with the survival function
#' estimates defined by the Markov beta process (Nieto-Barajas and Walker,
#' 2002).
#'
#' This function returns plots of the hazard rate estimates as computed
#' by \code{\link{BeMRes}} together with the Nelson-Aalen estimate along with their
#' confidence intervals for the data set given. Additionally, it plots the
#' survival function and the Kaplan-Meier estimate with their corresponding
#' credible intervals.
#'
#' @param M tibble. Contains the output generated by \code{BeMRes}.
#' @param type.h character, "line" = plots the hazard rate of each interval
#' joined by a line, "dot" = plots the hazard rate of each interval with a dot.
#' @param add.survival Logical. If \code{TRUE}, plots the Nelson-Aalen based
#' estimate in the same graphic as the hazard rate, and the Kaplan-Meier
#' estimate of the survival function.
#' @param intervals logical. If TRUE, plots confidence bands for the selected functions including Nelson-Aalen and/or Kaplan-Meier estimate.
#' @param confidence Numeric. Confidence band width.
#' @param summary Logical. If \code{TRUE}, a summary for hazard and survival
#' functions is returned as a tibble.
#' @return \item{SUM.h}{Numeric tibble. Summary for the mean, median, and a
#' credible interval at the level given by \code{confidence} for each failure time
#' of the hazard function.} \item{SUM.S}{Numeric tibble. Summary for the mean,
#' median, and a credible interval at the level given by \code{confidence} for each
#' failure time of the survival function.}
#' @seealso \link{BeMRes}, \link{BePlotDiag}
#' @references - Nieto-Barajas, L. E. & Walker, S. G. (2002). Markov beta and
#' gamma processes for modelling hazard rates. \emph{Scandinavian Journal of
#' Statistics} \strong{29}: 413-424.
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' # data(psych)
#' # timesP <- psych$time
#' # deltaP <- psych$death
#' # BEX1 <- BeMRes(timesP, deltaP, iterations = 3000, burn.in = 300, thinning = 1)
#' # BePloth(BEX1)
#' # sum <- BePloth(BEX1, type.h = "line", summary = T)
#'
#' ## Example 2
#' # data(gehan)
#' # timesG <- gehan$time[gehan$treat == "control"]
#' # deltaG <- gehan$cens[gehan$treat == "control"]
#' # BEX2 <- BeMRes(timesG, deltaG, type.c = 2, c.r = rep(50, 22))
#' # BePloth(BEX2)
#'
#'
#'
#' @export BePloth
BePloth <-
function(M, type.h = "dot", add.survival = T, intervals = T,
confidence = 0.95, summary = FALSE) {
SUM <- PiSumm(M, confidence)
s <- SUM %>% tibble::deframe()
v <- list("tao",
"K",
"times",
"delta"
) %>% purrr::map(~extract(M,.x)) %>% rlang::set_names(c("tao","K","times","delta"))
tao <- v$tao
K <- v$K
delta <- v$delta
times <- v$times
if(type.h == "dot") {
h <- s$SUM.h %>% ggplot2::ggplot() +
ggplot2::geom_point(ggplot2::aes(x = tao[-1],
y = mean, color = "Hazard Function")) +
ggplot2::scale_color_manual(values = c("black","#b22222")) +
ggplot2::guides(color = ggplot2::guide_legend(title = "")) +
ggplot2::xlab("Time") + ggplot2::ylab("Hazard rate") + ggplot2::scale_alpha_continuous(guide = F) +
ggplot2::ggtitle(paste0("Estimate of hazard rates with intervals at ",confidence * 100,"% of credibility")) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"),
legend.position="bottom")
if(intervals){
h <- h + ggplot2::geom_errorbar(ggplot2::aes(ymin = lower, ymax = upper, x = tao[-1]),
alpha = 0.5, color = "gray50", width = 1)
}
}
if(type.h == "line"){
h <- s$SUM.h %>% ggplot2::ggplot() +
ggplot2::geom_line(ggplot2::aes(x = (tao[-(K+1)] + tao[-1])/2, y = mean, color = "Hazard Function")) +
ggplot2::scale_color_manual(values = c("black","#b22222")) +
ggplot2::guides(color = ggplot2::guide_legend(title = "")) +
ggplot2::xlab("Time") + ggplot2::ylab("Hazard rate") + ggplot2::scale_alpha_continuous(guide = F) +
ggplot2::ggtitle(paste0("Estimate of hazard rates with intervals at ",confidence * 100,"% of credibility")) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"),
legend.position="bottom")
if(intervals){
h <- h + ggplot2::geom_ribbon(ggplot2::aes(x = (tao[-(K+1)] + tao[-1])/2, ymin = lower, ymax = upper), alpha = .5, fill = "gray70")
}
}
S <- s$SUM.S %>% ggplot2::ggplot() + ggplot2::geom_step(na.rm = T, ggplot2::aes(x = t, y = `S^(t)`,color = "Model estimate")) +
ggplot2::scale_color_manual(limits = c("Model estimate","Kaplan-Meier"),values = c("black","#b22222")) +
ggplot2::guides(color = ggplot2::guide_legend(title = "")) +
ggplot2::scale_y_continuous(limits = c(0,1)) +
ggplot2::ggtitle(paste0("Estimate of Survival Function with intervals at ", confidence * 100,"% of credibility")) +
ggplot2::labs(x = "t",
y = expression(S^{(t)})) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"),
legend.position = "bottom")
if(intervals){
S <- S + ggplot2::geom_step(na.rm = T, ggplot2::aes(x = t, y = lower), alpha = 0.5, linetype = "dashed") +
ggplot2::geom_step(na.rm = T, ggplot2::aes(x = t, y = upper), alpha = 0.5, linetype = "dashed")
}
if(add.survival){
fit <- survival::survfit(survival::Surv(time = times, event = delta) ~ 1,
conf.int = confidence)
km.data <- tibble::tibble(time = fit$time,surv = fit$surv, lower = fit$lower,
upper = fit$upper)
if(km.data$time[1]!= 0){
km.data <- dplyr::bind_rows(tibble::tibble(time = 0, surv = 1, lower = 1, upper = 1),km.data)
}
na.data <- tibble::tibble(time = fit$time, h.est = fit$n.event / fit$n.risk)
h <- h + ggplot2::geom_point(data = na.data, ggplot2::aes(x = time, y = h.est, color = "Nelson-Aalen based estimate"))
S <- S + ggplot2::geom_step(data = km.data, na.rm = T, ggplot2::aes(x = time,y = surv, color = "Kaplan-Meier"))
if(intervals){
S <- S + ggplot2::geom_step(data = km.data, na.rm = T, ggplot2::aes(x = time, y = lower), alpha = 0.5, color = "#b22222", linetype = "dashed") +
ggplot2::geom_step(data = km.data, na.rm = T, ggplot2::aes(x = time, y = upper), alpha = 0.5, color = "#b22222", linetype = "dashed")
}
}
if (summary == TRUE) {
return(list(h,S,SUM))
} else{
return(list(h,S))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BePloth.R
|
BeUpdC <-
function(alpha, beta, Pi.r, u.r, epsilon) {
ck <- 50
K <- length(Pi.r)
c.r <- purrr::map_int(seq_len(K-1),function(index = .x){
id <- seq.int(u.r[index], u.r[index] + ck)
probs <- (lgamma(alpha[index + 1] + beta[index + 1] + id)
- lgamma(beta[index + 1] + id - u.r[index]) - lgamma(id - u.r[index] + 1)) +
id * (log(epsilon) + log(1 - Pi.r[index])
+ log(1 - Pi.r[index + 1]))
probs <- exp(probs)
sample(x = id, size = 1, prob = probs)
})
return(c.r)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BeUpdC.R
|
BeUpdU <-
function(alpha, beta, c.r, Pi.r) {
ind <- which(c.r != 0)
u.r <- rep(0,length(c.r))
if(length(ind)>0){
K <- length(Pi.r)
lphi <- (log(Pi.r[-K]) + log(Pi.r[-1])
- log(1 - Pi.r[-K]) - log(1 - Pi.r[-1]))
aux_u <- purrr::map(ind,
~exp(seq.int(0, c.r[.x]) * lphi[.x] -
lgamma(seq.int(0, c.r[.x]) + 1) -
lgamma(c.r[.x] - seq.int(0, c.r[.x]) + 1) -
lgamma(alpha[.x + 1] + seq.int(0,c.r[.x])) -
lgamma(beta[.x + 1] + c.r[.x] - seq.int(0,c.r[.x])))
)
u.r[ind] <- purrr::map_int(aux_u,
~sample(x = seq.int(0,length(.x)-1),size = 1, prob = .x))
}
return(u.r)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/BeUpdU.R
|
CCuLambdaSumm<-
function(M, new=NULL, confidence = 0.95) {
if (confidence <= 0 || confidence >= 1) {
stop ("Invalid parameter: confidence must be between 0 and 1.")
}
if(!is.null(new)){
if(ncol(new) != ncol(M %>% extract("data"))){
stop("Covariables doesn't match.")
}
if(!(match(names(new),M %>% extract("data") %>% names) %>% is.na %>% sum() == 0)){
stop(paste("Invalid colnames, should be", paste(M %>% extract("data") %>% names, collapse = ", ")))
}
}
v <- list("K",
"iterations",
"tao",
"data",
"covs.x",
"covs.y",
"s",
"S",
c("simulations","Lambda"),
c("simulations","Lambda.m"),
c("simulations","Delta"),
c("simulations","Theta"),
c("simulations","Pi.m"),
c("simulations","Z.m")
) %>% purrr::map(~extract(M,.x)) %>% rlang::set_names(c("K","iterations","tao","data","covs.x","covs.y",
"s","S","Lambda","Lambda.m","Delta",
"Theta","Pi.m","Z.m"))
tao <- v$tao
Lambda <- v$Lambda.m
Theta <- v$Theta
Z <- v$Z.m %>% tibble::enframe() %>% dplyr::select(mean.obs = value)
Pi <- v$Pi.m %>% tibble::enframe() %>% dplyr::select(mean.obs = value)
S <- v$S
if(!is.null(new)) {
names <- names(v$data)
new <- tibble::as_tibble(new)
Lambda <- v$Lambda
new.x <- dplyr::select(new, !!! colnames(v$covs.x))
new.y <- dplyr::select(new, !!! colnames(v$covs.y))
Z <- matrix(data = (purrr::map_int(purrr::map(purrr::map(purrr::cross2(tibble::as_tibble(t(new.y)),
tibble::as_tibble(t(v$Delta))),
purrr::lift(`*`)),
~exp(sum(.x))),
~rpois(n = 1, lambda = .x)) + 1 ),
nrow = v$iterations, ncol = nrow(new.y), byrow = T)
Z[Z>v$K] <- v$K
Z <- rlang::set_names(tibble::as_tibble(Z),
paste0("new_obs_", seq_len(nrow(new.y))))
Lambda <- purrr::map_dfc(purrr::cross2(seq_len(v$K), Z),~ dplyr::pull((.x[[1]] <= .x[[2]])*Lambda[,.x[[1]]],1))
Lambda <- purrr::map(seq_len(ncol(Lambda)/v$K), ~tibble::as_tibble(Lambda[,seq_len(v$K) + (.x-1)*v$K]))
Pi <- rlang::set_names(tibble::as_tibble(matrix(purrr::map2_dbl(purrr::map_dfc(purrr::cross2(seq_len(v$iterations),tibble::as_tibble(t(new.x))),
.f = ~ exp(sum(Theta[.x[[1]],] * .x[[2]])) * (tao[-1] - tao[-length(tao)])),
.y = tibble::as_tibble(t(purrr::reduce(purrr::map(Lambda, ~rlang::set_names(.x,paste0("V",seq_len(v$K)))), dplyr::bind_rows))),
.f = ~exp(-sum(.x*.y))),
nrow = v$iterations, ncol = nrow(new.x))),
paste0("new_obs_", seq_len(nrow(new.x))))
writeLines("Generating survival function estimates for new observations.")
pb <- dplyr::progress_estimated(length(v$s))
Lambda <- purrr::map2(.x = Lambda, .y =seq_len(nrow(new)),function(a,b){
eff <- as.numeric(exp(Theta%*%as.numeric(new[b,])))
a <- dplyr::mutate_all(a,.f = ~.x*eff)
return(a)
})
S <- do.call(dplyr::bind_cols, purrr::map(v$s, function(s = .x){
pb$tick()$print()
tibble::as_tibble(matrix(data = purrr::map_dbl(purrr::map(purrr::cross2(seq_len(v$iterations),purrr::map2(Lambda, tibble::as_tibble(t(new.x)),~list(.x,.y))),
.f= ~(s > tao[-1]) * tao[-1] * as.numeric(.x[[2]][[1]][.x[[1]],])+
(s > tao[-length(tao)] & s <= tao[-1]) * s * as.numeric(.x[[2]][[1]][.x[[1]],]) -
(s > tao[-length(tao)]) * tao[-(length(tao))] * as.numeric(.x[[2]][[1]][.x[[1]],])
), ~exp(-sum(.x))),
ncol = nrow(new.x), byrow = F))
}))
S <- purrr::map(seq_len(nrow(new.x)),
~S[,seq(.x, (nrow(new.x))*length(v$s), nrow(new.x))])
cat("\n Done.")
}
pr <- (1 - confidence) / 2
SUM.h <- purrr::map(Lambda, ~rlang::set_names(tibble::tibble(seq_len(v$K),
purrr::map_dbl(.x, mean,na.rm = T),
purrr::map_dbl(.x, quantile, probs = pr, na.rm = T),
purrr::map_dbl(.x, quantile, probs = 0.5, na.rm = T),
purrr::map_dbl(.x, quantile, probs = 1 - pr, na.rm = T)
),
c("k", "mean", "lower", "median", "upper")))
SUM.S <- purrr::map(S, ~ rlang::set_names(tibble::tibble(v$s,
purrr::map_dbl(.x, mean, na.rm = T),
purrr::map_dbl(.x, quantile, probs = pr, na.rm = T),
purrr::map_dbl(.x, quantile, probs = 0.5, na.rm = T),
purrr::map_dbl(.x, quantile, probs = 1-pr, na.rm = T)),
c("t", "S^(t)", "lower", "median", "upper")))
SUM.Pi <- purrr::map(Pi, ~rlang::set_names(tibble::tibble(mean(.x, na.rm=T),
quantile(.x, probs = pr,na.rm=T),
quantile(.x, probs = 0.5,na.rm=T),
quantile(.x, probs = 1 - pr,na.rm=T)),
c("mean", "lower", "median", "upper")))
z <- purrr::map(Z, ~rlang::set_names(tibble::tibble(mean(.x,na.rm=T),
quantile(.x, probs = pr,na.rm=T),
quantile(.x, probs = 0.5,na.rm=T),
quantile(.x, probs = 1 - pr,na.rm=T)),
c("mean", "lower", "median", "upper")))
out <- tibble::enframe(list(SUM.h = SUM.h, SUM.S = SUM.S, SUM.z = z, SUM.pi = SUM.Pi, K = nrow(new),
simulations = tibble::enframe(list(Z = Z, Pi = Pi))))
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CCuLambdaSumm.R
|
#' Bayesian Semiparametric Cure Rate Model with an Unknown Threshold and
#' Covariate Information
#'
#' Posterior inference for the Bayesian semiparametric cure rate model with
#' covariates in survival analysis.
#'
#' Computes the Gibbs sampler with the full conditional distributions of
#' all model parameters (Nieto-Barajas & Yin, 2008) and arranges the resulting Markov
#' chain into a tibble which can be used to obtain posterior summaries. Prior
#' distributions for the regression coefficients Theta and Delta are assumed
#' independent normals with zero mean and variance \code{var.theta.ini},
#' \code{var.delta.ini}, respectively.
#'
#' @param data Double tibble. Contains failure times in the first column,
#' status indicator in the second, and, from the third to the last column, the
#' covariate(s).
#' @param covs.x Character. Names of covariables to be part of the
#' multiplicative part of the hazard
#' @param covs.y Character. Names of covariables to determine the cure
#' threshold for each patient.
#' @param type.t Integer. 1=computes uniformly-dense intervals; 2=uses a
#' partition arbitrarily defined by the user via \code{utao}; 3=same-length intervals.
#' @param K Integer. Partition length for the hazard function.
#' @param utao vector. Partition specified by the user when type.t = 2. The first value of
#' the vector has to be 0 and the last one the maximum observed time, either censored or uncensored.
#' @param alpha Nonnegative entry vector. Small entries are recommended in
#' order to specify a non-informative prior distribution.
#' @param beta Nonnegative entry vector. Small entries are recommended in order
#' to specify a non-informative prior distribution.
#' @param c.r Nonnegative vector. The higher the entries, the higher the correlation of two consecutive intervals.
#' @param c.nu Tuning parameter for the proposal distribution for c.
#' Only when \code{type.c} is 3 or 4.
#' @param var.theta.str Double. Variance of the proposal normal distribution
#' for theta in the Metropolis-Hastings step.
#' @param var.delta.str Double. Variance of the proposal normal distribution
#' for delta in the Metropolis-Hastings step.
#' @param var.theta.ini Double. Variance of the prior normal distribution for theta.
#' @param var.delta.ini Double. Variance of the prior normal distribution for delta.
#' @param type.c 1=defines \code{c.r} as a zero-entry vector; 2=lets the user
#' define \code{c.r} freely; 3=assigns \code{c.r} an exponential prior
#' distribution with mean 1; 4=assigns \code{c.r} an exponential hierarchical
#' distribution with mean \code{epsilon} which in turn has a Ga(a.eps, b.eps)
#' distribution.
#' @param a.eps Double. Shape parameter for the prior gamma distribution of
#' epsilon when \code{type.c = 4}.
#' @param b.eps Double. Scale parameter for the prior gamma distribution of
#' epsilon when \code{type.c = 4}.
#' @param epsilon Double. Mean of the exponential distribution assigned to
#' \code{c.r} when \code{type.c = 3}.
#' @param iterations Integer. Number of iterations including the \code{burn.in}
#' to be computed for the Markov chain.
#' @param burn.in Integer. Length of the burn-in period for the Markov chain.
#' @param thinning Integer. Factor by which the chain will be thinned. Thinning
#' the Markov chain reduces autocorrelation.
#' @param printtime Logical. If \code{TRUE}, prints out the execution time.
#' @note It is recommended to verify the chain's stationarity. This can be done by
#' checking each element individually. See \code{\link{CCuPlotDiag}}.
#' @seealso \link{CCuPlotDiag}, \link{CCuPloth}
#' @references - Nieto-Barajas, L. E., & Yin, G. (2008). Bayesian
#' semiparametric cure rate model with an unknown threshold. Scandinavian
#' Journal of Statistics, 35(3), 540-556.
#' https://doi.org/10.1111/j.1467-9469.2007.00589.x
#'
#' - Nieto-Barajas, L. E. (2002). Discrete time Markov gamma processes and time
#' dependent covariates in survival analysis. Statistics, 2-5.
#' @examples
#'
#'
#'
#' # data(BMTKleinbook)
#' # res <- CCuMRes(BMTKleinbook, covs.x = c("tTransplant","hodgkin","karnofsky","waiting"),
#' # covs.y = c("tTransplant","hodgkin","karnofsky","waiting"),
#' # type.t = 2, K = 72, length = 30,
#' # alpha = rep(2,72), beta = rep(2,72), c.r = rep(50, 71), type.c = 2,
#' # var.delta.str = .1, var.theta.str = 1,
#' # var.delta.ini = 100, var.theta.ini = 100,
#' # iterations = 100, burn.in = 10, thinning = 1)
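#' #
#' # ## Illustrative note (not part of the original example): CCuMRes() has no 'length'
#' # ## argument; with type.t = 2 the partition is supplied through 'utao', e.g. a
#' # ## hypothetical 30-unit grid such as utao = seq(0, 2160, by = 30) that reaches at
#' # ## least the maximum observed time.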
#'
#'
#'
#' @export CCuMRes
CCuMRes <-
function(data, covs.x = names(data)[seq.int(3,ncol(data))],
covs.y = names(data)[seq.int(3,ncol(data))],
type.t = 3, K = 50, utao = NULL, alpha = rep(0.01, K),
beta = rep(0.01, K), c.r = rep(0, K - 1), c.nu = 1,
var.theta.str = 25, var.delta.str = 25, var.theta.ini = 100, var.delta.ini = 100,
type.c = 4, a.eps = 0.1, b.eps = 0.1, epsilon = 1, iterations = 5000,
burn.in = floor(iterations * 0.2), thinning = 3, printtime = TRUE) {
tInit <- proc.time()
data <- tibble::as_tibble(data)
writeLines(c(sprintf("Using %s as times and %s as delta, status indicator.",names(data)[1], names(data)[2]),
"The other variables are used as covariables"))
times <- as.numeric(dplyr::pull(data, 1))
delta <- as.numeric(dplyr::pull(data, 2))
covar <- dplyr::select(data, -c(1, 2))
covar2 <- covar
median.obs <- covar %>% dplyr::summarise(dplyr::across(dplyr::everything(),~quantile(x = .x, probs = .5)))
k.const <- as.numeric(covar %>% dplyr::summarise_all(.funs = ~max(abs(.x))))
covar %<>% purrr::modify2(.y = k.const, .f = ~.x/.y)
median.obs.x <- dplyr::select(median.obs, !!covs.x)
median.obs.y <- dplyr::select(median.obs, !!covs.y)
covs.x <- as.matrix(dplyr::select(covar, !!covs.x))
covs.y <- as.matrix(dplyr::select(covar, !!covs.y))
covar <- as.matrix(covar)
if (min(times) < 0) {
stop ("Invalid argument: 'times' must be a nonnegative vector.")
}
if (min((delta == 0) + (delta == 1 )) == 0) {
stop ("Invalid argument: 'delta' must have 0 - 1 entries.")
}
if (length(times) != length(delta)) {
stop ("Invalid argument: 'times' and 'delta' must have same length.")
}
if (type.t == 2) {
if(is.null(utao)) stop("If type.t = 2 you need to specify utao.")
utao <- sort(utao)
if(utao[1]!=0){
warning("The first value of the partition needs to be 0, utao fixed and now starting with 0.")
utao <- c(0, utao)
}
if(max(times) > max(utao)){
utao <- c(utao,max(times))
warning("The last value of the partition needs to be", max(times),", utao fixed and set to",max(times))
}
K <- length(utao) - 1
}
if (type.t == 1 || type.t == 3) {
if (inherits(try(K != 0, TRUE), "try-error")) {
K.aux <- 5
warning ("'K' value not specified. 'K' fixed at ", K.aux, ".")
} else {K.aux <- K}
K <- K.aux
}
tol <- .Machine$double.eps ^ 0.5
if (abs(type.t - round(type.t)) > tol || type.t < 1 || type.t > 3) {
stop ("Invalid argument: 'type.t' must be an integer between 1 and 3.")
}
if (K <= 2 || abs(K - round(K)) > tol) {
stop ("Invalid argument: 'K' must be an integer greater than 2.")
}
if (length(alpha) != K || length(beta) != K) {
stop (c("Invalid argument: 'alpha', 'beta', must have length "), K)
}
if (min(c(alpha, beta)) < 0) {
stop ("Invalid argument: 'alpha' and 'beta' must have nonnegative entries.")
}
if (abs(type.c - round(type.c)) > tol || type.c < 1 || type.c > 4) {
stop ("Invalid argument: 'type.c' must be an integer between 1 and 4.")
}
if (type.c == 1 || type.c == 2) {
if (length(c.r) != (K - 1)) {
stop (c("Invalid argument: 'c.r' must have length, ", K - 1))
}
if (sum(abs(c.r - round(c.r)) > tol) != 0 || min(c.r) < 0) {
stop ("Invalid argument: 'c.r' entries must be nonnegative integers.")
}
}
if (type.c == 1 && sum(abs(c.r)) != 0 ) {
c.r <- rep(0, K - 1)
warning (c("'c.r' redefined as rep0,", K - 1, ") because type.c = 1."))
}
if (type.c == 3 && epsilon < 0) {
stop ("Invalid argument: 'epsilon' must be nonnegative.")
}
if (iterations <= 0 || abs(iterations - round(iterations)) > tol
|| iterations < 50) {
stop ("Invalid argument: 'iterations' must be an integer greater than 50.")
}
if (burn.in < 0 || abs(burn.in - round(burn.in)) > tol
|| burn.in > iterations*0.9) {
stop ("Invalid argument: 'burn.in' must be a postitive integer smaller than
iterations = ", iterations * 0.9, ".")
}
if (!inherits(thinning, "numeric")) {
stop ("Invalid argument: 'thinning' must be a logical value.")
}
if (thinning <= 0 || abs(thinning - round(thinning)) > tol
|| thinning > 0.1 * iterations) {
stop ("Invalid argument: 'thpar' must be a postitive integer smaller than
iterations * 0.10 = ", iterations * 0.1, ".")
}
if (printtime != TRUE && printtime != FALSE) {
stop ("Invalid argument: 'printtime' must be a logical value.")
}
tao <- Tao(times, delta, type.t, K, utao)
t.unc <- sort(times[delta == 1])
n <- readr::parse_integer(as.character(table(cut(t.unc,tao))))
acceptance.c <- 0
if (type.c %in% c(3,4)) {
c.r <- rep(5, (K - 1))
Epsilon <- rep(NA, iterations)
}
p <- ncol(covs.x)
p2 <- ncol(covs.y)
acceptance.th <- rep(0,p)
acceptance.d <- rep(0,p2)
ind <- nrow(covs.x)
Theta <- matrix(NA, nrow = iterations, ncol = p)
Lambda <- matrix(NA, nrow = iterations, ncol = K)
U <- matrix(NA, nrow = iterations, ncol = K - 1)
C <- matrix(NA, nrow = iterations, ncol = K - 1)
Z <- matrix(NA, nrow = iterations, ncol = ind)
Delta <- matrix(NA, nrow = iterations, ncol = p)
k_i <- rep(1, length(times))
k_i[delta==1] <- as.numeric(cut(times[delta==1],tao,labels = seq_len(length(tao)-1),include.lowest = T,right = T))
z <- k_i
lambda.r <- rep(0.1, K)
theta <- rep(0, p)
delta.r <- rep(0, p2)
cat(paste("Iterating...", "\n"), sep = "")
pb <- dplyr::progress_estimated(iterations)
for(j in seq_len(iterations)) {
pb$tick()$print()
u.r <- UpdU(alpha, beta, c.r, lambda.r)
W <- CCuW(theta, times, K, covs.x, tao, ind)
IDW <- purrr::map2(W, .y = z, ~(seq_len(K) <= .y)*.x)
m <- purrr::reduce(IDW, `+`)
z <- CCuUpdZ(times,tao, lambda.r, W, delta.r, covs.y, k_i)
lambda.r <- UpdLambda(alpha, beta, c.r, u.r, n, m)
aux.th <- CCuUpdTheta(theta, lambda.r, times, delta, K, covs.x, tao, ind, z, var.theta.str, var.theta.ini, acceptance.th)
theta <- aux.th[[1]]
acceptance.th <- aux.th[[2]]
aux.d <- CCuUpdDelta(delta.r, covs.y, z, var.delta.str, var.delta.ini, acceptance.d)
delta.r <- aux.d[[1]]
acceptance.d <- aux.d[[2]]
if (type.c %in% c(3,4)) {
if (type.c == 4) {
epsilon <- rgamma(1, shape = a.eps + K, scale = 1 / (b.eps + sum(c.r)))
}
auxc.r <- GaUpdC(alpha, beta, c.r, lambda.r, u.r, epsilon, c.nu, acceptance.c)
c.r <- auxc.r[[1]]
acceptance.c <- auxc.r[[2]]
}
C[j, ] <- c.r
Lambda[j, ] <- lambda.r
U[j, ] <- u.r
Z[j, ] <- z
Theta[j, ] <- theta
Delta[j, ] <- delta.r
if (type.c == 4) Epsilon[j] <- epsilon
}
Lambda <- Lambda[seq(burn.in + 1, iterations, thinning), ]
U <- U[seq(burn.in + 1, iterations, thinning), ]
C <- C[seq(burn.in + 1, iterations, thinning), ]
Z <- Z[seq(burn.in + 1, iterations, thinning), ]
Theta <- Theta[seq(burn.in + 1, iterations, thinning), ]
Theta <- sweep(Theta, MARGIN=2,k.const, `/`)
Delta <- Delta[seq(burn.in + 1, iterations, thinning), ]
Delta <- sweep(Delta, MARGIN=2,k.const, `/`)
if (type.c == 4) Epsilon <- Epsilon[seq(burn.in + 1, iterations, thinning)]
rows <- nrow(Lambda)
aux.median.obs.x <- as.numeric(median.obs.x)
aux.median.obs.y <- as.numeric(median.obs.y)
writeLines(c("","Done.","Generating predictive values for Z por the median observation."))
z_median.obs <- purrr::map_int(purrr::map(seq_len(nrow(Delta)),
~exp(purrr::reduce(purrr::map2(.x = aux.median.obs.y, .y = Delta[.x,], .f = ~.x*.y), `+`))),
~{rpois(n = 1, lambda = .x)}) + 1
z_median.obs[z_median.obs>K] <- K
writeLines(c("", "Done.", "Generating predictive hazard rates for the median observation."))
Lambda.median.obs <- purrr::map_dfc(seq_len(ncol(Lambda)), ~ ((.x <= z_median.obs)*Lambda[,.x]))
X <- as.matrix(unname(Lambda.median.obs))
writeLines(c("","Done.","Generating cure rate for the median observation."))
pb <- dplyr::progress_estimated(rows)
Pi.m <- do.call(base::c, purrr::map(seq_len(rows),
.f = ~ {
pb$tick()$print()
exp(-sum(exp(sum(Theta[.x,] * aux.median.obs.x)) * (tao[-1] - tao[-length(tao)]) *
Lambda.median.obs[.x,])
)}))
writeLines(c("","Done.","Generating survival function estimates of the median observation."))
ss <- max(tao) * seq.int(0, 100) / 100
pb <- dplyr::progress_estimated(length(ss))
S <- purrr::map_dfc(ss, function(s = .x){
pb$tick()$print()
do.call(base::c, purrr::map(seq_len(rows), .f= ~exp(-sum((s > tao[-1]) * tao[-1] * X[.x,] * exp(sum(Theta[.x,] * aux.median.obs.x)) +
(s > tao[-length(tao)] & s <= tao[-1]) * s * X[.x,] * exp(sum(Theta[.x,] * aux.median.obs.x)) -
(s > tao[-length(tao)]) * tao[-(length(tao))] * X[.x,] * exp(sum(Theta[.x,] * aux.median.obs.x)))
)))
})
S <- purrr::map(.x = 1, ~S)
eff <- as.numeric(exp(Theta%*%aux.median.obs.x))
Lambda.median.obs <- dplyr::mutate_all(Lambda.median.obs,.f = ~.x*eff)
Lambda.median.obs <- purrr::map(.x = 1, ~Lambda.median.obs)
cat(c("\n","Done.", "\n"), sep = "")
if (printtime) {
cat(">>> Total processing time (sec.):\n")
print(procTime <- proc.time() - tInit)
}
if(type.c == 4) {
X = list(Lambda = tibble::as_tibble(Lambda), Lambda.m = Lambda.median.obs,
U = U, C = C, Theta = Theta, Delta = Delta, Z.m = z_median.obs, Pi.m = Pi.m, Epsilon = Epsilon)} else {
X = list(Lambda = tibble::as_tibble(Lambda), Lambda.m = Lambda.median.obs, U = U, C = C, Theta = Theta, Delta = Delta, Z = Z, Z.m = z_median.obs, Pi.m = Pi.m)
}
X <- tibble::enframe(X)
out <- tibble::enframe(list(times = times, delta = delta, data = covar2, covs.x = covs.x, covs.y = covs.y, type.t = type.t,
tao = tao, K = K, t.unc = t.unc, iterations = rows, burn.in = burn.in, thinning = thinning,
acceptance = tibble::enframe(list(a.d = acceptance.d/iterations, a.th = acceptance.th/iterations, a.c = acceptance.c/((K-1)*iterations))),
simulations = X, p = p, s = ss, S = S))
return(out)
}
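# Note (illustrative sketch, not part of the original source): CCuMRes() returns an
# enframed list, i.e. a two-column tibble with a 'name' column and a list 'value' column,
# and the 'simulations' element is itself enframed. Assuming a fitted object 'res'
# (hypothetical), individual pieces can be pulled out with base subsetting, which is what
# the plotting functions do through the internal extract() helper:
#   sims  <- res$value[[which(res$name == "simulations")]]
#   Theta <- sims$value[[which(sims$name == "Theta")]]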
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CCuMRes.R
|
#' Diagnostics plots for Lambda, Theta, Delta,
#' U, C, Pi, Z and Epsilon. Hazard function, cure proportion and cure time for the median observation.
#'
#' Diagnostic plots for the hazard rate (Lambda), the regression parameters for the
#' hazard (Theta), the regression parameters for the cure rate (Delta), the latent
#' variable (U), the dependence parameter (C), the cure proportion (Pi), the cure
#' threshold (Z), the parameter of the hierarchical prior (Epsilon), and their
#' median-observation counterparts (Lambda.m, Pi.m, Z.m).
#'
#' This function returns a diagnostics plot with which the chain of the selected
#' variable can be monitored. Diagnostics include trace, ergodic mean,
#' autocorrelation function and histogram.
#'
#' @param M tibble. Contains the output by
#' \code{CCuMRes}.
#' @param variable Either "Lambda", "U", "C", "Mu", "Pi", "Z" or "Epsilon".
#' Variable for which diagnostic plot will be shown.
#' @param pos Positive integer. Position of the selected \code{variable} to be
#' plotted.
#' @seealso \link{CCuMRes}
#' @references Nieto-Barajas, L. E., & Yin, G. (2008). Bayesian semiparametric
#' cure rate model with an unknown threshold. \emph{Scandinavian Journal of
#' Statistics}, \strong{35(3)}, 540-556.
#' https://doi.org/10.1111/j.1467-9469.2007.00589.x
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' # data(BMTKleinbook)
#' # res <- CCuMRes(BMTKleinbook, covs.x = c("tTransplant","hodgkin","karnofsky","waiting"),
#' # covs.y = c("tTransplant","hodgkin","karnofsky","waiting"),
#' # type.t = 2, K = 72, length = 30,
#' # alpha = rep(2,72), beta = rep(2,72), c.r = rep(50, 71), type.c = 2,
#' # var.delta.str = .1, var.theta.str = 1,
#' # var.delta.ini = 100, var.theta.ini = 100,
#' # iterations = 100, burn.in = 10, thinning = 1)
#' # CCuPlotDiag(M = res, variable = "Z")
#' # CCuPlotDiag(M = res, variable = "Pi.m")
#' # CCuPlotDiag(M = res, variable = "Lambda", pos = 2)
#' # CCuPlotDiag(M = res, variable = "U", pos = 4)
#'
#'
#'
#'
#' @export CCuPlotDiag
CCuPlotDiag <-
function(M, variable = "Lambda", pos = 1) {
variable <- match.arg(variable,c("Lambda","Lambda.m","U","C","Theta","Delta","Pi.m","Pi","Z","Z.m","Epsilon"))
K <- extract(M, "K")
if (pos < 0 || pos > K ) {
stop ("Invalid position.")
}
if (pos > (K - 1) && (variable == "U" || variable == "C")) {
stop ("Invalid position.")
}
if (pos > (K) && (variable == "Z" || variable == "Pi")) {
stop ("Invalid observation")
}
if (!("Epsilon" %in% (dplyr::pull(extract(M, c("simulations")), name))) && variable == "Epsilon"){
stop("Plots for 'epsilon' are not available.")
}
if (variable == "Epsilon" && pos != 1) {
warning("'epsilon' has only one entry (1). Graphics shown for epsilon_1.")
pos <- 1
}
if (variable == "Z.m" && pos != 1) {
warning("'Z.m' has only one entry (1). Graphics shown for Z.m_1.")
pos <- 1
}
if (variable == "Pi.m" && pos != 1) {
warning("'Pi.m' has only one entry (1). Graphics shown for Pi.m_1.")
pos <- 1
}
MAT <- extract(M,c("simulations",variable))
if(variable %in% c("Lambda.m")){
MAT <- rlang::set_names(dplyr::select(MAT[[1]],pos), "V1")
} else{
MAT <- rlang::set_names(dplyr::select(tibble::as_tibble(MAT),pos), "V1")
}
var <- switch(variable, Lambda = expression(lambda),
Lambda.m = expression(lambda[median]),
Pi.m = expression(pi),
Epsilon = expression(epsilon),
Theta = expression(theta),
Delta = expression(delta),
Z.m = expression(Z[median]),
Pi = "Pi",
Z = "Z",
U = "U",
C = "C")
title <- paste0("Position: ", pos)
a <- ggplot2::ggplot(MAT) + ggplot2::geom_line(ggplot2::aes(x=seq_len(nrow(MAT)), y = V1), color = "slateblue4") +
ggplot2::labs(x = "Iteration", y = variable) + ggplot2::ylab(var) + ggplot2::ggtitle("Trace")+
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
b <- ggplot2::ggplot(MAT) + ggplot2::geom_line(ggplot2::aes(x=seq_len(nrow(MAT)), y = cumsum(V1)/seq_len(nrow(MAT))), color = "slateblue4") +
ggplot2::labs(x = "Iteration", y = variable) + ggplot2::ggtitle("Ergodic mean") +
ggplot2::ylab(var) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
acf.aux <- acf(MAT, plot = F)
c <- ggplot2::ggplot(tibble::as_tibble(cbind(acf.aux$lag, acf.aux$acf))) +
ggplot2::geom_segment(ggplot2::aes(x = V1, xend = V1, y = V2, yend = 0)) +
ggplot2::labs(x = "Lag", y ="ACF")+
ggplot2::ggtitle("Autocorrelation function") +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
d <- ggplot2::ggplot(MAT) + ggplot2::geom_histogram(ggplot2::aes(x = V1), fill = "lightblue", color = "black", bins = 30) +
ggplot2::ggtitle("Histogram") + ggplot2::xlab(var) + ggplot2::ylab("") +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
gridExtra::grid.arrange(a,b,c,d, top = title)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CCuPlotDiag.R
|
#' Plots for the Hazard and Survival Function Estimates
#'
#' Plots the resulting hazard function and the survival function
#' estimates defined by the Bayesian semiparametric cure rate model with
#' an unknown threshold (Nieto-Barajas & Yin, 2008).
#'
#' This function returns plots of the hazard rate estimates computed
#' by \link{CCuMRes} and marks the cure time (the user-specified quantile of Tao),
#' together with credible intervals. Additionally, it plots the survival function
#' and the cure proportion estimates with their corresponding credible intervals.
#'
#' @param M tibble. Contains the output generated by \code{CCuMRes}.
#' @param new_obs tibble. Contains the covariate information for new observations.
#' @param type.h character. "segment"= use segments to plot hazard rates,
#' "line" = link hazard rates by a line
#' @param qn Numeric. Quantile for Tao (cure time) that should be visualized on the plot.
#' @param intervals logical. If TRUE, plots credible intervals.
#' @param confidence Numeric. Confidence level.
#' @param summary Logical. If \code{TRUE}, a summary for the hazard and survival
#' functions is returned as a tibble.
#' @return \item{SUM.h}{Numeric tibble. Summary for the mean, median, and a
#' \code{confidence} level credible interval for each segment of the hazard
#' function. If \code{summary = TRUE}} \item{SUM.S}{Numeric tibble. Summary for
#' the mean, median, and a \code{confidence} level credible interval for a grid
#' of the survival function. If \code{summary = TRUE}}
#' @seealso \link{CCuMRes}
#' @references - Nieto-Barajas, L. E. (2003). Discrete time Markov gamma
#' processes and time dependent covariates in survival analysis. \emph{Bulletin
#' of the International Statistical Institute 54th Session}. Berlin. (CD-ROM).
#'
#' -Nieto-Barajas, L. E., & Yin, G. (2008). Bayesian semiparametric cure rate
#' model with an unknown threshold. \emph{Scandinavian Journal of Statistics},
#' \strong{35(3)}, 540-556. https://doi.org/10.1111/j.1467-9469.2007.00589.x
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' # data(BMTKleinbook)
#' # res <- CCuMRes(BMTKleinbook, covs.x = c("tTransplant","hodgkin","karnofsky","waiting"),
#' # covs.y = c("tTransplant","hodgkin","karnofsky","waiting"),
#' # type.t = 2, K = 72, length = 30,
#' # alpha = rep(2,72), beta = rep(2,72), c.r = rep(50, 71), type.c = 2,
#' # var.delta.str = .1, var.theta.str = 1,
#' # var.delta.ini = 100, var.theta.ini = 100,
#' # iterations = 100, burn.in = 10, thinning = 1)
#' #
#' # CCuPloth(res, type.h = "segment",qn=.5, summary = T)
#' #
#' # new_obs <- tibble(tTransplant=c(0,0,0,0),
#' # hodgkin=c(0,1,0,1),
#' # karnofsky=c(90,90,60,60),
#' # waiting=c(36,36,36,36)
#' # )
#' #
#' # ind <- CCuPloth(res, new_obs, qn = .5)
#' # ind
#'
#'
#'
#' @export CCuPloth
CCuPloth <-
function(M, new_obs = NULL, type.h= "segment",qn = 0.5, intervals = T,
confidence = 0.95, summary = FALSE) {
SUM <- CCuLambdaSumm(M, new = new_obs, confidence)
h <- extract(SUM, "SUM.h")
S <- extract(SUM, "SUM.S")
SUM.Z <- extract(SUM, "SUM.z")
Z <- extract(SUM, c("simulations","Z"))
SUM.Pi <- extract(SUM,"SUM.pi")
v <- rlang::set_names(purrr::map(list("tao",
"K"), ~extract(M,.x)),c("tao","K"))
tao <- v$tao
K <- v$K
ribbon <- purrr::map2(.x = SUM.Z,h,~tibble::tibble(x = seq(to = tao[dplyr::pull(.x, 4) + 1],
from = tao[dplyr::pull(.x, 2) + 1], by = 0.1),
y = max(.y$upper)))
if(type.h == "segment") h.graf <- purrr::pmap(list(h,S,SUM.Z,SUM.Pi,Z, ribbon,seq_along(h)),function(h,S,SUM.Z,SUM.Pi,Z,ribbon,ind){
if(is.null(new_obs)) tit <- "median observation" else{
tit <- sprintf("observation %s",ind)
}
out <- ggplot2::ggplot(h) +
ggplot2::geom_segment(ggplot2::aes(x = tao[-(K+1)], xend = tao[-1],
y = mean, yend = mean)) +
ggplot2::xlab("Time") + ggplot2::ylab("Hazard rate") + ggplot2::scale_alpha_continuous(guide = F) +
ggplot2::ggtitle(paste0("Estimate of hazard rates for ",tit," with intervals at ",confidence * 100,"% of credibility")) +
ggplot2::geom_vline(xintercept = round(tao[quantile(Z,qn)+1],2), linetype = "dotted") +
ggplot2::annotate("text",x = round(tao[dplyr::pull(SUM.Z, 4) + 1],2), y = max(h$upper),
label = paste0(expression(tau[z])," ==", round(tao[quantile(Z,qn)+1],2)),
hjust = -.1, vjust = 1,parse = T) +
ggplot2::annotate("text",x = round(tao[dplyr::pull(SUM.Z, 4) + 1],2), y = max(h$upper),
label = paste0(expression(pi)," == ", round(dplyr::pull(SUM.Pi, mean),2)),
hjust = -.1, vjust = 2.5,parse = T) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
if(intervals){
out <- out + ggplot2::geom_errorbar(ggplot2::aes(ymin = lower, ymax = upper, x = (tao[-(K+1)] + tao[-1])/2, width = tao[-1]-tao[-(K+1)]),
alpha = 0.5, color = "gray50") +
ggplot2::geom_ribbon(data = ribbon, ggplot2::aes(x= x, ymin = 0, ymax = y),
alpha = .1, fill = "red")
}
return(out)
})
if(type.h == "line") h.graf <- purrr::pmap(list(h,S,SUM.Z,SUM.Pi,Z, ribbon,seq_along(h)),function(h,S,SUM.Z,SUM.Pi,Z,ribbon,ind){
if(is.null(new_obs)) tit <- "median observation" else{
tit <- sprintf("observation %s",ind)
}
out <- ggplot2::ggplot(h) +
ggplot2::geom_line(ggplot2::aes(x = (tao[-(K+1)] + tao[-1])/2, y = mean)) +
ggplot2::xlab("Time") + ggplot2::ylab("Hazard rate") + ggplot2::scale_alpha_continuous(guide = F) +
ggplot2::ggtitle(paste0("Estimate of hazard rates for ",tit," with intervals at ",confidence * 100,"% of credibility")) +
ggplot2::geom_vline(xintercept = round(tao[quantile(Z,qn)+1],2), linetype = "dotted") +
ggplot2::annotate("text",x = round(tao[dplyr::pull(SUM.Z, 4) + 1],2), y = max(h$upper),
label = paste0(expression(tau[z])," ==", round(tao[quantile(Z,qn)+1],2)),
hjust = -.1, vjust = 1,parse = T) +
ggplot2::annotate("text",x = round(tao[dplyr::pull(SUM.Z, 4) + 1],2), y = max(h$upper),
label = paste0(expression(pi)," == ", round(dplyr::pull(SUM.Pi, mean),2)),
hjust = -.1, vjust = 2.5,parse = T) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
if(intervals){
out <- out + ggplot2::geom_ribbon(ggplot2::aes(x = (tao[-(K+1)] + tao[-1])/2, ymin = lower, ymax = upper), alpha = .5, fill = "gray70") +
ggplot2::geom_ribbon(data = ribbon, ggplot2::aes(x= x, ymin = 0, ymax = y),
alpha = .1, fill = "red")
}
return(out)
})
S.graf <- purrr::pmap(list(h,S,SUM.Z,SUM.Pi,Z, ribbon,seq_along(h)),function(h,S,SUM.Z,SUM.Pi,Z,ribbon,ind){
if(is.null(new_obs)) tit <- "median observation" else{
tit <- sprintf("observation %s",ind)
}
out <- ggplot2::ggplot(S) + ggplot2::geom_line(ggplot2::aes(x = t, y = `S^(t)`)) +
ggplot2::scale_y_continuous(limits = c(0,1)) +
ggplot2::ggtitle(paste0("Estimate of Survival Function for ",tit," with intervals at ", confidence * 100,"% of credibility")) +
ggplot2::labs(x = "t",
y = expression(S^{(t)})) +
ggplot2::geom_vline(xintercept = round(tao[quantile(Z,qn)+1],2), linetype = "dotted") +
ggplot2::geom_hline(yintercept = round(dplyr::pull(SUM.Pi, mean),4), linetype = "dotted") +
ggplot2::annotate("text",x = round(tao[dplyr::pull(SUM.Z, 4) + 1],2), y = 1,
label = paste0(expression(tau[z])," ==", round(tao[quantile(Z,qn)+1],2)),
hjust = -.1, vjust = 1,parse = T) +
ggplot2::annotate("text",x = 0, y = round(dplyr::pull(SUM.Pi, mean),4),
label = paste0(expression(pi)," == ", round(dplyr::pull(SUM.Pi, mean),4)),
hjust = 0, vjust = -2.5,parse = T) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
if(intervals){
out <- out + ggplot2::geom_ribbon(ggplot2::aes(x = t, ymin = lower, ymax = upper), fill = "gray50", alpha = 0.3) +
ggplot2::geom_ribbon(data = ribbon, ggplot2::aes(x= x, ymin = 0, ymax = 1),
alpha = .1, fill = "red")
}
return(out)
})
if (summary == TRUE) {
return(list(h.graf,S.graf, SUM))
} else{
return(list(h.graf, S.graf))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CCuPloth.R
|
CCuUpdDelta <-
function(delta, y , z, var.delta.str, var.delta.ini, acceptance.d) {
delta.upd <- delta
p <- length(delta)
for(s in seq_len(p)){
delta.str <- delta.upd
delta.str[s] <- rnorm(1, mean = delta.upd[s], sd = sqrt(var.delta.str))
pr <- dnorm(delta.str[s], mean = 0, sd = sqrt(var.delta.ini), log = T) -
dnorm(delta.upd[s], mean = 0, sd = sqrt(var.delta.ini), log = T) +
sum((delta.str[s] - delta.upd[s]) * (y[, s]) * (z - 1) -
(exp(unname(purrr::map_dbl(tibble::as_tibble(t(y),.name_repair = "minimal"),~sum(.x*delta.str))))) +
(exp(unname(purrr::map_dbl(tibble::as_tibble(t(y),.name_repair = "minimal"),~sum(.x*delta.upd)))))
)
if(log(runif(1)) <= pr) {
delta.upd <- delta.str
acceptance.d[s] <- acceptance.d[s] + 1
}
}
return(list(delta.upd,acceptance.d))
}
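# Note: coordinate-wise random-walk Metropolis step for delta. 'pr' is the log acceptance
# ratio combining the N(0, var.delta.ini) prior with the likelihood implied by the
# cure-threshold regression, in which z_i - 1 follows a Poisson distribution with
# log-mean y_i' delta; proposals are drawn from N(delta_s, var.delta.str).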
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CCuUpdDelta.R
|
CCuUpdTheta <-
function(theta, lambda.r, times, delta, K, covar, tao, ind, z, var.theta.str, var.theta.ini, acceptance.th) {
p <- length(theta)
theta.upd <- theta
m.upd <- purrr::reduce(purrr::map2(CCuW(theta.upd, times, K, covar, tao, ind), .y = z, ~(seq_len(K) <= .y)*.x), `+`)
for(s in 1:p){
theta.str <- theta.upd
theta.str[s] <- rnorm(1, mean = theta.upd[s], sd = sqrt(var.theta.str))
m.str <- purrr::reduce(purrr::map2(CCuW(theta.str, times, K, covar, tao, ind),
.y = z, ~(seq_len(K) <= .y)*.x), `+`)
pr <- dnorm(theta.str[s], mean = 0, sd = sqrt(var.theta.ini), log = T) -
dnorm(theta.upd[s], mean = 0, sd = sqrt(var.theta.ini), log = T) +
(theta.str[s] - theta.upd[s]) * sum(covar[delta==1, s]) +
sum(lambda.r * (m.upd - m.str))
if(log(runif(1)) <= pr) {
theta.upd <- theta.str
m.upd <- m.str
acceptance.th[s] <- acceptance.th[s] + 1
}
}
return(list(theta.upd, acceptance.th))
}
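# Note: random-walk Metropolis step for the proportional-hazards coefficients theta. The
# log ratio 'pr' combines the N(0, var.theta.ini) prior, the sum of the s-th covariate
# over uncensored subjects, and sum_k lambda_k * (m_k - m*_k), where m_k is the
# covariate-weighted exposure in interval k accumulated up to each subject's threshold z_i
# (computed via CCuW).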
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CCuUpdTheta.R
|
CCuUpdZ <- function(times, tao, lambda, W, delta.r, y, k_i){
K <- length(lambda)
z <- purrr::map_int(
purrr::map2(
purrr::map2(.x = purrr::map(W, ~-cumsum(.x*lambda)),
.y = purrr::map(seq_len(length(times)), ~ -exp(sum(delta.r * y[.x,])) + (seq_len(K) - 1) * sum(delta.r * y[.x,]) - lgamma(seq_len(K))),
.f = ~exp(.x + .y)
),
.y = k_i, ~(seq_len(K) >= .y) *.x
),
~sample(x = seq_len(K), size = 1, prob = .x))
return(z)
}
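# Note: each threshold z_i is drawn from its discrete full conditional on {k_i, ..., K},
# with weights proportional to exp(-sum_{k <= z} lambda_k * W_ik) times the
# Poisson(exp(y_i' delta)) mass at z - 1, where k_i is the interval containing the
# failure time for uncensored subjects (and 1 otherwise).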
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CCuUpdZ.R
|
CCuW <- function(theta, times, K, covar, tao, ind) {
w <- purrr::map2(times, .y = seq_len(ind),
.f= ~ (.x > tao[-1]) * as.vector(exp(sum(theta * covar[.y, ]))) * (tao[-1]) +
(.x > tao[-(K+1)] & .x <= tao[-1]) * as.vector(exp(theta %*% covar[.y, ])) * .x -
(.x > tao[-(K+1)]) * as.vector(exp(theta %*% covar[.y, ])) * tao[-(K+1)])
return(w)
}
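# Note: W[[i]][k] equals exp(x_i' theta) times the length of time subject i spends in the
# interval (tao_k, tao_{k+1}], i.e. the covariate-scaled exposure that multiplies lambda_k
# in the cumulative hazard.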
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CCuW.R
|
CGaLambdaSumm<-
function(M, new=NULL, confidence = 0.95) {
if (confidence <= 0 || confidence >= 1) {
stop ("Invalid parameter: confidence must be between 0 and 1.")
}
if(!is.null(new)){
if(ncol(new) != ncol(M %>% extract("data"))){
stop("Covariables doesn't match.")
}
if(!(match(names(new),M %>% extract("data") %>% names) %>% is.na %>% sum() == 0)){
stop(paste("Invalid colnames, should be", paste(M %>% extract("data") %>% names, collapse = ", ")))
}
}
v <- list("K",
"iterations",
"tao",
"data",
"s",
"S",
"S.m",
c("simulations","Lambda"),
c("simulations","Lambda.m"),
c("simulations","Theta")
) %>% purrr::map(~extract(M,.x)) %>% rlang::set_names(c("K","iterations","tao","data",
"s","S","S.m","Lambda","Lambda.m",
"Theta"))
tao <- v$tao
Lambda.b <- v$Lambda
Lambda.m <- v$Lambda.m
Theta <- v$Theta
Z <- v$Z.m %>% tibble::enframe() %>% dplyr::select(mean.obs = value)
Pi <- v$Pi.m %>% tibble::enframe() %>% dplyr::select(mean.obs = value)
S.b <- v$S
S.m <- v$S.m
Lambda.obs <- NULL
S.obs <- NULL
if(!is.null(new)) {
names <- names(v$data)
new <- tibble::as_tibble(new)
      new <- dplyr::select(new, !!! names)
Lambda.obs <- v$Lambda
Lambda.obs <- purrr::map2(.x = Lambda.obs, .y =seq_len(nrow(new)),function(a,b){
eff <- as.numeric(exp(Theta%*%as.numeric(new[b,])))
a <- dplyr::mutate_all(a,.f = ~.x*eff)
return(a)
})
writeLines("Generating survival function estimates for new observations.")
pb <- dplyr::progress_estimated(length(v$s))
S.obs <- do.call(dplyr::bind_cols, purrr::map(v$s, function(s = .x){
pb$tick()$print()
tibble::as_tibble(matrix(data = purrr::map_dbl(purrr::map(purrr::cross2(seq_len(v$iterations),purrr::map2(Lambda.obs, tibble::as_tibble(t(new)),~list(.x,.y))),
.f= ~(s > tao[-1]) * tao[-1] * as.numeric(.x[[2]][[1]][.x[[1]],]) +
(s > tao[-length(tao)] & s <= tao[-1]) * s * as.numeric(.x[[2]][[1]][.x[[1]],]) -
(s > tao[-length(tao)]) * tao[-(length(tao))] * as.numeric(.x[[2]][[1]][.x[[1]],])
), ~exp(-sum(.x))),
ncol = nrow(new), byrow = F))
}))
S.obs <- purrr::map(seq_len(nrow(new)),
~S.obs[,seq(.x, (nrow(new))*length(v$s), nrow(new))])
cat("\n Done.")
}
pr <- (1 - confidence) / 2
Lambda <- c(Lambda.b,Lambda.m,Lambda.obs)
SUM.h <- purrr::map(Lambda, ~rlang::set_names(tibble::tibble(a=seq_len(v$K),
b=purrr::map_dbl(.x, mean,na.rm = T),
c=purrr::map_dbl(.x, quantile, probs = pr, na.rm = T),
d=purrr::map_dbl(.x, quantile, probs = 0.5, na.rm = T),
e=purrr::map_dbl(.x, quantile, probs = 1 - pr, na.rm = T)
),
c("k", "mean", "lower", "median", "upper")))
S <- c(S.b,S.m,S.obs)
SUM.S <- purrr::map(S, ~ rlang::set_names(tibble::tibble(a=v$s,
b=purrr::map_dbl(.x, mean, na.rm = T),
c=purrr::map_dbl(.x, quantile, probs = pr, na.rm = T),
d=purrr::map_dbl(.x, quantile, probs = 0.5, na.rm = T),
e=purrr::map_dbl(.x, quantile, probs = 1-pr, na.rm = T)),
c("t", "S^(t)", "lower", "median", "upper")))
    out <- tibble::enframe(list(SUM.h = SUM.h, SUM.S = SUM.S, K = nrow(new)))
    return(out)
  }
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CGaLambdaSumm.R
|
CGaM <-
function(times, tao, K, covar, theta) {
m <- rep(0, K)
for(i in 1:length(times)) {
for(k in 1:K) {
if (tao[k + 1] < times[i]) {
m[k] <- m[k] + (tao[k + 1] - tao[k]) * exp(theta %*% covar[i, ])
}
if (tao[k] < times[i] && times[i] <= tao[k + 1]) {
m[k] <- m[k] + (times[i] - tao[k]) * exp(theta %*% covar[i, ])
}
}
}
return(m)
}
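# Note: CGaM() returns, for each interval k,
#   m_k = sum_i exp(theta' x_i) * (time subject i spends in (tao_k, tao_{k+1}]),
# the covariate-weighted exposure entering the gamma full conditional of lambda_k.
# An equivalent vectorised sketch (illustrative only, not used by the package):
#   expos <- outer(times, tao[-1], pmin) - outer(times, tao[-length(tao)], pmin)
#   m     <- as.numeric(t(expos) %*% exp(covar %*% theta))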
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CGaM.R
|
#' Markov Gamma Model with Covariates
#'
#' Posterior inference for the Bayesian non-parametric Markov gamma model with
#' covariates in survival analysis.
#'
#' Computes the Gibbs sampler with the full conditional distributions of
#' Lambda and Theta (Nieto-Barajas, 2003) and arranges the resulting Markov
#' chain into a tibble which can be used to obtain posterior summaries. Prior
#' distributions for the regression coefficients (Theta) are assumed to be independent normals
#' with zero mean and variance \code{var.theta.ini}.
#'
#' @param data Double tibble. Contains failure times in the first column,
#' status indicator in the second, and, from the third to the last column, the
#' covariate(s).
#' @param type.t Integer. 1=computes uniformly-dense intervals; 2=intervals of the
#' length given by \code{length}; 3=same-length intervals.
#' @param length Integer. Interval length of the partition.
#' @param K Integer. Partition length for the hazard function.
#' @param alpha Nonnegative entry vector. Small entries are recommended in
#' order to specify a non-informative prior distribution.
#' @param beta Nonnegative entry vector. Small entries are recommended in order
#' to specify a non-informative prior distribution.
#' @param c.r Nonnegative vector. The higher the entries, the higher the correlation of
#' two consecutive intervals.
#' @param c.nu Tuning parameter for the proposal distribution for c.
#' @param var.theta.str Double. Variance of the proposal normal distribution
#' for theta in the Metropolis-Hastings step.
#' @param var.theta.ini Double. Variance of the prior normal distribution for theta.
#' @param a.eps Double. Shape parameter for the prior gamma distribution of
#' epsilon when \code{type.c = 4}.
#' @param b.eps Double. Scale parameter for the prior gamma distribution of
#' epsilon when \code{type.c = 4}.
#' @param type.c 1=defines \code{c.r} as a zero-entry vector; 2=lets the user
#' define \code{c.r} freely; 3=assigns \code{c.r} by computing an exponential
#' prior distribution with mean \code{epsilon}; 4=assigns \code{c.r} an exponential hierarchical
#' distribution with mean \code{epsilon} which in turn has a Ga(a.eps, b.eps)
#' distribution.
#' @param epsilon Double. Mean of the exponential distribution assigned to
#' \code{c.r} when \code{type.c = 3}.
#' @param iterations Integer. Number of iterations including the \code{burn.in}
#' to be computed for the Markov chain.
#' @param burn.in Integer. Length of the burn-in period for the Markov chain.
#' @param thinning Integer. Factor by which the chain will be thinned. Thinning
#' the Markov chain reduces autocorrelation.
#' @param printtime Logical. If \code{TRUE}, prints out the execution time.
#' @note It is recommended to verify chain's stationarity. This can be done by
#' checking each element individually. See \link{CGaPlotDiag}
#' To obtain posterior summaries of the coefficients use function
#' \link{CGaPloth}.
#' @seealso \link{CGaPlotDiag}, \link{CGaPloth}
#' @references - Nieto-Barajas, L. E. (2003). Discrete time Markov gamma
#' processes and time dependent covariates in survival analysis. \emph{Bulletin
#' of the International Statistical Institute 54th Session}. Berlin. (CD-ROM).
#'
#' - Nieto-Barajas, L. E. & Walker, S. G. (2002). Markov beta and gamma
#' processes for modelling hazard rates. \emph{Scandinavian Journal of
#' Statistics} \strong{29}: 413-424.
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' # data(leukemiaFZ)
#' # leukemia1 <- leukemiaFZ
#' # leukemia1$wbc <- log(leukemiaFZ$wbc)
#' # CGEX1 <- CGaMRes(data = leukemia1, K = 10, iterations = 100, thinning = 1)
#'
#' ## Example 2. Refer to "Cox-gamma model example" section in package vignette for details.
#' # SampWeibull <- function(n, a = 10, b = 1, beta = c(1, 1)) {
#' # M <- tibble(i = seq(n), x_i1 = runif(n), x_i2 = runif(n),
#' # t_i = rweibull(n, shape = b,
#' # scale = 1 / (a * exp(x_i1*beta[1] + x_i2*beta[2]))),
#' # c_i = rexp(n), delta = t_i > c_i,
#' # `min{c_i, d_i}` = min(t_i, c_i))
#' # return(M)
#' # }
#' # dat <- SampWeibull(100, 0.1, 1, c(1, 1))
#' # dat <- dat %>% select(4,6,2,3)
#' # CG <- CGaMRes(data = dat, K = 10, iterations = 100, thinning = 1)
#' # CGaPloth(CG)
#'
#'
#'
#' @export CGaMRes
CGaMRes <-
function(data, type.t = 2, length = 1, K = 5, alpha = rep(0.01, K),
beta = rep(0.01, K), c.r = rep(1, K - 1), c.nu = 1,
var.theta.str = 25, var.theta.ini = 100,
a.eps = 0.1, b.eps = 0.1,
type.c = 4, epsilon = 1, iterations = 1000,
burn.in = floor(iterations * 0.2), thinning = 3, printtime = TRUE) {
tInit <- proc.time()
data <- tibble::as_tibble(data)
print(sprintf("Using %s as times and %s as delta, status indicator. The other variables are used as covariables",names(data)[1], names(data)[2]))
times <- as.numeric(dplyr::pull(data, 1))
delta <- as.numeric(dplyr::pull(data, 2))
covar <- dplyr::select(data, -c(1, 2))
covar2 <- covar
median.obs <- purrr::map_df(covar, ~quantile(x = .x, probs = .5))
k.const <- as.numeric(covar %>% dplyr::summarise_all(.funs = ~max(abs(.x))))
covar %<>% purrr::modify2(.y = k.const, .f = ~.x/.y)
covar <- as.matrix(covar)
if (min(times) < 0) {
stop ("Invalid argument: 'times' must be a nonnegative vector.")
}
if (min((delta == 0) + (delta == 1 )) == 0) {
stop ("Invalid argument: 'delta' must have 0 - 1 entries.")
}
if (length(times) != length(delta)) {
stop ("Invalid argument: 'times' and 'delta' must have same length.")
}
  if (type.t == 2) {
    if (is.null(length) || length <= 0) {
      stop ("If type.t = 2 you need to specify a positive interval 'length'.")
    }
  }
if (type.t == 1 || type.t == 3) {
if (inherits(try(K != 0, TRUE), "try-error")) {
K.aux <- 5
warning ("'K' value not specified. 'K' fixed at ", K.aux, ".")
} else {K.aux <- K}
K <- K.aux
}
tol <- .Machine$double.eps ^ 0.5
if (abs(type.t - round(type.t)) > tol || type.t < 1 || type.t > 3) {
stop ("Invalid argument: 'type.t' must be an integer between 1 and 3.")
}
if (K <= 2 || abs(K - round(K)) > tol) {
stop ("Invalid argument: 'K' must be an integer greater than 2.")
}
if (length(alpha) != K || length(beta) != K) {
stop (c("Invalid argument: 'alpha', 'beta', must have length "), K)
}
if (min(c(alpha, beta)) < 0) {
stop ("Invalid argument: 'alpha' and 'beta' must have nonnegative entries.")
}
if (abs(type.c - round(type.c)) > tol || type.c < 1 || type.c > 4) {
stop ("Invalid argument: 'type.c' must be an integer between 1 and 4.")
}
if (type.c == 1 || type.c == 2) {
if (length(c.r) != (K - 1)) {
stop (c("Invalid argument: 'c.r' must have length, "), K - 1)
}
if (sum(abs(c.r - round(c.r)) > tol) != 0 || min(c.r) < 0) {
stop ("Invalid argument: 'c.r' entries must be nonnegative integers.")
}
}
if (type.c == 1 && sum(abs(c.r)) != 0 ) {
c.r <- rep(0, K - 1)
warning (c("'c.r' redefined as rep(0,", K - 1, ") because type.c = 1."))
}
if ((type.c == 3 || type.c == 4) && epsilon < 0) {
stop ("Invalid argument: 'epsilon' must be nonnegative.")
}
if (iterations <= 0 || abs(iterations - round(iterations)) > tol
|| iterations < 50) {
stop ("Invalid argument: 'iterations' must be an integer greater than 50.")
}
if (burn.in < 0 || abs(burn.in - round(burn.in)) > tol
|| burn.in > iterations*0.9) {
stop ("Invalid argument: 'burn.in' must be a postitive integer smaller than
iterations = ", iterations * 0.9, ".")
}
if (!inherits(thinning, "numeric")) {
stop ("Invalid argument: 'thinning' must be a numeric value.")
}
if (thinning <= 0 || abs(thinning - round(thinning)) > tol
|| thinning > 0.1 * iterations) {
stop ("Invalid argument: 'thinning' must be a postitive integer smaller than
iterations * 0.10 = ", iterations * 0.1, ".")
}
if (printtime != TRUE && printtime != FALSE) {
stop ("Invalid argument: 'printtime' must be a logical value.")
}
tao <- Tao(times, delta, type.t, K, length)
t.unc <- sort(times[delta==1])
n <- readr::parse_integer(as.character(table(cut(t.unc,tao))))
if (type.c == 3) {
c.r <- rep(5, (K - 1))
}
if (type.c == 4) {
Epsilon <- rep(NA, iterations)
}
p <- ncol(covar)
acceptance.th <- rep(0,p)
acceptance.c <- 0
Theta <- matrix(NA, nrow = iterations, ncol = p)
Lambda <- matrix(NA, nrow = iterations, ncol = K)
U <- matrix(NA, nrow = iterations, ncol = K - 1)
C <- matrix(NA, nrow = iterations, ncol = K - 1)
lambda.r <- rep(0.1, K)
theta <- rep(1, p)
cat(paste("Iterating...", "\n"), sep = "")
pb <- dplyr::progress_estimated(iterations)
covar <- as.matrix(covar)
for(j in seq_len(iterations)) {
pb$tick()$print()
u.r <- UpdU(alpha, beta, c.r, lambda.r)
m <- CGaM(times, tao, K, covar, theta)
lambda.r <- UpdLambda(alpha, beta, c.r, u.r, n, m)
aux.th <- CUpdTheta(theta, m, lambda.r, times, delta, K, covar, tao, var.theta.str, var.theta.ini, acceptance.th)
theta <- aux.th[[1]]
acceptance.th <- aux.th[[2]]
if (type.c == 3 || type.c == 4) {
if (type.c == 4) {
epsilon <- rgamma(1, shape = a.eps + K, scale = 1 / (b.eps + sum(c.r)))
}
auxc.r <- GaUpdC(alpha, beta, c.r, lambda.r, u.r, epsilon, c.nu, acceptance.c)
c.r <- auxc.r[[1]]
acceptance.c <- auxc.r[[2]]
}
Lambda[j, ] <- lambda.r
U[j, ] <- u.r
C[j, ] <- c.r
if (type.c == 4) Epsilon[j] <- epsilon
Theta[j, ] <- theta
}
Lambda <- Lambda[seq(burn.in + 1, iterations, thinning), ]
U <- U[seq(burn.in + 1, iterations, thinning), ]
C <- C[seq(burn.in + 1, iterations, thinning), ]
if (type.c == 4) Epsilon <- Epsilon[seq(burn.in + 1, iterations, thinning)]
Theta <- Theta[seq(burn.in + 1, iterations, thinning), ]
Theta <- sweep(Theta, MARGIN=2,k.const, `/`)
rows <- nrow(Lambda)
aux.median.obs <- as.numeric(median.obs)
Lambda.median.obs <- tibble::as_tibble(Lambda)
eff <- as.numeric(exp(Theta%*%aux.median.obs))
Lambda.median.obs <- dplyr::mutate_all(Lambda.median.obs,.f = ~.x*eff)
ss <- max(tao) * seq.int(0,100) / 100
X <- as.matrix(unname(Lambda))
writeLines(c("","Done.","Generating baseline survival function estimates."))
pb <- dplyr::progress_estimated(length(ss))
S <- purrr::map(ss, function(s = .x){
pb$tick()$print()
do.call(base::c,purrr::map(seq_len(rows),.f= ~exp(-sum((s > tao[-1]) * tao[-1] * X[.x,] +
(s > tao[-length(tao)] & s <= tao[-1]) * s * X[.x,] -
(s > tao[-length(tao)]) * tao[-(length(tao))] * X[.x,])
)))
})
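  # Note: each element of S holds posterior draws of the baseline survival function at a
  # grid point s, S0(s) = exp(-sum_k lambda_k * (length of (tao_k, tao_{k+1}] below s)),
  # i.e. the survival implied by the piecewise-constant baseline hazard.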
X.median.obs <- as.matrix(unname(Lambda.median.obs))
writeLines(c("","Done.","Generating survival function estimates of the median observation."))
pb <- dplyr::progress_estimated(length(ss))
S.median.obs <- purrr::map(ss, function(s = .x){
pb$tick()$print()
do.call(base::c,purrr::map(seq_len(rows),.f= ~exp(-sum((s > tao[-1]) * tao[-1] * X.median.obs[.x,] +
(s > tao[-length(tao)] & s <= tao[-1]) * s * X.median.obs[.x,] -
(s > tao[-length(tao)]) * tao[-(length(tao))] * X.median.obs[.x,])
)))
})
# H[2:rows, 2:101] <- -log(S[2:rows, 2:101])
S <- purrr::map(.x = 1, ~S)
S.median.obs <- purrr::map(.x = 1, ~S.median.obs)
Lambda <- purrr::map(.x = 1, ~tibble::as_tibble(Lambda))
Lambda.median.obs <- purrr::map(.x = 1, ~Lambda.median.obs)
cat(paste("Done.", "\n"), sep = "")
if (printtime) {
cat(">>> Total processing time (sec.):\n")
print(procTime <- proc.time() - tInit)
}
if(type.c == 4) {
X = tibble::enframe(list(Lambda = Lambda, Lambda.m = Lambda.median.obs,
U = U, C = C, Epsilon = Epsilon, Theta = Theta)) } else {
X = tibble::enframe(list(Lambda = Lambda, Lambda.m = Lambda.median.obs, U = U, C = C, Theta = Theta))
}
out <- tibble::enframe(list(times = times, delta = delta, data = covar2, type.t = type.t,
tao = tao, K = K, t.unc = t.unc, iterations = rows, burn.in = burn.in, thinning = thinning,
acceptance = tibble::enframe(list(a.th = acceptance.th/iterations, a.c = acceptance.c/((K-1)*iterations))),
simulations = X, p = p, s = ss, S = S, S.m = S.median.obs
))
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CGaMRes.R
|
#' Diagnostics plots for lambda, U, C, Epsilon and Theta
#'
#' Diagnostics plots for hazard rate (Lambda), latent variable (U), dependence
#' variable (C), parameter of the hierarchical model (Epsilon) and regression
#' coefficients (Theta).
#'
#' This function returns a diagnostics plot for the chain of the selected
#' variable. The diagnostics include trace, ergodic mean, autocorrelation
#' function and histogram.
#'
#' @param M Tibble. Contains the output by \code{CGaMRes}
#' @param variable Either "Lambda", "U", "C", "Epsilon" or "Theta". Variable
#' for which diagnostics plot will be shown.
#' @param pos Positive integer. Position of the selected \code{variable} to be
#' plotted.
#' @seealso \link{CGaMRes}
#' @references - Nieto-Barajas, L. E. (2003). Discrete time Markov gamma
#' processes and time dependent covariates in survival analysis. \emph{Bulletin
#' of the International Statistical Institute 54th Session}. Berlin. (CD-ROM).
#'
#' - Nieto-Barajas, L. E. & Walker, S. G. (2002). Markov beta and gamma
#' processes for modelling hazard rates. \emph{Scandinavian Journal of
#' Statistics} \strong{29}: 413-424.
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' # data(leukemiaFZ)
#' # leukemia1 <- leukemiaFZ
#' # leukemia1$wbc <- log(leukemiaFZ$wbc)
#' # CGEX1 <- CGaMRes(data = leukemia1, K = 10, iterations = 1000, thinning = 1)
#' # CGaPlotDiag(CGEX1,variable="Theta",pos=1)
#'
#'
#'
#' @export CGaPlotDiag
CGaPlotDiag <-
function(M, variable = "Lambda", pos = 1) {
variable <- match.arg(variable,c("Lambda","Theta","U","C","Epsilon"))
K <- M %>% extract("K")
if (pos < 0 || pos > K ) {
stop ("Invalid position.")
}
if (pos > (K - 1) && (variable == "U" || variable == "C")) {
stop ("Invalid position.")
}
if (!("Epsilon" %in% (M %>% extract(c("simulations")) %>%
dplyr::pull(name))) && variable == "Epsilon"){
stop("Plots for 'epsilon' are not available.")
}
if (variable == "Epsilon" && pos != 1) {
warning("'epsilon' has only one entry (1). Graphics shown for epsilon_1.")
pos <- 1
}
MAT <- M %>% extract(c("simulations",variable))
if(variable %in% c("Epsilon")){
pos = 1
MAT <- matrix(MAT, nrow = M %>% extract("iterations"), ncol = 1) %>% tibble::as_tibble()
} else{
if(variable %in% c("Lambda")){
MAT %<>% purrr::pluck(1) %>% tibble::as_tibble %>% dplyr::select(pos) %>% rlang::set_names("V1")
} else{
MAT %<>% tibble::as_tibble() %>% dplyr::select(pos) %>% rlang::set_names("V1")
}
}
var <- switch(variable, Lambda = expression(lambda),
Epsilon = expression(epsilon),
Theta = expression(theta),
U = "U",
C = "C")
if(variable %in% c("Epsilon")) title <- "" else{title <- paste0("Position: ", pos)}
a <- MAT %>% ggplot2::ggplot() + ggplot2::geom_line(ggplot2::aes(x=seq_len(nrow(MAT)), y = V1), color = "slateblue4") +
ggplot2::labs(x = "Iteration", y = variable) + ggplot2::ylab(var) + ggplot2::ggtitle("Trace")+
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
b <- MAT %>% ggplot2::ggplot() + ggplot2::geom_line(ggplot2::aes(x=seq_len(nrow(MAT)), y = cumsum(V1)/seq_len(nrow(MAT))), color = "slateblue4") +
ggplot2::labs(x = "Iteration", y = variable) + ggplot2::ggtitle("Ergodic mean") +
ggplot2::ylab(var) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
acf.aux <- acf(MAT, plot = F)
c <- cbind(acf.aux$lag, acf.aux$acf) %>% tibble::as_tibble() %>% ggplot2::ggplot() +
ggplot2::geom_segment(ggplot2::aes(x = V1, xend = V1, y = V2, yend = 0)) + ggplot2::labs(x = "Lag", y ="ACF")+
ggplot2::ggtitle("Autocorrelation function") +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
d <- MAT %>% ggplot2::ggplot() + ggplot2::geom_histogram(ggplot2::aes(x = V1), fill = "lightblue", color = "black", bins = 30) +
ggplot2::ggtitle("Histogram") + ggplot2::xlab(var) + ggplot2::ylab("") + #coord_flip() +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
gridExtra::grid.arrange(a,b,c,d, top = title)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CGaPlotDiag.R
|
#' Plots for the Hazard and Survival Function Estimates for the Bayesian
#' non-parametric Markov gamma model with covariates in survival analysis.
#'
#' Plots the resulting hazard function along with the survival function
#' estimate defined by the Markov gamma process with covariates (Nieto-Barajas,
#' 2003).
#'
#' This function returns plots of the hazard rate estimates computed by
#' \code{\link{CGaMRes}} together with their credible intervals. Additionally, it
#' plots the survival function estimates with their corresponding credible
#' intervals and, optionally, the survival curve obtained from a Cox
#' proportional hazards fit.
#'
#' @param M tibble. Contains the output generated by \code{CGaMRes}.
#' @param new_obs tibble. The function calculates the hazard rates and survival
#' function estimates for specific individuals expressed in a tibble, the names of the
#' columns have to be the same as the data input.
#' @param type.h character. "segment"= use segments to plot hazard rates,
#' "line" = link hazard rates by a line
#' @param coxSurv logical. If TRUE, adds the survival function estimated with a Cox
#' proportional hazards model.
#' @param intervals logical. If TRUE, plots credible bands for the selected functions.
#' @param confidence Numeric. Confidence level.
#' @param summary logical. If \code{TRUE}, a summary for hazard and survival
#' functions is returned as a tibble.
#' @return \item{SUM.h}{Numeric tibble. Summary for the mean, median, and a
#' \code{confidence} level credible interval for each segment of the hazard
#' function. If \code{summary = TRUE}} \item{SUM.S}{Numeric tibble. Summary for
#' the mean, median, and a \code{confidence} level credible interval for each
#' segment of the survival function. If \code{summary = TRUE}}
#' @seealso \link{CGaMRes},
#' @references - Nieto-Barajas, L. E. (2003). Discrete time Markov gamma
#' processes and time dependent covariates in survival analysis. \emph{Bulletin
#' of the International Statistical Institute 54th Session}. Berlin. (CD-ROM).
#'
#' - Nieto-Barajas, L. E. & Walker, S. G. (2002). Markov beta and gamma
#' processes for modelling hazard rates. \emph{Scandinavian Journal of
#' Statistics} \strong{29}: 413-424.
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' # ## Example 1
#' # data(leukemiaFZ)
#' # leukemia1 <- leukemiaFZ
#' # leukemia1$wbc <- log(leukemiaFZ$wbc)
#' # CGEX1 <- CGaMRes(data = leukemia1, K = 10, iterations = 100, thinning = 1)
#' # CGaPloth(CGEX1)
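#' #
#' # ## Illustrative sketch (not part of the original example): curves for specific
#' # ## individuals can be requested through 'new_obs'; its column names must match the
#' # ## covariates in 'data'. For instance, using the covariates of the first subject:
#' # # CGaPloth(CGEX1, new_obs = leukemia1[1, -c(1, 2)])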
#'
#'
#'
#' @export CGaPloth
CGaPloth <-
function(M, new_obs = NULL, type.h= "segment", coxSurv = T, intervals = T,
confidence = 0.95, summary = FALSE) {
SUM <- CGaLambdaSumm(M, new = new_obs, confidence)
h <- extract(SUM, "SUM.h")
S <- extract(SUM, "SUM.S")
v <- list("tao",
"K",
"times",
"delta",
"data"
) %>% purrr::map(~extract(M,.x)) %>% rlang::set_names(c("tao","K","times","delta","data"))
tao <- v$tao
K <- v$K
delta <- v$delta
times <- v$times
data <- v$data
if(type.h == "segment") h.graf <- purrr::imap(h,~{
if(.y == 1) tit <- "Estimate of Baseline Hazard Rates"
if(.y == 2) tit <- "Estimate of Hazard Rates for median observation"
if(!.y %in% c(1,2)) tit <- sprintf("Estimate of Hazard Rates for observation %s",.y-2)
out <- .x %>% ggplot2::ggplot() +
ggplot2::geom_segment(ggplot2::aes(x = tao[-(K+1)], xend = tao[-1],
y = mean, yend = mean, color = "Hazard Function")) +
ggplot2::scale_color_manual(values = c("black"), limits = "Hazard Function") +
ggplot2::guides(color = ggplot2::guide_legend(title = "")) +
ggplot2::xlab("Time") + ggplot2::ylab("Hazard rate") + ggplot2::scale_alpha_continuous(guide = F) +
ggplot2::ggtitle(paste0(tit," with intervals at ",confidence * 100,"% of credibility")) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"),
legend.position="bottom")
if(intervals){
out <- out + ggplot2::geom_errorbar(ggplot2::aes(ymin = lower, ymax = upper, x = (tao[-(K+1)] + tao[-1])/2, width = tao[-1]-tao[-(K+1)]),
alpha = 0.5, color = "gray50")
}
return(out)
}
)
if(type.h == "line") h.graf <- purrr::imap(h,~{
if(.y == 1) tit <- "Estimate of Baseline Hazard Rates"
if(.y == 2) tit <- "Estimate of Hazard Rates for median observation"
if(!.y %in% c(1,2)) tit <- sprintf("Estimate of hazard rates for observation %s",.y-2)
out <- .x %>% ggplot2::ggplot() +
ggplot2::geom_line(ggplot2::aes(x = (tao[-(K+1)] + tao[-1])/2, y = mean, color = "Hazard Function")) +
ggplot2::scale_color_manual(values = c("black"), limits = "Hazard Function") +
ggplot2::guides(color = ggplot2::guide_legend(title = "")) +
ggplot2::xlab("Time") + ggplot2::ylab("Hazard rate") + ggplot2::scale_alpha_continuous(guide = F) +
ggplot2::ggtitle(paste0(tit," with intervals at ",confidence * 100,"% of credibility")) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"),
legend.position="bottom")
if(intervals){
out <- out + ggplot2::geom_ribbon(ggplot2::aes(x = (tao[-(K+1)] + tao[-1])/2, ymin = lower, ymax = upper), alpha = .5, fill = "gray70")
}
return(out)
}
)
S.graf <- purrr::imap(S,~{
if(.y == 1) tit <- "Estimate of Baseline Survival Function"
if(.y == 2) tit <- "Estimate of Survival Function for median observation"
if(!.y %in% c(1,2)) tit <- sprintf("Estimate of Survival Function for observation %s",.y-2)
out <- .x %>% ggplot2::ggplot() + ggplot2::geom_line(ggplot2::aes(x = t, y = `S^(t)`,color = "Model estimate")) +
ggplot2::scale_color_manual(limits = c("Model estimate"),values = c("black")) +
ggplot2::guides(color = ggplot2::guide_legend(title = "")) +
ggplot2::scale_y_continuous(limits = c(0,1)) +
ggplot2::ggtitle(paste0(tit," with intervals at ", confidence * 100,"% of credibility")) +
ggplot2::labs(x = "t",
y = expression(S^{(t)})) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"),
legend.position = "bottom")
if(intervals){
out <- out + ggplot2::geom_ribbon(ggplot2::aes(x = t, ymin = lower, ymax = upper), fill = "gray50", alpha = 0.3)
}
return(out)
}
)
if(coxSurv){
t<-survival::Surv(times,delta)
xfitc<-survival::coxph(as.formula(sprintf("t~%s",paste(names(data),collapse = "+"))),data=data)
data.b <- rep(0,ncol(data)) %>% matrix(nrow = 1,byrow = T) %>%
tibble::as_tibble() %>% rlang::set_names(names(data))
data.m <- data %>% dplyr::summarise_all(median)
new <- rbind(data.b,data.m,new_obs)
S.graf<- purrr::map(seq_len(nrow(new)),~{
fit <- survival::survfit(xfitc,newdata=new[.x,],conf.type="none")
km.data <- tibble::tibble(time = fit$time,surv = fit$surv)
if(km.data$time[1]!= 0){
km.data <- dplyr::bind_rows(tibble::tibble(time = 0, surv = 1),km.data)
}
S.graf[[.x]] + ggplot2::geom_step(data = km.data,na.rm = T, ggplot2::aes(x = time,y = surv), color = "#b22222") +
ggplot2::scale_color_manual(limits = c("Model estimate","Cox proportional hazards"),
values = c("black","#b22222"))
})
}
if (summary == TRUE) {
return(list(h.graf, S.graf, SUM))
} else{
return(list(h.graf, S.graf))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CGaPloth.R
|
CUpdTheta <-
function(theta, m, lambda.r, times, delta, K, covar, tao, var.theta.str, var.theta.ini, acceptance) {
p <- length(theta)
theta.upd <- theta
m.upd <- m
for(s in 1:p){
theta.str <- theta.upd
theta.str[s] <- rnorm(1, mean = theta.upd[s], sd = sqrt(var.theta.str))
m.str <- CGaM(times, tao, K, covar, theta.str)
pr <- dnorm(theta.str[s], mean = 0, sd = sqrt(var.theta.ini), log = T) -
dnorm(theta.upd[s], mean = 0, sd = sqrt(var.theta.ini), log = T) +
(theta.str[s] - theta.upd[s]) * sum(covar[delta==1,s]) +
sum(lambda.r * (m.upd - m.str))
if(log(runif(1)) <= pr) {
theta.upd[s] <- theta.str[s]
m.upd <- m.str
acceptance[s] <- acceptance[s] + 1
}
}
return(list(theta.upd, acceptance))
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CUpdTheta.R
|
CuLambdaSumm <-
function(M, confidence = 0.95) {
if (confidence <= 0 || confidence >= 1) {
stop ("Invalid parameter: confidence must be between 0 and 1.")
}
v <- rlang::set_names(purrr::map(list("K",
"iterations",
"s",
"S",
c("simulations","Lambda"),
c("simulations","Pi"),
c("simulations","Z")
),
~extract(M,.x)),
c("K","iterations","s","S","Lambda","Pi","Z"))
K <- v$K
iterations <- v$iterations
pr <- (1 - confidence) / 2
S <- v$S
SUM.h <- rlang::set_names(tibble::tibble(a=seq_len(K),
b=purrr::map_dbl(v$Lambda, mean),
c=purrr::map_dbl(v$Lambda, quantile, probs = pr),
d=purrr::map_dbl(v$Lambda, quantile, probs = 0.5),
e=purrr::map_dbl(v$Lambda, quantile, probs = 1 - pr)
),
c("k", "mean", "lower", "median", "upper"))
SUM.S <- rlang::set_names(tibble::tibble(a=v$s,
b=purrr::map_dbl(v$S, mean),
c=purrr::map_dbl(v$S, quantile, probs = pr),
d=purrr::map_dbl(v$S, quantile, probs = 0.5),
e=purrr::map_dbl(v$S, quantile, probs = 1-pr)),
c("t", "S^(t)", "lower", "median", "upper"))
prop.pi <- v$Pi
prop.pi <- dplyr::rename(tibble::as_tibble(t(c(mean(prop.pi), quantile(prop.pi, c(pr, 0.5, 1 - pr))))), "mean" = "V1")
z <- v$Z
z <- dplyr::rename(tibble::as_tibble(t(c(mean(z), quantile(z, c(pr, 0.5, 1 - pr))))), "mean" = "V1")
    out <- tibble::enframe(list(SUM.h = SUM.h, SUM.S = SUM.S, SUM.pi = prop.pi, SUM.z = z))
    return(out)
  }
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CuLambdaSumm.R
|
#' Bayesian Semiparametric Cure Rate Model with an Unknown Threshold
#'
#' Posterior inference for the Bayesian semiparametric cure rate model in
#' survival analysis.
#'
#'
#' Computes the Gibbs sampler with the full conditional distributions of
#' all model parameters (Nieto-Barajas & Yin 2008) and arranges the resulting Markov
#' chain into a tibble which can be used to obtain posterior summaries.
#'
#'
#' @param times Numeric positive vector. Failure times.
#' @param delta Logical vector. Status indicator. \code{TRUE} (1) indicates
#' exact lifetime is known, \code{FALSE} (0) indicates that the corresponding
#' failure time is right censored.
#' @param type.t Integer. 1=computes uniformly-dense intervals; 2=partition
#' arbitrarily defined by the user through \code{utao}; 3=same-length intervals.
#' @param K Integer. Partition length for the hazard function if
#' \code{type.t}=1 or \code{type.t}=3.
#' @param utao vector. Partition specified by the user when type.t = 2. The first value of
#' the vector has to be 0 and the last one the maximum observed time, either censored or uncensored.
#' @param alpha Nonnegative entry vector. Small entries are recommended in
#' order to specify a non-informative prior distribution.
#' @param beta Nonnegative entry vector. Small entries are recommended in order
#' to specify a non-informative prior distribution.
#' @param c.r Nonnegative vector. The higher the entries, the higher the correlation of two consecutive intervals.
#' @param type.c 1=defines \code{c.r} as a zero-entry vector; 2=lets the user
#' define \code{c.r} freely; 3=assigns \code{c.r} by computing an exponential
#' prior distribution with mean epsilon; 4=assigns \code{c.r} by computing an exponential hierarchical
#' distribution with mean \code{epsilon} which in turn has a Ga(a.eps, b.eps)
#' distribution.
#' @param epsilon Double. Mean of the exponential distribution assigned to
#' \code{c.r} when \code{type.c = 3}. When \code{type.c = 4}, \code{epsilon} is
#' assigned a Ga(a.eps,b.eps) distribution.
#' @param c.nu Tuning parameter for the proposal distribution for c.
#' @param a.eps Numeric. Shape parameter for the prior gamma distribution of
#' epsilon when \code{type.c = 4}.
#' @param b.eps Numeric. Scale parameter for the prior gamma distribution of
#' epsilon when \code{type.c = 4}.
#' @param a.mu Numeric. Shape parameter for the prior gamma distribution of
#' mu
#' @param b.mu Numeric. Scale parameter for the prior gamma distribution of
#' mu
#' @param iterations Integer. Number of iterations including the \code{burn.in}
#' to be computed for the Markov Chain.
#' @param burn.in Integer. Length of the burn-in period for the Markov chain.
#' @param thinning Integer. Factor by which the chain will be thinned. Thinning
#' the Markov chain reduces autocorrelation.
#' @param printtime Logical. If \code{TRUE}, prints out the execution time.
#' @note It is recommended to verify the chain's stationarity. This can be done by
#' checking each element individually. See \code{\link{CuPlotDiag}}.
#' @examples
#'
#'
#' ## Simulations may be time intensive. Be patient.
#' ## Example 1
#' # data(crm3)
#' # times<-crm3$times
#' # delta<-crm3$delta
#' # res <- CuMRes(times, delta, type.t = 2,
#' # K = 100, length = .1, alpha = rep(1, 100 ),
#' # beta = rep(1, 100),c.r = rep(50, 99),
#' # iterations = 100, burn.in = 10, thinning = 1, type.c = 2)
#'
#'
#' @export CuMRes
CuMRes <-
function(times, delta = rep(1, length(times)), type.t = 3, K = 5, utao = NULL,
alpha = rep(0.01, K), beta = rep(0.01, K),
c.r = rep(1, (K - 1)),
type.c = 4, epsilon = 1, c.nu = 1, a.eps = 0.1, b.eps = 0.1,
a.mu = 0.01, b.mu = 0.01,
iterations = 1000, burn.in = floor(iterations * 0.2),
thinning = 5, printtime = TRUE) {
tInit <- proc.time()
if (min(times) < 0) {
stop ("Invalid argument: 'times' must be a nonnegative vector.")
}
if (min((delta == 0) + (delta == 1 )) == 0) {
stop ("Invalid argument: 'delta' must have 0 - 1 entries.")
}
if (length(times) != length(delta)) {
stop ("Invalid argument: 'times' and 'delta' must have same length.")
}
if (type.t == 2) {
if(is.null(utao)) stop("If type.t = 2 you need to specify utao.")
utao <- sort(utao)
if(utao[1]!=0){
warning("The first value of the partition needs to be 0, utao fixed and now starting with 0.")
utao <- c(0, utao)
}
if(max(times) > max(utao)){
utao <- c(utao,max(times))
warning("The last value of the partition needs to be", max(times),", utao fixed and set to ",max(times),".")
}
K <- length(utao) - 1
}
if (type.t == 1 || type.t == 3) {
if (inherits(try(K != 0, TRUE), "try-error")) {
K.aux <- 5
warning ("'K' value not specified. 'K' fixed at ", K.aux, ".")
} else {K.aux <- K}
K <- K.aux
}
tol <- .Machine$double.eps ^ 0.5
if (abs(type.t - round(type.t)) > tol || type.t < 1 || type.t > 3) {
stop ("Invalid argument: 'type.t' must be an integer between 1 and 3.")
}
if (K <= 2 || abs(K - round(K)) > tol) {
stop ("Invalid argument: 'K' must be an integer greater than 2.")
}
if (length(alpha) != K || length(beta) != K) {
stop (c("Invalid argument: 'alpha', 'beta', must have length "), K)
}
if (min(c(alpha, beta)) < 0) {
stop ("Invalid argument: 'alpha' and 'beta' must have nonnegative entries.")
}
if (abs(type.c - round(type.c)) > tol || type.c < 1 || type.c > 4) {
stop ("Invalid argument: 'type.c' must be an integer between 1 and 4.")
}
if (type.c %in% c( 2)) {
if (length(c.r) != (K - 1)) {
stop (c("Invalid argument: 'c.r' must have length, ", K - 1))
}
if (sum(abs(c.r - round(c.r)) > tol) != 0 || min(c.r) < 0) {
stop ("Invalid argument: 'c.r' entries must be nonnegative integers.")
}
}
if (type.c == 1 && sum(abs(c.r)) != (K-1) ) {
c.r <- rep(0, K - 1)
warning (c("'c.r' redefined as rep(0,", K - 1, ") because type.c = 1."))
}
if (type.c == 3 && epsilon < 0) {
stop ("Invalid argument: 'epsilon' must be nonnegative.")
}
if (iterations <= 0 || abs(iterations - round(iterations)) > tol
|| iterations < 50) {
stop ("Invalid argument: 'iterations' must be an integer greater than 50.")
}
if (burn.in < 0 || abs(burn.in - round(burn.in)) > tol
|| burn.in > iterations*0.9) {
stop ("Invalid argument: 'burn.in' must be a postitive integer smaller than
iterations = ", iterations * 0.9, ".")
}
if (!inherits(thinning, "numeric")) {
stop ("Invalid argument: 'thinning' must be a logical value.")
}
if (thinning <= 0 || abs(thinning - round(thinning)) > tol
|| thinning > 0.1 * iterations) {
stop ("Invalid argument: 'thinning' must be a postitive integer smaller than
iterations * 0.10 = ", iterations * 0.1, ".")
}
if (printtime != TRUE && printtime != FALSE) {
stop ("Invalid argument: 'printtime' must be a logical value.")
}
nm <- NM(times, delta, type.t, K, utao)
n <- nm$n
m <- nm$m
tao <- nm$tao
t.unc <- nm$t.unc
acceptance.c <- 0
if (type.c %in% c(3,4)) {
c.r <- rep(5, (K - 1))
Epsilon <- rep(NA, iterations)
}
cat(c("Iterating...", "\n"), sep = "")
Lambda <- matrix(NA, nrow = iterations, ncol = K)
U <- matrix(NA, nrow = iterations, ncol = K - 1)
C <- matrix(NA, nrow = iterations, ncol = K - 1)
lambda.r <- rep(0.1, K)
Mu <- rep(NA, iterations)
  Z <- rep(NA, iterations) # initialize the cure-threshold chain
  Pi <- rep(NA, iterations) # initialize the cure-proportion chain
  k.star <- min(which(max(times[delta==1]) <= tao)) - 1 # largest k containing at least one exact observation
  z <- k.star # initial value for the cure threshold
pb <- dplyr::progress_estimated(iterations)
for(j in seq_len(iterations)) {
pb$tick()$print()
u.r <- UpdU(alpha, beta, c.r, lambda.r)
lambda.r <- CuUpdLambda(alpha, beta, c.r, u.r, n, m, z)
    mu <- rgamma(1, shape = a.mu + z - 1, rate = b.mu + 1) # sample mu
aux.pi <- sum(lambda.r[seq_len(z)] * (tao[seq_len(z) + 1] - tao[seq_len(z)]))
prop.pi <- exp(-aux.pi)
    z <- CuUpdZ(mu, m, lambda.r, k.star) # update the cure threshold
if (type.c %in% c(3,4)) {
if (type.c == 4) {
epsilon <- rgamma(1, shape = a.eps + K, scale = 1 / (b.eps + sum(c.r)))
}
auxc.r <- GaUpdC(alpha, beta, c.r, lambda.r, u.r, epsilon, c.nu, acceptance.c)
c.r <- auxc.r[[1]]
acceptance.c <- auxc.r[[2]]
}
Lambda[j, ] <- lambda.r
U[j, ] <- u.r
C[j, ] <- c.r
Mu[j] <- mu
Pi[j] <- prop.pi
Z[j] <- z
if (type.c %in% c(3,4)) Epsilon[j] <- epsilon
}
Lambda <- Lambda[seq(burn.in + 1, iterations, thinning), ]
U <- U[seq(burn.in + 1, iterations, thinning), ]
C <- C[seq(burn.in + 1, iterations, thinning), ]
Mu <- Mu[seq(burn.in + 1, iterations, thinning)]
Pi <- Pi[seq(burn.in + 1, iterations, thinning)]
Z <- Z[seq(burn.in + 1, iterations, thinning)]
Lambda <- purrr::map_dfc(seq_len(ncol(Lambda)), ~ as.numeric((.x <= Z)*Lambda[,.x]))
if (type.c %in% c(3,4)){ Epsilon <- Epsilon[seq(burn.in + 1, iterations, thinning)]}
writeLines(c("","Done.", "\n", "Generating survival function estimates"))
rows <- nrow(Lambda)
s <- max(tao) * seq.int(0,100) / 100
X <- as.matrix(unname(Lambda))
pb <- dplyr::progress_estimated(length(s))
S <- purrr::map(s, function(s = .x){
pb$tick()$print()
do.call(base::c, purrr::map(seq_len(rows),.f= ~exp(-sum((s > tao[-1]) * tao[-1] * X[.x,] +
(s > tao[-length(tao)] & s <= tao[-1]) * s * X[.x,] -
(s > tao[-length(tao)]) * tao[-(length(tao))] * X[.x,])
)))
})
cat(c("Done.", "\n"), sep = "")
if (printtime) {
cat(">>> Total processing time (sec.):\n")
print(procTime <- proc.time() - tInit)
}
if(type.c %in% c(3,4)) {
X = list(Lambda = Lambda,
U = U, C = C, Mu = Mu, Pi = Pi, Z = Z, Epsilon = Epsilon)} else {
X = list(Lambda = Lambda, U = U, C = C, Mu = Mu, Pi = Pi, Z = Z)
}
X <- tibble::enframe(X)
out <- list(times = times, delta = delta, type.t = type.t, tao = tao, K = K,
t.unc = t.unc, iterations = rows, simulations = X, s = s,
acceptance = acceptance.c/((K-1)*iterations),
S = S)
out <- tibble::enframe(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CuMRes.R
|
#' Diagnosis plots for Lambda, U, C, Mu, Pi, Z and Epsilon
#'
#' Diagnostics plots for hazard rate (Lambda), latent variable (U), dependence
#' variable (C), mean of cure threshold (Mu), cure proportion (Pi), cure threshold (Z) and the parameter of the
#' hierarchical prior (Epsilon).
#'
#' This function returns a diagnostics plot for which the chain for the selected
#' variable can be monitored. Diagnostics includes trace, ergodic mean,
#' autocorrelation function and histogram.
#'
#' @param M List. Contains the output by \code{CuMRes}.
#' @param variable Either "Lambda", "U", "C", "Mu", "Pi", "Z" or "Epsilon".
#' Variable for which diagnostic plot will be shown.
#' @param pos Positive integer. Position of the selected \code{variable} to be
#' plotted.
#' @seealso \link{CuMRes}
#' @references Nieto-Barajas, L. E., & Yin, G. (2008). Bayesian semiparametric
#' cure rate model with an unknown threshold. \emph{Scandinavian Journal of
#' Statistics}, \strong{35(3)}, 540-556.
#' https://doi.org/10.1111/j.1467-9469.2007.00589.x
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' # data(crm3)
#' # times<-crm3$times
#' # delta<-crm3$delta
#' # res <- CuMRes(times, delta, type.t = 2,
#' # K = 100, length = .1, alpha = rep(1, 100 ),
#' # beta = rep(1, 100),c.r = rep(50, 99),
#' # iterations = 100, burn.in = 10, thinning = 1, type.c = 2)
#' # CuPlotDiag(M = res, variable = "Mu")
#' # CuPlotDiag(M = res, variable = "Z")
#' # CuPlotDiag(M = res, variable = "Pi")
#' # CuPlotDiag(M = res, variable = "Lambda", pos = 2)
#' # CuPlotDiag(M = res, variable = "U", pos = 4)
#' # CuPlotDiag(M = res, variable = "C", pos = 3)
#'
#'
#'
#' @export CuPlotDiag
CuPlotDiag <-
function(M, variable = "Lambda", pos = 1) {
variable <- match.arg(variable,c("Lambda","U","C","Mu","Pi","Z","Epsilon"))
K <- extract(M, "K")
if (pos < 0 || pos > K ) {
stop ("Invalid position.")
}
if (pos > (K - 1) && (variable == "U" || variable == "C")) {
stop ("Invalid position.")
}
if (!("Epsilon" %in% (dplyr::pull(extract(M, c("simulations")), name))) && variable == "Epsilon"){
stop("Plots for 'epsilon' are not available.")
}
if (variable == "Epsilon" && pos != 1) {
warning("'epsilon' has only one entry (1). Graphics shown for epsilon_1.")
pos <- 1
}
MAT <- extract(M, c("simulations",variable))
if(variable %in% c("Epsilon","Mu","Pi","Z")){
pos = 1
MAT <- tibble::as_tibble(matrix(MAT, nrow = extract(M, "iterations"), ncol = 1))
}
if(variable == "Lambda"){
MAT <- rlang::set_names(dplyr::select(MAT, pos), "V1")
}
if(variable %in% c("U","C")){
MAT <- rlang::set_names(dplyr::select(tibble::as_tibble(MAT), pos), "V1")
}
var <- switch(variable, Lambda = expression(lambda),
Pi = expression(pi),
Mu = expression(mu),
Epsilon = expression(epsilon),
Z = "Z",
U = "U",
C = "C")
if(variable %in% c("Epsilon","Mu","Pi","Z")) title <- "" else{title <- paste0("Position: ", pos)}
a <- ggplot2::ggplot(MAT) + ggplot2::geom_line(ggplot2::aes(x=seq_len(nrow(MAT)), y = V1), color = "slateblue4") +
ggplot2::labs(x = "Iteration", y = variable) + ggplot2::ylab(var) + ggplot2::ggtitle("Trace")+
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
b <- ggplot2::ggplot(MAT) + ggplot2::geom_line(ggplot2::aes(x=seq_len(nrow(MAT)), y = cumsum(V1)/seq_len(nrow(MAT))), color = "slateblue4") +
ggplot2::labs(x = "Iteration", y = variable) + ggplot2::ggtitle("Ergodic mean") +
ggplot2::ylab(var) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
acf.aux <- acf(MAT, plot = F)
c <- ggplot2::ggplot(tibble::as_tibble(cbind(acf.aux$lag, acf.aux$acf))) +
ggplot2::geom_segment(ggplot2::aes(x = V1, xend = V1, y = V2, yend = 0)) + ggplot2::labs(x = "Lag", y ="ACF")+
ggplot2::ggtitle("Autocorrelation function") +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
d <- ggplot2::ggplot(MAT) + ggplot2::geom_histogram(ggplot2::aes(x = V1), fill = "lightblue", color = "black", bins = 30) +
ggplot2::ggtitle("Histogram") + ggplot2::xlab(var) + ggplot2::ylab("") + #coord_flip() +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
gridExtra::grid.arrange(a,b,c,d, top = title)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CuPlotDiag.R
|
#' Plots for the Hazard and Survival Function Estimates
#'
#' Plots the hazard function and the survival function
#' estimates defined by the Bayesian semiparametric cure
#' rate model with an unknown threshold
#' (Nieto-Barajas & Yin, 2008).
#'
#' This function returns plots of the hazard rate estimates computed
#' by \link{CuMRes} and of the cure time (the quantile of tao specified by the user),
#' together with credible intervals. Additionally, it plots the survival function and the cure proportion estimates
#' with their corresponding credible intervals.
#'
#' @param M tibble. Contains the output generated by \code{CuMRes}.
#' @param type.h character. "segment"= use segments to plot hazard rates,
#' "line" = link hazard rates by a line
#' @param intervals logical. If TRUE, plots credible intervals.
#' @param confidence Numeric. Confidence level.
#' @param qn Numeric. Quantile for Tao that should be visualized on the plot.
#' @param summary Logical. If \code{TRUE}, a summary for hazard and survival
#' functions is returned as a tibble.
#' @param position_label character. Labels on the right or left side of the
#' plot.
#' @return \item{SUM.h}{Numeric tibble. Summary of the mean, median, and a
#' \code{confidence}-level credible interval for each segment of the hazard
#' function. Returned if \code{summary = TRUE}.} \item{SUM.S}{Numeric tibble. Summary of
#' the mean, median, and a \code{confidence}-level credible interval for a grid of
#' the survival function. Returned if \code{summary = TRUE}.}
#' @seealso \link{CuMRes},
#' @references - Nieto-Barajas, L. E. (2003). Discrete time Markov gamma
#' processes and time dependent covariates in survival analysis. \emph{Bulletin
#' of the International Statistical Institute 54th Session}. Berlin. (CD-ROM).
#'
#' -Nieto-Barajas, L. E., & Yin, G. (2008). Bayesian semiparametric cure rate
#' model with an unknown threshold. \emph{Scandinavian Journal of Statistics},
#' \strong{35(3)}, 540-556. https://doi.org/10.1111/j.1467-9469.2007.00589.x
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' # data(crm3)
#' # times<-crm3$times
#' # delta<-crm3$delta
#' # res <- CuMRes(times, delta, type.t = 2, length = .1,
#' # K = 100, alpha = rep(1, 100 ),
#' # beta = rep(1, 100),c.r = rep(50, 99),
#' # iterations = 100, burn.in = 10, thinning = 1, type.c = 2)
#' # CuPloth(res, type.h = "segment",qn=.5, summary = T)
#' # CuPloth(res, type.h = "line",qn=.5)
#'
#'
#'
#'
#' @export CuPloth
CuPloth <-
function(M, type.h = "segment", intervals = T,
confidence = 0.95, qn = 0.5, summary = FALSE, position_label = "right") {
SUM <- CuLambdaSumm(M, confidence)
v <- tibble::deframe(SUM)
tao <- extract(M, "tao")
K <- extract(M, "K")
z <- extract(M, c("simulations","Z"))
ribbon <- tibble::tibble(x = seq(to = tao[dplyr::pull(v$SUM.z, 4) + 1],
from = tao[dplyr::pull(v$SUM.z, 2) + 1], by = 0.1),
y = max(v$SUM.h$upper))
if(position_label == "left") {
position <- round(tao[dplyr::pull(v$SUM.z, 2) + 1],2)
pos_just <- 1.1
}
if(position_label == "right") {
position <- round(tao[dplyr::pull(v$SUM.z, 4) + 1],2)
pos_just <- -.1
}
if(type.h == "segment") {
h <- ggplot2::ggplot(v$SUM.h) +
ggplot2::geom_segment(ggplot2::aes(x = tao[-(K+1)], xend = tao[-1],
y = mean, yend = mean)) +
ggplot2::xlab("Time") + ggplot2::ylab("Hazard rate") + ggplot2::scale_alpha_continuous(guide = F) +
ggplot2::ggtitle(paste0("Estimate of hazard rates with intervals at ",confidence * 100,"% of credibility")) +
ggplot2::geom_vline(xintercept = round(tao[quantile(z,qn)+1],2), linetype = "dotted") +
ggplot2::annotate("text",x = position, y = max(v$SUM.h$upper),
label = paste0(expression(tau[z])," ==", round(tao[quantile(z,qn)+1],2)),
hjust = pos_just, vjust = 1, parse = T) +
ggplot2::annotate("text",x = position, y = max(v$SUM.h$upper),
label = paste0(expression(pi)," == ", round(dplyr::pull(v$SUM.pi, mean),2)),
hjust = pos_just, vjust = 2.5,parse = T) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
if(intervals){
h <- h + ggplot2::geom_errorbar(ggplot2::aes(ymin = lower, ymax = upper, x = (tao[-(K+1)] + tao[-1])/2, width = tao[-1]-tao[-(K+1)]),
alpha = 0.5, color = "gray50") +
ggplot2::geom_ribbon(data = ribbon, ggplot2::aes(x= x, ymin = 0, ymax = y),
alpha = .1, fill = "red")
}
}
if(type.h == "line") {
h <- ggplot2::ggplot(v$SUM.h) +
ggplot2::geom_line(ggplot2::aes(x = (tao[-(K+1)] + tao[-1])/2, y = mean)) +
ggplot2::xlab("Time") + ggplot2::ylab("Hazard rate") + ggplot2::scale_alpha_continuous(guide = F) +
ggplot2::ggtitle(paste0("Estimate of hazard rates with intervals at ",confidence * 100,"% of credibility")) +
ggplot2::geom_vline(xintercept = round(tao[quantile(z,qn)+1], 2), linetype = "dotted") +
ggplot2::annotate("text",x = position, y = max(v$SUM.h$upper),
label = paste0(expression(tau[z])," ==", round(tao[quantile(z,qn)+1],2)),
hjust = pos_just, vjust = 1,parse = T) +
ggplot2::annotate("text",x = position, y = max(v$SUM.h$upper),
label = paste0(expression(pi)," == ", round(dplyr::pull(v$SUM.pi, mean),2)),
hjust = pos_just, vjust = 2.5,parse = T) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
if(intervals){
h <- h + ggplot2::geom_ribbon(data = ribbon, ggplot2::aes(x= x, ymin = 0, ymax = y),
alpha = .1, fill = "red") +
ggplot2::geom_ribbon(ggplot2::aes(x = (tao[-(K+1)] + tao[-1])/2, ymin = lower, ymax = upper), alpha = .5, fill = "gray70")
}
}
S <- ggplot2::ggplot(v$SUM.S) + ggplot2::geom_line(ggplot2::aes(x = t, y = `S^(t)`)) +
ggplot2::scale_y_continuous(limits = c(0,1)) +
ggplot2::ggtitle(paste0("Estimate of Survival Function with intervals at ", confidence * 100,"% of credibility")) +
ggplot2::labs(x = "t",
y = expression(S^{(t)})) +
ggplot2::geom_vline(xintercept = round(tao[quantile(z,qn)+1],2), linetype = "dotted") +
ggplot2::geom_hline(yintercept = round(dplyr::pull(v$SUM.pi, mean),4), linetype = "dotted") +
ggplot2::annotate("text",x = position, y = 1,
label = paste0(expression(tau[z])," ==", round(tao[quantile(z,qn)+1],2)),
hjust = pos_just, vjust = 1,parse = T) +
ggplot2::annotate("text",x = 0, y = round(dplyr::pull(v$SUM.pi, mean),4),
label = paste0(expression(pi)," == ", round(dplyr::pull(v$SUM.pi, mean),4)),
hjust = 0, vjust = -2.5,parse = T) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
if(intervals){
S <- S + ggplot2::geom_ribbon(data = ribbon, ggplot2::aes(x= x, ymin = 0, ymax = 1),
alpha = .1, fill = "red") +
ggplot2::geom_ribbon(ggplot2::aes(x = t, ymin = lower, ymax = upper), fill = "gray50", alpha = 0.3)
}
if (summary) {
return(list(h,S,SUM))
} else{
return(list(h,S))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CuPloth.R
|
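# Gibbs update for the hazard rates lambda_k (not exported). Each lambda_k is
# drawn from its gamma full conditional; note that, for the interior intervals,
# the exposure m_k enters the rate only when the interval lies at or below the
# current cure threshold z.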
CuUpdLambda <-
function(alpha, beta, c.r, u.r, n, m, z) {
K <- length(alpha)
tol <- 1e-7
lambda.r <- rgamma(K,shape = c(alpha[1] + u.r[1] + n[1],
alpha[seq_len(K-2) + 1] + u.r[seq_len(K-2)] + u.r[seq_len(K-2) + 1] + n[seq_len(K-2) + 1],
alpha[K] + u.r[K - 1] + n[K]),
scale = c(1 / (beta[1] + c.r[1] + m[1]),
1/(beta[seq_len(K-2) + 1] + c.r[seq_len(K-2)] + c.r[seq_len(K-2) + 1] + m[seq_len(K-2) + 1] * ((seq_len(K-2) + 1) <= z)),
1 / (beta[K] + c.r[K - 1] + m[K]))
)
lambda.r[abs(lambda.r - 0) < tol] <- 0.000001
return(lambda.r)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CuUpdLambda.R
|
CuUpdZ <-
function(mu, m, lambda.r, k.star) {
k <- length(lambda.r)
propfz <- purrr::map_dbl(seq.int(k.star,k), function(z){
logfz <- (z - 1) * log(mu) - lgamma(z) - sum( m[seq_len(z)] * lambda.r[seq_len(z)] )
# fz <- exp(logfz)
return(logfz)
})
if(length(propfz) == 1) z <- k else{
z <- sample(x = seq.int(k.star,k), size = 1, prob = as.numeric(Brobdingnag::brob(propfz)/sum(Brobdingnag::brob(propfz))))
}
return(z)
}
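# Minimal sketch (not part of the sampler) of the normalisation used above: the
# log-weights are wrapped in Brobdingnag::brob(), which keeps the values on a
# log scale so that exponentiating very negative log-densities does not
# underflow to zero. With hypothetical log-weights:
# logw <- c(-1000, -1001, -1002)
# probs <- as.numeric(Brobdingnag::brob(logw) / sum(Brobdingnag::brob(logw)))
# sum(probs) # 1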
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/CuUpdZ.R
|
#' Markov Gamma Model
#'
#' Computes the Gibbs sampler given by the full conditional distributions of U,
#' Lambda, C and Epsilon (Nieto-Barajas & Walker, 2002) and arranges the
#' resulting Markov chain into a tibble which can be used to obtain posterior
#' summaries.
#'
#'
#' @details Posterior inference for the Bayesian non-parametric Markov gamma model in
#' survival analysis.
#'
#'
#' @param times Numeric positive vector. Failure times.
#' @param delta Logical vector. Status indicator. \code{TRUE} (1) indicates
#' exact lifetime is known, \code{FALSE} (0) indicates that the corresponding
#' failure time is right censored.
#' @param type.t Integer. 1=computes uniformly-dense intervals; 2=
#' partition arbitrarily defined by the user with parameter utao and 3=same length intervals.
#' @param K Integer. Partition length for the hazard function if
#' \code{type.t}=1 or \code{type.t}=3.
#' @param utao vector. Partition specified by the user when type.t = 2. The first value of
#' the vector has to be 0 and the last one the maximum observed time, either censored or uncensored.
#' @param alpha Nonnegative entry vector. Small entries are recommended in
#' order to specify a non-informative prior distribution.
#' @param beta Nonnegative entry vector. Small entries are recommended in order
#' to specify a non-informative prior distribution.
#' @param c.r Nonnegative vector. The higher the entries, the higher the correlation
#' of two consecutive intervals.
#' @param c.nu Tuning parameter for the proposal distribution for c.
#' @param a.eps Numeric. Shape parameter for the prior gamma distribution of
#' epsilon when \code{type.c = 4}.
#' @param b.eps Numeric. Scale parameter for the prior gamma distribution of
#' epsilon when \code{type.c = 4}.
#' @param type.c 1=assigns \code{c.r} a zero-entry vector; 2=lets the user
#' define \code{c.r} freely; 3=assigns \code{c.r} an exponential prior
#' distribution with mean 1; 4=assigns \code{c.r} an exponential hierarchical
#' distribution with mean \code{epsilon} which in turn has a Ga(a.eps, b.eps)
#' distribution.
#' @param epsilon Double. Mean of the exponential distribution assigned to
#' \code{c.r} when \code{type.c = 3}
#' @param iterations Integer. Number of iterations including the \code{burn.in}
#' to be computed for the Markov chain.
#' @param burn.in Integer. Length of the burn-in period for the Markov chain.
#' @param thinning Integer. Factor by which the chain will be thinned. Thinning
#' the Markov chain reduces autocorrelation.
#' @param printtime Logical. If \code{TRUE}, prints out the execution time.
#' @examples
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' data(gehan)
#' timesG <- gehan$time[gehan$treat == "6-MP"]
#' deltaG <- gehan$cens[gehan$treat == "6-MP"]
#' GEX1 <- GaMRes(timesG, deltaG, K = 8, iterations = 3000)
#'
#' ## Example 2
#' data(leukemiaFZ)
#' timesFZ <- leukemiaFZ$time
#' deltaFZ <- leukemiaFZ$delta
#' GEX2 <- GaMRes(timesFZ, deltaFZ, type.c = 4)
#'
#'
#'
#' @export GaMRes
GaMRes<-
function(times, delta = rep(1, length(times)), type.t = 3, K = 5, utao = NULL,
alpha = rep(0.01, K), beta = rep(0.01, K),
c.r = rep(1, (K - 1)), c.nu = 1, a.eps = 0.1, b.eps = 0.1,
type.c = 4, epsilon = 1,
iterations = 1000, burn.in = floor(iterations * 0.2),
thinning = 5, printtime = TRUE) {
tInit <- proc.time()
if (min(times) < 0) {
stop ("Invalid argument: 'times' must be a nonnegative vector.")
}
if (min((delta == 0) + (delta == 1 )) == 0) {
stop ("Invalid argument: 'delta' must have 0 - 1 entries.")
}
if (length(times) != length(delta)) {
stop ("Invalid argument: 'times' and 'delta' must have same length.")
}
if (type.t == 2) {
if(is.null(utao)) stop("If type.t = 2 you need to specify utao.")
utao <- sort(utao)
if(utao[1]!=0){
warning("The first value of the partition needs to be 0, utao fixed and now starting with 0.")
utao <- c(0, utao)
}
if(max(times) > max(utao)){
utao <- c(utao,max(times))
warning("The last value of the partition needs to be", max(times),", utao fixed and set to ",max(times),".")
}
K <- length(utao) - 1
}
if (type.t == 1 || type.t == 3) {
if (inherits(try(K != 0, TRUE), "try-error")) {
K.aux <- 5
warning ("'K' value not specified. 'K' fixed at ", K.aux, ".")
} else {K.aux <- K}
K <- K.aux
}
tol <- .Machine$double.eps ^ 0.5
if (abs(type.t - round(type.t)) > tol || type.t < 1 || type.t > 3) {
stop ("Invalid argument: 'type.t' must be an integer between 1 and 3.")
}
if (K <= 2 || abs(K - round(K)) > tol) {
stop ("Invalid argument: 'K' must be an integer greater than 2.")
}
if (length(alpha) != K || length(beta) != K) {
stop (c("Invalid argument: 'alpha', 'beta', must have length "), K)
}
if (min(c(alpha, beta)) < 0) {
stop ("Invalid argument: 'alpha' and 'beta' must have nonnegative entries.")
}
if (abs(type.c - round(type.c)) > tol || type.c < 1 || type.c > 4) {
stop ("Invalid argument: 'type.c' must be an integer between 1 and 4.")
}
if (type.c == 1 || type.c == 2) {
if (length(c.r) != (K - 1)) {
stop (c("Invalid argument: 'c.r' must have length, "), K - 1)
}
if (sum(abs(c.r - round(c.r)) > tol) != 0 || min(c.r) < 0) {
stop ("Invalid argument: 'c.r' entries must be nonnegative integers.")
}
}
if (type.c == 1 && sum(abs(c.r)) != 0 ) {
c.r <- rep(0, K - 1)
warning (c("'c.r' redefined as rep(0,", K - 1, ") because type.c = 1."))
}
if ((type.c == 3 || type.c == 4) && epsilon < 0) {
stop ("Invalid argument: 'epsilon' must be nonnegative.")
}
if (iterations <= 0 || abs(iterations - round(iterations)) > tol
|| iterations < 50) {
stop ("Invalid argument: 'iterations' must be an integer greater than 50.")
}
if (burn.in < 0 || abs(burn.in - round(burn.in)) > tol
|| burn.in > iterations*0.9) {
stop ("Invalid argument: 'burn.in' must be a postitive integer smaller than
iterations = ", iterations * 0.9, ".")
}
if (!inherits(thinning, "numeric")) {
stop ("Invalid argument: 'thinning' must be a numeric value.")
}
if (thinning <= 0 || abs(thinning - round(thinning)) > tol
|| thinning > 0.1 * iterations) {
stop ("Invalid argument: 'thinning' must be a postitive integer smaller than
iterations * 0.10 = ", iterations * 0.1, ".")
}
if (printtime != TRUE && printtime != FALSE) {
stop ("Invalid argument: 'printtime' must be a logical value.")
}
nm <- NM(times, delta, type.t, K, utao)
n <- nm$n
m <- nm$m
tao <- nm$tao
t.unc <- nm$t.unc
acceptance.c <- 0
if (type.c == 3) {
c.r <- rep(5, (K - 1))
}
if (type.c == 4) {
Epsilon <- rep(NA, iterations)
}
cat(c("Iterating...", "\n"), sep = "")
Lambda <- matrix(NA, nrow = iterations, ncol = K)
U <- matrix(NA, nrow = iterations, ncol = K - 1)
C <- matrix(NA, nrow = iterations, ncol = K - 1)
lambda.r <- rep(0.1, K)
pb <- dplyr::progress_estimated(iterations)
for(j in 1:iterations) {
pb$tick()$print()
u.r <- UpdU(alpha, beta, c.r, lambda.r)
lambda.r <- UpdLambda(alpha, beta, c.r, u.r, n, m)
if (type.c == 3 || type.c == 4) {
if (type.c == 4) {
epsilon <- rgamma(1, shape = a.eps + K, scale = 1 / (b.eps + sum(c.r)))
}
auxc.r <- GaUpdC(alpha, beta, c.r, lambda.r, u.r, epsilon, c.nu, acceptance.c)
c.r <- auxc.r[[1]]
acceptance.c <- auxc.r[[2]]
}
Lambda[j, ] <- lambda.r
U[j, ] <- u.r
C[j, ] <- c.r
if (type.c == 4) Epsilon[j] <- epsilon
}
Lambda <- Lambda[seq(burn.in + 1, iterations, thinning), ]
U <- U[seq(burn.in + 1, iterations, thinning), ]
C <- C[seq(burn.in + 1, iterations, thinning), ]
if (type.c == 4) Epsilon <- Epsilon[seq(burn.in + 1, iterations, thinning)]
writeLines(c("Done.", "\n", "Generating survival function estimates.", "\n"),
sep = "")
rows <- nrow(Lambda)
s <- max(tao) * seq.int(0,100) / 100
X <- as.matrix(unname(Lambda))
pb <- dplyr::progress_estimated(length(s))
S <-purrr::map(s,function(s = .x){
pb$tick()$print()
do.call(base::c,purrr::map(seq_len(rows) ,.f= ~exp(-sum((s > tao[-1]) * tao[-1] * X[.x,] +
(s > tao[-length(tao)] & s <= tao[-1]) * s * X[.x,] -
(s > tao[-length(tao)]) * tao[-(length(tao))] * X[.x,])
)))
})
cat(c("Done.", "\n"), sep = "")
if (printtime) {
cat(">>> Total processing time (sec.):\n")
print(procTime <- proc.time() - tInit)
}
if(type.c == 4) {
X = tibble::enframe(list(Lambda = Lambda,
U = U, C = C, Epsilon = Epsilon))} else {
X = tibble::enframe(list(Lambda = Lambda, U = U, C = C))
}
out <- tibble::enframe(list(times = times, delta = delta, type.t = type.t, tao = tao, K = K,
t.unc = t.unc, iterations = rows, simulations = X, s = s,
acceptance = acceptance.c/((K-1)*iterations), S = S))
out
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/GaMRes.R
|
#' Diagnosis plots for Lambda, U, C and Epsilon
#'
#' Diagnostics plots for hazard rate (Lambda), latent variable (U), dependence
#' parameter (C) and the parameter of the hierarchical prior (Epsilon).
#'
#' This function returns a diagnostics plot for which the chain of the selected
#' variable can be monitored. Diagnostics includes trace, ergodic mean,
#' autocorrelation function and histogram.
#'
#' @param M List. Contains the output
#' by \code{GaMRes}.
#' @param variable Either "Lambda", "U", "C" or "Epsilon". Variable for which
#' informative plot will be shown.
#' @param pos Positive integer. Position of the selected \code{variable} to be
#' plotted.
#' @seealso \link{GaMRes}
#' @references - Nieto-Barajas, L. E. & Walker, S. G. (2002). Markov beta and
#' gamma processes for modelling hazard rates. \emph{Scandinavian Journal of
#' Statistics} \strong{29}: 413-424.
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' # data(gehan)
#' # timesG <- gehan$time[gehan$treat == "6-MP"]
#' # deltaG <- gehan$cens[gehan$treat == "6-MP"]
#' # GEX1 <- GaMRes(timesG, deltaG, K = 8, iterations = 3000)
#' # GaPlotDiag(GEX1, variable = "Lambda", pos = 2)
#' # GaPlotDiag(GEX1, variable = "U", pos = 5)
#'
#' ## Example 2
#' # data(leukemiaFZ)
#' # timesFZ <- leukemiaFZ$time
#' # deltaFZ <- leukemiaFZ$delta
#' # GEX2 <- GaMRes(timesFZ, deltaFZ, type.c = 4)
#' # GaPlotDiag(GEX2, variable = "Lambda", pos = 2)
#' # GaPlotDiag(GEX2, variable = "U", pos = 3)
#'
#'
#'
#' @export GaPlotDiag
GaPlotDiag <-
function(M, variable = "Lambda", pos = 1) {
variable <- match.arg(variable,c("Lambda","U","C","Epsilon"))
K <- extract(M,"K")
if (pos < 0 || pos > K ) {
stop ("Invalid position.")
}
if (pos > (K - 1) && (variable == "U" || variable == "C")) {
stop ("Invalid position.")
}
if (!("Epsilon" %in% (M %>% extract(c("simulations")) %>% dplyr::pull(name))) && variable == "Epsilon"){
stop("Plots for 'epsilon' are not available.")
}
if (variable == "Epsilon" && pos != 1) {
warning("'epsilon' has only one entry (1). Graphics shown for epsilon_1.")
pos <- 1
}
MAT <- M %>% extract(c("simulations",variable))
if(variable %in% c("Epsilon")){
pos = 1
MAT <- matrix(MAT, nrow = M %>% extract("iterations"), ncol = 1) %>% tibble::as_tibble()
} else{
MAT %<>% tibble::as_tibble() %>% dplyr::select(pos) %>% rlang::set_names("V1")
}
var <- switch(variable, Lambda = expression(lambda),
Epsilon = expression(epsilon),
U = "U",
C = "C")
if(variable %in% c("Epsilon")) title <- "" else{title <- paste0("Position: ", pos)}
a <- MAT %>% ggplot2::ggplot() + ggplot2::geom_line(ggplot2::aes(x=seq_len(nrow(MAT)), y = V1), color = "slateblue4") +
ggplot2::labs(x = "Iteration", y = variable) + ggplot2::ylab(var) + ggplot2::ggtitle("Trace")+
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
b <- MAT %>% ggplot2::ggplot() + ggplot2::geom_line(ggplot2::aes(x=seq_len(nrow(MAT)), y = cumsum(V1)/seq_len(nrow(MAT))), color = "slateblue4") +
ggplot2::labs(x = "Iteration", y = variable) + ggplot2::ggtitle("Ergodic mean") +
ggplot2::ylab(var) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
acf.aux <- acf(MAT, plot = F)
c <- cbind(acf.aux$lag, acf.aux$acf) %>% tibble::as_tibble() %>% ggplot2::ggplot() +
ggplot2::geom_segment(ggplot2::aes(x = V1, xend = V1, y = V2, yend = 0)) + ggplot2::labs(x = "Lag", y ="ACF")+
ggplot2::ggtitle("Autocorrelation function") +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
d <- MAT %>% ggplot2::ggplot() + ggplot2::geom_histogram(ggplot2::aes(x = V1), fill = "lightblue", color = "black", bins = 30) +
ggplot2::ggtitle("Histogram") + ggplot2::xlab(var) + ggplot2::ylab("") + #coord_flip() +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"))
gridExtra::grid.arrange(a,b,c,d, top = title)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/GaPlotDiag.R
|
#' Plots for the Hazard and Survival Function Estimates
#'
#' Plots the hazard function along with the survival function
#' estimates defined by the Markov gamma process with and without covariates
#' (Nieto-Barajas & Walker, 2002).
#'
#' This function returns plots of the hazard rate estimates as computed
#' by \link{GaMRes} and \link{CGaMRes}, together with the Nelson-Aalen
#' estimate and its confidence intervals for the given data set.
#' Additionally, it plots the survival function and the Kaplan-Meier estimate
#' with their corresponding credible/confidence intervals.
#'
#' @param M tibble. Contains the output by \code{CGaMRes} and \code{GaMRes}.
#' @param type.h character. "segment"= use segments to plot hazard rates,
#' "line" = link hazard rates by a line
#' @param addSurvival Logical. If \code{TRUE}, Nelson-Aalen estimate is plotted
#' over the hazard function and Kaplan-Meier estimate is plotted over the
#' survival function.
#' @param intervals logical. If TRUE, plots confidence bands for the selected functions including Nelson-Aalen and/or Kaplan-Meier estimate.
#' @param confidence Numeric. Confidence level.
#' @param summary Logical. If \code{TRUE}, a summary for hazard and survival
#' functions is returned as a tibble.
#' @return \item{SUM.h}{Numeric tibble. Summary of the mean, median, and a
#' \code{confidence}-level credible interval for each segment of the hazard
#' function. Returned if \code{summary = TRUE}.} \item{SUM.S}{Numeric tibble. Summary of
#' the mean, median, and a \code{confidence}-level credible interval for a grid
#' of the survival function. Returned if \code{summary = TRUE}.}
#' @seealso \link{GaMRes}, \link{CGaMRes}, \link{CGaPlotDiag},
#' \link{GaPlotDiag}
#' @references - Nieto-Barajas, L. E. (2003). Discrete time Markov gamma
#' processes and time dependent covariates in survival analysis. \emph{Bulletin
#' of the International Statistical Institute 54th Session}. Berlin. (CD-ROM).
#'
#' - Nieto-Barajas, L. E. & Walker, S. G. (2002). Markov beta and gamma
#' processes for modelling hazard rates. \emph{Scandinavian Journal of
#' Statistics} \strong{29}: 413-424.
#' @examples
#'
#'
#'
#' ## Simulations may be time intensive. Be patient.
#'
#' ## Example 1
#' # data(gehan)
#' # timesG <- gehan$time[gehan$treat == "6-MP"]
#' # deltaG <- gehan$cens[gehan$treat == "6-MP"]
#' # GEX1 <- GaMRes(timesG, deltaG, K = 8, iterations = 3000)
#' # GaPloth(GEX1)
#'
#'
#' ## Example 2
#' # data(leukemiaFZ)
#' # timesFZ <- leukemiaFZ$time
#' # deltaFZ <- leukemiaFZ$delta
#' # GEX2 <- GaMRes(timesFZ, deltaFZ, type.c = 4)
#' # GaPloth(GEX2)
#'
#'
#'
#'
#'
#' @export GaPloth
GaPloth <-
function(M, type.h = "segment", addSurvival = T, intervals = T,
confidence = 0.95, summary = FALSE) {
SUM <- LambdaSumm(M, confidence)
s <- SUM %>% tibble::deframe()
v <- list("tao",
"K",
"times",
"delta"
) %>% purrr::map(~extract(M,.x)) %>% rlang::set_names(c("tao","K","times","delta"))
tao <- v$tao
K <- v$K
delta <- v$delta
times <- v$times
if(type.h == "segment") {
h <- s$SUM.h %>% ggplot2::ggplot() +
ggplot2::geom_segment(ggplot2::aes(x = tao[-(K+1)], xend = tao[-1],
y = mean, yend = mean, color = "Hazard Function")) +
ggplot2::scale_color_manual(values = c("black"), limits = "Hazard Function") +
ggplot2::guides(color = ggplot2::guide_legend(title = "")) +
ggplot2::xlab("Time") +ggplot2::ylab("Hazard rate") + ggplot2::scale_alpha_continuous(guide = F) +
ggplot2::ggtitle(paste0("Estimate of hazard rates with intervals at ",confidence * 100,"% of credibility")) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"),
legend.position="bottom")
if(intervals){
h <- h + ggplot2::geom_errorbar(ggplot2::aes(ymin = lower, ymax = upper, x = (tao[-(K+1)] + tao[-1])/2, width = tao[-1]-tao[-(K+1)]),
alpha = 0.5, color = "gray50")
}
}
if(type.h == "line"){
h <- s$SUM.h %>% ggplot2::ggplot() +
ggplot2::geom_line(ggplot2::aes(x = (tao[-(K+1)] + tao[-1])/2, y = mean, color = "Hazard Function")) +
ggplot2::scale_color_manual(values = c("black"), limits = "Hazard Function") +
ggplot2::guides(color = ggplot2::guide_legend(title = "")) +
ggplot2::xlab("Time") + ggplot2::ylab("Hazard rate") + ggplot2::scale_alpha_continuous(guide = F) +
ggplot2::ggtitle(paste0("Estimate of hazard rates with intervals at ",confidence * 100,"% of credibility")) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"),
legend.position="bottom")
if(intervals){
h <- h + ggplot2::geom_ribbon(ggplot2::aes(x = (tao[-(K+1)] + tao[-1])/2, ymin = lower, ymax = upper), alpha = .5, fill = "gray70")
}
}
S <- s$SUM.S %>% ggplot2::ggplot() + ggplot2::geom_line(ggplot2::aes(x = t, y = `S^(t)`,color = "Model estimate")) +
ggplot2::scale_color_manual(limits = c("Model estimate"),values = c("black")) +
ggplot2::guides(color = ggplot2::guide_legend(title = "")) +
ggplot2::scale_y_continuous(limits = c(0,1)) +
ggplot2::ggtitle(paste0("Estimate of Survival Function with intervals at ", confidence * 100,"% of credibility")) +
ggplot2::labs(x = "t",
y = expression(S^{(t)})) +
ggthemes::theme_tufte() +
ggplot2::theme(axis.line = ggplot2::element_line(colour = "black"),
legend.position = "bottom")
if(intervals){
S <- S + ggplot2::geom_ribbon(ggplot2::aes(x = t, ymin = lower, ymax = upper), fill = "gray50", alpha = 0.3)
}
if(addSurvival){
fit <- survival::survfit(survival::Surv(time = times, event = delta) ~ 1,
conf.int = confidence)
km.data <- tibble::tibble(time = fit$time,surv = fit$surv, lower = fit$lower,
upper = fit$upper)
if(km.data$time[1]!= 0){
km.data <- dplyr::bind_rows(tibble::tibble(time = 0, surv = 1, lower = NA, upper = NA),km.data)
}
na.data <- tibble::tibble(time = fit$time, h.est = fit$n.event / fit$n.risk)
h <- h + ggplot2::geom_point(data = na.data, ggplot2::aes(x = time, y = h.est), color = "#b22222") +
ggplot2::scale_color_manual(limits = c("Hazard Function","Nelson-Aalen based estimate"),
values = c("black","#b22222"))
S <- S + ggplot2::geom_step(data = km.data,na.rm = T, ggplot2::aes(x = time,y = surv), color = "#b22222") +
ggplot2::scale_color_manual(limits = c("Model estimate","Kaplan Meier"),
values = c("black","#b22222"))
if(intervals){
S <- S + ggplot2::geom_step(data = km.data, na.rm = T, ggplot2::aes(x = time, y = lower), alpha = 0.5, color = "#b22222", linetype = "dashed") +
ggplot2::geom_step(data = km.data, na.rm = T, ggplot2::aes(x = time, y = upper), alpha = 0.5, color = "#b22222", linetype = "dashed")
}
}
if (summary == TRUE) {
return(list(h,S,SUM))
} else{
return(list(h,S))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/GaPloth.R
|
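# Metropolis-Hastings update for the dependence parameters c_k (not exported).
# A gamma proposal centred at the current value is drawn for each c_k, the
# log-posterior (plus proposal-density) ratio is computed, and a proposal is
# accepted when log(u) <= ratio; the running acceptance count is returned
# together with the updated c.r.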
GaUpdC <-
function(alpha, beta, c.r, lambda.r, u.r, epsilon, nu, acceptance.c) {
K <- length(lambda.r)
c.str <- rgamma(K-1, shape = nu,
rate = nu/c.r)
lw.c.str <- (alpha[seq_len(K-1) + 1] + u.r[seq_len(K-1)]) * log(beta[seq_len(K-1) + 1] + c.str) +
u.r[seq_len(K-1)]*log(c.str) - (lambda.r[seq_len(K-1) + 1] + lambda.r[seq_len(K-1) ] + 1/epsilon)*c.str +
log(dgamma(x = c.r, shape = nu, rate = nu/c.str))
lw.c.r <- (alpha[seq_len(K-1) + 1] + u.r[seq_len(K-1)]) * log(beta[seq_len(K-1) + 1] + c.r) +
u.r[seq_len(K-1)]*log(c.r) - (lambda.r[seq_len(K-1) + 1] + lambda.r[seq_len(K-1) ] + 1/epsilon)*c.r +
log(dgamma(x = c.str, shape = nu, rate = nu/c.r))
ratio <- lw.c.str - lw.c.r
unifs <- runif(K-1)
criteria <- log(unifs) <= ratio
c.r[criteria] <- c.str[criteria]
acceptance.c <- acceptance.c + sum(criteria)
return(list(c.r, acceptance.c))
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/GaUpdC.R
|
LambdaSumm <-
function(M, confidence = 0.95) {
if (confidence <= 0 || confidence >= 1) {
stop ("Invalid parameter: confidence must be between 0 and 1.")
}
v <- list("K",
"iterations",
"s",
"S",
c("simulations","Lambda")
) %>% purrr::map(~extract(M,.x)) %>% rlang::set_names(c("K","iterations","s","S","Lambda"))
K <- v$K
iterations <- v$iterations
pr <- (1 - confidence) / 2
S <- v$S
SUM.h <- tibble::tibble(a=seq_len(K),
b=v$Lambda %>% tibble::as_tibble() %>% purrr::map_dbl(mean),
c=v$Lambda %>% tibble::as_tibble() %>% purrr::map_dbl(quantile, probs = pr),
d=v$Lambda %>% tibble::as_tibble() %>% purrr::map_dbl(quantile, probs = 0.5),
e=v$Lambda %>% tibble::as_tibble() %>% purrr::map_dbl(quantile, probs = 1 - pr)
) %>% rlang::set_names(c("k", "mean", "lower", "median", "upper"))
SUM.S <- tibble::tibble(a=v$s,
b=v$S %>% purrr::map_dbl(mean),
c=v$S %>% purrr::map_dbl(quantile, probs = pr),
d=v$S %>% purrr::map_dbl(quantile, probs = 0.5),
e=v$S %>% purrr::map_dbl(quantile, probs = 1-pr)) %>%
rlang::set_names(c("t", "S^(t)", "lower", "median", "upper"))
out <- list(SUM.h = SUM.h, SUM.S = SUM.S) %>% tibble::enframe()
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/LambdaSumm.R
|
NM <-
function(times, delta, type.t, K, utao) {
tao <- Tao(times, delta, type.t, K, utao)
t.unc <- sort(times[delta == 1])
n <- readr::parse_integer(as.character(table(cut(t.unc,breaks = tao))))
w <- purrr::map(times, .f= ~ (.x > tao[-1]) * tao[-1] +
(.x > tao[-length(tao)] & .x <= tao[-1]) * .x -
(.x > tao[-length(tao)]) * tao[-(length(tao))])
m <- purrr::reduce(w,`+`)
out <- list(n = n, m = m, tao = tao, t.unc = t.unc)
return(out)
}
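# Sketch of the quantities returned by NM(): 'n' counts the exact failure times
# falling in each interval of 'tao', while 'm' accumulates the total time at
# risk (exposure) that all observations, censored or not, contribute to each
# interval.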
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/NM.R
|
PiSumm <-
function(M, confidence = 0.95) {
if (confidence <= 0 || confidence >= 1) {
stop ("Invalid parameter: confidence must be between 0 and 1.")
}
v <- list("K",
"tao",
"iterations",
"s",
"S",
c("simulations","PI")
) %>% purrr::map(~extract(M,.x)) %>% rlang::set_names(c("K","tao","iterations","s","S","PI"))
K <- v$K
iterations <- v$iterations
pr <- (1 - confidence) / 2
S <- v$S
SUM.h <- tibble::tibble(a=seq_len(K),
b=v$PI %>% tibble::as_tibble() %>% purrr::map_dbl(mean),
c=v$PI %>% tibble::as_tibble() %>% purrr::map_dbl(quantile, probs = pr),
d=v$PI %>% tibble::as_tibble() %>% purrr::map_dbl(quantile, probs = 0.5),
e=v$PI %>% tibble::as_tibble() %>% purrr::map_dbl(quantile, probs = 1 - pr)
) %>% rlang::set_names(c("k", "mean", "lower", "median", "upper"))
SUM.S <- tibble::tibble(a=c(0,v$s),
b=c(1,v$S %>% purrr::map_dbl(mean)),
c=c(1,v$S %>% purrr::map_dbl(quantile, probs = pr)),
d=c(1,v$S %>% purrr::map_dbl(quantile, probs = 0.5)),
e=c(1,v$S %>% purrr::map_dbl(quantile, probs = 1-pr))) %>%
rlang::set_names(c("t", "S^(t)", "lower", "median", "upper"))
out <- list(SUM.h = SUM.h, SUM.S = SUM.S) %>% tibble::enframe()
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/PiSumm.R
|
Tao <-
function(times, delta, type.t, K, utao) {
t.unc <- sort(times[delta == 1])
if (type.t == 1) {
n <- length(t.unc)
if (n > K) {
tao <- c(0, quantile(x = t.unc, probs = (1:(K-1)) / (K-1), names = FALSE), max(times))
if (type.t == 1 && length(unique(tao)) != length(tao)) {
warning("Too many repeated observations. Zero-length intervals may
appear.")
}
} else {
stop (paste("The partition length (", K,") must be smaller than the number
of uncensored times (", n, ").", sep = ""))
}
}
if (type.t == 2) {
tao <- utao
}
if (type.t == 3) {
K.t2 <- ceiling(max(times))
tao <- seq(0, K.t2, K.t2 / K)
}
return(tao)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/Tao.R
|
UpdLambda <-
function(alpha, beta, c.r, u.r, n, m) {
K <- length(alpha)
tol <- 1e-7
lambda.r <- rgamma(K,shape = c(alpha[1] + u.r[1] + n[1],
alpha[seq_len(K-2) + 1] + u.r[seq_len(K-2)] + u.r[seq_len(K-2) + 1] + n[seq_len(K-2) + 1],
alpha[K] + u.r[K - 1] + n[K]),
scale = c(1 / (beta[1] + c.r[1] + m[1]),
1/(beta[seq_len(K-2) + 1] + c.r[seq_len(K-2)] + c.r[seq_len(K-2) + 1] + m[seq_len(K-2) + 1]),
1 / (beta[K] + c.r[K - 1] + m[K]))
)
lambda.r[abs(lambda.r - 0) < tol] <- 0.000001
return(lambda.r)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/UpdLambda.R
|
UpdPi <-
function(alpha, beta, c.r, u.r, n, m) {
K <- length(alpha)
a <- c(alpha[1] + u.r[1] + n[1],
alpha[seq_len(K-2) + 1] + u.r[seq_len(K-2)] + u.r[seq_len(K-2) + 1] + n[seq_len(K-2) + 1],
alpha[K] + u.r[K - 1] + n[K])
b <- c(beta[1] + c.r[1] - u.r[1] + m[1],
beta[seq_len(K-2) + 1] + c.r[seq_len(K-2)] - u.r[seq_len(K-2)] + c.r[seq_len(K-2) + 1] - u.r[seq_len(K-2) + 1] + m[seq_len(K-2) + 1],
beta[K] + c.r[K - 1] - u.r[K - 1] + m[K])
Pi.r <- purrr::map_dbl(rbeta(K,shape1 = a, shape2 = b),~min(max(0.001, .x), 0.999))
return(Pi.r)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/UpdPi.R
|
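# Gibbs update for the latent variables u_k (not exported). When c_k = 0 the
# latent variable is degenerate at 0; otherwise u_k is sampled from a discrete
# full conditional supported on 0, 1, ..., 100, with weights assembled on the
# log scale and then exponentiated.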
UpdU <-
function(alpha, beta, c.r, lambda.r) {
uk <- 100
ind <- which(c.r != 0)
u <- rep(0,length(c.r))
if(length(ind)>0){
aux_u <- purrr::map(ind,function(k) {
exp(seq.int(0,uk) * (log(c.r[k]) + log(c.r[k] + beta[k + 1]) +
log(lambda.r[k]) + log(lambda.r[k + 1])) -
(lgamma(seq.int(0,uk)+1) + lgamma(alpha[k + 1] + seq.int(0,uk))))
})
u[ind] <- purrr::map_dbl(.x = aux_u, .f=~sample(x=seq.int(0,uk), size=1, prob=.x))
}
return(u)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/UpdU.R
|
# Not exported
acceptance_rate <- function(.x) {
len_x <- length(.x)
s <- sum(.x[1:(len_x-1)] == .x[2:len_x])
return(1 - s/(len_x - 1))
}
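# Example: in the chain c(1, 1, 2, 2, 3) two of the four transitions repeat the
# previous value, so acceptance_rate(c(1, 1, 2, 2, 3)) returns 1 - 2/4 = 0.5.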
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/acceptance_rate.R
|
#' Conditional Predictive Ordinate (CPO) Statistic
#'
#' Makes the CPO Plot and calculates the logarithm of the Pseudomarginal
#' likelihood (LPML).
#'
#' Computes the CPO as a goodness-of-fit measure.
#'
#' @param res tibble. The output from the *Res functions, where * could either
#' be BeM, GaM, CGaM, CuM, CCuM
#' @return
#' \item{LPML}{The value of the logarithm of the Pseudomarginal likelihood}
#' \item{plot}{CPO Plot}
#' @references See Geisser (1993); Gelfand, Dey, and Chang (1992); Dey, Chen,
#' and Chang (1997); and Sinha and Dey (1997)
#' @examples
#'
#'
#'
#' ## Example 1
#' # data(gehan)
#' # timesG <- gehan$time[gehan$treat == "6-MP"]
#' # deltaG <- gehan$cens[gehan$treat == "6-MP"]
#' # GEX1 <- GaMRes(timesG, deltaG, K = 8, iterations = 3000)
#' # cpo(GEX1)
#'
#'
#'
#' @export cpo
cpo <- function(res){
  # Extract the variables needed to compute the CPO
aux <- res %>% extract("simulations") %>% dplyr::pull(name)
if("Lambda" %in% aux) parameter <- "Lambda" else parameter <- "PI"
aux <- list("times",
"delta",
"K",
"tao",
c("simulations",parameter),
"s",
"S"
) %>% purrr::map(~res %>% extract(.x)) %>% rlang::set_names("times","delta","k","tao","lambda","s","S")
if(length(aux$S) == 1) aux$S <- aux$S %>% purrr::pluck(1)
if(length(aux$lambda) == 1) aux$lambda <- aux$lambda %>% purrr::pluck(1)
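  # Each CPO_i is the harmonic mean, over the retained MCMC draws, of the
  # likelihood contribution of observation i: hazard times survival for exact
  # times, and the survival value alone for right-censored times.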
uncensored <- aux$times[aux$delta == 1] %>%
cut(aux$tao,labels = F,include.lowest = T) %>%
purrr::map2_dbl(.y = aux$times[aux$delta == 1] %>%
cut(aux$s,labels = F,include.lowest = T),
~(mean(((tibble::as_tibble(aux$lambda) %>% dplyr::pull(.x)) * (aux$S %>% purrr::pluck(.y)))^(-1)))^(-1)
)
censored <- aux$times[aux$delta == 0] %>%
cut(aux$s,labels = F,include.lowest = T) %>%
purrr::map_dbl(~mean(aux$S[[.x]]^(-1))^(-1) )
g <- tibble::tibble(times=c(aux$times[aux$delta == 1],
aux$times[aux$delta == 0]), cpo = c(uncensored,censored)) %>%
ggplot2::ggplot(ggplot2::aes(x=times,y=cpo))+ ggplot2::geom_point()
lpml <- c(uncensored,censored) %>% log %>% sum
return(list(LPML = lpml,plot = g))
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/cpo.R
|
#' Recurrent infection of kidney catheters
#'
#' Data on the recurrent times to infection, at the point of insertion of the
#' catheter, for kidney patients using portable dialysis equipment. Catheters
#' may be removed for reasons other than infection, in which case the
#' observation is censored. Each patient has exactly 2 observations. Only sex
#' was kept as an explanatory variable.
#'
#' @format A data frame with 38 rows and 6 variables:
#' \describe{
#' \item{id}{patient ID}
#' \item{t1,t2}{times to infection}
#' \item{delta1,delta2}{censorship indicators (1 = exact, 0 = right-censored)}
#' \item{sex}{0 = female, 1 = male}
#' }
#' @source \url{https://www.mayo.edu/research/documents/kidneyhtml/doc-10027569}
"KIDNEY"
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/data.R
|
extract <- function(tb, dir){
for(i in seq_along(dir)){
tb %<>% dplyr::filter(name == !!rlang::parse_expr("dir[i]")) %>% dplyr::pull(value) %>% .[[1]]
}
return(tb)
}
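# Usage sketch: 'extract' walks the name/value pairs of an enframed list, so
# extract(M, "K") returns the scalar K and
# extract(M, c("simulations", "Lambda")) returns the Lambda chain, where M is
# the tibble produced by GaMRes() or CuMRes().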
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/extract.R
|
# Nothing here is exported
# summaries_omega ---------------------------------------------------------
summaries_omega <- function(bsbhaz_omega) {
individuals <- nrow(bsbhaz_omega)
iter <- ncol(bsbhaz_omega)
means <- vector(mode = "double", length = individuals)
prob_low <- vector(mode = "double", length = individuals)
prob_high <- vector(mode = "double", length = individuals)
acc_rate <- vector(mode = "double", length = individuals)
for (i in 1:individuals) {
means[[i]] <- mean(bsbhaz_omega[i, ])
probs <- stats::quantile(bsbhaz_omega[i, ], probs = c(.025, .975))
prob_low[[i]] <- probs[[1]]
prob_high[[i]] <- probs[[2]]
acc_rate[[i]] <- acceptance_rate(bsbhaz_omega[i, ])
}
data.frame("Individual" = 1:individuals,
"Mean" = means,
"Prob. Low 95%" = prob_low,
"Prob. High 95%" = prob_high,
"Acceptance Rate" = acc_rate,
check.names = FALSE)
}
# summaries_lambda --------------------------------------------------------
summaries_lambda <- function(bsbhaz_lambda) {
intervals <- nrow(bsbhaz_lambda)
iter <- ncol(bsbhaz_lambda)
means <- vector(mode = "double", length = intervals)
prob_low <- vector(mode = "double", length = intervals)
prob_high <- vector(mode = "double", length = intervals)
for (i in 1:intervals) {
means[[i]] <- mean(bsbhaz_lambda[i, ])
probs <- stats::quantile(bsbhaz_lambda[i, ], probs = c(.025, .975))
prob_low[[i]] <- probs[[1]]
prob_high[[i]] <- probs[[2]]
}
data.frame("Interval" = 1:intervals,
"Mean" = means,
"Prob. Low 95%" = prob_low,
"Prob. High 95%" = prob_high,
check.names = FALSE)
}
# summaries_gamma ---------------------------------------------------------
summaries_gamma <- function(bsbhaz_gamma) {
iter <- ncol(bsbhaz_gamma)
means <- mean(bsbhaz_gamma[1, ])
probs <- stats::quantile(bsbhaz_gamma[1, ], probs = c(.025, .975))
prob_low <- probs[[1]]
prob_high <- probs[[2]]
acc_rate <- acceptance_rate(bsbhaz_gamma[1, ])
data.frame("Gamma" = "Gamma",
"Mean" = means,
"Prob. Low 95%" = prob_low,
"Prob. High 95%" = prob_high,
"Acceptance Rate" = acc_rate,
check.names = FALSE)
}
# summaries_theta ---------------------------------------------------------
summaries_theta <- function(bsbhaz_theta) {
predictors <- nrow(bsbhaz_theta)
iter <- ncol(bsbhaz_theta)
means <- vector(mode = "double", length = predictors)
prob_low <- vector(mode = "double", length = predictors)
prob_high <- vector(mode = "double", length = predictors)
acc_rate <- vector(mode = "double", length = predictors)
for (i in 1:predictors) {
means[[i]] <- mean(bsbhaz_theta[i, ])
probs <- stats::quantile(bsbhaz_theta[i, ], probs = c(.025, .975))
prob_low[[i]] <- probs[[1]]
prob_high[[i]] <- probs[[2]]
acc_rate[[i]] <- acceptance_rate(bsbhaz_theta[i, ])
}
data.frame("Predictor" = rownames(bsbhaz_theta),
"Coefficient Mean" = means,
"Prob. Low 95%" = prob_low,
"Prob. High 95%" = prob_high,
"Acceptance Rate" = acc_rate,
check.names = FALSE)
}
# summaries_surv ----------------------------------------------------------
summaries_surv <- function(bsbhaz_surv) {
times <- nrow(bsbhaz_surv)
iter <- ncol(bsbhaz_surv)
means <- vector(mode = "double", length = times)
prob_low <- vector(mode = "double", length = times)
prob_high <- vector(mode = "double", length = times)
for (i in 1:times) {
means[[i]] <- mean(bsbhaz_surv[i, ])
probs <- stats::quantile(bsbhaz_surv[i, ], probs = c(.025, .975))
prob_low[[i]] <- probs[[1]]
prob_high[[i]] <- probs[[2]]
}
data.frame("t" = rownames(bsbhaz_surv),
"S(t)" = means,
"Prob. Low 95%" = prob_low,
"Prob. High 95%" = prob_high,
check.names = FALSE)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/get_summaries.R
|
# Nothing here is exported
# partition ---------------------------------------------------------------
# Creates the time interval partition
partition <- function(t, int_len) {
last <- ifelse(max(t) %% int_len == 0, max(t), max(t) + int_len)
seq(from = 0, to = last, by = int_len)
}
# partition_location ------------------------------------------------------
# Returns the interval of the partition in which t is located
partition_location <- function(t, partition) {
lt <- length(t)
t_loc <- rep(0, times = lt)
for (i in 1:lt) {
j <- 1
while(t[i] > partition[j + 1] & j < length(partition)) {
j <- j + 1
}
t_loc[i] <- j
}
return(t_loc)
}
# partition_count ---------------------------------------------------------
# Count how many times are at each interval
partition_count <- function(t, partition) {
t_loc <- partition_location(t, partition)
counts <- rep(0, times = (length(partition) - 1))
for (i in 1:length(counts)) {
counts[i] <- sum(t_loc == i)
}
return(counts)
}
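# Example: partition_location(c(0.5, 1.7), c(0, 1, 2, 3)) returns c(1, 2), and
# partition_count(c(0.5, 1.7, 1.9), c(0, 1, 2, 3)) returns c(1, 2, 0).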
# cum_h -------------------------------------------------------------------
# Compute the cumulative hazard H(t)
cum_h <- function(t, partition, lambda) {
cumhaz <- vector(mode = "double", length = length(t))
int_len <- partition[2] - partition[1]
index <- partition_location(t, partition)
for (i in 1:length(t)) {
loc <- index[i]
if (loc > length(lambda)) {
loc <- length(lambda)
}
cum <- 0
if (loc > 1) {
cum <- sum(lambda[1:(loc - 1)]) * int_len
}
cumhaz[i] <- cum + (t[i] - partition[loc]) * lambda[loc]
}
return(cumhaz)
}
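# Worked example (piecewise-constant hazard): with partition c(0, 1, 2, 3) and
# lambda c(0.5, 1, 2), the cumulative hazard at t = 2.5 is
# 0.5 * 1 + 1 * 1 + 2 * 0.5 = 2.5, i.e. cum_h(2.5, c(0, 1, 2, 3), c(0.5, 1, 2))
# returns 2.5.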
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/miscellaneous.R
|
# Not exported
new_BSBHaz <- function(l = list(),
individuals = integer(),
intervals = integer(),
has_predictors = logical(),
samples = integer(),
int_len = double()) {
stopifnot(is.list(l))
stopifnot(is.integer(individuals))
stopifnot(is.integer(intervals))
stopifnot(is.logical(has_predictors))
stopifnot(is.integer(samples))
structure(l,
class = "BSBHaz",
individuals = individuals,
intervals = intervals,
has_predictors = has_predictors,
samples = samples,
int_len = int_len
)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/new_BSBHaz.R
|
# Not exported
# Creates an object of class 'BSBinit'
new_BSBinit <- function(l = list(),
individuals = integer(),
intervals = integer(),
has_predictors = logical()) {
stopifnot(is.list(l))
stopifnot(is.integer(individuals))
stopifnot(is.integer(intervals))
stopifnot(is.logical(has_predictors))
structure(
l,
class = "BSBinit",
individuals = individuals,
intervals = intervals,
has_predictors = has_predictors
)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/new_BSBinit.R
|
plot_traceplots <- function(matrix) {
suppressMessages(
p <- matrix %>%
tibble::as_tibble(.name_repair = "unique") %>%
dplyr::mutate(individual = 1:dplyr::n()) %>%
tidyr::pivot_longer(cols = -.data$individual) %>%
dplyr::mutate(name = as.double(stringr::str_remove_all(.data$name, "\\."))) %>%
ggplot2::ggplot() +
ggplot2::aes(x = .data$name, y = .data$value) +
ggplot2::geom_line(color = "steelblue1") +
ggplot2::facet_wrap(~.data$individual, scales = "free_y") +
ggplot2::labs(x = "Iteration", y = "") +
ggplot2::theme_bw() +
ggplot2::theme(panel.grid = ggplot2::element_blank())
)
return(p)
}
plot_ergodic_means <- function(matrix) {
suppressMessages(
p <- matrix %>%
tibble::as_tibble(.name_repair = "unique") %>%
dplyr::mutate(individual = 1:dplyr::n()) %>%
tidyr::pivot_longer(cols = -.data$individual) %>%
dplyr::mutate(
name = as.double(stringr::str_remove_all(.data$name, "\\."))
) %>%
dplyr::group_by(.data$individual) %>%
dplyr::mutate(value = cumsum(.data$value)) %>%
dplyr::ungroup() %>%
dplyr::mutate(value = .data$value / .data$name) %>%
ggplot2::ggplot() +
ggplot2::aes(x = .data$name, y = .data$value) +
ggplot2::geom_line(color = "steelblue1") +
ggplot2::facet_wrap(~.data$individual, scales = "free_y") +
ggplot2::labs(x = "Iteration", y = "") +
ggplot2::theme_bw() +
ggplot2::theme(panel.grid = ggplot2::element_blank())
)
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/plot_diagnostics.R
|
plot_hazards <- function(df, int_len, y_lab) {
suppressMessages(
p <- df %>%
dplyr::select(
int = .data$Interval,
mean = .data$Mean,
low = .data$`Prob. Low 95%`,
high = .data$`Prob. High 95%`
) %>%
dplyr::mutate(
int_start = seq(from = 0, by = int_len, length.out = NROW(df)),
int_end = seq(from = 0 + int_len, by = int_len, length.out = NROW(df))
) %>%
ggplot2::ggplot(ggplot2::aes(x = .data$int_start)) +
ggplot2::geom_segment(
ggplot2::aes(y = .data$mean, xend = .data$int_end, yend = .data$mean),
color = "steelblue3",
size = 0.7
) +
ggplot2::geom_segment(
ggplot2::aes(y = .data$low, xend = .data$int_end, yend = .data$low),
lty = 2,
color = "lightcoral",
size = 0.5
) +
ggplot2::geom_segment(
ggplot2::aes(y = .data$high, xend = .data$int_end, yend = .data$high),
lty = 2,
color = "lightcoral",
size = 0.5
) +
ggplot2::scale_x_continuous(
breaks = seq(from = 0, by = 4*int_len, length.out = NROW(df)/4)
) +
ggplot2::labs(x = "t", y = y_lab) +
ggplot2::theme_bw() +
ggplot2::theme(panel.grid = ggplot2::element_blank())
)
return(p)
}
plot_survival <- function(df, int_len, y_lab) {
suppressMessages(
p <- df %>%
dplyr::select(
.data$t,
mean = .data$`S(t)`,
low = .data$`Prob. Low 95%`,
high = .data$`Prob. High 95%`
) %>%
dplyr::mutate(
t = as.double(.data$t),
t_end = c(.data$t[2:length(.data$t)], .data$t[[length(.data$t)]]),
y_start = .data$mean,
y_end = c(.data$mean[1:(length(.data$mean) - 1)], 0)
) %>%
dplyr::rename(t_start = .data$t) %>%
ggplot2::ggplot(ggplot2::aes(x = .data$t_start)) +
ggplot2::geom_segment(
ggplot2::aes(y = .data$y_start, xend = .data$t_end, yend = .data$y_end),
color = "steelblue1",
size = 0.7
) +
ggplot2::geom_segment(
ggplot2::aes(y = .data$low, xend = .data$t_end, yend = .data$low),
lty = 2,
color = "lightcoral",
size = 0.5
) +
ggplot2::geom_segment(
ggplot2::aes(y = .data$high, xend = .data$t_end, yend = .data$high),
lty = 2,
color = "lightcoral",
size = 0.5
) +
ggplot2::scale_x_continuous(
breaks = seq(from = 0, by = 4 * int_len, length.out = NROW(df)/4 + 1)
) +
ggplot2::labs(x = "t", y = y_lab) +
ggplot2::theme_bw() +
ggplot2::theme(panel.grid = ggplot2::element_blank())
)
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/plot_summaries.R
|
#' @export
print.BSBinit <- function(x, ...) {
stopifnot(inherits(x, "BSBinit"))
individuals <- attr(x, "individuals")
intervals <- attr(x, "intervals")
has_predictors <- attr(x, "has_predictors")
  if (has_predictors) {
    pred_names <- colnames(x$pred_matrix)
  } else {
    pred_names <- " "
  }
cat(
"\t\n",
sprintf("Individuals: %s\n", individuals),
sprintf("Time partition intervals: %s\n", intervals),
sprintf("Censored t1: %s\n", individuals - sum(x$delta1)),
sprintf("Censored t2: %s\n", individuals - sum(x$delta2)),
"Predictors:", as.character(has_predictors), "\t", pred_names
)
}
#' @export
print.BSBHaz <- function(x, ...) {
stopifnot(inherits(x, "BSBHaz"))
individuals <- attr(x, "individuals")
intervals <- attr(x, "intervals")
has_predictors <- attr(x, "has_predictors")
samples <- attr(x, "samples")
  if (has_predictors) {
    pred_names <- rownames(x$theta_mat)
  } else {
    pred_names <- " "
  }
cat(
"\t\n",
sprintf("Samples: %s\n", samples),
sprintf("Individuals: %s\n", individuals),
sprintf("Time partition intervals: %s\n", intervals),
"Predictors:", as.character(has_predictors), "\t", pred_names
)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/print.R
|
# Nothing here is exported
# log_density_gamma -------------------------------------------------------
# Computes the proportional log-density of gamma
log_density_gamma <- function(gamma, proposal, omega1, omega2, y, gamma_d) {
n <- length(omega1)
sum_y <- sum(y)
sum_omega <- sum(omega1) + sum(omega2)
pi <- (2 * n - 2) * (log(1 + proposal) - log(1 + gamma)) +
sum_y * (log(proposal) - log(gamma) + log(1 + proposal) - log(1 + gamma)) -
sum_omega * (proposal - gamma)
# q <- log(proposal) - log(gamma)
return(pi)
}
# sample_gamma ------------------------------------------------------------
# Samples from gamma posterior
sample_gamma <- function(gamma, omega1, omega2, y, gamma_d) {
# if (is.null(gamma_d)) gamma_d <- 2
if (is.null(gamma_d)) gamma_d <- gamma * 0.5
l1 <- max(0, gamma - gamma_d)
l2 <- gamma + gamma_d
# proposal <- stats::rgamma(n = 1, shape = gamma_d, rate = gamma_d / gamma)
proposal <- stats::runif(n = 1, min = min(l1, l2), max = max(l1, l2))
l_density <- log_density_gamma(gamma, proposal, omega1, omega2, y, gamma_d)
alpha <- min(exp(l_density), 1)
u <- stats::runif(n = 1)
out <- gamma + (proposal - gamma) * (u <= alpha)
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/sample_gamma.R
|
# Nothing here is exported
# lambda_restriction ------------------------------------------------------
#Computes the restriction imposed by a single individual. For use in purrr::map.
lambda_restriction <- function(t,
omega,
x,
part_loc,
t_partition_low,
lambda_index,
theta,
lambda,
part_len) {
omega_h <- omega * exp(-(x %*% theta))
if (lambda_index < part_loc) {
current_bound <- 0
if (part_loc > 1) {
for (k in 1:(part_loc - 1)) {
current_bound <-
current_bound + (lambda[k] * part_len) * (k != lambda_index)
}
}
current_bound <-
current_bound + lambda[part_loc] * (t - t_partition_low)
current_bound <- (omega_h - current_bound) / part_len
if (is.na(current_bound)) {
warning("Lambda bound failed")
current_bound <- 2
}
} else if (lambda_index == part_loc) {
current_bound <- 0
if (part_loc > 1) {
for (k in 1:(part_loc - 1)) {
current_bound <- current_bound + (lambda[k] * part_len)
}
}
current_bound <- (omega_h - current_bound) / (t - t_partition_low)
if (is.na(current_bound)) {
warning("Lambda bound failed")
current_bound <- 2
}
} else {
current_bound <- 2
}
return(current_bound)
}
# get_min_bound -----------------------------------------------------------
# Computes the most restrictive bound for a given lambda
get_min_bound <- function(t,
omega,
x,
part_loc,
t_partition_low,
l_index,
theta,
lambda,
part_len) {
  bounds <- purrr::pmap_dbl(
list(t, omega, x, part_loc, t_partition_low),
function(x1, x2, x3, x4, x5) {
lambda_restriction(x1, x2, x3, x4, x5, l_index, theta, lambda, part_len)
}
)
return(min(bounds))
}
# sample_lambda -----------------------------------------------------------
# Samples a single scalar observation. For use in purrr::pmap.
sample_lambda <- function(u1, u2, alpha, beta, c1, c2, min_bound, part_count) {
# u2 = 0 if lambda is the first interval
# c2 = c1 if lambda is not in the first interval
alpha_l <- alpha + u1 + u2 + part_count
beta_l <- beta + c1 + c2
if (min_bound < 1e-5) min_bound <- 1e-5
denominator <- stats::pgamma(min_bound * beta_l, shape = alpha_l, rate = 1)
if (denominator < 1e-5) denominator <- 1e-5
unif <- stats::runif(n = 1)
f <- function(x) {
stats::pgamma(x, shape = alpha_l, rate = 1) / denominator - unif
}
if (f(min_bound * beta_l) <= 0) {
solution <- min_bound * beta_l
} else {
solution <- stats::uniroot(f, lower = 0, upper = min_bound * beta_l)$root
}
lambda_prueba <- solution / beta_l
if (lambda_prueba < 1e-5) lambda_prueba <- 1e-5
return(lambda_prueba)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/sample_lambda.R
|
# Not exported
# Samples a single scalar observation. For use in purrr::pmap.
sample_omega <- function(omega, y, cum_h, x, theta, gamma, omega_d = NULL) {
if (is.null(omega_d)) omega_d <- y + 1
bound <- cum_h * exp(x %*% theta)
l1 <- max(bound, omega - omega_d)
l2 <- omega + omega_d
proposal <- stats::runif(n = 1, min = min(l1, l2), max = max(l1, l2))
if (omega <= bound) {
omega_out <- proposal
return(omega_out)
}
log_alpha <-
y * (log(proposal) - log(omega)) - (1 + gamma) * (proposal - omega)
prob <- min(exp(log_alpha), 1)
u <- stats::runif(n = 1)
omega_out <- omega + (proposal - omega) * (u <= prob)
return(omega_out)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/sample_omega.R
|
# Nothing here is exported
# sample_t ----------------------------------------------------------------
# Samples censored observations from distribution function. For use in pmap.
sample_t <- function(t_orig,
t_prev,
omega,
delta,
max_part,
x,
theta,
partition,
lambda) {
bound <- t_orig
if (delta == 1) {
return(t_orig)
}
u <- stats::runif(n = 1)
f <- function(var) {
cum_h(var, partition, lambda) * exp(x %*% theta) - u * omega
}
up <- max_part
while (f(up) < 0) {
up <- up + 10
}
proposal <- stats::uniroot(f, lower = 0 , upper = up)$root
out <-
t_prev + (proposal - t_prev) * (proposal > bound & proposal <= max_part)
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/sample_t.R
|
# Nothing here is exported
# theta_restriction -------------------------------------------------------
# Computes the restriction imposed on a single theta by a single individual
theta_restriction <- function(t, omega, cum_h, x, theta_index, theta) {
current_th <- theta[theta_index]
current_x <- x[theta_index]
if (current_x == 0) {
bound <- 1e4
return(bound)
}
out <-
(log(omega) - log(cum_h) - x %*% theta + current_th * current_x) / current_x
return(out)
}
# get_min_bound_theta -----------------------------------------------------
# Computes the most restrictive bound for a single theta
get_min_bound_theta <- function(theta_index, t, omega, cum_h, x, theta) {
bounds <- purrr::pmap_dbl(
list(t, omega, cum_h, x),
function(x1, x2, x3, x4) {
theta_restriction(
x1,
x2,
x3,
x4,
theta_index,
theta
)
}
)
return(min(bounds))
}
# sample_theta ------------------------------------------------------------
# Samples a single posterior observation. For use in purrr::pmap.
sample_theta <- function(bound, sum_x, theta, theta_d = NULL) {
if (is.null(theta_d)) theta_d <- 0.5 * theta
l1 <- theta - theta_d
l2 <- min(bound, theta + theta_d)
proposal <- stats::runif(n = 1, min = min(l1, l2), max = max(l1, l2))
if (theta > bound) {
out <- proposal
return(out)
}
l_rho <-
proposal * (2 * sum_x - 0.5 * proposal) - theta * (2 * sum_x - 0.5 * theta)
alpha <- min(exp(l_rho), 1)
u <- stats::runif(n = 1)
out <- theta + (proposal - theta) * (u <= alpha)
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/sample_theta.R
|
# Nothing here is exported
# log_density_u -----------------------------------------------------------
# Computes the proportional log-density of u
log_density_u <- function(u, l_k, l_k1, alpha, beta, c, index) {
# index = 0 if u is the last, 1 in any other case
u * (log(c) + index * log(c + beta) + log(l_k) + index * log(l_k1)) -
lgamma(u + 1) - lgamma(alpha + u) * index
}
# prob_u ------------------------------------------------------------------
# Creates the distribution function for u
prob_u <- function(l, l1, alpha, beta, c, index) {
u <- vector(mode = "numeric", length = 5e2L)
acum <- 0
j <- 0
prob <- 1
while (j <= 5e2 & prob > 1e-6) {
pi_j <- exp(log_density_u(j, l, l1, alpha, beta, c, index))
if (is.nan(pi_j)) {
break
}
prueba_acum <- acum + pi_j
if (is.infinite(prueba_acum)) {
break
}
if (prueba_acum == 0) {
break
}
acum <- prueba_acum
prob <- pi_j / acum
u[j + 1] <- pi_j
j <- j + 1
}
u <- u[1:j] / acum
prob_fun <- cumsum(u)
return(prob_fun)
}
# sample_u ----------------------------------------------------------------
# Samples a single scalar observation. For use in purrr::pmap.
sample_u <- function(l, l1, alpha, beta, c, index_indicator) {
distribution <- prob_u(l, l1, alpha, beta, c, index_indicator)
if(is.na(distribution[1])) {
distribution <- c(1)
warning("No distribution for U")
}
u <- stats::runif(n = 1)
index <- 1
while (u > distribution[index]) {
index <- index + 1
}
y <- index - 1
return(y)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/sample_u.R
|
# Nothing here is exported
# log_density_y -----------------------------------------------------------
# Computes the proportional log-density of y
log_density_y <- function(y, omega1, omega2, gamma) {
y * (log(gamma) + log(1 + gamma) + log(omega1) + log(omega2)) -
lgamma(1 + y) - lgamma(2 + y)
}
# prob_y ------------------------------------------------------------------
# Creates the distribution function for y
prob_y <- function(omega1, omega2, gamma) {
y <- vector(mode = "numeric", length = 5e2)
acum <- 0
j <- 0
prob <- 1
while (j <= 5e2 & prob > 1e-6) {
gj <- exp(log_density_y(j, omega1, omega2, gamma))
if (is.nan(gj)) {
break
}
prueba_acum <- acum + gj
if (is.infinite(prueba_acum)) {
break
}
if (prueba_acum == 0) {
break
}
acum <- prueba_acum
prob <- gj / acum
y[j + 1] <- gj
j <- j + 1
}
y <- y[1:j] / acum
prob_fun <- cumsum(y)
return(prob_fun)
}
# sample_y ----------------------------------------------------------------
# Samples a single scalar observation. For use in purrr::pmap.
sample_y <- function(omega1, omega2, gamma) {
distribution <- prob_y(omega1, omega2, gamma)
if(is.na(distribution[1])) {
distribution <- c(1)
warning("No distribution for Y")
}
u <- stats::runif(n = 1)
index <- 1
while (u > distribution[index]) {
index <- index + 1
}
y <- index - 1
return(y)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/sample_y.R
|
#' @export
summary.BSBinit <- function(object, ...) {
stopifnot(inherits(object, "BSBinit"))
individuals <- attr(object, "individuals")
intervals <- attr(object, "intervals")
has_predictors <- attr(object, "has_predictors")
  if (has_predictors) {
    pred_names <- colnames(object$pred_matrix)
  } else {
    pred_names <- " "
  }
cat(
"\t\n",
sprintf("Individuals: %s\n", individuals),
sprintf("Time partition intervals: %s\n", intervals),
sprintf("Censored t1: %s\n", individuals - sum(object$delta1)),
sprintf("Censored t2: %s\n", individuals - sum(object$delta2)),
"Predictors:", as.character(has_predictors), "\t", pred_names
)
}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/R/summary.R
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/inst/doc/BGPHhazard.R
|
---
title: "Introduction to BGPhazard"
author: "José A. García-Bueno Emilio A. Morones Ishikawa and Luis E. Nieto-Barajas"
date: "`r Sys.Date()`"
output:
pdf_document:
toc: yes
header-includes:
- "\\usepackage{subcaption}"
vignette: >
%\VignetteIndexEntry{Introduction to BGPhazard}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
# Abstract
We present `BGPhazard`, an R package which computes hazard rates from a Bayesian nonparametric view. This is achieved by computing the posterior distribution of a gamma or a beta process through a Gibbs sampler. The purpose of this document is to guide the user on how to use the package rather than to conduct a thorough analysis of the theoretical results. Nevertheless, section 2 briefly discusses the main results of the models proposed by Nieto-Barajas and Walker (2002) and by Nieto-Barajas (2003). These results will be helpful to understand the usage of the functions contained in the package. In section 3 we show some examples to illustrate the models.
# Introduction
Survival analysis focuses on studying data related to the occurrence time of an event. A typical function is the *survival function*, which in nonparametric statistics is estimated through the product limit estimator (Kaplan & Meier, 1958). This estimator is used as an approximation to the survival function. In some cases, its stair-step nature can return misleading estimates in a neighborhood of the steps: just before and after each step we will have significant differences between estimates.
Lack of smoothness in nonparametric estimation gives rise to methods whose outputs are smooth functions. One of many approaches is given by Nieto-Barajas and Walker (2002) for the survival function. The theory behind these models combines Bayesian Statistics and Survival Analysis to obtain hazard rate estimates. Bayesian Statistics lets us introduce previous knowledge into a data set to improve estimations. Nieto-Barajas & Walker (2002) estimate the hazard function in segments by introducing dependence between contiguous segments so that information is shared and a smooth hazard rate is obtained. We review the three models contained in the package: the beta model for discrete data, the gamma model for continuous data and the Cox-gamma model for continuous data in a proportional hazards setting.
A consequence of using a nonparametric Bayesian model with a dependence structure is that the resulting estimators are smoother than those obtained with a frequentist nonparametric model.
# Hazard rate estimation
In this brief review, we examine a generalization of the independent gamma process of Walker and Mallick (1997) --gamma model--; then, a generalization of the beta process introduced by Hjort (1990) --beta model-- which is often used to model discrete failure times, and lastly, the proportional risk model extension to the gamma process that copes with explanatory variables that remain constant during time (Nieto-Barajas, 2003).
We provide nonparametric prior distributions for the hazard rate based on the dependence processes previously defined and we obtain the posterior distributions through a Bayesian update.
## Markov beta and gamma prior processes
Let $\lambda_k$ represent the gamma process and let $\pi_k$ represent the beta process. Let $\theta_k$ represent both $\lambda_k$ and $\pi_k$. For interpretation of the Markov model, the main priority is to ensure
$$
E[\theta_{k+1}|\theta_k]=a+b \theta_k
$$
A latent process $\{u_k\}$ is introduced in order to obtain $\{\theta_k\}$ from
$$
\theta_1 \rightarrow u_1 \rightarrow \theta_2 \rightarrow u_2 \rightarrow \cdots
$$
### Gamma Process
Walker \& Mallick (1997) consider $\{\lambda_k\}$ as independent gamma variables, *i.e.*, $\lambda_k \sim Ga(\alpha_k,\beta_k)$ independent for $k=1,2,...$ Nieto-Barajas \& Walker (2002) consider a dependent process for $\{\lambda_k\}$. They start with $\lambda_1\sim Ga(\alpha_1,\beta_1)$ and take $u_k|\lambda_k \sim Po(c_k\lambda_k)$ and $\lambda_{k+1}|u_k \sim Ga(\alpha_{k+1} + u_k, \beta_{k+1} + c_k)$ and so on. These updates arise from the joint density
$$
f(u,\lambda)=Ga(\lambda|\alpha,\beta)Po(u|c\lambda)
$$
and so constitute a Gibbs type update. The difference is that they are changing the parameters $(\alpha,\beta,c)$ at each update so the chain is not stationary and marginally the $\{\lambda_k\}$ are not gamma.
However,
$$
\mathrm{E}[\lambda_{k+1}|\lambda_k] = \frac{\alpha_{k+1}+c_k\lambda_k}{\beta_{k+1}+c_k}
$$
If $c_k=0$, then $P(u_k=0)=1$ and hence the $\{\lambda_k\}$ are independent gamma and we have the prior process of Walker and Mallick (1997).
An important result is that if we take $\alpha_k=\alpha_1$ and $\beta_k=\beta_1$ to be constant for all $k$, then the process $\{u_k\}$ is a Poisson-gamma process with implied marginals $\lambda_k\sim Ga(\alpha_1,\beta_1)$. This result is not obvious: if $u_1$ is Poisson distributed and $\lambda_2|u_1$ is conditionally gamma, then $\lambda_2$ is not, in general, marginally gamma.
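To see the construction in action, the following minimal sketch (illustrative only; the constant choices $\alpha_k=\alpha$, $\beta_k=\beta$ and $c_k=c$ are assumptions for the example, and none of these names belong to the package) simulates one path of the dependent process $\{\lambda_k\}$:
```
# Simulate a path of the Markov gamma prior {lambda_k}
set.seed(1)
K <- 20; alpha <- 2; beta <- 2; c <- 5
lambda <- numeric(K)
lambda[1] <- rgamma(1, shape = alpha, rate = beta)
for (k in 1:(K - 1)) {
  u <- rpois(1, c * lambda[k])                      # u_k | lambda_k ~ Po(c lambda_k)
  lambda[k + 1] <- rgamma(1, shape = alpha + u,     # lambda_{k+1} | u_k
                          rate = beta + c)
}
plot(lambda, type = "b", xlab = "k", ylab = "lambda_k")
```
Larger values of `c` make consecutive $\lambda_k$'s more alike, which is precisely the smoothing effect exploited by the model.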
### Beta Process
Nieto-Barajas and Walker (2002) start with $\pi_1 \sim Be(\alpha_1,\beta_1)$ and take $u_k|\pi_k \sim Bi(c_k,\pi_k)$, $\pi_{k+1}|u_k \sim Be(\alpha_{k+1} + u_k, \beta_{k+1} + c_k -u_k)$ and so on. These arise from a binomial-beta conjugate set-up, from the joint density
$$
f(u,\pi)=Be(\pi|\alpha,\beta)Bi(u|c,\pi)
$$
Clearly
$$
\mathrm{E}[\pi_{k+1}|\pi_k] = \frac{\alpha_{k+1}+c_k\pi_k}{\alpha_{k+1}+\beta_{k+1}+c_k}
$$
As with the gamma process, if we choose $c_k=0$, then $P(u_k=0)=1$ and so the $\{\pi_k\}$ become independent beta and we obtain the prior of Hjort (1990).
A significant result is that if we take $\alpha_k=\alpha_1$ and $\beta_k=\beta_1$ to be constant for all $k$, then the process $\{u_k\}$ is a binomial-beta process with marginals $u_k\sim BiBe(\alpha_1,\beta_1,c_k)$. Moreover, the process $\{\pi_k\}$ becomes strictly stationary and marginally $\pi_k\sim Be(\alpha_1,\beta_1)$.
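An analogous sketch (again illustrative, with assumed values that are not part of the package) simulates a path of the dependent process $\{\pi_k\}$:
```
# Simulate a path of the Markov beta prior {pi_k}
set.seed(1)
K <- 20; alpha <- 1; beta <- 3; c <- 10
pi_k <- numeric(K)
pi_k[1] <- rbeta(1, alpha, beta)
for (k in 1:(K - 1)) {
  u <- rbinom(1, size = c, prob = pi_k[k])          # u_k | pi_k ~ Bi(c, pi_k)
  pi_k[k + 1] <- rbeta(1, alpha + u, beta + c - u)  # pi_{k+1} | u_k
}
plot(pi_k, type = "b", xlab = "k", ylab = "pi_k")
```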
## Prior to posterior analysis
We use the gamma and beta processes to define nonparametric prior distributions. In order to obtain $f(\theta|data)$, we introduce the latent variable $u$ and so constitute a Gibbs update for $f(\theta|u,data)$ and $f(u|\theta,data)$. As we generate a sample from $f(\theta,u|data)$, we automatically obtain a sample from $f(\theta|data)$. Therefore, given $u$ from $f(u|\theta,data)$, we can obtain a sample from $f(\theta,u| data)$ by simulating from $\theta \sim f(\theta|u,data)$.
It can be shown that
$$
f(\theta|u,data) \propto f(data|\theta) \times f(\theta|u)
$$
and
$$
f(u|\theta,data) \propto f(data|\theta,u) \times f(u|\theta) \propto f(u|\theta)
$$
because $u$ and $data$ are conditionally independent given $\theta$.
### Gamma Process
Let $T$ be a continuous random variable with cumulative distribution function $F(t)=P(T\leq t)$ on $[0,\infty)$. Consider the time axis partition $0=\tau_0<\tau_1<\tau_2<\cdots$, and let $\lambda_k$ be the hazard rate in the interval $(\tau_{k-1},\tau_k]$, then the hazard function is given by
$$
h (t) = \sum_{k=1}^{\infty} \lambda_kI_{(\tau_{k-1},\tau_k]}(t)
$$
So, the cumulative distribution and density functions, given $\{ \lambda_k \}$, are $F(t|\{\lambda_k\}) = 1 - e^{-H(t)}$, $f(t|\{\lambda_k\}) = h(t) e^{-H(t)}$, where $H(t) = \int_0^t h(s)ds$. We also have that
$$
f(\lambda_k|u_{k-1},u_k) = Ga(\alpha_{k} + u_{k-1} + u_k, \beta_{k} + c_{k-1} + c_k)
$$
Therefore, given a sample $T_1,T_2,...,T_n$ from $f(t|\{\lambda_k\})$, it is straightforward to derive
$$
f(\lambda_k|u_{k-1},u_k,data) = Ga(\alpha_{k} + u_{k-1} + u_k + n_k, \beta_{k} + c_{k-1} + c_k + m_k),
$$
where $n_k$ = number of uncensored observations in $(\tau_{k-1},\tau_k]$, $m_k = \sum_i r_{ki}$, and
\[ r_{ki} = \left \{
\begin{array}{l l}
\tau_k - \tau_{k-1} & t_i > \tau_k \\
    t_i - \tau_{k-1} &  t_i \in (\tau_{k-1},\tau_k] \\
    0 & \text{otherwise}
\end{array} \right. \]
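As a concrete illustration of this update, the following sketch computes $n_k$ and $m_k$ from observed times, censoring indicators and a partition $\tau_0=0<\tau_1<\cdots<\tau_K$ (the helper name `exposure_counts` is ours and is not part of the package):
```
# n_k: uncensored failures in (tau_{k-1}, tau_k]; m_k: total exposure, sum of r_ki
exposure_counts <- function(times, delta, tau) {
  K <- length(tau) - 1
  n_k <- m_k <- numeric(K)
  for (k in 1:K) {
    n_k[k] <- sum(delta == 1 & times > tau[k] & times <= tau[k + 1])
    m_k[k] <- sum(pmax(0, pmin(times, tau[k + 1]) - tau[k]))
  }
  list(n = n_k, m = m_k)
}
# With these counts, lambda_k | u, data ~ Ga(alpha_k + u_{k-1} + u_k + n_k,
#                                            beta_k + c_{k-1} + c_k + m_k)
```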
Additionally,
$$
P(u_k=u | \lambda_k,\lambda_{k+1},data) \propto f(u|\lambda) \propto \frac{[c_k(c_k+\beta_{k+1})\lambda_k\lambda_{k+1}]^u}{\Gamma(u+1)\Gamma(\alpha_{k+1}+u)}
$$
with $u=0,1,2,...$. Hence, with these full conditional distributions, a Gibbs sampler is straightforward to implement in order to obtain posterior summaries.
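For instance, one simple way to draw $u_k$ from this discrete conditional is to truncate its support and normalize the weights, as in the following sketch (illustrative only; the truncation point `u_max` is an assumption):
```
# Draw u_k from its unnormalized discrete full conditional
sample_u_k <- function(lambda_k, lambda_k1, alpha_k1, beta_k1, c_k, u_max = 200) {
  if (c_k == 0) return(0)                           # c_k = 0 forces u_k = 0
  u <- 0:u_max
  logw <- u * log(c_k * (c_k + beta_k1) * lambda_k * lambda_k1) -
    lgamma(u + 1) - lgamma(alpha_k1 + u)
  w <- exp(logw - max(logw))                        # stabilize before normalizing
  sample(u, size = 1, prob = w / sum(w))
}
```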
We can learn about the $\{c_k\}$ by assigning an independent exponential distribution with mean $\epsilon$ for each $c_k$, $k=1,..,K-1$. The Gibbs sampler can be extended to include the full conditional densities for each $c_k$. It is not difficult to derive that a $c_k$ from $f(c_k|u,\lambda,data)$ can be taken from the density
$$
f(c_k|u_k,\lambda_k,\lambda_{k+1}) \propto (\beta_{k+1}+c_k)^{\alpha_{k+1}+u_k}c_k^{u_k}\exp\left\{ -c_k \left (\lambda_{k+1}+\lambda_k+\frac{1}{\epsilon} \right) \right\}
$$
for $c_k >0$.
Dependence between $c_k$'s can be introduced through a hierarchical model via assigning a distribution to $\epsilon \sim Ga(a_0,b_0)$. The update would be given by:
$$
f(\epsilon|\{c_k\}) = Ga\left(\epsilon |a_0 + K, b_0 + \sum_{k=1}^K c_k \right)
$$
where $K$ is the number of intervals generated by the time axis partition. This hierarchical specification of the initial distribution for $c_k$ lets us assign a better value for $c_k$.
Simulating from this distribution is not so straightforward. However, we construct a hybrid algorithm using a Metropolis-Hastings scheme taking advantage of the Markov chain generated by the Gibbs sampling.
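A minimal sketch of one such Metropolis-Hastings step for a single $c_k$ is shown below; the random-walk proposal and its scale `sd_c` are our own choices and not necessarily those implemented in the package:
```
# Log full conditional of c_k (up to a constant) and one random-walk MH step
log_post_c <- function(c_k, u_k, alpha_k1, beta_k1, lambda_k, lambda_k1, eps) {
  if (c_k <= 0) return(-Inf)
  (alpha_k1 + u_k) * log(beta_k1 + c_k) + u_k * log(c_k) -
    c_k * (lambda_k + lambda_k1 + 1 / eps)
}
mh_step_c <- function(c_k, u_k, alpha_k1, beta_k1, lambda_k, lambda_k1, eps,
                      sd_c = 1) {
  prop <- c_k + rnorm(1, sd = sd_c)
  log_ratio <- log_post_c(prop, u_k, alpha_k1, beta_k1, lambda_k, lambda_k1, eps) -
    log_post_c(c_k, u_k, alpha_k1, beta_k1, lambda_k, lambda_k1, eps)
  if (log(runif(1)) < log_ratio) prop else c_k      # proposals with c_k <= 0 are rejected
}
```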
### Beta process
Let $T$ be a discrete random variable taking values in the set $\{\tau_1,\tau_2,...\}$ with probability density function $f(\tau_k)=P(T=\tau_k)$. Let $\pi_k$ be the hazard rate at $\tau_k$, then the cumulative distribution and the density functions, given $\{\pi_k\}$, are $F(\tau_j|\{\pi_k\}) = 1 - \prod_{k=1}^j(1-\pi_k)$ and $f(\tau_j|\{\pi_k\}) = \pi_j \prod_{k=1}^{j-1}(1-\pi_k)$
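In this discrete setting the survival function follows directly from the hazards; for example (an illustrative one-liner, not a package function):
```
# S(tau_j) = prod_{k=1}^{j} (1 - pi_k) for a vector of discrete hazards pi
surv_from_pi <- function(pi) cumprod(1 - pi)
```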
The conditional distribution of $\pi_k$ is
$$
f(\pi_k|u_{k-1},u_k) = Be(\pi_k|\alpha_k+ u_{k-1} + u_k,\beta_k + c_{k-1} -u_{k-1}+ c_k-u_k),
$$
Thus, given a sample $T_1,T_2,...,T_n$ from $f(\cdot|\{\pi_k\})$, it is straightforward to derive
$$
f(\pi_k | u_{k-1}, u_k, data) =Be(\pi_k|\alpha_k+ u_{k-1} + u_k +n_k,\beta_k + c_{k-1} -u_{k-1}+ c_k-u_k+m_k),
$$
where $n_k$ = number of failures at $\tau_k$, $m_k = \sum_i r_{ki}$ and
\[ r_{ki} = \left \{
\begin{array}{l l}
1 & \quad t_i > \tau_k \\
    0 & \quad \text{otherwise}
\end{array} \right. \]
Additionally,
$$
P(u_k=u | \pi_k,\pi_{k+1}, data) \propto \frac{\theta_k^u}{\Gamma(u+1)\Gamma(c_k-u+1)\Gamma(\alpha_{k+1}+u)\Gamma(\beta_{k+1}+c_k-u)}
$$
with $u=0,1,...,c_k$ and
$$
\theta_k = \frac{\pi_k\pi_{k+1}}{(1-\pi_k)(1-\pi_{k+1})}
$$
As before, obtaining posterior summaries via Gibbs sampler is simple.
We can learn about the $\{c_k\}$ via assigning each $c_k$ an independent Poisson distribution with mean $\epsilon$. The Gibbs sampler can be extended to include the full conditional densities for each $c_k$. A $c_k$ from $f(c_k|u,\pi,data)$ can be taken from the density
$$
f(c_k|u_k,\pi_k,\pi_{k+1}) \propto \frac{\Gamma(\alpha_{k+1}+\beta_{k+1}+c_k)}{\Gamma(\beta_{k+1}+c_k-u_k)\Gamma(c_k-u_k+1)} \left[\epsilon (1-\pi_{k+1})(1-\pi_k) \right]^{c_k}
$$
with $c_k \in \{u_k,u_k+1,u_k+2,...\}$.
Dependence between $c_k$'s can be introduced through a hierarchical model via assigning a distribution to $\epsilon \sim Ga(a_0,b_0)$. So the update would be given by:
$$
f(\epsilon|\{c_k\}) = Ga\left(\epsilon |a_0 + K, b_0 + \sum_{k=1}^K c_k \right)
$$
where $K$ is the number of discrete values in random variable $T$.
### Cox-gamma model
Differing from most of the previous Bayesian analyses of the proportional hazards model, Nieto-Barajas (2003) models the baseline hazard rate function with a stochastic process. Let $T_i$ be a non negative random variable which represents the failure time of individual $i$ and $Z_i=(Z_{i1},...,Z_{ip})$ the vector containing its $p$ explanatory variables. Therefore, the hazard function for individual $i$ is:
$$
\lambda_i(t)=\lambda_0(t)exp\{Z_i'\theta\}
$$
where $\lambda_0(t)$ is the baseline hazard rate and $\theta$ is regression's coefficient vector. The cumulative hazard function for individual $i$ becomes
$$
H_i(t)=\sum_{k=1}^{\infty}\lambda_k W_{i,k}(t,\theta),
$$
where,
\[ W_{i,k}(t,\theta) = \left \{
\begin{array}{l l}
(\tau_k - \tau_{k-1})\exp\{Z_i'\theta \} & t_i > \tau_k \\
(t_i - \tau_{k-1})\exp\{Z_i'\theta \} & t_i \in (\tau_{k-1},\tau_k] \\
0 & otherwise
\end{array} \right. \]
Given a sample of possibly right-censored observations where $T_1, ..., T_{n_u}$ are uncensored and $T_{n_u+1},...,T_n$ are right-censored, the conditional posterior distributions for the parameters of the semi-parametric model are:
\begin{itemize}
\item{$f(\lambda_k | u_{k-1}, u_k, data,\theta) = Ga(\lambda_k|\alpha_k+ u_{k-1} + u_k + n_k,\beta_k + c_{k-1} + c_k+m_k(\theta))$}
\item{$P(u_k=u | \lambda_k,\lambda_{k+1},data) \propto f(u|\lambda) \propto \frac{[c_k(c_k+\beta_{k+1})\lambda_k\lambda_{k+1}]^u}{\Gamma(u+1)\Gamma(\alpha_{k+1}+u)}$}
\item{$f(\theta|\lambda,data)\propto f(\theta) \exp\left\{{\sum_{i=1}^{n_u} \theta'Z_i} -\sum_{k=1}^\infty \lambda_k m_k(\theta) \right\} $}
\end{itemize}
where $n_k=\sum_{i=1}^{n_u}I_{(\tau_{k-1},\tau_k]}(t_i)$ and $m_k(\theta)=\sum_{i=1}^n W_{i,k}(t_i,\theta)$.
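The quantity $W_{i,k}(t,\theta)$ is easy to compute directly; a compact sketch (the helper name is ours, with `tau` the partition vector starting at $\tau_0=0$) is:
```
# Exposure of individual i in interval k, scaled by exp(Z_i' theta)
W_ik <- function(t_i, z_i, theta, tau, k) {
  exp(sum(z_i * theta)) * max(0, min(t_i, tau[k + 1]) - tau[k])
}
# m_k(theta) is then the sum of W_ik over all individuals i
```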
Similarly as we pointed out in the previous two cases, we incorporate a hyper prior process for the $\{c_k\}$ such that $c_k\sim Ga(1,\epsilon_k)$. The set of full conditional posterior distributions can then be extended to include
$$
f(c_k|u_k,\lambda_k,\lambda_{k+1}) \propto (\beta_{k+1}+c_k)^{\alpha_{k+1}+u_k}c_k^{u_k}\exp\left\{ -c_k \left (\lambda_{k+1}+\lambda_k+\frac{1}{\epsilon} \right) \right\}
$$
Dependence between $c_k$'s can be introduced through a hierarchical model via assigning a distribution to $\epsilon \sim Ga(a_0,b_0)$. So the update would be given by:
\begin{equation*}
f(\epsilon|\{c_k\}) = Ga\left(\epsilon |a_0 + K, b_0 + \sum_{k=1}^K c_k \right)
\end{equation*}
where $K$ is the number of intervals generated by the time axis partition. This hierarchical specification of the initial distribution for $c_k$ lets us assign a better value for $c_k$.
## Gamma model examples
For this model, we will be using the 6-MP (treatment) observations of the leukemia remission data set (Freireich, E. J., et al., 1963) --data from a trial of 42 leukemia patients organised by pairs in which the first element of the pair is treated with a drug and the other is control--. For examples 1 to 4, we use a partition of unitary length intervals; for the last three examples --5, 6 \& 7--, we use uniformly-dense intervals. The `"time"` column is taken as the observed times vector --`times`-- and the `"cens"` column as the censoring status vector --`delta`--:
```
data(gehan)
times <- gehan$time[gehan$treat == "6-MP"]
delta <- gehan$cens[gehan$treat == "6-MP"]
```
Now that we have a time and censoring status vector, we can run several examples for this model. Default values are used for each function unless otherwise noted. Every example shows our estimate overlapped with the Nelson-Aalen / Kaplan-Meier estimator, so the user can compare them.
### Example 1. Independence case. Unitary length intervals
We obtain with our model the Nelson-Aalen and Kaplan-Meier estimators by defining $c_k$ as a null vector --through fixing `type.c = 1`--. A unitary partitioned axis is obtained by fixing `type.t = 2`. In Figure \ref{fig:G1} we show that our estimators --under independence-- return the same results as the Nelson-Aalen and Kaplan-Meier estimators.
```
ExG1 <- GaMRes(times, delta, type.t = 2, K = 35, type.c = 1,
iterations = 3000)
GaPloth(ExG1, confint = FALSE)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G11.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G12.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 1 - Independence case. Unitary length intervals}
\label{fig:G1}
\end{figure}
### Example 2. Introducing dependence through c. Unitary length intervals
The influence of $c$ --or `c.r`-- can be understood as a dependence parameter: the greater the value of each $c_k$, $k=0,1,2,...,K-1$, the higher the dependence between intervals $k$ and $k+1$. For this example, we assign a fixed value to vector $c$, so we use `type.c = 2`. Comparing with the previous example, we see that the estimates that were zero, now have a positive value --see Figures \ref{fig:G1} and \ref{fig:G2}--. Note how this model compares to Example 5 --see Figure \ref{fig:G5}--. The difference between those examples is how the partition is defined.
```
ExG2 <- GaMRes(times, delta, type.t = 2, K = 35, type.c = 2,
c.r = rep(50, 34), iterations = 3000)
GaPloth(ExG2)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G21.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G22.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 2 - Introducing dependence through c ($c_k=50, \forall k$). Unitary length intervals}
\label{fig:G2}
\end{figure}
Additionally, we can get further detail on the Gibbs sampler with a diagnosis of the resulting Markov chain. We can run this diagnosis for each entry of $\lambda$, $u$, $c$ or $\epsilon$. In Figure \ref{fig:G2a} we show the diagnosis for $\lambda_6$ which includes the trace, the ergodic mean, the ACF function and the histogram for the generated chain.
```
GaPlotDiag(ExG2, variable = "lambda", pos = 6)
```
\begin{figure}
\centering
\includegraphics[width=\textwidth]{G23.png}
\caption{Gamma Example 2 - Diagnosis for $\lambda_6$}
\label{fig:G2a}
\end{figure}
### Example 3. Varying c through a distribution. Unitary length intervals
As we reviewed, we can learn about the $\{c_k\}$ via assigning an exponential distribution with mean $\epsilon$. The estimates using $\epsilon = 1$ --`type.c = 3`-- and unitary length intervals --`type.t = 2`-- are shown in Figure \ref{fig:G3}. We can compare the hazard function with the previous example, where $c$ was fixed, and observe that, because of the variability given to $c$, the changes in the estimated values between two contiguous intervals are greater in this example. The survival function echoes the shape of the Kaplan-Meier with higher decrease rates.
Comparing this example with Example 6 --Figure \ref{fig:G6}--, as with the previous example, the main difference is given by the partition of the time axis. This affects the estimates as we will note later.
```
ExG3 <- GaMRes(times, delta, type.t = 2, K = 35, type.c = 3, epsilon = 1,
iterations = 3000)
GaPloth(ExG3)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G31.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G32.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 3 - Varying $c$ through a distribution $c_k\sim Ga(1,\epsilon = 1$). Unitary length intervals}
\label{fig:G3}
\end{figure}
### Example 4. Using a hierarchical model to estimate c. Unitary intervals
The previous example can be extended with a hierarchical model, assigning a distribution to $\epsilon \sim Ga(a_0,b_0)$ with $a_0=b_0=0.01$. In order to set up the model we should set `type.c = 4`. The result displayed on Figure \ref{fig:G4} is a smooth hazard function and a survival function that decreases faster than the Kaplan-Meier estimate.
```
ExG4 <- GaMRes(times, delta, type.t = 2, K = 35, type.c = 4,
iterations = 3000)
GaPloth(ExG4)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G41.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G42.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 4 - Using a hierarchical model to estimate $c$. Unitary intervals}
\label{fig:G4}
\end{figure}
### Example 5. Introducing dependence through c. Equally dense intervals
This example illustrates the same concept as Example 2 --how $c$ introduces dependence--, but with a different partition of the time axis. To get this partition we set `type.t = 1`. Figure \ref{fig:G5} shows that the survival function is close to the Kaplan-Meier estimate. The fact that it gets closer to the K-M estimate does not make it a better estimate; we can only say that this partition yields, on average, a smaller hazard rate than Example 2, which was built with a unitary length partition.
```
ExG5 <- GaMRes(times, delta, type.t = 1, K = 8, type.c = 2,
c.r = rep(50, 7), iterations = 3000)
GaPloth(ExG5)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G51.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G52.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 5 - Introducing dependence through $c$ ($c_k=50, \forall k$). Equally dense intervals}
\label{fig:G5}
\end{figure}
### Example 6. Varying c through a distribution. Equally dense intervals
We can compare this example on Figure \ref{fig:G6} with Example 3 --Figure \ref{fig:G3}--. We use fewer intervals, so the survival function is smoother. Our estimate decreases faster than the Kaplan-Meier estimate.
```
ExG6 <- GaMRes(times, delta, type.t = 1, K = 8, type.c = 3,
iterations=3000)
GaPloth(ExG6)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G61.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G62.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 6 - Varying $c$ through a distribution $c_k\sim Ga(1,\epsilon = 1$). Equally dense intervals}
\label{fig:G6}
\end{figure}
### Example 7. Using a hierarchical model to estimate c. Equally dense intervals
The survival curve for this particular example results in a smoothed version of the Kaplan-Meier estimate (Figure \ref{fig:G7}). As with previous examples, it can be compared with the unitary partition example (see Example 4, Figure \ref{fig:G4}).
```
ExG7 <- GaMRes(times, delta, type.t = 1, K = 8, type.c = 4,
iterations = 3000)
GaPloth(ExG7)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G71.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G72.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 7 - Using a hierarchical model to estimate $c$. Equally dense intervals}
\label{fig:G7}
\end{figure}
%\clearpage
## Beta model examples
For this model, we use survival data on 26 psychiatric inpatients admitted to the University of Iowa hospitals during the years 1935-1948. This sample is part of a larger study of psychiatric inpatients discussed by Tsuang and Woolson (1977). We take the `"time"` column as the observed times vector --`times`-- and the `"death"` column as the censoring status vector --`delta`--:
```
data(psych)
times <- psych$time
delta <- psych$death
```
### Example 1. Independence case
As with the Gamma Example, we obtain the Nelson-Aalen and Kaplan-Meier estimators by defining $c_k$ as a null vector through fixing `type.c = 1` --see Figure \ref{fig:B1}--. The conclusion does not change: the independence case of our model yields the N-A and K-M estimators.
```
ExB1 <- BeMRes(times, delta, type.c = 1, iterations = 3000)
BePloth(ExB1, confint = FALSE)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{B11.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{B12.png}
\caption{Survival function}
\end{subfigure}
\caption{Beta Example 1 - Independence case}
\label{fig:B1}
\end{figure}
### Example 2. Introducing dependence through c
The influence of $c$ --or `c.r`-- can also be understood as a dependence parameter: the greater the value of each $c_k$, $k=0,1,2,...,K-1$, the higher the dependence between intervals $k$ and $k+1$. In this example, we fix each $c$ entry at 100. As we are defining vector $c$ with fixed values, we should fix `type.c = 2`. We see on Figure \ref{fig:B2} that the hazard function estimate becomes smoother than in the previous example. The steps of the survival function appear to be more uniform than those of the Kaplan-Meier estimate.
```
ExB2 <- BeMRes(times, delta, type.c = 2, c.r = rep(100, 39),
iterations = 3000)
BePloth(ExB2)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{B21.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{B22.png}
\caption{Survival function}
\end{subfigure}
\caption{Beta Example 2 - Introducing dependence through $c$ ($c_k=100, \forall k)$}
\label{fig:B2}
\end{figure}
Additionally, we can get further detail on the Gibbs sampler with a diagnosis of the resulting Markov chain. We can run this diagnosis for each entry of $\pi$, $u$, $c$ or $\epsilon$. In Figure \ref{fig:B2a} we show the diagnosis for $\pi_{10}$, which includes plots of the trace, the ergodic mean, the ACF and the histogram of the chain.
```
BePlotDiag(ExB2, variable = "Pi", pos = 6)
```
\begin{figure}
\centering
\includegraphics[width=\textwidth]{B23.png}
\caption{Beta Example 2 - Diagnosis for $\pi_{10}$}
\label{fig:B2a}
\end{figure}
### Example 3. Varying c through a distribution
As with the gamma model, we can learn about the $\{c_k\}$ via assigning a distribution with mean $\epsilon$. The estimates using $\epsilon = 1$ and unitary length intervals are shown on Figure \ref{fig:B3}. Note that the confidence intervals have widened: this is a signal that we have introduced variability into the model --because of the distribution assigned to $c$--.
```
ExB3 <- BeMRes(times, delta, type.c = 3, epsilon = 1, iterations = 3000)
BePloth(ExB3)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{B31.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{B32.png}
\caption{Survival function}
\end{subfigure}
\caption{Beta Example 3 - Varying $c$ through a distribution $c_k\sim Ga(1,\epsilon = 1)$}
\label{fig:B3}
\end{figure}
### Example 4. Using a hierarchical model to estimate c
The previous example can be extended with a hierarchical model, assigning a distribution to $\epsilon \sim Ga(a_0,b_0)$, with $a_0=b_0=0.01$. In order to set up the model we should set `type.c = 4`. The result displayed on Figure \ref{fig:B4} is a smooth hazard function and a survival function that decreases faster than the Kaplan-Meier estimate.
```
ExB4 <- BeMRes(times, delta, type.c = 4, iterations = 3000)
BePloth(ExB4)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{B41.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{B42.png}
\caption{Survival function}
\end{subfigure}
\caption{Beta Example 4 - Using a hierarchical model to estimate $c$}
\label{fig:B4}
\end{figure}
\clearpage
## Cox-gamma model example
In this example, we simulate the data from a Weibull model, frequently used with continuous and non negative data. The advantage of simulating from a known model is that we know the true values in advance, so we can compare the estimates against them. The Weibull model has the probability functions:
$$
h_0(t)=abt^{b-1}, S_0(t)=e^{-at^b}
$$
We construct the proportional hazard model as:
$$
h_i(t_i|Z_i)=h_0(t_i)e^{\theta 'Z_i}=ae^{\theta 'Z_i}bt_i^{b-1}, \quad S_i(t_i|Z_i)=\exp\left\{-H_0(t_i)e^{\theta 'Z_i}\right\}
$$
Note that this Weibull proportional hazard model is also another Weibull model with parameters ($a_i^*=ae^{\theta 'Z_i},b$). Based on the previous densities and on fixed parameters $a$, $b$, $\theta_1$ and $\theta_2$ --the covariates $Z_{i1}$ and $Z_{i2}$ are simulated from uniform distributions on the interval (0,1)--, we simulate $n$ observations. We gather the results of the model in a table --see Table \ref{cuad:weibull}--. $t_i|Z_i \sim Weibull(a_i^*, b)$ and $Z_i = (Z_{i1},Z_{i2})$ are the explanatory variables; $c_i$ is the censoring time; $\delta_i$, the censoring indicator, and $min\{t_i,c_i\}$ is the \emph{observed} time, accounting for censoring.
\begin{table}
\centering
\begin{tabular}{|c| c| c| c| c| c| c|}
\hline
i & $t_i$ &$Z_{i1}\sim U(0,1)$ & $Z_{i2}\sim U(0,1)$ & $c_i\sim Exp(1)$ & $\delta_i=I(t_i>c_i)$ & $min\{t_i,c_i\}$ \\
\hline
1 &$t_1$ & $Z_{11}$ & $Z_{12}$ & $c_1$ & $\delta_1$ & $min\{t_1,c_1\}$ \\
2 &$t_2$ & $Z_{21}$ & $Z_{22}$ & $c_2$ & $\delta_2$ & $min\{t_2,c_2\}$ \\
$\vdots$ & $\vdots$ &$\vdots$ &$\vdots$ &$\vdots$ &$\vdots$ &$\vdots$ \\
$n$ &$t_n$ & $Z_{n1}$ & $Z_{n2}$ & $c_n$ & $\delta_n$ & $min\{t_n,c_n\}$ \\
\hline
\end{tabular}
\caption{Weibull simulation model}
\label{cuad:weibull}
\end{table}
We generate a size $n=100$ sample based on the simulation model with parameters $a=0.1$, $b=1$, $\theta=(1,1)$ and $Z_i\sim U(0,1)$, $i=1,2$. The result is a table with $n=100$ observations.
On the other hand, we use almost every default parameter from `CGaMRes` excluding `K = 10`, `iterations = 3000` and `thpar = 10`. Theoretically, our model should estimate a constant risk function at $a\times b=0.1$.
Below, we show the code for the Weibull model and the calls for the plots for the hazard and survival functions --command `CGaPloth(M)`--, the predictive distribution for an observation defined as the median of the data --`CGaPred(M)`--, and the plots for $\theta_1$ and $\theta_2$ --`PlotTheta(M)`--.
```
SampWeibull <- function(n, a = 10, b = 1, beta = c(1, 1)) {
M <- matrix(0, ncol = 7, nrow = n)
for(i in 1:n){
M[i, 1] <- i
M[i, 2] <- x1 <- runif(1)
M[i, 3] <- x2 <- runif(1)
M[i, 4] <- rweibull(1, shape = b,
scale = 1 / (a * exp(cbind(x1, x2) %*% beta)))
M[i, 5] <- rexp(1)
M[i, 6] <- M[i, 4] > M[i, 5]
M[i, 7] <- min(M[i, 4], M[i, 5])
}
colnames(M) <- c("i", "x_i1", "x_i2", "t_i", "c_i", "delta",
"min{c_i, d_i}")
return(M)
}
dat <- SampWeibull(100, 0.1, 1, c(1, 1))
dat <- cbind(dat[, c(4, 6)], dat[, c(2, 3)])
CG <- CGaMRes(dat, K = 10, iterations = 3000, thpar = 10)
CGaPloth(CG)
PlotTheta(CG)
CGaPred(CG)
```
Because of the way we built the model, we can compare against theoretical results on the Weibull model. In Figure \ref{fig:CG1} we see that the hazard rate estimate is practically the same from t = 8 to t = 37. The estimate for the survival function gets very close to the real value. Plots and histograms for $\theta$ from Figure \ref{fig:CG2} show estimated values for the regression coefficients $(\theta_1,\theta_2)$ and they are consistently near 1. Finally, the plots from Figure \ref{fig:CG3} show the hazard rate estimate over an equally dense partition for a future individual whose explanatory variables are equal to the median of the observations --$x_F$--. Note that the effect over the hazard function is given by the product of the baseline hazard function and $\exp\{x_F'\theta\}$.
\begin{figure}
\centering
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{CG1.png}
\caption{Hazard rate estimate}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{CG2.png}
\caption{Survival function estimate}
\end{subfigure}
\caption{Cox-gamma example}
\label{fig:CG1}
\end{figure}
\begin{figure}
\centering
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{CG3.png}
\caption{$\theta_1$ estimate}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{CG4.png}
\caption{$\theta_2$ estimate}
\end{subfigure}
\caption{$\theta$ estimate on the Cox-gamma example}
\label{fig:CG2}
\end{figure}
\begin{figure}
\centering
\begin{subfigure}{\textwidth}
\includegraphics[width=\textwidth]{CG5.png}
\end{subfigure}
\caption{Hazard rate estimate for the median on the Cox-gamma example}
\label{fig:CG3}
\end{figure}
\pagebreak
\section{References}
\begin{itemize}
\item{ \textsc{Freireich. E.J., et al.}, Estimation of Exponential Survival Probabilities with Concomitant Information, \textit{Biometrics}, {\bf 21}, pages: 826-838, 1965.}
\item{ \textsc{Gehan, E.A.}, A generalized Wilcoxon test for comparing arbitrarily single-censored samples, \textit{Biometrika}, {\bf 52}, pages: 687-696, 1965.}
\item{ \textsc{Hjort, N.L.}, Nonparametric Bayes estimators based on beta processes in models for life history data, \textit{Annals of Statistics}, {\bf 18}, pages: 1259-1294, 1990.}
\item{\textsc{Kaplan, E.L.} and \textsc{Meier, P.}, Nonparametric estimation from incomplete observations, \textit{Journal of the American Statistical Association}, {\bf 53}, 282, pages: 457-481, 1958.}
\item{\textsc{Klein, J.P.} and \textsc{Moeschberger, M.L.}, Survival analysis: techniques for censored and truncated data, Springer Science \& Business Media, 2003.}
\item{ \textsc{Nieto-Barajas, L.E.} \& \textsc{Walker, S.G.}, Markov beta and gamma processes for modelling hazard rates, \textit{Scandinavian Journal of Statistics} no. {\bf 29}, pages 413-424, 2002.}
\item{ \textsc{Nieto-Barajas, L.E.}, Discrete time Markov gamma processes and time dependent covariates in survival analysis, \textit{Bulletin of the International Statistical Institute 54th Session}, 2003.}
\item{ \textsc{Tsuang, M.T.} and \textsc{Woolson, R.F.}, Mortality in Patients with Schizophrenia, Mania and Depression, \textit{British Journal of Psychiatry}, {\bf 130}, pages: 162-166, 1977.}
\item{ \textsc{Walker, S.G.} and \textsc{Mallick, B.K.}, Hierarchical generalized linear models and frailty models with Bayesian nonparametric mixing, \textit{Journal of the Royal Statistical Society}, Series B {\bf 59}, pages: 845-860, 1997.}
\item{ \textsc{Woolson, R.F.}, Rank Tests and a One-Sample Log Rank Test for Comparing Observed Survival Data to a Standard Population, \textit{Biometrics}, {\bf 37}, pages: 687-696, 1981.}
\end{itemize}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/inst/doc/BGPHhazard.Rmd
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width=8,
fig.height=6,
fig.align = "center"
)
## ----setup--------------------------------------------------------------------
library(BGPhazard)
library(dplyr)
library(ggplot2)
## -----------------------------------------------------------------------------
KIDNEY
## -----------------------------------------------------------------------------
bsb_init <- BSBInit(
KIDNEY,
alpha = 0.001,
beta = 0.001,
c = 1000,
part_len = 10,
seed = 42
)
summary(bsb_init)
## -----------------------------------------------------------------------------
samples <- BSBHaz(
bsb_init,
iter = 100,
burn_in = 10,
gamma_d = 0.6,
theta_d = 0.3,
seed = 42
)
print(samples)
## -----------------------------------------------------------------------------
BSBSumm(samples, "omega1")
BSBSumm(samples, "lambda1")
## -----------------------------------------------------------------------------
BSBPlotSumm(samples, "lambda1")
BSBPlotSumm(samples, "lambda2")
## -----------------------------------------------------------------------------
BSBPlotSumm(samples, "s1")
BSBPlotSumm(samples, "s2")
## -----------------------------------------------------------------------------
BSBPlotDiag(samples, "omega1", type = "traceplot")
BSBPlotDiag(samples, "omega1", type = "ergodic_means")
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/inst/doc/bivariate-model-example.R
|
---
title: "Bivariate Model Example"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Bivariate Model Example}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width=8,
fig.height=6,
fig.align = "center"
)
```
```{r setup}
library(BGPhazard)
library(dplyr)
library(ggplot2)
```
We will use the built-in dataset `KIDNEY` to show how the bivariate model functions work. All the functions for the bivariate model start with the letters **BSB**, which stand for *Bayesian Semiparametric Bivariate*.
```{r}
KIDNEY
```
## Initial setup
First, we use the `BSBInit` function to create the necessary data structure that we have to feed the Gibbs Sampler. We can skim the data structure with the summary and print methods.
```{r}
bsb_init <- BSBInit(
KIDNEY,
alpha = 0.001,
beta = 0.001,
c = 1000,
part_len = 10,
seed = 42
)
summary(bsb_init)
```
Our data consists of 38 individuals with two failure times each. For the first failure time `t1` we have six censored observations, while for the second failure time we have twelve. The model will use `sex` as a predictor variable.
## Gibbs Sampler
To obtain the posterior samples, we use the function `BSBHaz`. We run 100 iterations with a burn-in period of 10. The number of simulations is low in order to reduce the complexity of building this vignette. In practice, you should see how many iterations the model needs to reach convergence.
```{r}
samples <- BSBHaz(
bsb_init,
iter = 100,
burn_in = 10,
gamma_d = 0.6,
theta_d = 0.3,
seed = 42
)
print(samples)
```
The `print` method shows that we only kept the last 90 iterations as posterior simulations.
## Summaries
### Tables
We can get posterior sample summaries with the function `BSBSumm`. This function returns the posterior mean and a 0.95 probability interval for all the model parameters. Additionally, it returns the acceptance rate for variables sampled using the Metropolis-Hastings algorithm.
```{r}
BSBSumm(samples, "omega1")
BSBSumm(samples, "lambda1")
```
It is important to notice that `lambda1` and `lambda2` are the estimated hazard rates for the baseline hazards $h_0$. They do not include the effect of predictor variables. The same applies for the survival function estimates `s1` and `s2`.
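If you want the hazard implied for a specific individual, you can scale the baseline hazard by the predictor effect yourself. A minimal sketch, assuming you have already extracted a vector of posterior mean baseline hazards (`lambda_hat`) and the posterior mean coefficient (`theta_hat`) from the summaries above:
```{r, eval = FALSE}
# Individual hazard: baseline hazard times exp(x' theta)
# 'lambda_hat', 'theta_hat' and 'x' are assumed objects, not package outputs
hazard_individual <- function(lambda_hat, theta_hat, x) {
  lambda_hat * exp(sum(x * theta_hat))
}
hazard_individual(lambda_hat, theta_hat, x = 1)  # e.g. sex coded as 1
```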
### Plots
We can get two summary plots: estimated hazard rates and estimated survival functions.
**Baseline hazards**
```{r}
BSBPlotSumm(samples, "lambda1")
BSBPlotSumm(samples, "lambda2")
```
**Survival functions**
```{r}
BSBPlotSumm(samples, "s1")
BSBPlotSumm(samples, "s2")
```
You can also get diagnostic plots for the simulated variables. Choose the type of plot with the argument `type`.
```{r}
BSBPlotDiag(samples, "omega1", type = "traceplot")
BSBPlotDiag(samples, "omega1", type = "ergodic_means")
```
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/inst/doc/bivariate-model-example.Rmd
|
---
title: "Introduction to BGPhazard"
author: "José A. García-Bueno Emilio A. Morones Ishikawa and Luis E. Nieto-Barajas"
date: "`r Sys.Date()`"
output:
pdf_document:
toc: yes
header-includes:
- "\\usepackage{subcaption}"
vignette: >
%\VignetteIndexEntry{Introduction to BGPhazard}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
# Abstract
We present `BGPhazard`, an R package which computes hazard rates from a Bayesian nonparametric view. This is achieved by computing the posterior distribution of a gamma or a beta process through a Gibbs sampler. The purpose of this document is to guide the user on how to use the package rather than to conduct a thorough analysis of the theoretical results. Nevertheless, section 2 briefly discusses the main results of the models proposed by Nieto-Barajas and Walker (2002) and by Nieto-Barajas (2003). These results will be helpful to understand the usage of the functions contained in the package. In section 3 we show some examples to illustrate the models.
# Introduction
Survival analysis focuses on studying data related to the occurrence time of an event. A typical function is the *survival function*, which in nonparametric statistics is estimated through the product limit estimator (Kaplan & Meier, 1958). This estimator is used as an approximation to the survival function. In some cases, its stair-step nature can return misleading estimates in a neighborhood of the steps: just before and after each step we will have significant differences between estimates.
Lack of smoothness in nonparametric estimation gives rise to methods whose outputs are smooth functions. One of many approaches is given by Nieto-Barajas and Walker (2002) for the survival function. The theory behind these models combines Bayesian Statistics and Survival Analysis to obtain hazard rate estimates. Bayesian Statistics lets us introduce previous knowledge into a data set to improve estimations. Nieto-Barajas & Walker (2002) estimate the hazard function in segments by introducing dependence between contiguous segments so that information is shared and a smooth hazard rate is obtained. We review the three models contained in the package: the beta model for discrete data, the gamma model for continuous data and the Cox-gamma model for continuous data in a proportional hazards setting.
A consequence of using a nonparametric Bayesian model with a dependence structure is that the resulting estimators are smoother than those obtained with a frequentist nonparametric model.
# Hazard rate estimation
In this brief review, we examine a generalization of the independent gamma process of Walker and Mallick (1997) --gamma model--; then, a generalization of the beta process introduced by Hjort (1990) --beta model-- which is often used to model discrete failure times, and lastly, the proportional risk model extension to the gamma process that copes with explanatory variables that remain constant during time (Nieto-Barajas, 2003).
We provide nonparametric prior distributions for the hazard rate based on the dependence processes previously defined and we obtain the posterior distributions through a Bayesian update.
## Markov beta and gamma prior processes
Let $\lambda_k$ represent the gamma process and let $\pi_k$ represent the beta process. Let $\theta_k$ represent both $\lambda_k$ and $\pi_k$. For interpretation of the Markov model, the main priority is to ensure
$$
E[\theta_{k+1}|\theta_k]=a+b \theta_k
$$
A latent process $\{u_k\}$ is introduced in order to obtain $\{\theta_k\}$ from
$$
\theta_1 \rightarrow u_1 \rightarrow \theta_2 \rightarrow u_2 \rightarrow \cdots
$$
### Gamma Process
Walker \& Mallick (1997) consider $\{\lambda_k\}$ as independent gamma variables, *i.e.*, $\lambda_k \sim Ga(\alpha_k,\beta_k)$ independent for $k=1,2,...$ Nieto-Barajas \& Walker (2002) consider a dependent process for $\{\lambda_k\}$. They start with $\lambda_1\sim Ga(\alpha_1,\beta_1)$ and take $u_k|\lambda_k \sim Po(c_k\lambda_k)$ and $\lambda_{k+1}|u_k \sim Ga(\alpha_{k+1} + u_k, \beta_{k+1} + c_k)$ and so on. These updates arise from the joint density
$$
f(u,\lambda)=Ga(\lambda|\alpha,\beta)Po(u|c\lambda)
$$
and so constitute a Gibbs type update. The difference is that they are changing the parameters $(\alpha,\beta,c)$ at each update so the chain is not stationary and marginally the $\{\lambda_k\}$ are not gamma.
However,
$$
\mathrm{E}[\lambda_{k+1}|\lambda_k] = \frac{\alpha_{k+1}+c_k\lambda_k}{\beta_{k+1}+c_k}
$$
If $c_k=0$, then $P(u_k=0)=1$ and hence the $\{\lambda_k\}$ are independent gamma and we have the prior process of Walker and Mallick (1997).
An important result is that if we take $\alpha_k=\alpha_1$ and $\beta_k=\beta_1$ to be constant for all $k$, then the process $\{u_k\}$ is a Poisson-gamma process with implied marginals $\lambda_k\sim Ga(\alpha_1,\beta_1)$. This result is not obvious: if $u_1$ is Poisson distributed and $\lambda_2|u_1$ is conditionally gamma, then $\lambda_2$ is not, in general, marginally gamma.
### Beta Process
Nieto-Barajas and Walker (2002) start with $\pi_1 \sim Be(\alpha_1,\beta_1)$ and take $u_k|\pi_k \sim Bi(c_k,\pi_k)$, $\pi_{k+1}|u_k \sim Be(\alpha_{k+1} + u_k, \beta_{k+1} + c_k -u_k)$ and so on. These arise from a binomial-beta conjugate set-up, from the joint density
$$
f(u,\pi)=Be(\pi|\alpha,\beta)Bi(u|c,\pi)
$$
Clearly
$$
\mathrm{E}[\pi_{k+1}|\pi_k] = \frac{\alpha_{k+1}+c_k\pi_k}{\alpha_{k+1}+\beta_{k+1}+c_k}
$$
As with the gamma process, if we choose $c_k=0$, then $P(u_k=0)=1$ and so the $\{\pi_k\}$ become independent beta and we obtain the prior of Hjort (1990).
A significant result is that if we take $\alpha_k=\alpha_1$ and $\beta_k=\beta_1$ to be constant for all $k$, then the process $\{u_k\}$ is a binomial-beta process with marginals $u_k\sim BiBe(\alpha_1,\beta_1,c_k)$. Moreover, the process $\{\pi_k\}$ becomes strictly stationary and marginally $\pi_k\sim Be(\alpha_1,\beta_1)$.
## Prior to posterior analysis
We use the gamma and beta processes to define nonparametric prior distributions. In order to obtain $f(\theta|data)$, we introduce the latent variable $u$ and so constitute a Gibbs update for $f(\theta|u,data)$ and $f(u|\theta,data)$. As we generate a sample from $f(\theta,u|data)$, we automatically obtain a sample from $f(\theta|data)$. Therefore, given $u$ from $f(u|\theta,data)$, we can obtain a sample from $f(\theta,u| data)$ by simulating from $\theta \sim f(\theta|u,data)$.
It can be shown that
$$
f(\theta|u,data) \propto f(data|\theta) \times f(\theta|u)
$$
and
$$
f(u|\theta,data) \propto f(data|\theta,u) \times f(u|\theta) \propto f(u|\theta)
$$
because $u$ and $data$ are conditionally independent given $\theta$.
### Gamma Process
Let $T$ be a continuous random variable with cumulative distribution function $F(t)=P(T\leq t)$ on $[0,\infty)$. Consider the time axis partition $0=\tau_0<\tau_1<\tau_2<\cdots$, and let $\lambda_k$ be the hazard rate in the interval $(\tau_{k-1},\tau_k]$, then the hazard function is given by
$$
h (t) = \sum_{k=1}^{\infty} \lambda_kI_{(\tau_{k-1},\tau_k]}(t)
$$
So, the cumulative distribution and density functions, given $\{ \lambda_k \}$, are $F(t|\{\lambda_k\}) = 1 - e^{-H(t)}$, $f(t|\{\lambda_k\}) = h(t) e^{-H(t)}$, where $H(t) = \int_0^t h(s)ds$. We also have that
$$
f(\lambda_k|u_{k-1},u_k) = Ga(\alpha_{k} + u_{k-1} + u_k, \beta_{k} + c_{k-1} + c_k)
$$
Therefore, given a sample $T_1,T_2,...,T_n$ from $f(t|\{\lambda_k\})$, it is straightforward to derive
$$
f(\lambda_k|u_{k-1},u_k,data) = Ga(\alpha_{k} + u_{k-1} + u_k + n_k, \beta_{k} + c_{k-1} + c_k + m_k),
$$
where $n_k$ = number of uncensored observations in $(\tau_{k-1},\tau_k]$, $m_k = \sum_i r_{ki}$, and
\[ r_{ki} = \left \{
\begin{array}{l l}
\tau_k - \tau_{k-1} & \quad t_i > \tau_k \\
t_i - \tau_{k-1} & \quad t_i \in (\tau_{k-1},\tau_k] \\
0 & \quad \mbox{otherwise}
\end{array} \right. \]
Additionally,
$$
P(u_k=u | \lambda_k,\lambda_{k+1},data) \propto f(u|\lambda) \propto \frac{[c_k(c_k+\beta_{k+1})\lambda_k\lambda_{k+1}]^u}{\Gamma(u+1)\Gamma(\alpha_{k+1}+u)}
$$
with $u=0,1,2,...$. Hence, with these full conditional distributions, a Gibbs sampler is straightforward to implement in order to obtain posterior summaries.
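To fix ideas, the following minimal sketch shows one sweep of these updates in R for a fixed partition. All object names (`alpha`, `beta`, `c.r`, `lambda`, `u`, `nk`, `mk`, `umax`) are illustrative placeholders holding the hyperparameters, current states and interval summaries defined above; they are not BGPhazard internals.
```
# One Gibbs sweep for the gamma process (illustrative sketch, not package code)
K <- length(alpha)
for (k in 1:(K - 1)) {                 # update u_k | lambda_k, lambda_{k+1}
  if (c.r[k] == 0) {
    u[k] <- 0                          # c_k = 0 implies P(u_k = 0) = 1
  } else {
    u.supp <- 0:umax                   # truncate the infinite support at a large 'umax'
    logw <- u.supp * log(c.r[k] * (c.r[k] + beta[k + 1]) * lambda[k] * lambda[k + 1]) -
      lgamma(u.supp + 1) - lgamma(alpha[k + 1] + u.supp)
    u[k] <- sample(u.supp, 1, prob = exp(logw - max(logw)))
  }
}
for (k in 1:K) {                       # update lambda_k | u_{k-1}, u_k, data
  u.prev <- if (k > 1) u[k - 1] else 0; c.prev <- if (k > 1) c.r[k - 1] else 0
  u.next <- if (k < K) u[k] else 0;     c.next <- if (k < K) c.r[k] else 0
  lambda[k] <- rgamma(1, alpha[k] + u.prev + u.next + nk[k],
                      beta[k] + c.prev + c.next + mk[k])
}
```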
We can learn about the $\{c_k\}$ by assigning an independent exponential distribution with mean $\epsilon$ for each $c_k$, $k=1,..,K-1$. The Gibbs sampler can be extended to include the full conditional densities for each $c_k$. It is not difficult to derive that a $c_k$ from $f(c_k|u,\lambda,data)$ can be taken from the density
$$
f(c_k|u_k,\lambda_k,\lambda_{k+1}) \propto (\beta_{k+1}+c_k)^{\alpha_{k+1}+u_k}c_k^{u_k}\exp\left\{ -c_k \left (\lambda_{k+1}+\lambda_k+\frac{1}{\epsilon} \right) \right\}
$$
for $c_k >0$.
Dependence between the $c_k$'s can be introduced through a hierarchical model via assigning a distribution to $\epsilon \sim Ga(a_0,b_0)$. The update would be given by:
$$
f(\epsilon|\{c_k\}) = Ga\left(\epsilon |a_0 + K, b_0 + \sum_{k=1}^K c_k \right)
$$
where $K$ is the number of intervals generated by the time axis partition. This hierarchical specification of the prior for the $c_k$'s lets the data inform their values instead of fixing them in advance.
Simulating from this distribution is not straightforward, so we construct a hybrid algorithm that embeds a Metropolis-Hastings step within the Markov chain generated by the Gibbs sampler.
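As a small illustration (again with placeholder names rather than package objects), the update for $\epsilon$ given the current $\{c_k\}$ is conjugate and can be drawn directly, whereas each $c_k$ requires a Metropolis-Hastings step targeting the density above:
```
# Conjugate draw for epsilon given the current c_k's (illustrative sketch)
epsilon <- rgamma(1, shape = a0 + length(c.r), rate = b0 + sum(c.r))
```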
### Beta process
Let $T$ be a discrete random variable taking values in the set $\{\tau_1,\tau_2,...\}$ with probability density function $f(\tau_k)=P(T=\tau_k)$. Let $\pi_k$ be the hazard rate at $\tau_k$, then the cumulative distribution and the density functions, given $\{\pi_k\}$, are $F(\tau_j|\{\pi_k\}) = 1 - \prod_{k=1}^j(1-\pi_k)$ and $f(\tau_j|\{\pi_k\}) = \pi_j \prod_{k=1}^{j-1}(1-\pi_k)$
The conditional distribution of $\pi_k$ is
$$
f(\pi_k|u_{k-1},u_k) = Be(\pi_k|\alpha_k+ u_{k-1} + u_k,\beta_k + c_{k-1} -u_{k-1}+ c_k-u_k),
$$
Thus, given a sample $T_1,T_2,...,T_n$ from $f(\cdot|\{\pi_k\})$, it is straightforward to derive
$$
f(\pi_k | u_{k-1}, u_k, data) =Be(\pi_k|\alpha_k+ u_{k-1} + u_k +n_k,\beta_k + c_{k-1} -u_{k-1}+ c_k-u_k+m_k),
$$
where $n_k$ = number of failures at $\tau_k$, $m_k = \sum_i r_{ki}$ and
\[ r_{ki} = \left \{
\begin{array}{l l}
1 & \quad t_i > \tau_k \\
0 & \quad \mbox{otherwise}
\end{array} \right. \]
Additionally,
$$
P(u_k=u | \pi_k,\pi_{k+1}, data) \propto \frac{\theta_k^u}{\Gamma(u+1)\Gamma(c_k-u+1)\Gamma(\alpha_{k+1}+u)\Gamma(\beta_{k+1}+c_k-u)}
$$
with $u=0,1,...,c_k$ and
$$
\theta_k = \frac{\pi_k\pi_{k+1}}{(1-\pi_k)(1-\pi_{k+1})}
$$
As before, obtaining posterior summaries via Gibbs sampler is simple.
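Because the support of $u_k$ is finite, its full conditional can be sampled by direct enumeration. A minimal sketch with illustrative names (`pi.r` holds the current values of $\pi_k$ and `c.r` the $c_k$'s; these are not BGPhazard internals):
```
# Draw u_k from its discrete full conditional for the beta process (sketch)
u.supp <- 0:c.r[k]
theta.k <- (pi.r[k] * pi.r[k + 1]) / ((1 - pi.r[k]) * (1 - pi.r[k + 1]))
logw <- u.supp * log(theta.k) - lgamma(u.supp + 1) - lgamma(c.r[k] - u.supp + 1) -
  lgamma(alpha[k + 1] + u.supp) - lgamma(beta[k + 1] + c.r[k] - u.supp)
u[k] <- u.supp[sample.int(length(u.supp), 1, prob = exp(logw - max(logw)))]
```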
We can learn about the $\{c_k\}$ via assigning each $c_k$ an independent Poisson distribution with mean $\epsilon$. The Gibbs sampler can be extended to include the full conditional densities for each $c_k$. A $c_k$ from $f(c_k|u,\pi,data)$ can be taken from the density
$$
f(c_k|u_k,\pi_k,\pi_{k+1}) \propto \frac{\Gamma(\alpha_{k+1}+\beta_{k+1}+c_k)}{\Gamma(\beta_{k+1}+c_k-u_k)\Gamma(c_k-u_k+1)} \left[\epsilon (1-\pi_{k+1})(1-\pi_k) \right]^{c_k}
$$
with $c_k \in \{u_k,u_k+1,u_k+2,...\}$.
Dependence between $c_k$'s can be introduced through a hierarchical model via assigning a distribution to $\epsilon \sim Ga(a_0,b_0)$. So the update would be given by:
$$
f(\epsilon|\{c_k\}) = Ga\left(\epsilon |a_0 + K, b_0 + \sum_{k=1}^K c_k \right)
$$
where $K$ is the number of discrete values in random variable $T$.
### Cox-gamma model
Differing from most previous Bayesian analyses of the proportional hazards model, Nieto-Barajas (2003) models the baseline hazard rate function with a stochastic process. Let $T_i$ be a nonnegative random variable representing the failure time of individual $i$ and $Z_i=(Z_{i1},...,Z_{ip})$ the vector containing its $p$ explanatory variables. The hazard function for individual $i$ is:
$$
\lambda_i(t)=\lambda_0(t)\exp\{Z_i'\theta\}
$$
where $\lambda_0(t)$ is the baseline hazard rate and $\theta$ is the vector of regression coefficients. The cumulative hazard function for individual $i$ becomes
$$
H_i(t)=\sum_{k=1}^{\infty}\lambda_k W_{i,k}(t,\theta),
$$
where,
\[ W_{i,k}(t,\theta) = \left \{
\begin{array}{l l}
(\tau_k - \tau_{k-1})\exp\{Z_i'\theta \} & t_i > \tau_k \\
(t_i - \tau_{k-1})\exp\{Z_i'\theta \} & t_i \in (\tau_{k-1},\tau_k] \\
0 & otherwise
\end{array} \right. \]
Given a sample of possibly right-censored observations, where $T_1, ..., T_{n_u}$ are uncensored and $T_{n_u+1},...,T_n$ are right-censored, the conditional posterior distributions for the parameters of the semiparametric model are:
\begin{itemize}
\item{$f(\lambda_k | u_{k-1}, u_k, data,\theta) = Ga(\lambda_k|\alpha_k+ u_{k-1} + u_k + n_k,\beta_k + c_{k-1} + c_k+m_k(\theta))$}
\item{$P(u_k=u | \lambda_k,\lambda_{k+1},data) \propto f(u|\lambda) \propto \frac{[c_k(c_k+\beta_{k+1})\lambda_k\lambda_{k+1}]^u}{\Gamma(u+1)\Gamma(\alpha_{k+1}+u)}$}
\item{$f(\theta|\lambda,data)\propto f(\theta) \exp\left\{{\sum_{i=1}^{n_u} \theta'Z_i} -\sum_{k=1}^\infty \lambda_k m_k(\theta) \right\} $}
\end{itemize}
where $n_k=\sum_{i=1}^{n_u}I_{(\tau_{k-1},\tau_k]}(t_i)$ and $m_k(\theta)=\sum_{i=1}^n W_{i,k}(t_i,\theta)$.
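For a given $\theta$, the quantity $m_k(\theta)$ is easy to evaluate from the data. The helper below is a small illustrative sketch (the names are assumptions, not BGPhazard functions); `tau` is the vector $(\tau_0,\tau_1,\tau_2,\ldots)$ with $\tau_0=0$:
```
# m_k(theta) = sum_i W_{i,k}(t_i, theta) (illustrative sketch)
mk_theta <- function(k, times, Z, theta, tau) {
  expZ <- exp(as.vector(Z %*% theta))                     # exp(Z_i' theta)
  exposure <- pmax(pmin(times, tau[k + 1]) - tau[k], 0)   # time spent in (tau_{k-1}, tau_k]
  sum(exposure * expZ)
}
```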
As in the previous two cases, we incorporate a hyperprior process for the $\{c_k\}$ such that $c_k\sim Ga(1,\epsilon_k)$. The set of full conditional posterior distributions can then be extended to include
$$
f(c_k|u_k,\lambda_k,\lambda_{k+1}) \propto (\beta_{k+1}+c_k)^{\alpha_{k+1}+u_k}c_k^{u_k}\exp\left\{ -c_k \left (\lambda_{k+1}+\lambda_k+\frac{1}{\epsilon} \right) \right\}
$$
Dependence between $c_k$'s can be introduced through a hierarchical model via assigning a distribution to $\epsilon \sim Ga(a_0,b_0)$. So the update would be given by:
\begin{equation*}
f(\epsilon|\{c_k\}) = Ga\left(\epsilon |a_0 + K, b_0 + \sum_{k=1}^K c_k \right)
\end{equation*}
where $K$ is the number of intervals generated by the time axis partition. As before, this hierarchical specification of the prior for the $c_k$'s lets the data inform their values.
## Gamma model examples
For this model, we will be using the observations from the 6-MP (treated) arm of the 6-MP data set (Freireich, E. J., et al., 1963) --data from a trial of 42 leukemia patients organised in pairs, where one member of each pair is treated with the drug and the other serves as control--. For Examples 1 to 4, we use a partition of unitary length intervals; for the last three examples --5, 6 \& 7--, we use equally dense intervals. The `"time"` column is taken as the observed times vector --`times`-- and the `"cens"` column as the censoring status vector --`delta`--:
```
data(gehan)
times <- gehan$time[gehan$treat == "6-MP"]
delta <- gehan$cens[gehan$treat == "6-MP"]
```
Now that we have a time and censoring status vector, we can run several examples for this model. Default values are used for each function unless otherwise noted. Every example shows our estimate overlapped with the Nelson-Aalen / Kaplan-Meier estimator, so the user can compare them.
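If desired, the classical estimators that serve as a benchmark can be computed directly with the `survival` package; this is shown only for reference (it is not required by `BGPhazard`), treating `delta` as the event indicator:
```
library(survival)
km <- survfit(Surv(times, delta) ~ 1)  # Kaplan-Meier estimate of the survival curve
plot(km, conf.int = FALSE)
```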
### Example 1. Independence case. Unitary length intervals
With our model, defining $c_k$ as a null vector --by fixing `type.c = 1`-- recovers the Nelson-Aalen and Kaplan-Meier estimators. A unitary partition of the time axis is obtained by fixing `type.t = 2`. Figure \ref{fig:G1} shows that our estimators --under independence-- return the same results as the Nelson-Aalen and Kaplan-Meier estimators.
```
ExG1 <- GaMRes(times, delta, type.t = 2, K = 35, type.c = 1,
iterations = 3000)
GaPloth(ExG1, confint = FALSE)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G11.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G12.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 1 - Independence case. Unitary length intervals}
\label{fig:G1}
\end{figure}
### Example 2. Introducing dependence through c. Unitary length intervals
The influence of $c$ --or `c.r`-- can be understood as a dependence parameter: the greater the value of each $c_k$, $k=0,1,2,...,K-1$, the higher the dependence between intervals $k$ and $k+1$. For this example, we assign a fixed value to the vector $c$, so we use `type.c = 2`. Comparing with the previous example, we see that the estimates that were zero now take positive values --see Figures \ref{fig:G1} and \ref{fig:G2}--. Note how this model compares to Example 5 --see Figure \ref{fig:G5}--; the difference between those examples is how the partition is defined.
```
ExG2 <- GaMRes(times, delta, type.t = 2, K = 35, type.c = 2,
c.r = rep(50, 34), iterations = 3000)
GaPloth(ExG2)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G21.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G22.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 2 - Introducing dependence through c ($c_k=50, \forall k$). Unitary length intervals}
\label{fig:G2}
\end{figure}
Additionally, we can get further detail on the Gibbs sampler with a diagnosis of the resulting Markov chain. We can run this diagnosis for each entry of $\lambda$, $u$, $c$ or $\epsilon$. In Figure \ref{fig:G2a} we show the diagnosis for $\lambda_6$ which includes the trace, the ergodic mean, the ACF function and the histogram for the generated chain.
```
GaPlotDiag(ExG2, variable = "lambda", pos = 6)
```
\begin{figure}
\centering
\includegraphics[width=\textwidth]{G23.png}
\caption{Gamma Example 2 - Diagnosis for $\lambda_6$}
\label{fig:G2a}
\end{figure}
### Example 3. Varying c through a distribution. Unitary length intervals
As we reviewed, we can learn about the $\{c_k\}$ via assigning an exponential distribution with mean $\epsilon$. The estimates using $\epsilon = 1$ --`type.c = 3`-- and unitary length intervals --`type.t = 2`-- are shown in Figure \ref{fig:G3}. Comparing the hazard function with the previous example, where $c$ was fixed, we observe that because of the variability given to $c$, the changes in the estimated values between two contiguous intervals are greater in this example. The survival function echoes the shape of the Kaplan-Meier estimate, but decreases faster.
Comparing this example with Example 6 --Figure \ref{fig:G6}--, as with the previous example, the main difference is given by the partition of the time axis. This affects the estimates as we will note later.
```
ExG3 <- GaMRes(times, delta, type.t = 2, K = 35, type.c = 3, epsilon = 1,
iterations = 3000)
GaPloth(ExG3)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G31.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G32.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 3 - Varying $c$ through a distribution $c_k\sim Ga(1,\epsilon = 1$). Unitary length intervals}
\label{fig:G3}
\end{figure}
### Example 4. Using a hierarchical model to estimate c. Unitary intervals
The previous example can be extended with a hierarchical model, assigning a distribution $\epsilon \sim Ga(a_0,b_0)$ with $a_0=b_0=0.01$. In order to set up the model we set `type.c = 4`. The result, displayed in Figure \ref{fig:G4}, is a smooth hazard function and a survival function that decreases faster than the Kaplan-Meier estimate.
```
ExG4 <- GaMRes(times, delta, type.t = 2, K = 35, type.c = 4,
iterations = 3000)
GaPloth(ExG4)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G41.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G42.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 4 - Using a hierarchical model to estimate $c$. Unitary intervals}
\label{fig:G4}
\end{figure}
### Example 5. Introducing dependence through c. Equally dense intervals
This example illustrates the same concept as Example 2 --how $c$ introduces dependence--, but with a different partition of the time axis. To get this partition we set `type.t = 1`. Figure \ref{fig:G5} shows that the survival function is close to the Kaplan-Meier estimate. The fact that it gets closer to the K-M estimate does not make it a better estimate; we can only say that this partition yields, on average, a smaller hazard rate than Example 2, which was built with a unitary length partition.
```
ExG5 <- GaMRes(times, delta, type.t = 1, K = 8, type.c = 2,
c.r = rep(50, 7), iterations = 3000)
GaPloth(ExG5)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G51.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G52.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 5 - Introducing dependence through $c$ ($c_k=50, \forall k$). Equally dense intervals}
\label{fig:G5}
\end{figure}
### Example 6. Varying c through a distribution. Equally dense intervals
We can compare this example, shown in Figure \ref{fig:G6}, with Example 3 --Figure \ref{fig:G3}--. We use fewer intervals, so the survival function is smoother. Our estimate decreases faster than the Kaplan-Meier estimate.
```
ExG6 <- GaMRes(times, delta, type.t = 1, K = 8, type.c = 3,
iterations=3000)
GaPloth(ExG6)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G61.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G62.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 6 - Varying $c$ through a distribution $c_k\sim Ga(1,\epsilon = 1$). Equally dense intervals}
\label{fig:G6}
\end{figure}
### Example 7. Using a hierarchical model to estimate c. Equally dense intervals
The survival curve for this particular example results in a smoothed version of the Kaplan-Meier estimate (Figure \ref{fig:G7}). As with previous examples, it can be compared with the unitary partition example (see Example 4, Figure \ref{fig:G4}).
```
ExG7 <- GaMRes(times, delta, type.t = 1, K = 8, type.c = 4,
iterations = 3000)
GaPloth(ExG7)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{G71.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{G72.png}
\caption{Survival function}
\end{subfigure}
\caption{Gamma Example 7 - Using a hierarchical model to estimate $c$. Equally dense intervals}
\label{fig:G7}
\end{figure}
%\clearpage
## Beta model examples
For this model, we use survival data on 26 psychiatric inpatients admitted to the University of Iowa hospitals during the years 1935-1948. This sample is part of a larger study of psychiatric inpatients discussed by Tsuang and Woolson (1977). We take the `"time"` column as the observed times vector --`times`-- and the `"death"` column as the censoring status vector --`delta`--:
```
data(psych)
times <- psych$time
delta <- psych$death
```
### Example 1. Independence case
As with the gamma model, we obtain the Nelson-Aalen and Kaplan-Meier estimators by defining $c_k$ as a null vector through fixing `type.c = 1` --see Figure \ref{fig:B1}--. The conclusion does not change: the independence case of our model yields the Nelson-Aalen and Kaplan-Meier estimators.
```
ExB1 <- BeMRes(times, delta, type.c = 1, iterations = 3000)
BePloth(ExB1, confint = FALSE)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{B11.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{B12.png}
\caption{Survival function}
\end{subfigure}
\caption{Beta Example 1 - Independence case}
\label{fig:B1}
\end{figure}
### Example 2. Introducing dependence through c
The influence of $c$ --or `c.r`-- can also be understood as a dependence parameter: the greater the value of each $c_k$, $k=0,1,2,...,K-1$, the higher the dependence between intervals $k$ and $k+1$. In this example, we fix each entry of $c$ at 100. As we are defining the vector $c$ with fixed values, we set `type.c = 2`. We see in Figure \ref{fig:B2} that the hazard function estimate is smoother than in the previous example. The steps in the survival function appear more uniform than in the Kaplan-Meier estimate.
```
ExB2 <- BeMRes(times, delta, type.c = 2, c.r = rep(100, 39),
iterations = 3000)
BePloth(ExB2)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{B21.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{B22.png}
\caption{Survival function}
\end{subfigure}
\caption{Beta Example 2 - Introducing dependence through $c$ ($c_k=100, \forall k)$}
\label{fig:B2}
\end{figure}
Additionally, we can get further detail on the Gibbs sampler with a diagnosis of the resulting Markov chain. We can run this diagnosis for each entry of $\pi$, $u$, $c$ or $\epsilon$. In Figure \ref{fig:B2a} we show the diagnosis for $\pi_{10}$, which includes the trace, the ergodic mean, the autocorrelation function and the histogram of the chain.
```
BePlotDiag(ExB2, variable = "Pi", pos = 6)
```
\begin{figure}
\centering
\includegraphics[width=\textwidth]{B23.png}
\caption{Beta Example 2 - Diagnosis for $\pi_{10}$}
\label{fig:B2a}
\end{figure}
### Example 3. Varying c through a distribution
As with the gamma model, we can learn about the $\{c_k\}$ via assigning an exponential distribution with mean $\epsilon$. The estimates using $\epsilon = 1$ and unitary length intervals are shown in Figure \ref{fig:B3}. Note that the confidence intervals have widened: this signals that we have introduced additional variability into the model --because of the distribution assigned to $c$--.
```
ExB3 <- BeMRes(times, delta, type.c = 3, epsilon = 1, iterations = 3000)
BePloth(ExB3)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{B31.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{B32.png}
\caption{Survival function}
\end{subfigure}
\caption{Beta Example 3 - Varying $c$ through a distribution $c_k\sim Ga(1,\epsilon = 1)$}
\label{fig:B3}
\end{figure}
### Example 4. Using a hierarchical model to estimate c
The previous example can be extended with a hierarchical model, assigning a distribution $\epsilon \sim Ga(a_0,b_0)$, with $a_0=b_0=0.01$. In order to set up the model we set `type.c = 4`. The result displayed in Figure \ref{fig:B4} is a smooth hazard function and a survival function that decreases faster than the Kaplan-Meier estimate.
```
ExB4 <- BeMRes(times, delta, type.c = 4, iterations = 3000)
BePloth(ExB4)
```
\begin{figure}
\centering
\begin{subfigure}[a]{\textwidth}\centering
\includegraphics[width=\textwidth]{B41.png}
\caption{Hazard rates}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{B42.png}
\caption{Survival function}
\end{subfigure}
\caption{Beta Example 4 - Using a hierarchical model to estimate $c$}
\label{fig:B4}
\end{figure}
\clearpage
## Cox-gamma model example
In this example, we simulate the data from a Weibull model, frequently used for continuous and nonnegative data. The advantage of simulating from a known model is that we know the true values in advance, so we can compare the estimates with the exact values. The Weibull model has the following hazard and survival functions:
$$
h_0(t)=abt^{b-1}, S_0(t)=e^{-at^b}
$$
We construct the proportional hazards model as:
$$
h_i(t_i|Z_i)=h_0(t_i)e^{\theta 'Z_i}=a e^{\theta 'Z_i} b t_i^{b-1}, \qquad S_i(t_i|Z_i)=\exp\left\{-H_0(t_i)e^{\theta 'Z_i}\right\}
$$
Note that this Weibull proportional hazards model is again a Weibull model, with parameters ($a_i^*=ae^{\theta 'Z_i},b$). Based on the previous densities and on fixed parameters $a,b,\theta_1$ and $\theta_2$ --the covariates $Z_{i1}$ and $Z_{i2}$ are simulated from uniform distributions on the interval (0,1)--, we simulate $n$ observations and gather the results in a table --see Table \ref{cuad:weibull}--. Here $t_i|Z_i \sim Weibull(a_i^*, b)$; $Z_i = (Z_{i1},Z_{i2})$ are the explanatory variables; $c_i$ is the censoring time; $\delta_i$ is the censoring indicator, and $min\{t_i,c_i\}$ is the \emph{observed} time, accounting for censoring.
\begin{table}
\centering
\begin{tabular}{|c| c| c| c| c| c| c|}
\hline
i & $t_i$ &$Z_{i1}\sim U(0,1)$ & $Z_{i2}\sim U(0,1)$ & $c_i\sim Exp(1)$ & $\delta_i=I(t_i>c_i)$ & $min\{t_i,c_i\}$ \\
\hline
1 &$t_1$ & $Z_{11}$ & $Z_{12}$ & $c_1$ & $\delta_1$ & $min\{t_1,c_1\}$ \\
2 &$t_2$ & $Z_{21}$ & $Z_{22}$ & $c_2$ & $\delta_2$ & $min\{t_2,c_2\}$ \\
$\vdots$ & $\vdots$ &$\vdots$ &$\vdots$ &$\vdots$ &$\vdots$ &$\vdots$ \\
$n$ &$t_n$ & $Z_{n1}$ & $Z_{n2}$ & $c_n$ & $\delta_n$ & $min\{t_n,c_n\}$ \\
\hline
\end{tabular}
\caption{Weibull simulation model}
\label{cuad:weibull}
\end{table}
We generate a sample of size $n=100$ from the simulation model with parameters $a=0.1$, $b=1$, $\theta=(1,1)$ and $Z_{ij}\sim U(0,1)$, $j=1,2$. The result is a table with $n=100$ observations.
We use the default parameters of `CGaMRes` except for `K = 10`, `iterations = 3000` and `thpar = 10`. Theoretically, our model should estimate a constant hazard rate at $a\times b=0.1$.
Below, we show the code for the Weibull model and the calls for the plots for the hazard and survival functions --command `CGaPloth(M)`--, the predictive distribution for an observation defined as the median of the data --`CGaPred(M)`--, and the plots for $\theta_1$ and $\theta_2$ --`PlotTheta(M)`--.
```
SampWeibull <- function(n, a = 10, b = 1, beta = c(1, 1)) {
  # Simulates n possibly right-censored observations from the Weibull
  # proportional hazards model described above
  M <- matrix(0, ncol = 7, nrow = n)
  for(i in 1:n){
    M[i, 1] <- i
    M[i, 2] <- x1 <- runif(1)              # covariate Z_i1
    M[i, 3] <- x2 <- runif(1)              # covariate Z_i2
    M[i, 4] <- rweibull(1, shape = b,      # failure time t_i
                        scale = 1 / (a * exp(cbind(x1, x2) %*% beta)))
    M[i, 5] <- rexp(1)                     # censoring time c_i
    M[i, 6] <- M[i, 4] > M[i, 5]           # censoring indicator delta_i
    M[i, 7] <- min(M[i, 4], M[i, 5])       # observed time
  }
  colnames(M) <- c("i", "x_i1", "x_i2", "t_i", "c_i", "delta",
                   "min{t_i, c_i}")
  return(M)
}
dat <- SampWeibull(100, 0.1, 1, c(1, 1))
dat <- cbind(dat[, c(4, 6)], dat[, c(2, 3)])  # columns: time, status, covariates
CG <- CGaMRes(dat, K = 10, iterations = 3000, thpar = 10)
CGaPloth(CG)
PlotTheta(CG)
CGaPred(CG)
```
Because of the way we built the model, we can compare against the theoretical results of the Weibull model. In Figure \ref{fig:CG1} we see that the hazard rate estimate is practically constant from $t = 8$ to $t = 37$. The estimate of the survival function gets very close to the true one. The plots and histograms for $\theta$ in Figure \ref{fig:CG2} show that the estimated values for the regression coefficients $(\theta_1,\theta_2)$ are consistently near 1. Finally, Figure \ref{fig:CG3} shows the hazard rate estimate over an equally dense partition for a future individual whose explanatory variables are equal to the median of the observations --$x_F$--. Note that the effect over the hazard function is given by the product of the baseline hazard function and $\exp\{x_F'\theta\}$.
\begin{figure}
\centering
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{CG1.png}
\caption{Hazard rate estimate}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{CG2.png}
\caption{Survival function estimate}
\end{subfigure}
\caption{Cox-gamma example}
\label{fig:CG1}
\end{figure}
\begin{figure}
\centering
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{CG3.png}
\caption{$\theta_1$ estimate}
\end{subfigure}
\begin{subfigure}[b]{\textwidth}\centering
\includegraphics[width=\textwidth]{CG4.png}
\caption{$\theta_2$ estimate}
\end{subfigure}
\caption{$\theta$ estimate on the Cox-gamma example}
\label{fig:CG2}
\end{figure}
\begin{figure}
\centering
\begin{subfigure}{\textwidth}
\includegraphics[width=\textwidth]{CG5.png}
\end{subfigure}
\caption{Hazard rate estimate for the median on the Cox-gamma example}
\label{fig:CG3}
\end{figure}
\pagebreak
\section{References}
\begin{itemize}
\item{ \textsc{Freireich, E.J., et al.}, Estimation of Exponential Survival Probabilities with Concomitant Information, \textit{Biometrics}, {\bf 21}, pages 826-838, 1965.}
\item{ \textsc{Gehan, E.A.}, A generalized Wilcoxon test for comparing arbitrarily singly-censored samples, \textit{Biometrika}, {\bf 52}, pages 687-696, 1965.}
\item{ \textsc{Hjort, N.L.}, Nonparametric Bayes estimators based on beta processes in models for life history data, \textit{Annals of Statistics}, {\bf 18}, pages 1259-1294, 1990.}
\item{ \textsc{Kaplan, E.L.} and \textsc{Meier, P.}, Nonparametric estimation from incomplete observations, \textit{Journal of the American Statistical Association}, {\bf 53}(282), pages 457-481, 1958.}
\item{ \textsc{Klein, J.P.} and \textsc{Moeschberger, M.L.}, Survival analysis: techniques for censored and truncated data, Springer Science \& Business Media, 2003.}
\item{ \textsc{Nieto-Barajas, L.E.} and \textsc{Walker, S.G.}, Markov beta and gamma processes for modelling hazard rates, \textit{Scandinavian Journal of Statistics}, {\bf 29}, pages 413-424, 2002.}
\item{ \textsc{Nieto-Barajas, L.E.}, Discrete time Markov gamma processes and time dependent covariates in survival analysis, \textit{Bulletin of the International Statistical Institute 54th Session}, 2003.}
\item{ \textsc{Tsuang, M.T.} and \textsc{Woolson, R.F.}, Mortality in Patients with Schizophrenia, Mania and Depression, \textit{British Journal of Psychiatry}, {\bf 130}, pages 162-166, 1977.}
\item{ \textsc{Walker, S.G.} and \textsc{Mallick, B.K.}, Hierarchical generalized linear models and frailty models with Bayesian nonparametric mixing, \textit{Journal of the Royal Statistical Society}, Series B, {\bf 59}, pages 845-860, 1997.}
\item{ \textsc{Woolson, R.F.}, Rank Tests and a One-Sample Log Rank Test for Comparing Observed Survival Data to a Standard Population, \textit{Biometrics}, {\bf 37}, pages 687-696, 1981.}
\end{itemize}
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/vignettes/BGPHhazard.Rmd
|
---
title: "Bivariate Model Example"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Bivariate Model Example}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width=8,
fig.height=6,
fig.align = "center"
)
```
```{r setup}
library(BGPhazard)
library(dplyr)
library(ggplot2)
```
We will use the built-in dataset `KIDNEY` to show how the bivariate model functions work. All the functions for the bivariate model start with the letters **BSB**, which stand for *Bayesian Semiparametric Bivariate*.
```{r}
KIDNEY
```
## Initial setup
First, we use the `BSBInit` function to create the data structure that we feed to the Gibbs sampler. We can inspect the data structure with the summary and print methods.
```{r}
bsb_init <- BSBInit(
KIDNEY,
alpha = 0.001,
beta = 0.001,
c = 1000,
part_len = 10,
seed = 42
)
summary(bsb_init)
```
Our data consists of 38 individuals with two failure times each. For the first failure time `t1` we have six censored observations, while for the second failure time we have twelve. The model will use `sex` as a predictor variable.
## Gibbs Sampler
To obtain the posterior samples, we use the function `BSBHaz`. We run 100 iterations with a burn-in period of 10. The number of iterations is kept low to make building this vignette fast; in practice, you should check how many iterations the model needs to reach convergence.
```{r}
samples <- BSBHaz(
bsb_init,
iter = 100,
burn_in = 10,
gamma_d = 0.6,
theta_d = 0.3,
seed = 42
)
print(samples)
```
The `print` method shows that we only kept the last 90 iterations as posterior simulations.
## Summaries
### Tables
We can get posterior sample summaries with the function `BSBSumm`. This function returns the posterior mean and a 0.95 probability interval for all the model parameters. Additionally, it returns the acceptance rate for variables sampled using the Metropolis-Hastings algorithm.
```{r}
BSBSumm(samples, "omega1")
BSBSumm(samples, "lambda1")
```
It is important to notice that `lambda1` and `lambda2` are the estimated hazard rates for the baseline hazards $h_0$. They do not include the effect of predictor variables. The same applies for the survival function estimates `s1` and `s2`.
### Plots
We can get two summary plots: estimated hazard rates and estimated survival functions.
**Baseline hazards**
```{r}
BSBPlotSumm(samples, "lambda1")
BSBPlotSumm(samples, "lambda2")
```
**Survival functions**
```{r}
BSBPlotSumm(samples, "s1")
BSBPlotSumm(samples, "s2")
```
You can also get diagnostic plots for the simulated variables. Choose the type of plot with the argument `type`.
```{r}
BSBPlotDiag(samples, "omega1", type = "traceplot")
BSBPlotDiag(samples, "omega1", type = "ergodic_means")
```
|
/scratch/gouwar.j/cran-all/cranData/BGPhazard/vignettes/bivariate-model-example.Rmd
|
#' @name bgvar
#' @export
#' @title Estimation of Bayesian GVAR
#' @description Estimates a Bayesian GVAR with either the Stochastic Search Variable Selection (SSVS), the Minnesota prior (MN), the Normal-Gamma (NG), or the Horseshoe (HS) prior. All specifications can be estimated with stochastic volatility.
#' @usage bgvar(Data, W, plag=1, draws=5000, burnin=5000, prior="NG", SV=TRUE, hold.out=0, thin=1,
#' hyperpara=NULL, eigen=TRUE, Ex=NULL, trend=FALSE, expert=NULL, verbose=TRUE)
#' @param Data Either a \describe{
#' \item{\code{list object}}{ of length \code{N} that contains the data. Each element of the list refers to a country/entity. The number of columns (i.e., variables) in each country model can be different. The \code{T} rows (i.e., number of time observations), however, need to be the same for each country. Country and variable names are not allowed to contain a dot \code{.} (i.e., a dot) since this is our naming convention.}
#' \item{\code{matrix object}}{ of dimension \code{T} times \code{K}, with \code{K} denoting the sum of all endogenous variables of the system. The column names should consist of two parts, separated by a \code{.} (i.e., a dot). The first part should denote the country / entity name and the second part the name of the variable. Country and variable names are not allowed to contain a \code{.} (i.e., a dot).}
#' }
#' @param W An N times N weight matrix with 0 elements on the diagonal and row sums that sum up to unity or a list of weight matrices.
#' @param plag Number of lags used. Either a single value for domestic and weakly exogenous, or a vector of length two. Default set to \code{plag=1}.
#' @param draws Number of retained draws. Default set to \code{draws=5000}.
#' @param burnin Number of burn-ins. Default set to \code{burnin=5000}.
#' @param prior Either \code{SSVS} for the Stochastic Search Variable Selection prior, \code{MN} for the Minnesota prior, \code{NG} for the Normal-Gamma prior or \code{HS} for the Horseshoe prior. See Details below.
#' @param SV If set to \code{TRUE}, models are fitted with stochastic volatility using the \code{stochvol} package. Due to storage issues, not the whole history of the \code{T} variance covariance matrices are kept, only the median. Consequently, the \code{BGVAR} package shows only one set of impulse responses (with variance covariance matrix based on mean sample point volatilities) instead of \code{T} sets. Specify \code{SV=FALSE} to turn SV off.
#' @param hold.out Defines the hold-out sample. Default without hold-out sample, thus set to zero.
#' @param thin Is a thinning interval of the MCMC chain. As a rule of thumb, workspaces get large if draws/thin>500. Default set to \code{thin=1}.
#' @param Ex For including truly exogenous variables to the model. Either a \describe{
#' \item{\code{list object}}{ of maximum length \code{N} that contains the data. Each element of the list refers to a country/entity and has to match the country/entity names in \code{Data}. If no truly exogenous variables are added to the respective country/entity model, omit the entry. The \code{T} rows (i.e., number of time observations), however, need to be the same for each country. Country and variable names are not allowed to contain a dot \code{.} (i.e., a dot) since this is our naming convention.}
#' \item{\code{matrix object}}{ of dimension \code{T} times number of truly exogenous variables. The column names should consist of two parts, separated by a \code{.} (i.e., a dot). The first part should denote the country / entity name and the second part the name of the variable. Country and variable names are not allowed to contain a \code{.} (i.e., a dot).}
#' }
#' @param trend If set to \code{TRUE} a deterministic trend is added to the country models.
#' @param hyperpara Is a list object that defines the hyperparameters when the prior is set to either \code{MN}, \code{SSVS}, \code{NG}, or \code{HS}. \describe{
#' \item{\code{a_1}}{ is the prior hyperparameter for the inverted gamma prior (shape) (set a_1 = b_1 to a small value for the standard uninformative prior). Default is set to \code{a_1=0.01}.}
#' \item{\code{b_1}}{ is the prior hyperparameter for the inverted gamma prior (rate). Default is set to \code{b_1=0.01}.}
#' \item{\code{prmean}}{ Prior mean on the first lag of the autoregressive coefficients, standard value is \code{prmean=1} for non-stationary data. Prior mean for the remaining autoregressive coefficients automatically set to 0.}
#' \item{\code{bmu}}{ If \code{SV=TRUE}, this is the prior hyperparameter for the mean of the the mean of the log-volatilities. Default is \code{bmu=0}.}
#' \item{\code{Bmu}}{ If \code{SV=TRUE}, this is the prior hyperparameter for the variance of the mean of the log-volatilities. Default is \code{Bmu=100}.}
#' \item{\code{a0}}{ If \code{SV=TRUE}, this is the hyperparameter of the shape1 parameter for the Beta prior on the persistence parameter of the log-volatilities. Default is \code{a0=25}.}
#' \item{\code{b0}}{ If \code{SV=TRUE}, this is the hyperparameter of the shape2 parameter for the Beta prior on the persistence parameter of the log-volatilities. Default is \code{b0=1.5}.}
#' \item{\code{Bsigma}}{ If \code{SV=TRUE}, this is the hyperparameter for the Gamma prior on the variance of the log-volatilities. Default is set to \code{Bsigma=1}.}
#' \item{"MN"}{\describe{
#' \item{\code{shrink1}}{ Starting value of \code{shrink1}. Default set to 0.1.}
#' \item{\code{shrink2}}{ Starting value of \code{shrink2}. Default set to 0.2.}
#' \item{\code{shrink3}}{ Hyperparameter of \code{shrink3}. Default set to 100.}
#' \item{\code{shrink4}}{ Starting value of \code{shrink4}. Default set to 0.1.}
#' }}
#' \item{"SSVS"}{\describe{
#' \item{\code{tau0}}{ is the prior variance associated with the normal prior on the regression coefficients if a variable is NOT included (spike, tau0 should be close to zero).}
#' \item{\code{tau1}}{ is the prior variance associated with the normal prior on the regression coefficients if a variable is included (slab, tau1 should be large).}
#' \item{\code{kappa0}}{ is the prior variance associated with the normal prior on the covariances if a covariance equals zero (spike, kappa0 should be close to zero).}
#' \item{\code{kappa1}}{ is the prior variance associated with the normal prior on the covariances if a covariance is unequal to zero (slab, kappa1 should be large).}
#' \item{\code{p_i}}{ is the prior inclusion probability for each regression coefficient whether it is included in the model (default set to \code{p_i=0.5}).}
#' \item{\code{q_ij}}{ is the prior inclusion probability for each covariance whether it is included in the model (default set to \code{q_ij=0.5}).}
#' }}
#' \item{"NG":}{\describe{
#' \item{\code{e_lambda}}{ Prior hyperparameter for the Gamma prior on the lag-specific shrinkage components, standard value is \code{e_lambda=1.5}.}
#' \item{\code{d_lambda}}{ Prior hyperparameter for the Gamma prior on the lag-specific shrinkage components, standard value is \code{d_lambda=1}.}
#' \item{\code{tau_theta}}{ Parameter of the Normal-Gamma prior that governs the heaviness of the tails of the prior distribution. A value of \code{tau_theta=1} would lead to the Bayesian LASSO. Default value differs per entity and set to \code{tau_theta=1/log(M)}, where \code{M} is the number of endogenous variables per entity.}
#' \item{\code{sample_tau}}{ If set to \code{TRUE} \code{tau_theta} is sampled.}
#' }}
#' \item{"HS":}{ No additional hyperparameter needs to be elicited for the horseshoe prior.}
#' }
#' @param eigen Set to TRUE if you want to compute the largest eigenvalue of the companion matrix for each posterior draw. If the modulus of the eigenvalue is significantly larger than unity, the model is unstable. Unstable draws exceeding an eigenvalue of one are then excluded. If \code{eigen} is set to a numeric value, then this corresponds to the maximum eigenvalue. The default is set to 1.05 (which excludes all posterior draws for which the eigenvalue of the companion matrix was larger than 1.05 in modulus).
#' @param expert Expert settings, must be provided as list. Default is set to \code{NULL}.\describe{
#' \item{\code{variable.list}}{ In case \code{W} is a list of weight matrices, specify here which set of variables should be weighted by which weighting matrix. Default is \code{NULL}.}
#' \item{\code{OE.weights}}{ Default value is set to \code{NULL}. Can be used to provide information of how to handle additional country models (other entities). Additional country models can be used to endogenously determine variables that are (weakly) exogenous for the majority of the other country models. As examples, one could think of an additional oil price model (see also Mohaddes and Raissi 2019) or a model for the joint euro area monetary policy (see also Georgiadis 2015; Feldkircher, Gruber and Huber (2020)). The data for these additional country models has to be contained in \code{Data}. The number of additional country models is unlimited. Each list entry of \code{OE.weights} has to be named similar to the name of the additional country model contained in \code{Data}. Each slot of \code{OE.weight} has to contain the following information: \describe{
#' \item{\code{weights}}{ Vector of weights with names relating to the countries for which data should be aggregated. Can also relate to a subset of countries contained in the data.}
#' \item{\code{variables}}{ Vector of variables names that should be included in the additional country model. Variables that are not contained in the data slot of the extra country model are assumed to be weakly exogenous for the additional country model (aggregated with \code{weight}).}
#' \item{\code{exo}}{ Vector of variable names that should be fed into the other countries as (weakly) exogenous variables.}
#' }}
#' \item{\code{Wex.restr}}{ Character vector containing variables that should be excluded from being used as weakly exogenous from all unit models. An example that has often been used in the literature is to place these restrictions on nominal exchange rates. Default is \code{NULL} in which case all weakly exogenous variables are treated symmetrically.}
#' \item{\code{save.country.store}}{ If set to \code{TRUE} then function also returns the container of all draws of the individual country models. Significantly raises object size of output and default is thus set to \code{FALSE}.}
#' \item{\code{save.shrink.store}}{If set to \code{TRUE} the function also inspects posterior output of shrinkage coefficients. Default set to \code{FALSE}.}
#' \item{\code{save.vola.store}}{If set to \code{TRUE} the function also inspects posterior output of coefficients associated with the volatility process. Default set to \code{FALSE}.}
#' \item{\code{use_R}}{ Boolean whether estimation should fall back on \code{R} version, otherwise \code{Rcpp} version is used (default).}
#' \item{\code{applyfun}}{ In case \code{use_R=TRUE}, this allows for user-specific apply function, which has to have the same interface than \code{lapply}. If \code{cores=NULL} then \code{lapply} is used, if set to a numeric either \code{parallel::parLapply()} is used on Windows platforms and \code{parallel::mclapply()} on non-Windows platforms.}
#' \item{\code{cores}}{ Numeric specifying the number of cores which should be used, also \code{all} and \code{half} is possible. By default only one core is used.}
#' }
#' @param verbose If set to \code{FALSE} it suppresses printing messages to the console.
#' @details We provide four prior specifications: the Minnesota prior labeled \code{MN}, the Stochastic Search Variable Selection prior labeled \code{SSVS}, the Normal-Gamma prior labeled \code{NG}, and the Horseshoe prior labeled \code{HS}. The first one has been implemented for global VARs in Feldkircher and Huber (2016) and the second one in Crespo Cuaresma et al. (2016), while the Normal-Gamma prior has been introduced to VAR modeling in Huber and Feldkircher (2019).
#' Please consult these references for more details on the specification. In the following we briefly explain the differences between the first three priors; the Horseshoe prior requires no additional hyperparameters. The Minnesota prior pushes the variables in the country-specific VAR towards their unconditional stationary mean, or toward a situation where there is at least one unit root present. The SSVS prior is a form of a 'spike' and 'slab' prior. Variable selection is based on the probability of assigning the corresponding regression coefficient to the 'slab' component. If a regression coefficient is non-informative, the 'spike' component pushes the associated posterior estimate more strongly towards zero. Otherwise, the slab component resembles a non-informative prior that has little impact on the posterior. Following George et al. (2008) we set the prior variances for the normal distribution in a semi-automatic fashion. This implies scaling the mixture normal with the OLS standard errors of the coefficients for the full model. The NG prior is a form of global-local shrinkage prior. Hence, the local component shrinks each coefficient towards zero if there is no information for the associated dependent variable. Otherwise, the prior exerts a fat-tail structure such that deviations from zero are possible. The global component is present for each lag, thus capturing the idea that higher lags should be shrunk more aggressively towards zero.
#' @author Maximilian Boeck, Martin Feldkircher, Florian Huber
#' @return Returns a list of class \code{bgvar} with the following elements: \describe{
#' \item{\code{args}}{ is a list object that contains the arguments submitted to function \code{bgvar}.}
#' \item{\code{xglobal}}{ is a matrix object of dimension T times N (T # of observations, K # of variables in the system).}
#' \item{\code{gW}}{ is the global weight matrix. It is a list, with \code{N} entries, each of which contains the weight matrix of each country.}
#' \item{\code{country.res}}{ is a matrix that contains the posterior mean of the country models' residuals. The residuals have been obtained as a running mean and thus always relate to the full set of posterior draws. This implies that in case you have opted for trimming the draws the residuals do not correspond to the posterior draws of the "trimmed" coefficients. This is a storage problem, rather than a statistical problem. Experiments, however, show that residual properties (autocorrelation, cross-sectional correlation) of trimmed and reported residuals are close.}
#' \item{\code{stacked results}}{\describe{
#' \item{\code{S_large}}{ is a three-dimensional array (K times K times draws) of the (block-diagonal) posterior variance covariance matrix.}
#' \item{\code{F_large}}{ is a four-dimensional array (K times K times lags times draws) of the coefficients.}
#' \item{\code{Ginv_large}}{ is a three-dimensional array (K times K times draws) of the inverse of the G matrix.}
#' \item{\code{A_large}}{ is a three-dimensional array (K times K+1 times draws) of the posterior estimates for the K coefficients plus a global constant.}
#' \item{\code{F.eigen}}{ in case \code{eigen="TRUE"}, returns a vector that contains for each posterior draw the modulus of the largest eigenvalue of the companion matrix.}
#' \item{\code{trim.info}}{ is a character vector. Contains information regarding the nr. of stable draws out of total (thinned) draws. Experience shows that a maximum eigenvalue of \code{1.05} seems a reasonable choice when working with data in levels to generate stable impulse responses.}
#' }}
#' \item{\code{cc.results}}{ each entry of this list contains an list object of length \code{N}. Each entry in the list corresponds to one country model and contains one of the following posterior medians.
#' \describe{
#' \item{\code{coeffs}}{ contains in each entry the matrix with the posterior median of the estimated coefficients. Columns of the matrix correspond to an equation in the country model (i.e., the dependent variable) and rows to coefficient estimates of the explanatory variables.}
#' \item{\code{sig}}{ contains in each entry the variance-covariance matrix for each point in time. If \code{SV=FALSE} all entries along the time dimension are the same.}
#' \item{\code{theta}}{ contains in each entry the estimated prior variances for the coefficients. Explains how much shrinkage is induced on each coefficient depending on the prior setup.}
#' \item{\code{res}}{ contains in each entry a matrix of dimension (T-p times K) with the posterior median of the residuals of the cross-country models.}
#' \item{\code{shrink}}{ in case \code{prior="MN"} each entry contains the estimated shrinkage parameters.}
#' \item{\code{PIP}}{ in case \code{prior="SSVS"} returns a list object. The first slot in the list \code{PIP.cc}, is a list of length \code{N} and contains the posterior inclusion probabilities of the country models. The second slot in the list, named \code{PIP.avg} yields simple averages (over the country models where a particular variable has been included) of the posterior inclusion probabilities.}
#' \item{\code{lambda2}}{ in case \code{prior="NG"} each entry contains the estimated global shrinkage parameters. It is a matrix of dimension (p+1 times 3). Columns refer to the endogenous, weakly exogenous and shrinkage parameters for the covariances. Rows correspond to different degree of shrinkage per lag of the variables starting with the contemporaneous lag (only for weakly exogenous variables). In case of the covariances just one global shrinkage parameter is estimated.}
#' \item{\code{tau}}{ in case \code{prior="NG"} each entry contains the estimated parameter that governs the heaviness of the tails of the marginal prior distribution of the coefficients associated to endogenous variables. Structure is the same as \code{lambda2}.}
#' }}}
#' @examples
#' library(BGVAR)
#' data(testdata)
#' hyperpara <- list(tau0=0.1,tau1=3,kappa0=0.1,kappa1=7,a_1=0.01,b_1=0.01,p_i=0.5,q_ij=0.5)
#' model.ssvs <- bgvar(Data=testdata,W=W.test,plag=1,draws=100,burnin=100,
#' prior="SSVS",SV=FALSE,hyperpara=hyperpara,thin=1)
#' \dontrun{
#' library(BGVAR)
#' # replicate Feldkircher and Huber (2016) using trade based weights
#' data(eerData)
#' hyperpara <- list(tau0=0.1,tau1=3,kappa0=0.1,kappa1=7,a_1=0.01,b_1=0.01,p_i=0.5,q_ij=0.5)
#' model.ssvs <- bgvar(Data=eerData,W=W.trade0012,plag=1,draws=100,burnin=100,
#' prior="SSVS",SV=FALSE,hyperpara=hyperpara,thin=1)
#' print(model.ssvs)
#'
#' # use different weight matrices
#' variable.list<-list();variable.list$real<-c("y","Dp","tb");variable.list$fin<-c("stir","ltir","rer")
#' model.mn <- bgvar(Data=eerData, W=W.list[c("tradeW.0012","finW0711")], plag=1, draws=200,
#' burnin=100,prior="MN",SV=TRUE,thin=2,expert=list(variable.list=variable.list))
#' print(model.mn)
#'
#' data(monthlyData)
#' cN = names(EB.weights$weights)
#' Data = monthlyData[c(cN,"EB","OC")]
#' W = W[cN,cN]
#' OC.weights$weights = OC.weights$weights[cN]
#' OE.weights <- list(EB=EB.weights, OC=OC.weights)
#' hyperpara<-list(d_lambda = 0.01, e_lambda = 0.01,e_lambda=1.5,d_lambda=1,
#' prmean=0,a_1=0.01,b_1=0.01,tau_theta=.6,sample_tau=FALSE)
#' model.ssvs <- bgvar(Data=Data,W=W,plag=2,draws=100,burnin=100,prior="SSVS",
#' hyperpara=hyperpara,eigen=TRUE,SV=TRUE,expert=list(OE.weights=OE.weights))
#' print(model.ssvs)
#' }
#' @references
#' Boeck, M., Feldkircher, M. and F. Huber (2022) BGVAR: Bayesian Global Vector Autoregressions with Shrinkage Priors in R. \emph{Journal of Statistical Software}, Vol. 104(9), pp. 1-28.
#'
#' Crespo Cuaresma, J., Feldkircher, M. and F. Huber (2016) Forecasting with Global Vector Autoregressive Models: A Bayesian Approach. \emph{Journal of Applied Econometrics}, Vol. 31(7), pp. 1371-1391.
#'
#' Doan, T. R., Litterman, B. R. and C. A. Sims (1984) Forecasting and Conditional Projection Using Realistic Prior Distributions. \emph{Econometric Reviews}, Vol. 3, pp. 1-100.
#'
#' Dovern, J., Feldkircher, M. and F. Huber (2016) Does joint modelling of the world economy pay off? Evaluating multivariate forecasts from a Bayesian GVAR. \emph{Journal of Economic Dynamics and Control}, Vol. 70, pp. 86-100.
#'
#' Feldkircher, M. and F. Huber (2016) The International Transmission of US Shocks - Evidence from Bayesian Global Vector Autoregressions. \emph{European Economic Review}, Vol. 81, pp. 167-188.
#'
#' Feldkircher, M. Gruber, T. and F. Huber (2020) International effects of a compression of euro area yield curves. \emph{Journal of Banking & Finance}, Vol. 113, pp. 11-14.
#'
#' George, E.I., Sun, D. and S. Ni (2008) Bayesian stochastic search for var model restrictions. \emph{Journal of Econometrics}, Vol. 142, pp. 553-580.
#'
#' Georgiadis, G. (2015) Examining asymmetries in the transmission of monetary policy in the euro area: Evidence from a mixed cross-section global VAR model. \emph{European Economic Review}, Vol. 75, pp. 195-215.
#'
#' Huber, F. and M. Feldkircher (2016) Adaptive Shrinkage in Bayesian Vector Autoregressive Models. \emph{Journal of Business and Economic Statistics}, Vol. 37(1), pp. 27-39.
#'
#' Mohaddes, K. and M. Raissi (2018). Compilation, Revision and Updating of the Global VAR (GVAR) Database, 1979Q2-2016Q4. University of Cambridge: Faculty of Economics (mimeo).
#'
#' Mohaddes, K. and M. Raissi (2019) The US oil supply revolution and the global economy. \emph{Empirical Economics}, Vol. 57, pp. 515-546.
#'
#' Pesaran, M.H., Schuermann T. and S.M. Weiner (2004) Modeling Regional Interdependencies Using a Global Error-Correcting Macroeconometric Model. \emph{Journal of Business and Economic Statistics}, Vol. 22, pp. 129-162.
#'
#' Sims, C. A. (1992) Bayesian Inference for Multivariate Time Series with Trend. \emph{Mimeo}, presented at the American statistical Association meeting.
#'
#' Sims, C.A. and T. Zha (1998) Bayesian Methods for Dynamic Multivariate Models. \emph{International Economic Review}, Vol. 39, pp. 949-968.
#' @importFrom abind adrop
#' @importFrom GIGrvg rgig
#' @importFrom Rcpp evalCpp
#' @importFrom stats is.ts median time ts
#' @importFrom parallel parLapply mclapply
#' @importFrom xts is.xts
#' @importFrom zoo coredata
bgvar<-function(Data,W,plag=1,draws=5000,burnin=5000,prior="NG",SV=TRUE,hold.out=0,thin=1,hyperpara=NULL,
eigen=TRUE,Ex=NULL,trend=FALSE,expert=NULL,verbose=TRUE){
Sys.setenv(LANGUAGE='en')
if(verbose) cat("\014")
start.bgvar <- Sys.time()
#--------------------------------- checks ------------------------------------------------------#
if(!is.list(Data) & !is.matrix(Data) & is.data.frame(Data)){
stop("Please provide the argument 'Data' either as 'list' or as 'matrix' object.")
}
if(!is.list(W) & !is.matrix(W)){
stop("Please provide the argument 'W' either as 'list' or as 'matrix' object.")
}
if(!is.null(Ex)){
if(!is.list(Ex) & !is.matrix(Ex)){
stop("Please provide the argument 'Ex' either as 'list' or as 'matrix' object.")
}
}
if(!is.numeric(plag)){
stop("Please specify number of lags as numeric.")
}
if(any(is.na(plag))){
stop("Please specify number of lags.")
}
if(!length(plag)%in%c(1,2)){
stop("Please specify number of lags accordingly. One lag length parameter for the whole model.")
}
if(!is.numeric(draws) | !is.numeric(burnin)){
stop("Please specify number of draws and burnin as numeric.")
}
if(length(draws)>1 || draws<0 || length(burnin)>1 || burnin<0){
stop("Please specify number of draws and burnin accordingly. One draws and burnin parameter for the whole model.")
}
if(!prior%in%c("MN","SSVS","NG","HS")){
    stop("Please select one of the following prior options: MN, SSVS, NG, or HS.")
}
#-------------------------- expert settings -------------------------------------------------------#
# expert settings
expert.list <- list(variable.list=NULL, OE.weights=NULL, Wex.restr=NULL, save.country.store=FALSE, save.shrink.store = FALSE, save.vola.store = FALSE, use_R=FALSE, applyfun=NULL, cores=NULL)
if(!is.null(expert)){
if(!(is.null(expert$cores) || is.numeric(expert$cores))){
stop("Please provide the expert argument 'cores' in appropriate form. Please recheck.")
}
for(n in names(expert))
expert.list[[n]] <- expert[[n]]
}
variable.list <- expert.list$variable.list
OE.weights <- expert.list$OE.weights
Wex.restr <- expert.list$Wex.restr
use_R <- expert.list$use_R
applyfun <- expert.list$applyfun
cores <- expert.list$cores
save.country.store <- expert.list$save.country.store
save.shrink.store <- expert.list$save.shrink.store
save.vola.store <- expert.list$save.vola.store
# construct args
args <- .construct.arglist(bgvar)
# specify lags
if(length(plag)==1) lags <- rep(plag,2) else lags <- plag
args$lags <- lags
#-------------------------- construct arglist ----------------------------------------------------#
printtext <- paste0("\n\nStart estimation of Bayesian Global Vector Autoregression.\n\n",
paste("Prior: ",ifelse(prior=="MN","Minnesota prior",ifelse(prior=="SSVS","Stochastic Search Variable Selection prior",ifelse(prior=="NG","Normal-Gamma prior","Horseshoe prior"))),".\n",sep=""),
paste("Lag order: ",lags[1]," (endo.), ",lags[2]," (w. exog.)","\n",sep=""),
paste("Stochastic volatility: ", ifelse(SV,"enabled","disabled"),".\n",sep=""),
paste("Number of cores used: ", ifelse(is.null(cores),1,cores),".\n",sep=""))
if(verbose) cat(printtext)
#------------------------------ user checks ---------------------------------------------------#
# check Data
if(is.matrix(Data)){
if(any(is.na(Data))){
stop("The data you have submitted contains NAs. Please check the data.")
}
if(!all(grepl("\\.",colnames(Data)))){
stop("Please separate country- and variable names with a point.")
}
cN <- unique(unlist(lapply(strsplit(colnames(Data),".",fixed=TRUE),function(l) l[1])))
N <- length(cN)
if(!all(nchar(cN)==2)){
stop("Please provide entity names with exactly two characters.")
}
temp <- list()
for(cc in 1:N){
temp[[cN[cc]]] <- Data[,grepl(cN[cc],colnames(Data))]
colnames(temp[[cN[cc]]]) <- unlist(lapply(strsplit(colnames(temp[[cN[cc]]]),".",fixed=TRUE),function(l)l[2]))
}
Data <- temp
if(any(unlist(lapply(Data,ncol))==1)){
stop("Please provide for each country more than one variable.")
}
}else if(is.list(Data)){
if(any(unlist(lapply(Data,is.na)))){
stop("The data you have submitted contains NAs. Please check the data.")
}
N <- length(Data)
# check names
if(is.null(names(Data))){
      names(Data) <- paste("c", 1:length(Data), sep="")
}
cN <- names(Data)
if(!all(nchar(cN)==2)){
stop("Please provide entity names with exactly two characters.")
}
isTS <- unlist(lapply(Data,function(l)is.ts(l)))
isXTS <- unlist(lapply(Data,function(l)is.xts(l)))
if(!all(isTS) & any(isTS)){
stop("Please provide all list elements as time-series objects.")
}
if(!all(isXTS) & any(isXTS)){
stop("Please provide all list elements as xts objects.")
}
isTS <- all(isTS); isXTS <- all(isXTS)
Traw <- unique(unlist(lapply(Data,function(l)nrow(l))))
if(length(Traw)>1){
stop("Please provide same sample size for all countries.")
}
Data_new <- list()
timeindex <- seq(1, Traw)
if(isTS || isXTS) timeindex <- as.character(time(Data[[1]]))
for(cc in 1:N){
if(isTS || isXTS){
Data_new[[cc]] <- coredata(Data[[cc]])
}else{
Data_new[[cc]] <- Data[[cc]]
}
}
names(Data_new) <- cN
Data <- Data_new
args$time <- timeindex
args$Traw <- length(timeindex)
}
args$Data <- Data
# check Weight matrix if matrix
if(is.matrix(W)){
W.aux<-list();W.aux$W<-W;W<-W.aux;rm(W.aux) # convert W into a list
}
if(any(unlist(lapply(W,is.na)))){
stop("The weight matrix you have provided contains NAs. Please check the weight matrix.")
}
for(ww in 1:length(W)){
if(is.null(OE.weights)){
if(!nrow(W[[ww]])==N){
stop("Data and W matrix not of the same dimension.")
}
if(!all(cN%in%rownames(W[[ww]]))){
stop("Please provide the same country names for the Data and W objects.")
}
# make sure that W and Data are in the same order
W[[ww]]<-W[[ww]][cN,cN]
}else{
if(!(nrow(W[[ww]])+length(OE.weights))==N){
stop("Data and W matrix plus additional weights for other entities are not of the same dimension.")
}
if(!all(cN%in%c(rownames(W[[ww]]),names(OE.weights)))){
stop("Please provide the same country names for the Data and W matrix plus additional weights for other entities.")
}
W[[ww]] <- W[[ww]][cN[!cN%in%names(OE.weights)],cN[!cN%in%names(OE.weights)]]
}
}
args$W <- W
# check truly exogenous variables
if(!is.null(Ex)){
if(is.matrix(Ex)){
if(any(is.na(Ex))){
stop("The data for exogenous variables you have submitted contains NAs. Please check the data.")
}
if(nrow(Ex)!=args$Traw){
stop("Provided data and truly exogenous data not equally long. Please check.")
}
if(!all(grepl("\\.",colnames(Ex)))){
stop("Please separate country- and variable names with a point.")
}
ExcN <- unique(unlist(lapply(strsplit(colnames(Ex),".",fixed=TRUE),function(l) l[1])))
if(!all(ExcN%in%cN)){
stop("Provided country names in data and truly exogenous data not equal. Please check.")
}
ExN <- length(ExcN)
if(!all(nchar(ExcN)>1)){
stop("Please provide entity names with minimal two characters.")
}
temp <- list()
for(cc in 1:ExN){
temp[[cc]] <- Ex[,grepl(ExcN[cc],colnames(Ex)),drop=FALSE]
colnames(temp[[cc]]) <- unlist(lapply(strsplit(colnames(temp[[cc]]),".",fixed=TRUE),function(l)l[2]))
}
names(temp)<-ExcN
Ex <- temp
}else if(is.list(Ex)){
# check for NAs
if(any(unlist(lapply(Ex,is.na)))){
stop("The data for exogenous variables you have submitted contains NAs. Please check the data.")
}
ExN <- length(Ex)
# check names
if(is.null(names(Ex))){
        names(Ex) <- paste("c", 1:length(Ex), sep="")
}
ExcN <- names(Ex)
if(!all(nchar(ExcN)>1)){
stop("Please provide entity names with minimal two characters..")
}
if(!all(ExcN%in%cN)){
stop("Provided country names in data and truly exogenous data not equal. Please check.")
}
isTS <- unlist(lapply(Ex,function(l)is.ts(l)))
isXTS <- unlist(lapply(Ex,function(l)is.xts(l)))
if(!all(isTS) & any(isTS)){
stop("Please provide all list elements as time-series objects.")
}
if(!all(isXTS) & any(isXTS)){
stop("Please provide all list elements as xts objects.")
}
isTS <- all(isTS); isXTS <- all(isXTS)
ExTraw <- unique(unlist(lapply(Ex,function(l)nrow(l))))
if(length(ExTraw)>1){
stop("Please provide same sample size for all countries.")
}
if(ExTraw!=args$Traw){
stop("Provided data and truly exogenous data not equally long. Please check.")
}
}
}
args$Ex <- Ex
# check thinning factor
if(thin<1){
printtext <- paste0(printtext, paste("Thinning factor of ",thin," not possible. Adjusted to ",round(1/thin,2),".\n",sep=""))
if(verbose) cat(paste("Thinning factor of ",thin," not possible. Adjusted to ",round(1/thin,2),".\n",sep=""))
thin <- round(1/thin,2)
}
if(draws%%thin!=0){
thin_mess <- paste("Thinning factor of ",thin," no divisor of ",draws," (number of draws to save for posterior analysis).\n",sep="")
div <- .divisors(draws,thin)
thin <- min(div[which(abs(div-thin)==min(abs(div-thin)))])
thin_mess <- paste(thin_mess,"New thinning factor: ", thin,". This means every", ifelse(thin==1,"",ifelse(thin==2,paste(" ",thin,"nd ",sep=""), ifelse(thin==3,paste(" ",thin,"rd ",sep=""),paste(" ",thin,"th ",sep="")))), "draw is saved.\n",sep="")
}else{
thin_mess <- paste("Thinning factor: ", thin,". This means every ",ifelse(thin==1,"",ifelse(thin==2,paste(thin,"nd ",sep=""),ifelse(thin==3,paste(thin,"rd ",sep=""),paste(thin,"th ",sep="")))),"draw is saved.\n",sep="")
}
printtext <- paste0(printtext,thin_mess)
if(verbose) cat(thin_mess)
args$thin <- thin
args$thindraws <- draws/thin
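  # Example: with draws=5000 and thin=10 every 10th draw is kept, so that
  # args$thindraws = 500 posterior draws are stored for post-processing.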
# set default
if(verbose) cat("Hyperparameter setup: \n")
default_hyperpara <- list(a_1=3,b_1=0.3, prmean=0,# Gamma hyperparameter SIGMA (homoskedastic case) and mean
Bsigma=1, a0=25, b0=1.5, bmu=0, Bmu=100^2, # SV hyper parameter
shrink1=0.1,shrink2=0.2,shrink3=10^2,shrink4=0.1, # MN
tau0=.1,tau1=3,kappa0=0.1,kappa1=7,p_i=0.5,q_ij=0.5, # SSVS
d_lambda=0.01,e_lambda=0.01,tau_theta=0.7,sample_tau=TRUE,tau_log=TRUE) # NG
paras <- c("a_1","b_1","prmean","Bsigma_sv","a0","b0","bmu","Bmu","shrink1","shrink2","shrink3",
"shrink4","tau0","tau1","kappa0","kappa1","p_i","q_ij","d_lambda","e_lambda","tau_theta","sample_tau","tau_log")
if(is.null(hyperpara)){
printtext <- paste0(printtext, "\t No hyperparameters are chosen, default setting applied.\n")
if(verbose) cat("\t No hyperparameters are chosen, default setting applied.\n")
}
if(!is.null(hyperpara)){
for(para in names(hyperpara)){
if(!para%in%paras){
warning(paste0(para," no valid hyperparameter. Please check.\n"))
next
}
default_hyperpara[para] <- hyperpara[para]
if(para=="tau_theta") default_hyperpara["tau_log"] <- FALSE
}
printtext <- paste0(printtext, "Default values for chosen hyperparamters overwritten.\n")
if(verbose) cat("Default values for chosen hyperparamters overwritten.\n")
}
# store setting
setting_store <- list(shrink_MN = FALSE, shrink_SSVS = FALSE, shrink_NG = FALSE, shrink_HS = FALSE,
vola_pars = FALSE)
if(expert.list$save.shrink.store)
setting_store[[paste0("shrink_",prior)]] <- TRUE
if(expert.list$save.vola.store)
setting_store[["vola_pars"]] <- TRUE
#------------------------------ get weights -----------------------------------------------------------------#
xglobal = .getweights(Data=Data,W=W,OE.weights=OE.weights,Wex.restr=Wex.restr,variable.list=variable.list)
exo.countries = xglobal$exo.countries
exo = xglobal$exo
endo = xglobal$endo
gW = xglobal$gW
xglobal = xglobal$bigx
#---------------------------------hold out sample------------------------------------------------------------#
args$yfull <- xglobal
xglobal <- xglobal[1:(nrow(xglobal)-hold.out),,drop=FALSE]
if(!is.null(Ex)){
Ex <- lapply(Ex,function(l)l[1:(nrow(l)-hold.out),,drop=FALSE])
}
args$time <- args$time[1:(length(args$time)-hold.out)]
#------------------------------ prepare applyfun --------------------------------------------------------#
if(is.null(applyfun)) {
applyfun <- if(is.null(cores)) {
lapply
} else {
if(.Platform$OS.type == "windows") {
cl_cores <- parallel::makeCluster(cores)
on.exit(parallel::stopCluster(cl_cores))
function(X, FUN, ...) parallel::parLapply(cl = cl_cores, X, FUN, ...)
} else {
function(X, FUN, ...) parallel::mclapply(X, FUN, ..., mc.cores =
cores)
}
}
}
if(is.null(cores)) {cores <- 1}
#------------------------------ estimate BVAR ---------------------------------------------------------------#
# define constant
printtext <- paste0(printtext,"\nEstimation of country models starts...")
if(verbose) cat("\nEstimation of country models starts...")
# Rcpp::sourceCpp("./src/BVAR_linear.cpp")
start.estim <- Sys.time()
globalpost <- applyfun(1:N, function(cc){
if(verbose) cat("\f",printtext,"\nModel: ",cc,"/",N," done.")
.BVAR_linear_wrapper(cc=cc,cN=cN,xglobal=xglobal,gW=gW,prior=prior,lags=lags,draws=draws,burnin=burnin,trend=trend,SV=SV,thin=thin,default_hyperpara=default_hyperpara,Ex=Ex,use_R=use_R,setting_store=setting_store)
})
cat("\014")
cat(printtext)
names(globalpost) <- cN
end.estim <- Sys.time()
diff.estim <- difftime(end.estim,start.estim,units="mins")
mins <- round(diff.estim,0); secs <- round((diff.estim-floor(diff.estim))*60,0)
if(verbose) cat(paste("\nEstimation done and took ",mins," ",ifelse(mins==1,"min","mins")," ",secs, " ",ifelse(secs==1,"second.","seconds.\n"),sep=""))
#--------------------------- stacking part for global model -----------------------------------------------------#
if(is.logical(eigen)){
if(eigen){trim<-1.05}else{trim<-NULL}
}else{
trim<-eigen;eigen<-TRUE
}
if(verbose) cat("Stacking of global model starts... \n")
# insert stacking function here
# Rcpp::sourceCpp("./src/gvar_stacking.cpp")
stacked.results <- .gvar.stacking.wrapper(xglobal=xglobal,plag=max(lags),globalpost=globalpost,draws=draws,thin=thin,trend=trend,eigen=eigen,trim=trim,verbose=verbose)
if(!is.null(trim)) {args$thindraws <- length(stacked.results$F.eigen)}
if(verbose) cat("\nStacking finished.\n")
if(verbose) cat(paste0("Computation of BGVAR yields ",args$thindraws," (",round(args$thindraws/(draws/thin),2)*100,"%) draws (",
ifelse(eigen,"active","inactive")," trimming)."))
#--------------------------- prepare country models -------------------------------------------------------------#
# country model residuals
country.coeffs <- lapply(globalpost,function(l) l$post$A_post)
country.sig <- lapply(globalpost,function(l) l$post$SIGMA_post)
country.theta <- lapply(globalpost,function(l) l$post$theta_post)
country.res <- lapply(globalpost,function(l) l$post$res_post)
varNames <- lapply(gW,function(x) dimnames(x)[[1]])
for(cc in 1:N){
varx = varNames[[cc]]
endo = grep(cN[cc],varx)
exx = which(varx%in%names(exo))
wex = seq(1,length(varx))[-c(endo,exx)]
if(length(wex)>0 && length(exx)>0){
wex0 = c(paste(varx[wex],"*",sep=""),paste(varx[exx],"**",sep=""))
}else if(length(wex)>0 && length(exx)==0){
wex0 = c(paste(varx[wex],"*",sep=""))
    }else if(length(wex)==0 && length(exx)>0){
      wex0 = c(paste(varx[exx],"**",sep=""))
    }else{
      wex0 = NULL
    }
wexL = endoL = c()
for(pp in 1:lags[1]){
endoL = c(endoL,paste(varx[endo],"_lag",pp,sep=""))
}
for(pp in 1:lags[2]){
if(length(wex)>0 && length(exx)>0){
wexL = c(wexL, paste(varx[wex],"*_lag",pp,sep=""), paste(varx[exx],"**_lag",pp,sep=""))
}else if(length(wex)>0 && length(exx)==0){
wexL = c(wexL, paste(varx[wex],"*_lag",pp,sep=""))
      }else if(length(wex)==0 && length(exx)>0){
        wexL = c(wexL, paste(varx[exx],"**_lag",pp,sep=""))
      }else{
        wexL = NULL
      }
}
if(cN[cc]%in%names(Ex)){
tex <- colnames(Ex[[cN[cc]]])
}else{tex<-NULL}
names <- c(endoL,wex0,wexL,tex,"cons")
if(trend) names <- c(names,"trend")
rownames(country.coeffs[[cc]]) = names
dimnames(country.sig[[cc]])[[2]] = dimnames(country.sig[[cc]])[[3]] = varx[endo]
}
cc.results <- list(coeffs=country.coeffs,sig=country.sig,theta=country.theta,res=country.res)
if(prior=="MN"){
if(setting_store$shrink_MN){
cc.results$shrink <- lapply(globalpost,function(l) l$post$shrink)
}else{
cc.results$shrink <- NULL
}
}else if(prior=="SSVS"){
if(setting_store$shrink_SSVS){
country.shrink <- lapply(globalpost,function(l) l$post$PIP)
for(cc in 1:N) rownames(country.shrink[[cc]]) <- rownames(country.coeffs[[cc]])
cc.results$PIP <- .avg.shrink(country.shrink,prior="SSVS")
}else{
cc.results$PIP <- NULL
}
}else if(prior=="NG"){
if(setting_store$shrink_NG){
cc.results$lambda2 <- lapply(globalpost,function(l) l$post$lambda2_post)
cc.results$tau <- lapply(globalpost,function(l) l$post$tau_post)
}else{
cc.results$lambda2 <- cc.results$tau <- NULL
}
}
if(save.country.store){
cc.results$store <- lapply(globalpost,function(l) l$store)
}
#---------------------- return output ---------------------------------------------------------------------------#
out <- structure(list("args"=args,
"xglobal"=xglobal,
"gW"=gW,
"stacked.results"=stacked.results,
"cc.results"=cc.results), class = "bgvar")
end.bgvar <- Sys.time()
diff.bgvar <- difftime(end.bgvar,start.bgvar,units="mins")
mins.bgvar <- round(diff.bgvar,0); secs.bgvar <- round((diff.bgvar-floor(diff.bgvar))*60,0)
if(verbose) cat(paste("\n Needed time for estimation of bgvar: ",mins.bgvar," ",ifelse(mins.bgvar==1,"min","mins")," ",secs.bgvar, " ",ifelse(secs.bgvar==1,"second.","seconds.\n"),sep=""))
return(out)
}
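# Illustrative usage sketch (not run): estimates a small GVAR on the example data
# 'testdata'/'W.test' shipped with the package, overwriting one hyperparameter and
# passing expert settings. Argument values are chosen for illustration only.
# model <- bgvar(Data=testdata, W=W.test, plag=1, draws=100, burnin=100,
#                prior="NG", SV=TRUE, hyperpara=list(d_lambda=0.01, e_lambda=0.01),
#                expert=list(cores=2, save.shrink.store=TRUE), verbose=FALSE)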
#' @method print bgvar
#' @export
#' @importFrom utils object.size
print.bgvar<-function(x, ...){
cat("---------------------------------------------------------------------------")
cat("\n")
cat("Model Info:")
cat("\n")
cat(paste("Prior: ",ifelse(x$args$prior=="MN","Minnesota prior (MN)",
ifelse(x$args$prior=="SSVS","Stochastic Search Variable Selection prior (SSVS)",
"Normal-Gamma prior (NG)")),sep=""))
cat("\n")
cat(paste("Number of lags for endogenous variables: ",x$args$lags[1],sep=""))
cat("\n")
cat(paste("Number of lags for weakly exogenous variables: ",x$args$lags[2],sep=""))
cat("\n")
cat(paste("Number of posterior draws: ",x$args$draws,"/",x$args$thin,"=",floor(x$args$draws/x$args$thin),sep=""))
cat("\n")
cat(paste("Size of GVAR object: ",format(object.size(x),units="MB"),sep=""))
cat("\n")
cat(x$stacked.results$trim.info)
cat("\n")
cat("---------------------------------------------------------------------------")
cat("\n")
cat("Model specification:")
cat("\n")
endo <- lapply(x$cc.results$coeffs,colnames)
exo <- lapply(x$cc.results$coeffs,rownames)
cN <- names(endo)
vars <- list()
for(i in 1:length(endo)){
vars[[i]] <- c(gsub(paste(cN[i],".",sep=""),"",endo[[i]]), exo[[i]][-grep("_lag",exo[[i]])])
vars[[i]] <- vars[[i]][-charmatch("cons",vars[[i]])]
}
varNames <- lapply(vars,function(l) paste(l,collapse=", "))
names(varNames) <- cN
for(i in 1:length(varNames)){
cat("\n")
cat(paste0(names(varNames[i]),": ",varNames[[i]]))
}
invisible(x)
}
#' @name summary
#' @title Summary of Bayesian GVAR
#' @description Output gives model information as well as some descriptive statistics on convergence properties, likelihood, serial autocorrelation in the errors and the average pairwise autocorrelation of cross-country residuals.
#' @aliases summary summary.bgvar
#' @param object An object of class \code{bgvar}.
#' @param ... Additional arguments.
#' @return Returns an object of class \code{bgvar.summary}.
#' @seealso
#' \code{\link{bgvar}} to estimate a \code{bgvar} object.
#' \code{\link{avg.pair.cc}} to compute average pairwise cross-country correlation of cross-country residuals separately.
#' \code{\link{resid.corr.test}} to compute F-test on first-order autocorrelation of cross-country residuals separately.
#' @author Maximilian Boeck
#' @export
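#' @examples
#' \donttest{
#' # Illustrative usage sketch based on the small example data set shipped with the package
#' library(BGVAR)
#' data(testdata)
#' model.ng <- bgvar(Data=testdata,W=W.test,plag=1,draws=100,burnin=100)
#' summary(model.ng)
#' }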
summary.bgvar <- function(object, ...){
if(!inherits(object, "bgvar")) {stop("Please provide a `bgvar` object.")}
# check if posterior draws are available
if(object$args$thindraws == 0){
cat("Computation of BGVAR has yielded no stable posterior draws!")
return(invisible(object))
}
CD <- conv.diag(object)
res <- resid.corr.test(object,lag.cor=1,alpha=0.95)
cross.corr <- avg.pair.cc(object)
out <- structure(list("object"=object,
"CD"=CD,
"res"=res,
"cross.corr"=cross.corr), class = "bgvar.summary")
return(out)
}
#' @method print bgvar.summary
#' @importFrom knitr kable
#' @export
print.bgvar.summary <- function(x, ...){
cat("---------------------------------------------------------------------------")
cat("\n")
cat("Model Info:")
cat("\n")
cat(paste("Prior: ",ifelse(x$object$args$prior=="MN","Minnesota prior (MN)",
ifelse(x$object$args$prior=="SSVS","Stochastic Search Variable Selection prior (SSVS)",
"Normal-Gamma prior (NG)")),sep=""))
cat("\n")
cat(paste("Number of lags for endogenous variables: ",x$object$args$lags[1],sep=""))
cat("\n")
cat(paste("Number of lags for weakly exogenous variables: ",x$object$args$lags[2],sep=""))
cat("\n")
cat(paste("Number of posterior draws: ",x$object$args$draws,"/",x$object$args$thin,"=",x$object$args$draws/x$object$args$thin,sep=""))
cat("\n")
if(x$object$args$eigen){
cat(paste("Number of stable posterior draws: ",length(x$object$stacked.results$F.eigen),sep=""))
cat("\n")
}
cat(paste("Number of cross-sectional units: ",length(x$object$gW),sep=""))
cat("\n")
cat("---------------------------------------------------------------------------")
cat("\n")
cat("Convergence diagnostics")
cat("\n")
cat(paste("Geweke statistic:\n",x$CD$perc,sep=""))
cat("\n")
cat("---------------------------------------------------------------------------")
cat("\n")
cat("F-test, first order serial autocorrelation of cross-unit residuals")
cat("\n")
cat("Summary statistics:")
cat("\n")
temp <- kable(x$res$p.res, row.names=TRUE, "rst")
for(ii in 1:length(temp)){
cat(paste0(temp[ii],"\n"))
}
cat("---------------------------------------------------------------------------")
cat("\n")
cat("Average pairwise cross-unit correlation of unit-model residuals")
cat("\n")
cat("Summary statistics:")
cat("\n")
temp <- kable(x$cross.corr$res.res, row.names=TRUE, "rst")
for(ii in 1:length(temp)){
cat(paste0(temp[ii],"\n"))
}
cat("---------------------------------------------------------------------------")
cat("\n")
invisible(x)
}
#' @name residuals
#' @export
#' @title Extract Residuals of Bayesian GVAR
#' @description Calculate residuals of the global model and the country models.
#' @aliases residuals residuals.bgvar
#' @param object A fitted \code{bgvar} object.
#' @param ... Additional arguments.
#' @details This function calculates residuals of the global and the country models based on a \code{bgvar} object. Country models' residuals are equivalent to output generated by the \code{print.bgvar} function in case no trimming has been used. If trimming was invoked to discard unstable draws output of both functions might differ since \code{print.bgvar} calculates residuals as a running mean to save storage which is based on the \emph{whole} set of posterior draws (including discarded draws). In this case it is recommended to recalculate the residuals with \code{residuals.bgvar} and re-do the serial autocorrelation or average pairwise cross-correlation analysis using functions \code{resid.corr.test} and \code{avg.pair.cc}.
#' @return Returns a list with the following arguments \describe{
#' \item{\code{global}}{ A (draws/thin) times (T-p) times K array containing the residuals of the global model.}
#' \item{\code{country}}{ A (draws/thin) times (T-p) times K array containing the residuals of the country models.}
#' \item{\code{Data}}{ A (T-p) times K matrix containing the data of the model.}
#' }
#' @author Maximilian Boeck, Martin Feldkircher
#' @seealso \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @importFrom stats resid
#' @examples
#' \donttest{
#' library(BGVAR)
#' data(testdata)
#' model.ng <- bgvar(Data=testdata,W=W.test,plag=1,draws=100,burnin=100)
#' resid(model.ng)
#' }
residuals.bgvar <- function(object, ...){
if(!inherits(object, "bgvar")) {stop("Please provide a `bgvar` object.")}
# check if posterior draws are available
if(object$args$thindraws == 0){
cat("Computation of BGVAR has yielded no stable posterior draws!")
return(invisible(object))
}
G.mat <- object$stacked.results$Ginv_large
A.mat <- object$stacked.results$A_large
lags <- object$args$lags
pmax <- max(lags)
draws <- object$args$thindraws
time <- object$args$time
trend <- object$args$trend
xglobal <- object$xglobal
YY <- xglobal[(pmax+1):nrow(xglobal),]
XX <- cbind(.mlag(xglobal,pmax),1)
XX <- XX[(pmax+1):nrow(XX),]
if(trend) XX <- cbind(XX,seq(1,nrow(XX)))
rownames(YY) <- as.character(time[-c(1:pmax)])
res.array.country<-res.array.global<-array(0,dim=c(draws,dim(YY)))
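  # global residuals: e_t = y_t - X_t A'; country-model residuals are obtained by
  # pre-multiplying the global residuals with G (the inverse of Ginv), draw by draw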
for(irep in 1:draws){
res.array.global[irep,,] <- (YY-XX%*%t(A.mat[,,irep]))
res.array.country[irep,,] <- (res.array.global[irep,,]%*%t(solve(G.mat[,,irep])))
}
out <- structure(list(global=res.array.global,country=res.array.country,Data=YY),
class = "bgvar.resid")
return(out)
}
#' @rdname residuals
#' @examples
#' \donttest{
#' resid(model.ng)
#' }
#' @export
resid.bgvar <- residuals.bgvar
#' @name coef
#' @title Extract Model Coefficients of Bayesian GVAR
#' @description Extracts the global model coefficients for \code{bgvar} for certain quantiles of the posterior distribution. \code{coefficients} is an \emph{alias} for it.
#' @param object An object of class \code{bgvar}.
#' @param ... Additional arguments.
#' @param quantile reported quantiles. Default is set to the median.
#' @return Returns a \code{q} times \code{K} times \code{K} times \code{p} array of the global coefficients, where \code{q} is the number of specified quantiles (this dimension is dropped if \code{q=1}), \code{K} the number of endogenous variables and \code{p} the number of lags.
#' @export
#' @seealso \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @examples
#' \donttest{
#' library(BGVAR)
#' data(testdata)
#' model.ng <- bgvar(Data=testdata,W=W.test,plag=1,draws=100,burnin=100)
#' coef(model.ng)
#' }
#' @importFrom stats quantile
coef.bgvar<-function(object, ..., quantile=.50){
if(!inherits(object, "bgvar")) {stop("Please provide a `bgvar` object.")}
# check if posterior draws are available
if(object$args$thindraws == 0){
cat("Computation of BGVAR has yielded no stable posterior draws!")
return(invisible(object))
}
out <- apply(object$stacked.results$F_large,c(1,2,3),quantile,quantile,na.rm=TRUE)
dimnames(out)[[1]] <- colnames(object$xglobal)
return(out)
}
#' @rdname coef
#' @importFrom stats coefficients
#' @examples
#' \donttest{
#' coefficients(model.ng)
#' }
#' @export
coefficients.bgvar <- coef.bgvar
#' @name vcov
#' @title Extract Variance-covariance Matrix of Bayesian GVAR
#' @description Extracts the global variance-covariance matrix for \code{bgvar} for certain quantiles of the posterior distribution.
#' @param object An object of class \code{bgvar}.
#' @param ... Additional arguments.
#' @param quantile Reported quantiles. Default is set to median.
#' @return Returns a \code{q} times \code{K} times \code{K} array of the global variance-covariance matrix, where \code{q} is the number of specified quantiles (this dimension is dropped if \code{q=1}) and \code{K} the number of endogenous variables.
#' @seealso \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @importFrom stats vcov
#' @examples
#' \donttest{
#' library(BGVAR)
#' data(testdata)
#' model.ng <- bgvar(Data=testdata,W=W.test,plag=1,draws=100,burnin=100)
#' vcov(model.ng)
#' }
#' @export
vcov.bgvar<-function(object, ..., quantile=.50){
if(!inherits(object, "bgvar")) {stop("Please provide a `bgvar` object.")}
# check if posterior draws are available
if(object$args$thindraws == 0){
cat("Computation of BGVAR has yielded no stable posterior draws!")
return(invisible(object))
}
S_qu <- apply(object$stacked.results$S_large,c(1,2),quantile,quantile,na.rm=TRUE)
Ginv_qu <- apply(object$stacked.results$Ginv_large,c(1,2),quantile,quantile,na.rm=TRUE)
if(length(quantile)==1){
out <- Ginv_qu%*%S_qu%*%t(Ginv_qu)
}else{
out <- sapply(1:length(quantile),function(qq)Ginv_qu[qq,,]%*%S_qu[qq,,]%*%t(Ginv_qu[qq,,]),simplify="array")
out <- aperm(out,c(3,1,2))
}
return(out)
}
#' @name fitted
#' @title Extract Fitted Values of Bayesian GVAR
#' @description Extracts the fitted values for \code{bgvar}.
#' @param object An object of class \code{bgvar}.
#' @param ... Additional arguments.
#' @param global If \code{global=TRUE} global fitted values are returned otherwise country fitted values.
#' @return Returns a \code{T} times \code{K} matrix, where \code{T} is the number of observations and \code{K} the number of endogenous variables.
#' @seealso \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @importFrom stats fitted
#' @examples
#' \donttest{
#' library(BGVAR)
#' data(testdata)
#' model.ng <- bgvar(Data=testdata,W=W.test,plag=1,draws=100,burnin=100)
#' fitted(model.ng)
#' }
#' @export
fitted.bgvar<-function(object, ..., global=TRUE){
if(!inherits(object, "bgvar")) {stop("Please provide a `bgvar` object.")}
# check if posterior draws are available
if(object$args$thindraws == 0){
cat("Computation of BGVAR has yielded no stable posterior draws!")
return(invisible(object))
}
lags <- object$args$lags
pmax <- max(lags)
xglobal <- object$xglobal
trend <- object$args$trend
XX <- .mlag(xglobal,pmax)
YY <- xglobal[-c(1:pmax),,drop=FALSE]
XX <- cbind(XX[-c(1:pmax),,drop=FALSE],1)
bigT <- nrow(YY)
if(trend) XX <- cbind(XX,seq(1,bigT))
if(global){
A_post <- apply(object$stacked.results$A_large,c(1,2),median)
fit <- XX%*%t(A_post)
}else{
fit <- YY-do.call("cbind",object$cc.results$res)
}
return(fit)
}
#' @name logLik
#' @title Extract Log-likelihood of Bayesian GVAR
#' @description Extracts Log-Likelihood for \code{bgvar}.
#' @param object An object of class \code{bgvar}.
#' @param ... Additional arguments.
#' @param quantile Reported quantiles. Default is set to median.
#' @return Returns a vector of dimension \code{q} (number of specified quantiles) of global log-likelihoods.
#' @seealso \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @importFrom stats logLik
#' @examples
#' \donttest{
#' library(BGVAR)
#' data(testdata)
#' model.ng <- bgvar(Data=testdata,W=W.test,plag=1,draws=100,burnin=100)
#' logLik(model.ng)
#' }
#' @export
logLik.bgvar<-function(object, ..., quantile=.50){
if(!inherits(object, "bgvar")) {stop("Please provide a `bgvar` object.")}
# check if posterior draws are available
if(object$args$thindraws == 0){
cat("Computation of BGVAR has yielded no stable posterior draws!")
return(invisible(object))
}
if(length(quantile)!=1){
stop("Please provide only one quantile.")
}
  temp <- object$args$logLik
  xglobal <- object$xglobal
  bigT <- nrow(xglobal)
  bigK <- ncol(xglobal)
  if(is.null(temp)){
    lags <- object$args$lags
    pmax <- max(lags)
    trend <- object$args$trend
thindraws <- object$args$thindraws
X_large <- cbind(.mlag(xglobal,pmax),1)
if(trend) X_large <- cbind(X_large,seq(1:bigT))
Y_large <- xglobal[(pmax+1):bigT,,drop=FALSE]
X_large <- X_large[(pmax+1):bigT,,drop=FALSE]
A_large <- object$stacked.results$A_large
S_large <- object$stacked.results$S_large
Ginv_large<- object$stacked.results$Ginv_large
globalLik <- try(globalLik(Y_in=Y_large,X_in=X_large,A_in=A_large,S_in=S_large,Ginv_in=Ginv_large,thindraws=thindraws)$globalLik,silent=TRUE)
# if(all(as.numeric(globalLik)==-Inf)){
# for(irep in 1:thindraws){
# for(tt in 1:bigT){
# dmvnorm(x = Y_large[tt,],
# mean = X_large[tt,,drop=FALSE]%*%t(A_large[,,irep]),
# sigma = Ginv_large[,,irep]%*%S_large[,,irep]%*%t(Ginv_large[,,irep]),
# log = TRUE
# )
# }
# }
# }
if(is(globalLik,"try-error")){
out <- -Inf
}else{
out <- quantile(globalLik,quantile,na.rm=TRUE)
}
eval.parent(substitute(object$args$logLik<-out))
  }else{
    out <- temp
  }
attributes(out) <- list(nall=bigT, nobs=bigT, df=bigK)
class(out) <- "logLik"
return(out)
}
#' @name dic
#' @export
"dic" <- function(object, ...){
UseMethod("dic", object)
}
#' @name dic
#' @method dic bgvar
#' @title Deviance Information Criterion
#' @description Computes the Deviance information criterion for an object \code{bgvar}.
#' @param object An object of class \code{bgvar}.
#' @param ... Additional arguments.
#' @return Returns a numeric value with the corresponding DIC.
#' @seealso \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @author Maximilian Boeck
#' @export
#' @examples
#' \donttest{
#' library(BGVAR)
#' data(testdata)
#' model.mn <- bgvar(Data=testdata,W=W.test,plag=1,draws=100,burnin=100,prior="MN")
#' dic(model.mn)
#' }
#' @references
#' Spiegelhalter, D. J. and Best, N. G., Carlin, B. P. and Linde, A. (2002) \emph{Bayesian measures of model complexity and fit.} Journal of the Royal Statistical Society, Series B, Vol. 64(4), pp. 583-639.
dic.bgvar <- function(object, ...){
if(!inherits(object, "bgvar")) {stop("Please provide a `bgvar` object.")}
# check if posterior draws are available
if(object$args$thindraws == 0){
cat("Computation of BGVAR has yielded no stable posterior draws!")
return(invisible(object))
}
if(!is.null(object$args$dic)){
out <- object$args$dic
}else{
xglobal <- object$xglobal
lags <- object$args$lags
pmax <- max(lags)
trend <- object$args$trend
bigT <- nrow(xglobal)
bigK <- ncol(xglobal)
thindraws <- object$args$thindraws
X_large <- cbind(.mlag(xglobal,pmax),1)
if(trend) X_large <- cbind(X_large,seq(1:bigT))
Y_large <- xglobal[(pmax+1):bigT,,drop=FALSE]
X_large <- X_large[(pmax+1):bigT,,drop=FALSE]
A_large <- object$stacked.results$A_large
S_large <- object$stacked.results$S_large
Ginv_large<- object$stacked.results$Ginv_large
globalLik <- c(globalLik(Y_in=Y_large,X_in=X_large,A_in=A_large,S_in=S_large,Ginv_in=Ginv_large,thindraws=thindraws)$globalLik)
A_mean <- apply(A_large,c(1,2),mean)
S_mean <- apply(S_large,c(1,2),mean)
Ginv_mean <- apply(Ginv_large,c(1,2),mean)
Dbar <- -2*mean(globalLik,na.rm=TRUE)
pD <- Dbar+2*sum(dmvnrm_arma_fast(Y_large,X_large%*%t(A_mean),Ginv_mean%*%S_mean%*%t(Ginv_mean),TRUE))
out <- Dbar+pD
}
if(is.null(object$args$dic)){
eval.parent(substitute(object$args$dic<-out))
}
return(out)
}
/scratch/gouwar.j/cran-all/cranData/BGVAR/R/BGVAR.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
BVAR_linear <- function(Yraw, Wraw, Exraw, lags, draws, burnin, thin, cons, trend, sv, prior, hyperparam, setting_store) {
.Call(`_BGVAR_BVAR_linear`, Yraw, Wraw, Exraw, lags, draws, burnin, thin, cons, trend, sv, prior, hyperparam, setting_store)
}
#' @name do_rgig
#' @noRd
do_rgig1 <- function(lambda, chi, psi) {
.Call(`_BGVAR_do_rgig1`, lambda, chi, psi)
}
#' @name gvar_stacking
#' @noRd
gvar_stacking <- function(xglobal, plag, globalpost, draws, thin, trend, eigen, verbose) {
.Call(`_BGVAR_gvar_stacking`, xglobal, plag, globalpost, draws, thin, trend, eigen, verbose)
}
#' @name globalLik
#' @noRd
globalLik <- function(Y_in, X_in, A_in, S_in, Ginv_in, thindraws_in) {
.Call(`_BGVAR_globalLik`, Y_in, X_in, A_in, S_in, Ginv_in, thindraws_in)
}
#' @name dmvnrm_arma_fast
#' @noRd
dmvnrm_arma_fast <- function(x, mean, sigma, logd = FALSE) {
.Call(`_BGVAR_dmvnrm_arma_fast`, x, mean, sigma, logd)
}
#' @name compute_irf
#' @noRd
compute_irf <- function(A_large, S_large, Ginv_large, type, nhor, thindraws, shocklist_in, save_rot, verbose) {
.Call(`_BGVAR_compute_irf`, A_large, S_large, Ginv_large, type, nhor, thindraws, shocklist_in, save_rot, verbose)
}
# Register entry points for exported C++ functions
methods::setLoadAction(function(ns) {
.Call(`_BGVAR_RcppExport_registerCCallable`)
})
/scratch/gouwar.j/cran-all/cranData/BGVAR/R/RcppExports.R
#' @name BGVAR-package
#' @title BGVAR: Bayesian Global Vector Autoregressions
#' @description The Bayesian Global Vector Autoregression (BGVAR) package allows the estimation of Global Vector Autoregressions and provides various tools for prediction and structural analysis.
#' @details It provides a fully Bayesian implementation of Global Vector Autoregressions. It utilizes Markov chain Monte Carlo (MCMC) samplers to conduct inference by obtaining draws from the posterior distribution of parameters. One of the main advantages is the implementation of different shrinkage prior setups for estimating the model. The package thus consists of various post-processing functions to carry out predictions or structural analysis. It is possible to perform structural identification via short-run or sign/zero restrictions. The available structural tools comprise impulse response functions, historical decompositions and forecast error variance decompositions. For all the aforementioned tools, plotting functions are implemented. Furthermore, various functions of the package are intended to inspect the convergence properties of the MCMC chain and to do model evaluation. The main focus of this paper is to show the functionality of \code{BGVAR}. In addition, it provides a brief mathematical description of the model, an overview of the implemented sampling scheme, and several illustrative examples using global macroeconomic data.
#' @importFrom utils data
#' @docType package
#' @seealso
#' \code{\link{bgvar}} for estimating a Bayesian GVAR.
#' \code{\link{predict}} for doing predictions with a Bayesian GVAR.
#' \code{\link{irf}} for doing impulse response analysis with a Bayesian GVAR.
#' @importFrom Rcpp sourceCpp
#' @importFrom RcppParallel RcppParallelLibs
#' @useDynLib BGVAR, .registration=TRUE
NULL
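# Typical workflow sketch (illustrative, not run): estimate a model on the shipped
# example data, then post-process it with the prediction and structural tools.
# model <- bgvar(Data=testdata, W=W.test, plag=1, draws=100, burnin=100)
# fcast <- predict(model)
# shockinfo <- get_shockinfo("chol"); shockinfo$shock <- "US.stir"; shockinfo$scale <- -100
# irf.obj <- irf(model, n.ahead=24, shockinfo=shockinfo)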
#' @title Example data set to replicate Feldkircher and Huber (2016)
#' @description This data set contains 76 quarterly observations by country, spanning the period from 1995Q1 to 2013Q4. The country coverage is 43 countries and the Euro area (EA) as a regional aggregate.
#' @format The data loads two objects \code{eerData}, which is a list object of length \code{N} (i.e, the number of countries) and \code{W.trade0012}, which is an \code{N} times \code{N} weight matrix with rowsums summing up to unity and zero elements on its diagonal. The global variable, oil prices, is included in the US country model as e.g., in Dees et al. (2007). The countries are abbreviated using ISO-2 codes. The weight matrix corresponds to average annual bilateral trade flows (including services) over the period from 2000 to 2012.\code{eerData} contains the country data, for more details, see below:
#' \describe{
#' \item{\code{W.trade0012}}{ Weight matrix based on trade flows, rowsums equal unity.}
#' \item{\code{W.list}}{ List of ten weight matrices, described in Feldkircher and Huber (2016).}
#' \item{\code{eerData}}{ is a list object of length \code{N} containing \describe{
#' \item{\code{y}}{ Real GDP, average of 2005=100. Seasonally adjusted, in logarithms.}
#' \item{\code{Dp}}{ Consumer prices (period-on-period). CPI seasonally adjusted, in logarithm.}
#' \item{\code{stir}}{ Short-term interest rate, typically 3-months money market rate.}
#' \item{\code{ltir}}{ Long-term interest rates, typically 10-year government bond yields.}
#' \item{\code{reer}}{ Real effective exchange rate, deflated by consumer prices.}
#' \item{\code{tb}}{ Trade balance (ratio of real exports to real imports).}
#' \item{\code{poil}}{ Price of oil, seasonally adjusted, in logarithms.}
#' }}
#' \item{\code{USexpectations}}{ is a time series object containing US expectations data: \describe{
#' \item{\code{y_t+4}}{ Four-quarter ahead expectation of Real GDP growth.}
#' \item{\code{Dp_t+4}}{ Four-quarter ahead expectation of consumer price inflation.}
#' \item{\code{stir_t+4}}{ Four-quarter ahead expectation of short-term interest rates.}
#' }}
#' }
#' @aliases W.list W.trade0012 USexpectations
#' @docType data
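#' @examples
#' # Minimal usage sketch: load the example data set and inspect its components
#' data(eerData)
#' names(eerData)          # country list (ISO-2 codes)
#' head(eerData[["US"]])   # variables of the US country model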
"eerData"
#' @title Example data set to show functionality of the package
#' @description This data set is a subset of \code{eerData} containing just three countries with 76 quarterly observations, spanning the period from 1995Q1 to 2013Q4. The countries covered are the United States, the United Kingdom and the Euro area (EA) as a regional aggregate.
#' @format The data loads two objects \code{testdata}, which is a list object of length \code{N} (i.e., the number of countries) and \code{W.test}, which is an \code{N} times \code{N} weight matrix with rowsums summing up to unity and zero elements on its diagonal. The global variable, oil prices, is included in the US country model as e.g., in Dees et al. (2007). The countries are abbreviated using ISO-2 codes. The weight matrix corresponds to average annual bilateral trade flows (including services) over the period from 2000 to 2012. \code{testdata} contains the country data, for more details, see below:
#' \describe{
#' \item{\code{W.test}}{ Weight matrix based on trade flows, rowsums equal unity.}
#' \item{\code{testdata}}{ List object of length \code{N} containing \describe{
#' \item{\code{y}}{ Real GDP, average of 2005=100. Seasonally adjusted, in logarithms.}
#' \item{\code{Dp}}{ Consumer prices (period-on-period). CPI seasonally adjusted, in logarithm.}
#' \item{\code{stir}}{ Short-term interest rate, typically 3-months money market rate.}
#' \item{\code{ltir}}{ Long-term interest rates, typically 10-year government bond yields.}
#' \item{\code{reer}}{ Real effective exchange rate, deflated by consumer prices.}
#' \item{\code{tb}}{ Trade balance (ratio of real exports to real imports).}
#' \item{\code{poil}}{ Price of oil, seasonally adjusted, in logarithms.}}
#' }
#' }
#' @aliases W.test
#' @docType data
"testdata"
#' @title Monthly EU / G8 countries macroeconomic dataset
#' @description This data set contains monthly observations on industrial production, consumer price indices, short- and long-term interest rates, the nominal exchange rate against the euro and equity prices. The time period covered is from January 2001 to June 2021 and the country coverage amounts to 31 countries -- roughly corresponding to EU member states and G-8 countries, a country model to model common monetary policy in the euro area and an oil price model.
#' @format The data loads four objects \code{monthlyData}, which is a list object of length \code{N+2} (i.e., the number of countries, the ECB country model and an oil price model), \code{W}, which is an \code{N} times \code{N} weight matrix with rowsums summing up to unity and zero elements on its diagonal, as well as \code{EB.weights} and \code{OC.weights}. The countries are abbreviated using ISO-2 codes. The weight matrix corresponds to average annual input output flows for the \code{N} countries over the period from 2000 to 2014. The data are from the world input output table database (\url{https://www.rug.nl/ggdc/valuechain/wiod/}) and are fully described in Timmer et al. (2015). Akin to Georgiadis (2015), interest setting in the euro area is modeled by a Taylor rule that includes ppp-weighted output and prices of euro area countries. The euro area interest rate enters other country models as an additional exogenous variable. For more details, see below:
#' \describe{
#' \item{W}{\code{N} times \code{N} weight matrix, rowsums equal unity and the \code{i,jth} element reflecting flows from unit \code{i} to unit \code{j}.}
#' \item{\code{EB.weights}}{To model the common monetary policy in the euro area, it is possible to augment the GVAR countries by a country model for the ECB. It is important that this country model is labeled 'EB'. Akin to Georgiadis (2015) we use a Taylor rule to determine interest rates in the euro area. The Taylor rule typically relates short-term interest rates to a weighted average of output (\code{ip}) and prices (\code{p}). \code{EB.weights} is a list with the first slot containing a vector of weights to aggregate single euro area countries' output and price figures. In the example using the \code{monthlyData} set, we use purchasing power parity weights, averaged over the sample period. The second slot contains a character vector that specifies the variables which should enter the Taylor rule (typically output and prices).}
#' \item{\code{OC.weights}}{This feature is very similar to \code{EB.weights} above and should be specified if an own-standing unit model for the oil price should be included -- as opposed to having oil prices attached to a particular country model, as is standard in the literature. It is important that the country model is labeled 'OC'. Again, \code{OC.weights} is a list of length 2, the first slot should be a vector of weights to aggregate variables, second one the variables to aggregate. The vector of weights should have country names attached to it. In the example using the \code{monthlyData} set, we use purchasing power parity weights to aggregate world output to resemble demand for oil.}
#' \item{\code{monthlyData}}{ is a list object of length \code{N} containing \describe{
#' \item{\code{y}}{ Industrial production index, in real terms, logarithmic transform and seasonally adjusted.}
#' \item{\code{p}}{ Harmonized Consumer Price Index (HCPI) for EU member states, for other countries Consumer Price Index. Data in logarithmic transform and seasonally adjusted.}
#' \item{\code{stir}}{ Short-term interest rate, typically 3 months money market rate.}
#' \item{\code{EAstir}}{ Short-term interest rate, typically 3 months money market rate (3 months euribor).}
#' \item{\code{ltir}}{ Long term interest rates, typically 10-year government bond yields.}
#' \item{\code{eur_er}}{ Nominal exchange rate against the euro in logarithmic transform. An increase implies an appreciation of the euro. }
#' \item{\code{eq}}{ Equity price index, in logarithmic transform.}
#' \item{\code{poil}}{ Price of oil, seasonally adjusted, in logarithms.}
#' \item{\code{qoil}}{ World oil production of crude oil, in thousands of barrels per day, in logarithms.}}}
#' }
#' @aliases EB.weights OC.weights W
#' @references
#' Georgiadis, G. (2015) Examining asymmetries in the transmission of monetary policy in the euro area: Evidence from a mixed cross-section global VAR model. In: European Economic Review, Vol. 75, pp. 195-215.
#'
#' Timmer, M. P., Dietzenbacher, E., Los, B., Stehrer, R. and de Vries, G. J. (2015) An Illustrated User Guide to the World Input–Output Database: the Case of Global Automotive Production. In: Review of International Economics, Vol. 23, pp. 575–605.
#' @docType data
"monthlyData"
#' @title pesaranData
#' @description This data set contains quarterly observations by country, spanning the period from 1979Q2 to 2019Q4. It can be downloaded from \url{https://www.mohaddes.org/gvar}. The country coverage is 28 countries.
#' @format The data loads \code{pesaranData}, which is a list object of length \code{N} (i.e, the number of countries) and contains the country-level data as described in Mohaddes and Raissi (2020). The countries are abbreviated using ISO-2 codes. Furthermore, we also provide two datasets with first differences of some variables in \code{pesaranDiff}. \code{dominant} contains data that is considered global. \code{tA} is a three-dimensional array that contains \code{N} times \code{N} annual trade flow matrices over the period from 1980 to 2016. This array can be used to construct weight matrices. For more details, see below:
#' \describe{
#' \item{\code{W.8016}}{ Weight matrix for the \code{pesaranData} and \code{pesaranDiff} data sets, based on averaged trade flows covering the period 1980 to 2016 (based on \code{tA}).}
#' \item{\code{tA}}{ Three-dimensional array that contains the yearly, bilateral trade flows, which were used to construct \code{W.8016}.}
#' \item{\code{pesaranData}}{ List object of length \code{N} containing \describe{
#' \item{\code{y}}{ Real GDP.}
#' \item{\code{Dp}}{ Consumer price inflation.}
#' \item{\code{r}}{ Short-term interest rate, typically 3-months money market rate.}
#' \item{\code{lr}}{ Long-term interest rate.}
#' \item{\code{eq}}{ Equity prices.}
#' \item{\code{ep}}{ Exchange rate vis a vis the US dollar, deflated by the domestic CPI.}}}
#' \item{\code{pesaranDiff}}{ List object of length \code{N} containing \describe{
#' \item{\code{y}}{ Growth rate of real GDP.}
#' \item{\code{Dp}}{ First differences of consumer price inflation.}
#' \item{\code{r}}{ First differences of short-term interest rate, typically 3-months money market rate.}
#' \item{\code{lr}}{ Long-term interest rate.}
#' \item{\code{eq}}{ Equity prices.}
#' \item{\code{ep}}{ Exchange rate vis a vis the US dollar, deflated by the domestic CPI.}}}
#' \item{\code{dominant}}{ Data set containing global variables: \describe{
#' \item{\code{poil}}{ Oil prices.}
#' \item{\code{pmetal}}{ Metal price index.}
#' \item{\code{pmat}}{ Agricultural price index.}}}
#' }
#' @aliases pesaranDiff W.8016 tA dominant
#' @references
#' Mohaddes, K. and M. Raissi (2018). Compilation, Revision and Updating of the Global VAR (GVAR) Database, 1979Q2-2016Q4. University of Cambridge: Faculty of Economics (mimeo).
#' @docType data
"pesaranData"
bgvar.env <- new.env()
bgvar.env$plot <- list(
cex.main = 1.7,
cex.axis = 1.7,
cex.lab = 2.5,
col.unc = c("grey60","grey40","grey20"),
col.50 = "black",
col.tick = "lightgrey",
col.zero = "red",
lty.zero = 2,
lty.tick = 3,
lwd.line = 4,
lwd.zero = 3
)
bgvar.env$mar <- c(4.3,4.3,2.3,2.3)
.onAttach <- function(lib, pkg) {
if(interactive() || getOption("verbose")){
packageStartupMessage(sprintf("Package %s %s attached. To cite, see citation(\"%s\").", pkg, utils::packageDescription(pkg)$Version, pkg))
}
}
.onUnload <- function (libpath) {
library.dynam.unload("BGVAR", libpath)
}
/scratch/gouwar.j/cran-all/cranData/BGVAR/R/bgvar-package.R
#' @export
"fevd" <- function(x, rotation.matrix=NULL, var.slct=NULL, verbose=TRUE){
UseMethod("fevd", x)
}
#' @name fevd
#' @title Forecast Error Variance Decomposition
#' @description This function calculates the forecast error variance decomposition (FEVDs) for Cholesky and sign-identified shocks.
#' @usage fevd(x, rotation.matrix=NULL, var.slct=NULL, verbose=TRUE)
#' @details Since the calculations are very time consuming, the FEVDs are based on the posterior median only (as opposed to calculating FEVDs for each MCMC sweep). In case the underlying shock has been identified via sign restrictions, the rotation matrix corresponds to the one that fulfills the sign restrictions at the posterior median of the estimated coefficients. More precisely, the algorithm searches for 50 rotation matrices that fulfill the sign restrictions at the \emph{posterior median} of the coefficients and then singles out the rotation matrix that minimizes the distance to the median of the impulse responses as suggested in Fry and Pagan (2011).
#' @param x an object of class \code{bgvar.irf}.
#' @param rotation.matrix If \code{NULL} and \code{x} has been fitted via sign restrictions, the rotation matrix is used that minimizes the distance to the median impulse responses at the posterior median.
#' @param var.slct character vector that contains the variables for which forecast error variance decomposition should be performed. If \code{NULL} the FEVD is computed for the whole system, which is very time consuming.
#' @param verbose If set to \code{FALSE} it suppresses printing messages to the console.
#' @return Returns a list with two elements \describe{
#' \item{\code{FEVD}}{ an array of size (K times horizon times N), where K is the number of variables in the system, horizon is the specified impulse response horizon and N is the number of decomposed structural variables (if \code{var.slct=NULL} then N=K).}
#' \item{\code{xglobal}}{ used data of the model.}
#' }
#' @author Maximilian Boeck, Martin Feldkircher, Florian Huber
#' @seealso \code{\link{bgvar}}, \code{\link{irf}}
#' @examples
#' \dontshow{
#' set.seed(123)
#' library(BGVAR)
#' data(testdata)
#' model.eer<-bgvar(Data=testdata,W=W.test,prior="MN",
#' draws=200,burnin=50,plag=1,eigen=TRUE)
#'
#' # US monetary policy shock
#' shockinfo <- get_shockinfo("chol")
#' shockinfo$shock <- "US.stir"; shockinfo$scale <- -100
#' irf.chol.us.mp<-irf(model.eer,n.ahead=48,shockinfo=shockinfo)
#'
#' # calculates FEVD for variables US.Dp and EA.y
#' fevd.us.mp=fevd(irf.chol.us.mp,var.slct=c("US.Dp","EA.y"))
#'
#' # US monetary policy shock with sign restrictions
#' shockinfo <- get_shockinfo("sign")
#' shockinfo <- add_shockinfo(shockinfo, shock="US.stir",
#' restriction=c("US.y","US.Dp"),
#' sign=c("<","<"), horizon=c(1,1), 1, 100)
#' irf.sign.us.mp<-irf(model.eer,n.ahead=24,shockinfo=shockinfo)
#'
#' # calculates FEVD for variables US.Dp and EA.y
#' fevd.us.mp=fevd(irf.sign.us.mp,var.slct=c("US.Dp","EA.y"))
#' }
#' @export
fevd.bgvar.irf <- function(x, rotation.matrix=NULL, var.slct=NULL, verbose=TRUE){
start.fevd <- Sys.time()
if(verbose) cat("Start computing forecast error variance decomposition of Bayesian Global Vector Autoregression.\n\n")
#------------------------------ get stuff -------------------------------------------------------#
xglobal = x$model.obj$xglobal
lags = x$model.obj$lags
pmax = max(lags)
ident = x$ident
Traw = nrow(xglobal)
bigK = ncol(xglobal)
xdat = xglobal[(pmax+1):Traw,,drop=FALSE]
  bigT      = nrow(xdat)
A = x$struc.obj$A
Fmat = x$struc.obj$Fmat
Ginv = x$struc.obj$Ginv
Smat = x$struc.obj$S
Rmed = x$struc.obj$Rmed
Sigma_u = Ginv%*%Smat%*%t(Ginv)
horizon = dim(x$posterior)[2]
varNames = colnames(xglobal)
shock = x$shock
sign.constr = x$sign.constr
cN = unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x)x[1]))
N = length(cN)
if(ident=="sign"){
if(verbose) cat("Identification scheme: Sign-restrictions provided.\n")
shock.cN <- strsplit(unique(x$shockinfo$shock),".",fixed=TRUE)[[1]][1]
}else if(ident=="chol"){
if(verbose) cat("Identification scheme: Short-run restrictions via Cholesky decomposition.\n")
shock.cN <- unique(x$shockinfo$shock)
}
#-------------------- some checks ------------------------------------------------------------------#
if(!ident%in%c("sign","chol")){
stop("FEVD implemented for shocks identified via cholesky ordering or sign restrictions only.")
}
if(!is.null(var.slct)){
if(!all(var.slct%in%varNames)){
stop("One of the variables you want to decompose is not contained in the system. Please re-specify!")
}
}
  if(is.null(rotation.matrix)){
    if((is.null(Rmed) || any(is.na(Rmed))) && ident == "sign"){
      stop("No rotation matrix available. Please supply rotation matrix or re-estimate IRFs with sign-restrictions.")
    }else if(ident=="sign" && !is.null(Rmed)){
      rotation.matrix = Rmed
    }else{
      rotation.matrix = diag(bigK)
    }
  }
if(is.null(var.slct)){
if(verbose) cat("FEVD computed for all variables.\n\n")
var.slct<-varNames
}else{
var.print <- var.slct[1]
if(length(var.slct)>1) for(kk in 2:length(var.slct)) var.print <- paste(var.print,", ",var.slct[kk],sep="")
if(verbose) cat(paste("FEVD computed for the following variables: ",var.print,".\n",sep=""))
}
#----------------------------------------------------------------------------------------------------#
P0G <- diag(bigK); colnames(P0G) <- rownames(P0G) <- varNames
gcov <- Smat
for(cc in 1:N){
if(cN[cc] %in% shock.cN){
idx <- grep(cN[cc],varNames)
      P0G[idx,idx] <- t(chol(gcov[idx,idx,drop=FALSE])) # calculate local Cholesky factor of gcov
      gcov[idx,idx] <- diag(length(idx)) # set vcv matrix to identity for country where shock occurs
}
}
# create dynamic multiplier
PHIx <- array(0,c(bigK,bigK,pmax+horizon+1)); dimnames(PHIx)[[1]] <- dimnames(PHIx)[[2]] <- varNames
PHIx[,,pmax+1] <- diag(bigK)
for (ihor in (pmax+2):(pmax+horizon+1)){
acc = matrix(0,bigK,bigK)
for (pp in 1:pmax){
acc <- acc + Fmat[,,pp]%*%PHIx[,,ihor-pp]
}
PHIx[,,ihor] <- acc
}
PHI <- PHIx[,,(pmax+1):(pmax+horizon+1)]
#----------------------------------------------------------------------------------------------------#
if(verbose) cat("Start computing FEVDs...\n")
vslct <- diag(bigK)
invGSigmau <- Ginv%*%gcov # use gcov not SIGMA_u from irf.obj
invGSinvG <- invGSigmau%*%t(Ginv)
scale <- 1/diag(gcov)
FEVDres <- array(0,dim=c(bigK,length(var.slct),horizon+1))
dimnames(FEVDres) <- list(varNames,paste("Decomp. of",var.slct),0:horizon)
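  # For each selected variable: 'num' accumulates the squared (rotated) responses to the
  # individual structural shocks, 'den' the total forecast error variance at each horizon;
  # their scaled ratio gives the share of forecast error variance attributed to each shock.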
for(zz in 1:length(var.slct)){
eslct <-matrix(0,bigK,1);rownames(eslct) <- varNames
eslct[var.slct[zz],1] <- 1
num <- matrix(0,bigK,horizon+1)
den <- matrix(0,bigK,horizon+1)
N <- 1
while (N<=horizon+1){
for (l in 1:N){
acc1 <- t((t(eslct)%*%rotation.matrix%*%PHI[,,l]%*%invGSigmau%*%vslct)^2)
num[,N] <- num[,N] + acc1
acc2 <- (t(eslct)%*%rotation.matrix%*%PHI[,,l]%*%invGSinvG%*%t(rotation.matrix%*%PHI[,,l])%*%eslct)
den[,N] <- den[,N] + matrix(1,bigK,1)*as.numeric(acc2)
}
FEVDres[,paste("Decomp. of",var.slct[zz]),N] <- (scale*num[,N])/den[,N]
N <- N+1
}
}
#------------------------------------------------------------------------------------------------------
out <- structure(list(FEVD=FEVDres,
xglobal=xglobal,
rotation.matrix=rotation.matrix),
class="bgvar.fevd", type="fevd")
if(verbose) cat(paste("\nSize of FEVD object: ", format(object.size(FEVDres),unit="MB")))
end.fevd <- Sys.time()
diff.fevd <- difftime(end.fevd,start.fevd,units="mins")
mins.fevd <- round(diff.fevd,0); secs.fevd <- round((diff.fevd-floor(diff.fevd))*60,0)
if(verbose) cat(paste("\nNeeded time for computation: ",mins.fevd," ",ifelse(mins.fevd==1,"min","mins")," ",secs.fevd, " ",ifelse(secs.fevd==1,"second.","seconds.\n"),sep=""))
return(out)
}
#' @export
"gfevd" <- function(x, n.ahead=24, running=TRUE, applyfun=NULL, cores=NULL, verbose=TRUE){
UseMethod("gfevd", x)
}
#' @name gfevd
#' @title Generalized Forecast Error Variance Decomposition
#' @description This function calculates a complete generalized forecast error variance decomposition (GFEVD) based on generalized impulse response functions akin to Lanne-Nyberg (2016). The Lanne-Nyberg (2016) corrected GFEVDs sum up to unity.
#' @method gfevd bgvar
#' @export
#' @usage gfevd(x, n.ahead=24, running=TRUE, applyfun=NULL, cores=NULL, verbose=TRUE)
#' @param x an object of class \code{bgvar}.
#' @param n.ahead the forecast horizon.
#' @param running Default is set to \code{TRUE} and implies that only a running mean over the posterior draws is calculated. A full analysis including posterior bounds is likely to cause memory issues.
#' @param applyfun Allows for a user-specific apply function, which has to have the same interface as \code{lapply}. If \code{cores=NULL} then \code{lapply} is used; if set to a numeric, either \code{parallel::parLapply()} is used on Windows platforms or \code{parallel::mclapply()} on non-Windows platforms.
#' @param cores Specifies the number of cores which should be used. Default is set to \code{NULL} and \code{applyfun} is used.
#' @param verbose If set to \code{FALSE} it suppresses printing messages to the console.
#' @return Returns a list with two elements \describe{
#' \item{\code{GFEVD}}{ a three or four-dimensional array, with the first dimension referring to the K time series that are decomposed into contributions of K time series (second dimension) for \code{n.ahead} forecast horizons. In case \code{running=TRUE} the array is three-dimensional and contains the posterior mean only; otherwise the fourth dimension contains the posterior median along with the 16\% and 84\% credible intervals.}
#' \item{\code{xglobal}}{ used data of the model.}
#' }
#' @author Maximilian Boeck, Martin Feldkircher
#' @references
#' Lanne, M. and H. Nyberg (2016) \emph{Generalized Forecast Error Variance Decomposition for Linear and Nonlinear Multivariate Models.} Oxford Bulletin of Economics and Statistics, Vol. 78(4), pp. 595-603.
#' @seealso \code{\link{bgvar}}.
#' @examples
#' \dontshow{
#' library(BGVAR)
#' data(testdata)
#' model.eer<-bgvar(Data=testdata, W=W.test, draws=100, burnin=100,
#' plag=1, prior="SSVS", eigen=TRUE)
#'
#' GFEVD<-gfevd(model.eer, n.ahead=24)
#' }
#' @importFrom abind adrop
gfevd.bgvar<-function(x,n.ahead=24,running=TRUE,applyfun=NULL,cores=NULL,verbose=TRUE){
start.gfevd <- Sys.time()
if(verbose) cat("\nStart computing generalized forecast error variance decomposition of Bayesian Global Vector Autoregression.\n\n")
#------------------------------ get stuff -------------------------------------------------------#
lags <- x$args$lags
pmax <- max(lags)
xglobal <- x$xglobal
Traw <- nrow(xglobal)
bigK <- ncol(xglobal)
Kbig <- pmax*bigK
bigT <- Traw-pmax
A_large <- x$stacked.results$A_large
F_large <- x$stacked.results$F_large
S_large <- x$stacked.results$S_large
Ginv_large <- x$stacked.results$Ginv_large
F.eigen <- x$stacked.results$F.eigen
x <- xglobal[(pmax+1):Traw,,drop=FALSE]
thindraws <- length(F.eigen) ### prior: draws
varNames <- colnames(xglobal)
#------------------------------ prepare applyfun --------------------------------------------------------#
if(is.null(applyfun)) {
applyfun <- if(is.null(cores)) {
lapply
} else {
if(.Platform$OS.type == "windows") {
cl_cores <- parallel::makeCluster(cores)
on.exit(parallel::stopCluster(cl_cores))
function(X, FUN, ...) parallel::parLapply(cl = cl_cores, X, FUN, ...)
} else {
function(X, FUN, ...) parallel::mclapply(X, FUN, ..., mc.cores =
cores)
}
}
}
if(is.null(cores)) cores <- 1
#-----------------------------------------------------------------------------------------------------#
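  # running=TRUE: accumulate a running mean of the GFEVD across stable posterior draws to keep
  # memory requirements low; otherwise all draws are stored and posterior quantiles are reported.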
if(running){
GFEVD_post <- array(0,dim=c(bigK,bigK,n.ahead)); dimnames(GFEVD_post)<-list(varNames, paste("Decomp. of",varNames),0:(n.ahead-1))
if(verbose) cat(paste("Start computation on ", cores, " cores", " (",thindraws," stable draws in total).",sep=""),"\n")
imp.obj <- applyfun(1:thindraws,function(irep){
irfa <- .irf.girf.sims(invG=Ginv_large[,,irep],
lF=adrop(F_large[,,,irep,drop=FALSE],drop=4),
gcov=S_large[,,irep],
x,horizon=n.ahead)$impl
GFEVD <- .mk_fevd.sims(irfa)
return(list(GFEVD=GFEVD))
})
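    # accumulate GFEVDs over the posterior draws: draws yielding NAs (e.g. numerically
    # unstable ones) are skipped and the running mean is taken over the valid draws only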
thindraws2 <- 0
for(irep in 1:thindraws){
if(!any(is.na(imp.obj[[irep]]$GFEVD))){
GFEVD_post<-GFEVD_post+imp.obj[[irep]]$GFEVD
thindraws2 <- thindraws2+1
}
}
GFEVD_post<-GFEVD_post/thindraws2
}else{ #-------------------------HERE DO FULL CALCULATION INCLUDING BOUNDS- VERY MEMORY INTENSIVE!!!-----
GFEVD_draws<-array(NA,dim=c(thindraws,bigK,bigK,n.ahead))
GFEVD_post <-array(NA,dim=c(bigK,bigK,n.ahead,3))
dimnames(GFEVD_post)[[1]]<-dimnames(GFEVD_post)[[2]]<-varNames
dimnames(GFEVD_post)[[3]]<-0:(n.ahead-1)
dimnames(GFEVD_post)[[4]]<-c("low16","median","high84")
if(verbose) cat(paste("Start computation on ", cores, " cores", " (",thindraws," stable draws in total).",sep=""),"\n")
imp.obj <- applyfun(1:thindraws,function(irep){
irfa <- .irf.girf.sims(invG=Ginv_large[,,irep],
                              lF=adrop(F_large[,,,irep,drop=FALSE],drop=4),
gcov=S_large[,,irep],
x,horizon=n.ahead)$impl
GFEVD <- .mk_fevd.sims(irfa)
return(list(GFEVD=GFEVD))
})
for(irep in 1:thindraws){
GFEVD_draws[irep,,,] <- imp.obj[[irep]]$GFEVD
}
GFEVD_post[,,,1] <- apply(GFEVD_draws,c(2,3,4),quantile,.16,na.rm=TRUE)
GFEVD_post[,,,2] <- apply(GFEVD_draws,c(2,3,4),median,na.rm=TRUE)
    GFEVD_post[,,,3] <- apply(GFEVD_draws,c(2,3,4),quantile,.84,na.rm=TRUE)
}
#----------------------------------------------------------------------------------------------------------------
out <- structure(list(FEVD=GFEVD_post,
xglobal=xglobal,
R=NULL),
class="bgvar.fevd", type="gfevd")
if(!running) out$GFEVD_store <- GFEVD_draws
if(verbose) cat(paste("Size of IRF object:", format(object.size(out),unit="MB")))
end.gfevd <- Sys.time()
diff.gfevd <- difftime(end.gfevd,start.gfevd,units="mins")
mins.gfevd <- round(diff.gfevd,0); secs.gfevd <- round((diff.gfevd-floor(diff.gfevd))*60,0)
if(verbose) cat(paste("\nNeeded time for computation: ",mins.gfevd," ",ifelse(mins.gfevd==1,"min","mins")," ",secs.gfevd, " ",ifelse(secs.gfevd==1,"second.","seconds.\n"),sep=""))
return(out)
}
#' @method print bgvar.fevd
#' @export
print.bgvar.fevd <- function(x, ...){
cat("---------------------------------------------------------------------------------------")
cat("\n")
cat("Object contains forecast error variance decomposition of object estimated with 'bgvar':")
cat("\n")
cat(paste0("Size of FEVD containing forecast error variance decompositions: ",dim(x$FEVD)[[1]]," x ",dim(x$FEVD)[[2]]," x ",dim(x$FEVD)[[3]],"."))
cat("\n")
if(attributes(x)$type=="fevd"){
cat("Identification scheme: ")
if(is.null(x$rotation.matrix)){
cat("Short-run restrictions via Cholesky decomposition.")
}else{
cat("Sign-restrictions.")
}
}else if(attributes(x)$type=="gfevd"){
cat("Identification scheme: Generalized - no identification scheme employed.")
}
cat("\n")
cat(paste0("Size ob object: ",format(object.size(x),unit="MB")))
cat("\n")
cat("---------------------------------------------------------------------------------------")
return(invisible(x))
}
/scratch/gouwar.j/cran-all/cranData/BGVAR/R/fevd.R
#' @export
"hd" <- function(x, rotation.matrix=NULL, verbose=TRUE){
UseMethod("hd", x)
}
#' @name hd
#' @title Historical Decomposition
#' @description A function that calculates historical decomposition (HD) of the time series and the structural error.
#' @method hd bgvar.irf
#' @export
#' @usage hd(x, rotation.matrix=NULL, verbose=TRUE)
#' @param x an item fitted by \code{irf}.
#' @param rotation.matrix If \code{NULL} and the \code{bgvar.irf} object has been fitted via sign restrictions, the rotation matrix is used that minimizes the distance to the median impulse responses at the posterior median.
#' @param verbose If set to \code{FALSE} it suppresses printing messages to the console.
#' @details To save computational time as well as due to storage limits, both functions are based on the posterior median (as opposed to calculating HDs and the structural error for each draw of the MCMC chain). In case the shock has been identified via sign restrictions, a rotation matrix has to be selected to calculate both statistics. If not specified otherwise (via \code{rotation.matrix}), the algorithm searches for 50 rotation matrices that fulfill the sign restrictions at the \emph{posterior median} of the coefficients and then singles out the rotation matrix that minimizes the distance to the median of the impulse responses as suggested in Fry and Pagan (2011).
#' @return Returns a list with the following objects \describe{
#' \item{\code{hd_array}}{ is a three-dimensional array with the first dimension referring to the T observations, the second to the K time series and the third containing the contributions of the shocks in explaining historical deviations of the time series from their trend. The third dimension is K+3, since the last three entries contain the contributions of the constant, the initial condition and a residual component such that all contributions sum up to the original time series. If a trend is specified in the model, the third dimension is K+4 with the trend ordered after the constant.}
#' \item{\code{struc_shock}}{ contains the structural shocks.}
#' \item{\code{x}}{ is a matrix object that contains the original time series, which is of dimension K times (T-plag).}
#' }
#' @author Maximilian Boeck, Martin Feldkircher, Florian Huber
#' @seealso \code{\link{bgvar}} and \code{\link{irf}}.
#' @examples
#' \dontshow{
#' library(BGVAR)
#' data(testdata)
#' model.eer<-bgvar(Data=testdata, W=W.test, draws=100, burnin=100,
#' plag=1, prior="SSVS", eigen=TRUE)
#'
#' # US monetary policy shock
#' shockinfo <- get_shockinfo("chol")
#' shockinfo$shock <- "US.stir"; shockinfo$scale <- -100
#' irf.chol.us.mp<-irf(model.eer,n.ahead=48,shockinfo=shockinfo)
#'
#' # calculates historical decomposition
#' HD <- hd(irf.chol.us.mp)
#' }
#' @references
#' Fry, R. and A. Pagan (2011) \emph{Sign restrictions in Structural Vector Autoregressions: A Critical Review}. Journal of Economic Literature, Vol. 49(4), pp. 938-960.
hd.bgvar.irf<-function(x, rotation.matrix=NULL, verbose=TRUE){
start.hd <- Sys.time()
if(verbose) cat("Start computing historical decomposition of Bayesian Global Vector Autoregression.\n\n")
#------------------------------ get stuff -------------------------------------------------------#
xglobal <- x$model.obj$xglobal
lags <- x$model.obj$lags
pmax <- max(lags)
ident <- x$ident
Traw <- nrow(xglobal)
bigK <- ncol(xglobal)
xdat <- xglobal[(pmax+1):Traw,,drop=FALSE]
bigT <- nrow(xdat)
ALPHA <- x$struc.obj$A
Ginv <- x$struc.obj$Ginv
Smat <- x$struc.obj$Smat
Sigma_u <- Ginv%*%Smat%*%t(Ginv)
varNames<- colnames(xglobal)
trend <- FALSE
  if(is.null(rotation.matrix)){
    if(!is.null(x$struc.obj$Rmed)){
      rotation.matrix<-x$struc.obj$Rmed
    }else{
      rotation.matrix<-diag(bigK)
    }
  }
rownames(rotation.matrix) <- colnames(rotation.matrix) <- varNames
#------------------------checks-------------------------------------------------------------------#
if(ident=="girf"){
message("Historical decomposition of the time series not implemented for GIRFs since cross-correlation is unequal to zero (and hence decompositions do not sum up to original time series).")
    return(list(hd_array=NA,struc_shock=NA,xglobal=xglobal))
}
#------ initialize objects -----------------------------------------------------------------------#
if("trend"%in%dimnames(ALPHA)[[2]]) trend <- TRUE
struc_post <- array(NA,dim=c(bigT,bigK))
hd_array <- array(0,c(bigK,bigT,(bigK+3+trend)))
  if(trend){
    dimnames(hd_array)<-list(varNames, NULL, c(paste("contribution of shock to", c(varNames)),"constant","trend","initial cond.","residual"))
  }else{
    dimnames(hd_array)<-list(varNames, NULL, c(paste("contribution of shock to", c(varNames)),"constant","initial cond.","residual"))
  }
#------------------------------------------------------------------------------------------------#
Rinv <- solve(rotation.matrix)
Sigchol_u <- t(chol(Sigma_u))
Sigcholinv <- solve(Sigchol_u)
vv <- matrix(0,bigT,bigK,dimnames=list(NULL,varNames))
YY <- xglobal[(pmax+1):Traw,]
XX <- cbind(.mlag(xglobal,pmax),1)
XX <- XX[(pmax+1):nrow(XX),]
if(trend) XX <- cbind(XX,seq(1,bigT))
strMat <- Rinv%*%Sigcholinv
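  # structural shocks: v_t = R^{-1} L^{-1} (y_t - A x_t), where L is the lower Cholesky factor
  # of Sigma_u and R the chosen rotation matrix, i.e. the rotated reduced-form residuals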
for (t in 1:bigT){
Yhat <- strMat%*%YY[t,]
Xhat <- strMat%*%ALPHA%*%XX[t,]
PHI <- Rinv%*%Sigcholinv%*%ALPHA
vv[t,] <- Yhat-Xhat
}
#Start historical decompositions -------------------------------------------------------------------------------#
if(verbose) cat("Start computing HDs...\n")
HDshock_big <- array(0,c(pmax*bigK,bigT,bigK))
HDconst_big <- matrix(0,pmax*bigK,bigT)
HDinit_big <- matrix(0,pmax*bigK,bigT)
HDshock <- array(0,c(bigK,bigT,bigK))
HDinit <- matrix(0,bigK,bigT)
HDconst <- matrix(0,bigK,bigT)
if(trend){
HDtrend_big <- matrix(0,pmax*bigK,bigT)
HDtrend <- matrix(0,bigK,bigT)
}
solveA <- (Sigchol_u%*%rotation.matrix) #Depends on identification, if Cholesky then solveA = t(chol(SIGMA)), where SIGMA is the VC of the global model
eps <- (YY-XX%*%t(ALPHA))%*%t(solve(solveA)) #Atilda is the matrix of autoregressive coefficients of the global model
Fcomp <- .get_companion(ALPHA[,1:(bigK*pmax)],varndxv = c(bigK,0,pmax))$MM#Fcomp is the companion matrix (used in the eigenvalue stuff without the constant)
invA_big <- matrix(0,bigK*pmax,bigK) #M is the number of endogenous variables ; p is the number of lags
invA_big[1:bigK,] <- solveA
Icomp <- cbind(diag(bigK),matrix(0,bigK,(pmax-1)*bigK))
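  # companion-form recursion: for each structural shock j its cumulated contribution evolves as
  # HD_t = invA_big * eps_{t,j} + Fcomp * HD_{t-1}; Icomp extracts the first bigK rows (current period)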
for (nn in 2:bigT){
for (jj in 1:bigK){
eps_big <- matrix(0,bigK,1)
eps_big[jj,] <- eps[nn,jj]
HDshock_big[,nn,jj] <- (invA_big)%*%eps_big+Fcomp%*%HDshock_big[,nn-1,jj]
HDshock[,nn,jj] <- Icomp%*%HDshock_big[,nn,jj]
}
#Initial value
HDinit_big[,1] <- XX[1,1:(pmax*bigK)]
HDinit[,1] <- Icomp%*%HDinit_big[,1]
HDinit_big[,nn] <- Fcomp%*%HDinit_big[,nn-1]
HDinit[,nn] <- Icomp%*%HDinit_big[,nn]
#Constant
CC <- matrix(0,bigK*pmax,1)
CC[1:bigK] <- t(ALPHA)[(bigK*pmax)+1,]
HDconst_big[,nn] <- CC+Fcomp%*%HDconst_big[,nn-1]
HDconst[,nn] <- Icomp%*%HDconst_big[,nn]
# Trend
if(trend){
TT <- matrix(0,bigK*pmax,1)
TT[1:bigK] <- t(ALPHA)[(bigK*pmax)+2,]
HDtrend_big[,nn] <- TT+Fcomp%*%HDtrend_big[,nn-1]
HDtrend[,nn] <- Icomp%*%HDtrend_big[,nn]
}
}
  hd_array[,,1:bigK]         <- HDshock
  hd_array[,,(bigK+1)]       <- HDconst
  if(trend) hd_array[,,(bigK+1+trend)] <- HDtrend
  hd_array[,,(bigK+2+trend)] <- HDinit
hd_array[,,(bigK+3+trend)] <- (t(xdat)-apply(hd_array,c(1,2),sum)) # residual part
#----------------------------------------------------------------------------------#
hd_array <- aperm(hd_array,c(2,1,3))
out <- structure(list(hd_array=hd_array,struc_shock=vv,xglobal=xdat, R=NULL), class="bgvar.hd")
if(verbose) cat(paste("Size of object:", format(object.size(out),unit="MB")))
end.hd <- Sys.time()
diff.hd <- difftime(end.hd,start.hd,units="mins")
mins.hd <- round(diff.hd,0); secs.hd <- round((diff.hd-floor(diff.hd))*60,0)
if(verbose) cat(paste("\nNeeded time for computation: ",mins.hd," ",ifelse(mins.hd==1,"min","mins")," ",secs.hd, " ",ifelse(secs.hd==1,"second.","seconds.\n"),sep=""))
return(out)
}
#' @method print bgvar.hd
#' @export
print.bgvar.hd <- function(x, ...){
cat("---------------------------------------------------------------------------------------")
cat("\n")
cat("Object contains historical decomposition of object estimated with 'bgvar':")
cat("\n")
cat(paste0("Size of hd_array containing historical decompositions: ",dim(x$hd_array)[[1]]," x ",dim(x$hd_array)[[2]]," x ",dim(x$hd_array)[[3]],"."))
cat("\n")
cat(paste0("Size of struc_shock containing structural errors: ",dim(x$struc_shock)[[1]]," x ",dim(x$struc_shock)[[2]],"."))
cat("\n")
cat("Identification scheme: ")
if(is.null(x$R)){
cat("Short-run restrictions via Cholesky decomposition.")
}else{
cat("Sign-restrictions.")
}
cat("\n")
cat(paste0("Size ob object: ",format(object.size(x),unit="MB")))
cat("\n")
cat("---------------------------------------------------------------------------------------")
return(invisible(x))
}
/scratch/gouwar.j/cran-all/cranData/BGVAR/R/hd.R
#' @name avg.pair.cc
#' @export
#' @title Average Pairwise Cross-Sectional Correlations
#' @description Computes average pairwise cross-sectional correlations of the data and the country models' residuals.
#' @details If used for analyzing the country models' residuals, \code{avg.pair.cc} computes for each country and a given variable, the average cross-sectional correlation (either for the data or for the residuals). In theory, including foreign variables should soak up cross-sectional residual dependence and correlation of the residuals should be small. Otherwise dynamic analysis, especially using GIRFs, might lead to invalid results. See Dees et al. (2007) for more details.
#' @usage avg.pair.cc(object, digits=3)
#' @param object Either an object of class \code{bgvar} or residuals of class \code{bgvar.res}.
#' @param digits Number of digits that should be used to print output to the console.
#' @return Returns a list with the following elements
#' \item{\code{data.cor}}{ is a matrix containing in the rows the cross-sections and in the columns the cross-sectional pairwise correlations of the data per variable.}
#' \item{\code{resid.cor}}{ is a matrix containing in the rows the cross-sections and in the columns the cross-sectional pairwise correlations of the country models' residuals per variable.}
#' \item{\code{resid.corG}}{ is a matrix containing in the rows the cross-sections and in the columns the cross-sectional pairwise correlations of the global models' residuals per variable. Only available when \code{avg.pair.cc} has been applied to a \code{bgvar.res} object from \code{residuals}.}
#' \item{\code{dat.res}}{ is a summary object showing the number and percentage of correlations <0.1, between 0.1-0.2, 0.2-0.5 and >0.5 per variable of the data.}
#' \item{\code{res.res}}{ is a summary object showing the number and percentage of correlations <0.1, between 0.1-0.2, 0.2-0.5 and >0.5 per variable of the country models' residuals. This is also what is used by \code{print.bgvar}.}
#' \item{\code{res.resG}}{ is a summary object showing the number and percentage of correlations <0.1, between 0.1-0.2, 0.2-0.5 and >0.5 per variable of the global models' residuals. Only available when \code{avg.pair.cc} has been applied to a \code{bgvar.res} object from \code{residuals}.}
#' @author Martin Feldkircher
#' @references
#' Dees, S., Di Mauro F., Pesaran, M. H. and Smith, L. V. (2007) \emph{Exploring the international linkages of the euro area: A global VAR analysis.} Journal of Applied Econometrics, Vol. 22, pp. 1-38.
#' @seealso
#' \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' \code{\link{residuals}} for calculating the residuals from a \code{bgvar} object and creating a \code{bgvar.res} object.
#' @examples
#' \donttest{
#' library(BGVAR)
#' data(testdata)
#' model.mn <- bgvar(Data=testdata,W=W.test,plag=1,SV=TRUE,
#' draws=100,burnin=100,prior="MN")
#' avg.pair.cc(model.mn)
#'
#' res <- residuals(model.mn)
#' avg.pair.cc(res)
#' }
#' @importFrom stats cor
avg.pair.cc=function(object, digits=3){
if(inherits(object, "bgvar")){
lags <- object$args$lags
pmax <- max(lags)
dat <- object$xglobal[-c(1:pmax),]
res <- do.call("cbind",object$cc.results$res)
res <- res[,colnames(dat)]
}
if(inherits(object, "bgvar.resid")){
dat <- object$Data
res <- apply(object$country,c(2,3),mean)
res.g <- apply(object$global,c(2,3),mean)
}
bigT <- nrow(res)
varNames <- colnames(dat)
cN <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[1]))
vars <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[2]))
idx<-lapply(as.list(1:length(vars)),function(x) grep(paste(".",vars[x],sep=""),colnames(dat)))
names(idx)<-vars
# kick out exo variables
exo<-which(sapply(idx,length)==1)
if(length(exo)>0){
idx<-idx[-exo]
}
datL<-resL<-resL.g<-matrix("-",nrow=length(cN),ncol=length(idx))
rownames(datL)<-rownames(resL)<-rownames(resL.g)<-cN
colnames(datL)<-colnames(resL)<-colnames(resL.g)<-names(idx)
if(inherits(object,"bgvar")){
for(i in 1:length(idx)){
aux.dat <- cor(dat[,idx[[i]]])
aux.res <- cor(res[,idx[[i]]])
diag(aux.dat)<-diag(aux.res)<-NA
ii <- sapply(strsplit(rownames(aux.dat),".",fixed=TRUE),function(x) x[1])
aux.dat <- round(rowMeans(aux.dat,na.rm=TRUE),digits=digits)
aux.res <-round(rowMeans(aux.res,na.rm=TRUE),digits=digits)
datL[ii,i]<-aux.dat
resL[ii,i]<-aux.res
}
}
if(inherits(object,"bgvar.resid")){ # include analysis based on residuals of the global model as well
for(i in 1:length(idx)){
aux.dat <- cor(dat[,idx[[i]]])
aux.res <- cor(res[,idx[[i]]])
aux.rg <- cor(res.g[,idx[[i]]])
diag(aux.dat)<-diag(aux.res)<-diag(aux.rg)<-NA
ii <- sapply(strsplit(rownames(aux.dat),".",fixed=TRUE),function(x) x[1]) #should be the same for u
aux.dat <- round(rowMeans(aux.dat,na.rm=TRUE),digits=digits)
aux.res <- round(rowMeans(aux.res,na.rm=TRUE),digits=digits)
aux.rg <- round(rowMeans(aux.rg,na.rm=TRUE),digits=digits)
datL[ii,i] <- aux.dat
resL[ii,i] <- aux.res
resL.g[ii,i] <- aux.rg
}
}
# Generate summary-table
pp<-suppressWarnings(apply(datL,2,as.numeric))
rr<-suppressWarnings(apply(resL,2,as.numeric))
rg<-suppressWarnings(apply(resL.g,2,as.numeric))
dat.res<-res.res<-res.resG<-matrix(0,nrow=4,ncol=ncol(datL))
for(i in 1:ncol(datL)){
aux<-pp[,i];aux<-abs(aux[which(!is.na(aux))]);K<-length(aux)
aux2<-rr[,i];aux2<-abs(aux2[which(!is.na(aux2))]);K2<-length(aux2)
aux3<-rg[,i];aux3<-abs(aux3[which(!is.na(aux3))]);K3<-length(aux3)
dat.res[1,i]<-paste(length(which(aux<=0.1))," (",round((length(which(aux<=0.1))/K)*100,2),"%)",sep="")
res.res[1,i]<-paste(length(which(aux2<=0.1))," (",round((length(which(aux2<=0.1))/K2)*100,2),"%)",sep="")
temp<-round((length(which(aux3<=0.1))/K3)*100,2)
    res.resG[1,i]<-paste(length(which(aux3<=0.1))," (",ifelse(is.nan(temp),0,temp),"%)",sep="")
dat.res[2,i]<-paste(length(which(aux>0.1&aux<=0.2))," (",round((length(which(aux>0.1&aux<=0.2))/K)*100,2),"%)",sep="")
res.res[2,i]<-paste(length(which(aux2>0.1&aux2<=0.2))," (",round((length(which(aux2>0.1&aux2<=0.2))/K2)*100,2),"%)",sep="")
temp<-round((length(which(aux3>0.1&aux3<=0.2))/K3)*100,2)
    res.resG[2,i]<-paste(length(which(aux3>0.1&aux3<=0.2))," (",ifelse(is.nan(temp),0,temp),"%)",sep="")
dat.res[3,i]<-paste(length(which(aux>0.2&aux<=0.5))," (",round((length(which(aux>0.2&aux<=0.5))/K)*100,2),"%)",sep="")
res.res[3,i]<-paste(length(which(aux2>0.2&aux2<=0.5))," (",round((length(which(aux2>0.2&aux2<=0.5))/K2)*100,2),"%)",sep="")
temp<-round((length(which(aux3>0.2&aux3<=0.5))/K3)*100,2)
res.resG[3,i]<-paste(length(which(aux3>0.2&aux3<=0.5))," (",ifelse(is.nan(temp),0,temp),"%)",sep="")
dat.res[4,i]<-paste(length(which(aux>0.5&aux<=1))," (",round((length(which(aux>0.5&aux<=1))/K)*100,2),"%)",sep="")
res.res[4,i]<-paste(length(which(aux2>0.5&aux2<=1))," (",round((length(which(aux2>0.5&aux2<=1))/K2)*100,2),"%)",sep="")
temp<-round((length(which(aux3>0.5&aux3<=1))/K3)*100,2)
res.resG[4,i]<-paste(length(which(aux3>0.5&aux3<=1))," (",ifelse(is.nan(temp),0,temp),"%)",sep="")
}
colnames(dat.res) <- colnames(res.res) <- colnames(res.resG) <- colnames(datL)
rownames(dat.res) <- rownames(res.res) <- rownames(res.resG) <- c("<0.1","0.1-0.2","0.2-0.5",">0.5")
#dat.res <- rbind(c("",colnames(datL)),cbind(c("<0.1","0.1-0.2","0.2-0.5",">0.5"),dat.res))
#res.res <- rbind(c("",colnames(datL)),cbind(c("<0.1","0.1-0.2","0.2-0.5",">0.5"),res.res))
#res.resG <- rbind(c("",colnames(datL)),cbind(c("<0.1","0.1-0.2","0.2-0.5",">0.5"),res.resG))
avg.cc<-list(data.cor=datL,resid.cor=resL,resid.corG=resL.g,dat.res=dat.res,res.res=res.res,res.resG=res.resG)
return(avg.cc)
}
#' @name conv.diag
#' @export
#' @title MCMC Convergence Diagnostics
#' @description This function computes Geweke's Convergence diagnostic making use of the \code{coda} package.
#' @usage conv.diag(object, crit.val=1.96)
#' @param object A fitted \code{bgvar} object.
#' @param crit.val Critical value used for test statistic.
#' @details Geweke (1992) proposed a convergence diagnostic for Markov chains based on a test for equality of the means of the first and last part of a Markov chain (by default we use the first 10\% and the last 50\%). If the samples are drawn from the stationary distribution of the chain, the two means are equal and Geweke's statistic has an asymptotically standard normal distribution. The test statistic is a standard Z-score: the difference between the two sample means divided by its estimated standard error. The standard error is estimated from the spectral density at zero and so takes into account any autocorrelation.
#' @return Returns an object of class \code{bgvar.CD}. This is a list with \describe{
#' \item{\code{geweke.z}}{ Z-scores for a test of equality of means between the first and last parts of the chain. A separate statistic is calculated for each variable in each chain.}
#' \item{\code{perc}}{ is the percentage of Z-scores exceeding \code{crit.val} (in absolute terms).}
#' }
#' @seealso
#' \code{\link[coda]{geweke.diag}} in the \code{coda} package.
#' \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @author Martin Feldkircher
#' @examples
#' \donttest{
#' library(BGVAR)
#' data(testdata)
#' model.mn <- bgvar(Data=testdata,W=W.test,plag=1,draws=200,burnin=200,prior="MN")
#' geweke <- conv.diag(model.mn)
#' }
#' @references
#' Geweke, J. (1992) Evaluating the accuracy of sampling-based approaches to calculating posterior moments. \emph{Bayesian Statistics} 4 (edited by JM Bernado, JO Berger, AP Dawid and AFM Smith). Clarendon Press, Oxford, UK.
#' @importFrom coda mcmc geweke.diag
conv.diag<-function(object, crit.val=1.96){
if(!inherits(object, "bgvar")) {stop("Please provide a `bgvar` object.")}
ALPHA <- object$stacked.results$A_large
draws <- dim(ALPHA)[3]
d1 <- dim(ALPHA)[1]
d2 <- dim(ALPHA)[2]
K <- d1*d2
geweke.z<-NULL
for(i in 1:d1){
for(j in 1:d2){
mcmc.obj<-mcmc(ALPHA[i,j,])
geweke<-try(geweke.diag(mcmc.obj),silent=TRUE)
if(!is(geweke,"try-error")){
geweke.z<-c(geweke.z,as.numeric(geweke$z))
}else{
K<-K-1
}
}
}
idx<-which(abs(geweke.z)>crit.val) # if z is smaller or greater than 1.96 there is evidence that the means of both distributions are different
  xx<-paste(length(idx), " out of ",K, " variables' z-values exceed the ",crit.val," threshold", " (", round(length(idx)/K*100,2),"%).",sep="")
return <- structure(list(geweke.z=geweke.z,perc=xx), class="bgvar.CD")
return(return)
}
#' @method print bgvar.CD
#' @export
print.bgvar.CD <- function(x, ...){
cat(x$perc)
}
#' @name resid.corr.test
#' @export resid.corr.test
#' @title Residual Autocorrelation Test
#' @description An F-test for serial autocorrelation in the residuals.
#' @usage resid.corr.test(obj, lag.cor=1, alpha=0.95, dig1=5, dig2=3)
#' @param obj An object of class \code{bgvar}.
#' @param lag.cor The order of serial correlation to be tested for. Default is set to \code{lag.cor=1}.
#' @param alpha Significance level of test. Default is set to \code{alpha=0.95}.
#' @param dig1 Number of digits to display F-statistics and its critical values.
#' @param dig2 Number of digits to display p-values.
#' @details It is the F-test of the familiar Lagrange Multiplier (LM) statistic (see Godfrey 1978a, 1978b), also known as the 'modified LM' statistic. The null hypothesis is that \eqn{rho}, the autoregressive parameter on the residuals, equals 0 indicating absence of serial autocorrelation. For higher order serial correlation, the null is that all \eqn{rho}'s jointly are 0. The test is implemented as in Vanessa Smith's and Alessandra Galesi's ''GVAR toolbox 2.0 User Guide'', page 129.
#' @return Returns a list with the following objects \describe{
#' \item{\code{Fstat}}{ contains a list of length \code{N} with the associated F-statistic for each variable in each country.}
#' \item{\code{resTest}}{ contains a matrix of size 2N times K+3, with the F-statistics for each country and each variable.}
#' \item{\code{p.res}}{ contains a table which summarizes the output.}
#' \item{\code{pL}}{ contains a list of length \code{N} with the associated p-values for each variable in each country.}
#' }
#' @author Martin Feldkircher
#' @seealso \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @references
#' Godfrey, L.G. (1978a) \emph{Testing Against General Autoregressive and Moving Average Error Models When the Regressors Include Lagged Dependent Variables.} Econometrica, 46, pp. 1293-1302.
#' Godfrey, L.G. (1978b) \emph{Testing for Higher Order Serial Correlation in Regression Equations When the Regressors Include Lagged Dependent Variables.} Econometrica, 46, pp. 1303-1310.
#' Smith, L. V. and A. Galesi (2014) \emph{GVAR Toolbox 2.0 User Guide}, available at \url{https://sites.google.com/site/gvarmodelling/gvar-toolbox}.
#' @examples
#' \donttest{
#' library(BGVAR)
#' data(testdata)
#' model.ng <- bgvar(Data=testdata,W=W.test,draws=100,burnin=100)
#' resid.corr.test(model.ng)
#' }
#' @importFrom stats pf qf
resid.corr.test=function(obj, lag.cor=1, alpha=0.95, dig1=5, dig2=3){
# Residual correlation test
# Tests the residuals of the country VECM models for serial autocorrelation
# Input arguments:
# x......GVAR object
# lag....the degree of serial autocorrelation that should be tested for
# alpha..significance level
# dig1 / dig2...nr. of digits for the test statistic / p-value
if(!inherits(obj, "bgvar")) {stop("Please provide a `bgvar` object.")}
# get data and arguments - note each second column of V has sign switched -> does not impact on results of F test so keep it as it is
xglobal <- obj$xglobal
res <- obj$cc.results$res
lags <- obj$args$lags
pmax <- max(lags)
bigT <- nrow(xglobal)-pmax
pidx <- 1:lag.cor
varNames <- colnames(xglobal)
cN <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[1]))
vars <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[2]))
# helper function to construct W projector matrix
w.t<-function(x,lag=1){
x.n<-c(rep(0,lag),rev(rev(x)[-c(1:lag)]))
return(x.n)
}
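  # e.g. w.t(c(1,2,3,4), lag=1) returns c(0,1,2,3), i.e. the residual series shifted down by one period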
# Calculate F-Statistic in a loop
Fstat<-critL<-pL<-dofL<-list() # list objects since not for every country some nr. of regressors
for(cc in 1:length(cN)){
idx <- grep(paste("^",cN[cc],".",sep=""),varNames)
X.dat <- xglobal[-c(1:pmax),idx,drop=FALSE]
r.dat <- res[[cN[cc]]]
ki <- ncol(X.dat)
dof <- (bigT-ki-lag.cor)
M <- diag(bigT)-tcrossprod(X.dat%*%chol2inv(chol(crossprod(X.dat))),X.dat)
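    # M is the annihilator matrix I - X(X'X)^{-1}X' of the country regressors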
# construct W matrix
w.array <- array(0,dim=c(bigT,ki,lag.cor))
faux<-critV<-pV<-NULL
for(j in 1:ki){ # for all variables
w<-NULL
for(p in 1:lag.cor){
w<-cbind(w,w.t(r.dat[,j],lag=p))
}
aux <- bigT*((crossprod(r.dat[,j],w)%*%solve(t(w)%*%M%*%w)%*%(t(w)%*%r.dat[,j]))/crossprod(r.dat[,j]))
faux <- c(faux,(dof/lag.cor)*(aux/(bigT-aux)))
pV <- c(pV,c(1-pf(aux,lag.cor,dof)))
}
names(faux) <- sapply(strsplit(colnames(X.dat),".",fixed=TRUE),function(x) x[[2]])
Fstat[[cc]] <- faux
critL[[cc]] <- critV <-c(critV,qf(alpha, lag.cor,dof))
pL[[cc]] <- pV
dofL[[cc]] <- dof
}
names(Fstat) <- names(pL) <- cN
# Generate Output
resTest<-array("-",dim=c(length(cN)*2,(length(vars)+3)))
colnames(resTest)<-c("Country","DoF",paste("F-crit."," (",alpha,")",sep=""),vars)
arrayIdx<-(1:(length(cN)*2))[c(TRUE, FALSE)]
for(i in 1:length(arrayIdx)){
resTest[arrayIdx[i],1:3]<-c(cN[i],paste("F(",lag.cor," ,",dofL[[i]],")",sep=""),format(round(critL[[i]],dig1)))
resTest[arrayIdx[i],names(Fstat[[i]])]<-c(format(round(Fstat[[i]],dig1)))
resTest[arrayIdx[i]+1,names(Fstat[[i]])]<-paste("(",format(round(pL[[i]],dig2)),")",sep="")
}
# Generate p-table
pp<-unlist(pL);K<-length(pp)
p.res<-matrix(0,nrow=4,ncol=2)
p.res[1,]<-c(length(which(pp>0.10)),paste(round((length(which(pp>0.10))/K)*100,2),"%",sep=""))
p.res[2,]<-c(length(which(pp>0.05& pp<=0.1)),paste(round((length(which(pp>0.05& pp<=0.1))/K)*100,2),"%",sep=""))
p.res[3,]<-c(length(which(pp>0.01& pp<=0.05)),paste(round((length(which(pp>0.01& pp<=0.05))/K)*100,2),"%",sep=""))
p.res[4,]<-c(length(which(pp<=0.01)),paste(round((length(which(pp<=0.01))/K)*100,2),"%",sep=""))
rownames(p.res)<-c(">0.1","0.05-0.1","0.01-0.05","<0.01")
colnames(p.res)<-c("# p-values", "in %")
res<-list(Fstat=Fstat,resTest=resTest,p.res=p.res,pL=pL)
return(res)
}
#' @name matrix_to_list
#' @export
#' @title Convert Input Matrix to List
#' @description Converts a big input matrix to an appropriate list for use of \code{bgvar}.
#' @usage matrix_to_list(datamat)
#' @details Note the naming convention. Columns should indicate entity and variable name, separated by a dot, e.g. \code{US.y}.
#' @param datamat A matrix of size T times K, where T are time periods and K total amount of variables.
#' @return returns a list of length \code{N} (number of entities).
#' @author Maximilian Boeck
#' @seealso \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @importFrom stats time
matrix_to_list <- function(datamat){
if(any(is.na(datamat))){
stop("The data you have submitted contains NAs. Please check the data.")
}
if(!all(grepl("\\.",colnames(datamat)))){
stop("Please seperate country- and variable names with a point.")
}
cN <- unique(unlist(lapply(strsplit(colnames(datamat),".",fixed=TRUE),function(l) l[1])))
N <- length(cN)
if(!all(nchar(cN)>1)){
stop("Please provide entity names with minimal two characters.")
}
datalist <- list()
for(cc in 1:N){
    datalist[[cN[cc]]] <- datamat[,grepl(paste0("^",cN[cc],"\\."),colnames(datamat)),drop=FALSE]
colnames(datalist[[cN[cc]]]) <- unlist(lapply(strsplit(colnames(datalist[[cN[cc]]]),".",fixed=TRUE),function(l)l[2]))
}
return(datalist)
}
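# Illustrative (not run): a T x 4 matrix with columns "US.y","US.Dp","EA.y","EA.Dp" is split
# into a list with elements $US and $EA, each of dimension T x 2 with columns "y" and "Dp".
# datamat  <- matrix(rnorm(40), 10, 4,
#                    dimnames=list(NULL, c("US.y","US.Dp","EA.y","EA.Dp")))
# datalist <- matrix_to_list(datamat)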
#' @name list_to_matrix
#' @export
#' @title Convert Input List to Matrix
#' @description Converts a list to an appropriate input matrix for use of \code{bgvar}.
#' @usage list_to_matrix(datalist)
#' @details Note the naming convention. Columns should indicate entity and variable name, separated by a dot, e.g. \code{US.y}.
#' @param datalist A list of length \code{N} which contains each a matrix of size T times k, where T are time periods and k variables per entity.
#' @return Returns a matrix of size T times K (number of time periods times number of total variables).
#' @author Maximilian Boeck
#' @seealso \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @importFrom stats time
list_to_matrix <- function(datalist){
if(any(unlist(lapply(datalist,function(l)any(is.na(l)))))){
stop("The data you have submitted contains NAs. Please check the data.")
}
if(!all(nchar(names(datalist))>1)){
stop("Please provide entity names with minimal two characters.")
}
cN <- names(datalist)
N <- length(cN)
datamat<-NULL
cc<-NULL
for(i in 1:N){
datamat<-cbind(datamat,datalist[[i]])
cc<-c(cc,paste0(cN[i],".",colnames(datalist[[i]]),sep=""))
}
colnames(datamat)<-cc
return(datamat)
}
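# Illustrative (not run): the inverse operation of matrix_to_list(); a list with elements
# $US and $EA (each T x k) is stacked into one T x K matrix with columns "US.y", ..., "EA.Dp".
# datalist <- list(US = matrix(rnorm(20), 10, 2, dimnames=list(NULL, c("y","Dp"))),
#                  EA = matrix(rnorm(20), 10, 2, dimnames=list(NULL, c("y","Dp"))))
# datamat  <- list_to_matrix(datalist)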
#' @name excel_to_list
#' @export
#' @title Read Data from Excel
#' @description Reads a spreadsheet from excel and converts it to a list for use of \code{bgvar}.
#' @usage excel_to_list(file, first_column_as_time=TRUE, skipsheet=NULL, ...)
#' @details Note that each sheet has to be named for a respective country. Column names are used as variable names. The reader uses the \code{readxl} R package, hence additional arguments can be passed to the function. Furthermore, if \code{first_column_as_time=TRUE} the first column has to contain the time index in character format (convertible via \code{as.Date}).
#' @param file A path to the file.
#' @param first_column_as_time Logical indicating whether the first column indicates the time.
#' @param skipsheet If one or more sheets should be skipped for reading, this can be provided with this argument. Either a vector of numeric indices or a vector of strings.
#' @param ... Additional arguments.
#' @return Returns a list of length \code{N} which contains each a matrix of size T times k, where T are time periods and k variables per entity.
#' @author Maximilian Boeck
#' @seealso \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @importFrom readxl excel_sheets excel_format read_xls read_xlsx
#' @importFrom xts xts
excel_to_list <- function(file, first_column_as_time=TRUE, skipsheet=NULL, ...){
if(!file.exists(file))
stop("The provided file does not exist.")
if(!grepl("(xls|xlsx)$",file))
stop("Please provide a path to an excel filesheet (ending with xls/xlsx).")
skiptype <- typeof(skipsheet)
if(!(skiptype %in% c("numeric","character","NULL")))
stop("Please provide skipsheet argument in right format.")
cN <- excel_sheets(file)
if(skiptype == "character")
cN <- cN[!cN%in%skipsheet]
if(skiptype == "numeric")
cN <- cN[-skipsheet]
format <- excel_format(file)
datalist <- list()
for(cc in cN){
if(format == "xls"){
temp <- read_xls(path = file, sheet = cc, col_names = TRUE, ...)
}else if(format == "xlsx"){
temp <- read_xlsx(path = file, sheet = cc, col_names = TRUE, ...)
}
if(first_column_as_time){
if(typeof(as.matrix(temp[,1])) != "character")
stop(paste0("Please provide as first column in sheet ",cc," as time in character format."))
time <- as.Date(c(as.matrix(temp[,1])))
temp <- as.matrix(temp[,2:ncol(temp)])
temp <- xts(temp, order.by=time)
}else{
temp <- as.matrix(temp)
}
datalist[[cc]] <- temp
}
return(datalist)
}
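# Illustrative (not run, hypothetical file name): read an excel workbook with one sheet per
# country, skipping a documentation sheet; the first column holds the dates as character strings.
# datalist <- excel_to_list("gvar_data.xlsx", first_column_as_time=TRUE, skipsheet="ReadMe")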
/scratch/gouwar.j/cran-all/cranData/BGVAR/R/helpers.R
#' @export
"irf" <- function(x, n.ahead=24, shockinfo=NULL, quantiles=NULL, expert=NULL, verbose=TRUE){
UseMethod("irf", x)
}
#' @name irf
#' @title Impulse Response Function
#' @description This function calculates three alternative ways of dynamic responses, namely generalized impulse response functions (GIRFs) as in Pesaran and Shin (1998), orthogonalized impulse response functions using a Cholesky decomposition and finally impulse response functions given a set of user-specified sign restrictions.
#' @export
#' @usage irf(x, n.ahead=24, shockinfo=NULL, quantiles=NULL,
#' expert=NULL, verbose=TRUE)
#' @param x Object of class \code{bgvar}.
#' @param n.ahead Forecasting horizon.
#' @param shockinfo Dataframe with additional information about the nature of shocks. Depending on the \code{ident} argument, the dataframe has to be specified differently. In order to get a dummy version for each identification scheme use \code{\link{get_shockinfo}}.
#' @param quantiles Numeric vector with posterior quantiles. Default is set to compute median along with 68\%/80\%/90\% confidence intervals.
#' @param expert Expert settings, must be provided as list. Default is set to \code{NULL}.\describe{
#' \item{\code{MaxTries}}{ Numeric specifying maximal number of tries for finding a rotation matrix with sign-restrictions. Attention: setting this number very large may result in very long computational times. Default is set to \code{MaxTries=100}.}
#' \item{\code{save.store}}{ If set to \code{TRUE} the full posterior of both, impulses responses and rotation matrices, are returned. Default is set to \code{FALSE} in order to save storage.}
#' \item{\code{use_R}}{ Boolean whether IRF computation should fall back on \code{R} version, otherwise \code{Rcpp} version is used.}
#' \item{\code{applyfun}}{ In case \code{use_R=TRUE}, this allows for a user-specific apply function, which has to have the same interface as \code{lapply}. If \code{cores=NULL} then \code{lapply} is used; if set to a numeric, \code{parallel::parLapply()} is used on Windows platforms and \code{parallel::mclapply()} on non-Windows platforms.}
#' \item{\code{cores}}{ Numeric specifying the number of cores which should be used; \code{all} and \code{half} are also possible. By default only one core is used.}
#' }
#' @param verbose If set to \code{FALSE} it suppresses printing messages to the console.
#' @return Returns a list of class \code{bgvar.irf} with the following elements: \describe{
#' \item{\code{posterior}}{ Four-dimensional array (K times n.ahead times number of shocks times Q) that contains Q quantiles of the posterior distribution of the impulse response functions.}
#' \item{\code{shockinfo}}{ Dataframe with details on identification specification.}
#' \item{\code{rot.nr}}{ In case identification is based on sign restrictions (i.e., \code{ident="sign"}), this provides the number of rotation matrices found for the number of posterior draws (save*save_thin).}
#' \item{\code{struc.obj}}{ List object that contains posterior quantitites needed when calculating historical decomposition and structural errors via \code{hd.decomp}.\describe{
#' \item{\code{A}}{ Median posterior of global coefficient matrix.}
#' \item{\code{Ginv}}{ Median posterior of matrix \code{Ginv}, which describes contemporaneous relationships between countries.}
#' \item{\code{S}}{ Posterior median of matrix with country variance-covariance matrices on the main diagonal.}
#' \item{\code{Rmed}}{ Posterior rotation matrix if \code{ident="sign"}.}
#' }}
#' \item{\code{model.obj}}{ List object that contains model-specific information, in particular\describe{
#' \item{\code{xglobal}}{ Data of the model.}
#' \item{\code{lags}}{ Lag specification of the model.}
#' }}
#' \item{\code{IRF_store}}{ Four-dimensional array (K times n.ahead times number of shock times draws) which stores the whole posterior distribution. Exists only if \code{save.store=TRUE}.}
#' \item{\code{R_store}}{ Three-dimensional array (K times K times draws) which stores all rotation matrices. Exists only if \code{save.store=TRUE}.}
#' }
#' @author Maximilian Boeck, Martin Feldkircher, Florian Huber
#' @references
#' Arias, J.E., Rubio-Ramirez, J.F, and D.F. Waggoner (2018) \emph{Inference Based on SVARs Identified with Sign and Zero Restrictions: Theory and Applications.} Econometrica Vol. 86(2), pp. 685-720.
#'
#' D'Amico, S. and T. B. King (2017) \emph{What Does Anticipated Monetary Policy Do?} Federal Reserve Bank of Chicago Working paper series, Nr. 2015-10.
#'
#' Pesaran, H.M. and Y. Shin (1998) \emph{Generalized impulse response analysis in linear multivariate models.} Economics Letters, Volume 58, Issue 1, p. 17-29.
#' @examples
#' oldpar <- par(no.readonly = TRUE)
#' # First example, a US monetary policy shock, quarterly data
#' library(BGVAR)
#' data(testdata)
#' # US monetary policy shock
#' model.eer<-bgvar(Data=testdata, W=W.test, draws=100, burnin=100,
#' plag=1, prior="SSVS", eigen=TRUE)
#'
#' # generalized impulse responses
#' shockinfo<-get_shockinfo("girf")
#' shockinfo$shock<-"US.stir"; shockinfo$scale<--100
#'
#' irf.girf.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
#'
#' # cholesky identification
#' shockinfo<-get_shockinfo("chol")
#' shockinfo$shock<-"US.stir"; shockinfo$scale<--100
#'
#' irf.chol.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
#'
#' # sign restrictions
#' shockinfo <- get_shockinfo("sign")
#' shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.y","US.Dp"),
#' sign=c("<","<"), horizon=c(1,1), scale=1, prob=1)
#' irf.sign.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
#'
#' \donttest{
#' # sign restrictions
#' shockinfo <- get_shockinfo("sign")
#' shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.y","US.Dp"),
#' sign=c("<","<"), horizon=c(1,1), scale=1, prob=1)
#' irf.sign.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
#'
#' # sign restrictions with relaxed cross-country restrictions
#' shockinfo <- get_shockinfo("sign")
#' # restriction for other countries holds to 75\%
#' shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.y","EA.y","UK.y"),
#' sign=c("<","<","<"), horizon=1, scale=1, prob=c(1,0.75,0.75))
#' shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.Dp","EA.Dp","UK.Dp"),
#' sign=c("<","<","<"), horizon=1, scale=1, prob=c(1,0.75,0.75))
#' irf.sign.us.mp<-irf(model.eer, n.ahead=20, shockinfo=shockinfo)
#' }
#' @seealso \code{\link{bgvar}}, \code{\link{get_shockinfo}}, \code{\link{add_shockinfo}}
#' @importFrom abind adrop abind
#' @importFrom stochvol sv_normal sv_beta sv_gamma
#' @importFrom RcppParallel RcppParallelLibs setThreadOptions defaultNumThreads
irf.bgvar <- function(x,n.ahead=24,shockinfo=NULL,quantiles=NULL,expert=NULL,verbose=TRUE){
start.irf <- Sys.time()
# get identification
ident <- attr(shockinfo, "ident")
if(is.null(ident)){
ident <- "chol"
}
#--------------- checks ------------------------------------------------------------------------------------#
if(!ident%in%c("chol","girf","sign")){
stop("Please choose available identification scheme!")
}
if(is.null(shockinfo) && ident=="sign"){
stop("Please provide 'shockinfo' argument.")
}
if(is.null(quantiles)){
quantiles <- c(.05,.10,.16,.50,.84,.90,.95)
}
if(!is.numeric(quantiles)){
stop("Please provide 'quantiles' as numeric vector.")
}
if(!is.null(shockinfo)){ # delete double entries
shockinfo<-shockinfo[!duplicated(shockinfo),]
}
#-----------------------------------------------------------------------------------------------------------#
if(verbose) cat("Start computing impulse response functions of Bayesian Global Vector Autoregression.\n\n")
#------------------------------ get stuff -------------------------------------------------------#
lags <- x$args$lags
pmax <- max(lags)
xglobal <- x$xglobal
Traw <- nrow(xglobal)
bigK <- ncol(xglobal)
bigT <- Traw-pmax
A_large <- x$stacked.results$A_large
F_large <- x$stacked.results$F_large
S_large <- x$stacked.results$S_large
Ginv_large <- x$stacked.results$Ginv_large
F.eigen <- x$stacked.results$F.eigen
thindraws <- length(F.eigen) ### prior: draws
Global <- FALSE
if(!is.null(shockinfo)) Global <- ifelse(any(shockinfo$global),TRUE,FALSE)
Rmed <- NULL
rot.nr <- NULL
xdat <- xglobal[(pmax+1):Traw,,drop=FALSE]
varNames <- colnames(xdat)
cN <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x)x[1]))
vars <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x)x[2]))
N <- length(cN)
Q <- length(quantiles)
# expert settings
expert.list <- list(MaxTries=100, save.store=FALSE, use_R=FALSE, applyfun=NULL, cores=NULL)
if(!is.null(expert)){
if(!(is.null(expert$cores) || is.numeric(expert$cores) || expert$cores%in%c("all","half"))){
stop("Please provide the expert argument 'cores' in appropriate form. Please recheck.")
}
for(n in names(expert))
expert.list[[n]] <- expert[[n]]
}
MaxTries <- expert.list$MaxTries
save.store <- expert.list$save.store
use_R <- expert.list$use_R
applyfun <- expert.list$applyfun
cores <- expert.list$cores
#---------------------------- identification schemes --------------------------------------------#
if(ident=="chol")
{
if(verbose){
cat("Identification scheme: Short-run identification via Cholesky decomposition.\n")
}
if(is.null(shockinfo)){
shockinfo <- get_shockinfo("chol", nr_rows = length(varNames))
shockinfo$shock <- varNames
}
if(!all(c("shock","scale")%in%colnames(shockinfo))){
stop("Please provide appropriate dataframe for argument 'shockinfo'. Respecify.")
}
if(!all(shockinfo$shock%in%varNames)){
stop("Please provide shock of 'shockinfo' only to variables available in the dataset used for estimation. Respecify.")
}
irf.fun <- .irf.chol
shock.nr <- nrow(shockinfo)
# shock details
shocks <- shocknames <- unique(shockinfo$shock)
select_shocks <- NULL
for(ss in 1:shock.nr) select_shocks <- c(select_shocks,which(shocks[ss] == varNames))
scale <- shockinfo$scale[!duplicated(shockinfo$shock)]
shock.cN <- sapply(strsplit(shockinfo$shock,".",fixed=TRUE),function(x)x[1])
shock.var <- sapply(strsplit(shockinfo$shock,".",fixed=TRUE),function(x)x[2])
shock.idx <- list()
for(cc in 1:N) shock.idx[[cc]] <- grep(cN[cc],varNames)
shock.cidx <- cN%in%shock.cN
if(Global){
if(length(unique(shock.var[shockinfo$global])) != 1){
stop("Please indicate global shock for same variables. Respecify.")
}
shock.nr <- shock.nr-(sum(shockinfo$global)-1)
scale.new <- rep(1,shock.nr)
shocknames <- shocks
idx_global <- which(shockinfo$global)
shocknames[idx_global[1]] <- paste0("Global.",unique(shock.var[shockinfo$global]))
shocknames <- shocknames[-idx_global[c(2:length(idx_global))]]
shock.global <- list()
tt <- 1
for(ss in 1:shock.nr){
if(shockinfo$global[tt] == TRUE){
shock.global[[shocknames[ss]]] <- varNames%in%shocks[shockinfo$global]
scale.new[ss] <- scale[shockinfo$global][1]
tt<-max(which(shockinfo$global))+1
}else{
shock.global[[shocknames[ss]]] <- varNames%in%shocks[tt]
scale.new[ss] <- scale[tt]
tt<-tt+1
}
}
scale <- scale.new
}
shocklist = list(shock.idx=shock.idx,shock.cidx=shock.cidx,plag=pmax,MaxTries=MaxTries)
}else if(ident=="girf")
{
if(verbose){
cat("Identification scheme: Generalized impulse responses.\n")
}
if(is.null(shockinfo)){
shockinfo <- get_shockinfo("girf", nr_rows = length(varNames))
shockinfo$shock <- varNames
}
if(!all(c("shock","scale")%in%colnames(shockinfo))){
stop("Please provide appropriate dataframe for argument 'shockinfo'. Respecify.")
}
if(!all(shockinfo$shock%in%varNames)){
stop("Please provide shock of 'shockinfo' only to variables available in the dataset used for estimation. Respecify.")
}
if(!is.null(shockinfo)){
shocks <- shocknames <- unique(shockinfo$shock)
scale <- shockinfo$scale[!duplicated(shockinfo$shock)]
}else{
shocks <- shocknames <- varNames
scale <- rep(1,length(shocks))
}
irf.fun <- .irf.girf
shock.nr <- length(shocks)
select_shocks <- NULL
for(ss in 1:shock.nr) select_shocks <- c(select_shocks,which(shocks[ss] == varNames))
shock.idx <- list()
for(cc in 1:N) shock.idx[[cc]] <- grep(cN[cc],varNames)
shock.cidx <- rep(FALSE,N)
if(Global){
shock.var <- sapply(strsplit(shockinfo$shock,".",fixed=TRUE),function(x)x[2])
if(length(unique(shock.var[shockinfo$global])) != 1){
stop("Please indicate global shock for same variables. Respecify.")
}
shock.nr <- shock.nr-(sum(shockinfo$global)-1)
scale.new <- rep(1,shock.nr)
shocknames <- shocks
idx_global <- which(shockinfo$global)
shocknames[idx_global[1]] <- paste0("Global.",unique(shock.var[shockinfo$global]))
shocknames <- shocknames[-idx_global[c(2:length(idx_global))]]
shock.global <- list()
tt <- 1
for(ss in 1:shock.nr){
if(shockinfo$global[tt] == TRUE){
shock.global[[shocknames[ss]]] <- varNames%in%shocks[shockinfo$global]
scale.new[ss] <- scale[shockinfo$global][1]
tt<-max(which(shockinfo$global))+1
}else{
shock.global[[shocknames[ss]]] <- varNames%in%shocks[tt]
scale.new[ss] <- scale[tt]
tt<-tt+1
}
}
scale <- scale.new
}
shocklist = list(shock.idx=shock.idx,shock.cidx=shock.cidx,plag=pmax,MaxTries=MaxTries)
}else if(ident=="sign")
{
# --------------- checks -------------------------------------------------#
if(!all(c("shock","restriction","sign","horizon","scale","prob")%in%colnames(shockinfo))){
stop("Please provide columns 'shock', 'restriction', 'sign', 'horizon' and 'scal' in dataframe 'shockinfo'.")
}
if(!(all(shockinfo$shock%in%varNames) && all(shockinfo$restriction%in%varNames))){
stop("Please provide in columns 'shock' and 'restriction' of 'shockinfo' only variable names available in the dataset used for estimation. Respecify.")
}
    if(!all(shockinfo$sign%in%c(">","<","0","ratio.H","ratio.avg"))){
stop("Misspecification in 'sign'. Only the following is allowed: <, >, 0, ratio.H, ratio.avg")
}
if(verbose){
cat("Identification scheme: identification via sign-restriction.\n")
}
irf.fun<-.irf.sign.zero
shocks <- shocknames <- unique(shockinfo$shock)
shock.nr <- length(shocks)
select_shocks <- NULL
for(ss in 1:shock.nr) select_shocks <- c(select_shocks,which(shocks[ss] == varNames))
shock.cN <- unique(sapply(strsplit(shockinfo$shock,".",fixed=TRUE),function(x)x[1]))
shock.var <- sapply(strsplit(shockinfo$shock,".",fixed=TRUE),function(x)x[2])
shock.idx <- list()
for(cc in 1:N) shock.idx[[cc]] <- grep(cN[cc],varNames)
shock.cidx <- cN%in%shock.cN
scale <- shockinfo$scale[!duplicated(shockinfo$shock)]
if(Global){
if(length(unique(shock.var[shockinfo$global])) != 1){
stop("Please indicate global shock for same variables. Respecify.")
}
shock.nr <- shock.nr-(sum(shockinfo$global[!duplicated(shockinfo$shock)])-1)
scale.new <- rep(1,shock.nr)
shocknames <- shocks
idx_global <- which(shockinfo$global)
shocknames[idx_global[1]] <- paste0("Global.",unique(shock.var[shockinfo$global]))
shocknames <- shocknames[-idx_global[c(2:length(idx_global))]]
shock.global <- list()
tt <- 1
for(ss in 1:shock.nr){
if(shockinfo$global[tt] == TRUE){
shock.global[[shocknames[ss]]] <- varNames%in%shocks[shockinfo$global[!duplicated(shockinfo$shock)]]
scale.new[ss] <- scale[shockinfo$global][1]
tt<-max(which(shockinfo$global))+1
}else{
shock.global[[shocknames[ss]]] <- varNames%in%shockinfo$shock[tt]
scale.new[ss] <- scale[tt]
tt<-tt+1
}
}
scale <- scale.new
}
# check zero/rationality
if(any(shockinfo$sign%in%c("0","ratio.H","ratio.avg"))){
for(ss in 1:length(shocks)){
idx <- shockinfo$sign[grep(shocks[ss],shockinfo$shock)]%in%c("0","ratio.H","ratio.avg")
if(!all(sapply(strsplit(shockinfo$restriction[grep(shocks[ss],shockinfo$shock)[idx]],".",fixed=TRUE),function(x)x[1])==shock.cN[ss])){
stop("Please provide zero and rationality conditions only in same country as the origin of the shock.")
}
}
}
# adjust for rationality conditions
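    # 'ratio.H' ties the impact response of an expectations variable (named with an underscore,
    # e.g. "y_t+4") to the response of the underlying variable at horizon H: it is rewritten as a
    # zero restriction on impact plus a linear restriction with weight -1 at horizon H.
    # 'ratio.avg' instead ties it to the average response over the subsequent horizons.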
if(any(shockinfo$sign=="ratio.H")){
idx <- which(shockinfo$sign=="ratio.H")
for(ii in idx){
Kshock <- nrow(shockinfo)
Mshock <- as.numeric(shockinfo$horizon[ii])
shockinfo[(Kshock+1):(Kshock+2),] <- NA
shockinfo$shock[(Kshock+1):nrow(shockinfo)] <- rep(shockinfo$shock[ii],2)
shockinfo$restriction[(Kshock+1):nrow(shockinfo)] <- c(shockinfo$restriction[ii], strsplit(shockinfo$restriction[ii],"_")[[1]][1])
shockinfo$sign[(Kshock+1):nrow(shockinfo)] <- c("0","-1")
shockinfo$horizon[(Kshock+1):nrow(shockinfo)] <- c(1,Mshock)
shockinfo$scale[(Kshock+1):nrow(shockinfo)] <- rep(shockinfo$scale[ii],2)
shockinfo$prob[(Kshock+1):nrow(shockinfo)] <- rep(shockinfo$prob[ii],2)
}
shockinfo <- shockinfo[-idx,]
rownames(shockinfo)<-seq(1,nrow(shockinfo))
}
if(any(shockinfo$sign=="ratio.avg")){
idx <- which(shockinfo$sign=="ratio.avg")
for(ii in idx){
Kshock <- nrow(shockinfo)
Mshock <- as.numeric(shockinfo$horizon[ii])
shockinfo[(Kshock+1):(Kshock+Mshock),] <- NA
shockinfo$shock[(Kshock+1):nrow(shockinfo)] <- rep(shockinfo$shock[ii],Mshock)
shockinfo$restriction[(Kshock+1):nrow(shockinfo)] <- c(shockinfo$restriction[ii],rep(strsplit(shockinfo$restriction[ii],"_")[[1]][1],Mshock-1))
shockinfo$sign[(Kshock+1):nrow(shockinfo)] <- c("0",rep(-1/(Mshock-1),Mshock-1))
shockinfo$horizon[(Kshock+1):nrow(shockinfo)] <- seq(1,Mshock)
shockinfo$scale[(Kshock+1):nrow(shockinfo)] <- rep(shockinfo$scale[ii],Mshock)
shockinfo$prob[(Kshock+1):nrow(shockinfo)] <- rep(shockinfo$prob[ii],Mshock)
}
shockinfo <- shockinfo[-idx,]
rownames(shockinfo)<-seq(1,nrow(shockinfo))
}
#---------------------------------------------------------------------------
# create Scube and Zcube with signs
sign.horizon <- unique(shockinfo$horizon)
sign.horizon <- sort(sign.horizon, decreasing=FALSE)
sign.shockvars <- unique(shockinfo$shock)
H.restr <- length(sign.horizon)
N.restr <- bigK*H.restr
S.cube <- matrix(0, N.restr, bigK) # sign restriction
P.cube <- matrix(0, N.restr, bigK) # probability of sign-restriction
Z.cube <- array(NA, c(N.restr, N.restr, bigK)) # zero restriction
dimnames(S.cube)[[1]] <- dimnames(Z.cube)[[1]] <- dimnames(Z.cube)[[2]] <- dimnames(P.cube)[[1]] <- paste(rep(varNames,H.restr),".",rep(sign.horizon,each=bigK),sep="")
dimnames(S.cube)[[2]] <- dimnames(Z.cube)[[3]] <- dimnames(P.cube)[[2]] <- varNames
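    # layout: rows of S.cube/P.cube index variable-horizon pairs (variables fastest), columns index
    # the shocks; S.cube holds the signs (+1/-1), P.cube the probability with which a restriction has
    # to hold, and Z.cube collects the zero and linear (rationality) restrictions per shock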
for(vv in 1:length(varNames)){
Z.temp <- matrix(0, N.restr, N.restr)
if(varNames[vv]%in%sign.shockvars){
idx <- which(shockinfo$shock==varNames[vv])
sign.restr <- shockinfo$restriction[idx]
sign.signs <- shockinfo$sign[idx]
sign.horiz <- shockinfo$horizon[idx]
sign.probs <- shockinfo$prob[idx]
s.point <- which(sign.signs=="<"|sign.signs==">")
z.point <- seq(1,length(idx))[-s.point]
# own shock: default is positive and for one period
S.cube[paste(varNames[vv],".1",sep=""),varNames[vv]] <- 1
P.cube[paste(varNames[vv],".1",sep=""),varNames[vv]] <- 1
if(length(s.point)>0){
for(ss in 1:length(s.point)){
S.cube[paste(sign.restr[s.point[ss]],sign.horiz[s.point[ss]],sep="."),varNames[vv]] <- ifelse(sign.signs[s.point[ss]]=="<",-1,1)
P.cube[paste(sign.restr[s.point[ss]],sign.horiz[s.point[ss]],sep="."),varNames[vv]] <- sign.probs[s.point[ss]]
}
}
if(length(z.point)>0){
for(zz in 1:length(z.point)){
if(sign.signs[z.point[zz]]=="0"){
grp <- which(sign.horiz[z.point[zz]] == sign.horizon)
row <- (grp-1)*bigK+which(sign.restr[z.point[zz]]==varNames)
Z.temp[row,row] <- 1
}else{ # take row from above
grp <- which(sign.horiz[z.point[zz]] == sign.horizon)
col <- (grp-1)*bigK+which(sign.restr[z.point[zz]]==varNames)
Z.temp[row,col] <- as.numeric(sign.signs[z.point[zz]])
}
}
}
}
Z.cube[,,vv] <- Z.temp
}
# stuff needed for zero-restriction
no.zero.restr <- rep(TRUE,N)
shock.order <- seq(bigK)
for(cc in 1:N){
idx <- shock.idx[[cc]]
no.zero.restr[cc] <- ifelse(base::sum(abs(Z.cube[,,idx]))>0,FALSE,TRUE)
shock.names <- names(sort(apply(Z.cube[,,idx], 3, function(x) base::sum(abs(x))), decreasing=TRUE))
for(kk in 1:length(shock.names)) shock.order[idx[kk]] <- which(shock.names[kk]==varNames)
}
#---------------------------------------------------------------------------
shocklist <- list(shock.idx=shock.idx,shock.cidx=shock.cidx,MaxTries=MaxTries,S.cube=S.cube,Z.cube=Z.cube,P.cube=P.cube,
shock.order=shock.order,shock.horz=sign.horizon,plag=pmax,no.zero.restr=no.zero.restr)
rm(S.cube,Z.cube,P.cube)
}
#------------------------------ prepare applyfun --------------------------------------------------------#
if(is.null(applyfun)) {
applyfun <- if(is.null(cores)) {
lapply
} else {
if(.Platform$OS.type == "windows") {
cl_cores <- parallel::makeCluster(cores)
on.exit(parallel::stopCluster(cl_cores))
function(X, FUN, ...) parallel::parLapply(cl = cl_cores, X, FUN, ...)
} else {
function(X, FUN, ...) parallel::mclapply(X, FUN, ..., mc.cores =
cores)
}
}
}
if(is.null(cores)) cores <- 1
#------------------------------ container -------------------------------------------------------#
# initialize objects to save IRFs, HDs, etc.
if(ident=="sign"){
R_store <- array(NA_real_, dim=c(bigK,bigK,thindraws), dimnames=list(colnames(xglobal),colnames(xglobal),NULL))
}else{
R_store <- NULL
}
IRF_store <- array(NA_real_, dim=c(bigK,bigK,n.ahead+1,thindraws), dimnames=list(colnames(xglobal),paste0("shock_",colnames(xglobal)),seq(0,n.ahead),NULL))
imp_posterior <- array(NA_real_, dim=c(bigK,n.ahead+1,shock.nr,Q))
dimnames(imp_posterior) <- list(colnames(xglobal),seq(0,n.ahead),shocknames,paste0("Q",quantiles*100))
#------------------------------ start computing irfs ---------------------------------------------------#
start.comp <- Sys.time()
if(verbose) cat(paste("Start impulse response analysis on ", cores, " core",ifelse(cores>1,"s",""), " (",thindraws," stable draws in total).",sep=""),"\n")
if(use_R)
{
#--------------------------------------------------------------
# r-version
counter <- numeric(length=thindraws)
imp.obj <- applyfun(1:thindraws,function(irep){
Ginv <- Ginv_large[,,irep]
Fmat <- adrop(F_large[,,,irep,drop=FALSE],drop=4)
Smat <- S_large[,,irep]
imp.obj <- irf.fun(xdat=xdat,plag=pmax,n.ahead=n.ahead,Ginv=Ginv,Fmat=Fmat,Smat=Smat,shocklist=shocklist)
if(verbose){
if(ident=="sign"){
if(!any(is.null(imp.obj$rot))){
cat("\n",as.character(Sys.time()), "MCMC draw", irep, ": rotation found after ",imp.obj$icounter," tries", "\n")
}else{
cat("\n",as.character(Sys.time()), "MCMC draw", irep, ": no rotation found", "\n")
}
}
}
return(list(impl=imp.obj$impl,rot=imp.obj$rot,icounter=imp.obj$icounter))
})
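    # collect results draw by draw; each processed list element is set to NULL afterwards
    # (so imp.obj[[1]] always refers to the next unprocessed draw) to keep memory usage low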
for(irep in 1:thindraws){
counter[irep] <- imp.obj[[1]]$icounter
if(imp.obj[[1]]$icounter == MaxTries){
imp.obj[[1]] <- NULL
}else{
IRF_store[,,,irep] <- imp.obj[[1]]$impl
if(ident=="sign") R_store[,,,irep] <- imp.obj[[1]]$rot
imp.obj[[1]] <- NULL
}
if(irep %% 50 == 0) gc() # free up memory
}
rm(imp.obj)
}else{ # cpp-version
#--------------------------------------------------------------
# adjust indexes due to different indexation (starting with zero in cpp)
shocklist$shock.idx<-lapply(shocklist$shock.idx,function(l)l-1)
shocklist$shock.horz <- shocklist$shock.horz-1
shocklist$shock.order <- shocklist$shock.order-1
# type
type <- ifelse(ident=="chol",1,ifelse(ident=="girf",2,3))
counter <- numeric(length=thindraws)
save_rot <- ifelse(ident=="sign",TRUE,FALSE)
# Rcpp::sourceCpp("./src/irf.cpp")
# Rcpp::sourceCpp("/users/mboeck/documents/packages/bgvar/src/irf.cpp")
temp = compute_irf(A_large=A_large,S_large=S_large,Ginv_large=Ginv_large,type=type,nhor=n.ahead+1,thindraws=thindraws,shocklist_in=shocklist,save_rot=save_rot,verbose=verbose)
for(irep in 1:thindraws){
counter[irep] <- temp$counter[irep,1]
if(temp$counter[irep,1] == MaxTries){
temp$irf[[1]] <- NULL; temp$rot[[1]] <- NULL
}else{
IRF_store[,,,irep] <- temp$irf[[1]]
if(ident=="sign") R_store[,,irep] <- temp$rot[[1]]
temp$irf[[1]] <- NULL; temp$rot[[1]] <- NULL
}
if(irep %% 50 == 0) gc() # free up memory
}
rm(temp)
# transform back to R-version
shocklist$shock.idx = lapply(shocklist$shock.idx,function(l)l+1)
shocklist$shock.horz = shocklist$shock.horz+1
shocklist$shock.order = shocklist$shock.order+1
}
end.comp <- Sys.time()
diff.comp <- difftime(end.comp,start.comp,units="mins")
mins <- round(diff.comp,0); secs <- round((diff.comp-floor(diff.comp))*60,0)
if(verbose) cat(paste("\nImpulse response analysis took ",mins," ",ifelse(mins==1,"min","mins")," ",secs, " ",ifelse(secs==1,"second.\n","seconds.\n"),sep=""))
#------------------------------ post processing ---------------------------------------------------#
# re-set IRF object in case we have found only a few rotation matrices
if(ident=="sign")
{
idx <- which(counter!=MaxTries)
rot.nr<-paste("For ", length(idx), " draws out of ", thindraws, " draws, a rotation matrix has been found.")
if(length(idx)==0){
stop("No rotation matrix found with imposed sign restrictions. Please respecify.")
}
if(verbose) cat(rot.nr)
# subset posterior draws
#IRF_store <- IRF_store[,,,idx,drop=FALSE]
#R_store <- R_store[,,idx,drop=FALSE]
Ginv_large<-Ginv_large[,,idx,drop=FALSE]
A_large <- A_large[,,idx,drop=FALSE]
S_large <- S_large[,,idx,drop=FALSE]
thindraws <- length(idx)
}
# Subset to shocks under consideration
if(Global){
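    # Global shocks: responses are summed over all member variables of the shock and
    # afterwards normalized such that the impact response of the first member equals 'scale'.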
impulse <- NULL
for(ss in 1:shock.nr){
temp <- apply(IRF_store[,shock.global[[ss]],,,drop=FALSE],c(1,3,4),sum)
Mean <- temp[which(shock.global[[ss]])[1],1,]
for(irep in 1:thindraws){
temp[,,irep]<-(temp[,,irep]/Mean[irep])*scale[ss]
}
impulse <- abind(impulse,temp,along=4)
}
IRF_store <- aperm(impulse,c(1,4,2,3))
dimnames(IRF_store)[[2]] <- names(shock.global)
}else{
IRF_store <- IRF_store[,select_shocks,,,drop=FALSE]
for(ss in 1:shock.nr){
Mean<-IRF_store[select_shocks[ss],ss,1,]
for(irep in 1:thindraws){
IRF_store[,ss,,irep]<-(IRF_store[,ss,,irep]/Mean[irep])*scale[ss]
}
}
}
  # Compute posterior quantiles of the impulse responses
for(ss in 1:shock.nr){
for(qq in 1:Q){
imp_posterior[,,ss,qq] <- apply(IRF_store[,ss,,],c(1,2),quantile,quantiles[qq],na.rm=TRUE)
}
}
# calculate objects needed for HD and struc shock functions later---------------------------------------------
# median quantitities
A <- apply(A_large,c(1,2),median)
Fmat <- apply(F_large,c(1,2,3),median)
Ginv <- apply(Ginv_large,c(1,2),median)
Smat <- apply(S_large,c(1,2),median)
Sigma_u <- Ginv%*%Smat%*%t(Ginv)
if(ident=="sign")
{
imp.obj <- try(irf.fun(xdat=xdat,plag=pmax,n.ahead=n.ahead,Ginv=Ginv,Fmat=Fmat,Smat=Smat,shocklist=shocklist),silent=TRUE)
if(!is(imp.obj,"try-error")){
Rmed<-imp.obj$rot
}else{
Rmed<-NULL
}
}
struc.obj <- list(A=A,Fmat=Fmat,Ginv=Ginv,Smat=Smat,Rmed=Rmed)
model.obj <- list(xglobal=xglobal,lags=lags)
#--------------------------------- prepare output----------------------------------------------------------------------#
out <- structure(list("posterior" = imp_posterior,
"ident" = ident,
"shockinfo" = shockinfo,
"rot.nr" = rot.nr,
"struc.obj" = struc.obj,
"model.obj" = model.obj),
class="bgvar.irf")
if(save.store){
out$IRF_store = IRF_store
out$R_store = R_store
}
if(verbose) cat(paste("\nSize of irf object: ", format(object.size(out),unit="MB")))
end.irf <- Sys.time()
diff.irf <- difftime(end.irf,start.irf,units="mins")
mins.irf <- round(diff.irf,0); secs.irf <- round((diff.irf-floor(diff.irf))*60,0)
if(verbose) cat(paste("\nNeeded time for impulse response analysis: ",mins.irf," ",ifelse(mins.irf==1,"min","mins")," ",secs.irf, " ",ifelse(secs.irf==1,"second.","seconds.\n"),sep=""))
return(out)
}
#' @method print bgvar.irf
#' @export
print.bgvar.irf <- function(x, ...){
cat("---------------------------------------------------------------------------------------")
cat("\n")
cat("Object contains impulse responses of object estimated with 'bgvar':")
cat("\n")
cat(paste0("Size of posterior containing impulse responses: ",dim(x$posterior)[[1]]," x ",dim(x$posterior)[[2]]," x ",dim(x$posterior)[[3]]," x ",dim(x$posterior)[[4]],"."))
cat("\n")
cat(paste0("Number of shocks identified: ",dim(x$posterior)[[3]],"."))
cat("\n")
cat("Identification scheme: ")
if(x$ident=="chol"){
cat("Short-run restrictions via Cholesky decomposition.")
}else if(x$ident=="sign"){
cat("Sign-restrictions.")
}else if(x$ident=="girf"){
cat("Generalized - no identification scheme employed.")
}
cat("\n")
cat(paste0("Size ob object: ",format(object.size(x),unit="MB")))
cat("\n")
cat("---------------------------------------------------------------------------------------")
return(invisible(x))
}
#' @name get_shockinfo
#' @title Create \code{shockinfo} argument
#' @description Creates dummy \code{shockinfo} argument for appropriate use in \code{irf} function.
#' @param ident Definition of identification scheme, either \code{chol}, \code{girf} or \code{sign}.
#' @param nr_rows Number of rows in the created dataframe.
#' @details Depending on the identification scheme, a different \code{shockinfo} argument is needed in the \code{irf} function. To handle this conveniently, this function creates an appropriately structured data.frame.
#' @usage get_shockinfo(ident="chol", nr_rows=1)
#' @seealso \code{\link{irf}}
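#' @examples
#' # Illustrative sketch (the variable name "US.stir" is only an assumption based
#' # on the package's example dataset):
#' sinfo <- get_shockinfo(ident="chol", nr_rows=1)
#' sinfo$shock <- "US.stir"
#' sinfo$scale <- 1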
#' @export
get_shockinfo <- function(ident="chol", nr_rows=1){
if(ident == "chol"){
df <- data.frame(shock=rep(NA,nr_rows),scale=rep(1,nr_rows),global=rep(FALSE,nr_rows))
attr(df, "ident") <- "chol"
}else if(ident == "girf"){
df <- data.frame(shock=rep(NA,nr_rows),scale=rep(1,nr_rows),global=rep(FALSE,nr_rows))
attr(df, "ident") <- "girf"
}else if(ident=="sign"){
df <- data.frame(shock=rep(NA,nr_rows),restriction=rep(NA,nr_rows),sign=rep(NA,nr_rows),
horizon=rep(NA,nr_rows),scale=rep(NA,nr_rows),prob=rep(NA,nr_rows),global=rep(NA,nr_rows))
attr(df, "ident") <- "sign"
}
return(df)
}
#' @name add_shockinfo
#' @title Adding shocks to 'shockinfo' argument
#' @description Automatically adds rows to the 'shockinfo' data.frame for appropriate use in \code{irf}.
#' @usage add_shockinfo(shockinfo=NULL, shock=NULL, restriction=NULL, sign=NULL, horizon=NULL,
#' prob=NULL, scale=NULL, global=NULL, horizon.fillup=TRUE)
#' @param shockinfo Dataframe to append shocks. If \code{shockinfo=NULL} appropriate dataframe for sign-restrictions will be created.
#' @param shock String element. Variable of interest for structural shock. Only possible to add restrictions to one structural shock at a time.
#' @param restriction Character vector with variables that are supposed to be sign restricted.
#' @param sign Character vector with signs.
#' @param horizon Numeric vector with horizons up to which the restrictions should hold. Set \code{horizon.fillup} to \code{FALSE} to restrict only the specified horizon.
#' @param prob Number between zero and one determining the probability with which restriction is supposed to hold.
#' @param scale Scaling parameter.
#' @param global If set to \code{TRUE}, shock is defined as global shock.
#' @param horizon.fillup If \code{TRUE} (default), the restriction is imposed on all horizons up to the one specified in \code{horizon}; otherwise only that specific horizon is restricted.
#' @details This is only possible for sign restrictions, i.e., if \code{ident="sign"} was used in \code{get_shockinfo()}.
#' @seealso \code{\link{irf}}
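#' @examples
#' # Illustrative sketch of a sign-restriction scheme; the variable names
#' # ("US.stir", "US.y", "US.Dp") are assumptions based on the package's
#' # example dataset, not a prescription.
#' sinfo <- get_shockinfo(ident="sign")
#' sinfo <- add_shockinfo(sinfo, shock="US.stir",
#'                        restriction=c("US.y","US.Dp"),
#'                        sign=c("<","<"), horizon=c(4,4),
#'                        prob=1, scale=1)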
#' @export
add_shockinfo <- function(shockinfo=NULL, shock=NULL, restriction=NULL, sign=NULL, horizon=NULL, prob=NULL, scale=NULL, global=NULL, horizon.fillup=TRUE){
if(is.null(shockinfo)){
shockinfo <- get_shockinfo(ident="sign")
}
if(is.null(shock)){
stop("Please specify structural shock. This corresponds to the variable the shock is originating from.")
}
if(length(shock)>1){
stop("Please only specify one structural shock at once.")
}
if(is.null(restriction) || is.null(sign)){
stop("Please specify 'restriction' together with 'sign'.")
}
if(length(restriction)!=length(sign)){
stop("Please provide the arguments 'restriction' and 'sign' with equal length. Please respecify.")
}
if(length(restriction)!=length(horizon)){
if(length(horizon)!=1) stop("Please provide the argument 'horizon' either with length equal to one for all shocks or with an equal length of the restrictions.")
}
nr <- length(sign)
if(!(is.null(restriction) && is.null(sign)) && is.null(horizon)){
warning("No horizon specified, is set to one, i.e., a shock restriction on impact.")
horizon <- rep(1,nr)
}
if(!any(sign%in%c(">","<","0","ratio.H","ratio.avg"))){
stop("Misspecification in 'sign'. Only the following is allowed: <, >, 0, ratio.H, ratio.avg")
}
if(is.null(scale)){
warning("Scaling is not specified, set positive.")
scale <- rep(1,nr)
}
if(length(scale)==1) scale <- rep(scale,nr)
scale <- sign(scale)
if(length(unique(scale))>1){
warning("Different scaling supplied. Set to default value: positive.")
scale <- rep(1,nr)
}
if(is.null(prob)){
warning("Restriction proabilities not specified, set to one.")
prob <- rep(1,nr)
}
if(is.null(global)){
global <- rep(FALSE,nr)
}else{
global <- rep(global,nr)
}
if(length(prob)==1) prob <- rep(prob,nr)
if(length(prob)!=nr || length(scale)!=nr){
stop("Please specify 'prob' or 'scale' with unit length for all restrictions or equal length than restriction.")
}
if(length(horizon)==1 && length(horizon)<nr){
warning("Only one horizon specified, is used for all horizons.")
horizon <- rep(horizon,nr)
}
for(irep in 1:nr){
# if horizon is bigger than one
idx_ratio <- sign[irep] %in% c("ratio.H","ratio.avg")
if(horizon[irep]>1 && !idx_ratio && horizon.fillup){
repetition <- max(horizon[irep])
# horizon <- c(unlist(sapply(horizon[idx_nr],seq)),horizon[idx_r])
}else{
repetition <- 1
}
# add to shockinfo
nt<-ifelse(all(is.na(shockinfo)),0,nrow(shockinfo))
for(nn in 1:repetition){
shockinfo[nt+nn,] <- NA
shockinfo$shock[nt+nn] <- shock
shockinfo$restriction[nt+nn] <- restriction[irep]
shockinfo$sign[nt+nn] <- sign[irep]
if(repetition == 1){
shockinfo$horizon[nt+nn] <- horizon[irep]
}else{
shockinfo$horizon[nt+nn] <- seq(1,horizon[irep])[nn]
}
shockinfo$prob[nt+nn] <- prob[irep]
shockinfo$scale[nt+nn] <- scale[irep]
shockinfo$global[nt+nn] <- global[irep]
}
}
# delete duplicate lines
shockinfo<-shockinfo[!duplicated(shockinfo),]
return(shockinfo)
}
|
/scratch/gouwar.j/cran-all/cranData/BGVAR/R/irf.R
|
#' @name plot
#' @title Graphical Summary of Output Created with \code{bgvar}
#' @description Plotting function for fitted values, residuals, predictions, impulse responses and forecast error variance decompositions created with the \code{BGVAR} package.
#' @param x Either an object of class \code{bgvar}, \code{bgvar.res}, \code{bgvar.irf}, \code{bgvar.predict} or \code{bgvar.fevd}.
#' @param ... Additional arguments; set graphical parameters.
#' @param resp If only a subset of variables or countries should be plotted. If set to default value \code{NULL} all countries/variables are plotted.
#' @param global If \code{TRUE} global fitted values are plotted, otherwise country fitted values.
#' @param quantiles Numeric vector with posterior quantiles. Default is set to plot median along with 68\%/80\% confidence intervals.
#' @return No return value.
#' @author Maximilian Boeck, Martin Feldkircher
#' @export
#' @examples
#' \dontshow{
#' library(BGVAR)
#' data(testdata)
#' model.ssvs <- bgvar(Data=testdata,W=W.test,plag=1,draws=100,burnin=100,
#' prior="SSVS",eigen=TRUE)
#' }
#' \donttest{
#' # example for class 'bgvar'
#' plot(model.ssvs, resp=c("EA.y","US.Dp"))
#' }
#' @importFrom graphics axis lines par plot abline matplot polygon segments
#' @importFrom stats median quantile plot.ts
plot.bgvar <- function(x, ..., resp=NULL, global=TRUE){
# reset user par settings on exit
oldpar <- par(no.readonly=TRUE)
on.exit(par(oldpar))
plag <- x$args$plag
xglobal <- x$xglobal
trend <- x$args$trend
XX <- .mlag(xglobal,plag[1])
YY <- xglobal[-c(1:plag[1]),,drop=FALSE]
XX <- cbind(XX[-c(1:plag[1]),,drop=FALSE],1)
bigT <- nrow(YY)
if(trend) XX <- cbind(XX,seq(1,bigT))
time <- .timelabel(x$args$time)
varNames <- dimnames(xglobal)[[2]]
cN <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[1]))
vars <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[2]))
bigK <- length(vars)
Ki <- unlist(lapply(cN,function(x)length(grep(x,varNames))))
if(global){
A_post <- apply(x$stacked.results$A_large,c(1,2),median)
fit <- XX%*%t(A_post)
}else{
fit <- YY-do.call("cbind",x$cc.results$res)
}
# adapt styles
bgvar.env$plot$cex.axis = 1.1 # adjust for this particular plot
args <- list(...)
args.env <- names(bgvar.env$plot)
if(length(args)>0){
for(aa in args.env){
if(aa%in%names(args)) bgvar.env$plot[[aa]] = args[[aa]]
}
}
if(is.null(resp)){
nrc <- lapply(Ki,function(k).get_nrc(k))
for(cc in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[cc]][1],nrc[[cc]][2]))
for(kk in 1:bigK){
idx <- which(paste0(cN[cc],".",vars[kk])==varNames)
if(length(idx) == 0) next
lims <- c(min(fit[,idx],YY[,idx]),max(fit[,idx],YY[,idx]))
plot.ts(fit[,idx], type="l", xlab="", ylab="", main = varNames[idx], ylim=lims,
xaxt="n",yaxt="n", cex.main=bgvar.env$plot$cex.main, cex.lab=bgvar.env$plot$cex.lab,
lwd=3)
lines(YY[,idx],col="grey40", lwd=3, lty=2)
axisindex <- round(seq(1,bigT,length.out=8))
axis(1, at=axisindex, labels=time[axisindex], las=2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
axis(2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%cN)){
cidx <- which(cN%in%resp)
cN <- cN[cidx]; Ki <- Ki[cidx]
nrc <- lapply(Ki,function(k).get_nrc(k))
for(cc in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[cc]][1],nrc[[cc]][2]))
for(kk in 1:bigK){
idx <- which(paste0(cN[cc],".",vars[kk])==varNames)
if(length(idx) == 0) next
lims <- c(min(fit[,idx],YY[,idx]),max(fit[,idx],YY[,idx]))
plot.ts(fit[,idx], type="l", xlab="", ylab="", main = varNames[idx], ylim=lims,
xaxt="n",yaxt="n", cex.main=bgvar.env$plot$cex.main, cex.lab=bgvar.env$plot$cex.lab,
lwd=3)
lines(YY[,idx],col="grey40", lwd=3, lty=2)
axisindex <- round(seq(1,bigT,length.out=8))
axis(1, at=axisindex, labels=time[axisindex], las=2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
axis(2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%vars)){
vidx <- which(vars%in%resp)
vars <- vars[vidx]; Ki <- rep(length(cN),length(vidx))
nrc <- lapply(Ki,function(k).get_nrc(k))
for(vv in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[vv]][1],nrc[[vv]][2]))
for(kk in 1:Ki[vv]){
idx <- which(paste0(cN[kk],".",vars[vv])==varNames)
if(length(idx)==0) next
lims <- c(min(fit[,idx],YY[,idx]),max(fit[,idx],YY[,idx]))
plot.ts(fit[,idx], type="l", xlab="", ylab="", main = varNames[idx], ylim=lims,
xaxt="n",yaxt="n", cex.main=bgvar.env$plot$cex.main, cex.lab=bgvar.env$plot$cex.lab,
lwd=3)
lines(YY[,idx],col="grey40", lwd=3, lty=2)
axisindex <- round(seq(1,bigT,length.out=8))
axis(1, at=axisindex, labels=time[axisindex], las=2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
axis(2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%varNames)){
ridx <- which(varNames%in%resp)
Ki <- length(ridx)
nrc <- .get_nrc(Ki)
par(mar=bgvar.env$mar,mfrow=c(nrc[1],nrc[2]))
for(kk in 1:Ki){
idx <- ridx[kk]
lims <- c(min(fit[,idx],YY[,idx]),max(fit[,idx],YY[,idx]))
plot.ts(fit[,idx], type="l", xlab="", ylab="", main = varNames[idx], ylim=lims,
xaxt="n",yaxt="n", cex.main=bgvar.env$plot$cex.main, cex.lab=bgvar.env$plot$cex.lab,
lwd=3)
lines(YY[,idx],col="grey40", lwd=3, lty=2)
axisindex <- round(seq(1,bigT,length.out=8))
axis(1, at=axisindex, labels=time[axisindex], las=2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.axis)
axis(2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}else{
stop("Please specify 'resp' either as one or more specific variable names in the dataset, as general variable name or as unit name, but not as a combination therof. Respecify.")
}
return(invisible(x))
}
#' @name plot
#' @param global If \code{global=TRUE} global residuals are plotted, otherwise country residuals.
#' @param resp Default to \code{NULL}. Either specify a single country or a group of variables to be plotted.
#' @export
#' @examples
#' \donttest{
#' # example for class 'bgvar.resid'
#' res <- residuals(model.ssvs)
#' plot(res, resp="EA.y")
#' }
plot.bgvar.resid <- function(x, ..., resp=NULL, global=TRUE){
# reset user par settings on exit
oldpar <- par(no.readonly=TRUE)
on.exit(par(oldpar))
bigT <- nrow(x$Data)
time <- .timelabel(rownames(x$Data))
varNames <- dimnames(x$Data)[[2]]
cN <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[1]))
vars <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[2]))
bigK <- length(vars)
Ki <- unlist(lapply(cN,function(x)length(grep(x,varNames))))
if(global){
res <- apply(x$global,c(2,3),median)
}else{
res <- apply(x$country,c(2,3),median)
}
# adapt styles
args <- list(...)
args.env <- names(bgvar.env$plot)
if(length(args)>0){
for(aa in args.env){
if(aa%in%names(args)) bgvar.env$plot[[aa]] = args[[aa]]
}
}
if(is.null(resp)){
nrc <- lapply(Ki,function(k).get_nrc(k))
for(cc in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[cc]][1],nrc[[cc]][2]))
for(kk in 1:bigK){
idx <- which(paste0(cN[cc],".",vars[kk])==varNames)
if(length(idx) == 0) next
lims <- c(min(res[,idx]),max(res[,idx]))
plot.ts(res[,idx], type="l", xlab="", ylab="", main = varNames[idx], ylim=lims,
xaxt="n",yaxt="n", cex.main=bgvar.env$plot$cex.main, cex.lab=bgvar.env$plot$cex.lab,
lwd=3)
axisindex <- round(seq(1,bigT,length.out=8))
axis(1, at=axisindex, labels=time[axisindex], las=2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
axis(2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%cN)){
cidx <- which(cN%in%resp)
cN <- cN[cidx]; Ki <- Ki[cidx]
nrc <- lapply(Ki,function(k).get_nrc(k))
for(cc in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[cc]][1],nrc[[cc]][2]))
for(kk in 1:bigK){
idx <- which(paste0(cN[cc],".",vars[kk])==varNames)
if(length(idx) == 0) next
lims <- c(min(res[,idx]),max(res[,idx]))
plot.ts(res[,idx], type="l", xlab="", ylab="", main = varNames[idx], ylim=lims,
xaxt="n",yaxt="n", cex.main=bgvar.env$plot$cex.main, cex.lab=bgvar.env$plot$cex.lab,
lwd=3)
axisindex <- round(seq(1,bigT,length.out=8))
axis(1, at=axisindex, labels=time[axisindex], las=2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
axis(2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%vars)){
vidx <- which(vars%in%resp)
vars <- vars[vidx]; Ki <- rep(length(cN),length(vidx))
nrc <- lapply(Ki,function(k).get_nrc(k))
for(vv in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[vv]][1],nrc[[vv]][2]))
for(kk in 1:Ki[vv]){
idx <- which(paste0(cN[kk],".",vars[vv])==varNames)
if(length(idx)==0) next
lims <- c(min(res[,idx]),max(res[,idx]))
plot.ts(res[,idx], type="l", xlab="", ylab="", main = varNames[idx], ylim=lims,
xaxt="n",yaxt="n", cex.main=bgvar.env$plot$cex.main, cex.lab=bgvar.env$plot$cex.lab,
lwd=3)
axisindex <- round(seq(1,bigT,length.out=8))
axis(1, at=axisindex, labels=time[axisindex], las=2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
axis(2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%varNames)){
ridx <- which(varNames%in%resp)
Ki <- length(ridx)
nrc <- .get_nrc(Ki)
par(mar=bgvar.env$mar,mfrow=c(nrc[1],nrc[2]))
for(kk in 1:Ki){
idx <- ridx[kk]
lims <- c(min(res[,idx]),max(res[,idx]))
plot.ts(res[,idx], type="l", xlab="", ylab="", main = varNames[idx], ylim=lims,
xaxt="n",yaxt="n", cex.main=bgvar.env$plot$cex.main, cex.lab=bgvar.env$plot$cex.lab,
lwd=3)
axisindex <- round(seq(1,bigT,length.out=8))
axis(1, at=axisindex, labels=time[axisindex], las=2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
axis(2, cex.axis=bgvar.env$plot$cex.axis, cex.lab=bgvar.env$plot$cex.lab)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}else{
stop("Please specify 'resp' either as one or more specific variable names in the dataset, as general variable name or as unit name, but not as a combination therof. Respecify.")
}
return(invisible(x))
}
#' @name plot
#' @param resp Specify a variable to plot predictions.
#' @param cut Length of series to be plotted before prediction begins.
#' @examples
#' \donttest{
#' # example for class 'bgvar.pred'
#' fcast <- predict(model.ssvs,n.ahead=8)
#' plot(fcast, resp="y", cut=20)
#' }
#' @export
plot.bgvar.pred<-function(x, ..., resp=NULL, cut=40, quantiles=c(.10,.16,.50,.84,.90)){
# reset user par settings on exit
oldpar<- par(no.readonly=TRUE)
on.exit(par(oldpar))
fcast <- x$fcast
Xdata <- x$xglobal
hstep <- x$n.ahead
if(!all(paste0("Q",quantiles*100)%in%dimnames(fcast)[[3]])){
stop("Please provide available quantiles.")
}
thin<-nrow(Xdata)-hstep
if(thin>cut){
Xdata<-Xdata[(nrow(Xdata)-cut+1):nrow(Xdata),]
}
varNames <- colnames(Xdata)
cN <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[1]))
vars <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[2]))
K <- length(vars)
Ki <- unlist(lapply(cN,function(x)length(grep(x,varNames))))
Q <- length(quantiles)
if((Q %% 2) == 0){
stop("Please provide odd numbers of quantiles: median along with intervals.")
}
# adapt styles
args <- list(...)
args.env <- names(bgvar.env$plot)
if(length(args)>0){
for(aa in args.env){
if(aa%in%names(args)) bgvar.env$plot[[aa]] = args[[aa]]
}
}
if(is.null(resp)){
nrc <- lapply(Ki,function(k).get_nrc(k))
for(cc in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[cc]][1],nrc[[cc]][2]))
for(kk in 1:K){
idx <- which(paste0(cN[cc],".",vars[kk])==varNames)
if(length(idx) == 0) next
x <- rbind(cbind(matrix(NA,nrow(Xdata),floor(Q/2)),Xdata[,idx],matrix(NA,nrow(Xdata),floor(Q/2))),fcast[idx,,paste0("Q",quantiles*100)])
b <- range(x,na.rm=TRUE); b1<-b[1];b2<-rev(b)[1]
plot.ts(x[,median(seq(Q))], col=bgvar.env$plot$col.50, lty=1, yaxt="n", xaxt="n",
lwd=bgvar.env$plot$lwd.line,ylab="",main=varNames[idx],cex.main=bgvar.env$plot$cex.main,
cex.axis=bgvar.env$plot$cex.axis,cex.lab=bgvar.env$plot$cex.lab,ylim=c(b1,b2))
for(qq in 1:floor(Q/2)){
polygon(c(1:nrow(x),rev(1:nrow(x))),c(x[,qq],rev(x[,Q-qq+1])),col=bgvar.env$plot$col.unc[qq],border=NA)
}
lines(c(rep(NA,cut),x[seq(cut+1,cut+hstep),median(seq(Q))]),col=bgvar.env$plot$col.50,lwd=4)
axisnames <- c(rownames(Xdata),paste("t+",1:hstep,sep=""))
axisindex <- round(seq(1,length(axisnames),length.out=8))
axis(side=1, at=axisindex, labels=axisnames[axisindex], cex.axis=bgvar.env$plot$cex.axis,tick=FALSE,las=2)
axis(side=2, cex.axis=bgvar.env$plot$cex.axis)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%cN)){
cidx <- which(cN%in%resp)
cN <- cN[cidx]; Ki <- Ki[cidx]
nrc <- lapply(Ki,function(k).get_nrc(k))
for(cc in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[cc]][1],nrc[[cc]][2]))
for(kk in 1:K){
idx <- which(paste0(cN[cc],".",vars[kk])==varNames)
if(length(idx) == 0) next
x <- rbind(cbind(matrix(NA,nrow(Xdata),floor(Q/2)),Xdata[,idx],matrix(NA,nrow(Xdata),floor(Q/2))),fcast[idx,,paste0("Q",quantiles*100)])
b <- range(x,na.rm=TRUE); b1<-b[1];b2<-rev(b)[1]
plot.ts(x[,median(seq(Q))], col=bgvar.env$plot$col.50, lty=1, yaxt="n", xaxt="n",
lwd=bgvar.env$plot$lwd.line,ylab="",main=varNames[idx],cex.main=bgvar.env$plot$cex.main,
cex.axis=bgvar.env$plot$cex.axis,cex.lab=bgvar.env$plot$cex.lab,ylim=c(b1,b2))
for(qq in 1:floor(Q/2)){
polygon(c(1:nrow(x),rev(1:nrow(x))),c(x[,qq],rev(x[,Q-qq+1])),col=bgvar.env$plot$col.unc[qq],border=NA)
}
lines(c(rep(NA,cut),x[seq(cut+1,cut+hstep),median(seq(Q))]),col=bgvar.env$plot$col.50,lwd=4)
axisnames <- c(rownames(Xdata),paste("t+",1:hstep,sep=""))
axisindex <- round(seq(1,length(axisnames),length.out=8))
axis(side=1, at=axisindex, labels=axisnames[axisindex], cex.axis=bgvar.env$plot$cex.axis, tick=FALSE, las=2)
axis(side=2, cex.axis=bgvar.env$plot$cex.axis)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%vars)){
vidx <- which(vars%in%resp)
vars <- vars[vidx]; Ki <- rep(length(cN),length(vidx))
nrc <- lapply(Ki,function(k).get_nrc(k))
for(vv in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[vv]][1],nrc[[vv]][2]))
for(kk in 1:Ki[vv]){
idx <- which(paste0(cN[kk],".",vars[vv])==varNames)
if(length(idx)==0) next
x <- rbind(cbind(matrix(NA,nrow(Xdata),floor(Q/2)),Xdata[,idx],matrix(NA,nrow(Xdata),floor(Q/2))),fcast[idx,,paste0("Q",quantiles*100)])
b <- range(x,na.rm=TRUE); b1<-b[1];b2<-rev(b)[1]
plot.ts(x[,median(seq(Q))], col=bgvar.env$plot$col.50, lty=1, yaxt="n", xaxt="n",
lwd=bgvar.env$plot$lwd.line,ylab="",main=varNames[idx],cex.main=bgvar.env$plot$cex.main,
cex.axis=bgvar.env$plot$cex.axis,cex.lab=bgvar.env$plot$cex.lab,ylim=c(b1,b2))
for(qq in 1:floor(Q/2)){
polygon(c(1:nrow(x),rev(1:nrow(x))),c(x[,qq],rev(x[,Q-qq+1])),col=bgvar.env$plot$col.unc[qq],border=NA)
}
lines(c(rep(NA,cut),x[seq(cut+1,cut+hstep),median(seq(Q))]),col=bgvar.env$plot$col.50,lwd=4)
axisnames <- c(rownames(Xdata),paste("t+",1:hstep,sep=""))
axisindex <- round(seq(1,length(axisnames),length.out=8))
axis(side=1, at=axisindex, labels=axisnames[axisindex], cex.axis=bgvar.env$plot$cex.axis,tick=FALSE,las=2)
axis(side=2, cex.axis=bgvar.env$plot$cex.axis)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%varNames)){
ridx <- which(varNames%in%resp)
Ki <- length(ridx)
nrc <- .get_nrc(Ki)
par(mar=bgvar.env$mar,mfrow=c(nrc[1],nrc[2]))
for(kk in 1:Ki){
idx <- ridx[kk]
x <- rbind(cbind(matrix(NA,nrow(Xdata),floor(Q/2)),Xdata[,idx],matrix(NA,nrow(Xdata),floor(Q/2))),fcast[idx,,paste0("Q",quantiles*100)])
b <- range(x,na.rm=TRUE); b1<-b[1];b2<-rev(b)[1]
plot.ts(x[,median(seq(Q))], col=bgvar.env$plot$col.50, lty=1, yaxt="n", xaxt="n",
lwd=bgvar.env$plot$lwd.line,ylab="",main=varNames[idx],cex.main=bgvar.env$plot$cex.main,
cex.axis=bgvar.env$plot$cex.axis,cex.lab=bgvar.env$plot$cex.lab,ylim=c(b1,b2))
for(qq in 1:floor(Q/2)){
polygon(c(1:nrow(x),rev(1:nrow(x))),c(x[,qq],rev(x[,Q-qq+1])),col=bgvar.env$plot$col.unc[qq],border=NA)
}
lines(c(rep(NA,cut),x[seq(cut+1,cut+hstep),median(seq(Q))]),col=bgvar.env$plot$col.50,lwd=4)
axisnames <- c(rownames(Xdata),paste("t+",1:hstep,sep=""))
axisindex <- round(seq(1,length(axisnames),length.out=8))
axis(side=1, at=axisindex, labels=axisnames[axisindex], cex.axis=bgvar.env$plot$cex.axis,tick=FALSE,las=2)
axis(side=2, cex.axis=bgvar.env$plot$cex.axis)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}else{
stop("Please specify 'resp' either as one or more specific variable names in the dataset, as general variable name or as unit name, but not as a combination therof. Respecify.")
}
return(invisible(x))
}
#' @name plot
#' @param resp Specify either a specific variable, a specific country, or a specific variable in a specific country to be plotted. If set to \code{NULL}, all countries are plotted.
#' @param shock Specify the shock which should be plotted.
#' @param cumulative Default is set to \code{FALSE}. If \code{cumulative=TRUE} cumulative impulse response functions are plotted.
#' @examples
#' \donttest{
#' # example for class 'bgvar.irf'
#' shockinfo <- get_shockinfo("chol")
#' shockinfo$shock <- "US.stir"; shockinfo$scale <- +1
#' irf.chol<-irf(model.ssvs, n.ahead=24, shockinfo=shockinfo)
#' plot(irf.chol, resp="US")
#' }
#' @export
plot.bgvar.irf<-function(x, ...,resp=NULL, shock=1, quantiles=c(.10,.16,.50,.84,.90), cumulative=FALSE){
# restore user par settings on exit
oldpar <- par(no.readonly=TRUE)
on.exit(par(oldpar))
if(length(shock)!=1){
stop("Please select only one shock.")
}
posterior <- x$posterior
if(!all(paste0("Q",quantiles*100)%in%dimnames(posterior)[[4]])){
stop("Please provide available quantiles.")
}
varNames <- dimnames(posterior)[[1]]
cN <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[1]))
vars <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[2]))
Ki <- unlist(lapply(cN,function(x)length(grep(x,varNames))))
K <- length(vars)
Q <- length(quantiles)
if((Q %% 2) == 0){
stop("Please provide odd numbers of quantiles: median along with intervals.")
}
# adapt styles
args <- list(...)
args.env <- names(bgvar.env$plot)
if(length(args)>0){
for(aa in args.env){
if(aa%in%names(args)) bgvar.env$plot[[aa]] = args[[aa]]
}
}
irf_list = list(); count<-0
if(is.null(resp)){
nrc <- lapply(Ki,function(k).get_nrc(k))
for(cc in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[cc]][1],nrc[[cc]][2]))
for(kk in 1:K){
plot_varname = paste0(cN[cc],".",vars[kk])
idx = which(plot_varname==varNames)
if(length(idx) == 0) next
# get plot data
x<-posterior[idx,,shock,paste0("Q",quantiles*100),drop=TRUE]
if(cumulative){x<-apply(x,2,cumsum)}
# save plot data
irf_list[[paste0("IRF.",plot_varname)]] = x
count = count+1
# do plot
b <- range(x);b1<-b[1];b2<-rev(b)[1]
plot.ts(x[,median(seq(Q))], col=bgvar.env$plot$col.50, lty=1, yaxt="n", xaxt="n",
lwd=bgvar.env$plot$lwd.line,ylab="",xlab="",main=varNames[idx],cex.main=bgvar.env$plot$cex.main,
cex.axis=bgvar.env$plot$cex.axis,cex.lab=bgvar.env$plot$cex.lab,ylim=c(b1,b2))
for(qq in 1:floor(Q/2)){
polygon(c(1:nrow(x),rev(1:nrow(x))),c(x[,qq],rev(x[,Q-qq+1])),col=bgvar.env$plot$col.unc[qq],border=NA)
}
lines(x[,median(seq(Q))],col=bgvar.env$plot$col.50,lwd=4)
segments(x0=1,y0=0,x1=nrow(x),y1=0,col=bgvar.env$plot$col.zero,lty=bgvar.env$plot$lty.zero,lwd=bgvar.env$plot$lwd.zero)
axis(2, at=seq(b1,b2,length.out=5), labels=format(seq(b1,b2,length.out=5),digits=1,nsmall=1),cex.axis=bgvar.env$plot$cex.axis,las=1)
axisindex<-seq(1,nrow(x),by=4)
axis(side=1, las=1,at=axisindex, labels=axisindex-1, cex.axis=bgvar.env$plot$cex.axis,tick=FALSE)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%cN)){
cidx <- which(cN%in%resp)
cN <- cN[cidx]; Ki <- Ki[cidx]
nrc <- lapply(Ki,function(k).get_nrc(k))
for(cc in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[cc]][1],nrc[[cc]][2]))
vars.cc <- sapply(strsplit(varNames[grep(cN[cc],varNames)],".",fixed=TRUE),function(x) x[2])
for(kk in 1:K){
plot_varname = paste0(cN[cc],".",vars[kk])
idx <- which(plot_varname==varNames)
if(length(idx) == 0) next
# get plot data
x<-posterior[idx,,shock,paste0("Q",quantiles*100),drop=TRUE]
if(cumulative){x<-apply(x,2,cumsum)}
# save plot data
irf_list[[paste0("IRF.",plot_varname)]] = x
count = count+1
# do plot
b <- range(x);b1<-b[1];b2<-rev(b)[1]
plot.ts(x[,median(seq(Q))], col=bgvar.env$plot$col.50, lty=1, yaxt="n", xaxt="n",
lwd=bgvar.env$plot$lwd.line,ylab="",xlab="",main=varNames[idx],cex.main=bgvar.env$plot$cex.main,
cex.axis=bgvar.env$plot$cex.axis,cex.lab=bgvar.env$plot$cex.lab,ylim=c(b1,b2))
for(qq in 1:floor(Q/2)){
polygon(c(1:nrow(x),rev(1:nrow(x))),c(x[,qq],rev(x[,Q-qq+1])),col=bgvar.env$plot$col.unc[qq],border=NA)
}
lines(x[,median(seq(Q))],col=bgvar.env$plot$col.50,lwd=4)
segments(x0=1,y0=0,x1=nrow(x),y1=0,col=bgvar.env$plot$col.zero,lty=bgvar.env$plot$lty.zero,lwd=bgvar.env$plot$lwd.zero)
axis(2, at=seq(b1,b2,length.out=5), labels=format(seq(b1,b2,length.out=5),digits=1,nsmall=1),cex.axis=bgvar.env$plot$cex.axis,las=1)
axisindex<-seq(1,nrow(x),by=4)
axis(side=1, las=1,at=axisindex, labels=axisindex-1, cex.axis=bgvar.env$plot$cex.axis,tick=FALSE)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%vars)){
vidx <- which(vars%in%resp)
vars <- vars[vidx]; Ki <- rep(length(cN),length(vidx))
nrc <- lapply(Ki,function(k).get_nrc(k))
for(vv in 1:length(nrc)){
par(mar=bgvar.env$mar,mfrow=c(nrc[[vv]][1],nrc[[vv]][2]))
for(kk in 1:Ki[vv]){
plot_varname = paste0(cN[kk],".",vars[vv])
idx <- which(plot_varname==varNames)
if(length(idx)==0) next
# get plot data
x<-posterior[idx,,shock,paste0("Q",quantiles*100),drop=TRUE]
if(cumulative){x<-apply(x,2,cumsum)}
# save plot data
irf_list[[paste0("IRF.",plot_varname)]] = x
count = count+1
# do plot
b <- range(x);b1<-b[1];b2<-rev(b)[1]
plot.ts(x[,median(seq(Q))], col=bgvar.env$plot$col.50, lty=1, yaxt="n", xaxt="n",
lwd=bgvar.env$plot$lwd.line,ylab="",xlab="",main=varNames[idx],cex.main=bgvar.env$plot$cex.main,
cex.axis=bgvar.env$plot$cex.axis,cex.lab=bgvar.env$plot$cex.lab,ylim=c(b1,b2))
for(qq in 1:floor(Q/2)){
polygon(c(1:nrow(x),rev(1:nrow(x))),c(x[,qq],rev(x[,Q-qq+1])),col=bgvar.env$plot$col.unc[qq],border=NA)
}
lines(x[,median(seq(Q))],col=bgvar.env$plot$col.50,lwd=4)
segments(x0=1,y0=0,x1=nrow(x),y1=0,col=bgvar.env$plot$col.zero,lty=bgvar.env$plot$lty.zero,lwd=bgvar.env$plot$lwd.zero)
axis(2, at=seq(b1,b2,length.out=5), labels=format(seq(b1,b2,length.out=5),digits=1,nsmall=1),cex.axis=bgvar.env$plot$cex.axis,las=1)
axisindex<-seq(1,nrow(x),by=4)
axis(side=1, las=1,at=axisindex, labels=axisindex-1, cex.axis=bgvar.env$plot$cex.axis,tick=FALSE)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
}else if(all(resp%in%varNames)){
ridx <- which(varNames%in%resp)
Ki <- length(ridx)
nrc <- .get_nrc(Ki)
par(mar=bgvar.env$mar,mfrow=c(nrc[1],nrc[2]))
for(kk in 1:Ki){
idx <- ridx[kk]
# get plot data
x<-posterior[idx,,shock,paste0("Q",quantiles*100),drop=TRUE]
if(cumulative){x<-apply(x,2,cumsum)}
# save plot data
irf_list[[paste0("IRF.",resp[kk])]] = x
count = count+1
# do plot
b <- range(x);b1<-b[1];b2<-rev(b)[1]
plot.ts(x[,median(seq(Q))], col=bgvar.env$plot$col.50, lty=1, yaxt="n", xaxt="n",
lwd=bgvar.env$plot$lwd.line,ylab="",xlab="",main=varNames[idx],cex.main=bgvar.env$plot$cex.main,
cex.axis=bgvar.env$plot$cex.axis,cex.lab=bgvar.env$plot$cex.lab,ylim=c(b1,b2))
for(qq in 1:floor(Q/2)){
polygon(c(1:nrow(x),rev(1:nrow(x))),c(x[,qq],rev(x[,Q-qq+1])),col=bgvar.env$plot$col.unc[qq],border=NA)
}
lines(x[,median(seq(Q))],col=bgvar.env$plot$col.50,lwd=4)
segments(x0=1,y0=0,x1=nrow(x),y1=0,col=bgvar.env$plot$col.zero,lty=bgvar.env$plot$lty.zero,lwd=bgvar.env$plot$lwd.zero)
axis(2, at=seq(b1,b2,length.out=5), labels=format(seq(b1,b2,length.out=5),digits=1,nsmall=1),cex.axis=bgvar.env$plot$cex.axis,las=1)
axisindex<-seq(1,nrow(x),by=4)
axis(side=1, las=1,at=axisindex, labels=axisindex-1, cex.axis=bgvar.env$plot$cex.axis,tick=FALSE)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}else{
stop("Please specify 'resp' either as one or more specific variable names in the dataset, as general variable name or as unit name, but not as a combination therof. Respecify.")
}
return(invisible(irf_list))
}
#' @name plot
#' @param k.max Plots the \code{k.max} series with the highest contributions to the decomposition of \code{resp}.
#' @examples
#' \donttest{
#' # example for class 'bgvar.fevd'
#' fevd.us=fevd(irf.chol,var.slct=c("US.stir"))
#' plot(fevd.us, resp="US.stir", k.max=10)
#' }
#' @export
plot.bgvar.fevd<-function(x, ..., resp, k.max=10){
# restore user par settings on exit
oldpar <- par(no.readonly=TRUE)
on.exit(par(oldpar))
fevd <- x[[1]]
xglobal <- x$xglobal
varNames <- colnames(xglobal)
varAll <- varNames
cN <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[1]))
vars <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[2]))
resp0 <- paste("Decomp. of ",resp,sep="")
if(length(resp0)>1){
stop("Please provide just one time series in 'resp'.")
}
if(!(resp0%in%dimnames(fevd)[[2]])){
stop("Please provide time series present in dataset.")
}
if(is.numeric(k.max)){
mean <- apply(fevd,c(1,2),mean)
resp <- names(sort(mean[,resp0], decreasing=TRUE))[1:k.max]
}
varNames <- list()
for(kk in 1:ceiling(k.max/10)){
if(kk*10>k.max) kk.max <- k.max else kk.max <- kk*10
varNames[[kk]] <- resp[((kk-1)*10+1):kk.max]
}
# adapt styles
args <- list(...)
args.env <- names(bgvar.env$plot)
if(length(args)>0){
for(aa in args.env){
if(aa%in%names(args)) bgvar.env$plot[[aa]] = args[[aa]]
}
}
for(kk in 1:length(varNames)){
rows <- length(varNames[[kk]])/2
if(rows<1) cols <- 1 else cols <- 2
if(rows%%1!=0) rows <- ceiling(rows)
# update par settings
par(mar=bgvar.env$mar,mfrow=c(rows,cols))
for(kkk in 1:length(varNames[[kk]])){
idx <- grep(varNames[[kk]][kkk],varAll)
x<-fevd[idx,resp0,]
b<-range(x); b1<-b[1]; b2<-b[2]
plot.ts(x,col=bgvar.env$plot$col.50,xaxt="n",yaxt="n",lwd=bgvar.env$plot.lwd.line,ylab="",xlab="",
main=varAll[idx],cex.main=bgvar.env$plot.cex.main,cex.axis=bgvar.env$plot$cex.axis,
cex.lab=bgvar.env$plot$cex.lab,lty=1,ylim=c(b1,b2))
axis(2, at=seq(b1,b2,length.out=5), labels=format(seq(b1,b2,length.out=5),digits=2,nsmall=1),cex.axis=bgvar.env$plot$cex.axis,las=1)
axisindex<-seq(1,length(x),by=4)
axis(side=1, las=1,at=axisindex, labels=c(0:length(x))[axisindex], cex.axis=bgvar.env$plot$cex.axis,tick=FALSE)
abline(v=axisindex,col=bgvar.env$plot$col.tick,lty=bgvar.env$plot$lty.tick)
}
}
return(invisible(x))
}
|
/scratch/gouwar.j/cran-all/cranData/BGVAR/R/plot.R
|
#' @name predict
#' @title Predictions
#' @description A function that computes predictions and conditional predictions based on a object of class \code{bgvar}.
#' @details Predictions are performed up to a horizon of \code{n.ahead}. Note that conditional forecasts need a fully identified system. Therefore this function utilizes short-run restrictions via the Cholesky decomposition on the global solution of the variance-covariance matrix of the Bayesian GVAR.
#' @param object An object of class \code{bgvar}.
#' @param ... Additional arguments.
#' @param n.ahead Forecast horizon.
#' @param constr Matrix of size \code{n.ahead} times K containing the conditional forecasts, where K is the number of variables in the system. The ordering of the columns has to correspond to the ordering of the variables in the system; entries that are not restricted are set to \code{NA}.
#' @param constr_sd Matrix containing the standard deviations around the conditional forecasts. Must have the same size as \code{constr}.
#' @param quantiles Numeric vector with posterior quantiles. Default is set to compute median along with 68\%/80\%/90\% confidence intervals.
#' @param save.store If set to \code{TRUE} the full distribution is returned. Default is set to \code{FALSE} in order to save storage.
#' @param verbose If set to \code{FALSE} it suppresses printing messages to the console.
#' @return Returns an object of class \code{bgvar.pred} with the following elements \describe{
#' \item{\code{fcast}}{ is a K times n.ahead times Q-dimensional array that contains Q quantiles of the posterior predictive distribution.}
#' \item{\code{xglobal}}{ is a matrix object of dimension T times N (T # of observations, K # of variables in the system).}
#' \item{\code{n.ahead}}{ specified forecast horizon.}
#' \item{\code{lps.stats}}{ is an array object of dimension K times 2 times n.ahead and contains the mean and standard deviation of the log-predictive scores for each variable and each forecast horizon.}
#' \item{\code{hold.out}}{ if \code{h} is not set to zero, this contains the hold-out sample.}
#' }
#' @examples
#' library(BGVAR)
#' data(testdata)
#' model.ssvs <- bgvar(Data=testdata,W=W.test,plag=1,draws=100,burnin=100,
#' prior="SSVS")
#' fcast <- predict(model.ssvs, n.ahead=8)
#'
#' # conditional predictions
#' # set up constraints matrix of dimension n.ahead times K
#' constr <- matrix(NA,nrow=8,ncol=ncol(model.ssvs$xglobal))
#' colnames(constr) <- colnames(model.ssvs$xglobal)
#' constr[1:5,"US.Dp"] <- model.ssvs$xglobal[76,"US.Dp"]
#'
#' # add uncertainty to conditional forecasts
#' constr_sd <- matrix(NA,nrow=8,ncol=ncol(model.ssvs$xglobal))
#' colnames(constr_sd) <- colnames(model.ssvs$xglobal)
#' constr_sd[1:5,"US.Dp"] <- 0.001
#'
#' fcast_cond <- predict(model.ssvs, n.ahead=8, constr=constr, constr_sd=constr_sd)
#' @references
#' Jarocinski, M. (2010) \emph{Conditional forecasts and uncertainty about forecasts revisions in vector autoregressions.} Economics Letters, Vol. 108(3), pp. 257-259.
#'
#' Waggoner, D. F. and T. Zha (1999) \emph{Conditional Forecasts in Dynamic Multivariate Models.} Review of Economics and Statistics, Vol. 81(4), pp. 639-651.
#' @importFrom stats rnorm tsp sd
#' @author Maximilian Boeck, Martin Feldkircher, Florian Huber
#' @export
predict.bgvar <- function(object, ..., n.ahead=1, constr=NULL, constr_sd=NULL, quantiles=NULL, save.store=FALSE, verbose=TRUE){
start.pred <- Sys.time()
if(!inherits(object, "bgvar")) {stop("Please provide a `bgvar` object.")}
# check if posterior draws are available
if(object$args$thindraws == 0){
cat("Computation of BGVAR has yielded no stable posterior draws!")
return(invisible(object))
}
if(is.null(quantiles)){
quantiles <- c(.05,.10,.16,.50,.84,.90,.95)
}
if(!is.numeric(quantiles)){
stop("Please provide 'quantiles' as numeric vector.")
}
if(verbose) cat("Start computing predictions of Bayesian Global Vector Autoregression.\n\n")
thindraws <- object$args$thindraws
lags <- object$args$lags
pmax <- max(lags)
xglobal <- object$xglobal
S_large <- object$stacked.results$S_large
F_large <- object$stacked.results$F_large
A_large <- object$stacked.results$A_large
Ginv_large <- object$stacked.results$Ginv_large
F.eigen <- object$stacked.results$F.eigen
varNames <- colnames(xglobal)
cN <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[1]))
vars <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(x) x[2]))
N <- length(cN)
Traw <- nrow(xglobal)
bigT <- Traw-pmax
bigK <- ncol(xglobal)
cons <- 1
trend <- ifelse(object$args$trend,1,0)
Q <- length(quantiles)
flag_cond <- FALSE
#---------------------check conditional predictions--------------------------------#
if(!is.null(constr)){
if(!all(dim(constr)==c(n.ahead,bigK))){
stop("Please respecify dimensions of 'constr'.")
}
if(!is.null(constr_sd)){
if(!all(dim(constr_sd)==c(n.ahead,bigK))){
stop("Please respecify dimensions of 'constr_sd'.")
}
constr_sd[is.na(constr_sd)] <- 0
}else{
constr_sd <- matrix(0,n.ahead,bigK)
}
flag_cond <- TRUE
}
#---------------------------------------------------------------------------------#
varndxv <- c(bigK,cons+trend,pmax)
nkk <- (pmax*bigK)+cons+trend
Yn <- xglobal
Xn <- cbind(.mlag(Yn,pmax),1)
Xn <- Xn[(pmax+1):Traw,,drop=FALSE]
Yn <- Yn[(pmax+1):Traw,,drop=FALSE]
if(trend) Xn <- cbind(Xn,seq(1,bigT))
pred_store <- array(NA,dim=c(thindraws,bigK,n.ahead))
# start loop here
if(verbose){
if(flag_cond)
cat("Start computing conditional predictions...\n")
else
cat("Start computing predictions...\n")
}
for(irep in 1:thindraws){
#Step I: Construct a global VC matrix Omega_t
Ginv <- Ginv_large[,,irep]
Sig_t <- Ginv%*%(S_large[,,irep])%*%t(Ginv)
Sig_t <- as.matrix(Sig_t)
zt <- Xn[bigT,]
z1 <- zt
Mean00 <- zt
Sigma00 <- matrix(0,nkk,nkk)
y2 <- NULL
#gets companion form
aux <- .get_companion(A_large[,,irep],varndxv)
Mm <- aux$MM
Jm <- aux$Jm
Jsigt <- Jm%*%Sig_t%*%t(Jm)
# this is the forecast loop
stop <- FALSE
for(ih in 1:n.ahead){
z1 <- Mm%*%z1
Sigma00 <- Mm%*%Sigma00%*%t(Mm) + Jsigt
chol_varyt <- try(t(chol(Sigma00[1:bigK,1:bigK])),silent=TRUE)
if(is(chol_varyt,"matrix")){
yf <- z1[1:bigK]+chol_varyt%*%rnorm(bigK,0,1)
}
if(is(chol_varyt,"try-error")){
yf <- try(mvrnorm(1,mu=z1[1:bigK],Sigma00[1:bigK,1:bigK]),silent=TRUE)
}
if(is(yf,"try-error")){
stop = TRUE
break # break inner loop
}
y2 <- cbind(y2,yf)
}
if(stop){next} # continue outer loop
pred_store[irep,,] <- y2
}
#----------do conditional forecasting -------------------------------------------#
if(flag_cond){
cond_store <- array(NA, c(thindraws, bigK, n.ahead))
dimnames(cond_store)[[2]] <- varNames
if(verbose) pb <- txtProgressBar(min = 0, max = thindraws, style = 3)
for(irep in 1:thindraws){
pred <- pred_store[irep,,]
Sigma_u <- Ginv_large[,,irep]%*%S_large[,,irep]%*%t(Ginv_large[,,irep])
chol_varyt <- try(t(chol(Sigma_u)), silent=TRUE)
if(is(chol_varyt,"try-error")) {next}
irf <- .impulsdtrf(B=adrop(F_large[,,,irep,drop=FALSE],drop=4),
smat=chol_varyt,nstep=n.ahead)
temp <- as.vector(constr) + rnorm(bigK*n.ahead,0,as.vector(constr_sd))
constr_use <- matrix(temp,n.ahead,bigK)
v <- sum(!is.na(constr))
s <- bigK * n.ahead
r <- c(rep(0, v))
R <- matrix(0, v, s)
pos <- 1
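      # Build the linear system r = R %*% eta implied by the restrictions: each
      # constrained entry pins down a linear combination of future structural
      # shocks through the impulse responses (deviation from the unconditional path).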
for(i in 1:n.ahead) {
for(j in 1:bigK) {
if(is.na(constr_use[i, j])) {next}
r[pos] <- constr_use[i, j] - pred[j, i]
for(k in 1:i) {
R[pos, ((k - 1) * bigK + 1):(k * bigK)] <- irf[j,,(i - k + 1)]
}
pos <- pos + 1
}
}
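      # Solve for structural shocks consistent with the constraints (Waggoner and Zha,
      # 1999; Jarocinski, 2010): the SVD splits the shock space into a part pinned down
      # by the restrictions (V1) and a free part (V2) that is drawn randomly.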
R_svd <- svd(R, nu=nrow(R), nv=ncol(R))
U <- R_svd[["u"]]
P_inv <- diag(1/R_svd[["d"]])
V1 <- R_svd[["v"]][,1:v]
V2 <- R_svd[["v"]][,(v+1):s]
eta <- V1 %*% P_inv %*% t(U) %*% r + V2 %*% rnorm(s-v)
eta <- matrix(eta, n.ahead, bigK, byrow=TRUE)
for(ih in 1:n.ahead) {
temp <- matrix(0, bigK, 1)
for(k in 1:ih) {
temp <- temp + irf[, , (ih - k + 1)] %*% t(eta[k , , drop=FALSE])
}
cond_store[irep,,ih] <- pred[,ih,drop=FALSE] + temp
}
if(verbose) setTxtProgressBar(pb, irep)
}
}
#--------------- compute posteriors ----------------------------------------------#
imp_posterior<-array(NA,dim=c(bigK,n.ahead,Q), dimnames=list(varNames,seq(1,n.ahead),paste0("Q",quantiles*100)))
for(qq in 1:Q){
if(flag_cond){
imp_posterior[,,qq] <- apply(cond_store,c(2,3),quantile,quantiles[qq],na.rm=TRUE)
}else{
imp_posterior[,,qq] <- apply(pred_store,c(2,3),quantile,quantiles[qq],na.rm=TRUE)
}
}
#---------------------------------------------------------------------------------#
hold.out <- object$args$hold.out
if(hold.out>n.ahead) hold.out <- n.ahead
yfull <- object$args$yfull
if(hold.out>0){
lps.stats <- array(0,dim=c(bigK,2,hold.out), dimnames=list(colnames(xglobal),c("mean","sd"),seq(1,hold.out)))
lps.stats[,"mean",] <- apply(pred_store[,,1:hold.out],c(2:3),mean,na.rm=TRUE)
lps.stats[,"sd",] <- apply(pred_store[,,1:hold.out],c(2:3),sd,na.rm=TRUE)
hold.out.sample<-yfull[(nrow(yfull)+1-hold.out):nrow(yfull),,drop=FALSE]
}else{
lps.stats<-NULL
hold.out.sample<-NULL
}
#---------------------------------------------------------------------------------#
rownames(xglobal)<-.timelabel(object$args$time)
out <- structure(list(fcast=imp_posterior,
xglobal=xglobal,
n.ahead=n.ahead,
lps.stats=lps.stats,
hold.out.sample=hold.out.sample),
class="bgvar.pred")
if(save.store){
out$pred_store = pred_store
}
if(verbose) cat(paste("\n\nSize of object:", format(object.size(out),unit="MB")))
end.pred <- Sys.time()
diff.pred <- difftime(end.pred,start.pred,units="mins")
mins.pred <- round(diff.pred,0); secs.pred <- round((diff.pred-floor(diff.pred))*60,0)
if(verbose) cat(paste("\nNeeded time for computation: ",mins.pred," ",ifelse(mins.pred==1,"min","mins")," ",secs.pred, " ",ifelse(secs.pred==1,"second.","seconds.\n"),sep=""))
return(out)
}
#' @method print bgvar.pred
#' @export
print.bgvar.pred <- function(x, ...){
cat("---------------------------------------------------------------------------------------")
cat("\n")
cat("Object contains predictions of object estimated with 'bgvar':")
cat("\n")
cat(paste0("Size of posterior containing predictions: ",dim(x$fcast)[[1]]," x ",dim(x$fcast)[[2]]," x ",dim(x$fcast)[[3]],"."))
cat("\n")
cat(paste0("Size ob object: ",format(object.size(x),unit="MB")))
cat("\n")
cat("---------------------------------------------------------------------------------------")
return(invisible(x))
}
#' @export
"lps" <- function(object){
UseMethod("lps", object)
}
#' @name lps
#' @title Compute Log-Predictive Scores
#' @method lps bgvar.pred
#' @description Computes and prints the log-predictive score of an object of class \code{bgvar.pred}.
#' @param object An object of class \code{bgvar.pred}.
#' @param ... Additional arguments.
#' @return Returns an object of class \code{bgvar.lps}, which is a matrix of dimension h times K, whereas h is the forecasting horizon and K is the number of variables in the system.
#' @examples
#' library(BGVAR)
#' data(testdata)
#' model.ssvs.eer<-bgvar(Data=testdata,W=W.test,draws=100,burnin=100,
#' plag=1,prior="SSVS",eigen=TRUE,hold.out=8)
#' fcast <- predict(model.ssvs.eer,n.ahead=8,save.store=TRUE)
#' lps <- lps(fcast)
#' @author Maximilian Boeck, Martin Feldkircher
#' @importFrom stats dnorm
#' @export
lps.bgvar.pred <- function(object, ...){
  hold.out <- object$hold.out.sample
  if(is.null(hold.out)){
    stop("Please submit a forecast object that includes a hold-out sample for evaluation (set hold.out>0 when estimating the model with bgvar)!")
  }
  h <- nrow(hold.out)
  K <- ncol(hold.out)
lps.stats <- object$lps.stats
lps.scores <- matrix(NA,h,K)
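  # Log-predictive scores are evaluated under a normal approximation, using the
  # posterior predictive mean and standard deviation at the realized hold-out values.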
for(i in 1:K){
lps.scores[,i]<-dnorm(hold.out[,i],mean=lps.stats[i,"mean",],sd=lps.stats[i,"sd",],log=TRUE)
}
colnames(lps.scores)<-dimnames(lps.stats)[[1]]
out <- structure(lps.scores, class="bgvar.lps")
return(out)
}
#' @export
"rmse" <- function(object){
UseMethod("rmse", object)
}
#' @name rmse
#' @title Compute Root Mean Squared Errors
#' @method rmse bgvar.pred
#' @description Computes and prints root mean squared errors (RMSEs) of an object of class \code{bgvar.pred}.
#' @param object An object of class \code{bgvar.pred}.
#' @param ... Additional arguments.
#' @return Returns an object of class \code{bgvar.rmse}, which is a matrix of dimension h times K, whereas h is the forecasting horizon and K is the number of variables in the system.
#' @examples
#' library(BGVAR)
#' data(testdata)
#' model.ssvs.eer<-bgvar(Data=testdata,W=W.test,draws=100,burnin=100,
#' plag=1,prior="SSVS",eigen=TRUE,hold.out=8)
#' fcast <- predict(model.ssvs.eer,n.ahead=8,save.store=TRUE)
#' rmse <- rmse(fcast)
#' @author Maximilian Boeck, Martin Feldkircher
#' @importFrom stats dnorm
#' @export
rmse.bgvar.pred <- function(object, ...){
  hold.out <- object$hold.out.sample
  if(is.null(hold.out)){
    stop("Please submit a forecast object that includes a hold-out sample for evaluation (set hold.out>0 when estimating the model with bgvar)!")
  }
  h <- nrow(hold.out)
  K <- ncol(hold.out)
lps.stats <- object$lps.stats
rmse.scores <- matrix(NA,h,K)
for(i in 1:K){
rmse.scores[,i]<-sqrt((hold.out[,i]-lps.stats[i,"mean",])^2)
}
colnames(rmse.scores)<-dimnames(lps.stats)[[1]]
out <- structure(rmse.scores, class="bgvar.rmse")
return(out)
}
#' @method print bgvar.lps
#' @export
print.bgvar.lps<-function(x, ..., resp=NULL){
h <- dim(x)[1]
varNames <- colnames(x)
cN <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(y)y[1]))
vars <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(y)y[2]))
cntry <- round(sapply(cN,function(y)mean(x[grepl(y,colnames(x))])),2)
Ki <- unlist(lapply(cN,function(x)length(grep(x,varNames))))
  bigK <- length(varNames)
cat("---------------------------------------------------------------------------")
cat("\n")
cat("Log-Predictive Density Scores")
cat("\n")
cat(paste0("Available for hold.out times K: ",h," times ",bigK))
cat("\n")
cat(paste0("Total: ", round(sum(x),2)))
cat("\n")
cat("---------------------------------------------------------------------------")
return(invisible(x))
}
#' @method print bgvar.rmse
#' @export
print.bgvar.rmse<-function(x, ..., resp=NULL){
h <- dim(x)[1]
varNames <- colnames(x)
cN <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(y)y[1]))
vars <- unique(sapply(strsplit(varNames,".",fixed=TRUE),function(y)y[2]))
cntry <- round(sapply(cN,function(y)mean(x[grepl(y,colnames(x))])),2)
Ki <- unlist(lapply(cN,function(x)length(grep(x,varNames))))
  bigK <- length(varNames)
cat("---------------------------------------------------------------------------")
cat("\n")
cat("Root-Mean Squared Error")
cat("\n")
cat(paste0("Available for hold.out times K: ",h," times ",bigK))
cat("\n")
cat(paste0("Total: ", round(sum(x),2)))
cat("\n")
cat("---------------------------------------------------------------------------")
return(invisible(x))
}
|
/scratch/gouwar.j/cran-all/cranData/BGVAR/R/predict.R
|
#' @name .getweights
#' @noRd
.getweights <- function(W,Data,OE.weights=NULL,Wex.restr=NULL,variable.list=NULL){
  cN<-names(Data) # country ordering
nn<-lapply(Data,colnames)
exo=which(table(unlist(nn))==1) #exo variables are those that are only in one country model
endo<-which(!table(unlist(nn))==1)
exo.countries<-names(which(sapply(lapply(nn,function(y) names(exo)%in%y),any))) #gives you a vector of exo countries
OE.flag <- FALSE
W.sets<-length(W)
if(is.null(variable.list)){
variable.list<-list();variable.list$vars<-names(endo)
}
#------------------------------------- additional checks -----------------------------------------------------#
if(is.list(W)&&length(W)>1){
if(is.null(variable.list)){
stop("You have submitted more than 1 weight matrix but not specified the according variable sets.")
}
if(length(W)!=length(variable.list)){
stop("You have submitted more than 1 weight matrix but not the same number of variable sets.")
}
if(!all(names(endo)%in%unlist(variable.list))){
stop("You have submitted more than 1 weight matrix but some of the variables are not assigned to a weight matrix by the variable list")
}
}
if(!is.null(OE.weights)){
OE.flag <- TRUE
OE.sets <- length(OE.weights)
OE.cN <- names(OE.weights)
OE.vars <- lapply(OE.weights,function(l)l$variables)
OE <- list()
for(kk in 1:OE.sets){
OE[[OE.cN[kk]]] <- Data[[OE.cN[kk]]]
Data[[OE.cN[kk]]] <- NULL
}
if(!all(unlist(lapply(OE.vars,function(l)l%in%c(names(exo),names(endo)))))){
stop("Please specify for the additional entities variables which are also contained in the data. Please respecify.")
}
OE.exo <- lapply(OE.weights,function(l)l$exo)
for(kk in 1:OE.sets){
if(is.null(OE.exo[[kk]])) OE.exo[[kk]] <- colnames(OE[[kk]])
}
if(!any(sapply(1:OE.sets,function(oo)any(OE.exo[[oo]]%in%OE.vars[[oo]])))){
stop("Please specify the exogenous variables also in the element 'variables'. Please respecify.")
}
OE.weights <- lapply(OE.weights,function(l)l$weights)
if(any(unlist(lapply(OE.weights,function(l)is.null(names(l))||!all(names(l)%in%names(Data)))))){
stop("Either you have not provided names attached to the weights of other entities or the ones you are provided are not contained in the country data. Please respecify.")
}
}
#------------------------------------- build W matrix -----------------------------------------------------#
#make sure that W and Data names are in the same order
cnames <- names(Data)
W <- lapply(W,function(x) x[cnames,cnames])
xglobal <- c()
for(jj in 1:length(Data)){
pretemp <- Data[[jj]];class(pretemp) <- "numeric"
temp <- as.matrix(pretemp[,colSums(is.na(pretemp))<nrow(pretemp)])
colnames(temp) <- paste(cnames[jj],colnames(pretemp),sep=".")
xglobal <- cbind(xglobal,temp)
}
Max.char<-max(nchar(colnames(xglobal)))
cnt.char<-max(nchar(cnames))
# for each country
gW<-list()
for(cc in 1:length(cnames)){
xglobal <- xglobal[,!duplicated(colnames(xglobal))]
#creates a dynamic list of variables
varnames <- substr(colnames(xglobal),cnt.char+2,Max.char); varnames <- varnames[!duplicated(varnames)]
if(!is.null(Wex.restr)){
varnames = varnames[-charmatch(Wex.restr,varnames)]
}
# names of endo variables
endnames <- unlist(lapply(strsplit(colnames(xglobal)[grepl(paste0("^",cnames[[cc]]),colnames(xglobal))],".",fixed=TRUE),
function(l)l[2]))
Wnew <- matrix(0,length(varnames),ncol(xglobal));colnames(Wnew) <- colnames(xglobal);rownames(Wnew) <- varnames
    #----------------------here we specify the part for the weakly exogenous variables-----------------------------------#
# loop over all variables
for (kk in 1:nrow(Wnew)){
# gives you name of countries with this specific variable
var.cntry.indic <- substr(colnames(Wnew),4,Max.char)==rownames(Wnew)[kk]
cntry.indic <- substr(names(Wnew[kk,var.cntry.indic]),1,2)
if(length(cntry.indic)>0){
# this selects the weight matrix according to the variable we want to weight (financial, real, etc)
wghts <- W[[which(sapply(variable.list, function(x) rownames(Wnew)[kk] %in% x))]][cc,]
# this gives the variable name (e.g., y)
Wnew[kk,var.cntry.indic] <- wghts[cntry.indic]
}else{# in case we have exo variables
# this includes the exo variables in all country models and the exo countries
Wnew[kk,var.cntry.indic] = 1
# if all zero then set zero
if(all(W$W[cc,] == 0)) Wnew[kk,var.cntry.indic] = 0
# in case we look at an exo country, where the variable is endog. defined, we have to set the exo variable to zero
if(cnames[[cc]]%in%exo.countries&&paste(cnames[cc],rownames(Wnew)[kk],sep=".")%in%colnames(Wnew)){
Wnew[kk,var.cntry.indic] <- 0
}
}
}
    #----------------------here we specify the part for the endogenous variables------------------------------------------#
endoW <- matrix(0,length(endnames),ncol(Wnew))
endonr <- xglobal[,substr(colnames(xglobal),1,2)==cnames[cc]];rownames(endoW) <- colnames(endonr)
colnames(endoW) <- colnames(Wnew)
namesW <- colnames(endoW)
namesNr <- colnames(endonr)
for (j in 1:nrow(endoW)){
for (i in 1:length(namesW)){
if(namesNr[[j]]==namesW[[i]]){
endoW[j,i]=1
}
}
}
# WfinNR <- WfinNR[!(rowSums(abs(WfinNR)) == 0),] # only chang MB 14/01/23 !!!!
#gW[[cc]]<-apply(WfinNR,2,function(x) x/rowSums(WfinNR))
zero_rows <- apply(Wnew,1,sum) == 0
if(sum(zero_rows) != nrow(Wnew)){
Wnew <- Wnew[!zero_rows,]
Wnew <- apply(Wnew,2,function(x)x/rowSums(Wnew))
}
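    # Stack the identity selection of the domestic (endogenous) variables on top of the
    # row-normalized foreign weights; this yields the country-specific link matrix used
    # to construct the weakly exogenous (starred) variables.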
WfinNR <- rbind(endoW,Wnew)
gW[[cc]] <- WfinNR
}
names(gW)<-cnames
#----------------------here we specify the part for extra weights-----------------------------------------#
if(OE.flag){
for(kk in 1:OE.sets){
temp <- OE[[kk]];class(temp) <- "numeric"
xglobal <- cbind(xglobal,temp);OE.x<-ncol(OE[[kk]]); OEnames <- colnames(OE[[kk]])
colnames(xglobal)[(ncol(xglobal)-OE.x+1):ncol(xglobal)] <- paste(OE.cN[kk],".",OEnames,sep="")
for(i in 1:length(cnames)){
aux<-gW[[cnames[i]]];ii<-nrow(aux)
aux<-rbind(cbind(aux,matrix(0,ncol=OE.x,nrow=nrow(aux))),matrix(0,nrow=length(OE.exo[[kk]]),ncol=c(ncol(aux)+OE.x)))
rownames(aux)[(ii+1):(ii+length(OE.exo[[kk]]))]<-OE.exo[[kk]]
colnames(aux)[(ncol(aux)-OE.x+1):ncol(aux)]<-paste(OE.cN[kk],OEnames,sep=".")
if(length(OE.exo[[kk]])>1){
diag(aux[OE.exo[[kk]],paste(OE.cN[kk],".",OE.exo[[kk]],sep="")])<-1
}else{
aux[OE.exo[[kk]],paste(OE.cN[kk],".",OE.exo[[kk]],sep="")]<-1
}
gW[[cnames[i]]]<-aux
}
# this creates the W matrix for the other entity model
if(!is.null(OE.vars[[kk]])){
Wnew = matrix(0,length(OE.vars[[kk]]),ncol(xglobal))
colnames(Wnew) = colnames(xglobal)
rownames(Wnew) = c(paste(OE.cN[kk],".",OE.vars[[kk]][!OE.vars[[kk]]%in%names(endo)],sep=""),
OE.vars[[kk]][OE.vars[[kk]]%in%names(endo)])
if(OE.x>1){
diag(Wnew[paste(OE.cN[kk],".",OEnames,sep=""),paste(OE.cN[kk],".",OEnames,sep="")])<-1
}else{
Wnew[paste(OE.cN[kk],".",OEnames,sep=""),paste(OE.cN[kk],".",OEnames,sep="")]<-1
}
vars <- OE.vars[[kk]][OE.vars[[kk]]%in%names(endo)]
if(length(vars)>1){
for(i in 1:length(vars)){
Wnew[vars[i],paste(names(OE.weights[[kk]]),".",vars[i],sep="")] <- OE.weights[[kk]]
Wnew[vars[i],]
}
}
      # this creates the part if there is more than one other entity
if(OE.sets>1 && kk < OE.sets){
aux <- Wnew
xx <- lapply(OE[(kk+1):OE.sets],function(l)ncol(l))
xx <- sum(unlist(xx[!OE.cN%in%OE.cN[(kk+1):OE.sets]]))
names <- c()
for(kkk in (kk+1):OE.sets){
names <- c(names,paste(OE.cN[kkk],".",colnames(OE[[kkk]]),sep=""))
}
Wnew <- cbind(aux,matrix(0,ncol=xx,nrow=nrow(aux)))
colnames(Wnew)[(ncol(Wnew)-xx+1):ncol(Wnew)] <- names
}
gW[[(length(gW)+1)]] <- Wnew
names(gW)[length(gW)] <- OE.cN[kk]
}
}
}
#----------------------- return everything to main function ---------------------------------------------#
gW<-gW[cN]
return(list(gW=gW,bigx=xglobal,exo=exo,exo.countries=exo.countries,endo=endo))
}
#' @name .get_V
#' @noRd
.get_V <- function(k=k,M=M,Mstar,plag,plagstar,shrink1,shrink2,shrink3,shrink4,sigma_sq,sigma_wex,trend=FALSE,wexo=TRUE){
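  # Builds the Minnesota-type prior variance matrix for one country model: own and cross
  # lags of the endogenous variables are scaled by (shrink1/pp)^2 and
  # (shrink1*shrink2/pp)^2 * sigma_sq[i]/sigma_sq[j], the weakly exogenous regressors by
  # (shrink1*shrink4/(pp+1))^2 * sigma_sq[i]/sigma_wex[j], and the deterministic terms
  # (constant and, if included, trend) by shrink3 * sigma_sq[i].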
V_i <- matrix(0,k,M)
# endogenous part
for(i in 1:M){
for(pp in 1:plag){
for(j in 1:M){
if(i==j){
#V_i[j+M*(pp-1),i] <- a_bar_1/(pp^2) ######
V_i[j+M*(pp-1),i] <- (shrink1/pp)^2
}else{
#V_i[j+M*(pp-1),i] <- (a_bar_2 * sigma_sq[i])/(pp^2*sigma_sq[j]) #####
V_i[j+M*(pp-1),i] <- (shrink1*shrink2/pp)^2 * (sigma_sq[i]/sigma_sq[j])
}
}
}
}
# exogenous part
if(wexo){
for(i in 1:M){
for(pp in 0:plagstar){
for(j in 1:Mstar){
#V_i[M*p+pp*Mstar+j,i] <- a_bar_4 * sigma_sq[i]/(sigma_wex[j]*(pp+1)) #####
V_i[M*plag+pp*Mstar+j,i] <- (shrink1*shrink4/(pp+1))^2 * (sigma_sq[i]/sigma_wex[j])
}
}
}
}
# deterministics
for(i in 1:M){
if(trend){
V_i[(k-1):k,i] <- shrink3 * sigma_sq[i]
}else{
V_i[k,i] <- shrink3 * sigma_sq[i]
}
}
return(V_i)
}
#' @name .bernoulli
#' @importFrom stats runif
#' @noRd
.bernoulli <- function(p){
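  # Draws a 0/1 indicator that equals 0 with probability p and 1 with probability 1-p
  # (note the inversion relative to a textbook Bernoulli(p) draw). In the SSVS steps
  # below, p is the posterior weight of the tau0/kappa0 component, so a return value of
  # 0 selects that component's prior variance.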
u <- runif(1)
if (u<p){
x=0
}else{
x=1
}
return(x)
}
#' @name .get_nrc
#' @noRd
.get_nrc <- function(k){
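  # Helper returning a (rows, columns) layout suited to displaying k panels, e.g. via
  # par(mfrow = ...).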
if(k==1) return(c(1,1))
if(k==2) return(c(2,1))
if(k%in%c(3,4)) return(c(2,2))
if(k%in%c(5,6)) return(c(3,2))
if(k>6) return(c(3,3))
}
#' @name .atau_post
#' @importFrom stats dgamma dexp
#' @noRd
.atau_post <- function(atau,lambda2,thetas,k,rat=1){
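  # Log conditional posterior of the Normal-Gamma shape parameter: Gamma(atau, atau*lambda2/2)
  # contributions of the local scales 'thetas' plus an exponential prior with rate 'rat'.
  # Used in the random-walk MH updates of A_tau and L_tau below.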
logpost <- sum(dgamma(thetas,atau,(atau*lambda2/2),log=TRUE))+dexp(atau,rate=rat,log=TRUE)
return(logpost)
}
#' @name .BVAR_linear_wrapper
#' @noRd
#' @importFrom utils capture.output
.BVAR_linear_wrapper <- function(cc, cN, xglobal, gW, prior, lags, draws, burnin, trend, SV, thin, default_hyperpara, Ex, use_R, setting_store){
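  # Wrapper around the estimation of a single country model: extracts the country block
  # from xglobal, constructs the weakly exogenous variables via the country weight matrix
  # W, and calls the compiled sampler BVAR_linear, falling back to .BVAR_linear_R if
  # use_R is set or the compiled call fails. The stacked coefficient draws are then split
  # into constant/trend, Lambda (weakly exogenous) and Phi (endogenous lag) blocks and
  # posterior summaries are computed.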
Yraw = xglobal[,substr(colnames(xglobal),1,2)==cN[cc],drop=FALSE]; class(Yraw) = "numeric"
W = gW[[cc]]
Exraw = matrix(NA_real_)
if(!is.null(Ex)) if(cN[cc]%in%names(Ex)) Exraw <- Ex[[cN[cc]]]
all = t(W%*%t(xglobal))
if(ncol(Yraw) == ncol(all)){
Wraw = NULL
Mstar = 0
}else{
Wraw = all[,(ncol(Yraw)+1):ncol(all),drop=FALSE]; class(Wraw) = "numeric"
Mstar = ncol(Wraw)
}
if(all(Wraw==0)){ # case of no exogenous variables -- always uses R version (not implemented in Rcpp)
Wraw = NULL
wexo = FALSE
}else{
wexo = TRUE
}
default_hyperpara$Mstar <- Mstar
prior_in <- ifelse(prior=="MN",1,ifelse(prior=="SSVS",2,ifelse(prior=="NG",3,4)))
if(default_hyperpara[["tau_log"]]){
default_hyperpara["tau_theta"] <- 1/log(ncol(Yraw))
}
# estimation
if(!use_R){
# Rcpp::sourceCpp("./src/BVAR_linear.cpp")
invisible(capture.output(bvar<-try(BVAR_linear(Yraw,Wraw,Exraw,lags,as.integer(draws),as.integer(burnin),
as.integer(thin),TRUE,trend,SV,as.integer(prior_in),
default_hyperpara,setting_store)), type="message"))
}else{
bvar <- structure("message",class=c("try-error","character"))
}
if(is(bvar,"try-error")){
# Rcpp::sourceCpp("./src/do_rgig1.cpp")
bvar<-try(.BVAR_linear_R(Yraw,Wraw,Exraw,lags,draws,burnin,thin,TRUE,trend,SV,
prior_in,default_hyperpara,TRUE,setting_store), silent=TRUE)
}
# error handling
if(inherits(bvar,"try-error")){
message("\nBGVAR incurred an error when estimating the models.\nSee original error message:")
message(paste0("Error occured in countrymodel: ", cN[cc],". Please check."))
message("Error in detail: \n")
message(bvar)
#stop()
}
#------------------------------------------------ get data ----------------------------------------#
Y <- bvar$Y; colnames(Y) <- colnames(Yraw); X <- bvar$X
M <- ncol(Y); bigT <- nrow(Y); K <- ncol(X)
plag <- lags[1]; plagstar <- lags[2]; pmax <- max(lags)
if(!any(is.na(Exraw))) Mex <- ncol(Exraw)
if(wexo){
xnames <- c(paste(rep("Ylag",M),rep(seq(1,plag),each=M),sep=""),rep("Wex",Mstar),
paste(rep("Wexlag",Mstar),rep(seq(1,plagstar),each=Mstar),sep=""))
if(!any(is.na(Exraw))) xnames <- c(xnames,paste(rep("Tex",Mex)))
xnames <- c(xnames,"cons")
if(trend) xnames <- c(xnames,"trend")
xnames_end <- xnames
}else{
xnames <- c(paste0(rep("Ylag",M),rep(seq(1,plag),each=M),sep=""))
if(!any(is.na(Exraw))) xnames <- c(xnames,paste(rep("Tex",Mex)))
xnames <- c(xnames,"cons")
if(trend) xnames <- c(xnames,"trend")
xnames_end <- c(paste(rep("Ylag",M),rep(seq(1,plag),each=M),sep=""),rep("Wex",Mstar),
paste(rep("Wexlag",Mstar),rep(seq(1,plagstar),each=Mstar),sep=""))
if(!any(is.na(Exraw))) xnames_end <- c(xnames_end,paste(rep("Tex",Mex)))
xnames_end <- c(xnames_end,"cons")
if(trend) xnames_end <- c(xnames_end,"trend")
}
colnames(X) <- xnames
#-----------------------------------------get containers ------------------------------------------#
A_store <- bvar$A_store; dimnames(A_store)[[1]] <- xnames_end; dimnames(A_store)[[2]] <- colnames(Y)
# splitting up stores
dims <- dimnames(A_store)[[1]]
a0store <- adrop(A_store[which(dims=="cons"),,,drop=FALSE],drop=1)
a1store <- Exstore <- NULL
if(trend){
a1store <- adrop(A_store[which(dims=="trend"),,,drop=FALSE],drop=1)
}
if(!any(is.na(Exraw))){
Exstore <- A_store[which(dims=="Tex"),,,drop=FALSE]
}
Lambda0store <- A_store[which(dims=="Wex"),,,drop=FALSE]
Lambdastore <- NULL
Phistore <- NULL
for(pp in 1:pmax){
if(pp %in% seq(plag)){
Phistore[[pp]] <- A_store[which(dims==paste("Ylag",pp,sep="")),,,drop=FALSE]
}else{
Phistore[[pp]] <- array(0, c(M, M, draws/thin),
dimnames=list(rep(paste0("Ylag",pp),M),colnames(Y),NULL))
}
if(pp %in% seq(plagstar)){
Lambdastore[[pp]] <- A_store[which(dims==paste("Wexlag",pp,sep="")),,,drop=FALSE]
}else{
Lambdastore[[pp]] <- array(0, c(Mstar, M, draws/thin),
dimnames=list(rep(paste0("Wexlag",pp),Mstar),colnames(Y),NULL))
}
}
SIGMA_store <- array(NA, c(bigT,M,M,draws/thin)); dimnames(SIGMA_store) <- list(NULL,colnames(Y),colnames(Y),NULL)
L_store <- bvar$L_store
for(irep in 1:(draws/thin)){
for(tt in 1:bigT){
if(M>1){
SIGMA_store[tt,,,irep] <- L_store[,,irep]%*%diag(exp(bvar$Sv_store[tt,,irep]))%*%t(L_store[,,irep])
}else{
SIGMA_store[tt,,,irep] <- L_store[,,irep]%*%exp(bvar$Sv_store[tt,,irep])%*%t(L_store[,,irep])
}
}
}
SIGMAmed_store <- apply(SIGMA_store, c(2,3,4), median)
res_store <- bvar$res_store; dimnames(res_store) <- list(NULL,colnames(Y),NULL)
if(SV){
vola_store <- bvar$Sv_store; dimnames(vola_store) <- list(NULL,colnames(Y),NULL)
vola_post <- apply(vola_store,c(1,2),median)
}else{
vola_store <- bvar$Sv_store;
vola_post <- apply(vola_store,c(1,2),median)
}
# additional stuff
if(SV & setting_store$vola_pars){
pars_store <- bvar$pars_store
pars_post <- apply(pars_store,c(1,2),median)
}else{
pars_store <- pars_post <- NULL
}
# MN
if(prior=="MN" & setting_store$shrink_MN){
shrink_store <- bvar$MN$shrink_store; dimnames(shrink_store) <- list(c("shrink1","shrink2","shrink4"),NULL,NULL)
shrink_post <- apply(shrink_store,c(1,2),median)
}else{
shrink_store <- shrink_post <- NULL
}
# SSVS
if(prior=="SSVS" & setting_store$shrink_SSVS){
gamma_store <- bvar$SSVS$gamma_store; dimnames(gamma_store) <- list(colnames(X),colnames(Y),NULL)
omega_store <- bvar$SSVS$omega_store; dimnames(omega_store) <- list(colnames(Y),colnames(Y),NULL)
PIP <- apply(gamma_store,c(1,2),mean)
PIP_omega <- apply(omega_store,c(1,2),mean)
}else{
gamma_store <- omega_store <- PIP <- PIP_omega <- NULL
}
# NG
if(prior=="NG" & setting_store$shrink_NG){
theta_store <- bvar$NG$theta_store; dimnames(theta_store)[[1]] <- colnames(X); dimnames(theta_store)[[2]] <- colnames(Y)
lambda2_store <- bvar$NG$lambda2_store
tau_store <- bvar$NG$tau_store
    dimnames(lambda2_store) <- list(paste("lag",0:plag,sep="_"),c("endogenous","weakly exogenous","covariance"),NULL)
    dimnames(tau_store) <- list(paste("lag",0:plag,sep="_"),c("endogenous","weakly exogenous","covariance"),NULL)
theta_post <- apply(theta_store,c(1,2),median)
lambda2_post <- apply(lambda2_store,c(1,2),median)
tau_post <- apply(tau_store,c(1,2),median)
}else{
theta_store <- lambda2_store <- tau_store <- theta_post <- lambda2_post <- tau_post <- NULL
}
# HS
lambda_A_endo_store <- lambda_A_exo_store <- lambda_L_store <- NULL
nu_A_endo_store <- nu_A_exo_store <- nu_L_store <- NULL
tau_A_endo_store <- tau_A_exo_store <- tau_L_store <- NULL
zeta_A_endo_store <- zeta_A_exo_store <- zeta_L_store <- NULL
lambda_A_endo_post <- lambda_A_exo_post <- lambda_L_post <- NULL
nu_A_endo_post <- nu_A_exo_post <- nu_L_post <- NULL
tau_A_endo_post <- tau_A_exo_post <- tau_L_post <- NULL
zeta_A_endo_post <- zeta_A_exo_post <- zeta_L_post <- NULL
if(prior=="HS" & setting_store$shrink_HS){
lambda_A_endo_store <- bvar$HS$lambda_A_endo_store
lambda_A_exo_store <- bvar$HS$lambda_A_exo_store
lambda_L_store <- bvar$HS$lambda_L_store
nu_A_endo_store <- bvar$HS$nu_A_endo_store
nu_A_exo_store <- bvar$HS$nu_A_exo_store
nu_L_store <- bvar$HS$nu_L_store
tau_A_endo_store <- bvar$HS$tau_A_endo_store
tau_A_exo_store <- bvar$HS$tau_A_exo_store
tau_L_store <- bvar$HS$tau_L_store
zeta_A_endo_store <- bvar$HS$zeta_A_endo_store
zeta_A_exo_store <- bvar$HS$zeta_A_exo_store
zeta_L_store <- bvar$HS$zeta_L_store
lambda_A_endo_post <- apply(lambda_A_endo_store, 1, median)
lambda_A_exo_post <- apply(lambda_A_exo_store, 1, median)
lambda_L_post <- apply(lambda_L_store, 1, median)
nu_A_endo_post <- apply(nu_A_endo_store, 1, median)
nu_A_exo_post <- apply(nu_A_exo_store, 1, median)
nu_L_post <- apply(nu_L_store, 1, median)
tau_A_endo_post <- apply(tau_A_endo_store, 1, median)
tau_A_exo_post <- apply(tau_A_exo_store, 1, median)
tau_L_post <- apply(tau_L_store, 1, median)
zeta_A_endo_post <- apply(zeta_A_endo_store, 1, median)
zeta_A_exo_post <- apply(zeta_A_exo_store, 1, median)
zeta_L_post <- apply(zeta_L_store, 1, median)
}
#------------------------------------ compute posteriors -------------------------------------------#
A_post <- apply(A_store, c(1,2), median)
L_post <- apply(L_store, c(1,2), median)
SIGMA_post <- apply(SIGMA_store,c(1,2,3),median)
S_post <- apply(SIGMA_post,c(1,2),mean)
Sig <- S_post/(bigT-K)
res_post <- apply(res_store,c(1,2),median)
# splitting up posteriors
a0post <- A_post[which(dims=="cons"),,drop=FALSE]
a1post <- Expost <- NULL
if(trend){
a1post <- A_post[which(dims=="trend"),,drop=FALSE]
}
if(!any(is.na(Exraw))){
Expost <- A_post[which(dims=="Tex"),,drop=FALSE]
}
Lambda0post <- A_post[which(dims=="Wex"),,drop=FALSE]
Lambdapost <- NULL
Phipost <- NULL
for(pp in 1:pmax){
if(pp %in% seq(plag)){
Phipost <- rbind(Phipost,A_post[which(dims==paste("Ylag",pp,sep="")),,drop=FALSE])
}else{
Phipost <- rbind(Phipost, matrix(0, M, M, dimnames=list(rep(paste0("Ylag",pp),M),colnames(Y))))
}
if(pp %in% seq(plagstar)){
Lambdapost <- rbind(Lambdapost,A_post[which(dims==paste("Wexlag",pp,sep="")),,drop=FALSE])
}else{
Lambdapost <- rbind(Lambdapost, matrix(0, Mstar, M, dimnames=list(rep(paste0("Wexlag",pp),Mstar),colnames(Y))))
}
}
post <- list(A_post=A_post,a0post=a0post,a1post=a1post,Lambda0post=Lambda0post,Lambdapost=Lambdapost,
Phipost=Phipost,Expost=Expost,S_post=S_post,Sig=Sig,theta_post=theta_post,L_post=L_post,
SIGMA_post=SIGMA_post,
vola_post=vola_post,pars_post=pars_post,res_post=res_post,shrink_post=shrink_post,
PIP=PIP,PIP_omega=PIP_omega,lambda2_post=lambda2_post,tau_post=tau_post,
lambda_A_endo_post=lambda_A_endo_post,lambda_A_exo_post=lambda_A_exo_post,lambda_L_post=lambda_L_post,
nu_A_endo_post=nu_A_endo_post,nu_A_exo_post=nu_A_exo_post,nu_L_post=nu_L_post,
tau_A_endo_post=tau_A_endo_post,tau_A_exo_post=tau_A_exo_post,tau_L_post=tau_L_post,
zeta_A_endo_post=zeta_A_endo_post,zeta_A_exo_post=zeta_A_exo_post,zeta_L_post=zeta_L_post)
store <- list(a0store=a0store,a1store=a1store,Lambda0store=Lambda0store,Lambdastore=Lambdastore,
Phistore=Phistore,Exstore=Exstore,SIGMAmed_store=SIGMAmed_store,
L_store=L_store,theta_store=theta_store,vola_store=vola_store,pars_store=pars_store,
res_store=res_store,shrink_store=shrink_store,gamma_store=gamma_store,omega_store=omega_store,
lambda2_store=lambda2_store,tau_store=tau_store,
lambda_A_endo_store=lambda_A_endo_store,lambda_A_exo_store=lambda_A_exo_store,lambda_L_store=lambda_L_store,
nu_A_endo_store=nu_A_endo_store,nu_A_exo_store=nu_A_exo_store,nu_L_store=nu_L_store,
tau_A_endo_store=tau_A_endo_store,tau_A_exo_store=tau_A_exo_store,tau_L_store=tau_L_store,
zeta_A_endo_store=zeta_A_endo_store,zeta_A_exo_store=zeta_A_exo_store,zeta_L_store=zeta_L_store)
out <- list(Y=Y,X=X,W=W,store=store,post=post)
return(out)
}
#' @name .BVAR_linear_R
#' @importFrom stochvol svsample_fast_cpp specify_priors get_default_fast_sv
#' @importFrom MASS ginv mvrnorm
#' @importFrom methods is
#' @importFrom stats rnorm rgamma runif dnorm
#' @noRd
.BVAR_linear_R <- function(Yraw,Wraw,Exraw,lags,draws,burnin,thin,cons,trend,sv,prior,hyperpara,verbose,setting_store){
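  # Pure R fallback of the country-model Gibbs sampler. Supported priors (argument
  # 'prior'): 1 = Minnesota (MN), 2 = SSVS, 3 = Normal-Gamma (NG), 4 = Horseshoe (HS).
  # Each sweep draws (i) the VAR coefficients equation by equation and the free elements
  # of L, (ii) the prior-specific shrinkage parameters, and (iii) the error variances,
  # either via stochvol (sv = TRUE) or as homoskedastic inverse-Gamma draws.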
#----------------------------------------INPUTS----------------------------------------------------#
plag <- lags[1]
plagstar <- lags[2]
pmax <- max(lags)
Traw <- nrow(Yraw)
M <- ncol(Yraw)
K <- M*plag
Ylag <- .mlag(Yraw,plag)
nameslags <- NULL
for (ii in 1:plag) nameslags <- c(nameslags,rep(paste("Ylag",ii,sep=""),M))
colnames(Ylag) <- nameslags
Mstar <- hyperpara$Mstar
Kstar <- Mstar*(plagstar+1)
wexnames <- rep("Wex",Mstar)
wexnameslags <- NULL
for (ii in 1:plagstar) wexnameslags <- c(wexnameslags,rep(paste("Wexlag",ii,sep=""),Mstar))
if(!is.null(Wraw)){
wexo <- TRUE
Wexlag <- .mlag(Wraw,plagstar)
colnames(Wraw) <- wexnames
colnames(Wexlag) <- wexnameslags
}else{
wexo <- FALSE
Wexlag <- NULL
}
texo <- FALSE; Mex <- 0; exnames <- NULL
if(nrow(Exraw) != 1){
Mex <- ncol(Exraw)
texo <- TRUE
exnames <- rep("Tex",Mex)
colnames(Exraw) <- exnames
}
nameslags_end <- c(nameslags,wexnames,wexnameslags,exnames)
Xraw <- cbind(Ylag,Wraw,Wexlag)
if(texo) Xraw <- cbind(Xraw,Exraw)
X <- Xraw[(pmax+1):nrow(Xraw),,drop=FALSE]
Y <- Yraw[(pmax+1):Traw,,drop=FALSE]
bigT <- nrow(X)
if(cons){
X <- cbind(X,1)
colnames(X)[ncol(X)] <- "cons"
nameslags_end <- c(nameslags_end,"cons")
}
if(trend){
X <- cbind(X,seq(1,bigT))
colnames(X)[ncol(X)] <- "trend"
nameslags_end <- c(nameslags_end,"trend")
}
k <- ncol(X)
k_end <- ncol(Ylag) + Kstar + ifelse(cons,1,0) + ifelse(trend,1,0)
v <- (M*(M-1))/2
n <- K*M
nstar <- Kstar*M
#---------------------------------------------------------------------------------------------------------
# HYPERPARAMETERS
#---------------------------------------------------------------------------------------------------------
prmean <- hyperpara$prmean
a_1 <- hyperpara$a_1
b_1 <- hyperpara$b_1
crit_eig <- hyperpara$crit_eig
Bsigma <- hyperpara$Bsigma
a0 <- hyperpara$a0
b0 <- hyperpara$b0
bmu <- hyperpara$bmu
Bmu <- hyperpara$Bmu
# prior == 1: MN
shrink1 <- hyperpara$shrink1
shrink2 <- hyperpara$shrink2
shrink3 <- hyperpara$shrink3
shrink4 <- hyperpara$shrink4
# prior == 2: SSVS
tau00 <- hyperpara$tau0
tau11 <- hyperpara$tau1
p_i <- hyperpara$p_i
kappa0 <- hyperpara$kappa0
kappa1 <- hyperpara$kappa1
q_ij <- hyperpara$q_ij
# prior == 3: NG
d_lambda <- hyperpara$d_lambda
e_lambda <- hyperpara$e_lambda
tau_theta <- hyperpara$tau_theta
sample_tau <- hyperpara$sample_tau
#---------------------------------------------------------------------------------------------------------
# STORE SETTINGS
#---------------------------------------------------------------------------------------------------------
save_shrink_MN <- setting_store$shrink_MN
save_shrink_SSVS <- setting_store$shrink_SSVS
save_shrink_NG <- setting_store$shrink_NG
save_shrink_HS <- setting_store$shrink_HS
save_vola_pars <- setting_store$vola_pars
#---------------------------------------------------------------------------------------------------------
  # OLS Quantities
#---------------------------------------------------------------------------------------------------------
XtXinv <- try(solve(crossprod(X)),silent=TRUE)
if(is(XtXinv,"try-error")) XtXinv <- ginv(crossprod(X))
A_OLS <- XtXinv%*%(t(X)%*%Y)
E_OLS <- Y - X%*%A_OLS
#a_OLS <- as.vector(A_OLS)
#SSE <- t((Y - X%*%A_OLS))%*%(Y - X%*%A_OLS)
SIGMA_OLS <- crossprod(E_OLS)/(bigT-k)
#IXY <- kronecker(diag(M),(t(X)%*%Y))
#---------------------------------------------------------------------------------------------------------
# Initial Values
#---------------------------------------------------------------------------------------------------------
A_draw <- A_OLS
SIGMA <- array(SIGMA_OLS, c(M,M,bigT))
Em <- Em_str <- E_OLS
L_draw <- diag(M)
L_drawinv <- diag(M)
#---------------------------------------------------------------------------------------------------------
# PRIORS
#---------------------------------------------------------------------------------------------------------
# Priors on VAR coefs
#-----------------------------
# prior mean
A_prior <- matrix(0,k,M)
diag(A_prior) <- prmean
a_prior <- as.vector(A_prior)
# prior variance
theta <- matrix(10,k,M)
# MN stuff
accept1 <- 0
accept2 <- 0
accept4 <- 0
scale1 <- .43
scale2 <- .43
scale4 <- .43
sigma_sq <- matrix(0,M,1) #vector which stores the residual variance
for (i in 1:M){
Ylag_i <- .mlag(Yraw[,i],plag)
Ylag_i <- Ylag_i[(plag+1):nrow(Ylag_i),,drop=FALSE]
Y_i <- Yraw[(plag+1):nrow(Yraw),i,drop=FALSE]
Ylag_i <- cbind(Ylag_i,seq(1,nrow(Y_i)))
alpha_i <- solve(crossprod(Ylag_i))%*%crossprod(Ylag_i,Y_i)
sigma_sq[i,1] <- (1/(nrow(Y_i)-plag-1))*t(Y_i-Ylag_i%*%alpha_i)%*%(Y_i-Ylag_i%*%alpha_i)
}
if(wexo){
sigma_wex <- matrix(0,Mstar,1)
for (j in 1:Mstar){
Ywex_i <- .mlag(Wraw[,j],plagstar)
Ywex_i <- Ywex_i[(plag+1):Traw,]
Yw_i <- Wraw[(plag+1):Traw,j,drop=FALSE]
Ywex_i <- cbind(Ywex_i,seq(1,nrow(Yw_i)))
alpha_w <- solve(crossprod(Ywex_i))%*%t(Ywex_i)%*%Yw_i
sigma_wex[j,1] <- (1/(nrow(Yw_i)-plag-1))*t(Yw_i-Ywex_i%*%alpha_w)%*%(Yw_i-Ywex_i%*%alpha_w)
}
}else{
sigma_wex <- NULL
}
# MN prior
if(prior == 1){
theta <- .get_V(k=k,M=M,Mstar,plag,plagstar,shrink1,shrink2,shrink3,shrink4,sigma_sq,sigma_wex,trend,wexo)
post1 <- sum(dnorm(as.vector(A_draw),a_prior,sqrt(as.vector(theta)),log=TRUE))+dgamma(shrink1,0.01,0.01,log=TRUE)+log(shrink1) # correction term
post2 <- sum(dnorm(as.vector(A_draw),a_prior,sqrt(as.vector(theta)),log=TRUE))+dgamma(shrink2,0.01,0.01,log=TRUE)+log(shrink2) # correction term
post4 <- sum(dnorm(as.vector(A_draw),a_prior,sqrt(as.vector(theta)),log=TRUE))+dgamma(shrink4,0.01,0.01,log=TRUE)+log(shrink4) # correction term
}
# SSVS prior
if(prior == 2){
gamma <- matrix(1,k,M)
sigma_alpha <- sqrt(diag(kronecker(SIGMA_OLS,XtXinv)))
tau0 <- matrix(NA_real_, k, M); tau1 <- matrix(NA_real_, k, M)
ii <- 1
for(mm in 1:M){
for(kk in 1:k){
tau0[kk,mm] <- tau00*sigma_alpha[ii]
tau1[kk,mm] <- tau11*sigma_alpha[ii]
ii <- ii+1
}
}
}
# NG stuff
if(prior == 3){
lambda2_A <- matrix(0.01,pmax+1,2)
A_tau <- matrix(tau_theta,pmax+1,2)
colnames(A_tau) <- colnames(lambda2_A) <- c("endo","exo")
rownames(A_tau) <- rownames(lambda2_A) <- paste("lag.",seq(0,pmax),sep="")
A_tuning <- matrix(.43,pmax+1,2)
A_accept <- matrix(0,pmax+1,2)
lambda2_A[1,1] <- A_tau[1,1] <- A_tuning[1,1] <- A_accept[1,1] <- NA
}
# HS stuff
if(prior == 4){
lambda_A_endo <- nu_A_endo <- rep(1, n)
lambda_A_exo <- nu_A_exo <- rep(1, nstar)
lambda_L <- nu_L <- rep(1, v)
tau_A_endo <- tau_A_exo <- tau_L <- 1
zeta_A_endo <- zeta_A_exo <- zeta_L <- 1
}
#------------------------------------
# Priors on coefs in H matrix of VCV
#------------------------------------
# prior mean
l_prior <- matrix(0,M,M)
# prior variance
L_prior <- matrix(kappa1,M,M)
L_prior[upper.tri(L_prior)] <- 0; diag(L_prior) <- 0
# SSVS
omega <- matrix(1,M,M)
omega[upper.tri(omega)] <- 0; diag(omega) <- 0
# NG
lambda2_L <- 0.01
L_tau <- tau_theta
L_accept <- 0
L_tuning <- .43
#------------------------------------
# SV quantities
#------------------------------------
Sv_draw <- matrix(-3,bigT,M)
pars_var <- matrix(c(-3,.9,.2,-3),4,M,dimnames=list(c("mu","phi","sigma","latent0"),NULL))
Sv_priors <- specify_priors(mu=sv_normal(mean=bmu, sd=Bmu), phi=sv_beta(a0,b0), sigma2=sv_gamma(shape=0.5,rate=1/(2*Bsigma)))
#---------------------------------------------------------------------------------------------------------
# SAMPLER MISCELLANEOUS
#---------------------------------------------------------------------------------------------------------
ntot <- draws+burnin
# thinning
count <- 0
thindraws <- draws/thin
thin.draws <- seq(burnin+1,ntot,by=thin)
#---------------------------------------------------------------------------------------------------------
# STORAGES
#---------------------------------------------------------------------------------------------------------
A_store <- array(0, c(k_end,M,thindraws))
L_store <- array(NA_real_, c(M,M,thindraws))
res_store <- array(NA_real_, c(bigT,M,thindraws))
# SV
Sv_store <- array(NA_real_, c(bigT,M,thindraws))
if(save_vola_pars){
pars_store <- array(NA_real_, c(4,M,thindraws))
}else{
pars_store <- NULL
}
# MN
if(save_shrink_MN){
shrink_store <- array(NA_real_, c(3,1,thindraws))
}else{
shrink_store <- NULL
}
# SSVS
if(save_shrink_SSVS){
gamma_store <- array(0, c(k_end,M,thindraws))
omega_store <- array(NA_real_, c(M,M,thindraws))
}else{
gamma_store <- omega_store <- NULL
}
# NG
if(save_shrink_NG){
theta_store <- array(0, c(k_end,M,thindraws))
lambda2_store<- array(NA_real_, c(pmax+1,3,thindraws))
tau_store <- array(NA_real_, c(pmax+1,3,thindraws))
}else{
theta_store <- lambda2_store <- tau_store <- NULL
}
# HS
if(save_shrink_HS){
lambda_A_endo_store <- array(0, c(n, thindraws))
lambda_A_exo_store <- array(0, c(nstar, thindraws))
lambda_L_store <- array(0, c(v, thindraws))
nu_A_endo_store <- array(0, c(n, thindraws))
nu_A_exo_store <- array(0, c(nstar, thindraws))
nu_L_store <- array(0, c(v, thindraws))
tau_A_endo_store <- array(0, c(1, thindraws))
tau_A_exo_store <- array(0, c(1, thindraws))
tau_L_store <- array(0, c(1, thindraws))
zeta_A_endo_store <- array(0, c(1, thindraws))
zeta_A_exo_store <- array(0, c(1, thindraws))
zeta_L_store <- array(0, c(1, thindraws))
}else{
lambda_A_endo_store <- lambda_A_exo_store <- lambda_L_store <- NULL
nu_A_endo_store <- nu_A_exo_store <- nu_L_store <- NULL
tau_A_endo_store <- tau_A_exo_store <- tau_L_store <- NULL
zeta_A_endo_store <- zeta_A_exo_store <- zeta_L_store <- NULL
}
#---------------------------------------------------------------------------------------------------------
# MCMC LOOP
#---------------------------------------------------------------------------------------------------------
for (irep in 1:ntot){
#----------------------------------------------------------------------------
# Step 1: Sample coefficients
for(mm in 1:M){
A0_draw = A_draw
A0_draw[,mm] <- 0
ztilde <- as.vector((Y - X%*%A0_draw)%*%t(L_drawinv[mm:M,,drop=FALSE])) * exp(-0.5*as.vector(Sv_draw[,mm:M,drop=FALSE]))
xtilde <- (L_drawinv[mm:M,mm,drop=FALSE] %x% X) * exp(-0.5*as.vector(Sv_draw[,mm:M,drop=FALSE]))
V_post <- try(chol2inv(chol(crossprod(xtilde)+diag(1/theta[,mm]))),silent=TRUE)
if(is(V_post,"try-error")) V_post <- try(solve(crossprod(xtilde)+diag(1/theta[,mm])),silent=TRUE)
if(is(V_post,"try-error")) V_post <- ginv(crossprod(xtilde)+diag(1/theta[,mm]))
A_post <- V_post%*%(crossprod(xtilde,ztilde)+diag(1/theta[,mm])%*%A_prior[,mm])
A.draw.i <- try(A_post+t(chol(V_post))%*%rnorm(ncol(X)),silent=TRUE)
if(is(A.draw.i,"try-error")) A.draw.i <- t(mvrnorm(1,A_post,V_post))
A_draw[,mm] <- A.draw.i
Em[,mm] <- Y[,mm]-X%*%A.draw.i
}
rownames(A_draw) <- colnames(X)
# Step 1b: Sample coefficients in L matrix
if(M > 1){
for(mm in 2:M){
eps.m <- Em[,mm]*exp(-0.5*Sv_draw[,mm,drop=TRUE])
eps.x <- Em[,1:(mm-1),drop=FALSE]*exp(-0.5*Sv_draw[,mm,drop=TRUE])
L_post <- try(chol2inv(chol(crossprod(eps.x)+diag(1/L_prior[mm,1:(mm-1)],mm-1,mm-1))),silent=TRUE)
if(is(L_post,"try-error")) L_post <- try(solve(crossprod(eps.x)+diag(1/L_prior[mm,1:(mm-1)],mm-1,mm-1)),silent=TRUE)
if(is(L_post,"try-error")) L_post <- ginv(crossprod(eps.x)+diag(1/L_prior[mm,1:(mm-1)],mm-1,mm-1))
l_post <- L_post%*%(crossprod(eps.x,eps.m)+diag(1/L_prior[mm,1:(mm-1)],mm-1,mm-1)%*%l_prior[mm,1:(mm-1)])
L.draw.i <- try(l_post+t(chol(L_post))%*%rnorm(length(1:(mm-1))),silent=TRUE)
if(is(L.draw.i,"try-error")) L.draw.i <- t(mvrnorm(1,l_post,L_post))
L_draw[mm,1:(mm-1)] <- L.draw.i
}
}
# Step 1c: Compute Em_str
L_drawinv = solve(L_draw)
Em_str = Y%*%t(L_drawinv) - X%*%A_draw%*%t(L_drawinv)
# for (mm in 1:M){
# if (mm==1){
# Y.i <- Y[,mm]*exp(-0.5*Sv_draw[,mm])
# X.i <- X*exp(-0.5*Sv_draw[,mm])
#
# V_post <- try(chol2inv(chol(crossprod(X.i)+diag(1/theta[,mm]))),silent=TRUE)
# if (is(V_post,"try-error")) V_post <- ginv(crossprod(X.i)+diag(1/theta[,mm]))
# A_post <- V_post%*%(crossprod(X.i,Y.i)+diag(1/theta[,mm])%*%A_prior[,mm])
#
# A.draw.i <- try(A_post+t(chol(V_post))%*%rnorm(ncol(X.i)),silent=TRUE)
# if (is(A.draw.i,"try-error")) A.draw.i <- mvrnorm(1,A_post,V_post)
# A_draw[,mm] <- A.draw.i
# Em[,mm] <- Em_str[,mm] <- Y[,mm]-X%*%A.draw.i
# }else{
# Y.i <- Y[,mm]*exp(-0.5*Sv_draw[,mm])
# X.i <- cbind(X,Em[,1:(mm-1)])*exp(-0.5*Sv_draw[,mm])
#
# V_post <- try(chol2inv(chol((crossprod(X.i)+diag(1/c(theta[,mm],L_prior[mm,1:(mm-1)]))))),silent=TRUE)
# if (is(V_post,"try-error")) V_post <- ginv((crossprod(X.i)+diag(1/c(theta[,mm],L_prior[mm,1:(mm-1)]))))
# A_post <- V_post%*%(crossprod(X.i,Y.i)+diag(1/c(theta[,mm],L_prior[mm,1:(mm-1)]))%*%c(A_prior[,mm],l_prior[mm,1:(mm-1)]))
#
# A.draw.i <- try(A_post+t(chol(V_post))%*%rnorm(ncol(X.i)),silent=TRUE)
# if (is(A.draw.i,"try-error")) A.draw.i <- mvrnorm(1,A_post,V_post)
#
# A_draw[,mm] <- A.draw.i[1:ncol(X)]
# Em[,mm] <- Y[,mm]-X%*%A.draw.i[1:ncol(X)]
# Em_str[,mm] <- Y[,mm]-X%*%A.draw.i[1:ncol(X)]-Em[,1:(mm-1),drop=FALSE]%*%A.draw.i[(ncol(X)+1):ncol(X.i),drop=FALSE]
# L_draw[mm,1:(mm-1)] <- A.draw.i[(ncol(X)+1):ncol(X.i)]
# }
# }
# rownames(A_draw) <- colnames(X)
#----------------------------------------------------------------------------
# Step 2: different shrinkage prior setups
# MN
if(prior==1){
#Step for the first shrinkage parameter (own lags)
shrink1.prop <- exp(rnorm(1,0,scale1))*shrink1
theta1.prop <- .get_V(k,M,Mstar,plag,plagstar,shrink1.prop,shrink2,shrink3,shrink4,sigma_sq,sigma_wex,trend,wexo)
post1.prop <- sum(dnorm(as.vector(A_draw),a_prior,sqrt(as.vector(theta1.prop)),log=TRUE))+dgamma(shrink1.prop,0.01,0.01,log=TRUE)+log(shrink1.prop) # correction term
if ((post1.prop-post1)>log(runif(1,0,1))){
shrink1 <- shrink1.prop
theta <- theta1.prop
post1 <- post1.prop
accept1 <- accept1+1
}
#Step for the second shrinkage parameter (cross equation)
shrink2.prop <- exp(rnorm(1,0,scale2))*shrink2
theta2.prop <- .get_V(k,M,Mstar,plag,plagstar,shrink1,shrink2.prop,shrink3,shrink4,sigma_sq,sigma_wex,trend,wexo)
post2.prop <- sum(dnorm(as.vector(A_draw),a_prior,sqrt(as.vector(theta2.prop)),log=TRUE))+dgamma(shrink2.prop,0.01,0.01,log=TRUE)+log(shrink2.prop) # correction term
if ((post2.prop-post2)>log(runif(1,0,1))){
shrink2 <- shrink2.prop
theta <- theta2.prop
post2 <- post2.prop
accept2 <- accept2+1
}
#Step for the final shrinkage parameter (weakly exogenous)
shrink4.prop <- exp(rnorm(1,0,scale4))*shrink4
theta4.prop <- .get_V(k,M,Mstar,plag,plagstar,shrink1,shrink2,shrink3,shrink4.prop,sigma_sq,sigma_wex,trend,wexo)
post4.prop <- sum(dnorm(as.vector(A_draw),a_prior,sqrt(as.vector(theta4.prop)),log=TRUE))+dgamma(shrink4.prop,0.01,0.01,log=TRUE)+log(shrink4.prop)
if((post4.prop-post4)>log(runif(1,0,1))){
shrink4 <- shrink4.prop
theta <- theta4.prop
post4 <- post4.prop
accept4 <- accept4+1
}
if (irep<(0.5*burnin)){
if((accept1/irep)<0.15) scale1 <- 0.99*scale1
if((accept1/irep)>0.3) scale1 <- 1.01*scale1
if((accept2/irep)<0.15) scale2 <- 0.99*scale2
if((accept2/irep)>0.3) scale2 <- 1.01*scale2
if((accept4/irep)<0.15) scale4 <- 0.99*scale4
if((accept4/irep)>0.3) scale4 <- 1.01*scale4
}
}
# SSVS
if(prior==2){
for(mm in 1:M){
for(kk in 1:k){
u_i1 <- dnorm(A_draw[kk,mm],A_prior[kk,mm],tau0[kk,mm]) * p_i
u_i2 <- dnorm(A_draw[kk,mm],A_prior[kk,mm],tau1[kk,mm]) * (1-p_i)
gst <- u_i1/(u_i1 + u_i2)
if(gst=="NaN") gst <- 0
gamma[kk,mm] <- .bernoulli(gst)
gamma[is.na(gamma)] <- 1
if (gamma[kk,mm] == 0){
theta[kk,mm] <- tau0[kk,mm]^2
}else if (gamma[kk,mm] == 1){
theta[kk,mm] <- tau1[kk,mm]^2
}
}
}
if(M>1){
for(mm in 2:M){
for(ii in 1:(mm-1)){
u_ij1 <- dnorm(L_draw[mm,ii],l_prior[mm,ii],kappa0) * q_ij
u_ij2 <- dnorm(L_draw[mm,ii],l_prior[mm,ii],kappa1) * (1-q_ij)
ost <- u_ij1/(u_ij1 + u_ij2)
if(is.na(ost)) ost <- 1
omega[mm,ii] <- .bernoulli(ost)
if (is.na(omega[mm,ii])) omega[mm,ii] <- 1
if(omega[mm,ii]==1){
L_prior[mm,ii] <- kappa1^2
}else{
L_prior[mm,ii] <- kappa0^2
}
}
}
} # END-if M>1
}
# NG
if(prior==3){
# Normal-Gamma for Covariances
if(M>1){
lambda2_L <- rgamma(1,d_lambda+L_tau*v,e_lambda+L_tau/2*sum(L_prior[lower.tri(L_prior)]))
#Step VI: Sample the prior scaling factors for covariances from GIG
for(mm in 2:M){
for(ii in 1:(mm-1)){
temp <- do_rgig1(lambda = L_tau-0.5,
chi = (L_draw[mm,ii] - l_prior[mm,ii])^2,
psi = L_tau*lambda2_L)
temp <- ifelse(temp<1e-7,1e-7,ifelse(temp>1e+7,1e+7,temp))
L_prior[mm,ii] <- temp
}
}
if(sample_tau){
#Sample L_tau through a simple RWMH step
L_tau_prop <- exp(rnorm(1,0,L_tuning))*L_tau
post_L_tau_prop <- .atau_post(atau=L_tau_prop, thetas=L_prior[lower.tri(L_prior)], k=v, lambda2=lambda2_L)
post_L_tau_old <- .atau_post(atau=L_tau, thetas=L_prior[lower.tri(L_prior)], k=v, lambda2=lambda2_L)
post.diff <- post_L_tau_prop-post_L_tau_old+log(L_tau_prop)-log(L_tau)
post.diff <- ifelse(is.nan(post.diff),-Inf,post.diff)
if (post.diff > log(runif(1,0,1))){
L_tau <- L_tau_prop
L_accept <- L_accept+1
}
if (irep<(0.5*burnin)){
if ((L_accept/irep)>0.3) L_tuning <- 1.01*L_tuning
if ((L_accept/irep)<0.15) L_tuning <- 0.99*L_tuning
}
}
} # END-if M>1
    # Normal-Gamma for weakly exogenous
if(wexo){
for(ss in 0:plagstar){
if(ss==0) slct.i <- which(rownames(A_draw)=="Wex") else slct.i <- which(rownames(A_draw)==paste("Wexlag",ss,sep=""))
A.lag.star <- A_draw[slct.i,,drop=FALSE]
A.lag.prior <- A_prior[slct.i,,drop=FALSE]
theta.lag <- theta[slct.i,,drop=FALSE]
if (ss==0){
lambda2_A[ss+1,2] <- rgamma(n = 1,
shape = d_lambda + A_tau[ss+1,2]*Mstar*M,
rate = e_lambda + A_tau[ss+1,2]/2*sum(theta.lag))
}else{
lambda2_A[ss+1,2] <- rgamma(n = 1,
shape = d_lambda + A_tau[ss+1,2]*Mstar^2,
rate = e_lambda + A_tau[ss+1,2]*0.5*prod(lambda2_A[1:ss,2])*sum(theta.lag))
}
for(ii in 1:Mstar){
for(mm in 1:M){
temp <- do_rgig1(lambda = A_tau[ss+1,2]-0.5,
chi = (A.lag.star[ii,mm] - A.lag.prior[ii,mm])^2,
psi = A_tau[ss+1,2]*prod(lambda2_A[1:(ss+1),2]))
temp <- ifelse(temp<1e-7,1e-7,ifelse(temp>1e+7,1e+7,temp))
theta.lag[ii,mm] <- temp
}
}
theta[slct.i,] <- theta.lag
if(sample_tau){
#Sample a_tau through a simple RWMH step (on-line tuning of the MH scaling within the first 50% of the burn-in phase)
A_tau_prop <- exp(rnorm(1,0,A_tuning[ss+1,2]))*A_tau[ss+1,2]
post_A_tau_prop <- .atau_post(atau=A_tau_prop, thetas=as.vector(theta.lag),lambda2 = prod(lambda2_A[1:(ss+1),2]))
post_A_tau_old <- .atau_post(atau=A_tau[ss+1,2], thetas=as.vector(theta.lag),lambda2 = prod(lambda2_A[1:(ss+1),2]))
post.diff <- post_A_tau_prop - post_A_tau_old + log(A_tau_prop) - log(A_tau[ss+1,2])
post.diff <- ifelse(is.nan(post.diff),-Inf,post.diff)
if (post.diff > log(runif(1,0,1))){
A_tau[ss+1,2] <- A_tau_prop
A_accept[ss+1,2] <- A_accept[ss+1,2]+1
}
if (irep<(0.5*burnin)){
if ((A_accept[ss+1,2]/irep)>0.3) A_tuning[ss+1,2] <- 1.01*A_tuning[ss+1,2]
if ((A_accept[ss+1,2]/irep)<0.15) A_tuning[ss+1,2] <- 0.99*A_tuning[ss+1,2]
}
}
}
}
# Normal-Gamma for endogenous variables
for(ss in 1:plag){
slct.i <- which(rownames(A_draw)==paste("Ylag",ss,sep=""))
A.lag <- A_draw[slct.i,,drop=FALSE]
A.prior <- A_prior[slct.i,,drop=FALSE]
theta.lag <- theta[slct.i,,drop=FALSE]
if (ss==1){
lambda2_A[ss+1,1] <- rgamma(n = 1,
shape = d_lambda + A_tau[ss+1,1]*M^2,
rate = e_lambda + A_tau[ss+1,1]/2*sum(theta.lag))
}else{
lambda2_A[ss+1,1] <- rgamma(n = 1,
shape = d_lambda + A_tau[ss+1,1]*M^2,
rate = e_lambda + A_tau[ss+1,1]/2*prod(lambda2_A[2:(ss+1),1])*sum(theta.lag))
}
for(ii in 1:M){
for(mm in 1:M){
temp <- do_rgig1(lambda = A_tau[ss+1,1] - 0.5,
chi = (A.lag[ii,mm] - A.prior[ii,mm])^2,
psi = A_tau[ss+1,1]*prod(lambda2_A[2:(ss+1),1]))
temp <- ifelse(temp<1e-7,1e-7,ifelse(temp>1e+7,1e+7,temp))
theta.lag[ii,mm] <- temp
}
}
theta[slct.i,] <- theta.lag
if (sample_tau){
#Sample a_tau through a simple RWMH step (on-line tuning of the MH scaling within the first 50% of the burn-in phase)
A_tau_prop <- exp(rnorm(1,0,A_tuning[ss+1,1]))*A_tau[ss+1,1]
post_A_tau_prop <- .atau_post(atau=A_tau_prop, thetas=as.vector(theta.lag),lambda2=prod(lambda2_A[2:(ss+1),1]))
post_A_tau_old <- .atau_post(atau=A_tau[ss+1,1], thetas=as.vector(theta.lag),lambda2=prod(lambda2_A[2:(ss+1),1]))
post.diff <- post_A_tau_prop-post_A_tau_old+log(A_tau_prop)-log(A_tau[ss+1,1])
post.diff <- ifelse(is.nan(post.diff),-Inf,post.diff)
if (post.diff > log(runif(1,0,1))){
A_tau[ss+1,1] <- A_tau_prop
A_accept[ss+1,1] <- A_accept[ss+1,1]+1
}
if (irep<(0.5*burnin)){
if ((A_accept[ss+1,1]/irep)>0.3) A_tuning[ss+1,1] <- 1.01*A_tuning[ss+1,1]
if ((A_accept[ss+1,1]/irep)<0.15) A_tuning[ss+1,1] <- 0.99*A_tuning[ss+1,1]
}
}
}
}
if(prior == 4){
# local shrinkage parameters - L
lambda_L = 1 / rgamma(n = v,
shape = 1,
rate = 1 / nu_L + 0.5 * as.vector(L_draw[lower.tri(L_draw)])^2 / tau_L)
nu_L = 1 /rgamma(n = v,
shape = 1,
rate = 1 + 1 / lambda_L)
# global shrinkage parameter - L
WSSR_L = sum(as.vector(L_draw[lower.tri(L_draw)])^2 / lambda_L)
tau_L = 1 / rgamma(n = 1,
shape = (v + 1)/2,
rate = 1 / zeta_L + 0.5*WSSR_L)
zeta_L = 1 / rgamma(n = 1,
shape = 1,
rate = 1 + 1 / tau_L)
# update prior VCV
L_prior[lower.tri(L_prior)] = tau_L * lambda_L
############# - A endo
slct.i <- grep("Ylag",rownames(A_draw))
# local shrinkage parameter - A endo
lambda_A_endo = 1 / rgamma(n = n,
shape = 1,
rate = 1 / nu_A_endo + 0.5 * as.vector(A_draw[slct.i,])^2 / tau_A_endo)
nu_A_endo = 1 / rgamma(n = n,
shape = 1,
rate = 1 + 1 / lambda_A_endo)
# global shrinkage parameter - A endo
WSSR_A_endo = sum(as.vector(A_draw[slct.i,])^2 / lambda_A_endo)
tau_A_endo = 1 / rgamma(n = 1,
shape = (n + 1)/2,
rate = 1 / zeta_A_endo + 0.5*WSSR_A_endo)
zeta_A_endo = 1 / rgamma(n = 1,
shape = 1,
rate = 1 + 1 / tau_A_endo)
# update prior VCV
theta[slct.i,] <- tau_A_endo * lambda_A_endo
      ############# - A exo
if(wexo){
slct.w <- grep("Wex", rownames(A_draw))
# local shrinkage parameter - A exo
lambda_A_exo = 1 / rgamma(n = nstar,
shape = 1,
rate = 1 / nu_A_exo + 0.5 * as.vector(A_draw[slct.w,])^2 / tau_A_exo)
nu_A_exo = 1 / rgamma(n = nstar,
shape = 1,
rate = 1 + 1 / lambda_A_exo)
# global shrinkage parameter - A exo
WSSR_A_exo = sum(as.vector(A_draw[slct.w,])^2 / lambda_A_exo)
tau_A_exo = 1 / rgamma(n = 1,
shape = (nstar + 1)/2,
                               rate = 1 / zeta_A_exo + 0.5*WSSR_A_exo)
zeta_A_exo = 1 / rgamma(n = 1,
shape = 1,
rate = 1 + 1 / tau_A_exo)
# update prior VCV
theta[slct.w,] <- tau_A_exo * lambda_A_exo
}
}
#----------------------------------------------------------------------------
# Step 3: Sample variances
if(sv){
for (mm in 1:M){
para <- as.list(pars_var[,mm])
para$nu = Inf; para$rho=0; para$beta<-0
svdraw <- svsample_fast_cpp(y=Em_str[,mm], draws=1, burnin=0, designmatrix=matrix(NA_real_),
priorspec=Sv_priors, thinpara=1, thinlatent=1, keeptime="all",
startpara=para, startlatent=Sv_draw[,mm],
keeptau=FALSE, print_settings=list(quiet=TRUE, n_chains=1, chain=1),
correct_model_misspecification=FALSE, interweave=TRUE, myoffset=0,
fast_sv=get_default_fast_sv())
para$mu <- svdraw$para[1,"mu"]
para$phi <- svdraw$para[1,"phi"]
para$sigma <- svdraw$para[1,"sigma"]
para$latent0 <- svdraw$latent0[1,"h_0"]
pars_var[,mm] <- unlist(para[c("mu","phi","sigma","latent0")])
Sv_draw[,mm] <- svdraw$latent[1,]
}
}else{
for (jj in 1:M){
S_1 <- a_1+bigT/2
S_2 <- b_1+crossprod(Em_str[,jj])/2
sig_eta <- 1/rgamma(1,S_1,S_2)
Sv_draw[,jj] <- log(sig_eta)
}
}
#----------------------------------------------------------------------------
# Step 4: store draws
if(irep %in% thin.draws){
count <- count+1
A_tmp <- matrix(0, k_end, M)
if(wexo){
A_tmp <- A_draw
}else{
A_tmp[1:K,] <- A_draw[1:K,]
if(cons) A_tmp[K+Kstar+1,] <- A_draw[K+1,]
if(trend) A_tmp[K+Kstar+ifelse(cons,1,0)+1,] <- A_draw[K+ifelse(cons,1,0)+1,]
}
A_store[,,count] = A_tmp
L_store[,,count] = L_draw
res_store[,,count] = Y-X%*%A_draw
# SV
Sv_store[,,count] = Sv_draw
if(save_vola_pars){
pars_store[,,count] = pars_var
}
# MN
if(save_shrink_MN){
shrink_store[,,count] = c(shrink1,shrink2,shrink4)
}
# SSVS
if(save_shrink_SSVS){
gamma_store[,,count] = gamma
omega_store[,,count] = omega
}
# NG
if(save_shrink_NG){
theta_store[,,count] = theta
lambda2_store[1,3,count] = lambda2_L
lambda2_store[1:(plag+1),1:2,count] = lambda2_A
tau_store[1,3,count] = L_tau
tau_store[1:(plag+1),1:2,count] = A_tau
}
# HS
if(save_shrink_HS){
lambda_A_endo_store[,count] = lambda_A_endo
lambda_A_exo_store[,count] = lambda_A_exo
lambda_L_store[,count] = lambda_L
nu_A_endo_store[,count] = nu_A_endo
nu_A_exo_store[,count] = nu_A_exo
nu_L_store[,count] = nu_L
tau_A_endo_store[,count] = tau_A_endo
tau_A_exo_store[,count] = tau_A_exo
tau_L_store[,count] = tau_L
zeta_A_endo_store[,count] = zeta_A_endo
zeta_A_exo_store[,count] = zeta_A_exo
zeta_L_store[,count] = zeta_L
}
}
}
#---------------------------------------------------------------------------------------------------------
# END ESTIMATION
#---------------------------------------------------------------------------------------------------------
dimnames(A_store)=list(nameslags_end,colnames(A_OLS),NULL)
ret <- list(Y=Y,X=X,A_store=A_store,L_store=L_store,Sv_store=Sv_store,pars_store=pars_store,res_store=res_store,
MN=list(
shrink_store=shrink_store
),
SSVS=list(
gamma_store=gamma_store,
omega_store=omega_store),
NG=list(
theta_store=theta_store,
lambda2_store=lambda2_store,
tau_store=tau_store),
HS=list(
lambda_A_endo_store=lambda_A_endo_store,
lambda_A_exo_store=lambda_A_exo_store,
lambda_L_store=lambda_L_store,
nu_A_endo_store=nu_A_endo_store,
nu_A_exo_store=nu_A_exo_store,
nu_L_store=nu_L_store,
tau_A_endo_store=tau_A_endo_store,
tau_A_exo_store=tau_A_exo_store,
tau_L_store=tau_L_store,
zeta_A_endo_store=zeta_A_endo_store,
zeta_A_exo_store=zeta_A_exo_store,
zeta_L_store=zeta_L_store
))
return(ret)
}
#' @name .gvar.stacking.wrapper
#' @importFrom stats median
#' @importFrom utils memory.limit
#' @noRd
.gvar.stacking.wrapper<-function(xglobal,plag,globalpost,draws,thin,trend,eigen,trim,verbose){
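  # Wrapper around the Rcpp routine gvar_stacking that assembles the global GVAR
  # representation; if the call fails due to exhausted memory, OS-specific advice on
  # enlarging the vector heap (or increasing the thinning factor) is printed instead.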
results <- tryCatch(
{
bigT <- nrow(xglobal)
bigK <- ncol(xglobal)
cN <- names(globalpost)
thindraws <- draws/thin
F_large <- array(NA, dim=c(bigK,bigK,plag,thindraws))
trim.info <- "No trimming"
## call Rcpp
# Rcpp::sourceCpp("./src/gvar_stacking.cpp")
out <- gvar_stacking(xglobal = xglobal,
plag = as.integer(plag),
globalpost = globalpost,
draws = as.integer(draws),
thin = as.integer(thin),
trend = trend,
eigen = TRUE,
verbose = verbose)
A_large <- out$A_large
for(pp in 1:plag){
F_large[,,pp,] <- out$F_large[,((bigK*(pp-1))+1):(bigK*pp),,drop=FALSE]
}
S_large <- out$S_large
Ginv_large <- out$Ginv_large
F.eigen <- out$F_eigen
dimnames(S_large)[[1]]<-dimnames(S_large)[[2]]<-dimnames(Ginv_large)[[1]]<-dimnames(Ginv_large)[[2]]<-dimnames(A_large)[[1]]<-colnames(xglobal)
names <- c(paste(rep(colnames(xglobal),plag),".",rep(seq(1,plag),each=bigK),sep=""),"cons")
if(trend) names <- c(names,"trend")
dimnames(A_large)[[2]]<-names
      # kick out unstable draws
if(eigen){
idx<-which(F.eigen<trim)
F_large <- F_large[,,,idx,drop=FALSE]
S_large <- S_large[,,idx,drop=FALSE]
Ginv_large <- Ginv_large[,,idx,drop=FALSE]
A_large <- A_large[,,idx,drop=FALSE]
F.eigen <- F.eigen[idx]
if(length(idx)<10){
stop("Less than 10 stable draws have been found. Please re-estimate the model.")
}
trim.info <- round((length(idx)/thindraws)*100,2)
trim.info <- paste("Trimming leads to ",length(idx) ," (",trim.info,"%) stable draws out of ",thindraws," total draws.",sep="")
}
results<-list(S_large=S_large,F_large=F_large,Ginv_large=Ginv_large,A_large=A_large,F.eigen=F.eigen,trim.info=trim.info)
},
error=function(cond){
if(cond$message == "vector memory exhausted (limit reached?)"){
message("BGVAR incurred an error due to memory exhaustiveness.\n See original error message:")
message(cond)
if(.get_os() == "osx"){
message("\nWe advise you to do the following on Mac OS: The enviornment variable R_MAX_VSIZE can be adjusted to to specify the maximal vector heap size.")
message("\nCurrent value of R_MAX_VSIZE: ", Sys.getenv("R_MAX_VSIZE"))
message("\nAdjust this parameter as follows: Sys.setenv(R_MAX_VSIZE='100GB')")
}else if(.get_os() == "windows"){
message("\nWe advise you to do the following on Windows OS: The memory limit can be adjusted to specify the maximal vector heap size.")
message("\nCurrent value of memory.limit(): ", memory.limit())
message("\nAdjust this parameter as follows: memory.limit(size=100000).")
}else if(.get_os() == "linux"){
message("\nWe advise you to do the following on Linux: The memory limit can be adjusted to specify the maximal vector heap size. The maximal vector heap size of physical and virtual memory is set to the maximum of 16Gb. \n Adjust this parameter as follows: unix::rlimit_as(Inf).")
}
message("\n In any case, increasin the thinning factor (argument 'thin' of 'bgvar') reduces memory requirements.")
}
return(NULL)
},
warning=function(cond){},
finally={}
)
return(results)
}
#' @name .gvar.stacking
#' @importFrom abind adrop
#' @importFrom Matrix bdiag
#' @importFrom stats median
#' @importFrom utils txtProgressBar setTxtProgressBar
#' @noRd
.gvar.stacking<-function(xglobal,plag,globalpost,draws,thin,trend,eigen=FALSE,trim=NULL,verbose=TRUE){
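  # R implementation of the GVAR stacking step: for each retained draw, the country
  # solutions (Lambda0, Phi, Lambda) are combined with the country weight matrices W into
  # the global matrices G and H_p, solved with G^{-1}, and the global coefficient,
  # covariance and companion matrices are stored. If 'trim' is supplied, draws whose
  # companion matrix has a maximum absolute eigenvalue above 'trim' are discarded.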
# initialize objects here
bigT <- nrow(xglobal)
bigK <- ncol(xglobal)
cN <- names(globalpost)
thindraws <- draws/thin
F.eigen <- numeric(thindraws)
trim.info <- "No trimming"
A_large <- array(NA_real_, dim=c(bigK,bigK*plag+1+ifelse(trend,1,0),thindraws))
S_large <- array(NA_real_, dim=c(bigK,bigK,thindraws))
Ginv_large <- array(NA_real_, dim=c(bigK,bigK,thindraws))
F_large <- array(NA_real_, dim=c(bigK,bigK,plag,thindraws))
dimnames(S_large)[[1]]<-dimnames(S_large)[[2]]<-dimnames(Ginv_large)[[1]]<-dimnames(Ginv_large)[[2]]<-dimnames(A_large)[[1]]<-colnames(xglobal)
pb <- txtProgressBar(min = 0, max = thindraws, style = 3)
for (irep in 1:thindraws){
a0 <- NULL
a1 <- NULL
G <- NULL
x <- NULL
S_post <- list()
for (cc in 1:length(cN)){
VAR <- globalpost[[cc]]
W <- VAR$W
A <- cbind(diag(ncol(VAR$Y)),-t(adrop(VAR$store$Lambda0store[,,irep,drop=FALSE],drop=3)))
for(pp in 1:plag){
assign(paste("B",pp,sep=""),cbind(t(adrop(VAR$store$Phistore[[pp]][,,irep,drop=FALSE],drop=3)),
t(adrop(VAR$store$Lambdastore[[pp]][,,irep,drop=FALSE],drop=3))))
if(cc==1) assign(paste("H",pp,sep=""), get(paste("B",pp,sep=""))%*%W)
if(cc>1) assign(paste("H",pp,sep=""), rbind(get(paste("H",pp,sep="")),get(paste("B",pp,sep=""))%*%W))
}
G <- rbind(G,A%*%W)
a0 <- rbind(a0,VAR$store$a0store[,irep,drop=FALSE])
if(trend) a1 <- rbind(a1,VAR$store$a1store[,irep,drop=FALSE])
S_post[[cc]] <- adrop(VAR$store$SIGMAmed_store[,,irep,drop=FALSE],drop=3)
}
G.inv <- solve(G)
S_large[,,irep] <- as.matrix(bdiag(S_post))
b0 <- G.inv%*%a0
if(trend) b1 <- G.inv%*%a1 else b1 <- NULL
Ginv_large[,,irep] <- G.inv
ALPHA <- NULL
for (kk in 1:plag){
assign(paste("F",kk,sep=""),G.inv%*%get(paste("H",kk,sep="")))
F_large[,,kk,irep] <- get(paste("F",kk,sep=""))
ALPHA <- cbind(ALPHA,F_large[,,kk,irep])
}
ALPHA <- cbind(ALPHA,b0,b1)
A_large[,,irep]<-ALPHA
if(eigen){
MM <- .get_companion(ALPHA,c(ncol(xglobal),ifelse(trend,2,1),plag))$MM
aux <- suppressWarnings(eigen(MM[1:(bigK*plag),1:(bigK*plag)]))
F.eigen[irep] <- max(abs(Re(aux$values)))
}
# if(stats){
# X_large <- cbind(.mlag(xglobal,plag),1)
# if(trend) X_large <- cbind(X_large,seq(1:nrow(X_large)))
# Y_large <- xglobal[(plag+1):nrow(xglobal),]
# X_large <- X_large[(plag+1):nrow(X_large),]
# globalLik[irep] <- .globalLik(Y=Y_large,X=X_large,Sig=G.inv%*%S_large[irep,,]%*%t(G.inv),ALPHA=ALPHA,bigT=bigT-plag)
# }
if(verbose) setTxtProgressBar(pb, irep)
}
  # kick out unstable draws
if(!is.null(trim)){
if(trim==TRUE) trim <- 1.05
idx<-which(F.eigen<trim)
F_large <- F_large[,,,idx,drop=FALSE]
S_large <- S_large[,,idx,drop=FALSE]
Ginv_large <- Ginv_large[,,idx,drop=FALSE]
A_large <- A_large[,,idx,drop=FALSE]
F.eigen <- F.eigen[idx]
if(length(idx)<10){
stop("Less than 10 stable draws have been found. Please re-estimate the model.")
}
trim.info <- round((length(idx)/thindraws)*100,2)
trim.info <- paste("Trimming leads to ",length(idx) ," (",trim.info,"%) stable draws out of ",thindraws," total draws.",sep="")
}
results<-list(S_large=S_large,F_large=F_large,Ginv_large=Ginv_large,A_large=A_large,F.eigen=F.eigen,trim.info=trim.info)
return(results)
}
#' @name .get_companion
#' @noRd
.get_companion <- function(Beta_,varndxv){
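  # Builds the companion-form matrix MM of the stacked VAR(1) representation from the
  # coefficient matrix Beta_ and varndxv = c(#variables, #deterministics, #lags); Jm is
  # the selection matrix picking out the contemporaneous block.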
  nn <- varndxv[[1]] # number of variables
  nd <- varndxv[[2]] # number of deterministics
  nl <- varndxv[[3]] # number of lags
nkk <- nn*nl+nd
Jm <- matrix(0,nkk,nn)
Jm[1:nn,1:nn] <- diag(nn)
if (nd>0){
MM <- rbind(Beta_,cbind(diag((nl-1)*nn), matrix(0,(nl-1)*nn,nn+nd)),cbind(matrix(0,nd,nn*nl),diag(nd)))
}else{
MM <- rbind(Beta_,cbind(diag((nl-1)*nn),matrix(0,(nl-1)*nn,nn)))
}
return(list(MM=MM,Jm=Jm))
}
#' @name .get_os
#' @noRd
.get_os <- function(){
sysinf <- Sys.info()
if (!is.null(sysinf)){
os <- sysinf['sysname']
if (os == 'Darwin')
os <- "osx"
} else { ## mystery machine
os <- .Platform$OS.type
if (grepl("^darwin", R.version$os))
os <- "osx"
if (grepl("linux-gnu", R.version$os))
os <- "linux"
}
return(tolower(os))
}
#' @name .globalLik
#' @noRd
.globalLik <- function(Y,X,Sig,ALPHA,bigT){
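  # Global (predictive) log-likelihood: sum of multivariate normal log-densities of the
  # rows of Y around X %*% t(ALPHA) with covariance Sig, evaluated via dmvnrm_arma_fast.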
PLS <- sum(dmvnrm_arma_fast(Y,X%*%t(ALPHA),Sig,TRUE))
return(PLS)
}
#' @name .avg.shrink
#' @noRd
.avg.shrink <- function(country.shrink,prior){
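  # Strips the country prefix from the country-specific shrinkage/inclusion matrices,
  # aligns them on a common variable grid and averages across countries; returns both the
  # country-wise matrices (with all-zero rows/columns dropped) and the cross-country mean.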
cN <- names(country.shrink)
N <- length(cN)
colNames <- lapply(country.shrink,colnames)
varNames <- lapply(country.shrink,rownames)
for(cc in 1:N) {
colNames[[cc]] <- gsub(paste(cN[cc],"\\.",sep=""),"",colNames[[cc]])
varNames[[cc]] <- gsub(paste(cN[cc],"\\.",sep=""),"",varNames[[cc]])
}
colNames <- unique(unlist(colNames))
varNames <- unique(unlist(varNames))
shrink <- array(NA,dim=c(length(varNames),length(colNames),N))
dimnames(shrink)[[1]] <- varNames; dimnames(shrink)[[2]] <- colNames; dimnames(shrink)[[3]] <- cN
for(cc in 1:N){
aux <- country.shrink[[cc]]
rownames(aux)<-gsub(paste(cN[cc],"\\.",sep=""),"",rownames(aux))
colnames(aux)<-gsub(paste(cN[cc],"\\.",sep=""),"",colnames(aux))
for(z in 1:ncol(aux)){
shrink[rownames(aux),colnames(aux)[z],cc] <- aux[,z]
}
}
avg.shrink <- apply(shrink,c(1,2),function(x) mean(x,na.rm=TRUE))
idx1 <- apply(shrink,3,function(x) which(colSums(x,na.rm=TRUE)==0))
idx2 <- apply(shrink,3,function(x) which(rowSums(x,na.rm=TRUE)==0))
shrink2 <- list()
for(cc in 1:N){
shrink2[[cc]] <- shrink[,,cc]
idx1.cc <- ifelse(length(idx1)>0,idx1[[cc]],integer(0))
idx2.cc <- ifelse(length(idx2)>0,idx2[[cc]],integer(0))
if(length(idx1.cc)>0){
shrink2[[cc]] <- shrink2[[cc]][,-idx1.cc,drop=FALSE]
}
if(length(idx2.cc)>0){
shrink2[[cc]] <- shrink2[[cc]][-idx2.cc,,drop=FALSE]
}
}
names(shrink2) <- cN
return(list(PIP.cc=shrink2,PIP.avg=avg.shrink))
}
#' @name .construct.arglist
#' @noRd
.construct.arglist = function (funobj, envir = NULL){
namedlist = formals(funobj)
argnames = names(namedlist)
if (!is.environment(envir))
envir = sys.frame(-1)
for (argn in 1:length(namedlist)) {
testval = as.logical(try(exists(argnames[argn], envir = envir),
silent = TRUE))
if (is.na(testval))
testval = FALSE
if (testval) {
testout = try(get(argnames[argn], envir = envir),silent = TRUE)
if (is.null(testout)) {
namedlist[[argn]] = "list(NULL)blabla"
} else {
namedlist[[argn]] = testout
}
}
}
namedlist = lapply(namedlist,function(x) if (any(x=="list(NULL)blabla")) NULL else x)
lapply(namedlist, function(l) if(any(l=="list(NULL)blabla")){NULL}else{l})
return(namedlist)
}
#' @name .mlag
#' @noRd
.mlag <- function(X,lag){
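  # Returns a Traw x (lag*N) matrix holding the first 'lag' lags of X; the first 'lag'
  # rows are left at zero. For example, .mlag(matrix(1:6, ncol = 2), 1) yields a 3 x 2
  # matrix whose rows 2:3 contain the first lag of the two columns.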
p <- lag
X <- as.matrix(X)
Traw <- nrow(X)
N <- ncol(X)
Xlag <- matrix(0,Traw,p*N)
for (ii in 1:p){
Xlag[(p+1):Traw,(N*(ii-1)+1):(N*ii)] <- X[(p+1-ii):(Traw-ii),(1:N)]
}
colnames(Xlag) <- paste(colnames(X),".lag",rep(seq(p),each=N),sep="")
return(Xlag)
}
#' @name .timelabel
#' @noRd
.timelabel <- function(time){
time <- as.character(time)
years <- regmatches(time,regexpr("^[0-9]{4}",time))
freq <- sum(years%in%unique(years)[2])
xlabel <- gsub("-[0-9]{2}$","",time)
if(freq==12){
xlabel<-gsub("-01","-Jan",xlabel,fixed=TRUE)
xlabel<-gsub("-02","-Feb",xlabel,fixed=TRUE)
xlabel<-gsub("-03","-Mar",xlabel,fixed=TRUE)
xlabel<-gsub("-04","-Apr",xlabel,fixed=TRUE)
xlabel<-gsub("-05","-May",xlabel,fixed=TRUE)
xlabel<-gsub("-06","-Jun",xlabel,fixed=TRUE)
xlabel<-gsub("-07","-Jul",xlabel,fixed=TRUE)
xlabel<-gsub("-08","-Aug",xlabel,fixed=TRUE)
xlabel<-gsub("-09","-Sep",xlabel,fixed=TRUE)
xlabel<-gsub("-10","-Oct",xlabel,fixed=TRUE)
xlabel<-gsub("-11","-Nov",xlabel,fixed=TRUE)
xlabel<-gsub("-12","-Dec",xlabel,fixed=TRUE)
}
if(freq==4){
xlabel<-gsub("-01"," Q1",xlabel,fixed=TRUE)
xlabel<-gsub("-02"," Q1",xlabel,fixed=TRUE)
xlabel<-gsub("-03"," Q1",xlabel,fixed=TRUE)
xlabel<-gsub("-04"," Q2",xlabel,fixed=TRUE)
xlabel<-gsub("-05"," Q2",xlabel,fixed=TRUE)
xlabel<-gsub("-06"," Q2",xlabel,fixed=TRUE)
xlabel<-gsub("-07"," Q3",xlabel,fixed=TRUE)
xlabel<-gsub("-08"," Q3",xlabel,fixed=TRUE)
xlabel<-gsub("-09"," Q3",xlabel,fixed=TRUE)
xlabel<-gsub("-10"," Q4",xlabel,fixed=TRUE)
xlabel<-gsub("-11"," Q4",xlabel,fixed=TRUE)
xlabel<-gsub("-12"," Q4",xlabel,fixed=TRUE)
}
return(xlabel)
}
#' @name .irf.sign.zero
#' @importFrom abind abind
#' @importFrom MASS Null
#' @importFrom stats rnorm
#' @noRd
.irf.sign.zero <- function(xdat,plag,n.ahead,Ginv,Fmat,Smat,shocklist,...){
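  # Identification via sign and zero restrictions: a block-diagonal Cholesky factor P0G
  # is combined with candidate rotation matrices Q, drawn per country block either as the
  # QR factor of a Gaussian matrix (no zero restrictions) or via a null-space construction
  # (cf. the ARRW definitions referenced below). Candidates are drawn until the sign
  # restrictions in shocklist$S.cube are satisfied or MaxTries is reached, in which case
  # NA impulses are returned.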
bigT <- nrow(xdat)
bigK <- ncol(xdat)
varNames <- colnames(xdat)
shock.idx <- shocklist$shock.idx
shock.cidx <- shocklist$shock.cidx
MaxTries <- shocklist$MaxTries
S.cube <- shocklist$S.cube
P.cube <- shocklist$P.cube
Z.cube <- shocklist$Z.cube
shock.horz <- shocklist$shock.horz
shock.order <- shocklist$shock.order
H.restr <- length(shock.horz)
N.restr <- bigK*H.restr
N <- length(shock.idx)
no.zero.restr <- shocklist$no.zero.restr
#-----------------------------------------------------------------------------
# create P0G
P0G <- diag(bigK); colnames(P0G) <- rownames(P0G) <- varNames
for(cc in 1:N){
idx <- shock.idx[[cc]]
if(shock.cidx[cc]){
temp <- try(t(chol(Smat[idx,idx,drop=FALSE])),silent=TRUE)
if(is(temp,"try-error")){
return(list(impl=NA,rot=NA,icounter=NA))
}
P0G[idx,idx] <- temp
}else{
P0G[idx,idx] <- Smat[idx,idx,drop=FALSE]
}
}
# create dynamic multiplier
PHIx <- array(0,c(bigK,bigK,plag+n.ahead+1)); dimnames(PHIx)[[1]] <- dimnames(PHIx)[[2]] <- varNames
PHIx[,,plag+1] <- diag(bigK)
for (ihor in (plag+2):(plag+n.ahead+1)){
acc = matrix(0,bigK,bigK)
for (pp in 1:plag){
acc <- acc + Fmat[,,pp]%*%PHIx[,,ihor-pp]
}
PHIx[,,ihor] <- acc
}
PHI <- PHIx[,,(plag+1):(plag+n.ahead+1)]
#-----------------------------------------------------------------------------
irf.restr <- matrix(NA, N.restr, bigK)
invGSigma_u <- Ginv%*%P0G
for(hh in 1:H.restr){
# ARRW: Definition 1
if(shock.horz[hh]!=Inf) irf.hh<-PHI[,,shock.horz[hh]]%*%invGSigma_u
# ARRW: Definition 2
#if(sign.horizon[hh]==Inf) irf.hh <- solve(A0-A0%*%Cm[1:M,]%*%do.call("rbind",rep(list(diag(M)),p)))
irf.restr[((hh-1)*bigK+1):(bigK*hh),1:bigK] <- irf.hh
}
colnames(irf.restr) <- varNames
rownames(irf.restr) <- paste(rep(varNames,H.restr),".",
rep(shock.horz,each=bigK),sep="")
#-----------------------------------------------------------------------------
# reorder - important!!
Z.cube <- Z.cube[,,shock.order]
# draw rotation matrix here
icounter <- 0
condall <- 0
impresp<-Q_bar<-NA
while(condall == 0 && icounter < MaxTries){
Q <- diag(bigK)
for(cc in 1:N){
idx <- shock.idx[[cc]]
Kidx <- length(idx)
if(shock.cidx[cc]){
randMat <- matrix(rnorm(Kidx^2),Kidx,Kidx)
Qc <- matrix(0, Kidx, Kidx)
if(no.zero.restr[cc]){
QR <- qr(randMat)
Qc <- qr.Q(QR)
}else{
for(kk in 1:Kidx){
Z.temp <- Z.cube[,,idx[kk]]
Z.temp <- Z.temp[rowSums(abs(Z.temp))!=0,,drop=F]
if(nrow(Z.temp)==0){
Z.temp <- matrix(0, 1, N.restr)
}
if(all(Z.temp==0) && kk>1){
R <- c()
}else{
R <- Z.temp%*%irf.restr[,idx]
}
if(kk > 1){R <- rbind(R, t(Qc[,(1:(kk-1)), drop=FALSE]))}
NU <- Null(t(R))
x_j <- randMat[,kk,drop=FALSE]
q_j <- NU%*%(t(NU)%*%x_j/sqrt(as.numeric(crossprod(t(NU)%*%x_j))))
Qc[,kk] <- q_j
}
}
Q[idx,idx] <- Qc
}
}
colnames(Q) <- varNames[shock.order]; rownames(Q) <- varNames
Q_bar <- Q[,varNames]
# Q_bar <- Q%*%diag(((diag(Q)>0)-(diag(Q)<0)))
# check irf
irf.check <- irf.restr%*%Q_bar
colnames(irf.check) <- varNames
rownames(irf.check) <- paste(rep(varNames,H.restr),".",rep(shock.horz,each=bigK),sep="")
signCheck <- matrix(NA,bigK,1)
for(ss in 1:bigK){
STemp <- S.cube[,ss,drop=FALSE]
if(sum(abs(STemp))==0){
signCheck[ss,] <- TRUE
next
}
PDiag <- diag(N.restr); diag(PDiag) <- sign(P.cube[,ss,drop=TRUE]>runif(N.restr))
IrfCheckTemp <- sign(irf.check[,ss,drop = FALSE])
signCheck[ss,] <- t(IrfCheckTemp)%*%PDiag%*%STemp==sum(abs(PDiag%*%STemp))
}
condall <- prod(signCheck)
icounter <- icounter + 1
}
# compute impulses
st_impulses <- array(NA,c(bigK,bigK,n.ahead+1));dimnames(st_impulses)[[1]] <- dimnames(st_impulses)[[2]] <- varNames
Cmhat <- invGSigma_u%*%Q_bar
for(ihor in 1:(n.ahead+1)){
st_impulses[,,ihor] <- as.matrix((PHI[,,ihor]%*%Cmhat))
}
if(icounter==MaxTries){
st_impulses <- Q_bar <- NA
}
# end rotation matrix loop ----------------------------------------------------------------------------
return(list(impl=st_impulses,rot=Q_bar,icounter=icounter))
}
#' @name .irf.chol
#' @noRd
.irf.chol <- function(xdat,plag,n.ahead,Ginv,Fmat,Smat,shocklist,...){
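  # Impulse responses under (block-)Cholesky identification: the covariance blocks of the
  # shocked countries are replaced by their lower-triangular Cholesky factors, the dynamic
  # multipliers PHI are accumulated from Fmat, and the responses are PHI %*% Ginv %*% P0G.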
bigT <- nrow(xdat)
bigK <- ncol(xdat)
varNames <- colnames(xdat)
shock.idx <- shocklist$shock.idx
shock.cidx <- shocklist$shock.cidx
N <- length(shock.idx)
# create P0G
P0G <- diag(bigK); colnames(P0G) <- rownames(P0G) <- varNames
for(cc in 1:N){
idx <- shock.idx[[cc]]
if(shock.cidx[cc]){
P0G[idx,idx] <- t(chol(Smat[idx,idx,drop=FALSE])) # calculate local cholesky factor of gcov
}else{
P0G[idx,idx] <- Smat[idx,idx,drop=FALSE]
}
}
# create dynamic multiplier
PHIx <- array(0,c(bigK,bigK,plag+n.ahead+1)); dimnames(PHIx)[[1]] <- dimnames(PHIx)[[2]] <- varNames
PHIx[,,plag+1] <- diag(bigK)
for (ihor in (plag+2):(plag+n.ahead+1)){
acc = matrix(0,bigK,bigK)
for (pp in 1:plag){
acc <- acc + Fmat[,,pp]%*%PHIx[,,ihor-pp]
}
PHIx[,,ihor] <- acc
}
PHI <- PHIx[,,(plag+1):(plag+n.ahead+1)]
# compute shock
invGSigma_u <- Ginv%*%P0G
# computing impulse response function
irfa <- array(0,c(bigK,bigK,n.ahead+1)); dimnames(irfa)[[2]] <- varNames
for (ihor in 1:(n.ahead+1)){
irfa[,,ihor] <- PHI[,,ihor]%*%invGSigma_u
}
# define output
out <- list(impl=irfa,rot=NULL,icounter=1)
return(out)
}
#' @name .irf.girf
#' @noRd
.irf.girf <- function(xdat,plag,n.ahead,Ginv,Fmat,Smat, ...){
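  # Generalized impulse responses: no rotation or Cholesky factor is imposed, the
  # responses are simply PHI %*% Ginv %*% Smat.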
bigT <- nrow(xdat)
bigK <- ncol(xdat)
varNames <- colnames(xdat)
# create dynamic multiplier
PHIx <- array(0,c(bigK,bigK,plag+n.ahead+1)); dimnames(PHIx)[[1]] <- dimnames(PHIx)[[2]] <- varNames
PHIx[,,plag+1] <- diag(bigK)
for (ihor in (plag+2):(plag+n.ahead+1)){
acc = matrix(0,bigK,bigK)
for (pp in 1:plag){
acc <- acc + Fmat[,,pp]%*%PHIx[,,ihor-pp]
}
PHIx[,,ihor] <- acc
}
PHI <- PHIx[,,(plag+1):(plag+n.ahead+1)]
# create shock
invGSigma_u <- Ginv%*%Smat
# computing impulse response function
irfa <- array(0,c(bigK,bigK,n.ahead+1)); dimnames(irfa)[[1]] <- varNames
for (ihor in 1:(n.ahead+1)){
irfa[,,ihor] <- PHI[,,ihor]%*%invGSigma_u
}
# define output
out <- list(impl=irfa,rot=NULL,icounter=1)
return(out)
}
#' @name .irf.girf.sims
#' @noRd
.irf.girf.sims <- function(invG,lF,gcov,x,horizon=40,...){
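  # Simulation-based GIRFs: responses from .impulsdtrf (impact matrix invG %*% gcov) are
  # rescaled column-wise by 1/sqrt of the corresponding diagonal element, i.e. the usual
  # normalisation to a one-standard-deviation shock.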
cN <- unique(substr(colnames(x),1,2))
N <- length(cN)
Cm <- as.matrix(gcov)
K <- ncol(x)
p<-dim(lF)[[3]] # number of lags
invGSigma_u <- invG%*%Cm;rownames(invGSigma_u)<-colnames(invGSigma_u)<-rownames(x)
impls.girf<- .impulsdtrf(lF,invGSigma_u,horizon)
cons.girf<-(1/diag(invGSigma_u))*sqrt(diag(invGSigma_u))
# irfa is a K responses times K shocks times horizon array
irfa<-impls.girf*array(matrix(cons.girf,K,K,byrow=TRUE),dim=c(K,K,(horizon)))
return(list(impl=irfa,gcov=Cm))
}
#' @name .mk_fevd.sims
#' @noRd
.mk_fevd.sims <- function(irfa){
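  # irfa: array of impulse responses (responses x shocks x horizon); returns, per horizon,
  # the share of each response's forecast error variance attributable to each shock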
ny <- dim(irfa)[[1]];nH <- dim(irfa)[[3]]
fevda <- apply(irfa*irfa,c(1,2),cumsum);
fevda <- aperm(fevda,c(2,3,1))
accm <- matrix(0,ny,ny)
for (ih in 1:nH){
accm <- accm+irfa[,,ih]%*%t(irfa[,,ih])
denm <- matrix((diag(accm)),ny,ny)
fevda[,,ih]=fevda[,,ih]/denm
}
return(fevda)
}
#' @name .impulsdtrf
#' @noRd
.impulsdtrf <- function(B,smat,nstep){
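  # B:     neq x nvar x lags array of reduced-form autoregressive coefficient matrices
  # smat:  impact matrix of the shocks (e.g. a Cholesky factor of the error covariance)
  # nstep: number of horizons for which responses are computed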
neq <- dim(B)[1]
nvar <- dim(B)[2]
lags <- dim(B)[3]
dimnB <- dimnames(B)
if(dim(smat)[2] != dim(B)[2]) stop("B and smat conflict on # of variables")
response <- array(0,dim=c(neq,nvar,nstep+lags-1));
response[ , , lags] <- smat
response <- aperm(response, c(1,3,2))
irhs <- 1:(lags*nvar)
ilhs <- lags * nvar + (1:nvar)
response <- matrix(response, ncol=neq)
B <- B[, , seq(from=lags, to=1, by=-1)] #reverse time index to allow matrix mult instead of loop
B <- matrix(B,nrow=nvar)
for (it in 1:(nstep-1)) {
response[ilhs, ] <- B %*% response[irhs, ]
irhs <- irhs + nvar
ilhs <- ilhs + nvar
}
dim(response) <- c(nvar, nstep + lags - 1, nvar)
#drop the zero initial conditions; array in usual format
if(lags>1){
response<-response[,-(1:(lags-1)),]
}
response <- aperm(response, c(1, 3, 2))
dimnames(response) <- list(dimnB[[1]], dimnames(smat)[[2]], NULL)
## dimnames(response)[2] <- dimnames(smat)[1]
## dimnames(response)[1] <- dimnames(B)[2]
return(response)
}
|
/scratch/gouwar.j/cran-all/cranData/BGVAR/R/utils.R
|
#' @name bgvar.sim
#' @title Simulating a Global Vector Autoregression
#' @description This function is used to produce simulated realizations which follow a Global Vector Autoregression (GVAR). It will also automatically simulate coefficients. All parameters can also be set by the user.
#' @usage bgvar.sim(len, M, N, plag=1, cons=FALSE, trend=FALSE, SV=FALSE)
#' @details For testing purposes, this function enables simulating time series processes which can be described by a Global Vector Autoregression. Since stability conditions are not checked, it is only implemented for \code{M=3}.
#' @param len length of the simulated time series.
#' @param M number of endogenous variables.
#' @param N number of countries.
#' @param plag number of lags.
#' @param cons logical indicating whether to include an intercept. Default set to \code{FALSE}.
#' @param trend logical indicating whether to include a trend. Default set to \code{FALSE}.
#' @param SV logical indicating whether the process should be simulated with or without stochastic volatility. Default set to \code{FALSE}.
#' @return Returns a list with the simulated data (\code{obs}), the true global coefficients (\code{true.global}) and the true country-level coefficients (\code{true.cc}).
#' @author Maximilian Boeck
#' @seealso
#' \code{\link{bgvar}} for estimation of a \code{bgvar} object.
#' @examples
#' library(BGVAR)
#' sim <- bgvar.sim(len=200, M=3, N=4, plag=2, cons=TRUE, trend=FALSE, SV=TRUE)
#' Data = sim$obs$xglobal
#' W = sim$obs$W
#' @importFrom abind adrop
#' @importFrom bayesm rdirichlet
#' @importFrom stats rnorm
#' @importFrom stochvol svsim
#' @noRd
bgvar.sim <- function(len, M, N, plag=1, cons=FALSE, trend=FALSE, SV=FALSE){
idi.para <- t(matrix(rep(c(-5,0.9,0.01),M*N),3,M*N))
# -----------------------------------------------------------------------------------------
  # simulate shocks
shock <- matrix(NA, len, M*N)
vol_true <- matrix(NA, len, M*N)
if(SV) {
for(mm in 1:(M*N)) {
temp <- svsim(len=len, mu=idi.para[mm,1], phi=idi.para[mm,2], sigma=idi.para[mm,3])
shock[,mm] <- temp$y
vol_true[,mm] <- temp$vol
}
} else {
for(mm in 1:(M*N)) {
shock[,mm] <- rnorm(len, 0, exp(-5))
vol_true[,mm] <- exp(-5)
}
}
xglobal <- matrix(0,len,M*N)
ident.country <- t(matrix(seq(1,M*N),M,N))
  cN   <- paste0(letters[1:N],letters[1:N])
vars <- c("y","inf","stir")
colnames(xglobal) <- colnames(shock) <- paste0(rep(cN,each=M),".",vars)
# state 0
Theta.mean <- matrix(c(0.7,-0.03,0.08,-0.11,0.4,0,0.1,0.03,0.7),M,M)
Lambda0.mean <- matrix(c(0.05,0.01,0.07,0,0.03,0,0.01,0,0.04),M,M)
Lambda.mean <- matrix(c(0.1,0,-0.05,0.02,0.08,-0.01,0,0,0.12),M,M)
max(abs(Re(eigen(Theta.mean)$values)))
max(abs(Re(eigen(Lambda0.mean)$values)))
max(abs(Re(eigen(Lambda.mean)$values)))
V.Theta <- diag(M)*1e-7
V.Lambda0 <- diag(M)*1e-7
V.Lambda <- diag(M)*1e-7
# -----------------------------------------------------------------------------------------
# get weights
# weights <- list()
# for(cc in 1:N){
# temp <- matrix(0,M,N); colnames(temp) <- cN; rownames(temp) <- vars
# for(ii in 1:length(vars)){
# temp[ii,-cc] <- rdirichlet(rep(1/(N-1),N-1))
# }
# weights[[cc]] <- temp
# }
# names(weights) <- cN
weights <- matrix(0,N,N); rownames(weights) <- colnames(weights) <- cN
for(cc in 1:N){
weights[cc,-cc] <- rdirichlet(rep(1/(N-1),N-1)*100)
}
# -----------------------------------------------------------------------------------------
# create weight matrix
W <- list()
for(cc in 1:N){
Wnew <- matrix(0,2*M,M*N); colnames(Wnew) <- colnames(xglobal)
rownames(Wnew) <- c(vars,paste0(vars,"*"))
diag(Wnew[,grep(cN[cc],colnames(Wnew))]) <- 1
for(ii in 1:M){
#Wnew[paste0(vars[ii],"*"),grep(vars[ii],colnames(Wnew))] <- weights[[cc]][grep(vars[ii],rownames(weights[[cc]])),]
Wnew[paste0(vars[ii],"*"),grep(vars[ii],colnames(Wnew))] <- weights[cc,]
}
W[[cc]] <- Wnew
}
names(W) <- cN
# -----------------------------------------------------------------------------------------
# simulate coefficients
Theta <- Lambda <- list()
for(pp in 1:plag){
Theta[[pp]] <- array(NA,dim=c(M,M,N))
Lambda[[pp]] <- array(NA,dim=c(M,M,N))
dimnames(Theta[[pp]])[[1]] <- vars
dimnames(Lambda[[pp]])[[1]] <- paste0(vars,"*")
dimnames(Theta[[pp]])[[2]] <- dimnames(Lambda[[pp]])[[2]] <- vars
dimnames(Theta[[pp]])[[3]] <- dimnames(Lambda[[pp]])[[3]] <- cN
}
Lambda0 <- array(NA,dim=c(M,M,N))
a0l <- array(NA,dim=c(1,M,N))
a1l <- array(NA,dim=c(1,M,N))
for(cc in 1:N){
for(pp in 1:plag){
if(pp==1){
Theta[[pp]][,,cc] <- matrix(as.vector(Theta.mean) + kronecker(diag(M),t(chol(V.Theta))) %*% rnorm(M*M), M, M)
Lambda[[pp]][,,cc] <- matrix(as.vector(Lambda.mean) + kronecker(diag(M),t(chol(V.Lambda))) %*% rnorm(M*M), M, M)
}else{
Theta[[pp]][,,cc] <- matrix(kronecker(diag(M),t(chol(V.Theta))) %*% rnorm(M*M), M, M)
Lambda[[pp]][,,cc] <- matrix(kronecker(diag(M),t(chol(V.Lambda))) %*% rnorm(M*M), M, M)
}
}
Lambda0[,,cc] <- matrix(as.vector(Lambda0.mean) + kronecker(diag(M),t(chol(V.Lambda0))) %*% rnorm(M*M), M, M)
a0l[1,,cc] <- runif(M,-.3,.3)
a1l[1,,cc] <- runif(M,1e-10,5e-4)
}
names(Theta) <- names(Lambda) <- paste0("lag.",seq(1,plag))
# -----------------------------------------------------------------------------------------
# get global solution
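  # stacking: with A_i = [I, -Lambda0_i'] and B_{i,p} = [Theta_{i,p}', Lambda_{i,p}'],
  # G stacks A_i %*% W_i and H_p stacks B_{i,p} %*% W_i across countries;
  # the global reduced-form coefficients are then F_p = G^{-1} H_p, b0 = G^{-1} a0 and b1 = G^{-1} a1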
a0 <- NULL
a1 <- NULL
G <- NULL
S_post <- list()
for (cc in 1:N){
A <- cbind(diag(M),-t(Lambda0[,,cc]))
for(pp in 1:plag){
assign(paste0("B",pp),cbind(t(Theta[[pp]][,,cc]),t(Lambda[[pp]][,,cc])))
if(cc==1) assign(paste0("H",pp), get(paste0("B",pp))%*%W[[cc]])
if(cc>1) assign(paste0("H",pp), rbind(get(paste0("H",pp)),get(paste0("B",pp))%*%W[[cc]]))
}
G <- rbind(G,A%*%W[[cc]])
a0 <- rbind(a0,t(adrop(a0l[,,cc,drop=FALSE],drop=3)))
a1 <- rbind(a1,t(adrop(a1l[,,cc,drop=FALSE],drop=3)))
S_post[[cc]] <- crossprod(shock[,grep(cN[cc],colnames(shock))])
}
G.inv <- solve(G)
b0 <- G.inv%*%a0
b1 <- G.inv%*%a1
F_large <- NULL
F_sum <- matrix(0,M*N,M*N)
for (pp in 1:plag){
assign(paste("F",pp,sep=""),G.inv%*%get(paste0("H",pp)))
F_large <- cbind(F_large,get(paste0("F",pp)))
F_sum <- F_sum + get(paste0("F",pp))
}
Cm <- rbind(F_large,cbind(diag(M*N*(plag-1)),matrix(0,M*N,M*N)))
if(cons){
F_large <- cbind(F_large,b0)
}else{
a0l <- NULL; a0 <- NULL; b0 <- matrix(0,M*N,1)
}
if(trend){
F_large <- cbind(F_large,b1)
}else{
a1l <- NULL; a1 <- NULL; b1 <- matrix(0,M*N,1)
}
mu <- solve(diag(M*N)-F_sum)%*%b0
#max(abs(Re(eigen(Cm)$values)))
##### need global solution
for(pp in 1:plag) xglobal[pp,] <- mu + pp*b1 + shock[pp,]
for (tt in (plag+1):len){
xlag <- NULL
    for(pp in 1:plag) xlag <- cbind(xlag,xglobal[tt-pp,,drop=FALSE])
if(cons) xlag <- cbind(xlag,1)
if(trend) xlag <- cbind(xlag,tt)
xglobal[tt,] <- xlag%*% t(F_large) + shock[tt,]
}
true.global <- list(F_large=F_large, G.inv=G.inv, S_post=S_post)
  true.cc     <- list(a0l=a0l,a1l=a1l,Theta=Theta,Lambda0=Lambda0,Lambda=Lambda,vol_true=vol_true)
obs <- list(xglobal=xglobal,W=weights)
return(list(obs=obs,true.global=true.global,true.cc=true.cc))
}
#' @noRd
"irfcf" <- function(x, shockvar, resp, n.ahead=24, save.store=FALSE, verbose=TRUE){
UseMethod("irfcf", x)
}
#' @name irfcf
#' @title Counterfactual Analysis
#' @description Function to perform counterfactual analysis. It allows neutralizing the response of a specific variable to a given shock.
#' @export
#' @usage irfcf(x, shockvar, resp, n.ahead=24, save.store=FALSE, verbose=TRUE)
#' @param x an object of class \code{bgvar}.
#' @param shockvar structural shock of interest.
#' @param resp response variable to neutralize.
#' @param n.ahead forecasting horizon.
#' @param save.store If set to \code{TRUE} the full posterior is returned. Default is set to \code{FALSE} in order to save storage.
#' @param verbose If set to \code{FALSE} it suppresses printing messages to the console.
#' @return Returns a list of class \code{bgvar.irf} with the following elements: \describe{
#' \item{\code{posterior}}{ is a four-dimensional array (K times K times n.ahead times 7) that contains 7 quantiles of the posterior distribution of the impulse response functions: the 50\% ("low25" and "high75"), the 68\% ("low16" and "high84") and the 90\% ("low05" and "high95") credible sets along with the posterior median ("median").}
#' \item{\code{struc.obj}}{ is a list object that contains posterior quantities needed when calculating historical decomposition and structural errors via \code{hd.decomp}.\describe{
#' \item{\code{A}}{ median posterior of global coefficient matrix.}
#' \item{\code{Ginv}}{ median posterior of matrix \code{Ginv}, which describes contemporaneous relationships between countries.}
#' \item{\code{S}}{ posterior median of matrix with country variance-covariance matrices on the main diagonal.}
#' }}
#' \item{\code{model.obj}}{ is a list object that contains model-specific information, in particular\describe{
#' \item{\code{xglobal}}{ used data of the model.}
#' \item{\code{plag}}{ used lag specification of the model.}
#' }}
#' \item{\code{IRF_store}}{ is a four-dimensional array (K times n.ahead times nr. of shock times draws) which stores the whole posterior distribution. Exists only if \code{save.store=TRUE}.}
#' }
#' @author Maximilian Boeck, Martin Feldkircher
#' @examples
#' \dontrun{
#' library(BGVAR)
#' data(eerDatasmall)
#' model.ssvs.eer<-bgvar(Data=eerDatasmall,W=W.trade0012.small,draws=100,burnin=100,
#' plag=1,prior="SSVS",eigen=TRUE)
#' # very time-consuming
#' irfcf <- irfcf(model.ssvs.eer,shockvar="US.stir",resp="US.rer",n.ahead=24)
#' }
#' @noRd
#' @importFrom stats quantile
irfcf.bgvar <- function(x, shockvar, resp, n.ahead=24, save.store=FALSE, verbose=TRUE){
start.irf <- Sys.time()
if(verbose) cat("\nStart counterfactual analysis of Bayesian Global Vector Autoregression.\n\n")
#----------------get stuff-------------------------------------------------------#
plag <- x$args$plag
xglobal <- x$xglobal
bigK <- ncol(xglobal)
A_large <- x$stacked.results$A_large
F_large <- x$stacked.results$F_large
S_large <- x$stacked.results$S_large
Ginv_large <- x$stacked.results$Ginv_large
F.eigen <- x$stacked.results$F.eigen
thindraws <- length(F.eigen)
varNames <- colnames(xglobal)
#----------------------checks-----------------------------------------------------#
if(length(shockvar)!=1&&length(resp)!=1){
stop("Please specify only one shock and one response variable to neutralize.")
}
if(!(shockvar%in%varNames)){
stop("Please respecify shockvar. Variable not contained in dataset.")
}
if(!(resp%in%varNames)){
stop("Please respecify response variable. Variable not contained in dataset.")
}
neutR <- which(varNames%in%shockvar)
neutS <- which(varNames%in%resp)
if(verbose){
cat(paste("Shock of interest: ",shockvar,".\n",sep=""))
cat(paste("Response to neutralize: ",resp,".\n",sep=""))
}
#--------------compute-----------------------------------------------------------#
if(verbose) cat("Start computing...\n")
IRF_store <- array(NA, dim=c(thindraws,bigK,bigK,n.ahead))
dimnames(IRF_store)[[2]] <- dimnames(IRF_store)[[3]] <- colnames(xglobal)
pb <- txtProgressBar(min = 0, max = thindraws, style = 3)
for(irep in 1:thindraws){
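    # draw-specific reduced-form covariance of the global model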
Sigma_u <- Ginv_large[irep,,]%*%S_large[irep,,]%*%t(Ginv_large[irep,,])
irf<-Phi2<- .impulsdtrf(B=adrop(F_large[irep,,,,drop=FALSE],drop=1),
smat=t(chol(Sigma_u)),nstep=n.ahead)
for(h in 1:n.ahead){
aux<-NULL
e0<-Phi2[neutR,,h]/irf[neutR,neutS,1] # shocks are vectorized, no loop here
      for(i in 1:bigK){ # loop over variables / responses
idx<-c(1:(n.ahead-h+1))
aux<-rbind(aux,matrix(irf[i,neutS,idx],nrow=bigK,ncol=length(idx),byrow=TRUE)*e0)
}
dim(aux)<-c(bigK,bigK,length(idx));aux<-aperm(aux,c(2,1,3))
Phi2[,,(h:n.ahead)]<-Phi2[,,(h:n.ahead),drop=FALSE]-aux
}
IRF_store[irep,,,] <- Phi2
setTxtProgressBar(pb, irep)
}
#---------------------compute posterior----------------------------------------#
imp_posterior <- array(NA, dim=c(bigK,bigK,n.ahead,7))
dimnames(imp_posterior)[[1]] <- colnames(xglobal)
dimnames(imp_posterior)[[2]] <- colnames(xglobal)
dimnames(imp_posterior)[[3]] <- 1:n.ahead
dimnames(imp_posterior)[[4]] <- c("low25","low16","low05","median","high75","high84","high95")
imp_posterior[,,,"low25"] <- apply(IRF_store,c(2,3,4),quantile,.25,na.rm=TRUE)
imp_posterior[,,,"low16"] <- apply(IRF_store,c(2,3,4),quantile,.16,na.rm=TRUE)
imp_posterior[,,,"low05"] <- apply(IRF_store,c(2,3,4),quantile,.05,na.rm=TRUE)
imp_posterior[,,,"median"]<- apply(IRF_store,c(2,3,4),quantile,.50,na.rm=TRUE)
imp_posterior[,,,"high75"]<- apply(IRF_store,c(2,3,4),quantile,.75,na.rm=TRUE)
imp_posterior[,,,"high84"]<- apply(IRF_store,c(2,3,4),quantile,.84,na.rm=TRUE)
imp_posterior[,,,"high95"]<- apply(IRF_store,c(2,3,4),quantile,.95,na.rm=TRUE)
# other stuff
A <- apply(A_large,c(2,3),median)
Fmat <- apply(F_large,c(2,3,4),median)
Ginv <- apply(Ginv_large,c(2,3),median)
Smat <- apply(S_large,c(2,3),median)
Sigma_u <- Ginv%*%Smat%*%t(Ginv)
struc.obj <- list(A=A,Fmat=Fmat,Ginv=Ginv,Smat=Smat)
model.obj <- list(xglobal=xglobal,plag=plag)
#--------------------------------- prepare output----------------------------------------------------------------------#
out <- structure(list("posterior" = imp_posterior,
"struc.obj" = struc.obj,
"model.obj" = model.obj),
class="bgvar.irf")
if(save.store){
out$IRF_store = IRF_store
}
if(verbose) cat(paste("\nSize of irf object: ", format(object.size(out),unit="MB")))
end.irf <- Sys.time()
diff.irf <- difftime(end.irf,start.irf,units="mins")
mins.irf <- round(diff.irf,0); secs.irf <- round((diff.irf-floor(diff.irf))*60,0)
if(verbose) cat(paste("\nNeeded time for impulse response analysis: ",mins.irf," ",ifelse(mins.irf==1,"min","mins")," ",secs.irf, " ",ifelse(secs.irf==1,"second.","seconds.\n"),sep=""))
return(out)
}
#' @name .divisors
#' @noRd
.divisors <- function (n,div) {
  div <- round(div)
  while(div > 1 && n %% div != 0) div <- div - 1
return(div)
}
|
/scratch/gouwar.j/cran-all/cranData/BGVAR/R/zzz.R
|
## ----include=FALSE------------------------------------------------------------
knitr::opts_chunk$set(fig.width = 12, fig.height=8, fig.align="default")
knitr::opts_chunk$set(error = TRUE)
## ----hide=TRUE----------------------------------------------------------------
oldpar <- par(no.readonly=TRUE)
set.seed(123)
library(BGVAR)
## ----"eerData"----------------------------------------------------------------
data(eerData)
## ----"eerData2"---------------------------------------------------------------
names(eerData)
## ----"eerData3"---------------------------------------------------------------
colnames(eerData$UK)
## ----"US",echo=TRUE-----------------------------------------------------------
head(eerData$US)
## ----"convert",echo=TRUE------------------------------------------------------
bigX<-list_to_matrix(eerData)
## ----"convert2",echo=TRUE-----------------------------------------------------
colnames(bigX)[1:10]
## ----"tradeW",echo=TRUE-------------------------------------------------------
head(W.trade0012)
## ----"rownames.W"-------------------------------------------------------------
all(colnames(W.trade0012)==names(eerData))
## ----"rowSums.W"--------------------------------------------------------------
rowSums(W.trade0012)
diag(W.trade0012)
## ----"eerDatasmall", hide=TRUE------------------------------------------------
cN<-c("EA","US","RU")
eerData<-eerData[cN]
W.trade0012<-W.trade0012[cN,cN]
W.trade0012<-apply(W.trade0012,2,function(x)x/rowSums(W.trade0012))
W.list<-lapply(W.list,function(l){l<-apply(l[cN,cN],2,function(x)x/rowSums(l[cN,cN]))})
## ----"export excel", eval=FALSE-----------------------------------------------
# time <- as.character(seq.Date(as.Date("1995-01-01"),as.Date("2013-10-01"),by="quarter"))
#
# for(cc in 1:length(eerData)){
# x <- coredata(eerData[[cc]])
# rownames(x) <- time
# write.xlsx(x = x, file="./excel_eerData.xlsx", sheetName = names(eerData)[cc],
# col.names=TRUE, row.names=TRUE, append=TRUE)
# }
## ----"import excel", eval=FALSE-----------------------------------------------
# eerData_read <- excel_to_list(file = "./excel_eerData.xlsx", first_column_as_time=TRUE, skipsheet=NULL)
## ----"transform to matrix", eval=FALSE----------------------------------------
# eerData_matrix <- list_to_matrix(eerData_read)
# eerData_list <- matrix_to_list(eerData_matrix)
## ----"model.1",results="hide"-------------------------------------------------
model.1<-bgvar(Data=eerData,
W=W.trade0012,
draws=100,
burnin=100,
plag=1,
prior="NG",
hyperpara=NULL,
SV=TRUE,
thin=1,
trend=TRUE,
hold.out=0,
eigen=1
)
## ----"SV",results="hide"------------------------------------------------------
model.1$cc.results$sig$EA[,"EA.y","EA.y"]
## ----"ng.eigen",echo=TRUE-----------------------------------------------------
model.1$stacked.results$F.eigen[1:10]
## ----"print.model",echo=TRUE--------------------------------------------------
print(model.1)
## ----"summary.model"----------------------------------------------------------
summary(model.1)
## ----"stats",echo=TRUE, results="hide"----------------------------------------
Fmat <- coef(model.1)
Smat <- vcov(model.1)
lik <- logLik(model.1)
## ----"insample",fig.margin=TRUE,fig.width=6,fig.height=8,fig.cap="In-sample fit for euro area variables"----
yfit <- fitted(model.1)
plot(model.1, global=FALSE, resp="EA")
## ----"ssvs.1",echo=TRUE, results="hide"---------------------------------------
model.ssvs.1<-bgvar(Data=eerData,
W=W.trade0012,
draws=100,
burnin=100,
plag=1,
prior="SSVS",
hyperpara=NULL,
SV=TRUE,
thin=1,
Ex=NULL,
trend=TRUE,
expert=list(save.shrink.store=TRUE),
hold.out=0,
eigen=1,
verbose=TRUE
)
## ----"Pips"-------------------------------------------------------------------
model.ssvs.1$cc.results$PIP$PIP.cc$EA
## ----"pips.avg"---------------------------------------------------------------
model.ssvs.1$cc.results$PIP$PIP.avg
## ----"var.weight"-------------------------------------------------------------
variable.list<-list();variable.list$real<-c("y","Dp","tb");variable.list$fin<-c("stir","ltir","rer")
## ----results="hide"-----------------------------------------------------------
# weights for first variable set tradeW.0012, for second finW0711
model.ssvs.2<-bgvar(Data=eerData,
W=W.list[c("tradeW.0012","finW0711")],
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE,
eigen=1,
expert=list(variable.list=variable.list,save.shrink.store=TRUE),
trend=TRUE
)
## ----"ltir.estimate", results="hide"------------------------------------------
# does include ltir* only when ltir is missing domestically
model.ssvs.3<-bgvar(Data=eerData,
W=W.trade0012,
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE,
eigen=1,
expert=list(Wex.restr="ltir",save.shrink.store=TRUE),
                      trend=TRUE
)
## ----"print.model.ssvs.3"-----------------------------------------------------
print(model.ssvs.3)
## ----"OC"---------------------------------------------------------------------
eerData2<-eerData
eerData2$OC<-eerData$US[,c("poil"),drop=FALSE] # move oil prices into own slot
eerData2$US<-eerData$US[,c("y","Dp", "rer" , "stir", "ltir","tb")] # exclude it from US model
## ----"OC.weights"-------------------------------------------------------------
OC.weights<-list()
OC.weights$weights<-rep(1/3, 3)
names(OC.weights$weights)<-names(eerData2)[1:3] # last one is OC model, hence only until 3
OC.weights$variables<-c(colnames(eerData2$OC),"y") # first entry, endog. variables, second entry weighted average of y from the other countries to proxy demand
OC.weights$exo<-"poil"
## ----"OC.weights2"------------------------------------------------------------
# other entities weights with same name as new oil country
OE.weights <- list(OC=OC.weights)
## ----"estimate.OC",results="hide"---------------------------------------------
model.ssvs.4<-bgvar(Data=eerData2,
W=W.trade0012,
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE,
expert=list(OE.weights=OE.weights,save.shrink.store=TRUE),
trend=TRUE
)
## ----"aux"--------------------------------------------------------------------
aux1<-model.ssvs.1$cc.results$PIP$PIP.avg;aux1<-aux1[-nrow(aux1),1:6]
aux2<-model.ssvs.2$cc.results$PIP$PIP.avg;aux2<-aux2[-nrow(aux2),1:6]
aux3<-model.ssvs.3$cc.results$PIP$PIP.avg;aux3<-aux3[-nrow(aux3),1:6]
aux4<-model.ssvs.4$cc.results$PIP$PIP.avg;aux4<-aux4[-nrow(aux4),1:6]
## ----"heat1", fig.show="hold",out.width="25%",fig.cap="Heatmaps of PIPs."-----
heatmap(aux1,Rowv=NA,Colv=NA, main="Model 1", cex.main=2, cex.axis=1.7)
heatmap(aux2,Rowv=NA,Colv=NA, main="Model 2", cex.main=2, cex.axis=1.7)
heatmap(aux3,Rowv=NA,Colv=NA, main="Model 3", cex.main=2, cex.axis=1.7)
heatmap(aux4,Rowv=NA,Colv=NA, main="Model 4", cex.main=2, cex.axis=1.7)
## ----"shocks", results="hide"-------------------------------------------------
irf.chol<-irf(model.ssvs.1, n.ahead=24, expert=list(save.store=FALSE))
## ----"us.mp", results="hide"--------------------------------------------------
# US monetary policy shock - Cholesky
shockinfo_chol<-get_shockinfo("chol")
shockinfo_chol$shock<-"US.stir"
shockinfo_chol$scale<--100
# US monetary policy shock - GIRF
shockinfo_girf<-get_shockinfo("girf")
shockinfo_girf$shock<-"US.stir"
shockinfo_girf$scale<--100
## ----"shockinfo"--------------------------------------------------------------
shockinfo_chol
shockinfo_girf
## ----"us.mp.chol", results="hide"---------------------------------------------
irf.chol.us.mp<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo_chol, expert=list(save.store=TRUE))
## ----"us.mp2"-----------------------------------------------------------------
names(irf.chol.us.mp)
## ----"us.mp4", fig.margin=TRUE,out.width="80%",fig.cap="Responses of US country model"----
plot(irf.chol.us.mp, resp="US", shock="US.stir")
## ----"us.gdp", results="hide"-------------------------------------------------
# cholesky
shockinfo_chol <- get_shockinfo("chol", nr_rows = 2)
shockinfo_chol$shock <- c("US.stir","US.y")
shockinfo_chol$scale <- c(1,1)
# generalized impulse responses
shockinfo_girf <- get_shockinfo("girf", nr_rows = 2)
shockinfo_girf$shock <- c("US.stir","US.y")
shockinfo_girf$scale <- c(1,1)
# Recursive US GDP
irf.chol.us.y<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo_chol)
# GIRF US GDP
irf.girf.us.y<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo_girf)
## ----"us.gdp.plots",fig.cap="Comparison of responses Cholesky (left) and GIRF (right) to a negative GDP shock.",fig.show="hold",out.width="25%"----
plot(irf.chol.us.y, resp="US.y", shock="US.y")
plot(irf.girf.us.y, resp="US.y", shock="US.y")
plot(irf.chol.us.y, resp="US.rer", shock="US.y")
plot(irf.girf.us.y, resp="US.rer", shock="US.y")
## ----"global.gdp",results="hide",out.width="50%"------------------------------
shockinfo<-get_shockinfo("girf", nr_rows = 3)
shockinfo$shock<-c("EA.y","US.y","RU.y")
shockinfo$global<-TRUE
shockinfo$scale<--1
irf.global<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo)
plot(irf.global, resp=c("US.y","EA.y","RU.y"), shock="Global.y")
## ----hide=TRUE----------------------------------------------------------------
data("eerData")
eerData<-eerData[cN]
W.trade0012<-W.trade0012[cN,cN]
W.trade0012<-apply(W.trade0012,2,function(x)x/rowSums(W.trade0012))
# append expectations data to US model
temp <- cbind(USexpectations, eerData$US)
colnames(temp) <- c(colnames(USexpectations),colnames(eerData$US))
eerData$US <- temp
## ----"us.spf", results="hide"-------------------------------------------------
model.ssvs.eer<-bgvar(Data=eerData,
W=W.trade0012,
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE)
## ----"us.spf.sign.spec"-------------------------------------------------------
shockinfo<-get_shockinfo("sign")
shockinfo<-add_shockinfo(shockinfo, shock="US.y",
restriction="US.Dp", sign=">", horizon=1, prob=1, scale=1)
shockinfo<-add_shockinfo(shockinfo, shock="US.Dp",
restriction="US.y", sign="<", horizon=1, prob=1, scale=1)
## ----"us.spf.sign",message=FALSE, results="hide"------------------------------
irf.sign<-irf(model.ssvs.eer, n.ahead=24, shockinfo=shockinfo,
expert=list(MaxTries=100, save.store=FALSE, cores=NULL))
## ----"us.spf.sign2"-----------------------------------------------------------
irf.sign$rot.nr
## ----"us.spf.plots",fig.cap="Responses to AS (upper panel) and AD (lower panel) shock.",fig.show="hold",out.width="50%"----
plot(irf.sign, resp=c("US.y","US.Dp"), shock="US.y")
plot(irf.sign, resp=c("US.y","US.Dp"), shock="US.Dp")
## ----"us.spf.sign3",results="hide"--------------------------------------------
shockinfo<-get_shockinfo("sign")
shockinfo<-add_shockinfo(shockinfo, shock="US.stir_t+4",
restriction=c("US.Dp_t+4","US.stir","US.y_t+4","US.stir_t+4","US.Dp_t+4","US.y_t+4"),
sign=c("<","0","<","ratio.avg","ratio.H","ratio.H"),
horizon=c(1,1,1,5,5,5),
prob=1, scale=1)
irf.sign.zero<-irf(model.ssvs.eer, n.ahead=20, shockinfo=shockinfo,
expert=list(MaxTries=100, save.store=TRUE))
## ----"eer.spf.plots",fig.cap="Rationality conditions I.",out.width="50%",fig.show="hold"----
# rationality condition: US.stir_t+4 on impact is equal to average of IRF of
# US.stir between horizon 2 and 5
matplot(cbind(irf.sign.zero$IRF_store["US.stir_t+4",1,,1],
irf.sign.zero$IRF_store["US.stir",1,,1]),
type="l",ylab="",main="Short-term Interest Rate",lwd=2,xaxt="n", cex.main=2);
axis(side=1,at=c(1:5,9,13,17,21,25),label=c(0:4,8,12,16,20,24), cex.axis=1.7)
legend("topright",lty=c(1,2),c("expected","actual"),lwd=2,bty="n",col=c("black","red"))
segments(x0=2,y0=1,x1=5,y1=1,lwd=2,lty=3,col="grey")
points(1,1,col="grey",pch=19,lwd=4)
abline(v=c(2,5),lty=3,col="grey",lwd=2)
# rationality condition: US.y_t+4 on impact is equal to H-step ahead IRF
# of US.y in horizon 5
matplot(cbind(irf.sign.zero$IRF_store["US.y_t+4",1,,1],
irf.sign.zero$IRF_store["US.y",1,,1]),
type="l",ylab="",main="Output",lwd=2,xaxt="n", cex.main=2)
axis(side=1,at=c(1:5,9,13,17,21,25),label=c(0:4,8,12,16,20,24), cex.axis=1.7)
legend("topright",lty=c(1,2),c("expected","actual"),lwd=2,bty="n",col=c("black","red"))
yy<-irf.sign.zero$IRF_store["US.y_t+4",1,1,1]
segments(x0=1,y0=yy,x1=5,y1=yy,lwd=2,lty=3,col="grey");abline(v=c(1,5),col="grey",lty=3)
points(1,yy,col="grey",pch=19,lwd=4);points(5,yy,col="grey",pch=19,lwd=4)
## ----"ea.data"----------------------------------------------------------------
data(monthlyData);monthlyData$OC<-NULL
names(monthlyData)
# list of weights of other entities with same name as additional country model
OE.weights = list(EB=EB.weights)
EA_countries <- c("AT", "BE", "DE","ES", "FI","FR")
# "IE", "IT", "NL", "PT","GR","SK","MT","CY","EE","LT","LV")
## ----"restrict_sample", hide=TRUE---------------------------------------------
monthlyData <- monthlyData[c(EA_countries,"EB")]
W<-W[EA_countries,EA_countries]
W<-apply(W,2,function(x)x/rowSums(W))
OE.weights$EB$weights <- OE.weights$EB$weights[names(OE.weights$EB$weights)%in%EA_countries]
## ----"ea.estimate", results="hide"--------------------------------------------
# estimates the model
model.ssvs<-bgvar(Data=monthlyData,
W=W,
draws=200,
burnin=200,
plag=1,
prior="SSVS",
eigen=1.05,
expert=list(OE.weights=OE.weights))
## ----"ea.sign"----------------------------------------------------------------
# imposes sign restrictions on the cross-section and for a global shock
# (long-term interest rates)
shockinfo<-get_shockinfo("sign")
for(cc in c("AT","BE","FR")){
shockinfo<-add_shockinfo(shockinfo, shock=paste0(cc,".ltir"),
restriction=paste0(cc,c(".ip",".p")),
sign=c("<","<"), horizon=c(1,1),
prob=c(0.5,0.5), scale=c(-100,-100),
global=TRUE)
}
## ----"global.restrictions"----------------------------------------------------
shockinfo
## ----"global.shock.irf",echo=TRUE,results="hide"------------------------------
irf.sign.ssvs<-irf(model.ssvs, n.ahead=24, shockinfo=shockinfo, expert=list(MaxTries=500))
## ----"ea.sign.verify"---------------------------------------------------------
irf.sign.ssvs$posterior[paste0(EA_countries[-c(3,12)],".ltir"),1,1,"Q50"]
irf.sign.ssvs$posterior[paste0(EA_countries,".ip"),1,1,"Q50"]
irf.sign.ssvs$posterior[paste0(EA_countries,".p"),1,1,"Q50"]
## ----"ea.sign.plots",fig.show="hold",out.width="25%",fig.cap="Output responses of selected euro area countries."----
plot(irf.sign.ssvs, resp=c("AT.ip"), shock="Global.ltir")
plot(irf.sign.ssvs, resp=c("BE.ip"), shock="Global.ltir")
plot(irf.sign.ssvs, resp=c("DE.ip"), shock="Global.ltir")
plot(irf.sign.ssvs, resp=c("ES.ip"), shock="Global.ltir")
## ----"fevd"-------------------------------------------------------------------
#calculates the LN GFEVD
gfevd.us.mp=gfevd(model.ssvs.eer,n.ahead=24,running=TRUE,cores=4)$FEVD
# get position of EA
idx<-which(grepl("EA.",dimnames(gfevd.us.mp)[[2]]))
own<-colSums(gfevd.us.mp["EA.y",idx,])
foreign<-colSums(gfevd.us.mp["EA.y",-idx,])
## ----"fevd.plot",fig.cap="FEVD of EA GDP.",out.width="50%"--------------------
barplot(t(cbind(own,foreign)),legend.text =c("own","foreign"))
## ----"fevd.struc"-------------------------------------------------------------
# calculates FEVD for variables US.y
fevd.us.y=fevd(irf.chol.us.mp, var.slct=c("US.y"))$FEVD
idx<-which(grepl("US.",rownames(fevd.us.y)))
## ----"fevd.struc.plot",fig.cap="FEVD of US GDP.",out.width="50%"--------------
barplot(fevd.us.y[idx,1,])
## ----"hd"---------------------------------------------------------------------
HD<-hd(irf.chol.us.mp)
# summing them up should get you back the original time series
org.ts<-apply(HD$hd_array,c(1,2),sum) # this sums up the contributions of all shocks + constant, initial conditions and residual component (last three entries in the third dimension of the array)
## ----"hd.plot",fig.cap="Historical decomposition of euro area GDP.",out.width="50%"----
matplot(cbind(HD$x[,1],org.ts[,1]),type="l",ylab="",lwd=2, cex.axis=1.7)
legend("bottomright",c("hd series","original"),col=c("black","red"),lty=c(1,2),bty="n",cex=2)
## ----"fcast.est", results="hide"----------------------------------------------
model.ssvs.h8<-bgvar(Data=eerData,
W=W.trade0012,
draws=500,
burnin=500,
plag=1,
prior="SSVS",
hyperpara=NULL,
SV=TRUE,
thin=1,
trend=TRUE,
hold.out=8,
eigen=1
)
## ----"fcast.predict", results="hide"------------------------------------------
fcast <- predict(model.ssvs.h8, n.ahead=8, save.store=TRUE)
## ----"lps"--------------------------------------------------------------------
lps.h8 <- lps(fcast)
rmse.h8 <- rmse(fcast)
## ----"fcast.plot",fig.cap="Forecast plot.",out.width="50%"--------------------
plot(fcast, resp="US.Dp", cut=8)
## ----"cond.predict",results="hide"--------------------------------------------
# matrix with constraints
constr <- matrix(NA,nrow=fcast$n.ahead,ncol=ncol(model.ssvs.h8$xglobal))
colnames(constr) <- colnames(model.ssvs.h8$xglobal)
# set "US.Dp" for five periods on its last value
constr[1:5,"US.Dp"] <-model.ssvs.h8$xglobal[nrow(model.ssvs.h8$xglobal),"US.Dp"]
# compute conditional forecast (hard restriction)
cond_fcast <- predict(model.ssvs.h8, n.ahead=8, constr=constr, constr_sd=NULL)
## ----"cond.predict.sd",results="hide"-----------------------------------------
# add uncertainty to conditional forecasts
constr_sd <- matrix(NA,nrow=fcast$n.ahead,ncol=ncol(model.ssvs.h8$xglobal))
colnames(constr_sd) <- colnames(model.ssvs.h8$xglobal)
constr_sd[1:5,"US.Dp"] <- 0.001
# compute conditional forecast with soft restrictions
cond_fcast2 <- predict(model.ssvs.h8, n.ahead=8, constr=constr, constr_sd=constr_sd)
## ----"cond.plot.1",out.width="50%",fig.show="hold",fig.cap="Conditional forecast of US Inflation, top panel without uncertainty during the conditioning, bottom panel with uncertainty."----
plot(cond_fcast, resp="US.Dp", cut=10)
plot(cond_fcast2, resp="US.Dp", cut=10)
## ----eval=FALSE---------------------------------------------------------------
# # load dataset
# data(eerData)
# # Minnesota prior and two different weight matrices and no SV
# # weights for first variable set tradeW.0012, for second finW0711
# variable.list <- list()
# variable.list$real <- c("y","Dp","tb")
# variable.list$fin <- c("stir","ltir","rer")
# Hyperparm.MN <- list(a_i = 0.01, # prior for the shape parameter of the IG
# b_i = 0.01 # prior for the scale parameter of the IG
# )
# model.MN<-bgvar(Data=eerData,
# W=W.list[c("tradeW.0012","finW0711")],
# draws=200,
# burnin=200,
# plag=1,
# hyperpara=Hyperparm.MN,
# prior="MN",
# thin=1,
# eigen=TRUE,
# SV=TRUE,
# expert=list(variable.list=variable.list))
# # SSVS prior
# Hyperparm.ssvs <- list(tau0 = 0.1, # coefficients: prior variance for the spike
# # (tau0 << tau1)
# tau1 = 3, # coefficients: prior variance for the slab
# # (tau0 << tau1)
# kappa0 = 0.1, # covariances: prior variance for the spike
# # (kappa0 << kappa1)
# kappa1 = 7, # covariances: prior variance for the slab
# # (kappa0 << kappa1)
# a_1 = 0.01, # prior for the shape parameter of the IG
# b_1 = 0.01, # prior for the scale parameter of the IG
# p_i = 0.5, # prior inclusion probability of coefficients
# q_ij = 0.5 # prior inclusion probability of covariances
# )
# model.ssvs<-bgvar(Data=eerData,
# W=W.trade0012,
# draws=100,
# burnin=100,
# plag=1,
# hyperpara=Hyperparm.ssvs,
# prior="SSVS",
# thin=1,
# eigen=TRUE)
# # Normal Gamma prior
# data(monthlyData)
# monthlyData$OC<-NULL
# Hyperparm.ng<-list(d_lambda = 1.5, # coefficients: prior hyperparameter for the NG-prior
# e_lambda = 1, # coefficients: prior hyperparameter for the NG-prior
# prmean = 0, # prior mean for the first lag of the AR coefficients
# a_1 = 0.01, # prior for the shape parameter of the IG
# b_1 = 0.01, # prior for the scale parameter of the IG
# tau_theta = .6, # (hyper-)parameter for the NG
# sample_tau = FALSE # estimate a?
# )
# model.ng<-bgvar(Data=monthlyData,
# W=W,
# draws=200,
# burnin=100,
# plag=1,
# hyperpara=Hyperparm.ng,
# prior="NG",
# thin=2,
# eigen=TRUE,
# SV=TRUE,
# expert=list(OE.weights=list(EB=EA.weights)))
## ----eval=FALSE---------------------------------------------------------------
# # First example, a US monetary policy shock, quarterly data
# library(BGVAR)
# data(eerData)
# model.eer<-bgvar(Data=eerData,W=W.trade0012,draws=500,burnin=500,plag=1,prior="SSVS",thin=10,eigen=TRUE,trend=TRUE)
#
# # generalized impulse responses
# shockinfo<-get_shockinfo("girf")
# shockinfo$shock<-"US.stir"; shockinfo$scale<--100
#
# irf.girf.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
#
# # cholesky identification
# shockinfo<-get_shockinfo("chol")
# shockinfo$shock<-"US.stir"; shockinfo$scale<--100
#
# irf.chol.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
# # sign restrictions
# shockinfo <- get_shockinfo("sign")
# shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.y","US.Dp"),
# sign=c("<","<"), horizon=c(1,1), scale=1, prob=1)
# irf.sign.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
#
# # sign restrictions with relaxed cross-country restrictions
# shockinfo <- get_shockinfo("sign")
# # restriction for other countries holds to 75\%
# shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.y","EA.y","UK.y"),
# sign=c("<","<","<"), horizon=1, scale=1, prob=c(1,0.75,0.75))
# shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.Dp","EA.Dp","UK.Dp"),
# sign=c("<","<","<"), horizon=1, scale=1, prob=c(1,0.75,0.75))
# irf.sign.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
#
# # Example with zero restriction (Arias et al., 2018) and
# # rationality conditions (D'Amico and King, 2017).
# data("eerDataspf")
# model.eer<-bgvar(Data=eerDataspf, W=W.trade0012.spf, draws=300, burnin=300,
# plag=1, prior="SSVS", eigen=TRUE)
# shockinfo <- get_shockinfo("sign")
# shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4",
# restriction=c("US.Dp_t+4","US.stir","US.y_t+4"),
# sign=c("<","0","<"), horizon=1, prob=1, scale=1)
# # rationality condition: US.stir_t+4 on impact is equal to average of
# # IRF of US.stir between horizon 1 to 4
# shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4", restriction="US.stir_t+4",
# sign="ratio.avg", horizon=5, prob=1, scale=1)
# # rationality condition: US.Dp_t+4 on impact is equal to IRF of US.Dp at horizon 4
# shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4", restriction="US.Dp_t+4",
# sign="ratio.H", horizon=5, prob=1, scale=1)
# # rationality condition: US.y_t+4 on impact is equal to IRF of US.y at horizon 4
# shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4", restriction="US.y_t+4",
# sign="ratio.H", horizon=5, prob=1, scale=1)
# # regulate maximum number of tries with expert settings
# irf.ratio <- irf(model.eer, n.ahead=20, shockinfo=shockinfo,
# expert=list(MaxTries=10))
## ----hide=TRUE----------------------------------------------------------------
par(oldpar)
|
/scratch/gouwar.j/cran-all/cranData/BGVAR/inst/doc/examples.R
|
---
title: "BGVAR: Bayesian Global Vector Autoregression"
author: "Maximilian Böck and Martin Feldkircher and Florian Huber"
date: "`r format(Sys.time(), '%d %B %Y')`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{BGVAR: Bayesian Global Vector Autoregression}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
bibliography: fullbib.bib
fig_caption: yes
citation_package: natbib
tags:
- Bayesian
- GVAR
abstract: |
<p align=justify>
  This document describes the `BGVAR` library to estimate Bayesian global vector autoregressions (GVARs) with different prior specifications and stochastic volatility. The library offers a fully fledged toolkit to conduct impulse response functions, forecast error variance decompositions and historical decompositions. To identify structural shocks in a given country model or joint regional shocks, the library offers simple Cholesky decompositions, generalized impulse response functions and zero and sign restrictions -- the latter of which can also be put on the cross-section. We also allow for different structures of the GVAR like including different weights for different variables or setting up additional country models that determine global variables such as oil prices. Last, we provide functions to conduct and evaluate out-of-sample forecasts as well as conditional forecasts that allow for the setting of a future path for a particular variable of interest. The toolbox requires `R>=3.5`. </p>
---
```{r, include=FALSE}
knitr::opts_chunk$set(fig.width = 12, fig.height=8, fig.align="default")
knitr::opts_chunk$set(error = TRUE)
```
# Introduction
<p align=justify>
This vignette describes the BGVAR package that allows for the estimation of Bayesian global vector autoregressions (GVARs). The focus of the vignette is to provide a range of examples that demonstrate the full functionality of the library. It is accompanied by a more technical description of the GVAR framework. Here, it suffices to briefly summarize the main idea of a GVAR, which is a large system of equations designed to analyze or control for interactions across units. Most often, these units refer to countries and the interactions between them arise through economic and financial interdependencies. The examples we provide in this document also use cross-country data. In principle, however, the GVAR framework can be applied to other units, such as regions, firms, etc. The following examples show how the GVAR can be used to either estimate spillover effects from one country to another, or alternatively, to look at the effects of a domestic shock controlling for global factors.
</p>
<p align=justify>
In a nutshell, the GVAR consists of two stages. In the first, $N$ vector autoregressive (VAR) models are estimated, one per unit. Each equation in a unit model is augmented with foreign variables that control for global factors and link the unit-specific models later. Typically, these foreign variables are constructed using exogenous, bilateral weights, stored in an $N \times N$ weight matrix. The classical framework of @Pesaran2004 and @Dees2007a proposes estimating these country models in vector error correction form, while in this package we take a Bayesian stance and estimation is carried out using VARs. The user can transform the data prior to estimation into stationary form or estimate the model in levels. The `BGVAR` package also allows us to include a trend to get trend-stationary data. In the second step, the single country models are combined under the assumption that they are linked via the exogenous weights, yielding a global representation of the model. This representation is then used to carry out impulse response analysis and forecasting.
</p>
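<p align=justify>
To fix notation, the foreign (starred) variables attached to unit $i$ are weighted averages of the other units' variables, with the weights taken from the $i$-th row of the (row-standardized) weight matrix:
$$x^{\ast}_{it} = \sum_{j=1}^{N} w_{ij}\,x_{jt}, \qquad w_{ii}=0, \qquad \sum_{j=1}^{N} w_{ij}=1.$$
</p>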
<p align=justify>
This vignette consists of four blocks: getting started and data handling, estimation, structural analysis and forecasting. In the next part, we discuss which data formats the `bgvar` library can handle. We then proceed by showing examples of how to estimate a model using different Bayesian shrinkage priors -- for references see @CrespoCuaresma2016 and @Feldkircher2016a. We also discuss how to run diagnostic and convergence checks and examine the main properties of the model. In the third section, we turn to structural analysis, either using recursive (Cholesky) identification or sign restrictions. We will also discuss structural and generalized forecast error variance decompositions and historical decompositions. In the last section, we show how to compute unconditional and conditional forecasts with the package.
</p>
# Getting Started
We start by installing the package from CRAN and attaching it with
```{r,hide=TRUE}
oldpar <- par(no.readonly=TRUE)
set.seed(123)
library(BGVAR)
```
<p align=justify>
To ensure reproducibility of the examples that follow, we have set a particular seed (for `R`'s random number generator). As with every `R` library, the `BGVAR` package provides built-in help files which can be accessed by typing `?` followed by the function / command of interest. It also comes along with four example data sets: two of them correspond to the quarterly data set used in @Feldkircher2016a (`eerData`, `eerDataspf`), one is on monthly frequency (`monthlyData`). For convenience we also include the data that come along with the Matlab GVAR toolbox of @matlabToolbox, `pesaranData`. We include the 2019 vintage [@Mohaddes2020]. </p>
<p align=justify>
We start illustrating the functionality of the `BGVAR` package by using the `eerData` data set from @Feldkircher2016a. It contains 76 quarterly observations for 43 countries over the period from 1995Q1 to 2013Q4. The euro area (EA) is included as a regional aggregate.
</p>
We can load the data by typing
```{r "eerData"}
data(eerData)
```
<p align=justify>
This loads two objects: `eerData`, which is a list object of length $N$ (i.e., the number of countries) and `W.trade0012`, which is an $N \times N$ weight matrix.
</p>
We can have a look at the names of the countries contained in `eerData`
```{r "eerData2"}
names(eerData)
```
and at the names of the variables contained in a particular country by
```{r "eerData3"}
colnames(eerData$UK)
```
We can zoom in into each country by accessing the respective slot of the data list:
```{r "US",echo=TRUE}
head(eerData$US)
```
<p align=justify>
Here, we see that the global variable, oil prices (`poil`), is attached to the US country model. This corresponds to the classical GVAR set-up used among others in @Pesaran2004 and @Dees2007a. We also see that, in general, each country model $i$ can contain a different set of $k_i$ variables, as opposed to the requirements of a balanced panel.
</p>
<p align=justify>
The GVAR toolbox relies on one important *naming convention*, though: It is assumed that neither the country names nor the variable names contain a `.` [dot]. The reason is that the program internally has to collect and separate the data more than once and in doing that, it uses the `.` to separate countries / entities from variables. To give a concrete example, the slot in the `eerData` list referring to the USA should not be labelled `U.S.A.`, nor should any of the variable names contain a `.`
</p>
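<p align=justify>
A quick way to verify this convention for data stored in list form is shown below; this is just a minimal sketch using the `eerData` list loaded above.
</p>
```{r, "naming check", eval=FALSE}
# neither the country names nor the variable names may contain a dot
any(grepl(".", names(eerData), fixed=TRUE)) # should be FALSE
any(sapply(eerData, function(x) any(grepl(".", colnames(x), fixed=TRUE)))) # should be FALSE
```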
<p align=justify>
The toolbox also allows the user to submit the data as a $T \times k$ data matrix, with $k=\sum^N_{i=1} k_i$ denoting the sum of endogenous variables in the system. We can switch from data representation in list form to matrix form by using the function `list_to_matrix` (and vice versa using `matrix_to_list`).
</p>
To convert the `eerData` we can type:
```{r "convert",echo=TRUE}
bigX<-list_to_matrix(eerData)
```
<p align=justify>
For users who want to submit data in matrix form, the above mentioned naming convention implies that the column names of the data matrix have to include the name of the country / entity and the variable name, separated by a `.` For example, for the converted `eerData` data set, the column names look like:
</p>
```{r "convert2",echo=TRUE}
colnames(bigX)[1:10]
```
<p align=justify>
with the first part of each column name indicating the country (e.g., `EA`) and the second the variable (e.g., `y`), separated by a `.`
Regardless whether the data are submitted as list or as big matrix, the underlying data can be either of `matrix` class or time series classes such as `ts` or `xts`.
</p>
<p align=justify>
Finally, we look at the second important ingredient to build our GVAR model, the weight matrix. Here, we use annual bilateral trade flows (including services), averaged over the period from 2000 to 2012. This implies that the $ij^{th}$ element of $W$ contains trade flows from unit $i$ to unit $j$. These weights can also be made symmetric by calculating $\frac{(W_{ij}+W_{ji})}{2}$. Using trade weights to establish the links in the GVAR goes back to the early GVAR literature [@Pesaran2004] but is still used in the bulk of GVAR studies. Other weights, such as financial flows, have been proposed in @Eickmeier2015 and examined in @Feldkircher2016a. Another approach is to use estimated weights as in @Feldkircher2019b. The weight matrix should have `rownames` and `colnames` that correspond to the $N$ country names contained in `Data`.
</p>
```{r "tradeW",echo=TRUE}
head(W.trade0012)
```
The countries in the weight matrix should be in the same order as in the data list:
```{r "rownames.W"}
all(colnames(W.trade0012)==names(eerData))
```
The weight matrix should be row-standardized and the diagonal elements should be zero:
```{r "rowSums.W"}
rowSums(W.trade0012)
diag(W.trade0012)
```
Note that through row-standardizing, the final matrix is typically not symmetric (even when using the symmetric weights as raw input).
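As a minimal sketch (not run here), the symmetrization mentioned above, $\frac{(W_{ij}+W_{ji})}{2}$, followed by row-standardization could look as follows; for illustration it is applied to the shipped `W.trade0012` matrix:
```{r, "symmetrize weights", eval=FALSE}
# symmetrize the bilateral weights and row-standardize again
W.sym <- (W.trade0012 + t(W.trade0012))/2
W.sym <- W.sym/rowSums(W.sym)
rowSums(W.sym) # should all equal 1
diag(W.sym)    # should remain 0
```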
<p align=justify>
In what follows, we restrict the dataset to contain only three countries, `EA`, `US` and `RU` and adjust the weight matrix accordingly. We do this only for *illustrative purposes to save time and storage in this document*:
</p>
```{r, "eerDatasmall", hide=TRUE}
cN<-c("EA","US","RU")
eerData<-eerData[cN]
W.trade0012<-W.trade0012[cN,cN]
W.trade0012<-apply(W.trade0012,2,function(x)x/rowSums(W.trade0012))
W.list<-lapply(W.list,function(l){l<-apply(l[cN,cN],2,function(x)x/rowSums(l[cN,cN]))})
```
This results in the same dataset as available in `testdata`.
# Reading Data from Excel
<p align=justify>
In order to make BGVAR easier to handle for users who work with and organise their data in Excel spreadsheets, we provide our own reader function relying on the `readxl` package. In this section we first provide code to write the included datasets to Excel spreadsheets and then show how to read the data back into R. This yields an easy-to-follow example of how the data should be organised in Excel.
</p>
<p align=justify>
We start by exporting the data to Excel. The spreadsheet should be organised as follows. Each sheet consists of the data set for one particular country, hence the naming of the sheets with the country names is essential. In each sheet, you should provide the time index in the first column, followed by one column per variable. In the following, we will export the `eerData` data set to Excel:
```{r, "export excel", eval=FALSE}
time <- as.character(seq.Date(as.Date("1995-01-01"),as.Date("2013-10-01"),by="quarter"))
for(cc in 1:length(eerData)){
x <- coredata(eerData[[cc]])
rownames(x) <- time
write.xlsx(x = x, file="./excel_eerData.xlsx", sheetName = names(eerData)[cc],
col.names=TRUE, row.names=TRUE, append=TRUE)
}
```
which will create an Excel file named `excel_eerData.xlsx` in your current working directory. This can then be read into R with the `BGVAR` package as follows:
```{r, "import excel", eval=FALSE}
eerData_read <- excel_to_list(file = "./excel_eerData.xlsx", first_column_as_time=TRUE, skipsheet=NULL)
```
which creates a list in the style of the original `eerData` data set. The first argument `file` has to be a valid path to an Excel file. The second argument `first_column_as_time` is a logical indicating whether the first column of each spreadsheet is a time index, while the `skipsheet` argument can be specified to leave out specific sheets (either as a vector of strings or numeric indices). If you want to transform the list object to a matrix, you can use the command `list_to_matrix` or transform it back to a list with `matrix_to_list`:
```{r, "transform to matrix", eval=FALSE}
eerData_matrix <- list_to_matrix(eerData_read)
eerData_list <- matrix_to_list(eerData_matrix)
```
</p>
# Estimation
<p align=justify>
The main function of the `BGVAR` package is its `bgvar` function. The unique feature of this toolbox is that we use Bayesian shrinkage priors, optionally with stochastic volatility, to estimate the country models in the GVAR. In its current version, three priors for the country VARs are implemented:
</p>
* Non-conjugate Minnesota prior [`MN`, @Litterman1986;@Koop2010]
* Stochastic Search Variable Selection prior [`SSVS`, @George2008]
* Normal-Gamma prior [`NG`, @Huber2019]
<p align=justify>
The first two priors are described in more detail in @CrespoCuaresma2016. For a more technical description of the Normal-Gamma prior see @Huber2019 and for an application in the GVAR context @Feldkircher2019b. For the variances we can assume homoskedasticity or time variation (stochastic volatility). For the latter, the library relies on the `stochvol` package of @Kastner2016.
</p>
We start with estimating our toy model using the `NG` prior, the reduced `eerData` data set and
the adjusted `W.trade0012` weight matrix:
```{r "model.1",results="hide" }
model.1<-bgvar(Data=eerData,
W=W.trade0012,
draws=100,
burnin=100,
plag=1,
prior="NG",
hyperpara=NULL,
SV=TRUE,
thin=1,
trend=TRUE,
hold.out=0,
eigen=1
)
```
<p align=justify>
The default prior specification in `bgvar` is to use the NG prior with stochastic volatility and one lag for both the endogenous and weakly exogenous variables (`plag=1`). In general, due to its high cross-sectional dimension, the GVAR can allow for very complex univariate dynamics and it might thus not be necessary to increase the lag length considerably as in a standard VAR [@Burriel2018]. The setting `hyperpara=NULL` implies that we use the standard hyperparameter specification for the NG prior; see the helpfiles for more details.
</p>
<p align=justify>
Other standard specifications that should be submitted by the user comprise the number of posterior draws (`draws`) and burn-ins (`burnin`, i.e., the draws that are discarded). To ensure that the MCMC estimation has converged, a high number of burn-ins is recommended (say 15,000 to 30,000). Saving the full set of posterior draws can eat up a lot of storage. To reduce this, we can use a thinning interval which stores only every thin$^{th}$ draw of the global posterior output. For example, with `thin=10` and `draws=5000` posterior draws, the number of stored MCMC draws is 500. `trend=TRUE` implies that the model is estimated using a trend. Note that regardless of the trend specification, each equation always automatically includes an intercept term.
</p>
<p align=justify>
Expert users might want to make further adjustments. These have to be provided via a list (`expert`). For example, to speed up computation, it is possible to invoke parallel computing in `R`. The number of available cpu cores can be specified via `cores`. Ideally this number is equal to the number of units $N$ (`expert=list(cores=N)`). Based on the user's operating system, the package then either uses `parLapply` (Windows platform) or `mclapply` (non-Windows platform) to invoke parallel computing. If `cores=NULL`, the unit models are estimated sequentially in a loop (via `R`'s `lapply` function). To use other / own apply functions, pass them on via the argument `applyfun`. As another example, we might be interested in inspecting the output of the $N$ country models in more detail. To do so, we could provide `expert=list(save.country.store=TRUE)`, which allows saving the whole posterior distribution of each unit / country model. Due to storage reasons, the default is set to `FALSE` and only the *posterior medians* of the single country models are reported. Note that even in this case, the whole posterior distribution of the *global model* is stored.
</p>
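<p align=justify>
For illustration (not run here), such expert settings could be passed along as in the following sketch; the number of cores is a placeholder and should be adapted to the available hardware.
</p>
```{r, "expert settings", eval=FALSE}
model.exp<-bgvar(Data=eerData,
                 W=W.trade0012,
                 draws=100,
                 burnin=100,
                 plag=1,
                 prior="NG",
                 SV=TRUE,
                 expert=list(cores=2, save.country.store=TRUE))
```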
<p align=justify>
We estimated the above model with stochastic volatility (`SV=TRUE`). There are several reasons why one may want to let the residual variances change over time. First and foremost, most time periods used in macroeconometrics are nowadays rather volatile including severe recessions. Hence accounting for time variation might improve the fit of the model [@primiceri2005time; @sims2006were; @Dovern2016; @Huber2016]. Second, the specification implemented in the toolbox nests the homoskedastic case. It is thus a good choice to start with the more general case when first confronting the model with the data. For structural analysis such as the calculation of impulse responses, we take the variance covariance matrix with the median volatilities (over the sample period) on its diagonal.\footnote{Alternatively, one would have $T$ variance covariance matrices and hence $T$ impulse responses for each variable. Since the size of the shock (i.e., the residual variance) varies over time, the resulting impulses would be typically either up- or down-scaled, whereas the shapes of the IRFs are not affected.} If we want to look at the volatilities of the first equation (`y`) in the euro area country model, we can type:
</p>
```{r "SV",results="hide"}
model.1$cc.results$sig$EA[,"EA.y","EA.y"]
```
<p align=justify>
To discard explosive draws, we can compute the eigenvalues of the reduced form of the global model, written in its companion form. Unfortunately, this can only be done once the single models have been estimated and stacked together (and hence not directly built into the MCMC algorithm for the country models). To discard draws that lead to higher eigenvalues than 1.05, set `eigen=1.05`. We can look at the 10 largest eigenvalues by typing:
</p>
```{r "ng.eigen",echo=TRUE}
model.1$stacked.results$F.eigen[1:10]
```
<p align=justify>
Last, we have used the default option `hold.out=0`, which implies that we use the full sample period to estimate the GVAR. For the purpose of forecast evaluation, `hold.out` could be set to a positive number, which would imply that the last `hold.out` observations are reserved as a hold-out sample and not used to estimate the model.
</p>
## Model Output and Diagnostic Checks
<p align="justify">
Having estimated the model, we can summarize the outcome in various ways.
First, we can use the `print` method
</p>
```{r "print.model",echo=TRUE}
print(model.1)
```
<p align="justify">
This just prints the submitted arguments of the `bgvar` object along with the model specification for each unit. The asterisks indicate weakly exogenous variables, double asterisks exogenous variables and variables without asterisks the endogenous variables per unit.
</p>
<p align="justify">
The `summary` method is a more comprehensive way to analyze the output. It computes descriptive statistics like convergence properties of the MCMC chain, serial autocorrelation in the errors and the average pairwise cross-unit correlation of the residuals.
</p>
```{r "summary.model"}
summary(model.1)
```
<p align="justify">
We can now have a closer look at the output provided by `summary`. The header contains some basic information about the prior used to estimate the model, the number of lags, posterior draws and countries. The next line shows Geweke's CD statistic, which is calculated using the `coda` package. Geweke's CD assesses practical convergence of the MCMC algorithm. In a nutshell, the diagnostic is based on a test for equality of the means of the first and last part of a Markov chain (by default we use the first 10% and the last 50%). If the samples are drawn from the stationary distribution of the chain, the two means are equal and Geweke's statistic has an asymptotically standard normal distribution.
</p>
<p align="justify">
The test statistic is a standard Z-score: the difference between the two sample means divided by its estimated standard error. The standard error is estimated from the spectral density at zero and so takes into account any autocorrelation. The test statistic shows that only a small fraction of all coefficients did not converge. Increasing the number of burn-ins can help decrease this fraction further. The statistic can also be calculated by typing `conv.diag(model.1)`.
</p>
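The corresponding call is simply:
```{r, eval=FALSE}
# Geweke's convergence diagnostic computed directly on the fitted object
conv.diag(model.1)
```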
<p align="justify">
The next model statistic is the likelihood of the global model. This statistic can be used for model comparison. Next, to assess whether first-order serial autocorrelation is present in the residuals, we provide the results of a simple F-test. The table shows the share of p-values that fall into different significance categories. Since the null hypothesis is that of no serial correlation, we would like to have as many large ($>0.1$) p-values as possible. The statistics show that already with one lag, serial correlation is modest in most equations' residuals. This could be the case because we have estimated the unit models with stochastic volatility. To further decrease serial correlation in the errors, one could increase the number of lags via `plag`.
</p>
<p align="justify">
The last part of the summary output contains a statistic of cross-unit correlation of (posterior median) residuals. One assumption of the GVAR framework is that of negligible cross-unit correlation of the residuals. Significant correlations prohibit structural and spillover analysis [@Dees2007a]. In this example, correlation is reasonably small.
</p>
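Both diagnostics can also be computed separately. The following sketch assumes the package's stand-alone helper functions `resid.corr.test()` and `avg.pair.cc()` are available:
```{r, eval=FALSE}
# F-test on first-order serial correlation of the residuals (function name assumed)
resid.corr.test(model.1)
# average pairwise cross-unit correlations of the residuals (function name assumed)
avg.pair.cc(model.1)
```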
<p align="justify">
Other useful methods offered by the `BGVAR` toolbox include `coef` (or its alias `coefficients`) to extract the $k \times k \times plag$ matrix of reduced-form coefficients of the global model. Via the `vcov` command, we can access the global variance covariance matrix, and the `logLik()` function allows us to retrieve the global log-likelihood (as provided by the `summary` command).
</p>
```{r "stats",echo=TRUE, results="hide"}
Fmat <- coef(model.1)
Smat <- vcov(model.1)
lik <- logLik(model.1)
```
<p align="justify">
Last, we can have a quick look at the in-sample fit using either the posterior median of the country models' residuals (`global=FALSE`) or those of the global solution of the GVAR (`global=TRUE`). The in-sample fit can also be extracted by using `fitted()`.
</p>
Here, we show the in-sample fit of the euro area model (`global=FALSE`).
```{r "insample",fig.margin=TRUE,fig.width=6,fig.height=8,fig.cap="In-sample fit for euro area variables"}
yfit <- fitted(model.1)
plot(model.1, global=FALSE, resp="EA")
```
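The fit based on the global solution can be plotted analogously:
```{r, eval=FALSE}
# in-sample fit based on the global solution of the GVAR
plot(model.1, global=TRUE, resp="EA")
```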
We can estimate the model with two further priors on the unit models, the SSVS prior and the Minnesota prior. To give a concrete example, the SSVS prior can be invoked by typing:
```{r "ssvs.1",echo=TRUE, results="hide"}
model.ssvs.1<-bgvar(Data=eerData,
W=W.trade0012,
draws=100,
burnin=100,
plag=1,
prior="SSVS",
hyperpara=NULL,
SV=TRUE,
thin=1,
Ex=NULL,
trend=TRUE,
expert=list(save.shrink.store=TRUE),
hold.out=0,
eigen=1,
verbose=TRUE
)
```
<p align="justify">
One feature of the SSVS prior is that it allows us to look at the posterior inclusion probabilities to gauge the importance of particular variables. By default, `bgvar` does not store the posterior output of the shrinkage coefficients in order to save memory. If we set `save.shrink.store=TRUE` within the `expert` settings (the default is `FALSE`), then this output is saved and posterior inclusion probabilities (PIPs) are computed. For example, we can have a look at the PIPs of the euro area model by typing:
</p>
```{r "Pips"}
model.ssvs.1$cc.results$PIP$PIP.cc$EA
```
<p align="justify">
The equations in the EA country model can be read column-wise with the rows representing the associated explanatory variables. The example shows that besides other variables, the trade balance (`tb`) is an important determinant of the real exchange rate (`rer`).
</p>
We can also have a look at the average of the PIPs across all units:
```{r "pips.avg"}
model.ssvs.1$cc.results$PIP$PIP.avg
```
This shows that the same determinants for the real exchange rate appear as important regressors in other country models.
\subsection{Different Specifications of the Model}
## Different Specifications of the Model
<p align="justify">
In this section we explore different specifications of the structure of the GVAR model. Other specification choices that relate more to the time series properties of the data, such as specifying different lags and priors, are left for the reader to explore. We will use the SSVS prior and judge the different specifications by examining the posterior inclusion probabilities.
</p>
<p align="justify">
As a first modification, we could use different weights for different variable classes, as proposed in @Eickmeier2015. For example, we could use financial weights to construct the weakly exogenous counterparts of financial variables and trade weights for the real variables.
</p>
The `eerData` set provides us with a list of different weight matrices that are described in the help files.
Now we specify the sets of variables to be weighted:
```{r "var.weight"}
variable.list <- list()
variable.list$real <- c("y","Dp","tb")
variable.list$fin <- c("stir","ltir","rer")
```
We can then re-estimate the model and hand over the `variable.list` via the argument `expert`:
```{r,results="hide"}
# weights for first variable set tradeW.0012, for second finW0711
model.ssvs.2<-bgvar(Data=eerData,
W=W.list[c("tradeW.0012","finW0711")],
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE,
eigen=1,
expert=list(variable.list=variable.list,save.shrink.store=TRUE),
trend=TRUE
)
```
<p align="justify">
Another specification would be to include a foreign variable only when its domestic counterpart is missing. For example, when working with nominal bilateral exchange rates we probably do not want to also include their weighted average (which would correspond to something like an effective exchange rate). Using the previous model, we could place an exclusion restriction on foreign long-term interest rates using `Wex.restr`, which is again handed over via `expert`. The following includes foreign long-term rates only in those country models where no domestic long-term rates are available:
</p>
```{r "ltir.estimate", results="hide"}
# does include ltir* only when ltir is missing domestically
model.ssvs.3<-bgvar(Data=eerData,
W=W.trade0012,
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE,
eigen=1,
expert=list(Wex.restr="ltir",save.shrink.store=TRUE),
                    trend=TRUE
)
```
```{r "print.model.ssvs.3"}
print(model.ssvs.3)
```
<p align="justify">
Last, we could also use a different specification of oil prices in the model. Currently, the oil price is determined endogenously within the US model. Alternatively, one could set up an own standing oil price model with additional variables that feeds the oil price back into the other economies as exogenous variable [@Mohaddes2019].
</p>
The model structure would then look something like in the Figure below:
{width=70%}
For that purpose we have to remove oil prices from the US model and attach them to a separate slot in the data list. This slot has to have its own country label. We use 'OC' for "oil country".
```{r "OC"}
eerData2<-eerData
eerData2$OC<-eerData$US[,c("poil"),drop=FALSE] # move oil prices into own slot
eerData2$US<-eerData$US[,c("y","Dp", "rer" , "stir", "ltir","tb")] # exclude it from US model
```
<p align="justify">
Now we have to specify a list object that we label `OC.weights`. The list has to consist of three slots with the following names `weights`, `variables` and `exo`:
</p>
```{r "OC.weights"}
OC.weights<-list()
OC.weights$weights<-rep(1/3, 3)
names(OC.weights$weights)<-names(eerData2)[1:3] # last one is OC model, hence only until 3
OC.weights$variables<-c(colnames(eerData2$OC),"y") # first entry, endog. variables, second entry weighted average of y from the other countries to proxy demand
OC.weights$exo<-"poil"
```
<p align="justify">
The first slot, `weights`, should be a vector of weights that sum up to unity. In the example above, we simply use $1/N$; other weights could include purchasing power parities (PPP). The weights are used to aggregate specific variables that in turn enter the oil model as weakly exogenous. The second slot, `variables`, should specify the names of the endogenous and weakly exogenous variables that are used in the OC model. In the oil price example, we include the oil price (`poil`) as an endogenous variable (not contained in any other country model) and a weighted average of output (`y`), using `weights`, to proxy world demand as a weakly exogenous variable. Next, we specify via `exo` which of the endogenous variables of the OC model are fed back into the other country models. In this example we specify `poil`. Last, we put all this information in a further list called `OE.weights` (other entity weights). This is done to allow for multiple other entity models (i.e., an oil price model, a joint monetary union model, etc.). It is important that the list entry has the same name as the other entity model, in our example `OC`.
</p>
```{r "OC.weights2"}
# other entities weights with same name as new oil country
OE.weights <- list(OC=OC.weights)
```
<p align="justify">
Now we can re-estimate the model where we pass on `OE.weights` via the `expert` argument.
</p>
```{r "estimate.OC",results="hide"}
model.ssvs.4<-bgvar(Data=eerData2,
W=W.trade0012,
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE,
expert=list(OE.weights=OE.weights,save.shrink.store=TRUE),
trend=TRUE
)
```
and can compare the results of the four models by, e.g., looking at the average PIPs.
```{r "aux"}
aux1<-model.ssvs.1$cc.results$PIP$PIP.avg;aux1<-aux1[-nrow(aux1),1:6]
aux2<-model.ssvs.2$cc.results$PIP$PIP.avg;aux2<-aux2[-nrow(aux2),1:6]
aux3<-model.ssvs.3$cc.results$PIP$PIP.avg;aux3<-aux3[-nrow(aux3),1:6]
aux4<-model.ssvs.4$cc.results$PIP$PIP.avg;aux4<-aux4[-nrow(aux4),1:6]
```
```{r "heat1", fig.show="hold",out.width="25%",fig.cap="Heatmaps of PIPs."}
heatmap(aux1,Rowv=NA,Colv=NA, main="Model 1", cex.main=2, cex.axis=1.7)
heatmap(aux2,Rowv=NA,Colv=NA, main="Model 2", cex.main=2, cex.axis=1.7)
heatmap(aux3,Rowv=NA,Colv=NA, main="Model 3", cex.main=2, cex.axis=1.7)
heatmap(aux4,Rowv=NA,Colv=NA, main="Model 4", cex.main=2, cex.axis=1.7)
```
<p align="justify">
We could also compare the models based on their fit, the likelihood, information criteria such as the DIC, residual properties or their forecasting performance.
</p>
\section{Impulse Response Functions}
# Impulse response functions
<p align="justify">
The package allows us to compute dynamic responses in three different ways: generalized impulse response functions (GIRFs) as in @Pesaran1998, orthogonalized impulse response functions using a Cholesky decomposition of the variance covariance matrix, and impulse response functions given a set of user-specified sign restrictions.
</p>
\subsection{Recursive Identification and GIRFs}
## Recursive Identification and GIRFs
<p align="justify">
Most GVAR applications deal with *locally* identified shocks. This implies that the shock of interest is orthogonal to the other shocks in the same unit model and hence can be interpreted in a *structural* way. There is still correlation between the shocks across unit models, so the corresponding responses (the spillovers) are not fully structural [@Eickmeier2015]. For this reason, some GVAR applications favor generalized impulse response functions, which per se do not rely on an orthogonalization. In `BGVAR`, responses to both types of shocks can be easily analyzed using the `irf` function.
</p>
<p align="justify">
This function requires as inputs a model object (`x`) and the impulse response horizon (`n.ahead`); the default identification method is the recursive identification scheme via the Cholesky decomposition. Further arguments can be passed on using the wrapper `expert` and are discussed in the help files. The following computes impulse responses to all shocks in the system with unit scaling, using the default identification settings:
</p>
```{r, "shocks", results="hide"}
irf.chol<-irf(model.ssvs.1, n.ahead=24, expert=list(save.store=FALSE))
```
<p align="justify">
The results are stored in `irf.chol$posterior`, which is a
four-dimensional array: $K \times n.ahead \times nr.of shocks \times Q$, with `Q` referring to the 50\%, 68\% and 95\% quantiles of the posterior distribution of the impulse response functions. The posterior median of the responses to the first shock can be accessed via `irf.chol$posterior[,,1,"Q50"]`.
</p>
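A quick way to check the layout of this array and to pull out, say, the median responses of US output to the first shock is the following sketch (assuming the first dimension carries the usual country.variable names):
```{r, eval=FALSE}
dim(irf.chol$posterior)                  # K x n.ahead x number of shocks x Q
irf.chol$posterior["US.y", , 1, "Q50"]   # median response of US output to the first shock
```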
<p align="justify">
Note that this example was for illustrative purposes; in most instances, we would be interested in a particular shock, and calculating responses to all shocks in the system is rather inefficient. Hence, we can provide the `irf` function with more information. To be more precise, let us assume that we are interested in an expansionary monetary policy shock (i.e., a decrease in short-term interest rates) in the US country model.
</p>
<p align="justify">
For that purpose, we can set up a `shockinfo` object, which contains information about which variable we want to shock (`shock`), the size of the shock (`scale`), the specific identification method (`ident`), and whether it is a shock applied in a single country or in multiple countries (`global`). We can use the helper function `get_shockinfo()` to set up such a dummy object, which we can subsequently modify according to our needs. The following lines of code are used for a negative 100 basis point shock applied to US short-term interest rates:
</p>
```{r "us.mp", results="hide"}
# US monetary policy shock - Cholesky
shockinfo_chol<-get_shockinfo("chol")
shockinfo_chol$shock<-"US.stir"
shockinfo_chol$scale<--100
# US monetary policy shock - GIRF
shockinfo_girf<-get_shockinfo("girf")
shockinfo_girf$shock<-"US.stir"
shockinfo_girf$scale<--100
```
The `shockinfo` objects for Cholesky identification and GIRFs look exactly the same but additionally carry an attribute which classifies the particular identification scheme. If we compare them, we notice that both have three columns defining the shock, the scale and whether it is defined as a global shock. But we also see that the attributes differ, which is important for the identification in the `irf` function.
```{r, "shockinfo"}
shockinfo_chol
shockinfo_girf
```
Now, we identify a monetary policy shock with recursive identification:
```{r, "us.mp.chol", results="hide"}
irf.chol.us.mp<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo_chol, expert=list(save.store=TRUE))
```
The results are stored in `irf.chol.us.mp`. In order to save the complete set of draws, one can activate the `save.store` argument by setting it to `TRUE` within the expert settings (note: this may need a lot of storage).
```{r, "us.mp2"}
names(irf.chol.us.mp)
```
<p align="justify">
Again, `irf.chol.us.mp$posterior` is a $K \times n.ahead \times nr.of shocks \times 7$ object and the last slot contains the 50\%, 68\% and 95\% credible intervals along with the posterior median. If `save.store=TRUE`, `IRF_store` contains the full set of impulse response draws and you can calculate additional quantiles of interest.
</p>
We can plot the complete responses of a particular country by typing:
```{r "us.mp4", fig.margin=TRUE,out.width="80%",fig.cap="Responses of US country model"}
plot(irf.chol.us.mp, resp="US", shock="US.stir")
```
The plot shows the posterior median response (solid, black line) along with 50\% (dark grey) and 68\% (light grey) credible intervals.
We can also compare the Cholesky responses with GIRFs. For that purpose, let us look at a GDP shock.
```{r, "us.gdp", results="hide"}
# cholesky
shockinfo_chol <- get_shockinfo("chol", nr_rows = 2)
shockinfo_chol$shock <- c("US.stir","US.y")
shockinfo_chol$scale <- c(1,1)
# generalized impulse responses
shockinfo_girf <- get_shockinfo("girf", nr_rows = 2)
shockinfo_girf$shock <- c("US.stir","US.y")
shockinfo_girf$scale <- c(1,1)
# Recursive US GDP
irf.chol.us.y<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo_chol)
# GIRF US GDP
irf.girf.us.y<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo_girf)
```
```{r, "us.gdp.plots",fig.cap="Comparison of responses Cholesky (left) and GIRF (right) to a negative GDP shock.",fig.show="hold",out.width="25%",}
plot(irf.chol.us.y, resp="US.y", shock="US.y")
plot(irf.girf.us.y, resp="US.y", shock="US.y")
plot(irf.chol.us.y, resp="US.rer", shock="US.y")
plot(irf.girf.us.y, resp="US.rer", shock="US.y")
```
<p align="justify">
We see that the responses are similar. This is not surprising because we have shocked the first variable in the US country model (`y`) and there are no timing restrictions on the remaining variables (they are all affected without any lag). In that case, the orthogonal impulse responses and the GIRF coincide.
</p>
<p align="justify">
Last, we could also look at a *joint or global shock*. For example, we could be interested in the effects of a *simultaneous* decrease in output across major economies, such as the G-7 and Russia. For that purpose, we have to set the `global` column of the `shockinfo` object to `TRUE`. The following lines illustrate the joint GDP shock:
</p>
```{r, "global.gdp",results="hide",out.width="50%"}
shockinfo<-get_shockinfo("girf", nr_rows = 3)
shockinfo$shock<-c("EA.y","US.y","RU.y")
shockinfo$global<-TRUE
shockinfo$scale<--1
irf.global<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo)
plot(irf.global, resp=c("US.y","EA.y","RU.y"), shock="Global.y")
```
\subsection{Identification with Zero- and Sign-Restrictions}
## Identification with Zero- and Sign-Restrictions
<p align="justify">
In this section, we identify the shocks locally with sign restrictions. For that purpose, we will use an additional example data set and estimate a new GVAR. This data set contains one-year-ahead GDP, inflation and short-term interest rate forecasts for the US. The forecasts are from the
[survey of professional forecasters (SPF)](https://www.philadelphiafed.org/research-and-data/real-time-center/survey-of-professional-forecasters) data base.
</p>
```{r, hide=TRUE}
data("eerData")
eerData<-eerData[cN]
W.trade0012<-W.trade0012[cN,cN]
W.trade0012<-apply(W.trade0012,2,function(x)x/rowSums(W.trade0012))
# append expectations data to US model
temp <- cbind(USexpectations, eerData$US)
colnames(temp) <- c(colnames(USexpectations),colnames(eerData$US))
eerData$US <- temp
```
```{r, "us.spf", results="hide"}
model.ssvs.eer<-bgvar(Data=eerData,
W=W.trade0012,
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE)
```
<p align="justify">
For now, we start by identifying two standard macroeconomic shocks in the US model, namely an aggregate demand and an aggregate supply shock. While `shockinfo` was optional when using Cholesky identification or GIRFs, it is *mandatory* when working with sign restrictions. We proceed in two steps: first, we create a dummy object with `get_shockinfo("sign")` that contains information on the general shock setting, and then we add sign restrictions one by one using `add_shockinfo()`. The following illustrates this:
</p>
```{r, "us.spf.sign.spec"}
shockinfo<-get_shockinfo("sign")
shockinfo<-add_shockinfo(shockinfo, shock="US.y",
restriction="US.Dp", sign=">", horizon=1, prob=1, scale=1)
shockinfo<-add_shockinfo(shockinfo, shock="US.Dp",
restriction="US.y", sign="<", horizon=1, prob=1, scale=1)
```
<p align="justify">
In `add_shockinfo` we provide information on which variable to shock (`shock`), on which responses to put the sign restrictions (`restriction`), the direction of the restrictions (`sign`) and for how long these restrictions should hold (`horizon`). Note that the shock is always positive, but can be re-scaled by `scale`. The argument `prob` allows you to specify a percentage of the draws for which the restrictions have to hold. This argument might be useful when working with cross-sectional sign restrictions, where the idea is that some restrictions have to hold on average or only at a certain percentage. The default is `prob=1`. If we want to add more restrictions to a particular shock, we can simply provide vectors instead of scalars (see the sketch below). Note that increasing the number of restrictions (on the variables or the horizon) will lead to more precise inference; however, finding a suitable rotation matrix will become substantially harder.
</p>
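For instance, a second restriction (on `US.stir`) could be added to the `US.Dp` shock as in the following sketch:
```{r, eval=FALSE}
# sketch: two restrictions on the US.Dp shock, provided as vectors
shockinfo <- add_shockinfo(shockinfo, shock="US.Dp",
                           restriction=c("US.y","US.stir"),
                           sign=c("<","<"), horizon=c(1,1),
                           prob=c(1,1), scale=1)
```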
<p align="justify">
We then invoke the `irf()` command to compute the impulse responses. The function draws rotation matrices using the algorithm of @Ramirez2010. In case we specify additional zero restrictions (see the next example below), we use the algorithm of @Arias2018. By default, we use one CPU core (`cores=NULL`) and do not store the full set of responses (`save.store=FALSE`). The maximum number of rotation matrices sampled per MCMC draw before we jump to the next draw can be specified by `MaxTries`.
</p>
```{r, "us.spf.sign",message=FALSE, results="hide"}
irf.sign<-irf(model.ssvs.eer, n.ahead=24, shockinfo=shockinfo,
expert=list(MaxTries=100, save.store=FALSE, cores=NULL))
```
We can infer the number of successful rotation matrices by looking at
```{r, "us.spf.sign2"}
irf.sign$rot.nr
```
```{r,"us.spf.plots",fig.cap="Responses to AS (upper panel) and AD (lower panel) shock.",fig.show="hold",out.width="50%"}
plot(irf.sign, resp=c("US.y","US.Dp"), shock="US.y")
plot(irf.sign, resp=c("US.y","US.Dp"), shock="US.Dp")
```
<p align="justify">
Several recent papers advocate the inclusion of survey data in a VAR. @Castelnuovo2010 show that including inflation expectations mitigates the price puzzle (i.e., the counter-intuitive positive movement of inflation in response to a monetary tightening). @Damico2015 go one step further and argue that expectations should always be included in a VAR model since they contain information that is not contained in standard macroeconomic data. They also show how to make inference with survey data in a VAR framework and propose so-called rationality conditions. For an application in a GVAR context, see @Boeck2021a. In a nutshell, these conditions put restrictions on actual data to match the expectations either on average over (`ratio.avg`) or at the end of (`ratio.H`) the forecast horizon. Let us look at a concrete example.
</p>
```{r, "us.spf.sign3",results="hide"}
shockinfo<-get_shockinfo("sign")
shockinfo<-add_shockinfo(shockinfo, shock="US.stir_t+4",
restriction=c("US.Dp_t+4","US.stir","US.y_t+4","US.stir_t+4","US.Dp_t+4","US.y_t+4"),
sign=c("<","0","<","ratio.avg","ratio.H","ratio.H"),
horizon=c(1,1,1,5,5,5),
prob=1, scale=1)
irf.sign.zero<-irf(model.ssvs.eer, n.ahead=20, shockinfo=shockinfo,
expert=list(MaxTries=100, save.store=TRUE))
```
<p align="justify">
The figure below shows the results for short term interest rates (`stir`) and output (`y`).
</p>
```{r, "eer.spf.plots",fig.cap="Rationality conditions I.",out.width="50%",fig.show="hold"}
# rationality condition: US.stir_t+4 on impact is equal to average of IRF of
# US.stir between horizon 2 and 5
matplot(cbind(irf.sign.zero$IRF_store["US.stir_t+4",1,,1],
irf.sign.zero$IRF_store["US.stir",1,,1]),
type="l",ylab="",main="Short-term Interest Rate",lwd=2,xaxt="n", cex.main=2);
axis(side=1,at=c(1:5,9,13,17,21,25),label=c(0:4,8,12,16,20,24), cex.axis=1.7)
legend("topright",lty=c(1,2),c("expected","actual"),lwd=2,bty="n",col=c("black","red"))
segments(x0=2,y0=1,x1=5,y1=1,lwd=2,lty=3,col="grey")
points(1,1,col="grey",pch=19,lwd=4)
abline(v=c(2,5),lty=3,col="grey",lwd=2)
# rationality condition: US.y_t+4 on impact is equal to H-step ahead IRF
# of US.y in horizon 5
matplot(cbind(irf.sign.zero$IRF_store["US.y_t+4",1,,1],
irf.sign.zero$IRF_store["US.y",1,,1]),
type="l",ylab="",main="Output",lwd=2,xaxt="n", cex.main=2)
axis(side=1,at=c(1:5,9,13,17,21,25),label=c(0:4,8,12,16,20,24), cex.axis=1.7)
legend("topright",lty=c(1,2),c("expected","actual"),lwd=2,bty="n",col=c("black","red"))
yy<-irf.sign.zero$IRF_store["US.y_t+4",1,1,1]
segments(x0=1,y0=yy,x1=5,y1=yy,lwd=2,lty=3,col="grey");abline(v=c(1,5),col="grey",lty=3)
points(1,yy,col="grey",pch=19,lwd=4);points(5,yy,col="grey",pch=19,lwd=4)
```
<p align="justify">
Impulse responses that refer to observed data are in red (dashed), and the ones referring to expected data in black. The condition we have imposed on short-term interest rates (top panel) was that observed rates should equal the shock to expected rates *on average over the forecast horizon* (one year, i.e., on impact plus 4 quarters). The respective period is marked by the two vertical, grey lines. Put differently, the average of the red-dashed line over the forecast horizon has to equal the expectation shock on impact (grey dot).
On output, shown in the bottom panel, by contrast, we have imposed a condition that has to hold exactly at the forecast horizon. The red line, the impulse response of observed output, has to meet the *impact response* of expected output at $h=5$. In the figure, these two points are indicated by the two grey dots.
</p>
<p align="justify">
The last example we look at is how to put restrictions on the cross-section. @Chudik2011b and @Cashin2014 argue that a major advantage of GVARs is that they also allow restrictions to be placed on variables from different countries, which should further sharpen inference. They apply cross-sectional restrictions to identify oil supply and demand shocks, with the restrictions placed on oil-importing countries' GDP.
</p>
<p align="justify">
Here, we follow @Feldkircher2020 who use cross-sectional restrictions to identify a term spread shock in the euro area. Since they use separate country models for members of the euro area, the joint monetary policy has to be modeled. One idea that has been put forth in recent applications is to set up an additional country model for the joint monetary policy in the euro area. In the next example, we follow @Georgiadis2015 and set up an ECB model that determines euro area interest rates according to a Taylor rule. This idea follows the set-up of the additional oil price model and can be summarized graphically in the picture below.
</p>
{width=70%}
We can look at the data by typing:
```{r, "ea.data"}
data(monthlyData);monthlyData$OC<-NULL
names(monthlyData)
# list of weights of other entities with same name as additional country model
OE.weights = list(EB=EB.weights)
EA_countries <- c("AT", "BE", "DE","ES", "FI","FR")
# "IE", "IT", "NL", "PT","GR","SK","MT","CY","EE","LT","LV")
```
<p align="justify">
To estimate the GVAR with an 'EB' country model, we have to specify additional arguments similar to the example with the oil price model discussed above. The `monthlyData` set already comes along with a pre-specified list `EA.weights` with the mandatory slots `weights`, `variables` and `exo`. The specification implies that the euro area monetary policy model (`EB`) includes `EAstir`, `total.assets`, `M3`, `ciss` as endogenous variables (these are contained in `monthlyData$EB`). We use PPP-weights contained in `weights` to aggregate output (`y`) and prices (`p`) from euro area countries and include them as weakly exogenous variables. Euro area short-term interest rates (`EAstir`) and the ciss indicator (`ciss`), specified in `exo`, are then passed on as exogenous variables to the remaining countries. Finally, we put `EA.weights` into the `OE.weights` list and label the slot `EB` (as the name of the additional country model, `names(monthlyData)`) and estimate the model:
</p>
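Before estimation, we can quickly inspect the weight specification handed over for the `EB` model:
```{r, eval=FALSE}
# look at the weights, variables and exogenous variables of the EB model
str(OE.weights$EB)
```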
```{r, "restrict_sample", hide=TRUE}
monthlyData <- monthlyData[c(EA_countries,"EB")]
W<-W[EA_countries,EA_countries]
W<-apply(W,2,function(x)x/rowSums(W))
OE.weights$EB$weights <- OE.weights$EB$weights[names(OE.weights$EB$weights)%in%EA_countries]
```
```{r,"ea.estimate", results="hide"}
# estimates the model
model.ssvs<-bgvar(Data=monthlyData,
W=W,
draws=200,
burnin=200,
plag=1,
prior="SSVS",
eigen=1.05,
expert=list(OE.weights=OE.weights))
```
We can now impose a joint shock on long-term interest rates for selected countries using sign restrictions on the cross section with the following lines of code:
```{r,"ea.sign"}
# imposes sign restrictions on the cross-section and for a global shock
# (long-term interest rates)
shockinfo<-get_shockinfo("sign")
for(cc in c("AT","BE","FR")){
shockinfo<-add_shockinfo(shockinfo, shock=paste0(cc,".ltir"),
restriction=paste0(cc,c(".ip",".p")),
sign=c("<","<"), horizon=c(1,1),
prob=c(0.5,0.5), scale=c(-100,-100),
global=TRUE)
}
```
We can have a look at the restrictions by looking at the `shockinfo` object:
```{r,"global.restrictions"}
shockinfo
```
<p align="justify">
Note the column `prob`. Here, we have specified that the restrictions have to hold only for half of the countries. We could make the restrictions stricter by increasing the percentage.
</p>
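As a sketch, requiring the restrictions to hold for all countries amounts to setting the `prob` column to one before calling `irf`:
```{r, eval=FALSE}
# stricter version: restrictions have to hold for all countries
shockinfo$prob <- 1
```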
We can now compute the impulse responses using the same function as before.
```{r,"global.shock.irf",echo=TRUE,results="hide"}
irf.sign.ssvs<-irf(model.ssvs, n.ahead=24, shockinfo=shockinfo, expert=list(MaxTries=500))
```
To verify the sign restrictions, type:
```{r,"ea.sign.verify"}
irf.sign.ssvs$posterior[paste0(EA_countries[-c(3,12)],".ltir"),1,1,"Q50"]
irf.sign.ssvs$posterior[paste0(EA_countries,".ip"),1,1,"Q50"]
irf.sign.ssvs$posterior[paste0(EA_countries,".p"),1,1,"Q50"]
```
The following plots the output responses for selected euro area countries.
```{r, "ea.sign.plots",fig.show="hold",out.width="25%",fig.cap="Output responses of selected euro area countries."}
plot(irf.sign.ssvs, resp=c("AT.ip"), shock="Global.ltir")
plot(irf.sign.ssvs, resp=c("BE.ip"), shock="Global.ltir")
plot(irf.sign.ssvs, resp=c("DE.ip"), shock="Global.ltir")
plot(irf.sign.ssvs, resp=c("ES.ip"), shock="Global.ltir")
```
\subsection{Forecast Error Variance Decomposition (FEVD and GFEVD)}
## Forecast Error Variance Decomposition (FEVD and GFEVD)
<p align="justify">
Forecast error variance decompositions indicate the amount of information each variable contributes to the other variables in the autoregression. They are calculated by examining how much of the forecast error variance of each variable can be explained by exogenous shocks to the other variables. In a system with fully orthogonalized errors, the FEVD shares sum up to 1. In the GVAR context, however, since we identify a shock only locally in a particular country model and there is still a certain degree of residual correlation, the shares typically exceed unity. By contrast, a fully orthogonalized system, obtained for example by means of a Cholesky decomposition, would yield shares that sum up to unity but relies on assumptions that are probably hard to defend. In the case of the Cholesky decomposition, this would imply timing restrictions, i.e., which variables in which units are affected immediately and which only with a lag.
</p>
<p align="justify">
One way of fixing this is to use generalized forecast error variance decompositions. Like GIRFs, these are independent of the ordering but, since the shocks are not orthogonalized, yield shares that exceed unity. Recently, @Lanne2016 proposed a way of scaling the GFEVDs, which has the nice property of shares summing up to 1 and results being independent of the ordering of the variables in the system. To calculate them, we can use the `gfevd` command. We can either use a running mean (`running=TRUE`) or the full set of posterior draws. The latter is computationally very expensive.
</p>
```{r, "fevd"}
#calculates the LN GFEVD
gfevd.us.mp=gfevd(model.ssvs.eer,n.ahead=24,running=TRUE,cores=4)$FEVD
# get position of EA
idx<-which(grepl("EA.",dimnames(gfevd.us.mp)[[2]]))
own<-colSums(gfevd.us.mp["EA.y",idx,])
foreign<-colSums(gfevd.us.mp["EA.y",-idx,])
```
```{r, "fevd.plot",fig.cap="FEVD of EA GDP.",out.width="50%"}
barplot(t(cbind(own,foreign)),legend.text =c("own","foreign"))
```
<p align="justify">
The plot above shows a typical pattern: On impact and in the first periods, EA variables (own) explain a large share of GFEVD. With time and through the lag structure in the model, other countries' variables show up more strongly as important determinants of EA output error variance.
</p>
<p align="justify">
In case we want to focus on a single country, which we have fully identified either using a Cholesky decomposition or sign restrictions, we can compute a simple forecast error variance decomposition (FEVD). This can be done by using the command `fevd()`. Since the computation is very time consuming, the FEVDs are based on the posterior median only (as opposed to calculating FEVDs for each MCMC draw or using a running mean). In case the underlying shock has been identified via sign restrictions, the corresponding rotation matrix is the one that fulfills the sign restrictions at the point estimate of the posterior median of the reduced form coefficients (stored in `irf.obj$struc.obj$Rmed`). Alternatively one can submit a rotation matrix using the option `R`.
</p>
```{r, "fevd.struc"}
# calculates FEVD for variables US.y
fevd.us.y=fevd(irf.chol.us.mp, var.slct=c("US.y"))$FEVD
idx<-which(grepl("US.",rownames(fevd.us.y)))
```
```{r, "fevd.struc.plot",fig.cap="FEVD of US GDP.",out.width="50%"}
barplot(fevd.us.y[idx,1,])
```
\subsection{Historical Decomposition}
## Historical Decomposition
<p align="justify">
Historical decompositions allow us to examine the relative importance of structural shocks in explaining deviations of a time series from its unconditional mean. This can be used to assess the hypothetical question of how the data would have looked if they had been driven only by a particular structural shock (e.g., a monetary policy shock) or a combination of structural shocks. It can be calculated using the function `hd()`. The function also allows us to compute the structural error of the model. To save computational time as well as due to storage limits, we use the point estimate of the posterior median (as opposed to calculating HDs and the structural error for each draw of the MCMC chain). In case the shock has been identified via sign restrictions, a rotation matrix has to be selected. If not specified otherwise (via `R`), the rotation matrix based on the posterior median of the reduced form coefficients (`irf.obj$struc.obj$Rmed`) will be used.
</p>
```{r,"hd"}
HD<-hd(irf.chol.us.mp)
# summing them up should get you back the original time series
org.ts<-apply(HD$hd_array,c(1,2),sum) # this sums up the contributions of all shocks + constant, initial conditions and residual component (last three entries in the third dimension of the array)
```
```{r, "hd.plot",fig.cap="Historical decomposition of euro area GDP.",out.width="50%"}
matplot(cbind(HD$x[,1],org.ts[,1]),type="l",ylab="",lwd=2, cex.axis=1.7)
legend("bottomright",c("hd series","original"),col=c("black","red"),lty=c(1,2),bty="n",cex=2)
```
\section{Unconditional and Conditional Forecasts}
# Unconditional and Conditional Forecasts
<p align="justify">
In this section, we demonstrate how the package can be used for forecasting. We distinguish between unconditional and conditional forecasting. Typical applications of unconditional forecasting are to select a model from a range of candidate models or for out-of-sample forecasting. Conditional forecasts can be used for scenario analysis by comparing a forecast with a fixed future path of a variable of interest to its unconditional forecast.
</p>
\subsection{Unconditional Forecasts}
## Unconditional Forecasts
<p align="justify">
Since the GVAR framework was developed to capture cross-country dependencies, it can handle a rich set of dynamics and interdependencies. This can also be useful for forecasting either global components (e.g., global output) or country-specific variables controlling for global factors. @Pesaran2009 show that the GVAR yields competitive forecasts for a range of macroeconomic and financial variables. @CrespoCuaresma2016 demonstrate that Bayesian shrinkage priors can help improve GVAR forecasts, and @Dovern2016 and @Huber2016 provide evidence for further gains in forecast performance from using GVARs with stochastic volatility.
</p>
<p align="justify">
To compute forecasts with the `BGVAR` package, we use the command `predict`. To be able to evaluate the forecast, we have to specify the size of the hold-out sample when estimating the model. Here, we choose a hold-out sample of 8 observations by setting `hold.out=8` (the default value is `hold.out=0`):
</p>
```{r,"fcast.est", results="hide"}
model.ssvs.h8<-bgvar(Data=eerData,
W=W.trade0012,
draws=500,
burnin=500,
plag=1,
prior="SSVS",
hyperpara=NULL,
SV=TRUE,
thin=1,
trend=TRUE,
hold.out=8,
eigen=1
)
```
<p align="justify">
The forecasts can then be calculated using the `predict` function. We calculate forecasts up to 8 periods ahead by setting `n.ahead=8`:
</p>
```{r,"fcast.predict", results="hide"}
fcast <- predict(model.ssvs.h8, n.ahead=8, save.store=TRUE)
```
<p align="justify">
The forecasts are stored in `fcast$fcast`, which also contains the credible intervals of the predictive posterior distribution. We can evaluate the forecasts against the retained observations by looking at the root mean squared errors (RMSEs) or log-predictive scores (LPS).
</p>
```{r "lps"}
lps.h8 <- lps(fcast)
rmse.h8 <- rmse(fcast)
```
The objects `lps.h8` and `rmse.h8` then each contain an $8 \times k$ matrix with the LPS values / RMSEs for each variable in the system over the forecast horizon.
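For a quick summary one could, for example, average the scores over the forecast horizon; the following sketch assumes the columns carry the usual country.variable names:
```{r, eval=FALSE}
# average RMSE and LPS over the forecast horizon for the US variables
colMeans(rmse.h8[, grepl("^US\\.", colnames(rmse.h8)), drop=FALSE])
colMeans(lps.h8[, grepl("^US\\.", colnames(lps.h8)), drop=FALSE])
```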
Last, we can visualize the forecasts by typing
```{r, "fcast.plot",fig.cap="Forecast plot.",out.width="50%"}
plot(fcast, resp="US.Dp", cut=8)
```
with `cut` denoting the number of realized data points that should be shown in the plot prior to the start of the forecasts.
\subsection{Conditional Forecasts}
## Conditional Forecasts
<p align="justify">
Similar to structural analysis, it is possible to compute conditional forecasts, where the conditioning is placed on a variable in a particular country model. For that purpose, we use the methodology outlined in @Waggoner1999 and applied in @Feldkircher2015 in the GVAR context. The following lines set up a conditional forecast holding inflation in the US country model fixed for five periods at its last observed value in the sample. Conditional forecasts are computed with the `predict` function by additionally supplying a constraint matrix via the `constr` argument; make sure that the columns of the constraint matrix correspond to the variables of the global model (`xglobal`).
</p>
```{r "cond.predict",results="hide"}
# matrix with constraints
constr <- matrix(NA,nrow=fcast$n.ahead,ncol=ncol(model.ssvs.h8$xglobal))
colnames(constr) <- colnames(model.ssvs.h8$xglobal)
# set "US.Dp" for five periods on its last value
constr[1:5,"US.Dp"] <-model.ssvs.h8$xglobal[nrow(model.ssvs.h8$xglobal),"US.Dp"]
# compute conditional forecast (hard restriction)
cond_fcast <- predict(model.ssvs.h8, n.ahead=8, constr=constr, constr_sd=NULL)
```
<p align="justify">
We could impose the same restrictions as "soft conditions" that account for uncertainty by drawing from a Gaussian distribution with the conditional forecast in `constr` as mean and the standard deviations given in the matrix `constr_sd`, which has the same size as `constr`.
</p>
```{r "cond.predict.sd",results="hide"}
# add uncertainty to conditional forecasts
constr_sd <- matrix(NA,nrow=fcast$n.ahead,ncol=ncol(model.ssvs.h8$xglobal))
colnames(constr_sd) <- colnames(model.ssvs.h8$xglobal)
constr_sd[1:5,"US.Dp"] <- 0.001
# compute conditional forecast with soft restrictions
cond_fcast2 <- predict(model.ssvs.h8, n.ahead=8, constr=constr, constr_sd=constr_sd)
```
We can then compare the results
```{r, "cond.plot.1",out.width="50%",fig.show="hold",fig.cap="Conditional forecast of US Inflation, top panel without uncertainty during the conditioning, bottom panel with uncertainty."}
plot(cond_fcast, resp="US.Dp", cut=10)
plot(cond_fcast2, resp="US.Dp", cut=10)
```
with `cut` denoting the number of realized data points that should be shown in the plot prior to the start of the conditioning.
\section{Appendix}
# Appendix
\subsection{Main Function: `bgvar`}
## Function Arguments `bgvar`
Main arguments and description of the function `bgvar`.
* `Data`: Either a
+ list object of length $N$ that contains the data. Each element of the list refers to a country / entity. The number of columns (i.e., variables) in each country model can be different. The $T$ rows (i.e., number of time observations), however, need to be the same for each country. Country and variable names are not allowed to contain a `.` [dot].
+ matrix of dimension $T \times k$, with $k$ denoting the sum of all endogenous variables of the system. The column names should consist of two parts, separated by a `.` The first part should denote the country / entity and the second part the name of the variable. Country and variable names are not allowed to contain a `.` [dot].
* `W`: An $N \times N$ weight matrix with 0 elements on the diagonal and rows that sum up to unity, or a list of weight matrices. See the help file of `getweights` for more details.
* `plag`: Number of lags used (the same for domestic, exogenous and weakly exogenous variables). Default set to `plag=1`.
* `draws`: Number of draws saved. Default set to `draws=5000`.
* `burnin`: Number of burn-ins. Default set to `burnin=5000`.
* `prior`: Either "SSVS", "MN" or "NG". See details below. Default set to `prior=NG`.
* `SV`: If set to `TRUE`, models are fitted with stochastic volatility using the `stochvol` and `GIGrvg` packages. Due to storage issues, not the whole history of the $T$ variance covariance matrices is kept. Consequently, the BGVAR package shows only one set of impulse responses (with the variance covariance matrix based on the median volatilities over the sample period) instead of $T$ sets. Specify `SV=FALSE` to turn SV off.
* `hold.out`: Defines the hold-out sample. Default without hold-out sample, thus set to zero.
* `thin`: Is a thinning interval which grabs every 'thin'th draw from the posterior output. For example, `thin=10` saves every tenth draw from the posterior. Default set to `thin=1`.
* `hyperpara`: Is a list object that defines the hyperparameters when the prior is set to either `"MN"`, `"SSVS"`, `"NG"`, or `"HS"`.
+ `"miscellaneous:"`
+ `a_1` is the prior hyperparameter for the inverted gamma prior (shape) (set `a_1 = b_1` to a small value for the standard uninformative prior). Default is set to `a_1=0.01`.
+ `b_1` is the prior hyperparameter for the inverted gamma prior (rate). Default is set to `b_1=0.01`.
+ `prmean` is the prior mean on the first own lag of the autoregressive coefficient, standard value is `prmean=1` for non-stationary data. The prior mean for the remaining autoregressive coefficients is automatically set to 0.
+ `bmu` If `SV=TRUE`, this is the prior hyperparameter for the mean of the log-volatilities. Default is `bmu=0`.
+ `Bmu` If `SV=TRUE`, this is the prior hyperparameter for the variance of the mean of the log-volatilities. Default is `Bmu=0`.
+ `a0` If `SV=TRUE`, this is the hyperparameter for the Beta prior on the persistence parameter of the log-volatilities. Default is `a0=25`.
+ `b0` If `SV=TRUE`, this is the hyperparameter for the Beta prior on the persistence parameter of the log-volatilities. Default is `b0=1.5`.
+ `Bsigma` If `SV=TRUE`, this is the hyperparameter for the Gamma prior on the variance of the log-volatilities. Default is `Bsigma=1`.
+ `"MN"`
+ `shrink1` Starting value of `shrink1`. Default set to 0.1.
+ `shrink2` Starting value of `shrink2`. Default set to 0.2.
+ `shrink3` Hyperparameter of `shrink3`. Default set to 100.
+ `shrink4` Starting value of `shrink4`. Default set to 0.1.
+ `"SSVS"`
+ `tau0` is the prior variance associated with the normal prior on the regression coefficients if a variable is NOT included (spike, tau0 should be close to zero).
+ `tau1` is the prior variance associated with the normal prior on the regression coefficients if a variable is included (slab, tau1 should be large).
+ `kappa0` is the prior variance associated with the normal prior on the covariances if a covariance equals zero (spike, kappa0 should be close to zero).
+ `kappa1` is the prior variance associated with the normal prior on the covariances if a covariance is unequal to zero (slab, kappa1 should be large).
+ `p_i` is the prior inclusion probability for each regression coefficient (default is 0.5).
+ `q_ij` is the prior inclusion probability for each covariance (default is 0.5).
+ `"NG"`
+ `e_lambda` Prior hyperparameter for the Gamma prior on the lag-specific shrinkage components, standard value is `e_lambda=1.5`.
+ `d_lambda` Prior hyperparameter for the Gamma prior on the lag-specific shrinkage components, standard value is `d_lambda=1`.
+ `tau_theta` Parameter of the Normal-Gamma prior that governs the heaviness of the tails of the prior distribution. A value of `tau_theta=1` would lead to the Bayesian LASSO. The default value differs per entity and is set to `tau_theta=1/log(M)`, where `M` is the number of endogenous variables per entity.
+ `sample_tau` If set to `TRUE`, `tau_theta` is sampled.
+ `"HS"`: No additional hyperparameter needs to be elicited for the horseshoe prior.
* `eigen` Set to `TRUE` if you want to compute the largest eigenvalue of the companion matrix for each posterior draw. If the modulus of the eigenvalue is significantly larger than unity, the model is unstable. Unstable draws exceeding an eigenvalue of one are then excluded. If `eigen` is set to a numeric value, then this corresponds to the maximum eigenvalue. The default is set to $1.05$ (which excludes all posterior draws for which the eigenvalue of the companion matrix was larger than $1.05$ in modulus).
* `Ex` For including truly exogenous variables to the model. Either a
+ `list object` of maximum length `N` that contains the data. Each element of the list refers to a country/entity and has to match the country/entity names in `Data`. If no truly exogenous variables are added to the respective country/entity model, omit the entry. The `T` rows (i.e., number of time observations), however, need to be the same for each country. Country and variable names are not allowed to contain a `.` [dot] since this is our naming convention.
+ `matrix object` of dimension `T` times number of truly exogenous variables. The column names should consist of two parts, separated by a `.` [dot]. The first part should denote the country / entity name and the second part the name of the variable. Country and variable names are not allowed to contain a `.` [dot].
* `trend` If set to `TRUE` a deterministic trend is added to the country models.
* `expert` Expert settings, must be provided as list. Default is set to `NULL`.
+ `variable.list` In case `W` is a list of weight matrices, specify here which set of variables should be weighted by which weighting matrix. Default is set to `NULL`.
+ `OE.weights`: Default value is `NULL`. Can be used to provide information of how to handle additional country models (other entities). Additional country models can be used to endogenously determine variables that are (weakly) exogenous for the majority of the other country models. As examples, one could think of an additional oil price model [@Mohaddes2019] or a model for the joint euro area monetary policy [@Georgiadis2015; @Feldkircher2020]. The data for these additional country models has to be contained in `Data`. The number of additional country models is unlimited. Each list entry of `OE.weights` has to be named similar to the name of the additional country model contained in `Data`. Each slot of `OE.weights` has to contain the following information:
+ `weights` a vector of weights with names relating to the countries for which data should be aggregated. Can also relate to a subset of countries contained in the data.
+ `variables` a vector of variable names that should be included in the additional country model. Variables that are not contained in the data slot of the extra country model are assumed to be weakly exogenous for the additional country model (aggregated with `weights`).
+ `exo` a vector of variable names that should be fed into the other countries as (weakly) exogenous variables.
+ `Wex.restr` A character vector that contains variables that should only be specified as weakly exogenous if not contained as endogenous variable in a particular country. An example that has often been used in the literature is to place these restrictions on nominal exchange rates. Default is `NULL` in which case all weakly exogenous variables are treated symmetrically. See function getweights for more details.
+ `save.country.store` If set to `TRUE` then function also returns the container of all draws of the individual country models. Significantly raises object size of output and default is thus set to `FALSE`.
+ `save.shrink.store` If set to `TRUE` the function also stores the posterior output of the shrinkage coefficients. Default set to `FALSE`.
+ `save.vola.store` If set to `TRUE` the function also stores the posterior output of the coefficients associated with the volatility process. Default set to `FALSE`.
+ `use_R` Boolean whether estimation should fall back on `R` version, otherwise `Rcpp` version is used (default).
+ `applyfun` Allows for a user-specific apply function, which has to have the same interface as `lapply`. If `cores=NULL` then `lapply` is used; if `cores` is set to a numeric, `parallel::parLapply()` is used on Windows platforms and `parallel::mclapply()` on non-Windows platforms.
+ `cores` Specifies the number of cores which should be used. Default is set to `NULL`, in which case `applyfun` is used.
* `verbose` If set to `FALSE` it suppresses printing messages to the console.
Below, find some example code for the MN, SSVS and NG priors; a short sketch for the HS prior follows after the chunk.
```{r, eval=FALSE}
# load dataset
data(eerData)
# Minnesota prior and two different weight matrices and no SV
# weights for first variable set tradeW.0012, for second finW0711
variable.list <- list()
variable.list$real <- c("y","Dp","tb")
variable.list$fin <- c("stir","ltir","rer")
Hyperparm.MN <- list(a_i = 0.01, # prior for the shape parameter of the IG
b_i = 0.01 # prior for the scale parameter of the IG
)
model.MN<-bgvar(Data=eerData,
W=W.list[c("tradeW.0012","finW0711")],
draws=200,
burnin=200,
plag=1,
hyperpara=Hyperparm.MN,
prior="MN",
thin=1,
eigen=TRUE,
SV=TRUE,
expert=list(variable.list=variable.list))
# SSVS prior
Hyperparm.ssvs <- list(tau0 = 0.1, # coefficients: prior variance for the spike
# (tau0 << tau1)
tau1 = 3, # coefficients: prior variance for the slab
# (tau0 << tau1)
kappa0 = 0.1, # covariances: prior variance for the spike
# (kappa0 << kappa1)
kappa1 = 7, # covariances: prior variance for the slab
# (kappa0 << kappa1)
a_1 = 0.01, # prior for the shape parameter of the IG
b_1 = 0.01, # prior for the scale parameter of the IG
p_i = 0.5, # prior inclusion probability of coefficients
q_ij = 0.5 # prior inclusion probability of covariances
)
model.ssvs<-bgvar(Data=eerData,
W=W.trade0012,
draws=100,
burnin=100,
plag=1,
hyperpara=Hyperparm.ssvs,
prior="SSVS",
thin=1,
eigen=TRUE)
# Normal Gamma prior
data(monthlyData)
monthlyData$OC<-NULL
Hyperparm.ng<-list(d_lambda = 1.5, # coefficients: prior hyperparameter for the NG-prior
e_lambda = 1, # coefficients: prior hyperparameter for the NG-prior
prmean = 0, # prior mean for the first lag of the AR coefficients
a_1 = 0.01, # prior for the shape parameter of the IG
b_1 = 0.01, # prior for the scale parameter of the IG
tau_theta = .6, # (hyper-)parameter for the NG
sample_tau = FALSE # estimate a?
)
model.ng<-bgvar(Data=monthlyData,
W=W,
draws=200,
burnin=100,
plag=1,
hyperpara=Hyperparm.ng,
prior="NG",
thin=2,
eigen=TRUE,
SV=TRUE,
expert=list(OE.weights=list(EB=EA.weights)))
```
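The horseshoe prior needs no additional hyperparameters; a minimal sketch (assuming the `"HS"` option as listed above) is:
```{r, eval=FALSE}
# Horseshoe prior: no hyperparameters have to be elicited
model.hs<-bgvar(Data=eerData,
                W=W.trade0012,
                draws=100,
                burnin=100,
                plag=1,
                prior="HS",
                SV=TRUE,
                eigen=TRUE)
```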
\subsection{Main Function `irf`}
## Function Arguments `irf`
* `x`: An objected fitted by function `bgvar`.
* `n.ahead`: Forecasting horizon.
* `shockinfo` Dataframe with additional information about the nature of shocks. Depending on the `ident` argument, the dataframe has to be specified differently. In order to get a dummy version for each identification scheme use `get_shockinfo`.
* `quantiles` Numeric vector with posterior quantiles. Default is set to compute median along with 68%/80%/90% confidence intervals.
* `expert` Expert settings, must be provided as list. Default is set to `NULL`.
+ `MaxTries` Numeric specifying the maximal number of tries for finding a rotation matrix with sign-restrictions. Attention: setting this number very large may result in very long computation times.
+ `save.store` If set to `TRUE` the full posterior of both, impulse response and rotation matrices, are returned. Default is `FALSE` in order to save storage.
+ `use_R` Boolean whether IRF computation should fall back on `R` version, otherwise `Rcpp` version is used (default).
+ `applyfun` In case `use_R=TRUE`, this allows for user-specific apply function, which has to have the same interface as `lapply`. If `cores=NULL` then `lapply` is used, if set to a numeric either `parallel::parLapply()` is used on Windows platforms and `parallel::mclapply()` on non-Windows platforms.
+ `cores` Numeric specifying the number of cores which should be used, also `all` and `half` is possible. By default only one core is used.
* `verbose` If set to `FALSE` it suppresses printing messages to the console.
Below, find some further examples.
```{r, eval=FALSE}
# First example, a US monetary policy shock, quarterly data
library(BGVAR)
data(eerData)
model.eer<-bgvar(Data=eerData,W=W.trade0012,draws=500,burnin=500,plag=1,prior="SSVS",thin=10,eigen=TRUE,trend=TRUE)
# generalized impulse responses
shockinfo<-get_shockinfo("girf")
shockinfo$shock<-"US.stir"; shockinfo$scale<--100
irf.girf.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
# cholesky identification
shockinfo<-get_shockinfo("chol")
shockinfo$shock<-"US.stir"; shockinfo$scale<--100
irf.chol.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
# sign restrictions
shockinfo <- get_shockinfo("sign")
shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.y","US.Dp"),
sign=c("<","<"), horizon=c(1,1), scale=1, prob=1)
irf.sign.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
# sign restrictions with relaxed cross-country restrictions
shockinfo <- get_shockinfo("sign")
# restriction for other countries holds to 75\%
shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.y","EA.y","UK.y"),
sign=c("<","<","<"), horizon=1, scale=1, prob=c(1,0.75,0.75))
shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.Dp","EA.Dp","UK.Dp"),
sign=c("<","<","<"), horizon=1, scale=1, prob=c(1,0.75,0.75))
irf.sign.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
# Example with zero restriction (Arias et al., 2018) and
# rationality conditions (D'Amico and King, 2017).
data("eerDataspf")
model.eer<-bgvar(Data=eerDataspf, W=W.trade0012.spf, draws=300, burnin=300,
plag=1, prior="SSVS", eigen=TRUE)
shockinfo <- get_shockinfo("sign")
shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4",
restriction=c("US.Dp_t+4","US.stir","US.y_t+4"),
sign=c("<","0","<"), horizon=1, prob=1, scale=1)
# rationality condition: US.stir_t+4 on impact is equal to average of
# IRF of US.stir between horizon 1 to 4
shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4", restriction="US.stir_t+4",
sign="ratio.avg", horizon=5, prob=1, scale=1)
# rationality condition: US.Dp_t+4 on impact is equal to IRF of US.Dp at horizon 4
shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4", restriction="US.Dp_t+4",
sign="ratio.H", horizon=5, prob=1, scale=1)
# rationality condition: US.y_t+4 on impact is equal to IRF of US.y at horizon 4
shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4", restriction="US.y_t+4",
sign="ratio.H", horizon=5, prob=1, scale=1)
# regulate maximum number of tries with expert settings
irf.ratio <- irf(model.eer, n.ahead=20, shockinfo=shockinfo,
expert=list(MaxTries=10))
```
```{r, hide=TRUE}
par(oldpar)
```
# References
---
title: "BGVAR: Bayesian Global Vector Autoregression"
author: "Maximilian Böck and Martin Feldkircher and Florian Huber"
date: "`r format(Sys.time(), '%d %B %Y')`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{BGVAR: Bayesian Global Vector Autoregression}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
bibliography: fullbib.bib
fig_caption: yes
citation_package: natbib
tags:
- Bayesian
- GVAR
abstract: |
<p align=justify>
This document describes the `BGVAR` library to estimate Bayesian Global vector autoregressions (GVAR) with different prior specifications and stochastic volatility. The library offers a fully fledged toolkit to conduct impulse response functions, forecast error variance and historical error variance decompositions. To identify structural shocks in a given country model or joint regional shocks, the library offers simple Cholesky decompositions, generalized impulse response functions and zero and sign restrictions -- the latter of which can also be put on the cross-section. We also allow for different structures of the GVAR like including different weights for different variables or setting up additional country models that determine global variables such as oil prices. Last, we provide functions to conduct and evaluate out-of-sample forecasts as well as conditional forecasts that allow for the setting of a future path for a particular variable of interest. The toolbox requires `R>=3.5`. </p>
---
```{r, include=FALSE}
knitr::opts_chunk$set(fig.width = 12, fig.height=8, fig.align="default")
knitr::opts_chunk$set(error = TRUE)
```
# Introduction
<p align=justify>
This vignette describes the BGVAR package that allows for the estimation of Bayesian global vector autoregressions (GVARs). The focus of the vignette is to provide a range of examples that demonstrate the full functionality of the library. It is accompanied by a more technical description of the GVAR framework. Here, it suffices to briefly summarize the main idea of a GVAR, which is a large system of equations designed to analyze or control for interactions across units. Most often, these units refer to countries and the interactions between them arise through economic and financial interdependencies. Also in this document, the examples we provide contain cross-country data. In principle, however, the GVAR framework can be applied to other units, such as regions, firms, etc. The following examples show how the GVAR can be used to either estimate spillover effects from one country to another, or alternatively, to look at the effects of a domestic shock controlling for global factors.
</p>
<p align=justify>
In a nutshell, the GVAR consists of two stages. In the first, $N$ vector autoregressive (VAR) models are estimated, one per unit. Each equation in a unit model is augmented with foreign variables that control for global factors and later link the unit-specific models. Typically, these foreign variables are constructed using exogenous, bilateral weights stored in an $N \times N$ weight matrix. The classical framework of @Pesaran2004 and @Dees2007a proposes estimating these country models in vector error correction form, while in this package we take a Bayesian stance and estimation is carried out using VARs. The user can transform the data into stationary form prior to estimation or estimate the model in levels. The `BGVAR` package also allows us to include a trend to obtain trend-stationary data. In the second step, the single country models are combined, using the assumption that they are linked via the exogenous weights, to yield a global representation of the model. This representation is then used to carry out impulse response analysis and forecasting.
</p>
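For illustration, a stylized first-order unit model (abstracting from deterministic terms) can be written as
$$x_{it} = A_{i} x_{i,t-1} + \Lambda_{i0} x^{*}_{it} + \Lambda_{i1} x^{*}_{i,t-1} + \varepsilon_{it}, \qquad x^{*}_{it} = \sum_{j=1}^{N} w_{ij} x_{jt},$$
where $x_{it}$ collects the endogenous variables of unit $i$, $x^{*}_{it}$ denotes their weakly exogenous (foreign) counterparts constructed with the bilateral weights $w_{ij}$, and $\varepsilon_{it}$ is the error term. The exact specification estimated by the package (lag lengths, deterministic terms, priors) is discussed in the sections below.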
<p align=justify>
This vignette consists of four blocks: getting started and data handling, estimation, structural analysis and forecasting. In the next part, we discuss which data formats the `bgvar` library can handle. We then proceed by showing examples of how to estimate a model using different Bayesian shrinkage priors -- for references see @CrespoCuaresma2016 and @Feldkircher2016a. We also discuss how to run diagnostic and convergence checks and examine the main properties of the model. In the third section, we turn to structural analysis, either using recursive (Cholesky) identification or sign restrictions. We will also discuss structural and generalized forecast error variance decompositions and historical decompositions. In the last section, we show how to compute unconditional and conditional forecasts with the package.
</p>
# Getting Started
We start by installing the package from CRAN and attaching it with
```{r,hide=TRUE}
oldpar <- par(no.readonly=TRUE)
set.seed(123)
library(BGVAR)
```
<p align=justify>
To ensure reproducibility of the examples that follow, we have set a particular seed (for `R`'s random number generator). As with every `R` library, the `BGVAR` package provides built-in help files which can be accessed by typing `?` followed by the function / command of interest. It also comes along with four example data sets: two of them correspond to the quarterly data set used in @Feldkircher2016a (`eerData`, `eerDataspf`), one is on monthly frequency (`monthlyData`). For convenience we also include the data that come along with the Matlab GVAR toolbox of @matlabToolbox, `pesaranData`. We include the 2019 vintage [@Mohaddes2020]. </p>
<p align=justify>
We start illustrating the functionality of the `BGVAR` package by using the `eerData` data set from @Feldkircher2016a. It contains 76 quarterly observations for 43 countries over the period from 1995Q1 to 2013Q4. The euro area (EA) is included as a regional aggregate.
</p>
We can load the data by typing
```{r "eerData"}
data(eerData)
```
<p align=justify>
This loads two objects: `eerData`, which is a list object of length $N$ (i.e., the number of countries) and `W.trade0012`, which is an $N \times N$ weight matrix.
</p>
We can have a look at the names of the countries contained in `eerData`
```{r "eerData2"}
names(eerData)
```
and at the names of the variables contained in a particular country by
```{r "eerData3"}
colnames(eerData$UK)
```
We can zoom in into each country by accessing the respective slot of the data list:
```{r "US",echo=TRUE}
head(eerData$US)
```
<p align=justify>
Here, we see that the global variable, oil prices (`poil`), is attached to the US country model. This corresponds to the classical GVAR set-up used, among others, in @Pesaran2004 and @Dees2007a. We also see that, in general, each country model $i$ can contain a different set of $k_i$ variables, as opposed to the requirements of a balanced panel.
</p>
<p align=justify>
The GVAR toolbox relies on one important *naming convention*, though: It is assumed that neither the country names nor the variable names contain a `.` [dot]. The reason is that the program internally has to collect and separate the data more than once and in doing that, it uses the `.` to separate countries / entities from variables. To give a concrete example, the slot in the `eerData` list referring to the USA should not be labelled `U.S.A.`, nor should any of the variable names contain a `.`
</p>
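A quick, purely illustrative way to check that this convention is met is to search the country and variable names for a dot:
```{r, eval=FALSE}
# should return FALSE: neither country nor variable names contain a "."
any(grepl(".", c(names(eerData), unlist(lapply(eerData, colnames))), fixed=TRUE))
```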
<p align=justify>
The toolbox also allows the user to submit the data as a $T \times k$ data matrix, with $k=\sum^N_{i=1} k_i$ denoting the sum of endogenous variables in the system. We can switch from the list representation of the data to matrix form by using the function `list_to_matrix` (and vice versa using `matrix_to_list`).
</p>
To convert the `eerData` we can type:
```{r "convert",echo=TRUE}
bigX<-list_to_matrix(eerData)
```
<p align=justify>
For users who want to submit data in matrix form, the above mentioned naming convention implies that the column names of the data matrix have to include the name of the country / entity and the variable name, separated by a `.` For example, for the converted `eerData` data set, the column names look like:
</p>
```{r "convert2",echo=TRUE}
colnames(bigX)[1:10]
```
<p align=justify>
with the first part of each column name indicating the country (e.g., `EA`) and the second the variable (e.g., `y`), separated by a `.`
Regardless of whether the data are submitted as a list or as a big matrix, the underlying data can be either of `matrix` class or of time series classes such as `ts` or `xts`.
</p>
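For illustration, and assuming the quarterly sample starting in 1995Q1 described above, the same data could equally be supplied as `ts` objects:
```{r, eval=FALSE}
# convert each country slot to a quarterly ts object (illustration only)
eerData_ts <- lapply(eerData, function(x) ts(x, start=c(1995,1), frequency=4))
```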
<p align=justify>
Finally, we look at the second important ingredient to build our GVAR model, the weight matrix. Here, we use annual bilateral trade flows (including services), averaged over the period from 2000 to 2012. This implies that the $ij^{th}$ element of $W$ contains trade flows from unit $i$ to unit $j$. These weights can also be made symmetric by calculating $\frac{(W_{ij}+W_{ji})}{2}$. Using trade weights to establish the links in the GVAR goes back to the early GVAR literature [@Pesaran2004] but is still used in the bulk of GVAR studies. Other weights, such as financial flows, have been proposed in @Eickmeier2015 and examined in @Feldkircher2016a. Another approach is to use estimated weights as in @Feldkircher2019b. The weight matrix should have `rownames` and `colnames` that correspond to the $N$ country names contained in `Data`.
</p>
```{r "tradeW",echo=TRUE}
head(W.trade0012)
```
The countries in the weight matrix should be in the same order as in the data list:
```{r "rownames.W"}
all(colnames(W.trade0012)==names(eerData))
```
The weight matrix should be row-standardized and the diagonal elements should be zero:
```{r "rowSums.W"}
rowSums(W.trade0012)
diag(W.trade0012)
```
Note that through row-standardizing, the final matrix is typically not symmetric (even when using the symmetric weights as raw input).
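As a sketch, a raw (non-negative) weight matrix `W.raw` -- a hypothetical input here -- could be symmetrized and row-standardized as follows:
```{r, eval=FALSE}
W.sym <- (W.raw + t(W.raw))/2   # symmetric raw weights, see the formula above
diag(W.sym) <- 0                # no self-links
W.std <- W.sym/rowSums(W.sym)   # each row sums to unity
```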
<p align=justify>
In what follows, we restrict the dataset to contain only three countries, `EA`, `US` and `RU`, and adjust the weight matrix accordingly. We do this only for *illustrative purposes, to save time and storage in this document*:
</p>
```{r, "eerDatasmall", hide=TRUE}
cN<-c("EA","US","RU")
eerData<-eerData[cN]
W.trade0012<-W.trade0012[cN,cN]
W.trade0012<-apply(W.trade0012,2,function(x)x/rowSums(W.trade0012))
W.list<-lapply(W.list,function(l){l<-apply(l[cN,cN],2,function(x)x/rowSums(l[cN,cN]))})
```
This results in the same dataset as available in `testdata`.
# Reading Data from Excel
<p align=justify>
In order to make BGVAR easier to handle for users working and organising data in spreadsheets via Excel, we provide our own reader function relying on the `readxl` package. In this section we provide some code to write the included datasets to Excel spreadsheets and then show how to read the data back from Excel. Hence, we provide an easy-to-follow approach with an example of how the data should be organised in Excel.
</p>
<p align=justify>
We start by exporting the data to Excel. The spreadsheet should be organised as follows. Each sheet consists of the data set for one particular country; hence, naming the sheets with the country names is essential. In each sheet, you should provide the time index in the first column, followed by one column per variable. In the following, we will export the `eerData` data set to Excel:
```{r, "export excel", eval=FALSE}
library(xlsx) # write.xlsx() is assumed to come from the 'xlsx' package (not attached by BGVAR)
time <- as.character(seq.Date(as.Date("1995-01-01"),as.Date("2013-10-01"),by="quarter"))
for(cc in 1:length(eerData)){
x <- coredata(eerData[[cc]])
rownames(x) <- time
write.xlsx(x = x, file="./excel_eerData.xlsx", sheetName = names(eerData)[cc],
col.names=TRUE, row.names=TRUE, append=TRUE)
}
```
which will create in your current working directory an excel sheet named `excel_eerData.xlsx`. This can then be read to R with the `BGVAR` package as follows:
```{r, "import excel", eval=FALSE}
eerData_read <- excel_to_list(file = "./excel_eerData.xlsx", first_column_as_time=TRUE, skipsheet=NULL)
```
which creates a list in the style of the original `eerData` data set. The first argument `file` has to be a valid path to an Excel file. The second argument `first_column_as_time` is a logical indicating whether the first column in each spreadsheet is a time index, while the `skipsheet` argument can be specified to leave out specific sheets (either as a vector of strings or numeric indices). If you want to transform the list object to a matrix, you can use the command `list_to_matrix`, or transform it back to a list with `matrix_to_list`:
```{r, "transform to matrix", eval=FALSE}
eerData_matrix <- list_to_matrix(eerData_read)
eerData_list <- matrix_to_list(eerData_matrix)
```
</p>
# Estimation
<p align=justify>
The main function of the `BGVAR` package is its `bgvar` function. The unique feature of this toolbox is that we use Bayesian shrinkage priors, optionally with stochastic volatility, to estimate the country models in the GVAR. In its current version, three priors for the country VARs are implemented:
</p>
* Non-conjugate Minnesota prior [`MN`, @Litterman1986;@Koop2010]
* Stochastic Search Variable Selection prior [`SSVS`, @George2008]
* Normal-Gamma prior [`NG`, @Huber2019]
<p align=justify>
The first two priors are described in more detail in @CrespoCuaresma2016. For a more technical description of the Normal-Gamma prior see @Huber2019 and for an application in the GVAR context @Feldkircher2019b. For the variances we can assume homoskedasticity or time variation (stochastic volatility). For the latter, the library relies on the `stochvol` package of @Kastner2016.
</p>
We start with estimating our toy model using the `NG` prior, the reduced `eerData` data set and
the adjusted `W.trade0012` weight matrix:
```{r "model.1",results="hide" }
model.1<-bgvar(Data=eerData,
W=W.trade0012,
draws=100,
burnin=100,
plag=1,
prior="NG",
hyperpara=NULL,
SV=TRUE,
thin=1,
trend=TRUE,
hold.out=0,
eigen=1
)
```
<p align=justify>
The default prior specification in `bgvar` is to use the NG prior with stochastic volatility and one lag for both the endogenous and weakly exogenous variables (`plag=1`). In general, due to its high cross-sectional dimension, the GVAR can allow for very complex univariate dynamics and it might thus not be necessary to increase the lag length considerably as in a standard VAR [@Burriel2018]. The setting `hyperpara=NULL` implies that we use the standard hyperparameter specification for the NG prior; see the helpfiles for more details.
</p>
<p align=justify>
Other standard specifications that should be submitted by the user comprise the number of posterior draws (`draws`) and burn-ins (`burnin`, i.e., the draws that are discarded). To ensure that the MCMC estimation has converged, a high number of burn-ins is recommended (say 15,000 to 30,000). Saving the full set of posterior draws can eat up a lot of storage. To reduce this, we can use a thinning interval which stores only every thin$^{th}$ draw of the global posterior output. For example, with `thin=10` and `draws=5000` posterior draws, the number of MCMC draws stored is 500. `trend=TRUE` implies that the model is estimated with a trend. Note that regardless of the trend specification, each equation always automatically includes an intercept term.
</p>
<p align=justify>
Expert users might want to make further adjustments. These have to be provided via a list (`expert`). For example, to speed up computation, it is possible to invoke parallel computing in `R`. The number of available cpu cores can be specified via `cores`. Ideally this number is equal to the number of units $N$ (`expert=list(cores=N)`). Based on the user's operating system, the package then uses either `parLapply` (Windows platforms) or `mclapply` (non-Windows platforms) to invoke parallel computing. If `cores=NULL`, the unit models are estimated sequentially in a loop (via `R`'s `lapply` function). To use other / own apply functions, pass them on via the argument `applyfun`. As another example, we might be interested in inspecting the output of the $N$ country models in more detail. To do so, we could provide `expert=list(save.country.store=TRUE)`, which allows saving the whole posterior distribution of each unit / country model. For storage reasons, the default is set to `FALSE` and only the *posterior medians* of the single country models are reported. Note that even in this case, the whole posterior distribution of the *global model* is stored.
</p>
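As a sketch (not run here), a call that estimates the unit models on three cores and keeps the full posterior of each country model could look as follows:
```{r, eval=FALSE}
model.par<-bgvar(Data=eerData,
                 W=W.trade0012,
                 draws=100,
                 burnin=100,
                 plag=1,
                 prior="NG",
                 expert=list(cores=3,save.country.store=TRUE)
                 )
```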
<p align=justify>
We estimated the above model with stochastic volatility (`SV=TRUE`). There are several reasons why one may want to let the residual variances change over time. First and foremost, most sample periods used in macroeconometrics are nowadays rather volatile and include severe recessions. Hence, accounting for time variation might improve the fit of the model [@primiceri2005time; @sims2006were; @Dovern2016; @Huber2016]. Second, the specification implemented in the toolbox nests the homoskedastic case. It is thus a good choice to start with the more general case when first confronting the model with the data. For structural analysis such as the calculation of impulse responses, we take the variance covariance matrix with the median volatilities (over the sample period) on its diagonal.^[Alternatively, one would have $T$ variance covariance matrices and hence $T$ impulse responses for each variable. Since the size of the shock (i.e., the residual variance) varies over time, the resulting impulses would typically be either up- or down-scaled, whereas the shapes of the IRFs are not affected.] If we want to look at the volatilities of the first equation (`y`) in the euro area country model, we can type:
</p>
```{r "SV",results="hide"}
model.1$cc.results$sig$EA[,"EA.y","EA.y"]
```
<p align=justify>
To discard explosive draws, we can compute the eigenvalues of the reduced form of the global model, written in its companion form. Unfortunately, this can only be done once the single models have been estimated and stacked together (and hence not directly built into the MCMC algorithm for the country models). To discard draws that lead to higher eigenvalues than 1.05, set `eigen=1.05`. We can look at the 10 largest eigenvalues by typing:
</p>
```{r "ng.eigen",echo=TRUE}
model.1$stacked.results$F.eigen[1:10]
```
<p align=justify>
Last, we have used the default option `hold.out=0`, which implies that we use the full sample period to estimate the GVAR. For the purpose of forecast evaluation, `hold.out` could be set to a positive number, which would then imply that the last `hold.out` observations are reserved as a hold-out sample and not used to estimate the model.
</p>
## Model Output and Diagnostic Checks
<p align="justify">
Having estimated the model, we can summarize the outcome in various ways.
First, we can use the `print` method
</p>
```{r "print.model",echo=TRUE}
print(model.1)
```
<p align="justify">
This just prints the submitted arguments of the `bgvar` object along with the model specification for each unit. The asterisks indicate weakly exogenous variables, double asterisks exogenous variables and variables without asterisks the endogenous variables per unit.
</p>
<p align="justify">
The `summary` method is a more enhanced way to analyze output. It computes descriptive statistics like convergence properties of the MCMC chain, serial autocorrelation in the errors and the average pairwise autocorrelation of cross-unit residuals.
</p>
```{r "summary.model"}
summary(model.1)
```
<p align="justify">
We can now have a closer look at the output provided by `summary`. The header contains some basic information about the prior used to estimate the model, how many lags, posterior draws and countries. The next line shows Geweke's CD statistic, which is calculated using the `coda` package. Geweke's CD assesses practical convergence of the MCMC algorithm. In a nutshell, the diagnostic is based on a test for equality of the means of the first and last part of a Markov chain (by default we use the first 10% and the last 50%). If the samples are drawn from the stationary distribution of the chain, the two means are equal and Geweke's statistic has an asymptotically standard normal distribution.
</p>
<p align="justify">
The test statistic is a standard Z-score: the difference between the two sample means divided by its estimated standard error. The standard error is estimated from the spectral density at zero and so takes into account any autocorrelation. The test statistic shows that only a small fraction of all coefficients did not converge. Increasing the number of burn-ins can help decrease this fraction further. The statistic can also be calculated by typing `conv.diag(model.1)`.
</p>
<p align="justify">
The next model statistic is the likelihood of the global model. This statistic can be used for model comparison. Next, to assess whether first-order serial autocorrelation is present, we provide the results of a simple F-test. The table shows the share of p-values that fall into different significance categories. Since the null hypothesis is that of no serial correlation, we would like to have as many large ($>0.1$) p-values as possible. The statistics show that already with one lag, serial correlation is modest in most equations' residuals. This could be because we have estimated the unit models with stochastic volatility. To further decrease serial correlation in the errors, one could increase the number of lags via `plag`.
</p>
<p align="justify">
The last part of the summary output contains a statistic of cross-unit correlation of (posterior median) residuals. One assumption of the GVAR framework is that of negligible cross-unit correlation of the residuals. Significant correlations prohibit structural and spillover analysis [@Dees2007a]. In this example, correlation is reasonably small.
</p>
<p align="justify">
Other useful methods the `BGVAR` toolbox offers include the `coef` (or `coefficients` as its alias) method to extract the $k \times k \times plag$ array of reduced-form coefficients of the global model. Via the `vcov` command, we can access the global variance covariance matrix, and the `logLik()` function allows us to retrieve the global log-likelihood (as also provided by the `summary` command).
</p>
```{r "stats",echo=TRUE, results="hide"}
Fmat <- coef(model.1)
Smat <- vcov(model.1)
lik <- logLik(model.1)
```
<p align="justify">
Last, we can have a quick look at the in-sample fit using either the posterior median of the country models' residuals (`global=FALSE`) or those of the global solution of the GVAR (`global=TRUE`). The in-sample fit can also be extracted by using `fitted()`.
</p>
Here, we show the in-sample fit of the euro area model (`global=FALSE`).
```{r "insample",fig.margin=TRUE,fig.width=6,fig.height=8,fig.cap="In-sample fit for euro area variables"}
yfit <- fitted(model.1)
plot(model.1, global=FALSE, resp="EA")
```
We can estimate the model with two further priors on the unit models, the SSVS prior and the Minnesota prior. To give a concrete example, the SSVS prior can be invoked by typing:
```{r "ssvs.1",echo=TRUE, results="hide"}
model.ssvs.1<-bgvar(Data=eerData,
W=W.trade0012,
draws=100,
burnin=100,
plag=1,
prior="SSVS",
hyperpara=NULL,
SV=TRUE,
thin=1,
Ex=NULL,
trend=TRUE,
expert=list(save.shrink.store=TRUE),
hold.out=0,
eigen=1,
verbose=TRUE
)
```
<p align="justify">
One feature of the SSVS prior is that it allows us to look at the posterior inclusion probabilities to gauge the importance of particular variables. By default, `bgvar` does not store the required indicators in order to save memory. If we set `save.shrink.store=TRUE` within the expert settings (default is `FALSE`), they are stored and posterior inclusion probabilities (PIPs) are computed. For example, we can have a look at the PIPs of the euro area model by typing:
</p>
```{r "Pips"}
model.ssvs.1$cc.results$PIP$PIP.cc$EA
```
<p align="justify">
The equations in the EA country model can be read column-wise with the rows representing the associated explanatory variables. The example shows that besides other variables, the trade balance (`tb`) is an important determinant of the real exchange rate (`rer`).
</p>
We can also have a look at the average of the PIPs across all units:
```{r "pips.avg"}
model.ssvs.1$cc.results$PIP$PIP.avg
```
This shows that the same determinants for the real exchange rate appear as important regressors in other country models.
## Different Specifications of the Model
<p align="justify">
In this section we explore different specifications of the structure of the GVAR model. Other specification choices that relate more to the time series properties of the data, such as specifying different lags and priors, are left for the reader to explore. We will use the SSVS prior and judge the different specifications by examining the posterior inclusion probabilities.
</p>
<p align="justify">
As a first modification, we could use different weights for different variable classes as proposed in @Eickmeier2015. For example we could use financial weights to construct weakly exogenous variables of financial factors and trade weights for real variables.
</p>
The `eerData` set provides us with a list of different weight matrices that are described in the help files.
Now we specify the sets of variables to be weighted:
```{r "var.weight"}
variable.list<-list();variable.list$real<-c("y","Dp","tb");variable.list$fin<-c("stir","ltir","rer")
```
We can then re-estimate the model and hand over the `variable.list` via the argument `expert`:
```{r,results="hide"}
# weights for first variable set tradeW.0012, for second finW0711
model.ssvs.2<-bgvar(Data=eerData,
W=W.list[c("tradeW.0012","finW0711")],
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE,
eigen=1,
expert=list(variable.list=variable.list,save.shrink.store=TRUE),
trend=TRUE
)
```
<p align="justify">
Another specification would be to include a foreign variable only when its domestic counterpart is missing. For example, when working with nominal bilateral exchange rates we probably do not want to also include their weighted average (which corresponds to something like an effective exchange rate). Using the previous model, we could place an exclusion restriction on foreign long-term interest rates using `Wex.restr`, which is again handed over via `expert`. The following includes foreign long-term rates only in those country models where no domestic long-term rates are available:
</p>
```{r "ltir.estimate", results="hide"}
# does include ltir* only when ltir is missing domestically
model.ssvs.3<-bgvar(Data=eerData,
W=W.trade0012,
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE,
eigen=1,
expert=list(Wex.restr="ltir",save.shrink.store=TRUE),
                    trend=TRUE
)
```
```{r "print.model.ssvs.3"}
print(model.ssvs.3)
```
<p align="justify">
Last, we could also use a different specification of oil prices in the model. Currently, the oil price is determined endogenously within the US model. Alternatively, one could set up a stand-alone oil price model with additional variables that feeds the oil price back into the other economies as an exogenous variable [@Mohaddes2019].
</p>
The model structure would then look something like in the Figure below:
{width=70%}
For that purpose we have to remove oil prices from the US model and attach them to a separate slot in the data list. This slot has to have its own country label. We use 'OC' for "oil country".
```{r "OC"}
eerData2<-eerData
eerData2$OC<-eerData$US[,c("poil"),drop=FALSE] # move oil prices into own slot
eerData2$US<-eerData$US[,c("y","Dp", "rer" , "stir", "ltir","tb")] # exclude it from US model
```
<p align="justify">
Now we have to specify a list object that we label `OC.weights`. The list has to consist of three slots with the following names `weights`, `variables` and `exo`:
</p>
```{r "OC.weights"}
OC.weights<-list()
OC.weights$weights<-rep(1/3, 3)
names(OC.weights$weights)<-names(eerData2)[1:3] # last one is OC model, hence only until 3
OC.weights$variables<-c(colnames(eerData2$OC),"y") # first entry, endog. variables, second entry weighted average of y from the other countries to proxy demand
OC.weights$exo<-"poil"
```
<p align="justify">
The first slot, `weights`, should be a vector of weights that sum up to unity. In the example above, we simply use $1/N$; other weights could include purchasing power parities (PPP). The weights are used to aggregate specific variables that in turn enter the oil model as weakly exogenous. The second slot, `variables`, should specify the names of the endogenous and weakly exogenous variables that are used in the OC model. In the oil price example, we include the oil price (`poil`) as an endogenous variable (not contained in any other country model) and a weighted average of output (`y`), using `weights`, to proxy world demand as a weakly exogenous variable. Next, we specify via `exo` which of the endogenous variables of the OC model are fed back into the other country models. In this example we specify `poil`. Last, we put all this information in a further list called `OE.weights` (other entity weights). This is done to allow for multiple other entity models (i.e., an oil price model, a joint monetary union model, etc.). It is important that the list entry has the same name as the other entity model, in our example `OC`.
</p>
```{r "OC.weights2"}
# other entities weights with same name as new oil country
OE.weights <- list(OC=OC.weights)
```
<p align="justify">
Now we can re-estimate the model where we pass on `OE.weights` via the `expert` argument.
</p>
```{r "estimate.OC",results="hide"}
model.ssvs.4<-bgvar(Data=eerData2,
W=W.trade0012,
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE,
expert=list(OE.weights=OE.weights,save.shrink.store=TRUE),
trend=TRUE
)
```
and can compare the results of the four models by, e.g., looking at the average PIPs.
```{r "aux"}
aux1<-model.ssvs.1$cc.results$PIP$PIP.avg;aux1<-aux1[-nrow(aux1),1:6]
aux2<-model.ssvs.2$cc.results$PIP$PIP.avg;aux2<-aux2[-nrow(aux2),1:6]
aux3<-model.ssvs.3$cc.results$PIP$PIP.avg;aux3<-aux3[-nrow(aux3),1:6]
aux4<-model.ssvs.4$cc.results$PIP$PIP.avg;aux4<-aux4[-nrow(aux4),1:6]
```
```{r "heat1", fig.show="hold",out.width="25%",fig.cap="Heatmaps of PIPs."}
heatmap(aux1,Rowv=NA,Colv=NA, main="Model 1", cex.main=2, cex.axis=1.7)
heatmap(aux2,Rowv=NA,Colv=NA, main="Model 2", cex.main=2, cex.axis=1.7)
heatmap(aux3,Rowv=NA,Colv=NA, main="Model 3", cex.main=2, cex.axis=1.7)
heatmap(aux4,Rowv=NA,Colv=NA, main="Model 4", cex.main=2, cex.axis=1.7)
```
<p align="justify">
We could also compare the models based on their fit, the likelihood, information criteria such as the DIC, residual properties or their forecasting performance.
</p>
# Impulse Response Functions
<p align="justify">
The package allows the computation of dynamic responses in three different ways: generalized impulse response functions (GIRFs) as in @Pesaran1998, orthogonalized impulse response functions using a Cholesky decomposition of the variance covariance matrix, and impulse response functions given a set of user-specified sign restrictions.
</p>
## Recursive Identification and GIRFs
<p align="justify">
Most of the GVAR applications deal with *locally* identified shocks. This implies that the shock of interest is orthogonal to the other shocks in the same unit model and hence can be interpreted in a *structural* way. There is still correlation between the shocks of the unit models, and these responses (the spillovers) are hence not fully structural [@Eickmeier2015]. Hence some GVAR applications favor generalized impulse response functions, which per se do not rely on an orthogonalization. In `BGVAR`, responses to both types of shocks can be easily analyzed using the `irf` function.
</p>
<p align="justify">
This function needs as input a model object (`x`) and the impulse response horizon (`n.ahead`); the default identification method is the recursive identification scheme via the Cholesky decomposition. Further arguments can be passed on using the wrapper `expert` and are discussed in the helpfiles. The following provides impulse responses to all `N` shocks with unit scaling and using generalized impulse response functions:
</p>
```{r, "shocks", results="hide"}
irf.chol<-irf(model.ssvs.1, n.ahead=24, expert=list(save.store=FALSE))
```
<p align="justify">
The results are stored in `irf.chol$posterior`, which is a
four-dimensional array: $K \times n.ahead \times nr.of shocks \times Q$, with `Q` referring to the 50\%, 68\% and 95\% quantiles of the posterior distribution of the impulse response functions. The posterior median of the responses to the first shock can be accessed via `irf.chol$posterior[,,1,"Q50"]`.
</p>
<p align="justify">
Note that this example was for illustrational purposes; in most instances, we would be interested in a particular shock and calculating responses to all shocks in the system is rather inefficient. Hence, we can provide the `irf` function with more information. To be more precise, let us assume that we are interested in an expansionary monetary policy shock (i.e., a decrease in short-term interest rates) in the US country model.
</p>
<p align="justify">
For that purpose, we can set up a `shockinfo` object, which contains information about which variable we want to shock (`shock`), the size of the shock (`scale`), the specific identification method (`ident`), and whether it is a shock applied in a single country or in multiple countries (`global`). We can use the helper function `get_shockinfo()` to set up such a dummy object, which we can subsequently modify according to our needs. The following lines of code are used for a negative 100 bp shock applied to US short-term interest rates:
</p>
```{r "us.mp", results="hide"}
# US monetary policy shock - Cholesky
shockinfo_chol<-get_shockinfo("chol")
shockinfo_chol$shock<-"US.stir"
shockinfo_chol$scale<--100
# US monetary policy shock - GIRF
shockinfo_girf<-get_shockinfo("girf")
shockinfo_girf$shock<-"US.stir"
shockinfo_girf$scale<--100
```
The `shockinfo` objects for Cholesky and GIRFs look exactly the same but have additionally an attribute which classifies the particular identification scheme. If we compare them, we notice that both have three columns defining the shock, the scale and whether it is defined as global shock. But we also see that the attributes differ which is important for the identification in the `irf` function.
```{r, "shockinfo"}
shockinfo_chol
shockinfo_girf
```
Now, we identify a monetary policy shock with recursive identification:
```{r, "us.mp.chol", results="hide"}
irf.chol.us.mp<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo_chol, expert=list(save.store=TRUE))
```
The results are stored in `irf.chol.us.mp`. In order to save the complete set of draws, one can activate the `save.store` argument by setting it to `TRUE` within the expert settings (note: this may need a lot of storage).
```{r, "us.mp2"}
names(irf.chol.us.mp)
```
<p align="justify">
Again, `irf.chol.us.mp$posterior` is a $K \times n.ahead \times nr.of shocks \times 7$ object and the last slot contains the 50\%, 68\% and 95\% credible intervals along with the posterior median. If `save.store=TRUE`, `IRF_store` contains the full set of impulse response draws and you can calculate additional quantiles of interest.
</p>
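A quick way to see how the stored draws are organised -- and hence over which margin additional quantiles should be computed -- is to inspect the array dimensions (a sketch):
```{r, eval=FALSE}
dim(irf.chol.us.mp$IRF_store) # dimensions of the full set of stored impulse response draws
# additional quantiles can then be computed with apply() over the non-draw margins
```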
We can plot the complete responses of a particular country by typing:
```{r "us.mp4", fig.margin=TRUE,out.width="80%",fig.cap="Responses of US country model"}
plot(irf.chol.us.mp, resp="US", shock="US.stir")
```
The plot shows the posterior median response (solid, black line) along with 50\% (dark grey) and 68\% (light grey) credible intervals.
We can also compare the Cholesky responses with GIRFs. For that purpose, let us look at a GDP shock.
```{r, "us.gdp", results="hide"}
# cholesky
shockinfo_chol <- get_shockinfo("chol", nr_rows = 2)
shockinfo_chol$shock <- c("US.stir","US.y")
shockinfo_chol$scale <- c(1,1)
# generalized impulse responses
shockinfo_girf <- get_shockinfo("girf", nr_rows = 2)
shockinfo_girf$shock <- c("US.stir","US.y")
shockinfo_girf$scale <- c(1,1)
# Recursive US GDP
irf.chol.us.y<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo_chol)
# GIRF US GDP
irf.girf.us.y<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo_girf)
```
```{r, "us.gdp.plots",fig.cap="Comparison of responses to a US GDP shock: Cholesky (left) and GIRF (right).",fig.show="hold",out.width="25%"}
plot(irf.chol.us.y, resp="US.y", shock="US.y")
plot(irf.girf.us.y, resp="US.y", shock="US.y")
plot(irf.chol.us.y, resp="US.rer", shock="US.y")
plot(irf.girf.us.y, resp="US.rer", shock="US.y")
```
<p align="justify">
We see that the responses are similar. This is not surprising because we have shocked the first variable in the US country model (`y`) and there are no timing restrictions on the remaining variables (they are all affected without any lag). In that case, the orthogonal impulse responses and the GIRF coincide.
</p>
<p align="justify">
Last, we could also look at a *joint or global shock*. For example, we could be interested in the effects of a *simultaneous* decrease in output across major economies, such as the G-7 and Russia. For that purpose, we have to set `global<-TRUE`. The following lines illustrate the joint GDP shock:
</p>
```{r, "global.gdp",results="hide",out.width="50%"}
shockinfo<-get_shockinfo("girf", nr_rows = 3)
shockinfo$shock<-c("EA.y","US.y","RU.y")
shockinfo$global<-TRUE
shockinfo$scale<--1
irf.global<-irf(model.ssvs.1, n.ahead=24, shockinfo=shockinfo)
plot(irf.global, resp=c("US.y","EA.y","RU.y"), shock="Global.y")
```
## Identification with Zero- and Sign-Restrictions
<p align="justify">
In this section, we identify the shocks locally with sign restrictions. For that purpose, we will use another example data set and estimate a new GVAR. This data set contains one-year-ahead GDP, inflation and short-term interest rate forecasts for the USA. The forecasts are from the
[Survey of Professional Forecasters (SPF)](https://www.philadelphiafed.org/research-and-data/real-time-center/survey-of-professional-forecasters) database.
</p>
```{r, hide=TRUE}
data("eerData")
eerData<-eerData[cN]
W.trade0012<-W.trade0012[cN,cN]
W.trade0012<-apply(W.trade0012,2,function(x)x/rowSums(W.trade0012))
# append expectations data to US model
temp <- cbind(USexpectations, eerData$US)
colnames(temp) <- c(colnames(USexpectations),colnames(eerData$US))
eerData$US <- temp
```
```{r, "us.spf", results="hide"}
model.ssvs.eer<-bgvar(Data=eerData,
W=W.trade0012,
plag=1,
draws=100,
burnin=100,
prior="SSVS",
SV=TRUE)
```
<p align="justify">
For now, we start with an identification of two standard shocks in economics in the US model, namely an aggregate demand and aggregate supply shock. While the `shockinfo` was optional when using Cholesky / GIRFs, it is *mandatory* when working with sign restrictions. We do this in two steps, first we create a dummy object with `get_shockinfo("sign")` that contains information on the general shock setting and then add sign restrictions one-by-one using `add_shockinfo()`. The following illustrates this:
</p>
```{r, "us.spf.sign.spec"}
shockinfo<-get_shockinfo("sign")
shockinfo<-add_shockinfo(shockinfo, shock="US.y",
restriction="US.Dp", sign=">", horizon=1, prob=1, scale=1)
shockinfo<-add_shockinfo(shockinfo, shock="US.Dp",
restriction="US.y", sign="<", horizon=1, prob=1, scale=1)
```
<p align="justify">
In `add_shockinfo` we provide information on which variable to shock (`shock`), on which responses to put the sign restrictions (`restriction`), the direction of the restrictions (`sign`) and the horizon over which these restrictions should hold (`horizon`). Note that the shock is always positive, but can be re-scaled by `scale`. The argument `prob` allows you to specify a percentage of the draws for which the restrictions have to hold. This argument might be useful when working with cross-sectional sign restrictions, where the idea is that some restrictions have to hold on average or at a certain percentage. The default is `prob=1`. If we want to add more restrictions to a particular shock, we can simply provide a vector instead of a scalar: `add_shockinfo(shockinfo, shock="US.Dp", restriction=c("US.y", "US.stir"), sign=c("<","<"), horizon=c(1,1), prob=c(1,1), scale=1)`. Note that increasing the number of restrictions (on the variables or the horizon) will lead to more precise inference; however, finding a suitable rotation matrix will become substantially harder.
</p>
<p align="justify">
We then invoke the `irf()` command to compute the impulse responses. The function draws rotation matrices using the algorithm of @Ramirez2010. In case we specify additional zero restrictions (see the next example below), we use the algorithm of @Arias2018. By default, we use one CPU core (`cores=NULL`) and do not store the full set of responses (`save.store=FALSE`). The maximum number of rotation matrices sampled per MCMC draw before we jump to the next draw can be specified by `MaxTries`.
</p>
```{r, "us.spf.sign",message=FALSE, results="hide"}
irf.sign<-irf(model.ssvs.eer, n.ahead=24, shockinfo=shockinfo,
expert=list(MaxTries=100, save.store=FALSE, cores=NULL))
```
We can infer the number of successful rotation matrices by looking at
```{r, "us.spf.sign2"}
irf.sign$rot.nr
```
```{r,"us.spf.plots",fig.cap="Responses to AS (upper panel) and AD (lower panel) shock.",fig.show="hold",out.width="50%"}
plot(irf.sign, resp=c("US.y","US.Dp"), shock="US.y")
plot(irf.sign, resp=c("US.y","US.Dp"), shock="US.Dp")
```
<p align="justify">
Several recent papers advocate the inclusion of survey data in a VAR. @Castelnuovo2010 show that including inflation expectations mitigates the price puzzle (i.e., the counter-intuitive positive movement of inflation in response to a monetary tightening). @Damico2015 go one step further and argue that expectations should always be included in a VAR model since they contain information that is not contained in standard macroeconomic data. They also show how to make inference with survey data in a VAR framework and propose so-called rationality conditions. For an application in a GVAR context, see @Boeck2021a. In a nutshell, these conditions put restrictions on actual data to match the expectations either on average over (`ratio.avg`) or at the end of (`ratio.H`) the forecast horizon. Let us look at a concrete example.
</p>
```{r, "us.spf.sign3",results="hide"}
shockinfo<-get_shockinfo("sign")
shockinfo<-add_shockinfo(shockinfo, shock="US.stir_t+4",
restriction=c("US.Dp_t+4","US.stir","US.y_t+4","US.stir_t+4","US.Dp_t+4","US.y_t+4"),
sign=c("<","0","<","ratio.avg","ratio.H","ratio.H"),
horizon=c(1,1,1,5,5,5),
prob=1, scale=1)
irf.sign.zero<-irf(model.ssvs.eer, n.ahead=20, shockinfo=shockinfo,
expert=list(MaxTries=100, save.store=TRUE))
```
<p align="justify">
The figure below shows the results for short term interest rates (`stir`) and output (`y`).
</p>
```{r, "eer.spf.plots",fig.cap="Rationality conditions I.",out.width="50%",fig.show="hold"}
# rationality condition: US.stir_t+4 on impact is equal to average of IRF of
# US.stir between horizon 2 and 5
matplot(cbind(irf.sign.zero$IRF_store["US.stir_t+4",1,,1],
irf.sign.zero$IRF_store["US.stir",1,,1]),
type="l",ylab="",main="Short-term Interest Rate",lwd=2,xaxt="n", cex.main=2);
axis(side=1,at=c(1:5,9,13,17,21,25),label=c(0:4,8,12,16,20,24), cex.axis=1.7)
legend("topright",lty=c(1,2),c("expected","actual"),lwd=2,bty="n",col=c("black","red"))
segments(x0=2,y0=1,x1=5,y1=1,lwd=2,lty=3,col="grey")
points(1,1,col="grey",pch=19,lwd=4)
abline(v=c(2,5),lty=3,col="grey",lwd=2)
# rationality condition: US.y_t+4 on impact is equal to H-step ahead IRF
# of US.y in horizon 5
matplot(cbind(irf.sign.zero$IRF_store["US.y_t+4",1,,1],
irf.sign.zero$IRF_store["US.y",1,,1]),
type="l",ylab="",main="Output",lwd=2,xaxt="n", cex.main=2)
axis(side=1,at=c(1:5,9,13,17,21,25),label=c(0:4,8,12,16,20,24), cex.axis=1.7)
legend("topright",lty=c(1,2),c("expected","actual"),lwd=2,bty="n",col=c("black","red"))
yy<-irf.sign.zero$IRF_store["US.y_t+4",1,1,1]
segments(x0=1,y0=yy,x1=5,y1=yy,lwd=2,lty=3,col="grey");abline(v=c(1,5),col="grey",lty=3)
points(1,yy,col="grey",pch=19,lwd=4);points(5,yy,col="grey",pch=19,lwd=4)
```
<p align="justify">
Impulse responses that refer to observed data are in red (dashed), and the ones referring to expected data in black. The condition we have imposed on short-term interest rates (top panel) was that observed rates should equal the shock to expected rates *on average over the forecast horizon* (one year, i.e., on impact plus 4 quarters). The respective period is marked by the two vertical, grey lines. Put differently, the average of the red-dashed line over the forecast horizon has to equal the expectation shock on impact (grey dot).
On output, shown in the bottom panel, by contrast, we have imposed a condition that has to hold exactly at the forecast horizon. The red line, the impulse response of observed output, has to meet the *impact response* of expected output at $h=5$. In the figure, these two points are indicated by the two grey dots.
</p>
<p align="justify">
The last example we look at is how to put restrictions on the cross-section. @Chudik2011b and @Cashin2014 argue that a major advantage of GVARs is that they allow to put restrictions also on variables from different countries, which should further sharpen inference. They apply cross-sectional restrictions to identify oil supply and demand shocks with the restrictions on oil importing countries' GDP.
</p>
<p align="justify">
Here, we follow @Feldkircher2020 who use cross-sectional restrictions to identify a term spread shock in the euro area. Since they use separate country models for members of the euro area, the joint monetary policy has to be modeled. One idea that has been put forth in recent applications is to set up an additional country for the joint monetary policy in the euro area. In the next example, we follow @Georgiadis2015 and set up a ECB model that determines euro area interest rates according to a Taylor rule. This idea follows the set-up of the additional oil price model and can be summarized graphically in the picture below.
</p>
{width=70%}
We can look at the data by typing:
```{r, "ea.data"}
data(monthlyData);monthlyData$OC<-NULL
names(monthlyData)
# list of weights of other entities with same name as additional country model
OE.weights = list(EB=EB.weights)
EA_countries <- c("AT", "BE", "DE","ES", "FI","FR")
# "IE", "IT", "NL", "PT","GR","SK","MT","CY","EE","LT","LV")
```
<p align="justify">
To estimate the GVAR with an 'EB' country model, we have to specify additional arguments similar to the example with the oil price model discussed above. The `monthlyData` set already comes along with a pre-specified list `EB.weights` with the mandatory slots `weights`, `variables` and `exo`. The specification implies that the euro area monetary policy model (`EB`) includes `EAstir`, `total.assets`, `M3`, `ciss` as endogenous variables (these are contained in `monthlyData$EB`). We use PPP-weights contained in `weights` to aggregate output (`y`) and prices (`p`) from euro area countries and include them as weakly exogenous variables. Euro area short-term interest rates (`EAstir`) and the ciss indicator (`ciss`), specified in `exo`, are then passed on as exogenous variables to the remaining countries. Finally, we put `EB.weights` into the `OE.weights` list and label the slot `EB` (as the name of the additional country model, `names(monthlyData)`) and estimate the model:
</p>
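To see how such a list is organised, we can inspect the pre-specified object directly (a quick check, assuming `EB.weights` was loaded together with `monthlyData`):
```{r, eval=FALSE}
str(EB.weights) # mandatory slots: weights, variables, exo
```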
```{r, "restrict_sample", hide=TRUE}
monthlyData <- monthlyData[c(EA_countries,"EB")]
W<-W[EA_countries,EA_countries]
W<-apply(W,2,function(x)x/rowSums(W))
OE.weights$EB$weights <- OE.weights$EB$weights[names(OE.weights$EB$weights)%in%EA_countries]
```
```{r,"ea.estimate", results="hide"}
# estimates the model
model.ssvs<-bgvar(Data=monthlyData,
W=W,
draws=200,
burnin=200,
plag=1,
prior="SSVS",
eigen=1.05,
expert=list(OE.weights=OE.weights))
```
We can now impose a joint shock on long-term interest rates for selected countries using sign restrictions on the cross section with the following lines of code:
```{r,"ea.sign"}
# imposes sign restrictions on the cross-section and for a global shock
# (long-term interest rates)
shockinfo<-get_shockinfo("sign")
for(cc in c("AT","BE","FR")){
shockinfo<-add_shockinfo(shockinfo, shock=paste0(cc,".ltir"),
restriction=paste0(cc,c(".ip",".p")),
sign=c("<","<"), horizon=c(1,1),
prob=c(0.5,0.5), scale=c(-100,-100),
global=TRUE)
}
```
We can have a look at the restrictions by looking at the `shockinfo` object:
```{r,"global.restrictions"}
shockinfo
```
<p align="justify">
Note the column `prob`. Here, we have specified that the restrictions have to hold only for half of the countries. We could make the restrictions stricter by increasing the percentage.
</p>
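For instance, to require the restrictions to hold for three quarters of the countries instead, we could simply overwrite the respective column before calling `irf()` (a sketch):
```{r, eval=FALSE}
shockinfo$prob <- 0.75 # stricter: restrictions must hold for 75% of the countries
```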
We can now compute the impulse responses using the same function as before.
```{r,"global.shock.irf",echo=TRUE,results="hide"}
irf.sign.ssvs<-irf(model.ssvs, n.ahead=24, shockinfo=shockinfo, expert=list(MaxTries=500))
```
To verify the sign restrictions, type:
```{r,"ea.sign.verify"}
irf.sign.ssvs$posterior[paste0(EA_countries[-c(3,12)],".ltir"),1,1,"Q50"]
irf.sign.ssvs$posterior[paste0(EA_countries,".ip"),1,1,"Q50"]
irf.sign.ssvs$posterior[paste0(EA_countries,".p"),1,1,"Q50"]
```
The following plots the output responses for selected euro area countries.
```{r, "ea.sign.plots",fig.show="hold",out.width="25%",fig.cap="Output responses of selected euro area countries."}
plot(irf.sign.ssvs, resp=c("AT.ip"), shock="Global.ltir")
plot(irf.sign.ssvs, resp=c("BE.ip"), shock="Global.ltir")
plot(irf.sign.ssvs, resp=c("DE.ip"), shock="Global.ltir")
plot(irf.sign.ssvs, resp=c("ES.ip"), shock="Global.ltir")
```
## Forecast Error Variance Decomposition (FEVD)
<p align="justify">
Forecast error variance decompositions indicate the amount of information each variable contributes to the other variables in the autoregression. They are calculated by examining how much of the forecast error variance of each variable can be explained by exogenous shocks to the other variables. In a system with fully orthogonalized errors, the FEVD shares sum up to 1. In the GVAR context, however, since we identify a shock only locally in a particular country model and we still have a certain degree of residual correlation, shares typically exceed unity. By contrast, a fully orthogonalized system obtained, for example, by means of a Cholesky decomposition would yield shares that sum up to unity but inherits assumptions that are probably hard to defend. In the case of the Cholesky decomposition, this would imply timing restrictions, i.e., which variables in which units are immediately affected or affected only with a lag.
</p>
<p align="justify">
One way of fixing this is to use generalized forecast error variance decompositions. Like with GIRFs, these are independent of the ordering but, since the shocks are not orthogonalized, yield shares that exceed unity. Recently, @Lanne2016 proposed a way of scaling the GFEVDs, which has the nice property of shares summing up to 1 and results being independent of the ordering of the variables in the system. To calculate them, we can use the `gfevd` command. We can either use a running mean (`running=TRUE`) or the full set of posterior draws. The latter is computationally very expensive.
</p>
```{r, "fevd"}
#calculates the LN GFEVD
gfevd.us.mp=gfevd(model.ssvs.eer,n.ahead=24,running=TRUE,cores=4)$FEVD
# get position of EA
idx<-which(grepl("EA.",dimnames(gfevd.us.mp)[[2]]))
own<-colSums(gfevd.us.mp["EA.y",idx,])
foreign<-colSums(gfevd.us.mp["EA.y",-idx,])
```
```{r, "fevd.plot",fig.cap="FEVD of EA GDP.",out.width="50%"}
barplot(t(cbind(own,foreign)),legend.text =c("own","foreign"))
```
<p align="justify">
The plot above shows a typical pattern: On impact and in the first periods, EA variables (own) explain a large share of GFEVD. With time and through the lag structure in the model, other countries' variables show up more strongly as important determinants of EA output error variance.
</p>
<p align="justify">
In case we want to focus on a single country, which we have fully identified either using a Cholesky decomposition or sign restrictions, we can compute a simple forecast error variance decomposition (FEVD). This can be done by using the command `fevd()`. Since the computation is very time consuming, the FEVDs are based on the posterior median only (as opposed to calculating FEVDs for each MCMC draw or using a running mean). In case the underlying shock has been identified via sign restrictions, the corresponding rotation matrix is the one that fulfills the sign restrictions at the point estimate of the posterior median of the reduced form coefficients (stored in `irf.obj$struc.obj$Rmed`). Alternatively one can submit a rotation matrix using the option `R`.
</p>
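A sketch of the latter option, reusing the rotation matrix stored with a sign-identified `irf` object (here `irf.sign` from the sign-restriction section above), could look like:
```{r, eval=FALSE}
# submit the rotation matrix associated with the posterior median explicitly
fevd.sign.y <- fevd(irf.sign, var.slct=c("US.y"), R=irf.sign$struc.obj$Rmed)$FEVD
```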
```{r, "fevd.struc"}
# calculates FEVD for variables US.y
fevd.us.y=fevd(irf.chol.us.mp, var.slct=c("US.y"))$FEVD
idx<-which(grepl("US.",rownames(fevd.us.y)))
```
```{r, "fevd.struc.plot",fig.cap="FEVD of US GDP.",out.width="50%"}
barplot(fevd.us.y[idx,1,])
```
## Historical Decomposition
<p align="justify">
Historical decompositions allow us to examine the relative importance of structural shocks in explaining deviations of a time series from its unconditional mean. This can be used to assess the hypothetical question of how the data would have looked if they had been driven only by a particular structural shock (e.g., a monetary policy shock) or a combination of structural shocks. Historical decompositions can be calculated using the function `hd()`. The function also allows us to compute the structural error of the model. To save computational time as well as due to storage limits, we use the point estimate of the posterior median (as opposed to calculating HDs and the structural error for each draw of the MCMC chain). In case the shock has been identified via sign restrictions, a rotation matrix has to be selected. If not specified otherwise (via `R`), the rotation matrix based on the posterior median of the reduced form coefficients (`irf.obj$struc.obj$Rmed`) will be used.
</p>
```{r,"hd"}
HD<-hd(irf.chol.us.mp)
# summing them up should get you back the original time series
org.ts<-apply(HD$hd_array,c(1,2),sum) # this sums up the contributions of all shocks + constant, initial conditions and residual component (last three entries in the third dimension of the array)
```
```{r, "hd.plot",fig.cap="Historical decomposition of euro area GDP.",out.width="50%"}
matplot(cbind(HD$x[,1],org.ts[,1]),type="l",ylab="",lwd=2, cex.axis=1.7)
legend("bottomright",c("hd series","original"),col=c("black","red"),lty=c(1,2),bty="n",cex=2)
```
# Unconditional and Conditional Forecasts
<p align="justify">
In this section, we demonstrate how the package can be used for forecasting. We distinguish between unconditional and conditional forecasting. Typical applications of unconditional forecasting are to select a model from a range of candidate models or for out-of-sample forecasting. Conditional forecasts can be used for scenario analysis by comparing a forecast with a fixed future path of a variable of interest to its unconditional forecast.
</p>
## Unconditional Forecasts
<p align="justify">
Since the GVAR framework was developed to capture cross-country dependencies, it can handle a rich set of dynamics and interdependencies. This can also be useful for forecasting either global components (e.g., global output) or country-specific variables controlling for global factors. @Pesaran2009 show that the GVAR yields competitive forecasts for a range of macroeconomic and financial variables. @CrespoCuaresma2016 demonstrate that Bayesian shrinkage priors can help improving GVAR forecasts and @Dovern2016 and @Huber2016 yield evidence for further gains in forecast performance by using GVARs with stochastic volatility.
</p>
<p align="justify">
To compute forecasts with the `BGVAR` package, we use the command `predict`. To be able to evaluate the forecast, we have to specify the size of the hold-out sample when estimating the model. Here, we choose a hold-out sample of 8 observations by setting `hold.out=8` (the default value is `hold.out=0`):
</p>
```{r,"fcast.est", results="hide"}
model.ssvs.h8<-bgvar(Data=eerData,
W=W.trade0012,
draws=500,
burnin=500,
plag=1,
prior="SSVS",
hyperpara=NULL,
SV=TRUE,
thin=1,
trend=TRUE,
hold.out=8,
eigen=1
)
```
<p align="justify">
The forecasts can then be calculated using the `predict` function. We calculate forecasts up to 8 periods ahead by setting `n.ahead=8`:
</p>
```{r,"fcast.predict", results="hide"}
fcast <- predict(model.ssvs.h8, n.ahead=8, save.store=TRUE)
```
<p align="justify">
The forecasts are stored in `fcast$fcast`, which also contains the credible intervals of the predictive posterior distribution. We can evaluate the forecasts against the retained observations by looking at the root mean squared errors (RMSEs) or log-predictive scores (LPS).
</p>
```{r "lps"}
lps.h8 <- lps(fcast)
rmse.h8 <- rmse(fcast)
```
The objects `lps.h8` and `rmse.h8` then each contain an $8 \times k$ matrix with the LPS / RMSE values for each variable in the system over the forecast horizon.
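A minimal sketch of how these objects might be summarized, for example by aggregating over the forecast horizon for each variable (the column names depend on the dataset used):
```{r, eval=FALSE}
# average RMSE per variable over the 8-step horizon (smallest first)
head(sort(colMeans(rmse.h8)))
# cumulative log-predictive score per variable (largest first)
head(sort(colSums(lps.h8), decreasing=TRUE))
```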
Last, we can visualize the forecasts by typing
```{r, "fcast.plot",fig.cap="Forecast plot.",out.width="50%"}
plot(fcast, resp="US.Dp", cut=8)
```
with `cut` denoting the number of realized data points that should be shown in the plot prior to the start of the forecasts.
\subsection{Conditional Forecasts}
## Conditional Forecasts
<p align="justify">
Similar to structural analysis, it is possible to compute conditional forecasts, with the conditioning identified in a country model. For that purpose, we use the methodology outlined in @Waggoner1999 and applied in @Feldkircher2015 in the GVAR context. The following lines set up a conditional forecast that holds inflation in the US country model fixed for five periods at its last observed value in the sample. The constraints are passed to `predict` via the `constr` argument; make sure that its columns correspond to the variables of the fitted model the forecast is based on.
</p>
```{r "cond.predict",results="hide"}
# matrix with constraints
constr <- matrix(NA,nrow=fcast$n.ahead,ncol=ncol(model.ssvs.h8$xglobal))
colnames(constr) <- colnames(model.ssvs.h8$xglobal)
# set "US.Dp" for five periods on its last value
constr[1:5,"US.Dp"] <-model.ssvs.h8$xglobal[nrow(model.ssvs.h8$xglobal),"US.Dp"]
# compute conditional forecast (hard restriction)
cond_fcast <- predict(model.ssvs.h8, n.ahead=8, constr=constr, constr_sd=NULL)
```
<p align="justify">
We could impose the same restrictions as "soft conditions", accounting for uncertainty by drawing from a Gaussian distribution with the conditional forecast in `constr` as mean and the standard deviations in the matrix `constr_sd` (of the same size as `constr`).
</p>
```{r "cond.predict.sd",results="hide"}
# add uncertainty to conditional forecasts
constr_sd <- matrix(NA,nrow=fcast$n.ahead,ncol=ncol(model.ssvs.h8$xglobal))
colnames(constr_sd) <- colnames(model.ssvs.h8$xglobal)
constr_sd[1:5,"US.Dp"] <- 0.001
# compute conditional forecast with soft restrictions
cond_fcast2 <- predict(model.ssvs.h8, n.ahead=8, constr=constr, constr_sd=constr_sd)
```
We can then compare the results
```{r, "cond.plot.1",out.width="50%",fig.show="hold",fig.cap="Conditional forecast of US Inflation, top panel without uncertainty during the conditioning, bottom panel with uncertainty."}
plot(cond_fcast, resp="US.Dp", cut=10)
plot(cond_fcast2, resp="US.Dp", cut=10)
```
with `cut` denoting the number of realized data points that should be shown in the plot prior to the start of the conditioning.
\section{Appendix}
# Appendix
\subsection{Main Function: `bgvar`}
## Function Arguments `bgvar`
Main arguments and description of the function `bgvar`.
* `Data`: Either a
+ list object of length $N$ that contains the data. Each element of the list refers to a country / entity. The number of columns (i.e., variables) in each country model can be different. The $T$ rows (i.e., number of time observations), however, need to be the same for each country. Country and variable names are not allowed to contain a `.` [dot].
+ matrix of dimension $T \times k$, with $k$ denoting the sum of all endogenous variables of the system. The column names should consist of two parts, separated by a `.` The first part should denote the country / entity and the second part the name of the variable. Country and variable names are not allowed to contain a `.` [dot].
* `W`: An $N \times N$ weight matrix with zero elements on the diagonal and row sums equal to unity, or a list of weight matrices. See the help file of `getweights` for more details.
* `plag`: Number of lags used (the same for domestic, exogenous and weakly exogenous variables). Default set to `plag=1`.
* `draws`: Number of draws saved. Default set to `draws=5000`.
* `burnin`: Number of burn-ins. Default set to `burnin=5000`.
* `prior`: Either `"SSVS"`, `"MN"`, `"NG"`, or `"HS"`. See details below. Default set to `prior="NG"`.
* `SV`: If set to `TRUE`, models are fitted with stochastic volatility using the `stochvol` and `GIGrvg` packages. Due to storage issues, the whole history of the $T$ variance-covariance matrices is not kept. Consequently, the BGVAR package shows only one set of impulse responses (with the variance-covariance matrix based on the median volatilities over the sample period) instead of $T$ sets. Specify `SV=FALSE` to turn SV off.
* `hold.out`: Defines the hold-out sample. Default without hold-out sample, thus set to zero.
* `thin`: Is a thinning interval which grabs every 'thin'th draw from the posterior output. For example, `thin=10` saves every tenth draw from the posterior. Default set to `thin=1`.
* `hyperpara`: Is a list object that defines the hyperparameters when the prior is set to either `"MN"`, `"SSVS"`, `"NG"`, or `"HS"`.
+ `"miscellaneous:"`
+ `a_1` is the prior hyperparameter for the inverted gamma prior (shape) (set `a_1 = b_1` to a small value for the standard uninformative prior). Default is set to `a_1=0.01`.
+ `b_1` is the prior hyperparameter for the inverted gamma prior (rate). Default is set to `b_1=0.01`.
+ `prmean` is the prior mean on the first own lag of the autoregressive coefficient; the standard value is `prmean=1` for non-stationary data. The prior mean for the remaining autoregressive coefficients is automatically set to 0.
+ `bmu` If `SV=TRUE`, this is the prior hyperparameter for the mean of the log-volatilities. Default is `bmu=0`.
+ `Bmu` If `SV=TRUE`, this is the prior hyperparameter for the variance of the mean of the log-volatilities. Default is `Bmu=0`.
+ `a0` If `SV=TRUE`, this is the hyperparameter for the Beta prior on the persistence parameter of the log-volatilities. Default is `a0=25`.
+ `b0` If `SV=TRUE`, this is the hyperparameter for the Beta prior on the persistence parameter of the log-volatilities. Default is `b0=1.5`.
+ `Bsigma` If `SV=TRUE`, this is the hyperparameter for the Gamma prior on the variance of the log-volatilities. Default is `Bsigma=1`.
+ `"MN"`
+ `shrink1` Starting value of `shrink1`. Default set to 0.1.
+ `shrink2` Starting value of `shrink2`. Default set to 0.2.
+ `shrink3` Hyperparameter of `shrink3`. Default set to 100.
+ `shrink4` Starting value of `shrink4`. Default set to 0.1.
+ `"SSVS"`
+ `tau0` is the prior variance associated with the normal prior on the regression coefficients if a variable is NOT included (spike, tau0 should be close to zero).
+ `tau1` is the prior variance associated with the normal prior on the regression coefficients if a variable is included (slab, tau1 should be large).
+ `kappa0` is the prior variance associated with the normal prior on the covariances if a covariance equals zero (spike, kappa0 should be close to zero).
+ `kappa1` is the prior variance associated with the normal prior on the covariances if a covariance is unequal to zero (slab, kappa1 should be large).
+ `p_i` is the prior inclusion probability for each regression coefficient (default is 0.5).
+ `q_ij` is the prior inclusion probability for each covariance (default is 0.5).
+ `"NG"`
+ `e_lambda` Prior hyperparameter for the Gamma prior on the lag-specific shrinkage components, standard value is `e_lambda=1.5`.
+ `d_lambda` Prior hyperparameter for the Gamma prior on the lag-specific shrinkage components, standard value is `d_lambda=1`.
+ `tau_theta` Parameter of the Normal-Gamma prior that governs the heaviness of the tails of the prior distribution. A value of `tau_theta=1` would lead to the Bayesian LASSO. The default value differs per entity and is set to `tau_theta=1/log(M)`, where `M` is the number of endogenous variables per entity.
+ `sample_tau` If set to `TRUE`, `tau_theta` is sampled.
+ `"HS"`: No additional hyperparameter needs to be elicited for the horseshoe prior.
* `eigen` Set to `TRUE` if you want to compute the largest eigenvalue of the companion matrix for each posterior draw. If the modulus of the eigenvalue is significantly larger than unity, the model is unstable; such unstable draws are then excluded. If `eigen` is set to a numeric value, this value is used as the maximum allowed eigenvalue. The default is set to $1.05$ (which excludes all posterior draws for which the eigenvalue of the companion matrix was larger than $1.05$ in modulus).
* `Ex` For including truly exogenous variables to the model. Either a
+ `list object` of maximum length `N` that contains the data. Each element of the list refers to a country/entity and has to match the country/entity names in `Data`. If no truly exogenous variables are added to the respective country/entity model, omit the entry. The `T` rows (i.e., number of time observations), however, need to be the same for each country. Country and variable names are not allowed to contain a `.` [dot] since this is our naming convention.
+ `matrix object` of dimension `T` times number of truly exogenous variables. The column names should consist of two parts, separated by a `.` [dot]. The first part should denote the country / entity name and the second part the name of the variable. Country and variable names are not allowed to contain a `.` [dot].
* `trend` If set to `TRUE` a deterministic trend is added to the country models.
* `expert` Expert settings, must be provided as list. Default is set to `NULL`.
+ `variable.list` In case `W` is a list of weight matrices, specify here which set of variables should be weighted by which weighting matrix. Default is set to `NULL`.
+ `OE.weights`: Default value is `NULL`. Can be used to provide information of how to handle additional country models (other entities). Additional country models can be used to endogenously determine variables that are (weakly) exogenous for the majority of the other country models. As examples, one could think of an additional oil price model [@Mohaddes2019] or a model for the joint euro area monetary policy [@Georgiadis2015; @Feldkircher2020]. The data for these additional country models has to be contained in `Data`. The number of additional country models is unlimited. Each list entry of `OE.weights` has to be named similar to the name of the additional country model contained in `Data`. Each slot of `OE.weights` has to contain the following information:
+ `weights` a vector of weights with names relating to the countries for which data should be aggregated. Can also relate to a subset of countries contained in the data.
+ `variables` a vector of variable names that should be included in the additional country model. Variables that are not contained in the data slot of the extra country model are assumed to be weakly exogenous for the additional country model (aggregated with `weights`).
+ `exo` a vector of variable names that should be fed into the other countries as (weakly) exogenous variables.
+ `Wex.restr` A character vector that contains variables that should only be specified as weakly exogenous if not contained as endogenous variable in a particular country. An example that has often been used in the literature is to place these restrictions on nominal exchange rates. Default is `NULL` in which case all weakly exogenous variables are treated symmetrically. See function getweights for more details.
+ `save.country.store` If set to `TRUE` then function also returns the container of all draws of the individual country models. Significantly raises object size of output and default is thus set to `FALSE`.
+ `save.shrink.store` If set to `TRUE` the function also inspects posterior output of shrinkage coefficients. Default set to `FALSE`.
+ `save.vola.store` If set to `TRUE` the function also inspects posterior output of coefficients associated with the volatility process. Default set to `FALSE`.
+ `use_R` Boolean whether estimation should fall back on `R` version, otherwise `Rcpp` version is used (default).
+ `applyfun` Allows for a user-specific apply function, which has to have the same interface as `lapply`. If `cores=NULL` then `lapply` is used; if set to a numeric, `parallel::parLapply()` is used on Windows platforms and `parallel::mclapply()` on non-Windows platforms.
+ `cores` Specifies the number of cores which should be used. Default is set to `NULL` and `applyfun` is used.
* `verbose` If set to `FALSE` it suppresses printing messages to the console.
Below, find some example code for all three priors.
```{r, eval=FALSE}
# load dataset
data(eerData)
# Minnesota prior and two different weight matrices and no SV
# weights for first variable set tradeW.0012, for second finW0711
variable.list <- list()
variable.list$real <- c("y","Dp","tb")
variable.list$fin <- c("stir","ltir","rer")
Hyperparm.MN <- list(a_i = 0.01, # prior for the shape parameter of the IG
b_i = 0.01 # prior for the scale parameter of the IG
)
model.MN<-bgvar(Data=eerData,
W=W.list[c("tradeW.0012","finW0711")],
draws=200,
burnin=200,
plag=1,
hyperpara=Hyperparm.MN,
prior="MN",
thin=1,
eigen=TRUE,
SV=TRUE,
expert=list(variable.list=variable.list))
# SSVS prior
Hyperparm.ssvs <- list(tau0 = 0.1, # coefficients: prior variance for the spike
# (tau0 << tau1)
tau1 = 3, # coefficients: prior variance for the slab
# (tau0 << tau1)
kappa0 = 0.1, # covariances: prior variance for the spike
# (kappa0 << kappa1)
kappa1 = 7, # covariances: prior variance for the slab
# (kappa0 << kappa1)
a_1 = 0.01, # prior for the shape parameter of the IG
b_1 = 0.01, # prior for the scale parameter of the IG
p_i = 0.5, # prior inclusion probability of coefficients
q_ij = 0.5 # prior inclusion probability of covariances
)
model.ssvs<-bgvar(Data=eerData,
W=W.trade0012,
draws=100,
burnin=100,
plag=1,
hyperpara=Hyperparm.ssvs,
prior="SSVS",
thin=1,
eigen=TRUE)
# Normal Gamma prior
data(monthlyData)
monthlyData$OC<-NULL
Hyperparm.ng<-list(d_lambda = 1.5, # coefficients: prior hyperparameter for the NG-prior
e_lambda = 1, # coefficients: prior hyperparameter for the NG-prior
prmean = 0, # prior mean for the first lag of the AR coefficients
a_1 = 0.01, # prior for the shape parameter of the IG
b_1 = 0.01, # prior for the scale parameter of the IG
tau_theta = .6, # (hyper-)parameter for the NG
sample_tau = FALSE # estimate a?
)
model.ng<-bgvar(Data=monthlyData,
W=W,
draws=200,
burnin=100,
plag=1,
hyperpara=Hyperparm.ng,
prior="NG",
thin=2,
eigen=TRUE,
SV=TRUE,
expert=list(OE.weights=list(EB=EA.weights)))
```
\subsection{Main Function `irf`}
## Function Arguments `irf`
* `x`: An object fitted by the function `bgvar`.
* `n.ahead`: Forecasting horizon.
* `shockinfo` Dataframe with additional information about the nature of shocks. Depending on the `ident` argument, the dataframe has to be specified differently. In order to get a dummy version for each identification scheme use `get_shockinfo`.
* `quantiles` Numeric vector with posterior quantiles. Default is set to compute median along with 68%/80%/90% confidence intervals.
* `expert` Expert settings, must be provided as list. Default is set to `NULL`.
+ `MaxTries` Numeric specifying the maximal number of tries for finding a rotation matrix that satisfies the sign restrictions. Attention: setting this number very large may result in very long computation times.
+ `save.store` If set to `TRUE` the full posterior of both, impulse response and rotation matrices, are returned. Default is `FALSE` in order to save storage.
+ `use_R` Boolean whether IRF computation should fall back on `R` version, otherwise `Rcpp` version is used (default).
+ `applyfun` In case `use_R=TRUE`, this allows for user-specific apply function, which has to have the same interface as `lapply`. If `cores=NULL` then `lapply` is used, if set to a numeric either `parallel::parLapply()` is used on Windows platforms and `parallel::mclapply()` on non-Windows platforms.
+ `cores` Numeric specifying the number of cores which should be used; `all` and `half` are also possible. By default only one core is used.
* `verbose` If set to `FALSE` it suppresses printing messages to the console.
Below, find some further examples.
```{r, eval=FALSE}
# First example, a US monetary policy shock, quarterly data
library(BGVAR)
data(eerData)
model.eer<-bgvar(Data=eerData,W=W.trade0012,draws=500,burnin=500,plag=1,prior="SSVS",thin=10,eigen=TRUE,trend=TRUE)
# generalized impulse responses
shockinfo<-get_shockinfo("girf")
shockinfo$shock<-"US.stir"; shockinfo$scale<--100
irf.girf.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
# cholesky identification
shockinfo<-get_shockinfo("chol")
shockinfo$shock<-"US.stir"; shockinfo$scale<--100
irf.chol.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
# sign restrictions
shockinfo <- get_shockinfo("sign")
shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.y","US.Dp"),
sign=c("<","<"), horizon=c(1,1), scale=1, prob=1)
irf.sign.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
# sign restrictions with relaxed cross-country restrictions
shockinfo <- get_shockinfo("sign")
# restriction for other countries holds to 75\%
shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.y","EA.y","UK.y"),
sign=c("<","<","<"), horizon=1, scale=1, prob=c(1,0.75,0.75))
shockinfo <- add_shockinfo(shockinfo, shock="US.stir", restriction=c("US.Dp","EA.Dp","UK.Dp"),
sign=c("<","<","<"), horizon=1, scale=1, prob=c(1,0.75,0.75))
irf.sign.us.mp<-irf(model.eer, n.ahead=24, shockinfo=shockinfo)
# Example with zero restriction (Arias et al., 2018) and
# rationality conditions (D'Amico and King, 2017).
data("eerDataspf")
model.eer<-bgvar(Data=eerDataspf, W=W.trade0012.spf, draws=300, burnin=300,
plag=1, prior="SSVS", eigen=TRUE)
shockinfo <- get_shockinfo("sign")
shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4",
restriction=c("US.Dp_t+4","US.stir","US.y_t+4"),
sign=c("<","0","<"), horizon=1, prob=1, scale=1)
# rationality condition: US.stir_t+4 on impact is equal to average of
# IRF of US.stir between horizon 1 to 4
shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4", restriction="US.stir_t+4",
sign="ratio.avg", horizon=5, prob=1, scale=1)
# rationality condition: US.Dp_t+4 on impact is equal to IRF of US.Dp at horizon 4
shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4", restriction="US.Dp_t+4",
sign="ratio.H", horizon=5, prob=1, scale=1)
# rationality condition: US.y_t+4 on impact is equal to IRF of US.y at horizon 4
shockinfo <- add_shockinfo(shockinfo, shock="US.stir_t+4", restriction="US.y_t+4",
sign="ratio.H", horizon=5, prob=1, scale=1)
# regulate maximum number of tries with expert settings
irf.ratio <- irf(model.eer, n.ahead=20, shockinfo=shockinfo,
expert=list(MaxTries=10))
```
```{r, hide=TRUE}
par(oldpar)
```
# References
|
/scratch/gouwar.j/cran-all/cranData/BGVAR/vignettes/examples.Rmd
|
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/BGmisc-package.R
|
#' Validates and Optionally Repairs Unique IDs in a Pedigree Dataframe
#'
#' This function takes a pedigree object and performs two main tasks:
#' 1. Checks for the uniqueness of individual IDs.
#' 2. Optionally repairs non-unique IDs based on a specified logic.
#'
#' @param ped A dataframe representing the pedigree data with columns `ID`, `dadID`, and `momID`.
#' @param verbose A logical flag indicating whether to print progress and validation messages to the console.
#' @param repair A logical flag indicating whether to attempt repairs on non-unique IDs.
#'
#' @return Depending on the value of `repair`, either a list containing validation results or a repaired dataframe is returned.
#' @examples
#' \dontrun{
#' ped <- data.frame(ID = c(1, 2, 2, 3), dadID = c(NA, 1, 1, 2), momID = c(NA, NA, 2, 2))
#' checkIDs(ped, verbose = TRUE, repair = FALSE)
#' }
#' @export
checkIDs <- function(ped, verbose = FALSE, repair = FALSE) {
# Standardize column names in the input dataframe
ped <- standardizeColnames(ped)
# Initialize a list to store validation results
validation_results <- list()
if (verbose) {
cat("Step 1: Checking for unique IDs...\n")
}
# Identify non-unique IDs
duplicated_ids <- ped$ID[duplicated(ped$ID) | duplicated(ped$ID, fromLast = TRUE)]
# Update the validation_results list
if (length(duplicated_ids) > 0) {
if (verbose) {
cat("Non-unique IDs found.\n")
}
validation_results$all_unique_ids <- FALSE
validation_results$total_non_unique_ids <- length(duplicated_ids)
validation_results$non_unique_ids <- unique(duplicated_ids)
} else {
if (verbose) {
cat("All IDs are unique.\n")
}
validation_results$all_unique_ids <- TRUE
validation_results$total_non_unique_ids <- 0
validation_results$non_unique_ids <- NULL
}
if (repair) {
if (verbose) {
cat("Validation Results:\n")
print(validation_results)
cat("Step 2: Attempting to repair non-unique IDs...\n")
}
# Initialize a list to track changes made during repair
changes <- list()
if (verbose) {
cat("Is the row a duplicate?\n")
}
repaired_ped <- ped
# if there are non-unique IDs
if (length(validation_results$non_unique_ids) > 0) {
# loop through each non-unique ID
for (id in validation_results$non_unique_ids) {
rows_with_id <- repaired_ped[repaired_ped$ID == id, ]
# If all rows with the same ID are truly identical, keep only the first occurrence
if (nrow(unique(rows_with_id)) == 1) {
# Mark as removed in the changes list
changes[[paste0("ID", id)]] <- "Removed duplicates"
# Keep only the first row, remove the rest
repaired_ped <- repaired_ped[-which(repaired_ped$ID == id)[-1], ] # Remove all but the first occurrence
} else {
# Mark as kept in the changes list
changes[[paste0("ID", id)]] <- "Kept duplicates"
}
}
}
if (verbose) {
cat("Changes Made:\n")
print(changes)
}
return(repaired_ped)
} else {
return(validation_results)
}
}
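# Illustrative sketch (not run): repairing duplicated IDs with checkIDs().
# In this made-up pedigree, ID 2 appears twice with identical information, so the
# repair step keeps only the first occurrence and drops the duplicate row:
# ped <- data.frame(ID = c(1, 2, 2, 3), dadID = c(NA, 1, 1, NA), momID = c(NA, 3, 3, NA))
# checkIDs(ped, repair = TRUE) # returns a data.frame with 3 rows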
#' Repair IDs
#'
#' This function repairs ID issues (currently, duplicated IDs) in a pedigree by calling \code{checkIDs} with \code{repair = TRUE}.
#' @param ped A pedigree object
#' @param verbose A logical indicating whether to print progress messages
#' @return A corrected pedigree
repairIDs <- function(ped, verbose = FALSE) {
checkIDs(ped = ped, verbose = verbose, repair = TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/checkIDs.R
|
#' Validates and Optionally Repairs Sex Coding in a Pedigree Dataframe
#'
#' This function performs two main tasks:
#' 1. Optionally recodes the 'sex' variable based on a given code for males.
#' 2. Optionally repairs the sex coding based on a specified logic.
#'
#' @param ped A dataframe representing the pedigree data with a 'sex' column.
#' @param code_male The current code used to represent males in the 'sex' column. At least one is needed.
#' @param code_female The current code used to represent females in the 'sex' column. If both male and female are NULL, no recoding is performed.
#' @param verbose A logical flag indicating whether to print progress and validation messages to the console.
#' @param repair A logical flag indicating whether to attempt repairs on the sex coding.
#'
#' @return Depending on the value of `repair`, either a list containing validation results or a repaired dataframe is returned.
#' @examples
#' \dontrun{
#' ped <- data.frame(ID = c(1, 2, 3), sex = c("M", "F", "M"))
#' checkSex(ped, code_male = "M", verbose = TRUE, repair = FALSE)
#' }
#' @export
#'
checkSex <- function(ped, code_male = NULL, code_female = NULL, verbose = FALSE, repair = FALSE) {
# Standardize column names in the input dataframe
ped <- standardizeColnames(ped)
# TO DO bypass the rest of the function if recode_only is TRUE
# Initialize a list to store validation results
validation_results <- list()
if (verbose) {
cat("Step 1: Checking how many genders...\n")
}
# check how many genders
validation_results$sex_unique <- unique(ped$sex)
validation_results$sex_length <- length(unique(ped$sex))
if (verbose) {
cat(paste0(
validation_results$sex_length, " unique values found: ",
paste(validation_results$sex_unique, collapse = ", "), "\n"
))
}
# are there multiple genders in the list of dads and moms?
table_sex_dad <- sort(table(ped$sex[ped$ID %in% ped$dadID]), decreasing = TRUE)
table_sex_mom <- sort(table(ped$sex[ped$ID %in% ped$momID]), decreasing = TRUE)
validation_results$all_sex_dad <- names(table_sex_dad)
validation_results$all_sex_mom <- names(table_sex_mom)
validation_results$most_frequent_sex_dad <- validation_results$all_sex_dad[1]
validation_results$most_frequent_sex_mom <- validation_results$all_sex_mom[1]
# list ids for dads that are female, moms that are male
if (length(validation_results$all_sex_dad) > 1) {
df_dads <- ped[ped$ID %in% ped$dadID, ]
validation_results$ID_female_dads <- df_dads$ID[df_dads$sex != validation_results$most_frequent_sex_dad]
validation_results$ID_child_female_dads <- ped$ID[ped$dadID %in% validation_results$ID_female_dads]
remove(df_dads)
}
if (length(validation_results$all_sex_mom) > 1) {
df_moms <- ped[ped$ID %in% ped$momID, ]
validation_results$ID_male_moms <- df_moms$ID[df_moms$sex != validation_results$most_frequent_sex_mom]
validation_results$ID_child_male_moms <- ped$ID[ped$momID %in% validation_results$ID_male_moms]
remove(df_moms)
}
if (repair) {
if (verbose) {
cat("Step 2: Attempting to repair sex coding...\n")
}
# Initialize a list to track changes made during repair
changes <- list()
original_ped <- ped
if (validation_results$sex_length == 2) {
# if length of all_sex_dad >1, then recode all the dads to the most frequent male value
ped <- recodeSex(ped, code_male = validation_results$most_frequent_sex_dad)
# Count and record the change
num_changes <- sum(original_ped$sex != ped$sex)
# Record the change and the count
changes[[length(changes) + 1]] <- sprintf(
"Recode sex based on most frequent sex in dads: %s. Total gender changes made: %d",
validation_results$most_frequent_sex_dad, num_changes
)
}
# Update the pedigree dataframe after repair
repaired_ped <- ped
if (verbose) {
cat("Changes Made:\n")
print(changes)
}
return(repaired_ped)
} else {
if (verbose) {
cat("Checks Made:\n")
print(validation_results)
}
return(validation_results)
}
}
#' Repairs Sex Coding in a Pedigree Dataframe
#'
#' This function serves as a wrapper around `checkSex` to specifically handle
#' the repair of the sex coding in a pedigree dataframe.
#'
#' @inheritParams checkSex
#' @inheritParams plotPedigree
#' @return A modified version of the input data.frame \code{ped}, containing an additional or modified 'sex_recode' column where the 'sex' values are recoded according to \code{code_male}. NA values in the 'sex' column are preserved.
#' @examples
#' \dontrun{
#' ped <- data.frame(ID = c(1, 2, 3), sex = c("M", "F", "M"))
#' repairSex(ped, code_male = "M", verbose = TRUE)
#' }
#' @export
#'
#' @seealso \code{\link{checkSex}}
repairSex <- function(ped, verbose = FALSE, code_male = NULL) {
checkSex(ped = ped, verbose = verbose, repair = TRUE, code_male = code_male)
}
#' Recodes Sex Variable in a Pedigree Dataframe
#'
#' This function serves as a wrapper around `checkSex` to specifically handle
#' the recoding of the 'sex' variable in a pedigree dataframe.
#' @inheritParams checkSex
#' @inheritParams plotPedigree
#' @return A modified version of the input data.frame \code{ped}, containing an additional or modified 'sex_recode' column where the 'sex' values are recoded according to \code{code_male}. NA values in the 'sex' column are preserved.
#' @keywords internal
#' @seealso \code{\link{plotPedigree}}
recodeSex <- function(
ped, verbose = FALSE, code_male = NULL, code_na = NULL, code_female = NULL,
recode_male = "M", recode_female = "F", recode_na = NA_character_) {
if (!is.null(code_na)) {
ped$sex[ped$sex == code_na] <- NA
}
# Recode as "F" or "M" based on code_male, preserving NAs
if (!is.null(code_male) & !is.null(code_female)) {
# Initialize sex_recode as NA, preserving the length of the 'sex' column
ped$sex_recode <- recode_na
ped$sex_recode[ped$sex == code_female] <- recode_female
ped$sex_recode[ped$sex == code_male] <- recode_male
# overwriting temp recode variable
ped$sex <- ped$sex_recode
ped$sex_recode <- NULL
} else if (!is.null(code_male) & is.null(code_female)) {
# Initialize sex_recode as NA, preserving the length of the 'sex' column
ped$sex_recode <- recode_na
ped$sex_recode[ped$sex != code_male & !is.na(ped$sex)] <- recode_female
ped$sex_recode[ped$sex == code_male] <- recode_male
# overwriting temp recode variable
ped$sex <- ped$sex_recode
ped$sex_recode <- NULL
} else if (is.null(code_male) & !is.null(code_female)) {
# Initialize sex_recode as NA, preserving the length of the 'sex' column
ped$sex_recode <- recode_na
ped$sex_recode[ped$sex != code_female & !is.na(ped$sex)] <- recode_male
ped$sex_recode[ped$sex == code_female] <- recode_female
# overwriting temp recode variable
ped$sex <- ped$sex_recode
ped$sex_recode <- NULL
} else {
if (verbose) {
warning(" both code male and code female are empty. No recoding was done.")
}
}
return(ped)
}
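# Illustrative sketch (not run): recoding behaviour of recodeSex() on a made-up pedigree.
# ped <- data.frame(ID = 1:4, sex = c("male", "female", "male", NA))
# recodeSex(ped, code_male = "male")$sex
# # expected: "M" "F" "M" NA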
|
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/checkSex.R
|
#' Standardize Column Names in a Dataframe (Internal)
#'
#' This internal function standardizes the column names of a given dataframe.
#' It utilizes regular expressions and the `tolower()` function to match column names
#' against a list of predefined standard names. The approach is case-insensitive and
#' allows for flexible matching of column names.
#'
#' @param df A dataframe whose column names need to be standardized.
#' @param verbose A logical indicating whether to print progress messages.
#' @return A dataframe with standardized column names.
#'
#' @keywords internal
standardizeColnames <- function(df, verbose = FALSE) {
# Internal mapping of standardized names to possible variants
mapping <- list(
"fam" = "^(?:fam(?:ily)?(?:id)?)",
"ID" = "^(?:i(?:d$|ndiv(?:idual)?)|p(?:erson)?id)",
"gen" = "^(?:gen(?:s|eration)?)",
"dadID" = "^(?:d(?:ad)?id|paid|fatherid)",
"patID" = "^(?:datid|patid|paternal(?:id)?)",
"momID" = "^(?:m(?:om|a|other)?id)",
"matID" = "^(?:matid|maternal(?:id)?)",
"spt" = "^(?:s(?:pt)?id|spouse(?:id)?|partner(?:id)?)",
"twinID" = "^(?:twin(?:id)?)",
"sex" = "^(?:sex|gender|female|m(?:a(?:le|n)|en)|wom[ae]n)"
)
if (verbose) {
print("Standardizing column names...")
}
lowered_colnames <- tolower(colnames(df))
for (standard_name in names(mapping)) {
regex_pattern <- mapping[[standard_name]]
# use Perl-compatible regular expressions so the non-capturing groups "(?:...)" in the patterns are supported
matched_variant <- grep(regex_pattern, lowered_colnames, value = TRUE, perl = TRUE)
if (length(matched_variant) > 0) {
# Update the first match in original case
original_matched <- colnames(df)[tolower(colnames(df)) == matched_variant[1]]
colnames(df)[colnames(df) == original_matched] <- standard_name
}
}
return(df)
}
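# Illustrative sketch (not run): mapping free-form column names to the package
# conventions. The column names below are made up for illustration:
# df <- data.frame(famid = 1, personid = 2, fatherid = NA, motherid = NA, female = 1)
# names(standardizeColnames(df))
# # expected: "fam" "ID" "dadID" "momID" "sex"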
# Repair Pedigree
#
# This function applies a list of repair functions sequentially to a pedigree.
#
# @param ped A pedigree object.
# @param repair_funs A list of functions to repair the pedigree.
# @param verbose Logical. Indicates whether to print progress messages
# @param check_sex Logical. Indicates that sex should be validated
# @param check_parents Logical. Indicates that parents should be validated
# @param check_id Logical. Indicates that IDs should be validated
# @return A corrected pedigree.
# repairPedigree <- function(
# ped,
# repair_funs = NULL,
# check_id = TRUE,
# check_sex = TRUE,
# check_parents = TRUE,
# verbose = FALSE) {
# corrected_ped <- ped <- standardizeColnames(ped)
# if (verbose) {
# print("Repairing pedigree...")
# }
# # applies a list of repair functions sequentially to a pedigree.
# if (!is.null(repair_funs)) {
# for (fun in repair_funs) {
# corrected_ped <- fun(corrected_ped)
# }
# return(corrected_ped)
# # if not provided, use the default repair functions
# } else if (is.null(repair_funs)) {
# if (check_id) {
# corrected_ped <- repairIDs(corrected_ped)
# }
# if (check_sex) {
# corrected_ped <- repairSex(corrected_ped)
# }
# if (check_parents) {
# corrected_ped <- repairParentIDs(corrected_ped)
# }
# return(corrected_ped)
# } else {
# print("You should never see this message. If you do, that means the repair_funs variable in repairPedigree is broken")
# }
# }
# To do
# - Missing rows: Sometimes, ID-codes in the mother or father column do not exist in the ID-column. That is, the people listed as someone's parents sometimes do not have their own rows, with columns for their parents etc.
# - Wrong IDs: It is possible that the ID code written for e.g. someone's mother is simply written wrong. This is especially problematic if there are people in the file who actually have the code that was mistakenly given.
# - A person's child being registered as their parent: I randomly found a case of this. A girl was registered as her father's mother.
# - People existing in both the mother and father column. This can happen through error. And it can also happen when same-sex couples have children (e.g. through adoption or fertilization). In the MoBa sample we had some cases of this, where there were same-sex pairs with several children, and where it was switched around from one child to the next whether one or the other of these parents were the father or mother in the registry.
# Validate Pedigrees
#
# This function validates pedigrees based on several criteria.
# @param ped A pedigree object
# @inheritParams repairPedigree
# @param ... Additional arguments to be passed to \code{\link{validatePedigree}}
# @return A logical indicating whether the pedigree is valid and a list of warnings and ids of potentially invalid relationships
# @export
# validatePedigree <- function(ped,
# verbose = FALSE,
# check_sex = TRUE,
# check_parents = TRUE,
# check_id = TRUE) {
# corrected_ped <- repairPedigree(ped$ID,
# ped$dadID,
# ped$momID,
# ped$sex,
# check_sex = check_sex,
# check_parents = check_parents,
# check_id = check_id
# )
# # Validation checks
# if (check_id) {
# if (verbose) {
# print("Checking IDs...")
# }
# id_valid <- all(corrected_ped$ID == ped$ID)
# } else {
# id_valid <- TRUE
# }
# if (check_parents) {
# if (verbose) {
# print("Checking parents...")
# }
# dadID_valid <- all(corrected_ped$dadID == ped$dadID)
# momID_valid <- all(corrected_ped$momID == ped$momID)
# } else {
# dadID_valid <- TRUE
# momID_valid <- TRUE
# }
# if (check_sex) {
# if (verbose) {
# print("Checking sex...")
# }
# sex_valid <- all(corrected_ped$sex == ped$sex)
# } else {
# sex_valid <- TRUE
# }
# # Compile results
# is_valid <- id_valid && dadID_valid && momID_valid && sex_valid
# # Prepare warnings and feedback
# warnings <- list()
# if (!id_valid) {
# warnings$id_warning <- "IDs in the corrected pedigree do not match the original IDs."
# }
# if (!dadID_valid) {
# warnings$dadID_warning <- "Father IDs in the corrected pedigree do not match the original IDs."
# }
# if (!momID_valid) {
# warnings$momID_warning <- "Mother IDs in the corrected pedigree do not match the original IDs."
# }
# if (!sex_valid) {
# warnings$sex_warning <- "Sex values in the corrected pedigree do not match the original values."
# }
# # Return results
# if (verbose) {
# return(list(is_valid = is_valid, corrected_ped = corrected_ped, warnings = warnings))
# } else if (is_valid) {
# return(corrected_ped)
# } else {
# print("Pedigree is not valid. Refer to the warnings for more details.")
# return(warnings)
# }
# }
|
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/cleanPedigree.R
|
#' Take a pedigree and turn it into a relatedness matrix
#' @param ped a pedigree dataset. Needs ID, momID, and dadID columns
#' @param component character. Which component of the pedigree to return. See Details.
#' @param max.gen the maximum number of generations to compute
#' (e.g., only up to 4th degree relatives). The default of Inf uses as many
#' generations as there are in the data.
#' @param sparse logical. If TRUE, use and return sparse matrices from Matrix package
#' @param verbose logical. If TRUE, print progress through stages of algorithm
#' @param gc logical. If TRUE, do frequent garbage collection via \code{\link{gc}} to save memory
#' @param flatten.diag logical. If TRUE, overwrite the diagonal of the final relatedness matrix with ones
#' @param standardize.colnames logical. If TRUE, standardize the column names of the pedigree dataset
#' @param ... additional arguments to be passed to \code{\link{ped2com}}
#' @details The algorithms and methodologies used in this function are further discussed and exemplified in the vignette titled "examplePedigreeFunctions".
#' @export
#'
ped2com <- function(ped, component,
max.gen = Inf,
sparse = FALSE,
verbose = FALSE,
gc = FALSE,
flatten.diag = FALSE,
standardize.colnames = TRUE,
...) {
# Validate the 'component' argument and match it against predefined choices
component <- match.arg(tolower(component),
choices = c(
"generation",
"additive",
"common nuclear",
"mitochondrial"
)
)
# standardize colnames
if (standardize.colnames) {
ped <- standardizeColnames(ped)
}
# Get the number of rows in the pedigree dataset, representing the size of the family
nr <- nrow(ped)
# Print the family size if verbose is TRUE
if (verbose) {
cat(paste0("Family Size = ", nr, "\n"))
}
# Initialize variables
parList <- list()
lens <- integer(nr)
# Loop through each individual in the pedigree build the adjacency matrix for parent-child relationships
# Is person in column j the parent of the person in row i? .5 for yes, 0 for no.
for (i in 1:nr) {
x <- ped[i, , drop = FALSE]
# Handle parentage according to the 'component' specified
if (component %in% c("generation", "additive")) {
# Code for 'generation' and 'additive' components
# Checks if is mom of ID or is dad of ID
sMom <- (as.numeric(x["ID"]) == as.numeric(ped$momID))
sDad <- (as.numeric(x["ID"]) == as.numeric(ped$dadID))
val <- sMom | sDad
val[is.na(val)] <- FALSE
} else if (component %in% c("common nuclear")) {
# Code for 'common nuclear' component
# IDs have the Same mom and Same dad
sMom <- (as.numeric(x["momID"]) == as.numeric(ped$momID))
sMom[is.na(sMom)] <- FALSE
sDad <- (as.numeric(x["dadID"]) == as.numeric(ped$dadID))
sDad[is.na(sDad)] <- FALSE
val <- sMom & sDad
} else if (component %in% c("mitochondrial")) {
# Code for 'mitochondrial' component
sMom <- (as.numeric(x["ID"]) == as.numeric(ped$momID))
sDad <- TRUE
val <- sMom & sDad
val[is.na(val)] <- FALSE
} else {
stop("Unknown relatedness component requested")
}
# Storing the indices of the parent-child relationships
# keep track of indices only, and then initialize a single sparse matrix
wv <- which(val)
parList[[i]] <- wv
lens[i] <- length(wv)
# Print progress if verbose is TRUE
if (verbose && !(i %% 100)) {
cat(paste0("Done with ", i, " of ", nr, "\n"))
}
}
# Construct sparse matrix
jss <- rep(1L:nr, times = lens)
iss <- unlist(parList)
# Garbage collection if gc is TRUE
if (gc) {
rm(parList, lens)
gc()
}
# Set parent values depending on the component type
if (component %in% c("generation", "additive")) {
parVal <- .5
} else if (component %in% c("common nuclear", "mitochondrial")) {
parVal <- 1
} else {
stop("Don't know how to set parental value")
}
# Initialize adjacency matrix for parent-child relationships
isPar <- Matrix::sparseMatrix(
i = iss,
j = jss,
x = parVal,
dims = c(nr, nr),
dimnames = list(ped$ID, ped$ID)
)
if (verbose) {
cat("Completed first degree relatives (adjacency)\n")
}
# isPar is the adjacency matrix. 'A' matrix from RAM
if (component %in% c("common nuclear")) {
Matrix::diag(isPar) <- 1
if (!sparse) {
isPar <- as.matrix(isPar)
}
return(isPar)
}
isChild <- apply(ped[, c("momID", "dadID")], 1, function(x) {
2^(-!all(is.na(x)))
})
# isChild is the 'S' matrix from RAM
r <- Matrix::Diagonal(x = 1, n = nr)
gen <- rep(1, nr)
mtSum <- sum(r, na.rm = TRUE)
newIsPar <- isPar
count <- 0
maxCount <- max.gen + 1
if (verbose) {
cat("About to do RAM path tracing\n")
}
# r is I + A + A^2 + ... = (I-A)^-1 from RAM
while (mtSum != 0 & count < maxCount) {
r <- r + newIsPar
gen <- gen + (Matrix::rowSums(newIsPar) > 0)
newIsPar <- newIsPar %*% isPar
mtSum <- sum(newIsPar)
count <- count + 1
if (verbose) {
cat(paste0("Completed ", count - 1, " degree relatives\n"))
}
}
# compute rsq <- r %*% sqrt(diag(isChild))
# compute rel <- tcrossprod(rsq)
if (gc) {
rm(isPar, newIsPar)
}
if (gc) {
gc()
}
if (verbose) {
cat("Doing I-A inverse times diagonal multiplication\n")
}
r2 <- r %*% Matrix::Diagonal(x = sqrt(isChild), n = nr)
if (gc) {
rm(r, isChild)
}
if (gc) {
gc()
}
if (verbose) {
cat("Doing tcrossprod\n")
}
r <- Matrix::tcrossprod(r2)
if (component == "generation") {
return(gen)
} else {
if (component == "mitochondrial") {
r@x <- rep(1, length(r@x))
# Assign 1 to all nonzero elements for mitochondrial component
}
if (!sparse) {
r <- as.matrix(r)
}
if (flatten.diag) { # flattens diagonal if you don't want to deal with inbreeding
diag(r) <- 1
}
return(r)
}
}
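# Illustrative sketch (not run): building relatedness components from a pedigree,
# assuming the `hazard` data documented in this package (ID, momID, dadID columns):
# data(hazard)
# add <- ped2com(hazard, component = "additive") # additive genetic relatedness
# mit <- ped2com(hazard, component = "mitochondrial") # mitochondrial relatedness
# gen <- ped2com(hazard, component = "generation") # generation number per person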
#' Take a pedigree and turn it into an additive genetics relatedness matrix
#' @inheritParams ped2com
#' @details The algorithms and methodologies used in this function are further discussed and exemplified in the vignette titled "examplePedigreeFunctions".
#' For more advanced scenarios and detailed explanations, consult this vignette.
#' @export
#'
ped2add <- function(ped, max.gen = Inf, sparse = FALSE, verbose = FALSE, gc = FALSE, flatten.diag = FALSE) {
ped2com(
ped = ped,
max.gen = max.gen,
sparse = sparse,
verbose = verbose,
gc = gc,
component = "additive",
flatten.diag = flatten.diag
)
}
#' Take a pedigree and turn it into a mitochondrial relatedness matrix
#' @inheritParams ped2com
#' @details The algorithms and methodologies used in this function are further discussed and exemplified in the vignette titled "examplePedigreeFunctions".
#' @export
#' @aliases ped2mt
#'
ped2mit <- ped2mt <- function(ped, max.gen = Inf, sparse = FALSE, verbose = FALSE, gc = FALSE, flatten.diag = FALSE) {
ped2com(
ped = ped,
max.gen = max.gen,
sparse = sparse,
verbose = verbose,
gc = gc,
component = "mitochondrial",
flatten.diag = flatten.diag
)
}
#' Take a pedigree and turn it into a common nuclear environmental relatedness matrix
#' @inheritParams ped2com
#' @details The algorithms and methodologies used in this function are further discussed and exemplified in the vignette titled "examplePedigreeFunctions".
#' @export
#'
ped2cn <- function(ped, max.gen = Inf, sparse = FALSE, verbose = FALSE, gc = FALSE, flatten.diag = FALSE) {
ped2com(
ped = ped,
max.gen = max.gen,
sparse = sparse,
verbose = verbose,
gc = gc,
component = "common nuclear",
flatten.diag = flatten.diag
)
}
#' Take a pedigree and turn it into an extended environmental relatedness matrix
#' @inheritParams ped2com
#' @details The algorithms and methodologies used in this function are further discussed and exemplified in the vignette titled "examplePedigreeFunctions".
#' @export
#'
ped2ce <- function(ped) {
matrix(1, nrow = nrow(ped), ncol = nrow(ped), dimnames = list(ped$ID, ped$ID))
}
|
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/convertPedigree.R
|
##' Artificial pedigree data on eight families with inbreeding
##'
##' A dataset created purely from imagination that includes several types of inbreeding.
##' Different kinds of inbreeding occur in each extended family.
##'
##' The types of inbreeding are as follows:
##'
#' \itemize{
#' \item Extended Family 1: Sister wives - Children with the same father and different mothers who are sisters.
#' \item Extended Family 2: Full siblings have children.
#' \item Extended Family 3: Half siblings have children.
#' \item Extended Family 4: First cousins have children.
#' \item Extended Family 5: Father has child with his daughter.
#' \item Extended Family 6: Half sister wives - Children with the same father and different mothers who are half sisters.
#' \item Extended Family 7: Uncle-niece and Aunt-nephew have children.
#' \item Extended Family 8: A father-son pair has children with a corresponding mother-daughter pair.
#' }
##'
##' Although not all of the above structures are technically inbreeding, they aim to test pedigree diagramming and path tracing algorithms.
##'
##' The variables are as follows:
##'
##' \itemize{
##' \item \code{ID}: Person identification variable
##' \item \code{sex}: Sex of the ID: 1 is female; 0 is male
##' \item \code{dadID}: ID of the father
##' \item \code{momID}: ID of the mother
##' \item \code{FamID}: ID of the extended family
##' \item \code{Gen}: Generation of the person
##' \item \code{proband}: Always FALSE
##' }
##'
##' @docType data
##' @keywords datasets
##' @name inbreeding
##' @usage data(inbreeding)
##' @format A data frame (and ped object) with 134 rows and 7 variables
NULL
##' Simulated pedigree with two extended families and an age-related hazard
##'
##' A dataset simulated to have an age-related hazard.
##' There are two extended families that are sampled from the same population.
##'
##' The variables are as follows:
##'
##' \itemize{
##' \item \code{FamID}: ID of the extended family
##' \item \code{ID}: Person identification variable
##' \item \code{sex}: Sex of the ID: 1 is female; 0 is male
##' \item \code{dadID}: ID of the father
##' \item \code{momID}: ID of the mother
##' \item \code{affected}: logical. Whether the person is affected or not
##' \item \code{DA1}: Binary variable signifying the meaninglessness of life
##' \item \code{DA2}: Binary variable signifying the fundamental unknowability of existence
##' \item \code{birthYr}: Birth year for person
##' \item \code{onsetYr}: Year of onset for person
##' \item \code{deathYr}: Death year for person
##' \item \code{available}: logical. Whether the person is available
##' \item \code{Gen}: Generation of the person
##' \item \code{proband}: logical. Whether the person is a proband or not
##' }
##'
##' @docType data
##' @keywords datasets
##' @name hazard
##' @usage data(hazard)
##' @format A data frame with 43 rows and 14 variables
NULL
##' Fictional pedigree data on a wizarding family
##'
##' A dataset created purely from imagination that includes a subset of the Potter extended family.
##'
##' The variables are as follows:
##'
##' \itemize{
##' \item \code{personID}: Person identification variable
##' \item \code{famID}: Family identification variable
##' \item \code{name}: Name of the person
##' \item \code{gen}: Generation of the person
##' \item \code{momID}: ID of the mother
##' \item \code{dadID}: ID of the father
##' \item \code{spouseID}: ID of the spouse
##' \item \code{sex}: Sex of the ID: 1 is male; 0 is female
##'
##' }
##'
##' IDs in the 100s \code{momID}s and \code{dadID}s are for people not in the dataset.
##'
##' @docType data
##' @keywords datasets
##' @name potter
##' @usage data(potter)
##' @format A data frame (and ped object) with 36 rows and 8 variables
NULL
|
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/dataDoc.R
|