Update_aE <- function(last.params){ aE_prime <- last.params$aE+stats::rnorm(length(last.params$aE),0,last.params$aE_stp) prior_prob_alphaE_prime <- Prior_prob_alphaE(aE_prime) new.params <- last.params if(prior_prob_alphaE_prime != -Inf) { covariance_prime <- Covariance(last.params$a0,last.params$aD,aE_prime,last.params$a2,last.params$D,last.params$E,last.params$delta) if(matrixcalc::is.positive.definite(covariance_prime)){ LnL_thetas_vec_prime <- Likelihood_thetas(last.params$thetas,covariance_prime) if(exp((prior_prob_alphaE_prime+sum(LnL_thetas_vec_prime)) - (last.params$prior_prob_alphaE+sum(last.params$LnL_thetas_vec))) >= stats::runif(1)){ new.params$aE <- aE_prime new.params$covariance <- covariance_prime new.params$prior_prob_alphaE <- prior_prob_alphaE_prime new.params$LnL_thetas_vec <- LnL_thetas_vec_prime new.params$aE_accept <- new.params$aE_accept + 1 } } } new.params$aE_moves <- new.params$aE_moves + 1 return(new.params) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/Update_aE.R
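Update_aE above (and the Update_mu and Update_thetas functions that follow) share the same random-walk Metropolis-Hastings template: draw a Gaussian proposal, short-circuit when the prior has no support or the implied covariance is not positive definite, and otherwise accept when the exponentiated log-posterior difference exceeds a uniform draw. A stripped-down sketch of that acceptance step, with illustrative names that are not part of the package:
# Generic random-walk Metropolis step over a log-posterior (illustrative only).
mh_step <- function(value, step_size, log_post) {
  proposal <- value + stats::rnorm(length(value), 0, step_size)  # symmetric Gaussian proposal
  log_ratio <- log_post(proposal) - log_post(value)              # symmetric proposal: no Hastings term
  # comparing on the log scale handles log_ratio = -Inf (zero prior support) gracefully
  if (!is.nan(log_ratio) && log(stats::runif(1)) <= log_ratio) proposal else value
}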
Update_beta <- function(last.params){ new.params <- last.params new.params$beta <- stats::rgamma(1,shape=(last.params$loci/2+0.001),rate=(0.001+sum(((last.params$mu[1,])^2)/2))) new.params$prior_prob_beta <- Prior_prob_beta(new.params$beta) new.params$prior_prob_mu <- Prior_prob_mu(last.params$mu,new.params$beta) new.params$beta_moves <- new.params$beta_moves+1 return(new.params) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/Update_beta.R
Update_mu <- function(last.params){ mu_update <- stats::rnorm(last.params$loci,0,last.params$mu_stp) mu_prime <- Shift(last.params$mu,mu_update) prior_prob_mu_prime <- Prior_prob_mu(mu_prime,last.params$beta) allele.frequencies_prime <- transform_frequencies(last.params$thetas,mu_prime) LnL_counts_mat_prime <- Likelihood_counts(last.params$counts,last.params$sample_sizes,allele.frequencies_prime) new.params <- last.params for(i in 1:ncol(last.params$mu)){ if(!any(is.na(LnL_counts_mat_prime[,i]))){ if(exp((prior_prob_mu_prime[i]+sum(LnL_counts_mat_prime[,i])) - (last.params$prior_prob_mu[i]+sum(last.params$LnL_counts_mat[,i]))) >= stats::runif(1)){ new.params$mu[,i] <- mu_prime[,i] new.params$allele.frequencies[,i] <- allele.frequencies_prime[,i] new.params$LnL_counts_mat[,i] <- LnL_counts_mat_prime[,i] new.params$prior_prob_mu[i] <- prior_prob_mu_prime[i] new.params$mu_accept <- new.params$mu_accept + 1 } } } new.params$mu_moves <- new.params$mu_moves + last.params$loci return(new.params) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/Update_mu.R
Update_thetas <- function(last.params){ new.params <- last.params cholcovmat <- chol(last.params$covariance) thetas_prime <- sapply(1:ncol(last.params$thetas),function(i){last.params$thetas[,i]+stats::rnorm(last.params$k,0,last.params$thetas_stp)%*%cholcovmat}) allele.frequencies_prime <- transform_frequencies(thetas_prime,last.params$mu) LnL_thetas_vec_prime <- Likelihood_thetas(thetas_prime,last.params$covariance) LnL_counts_mat_prime <- Likelihood_counts(last.params$counts,last.params$sample_sizes,allele.frequencies_prime) for(i in 1:last.params$loci){ if(!is.na(sum(LnL_counts_mat_prime[,i])) && !is.na(LnL_thetas_vec_prime[i])){ if(exp((LnL_thetas_vec_prime[i]+sum(LnL_counts_mat_prime[,i])) - (last.params$LnL_thetas_vec[i]+sum(last.params$LnL_counts_mat[,i]))) >= stats::runif(1)){ new.params$thetas[,i] <- thetas_prime[,i] new.params$allele.frequencies[,i] <- allele.frequencies_prime[,i] new.params$LnL_thetas_vec[i] <- LnL_thetas_vec_prime[i] new.params$LnL_counts_mat[,i] <- LnL_counts_mat_prime[,i] new.params$thetas_accept <- new.params$thetas_accept + 1 } } } new.params$thetas_moves <- new.params$thetas_moves + last.params$loci return(new.params) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/Update_thetas.R
a0_gibbs_rate <- function(thetas,covmat,a0){ cholcov <- chol(covmat) invcholcov <- MASS::ginv(cholcov) tmp <- (1/2)*(colSums((crossprod(invcholcov,thetas))^2)) return(sum(tmp)/a0) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/a0_gibbs_rate.R
calculate.all.pairwise.Fst <- function(allele.counts,sample.sizes){ number.of.populations <- nrow(allele.counts) list.of.pairwise.comparisons <- utils::combn(1:number.of.populations,2) pairwise.Fst.matrix <- matrix(0,nrow=number.of.populations,ncol=number.of.populations) Fst.vector <- numeric(ncol(list.of.pairwise.comparisons)) for(i in 1:ncol(list.of.pairwise.comparisons)){ pair.of.allele.counts <- allele.counts[list.of.pairwise.comparisons[,i],] pair.of.sample.sizes <- sample.sizes[list.of.pairwise.comparisons[,i],] Fst.vector[i] <- calculate.pairwise.Fst(pair.of.allele.counts,pair.of.sample.sizes) } pairwise.Fst.matrix[lower.tri(pairwise.Fst.matrix)] <- Fst.vector pairwise.Fst.matrix[upper.tri(pairwise.Fst.matrix)] <- t(pairwise.Fst.matrix)[upper.tri(pairwise.Fst.matrix)] return(pairwise.Fst.matrix) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/calculate.all.pairwise.Fst.R
calculate.pairwise.Fst <- function(allele.counts,sample.sizes){ raw.population.allele.frequencies <- allele.counts/sample.sizes missing.data.loci <- which(is.na(raw.population.allele.frequencies),arr.ind=TRUE)[,2] if(sum(as.numeric(missing.data.loci)) > 0){ allele.counts <- allele.counts[,-c(missing.data.loci)] sample.sizes <- sample.sizes[,-c(missing.data.loci)] } population.allele.frequencies <- allele.counts/sample.sizes mean.allele.frequencies <- colSums(allele.counts)/colSums(sample.sizes) MSP <- colSums(sample.sizes*t(apply(population.allele.frequencies,1,'-',mean.allele.frequencies)^2)) MSG <- (1/(colSums(sample.sizes-1))) * colSums(sample.sizes*population.allele.frequencies*(1-population.allele.frequencies)) n.c <- colSums(sample.sizes)-colSums(sample.sizes^2)/colSums(sample.sizes) theta <- sum(MSP-MSG) / sum(MSP + (n.c-1)*MSG) return(theta) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/calculate.pairwise.Fst.R
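calculate.pairwise.Fst pools the among-population (MSP) and within-population (MSG) mean squares across loci into one multilocus estimator, theta = sum(MSP - MSG) / sum(MSP + (n.c - 1) * MSG), in the style of Weir and Cockerham's theta. A minimal toy call (the matrices below are hypothetical, with populations in rows and biallelic loci in columns):
toy.counts <- matrix(c(8, 1, 5, 9, 2, 6), nrow = 2, ncol = 3)  # reference-allele counts
toy.sizes  <- matrix(10, nrow = 2, ncol = 3)                   # 10 sampled alleles per population and locus
calculate.pairwise.Fst(toy.counts, toy.sizes)                  # one multilocus theta for the pair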
identify_invariant_loci <- function(allele.counts){ there.are.invariants <- FALSE if(length(unique(allele.counts)) < 2){ there.are.invariants <- TRUE } return(there.are.invariants) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/identify_invariant_loci.R
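As used in simulate_allele_count_data below, this helper is applied column-wise to flag loci whose counts show no variation across populations; for example:
apply(matrix(c(1, 1, 0, 2), nrow = 2), 2, identify_invariant_loci)  # TRUE FALSE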
link.up.posteriors <- function(MCMC.output1,MCMC.output2,linked.up.output.file.name){ # recover() load(MCMC.output1) parameters <- objects() for(i in 1:length(parameters)){ assign(sprintf("tmp.%s",parameters[i]),get(parameters[i])) } load(MCMC.output2) for(i in 1:length(parameters)){ if(length(get(parameters[i])) > 1 && grepl(pattern="last.params",parameters[i])==FALSE){ if(inherits(get(parameters[i]),"numeric")){ if(grepl("moves",parameters[i]) || grepl("accept",parameters[i])){ assign(parameters[i], c(get(sprintf("tmp.%s",parameters[i])), get(parameters[i]) + get(sprintf("tmp.%s",parameters[i]))[length(get(sprintf("tmp.%s",parameters[i])))])) } else{ assign(parameters[i],c(get(sprintf("tmp.%s",parameters[i])),get(parameters[i]))) } } if(inherits(get(parameters[i]),"matrix")){ assign(parameters[i],cbind(get(sprintf("tmp.%s",parameters[i])),get(parameters[i]))) } } } rm(list=unique(c(objects(pattern="tmp."),objects(pattern="MCMC.output")))) save(list=setdiff(ls(all.names=TRUE),"linked.up.output.file.name"),file=paste(linked.up.output.file.name,".Robj",sep="")) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/link.up.posteriors.R
load_MCMC_output <- function(MCMC.output){ tmpenv <- environment() tmp <- load(MCMC.output,envir=tmpenv) mcmc.output <- lapply(tmp,get,envir=tmpenv) names(mcmc.output) <- tmp stopifnot(length(intersect(names(mcmc.output),c("a0","aD","aE","a2", "beta","last.params", "LnL_thetas","LnL_counts", "Prob","samplefreq","ngen", "a0_moves","aD_moves", "aE_moves","a2_moves", "thetas_moves","mu_moves","beta_moves", "aD_accept","aE_accept","a2_accept", "thetas_accept","mu_accept") )) > 20) return(mcmc.output) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/load_MCMC_output.R
load_posterior_predictive_samples <- function(posterior.predictive.sample.file){ tmpenv <- environment() tmp <- load(posterior.predictive.sample.file,envir=tmpenv) posterior.predictive.samples <- lapply(tmp,get,envir=tmpenv) names(posterior.predictive.samples) <- tmp stopifnot(setequal(names(posterior.predictive.samples), c("observed.Fst","posterior.sample.Fst", "D","E","posterior.predictive.sample.size"))) return(posterior.predictive.samples) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/load_posterior_predictive_samples.R
make.continuing.params <- function(MCMC.output,file.name){ MCMC.output.list <- load_MCMC_output(MCMC.output) with(MCMC.output.list, { if(!exists("phi_mat")){ continuing.params <- list(last.params$a0, last.params$aD, last.params$aE, last.params$a2, last.params$beta, last.params$mu, last.params$thetas) names(continuing.params) <- c("a0","aD","aE","a2","beta","mu","thetas") } if(exists("phi_mat")){ continuing.params <- list(last.params$a0, last.params$aD, last.params$aE, last.params$a2, last.params$beta, last.params$phi, last.params$mu, last.params$thetas) names(continuing.params) <- c("a0","aD","aE","a2","beta","phi","mu","thetas") } save(continuing.params,file=file.name) }) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/make.continuing.params.R
plot_acceptance_rate <- function(accepted.moves,proposed.moves,param.name=deparse(substitute(accepted.moves))){ param.name <- strsplit(param.name,split="_")[[1]][1] x <- seq(1,length(which(proposed.moves!=0))) acceptance.rate <- accepted.moves[x]/proposed.moves[x] plot(acceptance.rate, pch=20, col=grDevices::adjustcolor(1,alpha.f=0.7), xlab="MCMC sampled generations", ylab="acceptance rate", main=paste(param.name,"acceptance rate",sep=" "), ylim=c(0,1)) graphics::abline(h=c(0.2,0.7),col="green",lty="dashed",lwd=2) if(stats::median(acceptance.rate) > 0.7 || stats::median(acceptance.rate) < 0.2){ graphics::polygon(x=c(0-0.04*length(acceptance.rate), 0-0.04*length(acceptance.rate), length(acceptance.rate)+0.04*length(acceptance.rate), length(acceptance.rate)+0.04*length(acceptance.rate)), y=c(0-0.04*length(acceptance.rate), 1+0.04*length(acceptance.rate), 1+0.04*length(acceptance.rate), 0-0.04*length(acceptance.rate)), col=grDevices::adjustcolor("red",alpha.f=0.2)) } }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_acceptance_rate.R
plot_all_acceptance_rates <- function(MCMC.output){ MCMC.output.list <- load_MCMC_output(MCMC.output) with(MCMC.output.list, { plot_acceptance_rate(aD_accept,aD_moves) devAskNewPage(ask=TRUE) plot_acceptance_rate(aE_accept,aE_moves) plot_acceptance_rate(a2_accept,a2_moves) plot_acceptance_rate(mu_accept,mu_moves) plot_acceptance_rate(thetas_accept,thetas_moves) if(exists("phi_accept")){ plot_acceptance_rate(phi_accept,phi_moves) } }) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_all_acceptance_rates.R
plot_all_joint_marginals <- function(MCMC.output,percent.burnin=0,thinning=1){ MCMC.output.list <- load_MCMC_output(MCMC.output) with(MCMC.output.list, { plot_joint_marginal(a0,aD,percent.burnin,thinning) devAskNewPage(ask=TRUE) for (k in 1:nrow(aE)) { plot_joint_marginal(a0,aE[k,],percent.burnin,thinning,param.name2=paste("aE_",k)) } plot_joint_marginal(a0,a2,percent.burnin,thinning) plot_joint_marginal(a0,beta,percent.burnin,thinning) for (k in 1:nrow(aE)) { plot_joint_marginal(aD,aE[k,],percent.burnin,thinning,param.name2=paste("aE_",k)) } plot_joint_marginal(aD,a2,percent.burnin,thinning) plot_joint_marginal(aD,beta,percent.burnin,thinning) for (k in 1:nrow(aE)) { plot_joint_marginal(aE[k,],a2,percent.burnin,thinning,param.name1=paste("aE_",k)) plot_joint_marginal(aE[k,],beta,percent.burnin,thinning,param.name1=paste("aE_",k)) plot_joint_marginal(aE[k,]/aD,a0,percent.burnin,thinning,param.name1=paste("aE_",k)) plot_joint_marginal(aE[k,]/aD,a2,percent.burnin,thinning,param.name1=paste("aE_",k)) plot_joint_marginal(aE[k,]/aD,beta,percent.burnin,thinning,param.name1=paste("aE_",k)) } plot_joint_marginal(a2,beta,percent.burnin,thinning) }) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_all_joint_marginals.R
plot_all_marginals <- function(MCMC.output,percent.burnin=0,thinning=1,population.names=NULL){ MCMC.output.list <- load_MCMC_output(MCMC.output) with(MCMC.output.list, { for (k in 1:nrow(aE)) { plot_marginal(aE[k,]/aD,percent.burnin,thinning,param.name=paste(sprintf("aE_%s",k),"aD",sep="/")); devAskNewPage(ask=TRUE) } plot_marginal(a0,percent.burnin,thinning) plot_marginal(aD,percent.burnin,thinning) for (k in 1:nrow(aE)) { plot_marginal(aE[k,],percent.burnin,thinning,param.name=paste("aE_",k)) } plot_marginal(a2,percent.burnin,thinning) plot_marginal(beta,percent.burnin,thinning) if(exists("phi_mat")){ plot_all_phi_marginals(phi_mat,percent.burnin,thinning,population.names) } plot_marginal(LnL_thetas,percent.burnin,thinning) plot_marginal(LnL_counts,percent.burnin,thinning) plot_marginal(Prob,percent.burnin,thinning) }) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_all_marginals.R
plot_all_phi_marginals <- function(phi_mat,percent.burnin=0,thinning=1,population.names=NULL,pop.index=NULL,histogram=TRUE,density=TRUE){ k <- nrow(phi_mat) for(i in 1:k){ plot_phi_marginal(phi_mat[i,],percent.burnin,thinning,population.names=population.names[i],pop.index=i,histogram,density) grDevices::devAskNewPage(ask=TRUE) } }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_all_phi_marginals.R
plot_all_phi_trace <- function(phi_mat,percent.burnin=0,thinning=1,population.names=NULL){ k <- nrow(phi_mat) for(i in 1:k){ plot_phi_trace(phi_mat[i,],percent.burnin,thinning,population.names=population.names[i],pop.index=i) grDevices::devAskNewPage(ask=TRUE) } }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_all_phi_trace.R
plot_all_trace <- function(MCMC.output,percent.burnin=0,thinning=1,population.names=NULL){ MCMC.output.list <- load_MCMC_output(MCMC.output) with(MCMC.output.list, { for (k in 1:nrow(aE)) { plot_trace(aE[k,]/aD,percent.burnin,thinning,param.name=paste(sprintf("aE_%s",k),"aD",sep="/")); devAskNewPage(ask=TRUE) } plot_trace(a0,percent.burnin,thinning) plot_trace(aD,percent.burnin,thinning) for (k in 1:nrow(aE)) { plot_trace(aE[k,],percent.burnin,thinning,param.name=paste("aE_",k)) } plot_trace(a2,percent.burnin,thinning) plot_trace(beta,percent.burnin,thinning) if(exists("phi_mat")){ plot_all_phi_trace(phi_mat,percent.burnin,thinning,population.names) } plot_trace(LnL_thetas,percent.burnin,thinning) plot_trace(LnL_counts,percent.burnin,thinning) plot_trace(Prob,percent.burnin,thinning) }) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_all_trace.R
plot_joint_marginal <- function(parameter1,parameter2,percent.burnin=0,thinning=1,param.name1=deparse(substitute(parameter1)),param.name2=deparse(substitute(parameter2))){ burnin <- (percent.burnin/100)*length(which(parameter1!=0)) x <- seq(from = burnin,to = length(which(parameter1!=0)),by = thinning) plot(parameter1[x],parameter2[x], col=grDevices::rainbow(start=0.5,end=1.0,length(parameter1[x]),alpha=0.4), pch=19, main=paste("Joint Marginal Density of",paste(param.name1,param.name2,sep=" and "),sep=" "), xlab=param.name1, ylab=param.name2, ylim=c(min(parameter2[x])-abs(min(parameter2[x])-max(parameter2[x]))/5,max(parameter2[x]))) graphics::legend(x="bottomright", cex=0.6, pt.cex=1, pch=21, col=1, pt.bg=c(grDevices::rainbow(start=0.5,end=1.0,length(parameter1[x]),alpha=0.8)[c(1,floor(length(parameter1[x])/2),length(parameter1[x]))]), legend=c("MCMC sampled generation 1", paste("MCMC sampled generation",floor(length(parameter1[x])/2)), paste("MCMC sampled generation",length(parameter1[x])))) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_joint_marginal.R
plot_marginal <- function(parameter,percent.burnin=0,thinning=1,histogram=TRUE,density=TRUE,population.names=NULL,param.name=deparse(substitute(parameter))){ burnin <- (percent.burnin/100)*length(which(parameter!=0)) x <- seq(from = burnin,to = length(which(parameter!=0)),by = thinning) marginal <- density(parameter[x],adj=1) if(is.null(population.names)){ plot(1,type="n", ylim=c(0,max(marginal$y)+max(marginal$y)/5), xlim=c(min(marginal$x),max(marginal$x)), main=paste("Marginal density of",param.name,sep=" "), ylab="density", xlab=param.name ) } if(!is.null(population.names)){ plot(1,type="n", ylim=c(0,max(marginal$y)+max(marginal$y)/5), xlim=c(min(marginal$x),max(marginal$x)), main=paste("Marginal density of F parameter,",population.names,sep=" "), ylab="density", xlab=param.name ) } if(histogram){ graphics::hist(parameter[x], freq=FALSE, col="gray", add=TRUE) } if(density){ graphics::lines(marginal,adj=1) graphics::polygon(x=c(0,marginal$x,0),y=c(0,marginal$y,0),col=grDevices::adjustcolor("blue",0.6)) } graphics::segments(x0=stats::median(parameter[x]), y0=0, x1=stats::median(parameter[x]), y1=max(marginal$y+marginal$y/20), col="red", lwd=3) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_marginal.R
plot_phi_marginal <- function(phi,percent.burnin=0,thinning=1,population.names=NULL,pop.index=NULL,histogram=TRUE,density=TRUE){ burnin <- (percent.burnin/100)*length(which(phi!=0)) x <- seq(from = burnin,to = length(which(phi!=0)),by = thinning) Fk <- 1/(1+phi[x]) marginal <- density(Fk,adj=1) if(is.null(population.names)){ plot(1,type="n", ylim=c(0,max(marginal$y)+max(marginal$y)/5), xlim=c(min(marginal$x),max(marginal$x)), main=sprintf("Marginal density of F parameter, population %s",pop.index), ylab="density", xlab=sprintf("F parameter, population %s",pop.index) ) } if(!is.null(population.names)){ plot(1,type="n", ylim=c(0,max(marginal$y)+max(marginal$y)/5), xlim=c(min(marginal$x),max(marginal$x)), main=paste("Marginal density of F parameter,",population.names,sep=" "), ylab="density", xlab=paste("F parameter,",population.names,sep=" ") ) } if(histogram){ hist(Fk, freq=FALSE, col="gray", add=TRUE) } if(density){ lines(marginal,adj=1) polygon(x=c(0,marginal$x,0),y=c(0,marginal$y,0),col=adjustcolor("blue",0.6)) } segments(x0=median(Fk), y0=0, x1=median(Fk), y1=max(marginal$y+marginal$y/20), col="red", lwd=3) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_phi_marginal.R
plot_phi_trace <- function(phi,percent.burnin=0,thinning=1,population.names=NULL,pop.index=NULL){ burnin <- (percent.burnin/100)*length(which(phi!=0)) x <- seq(from = burnin,to = length(which(phi!=0)),by = thinning) Fk <- 1/(1+phi[x]) if(is.null(population.names)){ plot(Fk, pch=19, ylim=c(0,1), col=grDevices::adjustcolor(1,alpha.f=0.4), main=sprintf("Trace Plot of F parameter, population %s",pop.index), xlab="MCMC sampled generations", ylab=sprintf("F parameter, population %s",pop.index) ) graphics::abline(h=stats::median(Fk),col="red") } if(!is.null(population.names)){ plot(Fk, pch=19, ylim=c(0,1), col=grDevices::adjustcolor(1,alpha.f=0.4), main=paste("Trace Plot of F parameter,",population.names,sep=" "), xlab="MCMC sampled generations", ylab=paste("F parameter,",population.names,sep=" ") ) graphics::abline(h=stats::median(Fk),col="red") } }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_phi_trace.R
plot_posterior_predictive_samples <- function(posterior.predictive.sample.file,save.figure=NULL,figure.name=NULL){ posterior.predictive.samples <- load_posterior_predictive_samples(posterior.predictive.sample.file) with(posterior.predictive.samples, { distance.array <- array(D,dim=dim(posterior.sample.Fst)) number.of.populations <- nrow(observed.Fst) population.pairs <- combn(1:number.of.populations,2) if(is.null(save.figure)){ plot(as.numeric(D[upper.tri(D)]),as.numeric(observed.Fst[upper.tri(observed.Fst)]),col="red",ylim=c(0,max(posterior.sample.Fst)+max(posterior.sample.Fst)/20),pch=19,cex=0.3,main="Posterior Predictive Sampling",xlab=expression(paste("Pairwise Geographic Distance ",(D[ij]),sep="")),ylab=expression(paste("Pairwise ",F[ST],sep=""))) prog <- txtProgressBar(min=0,ncol(population.pairs),char="|",style=3) for(i in 1:ncol(population.pairs)){ points(as.numeric(distance.array[population.pairs[,i][1],population.pairs[,i][2],]),as.numeric(posterior.sample.Fst[population.pairs[,i][1],population.pairs[,i][2],]),pch=19,col=adjustcolor(1,0.02),cex=0.2) setTxtProgressBar(prog,i) } points(as.numeric(D),as.numeric(observed.Fst),col="red",pch=19,cex=0.4) } if(!is.null(save.figure)){ png(figure.name,width=6*200,height=4*200,res=200,pointsize=9) plot(as.numeric(D[upper.tri(D)]),as.numeric(observed.Fst[upper.tri(observed.Fst)]),col="red",ylim=c(0,max(posterior.sample.Fst)+max(posterior.sample.Fst)/20),pch=19,cex=0.3,main="Posterior Predictive Sampling",xlab=expression(paste("Pairwise Geographic Distance ",(D[ij]),sep="")),ylab=expression(paste("Pairwise ",F[ST],sep=""))) prog <- txtProgressBar(min=0,ncol(population.pairs),char="|",style=3) for(i in 1:ncol(population.pairs)){ points(as.numeric(distance.array[population.pairs[,i][1],population.pairs[,i][2],]),as.numeric(posterior.sample.Fst[population.pairs[,i][1],population.pairs[,i][2],]),pch=19,col=adjustcolor(1,0.02),cex=0.2) setTxtProgressBar(prog,i) } points(as.numeric(D),as.numeric(observed.Fst),col="red",pch=19,cex=0.4) dev.off() } }) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_posterior_predictive_samples.R
plot_trace <- function(parameter,percent.burnin=0,thinning=1,param.name=deparse(substitute(parameter))){ burnin <- (percent.burnin/100)*length(which(parameter!=0)) x <- seq(from = burnin,to = length(which(parameter!=0)),by = thinning) plot(parameter[x], pch=19, col=grDevices::adjustcolor(1,alpha.f=0.4), main=paste("Trace Plot of",param.name,sep=" "), xlab="MCMC sampled generations", ylab=param.name ) graphics::abline(h=stats::median(parameter[x]),col="red") }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/plot_trace.R
posterior.predictive.sample <- function(MCMC.output,posterior.predictive.sample.size,output.file,prefix=''){ MCMC.output.list <- load_MCMC_output(MCMC.output) with(MCMC.output.list, { mcmc.generations <- length(which(a0 != 0)) n.pops <- nrow(last.params$counts) n.loci <- ncol(last.params$counts) sampled.generations <- sample(c(1:mcmc.generations),posterior.predictive.sample.size,replace=TRUE) posterior.sample.a0 <- a0[sampled.generations] posterior.sample.aD <- aD[sampled.generations] posterior.sample.aE <- matrix(aE[,sampled.generations],nrow=length(last.params$E),ncol=posterior.predictive.sample.size) posterior.sample.a2 <- a2[sampled.generations] posterior.sample.beta <- beta[sampled.generations] if(exists("phi_mat")){ posterior.sample.phi_mat <- phi_mat[,sampled.generations] } posterior.sample.covariance <- matrix(0,nrow=n.pops,ncol=n.pops) posterior.sample.thetas <- matrix(0,nrow=n.pops,ncol=n.loci) posterior.sample.mu <- matrix(0,nrow=n.pops,ncol=n.loci) posterior.sample.allele.frequencies <- matrix(0,nrow=n.pops,ncol=n.loci) posterior.sample.allele.counts <- matrix(0,nrow=n.pops,ncol=n.loci) posterior.sample.Fst <- array(dim=c(n.pops,n.pops,posterior.predictive.sample.size)) progress <- txtProgressBar(min=0,posterior.predictive.sample.size,char="|",style=3) for(i in 1:posterior.predictive.sample.size){ posterior.sample.covariance <- Covariance(posterior.sample.a0[i], posterior.sample.aD[i], posterior.sample.aE[,i], posterior.sample.a2[i], last.params$D, last.params$E, last.params$delta) posterior.sample.thetas <- t(mvrnorm(last.params$loci,numeric(n.pops),posterior.sample.covariance)) posterior.sample.mu <- matrix(rnorm(n.loci,0,sd=sqrt(1/posterior.sample.beta[i])),nrow=n.pops,ncol=n.loci,byrow=TRUE) posterior.allele.frequencies <- transform_frequencies(posterior.sample.thetas,posterior.sample.mu) if(!exists("phi_mat")){ posterior.sample.allele.counts <- simulate_allele_count_data(posterior.allele.frequencies,last.params$sample_sizes) } if(exists("phi_mat")){ posterior.sample.allele.counts <- simulate_allele_count_data(posterior.allele.frequencies,last.params$sample_sizes,posterior.sample.phi_mat[,i]) } posterior.sample.Fst[,,i] <- calculate.all.pairwise.Fst(posterior.sample.allele.counts,last.params$sample_sizes) setTxtProgressBar(progress,i) } observed.Fst <- calculate.all.pairwise.Fst(last.params$counts,last.params$sample_sizes) D <- last.params$D E <- last.params$E save(observed.Fst,posterior.sample.Fst,D,E,posterior.predictive.sample.size,file=paste(prefix,paste(output.file,".Robj",sep=''),sep='')) }) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/posterior.predictive.sample.R
simulate_allele_count_data <- function(allele.frequencies,sample.sizes,phi.parameter=NULL,invariant.loci.tolerance=10,allele.frequency.numerical.shift=1e-10){ populations <- nrow(allele.frequencies) loci <- ncol(allele.frequencies) simulated.allele.counts <- matrix(0,nrow=populations,ncol=loci) if(is.null(phi.parameter)){ invariant.loci <- apply(simulated.allele.counts,2,identify_invariant_loci) i <- 0 while(any(invariant.loci=="TRUE") && i < invariant.loci.tolerance){ simulated.allele.counts <- matrix( stats::rbinom(n=populations*loci, size=sample.sizes, prob=allele.frequencies), nrow=populations,ncol=loci) invariant.loci <- apply(simulated.allele.counts,2,identify_invariant_loci) i <- i + 1 } } if(!is.null(phi.parameter)){ invariant.loci <- apply(simulated.allele.counts,2,identify_invariant_loci) i <- 0 while(any(invariant.loci=="TRUE") && i < invariant.loci.tolerance){ allele.frequencies[which(allele.frequencies == 0)] <- allele.frequency.numerical.shift allele.frequencies[which(allele.frequencies == 1)] <- 1-allele.frequency.numerical.shift simulated.allele.counts <- matrix( emdbook::rbetabinom(n=populations*loci, size=sample.sizes, prob=allele.frequencies, shape1=phi.parameter*allele.frequencies, shape2=phi.parameter*(1-allele.frequencies)), nrow=populations,ncol=loci) invariant.loci <- apply(simulated.allele.counts,2,identify_invariant_loci) i <- i + 1 } } if(i == invariant.loci.tolerance){ warning("your data matrix contains invariant loci") } return(simulated.allele.counts) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/simulate_allele_count_data.R
transform_frequencies <- function(thetas,mu){ allele.frequencies <- 1/(1+exp(-(thetas+mu))) return(allele.frequencies) }
/scratch/gouwar.j/cran-all/cranData/BEDASSLE/R/transform_frequencies.R
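transform_frequencies is the inverse-logit link that maps the latent population effects (thetas) plus locus effects (mu) onto allele frequencies in (0, 1). Two illustrative evaluations:
transform_frequencies(thetas = 0, mu = 0)  # 0.5: a zero latent value maps to the midpoint
transform_frequencies(thetas = 3, mu = 2)  # ~0.993: large positive values saturate toward 1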
# Delimiters used in .fam and .bim files delims <- "[ \t]" BEDMatrix <- setClass("BEDMatrix", slots = c( xptr = "externalptr", dims = "integer", dnames = "list", path = "character" )) BEDMatrix <- function(path, n = NULL, p = NULL, simple_names = FALSE) { path <- path.expand(path) if (!file.exists(path)) { # Try to add extension (common in PLINK) path <- paste0(path, ".bed") if (!file.exists(path)) { stop("File not found.", call. = FALSE) } } pathSansExt <- tools::file_path_sans_ext(path) filesetName <- basename(pathSansExt) if (is.null(n)) { # Check if .fam file exists famPath <- paste0(pathSansExt, ".fam") if (!file.exists(famPath)) { stop(filesetName, ".fam not found. Provide number of samples (n).", call. = FALSE) } else { message("Extracting number of samples and rownames from ", filesetName, ".fam...") if (requireNamespace("data.table", quietly = TRUE)) { if (simple_names) { famColumns <- c(2L) } else { famColumns <- c(1L, 2L) } fam <- data.table::fread( famPath, select = famColumns, colClasses = list(character = famColumns), data.table = FALSE, showProgress = FALSE ) # Determine n n <- nrow(fam) # Determine rownames if (simple_names) { # Use within-family ID only rownames <- fam[, 1L] } else { # Concatenate family ID and within-family ID rownames <- paste0(fam[, 1L], "_", fam[, 2L]) } } else { fam <- readLines(famPath) # much faster than read.table # Determine n n <- length(fam) # Determine rownames if (simple_names) { # Use within-family ID only rownames <- vapply(strsplit(fam, delims), `[`, "", 2L) } else { rownames <- vapply(strsplit(fam, delims), function(line) { # Concatenate family ID and within-family ID paste0(line[1L], "_", line[2L]) }, "") } } } } else { n <- as.integer(n) rownames <- NULL } if (is.null(p)) { # Check if .bim file exists bimPath <- paste0(pathSansExt, ".bim") if (!file.exists(bimPath)) { stop(filesetName, ".bim not found. Provide number of variants (p).", call. = FALSE) } else { message("Extracting number of variants and colnames from ", filesetName, ".bim...") if (requireNamespace("data.table", quietly = TRUE)) { if (simple_names) { bimColumns <- c(2L) } else { bimColumns <- c(2L, 5L) } bim <- data.table::fread( bimPath, select = bimColumns, colClasses = list(character = bimColumns), data.table = FALSE, showProgress = FALSE ) # Determine p p <- nrow(bim) # Determine colnames if (simple_names) { # Use variant name only colnames <- bim[, 1L] } else { # Concatenate variant name and A1 allele (like '--recode A' # in PLINK) colnames <- paste0(bim[, 1L], "_", bim[, 2L]) } } else { bim <- readLines(bimPath) # much faster than read.table # Determine p p <- length(bim) # Determine colnames if (simple_names) { # Use variant name only colnames <- vapply(strsplit(bim, delims), `[`, "", 2L) } else { colnames <- vapply(strsplit(bim, delims), function(line) { # Concatenate variant name and A1 allele (like # '--recode A' in PLINK) paste0(line[2L], "_", line[5L]) }, "") } } } } else { p <- as.integer(p) colnames <- NULL } obj <- new( "BEDMatrix", xptr = .Call(C_BEDMatrix_initialize, path, n, p), path = path, dims = c(n, p), dnames = list(rownames, colnames) ) return(obj) } setMethod("show", "BEDMatrix", function(object) { dims <- dim(object) n <- dims[1L] p <- dims[2L] cat("BEDMatrix: ", n, " x ", p, " [", slot(object, "path"), "]\n", sep = "") }) extract_vector <- function(x, i) { .Call(C_BEDMatrix_extract_vector, slot(x, "xptr"), i) } extract_matrix <- function(x, i, j) { subset <- .Call(C_BEDMatrix_extract_matrix, slot(x, "xptr"), i, j) # Preserve dimnames names <- slot(x, "dnames") dimnames(subset) <- list( names[[1L]][i], names[[2L]][j] ) return(subset) } `[.BEDMatrix` <- extract( extract_vector = extract_vector, extract_matrix = extract_matrix, allowDoubles = TRUE ) dim.BEDMatrix <- function(x) { slot(x, "dims") } dimnames.BEDMatrix <- function(x) { slot(x, "dnames") } `dimnames<-.BEDMatrix` <- function(x, value) { d <- dim(x) v1 <- value[[1L]] v2 <- value[[2L]] if (!is.list(value) || length(value) != 2L || !(is.null(v1) || length(v1) == d[1L]) || !(is.null(v2) || length(v2) == d[2L])) { stop("invalid dimnames", call. = FALSE) } slot(x, "dnames") <- lapply(value, function(v) { if (!is.null(v)) { as.character(v) } }) return(x) } length.BEDMatrix <- function(x) { prod(dim(x)) } str.BEDMatrix <- function(object, ...) { print(object) } as.matrix.BEDMatrix <- function(x, ...) { x[, , drop = FALSE] } is.matrix.BEDMatrix <- function(x) { TRUE }
/scratch/gouwar.j/cran-all/cranData/BEDMatrix/R/BEDMatrix.R
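A short usage sketch for the constructor above, against the example fileset shipped with the package (per the test setup below, the bundled example is 50 samples by 1000 variants); n and p are read from the accompanying .fam and .bim files unless supplied:
library(BEDMatrix)
path <- system.file("extdata", "example.bed", package = "BEDMatrix")
m <- BEDMatrix(path)  # dimensions and dimnames inferred from example.fam / example.bim
dim(m)                # 50 x 1000 for the bundled example
m[1:3, 1:5]           # subsets are read on demand; the full file is never loaded into memory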
extdataPath <- system.file("extdata", package = "BEDMatrix") standalonePath <- "standalone.bed" parseRaw <- function(path) { lines <- strsplit(readLines(path), " ") header <- lines[[1]] data <- matrix(data = unlist(lines[2:length(lines)]), nrow = 50, ncol = 1006, byrow = TRUE) pheno <- data[, 1:6] geno <- data[, 7:ncol(data)] suppressWarnings(mode(geno) <- "integer") rownames(geno) <- paste0(pheno[, 1], "_", pheno[, 2]) colnames(geno) <- header[7:length(header)] return(geno) } raw <- parseRaw(paste0(extdataPath, "/example.raw")) bed <- suppressMessages(BEDMatrix(path = paste0(extdataPath, "/example.bed")))
/scratch/gouwar.j/cran-all/cranData/BEDMatrix/inst/tinytest/setup.R
source("setup.R") # it throws an error if file does not exist expect_error(BEDMatrix("NOT_FOUND"), "File not found\\.") # it throws an error if file is not a BED file expect_error(BEDMatrix("test-BEDMatrix.R")) # test both prefix and .bed paths for (path in c(paste0(extdataPath, "/example"), paste0(extdataPath, "/example.bed"))) { # it determines n from FAM file bed <- suppressMessages(BEDMatrix(path = path)) expect_equal(nrow(bed), nrow(raw)) expect_message(BEDMatrix(path = path), "Extracting number of samples and rownames from example\\.fam\\.\\.\\.") # it throws an error if FAM file is not found and n is not given expect_error(BEDMatrix(path = standalonePath), "standalone.fam not found\\. Provide number of samples \\(n\\)\\.") # it determines rownames from FAM file bed <- suppressMessages(BEDMatrix(path = path)) expect_equal(rownames(bed), rownames(raw)) expect_message(BEDMatrix(path = path), "Extracting number of samples and rownames from example\\.fam\\.\\.\\.") # it determines p from BIM file bed <- suppressMessages(BEDMatrix(path = path)) expect_equal(ncol(bed), ncol(raw)) expect_message(BEDMatrix(path = path), "Extracting number of variants and colnames from example\\.bim\\.\\.\\.") # it throws an error if BIM file is not found and p is not given expect_error(BEDMatrix(path = standalonePath), "standalone.fam not found\\. Provide number of samples \\(n\\)\\.") # it determines colnames from BIM file bed <- suppressMessages(BEDMatrix(path = path)) expect_equal(colnames(bed), colnames(raw)) expect_message(BEDMatrix(path = path), "Extracting number of variants and colnames from example\\.bim\\.\\.\\.") # it accepts n and p if FAM or BIM file are present bed <- BEDMatrix(path = path, n = nrow(raw), p = ncol(raw)) expect_equal(dimnames(bed), list(NULL, NULL)) # it accepts n and p if FAM or BIM file is not found bed <- BEDMatrix(path = standalonePath, n = 3, p = 6) expect_equal(dimnames(bed), list(NULL, NULL)) # it throws an error if dimensions are wrong expect_error(BEDMatrix(path = path, n = 10, p = 5), "n or p does not match the dimensions of the file\\.") }
/scratch/gouwar.j/cran-all/cranData/BEDMatrix/inst/tinytest/test-BEDMatrix.R
source("setup.R") # Source extraction tests extractionTests <- new.env() extractionTests$COMPARE_OBJECT <- raw extractionTests$CUSTOM_OBJECT <- bed extractionTests$OUT_OF_BOUNDS_INT <- length(extractionTests$CUSTOM_OBJECT) + 1 extractionTests$OUT_OF_BOUNDS_CHAR <- "snp1000_U" source( file = system.file("test-suite", "crochet-extract.R", package = "crochet"), local = extractionTests )
/scratch/gouwar.j/cran-all/cranData/BEDMatrix/inst/tinytest/test-crochet.R
source("setup.R") expect_equal(length(bed), length(raw))
/scratch/gouwar.j/cran-all/cranData/BEDMatrix/inst/tinytest/test-length.R
#' BEKKs: Volatility modelling #' #' @docType package #' @aliases _PACKAGE #' @name BEKKs #' @author \itemize{ #' \item Markus J. Fülle \email{[email protected]} #' \item Helmut Herwartz \email{[email protected]} #' \item Alexander Lange \email{[email protected]} #' \item Christian M. Hafner \email{[email protected]} #' } #' #' @import mathjaxr #' @description #' \loadmathjax #' This package implements estimation, simulation and forecasting techniques for conditional volatility modelling using the BEKK model. #' The full BEKK(1,1,1) model of Engle and Kroner (1995) #' \mjdeqn{H_t = CC' + A' r_{t-1} r_{t-1}'A + G' H_{t-1}G ,}{H_t = CC' + A' r_{t-1} r_{t-1}'A + G' H_{t-1}G ,} the asymmetric extensions of Kroner and Ng (1998) and Grier et al. (2004) #' \mjdeqn{H_t = CC' + A' r_{t-1} r_{t-1}'A +B'\gamma_{t-1} \gamma_{t-1}' B+G'H_{t-1}G}{H_t = CC' +A'r_{t-1} r_{t-1}'A +B'\gamma_{t-1} \gamma_{t-1}' B+G'H_{t-1}G,} #' with \mjdeqn{\gamma_t = r_t I\left(r_t < 0 \right)}{\gamma_t = r_t I(r_t < 0 )} are implemented. #' Moreover, the diagonal BEKK, where the parameter matrices A, B and G are reduced to diagonal matrices, and #' the scalar BEKK model of Ding and Engle (2001) #' \mjdeqn{H_t = CC' + a r_{t-1} r_{t-1}' + g H_{t-1},}{H_t = CC' + a r_{t-1} r_{t-1}' + g H_{t-1},} #' where a and g are scalar parameters, are implemented to allow faster but less flexible estimation in higher dimensions. #' @details #' The main functions are: #' \itemize{ #' \item \tabular{ll}{ \code{\link{bekk_spec}} \tab Specifies the model type to be estimated.} #' \item \tabular{ll}{\code{\link{bekk_fit}} \tab Estimates a BEKK(1,1,1) model of a given series and #' specification object \link{bekk_spec}.} #' \item \tabular{ll}{ \code{\link{simulate}} \tab Simulates a BEKK(1,1,1) process using either a \link{bekk_fit} or \link{bekk_spec} object.} #' \item \tabular{ll}{ \code{\link{predict}} \tab Forecasts conditional volatility using a \link{bekk_fit} object.} #' \item \tabular{ll}{ \code{\link{VaR}} \tab Estimates (portfolio) Value-at-Risk using a fitted BEKK(1,1,1) model.} #' \item \tabular{ll}{ \code{\link{backtest}} \tab Backtesting estimated (portfolio) value-at-risks of a fitted BEKK(1,1,1) model.} #' \item \tabular{ll}{ \code{\link{virf}} \tab Calculates volatility impulse response functions for fitted symmetric BEKK(1,1,1) models.} #' } #' @references Engle, R. F. and K. F. Kroner (1995). Multivariate simultaneous generalized ARCH. Econometric Theory 11(1), 122-150. #' @references Kroner, K. F. and V. K. Ng (1998). Modeling asymmetric comovements of asset returns. Review of Financial Studies 11(4), 817-44. #' @references Ding, Zhuanxin and Engle, Robert F (2001). Large scale conditional covariance matrix modeling, estimation and testing. NYU working paper No. Fin-01-029. #' @references Grier, K. B., Olan T. Henry, N. Olekalns, and K. Shields (2004). The asymmetric effects of uncertainty on inflation and output growth. Journal of Applied Econometrics 19(5), 551-565. #' @references Hafner CM, Herwartz H (2006). Volatility impulse responses for multivariate GARCH models: An exchange rate illustration. Journal of International Money and Finance, 25, 719-740. #' @useDynLib BEKKs #' @importFrom Rcpp sourceCpp #' @md NULL
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/BEKKs.R
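A minimal end-to-end sketch assembled from the function list above and the examples used elsewhere in the package documentation (StocksBonds is the bivariate example dataset shipped with BEKKs):
library(BEKKs)
data(StocksBonds)
spec <- bekk_spec()                                 # symmetric BEKK(1,1,1) by default
fit  <- bekk_fit(spec, StocksBonds)                 # QML estimation via the BHHH algorithm
fc   <- predict(fit, n.ahead = 5)                   # conditional volatility forecasts
risk <- VaR(fit, portfolio_weights = c(0.5, 0.5))   # equally-weighted portfolio Value-at-Risk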
#' Performing a Portmanteau test checking for remaining correlation in the empirical co-variances of the estimated BEKK residuals. #' #' @description Method for a Portmanteau test of the null hypothesis of no remaining correlation in the co-variances of the estimated BEKK residuals. #' #' @param x An object of class "bekkFit" from function \link{bekk_fit}. #' @param lags An integer defining the lag length. #' @return Returns an object of class "htest" containing the p-value and test statistic. #' #' @details Here, the multivariate Portmanteau test of Hosking (1980) is implemented. #' #' @references J. R. M. Hosking (1980). The Multivariate Portmanteau Statistic, Journal of the American Statistical Association, 75:371, 602-608. #' @import xts #' @import stats #' @import ks #' @export portmanteau.test <- function(x, lags = 5){ if(!is.numeric(lags)){ stop("Please provide a numeric object or vector for 'lags'.") } if(any(lags<3)){ stop("Please provide 'lags' larger than 2.") } UseMethod("portmanteau.test") } #' @export portmanteau.test.bekkFit <- function(x, lags = 5){ e <- x$e_t n <- nrow(e) N <- ncol(e) e <- matrix(e,n,N) #e <- matrix(e, nrow = n, ncol = N) e2 <- matrix(NA,nrow = n, ncol = N*(N+1)/2) for(i in 1:n){ e2[i,] <- ks::vech(crossprod(t(e[i,]))) } e=e2 c_hat <- function(j){ c= t(e[(j+1):n,]) %*% e[1:(n-j),] return(c/n) } c_0 = c_hat(0) #c_0_inv = solve(c_0) c_0_inv = chol2inv(chol(c_0)) Q <- function(lgs){ q=0 for(i in 1:lgs){ c_temp = t(c_hat(i)) q=q+sum(diag(c_temp%*%c_0_inv%*%c_temp%*%c_0_inv)) } return(q) } p_val_q <- function(k, lgs){ return(1-pchisq(k, df=(lgs-2)*(N^2))) } statistic = Q(lags) names(statistic) = "statistic" p.value = p_val_q(statistic, lags) names(p.value) = "p.value" parameter = (lags-2)*(N^2) names(parameter) = "df" data = "Residuals of estimated BEKK process" rval <- list(statistic = statistic, parameter = parameter, p.value = p.value, data.name = data, method = paste("Portmanteau Test (Lags = ", as.character(lags), ")", sep = "" )) class(rval) <- "htest" return(rval) }
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/Portmanteau_test.R
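A usage sketch based on the roxygen block above; under the null of no remaining correlation in the co-variances of the BEKK residuals, the statistic is approximately chi-squared, so a small p-value flags a poorly fitting model:
fit <- bekk_fit(bekk_spec(), StocksBonds)  # see bekk_fit below
portmanteau.test(fit, lags = 5)            # returns an "htest" with statistic, df and p-value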
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 diag_selection_mat <- function(n) { .Call('_BEKKs_diag_selection_mat', PACKAGE = 'BEKKs', n) } cut_mat_symmetric <- function(n) { .Call('_BEKKs_cut_mat_symmetric', PACKAGE = 'BEKKs', n) } cut_mat_asymmetric <- function(n) { .Call('_BEKKs_cut_mat_asymmetric', PACKAGE = 'BEKKs', n) } set_seed <- function(seed) { invisible(.Call('_BEKKs_set_seed', PACKAGE = 'BEKKs', seed)) } valid_bekk <- function(C, A, G) { .Call('_BEKKs_valid_bekk', PACKAGE = 'BEKKs', C, A, G) } valid_asymm_bekk <- function(C, A, B, G, r, signs) { .Call('_BEKKs_valid_asymm_bekk', PACKAGE = 'BEKKs', C, A, B, G, r, signs) } valid_asymm_bekk_sim <- function(C, A, B, G, exp_indicator_value, signs) { .Call('_BEKKs_valid_asymm_bekk_sim', PACKAGE = 'BEKKs', C, A, B, G, exp_indicator_value, signs) } loglike_bekk <- function(theta, r) { .Call('_BEKKs_loglike_bekk', PACKAGE = 'BEKKs', theta, r) } loglike_asymm_bekk <- function(theta, r, signs) { .Call('_BEKKs_loglike_asymm_bekk', PACKAGE = 'BEKKs', theta, r, signs) } loglike_dbekk <- function(theta, r) { .Call('_BEKKs_loglike_dbekk', PACKAGE = 'BEKKs', theta, r) } loglike_asymm_dbekk <- function(theta, r, signs) { .Call('_BEKKs_loglike_asymm_dbekk', PACKAGE = 'BEKKs', theta, r, signs) } score_bekk <- function(theta, r) { .Call('_BEKKs_score_bekk', PACKAGE = 'BEKKs', theta, r) } score_dbekk <- function(theta, r) { .Call('_BEKKs_score_dbekk', PACKAGE = 'BEKKs', theta, r) } score_asymm_bekk <- function(theta, r, signs) { .Call('_BEKKs_score_asymm_bekk', PACKAGE = 'BEKKs', theta, r, signs) } score_asymm_dbekk <- function(theta, r, signs) { .Call('_BEKKs_score_asymm_dbekk', PACKAGE = 'BEKKs', theta, r, signs) } bhh_bekk <- function(r, theta, max_iter, crit) { .Call('_BEKKs_bhh_bekk', PACKAGE = 'BEKKs', r, theta, max_iter, crit) } bhh_asymm_bekk <- function(r, theta, max_iter, crit, signs) { .Call('_BEKKs_bhh_asymm_bekk', PACKAGE = 'BEKKs', r, theta, max_iter, crit, signs) } bhh_dbekk <- function(r, theta, max_iter, crit) { .Call('_BEKKs_bhh_dbekk', PACKAGE = 'BEKKs', r, theta, max_iter, crit) } bhh_asymm_dbekk <- function(r, theta, max_iter, crit, signs) { .Call('_BEKKs_bhh_asymm_dbekk', PACKAGE = 'BEKKs', r, theta, max_iter, crit, signs) } random_grid_search_BEKK <- function(r) { .Call('_BEKKs_random_grid_search_BEKK', PACKAGE = 'BEKKs', r) } random_grid_search_asymmetric_BEKK <- function(r, signs) { .Call('_BEKKs_random_grid_search_asymmetric_BEKK', PACKAGE = 'BEKKs', r, signs) } random_grid_search_dBEKK <- function(r) { .Call('_BEKKs_random_grid_search_dBEKK', PACKAGE = 'BEKKs', r) } random_grid_search_asymmetric_dBEKK <- function(r, signs) { .Call('_BEKKs_random_grid_search_asymmetric_dBEKK', PACKAGE = 'BEKKs', r, signs) } sigma_bekk <- function(r, C, A, G) { .Call('_BEKKs_sigma_bekk', PACKAGE = 'BEKKs', r, C, A, G) } sigma_bekk_asymm <- function(r, C, A, B, G, signs) { .Call('_BEKKs_sigma_bekk_asymm', PACKAGE = 'BEKKs', r, C, A, B, G, signs) } hesse_bekk <- function(theta, r) { .Call('_BEKKs_hesse_bekk', PACKAGE = 'BEKKs', theta, r) } hesse_dbekk <- function(theta, r) { .Call('_BEKKs_hesse_dbekk', PACKAGE = 'BEKKs', theta, r) } hesse_asymm_bekk <- function(theta, r, signs) { .Call('_BEKKs_hesse_asymm_bekk', PACKAGE = 'BEKKs', theta, r, signs) } hesse_asymm_dbekk <- function(theta, r, signs) { .Call('_BEKKs_hesse_asymm_dbekk', PACKAGE = 'BEKKs', theta, r, signs) } eigen_value_decomposition <- function(A) { .Call('_BEKKs_eigen_value_decomposition', PACKAGE = 'BEKKs', A) } virf_bekk <- function(H_t, theta, shocks, periods) { .Call('_BEKKs_virf_bekk', PACKAGE = 'BEKKs', H_t, theta, shocks, periods) } virf_dbekk <- function(H_t, theta, shocks, periods) { .Call('_BEKKs_virf_dbekk', PACKAGE = 'BEKKs', H_t, theta, shocks, periods) } virf_bekka <- function(H_t, C, A, B, G, signs, expected_signs, shocks, periods) { .Call('_BEKKs_virf_bekka', PACKAGE = 'BEKKs', H_t, C, A, B, G, signs, expected_signs, shocks, periods) } virf_sbekk <- function(H_t, theta, shocks, periods) { .Call('_BEKKs_virf_sbekk', PACKAGE = 'BEKKs', H_t, theta, shocks, periods) } simulate_bekk_c <- function(theta, NoObs, n) { .Call('_BEKKs_simulate_bekk_c', PACKAGE = 'BEKKs', theta, NoObs, n) } simulate_bekka_c <- function(theta, NoObs, n, signs, expected_signs) { .Call('_BEKKs_simulate_bekka_c', PACKAGE = 'BEKKs', theta, NoObs, n, signs, expected_signs) } simulate_dbekk_c <- function(theta, NoObs, N) { .Call('_BEKKs_simulate_dbekk_c', PACKAGE = 'BEKKs', theta, NoObs, N) } simulate_dbekka_c <- function(theta, NoObs, N, signs, expected_signs) { .Call('_BEKKs_simulate_dbekka_c', PACKAGE = 'BEKKs', theta, NoObs, N, signs, expected_signs) } simulate_sbekk_c <- function(theta, NoObs, N) { .Call('_BEKKs_simulate_sbekk_c', PACKAGE = 'BEKKs', theta, NoObs, N) } simulate_sbekka_c <- function(theta, NoObs, N, signs, expected_signs) { .Call('_BEKKs_simulate_sbekka_c', PACKAGE = 'BEKKs', theta, NoObs, N, signs, expected_signs) } indicatorFunction <- function(r, signs) { .Call('_BEKKs_indicatorFunction', PACKAGE = 'BEKKs', r, signs) } expected_indicator_value <- function(r, signs) { .Call('_BEKKs_expected_indicator_value', PACKAGE = 'BEKKs', r, signs) } elimination_mat <- function(n) { .Call('_BEKKs_elimination_mat', PACKAGE = 'BEKKs', n) } commutation_mat <- function(n) { .Call('_BEKKs_commutation_mat', PACKAGE = 'BEKKs', n) } duplication_mat <- function(n) { .Call('_BEKKs_duplication_mat', PACKAGE = 'BEKKs', n) } inv_gen <- function(m) { .Call('_BEKKs_inv_gen', PACKAGE = 'BEKKs', m) } valid_sbekk <- function(C, a, g) { .Call('_BEKKs_valid_sbekk', PACKAGE = 'BEKKs', C, a, g) } valid_asymm_sbekk <- function(C, a, b, g, r, signs) { .Call('_BEKKs_valid_asymm_sbekk', PACKAGE = 'BEKKs', C, a, b, g, r, signs) } loglike_sbekk <- function(theta, r) { .Call('_BEKKs_loglike_sbekk', PACKAGE = 'BEKKs', theta, r) } loglike_asymm_sbekk <- function(theta, r, signs) { .Call('_BEKKs_loglike_asymm_sbekk', PACKAGE = 'BEKKs', theta, r, signs) } score_sbekk <- function(theta, r) { .Call('_BEKKs_score_sbekk', PACKAGE = 'BEKKs', theta, r) } score_asymm_sbekk <- function(theta, r, signs) { .Call('_BEKKs_score_asymm_sbekk', PACKAGE = 'BEKKs', theta, r, signs) } bhh_sbekk <- function(r, theta, max_iter, crit) { .Call('_BEKKs_bhh_sbekk', PACKAGE = 'BEKKs', r, theta, max_iter, crit) } bhh_asymm_sbekk <- function(r, theta, max_iter, crit, signs) { .Call('_BEKKs_bhh_asymm_sbekk', PACKAGE = 'BEKKs', r, theta, max_iter, crit, signs) } hesse_sbekk <- function(theta, r) { .Call('_BEKKs_hesse_sbekk', PACKAGE = 'BEKKs', theta, r) } hesse_asymm_sbekk <- function(theta, r, signs) { .Call('_BEKKs_hesse_asymm_sbekk', PACKAGE = 'BEKKs', theta, r, signs) } sigma_sbekk <- function(r, C, a, g) { .Call('_BEKKs_sigma_sbekk', PACKAGE = 'BEKKs', r, C, a, g) } sigma_sbekk_asymm <- function(r, C, a, b, g, signs) { .Call('_BEKKs_sigma_sbekk_asymm', PACKAGE = 'BEKKs', r, C, a, b, g, signs) } random_grid_search_sBEKK <- function(r) { .Call('_BEKKs_random_grid_search_sBEKK', PACKAGE = 'BEKKs', r) } random_grid_search_asymmetric_sBEKK <- function(r, signs) { .Call('_BEKKs_random_grid_search_asymmetric_sBEKK', PACKAGE = 'BEKKs', r, signs) } YLagCr <- function(y, p) { .Call('_BEKKs_YLagCr', PACKAGE = 'BEKKs', y, p) }
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/RcppExports.R
#' Calculating Value-at-Risk (VaR) #' #' @description Method for calculating VaR from estimated covariance processes (\link{bekk_fit}) or predicted covariances (\link{predict}). #' #' @param x An object of class "bekkFit" from the function \link{bekk_fit} or an object of class "bekkForecast" from the function \link{predict}. #' @param p A numerical value that determines the confidence level. The default value is set at 0.99 in accordance with the Basel Regulation. #' @param portfolio_weights A vector determining the portfolio weights to calculate the portfolio VaR. If set to "NULL", the univariate VaRs for each series are calculated. #' @param distribution A character string determining the assumed distribution of the residuals. Implemented are "normal", "empirical" and "t". The default is using the empirical distribution of the residuals. #' @return Returns an S3 class "var" object containing the VaR forecast and respective confidence bands. #' @examples #' \donttest{ #' #' data(StocksBonds) #' obj_spec <- bekk_spec() #' x1 <- bekk_fit(obj_spec, StocksBonds, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9) #' #' # single VaRs of series #' x2 <- VaR(x1, distribution="normal") #' plot(x2) #' #' # VaR of equally-weighted portfolio #' portfolio_weights <- c(0.5, 0.5) #' x3 <- VaR(x1, portfolio_weights = portfolio_weights) #' plot(x3) #' #' # VaR of traditional 30/70 weighted bond and stock portfolio #' portfolio_weights <- c(0.3, 0.7) #' x4 <- VaR(x1, portfolio_weights = portfolio_weights) #' plot(x4) #' #' } #' #' @import xts #' @import stats #' @import moments #' @export VaR <- function(x, p = 0.99, portfolio_weights = NULL, distribution = "empirical") { UseMethod('VaR') } #' @export VaR.bekkFit <- function(x, p = 0.99, portfolio_weights = NULL, distribution = "empirical" ) { if(nrow(x$data) < 1000 && distribution == "empirical"){ stop("Using the empirical distribution is not stable for time series with less than 1000 observations!") } alpha = p N = ncol(x$data) n = nrow(x$data) match.arg(distribution, c("empirical", "t", "normal")) if(length(portfolio_weights)!= N && !is.null(portfolio_weights)){ stop("Portfolio weights do not match the number of time series") } #specify quantiles here if(distribution == "t"){ #fit skewed t skew_t <- function(i){ kurtos = moments::kurtosis(x$e_t[,i])-3 df = 6/kurtos+4 if(df <= 4){ df=4.001 } return(qt(1-alpha, df = df)/sqrt(df/(df-2))) } qtls <- sapply(1:N, skew_t) }else if(distribution == "empirical"){ empirical <- function(i){ quantile(x$e_t[,i],1-alpha) } qtls <- sapply(1:N, empirical) } else if(distribution == "normal"){ #fit skewed t qtls <- rep(qnorm(1-alpha),ncol(x$data)) } else{ qtls <- rep(qnorm(1-alpha),ncol(x$data)) } #quantile(x$e_t, ) if (is.null(portfolio_weights)) { columns = ncol(x$data) csd <- extract_csd(x) VaR <- matrix(NA, nrow = nrow(x$data), ncol = ncol(x$data)) for(i in 1:n) { for(j in 1: ncol(x$data)){ VaR[i, j] = sqrt(matrix(x$H_t[i,],N,N)[j,j]) * qtls[j] } } VaR <- as.data.frame(VaR) for(column in 1:columns) { r = as.vector(na.omit(x$data[,column])) if (!is.numeric(r)) stop("The selected column is not numeric") m2 = csd[, column] #VaR[, column] = - qnorm(alpha)*m2 #VaR <- as.data.frame(VaR) for (i in 1:ncol(x$data)) { colnames(VaR)[i] <- paste('VaR of', colnames(x$data)[i]) } } } else { if(distribution == "t"){ #fit skewed t kurtos = moments::kurtosis(x$e_t%*%portfolio_weights)-3 df = 6/kurtos+4 if(df <= 4){ df=4.001 } qtls <- qt(1-alpha, df = df)/sqrt(df/(df-2)) } else if(distribution == "empirical"){ qtls <- quantile(x$e_t%*%portfolio_weights,1-alpha) } else if(distribution == "normal"){ #fit skewed t qtls <-qnorm(1-alpha) } else{ qtls <- qnorm(1-alpha) } VaR <- matrix(NA, nrow = nrow(x$data), ncol = 1) for(i in 1:nrow(x$H_t)) { VaR[i,] <- qtls*sqrt(portfolio_weights%*%matrix(x$H_t[i,], ncol = ncol(x$data))%*%portfolio_weights) #VaR[i,] <- portfolio_weights%*%eigen_value_decomposition(matrix(x$H_t[i,], ncol = ncol(x$data)))%*%qtls } VaR <- as.data.frame(VaR) } if (inherits(x$data, "ts")) { VaR <- ts(VaR, start = time(x$data)[1], frequency = frequency(x$data)) }else if(inherits(x$data, "xts") || inherits(x$data, "zoo") ){ VaR <- xts(VaR, order.by = time(x$data[1:nrow(x$data),])) } result <- list(VaR = VaR, p = p, portfolio_weights = portfolio_weights, bekk = x) class(result) <- c('var', 'bekkFit') return(result) } #' @export VaR.bekkForecast <- function(x, p = 0.99, portfolio_weights = NULL, distribution = "empirical") { if(nrow(x$bekkfit$data) < 1000 && distribution == "empirical"){ stop("Using the empirical distribution is not stable for time series with less than 1000 observations!") } alpha = p obj <- x$bekkfit obj$H_t <- rbind(x$bekkfit$H_t, x$H_t_forecast) obj$sigma_t <- rbind(x$bekkfit$sigma_t, x$volatility_forecast) N = ncol(obj$data) n = nrow(obj$H_t) if(length(portfolio_weights)!= N && !is.null(portfolio_weights)){ stop("Portfolio weights do not match the number of time series") } if(distribution == "t"){ #fit skewed t skew_t <- function(i){ kurtos = moments::kurtosis(obj$e_t[,i])-3 df = 6/kurtos+4 if(df <= 4){ df=4.001 } return(qt(1-alpha, df = df)/sqrt(df/(df-2))) } qtls <- sapply(1:N, skew_t) }else if(distribution == "empirical"){ empirical <- function(i){ quantile(obj$e_t[,i],1-alpha) } qtls <- sapply(1:N, empirical) } else if(distribution == "normal"){ #fit skewed t qtls <- rep(qnorm(1-alpha),ncol(obj$data)) } else{ qtls <- rep(qnorm(1-alpha),ncol(obj$data)) } if (is.null(portfolio_weights)) { columns = ncol(x$bekkfit$data) #csd <- extract_csd(obj) VaR <- matrix(NA, nrow = nrow(x$bekkfit$data) + x$n.ahead, ncol = ncol(x$bekkfit$data)) for(i in 1:nrow(obj$H_t)) { for(j in 1: ncol(x$bekkfit$data)){ VaR[i, j] = sqrt(matrix(obj$H_t[i,],N,N)[j,j]) * qtls[j] } } VaR <- as.data.frame(VaR) for (i in 1:ncol(x$bekkfit$data)) { colnames(VaR)[i] <- paste('VaR of', colnames(x$bekkfit$data)[i]) } # Confidence intervals H_t_lower <- rbind(x$bekkfit$H_t[-nrow(x$bekkfit$H_t),], x$H_t_lower_conf_band) H_t_upper <- rbind(x$bekkfit$H_t[-nrow(x$bekkfit$H_t),], x$H_t_upper_conf_band) colnames(x$volatility_lower_conf_band) = colnames(x$volatility_forecast) obj$sigma_t <- rbind(x$bekkfit$sigma_t, x$volatility_lower_conf_band) csd_lower <- extract_csd(obj) VaR_lower <- matrix(NA, nrow = nrow(x$bekkfit$data) + x$n.ahead, ncol = ncol(x$bekkfit$data)) for(i in 1:nrow(obj$H_t)) { for(j in 1: ncol(x$bekkfit$data)){ VaR_lower[i, j] = sqrt(matrix(H_t_lower[i,],N,N)[j,j]) * qtls[j] } } VaR_lower <- as.data.frame(VaR_lower) for (i in 1:ncol(x$bekkfit$data)) { colnames(VaR_lower)[i] <- paste('VaR of', colnames(x$bekkfit$data)[i]) } colnames(x$volatility_upper_conf_band) = colnames(x$volatility_forecast) obj$sigma_t <- rbind(x$bekkfit$sigma_t, x$volatility_upper_conf_band) csd_upper <- extract_csd(obj) VaR_upper <- matrix(NA, nrow = nrow(x$bekkfit$data) + x$n.ahead, ncol = ncol(x$bekkfit$data)) for(i in 1:nrow(obj$H_t)) { for(j in 1: ncol(x$bekkfit$data)){ VaR_upper[i, j] = sqrt(matrix(H_t_upper[i,],N,N)[j,j]) * qtls[j] } } VaR_upper <- as.data.frame(VaR_upper) for (i in 1:ncol(x$bekkfit$data)) { colnames(VaR_upper)[i] <- paste('VaR of', colnames(x$bekkfit$data)[i]) } } else { if(distribution == "t"){ #fit skewed t kurtos = moments::kurtosis(obj$e_t%*%portfolio_weights)-3 df = 6/kurtos+4 if(df <= 4){ df=4.001 } qtls <- qt(1-alpha, df = df)/sqrt(df/(df-2)) } else if(distribution == "empirical"){ qtls <- quantile(obj$e_t%*%portfolio_weights,1-alpha) } else if(distribution == "normal"){ #fit skewed t qtls <-qnorm(1-alpha) } else{ qtls <- qnorm(1-alpha) } VaR <- matrix(NA, nrow = nrow(x$bekkfit$data) + x$n.ahead, ncol = 1) for(i in 1:nrow(obj$H_t)) { #VaR[i,] <- -qnorm(alpha)*sqrt(portfolio_weights%*%matrix(x$H_t[i,], ncol = ncol(x$data))%*%portfolio_weights) VaR[i,] <- sqrt(portfolio_weights%*%matrix(obj$H_t[i,], ncol = ncol(x$bekkfit$data))%*%portfolio_weights)*qtls } # for(i in 1:nrow(obj$H_t)) { # VaR[i,] <- -qnorm(alpha)*sqrt(portfolio_weights%*%matrix(obj$H_t[i,], ncol = ncol(x$bekkfit$data))%*%portfolio_weights) # } VaR <- as.data.frame(VaR) # Confidence intervals VaR_lower <- matrix(NA, nrow = nrow(x$bekkfit$data) + x$n.ahead, ncol = 1) VaR_upper <- matrix(NA, nrow = nrow(x$bekkfit$data) + x$n.ahead, ncol = 1) H_t_lower <- rbind(x$bekkfit$H_t[-nrow(x$bekkfit$H_t),], x$H_t_lower_conf_band) H_t_upper <- rbind(x$bekkfit$H_t[-nrow(x$bekkfit$H_t),], x$H_t_upper_conf_band) # for(i in 1:nrow(obj$H_t)) { # VaR_lower[i,] <- -qnorm(alpha)*sqrt(portfolio_weights%*%matrix(H_t_lower[i,], ncol = ncol(x$bekkfit$data))%*%portfolio_weights) # VaR_upper[i,] <- -qnorm(alpha)*sqrt(portfolio_weights%*%matrix(H_t_upper[i,], ncol = ncol(x$bekkfit$data))%*%portfolio_weights) # } for(i in 1:nrow(obj$H_t)) { VaR_lower[i,] <- sqrt(portfolio_weights%*%matrix(H_t_lower[i,], ncol = ncol(x$bekkfit$data))%*%portfolio_weights)*qtls VaR_upper[i,] <- sqrt(portfolio_weights%*%matrix(H_t_upper[i,], ncol = ncol(x$bekkfit$data))%*%portfolio_weights)*qtls } VaR_lower <- as.data.frame(VaR_lower) VaR_upper <- as.data.frame(VaR_upper) } if (inherits(x$data, "ts")) { VaR <- ts(VaR, start = time(x$bekkfit$data)[1], frequency = frequency(x$bekkfit$data)) } else if(inherits(x$data, "xts") || inherits(x$data, "zoo") ){ VaR <- xts(VaR, order.by = time(x$data[1:nrow(x$data),])) } result <- list(VaR = VaR, VaR_lower = VaR_lower, VaR_upper = VaR_upper, p = p, portfolio_weights = portfolio_weights, n.ahead = x$n.ahead, bekk = x) class(result) <- c('var', 'bekkForecast') return(result) }
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/VaR.R
#' Backtesting via Value-at-Risk (VaR)
#'
#' @description Method for backtesting a model obtained from \link{bekk_fit} in terms of VaR-forecasting accuracy using a rolling window approach.
#'
#' @param x An object of class "bekkFit" from the function \link{bekk_fit}.
#' @param window_length An integer specifying the length of the rolling window.
#' @param p A numerical value that determines the confidence level. The default value is set at 0.99 in accordance with the Basel Regulation.
#' @param portfolio_weights A vector determining the portfolio weights to calculate the portfolio VaR. If set to "NULL", the univariate VaRs for each series are calculated.
#' @param n.ahead Number of periods to predict conditional volatility. Default is a one-period ahead forecast.
#' @param distribution A character string determining the assumed distribution of the residuals. Implemented are "normal", "empirical" and "t". The default is assuming the empirical distribution of the residuals.
#' @param nc Number of cores to be used for parallel computation.
#' @return Returns an S3 class "backtest" object containing the VaR forecast, out-of-sample returns and backtest statistics according to the R-package "GAS".
#' @examples
#' \donttest{
#' \dontshow{Sys.setenv("OMP_THREAD_LIMIT"="1")}
#' data(StocksBonds)
#' obj_spec <- bekk_spec()
#' x1 <- bekk_fit(obj_spec, StocksBonds, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9)
#'
#' # backtesting
#' x2 <- backtest(x1, window_length = 6000, n.ahead = 1, nc = 1)
#' plot(x2)
#' # backtesting using 5 day-ahead forecasts
#' x3 <- backtest(x1, window_length = 6000, n.ahead = 5, nc = 1)
#' plot(x3)
#' # backtesting using 20 day-ahead forecasts and portfolio
#' x4 <- backtest(x1, window_length = 6000, portfolio_weights = c(0.5, 0.5), n.ahead = 20, nc = 1)
#' plot(x4)
#' }
#'
#' @import xts
#' @import stats
#' @import pbapply
#' @importFrom GAS BacktestVaR
#' @import lubridate
#' @export
backtest <- function(x, window_length = 1000, p = 0.99, portfolio_weights = NULL, n.ahead = 1, distribution = "empirical", nc = 1) {
  UseMethod('backtest')
}

#' @export
backtest.bekkFit <- function(x, window_length = 1000, p = 0.99, portfolio_weights = NULL, n.ahead = 1, distribution = "empirical", nc = 1) {
  data <- x$data
  n <- nrow(data)
  N <- ncol(data)
  n_out <- n - window_length
  distribution <- match.arg(distribution, c("empirical", "t", "normal"))
  #out_sample_returns <- x$data[(window_length+1):n,] %*% t(portfolio_weights)
  if (window_length < 500) {
    stop("The supplied 'window_length' must be larger than 500.")
  }
  if (window_length >= n) {
    stop("The supplied 'window_length' exceeds the length of the data.")
  }
  if (window_length < 1000 && distribution == "empirical") {
    stop("Empirical distribution only available for window_length >= 1000.")
  }
  if ((n - window_length) < n.ahead) {
    stop("The supplied 'n.ahead' exceeds the forecasting horizon.")
  }
  if (length(portfolio_weights) != N && !is.null(portfolio_weights)) {
    stop("Portfolio weights do not match the number of time series.")
  }

  if (is.null(portfolio_weights)) {
    hit_rate <- numeric(N)
    out_sample_returns <- x$data[(window_length + 1):n, ]
    #VaR <- matrix(NA, nrow = n_out, ncol = N)
    OoS_indices <- seq(1, n_out, n.ahead)
    wrapper <- function(i) {
      if (n.ahead > 1 && i > (n_out - n.ahead)) {
        n.ahead <- n_out - i + 1
      }
      spec <- x$spec
      fit <- bekk_fit(spec, data[i:(window_length - 1 + i), ])
      predict <- predict(fit, n.ahead = n.ahead, ci = 0.5)
      #VaR[i:(i+n.ahead-1),] = as.matrix(VaR(predict, p = p, portfolio_weights = portfolio_weights)$VaR[(window_length+1):(window_length+n.ahead),])
      res <- as.matrix(VaR(predict, p = p, portfolio_weights = portfolio_weights, distribution = distribution)$VaR[(window_length + 1):(window_length + n.ahead), ])
      return(res)
    }
    if (.Platform$OS.type == "windows") {
      cl <- parallel::makeCluster(nc)
      VaR <- pbapply::pblapply(X = OoS_indices, FUN = wrapper, cl = cl)
      parallel::stopCluster(cl)
    } else {
      VaR <- pbapply::pblapply(X = OoS_indices, FUN = wrapper, cl = nc)
    }
    VaR <- do.call(rbind, VaR)
    for (j in 1:N) {
      hit_rate[j] <- sum(VaR[, j] > out_sample_returns[, j])
    }
    hit_rate <- hit_rate / n_out
    backtests <- list()
    #VaR <- as.data.frame(VaR)
    for (i in 1:N) {
      backtests[[i]] <- suppressWarnings(BacktestVaR(out_sample_returns[, i], VaR[, i], alpha = 1 - p))
      colnames(VaR)[i] <- paste('VaR of', colnames(x$data)[i])
    }
  } else {
    out_sample_returns <- x$data[(window_length + 1):n, ] %*% matrix(portfolio_weights, ncol = 1, nrow = N)
    hit_rate <- 0
    # VaR <- matrix(NA, nrow = n_out, ncol = 1)
    # i = 1
    # spec = x$spec
    # fit <- bekk_fit(spec, data[i:(window_length-1+i),])
    # forecast <- forecast(fit, n.ahead = n.ahead, ci = 0.5)
    # VaR[i:(i+n.ahead-1),] = as.matrix(VaR(forecast, p = p, portfolio_weights = portfolio_weights)$VaR[(window_length+1):(window_length+n.ahead),])
    # hit_rate = hit_rate + sum(VaR[i:(i+n.ahead-1),] > out_sample_returns[i:(i+n.ahead-1),])
    # i = i + n.ahead
    # if(n.ahead > 1 && i >= (n_out-n.ahead)){
    #   n.ahead = 1
    # }
    # hit_rate = hit_rate/n_out
    # backtests = suppressWarnings(GAS::BacktestVaR(out_sample_returns, VaR, alpha = 1 - p))
    # VaR <- as.data.frame(VaR)
    OoS_indices <- seq(1, n_out, n.ahead)
    wrapper <- function(i) {
      if (n.ahead > 1 && i > (n_out - n.ahead)) {
        n.ahead <- n_out - i + 1
      }
      spec <- x$spec
      fit <- bekk_fit(spec, data[i:(window_length - 1 + i), ])
      predict <- predict(fit, n.ahead = n.ahead, ci = 0.5)
      res <- as.matrix(VaR(predict, p = p, portfolio_weights = portfolio_weights, distribution = distribution)$VaR[(window_length + 1):(window_length + n.ahead), ])
      return(res)
    }
    #future::plan(future::multicore(workers = n_cores))
    # cl = future::makeClusterPSOCK(nc)
    # VaR = future.apply::future_lapply(X=OoS_indices, FUN=wrapper)
    if (.Platform$OS.type == "windows") {
      cl <- parallel::makeCluster(nc)
      VaR <- pbapply::pblapply(X = OoS_indices, FUN = wrapper, cl = cl)
      parallel::stopCluster(cl)
    } else {
      VaR <- pbapply::pblapply(X = OoS_indices, FUN = wrapper, cl = nc)
    }
    VaR <- do.call(rbind, VaR)
    hit_rate <- sum(VaR > out_sample_returns)
    hit_rate <- hit_rate / n_out
    backtests <- suppressWarnings(GAS::BacktestVaR(out_sample_returns, VaR, alpha = 1 - p))
    out_sample_returns <- as.data.frame(out_sample_returns)
    VaR <- as.data.frame(VaR)
    colnames(VaR) <- "Portfolio"
    colnames(out_sample_returns) <- "Portfolio"
  }
  if (inherits(x$data, "ts")) {
    VaR <- xts(VaR, order.by = lubridate::as_date(lubridate::date_decimal(time(x$data)[(window_length + 1):n])))
    out_sample_returns <- xts(out_sample_returns, order.by = lubridate::as_date(lubridate::date_decimal(time(x$data)[(window_length + 1):n])))
  } else if (inherits(x$data, "xts") || inherits(x$data, "zoo")) {
    VaR <- xts(VaR, order.by = time(x$data[(window_length + 1):n, ]))
    out_sample_returns <- xts(out_sample_returns, order.by = time(x$data[(window_length + 1):n, ]))
  }
  result <- list(
    VaR = VaR,
    out_sample_returns = out_sample_returns,
    hit_rate = hit_rate,
    backtests = backtests,
    portfolio_weights = portfolio_weights,
    bekkFit = x,
    p = p,
    window_length = window_length
  )
  class(result) <- c('backtest')
  return(result)
}
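
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the package: how the components of the
# "backtest" object returned above might be inspected. The object names
# (fit, bt) are hypothetical; StocksBonds is the example data set shipped
# with the package. Wrapped in if (FALSE) so sourcing this file never runs it.
# ---------------------------------------------------------------------------
if (FALSE) {
  data(StocksBonds)
  fit <- bekk_fit(bekk_spec(), StocksBonds)
  bt <- backtest(fit, window_length = 6000, p = 0.99, n.ahead = 1, nc = 1)

  # Share of days on which the realized return exceeded the forecast VaR;
  # for a well-calibrated model this should be close to 1 - p = 0.01.
  bt$hit_rate

  # Backtest statistics computed by GAS::BacktestVaR for each series.
  bt$backtests

  # Rolling out-of-sample VaR forecasts next to the realized returns.
  head(bt$VaR)
  head(bt$out_sample_returns)
}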
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/backtest.R
#' Estimating multivariate BEKK-type volatility models
#'
#' @description Method for fitting a variety of N-dimensional BEKK models.
#'
#' @param spec An object of class "bekkSpec" from function \link{bekk_spec}.
#' @param data A multivariate data object. Can be a numeric matrix or ts/xts/zoo object.
#' @param QML_t_ratios Logical. If QML_t_ratios = 'TRUE', the t-ratios of the BEKK parameter matrices
#' are exactly calculated via second order derivatives.
#' @param max_iter Maximum number of BHHH algorithm iterations.
#' @param crit Determines the precision of the BHHH algorithm.
#' @return Returns an S3 class "bekkFit" object containing the estimated parameters, t-values, standard errors and volatility process of the model defined by the "bekkSpec" object.
#'
#' @details The BEKK optimization routine is based on the Berndt-Hall-Hall-Hausman (BHHH) algorithm and is inspired by the study of Hafner and Herwartz (2008).
#' The authors provide analytical formulas for the score and Hessian of several MGARCH models in a QML framework and show that analytical derivations significantly outperform numerical methods.
#'
#' @references Hafner and Herwartz (2008). Analytical quasi maximum likelihood inference in multivariate volatility models. Metrika, 67, 219-239.
#'
#' @examples
#' \donttest{
#'
#' data(StocksBonds)
#'
#' # Fitting a symmetric BEKK model
#' obj_spec <- bekk_spec()
#' x1 <- bekk_fit(obj_spec, StocksBonds, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9)
#'
#' summary(x1)
#'
#' plot(x1)
#'
#' # Fitting an asymmetric BEKK model
#' obj_spec <- bekk_spec(model = list(type = "bekk", asymmetric = TRUE))
#' x1 <- bekk_fit(obj_spec, StocksBonds)
#'
#' summary(x1)
#'
#' plot(x1)
#'
#' # Fitting a symmetric diagonal BEKK model
#' obj_spec <- bekk_spec(model = list(type = "dbekk", asymmetric = FALSE))
#' x1 <- bekk_fit(obj_spec, StocksBonds, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9)
#'
#' summary(x1)
#'
#' plot(x1)
#'
#' # Fitting a symmetric scalar BEKK model
#' obj_spec <- bekk_spec(model = list(type = "sbekk", asymmetric = FALSE))
#' x1 <- bekk_fit(obj_spec, StocksBonds, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9)
#'
#' summary(x1)
#'
#' plot(x1)
#'
#' }
#' @import xts
#' @import stats
#' @import utils
#' @export
bekk_fit <- function(spec, data, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9) {
  if (!inherits(spec, 'bekkSpec')) {
    stop('Please provide an object of class "bekkSpec" for spec.')
  }
  if (any(is.na(data))) {
    stop("\nNAs in data.\n")
  }
  if (is.null(ncol(data))) {
    stop("The data matrix should contain at least two variables.")
  }
  if (ncol(data) < 2) {
    stop("The data matrix should contain at least two variables.")
  }
  if (is.null(colnames(data))) {
    colnames(data) <- paste("y", 1:ncol(data), sep = "")
  }
  UseMethod('bekk_fit')
}

#' @export
bekk_fit.bekk <- function(spec, data, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9) {
  init_values <- spec$init_values
  N <- ncol(data)
  if (!is.numeric(init_values)) {
    if (is.null(init_values)) {
      theta <- gridSearch_BEKK(data)
      theta <- theta[[1]]
    } else if (init_values == 'random') {
      cat('Generating starting values \n')
      theta <- random_grid_search_BEKK(data)
      theta <- theta[[1]]
    } else if (init_values == 'simple') {
      uncond_var <- crossprod(data) / nrow(data)
      A <- matrix(0, ncol = N, nrow = N)
      G <- matrix(0, ncol = N, nrow = N)
      C <- matrix(0, ncol = N, nrow = N)
      #th0=numeric(2*n^2+n*(n+1)/2)
      diag(A) <- 0.3
      diag(G) <- 0.92
      diag(C) <- 0.05 * diag(uncond_var)
      for (i in 1:N) {
        for (j in seq(i, N)) {
          cij <- uncond_var[i, j] / sqrt(uncond_var[i, i] * uncond_var[j, j])
          C[i, j] <- cij * sqrt(C[i, i] * C[j, j])
          C[j, i] <- C[i, j]
        }
      }
      C <- t(chol(C))
      C0 <- C[, 1]
      if (N > 2) {
        for (i in 2:(N - 1)) {
          C0 <- c(C0, C[i:N, i])
        }
      }
      C0 <- c(C0, C[N, N])
      theta <- c(C0, c(A), c(G))
    }
  } else {
    if (length(init_values) != 2 * N^2 + N * (N + 1) / 2) {
      stop('Number of initial parameters does not match dimension of data.')
    }
    theta <- init_values
  }
  theta <- matrix(theta, ncol = 1)

  params <- bhh_bekk(data, theta, max_iter, crit)

  if (QML_t_ratios == TRUE) {
    tratios <- QML_t_ratios(params$theta, data)
    tratios_mat <- coef_mat(tratios, N)
    sds <- QML_sd(params$theta, data)
    sd_mat <- coef_mat(sds, N)
  } else {
    tratios_mat <- coef_mat(params$t_val, N)
    sd_mat <- coef_mat(params$sd, N)
  }
  param_mat <- coef_mat(params$theta, N)

  var_process <- sigma_bekk(data, t(param_mat$c0), param_mat$a, param_mat$g)
  sigma_t <- as.data.frame(var_process$sigma_t)
  colnames(sigma_t) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t)[k2] <- paste('Conditional standard deviation of \n', colnames(data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t)[k2] <- paste('Conditional correlation of \n', colnames(data)[i], ' and ', colnames(data)[j])
        k2 <- k2 + 1
      }
    }
  }
  for (i in 1:nrow(sigma_t)) {
    tm <- matrix(unlist(sigma_t[i, ]), N, N, byrow = TRUE)
    tm2 <- sqrt(solve(diag(diag(tm)))) %*% tm %*% sqrt(solve(diag(diag(tm))))
    diag(tm2) <- sqrt(diag(tm))
    sigma_t[i, ] <- c(tm2)
  }
  elim <- elimination_mat(N)
  sigma_t <- sigma_t[, which(colSums(elim) == 1)]
  if (inherits(data, "ts")) {
    sigma_t <- ts(sigma_t, start = time(data)[1], frequency = frequency(data))
    H_t <- ts(var_process$sigma_t, start = time(data)[1], frequency = frequency(data))
    e_t <- ts(var_process$e_t, start = time(data)[1], frequency = frequency(data))
  } else if (inherits(data, "xts") || inherits(data, "zoo")) {
    sigma_t <- xts(sigma_t, order.by = time(data))
    H_t <- xts(var_process$sigma_t, order.by = time(data))
    e_t <- xts(var_process$e_t, order.by = time(data))
  } else {
    H_t <- var_process$sigma_t
    e_t <- var_process$e_t
  }

  # Final check if BEKK is valid
  BEKK_valid <- valid_bekk(param_mat$c0, param_mat$a, param_mat$g)

  params$likelihood_iter <- params$likelihood_iter[params$likelihood_iter != 0]

  result <- list(C0 = param_mat$c0, A = param_mat$a, G = param_mat$g,
                 C0_t = tratios_mat$c0, A_t = tratios_mat$a, G_t = tratios_mat$g,
                 C0_sd = sd_mat$c0, A_sd = sd_mat$a, G_sd = sd_mat$g,
                 theta = params$theta,
                 log_likelihood = params$likelihood,
                 BEKK_valid = BEKK_valid,
                 sigma_t = sigma_t,
                 H_t = H_t,
                 e_t = e_t,
                 Second_moments_of_residuals = cov(var_process$e_t),
                 iter = params$iter,
                 likelihood_iter = params$likelihood_iter,
                 asymmetric = FALSE,
                 data = data,
                 spec = spec,
                 QML_t_ratios = QML_t_ratios)
  class(result) <- c('bekkFit', 'bekk')
  result$AIC <- AIC(result)$AIC
  result$BIC <- BIC(result)$BIC
  result$Portmanteau.test <- portmanteau.test(result, lags = 5)
  return(result)
}

#' @export
bekk_fit.bekka <- function(spec, data, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9) {
  init_values <- spec$init_values
  N <- ncol(data)
  if (is.null(spec$model$signs)) {
    spec$model$signs <- matrix(rep(-1, N), ncol = 1)
  }
  if (length(spec$model$signs) != N) {
    stop('Length of "signs" does not match dimension of data.')
  }
  if (!is.numeric(init_values)) {
    if (is.null(init_values)) {
      theta <- gridSearch_asymmetricBEKK(data, spec$model$signs)
      theta <- theta[[1]]
    } else if (init_values == 'random') {
      cat('Generating starting values \n')
      theta <- random_grid_search_asymmetric_BEKK(data, spec$model$signs)[[1]]
    } else if (init_values == 'simple') {
      uncond_var <- crossprod(data) / nrow(data)
      A <- matrix(0, ncol = N, nrow = N)
      B <- matrix(0, ncol = N, nrow = N)
      G <- matrix(0, ncol = N, nrow = N)
      C <- matrix(0, ncol = N, nrow = N)
      #th0=numeric(2*n^2+n*(n+1)/2)
      diag(A) <- 0.25
      diag(B) <- 0.05
      diag(G) <- 0.92
      diag(C) <- 0.05 * diag(uncond_var)
      for (i in 1:N) {
        for (j in seq(i, N)) {
          cij <- uncond_var[i, j] / sqrt(uncond_var[i, i] * uncond_var[j, j])
          C[i, j] <- cij * sqrt(C[i, i] * C[j, j])
          C[j, i] <- C[i, j]
        }
      }
      C <- t(chol(C))
      C0 <- C[, 1]
      if (N > 2) {
        for (i in 2:(N - 1)) {
          C0 <- c(C0, C[i:N, i])
        }
      }
      C0 <- c(C0, C[N, N])
      # All three parameter matrices (A, B, G) enter the asymmetric BEKK,
      # matching the 3 * N^2 + N * (N + 1) / 2 parameters expected below.
      theta <- c(C0, c(A), c(B), c(G))
    }
  } else {
    if (length(init_values) != 3 * N^2 + N * (N + 1) / 2) {
      stop('Number of initial parameters does not match dimension of data.')
    }
    theta <- init_values
  }
  theta <- matrix(theta, ncol = 1)

  params <- bhh_asymm_bekk(data, theta, max_iter, crit, spec$model$signs)

  if (QML_t_ratios == TRUE) {
    tratios <- QML_t_ratios_asymm(params$theta, data, spec$model$signs)
    tratios_mat <- coef_mat_asymm(tratios, N)
    sds <- QML_sd_asymm(params$theta, data, spec$model$signs)
    sd_mat <- coef_mat_asymm(sds, N)
  } else {
    tratios_mat <- coef_mat_asymm(params$t_val, N)
    sd_mat <- coef_mat_asymm(params$sd, N)
  }
  param_mat <- coef_mat_asymm(params$theta, N)

  var_process <- sigma_bekk_asymm(data, t(param_mat$c0), param_mat$a, param_mat$b, param_mat$g, spec$model$signs)
  sigma_t <- as.data.frame(var_process$sigma_t)
  colnames(sigma_t) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t)[k2] <- paste('Conditional SD of', colnames(data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t)[k2] <- paste('Conditional correlation of', colnames(data)[i], ' and ', colnames(data)[j])
        k2 <- k2 + 1
      }
    }
  }
  for (i in 1:nrow(sigma_t)) {
    tm <- matrix(unlist(sigma_t[i, ]), N, N, byrow = TRUE)
    tm2 <- sqrt(solve(diag(diag(tm)))) %*% tm %*% sqrt(solve(diag(diag(tm))))
    diag(tm2) <- sqrt(diag(tm))
    sigma_t[i, ] <- c(tm2)
  }
  elim <- elimination_mat(N)
  sigma_t <- sigma_t[, which(colSums(elim) == 1)]
  if (inherits(data, "ts")) {
    sigma_t <- ts(sigma_t, start = time(data)[1], frequency = frequency(data))
    H_t <- ts(var_process$sigma_t, start = time(data)[1], frequency = frequency(data))
    e_t <- ts(var_process$e_t, start = time(data)[1], frequency = frequency(data))
  } else if (inherits(data, "xts") || inherits(data, "zoo")) {
    sigma_t <- xts(sigma_t, order.by = time(data))
    H_t <- xts(var_process$sigma_t, order.by = time(data))
    e_t <- xts(var_process$e_t, order.by = time(data))
  } else {
    H_t <- var_process$sigma_t
    e_t <- var_process$e_t
  }

  # Final check if BEKK is valid
  BEKK_valid <- valid_asymm_bekk(param_mat$c0, param_mat$a, param_mat$b, param_mat$g, data, spec$model$signs)
  expected_signs <- expected_indicator_value(data, spec$model$signs)

  params$likelihood_iter <- params$likelihood_iter[params$likelihood_iter != 0]

  result <- list(C0 = param_mat$c0, A = param_mat$a, B = param_mat$b, G = param_mat$g,
                 C0_t = tratios_mat$c0, A_t = tratios_mat$a, B_t = tratios_mat$b, G_t = tratios_mat$g,
                 C0_sd = sd_mat$c0, A_sd = sd_mat$a, B_sd = sd_mat$b, G_sd = sd_mat$g,
                 theta = params$theta,
                 signs = spec$model$signs,
                 log_likelihood = params$likelihood,
                 BEKK_valid = BEKK_valid,
                 sigma_t = sigma_t,
                 H_t = H_t,
                 e_t = e_t,
                 Second_moments_of_residuals = cov(var_process$e_t),
                 iter = params$iter,
                 likelihood_iter = params$likelihood_iter,
                 asymmetric = TRUE,
                 expected_signs = expected_signs,
                 data = data,
                 spec = spec,
                 QML_t_ratios = QML_t_ratios)
  class(result) <- c('bekkFit', 'bekka')
  result$AIC <- AIC(result)$AIC
  result$BIC <- BIC(result)$BIC
  result$Portmanteau.test <- portmanteau.test(result, lags = 5)
  return(result)
}

#' @export
bekk_fit.dbekk <- function(spec, data, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9) {
  init_values <- spec$init_values
  N <- ncol(data)
  if (!is.numeric(init_values)) {
    if (is.null(init_values)) {
      theta <- gridSearch_dBEKK(data)
      theta <- theta[[1]]
    } else if (init_values == 'random') {
      cat('Generating starting values \n')
      theta <- random_grid_search_dBEKK(data)
      theta <- theta[[1]]
    }
  } else {
    if (length(init_values) != 2 * N + N * (N + 1) / 2) {
      stop('Number of initial parameters does not match dimension of data.')
    }
    theta <- init_values
  }
  theta <- matrix(theta, ncol = 1)

  params <- bhh_dbekk(data, theta, max_iter, crit)

  if (QML_t_ratios == TRUE) {
    tratios <- QML_t_ratios_dbekk(params$theta, data)
    tratios_mat <- coef_mat_diagonal(tratios, N)
    sds <- QML_sd_dbekk(params$theta, data)
    sd_mat <- coef_mat_diagonal(sds, N)
  } else {
    tratios_mat <- coef_mat_diagonal(params$t_val, N)
    sd_mat <- coef_mat_diagonal(params$sd, N)
  }
  param_mat <- coef_mat_diagonal(params$theta, N)

  var_process <- sigma_bekk(data, t(param_mat$c0), param_mat$a, param_mat$g)
  sigma_t <- as.data.frame(var_process$sigma_t)
  colnames(sigma_t) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t)[k2] <- paste('Conditional standard deviation of \n', colnames(data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t)[k2] <- paste('Conditional correlation of \n', colnames(data)[i], ' and ', colnames(data)[j])
        k2 <- k2 + 1
      }
    }
  }
  for (i in 1:nrow(sigma_t)) {
    tm <- matrix(unlist(sigma_t[i, ]), N, N, byrow = TRUE)
    tm2 <- sqrt(solve(diag(diag(tm)))) %*% tm %*% sqrt(solve(diag(diag(tm))))
    diag(tm2) <- sqrt(diag(tm))
    sigma_t[i, ] <- c(tm2)
  }
  elim <- elimination_mat(N)
  sigma_t <- sigma_t[, which(colSums(elim) == 1)]
  if (inherits(data, "ts")) {
    sigma_t <- ts(sigma_t, start = time(data)[1], frequency = frequency(data))
    H_t <- ts(var_process$sigma_t, start = time(data)[1], frequency = frequency(data))
    e_t <- ts(var_process$e_t, start = time(data)[1], frequency = frequency(data))
  } else if (inherits(data, "xts") || inherits(data, "zoo")) {
    sigma_t <- xts(sigma_t, order.by = time(data))
    H_t <- xts(var_process$sigma_t, order.by = time(data))
    e_t <- xts(var_process$e_t, order.by = time(data))
  } else {
    H_t <- var_process$sigma_t
    e_t <- var_process$e_t
  }

  # Final check if BEKK is valid
  BEKK_valid <- valid_bekk(param_mat$c0, param_mat$a, param_mat$g)

  params$likelihood_iter <- params$likelihood_iter[params$likelihood_iter != 0]

  result <- list(C0 = param_mat$c0, A = param_mat$a, G = param_mat$g,
                 C0_t = tratios_mat$c0, A_t = tratios_mat$a, G_t = tratios_mat$g,
                 C0_sd = sd_mat$c0, A_sd = sd_mat$a, G_sd = sd_mat$g,
                 theta = params$theta,
                 log_likelihood = params$likelihood,
                 BEKK_valid = BEKK_valid,
                 sigma_t = sigma_t,
                 H_t = H_t,
                 e_t = e_t,
                 Second_moments_of_residuals = cov(var_process$e_t),
                 iter = params$iter,
                 likelihood_iter = params$likelihood_iter,
                 asymmetric = FALSE,
                 data = data,
                 spec = spec,
                 QML_t_ratios = QML_t_ratios)
  class(result) <- c('bekkFit', 'dbekk')
  result$AIC <- AIC(result)$AIC
  result$BIC <- BIC(result)$BIC
  result$Portmanteau.test <- portmanteau.test(result, lags = 5)
  return(result)
}

#' @export
bekk_fit.dbekka <- function(spec, data, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9) {
  init_values <- spec$init_values
  N <- ncol(data)
  if (is.null(spec$model$signs)) {
    spec$model$signs <- matrix(rep(-1, N), ncol = 1)
  }
  if (length(spec$model$signs) != N) {
    stop('Length of "signs" does not match dimension of data.')
  }
  if (!is.numeric(init_values)) {
    if (is.null(init_values)) {
      theta <- gridSearch_asymmetricdBEKK(data, spec$model$signs)
      theta <- theta[[1]]
    } else if (init_values == 'random') {
      cat('Generating starting values \n')
      theta <- random_grid_search_asymmetric_dBEKK(data, spec$model$signs)[[1]]
    }
  } else {
    if (length(init_values) != 3 * N + N * (N + 1) / 2) {
      stop('Number of initial parameters does not match dimension of data.')
    }
    theta <- init_values
  }
  theta <- matrix(theta, ncol = 1)

  params <- bhh_asymm_dbekk(data, theta, max_iter, crit, spec$model$signs)

  if (QML_t_ratios == TRUE) {
    tratios <- QML_t_ratios_dbekka(params$theta, data, spec$model$signs)
    tratios_mat <- coef_mat_asymm_diagonal(tratios, N)
    sds <- QML_sd_dbekka(params$theta, data, spec$model$signs)
    sd_mat <- coef_mat_asymm_diagonal(sds, N)
  } else {
    tratios_mat <- coef_mat_asymm_diagonal(params$t_val, N)
    sd_mat <- coef_mat_asymm_diagonal(params$sd, N)
  }
  param_mat <- coef_mat_asymm_diagonal(params$theta, N)

  var_process <- sigma_bekk_asymm(data, t(param_mat$c0), param_mat$a, param_mat$b, param_mat$g, spec$model$signs)
  sigma_t <- as.data.frame(var_process$sigma_t)
  colnames(sigma_t) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t)[k2] <- paste('Conditional SD of', colnames(data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t)[k2] <- paste('Conditional correlation of', colnames(data)[i], ' and ', colnames(data)[j])
        k2 <- k2 + 1
      }
    }
  }
  for (i in 1:nrow(sigma_t)) {
    tm <- matrix(unlist(sigma_t[i, ]), N, N, byrow = TRUE)
    tm2 <- sqrt(solve(diag(diag(tm)))) %*% tm %*% sqrt(solve(diag(diag(tm))))
    diag(tm2) <- sqrt(diag(tm))
    sigma_t[i, ] <- c(tm2)
  }
  elim <- elimination_mat(N)
  sigma_t <- sigma_t[, which(colSums(elim) == 1)]
  if (inherits(data, "ts")) {
    sigma_t <- ts(sigma_t, start = time(data)[1], frequency = frequency(data))
    H_t <- ts(var_process$sigma_t, start = time(data)[1], frequency = frequency(data))
    e_t <- ts(var_process$e_t, start = time(data)[1], frequency = frequency(data))
  } else if (inherits(data, "xts") || inherits(data, "zoo")) {
    sigma_t <- xts(sigma_t, order.by = time(data))
    H_t <- xts(var_process$sigma_t, order.by = time(data))
    e_t <- xts(var_process$e_t, order.by = time(data))
  } else {
    H_t <- var_process$sigma_t
    e_t <- var_process$e_t
  }

  # Final check if BEKK is valid
  BEKK_valid <- valid_asymm_bekk(param_mat$c0, param_mat$a, param_mat$b, param_mat$g, data, spec$model$signs)
  expected_signs <- expected_indicator_value(data, spec$model$signs)

  params$likelihood_iter <- params$likelihood_iter[params$likelihood_iter != 0]

  result <- list(C0 = param_mat$c0, A = param_mat$a, B = param_mat$b, G = param_mat$g,
                 C0_t = tratios_mat$c0, A_t = tratios_mat$a, B_t = tratios_mat$b, G_t = tratios_mat$g,
                 C0_sd = sd_mat$c0, A_sd = sd_mat$a, B_sd = sd_mat$b, G_sd = sd_mat$g,
                 theta = params$theta,
                 signs = spec$model$signs,
                 log_likelihood = params$likelihood,
                 BEKK_valid = BEKK_valid,
                 sigma_t = sigma_t,
                 H_t = H_t,
                 e_t = e_t,
                 Second_moments_of_residuals = cov(var_process$e_t),
                 iter = params$iter,
                 likelihood_iter = params$likelihood_iter,
                 asymmetric = TRUE,
                 expected_signs = expected_signs,
                 data = data,
                 spec = spec,
                 QML_t_ratios = QML_t_ratios)
  class(result) <- c('bekkFit', 'dbekka')
  result$AIC <- AIC(result)$AIC
  result$BIC <- BIC(result)$BIC
  result$Portmanteau.test <- portmanteau.test(result, lags = 5)
  return(result)
}

#' @export
bekk_fit.sbekk <- function(spec, data, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9) {
  init_values <- spec$init_values
  N <- ncol(data)
  if (!is.numeric(init_values)) {
    if (is.null(init_values)) {
      theta <- gridSearch_sBEKK(data)
      theta <- theta[[1]]
    } else if (init_values == 'random') {
      cat('Generating starting values \n')
      theta <- random_grid_search_sBEKK(data)
      theta <- theta[[1]]
    }
  } else {
    if (length(init_values) != 2 + N * (N + 1) / 2) {
      stop('Number of initial parameters does not match dimension of data.')
    }
    theta <- init_values
  }
  theta <- matrix(theta, ncol = 1)

  params <- bhh_sbekk(data, theta, max_iter, crit)

  if (QML_t_ratios == TRUE) {
    tratios <- QML_t_ratios_sbekk(params$theta, data)
    tratios_mat <- coef_mat_scalar(tratios, N)
    sds <- QML_sd_sbekk(params$theta, data)
    sd_mat <- coef_mat_scalar(sds, N)
  } else {
    tratios_mat <- coef_mat_scalar(params$t_val, N)
    sd_mat <- coef_mat_scalar(params$sd, N)
  }
  param_mat <- coef_mat_scalar(params$theta, N)

  var_process <- sigma_sbekk(data, t(param_mat$c0), param_mat$a, param_mat$g)
  sigma_t <- as.data.frame(var_process$sigma_t)
  colnames(sigma_t) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t)[k2] <- paste('Conditional standard deviation of \n', colnames(data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t)[k2] <- paste('Conditional correlation of \n', colnames(data)[i], ' and ', colnames(data)[j])
        k2 <- k2 + 1
      }
    }
  }
  for (i in 1:nrow(sigma_t)) {
    tm <- matrix(unlist(sigma_t[i, ]), N, N, byrow = TRUE)
    tm2 <- sqrt(solve(diag(diag(tm)))) %*% tm %*% sqrt(solve(diag(diag(tm))))
    diag(tm2) <- sqrt(diag(tm))
    sigma_t[i, ] <- c(tm2)
  }
  elim <- elimination_mat(N)
  sigma_t <- sigma_t[, which(colSums(elim) == 1)]
  if (inherits(data, "ts")) {
    sigma_t <- ts(sigma_t, start = time(data)[1], frequency = frequency(data))
    H_t <- ts(var_process$sigma_t, start = time(data)[1], frequency = frequency(data))
    e_t <- ts(var_process$e_t, start = time(data)[1], frequency = frequency(data))
  } else if (inherits(data, "xts") || inherits(data, "zoo")) {
    sigma_t <- xts(sigma_t, order.by = time(data))
    H_t <- xts(var_process$sigma_t, order.by = time(data))
    e_t <- xts(var_process$e_t, order.by = time(data))
  } else {
    H_t <- var_process$sigma_t
    e_t <- var_process$e_t
  }

  # Final check if BEKK is valid
  BEKK_valid <- valid_sbekk(param_mat$c0, param_mat$a, param_mat$g)

  params$likelihood_iter <- params$likelihood_iter[params$likelihood_iter != 0]

  result <- list(C0 = param_mat$c0, a = param_mat$a, g = param_mat$g,
                 C0_t = tratios_mat$c0, a_t = tratios_mat$a, g_t = tratios_mat$g,
                 C0_sd = sd_mat$c0, a_sd = sd_mat$a, g_sd = sd_mat$g,
                 theta = params$theta,
                 log_likelihood = params$likelihood,
                 BEKK_valid = BEKK_valid,
                 sigma_t = sigma_t,
                 H_t = H_t,
                 e_t = e_t,
                 Second_moments_of_residuals = cov(var_process$e_t),
                 iter = params$iter,
                 likelihood_iter = params$likelihood_iter,
                 asymmetric = FALSE,
                 data = data,
                 spec = spec,
                 QML_t_ratios = QML_t_ratios)
  class(result) <- c('bekkFit', 'sbekk')
  result$AIC <- AIC(result)$AIC
  result$BIC <- BIC(result)$BIC
  result$Portmanteau.test <- portmanteau.test(result, lags = 5)
  return(result)
}

#' @export
bekk_fit.sbekka <- function(spec, data, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9) {
  init_values <- spec$init_values
  N <- ncol(data)
  if (is.null(spec$model$signs)) {
    spec$model$signs <- matrix(rep(-1, N), ncol = 1)
  }
  if (length(spec$model$signs) != N) {
    stop('Length of "signs" does not match dimension of data.')
  }
  if (!is.numeric(init_values)) {
    if (is.null(init_values)) {
      theta <- gridSearch_asymmetricsBEKK(data, spec$model$signs)
      theta <- theta[[1]]
    } else if (init_values == 'random') {
      cat('Generating starting values \n')
      theta <- random_grid_search_asymmetric_sBEKK(data, spec$model$signs)[[1]]
    }
  } else {
    if (length(init_values) != 3 + N * (N + 1) / 2) {
      stop('Number of initial parameters does not match dimension of data.')
    }
    theta <- init_values
  }
  theta <- matrix(theta, ncol = 1)

  params <- bhh_asymm_sbekk(data, theta, max_iter, crit, spec$model$signs)

  if (QML_t_ratios == TRUE) {
    tratios <- QML_t_ratios_sbekk_asymm(params$theta, data, spec$model$signs)
    tratios_mat <- coef_mat_asymm_scalar(tratios, N)
    sds <- QML_sd_sbekk_asymm(params$theta, data, spec$model$signs)
    sd_mat <- coef_mat_asymm_scalar(sds, N)
  } else {
    tratios_mat <- coef_mat_asymm_scalar(params$t_val, N)
    sd_mat <- coef_mat_asymm_scalar(params$sd, N)
  }
  param_mat <- coef_mat_asymm_scalar(params$theta, N)

  var_process <- sigma_sbekk_asymm(data, t(param_mat$c0), param_mat$a, param_mat$b, param_mat$g, spec$model$signs)
  sigma_t <- as.data.frame(var_process$sigma_t)
  colnames(sigma_t) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t)[k2] <- paste('Conditional SD of', colnames(data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t)[k2] <- paste('Conditional correlation of', colnames(data)[i], ' and ', colnames(data)[j])
        k2 <- k2 + 1
      }
    }
  }
  for (i in 1:nrow(sigma_t)) {
    tm <- matrix(unlist(sigma_t[i, ]), N, N, byrow = TRUE)
    tm2 <- sqrt(solve(diag(diag(tm)))) %*% tm %*% sqrt(solve(diag(diag(tm))))
    diag(tm2) <- sqrt(diag(tm))
    sigma_t[i, ] <- c(tm2)
  }
  elim <- elimination_mat(N)
  sigma_t <- sigma_t[, which(colSums(elim) == 1)]
  if (inherits(data, "ts")) {
    sigma_t <- ts(sigma_t, start = time(data)[1], frequency = frequency(data))
    H_t <- ts(var_process$sigma_t, start = time(data)[1], frequency = frequency(data))
    e_t <- ts(var_process$e_t, start = time(data)[1], frequency = frequency(data))
  } else if (inherits(data, "xts") || inherits(data, "zoo")) {
    sigma_t <- xts(sigma_t, order.by = time(data))
    H_t <- xts(var_process$sigma_t, order.by = time(data))
    e_t <- xts(var_process$e_t, order.by = time(data))
  } else {
    H_t <- var_process$sigma_t
    e_t <- var_process$e_t
  }

  # Final check if BEKK is valid
  BEKK_valid <- valid_asymm_sbekk(param_mat$c0, param_mat$a, param_mat$b, param_mat$g, data, spec$model$signs)
  expected_signs <- expected_indicator_value(data, spec$model$signs)

  params$likelihood_iter <- params$likelihood_iter[params$likelihood_iter != 0]

  result <- list(C0 = param_mat$c0, a = param_mat$a, b = param_mat$b, g = param_mat$g,
                 C0_t = tratios_mat$c0, a_t = tratios_mat$a, b_t = tratios_mat$b, g_t = tratios_mat$g,
                 C0_sd = sd_mat$c0, a_sd = sd_mat$a, b_sd = sd_mat$b, g_sd = sd_mat$g,
                 theta = params$theta,
                 signs = spec$model$signs,
                 log_likelihood = params$likelihood,
                 BEKK_valid = BEKK_valid,
                 sigma_t = sigma_t,
                 H_t = H_t,
                 e_t = e_t,
                 Second_moments_of_residuals = cov(var_process$e_t),
                 iter = params$iter,
                 likelihood_iter = params$likelihood_iter,
                 asymmetric = TRUE,
                 expected_signs = expected_signs,
                 data = data,
                 spec = spec,
                 QML_t_ratios = QML_t_ratios)
  class(result) <- c('bekkFit', 'sbekka')
  result$AIC <- AIC(result)$AIC
  result$BIC <- BIC(result)$BIC
  result$Portmanteau.test <- portmanteau.test(result, lags = 5)
  return(result)
}
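
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the package: since every method above
# returns a "bekkFit" object with the same fields, fitted variants can be
# compared side by side via the logLik() method, which tabulates the number
# of parameters, log-likelihood, AIC and BIC. Object names are hypothetical;
# wrapped in if (FALSE) so sourcing this file never runs it.
# ---------------------------------------------------------------------------
if (FALSE) {
  data(StocksBonds)
  fit_bekk  <- bekk_fit(bekk_spec(), StocksBonds)
  fit_dbekk <- bekk_fit(bekk_spec(model = list(type = "dbekk", asymmetric = FALSE)), StocksBonds)
  fit_sbekk <- bekk_fit(bekk_spec(model = list(type = "sbekk", asymmetric = FALSE)), StocksBonds)

  # One row per model: df, LLV, AIC, BIC (smaller AIC/BIC is preferred).
  logLik(fit_bekk, fit_dbekk, fit_sbekk)
}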
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/bekk_fit.R
#' bekkFit method
#'
#' @description Generic 'bekkFit' methods. More details on 'bekkFit' are described in \link{bekk_fit}.
#'
#' @param x An object of class "bekkFit" from function \link{bekk_fit}.
#' @param object An object of class "bekkFit" from function \link{bekk_fit}.
#' @param k Numeric value, the penalty per parameter for AIC to be used; the default k = 2 is the classical AIC.
#' @param ... Further arguments to be passed to and from other methods.
#'
#' @examples
#' \donttest{
#'
#' data(StocksBonds)
#'
#' # Fitting a symmetric BEKK model
#' obj_spec <- bekk_spec()
#' x1 <- bekk_fit(obj_spec, StocksBonds, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9)
#'
#' logLik(x1)
#' }
#' @import xts
#' @import stats
#' @rdname bekk_fit_methods
#' @export
print.bekkFit <- function(x, ...) {
  bekkObject <- x
  if (all(class(bekkObject) != 'var')) {
    if (any(class(bekkObject) == 'bekk')) {
      cat(paste("\n", "BEKK estimation results", "\n", sep = ""))
      underScore <- paste(rep("-", nchar("BEKK estimation results")), collapse = "")
    } else if (any(class(bekkObject) == 'bekka')) {
      cat(paste("\n", "Asymmetric BEKK estimation results", "\n", sep = ""))
      underScore <- paste(rep("-", nchar("Asymmetric BEKK estimation results")), collapse = "")
    } else if (any(class(bekkObject) == 'dbekk')) {
      cat(paste("\n", "Diagonal BEKK estimation results", "\n", sep = ""))
      underScore <- paste(rep("-", nchar("Diagonal BEKK estimation results")), collapse = "")
    } else if (any(class(bekkObject) == 'dbekka')) {
      cat(paste("\n", "Asymmetric diagonal BEKK estimation results", "\n", sep = ""))
      underScore <- paste(rep("-", nchar("Asymmetric diagonal BEKK estimation results")), collapse = "")
    } else if (any(class(bekkObject) == 'sbekk')) {
      cat(paste("\n", "Scalar BEKK estimation results", "\n", sep = ""))
      underScore <- paste(rep("-", nchar("Scalar BEKK estimation results")), collapse = "")
    } else if (any(class(bekkObject) == 'sbekka')) {
      cat(paste("\n", "Asymmetric scalar BEKK estimation results", "\n", sep = ""))
      underScore <- paste(rep("-", nchar("Asymmetric scalar BEKK estimation results")), collapse = "")
    }
    cat(underScore)
    cat("\nLog-likelihood: ")
    cat(bekkObject$log_likelihood)
    cat("\nBEKK model stationary: ")
    cat(bekkObject$BEKK_valid)
    cat("\nNumber of BHHH iterations: ")
    cat(bekkObject$iter)
    cat("\nAIC: ")
    cat(bekkObject$AIC)
    cat("\nBIC: ")
    cat(bekkObject$BIC)
    cat("\n")
  }
  invisible(bekkObject)
}

#' @rdname bekk_fit_methods
#' @export
residuals.bekkFit <- function(object, ...) {
  object$e_t
}

#' @noRd
AIC.bekkFit <- function(object, ..., k = 2) {
  x <- object
  AICinner <- function(e) {
    N <- ncol(e$data)
    # AIC = k * (number of free parameters) - 2 * log-likelihood,
    # where the parameter count depends on the model type.
    if (any(class(e) == 'bekk')) {
      aic <- k * (2 * N^2 + N * (N + 1) / 2) - 2 * llv(e)
    } else if (any(class(e) == 'bekka')) {
      aic <- k * (3 * N^2 + N * (N + 1) / 2) - 2 * llv(e)
    } else if (any(class(e) == 'sbekk')) {
      aic <- k * (2 + N * (N + 1) / 2) - 2 * llv(e)
    } else if (any(class(e) == 'sbekka')) {
      aic <- k * (3 + N * (N + 1) / 2) - 2 * llv(e)
    } else if (any(class(e) == 'dbekk')) {
      aic <- k * (2 * N + N * (N + 1) / 2) - 2 * llv(e)
    } else if (any(class(e) == 'dbekka')) {
      aic <- k * (3 * N + N * (N + 1) / 2) - 2 * llv(e)
    }
    return(aic)
  }
  if (!missing(...)) { # several objects: produce data.frame
    lls <- sapply(list(x, ...), AICinner)
    vals <- sapply(list(x, ...), function(e1) { length(e1$theta) })
    aic <- data.frame(df = vals, AIC = lls)
  } else {
    aic <- AICinner(x)
    aic <- data.frame(df = length(x$theta), AIC = aic)
  }
  return(aic)
}

#' @noRd
BIC.bekkFit <- function(object, ...) {
  x <- object
  BICinner <- function(e) {
    N <- ncol(e$data)
    # BIC = (number of free parameters) * log(T) - 2 * log-likelihood
    if (any(class(e) == 'bekk')) {
      bic <- (2 * N^2 + N * (N + 1) / 2) * log(nrow(e$data)) - 2 * llv(e)
    } else if (any(class(e) == 'bekka')) {
      bic <- (3 * N^2 + N * (N + 1) / 2) * log(nrow(e$data)) - 2 * llv(e)
    } else if (any(class(e) == 'sbekk')) {
      bic <- (2 + N * (N + 1) / 2) * log(nrow(e$data)) - 2 * llv(e)
    } else if (any(class(e) == 'sbekka')) {
      bic <- (3 + N * (N + 1) / 2) * log(nrow(e$data)) - 2 * llv(e)
    } else if (any(class(e) == 'dbekk')) {
      bic <- (2 * N + N * (N + 1) / 2) * log(nrow(e$data)) - 2 * llv(e)
    } else if (any(class(e) == 'dbekka')) {
      bic <- (3 * N + N * (N + 1) / 2) * log(nrow(e$data)) - 2 * llv(e)
    }
    return(bic)
  }
  if (!missing(...)) { # several objects: produce data.frame
    lls <- sapply(list(x, ...), BICinner)
    vals <- sapply(list(x, ...), function(e1) { length(e1$theta) })
    bic <- data.frame(df = vals, BIC = lls)
  } else {
    bic <- data.frame(df = length(x$theta), BIC = BICinner(x))
  }
  return(bic)
}

#' @noRd
llv <- function(object) {
  llv_inner <- function(x) {
    if (any(class(x) == 'bekk')) {
      logl <- loglike_bekk(x$theta, x$data)
    } else if (any(class(x) == 'bekka')) {
      logl <- loglike_asymm_bekk(x$theta, x$data, x$signs)
    } else if (any(class(x) == 'sbekk')) {
      logl <- loglike_sbekk(x$theta, x$data)
    } else if (any(class(x) == 'sbekka')) {
      logl <- loglike_asymm_sbekk(x$theta, x$data, x$signs)
    } else if (any(class(x) == 'dbekk')) {
      logl <- loglike_dbekk(x$theta, x$data)
    } else if (any(class(x) == 'dbekka')) {
      logl <- loglike_asymm_dbekk(x$theta, x$data, x$signs)
    }
    return(logl)
  }
  lls <- llv_inner(object)
  return(lls)
}

#' @rdname bekk_fit_methods
#' @export
logLik.bekkFit <- function(object, ..., k = 2) {
  # llv() already dispatches on the model class, so the per-class
  # log-likelihood switch does not need to be duplicated here.
  if (!missing(...)) { # several objects: produce data.frame
    lls <- sapply(list(object, ...), llv)
    vals <- sapply(list(object, ...), function(e1) { length(e1$theta) })
    res <- data.frame(df = vals, LLV = lls, AIC = AIC(object, ..., k = k)$AIC, BIC = BIC(object, ...)$BIC)
  } else {
    lls <- llv(object)
    res <- data.frame(df = length(object$theta), LLV = lls, AIC = AIC(object, k = k)$AIC, BIC = BIC(object)$BIC)
  }
  return(res)
}
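
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the package: the information criteria
# above follow AIC = k * npar - 2 * logLik and BIC = npar * log(T) - 2 * logLik,
# where npar depends on the model type (e.g. 2 * N^2 + N * (N + 1) / 2 free
# parameters for a symmetric BEKK with N series). A hand computation for a
# hypothetical bivariate fit; all numbers below are assumed for illustration.
# ---------------------------------------------------------------------------
if (FALSE) {
  N  <- 2       # number of series
  Tn <- 3000    # number of observations (assumed)
  ll <- -8000   # log-likelihood of the fitted model (assumed)
  npar <- 2 * N^2 + N * (N + 1) / 2   # 11 parameters: C0 (3), A (4), G (4)
  aic <- 2 * npar - 2 * ll            # 22 + 16000 = 16022
  bic <- npar * log(Tn) - 2 * ll      # 11 * log(3000) + 16000 ~ 16088.1
}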
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/bekk_fit_methods.R
#' @name predict
#' @rdname predict
#' @title Forecasting conditional volatilities with BEKK models
#'
#' @description Method for predicting N-dimensional BEKK covariance matrices.
#'
#' @param object A fitted bekk model of class "bekkFit" from the \link{bekk_fit} function.
#' @param n.ahead Number of periods to forecast conditional volatility. Default is a one-period ahead forecast.
#' @param ci Floating point in [0,1] defining the level of the confidence bands for the conditional volatility forecast. The default are 95 per cent confidence bands.
#' @param ... Further parameters to be passed on to the function.
#' @return Returns an S3 class "bekkForecast" object containing the conditional volatility forecasts and respective confidence bands.
#' @examples
#' \donttest{
#' data(StocksBonds)
#' obj_spec <- bekk_spec()
#' x1 <- bekk_fit(obj_spec, StocksBonds, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9)
#'
#' x2 <- predict(x1, n.ahead = 1)
#'
#' }
#' @export
predict.bekk <- function(object, n.ahead = 1, ci = 0.95, ...) {
  x <- object
  N <- ncol(x$data)
  NoBs <- nrow(x$data)
  #var_process <- sigma_bekk(xx$data, xx$C0, xx$A, xx$G)
  H_t <- vector(mode = "list", length = n.ahead + 1)
  H_t[[1]] <- matrix(x$H_t[NoBs, ], nrow = N, ncol = N)
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t[[i + 1]] <- x$C0 %*% t(x$C0) + t(x$A) %*% t(current_returns) %*% current_returns %*% x$A + t(x$G) %*% H_t[[i]] %*% x$G
    current_returns <- eigen_value_decomposition(H_t[[i + 1]])
  }
  sigma_t <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t[[i]])))) %*% H_t[[i]] %*% sqrt(solve(diag(diag(H_t[[i]]))))
    diag(tm2) <- sqrt(diag(H_t[[i]]))
    sigma_t[i - 1, ] <- c(tm2)
  }
  colnames(sigma_t) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t)[k2] <- paste('Conditional standard deviation of \n', colnames(x$data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t)[k2] <- paste('Conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        k2 <- k2 + 1
      }
    }
  }
  elim <- elimination_mat(N)
  sigma_t <- sigma_t[, which(colSums(elim) == 1)]
  H_t_f <- matrix(NA, nrow = n.ahead, ncol = ncol(x$data)^2)
  for (i in 2:(n.ahead + 1)) {
    H_t_f[i - 1, ] <- c(H_t[[i]])
  }

  # Generating confidence intervals
  score_final <- score_bekk(x$theta, x$data)
  s1_temp <- diag(inv_gen(t(score_final) %*% score_final), names = TRUE)
  s1 <- sqrt(s1_temp)
  lower_theta <- x$theta - qnorm(ci) * s1
  upper_theta <- x$theta + qnorm(ci) * s1
  H_t_lower <- vector(mode = "list", length = n.ahead + 1)
  H_t_upper <- vector(mode = "list", length = n.ahead + 1)
  x_lower <- coef_mat(lower_theta, N)
  x_upper <- coef_mat(upper_theta, N)
  if (!any(c(valid_bekk(x_lower$c0, x_lower$a, x_lower$g), valid_bekk(x_upper$c0, x_upper$a, x_upper$g)))) {
    stop("Lower and/or upper CI are not leading to a stationary BEKK model. Please decrease the confidence level ci.")
  }
  #check if t(x_lower$C0) or x_lower$C0 for sigma_bekk
  H_t_lower[[1]] <- matrix(sigma_bekk(x$data, t(x_lower$c0), x_lower$a, x_lower$g)$sigma_t[NoBs, ], nrow = N, ncol = N)
  H_t_upper[[1]] <- matrix(sigma_bekk(x$data, t(x_upper$c0), x_upper$a, x_upper$g)$sigma_t[NoBs, ], nrow = N, ncol = N)
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t_lower[[i + 1]] <- x_lower$c0 %*% t(x_lower$c0) + t(x_lower$a) %*% t(current_returns) %*% current_returns %*% x_lower$a + t(x_lower$g) %*% H_t[[i]] %*% x_lower$g
    current_returns <- eigen_value_decomposition(H_t_lower[[i + 1]])
  }
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t_upper[[i + 1]] <- x_upper$c0 %*% t(x_upper$c0) + t(x_upper$a) %*% t(current_returns) %*% current_returns %*% x_upper$a + t(x_upper$g) %*% H_t[[i]] %*% x_upper$g
    current_returns <- eigen_value_decomposition(H_t_upper[[i + 1]])
  }
  sigma_t_lower <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t_lower[[i]])))) %*% H_t_lower[[i]] %*% sqrt(solve(diag(diag(H_t_lower[[i]]))))
    diag(tm2) <- sqrt(diag(H_t_lower[[i]]))
    sigma_t_lower[i - 1, ] <- c(tm2)
  }
  sigma_t_upper <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t_upper[[i]])))) %*% H_t_upper[[i]] %*% sqrt(solve(diag(diag(H_t_upper[[i]]))))
    diag(tm2) <- sqrt(diag(H_t_upper[[i]]))
    sigma_t_upper[i - 1, ] <- c(tm2)
  }
  colnames(sigma_t_lower) <- rep(1, N^2)
  colnames(sigma_t_upper) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional standard deviation of \n', colnames(x$data)[k])
        colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional standard deviation of \n', colnames(x$data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        k2 <- k2 + 1
      }
    }
  }
  elim <- elimination_mat(N)
  sigma_t_lower <- sigma_t_lower[, which(colSums(elim) == 1)]
  sigma_t_upper <- sigma_t_upper[, which(colSums(elim) == 1)]
  H_t_f_lower <- H_t_f_upper <- matrix(NA, nrow = n.ahead + 1, ncol = ncol(x$data)^2)
  for (i in 1:(n.ahead + 1)) {
    H_t_f_lower[i, ] <- c(H_t_lower[[i]])
    H_t_f_upper[i, ] <- c(H_t_upper[[i]])
  }
  if (inherits(x$data, "ts")) {
    sigma_t <- ts(sigma_t, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    sigma_t_lower <- ts(sigma_t_lower, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    sigma_t_upper <- ts(sigma_t_upper, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f <- ts(H_t_f, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f_lower <- ts(H_t_f_lower, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f_upper <- ts(H_t_f_upper, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
  } else if (inherits(x$data, "xts") || inherits(x$data, "zoo")) {
    sigma_t <- xts(matrix(sigma_t, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    sigma_t_lower <- xts(matrix(sigma_t_lower, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    sigma_t_upper <- xts(matrix(sigma_t_upper, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f <- xts(matrix(H_t_f, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f_lower <- xts(matrix(H_t_f_lower, nrow = n.ahead + 1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f_upper <- xts(matrix(H_t_f_upper, nrow = n.ahead + 1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
  }
  result <- list(
    volatility_forecast = sigma_t,
    volatility_lower_conf_band = sigma_t_lower,
    volatility_upper_conf_band = sigma_t_upper,
    H_t_forecast = H_t_f,
    H_t_lower_conf_band = H_t_f_lower,
    H_t_upper_conf_band = H_t_f_upper,
    n.ahead = n.ahead,
    bekkfit = x
  )
  class(result) <- c('bekkForecast', 'bekk')
  return(result)
}

#' @rdname predict
#' @export
predict.bekka <- function(object, n.ahead = 1, ci = 0.95, ...) {
  x <- object
  N <- ncol(x$data)
  NoBs <- nrow(x$data)
  #var_process <- sigma_bekk(xx$data, xx$C0, xx$A, xx$G)
  H_t <- vector(mode = "list", length = n.ahead + 1)
  H_t[[1]] <- matrix(x$H_t[NoBs, ], nrow = N, ncol = N)
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  # One-step forecast uses the observed last return; further steps replace
  # the squared return by its conditional expectation H_t.
  H_t[[2]] <- x$C0 %*% t(x$C0) + t(x$A) %*% t(current_returns) %*% current_returns %*% x$A + indicatorFunction(as.matrix(current_returns), x$signs) * t(x$B) %*% t(current_returns) %*% current_returns %*% x$B + t(x$G) %*% H_t[[1]] %*% x$G
  expected_signs <- expected_indicator_value(x$data, x$signs)
  if (n.ahead > 1) {
    for (i in 2:n.ahead) {
      H_t[[i + 1]] <- x$C0 %*% t(x$C0) + t(x$A) %*% H_t[[i]] %*% x$A + expected_signs * t(x$B) %*% H_t[[i]] %*% x$B + t(x$G) %*% H_t[[i]] %*% x$G
    }
  }
  sigma_t <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t[[i]])))) %*% H_t[[i]] %*% sqrt(solve(diag(diag(H_t[[i]]))))
    diag(tm2) <- sqrt(diag(H_t[[i]]))
    sigma_t[i - 1, ] <- c(tm2)
  }
  colnames(sigma_t) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t)[k2] <- paste('Conditional standard deviation of \n', colnames(x$data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t)[k2] <- paste('Conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        k2 <- k2 + 1
      }
    }
  }
  elim <- elimination_mat(N)
  sigma_t <- sigma_t[, which(colSums(elim) == 1)]
  H_t_f <- matrix(NA, nrow = n.ahead, ncol = ncol(x$data)^2)
  for (i in 2:(n.ahead + 1)) {
    H_t_f[i - 1, ] <- c(H_t[[i]])
  }

  # Confidence interval at level ci
  score_final <- score_asymm_bekk(x$theta, x$data, x$signs)
  s1_temp <- diag(inv_gen(t(score_final) %*% score_final), names = TRUE)
  s1 <- sqrt(s1_temp)
  lower_theta <- x$theta - qnorm(ci) * s1
  upper_theta <- x$theta + qnorm(ci) * s1
  H_t_lower <- vector(mode = "list", length = n.ahead + 1)
  H_t_upper <- vector(mode = "list", length = n.ahead + 1)
  x_lower <- coef_mat_asymm(lower_theta, N)
  x_upper <- coef_mat_asymm(upper_theta, N)
  #check if t(x_lower$C0) or x_lower$C0 for sigma_bekk
  H_t_lower[[1]] <- matrix(sigma_bekk_asymm(x$data, t(x_lower$c0), x_lower$a, x_lower$b, x_lower$g, x$signs)$sigma_t[NoBs, ], nrow = N, ncol = N)
  H_t_upper[[1]] <- matrix(sigma_bekk_asymm(x$data, t(x_upper$c0), x_upper$a, x_upper$b, x_upper$g, x$signs)$sigma_t[NoBs, ], nrow = N, ncol = N)
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t_lower[[i + 1]] <- x_lower$c0 %*% t(x_lower$c0) + t(x_lower$a) %*% t(current_returns) %*% current_returns %*% x_lower$a + expected_signs * t(x_lower$b) %*% H_t_lower[[i]] %*% x_lower$b + t(x_lower$g) %*% H_t[[i]] %*% x_lower$g
    current_returns <- eigen_value_decomposition(H_t_lower[[i + 1]])
  }
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t_upper[[i + 1]] <- x_upper$c0 %*% t(x_upper$c0) + t(x_upper$a) %*% t(current_returns) %*% current_returns %*% x_upper$a + expected_signs * t(x_upper$b) %*% H_t_upper[[i]] %*% x_upper$b + t(x_upper$g) %*% H_t[[i]] %*% x_upper$g
    current_returns <- eigen_value_decomposition(H_t_upper[[i + 1]])
  }
  sigma_t_lower <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t_lower[[i]])))) %*% H_t_lower[[i]] %*% sqrt(solve(diag(diag(H_t_lower[[i]]))))
    diag(tm2) <- sqrt(diag(H_t_lower[[i]]))
    sigma_t_lower[i - 1, ] <- c(tm2)
  }
  sigma_t_upper <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t_upper[[i]])))) %*% H_t_upper[[i]] %*% sqrt(solve(diag(diag(H_t_upper[[i]]))))
    diag(tm2) <- sqrt(diag(H_t_upper[[i]]))
    sigma_t_upper[i - 1, ] <- c(tm2)
  }
  colnames(sigma_t_lower) <- rep(1, N^2)
  colnames(sigma_t_upper) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional standard deviation of \n', colnames(x$data)[k])
        colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional standard deviation of \n', colnames(x$data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        k2 <- k2 + 1
      }
    }
  }
  elim <- elimination_mat(N)
  sigma_t_lower <- sigma_t_lower[, which(colSums(elim) == 1)]
  sigma_t_upper <- sigma_t_upper[, which(colSums(elim) == 1)]
  H_t_f_lower <- H_t_f_upper <- matrix(NA, nrow = n.ahead + 1, ncol = ncol(x$data)^2)
  for (i in 1:(n.ahead + 1)) {
    H_t_f_lower[i, ] <- c(H_t_lower[[i]])
    H_t_f_upper[i, ] <- c(H_t_upper[[i]])
  }
  if (inherits(x$data, "ts")) {
    sigma_t <- ts(sigma_t, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    sigma_t_lower <- ts(sigma_t_lower, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    sigma_t_upper <- ts(sigma_t_upper, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f <- ts(H_t_f, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f_lower <- ts(H_t_f_lower, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f_upper <- ts(H_t_f_upper, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
  } else if (inherits(x$data, "xts") || inherits(x$data, "zoo")) {
    sigma_t <- xts(matrix(sigma_t, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    sigma_t_lower <- xts(matrix(sigma_t_lower, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    sigma_t_upper <- xts(matrix(sigma_t_upper, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f <- xts(matrix(H_t_f, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f_lower <- xts(matrix(H_t_f_lower, nrow = n.ahead + 1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f_upper <- xts(matrix(H_t_f_upper, nrow = n.ahead + 1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
  }
  result <- list(
    volatility_forecast = sigma_t,
    volatility_lower_conf_band = sigma_t_lower,
    volatility_upper_conf_band = sigma_t_upper,
    H_t_forecast = H_t_f,
    H_t_lower_conf_band = H_t_f_lower,
    H_t_upper_conf_band = H_t_f_upper,
    n.ahead = n.ahead,
    bekkfit = x
  )
  class(result) <- c('bekkForecast', 'bekka')
  return(result)
}

#' @rdname predict
#' @export
predict.dbekk <- function(object, n.ahead = 1, ci = 0.95, ...) {
  x <- object
  N <- ncol(x$data)
  NoBs <- nrow(x$data)
  #var_process <- sigma_bekk(xx$data, xx$C0, xx$A, xx$G)
  H_t <- vector(mode = "list", length = n.ahead + 1)
  H_t[[1]] <- matrix(x$H_t[NoBs, ], nrow = N, ncol = N)
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t[[i + 1]] <- x$C0 %*% t(x$C0) + t(x$A) %*% t(current_returns) %*% current_returns %*% x$A + t(x$G) %*% H_t[[i]] %*% x$G
    current_returns <- eigen_value_decomposition(H_t[[i + 1]])
  }
  sigma_t <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t[[i]])))) %*% H_t[[i]] %*% sqrt(solve(diag(diag(H_t[[i]]))))
    diag(tm2) <- sqrt(diag(H_t[[i]]))
    sigma_t[i - 1, ] <- c(tm2)
  }
  colnames(sigma_t) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t)[k2] <- paste('Conditional standard deviation of \n', colnames(x$data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t)[k2] <- paste('Conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        k2 <- k2 + 1
      }
    }
  }
  elim <- elimination_mat(N)
  sigma_t <- sigma_t[, which(colSums(elim) == 1)]
  H_t_f <- matrix(NA, nrow = n.ahead, ncol = ncol(x$data)^2)
  for (i in 2:(n.ahead + 1)) {
    H_t_f[i - 1, ] <- c(H_t[[i]])
  }

  # Generating confidence intervals
  score_final <- score_dbekk(x$theta, x$data)
  s1_temp <- diag(inv_gen(t(score_final) %*% score_final), names = TRUE)
  s1 <- sqrt(s1_temp)
  lower_theta <- x$theta - qnorm(ci) * s1
  upper_theta <- x$theta + qnorm(ci) * s1
  H_t_lower <- vector(mode = "list", length = n.ahead + 1)
  H_t_upper <- vector(mode = "list", length = n.ahead + 1)
  x_lower <- coef_mat_diagonal(lower_theta, N)
  x_upper <- coef_mat_diagonal(upper_theta, N)
  #check if t(x_lower$C0) or x_lower$C0 for sigma_bekk
  H_t_lower[[1]] <- matrix(sigma_bekk(x$data, t(x_lower$c0), x_lower$a, x_lower$g)$sigma_t[NoBs, ], nrow = N, ncol = N)
  H_t_upper[[1]] <- matrix(sigma_bekk(x$data, t(x_upper$c0), x_upper$a, x_upper$g)$sigma_t[NoBs, ], nrow = N, ncol = N)
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t_lower[[i + 1]] <- x_lower$c0 %*% t(x_lower$c0) + t(x_lower$a) %*% t(current_returns) %*% current_returns %*% x_lower$a + t(x_lower$g) %*% H_t[[i]] %*% x_lower$g
    current_returns <- eigen_value_decomposition(H_t_lower[[i + 1]])
  }
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t_upper[[i + 1]] <- x_upper$c0 %*% t(x_upper$c0) + t(x_upper$a) %*% t(current_returns) %*% current_returns %*% x_upper$a + t(x_upper$g) %*% H_t[[i]] %*% x_upper$g
    current_returns <- eigen_value_decomposition(H_t_upper[[i + 1]])
  }
  sigma_t_lower <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t_lower[[i]])))) %*% H_t_lower[[i]] %*% sqrt(solve(diag(diag(H_t_lower[[i]]))))
    diag(tm2) <- sqrt(diag(H_t_lower[[i]]))
    sigma_t_lower[i - 1, ] <- c(tm2)
  }
  sigma_t_upper <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t_upper[[i]])))) %*% H_t_upper[[i]] %*% sqrt(solve(diag(diag(H_t_upper[[i]]))))
    diag(tm2) <- sqrt(diag(H_t_upper[[i]]))
    sigma_t_upper[i - 1, ] <- c(tm2)
  }
  colnames(sigma_t_lower) <- rep(1, N^2)
  colnames(sigma_t_upper) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional standard deviation of \n', colnames(x$data)[k])
        colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional standard deviation of \n', colnames(x$data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        k2 <- k2 + 1
      }
    }
  }
  elim <- elimination_mat(N)
  sigma_t_lower <- sigma_t_lower[, which(colSums(elim) == 1)]
  sigma_t_upper <- sigma_t_upper[, which(colSums(elim) == 1)]
  H_t_f_lower <- H_t_f_upper <- matrix(NA, nrow = n.ahead + 1, ncol = ncol(x$data)^2)
  for (i in 1:(n.ahead + 1)) {
    H_t_f_lower[i, ] <- c(H_t_lower[[i]])
    H_t_f_upper[i, ] <- c(H_t_upper[[i]])
  }
  if (inherits(x$data, "ts")) {
    sigma_t <- ts(sigma_t, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    sigma_t_lower <- ts(sigma_t_lower, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    sigma_t_upper <- ts(sigma_t_upper, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f <- ts(H_t_f, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f_lower <- ts(H_t_f_lower, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f_upper <- ts(H_t_f_upper, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
  } else if (inherits(x$data, "xts") || inherits(x$data, "zoo")) {
    sigma_t <- xts(matrix(sigma_t, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    sigma_t_lower <- xts(matrix(sigma_t_lower, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    sigma_t_upper <- xts(matrix(sigma_t_upper, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f <- xts(matrix(H_t_f, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f_lower <- xts(matrix(H_t_f_lower, nrow = n.ahead + 1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f_upper <- xts(matrix(H_t_f_upper, nrow = n.ahead + 1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
  }
  result <- list(
    volatility_forecast = sigma_t,
    volatility_lower_conf_band = sigma_t_lower,
    volatility_upper_conf_band = sigma_t_upper,
    H_t_forecast = H_t_f,
    H_t_lower_conf_band = H_t_f_lower,
    H_t_upper_conf_band = H_t_f_upper,
    n.ahead = n.ahead,
    bekkfit = x
  )
  class(result) <- c('bekkForecast', 'dbekk')
  return(result)
}

#' @rdname predict
#' @export
predict.dbekka <- function(object, n.ahead = 1, ci = 0.95, ...) {
  x <- object
  N <- ncol(x$data)
  NoBs <- nrow(x$data)
  #var_process <- sigma_bekk(xx$data, xx$C0, xx$A, xx$G)
  H_t <- vector(mode = "list", length = n.ahead + 1)
  H_t[[1]] <- matrix(x$H_t[NoBs, ], nrow = N, ncol = N)
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  # One-step forecast uses the observed last return; further steps replace
  # the squared return by its conditional expectation H_t.
  H_t[[2]] <- x$C0 %*% t(x$C0) + t(x$A) %*% t(current_returns) %*% current_returns %*% x$A + indicatorFunction(as.matrix(current_returns), x$signs) * t(x$B) %*% t(current_returns) %*% current_returns %*% x$B + t(x$G) %*% H_t[[1]] %*% x$G
  expected_signs <- expected_indicator_value(x$data, x$signs)
  if (n.ahead > 1) {
    for (i in 2:n.ahead) {
      H_t[[i + 1]] <- x$C0 %*% t(x$C0) + t(x$A) %*% H_t[[i]] %*% x$A + expected_signs * t(x$B) %*% H_t[[i]] %*% x$B + t(x$G) %*% H_t[[i]] %*% x$G
    }
  }
  sigma_t <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t[[i]])))) %*% H_t[[i]] %*% sqrt(solve(diag(diag(H_t[[i]]))))
    diag(tm2) <- sqrt(diag(H_t[[i]]))
    sigma_t[i - 1, ] <- c(tm2)
  }
  colnames(sigma_t) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t)[k2] <- paste('Conditional standard deviation of \n', colnames(x$data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t)[k2] <- paste('Conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        k2 <- k2 + 1
      }
    }
  }
  elim <- elimination_mat(N)
  sigma_t <- sigma_t[, which(colSums(elim) == 1)]
  H_t_f <- matrix(NA, nrow = n.ahead, ncol = ncol(x$data)^2)
  for (i in 2:(n.ahead + 1)) {
    H_t_f[i - 1, ] <- c(H_t[[i]])
  }

  # Confidence interval at level ci
  score_final <- score_asymm_dbekk(x$theta, x$data, x$signs)
  s1_temp <- diag(inv_gen(t(score_final) %*% score_final), names = TRUE)
  s1 <- sqrt(s1_temp)
  lower_theta <- x$theta - qnorm(ci) * s1
  upper_theta <- x$theta + qnorm(ci) * s1
  H_t_lower <- vector(mode = "list", length = n.ahead + 1)
  H_t_upper <- vector(mode = "list", length = n.ahead + 1)
  x_lower <- coef_mat_asymm_diagonal(lower_theta, N)
  x_upper <- coef_mat_asymm_diagonal(upper_theta, N)
  #check if t(x_lower$C0) or x_lower$C0 for sigma_bekk
  H_t_lower[[1]] <- matrix(sigma_bekk_asymm(x$data, t(x_lower$c0), x_lower$a, x_lower$b, x_lower$g, x$signs)$sigma_t[NoBs, ], nrow = N, ncol = N)
  H_t_upper[[1]] <- matrix(sigma_bekk_asymm(x$data, t(x_upper$c0), x_upper$a, x_upper$b, x_upper$g, x$signs)$sigma_t[NoBs, ], nrow = N, ncol = N)
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t_lower[[i + 1]] <- x_lower$c0 %*% t(x_lower$c0) + t(x_lower$a) %*% t(current_returns) %*% current_returns %*% x_lower$a + expected_signs * t(x_lower$b) %*% H_t_lower[[i]] %*% x_lower$b + t(x_lower$g) %*% H_t[[i]] %*% x_lower$g
    current_returns <- eigen_value_decomposition(H_t_lower[[i + 1]])
  }
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t_upper[[i + 1]] <- x_upper$c0 %*% t(x_upper$c0) + t(x_upper$a) %*% t(current_returns) %*% current_returns %*% x_upper$a + expected_signs * t(x_upper$b) %*% H_t_upper[[i]] %*% x_upper$b + t(x_upper$g) %*% H_t[[i]] %*% x_upper$g
    current_returns <- eigen_value_decomposition(H_t_upper[[i + 1]])
  }
  sigma_t_lower <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t_lower[[i]])))) %*% H_t_lower[[i]] %*% sqrt(solve(diag(diag(H_t_lower[[i]]))))
    diag(tm2) <- sqrt(diag(H_t_lower[[i]]))
    sigma_t_lower[i - 1, ] <- c(tm2)
  }
  sigma_t_upper <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t_upper[[i]])))) %*% H_t_upper[[i]] %*% sqrt(solve(diag(diag(H_t_upper[[i]]))))
    diag(tm2) <- sqrt(diag(H_t_upper[[i]]))
    sigma_t_upper[i - 1, ] <- c(tm2)
  }
  colnames(sigma_t_lower) <- rep(1, N^2)
  colnames(sigma_t_upper) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional standard deviation of \n', colnames(x$data)[k])
        colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional standard deviation of \n', colnames(x$data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        k2 <- k2 + 1
      }
    }
  }
  elim <- elimination_mat(N)
  sigma_t_lower <- sigma_t_lower[, which(colSums(elim) == 1)]
  sigma_t_upper <- sigma_t_upper[, which(colSums(elim) == 1)]
  H_t_f_lower <- H_t_f_upper <- matrix(NA, nrow = n.ahead + 1, ncol = ncol(x$data)^2)
  for (i in 1:(n.ahead + 1)) {
    H_t_f_lower[i, ] <- c(H_t_lower[[i]])
    H_t_f_upper[i, ] <- c(H_t_upper[[i]])
  }
  if (inherits(x$data, "ts")) {
    sigma_t <- ts(sigma_t, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    sigma_t_lower <- ts(sigma_t_lower, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    sigma_t_upper <- ts(sigma_t_upper, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f <- ts(H_t_f, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f_lower <- ts(H_t_f_lower, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
    H_t_f_upper <- ts(H_t_f_upper, start = (time(x$data)[nrow(x$data)] + 1), frequency = frequency(x$data))
  } else if (inherits(x$data, "xts") || inherits(x$data, "zoo")) {
    sigma_t <- xts(matrix(sigma_t, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    sigma_t_lower <- xts(matrix(sigma_t_lower, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    sigma_t_upper <- xts(matrix(sigma_t_upper, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f <- xts(matrix(H_t_f, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)] + 1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f_lower <- xts(matrix(H_t_f_lower, nrow = n.ahead + 1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
    H_t_f_upper <- xts(matrix(H_t_f_upper, nrow = n.ahead + 1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units))
  }
  result <- list(
    volatility_forecast = sigma_t,
    volatility_lower_conf_band = sigma_t_lower,
    volatility_upper_conf_band = sigma_t_upper,
    H_t_forecast = H_t_f,
    H_t_lower_conf_band = H_t_f_lower,
    H_t_upper_conf_band = H_t_f_upper,
    n.ahead = n.ahead,
    bekkfit = x
  )
  class(result) <- c('bekkForecast', 'dbekka')
  return(result)
}

#' @rdname predict
#' @export
predict.sbekk <- function(object, n.ahead = 1, ci = 0.95, ...) {
  x <- object
  N <- ncol(x$data)
  NoBs <- nrow(x$data)
  #var_process <- sigma_bekk(xx$data, xx$C0, xx$A, xx$G)
  H_t <- vector(mode = "list", length = n.ahead + 1)
  H_t[[1]] <- matrix(x$H_t[NoBs, ], nrow = N, ncol = N)
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t[[i + 1]] <- x$C0 %*% t(x$C0) + x$a * t(current_returns) %*% current_returns + x$g * H_t[[i]]
    current_returns <- eigen_value_decomposition(H_t[[i + 1]])
  }
  sigma_t <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t[[i]])))) %*% H_t[[i]] %*% sqrt(solve(diag(diag(H_t[[i]]))))
    diag(tm2) <- sqrt(diag(H_t[[i]]))
    sigma_t[i - 1, ] <- c(tm2)
  }
  colnames(sigma_t) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t)[k2] <- paste('Conditional standard deviation of \n', colnames(x$data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t)[k2] <- paste('Conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        k2 <- k2 + 1
      }
    }
  }
  elim <- elimination_mat(N)
  sigma_t <- sigma_t[, which(colSums(elim) == 1)]
  H_t_f <- matrix(NA, nrow = n.ahead, ncol = ncol(x$data)^2)
  for (i in 2:(n.ahead + 1)) {
    H_t_f[i - 1, ] <- c(H_t[[i]])
  }

  # Generating confidence intervals
  score_final <- score_sbekk(x$theta, x$data)
  s1_temp <- diag(inv_gen(t(score_final) %*% score_final), names = TRUE)
  s1 <- sqrt(s1_temp)
  lower_theta <- x$theta - qnorm(ci) * s1
  upper_theta <- x$theta + qnorm(ci) * s1
  H_t_lower <- vector(mode = "list", length = n.ahead + 1)
  H_t_upper <- vector(mode = "list", length = n.ahead + 1)
  x_lower <- coef_mat_scalar(lower_theta, N)
  x_upper <- coef_mat_scalar(upper_theta, N)
  #check if t(x_lower$C0) or x_lower$C0 for sigma_bekk
  H_t_lower[[1]] <- matrix(sigma_sbekk(x$data, t(x_lower$c0), x_lower$a, x_lower$g)$sigma_t[NoBs, ], nrow = N, ncol = N)
  H_t_upper[[1]] <- matrix(sigma_sbekk(x$data, t(x_upper$c0), x_upper$a, x_upper$g)$sigma_t[NoBs, ], nrow = N, ncol = N)
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t_lower[[i + 1]] <- x_lower$c0 %*% t(x_lower$c0) + x_lower$a * t(current_returns) %*% current_returns + x_lower$g * H_t[[i]]
    current_returns <- eigen_value_decomposition(H_t_lower[[i + 1]])
  }
  current_returns <- matrix(c(x$data[NoBs, ]), nrow = 1)
  for (i in 1:n.ahead) {
    H_t_upper[[i + 1]] <- x_upper$c0 %*% t(x_upper$c0) + x_upper$a * t(current_returns) %*% current_returns + x_upper$g * H_t[[i]]
    current_returns <- eigen_value_decomposition(H_t_upper[[i + 1]])
  }
  sigma_t_lower <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t_lower[[i]])))) %*% H_t_lower[[i]] %*% sqrt(solve(diag(diag(H_t_lower[[i]]))))
    diag(tm2) <- sqrt(diag(H_t_lower[[i]]))
    sigma_t_lower[i - 1, ] <- c(tm2)
  }
  sigma_t_upper <- matrix(NA, nrow = n.ahead, ncol = N^2)
  for (i in 2:(n.ahead + 1)) {
    tm2 <- sqrt(solve(diag(diag(H_t_upper[[i]])))) %*% H_t_upper[[i]] %*% sqrt(solve(diag(diag(H_t_upper[[i]]))))
    diag(tm2) <- sqrt(diag(H_t_upper[[i]]))
    sigma_t_upper[i - 1, ] <- c(tm2)
  }
  colnames(sigma_t_lower) <- rep(1, N^2)
  colnames(sigma_t_upper) <- rep(1, N^2)
  k <- 1
  k2 <- 1
  for (i in 1:N) {
    for (j in 1:N) {
      if (i == j) {
        colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional standard deviation of \n', colnames(x$data)[k])
        colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional standard deviation of \n', colnames(x$data)[k])
        k <- k + 1
        k2 <- k2 + 1
      } else {
        colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j])
        colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional
correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j]) k2 <- k2 +1 } } } elim <- elimination_mat(N) sigma_t_lower <- sigma_t_lower[, which(colSums(elim) == 1)] sigma_t_upper <- sigma_t_upper[, which(colSums(elim) == 1)] H_t_f_lower <- H_t_f_upper <- matrix(NA, nrow = n.ahead + 1, ncol = ncol(x$data)^2) for (i in 1:(n.ahead+1)){ H_t_f_lower[i, ] <- c(H_t_lower[[i]]) H_t_f_upper[i, ] <- c(H_t_upper[[i]]) } if (inherits(x$data, "ts")) { sigma_t <- ts(sigma_t, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) sigma_t_lower <- ts(sigma_t_lower, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) sigma_t_upper <- ts(sigma_t_upper, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) H_t_f <- ts(H_t_f, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) H_t_f_lower <- ts(H_t_f_lower, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) H_t_f_upper <- ts(H_t_f_upper, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) } else if(inherits(x$data, "xts") || inherits(x$data, "zoo") ){ sigma_t <- xts(matrix(sigma_t, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)]+1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) sigma_t_lower <- xts(matrix(sigma_t_lower, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)]+1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) sigma_t_upper <- xts(matrix(sigma_t_upper, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)]+1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) H_t_f <- xts(matrix(H_t_f, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)]+1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) H_t_f_lower <- xts(matrix(H_t_f_lower, nrow = n.ahead+1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) H_t_f_upper <- xts(matrix(H_t_f_upper, nrow = n.ahead+1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) } result <- list( volatility_forecast = sigma_t, volatility_lower_conf_band = sigma_t_lower, volatility_upper_conf_band = sigma_t_upper, H_t_forecast = H_t_f, H_t_lower_conf_band = H_t_f_lower, H_t_upper_conf_band = H_t_f_upper, n.ahead = n.ahead, bekkfit = x ) class(result) <- c('bekkForecast', 'sbekk') return(result) } #' @rdname predict #' @export predict.sbekka <- function(object, n.ahead = 1, ci = 0.95, ...) 
{ x <- object N <- ncol(x$data) NoBs <- nrow(x$data) #var_process <- sigma_bekk(xx$data, xx$C0, xx$A, xx$G) H_t = vector(mode = "list",length=n.ahead+1) H_t[[1]] = matrix(x$H_t[NoBs,],nrow = N, ncol = N) current_returns <- matrix(c(x$data[NoBs,]), nrow = 1) H_t[[2]]=x$C0 %*% t(x$C0) + x$a * t(current_returns) %*% current_returns + indicatorFunction(as.matrix(current_returns), x$signs) * x$b * t(current_returns) %*% current_returns + x$g * H_t[[1]] expected_signs=expected_indicator_value(x$data,x$signs) for(i in 2:n.ahead){ H_t[[i+1]]=x$C0 %*% t(x$C0) + x$a * H_t[[i]] + expected_signs * x$b * H_t[[i]] + x$g * H_t[[i]] } sigma_t = matrix(NA, nrow = n.ahead, ncol = N^2) for (i in 1:(n.ahead+1)){ tm2 <- sqrt(solve(diag(diag(H_t[[i]]))))%*%H_t[[i]]%*%sqrt(solve(diag(diag(H_t[[i]])))) diag(tm2) <- sqrt(diag(H_t[[i]])) sigma_t[i-1,] <- c(tm2) } colnames(sigma_t) <- rep(1, N^2) k <- 1 k2 <- 1 for (i in 1:N) { for (j in 1:N) { if (i == j) { colnames(sigma_t)[k2] <- paste('Conditional standard deviation of \n', colnames(x$data)[k]) k <- k + 1 k2 <- k2 +1 } else { colnames(sigma_t)[k2] <- paste('Conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j]) k2 <- k2 +1 } } } elim <- elimination_mat(N) sigma_t <- sigma_t[, which(colSums(elim) == 1)] H_t_f <- matrix(NA, nrow = n.ahead, ncol = ncol(x$data)^2) for (i in 1:(n.ahead+1)){ H_t_f[i-1, ] <- c(H_t[[i]]) } #95% confidence interval score_final = score_asymm_sbekk(x$theta, x$data, x$signs) s1_temp = diag(inv_gen(t(score_final) %*% score_final),names=T) s1 = sqrt(s1_temp) lower_theta = x$theta - qnorm(ci)*s1 upper_theta = x$theta + qnorm(ci)*s1 H_t_lower <- vector(mode = "list",length = n.ahead+1) H_t_upper <- vector(mode = "list",length = n.ahead+1) x_lower <- coef_mat_asymm_scalar(lower_theta, N) x_upper <- coef_mat_asymm_scalar(upper_theta, N) #check if t(x_lower$C0) or x_lower$C0 for sigma_bekk H_t_lower[[1]] <- matrix(sigma_sbekk_asymm(x$data, t(x_lower$c0), x_lower$a, x_lower$b, x_lower$g, x$signs)$sigma_t[NoBs,],nrow = N, ncol = N) H_t_upper[[1]] <- matrix(sigma_sbekk_asymm(x$data, t(x_upper$c0), x_upper$a, x_upper$b, x_upper$g, x$signs)$sigma_t[NoBs,],nrow = N, ncol = N) current_returns <- matrix(c(x$data[NoBs,]), nrow = 1) for(i in 1:n.ahead){ H_t_lower[[i+1]] <- x_lower$c0 %*% t(x_lower$c0) + x_lower$a * t(current_returns) %*% current_returns + expected_signs * x_lower$b * H_t_lower[[i]] + x_lower$g * H_t[[i]] current_returns <- eigen_value_decomposition(H_t_lower[[i+1]]) } current_returns <- matrix(c(x$data[NoBs,]), nrow = 1) for(i in 1:n.ahead){ H_t_upper[[i+1]] <- x_upper$c0 %*% t(x_upper$c0) + x_upper$a * t(current_returns) %*% current_returns + expected_signs * x_upper$b * H_t_upper[[i]] + x_upper$g * H_t[[i]] current_returns <- eigen_value_decomposition(H_t_upper[[i+1]]) } sigma_t_lower <- matrix(NA, nrow = n.ahead, ncol = N^2) for (i in 2:(n.ahead+1)){ tm2 <- sqrt(solve(diag(diag(H_t_lower[[i]]))))%*%H_t_lower[[i]]%*%sqrt(solve(diag(diag(H_t_lower[[i]])))) diag(tm2) <- sqrt(diag(H_t_lower[[i]])) sigma_t_lower[i-1,] <- c(tm2) } sigma_t_upper <- matrix(NA, nrow = n.ahead, ncol = N^2) for (i in 2:(n.ahead+1)){ tm2 <- sqrt(solve(diag(diag(H_t_upper[[i]]))))%*%H_t_upper[[i]]%*%sqrt(solve(diag(diag(H_t_upper[[i]])))) diag(tm2) <- sqrt(diag(H_t_upper[[i]])) sigma_t_upper[i-1,] <- c(tm2) } colnames(sigma_t_lower) <- rep(1, N^2) colnames(sigma_t_upper) <- rep(1, N^2) k <- 1 k2 <- 1 for (i in 1:N) { for (j in 1:N) { if (i == j) { colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional standard deviation of \n', 
colnames(x$data)[k]) colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional standard deviation of \n', colnames(x$data)[k]) k <- k + 1 k2 <- k2 +1 } else { colnames(sigma_t_lower)[k2] <- paste('Lower CI conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j]) colnames(sigma_t_upper)[k2] <- paste('Upper CI conditional correlation of \n', colnames(x$data)[i], ' and ', colnames(x$data)[j]) k2 <- k2 +1 } } } elim <- elimination_mat(N) sigma_t_lower <- sigma_t_lower[, which(colSums(elim) == 1)] sigma_t_upper <- sigma_t_upper[, which(colSums(elim) == 1)] H_t_f_lower <- H_t_f_upper <- matrix(NA, nrow = n.ahead + 1, ncol = ncol(x$data)^2) for (i in 1:(n.ahead+1)){ H_t_f_lower[i, ] <- c(H_t_lower[[i]]) H_t_f_upper[i, ] <- c(H_t_upper[[i]]) } if (inherits(x$data, "ts")) { sigma_t <- ts(sigma_t, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) sigma_t_lower <- ts(sigma_t_lower, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) sigma_t_upper <- ts(sigma_t_upper, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) H_t_f <- ts(H_t_f, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) H_t_f_lower <- ts(H_t_f_lower, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) H_t_f_upper <- ts(H_t_f_upper, start = (time(x$data)[nrow(x$data)]+1), frequency = frequency(x$data)) } else if(inherits(x$data, "xts") || inherits(x$data, "zoo") ){ sigma_t <- xts(matrix(sigma_t, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)]+1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) sigma_t_lower <- xts(matrix(sigma_t_lower, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)]+1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) sigma_t_upper <- xts(matrix(sigma_t_upper, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)]+1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) H_t_f <- xts(matrix(H_t_f, nrow = n.ahead), order.by = seq((time(x$data)[nrow(x$data)]+1), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) H_t_f_lower <- xts(matrix(H_t_f_lower, nrow = n.ahead+1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) H_t_f_upper <- xts(matrix(H_t_f_upper, nrow = n.ahead+1), order.by = seq((time(x$data)[nrow(x$data)]), (time(x$data)[nrow(x$data)] + n.ahead), by = periodicity(x$data)$units)) } result <- list( volatility_forecast = sigma_t, volatility_lower_conf_band = sigma_t_lower, volatility_upper_conf_band = sigma_t_upper, H_t_forecast = H_t_f, H_t_lower_conf_band = H_t_f_lower, H_t_upper_conf_band = H_t_f_upper, n.ahead = n.ahead, bekkfit = x ) class(result) <- c('bekkForecast', 'sbekka') return(result) }
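# All predict methods in this file follow the same pattern: iterate the BEKK
# variance recursion forward from the last in-sample H_t, then map every
# forecast H_{t+h} to conditional standard deviations and correlations.
# Below is a minimal standalone sketch of that recursion for the scalar BEKK
# (for h > 1 the expectation E[r r'] is replaced by H itself, which is what
# the eigen_value_decomposition() step above achieves). All inputs are
# hypothetical illustrations, not estimates.
forecast_sbekk_sketch <- function(C0, a, g, H_last, r_last, n.ahead) {
  CC <- C0 %*% t(C0)
  H <- vector(mode = "list", length = n.ahead)
  # one-step ahead: the last observed return enters directly
  H[[1]] <- CC + a * tcrossprod(r_last) + g * H_last
  if (n.ahead > 1) {
    for (h in 2:n.ahead) {
      # multi-step ahead: ARCH and GARCH terms collapse to (a + g) * H
      H[[h]] <- CC + (a + g) * H[[h - 1]]
    }
  }
  H
}
# Example (hypothetical two-dimensional values):
# C0 <- matrix(c(0.1, 0.05, 0, 0.1), 2, 2)
# forecast_sbekk_sketch(C0, a = 0.2, g = 0.7, H_last = diag(2),
#                       r_last = c(0.5, -0.3), n.ahead = 3)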
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/bekk_forecast.R
process_object <- function(x) { UseMethod('process_object') } process_object.bekkFit <- function(x) { theta <- x$theta N <- ncol(x$C0) signs <- x$signs BEKK_valid <- x$BEKK_valid expected_signs <- x$expected_signs return(list(theta = theta, N = N, signs=signs, expected_signs = expected_signs, BEKK_valid = BEKK_valid )) } process_object.bekkSpec <- function(x) { if (is.null(x$init_values)) { stop('Please provide "initial_values" in "bekk_spec" as paramater for simulation.') } if (is.null(x$N)) { stop('Please provide "N" in "bekk_spec" as dimension for simulation.') } theta <- x$init_values N <- x$N if(is.null(x$signs) && x$model$asymmetric == TRUE){ signs=as.matrix(rep(-1,N)) }else{ signs=x$signs } if(x$model$asymmetric == FALSE){ par = coef_mat(theta,N) BEKK_valid = valid_bekk(par$c0, par$a, par$g) } else{ par = coef_mat_asymm(theta,N) BEKK_valid = valid_asymm_bekk_sim(par$c0, par$a, par$b, par$g, 1/(N^2),signs) } expected_signs=1/(N^2) return(list(theta = theta, N = N, signs = signs, expected_signs = expected_signs, BEKK_valid = BEKK_valid)) } # Obtaining QML t-ratios QML_t_ratios <- function(theta, r) { s1 <- score_bekk(theta, r) s1 <- crossprod(s1) s2 <- hesse_bekk(theta, r) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(theta/s2) } QML_t_ratios_asymm <- function(theta, r, signs) { s1 <- score_asymm_bekk(theta, r, signs) s1 <- crossprod(s1) s2 <- hesse_asymm_bekk(theta, r, signs) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(theta/s2) } QML_t_ratios_dbekk <- function(theta, r) { s1 <- score_dbekk(theta, r) s1 <- crossprod(s1) s2 <- hesse_dbekk(theta, r) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(theta/s2) } QML_t_ratios_dbekka <- function(theta, r, signs) { s1 <- score_asymm_dbekk(theta, r, signs) s1 <- crossprod(s1) s2 <- hesse_asymm_dbekk(theta, r, signs) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(theta/s2) } QML_t_ratios_sbekk <- function(theta, r) { s1 <- score_sbekk(theta, r) s1 <- crossprod(s1) s2 <- hesse_sbekk(theta, r) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(theta/s2) } QML_t_ratios_sbekk_asymm <- function(theta, r, signs) { s1 <- score_asymm_sbekk(theta, r, signs) s1 <- crossprod(s1) s2 <- hesse_asymm_sbekk(theta, r, signs) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(theta/s2) } QML_sd <- function(theta, r) { s1 <- score_bekk(theta, r) s1 <- crossprod(s1) s2 <- hesse_bekk(theta, r) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(as.matrix(s2)) } QML_sd_asymm <- function(theta, r, signs) { s1 <- score_asymm_bekk(theta, r, signs) s1 <- crossprod(s1) s2 <- hesse_asymm_bekk(theta, r, signs) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(as.matrix(s2)) } QML_sd_dbekk <- function(theta, r) { s1 <- score_dbekk(theta, r) s1 <- crossprod(s1) s2 <- hesse_dbekk(theta, r) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(as.matrix(s2)) } QML_sd_dbekka <- function(theta, r, signs) { s1 <- score_asymm_dbekk(theta, r, signs) s1 <- crossprod(s1) s2 <- hesse_asymm_dbekk(theta, r, signs) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(as.matrix(s2)) } QML_sd_sbekk <- function(theta, r) { s1 <- score_sbekk(theta, r) s1 <- crossprod(s1) s2 <- hesse_sbekk(theta, r) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(as.matrix(s2)) } QML_sd_sbekk_asymm <- function(theta, r, signs) { s1 <- score_asymm_sbekk(theta, r, signs) s1 <- 
crossprod(s1) s2 <- hesse_asymm_sbekk(theta, r, signs) s2 <- solve(s2) %*% s1 %*% solve(s2) s2 <- sqrt(abs(diag(s2))) return(as.matrix(s2)) }
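# Every QML_* helper above instantiates the same quasi-maximum-likelihood
# "sandwich" formula: with S the outer product of the per-observation scores
# and H the Hessian of the log-likelihood, the asymptotic covariance is
# H^{-1} S H^{-1}; standard errors are the square roots of its diagonal and
# t-ratios are theta divided by them. A minimal generic sketch, assuming the
# caller supplies the (T x k) score matrix and the (k x k) Hessian:
qml_sandwich_sd <- function(score_mat, hessian) {
  S <- crossprod(score_mat)   # outer-product-of-gradients estimate
  Hinv <- solve(hessian)
  V <- Hinv %*% S %*% Hinv    # sandwich covariance matrix
  sqrt(abs(diag(V)))          # abs() guards against small negative values
}
# The model-specific helpers above differ only in which score_* and hesse_*
# routine they call.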
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/bekk_functions.R
#' @import future
#' @import future.apply
bekk_mc_eval <- function(object, spec, sample_sizes, iter, nc = 1) {
  xx <- process_object(object)
  theta <- xx$theta
  mse <- rep(NA, length(sample_sizes))
  index <- 1
  for (j in sample_sizes) {
    print(paste('Sample size: ', j))
    # the simulate() methods take the sample size via 'nsim'
    sim_dat <- future_lapply(1:iter, function(x){simulate(object, nsim = j)}, future.seed = TRUE)
    dd <- future_lapply(sim_dat, function(x){bekk_fit(spec = spec, data = x, max_iter = 200)}, future.seed = TRUE)
    mse[index] <- sum(unlist(lapply(dd, RMSE, theta_true = theta)))/iter
    print(mse[index])
    index <- index + 1
  }
  result <- data.frame(Sample_size = sample_sizes, MSE = mse)
  colnames(result) <- c('Sample', 'MSE')
  result <- list(result)
  class(result) <- 'bekkMC'
  return(result)
}

# Despite its name, this returns the mean absolute relative deviation of the
# estimated from the true parameter vector
RMSE <- function(x, theta_true) {
  theta_est <- x$theta
  return(mean(sqrt(((theta_true - theta_est) / theta_true)^2)))
}

plot.bekkMC <- function(x, ...) {
  Sample <- NULL
  MSE <- NULL
  msep <- x[[1]]
  ggplot(msep) +
    geom_line(aes(x = Sample, y = MSE)) +
    theme_bw()
}
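# A hedged usage sketch for bekk_mc_eval(): assess how estimation error
# shrinks with the sample size. The spec passed as `object` must carry
# `init_values` and `N` (see process_object.bekkSpec) so that simulate()
# can generate data; theta0 below is a placeholder for a valid parameter
# vector. Illustrative values only; runtime grows quickly with `iter`.
# dgp_spec <- bekk_spec(init_values = theta0, N = 2)
# fit_spec <- bekk_spec()
# res <- bekk_mc_eval(dgp_spec, fit_spec, sample_sizes = c(500, 1000), iter = 10)
# plot(res)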
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/bekk_mc_eval.R
#' @name simulate #' @rdname simulate #' @title Simulating BEKK models #' #' @description Method for simulating a N-dimensional BEKK model. #' #' @param object A spec object of class "bekkSpec" from the function \link{bekk_spec} or a fitted bekk model of class "bekkFit" from the \link{bekk_fit} function #' @param nsim Number of observations of the simulated sample #' @param ... Further parameters to be passed on to the function. #' @return Returns a simulated time series S3 class object using the parameters of passed "bekkSpec" or "bekkFit". #' #' @examples #' \donttest{ #' #' # simulate a BEKK with estimated parameter #' obj_spec <- bekk_spec() #' x1 <- bekk_fit(obj_spec, StocksBonds) #' #' x2 <- simulate(x1, nsim = 3000) #' #' plot(x2) #' #' } #' #' @export simulate.bekk <- function(object, nsim, ...) { spec <- object if(is.null(nsim) || !is.numeric(nsim) || nsim < 1){ stop("Please provide an integer specifying the number of observations") } if(!inherits(spec,c("bekkSpec", "bekkFit"))){ stop("Please provide an object of class bekk_fit or 'bekk_spec'.") } xx <- process_object(spec) par <- coef_mat(xx$theta,xx$N) if(xx$BEKK_valid==FALSE){ stop("Please provide a stationary BEKK model.") } sim_dat <- simulate_bekk_c(c(xx$theta), nsim, xx$N) return(ts(sim_dat)) } #' @rdname simulate #' @export simulate.bekka <- function(object, ..., nsim) { spec <- object if(is.null(nsim) || !is.numeric(nsim) || nsim < 1){ stop("Please provide an integer specifying the number of observations") } if(!inherits(spec,c("bekkSpec", "bekkFit"))){ stop("Please provide an object of class bekk_fit or 'bekk_spec'.") } xx <- process_object(spec) par <- coef_mat_asymm(xx$theta,xx$N) if(xx$BEKK_valid==FALSE){ stop("Please provide a stationary BEKK model.") } #expected_signs sim_dat <- simulate_bekka_c(c(xx$theta), nsim, xx$N, xx$signs, xx$expected_signs) return(ts(sim_dat)) } #' @rdname simulate #' @export simulate.dbekk <- function(object, ..., nsim) { spec <- object if(is.null(nsim) || !is.numeric(nsim) || nsim < 1){ stop("Please provide an integer specifying the number of observations") } if(!inherits(spec,c("bekkSpec", "bekkFit"))){ stop("Please provide an object of class bekk_fit or 'bekk_spec'.") } xx <- process_object(spec) par <- coef_mat_diagonal(xx$theta,xx$N) if(xx$BEKK_valid==FALSE){ stop("Please provide a stationary BEKK model.") } sim_dat <- simulate_dbekk_c(c(xx$theta), nsim, xx$N) return(ts(sim_dat)) } #' @rdname simulate #' @export simulate.dbekka <- function(object, ..., nsim) { spec <- object if(is.null(nsim) || !is.numeric(nsim) || nsim < 1){ stop("Please provide an integer specifying the number of observations") } if(!inherits(spec,c("bekkSpec", "bekkFit"))){ stop("Please provide an object of class bekk_fit or 'bekk_spec'.") } xx <- process_object(spec) par <- coef_mat_asymm_diagonal(xx$theta,xx$N) if(xx$BEKK_valid==FALSE){ stop("Please provide a stationary BEKK model.") } #expected_signs sim_dat <- simulate_dbekka_c(c(xx$theta), nsim, xx$N, xx$signs, xx$expected_signs) return(ts(sim_dat)) } #' @rdname simulate #' @export simulate.sbekk <- function(object, ..., nsim) { spec <- object if(is.null(nsim) || !is.numeric(nsim) || nsim < 1){ stop("Please provide an integer specifying the number of observations") } if(!inherits(spec,c("bekkSpec", "bekkFit"))){ stop("Please provide an object of class bekk_fit or 'bekk_spec'.") } xx <- process_object(spec) par <- coef_mat_scalar(xx$theta,xx$N) if(xx$BEKK_valid==FALSE){ stop("Please provide a stationary BEKK model.") } sim_dat <- simulate_sbekk_c(c(xx$theta), 
nsim, xx$N) return(ts(sim_dat)) } #' @rdname simulate #' @export simulate.sbekka <- function(object, ..., nsim) { spec <- object if(is.null(nsim) || !is.numeric(nsim) || nsim < 1){ stop("Please provide an integer specifying the number of observations") } if(!inherits(spec,c("bekkSpec", "bekkFit"))){ stop("Please provide an object of class bekk_fit or 'bekk_spec'.") } xx <- process_object(spec) par <- coef_mat_asymm_scalar(xx$theta,xx$N) if(xx$BEKK_valid==FALSE){ stop("Please provide a stationary BEKK model.") } #expected_signs sim_dat <- simulate_sbekka_c(c(xx$theta), nsim, xx$N, xx$signs, xx$expected_signs) return(ts(sim_dat)) }
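# A hedged sketch of simulating from a specification instead of a fitted
# model: process_object.bekkSpec() (bekk_functions.R) requires both
# `init_values` and `N` in the spec, and the implied model must pass the
# stationarity check, otherwise simulate() stops. theta0 is a placeholder
# for a parameter vector of length 2*N^2 + N*(N+1)/2 (symmetric BEKK).
# theta0 <- matrix(..., ncol = 1)                  # hypothetical values
# spec <- bekk_spec(init_values = theta0, N = 2)
# set.seed(42)                                     # reproducible draws
# r_sim <- simulate(spec, nsim = 1000)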
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/bekk_sim.R
#' BEKK specification method #' #' @description Method for creating a N-dimensional BEKK model specification object prior to fitting and/or simulating. #' #' @param model A list containing the model type specification: Either "bekk" "dbekk" or "sbekk". #' Moreover it can be specified whether the model should be estimated allowing for asymmetric volatility structure. #' @param init_values initial values for \link{bekk_fit} during BHHH algorithm. It can be either a numerical vector of suitable dimension, 'NULL' (default) to use a simple grid search algorithm, or a character vector i.e. "random" to use a random starting value generator (set a seed in advance for reproducible results), or #' "simple" for relying on a simple initial values generator based on typical values for BEKK parameter found in the literature. If the object from this function is passed to \link{simulate}, "init_values" are used as parameters for data generating process. #' @param signs An N-dimensional vector consisting of "1" or "-1" to indicate the asymmetric effects to be considered. #' Setting the i-th element of the vector to "1" or "-1" means that the model takes into account additional volatility if the returns of the i-th column in the data matrix are either positive or negative. #' If "asymmetric = TRUE", the default is set to "rep(-1, N)" i.e. it is assumed that excess volatility occurs for all series if the returns are negative. #' @param N Integer specifying the dimension of the BEKK model. Only relevant when this object of class "bekkSpec"" is used for simulating BEKK processes by applying it to \link{simulate}. #' @return Returns a S3 class "bekkSpec" object containing the specifications of the model to be estimated. #' #' #' @examples #' \donttest{ #' #' data(StocksBonds) #' #' # Fitting a symmetric BEKK model using default starting values #' # - i.e. 
fixed values #' obj_spec_fixed <- bekk_spec(init_values = NULL) #' x1 <- bekk_fit(obj_spec_fixed, StocksBonds, QML_t_ratios = FALSE, #' max_iter = 50, crit = 1e-9) #'# Fitting a symmetric BEKK model using initial values originating from a #'# random grid search algorithm #' obj_spec_random <- bekk_spec(init_values = "random") #' x2 <- bekk_fit(obj_spec_random, StocksBonds, QML_t_ratios = FALSE, #' max_iter = 50, crit = 1e-9) #' summary(x1) #' summary(x2) #' plot(x1) #' plot(x2) #' # Fitting an asymmetric BEKK model with default starting values #' obj_spec_fix <- bekk_spec(model = list(type = "bekk", asymmetric = TRUE), #' init_values = NULL) #' x1 <- bekk_fit(obj_spec_fix, StocksBonds) #' obj_spec_random <- bekk_spec(model = list(type = "bekk", asymmetric = TRUE), #' init_values = "random") #' x2 <- bekk_fit(obj_spec_random, StocksBonds) #' summary(x1) #' summary(x2) #' } #' @export bekk_spec <- function(model = list(type = "bekk", asymmetric = FALSE), init_values = NULL, signs = NULL, N = NULL) { if(!is.logical(model$asymmetric) || is.null(model$asymmetric)){ stop('Please specify whether the model to be estimated is asymmetric or not.') } # Checking type match.arg(model$type, c("bekk", "dbekk", "sbekk")) # Checking inputs if(!is.null(N) & is.numeric(init_values) ) { if(nrow(init_values) != 2 * N^2 + N * (N + 1)/2 & model$type == "bekk" & model$asymmetric == FALSE) { stop('Number of initial parameter does not match dimension of data and model.') } if(nrow(init_values) != 3 * N^2 + N * (N + 1)/2 & model$type == "bekk" & model$asymmetric == TRUE) { stop('Number of initial parameter does not match dimension of data and model.') } if(nrow(init_values) != 2 * N + N * (N + 1)/2 & model$type == "dbekk" & model$asymmetric == FALSE) { stop('Number of initial parameter does not match dimension of data and model.') } if(nrow(init_values) != 3 * N + N * (N + 1)/2 & model$type == "dbekk" & model$asymmetric == TRUE) { stop('Number of initial parameter does not match dimension of data and model.') } if(nrow(init_values) != 2 + N * (N + 1)/2 & model$type == "sbekk" & model$asymmetric == FALSE) { stop('Number of initial parameter does not match dimension of data and model.') } if(nrow(init_values) != 3 + N * (N + 1)/2 & model$type == "sbekk" & model$asymmetric == TRUE) { stop('Number of initial parameter does not match dimension of data and model.') } } specification <- list(model = model, init_values = init_values, signs = signs, N = N) class(specification) <- 'bekkSpec' class(specification)[2] <- model$type if (model$asymmetric == TRUE) { if (!is.null(signs)) { signs <- matrix(signs, ncol = 1) if(any(abs(signs) != 1)){ stop('Elements of "signs" must be either "1" or "-1".') } } specification$model$signs <- signs class(specification)[2] <- paste(class(specification)[2], 'a', sep = "") } return(specification) }
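# The dimension checks above encode the parameter count of each model type:
# N*(N+1)/2 entries for the lower-triangular C, plus N^2 (full BEKK), N
# (diagonal) or 1 (scalar) entries per remaining parameter matrix, with one
# additional matrix in the asymmetric case. A small helper restating that
# arithmetic (not used by the package itself, for illustration only):
n_bekk_params <- function(type = c("bekk", "dbekk", "sbekk"), asymmetric = FALSE, N) {
  type <- match.arg(type)
  per_matrix <- switch(type, bekk = N^2, dbekk = N, sbekk = 1)
  n_matrices <- if (asymmetric) 3 else 2
  N * (N + 1) / 2 + n_matrices * per_matrix
}
# e.g. n_bekk_params("bekk", FALSE, N = 2) returns 11, matching the
# 2 * N^2 + N * (N + 1) / 2 condition checked above.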
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/bekk_spec.R
gridSearch_BEKK <- function(r) { N <- ncol(r) uncond_var <- crossprod(r)/nrow(r) A <- matrix(0, ncol = N, nrow = N) G <- matrix(0, ncol = N, nrow = N) C <- matrix(0, ncol = N, nrow = N) diag(A) <- 0.3 diag(G) <- 0.92 diag(C) <- 0.05*diag(uncond_var) for (i in 1:N){ for (j in seq(i,N)){ cij <- uncond_var[i, j]/sqrt(uncond_var[i, i]*uncond_var[j, j]) C[i,j] <- cij*sqrt(C[i, i]*C[j, j]) C[j,i] <- C[i, j] } } C = t(chol(C)) C0 = C[,1] if (N > 2) { for (i in 2:(N-1)){ C0 = c(C0, C[i:N, i]) } } C0 = c(C0, C[N, N]) for (i in 1:N) { for (j in 1:N) { if (i < j) { A[i, j] <- 0.03 if (j == N & i == 1) { A[i, j] <- -0.03 } G[i, j] <- -0.03 } else if (i > j) { A[i, j] <- 0.03 G[i, j] <- 0.03 } } } th0 = c(C0, c(A), c(G)) lik = loglike_bekk(th0, r) return(list(th0, lik)) } gridSearch_asymmetricBEKK <- function(r, signs) { N <- ncol(r) uncond_var <- crossprod(r)/nrow(r) A <- matrix(0, ncol = N, nrow = N) B <- matrix(0, ncol = N, nrow = N) G <- matrix(0, ncol = N, nrow = N) C <- matrix(0, ncol = N, nrow = N) #th0=numeric(2*n^2+n*(n+1)/2) diag(A) <- 0.3 diag(B) <- 0.3 diag(G) <- 0.92 diag(C) <- 0.05*diag(uncond_var) for (i in 1:N){ for (j in seq(i,N)){ cij <- uncond_var[i, j]/sqrt(uncond_var[i, i]*uncond_var[j, j]) C[i,j] <- cij*sqrt(C[i, i]*C[j, j]) C[j,i] <- C[i, j] } } C = t(chol(C)) C0 = C[,1] if (N > 2) { for (i in 2:(N-1)){ C0 = c(C0, C[i:N, i]) } } C0 = c(C0, C[N, N]) for (i in 1:N) { for (j in 1:N) { if (i < j) { A[i, j] <- 0.03 B[i, j] <- 0.03 if (j == N & i == 1) { A[i, j] <- -0.03 B[i, j] <- -0.03 } G[i, j] <- -0.03 } else if (i > j) { A[i, j] <- 0.03 B[i, j] <- 0.03 G[i, j] <- 0.03 } } } th0 = c(C0, c(A), c(B), c(G)) lik = loglike_asymm_bekk(th0, r, signs) return(list(th0, lik)) } gridSearch_dBEKK <- function(r) { N <- ncol(r) uncond_var <- crossprod(r)/nrow(r) A <- matrix(0, ncol = N, nrow = N) G <- matrix(0, ncol = N, nrow = N) C <- matrix(0, ncol = N, nrow = N) diag(A) <- 0.3 diag(G) <- 0.92 diag(C) <- 0.05*diag(uncond_var) for (i in 1:N){ for (j in seq(i,N)){ cij <- uncond_var[i, j]/sqrt(uncond_var[i, i]*uncond_var[j, j]) C[i,j] <- cij*sqrt(C[i, i]*C[j, j]) C[j,i] <- C[i, j] } } C = t(chol(C)) C0 = C[,1] if (N > 2) { for (i in 2:(N-1)){ C0 = c(C0, C[i:N, i]) } } C0 = c(C0, C[N, N]) for (i in 1:N) { for (j in 1:N) { if (i < j) { A[i, j] <- 0.03 if (j == N & i == 1) { A[i, j] <- -0.03 } G[i, j] <- -0.03 } else if (i > j) { A[i, j] <- 0.03 G[i, j] <- 0.03 } } } th0 = c(C0, diag(A), diag(G)) lik = loglike_dbekk(th0, r) return(list(th0, lik)) } gridSearch_asymmetricdBEKK <- function(r, signs) { N <- ncol(r) uncond_var <- crossprod(r)/nrow(r) A <- matrix(0, ncol = N, nrow = N) B <- matrix(0, ncol = N, nrow = N) G <- matrix(0, ncol = N, nrow = N) C <- matrix(0, ncol = N, nrow = N) #th0=numeric(2*n^2+n*(n+1)/2) diag(A) <- 0.3 diag(B) <- 0.3 diag(G) <- 0.92 diag(C) <- 0.05*diag(uncond_var) for (i in 1:N){ for (j in seq(i,N)){ cij <- uncond_var[i, j]/sqrt(uncond_var[i, i]*uncond_var[j, j]) C[i,j] <- cij*sqrt(C[i, i]*C[j, j]) C[j,i] <- C[i, j] } } C = t(chol(C)) C0 = C[,1] if (N > 2) { for (i in 2:(N-1)){ C0 = c(C0, C[i:N, i]) } } C0 = c(C0, C[N, N]) th0 = c(C0, diag(A), diag(B), diag(G)) lik = loglike_asymm_dbekk(th0, r, signs) return(list(th0, lik)) } gridSearch_sBEKK <- function(r) { N <- ncol(r) C <- matrix(0, ncol = N, nrow = N) uncond_var <- crossprod(r)/nrow(r) a <- 0.2 g <- 0.7 diag(C) <- 0.05*diag(uncond_var) for (i in 1:N){ for (j in seq(i,N)){ cij <- uncond_var[i, j]/sqrt(uncond_var[i, i]*uncond_var[j, j]) C[i,j] <- cij*sqrt(C[i, i]*C[j, j]) C[j,i] <- C[i, j] } } C = t(chol(C)) C0 = C[,1] 
if (N > 2) { for (i in 2:(N-1)){ C0 = c(C0, C[i:N, i]) } } C0 = c(C0, C[N, N]) th0 = c(C0, a, g) lik = loglike_sbekk(th0, r) return(list(th0, lik)) } gridSearch_asymmetricsBEKK <- function(r, signs) { N <- ncol(r) C <- matrix(0, ncol = N, nrow = N) uncond_var <- crossprod(r)/nrow(r) #th0=numeric(2*n^2+n*(n+1)/2) a <- 0.2 b <- 0.1 g <- 0.6 diag(C) <- 0.05*diag(uncond_var) for (i in 1:N){ for (j in seq(i,N)){ cij <- uncond_var[i, j]/sqrt(uncond_var[i, i]*uncond_var[j, j]) C[i,j] <- cij*sqrt(C[i, i]*C[j, j]) C[j,i] <- C[i, j] } } C = t(chol(C)) C0 = C[,1] if (N > 2) { for (i in 2:(N-1)){ C0 = c(C0, C[i:N, i]) } } C0 = c(C0, C[N, N]) th0 = c(C0, a, b, g) lik = loglike_asymm_sbekk(th0, r, signs) return(list(th0, lik)) } #H2 Code # gridSearch_BEKK <- function(r){ # N <- ncol(r) # # uncond_var <- crossprod(r)/nrow(r) # A <- matrix(0, ncol = N, nrow = N) # G <- matrix(0, ncol = N, nrow = N) # C <- matrix(0, ncol = N, nrow = N) # #th0=numeric(2*n^2+n*(n+1)/2) # # diag(A) <- 0.3 # diag(G) <- 0.92 # diag(C) <- 0.05*diag(uncond_var) # # # for (i in 1:N){ # for (j in seq(i,N)){ # # cij <- uncond_var[i, j]/sqrt(uncond_var[i, i]*uncond_var[j, j]) # C[i,j] <- cij*sqrt(C[i, i]*C[j, j]) # C[j,i] <- C[i, j] # # } # } # # C = t(chol(C)) # C0 = C[,1] # # if (N > 2) { # for (i in 2:(N-1)){ # C0 = c(C0, C[i:N, i]) # } # } # # C0 = c(C0, C[N, N]) # # # deterministic variance components # # th0 = c(C0, c(A), c(G)) # # # change elements of A and G and compute likelihood in each step # # likmax = -1e25 # # #print th0 # result= recursiveSearch_BEKK(r, C0, c(A), c(G), 1, th0, likmax) # th0=result[[1]] # likmax=result[[2]] # return(list(th0,likmax)) # # } # # # recursiveSearch_BEKK=function(r, c0, avec, gvec, index, thetaopt, likmax){ # # n=ncol(r) # start= -3 # endr = 3 # step = 6 # indextest=0 # # if (index == n^2){ # index = index + 2; # } else if (index < n^2){ # indextest = (index-1)/(n+1) # } else{ # indextest = (index-n^2-1)/(n+1) # } # # we have a diagonal element # if (indextest - floor(indextest) == 0){ # index = index + 1 # } # # for (i in seq(start, endr, step)){ # val = i/100 # # #set a and g respectively according to index, exclude diagonal elements # if (index <= n^2){ # avec[index] = val # } else{ # gvec[index-n^2] = val # } # #last element is excluded # if (index < (2*n^2-1)){ # # recursive step # result= recursiveSearch_BEKK(r, c0, avec, gvec, index+1, thetaopt, likmax) # thetaopt=result[[1]] # likmax=result[[2]] # } else{ # #final step # theta = c(c0,avec,gvec) # #likelihood # lik = loglike_bekk(theta,r) # if (lik > likmax){ # thetaopt = theta # likmax = lik # } # # } # } # # return(list(thetaopt,likmax)) # }
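# All gridSearch_* helpers above construct the starting C the same way:
# put 5% of the unconditional variances on the diagonal, impose the sample
# correlation structure on the off-diagonals, take the lower-triangular
# Cholesky factor and stack it column by column. A compact restatement of
# that construction (equivalent to the element-wise loops above):
starting_C_sketch <- function(r) {
  uncond_var <- crossprod(r) / nrow(r)
  D <- diag(sqrt(0.05 * diag(uncond_var)), ncol(r))
  R <- cov2cor(uncond_var)          # sample correlation matrix
  C <- t(chol(D %*% R %*% D))       # lower-triangular Cholesky factor
  C[lower.tri(C, diag = TRUE)]      # column-wise vech stacking
}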
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/bekk_starting_values.R
# Function to extract the conditional standard deviations from a bekk_fit object
extract_csd <- function(x) {
  csd <- matrix(NA, nrow = nrow(x$sigma_t), ncol = ncol(x$data))
  csd_names <- rep(NA, ncol(x$data))
  counter <- 1
  for (i in 1:ncol(x$data)) {
    csd[, i] <- x$sigma_t[, counter]
    csd_names[i] <- colnames(x$sigma_t)[counter]
    counter <- counter + ncol(x$data) - (i - 1)
  }
  colnames(csd) <- csd_names
  return(csd)
}
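# The counter arithmetic in extract_csd() exploits the vech (column-stacked
# lower triangle) layout of x$sigma_t: each column of the lower triangle is
# one entry shorter than the previous, so the standard-deviation entries sit
# at positions 1, 1 + N, 1 + N + (N - 1), ... For N = 3:
# vech order: (1,1) (2,1) (3,1) (2,2) (3,2) (3,3)
# positions:    1     2     3     4     5     6    -> diagonals at 1, 4, 6
# which(diag(3)[lower.tri(diag(3), diag = TRUE)] == 1)   # returns 1 4 6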
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/extract_csd.R
# Converts parameter vector into coefficients matrices of BEKK model coef_mat <- function(theta, N) { c_0 <- theta[1:(N * (N + 1)/2), ] a_0 <- theta[(N * (N+1)/2 + 1):(N^2 + (N * (N + 1)/2)), ] g_0 <- theta[((N^2 + (N * (N + 1)/2)) + 1):(2*N^2 + (N * (N + 1)/2)), ] a <- (matrix(a_0, N, N)) g <- (matrix(g_0, N, N)) c0 <- matrix(0, N, N) c0[lower.tri(c0, diag = TRUE)] <- c_0 return(list(c0 = c0, a = a, g = g)) } coef_mat_asymm <- function(theta, N) { c_0 <- theta[1:(N * (N + 1)/2), ] a_0 <- theta[(N * (N+1)/2 + 1):(N^2 + (N * (N + 1)/2)), ] b_0 <- theta[(N^2 + (N * (N + 1)/2) + 1):(2*N^2 + (N * (N + 1)/2)), ] g_0 <- theta[((2*N^2 + (N * (N + 1)/2)) + 1):(3*N^2 + (N * (N + 1)/2)), ] a <- (matrix(a_0, N, N)) b <- (matrix(b_0, N, N)) g <- (matrix(g_0, N, N)) c0 <- matrix(0, N, N) c0[lower.tri(c0, diag = TRUE)] <- c_0 return(list(c0 = c0, a = a, b = b, g = g)) } coef_mat_diagonal <- function(theta, N) { c_0 <- theta[1:(N * (N + 1)/2), ] a_0 <- theta[(N * (N+1)/2 + 1):(N + (N * (N + 1)/2)), ] g_0 <- theta[((N + (N * (N + 1)/2)) + 1):(2*N + (N * (N + 1)/2)), ] a <- diag(a_0) g <- diag(g_0) c0 <- matrix(0, N, N) c0[lower.tri(c0, diag = TRUE)] <- c_0 return(list(c0 = c0, a = a, g = g)) } coef_mat_asymm_diagonal <- function(theta, N) { c_0 <- theta[1:(N * (N + 1)/2), ] a_0 <- theta[(N * (N+1)/2 + 1):(N + (N * (N + 1)/2)), ] b_0 <- theta[(N + (N * (N + 1)/2) + 1):(2*N + (N * (N + 1)/2)), ] g_0 <- theta[((2*N + (N * (N + 1)/2)) + 1):(3*N + (N * (N + 1)/2)), ] a <- diag(a_0) b <- diag(b_0) g <- diag(g_0) c0 <- matrix(0, N, N) c0[lower.tri(c0, diag = TRUE)] <- c_0 return(list(c0 = c0, a = a, b = b, g = g)) } coef_mat_scalar <- function(theta, N) { c_0 <- theta[1:(N * (N + 1)/2), ] a <- theta[(N * (N+1)/2 + 1), ] g <- theta[(( (N * (N + 1)/2)) + 2), ] c0 <- matrix(0, N, N) c0[lower.tri(c0, diag = TRUE)] <- c_0 return(list(c0 = c0, a = a, g = g)) } coef_mat_asymm_scalar <- function(theta, N) { c_0 <- theta[1:(N * (N + 1)/2), ] a <- theta[(N * (N+1)/2 + 1), ] b <- theta[(N * (N+1)/2 + 2), ] g <- theta[(( (N * (N + 1)/2)) + 3), ] c0 <- matrix(0, N, N) c0[lower.tri(c0, diag = TRUE)] <- c_0 return(list(c0 = c0, a = a, b = b, g = g)) }
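# A hedged usage sketch for coef_mat(): theta is indexed as theta[..., ],
# so it must be a one-column matrix. For N = 2 the symmetric BEKK stacks
# vech(C) (3 values), then A and G column by column (4 values each). The
# numbers are hypothetical and only illustrate the layout:
# theta <- matrix(c(0.10, 0.05, 0.10,           # vech(C): C11, C21, C22
#                   0.30, 0.03, 0.03, 0.30,     # A, column-major
#                   0.92, -0.03, -0.03, 0.92),  # G, column-major
#                 ncol = 1)
# par <- coef_mat(theta, N = 2)
# par$c0   # lower-triangular 2 x 2
# par$a; par$g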
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/matrix_stuff.R
#' @import ggplot2 #' @import ggfortify #' @import reshape2 #' @export #' plot.backtest <- function(x, ...) { obs <- NULL type <- NULL time <- NULL V1 <- NULL Portfolio <- NULL if(is.null(x$portfolio_weights)) { if (inherits(x$VaR, c("ts"))) { colnames(x$VaR) = colnames(x$out_sample_returns) t_series <- as.data.frame(x$VaR) t_series$time <- time(x$VaR) t_series <- melt(t_series, id ="time") out_sample_returns <- as.data.frame(x$out_sample_returns) out_sample_returns$time <- time(x$out_sample_returns) out_sample_returns <- melt(out_sample_returns, id ="time") ggplot(t_series) + geom_point(data = out_sample_returns, mapping = aes(x = time, y = value, colour = "Returns"), show.legend = TRUE) + theme_bw() + xlab('') + ylab('Returns/VaR') + geom_line(aes(x = time, y = value, colour = "Estimated VaR")) + scale_color_manual(values = c('black', 'blue'), "") + facet_wrap(~variable, scales = 'free_y', ncol = 1)+ theme(legend.position="bottom", legend.title = element_blank()) }else if (inherits(x$VaR, c("xts","zoo"))) { names(x$VaR) = names(x$out_sample_returns) t_series <- as.data.frame(x$VaR) t_series$time <- time(x$VaR) t_series <- melt(t_series, id ="time") out_sample_returns <- as.data.frame(x$out_sample_returns) out_sample_returns$time <- time(x$out_sample_returns) out_sample_returns <- melt(out_sample_returns, id ="time") ggplot(t_series) + geom_point(data = out_sample_returns, mapping = aes(x = time, y = value, colour = "Returns"), show.legend = TRUE) + theme_bw() + xlab('') + ylab('Returns/VaR') + geom_line(aes(x = time, y = value, colour = "Estimated VaR")) + scale_color_manual(values = c('black', 'blue'), "") + facet_wrap(~variable, scales = 'free_y', ncol = 1)+ theme(legend.position="bottom", legend.title = element_blank()) } else { names(x$VaR) = names(x$out_sample_returns) x$VaR$obs <- 1:nrow(x$VaR) VaR <- melt(x$VaR, id = 'obs') x$out_sample_returns$obs <- 1:nrow(x$out_sample_returns) out_sample_returns <- melt(x$out_sample_returns, id = 'obs') ggplot(VaR) + geom_point(data = out_sample_returns, mapping = aes(x = obs, y = value, colour = "Returns"), show.legend = TRUE) + theme_bw() + xlab('') + ylab('VaR') + geom_line(aes(x = obs, y = value, colour = "Estimated VaR")) + scale_color_manual(values = c('black', 'blue'), "") + facet_wrap(~variable, scales = 'free_y', ncol = 1)+ theme(legend.position="bottom", legend.title = element_blank()) } }else { if (inherits(x$VaR, c("xts","zoo"))) { ggplot(data=x$VaR) + geom_point(data=x$out_sample_returns, mapping = aes(x = time(x$VaR), y = Portfolio, colour="Returns")) + geom_line(x$VaR , mapping = aes(x = time(x$VaR), y = Portfolio, colour="Estimated VaR")) + theme_bw() + xlab("") + ylab('Portfolio returns/VaR') + ggtitle('Portfolio Backtest')+ scale_color_manual(values = c('blue', 'black'), "") + theme(legend.position="bottom", legend.title = element_blank()) } else { ggplot(x$VaR) + geom_point(data = x$out_sample_returns, mapping = aes(x = 1:nrow(x$VaR), y = Portfolio, colour="Returns")) + theme_bw() + xlab('') + ylab('Returns/VaR') + geom_line(aes(x = 1:nrow(x$VaR), y = Portfolio, colour="Estimated VaR")) + ggtitle('Portfolio Backtest')+ scale_color_manual(values = c('blue', 'black'), "") + theme(legend.position="bottom", legend.title = element_blank()) } } }
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/plot.backtest.R
#' @import ggplot2
#' @import grid
#' @import gridExtra
#' @export
plot.bekkFit <- function(x, diagnostic = FALSE, ...){
  V1 <- NULL
  l <- NULL
  if (diagnostic == FALSE) {
    trianglePlotGrid <- function(plots){
      # takes a list of plots and returns a single plot in which the elements
      # of the list are arranged in a triangular grid; 'plots' should be a
      # list of 1 or 3 or 6 ... plots, with one plot in the top row
      ncols <- (-1 + sqrt(1 + 8*length(plots)))/2
      grobs <- list()
      k <- 1
      for (i in 1:ncols) {
        for (j in 1:ncols) {
          if (i <= j) {
            grobs[[length(grobs)+1]] <- plots[[k]]
            k <- k + 1
          } else {
            grobs[[length(grobs)+1]] <- nullGrob()
          }
        }
      }
      do.call("grid.arrange", c(grobs, ncol = ncols))
    }
    if (inherits(x$sigma_t, c("ts","zoo","xts"))) {
      plist <- vector(mode = "list", length = ncol(x$sigma_t))
      xxc <- colnames(x$sigma_t)
      for (i in 1:ncol(x$sigma_t)) {
        if (grepl('correlation', xxc[i])) {
          plist[[i]] <- suppressMessages(autoplot(x$sigma_t[,i]) + theme_bw() + ylab("") + ylim(-1,1) + geom_hline(yintercept = 0, col = 'red'))
        } else {
          plist[[i]] <- autoplot(x$sigma_t[,i]) + theme_bw() + ylab("")
        }
      }
    } else {
      xxc <- colnames(x$sigma_t)
      plist <- vector(mode = "list", length = ncol(x$sigma_t))
      for (i in 1:ncol(x$sigma_t)) {
        xx1 <- data.frame(x$sigma_t[,i])
        colnames(xx1) <- 'V1'
        if (grepl('correlation', xxc[i])) {
          plist[[i]] <- ggplot(xx1, aes(x = 1:nrow(x$sigma_t), y = V1)) + geom_line() + theme_bw() + xlab('') + ylab('') + ylim(-1,1) + geom_hline(yintercept = 0, col = 'red')
        } else {
          plist[[i]] <- ggplot(xx1, aes(x = 1:nrow(x$sigma_t), y = V1)) + geom_line() + theme_bw() + xlab('') + ylab('')
        }
      }
    }
    for (i in 1:ncol(x$sigma_t)) {
      plist[[i]] <- plist[[i]] + ggtitle(xxc[i])
    }
    trianglePlotGrid(plist)
  } else {
    dat <- data.frame(l = x$likelihood_iter)
    ggplot(dat, aes(x = 1:nrow(dat), y = l)) + geom_line() + ggtitle('BHHH-algorithm convergence') + xlab('Iteration') + ylab('log-likelihood') + theme_bw()
  }
}
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/plot.bekk.R
#' @import ggplot2 #' @import ggfortify #' @import reshape2 #' @export plot.var <- function(x, ...) { obs <- NULL V1 <- NULL lower <- NULL type <- NULL upper <- NULL if (any(class(x) == 'bekkFit')) { if(is.null(x$portfolio_weights)) { if (inherits(x$bekk$data, c("ts","xts","zoo"))) { autoplot(x$VaR) + theme_bw() + ylab('VaR') } else { x$VaR$obs <- 1:nrow(x$VaR) VaR <- melt(x$VaR, id = 'obs') ggplot(VaR) + geom_line(aes(x = obs, y = value)) + theme_bw() + xlab('') + ylab('VaR') + facet_wrap(~variable, scales = 'free_y', ncol = 1) } } else { if (inherits(x$bekk$data, c("ts","xts","zoo"))) { autoplot(x$VaR) + theme_bw() + ylab('VaR') + ggtitle('Portfolio VaR') } else { ggplot(x$VaR) + geom_line(aes(x = 1:nrow(x$VaR), y = V1)) + theme_bw() + xlab('') + ylab('VaR') + ggtitle('Portfolio VaR') } } } else if (any(class(x) == 'bekkForecast')) { if(is.null(x$portfolio_weights)) { sample <- x$VaR[1:(nrow(x$VaR)-x$n.ahead),] forc <- x$VaR[(nrow(x$VaR)-x$n.ahead+1):nrow(x$VaR),] cb_lower <- x$VaR_lower[(nrow(x$VaR)-x$n.ahead+1):nrow(x$VaR),] cb_upper <- x$VaR_upper[(nrow(x$VaR)-x$n.ahead+1):nrow(x$VaR),] sample$obs <- as.character(1:nrow(sample)) forc$obs <- as.character((nrow(sample)+1):(nrow(sample)+x$n.ahead)) cb_lower$obs <- as.character((nrow(sample)+1):(nrow(sample)+x$n.ahead)) cb_upper$obs <- as.character((nrow(sample)+1):(nrow(sample)+x$n.ahead)) sample <- sample[(nrow(sample)-4*x$n.ahead):nrow(sample),] sample$type <- as.factor('Sample') forc$type <- as.factor('Forecast') cb_lower$type <- as.factor('Forecast') cb_upper$type <- as.factor('Forecast') cb_l <- melt(cb_lower, id = c('obs', 'type')) cb_u <- melt(cb_upper, id = c('obs', 'type')) cb <- cbind(cb_l, cb_u$value) colnames(cb)[4:5] <- c('lower', 'upper') total <- rbind(sample, forc) VaR <- melt(total, id = c('obs', 'type')) cc <- merge(VaR, cb, all.x = TRUE, all.y = TRUE) if (x$n.ahead > 1) { ggplot(cc, aes(x = obs, y = value)) + geom_line(aes(y = lower, group = type, color = type, linetype = type), na.rm = TRUE, color = 'red') + geom_line(aes(y = upper, group = type, color = type, linetype = type), na.rm = TRUE, color = 'red') + geom_line(aes(group = type, color = type)) + geom_point(aes(shape = type)) + theme_bw() + xlab('') + ylab('VaR') + scale_color_manual(values = c('black', 'blue')) + facet_wrap(~variable, scales = 'free_y', ncol = 1) + theme(legend.position="bottom", legend.title = element_blank()) } else { ggplot(cc, aes(x = obs, y = value)) + geom_line(data = cc[cc$type == 'Sample',], aes(x = obs, y = value, group = type)) + geom_errorbar( aes(ymin=lower, ymax=upper), width=.2, color = 'red') + geom_point(aes(x = obs, y = value, shape = type), size = 2.5) + theme_bw() + xlab('') + ylab('VaR') + scale_color_manual(values = c('black', 'blue')) + facet_wrap(~variable, scales = 'free_y', ncol = 1) + theme(legend.position="bottom", legend.title = element_blank()) } } else { sample <- as.data.frame(x$VaR[1:(nrow(x$VaR)-x$n.ahead),]) forc <- as.data.frame(x$VaR[(nrow(x$VaR)-x$n.ahead+1):nrow(x$VaR),]) cb_lower <- as.data.frame(x$VaR_lower[(nrow(x$VaR)-x$n.ahead+1):nrow(x$VaR),]) cb_upper <- as.data.frame(x$VaR_upper[(nrow(x$VaR)-x$n.ahead+1):nrow(x$VaR),]) sample$obs <- as.character(1:nrow(sample)) forc$obs <- as.character((nrow(sample)+1):(nrow(sample)+x$n.ahead)) cb_lower$obs <- as.character((nrow(sample)+1):(nrow(sample)+x$n.ahead)) cb_upper$obs <- as.character((nrow(sample)+1):(nrow(sample)+x$n.ahead)) sample <- sample[(nrow(sample)-4*x$n.ahead):nrow(sample),] sample$type <- as.factor('Sample') forc$type <- as.factor('Forecast') 
colnames(sample)[1] <- colnames(forc)[1] <- colnames(cb_lower)[1] <- colnames(cb_upper)[1] <- 'V1' cb_lower$type <- as.factor('Forecast') cb_upper$type <- as.factor('Forecast') cb_l <- melt(cb_lower, id = c('obs', 'type')) cb_u <- melt(cb_upper, id = c('obs', 'type')) cb <- cbind(cb_l, cb_u$value) colnames(cb)[4:5] <- c('lower', 'upper') total <- rbind(sample, forc) VaR <- melt(total, id = c('obs', 'type')) cc <- merge(VaR, cb, all.x = TRUE, all.y = TRUE) if (x$n.ahead > 1) { ggplot(cc, aes(x = obs, y = value)) + geom_line(aes(y = lower, group = type, linetype = type), color = 'red', na.rm = TRUE) + geom_line(aes(y = upper, group = type, linetype = type), color = 'red', na.rm = TRUE) + geom_line(aes(group = type, color = type)) + geom_point(aes(shape = type)) + theme_bw() + xlab('') + ylab('VaR') + scale_color_manual(values = c('black', 'blue')) + theme(legend.position="bottom", legend.title = element_blank()) + ggtitle('Portfolio VaR') } else { ggplot(cc, aes(x = obs, y = value)) + geom_line(data = cc[cc$type == 'Sample',], aes(x = obs, y = value, group = type)) + geom_errorbar( aes(ymin=lower, ymax=upper), width=.2, color = 'red') + geom_point(aes(x = obs, y = value, shape = type), size = 2.5) + theme_bw() + xlab('') + ylab('VaR') + scale_color_manual(values = c('black', 'blue')) + theme(legend.position="bottom", legend.title = element_blank()) + ggtitle('Portfolio VaR') } } } }
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/plot.var.R
#' @import ggplot2
#' @import grid
#' @import gridExtra
#' @export
plot.virf <- function(x, ...){
  V1 <- NULL
  l <- NULL
  lower <- NULL
  upper <- NULL
  trianglePlotGrid <- function(plots){
    # takes a list of plots and returns a single plot in which the elements
    # of the list are arranged in a triangular grid; 'plots' should be a
    # list of 1 or 3 or 6 ... plots, with one plot in the top row
    ncols <- (-1 + sqrt(1 + 8*length(plots)))/2
    grobs <- list()
    k <- 1
    for (i in 1:ncols) {
      for (j in 1:ncols) {
        if (i <= j) {
          grobs[[length(grobs)+1]] <- plots[[k]]
          k <- k + 1
        } else {
          grobs[[length(grobs)+1]] <- nullGrob()
        }
      }
    }
    do.call("grid.arrange", c(grobs, ncol = ncols))
  }
  xxc <- colnames(x$VIRF)
  plist <- vector(mode = "list", length = ncol(x$VIRF))
  for (i in 1:ncol(x$VIRF)) {
    xx1 <- data.frame(x$VIRF[,i])
    xxci <- data.frame(x$VIRF_lower[,i], x$VIRF_upper[,i])
    colnames(xx1) <- 'V1'
    colnames(xxci) <- c("lower","upper")
    # correlation and variance panels receive identical styling here
    plist[[i]] <- ggplot(xx1, aes(x = 1:nrow(x$VIRF), y = V1)) + geom_line() + geom_ribbon(data = xxci, aes(ymin = lower, ymax = upper), alpha = 0.3) + theme_bw() + xlab('') + ylab('') + geom_hline(yintercept = 0, col = 'red')
  }
  for (i in 1:ncol(x$VIRF)) {
    plist[[i]] <- plist[[i]] + ggtitle(xxc[i])
  }
  trianglePlotGrid(plist)
}
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/plot.virf.R
#' @export summary.backtest <- function(object, ...) { bekkObject <- object$bekkFit if (any(class(bekkObject) == 'bekk')) { cat(paste("\n", "BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("BEKK backtesting results")), collapse = "") } else if (any(class(bekkObject) == 'bekka')) { cat(paste("\n", "Asymmetric BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("Asymmetric BEKK backtesting results")), collapse = "") } else if (any(class(bekkObject) == 'dbekk')) { cat(paste("\n", "Diagonal BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("Diagonal BEKK backtesting results")), collapse = "") } else if (any(class(bekkObject) == 'dbekka')) { cat(paste("\n", "Asymmetric diagonal BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("Asymmetric diagonal BEKK backtesting results")), collapse = "") } else if (any(class(bekkObject) == 'sbekk')) { cat(paste("\n", "Scalar BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("Scalar BEKK backtesting results")), collapse = "") } else if (any(class(bekkObject) == 'sbekka')) { cat(paste("\n", "Asymmetric scalar BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("Asymmetric scalar BEKK backtesting results")), collapse = "") } cat(underScore) cat("\nValue-at-risk confidence level: ") cat(as.character(object$p)) cat("\nWindow length: ") cat(as.character(object$window_length)) if(!is.null(object$portfolio_weights)){ res_hit <- data.frame(matrix(NA, ncol = ncol(object$VaR), nrow=1)) res_Kupiec <- data.frame(matrix(NA, ncol = ncol(object$VaR), nrow=2)) res_Christoffersen <- data.frame(matrix(NA, ncol = ncol(object$VaR), nrow=2)) colnames(res_hit)=c("") colnames(res_Kupiec)=c("") colnames(res_Christoffersen)=c("") row.names(res_hit)=c("") row.names(res_Kupiec)=c("Test:", "p-value:") row.names(res_Christoffersen)=c("Test:", "p-value:") res_Kupiec[,1]=object$backtests$LRuc res_Christoffersen[,1]=object$backtests$LRcc cat("\nPortfolio weights: ") cat(object$portfolio_weights) cat("\n") cat(underScore) res_hit[1,]=object$hit_rate cat(paste("\nHit rate:", round(res_hit,3), "\n", sep = " " )) cat("\nUnconditional coverage test of Kupiec: \n") print(res_Kupiec) cat("\nconditional coverage test of Christoffersen: \n") print(res_Christoffersen) }else{ res_hit <- data.frame(matrix(NA, ncol = ncol(object$VaR), nrow=1)) res_Kupiec <- data.frame(matrix(NA, ncol = ncol(object$VaR), nrow=2)) res_Christoffersen <- data.frame(matrix(NA, ncol = ncol(object$VaR), nrow=2)) colnames(res_hit)=colnames(object$out_sample_returns) colnames(res_Kupiec)=colnames(res_hit) colnames(res_Christoffersen)=colnames(res_hit) row.names(res_hit)=c("") row.names(res_Kupiec)=c("Test:", "p-value:") row.names(res_Christoffersen)=c("Test:", "p-value:") for(i in 1:ncol(object$VaR)){ res_Kupiec[,i]=object$backtests[[i]]$LRuc res_Christoffersen[,i]=object$backtests[[i]]$LRcc } cat("\nPortfolio weights: None\n") cat(underScore) res_hit[1,]=object$hit_rate cat("\nHit rates: \n") cat(underScore) print(res_hit) cat("\nUnconditional coverage test of Kupiec: \n") print(res_Kupiec) cat("\nconditional coverage test of Christoffersen: \n") print(res_Christoffersen) } cat("\n") } #' @export print.backtest <- function(x,...){ object <- x bekkObject <- object$bekkFit if (any(class(bekkObject) == 'bekk')) { cat(paste("\n", "BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("BEKK backtesting results")), collapse = "") } else if 
(any(class(bekkObject) == 'bekka')) { cat(paste("\n", "Asymmetric BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("Asymmetric BEKK backtesting results")), collapse = "") } else if (any(class(bekkObject) == 'dbekk')) { cat(paste("\n", "Diagonal BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("Diagonal BEKK backtesting results")), collapse = "") } else if (any(class(bekkObject) == 'dbekka')) { cat(paste("\n", "Asymmetric diagonal BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("Asymmetric diagonal BEKK backtesting results")), collapse = "") } else if (any(class(bekkObject) == 'sbekk')) { cat(paste("\n", "Scalar BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("Scalar BEKK backtesting results")), collapse = "") } else if (any(class(bekkObject) == 'sbekka')) { cat(paste("\n", "Asymmetric scalar BEKK backtesting results", "\n", sep = "")) underScore <- paste(rep("-", nchar("Asymmetric scalar BEKK backtesting results")), collapse = "") } cat(underScore) cat("\nValue-at-risk confidence level: ") cat(as.character(object$p)) cat("\nWindow length: ") cat(as.character(object$window_length)) if(!is.null(object$portfolio_weights)){ cat("\nPortfolio weights: ") cat(object$portfolio_weights) cat("\n") cat(underScore) cat("\nHit rate: ") cat(round(object$hit_rate,3)) }else{ cat("\nPortfolio weights: None\n") res_hit <- data.frame(matrix(NA, ncol = ncol(object$VaR), nrow=1)) colnames(res_hit)=colnames(object$out_sample_returns) row.names(res_hit)=c("") res_hit[1,]=round(object$hit_rate,3) cat(underScore) cat("\nHit rates: \n") print(res_hit) } cat("\n") }
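# For reference, the unconditional coverage test of Kupiec reported above
# compares the empirical hit rate with the nominal VaR level p: with x hits
# in n observations and pi_hat = x / n,
#   LR_uc = -2 * log( ((1 - p)^(n - x) * p^x) / ((1 - pi_hat)^(n - x) * pi_hat^x) ),
# asymptotically chi-squared with one degree of freedom. A minimal sketch of
# the formula (the values printed above come from the backtest object; this
# is only an illustration, not the package's implementation):
kupiec_lr_sketch <- function(hits, p) {
  n <- length(hits); x <- sum(hits); pi_hat <- x / n
  lr <- -2 * (log((1 - p)^(n - x) * p^x) - log((1 - pi_hat)^(n - x) * pi_hat^x))
  c(statistic = lr, p.value = 1 - stats::pchisq(lr, df = 1))
}
# kupiec_lr_sketch(hits = stats::rbinom(250, 1, 0.05), p = 0.05)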
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/summary.backtest.R
#' @export
summary.bekk <- function(object, ...) {
  bekkObject <- object
  cat(paste("\n", "BEKK estimation results", "\n", sep = ""))
  underScore <- paste(rep("-", nchar("BEKK estimation results")), collapse = "")
  cat(underScore)
  cat("\nLog-likelihood: ")
  cat(bekkObject$log_likelihood)
  cat("\nBEKK model stationary: ")
  cat(bekkObject$BEKK_valid)
  cat("\nNumber of BHHH iterations: ")
  cat(bekkObject$iter)
  cat("\nAIC: ")
  cat(bekkObject$AIC)
  cat("\nBIC: ")
  cat(bekkObject$BIC)
  cat("\nEstimated parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0)
  cat("\nA \n")
  print(bekkObject$A)
  cat("\nG \n")
  print(bekkObject$G)
  cat("\nStandard errors of parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0_sd)
  cat("\nA \n")
  print(bekkObject$A_sd)
  cat("\nG \n")
  print(bekkObject$G_sd)
  cat("\n")
}

#' @export
summary.bekka <- function(object, ...) {
  bekkObject <- object
  cat(paste("\n", "Asymmetric BEKK estimation results", "\n", sep = ""))
  underScore <- paste(rep("-", nchar("Asymmetric BEKK estimation results")), collapse = "")
  cat(underScore)
  cat("\nLog-likelihood: ")
  cat(bekkObject$log_likelihood)
  cat("\nBEKK model stationary: ")
  cat(bekkObject$BEKK_valid)
  cat("\nNumber of BHHH iterations: ")
  cat(bekkObject$iter)
  cat("\nAIC: ")
  cat(bekkObject$AIC)
  cat("\nBIC: ")
  cat(bekkObject$BIC)
  cat("\nEstimated parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0)
  cat("\nA \n")
  print(bekkObject$A)
  cat("\nB \n")
  print(bekkObject$B)
  cat("\nG \n")
  print(bekkObject$G)
  cat("\nStandard errors of parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0_sd)
  cat("\nA \n")
  print(bekkObject$A_sd)
  cat("\nB \n")
  print(bekkObject$B_sd)
  cat("\nG \n")
  print(bekkObject$G_sd)
  cat("\n")
}

#' @export
summary.dbekk <- function(object, ...) {
  bekkObject <- object
  cat(paste("\n", "Diagonal BEKK estimation results", "\n", sep = ""))
  underScore <- paste(rep("-", nchar("Diagonal BEKK estimation results")), collapse = "")
  cat(underScore)
  cat("\nLog-likelihood: ")
  cat(bekkObject$log_likelihood)
  cat("\nDiagonal BEKK model stationary: ")
  cat(bekkObject$BEKK_valid)
  cat("\nNumber of BHHH iterations: ")
  cat(bekkObject$iter)
  cat("\nAIC: ")
  cat(bekkObject$AIC)
  cat("\nBIC: ")
  cat(bekkObject$BIC)
  cat("\nEstimated parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0)
  cat("\nA \n")
  print(bekkObject$A)
  cat("\nG \n")
  print(bekkObject$G)
  cat("\nStandard errors of parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0_sd)
  cat("\nA \n")
  print(bekkObject$A_sd)
  cat("\nG \n")
  print(bekkObject$G_sd)
  cat("\n")
}

#' @export
summary.dbekka <- function(object, ...) {
  bekkObject <- object
  cat(paste("\n", "Asymmetric diagonal BEKK estimation results", "\n", sep = ""))
  underScore <- paste(rep("-", nchar("Asymmetric diagonal BEKK estimation results")), collapse = "")
  cat(underScore)
  cat("\nLog-likelihood: ")
  cat(bekkObject$log_likelihood)
  cat("\nDiagonal BEKK model stationary: ")
  cat(bekkObject$BEKK_valid)
  cat("\nNumber of BHHH iterations: ")
  cat(bekkObject$iter)
  cat("\nAIC: ")
  cat(bekkObject$AIC)
  cat("\nBIC: ")
  cat(bekkObject$BIC)
  cat("\nEstimated parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0)
  cat("\nA \n")
  print(bekkObject$A)
  cat("\nB \n")
  print(bekkObject$B)
  cat("\nG \n")
  print(bekkObject$G)
  cat("\nStandard errors of parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0_sd)
  cat("\nA \n")
  print(bekkObject$A_sd)
  cat("\nB \n")
  print(bekkObject$B_sd)
  cat("\nG \n")
  print(bekkObject$G_sd)
  cat("\n")
}

#' @export
summary.sbekk <- function(object, ...) {
  bekkObject <- object
  cat(paste("\n", "Scalar BEKK estimation results", "\n", sep = ""))
  underScore <- paste(rep("-", nchar("Scalar BEKK estimation results")), collapse = "")
  cat(underScore)
  cat("\nLog-likelihood: ")
  cat(bekkObject$log_likelihood)
  cat("\nScalar BEKK model stationary: ")
  cat(bekkObject$BEKK_valid)
  cat("\nNumber of BHHH iterations: ")
  cat(bekkObject$iter)
  cat("\nAIC: ")
  cat(bekkObject$AIC)
  cat("\nBIC: ")
  cat(bekkObject$BIC)
  cat("\nEstimated parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0)
  cat("\na \n")
  print(bekkObject$a)
  cat("\ng \n")
  print(bekkObject$g)
  cat("\nStandard errors of parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0_sd)
  cat("\na \n")
  print(bekkObject$a_sd)
  cat("\ng \n")
  print(bekkObject$g_sd)
  cat("\n")
}

#' @export
summary.sbekka <- function(object, ...) {
  bekkObject <- object
  cat(paste("\n", "Asymmetric scalar BEKK estimation results", "\n", sep = ""))
  underScore <- paste(rep("-", nchar("Asymmetric scalar BEKK estimation results")), collapse = "")
  cat(underScore)
  cat("\nLog-likelihood: ")
  cat(bekkObject$log_likelihood)
  cat("\nScalar BEKK model stationary: ")
  cat(bekkObject$BEKK_valid)
  cat("\nNumber of BHHH iterations: ")
  cat(bekkObject$iter)
  cat("\nAIC: ")
  cat(bekkObject$AIC)
  cat("\nBIC: ")
  cat(bekkObject$BIC)
  cat("\nEstimated parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0)
  cat("\na \n")
  print(bekkObject$a)
  cat("\nb \n")
  print(bekkObject$b)
  cat("\ng \n")
  print(bekkObject$g)
  cat("\nStandard errors of parameter matrices: \n")
  cat("\nC \n")
  print(bekkObject$C0_sd)
  cat("\na \n")
  print(bekkObject$a_sd)
  cat("\nb \n")
  print(bekkObject$b_sd)
  cat("\ng \n")
  print(bekkObject$g_sd)
  cat("\n")
}
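# Illustrative sketch (not part of the package source): the six methods above
# are standard S3 summary() methods, so they are reached via method dispatch on
# the class of the fitted object. Assuming the usual BEKKs workflow:
#
#   spec <- bekk_spec()                  # symmetric BEKK specification
#   fit  <- bekk_fit(spec, StocksBonds)  # fitted object of class 'bekk'
#   summary(fit)                         # dispatches to summary.bekk()
#
# An asymmetric specification would instead yield an object of class 'bekka'
# and dispatch to summary.bekka(), and analogously for the diagonal ('dbekk',
# 'dbekka') and scalar ('sbekk', 'sbekka') variants.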
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/summary.bekk.R
#' @export
print.var <- function(x, ...){
  object <- x
  bekkObject <- object$bekk
  if (any(class(bekkObject) == 'bekk')) {
    cat(paste("\n", "BEKK VaR results", "\n", sep = ""))
    underScore <- paste(rep("-", nchar("BEKK VaR results")), collapse = "")
  } else if (any(class(bekkObject) == 'bekka')) {
    cat(paste("\n", "Asymmetric BEKK VaR results", "\n", sep = ""))
    underScore <- paste(rep("-", nchar("Asymmetric BEKK VaR results")), collapse = "")
  } else if (any(class(bekkObject) == 'dbekk')) {
    cat(paste("\n", "Diagonal BEKK VaR results", "\n", sep = ""))
    underScore <- paste(rep("-", nchar("Diagonal BEKK VaR results")), collapse = "")
  } else if (any(class(bekkObject) == 'dbekka')) {
    cat(paste("\n", "Asymmetric diagonal BEKK VaR results", "\n", sep = ""))
    underScore <- paste(rep("-", nchar("Asymmetric diagonal BEKK VaR results")), collapse = "")
  } else if (any(class(bekkObject) == 'sbekk')) {
    cat(paste("\n", "Scalar BEKK VaR results", "\n", sep = ""))
    underScore <- paste(rep("-", nchar("Scalar BEKK VaR results")), collapse = "")
  } else if (any(class(bekkObject) == 'sbekka')) {
    cat(paste("\n", "Asymmetric scalar BEKK VaR results", "\n", sep = ""))
    underScore <- paste(rep("-", nchar("Asymmetric scalar BEKK VaR results")), collapse = "")
  }
  cat(underScore)
  cat("\nValue-at-risk confidence level: ")
  cat(as.character(object$p))
  if(!is.null(object$portfolio_weights)){
    cat("\nPortfolio weights: ")
    cat(object$portfolio_weights)
  } else {
    cat("\nPortfolio weights: None\n")
  }
}
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/summary.var.R
#' Estimating multivariate volatility impulse response functions (VIRF) for BEKK models
#'
#' @description Method for estimating VIRFs of N-dimensional BEKK models. Currently, only VIRFs for symmetric BEKK models are implemented.
#'
#' @param x An object of class "bekkfit" from function \link{bekk_fit}.
#' @param time Time instance for which the VIRFs are calculated.
#' @param q A number specifying the quantile of the empirical residual distribution that is used as the shock from which the VIRFs are generated.
#' @param index_series An integer giving the index of the series to which the shock is applied.
#' @param n.ahead An integer defining the number of periods for which the VIRFs are generated.
#' @param ci A number defining the confidence level for the confidence bands.
#' @param time_shock Boolean indicating whether the estimated residuals at the date specified by "time" shall be used as the shock.
#' @return Returns an object of class "virf".
#' @references Hafner CM, Herwartz H (2006). Volatility impulse responses for multivariate GARCH models: An exchange rate illustration. Journal of International Money and Finance, 25, 719–740.
#' @examples
#' \donttest{
#'
#' data(StocksBonds)
#' obj_spec <- bekk_spec()
#' x1 <- bekk_fit(obj_spec, StocksBonds, QML_t_ratios = FALSE, max_iter = 50, crit = 1e-9)
#'
#' # 500 day ahead VIRFs and 90% CI for a shock in the 1% quantile of Bonds (i.e. series = 2);
#' # the shock is supposed to occur at day 500
#' x2 <- virf(x1, time = 500, q = 0.01, index_series = 2, n.ahead = 500, ci = 0.90)
#' plot(x2)
#' }
#' @import xts
#' @import stats
#' @import numDeriv
#' @export
virf <- function(x, time = 1, q = 0.05, index_series = 1, n.ahead = 10, ci = 0.9, time_shock = FALSE){
  if (!inherits(x, 'bekkFit')) {
    stop('Please provide an object of class "bekkFit" for x')
  }
  if (x$spec$model$asymmetric == TRUE) {
    stop('VIRFs are implemented only for symmetric BEKK models.')
  }
  if(!(n.ahead %% 1 == 0) || n.ahead < 1){
    stop('Please provide a positive integer for n.ahead')
  }
  if(!(index_series %% 1 == 0) || index_series < 1){
    stop('Please provide a positive integer for index_series')
  }
  if(index_series > ncol(x$data)){
    stop('Total number of indices in the data is lower than index_series')
  }
  if(!is.numeric(time)){
    if(!is.numeric(x$data[time]) || nrow(x$data[time]) == 0){
      stop('Provided date object is not included in data')
    }
  } else {
    if((time %% 1 != 0 || time < 1)){
      stop('Please provide a positive integer or a date object for time')
    } else if(!(time %% 1 != 0 || time < 1) && time > nrow(x$data)){
      stop('Total number of observations is exceeded by time')
    }
  }
  if(!is.numeric(q) || q < 0 || q > 1 || length(q) > 1 || length(q) == 0){
    stop('Please provide a number in the interval (0,1) for q.')
  }
  UseMethod('virf')
}

#' @export
virf.bekk <- function(x, time = 1, q = 0.05, index_series = 1, n.ahead = 10, ci = 0.9, time_shock = FALSE) {
  N <- ncol(x$data)
  data <- x$data
  H <- matrix(x$H_t[time,], N, N)

  # get quantiles of the estimated residuals to construct the shock vector
  residuals = x$e_t
  if(!time_shock){
    shocks = matrix(0, nrow = 1, ncol = N)
    shocks[index_series] = sapply(q, FUN = quantile, x = as.matrix(residuals[, index_series]))
    for(i in 1:N){
      if(i == index_series){
        shocks[index_series] = sapply(q, FUN = quantile, x = as.matrix(residuals[, index_series]))
      } else {
        shocks[i] = sapply(0.5, FUN = quantile, x = as.matrix(residuals[, i])) * 0
      }
    }
  } else {
    shocks = matrix(x$e_t[time,], nrow = 1, ncol = N)
  }

  VIRF = virf_bekk(H, x$theta, matrix(shocks, ncol = N, nrow = 1), n.ahead)

  #dupl <- duplication_mat(N)
  #elim <- elimination_mat(N)
  score_final = score_bekk(x$theta, x$data)
  score_outer = t(score_final) %*% score_final
  # s1 = eigen_value_decomposition(s1_temp)
  hesse_final = solve(hesse_bekk(x$theta, x$data))
  #s1_temp = solve(hesse_final)
  if(x$QML_t_ratios == TRUE){
    Sigma_temp = hesse_final %*% score_outer %*% hesse_final
  } else {
    Sigma_temp = solve(score_outer)
  }
  s1_temp = function(th){
    virf_bekk(H, th, matrix(shocks, ncol = N, nrow = 1), n.ahead)
  }
  th <- x$theta
  d_virf = jacobian(s1_temp, th)
  s1_temp = d_virf %*% Sigma_temp %*% t(d_virf)
  # s1 = s1_temp*0
  # counter = 1
  # while(counter < nrow(s1)){
  #   s1[counter:(counter+n.ahead-1),counter:(counter+n.ahead-1)] = s1_temp[counter:(counter+n.ahead-1),counter:(counter+n.ahead-1)]
  #   counter = counter + n.ahead
  # }
  s1 = sqrt(abs(diag(s1_temp))) * qnorm(ci)
  #return(s1)
  #print(det(d_virf%*%hesse_final%*%t(d_virf)))
  VIRF_lower = VIRF - matrix(s1, nrow = n.ahead, ncol = N*(N+1)/2)
  VIRF_upper = VIRF + matrix(s1, nrow = n.ahead, ncol = N*(N+1)/2)
  # for (i in 1:nrow(VIRF)) {
  #   tm <- matrix((dupl%*%VIRF[i,]), N, N, byrow = T)
  #   tm2 <- sqrt(solve(diag(abs(diag(tm)))))%*%tm%*%sqrt(solve(diag(abs(diag(tm)))))
  #   diag(tm2) <- sqrt(abs(diag(tm)))%*%solve(diag(abs(diag(tm))))%*%diag(diag(tm))
  #   VIRF[i,] <- elim%*%c(tm2)
  # }
  VIRF <- as.data.frame(VIRF)
  VIRF_lower <- as.data.frame(VIRF_lower)
  VIRF_upper <- as.data.frame(VIRF_upper)
  for(i in 1:ncol(VIRF)){
    colnames(VIRF)[i] <- paste("VIRF for", colnames(x$sigma_t)[i], sep = " ")
  }
  colnames(VIRF) <- gsub("Conditional", "conditional", colnames(VIRF))
  colnames(VIRF) <- gsub("correlation", "covariance", colnames(VIRF))
  colnames(VIRF) <- gsub("standard deviation", "variance", colnames(VIRF))

  result <- list(VIRF = VIRF,
                 VIRF_upper = VIRF_upper,
                 VIRF_lower = VIRF_lower,
                 N = N,
                 time = time,
                 q = q,
                 index_series = index_series,
                 x = x)
  class(result) <- c('virf', 'bekkFit', 'bekk')
  return(result)
}

#' @export
virf.dbekk <- function(x, time = 1, q = 0.05, index_series = 1, n.ahead = 10, ci = 0.9, time_shock = FALSE) {
  N <- ncol(x$data)
  data <- x$data
  H <- matrix(x$H_t[time,], N, N)

  # get quantiles of the estimated residuals to construct the shock vector
  residuals = x$e_t
  shocks = matrix(0, nrow = 1, ncol = N)
  if(!time_shock){
    shocks[index_series] = sapply(q, FUN = quantile, x = as.matrix(residuals[, index_series]))
    for(i in 1:N){
      if(i == index_series){
        shocks[index_series] = sapply(q, FUN = quantile, x = as.matrix(residuals[, index_series]))
      } else {
        shocks[i] = sapply(0.5, FUN = quantile, x = as.matrix(residuals[, i])) * 0
      }
    }
  } else {
    shocks = matrix(x$e_t[time,], nrow = 1, ncol = N)
  }

  VIRF = virf_dbekk(H, x$theta, matrix(shocks, ncol = N, nrow = 1), n.ahead)

  hesse_final = solve(hesse_dbekk(x$theta, x$data))
  score_final = score_dbekk(x$theta, x$data)
  score_outer = t(score_final) %*% score_final
  if(x$QML_t_ratios == TRUE){
    Sigma_temp = hesse_final %*% score_outer %*% hesse_final
  } else {
    Sigma_temp = solve(score_outer)
  }
  s1_temp = function(th){
    virf_dbekk(H, th, matrix(shocks, ncol = N, nrow = 1), n.ahead)
  }
  th <- x$theta
  d_virf = jacobian(s1_temp, th)
  s1_temp = d_virf %*% Sigma_temp %*% t(d_virf)
  # s1 = s1_temp*0
  # counter = 1
  # while(counter < nrow(s1)){
  #   s1[counter:(counter+n.ahead-1),counter:(counter+n.ahead-1)] = s1_temp[counter:(counter+n.ahead-1),counter:(counter+n.ahead-1)]
  #   counter = counter + n.ahead
  # }
  s1 = sqrt(diag(s1_temp)) * qnorm(ci)
  #return(s1)
  #print(det(d_virf%*%hesse_final%*%t(d_virf)))
  VIRF_lower = VIRF - matrix(s1, nrow = n.ahead, ncol = N*(N+1)/2)
  VIRF_upper = VIRF + matrix(s1, nrow = n.ahead, ncol = N*(N+1)/2)
  # for (i in 1:nrow(VIRF)) {
  #   tm <- matrix((dupl%*%VIRF[i,]), N, N, byrow = T)
  #   tm2 <- sqrt(solve(diag(abs(diag(tm)))))%*%tm%*%sqrt(solve(diag(abs(diag(tm)))))
  #   diag(tm2) <- sqrt(abs(diag(tm)))%*%solve(diag(abs(diag(tm))))%*%diag(diag(tm))
  #   VIRF[i,] <- elim%*%c(tm2)
  # }
  VIRF <- as.data.frame(VIRF)
  VIRF_lower <- as.data.frame(VIRF_lower)
  VIRF_upper <- as.data.frame(VIRF_upper)
  for(i in 1:ncol(VIRF)){
    colnames(VIRF)[i] <- paste("VIRF for", colnames(x$sigma_t)[i], sep = " ")
  }
  colnames(VIRF) <- gsub("Conditional", "conditional", colnames(VIRF))
  colnames(VIRF) <- gsub("correlation", "covariance", colnames(VIRF))
  colnames(VIRF) <- gsub("standard deviation", "variance", colnames(VIRF))

  result <- list(VIRF = VIRF,
                 VIRF_upper = VIRF_upper,
                 VIRF_lower = VIRF_lower,
                 N = N,
                 time = time,
                 q = q,
                 index_series = index_series,
                 x = x)
  class(result) <- c('virf', 'bekkFit', 'dbekk')
  return(result)
}

#' @export
virf.sbekk <- function(x, time = 1, q = 0.05, index_series = 1, n.ahead = 10, ci = 0.9, time_shock = FALSE) {
  N <- ncol(x$data)
  data <- x$data
  H <- matrix(x$H_t[time,], N, N)

  # get quantiles of the estimated residuals to construct the shock vector
  residuals = x$e_t
  shocks = matrix(0, nrow = 1, ncol = N)
  if(!time_shock){
    shocks[index_series] = sapply(q, FUN = quantile, x = as.matrix(residuals[, index_series]))
    for(i in 1:N){
      if(i == index_series){
        shocks[index_series] = sapply(q, FUN = quantile, x = as.matrix(residuals[, index_series]))
      } else {
        shocks[i] = sapply(0.5, FUN = quantile, x = as.matrix(residuals[, i])) * 0
      }
    }
  } else {
    shocks = matrix(x$e_t[time,], nrow = 1, ncol = N)
  }

  VIRF = virf_sbekk(H, x$theta, matrix(shocks, ncol = N, nrow = 1), n.ahead)

  #dupl <- duplication_mat(N)
  #elim <- elimination_mat(N)
  # score_final = score_bekk(x$theta, x$data)
  # s1_temp = solve(t(score_final) %*% score_final)
  # s1 = eigen_value_decomposition(s1_temp)
  #hesse_final = solve(hesse_sbekk(x$theta, x$data))
  #s1_temp = solve(hesse_final)
  hesse_final = solve(hesse_sbekk(x$theta, x$data))
  score_final = score_sbekk(x$theta, x$data)
  score_outer = t(score_final) %*% score_final
  if(x$QML_t_ratios == TRUE){
    Sigma_temp = hesse_final %*% score_outer %*% hesse_final
  } else {
    Sigma_temp = solve(score_outer)
  }
  s1_temp = function(th){
    virf_sbekk(H, th, matrix(shocks, ncol = N, nrow = 1), n.ahead)
  }
  th <- x$theta
  d_virf = jacobian(s1_temp, th)
  s1_temp = d_virf %*% Sigma_temp %*% t(d_virf)
  # s1 = s1_temp*0
  # counter = 1
  # while(counter < nrow(s1)){
  #   s1[counter:(counter+n.ahead-1),counter:(counter+n.ahead-1)] = s1_temp[counter:(counter+n.ahead-1),counter:(counter+n.ahead-1)]
  #   counter = counter + n.ahead
  # }
  s1 = sqrt(diag(s1_temp)) * qnorm(ci)
  #return(s1)
  #print(det(d_virf%*%hesse_final%*%t(d_virf)))
  VIRF_lower = VIRF - matrix(s1, nrow = n.ahead, ncol = N*(N+1)/2)
  VIRF_upper = VIRF + matrix(s1, nrow = n.ahead, ncol = N*(N+1)/2)
  # for (i in 1:nrow(VIRF)) {
  #   tm <- matrix((dupl%*%VIRF[i,]), N, N, byrow = T)
  #   tm2 <- sqrt(solve(diag(abs(diag(tm)))))%*%tm%*%sqrt(solve(diag(abs(diag(tm)))))
  #   diag(tm2) <- sqrt(abs(diag(tm)))%*%solve(diag(abs(diag(tm))))%*%diag(diag(tm))
  #   VIRF[i,] <- elim%*%c(tm2)
  # }
  VIRF <- as.data.frame(VIRF)
  VIRF_lower <- as.data.frame(VIRF_lower)
  VIRF_upper <- as.data.frame(VIRF_upper)
  for(i in 1:ncol(VIRF)){
    colnames(VIRF)[i] <- paste("VIRF for", colnames(x$sigma_t)[i], sep = " ")
  }
  colnames(VIRF) <- gsub("Conditional", "conditional", colnames(VIRF))
  colnames(VIRF) <- gsub("correlation", "covariance", colnames(VIRF))
  colnames(VIRF) <- gsub("standard deviation", "variance", colnames(VIRF))

  result <- list(VIRF = VIRF,
                 VIRF_upper = VIRF_upper,
                 VIRF_lower = VIRF_lower,
                 N = N,
                 time = time,
                 q = q,
                 index_series = index_series,
                 x = x)
  class(result) <- c('virf', 'bekkFit', 'sbekk')
  return(result)
}
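# Illustrative sketch (not part of the package source): the confidence bands in
# the methods above follow a delta-method construction. With Sigma the estimated
# covariance of the parameter vector theta and J the Jacobian of the stacked
# VIRF with respect to theta, the pointwise standard errors are the square roots
# of the diagonal of J %*% Sigma %*% t(J). A minimal numeric analogue, where g()
# stands in for the VIRF map and Sigma is an assumed parameter covariance:
#
#   g  <- function(th) c(th[1]^2, th[1] * th[2])
#   th <- c(0.3, 0.5)
#   Sigma <- diag(c(0.01, 0.02))
#   J  <- numDeriv::jacobian(g, th)
#   se <- sqrt(diag(J %*% Sigma %*% t(J)))
#   band <- cbind(g(th) - qnorm(0.9) * se, g(th) + qnorm(0.9) * se)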
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/virf.R
.onLoad <- function(libname, pkgname) {
  # Limit OpenMP threads to comply with the CRAN thread-usage policy
  Sys.setenv("OMP_THREAD_LIMIT" = 1)
}

.onUnload <- function(libpath) {
  library.dynam.unload("BEKKs", libpath)
  invisible(NULL)
}
/scratch/gouwar.j/cran-all/cranData/BEKKs/R/zzz.R
# Additional Functions ----

## Permute Changepoint Labels ----
Permute_CP <- function(output, n_class, max_cp, max_beta){
  # output = output from JAGS
  # n_class = number of latent classes
  # max_cp = maximum number of changepoints
  # max_beta = maximum number of intercept + slope parameters

  ### Setup -----
  n_iter = dim(output$sigma2_error)[2]
  n_chain = dim(output$sigma2_error)[3]

  ### Permute changepoint labels, if necessary, so they are ordered -----
  for(i in 1:n_iter){
    for(j in 1:n_chain){
      for(k in 1:n_class){
        if(output$n_cp[k,i,j] > 0){
          ind <- c(1:max_cp)[as.logical(output$cp_indicator[k,,i,j])]
          my_order <- order(output$cp[k,ind,i,j])
          output$cp_indicator[k,,i,j] <- c(output$cp_indicator[k,ind[my_order],i,j], output$cp_indicator[k,-ind,i,j])
          output$cp[k,,i,j] <- c(output$cp[k,ind[my_order],i,j], output$cp[k,-ind,i,j])
          output$beta[k,3:max_beta,i,j] <- c(output$beta[k,ind[my_order]+2,i,j], output$beta[k,c(3:max_beta)[-ind],i,j])
        }
      }
    }
  }
  return(output)
}

## Get Mode (for initializing number of changepoints) ----
getmode <- function(v){
  # v = vector
  uniqv <- unique(v)
  uniqv[which.max(tabulate(match(v, uniqv)))]
}

## Realign ECR Function ----
Realign_ECR <- function(output, n_class, model=NULL){
  # output = output from JAGS
  # n_class = number of latent classes
  # model = model type ("PREMM", "CI_PREMM_Full", "CI_PREMM_Class_Predictive" or "CI_PREMM_Outcome_Predictive")

  ### Warnings -----
  if(is.null(model)) stop('Specify model type. Must be \'PREMM\', \'CI_PREMM_Full\', \'CI_PREMM_Class_Predictive\', or \'CI_PREMM_Outcome_Predictive\'')

  ### Setup -----
  n_subj <- dim(output$class)[1]
  n_iter <- dim(output$class)[2]
  n_chain <- dim(output$class)[3]
  if(model=="CI_PREMM_Full" | model=="CI_PREMM_Class_Predictive"){class_pred <- TRUE} else {class_pred <- FALSE}
  ## For class_pred == TRUE
  if(class_pred == TRUE){output$class_orig <- output$class}

  ### Align for the first time -----
  class_matrix <- matrix(nrow=n_iter*n_chain, ncol=n_subj)
  for(i in 1:n_subj){
    class_matrix[,i] <- as.vector(output$class[i,,])
  }
  permutes <- label.switching::ecr.iterative.1(class_matrix, n_class)$permutations
  # Note: the chain-wise split below assumes the three chains used throughout the package
  perm_chain <- list()
  perm_chain[[1]] <- permutes[1:n_iter,]
  perm_chain[[2]] <- permutes[(n_iter+1):(2*n_iter),]
  perm_chain[[3]] <- permutes[(2*n_iter+1):(3*n_iter),]
  for(i in 1:n_chain){
    for(j in 1:n_iter){
      # this loop will ignore parameters that don't exist in output
      ## Initialization-Only
      output$beta[,,j,i] <- output$beta[perm_chain[[i]][j,],,j,i]
      output$cp[,,j,i] <- output$cp[perm_chain[[i]][j,],,j,i]
      ## PREMM only
      output$class_prob[,j,i] <- output$class_prob[perm_chain[[i]][j,],j,i]
      ## Full Model
      output$beta_mean[,,j,i] <- output$beta_mean[perm_chain[[i]][j,],,j,i]
      output$beta_sd[,,j,i] <- output$beta_sd[perm_chain[[i]][j,],,j,i]
      output$cp_mean[,,j,i] <- output$cp_mean[perm_chain[[i]][j,],,j,i]
      output$cp_sd[,,j,i] <- output$cp_sd[perm_chain[[i]][j,],,j,i]
      ## ALL
      output$cp_indicator[,,j,i] <- output$cp_indicator[perm_chain[[i]][j,],,j,i]
      output$n_cp[,j,i] <- output$n_cp[perm_chain[[i]][j,],j,i]
      inv_perm <- order(perm_chain[[i]][j,])
      output$class[,j,i] <- inv_perm[output$class[,j,i]]
    }
  }

  ### Align for a second time (for better accuracy) -----
  class_matrix <- matrix(nrow=n_iter*n_chain, ncol=n_subj)
  for(i in 1:n_subj){
    class_matrix[,i] <- as.vector(output$class[i,,])
  }
  permutes <- label.switching::ecr.iterative.1(class_matrix, n_class)$permutations
  perm_chain <- list()
  perm_chain[[1]] <- permutes[1:n_iter,]
  perm_chain[[2]] <- permutes[(n_iter+1):(2*n_iter),]
  perm_chain[[3]] <- permutes[(2*n_iter+1):(3*n_iter),]
  for(i in 1:n_chain){
    for(j in 1:n_iter){
      # this loop will ignore parameters that don't exist in output
      ## Initialization-Only
      output$beta[,,j,i] <- output$beta[perm_chain[[i]][j,],,j,i]
      output$cp[,,j,i] <- output$cp[perm_chain[[i]][j,],,j,i]
      ## PREMM only
      output$class_prob[,j,i] <- output$class_prob[perm_chain[[i]][j,],j,i]
      ## Full Model
      output$beta_mean[,,j,i] <- output$beta_mean[perm_chain[[i]][j,],,j,i]
      output$beta_sd[,,j,i] <- output$beta_sd[perm_chain[[i]][j,],,j,i]
      output$cp_mean[,,j,i] <- output$cp_mean[perm_chain[[i]][j,],,j,i]
      output$cp_sd[,,j,i] <- output$cp_sd[perm_chain[[i]][j,],,j,i]
      ## ALL
      output$cp_indicator[,,j,i] <- output$cp_indicator[perm_chain[[i]][j,],,j,i]
      output$n_cp[,j,i] <- output$n_cp[perm_chain[[i]][j,],j,i]
      inv_perm <- order(perm_chain[[i]][j,])
      output$class[,j,i] <- inv_perm[output$class[,j,i]]
    }
  }

  ### Invert logistic regression models if the class was switched -----
  if(class_pred == TRUE){
    n_cov <- length(output$class_predictive_covariate_lambda[,1,1])
    for(i in 1:n_iter){
      for(j in 1:n_chain){
        for(k in 1:n_cov){
          if(output$class[1,i,j] != output$class_orig[1,i,j]){
            cp_class1 = output$cp_indicator[1,,i,j]
            cp_class2 = output$cp_indicator[2,,i,j]
            output$cp_indicator[1,,i,j] = cp_class2
            output$cp_indicator[2,,i,j] = cp_class1
            output$logistic_intercept[1,i,j] = -output$logistic_intercept[1,i,j]
            output$class_predictive_covariate_lambda[k,i,j] = -output$class_predictive_covariate_lambda[k,i,j]
          }
        }
      }
    }
  }
  return(output)
}
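# Illustrative sketch (not part of the package source): getmode() returns the
# most frequent value in a vector, which is how the number of changepoints is
# initialized from posterior draws. For example:
#
#   getmode(c(1, 2, 2, 2, 3))  # returns 2
#   getmode(c("a", "b", "a"))  # works for non-numeric vectors as well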
/scratch/gouwar.j/cran-all/cranData/BEND/R/Additional_PREM_Functions.R
#' Simulated data for a PREM + Extensions
#'
#' Simulated data for a piecewise random effects model (PREM) and useful extensions (CI-PREM, PREMM, CI-PREMM) with 18 timepoints collected on 30 individuals.
#'
#' \itemize{
#'   \item `id` ID for each individual.
#'   \item `time` Timepoints for each individual.
#'   \item `y` Outcome.
#'   \item `class_pred_1` First class-predictive covariate (time-invariant).
#'   \item `class_pred_2` Second class-predictive covariate (time-invariant).
#'   \item `outcome_pred_1` Outcome-predictive covariate (time-varying).
#' }
#'
#' @docType data
#' @keywords datasets
#' @name SimData_PREM
#' @usage data(SimData_PREM)
#' @format A data frame with 540 rows and 6 variables.
NULL

#' Simulated data for a PCREM
#'
#' Simulated data for a piecewise crossed random effects model (PCREM) with 7 timepoints collected on 30 individuals.
#'
#' \itemize{
#'   \item `id` ID for each individual.
#'   \item `teacherid` ID for each teacher.
#'   \item `time` Timepoints for each individual.
#'   \item `y` Outcome.
#' }
#'
#' @docType data
#' @keywords datasets
#' @name SimData_PCREM
#' @usage data(SimData_PCREM)
#' @format A data frame with 210 rows and 4 variables.
NULL

#' Simulated data for a BPREM
#'
#' Simulated data for a bivariate piecewise random effects model (BPREM) with 7 timepoints collected on 30 individuals.
#'
#' \itemize{
#'   \item `id` ID for each individual.
#'   \item `time` Timepoints for each individual.
#'   \item `y1` Outcome 1.
#'   \item `y2` Outcome 2.
#' }
#'
#' @docType data
#' @keywords datasets
#' @name SimData_BPREM
#' @usage data(SimData_BPREM)
#' @format A data frame with 210 rows and 4 variables.
NULL

#' Fitted results for a PREM
#'
#' Fitted results for a piecewise random effects model (PREM) using `SimData_PREM`. Included to demonstrate the use of `plot_BEND()` and `summary.PREM()`.
#'
#' @docType data
#' @keywords datasets
#' @name results_prem
#' @usage data(results_prem)
#' @format A list (an object of class `PREM`) with fitted model results.
NULL

#' Fitted results for a PCREM
#'
#' Fitted results for a piecewise crossed random effects model (PCREM) using `SimData_PCREM`. Included to demonstrate the use of `summary.CREM()`.
#'
#' @docType data
#' @keywords datasets
#' @name results_pcrem
#' @usage data(results_pcrem)
#' @format A list (an object of class `CREM`) with fitted model results.
NULL

#' Fitted results for a BPREM
#'
#' Fitted results for a bivariate piecewise random effects model (BPREM) using `SimData_BPREM`. Included to demonstrate the use of `summary.BPREM()`.
#'
#' @docType data
#' @keywords datasets
#' @name results_bprem
#' @usage data(results_bprem)
#' @format A list (an object of class `BPREM`) with fitted model results.
NULL
/scratch/gouwar.j/cran-all/cranData/BEND/R/BEND.R
#' Bayesian Bivariate Piecewise Random Effects Model (BPREM)
#'
#' @description Estimates a Bayesian bivariate piecewise random effects model (BPREM) for longitudinal data with two interrelated outcomes. See Peralta et al. (2022) for more details.
#'
#' @param data Data frame in long format, where each row describes a measurement occasion for a given individual. It is assumed that each individual has the same number of assigned timepoints (a.k.a., rows). There can be missingness in the outcomes (`y1_var` and `y2_var`), but there cannot be missingness in time (`time_var`).
#' @param id_var Name of column that contains ids for individuals with repeated measures in a longitudinal dataset.
#' @param time_var Name of column that contains the time variable. This column cannot contain any missing values.
#' @param y1_var Name of column that contains the first outcome variable. Missing values should be denoted by NA.
#' @param y2_var Name of column that contains the second outcome variable. Missing values should be denoted by NA.
#' @param iters_adapt (optional) Number of iterations for adaptation of jags model (default = 5000).
#' @param iters_burn_in (optional) Number of iterations for burn-in (default = 100000).
#' @param iters_sampling (optional) Number of iterations for posterior sampling (default = 50000).
#' @param thin (optional) Thinning interval for posterior sampling (default = 15).
#' @param save_full_chains Logical indicating whether the MCMC chains from rjags should be saved (default = FALSE). Note, this should not be used regularly as it will result in an object with a large file size.
#' @param save_conv_chains Logical indicating whether the MCMC chains from rjags should be saved but only for the parameters monitored for convergence (default = FALSE). This would be useful for plotting traceplots for relevant model parameters to evaluate convergence behavior. Note, this should not be used regularly as it will result in an object with a large file size.
#' @param verbose Logical controlling whether progress messages/bars are generated (default = TRUE).
#'
#' @returns A list (an object of class `BPREM`) with elements:
#' \item{Convergence}{Potential scale reduction factor (PSRF) for each parameter (`parameter_psrf`), Gelman multivariate scale reduction factor (`multivariate_psrf`), and mean PSRF (`mean_psrf`) to assess model convergence.}
#' \item{Model_Fit}{Deviance (`deviance`), effective number of parameters (`pD`), and Deviance information criterion (`dic`) to assess model fit.}
#' \item{Fitted_Values}{Vector giving the fitted value at each timepoint for each individual (same length as long data).}
#' \item{Parameter_Estimates}{Data frame with posterior mean and 95% credible intervals for each model parameter.}
#' \item{Run_Time}{Total run time for model fitting.}
#' \item{Full_MCMC_Chains}{If save_full_chains=TRUE, raw MCMC chains from rjags.}
#' \item{Convergence_MCMC_Chains}{If save_conv_chains=TRUE, raw MCMC chains from rjags but only for the parameters monitored for convergence.}
#'
#' @details
#' For more information on the model equation and priors implemented in this function, see Peralta et al. (2022).
#'
#' @author Corissa T. Rohloff, Yadira Peralta
#'
#' @references Peralta, Y., Kohli, N., Lock, E. F., & Davison, M. L. (2022). Bayesian modeling of associations in bivariate piecewise linear mixed-effects models. Psychological Methods, 27(1), 44–64. https://doi.org/10.1037/met0000358
#'
#' @examples
#' \donttest{
#' # load simulated data
#' data(SimData_BPREM)
#' # plot observed data
#' plot_BEND(data = SimData_BPREM,
#'           id_var = "id",
#'           time_var = "time",
#'           y_var = "y1",
#'           y2_var = "y2")
#' # fit Bayes_BPREM()
#' results_bprem <- Bayes_BPREM(data = SimData_BPREM,
#'                              id_var = "id",
#'                              time_var = "time",
#'                              y1_var = "y1",
#'                              y2_var = "y2")
#' # result summary
#' summary(results_bprem)
#' # plot fitted results
#' plot_BEND(data = SimData_BPREM,
#'           id_var = "id",
#'           time_var = "time",
#'           y_var = "y1",
#'           y2_var = "y2",
#'           results = results_bprem)
#' }
#'
#' @import stats
#'
#' @export
Bayes_BPREM <- function(data, id_var, time_var, y1_var, y2_var,
                        iters_adapt=5000, iters_burn_in=100000, iters_sampling=50000, thin=15,
                        save_full_chains=FALSE, save_conv_chains=FALSE,
                        verbose=TRUE){

  # START OF SETUP ----

  ## Initial data check
  data <- as.data.frame(data)

  ## Control progress messages/bars
  if(verbose) progress_bar = "text"
  if(!verbose) progress_bar = "none"

  ## Start tracking run time
  run_time_total_start <- Sys.time()

  ## Load module to compute DIC
  suppressMessages(rjags::load.module('dic'))

  ## Set number of chains - will use 3 chains across all models
  n_chains <- 3

  # Reshape data for modeling ----
  ## outcome data - matrix form
  y1 <- reshape(data[,c(id_var, time_var, y1_var)], idvar=id_var, timevar=time_var, direction='wide')
  y1 <- unname(as.matrix(y1[,names(y1)!=id_var]))
  y2 <- reshape(data[,c(id_var, time_var, y2_var)], idvar=id_var, timevar=time_var, direction='wide')
  y2 <- unname(as.matrix(y2[,names(y2)!=id_var]))
  ## JAGS model assumes the two outcome variables are in an array
  y <- array(c(y1, y2), dim = c(nrow(y1), ncol(y1), 2))

  ## time data - matrix form
  ## should be the same dimensions as y1 and y2
  x <- matrix(data[,c(time_var)], byrow=TRUE, nrow=dim(y1)[1], ncol=dim(y1)[2])

  # Define relevant variables ----
  n_subj <- dim(y)[1]
  n_time <- dim(y)[2]
  max_time <- max(x)
  min_time <- min(x)
  min_cp_mean <- unique(sort(x))[2]
  max_cp_mean <- unique(sort(x, decreasing=TRUE))[2]
  mean_cp <- (max_time-min_time)/2        # mean of changepoint
  prec_cp <- 1/((max_time-min_time)/4)^2  # precision of changepoint
  bound_e1 <- min(apply(y[,,1], 2, var, na.rm = TRUE)) # bound of error variance 1
  bound_e2 <- min(apply(y[,,2], 2, var, na.rm = TRUE)) # bound of error variance 2
  beta_mean_zero <- rep(0,8) # vector of zeros to center the distribution of raw random-effects
  omega_b <- diag(8)         # scale matrix for the Wishart distribution

  # Error messages for input ----
  ## data format warnings
  if(sum(is.na(x)) > 0) stop('Columns for time_var cannot have NA values (but y1_var and y2_var can)')
  if(min(x)!=0) warning('Prior assumes first time point measured at x=0')

  # FULL MODEL ----

  ## Specify full JAGS model
  full_spec <- textConnection(bivariate_pw)

  ## Variables to extract from full model
  param_recovery_full <- c('beta', 'beta_mean', 'var_b', 'cov_b', 'cor_b', 'rho',
                           'mu_y', 'sigma2_error', 'for_conv', 'pD', 'deviance')

  ## Create data list for JAGS model
  data_list <- list('y' = y, 'x' = x,
                    'n_subj' = n_subj, 'n_time' = n_time,
                    'mean_cp' = mean_cp, 'prec_cp' = prec_cp,
                    'min_time' = min_time, 'max_time' = max_time,
                    'bound_e1' = bound_e1, 'bound_e2' = bound_e2,
                    'beta_mean_zero' = beta_mean_zero, 'omega_b' = omega_b)

  if(verbose) cat('Calibrating MCMC...\n')
  full_model <- rjags::jags.model(full_spec, data = data_list,
                                  n.chains = n_chains, n.adapt = iters_adapt, quiet = TRUE)

  # burn-in
  if(verbose) cat('Running burn-in...\n')
  update(full_model, iters_burn_in, progress.bar=progress_bar)

  # sampling
  if(verbose) cat('Collecting samples...\n')
  full_out <- rjags::jags.samples(full_model, variable.names=param_recovery_full,
                                  n.iter=iters_sampling, thin=thin, progress.bar=progress_bar)

  # Compiling Full Results -----

  ## Convergence
  # Set up the parameter vector
  beta_mean_param <- paste0('beta_', rep(1:2, e=4), c(0:2, 'cp'), '_mean')
  beta_var_param <- paste0('var_b_', rep(1:2, e=4), c(0:2, 'cp'))
  base_cov <- c('11_10', '12_10', '12_11', '1cp_10', '1cp_11', '1cp_12',
                '20_10', '20_11', '20_12', '20_1cp',
                '21_10', '21_11', '21_12', '21_1cp', '21_20',
                '22_10', '22_11', '22_12', '22_1cp', '22_20', '22_21',
                '2cp_10', '2cp_11', '2cp_12', '2cp_1cp', '2cp_20', '2cp_21', '2cp_22')
  beta_cov_param <- paste0("cov_b_", base_cov)
  beta_corr_param <- paste0("corr_b_", base_cov)
  error_param <- c("error_var_11", "error_var_22", "error_cov_12")
  param_names <- c(beta_mean_param, beta_var_param, beta_cov_param, error_param,
                   beta_corr_param, "error_corr_12")

  mcmc_list <- coda::as.mcmc.list(full_out$for_conv)
  for(i in 1:3){colnames(mcmc_list[[i]]) <- param_names}
  # beta_corr_param and error_corr_12 are redundant parameters, thus they are removed when assessing convergence
  gelman_msrf <- coda::gelman.diag(mcmc_list[,1:47]) # individual parameter psrf, and multivariate psrf
  parameter_psrf <- data.frame(point_est = gelman_msrf$psrf[,1],
                               upper_ci = gelman_msrf$psrf[,2])
  multivariate_psrf <- gelman_msrf$mpsrf
  mean_psrf <- mean(parameter_psrf$point_est) # mean psrf across parameters
  convergence <- list(parameter_psrf=parameter_psrf, multivariate_psrf=multivariate_psrf, mean_psrf=mean_psrf)

  ## Model Fit
  deviance <- mean(full_out$deviance)
  pD <- mean(full_out$pD)
  dic <- deviance + pD
  model_fit <- list(deviance=deviance, pD=pD, dic=dic)

  ## Fitted Values
  y_mean <- summary(full_out$mu_y, FUN='mean')[[1]]

  ## Parameter Estimates
  sum_mcmc <- summary(mcmc_list) # parameter estimates
  param_est <- data.frame(Mean = sum_mcmc$statistics[,1],
                          CI_Lower = sum_mcmc$quantiles[,1],
                          CI_Upper = sum_mcmc$quantiles[,5])

  ## Stop tracking run time
  run_time_total_end <- Sys.time()
  run_time_total <- run_time_total_end - run_time_total_start

  my_results <- list('Convergence' = convergence,
                     'Model_Fit' = model_fit,
                     'Fitted_Values' = y_mean,
                     'Parameter_Estimates' = param_est,
                     'Run_Time' = format(run_time_total))
  if(save_full_chains==TRUE){my_results$Full_MCMC_Chains=full_out}
  if(save_conv_chains==TRUE){my_results$Convergence_MCMC_Chains=mcmc_list[,1:47]}
  class(my_results) <- 'BPREM'
  return(my_results)
}

# Model ----

## Bivariate Piecewise ----
bivariate_pw <- "model{
  for(i in 1:n_subj){
    for(j in 1:n_time){
      y[i,j,1:2] ~ dmnorm(mu_y[i,j,1:2], tau_error[1:2,1:2])
      mu_y[i,j,1] <- beta[i,1] + beta[i,2]*x[i,j] + beta[i,3]*(max(0, x[i,j]-beta[i,4]))
      mu_y[i,j,2] <- beta[i,5] + beta[i,6]*x[i,j] + beta[i,7]*(max(0, x[i,j]-beta[i,8]))
    }
  }

  ## Level 1 precision and error variance
  tau_error[1:2,1:2] <- inverse(sigma2_e[,])
  sigma2_e[1,1] <- sigma2_e1
  sigma2_e[2,2] <- sigma2_e2
  sigma2_e[2,1] <- rho*sqrt(sigma2_e1)*sqrt(sigma2_e2)
  sigma2_e[1,2] <- sigma2_e[2,1]
  sigma2_e1 ~ dunif(0,bound_e1)
  sigma2_e2 ~ dunif(0,bound_e2)
  rho ~ dunif(-1,1)

  ## Distribution of random-effects and error variance
  for (i in 1:n_subj){
    for(k in 1:8){
      beta[i,k] <- beta_mean[k] + beta_rand[i,k]
      beta_rand[i,k] <- scale_c[k]*beta_raw[i,k]
    }
    beta_raw[i,1:8] ~ dmnorm(beta_mean_zero[1:8], tau_b_raw[1:8,1:8])
  }

  ## Priors for fixed-effects
  beta_mean[1] ~ dnorm(0, 0.0001)
  beta_mean[2] ~ dnorm(0, 0.0001)
  beta_mean[3] ~ dnorm(0, 0.0001)
  beta_mean[4] ~ dnorm(mean_cp, prec_cp)T(min_time,max_time)
  beta_mean[5] ~ dnorm(0, 0.0001)
  beta_mean[6] ~ dnorm(0, 0.0001)
  beta_mean[7] ~ dnorm(0, 0.0001)
  beta_mean[8] ~ dnorm(mean_cp, prec_cp)T(min_time,max_time)

  ## Priors for scaling constants
  for(k in 1:8){
    scale_c[k] ~ dunif(0, 100)
  }

  ## Prior for covariance matrix of random-effects
  # Inverse Wishart prior for the raw covariance matrix
  tau_b_raw[1:8,1:8] ~ dwish(omega_b[1:8,1:8], 9)
  sigma2_b_raw[1:8,1:8] <- inverse(tau_b_raw[,])

  ## Define elements of correlation and covariance matrices to recover
  for(k in 1:8){
    for(k_prime in 1:8){
      rho_b[k,k_prime] <- sigma2_b_raw[k,k_prime]/sqrt(sigma2_b_raw[k,k]*sigma2_b_raw[k_prime,k_prime])
      cov_b_mat[k,k_prime] <- scale_c[k]*scale_c[k_prime]*sigma2_b_raw[k,k_prime]
    }
  }
  # Variances:
  for(k in 1:8){
    var_b[k] <- cov_b_mat[k,k]
  }
  # Covariances
  cov_b[1] <- cov_b_mat[2,1]
  cov_b[2] <- cov_b_mat[3,1]
  cov_b[3] <- cov_b_mat[3,2]
  cov_b[4] <- cov_b_mat[4,1]
  cov_b[5] <- cov_b_mat[4,2]
  cov_b[6] <- cov_b_mat[4,3]
  cov_b[7] <- cov_b_mat[5,1]
  cov_b[8] <- cov_b_mat[5,2]
  cov_b[9] <- cov_b_mat[5,3]
  cov_b[10] <- cov_b_mat[5,4]
  cov_b[11] <- cov_b_mat[6,1]
  cov_b[12] <- cov_b_mat[6,2]
  cov_b[13] <- cov_b_mat[6,3]
  cov_b[14] <- cov_b_mat[6,4]
  cov_b[15] <- cov_b_mat[6,5]
  cov_b[16] <- cov_b_mat[7,1]
  cov_b[17] <- cov_b_mat[7,2]
  cov_b[18] <- cov_b_mat[7,3]
  cov_b[19] <- cov_b_mat[7,4]
  cov_b[20] <- cov_b_mat[7,5]
  cov_b[21] <- cov_b_mat[7,6]
  cov_b[22] <- cov_b_mat[8,1]
  cov_b[23] <- cov_b_mat[8,2]
  cov_b[24] <- cov_b_mat[8,3]
  cov_b[25] <- cov_b_mat[8,4]
  cov_b[26] <- cov_b_mat[8,5]
  cov_b[27] <- cov_b_mat[8,6]
  cov_b[28] <- cov_b_mat[8,7]
  # Correlations
  cor_b[1] <- rho_b[2,1]
  cor_b[2] <- rho_b[3,1]
  cor_b[3] <- rho_b[3,2]
  cor_b[4] <- rho_b[4,1]
  cor_b[5] <- rho_b[4,2]
  cor_b[6] <- rho_b[4,3]
  cor_b[7] <- rho_b[5,1]
  cor_b[8] <- rho_b[5,2]
  cor_b[9] <- rho_b[5,3]
  cor_b[10] <- rho_b[5,4]
  cor_b[11] <- rho_b[6,1]
  cor_b[12] <- rho_b[6,2]
  cor_b[13] <- rho_b[6,3]
  cor_b[14] <- rho_b[6,4]
  cor_b[15] <- rho_b[6,5]
  cor_b[16] <- rho_b[7,1]
  cor_b[17] <- rho_b[7,2]
  cor_b[18] <- rho_b[7,3]
  cor_b[19] <- rho_b[7,4]
  cor_b[20] <- rho_b[7,5]
  cor_b[21] <- rho_b[7,6]
  cor_b[22] <- rho_b[8,1]
  cor_b[23] <- rho_b[8,2]
  cor_b[24] <- rho_b[8,3]
  cor_b[25] <- rho_b[8,4]
  cor_b[26] <- rho_b[8,5]
  cor_b[27] <- rho_b[8,6]
  cor_b[28] <- rho_b[8,7]

  # Define elements of error covariance (level 1) to recover
  sigma2_error[1] <- sigma2_e[1,1]
  sigma2_error[2] <- sigma2_e[2,2]
  sigma2_error[3] <- sigma2_e[2,1]

  # Collect important parameters to assess convergence
  for_conv[1:8] <- beta_mean
  for_conv[9:16] <- var_b
  for_conv[17:44] <- cov_b
  for_conv[45:47] <- sigma2_error
  for_conv[48:75] <- cor_b
  for_conv[76] <- rho
}"
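# Illustrative sketch (not part of the package source): the model above uses a
# scaled inverse-Wishart prior, i.e., a Wishart prior on a "raw" precision
# matrix combined with uniform scale constants. The implied random-effect
# covariance is recovered exactly as in the cov_b_mat lines of the JAGS code;
# in matrix form, with assumed values:
#
#   scale_c   <- c(1.2, 0.8)                      # assumed scale constants
#   sigma_raw <- matrix(c(1, 0.3, 0.3, 1), 2, 2)  # assumed raw covariance
#   cov_b_mat <- diag(scale_c) %*% sigma_raw %*% diag(scale_c)
#   # element-wise: cov_b_mat[k, k'] == scale_c[k] * scale_c[k'] * sigma_raw[k, k']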
/scratch/gouwar.j/cran-all/cranData/BEND/R/Bayes_BPREM.R
#' Bayesian Crossed Random Effects Model (CREM)
#'
#' @description
#' Estimates a Bayesian crossed random effects model (CREM) for longitudinal data with dynamic group membership. Four different choices for functional forms are provided: linear, quadratic, exponential, and piecewise. See Rohloff et al. (2024) for more details.
#'
#' @param data Data frame in long format, where each row describes a measurement occasion for a given individual. It is assumed that each individual has the same number of assigned timepoints (a.k.a., rows). There can be missingness in the outcome (`y_var`), but there cannot be missingness in time (`time_var`).
#' @param ind_id_var Name of column that contains ids for individuals with repeated measures in a longitudinal dataset (e.g., students).
#' @param cross_id_var Name of column that contains ids for the crossed factor (e.g., teachers).
#' @param time_var Name of column that contains the time variable. This column cannot contain any missing values.
#' @param y_var Name of column that contains the outcome variable. Missing values should be denoted by NA.
#' @param form Name of the functional form. Options include: ‘linear’ (default), ‘quadratic’, ‘exponential’, ‘piecewise’.
#' @param fixed_effects (optional) Starting values for the fixed effects parameters.
#' @param iters_adapt (optional) Number of iterations for adaptation of jags model (default = 5000).
#' @param iters_burn_in (optional) Number of iterations for burn-in (default = 50000).
#' @param iters_sampling (optional) Number of iterations for posterior sampling (default = 50000).
#' @param thin (optional) Thinning interval for posterior sampling (default = 15).
#' @param save_full_chains Logical indicating whether the MCMC chains from rjags should be saved (default = FALSE). Note, this should not be used regularly as it will result in an object with a large file size.
#' @param save_conv_chains Logical indicating whether the MCMC chains from rjags should be saved but only for the parameters monitored for convergence (default = FALSE). This would be useful for plotting traceplots for relevant model parameters to evaluate convergence behavior. Note, this should not be used regularly as it will result in an object with a large file size.
#' @param verbose Logical controlling whether progress messages/bars are generated (default = TRUE).
#'
#' @returns A list (an object of class `CREM`) with elements:
#' \item{Convergence}{Potential scale reduction factor (PSRF) for each parameter (`parameter_psrf`), Gelman multivariate scale reduction factor (`multivariate_psrf`), and mean PSRF (`mean_psrf`) to assess model convergence.}
#' \item{Model_Fit}{Deviance (`deviance`), effective number of parameters (`pD`), and Deviance information criterion (`dic`) to assess model fit.}
#' \item{Fitted_Values}{Vector giving the fitted value at each timepoint for each individual (same length as long data).}
#' \item{Functional_Form}{Functional form fitted.}
#' \item{Parameter_Estimates}{Data frame with posterior mean and 95% credible intervals for each model parameter.}
#' \item{Run_Time}{Total run time for model fitting.}
#' \item{Full_MCMC_Chains}{If save_full_chains=TRUE, raw MCMC chains from rjags.}
#' \item{Convergence_MCMC_Chains}{If save_conv_chains=TRUE, raw MCMC chains from rjags but only for the parameters monitored for convergence.}
#'
#' @details
#' For more information on the model equation and priors implemented in this function, see Rohloff et al. (2024).
#'
#' Note, this function differs from the above reference by estimating the covariances between the random effects parameters. The variance-covariance matrices of the individual and group random effects have a scaled inverse-Wishart prior (see Peralta et al., 2022).
#'
#' @author Corissa T. Rohloff
#'
#' @references
#' Peralta, Y., Kohli, N., Lock, E. F., & Davison, M. L. (2022). Bayesian modeling of associations in bivariate piecewise linear mixed-effects models. Psychological Methods, 27(1), 44–64. https://doi.org/10.1037/met0000358
#'
#' Rohloff, C. T., Kohli, N., & Lock, E. F. (2024). Identifiability and estimability of Bayesian linear and nonlinear crossed random effects models. British Journal of Mathematical and Statistical Psychology. https://doi.org/10.1111/bmsp.12334
#'
#' @examples
#' \donttest{
#' # load simulated data
#' data(SimData_PCREM)
#' # plot observed data
#' plot_BEND(data = SimData_PCREM,
#'           id_var = "id",
#'           time_var = "time",
#'           y_var = "y")
#' # fit Bayes_CREM()
#' results_pcrem <- Bayes_CREM(data = SimData_PCREM,
#'                             ind_id_var = "id",
#'                             cross_id_var = "teacherid",
#'                             time_var = "time",
#'                             y_var = "y",
#'                             form="piecewise")
#' # result summary
#' summary(results_pcrem)
#' # plot fitted results
#' plot_BEND(data = SimData_PCREM,
#'           id_var = "id",
#'           time_var = "time",
#'           y_var = "y",
#'           results = results_pcrem)
#' }
#'
#' @import stats
#'
#' @export
Bayes_CREM <- function(data, ind_id_var, cross_id_var, time_var, y_var,
                       form="linear", fixed_effects=NULL,
                       iters_adapt=5000, iters_burn_in=50000, iters_sampling=50000, thin=15,
                       save_full_chains=FALSE, save_conv_chains=FALSE,
                       verbose=TRUE){

  # START OF SETUP ----

  ## Initial data check
  data <- as.data.frame(data)

  ## Control progress messages/bars
  if(verbose) progress_bar = "text"
  if(!verbose) progress_bar = "none"

  ## Start tracking run time
  run_time_total_start <- Sys.time()

  ## Load module to compute DIC
  suppressMessages(rjags::load.module('dic'))

  ## Set number of chains - will use 3 chains across all models
  n_chains <- 3

  # Pull data for modeling ----
  y <- data[,paste0(y_var)]                                   # vector of all outcome observations
  t <- data[,paste0(time_var)]                                # vector of all timepoint observations
  ind_id <- as.numeric(factor(data[,paste0(ind_id_var)]))     # vector of individual ids
  cross_id <- as.numeric(factor(data[,paste0(cross_id_var)])) # vector of crossed factor ids

  # Define relevant variables ----
  n_subj <- length(unique(data[,paste0(ind_id_var)]))    # number of individuals (e.g., students)
  n_group <- length(unique(data[,paste0(cross_id_var)])) # number of groups in the crossed factor (e.g., teachers)
  n_obs <- length(y)                                     # number of total observations

  # For piecewise model
  min_cp <- unique(sort(t))[2]
  max_cp <- unique(sort(t, decreasing = TRUE))[2]

  # Error messages for input ----
  ## data format warnings
  if(length(y) != length(t)) stop('Columns for y_var and time_var must have the same dimensions')
  if(sum(is.na(t)) > 0) stop('Columns for time_var cannot have NA values (but y_var can)')
  if(min(t) != 0) warning('Prior assumes first time point measured at t=0')

  ## model specification warnings
  if(form!="linear" && form!="quadratic" && form!="exponential" && form!="piecewise") stop("Specified functional form must be \"linear\", \"quadratic\", \"exponential\", or \"piecewise\"")

  # FULL MODEL ----

  ## Specify full JAGS model
  ## Linear
  if(form=="linear") n_beta <- 2
  if(form=="linear") full_spec <- textConnection(ln_crem)
  ## Quadratic
  if(form=="quadratic") n_beta <- 3
  if(form=="quadratic") full_spec <- textConnection(qd_crem)
  ## Exponential
  if(form=="exponential") n_beta <- 3
  if(form=="exponential") full_spec <- textConnection(ex_crem)
  ## Piecewise
  if(form=="piecewise") n_beta <- 4
  if(form=="piecewise") full_spec <- textConnection(pw_crem)

  ## Scale matrix for the Wishart distribution
  omega_b <- diag(n_beta)

  ## Variables to extract from full model
  param_recovery_full <- c('beta_ir', 'beta_mean', 'cov_b', 'var_b', 'cov_g', 'var_g',
                           'mu_y', 'sigma2_error', 'for_conv', 'pD', 'deviance')

  ## Create data list for JAGS model
  data_list <- list("t" = t, "y" = y,
                    "n_obs" = n_obs, "n_subj" = n_subj, 'n_group' = n_group,
                    "ind_id" = ind_id, 'cross_id' = cross_id,
                    "n_beta" = n_beta, "omega_b" = omega_b)
  if(form=="piecewise") data_list$min_cp <- min_cp
  if(form=="piecewise") data_list$max_cp <- max_cp

  ## Compile Info for Initial Values
  initial_vals <- vector('list', n_chains)
  if(!is.null(fixed_effects)){
    for(i in 1:n_chains){initial_vals[[i]]$beta_mean <- fixed_effects}
  }

  if(verbose) cat("Calibrating MCMC...\n")
  if(!is.null(fixed_effects)) full_model <- rjags::jags.model(full_spec, data = data_list, inits = initial_vals,
                                                              n.chains = n_chains, n.adapt = iters_adapt, quiet = TRUE)
  if(is.null(fixed_effects)) full_model <- rjags::jags.model(full_spec, data = data_list,
                                                             n.chains = n_chains, n.adapt = iters_adapt, quiet = TRUE)

  # burn-in
  if(verbose) cat("Running burn-in...\n")
  update(full_model, iters_burn_in, progress.bar=progress_bar)

  # sampling
  if(verbose) cat("Collecting samples...\n")
  full_out <- rjags::jags.samples(full_model, variable.names = param_recovery_full,
                                  n.iter = iters_sampling, thin = thin, progress.bar=progress_bar)

  # Compiling Full Results -----
  if(form=="linear") param_names <- c("beta_0_mean", "beta_1_mean",
                                      "var_b_0", "var_b_1", "cov_b_01",
                                      "var_g_0", "var_g_1", "cov_g_01",
                                      "error_var")
  if(form=="quadratic" | form=="exponential") param_names <- c("beta_0_mean", "beta_1_mean", "beta_2_mean",
                                                               "var_b_0", "var_b_1", "var_b_2",
                                                               "cov_b_01", "cov_b_02", "cov_b_12",
                                                               "var_g_0", "var_g_1", "var_g_2",
                                                               "cov_g_01", "cov_g_02", "cov_g_12",
                                                               "error_var")
  if(form=="piecewise") param_names <- c("beta_0_mean", "beta_1_mean", "beta_2_mean", "beta_cp_mean",
                                         "var_b_0", "var_b_1", "var_b_2", "var_b_cp",
                                         "cov_b_01", "cov_b_02", "cov_b_12", "cov_b_0cp", "cov_b_1cp", "cov_b_2cp",
                                         "var_g_0", "var_g_1", "var_g_2", "var_g_cp",
                                         "cov_g_01", "cov_g_02", "cov_g_12", "cov_g_0cp", "cov_g_1cp", "cov_g_2cp",
                                         "error_var")

  ## Convergence
  mcmc_list <- coda::as.mcmc.list(full_out$for_conv)
  for(i in 1:3){colnames(mcmc_list[[i]]) <- param_names}
  gelman_msrf <- coda::gelman.diag(mcmc_list) # individual parameter psrf, and multivariate psrf
  parameter_psrf <- data.frame(point_est = gelman_msrf$psrf[,1],
                               upper_ci = gelman_msrf$psrf[,2])
  multivariate_psrf <- gelman_msrf$mpsrf
  mean_psrf <- mean(parameter_psrf$point_est) # mean psrf across parameters
  convergence <- list(parameter_psrf=parameter_psrf, multivariate_psrf=multivariate_psrf, mean_psrf=mean_psrf)

  ## Model Fit
  deviance <- mean(full_out$deviance)
  pD <- mean(full_out$pD)
  dic <- deviance + pD
  model_fit <- list(deviance=deviance, pD=pD, dic=dic)

  ## Fitted Values
  y_mean <- summary(full_out$mu_y, FUN='mean')[[1]]

  ## Parameter Estimates
  sum_mcmc <- summary(mcmc_list) # parameter estimates
  param_est <- data.frame(Mean = sum_mcmc$statistics[,1],
                          CI_Lower = sum_mcmc$quantiles[,1],
                          CI_Upper = sum_mcmc$quantiles[,5])

  ## Stop tracking run time
  run_time_total_end <- Sys.time()
  run_time_total <- run_time_total_end - run_time_total_start

  my_results <- list('Convergence' = convergence,
                     'Model_Fit' = model_fit,
                     'Fitted_Values' = y_mean,
                     'Functional_Form' = form,
                     'Parameter_Estimates' = param_est,
                     'Run_Time' = format(run_time_total))
  if(save_full_chains==TRUE){my_results$Full_MCMC_Chains=full_out}
  if(save_conv_chains==TRUE){my_results$Convergence_MCMC_Chains=mcmc_list}
  class(my_results) <- 'CREM'
  return(my_results)
}

# Models ----

## Linear ----
ln_crem <- "model{
  ##### Level-1 Model #####
  for(j in 1:n_obs){
    y[j] ~ dnorm(mu_y[j], tau_y)
    mu_y[j] <- beta_ir[j,1] + beta_ir[j,2]*t[j]
    beta_ir[j,1] <- beta_mean[1] + b0[ind_id[j],1] + g0[cross_id[j],1]
    beta_ir[j,2] <- beta_mean[2] + b0[ind_id[j],2] + g0[cross_id[j],2]
  }

  ##### Loop over Individuals #####
  for(i in 1:n_subj){
    for(p in 1:n_beta){
      # scale variances
      b0[i,p] <- b_scale_c[p]*b0_raw[i,p]
    }
    # generate raw variance-covariance matrix
    b0_raw[i,1:n_beta] ~ dmnorm(rep(0,n_beta), tau_b_raw[1:n_beta,1:n_beta])
  }

  ##### Loop over Crossed Factor #####
  for(r in 1:n_group){
    for(p in 1:n_beta){
      # scale variances
      g0[r,p] <- g_scale_c[p]*g0_raw[r,p]
    }
    # generate raw variance-covariance matrix
    g0_raw[r,1:n_beta] ~ dmnorm(rep(0,n_beta), tau_g_raw[1:n_beta,1:n_beta])
  }

  ##### Priors #####
  ## Priors for Fixed Effects ##
  for(p in 1:n_beta){
    beta_mean[p] ~ dnorm(0, 0.00001)
  }
  ## Priors for Residual Variance Components ##
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  ## Priors for Individual Random Effects ##
  tau_b_raw[1:n_beta,1:n_beta] ~ dwish(omega_b[1:n_beta,1:n_beta], n_beta+1)
  sigma2_b_raw[1:n_beta,1:n_beta] <- inverse(tau_b_raw[,])
  ## Priors for Crossed Random Effects ##
  tau_g_raw[1:n_beta,1:n_beta] ~ dwish(omega_b[1:n_beta,1:n_beta], n_beta+1)
  sigma2_g_raw[1:n_beta,1:n_beta] <- inverse(tau_g_raw[,])
  ## Priors for Scaling Constants ##
  for(p in 1:n_beta){
    # For Individual Effects #
    b_scale_c[p] ~ dunif(0, 100)
    # For Crossed Effects #
    g_scale_c[p] ~ dunif(0, 100)
  }

  ##### Variance-Covariance Matrix Elements #####
  ## Individual Effects ##
  # Scale Covariance Matrices #
  for(p in 1:n_beta){
    for(p_prime in 1:n_beta){
      cov_b_mat[p,p_prime] <- b_scale_c[p]*b_scale_c[p_prime]*sigma2_b_raw[p,p_prime]
    }
  }
  # Variances #
  for(p in 1:n_beta){
    var_b[p] <- cov_b_mat[p,p]
  }
  # Covariances #
  cov_b[1] <- cov_b_mat[2,1] # 0,1
  ## Crossed Effects ##
  # Scale Covariance Matrices #
  for(p in 1:n_beta){
    for(p_prime in 1:n_beta){
      cov_g_mat[p,p_prime] <- g_scale_c[p]*g_scale_c[p_prime]*sigma2_g_raw[p,p_prime]
    }
  }
  # Variances #
  for(p in 1:n_beta){
    var_g[p] <- cov_g_mat[p,p]
  }
  # Covariances #
  cov_g[1] <- cov_g_mat[2,1] # 0,1

  ##### Collect important parameters to assess convergence #####
  for_conv[1:2] <- beta_mean
  for_conv[3:4] <- var_b
  for_conv[5] <- cov_b
  for_conv[6:7] <- var_g
  for_conv[8] <- cov_g
  for_conv[9] <- sigma2_error
}
"

## Quadratic ----
qd_crem <- "model{
  ##### Level-1 Model #####
  for(j in 1:n_obs){
    y[j] ~ dnorm(mu_y[j], tau_y)
    mu_y[j] <- beta_ir[j,1] + beta_ir[j,2]*t[j] + beta_ir[j,3]*(t[j]^2)
    beta_ir[j,1] <- beta_mean[1] + b0[ind_id[j],1] + g0[cross_id[j],1]
    beta_ir[j,2] <- beta_mean[2] + b0[ind_id[j],2] + g0[cross_id[j],2]
    beta_ir[j,3] <- beta_mean[3] + b0[ind_id[j],3] + g0[cross_id[j],3]
  }

  ##### Loop over Individuals #####
  for(i in 1:n_subj){
    for(p in 1:n_beta){
      # scale variances
      b0[i,p] <- b_scale_c[p]*b0_raw[i,p]
    }
    # generate raw variance-covariance matrix
    b0_raw[i,1:n_beta] ~ dmnorm(rep(0,n_beta), tau_b_raw[1:n_beta,1:n_beta])
  }

  ##### Loop over Crossed Factor #####
  for(r in 1:n_group){
    for(p in 1:n_beta){
      # scale variances
      g0[r,p] <- g_scale_c[p]*g0_raw[r,p]
    }
    # generate raw variance-covariance matrix
    g0_raw[r,1:n_beta] ~ dmnorm(rep(0,n_beta), tau_g_raw[1:n_beta,1:n_beta])
  }

  ##### Priors #####
  ## Priors for Fixed Effects ##
  for(p in 1:n_beta){
    beta_mean[p] ~ dnorm(0, 0.00001)
  }
  ## Priors for Residual Variance Components ##
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  ## Priors for Individual Random Effects ##
  tau_b_raw[1:n_beta,1:n_beta] ~ dwish(omega_b[1:n_beta,1:n_beta], n_beta+1)
  sigma2_b_raw[1:n_beta,1:n_beta] <- inverse(tau_b_raw[,])
  ## Priors for Crossed Random Effects ##
  tau_g_raw[1:n_beta,1:n_beta] ~ dwish(omega_b[1:n_beta,1:n_beta], n_beta+1)
  sigma2_g_raw[1:n_beta,1:n_beta] <- inverse(tau_g_raw[,])
  ## Priors for Scaling Constants ##
  for(p in 1:n_beta){
    # For Individual Effects #
    b_scale_c[p] ~ dunif(0, 100)
    # For Crossed Effects #
    g_scale_c[p] ~ dunif(0, 100)
  }

  ##### Variance-Covariance Matrix Elements #####
  ## Individual Effects ##
  # Scale Covariance Matrices #
  for(p in 1:n_beta){
    for(p_prime in 1:n_beta){
      cov_b_mat[p,p_prime] <- b_scale_c[p]*b_scale_c[p_prime]*sigma2_b_raw[p,p_prime]
    }
  }
  # Variances #
  for(p in 1:n_beta){
    var_b[p] <- cov_b_mat[p,p]
  }
  # Covariances #
  cov_b[1] <- cov_b_mat[2,1] # 0,1
  cov_b[2] <- cov_b_mat[3,1] # 0,2
  cov_b[3] <- cov_b_mat[3,2] # 1,2
  ## Crossed Effects ##
  # Scale Covariance Matrices #
  for(p in 1:n_beta){
    for(p_prime in 1:n_beta){
      cov_g_mat[p,p_prime] <- g_scale_c[p]*g_scale_c[p_prime]*sigma2_g_raw[p,p_prime]
    }
  }
  # Variances #
  for(p in 1:n_beta){
    var_g[p] <- cov_g_mat[p,p]
  }
  # Covariances #
  cov_g[1] <- cov_g_mat[2,1] # 0,1
  cov_g[2] <- cov_g_mat[3,1] # 0,2
  cov_g[3] <- cov_g_mat[3,2] # 1,2

  ##### Collect important parameters to assess convergence #####
  for_conv[1:3] <- beta_mean
  for_conv[4:6] <- var_b
  for_conv[7:9] <- cov_b
  for_conv[10:12] <- var_g
  for_conv[13:15] <- cov_g
  for_conv[16] <- sigma2_error
}
"

## Exponential ----
ex_crem <- "model{
  ##### Level-1 Model #####
  for(j in 1:n_obs){
    y[j] ~ dnorm(mu_y[j], tau_y)
    mu_y[j] <- beta_ir[j,1] + beta_ir[j,2]*(1-exp(-beta_ir[j,3]*t[j]))
    beta_ir[j,1] <- beta_mean[1] + b0[ind_id[j],1] + g0[cross_id[j],1]
    beta_ir[j,2] <- beta_mean[2] + b0[ind_id[j],2] + g0[cross_id[j],2]
    beta_ir[j,3] <- beta_mean[3] + b0[ind_id[j],3] + g0[cross_id[j],3]
  }

  ##### Loop over Individuals #####
  for(i in 1:n_subj){
    for(p in 1:n_beta){
      # scale variances
      b0[i,p] <- b_scale_c[p]*b0_raw[i,p]
    }
    # generate raw variance-covariance matrix
    b0_raw[i,1:n_beta] ~ dmnorm(rep(0,n_beta), tau_b_raw[1:n_beta,1:n_beta])
  }

  ##### Loop over Crossed Factor #####
  for(r in 1:n_group){
    for(p in 1:n_beta){
      # scale variances
      g0[r,p] <- g_scale_c[p]*g0_raw[r,p]
    }
    # generate raw variance-covariance matrix
    g0_raw[r,1:n_beta] ~ dmnorm(rep(0,n_beta), tau_g_raw[1:n_beta,1:n_beta])
  }

  ##### Priors #####
  ## Priors for Fixed Effects ##
  for(p in 1:n_beta){
    beta_mean[p] ~ dnorm(0, 0.00001)
  }
  ## Priors for Residual Variance Components ##
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  ## Priors for Individual Random Effects ##
  tau_b_raw[1:n_beta,1:n_beta] ~ dwish(omega_b[1:n_beta,1:n_beta], n_beta+1)
  sigma2_b_raw[1:n_beta,1:n_beta] <- inverse(tau_b_raw[,])
  ## Priors for Crossed Random Effects ##
  tau_g_raw[1:n_beta,1:n_beta] ~ dwish(omega_b[1:n_beta,1:n_beta], n_beta+1)
  sigma2_g_raw[1:n_beta,1:n_beta] <- inverse(tau_g_raw[,])
  ## Priors for Scaling Constants ##
  for(p in 1:n_beta){
    # For Individual Effects #
    b_scale_c[p] ~ dunif(0, 100)
    # For Crossed Effects #
    g_scale_c[p] ~ dunif(0, 100)
  }

  ##### Variance-Covariance Matrix Elements #####
  ## Individual Effects ##
  # Scale Covariance Matrices #
  for(p in 1:n_beta){
    for(p_prime in 1:n_beta){
      cov_b_mat[p,p_prime] <- b_scale_c[p]*b_scale_c[p_prime]*sigma2_b_raw[p,p_prime]
    }
  }
  # Variances #
  for(p in 1:n_beta){
    var_b[p] <- cov_b_mat[p,p]
  }
  # Covariances #
  cov_b[1] <- cov_b_mat[2,1] # 0,1
  cov_b[2] <- cov_b_mat[3,1] # 0,2
  cov_b[3] <- cov_b_mat[3,2] # 1,2
  ## Crossed Effects ##
  # Scale Covariance Matrices #
  for(p in 1:n_beta){
    for(p_prime in 1:n_beta){
      cov_g_mat[p,p_prime] <- g_scale_c[p]*g_scale_c[p_prime]*sigma2_g_raw[p,p_prime]
    }
  }
  # Variances #
  for(p in 1:n_beta){
    var_g[p] <- cov_g_mat[p,p]
  }
  # Covariances #
  cov_g[1] <- cov_g_mat[2,1] # 0,1
  cov_g[2] <- cov_g_mat[3,1] # 0,2
  cov_g[3] <- cov_g_mat[3,2] # 1,2

  ##### Collect important parameters to assess convergence #####
  for_conv[1:3] <- beta_mean
  for_conv[4:6] <- var_b
  for_conv[7:9] <- cov_b
  for_conv[10:12] <- var_g
  for_conv[13:15] <- cov_g
  for_conv[16] <- sigma2_error
}
"

## Piecewise ----
pw_crem <- "model{
  ##### Level-1 Model #####
  for(j in 1:n_obs){
    y[j] ~ dnorm(mu_y[j], tau_y)
    mu_y[j] <- beta_ir[j,1] + beta_ir[j,2]*t[j] + beta_ir[j,3]*(max(0, t[j]-beta_ir[j,4]))
    beta_ir[j,1] <- beta_mean[1] + b0[ind_id[j],1] + g0[cross_id[j],1]
    beta_ir[j,2] <- beta_mean[2] + b0[ind_id[j],2] + g0[cross_id[j],2]
    beta_ir[j,3] <- beta_mean[3] + b0[ind_id[j],3] + g0[cross_id[j],3]
    beta_ir[j,4] <- beta_mean[4] + b0[ind_id[j],4] + g0[cross_id[j],4]
  }

  ##### Loop over Individuals #####
  for(i in 1:n_subj){
    for(p in 1:n_beta){
      # scale variances
      b0[i,p] <- b_scale_c[p]*b0_raw[i,p]
    }
    # generate raw variance-covariance matrix
    b0_raw[i,1:n_beta] ~ dmnorm(rep(0,n_beta), tau_b_raw[1:n_beta,1:n_beta])
  }

  ##### Loop over Crossed Factor #####
  for(r in 1:n_group){
    for(p in 1:n_beta){
      # scale variances
      g0[r,p] <- g_scale_c[p]*g0_raw[r,p]
    }
    # generate raw variance-covariance matrix
    g0_raw[r,1:n_beta] ~ dmnorm(rep(0,n_beta), tau_g_raw[1:n_beta,1:n_beta])
  }

  ##### Priors #####
  ## Priors for Fixed Effects ##
  for(p in 1:(n_beta-1)){
    beta_mean[p] ~ dnorm(0, 0.00001)
  }
  beta_mean[n_beta] ~ dunif(min_cp,max_cp)
  ## Priors for Residual Variance Components ##
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  ## Priors for Individual Random Effects ##
  tau_b_raw[1:n_beta,1:n_beta] ~ dwish(omega_b[1:n_beta,1:n_beta], n_beta+1)
  sigma2_b_raw[1:n_beta,1:n_beta] <- inverse(tau_b_raw[,])
  ## Priors for Crossed Random Effects ##
  tau_g_raw[1:n_beta,1:n_beta] ~ dwish(omega_b[1:n_beta,1:n_beta], n_beta+1)
  sigma2_g_raw[1:n_beta,1:n_beta] <- inverse(tau_g_raw[,])
  ## Priors for Scaling Constants ##
  for(p in 1:n_beta){
    # For Individual Effects #
    b_scale_c[p] ~ dunif(0, 100)
    # For Crossed Effects #
    g_scale_c[p] ~ dunif(0, 100)
  }

  ##### Variance-Covariance Matrix Elements #####
  ## Individual Effects ##
  # Scale Covariance Matrices #
  for(p in 1:n_beta){
    for(p_prime in 1:n_beta){
      cov_b_mat[p,p_prime] <- b_scale_c[p]*b_scale_c[p_prime]*sigma2_b_raw[p,p_prime]
    }
  }
  # Variances #
  for(p in 1:n_beta){
    var_b[p] <- cov_b_mat[p,p]
  }
  # Covariances #
  cov_b[1] <- cov_b_mat[2,1] # 0,1
  cov_b[2] <- cov_b_mat[3,1] # 0,2
  cov_b[3] <- cov_b_mat[3,2] # 1,2
  cov_b[4] <- cov_b_mat[4,1] # 0,3
  cov_b[5] <- cov_b_mat[4,2] # 1,3
  cov_b[6] <- cov_b_mat[4,3] # 2,3
  ## Crossed Effects ##
  # Scale Covariance Matrices #
  for(p in 1:n_beta){
    for(p_prime in 1:n_beta){
      cov_g_mat[p,p_prime] <- g_scale_c[p]*g_scale_c[p_prime]*sigma2_g_raw[p,p_prime]
    }
  }
  # Variances #
  for(p in 1:n_beta){
    var_g[p] <- cov_g_mat[p,p]
  }
  # Covariances #
  cov_g[1] <- cov_g_mat[2,1] # 0,1
  cov_g[2] <- cov_g_mat[3,1] # 0,2
  cov_g[3] <- cov_g_mat[3,2] # 1,2
  cov_g[4] <- cov_g_mat[4,1] # 0,3
  cov_g[5] <- cov_g_mat[4,2] # 1,3
  cov_g[6] <- cov_g_mat[4,3] # 2,3

  ##### Collect important parameters to assess convergence #####
  for_conv[1:4] <- beta_mean
  for_conv[5:8] <- var_b
  for_conv[9:14] <- cov_b
  for_conv[15:18] <- var_g
  for_conv[19:24] <- cov_g
  for_conv[25] <- sigma2_error
}
"
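# Illustrative sketch (not part of the package source): the pw_crem likelihood
# above uses the linear-linear piecewise mean
#   mu(t) = beta0 + beta1 * t + beta2 * max(0, t - cp),
# so beta1 is the pre-changepoint slope and beta1 + beta2 the post-changepoint
# slope. Written as a plain R function (names are illustrative):
#
#   pw_mean <- function(t, beta0, beta1, beta2, cp) {
#     beta0 + beta1 * t + beta2 * pmax(0, t - cp)
#   }
#   pw_mean(0:6, beta0 = 10, beta1 = 2, beta2 = -1.5, cp = 3)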
/scratch/gouwar.j/cran-all/cranData/BEND/R/Bayes_CREM.R
#' Bayesian Piecewise Random Effects Model (PREM) + Extensions
#'
#' @description Estimates a Bayesian piecewise random effects model (PREM), with some useful extensions. There are three model options included in this function:
#' * `PREM` estimates a Bayesian piecewise random effects model with a latent number of changepoints (default). Allows the inclusion of outcome-predictive covariates (`CI-PREM`).
#' * `PREMM` estimates a piecewise random effects mixture model for a given number of latent classes and a latent number of possible changepoints in each class.
#' * `CI-PREMM` estimates a covariate influenced piecewise random effects mixture model for a given number of latent classes and a latent number of possible changepoints in each class. Allows the inclusion of outcome- and/or class-predictive covariates.
#' See Lock et al. (2018) and Lamm (2022) for more details.
#'
#' @param data Data frame in long format, where each row describes a measurement occasion for a given individual. It is assumed that each individual has the same number of assigned timepoints (a.k.a., rows). There can be missingness in the outcome (`y_var`), but there cannot be missingness in time (`time_var`).
#' @param id_var Name of column that contains ids for individuals with repeated measures in a longitudinal dataset.
#' @param time_var Name of column that contains the time variable. This column cannot contain any missing values.
#' @param y_var Name of column that contains the outcome variable. Missing values should be denoted by NA.
#' @param n_class Number of latent classes (default = 1). Note, CI-PREMM only allows for two classes.
#' @param max_cp Maximum number of changepoints in each latent class (default = 2).
#' @param class_predictive_vars Name(s) of column(s) that contain class-predictive covariates (time-invariant only). Give a vector of names if multiple covariates. Note, there cannot be any missingness in the covariates.
#' @param outcome_predictive_vars Name(s) of column(s) that contain outcome-predictive covariates (time-varying or -invariant). Give a vector of names if multiple covariates. Note, there cannot be any missingness in the covariates.
#' @param scale_prior Prior for the scale parameter for the hierarchical random effects. Options include: 'uniform' (scaled uniform prior; default) or 'hc' (scaled half-Cauchy prior).
#' @param alpha Concentration parameter for Dirichlet prior for latent classes (default = 1). This can be a vector of values corresponding to the number of classes (specified by n_class). Note, this is not used for CI-PREMMs with class-predictive covariates.
#' @param cp_prior Prior for the number of changepoints in each class. Options include: 'binomial' (default) or 'uniform'.
#' @param binom_prob Probability for binomial prior, if specified (default = 0.5).
#' @param iters_adapt (optional) Number of iterations for adaptation of jags model (default = 1000).
#' @param iters_burn_in (optional) Number of iterations for burn-in (default = 20000).
#' @param iters_sampling (optional) Number of iterations for posterior sampling (default = 30000).
#' @param thin (optional) Thinning interval for posterior sampling (default = 15).
#' @param save_full_chains Logical indicating whether the MCMC chains from rjags should be saved (default = FALSE). Note, this should not be used regularly as it will result in an object with a large file size.
#' @param save_conv_chains Logical indicating whether the MCMC chains from rjags should be saved but only for the parameters monitored for convergence (default = FALSE). This would be useful for plotting traceplots for relevant model parameters to evaluate convergence behavior. Note, this should not be used regularly as it will result in an object with a large file size.
#' @param verbose Logical controlling whether progress messages/bars are generated (default = TRUE).
#'
#' @returns A list (an object of class `PREM`) with elements:
#' \item{Convergence}{Potential scale reduction factor (PSRF) for each parameter (`parameter_psrf`), Gelman multivariate scale reduction factor (`multivariate_psrf`), and mean PSRF (`mean_psrf`) to assess model convergence.}
#' \item{Model_Fit}{Deviance (`deviance`), effective number of parameters (`pD`), and Deviance information criterion (`dic`) to assess model fit.}
#' \item{Fitted_Values}{Vector giving the fitted value at each timepoint for each individual (same length as long data).}
#' \item{Parameter_Estimates}{List with posterior means and 95% credible intervals for each model parameter.}
#' \item{Run_Time}{Total run time for model fitting.}
#' \item{Full_MCMC_Chains}{If save_full_chains=TRUE, raw MCMC chains from rjags.}
#' \item{Convergence_MCMC_Chains}{If save_conv_chains=TRUE, raw MCMC chains from rjags but only for the parameters monitored for convergence.}
#' `Class_Information` contains a list with elements:
#' \item{class_membership}{Vector of length n with class membership assignments for each individual.}
#' \item{individ_class_probability}{nxC matrix with each individual's probabilities of belonging to each class conditional on their class-predictive covariates (when applicable) and growth curve.}
#' \item{unconditional_class_probability}{This output will differ based on which model was fit. For a PREM or CI-PREM, this will equal 1 as there is only one class. For a PREMM or CI-PREMM with only outcome-predictive covariates, this will be a vector of length C denoting the population probability of belonging to each class. For a CI-PREMM with class-predictive covariates, this will be a vector of length n denoting the probability of each individual belonging to the non-reference class (Class 2) based on their class-predictive covariates only.}
#'
#' @details
#' For more information on the model equation and priors implemented in this function, see Lamm (2022; CI-PREMM) and Lock et al. (2018; PREMM).
#'
#' @author Corissa T. Rohloff, Rik Lamm, Eric F. Lock
#'
#' @references Lamm, R. (2022). Incorporation of covariates in Bayesian piecewise growth mixture models. https://hdl.handle.net/11299/252533
#'
#' Lock, E. F., Kohli, N., & Bose, M. (2018). Detecting multiple random changepoints in Bayesian piecewise growth mixture models. Psychometrika, 83(3), 733–750.
#' https://doi.org/10.1007/s11336-017-9594-5
#'
#' @examples
#' \donttest{
#' # load simulated data
#' data(SimData_PREM)
#' # plot observed data
#' plot_BEND(data = SimData_PREM,
#'           id_var = "id",
#'           time_var = "time",
#'           y_var = "y")
#'
#' # PREM ---------------------------------------------------------------------------------
#' # fit Bayes_PREM()
#' results_prem <- Bayes_PREM(data = SimData_PREM,
#'                            id_var = "id",
#'                            time_var = "time",
#'                            y_var = "y")
#' # result summary
#' summary(results_prem)
#' # plot fitted results
#' plot_BEND(data = SimData_PREM,
#'           id_var = "id",
#'           time_var = "time",
#'           y_var = "y",
#'           results = results_prem)
#'
#' # CI-PREM ---------------------------------------------------------------------------------
#' # fit Bayes_PREM()
#' results_ciprem <- Bayes_PREM(data = SimData_PREM,
#'                              id_var = "id",
#'                              time_var = "time",
#'                              y_var = "y",
#'                              outcome_predictive_vars = "outcome_pred_1")
#' # result summary
#' summary(results_ciprem)
#' # plot fitted results
#' plot_BEND(data = SimData_PREM,
#'           id_var = "id",
#'           time_var = "time",
#'           y_var = "y",
#'           results = results_ciprem)
#'
#' # PREMM ---------------------------------------------------------------------------------
#' # fit Bayes_PREM()
#' results_premm <- Bayes_PREM(data = SimData_PREM,
#'                             id_var = "id",
#'                             time_var = "time",
#'                             y_var = "y",
#'                             n_class = 2)
#' # result summary
#' summary(results_premm)
#' # plot fitted results
#' plot_BEND(data = SimData_PREM,
#'           id_var = "id",
#'           time_var = "time",
#'           y_var = "y",
#'           results = results_premm)
#'
#'
#' # CI-PREMM ---------------------------------------------------------------------------------
#' # fit Bayes_PREM()
#' results_cipremm <- Bayes_PREM(data = SimData_PREM,
#'                               id_var = "id",
#'                               time_var = "time",
#'                               y_var = "y",
#'                               n_class = 2,
#'                               class_predictive_vars = c("class_pred_1", "class_pred_2"),
#'                               outcome_predictive_vars = "outcome_pred_1")
#' # result summary
#' summary(results_cipremm)
#' # plot fitted results
#' plot_BEND(data = SimData_PREM,
#'           id_var = "id",
#'           time_var = "time",
#'           y_var = "y",
#'           results = results_cipremm)
#' }
#'
#' @import stats
#'
#' @export
Bayes_PREM <- function(data, id_var, time_var, y_var,
                       n_class=1, max_cp=2,
                       class_predictive_vars=NULL, outcome_predictive_vars=NULL,
                       scale_prior='uniform', alpha=1,
                       cp_prior='binomial', binom_prob=0.5,
                       iters_adapt=1000, iters_burn_in=20000, iters_sampling=30000, thin=15,
                       save_full_chains=FALSE, save_conv_chains=FALSE, verbose=TRUE){

  # START OF SETUP ----

  ## Initial data check
  data <- as.data.frame(data)

  ## Control progress messages/bars
  if(verbose) progress_bar = "text"
  if(!verbose) progress_bar = "none"

  ## Start tracking run time
  run_time_total_start <- Sys.time()

  ## Load module to compute DIC
  suppressMessages(rjags::load.module('dic'))

  ## Set number of chains - will use 3 chains across all models
  n_chains <- 3

  # Reshape data for modeling ----

  ## outcome data - matrix form
  y <- reshape(data[,c(id_var, time_var, y_var)], idvar=id_var, timevar=time_var, direction='wide')
  y <- unname(as.matrix(y[,names(y)!=id_var]))

  ## time data - matrix form
  ## should be the same dimensions as y
  x <- matrix(data[,c(time_var)], byrow=TRUE, nrow=dim(y)[1], ncol=dim(y)[2])

  # Define relevant variables ----
  n_subj <- nrow(y)
  n_time <- ncol(y)
  max_beta <- max_cp + 2
  max_time <- max(x)
  min_time <- min(x)
  min_cp_mean <- unique(sort(x))[2]
  max_cp_mean <- unique(sort(x, decreasing=TRUE))[2]
  n_cov_class_predictive <- if(is.null(class_predictive_vars)) 0 else length(class_predictive_vars)
  n_cov_outcome_predictive <- if(is.null(outcome_predictive_vars)) 0 else length(outcome_predictive_vars)

  ## Define number of parameters (used to assess convergence later)
  n_params <- 1 + n_class*(2*max_beta) + n_class*(2*max_cp) + n_cov_outcome_predictive + n_cov_class_predictive
  if(n_cov_class_predictive>0) n_params <- n_params + 1 # only for class_predictive models (logistic intercept)
  ## for CI-PREMM only
  time_vec <- x[1,]

  # Reshape covariates (if applicable) ----

  ## class predictive covariates - matrix form
  ## assumed to be time invariant
  if(n_cov_class_predictive>0){
    class_predictive_covariates <- unique(data[,c(id_var, class_predictive_vars)])
    class_predictive_covariates <- unname(as.matrix(class_predictive_covariates[,names(class_predictive_covariates)!=id_var]))
  }

  ## outcome predictive covariates - array of matrices
  ## assumed to be time varying or invariant
  if(n_cov_outcome_predictive>0){
    outcome_predictive_covariates_list <- list()
    for(i in 1:n_cov_outcome_predictive){
      cov_matrix <- reshape(data[,c(id_var, time_var, outcome_predictive_vars[i])], idvar=id_var, timevar=time_var, direction='wide')
      outcome_predictive_covariates_list[[i]] <- unname(as.matrix(cov_matrix[,names(cov_matrix)!=id_var]))
    }
    outcome_predictive_covariates <- simplify2array(outcome_predictive_covariates_list) # dimensions = (n_subj, n_time, n_cov_outcome_predictive)
  }

  # Error messages for input ----

  ## data format warnings
  if(sum(is.na(x)) > 0) stop('Columns for time_var cannot have NA values (but y_var can)')
  if(min(x)!=0) warning('Prior assumes first time point measured at x=0')

  ## model specification warnings
  if(cp_prior!='binomial' && cp_prior!='uniform') stop('Input for cp_prior must be \'binomial\' or \'uniform\'')
  if(scale_prior!='uniform' && scale_prior!='hc') stop('Input for scale_prior must be \'uniform\' or \'hc\'')
  if(!is.null(class_predictive_vars) && !is.null(outcome_predictive_vars) && n_class>2) stop('CI-PREMM only allows for two classes')

  # Define mean & precision for random coefficients ----
  mean <- c(mean(y[,1], na.rm=TRUE), 0, rep(0,max_cp))
  prec_param_int <- 1/var(y[,1], na.rm=TRUE)
  prec_param <- (1/(max_cp*sd(y, na.rm=TRUE)/(sd(x))))^2
  prec_vec <- c(prec_param_int, rep(prec_param, max_cp+1))
  prec <- diag(max_beta)
  diag(prec) <- prec_vec

  ## Bernoulli indicator prior for uniform number of changepoints
  aux_prob <- c(max_cp:1)/c((max_cp+1):2)
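  ## Illustration (added commentary, not from the original source): together
  ## with the prod(Temp[c,1:k]) construction in the JAGS models, aux_prob
  ## induces a discrete uniform prior on the number of changepoints K in
  ## {0, ..., max_cp}. With max_cp = 2, aux_prob = c(2/3, 1/2): P(K>=1) = 2/3
  ## and P(K>=2 | K>=1) = 1/2, so P(K=0) = P(K=1) = P(K=2) = 1/3.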
  # Model specification ----
  mod_type <- if(n_class==1) ('PREM')
              else if(!is.null(class_predictive_vars) && !is.null(outcome_predictive_vars)) ('CI_PREMM_Full')
              else if(!is.null(class_predictive_vars) && is.null(outcome_predictive_vars)) ('CI_PREMM_Class_Predictive')
              else if(is.null(class_predictive_vars) && !is.null(outcome_predictive_vars)) ('CI_PREMM_Outcome_Predictive')
              else 'PREMM'

  # END OF SETUP ----

  if(mod_type=='PREM'){

    # PREM ----

    ## GENERATING INITIAL VALUES ----

    ## Specify fixed JAGS model
    if(cp_prior=='binomial' && n_cov_outcome_predictive==0) init_fixed_spec <- textConnection(model_prem_binomial_fixed)
    if(cp_prior=='binomial' && n_cov_outcome_predictive>0) init_fixed_spec <- textConnection(model_prem_cov_binomial_fixed)
    if(cp_prior=='uniform' && n_cov_outcome_predictive==0) init_fixed_spec <- textConnection(model_prem_uniform_fixed)
    if(cp_prior=='uniform' && n_cov_outcome_predictive>0) init_fixed_spec <- textConnection(model_prem_cov_uniform_fixed)

    ## Variables to extract for initialization of full model
    param_recovery_init <- c('beta', 'cp', 'n_cp', 'cp_indicator', 'sigma2_error')
    if(n_cov_outcome_predictive>0) param_recovery_init <- c(param_recovery_init, 'outcome_predictive_covariate_alpha')

    ## Create data list for JAGS model
    data_list <- list('x'=x, 'y'=y,
                      'n_subj'=n_subj, 'n_time'=n_time,
                      'max_cp'=max_cp, 'max_beta'=max_beta,
                      'mean'=mean, 'prec'=prec,
                      'min_cp_mean'=min_cp_mean, 'max_cp_mean'=max_cp_mean,
                      'n_class'=n_class)
    if(cp_prior=='binomial') data_list$binom_prob <- binom_prob
    if(cp_prior=='uniform') data_list$aux_prob <- aux_prob
    if(n_cov_outcome_predictive>0) data_list$n_cov_outcome_predictive <- n_cov_outcome_predictive
    if(n_cov_outcome_predictive>0) data_list$outcome_predictive_covariates <- outcome_predictive_covariates

    if(verbose) cat('Computing initial values...\n')
    init_fixed_model <- rjags::jags.model(init_fixed_spec, data=data_list, n.chains=n_chains, n.adapt=500, quiet=TRUE)
    ## burn-in
    update(init_fixed_model, 2000, progress.bar=progress_bar)
    ## sampling
    init_fixed_out <- rjags::jags.samples(init_fixed_model, variable.names=param_recovery_init, n.iter=2000, thin=4, progress.bar=progress_bar)

    ### Compiling Initialization Results ----

    ## Permute changepoint labels, if necessary, so they are ordered correctly
    init_fixed_out <- Permute_CP(output=init_fixed_out, n_class=n_class, max_cp=max_cp, max_beta=max_beta)

    ## Extract initial values for full model parameters
    initial_vals <- vector('list', n_chains)
    for(i in 1:n_chains){initial_vals[[i]]$beta_mean <- matrix(nrow=n_class, ncol=max_beta)}
    for(i in 1:n_chains){initial_vals[[i]]$beta_sd <- matrix(nrow=n_class, ncol=max_beta)}
    for(i in 1:n_chains){initial_vals[[i]]$cp_mean <- matrix(nrow=n_class, ncol=max_cp)}
    for(i in 1:n_chains){initial_vals[[i]]$cp_sd <- matrix(nrow=n_class, ncol=max_cp)}
    for(i in 1:n_chains){
      for(j in 1:n_class){
        initial_vals[[i]]$beta_mean[j,] <- rowMeans(init_fixed_out$beta[j,,init_fixed_out$n_cp[j,,i]==getmode(init_fixed_out$n_cp[j,,i]),i])
        if(length(initial_vals[[i]]$cp_mean)<2){
          initial_vals[[i]]$cp_mean[j,] <- mean(init_fixed_out$cp[j,,init_fixed_out$n_cp[j,,i]==getmode(init_fixed_out$n_cp[j,,i]),i])
        }
        if(length(initial_vals[[i]]$cp_mean)>1){
          initial_vals[[i]]$cp_mean[j,] <- rowMeans(init_fixed_out$cp[j,,init_fixed_out$n_cp[j,,i]==getmode(init_fixed_out$n_cp[j,,i]),i])
        }
        for(k in 1:max_cp){
          initial_vals[[i]]$cp_sd[j,k] <- (max_time-min_time)/(4*max_cp)
        }
        initial_vals[[i]]$beta_sd[j,1] <- sqrt(1/prec_param_int)
        for(k in 2:max_beta){
          initial_vals[[i]]$beta_sd[j,k] <- sqrt(2/prec_param)
        }
      }
    }
    ## only when cp_prior=='binomial'
    if(cp_prior=='binomial'){
      for(i in 1:n_chains){initial_vals[[i]]$cp_indicator <- matrix(nrow=n_class, ncol=max_cp)}
      for(i in 1:n_chains){
        for(j in 1:n_class){
          for(k in 1:max_cp){
            initial_vals[[i]]$cp_indicator[j,k] <- getmode(init_fixed_out$cp_indicator[j,k,,i])
          }
        }
      }
    }
    ## only when cp_prior=='uniform'
    if(cp_prior=='uniform'){
      for(i in 1:n_chains){initial_vals[[i]]$Temp <- matrix(nrow=n_class, ncol=max_cp)}
      for(i in 1:n_chains){
        for(j in 1:n_class){
          for(k in 1:max_cp){
            initial_vals[[i]]$Temp[j,k] <- getmode(init_fixed_out$cp_indicator[j,k,,i])
          }
        }
      }
    }
    ## only when n_cov_outcome_predictive>0
    if(n_cov_outcome_predictive>0){
      for(i in 1:n_chains){
        if(n_cov_outcome_predictive==1){
          initial_vals[[i]]$outcome_predictive_covariate_alpha <- mean(init_fixed_out$outcome_predictive_covariate_alpha[,,i])
        }
        if(n_cov_outcome_predictive>1){
          initial_vals[[i]]$outcome_predictive_covariate_alpha <- rowMeans(init_fixed_out$outcome_predictive_covariate_alpha[,,i])
        }
      }
    }
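    ## Note (added commentary): the fixed-effects model above is used only to
    ## seed the full random-effects model. Starting values are taken from draws
    ## with the modal number of changepoints per class (n_cp == getmode(n_cp)),
    ## so each chain is initialized near a plausible mode; the random-effect
    ## SDs are started at fixed values on the same scale as their priors.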
    ## FULL MODEL -----

    ## Specify full JAGS model
    # without outcome predictive covariates
    if(n_cov_outcome_predictive==0 && cp_prior=='binomial' && scale_prior=='uniform') full_spec <- textConnection(model_prem_binomial_scaleunif)
    if(n_cov_outcome_predictive==0 && cp_prior=='binomial' && scale_prior=='hc') full_spec <- textConnection(model_prem_binomial_scalehc)
    if(n_cov_outcome_predictive==0 && cp_prior=='uniform' && scale_prior=='uniform') full_spec <- textConnection(model_prem_uniform_scaleunif)
    if(n_cov_outcome_predictive==0 && cp_prior=='uniform' && scale_prior=='hc') full_spec <- textConnection(model_prem_uniform_scalehc)
    # with outcome predictive covariates
    if(n_cov_outcome_predictive>0 && cp_prior=='binomial' && scale_prior=='uniform') full_spec <- textConnection(model_prem_cov_binomial_scaleunif)
    if(n_cov_outcome_predictive>0 && cp_prior=='binomial' && scale_prior=='hc') full_spec <- textConnection(model_prem_cov_binomial_scalehc)
    if(n_cov_outcome_predictive>0 && cp_prior=='uniform' && scale_prior=='uniform') full_spec <- textConnection(model_prem_cov_uniform_scaleunif)
    if(n_cov_outcome_predictive>0 && cp_prior=='uniform' && scale_prior=='hc') full_spec <- textConnection(model_prem_cov_uniform_scalehc)

    ## Variables to extract from full model
    param_recovery_full <- c('beta', 'beta_mean', 'beta_sd', 'cp', 'cp_mean', 'cp_sd', 'n_cp', 'cp_indicator', 'mu_y', 'sigma2_error', 'for_conv', 'pD', 'deviance')
    if(n_cov_outcome_predictive>0) param_recovery_full <- c(param_recovery_full, 'outcome_predictive_covariate_alpha')

    ## Create data list for JAGS model
    data_list <- list('x'=x, 'y'=y,
                      'n_subj'=n_subj, 'n_time'=n_time,
                      'min_time'=min_time, 'max_time'=max_time,
                      'max_cp'=max_cp, 'max_beta'=max_beta,
                      'mean'=mean, 'prec'=prec,
                      'min_cp_mean'=min_cp_mean, 'max_cp_mean'=max_cp_mean,
                      'n_class'=n_class, 'n_params'=n_params)
    if(cp_prior=='binomial') data_list$binom_prob <- binom_prob
    if(cp_prior=='uniform') data_list$aux_prob <- aux_prob
    if(n_cov_outcome_predictive>0) data_list$n_cov_outcome_predictive <- n_cov_outcome_predictive
    if(n_cov_outcome_predictive>0) data_list$outcome_predictive_covariates <- outcome_predictive_covariates

    if(verbose) cat('Calibrating MCMC...\n')
    full_model <- rjags::jags.model(full_spec, data=data_list, inits=initial_vals, n.chains=n_chains, n.adapt=iters_adapt, quiet=TRUE)
    # burn-in
    if(verbose) cat('Running burn-in...\n')
    update(full_model, iters_burn_in, progress.bar=progress_bar)
    # sampling
    if(verbose) cat('Collecting samples...\n')
    full_out <- rjags::jags.samples(full_model, variable.names=param_recovery_full, n.iter=iters_sampling, thin=thin, progress.bar=progress_bar)

    ## Permute changepoint labels, if necessary, so they are ordered correctly
    full_out <- Permute_CP(output=full_out, n_class=n_class, max_cp=max_cp, max_beta=max_beta)

    #####

  } else if(mod_type=='PREMM'){

    # PREMM ----

    ## GENERATING INITIAL VALUES ----

    ## Specify fixed JAGS model
    if(cp_prior=='binomial') init_fixed_spec <- textConnection(model_premm_binomial_fixed)
    if(cp_prior=='uniform') init_fixed_spec <- textConnection(model_premm_uniform_fixed)

    ## Variables to extract for initialization of full model
    param_recovery_init <- c('beta', 'cp', 'n_cp', 'cp_indicator', 'sigma2_error', 'class')

    ## Create data list for JAGS model
    data_list <- list('x'=x, 'y'=y,
                      'n_subj'=n_subj, 'n_time'=n_time,
                      'max_cp'=max_cp, 'max_beta'=max_beta,
                      'mean'=mean, 'prec'=prec,
                      'min_cp_mean'=min_cp_mean, 'max_cp_mean'=max_cp_mean,
                      'n_class'=n_class, 'alpha'=rep(alpha, n_class))
    if(cp_prior=='binomial') data_list$binom_prob <- binom_prob
    if(cp_prior=='uniform') data_list$aux_prob <- aux_prob

    if(verbose) cat('Computing initial values...\n')
    init_fixed_model <- rjags::jags.model(init_fixed_spec, data=data_list, n.chains=n_chains, n.adapt=500, quiet=TRUE)
    ## burn-in
    update(init_fixed_model, 2000, progress.bar=progress_bar)
    ## sampling
    init_fixed_out <- rjags::jags.samples(init_fixed_model, variable.names=param_recovery_init, n.iter=2000, thin=4, progress.bar=progress_bar)

    ### Compiling Initialization Results ----

    ## Permute changepoint labels, if necessary, so they are ordered correctly
    init_fixed_out <- Permute_CP(output=init_fixed_out, n_class=n_class, max_cp=max_cp, max_beta=max_beta)
    ## Realign Classes - checking if the classes are properly ordered
    init_fixed_out <- Realign_ECR(output=init_fixed_out, n_class=n_class, model=mod_type)
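    ## Note (added commentary): Realign_ECR relabels the sampled class
    ## assignments to correct for label switching, so that "class 1",
    ## "class 2", ... refer to the same mixture components across iterations
    ## and chains before class-specific summaries are computed.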
    ## Extract initial values for full model parameters
    initial_vals <- vector('list', n_chains)
    for(i in 1:n_chains){initial_vals[[i]]$beta_mean <- matrix(nrow=n_class, ncol=max_beta)}
    for(i in 1:n_chains){initial_vals[[i]]$beta_sd <- matrix(nrow=n_class, ncol=max_beta)}
    for(i in 1:n_chains){initial_vals[[i]]$cp_mean <- matrix(nrow=n_class, ncol=max_cp)}
    for(i in 1:n_chains){initial_vals[[i]]$cp_sd <- matrix(nrow=n_class, ncol=max_cp)}
    for(i in 1:n_chains){initial_vals[[i]]$class <- NA}
    for(i in 1:n_chains){
      initial_vals[[i]]$class <- apply(init_fixed_out$class[,,i],1,getmode)
      for(j in 1:n_class){
        initial_vals[[i]]$beta_mean[j,] <- rowMeans(init_fixed_out$beta[j,,init_fixed_out$n_cp[j,,i]==getmode(init_fixed_out$n_cp[j,,i]),i])
        if(length(initial_vals[[i]]$cp_mean)<2){
          initial_vals[[i]]$cp_mean[j,] <- mean(init_fixed_out$cp[j,,init_fixed_out$n_cp[j,,i]==getmode(init_fixed_out$n_cp[j,,i]),i])
        }
        if(length(initial_vals[[i]]$cp_mean)>1){
          initial_vals[[i]]$cp_mean[j,] <- rowMeans(init_fixed_out$cp[j,,init_fixed_out$n_cp[j,,i]==getmode(init_fixed_out$n_cp[j,,i]),i])
        }
        for(k in 1:max_cp){
          initial_vals[[i]]$cp_sd[j,k] <- (max_time-min_time)/(4*max_cp)
        }
        initial_vals[[i]]$beta_sd[j,1] <- sqrt(1/prec_param_int)
        for(k in 2:max_beta){
          initial_vals[[i]]$beta_sd[j,k] <- sqrt(2/prec_param)
        }
      }
    }
    ## only when cp_prior=='binomial'
    if(cp_prior=='binomial'){
      for(i in 1:n_chains){initial_vals[[i]]$cp_indicator <- matrix(nrow=n_class, ncol=max_cp)}
      for(i in 1:n_chains){
        for(j in 1:n_class){
          for(k in 1:max_cp){
            initial_vals[[i]]$cp_indicator[j,k] <- getmode(init_fixed_out$cp_indicator[j,k,,i])
          }
        }
      }
    }
    ## only when cp_prior=='uniform'
    if(cp_prior=='uniform'){
      for(i in 1:n_chains){initial_vals[[i]]$Temp <- matrix(nrow=n_class, ncol=max_cp)}
      for(i in 1:n_chains){
        for(j in 1:n_class){
          for(k in 1:max_cp){
            initial_vals[[i]]$Temp[j,k] <- getmode(init_fixed_out$cp_indicator[j,k,,i])
          }
        }
      }
    }

    ## FULL MODEL -----

    ## Specify full JAGS model
    if(cp_prior=='binomial' && scale_prior=='uniform') full_spec <- textConnection(model_premm_binomial_scaleunif)
    if(cp_prior=='binomial' && scale_prior=='hc') full_spec <- textConnection(model_premm_binomial_scalehc)
    if(cp_prior=='uniform' && scale_prior=='uniform') full_spec <- textConnection(model_premm_uniform_scaleunif)
    if(cp_prior=='uniform' && scale_prior=='hc') full_spec <- textConnection(model_premm_uniform_scalehc)

    ## Variables to extract from full model
    param_recovery_full <- c('beta', 'beta_mean', 'beta_sd', 'cp', 'cp_mean', 'cp_sd', 'n_cp', 'cp_indicator', 'class_prob', 'class', 'mu_y', 'sigma2_error', 'for_conv', 'pD', 'deviance')

    ## Create data list for JAGS model
    data_list <- list('x'=x, 'y'=y,
                      'n_subj'=n_subj, 'n_time'=n_time,
                      'min_time'=min_time, 'max_time'=max_time,
                      'max_cp'=max_cp, 'max_beta'=max_beta,
                      'mean'=mean, 'prec'=prec,
                      'min_cp_mean'=min_cp_mean, 'max_cp_mean'=max_cp_mean,
                      'n_class'=n_class, 'alpha'=rep(alpha, n_class),
                      'n_params'=n_params)
    if(cp_prior=='binomial') data_list$binom_prob <- binom_prob
    if(cp_prior=='uniform') data_list$aux_prob <- aux_prob

    if(verbose) cat('Calibrating MCMC...\n')
    full_model <- rjags::jags.model(full_spec, data=data_list, inits=initial_vals, n.chains=n_chains, n.adapt=iters_adapt, quiet=TRUE)
    # burn-in
    if(verbose) cat('Running burn-in...\n')
    update(full_model, iters_burn_in, progress.bar=progress_bar)
    # sampling
    if(verbose) cat('Collecting samples...\n')
    full_out <- rjags::jags.samples(full_model, variable.names=param_recovery_full, n.iter=iters_sampling, thin=thin, progress.bar=progress_bar)

    ## Permute changepoint labels, if necessary, so they are ordered correctly
    full_out <- Permute_CP(output=full_out, n_class=n_class, max_cp=max_cp, max_beta=max_beta)
    ## Realign Classes - checking if the classes are properly ordered
    full_out <- Realign_ECR(output=full_out, n_class=n_class, model=mod_type)

    #####

  } else {

    # CI-PREMM ----

    ## GENERATE INITIAL VALUES ----

    ## Specify fixed JAGS model
    if(mod_type=='CI_PREMM_Full' && cp_prior=='binomial') init_fixed_spec <- textConnection(model_cipremm_full_binomial_fixed)
    if(mod_type=='CI_PREMM_Full' && cp_prior=='uniform') init_fixed_spec <- textConnection(model_cipremm_full_uniform_fixed)
    if(mod_type=='CI_PREMM_Class_Predictive' && cp_prior=='binomial') init_fixed_spec <- textConnection(model_cipremm_cpo_binomial_fixed)
    if(mod_type=='CI_PREMM_Class_Predictive' && cp_prior=='uniform') init_fixed_spec <- textConnection(model_cipremm_cpo_uniform_fixed)
    if(mod_type=='CI_PREMM_Outcome_Predictive' && cp_prior=='binomial') init_fixed_spec <- textConnection(model_cipremm_opo_binomial_fixed)
    if(mod_type=='CI_PREMM_Outcome_Predictive' && cp_prior=='uniform') init_fixed_spec <- textConnection(model_cipremm_opo_uniform_fixed)

    ## Variables to extract for initialization of full model
    param_recovery_init <- c('beta', 'cp', 'n_cp', 'cp_indicator', 'sigma2_error', 'class')
    if(mod_type=='CI_PREMM_Full') param_recovery_init <- c(param_recovery_init, c('logistic_intercept', 'class_predictive_covariate_lambda', 'outcome_predictive_covariate_alpha'))
    if(mod_type=='CI_PREMM_Class_Predictive') param_recovery_init <- c(param_recovery_init, c('logistic_intercept', 'class_predictive_covariate_lambda'))
    if(mod_type=='CI_PREMM_Outcome_Predictive') param_recovery_init <- c(param_recovery_init, 'outcome_predictive_covariate_alpha')

    ## Create data list for JAGS model
    data_list <- list('x'=x, 'y'=y,
                      'n_subj'=n_subj, 'n_time'=n_time,
                      'max_cp'=max_cp, 'max_beta'=max_beta,
                      'mean'=mean, 'prec'=prec,
                      'min_cp_mean'=min_cp_mean, 'max_cp_mean'=max_cp_mean,
                      'n_class'=n_class)
    if(cp_prior=='binomial') data_list$binom_prob <- binom_prob
    if(cp_prior=='uniform') data_list$aux_prob <- aux_prob
    if(mod_type=='CI_PREMM_Outcome_Predictive') data_list$alpha <- rep(alpha, n_class)
    if(mod_type=='CI_PREMM_Full' | mod_type=='CI_PREMM_Class_Predictive') data_list$class_predictive_covariates <- class_predictive_covariates
    if(mod_type=='CI_PREMM_Full' | mod_type=='CI_PREMM_Class_Predictive') data_list$n_cov_class_predictive <- n_cov_class_predictive
    if(mod_type=='CI_PREMM_Full' | mod_type=='CI_PREMM_Outcome_Predictive') data_list$outcome_predictive_covariates <- outcome_predictive_covariates
    if(mod_type=='CI_PREMM_Full' | mod_type=='CI_PREMM_Outcome_Predictive') data_list$n_cov_outcome_predictive <- n_cov_outcome_predictive

    if(verbose) cat('Computing initial values...\n')
    init_fixed_model <- rjags::jags.model(init_fixed_spec, data=data_list, n.chains=n_chains, n.adapt=500, quiet=TRUE)
    ## burn-in
    update(init_fixed_model, 2000, progress.bar=progress_bar)
    ## sampling
    init_fixed_out <- rjags::jags.samples(init_fixed_model, variable.names=param_recovery_init, n.iter=2000, thin=4, progress.bar=progress_bar)

    ### Compiling Initialization Results ----

    ## Permute changepoint labels, if necessary, so they are ordered correctly
    init_fixed_out <- Permute_CP(output=init_fixed_out, n_class=n_class, max_cp=max_cp, max_beta=max_beta)
    ## Realign Classes - checking if the classes are properly ordered
    init_fixed_out <- Realign_ECR(output=init_fixed_out, n_class=n_class, model=mod_type)

    ## Extract initial values for full model parameters
    initial_vals <- vector('list', n_chains)
    for(i in 1:n_chains){initial_vals[[i]]$beta_mean <- matrix(nrow=n_class, ncol=max_beta)}
    for(i in 1:n_chains){initial_vals[[i]]$beta_sd <- matrix(nrow=n_class, ncol=max_beta)}
    for(i in 1:n_chains){initial_vals[[i]]$cp_mean <- matrix(nrow=n_class, ncol=max_cp)}
    for(i in 1:n_chains){initial_vals[[i]]$cp_sd <- matrix(nrow=n_class, ncol=max_cp)}
    for(i in 1:n_chains){
      for(j in 1:n_class){
        initial_vals[[i]]$beta_mean[j,] <- rowMeans(init_fixed_out$beta[j,,init_fixed_out$n_cp[j,,i]==getmode(init_fixed_out$n_cp[j,,i]),i])
        if(length(initial_vals[[i]]$cp_mean)<2){
          initial_vals[[i]]$cp_mean[j,] <- mean(init_fixed_out$cp[j,,init_fixed_out$n_cp[j,,i]==getmode(init_fixed_out$n_cp[j,,i]),i])
        }
        if(length(initial_vals[[i]]$cp_mean)>1){
          initial_vals[[i]]$cp_mean[j,] <- rowMeans(init_fixed_out$cp[j,,init_fixed_out$n_cp[j,,i]==getmode(init_fixed_out$n_cp[j,,i]),i])
        }
        for(k in 1:max_cp){
          initial_vals[[i]]$cp_sd[j,k] <- (max_time-min_time)/(4*max_cp)
        }
        initial_vals[[i]]$beta_sd[j,1] <- sqrt(1/prec_param_int)
        for(k in 2:max_beta){
          initial_vals[[i]]$beta_sd[j,k] <- sqrt(2/prec_param)
        }
      }
    }
    ## only for CI_PREMM_Outcome_Predictive
    if(mod_type=='CI_PREMM_Outcome_Predictive'){
      for(i in 1:n_chains){
        initial_vals[[i]]$class <- apply(init_fixed_out$class[,,i],1,getmode)
      }
    }
    ## only when cp_prior=='binomial'
    if(cp_prior=='binomial'){
      for(i in 1:n_chains){initial_vals[[i]]$cp_indicator <- matrix(nrow=n_class, ncol=max_cp)}
      for(i in 1:n_chains){
        for(j in 1:n_class){
          for(k in 1:max_cp){
            initial_vals[[i]]$cp_indicator[j,k] <- getmode(init_fixed_out$cp_indicator[j,k,,i])
          }
        }
      }
    }
    ## only when cp_prior=='uniform'
    if(cp_prior=='uniform'){
      for(i in 1:n_chains){initial_vals[[i]]$Temp <- matrix(nrow=n_class, ncol=max_cp)}
      for(i in 1:n_chains){
        for(j in 1:n_class){
          for(k in 1:max_cp){
            initial_vals[[i]]$Temp[j,k] <- getmode(init_fixed_out$cp_indicator[j,k,,i])
          }
        }
      }
    }
    ## only when n_cov_outcome_predictive>0
    if(n_cov_outcome_predictive>0){
      for(i in 1:n_chains){
        if(n_cov_outcome_predictive==1){
          initial_vals[[i]]$outcome_predictive_covariate_alpha <- mean(init_fixed_out$outcome_predictive_covariate_alpha[,,i])
        }
        if(n_cov_outcome_predictive>1){
          initial_vals[[i]]$outcome_predictive_covariate_alpha <- rowMeans(init_fixed_out$outcome_predictive_covariate_alpha[,,i])
        }
      }
    }
    ## only when n_cov_class_predictive>0
    if(n_cov_class_predictive>0){
      for(i in 1:n_chains){
        if(n_cov_class_predictive==1){
          initial_vals[[i]]$class_predictive_covariate_lambda <- mean(init_fixed_out$class_predictive_covariate_lambda[,,i])
        }
        if(n_cov_class_predictive>1){
          initial_vals[[i]]$class_predictive_covariate_lambda <- rowMeans(init_fixed_out$class_predictive_covariate_lambda[,,i])
        }
        initial_vals[[i]]$logistic_intercept <- mean(init_fixed_out$logistic_intercept[,,i])
      }
    }
    ## FULL MODEL -----

    ## Specify full JAGS model
    if(mod_type=='CI_PREMM_Full') model_list <- cipremm_mods_full
    if(mod_type=='CI_PREMM_Class_Predictive') model_list <- cipremm_mods_cpo
    if(mod_type=='CI_PREMM_Outcome_Predictive') model_list <- cipremm_mods_opo
    if(cp_prior=='binomial' && scale_prior=='uniform') full_spec <- textConnection(model_list[[1]])
    if(cp_prior=='binomial' && scale_prior=='hc') full_spec <- textConnection(model_list[[2]])
    if(cp_prior=='uniform' && scale_prior=='uniform') full_spec <- textConnection(model_list[[3]])
    if(cp_prior=='uniform' && scale_prior=='hc') full_spec <- textConnection(model_list[[4]])

    ## Variables to extract from full model
    param_recovery_full <- c('beta', 'beta_mean', 'beta_sd', 'cp', 'cp_mean', 'cp_sd', 'n_cp', 'cp_indicator', 'class', 'mu_y', 'sigma2_error', 'for_conv', 'pD', 'deviance')
    if(mod_type=='CI_PREMM_Full') param_recovery_full <- c(param_recovery_full, c('logistic_intercept', 'class_predictive_covariate_lambda', 'outcome_predictive_covariate_alpha', 'logistic_class_prob'))
    if(mod_type=='CI_PREMM_Class_Predictive') param_recovery_full <- c(param_recovery_full, c('logistic_intercept', 'class_predictive_covariate_lambda', 'logistic_class_prob'))
    if(mod_type=='CI_PREMM_Outcome_Predictive') param_recovery_full <- c(param_recovery_full, 'outcome_predictive_covariate_alpha', 'class_prob')

    ## Create data list for JAGS model
    data_list <- list('x'=x, 'y'=y,
                      'n_subj'=n_subj, 'n_time'=n_time,
                      'min_time'=min_time, 'max_time'=max_time,
                      'max_cp'=max_cp, 'max_beta'=max_beta,
                      'mean'=mean, 'prec'=prec,
                      'min_cp_mean'=min_cp_mean, 'max_cp_mean'=max_cp_mean,
                      'n_class'=n_class, 'n_params'=n_params)
    if(cp_prior=='binomial') data_list$binom_prob <- binom_prob
    if(cp_prior=='uniform') data_list$aux_prob <- aux_prob
    if(mod_type=='CI_PREMM_Outcome_Predictive') data_list$alpha <- rep(alpha, n_class)
    if(mod_type=='CI_PREMM_Full' | mod_type=='CI_PREMM_Class_Predictive') data_list$class_predictive_covariates <- class_predictive_covariates
    if(mod_type=='CI_PREMM_Full' | mod_type=='CI_PREMM_Class_Predictive') data_list$n_cov_class_predictive <- n_cov_class_predictive
    if(mod_type=='CI_PREMM_Full' | mod_type=='CI_PREMM_Outcome_Predictive') data_list$outcome_predictive_covariates <- outcome_predictive_covariates
    if(mod_type=='CI_PREMM_Full' | mod_type=='CI_PREMM_Outcome_Predictive') data_list$n_cov_outcome_predictive <- n_cov_outcome_predictive

    if(verbose) cat('Calibrating MCMC...\n')
    full_model <- rjags::jags.model(full_spec, data=data_list, inits=initial_vals, n.chains=n_chains, n.adapt=iters_adapt, quiet=TRUE)
    # burn-in
    if(verbose) cat('Running burn-in...\n')
    update(full_model, iters_burn_in, progress.bar=progress_bar)
    # sampling
    if(verbose) cat('Collecting samples...\n')
    full_out <- rjags::jags.samples(full_model, variable.names=param_recovery_full, n.iter=iters_sampling, thin=thin, progress.bar=progress_bar)

    ## Permute changepoint labels, if necessary, so they are ordered correctly
    full_out <- Permute_CP(output=full_out, n_class=n_class, max_cp=max_cp, max_beta=max_beta)
    ## Realign Classes - checking if the classes are properly ordered
    full_out <- Realign_ECR(output=full_out, n_class=n_class, model=mod_type)
  }

  # Compiling Full Results -----

  ## Convergence
  # growth parameters (class dependent)
  for(c in 1:n_class){
    full_out$for_conv[1:max_beta+(4+4*max_cp)*(c-1),,] <- full_out$beta_mean[c,,,]
    full_out$for_conv[(1+max_beta):(2*max_beta)+(4+4*max_cp)*(c-1),,] <- full_out$beta_sd[c,,,]
    full_out$for_conv[(1+2*max_beta):(2*max_beta+max_cp)+(4+4*max_cp)*(c-1),,] <- full_out$cp_mean[c,,,]
    full_out$for_conv[(1+2*max_beta+max_cp):(2*max_beta+2*max_cp)+(4+4*max_cp)*(c-1),,] <- full_out$cp_sd[c,,,]
  }
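  ## Illustration (added commentary) of the for_conv layout: each class
  ## occupies a contiguous block of 2*max_beta + 2*max_cp = 4 + 4*max_cp slots,
  ## ordered beta_mean, beta_sd, cp_mean, cp_sd. With max_cp = 2 (so
  ## max_beta = 4) and n_class = 2, class 1 fills slots 1:12 and class 2 fills
  ## slots 13:24; any covariate effects follow the last class block, and the
  ## error variance sits in slot n_params.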
  # covariates (if applicable)
  if(n_cov_outcome_predictive>0){
    full_out$for_conv[(1+2*max_beta+2*max_cp):(2*max_beta+2*max_cp+n_cov_outcome_predictive)+(4+4*max_cp)*(n_class-1),,] <- full_out$outcome_predictive_covariate_alpha[,,]
  }
  if(n_cov_class_predictive>0){
    full_out$for_conv[(1+2*max_beta+2*max_cp+n_cov_outcome_predictive):(2*max_beta+2*max_cp+n_cov_outcome_predictive+n_cov_class_predictive)+(4+4*max_cp)*(n_class-1),,] <- full_out$class_predictive_covariate_lambda[,,]
  }
  if(n_cov_class_predictive>0){
    full_out$for_conv[(1+2*max_beta+2*max_cp+n_cov_outcome_predictive+n_cov_class_predictive)+(4+4*max_cp)*(n_class-1),,] <- full_out$logistic_intercept[,,]
  }
  # error variance
  full_out$for_conv[n_params,,] <- full_out$sigma2_error

  mcmc_list <- coda::as.mcmc.list(full_out$for_conv)
  gelman_msrf <- coda::gelman.diag(mcmc_list)

  # Initialize the parameter vector
  param_names <- c('NA')
  for(c in 1:n_class){
    param_names <- c(param_names,
                     paste0('Class ', c, ': beta_', 0:(max_beta-1), '_mean'),
                     paste0('Class ', c, ': beta_', 0:(max_beta-1), '_sd'),
                     paste0('Class ', c, ': cp_', 1:max_cp, '_mean'),
                     paste0('Class ', c, ': cp_', 1:max_cp, '_sd'))
  }
  param_names <- c(param_names[-1],
                   if(n_cov_outcome_predictive>0) paste0('outcome_predictive_covariate_alpha_', 1:n_cov_outcome_predictive),
                   if(n_cov_class_predictive>0) paste0('class_predictive_covariate_lambda_', 1:n_cov_class_predictive),
                   if(n_cov_class_predictive>0) 'logistic_intercept',
                   'error_var')
  row.names(gelman_msrf$psrf) <- param_names
  parameter_psrf <- data.frame(point_est = gelman_msrf$psrf[,1],
                               upper_ci = gelman_msrf$psrf[,2])
  multivariate_psrf <- gelman_msrf$mpsrf
  mean_psrf <- mean(parameter_psrf$point_est) # mean psrf across parameters
  convergence <- list(parameter_psrf=parameter_psrf, multivariate_psrf=multivariate_psrf, mean_psrf=mean_psrf)

  ## Model Fit
  deviance <- mean(full_out$deviance)
  pD <- mean(full_out$pD)
  dic <- deviance + pD
  model_fit <- list(deviance=deviance, pD=pD, dic=dic)

  ## Fitted Values
  y_mean <- summary(full_out$mu_y, FUN='mean')[[1]]

  ## Class Information
  # each of these is fixed to 1 for PREM
  class_membership <- rep(1, n_subj)
  individ_class_probability <- matrix(1, nrow=n_subj, ncol=n_class) # conditional on logistic model (when applicable) and growth curve
  unconditional_class_probability <- 1
  if(n_class>1){
    for(i in 1:n_subj){class_membership[i] <- as.numeric(names(which.max(table(full_out$class[i,,]))))}
    for(i in 1:n_subj){individ_class_probability[i,1:n_class] <- prop.table(table(factor(full_out$class[i,,], levels = 1:n_class)))}
    if(n_cov_class_predictive==0){
      unconditional_class_probability <- apply(full_out$class_prob, c(1), FUN='mean') # not based on growth curve part of the model
    }
    if(n_cov_class_predictive>0){
      unconditional_class_probability <- apply(full_out$logistic_class_prob, c(1), FUN='mean')
    }
  }
  class_info <- list('class_membership'=class_membership,
                     'individ_class_probability'=individ_class_probability,
                     'unconditional_class_probability'=unconditional_class_probability)
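  ## Note (added commentary): the parameter summaries below are conditional on
  ## the number of changepoints K. For each class, K_prob is the posterior
  ## probability of each K, and estimates under a given K are computed only
  ## from the draws with n_cp == K (and only reported when that K has posterior
  ## probability > 0.01).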
  ## Parameter Estimates
  param_est <- list()
  for(c in 1:n_class){
    # Probability for number of changepoints
    K_prob <- matrix(nrow=(max_cp+1), ncol=1)
    for(k in 0:max_cp){
      K_prob[k+1] <- sum(full_out$n_cp[c,,]==k)/length(full_out$n_cp[c,,])
    }
    colnames(K_prob) <- 'Probability'
    rownames(K_prob) <- c(paste0('K=',0:max_cp))
    # Parameter estimates for each result for the number of changepoints
    K <- list()
    for(k in 0:max_cp){
      K[[k+1]] <- list()
      if(K_prob[k+1]>0.01){
        beta_array <- array(full_out$beta, dim=c(dim(full_out$beta)[1],dim(full_out$beta)[2],dim(full_out$beta)[3]*dim(full_out$beta)[4]))
        beta_array <- beta_array[,,full_out$n_cp[c,,]==k]
        beta <- apply(beta_array, c(1,2), 'mean')[,1:(2+k)]
        beta_mean_array <- array(full_out$beta_mean[c,,,], dim=c(dim(full_out$beta_mean)[2],dim(full_out$beta_mean)[3]*dim(full_out$beta_mean)[4]))
        beta_mean_array <- beta_mean_array[1:(2+k),full_out$n_cp[c,,]==k]
        b_mean <- apply(beta_mean_array, c(1), 'mean')
        b_mean_CI <- apply(beta_mean_array, c(1), 'quantile', probs=c(0.025,0.975))
        beta_sd_array <- array(full_out$beta_sd[c,,,], dim=c(dim(full_out$beta_sd)[2],dim(full_out$beta_sd)[3]*dim(full_out$beta_sd)[4]))
        beta_sd_array <- beta_sd_array[1:(2+k),full_out$n_cp[c,,]==k]
        beta_sd <- apply(beta_sd_array, c(1), 'mean')
        beta_sd_CI <- apply(beta_sd_array, c(1), 'quantile', probs=c(0.025,0.975))
        cp <- cp_mean <- cp_mean_CI <- cp_sd <- cp_sd_CI <- NULL
        if(k>0){
          cp_array <- array(full_out$cp, dim=c(dim(full_out$cp)[1],dim(full_out$cp)[2],dim(full_out$cp)[3]*dim(full_out$cp)[4]))
          cp_array <- cp_array[,,full_out$n_cp[c,,]==k]
          cp <- apply(cp_array, c(1,2), 'mean')[,1:k]
          cp_mean_array <- array(full_out$cp_mean[c,,,], dim=c(dim(full_out$cp_mean)[2],dim(full_out$cp_mean)[3]*dim(full_out$cp_mean)[4]))
          cp_mean_array <- cp_mean_array[1:k,full_out$n_cp[c,,]==k,drop=FALSE]
          cp_mean <- apply(cp_mean_array, c(1), 'mean')
          cp_mean_CI <- apply(cp_mean_array, c(1), 'quantile', probs=c(0.025,0.975))
          cp_sd_array <- array(full_out$cp_sd[c,,,], dim=c(dim(full_out$cp_sd)[2],dim(full_out$cp_sd)[3]*dim(full_out$cp_sd)[4]))
          cp_sd_array <- cp_sd_array[1:k,full_out$n_cp[c,,]==k,drop=FALSE]
          cp_sd <- apply(cp_sd_array, c(1), 'mean')
          cp_sd_CI <- apply(cp_sd_array, c(1), 'quantile', probs=c(0.025,0.975))
        }
        K[[k+1]] <- list('beta'=beta,
                         'beta_mean'=b_mean, 'beta_mean_CI'=b_mean_CI,
                         'beta_var'=beta_sd^2, 'beta_var_CI'=beta_sd_CI^2, # provide variance estimates as output
                         'cp'=cp,
                         'cp_mean'=cp_mean, 'cp_mean_CI'=cp_mean_CI,
                         'cp_var'=cp_sd^2, 'cp_var_CI'=cp_sd_CI^2) # provide variance estimates as output
      }
      names(K)[k+1] <- paste0('K_', k)
      param_est[[c]] <- list('K_prob'=K_prob, 'K'=K)
    }
  }
  names(param_est) <- paste0('Class_', 1:n_class)
  param_est$error_var <- apply(full_out$sigma2_error, c(1), 'mean')
  if(n_cov_outcome_predictive>0){
    outcome_predictive_covariate_alpha <- apply(full_out$outcome_predictive_covariate_alpha, c(1), 'mean')
    param_est$outcome_predictive_covariates <- outcome_predictive_covariate_alpha
    names(param_est$outcome_predictive_covariates) <- outcome_predictive_vars
  }
  if(n_cov_class_predictive>0){
    class_predictive_covariate_lambda <- apply(full_out$class_predictive_covariate_lambda, c(1), 'mean')
    param_est$class_predictive_covariates <- class_predictive_covariate_lambda
    names(param_est$class_predictive_covariates) <- class_predictive_vars
    logistic_intercept <- apply(full_out$logistic_intercept, c(1), 'mean')
    param_est$logistic_intercept <- logistic_intercept
  }

  ## Stop tracking run time
  run_time_total_end <- Sys.time()
  run_time_total <- run_time_total_end - run_time_total_start

  my_results <- list('Convergence'=convergence,
                     'Model_Fit'=model_fit,
                     'Fitted_Values'=y_mean,
                     'Class_Information'=class_info,
                     'Parameter_Estimates'=param_est,
                     'Run_Time'=format(run_time_total))
  if(save_full_chains==TRUE){my_results$Full_MCMC_Chains <- full_out}
  if(save_conv_chains==TRUE){my_results$Convergence_MCMC_Chains <- mcmc_list}
  class(my_results) <- 'PREM'
  return(my_results)
}
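## Sketch (added; not part of the original source): inspecting a fitted object,
## assuming a fit stored as `res`:
# res <- Bayes_PREM(SimData_PREM, id_var="id", time_var="time", y_var="y")
# res$Convergence$mean_psrf                       # average PSRF across monitored parameters
# res$Parameter_Estimates$Class_1$K_prob          # posterior probability of each number of changepoints
# res$Parameter_Estimates$Class_1$K$K_1$cp_mean   # changepoint mean, conditional on K = 1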
/scratch/gouwar.j/cran-all/cranData/BEND/R/Bayes_PREM.R
# Bayes_PREM JAGS Models

# PREM (One class Models) ----

## Binomial CP ----

### Fixed ----
model_prem_binomial_fixed <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j])
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      cp_indicator[c,k] ~ dbern(binom_prob)
      cp[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    # prior distribution of the fixed parameters
    beta[c,1:max_beta] ~ dmnorm(mean, prec)
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
}
"

### Scale Unif ----
model_prem_binomial_scaleunif <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j])
    }
  }
  # prior distribution of the random coefficients
  for(i in 1:n_subj){
    for(q in 1:max_beta){
      beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q])
    }
    for(k in 1:max_cp){
      cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time)
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      cp_indicator[c,k] ~ dbern(binom_prob)
      cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      cp_prec[c,k] <- 1/(cp_sd[c,k]^2)
      # prior distribution of the changepoint variance
      cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    beta_mean[c,1:max_beta] ~ dmnorm(mean, prec)
    # prior distribution of random-effects variances
    for(q in 1:max_beta){
      beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5)
      beta_tau[c,q] <- 1/(beta_sd[c,q]^2)
    }
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  for_conv[1:n_params] <- c(rep(0, n_params))
}
"
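## Note (added commentary): in the 'scalehc' variants below, a half-Cauchy
## prior on a scale parameter is coded as a standard-t with 1 degree of
## freedom, truncated to (0, Inf): dt(0, 1/(A/tan(0.45*3.1416))^2, 1) T(0,).
## The Cauchy scale A/tan(0.45*pi) is chosen so that the half-Cauchy's 90th
## percentile equals A, the upper bound used by the matching scaled-uniform
## ('scaleunif') prior.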
### Scale HC ----
model_prem_binomial_scalehc <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j])
    }
  }
  # prior distribution of the random coefficients
  for(i in 1:n_subj){
    for(q in 1:max_beta){
      beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q])
    }
    for(k in 1:max_cp){
      cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time)
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      cp_indicator[c,k] ~ dbern(binom_prob)
      cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      cp_prec[c,k] <- 1/(cp_sd[c,k]^2)
      # prior distribution of the changepoint variance
      cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    beta_mean[c,1:max_beta] ~ dmnorm(mean, prec)
    # prior distribution of random-effects variances
    for(q in 1:max_beta){
      beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,)
      beta_tau[c,q] <- 1/(beta_sd[c,q]^2)
    }
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  for_conv[1:n_params] <- c(rep(0, n_params))
}
"

## Uniform CP ----

### Fixed ----
model_prem_uniform_fixed <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j])
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      Temp[c,k] ~ dbern(aux_prob[k])
      cp_indicator[c,k] <- prod(Temp[c,1:k])
      # prior distribution of fixed effect of changepoint
      cp[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    # prior distribution of the fixed parameters
    beta[c,1:max_beta] ~ dmnorm(mean, prec)
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
}
"
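## Note (added commentary): under the 'uniform' changepoint prior, the
## auxiliary draws Temp[c,k] ~ dbern(aux_prob[k]) are combined via
## cp_indicator[c,k] <- prod(Temp[c,1:k]), which forces the indicators to be
## nonincreasing in k: exactly the first n_cp indicators equal 1. Together with
## aux_prob as defined in Bayes_PREM(), this yields a uniform prior on the
## number of changepoints.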
### Scale Unif ----
model_prem_uniform_scaleunif <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j])
    }
  }
  # prior distribution of the random coefficients
  for(i in 1:n_subj){
    for(q in 1:max_beta){
      beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q])
    }
    for(k in 1:max_cp){
      cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time)
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      Temp[c,k] ~ dbern(aux_prob[k])
      cp_indicator[c,k] <- prod(Temp[c,1:k])
      cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      cp_prec[c,k] <- 1/(cp_sd[c,k]^2)
      # prior distribution of the changepoint variance
      cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    beta_mean[c,1:max_beta] ~ dmnorm(mean, prec)
    # prior distribution of random-effects variances
    for(q in 1:max_beta){
      beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5)
      beta_tau[c,q] <- 1/(beta_sd[c,q]^2)
    }
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  for_conv[1:n_params] <- c(rep(0, n_params))
}
"

### Scale HC ----
model_prem_uniform_scalehc <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j])
    }
  }
  # prior distribution of the random coefficients
  for(i in 1:n_subj){
    for(q in 1:max_beta){
      beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q])
    }
    for(k in 1:max_cp){
      cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time)
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      Temp[c,k] ~ dbern(aux_prob[k])
      cp_indicator[c,k] <- prod(Temp[c,1:k])
      cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      cp_prec[c,k] <- 1/(cp_sd[c,k]^2)
      # prior distribution of the changepoint variance
      cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    beta_mean[c,1:max_beta] ~ dmnorm(mean, prec)
    # prior distribution of random-effects variances
    for(q in 1:max_beta){
      beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,)
      beta_tau[c,q] <- 1/(beta_sd[c,q]^2)
    }
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  for_conv[1:n_params] <- c(rep(0, n_params))
}
"

## Binomial CP (Covariates) ----

### Fixed ----
model_prem_cov_binomial_fixed <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,])
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      cp_indicator[c,k] ~ dbern(binom_prob)
      # prior distribution of fixed effect of changepoint
      cp[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    # prior distribution of the fixed parameters
    beta[c,1:max_beta] ~ dmnorm(mean, prec)
  }
  # prior distribution of the outcome predictive covariates
  for(l in 1:n_cov_outcome_predictive){
    outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001)
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
}
"
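## Note (added commentary): in the covariate ('cov') variants,
## inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,])
## adds a linear effect of the (possibly time-varying) outcome-predictive
## covariates to each person-by-time mean, with a single alpha vector shared
## across classes.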
### Scale Unif ----
model_prem_cov_binomial_scaleunif <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,])
    }
  }
  # prior distribution of the random coefficients
  for(i in 1:n_subj){
    for(q in 1:max_beta){
      beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q])
    }
    for(k in 1:max_cp){
      cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time)
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      cp_indicator[c,k] ~ dbern(binom_prob)
      cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      cp_prec[c,k] <- 1/(cp_sd[c,k]^2)
      # prior distribution of the changepoint variance
      cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    beta_mean[c,1:max_beta] ~ dmnorm(mean, prec)
    # prior distribution of random-effects variances
    for(q in 1:max_beta){
      beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5)
      beta_tau[c,q] <- 1/(beta_sd[c,q]^2)
    }
  }
  # prior distribution of the outcome predictive covariates
  for(l in 1:n_cov_outcome_predictive){
    outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001)
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  for_conv[1:n_params] <- c(rep(0, n_params))
}
"

### Scale HC ----
model_prem_cov_binomial_scalehc <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,])
    }
  }
  # prior distribution of the random coefficients
  for(i in 1:n_subj){
    for(q in 1:max_beta){
      beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q])
    }
    for(k in 1:max_cp){
      cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time)
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      cp_indicator[c,k] ~ dbern(binom_prob)
      cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      cp_prec[c,k] <- 1/(cp_sd[c,k]^2)
      # prior distribution of the changepoint variance
      cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    beta_mean[c,1:max_beta] ~ dmnorm(mean, prec)
    # prior distribution of random-effects variances
    for(q in 1:max_beta){
      beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,)
      beta_tau[c,q] <- 1/(beta_sd[c,q]^2)
    }
  }
  # prior distribution of the outcome predictive covariates
  for(l in 1:n_cov_outcome_predictive){
    outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001)
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  for_conv[1:n_params] <- c(rep(0, n_params))
}
"

## Uniform CP (Covariates) ----

### Fixed ----
model_prem_cov_uniform_fixed <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,])
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      Temp[c,k] ~ dbern(aux_prob[k])
      cp_indicator[c,k] <- prod(Temp[c,1:k])
      # prior distribution of fixed effect of changepoint
      cp[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    # prior distribution of the fixed parameters
    beta[c,1:max_beta] ~ dmnorm(mean, prec)
  }
  # prior distribution of the outcome predictive covariates
  for(l in 1:n_cov_outcome_predictive){
    outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001)
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
}
"

### Scale Unif ----
model_prem_cov_uniform_scaleunif <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,])
    }
  }
  # prior distribution of the random coefficients
  for(i in 1:n_subj){
    for(q in 1:max_beta){
      beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q])
    }
    for(k in 1:max_cp){
      cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time)
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      Temp[c,k] ~ dbern(aux_prob[k])
      cp_indicator[c,k] <- prod(Temp[c,1:k])
      cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      cp_prec[c,k] <- 1/(cp_sd[c,k]^2)
      # prior distribution of the changepoint variance
      cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    beta_mean[c,1:max_beta] ~ dmnorm(mean, prec)
    # prior distribution of random-effects variances
    for(q in 1:max_beta){
      beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5)
      beta_tau[c,q] <- 1/(beta_sd[c,q]^2)
    }
  }
  # prior distribution of the outcome predictive covariates
  for(l in 1:n_cov_outcome_predictive){
    outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001)
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  for_conv[1:n_params] <- c(rep(0, n_params))
}
"

### Scale HC ----
model_prem_cov_uniform_scalehc <- "model{
  for(i in 1:n_subj){
    class[i] <- 1
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,])
    }
  }
  # prior distribution of the random coefficients
  for(i in 1:n_subj){
    for(q in 1:max_beta){
      beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q])
    }
    for(k in 1:max_cp){
      cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time)
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      Temp[c,k] ~ dbern(aux_prob[k])
      cp_indicator[c,k] <- prod(Temp[c,1:k])
      cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      cp_prec[c,k] <- 1/(cp_sd[c,k]^2)
      # prior distribution of the changepoint variance
      cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    beta_mean[c,1:max_beta] ~ dmnorm(mean, prec)
    # prior distribution of random-effects variances
    for(q in 1:max_beta){
      beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,)
      beta_tau[c,q] <- 1/(beta_sd[c,q]^2)
    }
  }
  # prior distribution of the outcome predictive covariates
  for(l in 1:n_cov_outcome_predictive){
    outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001)
  }
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  for_conv[1:n_params] <- c(rep(0, n_params))
}
"
# PREMM (No Covariates) ----

## Binomial CP ----

### Fixed ----
model_premm_binomial_fixed <- "model{
  for(i in 1:n_subj){
    class[i] ~ dcat(class_prob)
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j])
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      cp_indicator[c,k] ~ dbern(binom_prob)
      # prior distribution of fixed effect of changepoint
      cp[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    # prior distribution of the fixed parameters
    beta[c,1:max_beta] ~ dmnorm(mean, prec)
  }
  # prior distribution of the class probability
  class_prob ~ ddirch(alpha)
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
}
"
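## Note (added commentary): in the PREMM models, class membership is sampled
## as class[i] ~ dcat(class_prob), with a Dirichlet prior
## class_prob ~ ddirch(alpha) on the class proportions; all class-specific
## parameters are then indexed by class[i].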
### Scale Unif ----
model_premm_binomial_scaleunif <- "model{
  for(i in 1:n_subj){
    class[i] ~ dcat(class_prob)
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j])
    }
  }
  # prior distribution of the random coefficients
  for(i in 1:n_subj){
    for(q in 1:max_beta){
      beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q])
    }
    for(k in 1:max_cp){
      cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time)
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      cp_indicator[c,k] ~ dbern(binom_prob)
      cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      cp_prec[c,k] <- 1/(cp_sd[c,k]^2)
      # prior distribution of the changepoint variance
      cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    beta_mean[c,1:max_beta] ~ dmnorm(mean, prec)
    # prior distribution of random-effects variances
    for(q in 1:max_beta){
      beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5)
      beta_tau[c,q] <- 1/(beta_sd[c,q]^2)
    }
  }
  # prior distribution of the class probability
  class_prob ~ ddirch(alpha)
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  for_conv[1:n_params] <- c(rep(0, n_params))
}
"

### Scale HC ----
model_premm_binomial_scalehc <- "model{
  for(i in 1:n_subj){
    class[i] ~ dcat(class_prob)
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j])
    }
  }
  # prior distribution of the random coefficients
  for(i in 1:n_subj){
    for(q in 1:max_beta){
      beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q])
    }
    for(k in 1:max_cp){
      cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time)
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      cp_indicator[c,k] ~ dbern(binom_prob)
      cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      cp_prec[c,k] <- 1/(cp_sd[c,k]^2)
      # prior distribution of the changepoint variance
      cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    beta_mean[c,1:max_beta] ~ dmnorm(mean, prec)
    # prior distribution of random-effects variances
    for(q in 1:max_beta){
      beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,)
      beta_tau[c,q] <- 1/(beta_sd[c,q]^2)
    }
  }
  # prior distribution of the class probability
  class_prob ~ ddirch(alpha)
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
  for_conv[1:n_params] <- c(rep(0, n_params))
}
"

## Uniform CP ----

### Fixed ----
model_premm_uniform_fixed <- "model{
  for(i in 1:n_subj){
    class[i] ~ dcat(class_prob)
    for(j in 1:n_time){
      y[i,j] ~ dnorm(mu_y[i,j], tau_y)
      mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j])
    }
  }
  # prior distribution of the changepoint
  for(c in 1:n_class){
    for(k in 1:max_cp){
      # prior distribution of number of changepoints
      Temp[c,k] ~ dbern(aux_prob[k])
      cp_indicator[c,k] <- prod(Temp[c,1:k])
      # prior distribution of fixed effect of changepoint
      cp[c,k] ~ dunif(min_cp_mean, max_cp_mean)
      for(j in 1:n_time){
        for(i in 1:n_subj){
          x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k])
        }
      }
    }
    n_cp[c] <- sum(cp_indicator[c,1:max_cp])
    # prior distribution of the fixed parameters
    beta[c,1:max_beta] ~ dmnorm(mean, prec)
  }
  # prior distribution of the class probability
  class_prob ~ ddirch(alpha)
  # prior distribution of the precision for y
  tau_y ~ dgamma(0.001, 0.001)
  sigma2_error <- 1/tau_y
}
"
dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints Temp[c,k] ~ dbern(aux_prob[k]) cp_indicator[c,k] <- prod(Temp[c,1:k]) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the class probability class_prob ~ ddirch(alpha) # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " ### Scale HC ---- model_premm_uniform_scalehc <- "model{ for(i in 1:n_subj){ class[i] ~ dcat(class_prob) for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints Temp[c,k] ~ dbern(aux_prob[k]) cp_indicator[c,k] <- prod(Temp[c,1:k]) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the class probability class_prob ~ ddirch(alpha) # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " # CI-PREMM ---- ## Full ---- ### Binomial CP ---- #### Fixed ---- model_cipremm_full_binomial_fixed <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of 
the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints cp_indicator[c,k] ~ dbern(binom_prob) # prior distribution of fixed effect of changepoint cp[c,k] ~ dunif(min_cp_mean, max_cp_mean) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) # prior distribution of the fixed parameters beta[c,1:max_beta] ~ dmnorm(mean, prec) } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y } " #### Scale Unif ---- model_cipremm_full_binomial_scaleunif <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints cp_indicator[c,k] ~ dbern(binom_prob) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " #### Scale HC ---- model_cipremm_full_binomial_scalehc <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + 
inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints cp_indicator[c,k] ~ dbern(binom_prob) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " ### Uniform CP ---- #### Fixed ---- model_cipremm_full_uniform_fixed <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints Temp[c,k] ~ dbern(aux_prob[k]) cp_indicator[c,k] <- prod(Temp[c,1:k]) # prior distribution of fixed effect of changepoint cp[c,k] ~ dunif(min_cp_mean, max_cp_mean) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) # prior distribution of the fixed parameters beta[c,1:max_beta] ~ dmnorm(mean, prec) } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y } " #### Scale Unif ---- model_cipremm_full_uniform_scaleunif <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- 
class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints Temp[c,k] ~ dbern(aux_prob[k]) cp_indicator[c,k] <- prod(Temp[c,1:k]) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " #### Scale HC ---- model_cipremm_full_uniform_scalehc <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints Temp[c,k] ~ dbern(aux_prob[k]) cp_indicator[c,k] <- prod(Temp[c,1:k]) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 
1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " ## Class Predictive - Only (CPO) ---- ### Binomial CP ---- #### Fixed ---- model_cipremm_cpo_binomial_fixed <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j]) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints cp_indicator[c,k] ~ dbern(binom_prob) # prior distribution of fixed effect of changepoint cp[c,k] ~ dunif(min_cp_mean, max_cp_mean) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) # prior distribution of the fixed parameters beta[c,1:max_beta] ~ dmnorm(mean, prec) } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y } " #### Scale Unif ---- model_cipremm_cpo_binomial_scaleunif <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints cp_indicator[c,k] ~ dbern(binom_prob) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y 
for_conv[1:n_params] <- c(rep(0, n_params)) } " #### Scale HC ---- model_cipremm_cpo_binomial_scalehc <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints cp_indicator[c,k] ~ dbern(binom_prob) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " ### Uniform CP ---- #### Fixed ---- model_cipremm_cpo_uniform_fixed <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j]) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints Temp[c,k] ~ dbern(aux_prob[k]) cp_indicator[c,k] <- prod(Temp[c,1:k]) # prior distribution of fixed effect of changepoint cp[c,k] ~ dunif(min_cp_mean, max_cp_mean) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) # prior distribution of the fixed parameters beta[c,1:max_beta] ~ dmnorm(mean, prec) } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y } " #### Scale Unif ---- model_cipremm_cpo_uniform_scaleunif <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + 
sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints Temp[c,k] ~ dbern(aux_prob[k]) cp_indicator[c,k] <- prod(Temp[c,1:k]) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " #### Scale HC ---- model_cipremm_cpo_uniform_scalehc <- "model{ for(i in 1:n_subj){ logit(logistic_class_prob[i]) <- logistic_intercept + sum(class_predictive_covariate_lambda[1:n_cov_class_predictive]*class_predictive_covariates[i,1:n_cov_class_predictive]) class_bern[i] ~ dbern(logistic_class_prob[i]) class[i] <- class_bern[i] + 1 for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints Temp[c,k] ~ dbern(aux_prob[k]) cp_indicator[c,k] <- prod(Temp[c,1:k]) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the logistic intercept logistic_intercept ~ dnorm(0, 0.1) # prior distribution of the class predictive covariates for(p in 1:n_cov_class_predictive){ class_predictive_covariate_lambda[p] ~ dnorm(0, 0.1) } # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) 
sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " ## Outcome Predictive - Only (OPO) ---- ### Binomial CP ---- #### Fixed ---- model_cipremm_opo_binomial_fixed <- "model{ for(i in 1:n_subj){ class[i] ~ dcat(class_prob) for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints cp_indicator[c,k] ~ dbern(binom_prob) # prior distribution of fixed effect of changepoint cp[c,k] ~ dunif(min_cp_mean, max_cp_mean) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) # prior distribution of the fixed parameters beta[c,1:max_beta] ~ dmnorm(mean, prec) } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the class probability class_prob ~ ddirch(alpha) # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y } " #### Scale Unif ---- model_cipremm_opo_binomial_scaleunif <- "model{ for(i in 1:n_subj){ class[i] ~ dcat(class_prob) for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints cp_indicator[c,k] ~ dbern(binom_prob) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the class probability class_prob ~ ddirch(alpha) # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " #### Scale HC ---- model_cipremm_opo_binomial_scalehc <- "model{ for(i in 1:n_subj){ class[i] ~ dcat(class_prob) for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } 
for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints cp_indicator[c,k] ~ dbern(binom_prob) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the class probability class_prob ~ ddirch(alpha) # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " ### Uniform CP ---- #### Fixed ---- model_cipremm_opo_uniform_fixed <- "model{ for(i in 1:n_subj){ class[i] ~ dcat(class_prob) for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[class[i],1] + beta[class[i],2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[class[i],3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints Temp[c,k] ~ dbern(aux_prob[k]) cp_indicator[c,k] <- prod(Temp[c,1:k]) # prior distribution of fixed effect of changepoint cp[c,k] ~ dunif(min_cp_mean, max_cp_mean) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[c,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) # prior distribution of the fixed parameters beta[c,1:max_beta] ~ dmnorm(mean, prec) } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the class probability class_prob ~ ddirch(alpha) # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y } " #### Scale Unif ---- model_cipremm_opo_uniform_scaleunif <- "model{ for(i in 1:n_subj){ class[i] ~ dcat(class_prob) for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints Temp[c,k] ~ dbern(aux_prob[k]) cp_indicator[c,k] <- prod(Temp[c,1:k]) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dunif(0, (max_time-min_time)/4) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } 
n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dunif(0, (2/prec[q,q])^0.5) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the class probability class_prob ~ ddirch(alpha) # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " #### Scale HC ---- model_cipremm_opo_uniform_scalehc <- "model{ for(i in 1:n_subj){ class[i] ~ dcat(class_prob) for(j in 1:n_time){ y[i,j] ~ dnorm(mu_y[i,j], tau_y) mu_y[i,j] <- beta[i,1] + beta[i,2]*x[i,j] + sum(cp_indicator[class[i],1:max_cp]*beta[i,3:max_beta]*x_cp[class[i],1:max_cp,i,j]) + inprod(outcome_predictive_covariate_alpha[], outcome_predictive_covariates[i,j,]) } } # prior distribution of the random coefficients for(i in 1:n_subj){ for(q in 1:max_beta){ beta[i,q] ~ dnorm(beta_mean[class[i],q], beta_tau[class[i],q]) } for(k in 1:max_cp){ cp[i,k] ~ dnorm(cp_mean[class[i],k], cp_prec[class[i],k]) T(min_time,max_time) } } # prior distribution of the changepoint for(c in 1:n_class){ for(k in 1:max_cp){ # prior distribution of number of changepoints Temp[c,k] ~ dbern(aux_prob[k]) cp_indicator[c,k] <- prod(Temp[c,1:k]) cp_mean[c,k] ~ dunif(min_cp_mean, max_cp_mean) cp_prec[c,k] <- 1/(cp_sd[c,k]^2) # prior distribution of the changepoint variance cp_sd[c,k] ~ dt(0, 1/(((max_time-min_time)/4)/tan(0.45*3.1416))^2, 1) T(0,) for(j in 1:n_time){ for(i in 1:n_subj){ x_cp[c,k,i,j] <- max(0, x[i,j]-cp[i,k]) } } } n_cp[c] <- sum(cp_indicator[c,1:max_cp]) beta_mean[c,1:max_beta] ~ dmnorm(mean, prec) # prior distribution of random-effects variances for(q in 1:max_beta){ beta_sd[c,q] ~ dt(0, 1/(((2/prec[q,q])^0.5)/tan(0.45*3.1416))^2, 1) T(0,) beta_tau[c,q] <- 1/(beta_sd[c,q]^2) } } # prior distribution of the outcome predictive covariates for(l in 1:n_cov_outcome_predictive){ outcome_predictive_covariate_alpha[l] ~ dnorm(0, 0.001) } # prior distribution of the class probability class_prob ~ ddirch(alpha) # prior distribution of the precision for y tau_y ~ dgamma(0.001, 0.001) sigma2_error <- 1/tau_y for_conv[1:n_params] <- c(rep(0, n_params)) } " # CI-PREMM Model Lists ---- ## Full ---- cipremm_mods_full = list(model_cipremm_full_binomial_scaleunif, model_cipremm_full_binomial_scalehc, model_cipremm_full_uniform_scaleunif, model_cipremm_full_uniform_scalehc) ## CPO ---- cipremm_mods_cpo = list(model_cipremm_cpo_binomial_scaleunif, model_cipremm_cpo_binomial_scalehc, model_cipremm_cpo_uniform_scaleunif, model_cipremm_cpo_uniform_scalehc) ## OPO ---- cipremm_mods_opo = list(model_cipremm_opo_binomial_scaleunif, model_cipremm_opo_binomial_scalehc, model_cipremm_opo_uniform_scaleunif, model_cipremm_opo_uniform_scalehc)
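# Illustrative sketch of running one of the model strings above directly,
# assuming the 'rjags' package; Bayes_PREM() presumably handles this setup
# internally, and every data value below (y, x, max_cp = 2, etc.) is a
# hypothetical placeholder whose name is taken from the model string.
# A note on the "Scale HC" variants: the construct
# dt(0, 1/(A/tan(0.45*3.1416))^2, 1) T(0,) is a half-Cauchy prior scaled so
# that its 90th percentile equals A, because tan(0.45*pi) is the 0.9 quantile
# of a standard half-Cauchy.
if (FALSE) {
  library(rjags)
  max_cp <- 2  # hypothetical maximum number of changepoints
  jags_data <- list(y = y, x = x, n_subj = nrow(y), n_time = ncol(y),
                    n_class = 2, max_cp = max_cp, max_beta = 2 + max_cp,
                    binom_prob = 0.5, min_cp_mean = min(x), max_cp_mean = max(x),
                    alpha = c(1, 1), mean = rep(0, 2 + max_cp),
                    prec = diag(0.01, 2 + max_cp))
  # compile the two-class binomial-CP fixed-effects PREMM and draw samples
  mod <- jags.model(textConnection(model_premm_binomial_fixed),
                    data = jags_data, n.chains = 2)
  samples <- coda.samples(mod, variable.names = c("beta", "cp", "n_cp"),
                          n.iter = 5000)
}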
/scratch/gouwar.j/cran-all/cranData/BEND/R/PREM_Models.R
#' Plot a BEND Model (PREM, CREM, BPREM) #' #' @description #' Generates a "spaghetti plot" of observed longitudinal trajectories for each individual. If the results from a `BEND` function are supplied, the trajectory defined by the mean parameters is shown in bold. If fitting a mixture (`PREMM` or `CI-PREMM`) or bivariate model (`BPREM`), the mean trajectories for classes or outcomes will be distinguished by color. #' #' @param data Data frame in long format, where each row describes a measurement occasion for a given individual. It is assumed that each individual has the same number of assigned timepoints (a.k.a., rows). #' @param id_var Name of column that contains ids for individuals with repeated measures in a longitudinal dataset. #' @param time_var Name of column that contains the time variable. #' @param y_var Name of column that contains the outcome variable. #' @param y2_var (for `BPREM` only) Name of column that contains the second outcome variable. #' @param results The output of a `BEND` model fit to the data. If results=NULL, only a spaghetti plot of the data will be generated. #' @param xlab X-axis label for the generated plot. #' @param ylab Y-axis label for the generated plot. #' @param colors Colors for each class (`PREMM` or `CI-PREMM`) or outcome (`BPREM`). By default, up to 5 colors are provided in the following order: “blue” (class 1 and outcome 1), “red” (class 2 and outcome 2), “green” (class 3), “gold” (class 4), “gray” (class 5). #' @param mean_colors Colors for the trajectory defined by the mean parameters for each class (`PREMM` or `CI-PREMM`) or outcome (`BPREM`). By default, up to 5 colors are provided in the following order: “darkblue” (class 1 and outcome 1), “darkred” (class 2 and outcome 2), “darkgreen” (class 3), “gold4” (class 4), “darkgray” (class 5). #' @param legend_pos (optional) Option to change legend position (default = "topright"). #' @param ... (optional) Other parameters to pass to the `plot()` function. #' #' @returns No return value, called to generate plot. #' #' @author Corissa T. 
Rohloff #' #' @examples #' # load simulated data #' data(SimData_PREM) #' # plot observed data #' plot_BEND(data = SimData_PREM, #' id_var = "id", #' time_var = "time", #' y_var = "y") #' # load fitted model results #' data(results_prem) #' # plot fitted results #' plot_BEND(data = SimData_PREM, #' id_var = "id", #' time_var = "time", #' y_var = "y", #' results = results_prem) #' #' @import graphics #' #' @export plot_BEND <- function(data, id_var, time_var, y_var, y2_var=NULL, results=NULL, xlab='X', ylab='Y', colors=NULL, mean_colors=NULL, legend_pos="topright", ...){ # Setup ---- if(is.null(colors)) colors <- c('blue','red','green','gold','gray') if(is.null(mean_colors)) mean_colors = c('darkblue','darkred','darkgreen','gold4','darkgray') ## outcome data - matrix form y <- reshape(data[,c(id_var, time_var, y_var)], idvar=id_var, timevar=time_var, direction='wide') y <- unname(as.matrix(y[,names(y)!=id_var])) # For BPREM only if(!is.null(y2_var)){ y2 <- reshape(data[,c(id_var, time_var, y2_var)], idvar=id_var, timevar=time_var, direction='wide') y2 <- unname(as.matrix(y2[,names(y2)!=id_var])) } ## time data - matrix form ## should be the same dimensions as y x <- matrix(data[,c(time_var)], byrow=TRUE, nrow=dim(y)[1], ncol=dim(y)[2]) ## Define relevant variables n_subj <- nrow(y) n_time <- ncol(y) xvec <- seq(min(x), max(x), length.out=100) # Observed Plot ----- if(is.null(results)){ plot(x[1, !is.na(y[1,])], y[1, !is.na(y[1,])], type = "l", ylim = c(min(y,na.rm=TRUE), max(y,na.rm=TRUE)), xlim= c(min(x,na.rm=TRUE), max(x,na.rm=TRUE)), xlab = xlab, ylab = ylab, ...) for(i in 2:n_subj){ lines(x[i, !is.na(y[i,])], y[i, !is.na(y[i,])], type = "l") } # BPREM only: recolor all first-outcome trajectories (including subject 1) if(!is.null(y2_var)){ for(i in 1:n_subj){ lines(x[i, !is.na(y[i,])], y[i, !is.na(y[i,])], type = "l", col=colors[1]) } for(i in 1:n_subj){ lines(x[i, !is.na(y2[i,])], y2[i, !is.na(y2[i,])], type = "l", col = colors[2]) } legend(legend_pos, lty=1, col=colors[1:2], legend=c("Outcome 1", "Outcome 2")) } return('Observed trajectories') } # Fitted Plot ----- if(!is.null(results)){ ## PREM ----- if(inherits(results, "PREM")){ # determine number of classes n_class <- length(unique(results$Class_Information$class_membership)) # determine number of changepoints in each class (based on final model results) changepoints <- c() class_data <- data.frame() for(i in 1:n_class){ class_num <- paste0("Class_", i) changepoints[i] <- which.max(results$Parameter_Estimates[[class_num]]$K_prob)-1 } max_cp <- max(changepoints) # determine who is in each class class_list <- list() for(i in 1:n_class){ class_list[[i]] <- c(1:n_subj)[results$Class_Information$class_membership==i] } # class mean estimates class_means <- list() for(i in 1:n_class){ class_num <- paste0("Class_", i) cp_num <- paste0("K_", changepoints)[i] k <- changepoints[i] if(k==0) I <- rep(0, max_cp) if(k>0) I <- c(rep(1,k), rep(0,max_cp-k+2)) # i = k+1 int <- results$Parameter_Estimates[[class_num]]$K[[cp_num]]$beta_mean[1] slope1 <- results$Parameter_Estimates[[class_num]]$K[[cp_num]]$beta_mean[2] slope_cp <- rep(0,max_cp) cp <- rep(0,max_cp) if(k>0){ slope_cp[1:k] <- results$Parameter_Estimates[[class_num]]$K[[cp_num]]$beta_mean[3:(k+2)] cp[1:k] <- results$Parameter_Estimates[[class_num]]$K[[cp_num]]$cp_mean[1:k] } class_means[[i]] <- rep(0,100) for(j in 1:100){ temp <- int + slope1*xvec[j] if(k>0){ for(l in 1:k){ temp <- temp + slope_cp[l]*(max(0, xvec[j]-cp[l]))} } class_means[[i]][j] <- temp } } plot(NULL, NULL, xlim = c(min(x), max(x)), ylim = c(min(y,na.rm=TRUE), max(y,na.rm=TRUE)), 
ylab = ylab, xlab = xlab, ...) for(i in 1:n_class){ for(j in class_list[[i]]){ points(x[j,!is.na(y[j,])], y[j,!is.na(y[j,])], type = "l", col=colors[i]) } points(xvec, class_means[[i]], type='l', col=mean_colors[i], lwd=4) } legend(legend_pos, lty=1, col=colors[1:n_class], legend=paste0("Class ", 1:n_class)) } ## CREM ----- if(inherits(results, "CREM")){ # determine form form <- results$Functional_Form # determine number of parameters if(form=="linear") n_param <- 2 if(form=="quadratic" | form=="exponential") n_param <- 3 if(form=="piecewise") n_param <- 4 # pull fixed effect estimates mean_est <- results$Parameter_Estimates$Mean[1:n_param] # define functional form equation fit_form_eq <- function(x,est){ if(form=="linear") return(est[1] + est[2]*x) if(form=="quadratic") return(est[1] + est[2]*x + est[3]*(x^2)) if(form=="exponential") return(est[1] + est[2]*(1-exp(-est[3]*x))) if(form=="piecewise") return(est[1] + est[2]*x + est[3]*(max(0, x-est[4]))) } mean_traj <- rep(0,100) for(i in 1:100){ mean_traj[i] <- fit_form_eq(xvec[i], mean_est) } plot(x[1, !is.na(y[1,])], y[1, !is.na(y[1,])], type = "l", col = "grey", ylim = c(min(y,na.rm=TRUE), max(y,na.rm=TRUE)), xlim= c(min(x,na.rm=TRUE), max(x,na.rm=TRUE)), xlab = xlab, ylab = ylab, ...) for(i in 2:n_subj){ lines(x[i, !is.na(y[i,])], y[i, !is.na(y[i,])], type = "l", col = "grey") } points(xvec, mean_traj, type='l', col="black", lwd=4) } ## BPREM ----- if(inherits(results, "BPREM")){ # pull fixed effect estimates mean_est1 <- results$Parameter_Estimates$Mean[1:4] mean_est2 <- results$Parameter_Estimates$Mean[5:8] # define piecewise function pw_eq <- function(x,est){ return(est[1] + est[2]*x + est[3]*(max(0, x-est[4]))) } mean_traj1 <- rep(0,100) mean_traj2 <- rep(0,100) for(i in 1:100){ mean_traj1[i] <- pw_eq(xvec[i], mean_est1) mean_traj2[i] <- pw_eq(xvec[i], mean_est2) } plot(x[1, !is.na(y[1,])], y[1, !is.na(y[1,])], type = "l", col = colors[1], ylim = c(min(y,na.rm=TRUE), max(y,na.rm=TRUE)), xlim= c(min(x,na.rm=TRUE), max(x,na.rm=TRUE)), xlab = xlab, ylab = ylab, ...) for(i in 2:n_subj){ lines(x[i, !is.na(y[i,])], y[i, !is.na(y[i,])], type = "l", col = colors[1]) } for(i in 1:n_subj){ lines(x[i, !is.na(y2[i,])], y2[i, !is.na(y2[i,])], type = "l", col = colors[2]) } points(xvec, mean_traj1, type='l', col=mean_colors[1], lwd=4) points(xvec, mean_traj2, type='l', col=mean_colors[2], lwd=4) legend(legend_pos, lty=1, col=colors[1:2], legend=c("Outcome 1", "Outcome 2")) } return('Observed trajectories with fitted results') } }
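# Illustrative sketch: customizing the optional arguments of plot_BEND().
# SimData_PREM and results_prem are the example objects shipped with the
# package; the two-color vectors assume the fitted model identified two
# latent classes.
if (FALSE) {
  data(SimData_PREM)
  data(results_prem)
  plot_BEND(data = SimData_PREM, id_var = "id", time_var = "time", y_var = "y",
            results = results_prem,
            xlab = "Time", ylab = "Outcome",
            colors = c("skyblue", "salmon"),
            mean_colors = c("navy", "firebrick"),
            legend_pos = "bottomright")
}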
/scratch/gouwar.j/cran-all/cranData/BEND/R/plot_BEND.R
#' Summarize the results of a bivariate piecewise random effects model (BPREM) #' #' @description #' Provides a summary of a BPREM model, as returned by `Bayes_BPREM()`. #' #' @param object An object of class "BPREM" (returned by `Bayes_BPREM(...)`). #' @param ... Additional arguments. #' #' @returns Prints estimates for key parameters in the BPREM. Also returns a list of these values. #' #' @author Corissa T. Rohloff #' #' @examples #' # load fitted model results #' data(results_bprem) #' # result summary #' summary(results_bprem) #' #' @export summary.BPREM <- function(object, ...){ # Setup ---- # determine number of parameters n_param <- 4 # determine number of covariances n_covar <- ((n_param*2)*((n_param*2)-1))/2 # define parameters for labeling param_names <- c("Intercept", "Slope", "Change in Slope", "Changepoint") # FIXED EFFECTS ----- fix_eff_est <- matrix(object$Parameter_Estimates[1:(n_param*2),"Mean"], ncol=2) colnames(fix_eff_est) <- c("Outcome 1", "Outcome 2") rownames(fix_eff_est) <- paste0(param_names, " Mean") # RANDOM EFFECTS ----- ## COVARIANCE MATRIX ----- ran_eff_cov_mat <- diag(n_param*2) diag(ran_eff_cov_mat) <- object$Parameter_Estimates[((n_param*2)+(1:(n_param*2))),"Mean"] ran_eff_cov_mat[upper.tri(ran_eff_cov_mat)] <- object$Parameter_Estimates[(2*(n_param*2)+(1:n_covar)),"Mean"] # covariances on upper triangle ran_eff_cov_mat[lower.tri(ran_eff_cov_mat)] <- t(ran_eff_cov_mat)[lower.tri(ran_eff_cov_mat)] # copy to lower triangle ran_eff_cov_mat[upper.tri(ran_eff_cov_mat)] <- NA colnames(ran_eff_cov_mat) <- rownames(ran_eff_cov_mat) <- paste0(rep(c("Outcome 1: ", "Outcome 2: "), e=4), rep(param_names,2)) ## CORRELATION MATRIX ----- ran_eff_corr_mat <- diag(n_param*2) ran_eff_corr_mat[upper.tri(ran_eff_corr_mat)] <- object$Parameter_Estimates[(2*(n_param*2)+n_covar+3+(1:n_covar)),"Mean"] # covariances on upper triangle ran_eff_corr_mat[lower.tri(ran_eff_corr_mat)] <- t(ran_eff_corr_mat)[lower.tri(ran_eff_corr_mat)] # copy to lower triangle ran_eff_corr_mat[upper.tri(ran_eff_corr_mat)] <- NA colnames(ran_eff_corr_mat) <- rownames(ran_eff_corr_mat) <- paste0(rep(c("Outcome 1: ", "Outcome 2: "), e=4), rep(param_names,2)) # ERROR ----- error_cov_mat <- diag(2) diag(error_cov_mat) <- object$Parameter_Estimates[(2*(n_param*2)+n_covar+(1:2)),"Mean"] error_cov_mat[upper.tri(error_cov_mat)] <- error_cov_mat[lower.tri(error_cov_mat)] <- object$Parameter_Estimates[(2*(n_param*2)+n_covar+(3)),"Mean"] # covariances on upper triangle error_cov_mat[upper.tri(error_cov_mat)] <- NA colnames(error_cov_mat) <- rownames(error_cov_mat) <- paste0(c("Outcome 1: ", "Outcome 2: "), "Error") # PRINT output ----- cat("Fixed Effect Parameters:\n") print(fix_eff_est, digits=3) cat("\n") cat("Random Effect Parameters:\n") cat("Covariance Matrix:\n") print(ran_eff_cov_mat, digits=3, na.print="") cat("\n") cat("Correlation Matrix:\n") print(ran_eff_corr_mat, digits=3, na.print="") cat("\n") cat("Error:\n") cat("Covariance Matrix:\n") print(error_cov_mat, digits=3, na.print="") cat("\n") cat("Error Corr:", object$Parameter_Estimates[(2*(n_param*2)+2*n_covar+3+1),"Mean"], "\n") cat("Gelman's msrf:", round(object$Convergence$multivariate_psrf, 3), "\n") cat("Mean psrf:", round(object$Convergence$mean_psrf, 3), "\n") cat("DIC:", object$Model_Fit$dic) return(invisible(list("fix_eff_est" = fix_eff_est, "ran_eff_cov_mat" = ran_eff_cov_mat, "ran_eff_corr_mat" = ran_eff_corr_mat, "error_cov_mat" = error_cov_mat, "error_corr" = object$Parameter_Estimates[(2*(n_param*2)+2*n_covar+3+1),"Mean"], "msrf" = 
object$Convergence$multivariate_psrf, "mean_psrf" = object$Convergence$mean_psrf, "DIC" = object$Model_Fit$dic))) }
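# Illustrative sketch: summary() prints the tables above and also returns them
# invisibly, so individual pieces can be captured for further use;
# results_bprem is the example object shipped with the package.
if (FALSE) {
  data(results_bprem)
  s <- summary(results_bprem)
  s$fix_eff_est        # fixed-effect means for both outcomes
  s$ran_eff_corr_mat   # 8 x 8 random-effect correlation matrix
  s$DIC
}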
/scratch/gouwar.j/cran-all/cranData/BEND/R/summary_BPREM.R
#' Summarize the results of a crossed random effects model (CREM) #' #' @description #' Provides a summary of a CREM model, as returned by `Bayes_CREM()`. #' #' @param object An object of class "CREM" (returned by `Bayes_CREM(...)`). #' @param ... Additional arguments. #' #' @returns Prints estimates for key parameters in the CREM. Also returns a list of these values. #' #' @author Corissa T. Rohloff #' #' @examples #' # load fitted model results #' data(results_pcrem) #' # result summary #' summary(results_pcrem) #' #' @export summary.CREM <- function(object, ...){ # Setup ---- # determine form form <- object$Functional_Form # determine number of parameters if(form=="linear") n_param <- 2 if(form=="quadratic" | form=="exponential") n_param <- 3 if(form=="piecewise") n_param <- 4 # determine number of covariances n_covar <- (n_param*(n_param-1))/2 # define parameters for labeling if(form=="linear") param_names <- c("Intercept", "Slope") if(form=="quadratic") param_names <- c("Intercept", "Linear Slope", "Quadratic Slope") if(form=="exponential") param_names <- c("Intercept", "Total Change", "Growth Rate") if(form=="piecewise") param_names <- c("Intercept", "Slope", "Change in Slope", "Changepoint") # FIXED EFFECTS ----- fix_eff_est <- matrix(object$Parameter_Estimates[1:n_param,"Mean"], ncol=1) colnames(fix_eff_est) <- "Estimate" rownames(fix_eff_est) <- paste0(param_names, " Mean") # RANDOM EFFECTS ----- ## INDIVIDUALS ----- ran_eff_b_mat <- diag(n_param) diag(ran_eff_b_mat) <- object$Parameter_Estimates[(n_param+(1:n_param)),"Mean"] ran_eff_b_mat[upper.tri(ran_eff_b_mat)] <- object$Parameter_Estimates[(2*n_param+(1:n_covar)),"Mean"] # covariances on upper triangle ran_eff_b_mat[lower.tri(ran_eff_b_mat)] <- t(ran_eff_b_mat)[lower.tri(ran_eff_b_mat)] # copy to lower triangle ran_eff_b_mat[upper.tri(ran_eff_b_mat)] <- NA colnames(ran_eff_b_mat) <- rownames(ran_eff_b_mat) <- param_names ## GROUPS ----- ran_eff_g_mat <- diag(n_param) diag(ran_eff_g_mat) <- object$Parameter_Estimates[(2*n_param+n_covar+(1:n_param)),"Mean"] ran_eff_g_mat[upper.tri(ran_eff_g_mat)] <- object$Parameter_Estimates[(3*n_param+n_covar+(1:n_covar)),"Mean"] # covariances on upper triangle ran_eff_g_mat[lower.tri(ran_eff_g_mat)] <- t(ran_eff_g_mat)[lower.tri(ran_eff_g_mat)] # copy to lower triangle ran_eff_g_mat[upper.tri(ran_eff_g_mat)] <- NA colnames(ran_eff_g_mat) <- rownames(ran_eff_g_mat) <- param_names # PRINT output ----- cat("Fixed Effect Parameters:\n") print(fix_eff_est, digits=3) cat("\n") cat("Random Effect Parameters:\n") cat("Individual Random Effects Covariance Matrix:\n") print(ran_eff_b_mat, digits=3, na.print="") cat("\n") cat("Group Random Effects Covariance Matrix:\n") print(ran_eff_g_mat, digits=3, na.print="") cat("\n") cat("Error Var:", object$Parameter_Estimates[(3*n_param+2*n_covar+1),"Mean"], "\n") cat("Gelman's msrf:", round(object$Convergence$multivariate_psrf, 3), "\n") cat("Mean psrf:", round(object$Convergence$mean_psrf, 3), "\n") cat("DIC:", object$Model_Fit$dic) return(invisible(list("fix_eff_est" = fix_eff_est, "ran_eff_b_mat" = ran_eff_b_mat, "ran_eff_g_mat" = ran_eff_g_mat, "error_var" = object$Parameter_Estimates[(3*n_param+2*n_covar+1),"Mean"], "msrf" = object$Convergence$multivariate_psrf, "mean_psrf" = object$Convergence$mean_psrf, "DIC" = object$Model_Fit$dic))) }
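# Illustrative sketch: as with summary.BPREM(), the printed tables are returned
# invisibly; results_pcrem is the example object shipped with the package.
if (FALSE) {
  data(results_pcrem)
  s <- summary(results_pcrem)
  s$ran_eff_b_mat   # individual-level random-effect covariance matrix
  s$ran_eff_g_mat   # group-level random-effect covariance matrix
  s$error_var
}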
/scratch/gouwar.j/cran-all/cranData/BEND/R/summary_CREM.R
#' Summarize the results of a piecewise random effects model (PREM) #' #' @description #' Provides a summary of a PREM model, as returned by `Bayes_PREM()`. #' #' @param object An object of class "PREM" (returned by `Bayes_PREM(...)`). #' @param ... Additional arguments. #' #' @returns Prints estimates for key parameters in the PREM. Also returns a list of these values. #' #' @author Corissa T. Rohloff #' #' @examples #' # load fitted model results #' data(results_prem) #' # result summary #' summary(results_prem) #' #' @export summary.PREM <- function(object, ...){ # Setup ---- # determine number of classes n_class <- length(unique(object$Class_Information$class_membership)) # determine number of changepoints in each class (based on final model results) changepoints <- c() class_data <- data.frame() for(i in 1:n_class){ class_num <- paste0("Class_", i) changepoints[i] <- which.max(object$Parameter_Estimates[[class_num]]$K_prob)-1 } max_cp <- max(changepoints) # detect covariates n_cov_op <- length(object$Parameter_Estimates$outcome_predictive_covariates) n_cov_cp <- length(object$Parameter_Estimates$class_predictive_covariates) # CLASS DEPENDENT PARAMETERS ----- # pull parameter estimates for each class coeff_mat <- matrix(NA, nrow=4+4*max_cp, ncol=n_class) for(i in 1:n_class){ class_num <- paste0("Class_", i) cp_num <- paste0("K_", changepoints)[i] # fixed effects coeff_mat[1,i] <- object$Parameter_Estimates[[class_num]]$K[[cp_num]]$beta_mean[1] coeff_mat[2,i] <- object$Parameter_Estimates[[class_num]]$K[[cp_num]]$beta_mean[2] # random effects coeff_mat[max_cp*2+2+1,i] <- object$Parameter_Estimates[[class_num]]$K[[cp_num]]$beta_var[1] coeff_mat[max_cp*2+2+2,i] <- object$Parameter_Estimates[[class_num]]$K[[cp_num]]$beta_var[2] # if there are changepoints (beyond linear) if(changepoints[i]>0){ for(k in 1:changepoints[i]){ # fixed effects coeff_mat[2*k+1,i] <- object$Parameter_Estimates[[class_num]]$K[[cp_num]]$cp_mean[k] coeff_mat[2*k+2,i] <- object$Parameter_Estimates[[class_num]]$K[[cp_num]]$beta_mean[k+2] # random effects coeff_mat[k*2+(max_cp*2+2+1),i] <- object$Parameter_Estimates[[class_num]]$K[[cp_num]]$cp_var[k] coeff_mat[k*2+(max_cp*2+2+2),i] <- object$Parameter_Estimates[[class_num]]$K[[cp_num]]$beta_var[k+2] } } } colnames(coeff_mat) <- paste0("Class ", 1:n_class) my_rownames <- rep(NA, 4+4*max_cp) my_rownames[1] = "Intercept Mean" my_rownames[2] = "Slope Mean" my_rownames[max_cp*2+2+1] = "Intercept Var" my_rownames[max_cp*2+2+2] = "Slope Var" for(k in 1:max_cp){ my_rownames[2*k+1] = paste0("Changepoint ", k, " Mean") my_rownames[2*k+2] = paste0("Change in Slope ", k, " Mean") my_rownames[k*2+(max_cp*2+2+1)] = paste0("Changepoint ", k, " Var") my_rownames[k*2+(max_cp*2+2+2)] = paste0("Change in Slope ", k, " Var") } rownames(coeff_mat) <- my_rownames # number of changepoints info coeff_n_cp <- matrix(changepoints, nrow = 1) rownames(coeff_n_cp) <- "Number of Changepoints" coeff_mat <- rbind(coeff_n_cp, coeff_mat) # class probability info class_probs <- matrix(prop.table(table(object$Class_Information$class_membership)), nrow=1) rownames(class_probs) <- "Empirical Class Probabilities" coeff_mat <- rbind(class_probs, coeff_mat) # CLASS INDEPENDENT PARAMETERS ----- coeff_addit <- data.frame("Estimate" = object$Parameter_Estimates$error_var) rownames(coeff_addit) <- "Error Var" # Covariates if(n_cov_op > 0){ coeff_cov_op <- data.frame("Estimate" = object$Parameter_Estimates$outcome_predictive_covariates) coeff_addit <- rbind(coeff_addit, coeff_cov_op) } if(n_cov_cp > 0){ coeff_cov_cp 
<- data.frame("Estimate" = object$Parameter_Estimates$class_predictive_covariates) rownames(coeff_cov_cp) <- paste0(rownames(coeff_cov_cp), " (in log-odds units)") coeff_log_int <- data.frame("Estimate" = object$Parameter_Estimates$logistic_intercept) rownames(coeff_log_int) <- "Logistic Intercept" coeff_addit <- rbind(coeff_addit, coeff_cov_cp, coeff_log_int) } # PRINT output ----- cat("Class Dependent Parameters:\n") print(coeff_mat, digits=3, na.print="") cat("\n") cat("Class Independent Parameters:\n") print(coeff_addit, digits=3) cat("\n") cat("Gelman's msrf:", round(object$Convergence$multivariate_psrf, 3), "\n") cat("Mean psrf:", round(object$Convergence$mean_psrf, 3), "\n") cat("DIC:", object$Model_Fit$dic) return(invisible(list("class_dep_params" = coeff_mat, "class_ind_params" = coeff_addit, "msrf" = object$Convergence$multivariate_psrf, "mean_psrf" = object$Convergence$mean_psrf, "DIC" = object$Model_Fit$dic))) }
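# Illustrative sketch: the invisible return separates class-dependent and
# class-independent estimates; results_prem is the example object shipped
# with the package.
if (FALSE) {
  data(results_prem)
  s <- summary(results_prem)
  s$class_dep_params   # per-class changepoint, mean, and variance estimates
  s$class_ind_params   # error variance plus any covariate effects
  s$mean_psrf          # convergence diagnostic
}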
/scratch/gouwar.j/cran-all/cranData/BEND/R/summary_PREM.R
#' Read BENMMI Settings File #' #' This function reads BENMMI settings files. See the package vignette #' for a description of its format. Create an example file by #' calling the \code{\link[BENMMI]{BENMMIdir}}-function. #' #' @param filename name of BENMMI input file (\code{character}) #' #' @details The function performs the following tasks: #' \itemize{ #' \item{checks the existence of \code{filename};} #' \item{reads JSON file while ignoring C-style comments;} #' \item{checks availability of required keys in the JSON-file;} #' \item{checks values in JSON-file} #' } #' #' @seealso \code{\link[BENMMI]{BENMMIdir}} #' @import jsonlite read_settings <- function(filename) { # check existence of settings file if (!file.exists(filename)) { stop("File not found", call. = FALSE) } # read settings file settings <- readLines(con = filename, warn = FALSE) # remove all C-style comments (//) # Note: comments are formally not part of the JSON specification! settings <- sub(pattern = "//.*$", replacement = "", x = settings) # parse JSON if (!validate(settings)) { stop( sprintf( "Errors found in %s. Check JSON-format (e.g. brackets, braces, trailing commas)", sQuote(filename) ), call. = FALSE ) } settings <- fromJSON(settings) names(settings) <- tolower(names(settings)) # define keys required_keys <- c("title", "user", "date", "files", "indicators", "confidenceLevel", "months", "pooling", "genusToSpeciesConversion") optional_keys <- c("weights", "pressure", "legendText", "model") available_keys <- c(required_keys, optional_keys) # either indices or indicators are allowed, but not both if (!is.null(settings$indices) & !is.null(settings$indicators)) { stop( sprintf( fmt = "Use either key %s or key %s in JSON-file\n%s, but not both.", sQuote("indices"), sQuote("indicators"), sQuote(basename(filename)) ), call. = FALSE ) } # rename key 'indices' to 'indicators' if (!is.null(settings$indices)) { settings$indicators <- settings$indices settings$indices <- NULL } # check if required keys are available found <- tolower(required_keys) %in% names(settings) if (any(!found)) { stop( sprintf( fmt = "key %s is missing in JSON-file %s\n(see package vignette)", toString(sQuote(required_keys[!found])), sQuote(basename(filename)) ), call. = FALSE ) } # check required files required_keys <- c("benthos", "taxa", "groupsToExclude", "habitats") names(settings$files) <- tolower(names(settings$files)) found <- tolower(required_keys) %in% names(settings$files) if (any(!found)) { stop( sprintf( fmt = "The following keys are missing in %s: %s\n(see package vignette)", sQuote(basename(filename)), toString(sQuote(required_keys[!found])) ), call. = FALSE ) } # check selected indicators valid_indicators <- c("N", "LNN", "S", "D", "SN", "SNA", "H", "AMBI", "ITI", "L", "PIE", "N2") # total abundance, natural log of total abundance, # species richness, Margalef's D, Rygg's SN and its adjustment SNA, # Shannon H, AMBI, ITI, Simpson's L, Hurlbert's PIE, Hill's N2 if (length(settings$indicators) == 0L) { stop( sprintf( fmt = paste0( "No indices have been specified in JSON-file %s.\n", "Please choose a subset of %s (see package vignette)"), sQuote(basename(filename)), toString(valid_indicators) ), call. = FALSE ) } if (length(settings$indicators) > 3L) { stop( sprintf( fmt = "Maximum 3 indices allowed (%i found in %s).", length(settings$indicators), sQuote(basename(filename)) ), call. 
= FALSE ) } is_invalid <- !(tolower(settings$indicators) %in% tolower(valid_indicators)) if (any(is_invalid)) { stop( sprintf( fmt = paste0( "Invalid indices found in JSON-file %s: %s\n", "Please choose a subset of %s (see package vignette)"), sQuote(basename(filename)), toString(sQuote(settings$indicators[is_invalid])), toString(valid_indicators) ), call. = FALSE ) } settings$indicators <- tolower(settings$indicators) # check indicator weights if (!is.null(settings$weights)) { w <- settings$weights if (is.character(w)) { # handle rational numbers like 1/3 etc. tmp <- w w <- sapply(X = parse(text = w), FUN = eval) attr(w, "character") <- tmp } if (length(w) != length(settings$indicators)) { stop( sprintf( fmt = paste("The number of weights (%i) should be equal", "to the number of indices (%i)"), length(w), length(settings$indicators) ), call. = FALSE ) } is_negative <- w < -.Machine$double.eps if (any(is_negative)) { stop( sprintf( fmt = "Negative weights found in JSON-file: %s", sQuote(basename(filename)) ), call. = FALSE ) } if (abs(1 - sum(w)) > 1.0e-3) { message("Weights do not sum to one and will be normalized first.") w <- abs(w) # small neg.numbers -> 0 w <- w / sum(w) attr(w, "normalized") <- TRUE } else { attr(w, "normalized") <- FALSE } settings$weights <- w } # check confidence level is_invalid <- (settings$confidencelevel < 0.50) || (settings$confidencelevel > 0.99) if (is_invalid) { stop( sprintf( fmt = "Confidence level needs to be in [0.5, 0.99]. Check JSON-file: %s", sQuote(basename(filename)) ), call. = FALSE ) } # process pressure info if (is.null(settings$pressure)) { if (is.null(settings$weights)) { stop( sprintf( fmt = "weights should be given in %s when pressure is missing", sQuote(basename(filename)) ), call. = FALSE ) } } else { required_keys <- c("name", "unit") names(settings$pressure) <- tolower(names(settings$pressure)) found <- tolower(required_keys) %in% names(settings$pressure) if (any(!found)) { stop( sprintf( fmt = "key pressure.%s is missing in %s\n(see package vignette)", toString(sQuote(required_keys[!found])), sQuote(basename(filename)) ), call. = FALSE ) } } # check months if (!is.integer(settings$months) || length(settings$months) != 2L) { stop( "key 'months' should be an integer vector of length 2", call. = TRUE ) } if (!all(settings$months %in% 1:12)) { stop("elements of key 'months' should be in [1, 12]", call. = TRUE) } if ((settings$months[2] - settings$months[1]) < 0) { stop( "First month to analyse should be smaller than or equal to last month", call. = TRUE ) } # check data pooling names(settings$pooling) <- tolower(names(settings$pooling)) if (!is.logical(settings$pooling$enabled)) { stop( "key 'pooling:enabled' should be either 'true' or 'false'", call. = TRUE ) } if (settings$pooling$enabled) { settings$pooling$targetarea <- range(settings$pooling$targetarea) if (!is.numeric(settings$pooling$targetarea) || (length(settings$pooling$targetarea) != 2L)) { stop( "key 'pooling:targetArea' should be a numeric vector of length 2", call. = TRUE ) } settings$pooling$randomseed <- as.integer(settings$pooling$randomseed) if (!is.integer(settings$pooling$randomseed)) { stop( "key 'pooling:randomSeed' needs to be an integer vector of length 1", call. = TRUE ) } } # genus to species conversion if (!is.logical(settings$genustospeciesconversion)) { stop( "key 'genusToSpeciesConversion' should be either 'true' or 'false'", call. 
= TRUE ) } # check key 'legendText' if (is.null(settings$legendtext)) { settings$legendtext <- "normalized" } else { if ( (length(settings$legendtext) != 1L) || !(tolower(settings$legendtext) %in% c("eqr", "normalized"))) { stop( sprintf( "key %s should be either %s or %s", sQuote("legendText"), sQuote("EQR"), sQuote("normalized") ), call. = FALSE ) } } # check key 'model' if (is.null(settings$model)) { settings$model <- "linear" } else { if ( (length(settings$model) != 1L) || !(settings$model %in% c("linear", "exponential"))) { stop( sprintf( "key %s should be either %s or %s", sQuote("model"), sQuote("linear"), sQuote("exponential") ), call. = FALSE ) } } # issue a warning if keys in JSON-file have not been used not_used <- setdiff(tolower(names(settings)), tolower(available_keys)) if (length(not_used) > 0L) { warning( sprintf( fmt = "key(s) %s not used in JSON-file: %s", toString(not_used), sQuote(basename(filename)) ), call. = FALSE ) } # return results settings } #' Read and Validate BENMMI Input Files #' #' This function reads and checks benthos files. The format is a superset #' of the BEQI2-format as specified in Van Loon (2013). In addition to the #' BEQI2-format, the benthos-format also includes columns latitude (LAT), #' longitude (LONG), and sieve mesh size (MESH). #' #' @param filename name of benthos file (\code{character}) #' #' @import benthos #' @importFrom purrr walk #' #' @references Willem van Loon, 2013. BEQI2 INPUT FORMAT #' #' @seealso \code{\link[benthos]{read_beqi2}} read_mmi <- function(filename) { # read BEQI2-format d <- read_beqi2(filename) # additional validation required_vars <- c("LAT", "LONG", "MESH") missing_vars <- setdiff(required_vars, toupper(names(d))) if (length(missing_vars) > 0L) { stop( sprintf( fmt = "The following columns are missing: %s", toString(missing_vars) ), call. = FALSE ) } # check availability of pressure column if ("PRESSURE" %in% toupper(names(d))) { required_vars <- c(required_vars, "PRESSURE") # check values of pressure column if (!is.numeric(d$PRESSURE)) { d$PRESSURE <- try( as.Date(d$PRESSURE), silent = TRUE ) if (inherits(d$PRESSURE, "try-error")) { stop( "Either dates (YYYY-MM-DD) or numeric values expected\n", "in the PRESSURE column of the benthos file", call. = FALSE ) } # convert date to numeric value year <- d$PRESSURE %>% format("%Y") %>% as.numeric day_of_year <- d$PRESSURE %>% format("%j") %>% as.numeric days_in_year <- paste0(year, "-12-31") %>% as.Date %>% format("%j") %>% as.numeric d$PRESSURE <- year + (day_of_year - 0.5) / days_in_year } } # check completeness of required variables required_vars %>% walk(function(x) { is_na <-is.na(d[[x]]) if (any(is_na)) { stop( sprintf( fmt = "Column %s in %s contains missing values at lines: %s", sQuote(x), sQuote(filename), toString(which(is_na)) ), call. = FALSE ) } }) # check uniqueness of coordinates n1 <- d %>% select_(~OBJECTID, ~SAMPLEID, ~DATE) %>% distinct_ %>% nrow n2 <- d %>% select_(~OBJECTID, ~SAMPLEID, ~DATE, ~LAT, ~LONG) %>% distinct_ %>% nrow if (n1 != n2) { stop( "Coordinates have to be unique for combinations of ", "OBJECTID, SAMPLEID, DATE", call. = FALSE ) } # check uniqueness of sieve mesh mesh_size <- unique(d$MESH) is_unique <- length(mesh_size) == 1L if (!is_unique) { stop( sprintf( fmt = "Sieve mesh size is not unique.\nThe following mesh sizes have been found in column MESH: %s", toString(mesh_size) ), call. 
= FALSE ) } # return result d } #' Perform BENMMI Analysis #' #' This function performs a complete BENMMI analysis following the #' settings provided in \code{filename}. #' #' @param filename name of the JSON file defining all analysis steps. #' @param tmpdir directory to store temporary files (for debugging only) #' @param browse load resulting report in a browser? \code{TRUE} or \code{FALSE} #' #' @import xtable #' @import dplyr #' @import markdown #' @importFrom tidyr spread gather extract_numeric unnest #' @import knitr #' @import tcltk #' @import ggplot2 #' @importFrom utils browseURL flush.console #' @importFrom readr read_csv cols_only col_character #' #' @examples #' #'\donttest{ #' # This example illustrates a typical use case of the BENMMI-package. #' # Note: execution may take several minutes. #' # See the package vignette for more advanced examples and details. #' #' if (interactive()) { #' #' # Create a work directory (in this example, a temporary #' # directory, but in real use cases a persistent directory #' # will obviously be more useful). #' my_dir <- tempfile("benmmi-example") #' dir.create(my_dir) #' #' # Populate this directory with simple use cases #' # (see the package-vignette for details). #' # Most users will probably use one of these use cases as a #' # template for their own study. #' BENMMIdir(my_dir) #' #' # Run BENMMI given the settings in "settings-S-D-lin.json". This file #' # relates to one of the predefined use cases. #' my_settings_file <- file.path(my_dir, "settings-S-D-lin.json") #' benmmi(my_settings_file, browse = FALSE) #' #' # The output (HTML-report and data-files) is stored in 'my_dir' #' # and described in the package-vignette and resulting HTML-report itself. #' # It is also possible to directly view the generated #' # HTML-report by setting the browse-argument of the benmmi-function to TRUE. #' } #' } #' @rdname benmmi-main #' @export benmmi <- function(filename = NULL, tmpdir = tempfile(pattern = "BENMMI"), browse = TRUE) { # prevent potential problems with dates in other locales old_locale <- Sys.getlocale("LC_TIME") on.exit(Sys.setlocale("LC_TIME", old_locale)) Sys.setlocale("LC_TIME", "C") # interactive selection of filename if (is.null(filename)) { if (capabilities("tcltk")) { filename <- tk_choose.files( default = "", caption = "Select file with BENMMI settings", multi = FALSE, filters = matrix(data = c("BENMMI settings", ".json"), nrow = 1) ) } else { stop( "The 'tcltk'-package is not supported on this machine.\n", "Please provide a valid filename as function argument\n", call. = FALSE ) } } # stop if the user presses Cancel or Esc if(length(filename) == 0L) { message("The BENMMI run has been cancelled by the user.") return(invisible(NULL)) } # check if filename exists if (!file.exists(filename)) { stop( sprintf("JSON-file %s does not exist", sQuote(filename)), call. 
= FALSE ) } # initialization message message("The BENMMI tool is running...") flush.console() # read settings settings <- read_settings(filename) # set working directory owd <- getwd() on.exit(setwd(owd), add = TRUE) setwd(dirname(filename)) # normalize paths (full paths to make package more robust) for (f in names(settings$files)) { settings$files[[f]] <- suppressWarnings(normalizePath(settings$files[[f]])) } # add output files output_dir <- file.path( getwd(), paste0("OUTPUT-", format(Sys.time(), format = "%Y%m%dT%H%M%S")) ) dir.create(output_dir) prefix <- sub( pattern = "\\.[[:alnum:]]+$", replacement = "", x = basename(settings$files$benthos) ) settings$files$log <- file.path(output_dir, paste0("LOG-", prefix, ".log")) settings$files$out_sample <- file.path(output_dir, paste0("SAMPLE-", prefix, ".csv")) settings$files$out_habitat <- file.path(output_dir, paste0("HABITAT-", prefix, ".csv")) settings$files$out_objectid <- file.path(output_dir, paste0("OBJECTID-", prefix, ".csv")) settings$files$out_group <- file.path(output_dir, paste0("GROUP-", prefix, ".csv")) settings$files$out_iti <- file.path(output_dir, paste0("ITI-", prefix, ".csv")) settings$files$out_tidy <- file.path(output_dir, paste0("TIDY-", prefix, ".csv")) if (settings$pooling$enabled) { settings$files$pooling <- file.path(output_dir, paste0("POOLING-", prefix, ".csv")) } settings$files$report <- file.path(output_dir, paste0("REPORT-", prefix, ".html")) # settings$files$snap_shot <- file.path(output_dir, # paste0(sub(pattern = "OUTPUT", replacement = "BENMMI-snapshot", # x = basename(output_dir)), ".zip")) # start logging to_log <- function(level = c("INFO", "WARNING", "ERROR"), message) { level <- match.arg(level) cat( format(Sys.time()), " [", level, "] ", message, "\n", sep = "", file = settings$files$log, append = TRUE ) if (level != "INFO") { switch(level, "ERROR" = stop(message, call. = FALSE), "WARNING" = warning(message, call. = FALSE) ) } } to_log("INFO", "Starting a new BENMMI session") on.exit(to_log("INFO", "The BENMMI session has been terminated"), add = TRUE) # initialize random number generator if (settings$pooling$enabled) { to_log("INFO", "Initializing the pseudo random number generator...") if (is.null(settings$pooling$randomseed)) { to_log("INFO", "No seed has been specified.") to_log("INFO", "The default initialization process will be followed.") } else { set.seed(seed = settings$pooling$randomseed) } to_log("INFO", "the pseudo random number generator has been initialized.") } # check existence of taxa-file to_log("INFO", sprintf( "Checking the existence of taxa-file %s...", sQuote(basename(settings$files$taxa)) ) ) if (!file.exists(settings$files$taxa)) { to_log("ERROR", "The taxa-file has not been found") return(invisible(NULL)) } to_log("INFO", "the taxa-file has been found") # read taxa-file to_log("INFO", "Reading the taxa-file...") d_taxa <- tryCatch( read_taxa(filename = settings$files$taxa), error = function(e) { to_log("ERROR", sprintf("while reading taxa-file. 
%s", e$message)) } ) to_log("INFO", "the taxa-file has been read") # check existence of 'groupsToExclude'-file to_log("INFO", sprintf( "Checking the existence of 'groups to exclude'-file %s...", sQuote(basename(settings$files$groupstoexclude)) ) ) if (!file.exists(settings$files$groupstoexclude)) { to_log("ERROR", "The 'groupsToExclude'-file has not been found") return(invisible(NULL)) } to_log("INFO", "the 'groupsToExclude'-file has been found") # read 'groupsToExclude'-file to_log("INFO", "Reading the 'groupsToExclude'-file...") d_groups <- tryCatch( settings$files$groupstoexclude %>% read_csv( col_types = cols_only( GROUP = col_character(), DESCRIPTION = col_character() ) ), error = function(e) { to_log("ERROR", sprintf("while reading taxa-file. %s", e$message)) } ) to_log("INFO", "the 'groupsToExclude'-file has been read") # check existence of the benthos file to_log("INFO", sprintf( "Checking the existence of benthos file %s...", sQuote(basename(settings$files$benthos)) ) ) if (!file.exists(settings$files$benthos)) { to_log("ERROR", "The benthos file has not been found") return(invisible(NULL)) } to_log("INFO", "the benthos file has been found") # read benthos-file to_log("INFO", "Reading the benthos-file...") d_mmi <- tryCatch( read_mmi(filename = settings$files$benthos), error = function(e) { to_log("ERROR", sprintf("while reading benthos-file. %s", e$message)) } ) if (!("PRESSURE" %in% names(d_mmi)) || all(is.na(d_mmi$PRESSURE))) { d_mmi$PRESSURE <- NULL if (is.null(settings$weights)) { stop( sprintf( fmt = "weights should be given in %s when PRESSURE column is missing in %s", sQuote(basename(filename)), sQuote(basename(settings$files$benthos)) ), call. = FALSE ) } } if (!("PRESSURE" %in% names(d_mmi))) { if (!is.null(settings$pressure)) { to_log("WARNING", sprintf( fmt = paste( "pressure has been specified in %s but is missing in %s\n", "BENMMI continues without pressure optimization"), sQuote(basename(filename)), sQuote(basename(settings$files$benthos)) ) ) settings$pressure <- NULL } } to_log("INFO", "the benthos file has been read") # check if records are within the period of interest in_poi <- d_mmi$DATE %>% format(format = "%m") %>% as.integer %>% between(settings$months[1], settings$months[2]) if (!any(in_poi)) { to_log("ERROR", sprintf("No months in file %s are in the specified interval [%s].", sQuote(basename(settings$files$benthos)), paste(settings$months, collapse = ", ") ) ) } # make sure that names in benthos file correspond to those in the taxa-list d_mmi <- d_mmi %>% mutate_( TAXON_OLD = ~TAXON, TAXON = ~as_accepted(taxon = TAXON, taxa = d_taxa) ) # add unique sampling unit identifier d_mmi <- d_mmi %>% select_(~OBJECTID, ~SAMPLEID, ~DATE) %>% distinct_ %>% mutate_(ID = ~row_number()) %>% inner_join(d_mmi, by = c("OBJECTID", "SAMPLEID", "DATE")) # conditionally read user-defined AMBI d_ambi <- NULL if (("ambi" %in% settings$indicators) && !is.null(settings$files$ambi) && (settings$files$ambi != "")) { to_log("INFO", sprintf( "Checking the existence of AMBI-file %s...", settings$files$ambi %>% basename %>% sQuote ) ) if (!file.exists(settings$files$ambi)) { to_log("ERROR", "the AMBI-file has not been found") return(invisible(NULL)) } to_log("INFO", "the AMBI-file has been found") to_log("INFO", "Reading the AMBI-file...") d_ambi <- tryCatch( read_ambi(filename = settings$files$ambi), error = function(e) { to_log("ERROR", sprintf("while reading AMBI-file. 
%s", e$message)) } ) to_log("INFO", "the AMBI-file has been read") # make sure that names in sensitivity file correspond to those in the taxa-list d_ambi <- d_ambi %>% mutate_(TAXON = ~as_accepted(taxon = TAXON, taxa = d_taxa)) %>% distinct_ # check if taxa are still unique after conversion to WoRMS if (anyDuplicated(d_ambi$TAXON)) { to_log("WARNING", paste0( "the AMBI-file causes inconsistencies\n", "(TAXON-AMBI class combinations are not unique)\n", "Only the first combination will be used" ) ) } d_ambi <- d_ambi %>% distinct_(.dots = "TAXON", .keep_all = TRUE) } # read user-defined ITI if ("iti" %in% settings$indicators) { if (is.null(settings$files$iti) || (settings$files$iti == "")) { to_log("ERROR", "the ITI-file has not been specified") } to_log("INFO", sprintf( "Checking the existence of ITI-file %s...", settings$files$iti %>% basename %>% sQuote ) ) if (!file.exists(settings$files$iti)) { to_log("ERROR", "the ITI-file has not been found") } to_log("INFO", "the ITI-file has been found") to_log("INFO", "Reading the ITI-file...") d_iti <- tryCatch( read_iti(filename = settings$files$iti), error = function(e) { to_log("ERROR", sprintf("while reading ITI-file. %s", e$message)) } ) to_log("INFO", "the ITI-file has been read") # make sure that names in sensitivity file correspond to those in the taxa-list d_iti <- d_iti %>% mutate_(TAXON = ~as_accepted(taxon = TAXON, taxa = d_taxa)) %>% distinct_ # check if taxa are still unique after conversion to WoRMS if (anyDuplicated(d_iti$TAXON)) { to_log("WARNING", paste0( "the ITI-file causes inconsistencies\n", "(TAXON-ITI class combinations are not unique)\n", "Only the first combination will be used" ) ) } d_iti <- d_iti %>% distinct_(.dots = "TAXON", .keep_all = TRUE) } # read habitat reference file to_log("INFO", sprintf( "Checking the existence of habitat reference file %s...", settings$files$habitats %>% basename %>% sQuote ) ) if (!file.exists(settings$files$habitats)) { to_log("ERROR", "the habitat reference file has not been found") return(invisible(NULL)) } to_log("INFO", "the habitat reference file has been found") to_log("INFO", "Reading the habitat reference file...") d_ref <- tryCatch( read_ref(filename = settings$files$habitats, settings$indicators), error = function(e) { to_log("ERROR", sprintf( paste0( "while reading habitat reference file:\n%s\n", "see vignette for its format and how to estimate reference values." ), e$message ) ) } ) to_log("INFO", "the habitat reference file has been read") # check if reference data are available for all records in d_mmi to_log( "INFO", "Checking if reference data are available for all records in the benthos-file..." 
) d <- d_mmi %>% select_(~OBJECTID, ~HABITAT) %>% distinct_ %>% left_join(d_ref, by = c("OBJECTID", "HABITAT")) col_names <- names(d) %>% setdiff(c("OBJECTID", "HABITAT")) d <- d %>% filter_(.dots = paste(sprintf("is.na(%s)", col_names), collapse = "|")) if (nrow(d) > 0L) { M <- NULL for (i in 1:nrow(d)) { m <- d[i, ] %>% unlist(use.names = TRUE) m2 <- m[is.na(m)] m1 <- m[c("OBJECTID", "HABITAT")] m1 <- paste(names(m1), sQuote(m1), sep = "=", collapse = " AND ") m <- paste0(paste(names(m2), collapse = ", "), " for ", m1) M <- c(M, m) } to_log("ERROR", sprintf( "The following columns are empty in the habitat reference file:\n%s", paste(M, collapse = ";\n") ) ) return(invisible(NULL)) } to_log("INFO", "reference data are available for all records in the benthos-file.") # create temporary directory if (!file.exists(tmpdir)) { to_log("INFO", "Creating a temporary directory...") dir.create(tmpdir) } to_log("INFO", "a temporary directory has been created.") # copy template of the report to temporary directory to_log("INFO", "Populating the temporary directory...") templates <- list.files( path = system.file("Rmd", package = "BENMMI"), pattern = "\\.Rmd$", full.names = TRUE) file.copy(from = templates, to = tmpdir) to_log("INFO", "the temporary directory has been populated.") # create Markdown document # (code below works better than knit2html) to_log("INFO", "Starting to create a report...") setwd(tmpdir) suppressMessages( mdfile <- try(knit(input = "benmmi.Rmd", quiet = TRUE), silent = TRUE) ) if (inherits(mdfile, "try-error")) { to_log( level = "ERROR", message = toString(attr(mdfile, "condition")$message) ) return(invisible(NULL)) } to_log("INFO", "a report has been created.") to_log("INFO", "Converting the report to HTML...") output <- markdownToHTML( file = mdfile, output = NULL, options = getOption("markdown.HTML.options"), extensions = getOption("markdown.extensions"), title = "BENMMI Report", stylesheet = system.file("css", "benmmi.css", package = "BENMMI") ) writeLines(text = output, con = settings$files$report) to_log("INFO", "the report has been converted to HTML.") # create data snap shot of outputs # zip( # zipfile = settings$files$snap_shot, # files = c(settings$files$log, settings$files$out_sample, # settings$files$out_habitat, settings$files$out_objectid, # settings$files$report, settings$files$pooling), # flags = "-qj9X" # ) # view result if (browse) { browseURL(settings$files$report) } # finalization message("The BENMMI run has been completed successfully.") } #' Perform BENMMI Analysis #' #' @rdname benmmi-main #' #' @export BENMMI <- function(filename = NULL, tmpdir = tempdir(), browse = TRUE) { benmmi(filename = filename, tmpdir = tmpdir, browse = browse) } #' Create BENMMI Directory Structure #' #' Creates a BENMMI-directory structure and populates it with some #' relevant BENMMI-files. Users may wish to modify this directory structure #' and add their own data. #' #' @param path name of an exisiting directory. This directory should #' be empty to prevent loss of data. If missing, a dialogue will #' appear. #' #' @export BENMMIdir <- function(path = NULL) { # interactive selection if (is.null(path)) { if (capabilities("tcltk")) { path <- tk_choose.dir( caption = "Select directory to store BENMMI files" ) } else { stop( "The 'tcltk'-package is not supported on this machine.\n", "Please provide a valid path as function argument\n", call. 
= FALSE ) } } # check path if (is.na(path)) { message("The BENMMI run has been cancelled by the user.") return(invisible(NULL)) } if (!file.exists(path)) { stop("directory does not exist", call. = FALSE) } # check if directory is empty (to prevent overwriting existing files) if (length(list.files(path)) != 0L) { stop(sprintf("directory %s is not empty!", sQuote(path)), call. = FALSE) } # populate directories tmp <- file.copy( from = c( system.file("extdata/INPUT-FILES", package = "BENMMI"), system.file("extdata/REF-FILES", package = "BENMMI"), system.file("extdata/settings-S-D-lin.json", package = "BENMMI"), system.file("extdata/settings-D-exp.json", package = "BENMMI") ), to = path, recursive = TRUE ) # show message message( sprintf( paste0( "Directory %s\nhas been populated with BENMMI files.\n", "To run the BENMMI tool, type: BENMMI() or benmmi()\n", 'For the tutorial, type: vignette("BENMMI_User_Manual")\n', "For more technical information, type: ?benmmi" ), sQuote(path) ) ) }
/scratch/gouwar.j/cran-all/cranData/BENMMI/R/benmmi-main.R
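A minimal sketch of a settings file that passes the checks in `read_settings()` above. All values are placeholders, the file paths do not have to exist for the parsing step itself, and `read_settings()` is internal to the package (hence the `:::`):

```r
# illustrative settings, written to a temporary JSON file
settings_json <- '
{
    // C-style comments are stripped before parsing
    "title": "Example run",
    "user": "analyst",
    "date": "2017-01-01",
    "files": {
        "benthos": "INPUT-FILES/benthos.csv",
        "taxa": "REF-FILES/taxa.csv",
        "groupsToExclude": "REF-FILES/groups.csv",
        "habitats": "REF-FILES/habitats.csv"
    },
    "indicators": ["S", "D"],
    "weights": ["1/2", "1/2"],
    "confidenceLevel": 0.9,
    "months": [1, 12],
    "pooling": {"enabled": false},
    "genusToSpeciesConversion": true
}'
tmp <- tempfile(fileext = ".json")
writeLines(settings_json, tmp)

settings <- BENMMI:::read_settings(tmp)
settings$indicators  # "s" "d" (converted to lower case)
settings$weights     # 0.5 0.5, with the "1/2" notation kept as an attribute
```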
#' Benthic Multi-Metric Indicator Analysis #' #' Reporting tool for a benthic multimetric indicator #' #' @author Dennis Walvoort (Wageningen University & Research) & Willem van Loon (Rijkswaterstaat-The Netherlands) #' #' @name benmmi-pkg #' @aliases benmmi-pkg #' @rdname benmmi-pkg "_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/BENMMI/R/benmmi.R
.onAttach <- function(libname, pkgname) { packageStartupMessage("\nBENMMI version: ", utils::packageVersion("BENMMI")) packageStartupMessage( "Copyright 2015-", format(Sys.Date(), "%Y"), " by Rijkswaterstaat, the Netherlands (RWS)." ) packageStartupMessage("Type citation(\"BENMMI\") on how to cite BENMMI in publications.") packageStartupMessage("For the tutorial, type: vignette(\"BENMMI_User_Manual\")") packageStartupMessage("For Frequently Asked Questions, type: vignette(\"FAQ\")") }
/scratch/gouwar.j/cran-all/cranData/BENMMI/R/onLoad.R
#' Mean and Confidence Interval #' #' @param x numeric vector #' @param level confidence level #' #' @importFrom stats qt sd #' #' @note Internal function. Not supposed to be called directly #' #' @examples #' stopifnot(all.equal(ci_mean(NA_real_), c(NA_real_, NA_real_, NA_real_))) #' stopifnot(all.equal(ci_mean(1), c(lower = NA_real_, mean = 1, upper = NA_real_))) #' stopifnot(all.equal( #' ci_mean(1:9, 0.95), #' c(lower = 2.934942, mean = 5.000000, upper = 7.065058), #' tolerance = 0.0001) #' ) #' #' @export ci_mean <- function(x, level = 0.90) { if (all(is.na(x))) { return(c(NA_real_, NA_real_, NA_real_)) } n <- sum(!is.na(x)) m <- mean(x, na.rm = TRUE) s <- sd(x, na.rm = TRUE) / sqrt(n) t <- qt(p = 0.5 * (1 + level), df = n) c(lower = m - t * s, mean = m, upper = m + t * s) } #' Construct a Text Representation of a Weight Vector #' #' @param x numeric or character vector #' #' @note Internal function. Not supposed to be called directly #' #' @export toString_weights <- function(x) { if (is.null(x)) { return("optimized") } else { if(attr(x, "normalized")) { return(paste(toString(formatC(x, format = "f", digits = 3)), "(normalized and fixed)")) } else { if (is.null(attr(x, "character"))) { return(paste(toString(formatC(x, format = "f", digits = 3)), "(fixed)")) } else { return(paste(toString(attr(x, "character")), "(fixed)")) } } } }
/scratch/gouwar.j/cran-all/cranData/BENMMI/R/utils.R
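For illustration, the three branches of `toString_weights()` above, exercised with hypothetical inputs (weight optimization, fixed numeric weights, and weights entered as rational text):

```r
toString_weights(NULL)              # "optimized": no fixed weights given

w <- c(0.5, 0.5)
attr(w, "normalized") <- FALSE
toString_weights(w)                 # "0.500, 0.500 (fixed)"

w <- c(1/3, 2/3)
attr(w, "normalized") <- FALSE
attr(w, "character") <- c("1/3", "2/3")
toString_weights(w)                 # "1/3, 2/3 (fixed)": original notation kept
```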
### AZTI Marine Biotic Index Borja et al. (2000) introduced the AZTI Marine Biotic Index (AMBI). It is a weighted linear combination of species sensitivity classes. The average percentage of the total abundance per sample without an AMBI classification is given below: ```{r results='asis'} if (tibble::has_name(d_mmi, "AMBI_GROUP")) { d <- d_mmi %>% group_by(OBJECTID, HABITAT, YEAR) %>% mutate(N = sum(VALUE)) %>% filter(has_ambi(taxon = TAXON, group = AMBI_GROUP)) %>% summarise(AMBI_MISSING = 100 * (1 - sum(VALUE) / unique(N))) } else { d <- d_mmi %>% group_by(OBJECTID, HABITAT, YEAR) %>% mutate(N = sum(VALUE)) %>% filter(has_ambi(taxon = TAXON)) %>% summarise(AMBI_MISSING = 100 * (1 - sum(VALUE) / unique(N))) } d %>% xtable %>% print(type = "html") ``` Note that in `r sum(d$AMBI_MISSING > 20)` cases, more than 20% of the total abundance does not have an AMBI classification.
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/ambi-missing.Rmd
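The weighted linear combination behind AMBI assigns increasing weights to the five sensitivity groups (Borja et al., 2000). A worked instance with made-up group percentages; the report itself computes AMBI through the benthos package:

```r
# percentage of total abundance in sensitivity groups I (sensitive)
# to V (first-order opportunistic); illustrative values
p <- c(I = 40, II = 30, III = 20, IV = 5, V = 5)
w <- c(0, 1.5, 3, 4.5, 6)
sum(w * p) / 100  # AMBI = 1.575 on the 0 (undisturbed) to 6 scale
```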
```{r child="default-ambi.Rmd", eval=is.null(d_ambi)} ``` ```{r child="user-ambi.Rmd", eval=!is.null(d_ambi)} ``` ```{r, eval=!is.null(d_ambi)} d_mmi <- d_mmi %>% left_join(d_ambi, by = "TAXON") %>% rename(AMBI_GROUP = GROUP) ``` ```{r} if (is.null(d_ambi)) { d <- d_mmi %>% filter(!has_ambi(taxon = TAXON)) %>% select(TAXON) %>% group_by(TAXON) %>% summarise(N_RECORDS = n()) } else { d <- d_mmi %>% filter(!has_ambi(taxon = TAXON, group = AMBI_GROUP)) %>% select(TAXON) %>% group_by(TAXON) %>% summarise(N_RECORDS = n()) } ``` ```{r results='asis', eval=(nrow(d)>0L)} cat(sprintf("\nThe AMBI-class is missing for the following %i taxa:\n", nrow(d))) d %>% xtable %>% print(type = "html", include.rownames = FALSE) ```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/ambi.Rmd
```{r echo=FALSE, message=FALSE, warning=FALSE} knitr::opts_chunk$set( echo = FALSE, comment = NA, quiet = TRUE, progress = FALSE, tidy = FALSE, cache = FALSE, message = FALSE, error = FALSE, # FALSE: do not preserve errors. Always stop execution. warning = TRUE ) options(width = 110) # attach packages library(readr) library(purrr) ``` ```{r} # define variables indicators <- toupper(settings$indicators) number_of_indicators <- length(indicators) has_pressure <- !is.null(settings$pressure) needs_optimization <- is.null(settings$weights) && has_pressure title_text <- if (tolower(settings$legendtext) == "eqr") { "EQR" } else { "normalized index" } title_text_plur <- if (tolower(settings$legendtext) == "eqr") { "EQRs" } else { "normalized indices" } Title_text <- if (tolower(settings$legendtext) == "eqr") { "EQR" } else { "Normalized index" } # add postfix _STAR to indicators. Should be converted to * in plots # (NB: using * directly is not possible in model formulas) indicators_EQR <- paste0(indicators, "_STAR") ``` # BENMMI benthos data analysis report _`r settings$title`_ _BENMMI-package version `r packageVersion("BENMMI")` (`r packageDescription("BENMMI", fields = "Date")`)_ ## Data Files and Settings ```{r} to_log("INFO", "Entering section 'Data Files and Settings'...") ``` - Indices: `r toString(toupper(settings$indicators))` - Pressure: `r if (has_pressure) {sprintf("%s (%s)", settings$pressure$name, settings$pressure$unit)} else {"not specified"}` - Time stamp: `r format(Sys.time())` - User name: `r settings$user` - Input directory: `r dirname(settings$files$benthos)` - benthos-file: `r basename(settings$files$benthos)` - Reference directory: `r dirname(settings$files$taxa)` - Species names-file: `r basename(settings$files$taxa)` - Taxonomic groups to exclude: `r basename(settings$files$groupstoexclude)` - Habitats-file: `r basename(settings$files$habitats)` - AMBI-file (user-defined): `r if ("ambi" %in% settings$indicators) {if (is.null(d_ambi)) {"not specified"} else {basename(settings$files$ambi)}} else {"not applicable"}` - Version default AMBI-data: `r if ("ambi" %in% settings$indicators) {"November 2014"} else {"not applicable"}` - ITI-file (user-defined): `r if ("iti" %in% settings$indicators) {basename(settings$files$iti)} else {"not applicable"}` - Output directory: `r dirname(settings$files$out_habitat)` - Output file tidy data: `r basename(settings$files$out_tidy)` - Output file habitat-level: `r basename(settings$files$out_habitat)` - Output file object-level: `r basename(settings$files$out_objectid)` - Output file groups-level: `r basename(settings$files$out_group)` - Output file ITI: `r basename(settings$files$out_iti)` - Output file pooling information: `r if (settings$pooling$enabled) {basename(settings$files$pooling)} else {"not applicable"}` - Log file: `r basename(settings$files$log)` - Report file: `r basename(settings$files$report)` - Pooling: `r if (settings$pooling$enabled) {"enabled"} else {"disabled"}` - Genus to species conversion: `r if (settings$genustospeciesconversion) {"enabled"} else {"disabled"}` - Confidence level: `r settings$confidencelevel` - model: `r settings$model` - Weights: `r toString_weights(settings$weights)` - legend text: `r settings$legendtext` ## Selection of benthos records ```{r} to_log("INFO", "Entering section 'Selection of benthos records'...") ``` - Number of records read: `r nrow(d_mmi)` ```{r} # keep only records within the period of interest d_mmi <- d_mmi %>% mutate(MONTH = DATE %>% format(format = "%m") %>% as.integer) %>% 
filter(MONTH %>% between(settings$months[1], settings$months[2])) %>% select(-MONTH) ``` - Number of records between month `r settings$months[1]` and month `r settings$months[2]` : `r nrow(d_mmi)`. ```{r child="groups-to-exclude.Rmd", eval=nrow(d_groups) != 0L} ``` ```{r child="no-groups-to-exclude.Rmd", eval=nrow(d_groups) == 0L} ``` ```{r} # add year d_mmi <- d_mmi %>% mutate(YEAR = DATE %>% format(format = "%Y") %>% as.integer) # number of records n_records <- nrow(d_mmi) n_samples <- d_mmi$ID %>% unique %>% length ``` - Number of records remaining for analysis: `r n_records` - Number of samples remaining for analysis: `r n_samples` ## Taxonomic groups The figures below give for each combination of OBJECTID-HABITAT-YEAR the average abundance in each taxonomic group. The first figure gives _absolute_ abundances (counts), the second figure _relative_ abundances (percentages). See also `r sQuote(basename(settings$files$out_group))` in the OUTPUT-directory for these entries per sample. ```{r} # total counts in each group per sampling unit (box core etc.) d <- d_mmi %>% left_join(d_taxa %>% select(accepted, group) %>% distinct, by = c(TAXON = "accepted")) %>% group_by(OBJECTID, SAMPLEID, HABITAT, DATE, YEAR, group) %>% summarise(n = sum(VALUE)) # ...store resulting table in wide format d %>% left_join( d %>% group_by(OBJECTID, SAMPLEID, HABITAT, DATE, YEAR) %>% summarise(N = sum(n)), by = c("OBJECTID", "SAMPLEID", "HABITAT", "DATE", "YEAR")) %>% mutate(p = as.integer(round(100 * n / N))) %>% select(-n) %>% spread(key = group, value = p, fill = 0L) %>% write_csv(settings$files$out_group) # mean counts per group per sample, also expressed as percentages dominant_groups <- c("APPOL", "CRAMP", "ECHIN", "MOBIV") d <- d %>% ungroup %>% select(-DATE, -SAMPLEID) %>% mutate(group = ifelse(group %in% dominant_groups, group, "OTHER")) %>% group_by(OBJECTID, HABITAT, YEAR, group) %>% summarise_all(funs(mean)) %>% group_by(OBJECTID, HABITAT, YEAR) %>% mutate(p = 100 * n / sum(n)) %>% mutate(ohy = paste(OBJECTID, HABITAT, YEAR, sep = "-")) ``` ```{r} # figure height fig_height <- d %>% select(OBJECTID, HABITAT, YEAR) %>% distinct %>% nrow %/% 3L %>% max(3) %>% min(7) ``` <figure> ```{r, fig.width=7, fig.height=fig_height, out.width=900, dpi=150, warning=FALSE} ggplot(data = d) + geom_col(mapping = aes(x = ohy, y = n, fill = group)) + scale_x_discrete(name = "", limits = rev(unique(d$ohy))) + scale_y_continuous(name = "mean abundance per sample (counts)") + coord_flip() ggplot(data = d) + geom_col(mapping = aes(x = ohy, y = p, fill = group)) + scale_x_discrete(name = "", limits = rev(unique(d$ohy))) + scale_y_continuous(name = "mean abundance per sample (%)") + coord_flip() ``` <figcaption> Mean abundance per taxonomic group. Top: expressed as counts per sample, bottom: expressed as percentages per sample. 
APPOL = Polychaetes; CRAMP = Amphipods; ECHIN = Echinodermata; MOBIV = Bivalvia; OTHER = other groups.</figcaption> </figure> <br> <br> ## Conversion of species names ```{r} to_log("INFO", "Entering section 'Conversion of species names'...") ``` The table below lists all taxon names in benthos-file '`r basename(settings$files$benthos)`' that are converted to accepted taxon names in WoRMS: ```{r results='asis'} d_mmi %>% select(TAXON_OLD, TAXON) %>% distinct %>% filter(tolower(TAXON_OLD) != tolower(TAXON)) %>% rename(`BENTHOS-file` = TAXON_OLD, WoRMS = TAXON) %>% xtable %>% print(type = "html", include.rownames = FALSE) ``` <br> ```{r} inconvertible <- d_mmi %>% filter(is.na(TAXON)) %>% group_by(TAXON_OLD) %>% summarise(COUNT = n()) %>% mutate("similar name(s) WoRMS" = TAXON_OLD %>% sapply( FUN = function(x) { x <- agrep(pattern = x, x = d_taxa$accepted, value = TRUE, ignore.case = TRUE) if ((length(x) == 0L) | (length(x) > 5L)) { x <- "" } x %>% toString } ) ) %>% rename(TAXON = TAXON_OLD) d_mmi <- d_mmi %>% filter(!is.na(TAXON)) %>% select(-TAXON_OLD) ``` The following `r nrow(inconvertible)` taxon names in the benthos-file are inconvertible. These names are not WoRMS-compliant, and will be removed: ```{r eval=nrow(inconvertible)>0L, results='asis'} inconvertible %>% xtable %>% print(type = "html", include.rownames = FALSE) ``` The first column gives the taxon name as found in the benthos input file (`r basename(settings$files$benthos)`), the second column gives the number of occurrences of this name, and the third column gives taxon names (if any) according to WoRMS that are most similar to the one in the benthos input file. This column may be useful to discover and correct typing errors or slightly different spelling. Please report inconvertible taxon names of Dutch benthos data to the TAXA list manager of Rijkswaterstaat ([email protected]). ```{r child="species-sensitivity-values.Rmd", eval=any(c("iti", "ambi") %in% settings$indicators)} ``` ## OBJECTID-HABITATs and sample areas ```{r} to_log("INFO", "Entering section 'OBJECTID-HABITATs and sample areas'...") ``` The following habitats have been selected: ```{r results='asis'} d_mmi %>% group_by(OBJECTID, HABITAT, SAMPLEID, DATE) %>% summarise("N_RECORDS" = n()) %>% group_by(OBJECTID, HABITAT) %>% summarise( "N_SAMPLES" = n(), "N_RECORDS" = sum(N_RECORDS) ) %>% xtable %>% print(type = "html", include.rownames = FALSE) ``` The table below gives the total sample area for the available sample sizes (m&sup2;). The corresponding number of samples is given in brackets. 
```{r results='asis'} d1 <- d_mmi %>% select(OBJECTID, HABITAT, YEAR, SAMPLEID, AREA) %>% distinct %>% select(-SAMPLEID) %>% group_by(OBJECTID, HABITAT, YEAR, AREA) %>% summarise(total_area = sum(AREA)) %>% spread(key = AREA, value = total_area, fill = 0) d1$TOTAL <- rowSums(d1[, -(1:3)]) d2 <- d_mmi %>% select(OBJECTID, HABITAT, YEAR, SAMPLEID, AREA) %>% distinct %>% select(-SAMPLEID) %>% group_by(OBJECTID, HABITAT, YEAR, AREA) %>% summarise(n_samples = n()) %>% spread(key = AREA, value = n_samples, fill = 0L) d2$TOTAL <- rowSums(d2[, -(1:3)]) %>% as.integer d <- d1 for (j in 4:ncol(d)) { d1[[j]] <- formatC(x = d1[[j]], format = "f", digits = 3) d2[[j]] <- as.character(d2[[j]]) d1[[j]] <- format(x = d1[[j]]) d2[[j]] <- format(x = d2[[j]]) d[[j]] <- apply( X = cbind(d1[[j]], d2[[j]]), MARGIN = 1L, FUN = function(x) { paste(x[1], paste0("(", x[2], ")"), collapse = "") } ) } d %>% xtable %>% print(type = "html", include.rownames = FALSE) ``` ```{r child="pooling-area.Rmd", eval=isTRUE(settings$pooling$enabled)} ``` ```{r child="genus-to-species.Rmd", eval=isTRUE(settings$genustospeciesconversion)} ``` ## Data pooling ```{r} to_log("INFO", "Entering section 'Data pooling'...") ``` ```{r child="pooling.Rmd", eval=isTRUE(settings$pooling$enabled)} ``` ```{r child="no-pooling.Rmd", eval=!isTRUE(settings$pooling$enabled)} ``` ```{r child="mds.Rmd", eval=!isTRUE(settings$pooling$enabled)} ``` ```{r} # store pre-processed data to_log("INFO", "Finished data preprocessing") to_log("INFO", sprintf("Storing preprocessed (tidy) data in %s...", sQuote(basename(settings$files$out_tidy)))) required_vars <- c("OBJECTID", "HABITAT", "SAMPLEID", "LAT", "LONG", "DATE", "SAMPDEV", "MESH", "TAXON", "VALUE") if (has_pressure) { required_vars <- c(required_vars, "PRESSURE") } if (settings$pooling$enabled) { d_mmi %>% select_(.dots = c(required_vars, "POOL_RUN", "POOL_ID")) %>% write_csv(path = settings$files$out_tidy, na = "") } else { d_mmi %>% select_(.dots = required_vars) %>% write_csv(path = settings$files$out_tidy, na = "") } to_log("INFO", "preprocessed data has been stored.") ``` ## Index calculation ```{r} to_log("INFO", "Entering section 'Index calculation'...") ``` ### Total abundance The total abundance of individuals (_N_) in the data pool. This index is provided for general information on the sample, quality control and optional manual correction for sample size (_e.g._ by means of Margalef _d_). 
```{r child="indicator-lnn.Rmd", eval=("lnn" %in% settings$indicators)} ``` ```{r child="indicator-species-richness.Rmd", eval=("s" %in% settings$indicators)} ``` ```{r child="indicator-margalef-d.Rmd", eval=("d" %in% settings$indicators)} ``` ```{r child="indicator-rygg-sn.Rmd", eval=("sn" %in% settings$indicators)} ``` ```{r child="indicator-rygg-sna.Rmd", eval=("sna" %in% settings$indicators)} ``` ```{r child="indicator-simpson-l.Rmd", eval=("l" %in% settings$indicators)} ``` ```{r child="indicator-hill-n2.Rmd", eval=("n2" %in% settings$indicators)} ``` ```{r child="indicator-hurlbert-pie.Rmd", eval=("pie" %in% settings$indicators)} ``` ```{r child="indicator-shannon-h.Rmd", eval=("h" %in% settings$indicators)} ``` ```{r} indicator_functions <- list( N = ~total_abundance(count = VALUE), LNN = ~lnn(count = VALUE), S = ~species_richness(taxon = TAXON, count = VALUE), D = ~margalef(taxon = TAXON, count = VALUE), SN = ~rygg(taxon = TAXON, count = VALUE), SNA = ~rygg(taxon = TAXON, count = VALUE, adjusted = TRUE), L = ~simpson(taxon = TAXON, count = VALUE), H = ~shannon(taxon = TAXON, count = VALUE), N2 = ~hill2(taxon = TAXON, count = VALUE), PIE = ~hpie(taxon = TAXON, count = VALUE), AMBI = ~ambi(taxon = TAXON, count = VALUE, group = AMBI_GROUP), ITI = ~iti( taxon = TAXON, count = VALUE, group = ITI_GROUP) ) if (!("AMBI_GROUP" %in% names(d_mmi))) { indicator_functions$AMBI <- ~ambi(taxon = TAXON, count = VALUE) } if (!("ITI_GROUP" %in% names(d_mmi))) { indicator_functions$ITI <- ~iti(taxon = TAXON, count = VALUE) } indicator_functions <- indicator_functions[unique(c("N", toupper(settings$indicators)))] if (!has_pressure) { # add dummy (to prevent extra code) d_mmi$PRESSURE <- NA_real_ } d_ind <- full_join( d_mmi %>% group_by(OBJECTID, HABITAT, YEAR, POOL_RUN, POOL_ID) %>% summarise_(.dots = indicator_functions), d_mmi %>% group_by(OBJECTID, HABITAT, YEAR, POOL_RUN, POOL_ID) %>% distinct(ID, AREA, PRESSURE) %>% summarise( N_SAMPLES_POOL = n(), POOL_AREA = sum(AREA), PRESSURE = (AREA * PRESSURE) / sum(AREA) ), by = c("OBJECTID", "HABITAT", "YEAR", "POOL_RUN", "POOL_ID") ) %>% ungroup if (!has_pressure) { # remove dummy d_ind$PRESSURE <- NULL d_mmi$PRESSURE <- NULL } # add sample id if pooling is disabled if (settings$pooling$enabled == FALSE) { d_ind <- d_ind %>% left_join( d_mmi %>% select(OBJECTID, HABITAT, YEAR, POOL_RUN, POOL_ID, SAMPLEID) %>% distinct, by = c("OBJECTID", "HABITAT", "YEAR", "POOL_RUN", "POOL_ID") ) } ``` ```{r child="ambi-missing.Rmd", eval=("ambi" %in% settings$indicators)} ``` ```{r child="iti-missing.Rmd", eval=("iti" %in% settings$indicators)} ``` ## Index percentile values ```{r} to_log("INFO", "Entering section 'Index percentile values'...") ``` Percentiles for each index are given below. In addition, the number of samples (n) used to calculate these percentiles is provided. The percentiles have been calculated for the period `r paste(range(d_ind$YEAR), collapse = "-")` and months `r paste(range(settings$months), collapse = "-")`. 
```{r results='asis'} # utility function for unrolling a list data_frame # (note as.data.frame recyles lists if necessary) unroll <- function(x) { lapply( X = seq_len(nrow(x)), FUN = function(i) { x[i, ] %>% lapply(unlist) %>% as.data.frame(stringsAsFactors = FALSE) %>% as_data_frame } ) %>% bind_rows } # estimate percentiles probs <- c(0, 1, 5, 25, 50, 75, 95, 99, 100) / 100 d <- d_ind %>% select(-YEAR, -POOL_RUN, -POOL_ID) if (settings$pooling$enabled == FALSE) { d$SAMPLEID <- NULL } d <- d %>% group_by(OBJECTID, HABITAT) %>% summarise_all(funs(list(quantile(., probs = probs, na.rm = TRUE)))) %>% unroll %>% left_join( d_ind %>% group_by(OBJECTID, HABITAT) %>% summarise(n = n()), by = c("OBJECTID", "HABITAT") ) d$PERC <- probs * 100 d %>% select_(.dots = c("OBJECTID", "HABITAT", "PERC", toupper(settings$indicators), "n")) %>% xtable %>% print(type = "html") ``` ```{r} # figure height fig_height <- (((d_ind %>% select(OBJECTID, HABITAT) %>% distinct %>% nrow)-1L) %/% 3L + 1L) * 3 ``` <figure> ```{r, fig.width=7, fig.height=fig_height, out.width=900, dpi=150, warning=FALSE} d <- d_ind d$SAMPLEID <- NULL d <- d %>% select_(.dots = c("OBJECTID", "HABITAT", toupper(settings$indicators))) %>% gather(key = "indicator", value = "value", -OBJECTID, -HABITAT) %>% mutate(indicator = as.character(indicator)) lut <- c(N = 99L, LNN = 99L, S = 99L, D = 99L, SN = 99L, SNA = 99L, H = 99L, L = 1L, N2 = 99L, PIE = 99L, AMBI = 1L, ITI = 99L) d_p <- d %>% group_by(OBJECTID, HABITAT, indicator) %>% summarise( p01 = quantile(x = value, probs = 0.01, na.rm = TRUE), p99 = quantile(x = value, probs = 0.99, na.rm = TRUE) ) d_p$p <- ifelse(lut[d_p$indicator] == 99L, d_p$p99, d_p$p01) ggplot(data = d, mapping = aes(x = value)) + geom_vline(data = d_p, mapping = aes(xintercept = p), colour = "red") + stat_ecdf(na.rm = TRUE) + geom_rug() + scale_x_continuous(name = "index value") + scale_y_continuous(name = "cumulative probability") + facet_grid(HABITAT * OBJECTID ~ indicator, scales = "free_x") ``` <figcaption>Cumulative distributions for each index, OBJECTID and HABITAT. The red line is the reference value based on the 1%- or 99%-percentile. The vertical dashes on the x-axis denote the data positions (rug-plot).</figcaption> </figure> <br> <br> ```{r} section_title <- if(tolower(settings$legendtext) == "eqr") { "Index Ecological Quality Ratios" } else { "Normalized Indices" } ``` ## `r section_title` ```{r} to_log("INFO", sprintf("Entering section '%s'...", section_title)) ``` The following `r tolower(section_title)` are calculated: ```{r child="eqr-species-richness.Rmd", eval=("s" %in% settings$indicators)} ``` ```{r child="eqr-margalef-d.Rmd", eval=("d" %in% settings$indicators)} ``` ```{r child="eqr-rygg-sn.Rmd", eval=("sn" %in% settings$indicators)} ``` ```{r child="eqr-rygg-sna.Rmd", eval=("sna" %in% settings$indicators)} ``` ```{r child="eqr-simpson-l.Rmd", eval=("l" %in% settings$indicators)} ``` ```{r child="eqr-hill-n2.Rmd", eval=("n2" %in% settings$indicators)} ``` ```{r child="eqr-hurlbert-pie.Rmd", eval=("pie" %in% settings$indicators)} ``` ```{r child="eqr-shannon-h.Rmd", eval=("h" %in% settings$indicators)} ``` ```{r child="eqr-ambi.Rmd", eval=("ambi" %in% settings$indicators)} ``` ```{r child="eqr-iti.Rmd", eval=("iti" %in% settings$indicators)} ``` <br> The 'bad' and 'ref' values can be found in the table below. This table is a copy of `r settings$files$habitats %>% basename %>% sQuote` as specified in the settings file. 
```{r results='asis'} d_ref %>% xtable %>% print(type = "html", include.rownames = FALSE) ``` ```{r} # compute EQRs eqr_functions <- list( N = ~eqr(x = N, bad = NBAD, ref = NREF), LNN = ~eqr(x = LNN, bad = LNNBAD, ref = LNNREF), S = ~eqr(x = S, bad = SBAD, ref = SREF), D = ~eqr(x = D, bad = DBAD, ref = DREF), SN = ~eqr(x = SN, bad = SNBAD, ref = SNREF), SNA = ~eqr(x = SNA, bad = SNABAD, ref = SNAREF), L = ~eqr(x = L, bad = LBAD, ref = LREF), N2 = ~eqr(x = N2, bad = N2BAD, ref = N2REF), PIE = ~eqr(x = PIE, bad = PIEBAD, ref = PIEREF), H = ~eqr(x = H, bad = HBAD, ref = HREF), AMBI = ~eqr(x = AMBI, bad = AMBIBAD, ref = AMBIREF), ITI = ~eqr(x = ITI, bad = ITIBAD, ref = ITIREF) ) names(eqr_functions) <- paste0(names(eqr_functions), "_STAR") eqr_functions <- eqr_functions[indicators_EQR] d_ind <- d_ind %>% left_join(d_ref, by = c("OBJECTID", "HABITAT")) %>% mutate_(.dots = eqr_functions) ``` ```{r eval=!isTRUE(settings$pooling$enabled)} # create SAMPLE file and add DATE column d <- d_ind %>% select_(.dots = c("OBJECTID", "HABITAT", "SAMPLEID", "YEAR", "N", indicators, indicators_EQR)) %>% set_names(sub(pattern = "_STAR$", replacement = "*", x = names(.))) %>% left_join( d_mmi %>% select(OBJECTID, HABITAT, SAMPLEID, YEAR, DATE) %>% distinct, by = c("OBJECTID", "HABITAT", "SAMPLEID", "YEAR") ) # reorder columns first_columns <- c("OBJECTID", "HABITAT", "SAMPLEID", "DATE", "YEAR") d[, c(first_columns, setdiff(names(d), first_columns))] %>% write_csv(path = settings$files$out_sample, na = "") d_ind <- d_ind %>% select(-SAMPLEID) ``` ## Results ```{r} to_log("INFO", "Entering section 'Results'...") ``` ```{r child="study-area.Rmd", eval=!has_pressure} ``` ```{r child="study-area-pressure.Rmd", eval=has_pressure} ``` ```{r child="plot-scatter.Rmd", eval=(number_of_indicators > 1L)} ``` ```{r child="weights-user.Rmd", eval=!needs_optimization} ``` ```{r child="model-linear.Rmd", eval=settings$model == "linear"} ``` ```{r child="model-exponential.Rmd", eval=settings$model == "exponential"} ``` ```{r child="plot-box-whisker.Rmd", eval=has_pressure} ``` <br> <br> ### Aggregation ```{r} to_log("INFO", "Entering subsection 'Aggregation...'...") ``` The results are averaged to OBJECTID-HABITAT-YEAR combinations. The table below lists all results aggregated by OBJECTID, HABITAT and YEAR. In addition the `r settings$confidencelevel`-confidence interval for the mean is given. NB, for the calculation of the confidence intervals, a normal distribution of the mean is assumed. For small sample sizes, this assumption may not be valid. 
```{r} d_agg <- d_ind %>% select(OBJECTID, HABITAT, YEAR, RELAREA, ends_with("_STAR")) %>% group_by(OBJECTID, HABITAT, YEAR, RELAREA) %>% summarise_all( funs( LOWER_LIMIT = ci_mean(., level = settings$confidencelevel)["lower"], MEAN = ci_mean(., level = settings$confidencelevel)["mean"], UPPER_LIMIT = ci_mean(., level = settings$confidencelevel)["upper"] ) ) ``` ```{r eval=nrow(d_agg)>0L, results='asis'} # place column names in the desired order names(d_agg) <- sub(pattern = "^MMI_STAR_(.+)$", replacement = "ZZZZZ\\1", x = names(d_agg)) d <- names(d_agg) d <- c(d[1:4], sort(d[-(1:4)])) d_agg <- d_agg[, d] names(d_agg) <- sub(pattern = "^Z{5}(.+)$", replacement = "MMI_STAR_\\1", x = names(d_agg)) # print table with double lines header d <- d_agg names(d) <- sub(pattern = "_LOWER_LIMIT", replacement = "\\\nlower limit", x = names(d)) names(d) <- sub(pattern = "_MEAN", replacement = "\\\nmean", x = names(d)) names(d) <- sub(pattern = "_UPPER_LIMIT", replacement = "\\\nupper limit", x = names(d)) names(d) <- sub(pattern = "_STAR", replacement = "*", x = names(d)) d %>% select(-RELAREA) %>% xtable %>% print(type = "html", include.rownames = FALSE) # store results in a CSV-file (single line header) d_agg %>% select(-RELAREA) %>% set_names(sub(pattern = "_STAR", replacement = "*", x = names(.))) %>% set_names(sub(pattern = "_UPPER_LIMIT", replacement = " upper limit", x = names(.))) %>% set_names(sub(pattern = "_LOWER_LIMIT", replacement = " lower limit", x = names(.))) %>% set_names(sub(pattern = "_MEAN", replacement = " mean", x = names(.))) %>% write_csv(path = settings$files$out_habitat, na = "") ``` <br> `r if (sprintf("MMI_%s_MEAN", settings$legendtext) %in% names(d_agg)) { "The table below lists the results aggregated (habitat area-weighted) by OBJECTID and YEAR."}` ```{r results='asis', eval=(sprintf("MMI_%s_MEAN", settings$legendtext) %in% names(d_agg))} # Two-step aggregation: # 1. per OBJECTID-HABITAT-YEAR # 2. per OBJECTID-YEAR # (otherwise the averages will be depend on the number of samples) d_agg <- d_agg %>% group_by(OBJECTID, YEAR) %>% summarise(MMI_STAR = sum(MMI_STAR_MEAN * RELAREA, na.rm = TRUE) / sum(RELAREA, na.rm = TRUE)) %>% rename("MMI_STAR" = "MMI*") # print table d_agg %>% xtable %>% print(type = "html") # store results in a file d_agg %>% write_csv(path = settings$files$out_objectid, na = "") ``` ## References ```{r} to_log("INFO", "Entering section 'References'...") ``` Borja, A., J. Franco and V. P&eacute;rez, 2000. A Marine Biotic Index to Establish the Ecological Quality of Soft-Bottom Benthos Within European Estuarine and Coastal Environments. Marine Pollution Bulletin 40:1100-1114 Gittenberger A. and W. van Loon, 2013. Sensitivities of marine macrozoobenthos to environmental pressures in the Netherlands. Nederlandse Faunistische Mededelingen, 41 (2013) 79-112. Nickel, S., A. Hertel, R. Pesch, W. Schroeder, E. Steinnes, H. Thelle Uggerud, 2014. Correlating concentrations of heavy metals in atmospheric deposition with respective accumulation in moss and natural surface soil for ecological land classes in Norway between 1990 and 2010. Environ Sci Pollut Res. Sammon, J. W., 1969. A non-linear mapping for data structure analysis. IEEE Trans. Comput., C-18 401–409. Shannon, C. E., 1948. A Mathematical Theory of Communication. Bell System Technical Journal 27: 379–423. van Loon, W.M.G.M., A.R. Boon, A. Gittenberger, D.J.J. Walvoort, M. Lavaleye, G.C.A. Duineveld, A.J. Verschoor, 2015. 
Application of the Benthic Ecosystem Quality Index 2 to benthos in Dutch transitional and coastal waters. Journal of Sea Research 103:1-13 Willem van Loon, Dennis Walvoort, Marc Lavaleye, Gerard Duineveld, Christina Herbon, Abigayil Blandon, Graham Philips, Roland Pesch, Petra Schmidt, Jorg Scholle, Karin Heyer, Gert van Hoey, Mats Blomqvist, 2017. A regional benthos assessment method for the Southern North Sea using Margalef diversity and reference value modeling. Accepted for publication by Ecological Indicators ## Acknowledgements ```{r} to_log("INFO", "Entering section 'Acknowledgements'...") ``` Angel Borja (AZTI-TECHNALIA, Spain), is kindly acknowledged for the permission to use the standard AMBI species list (ambi.azti.es). ## Session information ```{r} to_log("INFO", "Entering section 'Session information'...") ``` ```{r} sessionInfo() ``` <div style="margin-bottom: 50mm;"></div> # Appendices ```{r child="plot-qc-margalef.Rmd", eval=isTRUE("D" %in% indicators)} ``` ## Sample size versus confidence interval For interpretation of the current results and future sampling, the relation between the confidence interval and the sample size is of interest. The minimum sample size _n_ required to estimate the mean `r title_text` within _d_ `r title_text`-units from the true mean with 1-&alpha; confidence (in this report 1-&alpha; = `r settings$confidencelevel`) can be estimated by _n_ = (_&sigma;_ _t_<sub>&alpha;/2,&nu;</sub>)<sup>2</sup> / _d_<sup>2</sup> where _&sigma;_ is the standard deviation, _t_<sub>&alpha;/2,&nu;</sub> is the Student's _t_-value for significance level &alpha; and &nu;=_n_-1 degrees of freedom. See also Nickel _et al._ (2014), or [Wikipedia](https://en.wikipedia.org/wiki/Sample_size_determination#Means). Note: in this report, we do not use transformations like the logit. Since we usually have sufficiently large sample sizes, the distribution of the mean `r title_text` will tend to normality (central limit theorem). ```{r} MIN_SAMPLE_SIZE <- 10L d <- d_ind # only the MMI is needed if the user has provided the weights # (see 2016-12-08-BENMMI-TESTVERSLAG-V4.1.docx) if (!is.null(settings$weights)) { d <- d %>% select(OBJECTID, HABITAT, MMI_STAR) } else { d <- d %>% select_(.dots = c("OBJECTID", "HABITAT", indicators_EQR, "MMI_STAR")) } d <- d %>% gather(key = EQR, value = VALUE, ends_with("_STAR")) %>% group_by(OBJECTID, HABITAT, EQR) %>% summarise( sigma = sd(VALUE, na.rm = TRUE), n = sum(!is.na(VALUE)) ) %>% ungroup %>% mutate( t_alpha = ifelse( n < MIN_SAMPLE_SIZE, NA_real_, qt(p = 0.5 * (1 + settings$confidencelevel), df = n - 1) ) ) %>% rowwise() %>% do( OBJECTID = rep.int(x = .$OBJECTID, times = 23), HABITAT = rep.int(x = .$HABITAT, times = 23), EQR = rep.int(x = .$EQR, times = 23), t_alpha = rep.int(x = .$t_alpha, times = 23), sigma = rep.int(x = .$sigma, times = 23), d = seq(from = 0.01, to = 0.12, by = 0.005) ) %>% unnest() %>% mutate( n = ceiling((t_alpha * sigma / d)^2) ) ``` In the figure below, sample size (_n_) is given as function of the half width (_d_) of the `r 100*settings$confidencelevel`%-confidence interval of the mean `r title_text` for every `r title_text` and OBJECTID-HABITAT combination. These graphs can be used to estimate the required sample size (y-axis) for a given confidence interval (x-axis). 
Suppose one needs to estimate the mean of ``r d$EQR[9]`` for OBJECTID ``r d$OBJECTID[9]`` and HABITAT ``r d$HABITAT[9]`` and one wants this estimated mean within _d_=`r round(d$d[9], 2)` `r title_text`-units of the true (but unknown) mean with `r 100*settings$confidencelevel`% confidence, then one needs a minimum sample size of `r d$n[9]`. ```{r} # figure height fig_height <- (((d %>% select(OBJECTID, HABITAT) %>% distinct %>% nrow)-1L) %/% 2L + 1L) * 2 ``` <figure> ```{r,echo=FALSE, fig.retina=NULL, fig.width=6, fig.height=fig_height, out.width=900, dpi=300, warning=FALSE, message=FALSE} d$EQR <- sub(pattern = "_STAR$", replacement = "*", x = d$EQR) g <- ggplot( data = d %>% filter(n <= 100) %>% mutate(label = paste(OBJECTID, HABITAT, sep = " - ")) ) + geom_path(mapping = aes(x = d, y = n, colour = EQR, linetype = EQR)) + scale_color_discrete(name = "index") + scale_linetype(name = "index") + facet_wrap(~ label, ncol = 2) + scale_y_continuous(name = "sample size (n)", limits = c(0, NA)) + scale_x_continuous(name = sprintf("Half width (d) of %s%%- conf.int. %s", 100*settings$confidencelevel, title_text), limits = c(0, NA)) suppressWarnings(print(g)) ``` <figcaption>Half width of the `r 100*settings$confidencelevel`%-confidence interval of the mean `r title_text` as function of sample size.</figcaption> </figure> <br> <br> The calculation of these curves is driven by the benthos data in the input file "`r basename(settings$files$benthos)`". For example, these curves can be calculated for a specific OBJECTID-HABITAT, using data for a period of two or three years (to also account for temporal variability). Also note that if the dataset in the input file is used for an assessment, then it can be checked if the sample size was sufficient for the desired confidence interval of the assessment. The table below presents a subset of the data in the figure above in tabular format. Column 'd' gives half of the width of the `r 100*settings$confidencelevel`%-confidence interval in `r title_text`-units. Column 'n' is the corresponding minimum sample size. ```{r, results='asis'} d %>% filter(as.character(d) %in% as.character(seq(from = 0.02, to = 0.12, by = 0.02))) %>% select(OBJECTID, HABITAT, INDEX = EQR, d, n) %>% xtable(digits = c(0, 0, 0, 0, 2, 0)) %>% print(type = "html", include.rownames = FALSE) ``` Note: in case the sample size in "`r basename(settings$files$benthos)`" is smaller than `r MIN_SAMPLE_SIZE` for a specific OBJECTID-HABITAT, then the minimum sample size is not calculated.
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/benmmi.Rmd
No AMBI-file has been specified. Therefore, the standard AMBI-list (www.azti.es, version November 2014, by courtesy of Angel Borja) will be used.
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/default-ambi.Rmd
No ITI-file has been specified. Therefore, the standard ITI-list will be used.
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/default-iti.Rmd
### AMBI

`r settings$legendtext`(AMBI) = ( AMBI<sub>bad</sub> - AMBI<sub>ass</sub> ) / ( AMBI<sub>bad</sub> - AMBI<sub>ref</sub> )
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/eqr-ambi.Rmd
### Hill's N2 index

`r settings$legendtext`(_N2_) = ( _N2_<sub>ass</sub> - _N2_<sub>bad</sub> ) / ( _N2_<sub>ref</sub> - _N2_<sub>bad</sub> )
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/eqr-hill-n2.Rmd
### Hurlbert's PIE

`r settings$legendtext`(PIE) = ( PIE<sub>ass</sub> - PIE<sub>bad</sub> ) / ( PIE<sub>ref</sub> - PIE<sub>bad</sub> )
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/eqr-hurlbert-pie.Rmd
### ITI

`r settings$legendtext`(ITI) = ( ITI<sub>ass</sub> - ITI<sub>bad</sub> ) / ( ITI<sub>ref</sub> - ITI<sub>bad</sub> )
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/eqr-iti.Rmd
### Margalef's Diversity

`r settings$legendtext`(_D_) = ( _D_<sub>ass</sub> - _D_<sub>bad</sub> ) / ( _D_<sub>ref</sub> - _D_<sub>bad</sub> )
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/eqr-margalef-d.Rmd
### Rygg's index

`r settings$legendtext`(_SN_) = ( _SN_<sub>ass</sub> - _SN_<sub>bad</sub> ) / ( _SN_<sub>ref</sub> - _SN_<sub>bad</sub> )
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/eqr-rygg-sn.Rmd
### Adjusted Rygg's index

`r settings$legendtext`(_SNA_) = ( _SNA_<sub>ass</sub> - _SNA_<sub>bad</sub> ) / ( _SNA_<sub>ref</sub> - _SNA_<sub>bad</sub> )
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/eqr-rygg-sna.Rmd
### Shannon's Index

`r settings$legendtext`(_H'_) = ( _H'_<sub>ass</sub> - _H'_<sub>bad</sub> ) / ( _H'_<sub>ref</sub> - _H'_<sub>bad</sub> )
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/eqr-shannon-h.Rmd
### Simpson's index

`r settings$legendtext`(_L_) = ( _L_<sub>bad</sub> - _L_<sub>ass</sub> ) / ( _L_<sub>bad</sub> - _L_<sub>ref</sub> )
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/eqr-simpson-l.Rmd
### Species richness

`r settings$legendtext`(_S_) = ( _S_<sub>ass</sub> - _S_<sub>bad</sub> ) / ( _S_<sub>ref</sub> - _S_<sub>bad</sub> )
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/eqr-species-richness.Rmd
## Conversion of genus to species within a single sample

```{r}
to_log("INFO", "Entering section 'Conversion of genus to species within a single sample'...")
```

The following taxonomic ranks have been found in the MMI input file:

```{r}
d <- d_mmi %>%
    left_join(d_taxa %>% select(accepted, level) %>% distinct, by = c("TAXON" = "accepted"))
d$level %>% factor %>% table
```

The number of records is given below each rank. The taxonomic ranks have been obtained from the TWN/WoRMS-list.

If a genus and one or more species of that genus are present in the same sample, the individuals recorded at the genus level most likely belong to the species already identified in that sample. Therefore, the abundance of this genus is distributed over the corresponding species, in proportion to the abundances of those species in the sample. Ties are broken randomly. A standalone sketch of this rule is given at the end of this section.

```{r}
# split data in Genus/Species part and rest part
d_rest <- d %>%
    filter(!(level %in% c("Genus", "Species"))) %>%
    select(-level)
d <- d %>%
    filter(level %in% c("Genus", "Species")) %>%
    select(-level)

# identify generic and specific names (currently more stable than 'mutate')
d$GENERIC  <- generic_name(d$TAXON)
d$SPECIFIC <- specific_name(d$TAXON)
sel <- is.na(d$GENERIC)
d$GENERIC[sel] <- d$TAXON[sel]
d$VALUE_OLD <- d$VALUE

# perform genus to species correction for each generic in each sampling unit
d <- d %>%
    group_by(ID, GENERIC) %>%
    mutate(
        VALUE_NEW = genus_to_species(is_genus = is.na(SPECIFIC), count = VALUE_OLD)
    ) %>%
    ungroup %>%
    select(ID, TAXON, VALUE_OLD, VALUE_NEW) %>%
    right_join(d_mmi, by = c("ID", "TAXON")) %>%
    mutate(HAS_CHANGED = abs(VALUE_NEW - VALUE_OLD) > 1.0e-9)

# number of records that has been changed
n_records_changed <- sum(d$HAS_CHANGED, na.rm = TRUE)

# number of records to report
n_records_report <- min(15L, n_records_changed)
```

The number of records that have been selected for genus to species conversion is `r n_records_changed`. The first `r n_records_report` records are given below (if relevant, the remainder has been removed to decrease the size of this report). For each record both the old counts (`VALUE_OLD`, counts before conversion) and new counts (`VALUE_NEW`, counts after conversion) are given.

```{r, results='asis', eval=n_records_report>0}
d %>%
    filter(HAS_CHANGED) %>%
    arrange(OBJECTID, SAMPLEID, DATE) %>%
    slice(seq_len(n_records_report)) %>%
    select(OBJECTID, SAMPLEID, DATE, TAXON, VALUE_OLD, VALUE_NEW) %>%
    mutate(VALUE_OLD = as.integer(VALUE_OLD), DATE = format(DATE)) %>%
    xtable %>%
    print(type = "html", include.rownames = FALSE)
```

```{r}
# Remove taxa with VALUE=0 and not being azoic samples (these values are due
# to genus to species conversion), and add the results to that part of d_mmi
# not involved in the genus-to-species conversion
d_mmi <- d %>%
    filter(is_azoic(TAXON) | (VALUE_NEW > 1.0e-9)) %>%
    mutate(VALUE = VALUE_NEW) %>%
    select(-VALUE_NEW, -VALUE_OLD, -HAS_CHANGED) %>%
    bind_rows(d_rest)
```

The number of records in the MMI input file after genus to species conversion is `r nrow(d_mmi)`.
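For illustration, a minimal standalone sketch of the redistribution rule described above, assuming proportional allocation with largest-remainder rounding and random tie-breaking. The helper `redistribute_genus()` is hypothetical; the actual conversion is done by the `genus_to_species()` function used above, which may differ in detail.

```r
# Hypothetical sketch, NOT the genus_to_species() implementation:
# distribute a genus-level count over congeneric species in proportion
# to their abundances; ties in the rounding are broken randomly.
redistribute_genus <- function(genus_count, species_counts) {
    share <- genus_count * species_counts / sum(species_counts)
    extra <- floor(share)
    remainder <- genus_count - sum(extra)
    if (remainder > 0) {
        # largest fractional parts receive the remaining individuals;
        # the random secondary sort key breaks ties randomly
        idx <- order(share - extra, sample(seq_along(share)), decreasing = TRUE)
        extra[idx[seq_len(remainder)]] <- extra[idx[seq_len(remainder)]] + 1
    }
    species_counts + extra
}

# a genus record with 5 individuals, two congeneric species with 6 and 2:
redistribute_genus(5, c(6, 2))  # shares 3.75 and 1.25 -> c(10, 3)
```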
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/genus-to-species.Rmd
```{r}
# MMI has been designed for endofauna, therefore remove
# epifaunal species and insecta
d_mmi <- d_mmi %>%
    left_join(d_taxa %>% select(accepted, group) %>% distinct, by = c("TAXON" = "accepted")) %>%
    left_join(d_groups, by = c("group" = "GROUP"))
d <- d_mmi %>%
    select(DESCRIPTION) %>%
    filter(!is.na(DESCRIPTION)) %>%
    group_by(DESCRIPTION) %>%
    summarise(COUNT = n()) %>%
    full_join(d_groups, by = "DESCRIPTION") %>%
    mutate(COUNT = as.integer(ifelse(is.na(COUNT), 0, COUNT))) %>%
    select(GROUP, DESCRIPTION, COUNT) %>%
    arrange(GROUP)

# remove black-listed species from benthos data
# (i.e., those not in d_groups, hence description is missing)
d_mmi <- d_mmi %>%
    filter(is.na(DESCRIPTION)) %>%
    select(-DESCRIPTION, -group)
```

- Total number of non-endofauna records that have been removed: `r sum(d$COUNT, na.rm = TRUE)`.

The table below lists, for each non-endofauna group, the number of records that have been removed.

```{r, results='asis'}
d %>%
    xtable %>%
    print(type = "html", include.rownames = FALSE)
```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/groups-to-exclude.Rmd
### Hill's Diversity Number N2

Hill's diversity number _N2_ is the reciprocal of Simpson's index.
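A minimal sketch, assuming the finite-sample form of Simpson's index given later in this report (the package implementation may use a different estimator):

```r
# Illustrative sketch: Hill's N2 as the reciprocal of the finite-sample
# Simpson's index L; n is a vector of abundances per taxon (N > 1).
hill_n2 <- function(n) {
    N <- sum(n)
    L <- sum(n * (n - 1)) / (N * (N - 1))
    1 / L
}
hill_n2(c(10, 5, 3, 1))  # 1 / 0.339 = approx. 2.95
```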
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/indicator-hill-n2.Rmd
### Hurlbert's Probability of Interspecific Encounter (PIE)

Hurlbert's PIE is the probability that two individuals selected at random (_without_ replacement) from a sample will belong to different species (Hurlbert, 1971, p.579, Eq. 3). It is the complement of Simpson's _L_.
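Since PIE is the complement of Simpson's _L_ (sampling without replacement), it can be sketched as follows; this is an illustration, not the package implementation:

```r
# Illustrative sketch: Hurlbert's PIE = 1 - L (finite-sample form);
# n is a vector of abundances per taxon (N > 1).
hurlbert_pie <- function(n) {
    N <- sum(n)
    L <- sum(n * (n - 1)) / (N * (N - 1))
    1 - L
}
hurlbert_pie(c(10, 5, 3, 1))  # approx. 0.66
```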
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/indicator-hurlbert-pie.Rmd
### Natural logarithm of the total abundance

The natural logarithm of the total abundance is given by:

LNN = ln(_N_ + 1)
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/indicator-lnn.Rmd
### Margalef's Diversity (D)

Margalef's diversity is given by:

_D_ = (_S_ - 1) / ln(_N_)

It is less dependent on the sample area than _S_.
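A minimal sketch for a vector of abundances per taxon (illustration only; the package has its own implementation):

```r
# Illustrative sketch: Margalef's diversity from a vector of abundances.
margalef_d <- function(n) {
    S <- sum(n > 0)  # number of taxa
    N <- sum(n)      # total abundance
    (S - 1) / log(N)
}
margalef_d(c(10, 5, 3, 1))  # (4 - 1) / ln(19) = approx. 1.02
```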
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/indicator-margalef-d.Rmd
### Rygg SN

Rygg's SN is given by:

SN = ln(_S_) / ln(ln(_N_)), for _N_ > exp(1)

It is less dependent on the sample area than _S_.
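A minimal sketch (illustration only, not the package implementation):

```r
# Illustrative sketch: Rygg's SN; only defined for N > exp(1).
rygg_sn <- function(n) {
    S <- sum(n > 0)
    N <- sum(n)
    stopifnot(N > exp(1))
    log(S) / log(log(N))
}
rygg_sn(c(10, 5, 3, 1))  # ln(4) / ln(ln(19)) = approx. 1.28
```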
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/indicator-rygg-sn.Rmd
### Adjusted Rygg SN

The adjusted version of Rygg's SN is given by:

SNA = ln(_S_) / ln(ln(_N_ + 1) + 1)

It is less dependent on the sample area than _S_ and more consistent than the original formulation of Rygg (2006) for small _S_ (2, 3) and _N_ (2, 3).
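A sketch illustrating the improved behaviour for small samples (illustration only, not the package implementation):

```r
# Illustrative sketch: adjusted Rygg's SNA, defined for all N >= 1.
rygg_sna <- function(n) {
    S <- sum(n > 0)
    N <- sum(n)
    log(S) / log(log(N + 1) + 1)
}
rygg_sna(c(1, 1))  # approx. 0.94, whereas the original SN is
                   # ill-defined at N = 2 because ln(ln(2)) < 0
```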
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/indicator-rygg-sna.Rmd
### Shannon Index

Shannon's index (_H'_) is given by (Shannon, 1948, p.393):

_H'_ = - &sum;<sup>_S_</sup><sub>_i_=1</sub> _p_<sub>_i_</sub> log<sub>2</sub>(_p_<sub>_i_</sub>)

where _p_<sub>_i_</sub> = _n_<sub>_i_</sub> / _N_ is the proportion of individuals belonging to species _i_. In this tool, the logarithm to the base 2 is taken to estimate _H'_.
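A minimal sketch using the base-2 logarithm stated above (illustration only, not the package implementation):

```r
# Illustrative sketch: Shannon's H' with base-2 logarithm (bits);
# assumes all counts are > 0.
shannon_h <- function(n) {
    p <- n / sum(n)
    -sum(p * log2(p))
}
shannon_h(c(10, 5, 3, 1))  # approx. 1.64
```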
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/indicator-shannon-h.Rmd
### Simpson's Measure of Concentration _L_

Simpson's Measure of Concentration gives the probability that two individuals selected at random (_without_ replacement) from a sample will belong to the same species. It is given by (finite sample case):

_L_ = &sum;<sup>_S_</sup><sub>_i_=1</sub> _n_<sub>_i_</sub>(_n_<sub>_i_</sub> - 1) / (_N_(_N_ - 1))

where _n_<sub>_i_</sub> is the number of individuals in species _i_ and _N_ is the total number of individuals (total abundance).
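A minimal sketch of the finite-sample form above (illustration only, not the package implementation):

```r
# Illustrative sketch: Simpson's L, finite-sample form (N > 1).
simpson_l <- function(n) {
    N <- sum(n)
    sum(n * (n - 1)) / (N * (N - 1))
}
simpson_l(c(10, 5, 3, 1))  # 116 / 342 = approx. 0.34
```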
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/indicator-simpson-l.Rmd
### Species richness

Species richness (_S_) is defined as the number of taxa (lowest identification level possible) per sampling unit (data pool or box core sample).
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/indicator-species-richness.Rmd
### ITI: Infaunal Trophic Index

The percentage of the total abundance without an ITI classification (Gittenberger and van Loon, 2013), per OBJECTID-HABITAT-YEAR combination, is given below:

```{r results='asis'}
if (tibble::has_name(d_mmi, "ITI_GROUP")) {
    d <- d_mmi %>%
        filter(!CARNIVORE | is.na(CARNIVORE)) %>%
        group_by(OBJECTID, HABITAT, YEAR) %>%
        mutate(N = sum(VALUE)) %>%
        filter(has_iti(taxon = TAXON, group = ITI_GROUP)) %>%
        summarise(ITI_MISSING = 100 * (1 - sum(VALUE) / unique(N)))
} else {
    d <- d_mmi %>%
        filter(!CARNIVORE | is.na(CARNIVORE)) %>%
        group_by(OBJECTID, HABITAT, YEAR) %>%
        mutate(N = sum(VALUE)) %>%
        filter(has_iti(taxon = TAXON)) %>%
        summarise(ITI_MISSING = 100 * (1 - sum(VALUE) / unique(N)))
}
d %>%
    xtable %>%
    print(type = "html")
```

Note that in `r sum(d$ITI_MISSING > 20)` cases, more than 20% of the total abundance does not have an ITI classification.
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/iti-missing.Rmd
```{r child="default-iti.Rmd", eval=is.null(d_iti)} ``` ```{r child="user-iti.Rmd", eval=!is.null(d_iti)} ``` ```{r, eval=!is.null(d_iti)} d_mmi <- d_mmi %>% left_join(d_iti, by = "TAXON") %>% rename(ITI_GROUP = GROUP) ``` ```{r} if (is.null(d_iti)) { d <- d_mmi %>% filter(!has_iti(taxon = TAXON)) %>% select(TAXON) %>% group_by(TAXON) %>% summarise(N_RECORDS = n()) } else { d <- d_mmi %>% filter(!has_iti(taxon = TAXON, group = ITI_GROUP) | CARNIVORE) %>% select(TAXON, CARNIVORE) %>% group_by(TAXON, CARNIVORE) %>% summarise(N_RECORDS = n()) } ``` ```{r results='asis', eval=(nrow(d)>0L)} cat(sprintf("\nThe table below gives TAXA without an ITI-class. The total number of taxa without an ITI-class is %i. Among these taxa are %i carnivores which are always excluded from ITI-calculations.\n", nrow(d), sum(d$CARNIVORE, na.rm = TRUE))) d %>% ungroup %>% mutate( CARNIVORE = as.character(ifelse(is.na(CARNIVORE), FALSE, CARNIVORE)), CARNIVORE = ifelse(CARNIVORE, "yes", "no") ) %>% arrange(CARNIVORE, TAXON) %>% xtable %>% print(type = "html", include.rownames = FALSE, sanitize.text = identity) ``` ```{r} # make sure that carnivores are excluded from ITI-calculations d_mmi$ITI_GROUP[which(d_mmi$CARNIVORE)] <- NA_character_ ``` <br> <br> <br> The table below gives for each combination of OBJECTID-HABITAT-YEAR the average total abundance (N) and average percentage of individuals in each ITI class. File `r sQuote(basename(settings$files$out_iti))` in the OUTPUT-directory gives these entries per sample. ```{r, results='asis'} d <- d_mmi %>% group_by(OBJECTID, SAMPLEID, HABITAT, DATE, YEAR, CARNIVORE, ITI_GROUP) %>% summarise(n = sum(VALUE)) %>% ungroup %>% mutate(ITI_GROUP = ifelse(CARNIVORE, "carnivore", ITI_GROUP)) %>% select(-CARNIVORE) d <- d %>% left_join( d %>% group_by(OBJECTID, SAMPLEID, HABITAT, DATE, YEAR) %>% summarise(N = sum(n)), by = c("OBJECTID", "SAMPLEID", "HABITAT", "DATE", "YEAR")) %>% mutate(p = as.integer(round(100 * n / N))) %>% select(-n) %>% spread(key = ITI_GROUP, value = p, fill = 0L) %>% write_csv(settings$files$out_iti) rmean <- function(x){as.integer(round(mean(x)))} d %>% ungroup %>% select(-DATE, -SAMPLEID) %>% group_by(OBJECTID, HABITAT, YEAR) %>% summarise_each(funs(rmean)) %>% xtable %>% print(type = "html", include.rownames = FALSE) ```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/iti.Rmd
## Multidimensional scaling (MDS)

```{r}
to_log("INFO", "Entering section 'Multidimensional scaling'...")
```

The plot below is a multidimensional scaling plot (Sammon mapping) of the Bray-Curtis dissimilarity (distance). This plot can be used to identify outliers in the data. The Bray-Curtis dissimilarity is given by:

<b>D</b><sub>_ij_</sub> = &Sigma;<sub>_k_</sub> abs(_n_<sub>_ik_</sub> - _n_<sub>_jk_</sub>) / &Sigma;<sub>_k_</sub> (_n_<sub>_ik_</sub> + _n_<sub>_jk_</sub>)

where _n_<sub>_ik_</sub> and _n_<sub>_jk_</sub> are the abundances of species _k_ at locations _i_ and _j_, respectively. A standalone sketch of this formula is given at the end of this section.

```{r}
# create table in wide format:
# - left: sample identifiers
# - right: abundance per taxon
d <- d_mmi %>%
    select(OBJECTID, HABITAT, DATE, POOL_RUN, POOL_ID, TAXON, VALUE) %>%
    mutate(DATE = format(DATE)) %>%
    group_by(OBJECTID, HABITAT, DATE, POOL_RUN, POOL_ID, TAXON) %>%
    summarise(VALUE = sum(VALUE)) %>%
    spread(key = TAXON, value = VALUE, fill = 0L)

# select abundances only
N <- as.matrix(d[, -(1:5)])

# create (symmetric) Bray-Curtis distance matrix
n <- nrow(N)
D <- matrix(data = 0, nrow = n, ncol = n)
for (i in 1:(n-1)) {
    n1 <- N[i, ]
    for (j in (i+1):n) {
        n2 <- N[j, ]
        D[i, j] <- bray_curtis(n1, n2)
        D[j, i] <- D[i, j]
    }
}

# apply Sammon mapping
S <- MASS::sammon(D, trace = FALSE, tol = 1.0e-6)

# extract MDS configuration
V <- S$points %>%
    as.data.frame %>%
    set_names(c("V1", "V2"))

# add object identifiers
V$OBJECTID <- d$OBJECTID
V$HABITAT <- d$HABITAT
V$DATE <- d$DATE
V$MARKER <- 1:nrow(V)
V$MEDIAN_BC <- apply(X = D, MARGIN = 1, FUN = median)
if (isTRUE(settings$pooling)) {
    V$POOL_RUN <- d$POOL_RUN
    V$POOL_ID <- d$POOL_ID
} else {
    V$SAMPLEID <- d_mmi$SAMPLEID[d$POOL_ID %>% match(d_mmi$ID)]
}
```

Multidimensional scaling (MDS) compresses the _m_ &times; _m_ distance matrix <b>D</b> into a two-dimensional space such that the distortion of the original distances in <b>D</b> is minimized, where _m_ is the number of sampling sites. See Sammon (1969) for details. The figure below gives the multidimensional scaling representation of matrix <b>D</b>. Potential outliers are indicated by text markers.
<br>

```{r}
# detect potential outliers (two rounds of convex hull peeling)
d <- V %>% select(V1, V2, MARKER)
marker_pot_outlier <- d$MARKER[chull(x = d$V1, y = d$V2)]
d <- d %>% filter(!(MARKER %in% marker_pot_outlier))
marker_pot_outlier <- c(marker_pot_outlier, d$MARKER[chull(x = d$V1, y = d$V2)])
V$POT_OUTLIER <- V$MARKER %in% marker_pot_outlier

# create look-up-table for figure caption
V <- V %>%
    mutate(id_chr = paste(OBJECTID, HABITAT, sep = "-")) %>%
    mutate(id_num = id_chr %>% match(sort(unique(id_chr)))) %>%
    mutate(id_num = factor(x = id_num, levels = sort(unique(id_num)), ordered = TRUE))
lut <- V %>%
    select(id_num, id_chr) %>%
    distinct %>%
    arrange(id_num)
```

<figure>
```{r, fig.width = 6, fig.height = 5, out.width=900, dpi=300}
# create combinations of color and shape
col_shp <- expand.grid(
    col = c("red", "deepskyblue", "orange", "green", "purple"),
    shp = c(16, 15, 17, 3, 4),
    stringsAsFactors = FALSE
)

# plot MDS
ggplot() +
    geom_point(
        data = V,
        mapping = aes(x = V1, y = V2, colour = id_num, shape = id_num)) +
    geom_text(
        data = V[V$POT_OUTLIER, ],
        mapping = aes(x = V1, y = V2, label = MARKER)) +
    scale_color_manual(name = "", values = col_shp$col) +
    scale_shape_manual(name = "", values = col_shp$shp) +
    scale_x_continuous() +
    scale_y_continuous()
```
<figcaption>Multidimensional scaling plot.
The meaning of the numbers in the legend is: `r paste(lut$id_num, lut$id_chr, sep = " = ", collapse = "; ")`</figcaption>
</figure>
<br>
<br>

The table below can be used to identify potential outliers by matching the text markers in the figure above with those in column `MARKER` below. The MDS coordinates are given by (V1, V2), and MEDIAN_BC is the median of a sample's Bray-Curtis dissimilarities to all other samples. The Bray-Curtis dissimilarity ranges from 0 (identical) to 1 (completely different from other samples) and can be used to identify potential outliers.

```{r results='asis'}
# filter potential outliers
V <- V %>% filter(POT_OUTLIER)

# create table with information on potential outliers
if (isTRUE(settings$pooling)) {
    V %>%
        select(MARKER, V1, V2, AREA_CODE = id_num, OBJECTID, HABITAT, DATE,
               POOL_RUN, POOL_ID, MEDIAN_BC) %>%
        mutate(
            V1 = round(V1, 2),
            V2 = round(V2, 2),
            DATE = format(DATE)) %>%
        xtable %>%
        print(type = "html", include.rownames = FALSE)
} else {
    V %>%
        select(MARKER, V1, V2, OBJECTID, HABITAT, DATE, SAMPLEID, MEDIAN_BC) %>%
        mutate(
            V1 = round(V1, 2),
            V2 = round(V2, 2),
            DATE = format(DATE)) %>%
        xtable %>%
        print(type = "html", include.rownames = FALSE)
}
```
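For reference, the Bray-Curtis dissimilarity computed by the `bray_curtis()` helper above can be sketched as follows; this is a standalone illustration of the formula at the start of this section, not the package code:

```r
# Standalone sketch of the Bray-Curtis dissimilarity between two
# abundance vectors (same taxa, same order).
bray_curtis_sketch <- function(n1, n2) {
    sum(abs(n1 - n2)) / sum(n1 + n2)
}
bray_curtis_sketch(c(10, 0, 3), c(2, 5, 3))  # (8 + 5 + 0) / 23 = approx. 0.57
```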
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/mds.Rmd