--- title: "MAMS-CutoffScreening-GP-Symmetric-tutorial" output: rmarkdown::html_vignette author: "Ziyan Wang" vignette: > %\VignetteIndexEntry{MAMS-CutoffScreening-GP-Symmetric-tutorial} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesianPlatformDesignTimeTrend) ``` ## Four arm trial cutoff screening The 'BayesianPlatformDesignTimeTrend' package simulation process requires stopping boundary cutoff screening first (details refers to demo \code{\link{demo_Cutoffscreening_GP}} and MAMS-CutoffScreening-GP-tutorial). After the cutoff screening process, we need to record the cutoff value of both efficacy and futility boundary for use in the trial simulation process. The data of each trail replicates will be created sequentially during the simulation. In this tutorial, the cutoff screening process for symmertic boundary will be presented. The example is a four-arm MAMS trial with one control and three treatment arms. The control arm will not be stopped during the trial. The time trend pattern are set to be 'linear'. The way of time trend impacting the beginning response probability is set to be 'mult' (multiplicative). The time trend strength is set to be zero. The randomisation method used is the unfixed Thall's approach. The early stop boundary is the symmetric OBF boundary. The model used in this example are fixed effect model (the model with only treatment effect and the model with both treatment effect and discrete stage effect). The evaluation metrics are error rate, mean treatment effect bias, rooted MSE, mean number of patients allocated to each arm and mean total number of patients in the trial. ```{r,eval=FALSE} ntrials = 1000 # Number of trial replicates ns = seq(120, 600, 60) # Sequence of total number of accrued patients at each interim analysis null.reponse.prob = 0.15 alt.response.prob = 0.35 # We investigate the type I error rate for different time trend strength null.scenario = matrix( c( null.reponse.prob, null.reponse.prob, null.reponse.prob, null.reponse.prob ), nrow = 1, ncol = 4, byrow = T ) # alt.scenario = matrix(c(null.reponse.prob,null.reponse.prob,null.reponse.prob,null.reponse.prob, # null.reponse.prob,alt.response.prob,null.reponse.prob,null.reponse.prob, # null.reponse.prob,alt.response.prob,alt.response.prob,null.reponse.prob, # null.reponse.prob,alt.response.prob,alt.response.prob,alt.response.prob), nrow=3, ncol = 4,byrow=T) model = "tlr" #logistic model max.ar = 0.85 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar) #------------Select the data generation randomisation methods------- rand.type = "Urn" # Urn design max.deviation = 3 # The recommended value for the tuning parameter in the Urn design # Require multiple cores for parallel running cl = 2 # Set the model we want to use and the time trend effect for each model used. # Here the main model will be used twice for two different strength of time trend c(0,0,0,0) and c(1,1,1,1) to investigate how time trend affect the evaluation metrics in BAR setting. # Then the main + stage_continuous model which is the treatment effect + stage effect model will be applied for strength equal c(1,1,1,1) to investigate how the main + stage effect model improve the evaluation metrics. 
reg.inf = "main" trend.effect = c(0,0,0,0) result = { } OPC = { } K = dim(null.scenario)[2] cutoffindex = 1 trendindex = 1 cutoff.information=demo_Cutoffscreening.GP ( ntrials = ntrials, # Number of trial replicates trial.fun = simulatetrial, # Call the main function grid.inf = list( start.length = 10, grid.min = NULL, grid.max = NULL, confidence.level = 0.95, grid.length = 5000, change.scale = FALSE, noise = T, errorrate = 0.1, simulationerror = 0.01, iter.max = 15, plotornot = FALSE), # Set up the cutoff grid for screening. The start grid has three elements. The extended grid has fifteen cutoff value under investigation input.info = list( response.probs = null.scenario[1,], #The scenario vector in this round ns = ns, # Sequence of total number of accrued patients at each interim analysis max.ar = max.ar, #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar) rand.type = rand.type, # Which randomisation methods in data generation. max.deviation = max.deviation, # The recommended value for the tuning parameter in the Urn design model.inf = list( model = model, #Use which model? ibb.inf = list( #independent beta-binomial model which can be used only for no time trend simulation pi.star = 0.5, # beta prior mean pess = 2, # beta prior effective sample size betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation ), tlr.inf = list( beta0_prior_mu = 0, # Stan logistic model t prior location beta1_prior_mu = 0, # Stan logistic model t prior location beta0_prior_sigma = 2.5, # Stan logistic model t prior sigma beta1_prior_sigma = 2.5, # Stan logistic model t prior sigma beta0_df = 7, # Stan logistic model t prior degree of freedom beta1_df = 7, # Stan logistic model t prior degree of freedom reg.inf = reg.inf, # The model we want to use variable.inf = "Fixeffect" # Use fix effect logistic model ) ), Stop.type = "Early-OBF", # Use Pocock like early stopping boundary Boundary.type = "Symmetric", # Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1 Random.inf = list( Fixratio = FALSE, # Do not use fix ratio allocation Fixratiocontrol = NA, # Do not use fix ratio allocation BARmethod = "Thall", # Use Thall's Bayesian adaptive randomisation approach Thall.tuning.inf = list(tuningparameter = "Unfixed", fixvalue = 1) # Specified the tunning parameter value for fixed tuning parameter ), trend.inf = list( trend.type = "linear", # Linear time trend pattern trend.effect = trend.effect, # Stength of time trend effect trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability ) ), cl = 2 ) ``` Summary of the output data from cutoff screening example ```{r} library(ggplot2) # Details of grid optimdata=optimdata_sym # Recommend cutoff at each screening round nextcutoff = optimdata$next.cutoff prediction = optimdata$prediction cutoff=optimdata$cutoff tpIE=optimdata$tpIE cutoff=cutoff[1:sum(!is.na(tpIE))] tpIE=tpIE[1:sum(!is.na(tpIE))] GP.res = optimdata prediction = data.frame(yhat = GP.res$prediction$yhat.t1E, sd = matrix(GP.res$prediction$sd.t1E,ncol=1), qup = GP.res$prediction$qup.t1E, qdown = GP.res$prediction$qdown.t1E, xgrid = GP.res$prediction$xgrid) GPplot=ggplot(data = prediction) + geom_ribbon(aes(x = xgrid, ymin = qdown, ymax = qup),col="#f8766d", alpha = 0.5,linetype = 2) + geom_line(aes(xgrid, yhat),col = "#f8766d") + geom_point(aes(cutoff[1:sum(!is.na(tpIE))], tpIE[1:sum(!is.na(tpIE))]), data = data.frame(tpIE=tpIE,cutoff=cutoff),col = "#00bfc4") + 
geom_point(aes(nextcutoff, 0.1), data = data.frame(tpIE=tpIE,cutoff=cutoff),col = "#f8766d") + geom_hline(yintercept = 0.1,linetype = 2) + geom_text(aes(x=1,y=0.15,label=paste0("FWER target is 0.1")),hjust=0,vjust=1)+ geom_vline(xintercept = nextcutoff, linetype = 2) + geom_text(aes(x=6,y=0.8,label=paste0("Next cutoff value is ",round(nextcutoff,3))))+ theme_minimal()+ylab("FWER")+xlab("Cutoff value of the OBF boundary (c*)")+ geom_point(aes(nextcutoff, 0.1), data = data.frame(tpIE=tpIE,cutoff=cutoff),col = "#f8766d") + theme(plot.background = element_rect(fill = "#e6dfba")) print(GPplot) ```
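The recommended cutoff from the final screening round can then be recorded for the trial simulation step. As a minimal sketch (not part of the original demo output, assuming `optimdata_sym$next.cutoff` holds the recommended value as used in the plot above), the symmetric boundary pair is the efficacy cutoff together with one minus that value for futility:

```{r, eval=FALSE}
# Record the recommended efficacy cutoff from the screening above; for a
# symmetric boundary the efficacy and futility cutoffs sum up to 1.
cutoff.eff <- optimdata_sym$next.cutoff
cutoff.fut <- 1 - cutoff.eff
c(cutoff.eff, cutoff.fut)
```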
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/vignettes/MAMS-CutoffScreening-GP-Symmetric-tutorial.Rmd
--- title: "MAMS-CutoffScreening-tutorial" output: rmarkdown::html_vignette author: "Ziyan Wang" vignette: > %\VignetteIndexEntry{MAMS-CutoffScreening-tutorial} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesianPlatformDesignTimeTrend) ``` ## Four arm trial cutoff screening The 'BayesianPlatformDesignTimeTrend' package simulation process requires stopping boundary cutoff screening first (details refers to demo \code{\link{demo_Cutoffscreening}} and MAMS-CutoffScreening-tutorial). After the cutoff screening process, we need to record the cutoff value of both efficacy and futility boundary for use in the trial simulation process. The data of each trail replicates will be created sequentially during the simulation. In this tutorial, the cutoff screening process will be presented. The example is a four-arm MAMS trial with one control and three treatment arms. The control arm will not be stopped during the trial. The time trend pattern are set to be 'linear'. The way of time trend impacting the beginning response probability is set to be 'mult' (multiplicative). The time trend strength is set to be zero. The randomisation method used is the unfixed Thall's approach. The early stop boundary is the symmetric Pocock boundary. The model used in this example are fixed effect model (the model with only treatment effect and the model with both treatment effect and discrete stage effect). The evaluation metrics are error rate, mean treatment effect bias, rooted MSE, mean number of patients allocated to each arm and mean total number of patients in the trial. ```{r,eval=FALSE} ntrials = 1000 # Number of trial replicates ns = seq(120, 600, 120) # Sequence of total number of accrued patients at each interim analysis null.reponse.prob = 0.4 alt.response.prob = 0.6 # We investigate the type I error rate for different time trend strength null.scenario = matrix( c( null.reponse.prob, null.reponse.prob, null.reponse.prob, null.reponse.prob ), nrow = 1, ncol = 4, byrow = T ) # alt.scenario = matrix(c(null.reponse.prob,null.reponse.prob,null.reponse.prob,null.reponse.prob, # null.reponse.prob,alt.response.prob,null.reponse.prob,null.reponse.prob, # null.reponse.prob,alt.response.prob,alt.response.prob,null.reponse.prob, # null.reponse.prob,alt.response.prob,alt.response.prob,alt.response.prob), nrow=3, ncol = 4,byrow=T) model = "tlr" #logistic model max.ar = 0.75 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar) #------------Select the data generation randomisation methods------- rand.type = "Urn" # Urn design max.deviation = 3 # The recommended value for the tuning parameter in the Urn design # Require multiple cores for parallel running cl = 2 # Set the model we want to use and the time trend effect for each model used. # Here the main model will be used twice for two different strength of time trend c(0,0,0,0) and c(1,1,1,1) to investigate how time trend affect the evaluation metrics in BAR setting. # Then the main + stage_continuous model which is the treatment effect + stage effect model will be applied for strength equal c(1,1,1,1) to investigate how the main + stage effect model improve the evaluation metrics. 
reg.inf = "main" trend.effect = c(0,0,0,0) result = { } OPC = { } K = dim(null.scenario)[2] cutoffindex = 1 trendindex = 1 cutoff.information=demo_Cutoffscreening ( ntrials = ntrials, # Number of trial replicates trial.fun = simulatetrial, # Call the main function grid.inf = list(start = c(0.9, 0.95, 1), extendlength = 20), # Set up the cutoff grid for screening. The start grid has three elements. The extended grid has fifteen cutoff value under investigation input.info = list( response.probs = null.scenario[1,], #The scenario vector in this round ns = ns, # Sequence of total number of accrued patients at each interim analysis max.ar = max.ar, #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar) rand.type = rand.type, # Which randomisation methods in data generation. max.deviation = max.deviation, # The recommended value for the tuning parameter in the Urn design model.inf = list( model = model, #Use which model? ibb.inf = list( #independent beta-binomial model which can be used only for no time trend simulation pi.star = 0.5, # beta prior mean pess = 2, # beta prior effective sample size betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation ), tlr.inf = list( beta0_prior_mu = 0, # Stan logistic model t prior location beta1_prior_mu = 0, # Stan logistic model t prior location beta0_prior_sigma = 2.5, # Stan logistic model t prior sigma beta1_prior_sigma = 2.5, # Stan logistic model t prior sigma beta0_df = 7, # Stan logistic model t prior degree of freedom beta1_df = 7, # Stan logistic model t prior degree of freedom reg.inf = reg.inf, # The model we want to use variable.inf = "Fixeffect" # Use fix effect logistic model ) ), Stop.type = "Early-Pocock", # Use Pocock like early stopping boundary Boundary.type = "Symmetric", # Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1 Random.inf = list( Fixratio = FALSE, # Do not use fix ratio allocation Fixratiocontrol = NA, # Do not use fix ratio allocation BARmethod = "Thall", # Use Thall's Bayesian adaptive randomisation approach Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1) # Specified the tunning parameter value for fixed tuning parameter ), trend.inf = list( trend.type = "linear", # Linear time trend pattern trend.effect = trend.effect, # Stength of time trend effect trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability ) ), cl = 2 ) ``` Summary of the output data from cutoff screening example ```{r} # Details of grid dataloginformd # Recommend cutoff at each screening round t(recommandloginformd) # Plot plot( tpIE ~ cutoff, pch = 16, xlab = "Cutoff", ylab = "Type I Error", cex.lab = 1.3, col = "#f8766d", data = data.frame(dataloginformd) ) cutoffgrid <- seq(0.9, 1, 0.0001) lines(cutoffgrid, t(predictedtpIEinformd), col = "#00bfc4", lwd = 3) ``` ```
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/vignettes/MAMS-CutoffScreening-tutorial.Rmd
--- title: "MAMS-trial-simulation-tutorial" output: rmarkdown::html_vignette author: "Ziyan Wang" vignette: > %\VignetteIndexEntry{MAMS-trial-simulation-tutorial} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesianPlatformDesignTimeTrend) ``` ## Four arm trial simulation The 'BayesianPlatformDesignTimeTrend' package simulation process requires stopping boundary cutoff screening first (details refers to demo \code{\link{demo_Cutoffscreening}} and MAMS-CutoffScreening-tutorial). After the cutoff screening process, we need to record the cutoff value of both efficacy and futility boundary for use in the trial simulation process. The data of each trail replicates will be created sequentailly during the simulation. In this tutorial, the example is a four-arm MAMS trial with one control and three treatment arms. The control arm will not be stopped during the trial. The time trend pattern are set to be 'linear'. The way of time trend impacting the beginning response probability is set to be 'mult' (multiplicative). The time trend strength is set to be zero first and then set to be 0.5 to study the impact of time trend on different evaluation metrics. The model used in this example are fixed effect model (the model with only treatment effect and the model with both treatment effect and discrete stage effect). The evaluation metrics are error rate, mean treatment effect bias, rooted MSE, mean number of patients allocated to each arm and mean total number of patients in the trial. Firstly, We investigate the family wise error rate (FWER) for different time trend strength and how model with stage effect help control the family wise error rate. The cutoff value was screened for null scenario using model only main effect in order to control the FWER under 0.1. The false positive rate is 0.037 equally for each treatment - control comparison. ```{r} ntrials = 1000 # Number of trial replicates ns = seq(120,600,120) # Sequence of total number of accrued patients at each interim analysis null.reponse.prob = 0.4 alt.response.prob = 0.6 # We investigate the type I error rate for different time trend strength null.scenario = matrix( c( null.reponse.prob, null.reponse.prob, null.reponse.prob, null.reponse.prob ), nrow = 1, ncol = 4, byrow = T ) # alt.scenario = matrix(c(null.reponse.prob,null.reponse.prob,null.reponse.prob,null.reponse.prob, # null.reponse.prob,alt.response.prob,null.reponse.prob,null.reponse.prob, # null.reponse.prob,alt.response.prob,alt.response.prob,null.reponse.prob, # null.reponse.prob,alt.response.prob,alt.response.prob,alt.response.prob), nrow=3, ncol = 4,byrow=T) model = "tlr" #logistic model max.ar = 0.75 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar) #------------Select the data generation randomisation methods------- rand.type = "Urn" # Urn design max.deviation = 3 # The recommended value for the tuning parameter in the Urn design # Require multiple cores for parallel running cl = 2 # Set the model we want to use and the time trend effect for each model used. # Here the main model will be used twice for two different strength of time trend c(0,0,0,0) and c(1,1,1,1) to investigate how time trend affect the evaluation metrics in BAR setting. 
# Then the main + stage_continuous model which is the treatment effect + stage effect model will be applied for strength equal c(1,1,1,1) to investigate how the main + stage effect model improve the evaluation metrics. reg.inf = c("main", "main", "main + stage_continuous") trend.effect = matrix( c(0, 0, 0, 0, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1), ncol = 4, nrow = 3, byrow = T ) # cutoffearly = matrix(rep(0.994, dim(null.scenario)[1]), ncol = 1) K = dim(null.scenario)[2] print( paste0( "Start trial simulation. This is a ", K, "-arm trial simulation. There are one null scenario and ", K - 1 , " alternative scenarios. There are ", K , " rounds." ) ) cutoffindex = 1 ``` ```{r, eval=FALSE} result = { } OPC_null = { } for (i in 1:dim(null.scenario)[1]) { trendindex = 1 for (j in 1:length(reg.inf)){ restlr = Trial.simulation( ntrials = ntrials, # Number of trial replicates trial.fun = simulatetrial, # Call the main function input.info = list( response.probs = null.scenario[cutoffindex, ], #The scenario vector in this round ns = ns, # Sequence of total number of accrued patients at each interim analysis max.ar = max.ar, #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar) rand.type = rand.type, # Which randomisation methods in data generation. max.deviation = max.deviation, # The recommended value for the tuning parameter in the Urn design model.inf = list( model = model, #Use which model? ibb.inf = list( #independent beta-binomial model which can be used only for no time trend simulation pi.star = 0.5, # beta prior mean pess = 2, # beta prior effective sample size betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation ), tlr.inf = list( beta0_prior_mu = 0, # Stan logistic model t prior location beta1_prior_mu = 0, # Stan logistic model t prior location beta0_prior_sigma = 2.5, # Stan logistic model t prior sigma beta1_prior_sigma = 2.5, # Stan logistic model t prior sigma beta0_df = 7, # Stan logistic model t prior degree of freedom beta1_df = 7, # Stan logistic model t prior degree of freedom reg.inf = reg.inf[trendindex], # The model we want to use variable.inf = "Fixeffect" # Use fix effect logistic model ) ), Stopbound.inf = Stopboundinf( Stop.type = "Early-Pocock", # Use Pocock like early stopping boundary Boundary.type = "Symmetric", # Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1 cutoff = c(cutoffearly[cutoffindex, 1], 1 - cutoffearly[cutoffindex, 1]) # The cutoff value for stopping boundary ), Random.inf = list( Fixratio = FALSE, # Do not use fix ratio allocation Fixratiocontrol = NA, # Do not use fix ratio allocation BARmethod = "Thall", # Use Thall's Bayesian adaptive randomisation approach Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1) # Specified the tunning parameter value for fixed tuning parameter ), trend.inf = list( trend.type = "step", # Linear time trend pattern trend.effect = trend.effect[trendindex, ], # Stength of time trend effect trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability ) ), cl = 2 # 2 cores required ) trendindex = trendindex + 1 # The result list can be used for plotting and the OPC table is the summary evaluaton metrics for each scenario result = c(result, restlr$result) OPC_null = rbind(OPC_null, restlr$OPC) } cutoffindex = cutoffindex + 1 } ``` ```{r} print("Finished null scenario study") save_data = FALSE if (isTRUE(save_data)) { save(result, file = restlr$Nameofsaveddata$nameData) 
save(OPC_null, file = restlr$Nameofsaveddata$nameTable) } ``` Present the evaluation metrics for the null scenario. The FWER is 0.1 when there is no time trend. The FWER inflates to 0.1296 when there is a step time trend pattern modeled by the main fixed effect model. The main effect plus stage effect model brings the FWER back under 0.1. ```{r} # Characteristic table print(OPC_null) ``` Then, we investigate the other evaluation metrics for the alternative scenarios with different time trend strengths. The cutoff value is the same as in the previous example since the control arm response probability in the alternative scenarios is the same as that in the null scenario. ```{r} ntrials = 1000 # Number of trial replicates ns = seq(120,600,120) # Sequence of total number of accrued patients at each interim analysis null.reponse.prob = 0.4 alt.response.prob = 0.6 # We investigate the type I error rate for different time trend strength alt.scenario = matrix( c( null.reponse.prob, alt.response.prob, null.reponse.prob, null.reponse.prob, null.reponse.prob, alt.response.prob, alt.response.prob, null.reponse.prob, null.reponse.prob, alt.response.prob, alt.response.prob, alt.response.prob ), nrow = 3, ncol = 4, byrow = T ) model = "tlr" #logistic model max.ar = 0.75 #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar) #------------Select the data generation randomisation methods------- rand.type = "Urn" # Urn design max.deviation = 3 # The recommended value for the tuning parameter in the Urn design # Require multiple cores for parallel running cl = 2 # Set the model we want to use and the time trend effect for each model used. # Here the main model will be used twice for two different strength of time trend c(0,0,0,0) and c(1,1,1,1) to investigate how time trend affect the evaluation metrics in BAR setting. # Then the main + stage_continuous model which is the treatment effect + stage effect model will be applied for strength equal c(1,1,1,1) to investigate how the main + stage effect model improve the evaluation metrics. reg.inf = c("main + stage_continuous") trend.effect = matrix(c(0.1, 0.1, 0.1, 0.1), ncol = 4, nrow = 1, byrow = T) # cutoffearly = matrix(rep(0.994, dim(alt.scenario)[1]), ncol = 1) K = dim(alt.scenario)[2] print( paste0( "Start trial simulation. This is a ", K, "-arm trial simulation. There are one null scenario and ", K - 1 , " alternative scenarios. There are ", K , " rounds." ) ) cutoffindex = 1 ``` ```{r, eval=FALSE} result = { } OPC_alt = { } for (i in 1:dim(alt.scenario)[1]) { trendindex = 1 for (j in 1:length(reg.inf)){ restlr = Trial.simulation( ntrials = ntrials, # Number of trial replicates trial.fun = simulatetrial, # Call the main function input.info = list( response.probs = alt.scenario[cutoffindex, ], #The scenario vector in this round ns = ns, # Sequence of total number of accrued patients at each interim analysis max.ar = max.ar, #limit the allocation ratio for the control group (1-max.ar < r_control < max.ar) rand.type = rand.type, # Which randomisation methods in data generation. max.deviation = max.deviation, # The recommended value for the tuning parameter in the Urn design model.inf = list( model = model, #Use which model? 
ibb.inf = list( #independent beta-binomial model which can be used only for no time trend simulation pi.star = 0.5, # beta prior mean pess = 2, # beta prior effective sample size betabinomialmodel = ibetabinomial.post # beta-binomial model for posterior estimation ), tlr.inf = list( beta0_prior_mu = 0, # Stan logistic model t prior location beta1_prior_mu = 0, # Stan logistic model t prior location beta0_prior_sigma = 2.5, # Stan logistic model t prior sigma beta1_prior_sigma = 2.5, # Stan logistic model t prior sigma beta0_df = 7, # Stan logistic model t prior degree of freedom beta1_df = 7, # Stan logistic model t prior degree of freedom reg.inf = reg.inf[trendindex], # The model we want to use variable.inf = "Fixeffect" # Use fix effect logistic model ) ), Stopbound.inf = Stopboundinf( Stop.type = "Early-Pocock", # Use Pocock like early stopping boundary Boundary.type = "Symmetric", # Use Symmetric boundary where cutoff value for efficacy boundary and futility boundary sum up to 1 cutoff = c(cutoffearly[cutoffindex, 1], 1 - cutoffearly[cutoffindex, 1]) # The cutoff value for stopping boundary ), Random.inf = list( Fixratio = FALSE, # Do not use fix ratio allocation Fixratiocontrol = NA, # Do not use fix ratio allocation BARmethod = "Thall", # Use Thall's Bayesian adaptive randomisation approach Thall.tuning.inf = list(tuningparameter = "Fixed", fixvalue = 1) # Specified the tunning parameter value for fixed tuning parameter ), trend.inf = list( trend.type = "step", # Linear time trend pattern trend.effect = trend.effect[trendindex, ], # Stength of time trend effect trend_add_or_multip = "mult" # Multiplicative time trend effect on response probability ) ), cl = 2 # 2 cores required ) trendindex = trendindex + 1 # The result list can be used for plotting and the OPC table is the summary evaluaton metrics for each scenario result = c(result, restlr$result) OPC_alt = rbind(OPC_alt, restlr$OPC) } cutoffindex = cutoffindex + 1 } ``` ```{r} print("Finished alternative scenario study") save_data = FALSE if (isTRUE(save_data)) { save(result, file = restlr$Nameofsaveddata$nameData) save(OPC_alt, file = restlr$Nameofsaveddata$nameTable) } ``` Present the evaluation metrics for alternative scenarios. The power used here is the conjunctive power where the trial will be sucessful only if the effective arms are correctly claimed to be effective and all other null arms are claimed to be ineffective. ```{r} # Characteristic table print(OPC_alt) ```
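As a rough check (an independence approximation, not part of the original tutorial), the per-comparison false positive rate of 0.037 quoted at the start of this tutorial corresponds to a family-wise error rate of about $1 - (1 - 0.037)^3 \approx 0.107$ across the three treatment-control comparisons, close to the 0.1 target:

```{r}
# Independence approximation linking the per-comparison false positive rate
# (0.037) to the family-wise error rate over the three comparisons
1 - (1 - 0.037)^3
```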
/scratch/gouwar.j/cran-all/cranData/BayesianPlatformDesignTimeTrend/vignettes/MAMS-trial-simulation-tutorial.Rmd
# Bayes_sampsize ==== #' Determine the required sample size for a Bayesian hypothesis test #' #' @param h1 A constraint matrix defining H1. #' @param h2 A constraint matrix defining H2. #' @param m1 A vector of expected population means under H1 (standardized). #' @param m2 A vector of expected populations means under H2 (standardized). #' \code{m2} must be of same length as \code{m1} #' @param sd1 A vector of standard deviations under H1. Must be a single number (equal #' standard deviation under all populations), or a vector of the same length as \code{m1} #' @param sd2 A vector of standard deviations under H2. Must be a single number (equal #' standard deviation under all populations), or a vector of the same length as \code{m2} #' @param scale A number specifying the prior scale #' @param type A character. The type of error to be controlled #' options are: \code{"1", "2", "de", "aoi", "med.1", "med.2"} #' @param cutoff A number. The cutoff criterion for type. #' If \code{type} is \code{"1", "2", "de", "aoi"}, \code{cutoff} must be between 0 and 1 #' If \code{type} is \code{"med.1" or "med.2"}, \code{cutoff} must be larger than 1 #' @param bound1 A number. The boundary above which BF12 favors H1 #' @param bound2 A number. The boundary below which BF12 favors H2 #' @param datasets A number. The number of datasets to compute the error probabilities #' @param nsamp A number. The number of prior or posterior samples to determine the #' fit and complexity #' @param minss A number. The minimum sample size to consider #' @param maxss A number. The maximum sample size to consider #' @param seed A number. The random seed to be set #' @return The sample size for which the chosen type of error probability #' is at the set cutoff, and the according error probabilities and median Bayes factors #' @examples #' # Short computation example NOT SUFFICIENT SAMPLES #' h1 <- matrix(c(1,-1), nrow= 1, byrow= TRUE) #' h2 <- 'c' #' m1 <- c(.4, 0) #' m2 <- c(0, .1) #' bayes_sampsize(h1, h2, m1, m2, sd1 = 1, sd2 = 1, scale = 1000, #' type = "de", cutoff = .125, nsamp = 50, datasets = 50, #' minss = 40, maxss = 70) #' @export bayes_sampsize <- function(h1, h2, m1, m2, sd1 = 1, sd2 = 1, scale = 1000, type = 1, cutoff, bound1 = 1, bound2 = 1 / bound1, datasets = 1000, nsamp = 1000, minss = 2, maxss = 1000, seed = 31) { # Errors ==== if(!is.numeric(cutoff)) stop("expected numeric value") if(!is.numeric(minss) || !is.numeric(maxss)) stop("minss and maxss must be integer") types <- c("1", "2", "de", "aoi", "med.1", "med.2") if(all(types != type)) stop("Incorrect type specified") if(type == "med.1" || type == "med.2") { if(cutoff < 1) stop("Cutoff must be larger than 1 for controlling median BF") cutoff <- 1/cutoff } else { if(cutoff > 1) stop( "Cutoff must be smaller than 1 for controlling error probabilities") } if((maxss-minss) < 20 ) stop("Difference between minss and maxss must be at least 20") # Function ==== times <- c(ceiling(log(maxss) / log(minss + 1)) + 1) timesT <- 0 { pb <- utils::txtProgressBar(min = 0, max = times) set.seed(seed) n <- round(maxss / 2) oldn <- n + 10 ngroup = length(m1) while ((abs(oldn - n) > 1)) { errors <- bayes_power(n = n, h1 = h1, h2 = h2, m1 = m1, m2 = m2, sd1 = sd1, sd2 = sd2, scale = scale, bound1 = bound1, bound2 = bound2, datasets = datasets, nsamp = nsamp, seed = seed) if (errors[[paste(type)]] < cutoff) { oldn <- n maxss <- n n <- round((n + minss) / 2) } if (errors[[paste(type)]] > cutoff) { oldn <- n minss <- n n <- round((n + maxss) / 2) } if (errors[[paste(type)]] == 
cutoff) break timesT <- timesT + 1 utils::setTxtProgressBar(pb, timesT) } close(pb) } return(c("Sample size" = as.integer(n), "Type 1 error" = errors[[1]], "Type 2 error" = errors[[2]], "Decision error" = errors[[3]], "Indecision error" = errors[[4]], "Median BF12 under H1" = errors[[5]], "Median BF21 under H2"= 1 / errors[[6]], "10quantile under H1" = errors[[7]], "90quantile under H1" = errors[[8]], "10quantile under H2" = errors[[9]], "90quantile under H2" = errors[[10]])) } # Bayes_power ==== #' Determine the 'power' for a Bayesian hypothesis test #' #' @param n A number. The sample size #' @param h1 A constraint matrix defining H1 #' @param h2 A constraint matrix defining H2 #' @param m1 A vector of expected population means under H1 #' @param m2 A vector of expected populations means under H2 #' \code{m2} must be of same length as \code{m1} #' @param sd1 A vector of standard deviations under H1. Must be a single number (equal #' standard deviation under all populations), or a vector of the same length as \code{m1} #' @param sd2 A vector of standard deviations under H2. Must be a single number (equal #' standard deviation under all populations), or a vector of the same length as \code{m2} #' @param scale A number specifying the prior scale #' @param bound1 A number. The boundary above which BF12 favors H1 #' @param bound2 A number. The boundary below which BF12 favors H2 #' @param datasets A number. The number of datasets to compute the error probabilities #' @param nsamp A number. The number of prior or posterior samples to determine the #' fit and complexity #' @param seed A number. The random seed to be set #' @return The Type 1, Type 2, Decision error and Area of Indecision probability and #' the median BF12s under H1 and H2 #' @examples #' # Short example WITH SMALL AMOUNT OF SAMPLES #' h1 <- matrix(c(1,-1,0,0,1,-1), nrow= 2, byrow= TRUE) #' h2 <- "c" #' m1 <- c(.4,.2,0) #' m2 <- c(.2,0,.1) #' bayes_power(40, h1, h2, m1, m2, datasets = 50, nsamp = 50) #' @export bayes_power <- function(n, h1, h2, m1, m2, sd1 = 1, sd2 = 1, scale = 1000, bound1 = 1, bound2 = 1/bound1, datasets = 1000, nsamp = 1000, seed = 31){ # Errors ==== if(!is.numeric(bound1) || !is.numeric(bound2) || !is.numeric(m1) || !is.numeric(m2) || !is.numeric(h1) || !is.numeric(datasets) || !is.numeric(n) || !is.numeric(nsamp) || !is.numeric(sd1) || !is.numeric(sd2) || !is.numeric(scale) || !is.numeric(seed)) { stop("expected numeric value") } if(round(n) != n) stop("n must be integer") if(length(dim(h1)) != 2) stop("h1 must be a matrix") if(any(round(h1) != h1)) stop("h1 can only contain integers") if(any(h1 > 1) || any(h1 < -1)) stop( "h1 must consist of 1 -1 and 0") if(is.numeric(h2)){ if(length(dim(h2)) != 2) stop("h2 must be a matrix") if(any(round(h2) != h2)) stop("h2 can only contain integers") if(any(h2 > 1) || any(h2 < -1)) stop( "h2 must consist of 1 -1 and 0") } else { if(h2 != "c" && h2 != "u") stop("Use 'u' or 'c' for unconstrained or complement.") } if(length(m1) != length(m2)) stop("m1 and m2 must be the same length") if(length(m1) != ncol(h1)) stop("h1 and m1 do not match") if(length(sd1) != 1){ if(length(sd1 != length(m1))) stop("sd1 must be a single number or a vector of same length as m1") } if(length(sd2) != 1){ if(length(sd2 != length(m2))) stop("sd2 must be a single number or a vector of same length as m2") } if(bound1 < bound2) stop("bound1 must be larger than bound2") # # Function ==== # if(!is.null(seed)) { # if(!is.integer(seed)) stop("seed must be integer") set.seed(seed) # } # 
if(is.null(ngroup)) { ngroup <- length(m1) # } else { # if(!is.integer(ngroup)) stop("ngroup must be integer") # } BF1 <- samp_bf(datasets, n, ngroup, means = m1, sds = sd1, h1, h2, scale, nsamp) BF2 <- samp_bf(datasets, n, ngroup, means = m2, sds = sd2, h1, h2, scale, nsamp) errors <- bayes_error(BF1, BF2, bound1, bound2) return(errors) }
/scratch/gouwar.j/cran-all/cranData/BayesianPower/R/power_sampsize.R
# Documentation eval_const() ==== #' Evaluate a constraint matrix for a set of prior/posterior samples #' #' @param hyp A constraint matrix defining a hypothesis. #' @param samples A matrix. Prior or posterior samples, the number of columns #' corresponds to the number of groups, the number of rows the number of samples #' @return A number between 0 and 1. The proportion of samples in which the #' constraints are met. eval_const <- function(hyp, samples) { # Errors ==== if(length(dim(hyp)) != 2) { stop("Hypothesis must be a matrix") } if(length(dim(samples)) != 2) { stop("Samples must be in a matrix form") } if(ncol(samples) != ncol(hyp)) { stop("Not the right dimensions") } # Function ==== nconst <- nrow(hyp) out <- mean(apply(samples, 1, function(i) { rowCheck <- apply(hyp, 1, function(j) { sum(i * j) > 0 } ) sum(rowCheck) == nconst } ) ) return(out) } # Documentation samp_dist() ==== #' Sample from prior or posterior distribution #' #' @param nsamp A number. The number of prior or posterior samples to determine the #' fit and complexity #' @param means A vector. The prior or posterior means for each group #' @param sds A number or a vector. The standard deviations for each group #' If a number is used, the same prior or posterior standard deviation is #' used for each group. #' @return A matrix of \code{nsamp} rows and as many columns as the #' length of \code{means}. samp_dist <- function(nsamp, means, sds) { # Function ==== ngroup <- length(means) samples <- matrix(stats::rnorm(nsamp * ngroup, mean = means, sd = sds), ncol = ngroup, byrow = TRUE) return(samples) } # Documentation calc_fc() ==== #' Compute the complexity or fit for two hypotheses. #' @param hyp A constraint matrix defining H1. #' @param hyp2 A constraint matrix defining H2 OR a character \code{'u'} #' or \code{'c'} specifying an unconstrained or complement hypothesis #' @param means A vector of posterior or prior means #' @param sds A vector or posterior or prior standard deviation #' @param nsamp A number. The number of prior or posterior samples to determine the #' fit and complexity #' @return A vector. #' The proportion of posterior samples in agreement with H1 and with H2 calc_fc <- function(hyp, hyp2, means, sds, nsamp = 1000) { # Errors ==== if(is.numeric(hyp2)){ if(length(dim(hyp2)) != 2) stop("Hypothesis must be a matrix") } else { if(hyp2 != "c" && hyp2 != "u") stop("Use 'u' or 'c' for unconstrained or complement.") } if(length(dim(hyp)) != 2) { stop("Hypothesis must be a matrix") } # Function ==== ngroup <- length(means) samples <- samp_dist(nsamp, means = means, sds = sds) fc1 <- eval_const(hyp, samples) if (fc1 == 0) fc1 <- 1/nsamp fc2 <- if (is.character(hyp2)) { if (hyp2 == 'u') 1 if (hyp2 == 'c') 1 - fc1 } else { eval_const(hyp2, samples) } if (fc2 == 0) fc2 <- 1 / nsamp out <- c(fc1, fc2) return(out) } # Documentation calc_bf()==== #' Compute a Bayes factor #' #' @param data A matrix. The dataset for which the BF must be computed #' @param h1 A constraint matrix defining H1. #' @param h2 A constraint matrix defining H2. #' @param scale A number specifying the prior scale. #' @param nsamp A number. 
The number of prior or posterior samples to determine the #' @return BF12, that is, the evidence for H1 relative to H2 calc_bf <- function(data, h1, h2, scale, nsamp = 1000) { # Function ==== postmeans <- colMeans(data) postsds <- apply(data, 1, stats::sd) / sqrt(nrow(data)) ngroup <- length(postmeans) priormeans <- rep(0, ngroup) priorsds <- postsds*scale comp <- calc_fc(h1, h2, priormeans, priorsds, nsamp) fit <- calc_fc(h1, h2, postmeans, postsds, nsamp) bf <- (fit[1] / comp[1]) / (fit[2] / comp[2]) return(bf) } # Documentation samp_bf() ==== #' Sample multiple datasets and compute the Bayes factor in each #' #' @param datasets A number. The number of datasets to simulate for each #' sample size \code{n} #' @param n A number. The group sample size to be used in data simulation #' @param ngroup A number. The number of groups. #' @param means A vector of expected population means. #' @param sds A vector of expected population standard deviations #' Note, when standardized, this is a vector of 1s #' @param h1 A constraint matrix defining H1. #' @param h2 A constraint matrix defining H2. #' @param scale A number specifying the prior scale. #' @param nsamp A number. The number of samples for the fit and complexity #' See \code{?BayesianPower::calc_fc} #' @return A vector of Bayes factors BF12 for each of the simulated datasets samp_bf <- function(datasets, n, ngroup, means, sds, h1, h2, scale, nsamp) { # Function ==== out <- sapply(1:datasets, function(i){ data <- matrix(stats::rnorm(n * ngroup, mean = means, sd = sds), ncol = ngroup, byrow = TRUE) calc_bf(data = data, h1, h2, scale, nsamp) } ) return(out) } # Documentation bayes_error() ==== #' Determine the unconditional error probabilities for a set of simulated #' Bayes factors. #' #' @param BFs1 A vector. Simulated BF12 under H1 for a given n #' @param BFs2 A vector. Simulated BF12 under H2 for a given n #' @param bound1 A number. The boundary above which BF12 favors H1 #' @param bound2 A number. The boundary below which BF12 favors H2 #' @return A named vector. The Type 1, Type 2, Decision error and Area of Indecision probabilities #' and the median Bayes factors under H1 and H2 bayes_error <- function(BFs1, BFs2, bound1 = 1, bound2 = 1/bound1) { # Function ==== type1 <- mean(BFs1 < bound2) type2 <- mean(BFs2 > bound1) de <- (type1 + type2)/2 aoi <- if (bound1 != bound2) { (mean((BFs1 < bound1) & (BFs1 > bound2)) + mean((BFs2 < bound1) & (BFs2 > bound2))) / 2 } else { 0 } med.2 <- stats::median(BFs2) med.1 <- 1/stats::median(BFs1) perc10.1 <- as.numeric(stats::quantile(BFs1, .10)) perc90.1 <- as.numeric(stats::quantile(BFs1, .90)) perc10.2 <- as.numeric(stats::quantile(BFs2, .10)) perc90.2 <- as.numeric(stats::quantile(BFs2, .90)) return(c("1" = type1, "2" = type2, "de" = de, "aoi" = aoi, "med.1" = med.1, "med.2" = med.2, "quant10.1" = perc10.1, "quant90.1" = perc90.1, "quant10.2" = perc10.2, "quant90.2" = perc90.2)) }
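# Illustration (not part of the package, hypothetical values): eval_const()
# returns the proportion of sampled mean vectors satisfying every row of the
# constraint matrix, which is how the fit and complexity above are estimated.
if (FALSE) {
  hyp <- matrix(c(1, -1), nrow = 1)                      # encodes mu1 - mu2 > 0
  samples <- samp_dist(1000, means = c(0.3, 0), sds = 1)
  eval_const(hyp, samples)                               # approx. P(mu1 > mu2) under these draws
}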
/scratch/gouwar.j/cran-all/cranData/BayesianPower/R/utils.R
## ----------------------------------------------------------------------------- R <- matrix(c(1,-1,0,0,1,-1), nrow = 2, byrow = TRUE) mu <- c(.4, .2, 0) R mu R %*% mu (R %*% mu) > 0 ## ---- eval = FALSE------------------------------------------------------------ # h1 <- matrix(c(1,-1,0,0,1,-1), nrow= 2, byrow= TRUE) # h2 <- 'c' # m1 <- c(.4,.2,0) # m2 <- c(.2,0,.1) # bayes_power(40, m1, m2, h1, h2) ## ---- eval = FALSE------------------------------------------------------------ # h1 <- matrix(c(1,-1,0,0,0,1,-1,0,0,0,1,-1), nrow= 3, byrow= TRUE) # h2 <- matrix(c(0,-1,1,0,0,1,0,-1,-1,0,0,1), nrow = 3, byrow= TRUE) # m1 <- c(.7,.3,.1,0) # m2 <- c(0,.4,.5,.1) # bayes_power(34, h1, h2, m1, m2, bound1 = 3, bound2 = 1/3) ## ---- eval = FALSE------------------------------------------------------------ # h1 <- matrix(c(1, -1, 0, # 0, 1, -1), # nrow= 2, byrow= TRUE) # h2 <- 'c' # m1 <- c(.4, .2, 0) # m2 <- c(.2, 0, .1) # bayes_sampsize(h1, h2, m1, m2, type = "de", cutoff = .125) ## ---- eval = FALSE------------------------------------------------------------ # h1 <- matrix(c(1, -1, 0, 0, # 0, 1, -1, 0, # 0, 0, 1, -1), # nrow= 3, byrow= TRUE) # h2 <- matrix(c(0, -1, 1, 0, # 0, 1, 0, -1, # -1, 0, 0, 1), # nrow = 3, byrow= TRUE) # m1 <- c(.7, .3, .1, 0) # m2 <- c(0, .4, .5, .1) # bayes_sampsize(h1, h2, m1, m2, type = "aoi", cutoff = .2, minss = 2, maxss = 500) ## ---- eval = FALSE------------------------------------------------------------ # h1 <- matrix(c(1, -1, 0, 0, # 0, 1, -1, 0, # 0, 0, 1, -1), # nrow= 3, byrow= TRUE) # h2 <- 'u' # m1 <- c(.3, .2, 0) # m2 <- c(0, 0, 0) # bayes_sampsize(h1, h2, m1, m2, type = "med.1", cutoff = 3, minss = 2, maxss = 500)
/scratch/gouwar.j/cran-all/cranData/BayesianPower/inst/doc/bayesianpower.R
--- title: "BayesianPower" author: "Fayette Klaassen" # date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BayesianPower} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Introduction *BayesianPower* can be used for sample size determination (using `bayes_sampsize`) and power calculation (using `bayes_power`) when Bayes factors are used to compare an inequality constrained hypothesis $H_i$ to its complement $H_c$, another inequality constrained hypothesis $H_j$ or the unconstrained hypothesis $H_u$. Power is defined as a combination of controlled error probabilities. The unconditional or conditional error probabilities can be controlled. Four approaches to control these probabilities are available in the methods of this package. **Users are advised to read this vignette and the paper available at <a href = "https://doi.org/10.17605/OSF.IO/D9EAJ">10.17605/OSF.IO/D9EAJ</a> where the four available approaches are presented in detail (Klaassen, Hoijtink \& Gu, unpublished)).** ## Power calculation with `bayes_power()` `bayes_power(n, h1, h2, m1, m2, sd1=1, sd2=1, scale = 1000, bound1 = 1, bound2 = 1/bound1, datasets = 1000, nsamp = 1000, seed = 31)` ### Arguments ` n` A number. The sample size for which the error probabilities must be computed. ` h1` A constraint matrix defining H1, see below for more details. ` h2` A constraint matrix defining H2, or a character `'u'` or `'c'` for the unconstrained or complement hypothesis. `m1` A vector of expected population means under H1 (standardized), see below for more details. ` m2` A vector of expected populations means under H2 (standardized). ` m2` must be of same length as ` m1`. ` sd1` A vector of standard deviations under H1. Must be a single number (equal standard deviation under all populations), or a vector of the same length as `m1` ` sd2` A vector of standard deviations under H2. Must be a single number (equal standard deviation under all populations), or a vector of the same length as `m2` ` scale` A number or use the default `1000` to set the prior scale. ` bound1` A number. The boundary above which BF12 favors H1, see below for more details. ` bound2` A number. The boundary below which BF12 favors H2. ` datasets` A number. The number of datasets to simulate to compute the error probabilities ` nsamp` A number. The number of prior or posterior samples to determine the complexity or fit. ` seed` A number. The random seed to be set. ### Details #### Specifying hypotheses Hypotheses are defined by means of a constraint matrix, that specifies the ordered constraints between the means $\boldsymbol\mu$ using a constraint matrix $R$, such that $R \boldsymbol{\mu} > \bf{0}$, where $R$ is a matrix with $J$ columns and $K$ rows, where $J$ is the number of group means and $K$ is the number of constraints between the means, $\boldsymbol\mu$ is a vector of $J$ means and $\bf{0}$ is a vector of $K$ zeros. The constraint matrix $R$ contains a set of linear inequality constraints. Consider ```{r} R <- matrix(c(1,-1,0,0,1,-1), nrow = 2, byrow = TRUE) mu <- c(.4, .2, 0) R mu R %*% mu (R %*% mu) > 0 ``` The matrix $R$ specifies that the sum of $1 \times \mu_1$ and $-1 \times \mu_2$ and $0 \times \mu_3$ is larger than $0$, **and** the sum of $0 \times \mu_1$ and $1 \times \mu_2$ and $-1 \times \mu_3$ is larger than $0$. This can also be written as: $\mu_1 > \mu_2 > \mu_3$. For more information about the specification of constraint matrices, see for example [@hoijtink12book]. 
The argument `h1` has to be a constraint matrix as specified above. The argument `h2` can be either a constraint matrix, or the character `'u'` or `'c'` if the goal is to compare $H_1$ with $H_u$, the unconstrained hypothesis, or $H_c$ the complement hypothesis. #### Specifying population means Hypothesized population means have to be defined under $H_1$ and $H_2$, also if $H_u$ or $H_c$ are considered as $H_2$. The group specific standard deviations can be set under `sd1` and `sd2`, by default, all group standard deviations are $1$. #### Prior scale The prior scale can be set using `scale`. By default, a scale of `1000` is used. This implies that the prior covariance matrix is proportional to the standard errors of the sampled data, by a factor of `1000`. #### Setting bounds `bound1` and `bound2` describe the boundary used for interpreting a Bayes factor. If `bound1 = 1`, all $BF_{12} > 1$ are considered to express evidence in favor of $H_1$, if `bound1 = 3`, all $BF_{12} > 3$ are considered to express evidence in favor of $H_1$. Similarly, `bound2` is the boundary *below* which $BF_{12}$ is considered to express evidence in favor of $H_2$. ### Examples #### Example 1. $H_1$ vs $H_c$ An example where three group means are ordered in $H_1: \mu_1 > \mu_2 > \mu_3$ which is compared to its complement. The power is determined for $n = 40$ ```{r, eval = FALSE} h1 <- matrix(c(1,-1,0,0,1,-1), nrow= 2, byrow= TRUE) h2 <- 'c' m1 <- c(.4,.2,0) m2 <- c(.2,0,.1) bayes_power(40, m1, m2, h1, h2) ``` #### Example 2. H1 vs H2 An example where four group means are ordered in $H_1: \mu_1 > \mu_2 > \mu_3 > \mu_4$ and in $H_2: \mu_3 > \mu_2 > \ mu_4 > \mu_1$. Only Bayes factors larger than $3$ are considered evidence in favor of $H_1$ and only Bayes factors smaller than $1/3$ are considered evidence in favor of $H_2$. ```{r, eval = FALSE} h1 <- matrix(c(1,-1,0,0,0,1,-1,0,0,0,1,-1), nrow= 3, byrow= TRUE) h2 <- matrix(c(0,-1,1,0,0,1,0,-1,-1,0,0,1), nrow = 3, byrow= TRUE) m1 <- c(.7,.3,.1,0) m2 <- c(0,.4,.5,.1) bayes_power(34, h1, h2, m1, m2, bound1 = 3, bound2 = 1/3) ``` ## Sample size determination with `bayes_sampsize()` ` bayes_sampsize(h1, h2, m1, m2, sd1 = 1, sd2 = 1, scale = 1000, type = 1, cutoff, bound1 = 1, bound2 = 1 / bound1, datasets = 1000, nsamp = 1000, minss = 2, maxss = 1000, seed = 31) ` ### Arguments The arguments are the same as for `bayes_power()` with the addition of: `type`A character. The type of error to be controlled. The options are: `"1", "2", "de", "aoi", "med.1", "med.2"`. See below for more details. `cutoff` A number. The cutoff criterion for type. If `type` is `"1", "2", "de", "aoi"`, `cutoff` must be between $0$ and $1$. If `type` is `"med.1"` or `"med.2"`, `cutoff` must be larger than $1$. See below for more details. `minss` A number. The minimum sample size. `maxss` A number. The maximum sample size. ### Details `bayes_sampsize()` iteratively uses `bayes_power()` to determine the error probabilities for a sample size, evaluates whether the chosen error is below the cutoff, and adjusts the sample size. #### `type` [@klaassenPIH] describes in detail the different types of controlling error probabilities that can be considered. Specifying `"1"` or `"2"` indicates that the Type 1 or Type 2 error probability has to be controlled, respectively the probability of concluding $H_2$ is the best hypothesis when $H_1$ is true or concluding that $H_1$ is the best hypothesis when $H_2$ is true. 
Note that when $H_1$ or $H_2$ is considered the best hypothesis depends on the values chosen for `bound1` and `bound2`. Specifying `"de"` or `"aoi" ` indicates that the Decision error probability (average of Type 1 and Type 2) or the probability of Indecision has to be controlled. Finally, specifying `" med.1"` or `"med.2"` indicates the minimum desired median $BF_{12}$ when $H_1$ is true, or the minimum desired median $BF_{21}$ when $H_2$ is true. ### Examples #### Example 1. $H_1$ versus $H_c$, controlling decision error ```{r, eval = FALSE} h1 <- matrix(c(1, -1, 0, 0, 1, -1), nrow= 2, byrow= TRUE) h2 <- 'c' m1 <- c(.4, .2, 0) m2 <- c(.2, 0, .1) bayes_sampsize(h1, h2, m1, m2, type = "de", cutoff = .125) ``` #### Example 2. H1 versus H2, controlling indecision error ```{r, eval = FALSE} h1 <- matrix(c(1, -1, 0, 0, 0, 1, -1, 0, 0, 0, 1, -1), nrow= 3, byrow= TRUE) h2 <- matrix(c(0, -1, 1, 0, 0, 1, 0, -1, -1, 0, 0, 1), nrow = 3, byrow= TRUE) m1 <- c(.7, .3, .1, 0) m2 <- c(0, .4, .5, .1) bayes_sampsize(h1, h2, m1, m2, type = "aoi", cutoff = .2, minss = 2, maxss = 500) ``` #### Example 3. $H_1$ versus $H_u$, controlling median Bayes factor ```{r, eval = FALSE} h1 <- matrix(c(1, -1, 0, 0, 0, 1, -1, 0, 0, 0, 1, -1), nrow= 3, byrow= TRUE) h2 <- 'u' m1 <- c(.3, .2, 0) m2 <- c(0, 0, 0) bayes_sampsize(h1, h2, m1, m2, type = "med.1", cutoff = 3, minss = 2, maxss = 500) ``` ## References Hoijtink, H. (2012). *Informative hypotheses. Theory and practice for behavioral and social scientists.* Boca Raton: Chapman Hall/CRC. Klaassen, F., Hoijtink, H., Gu, X. (unpublished). *The power of informative hypotheses.* Pre-print available at <a href = "https://doi.org/10.17605/OSF.IO/D9EAJ">https://doi.org/10.17605/OSF.IO/D9EAJ</a>
/scratch/gouwar.j/cran-all/cranData/BayesianPower/inst/doc/bayesianpower.Rmd
--- title: "BayesianPower" author: "Fayette Klaassen" # date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BayesianPower} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Introduction *BayesianPower* can be used for sample size determination (using `bayes_sampsize`) and power calculation (using `bayes_power`) when Bayes factors are used to compare an inequality constrained hypothesis $H_i$ to its complement $H_c$, another inequality constrained hypothesis $H_j$ or the unconstrained hypothesis $H_u$. Power is defined as a combination of controlled error probabilities. The unconditional or conditional error probabilities can be controlled. Four approaches to control these probabilities are available in the methods of this package. **Users are advised to read this vignette and the paper available at <a href = "https://doi.org/10.17605/OSF.IO/D9EAJ">10.17605/OSF.IO/D9EAJ</a> where the four available approaches are presented in detail (Klaassen, Hoijtink \& Gu, unpublished)).** ## Power calculation with `bayes_power()` `bayes_power(n, h1, h2, m1, m2, sd1=1, sd2=1, scale = 1000, bound1 = 1, bound2 = 1/bound1, datasets = 1000, nsamp = 1000, seed = 31)` ### Arguments ` n` A number. The sample size for which the error probabilities must be computed. ` h1` A constraint matrix defining H1, see below for more details. ` h2` A constraint matrix defining H2, or a character `'u'` or `'c'` for the unconstrained or complement hypothesis. `m1` A vector of expected population means under H1 (standardized), see below for more details. ` m2` A vector of expected populations means under H2 (standardized). ` m2` must be of same length as ` m1`. ` sd1` A vector of standard deviations under H1. Must be a single number (equal standard deviation under all populations), or a vector of the same length as `m1` ` sd2` A vector of standard deviations under H2. Must be a single number (equal standard deviation under all populations), or a vector of the same length as `m2` ` scale` A number or use the default `1000` to set the prior scale. ` bound1` A number. The boundary above which BF12 favors H1, see below for more details. ` bound2` A number. The boundary below which BF12 favors H2. ` datasets` A number. The number of datasets to simulate to compute the error probabilities ` nsamp` A number. The number of prior or posterior samples to determine the complexity or fit. ` seed` A number. The random seed to be set. ### Details #### Specifying hypotheses Hypotheses are defined by means of a constraint matrix, that specifies the ordered constraints between the means $\boldsymbol\mu$ using a constraint matrix $R$, such that $R \boldsymbol{\mu} > \bf{0}$, where $R$ is a matrix with $J$ columns and $K$ rows, where $J$ is the number of group means and $K$ is the number of constraints between the means, $\boldsymbol\mu$ is a vector of $J$ means and $\bf{0}$ is a vector of $K$ zeros. The constraint matrix $R$ contains a set of linear inequality constraints. Consider ```{r} R <- matrix(c(1,-1,0,0,1,-1), nrow = 2, byrow = TRUE) mu <- c(.4, .2, 0) R mu R %*% mu (R %*% mu) > 0 ``` The matrix $R$ specifies that the sum of $1 \times \mu_1$ and $-1 \times \mu_2$ and $0 \times \mu_3$ is larger than $0$, **and** the sum of $0 \times \mu_1$ and $1 \times \mu_2$ and $-1 \times \mu_3$ is larger than $0$. This can also be written as: $\mu_1 > \mu_2 > \mu_3$. For more information about the specification of constraint matrices, see for example [@hoijtink12book]. 
The argument `h1` has to be a constraint matrix as specified above. The argument `h2` can be either a constraint matrix, or the character `'u'` or `'c'` if the goal is to compare $H_1$ with $H_u$, the unconstrained hypothesis, or $H_c$, the complement hypothesis. #### Specifying population means Hypothesized population means have to be defined under $H_1$ and $H_2$, even if $H_u$ or $H_c$ is considered as $H_2$. The group-specific standard deviations can be set under `sd1` and `sd2`; by default, all group standard deviations are $1$. #### Prior scale The prior scale can be set using `scale`. By default, a scale of `1000` is used. This implies that the prior covariance matrix is proportional to the standard errors of the sampled data, by a factor of `1000`. #### Setting bounds `bound1` and `bound2` describe the boundary used for interpreting a Bayes factor. If `bound1 = 1`, all $BF_{12} > 1$ are considered to express evidence in favor of $H_1$; if `bound1 = 3`, all $BF_{12} > 3$ are considered to express evidence in favor of $H_1$. Similarly, `bound2` is the boundary *below* which $BF_{12}$ is considered to express evidence in favor of $H_2$. ### Examples #### Example 1. $H_1$ vs $H_c$ An example where three group means are ordered in $H_1: \mu_1 > \mu_2 > \mu_3$, which is compared to its complement. The power is determined for $n = 40$. ```{r, eval = FALSE} h1 <- matrix(c(1,-1,0,0,1,-1), nrow= 2, byrow= TRUE) h2 <- 'c' m1 <- c(.4,.2,0) m2 <- c(.2,0,.1) bayes_power(40, h1, h2, m1, m2) ``` #### Example 2. H1 vs H2 An example where four group means are ordered in $H_1: \mu_1 > \mu_2 > \mu_3 > \mu_4$ and in $H_2: \mu_3 > \mu_2 > \mu_4 > \mu_1$. Only Bayes factors larger than $3$ are considered evidence in favor of $H_1$ and only Bayes factors smaller than $1/3$ are considered evidence in favor of $H_2$. ```{r, eval = FALSE} h1 <- matrix(c(1,-1,0,0,0,1,-1,0,0,0,1,-1), nrow= 3, byrow= TRUE) h2 <- matrix(c(0,-1,1,0,0,1,0,-1,-1,0,0,1), nrow = 3, byrow= TRUE) m1 <- c(.7,.3,.1,0) m2 <- c(0,.4,.5,.1) bayes_power(34, h1, h2, m1, m2, bound1 = 3, bound2 = 1/3) ``` ## Sample size determination with `bayes_sampsize()` ` bayes_sampsize(h1, h2, m1, m2, sd1 = 1, sd2 = 1, scale = 1000, type = 1, cutoff, bound1 = 1, bound2 = 1 / bound1, datasets = 1000, nsamp = 1000, minss = 2, maxss = 1000, seed = 31) ` ### Arguments The arguments are the same as for `bayes_power()` with the addition of: `type` A character. The type of error to be controlled. The options are: `"1", "2", "de", "aoi", "med.1", "med.2"`. See below for more details. `cutoff` A number. The cutoff criterion for `type`. If `type` is `"1", "2", "de", "aoi"`, `cutoff` must be between $0$ and $1$. If `type` is `"med.1"` or `"med.2"`, `cutoff` must be larger than $1$. See below for more details. `minss` A number. The minimum sample size. `maxss` A number. The maximum sample size. ### Details `bayes_sampsize()` iteratively uses `bayes_power()` to determine the error probabilities for a sample size, evaluates whether the chosen error is below the cutoff, and adjusts the sample size. #### `type` [@klaassenPIH] describes in detail the different types of controlling error probabilities that can be considered. Specifying `"1"` or `"2"` indicates that the Type 1 or Type 2 error probability has to be controlled, respectively the probability of concluding that $H_2$ is the best hypothesis when $H_1$ is true or concluding that $H_1$ is the best hypothesis when $H_2$ is true. 
Note that whether $H_1$ or $H_2$ is considered the best hypothesis depends on the values chosen for `bound1` and `bound2`. Specifying `"de"` or `"aoi"` indicates that the decision error probability (the average of the Type 1 and Type 2 error probabilities) or the probability of indecision has to be controlled. Finally, specifying `"med.1"` or `"med.2"` indicates that `cutoff` is the minimum desired median $BF_{12}$ when $H_1$ is true, or the minimum desired median $BF_{21}$ when $H_2$ is true.

### Examples
#### Example 1. $H_1$ versus $H_c$, controlling decision error

```{r, eval = FALSE}
h1 <- matrix(c(1, -1, 0, 0, 1, -1), nrow = 2, byrow = TRUE)
h2 <- 'c'
m1 <- c(.4, .2, 0)
m2 <- c(.2, 0, .1)
bayes_sampsize(h1, h2, m1, m2, type = "de", cutoff = .125)
```

#### Example 2. $H_1$ versus $H_2$, controlling indecision error

```{r, eval = FALSE}
h1 <- matrix(c(1, -1, 0, 0, 0, 1, -1, 0, 0, 0, 1, -1), nrow = 3, byrow = TRUE)
h2 <- matrix(c(0, -1, 1, 0, 0, 1, 0, -1, -1, 0, 0, 1), nrow = 3, byrow = TRUE)
m1 <- c(.7, .3, .1, 0)
m2 <- c(0, .4, .5, .1)
bayes_sampsize(h1, h2, m1, m2, type = "aoi", cutoff = .2, minss = 2, maxss = 500)
```

#### Example 3. $H_1$ versus $H_u$, controlling the median Bayes factor

```{r, eval = FALSE}
h1 <- matrix(c(1, -1, 0, 0, 0, 1, -1, 0, 0, 0, 1, -1), nrow = 3, byrow = TRUE)
h2 <- 'u'
m1 <- c(.3, .2, 0)
m2 <- c(0, 0, 0)
bayes_sampsize(h1, h2, m1, m2, type = "med.1", cutoff = 3, minss = 2, maxss = 500)
```

## References
Hoijtink, H. (2012). *Informative hypotheses. Theory and practice for behavioral and social scientists.* Boca Raton: Chapman & Hall/CRC.

Klaassen, F., Hoijtink, H., & Gu, X. (unpublished). *The power of informative hypotheses.* Pre-print available at <a href = "https://doi.org/10.17605/OSF.IO/D9EAJ">https://doi.org/10.17605/OSF.IO/D9EAJ</a>
/scratch/gouwar.j/cran-all/cranData/BayesianPower/vignettes/bayesianpower.Rmd
#' @keywords internal "_PACKAGE" ## usethis namespace: start ## usethis namespace: end NULL
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/R/BayesianReasoning-package.R
#' Plot PPV values for a diagnostic and a screening group
#'
#' Plot PPV associated with different levels of FP and a specific Sensitivity, for two different Prevalence groups.
#'
#' @param max_FP False positive rate (1-Specificity) [0-100].
#' @param Sensitivity Sensitivity of the test [0-100].
#' @param prevalence_screening_group Prevalence of the screening group, 1 out of x [1-Inf].
#' @param prevalence_diagnostic_group Prevalence of the diagnostic group, 1 out of x [1-Inf].
#' @param folder Where to save the plot (the filename will be automatically created using the plot parameters)
#' @param labels_prevalence Labels to use for both groups.
#'
#' @return Shows a plot or, if given a folder argument, saves a .png version of the plot
#' @export
#' @importFrom reshape2 melt
#' @importFrom ggplot2 ggplot aes geom_line scale_colour_hue theme_minimal theme element_text scale_x_continuous scale_y_continuous labs
#' @importFrom tidyr gather
#' @importFrom magrittr %>%
#' @importFrom dplyr mutate rename
#' @importFrom tibble as_tibble
#'
#' @examples
#'
#' # Example 1
#' PPV_diagnostic_vs_screening(
#'   max_FP = 10, Sensitivity = 100,
#'   prevalence_screening_group = 1500,
#'   prevalence_diagnostic_group = 3
#' )
#'
#' # Example 2. With custom labels
#' PPV_diagnostic_vs_screening(
#'   max_FP = 10, Sensitivity = 100,
#'   prevalence_screening_group = 1667,
#'   prevalence_diagnostic_group = 44,
#'   labels_prevalence = c("20 y.o.", "50 y.o.")
#' )
PPV_diagnostic_vs_screening <- function(max_FP = 10,
                                        Sensitivity = 100,
                                        prevalence_screening_group = 100,
                                        prevalence_diagnostic_group = 2,
                                        labels_prevalence = c("Screening", "Diagnostic"),
                                        # save_plot = FALSE,
                                        folder = "") {

  # FIXED parameters --------------------------------------------------------
  min_Prevalence <- 1

  # FP
  Steps_FP <- 100
  Step_size_FP <- max_FP / Steps_FP
  min_FP <- 0
  FP <- seq(min_FP, max_FP, Step_size_FP)

  # Calculate PPVs ----------------------------------------------------------
  Real_Prevalence_PPV <- list()
  Real_Prevalence_PPV <- ((Sensitivity * min_Prevalence) / ((Sensitivity * min_Prevalence) + ((prevalence_screening_group - 1) * FP))) * 100

  Study_Prevalence_PPV <- list()
  Study_Prevalence_PPV <- ((Sensitivity * min_Prevalence) / ((Sensitivity * min_Prevalence) + ((prevalence_diagnostic_group - 1) * FP))) * 100

  # Build DF ----------------------------------------------------------------
  FINAL <- FP %>%
    as_tibble() %>%
    mutate(
      Real_Prevalence = Real_Prevalence_PPV,
      Study_Prevalence = Study_Prevalence_PPV
    ) %>%
    rename(FP = value) %>%
    gather(prevalence, PPV, 2:3) %>%
    mutate(prevalence = as.factor(prevalence))

  # Plot --------------------------------------------------------------------
  Labels_plot <- c(paste0(labels_prevalence[1], " prevalence: 1 out of ", prevalence_screening_group),
                   paste0(labels_prevalence[2], " prevalence: 1 out of ", prevalence_diagnostic_group))

  p <- ggplot(data = FINAL, aes(x = FP, y = PPV, colour = prevalence)) +
    geom_line(linewidth = 1.5) +
    scale_colour_hue(l = 50, labels = Labels_plot) +
    theme_minimal() +
    theme(text = ggplot2::element_text(size = 20)) +
    scale_x_continuous(labels = function(x) paste0(x, "%")) +
    scale_y_continuous(name = "Positive Predictive Value", limits = c(0, 100), labels = function(x) paste0(x, "%")) +
    theme(legend.position = "bottom") +
    labs(
      title = "",
      subtitle = paste0("Sensitivity = ", Sensitivity, "%"),
      x = "False Positive rate",
      color = ""
    )

  if (folder != "") {
    print(p)
    plot_name <- paste0(folder, "/FP_", max_FP, "_sens_", Sensitivity, "_screening_",
prevalence_screening_group, "_diagnostic_", prevalence_diagnostic_group, ".png") ggsave(plot_name, p, dpi = 300, width = 14, height = 10) message("\n Plot created in: ", plot_name, "\n") } else { print(p) } }
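
# A minimal usage sketch (not part of the package source; the values and the
# object name out_dir are illustrative only). It assumes the package is
# attached and writes the plot to a temporary folder so the automatically
# named .png file can be inspected:
#
# out_dir <- tempdir()
# PPV_diagnostic_vs_screening(
#   max_FP = 15, Sensitivity = 95,
#   prevalence_screening_group = 1000,
#   prevalence_diagnostic_group = 10,
#   folder = out_dir
# )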
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/R/PPV_diagnostic_vs_screening.R
#' Plot PPV and NPV heatmaps #' #' Plot heatmaps showing the PPV for a given Sensitivity and a range of Prevalences and False Positive values or NPV values for a given Specificity and a range of Prevalences and True Positive values #' #' @param min_Prevalence [x] out of y prevalence of disease: [1-Inf] #' @param max_Prevalence x out of [y] prevalence of disease: [1-Inf] #' @param Sensitivity Sensitivity of test: [0-100] #' @param Specificity Specificity of test: [0-100] #' @param limits_Sensitivity c(min Sensitivity, max Sensitivity) #' @param limits_Specificity c(min Specificity, max Specificity) #' @param overlay Type of overlay: ["line", "area"] #' @param overlay_labels Labels for each point in the overlay. For example: c("80", "70", "60", "50", "40", "30", "20 y.o.") #' @param overlay_extra_info show extra info in overlay? [TRUE/FALSE] #' @param overlay_position_FP FP value (position in the x-axis) for each point in the overlay. For example: c(7, 8, 9, 12, 14, 14) #' @param overlay_position_FN FN value (position in the x-axis) for each point in the overlay. For example: c(7, 8, 9, 12, 14, 14) #' @param uncertainty_prevalence How much certainty we have about the prevalence ["high"/"low"] #' @param overlay_prevalence_1 Prevalence value (position in the y-axis) for each point in the overlay. For example: c(1, 1, 1, 2, 1, 1) #' @param overlay_prevalence_2 Prevalence value (position in the y-axis) for each point in the overlay. For example: c(26, 29, 44, 69, 227, 1667) #' @param label_title Title for the plot #' @param label_subtitle Subtitle for the plot #' @param Language Language for the plot labels: ["sp", "en"] #' @param PPV_NPV Should show PPV or NPV ["PPV", "NPV"] #' @param DEBUG Shows debug warnings [TRUE/FALSE] #' @param folder Where to save the plot (the filename would be automatically created using the plot parameters) #' @param one_out_of Show y scale as 1 out of x [TRUE, FALSE] FALSE by default #' @param steps_matrix width of PPV/NPV matrix. 100 by default #' @param ... Other parameters. Now used to pass dpi, height and width in the Show and Save plot section #' #' @return Shows a plot or, if given a folder argument, saves a .png version of the plot #' @export #' @importFrom ggplot2 ggplot aes element_text geom_tile scale_x_continuous scale_y_continuous scale_fill_gradientn labs margin annotate ggsave #' @importFrom reshape2 melt #' @importFrom dplyr mutate filter pull #' @importFrom magrittr %>% #' #' @examples #' PPV_heatmap( #' min_Prevalence = 1, #' max_Prevalence = 1000, #' Sensitivity = 100, #' Specificity = 98, #' Language = "en" #' ) PPV_heatmap <- function(min_Prevalence = 1, max_Prevalence = 1000, Sensitivity = NULL, Specificity = NULL, limits_Sensitivity = NULL, limits_Specificity = NULL, one_out_of = FALSE, overlay = "no", overlay_labels = "", overlay_extra_info = FALSE, overlay_position_FP = NULL, overlay_position_FN = NULL, overlay_prevalence_1 = NULL, overlay_prevalence_2 = NULL, uncertainty_prevalence = "high", label_title = "", label_subtitle = "", Language = "en", folder = "", PPV_NPV = "PPV", steps_matrix = 100, DEBUG = FALSE, ...) { # Process variables ------------------------------------------------------- # Get ... vars dots <- list(...) 
# CHECKS variables and sets defaults main_variables <- process_variables( min_Prevalence = min_Prevalence, max_Prevalence = max_Prevalence, Sensitivity = Sensitivity, Specificity = Specificity, limits_Sensitivity = limits_Sensitivity, limits_Specificity = limits_Specificity, overlay_labels = overlay_labels, overlay_position_FP = overlay_position_FP, overlay_position_FN = overlay_position_FN, overlay_prevalence_1 = overlay_prevalence_1, overlay_prevalence_2 = overlay_prevalence_2, PPV_NPV = PPV_NPV, one_out_of = one_out_of, overlay = overlay, steps_matrix = steps_matrix ) if (DEBUG == TRUE) { message("\nDEBUG: ", "min_Sensitivity: ", main_variables$min_Sensitivity, " max_FN: ", main_variables$max_FN, " | max_Sensitivity: ", main_variables$max_Sensitivity, " min_FN: ", main_variables$min_FN) message("DEBUG: ", "min_Specificity: ", main_variables$min_Specificity, " max_FP: ", main_variables$max_FP, " | max_Specificity: ", main_variables$max_Specificity, " min_FP: ", main_variables$min_FP) } # System parameters ------------------------------------------------------- if (overlay != "no") { overlay_tag <- paste0("_", overlay) } else { overlay_tag <- "" } if (overlay_extra_info == TRUE) { overlay_extra_info_tag <- paste0(overlay_extra_info, "_") } else { overlay_extra_info_tag <- "" } # Create PPV matrix ------------------------------------------------------- PPV_melted <- .createPPVmatrix( min_Prevalence = main_variables$min_Prevalence, max_Prevalence = main_variables$max_Prevalence, Sensitivity = main_variables$Sensitivity, Specificity = main_variables$Specificity, min_FP = main_variables$min_FP, max_FP = main_variables$max_FP, max_FN = main_variables$max_FN, min_FN = main_variables$min_FN, one_out_of = one_out_of, PPV_NPV = PPV_NPV ) # Plot -------------------------------------------------------------------- # Create plot labels in Language translated_labels <- .translate_labels( Language = Language, Sensitivity = main_variables$Sensitivity, Specificity = main_variables$Specificity, PPV_NPV = PPV_NPV ) # Number of decimals depends on the range decimals <- .number_decimals_plot_axis( PPV_NPV = PPV_NPV, min_FP = main_variables$min_FP, max_FP = main_variables$max_FP, min_FN = main_variables$min_FN, max_FN = main_variables$max_FN, min_Prevalence = main_variables$min_Prevalence, max_Prevalence = main_variables$max_Prevalence ) # Choose function depending on the type of overlay if (overlay == "line") { p <- .plot_overlay_line( PPV_melted = PPV_melted, uncertainty_prevalence = uncertainty_prevalence, min_Prevalence = main_variables$min_Prevalence, max_Prevalence = main_variables$max_Prevalence, min_FP = main_variables$min_FP, max_FP = main_variables$max_FP, max_FN = main_variables$max_FN, min_FN = main_variables$min_FN, one_out_of = one_out_of, overlay_prevalence_1 = main_variables$overlay_prevalence_1, overlay_prevalence_2 = main_variables$overlay_prevalence_2, overlay_position_FP = overlay_position_FP, overlay_position_FN = overlay_position_FN, overlay_labels = overlay_labels, decimals_x = decimals$decimals_x, decimals_y = decimals$decimals_y, label_title = label_title, label_subtitle = label_subtitle, translated_labels = translated_labels, PPV_NPV = PPV_NPV ) } else if (overlay == "area") { p <- .plot_overlay_area( PPV_NPV = PPV_NPV, one_out_of = one_out_of, min_Prevalence = main_variables$min_Prevalence, max_Prevalence = main_variables$max_Prevalence, min_FP = main_variables$min_FP, max_FP = main_variables$max_FP, max_FN = main_variables$max_FN, min_FN = main_variables$min_FN, PPV_melted = 
PPV_melted, steps_matrix = steps_matrix, decimals_x = decimals$decimals_x, decimals_y = decimals$decimals_y, label_title = label_title, label_subtitle = label_subtitle, translated_labels = translated_labels, # Overlay area specific parameters Language = Language, Sensitivity = main_variables$Sensitivity, Specificity = main_variables$Specificity, uncertainty_prevalence = uncertainty_prevalence, overlay_prevalence_1 = main_variables$overlay_prevalence_1, overlay_prevalence_2 = main_variables$overlay_prevalence_2, overlay_position_FP = overlay_position_FP, overlay_position_FN = overlay_position_FN, overlay_labels = overlay_labels, overlay_extra_info = overlay_extra_info, # Ellipsis DEBUG = DEBUG ) } else { p <- .plot_creation( PPV_melted = PPV_melted, min_Prevalence = main_variables$min_Prevalence, max_Prevalence = main_variables$max_Prevalence, min_FP = main_variables$min_FP, max_FP = main_variables$max_FP, max_FN = main_variables$max_FN, min_FN = main_variables$min_FN, one_out_of = one_out_of, decimals_x = decimals$decimals_x, decimals_y = decimals$decimals_y, label_title = label_title, label_subtitle = label_subtitle, translated_labels = translated_labels, PPV_NPV = PPV_NPV ) } # Show and Save plot ----------------------------------------------------- if (folder != "") { if (is.null(dots$dpi)) dots$dpi <- 150 if (is.null(dots$width)) dots$width <- 14 if (is.null(dots$height)) dots$height <- 10 # PPV/NPV defines what we use for filename if (PPV_NPV == "PPV") { Sensitivity_Specificity_tag <- main_variables$Sensitivity range_tag <- paste(c(main_variables$min_FP, main_variables$max_FP), collapse = "_") } else if (PPV_NPV == "NPV") { Sensitivity_Specificity_tag <- main_variables$Specificity range_tag <- paste(c(main_variables$min_FN, main_variables$max_FN), collapse = "_") } # Name and save plot_name <- paste0(folder, "/", PPV_NPV, "_", main_variables$min_Prevalence, "_", main_variables$max_Prevalence, "_", Sensitivity_Specificity_tag, "_", range_tag, overlay_tag, "_", overlay_extra_info_tag, Language, ".png") ggsave(plot_name, p, dpi = dots$dpi, width = dots$width, height = dots$height) message("\n Plot created in: ", plot_name, "\n") } print(p) }
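
# A minimal usage sketch (not part of the package source; parameter values are
# illustrative only). It assumes the package is attached and adds an "area"
# overlay marking a single test result on top of the PPV heatmap:
#
# PPV_heatmap(
#   min_Prevalence = 1, max_Prevalence = 1000,
#   Sensitivity = 90, Specificity = 95,
#   overlay = "area",
#   overlay_labels = "Example test",
#   overlay_position_FP = 5,
#   overlay_prevalence_1 = 1, overlay_prevalence_2 = 200,
#   Language = "en"
# )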
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/R/PPV_heatmap.R
# https://community.rstudio.com/t/how-to-solve-no-visible-binding-for-global-variable-note/28887 # https://www.r-bloggers.com/2019/08/no-visible-binding-for-global-variable/ utils::globalVariables(c("prevalence_1", "prevalence_2", "prevalence_pct", "FP", "NPV", "PPV", "Prevalence", "FN", "value", "prevalence", "sensitivity", "specificity", "N", "classification", "fill_str", "test_result", "type"))
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/R/globals.R
#' process_variables #' Checks and process main variables, checks for errors, creates defaults #' #' @param min_Prevalence [x] out of y prevalence of disease: [1-Inf] #' @param max_Prevalence x out of [y] prevalence of disease: [1-Inf] #' @param Sensitivity Sensitivity of the test: [0-100] #' @param Specificity Specificity of the test: [0-100] #' @param limits_Sensitivity c(min Sensitivity, max Sensitivity) #' @param limits_Specificity c(min Specificity, max Specificity) #' @param overlay_labels vector with labels for each overlay point #' @param overlay_position_FP FP value (position in the x-axis) for each point in the overlay. For example: c(7, 8, 9, 12, 14, 14) #' @param overlay_position_FN FN value (position in the x-axis) for each point in the overlay. For example: c(7, 8, 9, 12, 14, 14) #' @param overlay_prevalence_1 Prevalence value (position in the y-axis) for each point in the overlay. For example: c(1, 1, 1, 2, 1, 1) #' @param overlay_prevalence_2 Prevalence value (position in the y-axis) for each point in the overlay. For example: c(26, 29, 44, 69, 227, 1667) #' @param PPV_NPV Should show PPV or NPV ["PPV", "NPV"] #' @param one_out_of Show y scale as 1 out of x [TRUE, FALSE] FALSE by default #' @param steps_matrix width of PPV/NPV matrix. 100 by default #' @param overlay Type of overlay: ["line", "area"] #' #' @noRd #' @importFrom stats var process_variables <- function(min_Prevalence = NULL, max_Prevalence = NULL, Sensitivity = NULL, Specificity = NULL, limits_Sensitivity = NULL, limits_Specificity = NULL, overlay_labels = NULL, overlay_position_FP = NULL, overlay_position_FN = NULL, overlay_prevalence_1 = NULL, overlay_prevalence_2 = NULL, PPV_NPV = "PPV", one_out_of = NULL, overlay = "", steps_matrix = 100) { # DEFAULTS # By default we show a range of Specificities or Sensitivities of 10% (+-5%) default_plus_minus <- 5 # CHECK variables --------------------------------------------------------- if (PPV_NPV == "PPV") { # Sensitivity if (is.null(Sensitivity)) stop("\n* Sensitivity is needed in PPV_NPV == 'PPV'") if (!is.null(limits_Specificity)) { if (length(limits_Specificity) != 2) stop("\n* limits_Specificity should be a vector of length 2, now is (", paste(limits_Specificity, collapse = ", "), "). e.g.: limits_Specificity = c(90, 95)") if (limits_Specificity[1] < 0 | limits_Specificity[2] > 100) stop("\n* limits_Specificity should be between 0 and 100, now are (", paste(limits_Specificity, collapse = ", "), "). e.g.: limits_Specificity = c(90, 95)") } # Specificity if (is.null(overlay_position_FP) & !is.null(Specificity) & !is.null(limits_Specificity)) { if (var(limits_Specificity) == 0) stop("\n* limits_Specificity need two different numbers: limits_Specificity = c(min, max)") Specificity <- mean(limits_Specificity) # Only if we don't have overlay_position_FP warning("* Both Specificity (", Specificity, ") and limits_Specificity (", paste(limits_Specificity, collapse = ", "), ") have values. Ignoring Specificity and using limits_Specificity") default_plus_minus <- limits_Specificity[2] - Specificity } else if (!is.null(overlay_position_FP) & !is.null(Specificity) & !is.null(limits_Specificity)) { # If we have overlay_position_FP, that IS the Sensitivity (will be needed for the extra info in overlay) Specificity <- 100 - overlay_position_FP warning("* overlay_position_FP, Specificity (", Specificity, ") and limits_Specificity (", paste(limits_Specificity, collapse = ", "), ") have values. 
Using overlay_position_FP as Sensitivity and using limits_Sensitivity") } else if (is.null(Specificity) & is.null(limits_Specificity)) { Specificity <- 95 warning("* Specificity and limits_Specificity are NULL. Setting Specificity = ", Specificity, " and limits_Specificity = c(", Specificity - default_plus_minus, ", ", Specificity + default_plus_minus, ")") } else if (is.null(Specificity) & !is.null(limits_Specificity)) { if (var(limits_Specificity) == 0) stop("\n* limits_Specificity need two different numbers: limits_Specificity = c(min, max)") Specificity <- mean(limits_Specificity) default_plus_minus <- limits_Specificity[2] - Specificity } else if (!is.null(Specificity) & is.null(limits_Specificity)) { warning("* limits_Specificity is NULL. Setting limits_Specificity = c(", Specificity - default_plus_minus, ", ", Specificity + default_plus_minus, ")") } # If we have overlay_position_FP, that IS the Specificity (will be needed for the extra info in overlay) if (!is.null(overlay_position_FP)) Specificity <- 100 - overlay_position_FP # If after the typical processing is null, assign dummy values (will be overwritten latter) if (is.null(limits_Specificity)) limits_Specificity <- c(0, 0) # Set final limits_Specificity if (!is.null(overlay_position_FP) & !is.null(limits_Specificity)) { # message("Using limits_Specificity as is") } else { # By default we show a range of Specificity of 10% (+-5%) if (Specificity + default_plus_minus <= 100) limits_Specificity[2] <- c(Specificity + default_plus_minus) if (Specificity - default_plus_minus >= 0) limits_Specificity[1] <- c(Specificity - default_plus_minus) } if (overlay == "area" | overlay == "line") { if (is.null(overlay_position_FP)) stop("\n* overlay_position_FP needs a value") if (!is.null(overlay_position_FN)) warning("\n* overlay_position_FN should only be used for NPV plots") } } else if (PPV_NPV == "NPV") { # Specificity if (is.null(Specificity)) stop("\n* Specificity is needed in PPV_NPV == 'NPV'") if (!is.null(limits_Sensitivity)) { if (length(limits_Sensitivity) != 2) stop("\n* limits_Sensitivity should be a vector of length 2, now is (", paste(limits_Sensitivity, collapse = ", "), "). e.g.: limits_Sensitivity = c(90, 95)") if (limits_Sensitivity[1] < 0 | limits_Sensitivity[2] > 100) stop("\n* limits_Sensitivity should be between 0 and 100, now are (", paste(limits_Sensitivity, collapse = ", "), "). e.g.: limits_Sensitivity = c(90, 95)") } # Sensitivity if (is.null(overlay_position_FN) & !is.null(Sensitivity) & !is.null(limits_Sensitivity)) { if (stats::var(limits_Sensitivity) == 0) stop("\n* limits_Sensitivity need two different numbers: limits_Sensitivity = c(min, max)") Sensitivity <- mean(limits_Sensitivity) # Only if we don't have overlay_position_FN warning("* Both Sensitivity (", Sensitivity, ") and limits_Sensitivity (", paste(limits_Sensitivity, collapse = ", "), ") have values. Ignoring Sensitivity and using limits_Sensitivity") default_plus_minus <- limits_Sensitivity[2] - Sensitivity } else if (!is.null(overlay_position_FN) & !is.null(Sensitivity) & !is.null(limits_Sensitivity)) { # If we have overlay_position_FN, that IS the Sensitivity (will be needed for the extra info in overlay) Sensitivity <- 100 - overlay_position_FN warning("* overlay_position_FN, Sensitivity (", Sensitivity, ") and limits_Sensitivity (", paste(limits_Sensitivity, collapse = ", "), ") have values. 
Using overlay_position_FN as Sensitivity and using limits_Sensitivity") } else if (is.null(Sensitivity) & is.null(limits_Sensitivity)) { Sensitivity <- 95 warning("* Sensitivity and limits_Specificity are NULL. Setting Sensitivity = ", Sensitivity, " and limits_Sensitivity = c(", Sensitivity - default_plus_minus, ", ", Sensitivity + default_plus_minus, ")") } else if (is.null(Sensitivity) & !is.null(limits_Sensitivity)) { if (stats::var(limits_Sensitivity) == 0) stop("\n* limits_Sensitivity need two different numbers: limits_Sensitivity = c(min, max)") Sensitivity <- mean(limits_Sensitivity) default_plus_minus <- limits_Sensitivity[2] - Sensitivity } else if (!is.null(Sensitivity) & is.null(limits_Sensitivity)) { warning("* limits_Sensitivity is NULL. Setting limits_Sensitivity = c(", Sensitivity - default_plus_minus, ", ", Sensitivity + default_plus_minus, ")") } # If we have overlay_position_FN, that IS the Sensitivity (will be needed for the extra info in overlay) if (!is.null(overlay_position_FN)) Sensitivity <- 100 - overlay_position_FN # limits_Sensitivity if (is.null(limits_Sensitivity)) limits_Sensitivity <- c(0, 0) # Set final limits_Sensitivity if (!is.null(overlay_position_FN) & !is.null(limits_Sensitivity)) { # message("Using limits_Sensitivity as is") } else { # By default we show a range of Sensitivities of 10% (+-5%) if (Sensitivity + default_plus_minus <= 100) limits_Sensitivity[2] <- c(Sensitivity + default_plus_minus) if (Sensitivity - default_plus_minus >= 0) limits_Sensitivity[1] <- c(Sensitivity - default_plus_minus) } if (overlay == "area" | overlay == "line") { if (is.null(overlay_position_FN)) stop("\n* overlay_position_FN needs a value") if (!is.null(overlay_position_FP)) warning("\n* overlay_position_FP should only be used for PPV plots") } } # Translate --------------------------------------------------------------- # Translate limits max_Sensitivity <- limits_Sensitivity[2] min_Sensitivity <- limits_Sensitivity[1] max_Specificity <- limits_Specificity[2] min_Specificity <- limits_Specificity[1] # Translate to FN and FP max_FN <- (100 - min_Sensitivity) min_FN <- (100 - max_Sensitivity) max_FP <- (100 - min_Specificity) min_FP <- (100 - max_Specificity) # Check dimensions ----------------------------------------------------------- # CHECKS if (min_Prevalence < 1) { warning("\n[WARNING]: min_Prevalence (", min_Prevalence, ") is < 1. \n[EXPECTED]: min_Prevalence should be an integer > 0.\n[CHANGED]: min_Prevalence = 1") min_Prevalence <- 1 } if (min_Prevalence > max_Prevalence) { warning("\n[WARNING]: min_Prevalence (", min_Prevalence, ") is > than max_Prevalence (", max_Prevalence, ").\n[EXPECTED]: min_Prevalence should be smaller than max_Prevalence.\n[CHANGED]: min_Prevalence = max_Prevalence/2") min_Prevalence <- max_Prevalence / 2 } else if (min_Prevalence == max_Prevalence) { warning("\n[WARNING]: min_Prevalence (", min_Prevalence, ") is == max_Prevalence (", max_Prevalence, ").\n[EXPECTED]: min_Prevalence should be smaller than max_Prevalence.\n[CHANGED]: max_Prevalence = min_Prevalence * 2") max_Prevalence <- min_Prevalence * 2 } # If the dimensions of the overlay are bigger, adjust max_FP and max_Prevalence if (overlay == "area" | overlay == "line") { if (is.null(overlay_prevalence_1) & is.null(overlay_prevalence_2)) stop("* Need a prevalence for the overlay. 
Use the overlay_prevalence_1 and overlay_prevalence_2 parameters (overlay_prevalence_1 out of overlay_prevalence_2)") if (is.null(overlay_prevalence_1)) stop("* Missing the overlay_prevalence_1 parameter for the overlay's prevalence (overlay_prevalence_1 out of overlay_prevalence_2)") if (is.null(overlay_prevalence_2)) stop("* Missing the overlay_prevalence_2 parameter for the overlay's prevalence (overlay_prevalence_1 out of overlay_prevalence_2)") if (overlay == "area" & length(overlay_prevalence_1) > 1) stop("* overlay_prevalence_1 has > 1 value. Not allowed in overlay = 'area'. Did you meant overlay = 'line'? ") # General case. Then we have specific for PPV and NPV below if (overlay == "line" & (length(overlay_prevalence_1) != length(overlay_prevalence_2))) stop("* overlay_prevalence_1 and overlay_prevalence_2 need to have the same number of values. Now they have ", length(overlay_prevalence_1), " and ", length(overlay_prevalence_2)) if (overlay == "line" & !is.null(overlay_labels) & length(overlay_labels) != 1) { if (length(overlay_prevalence_1) != length(overlay_labels)) stop("* overlay_labels needs 0, 1 or the same number of values that overlay_prevalence_1 (", length(overlay_prevalence_1), "). Now it has ", length(overlay_labels)) } # CHECK overlay_prevalence_1/overlay_prevalence_2 fits into min_Prevalence/max_Prevalence if (any(min_Prevalence / max_Prevalence > overlay_prevalence_1 / overlay_prevalence_2)) { index_issue <- which(min_Prevalence / max_Prevalence > overlay_prevalence_1 / overlay_prevalence_2) warning("\n[WARNING]: min_Prevalence/max_Prevalence > overlay_prevalence_1/overlay_prevalence_2\n[EXPECTED]: min_Prevalence/max_Prevalence should be <= overlay_prevalence_1/overlay_prevalence_2") if (max_Prevalence == overlay_prevalence_2[index_issue] & min_Prevalence != overlay_prevalence_1[index_issue]) { warning("\n[WARNING]: max_Prevalence == overlay_prevalence_2\n[CHANGED]: Changing min_Prevalence = overlay_prevalence_1") min_Prevalence <- overlay_prevalence_1[index_issue] } else if (min_Prevalence == overlay_prevalence_1[index_issue] & max_Prevalence != overlay_prevalence_2[index_issue]) { warning("\n[WARNING]: min_Prevalence == overlay_prevalence_1\n[CHANGED]: Changing max_Prevalence = overlay_prevalence_2") max_Prevalence <- overlay_prevalence_2[index_issue] } else { warning("\n[WARNING]: min_Prevalence != overlay_prevalence_1\n\t max_Prevalence != overlay_prevalence_2\n[CHANGED]: Changing max_Prevalence = overlay_prevalence_2 & min_Prevalence = overlay_prevalence_1") min_Prevalence <- overlay_prevalence_1[index_issue] max_Prevalence <- overlay_prevalence_2[index_issue] } } if (PPV_NPV == "PPV") { if (overlay == "area" & length(overlay_position_FP) > 1) stop("* overlay_position_FP has > 1 value. Not allowed in overlay = 'area'. Did you meant overlay = 'line'? ") if (overlay == "line" & (length(overlay_prevalence_1) != length(overlay_position_FP))) stop("* overlay_position_FP and overlay_prevalence_1, ... need to have the same number of values. 
Now they have ", length(overlay_position_FP), " and ", length(overlay_prevalence_1)) if (overlay == "area") { if (exists("overlay_position_FP")) { if (overlay_position_FP > max_FP) { warning("\n[WARNING]: overlay_position_FP (", overlay_position_FP, ") is > than max_FP (", max_FP, ").\n[EXPECTED]: overlay_position_FP should be smaller than max_FP\n[CHANGED]: max_FP = overlay_position_FP") max_FP <- overlay_position_FP } } } if (exists("overlay_position_FP")) { if (min(overlay_position_FP) < min_FP) { warning("\n[WARNING]: overlay_position_FP (", min(overlay_position_FP), ") is < min_FP (", min_FP, ").\n[EXPECTED]: overlay_position_FP should be >= min_FP.\n[CHANGED]: min_FP = 0") min_FP <- 0 } # if (max(overlay_position_FP) > min_FP) { # warning("\n[WARNING]: overlay_position_FP (", max(overlay_position_FP) , ") is > min_FP (", min_FP, ").\n[EXPECTED]: overlay_position_FP should be <= max_FP.\n[CHANGED]: max_FP = overlay_position_FP + 10%") # max_FP = max(overlay_position_FP) + (max(overlay_position_FP) * .1) # } } } else if (PPV_NPV == "NPV") { if (overlay == "area" & length(overlay_position_FN) > 1) stop("* overlay_position_FN has > 1 value. Not allowed in overlay = 'area'. Did you meant overlay = 'line'? ") if (overlay == "line" & (length(overlay_prevalence_1) != length(overlay_position_FN))) stop("* overlay_position_FN and overlay_prevalence_1, ... need to have the same number of values. Now they have ", length(overlay_position_FN), " and ", length(overlay_prevalence_1)) if (exists("overlay_position_FN")) { if (max(overlay_position_FN) > max_FN) { warning("\n[WARNING]: overlay_position_FN (", max(overlay_position_FN), ") is > max_FN (", max_FN, ")\n[EXPECTED]: overlay_position_FN should be <= max_FN.\n[CHANGED]: max_FN = overlay_position_FN + 10%") max_FN <- max(overlay_position_FN) + (max(overlay_position_FN) * .1) } if (min(overlay_position_FN) < min_FN) { warning("\n[WARNING]: overlay_position_FN (", min(overlay_position_FN), ") is < min_FN (", min_FN, ")\n[EXPECTED]: overlay_position_FN should be <= min_FN.\n[CHANGED]: min_FN = 0") min_FN <- 0 } } } } # Only needed in "line" because in area we calculate the position of the individual point using prevalence_PCT if (overlay == "line") { if (any(overlay_prevalence_1 > min_Prevalence)) { ratio_x <- (overlay_prevalence_1 / min_Prevalence) warning("\n[WARNING]: Some of the overlay_prevalence_1 (", min(overlay_prevalence_1), ") are > min_Prevalence (", min_Prevalence, ").\n[EXPECTED]: overlay_prevalence_1 should be >= min_Prevalence.\n[CHANGED]: overlay_prevalence_1 and overlay_prevalence_2 to ", paste(overlay_prevalence_1 * ratio_x, collapse = ", "), " and ", paste(overlay_prevalence_2 * ratio_x, collapse = ", ")) overlay_prevalence_1 <- overlay_prevalence_1 / ratio_x overlay_prevalence_2 <- overlay_prevalence_2 / ratio_x } } # Check overlay prevalence ------------------------------------------------ if (length(overlay_prevalence_1) == 1) { if (overlay_prevalence_1 > overlay_prevalence_2) { warning("\n[WARNING]: overlay_prevalence_1 (", overlay_prevalence_1, ") is > than overlay_prevalence_2 (", overlay_prevalence_2, ").\n[EXPECTED]: overlay_prevalence_1 should be smaller than overlay_prevalence_2.\n[CHANGED]: overlay_prevalence_1 = overlay_prevalence_2/2") overlay_prevalence_1 <- overlay_prevalence_2 / 2 } } else if (length(overlay_prevalence_1) > 1) { # if (DEBUG == TRUE) message("> 1 overlay") } # If the overlay prevalence is very high and we have one_out_of = TRUE, sometimes the closest row in the PPV matrix is the first one, which 
distorts the NPV calculation if (one_out_of == TRUE & overlay == "area") { # & PPV_NPV == "NPV" overlay_P <- overlay_prevalence_1 / overlay_prevalence_2 prevalence_temp <- seq(min_Prevalence, max_Prevalence, length.out = steps_matrix + 1) # *prevalence_2* x out of [y] (min_Prevalence out of max_Prevalence) prevalence_P <- prevalence_temp[1] / prevalence_temp[2] if ((overlay_P - prevalence_P) > (1 - overlay_P)) { warning("\n[WARNING]: overlay_prevalence_1/overlay_prevalence_2 closer to 1 than to the first prevalence row\n[CHANGED]: Changing max_Prevalence = (overlay_prevalence_2-overlay_prevalence_1) * 3") max_Prevalence <- (overlay_prevalence_2 - overlay_prevalence_1) * 3 } } # General CHECKS if (!is.null(Sensitivity) & overlay != c("line")) { if (Sensitivity > 100 | Sensitivity < 0) stop("* Sensitivity should be a value 0-100") } if (!is.null(Specificity) & overlay != c("line")) { if (Specificity > 100 | Specificity < 0) stop("* Specificity should be a value 0-100") } # Output ------------------------------------------- main_variables <- list( min_Prevalence = min_Prevalence, max_Prevalence = max_Prevalence, Sensitivity = Sensitivity, Specificity = Specificity, max_Sensitivity = max_Sensitivity, min_Sensitivity = min_Sensitivity, max_Specificity = max_Specificity, min_Specificity = min_Specificity, overlay_prevalence_1 = overlay_prevalence_1, overlay_prevalence_2 = overlay_prevalence_2, # Redundant max_FN = max_FN, min_FN = min_FN, max_FP = max_FP, min_FP = min_FP ) return(main_variables) } #' .createPPVmatrix #' #' Helper function to create a PPV or NPV matrix #' #' @param min_Prevalence [x] out of y prevalence of disease: [1-Inf] #' @param max_Prevalence x out of [y] prevalence of disease: [1-Inf] #' @param Sensitivity Sensitivity of test: [0-100] #' @param Specificity Specificity of test: [0-100] #' @param steps_matrix width of PPV/NPV matrix. 100 by default #' @param one_out_of Show y scale as 1 out of x [TRUE, FALSE] FALSE by default #' @param min_FP Minimum False Positive rate: [0-100] #' @param max_FP Maximum False Positive rate: [0-100] #' @param min_FN Minimum False Negative rate: [0-100] #' @param max_FN Maximum False Negative rate: [0-100] #' @param PPV_NPV Should show PPV or NPV ["PPV", "NPV"] #' #' @noRd #' @return A DF called PPV_melted #' @importFrom reshape2 melt #' .createPPVmatrix <- function(min_Prevalence = 1, max_Prevalence = 1000, Sensitivity = 100, Specificity = 99, one_out_of = TRUE, PPV_NPV = "PPV", min_FP = 0, max_FP = 10, max_FN = 0, min_FN = 10, steps_matrix = 100) { # Parameters --- # Sensitivity range (False Negatives) if (PPV_NPV == "NPV") { range_FN <- (max_FN - min_FN) step_size_FN <- range_FN / steps_matrix FN_array <- seq(min_FN, max_FN, step_size_FN) # if(length(FN_array) == 1) FN_array = rep(FN_array, steps_matrix + 1) # CATCH FN = 0 } # Specificity range (False Positives) if (PPV_NPV == "PPV") { range_FP <- (max_FP - min_FP) step_size_FP <- range_FP / steps_matrix FP_array <- seq(min_FP, max_FP, step_size_FP) # if(length(FP_array) == 1) FP_array = rep(FP_array, steps_matrix + 1) # CATCH FP = 0 } # Prevalence if (one_out_of == FALSE) { prevalence_2 <- exp(seq(log(min_Prevalence), log(max_Prevalence), length.out = steps_matrix + 1)) # *prevalence_2* x out of [y] (min_Prevalence out of max_Prevalence) # prevalence_2 <- pracma::logseq(min_Prevalence, max_Prevalence, steps_matrix + 1) # GENERATES IDENTICAL SEQUENCE... 
} else { prevalence_2 <- seq(min_Prevalence, max_Prevalence, length.out = steps_matrix + 1) # *prevalence_2* x out of [y] (min_Prevalence out of max_Prevalence) } sick_array <- rep(min_Prevalence, steps_matrix + 1) healthy_array <- prevalence_2 - min_Prevalence # PPV if (PPV_NPV == "PPV") { sensitivity_array <- rep(Sensitivity / 100, steps_matrix + 1) # Sensitivity is fixed when calculating PPV specificity_array <- (100 - FP_array) / 100 # We calculate a 100x100 PPV matrix using %o% (outer) TRUE_positives <- (sick_array %o% sensitivity_array) FALSE_positives <- (healthy_array %o% (1 - specificity_array)) # PPV Calculation --- PPV <- round(TRUE_positives / (TRUE_positives + FALSE_positives), 2) # Label columns and rows of matrix colnames(PPV) <- FP_array rownames(PPV) <- prevalence_2 # Long format para ggplot Heatmap PPV_melted <- reshape2::melt(PPV) # Give names to variables names(PPV_melted) <- c("prevalence_2", "FP", "PPV") } # NPV if (PPV_NPV == "NPV") { sensitivity_array <- (100 - FN_array) / 100 specificity_array <- rep(Specificity / 100, steps_matrix + 1) # Specificity is fixed when calculating PPV # We calculate a 100x100 PPV matrix using %o% (outer) # The order of this %o% multiplications is critical (in NPV they have to be reversed) TRUE_negatives <- (healthy_array %o% specificity_array) FALSE_negatives <- (sick_array %o% (1 - sensitivity_array)) # NPV Calculation --- NPV <- round(TRUE_negatives / (TRUE_negatives + FALSE_negatives), 2) # Label columns and rows of matrix colnames(NPV) <- FN_array rownames(NPV) <- prevalence_2 # Long format para ggplot Heatmap PPV_melted <- reshape2::melt(NPV) # %>% dplyr::select(-"Var1") # Var1 is prevalence_2, which we have from PPV # Give names to variables names(PPV_melted) <- c("prevalence_2", "FN", "NPV") } # Final touches to DF PPV_melted <- PPV_melted %>% dplyr::mutate( prevalence_1 = min_Prevalence, sensitivity = rep(sensitivity_array, each = steps_matrix + 1), specificity = rep(specificity_array, each = steps_matrix + 1) ) %>% dplyr::select(prevalence_1, prevalence_2, sensitivity, specificity, dplyr::everything()) %>% dplyr::mutate( prevalence_pct = prevalence_1 / prevalence_2, PPV_calc = (prevalence_1 * sensitivity) / ((prevalence_1 * sensitivity) + ((prevalence_2 - prevalence_1) * (1 - specificity))) ) %>% dplyr::as_tibble() return(PPV_melted) } #' .get_point_ppv_npv #' #' Get PPV or NPV for the overlay #' #' @param PPV_melted DF out of .createPPVmatrix() #' @param PPV_NPV Should show PPV or NPV ["PPV", "NPV"] #' @param Language Language for the plot labels: ["sp", "en"] #' @param Sensitivity Sensitivity of test: [0-100] #' @param Specificity Specificity of test: [0-100] #' @param overlay_prevalence_1 [x] out of y prevalence of disease #' @param overlay_prevalence_2 x out of [y] prevalence of disease #' @param overlay_position_FP FP value (position in the x-axis) for each point in the overlay. For example: c(7, 8, 9, 12, 14, 14) #' @param overlay_position_FN FN value (position in the x-axis) for each point in the overlay. For example: c(7, 8, 9, 12, 14, 14) #' @param overlay_labels vector with labels for each overlay point #' @param overlay_extra_info show extra info in overlay? [TRUE/FALSE] #' @param decimals_x Number of decimals to show in x axis [0-2] #' @param decimals_y Number of decimals to show in x axis [0-2] #' @param translated_labels Translated labels for plot. 
Output of .translate_labels() #' #' @noRd .get_point_ppv_npv <- function(PPV_melted, PPV_NPV = "PPV", Language, Sensitivity, Specificity, overlay_prevalence_1, overlay_prevalence_2, overlay_labels, overlay_extra_info = FALSE, overlay_position_FP, overlay_position_FN, translated_labels, decimals_x, decimals_y) { # message("\n", PPV_NPV,": Sensitivity: ", Sensitivity, " Specificity: ", Specificity, " overlay_position_FP: ", overlay_position_FP, " overlay_position_FN: ", overlay_position_FN, "\n") # Common vars TRUE_positives <- (overlay_prevalence_1 * Sensitivity) / 100 healthy_n <- (overlay_prevalence_2 - overlay_prevalence_1) PCT_prevalence_overlay <- overlay_prevalence_1 / overlay_prevalence_2 decimals_overlay <- 2 # X The variable that defines axis position depends on PPV_NPV if (PPV_NPV == "PPV") { Specificity <- (100 - overlay_position_FP) } else if (PPV_NPV == "NPV") { Sensitivity <- (100 - overlay_position_FN) } # Process labels for area overlay sick_positive <- (overlay_prevalence_1 * Sensitivity) / 100 healthy_positive <- (healthy_n * (100 - Specificity)) / 100 # Make sure we use the proper plural when needed (e.g. in Spanish) if (Language == "sp" | Language == "es") { translated_labels$label_sick <- ifelse(sick_positive > 1, paste0(translated_labels$label_sick, "s"), translated_labels$label_sick) translated_labels$label_healthy <- ifelse(healthy_n > 1, paste0(translated_labels$label_healthy, "s"), translated_labels$label_healthy) } else { translated_labels$label_sick <- translated_labels$label_sick translated_labels$label_healthy <- translated_labels$label_healthy } if (overlay_extra_info == TRUE) { extra_info_overlay <- paste0( "\n ---------------------------------------------", "\n", overlay_prevalence_1, " ", translated_labels$label_sick, ": ", round(TRUE_positives, decimals_overlay), " (+) ", round(overlay_prevalence_1 - TRUE_positives, decimals_overlay), " (-)", "\n", healthy_n, " ", translated_labels$label_healthy, ": ", round((healthy_n) - ((healthy_n) * (100 - Specificity)) / 100, decimals_overlay), " (-) ", round(((healthy_n) * (100 - Specificity)) / 100, decimals_overlay), " (+) " ) } else { extra_info_overlay <- "" } # Get PPV or NPV value --- if (PPV_NPV == "NPV") { DF_point_PPV_NPV <- PPV_melted %>% dplyr::filter( # Closest value to PCT_prevalence_overlay & overlay_position_FP_FN abs(prevalence_pct - PCT_prevalence_overlay) == min(abs(prevalence_pct - PCT_prevalence_overlay)) & abs(FN - overlay_position_FN) == min(abs(FN - overlay_position_FN)) ) DF_point_PPV_NPV <- DF_point_PPV_NPV[1, ] # Manually calculate NPV calculated_NPV <- round( ((Specificity) * (healthy_n)) / (((Specificity) * (healthy_n)) + (overlay_prevalence_1 * overlay_position_FN)), 2 ) DEBUG_MESSAGE <- paste0( "TRUE_positives: ", TRUE_positives, " | Specificity: ", Specificity, " | overlay_position_FN: ", overlay_position_FN, " | overlay_prevalence_1: ", overlay_prevalence_1, " | overlay_prevalence_2: ", overlay_prevalence_2, "\n", "calculated_NPV: ", calculated_NPV * 100, "%", " | NPV in PPV_melted: ", DF_point_PPV_NPV$NPV * 100, "%", " | DIFF: ", round(calculated_NPV - DF_point_PPV_NPV$NPV, 2) * 100, "%" ) # Overlay message Details_point_PPV_NPV <- paste0( overlay_labels, "\n", translated_labels$label_y_axis, ": ", overlay_prevalence_1, " ", translated_labels$label_prevalence, " ", overlay_prevalence_2, "\n", translated_labels$label_caption_name, ": ", Specificity, "%", "\n", translated_labels$label_x_axis, ": ", overlay_position_FN, "%", extra_info_overlay ) point_PPV_NPV <- calculated_NPV * 100 } 
else if (PPV_NPV == "PPV") { DF_point_PPV_NPV <- PPV_melted %>% dplyr::filter( # Closest value to PCT_prevalence_overlay & overlay_position_FP_FN abs(prevalence_pct - PCT_prevalence_overlay) == min(abs(prevalence_pct - PCT_prevalence_overlay)) & abs(FP - overlay_position_FP) == min(abs(FP - overlay_position_FP)) ) DF_point_PPV_NPV <- DF_point_PPV_NPV[1, ] # Manually calculate PPV calculated_PPV <- round( (Sensitivity * overlay_prevalence_1) / ((Sensitivity * overlay_prevalence_1) + (healthy_n) * overlay_position_FP), 2 ) DEBUG_MESSAGE <- paste0( "Sensitivity: ", Sensitivity, " | overlay_position_FP: ", overlay_position_FP, " | overlay_prevalence_1: ", overlay_prevalence_1, " | overlay_prevalence_2: ", overlay_prevalence_2, "\n", "calculated_PPV: ", calculated_PPV * 100, "%", " | PPV in PPV_melted: ", DF_point_PPV_NPV$PPV * 100, "%", " | DIFF: ", round(calculated_PPV - DF_point_PPV_NPV$PPV, 2) * 100, "%" ) # overlay Details_point_PPV_NPV <- paste0( overlay_labels, "\n", translated_labels$label_y_axis, ": ", overlay_prevalence_1, " ", translated_labels$label_prevalence, " ", overlay_prevalence_2, "\n", translated_labels$label_caption_name, ": ", Sensitivity, "%", "\n", translated_labels$label_x_axis, ": ", paste0(round((100 - Specificity), decimals_overlay), "% "), extra_info_overlay ) # point_PPV_NPV = DF_point_PPV_NPV %>% dplyr::mutate(PPV = round(PPV * 100, 2)) %>% dplyr::pull(PPV) point_PPV_NPV <- calculated_PPV * 100 } # Function outputs list( Details_point_PPV_NPV = Details_point_PPV_NPV, point_PPV_NPV = point_PPV_NPV, # size_overlay_text = nchar(paste0(overlay_prevalence_1, " ", translated_labels$label_prevalence, " ", overlay_prevalence_2)), DEBUG_MESSAGE = DEBUG_MESSAGE ) } #' .number_decimals_plot_axis #' #' The number of decimal places in the x and y axis label depends on how wide the range is. 
#' #' @param PPV_NPV Should show PPV or NPV ["PPV", "NPV"] #' @param min_FP Minimum False Positive rate: [0-100] #' @param max_FP Maximum False Positive rate: [0-100] #' @param min_FN Minimum False Negative rate: [0-100] #' @param max_FN Maximum False Negative rate: [0-100] #' @param min_Prevalence [x] out of y prevalence of disease: [1-Inf] #' @param max_Prevalence x out of [y] prevalence of disease: [1-Inf] #' #' @noRd .number_decimals_plot_axis <- function(PPV_NPV = "PPV", min_FP = 0, max_FP, min_FN, max_FN, min_Prevalence, max_Prevalence) { # The vars to calculate range depend on PPV NPV if (PPV_NPV == "PPV") { max_FP_FN <- max_FP min_FP_FN <- min_FP } else if (PPV_NPV == "NPV") { max_FP_FN <- max_FN min_FP_FN <- min_FN } # Number of decimals x AXIS if (max_FP_FN - min_FP_FN < 1) { decimals_x <- 2 } else if (max_FP_FN - min_FP_FN <= 5) { decimals_x <- 1 } else if (max_FP_FN - min_FP_FN > 5) { decimals_x <- 0 } # Number of decimals y AXIS if (max_Prevalence - min_Prevalence < 2) { decimals_y <- 2 } else if (max_Prevalence - min_Prevalence <= 64) { decimals_y <- 1 } else if (max_Prevalence - min_Prevalence > 64) { decimals_y <- 0 } # Output vars --- list( "decimals_x" = decimals_x, "decimals_y" = decimals_y ) } #' .plot_creation #' #' Function to create the main heatmap plot #' #' @param PPV_melted DF out of .createPPVmatrix() #' @param PPV_NPV Should show PPV or NPV ["PPV", "NPV"] #' @param min_Prevalence [x] out of y prevalence of disease: [1-Inf] #' @param max_Prevalence x out of [y] prevalence of disease: [1-Inf] #' @param min_FP Minimum False Positive rate: [0-100] #' @param max_FP Maximum False Positive rate: [0-100] #' @param min_FN Minimum False Negative rate: [0-100] #' @param max_FN Maximum False Negative rate: [0-100] #' @param steps_matrix width of PPV/NPV matrix. 100 by default #' @param decimals_x Number of decimals to show in x axis [0-2] #' @param decimals_y Number of decimals to show in y axis [0-2] #' @param label_title Title for the plot #' @param label_subtitle Subtitle for the plot #' @param translated_labels Translated labels for plot. 
Output of .translate_labels() #' @param one_out_of Show y scale as 1 out of x [TRUE, FALSE] FALSE by default #' #' @noRd #' @importFrom ggplot2 ggplot aes geom_tile scale_x_continuous scale_y_continuous scale_fill_gradientn labs margin element_text #' .plot_creation <- function(PPV_NPV = "PPV", one_out_of = TRUE, min_Prevalence, max_Prevalence, min_FP = 0, max_FP, max_FN, min_FN, PPV_melted, steps_matrix = 100, decimals_x, decimals_y, label_title = "", label_subtitle = "", translated_labels = translated_labels # DEBUG_MESSAGE = "" ) { # Global variables --- # Colors PPV # https://www.google.com/search?q=color+picker # Palettes: 0%, 25%, 2550%, 75%, 100% if (PPV_NPV == "PPV") { Paleta_legend <- c("white", "grey", "black", "yellowgreen", "chartreuse4") # Original } else if (PPV_NPV == "NPV") { # Paleta_legend = c("white", "grey", "black","#bd7afa", "#420080") # Violet Paleta_legend <- c("white", "grey", "black", "#f7d479", "#ffb300") # Orange } # Breaks and labels for PPV/NPV legend breaks_legend <- c(0, 0.25, 0.5, 0.75, 1) labels_legend <- c(0, 25, 50, 75, 100) # False Positives (x axis) Steps_FP <- steps_matrix range_FP <- (max_FP - min_FP) step_size_FP <- range_FP / Steps_FP # Sensitivity (For NPV plot) Steps_FN <- steps_matrix # min_FN <- 0 # max_FN <- (100 - Sensitivity) range_FN <- (max_FN - min_FN) step_size_FN <- range_FN / Steps_FN # PPV --------------------------------------------------------------------- if (PPV_NPV == "PPV") { # Create plot if (one_out_of == TRUE) { p <- ggplot2::ggplot(PPV_melted, ggplot2::aes(FP, (prevalence_2))) } else { p <- ggplot2::ggplot(PPV_melted, ggplot2::aes(FP, (prevalence_pct))) } # BREAKS X # [TODO] Can USE PPV_melted to get this? breaks_x <- round(seq(from = min_FP, to = max_FP, by = step_size_FP * 10), decimals_x) # With no decimals sometimes the breaks are not equidistant. This is a hacky way to solve it if (length(unique(diff(breaks_x))) > 1) breaks_x <- round(seq(from = min_FP, to = max_FP, by = step_size_FP * 10), decimals_x + 1) # PPV tiles p <- p + ggplot2::geom_tile(ggplot2::aes(fill = PPV), colour = "white") # NPV --------------------------------------------------------------------- } else if (PPV_NPV == "NPV") { # Create plot if (one_out_of == TRUE) { p <- ggplot2::ggplot(PPV_melted, ggplot2::aes(FN, (prevalence_2))) } else { p <- ggplot2::ggplot(PPV_melted, ggplot2::aes(FN, (prevalence_pct))) } # BREAKS X # [TODO] Can USE PPV_melted to get this? breaks_x <- round(seq(min_FN, max_FN, step_size_FN * 10), decimals_x) # With no decimals sometimes the breaks are not equidistant. 
This is a hacky way to solve it if (length(unique(diff(breaks_x))) > 1) breaks_x <- round(seq(from = min_FN, to = max_FN, by = step_size_FN * 10), decimals_x + 1) # NPV tiles p <- p + ggplot2::geom_tile(ggplot2::aes(fill = NPV), colour = "white") } labels_x <- paste0(breaks_x, "%") p <- p + ggplot2::scale_x_continuous(breaks = breaks_x, labels = labels_x, expand = c(0, 0)) + ggplot2::scale_fill_gradientn(colours = Paleta_legend, na.value = "transparent", breaks = breaks_legend, labels = labels_legend, limits = c(0, 1), name = translated_labels$label_legend) + ggplot2::theme( text = ggplot2::element_text(size = 16), panel.background = ggplot2::element_rect(fill = "transparent"), plot.caption = ggplot2::element_text(size = 16, color = "darkgrey"), axis.title.y = ggplot2::element_text(margin = ggplot2::margin(0, 10, 0, 0)), axis.title.x = ggplot2::element_text(margin = ggplot2::margin(10, 0, 0, 0)), legend.position = c(0.94, 0.85), # horizontal, vertical legend.direction = "vertical", # legend.direction = 'horizontal', legend.margin = margin(5, 5, 10, 5) ) + ggplot2::labs( title = label_title, subtitle = label_subtitle, caption = translated_labels$label_caption, x = paste(translated_labels$label_x_axis, translated_labels$label_x_axis_extra), y = translated_labels$label_y_axis ) # Y axis breaks --- min_prevalence_pct <- min(PPV_melted$prevalence_pct) max_prevalence_pct <- max(PPV_melted$prevalence_pct) if (max_Prevalence - min_Prevalence < 20) { num_breaks <- 15 } else { num_breaks <- 10 } # BREAKS Y if (one_out_of == TRUE) { # breaks_y = unique(PPV_melted$prevalence_pct)[c(seq(1, steps_matrix, 10), 101)] breaks_y <- seq(min_Prevalence, max_Prevalence, length.out = num_breaks) # 1 out of 1 labels_y <- paste(min_Prevalence, translated_labels$label_prevalence, round(breaks_y, decimals_y)) } else { # breaks_y <- pracma::logseq(min_prevalence_pct, max_prevalence_pct, num_breaks) # IDENTICAL SEQUENCE breaks_y <- exp(seq(log(min_prevalence_pct), log(max_prevalence_pct), length.out = num_breaks)) labels_y <- paste(round(breaks_y * max_Prevalence, decimals_y), translated_labels$label_prevalence, max_Prevalence) # breaks_y * max_Prevalence } # Change scale depending on one_out_of if (one_out_of == TRUE) { p <- p + ggplot2::scale_y_continuous(breaks = breaks_y, labels = labels_y, expand = c(0, 0)) } else { # p = p + ggplot2::scale_y_log10(breaks = breaks_y, labels = labels_y, expand = c(0,0)) # trans_reverser() to reverse a log scale p <- p + ggplot2::scale_y_continuous(trans = ggforce::trans_reverser("log10"), breaks = breaks_y, labels = labels_y, expand = c(0, 0)) } # Output vars --- return(p) } #' .plot_overlay_area #' #' Add area overlay to PPV_heatmap plot #' #' @param PPV_melted DF out of .createPPVmatrix() #' @param uncertainty_prevalence . #' @param min_Prevalence [x] out of y prevalence of disease #' @param max_Prevalence x out of [y] prevalence of disease #' @param Sensitivity Sensitivity of test: [0-100] #' @param Specificity Specificity of test: [0-100] #' @param min_FP Minimum False Positive rate: [0-100] #' @param max_FP Maximum False Positive rate: [0-100] #' @param min_FN Minimum False Negative rate: [0-100] #' @param max_FN Maximum False Negative rate: [0-100] #' @param overlay_labels vector with labels for each overlay point #' @param overlay_extra_info show extra info in overlay? 
[TRUE/FALSE] #' @param PPV_NPV Should show PPV or NPV ["PPV", "NPV"] #' @param Language Language for the plot labels: ["sp", "en"] #' @param overlay_prevalence_1 [x] out of y prevalence of disease for the overlay #' @param overlay_prevalence_2 x out of [y] prevalence of disease for the overlay #' @param decimals_x Number of decimals to show in x axis [0-2] #' @param decimals_y Number of decimals to show in y axis [0-2] #' @param label_title Title for the plot #' @param label_subtitle Subtitle for the plot #' @param translated_labels Translated labels for plot. Output of .translate_labels() #' @param overlay_position_FP FP value (position in the x-axis) for each point in the overlay. For example: c(7, 8, 9, 12, 14, 14) #' @param overlay_position_FN FN value (position in the x-axis) for each point in the overlay. For example: c(7, 8, 9, 12, 14, 14) #' @param one_out_of Show y scale as 1 out of x [TRUE, FALSE] FALSE by default #' @param steps_matrix width of PPV/NPV matrix. 100 by default #' @param ... Other parameters. Now used to pass DEBUG in plot_overlay_area() call inside PPV_heatmap() #' #' @noRd #' @importFrom ggplot2 annotate .plot_overlay_area <- function(PPV_NPV = "PPV", one_out_of = TRUE, min_Prevalence, max_Prevalence, min_FP = 0, max_FP, max_FN, min_FN, PPV_melted, steps_matrix = 100, decimals_x, decimals_y, label_title = "", label_subtitle = "", translated_labels = translated_labels, # DEBUG_MESSAGE = "", # Overlay area specific parameters Language = "en", Sensitivity, Specificity, uncertainty_prevalence = "low", overlay_prevalence_1, overlay_prevalence_2, overlay_position_FP, overlay_position_FN, overlay_labels = "", overlay_extra_info = FALSE, ...) { # Get ... vars dots <- list(...) # Calculate point prevalence --- # # Use overlay prevalence as a pct PCT_prevalence_overlay <- overlay_prevalence_1 / overlay_prevalence_2 # # # Looks for closer value of prevalence_2 (prevalence_2) using the prevalence_pct # Sets the y axis position of overlay point_Prevalence_DF <- PPV_melted %>% dplyr::filter(abs(prevalence_pct - PCT_prevalence_overlay) == min(abs(prevalence_pct - PCT_prevalence_overlay))) %>% dplyr::sample_n(1) # Y axis position for overlay if (one_out_of == TRUE) { point_Prevalence <- point_Prevalence_DF %>% dplyr::pull(prevalence_2) } else { point_Prevalence <- point_Prevalence_DF %>% dplyr::pull(prevalence_pct) } # Get PPV or NPV value --- list_point_PPV <- .get_point_ppv_npv( PPV_melted = PPV_melted, PPV_NPV = PPV_NPV, Language = Language, Sensitivity = Sensitivity, Specificity = Specificity, overlay_prevalence_1 = overlay_prevalence_1, overlay_prevalence_2 = overlay_prevalence_2, overlay_labels = overlay_labels, overlay_extra_info = overlay_extra_info, overlay_position_FP = overlay_position_FP, overlay_position_FN = overlay_position_FN, decimals_x = decimals_x, decimals_y = decimals_y, translated_labels = translated_labels ) # dots list processed through ... 
if (dots$DEBUG == TRUE) message("\nDEBUG ", PPV_NPV, ": ", list_point_PPV$DEBUG_MESSAGE) # Add overlay --- # Size of geom_mark_rect() if (uncertainty_prevalence == "high") { uncertainty_prevalence_num <- .05 } else { uncertainty_prevalence_num <- .02 } # X The variable that defines axis position depends on PPV_NPV if (PPV_NPV == "PPV") { x_axis_position <- overlay_position_FP } else if (PPV_NPV == "NPV") { x_axis_position <- overlay_position_FN } p <- .plot_creation( PPV_melted = PPV_melted, min_Prevalence = min_Prevalence, max_Prevalence = max_Prevalence, min_FP = min_FP, max_FP = max_FP, max_FN = max_FN, min_FN = min_FN, one_out_of = one_out_of, decimals_x = decimals_x, decimals_y = decimals_y, translated_labels = translated_labels, label_subtitle = label_subtitle, label_title = label_title, PPV_NPV = PPV_NPV # DEBUG_MESSAGE = list_point_PPV$DEBUG_MESSAGE ) p <- p + # Overlay center (red dot) ggplot2::annotate("point", color = "red", alpha = 1, size = 1.5, x = x_axis_position, y = point_Prevalence ) + # Text + rectangle ggforce::geom_mark_rect( # Uncertainty square aes( label = paste0(translated_labels$label_PPV_NPV, ": ", list_point_PPV$point_PPV_NPV, "%"), # BOLD title white rectangle x = x_axis_position, y = point_Prevalence, # x0 = x_axis_position - 1 ), alpha = .04, expand = uncertainty_prevalence_num, fill = "red", color = "black", # Description white rectangle label.colour = "black", description = paste0(list_point_PPV$Details_point_PPV_NPV), label.width = 85, # Adjust to fit label label.minwidth = 35, # Connector (line) con.size = .2 ) # Output vars --- return(p) } #' .plot_overlay_line #' #' Add line overlay to a PPV_heatmap plot #' #' @param PPV_melted DF out of .createPPVmatrix() #' @param min_Prevalence [x] out of y prevalence of disease #' @param max_Prevalence x out of [y] prevalence of disease #' @param overlay_prevalence_1 vector with [x] out of y prevalence of disease #' @param overlay_prevalence_2 vector with x out of [y] prevalence of disease #' @param overlay_labels vector with labels for each overlay point #' @param min_FP Minimum False Positive rate: [0-100] #' @param max_FP Maximum False Positive rate: [0-100] #' @param min_FN Minimum False Negative rate: [0-100] #' @param max_FN Maximum False Negative rate: [0-100] #' @param decimals_x Number of decimals to show in x axis [0-2] #' @param decimals_y Number of decimals to show in y axis [0-2] #' @param label_title Title for the plot #' @param label_subtitle Subtitle for the plot #' @param translated_labels Translated labels for plot. Output of .translate_labels() #' @param overlay_position_FP FP value (position in the x-axis) for each point in the overlay. For example: c(7, 8, 9, 12, 14, 14) #' @param overlay_position_FN FN value (position in the x-axis) for each point in the overlay. For example: c(7, 8, 9, 12, 14, 14) #' @param PPV_NPV Should show PPV or NPV ["PPV", "NPV"] #' @param uncertainty_prevalence How big the uncertainty area should be: ["low" or "high"] #' @param one_out_of Show y scale as 1 out of x [TRUE, FALSE] FALSE by default #' @param steps_matrix width of PPV/NPV matrix. 
100 by default #' #' @noRd #' @importFrom ggplot2 annotate layer_scales #' @importFrom cli cli_alert_info .plot_overlay_line <- function(PPV_NPV = "PPV", one_out_of = TRUE, min_Prevalence, max_Prevalence, min_FP = 0, max_FP, max_FN, min_FN, PPV_melted, steps_matrix = 100, decimals_x, decimals_y, label_title = "", label_subtitle = "", translated_labels = translated_labels, # Overlay line specific parameters uncertainty_prevalence = "low", overlay_prevalence_1, overlay_prevalence_2, overlay_position_FP, overlay_position_FN, overlay_labels = "") { # Size of geom_mark_rect() if (uncertainty_prevalence == "high") { uncertainty_prevalence_num <- .02 } else if (uncertainty_prevalence == "low") { uncertainty_prevalence_num <- .01 } # Create plot after adjusting overlay dimensions p <- .plot_creation( PPV_melted = PPV_melted, min_Prevalence = min_Prevalence, max_Prevalence = max_Prevalence, min_FP = min_FP, max_FP = max_FP, max_FN = max_FN, min_FN = min_FN, one_out_of = one_out_of, decimals_x = decimals_x, decimals_y = decimals_y, translated_labels = translated_labels, label_subtitle = label_subtitle, label_title = label_title, PPV_NPV = PPV_NPV ) # X The variable that defines axis position depends on PPV_NPV if (PPV_NPV == "PPV") { x_axis_position <- overlay_position_FP overlay_position_FN <- NA } else { x_axis_position <- overlay_position_FN overlay_position_FP <- NA } # Use the equivalent of prevalence_pct when one_out_of == FALSE if (one_out_of == FALSE) { overlay_prevalence_2 <- overlay_prevalence_1 / overlay_prevalence_2 } # TODO: Not sure what is this. Repeats the first element twice and deletes last element overlay_position_x_end <- c(x_axis_position[1], x_axis_position[-length(x_axis_position)]) overlay_position_y_end <- c(overlay_prevalence_2[1], overlay_prevalence_2[-length(overlay_prevalence_2)]) # Plot Overlay --- # DF for ggforce::geom_mark_rect() DF_X <- data.frame( x_axis_position = x_axis_position, overlay_prevalence_2 = overlay_prevalence_2, overlay_labels = overlay_labels ) # https://github.com/thomasp85/ggforce/issues/209 # Calculate x0 values to anchor ggforce label bias_x = 15 range_x_plot = ggplot2::layer_scales(p)$x$range$range width_range_x_plot = range_x_plot[2] - range_x_plot[1] x0_anchor = x_axis_position - (width_range_x_plot / bias_x) if (any(x0_anchor < 0)) { cli::cli_alert_info("Label/s outside range, will anchor right") x0_anchor = x_axis_position + (width_range_x_plot / bias_x) } # Add segment p <- p + ggplot2::annotate("segment", x = x_axis_position, xend = overlay_position_x_end, y = overlay_prevalence_2, yend = overlay_position_y_end, color = "red", alpha = .1, linewidth = 3 ) + # Add point ggplot2::annotate("point", color = "red", alpha = .5, size = .8, x = x_axis_position, y = overlay_prevalence_2 ) + # Add rectangle ggforce::geom_mark_rect( data = DF_X, label.colour = "black", alpha = .04, expand = uncertainty_prevalence_num, aes( x = x_axis_position, y = overlay_prevalence_2, group = overlay_labels, label = overlay_labels, x0 = x0_anchor # Anchor position # Leave y0 free to avoid overlaps # Also, y scale changes with one_out_of parameter # y0 = y0_anchor ), fill = "red", # con.border = "none", con.size = .2 ) # Output vars return(p) } #' .translate_labels #' #' Supports showing plot labels in Spanish (sp) or English (default) #' #' @param Language Language for the plot labels: ["sp", "en"] #' @param Sensitivity Sensitivity of test: [0-100] #' @param Specificity Specificity of test: [0-100] #' @param PPV_NPV Should show PPV or NPV ["PPV", "NPV"] #' #' @noRd 
#' @return A list with labels .translate_labels <- function(Language, Sensitivity, Specificity, PPV_NPV = "PPV") { # General --- if (Language == "sp" | Language == "es") { label_sick <- "enferma" label_healthy <- "sana" } else { label_sick <- "sick" label_healthy <- "healthy" } # PPV --- if (PPV_NPV == "PPV") { # Labels if (Language == "sp" | Language == "es") { label_caption_name <- "Sensibilidad" label_caption <- paste0("Sensibilidad = ", Sensitivity, "%") label_x_axis <- "Falsos +" label_x_axis_extra <- "(1 - Especificidad)" label_y_axis <- "Prevalencia" label_prevalence <- "de" label_legend <- "Valor\nPredictivo\nPositivo (%)\n " label_PPV_NPV <- "Valor Predictivo Positivo" } else { label_caption_name <- "Sensitivity" label_caption <- paste0("Sensitivity = ", Sensitivity, "%") label_x_axis <- "False +" label_x_axis_extra <- "(1 - Specificity)" label_y_axis <- "Prevalence" label_prevalence <- "out of" label_legend <- "Positive\nPredictive\nValue (%)\n " label_PPV_NPV <- "Positive Predictive Value" } # NPV --- } else if (PPV_NPV == "NPV") { # Labels if (Language == "sp" | Language == "es") { label_caption_name <- "Especificidad" label_caption <- paste0("Especificidad = ", Specificity, "%") # Tasa de Verdaderos Negativos label_x_axis <- "Falsos -" label_x_axis_extra <- "(1 - Sensibilidad)" label_y_axis <- "Prevalencia" label_prevalence <- "de" label_legend <- "Valor\nPredictivo\nNegativo (%)\n " label_PPV_NPV <- "Valor Predictivo Negativo" } else { label_caption_name <- "Specificity" label_caption <- paste0("Specificity = ", Specificity, "%") # True Negative Rate label_x_axis <- "False -" label_x_axis_extra <- "(1 - Sensitivity)" label_y_axis <- "Prevalence" label_prevalence <- "out of" label_legend <- "Negative\nPredictive\nValue (%)\n " label_PPV_NPV <- "Negative Predictive Value" } } # Output vars --- list( label_sick = label_sick, label_healthy = label_healthy, label_caption = label_caption, label_caption_name = label_caption_name, label_x_axis = label_x_axis, label_x_axis_extra = label_x_axis_extra, label_y_axis = label_y_axis, label_prevalence = label_prevalence, label_legend = label_legend, label_PPV_NPV = label_PPV_NPV ) }
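# Illustrative usage sketch (added for clarity; .translate_labels() is an
# internal helper, so outside the package it would be reached via ':::'.
# The argument values below are arbitrary examples.)
# labs <- BayesianReasoning:::.translate_labels(Language = "en",
#                                               Sensitivity = 95,
#                                               Specificity = 90,
#                                               PPV_NPV = "PPV")
# labs$label_caption   # "Sensitivity = 95%"
# labs$label_x_axis    # "False +"
# labs$label_PPV_NPV   # "Positive Predictive Value"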
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/R/helper_functions.R
#' Show minimum possible prevalence given the test characteristics #' #' Given a FP and a desired PPV, what is the Minimum Prevalence of a Condition #' #' @param Sensitivity Sensitivity of the test: [0-100] #' @param FP_test False positive rate (1-Specificity): [0-100] #' @param min_PPV_desired Which PPV is what you consider the minimum to trust a positive result in the test: [0-100] #' #' @return A description showing the minimum necessary prevalence. #' @export #' @importFrom reshape2 melt #' #' @examples #' #' # Example 1 #' min_possible_prevalence(Sensitivity = 99.9, FP_test = .1, min_PPV_desired = 70) #' "To reach a PPV of 70 when using a test with 99.9 % Sensitivity and 0.1 % False Positive Rate, #' you need a prevalence of at least 1 out of 429" #' #' # Example 2 #' min_possible_prevalence(100, 0.1, 98) #' "To reach a PPV of 98 when using a test with 100 % Sensitivity and 0.1 % False Positive Rate, #' you need a prevalence of at least 1 out of 21" min_possible_prevalence <- function(Sensitivity = 95, FP_test = 1, min_PPV_desired = 90) { # Fixed parameters -------------------------------------------------------- Min_Prevalence <- 1 Max_Prevalence <- 10000 # CHANGE ME Steps_Prevalence <- 10000 Step_size_Prevalence <- Max_Prevalence / Steps_Prevalence Prevalence <- seq(Min_Prevalence, (1 + Max_Prevalence), Step_size_Prevalence) # Calculation ------------------------------------------------------------- # We calculate the 100x100 PPV matrix PPV <- (Sensitivity * Min_Prevalence) / ((Sensitivity * Min_Prevalence) + ((Prevalence - 1) * FP_test)) # Long format PPV_melted <- data.frame(melted_PPV = PPV, melted_Prevalence = seq_along(PPV)) # Calculate prevalence output_prevalence <- max(PPV_melted$melted_Prevalence[PPV_melted$melted_PPV > (min_PPV_desired / 100)]) # PPV_melted %>% filter(abs(melted_PPV - (min_PPV_desired / 100)) == min(abs(melted_PPV - (min_PPV_desired / 100)))) # Keep closest value to min_PPV_desired # Function output -------------------------------------------------------- message("To reach a PPV of ", min_PPV_desired, "% when using a test with ", Sensitivity, "% Sensitivity and ", FP_test, "% False Positive Rate, you need a prevalence of at least 1 out of ", output_prevalence) }
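# Worked check of the formula used above (a sketch based on the function's default
# arguments; the algebra follows the PPV expression in the code):
#   PPV(N) = (Sensitivity * 1) / (Sensitivity * 1 + (N - 1) * FP_test)
# With Sensitivity = 95, FP_test = 1 and min_PPV_desired = 90, PPV(N) stays above
# 0.90 up to roughly N = 11 (95 / (95 + 10) ~ 0.905), so the reported minimum
# prevalence should be about 1 out of 11:
# min_possible_prevalence(Sensitivity = 95, FP_test = 1, min_PPV_desired = 90)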
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/R/min_possible_prevalence.R
#' plot_cutoff #' Create a cutoff plot, showing the healthy and sick distributions, and the #' consequences of different cutoff points #' #' @param prevalence prevalence of the disease #' @param cutoff_point cutoff point to use #' @param mean_sick mean for the sick people distribution #' @param mean_healthy mean for the healthy people distribution #' @param sd_sick sd for the sick people distribution #' @param sd_healthy sd for the healthy people distribution #' @param n_people number of people to use #' @param add_table FALSE/TRUE: add gt table with Sensitivity, Specificity, etc. #' @param output_filename NULL. If a filename, will save the plot #' #' @return A list with plots and table #' @export #' @importFrom stats rnorm #' @importFrom ggtext element_markdown #' @importFrom gt gt cell_text cells_column_labels cols_align cols_label fmt_markdown tab_style #' @importFrom png readPNG #' @importFrom scales comma #' @importFrom ggplot2 ggplot_build #' #' @examples #' \dontrun{ #' plot_cutoff(prevalence = 0.2) #' } plot_cutoff <- function(prevalence = 0.1, cutoff_point = 30, mean_sick = 35, mean_healthy = 20, sd_sick = 3, sd_healthy = 5, n_people = 100000, add_table = FALSE, output_filename = NULL) { # DEBUG # prevalence = 0.1 # cutoff_point = 40 # mean_sick = 35 # mean_healthy = 20 # sd_sick = 3 # sd_healthy = 5 # n_people = 100000 # output_filename = NULL SEED = 10 # How many sick & healthy n_healthy = (1 - prevalence) * n_people n_sick = prevalence * n_people # Checks if (n_people >= 10^7) cli::cli_alert_warning("Lots of observations. Will take a few seconds to create the plot. Lower the n_people ({n_people})") if (prevalence < 0.005) cli::cli_alert_warning("With prevalence this low, you will hardly see the sick distribution ({prevalence})") # Colors ------------------------------------------------------------------ # Base 16 (0123456789abcdef), from 00 (lower) to ff (higher) DF_colors = tibble::tibble( classification = c("TN", "FP", "FN", "TP"), fill_str = c("#00000040", "#990ff7ff", "#000000ee", "#990ff740") ) color_TN = DF_colors[DF_colors$classification == "TN",]$fill_str color_FP = DF_colors[DF_colors$classification == "FP",]$fill_str color_FN = DF_colors[DF_colors$classification == "FN",]$fill_str color_TP = DF_colors[DF_colors$classification == "TP",]$fill_str # Formatted names for title TP_title = paste0("<span style = 'color: ", color_TP, ";'>True Positives</span>") FN_title = paste0("<span style = 'color: ", color_FN, ";'>False Negatives</span>") FP_title = paste0("<span style = 'color: ", color_FP, ";'>False Positives</span>") TN_title = paste0("<span style = 'color: ", color_TN, ";'>True Negatives</span>") # Create distributions ---------------------------------------------------- set.seed(SEED) data_sick = round(rnorm(n_sick, mean = mean_sick , sd = sd_sick)) set.seed(SEED) data_healthy = round(rnorm(n_healthy, mean = mean_healthy , sd = sd_healthy)) DF = tibble::tibble(type = c(rep("healthy", n_healthy), rep("sick", n_sick)), test_result = c(data_healthy, data_sick)) |> dplyr::mutate(test_result_dico = ifelse(test_result >= cutoff_point, "positive", "negative")) |> dplyr::mutate(classification = dplyr::case_when( type == "healthy" & test_result_dico == "negative" ~ "TN", type == "healthy" & test_result_dico == "positive" ~ "FP", type == "sick" & test_result_dico == "negative" ~ "FN", type == "sick" & test_result_dico == "positive" ~ "TP", ), fill_str = dplyr::case_when( classification == "TN" ~ color_TN, classification == "FP" ~ color_FP, classification == "FN" ~ color_FN, 
classification == "TP" ~ color_TP, )) # Table ------------------------------------------------------------------- DF_table_raw1 = DF |> dplyr::group_by(classification) |> dplyr::summarise(N = dplyr::n(), .groups = "drop") table_template = tibble::tibble(TN = 0, FP = 0, FN = 0, TP = 0) DF_table_raw2 = DF_table_raw1 |> tidyr::pivot_wider(names_from = classification, values_from = N) DF_table = DF_table_raw2 |> # Add columns if they do not exist (e.g. when 0 FP) tibble::add_column(!!!table_template[setdiff(names(table_template), names(DF_table_raw2))]) |> # Calculate dplyr::mutate(Sensitivity = TP/ (TP + FN), Specificity = TN / (TN + FP), PPV = TP / (TP + FP), NPV = TN / (TN + FN), Prevalence = prevalence) # Formatted cells for table TP = paste0("<span style = 'color: ", color_TP, ";'>TP", "</span><BR>", format(DF_table$TP, big.mark = ",", scientific = FALSE)) FN = paste0("<span style = 'color: ", color_FN, ";'>FN", "</span><BR>", format(DF_table$FN, big.mark = ",", scientific = FALSE)) FP = paste0("<span style = 'color: ", color_FP, ";'>FP", "</span><BR>", format(DF_table$FP, big.mark = ",", scientific = FALSE)) TN = paste0("<span style = 'color: ", color_TN, ";'>TN", "</span><BR>", format(DF_table$TN, big.mark = ",", scientific = FALSE)) Sensitivity = paste0("**Sensitivity**<BR>", round(DF_table$Sensitivity, 3) * 100, "%") Specificity = paste0("**Specificity**<BR>", round(DF_table$Specificity, 3) * 100, "%") PPV = paste0("**PPV**<BR>", round(DF_table$PPV, 3) * 100, "%") NPV = paste0("**NPV**<BR>", round(DF_table$NPV, 3) * 100, "%") TABLE_raw = tibble::tibble(X = c("**test +**", "**test -**", ""), `Sick` = c(TP, FN, Sensitivity), `Healthy` = c(FP, TN, Specificity), Y = c(PPV, NPV, "")) TABLE_gt = TABLE_raw |> gt::gt() |> gt::fmt_markdown(dplyr::everything()) |> gt::cols_align(align = "center") |> gt::cols_label(X = "", Y= "") |> gt::tab_style( style = gt::cell_text(weight = "bold"), locations = gt::cells_column_labels() ) # Initial Plot --------------------------------------------------------------- # Number of individual bins bins_histogram = max(DF$test_result) - min(DF$test_result) binwidth = 1 # Base plot plot = DF |> ggplot2::ggplot(ggplot2::aes(test_result)) + # Histograms ggplot2::geom_histogram(data = subset(DF, classification == 'TN'), ggplot2::aes(fill = fill_str), bins = bins_histogram, binwidth = 1, show.legend = FALSE) + ggplot2::geom_histogram(data = subset(DF, classification == 'TP'), ggplot2::aes(fill = fill_str), bins = bins_histogram, binwidth = 1, show.legend = FALSE) + ggplot2::geom_histogram(data = subset(DF, classification == 'FN'), ggplot2::aes(fill = fill_str), bins = bins_histogram, binwidth = 1, show.legend = FALSE) + ggplot2::geom_histogram(data = subset(DF, classification == 'FP'), ggplot2::aes(fill = fill_str), bins = bins_histogram, binwidth = 1, show.legend = FALSE) + # Outlines (healthy, sick) # stat_bin(data = subset(DF, classification %in% c('FN', 'TP')), geom = "step", direction = "mid", aes(linetype = type), binwidth = binwidth, show.legend = FALSE) + # stat_bin(data = subset(DF, classification %in% c('TN', 'FP')), geom = "step", direction = "mid", aes(linetype = type), binwidth = binwidth, show.legend = FALSE) + ggplot2::stat_bin(data = subset(DF, classification %in% c('TN')), geom = "step", direction = "mid", ggplot2::aes(linetype = type), binwidth = binwidth, show.legend = FALSE) + ggplot2::stat_bin(data = subset(DF, classification %in% c('TP')), geom = "step", direction = "mid", ggplot2::aes(linetype = type), binwidth = binwidth, show.legend = FALSE) + 
ggplot2::stat_bin(data = subset(DF, classification %in% c('FN')), geom = "step", direction = "mid", ggplot2::aes(linetype = type), binwidth = binwidth, show.legend = FALSE) + ggplot2::stat_bin(data = subset(DF, classification %in% c('FP')), geom = "step", direction = "mid", ggplot2::aes(linetype = type), binwidth = binwidth, show.legend = FALSE) + ggplot2::geom_vline(xintercept = cutoff_point - 0.5, linetype = "dashed") + ggplot2::theme_minimal(base_size = 14) + ggplot2::labs(caption = paste0(format(n_people, big.mark = ",", scientific = FALSE), " people, ", "prevalence 1 out of ", 1/DF_table$Prevalence, "\n", format(n_sick, big.mark = ",", scientific = FALSE), " sick (M = ", mean_sick, ", SD = ", sd_sick, ")\n", format(n_healthy, big.mark = ",", scientific = FALSE) , " healthy (M = ", mean_healthy, ", SD = ", sd_healthy, ")" # "Sensitivity = ", round(DF_table$Sensitivity * 100, 0), "% Specificity = ", round(DF_table$Specificity * 100, 0), "% \n", # "PPV = ", round(DF_table$PPV * 100, 0), "% NPV = ", round(DF_table$NPV * 100, 0), "%" )) + ggplot2::theme(legend.position = "top") + ggplot2::scale_fill_identity() # Max values for healthy and sick ----------------------------------------- ggplot_layout = ggplot2::ggplot_build(plot)$layout ggplot_data = ggplot2::ggplot_build(plot)$data data_1 = ggplot_data[[1]][which.max(ggplot_data[[1]]$count),] data_2 = ggplot_data[[2]][which.max(ggplot_data[[2]]$count),] data_3 = ggplot_data[[3]][which.max(ggplot_data[[3]]$count),] data_4 = ggplot_data[[4]][which.max(ggplot_data[[4]]$count),] max_counts = rbind(data_1, data_2, data_3, data_4) |> dplyr::left_join(DF_colors, by = c("fill" = "fill_str")) range_x = ggplot_layout$panel_params[[1]]$x.range # Looking for the max value in the healthy or sick histograms max_count_healthy = max(max_counts[max_counts$classification %in% c("TN", "FP"), "count"]) max_count_sick = max(max_counts[max_counts$classification %in% c("TP", "FN"), "count"]) TN_x = max(max_counts[max_counts$classification %in% c("TN"), "x"]) FN_x = max(max_counts[max_counts$classification %in% c("FN"), "x"]) FP_x = max(max_counts[max_counts$classification %in% c("FP"), "x"]) TP_x = max(max_counts[max_counts$classification %in% c("TP"), "x"]) if (FP_x == TP_x) TP_x = TP_x * 1.05 if (FN_x == TN_x) TN_x = TN_x * .95 # Plot annotations -------------------------------------------------------- final_plot = plot + ggplot2::annotate(x = cutoff_point -.5, y = max_count_healthy *.95, label = "Cutoff point", vjust = 2, geom = "text", angle = 90) + # annotate(x = mean_healthy, y = max_count_healthy / 2, label = "Healthy", vjust = 2, geom = "text") + # annotate(x = mean_sick, y = max_count_sick / 2, label = "Sick", vjust = 2, geom = "text", color = "white") + # Sick / Healthy ggplot2::annotate(x = mean_healthy, y = max_count_healthy, label = "Healthy", vjust = -1, geom = "text", color = "#000000") + ggplot2::annotate(x = mean_sick, y = max_count_sick, label = "Sick", vjust = -1, geom = "text", color = "#222222") + # TN / FN / FP / TP ggplot2::annotate(x = TN_x, y = 0, label = "TN", vjust = 2, geom = "text", color = color_TN) + ggplot2::annotate(x = FN_x, y = 0, label = "FN", vjust = 2, geom = "text", color = color_FN) + ggplot2::annotate(x = FP_x, y = 0, label = "FP", vjust = 2, geom = "text", color = color_FP) + ggplot2::annotate(x = TP_x, y = 0, label = "TP", vjust = 2, geom = "text", color = color_TP) + ggplot2::labs(x = NULL, y = NULL, title = paste0(TP_title, ", ", FN_title, ", ", FP_title, ", ",TN_title), subtitle = "Depending on a Cutoff 
point<BR><BR>") + ggplot2::theme(plot.title = ggtext::element_markdown(), plot.subtitle = ggtext::element_markdown()) + ggplot2::scale_y_continuous(labels = scales::comma) # Combine table and plot -------------------------------------------------- if (add_table == TRUE) { # Save and read image name_file = paste0(tempfile(), ".png") TABLE_gt |> gt::gtsave(name_file, quiet = TRUE) grob_table <- grid::rasterGrob(png::readPNG(name_file), interpolate=TRUE) # Add image of gt table to plot final_plot = final_plot + ggplot2::annotation_custom( grob_table, # xmin = min(range_x), xmax = max(range_x) / 4 + min(range_x), ymin = max_count_healthy * .8 # ymax = max_count_healthy * .5 ) } # Save plot if (!is.null(output_filename)) ggplot2::ggsave(output_filename, final_plot, bg = "white", width = 16, height = 12) # Output ------------------------------------------------------------------ OUTPUT = list(TABLE_gt = TABLE_gt, final_plot = final_plot) return(OUTPUT) } #' remove_layers_cutoff_plot #' Remove layers from a cutoff plot. This is useful to show how different things #' are calculated (e.g. Sensitivity) #' #' @param cutoff_plot A plot_cutoff() plot #' @param delete_what Elements to delete (i.e. FP, FN, TP, TN) #' @param silent TRUE do not show debug info #' #' @return a cutoff plot without the elements deleted #' @export #' #' @examples #' \dontrun{ #' PLOT = plot_cutoff(prevalence = 0.2) #' remove_layers_cutoff_plot(PLOT$final_plot, delete_what = c("FN", "TP")) + #' ggplot2::labs(subtitle = "Specificity = TN/(TN+FP)") #' } remove_layers_cutoff_plot <- function(cutoff_plot, delete_what, silent = TRUE) { layers <- lapply(cutoff_plot$layers, function(x) { # GeomStep, GeomBar if (class(x$geom)[1] %in% c("GeomStep", "GeomBar")) { if (unique(x$data$classification)[1] %in% delete_what) { if (silent == FALSE) cli::cli_alert_info("DELETE: {class(x$geom)[1]} | {unique(x$data$classification)[1]}") NULL } else { if (silent == FALSE) cli::cli_alert_info("{class(x$geom)[1]} | {unique(x$data$classification)[1]}") x } # GeomText } else if (class(x$geom)[1] %in% c("GeomText")) { if (x$aes_params$label %in% delete_what) { if (silent == FALSE) cli::cli_alert_info("DELETE: {class(x$geom)[1]} | {x$aes_params$label}") NULL } else if (x$aes_params$label == "Cutoff point") { if (silent == FALSE) cli::cli_alert_info("DELETE: {class(x$geom)[1]} | {x$aes_params$label}") NULL # Delete the Healthy text when we delete TN } else if (x$aes_params$label == "Healthy" & all(c("TN", "FP") %in% delete_what)) { if (silent == FALSE) cli::cli_alert_info("DELETE: {class(x$geom)[1]} | {x$aes_params$label} | delete_what: {c('TN', 'FP') %in% delete_what}") NULL } else if (x$aes_params$label == "Sick" & all(c("TP", "FN") %in% delete_what)) { if (silent == FALSE) cli::cli_alert_info("DELETE: {class(x$geom)[1]} | {x$aes_params$label} | delete_what: {c('TP', 'FN') %in% delete_what}") NULL } else { if (silent == FALSE) cli::cli_alert_info("{class(x$geom)[1]} | {x$aes_params$label}") x } } else { if (silent == FALSE) cli::cli_alert_info("{class(x$geom)[1]} | {unique(x$data$classification)[1]} | {x$aes_params$label}") x } }) layers <- layers[!sapply(layers, is.null)] cutoff_plot$layers <- layers return(cutoff_plot) }
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/R/plot_cutoff.R
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>", dpi = 60, fig.height = 10, fig.width = 14 ) ## ----echo = FALSE, message = FALSE, results = 'hide'-------------------------- library(BayesianReasoning) library(patchwork) ## ----------------------------------------------------------------------------- PPV_plot = PPV_heatmap( min_Prevalence = 1, max_Prevalence = 80, Sensitivity = 95, limits_Specificity = c(85, 100), overlay = "area", overlay_prevalence_1 = 1, overlay_prevalence_2 = 69, overlay_position_FP = 12.1, label_title = "PPV", label_subtitle = "Screening test" ) ## ----------------------------------------------------------------------------- NPV_plot = PPV_heatmap( PPV_NPV = "NPV", min_Prevalence = 1, max_Prevalence = 80, Specificity = 87.9, overlay = "area", overlay_prevalence_1 = 1, overlay_prevalence_2 = 69, overlay_position_FN = 5, label_title = "NPV", label_subtitle = "Screening test" ) ## ----fig.height = 14, fig.width = 12------------------------------------------ (PPV_plot / NPV_plot) + plot_layout(guides = 'collect')
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/inst/doc/PPV_NPV.R
--- title: "Screening tests and PPV vs NPV" author: "Gorka Navarrete" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Screening tests and PPV vs NPV} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", dpi = 60, fig.height = 10, fig.width = 14 ) ``` ```{r, echo = FALSE, message = FALSE, results = 'hide'} library(BayesianReasoning) library(patchwork) ``` Screening tests are applied to asymptomatic people with the hope to catch a disease in the early stages. Screening tests are by definition applied to populations where the prevalence of the condition is low (most people are healthy). This simple fact has consequences for how much we can trust their + and - results, the Positive Predictive Value (PPV) and Negative Predictive Value (NPV) of the test, respectively. **Please, keep in mind the goal of this vignette is to exemplify how to use the BayesianReasoning package. None of the information contained here should be taken as medical advice.** ## PPV and NPV definitions PPV, formally $P(Disease \mid +)$ is the probability of having a disease *given* a test result is +. $P(Disease \mid +) = \frac{TruePositives}{TruePositives + FalsePositives}$ --- NPV, formally $P(Healthy \mid -)$ is the probability of being healthy *given* a test result is -. $P(Healthy \mid -) = \frac{TrueNegatives}{TrueNegatives + FalseNegatives}$ --- ## Example We will use as an example Mammography at 50 years old as a screening test to detect Breast Cancer. ### PPV The PPV of Mammography at 50 years old in the general population is relatively low. ```{r} PPV_plot = PPV_heatmap( min_Prevalence = 1, max_Prevalence = 80, Sensitivity = 95, limits_Specificity = c(85, 100), overlay = "area", overlay_prevalence_1 = 1, overlay_prevalence_2 = 69, overlay_position_FP = 12.1, label_title = "PPV", label_subtitle = "Screening test" ) ``` ### NPV The NPV of Mammography at 50 years old in the general population is very high. ```{r} NPV_plot = PPV_heatmap( PPV_NPV = "NPV", min_Prevalence = 1, max_Prevalence = 80, Specificity = 87.9, overlay = "area", overlay_prevalence_1 = 1, overlay_prevalence_2 = 69, overlay_position_FN = 5, label_title = "NPV", label_subtitle = "Screening test" ) ``` ### Combined PPV + NPV Combining both PPV and NPV shows how negative results of Mammography at 50 years old in the general population are very trustworthy, but positive results are not. We can plot the PPV and NPV plots side by side using `{patchwork}`: ```{r, fig.height = 14, fig.width = 12} (PPV_plot / NPV_plot) + plot_layout(guides = 'collect') ``` ## Sources **Breast Cancer screening information**: * Nelson, H. D., O’Meara, E. S., Kerlikowske, K., Balch, S., & Miglioretti, D. (2016). Factors associated with rates of false-positive and false-negative results from digital mammography screening: An analysis of registry data. Annals of Internal Medicine, 164(4), 226–235. https://doi.org/10.7326/M15-0971 * https://seer.cancer.gov/archive/csr/1975_2012/browse_csr.php?sectionSEL=4&pageSEL=sect_04_table.24 **Theoretical overview of the technical concepts**: * Akobeng, A.K. (2007) https://doi.org/10.1111/j.1651-2227.2006.00180.x **Practical explanation about the importance of understanding PPV**: * Navarrete et al. (2015) for a https://doi.org/10.3389/fpsyg.2015.01327
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/inst/doc/PPV_NPV.Rmd
params <- list(package_creation = FALSE) ## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>", dpi = 60, fig.height = 10, fig.width = 14 ) ## ----echo = FALSE, message = FALSE, results = 'hide'-------------------------- library(BayesianReasoning) # FROM: https://community.rstudio.com/t/internet-resources-should-fail-gracefully/49199/12 safely_show_image_Rmd <- function(remote_file, name_image) { try_GET <- function(x, ...) { tryCatch( httr::GET(url = x, httr::timeout(1), ...), error = function(e) conditionMessage(e), warning = function(w) conditionMessage(w) ) } is_response <- function(x) { class(x) == "response" } # First check internet connection if (!curl::has_internet()) { message("No internet connection.") return(invisible(NULL)) } # Then try for timeout problems resp <- try_GET(remote_file) if (!is_response(resp)) { message(resp) return(invisible(NULL)) } # Then stop if status > 400 if (httr::http_error(resp)) { httr::message_for_status(resp) return(invisible(NULL)) } # Output paste0("![", name_image, "](", remote_file, ")") } ## ----echo=FALSE, results='asis'----------------------------------------------- # If we are creating the README params$package_creation will be TRUE if (params$package_creation) { cat(paste0("[", safely_show_image_Rmd(remote_file = "https://www.r-pkg.org/badges/version/BayesianReasoning", name_image = "CRAN status"), "]", "(https://cran.r-project.org/package=BayesianReasoning)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "https://codecov.io/gh/gorkang/BayesianReasoning/branch/master/graph/badge.svg", name_image = "Codecov test coverage"), "]", "(https://app.codecov.io/gh/gorkang/BayesianReasoning?branch=master)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "http://cranlogs.r-pkg.org/badges/BayesianReasoning", name_image = "downloads"), "]", "(https://cran.r-project.org/package=BayesianReasoning)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "https://img.shields.io/badge/lifecycle-experimental-orange.svg", name_image = "Lifecycle: experimental"), "]", "(https://lifecycle.r-lib.org/articles/stages.html#experimental)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "https://zenodo.org/badge/93097662.svg", name_image = "DOI"), "]", "(https://zenodo.org/badge/latestdoi/93097662)")) } ## ----heatmap------------------------------------------------------------------ PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1000, Sensitivity = 100, limits_Specificity = c(90, 100), Language = "en") ## ----NPV-heatmap-------------------------------------------------------------- PPV_heatmap(PPV_NPV = "NPV", min_Prevalence = 800, max_Prevalence = 1000, Specificity = 95, limits_Sensitivity = c(90, 100), Language = "en") ## ----area--------------------------------------------------------------------- PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1200, Sensitivity = 81, limits_Specificity = c(94, 100), label_subtitle = "Prenatal screening for Down Syndrome by Age", overlay = "area", overlay_labels = "40 y.o.", overlay_position_FP = 4.8, overlay_prevalence_1 = 1, overlay_prevalence_2 = 68) ## ----area2-------------------------------------------------------------------- PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1200, Sensitivity = 81, limits_Specificity = c(94, 100), label_subtitle = "Prenatal screening for Down Syndrome by Age", overlay_extra_info = TRUE, overlay = "area", overlay_labels = "40 y.o.", overlay_position_FP = 4.8, overlay_prevalence_1 = 1, 
overlay_prevalence_2 = 68) ## ----line--------------------------------------------------------------------- PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1800, Sensitivity = 90, limits_Specificity = c(84, 100), label_subtitle = "PPV of Mammogram for Breast Cancer by Age", overlay = "line", overlay_labels = c("80 y.o.", "70 y.o.", "60 y.o.", "50 y.o.", "40 y.o.", "30 y.o.", "20 y.o."), overlay_position_FP = c(6.5, 7, 8, 9, 12, 14, 14), overlay_prevalence_1 = c(1, 1, 1, 1, 1, 1, 1), overlay_prevalence_2 = c(22, 26, 29, 44, 69, 227, 1667)) ## ----line-2------------------------------------------------------------------- PPV_heatmap(min_Prevalence = 1, max_Prevalence = 2000, Sensitivity = 81, limits_Specificity = c(94, 100), label_subtitle = "Prenatal screening for Down Syndrome by Age", overlay = "line", overlay_labels = c("40 y.o.", "30 y.o.", "20 y.o."), overlay_position_FP = c(4.8, 4.8, 4.8), overlay_prevalence_1 = c(1, 1, 1), overlay_prevalence_2 = c(68, 626, 1068)) ## ----diagnostic--------------------------------------------------------------- PPV_diagnostic_vs_screening(max_FP = 10, Sensitivity = 100, prevalence_screening_group = 1000, prevalence_diagnostic_group = 2) ## ----cutoff------------------------------------------------------------------- PLOTS = plot_cutoff(prevalence = 0.2, cutoff_point = 33, mean_sick = 35, mean_healthy = 20, sd_sick = 3, sd_healthy = 5 ) PLOTS$final_plot ## ----remove-cutoff------------------------------------------------------------ # Sensitivity remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("FP", "TN")) + ggplot2::labs(subtitle = "Sensitivity = TP/(TP+FN)") # Specificity remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("FN", "TP")) + ggplot2::labs(subtitle = "Specificity = TN/(TN+FP)") # PPV remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("TN", "FN")) + ggplot2::labs(subtitle = "PPV = TP/(TP+FP)") # NPV remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("TP", "FP")) + ggplot2::labs(subtitle = "NPV = TN/(TN+FN)")
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/inst/doc/introduction.R
--- title: "Introduction to BayesianReasoning" author: "Gorka Navarrete" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Introduction to BayesianReasoning} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console params: package_creation: FALSE --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", dpi = 60, fig.height = 10, fig.width = 14 ) ``` ```{r, echo = FALSE, message = FALSE, results = 'hide'} library(BayesianReasoning) # FROM: https://community.rstudio.com/t/internet-resources-should-fail-gracefully/49199/12 safely_show_image_Rmd <- function(remote_file, name_image) { try_GET <- function(x, ...) { tryCatch( httr::GET(url = x, httr::timeout(1), ...), error = function(e) conditionMessage(e), warning = function(w) conditionMessage(w) ) } is_response <- function(x) { class(x) == "response" } # First check internet connection if (!curl::has_internet()) { message("No internet connection.") return(invisible(NULL)) } # Then try for timeout problems resp <- try_GET(remote_file) if (!is_response(resp)) { message(resp) return(invisible(NULL)) } # Then stop if status > 400 if (httr::http_error(resp)) { httr::message_for_status(resp) return(invisible(NULL)) } # Output paste0("![", name_image, "](", remote_file, ")") } ``` # Bayesian reasoning <!-- badges: start --> ```{r echo=FALSE, results='asis'} # If we are creating the README params$package_creation will be TRUE if (params$package_creation) { cat(paste0("[", safely_show_image_Rmd(remote_file = "https://www.r-pkg.org/badges/version/BayesianReasoning", name_image = "CRAN status"), "]", "(https://cran.r-project.org/package=BayesianReasoning)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "https://codecov.io/gh/gorkang/BayesianReasoning/branch/master/graph/badge.svg", name_image = "Codecov test coverage"), "]", "(https://app.codecov.io/gh/gorkang/BayesianReasoning?branch=master)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "http://cranlogs.r-pkg.org/badges/BayesianReasoning", name_image = "downloads"), "]", "(https://cran.r-project.org/package=BayesianReasoning)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "https://img.shields.io/badge/lifecycle-experimental-orange.svg", name_image = "Lifecycle: experimental"), "]", "(https://lifecycle.r-lib.org/articles/stages.html#experimental)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "https://zenodo.org/badge/93097662.svg", name_image = "DOI"), "]", "(https://zenodo.org/badge/latestdoi/93097662)")) } ``` <!-- badges: end --> --- ## Bayesian reasoning in medical contexts This package includes a few functions to plot and help understand Positive and Negative Predictive Values, and their relationship with Sensitivity, Specificity and Prevalence. + The **Positive Predictive** Value of a medical test is the probability that a positive result will mean having the disease. Formally p(Disease|+) + The **Negative Predictive** Value of a medical test is the probability that a negative result will mean **not** having the disease. Formally p(Healthy|-) The BayesianReasoning package has three main functions: + **PPV_heatmap()**: Plot heatmaps with PPV or NPV values for the given test and disease parameters. + **PPV_diagnostic_vs_screening()**: Plots the difference between the PPV of a test in a diagnostic context (very high prevalence; or a common study sample, e.g. ~50% prevalence) versus a screening context (lower prevalence). 
+ **min_possible_prevalence()**: Calculates how high should the prevalence of a disease be to reach a desired PPV given certain test parameters. --- If you want to install the package can use: `remotes::install_github("gorkang/BayesianReasoning")`. Please report any problems you find in the [Issues Github page](https://github.com/gorkang/BayesianReasoning/issues). There is a [shiny app implementation](https://gorkang.shinyapps.io/BayesianReasoning/) with most of the main features available. --- ## PPV_heatmap() Plot heatmaps with PPV or NPV values for a given specificity and a range of Prevalences and FP or FN (1 - Sensitivity). The basic parameters are: * min_Prevalence: Min prevalence in y axis. "min_Prevalence out of y" * max_Prevalence: Max prevalence in y axis. "1 out of max_Prevalence" * Sensitivity: Sensitivity of the test * max_FP: FP is 1 - specificity. The x axis will go from FP = 0% to max_FP * Language: "es" for Spanish or "en" for English ```{r heatmap} PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1000, Sensitivity = 100, limits_Specificity = c(90, 100), Language = "en") ``` --- ### NPV You can also plot an NPV heatmap with PPV_NPV = "NPV". ```{r NPV-heatmap} PPV_heatmap(PPV_NPV = "NPV", min_Prevalence = 800, max_Prevalence = 1000, Specificity = 95, limits_Sensitivity = c(90, 100), Language = "en") ``` --- ### Area overlay You can add different types of overlay to the plots. For example, an area overlay showing the point PPV for a given prevalence and FP or FN: ```{r area} PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1200, Sensitivity = 81, limits_Specificity = c(94, 100), label_subtitle = "Prenatal screening for Down Syndrome by Age", overlay = "area", overlay_labels = "40 y.o.", overlay_position_FP = 4.8, overlay_prevalence_1 = 1, overlay_prevalence_2 = 68) ``` The area plot overlay can show more details about how the calculation of PPV/NPV is performed: ```{r area2} PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1200, Sensitivity = 81, limits_Specificity = c(94, 100), label_subtitle = "Prenatal screening for Down Syndrome by Age", overlay_extra_info = TRUE, overlay = "area", overlay_labels = "40 y.o.", overlay_position_FP = 4.8, overlay_prevalence_1 = 1, overlay_prevalence_2 = 68) ``` ### Line overlay Also, you can add a line overlay highlighting a range of prevalences and FP. This is useful, for example, to show how the PPV of a test changes with age: ```{r line} PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1800, Sensitivity = 90, limits_Specificity = c(84, 100), label_subtitle = "PPV of Mammogram for Breast Cancer by Age", overlay = "line", overlay_labels = c("80 y.o.", "70 y.o.", "60 y.o.", "50 y.o.", "40 y.o.", "30 y.o.", "20 y.o."), overlay_position_FP = c(6.5, 7, 8, 9, 12, 14, 14), overlay_prevalence_1 = c(1, 1, 1, 1, 1, 1, 1), overlay_prevalence_2 = c(22, 26, 29, 44, 69, 227, 1667)) ``` --- Another example. 
In this case, the FP is constant across age: ```{r line-2} PPV_heatmap(min_Prevalence = 1, max_Prevalence = 2000, Sensitivity = 81, limits_Specificity = c(94, 100), label_subtitle = "Prenatal screening for Down Syndrome by Age", overlay = "line", overlay_labels = c("40 y.o.", "30 y.o.", "20 y.o."), overlay_position_FP = c(4.8, 4.8, 4.8), overlay_prevalence_1 = c(1, 1, 1), overlay_prevalence_2 = c(68, 626, 1068)) ``` --- ## PPV_diagnostic_vs_screening() In scientific studies developing a new test for the early detection of a medical condition, it is quite common to use a sample where 50% of participants has a medical condition and the other 50% are normal controls. This has the unintended effect of maximizing the PPV of the test. This function shows a plot with the difference between the PPV of a diagnostic context (very high prevalence; or a common study sample, e.g. ~50% prevalence) versus that of a screening context (lower prevalence). ```{r diagnostic} PPV_diagnostic_vs_screening(max_FP = 10, Sensitivity = 100, prevalence_screening_group = 1000, prevalence_diagnostic_group = 2) ``` --- ## min_possible_prevalence() Imagine you would like to use a test in a population and want to have a 98% PPV. That is, IF a positive result comes out in the test, you would like a 98% certainty that it is a true positive. How high should the prevalence of the disease be in that group? ```r min_possible_prevalence(Sensitivity = 100, FP_test = 0.1, min_PPV_desired = 98) ``` > To reach a PPV of 98 when using a test with 100 % Sensitivity and 0.1 % False Positive Rate, you need a prevalence of at least 1 out of 21 --- Another example, with a very good test, and lower expectations: ```r min_possible_prevalence(Sensitivity = 99.9, FP_test = .1, min_PPV_desired = 70) ``` > To reach a PPV of 70 when using a test with 99.9 % Sensitivity and 0.1 % False Positive Rate, you need a prevalence of at least 1 out of 429 --- ## plot_cutoff() Since v0.4.2 you can also plot the distributions of sick and healthy individuals and learn about how a cutoff point changes the True Positives, False Positives, True Negatives, False Negatives, Sensitivity, Specificity, PPV and NPV. ```{r cutoff} PLOTS = plot_cutoff(prevalence = 0.2, cutoff_point = 33, mean_sick = 35, mean_healthy = 20, sd_sick = 3, sd_healthy = 5 ) PLOTS$final_plot ``` Then, with `remove_layers_cutoff_plot()` you can remove specific layers, to help you understand some of these concepts. ```{r remove-cutoff} # Sensitivity remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("FP", "TN")) + ggplot2::labs(subtitle = "Sensitivity = TP/(TP+FN)") # Specificity remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("FN", "TP")) + ggplot2::labs(subtitle = "Specificity = TN/(TN+FP)") # PPV remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("TN", "FN")) + ggplot2::labs(subtitle = "PPV = TP/(TP+FP)") # NPV remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("TP", "FP")) + ggplot2::labs(subtitle = "NPV = TN/(TN+FN)") ```
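---

As a quick numeric check of the `min_possible_prevalence()` example shown earlier (a sketch for illustration only), the PPV at a prevalence of 1 out of 21, with 100% Sensitivity and a 0.1% False Positive Rate, works out to roughly 98%:

```{r min-prevalence-check}
Sensitivity <- 100
FP_test <- 0.1
N <- 21  # prevalence of 1 out of N

(Sensitivity * 1) / (Sensitivity * 1 + (N - 1) * FP_test)  # ~0.98
```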
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/inst/doc/introduction.Rmd
--- title: "Screening tests and PPV vs NPV" author: "Gorka Navarrete" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Screening tests and PPV vs NPV} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", dpi = 60, fig.height = 10, fig.width = 14 ) ``` ```{r, echo = FALSE, message = FALSE, results = 'hide'} library(BayesianReasoning) library(patchwork) ``` Screening tests are applied to asymptomatic people with the hope to catch a disease in the early stages. Screening tests are by definition applied to populations where the prevalence of the condition is low (most people are healthy). This simple fact has consequences for how much we can trust their + and - results, the Positive Predictive Value (PPV) and Negative Predictive Value (NPV) of the test, respectively. **Please, keep in mind the goal of this vignette is to exemplify how to use the BayesianReasoning package. None of the information contained here should be taken as medical advice.** ## PPV and NPV definitions PPV, formally $P(Disease \mid +)$ is the probability of having a disease *given* a test result is +. $P(Disease \mid +) = \frac{TruePositives}{TruePositives + FalsePositives}$ --- NPV, formally $P(Healthy \mid -)$ is the probability of being healthy *given* a test result is -. $P(Healthy \mid -) = \frac{TrueNegatives}{TrueNegatives + FalseNegatives}$ --- ## Example We will use as an example Mammography at 50 years old as a screening test to detect Breast Cancer. ### PPV The PPV of Mammography at 50 years old in the general population is relatively low. ```{r} PPV_plot = PPV_heatmap( min_Prevalence = 1, max_Prevalence = 80, Sensitivity = 95, limits_Specificity = c(85, 100), overlay = "area", overlay_prevalence_1 = 1, overlay_prevalence_2 = 69, overlay_position_FP = 12.1, label_title = "PPV", label_subtitle = "Screening test" ) ``` ### NPV The NPV of Mammography at 50 years old in the general population is very high. ```{r} NPV_plot = PPV_heatmap( PPV_NPV = "NPV", min_Prevalence = 1, max_Prevalence = 80, Specificity = 87.9, overlay = "area", overlay_prevalence_1 = 1, overlay_prevalence_2 = 69, overlay_position_FN = 5, label_title = "NPV", label_subtitle = "Screening test" ) ``` ### Combined PPV + NPV Combining both PPV and NPV shows how negative results of Mammography at 50 years old in the general population are very trustworthy, but positive results are not. We can plot the PPV and NPV plots side by side using `{patchwork}`: ```{r, fig.height = 14, fig.width = 12} (PPV_plot / NPV_plot) + plot_layout(guides = 'collect') ``` ## Sources **Breast Cancer screening information**: * Nelson, H. D., O’Meara, E. S., Kerlikowske, K., Balch, S., & Miglioretti, D. (2016). Factors associated with rates of false-positive and false-negative results from digital mammography screening: An analysis of registry data. Annals of Internal Medicine, 164(4), 226–235. https://doi.org/10.7326/M15-0971 * https://seer.cancer.gov/archive/csr/1975_2012/browse_csr.php?sectionSEL=4&pageSEL=sect_04_table.24 **Theoretical overview of the technical concepts**: * Akobeng, A.K. (2007) https://doi.org/10.1111/j.1651-2227.2006.00180.x **Practical explanation about the importance of understanding PPV**: * Navarrete et al. (2015) for a https://doi.org/10.3389/fpsyg.2015.01327
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/vignettes/PPV_NPV.Rmd
--- title: "Introduction to BayesianReasoning" author: "Gorka Navarrete" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Introduction to BayesianReasoning} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console params: package_creation: FALSE --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", dpi = 60, fig.height = 10, fig.width = 14 ) ``` ```{r, echo = FALSE, message = FALSE, results = 'hide'} library(BayesianReasoning) # FROM: https://community.rstudio.com/t/internet-resources-should-fail-gracefully/49199/12 safely_show_image_Rmd <- function(remote_file, name_image) { try_GET <- function(x, ...) { tryCatch( httr::GET(url = x, httr::timeout(1), ...), error = function(e) conditionMessage(e), warning = function(w) conditionMessage(w) ) } is_response <- function(x) { class(x) == "response" } # First check internet connection if (!curl::has_internet()) { message("No internet connection.") return(invisible(NULL)) } # Then try for timeout problems resp <- try_GET(remote_file) if (!is_response(resp)) { message(resp) return(invisible(NULL)) } # Then stop if status > 400 if (httr::http_error(resp)) { httr::message_for_status(resp) return(invisible(NULL)) } # Output paste0("![", name_image, "](", remote_file, ")") } ``` # Bayesian reasoning <!-- badges: start --> ```{r echo=FALSE, results='asis'} # If we are creating the README params$package_creation will be TRUE if (params$package_creation) { cat(paste0("[", safely_show_image_Rmd(remote_file = "https://www.r-pkg.org/badges/version/BayesianReasoning", name_image = "CRAN status"), "]", "(https://cran.r-project.org/package=BayesianReasoning)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "https://codecov.io/gh/gorkang/BayesianReasoning/branch/master/graph/badge.svg", name_image = "Codecov test coverage"), "]", "(https://app.codecov.io/gh/gorkang/BayesianReasoning?branch=master)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "http://cranlogs.r-pkg.org/badges/BayesianReasoning", name_image = "downloads"), "]", "(https://cran.r-project.org/package=BayesianReasoning)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "https://img.shields.io/badge/lifecycle-experimental-orange.svg", name_image = "Lifecycle: experimental"), "]", "(https://lifecycle.r-lib.org/articles/stages.html#experimental)")) cat(paste0("[", safely_show_image_Rmd(remote_file = "https://zenodo.org/badge/93097662.svg", name_image = "DOI"), "]", "(https://zenodo.org/badge/latestdoi/93097662)")) } ``` <!-- badges: end --> --- ## Bayesian reasoning in medical contexts This package includes a few functions to plot and help understand Positive and Negative Predictive Values, and their relationship with Sensitivity, Specificity and Prevalence. + The **Positive Predictive** Value of a medical test is the probability that a positive result will mean having the disease. Formally p(Disease|+) + The **Negative Predictive** Value of a medical test is the probability that a negative result will mean **not** having the disease. Formally p(Healthy|-) The BayesianReasoning package has three main functions: + **PPV_heatmap()**: Plot heatmaps with PPV or NPV values for the given test and disease parameters. + **PPV_diagnostic_vs_screening()**: Plots the difference between the PPV of a test in a diagnostic context (very high prevalence; or a common study sample, e.g. ~50% prevalence) versus a screening context (lower prevalence). 
+ **min_possible_prevalence()**: Calculates how high should the prevalence of a disease be to reach a desired PPV given certain test parameters. --- If you want to install the package can use: `remotes::install_github("gorkang/BayesianReasoning")`. Please report any problems you find in the [Issues Github page](https://github.com/gorkang/BayesianReasoning/issues). There is a [shiny app implementation](https://gorkang.shinyapps.io/BayesianReasoning/) with most of the main features available. --- ## PPV_heatmap() Plot heatmaps with PPV or NPV values for a given specificity and a range of Prevalences and FP or FN (1 - Sensitivity). The basic parameters are: * min_Prevalence: Min prevalence in y axis. "min_Prevalence out of y" * max_Prevalence: Max prevalence in y axis. "1 out of max_Prevalence" * Sensitivity: Sensitivity of the test * max_FP: FP is 1 - specificity. The x axis will go from FP = 0% to max_FP * Language: "es" for Spanish or "en" for English ```{r heatmap} PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1000, Sensitivity = 100, limits_Specificity = c(90, 100), Language = "en") ``` --- ### NPV You can also plot an NPV heatmap with PPV_NPV = "NPV". ```{r NPV-heatmap} PPV_heatmap(PPV_NPV = "NPV", min_Prevalence = 800, max_Prevalence = 1000, Specificity = 95, limits_Sensitivity = c(90, 100), Language = "en") ``` --- ### Area overlay You can add different types of overlay to the plots. For example, an area overlay showing the point PPV for a given prevalence and FP or FN: ```{r area} PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1200, Sensitivity = 81, limits_Specificity = c(94, 100), label_subtitle = "Prenatal screening for Down Syndrome by Age", overlay = "area", overlay_labels = "40 y.o.", overlay_position_FP = 4.8, overlay_prevalence_1 = 1, overlay_prevalence_2 = 68) ``` The area plot overlay can show more details about how the calculation of PPV/NPV is performed: ```{r area2} PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1200, Sensitivity = 81, limits_Specificity = c(94, 100), label_subtitle = "Prenatal screening for Down Syndrome by Age", overlay_extra_info = TRUE, overlay = "area", overlay_labels = "40 y.o.", overlay_position_FP = 4.8, overlay_prevalence_1 = 1, overlay_prevalence_2 = 68) ``` ### Line overlay Also, you can add a line overlay highlighting a range of prevalences and FP. This is useful, for example, to show how the PPV of a test changes with age: ```{r line} PPV_heatmap(min_Prevalence = 1, max_Prevalence = 1800, Sensitivity = 90, limits_Specificity = c(84, 100), label_subtitle = "PPV of Mammogram for Breast Cancer by Age", overlay = "line", overlay_labels = c("80 y.o.", "70 y.o.", "60 y.o.", "50 y.o.", "40 y.o.", "30 y.o.", "20 y.o."), overlay_position_FP = c(6.5, 7, 8, 9, 12, 14, 14), overlay_prevalence_1 = c(1, 1, 1, 1, 1, 1, 1), overlay_prevalence_2 = c(22, 26, 29, 44, 69, 227, 1667)) ``` --- Another example. 
In this case, the FP is constant across age: ```{r line-2} PPV_heatmap(min_Prevalence = 1, max_Prevalence = 2000, Sensitivity = 81, limits_Specificity = c(94, 100), label_subtitle = "Prenatal screening for Down Syndrome by Age", overlay = "line", overlay_labels = c("40 y.o.", "30 y.o.", "20 y.o."), overlay_position_FP = c(4.8, 4.8, 4.8), overlay_prevalence_1 = c(1, 1, 1), overlay_prevalence_2 = c(68, 626, 1068)) ``` --- ## PPV_diagnostic_vs_screening() In scientific studies developing a new test for the early detection of a medical condition, it is quite common to use a sample where 50% of participants has a medical condition and the other 50% are normal controls. This has the unintended effect of maximizing the PPV of the test. This function shows a plot with the difference between the PPV of a diagnostic context (very high prevalence; or a common study sample, e.g. ~50% prevalence) versus that of a screening context (lower prevalence). ```{r diagnostic} PPV_diagnostic_vs_screening(max_FP = 10, Sensitivity = 100, prevalence_screening_group = 1000, prevalence_diagnostic_group = 2) ``` --- ## min_possible_prevalence() Imagine you would like to use a test in a population and want to have a 98% PPV. That is, IF a positive result comes out in the test, you would like a 98% certainty that it is a true positive. How high should the prevalence of the disease be in that group? ```r min_possible_prevalence(Sensitivity = 100, FP_test = 0.1, min_PPV_desired = 98) ``` > To reach a PPV of 98 when using a test with 100 % Sensitivity and 0.1 % False Positive Rate, you need a prevalence of at least 1 out of 21 --- Another example, with a very good test, and lower expectations: ```r min_possible_prevalence(Sensitivity = 99.9, FP_test = .1, min_PPV_desired = 70) ``` > To reach a PPV of 70 when using a test with 99.9 % Sensitivity and 0.1 % False Positive Rate, you need a prevalence of at least 1 out of 429 --- ## plot_cutoff() Since v0.4.2 you can also plot the distributions of sick and healthy individuals and learn about how a cutoff point changes the True Positives, False Positives, True Negatives, False Negatives, Sensitivity, Specificity, PPV and NPV. ```{r cutoff} PLOTS = plot_cutoff(prevalence = 0.2, cutoff_point = 33, mean_sick = 35, mean_healthy = 20, sd_sick = 3, sd_healthy = 5 ) PLOTS$final_plot ``` Then, with `remove_layers_cutoff_plot()` you can remove specific layers, to help you understand some of these concepts. ```{r remove-cutoff} # Sensitivity remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("FP", "TN")) + ggplot2::labs(subtitle = "Sensitivity = TP/(TP+FN)") # Specificity remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("FN", "TP")) + ggplot2::labs(subtitle = "Specificity = TN/(TN+FP)") # PPV remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("TN", "FN")) + ggplot2::labs(subtitle = "PPV = TP/(TP+FP)") # NPV remove_layers_cutoff_plot(PLOTS$final_plot, delete_what = c("TP", "FP")) + ggplot2::labs(subtitle = "NPV = TN/(TN+FN)") ```
/scratch/gouwar.j/cran-all/cranData/BayesianReasoning/vignettes/introduction.Rmd
#' Factory to generate a parallel executor of an existing function #' #' @author Florian Hartig #' @param fun function to be changed to parallel execution #' @param parallel should a parallel R cluster be used or not. If set to T, cores will be detected automatically and n-1 of the available n cores of the machine will be used. Alternatively, you can set the number of cores used by hand #' @param parallelOptions list containing three lists. First "packages" determines the R packages necessary to run the likelihood function. Second "variables" the objects in the global environment needed to run the likelihood function and third "dlls" the DLLs needed to run the likelihood function (see Details). #' @note Can also be used to make functions compatible with library sensitivity #' @details For parallelization, option T means that an automatic parallelization via R is attempted, or "external", in which case it is assumed that the likelihood is already parallelized. In this case it needs to accept a matrix with parameters as columns. #' Further you can specify the packages, objects and DLLs that are exported to the cluster. #' By default a copy of your workspace is exported. However, depending on your workspace this can be very inefficient. #' #' Alternatively you can specify the environments and packages in the likelihood function (e.g. BayesianTools::VSEM() instead of VSEM()). #' @export #' @example /inst/examples/generateParallelExecuter.R generateParallelExecuter <- function(fun, parallel = F, parallelOptions = list(variables = "all", packages = "all", dlls = NULL)){ if (parallel == F){ parallelFun <- function(parMat, ...){ res <- apply(parMat, 1, fun, ...) if(! is.null(dim(res))) res = t(res) # to have results row-wise if multiple results are returned return(res) } cl <- "Cluster not defined for bayesianSetup if parallel = FALSE" }else{ #library(foreach) #library(iterators) # library(parallel) if (parallel == T | parallel == "auto"){ cores <- parallel::detectCores() - 1 } else if (is.numeric(parallel)){ cores <- parallel if (cores > parallel::detectCores()) stop("BayesianTools: error - more cores specified than available on this machine") } else stop("BayesianTools: error wrong argument to parallel") # get variables, packages, dlls in current workspace here if defaults are set in parameters cl <- parallel::makeCluster(cores) # update the parallelOptions based on user settings. 
defaultParallelOptions <- list(variables = "all", packages = "all", dlls = NULL) parallelOptions <- modifyList(defaultParallelOptions, parallelOptions) # get loaded packages if(is.null(parallelOptions$packages[1])) packages <- parallelOptions$packages else if(parallelOptions$packages[1] == "all") packages <- (.packages()) else packages <- parallelOptions$packages # get loaded DLLs if(is.null(parallelOptions$dlls[1])) dlls <- parallelOptions$dlls else if(parallelOptions$dlls[1] == "all"){ tmpdlls <- getLoadedDLLs() dlls <- vector(mode = "character", length = length(tmpdlls)) counter <- 0 for(i in tmpdlls){ counter <- counter+1 dlls[counter] <- i[[2]] } }else dlls <- unlist(parallelOptions$dlls) # get objects in global environment if(is.null(parallelOptions$variables[1])) objects = NULL else if(parallelOptions$variables[1] == "all") objects <- ls(envir = .GlobalEnv) else objects <- unlist(parallelOptions$variables) # function to export packages and dlls packageFun <- function(packages = NULL, dlls = NULL) { if(!is.null(packages)){ for(i in packages) library(i, character.only = TRUE) } if(!is.null(dlls)){ for(i in dlls) try(dyn.load(i), silent = T) } } # export packages, dlls and objects to cluster parallel::clusterCall(cl, packageFun, packages, dlls) parallel::clusterExport(cl, varlist = objects) #doParallel::registerDoParallel(cl) parallelFun <- function(parMat, ...){ res = parallel::parApply(cl = cl, parMat, 1, fun, ...) if(! is.null(dim(res))) res = t(res) # to have results row-wise if multiple results are returned return(res) } # parallelFun <- function(parMat){ # res <- foreach::foreach(parMat=iter(parMat, by='row'), .combine = "rbind", .packages = "BayesianTools")%dopar%{ # fun # } # if(! is.null(dim(res))) res = t(res) # to have results row-wise if multiple results are returned # return(res) # } message("parallel function execution created with", cores, "cores.") } return(list(parallelFun = parallelFun, cl = cl)) }
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/BayesianSetupGenerateParallel.R
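# Editor's sketch of how the factory above can be used on a toy likelihood. The
# names toyLik and parMat are illustrative; with parallel = FALSE the returned
# parallelFun just applies the function over the rows of a parameter matrix,
# while parallel = 2 (for example) would evaluate them on a 2-core socket cluster.
library(BayesianTools)
toyLik <- function(x) sum(dnorm(x, log = TRUE))
executor <- generateParallelExecuter(toyLik, parallel = FALSE)
parMat <- matrix(runif(30, -1, 1), ncol = 3)   # 10 parameter vectors, one per row
executor$parallelFun(parMat)                   # vector of 10 log-likelihood values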
#' @title BayesianTools
#' @name BayesianTools
#' @docType package
#' @useDynLib BayesianTools, .registration = TRUE
#' @description A package with general-purpose MCMC and SMC samplers, as well as plots and diagnostic functions for Bayesian statistics
#' @details A package with general-purpose MCMC and SMC samplers, as well as plots and diagnostic functions for Bayesian statistics, particularly for process-based models.
#'
#' The package contains two central functions: \code{\link{createBayesianSetup}}, which creates a standardized Bayesian setup with likelihood and priors, and \code{\link{runMCMC}}, which allows you to run various MCMC and SMC samplers.
#'
#' The package can of course also be used for general (non-Bayesian) target functions.
#'
#' To use the package, the first step is to use \code{\link{createBayesianSetup}} to create a BayesianSetup, which usually contains prior and likelihood densities, or in general a target function.
#'
#' This setup can be sampled with \code{\link{runMCMC}}, which can call a number of general-purpose Metropolis samplers, including \code{\link{Metropolis}}, which allows you to specify various popular Metropolis variants such as adaptive and/or delayed rejection Metropolis; two variants of differential evolution MCMC, \code{\link{DE}} and \code{\link{DEzs}}; two variants of DREAM, \code{\link{DREAM}} and \code{\link{DREAMzs}}; the \code{\link{Twalk}} MCMC; and a Sequential Monte Carlo sampler, \code{\link{smcSampler}}.
#'
#' The output of runMCMC is of class mcmcSampler / smcSampler if one run is performed, or mcmcSamplerList / smcSamplerList if several samplers are run. Various functions are available for plotting, model comparison (DIC, marginal likelihood), or to use the output as a new prior.
#'
#' For details on how to use the package, run vignette("BayesianTools", package="BayesianTools").
#'
#' To get the suggested citation, run citation("BayesianTools")
#'
#' To report bugs or ask for help, post a \href{https://stackoverflow.com/questions/5963269/how-to-make-a-great-r-reproducible-example}{reproducible example} via the BayesianTools \href{https://github.com/florianhartig/BayesianTools/issues}{issue tracker} on GitHub.
#'
#' Acknowledgements: The creation and maintenance of this package profited from funding and collaboration through Cost Action FP 1304 PROFOUND, DFG DO 786/12-1 CONECT, EU FP7 ERA-NET Sumforest REFORCE and Bayklif Project BLIZ.
NULL
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/BayesianTools.R
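# Editor's sketch of the basic workflow described above (createBayesianSetup +
# runMCMC); the toy likelihood and the settings are illustrative only.
library(BayesianTools)
ll <- function(par) sum(dnorm(par, mean = c(0, 2), sd = 1, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = c(-10, -10), upper = c(10, 10))
out <- runMCMC(setup, sampler = "DEzs", settings = list(iterations = 6000))
summary(out)   # convergence diagnostics, MAP, credible intervals
plot(out)      # trace and marginal density plots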
# Modified from https://gist.github.com/gaberoo/4619102 ############################################################################## # Estimate Deviance Information Criterion (DIC) # # References: # Bayesian Data Analysis. # Gelman, A., Carlin, J., Stern, H., and Rubin D. # Second Edition, 2003 # # Bayesian predictive information criterion for the evaluation of # hierarchical Bayesian and empirical Bayes models. # Ando, T. # Biometrika, 2007 # # Input: # x : matrix of posterior samples # lik : vector of the likelihood of the posterior samples # lik.fun : function that calculates the likelihood # ... : other parameters that are passed to 'lik.fun' # # Output: # list() # DIC : Deviance Information Criterion # IC : Bayesian Predictive Information Criterion # pD : Effective number of parameters (pD = Dbar - Dhat) # pV : Effective number of parameters (pV = var(D)/2) # Dbar : Expected value of the deviance over the posterior # Dhat : Deviance at the mean posterior estimate ############################################################################## #' Deviance information criterion #' @author Florian Hartig #' @param sampler An object of class bayesianOutput (mcmcSampler, smcSampler, or mcmcList) #' @param ... further arguments passed to \code{\link{getSample}} #' @references Spiegelhalter, D. J.; Best, N. G.; Carlin, B. P. & van der Linde, A. (2002) Bayesian measures of model complexity and fit. J. Roy. Stat. Soc. B, 64, 583-639.\cr\cr #' Gelman, A.; Hwang, J. & Vehtari, A. (2014) Understanding predictive information criteria for Bayesian models. Statistics and Computing, Springer US, 24, 997-1016-. #' @details Output: #' list with the following elements: \cr #' DIC : Deviance Information Criterion \cr #' IC : Bayesian Predictive Information Criterion \cr #' pD : Effective number of parameters (pD = Dbar - Dhat) \cr #' pV : Effective number of parameters (pV = var(D)/2) \cr #' Dbar : Expected value of the deviance over the posterior \cr #' Dhat : Deviance at the mean posterior estimate \cr #' @seealso \code{\link{WAIC}}, \code{\link{MAP}}, \code{\link{marginalLikelihood}} #' @export DIC <- function(sampler, ...){ draw = getSample(sampler, parametersOnly = F, ...) if(class(sampler)[1] %in% c("mcmcSamplerList", "smcSamplerList")) sampler = sampler[[1]] x = draw[,1:sampler$setup$numPars] lik = draw[,sampler$setup$numPars+2] lik.fun = sampler$setup$likelihood$density D.bar <- -2*mean(lik) if(is.vector(x)) theta.bar = mean(x) else theta.bar <- apply(x,2,mean) D.hat <- -2*lik.fun(theta.bar) pD <- D.bar - D.hat pV <- var(-2*lik)/2 list(DIC=pD+D.bar,IC=2*pD+D.bar,pD=pD,pV=pV,Dbar=D.bar,Dhat=D.hat) }
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/DIC.R
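# Editor's sketch: DIC() applied to an MCMC run; `out` is assumed to be the
# runMCMC result from the workflow sketch above.
dicRes <- DIC(out)
dicRes$DIC   # deviance information criterion
dicRes$pD    # effective number of parameters (Dbar - Dhat)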
#' Standard GOF metrics
#' @author Florian Hartig
#' @param predicted predicted values
#' @param observed observed values
#' @param plot should a plot be created
#' @param centered if T, variables are centered to the mean of the observations, i.e. the intercept is for the mean value of the observation
#'
#' @details The function considers observed ~ predicted and calculates
#'
#' 1) rmse = root mean squared error
#' 2) mae = mean absolute error
#' 3) a linear regression with slope, intercept and coefficient of determination R2
#'
#' For the linear regression, centered = T means that variables will be centered around the mean value of the observation. This setting avoids a correlation between slope and intercept (i.e. the intercept being != 0 as soon as the slope is != 1)
#'
#' @note In principle, it is possible to plot observed ~ predicted and predicted ~ observed. However, if we assume that the error is mainly on the y axis (observations), i.e. that observations scatter around the true (ideal) value, we should plot observed ~ predicted. See Pineiro et al. (2008). How to evaluate models: observed vs. predicted or predicted vs. observed?. Ecological Modelling, 216(3-4), 316-322.
#' @return A list with the following entries: rmse = root mean squared error, mae = mean absolute error, slope = slope of regression, offset = intercept of regression, R2 = R2 of regression
#' @example /inst/examples/GOF.R
#' @export
GOF <- function(observed, predicted, plot = F, centered = T){

  # root mean squared error
  rmse <- sqrt( mean( (predicted - observed)^2, na.rm = T) )

  # mean absolute error
  mae <- mean( abs(predicted - observed), na.rm = TRUE)

  #ssq <- sum( (predicted - observed)^2, na.rm= T)

  # linear regression
  if(centered == T){
    meanObs = mean(observed, na.rm = T)
    observed = observed - meanObs
    predicted = predicted - meanObs
  }
  linReg = lm( observed ~ predicted)

  if(plot == T){
    plot(observed ~ predicted)
    abline(linReg, col = "red")
    abline(v = 0, lty = 2)
    abline(h = 0 , lty = 2)
    #loessMod <- mgcv::gam(predicted ~ s(observed))
    #ord = order(predicted)
    #lines(predicted[ord], predict(loessMod)[ord], col = "green")
  }

  out = list(rmse = rmse, mae = mae, slope = as.numeric(coefficients(linReg)[2]), offset = as.numeric(coefficients(linReg)[1]), R2 = summary(linReg)$r.squared)
  return(out)
}
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/GOF.R
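# Editor's sketch: GOF() on simulated data. The observations scatter around the
# predictions, so observed ~ predicted is plotted as recommended in the note above.
pred <- 1:50
obs  <- pred + rnorm(50, sd = 3)
GOF(observed = obs, predicted = pred, plot = TRUE)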
#' Calculates the Maximum A Posteriori (MAP) value
#' @author Florian Hartig
#' @param bayesianOutput an object of class BayesianOutput (mcmcSampler, smcSampler, or mcmcList)
#' @param ... optional values to be passed on to the getSample function
#' @details Currently, this function simply returns the parameter combination with the highest posterior in the chain. A more refined option would be to take the MCMC sample and do additional calculations, e.g. use an optimizer, a kernel density estimator, or some other tool to search / interpolate around the best value in the chain
#' @seealso \code{\link{WAIC}}, \code{\link{DIC}}, \code{\link{marginalLikelihood}}
#' @export
MAP <- function(bayesianOutput, ...){

  samples = getSample(bayesianOutput, parametersOnly = F, ...)

  if("mcmcSamplerList" %in% class(bayesianOutput)) nPars <- bayesianOutput[[1]]$setup$numPars
  else nPars = bayesianOutput$setup$numPars

  best = which.max(samples[,nPars + 1])

  return(list(parametersMAP = samples[best, 1:nPars], valuesMAP = samples[best, (nPars + 1):(nPars + 3)] ))
}
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/MAP.R
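# Editor's sketch: extracting the MAP estimate from an MCMC run; `out` is assumed
# to be a runMCMC result as in the earlier sketches.
MAP(out)$parametersMAP   # parameter combination with the highest posterior in the chain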
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

#' C version of the VSEM model
#' @param par parameter vector
#' @param PAR Photosynthetically active radiation (PAR) MJ /m2 /day
#' @export
vsemC <- function(par, PAR) {
    .Call(`_BayesianTools_vsemC`, par, PAR)
}
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/RcppExports.R
#' Simulation-based calibration tests #' #' This function performs simulation-based calibration tests based on the idea that posteriors averaged over the prior should yield the prior. #' #' @param posteriorList a list with posterior samples. List items must be of a class that is supported by \code{\link{getSample}}. This includes BayesianTools objects, but also matrix and data.frame #' @param priorDraws a matrix with parameter values, drawn from the prior, that were used to simulate the data underlying the posteriorList. If colnames are provided, these will be used in the plots #' @param ... arguments to be passed to \code{\link{getSample}}. Consider in particular the thinning option. #' #' @details The purpose of this function is to evaluate the results of a simulation-based calibration of an MCMC analysis. #' #' Briefly, the idea is to repeatedly #' #' 1. sample parameters from the prior, #' 2. simulate new data based on these parameters, #' 3. calculate the posterior for these data #' #' If the sampler and the likelihood are implemented correctly, the average over all the posterior distribution should then again yield the prior (e.g. Cook et al., 2006). #' #' To test if this is the case, we implement the methods suggested by Talts et al., which is to calculate the rank statistics between the parameter draws and the posterior draws, which we then formally evaluate with a qq unif plot, and a ks.test #' #' I speculate that a ks.test between the two distribution would likely give an identical result, but this is not noted in Talts et al. #' #' Cook, S. R., Gelman, A. and Rubin, D. B. (2006). Validation of Software for Bayesian Models Using Posterior Quantiles. J. Comput. Graph. Stat. 15 675-692. #' #' Talts, Sean, Michael Betancourt, Daniel Simpson, Aki Vehtari, and Andrew Gelman. "Validating Bayesian Inference Algorithms with Simulation-Based Calibration." arXiv preprint arXiv:1804.06788 (2018). #' #' @note This function was implemented for the tests in Maliet, Odile, Florian Hartig, and Hélène Morlon. "A model with many small shifts for estimating species-specific diversification rates." Nature ecology & evolution 3.7 (2019): 1086-1092. The code linked with this paper provides a further example of its use. #' #' @author Florian Hartig #' #' @export #' calibrationTest <- function(posteriorList, priorDraws, ...){ x = mergeChains(posteriorList, ...) nPar <- ncol(x) oldPar <- par(mfrow = getPanels(nPar*3)) res = numeric(nPar) names(res) = colnames(priorDraws) for(i in 1:nPar){ lim = range(x[,i], priorDraws[,i]) hist(x[,i], breaks = 50, freq = F, col = "#99000020", main = colnames(priorDraws)[i]) hist(priorDraws[,i], breaks = 50, freq = F, col = "#00990020", add = T) cDens = ecdf(x[,i]) rankDist <- cDens(priorDraws[,i]) hist(rankDist, breaks = 50, freq = F) abline(h = 1, col = "red") gap::qqunif(rankDist,pch=2,bty="n", logscale = F, col = "black", cex = 0.6, main = colnames(priorDraws)[i], cex.main = 1) res[i] = ks.test(x[,i], priorDraws[,i])$p.value legend("topleft", c(paste("KS test: p=", round(res[i], digits = 5)), paste("Deviation ", ifelse(res[i] < 0.05, "significant", "n.s."))), text.col = ifelse(res[i] < 0.05, "red", "black" ), bty="n") } par(oldPar) out = list() out$statistic = NULL out$method = "ks.test on rank statistics posterior / parameters" out$alternative = "two.sided" out$p.value = res class(out) = "htest" return(out) }
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/SBC.R
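# Editor's sketch of the simulation-based calibration workflow described above,
# using an analytically known conjugate model instead of a real MCMC run:
# prior theta ~ N(0, 1), data y | theta ~ N(theta, 1), posterior theta | y ~ N(y/2, 1/2).
priorDraws <- matrix(rnorm(200), ncol = 1, dimnames = list(NULL, "theta"))
posteriorList <- lapply(priorDraws[, 1], function(theta) {
  y <- rnorm(1, mean = theta, sd = 1)                             # simulate one data set
  matrix(rnorm(2000, mean = y / 2, sd = sqrt(1 / 2)), ncol = 1)   # exact posterior sample
})
calibrationTest(posteriorList, priorDraws)   # rank histogram, qq-unif plot and KS test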
#' SMC sampler #' @author Florian Hartig #' @description Sequential Monte Carlo Sampler #' @param bayesianSetup either an object of class bayesianSetup created by \code{\link{createBayesianSetup}} (recommended), or a log target function #' @param initialParticles initial particles - either a draw from the prior, provided as a matrix with the single parameters as columns and each row being one particle (parameter vector), or a numeric value with the number of desired particles. In this case, the sampling option must be provided in the prior of the BayesianSetup. #' @param iterations number of iterations #' @param resampling if new particles should be created at each iteration #' @param resamplingSteps how many resampling (MCMC) steps between the iterations #' @param proposal optional proposal class #' @param adaptive should the covariance of the proposal be adapted during sampling #' @param proposalScale scaling factor for the proposal generation. Can be adapted if there is too much / too little rejection #' @details The sampler can be used for rejection sampling as well as for sequential Monte Carlo. For the former case set the iterations to one. #' #' @note The SMC currently assumes that the initial particle is sampled from the prior. If a better initial estimate of the posterior distribution is available, this the sampler should be modified to include this. Currently, however, this is not included in the code, so the appropriate adjustments have to be done by hand. #' @export #' @example /inst/examples/SMCHelp.R smcSampler <- function(bayesianSetup, initialParticles = 1000, iterations = 10, resampling = T, resamplingSteps = 2, proposal = NULL, adaptive = T, proposalScale = 0.5){ if(resamplingSteps < 1) stop("SMC error, resamplingSteps can't be < 1") setup <- checkBayesianSetup(bayesianSetup) info = list() info$resamplingAcceptance = matrix(nrow = iterations, ncol = resamplingSteps) info$survivingParticles = rep(NA, iterations) if(inherits(initialParticles, "numeric")){ initialParticles = bayesianSetup$prior$sampler(initialParticles) } if (any(is.infinite(setup$prior$density(initialParticles)))) stop("initialParticles outside prior range") particles <- initialParticles rejectionRate = 0 particleSize = nrow(initialParticles) acceptanceTarget = round(particleSize / 2) posterior = matrix(nrow = particleSize, ncol = 3) numPar <- ncol(initialParticles) if (is.null(proposal)) proposalGenerator = createProposalGenerator(rep(40,numPar)) usedUp = 0 for (i in 1:iterations){ posterior = setup$posterior$density(particles, returnAll = T) likelihoodValues <- posterior[,2] # idea - adjust (1/iterations) such that always approx 30% of particles are maintain #level = sort(likelihoodValues)[acceptanceTarget] #best = likelihoodValues ll = likelihoodValues - max(likelihoodValues, na.rm = T) llCutoff = sort(ll)[acceptanceTarget] relativeL = exp(likelihoodValues - max(likelihoodValues, na.rm = T))^(1/iterations) sel = sample.int(n=length(likelihoodValues), size = length(likelihoodValues), replace = T, prob = relativeL) info$survivingParticles[i] = length(unique(sel)) particles = particles[sel,] if (numPar == 1) particles = matrix(particles, ncol = 1) if (resampling == T){ if (adaptive == T){ proposalGenerator = updateProposalGenerator(proposalGenerator, particles) } for (j in 1:resamplingSteps){ particlesProposals = proposalGenerator$returnProposalMatrix(particles, scale = proposalScale) jumpProb <- exp(setup$posterior$density(particlesProposals) - likelihoodValues[sel])^(i/iterations) * 
exp(setup$prior$density(particlesProposals) - setup$prior$density(particles)) accepted <- jumpProb > runif(length(jumpProb), 0 ,1) rejectionRate = rejectionRate + sum(accepted) particles[accepted, ] = particlesProposals[accepted, ] } } } info$rejectionRate = rejectionRate / (iterations * resamplingSteps) out = list( setup = setup, initialParticles = initialParticles, particles = particles, posteriorValues = posterior, proposalGenerator = proposalGenerator, info = info ) class(out) <- c("smcSampler", "bayesianOutput") return(out) }
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/SMC.R
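# Editor's sketch: running smcSampler() on a toy setup. Passing a number as
# initialParticles means the particles are drawn from the prior, as noted above.
library(BayesianTools)
ll <- function(x) sum(dnorm(x, log = TRUE))
setup <- createBayesianSetup(ll, lower = c(-5, -5), upper = c(5, 5))
smcOut <- smcSampler(setup, initialParticles = 500, iterations = 5, resamplingSteps = 2)
colMeans(smcOut$particles)   # posterior mean estimated from the surviving particles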
#' Very simple ecosystem model #' @description A very simple ecosystem model, based on three carbon pools and a basic LUE model #' @param pars a parameter vector with parameters and initial states #' @param PAR Forcing, photosynthetically active radiation (PAR) MJ /m2 /day #' @param C switch to choose whether to use the C or R version of the model. C is much faster. #' @return a matrix with colums NEE, CV, CR and CS units and explanations see details #' @import Rcpp #' @useDynLib BayesianTools, .registration = TRUE #' @details This Very Simple Ecosystem Model (VSEM) is a 'toy' model designed to be very simple but yet bear some resemblance to deterministic processed based ecosystem models (PBMs) that are commonly used in forest modelling. #' #' The model determines the accumulation of carbon in the plant and soil from the growth of the plant via photosynthesis and senescence to the soil which respires carbon back to the atmosphere. #' #' The model calculates Gross Primary Productivity (GPP) using a very simple light-use efficiency (LUE) formulation multiplied by light interception. Light interception is calculated via Beer's law with a constant light extinction coefficient operating on Leaf Area Index (LAI). #' #' A parameter (GAMMA) determines the fraction of GPP that is autotrophic respiration. The Net Primary Productivity (NPP) is then allocated to above and below-ground vegetation via a fixed allocation fraction. Carbon is lost from the plant pools to a single soil pool via fixed turnover rates. Heterotropic respiration in the soil is determined via a soil turnover rate. #' #' The model equations are #' #' -- Photosynthesis #' #' \deqn{LAI = LAR*Cv} #' \deqn{GPP = PAR * LUE * (1 - \exp^{(-KEXT * LAI)})} #' \deqn{NPP = (1-GAMMA) * GPP} #' #' -- State equations #' \deqn{dCv/dt = Av * NPP - Cv/tauV} #' \deqn{dCr/dt = (1.0-Av) * NPP - Cr/tauR} #' \deqn{dCs/dt = Cr/tauR + Cv/tauV - Cs/tauS} #' #' The model time-step is daily. 
#' #' -- VSEM inputs: #' #' PAR Photosynthetically active radiation (PAR) MJ /m2 /day #' #' -- VSEM parameters: #' #' KEXT Light extinction coefficient m2 ground area / m2 leaf area #' #' LAR Leaf area ratio m2 leaf area / kg aboveground vegetation #' #' LUE Light-Use Efficiency (kg C MJ-1 PAR) #' #' GAMMA Autotrophic respiration as a fraction of GPP #' #' tauV Longevity of aboveground vegetation days #' #' tauR Longevity of belowground vegetation days #' #' tauS Residence time of soil organic matter d #' #' -- VSEM states: #' #' Cv Above-ground vegetation pool kg C / m2 #' #' Cr Below-ground vegetation pool kg C / m2 #' #' Cs Carbon in organic matter kg C / m2 #' #' -- VSEM fluxes: #' #' G Gross Primary Productivity kg C /m2 /day #' #' NPP Net Primary Productivity kg C /m2 /day #' #' NEE Net Ecosystem Exchange kg C /m2 /day #' @seealso \code{\link{VSEMgetDefaults}}, \code{\link{VSEMcreatePAR}}, , \code{\link{VSEMcreateLikelihood}} #' @example /inst/examples/VSEMHelp.R #' @export #' @author David Cameron, R and C implementation by Florian Hartig VSEM <- function(pars = c(KEXT = 0.5, LAR = 1.5, LUE = 0.002, GAMMA = 0.4, tauV = 1440, tauS = 27370, tauR = 1440, Av = 0.5, Cv = 3, Cs = 15, Cr = 3), PAR, C = TRUE){ if (C == T){ out <- vsemC(pars, PAR) colnames(out) = c("NEE", "Cv", "Cs", "CR") return(out) } else { numObs = length(PAR) KEXT = pars[1] LAR = pars[2] LUE = pars[3] GAMMA = pars[4] tauV = pars[5] tauS = pars[6] tauR = pars[7] Av = pars[8] Cv = pars[9] Cs = pars[10] Cr = pars[11] out = matrix(nrow = numObs, ncol = 4 ) colnames(out) = c("NEE", "Cv", "Cs", "CR") for (i in 1:numObs){ G = PAR[i] * LUE * (1 - exp(-KEXT*LAR*Cv)) NPP = (1-GAMMA)*G Cv = Cv + Av*NPP - Cv/tauV Cr = Cr + (1.0-Av)*NPP - Cr/tauR Cs = Cs + Cr/tauR + Cv/tauV - Cs/tauS NEE = (Cs/tauS + GAMMA*G) - G out[i, ] = c(NEE, Cv, Cs, Cr) } return(out) } } #' returns the default values for the VSEM #' @export #' @return a data.frame VSEMgetDefaults <- function(){ best = list( KEXT = 0.5, LAR = 1.5, LUE = 0.002, GAMMA = 0.4, tauV = 1440, tauS = 27370, tauR = 1440, Av = 0.5, Cv = 3.0, Cs = 15, Cr = 3.0 ) def = data.frame(best = unlist(best)) def$lower = c(0.2,0.2,0.0005, 0.2, 500,4000,500, 0.2, 0,0,0) def$upper= c(1,3,0.004, 0.6, 3000,50000,3000, 1, 400,1000,200) return(def) } #' Allows to mix a given parameter vector with a default parameter vector #' @param pars vector with new parameter values #' @param defaults vector with defaukt parameter values #' @param locations indices of the new parameter values #' @rdname package-deprecated #' @description This function is deprecated and will be removed by v0.2. #' @export createMixWithDefaults <- function(pars, defaults, locations){ .Deprecated(package = "BayesianTools") out = defaults out[locations] = pars return(out) } #' Create a random radiation (PAR) time series #' @author David Cameron, R implementation by Florian Hartig #' @param days days to calculate the PAR for #' @export VSEMcreatePAR <- function(days = 1:(3*365)){ PAR = (abs (sin(days/365 * pi)+ rnorm(length(days)) *0.25)) *10 return(PAR) } #' Create an example dataset, and from that a likelihood or posterior for the VSEM model #' @author Florian Hartig #' @param likelihoodOnly switch to devide whether to create only a likelihood, or a full bayesianSetup with uniform priors. 
#' @param plot switch to decide whether data should be plotted
#' @param selection vector containing the indices of the selected parameters
#' @details The purpose of this function is to be able to conveniently create a likelihood for the VSEM model for demonstration purposes. The function creates example data --> likelihood --> BayesianSetup, where the latter is the default return value.
#' @export
VSEMcreateLikelihood <- function(likelihoodOnly = F, plot = F, selection = c(1:6, 12)){

  # create radiation input
  PAR <- VSEMcreatePAR(1:1000)
  plotTimeSeries(observed = PAR)

  # create reference parameters and add one row for the SD of the observed data
  refPars <- VSEMgetDefaults()
  refPars[12,] <- c(0.1, 0.001, 0.5)
  rownames(refPars)[12] <- "error-sd"

  # create reference data
  referenceData <- VSEM(refPars$best[1:11], PAR)
  obs <- referenceData + rnorm(length(referenceData), sd = (abs(referenceData) + 1E-7) * refPars$best[12])

  # plot if that switch is on
  if(plot == T){
    oldpar <- par(mfrow = c(2,2))
    for (i in 1:4) plotTimeSeries(observed = obs[,i], predicted = referenceData[,i], main = colnames(referenceData)[i])
    par(oldpar)
  }

  # Create likelihood for reference data
  # sum = FALSE returns the point-wise log-likelihood values (needed e.g. for WAIC)
  likelihood <- function(x, sum = TRUE){
    mix = refPars$best
    mix[selection] = x
    predicted <- VSEM(mix[1:11], PAR)
    diff <- c(predicted[,1:3] - obs[,1:3])
    llValues <- dnorm(diff, sd = (abs(c(predicted[,1:3])) + 0.0000001) * mix[12], log = T)
    if (sum == FALSE) return(llValues)
    else return(sum(llValues))
  }

  if(likelihoodOnly == T) return(likelihood)
  else{
    bayesianSetup <- createBayesianSetup(likelihood, lower = refPars$lower[selection], upper = refPars$upper[selection], best = refPars$best[selection], names = rownames(refPars)[selection])
    return(bayesianSetup)
  }
}
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/VSEM.R
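# Editor's sketch: running the VSEM toy model above with its default parameters
# and a synthetic PAR time series, then plotting the four output columns.
library(BayesianTools)
PAR <- VSEMcreatePAR(1:365)
refPars <- VSEMgetDefaults()
sim <- VSEM(refPars$best, PAR)   # columns: NEE, Cv, Cs, CR
matplot(sim, type = "l", lty = 1, col = 1:4, xlab = "day", ylab = "flux / pool")
legend("topleft", colnames(sim), col = 1:4, lty = 1, bty = "n")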
# TODO - implement WAIC as AIC, can look at https://github.com/jrnold/mcmcStats/blob/master/R/waic.R, check against http://finzi.psych.upenn.edu/library/blmeco/html/WAIC.html, https://cran.r-project.org/web/packages/loo/index.html, http://stats.stackexchange.com/questions/173128/watanabe-akaike-widely-applicable-information-criterion-waic-using-pymc #' calculates the WAIC #' @author Florian Hartig #' @param bayesianOutput an object of class BayesianOutput. Must implement a log-likelihood density function that can return point-wise log-likelihood values ("sum" argument). #' @param numSamples the number of samples to calculate the WAIC #' @param ... optional values to be passed on the the getSample function #' @note The function requires that the likelihood passed on to BayesianSetup contains the option sum = T/F, with defaul F. If set to true, the likelihood for each data point must be returned. #' @details #' #' #' The WAIC is constructed as #' \deqn{WAIC = -2 * (lppd - p_{WAIC})} #' #' The lppd (log pointwise predictive density), defined in Gelman et al., 2013, eq. 4 as #' #' \deqn{lppd = \sum_{i=1}^n \log \left(\frac{1}{S} \sum_{s=1}^S p(y_i | \theta^s)\right)} #' #' #' The value of \eqn{p_WAIC} can be calculated in two ways, the method used is determined by the #' \code{method} argument. #' #' Method 1 is defined as, #' \deqn{p_{WAIC1} = 2 \sum_{i=1}^{n} (\log (\frac{1}{S} \sum_{s=1}^{S} p(y_i \ \theta^s)) - \frac{1}{S} \sum_{s = 1}^{S} \log p(y_i | \theta^s))} #' Method 2 is defined as, #' \deqn{p_{WAIC2} = 2 \sum_{i=1}^{n} V_{s=1}^{S} (\log p(y_i | \theta^s))} #' where \eqn{V_{s=1}^{S}} is the sample variance. #' #' @references #' #' Gelman, Andrew and Jessica Hwang and Aki Vehtari (2013), "Understanding Predictive Information Criteria for Bayesian Models," \url{http://www.stat.columbia.edu/~gelman/research/unpublished/waic_understand_final.pdf}. #' #' Watanabe, S. (2010). "Asymptotic Equivalence of Bayes Cross Validation and Widely Applicable Information Criterion in Singular Learning Theory", Journal of Machine Learning Research, \url{https://www.jmlr.org/papers/v11/watanabe10a.html}. #' @example /inst/examples/WAICHelp.R #' @seealso \code{\link{DIC}}, \code{\link{MAP}}, \code{\link{marginalLikelihood}} #' @export WAIC <- function(bayesianOutput, numSamples = 1000, ...){ x = getSample(bayesianOutput, parametersOnly = F, ...) # Catch nPars and ll density for mcmcList if("mcmcSamplerList" %in% class(bayesianOutput)){ if (bayesianOutput[[1]]$setup$pwLikelihood == FALSE) stop("WAIC can only be applied if the likelihood density can be returned point-wise ('sum' argument, see examples).") nPars = bayesianOutput[[1]]$setup$numPars llDensity <- bayesianOutput[[1]]$setup$likelihood$density }else{ if (bayesianOutput$setup$pwLikelihood == FALSE) stop("WAIC can only be applied if the likelihood density can be returned point-wise ('sum' argument, see examples).") nPars = bayesianOutput$setup$numPars llDensity <- bayesianOutput$setup$likelihood$density } # x = getSample(bayesianOutput, parametersOnly = F) i <- sample.int(nrow(x),numSamples,replace=TRUE) # should replace really be true? # Calculating log pointwise posterior predictive density, see Gelman et al., 2013, eq. 
4 # This is simply the mean likelihood of the posterior samples pointWiseLikelihood = t(llDensity(x[i,1:nPars], sum = F)) lppd <- sum(apply(pointWiseLikelihood, 2, logSumExp, mean = T)) # Calculating pWAIC options pWAIC1 <- 2 * sum(apply(pointWiseLikelihood, 2, function(y) logSumExp(y, mean = T) - mean(y)) ) pWAIC2 <- sum(apply(pointWiseLikelihood, 2, var)) out = list ( WAIC1 = -2 * (lppd - pWAIC1), WAIC2 = -2 * (lppd - pWAIC2), lppd = lppd, pWAIC1 = pWAIC1, pWAIC2 = pWAIC2) return(out) }
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/WAIC.R
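# Editor's sketch: as noted above, WAIC() needs a likelihood with a `sum`
# argument so that point-wise log-likelihood values can be returned. The toy
# model (normal mean and log-sd) is illustrative only.
obs <- rnorm(50, mean = 3, sd = 1)
ll <- function(par, sum = TRUE){
  d <- dnorm(obs, mean = par[1], sd = exp(par[2]), log = TRUE)
  if (sum) sum(d) else d
}
setup <- createBayesianSetup(ll, lower = c(-10, -5), upper = c(10, 5))
out <- runMCMC(setup, sampler = "DEzs", settings = list(iterations = 6000))
WAIC(out)   # returns WAIC1, WAIC2, lppd, pWAIC1, pWAIC2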
# runBenchmark <- function(bayesianSetup, starvalues, particles){ # # # # MCMC # testSampler <- mcmcSampler(bayesianSetup = bayesianSetup, startvalue = c(1,1,0), optimize = T) # testSampler2 <- getSamples(testSampler, 10000, DRlevels =2) # plot(testSampler2) # # # # # smcSampler # # # # # # }
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/benchmark.R
#' Determine the groups of correlated parameters #' @author Stefan Paul #' @param chain MCMC chain including only the parameters (not logP,ll, logP) #' @param blockSettings list with settings #' @return groups #' @keywords internal updateGroups <- function(chain,blockSettings){ settings <- getBlockSettings(blockSettings) blockUpdateType <- settings$blockUpdateType switch(blockUpdateType, "correlation" = { ## (Pair wise) Correlation in the parameters cormat <- abs(cor(chain[,1:(ncol(chain)-3),sample(1:dim(chain)[3],1)])) diag(cormat) <- 0 # Correct for NA and Inf values as this could cause error in as.dist() cormat[c(which(is.na(cormat)),which(cormat == Inf),which(cormat == -Inf)) ] <- 0 tree <- hclust(as.dist(1-cormat)) # get tree based on distance(dissimilarity = 1-cor). cT <- cutree(tree, k = settings$k, h = settings$h) # get groups. With h we can manipulate the strength of the interaction. }, "user" = { cT <- settings$groups }, "random" = { pool <- c(1:settings$k, sample(1:settings$k, (ncol(chain)-3-settings$k))) cT <- sample(pool) } ) pSel <- settings$pSel if(is.null(pSel) && is.null(settings$pGroup)) pSel = rep(1,ncol(chain)-3) return(list(cT = cT, pGroup = settings$pGroup, pSel = pSel)) } #' Determine the parameters in the block update #' @param blockSettings settings for block update #' @return vector containing the parameter to be updated #' @keywords internal getBlock <- function(blockSettings){ groups <- blockSettings$cT pGroup <- blockSettings$pGroup pSel <- blockSettings$pSel nGroups = max(groups) if(nGroups == 1) return(1:length(groups)) if (is.null(pGroup)) pGroup = rep(1,nGroups) if(length(pSel) > nGroups) pSel <- pSel[1:nGroups] pSel = c(pSel, rep(0,nGroups - length(pSel))) groupsToSample = sample.int(nGroups, 1, prob = pSel) selectedGroups = sample.int(nGroups,groupsToSample, prob = pGroup[1:nGroups]) GroupMember <- which(is.element(groups,selectedGroups)) return(GroupMember) } #' getblockSettings #' @description Transforms the original settings in settings used in the model runs #' @param blockUpdate input settings #' @return list with block settings #' @keywords internal getBlockSettings <- function(blockUpdate){ h <- k <- pSel <- pGroup <- groups <- NULL blockUpdateType <- blockUpdate[[1]] switch(blockUpdateType, "correlation" = { h <- blockUpdate$h k <- blockUpdate$k pSel <- blockUpdate$pSel pGroup <- blockUpdate$pGroup }, "random"={ k <- blockUpdate$k }, "user"= { groups <- blockUpdate$groups pSel <- blockUpdate$pSel pGroup <- blockUpdate$pGroup }) return(list(blockUpdateType = blockUpdateType, h = h, k = k, pSel = pSel, pGroup = pGroup, groups = groups)) }
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/blockUpdate.R
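# Editor's sketch (illustrative only; these helpers are internal, hence accessed
# via :::): how a user-defined block specification is resolved into the indices
# of the parameters updated together in one block-update step.
blockUpdate <- list("user", groups = c(1, 1, 2, 2), pSel = c(0.5, 0.5), pGroup = NULL)
bs <- BayesianTools:::getBlockSettings(blockUpdate)
blockInfo <- list(cT = bs$groups, pGroup = bs$pGroup, pSel = bs$pSel)
BayesianTools:::getBlock(blockInfo)   # e.g. 1:2, 3:4, or all four parameters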
# NOTE: The functions in this class are just templates that are to be implemented for all subclasses of BayesianOutput. They are not functional. #' Extracts the sample from a bayesianOutput #' @author Florian Hartig #' @param sampler an object of class mcmcSampler, mcmcSamplerList, smcSampler, smcSamplerList, mcmc, mcmc.list, double, numeric #' @param parametersOnly for a BT output, if F, likelihood, posterior and prior values are also provided in the output #' @param coda works only for mcmc classes - provides output as a coda object. Note: if mcmcSamplerList contains mcmc samplers such as DE that have several chains, the internal chains will be collapsed. This may not be the desired behavior for all applications. #' @param start for mcmc samplers start value in the chain. For SMC samplers, start particle #' @param end for mcmc samplers end value in the chain. For SMC samplers, end particle #' @param thin thinning parameter. Either an integer determining the thinning intervall (default is 1) or "auto" for automatic thinning. #' @param numSamples sample size (only used if thin = 1). If you want to use numSamples set thin to 1. #' @param whichParameters possibility to select parameters by index #' @param reportDiagnostics logical, determines whether settings should be included in the output #' @param ... further arguments #' @example /inst/examples/getSampleHelp.R #' @details If thin is greater than the total number of samples in the sampler object the first and the last element (of each chain if a sampler with multiples chains is used) are sampled. If numSamples is greater than the total number of samples all samples are selected. In both cases a warning is displayed. #' @details If thin and numSamples is passed, the function will use the thin argument if it is valid and greater than 1, else numSamples will be used. #' @export getSample <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = 1, numSamples = NULL, whichParameters = NULL, reportDiagnostics = FALSE, ...) UseMethod("getSample") #' @rdname getSample #' @author Florian Hartig #' @export getSample.matrix <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, reportDiagnostics = F, ...){ if(is.null(end)) end = nrow(sampler) out = sampler[start:end,, drop=F] ######################## # THINNING nTotalSamples <- nrow(out) thin <- correctThin(nTotalSamples, thin = thin) if (thin == 1 && !is.null(numSamples)) { out <- sampleEquallySpaced(out, numSamples) } else { sel = seq(1, nTotalSamples, by = thin) out = out[sel,, drop=F] } if (!is.null(whichParameters)) out = out[,whichParameters, drop = FALSE] if(coda == T) out = makeObjectClassCodaMCMC(out, start = start, end = end, thin = thin) if(reportDiagnostics == T){ return(list(chain = out, start = start, end = end, thin = thin)) } else return(out) } #' @rdname getSample #' @author Tankred Ott #' @export # TODO: This is right now only a helper function for getSample.mcmc. It is needed to return a vector istead of a matrix, if # the mcmc object passed to getSample.mcmc contains a vector. 
getSample.double <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, reportDiagnostics = F, ...){ if(is.null(end)) end = length(sampler) out <- sampler[start:end] nTotalSamples <- length(out) thin = correctThin(nTotalSamples, thin) if (thin == 1 && !is.null(numSamples)) { out <- sampleEquallySpaced(out, numSamples) } else { sel = seq(1, nTotalSamples, by = thin) out = out[sel] } return(out) } #' @rdname getSample #' @author Tankred Ott #' @export # TODO: This is right now only a helper function for getSample.mcmc. It is needed to return a vector instead of a matrix, if # the mcmc object passed to getSample.mcmc contains a vector. getSample.integer <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, reportDiagnostics = F, ...){ if(is.null(end)) end = length(sampler) out <- sampler[start:end] nTotalSamples <- length(out) thin = correctThin(nTotalSamples, thin) if (thin == 1 && !is.null(numSamples)) { out <- sampleEquallySpaced(out, numSamples) } else { sel = seq(1, nTotalSamples, by = thin) out = out[sel] } return(out) } #' @rdname getSample #' @author Tankred Ott #' @export getSample.data.frame <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, reportDiagnostics = F, ...){ getSample(as.matrix(sampler), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, whichParameters = whichParameters, reportDiagnostics = reportDiagnostics) } #' @rdname getSample #' @author Tankred Ott #' @export getSample.list <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, reportDiagnostics = F, ...){ if(!is.null(numSamples)) numSamples = ceiling(numSamples/length(sampler)) if(coda == F){ # out = NULL out <- rep(list(NA), length(sampler)) for (i in 1:length(sampler)){ # out = rbind(out, getSample(sampler[[i]], parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics= F)) out[[i]] <- getSample(sampler[[i]], parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics= F) } out <- combineChains(out) } if(coda == T){ out = list() for (i in 1:length(sampler)){ out[[i]] = getSample(sampler[[i]], parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics= F) } if(inherits(out[[1]], "mcmc.list")) out = unlist(out, recursive = F) class(out) = "mcmc.list" out = out } return(out) } # The following two S3 implementations make getSample compatible with coda::mcmc and coda::mcmc.list #' @rdname getSample #' @author Tankred Ott #' @export getSample.mcmc <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, reportDiagnostics = F, ...){ if(coda == T){ # mcmc objects can contain matrices or vectors if (is.matrix(sampler)) { nTotalSamples <- nrow(sampler) } else { nTotalSamples <- length(sampler) } if (is.null(end)) end = nTotalSamples # check/correct thin thin <- correctThin(nTotalSamples, thin) # see http://svitsrv25.epfl.ch/R-doc/library/coda/html/window.mcmc.html # for coda's window implementation 
return(window(sampler, start = start, end = end, thin = thin)) } else if(coda == F){ # mcmc objects can contain matrices or vectors # TODO: do vector case as 1-d matrix? if (is.matrix(sampler)) { out <- getSample(as.matrix(sampler), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics = reportDiagnostics) } else { out <- getSample(as.vector(sampler), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics = reportDiagnostics) } return(out) } } #' @author Tankred Ott #' @rdname getSample #' @export getSample.mcmc.list <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, reportDiagnostics = F, ...){ # TODO: implement handling of wrong inputs? if(coda == T){ if (is.matrix(sampler[[1]])) { nTotalSamples <- nrow(sampler[[1]]) } else { nTotalSamples <- length(sampler[[1]]) } if (is.null(end)) end = nTotalSamples # check/correct thin thin <- correctThin(nTotalSamples, thin) # see http://svitsrv25.epfl.ch/R-doc/library/coda/html/window.mcmc.html # for coda's window implementation return(window(sampler, start = start, end = end, thin = thin)) } else if(coda == F){ if(is.matrix(sampler[[1]])) { return(getSample(combineChains(sampler), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics = reportDiagnostics)) } else { return(as.vector(getSample(combineChains(sampler), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics = reportDiagnostics))) } } } # getSample implementation for nimble objects of class MCMC #' @rdname getSample #' @author Tankred Ott #' @export getSample.MCMC <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, reportDiagnostics = F, ...){ return(getSample(as.matrix(sampler$mvSamples), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics = reportDiagnostics)) } #' @rdname getSample #' @author Tankred Ott #' @export getSample.MCMC_refClass <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, reportDiagnostics = F, ...){ return(getSample(as.matrix(sampler$mvSamples), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics = reportDiagnostics)) } #' Merge Chains #' #' Merge a list of outputs from MCMC / SMC samplers #' #' The function merges a list of outputs from MCMC / SMC samplers into a single matrix. Requirement is that the list contains classes for which the \code{\link{getSample}} function works #' #' @param l a list with objects that can be accessed with \code{\link{getSample}} #' @param ... arguments to be passed on to \code{\link{getSample}} #' #' @return a matrix #' #' @author Florian Hartig #' #' @export mergeChains <- function(l, ...){ x = getSample(l[[1]], ...) for(i in 2:length(l)){ x = rbind(x, getSample(l[[i]], ...)) } return(x) }
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/classBayesianOutput.R
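# Editor's sketch: getSample() and mergeChains() on MCMC output; `out` is assumed
# to be a runMCMC result as in the earlier sketches.
pars   <- getSample(out, start = 500, thin = 10)       # thinned parameter matrix
codaCh <- getSample(out, coda = TRUE)                  # as coda mcmc / mcmc.list
full   <- getSample(out, parametersOnly = FALSE)       # adds Lposterior, Llikelihood, Lprior
merged <- mergeChains(list(out, out))                  # stack several outputs into one matrix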
#' Creates a standardized collection of prior, likelihood and posterior functions, including error checks etc.
#' @author Florian Hartig, Tankred Ott
#' @param likelihood log likelihood density function
#' @param prior either a prior class (see \code{\link{createPrior}}) or a log prior density function
#' @param priorSampler if a prior density (and not a prior class) is provided to prior, the optional prior sampling function can be provided here
#' @param lower vector with lower prior limits
#' @param upper vector with upper prior limits
#' @param best vector with best prior values
#' @param names optional vector with parameter names
#' @param parallel parallelization option. Default is F. Other options include T, or "external". See details.
#' @param parallelOptions list containing three lists. First "packages" determines the R packages necessary to run the likelihood function. Second "variables" the objects in the global environment needed to run the likelihood function and third "dlls" the DLLs needed to run the likelihood function (see Details and Examples).
#' @param catchDuplicates Logical, determines whether unique parameter combinations should only be evaluated once. Only used when the likelihood accepts a matrix with parameters as columns.
#' @param plotLower vector with lower limits for plotting
#' @param plotUpper vector with upper limits for plotting
#' @param plotBest vector with best values for plotting
#' @details If prior is of class prior (e.g. created with \code{\link{createPrior}}), priorSampler, lower, upper and best will be ignored.\cr If prior is a function (log prior density), priorSampler (custom sampler), or lower/upper (uniform sampler) is required.\cr If prior is NULL, and lower and upper are passed, a uniform prior (see \code{\link{createUniformPrior}}) will be created with boundaries lower and upper.
#'
#' For parallelization, BayesianTools requires that the likelihood can evaluate several parameter vectors (supplied as a matrix) in parallel.
#'
#' * parallel = T means that an automatic parallelization of the likelihood via a standard R socket cluster is attempted, using the function \code{\link{generateParallelExecuter}}. By default, of the N cores detected on the computer, N-1 cores are requested. Alternatively, you can provide an integer number to parallel, specifying the cores reserved for the cluster. When the cluster is created, a copy of your workspace, including DLLs and objects, is exported to the cluster workers. Because this can be very inefficient, you can explicitly specify the packages, objects and DLLs that are to be exported via parallelOptions. Using parallel = T requires that the function to be parallelized is well encapsulated, i.e. can run on a shared memory / shared hard disk machine in parallel without the runs interfering with each other.
#'
#' If automatic parallelization cannot be done (e.g. because dlls are not thread-safe or write to shared disk), and only in this case, you should specify parallel = "external". In this case, it is assumed that the likelihood is programmed such that it accepts a matrix with parameters as columns and the different model runs as rows. It is then up to the user if and how to parallelize this function. This option gives most flexibility to the user, in particular for complicated parallel architectures or shared memory problems.
#' #' For more details on parallelization, make sure to read both vignettes, in particular the section on the likelihood in the main vignette, and the section on parallelization in the vignette on interfacing models. #' #' @export #' @seealso \code{\link{checkBayesianSetup}} \cr #' \code{\link{createLikelihood}} \cr #' \code{\link{createPrior}} \cr #' @example /inst/examples/classBayesianSetup.R #' #' #@param model TODO createBayesianSetup <- function(likelihood, prior = NULL, priorSampler = NULL, parallel = FALSE, lower= NULL, upper = NULL, best = NULL, names = NULL, parallelOptions = list(variables = "all", packages = "all", dlls = NULL), catchDuplicates = FALSE, plotLower = NULL, plotUpper = NULL, plotBest = NULL ){ # TODO implement parameter "model" (function that makes predictions from the model) model <- NULL # INPUTS CHECKS if(is.null(upper) && is.null(lower) && is.null(prior)) stop("Either boundaries or prior density and prior sampler must be provided.") # if(!is.null(lower) || !is.null(upper) || !is.null(best)) print("DEPRECATED: lower/upper/best arguments for createBayesianSetup are deprecated and will be removed in a future update. Pass those arguments in the info parameter instead or use createUnformPrior.") if(("prior" %in% class(prior)) && (!is.null(lower) || !is.null(upper))) warning("Prior object and boundary values provided to createBayesiansetup, the latter will be ignored") if(("prior" %in% class(prior)) && (!is.null(priorSampler))) warning("Prior object and priorSampler provided to createBayesiansetup, the latter will be ignored") if(is.null(parallelOptions)) parallelOptions <- list(variables = "all", packages = "all", dlls = "all") # PRIOR CHECKS priorClass = NULL if ("prior" %in% class(prior)) { priorClass = prior } else if (inherits(prior,"bayesianOutput")) { priorClass = createPriorDensity(prior) } else if ("function" %in% class(prior)) { if ("function" %in% class(priorSampler)) priorClass = createPrior(prior, priorSampler) else if (!is.null(lower) && !is.null(upper)) priorClass = createPrior(prior, lower=lower, upper=upper, best=best) else stop("If prior is a function, priorSampler or lower/upper is required") } else if (is.null(prior)) { # TODO: deprecate this # checks for NULL for lower/upper are already done at begin of function priorClass = createUniformPrior(lower = lower, upper = upper, best = best) } else stop("wrong input for prior") # LIKELIHOOD CHECKS if ("likelihood" %in% class(likelihood)) { likelihoodClass = likelihood } else if ("function" %in% class(likelihood)) { likelihoodClass = createLikelihood(likelihood, parallel = parallel, parallelOptions = parallelOptions, catchDuplicates = catchDuplicates) } else { stop("likelihood must be an object of class likelihood or a function") } pwLikelihood = likelihoodClass$pwLikelihood # GET NUMBER OF PARAMETERS numPars = length(priorClass$sampler()) # CREATE POSTERIOR posteriorClass = createPosterior(priorClass,likelihoodClass) # CHECK FOR PLOTTING PARAMETERS if (is.null(plotLower)) plotLower <- priorClass$lower if (is.null(plotUpper)) plotUpper <- priorClass$upper if (is.null(plotBest)) plotBest <- priorClass$best if (is.null(plotLower) | is.null(plotUpper) | is.null(plotBest)) print("Info is missing upper/lower/best. This can cause plotting and sensitivity analysis functions to fail. 
If you want to use those functions provide (plot)upper/lower/best either in createBayesianSetup or prior") # CHECK NAMES if (is.null(names)) { if (!is.null(priorClass$parNames)) names = priorClass$parNames else if (!is.null(likelihoodClass$parNames)) names = likelihoodClass$parNames else if (numPars > 0) names = paste("par", 1:numPars) } # CONSTRUCT OUTPUT info <- list(priorLower = priorClass$lower, priorUpper = priorClass$upper, priorBest = priorClass$best, plotLower = plotLower, plotUpper = plotUpper, plotBest = plotBest, parNames = names, numPars = numPars) out <- list(prior = priorClass, likelihood = likelihoodClass, posterior = posteriorClass, names = names, numPars = numPars, model = model, parallel = parallel, pwLikelihood = pwLikelihood, info = info) class(out) <- "BayesianSetup" return(out) } # # #' Generates initial sample TODO # #' @param n TODO # #' @param checkInf TODO # #' @param overdispersed TODO # #' @param maxIterations TODO # #' @export # generateInitialSamples <- function(n, checkInf = T, overdispersed = F, maxIterations = 5){ # if(is.null(sampler)) stop("sampling not implemented") # done = F # # stop("to implement") # # # check infinity of likelihood / create overdispersion # # } #TODO: FH I wonder if we should keep this function option alive - seems better to me to explicitly do # this with the createBayesianSetup #' Checks if an object is of class 'BayesianSetup' #' @author Florian Hartig #' @description Function used to assure that an object is of class 'BayesianSetup'. If you pass a function, it is coverted to an object of class 'BayesianSetup' (using \code{\link{createBayesianSetup}}) before it is returned. #' @param bayesianSetup either object of class bayesianSetup or a log posterior function #' @param parallel if bayesianSetup is a function, this will set the parallelization option for the class BayesianSetup that is created internally. If bayesianSetup is already a BayesianSetup, then this will check if parallel = T is requested but not supported by the BayesianSetup. This option is for internal use in the samplers #' @note The recommended option to use this function in the samplers is to have parallel with default NULL in the samplers, so that checkBayesianSetup with a function will create a bayesianSetup without parallelization, while it will do nothing with an existing BayesianSetup. If the user sets parallelization, it will set the approriate parallelization for a function, and check in case of an existing BayesianSetup. The checkBayesianSetup call in the samplers should then be followed by a check for parallel = NULL in sampler, in which case paralell can be set from the BayesianSetup #' @seealso \code{\link{createBayesianSetup}} #' @export checkBayesianSetup <- function(bayesianSetup, parallel = F){ if(inherits(bayesianSetup, "function")){ if(is.null(parallel)) parallel = F bayesianSetup = createBayesianSetup(bayesianSetup, parallel = parallel) } else if(inherits(bayesianSetup, "BayesianSetup")){ if(!is.null(parallel)) if(parallel == T & bayesianSetup$parallel == F) stop("parallel = T requested in sampler but BayesianSetup does not support parallelization. 
See help of BayesianSetup on how to enable parallelization") } else stop("bayesianSetup must be class BayesianSetup or a function") return(bayesianSetup) } #' Function to close cluster in BayesianSetup #' @author Stefan Paul #' @description Function closes #' the parallel executer (if available) #' @param bayesianSetup object of class BayesianSetup #' @export stopParallel <- function(bayesianSetup){ ## Stop cluster try(parallel::stopCluster(bayesianSetup$likelihood$cl), silent = TRUE) ## Remove object # pos <- -1 # if(is.null(envir)) envir <- as.environment(pos) # .Internal(remove(deparse(substitute(bayesianSetup)), envir = envir, inherits = FALSE)) } #' @author Maximilian Pichler #' @export print.BayesianSetup <- function(x, ...){ cat('BayesianSetup: \n\n') bayesianSetup = x info = c( "priorLower", "priorUpper", "plotLower", "plotUpper") parInfo = data.frame(matrix(NA, ncol = 4, nrow = bayesianSetup$info$numPars)) colnames(parInfo) = info rownames(parInfo) = bayesianSetup$info$parNames for(i in 1:4) if(!is.null(bayesianSetup$info[[info[i]]])) parInfo[,i] <- bayesianSetup$info[[info[i]]] print(parInfo) }
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/classBayesianSetup.R
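# Editor's sketch of the parallel = "external" option described above: the
# likelihood itself must handle a matrix with one parameter vector per row
# (llVectorised is an illustrative stand-in for a genuinely parallel model).
library(BayesianTools)
ll <- function(par) sum(dnorm(par, log = TRUE))
llVectorised <- function(parMat){
  if (is.vector(parMat)) parMat <- matrix(parMat, nrow = 1)  # also accept single vectors
  apply(parMat, 1, ll)
}
setupExt <- createBayesianSetup(llVectorised, lower = rep(-10, 3), upper = rep(10, 3),
                                parallel = "external")
outExt <- runMCMC(setupExt, sampler = "DEzs", settings = list(iterations = 6000))
stopParallel(setupExt)   # closes the cluster when parallel = TRUE was used; harmless here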
#' Creates a standardized likelihood class#' #' @author Florian Hartig #' @param likelihood Log likelihood density #' @param names Parameter names (optional) #' @param parallel parallelization , either i) no parallelization --> F, ii) native R parallelization --> T / "auto" will select n-1 of your available cores, or provide a number for how many cores to use, or iii) external parallelization --> "external". External means that the likelihood is already able to execute parallel runs in form of a matrix with #' @param catchDuplicates Logical, determines whether unique parameter combinations should only be evaluated once. Only used when the likelihood accepts a matrix with parameter as columns. #' @param parallelOptions list containing two lists. First "packages" determines the R packages necessary to run the likelihood function. Second "objects" the objects in the global envirnment needed to run the likelihood function (for details see \code{\link{createBayesianSetup}}). #' @param sampler sampler #' @seealso \code{\link{likelihoodIidNormal}} \cr #' \code{\link{likelihoodAR1}} \cr #' @export createLikelihood <- function(likelihood, names = NULL, parallel = F, catchDuplicates=T, sampler = NULL, parallelOptions = NULL){ # check if point-wise likelihood available pwLikelihood = if ("sum" %in% names(as.list(args(likelihood)))) TRUE else FALSE catchingLikelihood <- function(x, ...){ out <- tryCatch( { y = likelihood(x, ...) if (any(y == Inf | is.nan(y) | is.na(y) | !is.numeric(y))){ message(paste("BayesianTools warning: positive Inf or NA / nan values, or non-numeric values occured in the likelihood. Setting likelihood to -Inf.\n Original value was", y, "for parameters", x, "\n\n ")) y[is.infinite(y) | is.nan(y) | is.na(y) | !is.numeric(y)] = -Inf } y }, error=function(cond){ cat(c("Parameter values ", x, "\n")) message("Problem encountered in the calculation of the likelihood with parameter ", x, "\n Error message was", cond, "\n set result of the parameter evaluation to -Inf ", "ParameterValues ") return(-Inf) } ) return(out) } # initalize cl cl <- NULL if (parallel == T | parallel == "auto" | is.numeric(parallel)) { tmp <- generateParallelExecuter(likelihood, parallel, parallelOptions) parallelLikelihood <- tmp$parallelFun cl <- tmp$cl parallel = T } parallelDensity<- function(x, ...){ if (is.vector(x)) return(catchingLikelihood(x, ...)) else if(is.matrix(x)){ if(catchDuplicates == TRUE){ # Check for the rows that are not duplicated wn <- which(!duplicated(x)) if(length(wn) <2) { return(parallelLikelihood(x, ...)) } else { # Define a output vector out1 <- rep(0,length=nrow(x)) # Run the likelihood function for unique values if (parallel == "external"){ out1[wn]<-likelihood(x[wn,], ...) } else{ if (parallel == T){ out1[wn]<-parallelLikelihood(x[wn,], ...) } else{ out1[wn]<-apply(x[wn,], 1, likelihood, ...) 
} } # Copy the values for the duplicates for(i in 1:length(out1)){ if(out1[i] != 0) next else{ same <- numeric() for(k in 1:length(out1)){ if(all(x[k,]== x[i,])){ same <- c(same,k) } } out1[same[-1]] <- out1[same[1]] } } return(out1) }} else{ if (parallel == "external") return(likelihood(x, ...)) else if (parallel == T){ return(parallelLikelihood(x, ...))} else return(apply(x, 1, likelihood, ...)) } } else stop("parameter must be vector or matrix") } out<- list(density = parallelDensity, sampler = sampler, cl = cl, pwLikelihood = pwLikelihood, parNames = names) class(out) <- "likelihood" return(out) } #library(mvtnorm) #library(sparseMVN) #' Normal / Gaussian Likelihood function #' @author Florian Hartig #' @param predicted vector of predicted values #' @param observed vector of observed values #' @param sd standard deviation of the i.i.d. normal likelihood #' @export likelihoodIidNormal <- function(predicted, observed, sd){ notNAvalues = !is.na(observed) if (sd <= 0) return (-Inf) else return(sum(dnorm(predicted[notNAvalues], mean = observed[notNAvalues], sd = sd, log = T))) } # TODO - gibbs sample out the error terms #' AR1 type likelihood function #' @author Florian Hartig #' @param predicted vector of predicted values #' @param observed vector of observed values #' @param sd standard deviation of the iid normal likelihood #' @param a temporal correlation in the AR1 model #' @note The AR1 model considers the process: \cr y(t) = a y(t-1) + E \cr e = i.i.d. N(0,sd) \cr |a| < 1 \cr At the moment, no NAs are allowed in the time series. #' @export likelihoodAR1 <- function(predicted, observed, sd, a){ if (any(is.na(observed))) stop("AR1 likelihood cannot work with NAs included, split up the likelihood") if (sd <= 0) return (-Inf) if (abs(a) >= 1) return (-Inf) n = length(observed) res = predicted - observed # this calculates the unconditiona LL for this data, see e.g. http://stat.unicas.it/downloadStatUnicas/seminari/2008/Julliard0708_1.pdf ll = 0.5 * ( - n * log(2*pi) - n * log(sd^2) + log( 1- a^2 ) - (1- a^2) / sd^2 * res[1]^2 - 1 / sd^2 * sum( (res[2:n] - a * res[1:(n-1)])^2) ) return(ll) } # Tests # library(stats) # data<-arima.sim(n=1000,model = list(ar=0.9)) # x <- ar(data, aic = F, order.max = 1) # opt <- function(par){ # -likelihoodAR1(data, rep(0,1000), sd = par[1], a = par[2] ) # } # optim(c(1.1,0.7), opt )
# ---- end of BayesianTools/R/classLikelihood.R ----
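# Usage sketch for the likelihood helpers in classLikelihood.R above. The data
# and the "model prediction" (a vector of zeros) are toy assumptions, not part
# of the package.
library(BayesianTools)
set.seed(1)
observed  <- as.numeric(arima.sim(n = 200, model = list(ar = 0.8)))
predicted <- rep(0, 200)                              # hypothetical model output
likelihoodIidNormal(predicted, observed, sd = 1)      # i.i.d. normal log-likelihood
likelihoodAR1(predicted, observed, sd = 1, a = 0.8)   # AR1 log-likelihood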
# Functions for class mcmcSamper #' @author Florian Hartig #' @export getSample.mcmcSampler <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = 1, numSamples = NULL, whichParameters = NULL, reportDiagnostics= F, ...){ if (inherits(sampler$chain, "matrix")){ if(is.null(end)) end = nrow(sampler$chain) if(parametersOnly == T) { out = sampler$chain[start:end,1:sampler$setup$numPars, drop = F] if(!is.null(sampler$setup$names)) colnames(out) = sampler$setup$names } else { out = sampler$chain[start:end,, drop = F] if(!is.null(sampler$setup$names)) colnames(out) = c(sampler$setup$names, "Lposterior", "Llikelihood", "Lprior") } ######################## # THINNING if (thin == "auto"){ thin = max(floor(nrow(out) / 5000),1) } if(is.null(thin) || thin == F || thin < 1 || is.nan(thin)) thin = 1 if(thin > nrow(out)) warning("thin is greater than the total number of samples!") if (! thin == 1){ sel = seq(1,dim(out)[1], by = thin ) out = out[sel,] } # Sample size if(thin == 1 && !is.null(numSamples)){ out <- sampleEquallySpaced(out, numSamples) } # TODO - see matrix, need to check if both thing and numSamples is set ############# if (!is.null(whichParameters)) out = out[,whichParameters, drop = F] if(coda == T) out = makeObjectClassCodaMCMC(out, start = start, end = end, thin = thin) } else if (inherits(sampler$chain, "mcmc.list")){ out = list() for (i in 1:length(sampler$chain)){ if(is.null(end)) end = nrow(sampler$chain[[1]]) temp = sampler$chain[[i]][start:end,, drop = F] if(parametersOnly == T) { temp = temp[,1:sampler$setup$numPars, drop = F] if(class(temp)[1] == "numeric") temp = as.matrix(temp) # case 1 parameter if(!is.null(sampler$setup$names)) colnames(temp) = sampler$setup$names } else { if(!is.null(sampler$setup$names)) colnames(temp) = c(sampler$setup$names, "Lposterior", "Llikelihood", "Lprior") } ######################## # THINNING if (thin == "auto"){ thin = max(floor(nrow(temp) / 5000),1) } if(is.null(thin) || thin == F || thin < 1 || is.nan(thin)) thin = 1 if(thin > nrow(temp)) warning("thin is greater than the total number of samples!") if (! thin == 1){ sel = seq(1,dim(temp)[1], by = thin ) temp = temp[sel,] } # Sample size if(thin == 1 && !is.null(numSamples)){ nSamplesPerChain <- ceiling(numSamples/length(sampler$chain)) if(i == 1){ if(nSamplesPerChain*length(sampler$chain) > numSamples) message("Due to internal chains, numSamples was rounded to the next number divisble by the number of chains.", call. = FALSE) } temp <- sampleEquallySpaced(temp, nSamplesPerChain) } ############# if (!is.null(whichParameters)) temp = temp[,whichParameters, drop = F] out[[i]] = makeObjectClassCodaMCMC(temp, start = start, end = end, thin = thin) } class(out) = "mcmc.list" #trueNumSamples <- sum(unlist(lapply(out, FUN = nrow))) #if (!is.null(numSamples) && trueNumSamples > numSamples) warning(paste(c("Could not draw ", numSamples, " samples due to rounding errors. Instead ", trueNumSamples," were drawn."))) if(coda == F){ out = combineChains(out) } if(coda == T){ out = out } }else stop("sampler appears not to be of class mcmcSampler") if(reportDiagnostics == T){ return(list(chain = out, start = start, end = end, thin = thin)) } else return(out) } #' @method summary mcmcSampler #' @author Stefan Paul #' @export summary.mcmcSampler <- function(object, ...){ #codaChain = getSample(sampler, parametersOnly = parametersOnly, coda = T, ...) 
#summary(codaChain) #rejectionRate(sampler$codaChain) #effectiveSize(sampler$codaChain) #DIC(sampler) #max() #} sampler <- object try(DInf <- DIC(sampler), silent = TRUE) MAPvals <- round(MAP(sampler)$parametersMAP,3) psf <- FALSE mcmcsampler <- sampler$settings$sampler runtime <- sampler$settings$runtime[3] correlations <- round(cor(getSample(sampler)),3) chain <- getSample(sampler, parametersOnly = T, coda = T, ...) # chain <- getSample(sampler, parametersOnly = T, coda = T) if("mcmc.list" %in% class(chain)){ psf <- TRUE nrChain <- length(chain) nrIter <- nrow(chain[[1]]) conv <- ifelse(chain$setup$numPars > 1, round(coda::gelman.diag(chain)$mpsrf,3), round(coda::gelman.diag(chain)$mpsrf,3)$psrf[1]) npar <- sampler$setup$numPars lowerq <- upperq <- numeric(npar) medi <- numeric(npar) parnames <- colnames(chain[[1]]) # Shorthen parameter names for (i in 1:npar) { if (nchar(parnames[i]) > 8) parnames[i] <- paste(substring(parnames[i], 1, 6), "...", sep = "") } for (i in 1:npar) { tmp <- unlist(chain[, i]) tmp <- quantile(tmp, probs = c(0.025, 0.5, 0.975)) lowerq[i] <- round(tmp[1], 3) medi[i] <- round(tmp[2], 3) upperq[i] <- round(tmp[3], 3) } } else{ nrChain <- 1 nrIter <- nrow(chain) npar <- sampler$setup$numPars conv <- "Only one chain; convergence cannot be determined!" medi <- numeric(npar) lowerq <- upperq <- numeric(npar) parnames <- colnames(chain) for (i in 1:npar) { tmp <- quantile(chain[, i], probs = c(0.025, 0.5, 0.975)) lowerq[i] <- round(tmp[1], 3) medi[i] <- round(tmp[2], 3) upperq[i] <- round(tmp[3], 3) } } parOutDF <- cbind(MAPvals, lowerq, medi, upperq) colnames(parOutDF) <- c("MAP", "2.5%", "median", "97.5%") if (psf == TRUE) { psf <- round(gelmanDiagnostics(sampler)$psrf[,1], 3) parOutDF <- cbind(psf, parOutDF) } row.names(parOutDF) <- parnames cat(rep("#", 25), "\n") cat("## MCMC chain summary ##","\n") cat(rep("#", 25), "\n", "\n") cat("# MCMC sampler: ",mcmcsampler, "\n") cat("# Nr. Chains: ", nrChain, "\n") cat("# Iterations per chain: ", nrIter, "\n") cat("# Rejection rate: ", ifelse(object$setup$numPars == 1 & class(chain) == "mcmc.list", # this is a hack because coda::rejectionRate does not work for 1-d MCMC lists round(mean(sapply(chain, coda::rejectionRate)),3), round(mean(coda::rejectionRate(chain)),3) ), "\n") cat("# Effective sample size: ", ifelse(sampler$setup$numPars == 1, round(coda::effectiveSize(chain),0), round(mean(coda::effectiveSize(chain)),0) ) , "\n") cat("# Runtime: ", runtime, " sec.","\n", "\n") cat("# Parameters\n") print(parOutDF) cat("\n") try(cat("## DIC: ", round(DInf$DIC,3), "\n"), silent = TRUE) cat("## Convergence" ,"\n", "Gelman Rubin multivariate psrf: ", conv, "\n","\n") cat("## Correlations", "\n") print(correlations) } #' @author Florian Hartig #' @method print mcmcSampler #' @export print.mcmcSampler <- function(x, ...){ print("mcmcSampler - you can use the following methods to summarize, plot or reduce this class:") print(methods(class ="mcmcSampler")) #codaChain = getSample(sampler, coda = T, ...) #rejectionRate(sampler$codaChain) #effectiveSize(sampler$codaChain) } #' @author Florian Hartig #' @method plot mcmcSampler #' @export plot.mcmcSampler <- function(x, ...){ tracePlot(x, ...) }
# ---- end of BayesianTools/R/classMcmcSampler.R ----
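# Usage sketch for the mcmcSampler methods above (getSample, summary). The toy
# likelihood, the bounds and the iteration number are illustrative assumptions.
library(BayesianTools)
ll    <- function(x) sum(dnorm(x, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = c(-10, -10), upper = c(10, 10))
out   <- runMCMC(setup, sampler = "Metropolis", settings = list(iterations = 3000))
head(getSample(out, thin = 10))        # thinned matrix of parameter samples
chain <- getSample(out, coda = TRUE)   # the same samples as a coda object
summary(out)                           # dispatches to summary.mcmcSampler above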
#' Convenience function to create an object of class mcmcSamplerList from a list of mcmc samplers #' @author Florian Hartig #' @param mcmcList a list with each object being an mcmcSampler #' @return Object of class "mcmcSamplerList" #' @export createMcmcSamplerList <- function(mcmcList){ # mcmcList <- list(mcmcList) -> This line didn't make any sense at all. Better would be to allow the user to simply provide several inputs without a list, but I guess the list option should be maintained, as this is convenient when scripting. for (i in 1:length(mcmcList)){ if (! ("mcmcSampler" %in% class(mcmcList[[i]])) ) stop("list objects are not of class mcmcSampler") } class(mcmcList) = c("mcmcSamplerList", "bayesianOutput") return(mcmcList) } #' @author Stefan Paul #' @method summary mcmcSamplerList #' @export summary.mcmcSamplerList <- function(object, ...){ #codaChain = getSample(sampler, parametersOnly = parametersOnly, coda = T, ...) #summary(codaChain) #rejectionRate(sampler$codaChain) #effectiveSize(sampler$codaChain) #DIC(sampler) #max() sampler <- object DInf <- DIC(sampler) MAPvals <- round(MAP(sampler)$parametersMAP,3) gelDiag <- gelmanDiagnostics(sampler) psf <- round(gelDiag$psrf[,1], 3) mcmcsampler <- sampler[[1]]$settings$sampler runtime <- 0 for(i in 1:length(sampler)) runtime <- runtime+sampler[[i]]$settings$runtime[3] correlations <- round(cor(getSample(sampler)),3) sampler <- getSample(sampler, parametersOnly = T, coda = T, ...) if("mcmc.list" %in% class(sampler)){ nrChain <- length(sampler) nrIter <- nrow(sampler[[1]]) conv <- round(gelDiag$mpsrf,3) npar <- ncol(sampler[[1]]) lowerq <- upperq <- numeric(npar) medi <- numeric(npar) parnames <- colnames(sampler[[1]]) for(i in 1:npar){ tmp <- unlist(sampler[,i]) tmp <- quantile(tmp, probs = c(0.025, 0.5, 0.975)) lowerq[i] <- round(tmp[1],3) medi[i] <- round(tmp[2],3) upperq[i] <- round(tmp[3],3) } }else{ nrChain <- 1 nrIter <- nrow(sampler) npar <- ncol(sampler) conv <- "Only one chain; convergence cannot be determined!" medi <- numeric(npar) lowerq <- upperq <- numeric(npar) parnames <- colnames(sampler) for(i in 1:npar){ tmp <- quantile(sampler[,i], probs = c(0.025, 0.5, 0.975)) lowerq[i] <- round(tmp[1],3) medi[i] <- round(tmp[2],3) upperq[i] <- round(tmp[3],3) } } # output for parameter metrics parOutDF <- cbind(psf, MAPvals, lowerq, medi, upperq) colnames(parOutDF) <- c("psf", "MAP", "2.5%", "median", "97.5%") row.names(parOutDF) <- parnames cat(rep("#", 25), "\n") cat("## MCMC chain summary ##","\n") cat(rep("#", 25), "\n", "\n") cat("# MCMC sampler: ",mcmcsampler, "\n") cat("# Nr. 
Chains: ", nrChain, "\n") cat("# Iterations per chain: ", nrIter, "\n") cat("# Rejection rate: ", ifelse(object[[1]]$setup$numPars == 1, # this is a hack because coda::rejectionRate does not work for 1-d MCMC lists round(mean(sapply(sampler, coda::rejectionRate)),3), round(mean(coda::rejectionRate(sampler)),3) ), "\n") cat("# Effective sample size: ", round(mean(coda::effectiveSize(sampler)),0), "\n") cat("# Runtime: ", runtime, " sec.","\n", "\n") cat("# Parameters\n") print(parOutDF) cat("\n") cat("## DIC: ", round(DInf$DIC,3), "\n") cat("## Convergence" ,"\n", "Gelman Rubin multivariate psrf: ", conv, "\n","\n") cat("## Correlations", "\n") print(correlations) } #' @author Florian Hartig #' @method print mcmcSamplerList #' @export print.mcmcSamplerList <- function(x, ...){ print("mcmcSamplerList - you can use the following methods to summarize, plot or reduce this class:") print(methods(class ="mcmcSamplerList")) #codaChain = getSample(sampler, coda = T, ...) #rejectionRate(sampler$codaChain) #effectiveSize(sampler$codaChain) } #' @method plot mcmcSamplerList #' @export plot.mcmcSamplerList <- function(x, ...){ tracePlot(x, ...) } #' @author Florian Hartig #' @export getSample.mcmcSamplerList <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = 1, numSamples = NULL, whichParameters = NULL, reportDiagnostics, ...){ if(!is.null(numSamples)) numSamples = ceiling(numSamples/length(sampler)) if(coda == F){ # out = NULL out <- rep(list(NA), length(sampler)) for (i in 1:length(sampler)){ # out = rbind(out, getSample(sampler[[i]], parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics= F)) out[[i]] <- getSample(sampler[[i]], parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics= F) } out <- combineChains(out) } if(coda == T){ out = list() for (i in 1:length(sampler)){ out[[i]] = getSample(sampler[[i]], parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics= F) } if(inherits(out[[1]], "mcmc.list")) out = unlist(out, recursive = F) class(out) = "mcmc.list" out = out } return(out) }
# ---- end of BayesianTools/R/classMcmcSamplerList.R ----
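# Sketch: combining two independent runs into an mcmcSamplerList so that the
# list methods above pool the chains. Target and run lengths are toy assumptions.
library(BayesianTools)
ll    <- function(x) sum(dnorm(x, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = c(-10, -10), upper = c(10, 10))
run1  <- runMCMC(setup, sampler = "Metropolis", settings = list(iterations = 3000))
run2  <- runMCMC(setup, sampler = "Metropolis", settings = list(iterations = 3000))
runs  <- createMcmcSamplerList(list(run1, run2))
summary(runs)                            # pooled summary, including Gelman psf
chains <- getSample(runs, coda = TRUE)   # coda::mcmc.list, one entry per run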
#' Creates a standardized posterior class #' @author Florian Hartig #' @param prior prior class #' @param likelihood Log likelihood density #' @details Function is internally used in \code{\link{createBayesianSetup}} to create a standarized posterior class. #' @export createPosterior <- function(prior, likelihood){ posterior <- function(x, returnAll = F){ if (is.vector(x)){ priorResult = prior$density(x) # Checking if outside prior to save calculation time if (! (priorResult == -Inf)) ll = likelihood$density(x) else ll = -Inf if (returnAll == F) return(ll + priorResult) else return(c(ll + priorResult, ll, priorResult)) } else if(is.matrix(x)){ priorResult = prior$density(x) # Checking first if outside the prior to save calculation time feasible <- (! priorResult == -Inf) if (dim(x)[2] == 1) llResult <- likelihood$density(matrix(x[feasible, ], ncol = 1)) else{ if(TRUE %in% feasible) llResult <- likelihood$density(x[feasible, ]) else llResult <- -Inf } post = priorResult ll = priorResult ll[!feasible] = NA ll[feasible] = llResult post[feasible] = post[feasible] + llResult post[!feasible] = -Inf if (returnAll == F) return(post) else{ out <- cbind(post, ll, priorResult) colnames(out) = c("posterior", "likelihood", "prior") return(out) } } else stop("parameter must be vector or matrix") } out<- list(density = posterior) class(out) <- "posterior" return(out) } # likelihood <- function(x)stop("a") # prior <- createPrior(function(x) sum(dunif(x, log = T))) # # x = createPosterior(prior, likelihood) # # x$density(c(0.2,0.2)) # prior$density(c(2,2)) # # # x = c(0.2,0.2)
# ---- end of BayesianTools/R/classPosterior.R ----
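# Sketch mirroring the commented test above: build prior and likelihood objects
# and combine them with createPosterior(). Both densities are toy choices.
library(BayesianTools)
prior      <- createPrior(density = function(x) sum(dunif(x, log = TRUE)),
                          sampler = function() runif(2))
likelihood <- createLikelihood(function(x) sum(dnorm(x, log = TRUE)))
post <- createPosterior(prior, likelihood)
post$density(c(0.2, 0.2))                    # log prior + log likelihood
post$density(c(0.2, 0.2), returnAll = TRUE)  # posterior, likelihood and prior parts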
#' Creates a standardized prior class #' @author Florian Hartig #' @param density Prior density #' @param sampler Sampling function for density (optional) #' @param lower vector with lower bounds of parameters #' @param upper vector with upper bounds of parameter #' @param best vector with "best" parameter values #' @details This is the general prior generator. It is highly recommended to not only implement the density, but also the sampler function. If this is not done, the user will have to provide explicit starting values for many of the MCMC samplers. Note the existing, more specialized prior function. If your prior can be created by those, they are preferred. Note also that priors can be created from an existing MCMC output from BT, or another MCMC sample, via \code{\link{createPriorDensity}}. #' @note min and max truncate, but not re-normalize the prior density (so, if a pdf that integrated to one is truncated, the integral will in general be smaller than one). For MCMC sampling, this doesn't make a difference, but if absolute values of the prior density are a concern, one should provide a truncated density function for the prior. #' @export #' @seealso \code{\link{createPriorDensity}} \cr #' \code{\link{createBetaPrior}} \cr #' \code{\link{createUniformPrior}} \cr #' \code{\link{createTruncatedNormalPrior}}\cr #' \code{\link{createBayesianSetup}}\cr #' @example /inst/examples/createPrior.R createPrior <- function(density = NULL, sampler = NULL, lower = NULL, upper = NULL, best = NULL){ # case density is a Bayesian Posterior if(inherits(density,"bayesianOutput")) return(createPriorDensity(density, lower = lower, upper = upper, best = best)) if(! is.null(lower) & ! is.null(upper)) if(any(lower > upper)) stop("prior with lower values > upper") if(is.null(best) & ! is.null(lower) & ! is.null(upper)) best = (upper + lower) / 2 # if no density is provided if (is.null(density)){ density <- function(x){ return(0) } } catchingPrior <- function(x){ # check if bounds are respected if(!is.null(lower)){ if (any(x < lower)) return(-Inf) } if(!is.null(upper)){ if (any(x > upper)) return(-Inf) } # calculate prior density within try-catch statement out <- tryCatch( { density(x) }, error=function(cond) { warning("Problem in the prior", cond) return(-Inf) } ) # extra check if (out == Inf) stop("Inf encountered in prior") return(out) } parallelDensity<- function(x){ if (is.vector(x)) return(catchingPrior(x)) else if(is.matrix(x)) return(apply(x, 1, catchingPrior)) else stop("parameter must be vector or matrix") } # Check and parallelize the sampler # if no sampler is passed, but lower and upper, generate uniform sampler if (is.null(sampler) && !is.null(lower) && !is.null(upper)) { sampler <- function(n = 1) { runif(n, lower, upper) } } if(!is.null(sampler)){ npar <- length(sampler()) parallelSampler <- function(n=NULL){ if(is.null(n)) out = sampler() else{ if (npar == 1) out = matrix(replicate(n, sampler())) else if (npar >1) out = t(replicate(n, sampler(), simplify = T)) else stop("sampler provided doesn't work") } return(out) } } else parallelSampler = function(n = NULL){ stop("Attept to call the sampling function of the prior, although this function has not been provided in the Bayesian setup. A likely cause of this error is that you use a function or sampling algorithm that tries to sample from the prior. 
Either change the settings of your function, or provide a sampling function in your BayesianSetup (see ?createBayesianSetup, and ?createPrior)") } checkPrior <- function(x = NULL, z = FALSE){ if(is.null(x)) x <- parallelSampler(1000) if(is.function(x)) x <- x() if(!is.matrix(x)) x <- parallelSampler(1000) check <- parallelDensity(x) if(any(is.infinite(check))) { if(z) warning("Z matrix values outside prior range", call. = FALSE) else warning("Start values outside prior range", call. = FALSE) } } out<- list(density = parallelDensity, sampler = parallelSampler, lower = lower, upper = upper, best = best, originalDensity = density, checkStart = checkPrior) class(out) <- "prior" return(out) } #' Convenience function to create a simple uniform prior distribution #' @author Florian Hartig #' @param lower vector of lower prior range for all parameters #' @param upper vector of upper prior range for all parameters #' @param best vector with "best" values for all parameters #' @note for details see \code{\link{createPrior}} #' @seealso \code{\link{createPriorDensity}}, \code{\link{createPrior}}, \code{\link{createBetaPrior}}, \code{\link{createTruncatedNormalPrior}}, \code{\link{createBayesianSetup}} #' @example /inst/examples/createPrior.R #' @export createUniformPrior<- function(lower, upper, best = NULL){ len = length(lower) density <- function(x){ if (length(x) != len) stop("parameter vector does not match prior") else return(sum(dunif(x, min = lower, max = upper, log = T))) } sampler <- function() runif(len, lower, upper) out <- createPrior(density = density, sampler = sampler, lower = lower, upper = upper, best = best) return(out) } #' Convenience function to create a truncated normal prior #' @author Florian Hartig #' @param mean best estimate for each parameter #' @param sd sdandard deviation #' @param lower vector of lower prior range for all parameters #' @param upper vector of upper prior range for all parameters #' @note for details see \code{\link{createPrior}} #' @seealso \code{\link{createPriorDensity}} \cr #' \code{\link{createPrior}} \cr #' \code{\link{createBetaPrior}} \cr #' \code{\link{createUniformPrior}} \cr #' \code{\link{createBayesianSetup}} \cr #' @export #' @example /inst/examples/createPrior.R createTruncatedNormalPrior<- function(mean, sd, lower, upper){ len = length(mean) density <- function(x){ if (length(x) != len) stop("parameter vector does not match prior") else return(sum(msm::dtnorm(x, mean = mean, sd = sd, lower = lower, upper = upper, log = T))) } sampler <- function(){ msm::rtnorm(n = length(mean), mean = mean, sd = sd, lower = lower, upper = upper) } out <- createPrior(density = density, sampler = sampler, lower = lower, upper = upper) return(out) } #' Convenience function to create a beta prior #' @author Florian Hartig #' @param a shape1 of the beta distribution #' @param b shape2 of the beta distribution #' @param upper upper values for the parameters #' @param lower lower values for the parameters #' @note for details see \code{\link{createPrior}} #' @details This creates a beta prior, assuming that lower / upper values for parameters are are fixed. The beta is the calculated relative to this lower / upper space. #' @seealso \code{\link{createPriorDensity}} \cr #' \code{\link{createPrior}} \cr #' \code{\link{createTruncatedNormalPrior}} \cr #' \code{\link{createUniformPrior}} \cr #' \code{\link{createBayesianSetup}} \cr #' @example /inst/examples/createPrior.R #' @export createBetaPrior<- function(a, b, lower=0, upper=1){ len = length(lower) if (! 
any(upper > lower)) stop("wrong values in beta prior") range = upper - lower density <- function(x){ x = (x - lower) / range if (length(x) != len) stop("parameter vector does not match prior") else return(sum( dbeta(x, shape1 = a, shape2 = b, log=T) )) } sampler <- function(){ out = rbeta(n = len, shape1 = a, shape2 = b) out = (out * range) + lower return(out) } out <- createPrior(density = density, sampler = sampler, lower = lower, upper = upper) return(out) } #' Fits a density function to a multivariate sample #' #' @author Florian Hartig #' @export #' @param sampler an object of class BayesianOutput or a matrix #' @param method method to generate prior - default and currently only option is multivariate #' @param eps numerical precision to avoid singularity #' @param lower vector with lower bounds of parameter for the new prior, independent of the input sample #' @param upper vector with upper bounds of parameter for the new prior, independent of the input sample #' @param best vector with "best" values of parameter for the new prior, independent of the input sample #' @param scaling optional scaling factor for the covariance. If scaling > 1 will create a prior wider than the posterior, < 1 a prior more narrow than the posterior. Scaling is linear to the posterior width, i.e. scaling = 2 will create a prior that with 2x the sd of the original posterior. #' @param ... parameters to pass on to the getSample function #' #' @details This function fits a density estimator to a multivariate (typically a posterior) sample. The main purpose is to summarize a posterior sample as a pdf, in order to include it as a prior in a new analysis, for example when new data becomes available, or to calculate a fractional Bayes factor (see \code{\link{marginalLikelihood}}). #' #' The limitation of this function is that we currently only implement a multivariate normal density estimator, so you will have a loss of information if your posterior is not approximately multivariate normal, which is likely the case if you have weak data. Extending the function to include more flexible density estimators (e.g. gaussian processes) is on our todo list, but it's quite tricky to get this stable, so I'm not sure when we will have this working. In general, creating reliable empirical density estimates in high-dimensional parameter spaces is extremely tricky, regardless of the software you are using. #' #' For that reason, it is usually recommended to not update the posterior with this option, but rather: #' #' 1. If the full dataset is available, to make a single, or infrequent updates, recompute the entire model with the full / updated data #' #' 2. For frequent updates, consider using SMC instead of MCMC sampling. SMC sampling doesn't require an analytical summary of the posterior. #' #' @seealso \code{\link{createPrior}} \cr #' \code{\link{createBetaPrior}} \cr #' \code{\link{createTruncatedNormalPrior}} \cr #' \code{\link{createUniformPrior}} \cr #' \code{\link{createBayesianSetup}} \cr #' @example /inst/examples/createPrior.R createPriorDensity <- function(sampler, method = "multivariate", eps = 1e-10, lower = NULL, upper = NULL, best = NULL, scaling = 1, ...){ x = getSample(sampler, ...) 
if(method == "multivariate"){ nPars = ncol(x) covar = cov(x) * scaling^2 mean = apply(x, 2, mean) if(is.null(lower)) lower = rep(-Inf, length = length(mean)) if(is.null(upper)) upper = rep(Inf, length = length(mean)) density = function(par){ dens = tmvtnorm::dtmvnorm(x = par, mean = mean, sigma = covar + eps, log = T, lower = lower, upper = upper) return(dens) } sampler = function(n=1){ par <- tmvtnorm::rtmvnorm(n = n, mean = mean, sigma = covar + eps, lower = lower, upper = upper, algorithm = "rejection") if (n == 1) par = as.vector(par) return(par) } out <- createPrior(density = density, sampler = sampler, lower = lower, upper = upper, best = best) return(out) } } #' @author Maximilian Pichler #' @export print.prior <- function(x, ...){ cat('Prior: \n\n') prior = x info = c( "lower", "upper","best") maxPar = max(length(prior$lower),length(prior$lupper)) if(maxPar == 0) maxPar = ncol(prior$sampler()) priorInfo = data.frame(matrix(NA, ncol = 3, nrow = maxPar)) colnames(priorInfo) = info for(i in 1:3) if(!is.null(prior[[info[i]]])) priorInfo[,i] <- prior[[info[i]]] rownames(priorInfo) <- sapply(1:maxPar, FUN = function(x) return(paste("par",x))) print(priorInfo) }
# ---- end of BayesianTools/R/classPrior.R ----
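# Usage sketch for the prior constructors above; bounds, means and shape
# parameters are purely illustrative.
library(BayesianTools)
pUnif <- createUniformPrior(lower = c(0, 0), upper = c(1, 10))
pNorm <- createTruncatedNormalPrior(mean = c(0.5, 5), sd = c(0.2, 2),
                                    lower = c(0, 0), upper = c(1, 10))
pBeta <- createBetaPrior(a = 2, b = 2, lower = 0, upper = 10)
pUnif$density(c(0.5, 5))   # log prior density
pNorm$sampler()            # one draw from the truncated normal prior
pBeta$sampler()            # one draw, rescaled to the interval [0, 10]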
#' Convenience function to create an object of class SMCSamplerList from a list of mcmc samplers #' @author Florian Hartig #' @param ... a list of MCMC samplers #' @return a list of class smcSamplerList with each object being an smcSampler #' @export createSmcSamplerList <- function(...){ smcList <- list(...) for (i in 1:length(smcList)){ if (! ("mcmcSampler" %in% class(smcList[[i]])) ) stop("list objects are not of class mcmcSampler") } class(smcList) = c("smcSamplerList", "bayesianOutput") return(smcList) } #' @method summary smcSamplerList #' @author Florian Hartig #' @export summary.smcSamplerList <- function(object, ...){ sample = getSample(object, parametersOnly = T, ...) summary(sample) } #' @method print smcSamplerList #' @author Florian Hartig #' @export print.smcSamplerList <- function(x, ...){ print("smcSamplerList - you can use the following methods to summarize, plot or reduce this class:") print(methods(class ="smcSamplerList")) } #' @method plot smcSamplerList #' @export plot.smcSamplerList <- function(x, ...){ marginalPlot(x, ...) } #' @export getSample.smcSamplerList <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = 1, numSamples = NULL, whichParameters = NULL, reportDiagnostics = FALSE, ...){ out = list() for (i in 1:length(sampler)){ out[[i]] = getSample(sampler[[i]], parametersOnly = parametersOnly, whichParameters = whichParameters, start = start, end = end, thin = thin, numSamples = numSamples, coda = F, reportDiagnostics = F) } out = combineChains(out, merge =F) return(out) }
# ---- end of BayesianTools/R/classSMCSamplerList.R ----
#' @author Florian Hartig #' @export getSample.smcSampler <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = 1, numSamples = NULL, whichParameters = NULL, reportDiagnostics = FALSE, ...){ if(is.null(end)) end = nrow(sampler$particles) if(parametersOnly == T) { out = sampler$particles[start:end,] if(!is.null(sampler$setup$names)) colnames(out) = sampler$setup$names } else { out = cbind(sampler$particles[start:end,] , sampler$posterior[start:end,] ) if(!is.null(sampler$setup$names)) colnames(out) = c(sampler$setup$names, "Lposterior", "Llikelihood", "Lprior") } ######################## # THINNING if (thin == "auto"){ thin = max(floor(nrow(out) / 5000),1) } if(is.null(thin) || thin == F || thin < 1) thin = 1 if (! thin == 1){ sel = seq(1,dim(out)[1], by = thin ) out = out[sel,] } # Sample size if(thin == 1 && !is.null(numSamples)){ if (numSamples > nrow(out)) { numSamples = nrow(out) warning("numSamples is greater than the total number of samples! All samples were selected.") } if (numSamples < 1) numSamples = 1; sel <- seq(1,dim(out)[1], len = numSamples) out <- out[sel,] } ############# if (!is.null(whichParameters)) out = out[,whichParameters] if(reportDiagnostics == T){ return(list(chain = out, start = start, end = end, thin = thin)) } else return(out) } #' @author Florian Hartig #' @method summary smcSampler #' @export summary.smcSampler<- function(object, ...){ sampler <- object print("SMC sampler output") summary(getSample(sampler, ...)) } #' @method plot smcSampler #' @export plot.smcSampler<- function(x, ...){ marginalPlot(x, ...) } #' @author Florian Hartig #' @method print smcSampler #' @export print.smcSampler <- function(x, ...){ print("smcSampler - you can use the following methods to summarize, plot or reduce this class:") print(methods(class ="smcSampler")) }
# ---- end of BayesianTools/R/classSmcSampler.R ----
#' Function to combine chains #' #' @param x a list of MCMC chains #' @param merge logical determines whether chains should be merged #' @return combined chains #' #' @note to combine several chains to a single McmcSamplerList, see \code{\link{createMcmcSamplerList}} #' #' @keywords internal combineChains <- function(x, merge = T){ if(merge == T){ temp1 = as.matrix(x[[1]]) names = colnames(temp1) sel = seq(1, by = length(x), len = nrow(temp1) ) out = matrix(NA, nrow = length(x) * nrow(temp1), ncol = ncol(temp1)) out[sel, ] = temp1 if (length(x) > 1){ for (i in 2:length(x)){ out[sel+i-1, ] = as.matrix(x[[i]]) } } colnames(out) = names } else{ out = as.matrix(x[[1]]) if (length(x) > 1){ for (i in 2:length(x)){ out = rbind(out, as.matrix(x[[i]])) } } } return(out) } #' Helper function to change an object to a coda mcmc class, #' #' @param chain mcmc Chain #' @param start for mcmc samplers start value in the chain. For SMC samplers, start particle #' @param end for mcmc samplers end value in the chain. For SMC samplers, end particle #' @param thin thinning parameter #' @return object of class coda::mcmc #' @details Very similar to coda::mcmc but with less overhead #' @keywords internal makeObjectClassCodaMCMC <- function (chain, start = 1, end = numeric(0), thin = 1){ attr(chain, "mcpar") <- c(start, end, thin) attr(chain, "class") <- "mcmc" chain }
# ---- end of BayesianTools/R/codaFunctions.R ----
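# Sketch of the internal combineChains() helper on two toy coda chains; it is
# not exported, hence the ::: access. merge = TRUE interleaves the rows,
# merge = FALSE simply stacks them.
c1 <- coda::mcmc(matrix(rnorm(20), ncol = 2, dimnames = list(NULL, c("a", "b"))))
c2 <- coda::mcmc(matrix(rnorm(20), ncol = 2, dimnames = list(NULL, c("a", "b"))))
BayesianTools:::combineChains(list(c1, c2))                # interleaved 20 x 2 matrix
BayesianTools:::combineChains(list(c1, c2), merge = FALSE) # rbind of both chains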
#' Convert coda::mcmc objects to BayesianTools::mcmcSampler #' @description Function is used to make the plot and diagnostic functions #' available for coda::mcmc objects #' @param sampler An object of class mcmc or mcmc.list #' @param names vector giving the parameter names (optional) #' @param info matrix (or list with matrices for mcmc.list objects) with three coloumns containing log posterior, log likelihood and log prior of the sampler for each time step (optional; but see Details) #' @param likelihood likelihood function used in the sampling (see Details) #' @details The parameter 'likelihood' is optional for most functions but can be needed e.g for #' using the \code{\link{DIC}} function. #' #' Also the parameter info is optional for most uses. However for some functions (e.g. \code{\link{MAP}}) #' the matrix or single coloumns (e.g. log posterior) are necessary for the diagnostics. #' @export convertCoda <- function(sampler, names = NULL, info = NULL, likelihood = NULL){ likelihood <- list(density = likelihood) if(inherits(sampler, "mcmc")){ if(is.null(names)){ names <- paste("Par",1:ncol(sampler)) } setup <- list(names = names, numPars = ncol(sampler), likelihood = likelihood) if(is.null(info)) info <- matrix(NA, nrow = nrow(sampler), ncol = 3) out <- list(chain = cbind(sampler,info), setup = setup) class(out) = c("mcmcSampler", "bayesianOutput") }else{ if(inherits(sampler, "mcmc.list")){ if(is.null(names)){ names <- paste("Par",1:ncol(sampler[[1]])) } setup <- list(names = names, numPars = ncol(sampler[[1]]), likelihood = likelihood) if(is.null(info)){ info <- list() for(i in 1:length(sampler)) info[[i]] <- matrix(NA, nrow = nrow(sampler[[1]]), ncol = 3) } chain <- list() for(i in 1:length(sampler)){ chain[[i]] <- cbind(sampler[[i]], info[[i]]) } class(chain) = "mcmc.list" out <- list(chain = chain, setup = setup) class(out) = c("mcmcSampler", "bayesianOutput") }else stop("sampler must be of class 'coda::mcmc' or 'coda::mcmc.list'") } return(out) }
# ---- end of BayesianTools/R/convertCoda.R ----
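# Sketch: wrap a plain coda::mcmc object (here simulated noise, not a real
# posterior) so the BayesianTools getSample/plot machinery can be used on it.
library(BayesianTools)
m  <- coda::mcmc(matrix(rnorm(3000), ncol = 3))
bt <- convertCoda(m, names = c("a", "b", "c"))
summary(getSample(bt))   # plain matrix summary of the wrapped chain
tracePlot(bt)            # BayesianTools trace plot of the converted object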
#' Gelman Diagnostics #' #' Runs Gelman Diagnotics for an object of class BayesianOutput #' #' @author Florian Hartig #' @param sampler an object of class mcmcSampler or mcmcSamplerList #' @param thin parameter determining the thinning intervall. Either an integer or "auto" (default) for automatic thinning. #' @param plot should a Gelman plot be generated #' @param ... further arguments passed to \code{\link{getSample}} #' #' @details The function calls [coda::gelman.diag] to calculate Gelman-Rubin diagnostics [coda::gelman.plot] to produce the plots. #' #' The idea of these diagnostics is to compare withing and between chain variance of several independent MCMC runs (Gelman & Rubin, 1992). The ratio of the 2 is called the potential scale reduction factor (psfr, also called Rhat). If psfr = 1, this suggest that the independent MCMC runs are essentially identical, and which in turn suggests that they have converged. In practice, values < 1.05, or sometimes < 1.1 for all parameters are considered acceptable. #' #' To obtain reliable Gelman-Rubin diagnostics, the independent MCMCs should be started at different points of the parameter space, ideally overdispersed. #' #' The diagnostics also calculate a multivariate version of the psrf (mpsrf, Brooks & Gelman 1998). In practice, values < 1.1 or < 1.2 are often considered acceptable. While useful as an overview, mpsrf < 1.1 does not necessarily mean that all individual psrf < 1.05, and thus I would in doubt recommend looking at the individual psrf and decide on a case-by-case basis if a lack of convergence for a particular parameter is a concern. #' #' Also, note that convergence is a continuum, and different aspects of a posterior estimation converge with different speed. The rules about 1.05 were obtained by looking at the error of the posterior median / mean. If the goal for the inference is a posterior quantity that is more unstable than the mean, for example tail probabilities or the DIC, one should try to obtain large posterior samples with smaller psrf values. #' #' **Note on the use of Gelman diagnostics for population MCMCs, in particular the DE sampler family**: the Gelman diagnostics were originally designed for being applied to the outcome of several independent MCMC runs. Technically and practically, it can also be applied to a single population MCMC run that has several internal chains, such as DE, DEzs, DREAM, DREAMzs or T-Walk. As argued in ter Braak et al. (2008), the internal chains should be independent after burn-in. While this is likely correct, it also means that they are not completely independent before, and we observed this behavior in the use of the algorithms (i.e. that internal DEzs chains are more similar to each other than the chains of independent DEzs algorithms), see for example [BT issue 226](https://github.com/florianhartig/BayesianTools/issues/226). A concern is that this non-independence could lead to a failure to detect that the sampler hasn't converged yet, due to a wrong burn-in. We would therefore recommend to run several DEzs and check convergence with those, instead of running only one. #' #' @references #' #' Gelman, A and Rubin, DB (1992) Inference from iterative simulation using multiple sequences, Statistical Science, 7, 457-511. #' #' Brooks, SP. and Gelman, A. (1998) General methods for monitoring convergence of iterative simulations. Journal of Computational and Graphical Statistics, 7, 434-455. #' #' ter Braak, Cajo JF, and Jasper A. Vrugt. 
"Differential evolution Markov chain with snooker updater and fewer chains." Statistics and Computing 18.4 (2008): 435-446. #' #' @export gelmanDiagnostics <- function(sampler, thin = "auto", plot = F, ...){ sample = getSample(sampler, coda = T, ...) if(! ("mcmc.list" == class(sample))) stop("Trying to apply gelmanDiagnostics to an object that doesn't return an mcmc.list. Make sure you have a sampler that runs several chains, or an mcmcSamlerList") pars = ncol(sample[[1]]) diag = NULL try({diag = coda::gelman.diag(sample)}, silent = T) if(is.null(diag)){ message("gelmanDiagnostics could not be calculated, possibly there is not enoug variance in your MCMC chains. Try running the sampler longer") diag = list() diag$psrf = matrix(nrow = pars, ncol = 2) rownames(diag$psrf) = colnames(sample) diag$mpsrf = NA } if(pars == 1) diag$mpsrf = NA # fixes #221 if(plot == T & ! is.na(diag$mpsrf)){ # Wrapper around the gelman.plot to filter out getSample arguments from ... gP <- function(...,start, end, parametersOnly, coda, numSamples, whichParameters, reportDiagnostics, thin, plot, sampler) coda::gelman.plot(sample, ...) do.call(gP, as.list(match.call())) } return(diag) }
# ---- end of BayesianTools/R/diagnosticsGelman.R ----
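# Usage sketch: gelmanDiagnostics() needs several chains. A single DEzs run
# already contains three internal chains and can be passed directly (but see
# the note above on the independence of internal DEzs chains). Target and run
# length are toy assumptions.
library(BayesianTools)
ll    <- function(x) sum(dnorm(x, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = c(-5, -5), upper = c(5, 5))
out   <- runMCMC(setup, sampler = "DEzs", settings = list(iterations = 6000))
gelmanDiagnostics(out)   # psrf per parameter and multivariate mpsrf
                         # (plot = TRUE would additionally draw the Gelman plot)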
#' Calculate posterior volume #' @author Florian Hartig #' @param sampler an object of superclass bayesianOutput or any other class that has the getSample function implemented (e.g. Matrix) #' @param prior schould also prior volume be calculated #' @param method method for volume estimation. Currently, the only option is "MVN" #' @param ... additional parameters to pass on to the \code{\link{getSample}} #' @details The idea of this function is to provide an estimate of the "posterior volume", i.e. how "broad" the posterior is. One potential application is to the overall reduction of parametric uncertainty between different data types, or between prior and posterior. #' #' Implemented methods for volume estimation: #' #' Option "MVN" - in this option, the volume is calculated as the determinant of the covariance matrix of the prior / posterior sample. #' #' @example /inst/examples/getVolume.R #' @export getVolume <- function(sampler, prior = F, method = "MVN", ...){ x = getSample(sampler, ...) if(method == "MVN"){ nPars = ncol(x) postVol = det(cov(x)) }else stop("BayesianTools: unknown method argument in getVolume") if(prior == T){ x = sampler$setup$prior$sampler(5000) if(method == "MVN"){ nPars = ncol(x) priorVol = det(cov(x)) }else stop("BayesianTools: unknown method argument in getVolume") return(list(priorVol = priorVol, postVol = postVol)) }else return(postVol) }
# ---- end of BayesianTools/R/getVolume.R ----
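# Sketch: compare prior and posterior "volume" (determinant of the sample
# covariance, method "MVN") after a short toy run.
library(BayesianTools)
ll    <- function(x) sum(dnorm(x, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = c(-5, -5), upper = c(5, 5))
out   <- runMCMC(setup, sampler = "Metropolis", settings = list(iterations = 5000))
getVolume(out, prior = TRUE)   # list with priorVol and postVol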
#' @import graphics
#' @import stats
#' @import DHARMa
#' @import bridgesampling
#' @importFrom utils flush.console methods modifyList
NULL
# ---- end of BayesianTools/R/importPackages.R ----
# Motivation for this functions from # https://radfordneal.wordpress.com/2008/08/17/the-harmonic-mean-of-the-likelihood-worst-monte-carlo-method-ever/ # https://gist.github.com/gaberoo/4619102 # ' @export #marginalLikelihood <- function(x,lik,V,sampler$setup$likelihood$density,sampler$setup$prior$density,..., num.samples=1000,log=TRUE) UseMethod("marginalLikelihood") #' Calcluated the marginal likelihood from a set of MCMC samples #' @export #' @author Florian Hartig #' @param sampler an MCMC or SMC sampler or list, or for method "Prior" also a BayesianSetup #' @param numSamples number of samples to use. How this works, and if it requires recalculating the likelihood, depends on the method #' @param method method to choose. Currently available are "Chib" (default), the harmonic mean "HM", sampling from the prior "Prior", and bridge sampling "Bridge". See details #' @param ... further arguments passed to \code{\link{getSample}} #' @details The marginal likelihood is the average likelihood across the prior space. It is used, for example, for Bayesian model selection and model averaging. #' #' It is defined as \deqn{ML = \int L(\Theta) p(\Theta) d\Theta} #' #' Given that MLs are calculated for each model, you can get posterior weights (for model selection and/or model averaging) on the model by #' #' \deqn{P(M_i|D) = ML_i * p(M_i) / (\sum_i ML_i * p(M_i) )} #' #' In BT, we return the log ML, so you will have to exp all values for this formula. #' #' It is well-known that the ML is VERY dependent on the prior, and in particular the choice of the width of uninformative priors may have major impacts on the relative weights of the models. It has therefore been suggested to not use the ML for model averaging / selection on uninformative priors. If you have no informative priors, and option is to split the data into two parts, use one part to generate informative priors for the model, and the second part for the model selection. See help for an example. #' #' The marginalLikelihood function currently implements four ways to calculate the marginal likelihood. Be aware that marginal likelihood calculations are notoriously prone to numerical stability issues. Especially in high-dimensional parameter spaces, there is no guarantee that any of the implemented algorithms will converge reasonably fast. The recommended (and default) method is the method "Chib" (Chib and Jeliazkov, 2001), which is based on MCMC samples, with a limited number of additional calculations. Despite being the current recommendation, note there are some numeric issues with this algorithm that may limit reliability for larger dimensions. #' #' The harmonic mean approximation, is implemented only for comparison. Note that the method is numerically unrealiable and usually should not be used. #' #' The third method is simply sampling from the prior. While in principle unbiased, it will only converge for a large number of samples, and is therefore numerically inefficient. #' #' The Bridge method uses bridge sampling as implemented in the R package "bridgesampling". It is potentially more exact than the Chib method, but might require more computation time. However, this may be very dependent on the sampler. #' #' @return A list with log of the marginal likelihood, as well as other diagnostics depending on the chose method #' #' @example /inst/examples/marginalLikelihoodHelp.R #' @references #' #' Chib, Siddhartha, and Ivan Jeliazkov. "Marginal likelihood from the Metropolis-Hastings output." 
Journal of the American Statistical Association 96.453 (2001): 270-281. #' #' Dormann et al. 2018. Model averaging in ecology: a review of Bayesian, information-theoretic, and tactical approaches for predictive inference. Ecological Monographs #' #' @seealso \code{\link{WAIC}}, \code{\link{DIC}}, \code{\link{MAP}} marginalLikelihood <- function(sampler, numSamples = 1000, method = "Chib", ...){ if ((class(sampler)[1] %in% c("mcmcSamplerList", "smcSamplerList"))) { setup <- sampler[[1]]$setup posterior = sampler[[1]]$setup$posterior$density } else if ((class(sampler)[1] %in% c("mcmcSampler", "smcSampler"))) { setup <- sampler$setup posterior = sampler$setup$posterior$density } else if ((class(sampler)[1] %in% c("BayesianSetup"))) { setup <- sampler posterior = sampler$posterior$density } else stop("sampler must be a sampler or a BayesianSetup") if (method == "Chib"){ chain <- getSample(sampler = sampler, parametersOnly = F, ...) if(class(sampler)[1] %in% c("mcmcSamplerList", "smcSamplerList")) sampler <- sampler[[1]] x <- chain[,1:sampler$setup$numPars,drop=F] lik <- chain[,sampler$setup$numPars + 2] MAPindex <- which.max(chain[,sampler$setup$numPars + 1]) #propGen = createProposalGenerator(covariance = cov(x)) V <- cov(x) # calculate reference parameter theta.star <- x[MAPindex,,drop=F] lik.star <- lik[MAPindex] # get samples from posterior g <- sample.int(nrow(x), numSamples, replace=TRUE) # should replace really be true? q.g <- mvtnorm::dmvnorm(x[g,,drop=F], mean = theta.star, sigma = V, log = FALSE) lik.g <- lik[g] alpha.g <- sapply(lik.g, function(l) min(1, exp(lik.star - l))) # Metropolis Ratio #lik.g <- apply(theta.g,1,sampler$setup$likelihood$density,...) # get samples from proposal theta.j <- mvtnorm::rmvnorm(numSamples, mean = theta.star, sigma = V) lik.j <- apply(theta.j, 1, sampler$setup$likelihood$density) alpha.j <- sapply(lik.j, function(l) min(1, exp(l - lik.star))) # Metropolis Ratio # Prior pi.hat <- mean(alpha.g * q.g) / mean(alpha.j) pi.star <- 0 if (!is.null(sampler$setup$prior$density)) pi.star <- sampler$setup$prior$density(theta.star) ln.m <- lik.star + pi.star - log(pi.hat) out <- list(ln.ML = ln.m, ln.lik.star = lik.star, ln.pi.star = pi.star, ln.pi.hat = log(pi.hat), method = "Chib") } else if (method == "HM"){ warning("The Harmonic Mean estimator is notoriously unstable. It's only implemented for comparison. We strongly advice against using it for research!") chain <- getSample(sampler = sampler, parametersOnly = F, ...) lik <- chain[, setup$numPars + 2] ml <- log(1 / mean(1 / exp(lik))) # ml = 1 / logSumExp(-lik, mean = T) function needs to be adjusted out <- list(ln.ML=ml, method ="HM") } else if (method == "Prior"){ samples <- setup$prior$sampler(numSamples) likelihoods <- setup$likelihood$density(samples) ml <- logSumExp(likelihoods, mean = T) out <- list(ln.ML=ml, method ="Prior") } else if (method == "Bridge") { chain <- getSample(sampler = sampler, parametersOnly = F, numSamples = numSamples, ...) 
nParams <- setup$numPars lower <- setup$prior$lower upper <- setup$prior$upper out <- list(ln.ML = bridgesample(chain ,nParams, lower, upper, posterior)$logml, method ="Bridge") } else if ("NN") { # TODO: implement nearest neighbour method: # https://arxiv.org/abs/1704.03472 stop("Not yet implemented") } else { stop(paste(c("\"", method, "\" is not a valid method parameter!"), sep = " ", collapse = "")) } return(out) } #' Calculates the marginal likelihood of a chain via bridge sampling #' @export #' @author Tankred Ott #' @param chain a single mcmc chain with samples as rows and parameters and posterior density as columns. #' @param nParams number of parameters #' @param lower optional - lower bounds of the prior #' @param upper optional - upper bounds of the prior #' @param posterior posterior density function #' @param ... arguments passed to bridge_sampler #' @details This function uses "bridge_sampler" from the package "bridgesampling". #' @example /inst/examples/bridgesampleHelp.R #' @seealso \code{\link{marginalLikelihood}} #' @keywords internal bridgesample <- function (chain, nParams, lower = NULL, upper = NULL, posterior, ...) { # TODO: implement this without bridgesampling package # https://github.com/quentingronau/bridgesampling if (is.null(lower)) lower <- rep(-Inf, nParams) if (is.null(upper)) upper <- rep(Inf, nParams) names(lower) <- names(upper) <- colnames(chain[, 1:nParams]) log_posterior = function(x, data){ return(posterior(x)) } out <- bridgesampling::bridge_sampler( samples = chain[, 1:nParams], log_posterior = log_posterior, data = chain, lb = lower, ub = upper, ... ) return(out) }
# ---- end of BayesianTools/R/marginalLikelihood.R ----
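# Sketch: two of the marginal likelihood estimators above applied to the same
# toy model. Both estimate the same quantity, so their results should roughly
# agree; the absolute value depends strongly on the uniform prior width, as
# discussed in the documentation above.
library(BayesianTools)
ll    <- function(x) sum(dnorm(x, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = c(-5, -5), upper = c(5, 5))
out   <- runMCMC(setup, sampler = "Metropolis", settings = list(iterations = 10000))
mChib  <- marginalLikelihood(out, method = "Chib")
mPrior <- marginalLikelihood(setup, method = "Prior", numSamples = 10000)
c(Chib = mChib$ln.ML, Prior = mPrior$ln.ML)   # estimated log marginal likelihoods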
#' Calculate confidence region from an MCMC or similar sample #' @author Florian Hartig #' @param sampleMatrix matrix of outcomes. Could be parameters or predictions #' @param quantiles quantiles to be calculated #' @export #' @seealso \code{\link{getPredictiveDistribution}} \cr #' \code{\link{getPredictiveIntervals}} \cr #' getCredibleIntervals <- function(sampleMatrix, quantiles = c(0.025, 0.975)){ x = matrix (ncol = ncol(sampleMatrix), nrow = length(quantiles)) rownames(x) = quantiles for (i in 1:length(quantiles)){ x[i,] = apply(sampleMatrix,2,function(x)quantile(x,probs=quantiles[i])) } return(x) } #' Calculates predictive distribution based on the parameters #' @author Florian Hartig #' @param parMatrix matrix of parameter values #' @param model model / function to calculate predictions. Outcome should be a vector #' @param numSamples number of samples to be drawn #' @details If numSamples is greater than the number of rows in parMatrix, or NULL, or FALSE, or less than 1 all samples in parMatrix will be used. #' @export #' @seealso \code{\link{getPredictiveIntervals}} \cr #' \code{\link{getCredibleIntervals}} \cr getPredictiveDistribution<-function(parMatrix, model, numSamples = 1000){ # Do thinning if wanted and neccessary if (numSamples != F && nrow(parMatrix) > 2*numSamples && !is.null(numSamples) && numSamples > 0){ sel = round(seq(1,nrow(parMatrix), len = numSamples )) parMatrixSel = parMatrix[sel,] }else{ parMatrixSel = parMatrix } # calculate predictions run1 = model(parMatrixSel[1,]) out = matrix(NA, ncol = length(run1), nrow = nrow(parMatrixSel)) out[1,] = run1 for (i in 2:nrow(parMatrixSel)){ out[i,] = model(parMatrixSel[i,]) } return(out) } #' Calculates Bayesian credible (confidence) and predictive intervals based on parameter sample #' @author Florian Hartig #' @param parMatrix matrix of parameter values #' @param model model / function to calculate predictions. Outcome should be a vector #' @param numSamples number of samples to be drawn #' @param quantiles quantiles to calculate #' @param error function with signature f(mean, par) that generates error expectations from mean model predictions. Par is a vector from the matrix with the parameter samples (full length). f needs to know which of these parameters are parameters of the error function. If supplied, will calculate also predictive intervals additional to credible intervals #' @details If numSamples is greater than the number of rows in parMatrix, or NULL, or FALSE, or less than 1 all samples in parMatrix will be used. #' @export #' @seealso \code{\link{getPredictiveDistribution}} \cr #' \code{\link{getCredibleIntervals}} \cr getPredictiveIntervals<-function(parMatrix, model, numSamples = 1000, quantiles = c(0.025, 0.975), error = NULL){ out = list() # Posterior predictive credible interval pred = getPredictiveDistribution(parMatrix, model = model, numSamples = numSamples) out$posteriorPredictiveCredibleInterval = getCredibleIntervals(sampleMatrix = pred, quantiles = quantiles) # Posterior predictive prediction interval # Posterior predictive simulations if(!is.null(error)){ predDistr = pred for (i in 1:nrow(predDistr)){ predDistr[i,] = error(mean = pred[i,], par = parMatrix[i,]) } predInt = getCredibleIntervals(sampleMatrix = predDistr, quantiles = quantiles) out$posteriorPredictivePredictionInterval = predInt out$posteriorPredictiveSimulations = predDistr } return(out) }
# ---- end of BayesianTools/R/mcmcConfidence.R ----
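# Sketch: credible and predictive intervals for a toy linear model. parMatrix
# stands in for a posterior sample; its third column is the sd of the i.i.d.
# normal observation error used by the 'error' function.
library(BayesianTools)
xObs      <- 1:10
parMatrix <- cbind(a = rnorm(500, 1, 0.1), b = rnorm(500, 2, 0.1),
                   sd = runif(500, 0.4, 0.6))
model <- function(par) par[1] + par[2] * xObs
error <- function(mean, par) rnorm(length(mean), mean = mean, sd = par[3])
pred  <- getPredictiveDistribution(parMatrix, model, numSamples = 200)
getCredibleIntervals(pred)                        # credible band of the mean prediction
intervals <- getPredictiveIntervals(parMatrix, model, numSamples = 200, error = error)
intervals$posteriorPredictivePredictionInterval   # band including observation error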
#' Differential-Evolution MCMC #' @author Francesco Minunno and Stefan Paul #' @param bayesianSetup a BayesianSetup with the posterior density function to be sampled from #' @param settings list with parameter settings #' @param startValue (optional) eiter a matrix with start population, a number to define the number of chains that are run or a function that samples a starting population. #' @param iterations number of function evaluations. #' @param burnin number of iterations treated as burn-in. These iterations are not recorded in the chain. #' @param thin thinning parameter. Determines the interval in which values are recorded. #' @param f scaling factor gamma #' @param eps small number to avoid singularity #' @param blockUpdate list determining whether parameters should be updated in blocks. For possible settings see Details. #' @param message logical determines whether the sampler's progress should be printed #' @references Braak, Cajo JF Ter. "A Markov Chain Monte Carlo version of the genetic algorithm Differential Evolution: easy Bayesian computing for real parameter spaces." Statistics and Computing 16.3 (2006): 239-249. #' @export #' @example /inst/examples/DEfamilyHelp.R #' @seealso \code{\link{DEzs}} #' @details For blockUpdate the first element in the list determines the type of blocking. #' Possible choices are #' \itemize{ #' \item{"none"}{ (default), no blocking of parameters} #' \item{"correlation"} { blocking based on correlation of parameters. Using h or k (see below)} #' \item{"random"} { random blocking. Using k (see below)} #' \item{"user"} { user defined groups. Using groups (see below)} #' } #' Further seven parameters can be specified. "k" determnined the number of groups, "h" the strength #' of the correlation used to group parameter and "groups" is used for user defined groups. #' "groups" is a vector containing the group number for each parameter. E.g. for three parameters #' with the first two in one group, "groups" would be c(1,1,2). #' Further pSel and pGroup can be used to influence the choice of groups. In the sampling process #' a number of groups is randomly drawn and updated. pSel is a vector containing relative probabilities #' for an update of the respective number of groups. E.g. for always updating only one group pSel = 1. #' For updating one or two groups with the same probability pSel = c(1,1). By default all numbers #' have the same probability. #' The same principle is used in pGroup. Here the user can influence the probability of each group #' to be updated. By default all groups have the same probability. #' Finally "groupStart" defines the starting point of the groupUpdate and "groupIntervall" the intervall #' in which the groups are evaluated. 
DE <- function(bayesianSetup, settings = list( startValue = NULL, iterations = 10000, f = -2.38, burnin = 0, thin = 1, eps = 0, consoleUpdates = 100, blockUpdate = list("none", k = NULL, h = NULL, pSel = NULL, pGroup = NULL, groupStart = 1000, groupIntervall = 1000), currentChain = 1, message = TRUE ) ){ if("bayesianOutput" %in% class(bayesianSetup)){ restart <- TRUE } else restart <- FALSE if(restart){ if(is.null(settings)) settings <- bayesianSetup$settings else settings <- applySettingsDefault(settings = settings, sampler = "DE") }else{ # If nothing provided use default settings settings <- applySettingsDefault(settings = settings, sampler = "DE") } if(!restart){ setup <- bayesianSetup }else{ setup <- bayesianSetup$setup } setup <- checkBayesianSetup(setup, parallel = settings$parallel) # calling parallel will check if requested parallelization in settings is provided by the BayesianSetup if(is.null(settings$parallel)) settings$parallel = setup$parallel # checking back - if no parallelization is provided, we use the parallelization in the BayesianSetup. We could also set parallel = F, but I feel it makes more sense to use the Bayesiansetup as default if(!restart){ if(is.null(settings$startValue)){ parLen = length(bayesianSetup$prior$sampler(1)) X = bayesianSetup$prior$sampler(3 * parLen) } if(is.function(settings$startValue)){ X = settings$startValue() } if(class(settings$startValue)[1] == "numeric"){ X = bayesianSetup$prior$sampler(settings$startValue) } if(is.matrix(settings$startValue)) X <- settings$startValue }else{ X <- bayesianSetup$X } # X = startValue if (!is.matrix(X)) stop("wrong starting values") FUN = setup$posterior$density ## Initialize blockUpdate parameters and settings blockdefault <- list("none", k = NULL, h = NULL, pSel = NULL, pGroup = NULL, groupStart = 1000, groupIntervall = 1000) if(!is.null(settings$blockUpdate)){ blockUpdate <- modifyList(blockdefault, settings$blockUpdate) blockUpdate[[1]] <- settings$blockUpdate[[1]] # to catch first argument if(blockUpdate[[1]] == "none"){ blockUpdateType <- "none" blocks = FALSE BlockStart = FALSE }else{ groupStart <- blockUpdate$groupStart groupIntervall <- blockUpdate$groupIntervall blockUpdateType = blockUpdate[[1]] blocks = TRUE ## Initialize BlockStart BlockStart = FALSE Bcount = 0 } }else{ blockUpdateType <- "none" blocks = FALSE BlockStart = FALSE } Npar <- ncol(X) Npop <- nrow(X) burnin <- settings$burnin/Npop n.iter <- ceiling(settings$iterations/Npop) if (n.iter < 2) stop ("The total number of iterations must be greater than the number of parameters to fit times 3.") lChain <- ceiling((n.iter - burnin)/settings$thin)+1 #pChain <- array(NA, dim=c(n.iter*Npop, Npar+3)) pChain <- array(NA, dim=c(lChain, Npar+3, Npop)) colnames(pChain) <- c(setup$names, "LP", "LL", "LPr") counter <- 1 iseq <- 1:Npop F2 = abs(settings$f)/sqrt(2*Npar) if (settings$f>0) F1 = F2 else F1 = 0.98 logfitness_X <- FUN(X, returnAll = T) # Write first values in chain pChain[1,,] <- t(cbind(X,logfitness_X)) # Print adjusted iterations # cat("Iterations adjusted to", n.iter*Npop,"to fit settings", "\n") #### eps <- settings$eps currentChain <- settings$currentChain iterations <- settings$iterations for (iter in 2:n.iter) { if (iter%%10) F_cur = F2 else F_cur = F1 if(blocks){ ### Update the groups. 
if(iter == groupStart+ Bcount*groupIntervall){ blockSettings <- updateGroups(chain = pChain[1:counter,, ], blockUpdate) BlockStart <- TRUE Bcount <- Bcount + 1 } } #### for (i in iseq){ # select to random different individuals (and different from i) in rr, a 2-vector rr <- sample(iseq[-i], 2, replace = FALSE) x_prop <- X[i,] + F_cur * (X[rr[1],]-X[rr[2],]) + eps * rnorm(Npar,0,1) if(BlockStart){ # Get the current group and update the proposal accordingly Member <- getBlock(blockSettings) x_prop[-Member] <- X[i,-Member] #### } logfitness_x_prop <- FUN(x_prop, returnAll = T) if(!is.na(logfitness_x_prop[1] - logfitness_X[i,1])){ # To catch possible error if ((logfitness_x_prop[1] - logfitness_X[i,1] ) > log(runif(1))){ X[i,] <- x_prop logfitness_X[i,] <- logfitness_x_prop } } } #iseq if ((iter > burnin) && (iter %% settings$thin == 0) ) { # retain sample counter <- counter+1 pChain[counter,,] <- t(cbind(X,logfitness_X)) } if(settings$message){ if( (iter %% settings$consoleUpdates == 0) | (iter == n.iter)) cat("\r","Running DE-MCMC, chain ", currentChain, "iteration" ,iter*Npop,"of",n.iter*Npop,". Current logp ", logfitness_X[,1], "Please wait!","\r") flush.console() } } # n.iter iterationsOld <- 0 pChain <- pChain[1:counter,,] if(restart){ # Combine chains newchains <- array(NA, dim = c((counter+nrow(bayesianSetup$chain[[1]])), (Npar+3), Npop)) for(i in 1:Npop){ for(k in 1:(Npar+3)){ newchains[,k,i] <- c(bayesianSetup$chain[[i]][,k],pChain[,k,i]) } } pChain <- newchains } pChain<- coda::as.mcmc.list(lapply(1:Npop,function(i) coda::as.mcmc(pChain[,1:(Npar+3),i]))) list(Draws = pChain, X = as.matrix(X[,1:Npar])) }
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/mcmcDE.R
#TODO: long-term - consider combining DE and DE.ZS #' Differential-Evolution MCMC zs #' @author Francesco Minunno and Stefan Paul #' @param bayesianSetup a BayesianSetup with the posterior density function to be sampled from #' @param settings list with parameter settings #' @param startValue (optional) either a matrix with start population, a number to define the number of chains that are run or a function that samples a starting population. #' @param Z starting Z population #' @param iterations iterations to run #' @param pSnooker probability of Snooker update #' @param burnin number of iterations treated as burn-in. These iterations are not recorded in the chain. #' @param thin thinning parameter. Determines the interval in which values are recorded. #' @param eps small number to avoid singularity #' @param f scaling factor for gamma #' @param parallel logical, determines whether parallel computing should be attempted (see details) #' @param pGamma1 probability determining the frequency with which the scaling is set to 1 (allows jumps between modes) #' @param eps.mult random term (multiplicative error) #' @param eps.add random term #' @param blockUpdate list determining whether parameters should be updated in blocks. For possible settings see Details. #' @param message logical, determines whether the sampler's progress should be printed #' @references ter Braak C. J. F., and Vrugt J. A. (2008). Differential Evolution Markov Chain with snooker updater and fewer chains. Statistics and Computing http://dx.doi.org/10.1007/s11222-008-9104-9 #' @export #' @example /inst/examples/DEfamilyHelp.R #' @seealso \code{\link{DE}} #' @details For parallel computing, the likelihood density in the bayesianSetup needs to be parallelized, i.e. needs to be able to operate on a matrix of proposals. #' #' For blockUpdate the first element in the list determines the type of blocking. #' Possible choices are #' \itemize{ #' \item{"none"}{ (default), no blocking of parameters} #' \item{"correlation"} { blocking based on correlation of parameters. Using h or k (see below)} #' \item{"random"} { random blocking. Using k (see below)} #' \item{"user"} { user defined groups. Using groups (see below)} #' } #' Seven further parameters can be specified. "k" determines the number of groups, "h" the strength #' of the correlation used to group parameters, and "groups" is used for user defined groups. #' "groups" is a vector containing the group number for each parameter. E.g. for three parameters #' with the first two in one group, "groups" would be c(1,1,2). #' Further, pSel and pGroup can be used to influence the choice of groups. In the sampling process #' a number of groups is randomly drawn and updated. pSel is a vector containing relative probabilities #' for an update of the respective number of groups. E.g. for always updating only one group pSel = 1. #' For updating one or two groups with the same probability pSel = c(1,1). By default all numbers #' have the same probability. #' The same principle is used in pGroup. Here the user can influence the probability of each group #' being updated. By default all groups have the same probability. #' Finally, "groupStart" defines the starting point of the group update and "groupIntervall" the interval #' in which the groups are evaluated.

DEzs <- function(bayesianSetup, settings = list(iterations=10000, Z = NULL, startValue = NULL, pSnooker = 0.1, burnin = 0, thin = 1, f = 2.38, eps = 0, parallel = NULL, pGamma1 = 0.1, eps.mult =0.2, eps.add = 0, consoleUpdates = 100, zUpdateFrequency = 1, currentChain = 1, blockUpdate = list("none", k = NULL, h = NULL, pSel = NULL, pGroup = NULL, groupStart = 1000, groupIntervall = 1000) ,message = TRUE)) { # X = startValue if("bayesianOutput" %in% class(bayesianSetup)){ restart <- TRUE } else restart <- FALSE if(restart){ if(is.null(settings)) settings <- bayesianSetup$settings else settings <- applySettingsDefault(settings = settings, sampler = "DEzs") }else{ # If nothing provided use default settings settings <- applySettingsDefault(settings = settings, sampler = "DEzs") } if(!restart){ setup <- bayesianSetup } else setup <- bayesianSetup$setup setup <- checkBayesianSetup(setup, parallel = settings$parallel) # calling parallel will check if requested parallelization in settings is provided by the BayesianSetup if(is.null(settings$parallel)) settings$parallel = setup$parallel # checking back - if no parallelization is provided, we use the parallelization in the BayesianSetup. We could also set parallel = F, but I feel it makes more sense to use the Bayesiansetup as default if(!restart){ if(is.null(settings$startValue)){ parLen = length(bayesianSetup$prior$sampler(1)) X = bayesianSetup$prior$sampler(3) } if(is.function(settings$startValue)){ X = settings$startValue() } if(class(settings$startValue)[1] == "numeric"){ X = bayesianSetup$prior$sampler(settings$startValue) } if(is.matrix(settings$startValue)) X <- settings$startValue if(is.null(settings$Z)){ parLen = length(bayesianSetup$prior$sampler(1)) Z = bayesianSetup$prior$sampler(parLen * 10) } if(is.function(settings$Z)){ Z = settings$Z() } if(class(settings$Z)[1] == "numeric"){ Z = bayesianSetup$prior$sampler(settings$Z) } if(is.matrix(settings$Z)) Z <- settings$Z }else{ X <- bayesianSetup$X Z <- bayesianSetup$Z if(is.vector(Z)) Z = as.matrix(Z) } if (! is.matrix(X)) stop("wrong starting values") if (! is.matrix(Z)) stop("wrong Z values") FUN = setup$posterior$density if(is.null(settings$parallel)) parallel = setup$parallel else parallel <- settings$parallel if(parallel == T & setup$parallel == F) stop("parallel = T requested in DEzs but BayesianSetup does not support parallelization. See help of BayesianSetup on how to enable parallelization") ## Initialize blockUpdate parameters and settings blockdefault <- list("none", k = NULL, h = NULL, pSel = NULL, pGroup = NULL, groupStart = 1000, groupIntervall = 1000) if(!is.null(settings$blockUpdate)){ blockUpdate <- modifyList(blockdefault, settings$blockUpdate) blockUpdate[[1]] <- settings$blockUpdate[[1]] # to catch first argument if(blockUpdate[[1]] == "none"){ blockUpdateType <- "none" blocks = FALSE BlockStart = FALSE }else{ groupStart <- blockUpdate$groupStart groupIntervall <- blockUpdate$groupIntervall blockUpdateType = blockUpdate[[1]] blocks = TRUE ## Initialize BlockStart BlockStart = FALSE Bcount = 0 } }else{ blockUpdateType <- "none" blocks = FALSE BlockStart = FALSE } # Initialize parameter values. Because they are called in # the loop this saves time in comparison to referencing them # every iteration using settings$... 
iterations <- settings$iterations consoleUpdates <- settings$currentChain currentChain <- settings$currentChain pSnooker <- settings$pSnooker zUpdateFrequency <- settings$zUpdateFrequency pGamma1 <- settings$pGamma1 eps.mult <- settings$eps.mult eps.add <- settings$eps.add # Initialization of previous chain length (= 0 if restart = F) lChainOld <- 0 Npar <- ncol(X) Npar12 <- (Npar - 1)/2 # factor for Metropolis ratio DE Snooker update # M0 is initial population size of Z is the size of Z, it's the same number, only kept 2 to stay consistent with the ter Brakk & Vrugt 2008 M = M0 = nrow(Z) Npop <- nrow(X) F2 = settings$f/sqrt(2*Npar) F1 = 1.0 rr = NULL r_extra = 0 #if(burnin != 0) stop("burnin option is currently not implemented") burnin <- settings$burnin/Npop n.iter <- ceiling(settings$iterations/Npop) if (n.iter < 2) stop ("The total number of iterations must be greater than 3") lChain <- ceiling((n.iter - burnin)/settings$thin)+1 pChain <- array(NA, dim=c(lChain, Npar+3, Npop)) colnames(pChain) <- c(setup$names, "LP", "LL", "LPr") # Print adjusted iterations # cat("Iterations adjusted to", n.iter*Npop,"to fit settings", "\n") # assign memory for Z Zold <- Z Z <- matrix(NA, nrow= M0 + floor((n.iter-1) /zUpdateFrequency) * Npop, ncol=Npar) Z[1:M,] <- Zold counter <- 1 counterZ <- 0 # accept.prob <- 0 logfitness_X <- FUN(X, returnAll = T) # Write first values in chain pChain[1,,] <- t(cbind(X,logfitness_X)) for (iter in 2:n.iter) { f <- ifelse(iter%%10 == 0, 0.98, F1) #accept <- 0 if(blocks){ ### Update the groups. if(iter == groupStart+ Bcount*groupIntervall){ blockSettings <- updateGroups(chain = pChain[1:counter,, ], blockUpdate) BlockStart <- TRUE Bcount <- Bcount + 1 } } if(parallel == TRUE | parallel == "external"){ x_prop <- matrix(NA, nrow= Npop, ncol=Npar) r_extra <- numeric(Npop) for(i in 1:Npop){ # select to random different individuals (and different from i) in rr, a 2-vector rr <- sample.int(M, 3, replace = FALSE) if(runif(1) < pSnooker) { z <- Z[rr[3],] x_z <- X[i,] - z D2 <- max(sum(x_z*x_z), 1.0e-300) projdiff <- sum((Z[rr[1],] -Z[rr[2],]) * x_z)/D2 # inner_product of difference with x_z / squared norm x_z gamma_snooker <- runif(1, min=1.2,max=2.2) x_prop[i,] <- X[i,] + gamma_snooker * projdiff * x_z x_z <- x_prop[i,] - z D2prop <- max(sum(x_z*x_z), 1.0e-300) r_extra[i] <- Npar12 * (log(D2prop) - log(D2)) } else { if ( runif(1)< pGamma1 ) { gamma_par = F1 # to be able to jump between modes } else { gamma_par = F2 * runif(Npar, min=1-eps.mult, max=1+eps.mult) # multiplicative error to be applied to the difference # gamma_par = F2 } rr = sample.int(M, 2, replace = FALSE) if (eps.add ==0) { # avoid generating normal random variates if possible x_prop[i,] = X[i,] + gamma_par * (Z[rr[1],]-Z[rr[2],]) } else { x_prop[i,] = X[i,] + gamma_par * (Z[rr[1],]-Z[rr[2],]) + eps.add*rnorm(Npar,0,1) } r_extra = rep(0, Npop) } } # end proposal creation if(BlockStart){ # Get the current group and update the proposal accordingly Member <- getBlock(blockSettings) x_prop[,-Member] <- X[,-Member] #### } # run proposals logfitness_x_prop <- FUN(x_prop, returnAll = T) # evaluate acceptance for(i in 1:Npop){ if(!is.na(logfitness_x_prop[i,1] - logfitness_X[i,1])){ if ((logfitness_x_prop[i,1] - logfitness_X[i,1] + r_extra[i]) > log(runif(1))){ # accept <- accept + 1 X[i,] <- x_prop[i,] logfitness_X[i,] <- logfitness_x_prop[i,] } } } } else{ # if not parallel for (i in 1:Npop){ # select to random different individuals (and different from i) in rr, a 2-vector rr <- sample.int(M, 3, replace = FALSE) 
if(runif(1) < pSnooker) { z <- Z[rr[3],] x_z <- X[i,] - z D2 <- max(sum(x_z*x_z), 1.0e-300) projdiff <- sum((Z[rr[1],] -Z[rr[2],]) * x_z)/D2 # inner_product of difference with x_z / squared norm x_z gamma_snooker <- runif(1, min=1.2,max=2.2) x_prop <- X[i,] + gamma_snooker * projdiff * x_z x_z <- x_prop - z D2prop <- max(sum(x_z*x_z), 1.0e-300) r_extra <- Npar12 * (log(D2prop) - log(D2)) } else { if ( runif(1)< pGamma1 ) { gamma_par = F1 # to be able to jump between modes } else { gamma_par = F2 * runif(Npar, min=1-eps.mult, max=1+eps.mult) # multiplicative error to be applied to the difference # gamma_par = F2 } rr = sample.int(M, 2, replace = FALSE) if (eps.add ==0) { # avoid generating normal random variates if possible x_prop = X[i,] + gamma_par * (Z[rr[1],]-Z[rr[2],]) } else { x_prop = X[i,] + gamma_par * (Z[rr[1],]-Z[rr[2],]) + eps.add*rnorm(Npar,0,1) } r_extra = 0 } if(BlockStart){ # Get the current group and update the proposal accordingly Member <- getBlock(blockSettings) x_prop[-Member] <- X[i,-Member] #### } # evaluate proposal - can this be mixed with the parallel above? logfitness_x_prop <- FUN(x_prop, returnAll = T) # evaluate acceptance if(!is.na(logfitness_x_prop[1] - logfitness_X[i,1])){ if ((logfitness_x_prop[1] - logfitness_X[i,1] + r_extra) > log(runif(1))){ # accept <- accept + 1 X[i,] <- x_prop logfitness_X[i,] <- logfitness_x_prop } } } # for Npop } if ((iter > burnin) && (iter %% settings$thin == 0) ) { # retain sample counter <- counter+1 pChain[counter,,] <- t(cbind(X,logfitness_X)) } if (iter%%zUpdateFrequency == 0) { # update history Z[( M0 + (counterZ*Npop) + 1 ):( M0 + (counterZ+1)*Npop),] <- X counterZ <- counterZ +1 M <- M + Npop } # Console update if(settings$message){ if( (iter %% settings$consoleUpdates == 0) | (iter == n.iter)) cat("\r","Running DEzs-MCMC, chain ", currentChain, "iteration" ,iter*Npop,"of",n.iter*Npop,". Current logp ", logfitness_X[,1],". Please wait!","\r") flush.console() } } # n.iter pChain <- pChain[1:counter,,] if(restart){ # Combine chains newchains <- array(NA, dim = c((counter+nrow(bayesianSetup$chain[[1]])), (Npar+3), Npop)) for(i in 1:Npop){ for(k in 1:(Npar+3)){ newchains[,k,i] <- c(bayesianSetup$chain[[i]][,k],pChain[,k,i]) } } pChain <- newchains } pChain<- coda::as.mcmc.list(lapply(1:Npop,function(i) coda::as.mcmc(pChain[,1:(Npar+3),i]))) list(Draws = pChain, X = as.matrix(X[,1:Npar]), Z = Z) }
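# --- Hedged usage sketch for DEzs (illustrative, not part of the original file). ---
# It assumes createBayesianSetup()/runMCMC() from this package; the target density, the
# correlation-based blockUpdate settings and the iteration numbers are arbitrary examples
# of the options documented in the roxygen block above (blocking, restart from a previous run).
library(BayesianTools)
ll <- function(x) sum(dnorm(x, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = rep(-5, 4), upper = rep(5, 4))
settings <- list(iterations = 12000,
                 blockUpdate = list("correlation", h = 0.5,          # block parameters by correlation
                                    groupStart = 1000, groupIntervall = 1000))
out1 <- runMCMC(bayesianSetup = setup, sampler = "DEzs", settings = settings)
# restart: the output of a previous run can be passed in place of a bayesianSetup
out2 <- runMCMC(bayesianSetup = out1)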
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/mcmcDEzs.R
### DREAM algorithm #' DREAM #' @author Stefan Paul #' @param bayesianSetup Object of class 'bayesianSetup' or 'bayesianOutput'. #' @param settings list with parameter values #' @param iterations Number of model evaluations #' @param nCR parameter determining the number of cross-over proposals. If nCR = 1 all parameters are updated jointly. #' @param updateInterval determining the interval for the pCR update #' @param gamma Kurtosis parameter of the Bayesian Inference Scheme #' @param eps Ergodicity term #' @param e Ergodicity term #' @param pCRupdate If TRUE, crossover probabilities will be updated #' @param burnin number of iterations treated as burn-in. These iterations are not recorded in the chain. #' @param thin thinning parameter. Determines the interval in which values are recorded. #' @param adaptation Number or percentage of samples that are used for the adaptation in DREAM (see Details). #' @param DEpairs Number of pairs used to generate proposal #' @param startValue either a matrix containing the start values (see details), an integer to define the number of chains that are run, a function to sample the start values or NULL, in which case the values are sampled from the prior. #' @param consoleUpdates Interval in which the sampling progress is printed to the console #' @param message logical, determines whether the sampler's progress should be printed #' @return mcmc.object containing the following elements: chains, X, pCR #' @references Vrugt, Jasper A., et al. "Accelerating Markov chain Monte Carlo simulation by differential evolution with self-adaptive randomized subspace sampling." International Journal of Nonlinear Sciences and Numerical Simulation 10.3 (2009): 273-290. #' @details Instead of a bayesianSetup, the function can take the output of a previous run to restart the sampler #' from the last iteration. Due to the sampler's internal structure you can only use the output #' of DREAM. #' If you provide a matrix with start values the number of rows determines the number of chains that are run. #' The number of columns must be equivalent to the number of parameters in your bayesianSetup. \cr\cr #' There are several small differences in the algorithm presented here compared to the original paper by Vrugt et al. (2009). Mainly, #' the algorithm implemented here does not have an automatic stopping criterion. Hence, it will #' always run the number of iterations specified by the user. Also, convergence is not #' monitored and is left to the user. This can easily be done with coda::gelman.diag(chain). #' Further, the delayed rejection step proposed in Vrugt et al. (2009) is not implemented here.\cr\cr #' #' During the adaptation phase DREAM runs two mechanisms to enhance the sampler's efficiency. #' First, the distribution of crossover values is tuned to favor large jumps in the parameter space. #' The crossover probabilities determine how many parameters are updated simultaneously. #' Second, outlier chains are replaced as they can largely deteriorate the sampler's performance. #' However, these steps destroy the detailed balance of the chain. Consequently these parts of the chain #' should be discarded when summarizing posterior moments. This can be done automatically during the #' sampling process (i.e. burnin > adaptation) or subsequently by the user. We chose to distinguish between #' the burnin and adaptation phase to allow the user more flexibility in the sampler's settings.
#' #' #' @example /inst/examples/DEfamilyHelp.R #' @seealso \code{\link{DREAMzs}} #' @export DREAM <- function(bayesianSetup, settings = list( iterations = 10000, nCR = 3, gamma = NULL, eps = 0, e = 5e-2, pCRupdate = TRUE, updateInterval = 10, burnin = 0, thin = 1, adaptation = 0.2, parallel = NULL, DEpairs = 2, consoleUpdates = 10, startValue = NULL, currentChain = 1, message = TRUE)) { if("bayesianOutput" %in% class(bayesianSetup)){ restart <- TRUE } else restart <- FALSE if(restart){ if(is.null(settings)) settings <- bayesianSetup$settings else settings <- applySettingsDefault(settings = settings, sampler = "DREAM") settings$adaptation <- 0 # set adaptation to 0 if restart because it has already been # applied in chain that is restarted and destroys detailed balance. }else{ # If nothing provided use default settings settings <- applySettingsDefault(settings = settings, sampler = "DREAM") } if(!restart){ setup <- bayesianSetup }else{ setup <- bayesianSetup$setup } setup <- checkBayesianSetup(setup, parallel = settings$parallel) # calling parallel will check if requested parallelization in settings is provided by the BayesianSetup if(is.null(settings$parallel)) settings$parallel = setup$parallel # checking back - if no parallelization is provided, we use the parallelization in the BayesianSetup. We could also set parallel = F, but I feel it makes more sense to use the Bayesiansetup as default if(!restart){ if(is.null(settings$startValue)){ parLen = length(bayesianSetup$prior$sampler(1)) X = bayesianSetup$prior$sampler(max(4,2 * parLen)) } if(is.function(settings$startValue)){ X = settings$startValue() } if(class(settings$startValue)[1] == "numeric"){ X = bayesianSetup$prior$sampler(settings$startValue) } if(is.matrix(settings$startValue)) X <- settings$startValue }else{ X <- bayesianSetup$X } # X = startValue if (!is.matrix(X)) stop("wrong starting values") currentChain = settings$currentChain FUN = setup$posterior$density pCRupdate <- settings$pCRupdate nCR <- settings$nCR Npar <- ncol(X) Npop <- nrow(X) # Check for consistency of DEpairs if(settings$DEpairs > (Npop-2)) stop("DEpairs to large for number of chains") # Set adaptation if percentage is supplied if(settings$adaptation <1) settings$adaptation <- settings$adaptation*settings$iterations # Set number of iterations and initialize chain n.iter <- ceiling(settings$iterations/Npop) if (n.iter < 2) stop ("The total number of iterations must be greater than the number of parameters to fit times 2.") settings$burnin <- settings$burnin/Npop lChain <- ceiling((n.iter - settings$burnin)/settings$thin)+1 pChain <- array(NA, dim=c(lChain, Npar+3, Npop)) colnames(pChain) <- c(setup$names, "LP", "LL", "LPr") # Evaluate start values and write them in the chain logfitness_X <- FUN(X, returnAll = T) pChain[1,,] <- t(cbind(X,logfitness_X)) # Set counter counter <- 1 iseq <- 1:Npop # gamma initialization. However gamma is calculated every iteration (see below). 
gamma <- 2.38/sqrt(settings$DEpairs*Npar) # delta initialization delta <- rep(0, settings$nCR) funevals <- 0 #### pCR update if(!restart){ pCR = rep(1/nCR, nCR) lCR <- rep(0,nCR) CR <- matrix(1/nCR, nrow = Npop, ncol = settings$updateInterval) }else{ pCR <- bayesianSetup$pCR CR <- generateCRvalues(pCR, settings, Npop) } # helper counter for CR value index counter_update <- 0 ## omega initialization omega <- numeric() ## eps and e eps <- settings$eps e <- settings$e ##################### Start iterations ############################## for(iter in 2:n.iter){ xOld <- X counter_update <- counter_update +1 for(i in 1:Npop){ selectedChains1 <- sample((1:Npop)[-i], settings$DEpairs, replace = FALSE) selectedChains2 <- numeric(settings$DEpairs) # Avoid that selected chains are identical for(k in 1:settings$DEpairs){ selectedChains2[k] <- sample((1:Npop)[-c(i,selectedChains1[k],selectedChains2[1:k]) ],1) } # Get indices of parameters that are updated = indX rn <- runif(Npar) indX <- which(rn>(1-CR[i, counter_update])) # Make sure at least one dimension is updated if(length(indX) == 0) indX <- sample(1:Npar, 1) # First update proposal x_prop <- X[i,] # Calculate gamma based on DEpairs and number of dimensions # that are updated simulateously. # To jump between modes gamma is set to 1 every fifth iteration. if(runif(1)>4/5){ gamma <- 1 }else{ gamma <-2.38/sqrt(settings$DEpairs* length(indX)) } # Replace with new proposal for indX x_prop[indX] <- X[i,indX] + (1+e)*gamma*(apply(as.matrix(X[selectedChains1,indX]),2,sum)- apply(as.matrix(X[selectedChains2,indX]),2,sum)) + eps*rnorm(length(indX),0,1) logfitness_x_prop <- FUN(x_prop, returnAll = T) if(!is.na(logfitness_x_prop[1] - logfitness_X[i,1])){ # To catch possible error if ((logfitness_x_prop[1] - logfitness_X[i,1] ) > log(runif(1))){ X[i,] <- x_prop logfitness_X[i,] <- logfitness_x_prop } } } #Npop ## Write values in chain if((iter > settings$burnin) && (iter %% settings$thin == 0)){ counter <- counter+1 pChain[counter,,] <- t(cbind(X,logfitness_X)) } if(iter < settings$adaptation){ if(pCRupdate){ ## Calculate delta, this is (unlike the update) done every iteration ## Calculate delta ## Calculate standard deviation of each dimension of X sdX <- apply(X[,1:Npar,drop=FALSE],2,sd) ## Compute Euclidean distance between old and new X values delta_Norm <- rowSums(((xOld-X[,1:Npar,drop=FALSE])/sdX)^2) ## Now delta can be calculated for (k in 1:settings$nCR){ # Loop over CR values # Find updated chains ind <- which(abs(CR[,k]-(k/nCR)) < 1e-5) ## Add normalized squared distance to the current delta delta[k] <- delta[k]+sum(delta_Norm[ind]) #delta[k] <- delta[k]+sum(delta_Norm) } } if(iter%%settings$updateInterval == 0){ if(pCRupdate){ # Update CR values tmp <- AdaptpCR(CR, delta, lCR, settings, Npop) pCR <- tmp$pCR lCR <- tmp$lCR ## CR values are generated outside loop because they are calculated # even after adaptation phase. See below! } ## remove outliers ## TODO include if(remOutliers = TRUE) ?? 
for(out in 1:Npop){ omega[out] <- mean(pChain[((counter/2):counter),Npar+1, out]) } if(NaN %in% omega){ outlierChain <- NULL # Prevent possible error }else{ # Inter quantile range IQR <- quantile(omega, probs = c(0.25, 0.75)) # Determine outlier chains outlierChain <- which(omega< IQR[1] - 2*(IQR[2]-IQR[1])) } # Replace with best chain if(length(outlierChain) > 0){ best <- which.max(pChain[counter,Npar+1,]) pChain[counter,,outlierChain] <- pChain[counter,,best] } # Remove outliers } } if(iter%%settings$updateInterval == 0){ counter_update <- 0 # set counter back to zero CR <- generateCRvalues(pCR, settings, Npop) } if(settings$message){ if( (iter %% settings$consoleUpdates == 0) | (iter == n.iter)) cat("\r","Running DREAM-MCMC, chain ", currentChain, "iteration" ,iter*Npop,"of",n.iter*Npop,". Current logp ", logfitness_X[,1], "Please wait!","\r") flush.console() } } # niter ################ End of iterations ################ iterationsOld <- 0 pChain <- pChain[1:counter,,] if(restart){ # Combine chains newchains <- array(NA, dim = c((counter+nrow(bayesianSetup$chain[[1]])), (Npar+3), Npop)) for(i in 1:Npop){ for(k in 1:(Npar+3)){ newchains[,k,i] <- c(bayesianSetup$chain[[i]][,k],pChain[,k,i]) } } pChain <- newchains } pChain<- coda::as.mcmc.list(lapply(1:Npop,function(i) coda::as.mcmc(pChain[,1:(Npar+3),i]))) return(list(chains = pChain, X = as.matrix(X[,1:Npar]), pCR = pCR)) }
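# --- Hedged usage sketch for DREAM (illustrative, not part of the original file). ---
# It shows the burnin/adaptation interplay described in the details above: samples drawn
# during the adaptation phase violate detailed balance, so burnin is chosen larger than the
# adaptation window to drop them automatically. createBayesianSetup() is assumed from this
# package; the density and all numbers are arbitrary.
library(BayesianTools)
ll <- function(x) sum(dnorm(x, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = rep(-5, 3), upper = rep(5, 3))
settings <- list(iterations = 20000,
                 adaptation = 0.2,    # first 20% of the samples are used for adaptation
                 burnin     = 5000,   # > adaptation phase, so adapted samples are discarded
                 nCR = 3, pCRupdate = TRUE, message = FALSE)
out <- DREAM(bayesianSetup = setup, settings = settings)
coda::gelman.diag(out$chains)         # convergence monitoring is left to the user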
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/mcmcDREAM.R
##' Generates matrix of CR values based on pCR ##' @param pCR Vector of crossover probabilities. Needs to be of length nCR. ##' @param settings settings list ##' @param Npop number of chains ##' @return Matrix with CR values #' @keywords internal generateCRvalues <- function(pCR,settings, Npop){ # Random vector, add zero to get first position RandomVec <- c(0,cumsum(as.numeric(rmultinom(1, size = Npop*settings$updateInterval, prob = pCR)))) # get candidate points cand <- sample(Npop*settings$updateInterval) CR <- rep(NA, Npop*settings$updateInterval) ## Now loop over chains to generate CR values for(i in 1:settings$nCR){ #Start and End Start <- RandomVec[i]+1 End <- RandomVec[i+1] # get candidates candx <- cand[Start:End] # Assign the corresponding CR value (i/nCR) to these candidates CR[candx] <- i/settings$nCR } ## Reshape CR into an Npop x updateInterval matrix CR <- matrix(CR,Npop,settings$updateInterval) return(CR) } #' Adapts pCR values #' @param CR matrix (or vector) of CR values used since the last adaptation #' @param settings settings list #' @param delta vector of accumulated normalized jumping distances, one entry per CR value #' @param lCR counts of how often each CR value has been used (weights for delta) #' @param Npop number of chains. #' @return list with the updated pCR and lCR values #' @keywords internal AdaptpCR <- function(CR, delta ,lCR, settings, Npop){ if(any(delta >0)){ ## Adaptations can only be made if there are changes in X # Change CR to vector CR <- c(CR) # Store old lCR values lCROld <- lCR ## Determine lCR lCR <- rep(NA,settings$nCR) for (k in 1:settings$nCR){ ## how many times a CR value is used. This is used to weight delta CR_counter <- length(which(CR==k/settings$nCR)) lCR[k] <- lCROld[k]+ CR_counter } ## Adapt pCR pCR <- Npop * (delta / lCR) / sum(delta) pCR[which(is.nan(pCR))] <- 1/settings$nCR # catch possible error if delta and lCR = 0 ## Normalize values pCR <- pCR/sum(pCR) } return(list(pCR=pCR,lCR=lCR)) } ##AdaptpCR
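# --- Toy illustration of the internal helper above (not part of the original file). ---
# generateCRvalues() draws an Npop x updateInterval matrix of crossover values from the
# current pCR; the numbers below are arbitrary and only show the shape and content of the result.
set.seed(1)
pCR      <- rep(1/3, 3)                          # equal probabilities for nCR = 3 CR values
settings <- list(nCR = 3, updateInterval = 10)
CR <- generateCRvalues(pCR, settings, Npop = 5)
dim(CR)         # 5 x 10 matrix, one row per chain
unique(c(CR))   # entries are multiples of 1/nCR (1/3, 2/3, 1)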
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/mcmcDREAM_helperFunctions.R
### DREAMzs algorithm #' DREAMzs #' @author Stefan Paul #' @param bayesianSetup Object of class 'bayesianSetup' or 'bayesianOutput'. #' @param settings list with parameter values #' @param iterations Number of model evaluations #' @param nCR parameter determining the number of cross-over proposals. If nCR = 1 all parameters are updated jointly. #' @param updateInterval determining the interval for the pCR (crossover probabilities) update #' @param gamma Kurtosis parameter of the Bayesian Inference Scheme. #' @param eps Ergodicity term #' @param e Ergodicity term #' @param pCRupdate Update of crossover probabilities #' @param burnin number of iterations treated as burn-in. These iterations are not recorded in the chain. #' @param thin thinning parameter. Determines the interval in which values are recorded. #' @param adaptation Number or percentage of samples that are used for the adaptation in DREAM (see Details) #' @param DEpairs Number of pairs used to generate proposal #' @param ZupdateFrequency frequency to update Z matrix #' @param pSnooker probability of snooker update #' @param Z starting matrix for Z #' @param startValue either a matrix containing the start values (see details), an integer to define the number of chains that are run, a function to sample the start values or NULL, in which case the values are sampled from the prior. #' @param consoleUpdates Interval in which the sampling progress is printed to the console #' @param message logical, determines whether the sampler's progress should be printed #' @return mcmc.object containing the following elements: chains, X, pCR, Z #' @references Vrugt, Jasper A., et al. "Accelerating Markov chain Monte Carlo simulation by differential evolution with self-adaptive randomized subspace sampling." International Journal of Nonlinear Sciences and Numerical Simulation 10.3 (2009): 273-290. #' @references ter Braak C. J. F., and Vrugt J. A. (2008). Differential Evolution Markov Chain with snooker updater and fewer chains. Statistics and Computing http://dx.doi.org/10.1007/s11222-008-9104-9 #' @details Instead of a bayesianSetup, the function can take the output of a previous run to restart the sampler #' from the last iteration. Due to the sampler's internal structure you can only use the output #' of DREAMzs. #' If you provide a matrix with start values the number of rows determines the number of chains that are run. #' The number of columns must be equivalent to the number of parameters in your bayesianSetup. \cr\cr #' There are several small differences in the algorithm presented here compared to the original paper by Vrugt et al. (2009). Mainly, #' the algorithm implemented here does not have an automatic stopping criterion. Hence, it will #' always run the number of iterations specified by the user. Also, convergence is not #' monitored and is left to the user. This can easily be done with coda::gelman.diag(chain). #' Further, the delayed rejection step proposed in Vrugt et al. (2009) is not implemented here.\cr\cr #' During the adaptation phase DREAM runs two mechanisms to enhance the sampler's efficiency. #' First, the distribution of crossover values is tuned to favor large jumps in the parameter space. #' The crossover probabilities determine how many parameters are updated simultaneously. #' Second, outlier chains are replaced as they can largely deteriorate the sampler's performance. #' However, these steps destroy the detailed balance of the chain.
Consequently these parts of the chain #' should be discarded when summarizing posterior moments. This can be done automatically during the #' sampling process (i.e. burnin > adaptation) or subsequently by the user. We chose to distinguish between #' the burnin and adaptation phase to allow the user more flexibility in the sampler's settings. #' @example /inst/examples/DEfamilyHelp.R #' @seealso \code{\link{DREAM}} #' @export DREAMzs <- function(bayesianSetup, settings = list(iterations = 10000, nCR = 3, gamma = NULL, eps = 0, e = 5e-2, pCRupdate = FALSE, updateInterval = 10, burnin = 0, thin = 1, adaptation = 0.2, parallel = NULL, Z = NULL, ZupdateFrequency = 10, pSnooker = 0.1, DEpairs = 2, consoleUpdates = 10, startValue = NULL, currentChain = 1, message = FALSE)) { if("bayesianOutput" %in% class(bayesianSetup)){ restart <- TRUE } else restart <- FALSE if(restart){ if(is.null(settings)) settings <- bayesianSetup$settings else settings <- applySettingsDefault(settings = settings, sampler = "DREAMzs") settings$adaptation <- 0 # set burnIn to 0 if restart because it has already been # applied in chain that is restarted and destroys detailed balance. }else{ # If nothing provided use default settings settings <- applySettingsDefault(settings = settings, sampler = "DREAMzs") } if(!restart){ setup <- bayesianSetup } else setup <- bayesianSetup$setup setup <- checkBayesianSetup(setup, parallel = settings$parallel) # calling parallel will check if requested parallelization in settings is provided by the BayesianSetup if(is.null(settings$parallel)) settings$parallel = setup$parallel # checking back - if no parallelization is provided, we use the parallelization in the BayesianSetup. We could also set parallel = F, but I feel it makes more sense to use the Bayesiansetup as default if(!restart){ if(is.null(settings$startValue)){ parLen = length(bayesianSetup$prior$sampler(1)) X = bayesianSetup$prior$sampler(3) } if(is.function(settings$startValue)){ X = settings$startValue() } if(class(settings$startValue)[1] == "numeric"){ X = bayesianSetup$prior$sampler(settings$startValue) } if(is.matrix(settings$startValue)) X <- settings$startValue if(is.null(settings$Z)){ parLen = length(bayesianSetup$prior$sampler(1)) Z = bayesianSetup$prior$sampler(parLen * 10) } if(is.function(settings$Z)){ Z = settings$Z() } if(class(settings$Z)[1] == "numeric"){ Z = bayesianSetup$prior$sampler(settings$Z) } if(is.matrix(settings$Z)) Z <- settings$Z }else{ X <- bayesianSetup$X Z <- bayesianSetup$Z if(is.vector(Z)) Z = as.matrix(Z) } if (! is.matrix(X)) stop("wrong starting values") if (! 
is.matrix(Z)) stop("wrong Z values") FUN = setup$posterior$density pCRupdate <- settings$pCRupdate nCR <- settings$nCR Npar <- ncol(X) Npar12 <- (Npar - 1)/2 # factor for Metropolis ratio DE Snooker update parallel <- settings$parallel if(!is.null(parallel)){ if(is.numeric(parallel) | parallel == "external") parallel <- TRUE }else parallel <- FALSE pCRupdate <- settings$pCRupdate nCR <- settings$nCR Npar <- ncol(X) Npop <- nrow(X) # Set adaptation if percentage is supplied if(settings$adaptation <1) settings$adaptation <- settings$adaptation*settings$iterations # Determine number of iterations and initialize chain n.iter <- ceiling(settings$iterations/Npop) if (n.iter < 2) stop ("The total number of iterations must be greater than 3") settings$burnin <- settings$burnin/Npop lChain <- ceiling((n.iter - settings$burnin)/settings$thin)+1 pChain <- array(NA, dim=c(lChain, Npar+3, Npop)) # assign memory for Z and write first values in Z M <- nrow(Z[complete.cases(Z),,drop = FALSE]) Zold <- Z[complete.cases(Z),,drop = FALSE] Z <- matrix(NA, nrow= M + floor((n.iter) /settings$ZupdateFrequency) * Npop, ncol=Npar) Z[1:M,] <- Zold colnames(pChain) <- c(setup$names, "LP", "LL", "LPr") # Evaluate start values and write them in the chain logfitness_X <- FUN(X, returnAll = T) pChain[1,,] <- t(cbind(X,logfitness_X)) # Set counter counter <- 1 iseq <- 1:Npop #### gamma, initialization. However gamma is calculated every iteration (see below). gamma <- 2.38/sqrt(settings$DEpairs*Npar) ## delta initialization delta <- rep(0, settings$nCR) funevals <- 0 #### pCR update # Initialization if(!restart){ pCR = rep(1/nCR, nCR) lCR <- rep(0,nCR) CR <- matrix(1/nCR, nrow = Npop, ncol = settings$updateInterval) }else{ pCR <- bayesianSetup$pCR CR <- generateCRvalues(pCR, settings, Npop) } # helper counter for CR value index counter_update <- 0 ## Omega initialization omega <- numeric() ## eps and e eps <- settings$eps e <- settings$e ##################### Start iterations ############################## for(iter in 2:n.iter){ xOld <- X if(parallel == TRUE){ x_prop <- matrix(NA, nrow= Npop, ncol=Npar) r_extra <- numeric(Npop) for(i in 1:Npop){ if(runif(1)>settings$pSnooker){ selectedChains1 <- sample((1:M), settings$DEpairs, replace = FALSE) selectedChains2 <- numeric(settings$DEpairs) # Avoid that selected chains are identical for(k in 1:settings$DEpairs){ selectedChains2[k] <- sample((1:M)[-c(selectedChains1[k],selectedChains2[1:k]) ],1) } # Get indices of parameters that are updated = indX rn <- runif(Npar) indX <- which(rn>(1-CR[i])) # Make sure at least one dimension is updated if(length(indX) == 0) indX <- sample(1:Npar, 1) # First update proposal x_prop[i,] <- X[i,] # Calculate gamma based on DEpairs and number of dimensions # that are updated simulateously. # To jump between modes gamma is set to 1 every fifth iteration. 
if(runif(1)>4/5){ gamma <- 1 }else{ gamma <-2.38/sqrt(settings$DEpairs* length(indX)) } # No snooker update # Replace with new proposal for indX x_prop[i,indX] <- X[i,indX] + (1+e)*gamma*(apply(as.matrix(Z[selectedChains1,indX]),2,sum)- apply(as.matrix(Z[selectedChains2,indX]),2,sum)) + eps*rnorm(length(indX),0,1) r_extra[i] <- 0 }else{ # Make proposal using snooker update selectSnooker <- sample((1:M),replace = FALSE, 3) z <- Z[selectSnooker[1],] x_z <- X[i,] - z D2 <- max(sum(x_z*x_z), 1.0e-300) projdiff <- sum((Z[selectSnooker[1],] -Z[selectSnooker[2],]) * x_z)/D2 # inner_product of difference with x_z / squared norm x_z gamma_snooker <- runif(1, min=1.2,max=2.2) x_prop[i,] <- X[i,] + gamma_snooker * projdiff * x_z x_z <- x_prop[i,] - z D2prop <- max(sum(x_z*x_z), 1.0e-300) r_extra[i] <- Npar12 * (log(D2prop) - log(D2)) } } # Npop # run proposals logfitness_x_prop <- FUN(x_prop, returnAll = T) # evaluate acceptance for(i in 1:Npop){ if(!is.na(logfitness_x_prop[i,1] - logfitness_X[i,1])){ if ((logfitness_x_prop[i,1] - logfitness_X[i,1] + r_extra[i]) > log(runif(1))){ # accept <- accept + 1 X[i,] <- x_prop[i,] logfitness_X[i,] <- logfitness_x_prop[i,] } } } }else{ ## If not parallel for(i in 1:Npop){ if(runif(1)>settings$pSnooker){ selectedChains1 <- sample((1:M), settings$DEpairs, replace = FALSE) selectedChains2 <- numeric(settings$DEpairs) # Avoid that selected chains are identical for(k in 1:settings$DEpairs){ selectedChains2[k] <- sample((1:M)[-c(selectedChains1[k],selectedChains2[1:k]) ],1) } # Get indices of parameters that are updated = indX rn <- runif(Npar) indX <- which(rn>(1-CR[i])) # Make sure at least one dimension is updated if(length(indX) == 0) indX <- sample(1:Npar, 1) # First update proposal x_prop <- X[i,] # Calculate gamma based on DEpairs and number of dimensions # that are updated simulateously. # To jump between modes gamma is set to 1 every fifth iteration. 
if(runif(1)>4/5){ gamma <- 1 }else{ gamma <-2.38/sqrt(settings$DEpairs* length(indX)) } # No snooker update # Replace with new proposal for indX x_prop[indX] <- X[i,indX] + (1+e)*gamma*(apply(as.matrix(Z[selectedChains1,indX]),2,sum)- apply(as.matrix(Z[selectedChains2,indX]),2,sum)) + eps*rnorm(length(indX),0,1) r_extra <- 0 }else{ # Make proposal using snooker update selectSnooker <- sample((1:M),replace = FALSE, 3) z <- Z[selectSnooker[1],] x_z <- X[i,] - z D2 <- max(sum(x_z*x_z), 1.0e-300) projdiff <- sum((Z[selectSnooker[1],] -Z[selectSnooker[2],]) * x_z)/D2 # inner_product of difference with x_z / squared norm x_z gamma_snooker <- runif(1, min=1.2,max=2.2) x_prop <- X[i,] + gamma_snooker * projdiff * x_z x_z <- x_prop - z D2prop <- max(sum(x_z*x_z), 1.0e-300) r_extra <- Npar12 * (log(D2prop) - log(D2)) } logfitness_x_prop <- FUN(x_prop, returnAll = T) if(!is.na(logfitness_x_prop[1] - logfitness_X[i,1])){ # To catch possible error if ((logfitness_x_prop[1] - logfitness_X[i,1] + r_extra) > log(runif(1))){ X[i,] <- x_prop logfitness_X[i,] <- logfitness_x_prop } } } #Npop } # not parallel ## Write values in chain if((iter > settings$burnin) && (iter %% settings$thin == 0)){ counter <- counter+1 pChain[counter,,] <- t(cbind(X,logfitness_X)) } # Update Z if(counter%%settings$ZupdateFrequency == 0){ Z[(M+1):(M+Npop),] <- X M <- M+Npop } ################################### if(iter < settings$adaptation){ if(pCRupdate){ ## Calculate delta, this is (unlike the update) done every iteration ## Calculate delta ## Calculate standard deviation of each dimension of X sdX <- apply(X[,1:Npar,drop=FALSE],2,sd) ## Compute Euclidean distance between old and new X values delta_Norm <- rowSums(((xOld-X[,1:Npar,drop=FALSE])/sdX)^2) ## Now delta can be calculated for (k in 1:settings$nCR){ # Loop over CR values # Find updated chains ind <- which(abs(CR[,k]-(k/nCR)) < 1e-5) ## Add normalized squared distance to the current delta delta[k] <- delta[k]+sum(delta_Norm[ind]) #delta[k] <- delta[k]+sum(delta_Norm) } } if(iter%%settings$updateInterval == 0){ if(pCRupdate){ # Update CR values tmp <- AdaptpCR(CR, delta, lCR, settings, Npop) pCR <- tmp$pCR lCR <- tmp$lCR } ## remove outliers ## TODO include if(remOutliers = TRUE) ?? for(out in 1:Npop){ omega[out] <- mean(pChain[((counter/2):counter),Npar+1, out]) } if(NaN %in% omega){ outlierChain <- NULL # Prevent possible error }else{ # Inter quantile range IQR <- quantile(omega, probs = c(0.25, 0.75)) # Determine outlier chains outlierChain <- which(omega< IQR[1] - 2*(IQR[2]-IQR[1])) } # Replace with best chain if(length(outlierChain) > 0){ best <- which.max(pChain[counter,Npar+1,]) pChain[counter,,outlierChain] <- pChain[counter,,best] } # Remove outliers } } if(iter%%settings$updateInterval == 0){ counter_update <- 0 # set counter back to zero CR <- generateCRvalues(pCR, settings, Npop) } ############################### if(settings$message){ if( (iter %% settings$consoleUpdates == 0) | (iter == n.iter)) cat("\r","Running DREAM-MCMC, chain ", settings$currentChain, "iteration" ,iter*Npop,"of",n.iter*Npop,". 
Current logp ", logfitness_X[,1], "Please wait!","\r") flush.console() } } # niter iterationsOld <- 0 pChain <- pChain[1:counter,,] if(restart){ # Combine chains newchains <- array(NA, dim = c((counter+nrow(bayesianSetup$chain[[1]])), (Npar+3), Npop)) for(i in 1:Npop){ for(k in 1:(Npar+3)){ newchains[,k,i] <- c(bayesianSetup$chain[[i]][,k],pChain[,k,i]) } } pChain <- newchains } pChain<- coda::as.mcmc.list(lapply(1:Npop,function(i) coda::as.mcmc(pChain[,1:(Npar+3),i]))) list(chains = pChain, X = as.matrix(X[,1:Npar]), Z = Z, pCR = pCR) }
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/mcmcDREAMzs.R
#' The Metropolis Algorithm #' @author Francesco Minunno #' @description The Metropolis Algorithm (Metropolis et al. 1953) #' @param startValue vector with the start values for the algorithm. Can be NULL if FUN is of class BayesianSetup. In this case startValues are sampled from the prior. #' @param iterations iterations to run #' @param nBI number of burnin #' @param parmin minimum values for the parameter vector or NULL if FUN is of class BayesianSetup #' @param parmax maximum values for the parameter vector or NULL if FUN is of class BayesianSetup #' @param f scaling factor #' @param FUN function to be sampled from or object of class bayesianSetup #' @param consoleUpdates interger, determines the frequency with which sampler progress is printed to the console #' @references Metropolis, Nicholas, et al. "Equation of state calculations by fast computing machines." The journal of chemical physics 21.6 (1953): 1087-1092. #' @keywords internal # #' @export M <- function(startValue = NULL, iterations = 10000, nBI = 0 , parmin = NULL, parmax= NULL, f = 1, FUN, consoleUpdates=1000) { if(inherits(FUN, "BayesianSetup")){ if(FUN$numPars==1) stop("Sampler cannot be started for 1 parameter") if(is.null(startValue)){ startValue <- FUN$prior$sampler() } parmin <- FUN$prior$lower parmax <- FUN$prior$upper FUN <- FUN$posterior$density } pValues = startValue lChain = iterations npar <- length(pValues) logMAP <- -Inf pChain <- matrix(NA_real_, nrow = lChain - nBI, ncol = npar+3) #******************************************************************************** # First call to the model. Calculate likelihood and prior postL0 <- FUN(pValues, returnAll = T) accept.prob <- 0 #******************************************************************************** # Define Variance-covariance matrix (vcovProp) for proposal generation an scalProp <- f * 2.4^2/npar # This f is the scaling factor tuned manually covPar <- scalProp * diag((0.01 * (parmax - parmin))^2) #******************************************************************************** # Build up the chain. Candidates for the parameter values (candidatepValues) # are assumed to stem from a multivariate normal distribution (mvrnorm) with mean # at the current state and covariance given by scalProp*covPar. #----- for (j in 1:lChain) { if (j%%consoleUpdates == 0) print(c(j,postL1[1])) candidatepValues <- mvtnorm::rmvnorm(1, pValues, covPar) # Call the model and calculate the likelihood postL1 <- FUN(candidatepValues, returnAll = T) # Check whether the candidates are accepted. alpha <- min(exp(postL1[1] - postL0[1]), 1) accept <- 0 if (runif(1) < alpha) { postL0 <- postL1 pValues <- candidatepValues accept <- 1 if (postL0[1] > logMAP) { logMAP <- postL0[1] psetMAP <- pValues } } if (j > nBI) { pChain[j-nBI,] <- c(pValues,postL0) accept.prob <- accept.prob + accept } } accept.prob <- accept.prob/(lChain-nBI) list(Draws = pChain, accept.prob = accept.prob,psetMAP=psetMAP) } #' The Adaptive Metropolis Algorithm #' @author Francesco Minunno #' @description The Adaptive Metropolis Algorithm (Haario et al. 2001) #' @param startValue vector with the start values for the algorithm. Can be NULL if FUN is of class BayesianSetup. In this case startValues are sampled from the prior. 
#' @param iterations iterations to run #' @param nBI number of burnin #' @param parmin minimum values for the parameter vector or NULL if FUN is of class BayesianSetup #' @param parmax maximum values for the parameter vector or NULL if FUN is of class BayesianSetup #' @param f scaling factor #' @param FUN function to be sampled from or object of class bayesianSetup #' @param eps small number to avoid singularity #' @references Haario, Heikki, Eero Saksman, and Johanna Tamminen. "An adaptive Metropolis algorithm." Bernoulli (2001): 223-242. #' @keywords internal # #' @export AM <- function(startValue = NULL, iterations = 10000, nBI = 0, parmin = NULL, parmax = NULL, FUN, f = 1, eps = 0) { if(inherits(FUN, "BayesianSetup")){ if(FUN$numPars==1) stop("Sampler cannot be started for 1 parameter") if(is.null(startValue)){ startValue <- FUN$prior$sampler() } parmin <- FUN$prior$lower parmax <- FUN$prior$upper FUN <- FUN$posterior$density } pValues = startValue lChain = iterations noAdapt <- 1000 n.iter <- lChain + noAdapt npar = length(pValues) pChain <- matrix(NA_real_, nrow = n.iter - nBI, ncol = npar+3) #******************************************************************************** # First call to the model. Calculate likelihood and prior postL0 <- FUN(pValues, returnAll = T) accept.prob <- 0 epsDiag <- eps * diag(npar) scalProp <- f * (2.4^2/npar) covPar <- scalProp * diag((0.01*(parmax - parmin))^2) for (j in 1:n.iter) { candidatepValues <- as.vector(mvtnorm::rmvnorm(1, pValues, covPar)) postL1 <- FUN(candidatepValues, returnAll = T) alpha <- min(exp(postL1[1] - postL0[1]), 1) accept <- 0 if (runif(1) < alpha) { postL0 <- postL1 pValues <- candidatepValues accept <- 1 } if (j > nBI) { pChain[j-nBI,] <- c(pValues, postL0) } if (j == (nBI + noAdapt)) { avePar <- apply(pChain[1:noAdapt,1:npar], 2, mean) covPar <- scalProp * (cov(pChain[1:noAdapt,1:npar], pChain[1:noAdapt,1:npar]) + epsDiag) } if (j > (nBI + noAdapt)) { accept.prob <- accept.prob + accept t <- j - nBI avePar_new <- as.vector(((t-1) * avePar + pValues) / t) covPar_new <- ((t-2) * covPar + scalProp * ((t-1) * (avePar %o% avePar) - t * (avePar_new %o% avePar_new) + (pValues %o% pValues)) + epsDiag) / (t-1) avePar <- avePar_new covPar <- covPar_new } } accept.prob = accept.prob/(lChain-nBI) list(Draws = pChain[(noAdapt+1):(n.iter-nBI),], accept.prob = accept.prob) } #' The Delayed Rejection Algorithm #' @author Francesco Minunno #' @description The Delayed Rejection Algorithm (Tierney and Mira, 1999) #' @param startValue vector with the start values for the algorithm. Can be NULL if FUN is of class BayesianSetup. In this case startValues are sampled from the prior. #' @param iterations iterations to run #' @param nBI number of burnin #' @param parmin minimum values for the parameter vector or NULL if FUN is of class BayesianSetup #' @param parmax maximum values for the parameter vector or NULL if FUN is of class BayesianSetup #' @param f1 scaling factor for first proposal #' @param f2 scaling factor for second proposal #' @param FUN function to be sampled from or object of class bayesianSetup #' @references Tierney, Luke, and Antonietta Mira. "Some adaptive Monte Carlo methods for Bayesian inference." Statistics in medicine 18.1718 (1999): 2507-2515. 
#' @keywords internal # #' @export DR <- function(startValue = NULL, iterations = 10000, nBI=0, parmin = NULL, parmax =NULL, f1 = 1, f2= 0.5, FUN) { if(inherits(FUN, "BayesianSetup")){ if(FUN$numPars==1) stop("Sampler cannot be started for 1 parameter") if(is.null(startValue)){ startValue <- FUN$prior$sampler() } parmin <- FUN$prior$lower parmax <- FUN$prior$upper FUN <- FUN$posterior$density } pValues = startValue lChain = iterations npar = length(pValues) pChain <- matrix(NA_real_, nrow = lChain - nBI, ncol = npar+3) #******************************************************************************** # First call to the model. Calculate likelihood and prior postL0 <- FUN(pValues, returnAll = T) #******************************************************************************** # Define Variance-covariance matrix (vcovProp) for proposal generation an covPar <- diag((0.01 * (parmax - parmin))^2) sP <- (2.4^2/npar) * c(f1, f2) accept.prob <- 0 for (j in 1:lChain) { candidatepValues <- mvtnorm::rmvnorm(1, pValues, sP[1] * covPar) # Call the model and calculate the likelihood postL1 <- FUN(candidatepValues, returnAll = T) # Check whether the candidates are accepted. If yes and if burn-in has been completed, alpha1 <- min(exp(postL1[1]-postL0[1]), 1.0) accept <- 0 if (runif(1) < alpha1) { pValues <- candidatepValues postL0 = postL1 accept <- 1 } else { candidatepValues2 <- mvtnorm::rmvnorm(1, pValues, sP[2] * covPar) # Call the model and calculate the likelihood postL2 <- FUN(candidatepValues2, returnAll = T) # Check whether the candidates are accepted. alpha2 <- min(exp(postL1[1]-postL2[1]), 1.0) temp <- mvtnorm::dmvnorm(candidatepValues, candidatepValues2, sP[1] * covPar) / mvtnorm::dmvnorm(candidatepValues, pValues, sP[1] * covPar) alpha <- min(exp(postL2[1]-postL0[1]) * temp * ((1.0-alpha2)/(1.0-alpha1)), 1.0) if(is.nan(alpha)) { alpha <- -1 } if (runif(1) < alpha) { pValues <- candidatepValues2 postL0 <- postL2 accept <- 1 } } if (j > nBI) { pChain[j-nBI,] <- c(pValues, postL0) accept.prob <- accept.prob + accept } } accept.prob = accept.prob/(lChain-nBI) list(Draws = pChain, accept.prob = accept.prob) } #' The Delayed Rejection Adaptive Metropolis Algorithm #' @author Francesco Minunno #' @description The Delayed Rejection Adaptive Metropolis Algorithm (Haario et al. 2001) #' @param startValue vector with the start values for the algorithm. Can be NULL if FUN is of class BayesianSetup. In this case startValues are sampled from the prior. #' @param iterations iterations to run #' @param nBI number of burnin #' @param parmin minimum values for the parameter vector or NULL if FUN is of class BayesianSetup #' @param parmax maximum values for the parameter vector or NULL if FUN is of class BayesianSetup #' @param f scaling factor #' @param FUN function to be sampled from #' @param eps small number to avoid singularity or object of class bayesianSetup #' @references Haario, Heikki, Eero Saksman, and Johanna Tamminen. "An adaptive Metropolis algorithm." Bernoulli (2001): 223-242. 
#' @keywords internal # #' @export DRAM <- function(startValue = NULL, iterations = 10000, nBI = 0, parmin = NULL, parmax = NULL, FUN, f = 1, eps = 0) { if(inherits(FUN, "BayesianSetup")){ if(FUN$numPars==1) stop("Sampler cannot be started for 1 parameter") if(is.null(startValue)){ startValue <- FUN$prior$sampler() } parmin <- FUN$prior$lower parmax <- FUN$prior$upper FUN <- FUN$posterior$density } pValues = startValue lChain = iterations noAdapt <- 1000 n.iter <- lChain + noAdapt npar = length(pValues) pChain <- matrix(NA_real_, nrow = n.iter - nBI, ncol = npar+3) #******************************************************************************** # First call to the model. Calculate likelihood and prior postL0 <- FUN(pValues, returnAll = T) accept.prob <- 0 epsDiag <- eps * diag(npar) scalProp <- f * (2.4^2/npar) covPar <- scalProp * diag((0.01*(parmax - parmin))^2) for (j in 1:n.iter) { candidatepValues <- as.vector(mvtnorm::rmvnorm(1, pValues, covPar)) postL1 <- FUN(candidatepValues, returnAll = T) alpha1 <- min(exp(postL1[1] - postL0[1]), 1) accept <- 0 if (runif(1) < alpha1) { postL0 <- postL1 pValues <- candidatepValues accept <- 1 } else { candidatepValues2 <- as.vector(mvtnorm::rmvnorm(1, pValues, 0.5 * covPar)) # Call the model and calculate the likelihood postL2 <- FUN(candidatepValues2, returnAll = T) # Check whether the candidates are accepted. alpha2 <- min(exp(postL1[1]-postL2[1]), 1.0) temp <- mvtnorm::dmvnorm(candidatepValues, candidatepValues2, covPar) / mvtnorm::dmvnorm(candidatepValues, pValues, covPar) alpha <- min(exp(postL2[1]-postL0[1]) * temp * ((1.0-alpha2)/(1.0-alpha1)), 1.0) if(is.nan(alpha)) { alpha <- -1 } if (runif(1) < alpha) { pValues <- candidatepValues2 postL0 <- postL2 accept <- 1 } } if (j > nBI) { pChain[j-nBI,] <- c(pValues, postL0) } if (j == (nBI + noAdapt)) { avePar <- apply(pChain[1:noAdapt,1:npar], 2, mean) covPar <- scalProp * (cov(pChain[1:noAdapt,1:npar], pChain[1:noAdapt,1:npar]) + epsDiag) } if (j > (nBI + noAdapt)) { accept.prob <- accept.prob + accept t <- j - nBI avePar_new <- as.vector(((t-1) * avePar + pValues) / t) covPar_new <- ((t-2) * covPar + scalProp * ((t-1) * (avePar %o% avePar) - t * (avePar_new %o% avePar_new) + (pValues %o% pValues)) + epsDiag) / (t-1) avePar <- avePar_new covPar <- covPar_new } } accept.prob = accept.prob/(lChain-nBI) list(Draws = pChain[(noAdapt+1):(n.iter-nBI),], accept.prob = accept.prob) }
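# --- Hedged usage sketch for the low-level samplers above (illustrative, not part of the original file). ---
# M/AM/DR/DRAM are internal workhorses; they accept either a plain log-target plus parmin/parmax
# or a BayesianSetup, as shown here for AM. createBayesianSetup() is assumed from this package
# and the iteration counts are arbitrary; direct calls are for illustration only.
library(BayesianTools)
ll <- function(x) sum(dnorm(x, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = rep(-5, 2), upper = rep(5, 2))
res <- AM(iterations = 10000, nBI = 1000, FUN = setup, f = 1, eps = 0)
res$accept.prob      # acceptance rate after the adaptation window
head(res$Draws)      # columns: parameters, then LP, LL, LPr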
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/mcmcFrancesco.R
#' Creates a Metropolis-type MCMC with options for covariance adaptation, delayed rejection, Metropolis-within-Gibbs, and tempering #' @author Florian Hartig #' @param bayesianSetup either an object of class bayesianSetup created by \code{\link{createBayesianSetup}} (recommended), or a log target function #' @param settings a list of settings - possible options follow below #' @param startValue startValue for the MCMC and optimization (if optimize = T). If not provided, the sampler will attempt to obtain the startValue from the bayesianSetup #' @param optimize logical, determines whether an optimization for start values and proposal function should be run before starting the sampling #' @param proposalGenerator optional proposal generator object (see \code{\link{createProposalGenerator}}) #' @param proposalScaling additional scaling parameter for the proposals that controls the different scales of the proposals after delayed rejection (typically, after a rejection, one would want to try a smaller scale). Needs to be as long as DRlevels. Defaults to 0.5^(- 0:(mcmcSampler$settings$DRlevels -1)) #' @param burnin number of iterations treated as burn-in. These iterations are not recorded in the chain. #' @param thin thinning parameter. Determines the interval in which values are recorded. #' @param consoleUpdates integer, determines the frequency with which sampler progress is printed to the console #' @param adapt logical, determines whether an adaptive algorithm should be implemented. Default is TRUE. #' @param adaptationInterval integer, determines the interval of the adaptation if adapt = TRUE. #' @param adaptationNotBefore integer, determines the start value for the adaptation if adapt = TRUE. #' @param DRlevels integer, determines the number of levels for a delayed rejection sampler. Default is 1, which means no delayed rejection is used. #' @param temperingFunction function to implement simulated tempering in the algorithm. The function describes how the acceptance rate will be influenced in the course of the iterations. #' @param gibbsProbabilities vector that defines the relative probabilities of the number of parameters to be changed simultaneously. #' @param message logical, determines whether the sampler's progress should be printed #' @details The 'Metropolis' function is the main function for all Metropolis based samplers in this package. To call the derivatives from the basic Metropolis-Hastings MCMC, you can either use the corresponding function (e.g. \code{\link{AM}} for an adaptive Metropolis sampler) or use the parameters to adapt the basic Metropolis-Hastings. The advantage of the latter case is that you can easily combine different properties (e.g. adaptive sampling and delayed rejection sampling) without changing the function. #' @import coda #' @example /inst/examples/MetropolisHelp.R #' @export #' @references Haario, H., E. Saksman, and J. Tamminen (2001). An adaptive Metropolis algorithm. Bernoulli, 223-242. #' @references Haario, Heikki, et al. "DRAM: efficient adaptive MCMC." Statistics and Computing 16.4 (2006): 339-354. #' @references Hastings, W. K. (1970). Monte Carlo sampling methods using Markov chains and their applications. Biometrika 57 (1), 97-109. #' @references Green, Peter J., and Antonietta Mira. "Delayed rejection in reversible jump Metropolis-Hastings." Biometrika (2001): 1035-1053. #' @references Metropolis, N., A. W. Rosenbluth, M. N. Rosenbluth, A. H. Teller, and E. Teller (1953). Equation of state calculations by fast computing machines.
The journal of chemical physics 21 (6), 1087 - 1092. Metropolis <- function(bayesianSetup, settings = list(startValue = NULL, optimize = T, proposalGenerator = NULL, consoleUpdates=100, burnin = 0, thin = 1, parallel = NULL, adapt = T, adaptationInterval= 500, adaptationNotBefore = 3000, DRlevels = 1 , proposalScaling = NULL, adaptationDepth = NULL, temperingFunction = NULL, gibbsProbabilities = NULL, message = TRUE )){ ## General setup - this template should be similar for all MCMC algorithms setup <- checkBayesianSetup(bayesianSetup, parallel = settings$parallel) # calling parallel will check if requested parallelization in settings is provided by the BayesianSetup if(is.null(settings$parallel)) settings$parallel = bayesianSetup$parallel # checking back - if no parallelization is provided, we use the parallelization in the BayesianSetup. We could also set parallel = F, but I feel it makes more sense to use the Bayesiansetup as default settings = applySettingsDefault(settings, sampler = "Metropolis") if(is.null(settings$startValue)){ settings$startValue = bayesianSetup$prior$sampler() } if(is.function(settings$startValue)){ settings$startValue = settings$startValue() } ## Parameter consistency checks if(is.null(settings$adaptationDepth)){ settings$adaptationDepth = settings$adaptationNotBefore } # Decreasing scaling for DRAM by default if (is.null(settings$proposalScaling)) settings$proposalScaling = 0.5^(- 0:(settings$DRlevels -1)) tmp <- setupStartProposal(proposalGenerator = settings$proposalGenerator, bayesianSetup = bayesianSetup, settings = settings) settings = tmp$settings proposalGenerator = tmp$proposalGenerator ####### CREATE CHAIN chain = array(dim = c(1,bayesianSetup$numPars+3)) chain[1,1:bayesianSetup$numPars] = settings$startValue colnames(chain) = c(1:bayesianSetup$numPars, "LP", "LL", "LPr") chain[1, (bayesianSetup$numPars+1):(bayesianSetup$numPars+3)] = setup$posterior$density(settings$startValue, returnAll = T) current = settings$startValue currentLP = as.numeric(chain[1, (bayesianSetup$numPars+1)]) ##### Sampling classFields = list( setup = setup, settings = settings, current = current, currentLP = currentLP, chain = chain, proposalGenerator = proposalGenerator, funEvals = 0, acceptanceRate = 0 ) class(classFields) <- c("mcmcSampler", "bayesianOutput") return(classFields) } #' gets samples while adopting the MCMC proposal generator #' @author Florian Hartig #' @param mcmcSampler an mcmcSampler #' @param iterations iterations #' @description Function to sample with cobinations of the basic Metropolis-Hastings MCMC algorithm (Metropolis et al., 1953), a variation of the adaptive Metropolis MCMC (Haario et al., 2001), the delayed rejection algorithm (Tierney & Mira, 1999), and the delayed rejection adaptive Metropolis algorithm (DRAM, Haario et al), and the Metropolis within Gibbs #' @export #' @keywords internal sampleMetropolis <- function(mcmcSampler, iterations){ burnin <- mcmcSampler$settings$burnin thin <- mcmcSampler$settings$thin CounterFunEvals = mcmcSampler$funEvals CounterAccept = nrow(mcmcSampler$chain)*mcmcSampler$acceptanceRate if (mcmcSampler$settings$DRlevels > 2) stop("DRlevels > 2 currently not implemented") # Increase chain lastvalue = nrow(mcmcSampler$chain) mcmcSampler$chain = rbind(mcmcSampler$chain, array(dim = c(floor((iterations-burnin)/thin),mcmcSampler$setup$numPars+3))) alpha = rep(NA, mcmcSampler$settings$DRlevels) proposalEval = matrix( nrow = mcmcSampler$settings$DRlevels, ncol = 3) proposal = matrix( nrow = mcmcSampler$settings$DRlevels, 
ncol = mcmcSampler$setup$numPars) # Initialize counter for chain update counter <- lastvalue for (i in lastvalue:(lastvalue+iterations-1)){ accepted = F if(is.null(mcmcSampler$settings$temperingFunction)) tempering = 1 else tempering = mcmcSampler$settings$temperingFunction(i) if(tempering < 1) warning("Tempering option < 1. This usually doesn't make sense!") for (j in 1:mcmcSampler$settings$DRlevels){ proposal[j,] = mcmcSampler$proposalGenerator$returnProposal(x = mcmcSampler$current, scale = mcmcSampler$settings$proposalScaling[j]) proposalEval[j,] <- mcmcSampler$setup$posterior$density(proposal[j,], returnAll = T) CounterFunEvals <- CounterFunEvals+1 # case j = 1 (normal MH-MCMC) if (j == 1){ alpha[j] = metropolisRatio(proposalEval[j,1], mcmcSampler$currentLP, tempering) jumpProbab = alpha[1] # case j = 2 (first delayed rejection) } else if (j == 2 & alpha[j-1] > 0 ){ alpha[j] = metropolisRatio(proposalEval[j,1], proposalEval[j-1,1], tempering) temp <- metropolisRatio(mcmcSampler$proposalGenerator$returnDensity(proposal[1,], proposal[2,]), mcmcSampler$proposalGenerator$returnDensity(mcmcSampler$current, proposal[1,])) jumpProbab = metropolisRatio(proposalEval[j,1], mcmcSampler$currentLP, tempering) * temp * (1.0-alpha[j]) / (1.0-alpha[j-1]) } if (runif(1) < jumpProbab){ accepted = T mcmcSampler$current = proposal[j,] mcmcSampler$currentLP = proposalEval[j,1] if((i > (lastvalue+burnin)) && (i %% thin == 0) ){ counter <- counter+1 mcmcSampler$chain[counter,] = c(proposal[j,], proposalEval[j,]) } break } } if((accepted == F) && (i > (lastvalue+burnin)) && (i %% thin == 0)){ counter <- counter +1 mcmcSampler$chain[counter,] = mcmcSampler$chain[counter-1,] } if(accepted == T) CounterAccept <- CounterAccept+1 # Proposal update if(mcmcSampler$settings$adapt == T & i > mcmcSampler$settings$adaptationNotBefore & i %% mcmcSampler$settings$adaptationInterval == 0 ){ start = max(1, counter - mcmcSampler$settings$adaptationDepth) mcmcSampler$proposalGenerator = updateProposalGenerator(proposal = mcmcSampler$proposalGenerator, chain = mcmcSampler$chain[start:counter,1:mcmcSampler$setup$numPars], message = F) } # Console update if(mcmcSampler$settings$message){ if( i %% mcmcSampler$settings$consoleUpdates == 0 ) cat("\r","Running Metropolis-MCMC, chain ", mcmcSampler$settings$currentChain, "iteration" ,i,"of",iterations, ". Current logp: ", mcmcSampler$chain[counter,mcmcSampler$setup$numPars+1]," Please wait!","\r") flush.console() } } # Make sure chain has right size TODO - why is this neccessary mcmcSampler$chain <- mcmcSampler$chain[1:counter,] mcmcSampler$funEvals <- CounterFunEvals mcmcSampler$acceptanceRate <- CounterAccept/CounterFunEvals return(mcmcSampler) }
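## Illustrative usage (editor's sketch, not part of the original source): Metropolis() and
## sampleMetropolis() are normally driven through runMCMC() rather than called directly.
## The toy likelihood, parameter bounds, and iteration numbers below are arbitrary values
## chosen for illustration; adapt = TRUE together with DRlevels = 2 corresponds to a
## DRAM-type configuration as described in the documentation above.
library(BayesianTools)
ll <- function(x) sum(dnorm(x, mean = 0, sd = 1, log = TRUE))  # toy log-likelihood
setup <- createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3))
settings <- list(iterations = 10000, adapt = TRUE, DRlevels = 2,
                 optimize = TRUE, message = FALSE)
out <- runMCMC(bayesianSetup = setup, sampler = "Metropolis", settings = settings)
summary(out)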
# file: BayesianTools/R/mcmcMetropolis.R
#' Run multiple chains
#' @param bayesianSetup Object of class "BayesianSetup"
#' @param settings list with settings for sampler
#' @param sampler character, either "Metropolis" or "DE"
#' @return list containing the single runs ($sampler) and the chains in a coda::mcmc.list ($mcmc.list)
#' @keywords internal
mcmcMultipleChains <- function(bayesianSetup, settings, sampler) {
  # Get number of chains
  nrChains <- settings$nrChains

  # Set settings$nrChains to one to avoid infinite loop
  settings$nrChains <- 1

  # Initialize output
  out <- list()
  out$sampler <- list()

  # Run sampler
  for (i in 1:nrChains) {
    out$sampler[[i]] <- runMCMC(bayesianSetup, sampler = sampler, settings = settings)
  }

  # Make coda::mcmc.list object
  for (i in 1:nrChains) {
    txtemp <- paste("coda::mcmc(out$sampler[[", i, "]]$chain)", sep = "")
    if (i == 1) tx = txtemp else tx <- paste(tx, txtemp, sep = ", ")
  }
  tx <- paste("coda::mcmc.list(", tx, ")", sep = "")
  out$mcmc.list <- eval(parse(text = tx))

  return(out)
}
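## Illustrative usage (editor's sketch): mcmcMultipleChains() is an internal helper; in user
## code, independent chains are requested through settings$nrChains in runMCMC(), and the
## combined coda object is obtained via getSample(). The likelihood, bounds, and iteration
## numbers below are arbitrary illustration values.
library(BayesianTools)
ll <- function(x) sum(dnorm(x, sd = 1, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = rep(-5, 2), upper = rep(5, 2))
out <- runMCMC(setup, sampler = "Metropolis",
               settings = list(iterations = 5000, nrChains = 3, message = FALSE))
codaChains <- getSample(out, coda = TRUE)   # coda::mcmc.list across the independent chains
coda::gelman.diag(codaChains)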
# file: BayesianTools/R/mcmcMultipleChains.R
#' Main wrapper function to start MCMCs, particle MCMCs and SMCs #' @author Florian Hartig #' @param bayesianSetup either a BayesianSetup (see \code{\link{createBayesianSetup}}), a function, or a BayesianOutput created by runMCMC. The latter allows to continue a previous MCMC run. See details for how to restart a sampler. #' @param sampler sampling algorithm to be run. Default is DEzs. Options are "Metropolis", "AM", "DR", "DRAM", "DE", "DEzs", "DREAM", "DREAMzs", "SMC". For details see the help of the individual functions. #' @param settings list with settings for each sampler. If a setting is not provided, defaults (see \code{\link{applySettingsDefault}}) will be used. #' @details The runMCMC function can be started with either one of #' #' 1. an object of class BayesianSetup with prior and likelihood function (created with \code{\link{createBayesianSetup}}). check if appropriate parallelization options are used - many samplers can make use of parallelization if this option is activated when the class is created. #' 2. a log posterior or other target function, #' 3. an object of class BayesianOutput created by runMCMC. The latter allows to continue a previous MCMC run. #' #' Settings for the sampler are provides as a list. You can see the default values by running \code{\link{applySettingsDefault}} with the respective sampler name. The following settings can be used for all MCMCs: #' #' * startValue (no default) start values for the MCMC. Note that DE family samplers require a matrix of start values. If startvalues are not provided, they are sampled from the prior. #' * iterations (10000) the MCMC iterations #' * burnin (0) burnin #' * thin (1) thinning while sampling #' * consoleUpdates (100) update frequency for console updates #' * parallel (NULL) whether parallelization is to be used #' * message (TRUE) if progress messages are to be printed #' * nrChains (1) the number of independent MCMC chains to be run. Note that this is not controlling the internal number of chains in population MCMCs such as DE, so if you run nrChains = 3 with a DEzs startValue that is a 4xparameter matrix (= 4 internal chains), you will run independent DEzs runs with 4 internal chains each. #' #' The MCMC samplers will have a number of additional settings, which are described in the Vignette (run vignette("BayesianTools", package="BayesianTools") and in the help of the samplers. See \code{\link{Metropolis}} for Metropolis based samplers, \code{\link{DE}} and \code{\link{DEzs}} for standard differential evolution samplers, \code{\link{DREAM}} and \code{\link{DREAMzs}} for DREAM sampler, \code{\link{Twalk}} for the Twalk sampler, and \code{\link{smcSampler}} for rejection and Sequential Monte Carlo sampling. Note that the samplers "AM", "DR", and "DRAM" are special cases of the "Metropolis" sampler and are shortcuts for predefined settings ("AM": adapt=TRUE; "DR": DRlevels=2; "DRAM": adapt=True, DRlevels=2). #' #' Note that even if you specify parallel = T, this will only turn on internal parallelization of the samplers. The independent samplers controlled by nrChains are not evaluated in parallel, so if time is an issue it will be better to run the MCMCs individually and then combine them via \code{\link{createMcmcSamplerList}} into one joint object. #' #' Note that DE and DREAM variants as well as SMC and T-walk require a population to start, which should be provided as a matrix. 
Default (NULL) sets the population size for DE to 3 x dimensions of parameters, for DREAM to 2 x dimensions of parameters and for DEzs and DREAMzs to three, sampled from the prior. Note also that the zs variants of DE and DREAM require two populations, the current population and the z matrix (a kind of memory) - if you want to set both, provide a list with startvalue$X and startvalue$Z. #' #' setting startValue for sampling with nrChains > 1 : if you want to provide different start values for the different chains, provide them as a list #' #' @return The function returns an object of class mcmcSampler (if one chain is run) or mcmcSamplerList. Both have the superclass bayesianOutput. It is possible to extract the samples as a coda object or matrix with \code{\link{getSample}}. #' It is also possible to summarize the posterior as a new prior via \code{\link{createPriorDensity}}. #' @example /inst/examples/mcmcRun.R #' @seealso \code{\link{createBayesianSetup}} #' @export runMCMC <- function(bayesianSetup , sampler = "DEzs", settings = NULL){ options(warn = 0) ptm <- proc.time() ####### RESTART ########## if("bayesianOutput" %in% class(bayesianSetup)){ # TODO - the next statements should have assertions in case someone overwrites the existing setting or similar previousMcmcSampler <- bayesianSetup # Catch the settings in case of nrChains > 1 if(!("mcmcSamplerList" %in% class(previousMcmcSampler) | "smcSamplerList" %in% class(previousMcmcSampler) )){ if(is.null(settings)) settings <- previousMcmcSampler$settings setup <- previousMcmcSampler$setup sampler <- previousMcmcSampler$settings$sampler previousSettings <- previousMcmcSampler$settings } else{ if(is.null(settings)) settings <- previousMcmcSampler[[1]]$settings settings$nrChains <- length(previousMcmcSampler) setup <- previousMcmcSampler[[1]]$setup sampler <- previousMcmcSampler[[1]]$settings$sampler previousSettings <- previousMcmcSampler[[1]]$settings } # Set settings$sampler (only needed if new settings are supplied) settings$sampler <- sampler # overwrite new settings for(name in names(settings)) previousSettings[[name]] <- settings[[name]] settings <- previousSettings # Check if previous settings will be new default previousMcmcSampler$settings <- applySettingsDefault(settings = settings, sampler = settings$sampler, check = TRUE) restart <- TRUE ## NOT RESTART STARTS HERE ################### }else if(inherits(bayesianSetup, "BayesianSetup")){ restart <- FALSE if(is.null(settings$parallel)) settings$parallel <- bayesianSetup$parallel if(is.numeric(settings$parallel)) settings$parallel <- TRUE setup <- checkBayesianSetup(bayesianSetup, parallel = settings$parallel) settings <- applySettingsDefault(settings = settings, sampler = sampler, check = TRUE) } else stop("runMCMC requires a class of type BayesianOutput or BayesianSetup") ###### END RESTART ############## # TODO - the following statement should be removed once all further functions access settings$sampler instead of sampler # At the moment only the same sampler can be used to restart sampling. 
sampler = settings$sampler #### Assertions if(!restart && setup$numPars == 1) if(!getPossibleSamplerTypes()$univariate[which(getPossibleSamplerTypes()$BTname == settings$sampler)]) stop("This sampler can not be applied to a univariate distribution") if(restart == T) if(!getPossibleSamplerTypes()$restartable[which(getPossibleSamplerTypes()$BTname == settings$sampler)]) stop("This sampler can not be restarted") ########### Recursive call in case multiple chains are to be run if(settings$nrChains >1){ # Initialize output list out<- list() # Run several samplers for(i in 1:settings$nrChains){ settingsTemp <- settings settingsTemp$nrChains <- 1 # avoid infinite loop settingsTemp$currentChain <- i if(restart){ out[[i]] <- runMCMC(bayesianSetup = previousMcmcSampler[[i]], settings = settingsTemp) }else{ if(is.list(settings$startValue)) settingsTemp$startValue = settings$startValue[[i]] out[[i]] <- runMCMC(bayesianSetup = setup, sampler = settings$sampler, settings = settingsTemp) } } if(settings$sampler == "SMC") class(out) = c("smcSamplerList", "bayesianOutput") else class(out) = c("mcmcSamplerList", "bayesianOutput") return(out) ######### END RECURSIVE CALL # MAIN RUN FUNCTION HERE }else{ # check start values setup$prior$checkStart(settings$startValue) if (sampler == "Metropolis" || sampler == "AM" || sampler == "DR" || sampler == "DRAM"){ if(restart == FALSE){ mcmcSampler <- Metropolis(bayesianSetup = setup, settings = settings) mcmcSampler <- sampleMetropolis(mcmcSampler = mcmcSampler, iterations = settings$iterations) } else { mcmcSampler <- sampleMetropolis(mcmcSampler = previousMcmcSampler, iterations = settings$iterations) } } ############## Differential Evolution ##################### if (sampler == "DE"){ if(restart == F) out <- DE(bayesianSetup = setup, settings = settings) else out <- DE(bayesianSetup = previousMcmcSampler, settings = settings) #out <- DE(bayesianSetup = bayesianSetup, settings = list(startValue = NULL, iterations = settings$iterations, burnin = settings$burnin, eps = settings$eps, parallel = settings$parallel, consoleUpdates = settings$consoleUpdates, # blockUpdate = settings$blockUpdate, currentChain = settings$currentChain)) mcmcSampler = list( setup = setup, settings = settings, chain = out$Draws, X = out$X, sampler = "DE" ) } ############## Differential Evolution with snooker update if (sampler == "DEzs"){ # check z matrix if(!is.null(settings$Z)) setup$prior$checkStart(settings$Z,z = TRUE) if(restart == F) out <- DEzs(bayesianSetup = setup, settings = settings) else out <- DEzs(bayesianSetup = previousMcmcSampler, settings = settings) mcmcSampler = list( setup = setup, settings = settings, chain = out$Draws, X = out$X, Z = out$Z, sampler = "DEzs" ) } ############## DREAM if (sampler == "DREAM"){ if(restart == F) out <- DREAM(bayesianSetup = setup, settings = settings) else out <- DREAM(bayesianSetup = previousMcmcSampler, settings = settings) mcmcSampler = list( setup = setup, settings = settings, chain = out$chains, pCR = out$pCR, sampler = "DREAM", lCR = out$lCR, X = out$X, delta = out$delta ) } ############## DREAMzs if (sampler == "DREAMzs"){ # check z matrix if(!is.null(settings$Z)) setup$prior$checkStart(settings$Z,z = TRUE) if(restart == F) out <- DREAMzs(bayesianSetup = setup, settings = settings) else out <- DREAMzs(bayesianSetup = previousMcmcSampler, settings = settings) mcmcSampler = list( setup = setup, settings = settings, chain = out$chains, pCR = out$pCR, sampler = "DREAMzs", JumpRates = out$JumpRates, X = out$X, Z = out$Z ) } if(sampler == 
"Twalk"){ warning("At the moment using T-walk is discouraged: numeric instability") if(!restart){ if(is.null(settings$startValue)){ settings$startValue = bayesianSetup$prior$sampler(2) } mcmcSampler <- Twalk(bayesianSetup = setup, settings = settings) }else{ mcmcSampler <- Twalk(bayesianSetup = previousMcmcSampler, settings = settings) } mcmcSampler$setup <- setup mcmcSampler$sampler <- "Twalk" } if ((sampler != "SMC")){ class(mcmcSampler) <- c("mcmcSampler", "bayesianOutput") } ############# SMC ##################### if (sampler == "SMC"){ mcmcSampler <- smcSampler(bayesianSetup = bayesianSetup, initialParticles = settings$initialParticles, iterations = settings$iterations, resampling = settings$resampling, resamplingSteps = settings$resamplingSteps, proposal = settings$proposal, adaptive = settings$adaptive, proposalScale = settings$proposalScale ) mcmcSampler$settings = settings } mcmcSampler$settings$runtime = mcmcSampler$settings$runtime + proc.time() - ptm if(is.null(settings$message) || settings$message == TRUE){ message("runMCMC terminated after ", mcmcSampler$settings$runtime[3], "seconds") } return(mcmcSampler) } } #bayesianSetup = bayesianSetup, initialParticles = settings$initialParticles, iterations = settings$iterations, resampling = settings$resampling, resamplingSteps = settings$resamplingSteps, proposal = settings$proposal, adaptive = settings$adaptive, parallel = settings$parallel #' Provides the default settings for the different samplers in runMCMC #' @author Florian Hartig #' @param settings optional list with parameters that will be used instead of the defaults #' @param sampler one of the samplers in \code{\link{runMCMC}} #' @param check logical determines whether parameters should be checked for consistency #' @details see \code{\link{runMCMC}} #' @export applySettingsDefault<-function(settings=NULL, sampler = "DEzs", check = FALSE){ if(is.null(settings)) settings = list() if(!is.null(sampler)){ if(!is.null(settings$sampler)) { # TODO: this is a bit hacky. The best would prabably be to change the Metropolis function to allow AM, DR and DRAM # arguments and call applySettingsDefault for those if (settings$sampler %in% c("AM", "DR", "DRAM") && sampler == "Metropolis") { sampler <- settings$sampler } if(settings$sampler != sampler) { warning("sampler argument overwrites an existing settings$sampler in applySettingsDefault. 
This only makes sense if one wants to take over settings from one sampler to another") } } settings$sampler = sampler } if(!settings$sampler %in% getPossibleSamplerTypes()$BTname) stop("trying to set values for a sampler that does not exist") mcmcDefaults <- list(startValue = NULL, iterations = 10000, burnin = 0, thin = 1, consoleUpdates = 100, parallel = NULL, message = TRUE) #### Metropolis #### if(settings$sampler %in% c("AM", "DR", "DRAM", "Metropolis")){ defaultSettings <- c(mcmcDefaults, list(optimize = T, proposalGenerator = NULL, adapt = F, adaptationInterval = 500, adaptationNotBefore = 3000, DRlevels = 1 , proposalScaling = NULL, adaptationDepth = NULL, temperingFunction = NULL, proposalGenerator = NULL, gibbsProbabilities = NULL)) if (settings$sampler %in% c("AM", "DRAM")) defaultSettings$adapt <- TRUE if (settings$sampler %in% c("DR", "DRAM")) defaultSettings$DRlevels <- 2 } #### DE Family #### if(settings$sampler %in% c("DE", "DEzs")){ defaultSettings <- c(mcmcDefaults, list(eps = 0, currentChain = 1, blockUpdate = list("none", k = NULL, h = NULL, pSel = NULL, pGroup = NULL, groupStart = 1000, groupIntervall = 1000) )) if (settings$sampler == "DE"){ defaultSettings$f <- -2.38 # TODO CHECK } if (settings$sampler == "DEzs"){ defaultSettings$f <- 2.38 defaultSettings <- c(defaultSettings, list(Z = NULL, zUpdateFrequency = 1, pSnooker = 0.1, pGamma1 = 0.1, eps.mult =0.2, eps.add = 0)) } } #### DREAM Family #### if(settings$sampler %in% c("DREAM", "DREAMzs")){ defaultSettings <- c(mcmcDefaults, list(nCR = 3, currentChain = 1, gamma = NULL, eps = 0, e = 5e-2, DEpairs = 2, adaptation = 0.2, updateInterval = 10)) if (settings$sampler == "DREAM"){ defaultSettings$pCRupdate <- TRUE } if (settings$sampler == "DREAMzs"){ defaultSettings = c(defaultSettings, list( pCRupdate = FALSE, Z = NULL, ZupdateFrequency = 10, pSnooker = 0.1 )) } } #### Twalk #### if (settings$sampler == "Twalk"){ defaultSettings = c(mcmcDefaults, list(at = 6, aw = 1.5, pn1 = NULL, Ptrav = 0.4918, Pwalk = NULL, Pblow = NULL)) defaultSettings$parallel = NULL } #### SMC #### if (settings$sampler == "SMC"){ defaultSettings = list(iterations = 10, resampling = T, resamplingSteps = 2, proposal = NULL, adaptive = T, proposalScale = 0.5, initialParticles = 1000 ) } ## CHECK DEFAULTS if(check){ nam = c(names(defaultSettings), "sampler", "nrChains", "runtime", "sessionInfo", "parallel") ind <- which((names(settings) %in% nam == FALSE)) nam_n <- names(settings)[ind] for(i in 1:length(nam_n)) nam_n[i] <- paste(nam_n[i], " ") if(length(ind) > 0){ message("Parameter(s) ", nam_n , " not used in ", settings$sampler, "\n") } } defaultSettings$nrChains = 1 defaultSettings$runtime = 0 defaultSettings$sessionInfo = utils::sessionInfo() nam = names(defaultSettings) for (i in 1:length(defaultSettings)){ if(! nam[i] %in% names(settings)){ addition = list( defaultSettings[[i]]) names(addition) = nam[i] settings = c(settings, addition) } } if (! is.null(settings$burnin)){ if (settings$burnin > settings$iterations) stop("BayesianToools::applySettingsDefault - setting burnin cannnot be larger than setting iteration") if (! 
is.null(settings$adaptationNotBefore)){ if (settings$burnin >= settings$adaptationNotBefore) stop("BayesianToools::applySettingsDefault - setting burnin cannnot be larger than setting adaptationNotBefore") } } return(settings) } #' Help function to find starvalues and proposalGenerator settings #' @author Florian Hartig #' @param proposalGenerator proposal generator #' @param bayesianSetup either an object of class bayesianSetup created by \code{\link{createBayesianSetup}} (recommended), or a log target function #' @param settings list with settings #' @keywords internal setupStartProposal <- function(proposalGenerator = NULL, bayesianSetup, settings){ # Proposal range = (bayesianSetup$prior$upper - bayesianSetup$prior$lower) / 50 if(is.null(settings$startValue)) settings$startValue = (bayesianSetup$prior$upper + bayesianSetup$prior$lower) / 2 if (length(range) != bayesianSetup$numPars) range = rep(1,bayesianSetup$numPars) if(is.null(proposalGenerator)){ proposalGenerator = createProposalGenerator(range, gibbsProbabilities = settings$gibbsProbabilities) } ####### OPTIMIZATION if (settings$optimize == T){ if(is.null(settings$message) || settings$message == TRUE){ cat("BT runMCMC: trying to find optimal start and covariance values", "\b") } target <- function(x){ out <- bayesianSetup$posterior$density(x) if (out == -Inf) out = -1e20 # rnorm(1, mean = -1e20, sd = 1e-20) return(out) } try( { if(bayesianSetup$numPars > 1) optresul <- optim(par=settings$startValue,fn=target, method="Nelder-Mead", hessian=F, control=list("fnscale"=-1, "maxit" = 10000)) else optresul <- optim(par=settings$startValue,fn=target, method="Brent", hessian=F, control=list("fnscale"=-1, "maxit" = 10000), lower = bayesianSetup$prior$lower, upper = bayesianSetup$prior$upper) settings$startValue = optresul$par hessian = numDeriv::hessian(target, optresul$par) proposalGenerator$covariance = as.matrix(Matrix::nearPD(MASS::ginv(-hessian))$mat) #proposalGenerator$covariance = MASS::ginv(-optresul$hessian) # Create objects for startValues and covariance to add space between values startV <-covV <- character() for(i in 1:length(settings$startValue)){ startV[i] <- paste(settings$startValue[i], "") } for(i in 1:length( proposalGenerator$covariance)){ covV[i] <- paste( proposalGenerator$covariance[i], "") } if(is.null(settings$message) || settings$message == TRUE){ message("BT runMCMC: Optimization finished, setting startValues to " , startV, " - Setting covariance to " , covV) } proposalGenerator = updateProposalGenerator(proposalGenerator) } , silent = FALSE) } out = list(proposalGenerator = proposalGenerator, settings = settings) return(out) } #' Returns possible sampler types #' @export #' @author Florian Hartig getPossibleSamplerTypes <- function(){ out = list( BTname = c("AM", "DR", "DRAM", "Metropolis", "DE", "DEzs", "DREAM", "DREAMzs", "Twalk", "SMC"), possibleSettings = list() , possibleSettingsName = list() , univariatePossible = c(T, T, T, T, T, T, T, T, T, F), restartable = c(T, T, T, T, T, T, T, T, T, F) ) return(out) }
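## Illustrative usage (editor's sketch): inspecting the defaults of a sampler and restarting a
## finished run. applySettingsDefault() can be called on its own to see which settings a given
## sampler understands; passing a bayesianOutput back into runMCMC() continues the previous run,
## as described in the runMCMC documentation above. Numbers below are arbitrary.
library(BayesianTools)
str(applySettingsDefault(sampler = "DEzs"))     # list of default settings for the DEzs sampler
ll <- function(x) sum(dnorm(x, sd = 1, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = rep(-5, 3), upper = rep(5, 3))
out1 <- runMCMC(setup, sampler = "DEzs", settings = list(iterations = 3000, message = FALSE))
out2 <- runMCMC(out1, settings = list(iterations = 3000))   # restart / continue the same run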
# file: BayesianTools/R/mcmcRun.R
# # ############### MCMC RESULT ############################# # # # #' runs an automatic MCMC # #' # #' @export # automaticMCMC <- function(likelihood, prior = NULL, startvalues, maxiter=20000, steplength = 1000, optimize = T){ # # mcmc1 <- mcmcSampler(likelihood = likelihood, prior = prior, startvalue = startvalues[[1]], optimize = optimize) # mcmc2 <- mcmcSampler(likelihood = likelihood, prior = prior, startvalue = startvalues[[2]], optimize = optimize) # mcmc3 <- mcmcSampler(likelihood = likelihood, prior = prior, startvalue = startvalues[[3]], optimize = optimize) # # steps = 0 # conv = F # convTemp = F # while(nrow(mcmc1$chain) < maxiter){ # mcmc1<- getSamples(mcmc1, steplength) # mcmc2<- getSamples(mcmc2, steplength) # mcmc3<- getSamples(mcmc3, steplength) # # sel <- round(steps/3):steps # parcol <- 1:mcmc1$numPars # parLL <- mcmc1$numPars + 1 # parLP <- mcmc1$numPars + 1 # # res <- mcmc.list(mcmc(mcmc1$chain[sel, parcol]), mcmc(mcmc2$chain[sel, parcol]), mcmc(mcmc3$chain[sel, parcol])) # # likelihoods <- c(mcmc1$chain[sel, parLL], mcmc2$chain[sel, parLL], mcmc3$chain[sel, parLL]) # posteriors <- c(mcmc1$chain[sel, parLP], mcmc2$chain[sel, parLP], mcmc3$chain[sel, parLP]) # # currentConv <- tryCatch( # { # x <- all(gelman.diag(res)$psrf[1:mcmc1$numPars,] < 1.05) # ifelse(is.na(x), F, x) # }, # error=function(cond) { # F # }) # # print(currentConv) # # if (convTemp & currentConv ) { # conv = T # break # } # convTemp = currentConv # currentConv = F # steps = steps + steplength # } # if (! conv) print("Algorithm not converged") # # # calc.dic <- function(x,lik,lik.fun,...) # dic = calc.dic(x = combineChains(res),lik = likelihoods,lik.fun = mcmc1$catchingLikelihood) # # # Not working yet # #marginalLik = marginal.likelihood(x = res, lik = likelihoods,lik.fun = mcmc1$catchingLikelihood, prior.fun = mcmc1$catchingPrior) # # # classFields = list( # mcmclist = res, # dic = dic # ) # # class(classFields) <- append(class(classFields),"mcmcResult") # return(classFields) # }
# file: BayesianTools/R/mcmcSamplerAutomatic.R
#' T-walk MCMC
#' @author Stefan Paul
#' @param bayesianSetup Object of class 'bayesianSetup' or 'bayesianOutput'.
#' @param settings list with parameter values.
#' @param iterations Number of model evaluations
#' @param at "traverse" move proposal parameter. Defaults to 6
#' @param aw "walk" move proposal parameter. Defaults to 1.5
#' @param pn1 Probability determining the number of parameters that are changed
#' @param Ptrav Move probability of "traverse" moves, defaults to 0.4918
#' @param Pwalk Move probability of "walk" moves, defaults to 0.4918
#' @param Pblow Move probability of "blow" moves, defaults to 0.0082
#' @param burnin number of iterations treated as burn-in. These iterations are not recorded in the chain.
#' @param thin thinning parameter. Determines the interval in which values are recorded.
#' @param startValue Matrix with start values
#' @param consoleUpdates Interval in which the sampling progress is printed to the console
#' @param message logical, determines whether the sampler's progress should be printed
#' @details The probability of "hop" moves is 1 minus the sum of all other move probabilities.
#' @return Object of class bayesianOutput.
#' @references Christen, J. Andres, and Colin Fox. "A general purpose sampling algorithm for continuous distributions (the t-walk)." Bayesian Analysis 5.2 (2010): 263-281.
#' @export
Twalk <- function (bayesianSetup, settings = list(iterations = 10000, at = 6, aw = 1.5,
                                                  pn1 = NULL, Ptrav = 0.4918, Pwalk = 0.4918,
                                                  Pblow = 0.0082, burnin = 0, thin = 1,
                                                  startValue = NULL, consoleUpdates = 100,
                                                  message = TRUE)) {
  if("bayesianOutput" %in% class(bayesianSetup)){
    restart <- TRUE
    setup <- bayesianSetup$setup
  }else{
    restart <- FALSE
    setup <- bayesianSetup
  }

  setup <- checkBayesianSetup(setup, parallel = settings$parallel) # calling parallel will check if requested parallelization in settings is provided by the BayesianSetup
  if(is.null(settings$parallel)) settings$parallel = setup$parallel # checking back - if no parallelization is provided, we use the parallelization in the BayesianSetup.
We could also set parallel = F, but I feel it makes more sense to use the Bayesiansetup as default aw <- settings$aw at <- settings$at Npar <- setup$numPars iterations <- floor(settings$iterations/2) # Divided by 2 because two chains are run if(is.null(settings$pn1)) pn1 <- min(Npar,4)/Npar else pn1 <- settings$pn1 Ptrav <- settings$Ptrav if(is.null(settings$Pwalk)) Pwalk <- 0.4918 else Pwalk <- settings$Pwalk if(is.null(settings$Pblow)) Pblow <- 0.0082 else Pblow <- settings$Pblow # Set burnin and thin burnin <- settings$burnin thin <- settings$thin # Set Phop Phop <- 1-(Ptrav+Pwalk+Pblow) # Check for consistency of move probabilities if((Pwalk + Ptrav + Pblow) > 1) stop("Move probabilities larger one") consoleUpdates <- settings$consoleUpdates FUN <- setup$posterior$density if(!restart){ # Initialize x and x2 if(is.null(settings$startValue)){ settings$startValue = setup$prior$sampler(2) } if(is.function(settings$startValue)){ settings$startValue = settings$startValue(2) } x <- settings$startValue[1,] x2 <- settings$startValue[2,] # Evaluate Eval <- FUN(x, returnAll = T) Eval2 <- FUN(x2, returnAll = T) }else{ x <- bayesianSetup$chain[[1]][nrow(bayesianSetup$chain[[1]]), 1:Npar] x2 <- bayesianSetup$chain[[2]][nrow(bayesianSetup$chain[[2]]), 1:Npar] Eval <- bayesianSetup$chain[[1]][nrow(bayesianSetup$chain[[1]]), (Npar+1):(Npar+3)] Eval2 <- bayesianSetup$chain[[2]][nrow(bayesianSetup$chain[[2]]), (Npar+1):(Npar+3)] } # Initialize chains chain <- matrix(NA, nrow = floor((iterations+1-burnin)/thin), ncol = Npar+3) chain2 <- matrix(NA, nrow = floor((iterations+1-burnin)/thin), ncol = Npar+3) # Fill first values in chain chain[1,] <- c(x,Eval) chain2[1,] <- c(x2,Eval2) # Initialize counter for acceptance rate acceptance <- 0 # Initialize counter counter <- 0 for (i in 1:iterations) { move <- TwalkMove(Npar = Npar, FUN = FUN, x = x, Eval = Eval, x2 = x2, Eval2 = Eval2, at = at, aw = aw, pn1 = pn1, Ptrav = Ptrav, Pwalk = Pwalk, Pblow = Pblow, Phop = Phop) if(!is.na(move$alpha)){ if (runif(1) < move$alpha) { x <- move$y Eval<- move$val x2 <- move$y2 Eval2 <- move$val2 } } if((i > burnin) && (i %% thin == 0) ){ # retain sample counter <- counter + 1 chain[counter,] <- c(x, Eval) chain2[counter,] <- c(x2, Eval2) } if(settings$message){ if( (i %% consoleUpdates == 0) | (i == iterations)) { cat("\r","Running Twalk-MCMC, chain ", settings$currentChain , "iteration" ,(i*2),"of",(iterations*2), ". Current logp ", Eval[1], Eval2[1] ,". Please wait!","\r") flush.console() } } } colnames(chain) <- c(setup$names,"LP", "LL", "LPr") colnames(chain2) <- c(setup$names,"LP", "LL", "LPr") if(restart){ # Combine chains chain <- rbind(bayesianSetup$chain[[1]], chain) chain2 <- rbind(bayesianSetup$chain[[2]], chain2) } # Make sure chains have the right size chain <- chain[1:counter,] chain2 <- chain2[1:counter,] chain <- coda::mcmc.list(coda::mcmc(chain), coda::mcmc(chain2)) out <- list(chain = chain, settings = settings) class(out) <- c("mcmcSampler", "bayesianOutput") return(out) }
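## Illustrative usage (editor's sketch): the T-walk is normally invoked through runMCMC() with
## sampler = "Twalk". Note that runMCMC() currently warns that the T-walk is discouraged
## because of numeric instability. The likelihood, bounds, and iteration count below are
## arbitrary illustration values.
library(BayesianTools)
ll <- function(x) sum(dnorm(x, sd = 1, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = rep(-5, 2), upper = rep(5, 2))
out <- runMCMC(setup, sampler = "Twalk", settings = list(iterations = 10000, message = FALSE))
summary(out)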
# file: BayesianTools/R/mcmcTwalk.R
###### # Twalk helper functions ###### #' Wrapper for step function #' @param Npar Number of parameters #' @param FUN Log posterior density #' @param x parameter vector of chain 1 #' @param Eval last evaluation of x #' @param x2 parameter vector of chain 2 #' @param Eval2 last evaluation of x #' @param at "traverse" move proposal parameter. #' @param aw "walk" move proposal parameter. #' @param pn1 Probability determining the number of parameters that are changed. #' @param Ptrav Move probability of "traverse" moves, default to 0.4918 #' @param Pwalk Move probability of "walk" moves, default to 0.4918 #' @param Pblow Move probability of "blow" moves, default to 0.0082 #' @param Phop Move probability of "hop" moves #' @references Christen, J. Andres, and Colin Fox. "A general purpose sampling algorithm for continuous distributions (the t-walk)." Bayesian Analysis 5.2 (2010): 263-281. #' @keywords internal TwalkMove <- function (Npar, FUN, x, Eval, x2, Eval2, at = 6, aw = 1.5, pn1 = min(Npar, 4)/Npar, Ptrav = 0.4918, Pwalk = 0.4918, Pblow = 0.0082, Phop = 0.0082) { p <- sample(4,1, prob = c(Ptrav,Pwalk,Pblow,Phop)) if(p == 1)case <- "traverse" else if(p ==2) case <- "walk" else if(p ==3) case <- "blow" else case <- "hop" out <- Twalksteps(case = case, Npar = Npar, FUN = FUN, x = x, Eval = Eval, x2 = x2, Eval2 = Eval2, at = at, aw = aw, pn1 = pn1) return(list(y = out$y, val = out$val, y2 = out$y2, val2 = out$val2, alpha = out$alpha)) } #' Main function that is executing and evaluating the moves #' @param case Type of Twalk move. Either "walk", "traverse", "hop" or "blow" #' @param Npar number of parameters #' @param FUN Log posterior density #' @param x parameter vector of chain 1 #' @param Eval last evaluation of x #' @param x2 parameter vector of chain 2 #' @param Eval2 last evaluation of x #' @param at "traverse" move proposal parameter. #' @param aw "walk" move proposal parameter. #' @param pn1 Probability determining the number of parameters that are changed. #' @references Christen, J. Andres, and Colin Fox. "A general purpose sampling algorithm for continuous distributions (the t-walk)." Bayesian Analysis 5.2 (2010): 263-281. 
#' @keywords internal Twalksteps <- function(case, Npar, FUN, x, Eval, x2, Eval2, at, aw, pn1){ val <- NULL val2 <- NULL p <- runif(1) switch(case, "traverse" = { #Traverse if (p < 0.5) { beta <- betaFun(at) tmp <- propFun(case, Npar = Npar, pn1 = pn1, x2 = x, x = x2, beta = beta) y2 <- tmp$prop npSel <- tmp$npSel y <- x val <- Eval val2 <- FUN(y2, returnAll = T) if (npSel == 0) alpha <- 1 else alpha <- exp((- Eval2[1] + val2[1]) + (npSel - 2) * log(beta)) }else{ beta <- betaFun(at) tmp <- propFun(case, Npar = Npar, pn1 = pn1, x = x, x2 = x2, beta = beta) y <- tmp$prop npSel <- tmp$npSel y2 <- x2 val2 <- Eval2 val <- FUN(y, returnAll = T) if (npSel == 0) alpha <- 1 else alpha <- exp((-Eval[1] + val[1]) + (npSel - 2) * log(beta)) }}, # End traverse "walk" = { # walk if (p < 0.5) { tmp <- propFun(case, Npar = Npar, pn1 = pn1, aw = aw, x2 = x, x = x2) y2 <- tmp$prop npSel <- tmp$npSel y <- x val <- Eval if ( (all(abs(y2 - y) > 0))) { val2 <- FUN(y2, returnAll = T) alpha <- exp(-Eval2[1] + val2[1]) } else { alpha <- 0 } }else{ tmp <- propFun(case, Npar = Npar, pn1 = pn1, aw = aw, x = x, x2 = x2) y <- tmp$prop npSel <- tmp$npSel y2 <- x2 val2 <- Eval2 if ( (all(abs(y2 - y) > 0))) { val <- FUN(y, returnAll = T) alpha <- exp(-Eval[1] + val[1]) } else { alpha <- 0 } }}, # End walk "blow" = { #blow if (p < 0.5) { tmp <- propFun(case, Npar = Npar, pn1 = pn1, x = x2, x2 = x) y2 <- tmp$prop npSel <- tmp$npSel pSel <- tmp$pSel y <- x val <- Eval if ( all(y2 != x)) { val2 <- FUN(y2, returnAll = T) G1 <- Gfun(case, npSel, pSel, y2, x2, x) G2 <- Gfun(case, npSel, pSel, x2, y2, x) alpha <- exp((-Eval2[1] + val2[1]) + (G1 - G2)) } else { alpha <- 0 } }else{ tmp <- propFun(case, Npar = Npar, pn1 = pn1, x = x, x2 = x2) y <- tmp$prop npSel <- tmp$npSel pSel <- tmp$pSel y2 <- x2 val2 <- Eval2 if (all(y != x2)) { val <- FUN(y, returnAll = T) G1 <- Gfun(case, npSel, pSel, y, x, x2) G2 <- Gfun(case, npSel, pSel, x, y, x2) alpha <- exp((-Eval[1] + val[1]) + (G1 - G2)) } else { alpha <- 0 } } }, # End blow "hop" = { #hop if (p < 0.5) { tmp <- propFun(case, Npar = Npar, pn1 = pn1, x2 = x, x = x2) y2 <- tmp$prop npSel <- tmp$npSel pSel <- tmp$pSel y <- x val <- Eval if ( all(y2 != x)) { val2 <- FUN(y2, returnAll = T) G1 <- Gfun(case, npSel, pSel, y2, x2, x) G2 <- Gfun(case, npSel, pSel, x2, y2, x) alpha <- exp((-Eval2[1] + val2[1]) + (G1 - G2)) } else { alpha <- 0 } }else{ tmp <- propFun(case, Npar = Npar, pn1 = pn1, x = x, x2 = x2) y <- tmp$prop npSel <- tmp$npSel pSel <- tmp$pSel y2 <- x2 val2 <- Eval2 if ( all(y != x2)) { val <- FUN(y, returnAll = T) G1 <- Gfun(case, npSel, pSel, y, x, x2) G2 <- Gfun(case, npSel, pSel, x, y, x2) alpha <- exp((-Eval[1] + val[1]) + (G1 - G2)) } else { alpha <- 0 } }}) # End hop and end switch return(list(y = y, val = val, y2 = y2, val2 = val2, alpha = alpha, npSel = npSel)) } ################## Helper functions ############################################################### #' Helper function for sum of x*x #' @param x vector of values #' @keywords internal sumSquare <- function(x){return(sum(x*x))} #' Helper function to create proposal #' @param case Type of Twalk move. Either "walk", "traverse", "hop" or "blow" #' @param Npar number of parameters #' @param pn1 Probability determining the number of parameters that are changed. #' @param aw "walk" move proposal parameter. #' @param beta parameter for "traverse" move proposals. #' @param x parameter vector of chain 1 #' @param x2 parameter vector of chain 2 #' @references Christen, J. Andres, and Colin Fox. 
"A general purpose sampling algorithm for continuous distributions (the t-walk)." Bayesian Analysis 5.2 (2010): 263-281. #' @keywords internal propFun <- function(case, Npar, pn1, x, x2, beta = NULL, aw = NULL){ switch(case, "traverse"={ pSel <- (runif(Npar) < pn1) prop <- NULL for (i in 1:Npar){ if (pSel[i]) prop <- c( prop, x2[i] + beta*(x2[i] - x[i])) else prop <- c( prop, x[i]) } return(list(prop=prop, npSel=sum(pSel))) }, "walk"={ u <- runif(Npar) pSel <- (runif(Npar) < pn1) z <- (aw/(1+aw))*(aw*u^2 + 2*u -1) z <- z*pSel return(list( prop=x + (x - x2)*z, npSel=sum(pSel))) }, "blow"={ pSel <- (runif(Npar) < pn1) sigma <- max(pSel*abs(x2 - x)) return(list( prop=x2*pSel + sigma*rnorm(Npar)*pSel + x*(1-pSel), npSel=sum(pSel), pSel=pSel)) }, "hop"={ pSel <- (runif(Npar) < pn1) sigma <- max(pSel*abs(x2 - x))/3 prop <- NULL for (i in 1:Npar){ if (pSel[i]) prop <- c( prop, x[i] + sigma*rnorm(1)) else prop <- c( prop, x[i]) } return(list( prop=prop, npSel=sum(pSel), pSel=pSel)) } ) } #' Helper function for calculating beta #' @param at "traverse" move proposal parameter. #' @keywords internal betaFun <- function(at) { if (runif(1) < (at-1)/(2*at)) return(exp(1/(at + 1)*log(runif(1)))) else return(exp(1/(1 - at)*log(runif(1)))) } #' Helper function for blow and hop moves #' @param case Type of Twalk move. Either "hop" or "blow" #' @param npSel number of parameters that are changed. #' @param pSel vector containing information about which parameters are changed. #' @param h Parameter for "blow" and hop moves #' @param x parameter vector of chain 1 #' @param x2 parameter vector of chain 2 #' @references Christen, J. Andres, and Colin Fox. "A general purpose sampling algorithm for continuous distributions (the t-walk)." Bayesian Analysis 5.2 (2010): 263-281. #' @keywords internal Gfun <- function(case, npSel, pSel, h, x, x2){ switch(case, "blow"= { sigma <- max(pSel*abs(x2 - x)) if(npSel > 0) return((npSel/2)*log(2*pi) + npSel*log(sigma) + 0.5*sumSquare(h - x2)/(sigma^2)) else return(0) }, "hop" = { sigma <- max(pSel*abs(x2 - x))/3 if (npSel > 0) return((npSel/2)*log(2*pi) - npSel*log(3) + npSel*log(sigma) + 0.5*9*sumSquare((h - x))/(sigma^2)) else return(0) }) }
# file: BayesianTools/R/mcmcTwalk_helperFunctions.R
#' Flexible function to create correlation density plots #' @author Florian Hartig #' @param mat object of class "bayesianOutput" or a matrix or data frame of variables #' @param density type of plot to do. Either "smooth" (default), "corellipseCor", or "ellipse" #' @param thin thinning of the matrix to make things faster. Default is to thin to 5000 #' @param method method for calculating correlations. Possible choices are "pearson" (default), "kendall" and "spearman" #' @param whichParameters indices of parameters that should be plotted #' @param scaleCorText should the text to display correlation be scaled to the strength of the correlation #' @param ... additional parameters to pass on to the \code{\link{getSample}}, for example parametersOnly =F, or start = 1000 #' @references The code for the correlation density plot originates from Hartig, F.; Dislich, C.; Wiegand, T. & Huth, A. (2014) Technical Note: Approximate Bayesian parameterization of a process-based tropical forest model. Biogeosciences, 11, 1261-1272. #' @export #' @seealso \code{\link{marginalPlot}} \cr #' \code{\link{plotTimeSeries}} \cr #' \code{\link{tracePlot}} \cr #' @example /inst/examples/correlationPlotHelp.R correlationPlot<- function(mat, density = "smooth", thin = "auto", method = "pearson", whichParameters = NULL, scaleCorText = T, ...){ mat = getSample(mat, thin = thin, whichParameters = whichParameters, ...) numPars = ncol(mat) if(numPars < 2) stop("BayesianTools::correlationPlot - using this function only makes sense if you have more than 1 parameter") names = colnames(mat) panel.hist.dens <- function(x, ...) { usr <- par("usr"); on.exit(par(usr = usr)) par(usr = c(usr[1:2], 0, 1.5) ) h <- hist(x, plot = FALSE) breaks <- h$breaks; nB <- length(breaks) y <- h$counts; y <- y/max(y) rect(breaks[-nB], 0, breaks[-1], y, col="blue4", ...) } # replaced by spearman panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...) { usr <- par("usr"); on.exit(par(usr = usr)) par(usr = c(0, 1, 0, 1)) r <- cor(x, y, use = "complete.obs", method = method) txt <- format(c(r, 0.123456789), digits = digits)[1] txt <- paste0(prefix, txt) if(missing(cex.cor)) cex.cor <- 0.8/strwidth(txt) if(scaleCorText == T) text(0.5, 0.5, txt, cex = cex.cor * abs(r)) else text(0.5, 0.5, txt, cex = cex.cor) } plotEllipse <- function(x,y){ usr <- par("usr"); on.exit(par(usr = usr)) par(usr = c(usr[1:2], 0, 1.5) ) cor <- cor(x,y) el = ellipse::ellipse(cor) polygon(el[,1] + mean(x), el[,2] + mean(y), col = "red") } correlationEllipse <- function(x){ cor = cor(x) ToRGB <- function(x){grDevices::rgb(x[1]/255, x[2]/255, x[3]/255)} C1 <- ToRGB(c(178, 24, 43)) C2 <- ToRGB(c(214, 96, 77)) C3 <- ToRGB(c(244, 165, 130)) C4 <- ToRGB(c(253, 219, 199)) C5 <- ToRGB(c(247, 247, 247)) C6 <- ToRGB(c(209, 229, 240)) C7 <- ToRGB(c(146, 197, 222)) C8 <- ToRGB(c(67, 147, 195)) C9 <- ToRGB(c(33, 102, 172)) CustomPalette <- grDevices::colorRampPalette(rev(c(C1, C2, C3, C4, C5, C6, C7, C8, C9))) ord <- order(cor[1, ]) xc <- cor[ord, ord] colors <- unlist(CustomPalette(100)) ellipse::plotcorr(xc, col=colors[xc * 50 + 50]) } if (density == "smooth"){ return(pairs(mat, lower.panel=function(...) 
{par(new=TRUE); IDPmisc::ipanel.smooth(...)},
                 diag.panel = panel.hist.dens, upper.panel = panel.cor))
  } else if (density == "corellipseCor"){
    return(pairs(mat, lower.panel = plotEllipse, diag.panel = panel.hist.dens, upper.panel = panel.cor))
  } else if (density == "ellipse"){
    correlationEllipse(mat)
  } else if (density == F){
    return(pairs(mat, lower.panel = panel.cor, diag.panel = panel.hist.dens, upper.panel = panel.cor))
  } else stop("wrong density argument")
}
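## Illustrative usage (editor's sketch): correlationPlot() accepts either a bayesianOutput or a
## plain matrix of samples and needs at least two parameters. The default density = "smooth"
## requires the IDPmisc package; the ellipse-based options require the ellipse package. The
## simulated samples below are arbitrary illustration data.
samples <- cbind(a = rnorm(5000), b = rnorm(5000))
samples[, "b"] <- 0.7 * samples[, "a"] + 0.3 * samples[, "b"]   # induce some correlation
correlationPlot(samples, density = "smooth", thin = "auto", scaleCorText = TRUE)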
# file: BayesianTools/R/plotCorrelationDensity.r
#' @author Maximilian Pichler #' @title Diagnostic Plot #' @description This function plots the DIC, WAIC, mPSRF, PSRF(with upper C.I.) and traces of the parameters in dependence of iterations. DIC, WAIC are plotted separately for the chains and the trace plots also for the internal chains. #' @param out object of class "bayesianOutput" #' @param start start value for calculating DIC, WAIC, mPSRF and PSRF, default = 50 #' @param numSamples for calculating WAIC, default = 10 because of high computational costs #' @param window plot range to show, vector of percents or only one value as start value for the window #' @param plotWAIC whether to calculate WAIC or not, default = T #' @param plotPSRF calculate and plot mPSRF/PSRF or not, default = T #' @param plotDIC calculate and plot DICor not, default = T #' @param plotTrace show trace plots or not, default = T #' @param graphicParameters graphic parameters as list for plot function #' @param ... parameters to give to getSample #' @example /inst/examples/plotDiagnosticHelp.R #' @export plotDiagnostic <- function(out, start = 50, numSamples = 100, window = 0.2, plotWAIC = F, plotPSRF = T, plotDIC = T, plotTrace = T, graphicParameters = NULL, ...){ oldpar = NULL on.exit(par(oldpar)) if(!"bayesianOutput" %in% class(out)) stop("Wrong input, object of class bayesianOutput required. see runMCMC()") calcWAIC <- TRUE if("mcmcSamplerList" %in% class(out) && out[[1]]$setup$pwLikelihood) calcWAIC <- FALSE if("mcmcSampler" %in% class(out) && out$setup$pwLikelihood) calcWAIC <- FALSE if(!plotWAIC) calcWAIC <- FALSE defaultGraphicParameters <- graphicParameters # calculate DIC and WAIC, minimum range: start - start+1 if("mcmcSamplerList" %in% class(out)){ if(is.matrix(out[[1]]$chain)) len <- out[[1]]$settings$iterations else len <- round(out[[1]]$settings$iterations / length(out[[1]]$chain)) iter = out[[1]]$settings$iterations internal = length(out[[1]]$chain) start = start + 1 lenW <- length(seq(start , by = 10, to = len)) DICResult <- matrix(NA, nrow = length(out), ncol = len - start) WAICResult<- matrix(NA, nrow = length(out), ncol = length(seq(start , by = 10, to = len))) numPars <- out[[1]]$setup$numPars Wseq <- seq(start , by = 10, to = len) for(i in 1:length(out)) { if(plotDIC) DICResult[i,] <- sapply(start:len, FUN = function(x){return(DIC(out[[i]], start = start - 1 , end = x, ...)$DIC)}) if(calcWAIC) WAICResult[i,] <- sapply(seq(start , by = 10, to = len), FUN = function(x){return(WAIC(out[[i]], start = start - 1 ,end = x, numSamples = numSamples, ...)$WAIC1)}) } } else { if(is.matrix(out$chain)) len <- out$settings$iterations else len <- round(out$settings$iterations / length(out$chain)) internal = length(out$chain) iter = out$settings$iterations start = start + 1 lenW<- length(seq(start, by = 10, to = len)) Wseq <- seq(start , by = 10, to = len) if(plotDIC) DICResult <- sapply(start:len, FUN = function(x){return(DIC(out, start = start - 1, end = x, ...)$DIC)}) if(calcWAIC) WAICResult<- sapply(seq(start, by = 10, to = len), FUN = function(x){return(WAIC(out, end = x, start = start - 1, numSamples = numSamples, ...)$WAIC1)}) numPars <- out$setup$numPars } # TODO: missing: check if sampler with multiple chains # should user call method with plotPSFR=F for one-chain-sampler? 
# calc mPSRF, first checking which low values we could calculate if(plotPSRF){ seq <- vector() for(i in start:len){ success <- try(coda::gelman.diag(getSample(out, start = start - 1, parametersOnly = T, coda = T, end = i, ...))$mpsrf, silent = T) if(!"try-error" %in% class(success)){ # break seq[i] <- i } } seq <- seq[complete.cases(seq)] # calculate the actual PSRF values if(numPars > 1) PSRF <- matrix(0, nrow = length(seq), ncol = numPars*2 + 1) else PSRF <- matrix(0, nrow = length(seq), ncol = numPars*2 ) for(i in 1:length(seq)){ res <- coda::gelman.diag(getSample(out, start = start - 1, parametersOnly = T, coda = T, end = seq[i], ...)) if(numPars > 1)PSRF[i,] <- c(as.vector(res$psrf), res$mpsrf) else PSRF[i,] <- c(as.vector(res$psrf)) } } # Get number of plots nrPlots <- 2 if(calcWAIC) nrPlots <- nrPlots + 1 if(plotDIC) nrPlots <- nrPlots + 1 if(plotPSRF) nrPlots <- nrPlots + 2 if(plotTrace) nrPlots<- numPars*2 + nrPlots par(mfrow = getPanels(nrPlots)) # set graphicParameters if(is.null(graphicParameters)){ graphicParameters = list(lty = 1, lwd = 1, type = "l", xlab = "Iterations", ylab = "", col = 1:6) } else { if(is.null(graphicParameters$lty)) graphicParameters$lty = 1 if(is.null(graphicParameters$lwd)) graphicParameters$lwd = 1 if(is.null(graphicParameters$type)) graphicParameters$type = "l" if(is.null(graphicParameters$xlab)) graphicParameters$xlab = "Iterations" if(is.null(graphicParameters$ylab)) graphicParameters$ylab = "" if(is.null(graphicParameters$col)) graphicParameters$col = 1:6 } # plot DIC if(plotDIC){ if(is.matrix(DICResult)){ # col <- 1:ncol(DICResult) if(is.na(window[2])) endV <- nrow(DICResult) else endV <- window[2]*nrow(DICResult) startV <- window[1]*nrow(DICResult) x = nrow(DICResult) ylim = c(min(DICResult[startV:endV,])*0.99, max(DICResult[startV:endV,])*1.01) } else { if(is.na(window[2])) endV <- length(DICResult) else endV <- window[2]*length(DICResult) startV <- window[1]*length(DICResult) x = length(DICResult) ylim = c(min(DICResult[startV:endV])*0.99, max(DICResult[startV:endV])*1.01) } graphicParameters$y = DICResult graphicParameters$x = 1:x graphicParameters$main = "DIC" graphicParameters$xlim = c(startV, endV) graphicParameters$ylim = ylim if(is.null(graphicParameters$xaxt)) graphicParameters$xaxt = "n" do.call(matplot, graphicParameters) if(graphicParameters$xaxt == "n" ){ axis(1, at = seq(startV, by = 100, to = endV), labels = seq(startV, by = 100, to = endV)*internal) graphicParameters$xaxt <- NULL } } # plot WAIC if(calcWAIC){ if(is.matrix(WAICResult)){ # col <- 1:ncol(DICResult) if(is.na(window[2])) endV <- nrow(WAICResult) else endV <- window[2]*nrow(WAICResult) startV <- window[1]*nrow(WAICResult) x = nrow(WAICResult) ylim = c(min(WAICResult[startV:endV,])*0.99, max(WAICResult[startV:endV,])*1.01) } else { if(is.na(window[2])) endV <- length(WAICResult) else endV <- window[2]*length(WAICResult) startV <- window[1]*length(WAICResult) x = length(WAICResult) ylim = c(min(WAICResult[startV:endV])*0.99, max(WAICResult[startV:endV])*1.01) } graphicParameters$y = WAICResult graphicParameters$x = 1:x graphicParameters$main = "WAIC" graphicParameters$xlim = c(startV, endV) graphicParameters$ylim = ylim if(is.null(graphicParameters$xaxt)) graphicParameters$xaxt = "n" do.call(matplot, graphicParameters) if(graphicParameters$xaxt == "n" ){ axis(1, at = seq(startV, by = 10, to = endV), labels = seq(startV, by = 10, to = endV)*10*internal) graphicParameters$xaxt <- NULL } } if(plotPSRF){ if(is.na(window[2])) endV <- nrow(PSRF) else endV <- 
window[2]*nrow(PSRF) startV <- window[1]*nrow(PSRF) graphicParameters$xlim = c(startV, endV) graphicParameters$x = 1:nrow(PSRF) # plot mPSRF if(numPars > 1){ if(!typeof(seq) == "logical" ) { graphicParameters$ylim = c(min(PSRF[startV:endV,ncol(PSRF)])*0.99, max(PSRF[startV:endV,ncol(PSRF)])*1.01) graphicParameters$y = PSRF[,ncol(PSRF)] graphicParameters$main = "mPSRF" do.call(plot, graphicParameters) } } graphicParameters$ylim = c(min(PSRF[startV:endV,-ncol(PSRF)])*0.99, max(PSRF[startV:endV,-ncol(PSRF)])*1.01) graphicParameters$y = PSRF[,-ncol(PSRF)] graphicParameters$main = "PSRF" lty = NULL for(i in 1:numPars)lty <- c(lty, c(1,2)) graphicParameters$lty <- lty col = NULL for(i in 1:6)col <- c(col, c(i,i)) graphicParameters$col <- col do.call(matplot, graphicParameters) } # plot parameter traces if(plotTrace){ # if(is.null(defaultGraphicParameters)) defaultGraphicParameters <- list() # if(is.na(window[2])) endV <- len # else endV <- window[2]*len # defaultGraphicParameters$xlim <- c(len*window[1], endV) # defaultGraphicParameters$ask = F # defaultGraphicParameters$auto.layout = F # defaultGraphicParameters$x = getSample(out, start = start, coda = T, parametersOnly = T,...) # do.call(coda::cumuplot, defaultGraphicParameters) coda::cumuplot(getSample(out, start = start, coda = T, parametersOnly = T, ...), ask = F, auto.layout = F) } }
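## Illustrative usage (editor's sketch): plotDiagnostic() expects a bayesianOutput; with more
## than one chain (settings$nrChains > 1, or a population sampler with internal chains) the
## PSRF panels can be computed as well. WAIC plotting is switched off here to keep the example
## cheap; all numbers are arbitrary illustration values.
library(BayesianTools)
ll <- function(x) sum(dnorm(x, sd = 1, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = rep(-5, 2), upper = rep(5, 2))
out <- runMCMC(setup, sampler = "DEzs",
               settings = list(iterations = 6000, nrChains = 2, message = FALSE))
plotDiagnostic(out, start = 50, plotWAIC = FALSE)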
# file: BayesianTools/R/plotDiagnostic.R
#' @export marginalPlot <- function(x, ...) UseMethod("marginalPlot") #' Plot MCMC marginals #' @param x bayesianOutput, or matrix or data.frame containing with samples as rows and parameters as columns #' @param prior if x is a bayesianOutput, T/F will determine if the prior is drawn (default = T). If x is matrix oder data.frame, a prior can be drawn if a matrix of prior draws with values as rows and parameters as columns can be provided here. #' @param xrange vector or matrix of plotting ranges for the x axis. If matrix, the rows must be parameters and the columns min and max values. #' @param type character determining the plot type. Either 'd' for density plot, or 'v' for violin plot #' @param singlePanel logical, determining whether the parameter should be plotted in a single panel or each in its own panel #' @param settings optional list of additional settings for \code{\link{marginalPlotDensity}}, and \code{\link{marginalPlotViolin}}, respectively #' @param nPriorDraws number of draws from the prior, if x is bayesianOutput #' @param ... additional arguments passed to \code{\link{getSample}}. If you have a high number of draws from the posterior it is advised to set numSamples (to e.g. 5000) for performance reasons. #' @example /inst/examples/marginalPlotHelp.R #' @author Tankred Ott, Florian Hartig marginalPlot <- function(x, prior = NULL, xrange = NULL, type = 'd', singlePanel = FALSE, settings = NULL, nPriorDraws = 10000, ...) { posteriorMat <- getSample(x, parametersOnly = TRUE, ...) # checking for which args <- list(...) if("which" %in% names(args)) which = args$which else which = 1:ncol(posteriorMat) # check prior if ('bayesianOutput' %in% class(x)) { # default T if NULL and BayesianOutput provide if (is.null(prior)) prior = TRUE if (any(c('data.frame', 'matrix') %in% class(prior))) priorMat = prior else if (is.logical(prior)){ if (prior == TRUE) priorMat = getSetup(x)$prior$sampler(nPriorDraws) # draw prior from bayesianSetup else if (prior == F) priorMat = NULL } else stop('wrong argument to prior') } else { # default F if (is.null(prior)) prior = FALSE if (any(c('data.frame', 'matrix') %in% class(prior))) priorMat = prior else if (is.logical(prior)){ priorMat = NULL if (prior == TRUE) message("prior = T will only have an effect if x is of class BayesianOutput") } else stop('wrong argument to prior') } if (!is.null(priorMat)) { priorMat = priorMat[,which,drop=F] if (ncol(posteriorMat) != ncol(priorMat)) stop("wrong dimensions of prior") colnames(priorMat) <- colnames(posteriorMat) } nPar <- ncol(posteriorMat) # check xrange if (!is.null(xrange)) { if (!any(c('numeric', 'matrix') %in% class(xrange))) stop('xrange must be numeric or matrix, or NULL') if ('numeric' %in% class(xrange)) xrange <- matrix(rep(xrange), nPar, nrow = 2) else if ('matrix' %in% class(xrange)) { if (ncol(xrange) != ncol(posteriorMat)) stop('xrange must have as many colums as there are parameterss') else if (nrow(xrange) != 2) stop('xrange must have two rows (min, max)') } } else { posteriorRanges <- apply(posteriorMat, 2, range) priorRanges <- if(!is.null(priorMat)) apply(priorMat, 2, range) else NULL xrange <- if (is.null(priorRanges)) posteriorRanges else apply(rbind(priorRanges, posteriorRanges), 2, range) } # check type if (any(c('d', 'dens', 'density') == type)) type <- 'd' # else if (any(c('h', 'hist', 'histogram') == type)) type <- 'h' else if (any(c('v', 'violin') == type)) type <- 'v' # else stop('type must be one of "d", "h", "v"') else stop('type must be one of "d", "v"') # check parameter 
names if (is.null(colnames(posteriorMat))) colnames(posteriorMat) <- paste('par', 1:nPar, sep = '') if (!is.null(priorMat)) colnames(priorMat) <- colnames(posteriorMat) # prepare arguments for sub-functions .args <- c(list(posteriorMat = posteriorMat, priorMat = priorMat, xrange = xrange, singlePanel = singlePanel), settings) if (type == 'd') do.call(marginalPlotDensity, .args) # else if (type == 'h') do.call(marginalPlotHistogram, .args) else if (type == 'v') do.call(marginalPlotViolin, .args) } #' Plot marginals as densities #' @param posteriorMat matrix with samples as rows and parameters as columns #' @param priorMat matrix (optional) with samples as rows and parameters as columns #' @param xrange vector or matrix (optional), determining the plotting range, with parameters as columns and min, max as rows #' @param col vector of colors for posterior and #' @param singlePanel logical, determining whether the parameter should be plotted in a single panel or each in its own panel # #' @param ... further options #' @author Tankred Ott #' @keywords internal # TODO: this could be simplified. It is verbose for now to be able to change stuff easily marginalPlotDensity <- function(posteriorMat, priorMat = NULL, xrange = NULL, col=c('#FC006299','#00BBAA30'), singlePanel = TRUE, ...) { nPar <- ncol(posteriorMat) parNames <- colnames(posteriorMat) if (is.null(xrange)) { posteriorRanges <- apply(posteriorMat, 2, range) priorRanges <- if(!is.null(priorMat)) apply(priorMat, 2, range) else NULL xrange <- if (is.null(priorRanges)) posteriorRanges else apply(rbind(priorRanges, posteriorRanges), 2, range) } posteriorDensities <- lapply(1:ncol(posteriorMat), function(i) density(posteriorMat[,i], from = xrange[1,i], to = xrange[2,i], ...)) priorDensities <- if (!is.null(priorMat)) lapply(1:ncol(priorMat), function(i) density(priorMat[,i], from = xrange[1,i], to = xrange[2,i], ...)) else NULL postXY <- lapply(posteriorDensities, function(d) { xy <- cbind(c(d$x[1], d$x, d$x[length(d$x)]), c(0, d$y, 0)) colnames(xy) <- c('x', 'y') xy }) priorXY <- if (!is.null(priorDensities)) lapply(priorDensities, function(d) { xy <- cbind(c(d$x[1], d$x, d$x[length(d$x)]), c(0, d$y, 0)) colnames(xy) <- c('x', 'y') xy }) else NULL if (singlePanel) { op <- par(mfrow = c(nPar,1), mar = c(2, 5, 2, 2), oma = c(5, 4, 4, 0)) on.exit(par(op)) for (i in 1:length(posteriorDensities)) { postX <- postXY[[i]][,1] postY <- postXY[[i]][,2] priorX <- if (!is.null(priorXY[[i]])) priorXY[[i]][,1] else NULL priorY <- if (!is.null(priorXY[[i]])) priorXY[[i]][,2] else NULL yrange <- if (is.null(priorX)) range(postY) else range(c(postY, priorY)) plot(NULL, NULL, xlim = xrange[,i], ylim = yrange, main = NA, xlab = NA, ylab = NA, bty = 'n', yaxt = 'n', xaxt = 'n') axis(1, at = xrange[,i], labels = NA, lwd.ticks=0) xticks <- axTicks(1) xticks <- xticks[xticks >= xrange[1,i] & xticks <= xrange[2,i]] axis(1, at = xticks) mtext(sprintf('%20s', parNames[i]), 2, las = 1, adj = 1.25) polygon(postX, postY, col = col[1], border = 1) if (!is.null(priorX)) polygon(priorX, priorY, col = col[2], border = 1) } mtext('Marginal parameter uncertainty', outer = TRUE, cex = 1.5) } else { mfrow <- if (nPar < 16) getPanels(nPar) else c(4,4) op <- par(mfrow = mfrow, mar = c(4.5, 4, 5, 3), oma=c(3, 1.5, 2, 0), xpd=TRUE) on.exit(par(op)) for (i in 1:length(posteriorDensities)) { postX <- postXY[[i]][,1] postY <- postXY[[i]][,2] priorX <- if (!is.null(priorXY[[i]])) priorXY[[i]][,1] else NULL priorY <- if (!is.null(priorXY[[i]])) priorXY[[i]][,2] else NULL yrange <- if 
(is.null(priorX)) range(postY) else range(c(postY, priorY)) plot(NULL, NULL, xlim = xrange[,i], ylim = yrange, main = parNames[i], xlab = NA, ylab = 'density') polygon(postX, postY, col = col[1], border = 1) if (!is.null(priorX)) polygon(priorX, priorY, col = col[2], border = 1) if (i %% 16 == 1) mtext('Marginal parameter uncertainty', outer = TRUE, cex = 1.5) } } # overlay plot with empty plot to be able to place the legends freely par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE) plot(0, 0, type = 'n', bty = 'n', xaxt = 'n', yaxt = 'n') legend('bottom', if (!is.null(priorX)) c('posterior', 'prior') else 'posterior', xpd = TRUE, horiz = TRUE, inset = c(0, 0), bty = 'n', pch = 15, col = col, cex = 1.5) } #' Plot marginals as violin plot #' @param posteriorMat matrix with samples as rows and parameters as columns #' @param priorMat matrix (optional) with samples as rows and parameters as columns #' @param xrange vector or matrix (optional), determining the plotting range, with parameters as columns and min, max as rows #' @param col vector of colors for posterior and #' @param singlePanel logical, determining whether the parameter should be plotted in a single panel or each in its own panel # #' @param ... further options #' @author Tankred Ott #' @keywords internal # TODO: this could be simplified. It is verbose for now to be able to change stuff easily marginalPlotViolin <- function(posteriorMat, priorMat = NULL, xrange = NULL, col=c('#FC006299','#00BBAA88'), singlePanel = TRUE, ...) { nPar <- ncol(posteriorMat) parNames <- colnames(posteriorMat) if (is.null(xrange)) { posteriorRanges <- apply(posteriorMat, 2, range) priorRanges <- if(!is.null(priorMat)) apply(priorMat, 2, range) else NULL xrange <- if (is.null(priorRanges)) posteriorRanges else apply(rbind(priorRanges, posteriorRanges), 2, range) } posteriorDensities <- lapply(1:ncol(posteriorMat), function(i) density(posteriorMat[,i], from = xrange[1,i], to = xrange[2,i], ...)) priorDensities <- if (!is.null(priorMat)) lapply(1:ncol(priorMat), function(i) density(priorMat[,i], from = xrange[1,i], to = xrange[2,i], ...)) else NULL postXY <- lapply(posteriorDensities, function(d) { xy <- cbind(c(d$x[1], d$x, d$x[length(d$x)]), c(0, d$y, 0)) colnames(xy) <- c('x', 'y') if (is.null(priorDensities)) xy <- rbind(xy, cbind(rev(xy[,1]), rev(-xy[,2]))) xy }) priorXY <- if (!is.null(priorDensities)) lapply(priorDensities, function(d) { xy <- cbind(c(d$x[1], d$x, d$x[length(d$x)]), -c(0, d$y, 0)) colnames(xy) <- c('x', 'y') xy }) else NULL if (singlePanel) { nChar <- max(nchar(parNames)) op <- par(mfrow = c(nPar,1), mar = c(2, min(nChar, 20), 2, 2), oma = c(5, 0, 4, 0)) on.exit(par(op)) for (i in 1:length(posteriorDensities)) { postX <- postXY[[i]][,1] postY <- postXY[[i]][,2] priorX <- if (!is.null(priorXY[[i]])) priorXY[[i]][,1] else NULL priorY <- if (!is.null(priorXY[[i]])) priorXY[[i]][,2] else NULL yrange <- if (is.null(priorX)) range(postY) else range(c(postY, priorY)) plot(NULL, NULL, xlim = xrange[,i], ylim = yrange, main = NA, xlab = NA, ylab = NA, bty = 'n', yaxt = 'n', xaxt = 'n') axis(1, at = xrange[,i], labels = NA, lwd.ticks=0) xticks <- axTicks(1) xticks <- xticks[xticks >= xrange[1,i] & xticks <= xrange[2,i]] axis(1, at = xticks) mtext(sprintf('%20s', parNames[i]), 2, las = 1, adj = 1) polygon(postX, postY, col = col[1], border = 1) if (!is.null(priorX)) polygon(priorX, priorY, col = col[2], border = 1) } mtext('Marginal parameter uncertainty', outer = TRUE, cex = 1.5) } else { mfrow <- if (nPar < 16) 
getPanels(nPar) else c(4,4) op <- par(mfrow = mfrow, mar = c(4.5, 4.5, 5, 3), oma=c(3, 0, 2, 0), xpd=TRUE) on.exit(par(op)) for (i in 1:length(posteriorDensities)) { postX <- postXY[[i]][,1] postY <- postXY[[i]][,2] priorX <- if (!is.null(priorXY[[i]])) priorXY[[i]][,1] else NULL priorY <- if (!is.null(priorXY[[i]])) priorXY[[i]][,2] else NULL yrange <- if (is.null(priorX)) range(postY) else range(c(postY, priorY)) plot(NULL, NULL, xlim = xrange[,i], ylim = yrange, main = parNames[i], xlab = NA, ylab = 'density', yaxt = 'n') yticks <- sort(c(0, axTicks(2))) axis(2, at = yticks, labels = abs(yticks)) polygon(postX, postY, col = col[1], border = 1) if (!is.null(priorX)) polygon(priorX, priorY, col = col[2], border = 1) if (i %% 16 == 1) mtext('Marginal parameter uncertainty', outer = TRUE, cex = 1.5) } } # overlay plot with empty plot to be able to place the legends freely par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE) plot(0, 0, type = 'n', bty = 'n', xaxt = 'n', yaxt = 'n') legend('bottom', if (!is.null(priorX)) c('posterior', 'prior') else 'posterior', xpd = TRUE, horiz = TRUE, inset = c(0, 0), bty = 'n', pch = 15, col = col, cex = 1.5) } #' #' @keywords internal #' marginalPlotHistogram <- function(posteriorMat, priorMat = NULL, xrange = NULL, col=c('#FF5000A0','#4682B4A0'), #' singlePanel = TRUE, breaks = NULL, ...) { #' #' }
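# ------------------------------------------------------------------
# Illustrative sketch (editor addition, not part of the original source):
# how the internal violin marginal plot can be exercised directly on sample
# matrices. 'posteriorSample' and 'priorSample' are hypothetical stand-ins
# for real MCMC output; in normal use this function is reached via marginalPlot().
if (FALSE) {
  posteriorSample <- matrix(rnorm(3000), ncol = 3,
                            dimnames = list(NULL, c("a", "b", "c")))
  priorSample <- matrix(runif(3000, -3, 3), ncol = 3,
                        dimnames = list(NULL, c("a", "b", "c")))
  # internal function, hence the ::: access
  BayesianTools:::marginalPlotViolin(posteriorSample, priorSample,
                                     singlePanel = TRUE)
}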
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/plotMarginals.R
#' Performs a one-factor-at-a-time sensitivity analysis for the posterior of a given bayesianSetup within the prior range. #' @author Florian Hartig #' @param bayesianSetup An object of class BayesianSetup #' @param selection indices of selected parameters #' @param equalScale if T, y axis of all plots will have the same scale #' @note This function can also be used for sensitivity analysis of an arbitrary output - just create a BayesianSetup with this output. #' @example /inst/examples/plotSensitivityHelp.R #' @export plotSensitivity <- function(bayesianSetup, selection = NULL, equalScale = T){ if (is.null(selection)) selection = 1:bayesianSetup$numPars n = length(selection) post = list() lowS = bayesianSetup$prior$lower[selection] upS = bayesianSetup$prior$upper[selection] refPar = bayesianSetup$prior$best[selection] names = bayesianSetup$names[selection] fullRefPar <- bayesianSetup$prior$best minR = Inf maxR = -Inf for (j in 1:n){ post[[j]] <- data.frame(par = seq(lowS[j], upS[j], len = 20), resp = rep(NA, 20)) for (i in 1:20){ parS <- refPar parS[j] = post[[j]]$par[i] parS2 = fullRefPar parS2[selection] = parS post[[j]]$resp[i] = bayesianSetup$posterior$density(parS2) } minR = min(minR, post[[j]]$resp) maxR = max(maxR, post[[j]]$resp) } oldPar = par(mfrow = getPanels(n)) for (i in 1:n){ if(equalScale == T) plot(resp~par, xlab = names[i], type = "l", col = "red", data = post[[i]], ylim = c(minR, maxR), ylab = "Response") else plot(resp~par, xlab = names[i], type = "l", col = "red", data = post[[i]], ylab = "Response") abline(v = refPar[i]) } names(post) = names post$reference = refPar par(oldPar) return(post) }
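# ------------------------------------------------------------------
# Illustrative sketch (editor addition): a minimal one-factor-at-a-time
# sensitivity run on a 3-d test density. The explicit 'best' reference values
# are an assumption of this sketch; plotSensitivity() varies one parameter at
# a time around prior$best, so the setup needs to carry such values.
if (FALSE) {
  library(BayesianTools)
  ll <- generateTestDensityMultiNormal(sigma = "no correlation")
  setup <- createBayesianSetup(likelihood = ll,
                               lower = rep(-10, 3), upper = rep(10, 3),
                               best = rep(0, 3))
  sens <- plotSensitivity(setup, selection = 1:2, equalScale = TRUE)
}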
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/plotSensitivityOAT.R
#' Plots a time series, with the option to include confidence and prediction band #' @author Florian Hartig #' @param x optional values for x axis (time) #' @param observed observed values #' @param predicted predicted values #' @param confidenceBand matrix with confidenceBand #' @param predictionBand matrix with predictionBand #' @param xlab a title for the x axis #' @param ylab a title for the y axis #' @param ... further arguments passed to \code{\link[graphics]{plot}} #' @details Values for confidence and prediction bands can be generated with \code{\link{getPredictiveIntervals}}. For a more elaborate version of this plot, see \code{\link{plotTimeSeriesResults}} #' @seealso \code{\link{marginalPlot}}, \code{\link{tracePlot}}, \code{\link{correlationPlot}} #' @example /inst/examples/plotTimeSeriesHelp.R #' @export plotTimeSeries <- function(observed = NULL, predicted = NULL, x = NULL, confidenceBand = NULL, predictionBand = NULL, xlab = "Time", ylab = "Observed / predicted values", ...){ ylim = range(observed, predicted, confidenceBand, predictionBand,na.rm=TRUE) if (is.null(x)){ if(!is.null(observed)) x = 1:length(observed) else if(!is.null(predicted)) x = 1:length(predicted) else stop("either observed or predicted must be supplied") } len = length(x) plot(x, y = rep(0, len), ylim = ylim, type = "n", xlab = xlab, ylab = ylab, ...) if(!is.null(predictionBand)) polygon(c(x,rev(x)),c(predictionBand[1,],predictionBand[2,len:1]),col="moccasin",border=NA) if(!is.null(confidenceBand)) polygon(c(x,rev(x)),c(confidenceBand[1,],confidenceBand[2,len:1]),col="#99333380",border=NA) if(!is.null(predicted)) lines(x, predicted, col = "red") if(!is.null(observed)) points(x, observed, col = "black", pch = 3, cex = 0.6) } #' Plots residuals of a time series #' @author Florian Hartig #' @param residuals x #' @param x optional values for x axis (time) #' @param main title of the plot #' @export plotTimeSeriesResiduals <- function(residuals, x = NULL, main = "residuals"){ ylim = range(residuals) if (is.null(x)){ x = 1:length(residuals) } barplot(residuals) } #' Creates a time series plot typical for an MCMC / SMC fit #' @author Florian Hartig #' @param sampler Either a) a matrix b) an MCMC object (list or not), or c) an SMC object #' @param model function that calculates model predictions for a given parameter vector #' @param observed observed values as vector #' @param error function with signature f(mean, par) that generates observations with error (error = stochasticity according to what is assumed in the likelihood) from mean model predictions. Par is a vector from the matrix with the parameter samples (full length). f needs to know which of these parameters are parameters of the error function. See example in \code{\link{VSEM}} #' @param start numeric start value for the plot (see \code{\link{getSample}}) #' @param plotResiduals logical determining whether residuals should be plotted #' @param prior if a prior sampler is implemented, setting this parameter to TRUE will draw model parameters from the prior instead of the posterior distribution #' @param ... 
further arguments passed to \code{\link[graphics]{plot}} #' @seealso \code{\link{getPredictiveIntervals}} #' @example /inst/examples/plotTimeSeriesHelp.R #' @export plotTimeSeriesResults <- function(sampler, model, observed, error = NULL, plotResiduals = TRUE, start = 1, prior = FALSE, ...){ oldPar = par(no.readonly = TRUE) if (plotResiduals == TRUE && is.null(error)) { warning("Can not plot residuals without an error function.") } if(!is.vector(observed)) stop("wrong type given to observed. Must be a vector") if (plotResiduals == TRUE && !is.null(error)) { layout(matrix(c(1, 1, 1, 2, 3, 4), 2, 3, byrow = TRUE)) par(mar = c(3, 3, 3, 3), oma = c(2, 2, 2, 2)) } # ... can we pass on to both getSample and plot? if(prior == FALSE){ if(inherits(sampler,"bayesianOutput")) parMatrix = getSample(sampler, start = start) else if (inherits(sampler, "matrix")) parMatrix = sampler else if ("mcmc.list" %in% class(sampler) || "mcmc" %in% class(sampler)) parMatrix = getSample(sampler, start = start) else stop("wrong type given to variable sampler") }else if (prior == TRUE){ if(inherits(sampler,"bayesianOutput")) { if(inherits(sampler, "mcmcSamplerList")) parMatrix = sampler[[1]]$setup$prior$sampler(1000) else parMatrix <- sampler$setup$prior$sampler(1000) } else { stop("prior==TRUE is only available for sampler of type bayesianOutput") } }else stop("BayesianTools::plotTimeSeriesResults - wrong argument to prior") numSamples = min(1000, nrow(parMatrix)) pred <- getPredictiveIntervals(parMatrix = parMatrix, model = model, numSamples = numSamples, quantiles = c(0.025, 0.5, 0.975), error = error) if(!is.null(error)) plotTimeSeries(observed = observed, predicted = pred$posteriorPredictivePredictionInterval[2,], confidenceBand = pred$posteriorPredictiveCredibleInterval[c(1,3),], predictionBand = pred$posteriorPredictivePredictionInterval[c(1,3),], ...) else plotTimeSeries(observed = observed, predicted = pred$posteriorPredictiveSimulations, confidenceBand = pred$posteriorPredictiveSimulations[c(1,3),], ...) if (plotResiduals && !is.null(error)) { dh = getDharmaResiduals(model = model, parMatrix = parMatrix, numSamples = numSamples, observed = observed, error = error, plot = FALSE) # qq-plot gap::qqunif(dh$scaledResiduals, pch=2, bty="n", logscale = F, col = "black", cex = 0.6, main = "QQ plot residuals", cex.main = 1) # residuals vs fitted noNa <- which(!is.na(dh$scaledResiduals)) DHARMa::plotResiduals(dh$fittedPredictedResponse[noNa], dh$scaledResiduals[noNa], main = "Residual vs. predicted\n quantile lines should be\n horizontal lines at 0.25, 0.5, 0.75", cex.main = 1, xlab = "Predicted value", ylab = "Standardized residual") # residuals vs time t <- 1:length(dh$fittedPredictedResponse[noNa]) DHARMa::plotResiduals(t, dh$scaledResiduals[noNa], xlab = "Time", ylab = "Standardized residual", main = "Residual vs. time\n quantile lines should be\n horizontal lines at 0.25, 0.5, 0.75", cex.main = 1) message("DHARMa::plotTimeSeriesResults called with posterior predictive (residual) diagnostics. 
Type vignette(\"DHARMa\", package=\"DHARMa\") for a guide on how to interpret these plots") } par(oldPar) } #' Creates a DHARMa object #' @author Tankred Ott #' @param model function that calculates model predictions for a given parameter vector #' @param parMatrix a parameter matrix from which the simulations will be generated #' @param numSamples the number of samples #' @param observed a vector of observed values #' @param error function with signature f(mean, par) that generates error expectations from mean model predictions. Par is a vector from the matrix with the parameter samples (full length). f needs to know which of these parameters are parameters of the error function #' @param plot logical, determining whether the simulated residuals should be plotted # #' @export getDharmaResiduals <- function(model, parMatrix, numSamples, observed, error, plot = TRUE){ predDistr <- getPredictiveDistribution(parMatrix = parMatrix, model = model, numSamples = numSamples) # apply error to predictions for (i in 1:nrow(predDistr)){ predDistr[i,] = error(mean = predDistr[i,], par = parMatrix[i,]) } fittedPars = apply(parMatrix, 2, median) fittedPredictedResponse = model(fittedPars) dh = DHARMa::createDHARMa(simulatedResponse = t(predDistr), observedResponse = observed, fittedPredictedResponse = fittedPredictedResponse) if (plot == TRUE) { DHARMa::plotResiduals(dh) } return(dh) }
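# ------------------------------------------------------------------
# Illustrative sketch (editor addition): plotTimeSeries() with hand-made
# observed/predicted values and hypothetical bands, to show the expected
# shape of the band arguments (matrices with two rows: lower and upper).
if (FALSE) {
  x <- 1:50
  predicted <- sin(x / 5)
  observed <- predicted + rnorm(50, sd = 0.2)
  confidenceBand <- rbind(predicted - 0.1, predicted + 0.1)
  predictionBand <- rbind(predicted - 0.5, predicted + 0.5)
  plotTimeSeries(observed = observed, predicted = predicted, x = x,
                 confidenceBand = confidenceBand, predictionBand = predictionBand)
}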
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/plotTimeSeries.R
#' Trace plot for MCMC class
#' @param sampler an object of class MCMC sampler
#' @param thin determines the thinning interval of the chain
#' @param ... additional parameters to pass on to \code{\link{getSample}}, for example parametersOnly = F, or start = 1000
#' @export
#' @seealso \code{\link{marginalPlot}} \cr
#'          \code{\link{plotTimeSeries}} \cr
#'          \code{\link{correlationPlot}}
#' @example /inst/examples/tracePlotHelp.R
tracePlot <- function(sampler, thin = "auto", ...){
  codaChain = getSample(sampler, coda = T, thin = thin, ...)
  plot(codaChain)
}
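# Illustrative sketch (editor addition): 'out' is a hypothetical result of
# runMCMC(); thinning and burn-in are passed through to getSample().
if (FALSE) {
  tracePlot(out, thin = 10, start = 1000)
}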
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/plotTrace.R
#' Factory that creates a proposal generator #' @author Florian Hartig #' @param covariance covariance matrix. Can also be vector of the sqrt of diagonal elements --> standard deviation #' @param gibbsProbabilities optional probabilities for the number of parameters to vary in a Metropolis within gibbs style - for 4 parameters, c(1,1,0.5,0) means that at most 3 parameters will be varied, and it is double as likely to vary one or two than varying 3 #' @param gibbsWeights optional probabilities for parameters to be varied in a Metropolis within gibbs style - default ist equal weight for all parameters - for 4 parameters, c(1,1,1,100) would mean that if 2 parameters would be selected, parameter 4 would be 100 times more likely to be picked than the others. If 4 is selected, the remaining parameters have equal probability. #' @param otherDistribution optional additinal distribution to be mixed with the default multivariate normal. The distribution needs to accept a parameter vector (to allow for the option of making the distribution dependend on the parameter values), but it is still assumed that the change from the current values is returned, not the new absolute values. #' @param otherDistributionLocation a vector with 0 and 1, denoting which parameters are modified by the otherDistribution #' @param otherDistributionScaled should the other distribution be scaled if gibbs updates are calculated? #' @param message print out parameter settings #' @param method method for covariance decomposition #' @param scalingFactor scaling factor for the proposals #' @seealso \code{\link{updateProposalGenerator}} #' @export #' @example /inst/examples/proposalGeneratorHelp.R #' #@param covarianceDecomp composed covariance matrix. If provided, faster TODO? createProposalGenerator <- function( covariance, # covariance matrix for the multivariate proposal gibbsProbabilities = NULL, # changes gibbsWeights = NULL, otherDistribution = NULL, otherDistributionLocation = NULL, otherDistributionScaled = F, message = F, method = "chol", scalingFactor = 2.38 ) { # To provide the option of defining via sd of individual normal if (is.vector(covariance)) { covariance = diag(covariance^2) } if(ncol(covariance) == 0 && nrow(covariance) == 0) covariance = 1 if(is.null(otherDistribution)) numberOfParameters = max(1,nrow(covariance)) else numberOfParameters = length(otherDistributionLocation) if(is.null(method) | numberOfParameters < 2){ covarianceDecomp = NULL if(numberOfParameters > 1) samplingFunction = function() as.vector(mvtnorm::rmvnorm(n = 1, sigma = covariance)) else samplingFunction = function() rnorm(n = 1, sd = sqrt(covariance)) } else { covarianceDecomp = factorMatrice(covariance, method = method) samplingFunction = function() as.vector(getRmvnorm(n = 1, R = covarianceDecomp)) } ## Assertions if(!is.null(otherDistribution)){ stopifnot(class(otherDistribution) == "function") stopifnot(!is.null(otherDistributionLocation)) if(is.numeric(otherDistributionLocation)) otherDistributionLocation = as.logical(otherDistributionLocation) stopifnot(is.logical(otherDistributionLocation)) stopifnot(is.logical(otherDistributionScaled)) } #scalingFactor = 2.38/sqrt(numberOfParameters) # CHECK ??? 
#scalingFactorN = (2.38^2)/numberOfParameters # note - scaling is 2.38 * sqrt because it is applied on the change, not directly on the sigma ########################## # Definition of proposal function returnProposal <- function(x, scale = 1){ # Possibility to mix with other distribution if(!is.null(otherDistribution)){ move = rep(NA, numberOfParameters) move[otherDistributionLocation] = otherDistribution(x[otherDistributionLocation]) move[!otherDistributionLocation] = samplingFunction() }else{ move = samplingFunction() } ## Gibbs updates if (!is.null(gibbsProbabilities)) { nGibbs <- sample.int(length(x), size = 1, replace = F, prob = gibbsProbabilities) whichParametersLoc <- sample.int(length(x), nGibbs, replace = F, prob = gibbsWeights) move[! (1:numberOfParameters %in% whichParametersLoc)] = 0 } else { nGibbs = numberOfParameters } ### if(!is.null(otherDistribution) & otherDistributionScaled == F){ nGibbs = nrow(covariance) move[!otherDistributionLocation] = move[!otherDistributionLocation] * scalingFactor / sqrt(nGibbs) }else{ move = move * scalingFactor / sqrt(nGibbs) } newParam = x + move * scale return(newParam) } returnProposalMatrix <- function(x, scale = 1){ numPar <- ncol(x) if (numPar == 1){ out = matrix(apply(x, 1, returnProposal, scale = scale), ncol = 1) } else { out = t(apply(x, 1, returnProposal, scale = scale)) } return(out) } returnDensity <- function(x, y, scale = 1){ if (!is.null(gibbsProbabilities) & !(is.null(otherDistribution)))stop("proposal density not implemented if Gibbs or other distribution is activated in the proposal. This error may appear if you have chosen both gibbs and delayed rejection in an MCMC algorith. This option is currently not implemented") sigmaDensity = scalingFactor^2 / numberOfParameters * covariance * scalingFactor^2 if(length(sigmaDensity) > 1) dens = mvtnorm::dmvnorm(x, mean = y, sigma = sigmaDensity, log = T) else dens = dnorm(x, mean = y, sd = sqrt(sigmaDensity), log = T) return(dens) } ########################## # Wrap up class fields classFields = list( covariance = covariance, covarianceDecomp = covarianceDecomp, gibbsProbabilities = gibbsProbabilities, gibbsWeights = gibbsWeights, otherDistribution = otherDistribution, otherDistributionLocation = otherDistributionLocation, otherDistributionScaled = otherDistributionScaled, returnProposal = returnProposal, returnProposalMatrix = returnProposalMatrix, returnDensity = returnDensity, updateProposalGenerator = updateProposalGenerator , samplingFunction = samplingFunction ) class(classFields) <- c("proposalGenerator") if(message == T){ cat("Proposalgenerator created") print(classFields) } return(classFields) } #' @method print proposalGenerator #' @export print.proposalGenerator <- function(x, ...){ names = names(x) for(i in 1:6){ cat(names[i], "set to:\n ") print(x[[i]]) } } #' To update settings of an existing proposal genenerator #' @param proposal an object of class proposalGenerator #' @param chain a chain to create the covariance matrix from (optional) #' @param message whether to print an updating message #' @param eps numeric tolerance for covariance #' @param manualScaleAdjustment optional adjustment for the covariance scale (multiplicative) #' @details The this function can be applied in 2 ways 1) update the covariance given an MCMC chain, and 2) update the proposal generator after parameters have been changed #' @export updateProposalGenerator <- function(proposal,chain = NULL, message = F, eps = 1e-10, manualScaleAdjustment = 1){ if(!is.null(chain)){ npar = ncol(chain) 
if(is.null(npar)) npar = 1 if (npar > 1){ covar = cov(chain) * manualScaleAdjustment covar = as.matrix(Matrix::nearPD(covar + diag(eps, npar))$mat) }else{ covar = var(chain) * manualScaleAdjustment } if(!any(is.na(covar))) proposal$covariance = covar } out <- createProposalGenerator( covariance = proposal$covariance, gibbsProbabilities = proposal$gibbsProbabilities, gibbsWeights = proposal$gibbsWeights, otherDistribution = proposal$otherDistribution, otherDistributionLocation = proposal$otherDistributionLocation, otherDistributionScaled = proposal$otherDistributionScaled ) if(message == T){ cat("Proposalgenerator settings changed") print(out) } return(out) } #' Produce multivariate normal proposal #' @param n n #' @param R R #' @return X #' @keywords internal getRmvnorm <- function(n=1, R){ X <- matrix(rnorm(n * ncol(R)), nrow=n )%*% R return(X) } #' factorMatrice #' @param sigma sigma #' @param method either "eigen", "svd" or "chol" #' @keywords internal factorMatrice <- function(sigma, method){ if(method == "eigen") { ev <- eigen(sigma, symmetric = TRUE) if (!all(ev$values >= -sqrt(.Machine$double.eps) * abs(ev$values[1]))){ warning("sigma is numerically not positive definite") } ## ev$vectors %*% diag(sqrt(ev$values), length(ev$values)) %*% t(ev$vectors) ## faster for large nrow(sigma): t(ev$vectors %*% (t(ev$vectors) * sqrt(ev$values))) } else if(method == "svd"){ s. <- svd(sigma) if (!all(s.$d >= -sqrt(.Machine$double.eps) * abs(s.$d[1]))){ warning("sigma is numerically not positive definite") } t(s.$v %*% (t(s.$u) * sqrt(s.$d))) } else if(method == "chol"){ R <- chol(sigma, pivot = TRUE) R[, order(attr(R, "pivot"))] } } # adapt #proposalGenerator$covariance = factorMatrice(proposalGenerator$covariance, method)
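# ------------------------------------------------------------------
# Illustrative sketch (editor addition): build a generator from per-parameter
# standard deviations, draw one proposal, and refresh the covariance from a
# hypothetical chain matrix.
if (FALSE) {
  gen <- createProposalGenerator(covariance = c(1, 0.5, 2))
  gen$returnProposal(x = c(0, 0, 0))
  chain <- matrix(rnorm(300), ncol = 3)
  gen <- updateProposalGenerator(gen, chain = chain, message = TRUE)
}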
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/proposalGenerator.R
#' Multivariate normal likelihood #' @author Florian Hartig #' @description Generates a 3 dimensional multivariate normal likelihood function. #' @param mean vector with the three mean values of the distribution #' @param sigma either a correlation matrix, or "strongcorrelation", or "no correlation" #' @param sample should the function create samples #' @param n number of samples to create #' @param throwErrors parameter for test purpose. Between 0 and 1 for proportion of errors #' @details 3-d multivariate normal density function with mean 2,4,0 and either strong correlation (default), or no correlation. #' @export #' @seealso \code{\link{testDensityBanana}} \cr #' \code{\link{testLinearModel}} #' @example inst/examples/generateTestDensityMultiNormalHelp.R #' # @param x TODO not fully implemented yet! either the parameter vector if the function is used for density, or the number of replicates when sampling generateTestDensityMultiNormal <- function(mean = c(0,0,0), sigma = "strongcorrelation", sample = F, n = 1, throwErrors = -1){ # for test purposes if(runif(1) < throwErrors ){ stop("TestError") } if (sigma == "strongcorrelation"){ m <- c(0.2, 0.3, 0.3503) sigma = emulator::corr.matrix(cbind(m),scales=1) }else if (sigma == "no correlation"){ sigma = diag(rep(1,3)) } if (sample == F){ out <- function(x) mvtnorm::dmvnorm(x, mean = mean, sigma = sigma, log=T) return(out) }else{ out_sample <- function(n) mvtnorm::rmvnorm(n=n, mean = mean, sigma = sigma) return(out_sample) } } #' Banana-shaped density function #' @author Florian Hartig #' @param p 2-dim parameter vector #' @note inspired from package FMEmcmc, seems to go back to Laine M (2008). Adaptive MCMC Methods with Applications in Environmental and Models. Finnish Meteorological Institute Contributions 69. ISBN 978-951-697-662-7. #' @export #' @seealso \code{\link{generateTestDensityMultiNormal}} \cr #' \code{\link{testLinearModel}} testDensityBanana <- function (p){ P <- c(p[1], p[2] - (p[1]^2+1)) Cov <- matrix(nrow = 2, data = c(1, 0.9, 0.9, 1)) return(mvtnorm::dmvnorm(P, mean = rep(0, length(P)), sigma = Cov, log = T)) } #' GelmanMeng test function #' #' @param x parameter vector #' @param A function parameter #' @param B function parameter #' @param C1 function parameter #' @param C2 function parameter #' @param log log #' #' A non-elliptical, bivariate density function proposed by Gelman and Meng (1991). 
testDensityGelmanMeng <- function(x, A = 1, B = 0, C1 = 3, C2 = 3, log = TRUE) { if (is.vector(x)) x <- matrix(x, nrow = 1) r <- -0.5 * (A * x[,1]^2 * x[,2]^2 + x[,1]^2 + x[,2]^2 - 2 * B * x[,1] * x[,2] - 2 * C1 * x[,1] - 2 * C2 * x[,2]) if (!log) r <- exp(r) as.vector(r) } #' Normal likelihood #' @author Florian Hartig #' @param x a parameter vector of arbitrary length #' @param sum if likelihood should be summed or not #' @export testDensityNormal <- function(x, sum = T){ if(sum == T) return(sum(dnorm(x, log = T))) else return(dnorm(x, log = T)) } #' 3d Mutivariate Normal likelihood #' @param x a parameter vector of arbitrary length #' @param sigma either a correlation matrix, or "strongcorrelation", or "no correlation" #' @export testDensityMultiNormal <- function(x, sigma = "strongcorrelation"){ if (sigma == "strongcorrelation"){ m <- c(0.2, 0.3, 0.3503) sigma = emulator::corr.matrix(cbind(m),scales=1) }else if (sigma == "no correlation"){ sigma = diag(rep(1,3)) } return(mvtnorm::dmvnorm(x, mean = c(0,0,0), sigma = sigma, log=T)) } #' Fake model, returns a ax + b linear response to 2-param vector #' @author Florian Hartig #' @param x 2-dim parameter vector #' @param env optional, environmental covariate #' @example /inst/examples/testLinearModel.R #' @export #' @seealso \code{\link{generateTestDensityMultiNormal}} \cr #' \code{\link{testDensityBanana}} testLinearModel <- function(x, env = NULL){ if (is.null(env)) env = seq(-3,3,len = 20) x[1] * env + x[2] } #' Test function infinity ragged #' @author Florian Hartig #' @param x 2-dim parameter vector #' @param error should error or infinity be returned #' @export #' @seealso \code{\link{generateTestDensityMultiNormal}} \cr #' \code{\link{testDensityBanana}} testDensityInfinity <- function(x, error = F){ if( error == F) return(ifelse ((3*sum(x^2) %% 3) < 0.3, -Inf, 1)) else return(ifelse ((3*sum(x^2) %% 3) < 0.3, stop("testerror"), 1)) }
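# ------------------------------------------------------------------
# Illustrative sketch (editor addition): evaluating a few of the test
# densities at arbitrary points.
if (FALSE) {
  ll <- generateTestDensityMultiNormal(sigma = "no correlation")
  ll(c(0, 0, 0))           # log density of the 3-d multivariate normal
  testDensityBanana(c(0, 0))
  testDensityNormal(rnorm(5))
}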
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/testFunctions.R
# inspired from https://gist.github.com/doobwa/941125
# maybe replace with logSumExp {matrixStats} which might be faster?

#' Function to compute log(sum(exp(x)))
#' @author Florian Hartig
#' @param x values at log scale
#' @param mean logical, determines whether the mean should be used instead of the sum
#' @details This function computes log(sum(exp(x))), using the offset trick to avoid numeric overflow, see, e.g. http://jblevins.org/notes/log-sum-exp. The mean option allows calculating logMeanExp
#'
#' @keywords internal
logSumExp <- function(x, mean = F) {

  nObs = length(x) # needed below when mean = T

  if(any(x == Inf)) stop("BayesianTools::logSumExp: positive infinity values in log probabilities")

  if(any(x == -Inf)){
    message("BayesianTools::logSumExp: encountered -Inf in logSumExp - value was removed")
    x = x[x != -Inf]
  }

  # seems that this created problems in the presence of small values,
  # doesn't seem to be a need to shift towards min
  # if ( max(abs(x)) > max(x) ) offset <- min(x) else offset <- max(x)
  offset <- max(x)

  if (mean == T) out = log(sum(exp(x - offset)) / nObs) + offset
  else out = log(sum(exp(x - offset))) + offset

  return(out)
}

# Unit test in test-utils-Numerics
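# Illustrative sketch (editor addition): the offset trick gives the same
# result as the naive formula whenever the naive formula does not overflow.
if (FALSE) {
  x <- log(c(1e-3, 2e-3, 5e-3))
  all.equal(logSumExp(x), log(sum(exp(x))))   # internal helper of this file
}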
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/utils-Numerics.R
#' Function to get the setup from a bayesianOutput #' @param x bayesianOutput #' @return bayesianSetup #' @author Tankred Ott #' @keywords internal getSetup <- function(x) { classes <- class(x) if (any(c('mcmcSampler', 'smcSampler') %in% classes)) x$setup else if (any(c('mcmcSamplerList', 'smcSamplerList') %in% classes)) x[[1]]$setup else stop('Can not get setup from x') } #' Function to thin matrices #' @param mat matrix to thin #' @param thin thinning parameter #' @return thinned matrix #' @keywords internal thinMatrix <- function(mat, thin = "auto"){ if (thin == "auto"){ thin = max(floor(nrow(mat) / 5000),1) } if (! is.null(thin) & ! thin == F){ sel = seq(1,dim(mat)[1], by = thin ) mat = mat[sel,] } return(mat) } #' Function to scale matrices #' @param mat matrix to scale #' @param min minimum value #' @param max maximum value #' @return sclaed matrix #' @keywords internal scaleMatrix <- function(mat, min, max){ if(class(mat)[1] %in% c("matrix", "data.frame")){ for(i in 1:ncol(mat)){ mat[,i] <- (mat[,i] - min[i]) / (max[i] - min[i]) } }else if (is.vector(mat)){ mat = (mat - min) / (max - min) }else stop("wrong class") return(mat) } #' Function to calculate the metropolis ratio #' @author Florian Hartig #' @param LP2 log posterior old position #' @param LP1 log posterior of proposal #' @param tempering value for tempering #' @keywords internal metropolisRatio <- function(LP2, LP1, tempering = 1){ # this catches two -Inf cases / I wonder if we should throw a warning in this case if( is.na(LP2 - LP1)) out = -Inf else out = min(exp( (LP2 - LP1) / tempering), 1) return(out) } #' getPanels #' #' Calculates the argument x for par(mfrow = x) for a desired number of panels #' #' @author Florian Hartig #' @param x the desired number of panels #' @export getPanels <- function(x){ if (x <= 0) stop("number can't be < 1") lower = floor(sqrt(x)) upper = ceiling(sqrt(x)) if (lower == upper) return(c(lower, lower)) else{ if (lower * upper >= x) return(c(lower, upper)) else return(c(upper, upper)) } } #' Gets n equally spaced samples (rows) from a matrix or vector #' @author Tankred Ott #' @param x matrix or vector #' @param numSamples number of samples (rows) to be drawn #' @details Gets n equally spaced samples (rows) from a matrix and returns a new matrix (or vector) containing those samples #' @keywords internal sampleEquallySpaced <- function(x, numSamples) { # wrong input: x is neither vector nor matrix if (!is.matrix(x) && !is.vector(x)) { stop("Expected matrix or vector for x!") } # wrong input: numSamples is not single numeric value if (!is.vector(numSamples) || !is.numeric(numSamples) || length(numSamples) > 1) { stop("Expected a single numeric value for numSamples!") } len = 0 if (is.matrix(x)) { len = nrow(x) } else { len = length(x) } if (len == 1) { return(x) } # wrong input: numSamples > total number of samples if (numSamples > len) { numSamples = len warning("numSamples is greater than the total number of samples! All samples were selected.") # wrong input: numsaples 0 or negative } else if (numSamples < 1) { numSamples = 1; warning("numSamples is less than 1! Only the first sample was selected.") } sel <- seq(1, len, len = numSamples) if (is.matrix(x)) { out <- x[sel, , drop=F] } else { out <- x[sel] } return(out) } #' Checks if thin is consistent with nTotalSamples samples and if not corrects it. 
#' @author Tankred Ott #' @param nTotalSamples total number of rows/samples #' @param thin thinning #' @param autoThinFraction fraction of the data that will be sampled when thin is set to "auto". E.g. 0.5 means thin will be nTotalSamples * 0.5. The resulting thin value is rounded down to the next integer. #' @details Checks if the thin argument is consistent with the data consisting of nTotalSamples samples/rows and corrects thin if not. #' @author Tankred Ott # #' @export #' @keywords internal correctThin <- function(nTotalSamples, thin, autoThinFraction = 0.001) { if (autoThinFraction > 1 || autoThinFraction <= 0) { stop("autoThinFraction must be greater than 0 and less than 1!") } if (thin == "auto"){ thin = max(floor(nTotalSamples * autoThinFraction), 1) } else if (is.null(thin) || thin == F || thin < 1 || is.nan(thin)) { thin = 1 } else if (thin > nTotalSamples) { warning("thin is greater than the total number of samples! Only the first sample/row was selected.") thin = nTotalSamples } return(thin) } #' @title Rescale #' @description Rescales values in the interval "from" (lower, upper) to the new interval "to" (lower, upper). #' @param x vector of values tp be scaled #' @param from vector of length 2, original interval (lower, upper) #' @param to vector of length 2, target interval (lower, upper) #' #' @keywords internal #' @author Tankred Ott rescale <- function (x, from, to) { # scale x from 0 to 1 x <- (x - from[1]) / (from[2] - from[1]) # scale to new interval return(x * (to[2] - to[1]) + to[1]) }
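# ------------------------------------------------------------------
# Illustrative sketch (editor addition): the small helpers of this file in
# action (getPanels is exported; correctThin and rescale are internal).
if (FALSE) {
  getPanels(7)                                 # panel layout for 7 plots
  correctThin(1000, thin = "auto")             # auto thinning for 1000 samples
  rescale(0:10, from = c(0, 10), to = c(-1, 1))
}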
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/utils.R
# Coerce a plain numeric matrix of samples into a coda::mcmc object,
# attaching parameter names and the mcpar (start, end, thin) attribute.
as_mcmc.matrix = function(x, names, par = NULL) {
  dimnames(x) = list(NULL, names)
  if (is.null(par)) {
    attr(x, 'mcpar') = c(1, nrow(x), 1)
  } else {
    attr(x, 'mcpar') = par
  }
  class(x) = 'mcmc'
  return(x)
}

# Multivariate potential scale reduction factor for the chains in x,
# using all iterations up to 'end'.
calc_mpsrf = function(x, end) {
  x = window(x, start(x), end, 1)
  Niter = niter(x)
  Nchain = nchain(x)
  Nvar = nvar(x)
  x = lapply(x, as.matrix)
  S2 = array(sapply(x, var, simplify = TRUE), dim = c(Nvar, Nvar, Nchain))
  W = apply(S2, c(1, 2), mean)
  xbar = matrix(sapply(x, apply, 2, mean, simplify = TRUE), nrow = Nvar, ncol = Nchain)
  B = Niter * var(t(xbar))
  CW = chol(W)
  emax = eigen(backsolve(CW, t(backsolve(CW, B, transpose = TRUE)), transpose = TRUE),
               symmetric = TRUE, only.values = TRUE)$values[1]
  res = sqrt((1 - 1/Niter) + (1 + 1/Nvar) * emax/Niter)
  return(res)
}

# Evaluate the multivariate PSRF on a growing window of iterations, in steps
# of 'step', and return a data frame of iteration cutoffs and MPSRF values.
mpsrf = function(x, step = 50, ...) {
  if (nchain(x) < 2 | nvar(x) == 1) stop("You need at least two chains and two parameters.")
  z = seq(start(x) - 1 + step, end(x), by = step)
  res = sapply(z, function(i) calc_mpsrf(x, i))
  x = data.frame(z, MPSRF = res)
  return(x)
}
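# Illustrative sketch (editor addition): the stepwise multivariate PSRF on a
# hypothetical coda::mcmc.list with at least two chains and two parameters.
if (FALSE) {
  library(coda)
  chains <- mcmc.list(mcmc(matrix(rnorm(2000), ncol = 2)),
                      mcmc(matrix(rnorm(2000), ncol = 2)))
  mpsrf(chains, step = 100)
}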
/scratch/gouwar.j/cran-all/cranData/BayesianTools/R/utilsFrancesco.R
## ----global_options, include=FALSE-------------------------------------------- knitr::opts_chunk$set(fig.width=5, fig.height=5, warning=FALSE, cache = F) ## ---- echo = F, message = F--------------------------------------------------- set.seed(123) ## ---- eval = F---------------------------------------------------------------- # install.packages("BayesianTools") ## ----------------------------------------------------------------------------- library(BayesianTools) citation("BayesianTools") ## ----------------------------------------------------------------------------- set.seed(123) ## ---- eval = F---------------------------------------------------------------- # sessionInfo() ## ----------------------------------------------------------------------------- ll <- generateTestDensityMultiNormal(sigma = "no correlation") bayesianSetup = createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3)) ## ----------------------------------------------------------------------------- iter = 10000 settings = list(iterations = iter, message = FALSE) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) ## ----------------------------------------------------------------------------- print(out) summary(out) ## ----------------------------------------------------------------------------- plot(out) # plot internally calls tracePlot(out) correlationPlot(out) marginalPlot(out, prior = TRUE) ## ----------------------------------------------------------------------------- marginalLikelihood(out) DIC(out) MAP(out) ## ---- eval = F---------------------------------------------------------------- # getSample(out, start = 100, end = NULL, thin = 5, whichParameters = 1:2) ## ---- echo = T---------------------------------------------------------------- iter = 1000 settings = list(iterations = iter, nrChains = 3, message = FALSE) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) ## ----------------------------------------------------------------------------- print(out) summary(out) ## ----------------------------------------------------------------------------- plot(out) ## ----------------------------------------------------------------------------- #getSample(out, coda = F) gelmanDiagnostics(out, plot = T) ## ---- eval = F---------------------------------------------------------------- # ll = logDensity(x) ## ----------------------------------------------------------------------------- ll <- generateTestDensityMultiNormal(sigma = "no correlation") bayesianSetup = createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3)) ## ---- eval = FALSE------------------------------------------------------------ # ## Definition of likelihood function # likelihood <- function(matrix){ # # Calculate likelihood in parallel # # Return vector of likelihood valus # } # # ## Create Bayesian Setup # BS <- createBayesianSetup(likelihood, parallel = "external", ...) # # ## Run MCMC # runMCMC(BS, sampler = "SMC", ...) 
## ---- eval = FALSE------------------------------------------------------------ # ## n = Number of cores # n=2 # x <- c(1:10) # likelihood <- function(param) return(sum(dnorm(x, mean = param, log = T))) # bayesianSetup <- createBayesianSetup(likelihood, parallel = n, lower = -5, upper = 5) # # ## give runMCMC a matrix with n rows of proposals as startValues or sample n times from the previous created sampler # out <- runMCMC(bayesianSetup, settings = list(iterations = 1000)) ## ---- eval = FALSE------------------------------------------------------------ # ### Create cluster with n cores # cl <- parallel::makeCluster(n) # # ## Definition of the likelihood # likelihood <- function(X) sum(dnorm(c(1:10), mean = X, log = T)) # # ## Definition of the likelihood which will be calculated in parallel. Instead of the parApply function, we could also define a costly parallelized likelihood # pLikelihood <- function(param) parallel::parApply(cl = cl, X = param, MARGIN = 1, FUN = likelihood) # # ## export functions, dlls, libraries # # parallel::clusterEvalQ(cl, library(BayesianTools)) # parallel::clusterExport(cl, varlist = list(likelihood)) # # ## create BayesianSetup # bayesianSetup <- createBayesianSetup(pLikelihood, lower = -10, upper = 10, parallel = 'external') # # ## For this case we want to parallelize the internal chains, therefore we create a n row matrix with startValues, if you parallelize a model in the likelihood, do not set a n*row Matrix for startValue # settings = list(iterations = 100, nrChains = 1, startValue = bayesianSetup$prior$sampler(n)) # # ## runMCMC # out <- runMCMC(bayesianSetup, settings, sampler = "DEzs") ## ---- eval = FALSE------------------------------------------------------------ # ### Create cluster with n cores # cl <- parallel::makeCluster(n) # # ## export your model # # parallel::clusterExport(cl, varlist = list(complexModel)) # # ## Definition of the likelihood # likelihood <- function(param) { # # ll <- complexModel(param) # # return(ll) # } # # ## create BayesianSetup and settings # bayesianSetup <- createBayesianSetup(likelihood, lower = -10, upper = 10, parallel = 'external') # settings = list(iterations = 100, nrChains = 1) # # ## runMCMC # out <- runMCMC(bayesianSetup, settings) # ## ---- eval = FALSE------------------------------------------------------------ # ### Definition of likelihood function # x <- c(1:10) # likelihood <- function(param) return(sum(dnorm(x, mean = param, log = T))) # # ## Create BayesianSetup and settings # bayesianSetup <- createBayesianSetup(likelihood, lower = -10, upper = 10, parallel = F) # settings = list(iterations = 100000) # # ## Start cluster with n cores for n chains and export BayesianTools library # cl <- parallel::makeCluster(n) # parallel::clusterEvalQ(cl, library(BayesianTools)) # # ## calculate parallel n chains, for each chain the likelihood will be calculated on one core # out <- parallel::parLapply(cl, 1:n, fun = function(X, bayesianSetup, settings) runMCMC(bayesianSetup, settings, sampler = "DEzs"), bayesianSetup, settings) # # ## Combine the chains # out <- createMcmcSamplerList(out) ## ----------------------------------------------------------------------------- # Create a BayesianSetup ll <- generateTestDensityMultiNormal(sigma = "no correlation") bayesianSetup = createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3)) settings = list(iterations = 2500, message = FALSE) out <- runMCMC(bayesianSetup = bayesianSetup, settings = settings) newPrior = createPriorDensity(out, method = 
"multivariate", eps = 1e-10, lower = rep(-10, 3), upper = rep(10, 3), best = NULL) bayesianSetup <- createBayesianSetup(likelihood = ll, prior = newPrior) settings = list(iterations = 1000, message = FALSE) out <- runMCMC(bayesianSetup = bayesianSetup, settings = settings) ## ---- message = F------------------------------------------------------------- ll <- generateTestDensityMultiNormal(sigma = "no correlation") bayesianSetup = createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3)) settings = list(iterations = 10000, nrChains= 3, message = FALSE) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) plot(out) marginalPlot(out, prior = T) correlationPlot(out) gelmanDiagnostics(out, plot=T) # option to restart the sampler settings = list(iterations = 1000, nrChains= 1, message = FALSE) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) out2 <- runMCMC(bayesianSetup = out) out3 <- runMCMC(bayesianSetup = out2) #plot(out) #plot(out3) # create new prior from posterior sample newPriorFromPosterior <- createPriorDensity(out2) ## ----------------------------------------------------------------------------- iter = 10000 ## ----------------------------------------------------------------------------- applySettingsDefault(sampler = "Metropolis") ## ---- results = 'hide', eval = F---------------------------------------------- # settings <- list(iterations = iter, adapt = F, DRlevels = 1, gibbsProbabilities = NULL, temperingFunction = NULL, optimize = F, message = FALSE) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) # plot(out) ## ---- results = 'hide', eval = F---------------------------------------------- # settings <- list(iterations = iter, adapt = F, DRlevels = 1, gibbsProbabilities = NULL, temperingFunction = NULL, optimize = T, message = FALSE) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) # plot(out) ## ---- results = 'hide', eval = F---------------------------------------------- # settings <- list(iterations = iter, adapt = T, DRlevels = 1, gibbsProbabilities = NULL, temperingFunction = NULL, optimize = T, message = FALSE) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) # plot(out) ## ---- results = 'hide', eval = F---------------------------------------------- # settings <- list(iterations = iter, adapt = F, DRlevels = 2, gibbsProbabilities = NULL, temperingFunction = NULL, optimize = T, message = FALSE) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) # plot(out) ## ---- results = 'hide', eval = F---------------------------------------------- # settings <- list(iterations = iter, adapt = T, DRlevels = 2, gibbsProbabilities = NULL, temperingFunction = NULL, optimize = T, message = FALSE) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) # plot(out) ## ---- results = 'hide', eval = F---------------------------------------------- # settings <- list(iterations = iter, adapt = T, DRlevels = 1, gibbsProbabilities = c(1,0.5,0), temperingFunction = NULL, optimize = T, message = FALSE) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) # plot(out) ## ---- results = 'hide', eval = F---------------------------------------------- # temperingFunction <- function(x) 5 * exp(-0.01*x) + 1 # settings <- list(iterations = iter, adapt = F, 
DRlevels = 1, gibbsProbabilities = c(1,1,0), temperingFunction = temperingFunction, optimize = T, message = FALSE) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) # plot(out) ## ---- results = 'hide', eval = F---------------------------------------------- # settings <- list(iterations = iter, message = FALSE) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DE", settings = settings) # plot(out) ## ---- results = 'hide', eval = F---------------------------------------------- # settings <- list(iterations = iter, message = FALSE) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings) # plot(out) ## ---- results = 'hide', eval = F---------------------------------------------- # settings <- list(iterations = iter, message = FALSE) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DREAM", settings = settings) # plot(out) ## ---- results = 'hide', eval = FALSE------------------------------------------ # settings <- list(iterations = iter, message = FALSE) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DREAMzs", settings = settings) # plot(out) ## ---- eval = F---------------------------------------------------------------- # settings = list(iterations = iter, message = FALSE) # # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Twalk", settings = settings) ## ---- eval = T---------------------------------------------------------------- settings <- list(iterations = iter, nrChains = 3, message = FALSE) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) plot(out) #chain = getSample(out, coda = T) gelmanDiagnostics(out, plot = F) ## ---- results = 'hide', eval = F---------------------------------------------- # settings <- list(initialParticles = iter, iterations= 1) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "SMC", settings = settings) # plot(out) ## ---- results = 'hide', eval = F---------------------------------------------- # settings <- list(initialParticles = iter, iterations= 10) # out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "SMC", settings = settings) # plot(out) ## ----------------------------------------------------------------------------- sampleSize = 30 x <- (-(sampleSize-1)/2):((sampleSize-1)/2) y <- 1 * x + 1*x^2 + rnorm(n=sampleSize,mean=0,sd=10) plot(x,y, main="Test Data") ## ----------------------------------------------------------------------------- likelihood1 <- function(param){ pred = param[1] + param[2]*x + param[3] * x^2 singlelikelihoods = dnorm(y, mean = pred, sd = 1/(param[4]^2), log = T) return(sum(singlelikelihoods)) } likelihood2 <- function(param){ pred = param[1] + param[2]*x singlelikelihoods = dnorm(y, mean = pred, sd = 1/(param[3]^2), log = T) return(sum(singlelikelihoods)) } ## ----------------------------------------------------------------------------- setUp1 <- createBayesianSetup(likelihood1, lower = c(-5,-5,-5,0.01), upper = c(5,5,5,30)) setUp2 <- createBayesianSetup(likelihood2, lower = c(-5,-5,0.01), upper = c(5,5,30)) ## ---- results = 'hide'-------------------------------------------------------- settings = list(iterations = 15000, message = FALSE) out1 <- runMCMC(bayesianSetup = setUp1, sampler = "Metropolis", settings = settings) #tracePlot(out1, start = 5000) M1 = marginalLikelihood(out1) M1 settings = list(iterations = 15000, message = FALSE) out2 <- runMCMC(bayesianSetup = setUp2, sampler = "Metropolis", settings = settings) #tracePlot(out2, start = 5000) M2 = 
marginalLikelihood(out2) M2 ## ----------------------------------------------------------------------------- exp(M1$ln.ML - M2$ln.ML) ## ----------------------------------------------------------------------------- exp(M1$ln.ML) / ( exp(M1$ln.ML) + exp(M2$ln.ML)) ## ----------------------------------------------------------------------------- DIC(out1)$DIC DIC(out2)$DIC ## ----------------------------------------------------------------------------- # This will not work, since likelihood1 has no sum argument # WAIC(out1) # likelihood with sum argument likelihood3 <- function(param, sum = TRUE){ pred <- param[1] + param[2]*x + param[3] * x^2 singlelikelihoods <- dnorm(y, mean = pred, sd = 1/(param[4]^2), log = T) return(if (sum == TRUE) sum(singlelikelihoods) else singlelikelihoods) } setUp3 <- createBayesianSetup(likelihood3, lower = c(-5,-5,-5,0.01), upper = c(5,5,5,30)) out3 <- runMCMC(bayesianSetup = setUp3, sampler = "Metropolis", settings = settings) WAIC(out3)
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/doc/BayesianTools.R
--- title: "Bayesian Tools - General-Purpose MCMC and SMC Samplers and Tools for Bayesian Statistics" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{Manual for the BayesianTools R package} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} abstract: "The BayesianTools (BT) package supports model analysis (including sensitivity analysis and uncertainty analysis), Bayesian model calibration, as well as model selection and multi-model inference techniques for system models." --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=5, fig.height=5, warning=FALSE, cache = F) ``` ```{r, echo = F, message = F} set.seed(123) ``` # Quick start The purpose of this first section is to give you a quick overview of the most important functions of the BayesianTools (BT) package. For a more detailed description, see the later sections ## Installing, loading and citing the package If you haven't installed the package yet, either run ```{r, eval = F} install.packages("BayesianTools") ``` Or follow the instructions on [https://github.com/florianhartig/BayesianTools](https://github.com/florianhartig/BayesianTools) to install a development or an older version. Loading and citation ```{r} library(BayesianTools) citation("BayesianTools") ``` Note: BayesianTools calls a number of secondary packages. Particular important is coda, which is used on a number of plots and summary statistics. If you make heavy use of the summary statistics and diagnostics plots, it would be nice to cite coda as well! Pro-tip: if you are running a stochastic algorithms such as an MCMC, you should always set or record your random seed to make your results reproducible (otherwise, results will change slightly every time you run the code) ```{r} set.seed(123) ``` In a real application, to ensure reproducibility, it would also be useful to record the session info, ```{r, eval = F} sessionInfo() ``` which lists the version number of R and all loaded packages. ## The Bayesian Setup The central object in the BT package is the BayesianSetup. This class contains the information about the model to be fit (likelihood), and the priors for the model parameters. A BayesianSetup is created by the createBayesianSetup function. The function expects a log-likelihood and (optional) a log-prior. It then automatically creates the posterior and various convenience functions for the samplers. Advantages of the BayesianSetup include 1. support for automatic parallelization 2. functions are wrapped in try-catch statements to avoid crashes during long MCMC evaluations 3. and the posterior checks if the parameter is outside the prior first, in which case the likelihood is not evaluated (makes the algorithms faster for slow likelihoods). If no prior information is provided, an unbounded flat prior is created. If no explicit prior, but lower and upper values are provided, a standard uniform prior with the respective bounds is created, including the option to sample from this prior, which is useful for SMC and also for getting starting values. This option is used in the following example, which creates a multivariate normal likelihood density and a uniform prior for 3 parameters. ```{r} ll <- generateTestDensityMultiNormal(sigma = "no correlation") bayesianSetup = createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3)) ``` See later more detailed description about the BayesianSetup. 
**Hint:** for an example how to run this steps for dynamic ecological model, see ?VSEM ## Running MCMC and SMC functions Once you have your setup, you may want to run a calibration. The runMCMC function is the main wrapper for all other implemented MCMC/SMC functions. It always takes the following arguments * A bayesianSetup (alternatively, the log target function) * The sampler name * A list with settings - if a parameter is not provided, the default will be used As an example, choosing the sampler name "Metropolis" calls a versatile Metropolis-type MCMC with options for covariance adaptation, delayed rejection, tempering and Metropolis-within-Gibbs sampling. For details, see the the later reference on MCMC samplers. This is how we would call this sampler with default settings ```{r} iter = 10000 settings = list(iterations = iter, message = FALSE) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) ``` #### Summarizing outputs All samplers can be plotted and summarized via the console with the standard print, and summary commands ```{r} print(out) summary(out) ``` and plottted with several plot functions. The marginalPlot can either be plotted as histograms with density overlay, which is also the default, or as a violin plot (see "?marginalPlot"). ```{r} plot(out) # plot internally calls tracePlot(out) correlationPlot(out) marginalPlot(out, prior = TRUE) ``` Other Functions that can be applied to all samplers include model selection scores such as the DIC and the marginal Likelihood (for the calculation of the Bayes factor, see later section for more details), and the Maximum Aposteriori Value (MAP). For the marginal likelihood calculation it is possible to chose from a set of methods (see "?marginalLikelihood"). ```{r} marginalLikelihood(out) DIC(out) MAP(out) ``` You can extract (a part of) the sampled parameter values by ```{r, eval = F} getSample(out, start = 100, end = NULL, thin = 5, whichParameters = 1:2) ``` For all samplers, you can conveniently perform multiple runs via the nrChains argument ```{r, echo = T} iter = 1000 settings = list(iterations = iter, nrChains = 3, message = FALSE) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings) ``` The result is an object of mcmcSamplerList, which should allow to do everything one can do with an mcmcSampler object (with slightly different output sometimes). ```{r} print(out) summary(out) ``` For example, in the plot you now see 3 chains. ```{r} plot(out) ``` There are a few additional functions that may only be available for lists, for example convergence checks ```{r} #getSample(out, coda = F) gelmanDiagnostics(out, plot = T) ``` #### Which sampler to choose? The BT package provides a large class of different MCMC samplers, and it depends on the particular application which is most suitable. In the absence of further information, we currently recommend the DEzs sampler. This is also the default in the runMCMC function. # BayesianSetup Reference ## Reference on creating likelihoods The likelihood should be provided as a log density function. ```{r, eval = F} ll = logDensity(x) ``` See options for parallelization below. We will use a simple 3-d multivariate normal density for this demonstration. ```{r} ll <- generateTestDensityMultiNormal(sigma = "no correlation") bayesianSetup = createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3)) ``` #### Parallelization of the likelihood evaluations Likelihoods are often costly to compute. 
If that is the case for you, you should think about parallelization possibilities. The 'createBayesianSetup' function has the input variable 'parallel', with the following options * F / FALSE means no parallelization should be used * T / TRUE means that automatic parallelization options from R are used (careful: this will not work if your likelihood writes to file, or uses global variables or functions - see general R help on parallelization) * "external", assumed that the likelihood is already parallelized. In this case, the function needs to accept a matrix with parameters as columns, and rows as the different model runs you want to evaluate. This is the most likely option to use if you have a complicated setup (file I/O, HPC cluster) that cannot be treated with the standard R parallelization. Algorithms in the BayesianTools package can make use of parallel computing if this option is specified in the BayesianSetup. Note that currently, parallelization is used by the following algorithms: SMC, DEzs and DREAMzs sampler. It can also be used through the BayesianSetup with the functions of the sensitivity package. Here some more details on the parallelization #### 1. In-build parallelization: The in-build parallelization is the easiest way to make use of parallel computing. In the "parallel" argument you can choose the number of cores used for parallelization. Alternatively for TRUE or "auto" all available cores except for one will be used. Now the proposals are evaluated in parallel. Technically, the in-build parallelization uses an R cluster to evaluate the posterior density function. The input for the parallel function is a matrix, where each column represents a parameter and each row a proposal. In this way, the proposals can be evaluated in parallel. For sampler, where only one proposal is evaluated at a time (namely the Metropolis based algorithms as well as DE/DREAM without the zs extension), no parallelization can be used. #### 2. External parallelization The second option is to use an external parallelization. Here, a parallelization is attempted in the user defined likelihood function. To make use of external parallelization, the likelihood function needs to take a matrix of proposals and return a vector of likelihood values. In the proposal matrix each row represents one proposal, each column a parameter. Further, you need to specify the "external" parallelization in the "parallel" argument. In simplified terms the use of external parallelization uses the following steps: ```{r, eval = FALSE} ## Definition of likelihood function likelihood <- function(matrix){ # Calculate likelihood in parallel # Return vector of likelihood valus } ## Create Bayesian Setup BS <- createBayesianSetup(likelihood, parallel = "external", ...) ## Run MCMC runMCMC(BS, sampler = "SMC", ...) ``` #### 3. Multi-core and cluster calculations If you want to run your calculations on a cluster there are several ways to achieve it. In the first case you want to parallize n internal (not overall chains) on n cores. The argument "parallel = T" in "createBayesianSetup" allows only at most parallelization on 3 cores for the SMC, DEzs and DreamsSamplers. But by setting "parallel = n" to n cores in the "createBayesianSetup", the internal chains of DEzs and DREAMzs will be parallelized on n cores. This works only for the DEzs and DREAMzs samplers. 
```{r, eval = FALSE} ## n = Number of cores n=2 x <- c(1:10) likelihood <- function(param) return(sum(dnorm(x, mean = param, log = T))) bayesianSetup <- createBayesianSetup(likelihood, parallel = n, lower = -5, upper = 5) ## give runMCMC a matrix with n rows of proposals as startValues or sample n times from the previous created sampler out <- runMCMC(bayesianSetup, settings = list(iterations = 1000)) ``` In the second case you want to parallize n internal chains on n cores with a external parallilzed likelihood function. Unlike the previous case, that way DEzs, DREAMzs, and SMC samplers can be parallelized. ```{r, eval = FALSE} ### Create cluster with n cores cl <- parallel::makeCluster(n) ## Definition of the likelihood likelihood <- function(X) sum(dnorm(c(1:10), mean = X, log = T)) ## Definition of the likelihood which will be calculated in parallel. Instead of the parApply function, we could also define a costly parallelized likelihood pLikelihood <- function(param) parallel::parApply(cl = cl, X = param, MARGIN = 1, FUN = likelihood) ## export functions, dlls, libraries # parallel::clusterEvalQ(cl, library(BayesianTools)) parallel::clusterExport(cl, varlist = list(likelihood)) ## create BayesianSetup bayesianSetup <- createBayesianSetup(pLikelihood, lower = -10, upper = 10, parallel = 'external') ## For this case we want to parallelize the internal chains, therefore we create a n row matrix with startValues, if you parallelize a model in the likelihood, do not set a n*row Matrix for startValue settings = list(iterations = 100, nrChains = 1, startValue = bayesianSetup$prior$sampler(n)) ## runMCMC out <- runMCMC(bayesianSetup, settings, sampler = "DEzs") ``` In a another case your likelihood requires a parallized model. Start your cluster and export your model, the required libraries, and dlls. Now you can start your calculations with the argument "parallel = external" in createBayesianSetup. ```{r, eval = FALSE} ### Create cluster with n cores cl <- parallel::makeCluster(n) ## export your model # parallel::clusterExport(cl, varlist = list(complexModel)) ## Definition of the likelihood likelihood <- function(param) { # ll <- complexModel(param) # return(ll) } ## create BayesianSetup and settings bayesianSetup <- createBayesianSetup(likelihood, lower = -10, upper = 10, parallel = 'external') settings = list(iterations = 100, nrChains = 1) ## runMCMC out <- runMCMC(bayesianSetup, settings) ``` In the last case you can parallize over whole chain calculations. However, here the likelihood itself will not be parallelized. Each chain will be run on one core and the likelihood will be calculated on that core. 
```{r, eval = FALSE}
### Definition of likelihood function
x <- c(1:10)
likelihood <- function(param) return(sum(dnorm(x, mean = param, log = T)))

## Create BayesianSetup and settings
bayesianSetup <- createBayesianSetup(likelihood, lower = -10, upper = 10, parallel = F)
settings = list(iterations = 100000)

## Start cluster with n cores for n chains and export BayesianTools library
cl <- parallel::makeCluster(n)
parallel::clusterEvalQ(cl, library(BayesianTools))

## calculate parallel n chains, for each chain the likelihood will be calculated on one core
out <- parallel::parLapply(cl, 1:n, fun = function(X, bayesianSetup, settings)
  runMCMC(bayesianSetup, settings, sampler = "DEzs"), bayesianSetup, settings)

## Combine the chains
out <- createMcmcSamplerList(out)
```

** Remark: even though parallelization can significantly reduce the computation time, it is not always useful because of the so-called communication overhead (computational time for distributing and retrieving information from the parallel cores). For models with low computational cost, this procedure can take more time than the actual evaluation of the likelihood. If in doubt, make a small comparison of the runtime before starting your large sampling. **

## Reference on creating priors

The prior in the BayesianSetup consists of four parts

* A log density function
* An (optional) sampling function (must be a function without parameters, that returns a draw from the prior)
* lower / upper boundaries
* Additional info - best values, names of the parameters, ...

This information can be passed by first creating an extra object via createPrior, or through the createBayesianSetup function.

#### Creating priors

You have 5 options to create a prior

* Do not set a prior - in this case, an infinite prior will be created
* Set min/max values - a bounded flat prior and the corresponding sampling function will be created
* Use one of the pre-defined priors, see ?createPrior for a list. One of the options here is to use a previous MCMC output as new prior. Pre-defined priors will usually come with a sampling function
* Use a user-defined prior, see ?createPrior
* Create a prior from a previous MCMC sample

#### Creating user-defined priors

If creating a user-defined prior, the following information can/should be provided to createPrior:

* A log density function, as a function of a parameter vector x, same syntax as the likelihood
* Additionally, you should consider providing a function that samples from the prior, because many samplers (SMC, DE, DREAM) can make use of this function for initial conditions. If you use one of the pre-defined priors, the sampling function is already implemented
* lower / upper boundaries (can be set on top of any prior, to create truncation)
* Additional info - best values, names of the parameters, ...

A minimal sketch of such a user-defined prior is shown below.
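The following sketch mirrors the ?createPrior help; the names priorDensity and priorSampler are chosen here purely for illustration, and this is only one possible way to assemble the pieces listed above.

```{r, eval = F}
# log density of the prior, same parameter-vector syntax as the likelihood
priorDensity <- function(par){
  d1 <- dunif(par[1], -2, 6, log = TRUE)
  d2 <- dnorm(par[2], mean = 2, sd = 3, log = TRUE)
  return(d1 + d2)
}

# optional sampler: returns n draws from the prior (one row per draw)
priorSampler <- function(n = 1){
  cbind(runif(n, -2, 6), rnorm(n, mean = 2, sd = 3))
}

# lower / upper additionally truncate the prior
prior <- createPrior(density = priorDensity, sampler = priorSampler,
                     lower = c(-10, -20), upper = c(10, 20))
```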
#### Creating a prior from a previous MCMC sample

The following example from the help shows how this works

```{r}
# Create a BayesianSetup
ll <- generateTestDensityMultiNormal(sigma = "no correlation")
bayesianSetup = createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3))

settings = list(iterations = 2500, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, settings = settings)

newPrior = createPriorDensity(out, method = "multivariate", eps = 1e-10,
                              lower = rep(-10, 3), upper = rep(10, 3), best = NULL)
bayesianSetup <- createBayesianSetup(likelihood = ll, prior = newPrior)

settings = list(iterations = 1000, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, settings = settings)
```

# MCMC sampler reference

## The runMCMC() function

The runMCMC function is the central function for starting MCMC algorithms in the BayesianTools package. It requires a bayesianSetup, a choice of sampler (standard is DEzs), and optionally changes to the standard settings of the chosen sampler.

runMCMC(bayesianSetup, sampler = "DEzs", settings = NULL)

One optional argument that you can always use is nrChains - the default is 1. If you choose more, the runMCMC will perform several runs.

```{r, message = F}
ll <- generateTestDensityMultiNormal(sigma = "no correlation")
bayesianSetup = createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3))

settings = list(iterations = 10000, nrChains= 3, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)
plot(out)
marginalPlot(out, prior = T)
correlationPlot(out)
gelmanDiagnostics(out, plot=T)

# option to restart the sampler
settings = list(iterations = 1000, nrChains= 1, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)
out2 <- runMCMC(bayesianSetup = out)
out3 <- runMCMC(bayesianSetup = out2)

#plot(out)
#plot(out3)

# create new prior from posterior sample
newPriorFromPosterior <- createPriorDensity(out2)
```

## The different MCMC samplers

For convenience we define a number of iterations

```{r}
iter = 10000
```

### The Metropolis MCMC class

The BayesianTools package is able to run a large number of Metropolis-Hastings (MH) based algorithms. All of these samplers can be accessed by the "Metropolis" sampler in the runMCMC function by specifying the sampler's settings.

The following code gives an overview of the default settings of the MH sampler.

```{r}
applySettingsDefault(sampler = "Metropolis")
```

The following examples show how the different settings can be used. As you will see, the different options can be activated singly or in combination.

#### Standard MH MCMC

The following settings will run the standard Metropolis-Hastings MCMC.

References: Hastings, W. K. (1970). Monte Carlo sampling methods using Markov chains and their applications. Biometrika 57 (1), 97-109.

Metropolis, N., A. W. Rosenbluth, M. N. Rosenbluth, A. H. Teller, and E. Teller (1953). Equation of state calculations by fast computing machines. The Journal of Chemical Physics 21 (6), 1087 - 1092.

```{r, results = 'hide', eval = F}
settings <- list(iterations = iter, adapt = F, DRlevels = 1, gibbsProbabilities = NULL, temperingFunction = NULL, optimize = F, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)
plot(out)
```

#### Standard MH MCMC, prior optimization

This sampler uses an optimization step prior to the sampling process.
The optimization aims at improving the starting values and the covariance of the proposal distribution.

```{r, results = 'hide', eval = F}
settings <- list(iterations = iter, adapt = F, DRlevels = 1, gibbsProbabilities = NULL, temperingFunction = NULL, optimize = T, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)
plot(out)
```

#### Adaptive MCMC, prior optimization

In the adaptive Metropolis sampler (AM) the information already acquired in the sampling process is used to improve (or adapt) the proposal function. In the BayesianTools package the history of the chain is used to adapt the covariance of the proposal distribution.

References: Haario, H., E. Saksman, and J. Tamminen (2001). An adaptive Metropolis algorithm. Bernoulli, 223-242.

```{r, results = 'hide', eval = F}
settings <- list(iterations = iter, adapt = T, DRlevels = 1, gibbsProbabilities = NULL, temperingFunction = NULL, optimize = T, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)
plot(out)
```

#### Standard MCMC, prior optimization, delayed rejection

Even though rejection is an essential step of an MCMC algorithm, it can also mean that the proposal distribution is (locally) badly tuned to the target distribution. In a delayed rejection (DR) sampler a second (or third, etc.) proposal is made before rejection. This proposal is usually drawn from a different distribution, allowing for a greater flexibility of the sampler. In the BayesianTools package the number of delayed rejection steps as well as the scaling of the proposals can be determined.

** Note that the current version only supports two delayed rejection steps. **

References: Green, Peter J., and Antonietta Mira. "Delayed rejection in reversible jump Metropolis-Hastings." Biometrika (2001): 1035-1053.

```{r, results = 'hide', eval = F}
settings <- list(iterations = iter, adapt = F, DRlevels = 2, gibbsProbabilities = NULL, temperingFunction = NULL, optimize = T, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)
plot(out)
```

#### Adaptive MCMC, prior optimization, delayed rejection

The delayed rejection adaptive Metropolis (DRAM) sampler is merely a combination of the two previous samplers (DR and AM).

References: Haario, Heikki, et al. "DRAM: efficient adaptive MCMC." Statistics and Computing 16.4 (2006): 339-354.

```{r, results = 'hide', eval = F}
settings <- list(iterations = iter, adapt = T, DRlevels = 2, gibbsProbabilities = NULL, temperingFunction = NULL, optimize = T, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)
plot(out)
```

#### Standard MCMC, prior optimization, Gibbs updating

To reduce the dimensions of the target function a Metropolis-within-Gibbs sampler can be run with the BayesianTools package. This means that in each iteration only a subset of the parameter vector is updated. In the example below at most two (of the three) parameters are updated in each step, and it is twice as likely that one parameter is varied rather than two.

** Note that currently adaptive cannot be mixed with Gibbs updating!
**

```{r, results = 'hide', eval = F}
settings <- list(iterations = iter, adapt = T, DRlevels = 1, gibbsProbabilities = c(1,0.5,0), temperingFunction = NULL, optimize = T, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)
plot(out)
```

#### Standard MCMC, prior optimization, Gibbs updating, tempering

Simulated tempering is closely related to simulated annealing (e.g. Bélisle, 1992) in optimization algorithms. The idea of tempering is to increase the acceptance rate during burn-in. This should result in a faster initial scanning of the target function. To include this, a tempering function needs to be supplied by the user. The function describes how the acceptance rate is influenced during burn-in. In the example below an exponential decline approaching 1 (= no influence on the acceptance rate) is used.

References: Bélisle, C. J. (1992). Convergence theorems for a class of simulated annealing algorithms on R^d. Journal of Applied Probability, 885-895.

C. J. Geyer (2011) Importance sampling, simulated tempering, and umbrella sampling, in the Handbook of Markov Chain Monte Carlo, S. P. Brooks, et al (eds), Chapman & Hall/CRC.

```{r, results = 'hide', eval = F}
temperingFunction <- function(x) 5 * exp(-0.01*x) + 1
settings <- list(iterations = iter, adapt = F, DRlevels = 1, gibbsProbabilities = c(1,1,0), temperingFunction = temperingFunction, optimize = T, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)
plot(out)
```

### Differential Evolution MCMC

The BT package implements two versions of the differential evolution MCMC. If in doubt, you should use the DEzs option.

The first is the normal DE MCMC, corresponding to Ter Braak, Cajo JF. "A Markov Chain Monte Carlo version of the genetic algorithm Differential Evolution: easy Bayesian computing for real parameter spaces." Statistics and Computing 16.3 (2006): 239-249. In this sampler multiple chains are run in parallel (but not in the sense of parallel computing). The main difference to the Metropolis-based algorithms is the creation of the proposal. Generally, all samplers use the current position of the chain and add a step in the parameter space to generate a new proposal. Whereas in the Metropolis-based samplers this step is usually drawn from a multivariate normal distribution (yet every distribution is possible), the DE sampler uses the current position of two other chains to generate the step for each chain. For successful sampling, at least 2*d chains, with d being the number of parameters, need to be run in parallel.

```{r, results = 'hide', eval = F}
settings <- list(iterations = iter, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DE", settings = settings)
plot(out)
```

The second is the Differential Evolution MCMC with snooker update and sampling from past states, corresponding to ter Braak, Cajo JF, and Jasper A. Vrugt. "Differential evolution Markov chain with snooker updater and fewer chains." Statistics and Computing 18.4 (2008): 435-446. This extension covers two differences to the normal DE MCMC. First, a snooker update is used based on a user-defined probability. Second, past states of the other chains are also respected in the creation of the proposal. These extensions allow for fewer chains (i.e. 3 chains are usually enough for up to 200 parameters) and parallel computing, as the current position of each chain is only dependent on the past states of the other chains.
```{r, results = 'hide', eval = F}
settings <- list(iterations = iter, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings)
plot(out)
```

### DREAM sampler

Also for the DREAM sampler, there are two versions included. First of all, the standard DREAM sampler, see Vrugt, Jasper A., et al. "Accelerating Markov chain Monte Carlo simulation by differential evolution with self-adaptive randomized subspace sampling." International Journal of Nonlinear Sciences and Numerical Simulation 10.3 (2009): 273-290.

This sampler is largely built on the DE sampler with some significant differences:

1) More than two chains can be used to generate a proposal.
2) Randomized subspace sampling can be used to enhance the efficiency for high-dimensional posteriors. Each dimension is updated with a crossover probability CR. To speed up the exploration of the posterior, DREAM adapts the distribution of CR values during burn-in to favor large jumps over small ones.
3) Outlier chains can be removed during burn-in.

```{r, results = 'hide', eval = F}
settings <- list(iterations = iter, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DREAM", settings = settings)
plot(out)
```

The second implementation uses the same extensions as the DEzs sampler, namely sampling from past states and a snooker update. Also here, this extension allows for the use of fewer chains and parallel computing. Again, if in doubt you should prefer "DREAMzs".

```{r, results = 'hide', eval = FALSE}
settings <- list(iterations = iter, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DREAMzs", settings = settings)
plot(out)
```

### T-walk

The T-walk is an MCMC algorithm developed by Christen, J. Andrés, and Colin Fox. "A general purpose sampling algorithm for continuous distributions (the t-walk)." Bayesian Analysis 5.2 (2010): 263-281. In this sampler, two independent points are used to explore the posterior space. Based on probabilities, four different moves are used to generate proposals for the two points. As for the DE sampler, this procedure requires no tuning of the proposal distribution for efficient sampling in complex posterior distributions.

```{r, eval = F}
settings = list(iterations = iter, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Twalk", settings = settings)
```

### Convergence checks for MCMCs

All MCMCs should be checked for convergence. We recommend the standard Gelman-Rubin procedure. This procedure requires running several MCMCs (we recommend 3). This can be achieved either directly in the runMCMC (nrChains = 3), or, for runtime reasons, by combining the results of three independent runMCMC evaluations with nrChains = 1.

```{r, eval = T}
settings <- list(iterations = iter, nrChains = 3, message = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)
plot(out)

#chain = getSample(out, coda = T)
gelmanDiagnostics(out, plot = F)
```

## Non-MCMC sampling algorithms

MCMCs sample the posterior space by creating a chain in parameter space. While this allows "learning" from past steps, it does not permit the parallel evaluation of a large number of posterior values at the same time.

An alternative to MCMCs are particle filters, aka Sequential Monte Carlo (SMC) algorithms. See Hartig, F.; Calabrese, J. M.; Reineking, B.; Wiegand, T. & Huth, A. Statistical inference for stochastic simulation models - theory and application. Ecol. Lett., 2011, 14, 816-827.
### Rejection sampling

The easiest option is to simply sample a large number of parameters and accept them according to their posterior value. This option can be emulated with the implemented SMC by setting iterations to 1.

```{r, results = 'hide', eval = F}
settings <- list(initialParticles = iter, iterations= 1)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "SMC", settings = settings)
plot(out)
```

### Sequential Monte Carlo (SMC)

The more sophisticated option is using the implemented SMC, which is basically a particle filter that applies several filter steps.

```{r, results = 'hide', eval = F}
settings <- list(initialParticles = iter, iterations= 10)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "SMC", settings = settings)
plot(out)
```

Note that the use of a number for initialParticles requires that the bayesianSetup includes the possibility to sample from the prior.

# Bayesian model comparison and averaging

There are a number of Bayesian model selection and model comparison methods. The BT package implements three of the most common of them: the DIC, the WAIC, and the Bayes factor.

* On the Bayes factor, see Kass, R. E. & Raftery, A. E. (1995) Bayes Factors. J. Am. Stat. Assoc., Amer Statist Assn, 90, 773-795
* An overview on DIC and WAIC is given in Gelman, A.; Hwang, J. & Vehtari, A. (2014) Understanding predictive information criteria for Bayesian models. Statistics and Computing, 24, 997-1016. On DIC, see also the original reference by Spiegelhalter, D. J.; Best, N. G.; Carlin, B. P. & van der Linde, A. (2002) Bayesian measures of model complexity and fit. J. Roy. Stat. Soc. B, 64, 583-639.

The Bayes factor relies on the calculation of marginal likelihoods, which is numerically not without problems. The BT package currently implements three methods

* The recommended way is the method "Chib" (Chib and Jeliazkov, 2001), which is based on MCMC samples but performs additional calculations. Despite being the current recommendation, note that there are some numeric issues with this algorithm that may limit reliability for larger dimensions.
* The harmonic mean approximation is implemented only for comparison. Note that the method is numerically unreliable and usually should not be used.
* The third method is simply sampling from the prior. While in principle unbiased, it will only converge for a large number of samples and is therefore numerically inefficient.

A short sketch of how these estimators can be selected is shown below; a full worked example follows.
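As a quick sketch (assuming an existing MCMC output object, here called out, such as out1 in the example below), the estimator can be switched via the method argument of marginalLikelihood; the call without a method corresponds to the recommended "Chib" estimator:

```{r, eval = F}
M_default <- marginalLikelihood(out)                                     # recommended default
M_hm      <- marginalLikelihood(out, method = "HM")                      # harmonic mean, comparison only
M_prior   <- marginalLikelihood(out, method = "Prior", numSamples = 1000) # brute-force prior sampling

M_default$ln.ML  # log marginal likelihood
```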
## Example

Data: linear regression with a quadratic and a linear effect

```{r}
sampleSize = 30
x <- (-(sampleSize-1)/2):((sampleSize-1)/2)
y <- 1 * x + 1*x^2 + rnorm(n=sampleSize,mean=0,sd=10)
plot(x,y, main="Test Data")
```

Likelihoods for both models

```{r}
likelihood1 <- function(param){
    pred = param[1] + param[2]*x + param[3] * x^2
    singlelikelihoods = dnorm(y, mean = pred, sd = 1/(param[4]^2), log = T)
    return(sum(singlelikelihoods))
}

likelihood2 <- function(param){
    pred = param[1] + param[2]*x
    singlelikelihoods = dnorm(y, mean = pred, sd = 1/(param[3]^2), log = T)
    return(sum(singlelikelihoods))
}
```

Posterior definitions

```{r}
setUp1 <- createBayesianSetup(likelihood1, lower = c(-5,-5,-5,0.01), upper = c(5,5,5,30))
setUp2 <- createBayesianSetup(likelihood2, lower = c(-5,-5,0.01), upper = c(5,5,30))
```

MCMC and marginal likelihood calculation

```{r, results = 'hide'}
settings = list(iterations = 15000, message = FALSE)
out1 <- runMCMC(bayesianSetup = setUp1, sampler = "Metropolis", settings = settings)
#tracePlot(out1, start = 5000)
M1 = marginalLikelihood(out1)
M1

settings = list(iterations = 15000, message = FALSE)
out2 <- runMCMC(bayesianSetup = setUp2, sampler = "Metropolis", settings = settings)
#tracePlot(out2, start = 5000)
M2 = marginalLikelihood(out2)
M2
```

### Model comparison via Bayes factor

Bayes factor (need to reverse the log)

```{r}
exp(M1$ln.ML - M2$ln.ML)
```

BF > 1 means the evidence is in favor of M1. See Kass, R. E. & Raftery, A. E. (1995) Bayes Factors. J. Am. Stat. Assoc., Amer Statist Assn, 90, 773-795.

Assuming equal prior weights on all models, we can calculate the posterior weight of M1 as

```{r}
exp(M1$ln.ML) / ( exp(M1$ln.ML) + exp(M2$ln.ML))
```

If models have different model priors, multiply with the prior probabilities of each model.

### Model comparison via DIC

The Deviance Information Criterion is a commonly applied method to summarize the fit of an MCMC chain. It can be obtained via

```{r}
DIC(out1)$DIC
DIC(out2)$DIC
```

### Model comparison via WAIC

The Watanabe-Akaike Information Criterion is another criterion for model comparison. To be able to calculate the WAIC, the model must implement a log-likelihood density that allows calculating the log-likelihood point-wise (the likelihood function requires a "sum" argument that determines whether the summed log-likelihood should be returned). It can be obtained via

```{r}
# This will not work, since likelihood1 has no sum argument
# WAIC(out1)

# likelihood with sum argument
likelihood3 <- function(param, sum = TRUE){
    pred <- param[1] + param[2]*x + param[3] * x^2
    singlelikelihoods <- dnorm(y, mean = pred, sd = 1/(param[4]^2), log = T)
    return(if (sum == TRUE) sum(singlelikelihoods) else singlelikelihoods)
}
setUp3 <- createBayesianSetup(likelihood3, lower = c(-5,-5,-5,0.01), upper = c(5,5,5,30))
out3 <- runMCMC(bayesianSetup = setUp3, sampler = "Metropolis", settings = settings)

WAIC(out3)
```
---
title: "Interfacing your model with R"
output:
  rmarkdown::html_vignette:
    toc: true
vignette: >
  %\VignetteIndexEntry{Interfacing your model with R}
  \usepackage[utf8]{inputenc}
  %\VignetteEngine{knitr::rmarkdown}
abstract: "This tutorial discusses how to interface models written in other programming languages with R, so that they can be fit with BayesianTools"
author: Florian Hartig
editor_options:
  chunk_output_type: console
---

```{r global_options, include=FALSE}
knitr::opts_chunk$set(fig.width=5, fig.height=5, warning=FALSE, cache = F)
```

```{r, echo = F, message = F}
set.seed(123)
```

# Interfacing a model with BT - step-by-step guide

## Step 1: Create a runModel(par) function

A basic requirement to allow calibration in BT is that we need to be able to execute the model with a given set of parameters. Strictly speaking, BT will not see the model as such, but requires a likelihood function with interface likelihood(par), where par is a vector. In this function, however, you will then probably run the model with parameters par, where par could stay a vector or be transformed into another format, e.g. data.frame, matrix or list.

What happens now depends on how your model is programmed - I have listed the options in order of convenience / speed. If your model has never been interfaced with R you will likely have to move down to the last option.

1. Model in R, or R interface existing
2. Model can be compiled and linked as a dll
3. Model is in C/C++ and can be interfaced with RCPP
4. Model can be compiled as executable and accepts parameters via the command line
5. Model can be compiled as executable and reads parameters via a parameter file
6. Model parameters are hard-coded in the executable

### Case 1 - model programmed in R

Usually, you will have to do nothing. Just make sure you can call your model as in

```{r, eval = F}
runMyModel(par)
```

Typically this function will directly return the model outputs, so step 2 can be skipped.

### Case 2 - compiled dll, parameters are set via dll interface

If you have your model prepared as a dll, or you can prepare it that way, you can use the [dyn.load()](https://stat.ethz.ch/R-manual/R-devel/library/base/html/dynload.html) function to link R to your model

```{r, eval = F}
dyn.load(model)

runMyModel <- function(par){
  # out <- ... model call here
  # process out
  return(out)
}
```

Again, if you implement this, you will also typically return the output directly via the dll and not write to file, which means that step 2 can be skipped.

The tricky thing in this approach is that you have to code the interface to your dll, which technically means in most programming languages to set your variables as external or something similar, so that they can be accessed from the outside. How this works will depend on the programming language.

### Case 3 - model programmed in C / C++, interfaced with RCPP

RCPP is a highly flexible environment to interface between R and C/C++. If your model is coded in C / C++, RCPP offers the most safe and powerful way to connect with R (much more flexible than with command line or dll). However, doing the interface may need some adjustments to the code, and there can be technical problems that are difficult to solve for beginners. I do not recommend attempting to interface an existing C/C++ model unless you have RCPP experience, or at least very good C/C++ experience.
Again, if you implement this, you will also typically return the output directly via the dll and not write to file, which means that step 2 can be skipped.

### Case 4 - compiled executable, parameters set via command line (std I/O)

If your model is written in a compiled or interpreted language, and accepts parameters via std I/O, wrapping is usually nothing more than writing the system call in an R function. An example would be

```{r, eval = F}
runMyModel <- function(par){

  # Create here a string with what you would write to call the model from the command line
  systemCall <- paste("model.exe", par[1], par[2])

  # intern indicates whether to capture the output of the command as an R character vector
  out = system(systemCall, intern = TRUE)

  # write here the code to convert out into the appropriate R classes

}
```

Note: If you have problems with the system command, try system2. If the model returns the output via std.out, you can catch this, convert it, and skip step 2. If your model writes to file, go to step 2.

### Case 5 - compiled model, parameters set via parameter file or in any other method

Many models read parameters with a parameter file. In this case you want to do something like this

```{r, eval = F}
runMyModel <- function(par, returnData = NULL){

  writeParameters(par)

  system("Model.exe")

  if(! is.null(returnData)) return(readData(returnData)) # The readData function will be defined later

}

writeParameters <- function(par){

  # e.g.
  # read template parameter file
  # replace strings in template file
  # write parameter file
}
```

Depending on your problem, it can also make sense to define a setup function such as

```{r, eval = F}
setUpModel <- function(parameterTemplate, site, localConditions){

  # create the runModel, readData functions (see later) here

  return(list(runModel, readData))

}
```

How you write the parameter function depends on the file format you use for the parameters. In general, you probably want to create a template parameter file that you use as a base and from which you change parameters

* If your parameter file is in an *.xml format*, check out the xml functions in R
* If your parameter file is in a *general text format*, the best option may be to create a template parameter file, place a unique string at the locations of the parameters that you want to replace, and then use string replace functions in R, e.g. [grep](https://stat.ethz.ch/R-manual/R-devel/library/base/html/grep.html) to replace this string.

### Case 6 - compiled model, parameters cannot be changed

You have to change your model code to achieve one of the former options. If the model is in C/C++, going directly to RCPP seems the best alternative.

## Step 2: Reading back data

If your model returns the output directly (which is highly preferable), you can skip this step.

For simple models, you might consider returning the model output directly with the runMyModel function. This is probably so for cases a) and b) above, i.e. the model is already in R, or accepts parameters via the command line.

More complicated models, however, produce a large number of outputs and you typically don't need all of them. It is therefore more useful to write one or several separate readData or getData functions. The only two different cases I will consider here are

* via dll / RCPP
* via file outputs

*Model is a dll*

If the model is a dll file, the best thing would probably be to implement appropriate getData functions in the source code that can then be called from R.
If your model is in C and in a dll, interfacing this via RCPP would probably be easier, because you can directly return R dataframes and other data structures.

*Model writes file output*

If the model writes file output, write a getData function that reads in the model outputs and returns the data in the desired format, typically the same that you would use to represent your field data.

```{r, eval = F}
getData <- function(type = X){

  read.csv(xxx)

  # do some transformation

  # return data in desired format
}
```

### Testing the approach

From R, you should now be able to do something like this

```{r, eval = F}
par = c(1,2,3,4 ..)

runMyModel(par)

output <- getData(type = DesiredType)

plot(output)
```

## Step 3 (optional) - creating an R package from your code

The last step is optional, but we recommend that you take it from the start, because there is really no downside to it. Instead of working with R code in several files that you run by hand, you can directly put all code into an R package. Creating and managing R packages is very easy, and it's easier to pass on your code because everything, including help, is in one package. To create an R package, follow the tutorial [here](http://biometry.github.io/APES/R/R70-PackageDevelopment.html). Remember to create good documentation using Roxygen.

# Speed optimization and parallelization

For running sensitivity analyses or calibrations, runtime is often an issue. Before you parallelize, make sure your model is as fast as possible.

## Easy things

* Are you compiling with maximum optimization (e.g. -O3 in C/C++)?
* If you have a spin-up phase, could you increase the time step during this phase?
* Could you increase the time step generally?
* Do you write unnecessary outputs that you could turn off (harddisk I/O is often slow)?

## Difficult things

* Make the model directly callable (RCPP or dll) to avoid harddisk I/O
* Is it possible to reduce initialization time (not only spin-up, but also for reading in the forcings / drivers) by not ending the model executable after each run, but rather keeping it "waiting" for a new run?
* Code optimization: did you use a profiler? Read up on code optimization
* Check for unnecessary calculations in your code / introduce compiler flags where appropriate

## Parallelization

A possibility to speed up the run time of your model is to run it on multiple cores (CPUs). To do so, you have two choices:

1. Parallelize the model itself
2. Parallelize the model call, so that BT can do several model evaluations in parallel

Which of the two makes more sense depends a lot on your problem. Parallelizing the model itself will be interesting in particular for very large models, which could otherwise not be calibrated with MCMCs. However, this approach will typically require writing parallel C/C++ code and advanced programming skills, which is the reason why we will not discuss it further here.

The usual advice in parallel computing is anyway to parallelize the outer loops first, to minimize communication overhead, which suggests starting with parallelizing the model evaluations. This is also much easier to program. Even within this, there are two levels of parallelization possible:

1. Parallelize the call of several MCMC / SMC samplers
2. Parallelize within the MCMC / SMC samplers

Currently, BT only supports parallelization within MCMCs / SMCs, but it is easy to also implement between-sampler parallelization by hand. Both approaches are described below.
### Within sampler parallelization

Within-sampler parallelization is particularly useful for algorithms that can use a large number of cores in parallel, e.g. sensitivity analyses or SMC sampling. For the MCMCs, it depends on the settings and the algorithm how much parallelization they can make use of. In general, MCMCs are Markovian, as the name says, i.e. they set up a chain of sequential model evaluations, and those calls can therefore not be fully parallelized. However, a number of MCMCs in the BT package use algorithms that can be partly parallelized, in particular the population MCMC algorithms DE/DEzs/DREAM/DREAMzs. For all these cases, BT will automatically use parallelization if the BT setup indicates that this is implemented.

How to do this? A first requirement is to have your model wrapped into an R function (see previous section). If that is the case, R offers a number of options to run functions in parallel. The easiest is to use the parallel package that comes with the R core. For other packages, see the internet and the CRAN task view on [High Performance Computing](https://CRAN.R-project.org/view=HighPerformanceComputing).

As an example, assume we have the following, very simple model:

```{r}
mymodel <- function(x){
  output <- 0.2*x + 0.1^x
  return(output)
}
```

To start a parallel computation, we will first need to create a cluster object. Here we will initiate a cluster with 2 CPUs.

```{r, eval = F}
library(parallel)
cl <- makeCluster(2)

runParallel <- function(parList){
  parSapply(cl, parList, mymodel)
}

runParallel(c(1,2))
```

You could use this principle to build your own parallelized likelihood. However, something very similar to the previous loop is automated in BayesianTools. You can directly create a parallel model evaluation function with the function generateParallelExecuter, or alternatively directly in the createBayesianSetup

```{r, eval = F}
library(BayesianTools)
parModel <- generateParallelExecuter(mymodel)
```

If your model is thread-safe, you should be able to run this out of the box. I therefore recommend using the hand-coded parallelization only if the model is not thread-safe.

### Running several MCMCs in parallel

In addition to the within-chain parallelization, you can also run several MCMCs in parallel, and combine them later into a single McmcSamplerList

```{r}
library(BayesianTools)
ll <- generateTestDensityMultiNormal(sigma = "no correlation")
bayesianSetup <- createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3))

settings = list(iterations = 200)

# run the several MCMC chains either in separate R sessions, or via R parallel packages
out1 <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings)
out2 <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings)

res <- createMcmcSamplerList(list(out1, out2))
plot(res)
```

### Thread safety

Thread safety quite generally means that you can execute multiple instances of your code on your hardware. There are various things that can limit thread safety, for example

* writing outputs to file (several threads might write to the same file at the same time)
library(BayesianTools)

ll <- generateTestDensityMultiNormal(sigma = "no correlation")
bayesianSetup <- createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3))

settings = list(iterations = 200)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings)
summary(out)

# DE family samplers are population MCMCs that run a number of internal chains
# in parallel. Here are examples of how to change the internal chains.
# Note that internal chains can be executed in parallel
settings = list(startValue = 4, iterations = 200)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings)
summary(out)

# Modify the start values of the internal chains (note that this is a matrix
# of dim nChain * nPar)
settings = list(startValue = matrix(rnorm(12), nrow = 4, ncol = 3), iterations = 200)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings)
summary(out)

# In the DE sampler family with Z matrix, the previous chains are written in
# a common matrix, from which proposals are generated. Per default this matrix
# is started with samples from the prior, but we can change this. Often useful
# to improve sampler convergence,
# see https://github.com/florianhartig/BayesianTools/issues/79
settings = list(startValue = matrix(rnorm(12), nrow = 4, ncol = 3),
                Z = matrix(rnorm(300), nrow = 100, ncol = 3),
                iterations = 200)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings)
summary(out)
x = runif(500, -1, 1)
y = 0.2 + 0.9 * x + rnorm(500, sd = 0.5)
summary(lm(y ~ x))

GOF(x, y)
GOF(x, y, plot = TRUE)
# Running the metropolis via the runMCMC with a proposal covariance generated from the prior # (can be useful for complicated priors) ll = function(x) sum(dnorm(x, log = TRUE)) setup = createBayesianSetup(ll, lower = c(-10,-10), upper = c(10,10)) samples = setup$prior$sampler(1000) generator = createProposalGenerator(diag(1, setup$numPars)) generator = updateProposalGenerator(generator, samples, manualScaleAdjustment = 1, message = TRUE) settings = list(proposalGenerator = generator, optimize = FALSE, iterations = 500) out = runMCMC(bayesianSetup = setup, sampler = "Metropolis", settings = settings)
## Example for the use of SMC # First we need a bayesianSetup - SMC makes most sense if we can for demonstration, # we'll write a function that puts out the number of model calls MultiNomialNoCor <- generateTestDensityMultiNormal(sigma = "no correlation") parallelLL <- function(parMatrix){ print(paste("Calling likelihood with", nrow(parMatrix), "parameter combinations")) out = apply(parMatrix, 1, MultiNomialNoCor) return(out) } bayesianSetup <- createBayesianSetup(likelihood = parallelLL, lower = rep(-10, 3), upper = rep(10, 3), parallel = "external") # Defining settings for the sampler # First we use the sampler for rejection sampling settings <- list(initialParticles = 1000, iterations = 1, resampling = FALSE) # Running the sampler out1 <- runMCMC(bayesianSetup = bayesianSetup, sampler = "SMC", settings = settings) #plot(out1) # Now for sequential Monte Carlo settings <- list(initialParticles = 100, iterations = 5, resamplingSteps = 1) out2 <- runMCMC(bayesianSetup = bayesianSetup, sampler = "SMC", settings = settings) #plot(out2) \dontrun{ ## Example for starting a new SMC run with results from a previous SMC run # Generate example data (time series) # x1 and x2 are predictory, yObs is the response t <- seq(1, 365) x1 <- (sin( 1 / 160 * 2 * pi * t) + pi) * 5 x2 <- cos( 1 / 182.5 * 1.25 * pi * t) * 12 # the model mod <- function(par, t1 = 1, tn = 365) { par[1] * x1[t1:tn] + par[2] * x2[t1:tn] } # the true parameters par1 <- 1.65 par2 <- 0.75 yObs <- mod(c(par1, par2)) + rnorm(length(x1), 0, 2) # split the time series in half plot(yObs ~ t) abline(v = 182, col = "red", lty = 2) # First half of the data ll_1 <- function(x, sum = TRUE) { out <- dnorm(mod(x, 1, 182) - yObs[1:182], 0, 2, log = TRUE) if (sum == TRUE) sum(out) else out } # Fit the first half of the time series # (e.g. fit the model to the data soon as you collect the data) setup_1 <- createBayesianSetup(ll_1, lower = c(-10, -10), upper = c(10, 10)) settings_1 <- list(initialParticles = 1000) out_1 <- runMCMC(setup_1, "SMC", settings_1) summary(out_1) # Second half of the data ll_2 <- function(x, sum = TRUE) { out <- dnorm(mod(x, 183, 365) - yObs[183:365], 0, 2, log = TRUE) if (sum == TRUE) sum(out) else out } # Fit the second half of the time series # (e.g. fit the model to the data soon as you collect the data) setup_2 <- createBayesianSetup(ll_2, lower = c(-10, -10), upper = c(10, 10)) # This is the important step, we use the final particles from the # previous SMC run to initialize the new SMC run settings_2 <- list(initialParticles = out_1$particles) out_2 <- runMCMC(setup_2, "SMC", settings_2) summary(out_2) par_pred <- apply(out_2$particles, 2, median) pred <- mod(par_pred) plotTimeSeries(yObs, pred) }
## This example shows how to run and calibrate the VSEM model library(BayesianTools) # Create input data for the model PAR <- VSEMcreatePAR(1:1000) plot(PAR, main = "PAR (driving the model)", xlab = "Day") # load reference parameter definition (upper, lower prior) refPars <- VSEMgetDefaults() # this adds one additional parameter for the likelihood standard deviation (see below) refPars[12,] <- c(2, 0.1, 4) rownames(refPars)[12] <- "error-sd" head(refPars) # create some simulated test data # generally recommended to start with simulated data before moving to real data referenceData <- VSEM(refPars$best[1:11], PAR) # model predictions with reference parameters referenceData[,1] = 1000 * referenceData[,1] # this adds the error - needs to conform to the error definition in the likelihood obs <- referenceData + rnorm(length(referenceData), sd = refPars$best[12]) oldpar <- par(mfrow = c(2,2)) for (i in 1:4) plotTimeSeries(observed = obs[,i], predicted = referenceData[,i], main = colnames(referenceData)[i]) # Best to program in a way that we can choose easily which parameters to calibrate parSel = c(1:6, 12) # here is the likelihood likelihood <- function(par, sum = TRUE){ # set parameters that are not calibrated on default values x = refPars$best x[parSel] = par predicted <- VSEM(x[1:11], PAR) # replace here VSEM with your model predicted[,1] = 1000 * predicted[,1] # this is just rescaling diff <- c(predicted[,1:4] - obs[,1:4]) # difference betweeno observed and predicted # univariate normal likelihood. Note that there is a parameter involved here that is fit llValues <- dnorm(diff, sd = x[12], log = TRUE) if (sum == FALSE) return(llValues) else return(sum(llValues)) } # optional, you can also directly provide lower, upper in the createBayesianSetup, see help prior <- createUniformPrior(lower = refPars$lower[parSel], upper = refPars$upper[parSel], best = refPars$best[parSel]) bayesianSetup <- createBayesianSetup(likelihood, prior, names = rownames(refPars)[parSel]) # settings for the sampler, iterations should be increased for real applicatoin settings <- list(iterations = 2000, nrChains = 2) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings) \dontrun{ plot(out) summary(out) marginalPlot(out) gelmanDiagnostics(out) # should be below 1.05 for all parameters to demonstrate convergence # Posterior predictive simulations # Create a prediction function createPredictions <- function(par){ # set the parameters that are not calibrated on default values x = refPars$best x[parSel] = par predicted <- VSEM(x[1:11], PAR) # replace here VSEM with your model return(predicted[,1] * 1000) } # Create an error function createError <- function(mean, par){ return(rnorm(length(mean), mean = mean, sd = par[7])) } # plot prior predictive distribution and prior predictive simulations plotTimeSeriesResults(sampler = out, model = createPredictions, observed = obs[,1], error = createError, prior = TRUE, main = "Prior predictive") # plot posterior predictive distribution and posterior predictive simulations plotTimeSeriesResults(sampler = out, model = createPredictions, observed = obs[,1], error = createError, main = "Posterior predictive") ######################################################## # Demonstrating the updating of the prior from old posterior # Note that it is usually more exact to rerun the MCMC # with all (old and new) data, instead of updating the prior # because likely some information is lost when approximating the # Posterior by a multivariate normal settings <- 
list(iterations = 5000, nrChains = 2) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings) plot(out) correlationPlot(out, start = 1000) newPrior = createPriorDensity(out, method = "multivariate", eps = 1e-10, lower = refPars$lower[parSel], upper = refPars$upper[parSel], start= 1000) bayesianSetup <- createBayesianSetup(likelihood = likelihood, prior = newPrior, names = rownames(refPars)[parSel] ) # check boundaries are correct set bayesianSetup$prior$sampler() < refPars$lower[parSel] bayesianSetup$prior$sampler() > refPars$upper[parSel] # check prior looks similar to posterior x = bayesianSetup$prior$sampler(2000) correlationPlot(x, thin = F) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings) plot(out) correlationPlot(out) plotTimeSeriesResults(sampler = out, model = createPredictions, observed = obs[,1], error = createError, prior = F, main = "Posterior predictive") plotTimeSeriesResults(sampler = out, model = createPredictions, observed = obs[,1], error = createError, prior = T, main = "Prior predictive") } par(oldpar)
bayesianSetup <- createBayesianSetup(likelihood = testDensityNormal, prior = createUniformPrior(lower = rep(-10,2), upper = rep(10,2))) # likelihood density needs to have option sum = FALSE testDensityNormal(c(1,1,1), sum = FALSE) bayesianSetup$likelihood$density(c(1,1,1), sum = FALSE) bayesianSetup$likelihood$density(matrix(rep(1,9), ncol = 3), sum = FALSE) # running MCMC out = runMCMC(bayesianSetup = bayesianSetup) WAIC(out)
means <- c(0, 1, 2) sds <- c(1, 0.6, 3) # log-likelihood ll <- function (x) { return(sum(dnorm(x, mean = means, sd = sds, log = TRUE))) } # lower and upper bounds for prior lb <- rep(-10, 3) ub <- rep(10, 3) # create setup and run MCMC setup <- createBayesianSetup(likelihood = ll, lower = lb, upper = ub) out <- runMCMC(bayesianSetup = setup, settings = list(iterations = 1000), sampler = "DEzs") # sample from MCMC output with "burn-in" of 25% sample <- getSample(out$chain, start = 250, numSamples = 500) # use bridge sampling to get marginal likelihood bs_result <- bridgesample(chain = sample, nParams = out$setup$numPars, lower = lb, upper = ub, posterior = out$setup$posterior$density) bs_result
ll <- function(x) sum(dnorm(x, log = TRUE))

test <- createBayesianSetup(ll, prior = NULL, priorSampler = NULL, lower = -10, upper = 10)
str(test)
test$prior$density(0)

test$likelihood$density(c(1,1))
test$likelihood$density(1)
test$posterior$density(1)
test$posterior$density(1, returnAll = TRUE)

test$likelihood$density(matrix(rep(1,4), nrow = 2))
#test$posterior$density(matrix(rep(1,4), nrow = 2), returnAll = TRUE)
test$likelihood$density(matrix(rep(1,4), nrow = 4))

\dontrun{

## Example of how to use parallelization using the VSEM model
# Note that the parallelization produces overhead and is not always
# speeding things up. In this example, due to the small
# computational cost of the VSEM the parallelization is
# most likely to reduce the speed of the sampler.

# Creating reference data
PAR <- VSEMcreatePAR(1:1000)
refPars <- VSEMgetDefaults()
refPars[12,] <- c(0.2, 0.001, 1)
rownames(refPars)[12] <- "error-sd"

referenceData <- VSEM(refPars$best[1:11], PAR)
obs = apply(referenceData, 2, function(x) x + rnorm(length(x), sd = abs(x) * refPars$best[12]))

# Selecting parameters
parSel = c(1:6, 12)

## Building the likelihood function
likelihood <- function(par, sum = TRUE){
  x = refPars$best
  x[parSel] = par
  predicted <- VSEM(x[1:11], PAR)
  diff = c(predicted[,1:3] - obs[,1:3])
  llValues = dnorm(diff, sd = max(abs(c(predicted[,1:3])),0.0001) * x[12], log = TRUE)
  if (sum == FALSE) return(llValues)
  else return(sum(llValues))
}

# Prior
prior <- createUniformPrior(lower = refPars$lower[parSel], upper = refPars$upper[parSel])

## Definition of the packages and objects that are exported to the cluster.
# These are the objects that are used in the likelihood function.
opts <- list(packages = list("BayesianTools"), variables = list("refPars", "obs", "PAR"), dlls = NULL)

# Create Bayesian Setup
BSVSEM <- createBayesianSetup(likelihood, prior, best = refPars$best[parSel],
                              names = rownames(refPars)[parSel],
                              parallel = 2, parallelOptions = opts)

## The bayesianSetup can now be used in the runMCMC function.
# Note that not all samplers can make use of parallel
# computing.

# Remove the Bayesian Setup and close the cluster
stopParallel(BSVSEM)
rm(BSVSEM)

}
## Generate a test likelihood function. ll <- generateTestDensityMultiNormal(sigma = "no correlation") ## Create a BayesianSetup object from the likelihood ## is the recommended way of using the runMCMC() function. bayesianSetup <- createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3)) ## Finally we can run the sampler and have a look settings = list(iterations = 1000) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings) ## Correlation density plots: correlationPlot(out) ## additional parameters can be passed to getSample (see ?getSample for further information) ## e.g. to select which parameters to show or thinning (faster plot) correlationPlot(out, scaleCorText = FALSE, thin = 100, start = 200, whichParameters = c(1,2)) ## text to display correlation will be not scaled to the strength of the correlation correlationPlot(out, scaleCorText = FALSE) ## We can also switch the method for calculating correllations correlationPlot(out, scaleCorText = FALSE, method = "spearman")
# the BT package includes a number of convenience functions to specify # prior distributions, including createUniformPrior, createTruncatedNormalPrior # etc. If you want to specify a prior that corresponds to one of these # distributions, you should use these functions, e.g.: prior <- createUniformPrior(lower = c(0,0), upper = c(0.4,5)) prior$density(c(2, 3)) # outside of limits -> -Inf prior$density(c(0.2, 2)) # within limits, -0.6931472 # All default priors include a sampling function, i.e. you can create # samples from the prior via prior$sampler() # [1] 0.2291413 4.5410389 # if you want to specify a prior that does not have a default function, # you should use the createPrior function, which expects a density and # optionally a sampler function: density = function(par){ d1 = dunif(par[1], -2,6, log =TRUE) d2 = dnorm(par[2], mean= 2, sd = 3, log =TRUE) return(d1 + d2) } sampler = function(n=1){ d1 = runif(n, -2,6) d2 = rnorm(n, mean= 2, sd = 3) return(cbind(d1,d2)) } prior <- createPrior(density = density, sampler = sampler, lower = c(-10,-20), upper = c(10,20), best = NULL) # note that the createPrior supports additional truncation # To use a prior in an MCMC, include it in a BayesianSetup set.seed(123) ll <- function(x) sum(dnorm(x, log = TRUE)) # multivariate normal ll bayesianSetup <- createBayesianSetup(likelihood = ll, prior = prior) settings = list(iterations = 100) out <- runMCMC(bayesianSetup = bayesianSetup, settings = settings) # use createPriorDensity to create a new (estimated) prior from MCMC output newPrior = createPriorDensity(out, method = "multivariate", eps = 1e-10, lower = c(-10,-20), upper = c(10,20), best = NULL, scaling = 0.5)
testDensityMultiNormal <- generateTestDensityMultiNormal() parDen <- generateParallelExecuter(testDensityMultiNormal)$parallelFun x = matrix(runif(9,0,1), nrow = 3) parDen(x)
# sampling from the test function x = generateTestDensityMultiNormal(sample = TRUE, n = 1000)(1000) correlationPlot(x) marginalPlot(x) # generating the the density density = generateTestDensityMultiNormal(sample = FALSE) density(x[1,])
ll = function(x) sum(dnorm(x, log = TRUE)) setup = createBayesianSetup(ll, lower = c(-10,-10), upper = c(10,10)) settings = list(nrChains = 2, iterations = 1000) out <- runMCMC(bayesianSetup = setup, sampler = "DEzs", settings = settings) # population MCMCs divide the interations by the number of internal chains, # so the end of the 3 chains is 1000/3 = 334 sample <- getSample(out, start = 100, end = 334, thin = 10) # sampling with number of samples instead of thinning and # returning a coda object sample <- getSample(out, start = 100, numSamples = 60, coda = TRUE) plot(sample) # MCMC with a single chain: settings_2 <- list(nrChains = 1, iterations = 1000) out_2 <- runMCMC(setup, sampler = "Metropolis", settings = settings_2) sample_2 <- getSample(out_2, numSamples = 100)
bayesianSetup = createBayesianSetup( likelihood = generateTestDensityMultiNormal(sigma = "no correlation"), lower = rep(-10, 3), upper = rep(10, 3)) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = list(iterations = 2000, message = FALSE)) getVolume(out, prior = TRUE) bayesianSetup = createBayesianSetup( likelihood = generateTestDensityMultiNormal(sigma = "strongcorrelation"), lower = rep(-10, 3), upper = rep(10, 3)) out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = list(iterations = 2000, message = FALSE)) getVolume(out, prior = TRUE)
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/examples/getVolume.R
##############################################################
# Comparison of ML for two regression models

# Creating test data with quadratic relationship
sampleSize = 30
x <- (-(sampleSize-1)/2):((sampleSize-1)/2)
y <- 1 * x + 1 * x^2 + rnorm(n = sampleSize, mean = 0, sd = 10)
# plot(x, y, main = "Test Data")

# likelihoods for linear and quadratic model

likelihood1 <- function(param){
  pred = param[1] + param[2]*x + param[3]*x^2
  singlelikelihoods = dnorm(y, mean = pred, sd = 1/(param[4]^2), log = TRUE)
  return(sum(singlelikelihoods))
}

likelihood2 <- function(param){
  pred = param[1] + param[2]*x
  singlelikelihoods = dnorm(y, mean = pred, sd = 1/(param[3]^2), log = TRUE)
  return(sum(singlelikelihoods))
}

setUp1 <- createBayesianSetup(likelihood1, lower = c(-5,-5,-5,0.01), upper = c(5,5,5,30))
setUp2 <- createBayesianSetup(likelihood2, lower = c(-5,-5,0.01), upper = c(5,5,30))

out1 <- runMCMC(bayesianSetup = setUp1)
M1 = marginalLikelihood(out1, start = 1000)

out2 <- runMCMC(bayesianSetup = setUp2)
M2 = marginalLikelihood(out2, start = 1000)

### Calculating Bayes factor

exp(M1$ln.ML - M2$ln.ML)

# BF > 1 means the evidence is in favor of M1. See Kass, R. E. & Raftery, A. E.
# (1995) Bayes Factors. J. Am. Stat. Assoc., Amer Statist Assn, 90, 773-795.

### Calculating Posterior weights

exp(M1$ln.ML) / ( exp(M1$ln.ML) + exp(M2$ln.ML))

# If models have different model priors, multiply with the prior probabilities of each model.

\dontrun{
#############################################################
# Fractional Bayes factor

# Motivation: ML is very dependent on the prior, which is a problem if you
# have uninformative priors. You can see this by rerunning the above example
# with changed priors - suddenly, support for M1 is gone

setUp1 <- createBayesianSetup(likelihood1, lower = c(-500,-500,-500,0.01),
                              upper = c(500,500,500,3000))
setUp2 <- createBayesianSetup(likelihood2, lower = c(-500,-500,0.01),
                              upper = c(500,500,3000))

out1 <- runMCMC(bayesianSetup = setUp1)
M1 = marginalLikelihood(out1, start = 1000)

out2 <- runMCMC(bayesianSetup = setUp2)
M2 = marginalLikelihood(out2, start = 1000)

### Calculating Bayes factor
exp(M1$ln.ML - M2$ln.ML)

# it has therefore been suggested that ML should not be calculated on uninformative
# priors. But what to do if there are no informative priors?

# one option is to calculate the fractional BF, which means that one splits the data
# in half, uses the first half to fit the model, and then uses the posterior as a new
# (now informative) prior for the ML - let's do this for the previous case

# likelihoods with half the data
likelihood1 <- function(param){
  pred = param[1] + param[2]*x + param[3]*x^2
  singlelikelihoods = dnorm(y, mean = pred, sd = 1/(param[4]^2), log = TRUE)
  return(sum(singlelikelihoods[seq(1, 30, 2)]))
}

likelihood2 <- function(param){
  pred = param[1] + param[2]*x
  singlelikelihoods = dnorm(y, mean = pred, sd = 1/(param[3]^2), log = TRUE)
  return(sum(singlelikelihoods[seq(1, 30, 2)]))
}

setUp1 <- createBayesianSetup(likelihood1, lower = c(-500,-500,-500,0.01),
                              upper = c(500,500,500,3000))
setUp2 <- createBayesianSetup(likelihood2, lower = c(-500,-500,0.01),
                              upper = c(500,500,3000))

out1 <- runMCMC(bayesianSetup = setUp1)
out2 <- runMCMC(bayesianSetup = setUp2)

newPrior1 = createPriorDensity(out1, start = 200,
                               lower = c(-500,-500,-500,0.01),
                               upper = c(500,500,500,3000))
newPrior2 = createPriorDensity(out2, start = 200,
                               lower = c(-500,-500,0.01),
                               upper = c(500,500,3000))

# now rerun this with likelihoods for the other half of the data and the new priors

likelihood1 <- function(param){
  pred = param[1] + param[2]*x + param[3]*x^2
  singlelikelihoods = dnorm(y, mean = pred, sd = 1/(param[4]^2), log = TRUE)
  return(sum(singlelikelihoods[seq(2, 30, 2)]))
}

likelihood2 <- function(param){
  pred = param[1] + param[2]*x
  singlelikelihoods = dnorm(y, mean = pred, sd = 1/(param[3]^2), log = TRUE)
  return(sum(singlelikelihoods[seq(2, 30, 2)]))
}

setUp1 <- createBayesianSetup(likelihood1, prior = newPrior1)
setUp2 <- createBayesianSetup(likelihood2, prior = newPrior2)

out1 <- runMCMC(bayesianSetup = setUp1)
M1 = marginalLikelihood(out1, start = 1000)

out2 <- runMCMC(bayesianSetup = setUp2)
M2 = marginalLikelihood(out2, start = 1000)

### Calculating the fractional Bayes factor
exp(M1$ln.ML - M2$ln.ML)

}

############################################################
### Performance comparison ###

# Low dimensional case with narrow priors - all methods have low error

# we use a truncated normal for the likelihood to make sure that the density
# integrates to 1 - makes it easier to calculate the theoretical ML
likelihood <- function(x) sum(msm::dtnorm(x, log = TRUE, lower = -1, upper = 1))
prior = createUniformPrior(lower = rep(-1,2), upper = rep(1,2))
bayesianSetup <- createBayesianSetup(likelihood = likelihood, prior = prior)
out = runMCMC(bayesianSetup = bayesianSetup, settings = list(iterations = 5000))
# plot(out)

# theoretical value
theory = log(1/(2^2))

marginalLikelihood(out)$ln.ML - theory
marginalLikelihood(out, method = "Prior", numSamples = 500)$ln.ML - theory
marginalLikelihood(out, method = "HM", numSamples = 500)$ln.ML - theory
marginalLikelihood(out, method = "Bridge", numSamples = 500)$ln.ML - theory

# higher dimensions - wide prior - HM and Prior don't work

likelihood <- function(x) sum(msm::dtnorm(x, log = TRUE, lower = -10, upper = 10))
prior = createUniformPrior(lower = rep(-10,3), upper = rep(10,3))
bayesianSetup <- createBayesianSetup(likelihood = likelihood, prior = prior)
out = runMCMC(bayesianSetup = bayesianSetup, settings = list(iterations = 5000))
# plot(out)

# theoretical value
theory = log(1/(20^3))

marginalLikelihood(out)$ln.ML - theory
marginalLikelihood(out, method = "Prior", numSamples = 500)$ln.ML - theory
marginalLikelihood(out, method = "HM", numSamples = 500)$ln.ML - theory
marginalLikelihood(out, method = "Bridge", numSamples = 500)$ln.ML - theory
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/examples/marginalLikelihoodHelp.R
## Generate a test likelihood function.
ll <- generateTestDensityMultiNormal(sigma = "no correlation")

## Create a BayesianSetup
bayesianSetup <- createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3))

## Finally we can run the sampler and have a look
settings = list(iterations = 1000, adapt = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)

marginalPlot(out, prior = TRUE)

## We can plot the marginals in several ways:

## violin plots
marginalPlot(out, type = 'v', singlePanel = TRUE)
marginalPlot(out, type = 'v', singlePanel = FALSE)
marginalPlot(out, type = 'v', singlePanel = TRUE, prior = TRUE)

## density plots
marginalPlot(out, type = 'd', singlePanel = TRUE)
marginalPlot(out, type = 'd', singlePanel = FALSE)
marginalPlot(out, type = 'd', singlePanel = TRUE, prior = TRUE)

## if you have a very wide prior you can use the xrange option to plot only
## a certain parameter range
marginalPlot(out, type = 'v', singlePanel = TRUE, xrange = matrix(rep(c(-5, 5), 3), ncol = 3))

## Further options
## We can pass arguments to getSample (check ?getSample) and to the density and violin plots
marginalPlot(out, type = 'v', singlePanel = TRUE,
             settings = list(col = c('#FC006299','#00BBAA88')), prior = TRUE)
marginalPlot(out, type = 'v', singlePanel = TRUE, numSamples = 500)
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/examples/marginalPlotHelp.R
## Generate a test likelihood function.
ll <- generateTestDensityMultiNormal(sigma = "no correlation")

## Creating a BayesianSetup object from the likelihood is the recommended
## way of using the runMCMC() function.
bayesianSetup <- createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3))

## Finally we can run the sampler and have a look
settings = list(iterations = 1000, adapt = FALSE)
out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "Metropolis", settings = settings)

## out is of class bayesianOutput. There are various standard functions
## implemented for this output

plot(out)
correlationPlot(out)
marginalPlot(out)
summary(out)

## additionally, you can return the sample as a coda object, and make use of the
## coda functions for plotting and analysis

codaObject = getSample(out, start = 500, coda = TRUE)
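# Hedged sketch, added to the original example: once the sample is available as a
# coda object, standard coda diagnostics can be applied directly, for instance
# effective sample sizes and highest posterior density intervals.
coda::effectiveSize(codaObject)
coda::HPDinterval(codaObject)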
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/examples/mcmcRun.R
\dontrun{

# Create a BayesianSetup with the testDensityNormal likelihood and a uniform prior
bayesianSetup <- createBayesianSetup(likelihood = testDensityNormal,
                                     prior = createUniformPrior(lower = -10, upper = 10))

# running MCMC
out = runMCMC(bayesianSetup = bayesianSetup)

# diagnostic plots
plotDiagnostic(out)
}
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/examples/plotDiagnosticHelp.R
ll <- testDensityBanana
bayesianSetup <- createBayesianSetup(likelihood = ll, lower = rep(-10, 2), upper = rep(10, 2))

plotSensitivity(bayesianSetup)
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/examples/plotSensitivityHelp.R
# Create time series
ts <- VSEMcreatePAR(1:100)

# create fake "predictions"
pred <- ts + rnorm(length(ts), mean = 0, sd = 2)

# plot time series
par(mfrow = c(1,2))
plotTimeSeries(observed = ts, main = "Observed")
plotTimeSeries(observed = ts, predicted = pred, main = "Observed and predicted")
par(mfrow = c(1,1))
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/examples/plotTimeSeriesHelp.R
# Create input data for the model
# see help for the VSEM model
PAR <- VSEMcreatePAR(1:1000)

# load reference parameter definition (upper, lower prior)
refPars <- VSEMgetDefaults()
# this adds one additional parameter for the likelihood standard deviation (see below)
refPars[12,] <- c(2, 0.1, 4)
rownames(refPars)[12] <- "error-sd"
head(refPars)

# create some simulated test data
# generally recommended to start with simulated data before moving to real data
referenceData <- VSEM(refPars$best[1:11], PAR) # model predictions with reference parameters
referenceData[,1] = 1000 * referenceData[,1]
# this adds the error - needs to conform to the error definition in the likelihood
obs <- referenceData + rnorm(length(referenceData), sd = refPars$best[12])

parSel = c(1:6, 12)

# here is the likelihood
likelihood <- function(par, sum = TRUE){
  # set parameters that are not calibrated on default values
  x = refPars$best
  x[parSel] = par
  predicted <- VSEM(x[1:11], PAR) # replace here VSEM with your model
  predicted[,1] = 1000 * predicted[,1] # this is just rescaling
  diff <- c(predicted[,1:4] - obs[,1:4]) # difference between observed and predicted
  # univariate normal likelihood. Note that there is a parameter involved here that is fit
  llValues <- dnorm(diff, sd = x[12], log = TRUE)
  if (sum == FALSE) return(llValues)
  else return(sum(llValues))
}

# optional, you can also directly provide lower, upper in the createBayesianSetup, see help
prior <- createUniformPrior(lower = refPars$lower[parSel],
                            upper = refPars$upper[parSel],
                            best = refPars$best[parSel])

bayesianSetup <- createBayesianSetup(likelihood, prior, names = rownames(refPars)[parSel])

# settings for the sampler, iterations should be increased for real applications
settings <- list(iterations = 2000, nrChains = 2)

\dontrun{

out <- runMCMC(bayesianSetup = bayesianSetup, sampler = "DEzs", settings = settings)

# Posterior predictive simulations

# Create a prediction function
createPredictions <- function(par){
  # set the parameters that are not calibrated on default values
  x = refPars$best
  x[parSel] = par
  predicted <- VSEM(x[1:11], PAR) # replace here VSEM with your model
  return(predicted[,1] * 1000)
}

# Create an error function
createError <- function(mean, par){
  return(rnorm(length(mean), mean = mean, sd = par[7]))
}

# plot prior predictive distribution and prior predictive simulations
plotTimeSeriesResults(sampler = out, model = createPredictions, observed = obs[,1],
                      error = createError, prior = TRUE, main = "Prior predictive")
}
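# Hedged sketch, an addition to the original example, assuming the \dontrun block above
# has been executed so that `out`, `createPredictions` and `createError` exist: the same
# plotting function with prior = FALSE shows the posterior predictive distribution
# instead of the prior predictive.
plotTimeSeriesResults(sampler = out, model = createPredictions, observed = obs[,1],
                      error = createError, prior = FALSE, main = "Posterior predictive")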
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/examples/plotTimeSeriesResultsHelp.R
testMatrix = matrix(rep(c(0,0,0,0), 1000), ncol = 4)
testVector = c(0,0,0,0)

## Standard multivariate normal proposal generator
testGenerator <- createProposalGenerator(covariance = c(1,1,1,1), message = TRUE)

methods(class = "proposalGenerator")
print(testGenerator)

x = testGenerator$returnProposal(testVector)
x

x <- testGenerator$returnProposalMatrix(testMatrix)
boxplot(x)

## Changing the covariance
testGenerator$covariance = diag(rep(100,4))
testGenerator <- testGenerator$updateProposalGenerator(testGenerator, message = TRUE)

testGenerator$returnProposal(testVector)
x <- testGenerator$returnProposalMatrix(testMatrix)
boxplot(x)

## Changing the gibbs probabilities / probability to modify 1-n parameters
testGenerator$gibbsProbabilities = c(1,1,0,0)
testGenerator <- testGenerator$updateProposalGenerator(testGenerator)

testGenerator$returnProposal(testVector)
x <- testGenerator$returnProposalMatrix(testMatrix)
boxplot(x)

## Changing the gibbs weights / probability to pick each parameter
testGenerator$gibbsWeights = c(0.3,0.3,0.3,100)
testGenerator <- testGenerator$updateProposalGenerator(testGenerator)

testGenerator$returnProposal(testVector)
x <- testGenerator$returnProposalMatrix(testMatrix)
boxplot(x)

## Adding another function
otherFunction <- function(x) sample.int(10,1)

testGenerator <- createProposalGenerator(
  covariance = c(1,1,1),
  otherDistribution = otherFunction,
  otherDistributionLocation = c(0,0,0,1),
  otherDistributionScaled = TRUE
)

testGenerator$returnProposal(testVector)
x <- testGenerator$returnProposalMatrix(testMatrix)
boxplot(x)
table(x[,4])
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/examples/proposalGeneratorHelp.R
x = c(1,2)
y = testLinearModel(x)
plot(y)
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/examples/testLinearModel.R
# set up and run the MCMC
ll <- function(x) sum(dnorm(x, log = TRUE))
setup <- createBayesianSetup(likelihood = ll, lower = c(-10, -10), upper = c(10, 10))
settings <- list(iterations = 2000)
out <- runMCMC(bayesianSetup = setup, settings = settings, sampler = "Metropolis")

# plot the trace
tracePlot(sampler = out, thin = 10)
tracePlot(sampler = out, thin = 50)

# additional parameters can be passed on to getSample (see help)
tracePlot(sampler = out, thin = 10, start = 500)

# select parameter by index
tracePlot(sampler = out, thin = 10, start = 500, whichParameters = 2)
/scratch/gouwar.j/cran-all/cranData/BayesianTools/inst/examples/tracePlotHelp.R