#' Two Node Process Data Simulation
#'
#' Simulates data for a stochastic two node, two component process at steady state. Location indices are the same as what is shown in \code{vignette("Two_Node_Process", package = "BayesMassBal")}.
#'
#' @param K Numeric specifying the number of sample sets to be simulated.
#' @param feed List specifying properties of the process feed. See default for required structure. \code{rate} is the mean feed rate. \code{sd} is the standard deviation of the feed rate. \code{CuFeS2grade} is the mass percent CuFeS2 present in the feed. The grade is not stochastic. See Details for important information on specifying these values.
#' @param rec List specifying mean and variance in process performance. See default for required structure. \code{rec$component$mean} is a vector giving mean fractional recovery of the given component for \code{c(node1,node2)}. \code{rec$component$var} gives the variance in the process in a similar manner. See Details.
#' @param assayNoise List specifying standard deviations of random noise added to simulated process outputs. See default for required structure. The index of a vector within the list is equivalent to the index of the sampling location. See Details section for important information on specifying these values.
#' @param truncation Logical indicating if the simulation should be rerun, and previous results discarded, until no observed values are less than 0. Default is \code{TRUE}. See Details for more information.
#'
#' @details
#'
#' Each of the \code{K} data sets collected from the \code{twonodeSim()} simulation is independent and identically distributed.
#'
#' The feed rate to the process is normally distributed with a mean of \code{feed$rate}, and a standard deviation of \code{feed$sd}. If the feed rate is sufficiently small, and the standard deviation is sufficiently large, negative feed rates can be generated.
#'
#' Process recovery at each node is simulated from a \href{https://en.wikipedia.org/wiki/Beta_distribution}{beta distribution}, reparameterized as shown in \href{https://stats.stackexchange.com/a/12239}{this post} to make parameter specification more intuitive. This reparameterization is only valid when \eqn{\sigma^2 < \mu(1-\mu)}, and the list argument \code{rec} must be specified accordingly.
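#'
#' For example, a recovery with mean \code{mu} and variance \code{s2} maps to the two
#' \code{rbeta()} shape parameters as in the sketch below (an illustration of the linked
#' reparameterization, not code from the package):
#'
#' \preformatted{
#' mu <- 0.95; s2 <- 0.00008          # must satisfy s2 < mu*(1 - mu)
#' nu <- mu*(1 - mu)/s2 - 1           # concentration parameter
#' shape1 <- mu*nu                    # alpha
#' shape2 <- (1 - mu)*nu              # beta
#' r <- rbeta(1000, shape1, shape2)   # mean(r) is near 0.95, var(r) near 8e-05
#' }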
#'
#' The steps of the simulation for each sample set are:
#'
#' \enumerate{
#' \item Draw a random normally distributed feed rate.
#' \item Draw random values for recovery of the two components at each node.
#' \item Calculate mass flow rate at each location. These mass flow rates are the \emph{true} mass flow rates, given the process variability.
#' \item Add normally distributed noise to each observation, as specified in the argument \code{assayNoise}.
#' }
#'
#' \strong{If the standard deviations supplied to \code{feed} and \code{assayNoise} are sufficiently large, the simulation can return negative mass flow rates.}
#'
#' The argument \code{truncation = TRUE} discards negative mass flow rates, and reruns the simulation until all values are non-negative. For some combinations of a large \code{K} and the specifications in \code{feed} and \code{assayNoise}, this can happen frequently. If the simulation is run three or more times, a warning is printed stating that the returned expectations are unreliable. If this is the case, expectations should be calculated using analytical or Monte Carlo methods outside the abilities of this function; see the sketch below. For the default parameters truncation can occur, but it is rare, and the defaults were chosen so that a truncation warning is highly unlikely.
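#'
#' If the truncation warning appears, truncated expectations can instead be estimated by
#' averaging many simulated data sets. A minimal Monte Carlo sketch (an illustration, not
#' code from the package):
#'
#' \preformatted{
#' sims <- replicate(100, twonodeSim(K = 7)$simulation$CuFeS2)
#' apply(sims, 1, mean)   # Monte Carlo estimate of expected CuFeS2 flow rates
#' }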
#'
#' @return Returns a list of simulated data and expected values. List members are as follows:
#' @return \item{\code{simulation}}{List of matrices giving simulated data. \code{twonodeSim()$simulation} is structured so that it can directly be passed to the \code{\link{BMB}} function as the \code{y} argument.}
#' @return \item{\code{expectations}}{List of matrices giving expected values of the mass flow rate for each component at every location. See the Details section for information about instances that may create reliability issues with this output.}
#'
#' @importFrom stats rnorm rbeta
#'
#' @export
#'
#' @examples
#'
#' y <- twonodeSim()$simulation
#'
#' ## Then the BMB function can be run as:
#' C <- matrix(c(1,-1,0,-1,0,0,1,-1,0,-1), byrow = TRUE, ncol = 5, nrow = 2)
#' X <- constrainProcess(C = C)
#'
#' \donttest{BMB(X = X, y = y, BTE = c(100,600,1))}
twonodeSim <- function(K = 7, feed = list(rate = 100, sd = 6, CuFeS2grade = 1.2),
                       rec = list(CuFeS2 = list(mean = c(98,95)/100, var = c(0.00005,0.00008)),
                                  gangue = list(mean = c(7,4)/100, var = c(0.00005,0.000025))),
                       assayNoise = list(CuFeS2 = c(0.15,0.2,0.05,0.00005,0.005),
                                         gangue = c(5,1,0.03,2,0.5)),
                       truncation = TRUE){
f.rate <- feed$rate
recCu <- rec$CuFeS2$mean
recG <- rec$gangue$mean
g.cu <- feed$CuFeS2grade/100
g.gangue <- 1-g.cu
true.cu <- true.gangue <- data.frame(matrix(NA, ncol = 5, nrow = K))
names(true.cu) <- names(true.gangue) <- paste0("B", 1:5)
v.cu <- rec$CuFeS2$var
v.g <- rec$gangue$var
meantemp <- c(rec$CuFeS2$mean, rec$gangue$mean)
vartemp <- c(v.cu, v.g)
# The beta reparameterization is only valid for var < mean*(1 - mean)
if(!all(vartemp < (meantemp*(1-meantemp)))){warning("Process var must be less than mean*(1-mean).")}
# Convert each (mean, var) pair to beta shape parameters:
# alpha = -mu*(s2 + mu^2 - mu)/s2 and beta = (s2 + mu^2 - mu)*(mu - 1)/s2
rec.cu.params <- list()
rec.cu.params[["r1"]]$alpha <- -(recCu[1]*(v.cu[1] + recCu[1]^2 - recCu[1])/v.cu[1])
rec.cu.params[["r1"]]$beta <- (v.cu[1] + recCu[1]^2 - recCu[1])*(recCu[1]-1)/v.cu[1]
rec.cu.params[["r2"]]$alpha <- -(recCu[2]*(v.cu[2] + recCu[2]^2 - recCu[2])/v.cu[2])
rec.cu.params[["r2"]]$beta <- (v.cu[2] + recCu[2]^2 - recCu[2])*(recCu[2]-1)/v.cu[2]
rec.g.params <- list()
rec.g.params[["r1"]]$alpha <- -(recG[1]*(v.g[1] + recG[1]^2 - recG[1])/v.g[1])
rec.g.params[["r1"]]$beta <- (v.g[1] + recG[1]^2 - recG[1])*(recG[1]-1)/v.g[1]
rec.g.params[["r2"]]$alpha <- -(recG[2]*(v.g[2] + recG[2]^2 - recG[2])/v.g[2])
rec.g.params[["r2"]]$beta <- (v.g[2] + recG[2]^2 - recG[2])*(recG[2]-1)/v.g[2]
# Simulate; when truncation = TRUE, rerun until no observed value is negative.
count.while <- -1
repeat{
  # 1. Random normally distributed feed rate.
  feed.mass <- rnorm(K, mean = f.rate, sd = feed$sd)
  # 2. Random recoveries of the two components at each node.
  r1.cu <- rbeta(n = K, shape1 = rec.cu.params$r1$alpha, shape2 = rec.cu.params$r1$beta)
  r2.cu <- rbeta(n = K, shape1 = rec.cu.params$r2$alpha, shape2 = rec.cu.params$r2$beta)
  r1.g <- rbeta(n = K, shape1 = rec.g.params$r1$alpha, shape2 = rec.g.params$r1$beta)
  r2.g <- rbeta(n = K, shape1 = rec.g.params$r2$alpha, shape2 = rec.g.params$r2$beta)
  # 3. True mass flow rates at the five sampling locations.
  true.cu$B1 <- feed.mass*g.cu
  true.gangue$B1 <- feed.mass - true.cu$B1
  true.cu$B2 <- true.cu$B1 * r1.cu
  true.cu$B3 <- true.cu$B2 * r2.cu
  true.cu$B4 <- true.cu$B1 * (1-r1.cu)
  true.cu$B5 <- true.cu$B2 * (1-r2.cu)
  true.gangue$B2 <- true.gangue$B1 * r1.g
  true.gangue$B3 <- true.gangue$B2 * r2.g
  true.gangue$B4 <- true.gangue$B1 * (1-r1.g)
  true.gangue$B5 <- true.gangue$B2 * (1-r2.g)
  # 4. Observed values: true flow rates plus assay noise.
  obs.cu <- t(apply(true.cu, 1, noise, s = assayNoise$CuFeS2))
  obs.gangue <- t(apply(true.gangue, 1, noise, s = assayNoise$gangue))
  count.while <- count.while + 1
  if(!truncation || !(any(obs.cu < 0) | any(obs.gangue < 0))) break
}
if(truncation && count.while >= 2){
  warning("Excessive truncation has occurred. Returned expectations may be unreliable. Reduce assayNoise values.")
}
# Expected mass flow rates at each location: mean feed rate times mean recoveries.
true.cu <- true.gangue <- data.frame(matrix(NA, ncol = 5, nrow = 1))
names(true.cu) <- names(true.gangue) <- paste0("B", 1:5)
true.cu$B1 <- f.rate*g.cu
true.gangue$B1 <- f.rate - true.cu$B1
true.cu$B2 <- true.cu$B1 * recCu[1]
true.cu$B3 <- true.cu$B2 * recCu[2]
true.cu$B4 <- true.cu$B1 * (1-recCu[1])
true.cu$B5 <- true.cu$B2 * (1-recCu[2])
true.gangue$B2 <- true.gangue$B1 * recG[1]
true.gangue$B3 <- true.gangue$B2 * recG[2]
true.gangue$B4 <- true.gangue$B1 * (1-recG[1])
true.gangue$B5 <- true.gangue$B2 * (1-recG[2])
names(true.cu) <- names(true.gangue) <- paste0("y", 1:5)
colnames(obs.cu) <- colnames(obs.gangue) <- paste0("y", 1:5)
expectations <- list(CuFeS2 = t(true.cu), gangue = t(true.gangue))
simulation <- list(CuFeS2 = t(obs.cu), gangue = t(obs.gangue))
return(list(simulation = simulation, expectations = expectations))
}
/scratch/gouwar.j/cran-all/cranData/BayesMassBal/R/twonodeSim.R
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(BayesMassBal)
## ----multiprocess, echo = FALSE, fig.height= 1.5, fig.width = 3---------------
yshift <- 1
rekt <- data.frame(matrix(NA, ncol = 4, nrow = 2))
names(rekt) <- c("xleft", "ybottom", "xright", "ytop")
rekt$xleft <- c(3,7)
rekt$ybottom <- 4
rekt$xright <- c(5,9)
rekt$ytop <- 6
aros <- data.frame(matrix(NA, ncol = 4, nrow = 5))
names(aros) <-c("x0","y0","x1","y1")
aros[1,] <- c(1,5,rekt$xleft[1],5)
aros[2,] <- c(rekt$xright[1],5,rekt$xleft[2],5)
aros[3,] <- c(rekt$xright[2],5,11,5)
aros[4,] <- c(rekt$xright[1] - 1, rekt$ybottom[1],rekt$xright[1] - 1, rekt$ybottom[1] - 2)
aros[5,] <- c(rekt$xright[2] - 1, rekt$ybottom[2], rekt$xright[2] - 1 , rekt$ybottom[2] - 2)
aros$y0 <- aros$y0
aros$y1 <- aros$y1
b.loc <- data.frame(matrix(NA, ncol = 2, nrow = 5))
names(b.loc) <- c("x","y")
b.loc[1,] <- c(0.5,aros$y0[1])
b.loc[2,] <- c(mean(c(aros$x0[2],aros$x1[2])),aros$y0[2] + 0.6)
b.loc[3,] <- c(aros$x1[3] + 0.5, aros$y1[3])
b.loc[4,] <- c(aros$x1[4], aros$y1[4] - 0.6)
b.loc[5,] <- c(aros$x1[5], aros$y1[5] - 0.6)
p.loc <- data.frame(matrix(NA, ncol = 2, nrow = 2))
names(p.loc) <- c("x","y")
p.loc$x <- rekt$xleft + 1
p.loc$y <- rekt$ybottom + 1
par(mar = c(0.1,0.1,0.1,0.1))
plot(1, type="n", xlab="", ylab="", xlim=c(0, 12), ylim=c(1, 6), axes = FALSE)
rect(xleft = rekt$xleft, ybottom = rekt$ybottom, xright = rekt$xright, ytop =rekt$ytop, col = "skyblue")
arrows(aros$x0,aros$y0, x1= aros$x1,y1 = aros$y1, code = 2, length = 0.1)
for(i in 1:5){
text(b.loc$x[i], b.loc$y[i],labels = bquote(y[.(i)]), adj = c(0.5,0.5), cex = 1.2)
}
for(i in 1:2){
text(p.loc$x[i], p.loc$y[i], labels = bquote(P[.(i)]), adj = c(0.5,0.5), cex = 1.2)
}
text(1.5,4.6,labels= "F", adj = c(0.5,0.5), cex = 0.7)
text(c(5.5,9.5),c(4.6,4.6), labels = "C", adj= c(0.5,0.5), cex = 0.7)
text(c(4.4,8.4),c(3.5,3.5), labels = "T", adj= c(0.5,0.5), cex = 0.7)
## ----cdef---------------------------------------------------------------------
C <- matrix(c(1,-1,0,-1,0,0,1,-1,0,-1), nrow = 2, ncol = 5, byrow = TRUE)
C
## ----Xdef---------------------------------------------------------------------
X <- constrainProcess(C = C)
X
## ----Xdefcsv------------------------------------------------------------------
constraint_file_location <- system.file("extdata", "twonode_constraints.csv",package = "BayesMassBal")
X <- constrainProcess(file = constraint_file_location)
## ----datasim------------------------------------------------------------------
y <- importObservations(file = system.file("extdata", "twonode_example.csv",
package = "BayesMassBal"),
header = TRUE, csv.params = list(sep = ";"))
## ----indepsamp----------------------------------------------------------------
indep.samples <- BMB(X = X, y = y, cov.structure = "indep", BTE = c(100,3000,1), lml = TRUE, verb = 0)
## ----feedplot-----------------------------------------------------------------
plot(indep.samples,sample.params = list(ybal = list(CuFeS2 = 3)),
layout = "dens",hdi.params = c(1,0.95))
## ----traceplot----------------------------------------------------------------
plot(indep.samples,sample.params = list(beta = list(CuFeS2 = 1:3, gangue = 1:3)),layout = "trace",hdi.params = c(1,0.95))
## ----diagnostics--------------------------------------------------------------
indep.samples$diagnostics
## ----compdraw-----------------------------------------------------------------
component.samples <- BMB(X = X, y = y, cov.structure = "component", BTE = c(100,3000,1), lml = TRUE, verb = 0)
## ----locdraw------------------------------------------------------------------
location.samples <- BMB(X = X, y = y, cov.structure = "location", BTE = c(100,3000,1), lml = TRUE, verb = 0)
## ----compvsindep--------------------------------------------------------------
indep.samples$lml - component.samples$lml
## ----compvsloc----------------------------------------------------------------
component.samples$lml - location.samples$lml
## ----bayessummary-------------------------------------------------------------
summary(component.samples, export = NA)
## ----maineff------------------------------------------------------------------
fn_example <- function(X,ybal){
cu.frac <- 63.546/183.5
feed.mass <- ybal$CuFeS2[1] + ybal$gangue[1]
# Concentrate mass per ton feed
con.mass <- (ybal$CuFeS2[3] + ybal$gangue[3])/feed.mass
# Copper mass per ton feed
cu.mass <- (ybal$CuFeS2[3]*cu.frac)/feed.mass
gam <- c(-1,-1/feed.mass,cu.mass,-con.mass,-cu.mass,-con.mass)
f <- X %*% gam
return(f)
}
rangex <- matrix(c(4.00 ,6.25,1125,1875,3880,9080,20,60,96,208,20.0,62.5),
ncol = 6, nrow = 2)
mE_example <- mainEff(indep.samples, fn = "fn_example",rangex = rangex,xj = 3, N = 25, res = 25)
## ----maineffplot--------------------------------------------------------------
m.sens<- mE_example$fn.out[2,]
hpd.sens <- mE_example$fn.out[c(1,3),]
row.names(hpd.sens) <- c("upper", "lower")
g.plot <- mE_example$g/2000
y.lim <- range(hpd.sens)
lzero.bound <- apply(hpd.sens,1,function(X){which(X <= 0)})
lzero.mean <- which(m.sens <= 0)
main.grid <- pretty(g.plot)
minor.grid <- pretty(g.plot,25)
minor.grid <- minor.grid[-which(minor.grid %in% main.grid)]
y.main <- pretty(hpd.sens)
opar <- par(no.readonly =TRUE)
par(mar = c(4.2,4,1,1))
plot(g.plot,m.sens, type = "n", xlim = range(g.plot), ylim = y.lim, ylab = "Net Revenue ($/ton Feed)", xlab= "Cu Price ($/lb)")
abline(v = main.grid, lty = 6, col = "grey", lwd = 1)
abline(v = minor.grid, lty =3, col = "grey", lwd = 0.75)
abline(h = 0, col = "red", lwd = 1, lty = 6)
lines(g.plot[lzero.mean],m.sens[lzero.mean],col = "red", lwd =2)
lines(g.plot[-lzero.mean[-length(lzero.mean)]],m.sens[-lzero.mean[-length(lzero.mean)]],col = "darkgreen", lwd =2)
lines(g.plot[lzero.bound$lower],hpd.sens[2,][lzero.bound$lower], lty = 5, lwd = 2, col = "red")
lines(g.plot[-lzero.bound$lower],hpd.sens[2,][-lzero.bound$lower], lty = 5, lwd = 2, col = "darkgreen")
lines(g.plot[lzero.bound$upper],hpd.sens[1,][lzero.bound$upper], lty = 5, lwd = 2, col = "red")
lines(g.plot[-lzero.bound$upper],hpd.sens[1,][-lzero.bound$upper], lty = 5, lwd = 2, col= "darkgreen")
legend("topleft", legend = c("Expected Main Effect", "95% Bounds", "Net Revenue < $0", "Net Revenue > $0"), col = c("black","black","red", "darkgreen"), lty = c(1,6,1,1), lwd = c(2,2,2,2), bg = "white")
par(opar)
/scratch/gouwar.j/cran-all/cranData/BayesMassBal/inst/doc/Two_Node_Process.R
---
title: "Two_Node_Process"
author: "Scott Koermer"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Two_Node_Process}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(BayesMassBal)
```
The function `BMB` is used with a two node process and simulated data.
```{r multiprocess, echo = FALSE, fig.height= 1.5, fig.width = 3}
yshift <- 1
rekt <- data.frame(matrix(NA, ncol = 4, nrow = 2))
names(rekt) <- c("xleft", "ybottom", "xright", "ytop")
rekt$xleft <- c(3,7)
rekt$ybottom <- 4
rekt$xright <- c(5,9)
rekt$ytop <- 6
aros <- data.frame(matrix(NA, ncol = 4, nrow = 5))
names(aros) <-c("x0","y0","x1","y1")
aros[1,] <- c(1,5,rekt$xleft[1],5)
aros[2,] <- c(rekt$xright[1],5,rekt$xleft[2],5)
aros[3,] <- c(rekt$xright[2],5,11,5)
aros[4,] <- c(rekt$xright[1] - 1, rekt$ybottom[1],rekt$xright[1] - 1, rekt$ybottom[1] - 2)
aros[5,] <- c(rekt$xright[2] - 1, rekt$ybottom[2], rekt$xright[2] - 1 , rekt$ybottom[2] - 2)
aros$y0 <- aros$y0
aros$y1 <- aros$y1
b.loc <- data.frame(matrix(NA, ncol = 2, nrow = 5))
names(b.loc) <- c("x","y")
b.loc[1,] <- c(0.5,aros$y0[1])
b.loc[2,] <- c(mean(c(aros$x0[2],aros$x1[2])),aros$y0[2] + 0.6)
b.loc[3,] <- c(aros$x1[3] + 0.5, aros$y1[3])
b.loc[4,] <- c(aros$x1[4], aros$y1[4] - 0.6)
b.loc[5,] <- c(aros$x1[5], aros$y1[5] - 0.6)
p.loc <- data.frame(matrix(NA, ncol = 2, nrow = 2))
names(p.loc) <- c("x","y")
p.loc$x <- rekt$xleft + 1
p.loc$y <- rekt$ybottom + 1
par(mar = c(0.1,0.1,0.1,0.1))
plot(1, type="n", xlab="", ylab="", xlim=c(0, 12), ylim=c(1, 6), axes = FALSE)
rect(xleft = rekt$xleft, ybottom = rekt$ybottom, xright = rekt$xright, ytop =rekt$ytop, col = "skyblue")
arrows(aros$x0,aros$y0, x1= aros$x1,y1 = aros$y1, code = 2, length = 0.1)
for(i in 1:5){
text(b.loc$x[i], b.loc$y[i],labels = bquote(y[.(i)]), adj = c(0.5,0.5), cex = 1.2)
}
for(i in 1:2){
text(p.loc$x[i], p.loc$y[i], labels = bquote(P[.(i)]), adj = c(0.5,0.5), cex = 1.2)
}
text(1.5,4.6,labels= "F", adj = c(0.5,0.5), cex = 0.7)
text(c(5.5,9.5),c(4.6,4.6), labels = "C", adj= c(0.5,0.5), cex = 0.7)
text(c(4.4,8.4),c(3.5,3.5), labels = "T", adj= c(0.5,0.5), cex = 0.7)
```
The constraints around these process nodes are:
\begin{align}
y_1 &= y_2 +y_4\\
y_2 &= y_3 +y_5
\end{align}
Therefore, rewriting the constraints as $y_1 - y_2 - y_4 = 0$ and $y_2 - y_3 - y_5 = 0$, the matrix of constraints, `C`, is:
```{r cdef}
C <- matrix(c(1,-1,0,-1,0,0,1,-1,0,-1), nrow = 2, ncol = 5, byrow = TRUE)
C
```
The `constrainProcess` function in the `BayesMassBal` package is used to generate an `X` matrix based on `C` that will later be used with the `BMB` function.
```{r Xdef}
X <- constrainProcess(C = C)
X
```
Constraints can also be imported from a `.csv` file. The path to a file, included in the `BayesMassBal` package, for this process can be found and constraints can be imported by specifying the location for the `file` argument for `constrainProcess` as shown below:
```{r Xdefcsv}
constraint_file_location <- system.file("extdata", "twonode_constraints.csv",package = "BayesMassBal")
X <- constrainProcess(file = constraint_file_location)
```
The previously simulated data is loaded from a `.csv` file using the `importObservations()` function. The local location of the file imported below can be found by typing `system.file("extdata", "twonode_example.csv",package = "BayesMassBal")`. View the document in Excel to see how your data should be formatted for import. ***Note:*** it is not required that the entries in the `*.csv` file be separated by `";"`.
```{r datasim}
y <- importObservations(file = system.file("extdata", "twonode_example.csv",
package = "BayesMassBal"),
header = TRUE, csv.params = list(sep = ";"))
```
Then, the `BMB` function is used to generate the distribution of constrained masses from the data with `cov.structure = "indep"`.
```{r indepsamp}
indep.samples <- BMB(X = X, y = y, cov.structure = "indep", BTE = c(100,3000,1), lml = TRUE, verb = 0)
```
The output of `BMB` is a `BayesMassBal` object, which has its own method for the `plot()` function. By adding the argument `layout = "dens"`, and indicating via a list supplied to `sample.params` that the mass balanced flow rate for CuFeS2 at $y_3$ should be plotted, the desired distribution can be plotted with its 95% [Highest Posterior Density Interval](https://en.wikipedia.org/wiki/Credible_interval).
```{r feedplot}
plot(indep.samples,sample.params = list(ybal = list(CuFeS2 = 3)),
layout = "dens",hdi.params = c(1,0.95))
```
It is also possible to generate trace plots to inspect convergence of the Gibbs sampler. Here are trace plots for $\beta$:
```{r traceplot}
plot(indep.samples,sample.params = list(beta = list(CuFeS2 = 1:3, gangue = 1:3)),layout = "trace",hdi.params = c(1,0.95))
```
Quantitative diagnostics for convergence and autocorrelation are available as part of the output from `BMB`:
```{r diagnostics}
indep.samples$diagnostics
```
The model with independent variances may not be the best fitting model. Models specifying covariance between sampling locations for a single component, and covariance between components at a single location, are fit below.
```{r compdraw}
component.samples <- BMB(X = X, y = y, cov.structure = "component", BTE = c(100,3000,1), lml = TRUE, verb = 0)
```
```{r locdraw}
location.samples <- BMB(X = X, y = y, cov.structure = "location", BTE = c(100,3000,1), lml = TRUE, verb = 0)
```
Computing $\log(\mathrm{Bayes Factor})$ for $BF = p(y|\texttt{indep})/p(y|\texttt{component})$:
```{r compvsindep}
indep.samples$lml - component.samples$lml
```
Then comparing $p(y|\texttt{component})$ to $p(y|\texttt{location})$:
```{r compvsloc}
component.samples$lml - location.samples$lml
```
These comparisons show there is little difference between the models where `cov.structure = "location"` and `cov.structure = "component"`, but both of these models explain the data better than `cov.structure = "indep"`.
We can view a summary of the favored model by passing a `BayesMassBal` object to the `summary` function. While not done in this case, the summary table can be saved by passing the desired name of a `*.csv` file to the `export` argument.
```{r bayessummary}
summary(component.samples, export = NA)
```
The main effect of a variable independent of the process can be calculated by supplying a function `fn` that takes as arguments the mass balanced flow rates `ybal` and the random, independent, uniformly distributed variables `x`. Information can be gained on the main effect of a particular element of `x`, `xj`, on `fn` using the `mainEff` function. Output from `mainEff` includes information on the distribution of $E_x\lbrack f(x,y_{\mathrm{bal}})|x_j \rbrack$.
```{r maineff}
fn_example <- function(X,ybal){
cu.frac <- 63.546/183.5
feed.mass <- ybal$CuFeS2[1] + ybal$gangue[1]
# Concentrate mass per ton feed
con.mass <- (ybal$CuFeS2[3] + ybal$gangue[3])/feed.mass
# Copper mass per ton feed
cu.mass <- (ybal$CuFeS2[3]*cu.frac)/feed.mass
gam <- c(-1,-1/feed.mass,cu.mass,-con.mass,-cu.mass,-con.mass)
f <- X %*% gam
return(f)
}
rangex <- matrix(c(4.00 ,6.25,1125,1875,3880,9080,20,60,96,208,20.0,62.5),
ncol = 6, nrow = 2)
mE_example <- mainEff(indep.samples, fn = "fn_example",rangex = rangex,xj = 3, N = 25, res = 25)
```
A plot of the output can be made. To get lines that are better connected, increase `N` in the `mainEff` function.
```{r maineffplot}
m.sens<- mE_example$fn.out[2,]
hpd.sens <- mE_example$fn.out[c(1,3),]
row.names(hpd.sens) <- c("upper", "lower")
g.plot <- mE_example$g/2000
y.lim <- range(hpd.sens)
lzero.bound <- apply(hpd.sens,1,function(X){which(X <= 0)})
lzero.mean <- which(m.sens <= 0)
main.grid <- pretty(g.plot)
minor.grid <- pretty(g.plot,25)
minor.grid <- minor.grid[-which(minor.grid %in% main.grid)]
y.main <- pretty(hpd.sens)
opar <- par(no.readonly =TRUE)
par(mar = c(4.2,4,1,1))
plot(g.plot,m.sens, type = "n", xlim = range(g.plot), ylim = y.lim, ylab = "Net Revenue ($/ton Feed)", xlab= "Cu Price ($/lb)")
abline(v = main.grid, lty = 6, col = "grey", lwd = 1)
abline(v = minor.grid, lty =3, col = "grey", lwd = 0.75)
abline(h = 0, col = "red", lwd = 1, lty = 6)
lines(g.plot[lzero.mean],m.sens[lzero.mean],col = "red", lwd =2)
lines(g.plot[-lzero.mean[-length(lzero.mean)]],m.sens[-lzero.mean[-length(lzero.mean)]],col = "darkgreen", lwd =2)
lines(g.plot[lzero.bound$lower],hpd.sens[2,][lzero.bound$lower], lty = 5, lwd = 2, col = "red")
lines(g.plot[-lzero.bound$lower],hpd.sens[2,][-lzero.bound$lower], lty = 5, lwd = 2, col = "darkgreen")
lines(g.plot[lzero.bound$upper],hpd.sens[1,][lzero.bound$upper], lty = 5, lwd = 2, col = "red")
lines(g.plot[-lzero.bound$upper],hpd.sens[1,][-lzero.bound$upper], lty = 5, lwd = 2, col= "darkgreen")
legend("topleft", legend = c("Expected Main Effect", "95% Bounds", "Net Revenue < $0", "Net Revenue > $0"), col = c("black","black","red", "darkgreen"), lty = c(1,6,1,1), lwd = c(2,2,2,2), bg = "white")
par(opar)
```
/scratch/gouwar.j/cran-all/cranData/BayesMassBal/inst/doc/Two_Node_Process.Rmd
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(BayesMassBal)
set.seed(5)
## ----datagen, echo = FALSE----------------------------------------------------
y <- rep(NA, times = 41)
y[1] <- 10
mu <- 20
alpha <- 0.8
sig <- 10
for(i in 2:length(y)){
y[i] <- mu + alpha * y[i-1] + rnorm(1)*sig
}
plot(0:(length(y)-1),y,main= "Observations", ylab = "Mass Flow Rate of an Element", xlab ="Time", pch = 19)
## ----fakedatagen, eval = FALSE------------------------------------------------
# y <- rep(NA, times = 41)
# y[1] <- 10
# mu <- 20
# alpha <- 0.8
# sig <- 10
#
# for(i in 2:length(y)){
# y[i] <- mu + alpha * y[i-1] + rnorm(1)*sig
# }
## ----ssEstunconst-------------------------------------------------------------
fit1 <- ssEst(y)
## ----plot1--------------------------------------------------------------------
plot(fit1)
## ----ssConst------------------------------------------------------------------
fit2 <- ssEst(y, stationary = TRUE)
plot(fit2)
/scratch/gouwar.j/cran-all/cranData/BayesMassBal/inst/doc/ssEst.R
---
title: "ssEst"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{ssEst}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(BayesMassBal)
set.seed(5)
```
```{r datagen, echo = FALSE}
y <- rep(NA, times = 41)
y[1] <- 10
mu <- 20
alpha <- 0.8
sig <- 10
for(i in 2:length(y)){
y[i] <- mu + alpha * y[i-1] + rnorm(1)*sig
}
plot(0:(length(y)-1),y,main= "Observations", ylab = "Mass Flow Rate of an Element", xlab ="Time", pch = 19)
```
When looking over mass flow rate data from an individual stream of a process, such as the simulated data above, it is often unclear when the process achieved steady state, as well as what the steady state mass flow rate is. Multiple engineers, each using different tools and reasoning, will reach different determinations from the same data. Inconsistencies in estimation can result in misleading conclusions about process performance. Thus, there is a need for a method that consistently characterizes such data.
The `ssEst` function fills this gap, and also provides quantitative information on whether the stream is at steady state in the first place. The autoregressive model below, with a lag of 1, is fit to the data.
$$
y_t = \mu + \alpha y_{t-1} + \epsilon
$$
Where $\epsilon \sim \mathcal{N}(0,\sigma^2)$. Using Bayesian inference, the `ssEst` function generates samples from the distribution of the model parameters $\mu$, $\alpha$, and $\sigma^2$. If $|\alpha| < 1$ the model is considered stationary, or mean reverting, and its behavior is predictable. In that case it is possible to calculate $\mathbb{E}\lbrack y\rbrack$ with the following equation:
$$
\mathbb{E}\lbrack y \rbrack = \frac{\mu}{1-\alpha}
$$
The data set above was generated with the following code:
```{r fakedatagen, eval = FALSE}
y <- rep(NA, times = 41)
y[1] <- 10
mu <- 20
alpha <- 0.8
sig <- 10
for(i in 2:length(y)){
y[i] <- mu + alpha * y[i-1] + rnorm(1)*sig
}
```
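Plugging these generating parameters into the steady state equation above gives the mass flow rate the sampler should recover; a quick arithmetic check:
```{r sscheck}
20/(1 - 0.8) # E[y] = mu/(1 - alpha) = 100
```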
Note that $\alpha = 0.8$ implies the process is stationary. The `ssEst` function is then used on these observed values.
```{r ssEstunconst}
fit1 <- ssEst(y)
```
Since `fit1` is a `"BayesMassBal"` object, it can easily be passed to `plot()` so the output can be quickly inspected.
```{r plot1}
plot(fit1)
```
One can see the density obtained from the samples of $\alpha$. It is up to the engineer to decide if there is enough evidence to deem the trend stationary. If all of the samples of $\alpha$ obtained are in the interval (-1,1), then the distribution for the estimation of steady state will also be calculated and displayed. In this case, only a small fraction of the samples of $\alpha$ are outside of the valid range. It is likely the chain is stationary.
To inspect $\mathbb{E}\lbrack y \rbrack$, `ssEst` is rerun with the argument `stationary = TRUE`. This setting specifies a prior distribution on $\alpha$ that forces all samples to be within (-1,1), so that the steady state mass flow rate can be calculated. Refitting and re-plotting gives:
```{r ssConst}
fit2 <- ssEst(y, stationary = TRUE)
plot(fit2)
```
The point estimate $\mathbb{E}\lbrack y \rbrack$ can then be used in a classical point mass balance, or some subset of the data can be used with the `BMB()` function. One way to choose the time at which the system reached steady state is to use all data points after the observations first cross the line for the Expected Steady State. Another method would be to use a subset of samples of $\mathbb{E} \lbrack y | \mu, \alpha \rbrack$ with the `BMB` function.
/scratch/gouwar.j/cran-all/cranData/BayesMassBal/inst/doc/ssEst.Rmd
logLik.bayesmixsurv <- function(object, ...) {
ret <- max(object$smp$loglike)
  # TODO: incomplete; how do we find df in the presence of lambda? Can we assume lambda has only an additive
  # contribution, and can therefore be ignored when comparing models with the same lambda?
class(ret) <- "logLik"
return (ret)
}
# estimate
bayesmixsurv <- function(formula1, data, formula2=formula1, stratCol=NULL, weights, subset
, na.action=na.fail, control=bayesmixsurv.control(), print.level=2) {
# TODO: implement weights, subset, na.action
mycall <- match.call()
if (!missing(weights)) warning("weights argument not supported yet, this argument will be ignored")
if (!missing(subset)) warning("subset argument not supported yet, this argument will be ignored")
if (!identical(na.action,na.fail)) stop("na.action argument not supported yet; only na.fail is currently accepted")
# TODO: need to make sure number of rows in X and X2 ends up being equal (watch out for removal of missing rows)
mf1 <- model.frame(formula1, data, drop.unused.levels=TRUE, na.action = na.fail) # incorporate na.action argument
mt1 <- attr(mf1, "terms")
X1 <- model.matrix(mt1, mf1)
y <- model.response(mf1, "numeric")
colnamesX1 <- colnames(X1)
if (colnamesX1[1]!="(Intercept)") stop("intercept term must be included in formula1")
mf2 <- model.frame(formula2, data, drop.unused.levels=TRUE, na.action = na.fail) # incorporate na.action argument
mt2 <- attr(mf2, "terms")
X2 <- model.matrix(mt2, mf2)
colnamesX2 <- colnames(X2)
if (colnamesX2[1]!="(Intercept)") stop("intercept term must be included in formula2")
if (is.list(control$scalex)) {
X1 <- bayesmixsurv.scale(X1, apply.sc=control$scalex$apply.scale.X1, center=control$scalex$centerVec.X1, scale=control$scalex$scaleVec.X1)
X2 <- bayesmixsurv.scale(X2, apply.sc=control$scalex$apply.scale.X2, center=control$scalex$centerVec.X2, scale=control$scalex$scaleVec.X2)
control$scalex <- TRUE
} else if (control$scalex) {
X1 <- bayesmixsurv.scale(X1)
X2 <- bayesmixsurv.scale(X2)
}
apply.scale.X1 <- attr(X1, "apply.scale")
apply.scale.X2 <- attr(X2, "apply.scale")
centerVec.X1 <- attr(X1, "centerVec")
centerVec.X2 <- attr(X2, "centerVec")
scaleVec.X1 <- attr(X1, "scaleVec")
scaleVec.X2 <- attr(X2, "scaleVec")
if (!is.null(stratCol)) {
if ((stratCol %in% all.vars(formula1)) || (stratCol %in% all.vars(formula2))) stop("stratification column cannot be part of either formulas")
data$stratColFactor <- as.factor(data[,stratCol]) # TODO: check that stratColFactor can be used as a name
gformula <- ~ stratColFactor
mfg <- model.frame(gformula, data, drop.unused.levels=TRUE)
mtg <- attr(mfg, "terms")
Xg <- model.matrix(mtg, mfg) # TODO: make sure intercept is first column
stratContrasts <- attr(Xg, "contrasts")
Xg <- Xg[,2:ncol(Xg),drop=F]
stratXlevels <- .getXlevels(mtg, mfg)
stratTerms <- mtg
colnamesXg <- colnames(Xg)
} else {
Xg <- NULL
stratContrasts <- NULL
stratXlevels <- NULL
stratTerms <- NULL
colnamesXg <- NULL
}
# stratCol is stored so that predict() can rebuild the stratification matrix for newdata
ret <- list(call=mycall, formula1=formula1, formula2=formula2, stratCol=stratCol, weights=rep(1,nrow(X1)), subset=1:nrow(X1)
, na.action=na.action, control=control, X1=X1, X2=X2, y=y
, contrasts1=attr(X1, "contrasts"), contrasts2=attr(X2, "contrasts")
, xlevels1=.getXlevels(mt1, mf1), xlevels2=.getXlevels(mt2, mf2)
, terms1=mt1, terms2=mt2
, colnamesX1=colnamesX1, colnamesX2=colnamesX2
, apply.scale.X1=apply.scale.X1, apply.scale.X2=apply.scale.X2
, centerVec.X1=centerVec.X1, centerVec.X2=centerVec.X2
, scaleVec.X1=scaleVec.X1, scaleVec.X2=scaleVec.X2
, Xg=Xg, stratContrasts=stratContrasts, stratXlevels=stratXlevels, stratTerms=stratTerms, colnamesXg=colnamesXg
)
mcmc <- bayesmixsurv.mcmc(X1, X2, y[,1], y[,2], Xg, control$lambda1, control$lambda2, control$iter, control$single
, control$alpha2.fixed, control$alpha.boundary, control$sd.thresh, print.level, control$nskip)
sel <- (control$burnin+1):control$iter
median <- list(alpha1=median(mcmc$alpha1[sel]), alpha2=median(mcmc$alpha2[sel]),
beta1=apply(mcmc$beta1[sel,,drop=F], 2, median), beta2=apply(mcmc$beta2[sel,,drop=F], 2, median)
, gamma=apply(mcmc$gamma[sel,,drop=F], 2, median), sigma.gamma=median(mcmc$sigma.gamma[sel]))
km.fit <- survfit(bayesmixsurv.strip.formula(formula1), data)
ret <- c(ret, list(idx1=mcmc$idx1, idx2=mcmc$idx2, median=median, max=list(loglike=max(mcmc$loglike))
, smp=list(alpha1=mcmc$alpha1, alpha2=mcmc$alpha2, beta1=mcmc$beta1, beta2=mcmc$beta2
, loglike=mcmc$loglike, gamma=mcmc$gamma, sigma.gamma=mcmc$sigma.gamma)
, km.fit=km.fit, tmax=max(y[,1])))
class(ret) <- "bayesmixsurv"
return (ret)
}
# print
print.bayesmixsurv <- function(x, ...) {
cat("Call:\n")
print(x$call)
cat("component-1 formula:\n")
print(x$formula1)
if (!x$control$single) {
cat("component 2 formula:\n")
print(x$formula2)
} else {
cat("(no component-2 formula in single-component mode)\n")
}
cat("component-1 shrinkage:", x$control$lambda1, "\n")
if (!x$control$single) {
cat("component-2 shrinkage:", x$control$lambda2, "\n")
} else {
cat("(no component-2 shrinkage in single-component mode)\n")
}
cat("MCMC iterations:", x$control$iter, "\n")
cat("burn-in (median calc):", x$control$burnin, "\n")
cat("Threshold on stdev of covariates:", x$control$sd.thresh, "\n")
cat("Model matrix is scaled:", x$control$scalex, "\n")
cat("component-1 coefficients:\n")
beta1 <- x$median$beta1; names(beta1) <- x$colnamesX1
print(beta1)
if (!x$control$single) {
cat("component-2 coefficients:\n")
beta2 <- x$median$beta2; names(beta2) <- x$colnamesX2
print(beta2)
} else {
cat("(no component-2 coefficients in single-component mode)\n")
}
cat("component-1 shape parameter:", x$median$alpha1, "\n")
if (!x$control$single) {
cat("component-2 shape parameter:", x$median$alpha2, "\n")
} else {
cat("(no component-2 shape parameter in single-component mode)\n")
}
cat("number of observations:", nrow(x$X1), "\n")
}
# plot
plot.bayesmixsurv <- function(x, pval=0.05, burnin=round(x$control$iter/2), nrow=2, ncol=3, ...) {
iter <- x$control$iter
sel <- (burnin+1):x$control$iter
nsel <- length(sel)
CI_prob <- c(pval/2, 0.5, 1-pval/2)
nplot_per_page <- nrow*ncol
# determine number of beta coefficients
nbeta1 <- ncol(x$smp$beta1)
nbeta2 <- ncol(x$smp$beta2)
npage_beta1 <- ceiling(nbeta1/nplot_per_page)
npage_beta2 <- ceiling(nbeta2/nplot_per_page)
## loglike and logpost
par(mfrow=c(2,1))
plot(x$smp$loglike, type="l", xlab="Iteration", ylab="Log-likelihood", main="Log-likelihood")
plot(x$smp$loglike[sel], type="l", xlab="Iteration", ylab="Log-likelihood", main="Log-likelihood, Post-Burnin")
## traceplots
# scale coefficients
# beta1
beta1_q <- apply(x$smp$beta1[sel,,drop=F], 2, quantile, probs=CI_prob)
beta1_lower <- beta1_q[1,]
beta1_median <- beta1_q[2,]
beta1_upper <- beta1_q[3,]
for (n in 1:npage_beta1) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbeta1) {
beta1_ylim <- range(x$smp$beta1[,offset+i], 0.0)
plot(x$smp$beta1[,offset+i], type="l", xlab="Iteration", ylab="Sample Value", ylim=beta1_ylim
, main = paste("beta1[", x$colnamesX1[offset+i], "]", sep=""))
abline(h = 0)
lines(sel, rep(beta1_lower[offset+i], nsel), lty=2, col="red")
lines(sel, rep(beta1_median[offset+i], nsel), lty=2, col="red")
lines(sel, rep(beta1_upper[offset+i], nsel), lty=2, col="red")
}
}
}
# beta2
if (!x$control$single) {
beta2_q <- apply(x$smp$beta2[sel,,drop=F], 2, quantile, probs=CI_prob)
beta2_lower <- beta2_q[1,]
beta2_median <- beta2_q[2,]
beta2_upper <- beta2_q[3,]
for (n in 1:npage_beta2) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbeta2) {
beta2_ylim <- range(x$smp$beta2[,offset+i], 0.0)
plot(x$smp$beta2[,offset+i], type="l", xlab="Iteration", ylab="Sample Value", ylim=beta2_ylim
, main = paste("beta2[", x$colnamesX2[offset+i], "]", sep=""))
abline(h = 0)
lines(sel, rep(beta2_lower[offset+i], nsel), lty=2, col="red")
lines(sel, rep(beta2_median[offset+i], nsel), lty=2, col="red")
lines(sel, rep(beta2_upper[offset+i], nsel), lty=2, col="red")
}
}
}
}
# shape parameters
par(mfrow=c(1,2))
# alpha1
alpha1_q <- quantile(x$smp$alpha1[sel], probs=CI_prob)
alpha1_lower <- alpha1_q[1]
alpha1_median <- alpha1_q[2]
alpha1_upper <- alpha1_q[3]
alpha1_ylim <- range(x$smp$alpha1)
plot(x$smp$alpha1, type="l", xlab="Iteration", ylab="Sample Value", ylim=alpha1_ylim
, main = paste("alpha1", sep=""))
abline(h = 0)
abline(h = alpha1_lower, lty=2, col="red")
abline(h = alpha1_median, lty=2, col="red")
abline(h = alpha1_upper, lty=2, col="red")
if (!x$control$single) {
# alpha2
alpha2_q <- quantile(x$smp$alpha2[sel], probs=CI_prob)
alpha2_lower <- alpha2_q[1]
alpha2_median <- alpha2_q[2]
alpha2_upper <- alpha2_q[3]
alpha2_ylim <- range(x$smp$alpha2)
plot(x$smp$alpha2, type="l", xlab="Iteration", ylab="Sample Value", ylim=alpha2_ylim
, main = paste("alpha2", sep=""))
abline(h = 0)
abline(h = alpha2_lower, lty=2, col="red")
abline(h = alpha2_median, lty=2, col="red")
abline(h = alpha2_upper, lty=2, col="red")
}
## autocorrelation plots
# beta1
for (n in 1:npage_beta1) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbeta1) {
if ((offset+i) %in% x$idx1) {
acf(x$smp$beta1[sel,offset+i], main=paste("beta1[", x$colnamesX1[offset+i], "]", sep=""))
} else {
bayesmixsurv.empty.plot(main=paste("beta1[", x$colnamesX1[offset+i], "]", sep=""))
}
}
}
}
if (!x$control$single) {
# beta2
for (n in 1:npage_beta2) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbeta2) {
if ((offset+i) %in% x$idx2) {
acf(x$smp$beta2[sel,offset+i], main=paste("beta2[", x$colnamesX2[offset+i], "]", sep=""))
} else {
bayesmixsurv.empty.plot(main=paste("beta2[", x$colnamesX2[offset+i], "]", sep=""))
}
}
}
}
}
## histograms
# beta1
for (n in 1:npage_beta1) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbeta1) {
if ((offset+i) %in% x$idx1) {
hist(x$smp$beta1[sel,offset+i], xlab="Sample Value"
, main=paste("beta1[", x$colnamesX1[offset+i], "]", sep=""))
abline(v = 0)
abline(v = beta1_median[offset+i], lty=2, col="red")
abline(v = beta1_lower[offset+i], lty=3, col="red")
abline(v = beta1_upper[offset+i], lty=3, col="red")
} else {
bayesmixsurv.empty.plot(main=paste("beta1[", x$colnamesX1[offset+i], "]", sep=""))
}
}
}
}
if (!x$control$single) {
# beta2
for (n in 1:npage_beta2) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbeta2) {
if ((offset+i) %in% x$idx2) {
hist(x$smp$beta2[sel,offset+i], xlab="Sample Value"
, main=paste("beta2[", x$colnamesX2[offset+i], "]", sep=""))
abline(v = 0)
abline(v = beta2_median[offset+i], lty=2, col="red")
abline(v = beta2_lower[offset+i], lty=3, col="red")
abline(v = beta2_upper[offset+i], lty=3, col="red")
} else {
bayesmixsurv.empty.plot(main=paste("beta2[", x$colnamesX2[offset+i], "]", sep=""))
}
}
}
}
}
}
#summary
summary.bayesmixsurv <- function(object, pval=0.05, burnin=object$control$burnin, ...) {
iter <- object$control$iter
CI_prob <- c(pval/2, 0.5, 1-pval/2)
sel <- (burnin+1):iter
# alpha1, alpha2
alpha1_q <- quantile(object$smp$alpha1[sel], prob=CI_prob)
alpha1_lb <- alpha1_q[1]; alpha1_med <- alpha1_q[2]; alpha1_ub <- alpha1_q[3]
alpha2_q <- quantile(object$smp$alpha2[sel], prob=CI_prob)
alpha2_lb <- alpha2_q[1]; alpha2_med <- alpha2_q[2]; alpha2_ub <- alpha2_q[3]
alpha1_pval <- bayesmixsurv.calc.pval(object$smp$alpha1[sel], ref=1.0)
alpha2_pval <- bayesmixsurv.calc.pval(object$smp$alpha2[sel], ref=1.0)
coefficients_alpha <- matrix(c(alpha1_med, alpha1_lb, alpha1_ub, alpha1_pval, alpha2_med, alpha2_lb, alpha2_ub, alpha2_pval)
, ncol=4, byrow = TRUE)
dimnames(coefficients_alpha) <- list(c("alpha1","alpha2"), c("Estimate", "Lower Bound", "Upper Bound", "P-val"))
# beta1
beta1_q <- apply(object$smp$beta1[sel,,drop=F], 2, quantile, probs=CI_prob)
beta1_lb <- beta1_q[1,]
beta1_med <- beta1_q[2,]
beta1_ub <- beta1_q[3,]
beta1_pval <- apply(object$smp$beta1[sel,,drop=F], 2, bayesmixsurv.calc.pval, ref=0.0)
coefficients_beta1 <- as.matrix(cbind(beta1_med, beta1_lb, beta1_ub, beta1_pval))
dimnames(coefficients_beta1) <- list(object$colnamesX1, c("Estimate", "Lower Bound", "Upper Bound", "P-val"))
# beta2
beta2_q <- apply(object$smp$beta2[sel,,drop=F], 2, quantile, probs=CI_prob)
beta2_lb <- beta2_q[1,]
beta2_med <- beta2_q[2,]
beta2_ub <- beta2_q[3,]
beta2_pval <- apply(object$smp$beta2[sel,,drop=F], 2, bayesmixsurv.calc.pval, ref=0.0)
coefficients_beta2 <- as.matrix(cbind(beta2_med, beta2_lb, beta2_ub, beta2_pval))
dimnames(coefficients_beta2) <- list(object$colnamesX2, c("Estimate", "Lower Bound", "Upper Bound", "P-val"))
# gamma
if (!is.null(object$Xg)) {
gamma_q <- apply(object$smp$gamma[sel,,drop=F], 2, quantile, probs=CI_prob)
gamma_lb <- gamma_q[1,]
gamma_med <- gamma_q[2,]
gamma_ub <- gamma_q[3,]
gamma_pval <- apply(object$smp$gamma[sel,,drop=F], 2, bayesmixsurv.calc.pval, ref=0.0)
coefficients_gamma <- as.matrix(cbind(gamma_med, gamma_lb, gamma_ub, gamma_pval))
dimnames(coefficients_gamma) <- list(object$colnamesXg, c("Estimate", "Lower Bound", "Upper Bound", "P-val"))
} else {
coefficients_gamma <- NULL
}
ret <- list(call=object$call, pval=pval, burnin=burnin, single=object$control$single
, coefficients=list(alpha=coefficients_alpha, beta1=coefficients_beta1, beta2=coefficients_beta2
, gamma=coefficients_gamma)
)
class(ret) <- "summary.bayesmixsurv"
return (ret)
}
print.summary.bayesmixsurv <- function(x, ...) {
cat("Call:\n")
print(x$call)
cat("number of burn-in iterations discarded:", x$burnin, "\n")
cat("confidence interval:", x$pval, "\n")
cat("## shape coefficients ##\n")
print(x$coefficients$alpha)
cat("## scale coefficients ##\n")
if (x$single) {
print(x$coefficients$beta1)
} else {
cat("component 1:\n")
print(x$coefficients$beta1)
cat("component2:\n")
print(x$coefficients$beta2)
}
if (!is.null(x$coefficients$gamma)) {
cat("## stratification coefficients ##\n")
print(x$coefficients$gamma)
}
}
predict.bayesmixsurv <- function(object, newdata=NULL, tvec=NULL, burnin=object$control$burnin, ...) {
iter <- object$control$iter
alpha.min <- object$control$alpha.min
alpha.max <- object$control$alpha.max
tt1 <- object$terms1
tt2 <- object$terms2
Terms1 <- delete.response(tt1)
Terms2 <- delete.response(tt2)
if (is.null(newdata)) {
nobs <- nrow(object$X1)
X1 <- object$X1
X2 <- object$X2
Xg <- object$Xg
km.fit <- object$km.fit
} else {
newdata <- droplevels(newdata)
mf1 <- model.frame(Terms1, newdata, xlev = object$xlevels1)
mf2 <- model.frame(Terms2, newdata, xlev = object$xlevels2)
X1 <- model.matrix(Terms1, mf1, contrasts.arg = object$contrasts1)
X2 <- model.matrix(Terms2, mf2, contrasts.arg = object$contrasts2)
if (object$control$scalex) {
X1 <- bayesmixsurv.scale(X1, apply.sc=object$apply.scale.X1, center=object$centerVec.X1, scale=object$scaleVec.X1)
X2 <- bayesmixsurv.scale(X2, apply.sc=object$apply.scale.X2, center=object$centerVec.X2, scale=object$scaleVec.X2)
}
nobs <- nrow(newdata)
if (!is.null(object$stratCol)) {
newdata$stratColFactor <- as.factor(newdata[,object$stratCol])
ttg <- object$stratTerms
Termsg <- delete.response(ttg)
index_seen_levels <- which(newdata$stratColFactor %in% object$stratXlevels$stratColFactor)
index_unseen_levels <- setdiff(1:nrow(newdata), index_seen_levels)
if (length(index_unseen_levels)>0) {
mfg_seen <- model.frame(Termsg, newdata[index_seen_levels,], xlev = object$stratXlevels)
Xg_seen <- model.matrix(Termsg, mfg_seen, contrasts.arg = object$stratContrasts)
Xg <- matrix(NA, nrow=nrow(newdata), ncol=ncol(Xg_seen)); colnames(Xg) <- colnames(Xg_seen)
Xg[index_seen_levels,] <- Xg_seen
Xg[index_unseen_levels,] <- 1/ncol(Xg_seen)
} else {
mfg <- model.frame(Termsg, newdata, xlev = object$stratXlevels)
Xg <- model.matrix(Termsg, mfg, contrasts.arg = object$stratContrasts)
}
Xg <- Xg[,-1] # dropping intercept term
} else {
Xg <- NULL
}
}
if (!is.null(tvec)) {
# TODO: we need an upper bound on length of tvec to avoid memory blow-up
if (length(tvec)==1) tvec <- seq(from=0.0, to=object$tmax, length.out=tvec) # tvec is interpreted as number of time points
nt <- length(tvec)
tvec <- as.matrix(tvec)
t_mat <- tvec[,rep(1,nobs)]
ret <- lapply(1:iter, FUN=function(i) {
xbeta1 <- X1%*%object$smp$beta1[i,]
xbeta2 <- X2%*%object$smp$beta2[i,]
alpha1 <- object$smp$alpha1[i]
alpha2 <- object$smp$alpha2[i]
exbeta1 <- as.matrix(exp(xbeta1))
exbeta2 <- as.matrix(exp(xbeta2))
exbeta1_mat <- t(exbeta1[,rep(1,nt)])
exbeta2_mat <- t(exbeta2[,rep(1,nt)])
H1tmp <- (t_mat^alpha1)*exbeta1_mat
H2tmp <- (t_mat^alpha2)*exbeta2_mat
h1tmp <- alpha1*(t_mat^(alpha1-1))*exbeta1_mat
h2tmp <- alpha2*(t_mat^(alpha2-1))*exbeta2_mat
return (list(h1=h1tmp, h2=h2tmp, H1=H1tmp, H2=H2tmp))
})
h1 <- array(NA, dim=c(iter, nt, nobs))
h2 <- array(NA, dim=c(iter, nt, nobs))
H1 <- array(NA, dim=c(iter, nt, nobs))
H2 <- array(NA, dim=c(iter, nt, nobs))
for (i in 1:iter) {
h1[i,,] <- ret[[i]]$h1
h2[i,,] <- ret[[i]]$h2
H1[i,,] <- ret[[i]]$H1
H2[i,,] <- ret[[i]]$H2
}
h <- h1+h2
H <- H1+H2
S <- exp(-H)
} else {
h1 <- NA
h2 <- NA
h <- NA
H1 <- NA
H2 <- NA
H <- NA
S <- NA
}
if (is.null(newdata)) {
y <- object$y
do_loglike <- T
} else {
Rterms <- drop.terms(tt1)
if (all(all.vars(Rterms)[1:2] %in% colnames(newdata))) {
mfy <- model.frame(Rterms, newdata, xlev = object$xlevels1) # TODO: add check to make sure response variable is available for newdata
y <- model.response(mfy, "numeric")
do_loglike <- T
km.fit <- survfit(bayesmixsurv.strip.formula(object$formula1), newdata)
} else {
do_loglike <- F
km.fit <- NULL
}
}
if (do_loglike) { # TODO: include logpost
loglike <- sapply(1:iter, FUN=function(i) {
condprob.data(object$smp$alpha1[i], object$smp$beta1[i,], object$smp$alpha2[i], object$smp$beta2[i,], X1, X2, object$smp$gamma[i,], Xg, y[,1], y[,2])
})
loglike_median <- median(loglike[(burnin+1):iter])
} else {
loglike <- NA
loglike_median <- NA
}
ret <- list(tvec=as.vector(tvec), burnin=burnin, median=list(loglike=loglike_median)
, smp=list(h1=h1, h2=h2, h=h, H1=H1, H2=H2, H=H, S=S, loglike=loglike)
, km.fit=km.fit
)
class(ret) <- "predict.bayesmixsurv"
return (ret)
}
# summary of predict
summary.predict.bayesmixsurv <- function(object, idx=1:dim(object$smp$h)[3], burnin=object$burnin
, pval=0.05, popmean=identical(idx,1:dim(object$smp$h)[3])
, make.plot=TRUE, ...) {
if (!all(idx %in% 1:dim(object$smp$h)[3])) {
stop("invalid idx argument")
}
if (is.null(object$tvec)) {
cat("prediction summary must be applied to time-dependent prediction entities")
return (NULL) # TODO: can't we still return something useful?!
}
CI_prob <- c(pval/2, 0.5, 1-pval/2)
iter <- dim(object$smp$h)[1]
sel <- (burnin+1):iter
tvec <- object$tvec
# first, calculate summary statistics of h,H,S for each point
# h
h.q <- apply(object$smp$h[sel,,idx], c(2,3), quantile, probs=CI_prob)
h.lower <- h.q[1,,]
h.median <- h.q[2,,]
h.upper <- h.q[3,,]
# H
H.q <- apply(object$smp$H[sel,,idx], c(2,3), quantile, probs=CI_prob)
H.lower <- H.q[1,,]
H.median <- H.q[2,,]
H.upper <- H.q[3,,]
# S
S.q <- apply(object$smp$S[sel,,idx], c(2,3), quantile, probs=CI_prob)
S.lower <- S.q[1,,]
S.median <- S.q[2,,]
S.upper <- S.q[3,,]
if (popmean) {
S.popmean <- apply(object$smp$S[,,idx], c(1,2), mean)
S.popmean.q <- apply(S.popmean[sel,], 2, quantile, probs=CI_prob)
S.popmean.lower <- S.popmean.q[1,]
S.popmean.median <- S.popmean.q[2,]
S.popmean.upper <- S.popmean.q[3,]
if (make.plot) {
S.popmean.ylim <- range(S.popmean.lower, S.popmean.median, S.popmean.upper)
plot(tvec, S.popmean.median, type="l", xlab="Time", ylab="Population Survival Probability", ylim=S.popmean.ylim)
lines(tvec, S.popmean.lower, lty=2)
lines(tvec, S.popmean.upper, lty=2)
lines(object$km.fit, col="red")
legend("topright", legend = c("bayesmixsurv model", "kaplan-meyer"), col=c("black","red"), lty = c(1,1))
}
} else {
S.popmean.lower <- NA
S.popmean.median <- NA
S.popmean.upper <- NA
}
# pair-wise comparisons
if (length(idx)==2) {
if (tvec[1]==0) {
tindex <- 2:length(tvec)
} else {
tindex <- 1:length(tvec)
}
idx1 <- idx[1]
idx2 <- idx[2]
# hazard ratio
hr <- object$smp$h[,,idx2]/object$smp$h[,,idx1]
hr.q <- apply(hr[sel,tindex], 2, quantile, probs=CI_prob)
hr.lower <- hr.q[1,]
hr.median <- hr.q[2,]
hr.upper <- hr.q[3,]
# survival diff
S.diff <- object$smp$S[sel,,idx2]-object$smp$S[sel,,idx1]
S.diff.q <- apply(S.diff, 2, quantile, probs=CI_prob)
S.diff.lower <- S.diff.q[1,]
S.diff.median <- S.diff.q[2,]
S.diff.upper <- S.diff.q[3,]
if (make.plot) {
hr.range <- range(hr.lower, hr.median, hr.upper, 1.0)
plot(tvec[tindex], hr.median, type="l", xlab="Time", ylab="Hazard Ratio"
, ylim=hr.range, main=paste0("pval=", pval, ", idx1=", idx1, ", idx2=", idx2))
lines(tvec[tindex], hr.lower, lty=2)
lines(tvec[tindex], hr.upper, lty=2)
abline(h=1.0, lty=2, col="red")
S.range <- range(S.lower, S.median, S.upper)
plot(tvec, S.median[,1], type="l", xlab="Time", ylab="Survival Probability", ylim=S.range, col="green"
, main=paste0("pval=", pval))
lines(tvec, S.lower[,1], lty=2, col="green")
lines(tvec, S.upper[,1], lty=2, col="green")
lines(tvec, S.median[,2], col="red")
lines(tvec, S.lower[,2], lty=2, col="red")
lines(tvec, S.upper[,2], lty=2, col="red")
legend("topright", legend=c(paste0("idx1=",idx1),paste0("idx2=",idx2)), col=c("green","red"), lty=c(1,1))
S.diff.range <- range(S.diff.lower, S.diff.median, S.diff.upper, 0.0)
plot(tvec, S.diff.median, type="l", xlab="Time", ylab="Survival Probability Difference"
, ylim=S.diff.range, main=paste0("pval=", pval, ", idx1=", idx1, ", idx2=", idx2))
lines(tvec, S.diff.lower, lty=2)
lines(tvec, S.diff.upper, lty=2)
abline(h=0.0, lty=2, col="red")
}
} else {
hr.lower <- NA
hr.median <- NA
hr.upper <- NA
S.diff.lower <- NA
S.diff.median <- NA
S.diff.upper <- NA
}
return (list(lower=list(h=h.lower, H=H.lower, S=S.lower, hr=hr.lower, S.diff=S.diff.lower)
, median=list(h=h.median, H=H.median, S=S.median, hr=hr.median, S.diff=S.diff.median)
, upper=list(h=h.upper, H=H.upper, S=S.upper, hr=hr.upper, S.diff=S.diff.upper)
, popmean=list(lower=list(S=S.popmean.lower), median=list(S=S.popmean.median), upper=list(S=S.popmean.upper))
, km.fit=object$km.fit))
}
# cross-validated log-likelihood
bayesmixsurv.crossval <- function(data, folds, all=FALSE, print.level=1, control=bayesmixsurv.control(), ...) {
nfolds <- max(folds) # TODO: add more checks for validity of folds
if (all) {
ret <- lapply (1:nfolds, function(n) {
if (print.level>=1) cat("processing fold", n, "of", nfolds, "\n")
flush.console()
est <- bayesmixsurv(data=data[which(folds!=n),], control=control, print.level=print.level, ...)
pred <- predict(est, newdata=data[which(folds==n),], burnin=control$burnin)
ret <- max(pred$smp$loglike)
attr(ret, "estobj") <- est
return (ret)
})
fret <- sum(unlist(ret))
estobjs <- list()
for (n in 1:nfolds) estobjs[[n]] <- attr(ret[[n]], "estobj")
attr(fret, "estobjs") <- estobjs
return (fret)
} else {
loglike <- sapply (1:nfolds, function(n) {
if (print.level>=1) cat("processing fold", n, "of", nfolds, "\n")
est <- bayesmixsurv(data=data[which(folds!=n),], control=control, print.level=print.level, ...)
pred <- predict(est, newdata=data[which(folds==n),], burnin=control$burnin)
return (max(pred$smp$loglike))
})
return (sum(loglike))
}
}
bayesmixsurv.crossval.wrapper <- function(data, folds, all=FALSE, print.level=1, control=bayesmixsurv.control()
, lambda.min=0.01, lambda.max=100, nlambda=10
, lambda1.vec=exp(seq(from=log(lambda.min), to=log(lambda.max), length.out = nlambda))
, lambda2.vec=NULL
, lambda12=if (is.null(lambda2.vec)) cbind(lambda1=lambda1.vec, lambda2=lambda1.vec)
else as.matrix(expand.grid(lambda1=lambda1.vec, lambda2=lambda2.vec)), plot=TRUE, ...) {
nlambda <- nrow(lambda12)
loglike <- rep(NA, nlambda)
estobjs <- list()
if (print.level>=1) cat("number of lambda combinations to test:", nlambda, "\n")
for (i in 1:nlambda) { # TODO: keep this loop sequential due to load imbalance across different lambda's; parallelize folds within
if (print.level>=1) cat("processing lambda combo", i, "of", nlambda, "\n")
flush.console()
control$lambda1 <- lambda12[i,"lambda1"]
control$lambda2 <- lambda12[i,"lambda2"]
ret <- bayesmixsurv.crossval(data=data, folds=folds, all=all, print.level=print.level, control=control, ...)
loglike[i] <- ret
if (all) estobjs[[i]] <- attr(ret, "estobjs")
}
opt.index <- which(loglike==max(loglike))[1]
lambda1.opt <- lambda12[opt.index,"lambda1"]
lambda2.opt <- lambda12[opt.index,"lambda2"]
ret <- list(lambda1=lambda1.opt, lambda2=lambda2.opt)
attr(ret, "loglike.vec") <- loglike
attr(ret, "loglike.opt") <- max(loglike)
attr(ret, "lambda12") <- lambda12
if (all) attr(ret, "estobjs") <- estobjs
if (print.level>=1) cat("selected lambda's: ", c(lambda1.opt, lambda2.opt), "\n")
if (plot) {
# we only plot when the lambda1 and lambda2 sequences coincide; otherwise a 2D plot would be needed
if (identical(attr(ret,"lambda12")[,"lambda1"], attr(ret, "lambda12")[,"lambda2"])) {
plot(lambda12[,"lambda1"], loglike, type="l", log="x", xlab="Shrinkage", ylab="Log-likelihood")
}
}
return (ret)
}
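# Minimal sketch of how the lambda grid above is assembled (values here are
# illustrative, not package defaults). With lambda2.vec = NULL the two shrinkage
# parameters move together; with a separate lambda2.vec all combinations are tested:
lambda1.vec <- exp(seq(from = log(0.01), to = log(100), length.out = 5))
cbind(lambda1 = lambda1.vec, lambda2 = lambda1.vec)                  # tied grid
as.matrix(expand.grid(lambda1 = lambda1.vec, lambda2 = c(0.1, 1)))   # full grid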
## File: /scratch/gouwar.j/cran-all/cranData/BayesMixSurv/R/BayesMixSurv.R
bayesmixsurv.eval <- function(alpha1, beta1, alpha2, beta2, X1, X2, gamma, Xg, t) {
lp1 <- X1%*%beta1
lp2 <- X2%*%beta2
if (!is.null(Xg)) {
lp1 <- lp1 + Xg%*%gamma
lp2 <- lp2 + Xg%*%gamma
}
expterm1 <- exp(lp1)
expterm2 <- exp(lp2)
H1 <- t^(alpha1) * expterm1
H2 <- t^(alpha2) * expterm2
H <- H1+H2
h1 <- alpha1 * t^(alpha1-1) * expterm1
h2 <- alpha2 * t^(alpha2-1) * expterm2
h <- h1+h2
S <- exp(-H)
return (list(h1=h1, h2=h2, h=h, H1=H1, H2=H2, H=H, S=S, lp1=lp1, lp2=lp2))
}
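# Quick numeric check of the mixture-of-Weibulls evaluator above, with an
# intercept-only design and illustrative parameter values (one time point per
# subject, so t aligns with the rows of X1 and X2):
tt <- c(0.5, 1, 2)
X <- matrix(1, nrow = 3, ncol = 1)
out <- bayesmixsurv.eval(alpha1 = 0.8, beta1 = -1, alpha2 = 2, beta2 = -3,
                         X1 = X, X2 = X, gamma = NULL, Xg = NULL, t = tt)
out$S  # overall survival exp(-(H1 + H2)) of the two-component model at tt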
condprob.data <- function(alpha1, beta1, alpha2, beta2, X1, X2, gamma, Xg, t, s) {
ret <- bayesmixsurv.eval(alpha1, beta1, alpha2, beta2, X1, X2, gamma, Xg, t)
return (sum((s*log(ret$h)-ret$H)))
}
condprob.beta <- function(lambda, beta) {
return (-lambda*sum(abs(beta)))
}
condprob.gamma <- function(mu.gamma, sigma.gamma, gamma) {
  # normal log-density of gamma, up to an additive constant
  return (-length(gamma)*log(sigma.gamma) - 0.5*sum((gamma-mu.gamma)^2)/sigma.gamma^2)
}
condprob.alpha <- function(alpha) {
log(dgamma(alpha, shape=1.0, rate=1e-3))
}
## posterior distributions for stochastic nodes ##
condpost.beta <- function(beta.this, beta.that, alpha.this, alpha.that, gamma, X.this, X.that, t, s, lambda.this, Xg) {
return (condprob.data(alpha.this, beta.this, alpha.that, beta.that, X.this, X.that, gamma, Xg, t, s) + condprob.beta(lambda.this, beta.this))
}
condpost.alpha <- function(alpha.this, beta.this, beta.that, alpha.that, gamma, X.this, X.that, t, s, Xg) {
return (condprob.data(alpha.this, beta.this, alpha.that, beta.that, X.this, X.that, gamma, Xg, t, s) + condprob.alpha(alpha.this))
}
condpost.gamma <- function(gamma, beta1, beta2, alpha1, alpha2, mu.gamma, sigma.gamma, X1, X2, t, s, Xg) {
return (condprob.gamma(mu.gamma, sigma.gamma, gamma) + condprob.data(alpha1, beta1, alpha2, beta2, X1, X2, gamma, Xg, t, s))
}
# MCMC routine
bayesmixsurv.mcmc <- function(X1, X2, t, s, Xg, lambda1=1.0, lambda2=lambda1
, iter=1000, single=FALSE, alpha2.fixed=NULL, alpha.boundary=1.0
, sd.thresh=1e-04, print.level=2, nskip=round(iter/10)) {
# checking of first column of X1 and X2 being intercept must happen in caller (bayesmixsurv)
if (single) {
alpha2.fixed <- 1.0
}
if (is.null(alpha2.fixed)) {
alpha1.lower <- 0.001
alpha1.upper <- alpha.boundary-0.001
alpha2.lower <- alpha.boundary+0.001
alpha2.upper <- 10.0 # turn into parameter, perhaps hidden from user
} else {
alpha1.lower <- 0.001
alpha1.upper <- 10.0
}
K1 <- ncol(X1)
K2 <- ncol(X2)
if (!is.null(Xg)) {
Ncenter <- ncol(Xg)
} else {
Ncenter <- 2 # check that 1 works too and switch to 1
}
gamma.smp <- array(NA, dim=c(iter,Ncenter))
beta1.smp <- array(NA, dim=c(iter,K1))
beta2.smp <- array(NA, dim=c(iter,K2))
alpha1.smp <- rep(NA, iter)
alpha2.smp <- rep(NA, iter)
#mu.gamma.smp <- rep(NA, iter) # leftover code, but mu.gamma is really zero since it represents difference between centers
sigma.gamma.smp <- rep(NA, iter)
loglike.smp <- rep(NA, iter)
# excluding zero-variance variables from sampling process
idx1 <- c(1, setdiff(1:K1, which(apply(X1, 2, function(x) sd(x)<sd.thresh)))) # assumes intercept is at 1; TODO: we need to ensure that beta1[1] corresponds to intercept
idx2 <- c(1, setdiff(1:K2, which(apply(X2, 2, function(x) sd(x)<sd.thresh)))) # assumes intercept is at 1; TODO: we need to ensure that beta2[1] corresponds to intercept
# do we need better initialization, and allow for inits to be passed in?
beta1 <- rep(0, K1)
beta2 <- rep(0, K2)
if (is.null(alpha2.fixed)) {
alpha1 <- 0.5
alpha2 <- 2.0
} else {
alpha1 <- 1.0 # can we do better? be more intelligent?
alpha2 <- alpha2.fixed
}
gamma <- rep(0, Ncenter)
mu.gamma <- 0.0
sigma.gamma <- 1.0
for (n in 1:iter) {
# beta's
beta1[idx1] <- bayesmixsurv.multislice.from.unislice(beta1[idx1], condpost.beta, beta2, alpha1, alpha2, gamma, X1[,idx1,drop=F], X2, t, s, lambda1, Xg
, w=1.0, m=100
, lower=rep(-Inf,length(idx1)), upper=rep(+Inf,length(idx1))) # add maximum absolute value on beta1
if (!single) {
beta2[idx2] <- bayesmixsurv.multislice.from.unislice(beta2[idx2], condpost.beta, beta1, alpha2, alpha1, gamma, X2[,idx2,drop=F], X1, t, s, lambda2, Xg
, w=1.0, m=100
, lower=rep(-Inf,length(idx2)), upper=rep(+Inf,length(idx2)))
} else {
beta2[1] <- -1e+12 # forcing e^(xbeta2) to be 0; TODO: we need to ensure that beta2[1] corresponds to intercept
}
# alpha's
alpha1 <- bayesmixsurv.uni.slice(alpha1, condpost.alpha, beta1, beta2, alpha2, gamma, X1, X2, t, s, Xg
, w=1.0, m=10, lower=alpha1.lower, upper=alpha1.upper)
if (is.null(alpha2.fixed)) {
alpha2 <- bayesmixsurv.uni.slice(alpha2, condpost.alpha, beta2, beta1, alpha1, gamma, X2, X1, t, s, Xg
, w=1.0, m=10, lower=alpha2.lower, upper=alpha2.upper)
}
# gamma
if (!is.null(Xg)) {
gamma <- bayesmixsurv.multislice.from.unislice(gamma, condpost.gamma, beta1, beta2, alpha1, alpha2, mu.gamma, sigma.gamma, X1, X2, t, s, Xg
, w=1.0, m=10, lower=-Inf, upper=+Inf)
sigma.gamma <- sqrt(1/rgamma(1, shape=Ncenter/2, rate=0.5*sum((mu.gamma-gamma)^2))) # conjugate gamma update for the precision (rate, not scale, parameterization); assumes a flat prior
}
beta1.smp[n,] <- beta1
beta2.smp[n,] <- beta2
alpha1.smp[n] <- alpha1
alpha2.smp[n] <- alpha2
gamma.smp[n,] <- gamma
#mu.gamma.smp[n] <- mu.gamma
sigma.gamma.smp[n] <- sigma.gamma
loglike.smp[n] <- condprob.data(alpha1, beta1, alpha2, beta2, X1, X2, gamma, Xg, t, s)
if (n%%nskip==0 && print.level>=2) cat("finished sample", n, "of", iter, "\n")
flush.console() # for windows
}
return (list(beta1=beta1.smp, beta2=beta2.smp, alpha1=alpha1.smp, alpha2=alpha2.smp, loglike=loglike.smp
, gamma=gamma.smp
, sigma.gamma=sigma.gamma.smp#, xbeta1=xbeta1, xbeta2=xbeta2
, idx1=idx1, idx2=idx2))
}
## File: /scratch/gouwar.j/cran-all/cranData/BayesMixSurv/R/Sample.R
# wrapper around a multivariate function to convert it to univariate, to be used with slice sampler
bayesmixsurv.convert.multivar.to.univar <- function(xk, k, x, f, ...) {
x[k] <- xk
return (f(x, ...))
}
# Gibbs wrapper around univariate slice sampler to convert it to a multivariate sampler
bayesmixsurv.multislice.from.unislice <- function(xm, fm, ..., w=1.0, m=Inf, lower=rep(-Inf,length(xm)), upper=rep(+Inf,length(xm))) {
K <- length(xm)
for (k in 1:K) {
xm[k] <- bayesmixsurv.uni.slice(xm[k], bayesmixsurv.convert.multivar.to.univar, k, xm, fm, ..., w=w, m=m, lower=lower[k], upper=upper[k])
}
return (xm)
}
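# Toy check of the Gibbs-over-slice wrapper: sampling from a pair of independent
# normals, N(0,1) and N(2,1) (an illustrative target, not package code):
set.seed(1)
logpost <- function(x) -0.5 * (x[1]^2 + (x[2] - 2)^2)
draws <- matrix(NA_real_, nrow = 500, ncol = 2)
x <- c(0, 0)
for (n in 1:500) {
  x <- bayesmixsurv.multislice.from.unislice(x, logpost, w = 1, m = 100)
  draws[n, ] <- x
}
colMeans(draws)  # approximately c(0, 2)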
# R FUNCTIONS FOR PERFORMING UNIVARIATE SLICE SAMPLING.
#
# Radford M. Neal, 17 March 2008.
#
# Implements, with slight modifications and extensions, the algorithm described
# in Figures 3 and 5 of the following paper:
#
# Neal, R. M (2003) "Slice sampling" (with discussion), Annals of Statistics,
# vol. 31, no. 3, pp. 705-767.
#
# See the documentation for the function uni.slice below for how to use it.
# The function uni.slice.test was used to test the uni.slice function.
# GLOBAL VARIABLES FOR RECORDING PERFORMANCE.
#uni.slice.calls <- 0 # Number of calls of the slice sampling function
#uni.slice.evals <- 0 # Number of density evaluations done in these calls
# UNIVARIATE SLICE SAMPLING WITH STEPPING OUT AND SHRINKAGE.
#
# Performs a slice sampling update from an initial point to a new point that
# leaves invariant the distribution with the specified log density function.
#
# Arguments:
#
# x0 Initial point
# g Function returning the log of the probability density (plus constant)
# w Size of the steps for creating interval (default 1)
# m Limit on steps (default infinite)
# lower Lower bound on support of the distribution (default -Inf)
# upper Upper bound on support of the distribution (default +Inf)
# gx0 Value of g(x0), if known (default is not known)
#
# The log density function may return -Inf for points outside the support
# of the distribution. If a lower and/or upper bound is specified for the
# support, the log density function will not be called outside such limits.
#
# The value of this function is the new point sampled, with an attribute
# of "log.density" giving the value of the log density function, g, at this
# point. Depending on the context, this log density might be passed as the
# gx0 argument of a future call of uni.slice.
#
# The global variable uni.slice.calls is incremented by one for each call
# of uni.slice. The global variable uni.slice.evals is incremented by the
# number of calls made to the g function passed.
#
# WARNING: If you provide a value for g(x0), it must of course be correct!
# In addition to giving wrong answers, wrong values for gx0 may result in
# the uni.slice function going into an infinite loop.
bayesmixsurv.uni.slice <- function (x0, f, ..., w=1, m=Inf, lower=-Inf, upper=+Inf, gx0=NULL)
{
g <- function(x) f(x,...)
# Check the validity of the arguments.
if (!is.numeric(x0) || length(x0)!=1
|| !is.function(g)
|| !is.numeric(w) || length(w)!=1 || w<=0
|| !is.numeric(m) || !is.infinite(m) && (m<=0 || m>1e9 || floor(m)!=m)
|| !is.numeric(lower) || length(lower)!=1 || x0<lower
|| !is.numeric(upper) || length(upper)!=1 || x0>upper
|| upper<=lower
|| !is.null(gx0) && (!is.numeric(gx0) || length(gx0)!=1))
{
stop ("Invalid slice sampling argument")
}
# Keep track of the number of calls made to this function.
#uni.slice.calls <<- uni.slice.calls + 1
# Find the log density at the initial point, if not already known.
if (is.null(gx0))
{
#uni.slice.evals <<- uni.slice.evals + 1
gx0 <- g(x0)
}
# Determine the slice level, in log terms.
logy <- gx0 - rexp(1)
# Find the initial interval to sample from.
u <- runif(1,0,w)
L <- x0 - u
R <- x0 + (w-u) # should guarantee that x0 is in [L,R], even with roundoff
# Expand the interval until its ends are outside the slice, or until
# the limit on steps is reached.
if (is.infinite(m)) # no limit on number of steps
{
repeat
{ if (L<=lower) break
#uni.slice.evals <<- uni.slice.evals + 1
if (g(L)<=logy) break
L <- L - w
}
repeat
{ if (R>=upper) break
#uni.slice.evals <<- uni.slice.evals + 1
if (g(R)<=logy) break
R <- R + w
}
}
else if (m>1) # limit on steps, bigger than one
{
J <- floor(runif(1,0,m))
K <- (m-1) - J
while (J>0)
{ if (L<=lower) break
#uni.slice.evals <<- uni.slice.evals + 1
if (g(L)<=logy) break
L <- L - w
J <- J - 1
}
while (K>0)
{ if (R>=upper) break
#uni.slice.evals <<- uni.slice.evals + 1
if (g(R)<=logy) break
R <- R + w
K <- K - 1
}
}
# Shrink interval to lower and upper bounds.
if (L<lower)
{ L <- lower
}
if (R>upper)
{ R <- upper
}
# Sample from the interval, shrinking it on each rejection.
repeat
{
x1 <- runif(1,L,R)
#uni.slice.evals <<- uni.slice.evals + 1
gx1 <- g(x1)
if (gx1>=logy) break
if (x1>x0)
{ R <- x1
}
else
{ L <- x1
}
}
# Return the point sampled, with its log density attached as an attribute.
attr(x1,"log.density") <- gx1
return (x1)
}
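# Minimal univariate example: drawing from a standard normal, whose log density
# up to a constant is -x^2/2 (illustrative usage, not part of the package):
set.seed(2)
smp <- numeric(1000)
x <- 0
for (n in 1:1000) {
  x <- bayesmixsurv.uni.slice(x, function(z) -z^2/2, w = 1, m = 10)
  smp[n] <- x
}
c(mean(smp), sd(smp))  # close to 0 and 1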
## File: /scratch/gouwar.j/cran-all/cranData/BayesMixSurv/R/SliceSampler.R
bayesmixsurv.strip.formula <- function(survformula) {
allvars <- all.vars(survformula)
return (formula(paste("Surv(", allvars[1], ",", allvars[2], ")~1", sep="")))
}
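# Example: covariates are dropped, keeping only the survival response:
bayesmixsurv.strip.formula(Surv(time, status) ~ age + sex)  # Surv(time, status) ~ 1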
bayesmixsurv.control <- function(single=FALSE, alpha2.fixed=NULL, alpha.boundary=1.0, lambda1=1.0, lambda2=lambda1, iter=1000, burnin=round(iter/2)
, sd.thresh=1e-4, scalex=TRUE, nskip=round(iter/10)) {
return (list(single=single, alpha2.fixed=alpha2.fixed, alpha.boundary=alpha.boundary, lambda1=lambda1, lambda2=lambda2
, iter=iter, burnin=burnin, sd.thresh=sd.thresh, scalex=scalex, nskip=nskip))
}
bayesmixsurv.scale <- function(X, apply.sc, ...) {
if (missing(apply.sc)) apply.sc <- which(sapply(1:ncol(X), function(n) length(unique(X[,n]))>2))
ret <- scale(X[,apply.sc], ...)
X[,apply.sc] <- ret
attr(X, "centerVec") <- attr(ret, "scaled:center")
attr(X, "scaleVec") <- attr(ret, "scaled:scale")
attr(X, "apply.scale") <- apply.sc
return (X)
}
bayesmixsurv.calc.pval <- function(x, ref=0.0, na.rm = FALSE) { # add flag for one-sided vs. two-sided
if (na.rm) x <- x[!is.na(x)]
bigger <- median(x)>ref
if (sd(x)<.Machine$double.eps) {
ret <- NA
} else {
ret <- max(2*length(which(if (bigger) x<ref else x>ref))/length(x), 1/length(x)) # max adjustment inspired by MCMCglmm package
}
attr(ret, "bigger") <- bigger
return (ret)
}
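# Illustration of the two-sided tail probability computed above, using fake
# posterior draws (illustrative only):
set.seed(3)
smp <- rnorm(2000, mean = 0.5)
bayesmixsurv.calc.pval(smp, ref = 0)  # twice the share of draws below ref, floored at 1/length(smp)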
bayesmixsurv.empty.plot <- function(...) {
plot(0,0,type="l", xlab="", ylab="",...)
}
bayesmixsurv.generate.folds <- function(ntot, nfold=5) {
foldsize <- rep(round(ntot/nfold), nfold-1)
foldsize <- c(foldsize, ntot-sum(foldsize))
remain <- 1:ntot
folds <- rep(NA, ntot)
for (n in 1:(nfold-1)) {
idxtmp <- sample(remain, size=foldsize[n])
folds[idxtmp] <- n
remain <- setdiff(remain, idxtmp)
}
folds[remain] <- nfold
return (folds)
}
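# Example: 10 observations assigned to 3 roughly equal folds (labels 1 to 3):
set.seed(4)
bayesmixsurv.generate.folds(10, nfold = 3)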
bayesmixsurv.generate.folds.eventbalanced <- function(formula, data, nfold=5) {
statusCol <- all.vars(formula)[2]
status_levels <- unique(data[,statusCol])
if (length(status_levels)>2) stop("status field is not binary")
index_with_event <- which(data[,statusCol]==status_levels[1]); nwith <- length(index_with_event)
index_without_event <- which(data[,statusCol]==status_levels[2]); nwithout <- length(index_without_event)
ret_with_event <- bayesmixsurv.generate.folds(nwith, nfold)
ret_without_event <- bayesmixsurv.generate.folds(nwithout, nfold)
ret_all <- list()
ret_flat <- rep(NA, nrow(data))
for (n in 1:nfold) {
ret_all[[n]] <- c(index_with_event[which(ret_with_event==n)], index_without_event[which(ret_without_event==n)])
ret_flat[ret_all[[n]]] <- n
}
return (ret_flat)
}
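# Hypothetical usage, for a data frame `df` with survival columns `time` and
# `status` (the names here are illustrative):
# folds <- bayesmixsurv.generate.folds.eventbalanced(Surv(time, status) ~ 1, df, nfold = 5)
# table(folds, df$status)  # events and censorings spread evenly across folds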
## File: /scratch/gouwar.j/cran-all/cranData/BayesMixSurv/R/utils.R
.onAttach <- function(libname, pkgname) {
RFver <- read.dcf(file=system.file("DESCRIPTION", package=pkgname),
fields="Version")
packageStartupMessage(paste(pkgname, RFver))
packageStartupMessage("Bayesian Mixture-of-Weibull Survival model for right-censored data.")
}
## File: /scratch/gouwar.j/cran-all/cranData/BayesMixSurv/R/zzz.R
#' @name Heatmap
#' @rdname Heatmap
#' @title Generic Heatmap function
#'
#' @description Generic function to \code{Heatmap} method.
#'
#' @param x Object or list of objects of class `HP`, `DLM`, `ClosedHP` or `ClosedDLM`; or a single object of class `BLC` or `PredBLC`.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A ggplot2 heatmap of the life expectancy.
#'
#' @seealso [Heatmap.HP()], [Heatmap.DLM()], [Heatmap.BLC()] and [Heatmap.list()].
#' @export
Heatmap = function(x, ...) UseMethod("Heatmap")
#'
#' @export
Heatmap.default = function(x, ...) print(x, ...)
## File: /scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/Heatmap_default.R
#' Mortality Data from Portugal to be used as example
#'
#' Matrix with the logarithm of the mortality rates in Portugal's population from 2000 until 2015.
#' The ages vary from 18 to 80 years.
#'
#' @format A numeric matrix with 63 rows and 16 columns:
#' \describe{
#' \item{Row}{Ages available.}
#' \item{Column}{Years available.}
#' }
#'
#' @name PT
#' @docType data
#' @keywords data
#'
#' @references Human Mortality Database. University of California, Berkeley (USA), and Max Planck Institute for Demographic Research (Germany). Available at www.mortality.org or www.humanmortality.de (Accessed: July 9th, 2021).
#'
NULL
## File: /scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/PT.R
#' Mortality Database from United States to be used as example
#'
#' Database with exposures and death counts of the population of the United States between the years 1933 and 2019.
#'
#' @format A data.frame with 9657 rows and 8 variables:
#' \describe{
#' \item{Year}{Years available.}
#' \item{Age}{Ages available.}
#' \item{Ex.Total}{Exposure of the US population.}
#' \item{Dx.Total}{Death count of the US population.}
#' \item{Ex.Male}{Exposure of the US male population.}
#' \item{Dx.Male}{Death count of the US male population.}
#' \item{Ex.Female}{Exposure of the US female population.}
#' \item{Dx.Female}{Death count of the US female population.}
#' }
#'
#' @name USA
#' @docType data
#' @keywords data
#'
#' @references Human Mortality Database. University of California, Berkeley (USA), and Max Planck Institute for Demographic Research (Germany). Available at www.mortality.org or www.humanmortality.de (Accessed: August 9th, 2021).
#'
NULL
## File: /scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/USA.R
#' @title Lee-Carter Bayesian Estimation for mortality data
#'
#' @description Performs Bayesian estimation of the Lee-Carter model considering different
#' variances for each age.
#'
#' @usage
#' blc(Y, prior = NULL, init = NULL, M = 5000, bn = 4000, thin = 1)
#'
#' @param Y Log-mortality rates for each age.
#' @param prior A list containing the prior mean \eqn{m_0} and the prior
#' variance \eqn{C_0}.
#' @param init A list containing initial values for \eqn{\alpha}, \eqn{\beta},
#' \eqn{\phi_V}, \eqn{\phi_W} and \eqn{\theta}.
#' @param M The number of iterations. The default value is 5000.
#' @param bn The number of initial iterations from the Gibbs sampler that should be discarded (burn-in). The default value is 4000.
#' @param thin A positive integer specifying the period for saving samples. The default value is 1.
#'
#' @details
#' Let \eqn{Y_{it}} be the log mortality rate at age \eqn{i} and time \eqn{t}. The Lee-Carter
#' model is specified as follows:
#'
#' \eqn{Y_{it} = \alpha_i + \beta_i \kappa_t + \varepsilon_{it}, i=1,...,p} and \eqn{t=1,...,T},
#'
#' where \eqn{\alpha = (\alpha_1,...,\alpha_p)'} is the vector of intercepts, representing
#' the mean log-mortality rate at each age; \eqn{\beta = (\beta_1,...,\beta_p)'} is the
#' vector of regression coefficients, representing the speed of relative change in the
#' log-mortality rate at each age; and \eqn{\kappa = (\kappa_1,...,\kappa_T)'} is the state
#' variable, representing the global relative change in the log-mortality rate. Finally,
#' \eqn{\varepsilon_{it} \sim N(0, \sigma^2_i)} is the random error.
#'
#' For the state variable \eqn{\kappa_t} Lee and Carter (1992) proposed a random walk with
#' drift to govern the dynamics over time:
#'
#' \eqn{\kappa_t = \kappa_{t-1} + \theta + \omega_t},
#'
#' where \eqn{\theta} is the drift parameter and \eqn{\omega_t} is the random error of the
#' random walk.
#'
#' We implement the Bayesian Lee-Carter (BLC) model proposed by Pedroza (2006). This
#' approach takes advantage of the fact that the Bayesian Lee-Carter model can be
#' specified as a dynamic linear model, so the state variables \eqn{\kappa_t} are
#' estimated through the FFBS algorithm. The remaining parameters are sampled from
#' their respective posterior distributions via a Gibbs sampler.
#'
#' @return A `BLC` object.
#' \item{alpha}{Posterior sample from alpha parameter.}
#' \item{beta}{Posterior sample from beta parameter.}
#' \item{phiv}{Posterior sample from phiv parameter. phiv is the precision of the random error of the Lee Carter model.}
#' \item{theta}{Posterior sample from theta.}
#' \item{phiw}{Posterior sample from phiw. phiw is the precision of the random error of the random walk.}
#' \item{kappa}{Sampling from the state variables.}
#' \item{Y}{Log-mortality rates for each age, as passed by the user to fit the model.}
#' \item{bn}{The warmup of the algorithm specified by the user to fit the model.}
#' \item{M}{The number of iterations specified by the user to fit the model.}
#' \item{m0}{The prior mean of kappa0.}
#' \item{C0}{The prior covariance matrix of kappa0.}
#'
#' @references Lee, R. D., & Carter, L. R. (1992). “Modeling and forecasting US mortality.” \emph{Journal of the American statistical association}, 87(419), 659-671.
#' @references Pedroza, C. (2006). “A Bayesian forecasting model: predicting US male mortality.” \emph{Biostatistics}, 7(4), 530-550.
#'
#' @examples
#' ## Example of transforming the dataset to fit the function:
#'
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Calculating the mortality rates for the general population:
#' require(dplyr)
#' require(tidyr)
#' require(magrittr)
#'
#' USA %>% mutate(mx = USA$Dx.Total/USA$Ex.Total) -> data
#'
#' data %>%
#' filter(Age %in% 18:80 & Year %in% 2000:2015) %>%
#' mutate(logmx = log(mx)) %>%
#' dplyr::select(Year,Age,logmx) %>%
#' pivot_wider(names_from = Year, values_from = logmx) %>%
#' dplyr::select(-Age) %>%
#' as.matrix() %>%
#' magrittr::set_rownames(18:80) -> Y
#'
#' ## Fitting the model
#' fit = blc(Y = Y, M = 100, bn = 20)
#' print(fit)
#'
#' ## Viewing the results
#' plot(fit, ages = 18:80)
#' plot(fit, parameter = "beta", ages=18:80)
#' improvement(fit)
#'
#' @import magrittr
#' @import progress
#' @importFrom MASS mvrnorm
#' @importFrom stats rnorm
#'
#' @seealso [fitted.BLC()], [plot.BLC()], [print.BLC()] and [predict.BLC()] for `BLC` methods to native R functions [fitted()],
#'[plot()], [print()] and [predict()].
#'
#'[expectancy.BLC()] and [Heatmap.BLC()] for `BLC` methods to compute and visualise the truncated life expectancy
#'via [expectancy()] and [Heatmap()] functions.
#'
#'[improvement()] to compute the improvement of each age, based on the resulting chains of the beta parameter.
#'
#'
#' @export
blc <- function(Y, prior = NULL, init = NULL, M = 5000, bn = 4000, thin = 1) {
# -------- Type validation --------
if (mode(Y) != "numeric")
stop("Expected `Y` to be numeric")
if(!is.null(prior)){
if (mode(prior) != "list") {stop("Expected `prior` and `init` to be lists")}
}else{
prior <- list(m0 = 0, C0 = 100)
}
if(!is.null(init)){
if (mode(init) != "list") {stop("Expected `prior` and `init` to be lists")}
}else{
init <- list(alpha = runif(nrow(Y)), beta = runif(nrow(Y)), phiv = rep(1, nrow(Y)), phiw = 1, theta = runif(1))
}
if (mode(prior) != "list" || mode(init) != "list")
stop("Expected `prior` and `init` to be lists")
prior.types <- unique(sapply(prior, mode))
if (length(prior.types) != 1 || prior.types[1] != "numeric")
stop("Expected `prior` to only contain numerics")
init.types <- unique(sapply(init, mode))
if (length(init.types) != 1 || init.types[1] != "numeric")
stop("Expected `init` to only contain numerics")
if (mode(M) != "numeric" || round(M) != M || M <= 0)
stop("Expected `M` to be a positive integer")
# if (mode(std.type) != "character" || !(std.type %in% c("incl", "beta")))
# stop("Expected `std` to be one of 'incl' or 'beta'")
if (!(mode(bn) %in% c("NULL", "numeric")))
stop("Expected `bn` to be either nil or numeric")
# -------- List validation --------
prior.names <- c("C0", "m0")
init.names <- c("alpha", "beta", "phiv", "phiw", "theta")
if (any(sort(names(prior)) != prior.names))
stop("Invalid names in argument `prior`")
if (any(sort(names(init)) != init.names))
stop("Invalid names in argument `init`")
# -------- Dimension validation --------
N <- ncol(Y)
m <- nrow(Y)
if (length(prior$m0) != 1)
stop("Invalid dimensions for `prior$m0`")
if (length(prior$C0) != 1)
stop("Invalid dimensions for `prior$C0`")
if (length(init$alpha) != m)
stop("Invalid dimensions for `init$alpha`")
if (length(init$beta) != m)
stop("Invalid dimensions for `init$beta`")
if (length(init$phiv) != m)
stop("Invalid dimensions for `init$phiv`")
if (length(init$phiw) != 1)
stop("Invalid dimensions for `init$phiw`")
if (length(init$theta) != 1)
stop("Invalid dimensions for `init$theta`")
# -------- Initialization --------
std.type = "incl"
## Progress Bar
pb = progress::progress_bar$new(format = "Simulating [:bar] :percent in :elapsed",total = M, clear = FALSE, width = 60)
# Allocate storage
chain <- list()
chain$alpha <- matrix(nrow = m, ncol = M)
chain$beta <- matrix(nrow = m, ncol = M)
chain$phiv <- matrix(nrow = m, ncol = M)
chain$theta <- numeric(M)
chain$phiw <- numeric(M)
chain$kappa <- matrix(nrow = N, ncol = M)
pb$tick() ## Progress Bar
# Initialize chains
chain$alpha[ ,1] <- init$alpha
chain$beta[ ,1] <- init$beta
chain$phiv[ ,1] <- init$phiv
chain$theta[1] <- init$theta
chain$phiw[1] <- init$phiw
# -------- Gibbs --------
for (i in 2:M) {
pb$tick() ## Progress Bar
kdf <- kd.filter(Y, prior$m0, prior$C0,diag(1/chain$phiv[ ,i-1]),
1/chain$phiw[i-1], chain$beta[ ,i-1], 1,
chain$alpha[ ,i-1], chain$theta[i-1])
kds <- kd.smoother(Y, kdf, 1/chain$phiw[i-1], 1, chain$theta[i-1])
chain$kappa[ ,i] <- rnorm(N, kds$s[1, ], sqrt(kds$S[1,1, ]))
# Standardize the values
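    # Identifiability: kappa is rescaled to have mean zero and a fixed inclination,
    # and alpha and beta are adjusted so that alpha + beta*kappa is unchanged.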
inclination <- (chain$kappa[1,i] - chain$kappa[N,i]) / (N - 1)
level <- mean(chain$kappa[ ,i])
chain$kappa[ ,i] <- (chain$kappa[ ,i] - level) / inclination
chain$alpha[ ,i-1] <- chain$alpha[ ,i-1] + chain$beta[ ,i-1] * level
chain$beta[ ,i-1] <- chain$beta[ ,i-1] * inclination
for (j in 1:m) {
# Generate phiv
B <- sum((Y[j, ] - chain$alpha[j,i-1] - chain$beta[j,i-1] * chain$kappa[ ,i])^2)
chain$phiv[j,i] <- rgamma(1, N/2, B/2)
# Generate alpha and beta
X <- cbind(1, chain$kappa[ ,i])
aux.reg <- chol2inv(chol(t(X) %*% X))
mean.reg <- aux.reg %*% t(X) %*% Y[j, ]
var.reg <- (1 / chain$phiv[j,i]) * aux.reg
tmp <- mvrnorm(1, mean.reg, var.reg)
chain$alpha[j,i] <- tmp[1]
chain$beta[j,i] <- tmp[2]
}
# Generate phiw
B <- sum((chain$kappa[2:N,i] - chain$theta[i-1] - chain$kappa[1:(N-1),i])^2)
chain$phiw[i] <- rgamma(1, (N-1)/2, B/2)
# Generate theta
B <- 1 / (chain$phiw[i] * (N-1))
A <- chain$phiw[i] * (chain$kappa[N,i] - chain$kappa[1,i])
chain$theta[i] <- rnorm(1, B * A, sqrt(B))
}
# -------- Return --------
chain$kappa[,1] <- chain$kappa[,2]
chain = lapply(chain, function(x){if(length(dim(x)) > 1){x[,seq(bn+1, M, by = thin)]}else{x[seq(bn+1, M, by = thin)]}})
class(chain) <- "BLC"
chain$Y <- Y
chain$bn <- 0
chain$M <- length(chain$theta) ## Final sample size
chain$m0 <- prior$m0
chain$C0 <- prior$C0
chain
}
## File: /scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/blc.R
#' @title Dynamic Linear Model for mortality table graduation
#'
#' @description
#' This function fits a Dynamic Linear Model (DLM) for mortality data following
#' a Bayesian framework using Forward Filtering Backward Sampling algorithm to compute the posterior distribution.
#' The response variable is the log of the mortality rate, and it is modeled specifying the matrices Ft and Gt from the DLM equations.
#' Furthermore, the discount factor is used to control the smoothness of the fitted model. By default, a
#' linear growth model is specified.
#'
#' @usage
#' dlm(y, Ft = matrix(c(1,0), nrow = 1), Gt = matrix(c(1,0,1,1), 2), delta = 0.85,
#' prior = list(m0 = rep(0, nrow(Gt)), C0 = diag(100, nrow(Gt))),
#' prior.sig2 = list(a = 0, b = 0), M = 2000, ages = 0:(length(y)-1))
#'
#' @param y Numeric vector of log mortality rates.
#' @param Ft 1xp Matrix that specifies the observation equation, where p is the number of parameters. By default, 'Ft = matrix(c(1,0), nrow = 1)'.
#' @param Gt pxp Matrix that specifies the system equations. By default, Gt = matrix(c(1,0,1,1), 2).
#' @param delta Positive real value or real vector of the same length as y with values in the '(0, 1)' interval specifying the discount factor for each age. A higher value of delta results in a higher smoothness of the fitted curve. If a single value is defined, this same value is used for all ages. By default, delta is '0.85'.
#' @param prior A list with the prior mean vector \eqn{(m_0)} and covariance matrix \eqn{(C_0)} of \eqn{\theta_0} (state vector at time (age) t = 0). By default, a mean vector of zeros and a diagonal matrix with a common variance of 100 are used. Each element of the list must be named after the corresponding parameter (m0 for the mean vector and C0 for the covariance matrix).
#' @param prior.sig2 A list with the prior parameters (a, b) of the Inverted Gamma distribution for \eqn{\sigma^2}. Each element of the list must be named after the corresponding parameter (a for the shape parameter and b for the scale parameter).
#' @param M Positive integer that indicates the sampling size from the posterior distributions. The default value is 2000.
#' @param ages Numeric vector of the ages fitted. Default is '0:(length(y)-1)'.
#'
#' @details
#' Let \eqn{Y_t} be the log mortality rate at age \eqn{t}. A DLM is specified as follows:
#'
#' For \eqn{t = 0}:
#'
#' \eqn{\theta_0 \sim N_p (m_0, C_0)}
#'
#' Now, for \eqn{t \geq 1}:
#'
#' The observation equation:
#'
#' \eqn{Y_t = F_t \theta_t + v_t}
#'
#' The system equation:
#'
#' \eqn{\theta_t = G_t \theta_{t-1} + w_t}
#'
#' Where \eqn{F_t} and \eqn{G_t} are known matrices. \eqn{v_t} and \eqn{w_t} are independent
#' random errors with \eqn{v_t \sim N(0, \sigma^2)} and \eqn{w_t \sim N(0, \sigma^2 W_t)}. We
#' use the discount factors \eqn{\delta} to specify \eqn{W_t} as \eqn{W_t = C_t(1-\delta)/\delta},
#' where \eqn{C_t} is the conditional covariance matrix of \eqn{\theta_t}. So, if
#' \eqn{\delta = 0} there is no loss information as \eqn{t} increase (completely reducing the
#' smoothness of the fitted curve). \eqn{\delta} can be specified as a single value for all ages
#' or as a vector in which each element is associated with an age.
#'
#' A scheme described by Petris et al. (2009) for conjugate inference is used.
#' For more details, see Petris et al. (2009).
#'
#' @return A DLM class object.
#' \item{mu}{Posterior samples from \eqn{\mu_t = F_t \theta_t}, for all t.}
#' \item{theta}{Posterior samples from \eqn{\theta_t}, for all t.}
#' \item{sig2}{Posterior samples from \eqn{\sigma^2}.}
#' \item{param}{A list with the states parameters for filtering distribution (mt, Ct), predictive distribution (ft, Qt), smoothing distribution (as, Rs), and parameters of the posterior distribution for variance (alpha, beta).}
#' \item{info}{A list with information about the fitted model: the specification of the \eqn{F_t} and \eqn{G_t} matrices, the data y and the ages, the discount factor \eqn{\delta} specified, and the prior information.}
#'
#' @references Campagnoli, P., Petris, G., and Petrone, S. (2009). \emph{Dynamic linear models with R}. Springer-Verlag New York.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the log mortality rate of the 2010 male population ranging from 0 to 100 years old
#' USA2010 = USA[USA$Year == 2010,]
#' x = 0:100
#' Ex = USA2010$Ex.Male[x+1]
#' Dx = USA2010$Dx.Male[x+1]
#' y = log(Dx/Ex)
#'
#' ## Fitting DLM
#' fit = dlm(y)
#' print(fit)
#' summary(fit)
#'
#' ## Using other functions available in the package:
#' ## plotting (See "?plot.DLM" in the BayesMortality package for more options):
#' plot(fit)
#'
#' ## qx estimation (See "?fitted.DLM" in the BayesMortality package for more options):
#' fitted(fit)
#'
#' ## chain's plot (See "?plot_chain" for more options):
#' plot_chain(fit, param = c("mu[0]", "mu[100]"))
#'
#' ## Varying discount factor
#' fit2 = dlm(y, delta = c(rep(0.8, 36), rep(0.9, 65)))
#' plot(fit2)
#'
#'
#' @include ffbs.R
#'
#' @importFrom mvtnorm rmvt
#'
#'@seealso [fitted.DLM()], [predict.DLM()], [plot.DLM()], [print.DLM()] and [summary.DLM()] for `DLM` methods to native R functions [fitted()],
#'[plot()], [print()] and [summary()].
#'
#'[expectancy.DLM()] and [Heatmap.DLM()] for `DLM` methods to compute and visualise the truncated life expectancy
#'via [expectancy()] and [Heatmap()] functions.
#'
#'[dlm_close()] for close methods to expand the life tables.
#'
#'[plot_chain()] to visualise the markov chains, respectively.
#'
#' @export
dlm <- function(y, Ft = matrix(c(1,0), nrow = 1), Gt = matrix(c(1,0,1,1), 2), delta = 0.85,
prior = list(m0 = rep(0, nrow(Gt)), C0 = diag(100, nrow(Gt))),
prior.sig2 = list(a = 0, b = 0), M = 2000, ages = 0:(length(y)-1)){
## Validation
if(is.vector(Ft)) {Ft = t(as.matrix(Ft))}
if(nrow(Ft) != 1) stop("Ft must be a matrix with the following dimensions: 1 row and p columns.")
if(!(is.matrix(Gt))) {Gt = as.matrix(Gt)}
if(ncol(Ft) != nrow(Gt)) stop("Matrices Ft and Gt are not well defined.")
if(ncol(Gt) != nrow(Gt)) stop("Gt must be a square matrix.")
if(length(prior$m0) != nrow(Gt)) stop("Dimension of prior mean does not match the dimension of matrix Gt.")
if(nrow(prior$C0) != nrow(Gt)) stop("Dimension of prior covariance matrix does not match the dimension of matrix Gt.")
if(ncol(prior$C0) != nrow(Gt)) stop("Dimension of prior covariance matrix does not match the dimension of matrix Gt.")
if(!(length(delta) == length(y)) & length(delta) != 1) stop("delta must be a single value or have the same length as y.")
if(any(delta <= 0 | delta >= 1)) stop("delta must be in the interval (0,1).")
## Auxiliary objects
t = length(y)
p = length(prior$m0)
fit <- list()
## Filtering
filter = ff(y = y, Ft = Ft, Gt = Gt, m0 = prior$m0, C0 = prior$C0, delta = delta,
alpha0 = prior.sig2$a, beta0 = prior.sig2$b)
## Smoothing
smooth = bs(m = filter$m, C = filter$C, a = filter$a, R = filter$R, Gt = Gt,
alpha = filter$alpha, beta = filter$beta)
## Sampling
sig2 = 1/rgamma(M, smooth$alpha, smooth$beta)
theta <- array(NA, dim = c(M, t, p))
mu <- matrix(NA, nrow = M, ncol = t)
if(p == 1){
for(i in 1:t){
theta[,i,] = aux = rt(M, df = 2*smooth$alpha)*sqrt(c(smooth$Rs[i,,])*(smooth$beta/smooth$alpha)) + smooth$as[i,]
mu[,i] <- c(aux%*%t(Ft))
}
}else{
for(i in 1:t){
theta[,i,] = aux = mvtnorm::rmvt(M, sigma = smooth$Rs[i,,]*(smooth$beta/smooth$alpha),
delta = smooth$as[i,], df = 2*smooth$alpha, type = "shifted")
mu[,i] <- c(aux%*%t(Ft))
}
}
fit$mu = mu
fit$theta = theta
fit$sig2 = sig2
# fit$Wt = filter$Wt
fit$param = list(mt = filter$m, Ct = filter$C,
ft = filter$f, Qt = filter$Q,
as = smooth$as, Rs = filter$Rs,
alpha = smooth$alpha, beta = smooth$beta)
fit$info = list(y = y,
ages = ages,
Ft = Ft,
Gt = Gt,
delta = delta,
prior = prior,
prior.sig2 = prior.sig2)
return(structure(fit, class = "DLM"))
}
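# Illustration of the discount-factor construction described in the Details
# section above (illustrative values; the filter updates Wt age by age):
Ct <- diag(2)
delta <- 0.85
Wt <- Ct * (1 - delta) / delta  # evolution covariance shrinks as delta approaches 1
Wt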
## File: /scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/dlm.R
#' @title DLM: Fitting the advanced ages of the life tables
#'
#' @description This function receives an object of the class `DLM` fitted by the dlm() function
#' and fits a closing method to expand the life tables dataset to a maximum age argument inputed
#' by the user.
#' There are three closing methods available: 'linear', 'gompertz' and 'plateau'.
#'
#' @usage
#' dlm_close(fit, method = c("linear", "gompertz", "plateau"),
#' x0 = max(fit$info$ages), max_age = 120, k = 7,
#' weights = seq(from = 0, to = 1, length.out = k),
#' new_data = NULL)
#'
#' @param fit Object of the class `DLM` adjusted by the dlm() function.
#' @param method Character string specifying the closing method to be fitted, with them being: 'plateau', 'linear' or 'gompertz'.
#' @param x0 Integer with the starting age the closing method will be fitted from. Default is the last age fitted by the 'DLM' object.
#' @param max_age Integer with the maximum age the closing method will be fitted. Default age is '120'.
#' @param k Integer representing the size of the age-interval to be mixed with the 'linear' or 'gompertz' closing methods for a smooth graduation. If k = 0, no mixing will be made. Default: 7.
#' @param weights Vector of weights of the closing method used in the mixture of the closing method and the fitted model in the mixing age group. The vector's length should be equal to k. For a better understanding of this parameter and the mixture applied in this function, see Details.
#' @param new_data Vector containing the log mortality rates of ages after the x0 input. This is an optional argument used in the 'linear' and 'Gompertz' closing methods.
#'
#' @details
#' There are three types of age groups when the closing method is applied: a group
#' where only the fitted model (DLM) computes the death probabilities, followed by a
#' group in which the death probabilities are a mix (more precisely, a weighted mean)
#' of the fitted model and the closing method, followed by a group in which the
#' death probabilities are computed just by the closing method. The mix is applied
#' so that the transition of the death probabilities between the fitted model
#' and the closing method occurs smoothly.
#'
#' The parameters 'x0' and 'k' define the mixing age group: the 'k' ages ending at
#' 'x0', that is, ages \eqn{x0-k+1, ..., x0}. Therefore,
#' the parameter 'weights' must have length equal to \eqn{k}. In this case,
#' the death probability is calculated as follows. Consider \eqn{model_x} and \eqn{close_x}
#' as the death probability of the fitted model and closing method in the age \eqn{x},
#' respectively. Then, the resulting death probability of the mix is calculated as:
#'
#' \eqn{q_x = w_x close_x + (1-w_x) model_x},
#'
#' where \eqn{w_x} represents the weight of the closing method at age \eqn{x}. This
#' procedure is applied only in the linear and Gompertz methods.
#'
#'
#' The three closing methods implemented by the function are:
#'
#' 1.'linear' method: Fits a linear regression starting at age x0 - k until the last age with data available
#'
#' 2.'gompertz' method: Used as the closing method of the 2010-2012 English Life Table No. 17, fits the Gompertz mortality law via SIR using the same available data as the 'linear' method.
#'
#' 3.'plateau' method: Keeps the death probability (qx) constant after the x0 argument.
#'
#' @return Returns a `ClosedDLM` class object with the predictive chains of the death probability
#' (qx) from first fitted age to max_age argument, the data information utilized by the function and the
#' closing method chosen.
#'
#' @references Dodd, Erengul, Forster, Jonathan, Bijak, Jakub, & Smith, Peter 2018. “Smoothing mortality data: the English life table, 2010-12.” \emph{Journal of the Royal Statistical Society: Series A (Statistics in Society)}, 181(3), 717-735.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the exposure and the death count of the year 2010, ranging from 0 to 90 years old:
#' USA2010 = USA[USA$Year == 2010,]
#' x = 0:100
#' Ex = USA2010$Ex.Male[x+1]
#' Dx = USA2010$Dx.Male[x+1]
#' y <- log(Dx/Ex)
#'
#' fit <- dlm(y, M = 100)
#'
#' ## Applying the closing function with different methods:
#' close1 = dlm_close(fit, method = "plateau")
#'
#' ### Getting new data for the linear and gompertz methods:::
#' x2 = 101:110
#' Ex2 = USA2010$Ex.Male[x2+1]
#' Dx2 = USA2010$Dx.Male[x2+1]
#' y2 <- log(Dx2/Ex2)
#'
#' close2 = dlm_close(fit, method = "linear",
#' new_data = y2)
#'
#' #### Using the other functions available in the package with the 'ClosedDLM' object:
#'
#' ## qx estimation (See "?fitted" in the BayesMortalityPlus package for more options):
#' fitted(close2)
#'
#' ## life expectancy (See "?expectancy.DLM" for more options)
#' expectancy(close2, age = seq(0,120,by=20), graph = FALSE)
#'
#' ## plotting (See "?plot" in the BayesMortalityPlus package for more options):
#' plot(list(close1, close2, fit),
#' colors = c("red4","seagreen", "blue"),
#' labels = c("Plateau method","Linear method", "DLM fitted"),
#' plotData = FALSE)
#'
#'
#' @include gompertz_dlm.R
#'
#' @importFrom MASS mvrnorm
#'
#' @seealso [fitted.DLM()], [plot.DLM()], [print.DLM()] and [summary.DLM()] for `ClosedDLM` methods to native R functions [fitted()],
#'[plot()], [print()] and [summary()].
#'
#'[expectancy.DLM()] and [Heatmap.DLM()] for `ClosedDLM` methods to compute and visualise the truncated life expectancy
#'via [expectancy()] and [Heatmap()] functions.
#'
#'
#' @export
dlm_close = function(fit, method = c("linear", "gompertz", "plateau"), x0 = max(fit$info$ages),
max_age = 120, k = 7, weights = seq(from = 0, to = 1, length.out = k),
new_data = NULL){
## Pre-processing
method = match.arg(method)
## Checklist
if(!inherits(fit, "DLM")) { stop("fit argument must be a 'DLM' Object returned by dlm() function.") }
if (length(weights) != k) { stop("The length of the weights vector is not equal to k.") }
if(x0 > max(fit$info$ages)) { stop("x0 argument exceeds the maximum age of the model.") }
if(x0 >= max_age) { stop("The choices of values for x0 and max_age are not consistent, x0 must be less than max_age.") }
if(method == "plateau") { k = 1; weights = 0; new_data = NULL }
if((method == "gompertz" | method == "linear") & is.null(new_data)) { stop("gompertz and linear closing methods require new_data argument.") }
## Check if there are overlapping data between the model and the user input:
min_age = min(fit$info$ages)
if((x0 < max(fit$info$ages)) & is.null(new_data)){
new_data = fit$info$y[(x0+1-min_age):max(fit$info$ages+1-min_age)]
}
fit$info$y = fit$info$y[fit$info$ages <= x0]
if(x0 - k < min_age) { stop("x0 or k arguments are not correct, they are not consistent with the initial age.") }
## Adding input data to the model data:
age_last_data = x0 + length(new_data)
new_data = c(fit$info$y, new_data)
if(max_age-age_last_data < 0) {max_age = age_last_data}
## Completing the data for the closing method:
new_data = c(new_data, rep(NA_real_, max_age-age_last_data))
## Checking if the model starts at the age 0:
if(min_age != 0){ pre_data = rep(NA_real_, length(1:min_age)) }else{ pre_data = NULL }
## Data between 0 and the maximum age:
full_data = c(pre_data, new_data)
if(method == "linear" | method == "gompertz"){
data = data.frame(x = (x0-k+1):(age_last_data))
data$y = full_data[data$x + 1]
if(nrow(data) < 2) { stop("Insufficient data to apply the closing method. Decrease the value of x0 argument or increase the value of k or try different data.") }
}
## End length of the Markov chains:
num_sim = length(fit$sig2)
## Ages where the closing method will be applied:
old_x = (x0 - k + 1):max_age
old_len = length(old_x)
## Matrix to save the fit:
closed = matrix(0, nrow = num_sim, ncol = old_len)
colnames(closed) = old_x
## Returns of the function: qx chains, x = min_age, ..., max_age
ret = matrix(NA_real_, nrow = num_sim, ncol = max_age + 1)
## Closing methods
if(method == "plateau"){
#gets the death probability of x0 and applies it till max_age
sim <- 1 - exp(-exp(rnorm(num_sim, fit$mu[,x0-min_age+1], sqrt(0.01*fit$sig2))))
closed <- matrix(sim, num_sim, old_len)
colnames(closed) = old_x
# for (i in 1:num_sim){
# sim = rnorm(1, fit$mu[i,x0-min_age+1], sqrt(fit$sig2[i]))
# closed[i, ] = exp(sim)
# }
}else if(method == "linear"){
mod = lm(y ~ x, data = data)
pred = predict(mod, newdata = data.frame(x = old_x))
X = model.matrix(mod)
Xpred = cbind(1, old_x)
C1 = t(X) %*% X
Cpred = Xpred %*% chol2inv(chol(C1)) %*% t(Xpred)
RMAT = (diag(old_len) + Cpred)
for (i in 1:num_sim){
sig = sqrt(0.01*fit$sig2[i])
SIGMApred = sig * RMAT
sim_vals = MASS::mvrnorm(1, mu = pred, Sigma = SIGMApred)
closed[i, ] = 1 - exp(-exp(sim_vals))
}
####################################################################################################
}else if(method == "gompertz"){
param = sir_gompertz_dlm(fit, data, resampling_size = num_sim)
for (i in 1:num_sim){
gomp = param[i,1]*exp(param[i,2]*old_x)
sim = 1 - exp(-exp(rnorm(old_len, log(gomp), sqrt(0.01*fit$sig2[i]))))
closed[i, ] = sim
}
}
####################################################################################################
## qx error margin (default was 0.02)
eps = 0.01
# Preventing death probabilities above 1:
closed = apply(closed, 2, function(x) ifelse(x < 1 - eps, x, 1))
new_age = 0:max_age
new_len = length(new_age)
fitted = matrix(NA_real_, nrow = num_sim, ncol = new_len)
colnames(fitted) = new_age
####################################################################################################
t = ncol(fit$mu)
for (i in 1:num_sim){
sim = rnorm(t, fit$mu[i,], sqrt(0.01*fit$sig2[i]))
fitted[i, (min_age+1):(t+min_age)] = 1 - exp(-exp(sim))
}
####################################################################################################
# Model-only indexes: ages 0 through x0 - k
idx_mod_only = 0:(x0 - k) + 1
# Mix indexes: ages x0 - k + 1 through x0
idx_mix = (x0 - k + 1):x0 + 1
# Closing-method-only indexes: ages x0 + 1 through max_age
idx_close = (x0 + 1):max_age + 1
idx_mnc = c(idx_mix, idx_close)
ret[ , idx_mod_only] = fitted[ , idx_mod_only]
ret[ , idx_mnc] = closed
# Mix
for (i in 1:num_sim) {
ret[i, idx_mix] = weights * ret[i, idx_mix] + (1 - weights) * fitted[i, idx_mix]
}
return(structure(list(qx = ret,
info = list(ages = new_age,
y = full_data),
method = method), class = "ClosedDLM"))
}
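# Sketch of the model/closing-method mix applied on the k mixing ages above,
# with hypothetical death probabilities (the default weights ramp from 0 to 1,
# so the closing method gradually takes over):
k <- 7
w <- seq(from = 0, to = 1, length.out = k)
qx_model <- rep(0.30, k)  # hypothetical model-based qx on the mixing ages
qx_close <- rep(0.40, k)  # hypothetical closing-method qx on the same ages
w * qx_close + (1 - w) * qx_model  # mixed qx, as in the loop over idx_mix above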
## File: /scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/dlm_close.R
#' @name expectancy.BLC
#' @rdname expectancy.BLC
#'
#' @title BLC: Life expectancy
#'
#' @description Computes the fitted life expectancy for a specific age for each year in fit or prediction. It also calculates the limits of credible intervals.
#'
#' @param x A `BLC` or `PredBLC` object.
#' @param at A number (or numeric vector) that determines at which age(s) the life expectancy is calculated, indexed on the ages used in the fit or prediction. For instance, at = 1 corresponds to the first age used in the fitted model.
#' @param prob A number that specifies the probability of the credible interval. Default is '0.95'.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A list that contains three vectors with the fitted values of life expectancy and the lower and upper limits of the credible intervals for each year used in fitted model or for the prediction.
#'
#' @examples
#' ## Importing log-mortality data from Portugal:
#' data(PT)
#' Y <- PT
#'
#' ## Fitting the model
#' fit = blc(Y = Y, M = 100, bn = 20)
#'
#' ## Life expectancy for the years used in model fitted
#' expectancy(fit)
#'
#' ## Life expectancy for the tenth and thirtieth age in the years used in
#' ## model fitted (27 and 47 y.o.)
#' expectancy(fit, at = c(10,30))
#'
#' @seealso [expectancy.HP()] and [expectancy.DLM()] for `HP` and `DLM` methods.
#'
#' [Heatmap.BLC()] for `BLC` method to drawing a Heatmap for the truncated life expectancy.
#'
#' @export
expectancy.BLC <- function(x, at = NULL, prob = 0.95, ...) {
obj <- x
objClass <- class(obj)
supportedClasses <- c("BLC", "ARBLC")
if (!any(objClass %in% supportedClasses)) {
stop("Invalid object type")
}
L <- nrow(obj$kappa)
q <- nrow(obj$alpha)
if (!is.null(at)){
if (mode(at) != "numeric"){stop("Expected `at` to be numeric")}
if (min(at) == 0){stop("Expected `at` to be greater than 0")}
}else{
at <- 1:q
}
fits <- fitted(obj, prob = prob)
exp_total <- matrix(NA_real_, nrow = q, ncol = L)
#cumprod for life expectancy (px)
for (i in 1:(q-1)){
exp_total[i,] <- apply(fits$mean[i:q,], 2, function(x) sum(cumprod(1-x)))
}
exp_total[q,] <- 1 - (fits$mean[q,])
exp_total <- round(exp_total,3)
##ci
exp_inf <- matrix(NA_real_, nrow = q, ncol = L); exp_sup <- matrix(NA_real_, nrow = q, ncol = L)
### upper CI:
for (i in 1:(q-1)){
exp_sup[i,] <- apply(fits$lower[i:q,], 2, function(x) sum(cumprod(1-x)))
}
exp_sup[q,] <- 1 - (fits$lower[q,])
exp_sup <- round(exp_sup,3)
### lower CI:
for (i in 1:(q-1)){
exp_inf[i,] <- apply(fits$upper[i:q,], 2, function(x) sum(cumprod(1-x)))
}
exp_inf[q,] <- 1 - (fits$upper[q,])
exp_inf <- round(exp_inf,3)
colnames(exp_total) <- colnames(obj$Y)
colnames(exp_sup) <- colnames(obj$Y)
colnames(exp_inf) <- colnames(obj$Y)
row.names(exp_total) <- row.names(obj$Y)
row.names(exp_sup) <- row.names(obj$Y)
row.names(exp_inf) <- row.names(obj$Y)
ret <- list(expectancy = exp_total[at,], upper = exp_sup[at,], lower = exp_inf[at,])
ret
}
#'
#' @export
expectancy.PredBLC <- function(x, at = NULL, prob = 0.95, ...) {
obj <- x
if (!inherits(obj,"PredBLC")) stop("Invalid object")
L <- obj$h
q <- dim(obj$y)[3]
if (!is.null(at)){
if (mode(at) != "numeric"){stop("Expected `at` to be numeric")}
if (min(at) < 1){stop("Expected `at` to be greater than or equal to 1")}
}else{
at <- 1:q
}
fits <- fitted(obj, prob = prob)
exp_total <- matrix(NA_real_, nrow = q, ncol = L)
for (i in 1:(q-1)){
exp_total[i,] <- apply(fits$mean[i:q,], 2, function(x) sum(cumprod(1-x)))
}
exp_total[q,] <- 1 - (fits$mean[q,])
exp_total <- round(exp_total,3)
##ci
exp_inf <- matrix(NA_real_, nrow = q, ncol = L); exp_sup <- matrix(NA_real_, nrow = q, ncol = L)
### upper CI:
for (i in 1:(q-1)){
exp_sup[i,] <- apply(fits$lower[i:q,], 2, function(x) sum(cumprod(1-x)))
}
exp_sup[q,] <- 1 - (fits$lower[q,])
exp_sup <- round(exp_sup,3)
### lower CI:
for (i in 1:(q-1)){
exp_inf[i,] <- apply(fits$upper[i:q,], 2, function(x) sum(cumprod(1-x)))
}
exp_inf[q,] <- 1 - (fits$upper[q,])
exp_inf <- round(exp_inf,3)
ret <- list(expectancy = exp_total[at,], upper = exp_sup[at,], lower = exp_inf[at,])
ret
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/expectancy_blc.R
|
#' @name expectancy
#' @rdname expectancy
#' @title Generic expectancy function
#'
#' @description Generic function for the \code{expectancy} method.
#'
#'
#' @param x Object of one of these classes: `HP`, `DLM`, `BLC`, `ClosedHP`, `ClosedDLM`, or `PredBLC`.
#' @param ... Further arguments passed to or from other methods.
#'
#'
#' @return
#' A data.frame and (if graph = TRUE) a plot for `HP`, `DLM`, `ClosedHP` and `ClosedDLM` methods.
#' A list that contains three vectors with the fitted values of life expectancy and the lower and upper limits of the credible intervals for each year used in the fitted model or in the prediction, for `BLC` and `PredBLC` methods.
#'
#' @details This function computes the (curtate) life expectancy given by:
#'
#' \eqn{e_x = \sum_{t=1}^{\infty} {}_{t}p_x}
#'
#' where:
#'
#' \eqn{{}_{t}p_x = p_x \times p_{x+1} \times \cdots \times p_{x+t-1}}
#'
#' @seealso [expectancy.HP()], [expectancy.DLM()] and [expectancy.BLC()].
#' @export
expectancy = function(x, ...) UseMethod("expectancy")
#'
#' @export
expectancy.default = function(x, ...) print(x, ...)
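## A minimal illustrative sketch (not part of the package API) of the formula
## above: given death probabilities qx for consecutive ages starting at x, the
## curtate life expectancy e_x is the sum of the cumulative products of the
## survival probabilities px = 1 - qx. The qx values below are made up.
qx_example <- c(0.01, 0.012, 0.015, 0.02, 0.03, 0.05, 0.09, 0.15, 0.25, 0.40)
ex_example <- sum(cumprod(1 - qx_example)) ## life expectancy at the first age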
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/expectancy_default.R
|
#' @name expectancy.DLM
#' @rdname expectancy.DLM
#'
#' @title DLM: Life expectancy
#'
#' @description This function computes the life expectancy for each age for the Dynamic Linear Model.
#'
#'
#' @param x Object of the following classes: `DLM` or `ClosedDLM`.
#' @param age Numeric vector specifying the ages to calculate the life expectancy. The default is a sequence (0, 10, 20, ...) until the last decade used in the fitted model.
#' @param graph Logical value (TRUE or FALSE). If TRUE, it also returns a plot. The default value is TRUE.
#' @param max_age Positive number indicating the last age to be considered to compute the life expectancy (prediction will be considered to match the age interval if needed). This argument is only necessary with objects of the class `DLM`.
#' @param prob A number specifying the probability of credible interval. The default value is 0.95.
#' @param ... Further arguments passed to or from other methods.
#'
#'
#' @return A data.frame and (if graph = TRUE) a plot.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' # Example 1: --------------------------------
#'
#' USA1990 = USA[USA$Year == 1990,]
#'
#' Ex = USA1990$Ex.Total[1:111]
#' Dx = USA1990$Dx.Total[1:111]
#' y <- log(Dx/Ex)
#'
#' fit <- dlm(y, M = 100)
#' expectancy(fit)
#'
#' # Example 2: -------------------------------
#'
#' # Using some arguments:
#'
#' expectancy(fit, age = c(0,20,30,60),
#' prob = 0.99, max_age = 90, graph = FALSE)
#'
#'
#' @include fitted_dlm.R
#'
#' @import ggplot2
#'
#' @seealso [expectancy.HP()] and [expectancy.BLC()] for `HP` and `BLC` methods.
#'
#' [Heatmap.DLM()] and [Heatmap.list()] for `DLM` or `list` methods for drawing a heatmap of the truncated life expectancy.
#'
#' @export
expectancy.DLM <- function(x, age = seq(0, max(fit$info$ages), by = 10),
graph = TRUE,
max_age = 110,
prob = 0.95,
...){
fit = x
if(max(age) > max_age){
stop("Invalid age interval. Check the max_age argument")
}
max_age = max_age+1
## calculating qx and ci
if(max_age > max(fit$info$ages)){
pred <- predict(fit, h = (max_age - max(fit$info$ages)), prob = prob )
aux <- fitted(fit, prob = prob)
mu <- c(aux$qx.fitted, pred$qx.fitted)
ic <- rbind(aux[,-c(1,2)], data.frame(qx.lower = pred$qx.lower, qx.upper = pred$qx.upper))
}else{
aux <- fitted(fit, prob = prob)
mu <- aux$qx.fitted
ic <- aux[,-2]
}
exp_total <- rep(NA_real_, max_age); exp_inf <- rep(NA_real_,max_age); exp_sup <- rep(NA_real_,max_age)
# cumprod for life expectancy (px)
for (i in 1:max_age){
exp_total[i] <- sum(cumprod(1-mu[i:max_age])) ## px
exp_sup[i] <- sum(cumprod(1-ic$qx.lower[i:max_age])) ## upper CI
exp_inf[i] <- sum(cumprod(1-ic$qx.upper[i:max_age])) ## lower CI
}
exp_total <- round(exp_total,2)
exp_sup <- round(exp_sup,2)
exp_inf <- round(exp_inf,2)
tab <- data.frame(x = 0:(max(age)),
exp_total[1:(max(age)+1)],
exp_inf[1:(max(age)+1)],
exp_sup[1:(max(age)+1)])
tab[is.na(tab)] = 0
colnames(tab) <- c("age","expectancy","ci.lower","ci.upper")
if(graph == TRUE){
p <- ggplot(data=tab) + theme_light() +
geom_line(aes(x=age,y=expectancy)) +
geom_ribbon(aes(x=age, ymin= ci.lower, ymax= ci.upper), alpha=0.3)
return(list(expectancy=tab[tab$age %in% age,],
plot=p))
}else{
return(tab[tab$age %in% age,])
}
}
#' @export
expectancy.ClosedDLM <- function(x, age = seq(0, max(fit$info$ages), by = 10),
graph = TRUE, prob = 0.95, ...){
fit = x
### sanity check
if(max(age) > max(fit$info$ages)){
stop("Invalid age interval. Check the ages modeled")
}
## last age modeled
max_age <- max(fit$info$ages)
## calculating qx and ci
aux <- fitted(fit, prob = prob)
mu <- aux$qx.fitted
ic <- aux[,-2]
exp_total <- rep(NA_real_, max_age); exp_inf <- rep(NA_real_,max_age); exp_sup <- rep(NA_real_,max_age)
# cumprod for life expectancy (px)
for (i in 1:max_age){
exp_total[i] <- sum(cumprod(1-mu[i:max_age])) ## px
exp_sup[i] <- sum(cumprod(1-ic$qx.lower[i:max_age])) ## upper CI
exp_inf[i] <- sum(cumprod(1-ic$qx.upper[i:max_age])) ## lower CI
}
exp_total <- round(exp_total,2)
exp_sup <- round(exp_sup,2)
exp_inf <- round(exp_inf,2)
tab <- data.frame(x = 0:(max(age)),
exp_total[1:(max(age)+1)],
exp_inf[1:(max(age)+1)],
exp_sup[1:(max(age)+1)])
tab[is.na(tab)] = 0
colnames(tab) <- c("age","expectancy","ci.lower","ci.upper")
if(graph == TRUE){
p <- ggplot(data=tab) + theme_light() +
geom_line(aes(x=age,y=expectancy)) +
geom_ribbon(aes(x=age, ymin= ci.lower, ymax= ci.upper), alpha=0.3)
return(list(expectancy=tab[tab$age %in% age,],
plot=p))
}else{
return(tab[tab$age %in% age,])
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/expectancy_dlm.R
|
#' @name expectancy.HP
#' @rdname expectancy.HP
#'
#' @title HP: Life expectancy
#'
#' @description This function computes the life expectancy for each age for the Heligman-Pollard model.
#'
#'
#' @param x Object of the class `HP` or `ClosedHP` fitted by hp() or hp_close() functions.
#' @param Ex Numeric vector with the exposure by age. This argument is only necessary when using poisson and binomial models with objects of the class `HP`.
#' @param age Numeric vector specifying the ages to calculate the life expectancy. The default is a sequence (0, 10, 20, ...) until the last decade used in the fitted model.
#' @param graph Logical value (TRUE or FALSE). If TRUE, it returns a plot.
#' @param max_age Positive number indicating the last age to be considered to compute the life expectancy (extrapolation will be considered to match the age interval if needed). This argument is only necessary with objects of the class `HP`.
#' @param prob A number specifying the probability of the credible interval. The default value is 0.95.
#' @param ... Further arguments passed to or from other methods.
#'
#'
#' @return A data.frame and (if graph = TRUE) a plot.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' # Example 1: --------------------------------
#'
#' USA1990 = USA[USA$Year == 1990,]
#'
#' Ex = USA1990$Ex.Total[1:91]
#' Dx = USA1990$Dx.Total[1:91]
#' x = 0:90
#'
#' fit <- hp(x, Ex, Dx, model = "binomial", M = 1000, bn = 0, thin = 10)
#' expectancy(fit)
#'
#'
#' # Example 2: -------------------------------
#'
#' # Using some arguments:
#'
#' Ex = USA1990$Ex.Total[1:106]
#'
#' expectancy(fit, Ex = Ex, age = c(0,20,30,60,105),
#' max_age = 105, prob = 0.99, graph = FALSE)
#'
#'
#' @include fitted_hp.R
#'
#' @import ggplot2
#'
#' @seealso [expectancy.DLM()] and [expectancy.BLC()] for `DLM` and `BLC` methods.
#'
#' [Heatmap.HP()] and [Heatmap.list()] for `HP` or `list` methods for drawing a heatmap of the truncated life expectancy.
#'
#' @export
expectancy.HP <- function(x, Ex = NULL, age = NULL, graph = TRUE,
max_age = 110, prob = 0.95, ...){
fit = x
if(is.null(age)){ age = seq(0, max(fit$data$x),by = 10) }
## Checking age
if(max(age) > max_age){
stop("Invalid age interval. Check the max_age argument")
}
## calculating qx and px ----
# extrapolating the age interval to max_age
if(fit$info$model %in% c("binomial","poisson")){
if(is.null(Ex)){
## extrapolate the last observed exposure up to max_age
Ex <- c(fit$data$Ex, rep(fit$data$Ex[length(fit$data$Ex)], (max_age+1)-length(fit$data$Ex)))
}
aux <- fitted(fit, age = 0:max_age, Ex = Ex, prob = prob)
}else{
aux <- fitted(fit, age = 0:max_age, prob = prob)
}
est_IC <- aux
qx_est <- aux$qx.fitted
exp_total <- rep(NA_real_, max_age); exp_inf <- rep(NA_real_,max_age); exp_sup <- rep(NA_real_,max_age)
# cumprod for life expectancy (px)
for (i in 1:max_age){
exp_total[i] <- sum(cumprod(1-qx_est[i:max_age])) ## px
exp_sup[i] <- sum(cumprod(1-est_IC$qx.lower[i:max_age])) ## upper CI
exp_inf[i] <- sum(cumprod(1-est_IC$qx.upper[i:max_age])) ## lower CI
}
exp_total <- round(exp_total,2)
exp_sup <- round(exp_sup,2)
exp_inf <- round(exp_inf,2)
tab <- data.frame(x = 0:max(age),
exp_total[1:(max(age)+1)],
exp_inf[1:(max(age)+1)],
exp_sup[1:(max(age)+1)])
tab[is.na(tab)] = 0
colnames(tab) <- c("age","expectancy","ci.lower","ci.upper")
if(graph == TRUE){
p <- ggplot(data=tab) + theme_light() +
geom_line(aes(x=age,y=expectancy)) +
geom_ribbon(aes(x=age, ymin= ci.lower, ymax= ci.upper), alpha=0.3)
return(list(expectancy=tab[tab$age %in% age,],
plot=p))
}else{
return(tab[tab$age %in% age,])
}
}
#' @export
#'
expectancy.ClosedHP <- function(x, age = seq(0, max(fit$data$x),by = 10),
graph = TRUE, prob = 0.95, ...){
fit = x
max_age <- max(fit$data$x)
###sanity
if(max(age) > max_age){
stop("Invalid age interval. Check the ages modeled")
}
## calculating qx and px
aux <- fitted(fit, prob = prob)
qx_est <- aux$qx.fitted
est_IC <- aux
exp_total <- rep(NA_real_, max_age); exp_inf <- rep(NA_real_,max_age); exp_sup <- rep(NA_real_,max_age)
# cumprod for life expectancy (px)
for (i in 1:max_age){
exp_total[i] <- sum(cumprod(1-qx_est[i:max_age])) ## px
exp_sup[i] <- sum(cumprod(1-est_IC$qx.lower[i:max_age])) ## upper CI
exp_inf[i] <- sum(cumprod(1-est_IC$qx.upper[i:max_age])) ## lower CI
}
exp_total <- round(exp_total,2)
exp_sup <- round(exp_sup,2)
exp_inf <- round(exp_inf,2)
tab <- data.frame(x = 0:(max(age)),
exp_total[1:(max(age)+1)],
exp_inf[1:(max(age)+1)],
exp_sup[1:(max(age)+1)])
tab[is.na(tab)] = 0
colnames(tab) <- c("age","expectancy","ci.lower","ci.upper")
if(graph == TRUE){
p <- ggplot(data=tab) + theme_light() +
geom_line(aes(x=age,y=expectancy)) +
geom_ribbon(aes(x=age, ymin=ci.lower, ymax=ci.upper), alpha=0.3)
return(list(expectancy=tab[tab$age %in% age,],
plot=p))
}else{
return(tab[tab$age %in% age,])
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/expectancy_hp.R
|
## Filtering
ff = function(m0, C0, y, alpha0, beta0, Ft, Gt, delta){
N = length(y)
p = length(m0)
resultado.m = matrix(NA_real_, N, p)
resultado.C = array(NA_real_, c(N,p,p))
resultado.W = array(NA_real_, c(N,p,p))
resultado.a = matrix(NA_real_, N, p)
resultado.R = array(NA_real_, c(N,p,p))
resultado.f = c()
resultado.Q = c()
resultado.alpha = c()
resultado.beta = c()
if(length(delta) == 1){ delta = rep(delta, N) }
V = 0.01
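## V is the fixed observational variance scale; the unknown multiplicative
## factor sigma2 is learned conjugately through (alpha, beta) updated below.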
## Kalman Filter
### Step 1
Wt = C0 * (1 - delta[1]) / delta[1]
at = Gt %*% m0
Rt = Gt %*% C0 %*% t(Gt) + Wt
ft = Ft %*% at
Qt = (Ft %*% Rt %*% t(Ft) + V)[1,1]
et = y[1] - ft
At = (Rt %*% t(Ft)) / Qt
mt = at + At %*% et
Ct = Rt - At %*% Ft %*% Rt
alphat = alpha0 + 1/2
betat = beta0 + 0.5*t(et)%*%et/Qt
resultado.m[1,] = mt
resultado.C[1,,] = Ct
resultado.W[1,,] = Ct * (1 - delta[1]) / delta[1]
resultado.a[1,] = at
resultado.R[1,,] = Rt
resultado.f[1] = ft ##
resultado.Q[1] = Qt ##
resultado.alpha[1] = alphat
resultado.beta[1] = betat
### Step 2
for (j in 2:N) {
Wt = Ct * (1 - delta[j]) / delta[j]
at = Gt %*% mt
Rt = Gt %*% Ct %*% t(Gt) + Wt
ft = Ft %*% at
Qt = (Ft %*% Rt %*% t(Ft) + V)[1,1]
et = y[j] - ft
At = (Rt %*% t(Ft)) / Qt
mt = at + At %*% et
Ct = Rt - At %*% Ft %*% Rt
alphat = alphat + 1/2
betat = betat + 0.5*t(et)%*%et/Qt
resultado.m[j,] = mt
resultado.C[j,,] = Ct
resultado.W[j,,] = Wt
resultado.a[j,] = at
resultado.R[j,,] = Rt
resultado.f[j] = ft ##
resultado.Q[j] = Qt ##
resultado.alpha[j] = alphat
resultado.beta[j] = betat
}
return(list(m = resultado.m, C = resultado.C,
a = resultado.a, R = resultado.R,
W = resultado.W, f = resultado.f,
Qt = resultado.Q,
alpha = resultado.alpha,
beta = resultado.beta))
}
## Backward Sampling
bs = function(m, C, a, R, Gt, alpha, beta){
N = nrow(m)
p = ncol(m)
as = matrix(NA, N, p)
Rs = array(NA, c(N, p, p))
# theta <- matrix(NA,N,p)
## Distribution of thetaT
as[N,] = m[N,] ##
Rs[N,,] = C[N,,] ##
alpha = alpha[N]
beta = beta[N]
### step 3 - Smoothing
for (t in (N - 1):1) {
Bt = C[t,,] %*% t(Gt) %*% chol2inv(chol(R[t + 1,,]))
Rs[t,,] = C[t,,] + Bt %*% (Rs[t + 1,,] - R[t + 1,,]) %*% t(Bt)
as[t,] = m[t,] + Bt %*% (as[t + 1,] - a[t + 1,])
}
return(list(as = as, Rs = Rs, alpha = alpha, beta = beta))
}
# Filtering and smoothing with FFBS and discount factor W
ffbs <- function(m0, C0, y, alpha0, beta0, Ft, Gt, delta){
aux.f = ff(m0, C0, y, alpha0, beta0, Ft, Gt, delta)
res = bs(aux.f$m, aux.f$C, aux.f$a, aux.f$R, Gt, aux.f$alpha, aux.f$beta)
# res$W = aux.f$W
return(res)
}
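## A minimal usage sketch (illustrative, not part of the package API): running
## the FFBS routine above on a simulated local level model with a single state
## (Ft = Gt = 1) and one discount factor. All values below are assumptions.
set.seed(1)
y_sim <- cumsum(rnorm(50, 0, 0.05)) + rnorm(50, 0, 0.1) ## random walk + noise
res_sim <- ffbs(m0 = 0, C0 = diag(1), y = y_sim, alpha0 = 0.01, beta0 = 0.01,
Ft = matrix(1, 1, 1), Gt = matrix(1, 1, 1), delta = 0.9)
str(res_sim) ## as/Rs: smoothed means and variances; alpha/beta: posterior of sigma2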
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/ffbs.R
|
#' @name fitted.BLC
#' @rdname fitted.BLC
#'
#' @title BLC: Fitted death probabilities (qx)
#'
#' @description Computes the fitted values associated with each age and year based on
#' the resulting chains from a fitted BLC model. In addition, this function also
#' evaluates the values of lower and upper limits of the credible interval.
#'
#'
#' @param object A `BLC` or `PredBLC` object, result of a call to blc() or predict() function.
#' @param prob A real number that indicates the probability of the credible interval.
#' @param ... Other arguments.
#'
#' @return A list with the matrices of fitted values and lower and upper limits of the credible interval for each age and year.
#'
#' @examples
#' ## Importing log-mortality data from Portugal:
#' data(PT)
#' Y <- PT
#'
#' ## Fitting the model
#' fit = blc(Y = Y, M = 100, bn = 20)
#'
#' ## Death probability (qx) estimates for each age and year in the fitted model
#' fitted(fit, prob = 0.95)
#'
#' @seealso [fitted.HP()] and [fitted.DLM()] for `HP` or `DLM` methods.
#'
#' @export
fitted.BLC <- function(object, prob = 0.95, ...) {
obj = object
N <- obj$M - obj$bn
L <- nrow(obj$kappa)
q <- nrow(obj$alpha)
fits <- array(dim = c(q, L, N))
for (i in (obj$bn+1):obj$M) {
fits[ , ,i - obj$bn] <- obj$alpha[ ,i] + obj$beta[ ,i,drop=F] %*% obj$kappa[ ,i]
}
alpha <- 1 - prob
mean <- apply(fits, c(1,2), mean)
upper <- apply(fits, c(1,2), quantile, 1 - alpha/2)
lower <- apply(fits, c(1,2), quantile, alpha/2)
colnames(mean) <- colnames(obj$Y)
colnames(upper) <- colnames(obj$Y)
colnames(lower) <- colnames(obj$Y)
row.names(mean) <- row.names(obj$Y)
row.names(upper) <- row.names(obj$Y)
row.names(lower) <- row.names(obj$Y)
list(mean = 1 - exp(-exp(mean)), upper = 1 - exp(-exp(upper)), lower = 1 - exp(-exp(lower)))
}
#'
#' @export
fitted.PredBLC <- function(object, prob = 0.95, ...) {
obj = object
fits <- obj$y
alpha <- 1 - prob
mean <- apply(fits, c(3,2), mean)
upper <- apply(fits, c(3,2), quantile, 1 - alpha/2)
lower <- apply(fits, c(3,2), quantile, alpha/2)
colnames(mean) <- colnames(obj$y)
colnames(upper) <- colnames(obj$y)
colnames(lower) <- colnames(obj$y)
row.names(mean) <- row.names(obj$y)
row.names(upper) <- row.names(obj$y)
row.names(lower) <- row.names(obj$y)
list(mean = 1 - exp(-exp(mean)), upper = 1 - exp(-exp(upper)), lower = 1 - exp(-exp(lower)))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/fitted_blc.R
|
#' @name fitted.DLM
#' @rdname fitted.DLM
#'
#' @title DLM: Fitted death probabilities (qx)
#'
#' @description This function computes the point estimates of the death probabilities (qx) of a
#' mortality graduation returned by dlm() or dlm_close() functions.
#'
#'
#' @param object Object of the following classes: `DLM` or `ClosedDLM`.
#' @param age Vector with the ages to calculate the death probabilities (Optional). By default, all ages are considered.
#' @param prob Coverage probability of the predictive intervals.
#' @param ... Other arguments.
#'
#' @return A data.frame object with the selected ages and the corresponding estimates and predictive intervals of the death probabilities.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the log mortality rate of the year 2000, ranging from 0 to 100 years old:
#' USA2000 = USA[USA$Year == 2000,]
#' x = 0:100
#' Ex = USA2000$Ex.Total[x+1]
#' Dx = USA2000$Dx.Total[x+1]
#' y = log(Dx/Ex)
#'
#' ## Fitting dlm
#' fit = dlm(y, M = 100)
#'
#' ## Estimating the death probabilities (qx)
#' fitted(fit)
#'
#' @seealso [fitted.HP()] and [fitted.BLC()] for `HP` or `BLC` methods.
#'
#' @export
fitted.DLM <- function(object, age = NULL, prob = 0.95, ...){
set.seed(123) ## Set seed for reproducibility
fit = object
V = 0.01 ## same value specified in filtering
# qx_fitted = 1 - exp(-exp(fit$mu))
# qx_fitted = apply(qx_fitted, 2, median, na.rm = T)
# qx_fitted[(qx_fitted < 0 | qx_fitted > 1)] = NA_real_
t = ncol(fit$mu)
n = nrow(fit$mu)
fitted = matrix(NA_real_, nrow = n, ncol = t)
# for(i in 1:n){
# sim = rnorm(t, fit$mu[i,], sqrt(fit$sig2[i]))
# fitted[i,] <- exp(sim)
# }
for(i in 1:t){
sim = rnorm(n, fit$mu[,i], sqrt(V*fit$sig2)) ## observational sd = sqrt(V*sigma2), matching the filtering step and dlm_close()
fitted[,i] <- 1 - exp(-exp(sim))
}
qx_fitted = apply(fitted, 2, median, na.rm = T)
qx_fitted[qx_fitted < 0] = NA_real_
qx_fitted[qx_fitted > 1] = 1
qi = apply(fitted, 2, quantile, (1-prob)/2, na.rm = T)
qs = apply(fitted, 2, quantile, (1+prob)/2, na.rm = T)
aux = data.frame(age = fit$info$ages, qx.fitted = qx_fitted, qx.lower = qi, qx.upper = qs)
aux[!(aux$qx.lower > 0), 3] = 0
aux[!(aux$qx.upper < 1), 4] = 1
if(!is.null(age)) aux = aux[(aux$age %in% age), ]
return(aux)
}
#' @export
fitted.ClosedDLM <- function(object, age = NULL, prob = 0.95, ...){
fit = object
fitted = fit$qx
close_age = fit$info$ages
qx_fitted = fitted
qx_fitted[(qx_fitted < 0 | qx_fitted > 1)] = NA_real_
qx_fitted = apply(qx_fitted, 2, median, na.rm = T)
qi = apply(fitted, 2, quantile, (1-prob)/2, na.rm = T)
qs = apply(fitted, 2, quantile, (1+prob)/2, na.rm = T)
df = data.frame(age = close_age, qx.fitted = qx_fitted, qx.lower = qi, qx.upper = qs)
df[!(df$qx.lower > 0), 3] = 0
df[!(df$qx.upper < 1), 4] = 1
if(!is.null(age)) df = df[(df$age %in% age), ]
return(df[order(df$age), ])
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/fitted_dlm.R
|
#' @name fitted.HP
#' @rdname fitted.HP
#'
#' @title HP: Fitted death probabilities (qx)
#'
#' @description This function computes the point estimates of the death probabilities (qx) of the `HP` or the `ClosedHP` class object fitted by the hp() or hp_close() functions.
#'
#'
#' @param object Object of the class `HP` or `ClosedHP` adjusted by the hp() or hp_close() functions.
#' @param age Vector with the ages to calculate the death probabilities (Optional). By default, all ages are considered.
#' @param Ex Vector with the exposures of the selected ages. Its length must be equal to the age vector. This argument is only necessary when using the Poisson and the Binomial distributions.
#' @param prob Coverage probability of the predictive intervals.
#' @param ... Other arguments.
#'
#' @return A data.frame object with the selected ages and the corresponding estimates and predictive intervals of the death probabilities.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the exposure and the death count of the year 2000, ranging from 0 to 90 years old:
#' USA2000 = USA[USA$Year == 2000,]
#' x = 0:90
#' Ex = USA2000$Ex.Total[x+1]
#' Dx = USA2000$Dx.Total[x+1]
#'
#' ## Fitting a simple model:
#' fit = hp(x = x, Ex = Ex, Dx = Dx, M = 5000, bn = 0, thin = 10)
#'
#' ## Estimating the death probabilities (qx)
#' fitted(fit)
#' fitted(fit, age = 0:110, Ex = USA2000$Ex.Total[0:110+1])
#'
#' @include fun_aux.R
#'
#' @seealso [fitted.BLC()] and [fitted.DLM()] for `BLC` or `DLM` methods.
#'
#' @export
fitted.HP <- function(object, age = NULL, Ex = NULL, prob = 0.95, ...){
set.seed(123) ## Set seed for reproducibility
fit = object
## checking if age and Ex were inputed by the user
if(is.null(age) && is.null(Ex)){
## if age and Ex are null, fetch from the fit model
age = fit$data$x
Ex = fit$data$Ex
}else if(is.null(age) && !(is.null(Ex))){
if(fit$info$model %in% c("binomial", "poisson")) { stop("Missing age argument.") }
age = fit$data$x
}else if(!(is.null(age)) && is.null(Ex)){
if(fit$info$model %in% c("binomial","poisson")){
if(all(age %in% fit$data$x)){
min_age = min(fit$data$x, na.rm = T)
Ex = fit$data$Ex[age-min_age+1]
}else{
stop("Missing Ex argument.")
}
}
}else if(length(age) != length(Ex)){
## length check for age and Ex
stop("age and Ex arguments have different lengths.")
}
## checking for invalid probabilities
if(prob < 0 || prob > 1){ stop("Invalid death probability values.") }
if(fit$info$model == "binomial"){
age_out = age[is.na(Ex)]
age = age[!is.na(Ex)] ## Removing ages with no exposures
Ex = Ex[!is.na(Ex)] ## Removing exposures with NA values
## Aux object to save the qx markov chains
qx_fitted = matrix(NA_real_, nrow = nrow(fit$post.samples$mcmc_theta), ncol = length(age))
qx_ic = matrix(NA_real_, nrow = nrow(fit$post.samples$mcmc_theta), ncol = length(age))
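## For each posterior draw, death counts are simulated from the likelihood;
## sim/Ex is then a draw from the posterior predictive crude rate, from which
## the predictive intervals are taken.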
for (i in 1:nrow(qx_ic)){
qx = 1 - exp(-hp_curve_9(age, fit$post.samples$mcmc_theta[i,]))
qx = ifelse((qx < 0 | qx > 1), NA_real_, qx)
qx_fitted[i,] = qx ## point estimate
sim = rbinom(length(age), trunc(Ex), qx)
qx_ic[i,] = sim/Ex
}
}else if(fit$info$model == "poisson"){
age_out = age[is.na(Ex)]
age = age[!is.na(Ex)] ## Removing ages with no exposures
Ex = Ex[!is.na(Ex)] ## Removing exposures with NA values
## Aux object to save the qx markov chains
qx_fitted = matrix(NA_real_, nrow = nrow(fit$post.samples$mcmc_theta), ncol = length(age))
qx_ic = matrix(NA_real_, nrow = nrow(fit$post.samples$mcmc_theta), ncol = length(age))
for (i in 1:nrow(qx_ic)){
qx = 1 - exp(-hp_curve_9(age, fit$post.samples$mcmc_theta[i,]))
qx = ifelse((qx < 0 | qx > 1), NA_real_, qx)
qx_fitted[i,] = qx ## point estimate
sim = rpois(length(age), lambda = Ex*qx)
qx_ic[i,] = sim/Ex
}
}else{
age_out = NULL
## Aux object to save the qx markov chains
qx_fitted = matrix(NA_real_, nrow = nrow(fit$post.samples$mcmc_theta), ncol = length(age))
qx_ic = matrix(NA_real_, nrow = nrow(fit$post.samples$mcmc_theta), ncol = length(age))
for(i in 1:nrow(qx_ic)){
hp <- hp_curve(age, fit$post.samples$mcmc_theta[i,])
qx_fitted[i,] = hp/(1 + hp)
sim = rnorm(length(age), log(hp), sqrt(fit$post.samples$sigma2[i]))
qx_ic[i,] <- exp(sim)/(1+exp(sim))
}
}
qi = apply(qx_ic, 2, quantile, (1-prob)/2, na.rm = T)
qs = apply(qx_ic, 2, quantile, (1+prob)/2, na.rm = T)
qx_fitted = apply(qx_fitted, 2, median, na.rm = T)
aux = data.frame(age = age, qx.fitted = qx_fitted, qx.lower = qi, qx.upper = qs)
aux[!(aux$qx.lower > 0), 3] = 0
aux[!(aux$qx.upper < 1), 4] = 1
if(length(age_out) > 0){
aux2 <- data.frame(age = age_out, qx.fitted = NA_real_, qx.lower = NA_real_, qx.upper = NA_real_)
aux <- rbind(aux, aux2)
aux <- aux[order(aux$age),]
}
return(aux)
}
#' @export
fitted.ClosedHP <- function(object, age = NULL, prob = 0.95, ...){
fit = object
if(fit$method == "Mix"){
qx_fitted = apply(fit$qx, 2, median)
return(data.frame(age = fit$data$x, qx.fitted = qx_fitted, qx.lower = NA_real_, qx.upper = NA_real_))
}
qx_fitted = fit$qx
qx_fitted[(qx_fitted < 0 | qx_fitted > 1)] = NA_real_
close_age = fit$data$x
qi = apply(qx_fitted, 2, quantile, (1-prob)/2, na.rm = T)
qs = apply(qx_fitted, 2, quantile, (1+prob)/2, na.rm = T)
qx_fitted = apply(qx_fitted, 2, median, na.rm = T)
df = data.frame(age = close_age, qx.fitted = qx_fitted, qx.lower = qi, qx.upper = qs)
df[!(df$qx.lower > 0), 3] = 0
df[!(df$qx.upper < 1), 4] = 1
if(!is.null(age)){
df = df[(close_age %in% age), ]
if(any(!(age %in% close_age))){
age_not_fitted = age[!(age %in% close_age)]
aux = data.frame(age = age_not_fitted, qx.fitted = NA_real_, qx.lower = NA_real_, qx.upper = NA_real_)
df = rbind(df, aux); row.names(df) = NULL
}
}
return(df[order(df$age), ])
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/fitted_hp.R
|
## HP curve function
hp_curve_9 = function(x, p){
if(!is.null(dim(p))){
a = p[,1] ; b = p[,2] ; c = p[,3] ; d = p[,4] ; e = p[,5] ; f = p[,6] ; g = p[,7] ; h = p[,8] ; k = p[,9]
}else{
a = p[1] ; b = p[2] ; c = p[3] ; d = p[4] ; e = p[5] ; f = p[6] ; g = p[7] ; h = p[8] ; k = p[9]
}
a^((x+b)^c) + d*exp(-e*log(x/f)^2) + g*h^x / (1 + k*g*h^x)
}
hp_curve <- function(x, p){
if(!is.null(dim(p))){
a = p[,1] ; b = p[,2] ; c = p[,3] ; d = p[,4] ; e = p[,5] ; f = p[,6] ; g = p[,7] ; h = p[,8]
}else{
a = p[1] ; b = p[2] ; c = p[3] ; d = p[4] ; e = p[5] ; f = p[6] ; g = p[7] ; h = p[8]
}
a ^ ((x + b) ^ c) + d * exp(-e * (log(x) - log(f))^2) + g * h ^ x
}
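## Illustrative sketch: evaluating the 9-parameter HP curve at ages 1:90 with
## the optimizer's default starting values below (not fitted estimates) and
## converting the hazard-type curve to death probabilities.
theta_demo <- c(5e-04, 0.004, 0.08, 0.001, 10, 17, 5e-05, 1.1, 1)
qx_demo <- 1 - exp(-hp_curve_9(1:90, theta_demo))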
## Counts the number of leading decimal places of n (used for the Y-axis scale)
decimal <- function(n){
cont <- 0
while(n%%10 < 1){
n <- 10*n; cont <- cont + 1
}
return(cont)
}
################# HP OPTIMIZATION
###### Loss functions
### 9-parameter HP curve
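## The loss is the sum of squared log-ratios between the HP curve and the
## crude rates mx = Dx/Ex; parameters are passed on the log scale so the
## optimizer works unconstrained while positivity is enforced via exp().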
f.perda9 = function(p, Ex, Dx, x){ ## default loss function (assumes K > 0)
mu = hp_curve_9(x, exp(p)); mx = Dx/Ex
loss = log(mu/mx)^2
loss[is.infinite(loss)] <- 10^5
return(sum(loss, na.rm = T))
}
#### Alternative including scenario where K < 0
f.perda9.alt = function(p, Ex, Dx, x){
p <- c(exp(p[-9]), p[9])
mu = hp_curve_9(x, p); mx = Dx/Ex
loss = log(mu/mx)^2
loss[is.infinite(loss)] <- 10^5
return(sum(loss, na.rm = T))
}
### 8-parameter HP curve
f.perda = function(p, Ex, Dx, x){
mu = hp_curve(x, exp(p)); mx = Dx/Ex
mu = mu/(1+mu)
loss = log(mu/mx)^2
loss[is.infinite(loss)] <- 10^5
return(sum(loss, na.rm = T))
}
#### Optim function
optim_HP <- function(x, Ex, Dx, curve = c("8par", "9par")){
start = c(5e-04, 0.004, 0.08, 0.001, 10, 17, 5e-05, 1.1, 1) ## starting values based on the MortalityLaws package
if(curve == "8par"){
start = start[-9]
aux <- nlminb(start = log(start),
objective = f.perda, x = x, Ex = Ex, Dx = Dx,
lower = log(c(1e-16, 1e-16, 1e-16, 1e-16, 1e-16, 15+1e-16, 1e-16, 1)),
upper = log(c(1-1e-16, 1-1e-16, 1-1e-16, 1-1e-16, 100, 110-1e-16, 1-1e-16, 100)),
control = list(eval.max = 5000, iter.max = 5000))
return(exp(aux$par))
}else if(curve == "9par"){
alt = FALSE ## internal toggle: TRUE uses the alternative loss that allows K < 0
if(alt){
aux <- nlminb(start = log(start),
objective = f.perda9.alt, x = x, Ex = Ex, Dx = Dx,
lower = c(log(c(1e-16, 1e-16, 1e-16, 1e-16, 1e-16, 15+1e-16, 1e-16, 1)), -4),
upper = c(log(c(1-1e-16, 1-1e-16, 1-1e-16, 1-1e-16, 100, 110-1e-16, 1-1e-16, 100)), 100),
control = list(eval.max = 5000, iter.max = 5000))
return(c(aux$par[-9], aux$par[9]))
}else{
aux <- nlminb(start = log(start),
objective = f.perda9, x = x, Ex = Ex, Dx = Dx,
lower = log(c(1e-16, 1e-16, 1e-16, 1e-16, 1e-16, 15+1e-16, 1e-16, 1, 1e-16)),
upper = log(c(1-1e-16, 1-1e-16, 1-1e-16, 1-1e-16, 100, 110-1e-16, 1-1e-16, 100, 100)),
control = list(eval.max = 5000, iter.max = 5000))
return(exp(aux$par))
}
}
}
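## Usage sketch with synthetic data (illustrative, not HMD data): obtaining
## least-squares starting values for the HP parameters. The Gompertz-type
## hazard generating Dx below is an assumption for demonstration only.
x_demo <- 1:90
Ex_demo <- rep(1e5, length(x_demo))
Dx_demo <- round(Ex_demo * (1 - exp(-5e-5 * exp(0.09 * x_demo))))
theta0_demo <- optim_HP(x_demo, Ex_demo, Dx_demo, curve = "8par")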
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/fun_aux.R
|
#'
#' @import utils
utils::globalVariables(c("age",
"Age",
"Ex",
"Exp",
"expectancy",
"ci.upper",
"Model",
"ci.lower",
"X1",
"X2",
"X3",
"X4",
"Year",
"ages",
"autocor",
"iteration",
"lim.inf",
"lim.sup",
"log.qx",
"qx.lower",
"qx.upper",
"qx",
"qx.fitted",
"samples",
"year",
"yint"
))
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/globals.R
|
#### SIR function
sir_gompertz_dlm <- function(fit, data, resampling_size = nrow(fit$mu)){
## Gompertz: gomp = a*exp(b*x)
y = data$y ## log(qx)
sigma = median(sqrt(0.01*fit$sig2))
likelihood <- function(par){
gomp = par[1]*exp(par[2]*data$x) ## Gompertz hazard; Normal likelihood on the log scale (DLM assumption)
prod(dnorm(y, mean = log(gomp), sd = sigma))
}
### SIR method
## sampling A and B
A = rbeta(500, 1, 10000)
B = rbeta(500, 1, 10)
## Joint distribution
df = data.frame(A = rep(A, each = 500), B = rep(B, times = 500))
## Assuming a uniform prior, the posterior distribution is proportional to the likelihood
post.dist <- apply(df, 1, likelihood)
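## Importance weights: target (posterior) density divided by the proposal
## density (independent Beta draws for A and B)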
pesos = post.dist/(dbeta(df[,1], 1, 10000)*dbeta(df[,2], 1, 10))
probs = pesos/sum(pesos)
res = sample(500*500, resampling_size, replace = T, prob = probs)
res_A = df[res,1]
res_B = df[res,2]
return(data.frame(A = res_A, B = res_B))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/gompertz_dlm.R
|
#' @name Heatmap.BLC
#' @rdname Heatmap.BLC
#'
#' @title BLC: Heatmap for the life expectancy
#'
#' @description Draws a heatmap based on the life expectancy of a fitted BLC or PredBLC model.
#'
#'
#' @param x A `BLC` or `PredBLC` object, result of a call to blc() function or forecast via predict() function.
#' @param x_lab Description of the modelled object.
#' @param age Vector with the ages to plot the heatmap.
#' @param color Vector of colours used in the heatmap.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A ggplot2 heatmap of the life expectancy.
#'
#' @examples
#' ## Importing log-mortality data from Portugal:
#' data(PT)
#' Y <- PT
#'
#' ## Fitting the model
#'
#' fit = blc(Y = Y, M = 100, bn = 20)
#'
#' ## Heatmap:
#' Heatmap(fit, x_lab = 2000:2015, age = 18:80)
#'
#' @seealso [Heatmap.HP()] and [Heatmap.DLM()] for `HP` or `DLM` methods.
#'
#' @import ggplot2
#' @export
Heatmap.BLC <- function(x, x_lab = NULL, age = NULL, color = c("red","white","blue"), ...){
obj = x
objClass <- class(obj)
supportedClasses <- c("BLC")
if(!any(objClass %in% supportedClasses)){stop("Invalid object type")}
L <- ncol(obj$Y)
q <- nrow(obj$Y)
exps <- expectancy(obj)$expectancy
if(!is.null(x_lab)){
if(length(x_lab) != L){stop("Argument `x_lab` must have the same length as the modelled years")}
tag <- x_lab
}else{tag <- 1:L}
if(!is.null(age)){
if(length(age) != q){stop("Argument `age` must have the same length as the modelled age interval")}
rows <- age
}else{rows <- 1:q}
exps2 <- as.data.frame(matrix(NA_real_, nrow = q*L, ncol = 3))
colnames(exps2) <- c("Exp","Year","Age")
for(j in 1:L){exps2[(q*j-(q-1)):(q*j),] <- data.frame(exps[,j],rep(tag[j]),as.numeric(rows))}
midp <- mean(exps)
p <- ggplot(exps2) + theme_light() +
geom_tile(aes(x = reorder(as.character(Year), sort(as.numeric(Year))), y = Age, fill=Exp)) +
labs(x="Years",
y="Age",
title = "Life expectancy") +
scale_fill_gradient2(name = "Expectancy (years)",
low = color[1],
mid = color[2],
high = color[3],
midpoint = midp)
p
}
#'
#' @export
Heatmap.PredBLC <- function(x, x_lab = NULL, age = NULL, color = c("red","white","blue"), ...){
obj = x
objClass <- class(obj)
supportedClasses <- c("PredBLC")
if(!any(objClass %in% supportedClasses)){stop("Invalid object type")}
L <- obj$h
q <- dim(obj$y)[3]
exps <- expectancy(obj)$expectancy
if(!is.null(x_lab)){
if(length(x_lab) != L){stop("Argument `x_lab` must have the same length as the forecasted years")}
tag <- x_lab
}else{tag <- 1:L}
if(!is.null(age)){
if(length(age) != q){stop("Argument `age` must have the same length as the modelled age interval")}
rows <- age
}else{rows <- 1:q}
exps2 <- as.data.frame(matrix(NA_real_, nrow = q*L, ncol = 3))
colnames(exps2) <- c("Exp","Year","Age")
for(j in 1:L){exps2[(q*j-(q-1)):(q*j),] <- data.frame(exps[,j], rep(tag[j]),as.numeric(rows))}
midp <- mean(exps)
p <- ggplot(exps2) + theme_light() +
geom_tile(aes(x = reorder(as.character(Year), sort(Year)), y = Age, fill=Exp)) +
labs(x="Years",
y="Age",
title = "Life expectancy") +
scale_fill_gradient2(name = "Expectancy (years)",
low = color[1],
mid = color[2],
high = color[3],
midpoint = midp)
p
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/heatmap_blc.R
|
#' @name Heatmap.DLM
#' @rdname Heatmap.DLM
#'
#' @title DLM: Heatmap for the life expectancy
#'
#' @description This function plots a heatmap for the life expectancy of the fitted DLMs.
#'
#'
#' @param x Object or a list of objects of the class `DLM` or `ClosedDLM` returned by dlm() or dlm_close() functions.
#' @param x_lab Description of the object 'fit'.
#' @param age Vector with the ages to plot the heatmap.
#' @param max_age Positive number indicating the last age to be considered to compute the life expectancy (prediction will be considered to match the age interval if needed). This argument is only necessary with objects of the class `DLM`.
#' @param color Vector of colours used in the heatmap.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A ggplot2 heatmap of the life expectancy.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' # Example 1: -------------------------------
#'
#' USA2010 = USA[USA$Year == 2010,]
#'
#' ExF = USA2010$Ex.Female[1:91]
#' DxF = USA2010$Dx.Female[1:91]
#' yF = log(DxF/ExF)
#'
#' fitF <- dlm(yF, M = 100)
#'
#' Heatmap(fitF, x_lab = "Female expec. 2010 USA", max_age = 90)
#'
#'
#' @include expectancy_dlm.R
#'
#' @seealso [Heatmap.BLC()] and [Heatmap.HP()] for `BLC` or `HP` methods.
#'
#' [Heatmap.list()] to the `list` method, adding multiple objects in one single Heatmap.
#'
#' @import ggplot2
#' @export
#'
Heatmap.DLM <- function(x, x_lab = NULL, age = NULL, max_age = 110,
color = c("red","white","blue"), ...){
fits = x
if(is.null(age)){age = fits$info$ages}
if(is.null(x_lab)){x_lab <- "Fitted model"}
#sanity check:
if(max(age) > max_age){stop("Invalid age interval. Check the max_age argument")}
if(length(color) != 3){stop("The argument color must be a 3 length vector.")}
#calculating the life expectancy
exps <- expectancy(fits, graph = FALSE, age = age, max_age = max_age)
#creating dataframe for the heatmap:
exp <- exps$expectancy
ano <- c()
for(i in 1:length(x_lab)){ano <- c((rep(x_lab[i],length(age))),ano)}
idade <- exps$age
df <- data.frame(
"age" = idade,
"year" = rev(as.character(ano)),
"exp" = exp)
#plot
midp <- mean(exp)
p <- ggplot(df) + theme_light() +
geom_raster(aes(x = year, y = age, fill = exp),interpolate = FALSE) +
labs(x="",
y="Age",
title = "Life expectancy") +
scale_fill_gradient2(name = "Expectancy (years)",
low = color[1],
mid = color[2],
high = color[3],
midpoint = midp)
return(p)
}
#' @export
Heatmap.ClosedDLM <- function(x, x_lab = NULL, age = NULL,
color = c("red","white","blue"), ...){
fits = x
if(is.null(x_lab)){x_lab <- "Fitted model"}
if(is.null(age)){age = fits$info$ages}
#sanity check:
if(max(age) > max(fits$info$ages)){stop("Invalid age interval. Check the max_age argument")}
if(length(color) != 3){stop("The argument color must be a 3 length vector.")}
#calculating the life expectancy
exps <- expectancy(fits, graph = FALSE, age = age)
#creating dataframe for the heatmap:
exp <- exps$expectancy
idade <- exps$age
ano <- c()
for(i in 1:length(x_lab)){ano <- c((rep(x_lab[i],length(age))),ano)}
df <- data.frame(
"age" = idade,
"year" = rev(as.character(ano)),
"exp" = exp)
#plot
midp <- mean(exp)
p <- ggplot(df) + theme_light() +
geom_raster(aes(x = year, y = age, fill = exp),interpolate = FALSE) +
labs(x="",
y="Age",
title = "Life expectancy") +
scale_fill_gradient2(name = "Expectancy (years)",
low = color[1],
mid = color[2],
high = color[3],
midpoint = midp)
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/heatmap_dlm.R
|
#' @name Heatmap.HP
#' @rdname Heatmap.HP
#'
#' @title HP: Heatmap for the life expectancy
#'
#' @description This function plots a heatmap for the life expectancy of the fitted HP models.
#'
#'
#' @param x Object or a list of objects of the class `HP` or `ClosedHP` returned by hp() or hp_close() functions.
#' @param x_lab Description of the object 'fit'.
#' @param age Vector with the ages to plot the heatmap.
#' @param max_age Positive number indicating the last age to be considered to compute the life expectancy (extrapolation will be considered to match the age interval if needed). This argument is only necessary with objects of the class `HP`.
#' @param color Vector of colours used in the heatmap.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A ggplot2 heatmap of the life expectancy.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' # Example: -------------------------------
#'
#' USA2010 = USA[USA$Year == 2010,]
#'
#' ExF = USA2010$Ex.Female[1:91]
#' DxF = USA2010$Dx.Female[1:91]
#' x <- 0:90
#'
#' fitF <- hp(x, ExF, DxF, model = "lognormal", M = 1000, bn = 0, thin = 10)
#'
#' Heatmap(fitF, x_lab = "Female expec. 2010 USA")
#'
#' @seealso [Heatmap.BLC()] and [Heatmap.DLM()] for `BLC` or `DLM` methods.
#'
#' [Heatmap.list()] to the `list` method, adding multiple objects in one single Heatmap.
#'
#' @include expectancy_hp.R
#'
#' @import ggplot2
#' @export
#'
Heatmap.HP <- function(x, x_lab = NULL, age = 0:90, max_age = 110,
color = c("red","white","blue"), ...){
fits = x
if(is.null(x_lab)){x_lab <- "Fitted model"}
#sanity check:
if(max(age) > max_age){stop("Invalid age interval. Check the max_age argument")}
if(length(color) != 3){stop("The argument color must be a 3 length vector.")}
#calculating life expectancy
exps <- expectancy(fits, graph = FALSE, age = age, max_age = max_age)
#creating dataframe for the heatmap:
exp <- exps$expectancy
ano <- c()
for(i in 1:length(x_lab)){ano <- c((rep(x_lab[i],length(age))),ano)}
idade <- exps$age
df <- data.frame(
"age" = idade,
"year" = rev(as.character(ano)),
"exp" = exp)
#plot
midp <- mean(exp)
p <- ggplot(df) + theme_light() +
geom_raster(aes(x = year, y = age, fill = exp), interpolate = FALSE) +
labs(x="",
y="Age",
title = "Life expectancy") +
scale_fill_gradient2(name = "Expectancy (years)",
low = color[1],
mid = color[2],
high = color[3],
midpoint = midp)
return(p)
}
#' @export
#'
Heatmap.ClosedHP <- function(x, x_lab = NULL, age = 0:90,
color = c("red","white","blue"), ...){
fits = x
if(is.null(x_lab)){x_lab <- "Fitted model"}
#sanity check:
if(length(color) != 3){stop("The argument color must be a 3 length vector.")}
#calculating life expectancy
exps <- expectancy(fits, graph = FALSE, age = age)
#creating dataframe for the heatmap:
exp <- exps$expectancy
ano <- c()
for(i in 1:length(x_lab)){ano <- c((rep(x_lab[i],length(age))),ano)}
idade <- exps$age
df <- data.frame(
"age" = idade,
"year" = rev(as.character(ano)),
"exp" = exp)
#plot
midp <- mean(exp)
p <- ggplot(df) + theme_light() +
geom_raster(aes(x = year, y = age, fill = exp),interpolate = FALSE) +
labs(x="",
y="Age",
title = "Life expectancy") +
scale_fill_gradient2(name = "Expectancy (years)",
low = color[1],
mid = color[2],
high = color[3],
midpoint = midp)
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/heatmap_hp.R
|
#' @name Heatmap.list
#' @rdname Heatmap.list
#'
#' @title Heatmap for a set of life tables
#'
#' @description This function plots a heatmap for the life expectancy of the mortality graduations
#' returned by hp(), dlm(), hp_close() or dlm_close() functions.
#'
#'
#' @param x List of objects of classes: `HP`, `DLM`, `ClosedHP`, or `ClosedDLM`.
#' @param x_lab Description of the object 'fit'.
#' @param age Vector with the ages to plot the heatmap.
#' @param max_age Positive number indicating the last age to be considered to compute the life expectancy (methods for matching the age interval will be considered if needed). This argument is only necessary with objects of the class `HP` or `DLM`.
#' @param color Vector with colours used in the heatmap.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A ggplot2 heatmap of the life expectancy.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' # Example (HP): -------------------------------
#'
#' ## Selecting the data from 2010
#' USA2010 = USA[USA$Year == 2010,]
#'
#' ExF = USA2010$Ex.Female[1:91]
#' DxF = USA2010$Dx.Female[1:91]
#' x <- 0:90
#'
#' fitF <- hp(x, ExF, DxF, model = "lognormal", M = 1000, bn = 0, thin = 10)
#'
#' ExM = USA2010$Ex.Male[1:91]
#' DxM = USA2010$Dx.Male[1:91]
#'
#' fitM <- hp(x, ExM, DxM, model = "lognormal", M = 1000, bn = 0, thin = 10)
#'
#' fits <- list(fitF = fitF, fitM = fitM)
#'
#' Heatmap(fits, x_lab = c("Female 2010 USA","Male 2010 USA"),
#' age = 15:85)
#'
#'
#' @include expectancy_dlm.R
#' @include expectancy_hp.R
#'
#' @seealso [Heatmap.HP()], [Heatmap.DLM()] and [Heatmap.BLC()] for drawing single Heatmaps.
#'
#' @import ggplot2
#' @export
#'
Heatmap.list <- function(x, x_lab = NULL, age = NULL, max_age = NULL,
color = c("red","white","blue"), ...){
fits = x
if(is.null(x_lab)){
x_lab <- rep(NA_character_,length(fits))
for(i in 1:length(x_lab)){x_lab[i] <- paste("Fit",as.character(i))}
}
#sanity check:
if(inherits(fits, "list")){
if(length(fits) != length(x_lab)){stop("Number of fitted models is different from the x_lab's length.")}
}
if(length(color) != 3){stop("The argument color must be a 3 length vector.")}
#checking the model
if(is.null(age)){
check = unlist(lapply(fits, class)) %in% c("DLM", "ClosedDLM")
if(all(check)){
ages = rep(NA_real_, length(check))
for(i in 1:length(check)){ages[i] = length(fits[[i]]$info$ages)}
age = fits[[which.min(ages)]]$info$ages
}else if(!any(check)){
age = 0:90
}else{
dlm_id <- seq(1:length(check))[check]
ages = rep(NA_real_, length(dlm_id))
for(i in dlm_id){ages[i] = length(fits[[i]]$info$ages)}
age = fits[[which.min(ages)]]$info$ages
}
}
#calculating life expectancy
if( any(unlist(lapply(fits, class)) %in% c("ClosedHP","ClosedDLM")) ){
if(!is.null(max_age)){
warning("max_age argument is available for HP and DLM objects only, the argument will be dropped")
}
lista_exp <- lapply(fits, expectancy, graph = FALSE, age = age)
exps = NULL
for(i in 1:length(lista_exp)){exps <- rbind(exps,lista_exp[[i]])}
}else{
if(is.null(max_age)){max_age = 110}
lista_exp <- lapply(fits, expectancy, graph = FALSE, age = age, max_age = max_age)
exps = NULL
for(i in 1:length(lista_exp)){exps <- rbind(exps,lista_exp[[i]])}
}
#creating heatmap dataframe
exp <- exps$expectancy
ano <- c()
for(i in 1:length(x_lab)){ano <- c((rep(x_lab[i],length(age))),ano)}
idade <- exps$age
df <- data.frame(
"age" = idade,
"year" = rev(as.character(ano)),
"exp" = exp)
#plot
midp <- mean(exp)
p <- ggplot(df) + theme_light() +
geom_raster(aes(x = year, y = age, fill = exp),interpolate = FALSE) +
labs(x="",
y="Age",
title = "Life expectancy") +
scale_fill_gradient2(name = "Expectancy (years)",
low = color[1],
mid = color[2],
high = color[3],
midpoint = midp)
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/heatmap_list.R
|
#' @title Bayesian Heligman-Pollard curve for mortality table graduation
#'
#' @description
#'
#' This function fits the Heligman-Pollard (HP) model following a Bayesian framework using
#' Markov chain Monte Carlo techniques to sample the posterior distribution.
#' Three model options are available: The Binomial and the Poisson models consider nine parameters,
#' whereas the Log-Normal model considers eight parameters for modelling the HP curve.
#'
#' @usage
#' hp(x, Ex, Dx, model = c("binomial", "lognormal", "poisson"),
#' M = NULL, bn = NULL, thin = 10, m = rep(NA, 8), v = rep(NA, 8),
#' inits = NULL, K = NULL, sigma2 = NULL, prop.control = NULL,
#' reduced_model = FALSE)
#'
#' @param x Numeric vector of the ages.
#' @param Ex Numeric vector with the exposure by age.
#' @param Dx Numeric vector with the death counts by age.
#' @param model Character string specifying the model to be adopted. The options are: "binomial", "lognormal" or "poisson".
#' @param M Positive integer indicating the number of iterations of the MCMC run. The default value is 50000 iterations. For the reduced model, the default is 30000 iterations.
#' @param bn Non-negative integer indicating the number of iterations to be discarded as the burn-in period. The default value is half of the M value, rounded.
#' @param thin Positive integer specifying the period for saving samples. The default value is 10.
#' @param m Numeric vector with the mean of the prior distributions for (A, B, C, D, E, F, G, H).
#' @param v Numeric vector with the variance of the prior distributions for (A, B, C, D, E, F, G, H).
#' @param inits Numeric vector with the initial values for the parameters (A, B, C, D, E, F, G, H).
#' @param K Number that specifies the extra parameter 'K' value for binomial and poisson models. It is considered only if model = "binomial" or model = "poisson". The default value is the optimal value.
#' @param sigma2 Positive number that specifies initial value of sigma2 in lognormal model. It is considered only if model = "lognormal".
#' @param prop.control Positive number that is considered for tuning the acceptance rate of MCMC.
#' @param reduced_model Logical value which determines if the reduced model should be fitted. If TRUE, the first term of the HP curve (infant mortality) is not considered. The default value is FALSE.
#'
#' @details
#' The Binomial model assumes that Dx, the death count at age x, has a Binomial distribution:
#' Bin(Ex, qx), where qx is the probability of death at age x. The Poisson model assumes that Dx has a
#' Poisson distribution: Po(Ex*qx). Both models consider the nine-parameter
#' HP curve proposed by Heligman and Pollard (1980):
#'
#' \eqn{HP_x = A^{(x+B)^C} + De^{(-E(log(x/F))^2)} + GH^x/(1+KGH^x)}
#'
#' \eqn{qx = 1-e^{(-HP_x)}}
#'
#' This approximation ensures that qx, which is a probability, is in the correct range.
#'
#' The Log-Normal model assumes that the log odds of death qx/(1-qx) has a Normal distribution
#' with constant variance for all the ages. This model was proposed by Dellaportas et al. (2001),
#' who consider the eight-parameter HP curve as follows:
#'
#' \eqn{log(qx/(1-qx)) = log(A^{(x+B)^C} + De^{-E(log(x/F))^2} + GH^x) + \epsilon_x},
#'
#' where \eqn{\epsilon_x} has independent Normal(0, sigma2) distributions for all ages. More details
#' of this model are available in Dellaportas, Smith and Stavropoulos (2001).
#'
#' The reduced model does not consider the first term of the HP curve: \eqn{A^{(x+B)^C}}. In this
#' case, A, B and C are fixed as zero.
#'
#' All parameters, with the exception of the extra parameter K of the Binomial and the Poisson models
#' (which is set to its estimated optimal value), are estimated by MCMC methods: Gibbs sampling for sigma2 and
#' Metropolis-Hastings for the parameters A, B, C, D, E, F, G and H. Informative prior distributions
#' should help the method to converge quickly.
#'
#' @return A `HP` class object.
#' \item{summary}{A table with summaries measures of the parameters.}
#' \item{post.samples}{A list with the chains generated by MCMC.}
#' \item{data}{A table with the data considered in fitted model.}
#' \item{info}{A list with information about the fitted model, such as the prior distribution means and variances and the initial values.}
#'
#' @references Dellaportas, P., Smith, A. F., and Stavropoulos, P. (2001). “Bayesian Analysis of Mortality Data.” \emph{Journal of the Royal Statistical Society: Series A (Statistics in Society)} 164 (2). Wiley Online Library: 275–291.
#' @references Heligman, Larry, and John H Pollard. (1980). “The Age Pattern of Mortality.” \emph{Journal of the Institute of Actuaries (1886-1994)} 107 (1). JSTOR: 49–80.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the exposure and death count of the 2010 male population ranging from 0 to 90 years old
#' USA2010 = USA[USA$Year == 2010,]
#' x = 0:90
#' Ex = USA2010$Ex.Male[x+1]
#' Dx = USA2010$Dx.Male[x+1]
#'
#' ## Fitting binomial model
#' \donttest{fit = hp(x = x, Ex = Ex, Dx = Dx)
#' print(fit)
#' fit$summary
#' fit$info
#' }
#' ## Fitting lognormal model
#' ## Specifying number of iterations, burn-in, thinning and the initial value of sigma2
#' fit = hp(x = x, Ex = Ex, Dx = Dx, model = "lognormal",
#' M = 1000, bn = 0, thin = 10, sigma2 = 0.1)
#' summary(fit)
#'
#' ## Fitting poisson model
#' ## Specifying the prior distribution parameters for B and E and fixing K as 0.
#' \donttest{fit = hp(x = x, Ex = Ex, Dx = Dx, model = "poisson",
#' m = c(NA, 0.08, NA, NA, 9, NA, NA, NA),
#' v = c(NA, 1e-4, NA, NA, 0.1, NA, NA, NA), K = 0)
#' summary(fit)
#' }
#'
#' ## Using other functions available in the package:
#' ## plotting (See "?plot.HP" in the BayesMortalityPlus package for more options):
#' plot(fit)
#'
#' ## qx estimation (See "?fitted.HP" in the BayesMortalityPlus package for more options):
#' fitted(fit)
#'
#' ## chain's plot (See "?plot_chain" for more options):
#' plot_chain(fit)
#'
#'@seealso [fitted.HP()], [plot.HP()], [print.HP()] and [summary.HP()] for `HP` methods to native R functions [fitted()],
#'[plot()], [print()] and [summary()].
#'
#'[expectancy.HP()] and [Heatmap.HP()] for `HP` methods to compute and visualise the truncated life expectancy
#'via [expectancy()] and [Heatmap()] functions.
#'
#'[hp_close()] for close methods to expand the life tables and [hp_mix()] for mixing life tables.
#'
#'[plot_chain()] to visualise the Markov chains.
#'
#'
#' @include hp_binomial.R
#' @include hp_lognormal.R
#' @include hp_poisson.R
#' @include hp_binomial_reduced.R
#' @include hp_lognormal_reduced.R
#' @include hp_poisson_reduced.R
#' @include fun_aux.R
#'
#' @importFrom MASS mvrnorm
#' @import progress
#' @import stats
#'
#' @export
hp <- function(x, Ex, Dx, model = c("binomial", "lognormal", "poisson"), M = NULL, bn = NULL,
thin = 10, m = rep(NA_real_, 8), v = rep(NA_real_, 8), inits = NULL, K = NULL, sigma2 = NULL,
prop.control = NULL, reduced_model = FALSE){
##############################################################################################
### Validation
model = match.arg(model)
x <- unique(trunc(x)); tam_x = length(x); tam_ex = length(Ex); tam_dx = length(Dx)
if(tam_x != tam_ex || tam_x != tam_dx || tam_ex != tam_dx){ stop("x, Ex, Dx must be the same length.") }
if(sum(x < 0, na.rm = T) > 0 || sum(Ex < 0, na.rm = T) > 0 || sum(Dx < 0, na.rm = T) > 0){ stop("x, Ex, Dx must be nonnegative numbers.") }
x0 <- min(x, na.rm = T)
if(x0 >= 15){ if(!reduced_model) warning("Lower age >= 15. We recommend to use reduced_model = TRUE.", immediate. = T) }
if(is.null(M)){
if(reduced_model){
M = 30000
}else{
M = 50000
}
}
if(is.null(bn)){
bn = round(M/2)
}
if(bn > M){ stop("The total number of iterations (M) must be greater than the burnin period (bn).") }
M = as.integer(trunc(M)); bn = as.integer(trunc(bn)); thin = trunc(thin)
if(M < 1 || bn < 0 || thin < 1){ stop("M, bn and thin must be positive numbers.") }
#### prior validation
## Means
m_default <- c(1/51, 0.5, 2/12, 1/51, 3/0.5, 25, 1/101, 100/90)
if(length(m) != 8) {stop("Length of m must be equal to 8.") }
m <- ifelse(is.na(m), m_default, m)
lower = c(0, 0, 0, 0, 0, 15, 0, 0)
upper = c(1, 1, 1, 1, Inf, 110, 1, Inf)
if(any(m[c(1:5,7,8)] <= 0) || any(m[c(1:4,7)] >= 1) || m[6] <= 15 || m[6] >= 110){param = LETTERS[1:8]; stop(sprintf("The mean(s) of the parameter(s) %s are not in the correct interval.", paste(param[which(m <= lower | m >= upper)], collapse = ", ")))}
## Var
v_default <- c(50/((51^2)*(52)), 1/12, 20/((12^2)*13), 50/((51^2)*(52)), 3/0.25, 100, 100/((101^2)*(102)), 1/81)
if(length(v) != 8) {stop("Length of v must be equal to 8.") }
v <- ifelse(is.na(v), v_default, v)
if(any(v <= 0)){ stop("At least one of the variance is invalid. All variance must be positives.") }
if(any(v[c(1:4,7)] >= m[c(1:4,7)]*(1-m[c(1:4,7)]))){param = LETTERS[c(1:4,7)]; stop(sprintf("Variance of the parameter(s) %s too large.", paste(param[which(v[c(1:4,7)] >= m[c(1:4,7)]*(1-m[c(1:4,7)]))], collapse = ", ")))}
## prop.control
prop.control <- ifelse(is.null(prop.control), 1, prop.control)
if(prop.control <= 0){ stop("prop.control must be a positive number.") }
##############################################################################################
### MCMC
if(model == "binomial"){
if(reduced_model){
mcmc = hp_binomial_red(x = x, Ex = Ex, Dx = Dx, M = M, bn = bn, thin = thin, m = m, v = v,
inits = inits, prop.control = prop.control, K = K)
}else{
mcmc = hp_binomial(x = x, Ex = Ex, Dx = Dx, M = M, bn = bn, thin = thin, m = m, v = v,
inits = inits, prop.control = prop.control, K = K)
}
}else if(model == "lognormal"){
if(reduced_model){
mcmc = hp_lognormal_red(x = x, Ex = Ex, Dx = Dx, M = M, bn = bn, thin = thin, m = m, v = v,
inits = inits, prop.control = prop.control, sigma2 = sigma2)
}else{
mcmc = hp_lognormal(x = x, Ex = Ex, Dx = Dx, M = M, bn = bn, thin = thin, m = m, v = v,
inits = inits, prop.control = prop.control, sigma2 = sigma2)
}
}else if(model == "poisson"){
if(reduced_model){
mcmc = hp_poisson_red(x = x, Ex = Ex, Dx = Dx, M = M, bn = bn, thin = thin, m = m, v = v,
inits = inits, prop.control = prop.control, K = K)
}else{
mcmc = hp_poisson(x = x, Ex = Ex, Dx = Dx, M = M, bn = bn, thin = thin, m = m, v = v,
inits = inits, prop.control = prop.control, K = K)
}
}else{stop("Invalid model.")
}
##############################################################################################
### house keeping
param = c("A", "B", "C", "D", "E", "F", "G", "H", "K")
if(model == "lognormal"){ param = param[-9] }
#### data
Qx = 1-exp(-Dx/Ex)
data = data.frame(x = x, Ex = Ex, Dx = Dx, qx = Qx)
#### final samples
mcmc_theta = mcmc$theta.post
sigma2 = mcmc$sigma2
#### summary
accept = 100*t(data.frame(
a = mcmc$cont[1]/M,
b = mcmc$cont[2]/M,
c = mcmc$cont[3]/M,
d = mcmc$cont[4]/M,
e = mcmc$cont[5]/M,
f = mcmc$cont[6]/M,
g = mcmc$cont[7]/M,
h = mcmc$cont[8]/M,
k = mcmc$cont[9]/M
))
if(model == "lognormal"){ accept = accept[-9,] }
resumo = data.frame(
mean = apply(mcmc_theta, 2, mean),
sd = apply(mcmc_theta, 2, sd),
t(apply(mcmc_theta, 2, quantile, probs = c(0.025, 0.50, 0.975))),
"Accept %" = round(accept, 1),
row.names = param
)
colnames(resumo) <- c("mean", "sd", "2.5%", "50.0%", "97.5%", "Accept %")
#### priors
prior.dist <- t(data.frame(means = m, vars = v)); colnames(prior.dist) = param[-9]
### returns
return(structure(list(summary = round(resumo, 6),
post.samples = list(mcmc_theta = mcmc_theta, sigma2 = sigma2),
data = data,
info = list(model = model,
reduced = reduced_model,
inits = mcmc$inits,
prior.dist = prior.dist,
prop.control = prop.control)),
class = "HP"))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/hp.R
|
hp_binomial <- function(x, Ex, Dx, M = 100000, bn = round(M/4), thin = 1, m = NULL, v = NULL,
inits = NULL, prop.control = NULL, K = NULL){
##############################################################################################
#### optimal value for the inits
Mx = Dx/Ex
opt = optim_HP(x = x, Ex, Dx, curve = "9par")
## init validation
if(is.null(inits)){
inits[1] = opt[1]; inits[2] = opt[2]; inits[3] = opt[3]; inits[4] = opt[4]
inits[5] = opt[5]; inits[6] = opt[6]; inits[7] = opt[7]; inits[8] = opt[8]
if(inits[1] <= 0 || inits[1] >= 1){ inits[1] = 0.01 }
if(inits[2] <= 0 || inits[2] >= 1){ inits[2] = 0.1 }
if(inits[3] <= 0 || inits[3] >= 1){ inits[3] = 0.1 }
if(inits[4] <= 0 || inits[4] >= 1){ inits[4] = 0.01 }
if(inits[5] <= 0){ inits[5] = 5 }
if(inits[6] <= 15 || inits[6] >= 110){ inits[6] = 25 }
if(inits[7] <= 0 || inits[7] >= 1){ inits[7] = 0.01 }
if(inits[8] <= 0){ inits[8] = 1.1 }
}else if(length(inits) != 8 || any(is.na(inits[1:8]))){
stop("inits vector with missing values (NA) or length not equal to 8.")
}else if(any(inits[c(1:5,7,8)] <= 0) || any(inits[c(1:4,7)] >= 1) || inits[6] <= 15 || inits[6] >= 110){
stop("Initial value(s) outside the range of possible values for the paramater(s).")
}
## K
k2 = ifelse(is.null(K), opt[9], K)
##############################################################################################
### auxs
mu = function(x, a, b, c, d, e, f, g, h, k){
### HP function for specific age and parameters
media = a^((x+b)^c) + d*exp(-e*(log(x)-log(f))^2) + (g*h^x)/(1 + k*g*h^x)
return(media)
}
### Log-likelihood (MCMC)
like.HPBayes= function(Ex, Dx, x, a,b,c,d,e,f,g,h,k){
qx = mu(x, a, b, c, d, e, f, g, h, k)
# qx = 1 - exp(-mx)
qx[qx < 1e-16] = 1e-16 # avoid numeric error w log(qx)
qx[qx > 1 - 1e-16] = 1 - 1e-16 # avoid numeric error w log(1 - qx)
# Ex = trunc(Ex)
logvero = sum(Dx*log(qx), na.rm = T) + sum((Ex - Dx)*log(1 - qx), na.rm = T);
return(logvero)
}
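## Note (sketch, not package code): up to the additive constant sum(lchoose(Ex, Dx)),
## which cancels in the Metropolis-Hastings ratio, this is the Binomial(Ex, qx)
## log-likelihood. Uncommenting the toy check below verifies the equivalence:
# Ex. <- c(1000, 1200); Dx. <- c(10, 18); qx. <- c(0.011, 0.014)
# ll1 <- sum(Dx.*log(qx.) + (Ex. - Dx.)*log(1 - qx.))
# ll2 <- sum(dbinom(Dx., Ex., qx., log = TRUE)) - sum(lchoose(Ex., Dx.))
# all.equal(ll1, ll2)  ## TRUE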
### Jacobian
jac_logit = function(x) - log(abs(x - x^2))
jac_log = function(x) - log(x)
jac_f = function(x){
- log(110 - x) - log(x - 15)
}
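## Note (sketch): proposals are drawn on transformed scales (logit for A-D and G,
## log for E and H, and a scaled logit on (15, 110) for F), while the priors are
## written on the original scale, so the acceptance ratio needs the log-Jacobian of
## each transform (additive constants cancel). E.g. d/dx logit(x) = 1/(x - x^2), so:
# x0 <- 0.3
# all.equal(jac_logit(x0), log(1/(x0 - x0^2)))  ## TRUE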
##############################################################################################
## Priors
if(v[1] < m[1]*(1-m[1])){
alpha.a = ((1 - m[1])/v[1] - 1/m[1])*m[1]^2
beta.a = alpha.a*(1/m[1] - 1)
}
if(v[2] < m[2]*(1-m[2])){
alpha.b = ((1 - m[2])/v[2] - 1/m[2])*m[2]^2
beta.b = alpha.b*(1/m[2] - 1)
}
if(v[3] < m[3]*(1-m[3])){
alpha.c = ((1 - m[3])/v[3] - 1/m[3])*m[3]^2
beta.c = alpha.c*(1/m[3] - 1)
}
if(v[4] < m[4]*(1-m[4])){
alpha.d = ((1 - m[4])/v[4] - 1/m[4])*m[4]^2
beta.d = alpha.d*(1/m[4] - 1)
}
alpha.e = (m[5]^2)/v[5]
beta.e = m[5]/v[5]
media.f = m[6]; variancia.f = v[6]
if(v[7] < m[7]*(1-m[7])){
alpha.g = ((1 - m[7])/v[7] - 1/m[7])*m[7]^2
beta.g = alpha.g*(1/m[7] - 1)
}
alpha.h = (m[8]^2)/v[8]
beta.h = m[8]/v[8]
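## Note (sketch): the (alpha, beta) pairs above come from moment matching, i.e.
## choosing the prior so that it has mean m and variance v. For a Beta prior this
## requires v < m*(1 - m); for a Gamma prior, alpha = m^2/v and beta = m/v.
## A hypothetical helper illustrating the Beta case:
# beta_moments <- function(m, v){
#   stopifnot(v < m*(1 - m))
#   alpha <- ((1 - m)/v - 1/m)*m^2
#   c(alpha = alpha, beta = alpha*(1/m - 1))
# }
# beta_moments(0.1, 0.005)  ## Beta(1.7, 15.3): mean 0.1, variance 0.005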
## SD for prop. distributions
sd = 1*prop.control
### proposed parameters for the block (a, b, c, d, e, f, g, h)
U = diag(8)
eps = 1e-10
##############################################################################################
## aux objects
param = c("A", "B", "C", "D", "E", "F", "G", "H", "K")
param_problemas = NULL; warn = F
theta.post = matrix(NA_real_, ncol = 9, nrow = M + 1)
### Acceptance rates
cont = rep(0, 9)
### Initial values
theta.post[1,] = c(inits, k2)
## progress bar
pb = progress::progress_bar$new(format = "Simulating [:bar] :percent in :elapsed",total = M, clear = FALSE, width = 60)
##############################################################################################
## Fits
system.time(for (k in 2:(M+1)) {
pb$tick()
##### 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' estimated in one block (joint)
### Covariance matrix (Metropolis-Hastings)
if(k < 1000){
V = sd*U
}else if(k%%10 == 0) { ### updating every 10 iterations
X = theta.post[c((k-1000):(k-1)), 1:8]
X[,1] = log(X[,1]/(1 - X[,1])) ## A
X[,2] = log(X[,2]/(1 - X[,2])) ## B
X[,3] = log(X[,3]/(1 - X[,3])) ## C
X[,4] = log(X[,4]/(1 - X[,4])) ## D
X[,5] = log(X[,5]) ## E
X[,6] = (X[,6] - 15)/(110 - 15); X[,6] = log(X[,6]/(1 - X[,6])) ## F
X[,7] = log(X[,7]/(1 - X[,7])) ## G
X[,8] = log(X[,8]) ## H
V = sd*(eps*U + var(X))
}
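## Note: this is an adaptive random-walk Metropolis scheme in the spirit of
## Haario et al. (2001): after the first 1000 iterations the proposal covariance
## is refreshed every 10 iterations from the last 1000 draws on the transformed
## scale, with a small ridge (eps*U) keeping V positive definite.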
aux = theta.post[k-1, 1:8]
aux[1] = log(aux[1]/(1 - aux[1])) ## A
aux[2] = log(aux[2]/(1 - aux[2])) ## B
aux[3] = log(aux[3]/(1 - aux[3])) ## C
aux[4] = log(aux[4]/(1 - aux[4])) ## D
aux[5] = log(aux[5]) ## E
aux[6] = (aux[6] - 15)/(110 - 15); aux[6] = log(aux[6]/(1 - aux[6])) ## F
aux[7] = log(aux[7]/(1 - aux[7])) ## G
aux[8] = log(aux[8]) ## H
prop = MASS::mvrnorm(1, mu = aux, Sigma = V)
a.prop = exp(prop[1]) / (1 + exp(prop[1]))
b.prop = exp(prop[2]) / (1 + exp(prop[2]))
c.prop = exp(prop[3]) / (1 + exp(prop[3]))
d.prop = exp(prop[4]) / (1 + exp(prop[4]))
e.prop = exp(prop[5])
f.prop = 15 + (110 - 15)*(exp(prop[6])/ (1 + exp(prop[6])))
g.prop = exp(prop[7]) / (1 + exp(prop[7]))
h.prop = exp(prop[8])
lverok = like.HPBayes(Ex, Dx, x, theta.post[k-1,1], theta.post[k-1,2], theta.post[k-1,3], theta.post[k-1,4], theta.post[k-1,5], theta.post[k-1,6], theta.post[k-1,7], theta.post[k-1,8], theta.post[k-1,9])
lveroprop = like.HPBayes(Ex, Dx, x, a.prop , b.prop , c.prop , d.prop , e.prop , f.prop , g.prop , h.prop , theta.post[k-1,9])
auxk = lverok + dbeta(theta.post[k-1,1], alpha.a, beta.a, log = T) + dbeta(theta.post[k-1,2], alpha.b, beta.b, log = T) + dbeta(theta.post[k-1,3], alpha.c, beta.c, log = T) + dbeta(theta.post[k-1,4], alpha.d, beta.d, log = T) + dgamma(theta.post[k-1,5], alpha.e, beta.e, log = T) + dnorm(theta.post[k-1,6], media.f, sqrt(variancia.f), log = T) + dbeta(theta.post[k-1,7], alpha.g, beta.g, log = T) + dgamma(theta.post[k-1,8], alpha.h, beta.h, log = T) + jac_logit(theta.post[k-1,1]) + jac_logit(theta.post[k-1,2]) + jac_logit(theta.post[k-1,3]) + jac_logit(theta.post[k-1,4]) + jac_log(theta.post[k-1,5]) + jac_f(theta.post[k-1,6]) + jac_logit(theta.post[k-1,7]) + jac_log(theta.post[k-1,8])
auxprop = lveroprop + dbeta(a.prop , alpha.a, beta.a, log = T) + dbeta(b.prop , alpha.b, beta.b, log = T) + dbeta(c.prop , alpha.c, beta.c, log = T) + dbeta(d.prop , alpha.d, beta.d, log = T) + dgamma(e.prop , alpha.e, beta.e, log = T) + dnorm(f.prop , media.f, sqrt(variancia.f), log = T) + dbeta(g.prop , alpha.g, beta.g, log = T) + dgamma(h.prop , alpha.h, beta.h, log = T) + jac_logit(a.prop) + jac_logit(b.prop) + jac_logit(c.prop) + jac_logit(d.prop) + jac_log(e.prop) + jac_f(f.prop) + jac_logit(g.prop) + jac_log(h.prop)
ratio = auxprop - auxk; test = runif(1)
if(is.na(ratio) || is.nan(ratio)){ ratio = -Inf }
if (ratio > log(test)) {
theta.post[k,1] = a.prop
theta.post[k,2] = b.prop
theta.post[k,3] = c.prop
theta.post[k,4] = d.prop
theta.post[k,5] = e.prop
theta.post[k,6] = f.prop
theta.post[k,7] = g.prop
theta.post[k,8] = h.prop
cont[1:8] = cont[1:8] + 1
} else {
theta.post[k,1:8] = theta.post[k-1,1:8]
}
theta.post[k,9] = theta.post[k-1,9] ### fixed K
limite_inf = which(theta.post[k,] <= c(1e-16, 1e-16, 1e-16, 1e-16, 1e-16, 15+1e-16, 1e-16, 1e-16, -Inf))
limite_sup = which(theta.post[k,] >= c(1-1e-5, 1-1e-5, 1-1e-5, 1-1e-5, Inf, 110-1e-5, 1-1e-5, Inf, Inf))
if(length(limite_inf) > 0){
theta.post[k, limite_inf] <- theta.post[k-1, limite_inf]
if(k > bn){
param_problemas = append(param_problemas, param[limite_inf]); warn = T
}
}
if(length(limite_sup) > 0){
theta.post[k, limite_sup] <- theta.post[k-1, limite_sup]
if(k > bn){
param_problemas = append(param_problemas, param[limite_sup]); warn = T
}
}
})
if(warn){ warning(paste0("MCMC may have had some issues with the parameter(s): ", paste(sort(unique(param_problemas)), collapse = ", "), ".\nCheck the 'plot_chain' function output to visualize the parameters chain. It might be helpful to assign informative prior distribution for these parameters. See ?hp.")) }
##############################################################################################
### Return
## Final samples
theta.post = theta.post[seq(bn+1+1, M+1, by = thin),]
return(list(theta.post = theta.post, sigma2 = NULL, cont = cont, inits = inits))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/hp_binomial.R
|
hp_binomial_red <- function(x, Ex, Dx, M = 100000, bn = round(M/4), thin = 1, m = NULL, v = NULL,
inits = NULL, prop.control = NULL, K = NULL){
##############################################################################################
#### optimal value for the inits
Mx = Dx/Ex
opt = optim_HP(x = x, Ex, Dx, curve = "9par")
## init validation
if(is.null(inits)){
inits[1] = opt[1]; inits[2] = opt[2]; inits[3] = opt[3]; inits[4] = opt[4]
inits[5] = opt[5]; inits[6] = opt[6]; inits[7] = opt[7]; inits[8] = opt[8]
if(inits[1] <= 0 || inits[1] >= 1){ inits[1] = 0.01 }
if(inits[2] <= 0 || inits[2] >= 1){ inits[2] = 0.1 }
if(inits[3] <= 0 || inits[3] >= 1){ inits[3] = 0.1 }
if(inits[4] <= 0 || inits[4] >= 1){ inits[4] = 0.01 }
if(inits[5] <= 0){ inits[5] = 5 }
if(inits[6] <= 15 || inits[6] >= 110){ inits[6] = 25 }
if(inits[7] <= 0 || inits[7] >= 1){ inits[7] = 0.01 }
if(inits[8] <= 0){ inits[8] = 1.1 }
}else if(length(inits) != 8 || any(is.na(inits[1:8]))){
stop("inits vector with missing values (NA) or length not equal to 8.")
}else if(any(inits[c(1:5,7,8)] <= 0) || any(inits[c(1:4,7)] >= 1) || inits[6] <= 15 || inits[6] >= 110){
stop("Initial value(s) outside the range of possible values for the paramater(s).")
}
## K
k2 = ifelse(is.null(K), opt[9], K)
a = b = c = 0 #reduced model
inits[1:3] = 0
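## Note: the reduced HP model fixes A = B = C = 0, which removes the childhood
## component a^((x+b)^c) from the curve; only D-H are sampled (K stays fixed).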
##############################################################################################
### auxs
mu = function(x, a, b, c, d, e, f, g, h, k){
### HP function for specific age and parameters
media = a^((x+b)^c) + d*exp(-e*(log(x)-log(f))^2) + (g*h^x)/(1 + k*g*h^x)
return(media)
}
### Log-likelihood (MCMC)
like.HPBayes= function(Ex, Dx, x, a,b,c,d,e,f,g,h,k){
qx = mu(x, a, b, c, d, e, f, g, h, k)
# qx = 1 - exp(-mx)
qx[qx < 1e-16] = 1e-16 # avoid numeric error w log(qx)
qx[qx > 1 - 1e-16] = 1 - 1e-16 # avoid numeric error w log(1 - qx)
# Ex = trunc(Ex)
logvero = sum(Dx*log(qx), na.rm = T) + sum((Ex - Dx)*log(1 - qx), na.rm = T);
return(logvero)
}
### Jacobian
jac_logit = function(x) - log(abs(x - x^2))
jac_log = function(x) - log(x)
jac_f = function(x){
- log(110 - x) - log(x - 15)
}
##############################################################################################
if(v[4] < m[4]*(1-m[4])){
alpha.d = ((1 - m[4])/v[4] - 1/m[4])*m[4]^2
beta.d = alpha.d*(1/m[4] - 1)
}
alpha.e = (m[5]^2)/v[5]
beta.e = m[5]/v[5]
media.f = m[6]; variancia.f = v[6]
if(v[7] < m[7]*(1-m[7])){
alpha.g = ((1 - m[7])/v[7] - 1/m[7])*m[7]^2
beta.g = alpha.g*(1/m[7] - 1)
}
alpha.h = (m[8]^2)/v[8]
beta.h = m[8]/v[8]
## SD for prop. distributions
sd = 1*prop.control
### proposed parameters for the block (d, e, f, g, h)
U = diag(5)
eps = 1e-10
##############################################################################################
## aux objects
param = c("A", "B", "C", "D", "E", "F", "G", "H", "K")
param_problemas = NULL; warn = F
theta.post = matrix(NA_real_, ncol = 9, nrow = M + 1)
### Acceptance rates
cont = rep(0, 9)
### Initial values
theta.post[1,] = c(inits, k2)
## progress bar
pb = progress::progress_bar$new(format = "Simulating [:bar] :percent in :elapsed",total = M, clear = FALSE, width = 60)
##############################################################################################
## Fits
system.time(for (k in 2:(M+1)) {
pb$tick()
##### 'd', 'e', 'f', 'g', 'h' estimated in one block (joint)
### Covariance matrix (Metropolis-Hastings)
if(k < 1000){
V = sd*U
}else if(k%%10 == 0) { ### updating every 10 iterations
X = theta.post[c((k-1000):(k-1)), 4:8]
X[,1] = log(X[,1]/(1 - X[,1])) ## D
X[,2] = log(X[,2]) ## E
X[,3] = (X[,3] - 15)/(110 - 15); X[,3] = log(X[,3]/(1 - X[,3])) ## F
X[,4] = log(X[,4]/(1 - X[,4])) ## G
X[,5] = log(X[,5]) ## H
V = sd*(eps*U + var(X))
}
aux = theta.post[k-1, 4:8]
aux[1] = log(aux[1]/(1 - aux[1])) ## D
aux[2] = log(aux[2]) ## E
aux[3] = (aux[3] - 15)/(110 - 15); aux[3] = log(aux[3]/(1 - aux[3])) ## F
aux[4] = log(aux[4]/(1 - aux[4])) ## G
aux[5] = log(aux[5]) ## H
prop = MASS::mvrnorm(1, mu = aux, Sigma = V)
d.prop = exp(prop[1]) / (1 + exp(prop[1]))
e.prop = exp(prop[2])
f.prop = 15 + (110 - 15)*(exp(prop[3])/ (1 + exp(prop[3])))
g.prop = exp(prop[4]) / (1 + exp(prop[4]))
h.prop = exp(prop[5])
lverok = like.HPBayes(Ex, Dx, x, theta.post[k-1,1], theta.post[k-1,2], theta.post[k-1,3], theta.post[k-1,4], theta.post[k-1,5], theta.post[k-1,6], theta.post[k-1,7], theta.post[k-1,8], theta.post[k-1,9])
lveroprop = like.HPBayes(Ex, Dx, x, a , b , c , d.prop , e.prop , f.prop , g.prop , h.prop , theta.post[k-1,9])
auxk = lverok + dbeta(theta.post[k-1,4], alpha.d, beta.d, log = T) + dgamma(theta.post[k-1,5], alpha.e, beta.e, log = T) + dnorm(theta.post[k-1,6], media.f, sqrt(variancia.f), log = T) + dbeta(theta.post[k-1,7], alpha.g, beta.g, log = T) + dgamma(theta.post[k-1,8], alpha.h, beta.h, log = T) + jac_logit(theta.post[k-1,4]) + jac_log(theta.post[k-1,5]) + jac_f(theta.post[k-1,6]) + jac_logit(theta.post[k-1,7]) + jac_log(theta.post[k-1,8])
auxprop = lveroprop + dbeta(d.prop , alpha.d, beta.d, log = T) + dgamma(e.prop , alpha.e, beta.e, log = T) + dnorm(f.prop , media.f, sqrt(variancia.f), log = T) + dbeta(g.prop , alpha.g, beta.g, log = T) + dgamma(h.prop , alpha.h, beta.h, log = T) + jac_logit(d.prop) + jac_log(e.prop) + jac_f(f.prop) + jac_logit(g.prop) + jac_log(h.prop)
ratio = auxprop - auxk; test = runif(1)
if(is.na(ratio) || is.nan(ratio)){ ratio = -Inf }
if (ratio > log(test)) {
theta.post[k,4] = d.prop
theta.post[k,5] = e.prop
theta.post[k,6] = f.prop
theta.post[k,7] = g.prop
theta.post[k,8] = h.prop
cont[4:8] = cont[4:8] + 1
} else {
theta.post[k,4:8] = theta.post[k-1,4:8]
}
theta.post[k, 1] = a
theta.post[k, 2] = b
theta.post[k, 3] = c
theta.post[k,9] = theta.post[k-1,9] ### fixed K
limite_inf = which(theta.post[k,4:8] <= c(1e-16, 1e-16, 15+1e-16, 1e-16, 1e-16))
limite_sup = which(theta.post[k,4:8] >= c(1-1e-5, Inf, 110-1e-5, 1-1e-5, Inf))
if(length(limite_inf) > 0){
theta.post[k, 3 + limite_inf] <- theta.post[k-1, 3 + limite_inf]
if(k > bn){
param_problemas = append(param_problemas, param[3 + limite_inf]); warn = T
}
}
if(length(limite_sup) > 0){
theta.post[k, 3 + limite_sup] <- theta.post[k-1, 3 + limite_sup]
if(k > bn){
param_problemas = append(param_problemas, param[3 + limite_sup]); warn = T
}
}
})
if(warn){ warning(paste0("MCMC may have had some issues with the parameter(s): ", paste(sort(unique(param_problemas)), collapse = ", "), ".\nCheck the 'plot_chain' function output to visualize the parameters chain. It might be helpful to assign informative prior distribution for these parameters. See ?hp.")) }
##############################################################################################
### Return
## Final samples
theta.post = theta.post[seq(bn+1+1, M+1, by = thin),]
return(list(theta.post = theta.post, sigma2 = NULL, cont = cont, inits = inits))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/hp_binomial_reduced.R
|
#' @title HP: Fitting the advanced ages of the life tables
#'
#' @description This function receives an object of the class `HP` fitted by the hp() function
#' and fits a closing method to expand the life table up to the maximum age specified
#' by the user.
#' There are four closing methods available: 'hp', 'plateau', 'linear', and 'gompertz'.
#' The 'linear' method can only be applied with HP objects following the lognormal variant of
#' the HP mortality law.
#'
#' @usage
#' hp_close (fit, method = c("hp", "plateau", "linear", "gompertz"),
#' x0 = max(fit$data$x), max_age = 120, k = 7,
#' weights = seq(from = 0, to = 1, length.out = 2*k+1),
#' new_Ex = NULL, new_Dx = NULL)
#'
#' @param fit Object of the class `HP` fitted by the hp() function
#' @param method Character string specifying the closing method to be fitted: 'hp', 'plateau', 'linear' or 'gompertz'.
#' @param x0 Integer with the starting age the closing method will be fitted from. Default is the last age fitted by the 'HP' object.
#' @param max_age Integer with the maximum age to which the closing method will be fitted. Default is 120.
#' @param k Integer representing the size of the age interval to be mixed with the 'linear' or 'gompertz' closing methods for smooth graduation. If k = 0, no mixing will be applied.
#' @param weights Vector of weights of the closing method used in the mixture of the closing method and the fitted model made in the mixing age group. The vector's size should be equal to 2k+1. For a better understanding of this parameter and the mixture applied in this function, see Details.
#' @param new_Ex Vector with exposure of ages after the x0 input. This is an optional argument used in the 'linear' and 'gompertz' closing methods. If this argument is specified, then new_Dx also needs to be.
#' @param new_Dx Vector containing the death counts of the ages after the x0 input. This is also an optional argument used in the 'linear' and 'gompertz' closing methods. The length must be the same as new_Ex.
#'
#' @details
#' There are three types of age groups when the closing method is applied: a group
#' where only the HP-fitted model computes the death probabilities, followed by a
#' group in which the death probabilities are a mix (or, more precisely, a weighted mean)
#' of the HP model and the closing method, followed by a group in which the
#' death probabilities are computed solely by the closing method. The mix is applied
#' so that the transition of the death probabilities between the fitted model
#' and the closing method occurs smoothly.
#'
#' The parameters 'x0' and 'k' define the mixing age group. The parameter 'x0'
#' indicates the center age of the group. The parameter 'k' is the number of ages
#' before 'x0' and after 'x0', so this group has a total of \eqn{2k + 1} ages. Therefore,
#' the parameter 'weights' must have length equal to \eqn{2k + 1}. In this case,
#' the death probability is calculated as follows. Consider \eqn{model_x} and \eqn{close_x}
#' as the death probabilities of the fitted model and the closing method at age \eqn{x},
#' respectively. Then, the resulting death probability of the mix is calculated as:
#'
#' \eqn{q_x = w_x close_x + (1-w_x) model_x},
#'
#' where \eqn{w_x} represents the weight of the closing method at age \eqn{x}. With the
#' default weights, \eqn{w_x} increases from 0 at age \eqn{x0 - k} to 1 at age \eqn{x0 + k},
#' so the closing method gradually takes over.
#' This computation is applied to all elements in the MCMC chain of the fitted model,
#' resulting in a new chain of death probabilities. This procedure is applied only in
#' the linear and Gompertz methods.
#'
#' The four closing methods for life tables are:
#'
#' 1.'hp' method: Expands the previously adjusted HP model until the max_age argument.
#'
#' 2.'plateau' method: Keeps the death probability (qx) constant after the x0 argument.
#'
#' 3.'linear' method: Fits a linear regression starting at age x0 - k until the last age with data available (lognormal only).
#'
#' 4.'gompertz' method: Adopted as the closing method of the 2010-2012 English Life Table No. 17, fits the Gompertz mortality law via SIR using the same available data as the 'linear' method.
#'
#' @return Returns a `ClosedHP` class object with the predictive chains of the death probability
#' (qx) from the first fitted age to the max_age argument, the data used by the function, and the
#' closing method chosen.
#'
#' @references Dodd, Erengul, Forster, Jonathan, Bijak, Jakub, & Smith, Peter 2018. “Smoothing mortality data: the English life table, 2010-12.” \emph{Journal of the Royal Statistical Society: Series A (Statistics in Society)}, 181(3), 717-735.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the exposure and the death count of the year 2010, ranging from 0 to 90 years old:
#' USA2010 = USA[USA$Year == 2010,]
#' x = 0:90
#' Ex = USA2010$Ex.Male[x+1]
#' Dx = USA2010$Dx.Male[x+1]
#'
#' ## Fitting a lognormal HP model:
#' fit = hp(x = x, Ex = Ex, Dx = Dx, model = "lognormal",
#' M = 1000, bn = 0, thin = 10)
#'
#' ## Applying the closing function with different methods:
#' close1 = hp_close(fit, method = "hp", x0 = 90)
#' \donttest{close2 = hp_close(fit, method = "plateau", x0 = 90)
#' close3 = hp_close(fit, method = "linear", x0 = 80,
#' new_Ex = USA2010$Ex.Male[82:101],
#' new_Dx = USA2010$Dx.Male[82:101])
#' close4 = hp_close(fit, method = "gompertz", x0 = 70,
#' new_Ex = USA2010$Ex.Male[72:101],
#' new_Dx = USA2010$Dx.Male[72:101],
#' k = 5, max_age = 120)
#'
#' #### Using the other functions available in the package with the 'ClosedHP' object:
#'
#' ## qx estimation (See "?fitted.HP" in the BayesMortalityPlus package for more options):
#' fitted(close2)
#'
#' ## life expectancy (See "?expectancy.HP" for more options)
#' expectancy(close3, age = 0:110)
#'
#' ## plotting (See "?plot.HP" in the BayesMortalityPlus package for more options):
#' plot(close4)
#' g <- plot(list(close4, fit),
#' colors = c("seagreen", "blue"),
#' labels = c("Closed", "Model"))
#' # plotly::ggplotly(g)
#' }
#'
#' @seealso [fitted.HP()], [plot.HP()], [print.HP()] and [summary.HP()] for `ClosedHP` methods to native R functions [fitted()],
#'[plot()], [print()] and [summary()].
#'
#'[expectancy.HP()] and [Heatmap.HP()] for `ClosedHP` methods to compute and visualise the truncated life expectancy
#'via [expectancy()] and [Heatmap()] functions.
#'
#'
#' @include fun_aux.R
#' @include sir_gompertz.R
#'
#' @importFrom MASS mvrnorm
#'
#' @export
hp_close = function(fit, method = c("hp", "plateau", "linear", "gompertz"), x0 = max(fit$data$x),
max_age = 120, k = 7, weights = seq(from = 0, to = 1, length.out = 2*k+1),
new_Ex = NULL, new_Dx = NULL){
## Pre-processing
method = match.arg(method)
## Checklist
if(!inherits(fit, "HP")) { stop("fit argument must be a 'HP' Object returned by hp() function.") }
if (length(weights) != 2*k + 1) { stop("The length of the weights vector is not equal to 2k+1.") }
if(x0 > max(fit$data$x)) { stop("x0 argument exceeds the maximum age of the model.") }
if(x0 >= max_age) { stop("the choices of values for x0 and max_age are not consistent, x0 must be less than max_age.") }
if(method %in% c("hp", "plateau")) { k = 0; weights = 0 }
if(fit$info$model == "lognormal" & method %in% c("hp", "plateau")) { new_Ex = new_Dx = NULL }
if (method == "linear" & fit$info$model != "lognormal"){ stop("linear closing method is only available for the lognormal model.") }
if((method == "gompertz" | method == "linear") & ((!is.null(new_Ex) & is.null(new_Dx) | (is.null(new_Ex) & !is.null(new_Dx))))) { stop("gompertz and linear closing methods require new_Ex and new_Dx arguments.") }
if(!is.null(new_Ex) & !is.null(new_Dx) & length(new_Ex) != length(new_Dx)) { stop("new_Ex and new_Dx lengths should be the same.") }
## Check if there are overlapping data between the model and the user input:
min_age = min(fit$data$x)
if(x0 < max(fit$data$x) & is.null(new_Ex)){
new_Ex = fit$data$Ex[(x0+1-min_age):max(fit$data$x+1-min_age)]
new_Dx = fit$data$Dx[(x0+1-min_age):max(fit$data$x+1-min_age)]
}
fit$data = fit$data[fit$data$x <= x0,]
if(x0 - k < min_age) { stop("x0 or k arguments are not correct, they are not consistent with the initial age.") }
## Adding input data to the model data:
age_last_data_Ex = x0 + length(new_Ex)
age_last_data_Dx = x0 + length(new_Dx)
new_Ex = c(fit$data$Ex, new_Ex)
new_Dx = c(fit$data$Dx, new_Dx)
if(max_age-age_last_data_Ex < 0) {max_age = age_last_data_Ex}
## Completing the data for the closing method:
if(fit$info$model == "lognormal") {
new_Ex = c(new_Ex, rep(NA_real_, max_age-age_last_data_Ex))
}else{
new_Ex = c(new_Ex, rep(new_Ex[length(new_Ex)], max_age-age_last_data_Ex))
}
new_Dx = c(new_Dx, rep(NA_real_, max_age-age_last_data_Dx))
## Data between 0 and the maximum age:
full_Ex = c(new_Ex)
full_Dx = c(new_Dx)
if(method == "linear" | method == "gompertz"){
data = data.frame(x = (x0-k):(age_last_data_Ex))
data$Ex = full_Ex[data$x + 1 - min_age]
data$Dx = full_Dx[data$x + 1 - min_age]
data$qx = 1 - exp(-data$Dx/data$Ex)
data$y = log(data$qx)
if(nrow(data) < 2) { stop("Insufficient data to apply the closing method. Decrease the value of x0 argument or increase the value of k or try different data.") }
}
## End length of the Markov chains:
num_sim = nrow(fit$post.samples$mcmc_theta)
## Ages where the closing method will be applied:
old_x = (x0 - k):max_age
old_len = length(old_x)
## Matrix to save the fit:
closed = matrix(0, nrow = num_sim, ncol = old_len)
colnames(closed) = old_x
## Returns of the function: qx chains, x = min_age, ..., max_age
ret = matrix(NA_real_, nrow = num_sim, ncol = max_age + 1 - min_age)
## Closing methods
if (method == "hp"){
# Evaluates the mortality curve at each posterior draw of the parameters:
if(fit$info$model == "lognormal"){
for (i in 1:num_sim){
hp = hp_curve(old_x, fit$post.samples$mcmc_theta[i, ])
sim = exp(rnorm(old_len, log(hp), sqrt(fit$post.samples$sigma2[i])))
closed[i, ] = sim/(1+sim)
}
}else if(fit$info$model == "binomial"){
aux_Ex = full_Ex[old_x + 1 - min_age] ## exposures indexed by age relative to min_age
for (i in 1:num_sim){
qx = 1 - exp(-hp_curve_9(old_x, fit$post.samples$mcmc_theta[i,]))
qx = ifelse(qx > 1, 1, qx)
qx = ifelse(qx < 0, 0, qx)
sim = rbinom(old_len, trunc(aux_Ex), qx)
closed[i, ] = sim/trunc(aux_Ex)
}
}else{
aux_Ex = full_Ex[old_x + 1 - min_age] ## exposures indexed by age relative to min_age
for (i in 1:num_sim){
qx = 1 - exp(-hp_curve_9(old_x, fit$post.samples$mcmc_theta[i,]))
qx = ifelse(qx > 1, 1, qx)
qx = ifelse(qx < 0, 0, qx)
sim = rpois(old_len, aux_Ex*qx)
closed[i, ] = sim/aux_Ex
}
}
}else if(method == "plateau"){
if(fit$info$model == "lognormal"){
# gets the death probability of x0 and applies it till max_age
hp = hp_curve(x0, fit$post.samples$mcmc_theta)
sim = exp(rnorm(num_sim, log(hp), sqrt(fit$post.samples$sigma2)))
closed <- matrix(sim/(1+sim), num_sim, old_len)
}else if(fit$info$model == "binomial"){
aux_Ex = full_Ex[x0+1-min_age]
qx = 1 - exp(-hp_curve_9(x0, fit$post.samples$mcmc_theta))
qx = ifelse(qx > 1, 1, qx)
qx = ifelse(qx < 0, 0, qx)
sim = rbinom(num_sim, rep(trunc(aux_Ex), num_sim), qx)
closed <- matrix(sim/trunc(aux_Ex), num_sim, old_len)
}else{
aux_Ex = full_Ex[x0+1-min_age]
qx = 1 - exp(-hp_curve_9(x0, fit$post.samples$mcmc_theta))
qx = ifelse(qx > 1, 1, qx)
qx = ifelse(qx < 0, 0, qx)
sim = rpois(num_sim, aux_Ex*qx)
closed <- matrix(sim/aux_Ex, num_sim, old_len)
}
}else if(method == "linear"){
mod = lm(y ~ x, data = data)
pred = predict(mod, newdata = data.frame(x = old_x))
X = model.matrix(mod)
Xpred = cbind(1, old_x)
C1 = t(X) %*% X
Cpred = Xpred %*% chol2inv(chol(C1)) %*% t(Xpred)
RMAT = (diag(old_len) + Cpred)
for (i in 1:num_sim){
sig = sqrt(fit$post.samples$sigma2[i])
SIGMApred = sig * RMAT
sim_vals = MASS::mvrnorm(1, mu = pred, Sigma = SIGMApred)
closed[i, ] = exp(sim_vals)
}
}else if(method == "gompertz"){
param = sir_gompertz(fit, data, resampling_size = num_sim)
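## Note: each row of 'param' is assumed to hold one resampled Gompertz pair (a, b),
## with hazard a*exp(b*x), matching its use in the expressions below.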
if(fit$info$model == "lognormal"){
for (i in 1:num_sim){
gomp = param[i,1]*exp(param[i,2]*old_x)
sim = exp(rnorm(old_len, log(gomp), sqrt(fit$post.samples$sigma2[i])))
closed[i, ] = sim/(1+sim)
}
}else if(fit$info$model == "binomial"){
aux_Ex = full_Ex[old_x + 1 - min_age] ## exposures indexed by age relative to min_age
for (i in 1:num_sim){
qx = 1 - exp(-param[i,1]*exp(param[i,2]*old_x))
qx = ifelse(qx > 1, 1, qx)
qx = ifelse(qx < 0, 0, qx)
sim = rbinom(old_len, trunc(aux_Ex), qx)
closed[i, ] = sim/trunc(aux_Ex)
}
}else{
aux_Ex = full_Ex[old_x + 1 - min_age] ## exposures indexed by age relative to min_age
for (i in 1:num_sim){
qx = 1 - exp(-param[i,1]*exp(param[i,2]*old_x))
qx = ifelse(qx > 1, 1, qx)
qx = ifelse(qx < 0, 0, qx)
sim = rpois(old_len, aux_Ex*qx)
closed[i, ] = sim/aux_Ex
}
}
}
## qx error margin (default was 0.02)
eps = 0.01
# Preventing death probabilities above 1:
closed = apply(closed, 2, function(x) ifelse(x < 1 - eps, x, 1))
new_age = min_age:max_age
new_len = length(new_age)
fitted = matrix(0, nrow = num_sim, ncol = new_len)
colnames(fitted) = new_age
## Checking the model used:
if(fit$info$model == "lognormal"){
for (i in 1:num_sim){
hp = hp_curve(new_age, fit$post.samples$mcmc_theta[i, ])
sim = exp(rnorm(new_len, log(hp), sqrt(fit$post.samples$sigma2[i])))
fitted[i, ] = sim/(1+sim)
}
}else if(fit$info$model == "binomial"){
for (i in 1:num_sim){
qx = 1 - exp(-hp_curve_9(new_age, fit$post.samples$mcmc_theta[i,]))
qx = ifelse(qx > 1, 1, qx)
qx = ifelse(qx < 0, 0, qx)
sim = rbinom(new_len, trunc(full_Ex), qx)
fitted[i, ] = sim/trunc(full_Ex)
}
}else{
for (i in 1:num_sim){
qx = 1 - exp(-hp_curve_9(new_age, fit$post.samples$mcmc_theta[i,]))
qx = ifelse(qx > 1, 1, qx)
qx = ifelse(qx < 0, 0, qx)
sim = rpois(new_len, full_Ex*qx)
fitted[i, ] = sim/full_Ex
}
}
# Model only indexes: age 0 till x0 - k - 1
idx_mod_only = min_age:(x0 - k - 1) + 1 - min_age
# Mix indexes: age x0 - k till x0 + k
idx_mix = (x0 - k):(x0 + k) + 1 - min_age
# Closing method only indexes: age x0 + k + 1 till max_age
idx_close = (x0 + k + 1):max_age + 1 - min_age
idx_mnc = c(idx_mix, idx_close)
ret[ , idx_mod_only] = fitted[ , idx_mod_only]
ret[ , idx_mnc] = closed
# Mix
for (i in 1:num_sim) {
ret[i, idx_mix] = weights * ret[i, idx_mix] + (1 - weights) * fitted[i, idx_mix]
}
return(structure(list(qx = ret,
data = data.frame(x = new_age,
Ex = full_Ex,
Dx = full_Dx,
qx = 1-exp(-full_Dx/full_Ex)),
method = method), class = "ClosedHP"))
}
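## Illustrative only (not part of the package source): a minimal sketch of the
## weighted mix applied above, for a single posterior draw and k = 2 (toy values).
# model_qx <- c(0.10, 0.12, 0.14, 0.17, 0.20)  ## HP model, ages x0-k, ..., x0+k
# close_qx <- c(0.11, 0.13, 0.16, 0.18, 0.21)  ## closing method, same ages
# w <- seq(from = 0, to = 1, length.out = 2*2 + 1)  ## default weights
# w*close_qx + (1 - w)*model_qx  ## mixed death probabilities across the 2k+1 ages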
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/hp_close.R
|
hp_lognormal <- function(x, Ex, Dx, M = 100000, bn = round(M/4), thin = 1, m = NULL, v = NULL,
inits = NULL, prop.control = NULL, sigma2 = NULL){
##############################################################################################
#### optimal value for the inits
Mx = Dx/Ex
opt = optim_HP(x = x, Ex, Dx, curve = "8par")
## init validation
if(is.null(inits)){
inits[1] = opt[1]; inits[2] = opt[2]; inits[3] = opt[3]; inits[4] = opt[4]
inits[5] = opt[5]; inits[6] = opt[6]; inits[7] = opt[7]; inits[8] = opt[8]
if(inits[1] <= 0 || inits[1] >= 1){ inits[1] = 0.01 }
if(inits[2] <= 0 || inits[2] >= 1){ inits[2] = 0.1 }
if(inits[3] <= 0 || inits[3] >= 1){ inits[3] = 0.1 }
if(inits[4] <= 0 || inits[4] >= 1){ inits[4] = 0.01 }
if(inits[5] <= 0){ inits[5] = 5 }
if(inits[6] <= 15 || inits[6] >= 110){ inits[6] = 25 }
if(inits[7] <= 0 || inits[7] >= 1){ inits[7] = 0.01 }
if(inits[8] <= 0){ inits[8] = 1.1 }
}else if(length(inits) != 8 || any(is.na(inits[1:8]))){
stop("inits vector with missing values (NA) or length not equal to 8.")
}else if(any(inits[c(1:5,7,8)] <= 0) || any(inits[c(1:4,7)] >= 1) || inits[6] <= 15 || inits[6] >= 110){
stop("Initial value(s) outside the range of possible values for the paramater(s).")
}
sigma2 = ifelse(is.null(sigma2), 0.1, sigma2)
if(sigma2 <= 0){ stop("Invalid sigma2 value.") }
##############################################################################################
### auxs
mu = function(x, a, b, c, d, e, f, g, h){
### HP function for specific age and parameters
media = a^((x+b)^c) + d*exp(-e*(log(x)-log(f))^2) + (g*h^x)
return(media)
}
### Log-likelihood (MCMC)
like.HPBayes= function(Ex, Dx, x, a, b, c, d, e, f, g, h, sigma2){
## Calculating the HP curve and the mean
hp = mu(x, a, b, c, d, e, f, g, h)
# Note: hp here is the HP curve on the odds scale, i.e. qx = hp/(1 + hp), so logit(qx) ~ Normal(log(hp), sigma2); this matches the notation used in the closing method.
qx = 1 - exp(-Dx/Ex)
qx[qx < 1e-16] = 1e-16 # avoid numeric error w log(qx)
qx[qx > 1 - 1e-16] = 1 - 1e-16 # avoid numeric error w log(1 - qx)
y = log(qx/(1-qx)) # y ~ normal(log(hp), sigma2)
## log-likelihood normal(mu_mean, sigma2)
logvero = sum(-0.5 * ( log(sigma2) + (1/sigma2) * (y - log(hp))^2 ), na.rm = T);
return(logvero)
}
### Jacobian
jac_logit = function(x) - log(abs(x - x^2))
jac_log = function(x) - log(x)
jac_f = function(x){
- log(110 - x) - log(x - 15)
}
##############################################################################################
## Priors
if(v[1] < m[1]*(1-m[1])){
alpha.a = ((1 - m[1])/v[1] - 1/m[1])*m[1]^2; beta.a = alpha.a*(1/m[1] - 1)
}
if(v[2] < m[2]*(1-m[2])){
alpha.b = ((1 - m[2])/v[2] - 1/m[2])*m[2]^2
beta.b = alpha.b*(1/m[2] - 1)
}
if(v[3] < m[3]*(1-m[3])){
alpha.c = ((1 - m[3])/v[3] - 1/m[3])*m[3]^2
beta.c = alpha.c*(1/m[3] - 1)
}
if(v[4] < m[4]*(1-m[4])){
alpha.d = ((1 - m[4])/v[4] - 1/m[4])*m[4]^2
beta.d = alpha.d*(1/m[4] - 1)
}
alpha.e = (m[5]^2)/v[5]
beta.e = m[5]/v[5]
media.f = m[6]; variancia.f = v[6]
if(v[7] < m[7]*(1-m[7])){
alpha.g = ((1 - m[7])/v[7] - 1/m[7])*m[7]^2
beta.g = alpha.g*(1/m[7] - 1)
}
alpha.h = (m[8]^2)/v[8]
beta.h = m[8]/v[8]
## Prior for sigma2
alpha.s2 = beta.s2 = 0
## Posterior for sigma2
alpha.s2.post <- alpha.s2 + 0.5*length(x)
## SD for prop. distributions
sd = 1*prop.control
### proposed parameters for the block (a, b, c, d, e, f, g, h)
U = diag(8)
eps = 1e-10
##############################################################################################
## aux objects
param = c("A", "B", "C", "D", "E", "F", "G", "H")
param_problemas = NULL; warn = F
qx = 1 - exp(-Dx/Ex)
qx[qx < 1e-16] = 1e-16
qx[qx > 1 - 1e-16] = 1 - 1e-16
y = log(qx/(1-qx))
theta.post = matrix(NA_real_, ncol = 8, nrow = M + 1)
### Acceptance rates
cont = rep(0, 8)
### Initial values
theta.post[1,] = inits
## progress bar
pb = progress::progress_bar$new(format = "Simulating [:bar] :percent in :elapsed",total = M, clear = FALSE, width = 60)
##############################################################################################
## Fits
system.time(for (k in 2:(M+1)) {
pb$tick()
##### 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' estimated in one block (joint)
### Covariance matrix (Metropolis-Hastings)
if(k < 1000){
V = sd*U
}else if(k%%10 == 0) { ### updating every 10 iterations
X = theta.post[c((k-1000):(k-1)), 1:8]
X[,1] = log(X[,1]/(1 - X[,1])) ## A
X[,2] = log(X[,2]/(1 - X[,2])) ## B
X[,3] = log(X[,3]/(1 - X[,3])) ## C
X[,4] = log(X[,4]/(1 - X[,4])) ## D
X[,5] = log(X[,5]) ## E
X[,6] = (X[,6] - 15)/(110 - 15); X[,6] = log(X[,6]/(1 - X[,6])) ## F
X[,7] = log(X[,7]/(1 - X[,7])) ## G
X[,8] = log(X[,8]) ## H
V = sd*(eps*U + var(X))
}
aux = theta.post[k-1, 1:8]
aux[1] = log(aux[1]/(1 - aux[1])) ## A
aux[2] = log(aux[2]/(1 - aux[2])) ## B
aux[3] = log(aux[3]/(1 - aux[3])) ## C
aux[4] = log(aux[4]/(1 - aux[4])) ## D
aux[5] = log(aux[5]) ## E
aux[6] = (aux[6] - 15)/(110 - 15); aux[6] = log(aux[6]/(1 - aux[6])) ## F
aux[7] = log(aux[7]/(1 - aux[7])) ## G
aux[8] = log(aux[8]) ## H
prop = MASS::mvrnorm(1, mu = aux, Sigma = V)
a.prop = exp(prop[1]) / (1 + exp(prop[1]))
b.prop = exp(prop[2]) / (1 + exp(prop[2]))
c.prop = exp(prop[3]) / (1 + exp(prop[3]))
d.prop = exp(prop[4]) / (1 + exp(prop[4]))
e.prop = exp(prop[5])
f.prop = 15 + (110 - 15)*(exp(prop[6])/ (1 + exp(prop[6])))
g.prop = exp(prop[7]) / (1 + exp(prop[7]))
h.prop = exp(prop[8])
lverok = like.HPBayes(Ex, Dx, x, theta.post[k-1,1], theta.post[k-1,2], theta.post[k-1,3], theta.post[k-1,4], theta.post[k-1,5], theta.post[k-1,6], theta.post[k-1,7], theta.post[k-1,8], sigma2[k-1])
lveroprop = like.HPBayes(Ex, Dx, x, a.prop , b.prop , c.prop , d.prop , e.prop , f.prop , g.prop , h.prop , sigma2[k-1])
auxk = lverok + dbeta(theta.post[k-1,1], alpha.a, beta.a, log = T) + dbeta(theta.post[k-1,2], alpha.b, beta.b, log = T) + dbeta(theta.post[k-1,3], alpha.c, beta.c, log = T) + dbeta(theta.post[k-1,4], alpha.d, beta.d, log = T) + dgamma(theta.post[k-1,5], alpha.e, beta.e, log = T) + dnorm(theta.post[k-1,6], media.f, sqrt(variancia.f), log = T) + dbeta(theta.post[k-1,7], alpha.g, beta.g, log = T) + dgamma(theta.post[k-1,8], alpha.h, beta.h, log = T) + jac_logit(theta.post[k-1,1]) + jac_logit(theta.post[k-1,2]) + jac_logit(theta.post[k-1,3]) + jac_logit(theta.post[k-1,4]) + jac_log(theta.post[k-1,5]) + jac_f(theta.post[k-1,6]) + jac_logit(theta.post[k-1,7]) + jac_log(theta.post[k-1,8])
auxprop = lveroprop + dbeta(a.prop , alpha.a, beta.a, log = T) + dbeta(b.prop , alpha.b, beta.b, log = T) + dbeta(c.prop , alpha.c, beta.c, log = T) + dbeta(d.prop , alpha.d, beta.d, log = T) + dgamma(e.prop , alpha.e, beta.e, log = T) + dnorm(f.prop , media.f, sqrt(variancia.f), log = T) + dbeta(g.prop , alpha.g, beta.g, log = T) + dgamma(h.prop , alpha.h, beta.h, log = T) + jac_logit(a.prop) + jac_logit(b.prop) + jac_logit(c.prop) + jac_logit(d.prop) + jac_log(e.prop) + jac_f(f.prop) + jac_logit(g.prop) + jac_log(h.prop)
ratio = auxprop - auxk; test = runif(1)
if(is.na(ratio) || is.nan(ratio)){ ratio = -Inf }
if (ratio > log(test)) {
theta.post[k,1] = a.prop
theta.post[k,2] = b.prop
theta.post[k,3] = c.prop
theta.post[k,4] = d.prop
theta.post[k,5] = e.prop
theta.post[k,6] = f.prop
theta.post[k,7] = g.prop
theta.post[k,8] = h.prop
cont[1:8] = cont[1:8] + 1
} else {
theta.post[k,1:8] = theta.post[k-1,1:8]
}
##### Sigma2
beta.s2.post <- beta.s2 + 0.5*sum((y - log(mu(x, theta.post[k,1], theta.post[k,2], theta.post[k,3], theta.post[k,4], theta.post[k,5], theta.post[k,6], theta.post[k,7], theta.post[k,8])))^2)
aux <- rgamma(1, alpha.s2.post, beta.s2.post)
sigma2[k] <- 1/aux
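## Note: with alpha.s2 = beta.s2 = 0 (the improper prior p(sigma2) proportional
## to 1/sigma2), this is the conjugate Gibbs update
## sigma2 | y, theta ~ Inverse-Gamma(alpha.s2 + n/2, beta.s2 + SSR/2),
## sampled here by drawing the precision 1/sigma2 from the corresponding Gamma.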
limite_inf = which(theta.post[k,] <= c(1e-16, 1e-16, 1e-16, 1e-16, 1e-16, 15+1e-16, 1e-16, 1e-16))
limite_sup = which(theta.post[k,] >= c(1-1e-5, 1-1e-5, 1-1e-5, 1-1e-5, Inf, 110-1e-5, 1-1e-5, Inf))
if(length(limite_inf) > 0){
theta.post[k, limite_inf] <- theta.post[k-1, limite_inf]
if(k > bn){
param_problemas = append(param_problemas, param[limite_inf]); warn = T
}
}
if(length(limite_sup) > 0){
theta.post[k, limite_sup] <- theta.post[k-1, limite_sup]
if(k > bn){
param_problemas = append(param_problemas, param[limite_sup]); warn = T
}
}
})
if(warn){ warning(paste0("MCMC may have had some issues with the parameter(s): ", paste(sort(unique(param_problemas)), collapse = ", "), ".\nCheck the 'plot_chain' function output to visualize the parameters chain. It might be helpful to assign informative prior distribution for these parameters. See ?hp.")) }
##############################################################################################
### Return
## Final samples
theta.post = theta.post[seq(bn+1+1, M+1, by = thin),]
sigma2 = sigma2[seq(bn+1+1, M+1, by = thin)]
return(list(theta.post = theta.post, sigma2 = sigma2, cont = cont, inits = inits))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/hp_lognormal.R
|
hp_lognormal_red <- function(x, Ex, Dx, M = 100000, bn = round(M/4), thin = 1, m = NULL, v = NULL,
inits = NULL, prop.control = NULL, sigma2 = NULL){
##############################################################################################
#### optimal value for the inits
Mx = Dx/Ex
opt = optim_HP(x = x, Ex, Dx, curve = "8par")
## init validation
if(is.null(inits)){
inits[1] = opt[1]; inits[2] = opt[2]; inits[3] = opt[3]; inits[4] = opt[4]
inits[5] = opt[5]; inits[6] = opt[6]; inits[7] = opt[7]; inits[8] = opt[8]
if(inits[1] <= 0 || inits[1] >= 1){ inits[1] = 0.01 }
if(inits[2] <= 0 || inits[2] >= 1){ inits[2] = 0.1 }
if(inits[3] <= 0 || inits[3] >= 1){ inits[3] = 0.1 }
if(inits[4] <= 0 || inits[4] >= 1){ inits[4] = 0.01 }
if(inits[5] <= 0){ inits[5] = 5 }
if(inits[6] <= 15 || inits[6] >= 110){ inits[6] = 25 }
if(inits[7] <= 0 || inits[7] >= 1){ inits[7] = 0.01 }
if(inits[8] <= 0){ inits[8] = 1.1 }
}else if(length(inits) != 8 || any(is.na(inits[1:8]))){
stop("inits vector with missing values (NA) or length not equal to 8.")
}else if(any(inits[c(1:5,7,8)] <= 0) || any(inits[c(1:4,7)] >= 1) || inits[6] <= 15 || inits[6] >= 110){
stop("Initial value(s) outside the range of possible values for the paramater(s).")
}
sigma2 = ifelse(is.null(sigma2), 0.1, sigma2)
if(sigma2 <= 0){ stop("Invalid sigma2 value.") }
a = b = c = 0 ## reduced model
inits[1:3] = 0
##############################################################################################
### auxs
mu = function(x, a, b, c, d, e, f, g, h){
### HP function for specific age and parameters
media = a^((x+b)^c) + d*exp(-e*(log(x)-log(f))^2) + (g*h^x)
return(media)
}
### Log-likelihood (MCMC)
like.HPBayes= function(Ex, Dx, x, a, b, c, d, e, f, g, h, sigma2){
## Calculating HP curve and mean
hp = mu(x, a, b, c, d, e, f, g, h)
# Note: hp here is the HP curve on the odds scale, i.e. qx = hp/(1 + hp), so logit(qx) ~ Normal(log(hp), sigma2); this matches the notation used in the closing method.
qx = 1 - exp(-Dx/Ex)
qx[qx < 1e-16] = 1e-16 # avoid numeric error w log(qx)
qx[qx > 1 - 1e-16] = 1 - 1e-16 # avoid numeric error w log(1 - qx)
y = log(qx/(1-qx)) # y ~ normal(log(hp), sigma2)
## log-likelihood normal(mu_mean, sigma2)
logvero = sum(-0.5 * ( log(sigma2) + (1/sigma2) * (y - log(hp))^2 ), na.rm = T);
return(logvero)
}
### Jacobian
jac_logit = function(x) - log(abs(x - x^2))
jac_log = function(x) - log(x)
jac_f = function(x){
- log(110 - x) - log(x - 15)
}
##############################################################################################
## Priors
if(v[1] < m[1]*(1-m[1])){
alpha.a = ((1 - m[1])/v[1] - 1/m[1])*m[1]^2; beta.a = alpha.a*(1/m[1] - 1)
}
if(v[2] < m[2]*(1-m[2])){
alpha.b = ((1 - m[2])/v[2] - 1/m[2])*m[2]^2
beta.b = alpha.b*(1/m[2] - 1)
}
if(v[3] < m[3]*(1-m[3])){
alpha.c = ((1 - m[3])/v[3] - 1/m[3])*m[3]^2
beta.c = alpha.c*(1/m[3] - 1)
}
if(v[4] < m[4]*(1-m[4])){
alpha.d = ((1 - m[4])/v[4] - 1/m[4])*m[4]^2
beta.d = alpha.d*(1/m[4] - 1)
}
alpha.e = (m[5]^2)/v[5]
beta.e = m[5]/v[5]
media.f = m[6]; variancia.f = v[6]
if(v[7] < m[7]*(1-m[7])){
alpha.g = ((1 - m[7])/v[7] - 1/m[7])*m[7]^2
beta.g = alpha.g*(1/m[7] - 1)
}
alpha.h = (m[8]^2)/v[8]
beta.h = m[8]/v[8]
## Prior for sigma2
alpha.s2 = beta.s2 = 0
## Posterior for sigma2
alpha.s2.post <- alpha.s2 + 0.5*length(x)
## SD for prop. distributions
sd = 1*prop.control
### proposed parameters for the block (d, e, f, g, h)
U = diag(5)
eps = 1e-10
##############################################################################################
## aux objects
param = c("A", "B", "C", "D", "E", "F", "G", "H")
param_problemas = NULL; warn = F
qx = 1 - exp(-Dx/Ex)
qx[qx < 1e-16] = 1e-16
qx[qx > 1 - 1e-16] = 1 - 1e-16
y = log(qx/(1-qx))
theta.post = matrix(NA_real_, ncol = 8, nrow = M + 1)
### Acceptance rates
cont = rep(0, 8)
### Initial values
theta.post[1,] = inits
## progress bar
pb = progress::progress_bar$new(format = "Simulating [:bar] :percent in :elapsed",total = M, clear = FALSE, width = 60)
##############################################################################################
## Fits
system.time(for (k in 2:(M+1)) {
pb$tick()
##### 'd', 'e', 'f', 'g', 'h' estimated in one block (joint)
### Covariance matrix (Metropolis-Hastings)
if(k < 1000){
V = sd*U
}else if(k%%10 == 0) { ### updating every 10 iterations
X = theta.post[c((k-1000):(k-1)), 4:8]
X[,1] = log(X[,1]/(1 - X[,1])) ## D
X[,2] = log(X[,2]) ## E
X[,3] = (X[,3] - 15)/(110 - 15); X[,3] = log(X[,3]/(1 - X[,3])) ## F
X[,4] = log(X[,4]/(1 - X[,4])) ## G
X[,5] = log(X[,5]) ## H
V = sd*(eps*U + var(X))
}
aux = theta.post[k-1, 4:8]
aux[1] = log(aux[1]/(1 - aux[1])) ## D
aux[2] = log(aux[2]) ## E
aux[3] = (aux[3] - 15)/(110 - 15); aux[3] = log(aux[3]/(1 - aux[3])) ## F
aux[4] = log(aux[4]/(1 - aux[4])) ## G
aux[5] = log(aux[5]) ## H
prop = MASS::mvrnorm(1, mu = aux, Sigma = V)
d.prop = exp(prop[1]) / (1 + exp(prop[1]))
e.prop = exp(prop[2])
f.prop = 15 + (110 - 15)*(exp(prop[3])/ (1 + exp(prop[3])))
g.prop = exp(prop[4]) / (1 + exp(prop[4]))
h.prop = exp(prop[5])
lverok = like.HPBayes(Ex, Dx, x, theta.post[k-1,1], theta.post[k-1,2], theta.post[k-1,3], theta.post[k-1,4], theta.post[k-1,5], theta.post[k-1,6], theta.post[k-1,7], theta.post[k-1,8], sigma2 = sigma2[k-1])
lveroprop = like.HPBayes(Ex, Dx, x, a , b , c , d.prop , e.prop , f.prop , g.prop , h.prop , sigma2 = sigma2[k-1])
auxk = lverok + dbeta(theta.post[k-1,4], alpha.d, beta.d, log = T) + dgamma(theta.post[k-1,5], alpha.e, beta.e, log = T) + dnorm(theta.post[k-1,6], media.f, sqrt(variancia.f), log = T) + dbeta(theta.post[k-1,7], alpha.g, beta.g, log = T) + dgamma(theta.post[k-1,8], alpha.h, beta.h, log = T) + jac_logit(theta.post[k-1,4]) + jac_log(theta.post[k-1,5]) + jac_f(theta.post[k-1,6]) + jac_logit(theta.post[k-1,7]) + jac_log(theta.post[k-1,8])
auxprop = lveroprop + dbeta(d.prop , alpha.d, beta.d, log = T) + dgamma(e.prop , alpha.e, beta.e, log = T) + dnorm(f.prop , media.f, sqrt(variancia.f), log = T) + dbeta(g.prop , alpha.g, beta.g, log = T) + dgamma(h.prop , alpha.h, beta.h, log = T) + jac_logit(d.prop) + jac_log(e.prop) + jac_f(f.prop) + jac_logit(g.prop) + jac_log(h.prop)
ratio = auxprop - auxk; test = runif(1)
if(is.na(ratio) || is.nan(ratio)){ ratio = -Inf }
if (ratio > log(test)) {
theta.post[k,4] = d.prop
theta.post[k,5] = e.prop
theta.post[k,6] = f.prop
theta.post[k,7] = g.prop
theta.post[k,8] = h.prop
cont[4:8] = cont[4:8] + 1
} else {
theta.post[k,4:8] = theta.post[k-1,4:8]
}
theta.post[k, 1] = a
theta.post[k, 2] = b
theta.post[k, 3] = c
##### Sigma2
beta.s2.post <- beta.s2 + 0.5*sum((y - log(mu(x, theta.post[k,1], theta.post[k,2], theta.post[k,3], theta.post[k,4], theta.post[k,5], theta.post[k,6], theta.post[k,7], theta.post[k,8])))^2)
aux <- rgamma(1, alpha.s2.post, beta.s2.post)
sigma2[k] <- 1/aux
limite_inf = which(theta.post[k,4:8] <= c(1e-16, 1e-16, 15+1e-16, 1e-16, 1e-16))
limite_sup = which(theta.post[k,4:8] >= c(1-1e-5, Inf, 110-1e-5, 1-1e-5, Inf))
if(length(limite_inf) > 0){
theta.post[k, 3 + limite_inf] <- theta.post[k-1, 3 + limite_inf]
if(k > bn){
param_problemas = append(param_problemas, param[3 + limite_inf]); warn = T
}
}
if(length(limite_sup) > 0){
theta.post[k, 3 + limite_sup] <- theta.post[k-1, 3 + limite_sup]
if(k > bn){
param_problemas = append(param_problemas, param[3 + limite_sup]); warn = T
}
}
})
if(warn){ warning(paste0("MCMC may have had some issues with the parameter(s): ", paste(sort(unique(param_problemas)), collapse = ", "), ".\nCheck the 'plot_chain' function output to visualize the parameters chain. It might be helpful to assign informative prior distribution for these parameters. See ?hp.")) }
##############################################################################################
### Return
## Final samples
theta.post = theta.post[seq(bn+1+1, M+1, by = thin),]
sigma2 = sigma2[seq(bn+1+1, M+1, by = thin)]
return(list(theta.post = theta.post, sigma2 = sigma2, cont = cont, inits = inits))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/hp_lognormal_reduced.R
|
#' @title HP: Model mixture
#'
#' @description This function mixes the fitted mortality table of the HP model with another mortality
#' table provided by the user.
#'
#' @usage
#' hp_mix (fit, mu_post, weights = NULL, mix_age,
#' x0_prior, x0_post, max_age)
#'
#' @param fit Object of the class 'HP' fitted by the hp() function.
#' @param mu_post Vector with mortality rates considered in the mix.
#' @param weights Non-negative vector specifying the weights considered in the mix.
#' @param mix_age Vector of two non-negative ages specifying the age range of the mixture.
#' @param x0_prior Non-negative number indicating the initial age of the fitted HP model.
#' @param x0_post Non-negative number indicating the initial age of the mortality table provided by the user.
#' @param max_age Positive number indicating the final age in the mixture.
#'
#' @return Returns the posterior distribution of qx.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the exposure and death count of the 2010 and 2013 male populations ranging
#' ## from 0 to 90 years old
#' USA2010 = USA[USA$Year == 2010,]
#' x = 0:90
#' Ex = USA2010$Ex.Male[x+1]
#' Dx = USA2010$Dx.Male[x+1]
#'
#' USA2013 = USA[USA$Year == 2013,]
#' Ex2 = USA2013$Ex.Male[x+1]
#' Dx2 = USA2013$Dx.Male[x+1]
#'
#' ## Fitting HP model for 2010 data and calculating the mortality rates of 2013
#' fit = hp(x = x, Ex = Ex, Dx = Dx,
#' M = 1000, bn = 0, thin = 10)
#' tx_2013 = 1 - exp(-Dx2/Ex2)
#'
#' ## Mixing fitted model and mortality rates of 2013:
#' mix <- hp_mix(fit, tx_2013, x0_prior = 0, x0_post = 0, mix_age = c(50,90),
#' max_age = 90)
#'
#' ## Obtaining the new estimated mortality table (after mixture):
#' qx_mix<- apply(mix$qx, 2, median, na.rm = TRUE)
#' qx_mix
#'
#' @include fun_aux.R
#'
#' @export
hp_mix <- function(fit, mu_post, weights = NULL, mix_age, x0_prior, x0_post, max_age) {
if(!inherits(fit, "HP")) stop("fit must be an object of the class HP.")
if(x0_prior < 0) stop("x0_prior must be non-negative: it is the initial age used in the fitted HP model.")
if(x0_post < 0) stop("x0_post must be non-negative: it is the initial age of the mortality rates used in the mixture.")
if(mix_age[1] > mix_age[2]) { aux <- mix_age; mix_age[1] <- aux[2]; mix_age[2] <- aux[1]; rm(aux) }
if(max_age < x0_prior) stop("max_age must be greater than or equal to x0_prior.")
if(max_age < x0_post) stop("max_age must be greater than or equal to x0_post.")
## Weights check
if(is.null(weights)){
prior_weights = seq(from = 1, to = 0, length.out = mix_age[2] - mix_age[1] + 1)
}else{
if(any(weights < 0)){
warning("weights cannot have negative values.")
prior_weights = seq(from = 1, to = 0, length.out = mix_age[2] - mix_age[1] + 1)
}else if(any(weights > 1)){
if(length(weights) != (mix_age[2] - mix_age[1] + 1)) stop("Length of the vector 'weights' must be equal to the range of mix_age.")
prior_weights = weights/max(weights)
}else{
if(length(weights) != (mix_age[2] - mix_age[1] + 1)) stop("Length of the vector 'weights' must be equal to the range of mix_age.")
prior_weights = weights
}
}
posterior_weights = 1 - prior_weights
## calculating qx
age_fitted <- x0_prior:max_age
mu_prior = matrix(NA_real_, nrow = nrow(fit$post.samples$mcmc_theta), ncol = length(age_fitted))
for (i in 1:nrow(mu_prior)){
if(fit$info$model == "lognormal"){
mu_prior[i,] = hp_curve(age_fitted, fit$post.samples$mcmc_theta[i,])
mu_prior[i,] = mu_prior[i,]/(1 + mu_prior[i,])
mu_prior[i,] = ifelse((mu_prior[i,] < 0 | mu_prior[i,] > 1), NA, mu_prior[i,])
}else{
mu_prior[i,] = 1 - exp(-hp_curve_9(age_fitted, fit$post.samples$mcmc_theta[i,]))
mu_prior[i,] = ifelse((mu_prior[i,] < 0 | mu_prior[i,] > 1), NA, mu_prior[i,])
}
}
## Mixing
mix_int = mix_age[1]:mix_age[2] + 1
for (i in 1:nrow(mu_prior)) {
mu_prior[i,mix_int-x0_prior] = prior_weights*mu_prior[i,mix_int-x0_prior] + posterior_weights*mu_post[mix_int-x0_post]
}
return(structure(list(qx = mu_prior,
data = data.frame(x = age_fitted,
Ex = fit$data$Ex[age_fitted - min(fit$data$x)+1],
Dx = fit$data$Dx[age_fitted - min(fit$data$x)+1],
qx = 1-exp(-fit$data$Dx[age_fitted - min(fit$data$x)+1]/fit$data$Ex[age_fitted - min(fit$data$x)+1])),
info = list(mix_age = mix_age, x0_prior = x0_prior, x0_post = x0_post, max_age = max_age, mu_post = mu_post),
method = "Mix"),
class = "ClosedHP"))
}
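## Illustrative only (not part of the package source): with weights = NULL the
## prior (HP model) weight decays linearly from 1 to 0 across mix_age, e.g.:
# mix_age <- c(50, 54)
# prior_w <- seq(from = 1, to = 0, length.out = mix_age[2] - mix_age[1] + 1)
# rbind(prior = prior_w, posterior = 1 - prior_w)  ## weights applied at ages 50:54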
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/hp_mix.R
|
hp_poisson <- function(x, Ex, Dx, M = 100000, bn = round(M/4), thin = 1, m = NULL, v = NULL,
inits = NULL, prop.control = NULL, K = NULL){
##############################################################################################
#### optimal value for the inits
Mx = Dx/Ex
opt = optim_HP(x = x, Ex, Dx, curve = "9par")
## init validation
if(is.null(inits)){
inits[1] = opt[1]; inits[2] = opt[2]; inits[3] = opt[3]; inits[4] = opt[4]
inits[5] = opt[5]; inits[6] = opt[6]; inits[7] = opt[7]; inits[8] = opt[8]
if(inits[1] <= 0 || inits[1] >= 1){ inits[1] = 0.01 }
if(inits[2] <= 0 || inits[2] >= 1){ inits[2] = 0.1 }
if(inits[3] <= 0 || inits[3] >= 1){ inits[3] = 0.1 }
if(inits[4] <= 0 || inits[4] >= 1){ inits[4] = 0.01 }
if(inits[5] <= 0){ inits[5] = 5 }
if(inits[6] <= 15 || inits[6] >= 110){ inits[6] = 25 }
if(inits[7] <= 0 || inits[7] >= 1){ inits[7] = 0.01 }
if(inits[8] <= 0){ inits[8] = 1.1 }
}else if(length(inits) != 8 || any(is.na(inits[1:8]))){
stop("inits vector with missing values (NA) or length not equal to 8.")
}else if(any(inits[c(1:5,7,8)] <= 0) || any(inits[c(1:4,7)] >= 1) || inits[6] <= 15 || inits[6] >= 110){
stop("Initial value(s) outside the range of possible values for the paramater(s).")
}
## K
k2 = ifelse(is.null(K), opt[9], K)
##############################################################################################
### auxs
mu = function(x, a, b, c, d, e, f, g, h, k){
### HP function for specific age and parameters
media = a^((x+b)^c) + d*exp(-e*(log(x)-log(f))^2) + (g*h^x)/(1 + k*g*h^x)
return(media)
}
### Log-likelihood (MCMC)
like.HPBayes= function(Ex, Dx, x, a,b,c,d,e,f,g,h,k){
qx = mu(x, a, b, c, d, e, f, g, h, k)
# qx = 1 - exp(-mx)
qx[qx < 1e-16] = 1e-16 # avoid numeric error w log(qx)
qx[qx > 1 - 1e-16] = 1 - 1e-16 # avoid numeric error w log(1 - qx)
# Ex = trunc(Ex)
# logvero = sum(Dx*log(qx), na.rm = T) + sum((Ex - Dx)*log(1 - qx), na.rm = T)
logvero = sum(Dx*log(Ex*qx) - Ex*qx, na.rm = T);
return(logvero)
}
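## Note (sketch, not package code): up to the additive constant -sum(lfactorial(Dx)),
## which cancels in the Metropolis-Hastings ratio, this is the Poisson(Ex*qx)
## log-likelihood. Uncommenting the toy check below verifies the equivalence:
# Ex. <- c(1000, 1200); Dx. <- c(10, 18); qx. <- c(0.011, 0.014)
# ll1 <- sum(Dx.*log(Ex.*qx.) - Ex.*qx.)
# ll2 <- sum(dpois(Dx., Ex.*qx., log = TRUE)) + sum(lfactorial(Dx.))
# all.equal(ll1, ll2)  ## TRUE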
### Jacobian
jac_logit = function(x) - log(abs(x - x^2))
jac_log = function(x) - log(x)
jac_f = function(x){
- log(110 - x) - log(x - 15)
}
##############################################################################################
## Priors
if(v[1] < m[1]*(1-m[1])){
alpha.a = ((1 - m[1])/v[1] - 1/m[1])*m[1]^2
beta.a = alpha.a*(1/m[1] - 1)
}
if(v[2] < m[2]*(1-m[2])){
alpha.b = ((1 - m[2])/v[2] - 1/m[2])*m[2]^2
beta.b = alpha.b*(1/m[2] - 1)
}
if(v[3] < m[3]*(1-m[3])){
alpha.c = ((1 - m[3])/v[3] - 1/m[3])*m[3]^2
beta.c = alpha.c*(1/m[3] - 1)
}
if(v[4] < m[4]*(1-m[4])){
alpha.d = ((1 - m[4])/v[4] - 1/m[4])*m[4]^2
beta.d = alpha.d*(1/m[4] - 1)
}
alpha.e = (m[5]^2)/v[5]
beta.e = m[5]/v[5]
media.f = m[6]; variancia.f = v[6]
if(v[7] < m[7]*(1-m[7])){
alpha.g = ((1 - m[7])/v[7] - 1/m[7])*m[7]^2
beta.g = alpha.g*(1/m[7] - 1)
}
alpha.h = (m[8]^2)/v[8]
beta.h = m[8]/v[8]
## SD for prop. distributions
sd = 1*prop.control
### proposed parameters for the block (a, b, c, d, e, f, g, h)
U = diag(8)
eps = 1e-10
##############################################################################################
## aux objects
param = c("A", "B", "C", "D", "E", "F", "G", "H", "K")
param_problemas = NULL; warn = F
theta.post = matrix(NA_real_, ncol = 9, nrow = M + 1)
### Acceptance rates
cont = rep(0, 9)
### Initial values
theta.post[1,] = c(inits, k2)
## progress bar
pb = progress::progress_bar$new(format = "Simulating [:bar] :percent in :elapsed",total = M, clear = FALSE, width = 60)
##############################################################################################
## Fits
system.time(for (k in 2:(M+1)) {
pb$tick()
##### 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' estimated in one block (joint)
### Covariance matrix (Metropolis-Hastings)
if(k < 1000){
V = sd*U
}else if(k%%10 == 0) { ### updating every 10 iterations
X = theta.post[c((k-1000):(k-1)), 1:8]
X[,1] = log(X[,1]/(1 - X[,1])) ## A
X[,2] = log(X[,2]/(1 - X[,2])) ## B
X[,3] = log(X[,3]/(1 - X[,3])) ## C
X[,4] = log(X[,4]/(1 - X[,4])) ## D
X[,5] = log(X[,5]) ## E
X[,6] = (X[,6] - 15)/(110 - 15); X[,6] = log(X[,6]/(1 - X[,6])) ## F
X[,7] = log(X[,7]/(1 - X[,7])) ## G
X[,8] = log(X[,8]) ## H
V = sd*(eps*U + var(X))
}
aux = theta.post[k-1, 1:8]
aux[1] = log(aux[1]/(1 - aux[1])) ## A
aux[2] = log(aux[2]/(1 - aux[2])) ## B
aux[3] = log(aux[3]/(1 - aux[3])) ## C
aux[4] = log(aux[4]/(1 - aux[4])) ## D
aux[5] = log(aux[5]) ## E
aux[6] = (aux[6] - 15)/(110 - 15); aux[6] = log(aux[6]/(1 - aux[6])) ## F
aux[7] = log(aux[7]/(1 - aux[7])) ## G
aux[8] = log(aux[8]) ## H
prop = MASS::mvrnorm(1, mu = aux, Sigma = V)
a.prop = exp(prop[1]) / (1 + exp(prop[1]))
b.prop = exp(prop[2]) / (1 + exp(prop[2]))
c.prop = exp(prop[3]) / (1 + exp(prop[3]))
d.prop = exp(prop[4]) / (1 + exp(prop[4]))
e.prop = exp(prop[5])
f.prop = 15 + (110 - 15)*(exp(prop[6])/ (1 + exp(prop[6])))
g.prop = exp(prop[7]) / (1 + exp(prop[7]))
h.prop = exp(prop[8])
lverok = like.HPBayes(Ex, Dx, x, theta.post[k-1,1], theta.post[k-1,2], theta.post[k-1,3], theta.post[k-1,4], theta.post[k-1,5], theta.post[k-1,6], theta.post[k-1,7], theta.post[k-1,8], theta.post[k-1,9])
lveroprop = like.HPBayes(Ex, Dx, x, a.prop , b.prop , c.prop , d.prop , e.prop , f.prop , g.prop , h.prop , theta.post[k-1,9])
auxk = lverok + dbeta(theta.post[k-1,1], alpha.a, beta.a, log = T) + dbeta(theta.post[k-1,2], alpha.b, beta.b, log = T) + dbeta(theta.post[k-1,3], alpha.c, beta.c, log = T) + dbeta(theta.post[k-1,4], alpha.d, beta.d, log = T) + dgamma(theta.post[k-1,5], alpha.e, beta.e, log = T) + dnorm(theta.post[k-1,6], media.f, sqrt(variancia.f), log = T) + dbeta(theta.post[k-1,7], alpha.g, beta.g, log = T) + dgamma(theta.post[k-1,8], alpha.h, beta.h, log = T) + jac_logit(theta.post[k-1,1]) + jac_logit(theta.post[k-1,2]) + jac_logit(theta.post[k-1,3]) + jac_logit(theta.post[k-1,4]) + jac_log(theta.post[k-1,5]) + jac_f(theta.post[k-1,6]) + jac_logit(theta.post[k-1,7]) + jac_log(theta.post[k-1,8])
auxprop = lveroprop + dbeta(a.prop , alpha.a, beta.a, log = T) + dbeta(b.prop , alpha.b, beta.b, log = T) + dbeta(c.prop , alpha.c, beta.c, log = T) + dbeta(d.prop , alpha.d, beta.d, log = T) + dgamma(e.prop , alpha.e, beta.e, log = T) + dnorm(f.prop , media.f, sqrt(variancia.f), log = T) + dbeta(g.prop , alpha.g, beta.g, log = T) + dgamma(h.prop , alpha.h, beta.h, log = T) + jac_logit(a.prop) + jac_logit(b.prop) + jac_logit(c.prop) + jac_logit(d.prop) + jac_log(e.prop) + jac_f(f.prop) + jac_logit(g.prop) + jac_log(h.prop)
ratio = auxprop - auxk; test = runif(1)
if(is.na(ratio) || is.nan(ratio)){ ratio = -Inf }
if (ratio > log(test)) {
theta.post[k,1] = a.prop
theta.post[k,2] = b.prop
theta.post[k,3] = c.prop
theta.post[k,4] = d.prop
theta.post[k,5] = e.prop
theta.post[k,6] = f.prop
theta.post[k,7] = g.prop
theta.post[k,8] = h.prop
cont[1:8] = cont[1:8] + 1
} else {
theta.post[k,1:8] = theta.post[k-1,1:8]
}
theta.post[k,9] = theta.post[k-1,9]
limite_inf = which(theta.post[k,] <= c(1e-16, 1e-16, 1e-16, 1e-16, 1e-16, 15+1e-16, 1e-16, 1e-16, -Inf))
limite_sup = which(theta.post[k,] >= c(1-1e-5, 1-1e-5, 1-1e-5, 1-1e-5, Inf, 110-1e-5, 1-1e-5, Inf, Inf))
if(length(limite_inf) > 0){
theta.post[k, limite_inf] <- theta.post[k-1, limite_inf]
if(k > bn){
param_problemas = append(param_problemas, param[limite_inf]); warn = T
}
}
if(length(limite_sup) > 0){
theta.post[k, limite_sup] <- theta.post[k-1, limite_sup]
if(k > bn){
param_problemas = append(param_problemas, param[limite_sup]); warn = T
}
}
})
if(warn){ warning(paste0("MCMC may have had some issues with the parameter(s): ", paste(sort(unique(param_problemas)), collapse = ", "), ".\nCheck the 'plot_chain' function output to visualize the parameter chains. It might be helpful to assign informative prior distributions to these parameters. See ?hp.")) }
##############################################################################################
### Return
## Final samples
theta.post = theta.post[seq(bn+1+1, M+1, by = thin),]
return(list(theta.post = theta.post, sigma2 = NULL, cont = cont, inits = inits))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/hp_poisson.R
|
hp_poisson_red <- function(x, Ex, Dx, M = 100000, bn = round(M/4), thin = 1, m = NULL, v = NULL,
inits = NULL, prop.control = NULL, K = NULL){
##############################################################################################
#### optimal value for the inits
Mx = Dx/Ex
opt = optim_HP(x = x, Ex, Dx, curve = "9par")
## init validation
if(is.null(inits)){
inits[1] = opt[1]; inits[2] = opt[2]; inits[3] = opt[3]; inits[4] = opt[4]
inits[5] = opt[5]; inits[6] = opt[6]; inits[7] = opt[7]; inits[8] = opt[8]
if(inits[1] <= 0 || inits[1] >= 1){ inits[1] = 0.01 }
if(inits[2] <= 0 || inits[2] >= 1){ inits[2] = 0.1 }
if(inits[3] <= 0 || inits[3] >= 1){ inits[3] = 0.1 }
if(inits[4] <= 0 || inits[4] >= 1){ inits[4] = 0.01 }
if(inits[5] <= 0){ inits[5] = 5 }
if(inits[6] <= 15 || inits[6] >= 110){ inits[6] = 25 }
if(inits[7] <= 0 || inits[7] >= 1){ inits[7] = 0.01 }
if(inits[8] <= 0){ inits[8] = 1.1 }
}else if(length(inits) != 8 || any(is.na(inits[1:8]))){
stop("inits vector with missing values (NA) or length not equal to 8.")
}else if(any(inits[c(1:5,7,8)] <= 0) || any(inits[c(1:4,7)] >= 1) || inits[6] <= 15 || inits[6] >= 110){
stop("Initial value(s) outside the range of possible values for the paramater(s).")
}
## K
k2 = ifelse(is.null(K), opt[9], K)
a = b = c = 0
inits[1:3] = 0
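## Reduced HP curve: the childhood component ('a', 'b', 'c') is fixed at zero
## and is not sampled.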
##############################################################################################
### auxs
mu = function(x, a, b, c, d, e, f, g, h, k){
### HP function for specific age and parameters
media = a^((x+b)^c) + d*exp(-e*(log(x)-log(f))^2) + (g*h^x)/(1 + k*g*h^x)
return(media)
}
### Log-likelihood (MCMC)
like.HPBayes= function(Ex, Dx, x, a,b,c,d,e,f,g,h,k){
qx = mu(x, a, b, c, d, e, f, g, h, k)
# qx = 1 - exp(-mx)
qx[qx < 1e-16] = 1e-16 # avoid numeric error w log(qx)
qx[qx > 1 - 1e-16] = 1 - 1e-16 # avoid numeric error w log(1 - qx)
logvero = sum(Dx*log(Ex*qx) - Ex*qx, na.rm = T);
return(logvero)
}
### Log-Jacobian terms for the parameter transformations (used in the MH acceptance ratio)
jac_logit = function(x) - log(abs(x - x^2))
jac_log = function(x) - log(x)
jac_f = function(x){
- log(110 - x) - log(x - 15)
}
##############################################################################################
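## Prior hyperparameters from the prior means m and variances v: moment-matched
## beta priors for the probability-type parameters (valid only when v < m*(1-m)),
## gamma priors for 'e' and 'h', and a normal prior for 'f'.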
if(v[4] < m[4]*(1-m[4])){
alpha.d = ((1 - m[4])/v[4] - 1/m[4])*m[4]^2
beta.d = alpha.d*(1/m[4] - 1)
}
alpha.e = (m[5]^2)/v[5]
beta.e = m[5]/v[5]
media.f = m[6]; variancia.f = v[6]
if(v[7] < m[7]*(1-m[7])){
alpha.g = ((1 - m[7])/v[7] - 1/m[7])*m[7]^2
beta.g = alpha.g*(1/m[7] - 1)
}
alpha.h = (m[8]^2)/v[8]
beta.h = m[8]/v[8]
## SD for prop. distributions
sd = 1*prop.control
### proposed parameters for the block (d, e, f, g, h)
U = diag(5)
eps = 1e-10
##############################################################################################
## aux objects
param = c("A", "B", "C", "D", "E", "F", "G", "H", "K")
param_problemas = NULL; warn = F
theta.post = matrix(NA_real_, ncol = 9, nrow = M + 1)
### Acceptance rates
cont = rep(0, 9)
### Initial values
theta.post[1,] = c(inits, k2)
## progress bar
pb = progress::progress_bar$new(format = "Simulating [:bar] :percent in :elapsed",total = M, clear = FALSE, width = 60)
##############################################################################################
## Fits
system.time(for (k in 2:(M+1)) {
pb$tick()
##### 'd', 'e', 'f', 'g', 'h' estimated in one block (joint)
### Covariance matrix (Metropolis-Hastings)
if(k < 1000){
V = sd*U
}else if(k%%10 == 0) { ### updating every 10 iterations
X = theta.post[c((k-1000):(k-1)), 4:8]
X[,1] = log(X[,1]/(1 - X[,1])) ## D
X[,2] = log(X[,2]) ## E
X[,3] = (X[,3] - 15)/(110 - 15); X[,3] = log(X[,3]/(1 - X[,3])) ## F
X[,4] = log(X[,4]/(1 - X[,4])) ## G
X[,5] = log(X[,5]) ## H
V = sd*(eps*U + var(X))
}
aux = theta.post[k-1, 4:8]
aux[1] = log(aux[1]/(1 - aux[1])) ## D
aux[2] = log(aux[2]) ## E
aux[3] = (aux[3] - 15)/(110 - 15); aux[3] = log(aux[3]/(1 - aux[3])) ## F
aux[4] = log(aux[4]/(1 - aux[4])) ## G
aux[5] = log(aux[5]) ## H
prop = MASS::mvrnorm(1, mu = aux, Sigma = V)
d.prop = exp(prop[1]) / (1 + exp(prop[1]))
e.prop = exp(prop[2])
f.prop = 15 + (110 - 15)*(exp(prop[3])/ (1 + exp(prop[3])))
g.prop = exp(prop[4]) / (1 + exp(prop[4]))
h.prop = exp(prop[5])
lverok = like.HPBayes(Ex, Dx, x, theta.post[k-1,1], theta.post[k-1,2], theta.post[k-1,3], theta.post[k-1,4], theta.post[k-1,5], theta.post[k-1,6], theta.post[k-1,7], theta.post[k-1,8], theta.post[k-1,9])
lveroprop = like.HPBayes(Ex, Dx, x, a , b , c , d.prop , e.prop , f.prop , g.prop , h.prop , theta.post[k-1,9])
auxk = lverok + dbeta(theta.post[k-1,4], alpha.d, beta.d, log = T) + dgamma(theta.post[k-1,5], alpha.e, beta.e, log = T) + dnorm(theta.post[k-1,6], media.f, sqrt(variancia.f), log = T) + dbeta(theta.post[k-1,7], alpha.g, beta.g, log = T) + dgamma(theta.post[k-1,8], alpha.h, beta.h, log = T) + jac_logit(theta.post[k-1,4]) + jac_log(theta.post[k-1,5]) + jac_f(theta.post[k-1,6]) + jac_logit(theta.post[k-1,7]) + jac_log(theta.post[k-1,8])
auxprop = lveroprop + dbeta(d.prop , alpha.d, beta.d, log = T) + dgamma(e.prop , alpha.e, beta.e, log = T) + dnorm(f.prop , media.f, sqrt(variancia.f), log = T) + dbeta(g.prop , alpha.g, beta.g, log = T) + dgamma(h.prop , alpha.h, beta.h, log = T) + jac_logit(d.prop) + jac_log(e.prop) + jac_f(f.prop) + jac_logit(g.prop) + jac_log(h.prop)
ratio = auxprop - auxk; test = runif(1)
if(is.na(ratio) || is.nan(ratio)){ ratio = -Inf }
if (ratio > log(test)) {
theta.post[k,4] = d.prop
theta.post[k,5] = e.prop
theta.post[k,6] = f.prop
theta.post[k,7] = g.prop
theta.post[k,8] = h.prop
cont[4:8] = cont[4:8] + 1
} else {
theta.post[k,4:8] = theta.post[k-1,4:8]
}
theta.post[k, 1] = a
theta.post[k, 2] = b
theta.post[k, 3] = c
theta.post[k,9] = theta.post[k-1,9]
limite_inf = which(theta.post[k,4:8] <= c(1e-16, 1e-16, 15+1e-16, 1e-16, 1e-16))
limite_sup = which(theta.post[k,4:8] >= c(1-1e-5, Inf, 110-1e-5, 1-1e-5, Inf))
if(length(limite_inf) > 0){
theta.post[k, 3 + limite_inf] <- theta.post[k-1, 3 + limite_inf]
if(k > bn){
param_problemas = append(param_problemas, param[3 + limite_inf]); warn = T
}
}
if(length(limite_sup) > 0){
theta.post[k, 3 + limite_sup] <- theta.post[k-1, 3 + limite_sup]
if(k > bn){
param_problemas = append(param_problemas, param[3 + limite_sup]); warn = T
}
}
})
if(warn){ warning(paste0("MCMC may have had some issues with the parameter(s): ", paste(sort(unique(param_problemas)), collapse = ", "), ".\nCheck the 'plot_chain' function output to visualize the parameter chains. It might be helpful to assign informative prior distributions to these parameters. See ?hp.")) }
##############################################################################################
### Return
## Final samples
theta.post = theta.post[seq(bn+1+1, M+1, by = thin),]
return(list(theta.post = theta.post, sigma2 = NULL, cont = cont, inits = inits))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/hp_poisson_reduced.R
|
#' @name improvement
#' @rdname improvement
#'
#' @title BLC: Improvement
#'
#' @description Calculates the improvement for each age, based on the resulting chains of the beta parameter from a fitted BLC model.
#'
#' @usage
#' improvement(obj, prob = 0.95)
#'
#' @param obj A `BLC` object, result of a call to blc() function.
#' @param prob A real number that represents the credibility level of the intervals.
#'
#' @return A data.frame with the improvement values of each age, as well as their credible intervals.
#'
#' @examples
#' ## Importing log-mortality data from Portugal:
#' data(PT)
#' Y <- PT
#'
#' ## Fitting the model
#' fit = blc(Y = Y, M = 100, bn = 20)
#'
#' ## Improvement:
#' improvement(fit)
#' improvement(fit, prob = 0.9) #90% credible intervals
#'
#'
#' @export
improvement <- function(obj, prob = 0.95) {
objClass <- class(obj)
supportedClasses <- c("BLC", "ARBLC")
if (!any(objClass %in% supportedClasses)) {
stop("Invalid object type")
}
lower <- (1-prob)/2
upper <- (1+prob)/2
improvement <- apply(obj$beta[ ,(obj$bn+1):obj$M], 1, function (x) mean(1 - exp(-x)))
improvement.int <- apply(obj$beta[ ,(obj$bn+1):obj$M], 1, function (x) quantile(1 - exp(-x),
probs = c(lower, upper)))
data.frame(improvement = improvement, lower.lim = improvement.int[1,], upper.lim = improvement.int[2,])
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/improvement.R
|
# @title Kalman Filter with Drift
#
# @description Computes mean and variance of the distribution of the state, conditional
# on the covariances of observation and system errors, evolution matrices, drifts
# and the observations up to that point.
#
# @param y Observed data as an m-by-N matrix where m is the dimension
# @param m0 Initial mean as a p-vector
# @param C0 Initial covariance p-by-p matrix
# @param V Covariance m-by-m matrix V referring to observation error
# @param W Covariance p-by-p matrix W referring to system error
# @param Ft Constant observation matrix as an m-by-p matrix
# @param Gt Constant evolution matrix as a p-by-p matrix
# @param a1 Drift of the observational equation
# @param a2 Drift of the system equation
#
# @return A list containing the means `m` and the covariances `C`
#
# @note
# Reference: Petris et al, 2009, p.53
#
kd.filter <- function(y, m0, C0, V, W, Ft, Gt, a1, a2) {
N <- ncol(y) # Number of observations
p <- length(m0) # State dimension
ret <- vector("list", 2) # List to be returned
ret$m <- matrix(nrow = p, ncol = N) # Array of means m_t
ret$C <- array(dim = c(p, p, N)) # Array of covariances C_t
# First-step computation
# at <- a2 + Gt %*% m0
# Rt <- W + Gt %*% C0 %*% t(Gt)
# Qt <- Ft %*% Rt %*% t(Ft) + V
# ybart <- y[ ,1] - a1
# Bt <- Rt %*% t(Ft)
# ret$C[ , ,1] <- Rt - Bt %*% solve(Qt) %*% t(Bt)
# ret$m[ ,1] <- ret$C[ , ,1] %*% (t(Ft) %*% solve(V) %*% ybart + solve(Rt) %*% at)
at <- a2 + Gt %*% m0
Rt <- W + Gt %*% C0 %*% t(Gt)
ft <- Ft %*% at
Qt <- Ft %*% Rt %*% t(Ft) + V
et <- y[ ,1] - a1 - ft
At <- Rt %*% t(Ft) %*% chol2inv(chol(Qt))
ret$m[ ,1] <- at + At %*% et
ret$C[ , ,1] <- Rt - At %*% Ft %*% Rt
# Updating
for (i in 2:N) {
# at <- a2 + Gt %*% ret$m[ ,i-1]
# Rt <- W + Gt %*% ret$C[ , ,i-1] %*% t(Gt)
# Qt <- Ft %*% Rt %*% t(Ft) + V
# Bt <- Rt %*% t(Ft)
# ret$C[ , ,i] <- Rt - Bt %*% solve(Qt) %*% t(Bt)
# ret$m[ ,i] <- ret$C[ , ,i] %*% (FT.V.inv %*% ybart[ ,i] + solve(Rt) %*% at)
at <- a2 + Gt %*% ret$m[ ,i-1]
Rt <- W + Gt %*% ret$C[ , ,i-1] %*% t(Gt)
ft <- Ft %*% at
Qt <- Ft %*% Rt %*% t(Ft) + V
et <- y[ ,i] - a1 - ft
At <- Rt %*% t(Ft) %*% chol2inv(chol(Qt))
ret$m[ ,i] <- at + At %*% et
ret$C[ , ,i] <- Rt - At %*% Ft %*% Rt
}
ret
}
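# A minimal usage sketch (kept as comments; the inputs are hypothetical),
# assuming a univariate local-level model, i.e. m = p = 1, Ft = Gt = 1, no drifts:
# y <- matrix(rnorm(50, sd = 0.5), nrow = 1)
# f <- kd.filter(y, m0 = 0, C0 = 100, V = 0.25, W = 0.01, Ft = 1, Gt = 1,
#                a1 = 0, a2 = 0)
# f$m[ , 50]  # filtered state mean at the last time point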
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/kd_filter.R
|
# @title Kalman Smoother with Drift
#
# @description Computes mean and variance of the distribution of the state, conditional
# on the covariances of observation and system errors, equation matrices
# and all of the observations.
#
# @param y Observed data as an m-by-N matrix where m is the dimension
# @param f Kalman filtering results as returned by `kd.filter` on the data
# @param W Covariance p-by-p matrix W referring to system error
# @param Gt Evolution matrix as a p-by-p matrix
# @param a2 Drift of the system equation
#
# @return A list containing the means `s` and the covariances `S`
#
# @note
# Reference: Petris et al, 2009, p.61
#
kd.smoother <- function(y, f, W, Gt, a2) {
N <- ncol(y) # Number of observations
p <- nrow(f$m) # State dimension
ret <- vector("list", 2) # List to be returned
ret$s <- matrix(nrow = p, ncol = N) # Array of means s_t
ret$S <- array(dim = c(p, p, N)) # Array of covariances S_t
ret$s[ ,N] <- f$m[ ,N]
ret$S[ , ,N] <- f$C[ , ,N]
W.inv <- chol2inv(chol(W))
GT.W.inv.G <- t(Gt) %*% W.inv %*% Gt
GT.W.inv <- t(Gt) %*% W.inv
for (i in (N-1):1) {
C.inv <- chol2inv(chol(f$C[ , ,i]))
Bt <- chol2inv(chol(GT.W.inv.G + C.inv))
ret$s[ ,i] <- Bt %*% (GT.W.inv %*% (ret$s[ ,i+1] - a2) + C.inv %*% f$m[ ,i])
A <- Bt %*% GT.W.inv
ret$S[ , ,i] <- Bt + A %*% ret$S[ , ,i+1] %*% t(A)
}
ret
}
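# A minimal usage sketch (kept as comments), continuing the hypothetical
# kd.filter example above:
# s <- kd.smoother(y, f, W = 0.01, Gt = 1, a2 = 0)
# s$s[ , 1]  # smoothed state mean at the first time point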
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/kd_smoother.R
|
#' @name mean.BLC
#' @rdname mean.BLC
#'
#' @title BLC: Arithmetic mean
#'
#' @description Calculates the means based on the resulting chains from a fitted BLC model.
#'
#'
#' @param x A `BLC` object, result of a call to blc() function.
#' @param name A character with a parameter name of the BLC model that should be returned. It can be one of these: "alpha", "beta", "kappa", "phiv", "theta", "phiw".
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A vector with the mean values of the selected parameter.
#'
#' @examples
#' data(PT)
#' Y <- PT
#'
#' ## Fitting the model
#' fit = blc(Y = Y, M = 100, bn = 20)
#'
#' mean(fit, "kappa")
#'
#' @seealso [mean.PredBLC()] for `PredBLC` object method.
#'
#'
#' @export
mean.BLC <- function(x, name, ...) {
obj = x
matrixNames <- c("alpha", "beta", "kappa", "phiv")
vectorNames <- c("theta", "phiw")
if (name %in% matrixNames) {
apply(obj[[name]][ ,(obj$bn+1):obj$M], 1, mean)
} else if (name %in% vectorNames) {
mean(obj[[name]][(obj$bn+1):obj$M])
} else {
stop("Invalid `name` argument")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/mean_blc.R
|
#' @name mean.PredBLC
#' @rdname mean.PredBLC
#'
#' @title BLC: Arithmetic mean for predictions
#'
#' @description Calculates the means based on the resulting chains from a predicted year.
#'
#'
#' @param x A `PredBLC` object, result to the pred() function call on a `BLC` object.
#' @param h A positive integer specifying the year in the prediction horizon to be calculated.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A vector with the mean values of the log-mortality chains.
#'
#' @examples
#' data(PT)
#' Y <- PT
#'
#' ## Fitting the model
#' fit = blc(Y = Y, M = 100, bn = 20)
#'
#' ## Prediction for 2 years ahead
#' pred = predict(fit, h = 2)
#'
#' mean(pred, 1)
#' mean(pred, 2)
#'
#' @seealso [mean.BLC()] for `BLC` object method.
#'
#' @export
mean.PredBLC <- function(x, h, ...) {
obj = x
apply(obj$y[ ,h, ], 2, mean)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/mean_blc_pred.R
|
#' @name plot.BLC
#' @rdname plot.BLC
#'
#' @title BLC: Plot the fitted values
#'
#' @description This function plots the fitted log-mortality values, as well as the parameter values and credible intervals, of fitted BLC models.
#'
#'
#' @param x A `BLC` object, result of a call to blc() function.
#' @param parameter A character that determines the parameter to be plotted. Default is "all", which means that all three parameters "alpha", "beta" and "kappa" will be plotted. It can also be "alpha", "beta", "kappa" or "fitted". The latter provides a plot with all the fitted tables.
#' @param prob A numeric value that indicates the probability for the credible interval. Default is '0.9'.
#' @param age A numeric vector that represents the ages used in the fitted BLC model. Default is 'NULL'.
#' @param ... Other arguments.
#'
#' @return A plot with the fitted log mortality or fitted values and credible intervals of the parameters.
#'
#' @examples
#' ## Importing log-mortality data from Portugal:
#' data(PT)
#' Y <- PT
#'
#' ## Fitting the model
#' fit = blc(Y = Y, M = 100, bn = 20)
#'
#' ## Parameters' plot
#' plot(fit, parameter = "all")
#' \donttest{plot(fit, parameter = "beta", prob = 0.95)
#' plot(fit, parameter = "alpha", age = 18:80)
#' plot(fit, parameter = "kappa")
#'
#' ## Fitted mortality graduation
#' plot(fit, parameter = "fitted", age = 18:80)
#' }
#'
#' @seealso [plot.HP()] and [plot.DLM()] for `HP` or `DLM` methods.
#'
#' @import ggplot2
#' @import scales
#'
#' @export
plot.BLC <- function(x, parameter = "all", prob = 0.9,
age = NULL, ...) {
obj = x
sig = 1 - prob
if(!(is.null(age))){
if(!(is.integer(age))){
stop("Object age should be an integer vector")
}
}else{
age = 1:length(obj$beta[,1])
}
# Drop warmup from chains
chain.idx <- obj$bn:obj$M
alpha <- obj$alpha[ ,chain.idx]
beta <- obj$beta[ ,chain.idx]
kappa <- obj$kappa[ ,chain.idx]
alpha.est <- apply(alpha, 1, mean)
alpha.inf <- apply(alpha, 1, quantile, sig/2)
alpha.sup <- apply(alpha, 1, quantile, 1 - sig/2)
alpha.lim <- range(c(alpha.inf, alpha.sup))
beta.est <- apply(beta, 1, mean)
beta.inf <- apply(beta, 1, quantile, sig/2)
beta.sup <- apply(beta, 1, quantile, 1 - sig/2)
beta.lim <- range(c(beta.inf, beta.sup))
kappa.est <- apply(kappa, 1, mean)
kappa.inf <- apply(kappa, 1, quantile, sig/2)
kappa.sup <- apply(kappa, 1, quantile, 1 - sig/2)
kappa.lim <- range(c(kappa.inf, kappa.sup))
N <- length(kappa.est)
# Plot types
if(parameter == "all"){
df.beta = data.frame(x = age, fitted = beta.est, lim.inf = beta.inf, lim.sup = beta.sup, param = "beta")
df.alpha = data.frame(x = age, fitted = alpha.est, lim.inf = alpha.inf, lim.sup = alpha.sup, param = "alpha")
df.kappa = data.frame(x = 1:N, fitted = kappa.est, lim.inf = kappa.inf, lim.sup = kappa.sup, param = "kappa")
df = rbind(df.beta, df.alpha, df.kappa)
df$param = factor(df$param, labels = c("alpha[x]", "beta[x]", "kappa[t]"))
ggplot(data = df) +
geom_ribbon(aes(x = x, ymin = lim.inf, ymax = lim.sup), alpha = 0.5, fill = "blue") +
geom_line(aes(x = x, y = fitted), col = "blue") +
xlab("") + ylab("") + theme_bw() +
theme(axis.title.x = ggplot2::element_text(color = 'black', size = 13),
axis.title.y = ggplot2::element_text(color = 'black', size = 13)) +
facet_wrap(~param, scales = "free", nrow = 2,
labeller = label_parsed) +
geom_hline(data = data.frame(yint = 0, param = "beta[x]"), aes(yintercept = yint), col = "red", lty = 2)
}else if(parameter == "beta"){
ggplot(data = data.frame(x = age, fitted = beta.est, lim.inf = beta.inf, lim.sup = beta.sup)) +
geom_hline(yintercept = 0, col = "red", lty = 2) +
geom_ribbon(aes(x = x, ymin = lim.inf, ymax = lim.sup), alpha = 0.5, fill = "blue") +
geom_line(aes(x = x, y = fitted), col = "blue") +
xlab("x") + ylab(expression(beta[x])) + theme_bw() +
theme(axis.title.x = ggplot2::element_text(color = 'black', size = 13),
axis.title.y = ggplot2::element_text(color = 'black', size = 13))
}else if(parameter == "alpha"){
ggplot(data = data.frame(x = age, fitted = alpha.est, lim.inf = alpha.inf, lim.sup = alpha.sup)) +
geom_ribbon(aes(x = x, ymin = lim.inf, ymax = lim.sup), alpha = 0.5, fill = "blue") +
geom_line(aes(x = x, y = fitted), col = "blue") +
xlab("x") + ylab(expression(alpha[x])) + theme_bw() +
theme(axis.title.x = ggplot2::element_text(color = 'black', size = 13),
axis.title.y = ggplot2::element_text(color = 'black', size = 13))
}else if(parameter == "kappa"){
N = length(kappa.est)
ggplot(data = data.frame(x = 1:N, fitted = kappa.est, lim.inf = kappa.inf, lim.sup = kappa.sup)) +
geom_ribbon(aes(x = x, ymin = lim.inf, ymax = lim.sup), alpha = 0.5, fill = "blue") +
geom_line(aes(x = x, y = fitted), col = "blue") +
xlab("t") + ylab(expression(kappa[t])) + theme_bw() +
theme(axis.title.x = ggplot2::element_text(color = 'black', size = 13),
axis.title.y = ggplot2::element_text(color = 'black', size = 13))
}else if(parameter == "fitted"){
N = length(age); t = length(kappa.est)
tables = matrix(NA_real_, nrow = N, ncol = t)
aux.table = matrix(NA_real_, nrow = N, ncol = ncol(alpha))
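## For each year t, the fitted log-mortality at each age is the posterior mean
## of alpha + beta*kappa[t] over the MCMC draws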
for(j in 1:t){
for(i in 1:ncol(alpha)){ aux.table[,i] = alpha[,i] + beta[,i]*kappa[j,i] }
tables[,j] = rowMeans(aux.table)
}
df.tables = tidyr::gather(data.frame(tables, Age = age), key = "Year", value = "log.qx", -Age)
ggplot(df.tables) +
scale_y_continuous(trans = "log10", breaks = 10^-seq(0,5), limits = 10^-c(5,0), labels = scales::comma) +
scale_x_continuous(breaks = seq(0, 100, by = 10), limits = c(NA,NA)) +
geom_line(aes(x = Age, y = exp(log.qx), col = Year), show.legend = FALSE) +
xlab("x") + ylab("qx") + theme_bw() +
theme(axis.title.x = ggplot2::element_text(color = 'black', size = 13),
axis.title.y = ggplot2::element_text(color = 'black', size = 13))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/plot_blc.R
|
#' @name plot.PredBLC
#' @rdname plot.PredBLC
#'
#' @title BLC: Plot the log-mortality of a prediction
#'
#' @description This function plots the predicted log-mortality and the
#' prediction intervals of the log-mortality for specific year(s) in the prediction horizon.
#'
#'
#' @param x A `PredBLC` object, result to the pred() function call on a `BLC` object.
#' @param h A numeric vector specifying the year(s) in the prediction horizon to be calculated.
#' @param prob A real number that represents the probability of the prediction intervals.
#' @param plotIC Logical. If 'TRUE' (default), shows the predictive intervals.
#' @param age A numeric vector indicating the modelled ages. (Optional).
#' @param ... Other arguments.
#'
#' @return A 'ggplot' object with the predicted mortality rates and their prediction intervals.
#'
#' @examples
#' ## Importing log-mortality data from Portugal:
#' data(PT)
#' Y <- PT
#'
#' ## Fitting the model
#' fit = blc(Y = Y, M = 100, bn = 20)
#'
#' ## Prediction for 3 years ahead
#' pred = predict(fit, h = 3)
#'
#' ## Plotting
#' plot(pred, h = 1)
#' \donttest{plot(pred, h = 3, prob = 0.9)}
#'
#' @seealso [plot.HP()], [plot.DLM()] and [plot.BLC()] for `HP`, `DLM` or `BLC` methods.
#'
#' @import ggplot2
#' @import scales
#'
#' @export
plot.PredBLC <- function(x, h = NULL, prob = 0.95, plotIC = TRUE, age = NULL,
...) {
obj = x
alpha <- 1 - prob
if(is.null(h)) { h <- dim(obj$y)[2] }
if(any(h > dim(obj$y)[2])) { stop("h has invalid values.") }
h.size <- length(h)
res <- array(dim = c(h.size, 3, dim(obj$y)[3]))
for(ind in 1:h.size){
for (i in 1:dim(obj$y)[3]) {
tmp <- obj$y[ ,h[ind], i]
res[ind, 1, i] <- mean(tmp)
res[ind, 2, i] <- quantile(tmp, 1 - alpha/2)
res[ind, 3, i] <- quantile(tmp, alpha/2)
}
}
res <- exp(res)
n <- length(res[1,1,])
if(is.null(age)) {age = 1:n}
if(h.size == 1){
if(plotIC){
ggplot2::ggplot(data = NULL) +
ggplot2::scale_y_continuous(trans = 'log10', breaks = 10^-seq(20,0),
limits = 10^-c(NA_real_,0), labels = scales::comma) +
ggplot2::scale_x_continuous(breaks = seq(0, 200, by = 10), limits = c(NA_real_, NA_real_)) +
ggplot2::xlab("x (index)") + ggplot2::ylab("qx") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 12),
axis.title.y = ggplot2::element_text(color = 'black', size = 12),
axis.text = ggplot2::element_text(color = 'black', size = 12),
legend.text = ggplot2::element_text(size = 12),
legend.position = "bottom") +
ggplot2::geom_ribbon(ggplot2::aes(x = age, ymin = res[1,3,], ymax = res[1,2,]), fill = "steelblue4", alpha = 0.3) +
ggplot2::geom_line(ggplot2::aes(x = age, y = res[1,1,]), col = "steelblue", linewidth = 0.8, alpha = 0.8)
}else{
ggplot2::ggplot(data = NULL) +
ggplot2::scale_y_continuous(trans = 'log10', breaks = 10^-seq(20,0),
limits = 10^-c(NA_real_,0), labels = scales::comma) +
ggplot2::scale_x_continuous(breaks = seq(0, 200, by = 10), limits = c(NA_real_, NA_real_)) +
ggplot2::xlab("x (index)") + ggplot2::ylab("qx") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 12),
axis.title.y = ggplot2::element_text(color = 'black', size = 12),
axis.text = ggplot2::element_text(color = 'black', size = 12),
legend.text = ggplot2::element_text(size = 12),
legend.position = "bottom") +
ggplot2::geom_line(ggplot2::aes(x = age, y = res[1,1,]), col = "steelblue", linewidth = 0.8, alpha = 0.8)
}
}else{
aux_res <- cbind(t(res[1,,]), h[1])
for(ind in 2:h.size){ aux_res <- rbind(aux_res, cbind(t(res[ind,,]), h[ind])) }
df_res <- data.frame(aux_res)
index = rep(age, h.size)
if(plotIC){
ggplot2::ggplot(data = df_res) +
ggplot2::scale_y_continuous(trans = 'log10', breaks = 10^-seq(20,0),
limits = 10^-c(NA_real_,0), labels = scales::comma) +
ggplot2::scale_x_continuous(breaks = seq(0, 200, by = 10), limits = c(NA_real_, NA_real_)) +
ggplot2::xlab("x (index)") + ggplot2::ylab("qx") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 12),
axis.title.y = ggplot2::element_text(color = 'black', size = 12),
axis.text = ggplot2::element_text(color = 'black', size = 12),
legend.text = ggplot2::element_text(size = 12),
legend.position = "bottom") + ggplot2::labs(color = NULL, fill = NULL) +
ggplot2::geom_ribbon(ggplot2::aes(x = index, ymin = X3, ymax = X2, fill = paste0("h = ", as.factor(X4))), alpha = 0.3) +
ggplot2::geom_line(ggplot2::aes(x = index, y = X1, col = paste0("h = ", as.factor(X4))), linewidth = 0.8, alpha = 0.8)
}else{
ggplot2::ggplot(data = df_res) +
ggplot2::scale_y_continuous(trans = 'log10', breaks = 10^-seq(20,0),
limits = 10^-c(NA_real_,0), labels = scales::comma) +
ggplot2::scale_x_continuous(breaks = seq(0, 200, by = 10), limits = c(NA_real_, NA_real_)) +
ggplot2::xlab("x (index)") + ggplot2::ylab("qx") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 12),
axis.title.y = ggplot2::element_text(color = 'black', size = 12),
axis.text = ggplot2::element_text(color = 'black', size = 12),
legend.text = ggplot2::element_text(size = 12),
legend.position = "bottom") + ggplot2::labs(color = NULL, fill = NULL) +
ggplot2::geom_line(ggplot2::aes(x = index, y = X1, col = paste0("h = ", as.factor(X4))), linewidth = 0.8, alpha = 0.8)
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/plot_blc_pred.R
|
#' @title Chain's plot
#'
#' @description
#' This function provides three options of plots for the chain generated by the MCMC
#' algorithm in hp() and dlm() functions.
#'
#' @usage
#' plot_chain(fit, param, type = c("trace", "acf", "density"))
#'
#' @param fit Object of the classes `HP` or `DLM`.
#' @param param Character vector specifying the parameters to be plotted. It is used only when the class of fit object is `DLM`.
#' @param type Character string specifying the type of plot to be returned. There are three options: "trace" returns a plot of the samples of the parameters; "acf" returns a plot of the autocorrelation of the parameters; "density" returns a plot of the posterior density of the parameters based on the samples generated by the MCMC method.
#'
#' @return A plot of the chosen type of the selected parameter(s).
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' \donttest{data(USA)
#'
#' ## Selecting the log mortality rate of the 2010 total population ranging from 0 to 90 years old
#' USA2010 = USA[USA$Year == 2010,]
#' x = 0:90
#' Ex = USA2010$Ex.Total[x+1]
#' Dx = USA2010$Dx.Total[x+1]
#' y = log(Dx/Ex)
#'
#' ## Fitting HP model
#' fit = hp(x = x, Ex = Ex, Dx = Dx, model = "lognormal",
#' m = c(NA, 0.08, rep(NA, 6)),
#' v = c(NA, 1e-4, rep(NA, 6)))
#'
#' ## Plotting all the available types of plot:
#' plot_chain(fit, type = "trace")
#' plot_chain(fit, type = "acf")
#' plot_chain(fit, type = "density")
#'
#'
#' ## Fitting DLM
#' fit = dlm(y, M = 100)
#'
#' plot_chain(fit, param = "sigma2", type = "trace")
#' plot_chain(fit, param = "mu[10]", type = "acf")
#'
#' ## Selecting all theta1 parameters whose age index begins with 1
#' plot_chain(fit, param = "theta1[1", type = "density")
#'
#' ## Plotting all parameters indexed by age 10 and age 11
#' plot_chain(fit, param = c("[10]", "[11]"))
#' }
#'
#' @import ggplot2
#' @importFrom dplyr select
#' @importFrom dplyr starts_with
#' @importFrom tidyr gather
#'
#' @export
plot_chain <- function(fit, param, type = c("trace", "acf", "density")){
if(inherits(fit,"HP")){
type = match.arg(type)
if(type == "trace"){
if(fit$info$model %in% c("binomial", "poisson")){
df = data.frame(
samples = c(fit$post.samples$mcmc_theta[,1], fit$post.samples$mcmc_theta[,2],
fit$post.samples$mcmc_theta[,3], fit$post.samples$mcmc_theta[,4],
fit$post.samples$mcmc_theta[,5], fit$post.samples$mcmc_theta[,6],
fit$post.samples$mcmc_theta[,7], fit$post.samples$mcmc_theta[,8]),
param = rep(LETTERS[1:8], each = nrow(fit$post.samples$mcmc_theta)),
iteration = rep(1:nrow(fit$post.samples$mcmc_theta), times = 8)
)
}else{
df = data.frame(
samples = c(fit$post.samples$mcmc_theta[,1], fit$post.samples$mcmc_theta[,2],
fit$post.samples$mcmc_theta[,3], fit$post.samples$mcmc_theta[,4],
fit$post.samples$mcmc_theta[,5], fit$post.samples$mcmc_theta[,6],
fit$post.samples$mcmc_theta[,7], fit$post.samples$mcmc_theta[,8],
fit$post.samples$sigma2),
param = rep(c(LETTERS[1:8], "sigma2"), each = nrow(fit$post.samples$mcmc_theta)),
iteration = rep(1:nrow(fit$post.samples$mcmc_theta), times = 9)
)
}
ggplot2::ggplot(df) +
ggplot2::xlab("") + ggplot2::ylab("") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 13),
axis.title.y = ggplot2::element_text(color = 'black', size = 13),
axis.text = ggplot2::element_text(color = 'black', size = 13),
legend.text = ggplot2::element_text(size = 13),
strip.background = ggplot2::element_rect(fill = "deepskyblue4"),
strip.text = ggplot2::element_text(color = "white", size = 12)) +
ggplot2::geom_line(ggplot2::aes(x = iteration, y = samples), col = "deepskyblue4") +
ggplot2::facet_wrap(~param, scales = "free")
}else if(type == "acf"){
if(fit$info$model %in% c("binomial", "poisson")){
df = data.frame(
autocor = c(
acf(fit$post.samples$mcmc_theta[,1], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,2], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,3], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,4], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,5], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,6], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,7], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,8], lag.max = 50, plot = F)$acf),
param = rep(LETTERS[1:8], each = 51),
lag = rep(0:50, times = 8)
)
}else{
df = data.frame(
autocor = c(
acf(fit$post.samples$mcmc_theta[,1], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,2], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,3], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,4], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,5], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,6], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,7], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$mcmc_theta[,8], lag.max = 50, plot = F)$acf,
acf(fit$post.samples$sigma2, lag.max = 50, plot = F)$acf),
param = rep(c(LETTERS[1:8], "sigma2"), each = 51),
lag = rep(0:50, times = 9)
)
}
ggplot2::ggplot(df) +
ggplot2::scale_y_continuous(breaks = seq(0, 1, by = 0.2)) +
ggplot2::scale_x_continuous(breaks = seq(0, 50, by = 10), limits = c(-0.5, 50.5)) +
ggplot2::xlab("Lag") + ggplot2::ylab("Autocorrelation") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 13),
axis.title.y = ggplot2::element_text(color = 'black', size = 13),
axis.text = ggplot2::element_text(color = 'black', size = 13),
legend.text = ggplot2::element_text(size = 13),
strip.background = ggplot2::element_rect(fill = "deepskyblue4"),
strip.text = ggplot2::element_text(color = "white", size = 12)) +
ggplot2::geom_bar(ggplot2::aes(x = lag, y = autocor), stat = "identity", col = "black", fill = "orangered4") +
ggplot2::facet_wrap(~param)
}else if(type == "density"){
if(fit$info$model %in% c("binomial", "poisson")){
df = data.frame(
samples = c(fit$post.samples$mcmc_theta[,1], fit$post.samples$mcmc_theta[,2],
fit$post.samples$mcmc_theta[,3], fit$post.samples$mcmc_theta[,4],
fit$post.samples$mcmc_theta[,5], fit$post.samples$mcmc_theta[,6],
fit$post.samples$mcmc_theta[,7], fit$post.samples$mcmc_theta[,8]),
param = rep(LETTERS[1:8], each = nrow(fit$post.samples$mcmc_theta)),
iteration = rep(1:nrow(fit$post.samples$mcmc_theta), times = 8)
)
}else{
df = data.frame(
samples = c(fit$post.samples$mcmc_theta[,1], fit$post.samples$mcmc_theta[,2],
fit$post.samples$mcmc_theta[,3], fit$post.samples$mcmc_theta[,4],
fit$post.samples$mcmc_theta[,5], fit$post.samples$mcmc_theta[,6],
fit$post.samples$mcmc_theta[,7], fit$post.samples$mcmc_theta[,8],
fit$post.samples$sigma2),
param = rep(c(LETTERS[1:8], "sigma2"), each = nrow(fit$post.samples$mcmc_theta)),
iteration = rep(1:nrow(fit$post.samples$mcmc_theta), times = 9)
)
}
ggplot2::ggplot(df) +
ggplot2::xlab("") + ggplot2::ylab("") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 13),
axis.title.y = ggplot2::element_text(color = 'black', size = 13),
axis.text = ggplot2::element_text(color = 'black', size = 13),
legend.text = ggplot2::element_text(size = 13),
strip.background = ggplot2::element_rect(fill = "deepskyblue4"),
strip.text = ggplot2::element_text(color = "white", size = 12)) +
ggplot2::geom_density(ggplot2::aes(x = samples), col = "black", fill = "deepskyblue4", alpha = .7) +
ggplot2::facet_wrap(~param, scales = "free")
}else{
stop("Invalid type.")
}
}else if(inherits(fit,"DLM")){
type = match.arg(type)
ages = fit$info$ages
m = length(fit$info$Ft)
theta_name = paste0("theta", rep(1:m, each = length(fit$info$y)), "[", fit$info$ages, "]")
col_names = c(paste0("mu[", fit$info$ages, "]"), theta_name, "sigma2")
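## Chain columns are labelled mu[age], theta<j>[age] and sigma2; the `param`
## argument is matched against these labels as a literal substring (dplyr::contains)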
if(type == "trace"){
if(m>1){
aux = fit$theta[,,1]
for(i in 2:m) aux = cbind(aux, fit$theta[,,i])
}else{
aux = fit$theta
}
chains = cbind(fit$mu, aux, fit$sig2)
chains = as.data.frame(chains)
colnames(chains) <- col_names
## Selecting the parameters to plot
chains = dplyr::select(chains, dplyr::contains(param))
p = ncol(chains)
## Checking if param is valid
if(p == 0) { stop("param argument is not valid.") }
chains$iteration = rep(1:nrow(chains))
chains = tidyr::gather(chains, key = "param", value = "samples", -iteration)
chains$param = factor(chains$param, levels = unique(chains$param))
ggplot2::ggplot(chains) +
ggplot2::xlab("") + ggplot2::ylab("") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 10),
axis.title.y = ggplot2::element_text(color = 'black', size = 10),
axis.text = ggplot2::element_text(color = 'black', size = 10),
legend.text = ggplot2::element_text(size = 10),
strip.background = ggplot2::element_rect(fill = "deepskyblue4"),
strip.text = ggplot2::element_text(color = "white", size = 12)) +
ggplot2::geom_line(ggplot2::aes(x = iteration, y = samples), col = "deepskyblue4") +
ggplot2::facet_wrap(~param, scales = "free")
}else if(type == "acf"){
if(m>1){
aux = fit$theta[,,1]
for(i in 2:m) aux = cbind(aux, fit$theta[,,i])
}else{
aux = fit$theta
}
chains = cbind(fit$mu, aux, fit$sig2)
chains = as.data.frame(chains)
colnames(chains) <- col_names
## Selecting the parameters to plot
chains = dplyr::select(chains, dplyr::contains(param))
p = ncol(chains)
## Checking if param is valid
if(p == 0) { stop("param argument is not valid.") }
aux = matrix(NA_real_, nrow = 51, ncol = p); aux = as.data.frame(aux); colnames(aux) = colnames(chains)
for(i in 1:p){ aux[,i] <- acf(chains[,i], lag.max = 50, plot = F)$acf }
aux$lag = lag = rep(0:50)
aux = tidyr::gather(aux, key = "param", value = "autocor", -lag)
aux$param = factor(aux$param, levels = unique(aux$param))
ggplot2::ggplot(aux) +
ggplot2::scale_y_continuous(breaks = seq(0, 1, by = 0.2)) +
ggplot2::scale_x_continuous(breaks = seq(0, 50, by = 10), limits = c(-0.5, 50.5)) +
ggplot2::xlab("Lag") + ggplot2::ylab("Autocorrelation") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 10),
axis.title.y = ggplot2::element_text(color = 'black', size = 10),
axis.text = ggplot2::element_text(color = 'black', size = 10),
legend.text = ggplot2::element_text(size = 10),
strip.background = ggplot2::element_rect(fill = "deepskyblue4"),
strip.text = ggplot2::element_text(color = "white", size = 12)) +
ggplot2::geom_bar(ggplot2::aes(x = lag, y = autocor), stat = "identity", col = "black", fill = "orangered4") +
ggplot2::facet_wrap(~param)
}else if(type == "density"){
if(m>1){
aux = fit$theta[,,1]
for(i in 2:m) aux = cbind(aux, fit$theta[,,i])
}else{
aux = fit$theta
}
chains = cbind(fit$mu, aux, fit$sig2)
chains = as.data.frame(chains)
colnames(chains) <- col_names
## Selecting the parameters to plot
chains = dplyr::select(chains, dplyr::contains(param))
p = ncol(chains)
## Checking if param is valid
if(p == 0) { stop("param argument is not valid.") }
chains = tidyr::gather(chains, key = "param", value = "samples")
chains$param = factor(chains$param, levels = unique(chains$param))
ggplot2::ggplot(chains) +
ggplot2::xlab("") + ggplot2::ylab("") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 10),
axis.title.y = ggplot2::element_text(color = 'black', size = 10),
axis.text = ggplot2::element_text(color = 'black', size = 10),
legend.text = ggplot2::element_text(size = 10),
strip.background = ggplot2::element_rect(fill = "deepskyblue4"),
strip.text = ggplot2::element_text(color = "white", size = 12)) +
ggplot2::geom_density(ggplot2::aes(x = samples), col = "black", fill = "deepskyblue4", alpha = .7) +
ggplot2::facet_wrap(~param, scales = "free")
}else{
stop("Invalid type.")
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/plot_chain.R
|
#' @name plot.DLM
#' @rdname plot.DLM
#'
#' @title DLM: Plot the life table
#'
#' @description Function that returns a log-scale ggplot of the `DLM` and `ClosedDLM` objects returned by dlm() and dlm_close() functions.
#'
#'
#' @param x Object of the class `DLM` or `ClosedDLM` returned by the dlm() or dlm_close() functions.
#' @param age Vector with the ages to plot the life table.
#' @param plotIC Logical. If 'TRUE' (default), shows the predictive intervals.
#' @param plotData Logical. If 'TRUE' (default), shows crude rate (black dots).
#' @param labels Vector with the name of the curve label. (Optional).
#' @param colors Vector with the color of the curve. (Optional).
#' @param linetype Vector with the line type of the curve. (Optional).
#' @param prob Coverage probability of the predictive intervals. Default is '0.95'.
#' @param ... Other arguments.
#'
#' @return A 'ggplot' object with fitted life table.
#'
#' @examples
#' ## Selecting the log mortality rate of the 1990 male population ranging from 0 to 100 years old
#' USA1990 = USA[USA$Year == 1990,]
#' x = 0:100
#' Ex = USA1990$Ex.Male[x+1]
#' Dx = USA1990$Dx.Male[x+1]
#' y = log(Dx/Ex)
#'
#' ## Fitting DLM
#' fit = dlm(y, ages = 0:100, M = 100)
#'
#' ## Plotting the life tables:
#' plot(fit)
#'
#' ## Now we are starting from 20 years
#' \donttest{
#' fit2 = dlm(y[21:101], Ft = 1, Gt = 1, ages = 20:100, M = 100)
#'
#' plot(fit2, plotIC = FALSE)
#'
#' ## To plot multiples life tables see ?plot.list
#' plot(list(fit, fit2), age = 20:100,
#' plotData = FALSE,
#' colors = c("red", "blue"),
#' labels = c("1", "2"))
#' }
#'
#'
#' @include fitted_dlm.R
#' @include fun_aux.R
#'
#' @import ggplot2
#' @import scales
#'
#' @seealso [plot.HP()], [plot.BLC()] and [plot.PredBLC()] for `HP`, `BLC` or `PredBLC` methods.
#'
#' [plot.list()] to the `list` method, adding multiple objects in one single plot.
#'
#' [plot_chain()] to plot the chains generated by the MCMC algorithms for the `HP` and `DLM` objects.
#'
#' @export
plot.DLM <- function(x, plotIC = TRUE, plotData = TRUE, labels = NULL,
colors = NULL, linetype = NULL, prob = 0.95,
age = NULL, ...){
fit = x
h = 0
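## h counts requested ages beyond the fitted range; when positive, those ages
## are forecast with predict()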
## Checking
if(is.null(age)) { age = fit$info$ages }
if(length(age) != length(fit$info$ages)) {
if(min(age) < min(fit$info$age)) { warning("There are ages especified smaller than the ones fitted. These ages will not be used.") }
age = age[age >= min(fit$info$ages)]
ages_to_predict = age[age > max(fit$info$ages)]; h = length(ages_to_predict)
}else{
fit$info$ages = age
}
## selecting just the columns of the ages specified by the user
aux = which(fit$info$ages %in% age)
fit$mu = fit$mu[,aux]
fit$beta = fit$beta[,aux]
fit$info$y = c(fit$info$y)[aux]
fit$info$ages = fit$info$ages[aux]
## fitted qx and ci
qx_fit = qx_ci = na.omit(fitted(fit, prob = prob))
if(h > 0) {
qx_pred <- predict(fit, h = h)
aux_last_age = max(qx_fit$age)
aux_qx_fit = c(qx_fit$qx.fitted, qx_pred$qx.fitted)
aux_qi_fit = c(qx_ci$qx.lower, qx_pred$qx.lower)
aux_qs_fit = c(qx_ci$qx.upper, qx_pred$qx.upper)
qx_fit = data.frame(age = c(qx_fit$age, (aux_last_age+1):(aux_last_age+h)),
qx.fitted = aux_qx_fit)
qx_ci = data.frame(age = c(qx_ci$age, (aux_last_age+1):(aux_last_age+h)),
qx.lower = aux_qi_fit, qx.upper = aux_qs_fit)
}
## Customizing the plot
if(is.null(colors)) { colors = "seagreen" }
if(is.null(labels)){ labels = "DLM fitted" }
if(is.null(linetype)) { linetype = "solid" }
if(length(colors) != 1) {
warning("colors length is incorrect. It will be replaced by default color.")
colors = "seagreen"
}
if(length(labels) != 1) {
warning("labels length is incorrect. It will be replaced by default label.")
labels = "DLM fitted"
}
if(length(linetype) != 1) {
warning("linetype length is incorrect. It will be replaced by default type.")
labels = "solid"
}
## data
qx = 1-exp(-exp(fit$info$y))
if(plotData){
if(h == 0){
df = na.omit(data.frame(qx = qx, ages = age))
}else{
df = na.omit(data.frame(qx = c(qx, rep(NA_real_, h)), ages = age))
}
new_labels <- append(labels, "data", 0)
new_colors <- append(colors, "gray10", 0)
if(linetype %in% c("blank", "solid", "dashed", "dotted", "dotdash", "longdash", "twodash", "1F", "F1", "4C88C488", "12345678")){
new_linetype = c("blank", linetype)
}else{
linetype = as.numeric(linetype)
new_linetype = c(0, linetype)
}
}else{
new_labels = labels
new_colors = colors
}
## lower limit:
limits_y <- decimal(min(c(qx_ci$qx.lower[qx_ci$qx.lower > 0], qx[qx > 0], qx_fit$qx.fitted[qx_fit$qx.fitted > 0]), na.rm = T))
## Plot base:
g <- ggplot2::ggplot() +
ggplot2::scale_y_continuous(trans = 'log10', breaks = 10^-seq(limits_y,0),
limits = 10^-c(limits_y,0), labels = scales::comma) +
ggplot2::scale_x_continuous(breaks = seq(0, 200, by = 10), limits = c(NA, NA)) +
ggplot2::xlab("Age") + ggplot2::ylab("qx") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 12),
axis.title.y = ggplot2::element_text(color = 'black', size = 12),
axis.text = ggplot2::element_text(color = 'black', size = 12),
legend.text = ggplot2::element_text(size = 12),
legend.position = "bottom") # + ggplot2::labs(linetype = NULL)
if(plotIC){
if(plotData){
g + ggplot2::geom_point(data = df, ggplot2::aes(x = ages, y = qx, col = "data"), alpha = 0.8, size = 0.8) +
ggplot2::geom_ribbon(data = qx_ci, ggplot2::aes(x = age, ymin = qx.lower, ymax = qx.upper, fill = "fitted"), alpha = 0.3) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_fill_manual(name = NULL, values = colors) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype) +
ggplot2::guides(fill = "none", lty = "none",
color = ggplot2::guide_legend(override.aes = list(linetype = c(new_linetype),
shape = c(19, NA))))
}else{
g +
ggplot2::geom_ribbon(data = qx_ci, ggplot2::aes(x = age, ymin = qx.lower, ymax = qx.upper, fill = "fitted"), alpha = 0.3) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_fill_manual(name = NULL, values = colors) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}
}else{
if(plotData){
g + ggplot2::geom_point(data = df, ggplot2::aes(x = ages, y = qx, col = "data"), alpha = 0.8, size = 0.8) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none", lty = "none",
color = ggplot2::guide_legend(override.aes = list(linetype = c(new_linetype),
shape = c(19, NA))))
}else{
g +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}
}
}
#' @export
plot.ClosedDLM <- function(x, plotIC = TRUE, plotData = TRUE, labels = NULL,
colors = NULL, linetype = NULL, prob = 0.95,
age = NULL, ...){
fit = x
## Checking
if(is.null(age)) { age = fit$info$ages }
if(length(age) != length(fit$info$ages)) {
if(any(!(age %in% fit$info$ages))) { warning("The ages specified do not match the ages fitted. Only the matching ages will be used.") }
age = age[which(age %in% fit$info$ages)]
}else{
fit$info$ages = age
}
## selecting just the columns of the ages specified by the user
aux = which(fit$info$ages %in% age)
fit$qx = fit$qx[,aux]
fit$info$y = c(fit$info$y)[aux]
fit$info$ages = fit$info$ages[aux]
## fitted qx and ci
qx_fit = qx_ci = na.omit(fitted(fit, prob = prob))
## Customizing the plot
if(is.null(colors)) { colors = "seagreen" }
if(is.null(labels)){ labels = "DLM fitted" }
if(is.null(linetype)) { linetype = "solid" }
if(length(colors) != 1) {
warning("colors length is incorrect. It will be replaced by default color.")
colors = "seagreen"
}
if(length(labels) != 1) {
warning("labels length is incorrect. It will be replaced by default label.")
labels = "DLM fitted"
}
if(length(linetype) != 1) {
warning("linetype length is incorrect. It will be replaced by default type.")
labels = "solid"
}
## data
qx = 1-exp(-exp(fit$info$y))
if(plotData){
df = na.omit(data.frame(qx = qx, ages = age))
new_labels <- append(labels, "data", 0)
new_colors <- append(colors, "gray10", 0)
if(linetype %in% c("blank", "solid", "dashed", "dotted", "dotdash", "longdash", "twodash", "1F", "F1", "4C88C488", "12345678")){
new_linetype = c("blank", linetype)
}else{
linetype = as.numeric(linetype)
new_linetype = c(0, linetype)
}
}else{
new_labels = labels
new_colors = colors
}
## lower limit:
limits_y <- decimal(min(c(qx_ci$qx.lower[qx_ci$qx.lower > 0], qx[qx > 0], qx_fit$qx.fitted[qx_fit$qx.fitted > 0]), na.rm = T))
## Plot base:
g <- ggplot2::ggplot() +
ggplot2::scale_y_continuous(trans = 'log10', breaks = 10^-seq(limits_y,0),
limits = 10^-c(limits_y,0), labels = scales::comma) +
ggplot2::scale_x_continuous(breaks = seq(0, 200, by = 10), limits = c(NA, NA)) +
ggplot2::xlab("Age") + ggplot2::ylab("qx") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 12),
axis.title.y = ggplot2::element_text(color = 'black', size = 12),
axis.text = ggplot2::element_text(color = 'black', size = 12),
legend.text = ggplot2::element_text(size = 12),
legend.position = "bottom")
if(plotIC){
if(plotData){
g + ggplot2::geom_point(data = df, ggplot2::aes(x = ages, y = qx, col = "data"), alpha = 0.8, size = 0.8) +
ggplot2::geom_ribbon(data = qx_ci, ggplot2::aes(x = age, ymin = qx.lower, ymax = qx.upper, fill = "fitted"), alpha = 0.3) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_fill_manual(name = NULL, values = colors) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype) +
ggplot2::guides(fill = "none", lty = "none",
color = ggplot2::guide_legend(override.aes = list(linetype = c(new_linetype),
shape = c(19, NA))))
}else{
g +
ggplot2::geom_ribbon(data = qx_ci, ggplot2::aes(x = age, ymin = qx.lower, ymax = qx.upper, fill = "fitted"), alpha = 0.3) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_fill_manual(name = NULL, values = colors) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}
}else{
if(plotData){
g + ggplot2::geom_point(data = df, ggplot2::aes(x = ages, y = qx, col = "data"), alpha = 0.8, size = 0.8) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype) +
ggplot2::guides(fill = "none", lty = "none",
color = ggplot2::guide_legend(override.aes = list(linetype = c(new_linetype),
shape = c(19, NA))))
}else{
g +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/plot_dlm.R
|
#' @name plot.HP
#' @rdname plot.HP
#'
#' @title HP: Plot the life table
#'
#' @description Function that returns a log-scale ggplot of `HP` and `ClosedHP` objects returned by the hp() and hp_close() functions.
#'
#'
#' @param x Object of the class `HP` or `ClosedHP` returned by hp() or hp_close() functions.
#' @param age Vector with the ages to plot the life table.
#' @param Ex Vector with the exposures of the selected ages. Its length must be equal to the age vector. This argument is only necessary when using poisson and binomial HP models.
#' @param plotIC Logical. If 'TRUE' (default), shows the predictive intervals.
#' @param plotData Logical. If 'TRUE' (default), shows crude rate (black dots).
#' @param labels Vector with the name of the curve label. (Optional).
#' @param colors Vector with the color of the curve. (Optional).
#' @param linetype Vector with the line type of the curve. (Optional).
#' @param prob Coverage probability of the predictive intervals. Default is '0.95'.
#' @param ... Other arguments.
#'
#' @return A 'ggplot' object with fitted life table.
#'
#' @examples
#' ## Selecting the exposure and the death count of the year 1990, ranging from 0 to 90 years old:
#' USA1990 = USA[USA$Year == 1990,]
#' x = 0:90
#' Ex = USA1990$Ex.Male[x+1]
#' Dx = USA1990$Dx.Male[x+1]
#'
#' ## Fitting the poisson and the lognormal model:
#' fit = hp(x = x, Ex = Ex, Dx = Dx, model = "poisson",
#' M = 2000, bn = 1000, thin = 1)
#' fit2 = hp(x = x, Ex = Ex, Dx = Dx, model = "lognormal",
#' M = 2000, bn = 1000, thin = 1)
#'
#' ## Plot the life tables:
#' plot(fit)
#' plot(fit2, age = 0:110, plotIC = TRUE)
#'
#' ## To plot multiples life tables see ?plot.list
#' plot(list(fit, fit2),
#' age = 0:110, Ex = USA1990$Ex.Male,
#' plotIC = FALSE, colors = c("red", "blue"),
#' labels = c("Poisson", "Lognormal"))
#'
#' @include fitted_hp.R
#' @include fun_aux.R
#'
#' @import ggplot2
#' @import scales
#'
#' @seealso [plot.DLM()], [plot.BLC()] and [plot.PredBLC()] for `DLM`, `BLC` or `PredBLC` methods.
#'
#' [plot.list()] to the `list` method, adding multiple objects in one single plot.
#'
#' [plot_chain()] to plot the chains generated by the MCMC algorithms for the `HP` and `DLM` objects.
#'
#'
#' @export
plot.HP <- function(x, age = NULL, Ex = NULL, plotIC = TRUE,
plotData = TRUE, labels = NULL, colors = NULL,
linetype = NULL, prob = 0.95, ...){
fit = x
#### ----
qx_fit = qx_ci = fitted(fit, age = age, Ex = Ex, prob = prob)
## Customizing the plot
if(is.null(colors)) { colors = "seagreen" }
if(is.null(labels)) { labels = "HP fitted" }
if(is.null(linetype)) { linetype = "solid" }
if(length(colors) != 1) {
warning("colors length is incorrect. It will be replaced by default color.")
colors = "seagreen"
}
if(length(labels) != 1) {
warning("labels length is incorrect. It will be replaced by default label.")
labels = "DLM fitted"
}
if(length(linetype) != 1) {
warning("linetype length is incorrect. It will be replaced by default type.")
labels = "solid"
}
## Organizing data
data = fit$data
data$Model = "data"
data <- na.omit(data)
if(plotData){
new_labels <- append(labels, "data", 0)
new_colors <- append(colors, "gray10", 0)
if(linetype %in% c("blank", "solid", "dashed", "dotted", "dotdash", "longdash", "twodash", "1F", "F1", "4C88C488", "12345678")){
new_linetype = c("blank", linetype)
}else{
linetype = as.numeric(linetype)
new_linetype = c(0, linetype)
}
}else{
new_labels = labels
new_colors = colors
}
## lower limit:
li_y <- decimal(min(c(qx_ci$qx.lower[qx_ci$qx.lower > 0], data$qx[data$qx > 0], qx_fit$qx.fitted[qx_fit$qx.fitted > 0]), na.rm = T))
if(!is.null(age)) { data = data[data$x %in% age, ] }
## Plot base:
g <- ggplot2::ggplot() +
ggplot2::scale_y_continuous(trans = 'log10', breaks = 10^-seq(li_y,0), limits = 10^-c(li_y,0), labels = scales::comma) +
ggplot2::scale_x_continuous(breaks = seq(0, 200, by = 10), limits = c(NA, NA)) +
ggplot2::xlab("Age") + ggplot2::ylab("qx") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 12),
axis.title.y = ggplot2::element_text(color = 'black', size = 12),
axis.text = ggplot2::element_text(color = 'black', size = 12),
legend.text = ggplot2::element_text(size = 12),
legend.position = "bottom")
if(plotIC){
if(plotData){
g + ggplot2::geom_point(data = data, ggplot2::aes(x = x, y = qx, col = "data"), alpha = 1, size = 0.8) +
ggplot2::geom_ribbon(data = qx_ci, ggplot2::aes(x = age, ymin = qx.lower, ymax = qx.upper, fill = "fitted"), alpha = 0.3) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_fill_manual(name = NULL, values = colors) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype) +
ggplot2::guides(fill = "none", lty = "none",
color = ggplot2::guide_legend(override.aes = list(linetype = c(new_linetype),
shape = c(19, NA))))
}else{
g +
ggplot2::geom_ribbon(data = qx_ci, ggplot2::aes(x = age, ymin = qx.lower, ymax = qx.upper, fill = "fitted"), alpha = 0.3) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_fill_manual(name = NULL, values = colors) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}
}else{
if(plotData){
g + ggplot2::geom_point(data = data, ggplot2::aes(x = x, y = qx, col = "data"), alpha = 1, size = 0.8) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none", lty = "none",
color = ggplot2::guide_legend(override.aes = list(linetype = c(new_linetype),
shape = c(19, NA))))
}else{
g +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}
}
}
#' @export
plot.ClosedHP <- function(x, age = NULL, plotIC = TRUE, plotData = TRUE,
labels = NULL, colors = NULL, linetype = NULL,
prob = 0.95, ...){
fit = x
qx_fit = qx_ci = na.omit(fitted(fit, age = age, prob = prob))
## Customizing the plot
if(is.null(colors)) { colors = "seagreen" }
if(is.null(labels)) { labels = "HP fitted" }
if(is.null(linetype)) { linetype = "solid" }
if(length(colors) != 1) {
warning("colors length is incorrect. It will be replaced by default color.")
colors = "seagreen"
}
if(length(labels) != 1) {
warning("labels length is incorrect. It will be replaced by default label.")
labels = "DLM fitted"
}
if(length(linetype) != 1) {
warning("linetype length is incorrect. It will be replaced by default type.")
labels = "solid"
}
## Organizing data
data = fit$data
data$Model = "data"
data <- na.omit(data)
if(plotData){
new_labels <- append(labels, "data", 0)
new_colors <- append(colors, "gray10", 0)
if(linetype %in% c("blank", "solid", "dashed", "dotted", "dotdash", "longdash", "twodash", "1F", "F1", "4C88C488", "12345678")){
new_linetype = c("blank", linetype)
}else{
linetype = as.numeric(linetype)
new_linetype = c(0, linetype)
}
}else{
new_labels = labels
new_colors = colors
}
## lower limit:
li_y <- decimal(min(c(qx_ci$qx.lower[qx_ci$qx.lower > 0], data$qx[data$qx > 0], qx_fit$qx.fitted[qx_fit$qx.fitted > 0]), na.rm = T))
if(!is.null(age)) { data = data[data$x %in% age, ] }
## Plot base:
g <- ggplot2::ggplot() +
ggplot2::scale_y_continuous(trans = 'log10', breaks = 10^-seq(li_y,0), limits = 10^-c(li_y,0), labels = scales::comma) +
ggplot2::scale_x_continuous(breaks = seq(0, 200, by = 10), limits = c(NA, NA)) +
ggplot2::xlab("Age") + ggplot2::ylab("qx") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 12),
axis.title.y = ggplot2::element_text(color = 'black', size = 12),
axis.text = ggplot2::element_text(color = 'black', size = 12),
legend.text = ggplot2::element_text(size = 12),
legend.position = "bottom")
if(plotIC){
if(plotData){
g + ggplot2::geom_point(data = data, ggplot2::aes(x = x, y = qx, col = "data"), alpha = 1, size = 0.8) +
ggplot2::geom_ribbon(data = qx_ci, ggplot2::aes(x = age, ymin = qx.lower, ymax = qx.upper, fill = "fitted"), alpha = 0.3) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_fill_manual(name = NULL, values = colors) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype) +
ggplot2::guides(fill = "none", lty = "none",
color = ggplot2::guide_legend(override.aes = list(linetype = c(new_linetype),
shape = c(19, NA))))
}else{
g +
ggplot2::geom_ribbon(data = qx_ci, ggplot2::aes(x = age, ymin = qx.lower, ymax = qx.upper, fill = "fitted"), alpha = 0.3) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_fill_manual(name = NULL, values = colors) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}
}else{
if(plotData){
g + ggplot2::geom_point(data = data, ggplot2::aes(x = x, y = qx, col = "data"), alpha = 1, size = 0.8) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype) +
ggplot2::guides(fill = "none", lty = "none",
color = ggplot2::guide_legend(override.aes = list(linetype = c(new_linetype),
shape = c(19, NA))))
}else{
g +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = "fitted", lty = "fitted"), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}
}
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/plot_hp.R
#' @name plot.list
#' @rdname plot.list
#'
#' @title Plot a set of life tables
#'
#' @description
#' Function that returns a log-scale 'ggplot' of the mortality graduation
#' returned by hp(), dlm(), hp_close() or dlm_close() functions.
#'
#'
#' @param x List of objects of the following classes: `HP`, `DLM`, `ClosedHP` or `ClosedDLM`.
#' @param age Vector with the ages to plot the life tables.
#' @param Ex Vector with the exposures of the selected ages. Its length must be equal to the age vector. This argument is only necessary when plotting Poisson and binomial HP models.
#' @param plotIC Logical. If 'TRUE' (default), plots the predictive intervals.
#' @param plotData Logical. If 'TRUE' (default), plots the data used in the modelling as dots.
#' @param labels Vector with the descriptions of the curves (Optional).
#' @param colors Vector of colours of the curves (Optional).
#' @param linetype Vector with the line types of the curves (Optional).
#' @param prob Coverage probability of the predictive intervals. Default is '0.95'.
#' @param ... Other arguments.
#'
#' @return A 'ggplot' object with fitted life tables.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the log mortality rate of the 1990 male population ranging from 0 to 90 years old
#' USA1990 = USA[USA$Year == 1990,]
#' x = 0:90
#' Ex = USA1990$Ex.Male[x+1]
#' Dx = USA1990$Dx.Male[x+1]
#' y = log(Dx/Ex)
#'
#'
#' ## Fit a Poisson HP model and a DLM
#' fit = hp(x = x, Ex = Ex, Dx = Dx, model = "poisson",
#' M = 2000, bn = 1000, thin = 1)
#' fit2 = dlm(y, M = 100)
#'
#' ## Plot multiples life tables
#' plot(list(fit, fit2),
#' age = 0:110, Ex = USA1990$Ex.Male,
#' plotIC = FALSE, colors = c("red", "blue"),
#' labels = c("HP Poisson", "DLM"))
#'
#'
#' ## Plot ClosedHP and ClosedDLM objects
#' close1 = hp_close(fit, method = "hp", x0 = 90)
#' close2 = dlm_close(fit2, method = "plateau")
#' plot(list(fit, fit2, close1, close2),
#' plotIC = FALSE, colors = c("red", "blue", "green", "purple"),
#' labels = c("HP", "DLM", "ClosedHP", "ClosedDLM"))
#'
#' @include fitted_dlm.R
#' @include fitted_hp.R
#' @include fun_aux.R
#'
#' @import ggplot2
#' @import scales
#'
#' @seealso [plot.DLM()], [plot.HP()], [plot.BLC()] and [plot.PredBLC()] for single plots.
#'
#' @export
plot.list <- function(x, age = NULL, Ex = NULL, plotIC = TRUE,
plotData = TRUE, labels = NULL, colors = NULL,
linetype = NULL, prob = 0.95, ...){
fits = x
if(length(fits) == 1){ stop("The length of fits must be two or more.") }
if(all(unlist(lapply(fits, class)) %in% c("DLM", "ClosedDLM", "HP", "ClosedHP"))){
## Number of fitted models
n_models <- length(fits)
classes <- unlist(lapply(fits, class))
h = rep(0, n_models)
## Checking ages
if(is.null(age)) {
age <- list(); data_aux <- list()
for(i in 1:n_models){
if(classes[i] %in% c("HP", "ClosedHP")){
age[[i]] = fits[[i]]$data$x; data_aux[[i]] = fits[[i]]$data
}else{
age[[i]] = fits[[i]]$info$ages; data_aux[[i]] = data.frame(x = fits[[i]]$info$ages, qx = 1 - exp(-exp(fits[[i]]$info$y)))
}
}
}else{
warn = F; aux2 = age; ages_new = aux = age = list(); data_aux <- list()
for(i in 1:n_models){
if(classes[i] %in% c("HP", "ClosedHP")){
age[[i]] = aux2
data_aux[[i]] = fits[[i]]$data[fits[[i]]$data$x %in% age[[i]], ]
}else if(classes[i] == "ClosedDLM"){
if(any(!(aux2 %in% fits[[i]]$info$ages))) { warn = T }
ages_new[[i]] = aux2[which(aux2 %in% fits[[i]]$info$ages)]
age[[i]] = ages_new[[i]]
## selecting just the columns of the ages specified by the user
aux[[i]] = which(fits[[i]]$info$ages %in% ages_new[[i]])
fits[[i]]$qx = fits[[i]]$qx[,aux[[i]]]
fits[[i]]$info$y = c(fits[[i]]$info$y[aux[[i]]])
fits[[i]]$info$ages = fits[[i]]$info$ages[aux[[i]]]
data_aux[[i]] = data.frame(x = fits[[i]]$info$ages, qx = 1-exp(-exp(fits[[i]]$info$y)))
}else{
if(min(aux2) < min(fits[[i]]$info$ages)) { warn = T }
ages_new[[i]] = aux2[aux2 >= min(fits[[i]]$info$ages)]
ages_to_predict = aux2[aux2 > max(fits[[i]]$info$ages)]; h[i] = length(ages_to_predict)
age[[i]] = ages_new[[i]]
## selecting just the columns of the ages specified by the user
aux[[i]] = which(fits[[i]]$info$ages %in% ages_new[[i]])
## The update is different according to object class
fits[[i]]$mu = fits[[i]]$mu[,aux[[i]]]
fits[[i]]$beta = fits[[i]]$beta[,aux[[i]]]
fits[[i]]$info$y = c(fits[[i]]$info$y[aux[[i]]])
fits[[i]]$info$ages = fits[[i]]$info$ages[aux[[i]]]
data_aux[[i]] = data.frame(x = fits[[i]]$info$ages, qx = 1-exp(-exp(fits[[i]]$info$y)))
}
}
if(warn){ warning("There are ages especified smaller than the ones in DLM fitted. These ages will not be used.") }
rm(ages_new)
}
####################################################################################
## qx fit and ci
qx_fit = qx_cin = n_aux = NULL
for(i in 1:n_models){
aux = fitted(fits[[i]], age = age[[i]], Ex = Ex, prob = prob)
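## ages beyond the fitted range (h[i] > 0) are extrapolated with predict()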
if(h[i] > 0) {
qx_pred <- predict(fits[[i]], h = h[i])
aux_last_age = max(aux$age)
aux_qx_fit = c(aux$qx.fitted, qx_pred$qx.fitted)
aux_qi_fit = c(aux$qx.lower, qx_pred$qx.lower)
aux_qs_fit = c(aux$qx.upper, qx_pred$qx.upper)
aux1 = data.frame(age = c(aux$age, (aux_last_age+1):(aux_last_age+h[i])),
qx.fitted = aux_qx_fit)
aux2 = data.frame(age = c(aux$age, (aux_last_age+1):(aux_last_age+h[i])),
qx.lower = aux_qi_fit, qx.upper = aux_qs_fit)
}else{
aux1 = data.frame(age = aux$age, qx.fitted = aux$qx.fitted)
aux2 = data.frame(age = aux$age, qx.lower = aux$qx.lower, qx.upper = aux$qx.upper)
}
qx_fit <- rbind(qx_fit, aux1)
qx_cin <- rbind(qx_cin, aux2)
n_aux[i] <- nrow(aux1)
}
qx_fit$Model = paste("Model", rep(1:n_models, n_aux))
qx_cin$Model = paste("Model", rep(1:n_models, n_aux))
qx_fit = na.omit(qx_fit)
qx_cin = na.omit(qx_cin)
oneModel = F
## Customizing the plot
if(n_models == 1){
oneModel = T
if(is.null(colors)) { colors = "seagreen" }
if(is.null(labels)) { labels = "fitted" }
if(is.null(linetype)) { linetype = "solid" }
}
## checking if there are color or label inputs
if(is.null(labels)){ labels <- unique(qx_fit$Model) }
if(length(labels) != n_models) {
warning("The number of labels does not match the number of models to plot.")
labels <- unique(qx_fit$Model)
}
if(is.null(colors)){ colors = scales::hue_pal()(n_models) }
if(length(colors) != n_models) {
warning("The number of selected colors does not match the number of models to plot.")
colors = scales::hue_pal()(n_models)
}
if(is.null(linetype)){ linetype = "solid" }
if(length(linetype) == 1){ linetype = rep(linetype, n_models) }
if(length(linetype) != n_models) {
warning("The number of selected linetype must be one type or match the number of models to plot.")
linetype = rep("solid", n_models)
}
## Organizing data
if(!oneModel){
data = NULL
for(i in 1:n_models){
data_aux[[i]]$Model = paste("Model", i)
data <- rbind(data, data_aux[[i]][,c("x", "qx", "Model")])
}
new_labels = labels
new_colors = colors
data <- na.omit(data)
data$Model <- factor(data$Model, labels = new_labels, levels = paste("Model", 1:n_models))
qx_cin$Model <- factor(qx_cin$Model, labels = new_labels, levels = paste("Model", 1:n_models))
qx_fit$Model <- factor(qx_fit$Model, labels = new_labels, levels = paste("Model", 1:n_models))
}else{
data = data_aux[[1]][,c("x", "qx", "Model")]
data$Model = "data"
data <- na.omit(data)
if(plotData){
new_labels <- append(labels, "data", 0)
new_colors <- append(colors, "gray10", 0)
}else{
new_labels = labels
new_colors = colors
}
}
## lower limit:
limits_y <- decimal(min(c(qx_cin$qx.lower[qx_cin$qx.lower > 0], data$qx[data$qx > 0], qx_fit$qx.fitted[qx_fit$qx.fitted > 0]), na.rm = T))
## Plot base:
g <- ggplot2::ggplot() +
ggplot2::scale_y_continuous(trans = 'log10', breaks = 10^-seq(limits_y,0),
limits = 10^-c(limits_y,0), labels = scales::comma) +
ggplot2::scale_x_continuous(breaks = seq(0, 200, by = 10), limits = c(NA, NA)) +
ggplot2::xlab("Age") + ggplot2::ylab("qx") + ggplot2::theme_bw() +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = 1.2),
axis.title.x = ggplot2::element_text(color = 'black', size = 12),
axis.title.y = ggplot2::element_text(color = 'black', size = 12),
axis.text = ggplot2::element_text(color = 'black', size = 12),
legend.text = ggplot2::element_text(size = 12),
legend.position = "bottom")
if(plotIC){
if(plotData){
g + ggplot2::geom_point(data = data, ggplot2::aes(x = x, y = qx, col = Model), alpha = 0.8, size = 0.8) +
ggplot2::geom_ribbon(data = qx_cin, ggplot2::aes(x = age, ymin = qx.lower, ymax = qx.upper, fill = Model), alpha = 0.3) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = Model, lty = Model), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_fill_manual(name = NULL, values = colors) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}else{
g +
ggplot2::geom_ribbon(data = qx_cin, ggplot2::aes(x = age, ymin = qx.lower, ymax = qx.upper, fill = Model), alpha = 0.3) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = Model, lty = Model), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_fill_manual(name = NULL, values = colors) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}
}else{
if(plotData){
g + ggplot2::geom_point(data = data, ggplot2::aes(x = x, y = qx, col = Model), alpha = 0.8, size = 0.8) +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = Model, lty = Model), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}else{
g +
ggplot2::geom_line(data = qx_fit, ggplot2::aes(x = age, y = qx.fitted, col = Model, lty = Model), linewidth = 0.8, alpha = 0.8) +
ggplot2::scale_colour_manual(name = NULL, values = new_colors, labels = new_labels) +
ggplot2::scale_linetype_manual(name = NULL, values = linetype, labels = new_labels) +
ggplot2::guides(fill = "none")
}
}
}else{
stop("fits argument must be an object or a list of DLM and/or HP objects.")
}
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/plot_list.R
#' @name predict.BLC
#' @rdname predict.BLC
#'
#' @title BLC: Forecasting
#'
#' @description Calculates the means and variances of the forecast distributions based on
#' the resulting chains from an estimation method.
#'
#'
#' @param object A `BLC` object that is result of a call to blc() function.
#' @param h The prediction horizon.
#' @param ... Other arguments.
#'
#' @return A `PredBLC` object that contains a list with predicted values calculated
#' from `BLC` object chains structured in an array.
#'
#' @examples
#' ## Importing log-mortality data from Portugal:
#' data(PT)
#' Y <- PT
#'
#' ## Fitting the model
#' fit = blc(Y = Y, M = 100, bn = 20)
#'
#' ## Prediction for 2 years ahead
#' pred = predict(fit, h = 2)
#' print(pred)
#'
#' @importFrom MASS mvrnorm
#'
#' @seealso [fitted.BLC()], [print.BLC()], and [plot.PredBLC()] for `PredBLC` methods to native R functions [fitted()],
#' [print()], and [plot()].
#'
#' [expectancy.BLC()] and [Heatmap.BLC()] to compute and plot the life expectancy of the prediction(s).
#'
#' @export
predict.BLC <- function(object, h, ...) {
obj = object
N <- ncol(obj$Y)
q <- nrow(obj$beta)
sim <- array(dim = c(obj$M - obj$bn, h, q))
for (l in 1:(obj$M - obj$bn)) {
est.alpha <- obj$alpha[ ,l + obj$bn]
est.beta <- obj$beta[ ,l + obj$bn]
est.V <- diag(1/obj$phiv[ ,l + obj$bn])
est.theta <- obj$theta[l + obj$bn]
est.W <- 1/obj$phiw[l + obj$bn]
Gt <- 1
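## run the Kalman filter over the observed series; its last filtered moments seed the forecast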
filt <- kd.filter(obj$Y, obj$m0, obj$C0, est.V, est.W, est.beta,
Gt, est.alpha, est.theta)
a <- est.theta + Gt %*% filt$m[ ,N]
R <- Gt %*% filt$C[ , ,N] %*% t(Gt) + est.W
f <- est.alpha + est.beta %*% as.matrix(a)
Q <- est.beta %*% as.matrix(R) %*% t(est.beta) + est.V
sim[l,1, ] <- mvrnorm(1, f, Q)
if (h > 1) for (k in 2:h) {
a <- est.theta + Gt %*% a
R <- Gt %*% R %*% t(Gt) + est.W
f <- est.alpha + est.beta %*% as.matrix(a)
Q <- est.beta %*% as.matrix(R) %*% t(est.beta) + est.V
sim[l,k, ] <- mvrnorm(1, f, Q)
}
}
sim <- list(y = sim, h = h)
class(sim) <- "PredBLC"
sim
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/predict_blc.R
#' @name predict.DLM
#' @rdname predict.DLM
#'
#' @title DLM: Prediction of death probability
#'
#' @description Extrapolates the mortality curve fitted by DLM by calculating the median
#' of death probability and the respective prediction interval.
#'
#'
#' @param object A `DLM` object that is result of a call to dlm() function.
#' @param h The age prediction horizon, i.e. the number of ages to extrapolate.
#' @param prob Coverage probability of the predictive intervals.
#' @param ... Other arguments.
#'
#' @return A data.frame with the death probability prediction and prediction interval for the ages in the prediction horizon.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the log mortality rate of the year 2000, ranging from 0 to 100 years old:
#' USA2000 = USA[USA$Year == 2000,]
#' x = 0:100
#' Ex = USA2000$Ex.Total[x+1]
#' Dx = USA2000$Dx.Total[x+1]
#'
#' y = log(Dx/Ex)
#'
#' ## Fitting dlm
#' fit = dlm(y, M = 100)
#'
#' ## Extrapolating the death probabilities (qx)
#' predict(fit, h = 3, prob = 0.95)
#'
#'
#' @importFrom MASS mvrnorm
#'
#' @seealso [fitted.DLM()].
#'
#' @include ffbs.R
#'
#' @export
predict.DLM <- function(object, h, prob = 0.95, ...){
fit = object
N = length(fit$info$y)
p = length(fit$info$prior$m0)
y = fit$info$y
Gt = fit$info$Gt
Ft = fit$info$Ft
delta = fit$info$delta[length(fit$info$delta)]
# V = fit$sig2
V = 0.01 ## same value used in Filtering
sig2 = fit$sig2
n = length(fit$sig2)
aux = fit$param
sim <- matrix(NA_real_, nrow = n, ncol = h)
Wt = aux$Ct[N,,] * (1 - delta) / delta
at = Gt %*% aux$mt[N,]
Rt = Gt %*% aux$Ct[N,,] %*% t(Gt) + Wt
ft = Ft %*% at
Qt = (Ft %*% Rt %*% t(Ft) + V)[1,1]
At = Rt %*% t(Ft) / Qt
Ct = Rt - At %*% Ft %*% Rt ## second moment
# sim[, 1] <- rnorm(n, mean = ft, sd = sd(Qt * sig2))
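## predictive draw: Student-t with 2*alpha degrees of freedom, location ft and scale sqrt(Qt*beta/alpha)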
sim[, 1] <- rt(n, df = 2*aux$alpha)*sqrt(Qt*(aux$beta/aux$alpha)) + c(ft)
if(h > 1) for(k in 2:h){
Wt = Ct * (1 - delta) / delta
at = Gt %*% at
Rt = Gt %*% Rt %*% t(Gt) + Wt
ft = Ft %*% at
Qt = (Ft %*% Rt %*% t(Ft) + V)[1,1]
At = Rt %*% t(Ft) / Qt
Ct = Rt - At %*% Ft %*% Rt ## second moment
# sim[, k] <- rnorm(n, mean = ft, sd = sd(Qt * sig2))
sim[, k] <- rt(n, df = 2*aux$alpha)*sqrt(Qt*(aux$beta/aux$alpha)) + c(ft)
}
qx_sim = exp(sim)
qx_sim[qx_sim < 0] = 0
qx_sim[qx_sim > 1] = 1
qx_fitted = apply(qx_sim, 2, median, na.rm = T)
qx_lim = apply(qx_sim, 2, quantile, probs = c((1-prob)/2, (1+prob)/2), na.rm = T)
qx_fitted = data.frame(Ages = (fit$info$ages[N]+1):(fit$info$ages[N]+h), qx_fitted = qx_fitted)
return(data.frame(age = qx_fitted$Ages, qx.fitted = 1 - exp(-qx_fitted$qx_fitted),
qx.lower = 1 - exp(-qx_lim[1,]), qx.upper = 1 - exp(-qx_lim[2,])))
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/predict_dlm.R
#' @name print.BLC
#' @rdname print.BLC
#'
#' @title BLC: Print
#'
#' @description Prints details from a fitted BLC model and returns it invisibly.
#'
#'
#' @param x A `BLC` or `PredBLC` object, result of a call to blc() or predict() function.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A character vector with the details of a fitted `BLC` or `PredBLC` model.
#'
#'
#' @seealso [print.DLM()] and [print.HP()] for `DLM` or `HP` methods.
#'
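#' @examples
#' ## A minimal sketch, assuming the PT data and the blc() call used elsewhere in this package:
#' data(PT)
#' fit = blc(Y = PT, M = 100, bn = 20)
#' print(fit)
#'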
#' @export
print.BLC <- function(x, ...) {
obj = x
catf <- function(x, ...) cat(sprintf(x, ...), "\n") ## cat() has no 'end' argument; print the newline explicitly
catf("Bayesian Lee-Carter Estimation\n")
catf("Age groups: %d", nrow(obj$Y))
catf("Time length: %d", ncol(obj$Y))
catf("Sample size: %d", obj$M)
catf("Prior: N(%.2f, %.2e)", obj$m0, obj$C0)
invisible(obj)
}
#' @export
print.PredBLC <- function(x, ...) {
obj = x
cat(sprintf('Forecast of a Bayesian Lee-Carter model (h = %d)\n',
obj$h))
cat("\n")
invisible(obj)
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/print_blc.R
# @name print.PredBLC
# @rdname print.PredBLC
#
# @title Print Values for BLC prediction models
#
# @description Print details from a fitted BLC prediction model and returns it invisibly.
#
#
# @param x A `PredBLC` object, result to the pred() function call on a `BLC` object.
# @param ... Further arguments passed to or from other methods.
#
# @return A character vector with the details of a fitted `PredBLC` model.
#
#
# @seealso [print.DLM()], [print.HP()] and [print.BLC()] for `DLM`, `HP` or `BLC` methods.
#
#
print.PredBLC <- function(x, ...) {
obj = x
cat(sprintf('Forecast of a Bayesian Lee-Carter model (h = %d)\n',
obj$h))
invisible(obj)
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/print_blc_pred.R
#' @name print.DLM
#' @rdname print.DLM
#'
#' @title DLM: Print
#'
#' @description Prints details from a fitted `DLM` or `ClosedDLM` model and returns it invisibly.
#'
#'
#' @param x A `DLM` or `ClosedDLM` object, result of a call to dlm() or dlm_close() function.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A character vector with the details of a fitted `DLM` or `ClosedDLM` model.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the log mortality rate of the 2010 male population ranging from 0 to 100 years old
#' USA2010 = USA[USA$Year == 2010,]
#' x = 0:100
#' Ex = USA2010$Ex.Male[x+1]
#' Dx = USA2010$Dx.Male[x+1]
#' y = log(Dx/Ex)
#'
#' ## Fitting DLM
#' fit = dlm(y, M = 100)
#' print(fit)
#'
#' @seealso [print.HP()] and [print.BLC()] for `HP` or `BLC` methods.
#'
#' @export
print.DLM <- function(x, ...){
fit = x
catf <- function(x, ...) cat(sprintf(x, ...), "\n")
catf("DLM for life tables fitted")
catf("")
catf("Ft:")
cat("[", fit$info$Ft,"]")
catf("")
catf("")
catf("Gt:")
for(i in 1:nrow(fit$info$Gt)) {cat(fit$info$Gt[i,]); catf("")}
catf("")
cat("Discount factor: ", as.character(fit$info$delta), "\n")
catf("")
catf("Ages fitted:")
cat(fit$info$ages)
catf("")
}
#' @export
print.ClosedDLM <- function(x, ...){
fit = x
catf <- function(x, ...) cat(sprintf(x, ...), "\n")
catf("DLM closure curve estimation")
catf("Method: %s", fit$method)
catf("Min. age: %s", min(fit$info$ages, na.rm = T))
catf("Max. age: %s", max(fit$info$ages, na.rm = T))
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/print_dlm.R
#' @name print.HP
#' @rdname print.HP
#'
#' @title HP: Print
#'
#' @description Prints details from a fitted `HP` or `ClosedHP` model and returns it invisibly.
#'
#'
#' @param x A `HP` or `ClosedHP` object, result of a call to hp() or hp_close() function.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A character vector with the details of a fitted `HP` or `ClosedHP` model.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the exposure and death count of the 2010 male population ranging from 0 to 90 years old
#' USA2010 = USA[USA$Year == 2010,]
#' x = 0:90
#' Ex = USA2010$Ex.Male[x+1]
#' Dx = USA2010$Dx.Male[x+1]
#'
#' ## Fitting binomial model
#' fit = hp(x = x, Ex = Ex, Dx = Dx, M = 5000, bn = 0, thin = 10)
#' print(fit)
#'
#' @seealso [print.DLM()] and [print.BLC()] for `DLM` or `BLC` methods.
#'
#' @export
print.HP <- function(x, ...){
fit = x
catf <- function(x, ...) cat(sprintf(x, ...), "\n")
catf("HP curve estimation")
catf("Model: %s", fit$info$model)
}
#' @export
print.ClosedHP <- function(x, ...){
fit = x
catf <- function(x, ...) cat(sprintf(x, ...), "\n")
catf("HP closure curve estimation")
catf("Method: %s", fit$method)
catf("Min. age: %s", min(fit$data$x, na.rm = T))
catf("Max. age: %s", max(fit$data$x, na.rm = T))
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/print_hp.R
#' @name quantile.BLC
#' @rdname quantile.BLC
#'
#' @title BLC: Sample quantiles
#'
#' @description Compute the quantiles based on the resulting chains from a fitted BLC model.
#'
#'
#' @param x A `BLC` object, result of a call to blc() function.
#' @param q A real number that represents the probability of the quantiles.
#' @param name A character with a parameter name of the blc model that should be returned. It can be one of these: "alpha", "beta", "kappa", "phiv", "theta", "phiw".
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A data.frame with the quantiles of the selected parameter.
#'
#' @examples
#' ## Importing log-mortality data from Portugal:
#' data(PT)
#' Y <- PT
#'
#' ## Fitting the model
#' fit = blc(Y = Y, M = 100, bn = 20)
#'
#' ## Parameters' median and quantiles 0.05, 0.95
#' quantile(fit, c(0.05, 0.5, 0.95), "alpha")
#' quantile(fit, c(0.05, 0.5, 0.95), "beta")
#' quantile(fit, c(0.05, 0.5, 0.95), "kappa")
#' quantile(fit, c(0.05, 0.5, 0.95), "phiv") ## random error precision
#' quantile(fit, c(0.05, 0.5, 0.95), "theta") ## drift parameter
#' quantile(fit, c(0.05, 0.5, 0.95), "phiw")
#'
#' @seealso [quantile.PredBLC()] for `PredBLC` method.
#'
#' @export
quantile.BLC <- function(x, q, name, ...) {
obj = x
matrixNames <- c("alpha", "beta", "kappa", "phiv")
vectorNames <- c("theta", "phiw")
if (name %in% matrixNames) {
apply(obj[[name]][ ,(obj$bn+1):obj$M], 1, quantile, q)
} else if (name %in% vectorNames) {
quantile(obj[[name]][(obj$bn+1):obj$M], q)
} else {
stop("Invalid `name` argument")
}
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/quantile_blc.R
#' @name quantile.PredBLC
#' @rdname quantile.PredBLC
#'
#' @title BLC: Sample quantiles for predictions
#'
#' @description Calculates the quantiles of log-mortality based on the resulting chains from a predicted year.
#'
#'
#' @param x A `PredBLC` object, result of a predict() call on a `BLC` object.
#' @param q A real number that represents the probability of the quantiles.
#' @param h A positive integer specifying the year in the prediction horizon to be calculated.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A data.frame with the quantiles of the selected parameter.
#'
#' @examples
#' ## Importing log-mortality data from Portugal:
#' data(PT)
#' Y <- PT
#'
#' ## Fitting the model
#' fit = blc(Y = Y, M = 100, bn = 20)
#'
#' ## Prediction for 2 years ahead
#' pred = predict(fit, h = 2)
#'
#' ## The log-mortality median for the first year of prediction
#' quantile(pred, q = 0.5, h = 1)
#'
#' ## The 0.1 and 0.9 quantiles for the first and second year of prediction
#' quantile(pred, q = c(0.1, 0.9), h = 1)
#' quantile(pred, q = c(0.1, 0.9), h = 2)
#'
#' @seealso [quantile.BLC()] for `BLC` method.
#'
#' @export
quantile.PredBLC <- function(x, q, h, ...) {
obj = x
if(length(h) > 1) h = h[1]
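## obj$y has dimensions draws x horizon x age, so quantiles are computed per age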
apply(obj$y[ ,h, ], 2, quantile, q)
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/quantile_blc_pred.R
sir_gompertz <- function(fit, data, resampling_size = nrow(fit$post.samples$mcmc_theta)){
## Gompertz: gomp = a*exp(b*x)
## likelihood
if(fit$info$model == "binomial"){
sim_Ex = rep(1000, length(data$Ex))
sim_Dx = round(data$qx*sim_Ex)
likelihood <- function(par){
gomp = par[1]*exp(par[2]*data$x)
q_x = 1 - exp(-gomp)
prod(dbinom(sim_Dx, size = sim_Ex, prob = q_x))
}
}else if(fit$info$model == "poisson"){
sim_Ex = rep(10000, length(data$Ex))
sim_Dx = round(data$qx*sim_Ex)
likelihood <- function(par){
mx = par[1]*exp(par[2]*data$x)
q_x = 1 - exp(-mx)
prod(dpois(sim_Dx, lambda = sim_Ex*q_x))
}
}else{
y = log(data$qx/(1-data$qx))
sigma = sqrt(median(fit$post.samples$sigma2))
likelihood <- function(par){
gomp = par[1]*exp(par[2]*data$x)
prod(dnorm(y, mean = log(gomp), sd = sigma))
}
}
### SIR method
## sampling A and B
A = rbeta(500, 1, 10000)
B = rbeta(500, 1, 10)
## Joint distribution
df = data.frame(A = rep(A, each = 500), B = rep(B, times = 500))
## Assuming a uniform prior, the posterior distribution is proportional to the likelihood
post.dist <- apply(df, 1, likelihood)
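## importance weights: posterior (here, the likelihood) over the Beta proposal density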
pesos = post.dist/(dbeta(df[,1], 1, 10000)*dbeta(df[,2], 1, 10))
probs = pesos/sum(pesos)
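## resample (A, B) pairs with probabilities proportional to the importance weights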
res = sample(500*500, resampling_size, replace = T, prob = probs)
res_A = df[res,1]
res_B = df[res,2]
return(data.frame(A = res_A, B = res_B))
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/sir_gompertz.R
#' @name summary.DLM
#' @rdname summary.DLM
#'
#' @title DLM: Summary
#'
#' @description Summarizes information from the parameters' Markov chains of a fitted `DLM` or `ClosedDLM` model.
#'
#'
#' @param object A `DLM` or `ClosedDLM` object, result of a call to dlm() or dlm_close() function.
#' @param digits An integer indicating the number of decimals places.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A data.frame object with the mean, standard deviation and 2.5%, 50% and 97.5% quantiles of a fitted `DLM` or `ClosedDLM` model.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the log mortality rate of the 2010 male population ranging from 0 to 100 years old
#' USA2010 = USA[USA$Year == 2010,]
#' x = 0:100
#' Ex = USA2010$Ex.Male[x+1]
#' Dx = USA2010$Dx.Male[x+1]
#' y = log(Dx/Ex)
#'
#' ## Fitting DLM
#' fit = dlm(y, M = 100)
#' summary(fit)
#'
#' @seealso [summary.HP()] for `HP` method.
#'
#' @export
summary.DLM <- function(object, digits = 5, ...){
fit = object
summ = rbind(resumo(fit$sig2),
resumo(fit$mu))
row.names(summ) = c("sigma2", paste0("mu[", fit$info$ages, "]"))
return(round(summ, digits))
}
#' @export
summary.ClosedDLM <- function(object, ...){
fit = object
summ = as.data.frame(cbind(min(fit$info$ages):max(fit$info$ages), resumo(fit$qx)))
colnames(summ) <- c("age", "mean", "sd", "2.5%", "50.0%", "97.5%")
return(summ)
}
#'
resumo <- function(x){
if(is.matrix(x)){
resumo = data.frame(
x1 = apply(x, 2, mean),
x2 = apply(x, 2, sd),
t(apply(x, 2, quantile, probs = c(0.025, 0.5, 0.975))))
}else{
resumo = data.frame(
x1 = mean(x),
x2 = sd(x),
t(quantile(x, probs = c(0.025, 0.5, 0.975))))
}
colnames(resumo) <- c("mean", "sd", "2.5%", "50.0%", "97.5%")
return(resumo)
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/summary_dlm.R
#' @name summary.HP
#' @rdname summary.HP
#'
#' @title HP: Summary
#'
#' @description Summarizes information from the parameters' Markov chains of a fitted `HP` or `ClosedHP` model.
#'
#'
#' @param object A `HP` or `ClosedHP` object, result of a call to hp() or hp_close() function.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A data.frame object with the mean, standard deviation and 2.5%, 50% and 97.5% quantiles of a fitted `HP` or `ClosedHP` model.
#'
#' @examples
#' ## Importing mortality data from the USA available on the Human Mortality Database (HMD):
#' data(USA)
#'
#' ## Selecting the exposure and death count of the 2010 male population ranging from 0 to 90 years old
#' USA2010 = USA[USA$Year == 2010,]
#' x = 0:90
#' Ex = USA2010$Ex.Male[x+1]
#' Dx = USA2010$Dx.Male[x+1]
#'
#' ## Fitting binomial model
#' fit = hp(x = x, Ex = Ex, Dx = Dx, M = 5000, bn = 0, thin = 10)
#' summary(fit)
#'
#' @seealso [summary.DLM()] for `DLM` method.
#'
#' @export
summary.HP <- function(object, ...){ fit = object; return(fit$summary) }
#' @export
summary.ClosedHP <- function(object, ...){
fit = object
resumo = data.frame(
x = min(fit$data$x):max(fit$data$x),
x1 = apply(fit$qx, 2, mean),
x2 = apply(fit$qx, 2, sd),
t(apply(fit$qx, 2, quantile, probs = c(0.025, 0.5, 0.975))))
colnames(resumo) <- c("age", "mean", "sd", "2.5%", "50.0%", "97.5%")
return(resumo)
}
/scratch/gouwar.j/cran-all/cranData/BayesMortalityPlus/R/summary_hp.R
#' Interface for the BayesMultMeta class
#'
#' The BayesMultMeta package implements two methods of constructing Markov
#' chains to assess the posterior distribution of the model parameters, namely
#' the overall mean vector \eqn{\mathbf{\mu}} and the between-study covariance matrix
#' \eqn{\mathbf{\Psi}}, of the generalized marginal multivariate random effects models.
#' The Bayesian inference procedures are performed when the model parameters are
#' endowed with the Berger and Bernardo reference prior
#' \insertCite{berger1992development}{BayesMultMeta} and the Jeffreys prior
#' \insertCite{1946RSPSA.186..453J}{BayesMultMeta}. This is achieved by
#' constructing Markov chains using the Metropolis-Hastings algorithms developed
#' in \insertCite{bodnar2021objective}{BayesMultMeta}. The convergence
#' properties of the generated Markov chains are investigated by the rank plots
#' and the split-\eqn{\hat{R}} estimate based on the rank normalization, which are
#' proposed in \insertCite{vehtari2021rank}{BayesMultMeta}.
#'
#' @param X A \eqn{p \times n} matrix which contains \eqn{n} observation vectors
#' of dimension \eqn{p}
#' @param U A \eqn{pn \times pn} block-diagonal matrix which contains the
#' covariance matrices of observation vectors.
#' @param N Length of the generated Markov chain.
#' @param burn_in Number of burn-in samples
#' @param likelihood Likelihood to use. It currently supports "normal" and
#' "t".
#' @param prior Prior to use. It currently supports "reference" and
#' "jeffrey".
#' @param algorithm_version One of "mu" or "Psi". Both algorithms samples the
#' same quantities.
#' @param d Degrees of freedom for the t-distribution when the "t" option is
#' used for the likelihood.
#'
#' @return a BayesMultMeta class which contains simulations from the MCMC
#' inference procedure as well as many of the input parameters. The elements
#' 'psi' and 'mu' in the list contain simulations from the posterior
#' distribution. All other elements are input parameters to the class.
#'
#' @references
#' \insertAllCited{}
#'
#' @examples
#' dataREM<-mvmeta::hyp
#' # Observation matrix X
#' X<-t(cbind(dataREM$sbp,dataREM$dbp))
#' p<-nrow(X) # model dimension
#' n<-ncol(X) # sample size
#' # Matrix U
#' U<-matrix(0,n*p,n*p)
#' for (i_n in 1:n) {
#' Use<-diag(c(dataREM$sbp_se[i_n],dataREM$dbp_se[i_n]))
#' Corr_mat<-matrix(c(1,dataREM$rho[i_n],dataREM$rho[i_n],1),p,p)
#' U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)]<- Use%*%Corr_mat%*%Use
#' }
#'
#' bmgmr_run <- BayesMultMeta(X, U, 1e2, burn_in = 100,
#' likelihood = "normal", prior="jeffrey",
#' algorithm_version = "mu")
#' summary(bmgmr_run)
#' plot(bmgmr_run)
#'
#' @export
BayesMultMeta <- function(X, U, N, burn_in, likelihood, prior, algorithm_version, d=NULL) {
assertthat::assert_that(likelihood %in% c("normal", "t"))
assertthat::assert_that(prior %in% c("jeffrey", "reference"))
assertthat::assert_that(algorithm_version %in% c("mu", "Psi"))
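## note: when likelihood == "t", U is rescaled by (d-2)/d below, so d must be supplied and exceed 2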
if (algorithm_version == "mu") {
if (likelihood == "normal" & prior == "jeffrey") {
simulations <- sample_post_nor_jef_marg_mu(X, U, N + burn_in)
}else if (likelihood == "normal" & prior == "reference") {
simulations <- sample_post_nor_ref_marg_mu(X, U, N + burn_in)
}else if (likelihood == "t" & prior == "jeffrey") {
simulations <- sample_post_t_jef_marg_mu(X, U*(d-2)/d, d, N + burn_in)
}else if (likelihood == "t" & prior == "reference") {
simulations <- sample_post_t_ref_marg_mu(X, U*(d-2)/d, d, N + burn_in)
}
}else{
if (likelihood == "normal" & prior == "jeffrey") {
simulations <- sample_post_nor_jef_marg_Psi(X, U, N + burn_in)
}else if (likelihood == "normal" & prior == "reference") {
simulations <- sample_post_nor_ref_marg_Psi(X, U, N + burn_in)
}else if (likelihood == "t" & prior == "jeffrey") {
simulations <- sample_post_t_jef_marg_Psi(X, U*(d-2)/d, d, N + burn_in)
}else if (likelihood == "t" & prior == "reference") {
simulations <- sample_post_t_ref_marg_Psi(X, U*(d-2)/d, d, N + burn_in)
}
}
structure(list(mu=simulations[[1]],
psi=simulations[[2]],
X=X,
U=U,
N=N,
p=nrow(X),
burn_in=burn_in,
likelihood=likelihood,
prior=prior,
algorithm_version=algorithm_version,
d=d
), class="BayesMultMeta")
}
#' Summary statistics from the posterior of a BayesMultMeta class
#'
#' @param object BayesMultMeta class
#' @param alpha Significance level used in the computation of the (1-alpha) credible interval.
#' @param ... not used
#'
#' @returns a list with summary statistics
#' @export
summary.BayesMultMeta <- function(object, alpha=0.05, ...) {
Gp<-duplication_matrix(object$p)
Lp<-Gp%*%solve(t(Gp)%*%Gp)
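## Lp maps summaries over vec(Psi) (p^2 columns) to the p(p+1)/2 unique elements of Psi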
list("mu"=bayes_inference(object$mu[,(object$burn_in+1):(object$burn_in+object$N)], alpha),
"psi"=bayes_inference(object$psi[,(object$burn_in+1):(object$burn_in+object$N)], alpha)%*%Lp)
}
#' Summary statistics from a posterior distribution
#'
#' Given a univariate sample drawn from the posterior distribution, this
#' function computes the posterior mean, the posterior median, the posterior
#' standard deviation, and the limits of the \eqn{(1-\alpha)} probability-symmetric
#' credible interval.
#'
#' @param x Univariate sample from the posterior distribution of a parameter.
#' @param alp Significance level used in the computation of the credible interval
#'
#' @return a matrix with summary statistics
bayes_inference <- function(x,alp){
x_mean<-apply(x,1,mean)
x_med<-apply(x,1,quantile,probs=0.5)
x_ql<-apply(x,1,quantile,probs=alp/2)
x_qu<-apply(x,1,quantile,probs=1-alp/2)
x_sd<-sqrt(apply(x,1,var))
rbind(x_mean,x_med,x_sd,x_ql,x_qu)
}
#' Plot a BayesMultMeta object
#'
#' This function produces the trace plots of the constructed Markov chains.
#'
#' @param x a BayesMultMeta object
#' @param ... additional arguments
#'
#' @return No return value, produces trace plots
#'
#' @export
plot.BayesMultMeta <- function(x, ...) {
for (var in c("mu", "psi")) {
df <- x[[var]]
ylab <- function(p) ifelse(var == "mu", parse(text=paste0("mu[", p, "]")),
parse(text=paste0("psi[", p, "]")))
for (idx in 1:nrow(df)) {
plot(y=df[idx,], x=1:ncol(df), type="l", ylab=ylab(idx),
xlab="index")
abline(v=x$burn_in, col = "lightgray", lty = 3)
invisible(readline(prompt="Press [enter] to continue"))
}
}
}
#' Computes the ranks within the pooled draws of Markov chains
#'
#' The function computes the ranks within the pooled draws of Markov
#' chains. Average ranks are used for ties.
#'
#' @param MC An \eqn{N \times M} matrix with N draws in each of M constructed
#' Markov chains.
#'
#' @return a matrix with the ranks from the MCMC procedure
#' @export
#'
#' @examples
#' dataREM<-mvmeta::hyp
#' # Observation matrix X
#' X<-t(cbind(dataREM$sbp,dataREM$dbp))
#' p<-nrow(X) # model dimension
#' n<-ncol(X) # sample size
#' # Matrix U
#' U<-matrix(0,n*p,n*p)
#' for (i_n in 1:n) {
#' Use<-diag(c(dataREM$sbp_se[i_n],dataREM$dbp_se[i_n]))
#' Corr_mat<-matrix(c(1,dataREM$rho[i_n],dataREM$rho[i_n],1),p,p)
#' U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)]<- Use%*%Corr_mat%*%Use
#' }
#' # Generating M Markov chains for mu_1
#' M<-4 # number of chains
#' MC <-NULL
#' for (i in 1:M) {
#' chain <- BayesMultMeta(X, U, 1e2, burn_in = 1e2,
#' likelihood = "t", prior="jeffrey",
#' algorithm_version = "mu",d=3)
#' MC<- cbind(MC,chain$mu[1,])
#' }
#' ranks<-MC_ranks(MC)
#' id_chain <- 1
#' hist(ranks[,id_chain],breaks=25,prob=TRUE, labels = FALSE, border = "dark blue",
#' col = "light blue", main = expression("Chain 1,"~mu[1]), xlab = expression(),
#' ylab = expression(),cex.axis=1.2,cex.main=1.7,font=2)
#'
MC_ranks <- function(MC) {
Np<-nrow(MC)
M<-ncol(MC)
matrix(rank(c(MC),ties.method = "average"),Np,M)
}
#' Computes the split-\eqn{\hat{R}} estimate based on the rank normalization
#'
#' The function computes the split-\eqn{\hat{R}} estimate based on the rank
#' normalization.
#'
#' @inheritParams MC_ranks
#'
#' @return a value with the the split-\eqn{\hat{R}} estimate based on the rank
#' normalization
#'
#' @examples
#' dataREM<-mvmeta::hyp
#' # Observation matrix X
#' X<-t(cbind(dataREM$sbp,dataREM$dbp))
#' p<-nrow(X) # model dimension
#' n<-ncol(X) # sample size
#' # Matrix U
#' U<-matrix(0,n*p,n*p)
#' for (i_n in 1:n) {
#' Use<-diag(c(dataREM$sbp_se[i_n],dataREM$dbp_se[i_n]))
#' Corr_mat<-matrix(c(1,dataREM$rho[i_n],dataREM$rho[i_n],1),p,p)
#' U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)]<- Use%*%Corr_mat%*%Use
#' }
#' # Generating M Markov chains for mu_1
#' M<-4 # number of chains
#' MC <-NULL
#' for (i in 1:M) {
#' chain <- BayesMultMeta(X, U, 1e2, burn_in = 1e2,
#' likelihood = "t", prior="jeffrey",
#' algorithm_version = "mu",d=3)
#' MC<- cbind(MC,chain$mu[1,])
#' }
#' split_rank_hatR(MC)
#'
split_rank_hatR<-function(MC) {
Np<-nrow(MC)
M<-ncol(MC)
if (Np%%2) {
stop('The length of the Markov chain should be an even number')
} else {
ranks<-MC_ranks(MC)
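## rank-normalize the pooled draws, then split each chain into two halves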
x_ranks_full<-qnorm((ranks-3/8)/(M*Np+1/4))
x_ranks<-cbind(x_ranks_full[1:(Np/2),],x_ranks_full[(Np/2+1):Np,])
means<-apply(x_ranks,2,mean)
BR<-Np*var(means)/2
vars<-apply(x_ranks,2,var)
WR<-sum(vars)/(2*M)
sqrt(1-2/Np+(2/Np)*BR/WR)
}
}
/scratch/gouwar.j/cran-all/cranData/BayesMultMeta/R/BayesMultMeta.R
#' Duplication matrix
#'
#' This function creates the duplication matrix of size \eqn{p^2 \times p(p+1)/2}
#'
#' @param p Integer which specifies the dimension of the duplication matrix.
#'
#' @return a matrix of size \eqn{p^2 \times p(p+1)/2}
duplication_matrix <- function(p){
mat <- diag(p)
index <- seq(p*(p+1)/2)
mat[ lower.tri( mat , TRUE ) ] <- index
mat[ upper.tri( mat ) ] <- t( mat )[ upper.tri( mat ) ]
outer(c(mat), index , function( x , y ) ifelse(x==y, 1, 0))
}
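## A hypothetical check (not part of the package): for a symmetric matrix A,
## the duplication matrix maps vech(A) to vec(A).
# Gp <- duplication_matrix(2)
# A <- matrix(c(1, 2, 2, 3), 2, 2)
# all.equal(c(Gp %*% A[lower.tri(A, diag = TRUE)]), c(A))  # TRUE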
#' Metropolis-Hastings algorithm for the normal distribution and the Jeffreys
#' prior, where \eqn{\mathbf{\mu}} is generated from the marginal posterior.
#'
#' This function implements Metropolis-Hastings algorithm for drawing samples
#' from the posterior distribution of \eqn{\mathbf{\mu}} and \eqn{\mathbf{\Psi}} under the
#' assumption of the normal distribution when the Jeffreys prior is employed.
#' At each step, the algorithm starts with generating a draw from the marginal
#' distribution of \eqn{\mathbf{\mu}}.
#'
#' @param X A \eqn{p \times n} matrix which contains \eqn{n} observation vectors of
#' dimension \eqn{p}.
#' @param U A \eqn{p n \times p n} block-diagonal matrix which contains the
#' covariance matrices of observation vectors.
#' @param Np Length of the generated Markov chain.
#'
#' @return List with the generated samples from the joint posterior distribution
#' of \eqn{\mathbf{\mu}} and \eqn{\mathbf{\Psi}}, where the values of
#' \eqn{\mathbf{\Psi}} are presented by using the vec operator.
#'
sample_post_nor_jef_marg_mu<-function(X,U,Np){
p<-nrow(X) # model dimension
n<-ncol(X) # sample size
############## additional definitions
bi_n<-rep(1,n)
tbi_n<-t(bi_n)
In<-diag(bi_n)
Jn<-matrix(1,n,n)
Ip<-diag(rep(1,p))
Gp<-duplication_matrix(p)
tGp<-t(Gp)
mu_m<-NULL
Psi_m<-NULL
bar_X<-X%*%bi_n/n
S<-X%*%(In-Jn/n)%*%t(X)/(n-1)
tcholS<-t(chol(S))
### generating initial values for mu and Psi (draws from the proposal)
mu0<-bar_X+sqrt((n-1)/n/(n-p+1))*tcholS%*%rnorm(p)/sqrt(rchisq(1,n-p+1)/(n-p+1))
Xn<-X-mu0%*%tbi_n
Cov_p<-Xn%*%t(Xn)
cCov_p<-chol(Cov_p)
Z<-matrix(rnorm(p*n+p),p,n+1)
Psi0<-t(cCov_p)%*%solve(Z%*%t(Z))%*%cCov_p
### initial value of the proposal
q0<-det(Psi0)^(-0.5*(n+p+2))*exp(-0.5*sum(diag(solve(Psi0)%*%Cov_p)))
### initial value of the target (posterior)
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
for (i_n in 1:n)
{iPsiUi<-solve(Psi0+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
exp_num<-exp_num*exp(-0.5*sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n]))
}
p0<-sqrt(det(mat_numJ/n))*sqrt(det(tGp%*%mat_num%*%Gp/n))*det_num*exp_num
for (j in 1:Np)
{
### generating new draws of mu and Psi from the proposal
mu_p<-bar_X+sqrt((n-1)/n/(n-p+1))*tcholS%*%rnorm(p)/sqrt(rchisq(1,n-p+1)/(n-p+1))
Xn<-X-mu_p%*%tbi_n
Cov_p<-Xn%*%t(Xn)
cCov_p<-chol(Cov_p)
Z<-matrix(rnorm(p*n+p),p,n+1)
Psi_p<-t(cCov_p)%*%solve(Z%*%t(Z))%*%cCov_p
### value of the proposal at new draw
q1 <-det(Psi_p)^(-0.5*(n+p+2))*exp(-0.5*sum(diag(solve(Psi_p)%*%Cov_p)))
### value of the posterior
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
for (i_n in 1:n)
{iPsiUi<-solve(Psi_p+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
exp_num<-exp_num*exp(-0.5*sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n]))
}
p1<-sqrt(det(mat_numJ/n))*sqrt(det(tGp%*%mat_num%*%Gp/n))*det_num*exp_num
### MH ratio
ratio_MH<-p1*q0/p0/q1
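### independence M-H step: accept the proposal with probability min(1, ratio_MH)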
if (runif(1) <= ratio_MH)
{
mu0<-mu_p
Psi0<-Psi_p
q0<-q1
p0<-p1
}
Psi_m<-cbind(Psi_m,as.vector(Psi0))
mu_m<-cbind(mu_m,mu0)
}
list(mu_m,Psi_m)
}
#' Metropolis-Hastings algorithm for the normal distribution and the Jeffreys
#' prior, where \eqn{\mathbf{\Psi}} is generated from the marginal posterior.
#'
#' This function implements Metropolis-Hastings algorithm for drawing samples
#' from the posterior distribution of \eqn{\mathbf{\mu}} and \eqn{\mathbf{\Psi}}
#' under the assumption of the normal distribution when the Jeffreys prior is
#' employed. At each step, the algorithm starts with generating a draw from the
#' marginal distribution of \eqn{\mathbf{\Psi}}.
#'
#' @inherit sample_post_nor_jef_marg_mu
sample_post_nor_jef_marg_Psi<-function(X,U,Np){
p<-nrow(X) # model dimension
n<-ncol(X) # sample size
############## additional definitions
bi_n<-rep(1,n)
tbi_n<-t(bi_n)
In<-diag(bi_n)
Jn<-matrix(1,n,n)
Ip<-diag(rep(1,p))
Gp<-duplication_matrix(p)
tGp<-t(Gp)
mu_m<-NULL
Psi_m<-NULL
bar_X<-X%*%bi_n/n
S<-X%*%(In-Jn/n)%*%t(X)/(n-1)
cS<-sqrt(n-1)*chol(S)
### generating initial values for mu and Psi (draws from the proposal)
Z<-matrix(rnorm(p*n),p,n)
Psi0<-t(cS)%*%solve(Z%*%t(Z))%*%cS
mu0<-bar_X+t(chol(Psi0))%*%rnorm(p)/sqrt(n)
Xn<-X-mu0%*%tbi_n
Cov_p<-Xn%*%t(Xn)
### initial value of the proposal
exp_prop<-exp(-0.5*sum(diag(solve(Psi0)%*%Cov_p)))
q0<-det(Psi0)^(-0.5*(n+p+2))*exp_prop
### initial value of the target (posterior)
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
#exp_prop<-1
for (i_n in 1:n)
{iPsiUi<-solve(Psi0+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
exp_num<-exp_num*exp(-0.5*sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n]))
}
p0<-sqrt(det(mat_numJ/n))*sqrt(det(tGp%*%mat_num%*%Gp/n))*det_num*exp_num
for (j in 1:Np)
{
### generating new draws of mu and Psi from the proposal
Z<-matrix(rnorm(p*n),p,n)
Psi_p<-t(cS)%*%solve(Z%*%t(Z))%*%cS
mu_p<-bar_X+t(chol(Psi_p))%*%rnorm(p)/sqrt(n)
Xn<-X-mu_p%*%tbi_n
Cov_p<-Xn%*%t(Xn)
### value of the proposal at new draw
exp_prop<-exp(-0.5*sum(diag(solve(Psi_p)%*%Cov_p)))
q1<-det(Psi_p)^(-0.5*(n+p+2))*exp_prop
### value of the posterior
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
#exp_prop<-1
for (i_n in 1:n)
{iPsiUi<-solve(Psi_p+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
exp_num<-exp_num*exp(-0.5*sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n]))
}
p1<-sqrt(det(mat_numJ/n))*sqrt(det(tGp%*%mat_num%*%Gp/n))*det_num*exp_num
### MH ratio
ratio_MH<-p1*q0/p0/q1
if (runif(1) <= ratio_MH)
{
mu0<-mu_p
Psi0<-Psi_p
q0<-q1
p0<-p1
}
Psi_m<-cbind(Psi_m,as.vector(Psi0))
mu_m<-cbind(mu_m,mu0)
}
list(mu_m,Psi_m)
}
#' Metropolis-Hastings algorithm for the t-distribution and the Jeffreys prior,
#' where \eqn{\mathbf{\mu}} is generated from the marginal posterior.
#'
#' This function implements Metropolis-Hastings algorithm for drawing samples
#' from the posterior distribution of \eqn{\mathbf{\mu}} and \eqn{\mathbf{\Psi}}
#' under the assumption of the t-distribution when the Jeffreys prior is
#' employed. At each step, the algorithm starts with generating a draw from the
#' marginal distribution of \eqn{\mathbf{\mu}}.
#'
#' @inherit sample_post_nor_jef_marg_mu
#' @param d Degrees of freedom for the t-distribution
#'
sample_post_t_jef_marg_mu<-function(X,U,d,Np){
p<-nrow(X) # model dimension
n<-ncol(X) # sample size
############## additional definitions
bi_n<-rep(1,n)
tbi_n<-t(bi_n)
In<-diag(bi_n)
Jn<-matrix(1,n,n)
Ip<-diag(rep(1,p))
Gp<-duplication_matrix(p)
tGp<-t(Gp)
mu_m<-NULL
Psi_m<-NULL
bar_X<-X%*%bi_n/n
S<-X%*%(In-Jn/n)%*%t(X)/(n-1)
tcholS<-t(chol(S))
### generating initial values for mu and Psi (draws from the proposal)
mu0<-bar_X+sqrt((n-1)/n/(n-p+1))*tcholS%*%rnorm(p)/sqrt(rchisq(1,n-p+1)/(n-p+1))
Xn<-X-mu0%*%tbi_n
Cov_p<-Xn%*%t(Xn)
cCov_p<-chol(Cov_p)
Z<-matrix(rnorm(p*n+p),p,n+1)
Psi0<-t(cCov_p)%*%solve(Z%*%t(Z))%*%cCov_p*sqrt(rchisq(1,d)/(d))
### initial value of the proposal
q0<-det(Psi0)^(-0.5*(n+p+2))*(1+sum(diag(solve(Psi0)%*%Cov_p))/d)^(-0.5*(p*n+d))
### initial value of the target (posterior)
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
t_num<-0
for (i_n in 1:n)
{iPsiUi<-solve(Psi0+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
t_num<-t_num+sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n])/d
}
p0<-sqrt(det(mat_numJ/n))*sqrt(det(tGp%*%(mat_num-as.vector(mat_numJ)%*%t(as.vector(mat_numJ))/(n*p+d))%*%Gp/n))*det_num*(1+t_num)^(-0.5*(p*n+d))
for (j in 1:Np)
{
### generating new draws of mu and Psi from the proposal
mu_p<-bar_X+sqrt((n-1)/n/(n-p+1))*tcholS%*%rnorm(p)/sqrt(rchisq(1,n-p+1)/(n-p+1))
Xn<-X-mu_p%*%tbi_n
Cov_p<-Xn%*%t(Xn)
cCov_p<-chol(Cov_p)
Z<-matrix(rnorm(p*n+p),p,n+1)
Psi_p<-t(cCov_p)%*%solve(Z%*%t(Z))%*%cCov_p*sqrt(rchisq(1,d)/(d))
### value of the proposal at new draw
q1 <-det(Psi_p)^(-0.5*(n+p+2))*(1+sum(diag(solve(Psi_p)%*%Cov_p))/d)^(-0.5*(p*n+d))
### value of the posterior
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
t_num<-0
for (i_n in 1:n)
{iPsiUi<-solve(Psi_p+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
t_num<-t_num+sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n])/d
}
p1<-sqrt(det(mat_numJ/n))*sqrt(det(tGp%*%(mat_num-as.vector(mat_numJ)%*%t(as.vector(mat_numJ))/(n*p+d))%*%Gp/n))*det_num*(1+t_num)^(-0.5*(p*n+d))
### MH ratio
ratio_MH<-p1*q0/p0/q1
if (runif(1) <= ratio_MH)
{
mu0<-mu_p
Psi0<-Psi_p
q0<-q1
p0<-p1
}
Psi_m<-cbind(Psi_m,as.vector(Psi0))
mu_m<-cbind(mu_m,mu0)
}
list(mu_m,Psi_m)
}
#'
#' Metropolis-Hastings algorithm for the t-distribution and the Jeffreys prior,
#' where \eqn{\mathbf{\Psi}} is generated from the marginal posterior.
#'
#' This function implements Metropolis-Hastings algorithm for drawing samples
#' from the posterior distribution of \eqn{\mathbf{\mu}} and \eqn{\mathbf{\Psi}}
#' under the assumption of the t-distribution when the Jeffreys prior is
#' employed. At each step, the algorithm starts with generating a draw from the
#' marginal distribution of \eqn{\mathbf{\Psi}}.
#'
#' @inherit sample_post_t_jef_marg_mu
sample_post_t_jef_marg_Psi<-function(X,U,d,Np){
p<-nrow(X) # model dimension
n<-ncol(X) # sample size
############## additional definitions
bi_n<-rep(1,n)
tbi_n<-t(bi_n)
In<-diag(bi_n)
Jn<-matrix(1,n,n)
Ip<-diag(rep(1,p))
Gp<-duplication_matrix(p)
tGp<-t(Gp)
mu_m<-NULL
Psi_m<-NULL
bar_X<-X%*%bi_n/n
S<-X%*%(In-Jn/n)%*%t(X)/(n-1)
cS<-sqrt(n-1)*chol(S)
### generating initial values for mu and Psi (draws from the proposal)
Z<-matrix(rnorm(p*n),p,n)
Psi0<-t(cS)%*%solve(Z%*%t(Z))%*%cS*sqrt(rchisq(1,d)/(d))
mu0<-bar_X+sqrt((d+(n-1)*sum(diag(solve(Psi0)%*%S)))/n/(d+p*n-p))*t(chol(Psi0))%*%rnorm(p)/sqrt(rchisq(1,d+p*n-p)/(d+p*n-p))
Xn<-X-mu0%*%tbi_n
Cov_p<-Xn%*%t(Xn)
### initial value of the proposal
q0<-det(Psi0)^(-0.5*(n+p+2))*(1+sum(diag(solve(Psi0)%*%Cov_p))/d)^(-0.5*(p*n+d))
### initial value of the target (posterior)
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
t_num<-0
for (i_n in 1:n)
{iPsiUi<-solve(Psi0+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
t_num<-t_num+sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n])/d
}
p0<-sqrt(det(mat_numJ/n))*sqrt(det(tGp%*%(mat_num-as.vector(mat_numJ)%*%t(as.vector(mat_numJ))/(n*p+d))%*%Gp/n))*det_num*(1+t_num)^(-0.5*(p*n+d))
for (j in 1:Np)
{
### generating new draws of mu and Psi from the proposal
Z<-matrix(rnorm(p*n),p,n)
Psi_p<-t(cS)%*%solve(Z%*%t(Z))%*%cS*sqrt(rchisq(1,d)/(d))
mu_p<-bar_X+sqrt((d+(n-1)*sum(diag(solve(Psi_p)%*%S)))/n/(d+p*n-p))*t(chol(Psi_p))%*%rnorm(p)/sqrt(rchisq(1,d+p*n-p)/(d+p*n-p))
Xn<-X-mu_p%*%tbi_n
Cov_p<-Xn%*%t(Xn)
### value of the proposal at new draw
q1<-det(Psi_p)^(-0.5*(n+p+2))*(1+sum(diag(solve(Psi_p)%*%Cov_p))/d)^(-0.5*(p*n+d))
### value of the posterior
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
t_num<-0
for (i_n in 1:n)
{iPsiUi<-solve(Psi_p+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
t_num<-t_num+sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n])/d
}
p1<-sqrt(det(mat_numJ/n))*sqrt(det(tGp%*%(mat_num-as.vector(mat_numJ)%*%t(as.vector(mat_numJ))/(n*p+d))%*%Gp/n))*det_num*(1+t_num)^(-0.5*(p*n+d))
### MH ratio
ratio_MH<-p1*q0/p0/q1
if (runif(1) <= ratio_MH)
{
mu0<-mu_p
Psi0<-Psi_p
q0<-q1
p0<-p1
}
Psi_m<-cbind(Psi_m,as.vector(Psi0))
mu_m<-cbind(mu_m,mu0)
}
list(mu_m,Psi_m)
}
#' Metropolis-Hastings algorithm for the normal distribution and the Berger and
#' Bernardo reference prior, where \eqn{\mathbf{\mu}} is generated from the
#' marginal posterior.
#'
#' This function implements Metropolis-Hastings algorithm for drawing samples
#' from the posterior distribution of \eqn{\mathbf{\mu}} and \eqn{\mathbf{\Psi}}
#' under the assumption of the normal distribution when the Berger and Bernardo
#' reference prior is employed. At each step, the algorithm starts with
#' generating a draw from the marginal distribution of \eqn{\mathbf{\mu}}.
#'
#' @inherit sample_post_nor_jef_marg_mu
sample_post_nor_ref_marg_mu<-function(X,U,Np){
p<-nrow(X) # model dimension
n<-ncol(X) # sample size
############## additional definitions
bi_n<-rep(1,n)
tbi_n<-t(bi_n)
In<-diag(bi_n)
Jn<-matrix(1,n,n)
Ip<-diag(rep(1,p))
Gp<-duplication_matrix(p)
tGp<-t(Gp)
mu_m<-NULL
Psi_m<-NULL
bar_X<-X%*%bi_n/n
S<-X%*%(In-Jn/n)%*%t(X)/(n-1)
tcholS<-t(chol(S))
### generating initial values for mu and Psi (draws from the proposal)
mu0<-bar_X+sqrt((n-1)/n/(n-p))*tcholS%*%rnorm(p)/sqrt(rchisq(1,n-p)/(n-p))
Xn<-X-mu0%*%tbi_n
Cov_p<-Xn%*%t(Xn)
cCov_p<-chol(Cov_p)
Z<-matrix(rnorm(p*n),p,n)
Psi0<-t(cCov_p)%*%solve(Z%*%t(Z))%*%cCov_p
### initial value of the proposal
q0<-det(Psi0)^(-0.5*(n+p+1))*exp(-0.5*sum(diag(solve(Psi0)%*%Cov_p)))
### initial value of the target (posterior)
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
for (i_n in 1:n)
{iPsiUi<-solve(Psi0+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
det_num<-det_num*sqrt(det(iPsiUi))
exp_num<-exp_num*exp(-0.5*sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n]))
}
p0<-sqrt(det(tGp%*%mat_num%*%Gp/n))*det_num*exp_num
for (j in 1:Np)
{
### generating new draws of mu and Psi from the proposal
mu_p<-bar_X+sqrt((n-1)/n/(n-p))*tcholS%*%rnorm(p)/sqrt(rchisq(1,n-p)/(n-p))
Xn<-X-mu_p%*%tbi_n
Cov_p<-Xn%*%t(Xn)
cCov_p<-chol(Cov_p)
Z<-matrix(rnorm(p*n),p,n)
Psi_p<-t(cCov_p)%*%solve(Z%*%t(Z))%*%cCov_p
### value of the proposal at new draw
q1 <-det(Psi_p)^(-0.5*(n+p+1))*exp(-0.5*sum(diag(solve(Psi_p)%*%Cov_p)))
### value of the posterior
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
for (i_n in 1:n)
{iPsiUi<-solve(Psi_p+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
det_num<-det_num*sqrt(det(iPsiUi))
exp_num<-exp_num*exp(-0.5*sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n]))
}
p1<-sqrt(det(tGp%*%mat_num%*%Gp/n))*det_num*exp_num
### MH ratio
ratio_MH<-p1*q0/p0/q1
if (runif(1) <= ratio_MH)
{mu0<-mu_p
Psi0<-Psi_p
q0<-q1
p0<-p1
}
Psi_m<-cbind(Psi_m,as.vector(Psi0))
mu_m<-cbind(mu_m,mu0)
}
list(mu_m,Psi_m)
}
#' Metropolis-Hastings algorithm for the normal distribution and the Berger and
#' Bernardo reference prior, where \eqn{\mathbf{\Psi}} is generated from the marginal
#' posterior.
#'
#' This function implements the Metropolis-Hastings algorithm for drawing samples
#' from the posterior distribution of \eqn{\mathbf{\mu}} and \eqn{\mathbf{\Psi}}
#' under the assumption of the normal distribution when the Berger and Bernardo
#' reference prior is employed. At each step, the algorithm starts by
#' generating a draw from the marginal distribution of \eqn{\mathbf{\Psi}}.
#'
#' @inherit sample_post_nor_jef_marg_mu
sample_post_nor_ref_marg_Psi<-function(X,U,Np){
p<-nrow(X) # model dimension
n<-ncol(X) # sample size
############## additional definitions
bi_n<-rep(1,n)
tbi_n<-t(bi_n)
In<-diag(bi_n)
Jn<-matrix(1,n,n)
Ip<-diag(rep(1,p))
Gp<-duplication_matrix(p)
tGp<-t(Gp)
mu_m<-NULL
Psi_m<-NULL
bar_X<-X%*%bi_n/n
S<-X%*%(In-Jn/n)%*%t(X)/(n-1)
cS<-sqrt(n-1)*chol(S)
### generating initial values for mu and Psi by drawing from the proposal
Z<-matrix(rnorm(p*n-p),p,n-1)
Psi0<-t(cS)%*%solve(Z%*%t(Z))%*%cS
mu0<-bar_X+t(chol(Psi0))%*%rnorm(p)/sqrt(n)
Xn<-X-mu0%*%tbi_n
Cov_p<-Xn%*%t(Xn)
### initial value of the proposal
exp_prop<-exp(-0.5*sum(diag(solve(Psi0)%*%Cov_p)))
q0<-det(Psi0)^(-0.5*(n+p+1))*exp_prop
### initial value of the target (posterior)
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
for (i_n in 1:n)
{iPsiUi<-solve(Psi0+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
det_num<-det_num*sqrt(det(iPsiUi))
exp_num<-exp_num*exp(-0.5*sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n]))
}
p0<-sqrt(det(tGp%*%mat_num%*%Gp/n))*det_num*exp_num
for (j in 1:Np)
{
### generating a new draw of mu and Psi from the proposal
Z<-matrix(rnorm(p*n-p),p,n-1)
Psi_p<-t(cS)%*%solve(Z%*%t(Z))%*%cS
mu_p<-bar_X+t(chol(Psi_p))%*%rnorm(p)/sqrt(n)
Xn<-X-mu_p%*%tbi_n
Cov_p<-Xn%*%t(Xn)
### value of the proposal at new draw
exp_prop<-exp(-0.5*sum(diag(solve(Psi_p)%*%Cov_p)))
q1<-det(Psi_p)^(-0.5*(n+p+1))*exp_prop
### value of the posterior
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
for (i_n in 1:n)
{iPsiUi<-solve(Psi_p+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
det_num<-det_num*sqrt(det(iPsiUi))
exp_num<-exp_num*exp(-0.5*sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n]))
}
p1<-sqrt(det(tGp%*%mat_num%*%Gp/n))*det_num*exp_num
### MH ratio
ratio_MH<-p1*q0/p0/q1
if (runif(1) <= ratio_MH)
{
mu0<-mu_p
Psi0<-Psi_p
q0<-q1
p0<-p1
}
Psi_m<-cbind(Psi_m,as.vector(Psi0))
mu_m<-cbind(mu_m,mu0)
}
list(mu_m,Psi_m)
}
#' Metropolis-Hastings algorithm for the t-distribution and Berger and Bernardo
#' reference prior, where \eqn{\mathbf{\mu}} is generated from the marginal
#' posterior.
#'
#' This function implements the Metropolis-Hastings algorithm for drawing samples
#' from the posterior distribution of \eqn{\mathbf{\mu}} and \eqn{\mathbf{\Psi}}
#' under the assumption of the t-distribution when the Berger and Bernardo prior
#' is employed. At each step, the algorithm starts by generating a draw from
#' the marginal distribution of \eqn{\mathbf{\mu}}.
#'
#' @inherit sample_post_t_jef_marg_mu
sample_post_t_ref_marg_mu<-function(X,U,d,Np){
p<-nrow(X) # model dimension
n<-ncol(X) # sample size
############## additional definitions
bi_n<-rep(1,n)
tbi_n<-t(bi_n)
In<-diag(bi_n)
Jn<-matrix(1,n,n)
Ip<-diag(rep(1,p))
Gp<-duplication_matrix(p)
tGp<-t(Gp)
mu_m<-NULL
Psi_m<-NULL
bar_X<-X%*%bi_n/n
S<-X%*%(In-Jn/n)%*%t(X)/(n-1)
tcholS<-t(chol(S))
### generating initial values for mu and Psi by drawing from the proposal
mu0<-bar_X+sqrt((n-1)/n/(n-p))*tcholS%*%rnorm(p)/sqrt(rchisq(1,n-p)/(n-p))
Xn<-X-mu0%*%tbi_n
Cov_p<-Xn%*%t(Xn)
cCov_p<-chol(Cov_p)
Z<-matrix(rnorm(p*n),p,n)
Psi0<-t(cCov_p)%*%solve(Z%*%t(Z))%*%cCov_p*sqrt(rchisq(1,d)/(d))
### initial value of the proposal
q0<-det(Psi0)^(-0.5*(n+p+1))*(1+sum(diag(solve(Psi0)%*%Cov_p))/d)^(-0.5*(p*n+d))
### initial value of the target (posterior)
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
t_num<-0
for (i_n in 1:n)
{iPsiUi<-solve(Psi0+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
t_num<-t_num+sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n])/d
}
p0<-sqrt(det(tGp%*%(mat_num-as.vector(mat_numJ)%*%t(as.vector(mat_numJ))/(n*p+d))%*%Gp/n))*det_num*(1+t_num)^(-0.5*(p*n+d))
for (j in 1:Np)
{
### generating a new draw of mu and Psi from the proposal
mu_p<-bar_X+sqrt((n-1)/n/(n-p))*tcholS%*%rnorm(p)/sqrt(rchisq(1,n-p)/(n-p))
Xn<-X-mu_p%*%tbi_n
Cov_p<-Xn%*%t(Xn)
cCov_p<-chol(Cov_p)
Z<-matrix(rnorm(p*n),p,n)
Psi_p<-t(cCov_p)%*%solve(Z%*%t(Z))%*%cCov_p*sqrt(rchisq(1,d)/(d))
### value of the proposal at new draw
q1 <-det(Psi_p)^(-0.5*(n+p+1))*(1+sum(diag(solve(Psi_p)%*%Cov_p))/d)^(-0.5*(p*n+d))
### value of the posterior
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
t_num<-0
for (i_n in 1:n)
{iPsiUi<-solve(Psi_p+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
t_num<-t_num+sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n])/d
}
p1<-sqrt(det(tGp%*%(mat_num-as.vector(mat_numJ)%*%t(as.vector(mat_numJ))/(n*p+d))%*%Gp/n))*det_num*(1+t_num)^(-0.5*(p*n+d))
### MH ratio
ratio_MH<-p1*q0/p0/q1
if (runif(1) <= ratio_MH)
{
mu0<-mu_p
Psi0<-Psi_p
q0<-q1
p0<-p1
}
Psi_m<-cbind(Psi_m,as.vector(Psi0))
mu_m<-cbind(mu_m,mu0)
}
list(mu_m,Psi_m)
}
#' Metropolis-Hastings algorithm for the t-distribution and Berger and Bernardo
#' reference prior, where \eqn{\mathbf{\Psi}} is generated from the marginal
#' posterior.
#'
#' This function implements the Metropolis-Hastings algorithm for drawing samples
#' from the posterior distribution of \eqn{\mathbf{\mu}} and \eqn{\mathbf{\Psi}}
#' under the assumption of the t-distribution when the Berger and Bernardo prior
#' is employed. At each step, the algorithm starts by generating a draw from
#' the marginal distribution of \eqn{\mathbf{\Psi}}.
#'
#' @inherit sample_post_t_jef_marg_mu
sample_post_t_ref_marg_Psi<-function(X,U,d,Np){
p<-nrow(X) # model dimension
n<-ncol(X) # sample size
############## additional definitions
bi_n<-rep(1,n)
tbi_n<-t(bi_n)
In<-diag(bi_n)
Jn<-matrix(1,n,n)
Ip<-diag(rep(1,p))
Gp<-duplication_matrix(p)
tGp<-t(Gp)
mu_m<-NULL
Psi_m<-NULL
bar_X<-X%*%bi_n/n
S<-X%*%(In-Jn/n)%*%t(X)/(n-1)
cS<-sqrt(n-1)*chol(S)
### generating initial values for mu and Psi by drawing from the proposal
Z<-matrix(rnorm(p*(n-1)),p,n-1)
Psi0<-t(cS)%*%solve(Z%*%t(Z))%*%cS*sqrt(rchisq(1,d)/(d))
mu0<-bar_X+sqrt((d+(n-1)*sum(diag(solve(Psi0)%*%S)))/n/(d+p*n-p))*t(chol(Psi0))%*%rnorm(p)/sqrt(rchisq(1,d+p*n-p)/(d+p*n-p))
Xn<-X-mu0%*%tbi_n
Cov_p<-Xn%*%t(Xn)
### initial value of the proposal
q0<-det(Psi0)^(-0.5*(n+p+1))*(1+sum(diag(solve(Psi0)%*%Cov_p))/d)^(-0.5*(p*n+d))
### initial value of the target (posterior)
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
t_num<-0
for (i_n in 1:n)
{iPsiUi<-solve(Psi0+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
t_num<-t_num+sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n])/d
}
p0<-sqrt(det(tGp%*%(mat_num-as.vector(mat_numJ)%*%t(as.vector(mat_numJ))/(n*p+d))%*%Gp/n))*det_num*(1+t_num)^(-0.5*(p*n+d))
for (j in 1:Np)
{
### generating a new draw of mu and Psi from the proposal
Z<-matrix(rnorm(p*(n-1)),p,n-1)
Psi_p<-t(cS)%*%solve(Z%*%t(Z))%*%cS*sqrt(rchisq(1,d)/(d))
mu_p<-bar_X+sqrt((d+(n-1)*sum(diag(solve(Psi_p)%*%S)))/n/(d+p*n-p))*t(chol(Psi_p))%*%rnorm(p)/sqrt(rchisq(1,d+p*n-p)/(d+p*n-p))
Xn<-X-mu_p%*%tbi_n
Cov_p<-Xn%*%t(Xn)
### value of the proposal at new draw
q1<-det(Psi_p)^(-0.5*(n+p+1))*(1+sum(diag(solve(Psi_p)%*%Cov_p))/d)^(-0.5*(p*n+d))
### value of the posterior
mat_num<-matrix(0,p^2,p^2)
mat_numJ<-matrix(0,p,p)
det_num<-1
exp_num<-1
t_num<-0
for (i_n in 1:n)
{iPsiUi<-solve(Psi_p+U[(p*(i_n-1)+1):(p*i_n),(p*(i_n-1)+1):(p*i_n)])
mat_num<-mat_num+kronecker(iPsiUi,iPsiUi)
mat_numJ<-mat_numJ+iPsiUi
det_num<-det_num*sqrt(det(iPsiUi))
t_num<-t_num+sum(t(Xn[,i_n])%*%iPsiUi%*%Xn[,i_n])/d
}
p1<-sqrt(det(tGp%*%(mat_num-as.vector(mat_numJ)%*%t(as.vector(mat_numJ))/(n*p+d))%*%Gp/n))*det_num*(1+t_num)^(-0.5*(p*n+d))
### MH ratio
ratio_MH<-p1*q0/p0/q1
if (runif(1) <= ratio_MH)
{
mu0<-mu_p
Psi0<-Psi_p
q0<-q1
p0<-p1
}
Psi_m<-cbind(Psi_m,as.vector(Psi0))
mu_m<-cbind(mu_m,mu0)
}
list(mu_m,Psi_m)
}
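## Editor's sketch (illustration only, not part of the package): the samplers in
## this file share one interface. X is a p x n matrix of study estimates, U is
## the (n*p) x (n*p) block-diagonal matrix of within-study covariances, and Np
## is the number of Metropolis-Hastings draws; duplication_matrix() is assumed
## to be available from elsewhere in the package. A hypothetical call:
# p <- 2; n <- 10
# X <- matrix(rnorm(p * n), p, n)
# U <- diag(p * n)                       # identity within-study covariances
# draws <- sample_post_nor_ref_marg_mu(X, U, Np = 1000)
# mu_draws  <- draws[[1]]                # p x Np matrix of draws of mu
# Psi_draws <- draws[[2]]                # p^2 x Np matrix of vec(Psi) draws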
|
/scratch/gouwar.j/cran-all/cranData/BayesMultMeta/R/MH_sample_post.R
|
#' Bayesian estimation of mixture distributions
#'
#' Estimation of a univariate mixture with unknown number of components using a sparse finite mixture Markov chain Monte Carlo (SFM MCMC) algorithm.
#'
#' @param data Vector of observations.
#' @param K Maximum number of mixture components.
#' @param dist String indicating the distribution of the mixture components;
#' currently supports `"normal"`, `"skew_normal"`, `"poisson"` and `"shifted_poisson"`.
#' @param priors List of priors; default is an empty list which implies the following priors:\cr
#' `a0 = 1`,\cr `A0 = 200`,\cr `b0 = median(y)`,\cr `B0 = (max(y) - min(y))^2` (normal),\cr
#' `D_xi = 1`,\cr `D_psi =1`, (skew normal: `B0 = diag(D_xi,D_psi)`), \cr `c0 = 2.5`,\cr
#' `l0 = 1.1` (poisson),\cr `l0 = 5` (shifted poisson),\cr `L0 = 1.1/median(y)` (poisson),\cr `L0 = l0 - 1` (shifted poisson),\cr
#' `g0 = 0.5`,\cr `G0 = 100*g0/c0/B0` (normal),\cr
#' `G0 = g0/(0.5*var(y))` (skew normal).
#' @param nb_iter Number of MCMC iterations; default is `2000`.
#' @param burnin Number of MCMC iterations used as burnin; default is `nb_iter/2`.
#' @param print Show MCMC progression? Default is `TRUE`.
#'
#' @return A list of class \code{bayes_mixture} containing:
#' \item{data}{Same as argument.}
#' \item{mcmc}{Matrix of MCMC draws where the rows corresponding to burnin have been discarded.}
#' \item{mcmc_all}{Matrix of MCMC draws.}
#' \item{loglik}{Log likelihood at each MCMC draw.}
#' \item{K}{Number of components.}
#' \item{dist}{Same as argument.}
#' \item{pdf_func}{The pdf/pmf of the mixture components.}
#' \item{dist_type}{Type of the distribution, i.e. continuous or discrete.}
#' \item{pars_names}{Names of the mixture components' parameters.}
#' \item{loc}{Name of the location parameter of the mixture components.}
#' \item{nb_var}{Number of variables/parameters in the mixture distribution.}
#'
#' @details
#'
#' Let \eqn{y_i}, \eqn{i=1,\dots,n} denote observations.
#' A general mixture of \eqn{K} distributions from the same
#' parametric family is given by:
#' \deqn{y_i \sim \sum_{k=1}^{K}\pi_k p(\cdot|\theta_k)}
#' with \eqn{\sum_{k=1}^{K}\pi_k=1} and \eqn{\pi_k\geq 0}, \eqn{k=1, ...,K}.
#' \cr\cr
#' The exact number of components does not have to be known *a priori*
#' when using an SFM MCMC approach. Rather, an upper bound is specified for the
#' number of components and the weights of superfluous components are shrunk
#' towards zero during estimation. Following \insertCite{malsiner-walli_model-based_2016;textual}{BayesMultiMode}
#' a symmetric Dirichlet prior is used for the mixture weights:
#' \deqn{\pi_k \sim \text{Dirichlet}(e_0,\dots,e_0),}
#' where a Gamma hyperprior is used on the concentration parameter \eqn{e_0}:\cr\cr
#' \deqn{e_0 \sim \text{Gamma}\left(a_0, A_0\right).}
#'
#' **Mixture of Normal distributions**
#'
#' Normal components take the form:
#' \deqn{p(y_i|\mu_k,\sigma_k) = \frac{1}{\sqrt{2 \pi} \
#' \sigma_k} \exp\left( - \, \frac{1}{2} \left( \frac{y_i -
#' \mu_k}{\sigma_k} \right)^2 \right).}
#'
#' Independent conjugate priors are used for \eqn{\mu_k} and \eqn{\sigma^2_k}
#' (see for instance Malsiner-Walli et al. 2016):
#' \deqn{\mu_k \sim \text{Normal}( \text{b}_0, \text{B}_0),}
#' \deqn{\sigma^{-2}_k \sim \text{Gamma}( \text{c}_0, \text{C}_0),}
#' \deqn{C_0 \sim \text{Gamma}( \text{g}_0, \text{G}_0).}
#'
#'
#' **Mixture of skew-Normal distributions**
#'
#' We use the skew-Normal of \insertCite{azzalini_1985;textual}{BayesMultiMode} which takes the form:
#' \deqn{p(y_i| \xi_k,\omega_k,\alpha_k) = \frac{1}{\omega_k\sqrt{2\pi}} \ \exp\left( - \,
#' \frac{1}{2} \left( \frac{y_i - \xi_k}{\omega_k} \right)^2\right) \
#' \left(1 + \text{erf}\left( \alpha_k\left(\frac{y_i - \xi_k}{\omega_k\sqrt{2}}\right)\right)\right),}
#' where \eqn{\xi_k} is a location parameter, \eqn{\omega_k} a scale parameter and \eqn{\alpha_k}
#' the shape parameter introducing skewness. For Bayesian estimation, we adopt the approach of
#' \insertCite{fruhwirth-schnatter_bayesian_2010;textual}{BayesMultiMode} and use the following reparameterised random-effect model:
#' \deqn{z_i \sim TN_{[0,\infty)}(0, 1),}
#' \deqn{y_i|(S_i = k) = \xi_k + \psi_k z_i + \epsilon_i, \quad \epsilon_i \sim N(0, \sigma^2_k),}
#' where the parameters of the skew-Normal are recovered with
#' \deqn{\alpha_k = \frac{\psi_k}{\sigma_k}, \qquad \omega^2_k = \sigma^2_k + \psi^2_k.}
#' By defining a regressor \eqn{x_i = (1, z_i)'}, the skew-Normal mixture can be seen as a
#' random-effect model and sampled using standard techniques. Thus we use priors similar to
#' the Normal mixture model:
#' \deqn{(\xi_k, \psi_k)' \sim \text{Normal}(\text{b}_0, \text{B}_0),}
#' \deqn{\sigma^{-2}_k \sim \text{Gamma}(\text{c}_0, \text{C}_0),}
#' \deqn{\text{C}_0 \sim \text{Gamma}( \text{g}_0, \text{G}_0).}
#' We set \deqn{\text{b}_0 = (\text{median}(y), 0)'} and \deqn{\text{B}_0 = \text{diag}(\text{D}\_\text{xi}, \text{D}\_\text{psi})} with D_xi = D_psi = 1.
#'
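#' As a small simulation sketch of this random-effect representation (editor's
#' illustration with hypothetical parameter values):
#' ```
#' xi <- 0; psi <- 2; sigma <- 1
#' z <- abs(rnorm(1e4))                   # draw from TN_[0,Inf)(0, 1)
#' y <- xi + psi * z + rnorm(1e4, 0, sigma)
#' omega <- sqrt(sigma^2 + psi^2)         # implied skew-Normal scale
#' alpha <- psi / sigma                   # implied skew-Normal shape
#' ```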
#'
#' **Mixture of Poisson distributions**
#'
#' Poisson components take the form:
#' \deqn{p(y_i|\lambda_k) = \frac{1}{y_i!} \, \lambda^{y_i}_k \,\exp(-\lambda_k).}
#' The prior for \eqn{\lambda_k} follows from \insertCite{viallefont2002bayesian;textual}{BayesMultiMode}:
#' \deqn{\lambda_k \sim \text{Gamma}(\text{l}_0,\text{L}_0).}
#'
#'
#' **Mixture of shifted-Poisson distributions**
#'
#' Shifted-Poisson components take the form
#' \deqn{p(y_i |\lambda_k, \kappa_k) = \frac{1}{(y_i - \kappa_k)!} \,
#' \lambda^{(y_i - \kappa_k)}_k \,\exp(-\lambda_k)}
#' where \eqn{\kappa_k} is a location or shift parameter with uniform prior, see \insertCite{Cross2024;textual}{BayesMultiMode}.
#'
#' @references
#' \insertAllCited{}
#'
#' @importFrom assertthat assert_that
#' @importFrom assertthat is.scalar
#' @importFrom assertthat is.string
#'
#' @examples
#' # Example with galaxy data ================================================
#' set.seed(123)
#'
#' # retrieve galaxy data
#' y = galaxy
#'
#' # estimation
#' bayesmix = bayes_fit(data = y,
#' K = 5, #not many to run the example rapidly
#' dist = "normal",
#' nb_iter = 500, #not many to run the example rapidly
#' burnin = 100)
#'
#' # plot estimated mixture
#' # plot(bayesmix, max_size = 200)
#'
#' # Changing priors ================================================
#' set.seed(123)
#'
#' # retrieve galaxy data
#' y = galaxy
#'
#' # estimation
#' K = 5
#' bayesmix = bayes_fit(data = y,
#' K = K, #not many to run the example rapidly
#' dist = "normal",
#' priors = list(a0 = 10,
#' A0 = 10*K),
#' nb_iter = 500, #not many to run the example rapidly
#' burnin = 100)
#'
#' # plot estimated mixture
#' # plot(bayesmix, max_size = 200)
#'
#' # Example with DNA data =====================================================
#' \donttest{
#' set.seed(123)
#'
#' # retrieve DNA data
#' y = d4z4
#'
#' # estimation
#' bayesmix = bayes_fit(data = y,
#' K = 5, #not many to run the example rapidly
#' dist = "shifted_poisson",
#' nb_iter = 500, #not many to run the example rapidly
#' burnin = 100)
#'
#' # plot estimated mixture
#' # plot(bayesmix, max_size = 200)
#' }
#'
#' @export
bayes_fit <- function(data,
K,
dist,
priors = list(),
nb_iter = 2000,
burnin = nb_iter/2,
print = TRUE) {
assert_that(is.vector(data) & length(data) > K,
msg = "data should be a vector of length greater than K")
assert_that(all(is.finite(data)),
msg = "data should only include numeric finite values")
assert_that(is.string(dist) & dist %in% c("normal", "skew_normal", "poisson", "shifted_poisson"),
msg = paste0("Unsupported distribution;\n",
"dist should be either\n",
"'normal', 'skew_normal', 'poisson' or 'shifted_poisson'"))
assert_that(is.scalar(nb_iter), round(nb_iter) == nb_iter, nb_iter > 0, msg = "nb_iter should be a positive integer")
assert_that(is.scalar(burnin), burnin > 0, burnin < nb_iter, round(burnin) == burnin,
msg = "burnin should be a positive integer lower than nb_iter")
assert_that(is.scalar(K), round(K) == K, K > 0, msg = "K should be a positive integer")
assert_that(is.logical(print), msg = "print should be either TRUE or FALSE")
if (dist %in% c("poisson", "shifted_poisson")) {
assert_that(all(data%%1 == 0),
msg = "data must include only integer values when using Poisson or shifted Poisson mixtures.")
assert_that(min(data) > -1,
msg = "data should not include negative values when using Poisson or shifted Poisson mixtures.")
dist_type = "discrete"
} else {
dist_type = "continuous"
}
priors = check_priors(priors, dist, data)
mcmc <- gibbs_SFM(y = data,
K = K,
nb_iter = nb_iter,
priors = priors,
print = print,
dist = dist)
# extract loglik from mcmc
ll_id = which(colnames(mcmc) == "loglik")
loglik = mcmc[,ll_id]
mcmc = mcmc[,-ll_id]
BayesMixture = bayes_mixture(mcmc = mcmc,
data = data,
burnin = burnin,
dist = dist,
dist_type = dist_type,
loglik = loglik)
return(BayesMixture)
}
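## Editor's note -- a minimal sketch of the returned object, assuming the galaxy
## example from the documentation above has been run:
# colnames(bayesmix$mcmc)  # "eta1"..."eta5", "mu1"..."mu5", "sigma1"..."sigma5"
# dim(bayesmix$mcmc)       # draws after burnin, one column per parameter
# length(bayesmix$loglik)  # log likelihood at every MCMC draw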
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/bayes_fit.R
|
#' Creating a S3 object of class `bayes_mixture`
#'
#' Creates an object of class `bayes_mixture` which can subsequently be used as argument in [bayes_mode()].
#' This function is useful for users who want to use the mode inference capabilities of `BayesMultiMode` with mixture
#' estimated using external software.
#'
#' @param mcmc A matrix of MCMC draws with one column per variable, e.g. eta1, eta2, ..., mu1, mu2, etc...
#' @param data Vector of observation used for estimating the model.
#' @param burnin Number of draws to discard as burnin; default is `0`.
#' @param dist Distribution family of the mixture components supported by
#' the package (i.e. `"normal"`, `"student"`, `"skew_normal"` or `"shifted_poisson"`).
#' If left unspecified, `pdf_func` is required.
#' @param pdf_func (function) Pdf or pmf of the mixture components;
#' this input is used only if `dist` is left unspecified.
#' pdf_func should have two arguments: (i) the observation where the pdf is evaluated;
#' (ii) a named vector representing the function parameters. For instance a normal pdf would take the form:
#' `pdf_func <- function(x, pars) dnorm(x, pars['mu'], pars['sigma'])`.
#' The names of `pars` should correspond to variables in `mcmc`, e.g. `"mu1"`, `"mu2"` etc...
#' @param dist_type Either `"continuous"` or `"discrete"`.
#' @param loglik Vector showing the log likelihood at each MCMC draw.
#' @param vars_to_keep (optional) Character vector containing the names
#' of the variables to keep in `mcmc`, e.g. `c("eta", "mu", "sigma")`.
#' @param vars_to_rename (optional) Use for renaming variables/parameters in `mcmc`.
#' A named character vector where the names are the new variable names
#' and the elements the variables in `mcmc`, e.g. c("new_name" = "old_name").
#' @param loc (for continuous mixtures other than Normal mixtures) String indicating the location parameter
#' of the distribution; the latter is used to initialise the MEM algorithm.
#'
#' @return A list of class `bayes_mixture` containing:
#' \item{data}{Same as argument.}
#' \item{mcmc}{Matrix of MCMC draws where the rows corresponding to burnin have been discarded.}
#' \item{mcmc_all}{Matrix of MCMC draws.}
#' \item{loglik}{Log likelihood at each MCMC draw.}
#' \item{K}{Number of components.}
#' \item{dist}{Same as argument.}
#' \item{pdf_func}{The pdf/pmf of the mixture components.}
#' \item{dist_type}{Type of the distribution, i.e. continuous or discrete.}
#' \item{pars_names}{Names of the mixture components' parameters.}
#' \item{loc}{Name of the location parameter of the mixture components.}
#' \item{nb_var}{Number of parameters in the mixture distribution.}
#'
#' @importFrom posterior subset_draws
#' @importFrom stringr str_extract
#' @importFrom stringr str_to_lower
#' @importFrom stringr str_replace_all
#' @importFrom stringr str_locate
#'
#' @examples
#'
#' # Example with a Student t ================================================
#'
#' # Constructing synthetic mcmc output
#' mu = c(0.5,6)
#' mu_mat = matrix(rep(mu, 100) + rnorm(200, 0, 0.1),
#' ncol = 2, byrow = TRUE)
#'
#' omega = c(1,2)
#' sigma_mat = matrix(rep(omega, 100) + rnorm(200, 0, 0.1),
#' ncol = 2, byrow = TRUE)
#'
#' nu = c(5,5)
#' nu_mat = matrix(rep(nu, 100) + rnorm(200, 0, 0.1),
#' ncol = 2, byrow = TRUE)
#'
#' eta = c(0.8,0.2)
#' eta_mat = matrix(rep(eta[1], 100) + rnorm(100, 0, 0.05),
#' ncol = 1)
#' eta_mat = cbind(eta_mat,1-eta_mat)
#'
#' xi_mat = matrix(0,100,2)
#'
#' fit = cbind(eta_mat, mu_mat, sigma_mat, nu_mat, xi_mat)
#' colnames(fit) = c("eta1", "eta2", "mu1", "mu2",
#' "omega1", "omega2", "nu1", "nu2", "xi1", "xi2")
#'
#' # sampling observations
#' data = c(sn::rst(eta[1]*1000, mu[1], omega[1], nu = nu[1]),
#' sn::rst(eta[2]*1000, mu[2], omega[2], nu = nu[2]))
#'
#' pdf_func = function(x, pars) {
#' sn::dst(x, pars["mu"], pars["sigma"], pars["xi"], pars["nu"])
#' }
#'
#' dist_type = "continuous"
#'
#' BM = bayes_mixture(fit, data, burnin = 50,
#' pdf_func = pdf_func, dist_type = dist_type,
#' vars_to_rename = c("sigma" = "omega"), loc = "xi")
#' # plot(BM)
#' @export
bayes_mixture <- function(mcmc,
data,
burnin = 0,
dist = NA_character_,
pdf_func = NULL,
dist_type = NA_character_,
loglik = NULL,
vars_to_keep = NA_character_,
vars_to_rename = NA_character_,
loc = NA_character_) {
## input checks
assert_that(is.matrix(mcmc))
assert_that(is.string(dist))
assert_that(is.string(dist_type))
assert_that(is.vector(data) & length(data) > 0,
msg = "data should be a vector of length > 0")
assert_that(is.scalar(burnin) & burnin >= 0, msg = "burnin should be a non-negative integer")
assert_that(burnin < nrow(mcmc),
msg = "burnin parameter should be less than the number of mcmc draws")
## input checks
assert_that(is.character(vars_to_keep))
assert_that(is.character(vars_to_rename))
##
rownames(mcmc) = NULL
mcmc_all = mcmc
mcmc = mcmc_all[(burnin+1):nrow(mcmc_all), ,drop = FALSE]
# extract parameter names
col_names = str_extract(colnames(mcmc), "[a-z]+")
pars_names = unique(col_names)
# keep only the variables specified in vars_to_keep
if (sum(!is.na(vars_to_keep))>0) {
pars_names = pars_names[pars_names %in% vars_to_keep]
mcmc = mcmc[ , col_names %in% pars_names, drop = F]
}
if(any(!is.na(vars_to_rename))) {
assert_that(!is.null(names(vars_to_rename)),
msg = "vars_to_rename should be named character vector")
assert_that(all(vars_to_rename %in% pars_names),
msg = "old variable names in vars_to_rename should all be in the retained mcmc variables")
new_names = colnames(mcmc)
for (i in 1:length(vars_to_rename)) {
new_names = str_replace_all(new_names,
vars_to_rename[[i]],
names(vars_to_rename)[i])
}
colnames(mcmc) = new_names
pars_names = unique(str_extract(new_names, "[a-z]+"))
}
list_func = test_and_export(mcmc[1,,drop =T], pdf_func, dist, pars_names, dist_type, loc)
BayesMix = list(data = data,
mcmc = mcmc,
mcmc_all = mcmc_all,
loglik = loglik,
K = list_func$K,
dist = dist,
dist_type = list_func$dist_type,
pdf_func = list_func$pdf_func,
pars_names = pars_names,
loc = list_func$loc,
nb_var = length(pars_names) - 1) #minus the shares
class(BayesMix) <- "bayes_mixture"
return(BayesMix)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/bayes_mixture.R
|
#' Bayesian mode inference
#'
#' Bayesian inference on the modes in a univariate mixture estimated with MCMC methods, see \insertCite{Cross2024;textual}{BayesMultiMode}.
#' Provides posterior probabilities of the number of modes and their locations.
#' Under the hood it calls the function [mix_mode()] to find the modes in each MCMC draw.
#'
#' @param BayesMix An object of class `bayes_mixture` generated with either [bayes_fit()] or [bayes_mixture()].
#' @param rd (for continuous mixtures) Integer indicating the number of decimal places when rounding the distribution's support;
#' this rounding is needed to compute posterior probabilities of mode locations.
#' @param tol_mixp Components with a mixture proportion below `tol_mixp` are discarded when estimating modes;
#' note that this does not apply to the biggest component so that it is not possible to discard all components;
#' should be between `0` and `1`; default is `0`.
#' @param tol_x (for continuous mixtures) Tolerance parameter for distance in-between modes; default is `sd(data)/10`
#' where data is the vector of observations from `BayesMix`.
#' If two modes are closer than `tol_x`, only the first estimated mode is kept.
#' @param tol_conv (for continuous mixtures) Tolerance parameter for convergence of the algorithm; default is `1e-8`.
#' @param inside_range Should modes outside of `range` be discarded? Default is `TRUE`.
#' Modes sometimes fall outside the range of the data when components are very small and `K` is large.
#' @param range Limits of the support where modes are saved (if `inside_range` is `TRUE`);
#' default is `c(min(BayesMix$data), max(BayesMix$data))`.
#' @return A list of class `bayes_mode` containing:
#' \item{data}{From `BayesMix`.}
#' \item{dist}{From `BayesMix`.}
#' \item{dist_type}{From `BayesMix`.}
#' \item{pars_names}{From `BayesMix`.}
#' \item{modes}{Matrix with a row for each draw and columns showing modes.}
#' \item{p1}{Posterior probability of unimodality.}
#' \item{p_nb_modes}{Matrix showing posterior probabilities for the number of modes.}
#' \item{p_mode_loc}{Matrix showing posterior probabilities for mode locations.}
#' \item{mix_density}{Mixture density at all locations in each draw.}
#' \item{algo}{Algorithm used for mode estimation.}
#' \item{range}{Range outside which modes are discarded if `inside_range` is `TRUE`.}
#' \item{BayesMix}{`BayesMix`.}
#'
#' @details
#' Each draw from the MCMC output after burnin, \eqn{\theta^{(d)}, \quad d = 1,...,D}, leads to a posterior predictive probability
#' density/mass function:
#' \deqn{p(y | \theta^{(d)}) =\sum_{k=1}^{K} \pi_k^{(d)} p(y | \theta_k^{(d)}).}
#' Using this function, the modes in draw \eqn{d}, \eqn{y_{m}^{(d)}}, \eqn{m = 1,..., M^{(d)}},
#' where \eqn{M^{(d)}} is the number of modes, are estimated using the algorithm mentioned
#' in the description above.
#'
#' After running this procedure across all retained posterior draws,
#' we compute the posterior probability for the number of modes being \eqn{M} as:
#' \deqn{P(\#\text{modes}=M)=\frac{1}{D}\sum_{d=1}^{D}1(M^{(d)} = M).}
#' Similarly, posterior probabilities for locations of the modes are given by:
#' \deqn{P(y=\text{mode})=\frac{1}{D}\sum_{d=1}^{D} \sum_{m=1}^{M^{(d)}} 1(y = y_m^{(d)}),}
#' for each location \eqn{y} in the range \eqn{[\min(y),\max(y)]}. Obviously,
#' continuous data are not defined on a discrete support;
#' it is therefore necessary to choose a rounding decimal to discretize their support (with the \code{rd} argument).
#'
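#' For example, with `rd = 1`, modes estimated at 2.04 and 2.06 in two different
#' draws are both rounded to 2.0 and therefore count towards the same location
#' when computing \eqn{P(y=\text{mode})}.
#'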
#' @references
#' \insertAllCited{}
#'
#' @importFrom assertthat assert_that
#' @importFrom assertthat is.scalar
#' @importFrom tidyr as_tibble
#'
#' @examples
#' # Example with galaxy data ================================================
#' set.seed(123)
#'
#' # retrieve galaxy data
#' y = galaxy
#'
#' # estimation
#' bayesmix = bayes_fit(data = y,
#' K = 5, #not many to run the example rapidly
#' dist = "normal",
#' nb_iter = 500, #not many to run the example rapidly
#' burnin = 100)
#'
#' # mode estimation
#' BayesMode = bayes_mode(bayesmix)
#'
#' # plot
#' # plot(BayesMode, max_size = 200)
#'
#' # summary
#' # summary(BayesMode)
#'
#' # Example with DNA data ================================================
#' set.seed(123)
#'
#' # retrieve DNA data
#' y = d4z4
#'
#' # estimation
#' bayesmix = bayes_fit(data = y,
#' K = 5, #not many to run the example rapidly
#' dist = "shifted_poisson",
#' nb_iter = 500, #not many to run the example rapidly
#' burnin = 100)
#'
#' # mode estimation
#' BayesMode = bayes_mode(bayesmix)
#'
#' # plot
#' # plot(BayesMode, max_size = 200)
#'
#' # summary
#' # summary(BayesMode)
#'
#' # Example with a Student t ================================================
#' mu = c(0.5,6)
#' sigma = c(1,2)
#' nu = c(5,5)
#' p = c(0.8,0.2)
#' data = c(sn::rst(p[1]*1000, mu[1], sigma[1], nu = nu[1]),
#' sn::rst(p[2]*1000, mu[2], sigma[2], nu = nu[2]))
#'
#' fit = c(eta = p, mu = mu, sigma = sigma, nu = nu, xi = c(0,0))
#' fit = rbind(fit, fit)
#'
#' pdf_func = function(x, pars) {
#' sn::dst(x, pars["mu"], pars["sigma"], pars["xi"], pars["nu"])
#' }
#'
#' dist_type = "continuous"
#'
#' bayesmix = bayes_mixture(fit, data, burnin = 1,
#' pdf_func = pdf_func, dist_type = dist_type, loc = "mu")
#'
#' BayesMode = bayes_mode(bayesmix)
#'
#' # plot
#' # plot(BayesMode, max_size = 200)
#'
#' # summary
#' # summary(BayesMode)
#'
#' @export
bayes_mode <- function(BayesMix, rd = 1, tol_mixp = 0, tol_x = sd(BayesMix$data)/10, tol_conv = 1e-8, inside_range = TRUE, range = c(min(BayesMix$data), max(BayesMix$data))) {
assert_that(inherits(BayesMix, "bayes_mixture"), msg = "BayesMix should be an object of class bayes_mixture")
assert_that(all(c("data", "mcmc", "mcmc_all",
"loglik", "K", "dist",
"dist_type", "pdf_func", "pars_names",
"loc", "nb_var") %in% names(BayesMix)),
msg = "BayesMix is not a proper bayes_mixture object.")
assert_that(is.scalar(rd), rd >= 0, round(rd) == rd, msg = "rd should be an integer greater than or equal to zero")
assert_that(is.scalar(tol_x) & tol_x > 0, msg = "tol_x should be a positive scalar")
assert_that(is.scalar(tol_mixp) & tol_mixp >= 0 & tol_mixp < 1, msg = "tol_mixp should be a scalar greater than or equal to 0 and lower than 1")
assert_that(is.scalar(tol_conv) & tol_conv > 0, msg = "tol_conv should be a positive scalar")
dist = BayesMix$dist
data = BayesMix$data
mcmc = BayesMix$mcmc
dist_type = BayesMix$dist_type
pdf_func = BayesMix$pdf_func
pars_names = BayesMix$pars_names
loc = BayesMix$loc
assert_that(is.vector(range) & length(range) == 2,
msg = "range should be a vector of length 2")
assert_that(all(is.finite(range)),
msg = "lower and upper limits of range should be finite")
assert_that(range[2] > range[1],
msg = "upper limit of range not greater than lower limit")
if (dist %in% c("poisson", "shifted_poisson")) {
assert_that(all(range>=0),
msg = "lower limit should be greater or equal than zero when using the Poisson or shifted Poisson.")
}
modes = t(apply(mcmc, 1, mix_mode_estimates, dist = dist,
pdf_func = pdf_func, dist_type = dist_type,
tol_mixp = tol_mixp, tol_x = tol_x, tol_conv = tol_conv,
loc = loc, range = range,
inside_range = inside_range))
# Number of modes
n_modes = apply(!is.na(modes),1,sum) # number of modes in each MCMC draw
modes = matrix(modes[, 1:max(n_modes)], nrow = nrow(mcmc))
colnames(modes) = paste('mode',1:max(n_modes))
vec_modes = as.vector(modes)
vec_modes = vec_modes[!is.na(vec_modes)]
if (dist_type == "continuous") {
if (!is.na(dist) & dist == "normal") {
algo = "fixed-point"
} else {
algo = "Modal Expectation-Maximization (MEM)"
}
vec_modes = round(vec_modes, rd)
mode_range = seq(min(vec_modes), max(vec_modes), by = 10^-rd)
}
if (dist_type == "discrete") {
mode_range = min(vec_modes):max(vec_modes)
# unique modes to calculate post probs of number of modes
modes <- t(apply(mcmc,1,FUN = mix_mode_estimates,
range = range,
dist = dist,
dist_type = dist_type,
tol_mixp = tol_mixp,
tol_x = tol_x,
tol_conv = tol_conv,
type = "unique",
pdf_func = pdf_func,
inside_range = inside_range))
n_modes = apply(!is.na(modes),1,sum)
algo = "discrete"
}
### Posterior probability of being a mode for each location
sum_modes = unlist(lapply(mode_range,
FUN = counting,
vec = vec_modes))
probs_modes = sum_modes/nrow(mcmc)
p_mode_loc = rbind(mode_range, probs_modes)
rownames(p_mode_loc) = c("mode location", "posterior probability")
##### testing unimodality
p1 = 0 # posterior probability of unimodality
if(any(n_modes==1)){
p1 = length(n_modes[n_modes==1])/nrow(modes)
}
# Test for number of modes : number of modes and their posterior probability
unique_modes = unique(n_modes) #possible number of modes
prob_nb_modes = rep(NA_real_,length(unique_modes))
for (i in 1:length(unique_modes)){
prob_nb_modes[i] = length(n_modes[n_modes==unique_modes[i]])/nrow(modes)
}
p_nb_modes = rbind(unique_modes,prob_nb_modes)
rownames(p_nb_modes) = c("number of modes", "posterior probability")
# ordering
p_nb_modes = p_nb_modes[, order(unique_modes)]
# mixture density
mix_density = apply(mcmc,1, FUN = dmix, x = mode_range,
pars_names = pars_names,
pdf_func = pdf_func)
mix_density = cbind(mode_range, mix_density)
colnames(mix_density) = c("x", paste0("draw",1:nrow(mcmc)))
mix_density = as_tibble(mix_density)
bayes_mode = list()
bayes_mode$data = data
bayes_mode$dist = dist
bayes_mode$dist_type = dist_type
bayes_mode$pars_names = pars_names
bayes_mode$modes = modes
bayes_mode$p1 = p1
bayes_mode$p_nb_modes = p_nb_modes
bayes_mode$p_mode_loc = p_mode_loc
bayes_mode$algo = algo
bayes_mode$BayesMix = BayesMix
bayes_mode$range = range
bayes_mode$mix_density = mix_density
class(bayes_mode) <- "bayes_mode"
return(bayes_mode)
}
#' @keywords internal
mix_mode_estimates <- function(mcmc, dist = NA_character_, dist_type = NA_character_,
tol_mixp, tol_x, tol_conv,
pdf_func = NULL, type = "all", range = NULL,
loc = NA_character_, inside_range = TRUE) {
output = rep(NA_real_, length(mcmc))
mix = mixture(mcmc, dist = dist, pdf_func = pdf_func,
dist_type = dist_type, range = range, loc = loc)
modes = mix_mode(mix, tol_mixp, tol_x, tol_conv, type = type)$mode_estimates
output[1:length(modes)] = modes
return(output)
}
#' @keywords internal
dmix <- function(x, pars, pars_names, pdf_func) {
pars_mat = vec_to_mat(pars, pars_names)
pars_mat = na.omit(pars_mat) # when mcmc contains NA (i.e. BNPmix)
pdf_func_mix(x, pars_mat, pdf_func)
}
#' @keywords internal
counting <- function(x, vec) {
length(vec[vec==x])
}
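## Editor's sketch: reading the main outputs of bayes_mode(), following the
## object names used in the documentation examples above:
# BayesMode$p1           # posterior probability of unimodality
# BayesMode$p_nb_modes   # row 1: number of modes;  row 2: posterior probability
# BayesMode$p_mode_loc   # row 1: mode location;    row 2: posterior probability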
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/bayes_mode.R
|
#' Trace plots
#'
#' This is a wrapper around the [bayesplot::mcmc_trace()] function from package `bayesplot`.
#'
#' @param BayesMix An object of class `bayes_mixture`.
#' @param mcmc_vars Variables to plot; default is all the variables in the MCMC output.
#' @param with_burnin Plot all draws, including burnin? Default is `FALSE`.
#' @param ... Additional arguments passed to function [bayesplot::mcmc_trace()].
#'
#' @importFrom bayesplot mcmc_trace
#' @importFrom assertthat assert_that
#'
#' @return A trace plot.
#'
#' @examples
#' # Example with galaxy data ================================================
#' set.seed(123)
#'
#' # retrieve galaxy data
#' y = galaxy
#'
#' # estimation
#' bayesmix = bayes_fit(data = y,
#' K = 5, #not many to run the example rapidly
#' dist = "normal",
#' nb_iter = 500, #not many to run the example rapidly
#' burnin = 100)
#'
#' # trace plot
#' # bayes_trace(bayesmix)
#'
#' @export
#'
bayes_trace <- function(BayesMix,
mcmc_vars = NULL,
with_burnin = FALSE,
...) {
assert_that(inherits(BayesMix, "bayes_mixture"), msg = "BayesMix should be an object of class bayes_mixture")
assert_that(is.logical(with_burnin), msg = "with_burnin should be either TRUE or FALSE")
if (with_burnin){
mcmc = BayesMix$mcmc_all
} else {
mcmc = BayesMix$mcmc
}
if (is.null(mcmc_vars)) {
mcmc_vars = colnames(mcmc)
}
message("\nNote that label-switching might occur in the MCMC draws because BayesMultiMode does not carry out post-processing.",
"\nWhile label-switching does not affect mode inference it can affect diagnostic checks.")
mcmc_trace(mcmc, pars = mcmc_vars, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/bayes_trace.R
|
#' Autosomal macrosatellite repeats d4z4
#'
#' Macrosatellite repeats D4Z4 in the subtelomere of chromosome 4q.\cr
#' Locus (hg18): 4q35.2 \cr
#' Unit (kb): 3.3 \cr
#' Restriction enzyme: EcoRI + HindIII/EcoRI + BlnI/XapI \cr
#' Encoded product : DUX4
#' @format
#' A vector of counts with 410 elements.
#' @importFrom Rdpack reprompt
#' @references
#' \insertRef{schaap_genome-wide_2013}{BayesMultiMode}
"d4z4"
#' X chromosomal macrosatellite repeats ct47
#'
#' Repeat units that encode for a cancer testis antigen.\cr
#' Locus (hg18): Xq24 \cr
#' Unit (kb): 4.8 \cr
#' Restriction enzyme: EcoRI \cr
#' Encoded product : cancer testis antigen 47
#'
#' @format
#' A vector of counts with 410 elements.
#' @importFrom Rdpack reprompt
#' @references
#' \insertRef{schaap_genome-wide_2013}{BayesMultiMode}
"ct47"
#' Tropical cyclones lifetime maximum intensity
#'
#' Dataset constructed using the International Best Track Archive for Climate Stewardship (IBTrACS).
#' The distribution of tropical cyclones lifetime maximum intensity across the globe is known
#' to be bimodal which has important implications for climate modelling.
#'
#' @format
#' A dataset with three columns showing the identification of the cyclone, its year of occurrence and its lifetime maximum intensity (LMI).
#' LMI is calculated as the maximum wind speed for each cyclone with unit kts (knots).
#'
#' @source
#' https://www.ncei.noaa.gov/products/international-best-track-archive
#' @importFrom Rdpack reprompt
#' @references
#' \insertRef{knapp_international_2010}{BayesMultiMode}\cr\cr
#' \insertRef{knapp_international_2018}{BayesMultiMode}
"cyclone"
#' Galaxy series
#'
#' Velocity at which 82 galaxies in the Corona Borealis region are moving away from our galaxy, scaled by 1000.
#'
#' @source
#' https://people.maths.bris.ac.uk/~mapjg/mixdata
#'
#' @importFrom Rdpack reprompt
#' @references
#' \insertRef{Richardson_Green_1997_RJMCMC}{BayesMultiMode}
"galaxy"
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/data.R
|
#' SFM MCMC algorithms to estimate mixtures.
#'
#' @importFrom gtools rdirichlet
#' @importFrom Rdpack reprompt
#' @importFrom stats median kmeans rgamma rmultinom rnorm density dgamma dpois runif
#' @importFrom MCMCglmm rtnorm
#' @importFrom mvtnorm rmvnorm
#' @importFrom sn dsn
# wrapper
#' @keywords internal
gibbs_SFM <- function(y,
K,
nb_iter,
priors = list(),
print = TRUE,
dist) {
if (dist == "normal") {
mcmc = gibbs_SFM_normal(y, K, nb_iter, priors, print)
}
if (dist == "skew_normal") {
mcmc = gibbs_SFM_skew_n(y, K, nb_iter, priors, print)
}
if (dist == "poisson") {
mcmc = gibbs_SFM_poisson(y, K, nb_iter, priors, print)
}
if (dist == "shifted_poisson") {
mcmc = gibbs_SFM_sp(y, K, nb_iter, priors, print)
}
return(mcmc)
}
#' @keywords internal
gibbs_SFM_normal <- function(y,
K,
nb_iter,
priors = list(),
print = TRUE){
# unpacking priors
a0 = priors$a0
A0 = priors$A0
b0 = priors$b0
B0 = priors$B0
c0 = priors$c0
g0 = priors$g0
G0 = priors$G0
#empty objects to store parameters
mu = matrix(NA_real_, nb_iter, K)
sigma2 = matrix(NA_real_, nb_iter, K)
eta = matrix(NA_real_, nb_iter, K)
lp = matrix(0, nb_iter, 1)
# initialisation
cl_y <- kmeans(y, centers = K, nstart = 30)
S <- matrix(0,length(y),K)
for (k in 1:K) {
S[cl_y$cluster==k ,k] = 1
}
mu[1,] <- cbind(t(cl_y$centers))
C0 = g0
e0 = a0/A0
# sampling
for (m in 2:nb_iter){
# 1. parameter simulation conditional on the classification
## a. sample component proportion
N = colSums(S)
eta[m, ] = rdirichlet(1, e0 + N)
probs = matrix(NA, length(y), K)
for (k in 1:K){
## b. sample sigma
ck = c0 + N[k]/2
Ck = C0 + 0.5*sum((y[S[, k]==1]-mu[m-1, k])^2)
sigma2[m, k] = 1/rgamma(1, ck, Ck)
## c. sample mu
B = 1/(1/B0 + N[k]/sigma2[m, k])
if (N[k]!=0){
b = B*(b0/B0 + N[k]*mean(y[S[, k]==1])/sigma2[m, k])
} else {
b = B*(b0/B0)
}
mu[m, k] = rnorm(1, b, sqrt(B))
## d. compute classification probabilities
probs[, k] = eta[m, k] * dnorm(y, mu[m, k], sqrt(sigma2[m, k]))
}
# 2. classification
pnorm = probs/rowSums(probs) #removed the replicate
## if the initial classification is bad then some data points won't be
# allocated to any components and some rows will be
# NAs (because of division by zero). We correct this by replacing NAs with
# equal probabilities
NA_id = which(is.na(pnorm[,1]))
pnorm[NA_id, ] = 1/ncol(pnorm)
S = t(apply(pnorm, 1, function(x) rmultinom(n = 1,size=1,prob=x)))
# 3. sample hyperparameters
## a. sample C0
C0 = rgamma(1, g0 + K*c0, G0 + sum(1/sigma2[m, ]))
## MH step for e0
## Sample component probabilities hyperparameters: alpha0, using RWMH step
e0 = draw_e0(e0,a0,1/A0,eta[m, ])[[1]]
# compute log lik
lp[m] = sum(log(rowSums(probs)))
## counter
if(print){
if(m %% (round(nb_iter / 10)) == 0){
cat(paste(100 * m / nb_iter, ' % draws finished'), fill=TRUE)
}
}
}
# output
mcmc = cbind(eta, mu, sqrt(sigma2), lp)
colnames(mcmc) = 1:ncol(mcmc)
for (i in 1:K){
colnames(mcmc)[c(i, K+i, 2*K+i)] = c(paste0("eta", i),
paste0("mu", i),
paste0("sigma", i))
}
colnames(mcmc)[ncol(mcmc)] = "loglik"
return(mcmc)
}
#' @keywords internal
gibbs_SFM_poisson <- function(y,
K,
nb_iter,
priors = list(),
print = TRUE){
# unpacking priors
a0 = priors$a0
A0 = priors$A0
l0 = priors$l0
L0 = priors$L0
n_obs <- length(y)
# Initial conditions
cl_y <- kmeans(y, centers = K, nstart = 30)
S <- matrix(0,length(y),K)
for (k in 1:K) {
S[cl_y$cluster==k ,k] = 1
}
e0 = a0/A0
# storage matrices
lambda = matrix(data=NA,nrow=nb_iter,ncol=K) # lambda
eta = matrix(data=NA,nrow=nb_iter,ncol=K) # probabilities
probs = matrix(data=NaN,nrow=n_obs,ncol=K) # Storage for probabilities
lp = matrix(0, nb_iter, 1)
## Sample lamda and S for each component, k=1,...,k
for(m in 1:nb_iter){
# Compute number of observations allocated in each component
N = colSums(S)
## sample component proportion
eta[m, ] = rdirichlet(1, e0 + N)
for (k in 1:K){
if (N[k]==0) {
yk = 0
} else {
yk = y[S[, k]==1]
}
# Sample lambda from Gamma distribution
lambda[m,k] = rgamma(1, shape = sum(yk) + l0,
rate = N[k] + L0)
#
probs[,k] = eta[m,k]*dpois(y,lambda[m,k])
}
# 2. classification
pnorm = probs/rowSums(probs)
## if the initial classification is bad then some data points won't be
# allocated to any components and some rows will be
# NAs (because of division by zero). We correct this by replacing NAs with
# equal probabilities
NA_id = which(is.na(pnorm[,1]))
pnorm[NA_id, ] = 1/ncol(pnorm)
S = t(apply(pnorm, 1, function(x) rmultinom(n = 1,size=1,prob=x)))
## Sample component probabilities hyperparameters: alpha0, using RWMH step
e0 = draw_e0(e0,a0,1/A0,eta[m, ])[[1]]
# compute log lik
lp[m] = sum(log(rowSums(probs)))
## counter
if(print){
if(m %% (round(nb_iter / 10)) == 0){
cat(paste(100 * m / nb_iter, ' % draws finished'), fill=TRUE)
}
}
}
# output
mcmc = cbind(eta, lambda, lp)
colnames(mcmc) = 1:ncol(mcmc)
for (i in 1:K){
colnames(mcmc)[c(i, K+i)] = c(paste0("eta", i),
paste0("lambda", i))
}
colnames(mcmc)[ncol(mcmc)] = "loglik"
# Return output
return(mcmc)
}
#' @keywords internal
gibbs_SFM_skew_n <- function(y,
K,
nb_iter,
priors = list(),
print = TRUE){
# unpacking priors
a0 = priors$a0
A0 = priors$A0
b0 = priors$b0
c0 = priors$c0
C0 = priors$C0
g0 = priors$g0
G0 = priors$G0
D_xi = priors$D_xi
D_psi = priors$D_psi
n_obs = length(y)
#empty objects to store parameters
sigma2 = rep(NA, K)
psi = rep(NA, K)
xi = matrix(NA, nb_iter, K)
omega = matrix(NA, nb_iter, K)
alpha = matrix(NA, nb_iter, K)
eta = matrix(NA, nb_iter, K)
lp = matrix(0, nb_iter, 1)
# initialisation
cl_y = kmeans(y, centers = K, nstart = 30)
S <- matrix(0, n_obs, K)
for (k in 1:K) {
S[cl_y$cluster==k ,k] = 1
}
b0 = matrix(c(b0, 0),nrow=2)
B0_inv = diag(1/c(D_xi,D_psi))
e0 = a0/A0
for (k in 1:K){
xi[1, k] = mean(y[S[, k]==1])
}
sigma2 = rep(max(y)^2,K)
beta = cbind(xi[1, ],0)
zk = matrix(0,n_obs,1)
cnt_update_e0 = 0
for (m in 1:nb_iter){
# set base priors
ck = c0
Ck = C0
bk = b0
Bk = solve(B0_inv)
## a.1 sample component proportion
N = colSums(S)
eta[m, ] = rdirichlet(1, e0 + N)
probs = matrix(NA, n_obs, K)
for (k in 1:K){
if (N[k] > 0) {
empty = FALSE
} else {
empty = TRUE
}
# sample z
if(!empty){
# allocate y
yk = y[S[ ,k] == 1]
# update z
Ak = 1/(1 + beta[k, 2]^2/sigma2[k])
ak = Ak*beta[k, 2]/sigma2[k]*(yk-beta[k, 1])
zk <- rtnorm(N[k], ak, sqrt(Ak), lower=0)
Xk = matrix(c(rep(1, N[k]), zk), nrow=N[k])
}
## a.1 sample Sigma
if(!empty){
eps = yk - Xk%*%beta[k, ]
Ck = C0 + 0.5*sum(eps^2)
ck = c0 + N[k]/2
}
sigma2[k] = 1/rgamma(1, ck, Ck)
## a.2 sample xi and psi jointly
if(!empty){
Bk = solve(crossprod(Xk)/sigma2[k] + B0_inv)
bk = Bk%*%(crossprod(Xk, yk)/sigma2[k] + B0_inv%*%b0)
}
beta[k, ] = rmvnorm(1, bk, Bk)
# storing
xi[m, k] = beta[k, 1]
omega[m, k] = sqrt(sigma2[k] + beta[k, 2]^2)
alpha[m, k] = beta[k, 2]/sqrt(sigma2[k])
probs[, k] = eta[m, k] * dsn(y, xi[m, k], omega[m, k], alpha[m, k])
}
# classification
pnorm = probs/rowSums(probs)
## if the initial classification is bad then some data points won't be
# allocated to any components and some rows will be
# NAs (because of division by zero). We correct this by replacing NAs with
# equal probabilities
NA_id = which(is.na(pnorm[,1]))
pnorm[NA_id, ] = 1/ncol(pnorm)
S = t(apply(pnorm, 1, function(x) rmultinom(n = 1, size = 1, prob = x)))
# 3. sample hyperparameters
## a. sample C0
C0 = rgamma(1, g0 + K*c0, G0 + sum(1/sigma2))
## SFM: MH step for e0
## Sample component probabilities hyperparameters: alpha0, using RWMH step
e0 = draw_e0(e0,a0,1/A0,eta[m, ])[[1]]
# compute log lik
lp[m] = sum(log(rowSums(probs)))
## counter
if(print){
if(m %% (round(nb_iter / 10)) == 0){
cat(paste(100 * m / nb_iter, ' % draws finished'), fill=TRUE)
}
}
}
# output
mcmc_result = cbind(eta, xi, omega, alpha, lp)
colnames(mcmc_result) = 1:ncol(mcmc_result)
for (i in 1:K){
colnames(mcmc_result)[c(i, K+i, 2*K+i,3*K+i)] = c(paste0("eta", i),
paste0("xi", i),
paste0("omega", i),
paste0("alpha", i))
}
colnames(mcmc_result)[ncol(mcmc_result)] = "loglik"
return(mcmc_result)
}
#' @keywords internal
gibbs_SFM_sp <- function(y,
K,
nb_iter,
priors = list(),
print = TRUE){
# unpacking priors
a0 = priors$a0
A0 = priors$A0
l0 = priors$l0
L0 = priors$L0
n_obs <- length(y)
# Initial conditions
cl_y <- kmeans(y, centers = K, nstart = 30)
S <- matrix(0,length(y),K)
for (k in 1:K) {
S[cl_y$cluster==k ,k] = 1
}
e0 = a0/A0
# storage matrices
kappa = matrix(data=NA,nrow=nb_iter,ncol=K)
lambda = matrix(data=NA,nrow=nb_iter,ncol=K)
eta = matrix(data=NA,nrow=nb_iter,ncol=K)
probs = matrix(data=NaN,nrow=n_obs,ncol=K)
lp = matrix(0, nb_iter, 1)
kappa_m = rep(0,K)
lambda_m = rep(1,K)
## Sample lamda and S for each component, k=1,...,k
for(m in 1:nb_iter){
# Compute number of observations allocated in each component
N = colSums(S)
## sample component proportion
eta[m, ] = rdirichlet(1, e0 + N)
for (k in 1:K){
if (N[k]==0) {
yk = 0
} else {
yk = y[S[, k]==1]
}
# Sample kappa using MH Step
kapub = min(yk)
if (length(kapub) == 0) {# Set to zero if component is empty
kappa_m[k] = 0;
} else if (kapub < kappa_m[k]) {# Set to upper bound if outside boundary
kappa_m[k] = kapub
} else {
temp = draw_kap(yk, lambda_m[k], kappa_m[k], kaplb = 0, kapub) # Draw kappa from MH step
kappa_m[k] = temp[[1]]
}
# Sample lambda from Gamma distribution
lambda_m[k] = rgamma(1, shape = sum(yk) - N[k]*kappa_m[k] + l0,
scale = 1/(N[k] + L0))
#
probs[,k] = eta[m,k]*dpois(y - kappa_m[k], lambda_m[k])
}
# 2. classification
pnorm = probs/rowSums(probs)
## if the initial classification is bad then some data points won't be
# allocated to any components and some rows will be
# NAs (because of division by zero). We correct this by replacing NAs with
# equal probabilities
NA_id = which(is.na(pnorm[,1]))
pnorm[NA_id, ] = 1/ncol(pnorm)
S = t(apply(pnorm, 1, function(x) rmultinom(n = 1,size=1,prob=x)))
## Sample component probabilities hyperparameters: alpha0, using RWMH step
e0 = draw_e0(e0,a0,1/A0,eta[m, ])[[1]]
# compute log lik
lp[m] = sum(log(rowSums(probs)))
# storing
lambda[m,] = lambda_m
kappa[m, ] = kappa_m
## counter
if(print){
if(m %% (round(nb_iter / 10)) == 0){
cat(paste(100 * m / nb_iter, ' % draws finished'), fill=TRUE)
}
}
}
# output
mcmc = cbind(eta, kappa, lambda, lp)
colnames(mcmc) = 1:ncol(mcmc)
for (i in 1:K){
colnames(mcmc)[c(i, K+i, 2*K+i)] = c(paste0("eta", i),
paste0("kappa", i),
paste0("lambda", i))
}
colnames(mcmc)[ncol(mcmc)] = "loglik"
# Return output
return(mcmc)
}
#' @keywords internal
check_priors <- function(priors, dist, data) {
assert_that(all(is.finite(unlist(priors))),
msg = "All priors should be finite.")
# all
priors$a0 = ifelse(is.null(priors$a0), 1, priors$a0)
priors$A0 = ifelse(is.null(priors$A0), 200, priors$A0)
assert_that(is.scalar(priors$a0), priors$a0 > 0, msg = "prior a0 should be a positive scalar")
assert_that(is.scalar(priors$A0), priors$A0 > 0, msg = "prior A0 should be a positive scalar")
if (dist == "shifted_poisson") {
priors_labels = c("a0", "A0", "l0", "L0")
priors$l0 = ifelse(is.null(priors$l0), 5, priors$l0)
priors$L0 = ifelse(is.null(priors$L0), priors$l0 - 1, priors$L0)
}
if (dist == "poisson") {
priors_labels = c("a0", "A0", "l0", "L0")
priors$l0 = ifelse(is.null(priors$l0), 1.1, priors$l0)
priors$L0 = ifelse(is.null(priors$L0), 1.1/median(data), priors$L0)
}
if (dist %in% c("shifted_poisson", "poisson")) {
assert_that(is.scalar(priors$L0), priors$L0 > 0, msg = "prior L0 should be a positive scalar")
assert_that(is.scalar(priors$l0), priors$l0 > 0, msg = "prior l0 should be a positive scalar")
}
if (dist == "normal") {
priors_labels = c("a0", "A0", "b0", "B0", "c0", "g0", "G0")
priors$b0 = ifelse(is.null(priors$b0), median(data), priors$b0)
priors$B0 = ifelse(is.null(priors$B0), (max(data) - min(data))^2, priors$B0)
priors$c0 = ifelse(is.null(priors$c0), 2.5, priors$c0)
priors$g0 = ifelse(is.null(priors$g0), 0.5, priors$g0)
priors$G0 = ifelse(is.null(priors$G0), 100*priors$g0/priors$c0/priors$B0, priors$G0)
assert_that(is.scalar(priors$b0), msg = "prior b0 should be a scalar")
assert_that(is.scalar(priors$B0), priors$B0 > 0, msg = "prior B0 should be a positive scalar")
assert_that(is.scalar(priors$c0), priors$c0 > 0, msg = "prior c0 should be a positive scalar")
assert_that(is.scalar(priors$g0), priors$g0 > 0, msg = "prior g0 should be a positive scalar")
assert_that(is.scalar(priors$G0), priors$G0 > 0, msg = "prior G0 should be a positive scalar")
}
if (dist == "skew_normal") {
priors_labels = c("a0", "A0", "b0", "c0", "C0", "g0", "G0", "D_xi", "D_psi")
priors$b0 = ifelse(is.null(priors$b0), median(data), priors$b0)
priors$c0 = ifelse(is.null(priors$c0), 2.5, priors$c0)
priors$C0 = ifelse(is.null(priors$C0), 0.5*var(data), priors$C0)
priors$g0 = ifelse(is.null(priors$g0), 0.5, priors$g0)
priors$G0 = ifelse(is.null(priors$G0), priors$g0/(0.5*var(data)), priors$G0)
priors$D_xi = ifelse(is.null(priors$D_xi), 1, priors$D_xi)
priors$D_psi = ifelse(is.null(priors$D_psi), 1, priors$D_psi)
assert_that(is.scalar(priors$b0), msg = "prior b0 should be a scalar")
assert_that(is.scalar(priors$D_xi), priors$D_xi > 0, msg = "prior D_xi should be a positive scalar")
assert_that(is.scalar(priors$D_psi), priors$D_psi > 0, msg = "prior D_psi should be a positive scalar")
assert_that(is.scalar(priors$c0), priors$c0 > 0, msg = "prior c0 should be a positive scalar")
assert_that(is.scalar(priors$C0), priors$C0 > 0, msg = "prior C0 should be a positive scalar")
assert_that(is.scalar(priors$g0), priors$g0 > 0, msg = "prior g0 should be a positive scalar")
assert_that(is.scalar(priors$G0), priors$G0 > 0, msg = "prior G0 should be a positive scalar")
}
isnot = which(!names(priors) %in% priors_labels)
if (length(isnot) > 0) {
warning(paste("prior(s)", names(priors)[isnot], "not needed when estimating a", dist, "mixture."))
}
return (priors[priors_labels])
}
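## Editor's sketch (hypothetical data): the defaults this internal helper fills
## in for a normal mixture when an empty prior list is supplied:
# y <- rnorm(100)
# check_priors(list(), "normal", y)
# # a0 = 1, A0 = 200, b0 = median(y), B0 = (max(y) - min(y))^2,
# # c0 = 2.5, g0 = 0.5, G0 = 100 * g0 / (c0 * B0)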
## functions used in the SFM MCMC algorithm
# Posterior of kappa
#' @keywords internal
post_kap <- function(x,LAMBDA,KAPPA) {
n = length(x) # Number of elements in the component
exp(-LAMBDA)*(LAMBDA^(sum(x)-n*KAPPA))/prod(factorial(x-KAPPA))
}
# Draw kappa from posterior using MH step
#' @keywords internal
draw_kap <- function(x,LAMBDA,KAPPA,kaplb,kapub) {
n = length(x) # Number of elements in the component
KAPPAhat = sample(kaplb:kapub, 1) # Draw candidate from uniform proposal
accratio = post_kap(x,LAMBDA,KAPPAhat)/post_kap(x,LAMBDA,KAPPA) # Acceptance ratio
if(is.na(accratio)){
accprob = 1 # Acceptance probability if denominator = inf (numerical error due to large number)
} else {
accprob = min(c(accratio,1)) # Acceptance probability if denominator not 0
}
rand = runif(1, min = 0, max = 1) # Random draw from uniform (0,1)
if (rand < accprob) {
KAPPA = KAPPAhat; # update
acc = 1; # indicate update
} else{
KAPPA = KAPPA; # don't update
acc = 0; # indicate failure to update
}
out <- list(KAPPA, acc) # Store output in list
return(out) # Return output
}
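## Editor's sketch (hypothetical values): one MH update of the shift parameter,
## mirroring the call made in gibbs_SFM_sp() above:
# yk <- c(3, 4, 4, 6)
# res <- draw_kap(yk, LAMBDA = 2, KAPPA = 1, kaplb = 0, kapub = min(yk))
# kappa_new <- res[[1]]   # accepted candidate or previous value
# accepted  <- res[[2]]   # 1 if the candidate was accepted, 0 otherwise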
# Posterior of e0 - Unnormalized target pdf
#' @keywords internal
post_e0 <- function(e0,nu0_p,S0_p,p) {
K = length(p) # Number of components
dgamma(e0,shape = nu0_p, scale = S0_p)*gamma(K*e0)/(gamma(e0)^K)*((prod(p))^(e0-1))
}
# Draw from the unnormalized target pdf for hyperparameter e0
#' @keywords internal
draw_e0 <- function(e0,nu0,S0,p){
# Define terms
e0hat = e0 + rnorm(1,0,0.1) # Draw a candidate from Random walk proposal
accratio = post_e0(e0hat,nu0,S0,p)/post_e0(e0,nu0,S0,p) # Acceptance ratio
if(is.na(accratio)){
accprob = 0 # Acceptance probability if denominator = 0
} else {
accprob = min(c(accratio,1)) # Acceptance probability if denominator not 0
}
# MH Step
rand = runif(1, min = 0, max = 1) # Random draw from uniform (0,1)
if (rand < accprob) {
e0 = e0hat; # update
acc = 1; # indicate update
} else{
e0 = e0; # don't update
acc = 0; # indicate failure to update
}
out <- list(e0, acc) # Store output in list
return(out) # Return output
}
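## Editor's sketch (hypothetical values): one random-walk MH update of the
## Dirichlet concentration parameter e0, matching the call in the Gibbs
## samplers above, where nu0 = a0, S0 = 1/A0 and p holds the current weights:
# eta_current <- c(0.6, 0.3, 0.1)
# res <- draw_e0(e0 = 1, nu0 = 1, S0 = 1/200, p = eta_current)
# e0_new   <- res[[1]]    # updated (or unchanged) concentration parameter
# accepted <- res[[2]]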
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/gibbs_sfm_algos.R
|
#' Mode estimation
#'
#' Mode estimation in univariate mixture distributions.
#' The fixed-point algorithm of \insertCite{carreira-perpinan_mode-finding_2000;textual}{BayesMultiMode} is used for Gaussian mixtures.
#' The Modal EM algorithm of \insertCite{li_nonparametric_2007;textual}{BayesMultiMode} is used for other continuous mixtures.
#' A basic algorithm is used for discrete mixtures, see \insertCite{Cross2024;textual}{BayesMultiMode}.
#'
#' @param mixture An object of class `mixture` generated with [mixture()].
#' @param tol_mixp Components with a mixture proportion below `tol_mixp` are discarded when estimating modes;
#' note that this does not apply to the biggest component so that it is not possible to discard all components;
#' should be between `0` and `1`; default is `0`.
#' @param tol_x (for continuous mixtures) Tolerance parameter for distance in-between modes; default is `1e-6`; if two modes are closer than `tol_x` the first estimated mode is kept.
#' @param tol_conv (for continuous mixtures) Tolerance parameter for convergence of the algorithm; default is `1e-8`.
#' @param type (for discrete mixtures) Type of modes, either `"unique"` or `"all"` (the latter includes flat modes); default is `"all"`.
#' @param inside_range Should modes outside of `mixture$range` be discarded? Default is `TRUE`.
#' Modes sometimes fall outside the range when components are very small and `K` is large.
#'
#' @return A list of class `mix_mode` containing:
#' \item{mode_estimates}{estimates of the mixture modes.}
#' \item{algo}{algorithm used for mode estimation.}
#' \item{dist}{from `mixture`.}
#' \item{dist_type}{type of mixture distribution, i.e. continuous or discrete.}
#' \item{pars}{from `mixture`.}
#' \item{pdf_func}{from `mixture`.}
#' \item{K}{from `mixture`.}
#' \item{nb_var}{from `mixture`.}
#'
#' @references
#' \insertRef{Cross2024}{BayesMultiMode}
#'
#' @details
#'
#' This function finds modes in a univariate mixture defined as:
#' \deqn{p(.) = \sum_{k=1}^{K}\pi_k p_k(.),}
#' where \eqn{p_k} is a density or probability mass/density function.
#'
#' **Fixed-point algorithm**
#' Following \insertCite{carreira-perpinan_mode-finding_2000;textual}{BayesMultiMode}, a mode \eqn{x} is found by iterating the two steps:
#' \deqn{(i) \quad p(k|x^{(n)}) = \frac{\pi_k p_k(x^{(n)})}{p(x^{(n)})},}
#' \deqn{(ii) \quad x^{(n+1)} = f(x^{(n)}),}
#' with
#' \deqn{f(x) = \left(\sum_k p(k|x) \sigma_k^{-2}\right)^{-1}\sum_k p(k|x) \sigma_k^{-2} \mu_k,}
#' until convergence, that is, until \eqn{abs(x^{(n+1)}-x^{(n)})< \text{tol}_\text{conv}},
#' where \eqn{\text{tol}_\text{conv}} is an argument with default value \eqn{1e-8}.
#' Following Carreira-Perpinan (2000), the algorithm is started at each component location.
#' Separately, it is necessary to identify identical modes which differ only by
#' a small value; this tolerance value can be controlled with the argument
#' `tol_x`.
#'
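#' A minimal sketch of one fixed-point update for a Gaussian mixture (editor's
#' illustration; `fp_step` is a hypothetical name, not part of the package API):
#' ```
#' fp_step <- function(x, eta, mu, sigma) {
#'   r <- eta * dnorm(x, mu, sigma)   # proportional to p(k|x)
#'   r <- r / sum(r)
#'   sum(r * mu / sigma^2) / sum(r / sigma^2)
#' }
#' ```
#'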
#' **MEM algorithm**
#' Following \insertCite{li_nonparametric_2007;textual}{BayesMultiMode}, a mode \eqn{x} is found by iterating the two steps:
#' \deqn{(i) \quad p(k|x^{(n)}) = \frac{\pi_k p_k(x^{(n)})}{p(x^{(n)})},}
#' \deqn{(ii) \quad x^{(n+1)} = \text{argmax}_x \sum_k p(k|x^{(n)}) \log p_k(x),}
#' until convergence, that is, until \eqn{abs(x^{(n+1)}-x^{(n)})< \text{tol}_\text{conv}},
#' where \eqn{\text{tol}_\text{conv}} is an argument with default value \eqn{1e-8}.
#' The algorithm is started at each component location.
#' Separately, it is necessary to identify identical modes which differ only by
#' a small value. Modes which are closer than `tol_x` are merged.
#'
#' **Discrete method**
#' By definition, modes must satisfy either:
#' \deqn{p(y_{m}-1) < p(y_{m}) > p(y_{m}+1);}
#' \deqn{p(y_{m}-1) < p(y_{m}) = p(y_{m}+1) = \ldots = p(y_{m}+l-1) > p(y_{m}+l).}
#'
#' The algorithm evaluates each location point against these two conditions.
#'
#' @references
#' \insertAllCited{}
#'
#' @importFrom assertthat assert_that
#' @importFrom assertthat is.string
#' @importFrom assertthat is.scalar
#' @importFrom sn dst
#' @importFrom stats dnorm sd optim na.omit var
#'
#' @examples
#'
#' # Example with a normal distribution ====================================
#' mu = c(0,5)
#' sigma = c(1,2)
#' p = c(0.5,0.5)
#'
#' params = c(eta = p, mu = mu, sigma = sigma)
#' mix = mixture(params, dist = "normal", range = c(-5,15))
#' modes = mix_mode(mix)
#'
#' # summary(modes)
#' # plot(modes)
#'
#' # Example with a skew normal =============================================
#' xi = c(0,6)
#' omega = c(1,2)
#' alpha = c(0,0)
#' p = c(0.8,0.2)
#' params = c(eta = p, xi = xi, omega = omega, alpha = alpha)
#' dist = "skew_normal"
#'
#' mix = mixture(params, dist = dist, range = c(-5,15))
#' modes = mix_mode(mix)
#' # summary(modes)
#' # plot(modes)
#'
#' # Example with an arbitrary continuous distribution ======================
#' xi = c(0,6)
#' omega = c(1,2)
#' alpha = c(0,0)
#' nu = c(3,100)
#' p = c(0.8,0.2)
#' params = c(eta = p, mu = xi, sigma = omega, xi = alpha, nu = nu)
#'
#' pdf_func <- function(x, pars) {
#' sn::dst(x, pars["mu"], pars["sigma"], pars["xi"], pars["nu"])
#' }
#'
#' mix = mixture(params, pdf_func = pdf_func,
#' dist_type = "continuous", loc = "mu", range = c(-5,15))
#' modes = mix_mode(mix)
#'
#' # summary(modes)
#' # plot(modes, from = -4, to = 4)
#'
#' # Example with a poisson distribution ====================================
#' lambda = c(0.1,10)
#' p = c(0.5,0.5)
#' params = c(eta = p, lambda = lambda)
#' dist = "poisson"
#'
#'
#' mix = mixture(params, range = c(0,50), dist = dist)
#'
#' modes = mix_mode(mix)
#'
#' # summary(modes)
#' # plot(modes)
#'
#' # Example with an arbitrary discrete distribution =======================
#' mu = c(20,5)
#' size = c(20,0.5)
#' p = c(0.5,0.5)
#' params = c(eta = p, mu = mu, size = size)
#'
#'
#' pmf_func <- function(x, pars) {
#' dnbinom(x, mu = pars["mu"], size = pars["size"])
#' }
#'
#' mix = mixture(params, range = c(0, 50),
#' pdf_func = pmf_func, dist_type = "discrete")
#' modes = mix_mode(mix)
#'
#' # summary(modes)
#' # plot(modes)
#'
#' @export
mix_mode <- function(mixture, tol_mixp = 0, tol_x = 1e-6, tol_conv = 1e-8, type = "all", inside_range = TRUE) {
assert_that(inherits(mixture, "mixture"), msg = "mixture should be an object of class mixture")
assert_that(all(c("pars", "pars_names", "dist_type",
"dist", "pdf_func", "range", "nb_var", "K") %in% names(mixture)),
msg = "mixture is not a proper mixture object.")
assert_that(is.scalar(tol_x) & tol_x > 0, msg = "tol_x should be a positive scalar")
assert_that(is.scalar(tol_mixp) & tol_mixp >= 0 & tol_mixp < 1, msg = "tol_mixp should be a positive scalar between 0 and 1")
assert_that(is.scalar(tol_conv) & tol_conv > 0, msg = "tol_conv should be a positive scalar")
pars = mixture$pars
pars_names = mixture$pars_names
dist = mixture$dist
dist_type = mixture$dist_type
pdf_func = mixture$pdf_func
range = mixture$range
mode = list()
mode$dist = dist
mode$pars = pars
mode$pdf_func = pdf_func
mode$K = mixture$K
mode$nb_var = mixture$nb_var
mode$range = range
pars_mat <- vec_to_mat(pars, pars_names)
tol_mixp_c = min(tol_mixp, max(pars_mat[, "eta"])) # the component with highest proportion cannot be excluded
pars_mat[, "eta"][pars_mat[, "eta"] < tol_mixp_c] = NA
pars_mat = na.omit(pars_mat) # remove empty components (a feature of some MCMC methods)
if (dist_type == "continuous") {
mode$dist_type = "continuous"
if (!is.na(dist) && dist == "normal") {
mode_estimates = fixed_point(pars_mat, tol_x, tol_conv)
mode$algo = "fixed-point"
} else {
loc = mixture$loc
mode_estimates = MEM(pars_mat, pdf_func, loc, tol_x, tol_conv)
mode$algo = "Modal Expectation-Maximization (MEM)"
}
}
if (dist_type == "discrete") {
mode_estimates = discrete_MF(pars_mat, pdf_func, range, type)
mode$algo = "discrete"
mode$dist_type = "discrete"
}
if (!is.null(range) & inside_range) {
# discard modes outside of the data range
mode_estimates = mode_estimates[mode_estimates >= range[1]]
mode_estimates = mode_estimates[mode_estimates <= range[2]]
}
mode$mode_estimates = mode_estimates
class(mode) = "mix_mode"
return(mode)
}
### internal functions
#' @keywords internal
fixed_point <- function(pars, tol_x = 1e-6, tol_conv = 1e-8) {
modes = rep(NA_real_, nrow(pars))
p = pars[ ,"eta"]
mu = pars[, "mu"]
sigma = pars[, "sigma"]
iter = 0
for (i in 1:length(mu)) {
x = mu[i]
delta = 1
while (delta > tol_conv) {
iter = iter + 1
x1 = f_fp(x, p, mu, sigma)
if (!is.finite(x1)) {
stop(paste("Error in the fixed-point algorithm;\n",
"The normal mixture evaluated at", x,
"does not have a finite likelihood."))
}
delta = abs(x - x1)
x = x1
}
## check that the mode is not too close to other modes
if(any(!is.na(modes))){
diff = abs(x-modes)
diff = diff[!is.na(diff)]
if (!any(diff<tol_x)) {
modes[i] = x
}
} else {
modes[i] = x
}
}
modes = modes[!is.na(modes)]
return(modes)
}
#' @keywords internal
f_fp <- function(x, p, mu, sigma) {
pmx = dnorm(x, mu, sigma) * p
pmx = pmx/sum(pmx)
if (any(is.na(pmx))) {
# x yields a density of zero
pmx = 1/length(pmx)
}
f = 1/sum(pmx/sigma^2) * sum(pmx/sigma^2*mu)
return(f)
}
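## Illustrative sketch (not run): the fixed-point update above can be iterated
## directly on a hypothetical two-component normal mixture; the values below
## are for illustration only and are not part of the package API.
if (FALSE) {
p <- c(0.5, 0.5); mu <- c(0, 5); sigma <- c(1, 2)
x <- mu[1] # start at a component location, as in fixed_point()
repeat {
x1 <- f_fp(x, p, mu, sigma) # precision-weighted mean update
if (abs(x1 - x) < 1e-8) break # tol_conv
x <- x1
}
x # converges to the mode of the mixture near 0
}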
#' @keywords internal
MEM <- function(pars, pdf_func, loc, tol_x = 1e-6, tol_conv = 1e-8) {
modes = rep(NA_real_, nrow(pars))
nK = nrow(pars)
post_prob = rep(NA_real_, nK)
for (j in 1:nK) {
x = pars[j,loc]
delta = 1
while (delta > tol_conv) {
# E-step
f_mix = pdf_func_mix(x, pars, pdf_func)
for (k in 1:nK){
post_prob[k] = pars[k, "eta"] * pdf_func(x, pars[k, ])/f_mix
}
# M-step
Min = optim(par = x, Q_func, method = "L-BFGS-B",
post_prob = post_prob,
pars = pars,
pdf_func = pdf_func,
control = list(fnscale = -1))
x1 = Min$par
# check convergence and increment
delta = abs(x - x1)
x = x1
}
## check that the mode is not too close to other modes
if(any(!is.na(modes))){
diff = abs(x-modes)
diff = diff[!is.na(diff)]
if (!any(diff<tol_x)) {
modes[j] = x
}
} else {
modes[j] = x
}
}
modes = modes[!is.na(modes)]
return(modes)
}
#' @keywords internal
Q_func = function(x, post_prob, pars, pdf_func){
pdf = rep(NA, nrow(pars))
for (i in 1:nrow(pars)) {
pdf[i] = pdf_func(x, pars[i,])
}
pdf[pdf == 0] = 1e-10 # otherwise the log operation below can return -Inf
Q = sum(post_prob * log(pdf))
if(!is.finite(Q)){
# stop("Q function is not finite")
stop(paste("Error in the MEM algorithm;\n",
"The mixture of pdf_func evaluated at", x,
"does not have a finite likelihood."))
}
return(Q)
}
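## Illustrative sketch (not run): one M-step maximises Q over x given fixed
## posterior component probabilities; the parameter values below are
## hypothetical.
if (FALSE) {
pars <- matrix(c(0.5, 0.5, 0, 5, 1, 2), ncol = 3,
dimnames = list(NULL, c("eta", "mu", "sigma")))
pdf_func <- function(x, pars) dnorm(x, pars["mu"], pars["sigma"])
post_prob <- c(0.9, 0.1) # E-step output, held fixed here for illustration
optim(par = 0, Q_func, method = "L-BFGS-B",
post_prob = post_prob, pars = pars, pdf_func = pdf_func,
control = list(fnscale = -1))$par # precision-weighted mean, approx. 0.14
}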
#' @keywords internal
discrete_MF <- function(pars, pdf_func, range, type = "all"){
## input checks
assert_that(is.string(type),
msg = "type must be a string")
assert_that(type %in% c("unique", "all"),
msg = "type must be either 'unique' or 'all' ")
##
##
x = range[1]:range[2]
##
### Getting density
py = pdf_func_mix(x, pars, pdf_func)
# change in the pdf
d_py = diff(py)
# where does the pdf decrease ?
x_decrease = x[d_py<0]
if (length(x_decrease) == 0) {
stop("The mixture pmf does not peak in the range provided; no modes can be found.")
}
# Only keep the points where the pdf starts to decrease; these are modes
d2_py = c(0, x_decrease[-1] - x_decrease[-length(x_decrease)])
x_decrease = x_decrease[which(d2_py!=1)]
# get pdf at these modes
pdf_modes = py[x %in% x_decrease]
# get all the points at these peaks (there might be flat modes)
loc_modes = x[which(py %in% pdf_modes)]
if (length(loc_modes) != length(x_decrease)) {
warning("Some modes are flat.")
}
modes = rep(NA_real_, length(x))
if (type == "unique") {
modes[1:length(x_decrease)] = x_decrease
}
if (type == "all") {
modes[1:length(loc_modes)] = loc_modes
}
modes = modes[!is.na(modes)]
return(modes)
}
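## Illustrative sketch (not run): the two mode conditions used above can be
## checked directly on a pmf evaluated over a grid; the two-component Poisson
## mixture below is hypothetical.
if (FALSE) {
x <- 0:30
py <- 0.5 * dpois(x, 1) + 0.5 * dpois(x, 10)
d_py <- diff(py)
# interior points where the pmf stops increasing and starts decreasing
is_mode <- c(FALSE, d_py[-length(d_py)] > 0 & d_py[-1] < 0, FALSE)
x[is_mode] # modes at 1 and 9
}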
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/mix_mode.R
|
#' Creating a S3 object of class `mixture`
#'
#' Creates an object of class `mixture` which can subsequently be used as argument in [mix_mode()] for mode estimation.
#'
#' @param pars Named vector of mixture parameters.
#' @param dist Distribution family of the mixture components supported by
#' the package (i.e. `"normal"`, `"student"`, `"skew_normal"` or `"shifted_poisson"`).
#' If left unspecified, `pdf_func` is required.
#' @param pdf_func (function) Pdf or pmf of the mixture components;
#' this input is used only if `dist` is left unspecified.
#' pdf_func should have two arguments: (i) the observation where the pdf is evaluated;
#' (ii) a named vector representing the function parameters. For instance a normal pdf would take the form:
#' `pdf_func <- function(x, par) dnorm(x, par['mu'], par['sigma'])`.
#' The names used in `par` should match the parameter names in `pars` stripped of
#' their component indices, e.g. entries `"mu1"`, `"mu2"` in `pars` are accessed as `par['mu']`.
#' @param dist_type Type of the distribution, either `"continuous"` or `"discrete"`.
#' @param range Vector of length 2; lower and upper limits of the range over which the mixture is evaluated.
#' @param loc (for continuous mixtures other than Normal mixtures) String indicating the location parameter
#' of the distribution; the latter is used to initialise the MEM algorithm.
#'
#' @returns
#' A list of class `mixture` containing:
#' \item{pars}{Same as argument.}
#' \item{pars_names}{Names of the parameters of the components' distribution.}
#' \item{dist}{Same as argument.}
#' \item{pdf_func}{Pdf (or pmf) of the mixture components.}
#' \item{dist_type}{Same as argument.}
#' \item{loc}{String indicating the location parameter of the component distribution; used to initialise the MEM algorithm.}
#' \item{nb_var}{Number of parameters in the mixture distribution.}
#' \item{K}{Number of mixture components.}
#' \item{range}{Same as argument.}
#'
#' @examples
#'
#' # Example with the skew normal =============================================
#' xi = c(0,6)
#' omega = c(1,2)
#' alpha = c(0,0)
#' p = c(0.8,0.2)
#' params = c(eta = p, xi = xi, omega = omega, alpha = alpha)
#' dist = "skew_normal"
#'
#' mix = mixture(params, dist = dist, range = c(-2,10))
#'
#' # summary(mix)
#' # plot(mix)
#'
#' # Example with an arbitrary distribution ===================================
#' mu = c(0,6)
#' omega = c(1,2)
#' xi = c(0,0)
#' nu = c(3,100)
#' p = c(0.8,0.2)
#' params = c(eta = p, mu = mu, sigma = omega, xi = xi, nu = nu)
#'
#' pdf_func <- function(x, pars) {
#' sn::dst(x, pars["mu"], pars["sigma"], pars["xi"], pars["nu"])
#' }
#'
#'
#' mix = mixture(params, pdf_func = pdf_func,
#' dist_type = "continuous", loc = "mu", range = c(-2,10))
#'
#' # summary(mix)
#' # plot(mix, from = -4, to = 4)
#'
#' @export
mixture <- function(pars,
dist = NA_character_,
pdf_func = NULL,
dist_type = NA_character_,
range,
loc = NA_character_) {
## input checks
assert_that(is.string(dist))
assert_that(is.string(dist_type))
assert_that(is.vector(pars))
assert_that(!is.null(names(pars)),
msg = "element of pars should have names")
assert_that(!(is.na(dist) & is.null(pdf_func)),
msg = "one of dist or pdf_func must be specified")
pars_names = unique(str_extract(names(pars), "[a-z]+"))
list_func = test_and_export(pars, pdf_func, dist, pars_names, dist_type, loc)
assert_that(!is.null(range),
msg = "range argument must be provided")
assert_that(is.vector(range) & length(range) == 2,
msg = "range should be a vector of length 2")
assert_that(all(is.finite(range)),
msg = "lower and upper limits of range should be finite")
assert_that(range[2] > range[1],
msg = "upper limit of range not greater than lower limit")
if (dist %in% c("poisson", "shifted_poisson")) {
assert_that(all(range>=0),
msg = "lower limit should be greater or equal than zero when using the Poisson or shifted Poisson.")
}
mixture = list(pars = pars,
pars_names = pars_names,
dist_type = list_func$dist_type,
dist = dist,
pdf_func = list_func$pdf_func,
range = range,
loc = list_func$loc,
nb_var = length(pars_names) - 1, #minus the shares
K = list_func$K)
class(mixture) <- "mixture"
return(mixture)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/mixture.R
|
#' @importFrom sn dst
#' @importFrom sn dsn
#' @importFrom stats dnorm
#'
# Mixture of pdf_func
#' @keywords internal
pdf_func_mix <- function(x, pars, pdf_func) {
pdf_func = match.fun(pdf_func) #solves NOTE "pdf_func is undefined"
mixture = 0
for (i in 1:nrow(pars)) {
mixture = mixture + pars[i, "eta"] * pdf_func(x, pars[i,])
}
return(mixture)
}
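## Illustrative sketch (not run): evaluating a two-component normal mixture
## density at a point; the parameter matrix below follows the layout produced
## by vec_to_mat() and its values are hypothetical.
if (FALSE) {
pars <- matrix(c(0.5, 0.5, 0, 5, 1, 2), ncol = 3,
dimnames = list(NULL, c("eta", "mu", "sigma")))
pdf_func <- function(x, pars) dnorm(x, pars["mu"], pars["sigma"])
pdf_func_mix(0, pars, pdf_func) # 0.5 * dnorm(0, 0, 1) + 0.5 * dnorm(0, 5, 2)
}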
#' @keywords internal
test_and_export <- function(p, pdf_func, dist, pars_names, dist_type, loc) {
par_type = deparse(substitute(p))
par_type = str_extract(par_type, "[a-z]+")
list_func = list()
K = length(p)/length(pars_names)
assert_that(K%%1==0,
msg = paste("All variables in", par_type, "must have the same number of components."))
assert_that("eta" %in% pars_names,
msg = paste(par_type,
"should include a variable named eta representing mixture proportions."))
pars_mat = vec_to_mat(p, pars_names)
pars_mat = na.omit(pars_mat)
assert_that(nrow(pars_mat) > 1,
msg = paste("there must be at least two components per variable in",
par_type))
assert_that(round(sum(pars_mat[, "eta"], na.rm = T),2)==1,
msg = "The mixture proportions, eta, should sum to one.")
if (!is.null(pdf_func)) {
assert_that(is.function(pdf_func),
msg = "pdf_func must be a function")
func_test = try(pdf_func(1, pars_mat[1,]), silent = T)
assert_that(!("try-error" %in% class(func_test)),
msg = paste0("running pdf_func returns an error; ",
"\n pdf_func should have two arguments :",
"\n the first argument represents the observation where the pdf is evaluated;",
"\n the second argument is a named vector representing the function parameters;",
"\n for instance: \n pdf_func <- function(x, pars) dnorm(x, pars['mu'], pars['sigma'])"))
assert_that(!is.na(pdf_func(1, pars_mat[1,])),
msg = "running pdf_func for the first component returns NA")
assert_that(!is.na(dist_type),
msg = "dist_type must be provided when argument pdf_func is used; \n i.e. 'continuous' or 'discrete'")
assert_that(dist_type %in% c("continuous", "discrete"),
msg = "dist_type should be either 'discrete' or 'continuous'")
}
msg_0 = paste0("variable names in ", par_type, " should be ")
if (!is.na(dist)) {
if (dist == "poisson"){
assert_that(sum(pars_names %in% c("eta", "lambda"))==2,
msg = paste0(msg_0, "eta and lambda when dist = poisson"))
pdf_func <- function(x, pars) dpois(x, pars["lambda"])
}
if (dist == "shifted_poisson"){
assert_that(sum(pars_names %in% c("eta", "kappa", "lambda"))==3,
msg = paste0(msg_0, "eta and lambda when dist = shifted_poisson"))
pdf_func <- function(x, pars) dpois(x - pars["kappa"], pars["lambda"])
}
if (dist == "normal"){
assert_that(sum(pars_names %in% c("eta", "mu", "sigma"))==3,
msg = paste0(msg_0, "eta, mu and sigma when dist = normal"))
pdf_func <- function(x, pars) dnorm(x, pars["mu"], pars["sigma"])
}
if (dist == "skew_normal"){
assert_that(sum(pars_names %in% c("eta", "xi", "omega", "alpha"))==4,
msg = paste0(msg_0, "eta, xi, omega and alpha when dist = skew_normal"))
pdf_func <- function(x, pars) dsn(x, pars["xi"], pars["omega"], pars["alpha"])
loc = "xi"
}
if (dist %in% c("normal", "skew_normal")) {
dist_type = "continuous"
} else if (dist %in% c("poisson", "shifted_poisson")) {
dist_type = "discrete"
} else {
stop("Unsupported distribution; dist should be either normal, skew_normal, poisson or shifted_poisson")
}
}
if ((is.na(dist) | !(dist %in% c("normal", "skew_normal"))) & dist_type != "discrete") {
assert_that(!is.na(loc) & is.string(loc),
msg = paste0("loc argument must be given when using a continuous distribution other than the normal distribution;",
"\n loc should be the location parameter of pdf_func"))
assert_that(loc %in% pars_names[pars_names!="eta"],
msg = paste0("loc must a parameter included in ", par_type, " other than eta"))
}
list_func$dist_type = dist_type
list_func$pdf_func = pdf_func
list_func$loc = loc
list_func$K = K
return(list_func)
}
#' @keywords internal
vec_to_mat <- function(pars, pars_names) {
pars_mat = c()
for (i in 1:length(pars_names)) {
pars_mat = cbind(pars_mat, pars[grep(pars_names[i], names(pars))])
}
pars_mat = matrix(pars_mat, ncol = length(pars_names),
dimnames = list(NULL, pars_names))
# colnames(pars_mat) <- pars_names
return(pars_mat)
}
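## Illustrative sketch (not run): vec_to_mat() stacks parameters by name into
## one row per component; the named vector below is hypothetical.
if (FALSE) {
pars <- c(eta1 = 0.8, eta2 = 0.2, mu1 = 0, mu2 = 6)
vec_to_mat(pars, c("eta", "mu"))
#      eta mu
# [1,] 0.8  0
# [2,] 0.2  6
}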
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/mixture_functions.R
|
#' Plot method for `bayes_mixture` objects
#'
#' Plot an estimated mixture for a given number of draws with a frequency distribution of the data.
#'
#' @param x An object of class `bayes_mixture`.
#' @param draws The number of MCMC draws to plot.
#' @param draw Plot the estimated mixture of a single MCMC draw `draw`; note that `draws` is then ignored. Default is `NULL`.
#' @param bins (for continuous mixtures) Number of bins for the histogram of
#' the data. Passed to `geom_histogram()`.
#' @param alpha Transparency of the density lines. Default is 0.1. Should be between 0 and 1.
#' @param ... Not used.
#'
#' @importFrom ggpubr ggarrange
#' @importFrom assertthat assert_that
#' @importFrom dplyr tibble
#' @importFrom dplyr mutate
#' @importFrom dplyr as_tibble
#' @importFrom dplyr filter
#' @importFrom dplyr left_join
#' @importFrom tidyr gather
#' @importFrom magrittr %>%
#' @importFrom magrittr %<>%
#' @import ggplot2
#'
#' @export
plot.bayes_mixture <- function(x, draws = 250,
draw = NULL,
bins = 30,
alpha = 0.1, ...) {
density <- component <- value <- NULL
if (!is.null(draw)) {
assert_that(is.scalar(draw), round(draw) == draw, draw <= nrow(x$mcmc),
draw > 0,
msg = paste("draw should be an integer greater than zero and ",
"inferior to the number of MCMC draws in",
deparse(substitute(x))))
mix = mixture(x$mcmc[draw,],
dist = x$dist,
pdf_func = x$pdf_func,
dist_type = x$dist_type,
loc = x$loc,
range = c(min(x$data), max(x$data)))
plot(mix)
} else {
assert_that(is.scalar(alpha) & alpha >= 0 & alpha <= 1,
msg = "alpha should be a scalar between zero and one")
mcmc = x$mcmc
pdf_func = x$pdf_func
y = x$data
pars_names = x$pars_names
if (x$dist_type == "continuous") {
## plot the data
g = ggplot(data.frame(y = y), aes(y)) +
theme_gg +
theme(legend.position="none") +
xlab("") + ylab("Density") +
geom_histogram(aes(y = after_stat(density)),
fill="grey33",
colour = "white",
bins = bins)
## plot the mixture for each draw
for (i in sample(nrow(mcmc),min(nrow(mcmc), draws))) {
pars = vec_to_mat(mcmc[i, , drop = T], pars_names)
pars = na.omit(pars)
g = g +
geom_function(fun = pdf_func_mix,
args = list(pdf_func = pdf_func,
pars = pars),
alpha = alpha,
colour = "#FF6347")
}
}
if (x$dist_type == "discrete") {
####### Discrete distribution
d_y = rep(NA,length(unique(y)))
for (i in 1:length(d_y)){
d_y[i] = length(y[y==unique(y)[i]])/length(y)
}
df_y_temp = tibble(density = d_y,
x = unique(y))
x_all = seq(min(y),max(y),1)
mixture_uncertainty = matrix(NA, length(x_all), draws)
j = 1
for (i in sample(nrow(mcmc),min(nrow(mcmc), draws))) {
##
pars = vec_to_mat(mcmc[i, ], pars_names)
pars = na.omit(pars)
mixture_uncertainty[,j] = pdf_func_mix(x_all, pars, pdf_func)
j = j+1
}
#
df_y = tibble(x = seq(min(y),max(y),1)) %>%
left_join(df_y_temp, by=c("x"="x")) %>%
mutate(density = ifelse(is.na(density),0,density)) %>%
cbind(mixture_uncertainty)
df_y %<>%
gather(-x,-density,key="component",value="value")
g = ggplot(df_y, aes(x=x)) +
theme_gg +
theme(legend.position="none") +
xlab("") + ylab("Probability") +
geom_col(data = filter(df_y,component=="1"),aes(y=density,fill="grey33"),colour="white",alpha=1) +
geom_line(aes(y=value,colour=component),alpha= alpha) +
scale_colour_manual(values=rep("#FF6347",length(unique(df_y$component)))) +
scale_fill_manual(name = "",
values = c("grey33"), # Color specification
labels = c("Data density"))
}
g
}
}
#' Plot method for `bayes_mode` objects
#'
#' @param x An object of class `bayes_mode`.
#' @param graphs Which plots to show? Default is all three: c("p1", "number", "loc").
#' @param draw Plot modes of a single MCMC draw; note that `graphs` is then ignored. Default is `NULL`.
#' @param ... Not used.
#'
#' @importFrom ggpubr ggarrange
#' @importFrom assertthat is.scalar
#' @import ggplot2
#'
#' @export
plot.bayes_mode <- function(x, graphs = c("p1", "number", "loc"), draw = NULL, ...) {
Pb <- value <- `posterior probability` <- `number of modes` <- `mode location` <- NULL
if (!is.null(draw)) {
BayesMix = x$BayesMix
assert_that(is.scalar(draw), round(draw) == draw, draw <= nrow(BayesMix$mcmc),
draw > 0,
msg = paste("draw should be an integer greater than zero and ",
"inferior to the number of MCMC draws in",
deparse(substitute(x))))
mix = mixture(BayesMix$mcmc[draw,],
dist = BayesMix$dist,
pdf_func = BayesMix$pdf_func,
dist_type = BayesMix$dist_type,
loc = BayesMix$loc,
range = x$range)
modes = mix_mode(mix, inside_range = F)
plot(modes)
} else {
assert_that(is.vector(graphs) & is.character(graphs),
msg = "graphs should be a character vector")
assert_that(sum(graphs %in% c("p1", "number", "loc"))>=1,
msg = "graphs should include at least p1, number or loc")
modes = x$modes
p1 = x$p1
p_nb_modes = x$p_nb_modes
p_mode_loc = x$p_mode_loc
df_g0 = tibble(Pb = "Pb",
value = (1-p1))
g0 = ggplot(data=df_g0, aes(x=Pb, y=value)) +
ggtitle("Nb. modes > 1") +
theme_gg +
ylim(0, 1) +
xlab("") + ylab("Posterior probability") +
geom_bar(stat="identity")
df_g1 = as_tibble(t(p_mode_loc))
g1 = ggplot(data=df_g1, aes(x = `mode location`, y = `posterior probability`)) +
theme_gg +
# scale_x_continuous(breaks=df_g1$possible_nb_modes) +
ggtitle("Mode locations") +
xlab("") + ylab("Posterior probability") +
geom_bar(stat="identity")
if (x$dist_type == "continuous") {
g1 = g1 + ylim(0, max(df_g1$`posterior probability`))
} else {
g1 = g1 + ylim(0, 1)
}
df_g2 = as_tibble(t(p_nb_modes))
g2= ggplot(data=df_g2, aes(x = `number of modes`, y = `posterior probability`)) +
theme_gg +
scale_x_continuous(breaks=df_g2$`number of modes`) +
ggtitle("Number of modes") +
ylim(0, 1) +
xlab("") + ylab("Posterior probability") +
geom_bar(stat="identity")
# selecting which graphs to show
plot_list = list()
i = 0
widths_p = rep(NA, length(graphs))
if("p1" %in% graphs) {
i = i + 1
plot_list[[i]] <- g0
widths_p[i] = 0.7
}
if("number" %in% graphs) {
i = i + 1
plot_list[[i]] <- g2
widths_p[i] = 1
}
if("loc" %in% graphs) {
i = i + 1
plot_list[[i]] <- g1
widths_p[i] = 1
}
if (i > 1) {
g <- ggarrange(plotlist = plot_list,
ncol = length(graphs), nrow = 1, widths = widths_p)
} else {
g <- plot_list[[i]]
}
g
}
}
#' Plot method for `mixture` objects
#'
#' @param x An object of class `mixture`.
#' @param from the lower limit of the range over which the function will be plotted.
#' Default is `x$range[1]`.
#' @param to the upper limit of the range over which the function will be plotted.
#' Default is `x$range[2]`.
#' @param ... Not used.
#'
#' @importFrom graphics curve
#'
#' @export
plot.mixture <- function(x, from = x$range[1], to = x$range[2], ...) {
pars = x$pars
pdf_func = x$pdf_func
dist = x$dist
assert_that(is.finite(from), is.finite(to),
msg = "from and to must be finite")
assert_that(from < to,
msg = "from must be lower than to")
if (x$dist_type == "continuous") {
par_names = str_extract(names(pars), "[a-z]+")
pars = vec_to_mat(pars, par_names)
pars = na.omit(pars)
curve(pdf_func_mix(x, pars, pdf_func), from = from,
to = to, xlab = "", ylab = "")
} else if (x$dist_type == "discrete") {
xx = round(from):round(to)
par_names = str_extract(names(pars), "[a-z]+")
pars_mat = vec_to_mat(pars, par_names)
pars_mat = na.omit(pars_mat)
py = pdf_func_mix(xx, pars_mat, pdf_func)
plot(xx, py, type = "h", xlab = "", ylab = "", lwd = 4,
xlim = c(from, to))
}
}
#' Plot method for `mix_mode` objects
#'
#' @param x An object of class `mix_mode`.
#' @param from the lower limit of the range over which the function will be plotted.
#' Default is `x$range[1]`.
#' @param to the upper limit of the range over which the function will be plotted.
#' Default is `x$range[2]`.
#' @param ... Not used.
#'
#' @importFrom graphics curve abline
#'
#' @export
plot.mix_mode <- function(x, from = x$range[1], to = x$range[2], ...) {
mix = mixture(x$pars, dist = x$dist,
pdf_func = x$pdf_func,
dist_type = x$dist_type,
range = x$range)
modes = x$mode_estimates
modes_outside = modes[modes > x$range[2] | modes < x$range[1]]
modes_inside = modes[modes <= x$range[2] & modes >= x$range[1]]
plot(mix, from = from, to = to)
for (m in modes_inside) {
abline(v = m, col = "red")
}
for (m in modes_outside) {
abline(v = m, lty = 2, col = "grey")
}
}
### ggplot theme
#' @keywords internal
theme_gg <- ggplot2::theme_bw()+ ggplot2::theme(strip.background=element_blank(),
strip.text=element_text(size=11),
title=element_text(size=11),
panel.border=element_blank(),
# panel.grid.major=element_blank(),
#panel.grid.minor=element_blank(),
#legend.key=element_rect(colour="white"),
legend.position="bottom",
legend.box.margin=margin(-10,0,-10,0),
legend.title=element_blank(),
legend.text=element_text(size=11),
axis.text=element_text(size=11),
axis.line.y = element_line(colour = 'grey', size=0.5, linetype="solid"),
axis.line.x = element_line(colour = 'grey', size=0.5, linetype="solid"),
plot.title=element_text(hjust=0.5, size=12, face="bold"))
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/plot_methods.R
|
#' Print method for \code{bayes_mode} objects
#'
#' @param x An object of class \code{bayes_mode}.
#' @param max_length maximum number of elements (for vector) or rows (for matrices) to show. Default is `6L`.
#' @param max_width maximum number of columns to show (for matrices). Default is `6L`.
#' @param print_all override max_length and max_width to print everything? Default is FALSE.
#' @param ... Not used.
#'
#' @importFrom utils head
#'
#' @export
print.bayes_mode <- function(x, max_length = 6L, max_width = 6L, print_all = F, ...) {
print_list(x, max_length, max_width, print_all)
}
#' Print method for \code{mix_mode} objects
#'
#' @param x An object of class \code{mix_mode}.
#' @param max_length maximum number of elements (for vector) or rows (for matrices) to show. Default is `6L`.
#' @param max_width maximum number of columns to show (for matrices). Default is `6L`.
#' @param print_all override max_length and max_width to print everything? Default is FALSE.
#' @param ... Not used.
#'
#' @export
print.mix_mode <- function(x, max_length = 6L, max_width = 6L, print_all = F, ...) {
print_list(x, max_length, max_width, print_all)
}
#' Print method for `bayes_mixture` objects
#'
#' @param x An object of class `bayes_mixture`.
#' @param max_length maximum number of elements (for vector) or rows (for matrices) to show. Default is `6L`.
#' @param max_width maximum number of columns to show (for matrices). Default is `6L`.
#' @param print_all override max_length and max_width to print everything? Default is FALSE.
#' @param ... Not used.
#'
#' @export
print.bayes_mixture <- function(x, max_length = 6L, max_width = 6L, print_all = F, ...) {
print_list(x, max_length, max_width, print_all)
}
#' Print method for `mixture` objects
#'
#' @param x An object of class `mixture`.
#' @param max_length maximum number of elements (for vector) or rows (for matrices) to show. Default is `6L`.
#' @param max_width maximum number of columns to show (for matrices). Default is `6L`.
#' @param print_all override max_length and max_width to print everything? Default is FALSE.
#' @param ... Not used.
#'
#' @export
print.mixture <- function(x, max_length = 6L, max_width = 6L, print_all = F, ...) {
print_list(x, max_length, max_width, print_all)
}
print_list <- function(x, max_length = 6L, max_width = 6L, print_all = F) {
assert_that(max_length >= 1,
max_width >= 1)
# Check for data type and print accordingly
# Print list details
for (i in 1:length(x)) {
cat(names(x)[i])
head_print(x[[i]], max_length, max_width, print_all)
cat("\n")
}
}
head_print <- function(x, max_length = 6L, max_width = 6L, print_all = F) {
n = as.integer(max_length)
m = as.integer(max_width)
if (is.vector(x)) {
cat(paste0(" (", class(x), sprintf(" vector, dim %d", length(x)),"):"),"\n")
if (print_all == FALSE) {
if (length(x) <= n) {
print(x)
} else {
print(head(x, n))
cat(sprintf("... (%d more elements)\n", length(x) - n))
}
} else {
print(x)
}
} else if (is.matrix(x)) {
# Print matrix
rows_to_print <- min(nrow(x), n)
cols_to_print <- min(ncol(x), m)
cat(paste0(sprintf(" (matrix, dim %dx%d", nrow(x),ncol(x)),"):"),"\n")
if (print_all == FALSE) {
if (nrow(x) <= n & ncol(x) <= m) {
print(x)
} else {
print(head(x, c(rows_to_print, cols_to_print)))
if (nrow(x) > n & ncol(x) > m) {
cat(sprintf("... (%d more rows and %d more columns)\n", nrow(x) - n, ncol(x) - m))
}
if (nrow(x) > n & ncol(x) <= m) {
cat(sprintf("... (%d more rows)\n", nrow(x) - n))
}
if (nrow(x) <= n & ncol(x) > m) {
cat(sprintf("... (%d more columns)\n", ncol(x) - m))
}
}
} else {
print(x)
}
} else if (is.null(x)) {
cat(" (NULL)\n")
} else if (is.function(x)) {
cat(":\n", head(args(x))[1])
print(body(x))
} else {
# Fallback for other types
cat("(",sprintf("%s", class(x)),")\n")
print(x)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/print_methods.R
|
#' Summary method for `bayes_mode` objects
#'
#' @param object An object of class `bayes_mode`.
#' @param ... Not used.
#'
#' @export
summary.bayes_mode <- function(object, ...) {
modes = object$modes
p1 = object$p1
cat("Posterior probability of multimodality is", 1-p1, "\n")
cat("\nInference results on the number of modes:")
cat("\n p_nb_modes")
head_print(t(object$p_nb_modes))
cat("\nInference results on mode locations:")
cat("\n p_loc")
head_print(t(object$p_mode_loc))
}
#' Summary method for `mix_mode` objects
#'
#' @param object An object of class `mix_mode`.
#' @param ... Not used.
#'
#' @export
summary.mix_mode <- function(object, ...) {
Nb_m = length(object$mode_estimates)
algo = object$algo
d = object$dist
K = object$K
if (is.na(d)) {
d = object$dist_type
}
if (Nb_m == 1) {
m = "Mode"
} else {
m = "Modes"
}
cat(m, "of a", d, "mixture with", K, "components.")
cat("\n- Number of modes found:", Nb_m)
cat("\n- Mode estimation technique:", object$algo, "algorithm")
cat("\n- Estimates of mode locations:")
cat("\n mode_estimates")
head_print(round(object$mode_estimates, 3))
}
#' Summary method for `mixture` objects
#'
#' @param object An object of class `mixture`.
#' @param ... Not used.
#'
#' @export
summary.mixture <- function(object, ...) {
cat("Estimated mixture distribution.")
cat("\n- Mixture type:", object$dist_type)
cat("\n- Number of components:", object$K)
cat("\n- Distribution family:", object$dist)
cat("\n- Number of distribution variables:", object$nb_var)
cat("\n- Names of variables:",
object$pars_names[object$pars_names!="eta"])
cat("\n- Parameter estimates:")
cat("\n pars")
head_print(object$pars)
}
#' Summary method for `bayes_mixture` objects
#' The summary of MCMC draws is given by the function
#' `summarise_draws` from package \pkg{posterior}.
#' @param object An object of class `bayes_mixture`.
#' @param ... Not used.
#'
#' @importFrom posterior summarise_draws
#'
#' @export
summary.bayes_mixture <- function(object, ...) {
d = object$dist
K = object$K
if (is.na(d)) {
d = object$dist_type
}
cat("Mixture estimated with a Bayesian MCMC method.")
cat("\n- Mixture type:", object$dist_type)
cat("\n- Number of components:", object$K)
cat("\n- Distribution family:", object$dist)
cat("\n- Number of distribution variables:", object$nb_var)
cat("\n- Names of variables:",
object$pars_names[object$pars_names!="eta"])
cat("\n\nSummary of MCMC output after burnin:\n")
print(summarise_draws(object$mcmc))
cat(paste0("this table can be reproduced with: summarise_draws(",deparse(substitute(object)),"$mcmc)"))
message(paste("\nNote that label-switching might occur in the MCMC draws because BayesMultiMode does not carry out post-processing.",
"\nWhile label-switching does not affect mode inference it can affect diagnostic checks.\n"))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesMultiMode/R/summary_methods.R
|
#================================================
# Bayesian nonstationary Gaussian process
# modeling in NIMBLE
# Mark Risser and Daniel Turek
# Lawrence Berkeley National Laboratory
# January, 2019
#================================================
#================================================
# Functions for the NNGP approximation
#================================================
## Script #3: nsgpNNGP.R (functions for the NNGP approximation)
##
## - calculateAD_ns
## - calcQF
## - dmnorm_nngp (formerly dmnorm_nn2)
## - rmnorm_nngp (formerly rmnorm_nn2)
#==============================================================================
# Calculate the Gaussian quadratic form for the NNGP approximation
#==============================================================================
# ROxygen comments ----
#' Calculate the Gaussian quadratic form for the NNGP approximation
#'
#' \code{calcQF} calculates the quadratic form in the multivariate Gaussian
#' based on the NNGP approximation, for a specific parameter combination. The
#' quadratic form is \code{t(u)C^{-1}v}.
#'
#' @param u Vector; left product.
#' @param v Vector; right product
#' @param AD N x (k+1) matrix; the first k columns are the 'A' matrix, and the
#' last column is the 'D' vector. Represents the Cholesky of \code{C^{-1}}.
#' @param nID N x k matrix of neighbor indices.
#'
#' @return Scalar; the value of the quadratic form \code{t(u)C^{-1}v}.
#'
#' @export
#'
calcQF <- nimbleFunction(
run = function(u = double(1), v = double(1), AD = double(2), nID = double(2)) {
N <- dim(AD)[1]
k <- dim(AD)[2] - 1
qf <- u[1] * v[1] / AD[1,k+1]
for(i in 2:N) {
if(i<=k) nNei <- i-1 else nNei <- k
qf <- qf + (u[i] - inprod( AD[i,1:nNei], u[nID[i,1:nNei]] )) *
(v[i] - inprod( AD[i,1:nNei], v[nID[i,1:nNei]] )) / AD[i,k+1]
}
returnType(double())
return(qf)
}
)
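## Illustrative sketch (not run): for two locations the NNGP quadratic form
## can be checked against a dense computation; the A and D values below are
## hypothetical.
if (FALSE) {
a <- 0.6; d1 <- 1.0; d2 <- 0.5
AD <- matrix(c(0, a, d1, d2), nrow = 2) # first column 'A', last column 'D'
nID <- matrix(c(1, 1), nrow = 2) # location 2 conditions on location 1
C <- matrix(c(d1, a * d1, a * d1, a^2 * d1 + d2), 2, 2) # implied covariance
u <- c(1, 2); v <- c(3, -1)
calcQF(u, v, AD, nID) # -4.84
t(u) %*% solve(C) %*% v # matches
}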
#==============================================================================
# Calculate A and D matrices for the NNGP approximation
#==============================================================================
# ROxygen comments ----
#' Calculate A and D matrices for the NNGP approximation
#'
#' \code{calculateAD_ns} calculates A and D matrices (the Cholesky of the
#' precision matrix) needed for the NNGP approximation.
#'
#' @param dist1_3d N x (k+1) x (k+1) array of distances in the x-coordinate
#' direction.
#' @param dist2_3d N x (k+1) x (k+1) array of distances in the y-coordinate
#' direction.
#' @param dist12_3d N x (k+1) x (k+1) array of cross-distances.
#' @param Sigma11 N-vector; 1-1 element of the Sigma() process.
#' @param Sigma12 N-vector; 1-2 element of the Sigma() process.
#' @param Sigma22 N-vector; 2-2 element of the Sigma() process.
#' @param log_sigma_vec N-vector; process standard deviation values.
#' @param log_tau_vec N-vector; nugget standard deviation values.
#' @param nID N x k matrix of neighbor indices.
#' @param N Scalar; number of data measurements.
#' @param k Scalar; number of nearest neighbors.
#' @param nu Scalar; Matern smoothness parameter.
#' @param d Scalar; dimension of the spatial domain.
#'
#' @return A N x (k+1) matrix; the first k columns are the 'A' matrix, and the
#' last column is the 'D' vector.
#'
#' @export
#'
calculateAD_ns <- nimbleFunction(
run = function(
dist1_3d = double(3), dist2_3d = double(3), dist12_3d = double(3),
Sigma11 = double(1), Sigma22 = double(1), Sigma12 = double(1),
log_sigma_vec = double(1), log_tau_vec = double(1), nID = double(2), N = double(), k = double(), nu = double(), d = double() ) {
AD <- array(0, c(N,k+1))
AD[1,k+1] <- exp(log_sigma_vec[1])^2 + exp(log_tau_vec[1])^2
for(i in 2:N) {
if(i<=k) nNei <- i-1 else nNei <- k
ind <- c( nID[i,1:nNei], i )
## these arrays must be extracted before passing to the nsCorr() function:
d1 <- dist1_3d[i,1:(nNei+1),1:(nNei+1)]
d2 <- dist2_3d[i,1:(nNei+1),1:(nNei+1)]
d12 <- dist12_3d[i,1:(nNei+1),1:(nNei+1)]
S1 <- Sigma11[ind]; S2 <- Sigma22[ind]; S12 <- Sigma12[ind]
Cor <- nsCorr(d1, d2, d12, S1, S2, S12, nu, d)
sigmaMat <- diag(exp(log_sigma_vec[ind]))
Cov <- sigmaMat %*% Cor %*% sigmaMat
C <- Cov + diag(exp(log_tau_vec[ind])^2)
AD[i,1:nNei] <- solve( C[1:nNei,1:nNei], C[nNei+1,1:nNei] )
AD[i,k+1] <- C[nNei+1,nNei+1] - inprod( C[nNei+1,1:nNei], AD[i,1:nNei] )
}
returnType(double(2))
return(AD)
}, check = FALSE
)
#==============================================================================
# Density function for the NNGP approximation
#==============================================================================
# ROxygen comments ----
#' Function for evaluating the NNGP approximate density.
#'
#' \code{dmnorm_nngp} (and \code{rmnorm_nngp}) calculate the approximate NNGP
#' likelihood for a fixed set of parameters (i.e., A and D matrices). Finally,
#' the distributions must be registered within \code{nimble}.
#'
#' @param x N-vector of data.
#' @param mean N-vector with current values of the mean
#' @param AD N x (k+1) matrix; the first k columns are the 'A' matrix, and the
#' last column is the 'D' vector.
#' @param nID N x k matrix of neighbor indices.
#' @param N Scalar; number of data measurements.
#' @param k Scalar; number of nearest neighbors.
#' @param log Scalar; should the density be on the log scale (1) or not (0).
#'
#' @return The NNGP approximate density.
#'
#' @export
#'
dmnorm_nngp <- nimbleFunction(
run = function(x = double(1), mean = double(1), AD = double(2), nID = double(2), N = double(), k = double(), log = double()) {
xCentered <- x - mean
qf <- calcQF(xCentered, xCentered, AD, nID)
lp <- -0.5 * (1.83787706649*N + sum(log(AD[1:N,k+1])) + qf) # log(2pi) = 1.8378770664
returnType(double())
return(lp)
}, check = FALSE
)
# ROxygen comments ----
#' Function for evaluating the NNGP approximate density.
#'
#' \code{dmnorm_nngp} (and \code{rmnorm_nngp}) calculate the approximate NNGP
#' likelihood for a fixed set of parameters (i.e., A and D matrices). Finally,
#' the distributions must be registered within \code{nimble}.
#'
#' @param n Integer; number of random draws (ignored; random generation is not implemented).
#' @param mean N-vector with current values of the mean
#' @param AD N x (k+1) matrix; the first k columns are the 'A' matrix, and the
#' last column is the 'D' vector.
#' @param nID N x k matrix of neighbor indices.
#' @param N Scalar; number of data measurements.
#' @param k Scalar; number of nearest neighbors.
#'
#' @return An N-vector of zeros (placeholder; required to register the distribution in \code{nimble}).
#'
#' @export
#'
rmnorm_nngp <- nimbleFunction(
run = function(n = integer(), mean = double(1), AD = double(2), nID = double(2), N = double(), k = double()) {
returnType(double(1))
return(numeric(N))
}
)
registerDistributions(list(
dmnorm_nngp = list(
BUGSdist = 'dmnorm_nngp(mean, AD, nID, N, k)',
types = c('value = double(1)', 'mean = double(1)', 'AD = double(2)', 'nID = double(2)', 'N = double()', 'k = double()'),
mixedSizes = TRUE)
), verbose = FALSE)
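## Illustrative sketch (not run): once registered, the density can appear as a
## likelihood in a NIMBLE model; the node names below are hypothetical, and AD
## would come from calculateAD_ns().
if (FALSE) {
code <- nimbleCode({
z[1:N] ~ dmnorm_nngp(mean = mu[1:N], AD = AD[1:N, 1:(k + 1)],
nID = nID[1:N, 1:k], N = N, k = k)
})
}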
|
/scratch/gouwar.j/cran-all/cranData/BayesNSGP/R/NNGP.R
|
#================================================
# Bayesian nonstationary Gaussian process
# modeling in NIMBLE
# Mark Risser and Daniel Turek
# Lawrence Berkeley National Laboratory
# January, 2019
#================================================
#================================================
# Functions for the SGV approximation
#================================================
## Script #4: nsgpSGV.R (functions for the SGV approximation)
##
## - conditionLatentObs
## - sgpSetup (formerly sgv_setup)
## - calculateU_ns
## - dmnorm_sgv (formerly sgv_loglikelihood)
## - rmnorm_sgv
#==============================================================================
# Assign conditioning sets for the SGV approximation
#==============================================================================
# ROxygen comments ----
#' Assign conditioning sets for the SGV approximation
#'
#' \code{conditionLatentObs} assigns q_y(i) vs q_z(i) following Section 5.1
#' in Katzfuss and Guinness (2018). This function only needs to be run once
#' per SGV analysis.
#'
#' @param nID N x k matrix of neighbor indices.
#' @param coords_ord N x 2 matrix of locations.
#' @param N Scalar; number of locations (observed only!).
#'
#' @return A matrix indicating whether the conditioning set for each location
#' is on the latent process (y, \code{1}) or the observed values (z, \code{0}).
#'
#' @export
#'
conditionLatentObs <- function( nID, coords_ord, N ){
Nall <- nrow(nID)
k <- ncol(nID)
d <- ncol(coords_ord)
cond_on_y <- matrix(0, Nall, k)
cond_on_y[nID == -1] <- -1 ## populate unused values with -1, to prevent a warning from NIMBLE
cond_on_y[2,1] <- 1
for(i in 3:N){ # i = 1 has no conditioning set; i = 2 automatically conditions on y_1
q_i <- (nID[i,])[nID[i,] != -1]
size_intrsct_qyj_qi <- rep(NA, length(q_i))
for(j in 1:length(q_i)){
q_y_j <- which(cond_on_y[q_i[j],] == 1)
size_intrsct_qyj_qi[j] <- sum(q_y_j %in% q_i)
}
ind_h_i <- which(size_intrsct_qyj_qi == max(size_intrsct_qyj_qi))
h_i <- q_i[ind_h_i]
ind_k_i <- which.min( as.numeric(mahalanobis.dist(data.x = matrix(coords_ord[i,], ncol = d, byrow = TRUE),
data.y = matrix(coords_ord[h_i,], ncol = d, byrow = TRUE),
vc = diag(d))) )
k_i <- h_i[ind_k_i]
q_y_k_i <- nID[k_i,which(cond_on_y[k_i,] == 1)]
cond_on_y[i, which(c(q_y_k_i[which( q_y_k_i %in% q_i )], k_i) %in% (nID[i,])[nID[i,] != -1])] <- 1
}
if(Nall > N) cond_on_y[(N+1):Nall,] <- 1 # Prediction locations condition on the latent process y
return(cond_on_y)
}
#==============================================================================
# One-time setup wrapper function for the SGV approximation
#==============================================================================
# ROxygen comments ----
#' One-time setup wrapper function for the SGV approximation
#'
#' \code{sgvSetup} is a wrapper function that sets up the SGV approximation.
#' Three objects are required: (1) ordering the locations, (2) identify nearest
#' neighbors, and (3) determine the conditioning set. This function only needs
#' to be run once per SGV analysis.
#'
#' @param coords Matrix of observed locations.
#' @param coords_pred Optional matrix of prediction locations.
#' @param k Number of neighbors.
#' @param seed Setting the seed for reproducibility of the observed location
#' ordering
#' @param pred.seed Setting the seed for reproducibility of the prediction
#' ordering.
#' @param order_coords Logical; should the coordinates be ordered.
#'
#' @return A list with the following components:
#' \item{ord}{A vector of ordering position for the observed locations.}
#' \item{ord_pred}{A vector of ordering position for the prediction
#' locations (if \code{coords_pred} is provided).}
#' \item{ord_all}{A concatenated vector of \code{ord} and \code{ord_pred}.}
#' \item{coords_ord}{A matrix of ordered locations (observed and prediction),
#' included for convenience.}
#' \item{nID_ord}{A matrix of (ordered) neighbor indices.}
#' \item{condition_on_y_ord}{A matrix indicating whether the conditioning
#' set for each (ordered) location is on the latent process (y, \code{1}) or
#' the observed values (z, \code{0}).}
#'
#' @export
#'
sgvSetup <- function( coords, coords_pred = NULL, k = 15, seed = NULL, pred.seed = NULL, order_coords = TRUE ){
if(is.null(seed)) seed <- sample(1e5, 1) # Set seed for reproducibility (randomness in orderCoordinatesMMD function)
if(is.null(pred.seed)) pred.seed <- sample(1e5, 1) # Set seed for reproducibility (randomness in orderCoordinatesMMD function)
d <- ncol(coords) # Spatial dimension
n <- nrow(coords) # Number of (observed) locations
num_NZ <- 3*n + k*n - (k*(k+1)/2)
#--------------------------------------------------------
# Task 1: Order the locations
#--------------------------------------------------------
if(order_coords){
set.seed(seed)
coords_mmd <- orderCoordinatesMMD(coords )
ord <- coords_mmd$orderedIndicesNoNA
} else{
coords_mmd <- coords
ord <- 1:n
}
if(is.null(coords_pred)){ # If no prediction
coords_pred_mmd <- NULL
ord_pred <- NULL
ord_all <- ord
coords_ord <- coords[ord_all,]
} else{
n_pred <- nrow(coords_pred) # Number of prediction locations
set.seed(pred.seed)
coords_pred_mmd <- orderCoordinatesMMD(coords_pred)
ord_pred <- coords_pred_mmd$orderedIndicesNoNA
ord_all <- c(ord, n+ord_pred)
coords_ord <- rbind(coords, coords_pred)[ord_all,]
}
#--------------------------------------------------------
# Task 2: Get nearest neighbors
#--------------------------------------------------------
nID_ord <- determineNeighbors(coords_ord, k)
#--------------------------------------------------------
# Task 3: Conditioning on y or z?
#--------------------------------------------------------
condition_on_y_ord <- conditionLatentObs( nID_ord, coords_ord, n )
return(list( seed = seed, num_NZ = num_NZ,
ord = ord, ord_pred = ord_pred, ord_all = ord_all,
coords_ord = coords_ord, nID_ord = nID_ord,
condition_on_y_ord = condition_on_y_ord ))
}
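## Illustrative sketch (not run): typical one-time setup for an SGV analysis;
## the coordinates below are hypothetical.
if (FALSE) {
coords <- cbind(runif(100), runif(100))
setup <- sgvSetup(coords, k = 10)
str(setup$nID_ord) # (ordered) neighbor indices
str(setup$condition_on_y_ord) # condition on y (1) or z (0)
}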
#==============================================================================
# Calculate the (sparse) matrix U
#==============================================================================
# ROxygen comments ----
#' Calculate the (sparse) matrix U
#'
#' \code{calculateU_ns} calculates the (sparse) matrix U (i.e., the Cholesky
#' of the inverse covariance matrix) using a nonstationary covariance function.
#' The output only contains non-zero values and is stored as three vectors:
#' (1) the row indices, (2) the column indices, and (3) the non-zero values.
#' NOTE: this code assumes the all inputs correspond to the ORDERED locations.
#'
#' @param dist1_3d N x (k+1) x (k+1) array of distances in the x-coordinate
#' direction.
#' @param dist2_3d N x (k+1) x (k+1) array of distances in the y-coordinate
#' direction.
#' @param dist12_3d N x (k+1) x (k+1) array of cross-distances.
#' @param Sigma11 N-vector; 1-1 element of the Sigma() process.
#' @param Sigma12 N-vector; 1-2 element of the Sigma() process.
#' @param Sigma22 N-vector; 2-2 element of the Sigma() process.
#' @param log_sigma_vec N-vector; process standard deviation values.
#' @param log_tau_vec N-vector; nugget standard deviation values.
#' @param nu Scalar; Matern smoothness parameter.
#' @param nID N x k matrix of (ordered) neighbor indices.
#' @param cond_on_y A matrix indicating whether the conditioning set for each
#' (ordered) location is on the latent process (y, \code{1}) or the observed
#' values (z, \code{0}). Calculated in \code{sgvSetup}.
#' @param N Scalar; number of data measurements.
#' @param k Scalar; number of nearest neighbors.
#' @param d Scalar; dimension of the spatial domain.
#' @param M Scalar; number of prediction sites.
#'
#' @return Returns a sparse matrix representation of the Cholesky of the
#' precision matrix for a fixed set of covariance parameters.
#'
#' @export
#'
calculateU_ns <- nimbleFunction( # Create the sparse U matrix for specific theta
run = function(
dist1_3d = double(3), dist2_3d = double(3), dist12_3d = double(3),
Sigma11 = double(1), Sigma22 = double(1), Sigma12 = double(1),
log_sigma_vec = double(1), log_tau_vec = double(1), nu = double(),
nID = double(2), cond_on_y = double(2), N = double(), k = double(), d = double(0),
M = double(0, default = 0) ) {
# Setup
NN <- 2*N + M
num_NZ <- 3*N + k*N - (k*(k+1)/2) + (k+1)*M # Number of non-zero entries in U
num_neighbs <- c(0, seq(from = 1, to = k-1, by = 1), array(k, M+N-k))
Uvals <- array(0, num_NZ)
rowInd <- array(0, num_NZ)
colInd <- array(0, num_NZ)
# Calculate the position of the diagonal elements
dgIdx_vec <- array(-1, N+M)
for(l in 1:k){
dgIdx_vec[l] <- 1 + sum(num_neighbs[1:l])
}
dgIdx_vec[(k+1):(N+M)] <- seq(from=(k*(k+1)/2)+1, to=num_NZ - 2*N, by = k+1)
# First: the y_j
Uvals[1] <- exp(log_sigma_vec[1])^2
rowInd[1] <- 1
colInd[1] <- 1
for(i in 2:(N+M)){ # y_j
if(i<=k) nNei <- i-1 else nNei <- k
ind <- nID[i,1:nNei]
## these arrays must be extracted before passing to the nsCorr() and nsCrosscorr() functions:
Xd1 <- array(dist1_3d[i, 1:nNei, (nNei + 1)], c(nNei, 1)) # Distances between location i and its neighbors
Xd2 <- array(dist2_3d[i, 1:nNei, (nNei + 1)], c(nNei, 1))
Xd12 <- array(dist12_3d[i, 1:nNei, (nNei + 1)], c(nNei, 1))
S1 <- nimNumeric( value = Sigma11[i], length = 1) # Anisotropy parameters for location i
S2 <- nimNumeric( value = Sigma22[i], length = 1)
S12 <- nimNumeric( value = Sigma12[i], length = 1)
d1 <- array(dist1_3d[i, 1:nNei, 1:nNei], c(nNei, nNei)) # Distances between the neighbors of location i
d2 <- array(dist2_3d[i, 1:nNei, 1:nNei], c(nNei, nNei))
d12 <- array(dist12_3d[i, 1:nNei, 1:nNei], c(nNei, nNei))
xS1 <- Sigma11[ind] # Anisotropy parameters for the neighbors of location i
xS2 <- Sigma22[ind]
xS12 <- Sigma12[ind]
# Cross-covariance between location and the conditioning set
Crosscor <- nsCrosscorr(Xd1, Xd2, Xd12, S1, S2, S12, xS1, xS2, xS12, nu, d)
if(length(ind) == 1) {
sigmaMat_cond <- array(exp(log_sigma_vec[ind]), c(1,1))
} else {
sigmaMat_cond <- diag(exp(log_sigma_vec[ind]))
}
Crosscov <- array(exp(log_sigma_vec[i]), c(1,1)) %*% array(Crosscor, c(1,nNei)) %*% sigmaMat_cond # Formerly pt1
# Covariance of conditioning set
Cor_cond <- nsCorr(d1, d2, d12, xS1, xS2, xS12, nu, d)
Cov_cond <- sigmaMat_cond %*% Cor_cond %*% sigmaMat_cond # Formerly pt2
# Covariance of the process at the location
Cov_loc <- exp(log_sigma_vec[i])^2 # Formerly pt3
####################################
# b_i <- nimNumeric( value = (Crosscov %*% inverse(Cov_cond))[1,1:nNei], length = nNei)
b_i <- nimNumeric( value = solve(Cov_cond, t(Crosscov))[1:nNei,1], length = nNei)
r_i <- Cov_loc - inprod( b_i, nimNumeric(value = (Crosscov)[1,1:nNei], length = nNei) )
####################################
# Store
dgIdx <- dgIdx_vec[i]
Uvals[dgIdx] <- 1/sqrt(r_i)
if(i > N){
rowInd[dgIdx] <- N + i
colInd[dgIdx] <- N + i
} else{
rowInd[dgIdx] <- 2*(i-1)+1
colInd[dgIdx] <- 2*(i-1)+1
}
for(j in 1:nNei){
if(cond_on_y[i,j] == 1){ # condition on y_i
Uvals[dgIdx + j] <- -b_i[j]/sqrt(r_i)
if(i > N){ # Pred locations
if(ind[j] > N){
rowInd[dgIdx + j] <- N + ind[j]
} else{
rowInd[dgIdx + j] <- 2*(ind[j]-1)+1
}
colInd[dgIdx + j] <- N + i
} else{ # Obs locations
rowInd[dgIdx + j] <- 2*(ind[j]-1)+1
colInd[dgIdx + j] <- 2*(i-1)+1
}
} else{ # condition on z_i
Uvals[dgIdx + j] <- -b_i[j]/sqrt(r_i)
if(i > N){ # Pred locations
stop("Error.")
colInd[dgIdx + j] <- N + i
} else{ # Obs locations
rowInd[dgIdx + j] <- 2*ind[j]
colInd[dgIdx + j] <- 2*(i-1)+1
}
}
}
}
# Next: the z_i
Uvals[(num_NZ - (2*N) + 1):(num_NZ - N)] <- 1/exp(log_tau_vec[1:N])
rowInd[(num_NZ - (2*N) + 1):(num_NZ - N)] <- seq(from = 2, to = (2*N), by = 2)
colInd[(num_NZ - (2*N) + 1):(num_NZ - N)] <- seq(from = 2, to = (2*N), by = 2)
Uvals[(num_NZ - N + 1):num_NZ] <- -1/exp(log_tau_vec[1:N])
rowInd[(num_NZ - N + 1):num_NZ] <- seq(from = 1, to = (2*N), by = 2)
colInd[(num_NZ - N + 1):num_NZ] <- seq(from = 2, to = (2*N), by = 2)
# Combine
U_ijx <- array(0, c(num_NZ, 3))
U_ijx[,1] <- rowInd
U_ijx[,2] <- colInd
U_ijx[,3] <- Uvals
returnType(double(2))
return(U_ijx)
}, check = FALSE
)
# ROxygen comments ----
#' R_sparse_tcrossprod
#' @param i Vector of row indices.
#' @param j Vector of column indices.
#' @param x Vector of values in the matrix.
#' @param subset Optional vector of rows to include in the calculation.
#' @export
R_sparse_tcrossprod <- function(i, j, x, subset = -1) {
Asparse <- sparseMatrix(i = i, j = j, x = x)
if(subset[1] < 0){ # No subset
ans.dsCMatrix <- tcrossprod(Asparse)
} else{
ans.dsCMatrix <- tcrossprod(Asparse[subset,])
}
ans.dgTMatrix <- as(ans.dsCMatrix, 'dgTMatrix')
i <- ans.dgTMatrix@i + 1
j <- ans.dgTMatrix@j + 1
x <- ans.dgTMatrix@x
ijx <- cbind(i, j, x)
return(ijx)
}
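## Illustrative sketch (not run): the (i, j, x) triplet representation
## round-trips through Matrix; the 3 x 3 diagonal example below is
## hypothetical.
if (FALSE) {
R_sparse_tcrossprod(i = 1:3, j = 1:3, x = 1:3)
# triplets of tcrossprod(diag(1:3)) = diag(c(1, 4, 9))
}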
# ROxygen comments ----
#' nimble_sparse_tcrossprod
#' @param i Vector of row indices.
#' @param j Vector of column indices.
#' @param x Vector of values in the matrix.
#' @param subset Optional vector of rows to include in the calculation.
#' @export
nimble_sparse_tcrossprod <- nimbleRcall(
prototype = function(i = double(1), j = double(1), x = double(1), subset = double(1)) {},
returnType = double(2),
Rfun = 'R_sparse_tcrossprod'
)
# ROxygen comments ----
#' R_sparse_crossprod
#' @param i Vector of row indices.
#' @param j Vector of column indices.
#' @param x Vector of values in the matrix.
#' @param z Vector to calculate the cross-product with.
#' @param n Length of the vector
#' @param subset Optional vector of rows to include in the calculation.
#' @param transp Optional indicator of using the transpose
#' @export
R_sparse_crossprod <- function(i, j, x, z, n, subset = -1, transp = 1) {
if(transp == 1){ # use crossprod
Asparse <- sparseMatrix(i = i, j = j, x = x)
if(subset[1] < 0){ # No subset
ans.dsCMatrix <- crossprod(Asparse, as.numeric(z))
} else{
ans.dsCMatrix <- crossprod(Asparse[subset,], as.numeric(z))
}
} else{ # Use %*%
Asparse <- sparseMatrix(i = j, j = i, x = x)
if(subset[1] < 0){ # No subset
ans.dsCMatrix <- crossprod(Asparse, as.numeric(z))
} else{
ans.dsCMatrix <- crossprod(Asparse[,subset], as.numeric(z))
}
}
return(ans.dsCMatrix@x)
}
# ROxygen comments ----
#' nimble_sparse_crossprod
#' @param i Vector of row indices.
#' @param j Vector of column indices.
#' @param x Vector of values in the matrix.
#' @param z Vector to calculate the cross-product with.
#' @param n Length of the vector
#' @param subset Optional vector of rows to include in the calculation.
#' @param transp Optional indicator of using the transpose
#' @export
nimble_sparse_crossprod <- nimbleRcall(
prototype = function(i = double(1), j = double(1), x = double(1), z = double(1), n = double(), subset = double(1), transp = double()) {},
returnType = double(1),
Rfun = 'R_sparse_crossprod'
)
# ROxygen comments ----
#' R_sparse_chol
#' @param i Vector of row indices.
#' @param j Vector of column indices.
#' @param x Vector of values in the matrix.
#' @param n Length of the vector
#' @export
R_sparse_chol <- function(i, j, x, n) {
Asparse <- sparseMatrix(i = i, j = j, x = x)
ans.dsCMatrix <- t(chol(Asparse[n:1,n:1]))
ans.dgTMatrix <- as(ans.dsCMatrix, 'dgTMatrix')
i <- ans.dgTMatrix@i + 1
j <- ans.dgTMatrix@j + 1
x <- ans.dgTMatrix@x
ijx <- cbind(i, j, x)
return(ijx)
}
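## Illustrative sketch (not run): R_sparse_chol() factorises the matrix after
## reversing the index order and returns (i, j, x) triplets of the resulting
## lower-triangular factor; the diagonal example below is hypothetical.
if (FALSE) {
R_sparse_chol(i = c(1, 2), j = c(1, 2), x = c(4, 9), n = 2)
# triplets of the lower Cholesky of diag(c(9, 4)): values 3 and 2
}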
# ROxygen comments ----
#' nimble_sparse_chol
#' @param i Vector of row indices.
#' @param j Vector of column indices.
#' @param x Vector of values in the matrix.
#' @param n Length of the vector
#' @export
nimble_sparse_chol <- nimbleRcall(
prototype = function(i = double(1), j = double(1), x = double(1), n = double()) {},
returnType = double(2),
Rfun = 'R_sparse_chol'
)
# ROxygen comments ----
#' R_sparse_solve
#' @param i Vector of row indices.
#' @param j Vector of column indices.
#' @param x Vector of values in the matrix.
#' @param z Vector to calculate the cross-product with.
#' @export
R_sparse_solve <- function(i, j, x, z) {
# z3 <- solve(V_ord, rev(z2), system = "L")
Asparse <- sparseMatrix(i = i, j = j, x = x)
z_rev <- rev(z)
ans.dsCMatrix <- solve(Asparse, z_rev, system = "L")
return(ans.dsCMatrix@x)
}
# ROxygen comments ----
#' nimble_sparse_solve
#' @param i Vector of row indices.
#' @param j Vector of column indices.
#' @param x Vector of values in the matrix.
#' @param z Vector to calculate the cross-product with.
#' @export
nimble_sparse_solve <- nimbleRcall(
prototype = function(i = double(1), j = double(1), x = double(1), z = double(1)) {},
returnType = double(1),
Rfun = 'R_sparse_solve'
)
#==============================================================================
# Density function for the SGV approximation
#==============================================================================
# ROxygen comments ----
#' Function for evaluating the SGV approximate density.
#'
#' \code{dmnorm_sgv} (and \code{rmnorm_sgv}) calculate the approximate SGV
#' likelihood for a fixed set of parameters (i.e., the U matrix). Finally,
#' the distributions must be registered within \code{nimble}.
#'
#' @param x Vector of measurements
#' @param mean Vector of mean values
#' @param U Matrix with three columns; sparse representation of the N x N
#' Cholesky of the precision matrix. The first two columns contain row and
#' column indices, respectively, and the last column is the nonzero elements
#' of the matrix.
#' @param N Number of measurements in x
#' @param k Number of neighbors for the SGV approximation.
#' @param log Logical; should the density be evaluated on the log scale.
#'
#' @return Returns the SGV approximation to the Gaussian likelihood.
#'
#' @export
#'
dmnorm_sgv <- nimbleFunction(
run = function(x = double(1), mean = double(1), U = double(2),
N = double(), k = double(), log = double(0, default = 1)) {
# Components
zo_ord <- x
z1 <- nimble_sparse_crossprod(
i = U[,1], j = U[,2], x = U[,3], z = zo_ord - mean, n = N,
subset = seq(from = 2, to = 2*N, by = 2), transp = 1)
logdet_U <- -sum(log(U[U[,1] == U[,2],3]))
z2 <- nimble_sparse_crossprod(
i = U[,1], j = U[,2], x = U[,3], z = z1, n = N,
subset = seq(from = 1, to = 2*N, by = 2), transp = 0)
Amat <- nimble_sparse_tcrossprod(
i = U[,1], j = U[,2], x = U[,3],
subset = seq(from = 1, to = 2*N, by = 2))
Vmat_ord <- nimble_sparse_chol(i = Amat[,1], j = Amat[,2], x = Amat[,3], n = N)
logdet_V <- sum(log(Vmat_ord[Vmat_ord[,1] == Vmat_ord[,2],3]))
z3 <- nimble_sparse_solve(i = Vmat_ord[,1], j = Vmat_ord[,2], x = Vmat_ord[,3], z = z2)
lp <- -(logdet_U + logdet_V + 0.5*sum(z1^2) - 0.5*sum(z3^2)) - 0.5*1.83787706649*N
returnType(double())
return(lp)
}, check = FALSE
)
# ROxygen comments ----
#' Function for evaluating the SGV approximate density.
#'
#' \code{dmnorm_sgv} (and \code{rmnorm_sgv}) calculate the approximate SGV
#' likelihood for a fixed set of parameters (i.e., the U matrix). The
#' distribution is then registered within \code{nimble} via
#' \code{registerDistributions} (see below).
#'
#' @param n Number of random draws (required by the \code{nimble}
#' distribution interface but otherwise unused).
#' @param mean Vector of mean values
#' @param U Matrix of size N x 3; representation of a sparse N x N Cholesky
#' of the precision matrix. The first two columns contain row and column
#' indices, respectively, and the last column is the nonzero elements of the
#' matrix.
#' @param N Number of measurements in x
#' @param k Number of neighbors for the SGV approximation.
#'
#' @return An N-vector of zeros; random generation is not implemented, and
#' this function exists only to satisfy \code{nimble}'s distribution interface.
#'
#' @export
rmnorm_sgv <- nimbleFunction(
run = function(n = integer(), mean = double(1), U = double(2), N = double(), k = double()) {
returnType(double(1))
return(numeric(N))
}
)
registerDistributions(list(
dmnorm_sgv = list(
BUGSdist = 'dmnorm_sgv(mean, U, N, k)',
types = c('value = double(1)', 'mean = double(1)', 'U = double(2)', 'N = double()', 'k = double()'),
mixedSizes = TRUE)
), verbose = FALSE)
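## A minimal usage sketch (not run; names follow the SGV likelihood block in
## nsgpModel() below): once registered, dmnorm_sgv can appear in a stochastic
## declaration inside nimbleCode. The U argument is the num_NZ x 3 sparse
## triplet produced by calculateU_ns(); its inputs are elided here.
# demoCode <- nimble::nimbleCode({
#   U[1:num_NZ, 1:3] <- calculateU_ns( ... ) # elided; see likelihood_list$SGV in nsgpModel()
#   z[1:N] ~ dmnorm_sgv(mu[1:N], U[1:num_NZ, 1:3], N, k)
# })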
## End of source file: /scratch/gouwar.j/cran-all/cranData/BayesNSGP/R/SGV.R
#================================================
# Bayesian nonstationary Gaussian process
# modeling in NIMBLE
# Mark Risser and Daniel Turek
# Lawrence Berkeley National Laboratory
# January, 2019
#================================================
#================================================
# Functions for ordering coordinates and finding
# nearest neighbors
#================================================
## Script #2: nsgpOrderingNN.R (functions for ordering and finding nearest neighbors)
##
## - orderCoordinatesMMD: order coordinates by maxmin distance
## - determineNeighbors: identify k nearest neighbors
#==============================================================================
# Maximum-minimum distance (MMD) coordinate ordering
#==============================================================================
# ROxygen comments ----
#' Order coordinates according to a maximum-minimum distance criterion.
#'
#' \code{orderCoordinatesMMD} orders an array of (x,y) spatial coordinates
#' according to the "maximum minimum distance" (MMD), as described in Guinness,
#' 2018. (Points are selected to maximize their minimum distance to already-
#' selected points).
#'
#' @param coords N x 2 array of N 2-dimensional (x,y) spatial coordinates.
#' @param exact Logical; \code{FALSE} uses a fast approximation to MMD ordering
#' (and is almost always recommended), while \code{TRUE} uses exact MMD
#' ordering but is infeasible for large number of locations.
#'
#' @return A list with the following components:
#' \item{orderedCoords}{N x 2 matrix; contains the spatial coordinates from
#' \code{coords}, reordered according to the MMD criterion.}
#' \item{orderedIndicesNoNA}{N-vector; contains the ordering indices with any
#' NA values removed (\code{NULL} when \code{exact = TRUE}).}
#'
#' @examples
#' coords <- cbind(runif(100), runif(100))
#' orderCoordinatesMMD(coords)
#'
#' @export
#'
orderCoordinatesMMD <- function(coords, exact = FALSE) {
## input coords: an Nx2 array of spatial coordinates
N <- dim(coords)[1]
if(N < 3) return(coords) ## too few locations to reorder; return coords unchanged
if(!exact) { ## approximate MMD ordering
initialOrdering <- sample(1:N)
orderedIndices <- c(initialOrdering, rep(NA, 3*N))
indexLookupVector <- order(initialOrdering)
maxNeighbors <- floor(sqrt(N))
NN <- FNN::get.knn(coords, k = maxNeighbors)$nn.index
nextSpot <- N+1
cycleCheckIndex <- -1
for(i in 2:(3*N)) {
targetIndex <- orderedIndices[i]
if(cycleCheckIndex == targetIndex) break
if(cycleCheckIndex == -1) cycleCheckIndex <- targetIndex
targetNeighbors <- NN[targetIndex, 1:min(maxNeighbors, round(N/(i+N-nextSpot)))]
targetNeighborLocs <- indexLookupVector[targetNeighbors]
if(min(targetNeighborLocs) < i) { ## relocate this index to the back
orderedIndices[nextSpot] <- targetIndex
orderedIndices[i] <- NA
indexLookupVector[targetIndex] <- nextSpot
nextSpot <- nextSpot + 1
} else cycleCheckIndex <- -1
}
orderedIndicesNoNA <- orderedIndices[!is.na(orderedIndices)]
orderedCoords <- coords[orderedIndicesNoNA,]
} else { ## exact MMD ordering
availableIndices <- 1:N
orderedCoords <- array(NA, c(N,2))
sbar <- apply(coords, 2, mean) ## group centroid
iNext <- which.min(sapply(1:N, function(i) sum((coords[i,] - sbar)^2)))
orderedCoords[1,] <- coords[iNext,]
availableIndices <- setdiff(availableIndices, iNext)
for(i in 2:N) {
aIndNext <- which.max( ## this indexes the availableIndices vector
sapply(1:(N-i+1), function(j) {
min(sapply(1:(i-1), function(k) sum((coords[availableIndices[j],] - orderedCoords[k,])^2)))
}))
iNext <- availableIndices[aIndNext] ## this indexes rows of the original s[] array
orderedCoords[i,] <- coords[iNext,]
availableIndices <- setdiff(availableIndices, iNext)
}
orderedIndicesNoNA <- NULL
}
return(list(orderedCoords = orderedCoords, orderedIndicesNoNA = orderedIndicesNoNA))
}
#==============================================================================
# Determine the k-nearest neighbors
#==============================================================================
# ROxygen comments ----
#' Determine the k-nearest neighbors for each spatial coordinate.
#'
#' \code{determineNeighbors} returns an N x k matrix of the nearest neighbors
#' for spatial locations coords, with the ith row giving indices of the k nearest
#' neighbors to the ith location, which are selected from among the 1,...(i-1)
#' other spatial locations. The first row is all -1's, since the first location
#' has no neighbors. Rows i = 2 through i = (k+1) necessarily contain 1:(i-1)
#' in their leading entries, with unused entries filled by -1.
#'
#' @param coords N x 2 array of N 2-dimensional (x,y) spatial coordinates.
#' @param k Scalar; number of neighbors
#'
#' @return An N x k matrix of nearest neighbor indices
#'
#' @examples
#' coords <- cbind(runif(100), runif(100))
#' determineNeighbors(coords, 20)
#'
#' @export
#'
determineNeighbors <- function(coords, k) {
N <- dim(coords)[1]
d <- dim(coords)[2]
if(k+2 > N) stop("determineNeighbors: requires that the number of locations N be at least k+2.")
nID <- array(-1, c(N,k)) ## populate unused values with -1, to prevent a warning from NIMBLE
for(i in 2:(k+1)) nID[i, 1:(i-1)] <- as.numeric(1:(i-1))
if(d == 2){
for(i in (k+2):N) nID[i, 1:k] <- as.numeric(order((coords[1:(i-1),1] - coords[i,1])^2 + (coords[1:(i-1),2] - coords[i,2])^2)[1:k])
} else{
for(i in (k+2):N){
disti <- 0
for(j in 1:d){
disti <- disti + (coords[1:(i-1),j] - coords[i,j])^2
}
nID[i, 1:k] <- as.numeric(order(disti)[1:k])
}
}
return(nID)
}
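## A quick sanity sketch (not run): every neighbor index in row i must come
## from among the previously ordered locations 1:(i-1); -1 marks unused slots.
# coords <- cbind(runif(100), runif(100))
# nID <- determineNeighbors(coords, 10)
# all(nID[2:100, ] < 2:100) # TRUE; the -1 fill values also satisfy this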
#================================================
# Core package functionality
#================================================
## Script #1: nsgpCore.R (core bayesNSGP functionality)
##
## - inverseEigen: calculate covariance elements based on eigendecomposition components
## - nsCorr: calculate a nonstationary Matern correlation matrix
## - nsCrosscorr: calculate a nonstationary Matern cross-correlation matrix
## - nsDist: calculate coordinate-specific distance matrices
## - nsCrossdist: calculate coordinate-specific cross-distance matrices
## - nsDist3d (formerly ns_dist_3d)
## - nsCrossdist3d
## - nsgpModel: NIMBLE code for a generic nonstationary GP model
## - nsgpPredict: posterior prediction for the NSGP
#==============================================================================
# Inverse eigendecomposition
#==============================================================================
# ROxygen comments ----
#' Calculate covariance elements based on eigendecomposition components
#'
#' \code{inverseEigen} calculates the inverse eigendecomposition -- in other
#' words, the covariance elements based on the eigenvalues and vectors. For a
#' 2x2 anisotropy (covariance) matrix, we parameterize the three unique values
#' in terms of the two log eigenvalues and a rotation parameter on the
#' rescaled logit. The function is coded as a \code{nimbleFunction} (see the
#' \code{nimble} package) but can also be used as a regular R function.
#'
#' @param eigen_comp1 N-vector; contains values of the log of the first
#' anisotropy eigenvalue for a set of locations.
#' @param eigen_comp2 N-vector; contains values of the log of the second
#' anisotropy eigenvalue for a set of locations.
#' @param eigen_comp3 N-vector; contains values of the rescaled logit of
#' the anisotropy rotation for a set of locations.
#' @param which_Sigma Scalar; one of \code{(1,2,3)}, corresponding to which
#' covariance component should be calculated (Sigma11, Sigma22, or Sigma12,
#' respectively).
#'
#' @return A vector of anisotropy values (Sigma11, Sigma22, or Sigma12; depends
#' on \code{which_Sigma}) for the corresponding set of locations.
#'
#' @examples
#' # Generate some eigendecomposition elements (all three are real-valued)
#' eigen_comp1 <- rnorm(10)
#' eigen_comp2 <- rnorm(10)
#' eigen_comp3 <- rnorm(10)
#' inverseEigen( eigen_comp1, eigen_comp2, eigen_comp3, 2) # Return the Sigma22 values
#'
#' @export
#'
inverseEigen <- nimble::nimbleFunction(
run = function( eigen_comp1 = double(1), eigen_comp2 = double(1),
eigen_comp3 = double(1), which_Sigma = double(0) ) {
returnType(double(1))
rotAngle <- (3.141592653/2)*exp(eigen_comp3)/(1 + exp(eigen_comp3)) # pi = 3.141592653
Gam11 <- cos(rotAngle)
Gam22 <- cos(rotAngle)
Gam12 <- -sin(rotAngle)
Gam21 <- sin(rotAngle)
Lam1 <- exp(eigen_comp1)
Lam2 <- exp(eigen_comp2)
if( which_Sigma == 1 ){ # Return Sigma11
return( Gam11^2*Lam1 + Gam12^2*Lam2 )
}
if( which_Sigma == 2 ){ # Return Sigma22
return( Gam21^2*Lam1 + Gam22^2*Lam2 )
}
if( which_Sigma == 3 ){ # Return Sigma12
return( Gam11*Gam21*Lam1 + Gam12*Gam22*Lam2 )
}
stop('Error in inverseEigen function') ## prevent compiler warning
return(numeric(10)) ## prevent compiler warning
}
)
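## A quick round-trip sketch (not run): the Sigma elements returned by
## inverseEigen() should match a direct Gamma %*% Lambda %*% t(Gamma)
## construction at a single location.
# ec1 <- 0.3; ec2 <- -0.5; ec3 <- 0.1
# rot <- (pi/2)*exp(ec3)/(1 + exp(ec3))
# Gam <- matrix(c(cos(rot), sin(rot), -sin(rot), cos(rot)), 2, 2)
# Sig <- Gam %*% diag(c(exp(ec1), exp(ec2))) %*% t(Gam)
# c(Sig[1,1], Sig[2,2], Sig[1,2])
# sapply(1:3, function(w) inverseEigen(ec1, ec2, ec3, w)) # same three values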
#==============================================================================
# Compiled besselK function
#==============================================================================
# ROxygen comments ----
# Compiled besselK function
#
# \code{RbesselK} and \code{CbesselK} calculates the modified Bessel function
# of the third kind.
#
# @param dst Matrix; contains distances for the besselK function
# @param nu Scalar; smoothness.
#
# @return A matrix with values of the corresponding Bessel function.
#
# RbesselK <- nimbleFunction(
# run = function(dst = double(2), nu = double(0)) {
# xVector <- besselK(dst, nu)
# xMatrix <- matrix(xVector, dim(dst)[1], dim(dst)[2])
# returnType(double(2))
# return(xMatrix)
# }
# )
#==============================================================================
# Calculate a nonstationary Matern correlation matrix
#==============================================================================
# ROxygen comments ----
#' Calculate a nonstationary Matern correlation matrix
#'
#' \code{nsCorr} calculates a nonstationary correlation matrix for a
#' fixed set of locations, based on vectors of the unique anisotropy
#' parameters for each station. Since the correlation function uses a
#' spatially-varying Mahalanobis distance, this function requires coordinate-
#' specific distance matrices (see below). The function is coded as a
#' \code{nimbleFunction} (see the \code{nimble} package) but can also be
#' used as a regular R function.
#'
#' @param dist1_sq N x N matrix; contains values of pairwise squared distances
#' in the x-coordinate.
#' @param dist2_sq N x N matrix; contains values of pairwise squared distances
#' in the y-coordinate.
#' @param dist12 N x N matrix; contains values of pairwise signed cross-
#' distances between the x- and y-coordinates. The sign of each element is
#' important; see \code{nsDist} function for the details of this calculation.
#' @param Sigma11 Vector of length N; contains the 1-1 element of the
#' anisotropy process for each station.
#' @param Sigma22 Vector of length N; contains the 2-2 element of the
#' anisotropy process for each station.
#' @param Sigma12 Vector of length N; contains the 1-2 element of the
#' anisotropy process for each station.
#' @param nu Scalar; Matern smoothness parameter. \code{nu = 0.5} corresponds
#' to the Exponential correlation; \code{nu = Inf} corresponds to the Gaussian
#' correlation function.
#' @param d Scalar; dimension of the spatial coordinates.
#'
#' @return A correlation matrix for a fixed set of stations and fixed
#' parameter values.
#'
#' @examples
#' # Generate some coordinates and parameters
#' coords <- cbind(runif(100),runif(100))
#' Sigma11 <- rep(1, 100) # Identity anisotropy process
#' Sigma22 <- rep(1, 100)
#' Sigma12 <- rep(0, 100)
#' nu <- 2
#' # Calculate distances
#' dist_list <- nsDist(coords)
#' # Calculate the correlation matrix
#' corMat <- nsCorr(dist_list$dist1_sq, dist_list$dist2_sq, dist_list$dist12,
#' Sigma11, Sigma22, Sigma12, nu, ncol(coords))
#'
#' @export
#'
nsCorr <- nimble::nimbleFunction(
run = function( dist1_sq = double(2), dist2_sq = double(2), dist12 = double(2),
Sigma11 = double(1), Sigma22 = double(1), Sigma12 = double(1),
nu = double(0), d = double(0) ) {
returnType(double(2))
N <- length(Sigma11)
if( dist2_sq[1,1] == -1 ){ # Isotropic case
# Calculate the scale matrix
if(N == 1){
det1 <- Sigma11^d
diagSqrtSqrtDet1 <- matrix(sqrt(sqrt(det1)), N, N)
} else{
det1 <- Sigma11^d
diagSqrtSqrtDet1 <- diag(sqrt(sqrt(det1)))
}
mat11_a <- matrix(Sigma11, nrow = N, ncol = N)
mat11 <- 0.5*(mat11_a + t(mat11_a))
det12 <- mat11^d
oneOverDet12 <- 1/det12
Scale.mat <- diagSqrtSqrtDet1 %*% sqrt(oneOverDet12) %*% diagSqrtSqrtDet1
# Calculate the distance matrix
Dist.mat <- sqrt( dist1_sq/mat11 )
} else{
# Calculate the scale matrix
if(N == 1){
det1 <- Sigma11*Sigma22 - Sigma12^2
diagSqrtSqrtDet1 <- matrix(sqrt(sqrt(det1)), N, N)
} else{
det1 <- Sigma11*Sigma22 - Sigma12^2
diagSqrtSqrtDet1 <- diag(sqrt(sqrt(det1)))
}
mat11_a <- matrix(Sigma11, nrow = N, ncol = N)
mat22_a <- matrix(Sigma22, nrow = N, ncol = N)
mat12_a <- matrix(Sigma12, nrow = N, ncol = N)
mat11 <- 0.5*(mat11_a + t(mat11_a))
mat22 <- 0.5*(mat22_a + t(mat22_a))
mat12 <- 0.5*(mat12_a + t(mat12_a))
det12 <- mat11*mat22 - mat12^2
oneOverDet12 <- 1/det12
Scale.mat <- diagSqrtSqrtDet1 %*% sqrt(oneOverDet12) %*% diagSqrtSqrtDet1
# Calculate the distance matrix
inv11 <- mat22 * oneOverDet12
inv22 <- mat11 * oneOverDet12
inv12 <- -mat12 * oneOverDet12
Dist.mat <- sqrt( inv11*dist1_sq + 2*inv12*dist12 + inv22*dist2_sq )
}
# Combine
if( nu == 0.5 ){ # Exponential correlation
Unscl.corr <- exp(-Dist.mat)
} else{
if( nu == Inf ){ # Gaussian (squared exponential) correlation
Unscl.corr <- exp(-(Dist.mat^2))
} else{ # Else: Matern with smoothness nu
##Unscl.corr <- (exp(lgamma(nu)) * 2^(nu - 1))^(-1) * (Dist.mat)^nu * besselK( x = Dist.mat, nu = nu )
xVector <- besselK(Dist.mat, nu)
xMatrix <- matrix(xVector, dim(Dist.mat)[1], dim(Dist.mat)[2])
Unscl.corr <- (exp(lgamma(nu)) * 2^(nu - 1))^(-1) * (Dist.mat)^nu * xMatrix
diag(Unscl.corr) <- 1
}
}
nsCorr <- Scale.mat*Unscl.corr
return(nsCorr)
}
)
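## A quick special-case sketch (not run): with identity anisotropy and
## nu = 0.5, nsCorr() reduces to the exponential correlation exp(-h) in
## Euclidean distance h.
# coords <- cbind(runif(5), runif(5))
# dl <- nsDist(coords)
# C1 <- nsCorr(dl$dist1_sq, dl$dist2_sq, dl$dist12,
#              rep(1,5), rep(1,5), rep(0,5), nu = 0.5, d = 2)
# max(abs(C1 - exp(-as.matrix(dist(coords))))) # ~ 0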
#==============================================================================
# Calculate a stationary Matern correlation matrix
#==============================================================================
# ROxygen comments ----
#' Calculate a stationary Matern correlation matrix
#'
#' \code{matern_corr} calculates a stationary Matern correlation matrix for a
#' fixed set of locations, based on a range and smoothness parameter. This
#' function is primarily used for the "npGP" and "approxGP" models. The
#' function is coded as a \code{nimbleFunction} (see the \code{nimble} package)
#' but can also be used as a regular R function.
#'
#' @param dist N x N matrix; contains values of pairwise Euclidean distances in
#' the x-y plane.
#' @param rho Scalar; "range" parameter used to rescale distances
#' @param nu Scalar; Matern smoothness parameter. \code{nu = 0.5} corresponds
#' to the Exponential correlation; \code{nu = Inf} corresponds to the Gaussian
#' correlation function.
#'
#' @return A correlation matrix for a fixed set of stations and fixed
#' parameter values.
#'
#' @examples
#' # Generate some coordinates
#' coords <- cbind(runif(100),runif(100))
#' nu <- 2
#' # Calculate distances -- can use nsDist to calculate Euclidean distances
#' dist_list <- nsDist(coords, isotropic = TRUE)
#' # Calculate the correlation matrix
#' corMat <- matern_corr(sqrt(dist_list$dist1_sq), 1, nu)
#'
#' @export
#'
matern_corr <- nimble::nimbleFunction(
run = function( dist = double(2), rho = double(0), nu = double(0) ) {
returnType(double(2))
Nr <- dim(dist)[1]
Nc <- dim(dist)[2]
if( nu == 0.5 ){ # Exponential correlation
return(exp(-dist/rho))
}
if( nu == Inf ){ # Gaussian (squared exponential) correlation
return(exp(-(dist/rho)^2))
}
# Else: Matern with smoothness nu
xVector <- besselK(dist/rho, nu)
xMatrix <- matrix(xVector, dim(dist)[1], dim(dist)[2])
temp <- (exp(lgamma(nu)) * 2^(nu - 1))^(-1) * (dist/rho)^nu * xMatrix
# Check for zeros in dist
if(min(dist) == 0) {
for(i in 1:Nc){
if( min(dist[1:Nr,i]) == 0 ) {
for(j in 1:Nr) {
if(dist[j,i] == 0) {
temp[j,i] <- 1
}
}
}
}
}
return(temp)
}
)
#==============================================================================
# Calculate a nonstationary Matern cross-correlation matrix
#==============================================================================
# ROxygen comments ----
#' Calculate a nonstationary Matern cross-correlation matrix
#'
#' \code{nsCrosscorr} calculates a nonstationary cross-correlation matrix
#' between two fixed sets of locations (a prediction set with M locations, and
#' the observed set with N locations), based on vectors of the unique anisotropy
#' parameters for each station. Since the correlation function uses a
#' spatially-varying Mahalanobis distance, this function requires coordinate-
#' specific distance matrices (see below). The function is coded as a
#' \code{nimbleFunction} (see the \code{nimble} package) but can also be
#' used as a regular R function.
#'
#' @param Xdist1_sq M x N matrix; contains values of pairwise squared cross-distances
#' in the x-coordinate.
#' @param Xdist2_sq M x N matrix; contains values of pairwise squared cross-distances
#' in the y-coordinate.
#' @param Xdist12 M x N matrix; contains values of pairwise signed cross-
#' distances between the x- and y-coordinates. The sign of each element is
#' important; see \code{nsDist} function for the details of this calculation.
#' @param Sigma11 Vector of length N; contains the 1-1 element of the
#' anisotropy process for each observed location.
#' @param Sigma22 Vector of length N; contains the 2-2 element of the
#' anisotropy process for each observed location.
#' @param Sigma12 Vector of length N; contains the 1-2 element of the
#' anisotropy process for each observed location.
#' @param PSigma11 Vector of length M; contains the 1-1 element of the
#' anisotropy process for each prediction location.
#' @param PSigma22 Vector of length M; contains the 2-2 element of the
#' anisotropy process for each prediction location.
#' @param PSigma12 Vector of length M; contains the 1-2 element of the
#' anisotropy process for each prediction location.
#' @param nu Scalar; Matern smoothness parameter. \code{nu = 0.5} corresponds
#' to the Exponential correlation; \code{nu = Inf} corresponds to the Gaussian
#' correlation function.
#' @param d Scalar; dimension of the spatial domain.
#'
#' @return A M x N cross-correlation matrix for two fixed sets of stations and
#' fixed parameter values.
#'
#' @examples
#' # Generate some coordinates and parameters
#' coords <- cbind(runif(100),runif(100))
#' Sigma11 <- rep(1, 100) # Identity anisotropy process
#' Sigma22 <- rep(1, 100)
#' Sigma12 <- rep(0, 100)
#' Pcoords <- cbind(runif(200),runif(200))
#' PSigma11 <- rep(1, 200) # Identity anisotropy process
#' PSigma22 <- rep(1, 200)
#' PSigma12 <- rep(0, 200)
#' nu <- 2
#' # Calculate distances
#' Xdist_list <- nsCrossdist(coords, Pcoords)
#' # Calculate the correlation matrix
#' XcorMat <- nsCrosscorr(Xdist_list$dist1_sq, Xdist_list$dist2_sq, Xdist_list$dist12,
#' Sigma11, Sigma22, Sigma12, PSigma11, PSigma22, PSigma12, nu, ncol(coords))
#'
#' @export
#'
nsCrosscorr <- nimble::nimbleFunction(
run = function( Xdist1_sq = double(2), Xdist2_sq = double(2), Xdist12 = double(2),
Sigma11 = double(1), Sigma22 = double(1), Sigma12 = double(1),
PSigma11 = double(1), PSigma22 = double(1), PSigma12 = double(1),
nu = double(0), d = double(0) ) {
returnType(double(2))
N <- length(Sigma11)
M <- length(PSigma11)
if( Xdist2_sq[1,1] == -1 ){ # Isotropic case
# Calculate the scale matrix
if(N == 1){
det1 <- Sigma11^d
diagSqrtSqrtDet1 <- matrix(sqrt(sqrt(det1)), N, N)
} else{
det1 <- Sigma11^d
diagSqrtSqrtDet1 <- diag(sqrt(sqrt(det1)))
}
if(M == 1){
Pdet1 <- PSigma11^d
diagSqrtSqrtPDet1 <- matrix(sqrt(sqrt(Pdet1)), M, M)
} else{
Pdet1 <- PSigma11^d
diagSqrtSqrtPDet1 <- diag(sqrt(sqrt(Pdet1)))
}
mat11_1 <- t(matrix(Sigma11, nrow = N, ncol = M))
mat11_2 <- matrix(PSigma11, nrow = M, ncol = N)
mat11 <- 0.5*(mat11_1 + mat11_2)
det12 <- mat11^d
oneOverDet12 <- 1/det12
Scale.mat <- diagSqrtSqrtPDet1 %*% sqrt(oneOverDet12) %*% diagSqrtSqrtDet1
# Calculate the distance matrix
Dist.mat <- sqrt( Xdist1_sq/mat11 )
} else{
# Calculate the scale matrix
if(N == 1){
det1 <- Sigma11*Sigma22 - Sigma12^2
diagSqrtSqrtDet1 <- matrix(sqrt(sqrt(det1)), N, N)
} else{
det1 <- Sigma11*Sigma22 - Sigma12^2
diagSqrtSqrtDet1 <- diag(sqrt(sqrt(det1)))
}
if(M == 1){
Pdet1 <- PSigma11*PSigma22 - PSigma12^2
diagSqrtSqrtPDet1 <- matrix(sqrt(sqrt(Pdet1)), M, M)
} else{
Pdet1 <- PSigma11*PSigma22 - PSigma12^2
diagSqrtSqrtPDet1 <- diag(sqrt(sqrt(Pdet1)))
}
mat11_1 <- t(matrix(Sigma11, nrow = N, ncol = M))
mat11_2 <- matrix(PSigma11, nrow = M, ncol = N)
mat22_1 <- t(matrix(Sigma22, nrow = N, ncol = M))
mat22_2 <- matrix(PSigma22, nrow = M, ncol = N)
mat12_1 <- t(matrix(Sigma12, nrow = N, ncol = M))
mat12_2 <- matrix(PSigma12, nrow = M, ncol = N)
mat11 <- 0.5*(mat11_1 + mat11_2)
mat22 <- 0.5*(mat22_1 + mat22_2)
mat12 <- 0.5*(mat12_1 + mat12_2)
det12 <- mat11*mat22 - mat12^2
oneOverDet12 <- 1/det12
Scale.mat <- diagSqrtSqrtPDet1 %*% sqrt(oneOverDet12) %*% diagSqrtSqrtDet1
# Calculate the distance matrix
inv11 <- mat22 * oneOverDet12
inv22 <- mat11 * oneOverDet12
inv12 <- -mat12 * oneOverDet12
Dist.mat <- sqrt( inv11*Xdist1_sq + 2*inv12*Xdist12 + inv22*Xdist2_sq )
}
# Combine
if( nu == 0.5 ){ # Exponential correlation
Unscl.corr <- exp(-Dist.mat)
} else{
if( nu == Inf ){ # Gaussian (squared exponential) correlation
Unscl.corr <- exp(-(Dist.mat^2))
} else{ # Else: Matern with smoothness nu
##Unscl.corr <- (exp(lgamma(nu)) * 2^(nu - 1))^(-1) * (Dist.mat)^nu * besselK( x = Dist.mat, nu = nu )
xVector <- besselK(Dist.mat, nu)
xMatrix <- matrix(xVector, dim(Dist.mat)[1], dim(Dist.mat)[2])
Unscl.corr <- (exp(lgamma(nu)) * 2^(nu - 1))^(-1) * (Dist.mat)^nu * xMatrix
## this line will not fly: Unscl.corr[Unscl.corr == Inf] <- 1
## inelegant, but accomplishes the same:
if(min(Dist.mat) == 0) {
for(i in 1:dim(Unscl.corr)[1]) {
for(j in 1:dim(Unscl.corr)[2]) {
if(Dist.mat[i,j] == 0) Unscl.corr[i,j] <- 1
}
}
}
# diag(Unscl.corr) <- 1
}
}
nsCrosscorr <- Scale.mat*Unscl.corr
return(nsCrosscorr)
}
)
#==============================================================================
# Calculate coordinate-specific distance matrices
#==============================================================================
# ROxygen comments ----
#' Calculate coordinate-specific distance matrices
#'
#' \code{nsDist} calculates x, y, and x-y distances for use in the
#' nonstationary correlation calculation. The sign of the cross-distance
#' is important. The function contains an optional argument for re-scaling
#' the distances such that the coordinates lie in a square.
#'
#' @param coords N x 2 matrix; contains the x-y coordinates of stations
#' @param scale_factor Scalar; optional argument for re-scaling the distances.
#' @param isotropic Logical; indicates whether distances should be calculated
#' separately for each coordinate dimension (FALSE) or simultaneously for all
#' coordinate dimensions (TRUE). \code{isotropic = FALSE} can only be used for
#' two-dimensional coordinate systems.
#'
#' @return A list of distances matrices, with the following components:
#' \item{dist1_sq}{N x N matrix; contains values of pairwise squared distances
#' in the x-coordinate.}
#' \item{dist2_sq}{N x N matrix; contains values of pairwise squared distances
#' in the y-coordinate.}
#' \item{dist12}{N x N matrix; contains values of pairwise signed cross-
#' distances between the x- and y-coordinates.}
#' \item{scale_factor}{Value of the scale factor used to rescale distances.}
#'
#' @examples
#' # Generate some coordinates
#' coords <- cbind(runif(100),runif(100))
#' # Calculate distances
#' dist_list <- nsDist(coords)
#' # Use nsDist to calculate Euclidean distances
#' dist_Euclidean <- sqrt(nsDist(coords, isotropic = TRUE)$dist1_sq)
#'
#' @export
#'
nsDist <- function( coords, scale_factor = NULL, isotropic = FALSE ){
N <- nrow(coords)
d <- ncol(coords)
if( !isotropic & d != 2 ) stop("nsDist: Anisotropy (isotropic = FALSE) only available for 2-dimensional coordinate systems.")
if(!isotropic){
# Calculate distances
dists1 <- as.matrix(dist(coords[,1], upper = T, diag = T))
dists2 <- as.matrix(dist(coords[,2], upper = T, diag = T))
temp1 <- matrix(coords[,1], nrow = N, ncol = N)
temp2 <- matrix(coords[,2], nrow = N, ncol = N)
sgn_mat1 <- ( temp1 - t(temp1) >= 0 )
sgn_mat1[sgn_mat1 == FALSE] <- -1
sgn_mat2 <- ( temp2 - t(temp2) >= 0 )
sgn_mat2[sgn_mat2 == FALSE] <- -1
dist1_sq <- dists1^2
dist2_sq <- dists2^2
dist12 <- sgn_mat1*dists1*sgn_mat2*dists2
} else{
dist1_sq <- as.matrix(dist(coords, upper = T, diag = T))^2
dist2_sq <- matrix(-1, N, N)
dist12 <- matrix(0, N, N)
}
# Rescale if needed
if( !is.null(scale_factor) ){
dist1_sq <- dist1_sq/scale_factor
dist2_sq <- dist2_sq/scale_factor
dist12 <- dist12/scale_factor
}
return(list(
dist1_sq = dist1_sq, dist2_sq = dist2_sq,
dist12 = dist12, scale_factor = scale_factor ))
}
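## A quick identity sketch (not run): the coordinatewise squared distances
## sum to the squared Euclidean distance, and dist12 equals the signed
## cross term (x_i - x_j)*(y_i - y_j).
# coords <- cbind(runif(5), runif(5))
# dl <- nsDist(coords)
# max(abs(dl$dist1_sq + dl$dist2_sq - as.matrix(dist(coords))^2)) # ~ 0
# xdif <- outer(coords[,1], coords[,1], "-")
# ydif <- outer(coords[,2], coords[,2], "-")
# max(abs(dl$dist12 - xdif*ydif)) # ~ 0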
#==============================================================================
# Coordinate-specific distance matrices, only for NN
#==============================================================================
# ROxygen comments ----
#' Calculate coordinate-specific distance matrices, only for nearest neighbors
#' and store in an array
#'
#' \code{nsDist3d} generates and returns new 3-dimensional arrays containing
#' the former dist1_sq, dist2_sq, and dist12 matrices, but only as needed
#' for the k nearest neighbors of each location. These 3-dimensional arrays
#' (dist1_3d, dist2_3d, and dist12_3d) are used in the new implementation
#' of calculateAD_ns().
#'
#' @param coords N x 2 matrix; contains the x-y coordinates of stations.
#' @param nID N x k matrix; contains indices of nearest neighbors.
#' @param scale_factor Scalar; optional argument for re-scaling the distances.
#' @param isotropic Logical; indicates whether distances should be calculated
#' separately for each coordinate dimension (FALSE) or simultaneously for all
#' coordinate dimensions (TRUE). \code{isotropic = FALSE} can only be used for
#' two-dimensional coordinate systems.
#'
#' @return Arrays with nearest neighbor distances in each coordinate
#' direction.
#'
#' @examples
#' # Generate some coordinates and neighbors
#' coords <- cbind(runif(100),runif(100))
#' nID <- determineNeighbors(coords, 10)
#' # Calculate distances
#' nsDist3d(coords, nID)
#'
#' @export
#'
nsDist3d <- function(coords, nID, scale_factor = NULL, isotropic = FALSE) {
N <- nrow(coords)
d <- ncol(coords)
if( !isotropic & d != 2 ) stop("Anisotropy (isotropic = FALSE) only available for 2-dimensional coordinate systems.")
k <- ncol(nID)
dist1_3d <- array(0, c(N, k+1, k+1))
dist2_3d <- array(0, c(N, k+1, k+1))
dist12_3d <- array(0, c(N, k+1, k+1))
if(!isotropic){
for(i in 2:N) {
if(i<=k) nNei <- i-1 else nNei <- k
ind <- c( nID[i,1:nNei], i )
thisN <- nNei + 1
theseCoords <- coords[ind, ]
dists1 <- as.matrix(dist(theseCoords[,1]))
dists2 <- as.matrix(dist(theseCoords[,2]))
temp1 <- matrix(theseCoords[,1], nrow = thisN, ncol = thisN)
temp2 <- matrix(theseCoords[,2], nrow = thisN, ncol = thisN)
sgn_mat1 <- ( temp1 - t(temp1) >= 0 )
sgn_mat1[sgn_mat1 == FALSE] <- -1
sgn_mat2 <- ( temp2 - t(temp2) >= 0 )
sgn_mat2[sgn_mat2 == FALSE] <- -1
dist1_3d[i, 1:thisN, 1:thisN] <- dists1^2
dist2_3d[i, 1:thisN, 1:thisN] <- dists2^2
dist12_3d[i, 1:thisN, 1:thisN] <- sgn_mat1*dists1*sgn_mat2*dists2
}
} else{
dist2_3d[1,,] <- -1
for(i in 2:N) {
if(i<=k) nNei <- i-1 else nNei <- k
ind <- c( nID[i,1:nNei], i )
thisN <- nNei + 1
theseCoords <- coords[ind, ]
dists1 <- as.matrix(dist(theseCoords))
dist1_3d[i, 1:thisN, 1:thisN] <- dists1^2
dist2_3d[i,,] <- -1
}
}
if(!is.null(scale_factor)) {
dist1_3d <- dist1_3d / scale_factor
dist2_3d <- dist2_3d / scale_factor
dist12_3d <- dist12_3d / scale_factor
}
return(list(dist1_3d = dist1_3d,
dist2_3d = dist2_3d,
dist12_3d = dist12_3d,
scale_factor = scale_factor))
}
#==============================================================================
# Calculate coordinate-specific cross-distance matrices
#==============================================================================
# ROxygen comments ----
#' Calculate coordinate-specific cross-distance matrices
#'
#' \code{nsCrossdist} calculates coordinate-specific cross distances in x, y,
#' and x-y for use in the nonstationary cross-correlation calculation. This
#' function is useful for calculating posterior predictions.
#'
#' @param coords N x 2 matrix; contains x-y coordinates of station (observed)
#' locations.
#' @param Pcoords M x 2 matrix; contains x-y coordinates of prediction
#' locations.
#' @param scale_factor Scalar; optional argument for re-scaling the distances.
#' @param isotropic Logical; indicates whether distances should be calculated
#' using Euclidean distance (\code{isotropic = TRUE}) or using the anisotropic
#' formulation (\code{isotropic = FALSE}).
#'
#' @return A list of distances matrices, with the following components:
#' \item{dist1_sq}{M x N matrix; contains values of pairwise squared cross-
#' distances in the x-coordinate.}
#' \item{dist2_sq}{M x N matrix; contains values of pairwise squared cross-
#' distances in the y-coordinate.}
#' \item{dist12}{M x N matrix; contains values of pairwise signed cross-
#' distances between the x- and y-coordinates.}
#' \item{scale_factor}{Value of the scale factor used to rescale distances.}
#'
#' @examples
#' # Generate some coordinates
#' coords <- cbind(runif(100),runif(100))
#' Pcoords <- cbind(runif(200),runif(200))
#' # Calculate distances
#' Xdist_list <- nsCrossdist(coords, Pcoords)
#'
#' @export
#'
nsCrossdist <- function(coords, Pcoords, scale_factor = NULL, isotropic = FALSE ){
N <- nrow(coords)
M <- nrow(Pcoords)
d <- ncol(Pcoords)
if( !isotropic & d != 2 ) stop("nsCrossdist: Anisotropy (isotropic = FALSE) only available for 2-dimensional coordinate systems.")
if(!isotropic){
# Calculate distances
dists1 <- mahalanobis.dist(data.x = Pcoords[,1], data.y = coords[,1], vc = diag(1))
dists2 <- mahalanobis.dist(data.x = Pcoords[,2], data.y = coords[,2], vc = diag(1))
temp1a <- matrix(coords[,1], nrow = M, ncol = N, byrow = TRUE)
temp1b <- matrix(Pcoords[,1], nrow = M, ncol = N)
temp2a <- matrix(coords[,2], nrow = M, ncol = N, byrow = TRUE)
temp2b <- matrix(Pcoords[,2], nrow = M, ncol = N)
sgn_mat1 <- ( temp1a - temp1b >= 0 )
sgn_mat1[sgn_mat1 == FALSE] <- -1
sgn_mat2 <- ( temp2a - temp2b >= 0 )
sgn_mat2[sgn_mat2 == FALSE] <- -1
dist1_sq <- dists1^2
dist2_sq <- dists2^2
dist12 <- sgn_mat1*dists1*sgn_mat2*dists2
} else{
dist1_sq <- mahalanobis.dist(data.x = Pcoords, data.y = coords, vc = diag(d))^2
dist2_sq <- matrix(-1, M, N)
dist12 <- matrix(0, M, N)
}
# Rescale if needed
if( !is.null(scale_factor) ){
dist1_sq <- dist1_sq/scale_factor
dist2_sq <- dist2_sq/scale_factor
dist12 <- dist12/scale_factor
}
return(list(
dist1_sq = dist1_sq, dist2_sq = dist2_sq,
dist12 = dist12, scale_factor = scale_factor ))
}
#==============================================================================
# Coordinate-specific cross-distance matrices, only for NN
#==============================================================================
# ROxygen comments ----
#' Calculate coordinate-specific cross-distance matrices, only for nearest neighbors
#' and store in an array
#'
#' \code{nsCrossdist3d} generates and returns new 3-dimensional arrays containing
#' the former dist1_sq, dist2_sq, and dist12 matrices, but only as needed
#' for the k nearest neighbors of each prediction location. These 3-dimensional
#' arrays (dist1_3d, dist2_3d, and dist12_3d) are used in the new
#' implementation of calculateAD_ns().
#'
#' @param coords N x d matrix; contains the x-y coordinates of stations.
#' @param predCoords M x d matrix; contains the coordinates of prediction locations.
#' @param P_nID N x k matrix; contains indices of nearest neighbors.
#' @param scale_factor Scalar; optional argument for re-scaling the distances.
#' @param isotropic Logical; indicates whether distances should be calculated
#' separately for each coordinate dimension (FALSE) or simultaneously for all
#' coordinate dimensions (TRUE). \code{isotropic = FALSE} can only be used for
#' two-dimensional coordinate systems.
#'
#' @return Arrays with nearest neighbor distances in each coordinate
#' direction. When the spatial dimension d > 2, dist1_3d contains squared
#' Euclidean distances, dist2_3d is filled with -1, and dist12_3d with 0.
#'
#' @examples
#' # Generate some coordinates and neighbors
#' coords <- cbind(runif(100),runif(100))
#' predCoords <- cbind(runif(200),runif(200))
#' P_nID <- FNN::get.knnx(coords, predCoords, k = 10)$nn.index # Prediction NN
#' # Calculate distances
#' Pdist <- nsCrossdist3d(coords, predCoords, P_nID)
#'
#' @export
#'
nsCrossdist3d <- function(coords, predCoords, P_nID, scale_factor = NULL, isotropic = FALSE) {
N <- nrow(coords)
M <- nrow(predCoords)
d <- ncol(coords)
if( !isotropic & d != 2 ) stop("Anisotropy (isotropic = FALSE) only available for 2-dimensional coordinate systems.")
k <- ncol(P_nID)
thisN <- k+1
dist1_3d <- array(0, c(M, k+1, k+1))
dist2_3d <- array(0, c(M, k+1, k+1))
dist12_3d <- array(0, c(M, k+1, k+1))
if(!isotropic){
for(i in 1:M){
theseCoords <- rbind(coords[P_nID[i,], ], predCoords[i,])
dists1 <- as.matrix(dist(theseCoords[,1]))
dists2 <- as.matrix(dist(theseCoords[,2]))
temp1 <- matrix(theseCoords[,1], nrow = thisN, ncol = thisN)
temp2 <- matrix(theseCoords[,2], nrow = thisN, ncol = thisN)
sgn_mat1 <- ( temp1 - t(temp1) >= 0 )
sgn_mat1[sgn_mat1 == FALSE] <- -1
sgn_mat2 <- ( temp2 - t(temp2) >= 0 )
sgn_mat2[sgn_mat2 == FALSE] <- -1
dist1_3d[i, 1:thisN, 1:thisN] <- dists1^2
dist2_3d[i, 1:thisN, 1:thisN] <- dists2^2
dist12_3d[i, 1:thisN, 1:thisN] <- sgn_mat1*dists1*sgn_mat2*dists2
}
} else{
for(i in 1:M){
theseCoords <- rbind(coords[P_nID[i,], ], predCoords[i,])
dists1 <- as.matrix(dist(theseCoords))
dist1_3d[i, 1:thisN, 1:thisN] <- dists1^2
dist2_3d[i,,] <- -1
}
}
if(!is.null(scale_factor)) {
dist1_3d <- dist1_3d / scale_factor
dist2_3d <- dist2_3d / scale_factor
dist12_3d <- dist12_3d / scale_factor
}
return(list(dist1_3d = dist1_3d,
dist2_3d = dist2_3d,
dist12_3d = dist12_3d,
scale_factor = scale_factor))
}
#==============================================================================
# NIMBLE code for a generic nonstationary GP model
#==============================================================================
# ROxygen comments ----
#' NIMBLE code for a generic nonstationary GP model
#'
#' This function sets up and compiles a nimble model for a general
#' nonstationary Gaussian process.
#'
#' @param tau_model Character; specifies the model to be used for the log(tau)
#' process. Options are \code{"constant"} (spatially-constant),
#' \code{"logLinReg"} (log-linear regression), and \code{"approxGP"}
#' (approximation to a Gaussian process).
#' @param sigma_model Character; specifies the model to be used for the
#' log(sigma) process. See \code{tau_model} for options.
#' @param Sigma_model Character; specifies the model to be used for the
#' Sigma anisotropy process. Options are \code{"constant"} (spatially-constant),
#' \code{"constantIso"} (spatially-constant and isotropic), \code{"covReg"}
#' (covariance regression), \code{"compReg"} (componentwise regression),
#' \code{"compRegIso"} (isotropic componentwise regression), \code{"npApproxGP"}
#' (nonparameteric regression via an approximation to a stationary Gaussian
#' process), and \code{"npApproxGPIso"} (isotropic nonparameteric regression
#' via an approximation to a stationary Gaussian process)
#' @param mu_model Character; specifies the model to be used for the mu mean
#' process. Options are \code{"constant"} (spatially-constant), \code{"linReg"}
#' (linear regression), and \code{"zero"} (a fixed zero-mean).
#' @param likelihood Character; specifies the likelihood model. Options are
#' \code{"fullGP"} (the exact Gaussian process likelihood), \code{"NNGP"} (the
#' nearest-neighbor GP for the response approximate likelihood), and \code{"SGV"}
#' (the sparse general Vecchia approximate likelihood).
#' @param coords N x d matrix of spatial coordinates.
#' @param data N-vector; observed vector of the spatial process of interest
#' @param constants A list of constants required to build the model; depends on
#' the specific parameter process models chosen.
#' @param monitorAllSampledNodes Logical; indicates whether all sampled nodes
#' should be stored (\code{TRUE}) or not (\code{FALSE}).
#' @param ... Additional arguments can be passed to the function; for example,
#' as an alternative to the \code{constants} list, items can be passed directly
#' via this argument.
#'
#' @return A \code{nimbleCode} object.
#'
#' @examples
#' # Generate some data: stationary/isotropic
#' N <- 100
#' coords <- matrix(runif(2*N), ncol = 2)
#' alpha_vec <- rep(log(sqrt(1)), N) # Log process SD
#' delta_vec <- rep(log(sqrt(0.05)), N) # Log nugget SD
#' Sigma11_vec <- rep(0.4, N) # Kernel matrix element 1,1
#' Sigma22_vec <- rep(0.4, N) # Kernel matrix element 2,2
#' Sigma12_vec <- rep(0, N) # Kernel matrix element 1,2
#' mu_vec <- rep(0, N) # Mean
#' nu <- 0.5 # Smoothness
#' dist_list <- nsDist(coords)
#' Cor_mat <- nsCorr( dist1_sq = dist_list$dist1_sq, dist2_sq = dist_list$dist2_sq,
#' dist12 = dist_list$dist12, Sigma11 = Sigma11_vec,
#' Sigma22 = Sigma22_vec, Sigma12 = Sigma12_vec, nu = nu, d = 2 )
#' Cov_mat <- diag(exp(alpha_vec)) %*% Cor_mat %*% diag(exp(alpha_vec))
#' D_mat <- diag(exp(delta_vec)^2)
#' set.seed(110)
#' data <- as.numeric(mu_vec + t(chol(Cov_mat + D_mat)) %*% rnorm(N))
#' # Set up constants
#' constants <- list( nu = 0.5, Sigma_HP1 = 2 )
#' # Defaults: tau_model = "constant", sigma_model = "constant", mu_model = "constant",
#' # and Sigma_model = "constant"
#' Rmodel <- nsgpModel(likelihood = "fullGP", constants = constants, coords = coords, data = data )
#'
#' @export
#'
nsgpModel <- function( tau_model = "constant",
sigma_model = "constant",
Sigma_model = "constant",
mu_model = "constant",
likelihood = "fullGP",
coords,
data,
constants = list(),
monitorAllSampledNodes = TRUE,
... ) {
##============================================
## Models for tau
##============================================
tau_model_list <- list(
constant = list(
## 1. tau_HP1 Upper bound for the uniform prior on delta
## 2. delta Scalar; the constant nugget variance (tau = sqrt(delta) over the domain)
## 3. ones N-vector of 1's
code = quote({
log_tau_vec[1:N] <- log(sqrt(delta))*ones[1:N]
delta ~ dunif(0, tau_HP1)
}),
constants_needed = c("ones", "tau_HP1"),
inits = list(delta = quote(tau_HP1/10))
),
logLinReg = list(
## 1. X_tau N x p_tau design matrix; leading column of 1's with (p_tau - 1) other covariates
## 2. tau_HP1 Standard deviation for the log-linear regression coefficients
## 3. p_tau Number of design columns
## 4. delta Vector of length p_tau; represents log-linear regression coefficients
code = quote({
log_tau_vec[1:N] <- X_tau[1:N,1:p_tau] %*% delta[1:p_tau]
for(l in 1:p_tau){
delta[l] ~ dnorm(0, sd = tau_HP1)
}
tau_constraint1 ~ dconstraint( max(abs(log_tau_vec[1:N])) < maxAbsLogSD )
}),
constants_needed = c("X_tau", "p_tau", "tau_HP1", "maxAbsLogSD"),
inits = list(delta = quote(rep(0, p_tau))),
constraints_needed = c('tau_constraint1')
),
approxGP = list(
## 1. tau_HP1 Prior standard deviation for the Gaussian process mean
## 2. tau_HP2 Gaussian process smoothness
## 3. tau_HP3 Upper bound for the Gaussian process range
## 4. tau_HP4 Upper bound for the Gaussian process standard deviation
## 5. ones N-vector of 1's
## 6. tau_cross_dist N x p_tau matrix of inter-point Euclidean distances, obs. coords vs. knot locations
## 7. tau_knot_dist p_tau x p_tau matrix of inter-point Euclidean distances, knot locations
## 8. p_tau Number of knot locations
code = quote({
log_tau_vec[1:N] <- tauGP_mu*ones[1:N] + tauGP_sigma*Pmat_tau[1:N,1:p_tau] %*% w_tau[1:p_tau]
Pmat_tau[1:N,1:p_tau] <- matern_corr(tau_cross_dist[1:N,1:p_tau], tauGP_phi, tau_HP2)
Vmat_tau[1:p_tau,1:p_tau] <- matern_corr(tau_knot_dist[1:p_tau,1:p_tau], tauGP_phi, tau_HP2)
w_tau_mean[1:p_tau] <- 0*ones[1:p_tau]
w_tau[1:p_tau] ~ dmnorm( mean = w_tau_mean[1:p_tau], prec = Vmat_tau[1:p_tau,1:p_tau] )
# Hyperparameters
tauGP_mu ~ dnorm(0, sd = tau_HP1)
tauGP_phi ~ dunif(0, tau_HP3) # Range parameter, GP
tauGP_sigma ~ dunif(0, tau_HP4) # SD parameter, GP
# Constraint
tau_constraint1 ~ dconstraint( max(abs(log_tau_vec[1:N])) < maxAbsLogSD )
}),
constants_needed = c("ones", "tau_knot_coords", "tau_cross_dist", "tau_knot_dist",
"p_tau", "tau_HP1", "tau_HP2", "tau_HP3", "tau_HP4", "maxAbsLogSD"),
inits = list(
w_tau = quote(rep(0, p_tau)),
tauGP_mu = quote(0),
tauGP_phi = quote(tau_HP3/100),
tauGP_sigma = quote(tau_HP4/100)
),
constraints_needed = c('tau_constraint1')
)
)
##============================================
## Models for sigma
##============================================
sigma_model_list <- list(
constant = list(
## 1. sigma_HP1 Upper bound for the uniform prior on alpha
## 2. alpha Scalar; the constant process variance (sigma = sqrt(alpha) over the domain)
## 3. ones N-vector of 1's
code = quote({
log_sigma_vec[1:N] <- log(sqrt(alpha))*ones[1:N]
alpha ~ dunif(0, sigma_HP1)
}),
constants_needed = c("ones", "sigma_HP1"),
inits = list(alpha = quote(sigma_HP1/10))
),
logLinReg = list(
## 1. X_sigma N x p_sigma design matrix; leading column of 1's with (p_sigma - 1) other covariates
## 2. sigma_HP1 Standard deviation for the log-linear regression coefficients
## 3. p_sigma Number of design columns
## 4. alpha Vector of length p_sigma; represents log-linear regression coefficients
code = quote({
log_sigma_vec[1:N] <- X_sigma[1:N,1:p_sigma] %*% alpha[1:p_sigma]
for(l in 1:p_sigma){
alpha[l] ~ dnorm(0, sd = sigma_HP1)
}
# Constraint
sigma_constraint1 ~ dconstraint( max(abs(log_sigma_vec[1:N])) < maxAbsLogSD )
}),
constants_needed = c("X_sigma", "p_sigma", "sigma_HP1", "maxAbsLogSD"),
inits = list(alpha = quote(rep(0, p_sigma))),
constraints_needed = c('sigma_constraint1')
),
approxGP = list(
## 1. sigma_HP1 Prior standard deviation for the Gaussian process mean
## 2. sigma_HP2 Gaussian process smoothness
## 3. sigma_HP3 Upper bound for the Gaussian process range
## 4. sigma_HP4 Upper bound for the Gaussian process standard deviation
## 5. ones N-vector of 1's
## 6. sigma_cross_dist N x p_sigma matrix of inter-point Euclidean distances, obs. coords vs. knot locations
## 7. sigma_knot_dist p_sigma x p_sigma matrix of inter-point Euclidean distances, knot locations
## 8. p_sigma Number of knot locations
code = quote({
log_sigma_vec[1:N] <- sigmaGP_mu*ones[1:N] + sigmaGP_sigma*Pmat_sigma[1:N,1:p_sigma] %*% w_sigma[1:p_sigma]
Pmat_sigma[1:N,1:p_sigma] <- matern_corr(sigma_cross_dist[1:N,1:p_sigma], sigmaGP_phi, sigma_HP2)
Vmat_sigma[1:p_sigma,1:p_sigma] <- matern_corr(sigma_knot_dist[1:p_sigma,1:p_sigma], sigmaGP_phi, sigma_HP2)
w_sigma_mean[1:p_sigma] <- 0*ones[1:p_sigma]
w_sigma[1:p_sigma] ~ dmnorm( mean = w_sigma_mean[1:p_sigma], prec = Vmat_sigma[1:p_sigma,1:p_sigma] )
# Hyperparameters
sigmaGP_mu ~ dnorm(0, sd = sigma_HP1)
sigmaGP_phi ~ dunif(0, sigma_HP3) # Range parameter, GP
sigmaGP_sigma ~ dunif(0, sigma_HP4) # SD parameter, GP
# Constraint
sigma_constraint1 ~ dconstraint( max(abs(log_sigma_vec[1:N])) < maxAbsLogSD )
}),
constants_needed = c("ones", "sigma_knot_coords", "sigma_cross_dist", "sigma_knot_dist",
"p_sigma", "sigma_HP1", "sigma_HP2", "sigma_HP3", "sigma_HP4", "maxAbsLogSD"),
inits = list(
w_sigma = quote(rep(0, p_sigma)),
sigmaGP_mu = quote(0),
sigmaGP_phi = quote(sigma_HP3/100),
sigmaGP_sigma = quote(sigma_HP4/100)),
constraints_needed = c('sigma_constraint1')
)
)
##============================================
## Models for Sigma
##============================================
Sigma_model_list <- list(
constant = list(
## 1. ones N-vector of 1's
## 2. Sigma_HP1 Upper bound for the eigenvalues
## 3. Sigma_coef{1,2,3} Scalars; the two eigenvalues (1,2) and rotation (3) of the constant anisotropy matrix
code = quote({
Sigma11[1:N] <- ones[1:N]*(Sigma_coef1*cos(Sigma_coef3)*cos(Sigma_coef3) + Sigma_coef2*sin(Sigma_coef3)*sin(Sigma_coef3))
Sigma22[1:N] <- ones[1:N]*(Sigma_coef2*cos(Sigma_coef3)*cos(Sigma_coef3) + Sigma_coef1*sin(Sigma_coef3)*sin(Sigma_coef3))
Sigma12[1:N] <- ones[1:N]*(Sigma_coef1*cos(Sigma_coef3)*sin(Sigma_coef3) - Sigma_coef2*cos(Sigma_coef3)*sin(Sigma_coef3))
Sigma_coef1 ~ dunif(0, Sigma_HP1[1]) # phi1
Sigma_coef2 ~ dunif(0, Sigma_HP1[1]) # phi2
Sigma_coef3 ~ dunif(0, 1.570796) # eta --> 1.570796 = pi/2
}),
constants_needed = c("ones", "Sigma_HP1"),
inits = list(
Sigma_coef1 = quote(Sigma_HP1[1]/4),
Sigma_coef2 = quote(Sigma_HP1[1]/4),
Sigma_coef3 = 0.7853982 # pi/4
)
),
constantIso = list( # Isotropic version of 'constant'
## 1. ones N-vector of 1's
## 2. Sigma_HP1 Upper bound for the isotropic anisotropy component
## 3. Sigma_coef1 Scalar; the isotropic anisotropy component
code = quote({
Sigma11[1:N] <- ones[1:N]*Sigma_coef1
Sigma22[1:N] <- ones[1:N]*Sigma_coef1
Sigma12[1:N] <- ones[1:N]*0
Sigma_coef1 ~ dunif(0, Sigma_HP1[1]) # phi1
}),
constants_needed = c("ones", "Sigma_HP1"),
inits = list( Sigma_coef1 = quote(Sigma_HP1[1]/4) )
),
covReg = list(
code = quote({
## 1. X_Sigma N x p_Sigma design matrix; leading column of 1's with (p_Sigma - 1) other covariates
## 2. Sigma_HP1 Standard deviation for the covariance regression coefficients
## 3. p_Sigma Number of design columns
## 4. gamma1, gamma2 Vectors of length p_Sigma; represents covariance regression coefficients
## 5. psi11, psi22, rho Baseline covariance regression parameters
## 6. Sigma_HP2 Upper bound for the baseline covariance regression variances
Sigma11[1:N] <- psi11*ones[1:N] + (X_Sigma[1:N,1:p_Sigma] %*% gamma1[1:p_Sigma])^2
Sigma12[1:N] <- rho*sqrt(psi11*psi22)*ones[1:N] + (X_Sigma[1:N,1:p_Sigma]%*%gamma1[1:p_Sigma])*(X_Sigma[1:N,1:p_Sigma]%*%gamma2[1:p_Sigma])
Sigma22[1:N] <- psi22*ones[1:N] + (X_Sigma[1:N,1:p_Sigma] %*% gamma2[1:p_Sigma])^2
psi11 ~ dunif(0, Sigma_HP2[1])
psi22 ~ dunif(0, Sigma_HP2[2])
rho ~ dunif(-1, 1)
for(j in 1:p_Sigma){
gamma1[j] ~ dnorm(0, sd = Sigma_HP1[1])
gamma2[j] ~ dnorm(0, sd = Sigma_HP1[2])
}
# Constraints: upper limits on Sigma11 and Sigma22; lower limit on the determinant
Sigma_constraint1 ~ dconstraint( max(Sigma11[1:N]) < maxAnisoRange )
Sigma_constraint2 ~ dconstraint( max(Sigma22[1:N]) < maxAnisoRange )
Sigma_constraint3 ~ dconstraint( min(Sigma11[1:N]*Sigma22[1:N] - Sigma12[1:N]*Sigma12[1:N]) > minAnisoDet )
}),
constants_needed = c("ones", "X_Sigma", "p_Sigma", "Sigma_HP1", "Sigma_HP2", "maxAnisoRange", "minAnisoDet"),
inits = list(
psi11 = quote(Sigma_HP2[1]/4),
psi22 = quote(Sigma_HP2[2]/4),
rho = 0,
gamma1 = quote(rep(0, p_Sigma)),
gamma2 = quote(rep(0, p_Sigma))
),
constraints_needed = c('Sigma_constraint1', 'Sigma_constraint2', 'Sigma_constraint3')
),
compReg = list(
code = quote({
## 1. X_Sigma N x p_Sigma design matrix; leading column of 1's with (p_Sigma - 1) other covariates
## 2. Sigma_HP1 Standard deviation for the component regression coefficients
## 3. p_Sigma Number of design columns
## 4. Sigma_coef{1,2,3} Vectors of length p_Sigma; represents component regression coefficients
eigen_comp1[1:N] <- X_Sigma[1:N,1:p_Sigma] %*% Sigma_coef1[1:p_Sigma]
eigen_comp2[1:N] <- X_Sigma[1:N,1:p_Sigma] %*% Sigma_coef2[1:p_Sigma]
eigen_comp3[1:N] <- X_Sigma[1:N,1:p_Sigma] %*% Sigma_coef3[1:p_Sigma]
Sigma11[1:N] <- inverseEigen(eigen_comp1[1:N], eigen_comp2[1:N], eigen_comp3[1:N], 1)
Sigma12[1:N] <- inverseEigen(eigen_comp1[1:N], eigen_comp2[1:N], eigen_comp3[1:N], 3)
Sigma22[1:N] <- inverseEigen(eigen_comp1[1:N], eigen_comp2[1:N], eigen_comp3[1:N], 2)
for(j in 1:p_Sigma){
Sigma_coef1[j] ~ dnorm(0, sd = Sigma_HP1[1])
Sigma_coef2[j] ~ dnorm(0, sd = Sigma_HP1[1])
Sigma_coef3[j] ~ dnorm(0, sd = Sigma_HP1[1])
}
# Constraints: upper limits on Sigma11 and Sigma22; lower limit on the determinant
Sigma_constraint1 ~ dconstraint( max(Sigma11[1:N]) < maxAnisoRange )
Sigma_constraint2 ~ dconstraint( max(Sigma22[1:N]) < maxAnisoRange )
Sigma_constraint3 ~ dconstraint( min(Sigma11[1:N]*Sigma22[1:N] - Sigma12[1:N]*Sigma12[1:N]) > minAnisoDet )
}),
constants_needed = c("X_Sigma", "p_Sigma", "Sigma_HP1", "maxAnisoRange", "minAnisoDet"),
inits = list(
Sigma_coef1 = quote(c(log(maxAnisoRange/100), rep(0, p_Sigma-1))),
Sigma_coef2 = quote(c(log(maxAnisoRange/100), rep(0, p_Sigma-1))),
Sigma_coef3 = quote(rep(0, p_Sigma))
),
constraints_needed = c('Sigma_constraint1', 'Sigma_constraint2', 'Sigma_constraint3')
),
compRegIso = list( # Isotropic version of compReg
code = quote({
## 1. X_Sigma N x p_Sigma design matrix; leading column of 1's with (p_Sigma - 1) other covariates
## 2. Sigma_HP1 Standard deviation for the component regression coefficients
## 3. p_Sigma Number of design columns
## 4. Sigma_coef{1,2,3} Vectors of length p_Sigma; represents component regression coefficients
eigen_comp1[1:N] <- X_Sigma[1:N,1:p_Sigma] %*% Sigma_coef1[1:p_Sigma]
Sigma11[1:N] <- exp(eigen_comp1[1:N])
Sigma22[1:N] <- exp(eigen_comp1[1:N])
Sigma12[1:N] <- ones[1:N]*0
for(j in 1:p_Sigma){
Sigma_coef1[j] ~ dnorm(0, sd = Sigma_HP1[1])
}
# Constraint: upper limit on Sigma11
Sigma_constraint1 ~ dconstraint( max(Sigma11[1:N]) < maxAnisoRange )
}),
constants_needed = c("ones", "X_Sigma", "p_Sigma", "Sigma_HP1", "maxAnisoRange"),
inits = list(
Sigma_coef1 = quote(c(log(maxAnisoRange/100), rep(0, p_Sigma-1)))
),
constraints_needed = c('Sigma_constraint1')
),
npApproxGP = list(
code = quote({
## 1. Sigma_HP1 2-vector; prior standard deviations for the Gaussian process means
## 2. Sigma_HP2 2-vector; Gaussian process smoothness parameters
## 3. Sigma_HP3 2-vector; upper bounds for the Gaussian process ranges
## 4. Sigma_HP4 2-vector; upper bounds for the Gaussian process standard deviations
## 5. ones N-vector of 1's
## 6. Sigma_cross_dist N x p_Sigma matrix of inter-point Euclidean distances, obs. coords vs. knot locations
## 7. Sigma_knot_dist p_Sigma x p_Sigma matrix of inter-point Euclidean distances, knot locations
## 8. p_Sigma Number of knot locations
Sigma11[1:N] <- inverseEigen(eigen_comp1[1:N], eigen_comp2[1:N], eigen_comp3[1:N], 1)
Sigma12[1:N] <- inverseEigen(eigen_comp1[1:N], eigen_comp2[1:N], eigen_comp3[1:N], 3)
Sigma22[1:N] <- inverseEigen(eigen_comp1[1:N], eigen_comp2[1:N], eigen_comp3[1:N], 2)
# approxGP1, approxGP2
eigen_comp1[1:N] <- SigmaGP_mu[1]*ones[1:N] + SigmaGP_sigma[1] * Pmat12_Sigma[1:N,1:p_Sigma] %*% w1_Sigma[1:p_Sigma]
eigen_comp2[1:N] <- SigmaGP_mu[1]*ones[1:N] + SigmaGP_sigma[1] * Pmat12_Sigma[1:N,1:p_Sigma] %*% w2_Sigma[1:p_Sigma]
Pmat12_Sigma[1:N,1:p_Sigma] <- matern_corr(Sigma_cross_dist[1:N,1:p_Sigma], SigmaGP_phi[1], Sigma_HP2[1])
Vmat12_Sigma[1:p_Sigma,1:p_Sigma] <- matern_corr(Sigma_knot_dist[1:p_Sigma,1:p_Sigma], SigmaGP_phi[1], Sigma_HP2[1])
w12_Sigma_mean[1:p_Sigma] <- 0*ones[1:p_Sigma]
w1_Sigma[1:p_Sigma] ~ dmnorm( mean = w12_Sigma_mean[1:p_Sigma], prec = Vmat12_Sigma[1:p_Sigma,1:p_Sigma] )
w2_Sigma[1:p_Sigma] ~ dmnorm( mean = w12_Sigma_mean[1:p_Sigma], prec = Vmat12_Sigma[1:p_Sigma,1:p_Sigma] )
# approxGP3
eigen_comp3[1:N] <- SigmaGP_mu[2]*ones[1:N] + SigmaGP_sigma[2] * Pmat3_Sigma[1:N,1:p_Sigma] %*% w3_Sigma[1:p_Sigma]
Pmat3_Sigma[1:N,1:p_Sigma] <- matern_corr(Sigma_cross_dist[1:N,1:p_Sigma], SigmaGP_phi[2], Sigma_HP2[2])
Vmat3_Sigma[1:p_Sigma,1:p_Sigma] <- matern_corr(Sigma_knot_dist[1:p_Sigma,1:p_Sigma], SigmaGP_phi[2], Sigma_HP2[2])
w3_Sigma_mean[1:p_Sigma] <- 0*ones[1:p_Sigma]
w3_Sigma[1:p_Sigma] ~ dmnorm( mean = w3_Sigma_mean[1:p_Sigma], prec = Vmat3_Sigma[1:p_Sigma,1:p_Sigma] )
# Hyperparameters
for(w in 1:2){
SigmaGP_mu[w] ~ dnorm(0, sd = Sigma_HP1[w])
SigmaGP_phi[w] ~ dunif(0, Sigma_HP3[w]) # Range parameter, GP
SigmaGP_sigma[w] ~ dunif(0, Sigma_HP4[w]) # SD parameter, GP
}
# Constraints: upper limits on Sigma11 and Sigma22; lower limit on the determinant
Sigma_constraint1 ~ dconstraint( max(Sigma11[1:N]) < maxAnisoRange )
Sigma_constraint2 ~ dconstraint( max(Sigma22[1:N]) < maxAnisoRange )
Sigma_constraint3 ~ dconstraint( min(Sigma11[1:N]*Sigma22[1:N] - Sigma12[1:N]*Sigma12[1:N]) > minAnisoDet )
}),
constants_needed = c("ones", "Sigma_HP1", "Sigma_HP2", "Sigma_HP3", "Sigma_HP4", "maxAnisoRange", "minAnisoDet",
"Sigma_knot_coords", "Sigma_cross_dist", "Sigma_knot_dist", "p_Sigma"),
inits = list(
w1_Sigma = quote(rep(0,p_Sigma)),
w2_Sigma = quote(rep(0,p_Sigma)),
w3_Sigma = quote(rep(0,p_Sigma)),
SigmaGP_mu = quote(rep(log(maxAnisoRange/100),2)),
SigmaGP_phi = quote(rep(Sigma_HP3[1]/100,2)),
SigmaGP_sigma = quote(rep(Sigma_HP4[1]/100,2))
),
constraints_needed = c('Sigma_constraint1', 'Sigma_constraint2', 'Sigma_constraint3')
),
npApproxGPIso = list(
code = quote({
## 1. Sigma_HP1 Prior standard deviation for the Gaussian process mean (first element used)
## 2. Sigma_HP2 Gaussian process smoothness (first element used)
## 3. Sigma_HP3 Upper bound for the Gaussian process range (first element used)
## 4. Sigma_HP4 Upper bound for the Gaussian process standard deviation (first element used)
## 5. ones N-vector of 1's
## 6. Sigma_cross_dist N x p_Sigma matrix of inter-point Euclidean distances, obs. coords vs. knot locations
## 7. Sigma_knot_dist p_Sigma x p_Sigma matrix of inter-point Euclidean distances, knot locations
## 8. p_Sigma Number of knot locations
Sigma11[1:N] <- exp(eigen_comp1[1:N])
Sigma22[1:N] <- exp(eigen_comp1[1:N])
Sigma12[1:N] <- ones[1:N]*0
# approxGP1
eigen_comp1[1:N] <- SigmaGP_mu[1]*ones[1:N] + SigmaGP_sigma[1] * Pmat12_Sigma[1:N,1:p_Sigma] %*% w1_Sigma[1:p_Sigma]
Pmat12_Sigma[1:N,1:p_Sigma] <- matern_corr(Sigma_cross_dist[1:N,1:p_Sigma], SigmaGP_phi[1], Sigma_HP2[1])
Vmat12_Sigma[1:p_Sigma,1:p_Sigma] <- matern_corr(Sigma_knot_dist[1:p_Sigma,1:p_Sigma], SigmaGP_phi[1], Sigma_HP2[1])
w12_Sigma_mean[1:p_Sigma] <- 0*ones[1:p_Sigma]
w1_Sigma[1:p_Sigma] ~ dmnorm( mean = w12_Sigma_mean[1:p_Sigma], prec = Vmat12_Sigma[1:p_Sigma,1:p_Sigma] )
# Hyperparameters
for(w in 1){
SigmaGP_mu[w] ~ dnorm(0, sd = Sigma_HP1[w])
SigmaGP_phi[w] ~ dunif(0, Sigma_HP3[w]) # Range parameter, GP
SigmaGP_sigma[w] ~ dunif(0, Sigma_HP4[w]) # SD parameter, GP
}
# Constraint: upper limit on Sigma11
Sigma_constraint1 ~ dconstraint( max(Sigma11[1:N]) < maxAnisoRange )
}),
constants_needed = c("ones", "Sigma_HP1", "Sigma_HP2", "Sigma_HP3", "Sigma_HP4", "maxAnisoRange",
"Sigma_knot_coords", "Sigma_cross_dist", "Sigma_knot_dist", "p_Sigma"),
inits = list(
w1_Sigma = quote(rep(0,p_Sigma)),
SigmaGP_mu = quote(rep(log(maxAnisoRange/100),1)),
SigmaGP_phi = quote(rep(Sigma_HP3[1]/100,1)),
SigmaGP_sigma = quote(rep(Sigma_HP4[1]/100,1))
),
constraints_needed = c('Sigma_constraint1')
)
)
##============================================
## Models for mu
##============================================
mu_model_list <- list(
constant = list(
## 1. mu_HP1 Prior standard deviation for the constant mean
## 2. beta Scalar; the constant mean (over the domain)
## 3. ones N-vector of 1's
code = quote({
mu[1:N] <- beta*ones[1:N]
beta ~ dnorm(0, sd = mu_HP1)
}),
constants_needed = c("ones", "mu_HP1"),
inits = list(beta = 0)),
linReg = list(
## 1. X_mu N x p_mu design matrix; leading column of 1's with (p_mu - 1) other covariates
## 2. p_mu Number of design columns
## 3. beta Vector of length p_mu; represents regression coefficients
code = quote({
mu[1:N] <- X_mu[1:N,1:p_mu] %*% beta[1:p_mu]
for(l in 1:p_mu){
beta[l] ~ dnorm(0, sd = mu_HP1)
}
}),
constants_needed = c("X_mu", "p_mu", "mu_HP1"),
inits = list(beta = quote(rep(0, p_mu)))),
zero = list(
## 1. zeros N-vector of 0's
code = quote({
mu[1:N] <- zeros[1:N]
}),
constants_needed = c("zeros"),
inits = list()
)
)
##============================================
## Models for likelihood
##============================================
likelihood_list <- list(
fullGP = list(
code = quote({
Cor[1:N,1:N] <- nsCorr(dist1_sq[1:N,1:N], dist2_sq[1:N,1:N], dist12[1:N,1:N],
Sigma11[1:N], Sigma22[1:N], Sigma12[1:N], nu, d)
sigmaMat[1:N,1:N] <- diag(exp(log_sigma_vec[1:N]))
Cov[1:N, 1:N] <- sigmaMat[1:N,1:N] %*% Cor[1:N,1:N] %*% sigmaMat[1:N,1:N]
C[1:N,1:N] <- Cov[1:N, 1:N] + diag(exp(log_tau_vec[1:N])^2)
z[1:N] ~ dmnorm(mean = mu[1:N], cov = C[1:N,1:N])
}),
constants_needed = c("N", "coords", "d", "dist1_sq", "dist2_sq", "dist12", "nu"), ## keep N, coords, d here
inits = list()
),
NNGP = list(
code = quote({
AD[1:N,1:(k+1)] <- calculateAD_ns(dist1_3d[1:N,1:(k+1),1:(k+1)],
dist2_3d[1:N,1:(k+1),1:(k+1)],
dist12_3d[1:N,1:(k+1),1:(k+1)],
Sigma11[1:N], Sigma22[1:N], Sigma12[1:N],
log_sigma_vec[1:N], log_tau_vec[1:N],
nID[1:N,1:k], N, k, nu, d)
z[1:N] ~ dmnorm_nngp(mu[1:N], AD[1:N,1:(k+1)], nID[1:N,1:k], N, k)
}),
constants_needed = c("N", "coords", "d", "dist1_3d", "dist2_3d", "dist12_3d", "nID", "k", "nu"), ## keep N, coords, d here
inits = list()
),
SGV = list(
code = quote({
U[1:num_NZ,1:3] <- calculateU_ns( dist1_3d[1:N,1:(k+1),1:(k+1)],
dist2_3d[1:N,1:(k+1),1:(k+1)],
dist12_3d[1:N,1:(k+1),1:(k+1)],
Sigma11[1:N], Sigma22[1:N], Sigma12[1:N],
log_sigma_vec[1:N], log_tau_vec[1:N],
nu, nID[1:N,1:k], cond_on_y[1:N,1:k], N, k, d )
z[1:N] ~ dmnorm_sgv(mu[1:N], U[1:num_NZ,1:3], N, k)
}),
constants_needed = c("N", "coords", "d", "dist1_3d", "dist2_3d", "dist12_3d", "nID", "k", "nu", "cond_on_y", "num_NZ"), ## keep N, coords, d here
inits = list()
)
)
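## Note on the three likelihoods above: "fullGP" evaluates the exact Gaussian
## likelihood with a dense N x N covariance (O(N^3) per evaluation), while
## "NNGP" and "SGV" are sparse approximations that condition each location on
## only its k nearest neighbors (via nID), trading exactness for scalability.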
##============================================
## Setup
##============================================
if(is.null( tau_model_list[[ tau_model]])) stop("unknown specification for tau_model")
if(is.null(sigma_model_list[[sigma_model]])) stop("unknown specification for sigma_model")
if(is.null(Sigma_model_list[[Sigma_model]])) stop("unknown specification for Sigma_model")
if(is.null( mu_model_list[[ mu_model]])) stop("unknown specification for mu_model")
if(is.null( likelihood_list[[ likelihood]])) stop("unknown specification for likelihood")
model_selections_list <- list(
tau = tau_model_list [[tau_model]],
sigma = sigma_model_list[[sigma_model]],
Sigma = Sigma_model_list[[Sigma_model]],
mu = mu_model_list [[mu_model]],
likelihood = likelihood_list [[likelihood]]
)
## code
code_template <- quote({
SIGMA_MODEL ## Log variance
TAU_MODEL ## Log nugget -- gotta respect the nugget
CAP_SIGMA_MODEL ## Anisotropy
MU_MODEL ## Mean
LIKELIHOOD_MODEL ## Likelihood
})
code <-
eval(substitute(substitute(
CODE,
list(TAU_MODEL = model_selections_list$tau$code,
SIGMA_MODEL = model_selections_list$sigma$code,
CAP_SIGMA_MODEL = model_selections_list$Sigma$code,
MU_MODEL = model_selections_list$mu$code,
LIKELIHOOD_MODEL = model_selections_list$likelihood$code)),
list(CODE = code_template)))
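## Illustration of the double-substitute idiom above, with hypothetical toy
## sub-models (not part of the package): the outer substitute() drops the
## template into CODE, and eval() then runs the resulting inner substitute(),
## splicing each sub-model's quoted code over its placeholder.
# tmpl <- quote({ SIGMA_MODEL; MU_MODEL })
# eval(substitute(substitute(CODE,
#                            list(SIGMA_MODEL = quote(s <- 1),
#                                 MU_MODEL    = quote(m <- 2))),
#                 list(CODE = tmpl)))
# ## returns quote({ s <- 1; m <- 2 })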
if(missing(data)) stop("must provide data as 'data' argument")
N <- length(data)
if(missing(coords)) stop("must provide 'coords' argument, array of spatial coordinates")
d <- ncol(coords)
coords <- as.matrix(coords)
sd_default <- 100
mu_default <- 0
matern_rho_default <- 1
matern_nu_default <- 5
maxDist <- 0
for(j in 1:d){
maxDist <- maxDist + (max(coords[,j]) - min(coords[,j]))^2
}
maxDist <- sqrt(maxDist) # max(dist(coords))
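## e.g. for coordinates filling the unit square, maxDist = sqrt(1^2 + 1^2) =
## sqrt(2): the bounding-box diagonal, an upper bound on max(dist(coords)).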
if(N < 200){
ones_set <- rep(1,200); zeros_set <- rep(0,200)
} else{
ones_set <- rep(1,N); zeros_set <- rep(0,N)
}
constants_defaults_list <- list(
N = N,
coords = coords,
d = d,
zeros = zeros_set,
ones = ones_set,
mu_HP1 = sd_default, ## standard deviation
tau_HP1 = sd_default, ## standard deviation/upper bound for constant nugget
tau_HP2 = matern_nu_default, ## approxGP smoothness
tau_HP3 = maxDist, ## upper bound for approxGP range
tau_HP4 = sd_default, ## upper bound for approxGP sd
sigma_HP1 = sd_default, ## standard deviation
sigma_HP2 = matern_nu_default, ## approxGP smoothness
sigma_HP3 = maxDist, ## upper bound for approxGP range
sigma_HP4 = sd_default, ## upper bound for approxGP sd
Sigma_HP1 = rep(10,2), ## standard deviation/upper bound
Sigma_HP2 = rep(10,2), ## uniform upper bound for covReg 'psi' parameters / latent approxGP smoothness
Sigma_HP3 = rep(maxDist,2), ## upper bound for approxGP range
Sigma_HP4 = rep(sd_default, 2), ## upper bound for approxGP sd
maxAbsLogSD = 10, ## logSD must live between +/- maxAbsLogSD
maxAnisoRange = maxDist, ## maximum value for the diagonal elements of the anisotropy process
minAnisoDet = 1e-5, ## lower bound for the determinant of the anisotropy process
nu = matern_nu_default ## Process smoothness parameter
)
## use the isotropic model?
useIsotropic <- (Sigma_model %in% c("constantIso", "compRegIso", "npApproxGPIso"))
## initialize constants_to_use with constants_defaults_list
constants_to_use <- constants_defaults_list
## update constants_to_use with those arguments provided via ...
dotdotdot <- list(...)
## make sure all ... arguments were provided with names
if(length(dotdotdot) > 0 && (is.null(names(dotdotdot)) || any(names(dotdotdot) == ""))) stop("Only named arguments should be provided through the ... argument")
constants_to_use[names(dotdotdot)] <- dotdotdot
## add 'constants' argument to constants_to_use list:
## if provided, make sure 'constants' argument is a named list
if(!missing(constants)) {
if(length(constants) > 0 && (is.null(names(constants)) || any(names(constants) == ""))) stop("All elements in constants list argument must be named")
constants_to_use[names(constants)] <- constants
}
## generate and add dist1_sq, dist2_sq, and dist12 arrays to constants_to_use list
if(likelihood == 'fullGP') {
dist_list <- nsDist(coords = coords, isotropic = useIsotropic)
} else { ## likelihood is NNGP, or SGV:
if(is.null(constants_to_use$k)) stop(paste0('missing k constants argument for ', likelihood, ' likelihood'))
mmd.seed <- sample(1e5, 1) # Set seed for reproducibility (randomness in orderCoordinatesMMD function)
if(likelihood == 'NNGP') {
# cat("\nOrdering the prediction locations and determining neighbors for NNGP (this may take a minute).\n")
# Re-order the coordinates/data
coords_mmd <- orderCoordinatesMMD(coords)
ord <- coords_mmd$orderedIndicesNoNA
coords <- coords[ord,]
data <- data[ord]
# Set neighbors and calculate distances
nID <- determineNeighbors(coords, constants_to_use$k)
constants_to_use$nID <- nID
dist_list <- nsDist3d(coords = coords, nID = nID, isotropic = useIsotropic)
}
if(likelihood == 'SGV') {
# cat("\nOrdering the prediction locations and determining neighbors/conditioning sets for SGV (this may take a minute).\n")
setupSGV <- sgvSetup(coords = coords, k = constants_to_use$k, seed = mmd.seed)
constants_to_use$nID <- setupSGV$nID_ord
constants_to_use$cond_on_y <- setupSGV$condition_on_y_ord
constants_to_use$num_NZ <- setupSGV$num_NZ
ord <- setupSGV$ord
# Re-order the coordinates/data
coords <- coords[ord,]
data <- data[ord]
dist_list <- nsDist3d(coords = coords, nID = setupSGV$nID_ord, isotropic = useIsotropic)
}
# Re-order any design matrices
if(!is.null(constants_to_use$X_tau)) constants_to_use$X_tau <- constants_to_use$X_tau[ord,]
if(!is.null(constants_to_use$X_sigma)) constants_to_use$X_sigma <- constants_to_use$X_sigma[ord,]
if(!is.null(constants_to_use$X_Sigma)) constants_to_use$X_Sigma <- constants_to_use$X_Sigma[ord,]
if(!is.null(constants_to_use$X_mu)) constants_to_use$X_mu <- constants_to_use$X_mu[ord,]
}
constants_to_use$coords <- coords
constants_to_use[names(dist_list)] <- dist_list
## if any models use approxGP: calculate XX_knot_dist and XX_cross_dist (coords already re-ordered for NNGP/SGV)
if( tau_model == 'approxGP' ) {
if(is.null(constants_to_use$tau_knot_coords)) stop(paste0('missing tau_knot_coords for tau_model: approxGP'))
constants_to_use$tau_knot_dist <- sqrt(nsDist(coords = constants_to_use$tau_knot_coords, isotropic = TRUE)$dist1_sq)
constants_to_use$tau_cross_dist <- sqrt(nsCrossdist(Pcoords = coords, coords = constants_to_use$tau_knot_coords, isotropic = TRUE)$dist1_sq)
}
if( sigma_model == 'approxGP' ) {
if(is.null(constants_to_use$sigma_knot_coords)) stop(paste0('missing sigma_knot_coords for sigma_model: approxGP'))
constants_to_use$sigma_knot_dist <- sqrt(nsDist(coords = constants_to_use$sigma_knot_coords, isotropic = TRUE)$dist1_sq)
constants_to_use$sigma_cross_dist <- sqrt(nsCrossdist(Pcoords = coords, coords = constants_to_use$sigma_knot_coords, isotropic = TRUE)$dist1_sq)
}
if( Sigma_model %in% c('npApproxGP', 'npApproxGPIso') ) {
if(is.null(constants_to_use$Sigma_knot_coords)) stop(paste0('missing Sigma_knot_coords for Sigma_model: ', Sigma_model))
constants_to_use$Sigma_knot_dist <- sqrt(nsDist(coords = constants_to_use$Sigma_knot_coords, isotropic = TRUE)$dist1_sq)
constants_to_use$Sigma_cross_dist <- sqrt(nsCrossdist(Pcoords = coords, coords = constants_to_use$Sigma_knot_coords, isotropic = TRUE)$dist1_sq)
}
## add the following (derived numbers of columns) to constants_to_use:
## p_tau:
if(!is.null(constants_to_use$X_tau)) constants_to_use$p_tau <- ncol(constants_to_use$X_tau)
if(!is.null(constants_to_use$tau_cross_dist)) constants_to_use$p_tau <- ncol(constants_to_use$tau_cross_dist)
## p_sigma:
if(!is.null(constants_to_use$X_sigma)) constants_to_use$p_sigma <- ncol(constants_to_use$X_sigma)
if(!is.null(constants_to_use$sigma_cross_dist)) constants_to_use$p_sigma <- ncol(constants_to_use$sigma_cross_dist)
## p_Sigma:
if(!is.null(constants_to_use$X_Sigma)) constants_to_use$p_Sigma <- ncol(constants_to_use$X_Sigma)
if(!is.null(constants_to_use$Sigma_cross_dist)) constants_to_use$p_Sigma <- ncol(constants_to_use$Sigma_cross_dist)
## p_mu:
if(!is.null(constants_to_use$X_mu)) constants_to_use$p_mu <- ncol(constants_to_use$X_mu)
## get a vector of all the constants we need for this model
constants_needed <- unique(unlist(lapply(model_selections_list, function(x) x$constants_needed), use.names = FALSE))
## check if we're missing any constants we need, and throw an error if any are missing
constants_missing <- setdiff(constants_needed, names(constants_to_use))
if(length(constants_missing) > 0) {
stop(paste0("Missing values for the following model constants: ",
paste0(constants_missing, collapse = ", "),
".\nThese values should be provided as named arguments, or named elements in the constants list argument"))
}
## generate the constants list
constants <- constants_to_use[constants_needed]
## append the mmd.seed and ord for SGV/NNGP
if(likelihood != 'fullGP') {
constants$mmd.seed <- mmd.seed
constants$ord <- ord
}
## ensure Sigma_HPX parameters are vectors of length 2
if(!is.null(constants$Sigma_HP1)){
if(length(constants$Sigma_HP1) == 1) constants$Sigma_HP1 <- rep(constants$Sigma_HP1, 2)
}
if(!is.null(constants$Sigma_HP2)){
if(length(constants$Sigma_HP2) == 1) constants$Sigma_HP2 <- rep(constants$Sigma_HP2, 2)
}
if(!is.null(constants$Sigma_HP3)){
if(length(constants$Sigma_HP3) == 1) constants$Sigma_HP3 <- rep(constants$Sigma_HP3, 2)
}
if(!is.null(constants$Sigma_HP4)){
if(length(constants$Sigma_HP4) == 1) constants$Sigma_HP4 <- rep(constants$Sigma_HP4, 2)
}
## gather constraints_needed data
constraints_needed <- unique(unlist(lapply(model_selections_list, function(x) x$constraints_needed), use.names = FALSE))
constraints_data <- as.list(rep(1, length(constraints_needed)))
names(constraints_data) <- constraints_needed
## data
data <- c(list(z = data), constraints_data)
## inits
inits_uneval <- do.call("c", unname(lapply(model_selections_list, function(x) x$inits)))
inits <- lapply(inits_uneval, function(x) eval(x, envir = constants))
# if(returnModelComponents) return(list(code=code, constants=constants, data=data, inits=inits))
## generate the "name" for the nimble model object, containing which submodels were used
thisName <- paste0(
'tau=' , tau_model , '_',
'sigma=' , sigma_model, '_',
'Sigma=' , Sigma_model, '_',
'mu=' , mu_model , '_',
'likelihood=', likelihood
)
## register custom NNGP or SGV distributions (if necessary),
## importantly, using mixedSizes = TRUE to avoid warnings
if(likelihood == 'NNGP') {
registerDistributions(list(
dmnorm_nngp = list(
BUGSdist = 'dmnorm_nngp(mean, AD, nID, N, k)',
types = c('value = double(1)', 'mean = double(1)', 'AD = double(2)', 'nID = double(2)', 'N = double()', 'k = double()'),
mixedSizes = TRUE)
), verbose = FALSE)
}
if(likelihood == 'SGV') {
registerDistributions(list(
dmnorm_sgv = list(
BUGSdist = 'dmnorm_sgv(mean, U, N, k)',
types = c('value = double(1)', 'mean = double(1)', 'U = double(2)', 'N = double()', 'k = double()'),
mixedSizes = TRUE)
), verbose = FALSE)
}
## NIMBLE model object
Rmodel <- nimbleModel(code, constants, data, inits, name = thisName)
lp <- Rmodel$getLogProb()
if(is(lp, 'try-error') || is.nan(lp) || is.na(lp) || abs(lp) == Inf) stop('model not properly initialized')
## store 'constants' list into Rmodel$isDataEnv
Rmodel$isDataEnv$.BayesNSGP_constants_list <- constants
## using the nsgpModel() argument monitorAllSampledNodes,
## set nimble package option: MCMCmonitorAllSampledNodes,
## so that latent process values are monitored by default, for use in prediction
nimbleOptions(MCMCmonitorAllSampledNodes = monitorAllSampledNodes)
return(Rmodel)
}
#==============================================================================
# Posterior prediction for the NSGP
#==============================================================================
# ROxygen comments ----
#' Posterior prediction for the NSGP
#'
#' \code{nsgpPredict} conducts posterior prediction for MCMC samples generated
#' using nimble and nsgpModel.
#'
#' @param model A NSGP nimble object; the output of \code{nsgpModel}.
#' @param samples A matrix of \code{J} rows, each is an MCMC sample of the
#' parameters corresponding to the specification in \code{nsgpModel}.
#' @param coords.predict M x d matrix of prediction coordinates.
#' @param predict.process Logical; determines whether the prediction corresponds to
#' the y(·) process (\code{TRUE}) or z(·) (\code{FALSE}; this would likely
#' only be used for, e.g., cross-validation).
#' @param constants An optional list of constants to use for prediction;
#' alternatively, additional arguments can be passed to the function via the
#' ... argument.
#' @param seed An optional random seed argument for reproducibility.
#' @param ... Additional arguments can be passed to the function; for example,
#' as an alternative to the \code{constants} list, items can be passed directly
#' via this argument.
#'
#' @return The output of the function is a list with two elements: \code{obs},
#' a matrix of \code{J} posterior predictive samples for the N observed
#' locations (only for \code{likelihood = "SGV"}, which produces predictions
#' for the observed locations by default; this element is \code{NULL}
#' otherwise); and \code{pred}, a corresponding matrix of posterior predictive
#' samples for the prediction locations. Ordering and neighbor selection
#' for the prediction coordinates in the SGV likelihood are conducted
#' internally, as with \code{nsgpModel}.
#'
#' @examples
#' \donttest{
#' # Generate some data: stationary/isotropic
#' N <- 100
#' coords <- matrix(runif(2*N), ncol = 2)
#' alpha_vec <- rep(log(sqrt(1)), N) # Log process SD
#' delta_vec <- rep(log(sqrt(0.05)), N) # Log nugget SD
#' Sigma11_vec <- rep(0.4, N) # Kernel matrix element 1,1
#' Sigma22_vec <- rep(0.4, N) # Kernel matrix element 2,2
#' Sigma12_vec <- rep(0, N) # Kernel matrix element 1,2
#' mu_vec <- rep(0, N) # Mean
#' nu <- 0.5 # Smoothness
#' dist_list <- nsDist(coords)
#' Cor_mat <- nsCorr( dist1_sq = dist_list$dist1_sq, dist2_sq = dist_list$dist2_sq,
#' dist12 = dist_list$dist12, Sigma11 = Sigma11_vec,
#' Sigma22 = Sigma22_vec, Sigma12 = Sigma12_vec, nu = nu )
#' Cov_mat <- diag(exp(alpha_vec)) %*% Cor_mat %*% diag(exp(alpha_vec))
#' D_mat <- diag(exp(delta_vec)^2)
#' set.seed(110)
#' data <- as.numeric(mu_vec + t(chol(Cov_mat + D_mat)) %*% rnorm(N))
#' # Set up constants
#' constants <- list( nu = 0.5, Sigma_HP1 = 2 )
#' # Defaults: tau_model = "constant", sigma_model = "constant", mu_model = "constant",
#' # and Sigma_model = "constant"
#' Rmodel <- nsgpModel(likelihood = "fullGP", constants = constants, coords = coords, data = data )
#' conf <- configureMCMC(Rmodel)
#' Rmcmc <- buildMCMC(conf)
#' Cmodel <- compileNimble(Rmodel)
#' Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
#' samples <- runMCMC(Cmcmc, niter = 200, nburnin = 100)
#' # Prediction
#' predCoords <- as.matrix(expand.grid(seq(0,1,l=10),seq(0,1,l=10)))
#' postpred <- nsgpPredict( model = Rmodel, samples = samples, coords.predict = predCoords )
#' }
#'
#' @export
#'
nsgpPredict <- function(model, samples, coords.predict, predict.process = TRUE, constants, seed = 0, ... ) {
if(!nimble::is.model(model)) stop('first argument must be NSGP NIMBLE model object')
Rmodel <- if(nimble::is.Rmodel(model)) model else model$Rmodel
if(!nimble::is.Rmodel(Rmodel)) stop('something went wrong')
model_constants <- Rmodel$isDataEnv$.BayesNSGP_constants_list
coords <- model_constants$coords
mcmc_samples <- samples
predCoords <- as.matrix(coords.predict)
## extract the "submodel" information from the nimble model object "name"
thisName <- Rmodel$getModelDef()$name
modelsList <- lapply(strsplit(thisName, '_')[[1]], function(x) strsplit(x, '=')[[1]][2])
names(modelsList) <- sapply(strsplit(thisName, '_')[[1]], function(x) strsplit(x, '=')[[1]][1])
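## e.g. with all-default submodels, thisName is
## "tau=constant_sigma=constant_Sigma=constant_mu=constant_likelihood=fullGP",
## which parses to list(tau = "constant", sigma = "constant", Sigma = "constant",
## mu = "constant", likelihood = "fullGP")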
## available for use:
## modelsList$tau
## modelsList$sigma
## modelsList$Sigma
## modelsList$mu
## modelsList$likelihood
## order predCoords for SGV
if( modelsList$likelihood == "SGV" ) {
message("\nOrdering the prediction locations and determining neighbors/conditioning sets for SGV (this may take a minute).\n")
pred.mmd.seed <- sample(1e5, 1)
predSGV_setup <- sgvSetup(coords = coords, coords_pred = predCoords, k = model_constants$k,
pred.seed = pred.mmd.seed, order_coords = FALSE)
prednID_SGV <- predSGV_setup$nID_ord
obs_ord <- predSGV_setup$ord
pred_ord <- predSGV_setup$ord_pred
predCoords <- predCoords[pred_ord,]
}
constants_to_use <- list()
## update constants_to_use with those arguments provided via ...
dotdotdot <- list(...)
## make sure all ... arguments were provided with names
if(length(dotdotdot) > 0 && (is.null(names(dotdotdot)) || any(names(dotdotdot) == ""))) stop("Only named arguments should be provided through the ... argument")
constants_to_use[names(dotdotdot)] <- dotdotdot
## add 'constants' argument to constants_to_use list:
## if provided, make sure 'constants' argument is a named list
if(!missing(constants)) {
if(length(constants) > 0 && (is.null(names(constants)) || any(names(constants) == ""))) stop("All elements in constants list argument must be named")
constants_to_use[names(constants)] <- constants
}
## calculate XX_cross_dist_pred if necessary
if( modelsList$tau == 'approxGP' ) {
if(is.null(model_constants$tau_knot_coords)) stop(paste0('missing tau_knot_coords for tau_model: approxGP'))
constants_to_use$tau_cross_dist_pred <- sqrt(nsCrossdist(Pcoords = predCoords, coords = model_constants$tau_knot_coords, isotropic = TRUE)$dist1_sq)
}
if( modelsList$sigma == 'approxGP' ) {
if(is.null(model_constants$sigma_knot_coords)) stop(paste0('missing sigma_knot_coords for sigma_model: approxGP'))
constants_to_use$sigma_cross_dist_pred <- sqrt(nsCrossdist(Pcoords = predCoords, coords = model_constants$sigma_knot_coords, isotropic = TRUE)$dist1_sq)
}
if( modelsList$Sigma %in% c('npApproxGP', 'npApproxGPIso') ) {
if(is.null(model_constants$Sigma_knot_coords)) stop(paste0('missing Sigma_knot_coords for Sigma_model: ', modelsList$Sigma))
constants_to_use$Sigma_cross_dist_pred <- sqrt(nsCrossdist(Pcoords = predCoords, coords = model_constants$Sigma_knot_coords, isotropic = TRUE)$dist1_sq)
}
## check for discrepancies in any duplicates, between
## constants_to_use provided here, and model_constants from Rmodel
duplicatedNames <- intersect(names(constants_to_use), names(model_constants))
discrepancies <- character()
if(length(duplicatedNames) > 0) {
for(name in duplicatedNames) {
if(!identical(constants_to_use[[name]], model_constants[[name]]))
discrepancies <- c(discrepancies, name)
}
}
if(length(discrepancies) > 0) stop(paste0('Inconsistent values were provided for the following constants: ', paste0(discrepancies, collapse = ', ')))
## move original model_constants from nsgpModel into constants_to_use:
constants_to_use[names(model_constants)] <- model_constants
## determine the constants needed
predictConstantsNeeded <- list(
tau = list(
constant = character(),
logLinReg = c('X_tau', 'PX_tau'),
approxGP = c('p_tau', 'tau_cross_dist', 'tau_cross_dist_pred', 'tau_HP2')),
sigma = list(
constant = character(),
logLinReg = c('X_sigma', 'PX_sigma'),
approxGP = c('p_sigma', 'sigma_cross_dist', 'sigma_cross_dist_pred', 'sigma_HP2')),
Sigma = list(
constant = character(),
constantIso = character(),
covReg = c('X_Sigma', 'PX_Sigma'),
compReg = c('X_Sigma', 'PX_Sigma'),
compRegIso = c('X_Sigma', 'PX_Sigma'),
npApproxGP = c('p_Sigma', 'Sigma_cross_dist', 'Sigma_cross_dist_pred', 'Sigma_HP2'),
npApproxGPIso = c('p_Sigma', 'Sigma_cross_dist', 'Sigma_cross_dist_pred', 'Sigma_HP2')),
mu = list(
constant = character(),
linReg = c('X_mu', 'PX_mu'),
zero = character()),
likelihood = list(
fullGP = character(),
NNGP = character(),
SGV = character())
)
constants_needed <- unique(unlist(lapply(1:length(modelsList), function(i) predictConstantsNeeded[[names(modelsList)[i]]][[modelsList[[i]]]] )))
## check if we're missing any constants we need, and throw an error if any are missing
constants_missing <- setdiff(constants_needed, names(constants_to_use))
if(length(constants_missing) > 0) {
stop(paste0("Missing values for the following model constants: ",
paste0(constants_missing, collapse = ", "),
".\nThese values should be provided as named arguments, or named elements in the constants list argument"))
}
## generate the constants list
## do NOT truncate constants list like this:
##constants <- constants_to_use[constants_needed]
constants <- constants_to_use
d <- constants$d
z <- Rmodel$z
if( modelsList$likelihood == "fullGP" ){ # Predictions for the full GP likelihood
# Extract needed variables from constants
dist1_sq <- constants$dist1_sq
dist2_sq <- constants$dist2_sq
dist12 <- constants$dist12
N <- constants$N # number of observed locations
nu <- constants$nu
# Prediction distances
if(dist2_sq[1,1] == -1){ # Isotropic
Pdist <- nsDist(predCoords, isotropic = TRUE)
Xdist <- nsCrossdist(coords, predCoords, isotropic = TRUE)
} else{
Pdist <- nsDist(predCoords)
Xdist <- nsCrossdist(coords, predCoords)
}
Pdist1_sq <- Pdist$dist1_sq
Pdist2_sq <- Pdist$dist2_sq
Pdist12 <- Pdist$dist12
M <- nrow(Pdist1_sq) # number of prediction locations
# Cross distances
Xdist1_sq <- Xdist$dist1_sq
Xdist2_sq <- Xdist$dist2_sq
Xdist12 <- Xdist$dist12
postPredDrawsCols <- M
} else if( modelsList$likelihood == "NNGP" ){ # Predictions for the NNGP likelihood
# "Local kriging" only possible for NNGP
# Extract needed variables from constants
dist1_3d <- constants$dist1_3d
dist2_3d <- constants$dist2_3d
dist12_3d <- constants$dist12_3d
N <- constants$N # number of observed locations
k <- constants$k # number of neighbors
nu <- constants$nu
# Prediction/cross distances
P_nID <- get.knnx(coords, predCoords, k = k)$nn.index # Prediction NN
if(dist2_3d[1,1,1] == -1){
Pdist <- nsCrossdist3d(coords, predCoords, P_nID, isotropic = TRUE)
} else{
Pdist <- nsCrossdist3d(coords, predCoords, P_nID)
}
Pdist1_3d <- Pdist$dist1_3d
Pdist2_3d <- Pdist$dist2_3d
Pdist12_3d <- Pdist$dist12_3d
M <- dim(Pdist1_3d)[1] # number of prediction locations
postPredDrawsCols <- M
} else if( modelsList$likelihood == "SGV" ){ # Predictions for the SGV likelihood
if(predict.process == FALSE){
stop("Prediction for Z(.) not available with SGV.")
}
# Extract needed variables from constants
dist1_3d <- constants$dist1_3d
dist2_3d <- constants$dist2_3d
dist12_3d <- constants$dist12_3d
N <- constants$N # number of observed locations
k <- constants$k # number of neighbors
nu <- constants$nu
# Prediction setup
if(dist2_3d[1,1,1] == -1){
preddist_SGV <- nsDist3d( predSGV_setup$coords_ord, prednID_SGV, isotropic = TRUE )
} else{
preddist_SGV <- nsDist3d( predSGV_setup$coords_ord, prednID_SGV )
}
Alldist1_3d <- preddist_SGV$dist1_3d
Alldist2_3d <- preddist_SGV$dist2_3d
Alldist12_3d <- preddist_SGV$dist12_3d
M <- dim(predCoords)[1] # number of prediction locations
# REORDER INPUTS (for model != "constant") =================
if( modelsList$tau == "logLinReg" ){
constants$PX_tau <- constants$PX_tau[pred_ord,]
}
if( modelsList$sigma == "logLinReg" ){
constants$PX_sigma <- constants$PX_sigma[pred_ord,]
}
if( modelsList$Sigma == "covReg" | modelsList$Sigma == "compReg" ){
constants$PX_Sigma <- constants$PX_Sigma[pred_ord,]
}
if( modelsList$mu == "linReg" ){
constants$PX_mu <- constants$PX_mu[pred_ord,]
}
postPredDrawsCols <- M+N
} else stop('unknown likelihood')
J <- nrow(mcmc_samples)
# Posterior draws - storage
postPredDraws <- matrix(NA, nrow = J, ncol = postPredDrawsCols)
nimCat("|-------------|-------------|-------------|-------------|")
nimCat("\n|")
for(j in 1:J){ # Loop over MCMC samples
samp_j <- mcmc_samples[j,]
# Calculate log_tau_vec and Plog_tau_vec ======================
if( modelsList$tau == "constant" ){
# Required constants: none
log_tau_vec_j <- log(sqrt(samp_j["delta"]))*rep(1,N)
Plog_tau_vec_j <- log(sqrt(samp_j["delta"]))*rep(1,M)
}
if( modelsList$tau == "logLinReg" ){
# Required constants: X_tau, PX_tau
X_tau <- constants$X_tau
PX_tau <- constants$PX_tau
log_tau_vec_j <- as.numeric(X_tau %*% samp_j[paste("delta[",1:ncol(X_tau),"]",sep = "")])
Plog_tau_vec_j <- as.numeric(PX_tau %*% samp_j[paste("delta[",1:ncol(PX_tau),"]",sep = "")])
}
if( modelsList$tau == "approxGP" ){
# Required constants: p_tau, tau_cross_dist, tau_cross_dist_pred, tau_HP2
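# The approxGP predictive surface at any location s is
#   log tau(s) = tauGP_mu + tauGP_sigma * c(s, knots) %*% w_tau,
# where c(.,.) is the Matern correlation to the knot locations; the same
# latent weights w_tau are reused for both observed and prediction sites.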
p_tau <- constants$p_tau
tau_cross_dist_obs <- constants$tau_cross_dist
tau_cross_dist_pred <- constants$tau_cross_dist_pred
tau_HP2 <- constants$tau_HP2
w_tau_j <- as.numeric(samp_j[paste("w_tau[",1:p_tau,"]",sep = "")])
# Obs locations
Pmat_tau_obs_j <- matern_corr(tau_cross_dist_obs, samp_j["tauGP_phi"], tau_HP2)
log_tau_vec_j <- as.numeric(samp_j["tauGP_mu"]*rep(1,N) + samp_j["tauGP_sigma"] * Pmat_tau_obs_j %*% w_tau_j)
# Pred locations
Pmat_tau_pred_j <- matern_corr(tau_cross_dist_pred, samp_j["tauGP_phi"], tau_HP2)
Plog_tau_vec_j <- as.numeric(samp_j["tauGP_mu"]*rep(1,M) + samp_j["tauGP_sigma"] * Pmat_tau_pred_j %*% w_tau_j)
}
# Calculate log_sigma_vec and Plog_sigma_vec ==================
if( modelsList$sigma == "constant" ){
# Required constants: none
log_sigma_vec_j <- log(sqrt(samp_j["alpha"]))*rep(1,N)
Plog_sigma_vec_j <- log(sqrt(samp_j["alpha"]))*rep(1,M)
}
if( modelsList$sigma == "logLinReg" ){
# Required constants: X_sigma, PX_sigma
X_sigma <- constants$X_sigma
PX_sigma <- constants$PX_sigma
log_sigma_vec_j <- as.numeric(X_sigma %*% samp_j[paste("alpha[",1:ncol(X_sigma),"]",sep = "")])
Plog_sigma_vec_j <- as.numeric(PX_sigma %*% samp_j[paste("alpha[",1:ncol(PX_sigma),"]",sep = "")])
}
if( modelsList$sigma == "approxGP" ){
# Required constants: p_sigma, sigma_cross_dist, sigma_cross_dist_pred, sigma_HP2
p_sigma <- constants$p_sigma
sigma_cross_dist_obs <- constants$sigma_cross_dist
sigma_cross_dist_pred <- constants$sigma_cross_dist_pred
sigma_HP2 <- constants$sigma_HP2
w_sigma_j <- as.numeric(samp_j[paste("w_sigma[",1:p_sigma,"]",sep = "")])
# Obs locations
Pmat_sigma_obs_j <- matern_corr(sigma_cross_dist_obs, samp_j["sigmaGP_phi"], sigma_HP2)
log_sigma_vec_j <- as.numeric(samp_j["sigmaGP_mu"]*rep(1,N) + samp_j["sigmaGP_sigma"] * Pmat_sigma_obs_j %*% w_sigma_j)
# Pred locations
Pmat_sigma_pred_j <- matern_corr(sigma_cross_dist_pred, samp_j["sigmaGP_phi"], sigma_HP2)
Plog_sigma_vec_j <- as.numeric(samp_j["sigmaGP_mu"]*rep(1,M) + samp_j["sigmaGP_sigma"] * Pmat_sigma_pred_j %*% w_sigma_j)
}
# Calculate SigmaXX and PSigmaXX ==============================
if( modelsList$Sigma == "constant" ){
# Required constants: none
Sigma_coef1 <- samp_j["Sigma_coef1"]
Sigma_coef2 <- samp_j["Sigma_coef2"]
Sigma_coef3 <- samp_j["Sigma_coef3"]
Sigma11_j <- rep(1,N)*(Sigma_coef1*cos(Sigma_coef3)*cos(Sigma_coef3) + Sigma_coef2*sin(Sigma_coef3)*sin(Sigma_coef3))
Sigma22_j <- rep(1,N)*(Sigma_coef2*cos(Sigma_coef3)*cos(Sigma_coef3) + Sigma_coef1*sin(Sigma_coef3)*sin(Sigma_coef3))
Sigma12_j <- rep(1,N)*(Sigma_coef1*cos(Sigma_coef3)*sin(Sigma_coef3) - Sigma_coef2*cos(Sigma_coef3)*sin(Sigma_coef3))
PSigma11_j <- rep(1,M)*(Sigma_coef1*cos(Sigma_coef3)*cos(Sigma_coef3) + Sigma_coef2*sin(Sigma_coef3)*sin(Sigma_coef3))
PSigma22_j <- rep(1,M)*(Sigma_coef2*cos(Sigma_coef3)*cos(Sigma_coef3) + Sigma_coef1*sin(Sigma_coef3)*sin(Sigma_coef3))
PSigma12_j <- rep(1,M)*(Sigma_coef1*cos(Sigma_coef3)*sin(Sigma_coef3) - Sigma_coef2*cos(Sigma_coef3)*sin(Sigma_coef3))
}
if( modelsList$Sigma == "constantIso" ){
# Required constants: none
Sigma_coef1 <- samp_j["Sigma_coef1"]
Sigma_coef2 <- samp_j["Sigma_coef1"]
Sigma_coef3 <- 0
Sigma11_j <- rep(1,N)*(Sigma_coef1*cos(Sigma_coef3)*cos(Sigma_coef3) + Sigma_coef2*sin(Sigma_coef3)*sin(Sigma_coef3))
Sigma22_j <- rep(1,N)*(Sigma_coef2*cos(Sigma_coef3)*cos(Sigma_coef3) + Sigma_coef1*sin(Sigma_coef3)*sin(Sigma_coef3))
Sigma12_j <- rep(1,N)*(Sigma_coef1*cos(Sigma_coef3)*sin(Sigma_coef3) - Sigma_coef2*cos(Sigma_coef3)*sin(Sigma_coef3))
PSigma11_j <- rep(1,M)*(Sigma_coef1*cos(Sigma_coef3)*cos(Sigma_coef3) + Sigma_coef2*sin(Sigma_coef3)*sin(Sigma_coef3))
PSigma22_j <- rep(1,M)*(Sigma_coef2*cos(Sigma_coef3)*cos(Sigma_coef3) + Sigma_coef1*sin(Sigma_coef3)*sin(Sigma_coef3))
PSigma12_j <- rep(1,M)*(Sigma_coef1*cos(Sigma_coef3)*sin(Sigma_coef3) - Sigma_coef2*cos(Sigma_coef3)*sin(Sigma_coef3))
}
if( modelsList$Sigma == "covReg" ){
# Required constants: X_Sigma, PX_Sigma
X_Sigma <- constants$X_Sigma
PX_Sigma <- constants$PX_Sigma
Sigma11_j <- as.numeric(samp_j["psi11"]*rep(1,N) + (X_Sigma %*% samp_j[paste("gamma1[",1:ncol(X_Sigma),"]",sep = "")])^2)
Sigma12_j <- as.numeric(samp_j["rho"]*sqrt(samp_j["psi11"]*samp_j["psi22"])*rep(1,N) + (X_Sigma %*% samp_j[paste("gamma1[",1:ncol(X_Sigma),"]",sep = "")])*(X_Sigma %*% samp_j[paste("gamma2[",1:ncol(X_Sigma),"]",sep = "")]))
Sigma22_j <- as.numeric(samp_j["psi22"]*rep(1,N) + (X_Sigma %*% samp_j[paste("gamma2[",1:ncol(X_Sigma),"]",sep = "")])^2)
PSigma11_j <- as.numeric(samp_j["psi11"]*rep(1,M) + (PX_Sigma %*% samp_j[paste("gamma1[",1:ncol(PX_Sigma),"]",sep = "")])^2)
PSigma12_j <- as.numeric(samp_j["rho"]*sqrt(samp_j["psi11"]*samp_j["psi22"])*rep(1,M) + (PX_Sigma %*% samp_j[paste("gamma1[",1:ncol(PX_Sigma),"]",sep = "")])*(PX_Sigma %*% samp_j[paste("gamma2[",1:ncol(PX_Sigma),"]",sep = "")]))
PSigma22_j <- as.numeric(samp_j["psi22"]*rep(1,M) + (PX_Sigma %*% samp_j[paste("gamma2[",1:ncol(PX_Sigma),"]",sep = "")])^2)
}
if( modelsList$Sigma == "compReg" ){
# Required constants: X_Sigma, PX_Sigma
X_Sigma <- constants$X_Sigma
PX_Sigma <- constants$PX_Sigma
eigen_comp1_j <- X_Sigma %*% samp_j[paste("Sigma_coef1[",1:ncol(X_Sigma),"]",sep = "")]
eigen_comp2_j <- X_Sigma %*% samp_j[paste("Sigma_coef2[",1:ncol(X_Sigma),"]",sep = "")]
eigen_comp3_j <- X_Sigma %*% samp_j[paste("Sigma_coef3[",1:ncol(X_Sigma),"]",sep = "")]
Sigma11_j <- as.numeric(inverseEigen(eigen_comp1_j, eigen_comp2_j, eigen_comp3_j, 1))
Sigma12_j <- as.numeric(inverseEigen(eigen_comp1_j, eigen_comp2_j, eigen_comp3_j, 3))
Sigma22_j <- as.numeric(inverseEigen(eigen_comp1_j, eigen_comp2_j, eigen_comp3_j, 2))
Peigen_comp1_j <- PX_Sigma %*% samp_j[paste("Sigma_coef1[",1:ncol(X_Sigma),"]",sep = "")]
Peigen_comp2_j <- PX_Sigma %*% samp_j[paste("Sigma_coef2[",1:ncol(X_Sigma),"]",sep = "")]
Peigen_comp3_j <- PX_Sigma %*% samp_j[paste("Sigma_coef3[",1:ncol(X_Sigma),"]",sep = "")]
PSigma11_j <- as.numeric(inverseEigen(Peigen_comp1_j, Peigen_comp2_j, Peigen_comp3_j, 1))
PSigma12_j <- as.numeric(inverseEigen(Peigen_comp1_j, Peigen_comp2_j, Peigen_comp3_j, 3))
PSigma22_j <- as.numeric(inverseEigen(Peigen_comp1_j, Peigen_comp2_j, Peigen_comp3_j, 2))
}
if( modelsList$Sigma == "compRegIso" ){
# Required constants: X_Sigma, PX_Sigma
X_Sigma <- constants$X_Sigma
PX_Sigma <- constants$PX_Sigma
eigen_comp1_j <- X_Sigma %*% samp_j[paste("Sigma_coef1[",1:ncol(X_Sigma),"]",sep = "")]
Sigma11_j <- as.numeric(exp(eigen_comp1_j))
Sigma22_j <- as.numeric(exp(eigen_comp1_j))
Sigma12_j <- rep(0,N)
Peigen_comp1_j <- PX_Sigma %*% samp_j[paste("Sigma_coef1[",1:ncol(X_Sigma),"]",sep = "")]
PSigma11_j <- as.numeric(exp(Peigen_comp1_j))
PSigma22_j <- as.numeric(exp(Peigen_comp1_j))
PSigma12_j <- rep(0,M)
}
if( modelsList$Sigma == "npApproxGP" ){
# Required constants: p_Sigma, Sigma_cross_dist, Sigma_cross_dist_pred, Sigma_HP2
p_Sigma <- constants$p_Sigma
Sigma_cross_dist_obs <- constants$Sigma_cross_dist
Sigma_cross_dist_pred <- constants$Sigma_cross_dist_pred
Sigma_HP2 <- constants$Sigma_HP2
w1_Sigma_j <- samp_j[paste("w1_Sigma[",1:p_Sigma,"]",sep = "")]
w2_Sigma_j <- samp_j[paste("w2_Sigma[",1:p_Sigma,"]",sep = "")]
w3_Sigma_j <- samp_j[paste("w3_Sigma[",1:p_Sigma,"]",sep = "")]
# Obs locations
Pmat12_Sigma_obs_j <- matern_corr(Sigma_cross_dist_obs, samp_j["SigmaGP_phi[1]"], Sigma_HP2[1])
Pmat3_Sigma_obs_j <- matern_corr(Sigma_cross_dist_obs, samp_j["SigmaGP_phi[2]"], Sigma_HP2[2])
eigen_comp1_j <- samp_j["SigmaGP_mu[1]"]*rep(1,N) + samp_j["SigmaGP_sigma[1]"] * Pmat12_Sigma_obs_j %*% w1_Sigma_j
eigen_comp2_j <- samp_j["SigmaGP_mu[1]"]*rep(1,N) + samp_j["SigmaGP_sigma[1]"] * Pmat12_Sigma_obs_j %*% w2_Sigma_j
eigen_comp3_j <- samp_j["SigmaGP_mu[2]"]*rep(1,N) + samp_j["SigmaGP_sigma[2]"] * Pmat3_Sigma_obs_j %*% w3_Sigma_j
Sigma11_j <- as.numeric(inverseEigen(eigen_comp1_j, eigen_comp2_j, eigen_comp3_j, 1))
Sigma12_j <- as.numeric(inverseEigen(eigen_comp1_j, eigen_comp2_j, eigen_comp3_j, 3))
Sigma22_j <- as.numeric(inverseEigen(eigen_comp1_j, eigen_comp2_j, eigen_comp3_j, 2))
# Pred locations
Pmat12_Sigma_pred_j <- matern_corr(Sigma_cross_dist_pred, samp_j["SigmaGP_phi[1]"], Sigma_HP2[1])
Pmat3_Sigma_pred_j <- matern_corr(Sigma_cross_dist_pred, samp_j["SigmaGP_phi[2]"], Sigma_HP2[2])
Peigen_comp1_j <- samp_j["SigmaGP_mu[1]"]*rep(1,M) + samp_j["SigmaGP_sigma[1]"] * Pmat12_Sigma_pred_j %*% w1_Sigma_j
Peigen_comp2_j <- samp_j["SigmaGP_mu[1]"]*rep(1,M) + samp_j["SigmaGP_sigma[1]"] * Pmat12_Sigma_pred_j %*% w2_Sigma_j
Peigen_comp3_j <- samp_j["SigmaGP_mu[2]"]*rep(1,M) + samp_j["SigmaGP_sigma[2]"] * Pmat3_Sigma_pred_j %*% w3_Sigma_j
PSigma11_j <- as.numeric(inverseEigen(Peigen_comp1_j, Peigen_comp2_j, Peigen_comp3_j, 1))
PSigma12_j <- as.numeric(inverseEigen(Peigen_comp1_j, Peigen_comp2_j, Peigen_comp3_j, 3))
PSigma22_j <- as.numeric(inverseEigen(Peigen_comp1_j, Peigen_comp2_j, Peigen_comp3_j, 2))
}
if( modelsList$Sigma == "npApproxGPIso" ){
# Required constants: p_Sigma, Sigma_cross_dist, Sigma_cross_dist_pred, Sigma_HP2
p_Sigma <- constants$p_Sigma
Sigma_cross_dist_obs <- constants$Sigma_cross_dist
Sigma_cross_dist_pred <- constants$Sigma_cross_dist_pred
Sigma_HP2 <- constants$Sigma_HP2
w1_Sigma_j <- samp_j[paste("w1_Sigma[",1:p_Sigma,"]",sep = "")]
# Obs locations
Pmat12_Sigma_obs_j <- matern_corr(Sigma_cross_dist_obs, samp_j["SigmaGP_phi[1]"], Sigma_HP2[1])
eigen_comp1_j <- samp_j["SigmaGP_mu[1]"]*rep(1,N) + samp_j["SigmaGP_sigma[1]"] * Pmat12_Sigma_obs_j %*% w1_Sigma_j
Sigma11_j <- as.numeric(exp(eigen_comp1_j))
Sigma22_j <- as.numeric(exp(eigen_comp1_j))
Sigma12_j <- rep(0,N)
# Pred locations
Pmat12_Sigma_pred_j <- matern_corr(Sigma_cross_dist_pred, samp_j["SigmaGP_phi[1]"], Sigma_HP2[1])
Peigen_comp1_j <- samp_j["SigmaGP_mu[1]"]*rep(1,M) + samp_j["SigmaGP_sigma[1]"] * Pmat12_Sigma_pred_j %*% w1_Sigma_j
PSigma11_j <- as.numeric(exp(Peigen_comp1_j))
PSigma22_j <- as.numeric(exp(Peigen_comp1_j))
PSigma12_j <- rep(0,M)
}
# Calculate mu and Pmu ========================================
if( modelsList$mu == "constant" ){
mu <- samp_j["beta"]*rep(1,N)
Pmu <- samp_j["beta"]*rep(1,M)
}
if( modelsList$mu == "linReg" ){
X_mu <- constants$X_mu
PX_mu <- constants$PX_mu
mu <- as.numeric(X_mu %*% samp_j[paste("beta[",1:ncol(X_mu),"]",sep = "")])
Pmu <- as.numeric(PX_mu %*% samp_j[paste("beta[",1:ncol(X_mu),"]",sep = "")])
}
if( modelsList$mu == "zero" ){
mu <- 0*rep(1,N)
Pmu <- 0*rep(1,M)
}
if( modelsList$likelihood == "fullGP" ){ # Predictions for the full GP likelihood
# Posterior predictive draw ===================================
# Obs covariance
Cor <- nsCorr(dist1_sq, dist2_sq, dist12, Sigma11_j, Sigma22_j, Sigma12_j, nu, d)
sigmaMat <- diag(exp(log_sigma_vec_j))
Cov <- sigmaMat %*% Cor %*% sigmaMat
C <- Cov + diag(exp(log_tau_vec_j)^2)
C_chol <- chol(C)
# Prediction covariance
PCor <- nsCorr(Pdist1_sq, Pdist2_sq, Pdist12, PSigma11_j, PSigma22_j, PSigma12_j, nu, d)
PsigmaMat <- diag(exp(Plog_sigma_vec_j))
PCov <- PsigmaMat %*% PCor %*% PsigmaMat
if(predict.process){ # Do not include the nugget variance
PC <- PCov
} else{ # Include nugget variance
PC <- PCov + diag(exp(Plog_tau_vec_j)^2)
}
# Cross-covariance
XCor <- nsCrosscorr(Xdist1_sq, Xdist2_sq, Xdist12,
Sigma11_j, Sigma22_j, Sigma12_j,
PSigma11_j, PSigma22_j, PSigma12_j, nu, d)
XCov <- PsigmaMat %*% XCor %*% sigmaMat
# Conditional mean/covariance
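# Standard GP conditioning:
#   E[y_pred | z]   = Pmu + XCov %*% solve(C) %*% (z - mu)
#   Var[y_pred | z] = PC  - XCov %*% solve(C) %*% t(XCov)
# with solve(C) applied via two triangular solves against C_chol for stability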
crscov_covinv <- t(backsolve(C_chol, backsolve(C_chol, t(XCov), transpose = TRUE)))
condMean <- Pmu + crscov_covinv %*% (z - mu)
condCov <- PC - crscov_covinv %*% t(XCov)
condCov_chol <- chol(condCov)
# Store
postPredDraws[j,] <- condMean + t(condCov_chol) %*% rnorm(M)
} else if( modelsList$likelihood == "NNGP" ){ # Predictions for the NNGP likelihood
# Local kriging ===============================================
for(m in 1:M){
# Obs covariance -- for nearest neighbors
Cor <- nsCorr(Pdist1_3d[m,1:k,1:k],
Pdist2_3d[m,1:k,1:k],
Pdist12_3d[m,1:k,1:k],
Sigma11_j[P_nID[m,]], Sigma22_j[P_nID[m,]],
Sigma12_j[P_nID[m,]], nu, d)
sigmaMat <- diag(exp(log_sigma_vec_j[P_nID[m,]]))
Cov <- sigmaMat %*% Cor %*% sigmaMat
C <- Cov + diag(exp(log_tau_vec_j[P_nID[m,]])^2)
C_chol <- chol(C)
# Prediction variance
# PCor <- nsCorr(Pdist1_sq, Pdist2_sq, Pdist12, PSigma11_j, PSigma22_j, PSigma12_j, nu)
PsigmaMat <- exp(Plog_sigma_vec_j[m])
# PCov <- PsigmaMat %*% Cor %*% PsigmaMat
if(predict.process){ # Do not include the nugget variance
PC <- exp(Plog_sigma_vec_j[m])^2
} else{ # Include nugget variance
PC <- exp(Plog_tau_vec_j[m])^2 + exp(Plog_sigma_vec_j[m])^2
}
# Cross-covariance
XCor <- nsCrosscorr(matrix(Pdist1_3d[m,k+1,1:k], nrow = 1, ncol = k),
matrix(Pdist2_3d[m,k+1,1:k], nrow = 1, ncol = k),
matrix(Pdist12_3d[m,k+1,1:k], nrow = 1, ncol = k),
Sigma11_j[P_nID[m,]],
Sigma22_j[P_nID[m,]],
Sigma12_j[P_nID[m,]],
PSigma11_j[m], PSigma22_j[m], PSigma12_j[m], nu, d)
XCov <- PsigmaMat %*% XCor %*% sigmaMat
# Conditional mean/covariance
crscov_covinv <- t(backsolve(C_chol, backsolve(C_chol, t(XCov), transpose = TRUE)))
condMean <- Pmu[m] + crscov_covinv %*% (z[P_nID[m,]] - mu[P_nID[m,]])
condCov <- PC - crscov_covinv %*% t(XCov)
condCov_chol <- chol(condCov)
pred_sample <- condMean + t(condCov_chol) %*% rnorm(1)
# Store
postPredDraws[j,m] <- pred_sample
}
} else if( modelsList$likelihood == "SGV" ){ # Predictions for the SGV likelihood
# SGV prediction ==============================================
U_j <- calculateU_ns(
dist1_3d = Alldist1_3d, dist2_3d = Alldist2_3d, dist12_3d = Alldist12_3d,
Sigma11 = c(Sigma11_j, PSigma11_j),
Sigma22 = c(Sigma22_j, PSigma22_j),
Sigma12 = c(Sigma12_j, PSigma12_j),
log_sigma_vec = c(log_sigma_vec_j, Plog_sigma_vec_j),
log_tau_vec = c(log_tau_vec_j, Plog_tau_vec_j), nu = nu,
nID = prednID_SGV, cond_on_y = predSGV_setup$condition_on_y_ord,
N = N, k = k, M = M, d = d )
Usm <- sparseMatrix(i = U_j[,1], j = U_j[,2], x = U_j[,3])
Asm <- Usm[c(seq(from = 1, to = 2*N, by = 2), 2*N + 1:M),]
Bsm <- Usm[seq(from = 2, to = 2*N, by = 2),]
# Calculate V = rchol(W)
Aoo_sm <- Asm[1:N,1:(2*N)]
Woo_sm <- tcrossprod(Aoo_sm)[N:1,N:1]
Voo_sm <- t(chol(Woo_sm))[N:1,N:1]
Vsm <- Asm[,-(1:N)]
Vsm[1:N,1:N] <- Voo_sm
# Kriging predictor (mean zero)
ABtz <- crossprod(t(Asm), crossprod(Bsm, z - mu))
krigPredictor <- -as.numeric(solve(tcrossprod(Vsm), ABtz))
# Draw mean zero
pred_sample <- solve(t(Vsm), rnorm(N+M), system = "L")
# Combine (with mean)
# postpred_draw <- Pmu + pred_sample[-(1:N)] + krigPredictor[-(1:N)]
postpred_draw <- c(mu,Pmu) + as.numeric(pred_sample + krigPredictor)
# Store
postPredDraws[j,] <- postpred_draw
PPD_obs_ord <- postPredDraws[,1:N]
PPD_pred_ord <- postPredDraws[,-(1:N)]
PPD_obs_orig <- PPD_obs_ord[,order(model_constants$ord)]
PPD_pred_orig <- PPD_pred_ord[,order(predSGV_setup$ord_pred)]
} else stop('unknown likelihood')
# Progress
if( j %% ceiling(J/56) == 0 ){nimCat("-")}
}
nimCat("|\n")
if( modelsList$likelihood == "fullGP" ){ # Predictions for the full GP likelihood
output <- list(obs = NULL, pred = postPredDraws, pred.mmd.seed = NULL)
} else if( modelsList$likelihood == "NNGP" ){ # Predictions for the NNGP likelihood
output <- list(obs = NULL, pred = postPredDraws, pred.mmd.seed = NULL)
} else if( modelsList$likelihood == "SGV" ){ # Predictions for the SGV likelihood
output <- list(obs = PPD_obs_orig, pred = PPD_pred_orig, pred.mmd.seed = pred.mmd.seed)
} else stop('unknown likelihood')
return(output)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNSGP/R/core.R
|
EliminationOrder <- function(graph, node.class){
dag.graph <- igraph.from.graphNEL(graph)
dis.nodes <- names(node.class[node.class])
cont.nodes <- names(node.class[!node.class])
# topological order for discrete subgraph
graph.dis <- induced_subgraph(dag.graph, dis.nodes)
# topological order for continuous subgraph
graph.cont <- induced_subgraph(dag.graph, cont.nodes)
eo.dis <- names(topological.sort(graph.dis))
eo.cont <- names(topological.sort(graph.cont))
# eliminate continuous node first, then discrete ones
result <- c(rev(eo.cont), rev(eo.dis))
return(result)
}
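## Usage sketch (hypothetical 3-node network): for discrete D -> continuous X
## -> continuous Y with node.class = c(D = TRUE, X = FALSE, Y = FALSE), the
## continuous subgraph sorts to c("X", "Y") and the discrete one to c("D"),
## so the result is c("Y", "X", "D"): continuous nodes are eliminated first,
## each block in reverse topological order.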
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/A1_EliminationOrder.R
|
#' @importFrom igraph as.undirected
#' @importFrom graph inEdges
#' @importFrom igraph simplify
Moralize <- function(graph){
dag_nodes <- nodes(graph)
und.graph <- as.undirected(igraph.from.graphNEL(graph, weight=FALSE), mode = "collapse")
for(i in 1:length(dag_nodes)){
parents <- inEdges(dag_nodes[i], graph)[[dag_nodes[i]]]
np <- length(parents)
if(np<2) next
for(j in 1:(np-1)){
for(k in (j+1):(np)){
und.graph <- add_edges(und.graph, c(parents[j], parents[k]))
}
}
}
und.graph <- simplify(und.graph, remove.loops=FALSE)
nel.mor <- igraph.to.graphNEL(und.graph)
return(nel.mor)
}
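## Usage sketch (assumes the 'graph' package; hypothetical v-structure A -> C <- B):
# g <- new("graphNEL", nodes = c("A", "B", "C"), edgemode = "directed")
# g <- graph::addEdge("A", "C", g)
# g <- graph::addEdge("B", "C", g)
# Moralize(g) # undirected graph with edges A-C and B-C, plus the "marrying" edge A-B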
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/A2_Moralize.R
|
#' @importFrom igraph get.edge.ids
Triangulate <- function(graph, elim.order){
dag.graph <- igraph.from.graphNEL(graph, weight=FALSE)
for(i in 1:length(elim.order)){
node <- elim.order[i]
neighbors <- names(neighbors(dag.graph, node, mode="all"))
nn <- length(neighbors)
if(nn >= 2){
node_pairs <- generate_pairs(nn)
for(j in 1:nrow(node_pairs)){
n1 <- neighbors[node_pairs[j, 1]]
n2 <- neighbors[node_pairs[j, 2]]
if (which(elim.order==n1)>i && which(elim.order==n2)>i){
if(get.edge.ids(dag.graph, c(n1, n2)) == 0){
dag.graph <- add_edges(dag.graph, c(n1, n2))
}
}
}
}
}
dag.tri <- igraph.to.graphNEL(dag.graph)
return(dag.tri)
}
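## Usage sketch: for the 4-cycle A-B-C-D-A with elim.order = c("A","B","C","D"),
## eliminating A connects its later-eliminated neighbors B and D, adding the
## chord B-D and yielding a triangulated (chordal) graph.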
# given a magnitude, generate all possible pairs of the numbers from 1 to magnitude inclusive. Order is not considered.
generate_pairs <- function(magnitude){
if(magnitude <= 1) return(list())
enumeration <- matrix(nrow = (magnitude-1)*magnitude/2, ncol = 2)
sum = 0
for(i in 1:(magnitude-1)){
enumeration[i:(magnitude-1) + sum, 1] <- i
enumeration[i:(magnitude-1) + sum, 2] <- (i+1):magnitude
sum = sum + magnitude - i - 1
}
return(enumeration)
}
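## e.g. generate_pairs(4) returns the 6 x 2 matrix with rows
## (1,2), (1,3), (1,4), (2,3), (2,4), (3,4)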
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/A2_Triangulate.R
|
ElimTreeNodes <- function(graph, elim.order) {
cluster.sets <- list()
dag.graph <- igraph.from.graphNEL(graph)
eo.rev <- rev(elim.order)
# iterate over all nodes in Bayesian network
for (i in length(elim.order):1){
node <- eo.rev[i]
neighbors <- names(neighbors(dag.graph, node, mode="all"))
formers <- eo.rev[1:i] # this node and the nodes appearing later in the EO
# a cluster for a node is formed by all its neighbors appearing later in the EO and itself
cluster <- c(node, intersect(formers, neighbors))
cluster.sets[[i]] <- cluster
}
# Each cluster corresponds to a node in the BN, which is the elimination node of that cluster
names(cluster.sets) <- eo.rev
return(cluster.sets)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/A3_ClusterSets.R
|
#' @importFrom igraph igraph.from.graphNEL
SemiEliminationTree <- function(graph, cluster.sets, node.class, elim.order){
cs.graph <- igraph.from.graphNEL(graph)
i <- 1
while (i < length(cluster.sets)){
# cat("one iteration")
cs.names <- names(cluster.sets)
cluster <- cluster.sets[[i]]
this.name <- cs.names[i]
this.parent <- neighbors(cs.graph, v=this.name, mode="in")$name
find_cluster <- FALSE
is.discrete <- prod(node.class[cluster]) # if this cluster is discrete (all members are discrete)
if (is.discrete){
# if it's discrete, search for the following clusters
for(j in (i+1):length(cluster.sets)){
if (prod(node.class[cluster.sets[[j]]])){
# if jth cluster is discrete
if (all(cluster %in% cluster.sets[[j]])){
# and it contains this cluster, set the indicator true
find_cluster <- TRUE
break
# stop searching
}
}
}
}
# if no such cluster j can be found, proceed to the next iteration
if(!find_cluster){i <- i+1; next}
# if such cluster j is found, do the following:
# find all the children of this cluster in the elimination tree
cluster.children <- neighbors(cs.graph, v=cs.names[i], mode="out")$name
# find such a child among them,
# whose elimination node should appear as late as possible in the order
selected.child <- 0
selected.child.name <- NULL
current <- 0
for(j in 1:length(cluster.children)){
this.child <- cluster.children[[j]]
child.mem <- cluster.sets[[this.child]]
is.discrete <- prod(node.class[child.mem])
if (is.discrete & all(cluster %in% child.mem)) {
this.scan <- which(elim.order == cs.names[i])
# if the elimination node of this child appears later
# then update the selected child
if(this.scan > current){
current <- this.scan ##
selected.child <- j;
selected.child.name <- cluster.children[j]
}
}
}
# merge this cluster into its selected child by
# 1. deleting this cluster
cs.graph <- delete_vertices(cs.graph, this.name)
# 2. update parents and children of involved nodes
if(length(this.parent)!=0){ # if this cluster has a parent
for (j in 1:length(cluster.children)){
if(cluster.children[j]!=selected.child.name){
# let the parent of each child be the selected child, except for the selected child itself
cs.graph <- add_edges(cs.graph, c(selected.child.name, cluster.children[j]))
} else {
# let the parent of the selected child be this cluster's parent
cs.graph <- add_edges(cs.graph, c(this.parent, cluster.children[j]))
}
}
} else { # if it does not have a parent, then update as usual,
# but no need to update the parent of its selected child
for (j in 1:length(cluster.children)){
if(cluster.children[j]!=selected.child.name){
cs.graph <- add_edges(cs.graph, c(selected.child.name, cluster.children[j]))
}
}
}
# update the name vector of the tree
selected.child.index <- which(cs.names==selected.child.name)
cluster.sets[this.name] <- cluster.sets[selected.child.index]
cs.names[which(cs.names==this.name)] <- selected.child.name
cluster.sets <- cluster.sets[-selected.child.index]
cs.names <- cs.names[-selected.child.index]
names(cluster.sets) <- cs.names
}
# V(cs.graph)$name <- unlist(lapply(cluster.sets, paste0, collapse=","))
# x11(); plot(igraph.to.graphNEL(cs.graph))
return(igraph.to.graphNEL(cs.graph))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/A4_SemiEliminationTree.R
|
StrongEliminationTree <- function(cs, elim.order){
# a vector to store the parent of each cluster
pvec <- c()
# iterate over all clusters
for (i in length(cs):1){
cluster <- cs[[i]]
elim.ind <- which(elim.order == names(cs)[i])
# Find a member such that
# (1) its order is as small as possible,
# (2) but it appears after the elim node of this cluster in order
# after finding this member, find the cluster (B) with this member as its elimination node
# set cluster B as the parent of cluster A
eo.pos <- match(cluster, elim.order)
names(eo.pos) <- cluster
eo.pos <- eo.pos[eo.pos>elim.ind]
if (length(eo.pos)>0) {
pvec[i] <- names(eo.pos)[which.min(eo.pos)]
}
}
# construct a graphNEL object of the strong elimination tree
names(pvec) <- names(cs)
nodes <- names(pvec)
Adj <- matrix(0, length(nodes), length(nodes))
colnames(Adj) <- nodes
rownames(Adj) <- nodes
cs.graph <- graph_from_adjacency_matrix(Adj, mode = "directed")
for (i in 1:length(nodes)){
if(!is.na(pvec[i])){
cs.graph <- add_edges(cs.graph, c(pvec[i], nodes[i]))
}
}
cs.graph <- igraph.to.graphNEL(cs.graph)
return(cs.graph)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/A5_StrongEliminationTree.R
|
#' Compile the cluster tree
#'
#' Get the cluster sets and strong semi-elimination tree from the Bayesian network
#'
#' @details This function forms the cluster sets and the semi-elimination tree graph
#' from the Bayesian network. The procedures include acquiring the elimination order,
#' moralization, triangulation, obtaining cluster sets, forming strong elimination
#' tree and strong semi-elimination tree. The cluster sets and the semi-elimination
#' tree are required to initialize the cluster tree.
#'
#' @param dag a \code{graphNEL} object of the Bayesian network
#' @param node.class a named \code{vector} of \code{logical} values, \code{TRUE} if node
#' is discrete, \code{FALSE} if otherwise
#' @return
#' \describe{
#' \item{\code{tree.graph}}{a \code{graphNEL} object of the semi-elimination tree.}
#' \item{\code{dag}}{a \code{graphNEL} object of the original Bayesian network.}
#' \item{\code{cluster.sets}}{a \code{list} of members of each cluster.}
#' \item{\code{node.class}}{a named \code{vector} of \code{logical} values, \code{TRUE} if node
#' is discrete, \code{FALSE} if otherwise}
#' \item{\code{elimination.order}}{a \code{vector} of node names sorted by the elimination order.}
#' }
#'
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @importFrom igraph neighbors add_edges are_adjacent delete_vertices graph_from_adjacency_matrix graph_from_data_frame topological.sort
#'
#' @examples
#'
#' data(liver)
#' cst <- ClusterTreeCompile(dag=liver$dag, node.class=liver$node.class)
#'
#' @seealso \code{\link{ElimTreeInitialize}}
#'
#' @export
ClusterTreeCompile <- function(dag, node.class) {
elim.order <- EliminationOrder(dag, node.class=node.class)
graph.mor <- Moralize(dag)
graph.tri <- Triangulate(graph.mor, elim.order)
csets <- ElimTreeNodes(graph.tri, elim.order)
strongET <- StrongEliminationTree(csets, elim.order)
semiET <- SemiEliminationTree(strongET, csets, node.class, elim.order)
output <- list(tree.graph=semiET,
dag=dag,
cluster.sets=csets,
node.class=node.class,
elimination.order=elim.order)
return(output)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/A6_ClusterTreeCompile.R
|
##################################################
## Obtain location of a locus
##################################################
loci.loc <- function(locus) {
split <- strsplit(locus, split='_')
chr <- c()
location <- c()
for (i in 1:length(split)) {
chr[i] <- substring(split[[i]][1], 4)
location[i] <- split[[i]][2]
}
result <- data.frame(chr, location)
result$chr <- as.character(result$chr)
result$location <- as.numeric(as.character(result$location))
return(result)
}
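## e.g. loci.loc("chr1_23.4") returns data.frame(chr = "1", location = 23.4):
## the locus name is split at "_" and the leading "chr" prefix is dropped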
#####################################
## function for assigning universe
#####################################
assignUniverse <- function(dag, universes, nodes){
# universes <- cs.2
assignment <- list()
node.names <- dag@nodes
assigned <- c()
assigned <- setdiff(dag@nodes, nodes)
dag.graph <- igraph.from.graphNEL(dag)
i <- 1
for (universe in universes){
temp <- c()
for (node in universe){
if (node %in% assigned) next
node.parents <- names(neighbors(dag.graph, node, mode="in"))
if (length(node.parents)==0) {
temp <- c(temp, node)
assigned <- c(assigned, node)
next
}
if (prod(node.parents %in% universe) == 1) {
temp <- c(temp, node)
assigned <- c(assigned, node)
}
}
assignment[[i]] <- temp
i <- i+1
}
names(assignment) <- names(universes)
return(assignment)
}
############################################################
## Extract info from qtl fit results
# The qtl and qtlnet are optional
# @importFrom qtlnet loci.qtlnet
# @importFrom qtl scanone
############################################################
#' @importFrom graph nodes
#' @importFrom igraph is_dag
extractQTL <- function(qtl.fit) {
qtl.graph <- igraph_from_qtlnet(qtl.fit) # igraph.qtlnet(qtl.fit)
qtl <- qtl.fit$cross
dag <- igraph.to.graphNEL(qtl.graph)
if (!is_dag(qtl.graph)) stop("Graph is not a DAG.")
graph::nodes(dag) <- gsub("@", "_", graph::nodes(dag))
node.names <- graph::nodes(dag)
# pheno <- qtl$pheno[,pheno.cols]
pheno <- qtl$pheno
loci <- qtlnet::loci.qtlnet(qtl.fit)
locus <- unique(unlist(loci))
locus <- gsub("@", "_", locus)
qtl.df <- loci.loc(locus)
discrete.nodes <- locus
continuous.nodes <- names(pheno)
node.class <- node.names %in% discrete.nodes
names(node.class) <- node.names
geno.list <- list()
markers <- c()
for(i in 1:nrow(qtl.df)) {
markers[i] <- qtl::find.marker(qtl, qtl.df$chr[i], qtl.df$location[i])
geno.list[[i]] <-
data.frame(qtl$geno[[qtl.df$chr[i]]]$data)[[markers[i]]]
}
geno <- matrix(unlist(geno.list), byrow=FALSE, ncol=length(geno.list))
colnames(geno) <- locus
dat <- data.frame(geno, pheno)
result <- list(data=dat,
dag=dag,
node.class=node.class)
return(result)
}
###############
igraph_from_qtlnet <- function(qtl.fit) {
qtl_sum <- summary(qtl.fit)[[2]][, 1:2]
loci_list <- qtlnet::loci.qtlnet(qtl.fit)
df_loci <- data.frame(cause = unlist(loci_list), effect = names(loci_list))
rownames(df_loci) <- NULL
df_edge_list <- rbind(df_loci, qtl_sum)
igraph_output <- igraph::graph.data.frame(df_edge_list)
return(igraph_output)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/A7_Initialization_helpers.R
|
#' Convert qtlnet to graphNEL object
#'
#' Extract network structure from qtlnet object and convert to graphNEL object
#'
#' @details This function extracts network structure from qtlnet object and convert to graphNEL object.
#' The example data can be downloaded from <https://github.com/hyu-ub/BayesNetBP>.
#' @param data a \code{qtlnet} object
#'
#' @return \item{\code{graphNEL}}{a \code{graphNEL} object.}
#'
#' @importFrom igraph igraph.to.graphNEL
#' @author Han Yu
#'
#' @examples
#'
#' \dontrun{
#' load("liverqtl.rda")
#' qtlnet_to_graphNEL(liverqtl$qtlnet.fit)
#' }
#'
#' @export
qtlnet_to_graphNEL <- function(data) {
qtlnet_igraph <- igraph_from_qtlnet(data)
qtlnet_graphnel <- igraph::igraph.to.graphNEL(qtlnet_igraph)
return(qtlnet_graphnel)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/A8_qtlnet_to_graphNEL.R
|
ModelCompileData <- function(data, dag, node.class) {
# data=liver$data; dag=liver$dag; node.class=liver$node.class;
# data=nci.s; dag=tree.g
# data=df.2; dag=bn.graph; node.class=node.class;
###########
# dag, data.frame, node.class
# data <- dat
nodes <- names(node.class)# dag@nodes
dag.graph <- igraph.from.graphNEL(dag)
value.list <- list()
discrete.nodes <- nodes[node.class]
continuous.nodes <- nodes[!node.class]
df <- data
###### convert discrete variables into characters
df[discrete.nodes] <- lapply(df[discrete.nodes], as.character)
#################################################
dat.complete.0 <- df[complete.cases(df),]
######################
## discrete part starts
######################
cpt.pots <- list()
if (length(discrete.nodes)>0){
for (i in 1:length(discrete.nodes)) {
value.list[[i]] <- sort(unique(dat.complete.0[[discrete.nodes[i]]]))
}
names(value.list) <- discrete.nodes
# source("gRain_test_functions.R")
for (i in 1:length(discrete.nodes)) {
# i <- 2
this.node <- discrete.nodes[i]
this.parents <- names(neighbors(dag.graph, this.node, "in"))
all.nodes <- c(this.node, this.parents)
n.nodes <- length(all.nodes)
this.cpt <- expand.grid(value.list[all.nodes])
this.df <- df[all.nodes]
tab <- as.data.frame(xtabs(~., this.df))
pot.joint <- list(cpt=tab[1:n.nodes], prob=tab$Freq/sum(tab$Freq))
if(length(this.parents)==0) {
pot <- pot.joint
} else {
pot <- conditional(pot.joint, this.parents)
}
cpt.pots[[i]] <- pot
}
names(cpt.pots) <- discrete.nodes
}
######################
## discrete part ends
######################
######################
## continuous part starts
######################
bags <- list()
if (length(continuous.nodes)>0){
bags <- vector("list", length(continuous.nodes))
for (i in 1:length(continuous.nodes)){
# i <- 1
# this.bag <- list()
k <- 1
this.node <- continuous.nodes[i]
this.parents <- names(neighbors(dag.graph, this.node, "in"))
dat.complete <- df[, c(this.node, this.parents), drop=FALSE]
dat.complete <- dat.complete[complete.cases(dat.complete), , drop=FALSE]
this.classes <- node.class[this.parents]
######################
discrete.parents <- this.parents[which(this.classes)]
continuous.parents <- this.parents[which(!this.classes)]
######################
if(length(discrete.parents)==0 & length(continuous.parents)==0){
this.bag <- new("LPPotential",
head = this.node,
tail = continuous.parents
)
y <- dat.complete[[this.node]]
this.bag@const <- mean(y)
this.bag@variance <- var(y)
bags[[i]] <- this.bag
next
}
###### no discrete parent, but with continuous parents, there is only one linear model
if(length(discrete.parents)==0){
this.bag <- new("LPPotential",
head = this.node,
tail = continuous.parents,
beta = matrix(NA, nrow=1, ncol=length(continuous.parents))
)
colnames(this.bag@beta) <- continuous.parents
df.2 <- dat.complete[, c(this.node, continuous.parents), drop=FALSE]
df.sub <- df.2
form.str <- paste0(this.node, "~.")
form <- as.formula(form.str)
# print(c("######################################################FORM:", form))
lm.fit <- lm(form, df.sub)
coefs <- coef(lm.fit)
this.bag@beta[1,] <- coefs[2:length(coefs)]
this.bag@const <- coefs[1]
this.bag@variance <- summary(lm.fit)$sigma^2
bags[[i]] <- this.bag
next
}
####### conditions where there are discrete parents
this.disc.vals <- value.list[discrete.parents]
this.all.combs <- expand.grid(this.disc.vals, stringsAsFactors=FALSE)
comb.val.list <- apply(this.all.combs, 1, paste0, collapse="%")
df.1 <- dat.complete[discrete.parents] ## mark
df.comb <- apply(df.1, 1, paste0, collapse="%")
### condition 3, only discrete parents
if(length(continuous.parents)==0){
this.bag <- new("LPPotential",
head = this.node,
config = matrix(NA, nrow=length(comb.val.list), ncol=length(discrete.parents)),
beta = matrix(0, nrow=length(comb.val.list), ncol=0)
)
colnames(this.bag@config) <- discrete.parents
for (j in 1:length(comb.val.list)) {
# j <- 1
sub.ind <- which(df.comb==comb.val.list[j])
df.2 <- dat.complete[[this.node]]
y <- df.2[sub.ind]
this.bag@config[j,] <- as.vector(this.all.combs[j,], mode="character")
this.bag@const[j] <- mean(y)
this.bag@variance[j] <- var(y)
}
bags[[i]] <- this.bag
next
}
### condition 4, both discrete and continuous parents
this.bag <- new("LPPotential",
head = this.node,
tail = continuous.parents,
config = matrix(NA, nrow=length(comb.val.list), ncol=length(discrete.parents)),
beta = matrix(NA, nrow=length(comb.val.list), ncol=length(continuous.parents))
)
colnames(this.bag@config) <- discrete.parents
colnames(this.bag@beta) <- continuous.parents
for (j in 1:length(comb.val.list)) {
# j <- 1
sub.ind <- which(df.comb==comb.val.list[j])
df.2 <- dat.complete[ , c(this.node, continuous.parents), drop=FALSE] #######
df.sub <- df.2[sub.ind,]
form.str <- paste0(this.node, "~.")
form <- as.formula(form.str)
# print(c("######################################################FORM:", form))
lm.fit <- lm(form, df.sub)
coefs <- coef(lm.fit)
this.bag@config[j,] <- as.vector(this.all.combs[j,], mode="character")
this.bag@beta[j,] <- coefs[2:length(coefs)]
this.bag@const[j] <- coefs[1]
this.bag@variance[j] <- summary(lm.fit)$sigma^2
}
bags[[i]] <- this.bag
}
names(bags) <- continuous.nodes
}
######################
## continuous part ends
######################
result <- list(pots = cpt.pots,
bags = bags)
return(result)
}
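## Minimal usage sketch (wrapped in `if (FALSE)` so it never runs at load time;
## assumes the 'liver' example data shipped with the package, as in the
## exported examples below):
if (FALSE) {
  data(liver)
  models <- ModelCompileData(data = liver$data, dag = liver$dag,
                             node.class = liver$node.class)
  names(models$pots)  # one conditional probability table per discrete node
  names(models$bags)  # one set of LPPotentials per continuous node
}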
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/B1_ModelCompileData.R
|
#' Model compilation
#'
#' Compile the local models
#'
#' @details This function compiles the local models, including the conditional
#' probability tables for discrete variables, and linear predictor potentials
#' for continuous variables. The qtlnet and qtl packages need to be installed if \code{data}
#' is a \code{qtlnet} object.
#'
#' @param data a \code{data.frame} object or a \code{qtlnet} object
#' @param dag \code{NULL} if data is \code{qtlnet} object, or a \code{graphNEL} object of conditional
#' Gaussian Bayesian network if data is \code{data.frame}.
#' @param node.class \code{NULL} if data is \code{qtlnet} object, or a \code{vector} of logical values
#' named by node names, \code{TRUE} for discrete, \code{FALSE} for continuous variables if data
#' is \code{data.frame}.
#'
#' @return
#' \describe{
#' \item{\code{pots}}{a \code{list} of discrete potentials (conditional probability tables)
#' for each discrete variable. }
#' \item{\code{bags}}{a \code{list} of sets of continuous potentials (LPPotentials), one set
#' for each continuous variable.}
#' }
#'
#' @import doBy
#' @importFrom graph nodes
#' @importFrom igraph igraph.options
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @examples
#'
#' data(liver)
#' models <- LocalModelCompile(data=liver$data, dag=liver$dag, node.class=liver$node.class)
#'
#' @seealso \code{\link{ElimTreeInitialize}}
#'
#' @export
LocalModelCompile <- function(data, dag=NULL, node.class=NULL) {
if("qtlnet" %in% class(data)){
extr <- extractQTL(data)
data <- extr$data
dag <- extr$dag
node.class <- extr$node.class
}
models <- ModelCompileData(data, dag, node.class)
return(models)
}
# @importFrom qtl find.marker
# @importFrom qtl scanone
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/B2_LocalModelCompile.R
|
#' Convert a bn object to graphNEL object
#'
#' Convert a bn object to graphNEL object while removing isolated nodes
#'
#' @param graph_bn a \code{bn} object of Bayesian network
#' @return a \code{graphNEL} object
#'
#' @author Han Yu
#'
#' @importFrom igraph igraph.from.graphNEL igraph.to.graphNEL induced_subgraph degree
#' @importFrom bnlearn as.graphNEL
#'
#' @export
bn_to_graphNEL <- function(graph_bn) {
graph.graphNEL <- bnlearn::as.graphNEL(graph_bn)
graph.igraph <- igraph.from.graphNEL(graph.graphNEL)
# remove isolated nodes
deg <- igraph::degree(graph.igraph)
nodes <- names(deg)[deg>0]
graph.igraph.sub <- induced_subgraph(graph.igraph, nodes)
graph.graphNEL.sub <- igraph.to.graphNEL(graph.igraph.sub)
return(graph.graphNEL.sub)
}
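## Hedged usage sketch (never executed; hc() and the 'learning.test' data set
## come from the bnlearn package, which this function already depends on):
if (FALSE) {
  library(bnlearn)
  bn.obj <- hc(learning.test)  # structure learning returns a 'bn' object
  g <- bn_to_graphNEL(bn.obj)  # graphNEL with isolated nodes removed
  graph::nodes(g)
}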
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/B3_bn_to_graphNEL.R
|
#' Initialize the elimination tree
#'
#' Initialize the elimination tree with the local models
#'
#' @details Initialize the elimination tree with the local models
#'
#' @param tree a \code{graphNEL} object of the elimination tree
#' @param dag a \code{graphNEL} object of the Bayesian network
#' @param model a \code{list} of local models built from \code{\link{LocalModelCompile}} function
#' @param node.sets a \code{list} of cluster sets obtained from \code{\link{ClusterTreeCompile}} function
#' @param node.class a named \code{vector} of \code{logical} values, \code{TRUE} if node
#' is discrete, \code{FALSE} otherwise
#'
#' @return \code{\linkS4class{ClusterTree}} object with the local models incorporated
#'
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @import doBy
#' @importFrom graph nodes
#' @importFrom igraph neighbors
#' @importFrom methods new
#' @examples
#'
#' data(liver)
#' cst <- ClusterTreeCompile(dag=liver$dag, node.class=liver$node.class)
#' models <- LocalModelCompile(data=liver$data, dag=liver$dag, node.class=liver$node.class)
#' tree.init <- ElimTreeInitialize(tree=cst$tree.graph,
#' dag=cst$dag,
#' model=models,
#' node.sets=cst$cluster.sets,
#' node.class=cst$node.class)
#'
#' @seealso The functions \code{\link{ClusterTreeCompile}} and \code{\link{LocalModelCompile}} provide necessary
#' objects to obtain \code{\linkS4class{ClusterTree}} object by initializing the elimination tree through this function.
#'
#' @export
ElimTreeInitialize <- function(tree, dag, model, node.sets, node.class){
e.seq <- EliminationOrder(dag, node.class)
ClusterTree <- new("ClusterTree",
cluster = graph::nodes(tree),
node = e.seq,
graph = list(dag = dag, tree = tree),
member = node.sets,
node.class = node.class[e.seq],
propagated = FALSE
)
ClusterTree@activeflag <- rep(TRUE, length(ClusterTree@cluster))
names(ClusterTree@activeflag) <- ClusterTree@cluster
tree.graph <- igraph.from.graphNEL(tree) # get the igraph object for tree
dag.graph <- igraph.from.graphNEL(dag)
for (i in 1:length(ClusterTree@cluster)) {
this.cluster <- ClusterTree@cluster[i]
this.par <- neighbors(tree.graph, v=this.cluster, mode="in")$name
if (length(this.par)==0) {
ClusterTree@parent[i] <- NA
} else {
ClusterTree@parent[i] <- this.par
}
[email protected][i] <- as.logical(prod(node.class[ node.sets[[ ClusterTree@cluster[i] ]] ]))
}
names(ClusterTree@parent) <- ClusterTree@cluster
names([email protected]) <- ClusterTree@cluster
continuous.clusters <- ClusterTree@cluster[[email protected]]
continuous.nodes <- names(node.class)[!node.class]
discrete.clusters <- ClusterTree@cluster[[email protected]]
discrete.nodes <- names(node.class)[node.class]
ClusterTree@assignment <- asgn <- assignUniverse(dag, node.sets[discrete.clusters], discrete.nodes)
#################################################
## initialize with local models
#################################################
## initialize the discrete part
if(length(discrete.clusters)!=0) {
for (i in 1:length(discrete.clusters)) {
this.cluster <- discrete.clusters[i]
for (j in 1:length(asgn[[this.cluster]])) {
this.asgn <- asgn[[this.cluster]][j]
if (j==1) {
pot <- model$pots[[this.asgn]]
} else {
pot <- factor.product(pot, model$pots[[this.asgn]])
}
}
ClusterTree@cpt[[i]] <- pot
}
names(ClusterTree@cpt) <- discrete.clusters
}
## initialize the continuous part
if (length(continuous.clusters)!=0) {
for (j in 1:length(continuous.clusters)) {
ClusterTree@lppotential[[j]] <- list()
ClusterTree@postbag[[j]] <- list()
}
names(ClusterTree@lppotential) <- continuous.clusters
names(ClusterTree@postbag) <- continuous.clusters
reallocate <- FALSE
cl.reallocate <- c()
for (i in 1:length(continuous.nodes)) {
this.node <- continuous.nodes[i]
this.par <- neighbors(dag.graph, v=this.node, mode="in")$name
this.all <- c(this.node, this.par)
for (j in 1:length(continuous.clusters)) {
this.cluster <- continuous.clusters[j]
this.member <- ClusterTree@member[[this.cluster]]
if (all(this.all %in% this.member)) {
if(this.node==this.cluster){
l <- length(ClusterTree@lppotential[[j]])
ClusterTree@lppotential[[j]][[l+1]] <- model$bags[[this.node]]
names(ClusterTree@lppotential[[j]]) <- c(names(ClusterTree@lppotential[[j]]), this.node)
} else {
reallocate <- TRUE # need reallocation of lppotentials
cl.reallocate <- union(cl.reallocate, this.cluster) # record the clusters requiring reallocation
l <- length(ClusterTree@postbag[[j]])
ClusterTree@postbag[[j]][[l+1]] <- model$bags[[this.node]]
names(ClusterTree@postbag[[j]]) <- c(names(ClusterTree@postbag[[j]]), this.node)
}
break
}
}
}
# This step seems to be unnecessary if the elimination sequence is properly arranged.
# No reallocation of lppotentials in postbags is needed in the data sets tested so far.
# If such a case is encountered, please report it to the maintainer.
if (reallocate) {
cat("Reallocation of LPPotentials is required for clusters: ",
paste0(cl.reallocate, collapse=", "), ".", "Please contact maintainer.\n")
# Reallocate
}
}
return(ClusterTree)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/C1_ElimTreeInitialize.R
|
#' Initialize a ClusterTree object
#'
#' Initialize a ClusterTree object
#'
#' @details A wrapper function to initialize a \code{\linkS4class{ClusterTree}} object. It combines
#' the functions of \code{\link{ClusterTreeCompile}}, \code{\link{LocalModelCompile}},
#' \code{\link{ElimTreeInitialize}} and \code{\link{Propagate}}, thus initialize the \code{\linkS4class{ClusterTree}}
#' object in a single step.
#'
#' @param dag a \code{graphNEL} object of the Bayesian network
#' @param data a \code{data.frame} object
#' @param node.class a named \code{vector} of \code{logical} values, \code{TRUE} if node
#' is discrete, \code{FALSE} otherwise
#' @param propagate \code{logical}; \code{TRUE} if the discrete part of the \code{\linkS4class{ClusterTree}}
#' is to be propagated
#'
#' @return \code{\linkS4class{ClusterTree}} object
#'
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @examples
#' data(liver)
#' tree.init.p <- Initializer(dag=liver$dag, data=liver$data,
#' node.class=liver$node.class,
#' propagate = TRUE)
#' @seealso \code{\link{ClusterTreeCompile}}, \code{\link{LocalModelCompile}}, \code{\link{ElimTreeInitialize}},
#' \code{\link{Propagate}}
#'
#' @export
Initializer <- function(dag, data, node.class, propagate = TRUE){
cst <- ClusterTreeCompile(dag=dag, node.class=node.class)
models <- LocalModelCompile(data=data, dag=dag, node.class=node.class)
tree.init <- ElimTreeInitialize(tree=cst$tree.graph,
dag=cst$dag,
model=models,
node.sets=cst$cluster.sets,
node.class=cst$node.class)
if(propagate) {
tree.init <- Propagate(tree.init)
}
return(tree.init)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/C2_Initializer.R
|
###########################################
## Exchange of two LPPotentials
###########################################
Exchange <- function(bag.post, bag.lp) {
# bag.post <- tree.push@postbag[[this.par]][[1]]; bag.lp <- tree.push@lppotential[[this.par]][[1]]
if (!bag.lp@head %in% bag.post@tail) {
# if the head of lpp is not in the tail of the postbag potential,
# there is no need to perform exchange operation
result <- list(postbag = bag.post,
lppotential = bag.lp)
return(result)
}
## need to deal with special cases where at least one configuration is empty
ind.exp <- index.generator(bag.post@config, bag.lp@config)
nt <- nrow(ind.exp)
ind.1 <- ind.exp[,1]
ind.2 <- ind.exp[,2]
beta.1 <- bag.post@beta[ind.1, , drop=FALSE]
## changed here for pure continuous case
if (ncol(bag.lp@beta)==0 && nrow(bag.lp@beta)==0) {
beta.2 <- matrix(0, ncol=0, nrow=1)
} else {
beta.2 <- bag.lp@beta[ind.2, , drop=FALSE]
}
## processing variables
w <- setdiff(union(bag.post@tail, bag.lp@tail), bag.lp@head) # name of W variables
b <- beta.1[, bag.lp@head] # vector
rem.1 <- setdiff(w, bag.post@tail)
rem.2 <- setdiff(w, bag.lp@tail)
if (length(rem.1)==0) {
a <- beta.1[, w, drop=FALSE]
} else {
expand.1 <- matrix(0, nrow=nt, ncol=length(rem.1))
colnames(expand.1) <- rem.1
a.0 <- cbind(beta.1, expand.1)
a <- a.0[, w, drop=FALSE]
}
if (length(rem.2)==0) {
if (length(w)==0) {
c <- beta.2
} else {
c <- beta.2[, w, drop=FALSE]
}
} else {
expand.2 <- matrix(0, nrow=nt, ncol=length(rem.2))
colnames(expand.2) <- rem.2
## bug fix 2019.2.18
if(ncol(beta.2) == 0) {
c.0 <- expand.2
} else {
c.0 <- cbind(beta.2, expand.2)
}
c <- c.0[, w, drop=FALSE]
}
s1 <- bag.post@variance[ind.1]
s2 <- bag.lp@variance[ind.2]
const1 <- bag.post@const[ind.1]
const2 <- bag.lp@const[ind.2]
## Exchange operation
beta.post <- a + b*c # matrix
colnames(beta.post) <- w
const.post <- const1 + b*const2
denom <- s.post <- s1 + b^2*s2 # vec
beta.lp.w <- (c*s1 - a*b*s2)/denom
beta.z <- b*s2/denom
beta.lp <- cbind(beta.z, beta.lp.w)
colnames(beta.lp) <- c(bag.post@head, w)
const.lp <- (const2*s1 - const1*b*s2)/denom
s.lp <- s1*s2/denom
## manipulation of configurations
config.var.1 <- colnames(bag.post@config)
config.var.2 <- colnames(bag.lp@config)
config.var.int <- intersect(config.var.1, config.var.2)
config.rem.1 <- setdiff(config.var.1, config.var.int)
config.rem.2 <- setdiff(config.var.2, config.var.int)
###### changed > 1.2.1
if(ncol(bag.post@config)==0 && ncol(bag.lp@config)==0) {
config.after <- matrix(0, ncol=0, nrow=0)
} else if (ncol(bag.post@config)==0) {
config.after <- bag.lp@config
} else if (ncol(bag.lp@config)==0) {
config.after <- bag.post@config
} else {
config.after <- cbind(bag.post@config[ind.1, config.rem.1, drop=FALSE],
bag.lp@config[ind.2, config.rem.2, drop=FALSE],
bag.lp@config[ind.2, config.var.int, drop=FALSE])
}
## wrap up the results
bag.post.after <- new("LPPotential",
head = bag.post@head,
# tail = colnames(beta.post),
config = config.after,
beta = beta.post,
const = const.post,
variance = s.post
)
## bag.post.after@tail may be empty; bag.lp.after@tail always contains at least bag.post@head
if(ncol(beta.post)==0) {
bag.post.after@tail <- character(0)
} else {
bag.post.after@tail <- colnames(beta.post)
}
bag.lp.after <- new("LPPotential",
head = bag.lp@head,
# tail = colnames(beta.lp),
config = config.after,
beta = beta.lp,
const = const.lp,
variance = s.lp
)
if(ncol(beta.lp)==0) {
bag.lp.after@tail <- character(0)
} else {
bag.lp.after@tail <- colnames(beta.lp)
}
result <- list(postbag = bag.post.after,
lppotential = bag.lp.after)
return(result)
}
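## Numeric sanity check of the exchange formulas above, using plain arithmetic
## instead of LPPotential objects (all values hypothetical): postbag
## Y ~ N(const1 + b*Z, s1) is exchanged with lppotential Z ~ N(const2, s2).
if (FALSE) {
  b <- 2; s1 <- 0.5; s2 <- 1; const1 <- 1; const2 <- 0
  s.post   <- s1 + b^2 * s2                         # Var(Y) after marginalizing Z
  beta.z   <- b * s2 / s.post                       # coefficient of Y in Z | Y
  const.lp <- (const2 * s1 - const1 * b * s2) / s.post
  s.lp     <- s1 * s2 / s.post                      # Var(Z | Y)
  c(s.post, beta.z, const.lp, s.lp)                 # 4.5, 0.444, -0.444, 0.111
}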
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/D1_ExchangeOperation.R
|
#' Absorb evidence into the model
#'
#' @details Absorb multiple types and pieces of evidences into a \code{\linkS4class{ClusterTree}}
#' object. The discrete compartment of the \code{\linkS4class{ClusterTree}} will be automatically
#' propagated after evidence absorption, so that the object will be ready for making
#' queries and absorbing additional evidence.
#'
#' @param tree a \code{\linkS4class{ClusterTree}} object
#' @param vars a \code{vector} of the names of observed variables
#' @param values a \code{list} of observed values of the variables. Each element can be
#' a single observed value or, for soft evidence, a named vector of likelihood values
#'
#' @return \code{\linkS4class{ClusterTree}} object with the evidence absorbed
#'
#' @author Han Yu
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Lauritzen, S. L., & Spiegelhalter, D. J. (1988). Local computations with probabilities on
#' graphical structures and their application to expert systems. Journal of the Royal Statistical
#' Society. Series B (Methodological), 157-224. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @import stats utils
#' @importFrom igraph igraph.from.graphNEL igraph.to.graphNEL V
#'
#' @examples
#'
#' data(liver)
#' tree.init.p <- Initializer(dag=liver$dag, data=liver$data,
#' node.class=liver$node.class,
#' propagate = TRUE)
#' tree.post <- AbsorbEvidence(tree.init.p, c("Nr1i3", "chr1_42.65"), list(1,"1"))
#'
#' @export
AbsorbEvidence <- function(tree, vars, values) {
node.class <- [email protected]
hard <- c()
soft <- c()
hard.values <- list()
soft.values <- list()
if(sum(vars %in% [email protected])!=0) {
var.in <- vars[vars %in% [email protected]]
msg1 <- paste0(var.in, collapse=", ")
stop(paste0(msg1, " is/are already observed."))
}
if(sum(vars %in% [email protected])!=0) {
var.in <- vars[vars %in% [email protected]]
msg1 <- paste0(var.in, collapse=", ")
warning(paste0(msg1, " has/have absorbed likelihood evidence multiple times."))
}
if(length(vars)!=0){
var.class <- node.class[vars]
for(i in 1:length(vars)) {
if (var.class[i]) {
if (length(values[[i]])==1){
tree <- DiscreteEvidence(tree, vars[i], values[[i]])
hard <- c(hard, vars[i]) #
hard.values <- append(hard.values, values[[i]]) #
}
if (length(values[[i]])>1) {
tree <- VirtualEvidence(tree, vars[i], values[[i]])
soft <- c(soft, vars[i]) #
soft.values <- append(soft.values, values[i]) #
}
}
}
for(i in 1:length(vars)) {
if (!var.class[i]) {
tree <- PushOperation(tree, vars[i], values[[i]])
hard <- c(hard, vars[i]) #
hard.values <- append(hard.values, values[[i]]) #
}
}
}
tree <- Propagate(tree, vars)
[email protected] <- c([email protected], hard)
[email protected] <- c([email protected], soft)
[email protected] <- append([email protected], hard.values)
[email protected] <- append([email protected], soft.values)
names([email protected]) <- [email protected]
names([email protected]) <- [email protected]
return(tree)
}
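## Soft (virtual) evidence sketch (never executed): pass a named likelihood
## vector instead of a single value. The level names and weights below are
## hypothetical and must match the observed levels of the variable:
if (FALSE) {
  data(liver)
  tree.p <- Initializer(dag = liver$dag, data = liver$data,
                        node.class = liver$node.class, propagate = TRUE)
  tree.soft <- AbsorbEvidence(tree.p, "chr1_42.65", list(c("1" = 0.9, "2" = 0.1)))
}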
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/D2_AbsorbEvidence.R
|
#' Propagate the cluster tree
#'
#' This function propagates the discrete compartment of a \code{\linkS4class{ClusterTree}} object.
#'
#' @details The discrete compartment must be propagated to obtain the joint distributions
#' of the discrete variables in each discrete cluster. A \code{\linkS4class{ClusterTree}} object must be propagated
#' before absorbing evidence and making queries.
#'
#' @param tree an initialized \code{\linkS4class{ClusterTree}} object
#' @param targets the clusters involved in evidence propagation; usually left at the default
#'
#' @return a \code{\linkS4class{ClusterTree}} object
#'
#' @references Cowell, R. G. (2005). Local propagation in conditional Gaussian Bayesian networks.
#' Journal of Machine Learning Research, 6(Sep), 1517-1550. \cr
#' \cr
#' Lauritzen, S. L., & Spiegelhalter, D. J. (1988). Local computations with probabilities on
#' graphical structures and their application to expert systems. Journal of the Royal Statistical
#' Society. Series B (Methodological), 157-224. \cr
#' \cr
#' Yu H, Moharil J, Blair RH (2020). BayesNetBP: An R Package for Probabilistic Reasoning in Bayesian
#' Networks. Journal of Statistical Software, 94(3), 1-31. <doi:10.18637/jss.v094.i03>.
#'
#' @examples
#'
#' data(liver)
#' tree.init <- Initializer(dag=liver$dag, data=liver$data,
#' node.class=liver$node.class,
#' propagate = FALSE)
#' tree.init@propagated
#' tree.init.p <- Propagate(tree.init)
#' tree.init.p@propagated
#'
#' @export
# targets is a vector of any of the nodes in the graph, or NA
Propagate <- function(tree, targets = NA) {
discrete.clusters <- tree@cluster[[email protected]]
if (length(discrete.clusters)==0) {
tree@propagated <- TRUE
return(tree)
}
if (length(discrete.clusters)==1) {
tree@jpt <- tree@cpt
tree@propagated <- TRUE
return(tree)
}
tree.graph <- igraph.from.graphNEL(tree@graph$tree)
tree.sub.graph <- induced_subgraph(tree.graph, discrete.clusters)
potentials.sub <- tree@cpt[discrete.clusters]
discrete.sets <- tree@member[discrete.clusters]
tree@jpt <- propagate.worker(tree.sub.graph, potentials.sub, discrete.sets, targets = targets)
tree@propagated <- TRUE
return(tree)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/D3_Propagate.R
|
###########################################
## Propagate
###########################################
# targets is a vector of ANY nodes in the graph
propagate.worker <- function(tree.graph, potentials, cluster.sets, targets = NA){
decomposed_tree <- igraph::decompose(tree.graph)
if(any(!is.na(targets))){ # if any elements in targets are not NA
cleaned_targets <- targets[!is.na(targets)] # remove NA
cleaned_targets <- cleaned_targets[cleaned_targets %in% unlist(cluster.sets)] # remove invalid targets
temp <- length(decomposed_tree)
for(j in 1:temp){ # for every tree component
if(!any( cleaned_targets %in% names(V(decomposed_tree[[j]])) )){ # if NONE of the cleaned targets are in the component
decomposed_tree[[j]] <- NA # set it to NA for later removal
}
}
decomposed_tree <- decomposed_tree[!is.na(decomposed_tree)] # remove all NA components(from above)
}
component_number <- length(decomposed_tree)
worker_results <- vector("list", component_number)
if(component_number > 0){
for(i in 1:component_number){
temp_names <- names(V(decomposed_tree[[i]]))
# we know propagate.worker_orig works for single-component graphs
# can exploit this and propagate.worker_orig on each component and then combine the results
worker_results[[i]] <- propagate.worker_orig(decomposed_tree[[i]], potentials[temp_names], cluster.sets[temp_names])
}
# print(worker_results)
return(unlist(worker_results, recursive = FALSE))
}
else{
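# fallback: no component contains a valid target, so propagate the whole tree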
return(propagate.worker(tree.graph, potentials, cluster.sets))
}
}
###################################################
propagate.worker_orig <- function(tree.graph, potentials, cluster.sets){
# tree.graph <- tree.sub.graph; potentials <- potentials.sub; cluster.sets <- discrete.sets
cluster.tree <- list(
# bn=dag,
tree=tree.graph,
clusters=cluster.sets,
# assignment=asgn,
collected=c(), active=c(), potentials=potentials, joint=potentials)
clusters <- names(potentials)
result <- list()
## NEW version of getting joints
# collect
ce <- CollectEvidence(cluster.tree, clusters[1])
# reset active nodes
ce$active <- c()
# distribute
de <- DistributeEvidence(ce, clusters[1])
result <- de$joint
return(result)
}
###################################################
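# Absorb: a single message pass between two adjacent cliques. The marginal of
# 'absorbedFrom' over the separator is multiplied into 'absorbedTo'; during
# collection (distribute=FALSE) 'absorbedFrom' is additionally divided by that
# marginal, so the joint distribution represented by the tree is unchanged.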
Absorb <- function(absorbedTo, absorbedFrom, separator, distribute=FALSE){
pot1 <- absorbedFrom
pot2 <- absorbedTo
# pot2 <- cluster.tree$potentials[["Cyp2b10"]]; pot1 <- cluster.tree$potentials[["HDL"]];
# separator <- intersect( cluster.tree$clusters[["Cyp2b10"]], cluster.tree$clusters[["HDL"]])
inter.var <- separator
sep <- marginalize.discrete(pot1, inter.var)
results <- list()
if (distribute) {
results[[1]] <- NULL
} else {
results[[1]] <- factor.divide(pot1, sep)
}
results[[2]] <- factor.product(pot2, sep)
return(results)
}
###########################################
## Collect evidence
###########################################
CollectEvidence <- function(cluster.tree, node){
process_queue <- c(node) # nodes to be processed - end nodes are last (back)
node_stack <- c(list(c(node, NA))) # nodes to be absorbed - end nodes are first (front)
# add in pairs: child, parent pairs
processed <- c(node)
while(length(process_queue) > 0){
neighbors <- names(neighbors(cluster.tree$tree, process_queue[1], mode = "all")) # get neighbors
unprocessed_neighbors <- neighbors[!(neighbors %in% processed)] # cull for neighbors not in stack
if(length(unprocessed_neighbors) > 0){
temp <- cbind(unprocessed_neighbors, rep(process_queue[1], length(unprocessed_neighbors)))
node_stack <- c(split(temp, 1:nrow(temp)), node_stack) # add neighbors to stack top
process_queue <- c(process_queue[-1], unprocessed_neighbors) # remove first node and add neighbors to queue back
}
else{
process_queue <- process_queue[-1]
}
processed <- c(processed, unprocessed_neighbors)
}
# the last stack entry is the root (its parent is NA), so it is not absorbed;
# the guard avoids an invalid 1:0 loop when the component is a single clique
if (length(node_stack) > 1) {
for(i in 1:(length(node_stack)-1)){
child <- node_stack[[i]][1]
parent <- node_stack[[i]][2]
abb <- Absorb(cluster.tree$potentials[[parent]], cluster.tree$potentials[[child]],
separator = intersect( cluster.tree$clusters[[parent]], cluster.tree$clusters[[child]]))
cluster.tree$potentials[[parent]] <- abb[[2]]
cluster.tree$potentials[[child]] <- abb[[1]]
}
}
clique.names <- names(V(cluster.tree$tree))
cluster.tree$collected <- processed
cluster.tree$active <- processed
return(cluster.tree)
}
###########################################
## Distribute evidence
###########################################
DistributeEvidence <- function(cluster.tree, node){
clique.names <- names(V(cluster.tree$tree))
# all cliques start inactive here: propagate.worker resets cluster.tree$active
# between the collect and distribute phases
nodes_status <- data.frame(clique.names, active = FALSE, queued = FALSE, stringsAsFactors = FALSE)
nodes_status[nodes_status$clique.names %in% node,]$queued <- TRUE
while(any(nodes_status$queued)){
n <- nodes_status[nodes_status$queued,][1,1]
nodes_status[nodes_status$queued,][1,]$active <- TRUE
nodes_status[nodes_status$queued,][1,]$queued <- FALSE
neighbors_names <- neighbors(cluster.tree$tree, n, mode = "all")$name # get neighbors of n
neighbors_status <- nodes_status[nodes_status$clique.names %in% neighbors_names,] # get neighbors' rows
inactive <- neighbors_status[!neighbors_status$active,] # cull for only uncollected neighbors
inactive_names <- inactive$clique.names # get the names of uncollected neighbors
if (length(inactive_names)>0) {
nodes_status[nodes_status$clique.names %in% inactive$clique.names, ]$queued <- TRUE # queue inactive neighbors
for (i in 1:length(inactive_names)) {
abb <- Absorb(cluster.tree$potentials[[inactive_names[i]]], cluster.tree$potentials[[n]],
separator = intersect( cluster.tree$clusters[[n]], cluster.tree$clusters[[inactive_names[i]]]),
distribute = TRUE)
cluster.tree$potentials[[inactive_names[i]]] <- abb[[2]]
}
}
cluster.tree$joint[[n]] <- cluster.tree$potentials[[n]]
}
cluster.tree$active <- nodes_status[nodes_status$active,]$clique.names
return(cluster.tree)
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/D4_PropagateWorker.R
|
###########################################
## Add Evidence to LPPotential
###########################################
substituteEvidence <- function(lppot, vars, vals)
{
if(!all(vars %in% lppot@tail)) {
stop("The variables are not a subset of the lppotential's tail.")
}
varinds <- c()
for(i in 1:length(vars)) {
varinds[i] <- which(lppot@tail==vars[i])
}
if (length(varinds)==0) {
return(lppot)
}
lppot@const <- c(lppot@const + lppot@beta[,vars, drop=FALSE] %*% vals)
lppot@tail <- lppot@tail[-varinds]
lppot@beta <- lppot@beta[, -varinds, drop=FALSE]
return(lppot)
}
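## Arithmetic of the substitution above (hypothetical numbers, never executed):
## observing a tail variable X = x folds beta*x into the potential's intercept.
if (FALSE) {
  const <- 1
  beta <- matrix(c(2, -1), nrow = 1, dimnames = list(NULL, c("X", "W")))
  c(const + beta[, "X", drop = FALSE] %*% 3)  # new intercept: 1 + 2*3 = 7
}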
###########################################
## Absorb Continuous Evidence
###########################################
PushOperation <- function(tree.push, var, val){
# Step 1
e.seq <- tree.push@node # the node is arranged in elimination order
k <- which(e.seq == var)
if (k!=1){
for (i in 1:(k-1)) {
# i <- 2
this.cluster <- e.seq[i]
if( length(tree.push@lppotential[[this.cluster]])>0 ) {
this.tail <- tree.push@lppotential[[this.cluster]][[1]]@tail
} else {
this.tail <- NA
}
if (var %in% this.tail) {
this.pot <- tree.push@lppotential[[this.cluster]][[1]]
this.pot <- substituteEvidence(this.pot, var, val)
tree.push@lppotential[[this.cluster]][[1]] <- this.pot
}
}
}
# Step 2
tree.push@postbag[[var]] <- tree.push@lppotential[[var]]
tree.push@lppotential[[var]] <- list()
tree.push@activeflag[[var]] <- FALSE
# Step 3
this.par <- tree.push@parent[[var]]
this.var <- var
while(!is.na(this.par) && [email protected][[this.par]]){
tree.push@postbag[[this.par]] <- tree.push@postbag[[this.var]]
## Check if it is necessary to perform exchange operation
flag <- tree.push@activeflag[[this.par]]
if (length(tree.push@lppotential[[this.par]])==0){
flag <- FALSE
} else {
## this check might be redundant, as it is also checked in Exchange function
lp.head <- tree.push@lppotential[[this.par]][[1]]@head
postbag.tail <- tree.push@postbag[[this.par]][[1]]@tail
if( !lp.head %in% postbag.tail){
flag <- FALSE
}
}
##
if (flag) {
newBag <- Exchange(tree.push@postbag[[this.par]][[1]], tree.push@lppotential[[this.par]][[1]])
tree.push@postbag[[this.par]][[1]] <- newBag$postbag
tree.push@lppotential[[this.par]][[1]] <- newBag$lppotential
tree.push@lppotential[[this.par]][[1]] <- substituteEvidence(tree.push@lppotential[[this.par]][[1]], var, val)
}
tree.push@postbag[[this.var]] <- list()
this.var <- this.par
this.par <- tree.push@parent[[this.var]]
}
# Step 4
if (is.na(this.par)) {
return(tree.push)
} else {
this.pot <- tree.push@postbag[[this.var]][[1]]
likelihood <- dnorm(rep(val, length(this.pot@const)), mean=this.pot@const, sd=sqrt(this.pot@variance))
pot <- list(cpt=this.pot@config, prob=likelihood)
tree.push@cpt[[this.par]] <- factor.product(pot, tree.push@cpt[[this.par]])
return(tree.push)
}
}
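## Step 4 in numbers (hypothetical, never executed): the observed continuous
## value enters the discrete part as one Gaussian likelihood weight per
## configuration of the postbag potential.
if (FALSE) {
  val <- 1.5
  dnorm(rep(val, 2), mean = c(1, 2), sd = sqrt(c(0.5, 0.5)))
}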
###########################################
## Absorb Discrete Evidence
###########################################
# tree <- tree.init; var <- "HDL"; val <- "High";
DiscreteEvidence <- function(tree, var, val) {
## CPT
if (length(tree@cpt) > 0) {
for (i in 1:length(tree@cpt)) {
tab <- tree@cpt[[i]]$cpt
prob <- tree@cpt[[i]]$prob
if(var %in% colnames(tab)){
k <- which(colnames(tab)==var)
keep <- which(tab[,k] == val)
tree@cpt[[i]]$cpt <- tab[keep, -k, drop=FALSE]
tree@cpt[[i]]$prob <- tree@cpt[[i]]$prob[keep]
tree@cpt[[i]]$prob <- tree@cpt[[i]]$prob/sum(tree@cpt[[i]]$prob)
}
}
}
## JPT
if (length(tree@jpt) > 0) {
for (i in 1:length(tree@jpt)) {
tab <- tree@jpt[[i]]$cpt
prob <- tree@jpt[[i]]$prob
if(var %in% colnames(tab)){
k <- which(colnames(tab)==var)
keep <- which(tab[,k] == val)
tree@jpt[[i]]$cpt <- tab[keep, -k, drop=FALSE]
tree@jpt[[i]]$prob <- tree@jpt[[i]]$prob[keep]
tree@jpt[[i]]$prob <- tree@jpt[[i]]$prob/sum(tree@jpt[[i]]$prob)
}
}
}
## LPPotential
if (length(tree@lppotential) > 0) {
for (i in 1:length(tree@lppotential)) {
tab <- tree@lppotential[[i]][[1]]@config
if(var %in% colnames(tab)){
k <- which(colnames(tab)==var)
keep <- which(tab[,k] == val)
tree@lppotential[[i]][[1]]@config <- tab[keep, -k, drop=FALSE]
tree@lppotential[[i]][[1]]@beta <- tree@lppotential[[i]][[1]]@beta[keep, , drop=FALSE]
tree@lppotential[[i]][[1]]@const <- tree@lppotential[[i]][[1]]@const[keep]
tree@lppotential[[i]][[1]]@variance <- tree@lppotential[[i]][[1]]@variance[keep]
}
}
}
## Postbag
if (length(tree@postbag) > 0) {
for (i in 1:length(tree@postbag)) {
if (length(tree@postbag[[i]]) > 0) {
for (j in 1:length(tree@postbag[[i]])) {
tab <- tree@postbag[[i]][[j]]@config
if(var %in% colnames(tab)){
k <- which(colnames(tab)==var)
keep <- which(tab[,k] == val)
tree@postbag[[i]][[j]]@config <- tab[keep, -k, drop=FALSE]
tree@postbag[[i]][[j]]@beta <- tree@postbag[[i]][[j]]@beta[keep, , drop=FALSE]
tree@postbag[[i]][[j]]@const <- tree@postbag[[i]][[j]]@const[keep]
tree@postbag[[i]][[j]]@variance <- tree@postbag[[i]][[j]]@variance[keep]
}
}
}
}
}
return(tree)
}
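## Stand-alone illustration of the conditioning step above (never executed):
## rows of a hypothetical table inconsistent with A = "a1" are dropped, the A
## column is removed, and the remaining probabilities are renormalized.
if (FALSE) {
  tab  <- expand.grid(A = c("a1", "a2"), B = c("b1", "b2"),
                      stringsAsFactors = FALSE)
  prob <- c(0.1, 0.2, 0.3, 0.4)
  keep <- which(tab$A == "a1")
  data.frame(tab[keep, -1, drop = FALSE], prob = prob[keep] / sum(prob[keep]))
}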
###########################################
## Virtual Evidence
###########################################
# tree <- tree.init; var <- vars[1]; val <- values[[1]];
VirtualEvidence <- function(tree, var, val) {
df.temp <- data.frame(names(val))
names(df.temp) <- var
lk.pot <- list(cpt = df.temp, prob = val/sum(val))
if (length(tree@cpt) == 0) {
stop("No discrete variable found.")
}
for (i in 1:length(tree@cpt)) {
this.pot <- tree@cpt[[i]]
tab <- this.pot$cpt
if(var %in% colnames(tab)){
result <- factor.product(this.pot, lk.pot)
tree@cpt[[i]] <- result
return(tree)
}
}
stop(paste0(var, " not found in any discrete potential."))
}
|
/scratch/gouwar.j/cran-all/cranData/BayesNetBP/R/D5_Absorb_helpers.R
|