#' Centering function
#'
#' This function determines the centering density of the normalized random
#' measure.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(x, distr = NULL, mu = NULL, sigma = NULL) {
#' if (is.null(distr)) {
#' stop("Argument \"distr\" should be defined numeric with possible values 1,2, or 3")
#' }
#' else if (distr == 1) {
#' a <- ifelse(is.null(mu), 0, mu)
#' b <- ifelse(is.null(sigma), 1, sigma)
#' p0 <- dnorm(x, mean = a, sd = b)
#' }
#' else if (distr == 2) {
#' a <- ifelse(is.null(mu), 1, mu^2 / sigma^2)
#' b <- ifelse(is.null(sigma), 1, mu / sigma^2)
#' p0 <- dgamma(x, shape = a, rate = b)
#' }
#' else if (distr == 3) {
#' a <- ifelse(is.null(mu), 0.5, (1 - mu) * (mu / sigma)^2 -
#' mu)
#' b <- ifelse(is.null(sigma), 1 / sqrt(12), (mu * (1 - mu) / sigma^2 -
#' 1) * (1 - mu))
#' if (any(c(a, b) <= 0)) {
#' stop(paste(
#' "\nNegative Beta parameters:\n a =", a,
#' ";\t b =", b
#' ))
#' }
#' p0 <- dbeta(x, shape1 = a, shape2 = b)
#' }
#' else {
#' stop("Argument \"distr\" should be defined numeric with possible values 1,2, or 3")
#' }
#' return(p0)
#' }
p0 <-
function(x, distr = NULL, mu = NULL, sigma = NULL) {
if (is.null(distr)) {
stop("Argument \"distr\" should be defined numeric with possible values 1,2, or 3")
}
else if (distr == 1) {
a <- ifelse(is.null(mu), 0, mu)
b <- ifelse(is.null(sigma), 1, sigma)
p0 <- dnorm(x, mean = a, sd = b)
}
else if (distr == 2) {
a <- ifelse(is.null(mu), 1, mu^2 / sigma^2)
b <- ifelse(is.null(sigma), 1, mu / sigma^2)
p0 <- dgamma(x, shape = a, rate = b)
}
else if (distr == 3) {
a <- ifelse(is.null(mu), 0.5, (1 - mu) * (mu / sigma)^2 -
mu)
b <- ifelse(is.null(sigma), 1 / sqrt(12), (mu * (1 - mu) / sigma^2 -
1) * (1 - mu))
if (any(c(a, b) <= 0)) {
stop(paste(
"\nNegative Beta parameters:\n a =", a,
";\t b =", b
))
}
p0 <- dbeta(x, shape1 = a, shape2 = b)
}
else {
stop("Argument \"distr\" should be defined numeric with possible values 1,2, or 3")
}
return(p0)
}
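# Usage sketch for p0 (a hedged illustration, not package code): all three
# centering densities share the mean/standard-deviation parameterization.
# p0(0.5, distr = 1, mu = 0, sigma = 1)     # normal density at 0.5
# p0(0.5, distr = 2, mu = 2, sigma = 1)     # gamma with mean 2 and sd 1
# p0(0.5, distr = 3, mu = 0.5, sigma = 0.1) # beta with mean 0.5 and sd 0.1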
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/p0.R
#' Distribution function half Cauchy
#'
#' Computes the cdf.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(q, location = 0, scale = 1) {
#' ifelse(q < 0, 0, 1) * (pcauchy(q, location, scale) - pcauchy(
#' 0,
#' location, scale
#' )) / (1 - pcauchy(0, location, scale))
#' }
phalfcauchy <-
function(q, location = 0, scale = 1) {
ifelse(q < 0, 0, 1) * (pcauchy(q, location, scale) - pcauchy(
0,
location, scale
)) / (1 - pcauchy(0, location, scale))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/phalfcauchy.R
#' Distribution function half Normal
#'
#' Computes the cdf.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(q, mean = 0, sd = 1) {
#' ifelse(q < 0, 0, 1) * (pnorm(q, mean, sd) - pnorm(
#' 0, mean,
#' sd
#' )) / (1 - pnorm(0, mean, sd))
#' }
phalfnorm <-
function(q, mean = 0, sd = 1) {
ifelse(q < 0, 0, 1) * (pnorm(q, mean, sd) - pnorm(
0, mean,
sd
)) / (1 - pnorm(0, mean, sd))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/phalfnorm.R
#' Distribution function half Student-t
#'
#' Computes the cumulative distribution function.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(q, df = 1, mean = 0, sd = 1) {
#' ifelse(q < 0, 0, 1) * (pt_(q, df, mean, sd) - pt_(
#' 0, df,
#' mean, sd
#' )) / (1 - pt_(0, df, mean, sd))
#' }
phalft <-
function(q, df = 1, mean = 0, sd = 1) {
ifelse(q < 0, 0, 1) * (pt_(q, df, mean, sd) - pt_(
0, df,
mean, sd
)) / (1 - pt_(0, df, mean, sd))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/phalft.R
#' Kernel distribution function
#'
#' This function evaluates the cumulative distribution function at a given
#' data point.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(q, distr = NULL, mu = NULL, sigma = NULL) {
#' msg <- "Argument \"distr\" must be numeric, with possible values 1 (normal), 2 (gamma), 3 (beta), 4 (double exponential), 5 (lognormal), 6 (half-Cauchy), 7 (half-normal), 8 (half-Student-t), 9 (uniform) and 10 (truncated normal)"
#' if (is.null(distr)) {
#' stop(msg)
#' }
#' else if (distr == 1) {
#' a <- ifelse(is.null(mu), 0, mu)
#' b <- ifelse(is.null(sigma), 1, sigma)
#' pk <- pnorm(q, mean = a, sd = b)
#' }
#' else if (distr == 2) {
#' a <- ifelse(is.null(mu), 1, mu^2 / sigma^2)
#' b <- ifelse(is.null(sigma), 1, mu / sigma^2)
#' pk <- pgamma(q, shape = a, rate = b)
#' }
#' else if (distr == 3) {
#' a <- ifelse(is.null(mu), 0.5, (1 - mu) * (mu / sigma)^2 -
#' mu)
#' b <- ifelse(is.null(sigma), 1 / sqrt(12), (mu * (1 - mu) / sigma^2 -
#' 1) * (1 - mu))
#' if (any(c(a, b) <= 0)) {
#' stop(paste(
#' "\nNegative Beta parameters:\n a =", a,
#' ";\t b =", b
#' ))
#' }
#' pk <- pbeta(q, shape1 = a, shape2 = b)
#' }
#' else if (distr == 4) {
#' a <- ifelse(is.null(mu), 0, mu)
#' b <- ifelse(is.null(sigma), 1 / sqrt(2), sigma / sqrt(2))
#' pk <- ifelse(q < a, exp((q - a) / b) / 2, 1 - exp((a - q) / b) / 2)
#' }
#' else if (distr == 5) {
#' a <- ifelse(is.null(mu), exp(1 / 2), log(mu / sqrt(1 + (sigma / mu)^2)))
#' b <- ifelse(is.null(sigma), exp(1) * (exp(1) - 1), sqrt(log(1 +
#' (sigma / mu)^2)))
#' pk <- plnorm(q, meanlog = a, sdlog = b)
#' }
#' else if (distr == 6) {
#' pk <- phalfcauchy(q, location = ifelse(is.null(mu), 0,
#' mu
#' ), scale = ifelse(is.null(sigma), 1, sigma))
#' }
#' else if (distr == 7) {
#' pk <- phalfnorm(q,
#' mean = ifelse(is.null(mu), 0, mu),
#' sd = ifelse(is.null(sigma), 1, sigma)
#' )
#' }
#' else if (distr == 8) {
#' pk <- phalft(q, df = 10, mean = ifelse(is.null(mu), 0,
#' mu
#' ), sd = ifelse(is.null(sigma), 1, sigma))
#' }
#' else if (distr == 9) {
#' pk <- punif(q, min = ifelse(is.null(mu), 0, mu), max = ifelse(is.null(sigma),
#' 1, sigma
#' ))
#' }
#' else if (distr == 10) {
#' pk <- ptnorm(q, mean = ifelse(is.null(mu), 0, mu), sd = ifelse(is.null(sigma),
#' 1, sigma
#' ), lower = 0.1)
#' }
#' else {
#' stop(msg)
#' }
#' return(pk)
#' }
pk <-
function(q, distr = NULL, mu = NULL, sigma = NULL) {
msg <- "Argument \"distr\" should be defined numeric with possible values 1 (normal), 2 (gamma), 3 (beta), 4 (exponential), 5 (lognormal), 6 (half-Cauchy), 7 (half-normal), 8 (half-student), 9 (uniform) and 10 (truncated normal)"
if (is.null(distr)) {
stop(msg)
}
else if (distr == 1) {
a <- ifelse(is.null(mu), 0, mu)
b <- ifelse(is.null(sigma), 1, sigma)
pk <- pnorm(q, mean = a, sd = b)
}
else if (distr == 2) {
a <- ifelse(is.null(mu), 1, mu^2 / sigma^2)
b <- ifelse(is.null(sigma), 1, mu / sigma^2)
pk <- pgamma(q, shape = a, rate = b)
}
else if (distr == 3) {
a <- ifelse(is.null(mu), 0.5, (1 - mu) * (mu / sigma)^2 -
mu)
b <- ifelse(is.null(sigma), 1 / sqrt(12), (mu * (1 - mu) / sigma^2 -
1) * (1 - mu))
if (any(c(a, b) <= 0)) {
stop(paste(
"\nNegative Beta parameters:\n a =", a,
";\t b =", b
))
}
pk <- pbeta(q, shape1 = a, shape2 = b)
}
else if (distr == 4) {
a <- ifelse(is.null(mu), 0, mu)
b <- ifelse(is.null(sigma), 1 / sqrt(2), sigma / sqrt(2))
pk <- ifelse(q < a, exp((q - a) / b) / 2, 1 - exp((a - q) / b) / 2)
}
else if (distr == 5) {
a <- ifelse(is.null(mu), exp(1 / 2), log(mu / sqrt(1 + (sigma / mu)^2)))
b <- ifelse(is.null(sigma), exp(1) * (exp(1) - 1), sqrt(log(1 +
(sigma / mu)^2)))
pk <- plnorm(q, meanlog = a, sdlog = b)
}
else if (distr == 6) {
pk <- phalfcauchy(q, location = ifelse(is.null(mu), 0,
mu
), scale = ifelse(is.null(sigma), 1, sigma))
}
else if (distr == 7) {
pk <- phalfnorm(q,
mean = ifelse(is.null(mu), 0, mu),
sd = ifelse(is.null(sigma), 1, sigma)
)
}
else if (distr == 8) {
pk <- phalft(q, df = 10, mean = ifelse(is.null(mu), 0,
mu
), sd = ifelse(is.null(sigma), 1, sigma))
}
else if (distr == 9) {
pk <- punif(q, min = ifelse(is.null(mu), 0, mu), max = ifelse(is.null(sigma),
1, sigma
))
}
else if (distr == 10) {
pk <- ptnorm(q, mean = ifelse(is.null(mu), 0, mu), sd = ifelse(is.null(sigma),
1, sigma
), lower = 0.1)
}
else {
stop(msg)
}
return(pk)
}
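# Usage sketch (hedged illustration): distr = 1 is a normal cdf and
# distr = 4 a double exponential (Laplace) cdf; both are symmetric
# around mu, so evaluating at q = mu gives 0.5.
# pk(0, distr = 1, mu = 0, sigma = 1) # 0.5
# pk(0, distr = 4, mu = 0, sigma = 1) # 0.5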
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/pk.R
#' Plot the density estimate and the 95\% credible interval for noncensored
#' data
#'
#' The density estimate is the mean posterior density computed on the data
#' points.
#' @import graphics
#'
#' @param fit A fitted object of class NRMI1 or NRMI2
#' @return A graph with the density estimate, the 95\% credible interval and a
#' histogram of the data
#' @examples
#'
#' data(acidity)
#' out <- MixNRMI1(acidity, Nit = 50)
#' plot(out)
plotfit_noncensored <- function(fit) {
m <- ncol(fit$qx)
nbins <- length(hist(fit$data, plot = FALSE)$breaks) - 1
ggplot(data.frame(xx = fit$xx, infCI = fit$qx[, 2], supCI = fit$qx[, m], y = fit$qx[, 1]), aes_string(x = "xx")) +
theme_classic() +
geom_histogram(
data = data.frame(x = fit$data), aes_string(x = "x", y = "..density.."),
fill = grDevices::grey(0.9),
colour = "black",
bins = nbins
) +
geom_line(aes_string(y = "y"), size = 1.) +
geom_line(aes_string(y = "infCI"), colour = "blue", linetype = "dotted") +
geom_line(aes_string(y = "supCI"), colour = "blue", linetype = "dotted") +
xlab("Data") +
ylab("Density")
}
#' Plot the density estimate and the 95\% credible interval for censored data
#'
#' The density estimate is the mean posterior density computed on the data
#' points. It is not possible to display a histogram for censored data.
#'
#'
#' @param fit A fitted object of class NRMI1cens or NRMI2cens
#' @return A graph with the density estimate and the 95\% credible interval
#' @examples
#'
#' data(salinity)
#' out <- MixNRMI1cens(salinity$left, salinity$right, Nit = 50)
#' plot(out)
plotfit_censored <- function(fit) {
m <- ncol(fit$qx)
ggplot(data.frame(xx = fit$xx, infCI = fit$qx[, 2], supCI = fit$qx[, m], y = fit$qx[, 1]), aes_string(x = "xx")) +
theme_classic() +
geom_line(aes_string(y = "y"), size = 1.) +
geom_line(aes_string(y = "infCI"), colour = "blue", linetype = "dotted") +
geom_line(aes_string(y = "supCI"), colour = "blue", linetype = "dotted") +
xlab("Data") +
ylab("Density")
}
#' #' Plot the density estimate and grey lines to represent the uncertainty on the density estimate
#' #'
#' #' @param fit A fitted object of class NRMI1 or NRMI2
#' #'
#' #' @return A graph with the density estimate, the 95\% credible interval and a histogram of the data.
#' #' @export
#' #'
#' #' @examples
#' plotfit_manylines = function(fit) {
#' 1+1
#' }
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/plotfit.R
#' Compute the optimal clustering from an MCMC sample
#'
#' Summarizes the posterior on all possible clusterings by an optimal
#' clustering where optimality is defined as minimizing the posterior
#' expectation of a specific loss function, the Variation of Information or
#' Binder's loss function. Computation can be lengthy for large datasets,
#' because of the large size of the space of all clusterings.
#'
#'
#' @param fit The fitted object, obtained from one of the MixNRMIx functions
#' @param loss_type Defines the loss function to be used in the expected
#' posterior loss minimization. Can be one of "VI" (Variation of Information),
#' "B" (Binder's loss), "NVI" (Normalized Variation of Information) or "NID"
#' (Normalized Information Distance). Defaults to "VI".
#' @return A vector of integers with the same size as the data, indicating the
#' allocation of each data point.
#' @export compute_optimal_clustering
compute_optimal_clustering <- function(fit, loss_type = "VI") {
if (!requireNamespace("GreedyEPL", quietly = TRUE)) {
stop("Package GreedyEPL is needed for this function to work. Please install it.",
call. = FALSE
)
}
fit.draw <- Reduce(rbind, fit$Allocs)
fit_VI <- GreedyEPL::MinimiseEPL(sample_of_partitions = fit.draw, pars = list("loss_type" = loss_type))
return(fit_VI$decision)
}
# clustering = compute_optimal_clustering(out)
plot_clustering_and_CDF_noncensored <- function(fit, clustering, label_vector = NULL) {
data <- fit$data
grid <- grid_from_data(data)
if (is_semiparametric(fit)) {
cdf <- get_CDF_semi_BNPdensity(fit = fit, xs = data)
}
else {
cdf <- get_CDF_full_BNPdensity(fit = fit, xs = data)
}
p <- ggplot(data.frame(data = data, cluster_id = clustering, cdf = cdf)) +
theme_bw() +
geom_step(aes_string(x = "data", y = "ecdf(data)(data)")) +
geom_point(aes_string(x = "data", y = "cdf", colour = "factor(cluster_id)")) +
viridis::scale_colour_viridis(discrete = TRUE) +
theme(legend.position = "none") +
ylab("CDF") +
xlab("Data")
if (!is.null(label_vector)) {
p + geom_text(
data = data.frame(
txt = label_vector, x = data,
y = cdf + 0.05,
cluster_id = clustering
),
aes_string(x = "x", y = "y", colour = "factor(cluster_id)", label = "txt")
)
}
else {
return(p)
}
}
decide_abscissa <- function(censored_data, clustering) {
df <- cbind(censored_data, data.frame(
cluster_id = clustering,
loc = rowMeans(censored_data),
is_censored = is.na(rowMeans(censored_data))
))
df$loc <- unlist(mapply(FUN = function(is_cens, cluster_id, loc) {
if (is_cens) {
mean(df$loc[df$cluster_id == cluster_id], na.rm = TRUE)
}
else {
loc
}
}, df$is_censored, df$cluster_id, df$loc, SIMPLIFY = FALSE))
return(df)
}
plot_clustering_and_CDF_censored <- function(fit, clustering, label_vector = NULL) {
data <- fit$data
# grid <- grid_from_data(data)
grid <- decide_abscissa(data, clustering)$loc
Survival_object <- survival::survfit(formula = survival::Surv(data$left, data$right, type = "interval2") ~ 1)
if (is_semiparametric(fit)) {
cdf <- get_CDF_semi_BNPdensity(fit = fit, xs = grid[!is.na(grid)])
}
else {
cdf <- get_CDF_full_BNPdensity(fit = fit, xs = grid[!is.na(grid)])
}
p <- ggplot2::ggplot(
data = data.frame(data = grid[!is.na(grid)], CDF = cdf, cluster_id = clustering[!is.na(grid)]),
aes_string(x = "data", y = "CDF")
) +
geom_point(aes_string(colour = "factor(cluster_id)")) +
theme_classic() +
geom_step(
data = data.frame(
x = c(Survival_object$time, max(grid)),
y = c(1 - Survival_object$surv, 1)
),
aes_string(x = "x", y = "y")
) +
viridis::scale_colour_viridis(discrete = TRUE) +
theme(legend.position = "none") +
ylab("CDF") +
xlab("Data")
if (!is.null(label_vector)) {
p + geom_text(
data = data.frame(
txt = label_vector[!is.na(grid)],
x = grid[!is.na(grid)],
y = cdf + 0.05,
cluster_id = clustering[!is.na(grid)]
),
aes_string(x = "x", y = "y", colour = "factor(cluster_id)", label = "txt")
)
}
else {
return(p)
}
}
#' Plot the clustering and the Cumulative Distribution Function
#'
#' This is a function to visualize the clustering induced by the BNP model. The
#' data points are plotted with a color reflecting their cluster.
#'
#'
#' @param fit The fitted object, obtained from one of the MixNRMIx functions
#' @param clustering A vector of integers with the same length as the data,
#' representing the allocation variable for each data point.
#' @param label_vector A vector of data labels to be plotted, to provide some
#' identification to each point.
#' @return A plot of the Cumulative Distribution Function (or Turnbull estimate
#' for censored data) with data points whose color denotes the cluster
#' allocation. For censored data, right or left censored data points are not
#' represented, while interval censored data points are represented at the
#' middle of the censoring interval.
#' @export plot_clustering_and_CDF
plot_clustering_and_CDF <- function(fit, clustering, label_vector = NULL) {
if (is_censored(fit$data)) {
plot_clustering_and_CDF_censored(fit, clustering, label_vector = label_vector)
}
else {
plot_clustering_and_CDF_noncensored(fit, clustering, label_vector = label_vector)
}
}
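# Usage sketch (hedged; requires the suggested GreedyEPL package):
# data(acidity)
# fit <- MixNRMI1(acidity, Nit = 50)
# clustering <- compute_optimal_clustering(fit)
# plot_clustering_and_CDF(fit, clustering)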
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/posterior_clustering_analysis.R
# prepare_independent_rng = function(nchains, niter){
# seeds = rep(0, nchains)
# runif(1) #Makes sure that the random number generator has been used at least once so that .Random.seed exists
#
# for (c in seq(nchains)){
# seeds[c] = .Random.seed
# runif(niter)
# }
#
# }
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/prepare_independent_rng.R
#' Distribution function non-standard student-t
#'
#' Computes the cdf.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(x, df, mean, sd) {
#' pt((x - mean) / sd, df, ncp = 0)
#' }
pt_ <-
function(x, df, mean, sd) {
pt((x - mean) / sd, df, ncp = 0)
}
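# Sanity-check sketch of the location-scale construction: pt_(x, df, mean, sd)
# matches pt((x - mean) / sd, df) up to the accuracy of the ncp algorithm.
# all.equal(pt_(1.5, df = 5, mean = 1, sd = 2), pt(0.25, df = 5)) # TRUE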
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/pt_.R
#' Distribution function truncated normal
#'
#' Computes the cumulative distribution function.
#'
#' For internal use
#'
#' @note Taken from \code{msm} R-package.
#' @author C. H. Jackson
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(q, mean = 0, sd = 1, lower = -Inf, upper = Inf, lower.tail = TRUE,
#' log.p = FALSE) {
#' ret <- numeric(length(q))
#' if (lower.tail) {
#' ret[q < lower] <- 0
#' ret[q > upper] <- 1
#' }
#' else {
#' ret[q < lower] <- 1
#' ret[q > upper] <- 0
#' }
#' ret[upper < lower] <- NaN
#' ind <- q >= lower & q <= upper
#' if (any(ind)) {
#' denom <- pnorm(upper, mean, sd) - pnorm(
#' lower, mean,
#' sd
#' )
#' if (lower.tail) {
#' qtmp <- pnorm(q, mean, sd) - pnorm(lower, mean, sd)
#' } else {
#' qtmp <- pnorm(upper, mean, sd) - pnorm(
#' q, mean,
#' sd
#' )
#' }
#' if (log.p) {
#' qtmp <- log(qtmp) - log(denom)
#' } else {
#' qtmp <- qtmp / denom
#' }
#' ret[q >= lower & q <= upper] <- qtmp[ind]
#' }
#' ret
#' }
ptnorm <-
function(q, mean = 0, sd = 1, lower = -Inf, upper = Inf, lower.tail = TRUE,
log.p = FALSE) {
ret <- numeric(length(q))
if (lower.tail) {
ret[q < lower] <- 0
ret[q > upper] <- 1
}
else {
ret[q < lower] <- 1
ret[q > upper] <- 0
}
ret[upper < lower] <- NaN
ind <- q >= lower & q <= upper
if (any(ind)) {
denom <- pnorm(upper, mean, sd) - pnorm(
lower, mean,
sd
)
if (lower.tail) {
qtmp <- pnorm(q, mean, sd) - pnorm(lower, mean, sd)
} else {
qtmp <- pnorm(upper, mean, sd) - pnorm(
q, mean,
sd
)
}
if (log.p) {
qtmp <- log(qtmp) - log(denom)
} else {
qtmp <- qtmp / denom
}
ret[q >= lower & q <= upper] <- qtmp[ind]
}
ret
}
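# Sketch: with the default infinite truncation bounds ptnorm reduces to
# pnorm, and values below the lower bound get probability 0.
# all.equal(ptnorm(1.3), pnorm(1.3)) # TRUE
# ptnorm(0.05, lower = 0.1)          # 0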
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/ptnorm.R
#' Generic function to find quantiles of a distribution
#'
#' Computes quantiles.
#'
#' For internal use
#'
#' @note Taken from msm R-package.
#' @author Christopher Jackson
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(pdist, p, ...) {
#' args <- list(...)
#' if (is.null(args$log.p)) {
#' args$log.p <- FALSE
#' }
#' if (is.null(args$lower.tail)) {
#' args$lower.tail <- TRUE
#' }
#' if (is.null(args$lbound)) {
#' args$lbound <- -Inf
#' }
#' if (is.null(args$ubound)) {
#' args$ubound <- Inf
#' }
#' if (args$log.p) {
#' p <- exp(p)
#' }
#' if (!args$lower.tail) {
#' p <- 1 - p
#' }
#' ret <- numeric(length(p))
#' ret[p == 0] <- args$lbound
#' ret[p == 1] <- args$ubound
#' args[c("lower.tail", "log.p", "lbound", "ubound")] <- NULL
#' maxlen <- max(sapply(c(args, p = list(p)), length))
#' for (i in seq(along = args)) {
#' args[[i]] <- rep(args[[i]],
#' length.out = maxlen
#' )
#' }
#' p <- rep(p, length.out = maxlen)
#' ret[p < 0 | p > 1] <- NaN
#' ind <- (p > 0 & p < 1)
#' if (any(ind)) {
#' hind <- seq(along = p)[ind]
#' h <- function(y) {
#' args <- lapply(args, function(x) x[hind[i]])
#' p <- p[hind[i]]
#' args$q <- y
#' (do.call(pdist, args) - p)
#' }
#' ptmp <- numeric(length(p[ind]))
#' for (i in 1:length(p[ind])) {
#' interval <- c(-1, 1)
#' while (h(interval[1]) * h(interval[2]) >= 0) {
#' interval <- interval + c(-1, 1) * 0.5 * (interval[2] -
#' interval[1])
#' }
#' ptmp[i] <- uniroot(h, interval, tol = .Machine$double.eps)$root
#' }
#' ret[ind] <- ptmp
#' }
#' if (any(is.nan(ret))) {
#' warning("NaNs produced")
#' }
#' ret
#' }
qgeneric <-
function(pdist, p, ...) {
args <- list(...)
if (is.null(args$log.p)) {
args$log.p <- FALSE
}
if (is.null(args$lower.tail)) {
args$lower.tail <- TRUE
}
if (is.null(args$lbound)) {
args$lbound <- -Inf
}
if (is.null(args$ubound)) {
args$ubound <- Inf
}
if (args$log.p) {
p <- exp(p)
}
if (!args$lower.tail) {
p <- 1 - p
}
ret <- numeric(length(p))
ret[p == 0] <- args$lbound
ret[p == 1] <- args$ubound
args[c("lower.tail", "log.p", "lbound", "ubound")] <- NULL
maxlen <- max(sapply(c(args, p = list(p)), length))
for (i in seq(along = args)) {
args[[i]] <- rep(args[[i]],
length.out = maxlen
)
}
p <- rep(p, length.out = maxlen)
ret[p < 0 | p > 1] <- NaN
ind <- (p > 0 & p < 1)
if (any(ind)) {
hind <- seq(along = p)[ind]
h <- function(y) {
args <- lapply(args, function(x) x[hind[i]])
p <- p[hind[i]]
args$q <- y
(do.call(pdist, args) - p)
}
ptmp <- numeric(length(p[ind]))
for (i in 1:length(p[ind])) {
interval <- c(-1, 1)
while (h(interval[1]) * h(interval[2]) >= 0) {
interval <- interval + c(-1, 1) * 0.5 * (interval[2] -
interval[1])
}
ptmp[i] <- uniroot(h, interval, tol = .Machine$double.eps)$root
}
ret[ind] <- ptmp
}
if (any(is.nan(ret))) {
warning("NaNs produced")
}
ret
}
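# Sketch: qgeneric numerically inverts the cdf passed as first argument,
# e.g. recovering standard normal quantiles from pnorm.
# all.equal(qgeneric(pnorm, p = 0.975, mean = 0, sd = 1), qnorm(0.975),
#           tolerance = 1e-6) # TRUE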
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/qgeneric.R
#' Quantile function half Cauchy
#'
#' Computes the quantiles.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(p, location = 0, scale = 1) {
#' qcauchy(p * (1 - pcauchy(0, location, scale)) + pcauchy(
#' 0,
#' location, scale
#' ), location, scale)
#' }
qhalfcauchy <-
function(p, location = 0, scale = 1) {
qcauchy(p * (1 - pcauchy(0, location, scale)) + pcauchy(
0,
location, scale
), location, scale)
}
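# Sketch: qhalfcauchy inverts phalfcauchy (defined above), so the round
# trip recovers the probability.
# all.equal(phalfcauchy(qhalfcauchy(0.3)), 0.3) # TRUE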
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/qhalfcauchy.R
#' Quantile function half Normal
#'
#' Computes the quantiles.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(p, mean = 0, sd = 1) {
#' qnorm(
#' p * (1 - pnorm(0, mean, sd)) + pnorm(0, mean, sd),
#' mean, sd
#' )
#' }
qhalfnorm <-
function(p, mean = 0, sd = 1) {
qnorm(
p * (1 - pnorm(0, mean, sd)) + pnorm(0, mean, sd),
mean, sd
)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/qhalfnorm.R
#' Quantile function half Student-t
#'
#' Computes the quantiles.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(p, df = 1, mean = 0, sd = 1) {
#' qt_(
#' p * (1 - pt_(0, df, mean, sd)) + pt_(0, df, mean, sd),
#' df, mean, sd
#' )
#' }
qhalft <-
function(p, df = 1, mean = 0, sd = 1) {
qt_(
p * (1 - pt_(0, df, mean, sd)) + pt_(0, df, mean, sd),
df, mean, sd
)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/qhalft.R
#' Quantile function non-standard Student-t
#'
#' Computes the quantiles.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(p, df, mean, sd) {
#' sd * qt(p, df, ncp = 0) + mean
#' }
qt_ <-
function(p, df, mean, sd) {
sd * qt(p, df, ncp = 0) + mean
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/qt_.R
#' Quantile function truncated normal
#'
#' Computes the quantiles.
#'
#' For internal use
#'
#' @note Taken from \code{msm} R-package.
#' @author C. H. Jackson
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(p, mean = 0, sd = 1, lower = -Inf, upper = Inf, lower.tail = TRUE,
#' log.p = FALSE) {
#' qgeneric(ptnorm,
#' p = p, mean = mean, sd = sd, lower = lower,
#' upper = upper, lbound = lower, ubound = upper, lower.tail = lower.tail,
#' log.p = log.p
#' )
#' }
qtnorm <-
function(p, mean = 0, sd = 1, lower = -Inf, upper = Inf, lower.tail = TRUE,
log.p = FALSE) {
qgeneric(ptnorm,
p = p, mean = mean, sd = sd, lower = lower,
upper = upper, lbound = lower, ubound = upper, lower.tail = lower.tail,
log.p = log.p
)
}
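# Sketch: with bounds at -Inf and Inf, qtnorm reduces to plain normal
# quantiles, here the median of a N(2, 3^2).
# all.equal(qtnorm(0.5, mean = 2, sd = 3), 2, tolerance = 1e-6) # TRUE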
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/qtnorm.R
#' Conditional posterior distribution of the distinct Ystar
#'
#' This function evaluates the ratio of conditional posterior distributions of
#' the distinct latents Ystar.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(v, v2, x, distr.k, sigma.k, distr.p0, mu.p0, sigma.p0) {
#' alpha <- p0(v, distr = distr.p0, mu = mu.p0, sigma = sigma.p0) /
#' p0(v2, distr = distr.p0, mu = mu.p0, sigma = sigma.p0)
#' Prod <- 1
#' for (i in seq_along(x)) {
#' fac <- dk(x[i], distr = distr.k, mu = v, sigma = sigma.k) /
#' dk(x[i], distr = distr.k, mu = v2, sigma = sigma.k)
#' Prod <- Prod * fac
#' }
#' f <- alpha * Prod
#' return(f)
#' }
rfystar <-
function(v, v2, x, distr.k, sigma.k, distr.p0, mu.p0, sigma.p0) {
alpha <- p0(v, distr = distr.p0, mu = mu.p0, sigma = sigma.p0) / p0(v2,
distr = distr.p0, mu = mu.p0, sigma = sigma.p0
)
Prod <- 1
for (i in seq_along(x)) {
fac <- dk(x[i], distr = distr.k, mu = v, sigma = sigma.k) / dk(x[i],
distr = distr.k, mu = v2, sigma = sigma.k
)
Prod <- Prod * fac
}
f <- alpha * Prod
return(f)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/rfystar.R
#' Conditional posterior distribution of the distinct Ystar in the case of
#' censoring
#'
#' This function evaluates the ratio of conditional posterior distributions of
#' the distinct latents Ystar.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(v, v2, xleft, xright, censor_code, distr.k, sigma.k,
#' distr.p0, mu.p0, sigma.p0) {
#' alpha <- p0(v, distr = distr.p0, mu = mu.p0, sigma = sigma.p0) / p0(v2,
#' distr = distr.p0, mu = mu.p0, sigma = sigma.p0
#' )
#' Prod <- 1
#' for (i in seq_along(xleft)) {
#' fac <- dkcens2_1val(
#' xleft = xleft[i], xright = xright[i],
#' c_code = censor_code[i], distr = distr.k, mu = v,
#' sigma = sigma.k
#' ) / dkcens2_1val(
#' xleft = xleft[i], xright = xright[i],
#' c_code = censor_code[i], distr = distr.k, mu = v2,
#' sigma = sigma.k
#' )
#' Prod <- Prod * fac
#' }
#' f <- alpha * Prod
#' return(f)
#' }
rfystarcens2 <-
function(v, v2, xleft, xright, censor_code, distr.k, sigma.k,
distr.p0, mu.p0, sigma.p0) {
alpha <- p0(v, distr = distr.p0, mu = mu.p0, sigma = sigma.p0) / p0(v2,
distr = distr.p0, mu = mu.p0, sigma = sigma.p0
)
Prod <- 1
for (i in seq_along(xleft)) {
fac <- dkcens2_1val(
xleft = xleft[i], xright = xright[i],
c_code = censor_code[i], distr = distr.k, mu = v,
sigma = sigma.k
) / dkcens2_1val(
xleft = xleft[i], xright = xright[i],
c_code = censor_code[i], distr = distr.k, mu = v2,
sigma = sigma.k
)
Prod <- Prod * fac
}
f <- alpha * Prod
return(f)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/rfystarcens2.R
#' Conditional posterior distribution of the distinct vectors (Ystar,Zstar)
#'
#' This function evaluates the ratio of conditional posterior distributions of
#' the distinct latent vectors (Ystar,Zstar).
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(v, v2, z, z2, x, distr.k, distr.py0, mu.py0, sigma.py0, distr.pz0, mu.pz0, sigma.pz0) {
#' alpha <- p0(v, distr = distr.py0, mu = mu.py0, sigma = sigma.py0) /
#' p0(v2, distr = distr.py0, mu = mu.py0, sigma = sigma.py0) *
#' p0(z, distr = distr.pz0, mu = mu.pz0, sigma = sigma.pz0) /
#' p0(z2, distr = distr.pz0, mu = mu.pz0, sigma = sigma.pz0)
#' Prod <- 1
#' for (i in seq_along(x)) {
#' fac <- dk(x[i], distr = distr.k, mu = v, sigma = z) / dk(x[i],
#' distr = distr.k, mu = v2, sigma = z2
#' )
#' Prod <- Prod * fac
#' }
#' f <- alpha * Prod
#' return(f)
#' }
rfyzstar <-
function(v, v2, z, z2, x, distr.k, distr.py0, mu.py0, sigma.py0,
distr.pz0, mu.pz0, sigma.pz0) {
alpha <- p0(v, distr = distr.py0, mu = mu.py0, sigma = sigma.py0) / p0(v2,
distr = distr.py0, mu = mu.py0, sigma = sigma.py0
) *
p0(z, distr = distr.pz0, mu = mu.pz0, sigma = sigma.pz0) / p0(z2,
distr = distr.pz0, mu = mu.pz0, sigma = sigma.pz0
)
Prod <- 1
for (i in seq_along(x)) {
fac <- dk(x[i], distr = distr.k, mu = v, sigma = z) / dk(x[i],
distr = distr.k, mu = v2, sigma = z2
)
Prod <- Prod * fac
}
f <- alpha * Prod
return(f)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/rfyzstar.R
#' Conditional posterior distribution of the distinct vectors (Ystar,Zstar) in
#' the case of censoring
#'
#' This function evaluates the ratio of conditional posterior distributions of
#' the distinct latent vectors (Ystar,Zstar).
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(v, v2, z, z2, xleft, xright, censor_code, distr.k,
#' distr.py0, mu.py0, sigma.py0, distr.pz0, mu.pz0, sigma.pz0) {
#' alpha <- p0(v, distr = distr.py0, mu = mu.py0, sigma = sigma.py0) / p0(v2,
#' distr = distr.py0, mu = mu.py0, sigma = sigma.py0
#' ) *
#' p0(z, distr = distr.pz0, mu = mu.pz0, sigma = sigma.pz0) / p0(z2,
#' distr = distr.pz0, mu = mu.pz0, sigma = sigma.pz0
#' )
#' Prod <- 1
#' for (i in seq_along(xleft)) {
#' fac <- dkcens2_1val(
#' xleft = xleft[i], xright = xright[i],
#' c_code = censor_code[i], distr = distr.k, mu = v,
#' sigma = z
#' ) / dkcens2_1val(
#' xleft = xleft[i], xright = xright[i],
#' c_code = censor_code[i], distr = distr.k, mu = v2,
#' sigma = z2
#' )
#' Prod <- Prod * fac
#' }
#' f <- alpha * Prod
#' return(f)
#' }
rfyzstarcens2 <-
function(v, v2, z, z2, xleft, xright, censor_code, distr.k,
distr.py0, mu.py0, sigma.py0, distr.pz0, mu.pz0, sigma.pz0) {
alpha <- p0(v, distr = distr.py0, mu = mu.py0, sigma = sigma.py0) / p0(v2,
distr = distr.py0, mu = mu.py0, sigma = sigma.py0
) *
p0(z, distr = distr.pz0, mu = mu.pz0, sigma = sigma.pz0) / p0(z2,
distr = distr.pz0, mu = mu.pz0, sigma = sigma.pz0
)
Prod <- 1
for (i in seq_along(xleft)) {
fac <- dkcens2_1val(
xleft = xleft[i], xright = xright[i],
c_code = censor_code[i], distr = distr.k, mu = v,
sigma = z
) / dkcens2_1val(
xleft = xleft[i], xright = xright[i],
c_code = censor_code[i], distr = distr.k, mu = v2,
sigma = z2
)
Prod <- Prod * fac
}
f <- alpha * Prod
return(f)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/rfyzstarcens2.R
#' Random number generator half Cauchy
#'
#' Generates a random number.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(n, location = 0, scale = 1) {
#' abs(rcauchy(n, location, scale))
#' }
rhalfcauchy <-
function(n, location = 0, scale = 1) {
abs(rcauchy(n, location, scale))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/rhalfcauchy.R
#' Random number generator half Normal
#'
#' Generates a random number.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(n, mean = 0, sd = 1) {
#' abs(rnorm(n, mean, sd))
#' }
rhalfnorm <-
function(n, mean = 0, sd = 1) {
abs(rnorm(n, mean, sd))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/rhalfnorm.R
#' Random number generator half Student-t
#'
#' Generates a random number.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(n, df = 1, mean = 0, sd = 1) {
#' abs(rt_(n, df, mean, sd))
#' }
rhalft <-
function(n, df = 1, mean = 0, sd = 1) {
abs(rt_(n, df, mean, sd))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/rhalft.R
#' Kernel density sampling function
#'
#' This function simulates from a kernel density. There are 10 options (1 =
#' normal, 2 = gamma, 3 = beta, 4 = double exponential, 5 = lognormal, 6 =
#' half-Cauchy, 7 = half-normal, 8 = half-Student-t, 9 = uniform, 10 =
#' truncated normal). All kernels are parameterized in terms of mean and
#' standard deviation.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(n, distr = NULL, mu = NULL, sigma = NULL) {
#' msg <- "Argument \"distr\" must be numeric, with possible values 1 (normal), 2 (gamma), 3 (beta), 4 (double exponential), 5 (lognormal), 6 (half-Cauchy), 7 (half-normal), 8 (half-Student-t), 9 (uniform) and 10 (truncated normal)"
#' if (is.null(distr)) {
#' stop(msg)
#' }
#' else if (distr == 1) {
#' a <- ifelse(is.null(mu), 0, mu)
#' b <- ifelse(is.null(sigma), 1, sigma)
#' rk <- rnorm(n, mean = a, sd = b)
#' }
#' else if (distr == 2) {
#' a <- ifelse(is.null(mu), 1, mu^2 / sigma^2)
#' b <- ifelse(is.null(sigma), 1, mu / sigma^2)
#' rk <- rgamma(n, shape = a, rate = b)
#' }
#' else if (distr == 3) {
#' a <- ifelse(is.null(mu), 0.5, (1 - mu) * (mu / sigma)^2 - mu)
#' b <- ifelse(is.null(sigma), 1 / sqrt(12), (mu * (1 - mu) / sigma^2 - 1) * (1 - mu))
#' if (any(c(a, b) <= 0)) {
#' stop(paste("\nNegative Beta parameters:\n a =", a, ";\t b =", b))
#' }
#' rk <- rbeta(n, shape1 = a, shape2 = b)
#' }
#' else if (distr == 4) {
#' a <- ifelse(is.null(mu), 0, mu)
#' b <- ifelse(is.null(sigma), 1 / sqrt(2), sigma / sqrt(2))
#' rk <- a + b * sample(c(-1, +1), size = n, replace = TRUE) * rexp(n)
#' }
#' else if (distr == 5) {
#' a <- ifelse(is.null(mu), exp(1 / 2), log(mu / sqrt(1 + (sigma / mu)^2)))
#' b <- ifelse(is.null(sigma), exp(1) * (exp(1) - 1), sqrt(log(1 + (sigma / mu)^2)))
#' rk <- rlnorm(n, meanlog = a, sdlog = b)
#' }
#' else if (distr == 6) {
#' rk <- rhalfcauchy(n, location = ifelse(is.null(mu), 0, mu), scale = ifelse(is.null(sigma), 1, sigma))
#' }
#' else if (distr == 7) {
#' rk <- rhalfnorm(n, mean = ifelse(is.null(mu), 0, mu), sd = ifelse(is.null(sigma), 1, sigma))
#' }
#' else if (distr == 8) {
#' rk <- rhalft(n, df = 10, mean = ifelse(is.null(mu), 0, mu), sd = ifelse(is.null(sigma), 1, sigma))
#' }
#' else if (distr == 9) {
#' rk <- runif(n, min = ifelse(is.null(mu), 0, mu), max = ifelse(is.null(sigma), 1, sigma))
#' }
#' else if (distr == 10) {
#' rk <- rtnorm(n, mean = ifelse(is.null(mu), 0, mu), sd = ifelse(is.null(sigma), 1, sigma), lower = 0.1)
#' }
#' else {
#' stop(msg)
#' }
#' return(rk)
#' }
rk <-
function(n, distr = NULL, mu = NULL, sigma = NULL) {
msg <- "Argument \"distr\" should be defined numeric with possible values 1 (normal), 2 (gamma), 3 (beta), 4 (exponential), 5 (lognormal), 6 (half-Cauchy), 7 (half-normal), 8 (half-student), 9 (uniform) and 10 (truncated normal)"
if (is.null(distr)) {
stop(msg)
}
else if (distr == 1) {
a <- ifelse(is.null(mu), 0, mu)
b <- ifelse(is.null(sigma), 1, sigma)
rk <- rnorm(n, mean = a, sd = b)
}
else if (distr == 2) {
a <- ifelse(is.null(mu), 1, mu^2 / sigma^2)
b <- ifelse(is.null(sigma), 1, mu / sigma^2)
rk <- rgamma(n, shape = a, rate = b)
}
else if (distr == 3) {
a <- ifelse(is.null(mu), 0.5, (1 - mu) * (mu / sigma)^2 -
mu)
b <- ifelse(is.null(sigma), 1 / sqrt(12), (mu * (1 - mu) / sigma^2 -
1) * (1 - mu))
if (any(c(a, b) <= 0)) {
stop(paste(
"\nNegative Beta parameters:\n a =", a,
";\t b =", b
))
}
rk <- rbeta(n, shape1 = a, shape2 = b)
}
else if (distr == 4) {
a <- ifelse(is.null(mu), 0, mu)
b <- ifelse(is.null(sigma), 1 / sqrt(2), sigma / sqrt(2))
rk <- a + b * sample(c(-1, +1), size = n, replace = TRUE) *
rexp(n)
}
else if (distr == 5) {
a <- ifelse(is.null(mu), exp(1 / 2), log(mu / sqrt(1 + (sigma / mu)^2)))
b <- ifelse(is.null(sigma), exp(1) * (exp(1) - 1), sqrt(log(1 +
(sigma / mu)^2)))
rk <- rlnorm(n, meanlog = a, sdlog = b)
}
else if (distr == 6) {
rk <- rhalfcauchy(n, location = ifelse(is.null(mu), 0,
mu
), scale = ifelse(is.null(sigma), 1, sigma))
}
else if (distr == 7) {
rk <- rhalfnorm(n,
mean = ifelse(is.null(mu), 0, mu),
sd = ifelse(is.null(sigma), 1, sigma)
)
}
else if (distr == 8) {
rk <- rhalft(n, df = 10, mean = ifelse(is.null(mu), 0,
mu
), sd = ifelse(is.null(sigma), 1, sigma))
}
else if (distr == 9) {
rk <- runif(n, min = ifelse(is.null(mu), 0, mu), max = ifelse(is.null(sigma),
1, sigma
))
}
else if (distr == 10) {
rk <- rtnorm(n, mean = ifelse(is.null(mu), 0, mu), sd = ifelse(is.null(sigma),
1, sigma
), lower = 0.1)
}
else {
stop(msg)
}
return(rk)
}
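# Sampling sketch: the gamma kernel (distr = 2) is parameterized by mean
# and standard deviation, which an empirical check recovers.
# set.seed(1)
# draws <- rk(1e4, distr = 2, mu = 3, sigma = 1)
# mean(draws) # approximately 3
# sd(draws)   # approximately 1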
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/rk.R
#' Random number generator non-standard Student-t
#'
#' Generates a random number.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(n, df, mean, sd) {
#' mean + sd * rt(n, df, ncp = 0)
#' }
rt_ <-
function(n, df, mean, sd) {
mean + sd * rt(n, df, ncp = 0)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/rt_.R
#' Random number generator for a truncated normal distribution
#'
#' Generates a random number from a truncated normal distribution.
#'
#' For internal use
#'
#' @note Taken from \code{msm} R-package.
#' @author C. H. Jackson
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(n, mean = 0, sd = 1, lower = -Inf, upper = Inf) {
#' if (length(n) > 1) {
#' n <- length(n)
#' }
#' mean <- rep(mean, length = n)
#' sd <- rep(sd, length = n)
#' lower <- rep(lower, length = n)
#' upper <- rep(upper, length = n)
#' lower <- (lower - mean) / sd
#' upper <- (upper - mean) / sd
#' ind <- seq(length = n)
#' ret <- numeric(n)
#' alg <- ifelse(lower > upper, -1, ifelse(((lower < 0 & upper ==
#' Inf) | (lower == -Inf & upper > 0) | (is.finite(lower) &
#' is.finite(upper) & (lower < 0) & (upper > 0) & (upper -
#' lower > sqrt(2 * pi)))), 0, ifelse((lower >= 0 & (upper >
#' lower + 2 * sqrt(exp(1)) / (lower + sqrt(lower^2 + 4)) *
#' exp((lower * 2 - lower * sqrt(lower^2 + 4)) / 4))),
#' 1, ifelse(upper <= 0 & (-lower > -upper + 2 * sqrt(exp(1)) / (-upper +
#' sqrt(upper^2 + 4)) * exp((upper * 2 - -upper * sqrt(upper^2 +
#' 4)) / 4)), 2, 3)
#' )))
#' ind.nan <- ind[alg == -1]
#' ind.no <- ind[alg == 0]
#' ind.expl <- ind[alg == 1]
#' ind.expu <- ind[alg == 2]
#' ind.u <- ind[alg == 3]
#' ret[ind.nan] <- NaN
#' while (length(ind.no) > 0) {
#' y <- rnorm(length(ind.no))
#' done <- which(y >= lower[ind.no] & y <= upper[ind.no])
#' ret[ind.no[done]] <- y[done]
#' ind.no <- setdiff(ind.no, ind.no[done])
#' }
#' stopifnot(length(ind.no) == 0)
#' while (length(ind.expl) > 0) {
#' a <- (lower[ind.expl] + sqrt(lower[ind.expl]^2 + 4)) / 2
#' z <- rexp(length(ind.expl), a) + lower[ind.expl]
#' u <- runif(length(ind.expl))
#' done <- which((u <= exp(-(z - a)^2 / 2)) & (z <= upper[ind.expl]))
#' ret[ind.expl[done]] <- z[done]
#' ind.expl <- setdiff(ind.expl, ind.expl[done])
#' }
#' stopifnot(length(ind.expl) == 0)
#' while (length(ind.expu) > 0) {
#' a <- (-upper[ind.expu] + sqrt(upper[ind.expu]^2 + 4)) / 2
#' z <- rexp(length(ind.expu), a) - upper[ind.expu]
#' u <- runif(length(ind.expu))
#' done <- which((u <= exp(-(z - a)^2 / 2)) & (z <= -lower[ind.expu]))
#' ret[ind.expu[done]] <- -z[done]
#' ind.expu <- setdiff(ind.expu, ind.expu[done])
#' }
#' stopifnot(length(ind.expu) == 0)
#' while (length(ind.u) > 0) {
#' z <- runif(length(ind.u), lower[ind.u], upper[ind.u])
#' rho <- ifelse(lower[ind.u] > 0, exp((lower[ind.u]^2 -
#' z^2) / 2), ifelse(upper[ind.u] < 0, exp((upper[ind.u]^2 -
#' z^2) / 2), exp(-z^2 / 2)))
#' u <- runif(length(ind.u))
#' done <- which(u <= rho)
#' ret[ind.u[done]] <- z[done]
#' ind.u <- setdiff(ind.u, ind.u[done])
#' }
#' stopifnot(length(ind.u) == 0)
#' ret * sd + mean
#' }
rtnorm <-
function(n, mean = 0, sd = 1, lower = -Inf, upper = Inf) {
if (length(n) > 1) {
n <- length(n)
}
mean <- rep(mean, length = n)
sd <- rep(sd, length = n)
lower <- rep(lower, length = n)
upper <- rep(upper, length = n)
lower <- (lower - mean) / sd
upper <- (upper - mean) / sd
ind <- seq(length = n)
ret <- numeric(n)
alg <- ifelse(lower > upper, -1, ifelse(((lower < 0 & upper ==
Inf) | (lower == -Inf & upper > 0) | (is.finite(lower) &
is.finite(upper) & (lower < 0) & (upper > 0) & (upper -
lower > sqrt(2 * pi)))), 0, ifelse((lower >= 0 & (upper >
lower + 2 * sqrt(exp(1)) / (lower + sqrt(lower^2 + 4)) *
exp((lower * 2 - lower * sqrt(lower^2 + 4)) / 4))),
1, ifelse(upper <= 0 & (-lower > -upper + 2 * sqrt(exp(1)) / (-upper +
sqrt(upper^2 + 4)) * exp((upper * 2 - -upper * sqrt(upper^2 +
4)) / 4)), 2, 3)
)))
ind.nan <- ind[alg == -1]
ind.no <- ind[alg == 0]
ind.expl <- ind[alg == 1]
ind.expu <- ind[alg == 2]
ind.u <- ind[alg == 3]
ret[ind.nan] <- NaN
while (length(ind.no) > 0) {
y <- rnorm(length(ind.no))
done <- which(y >= lower[ind.no] & y <= upper[ind.no])
ret[ind.no[done]] <- y[done]
ind.no <- setdiff(ind.no, ind.no[done])
}
stopifnot(length(ind.no) == 0)
while (length(ind.expl) > 0) {
a <- (lower[ind.expl] + sqrt(lower[ind.expl]^2 + 4)) / 2
z <- rexp(length(ind.expl), a) + lower[ind.expl]
u <- runif(length(ind.expl))
done <- which((u <= exp(-(z - a)^2 / 2)) & (z <= upper[ind.expl]))
ret[ind.expl[done]] <- z[done]
ind.expl <- setdiff(ind.expl, ind.expl[done])
}
stopifnot(length(ind.expl) == 0)
while (length(ind.expu) > 0) {
a <- (-upper[ind.expu] + sqrt(upper[ind.expu]^2 + 4)) / 2
z <- rexp(length(ind.expu), a) - upper[ind.expu]
u <- runif(length(ind.expu))
done <- which((u <= exp(-(z - a)^2 / 2)) & (z <= -lower[ind.expu]))
ret[ind.expu[done]] <- -z[done]
ind.expu <- setdiff(ind.expu, ind.expu[done])
}
stopifnot(length(ind.expu) == 0)
while (length(ind.u) > 0) {
z <- runif(length(ind.u), lower[ind.u], upper[ind.u])
rho <- ifelse(lower[ind.u] > 0, exp((lower[ind.u]^2 -
z^2) / 2), ifelse(upper[ind.u] < 0, exp((upper[ind.u]^2 -
z^2) / 2), exp(-z^2 / 2)))
u <- runif(length(ind.u))
done <- which(u <= rho)
ret[ind.u[done]] <- z[done]
ind.u <- setdiff(ind.u, ind.u[done])
}
stopifnot(length(ind.u) == 0)
ret * sd + mean
}
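# Sketch: every draw respects the truncation bounds.
# set.seed(42)
# z <- rtnorm(1000, mean = 0, sd = 1, lower = 0.1)
# all(z >= 0.1) # TRUE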
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/rtnorm.R
#' Common text for the summary S3 methods
#'
#' @param fit NRMIx or PYx object
#' @param kernel_comment Text specific to the parametric and nonparametric nature of the model
#' @param BNP_process_comment Text specific to the nonparametric process, NRMI or Pitman-Yor
#' @param number_of_clusters Flag to decide whether to compute the optimal clustering
#'
#' @return Prints out the text for the summary S3 methods
summarytext <- function(fit, kernel_comment, BNP_process_comment, number_of_clusters = FALSE) {
ndata <- ifelse(is_censored(fit$data), nrow(fit$data), length(fit$data))
data_comment <- paste("There were", ndata, "data points.")
MCMC_comment <- paste("The MCMC algorithm was run for ", fit$Nit, " iterations with ", 100 * fit$Pbi, "% discarded for burn-in.", sep = "")
if (number_of_clusters) {
estimated_clustering <- compute_optimal_clustering(fit)
clustering_comment <- paste("The estimated number of clusters in the data is ", length(unique(estimated_clustering)), ".", sep = "")
}
else {
clustering_comment <- "To obtain information on the estimated number of clusters,\n please use summary(object, number_of_clusters = TRUE)."
}
writeLines(paste(BNP_process_comment, "\n\n", kernel_comment, "\n\n", data_comment, "\n\n", MCMC_comment, "\n\n", clustering_comment, sep = ""))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/summarytext.R
#' Choosing the truncation level for the NGG process
#'
#' This function uses the internal M_array object, which provides, for given
#' NGG parameters alpha, kappa and gama, the truncation threshold ensuring
#' that the moments of the truncated process match those of the full process
#' within 5\%.
#'
#' For internal use
#'
#' @param alpha Numeric constant. Total mass of the centering measure
#' @param kappa Numeric positive constant.
#' @param gama Numeric constant. \eqn{0 \leq \gamma \leq 1}{0 <= gama <= 1}.
#' @param max_threshold Numeric positive integer. Maximum allowed value for the threshold
#'
#' @return Numeric positive integer, the truncation level of the NGG process
#'
#' @keywords internal
thresholdGG <-
function(alpha = 1, kappa = 1, gama = 1 / 2, max_threshold = 200) {
alpha_vect <- c(.1, 1, 5, 20) # mass param
kappa_vect <- c(.1, 1, 5, 20)
gama_vect <- c(0, .2, .4, .6)
alpha_index <- which.max(alpha_vect >= alpha)
kappa_index <- which.max(kappa_vect >= kappa)
gama_index <- which.max(gama_vect >= gama)
M <- M_array[alpha_index, kappa_index, gama_index]
# if we are out of the grid, we assign the max_threshold
out_of_grid <- (prod(1 - (alpha_vect >= alpha)) |
prod(1 - (kappa_vect >= kappa)) |
prod(1 - (gama_vect >= gama)))
if (out_of_grid) {
M <- max_threshold
}
return(M)
}
globalVariables(names = c("M_array"))
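# Usage sketch (hedged: M_array is internal package data, so these calls
# assume the package namespace is available):
# thresholdGG(alpha = 1, kappa = 1, gama = 0.5)  # looked up in M_array
# thresholdGG(alpha = 50, kappa = 1, gama = 0.5) # out of grid: returns max_threshold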
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/thresholdGG.R
#' Draw a traceplot for multiple chains
#'
#' This is a convenience function which works when coda is not yet loaded by the user. If coda is
#' loaded, it gets masked. See also file multMixNRMI.R
#'
#' @param fitlist Output of multMixNRMI.
#' @return A traceplot for multiple chains.
#' @export
traceplot <- function(fitlist) {
param <- value <- chain_id <- iteration <- NULL
mcmc_object <- convert_to_mcmc(fitlist)
to_plot <- tidyr::gather(
dplyr::bind_rows(
lapply(
X = seq_along(mcmc_object),
FUN = function(chain_id) {
dplyr::mutate(dplyr::mutate(data.frame(mcmc_object[[chain_id]]), chain_id = chain_id),
iteration = seq_along(chain_id)
)
}
)
),
param, value, -chain_id, -iteration
)
ggplot(to_plot, aes(x = iteration, y = value, colour = factor(chain_id), group = chain_id)) +
geom_line() +
facet_wrap(~param, scales = "free") +
theme_classic() +
ylab("") +
theme(legend.position = "none") +
xlab("Iteration")
}
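# Usage sketch (hedged; multMixNRMI1 is the multiple-chains wrapper of
# the package, run here with few iterations for illustration):
# data(acidity)
# fitlist <- multMixNRMI1(acidity, Nit = 50, nchains = 2)
# traceplot(fitlist)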
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/traceplot.R
#' @keywords internal
"_PACKAGE"
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPmix/R/BNPmix-package.R
#' Collaborative Perinatal Project data
#'
#' @description
#' A subset of the Collaborative Perinatal Project data set (Klebanoff, 2009)
#' focused on studying the effect of DDE exposure on pregnancies (Longnecker et al., 2001).
#' The dataset contains the following variables for each pregnant woman enrolled in the study:
#'\itemize{
#' \item hosp, factor denoting the hospital where the woman was hospitalized;
#' \item smoke, factor. It takes value 2 if the woman is a smoker, 1 otherwise;
#' \item gest, gestational age (in weeks);
#' \item dde, Dichlorodiphenyldichloroethylene (DDE) concentration in maternal serum;
#' \item weight, body weight of the baby at birth (in grams);
#' }
#'
#' @docType data
#' @keywords dataset internal
#' @name CPP
#'
#' @usage data(CPP)
#' @format A data.frame
#'
#' @examples
#' data(CPP)
#' str(CPP)
#'
#' @references
#'
#' Klebanoff M. A. (2009) The collaborative perinatal project: a 50-year retrospective.
#' Paediatric and perinatal epidemiology, 23, 2.
#'
#' Longnecker, M. P., Klebanoff, M. A., Zhou, H., Brock, J. (2001)
#' Association between maternal serum concentration of the DDT metabolite
#' DDE and preterm and small-for-gestational-age babies at birth. The Lancet, 358, 110-114.
#'
"CPP"
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPmix/R/CPP.R
#' @name DDPdensity
#' @export DDPdensity
#'
#' @title MCMC for GM-dependent Dirichlet process mixtures of Gaussians
#' @description The \code{DDPdensity} function generates posterior density samples for a univariate Griffiths-Milne dependent Dirichlet process mixture model with Gaussian
#' kernel, for partially exchangeable data. The function implements the importance conditional sampler method.
#'
#' @param y a vector or matrix giving the data based on which densities are to be estimated;
#' @param group vector of length \code{length(y)} containing the group labels (integers)
#' for the elements of \code{y};
#' @param output a list of arguments for generating posterior output. It contains:
#'
#' \itemize{
#' \item \code{grid}, a grid of points at which to evaluate the estimated posterior mean densities (common for all the groups).
#' \item \code{out_type}, if \code{out_type = "FULL"}, return the estimated partitions and the realizations of the posterior density for each iteration. If \code{out_type = "MEAN"}, return
#' the estimated partitions and the mean of the densities sampled at each iteration. If \code{out_type = "CLUST"}, return the estimated partitions. Default \code{out_type = "FULL"}.
# \item \code{mcmc_dens}, if equal to \code{TRUE}, the function returns a total of \code{niter}-\code{nburn} realizations of the posterior
# densities, that is one per stored iteration, evaluated at
# \code{grid} (default is \code{TRUE}). See \code{value} for details.
#' }
#'
#' @param mcmc list of MCMC arguments:
#' \itemize{
#' \item \code{niter} (mandatory), number of iterations.
#'
#' \item \code{nburn} (mandatory), number of iterations to discard as burn-in.
#'
#' \item \code{nupd}, argument controlling the number of iterations to be displayed on screen: the function reports
#' on standard output every time \code{nupd} new iterations have been carried out (default is \code{niter/10}).
#'
#' \item \code{print_message}, control option. If equal to \code{TRUE}, the status is printed
#' to standard output every \code{nupd} iterations (default is \code{TRUE}).
#'
#' \item \code{m_imp}, number of generated values for the importance sampling step of the
#' importance conditional sampler (default is 10). See \code{details}.
#'
#' \item \code{var_MH_step}, variance of the Gaussian proposal for the Metropolis-Hastings of the weights update (default is 0.25).
#'
#' }
#'
#' @param prior a list giving the prior information, which contains:
#'
#' \itemize{
#' \item \code{strength}, the strength parameter, or total mass, of the marginal Dirichlet processes (default 1);
#' \item \code{m0}, mean of the normal base measure on the location parameter (default is the sample mean of the data);
#' \item \code{k0}, scale factor appearing in the normal base measure on the location parameter (default 1);
#' \item \code{a0}, shape parameter of the inverse gamma base measure on the scale parameter (default 2);
#' \item \code{b0}, scale parameter of the inverse gamma base measure on the scale parameter (default is the sample variance of the data);
#' \item \code{wei}, parameter controlling the strength of dependence across Dirichlet processes (default 1/2).
#' }
#'
#' @return A \code{BNPdens} class object containing the estimated densities for each iteration,
#' the allocations for each iteration; the grid used to evaluate the densities (for each group); the
#' densities sampled from the posterior distribution (for each group); the groups; the weights of the processes.
#' The function also returns information regarding the estimation: the number of iterations, the number
#' of burn-in iterations and the execution time.
#'
#' @details
#'
#' This function fits a Griffiths-Milne dependent Dirichlet process (GM-DDP) mixture
#' for density estimation for partially exchangeable data (Lijoi et al., 2014).
#' For each observation the \code{group} variable allows the observations to be gathered
#' into \eqn{L}=\code{length(unique(group))} distinct groups.
#' The model assumes exchangeability within each group, with observations in the \eqn{l}th group marginally
#' modelled by a location-scale Dirichlet process mixtures, i.e.
#' \deqn{\tilde f_l(y) = \int \phi(y; \mu, \sigma^2) \tilde p_l (d \mu, d \sigma^2)}
#' where each \eqn{\tilde p_l} is a Dirichlet process with total mass \code{strength} and base measure \eqn{P_0}.
#' The vector \eqn{\tilde p = (\tilde p_1,\ldots,\tilde p_L)} is assumed to be jointly distributed as a vector of
#' GM-DDP(\code{strength}, \code{wei}; \eqn{P_0}), where \code{strength} and
#' \eqn{P_0} are the total mass parameter and the base measure of each \eqn{\tilde p_l}, and \code{wei}
#' controls the dependence across the components of
#' \eqn{\tilde p}. Admissible values for \code{wei} are in \eqn{(0,1)}, with the two extremes of the range
#' corresponding to full exchangeability (\code{wei}\eqn{\rightarrow 0})
#' and independence across groups (\code{wei}\eqn{\rightarrow 1}).
#'
#' \eqn{P_0} is a normal-inverse gamma base measure, i.e.
#' \deqn{P_0(d\mu,d\sigma^2) = N(d \mu; m_0, \sigma^2 / k_0) \times IGa(d \sigma^2; a_0, b_0).}{%
#' P_0 (d\mu,d\sigma^2) = N(d \mu; m0, \sigma^2 / k0) IGa(d \sigma^2; a0, b0).}
#'
#' Posterior sampling is obtained by implementing the importance conditional sampler (Canale et al., 2019).
#' See Corradin et al. (2021) for more details.
#'
#' @examples
#' data_toy <- c(rnorm(50, -4, 1), rnorm(100, 0, 1), rnorm(50, 4, 1))
#' group_toy <- c(rep(1,100), rep(2,100))
#' grid <- seq(-7, 7, length.out = 50)
#' est_model <- DDPdensity(y = data_toy, group = group_toy,
#' mcmc = list(niter = 200, nburn = 100, var_MH_step = 0.25),
#' output = list(grid = grid))
#' summary(est_model)
#' plot(est_model)
#'
#' @references
#'
#' Lijoi, A., Nipoti, B., and Pruenster, I. (2014). Bayesian inference with
#' dependent normalized completely random measures. Bernoulli 20, 1260–1291, doi:10.3150/13-BEJ521
#'
#' Canale, A., Corradin, R., & Nipoti, B. (2019). Importance conditional sampling for
#' Bayesian nonparametric mixtures. arXiv preprint arXiv:1906.08147
#'
#' Corradin, R., Canale, A., Nipoti, B. (2021), BNPmix: An R Package for Bayesian Nonparametric Modeling via Pitman-Yor Mixtures,
#' Journal of Statistical Software, doi:10.18637/jss.v100.i15
DDPdensity <- function(y,
group,
mcmc = list(),
prior = list(),
output = list()){
if(!is.vector(y)) stop("Wrong data dimension")
if(!is.vector(group) & !is.factor(group)) stop("Wrong group dimension")
if(is.null(mcmc$niter)) stop("Missing number of iterations")
if(is.null(mcmc$nburn)) mcmc$nburn = 0
if(!is.list(mcmc)) stop("mcmc must be a list")
if(!is.list(prior)) stop("prior must be a list")
if(!is.list(output)) stop("output must be a list")
if(!is.null(mcmc$niter) && (!is.numeric(mcmc$niter) | (mcmc$niter<1))) stop("mcmc$niter must be a positive integer")
if(!is.null(mcmc$nburn) && (!is.numeric(mcmc$nburn) | (mcmc$nburn<0) | (mcmc$nburn>mcmc$niter))) stop("mcmc$nburn must be a non-negative integer not larger than niter")
if(!is.null(mcmc$nupd) && (!is.numeric(mcmc$nupd) | (mcmc$nupd<1))) stop("mcmc$nupd must be a positive integer")
if(!is.null(mcmc$m_imp) && (!is.numeric(mcmc$m_imp) | (mcmc$m_imp<1))) stop("mcmc$m_imp must be a positive integer")
if(!is.null(mcmc$print_message) & (!is.logical(mcmc$print_message))) stop("mcmc$print_message must be a logical value")
if(!is.null(mcmc$var_MH_step) && ((!is.numeric(mcmc$var_MH_step) | (mcmc$var_MH_step<0)))) stop("mcmc$var_MH_step must be a numerical positive value")
if(!is.null(prior$m0) & !is.numeric(prior$m0)) stop("prior$m0 must be a numerical value")
if(!is.null(prior$k0) && (!is.numeric(prior$k0) | (prior$k0<=0))) stop("prior$k0 must be a numerical positive value")
if(!is.null(prior$a0) && (!is.numeric(prior$a0) | (prior$a0<=0))) stop("prior$a0 must be a numerical positive value")
if(!is.null(prior$b0) && (!is.numeric(prior$b0) | (prior$b0<=0))) stop("prior$b0 must be a numerical positive value")
if(!is.null(prior$strength) & !is.numeric(prior$strength)) stop("prior$strength must be a numerical value")
if(!is.null(prior$wei) && (!is.numeric(prior$wei) | (prior$wei<0) | (prior$wei>1))) stop("prior$wei must be a numerical value between 0 and 1")
if(!is.null(output$grid) & (!is.vector(output$grid))) stop("output$grid must be a vector")
# if mcmc misses some parts, add default
niter = mcmc$niter
nburn = mcmc$nburn
nupd = ifelse(is.null(mcmc$nupd), round(niter / 10), mcmc$nupd)
print_message = ifelse(is.null(mcmc$print_message), TRUE, mcmc$print_message)
var_MH_step = ifelse(is.null(mcmc$var_MH_step), 0.25, mcmc$var_MH_step)
m_imp = ifelse(is.null(mcmc$m_imp), 10, mcmc$m_imp)
# output
output$out_type = ifelse(is.null(output$out_type), "FULL", output$out_type)
if(output$out_type == "FULL"){
mean_dens = FALSE
mcmc_dens = TRUE
if(is.null(output$grid)){
grid_use = seq(from = min(y) - 0.1 * diff(range(y)), to = max(y) + 0.1 * diff(range(y)), length.out = 30)
} else {
if(length(dim(output$grid)) > 1) stop("Wrong grid dimension")
grid_use <- as.vector(output$grid)
}
} else if (output$out_type == "MEAN"){
mean_dens = TRUE
mcmc_dens = TRUE
if(is.null(output$grid)){
grid_use = seq(from = min(y) - 0.1 * diff(range(y)), to = max(y) + 0.1 * diff(range(y)), length.out = 30)
} else {
if(length(dim(output$grid)) > 1) stop("Wrong grid dimension")
grid_use <- as.vector(output$grid)
}
} else if (output$out_type == "CLUST"){
mean_dens = FALSE
mcmc_dens = FALSE
grid_use = seq(from = min(y) - 0.1 * diff(range(y)), to = max(y) + 0.1 * diff(range(y)), length.out = 30)
}
group <- as.numeric(as.factor(group))
ngr <- length(unique(group))
if(is.null(prior$m0)){ m0 = mean(y) } else { m0 = prior$m0 }
if(is.null(prior$k0)){ k0 = 1 } else { k0 = prior$k0 }
if(is.null(prior$a0)){ a0 = 2 } else { a0 = prior$a0 }
if(is.null(prior$b0)){ b0 = var(y) } else { b0 = prior$b0 }
if(is.null(prior$strength)){ strength = 1 } else { strength = prior$strength }
if(is.null(prior$wei)){ wei = 0.5 } else { wei = prior$wei }
est_model <- cDDP(y,
group,
ngr,
grid_use,
niter,
nburn,
m0,
k0,
a0,
b0,
strength,
wei,
m_imp,
var_MH_step,
nupd,
mcmc_dens,
print_message,
mean_dens)
if(isTRUE(mcmc_dens)){
if(mean_dens == TRUE){
result <- BNPdens(density = est_model$dens[,,1],
grideval = grid_use,
clust = (est_model$clust + 1),
group_log = est_model$group_log,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
group = group,
wvals = est_model$wvals,
dep = TRUE)
} else {
result <- BNPdens(density = est_model$dens,
grideval = grid_use,
clust = (est_model$clust + 1),
group_log = est_model$group_log,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
group = group,
wvals = est_model$wvals,
dep = TRUE)
}
}else{
result <- BNPdens(clust = (est_model$clust + 1),
group_log = est_model$group_log,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
group = group,
wvals = est_model$wvals,
dep = TRUE)
}
return(result)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/BNPmix/R/DDPdensity.R
#' @name PYdensity
#' @export PYdensity
#'
#' @title MCMC for Pitman-Yor mixtures of Gaussians
#' @description The \code{PYdensity} function generates a posterior density sample for a selection of univariate and multivariate Pitman-Yor
#' process mixture models with Gaussian kernels. See details below for the description of the different specifications of the implemented models.
#'
#'
#' @param y a vector or matrix giving the data based on which the density is to be estimated;
#'
#' @param output a list of arguments for generating posterior output. It contains:
#' \itemize{
#' \item \code{grid}, a grid of points at which to evaluate the estimated posterior mean density; a data frame
#' obtained with the \code{expand.grid} function.
#'
#' \item \code{out_param}, if equal to \code{TRUE}, the function returns the draws of the kernel's
#' parameters for each MCMC iteration, default is \code{FALSE}. See \code{value} for details.
#'
#' \item \code{out_type}, if \code{out_type = "FULL"}, the function returns the visited
#' partitions and the realizations of the posterior density for each iteration.
#' If \code{out_type = "MEAN"}, the function returns the estimated partitions and the mean of the densities sampled at each iteration.
#' If \code{out_type = "CLUST"}, the function returns the estimated partition.
#' Default \code{"FULL"}.
#'
# \item \code{mcmc_dens}, if equal to \code{TRUE}, the function returns a total of \code{niter}-\code{nburn} realizations of the posterior
# density, that is one per stored iteration, evaluated at
# \code{grid} (default is \code{TRUE}). See \code{value} for details.
#' }
#'
#' @param mcmc a list of MCMC arguments:
#' \itemize{
#' \item \code{niter} (mandatory), number of iterations.
#'
#' \item \code{nburn} (mandatory), number of iterations to discard as burn-in.
#'
#' \item \code{method}, the MCMC sampling method to be used, options are \code{'ICS'}, \code{'MAR'} and \code{'SLI'} (default is \code{'ICS'}). See details.
#'
#' \item \code{model}, the type of model to be fitted (default is \code{'LS'}). See details.
#'
#' \item \code{nupd}, argument controlling the number of iterations to be displayed on screen: the function reports
#' on standard output every time \code{nupd} new iterations have been carried out (default is \code{niter/10}).
#'
#' \item \code{print_message}, control option. If equal to \code{TRUE}, the status is printed
#' to standard output every \code{nupd} iterations (default is \code{TRUE}).
#'
#' \item \code{m_imp}, number of generated values for the importance sampling step of \code{method = 'ICS'} (default is 10). See details.
#'
#' \item \code{slice_type}, when \code{method = 'SLI'} it specifies the type of slice sampler. Options are \code{'DEP'} for dependent slice-efficient, and \code{'INDEP'} for independent slice-efficient (default is \code{'DEP'}). See details.
#'
#' \item \code{hyper}, if equal to \code{TRUE}, hyperprior distributions on the base measure's
#' parameters are added, as specified in \code{prior} and explained in \code{details} (default is \code{TRUE}).
#' }
#'
#'
#' @param prior a list giving the prior information. The list includes \code{strength} and \code{discount},
#' the strength and discount parameters of the Pitman-Yor process
#' (default are \code{strength = 1} and \code{discount = 0}, the latter leading to the Dirichlet process).
#' The remaining parameters depend on the model choice.
#' \itemize{
#'
#' \item If \code{model = 'L'} (location mixture) and \code{y} is univariate:
#'
#' \code{m0} and \code{s20} are
#' mean and variance of the base measure on the location parameter (default are sample mean and sample variance of the data);
#' \code{a0} and \code{b0} are shape and scale parameters of the inverse gamma prior on the common scale parameter
#' (default are 2 and the sample variance of the data).
#' If \code{hyper = TRUE}, optional hyperpriors on the base measure's parameters are added:
#' specifically, \code{m1} and \code{k1} are the mean parameter and the scale factor defining the
#' normal hyperprior on \code{m0} (default are the sample mean of the data and 1), and
#' \code{a1} and \code{b1} are shape and rate parameters of the inverse gamma hyperprior on \code{b0}
#' (default are 2 and the sample variance of the data). See details.
#'
#' \item If \code{model = 'LS'} (location-scale mixture) and \code{y} is univariate:
#'
#' \code{m0} and \code{k0} are the mean parameter and the scale factor defining the normal base measure
#' on the location parameter (default are the sample mean of the data and 1), and \code{a0} and \code{b0} are
#' shape and scale parameters of the inverse gamma base measure on the scale parameter (default are 2 and the sample variance of the data).
#' If \code{hyper = TRUE}, optional hyperpriors on the base measure's parameters are added:
#' specifically, \code{m1} and \code{s21} are mean and variance parameters of the normal hyperprior on
#' \code{m0} (default are the sample mean and the sample variance of the data);
#' \code{tau1} and \code{zeta1} are shape and rate parameters of the gamma hyperprior on
#' \code{k0} (default is 1 for both);
#' \code{a1} and \code{b1} are shape and rate parameters of the gamma hyperprior on
#' \code{b0} (default are the sample variance of the data and 1). See details.
#'
#' \item If \code{model = 'L'} (location mixture) and \code{y} is multivariate (\code{p}-variate):
#'
#' \code{m0} and \code{S20} are
#' mean and covariance of the base measure on the location parameter (default are the sample mean and the sample covariance of the data);
#' \code{Sigma0} and \code{n0} are the parameters of the inverse Wishart prior on
#' the common scale matrix (default are the sample covariance of the data and \code{p}+2).
#' If \code{hyper = TRUE}, optional hyperpriors on the base measure's parameters are added:
#' specifically, \code{m1} and \code{k1} are the mean parameter and the scale factor defining the
#' normal hyperprior on \code{m0} (default are the sample mean of the data and 1), and
#' \code{lambda1} and \code{Lambda1} are the parameters (degrees of freedom and scale) of the inverse Wishart prior on \code{S20}
#' (default are \code{p}+2 and the sample covariance of the data). See details.
#'
#' \item If \code{model = 'LS'} (location-scale mixture) and \code{y} is multivariate (\code{p}-variate):
#'
#' \code{m0} and \code{k0} are the mean parameter and the scale factor defining the normal base measure on the
#' location parameter (default are the sample mean of the data and 1), and
#' \code{n0} and \code{Sigma0} are the parameters (degrees of freedom and scale) of the inverse Wishart base measure on the scale matrix
#' (default are \code{p}+2 and the sample covariance of the data).
#' If \code{hyper = TRUE}, optional hyperpriors on the base measure's parameters are added:
#' specifically, \code{m1} and \code{S1} are mean and covariance matrix parameters of the normal hyperprior on
#' \code{m0} (default are the sample mean and the sample covariance of the data);
#' \code{tau1} and \code{zeta1} are shape and rate parameters of the gamma hyperprior on
#' \code{k0} (default is 1 for both);
#' \code{n1} and \code{Sigma1} are the parameters (degrees of freedom and scale) of the Wishart prior for \code{Sigma0}
#' (default are \code{p}+2 and the sample covariance of the data divided by \code{p}+2). See details.
#'
#'
#' \item If \code{model = 'DLS'} (diagonal location-scale mixture):
#'
#' \code{m0} and \code{k0} are the mean vector parameter and the vector of scale factors defining the normal base measure
#' on the location parameter (default are the sample mean and a vector of ones),
#' and \code{a0} and \code{b0} are vectors of
#' shape and scale parameters defining the base measure on the scale parameters (default are a vector of twos and the diagonal
#' of the sample covariance of the data).
#' If \code{hyper = TRUE}, optional hyperpriors on the base measure's parameters are added:
#' specifically, \code{m1} and \code{s21} are vectors of mean and variance parameters for the normal hyperpriors on the components of
#' \code{m0} (default are the sample mean and the diagonal of the sample covariance of the data);
#' \code{tau1} and \code{zeta1} are vectors of shape and rate parameters of the gamma hyperpriors on the components of
#' \code{k0} (default is a vector of ones for both);
#' \code{a1} and \code{b1} are vectors of shape and rate parameters of the gamma hyperpriors on the components of
#' \code{b0} (default is the diagonal of the sample covariance of the data and a vector of ones). See details.
#' }
#'
#'@details
#' This generic function fits a Pitman-Yor process mixture model for density estimation and clustering. The general model is
#' \deqn{\tilde f(y) = \int K(y; \theta) \tilde p (d \theta),} where \eqn{K(y; \theta)} is a kernel density with parameter
#' \eqn{\theta\in\Theta}. Univariate and multivariate Gaussian kernels are implemented with different specifications for the parametric space
#' \eqn{\Theta}, as described below.
#' The mixing measure \eqn{\tilde p} has a Pitman-Yor process prior with strength parameter \eqn{\vartheta},
#' discount parameter \eqn{\alpha}, and base measure \eqn{P_0} admitting the specifications presented below. For posterior sampling,
#' three MCMC approaches are implemented. See details below.
#'
#' \strong{Univariate data}
#'
#' For univariate \eqn{y} the function implements both a location and location-scale mixture model. The former assumes
#' \deqn{\tilde f(y) = \int \phi(y; \mu, \sigma^2) \tilde p (d \mu) \pi(\sigma^2),} where
#' \eqn{\phi(y; \mu, \sigma^2)} is a univariate Gaussian kernel function with mean \eqn{\mu} and variance \eqn{\sigma^2},
#' and \eqn{\pi(\sigma^2)} is an inverse gamma prior. The base measure is specified as
#' \deqn{P_0(d \mu) = N(d \mu; m_0, \sigma^2_0),}{%
#' P_0(d \mu) = N(d \mu; m0, s20),}
#' and \eqn{\sigma^2 \sim IGa(a_0, b_0)}{\sigma^2 ~ IGa(a0, b0)}.
#' Optional hyperpriors for the base measure's parameters are
#' \deqn{(m_0,\sigma^2_0) \sim N(m_1, \sigma^2_0 / k_1) \times IGa(a_1, b_1).}{%
#' (m0,s20) ~ N(m1, s20 / k1) IGa(a1, b1).}
#'
#' The location-scale mixture model, instead, assumes
#' \deqn{\tilde f(y) = \int \phi(y; \mu, \sigma^2) \tilde p (d \mu, d \sigma^2)} with normal-inverse gamma base measure
#' \deqn{P_0 (d \mu, d \sigma^2) = N(d \mu; m_0, \sigma^2 / k_0) \times IGa(d \sigma^2; a_0, b_0).}{%
#' P_0 (d \mu, d \sigma^2) = N(d \mu; m0, \sigma^2 / k0) IGa(d \sigma^2; a0, b0)} and (optional) hyperpriors
#' \deqn{m_0 \sim N(m_1, \sigma_1^2 ),\quad k_0 \sim Ga(\tau_1, \zeta_1),\quad b_0 \sim Ga(a_1, b_1).}{%
#' m0 ~ N(m1, \sigma1^2), k0 ~ Ga(\tau1, \zeta1), b0 ~ Ga(a1, b1).}
#'
#'
#' \strong{Multivariate data}
#'
#' For multivariate \eqn{y} (\eqn{p}-variate) the function implements a location mixture model (with full covariance matrix) and two
#' different location-scale mixture models, with either full or diagonal covariance matrix. The location mixture model assumes
#' \deqn{\tilde f(y) = \int \phi_p(y; \mu, \Sigma) \tilde p (d \mu) \pi(\Sigma)} where
#' \eqn{\phi_p(y; \mu, \Sigma)} is a \eqn{p}-dimensional Gaussian kernel function with mean vector \eqn{\mu} and covariance matrix
#' \eqn{\Sigma}. The prior on \eqn{\Sigma} is inverse Wishart with parameters \eqn{\Sigma_0} and \eqn{\nu_0}, while the
#' base measure is
#' \deqn{P_0(d \mu) = N(d \mu; m_0, S_0),}{P_0 (d \mu) = N(d \mu; m0, S0),}
#' with optional hyperpriors
#' \deqn{m_0 \sim N(m_1, S_0 / k_1),\quad S_0 \sim IW(\lambda_1, \Lambda_1).}{%
#' m0 ~ N(m1, S0 / k1), S0 ~ IW(\lambda1, \Lambda1).}
#'
#' The location-scale mixture model assumes
#'
#' \deqn{\tilde f(y) = \int \phi_p(y; \mu, \Sigma) \tilde p (d \mu, d \Sigma).} Two possible structures for \eqn{\Sigma}
#' are implemented, namely full and diagonal covariance. For the full covariance mixture model, the base measure is
#' the normal-inverse Wishart
#' \deqn{P_0 (d \mu, d \Sigma) = N(d \mu; m_0, \Sigma / k_0) \times IW(d \Sigma; \nu_0, \Sigma_0),}{%
#' P_0 (d \mu, d \Sigma) = N(d \mu; m0, \Sigma / k0) IW(d \Sigma; \nu0, \Sigma0),}
#' with optional hyperpriors
#' \deqn{m_0 \sim N(m_1, S_1),\quad k_0 \sim Ga(\tau_1, \zeta_1),\quad \Sigma_0 \sim W(\nu_1, \Sigma_1).}{%
#' m0 ~ N(m1, S1), k0 ~ Ga(\tau1, \zeta1), \Sigma0 ~ W(\nu1, \Sigma1).}
#' The second location-scale mixture model assumes a diagonal covariance structure. This is equivalent to writing the
#' mixture model as a mixture of products of univariate normal kernels, i.e.
#' \deqn{\tilde f(y) = \int \prod_{r=1}^p \phi(y_r; \mu_r, \sigma^2_r) \tilde p (d \mu_1,\ldots,d \mu_p, d \sigma_1^2,\ldots,d \sigma_p^2).}
#' For this specification, the base measure is defined as the product of \eqn{p} independent normal-inverse gamma distributions, that is
#' \deqn{P_0 = \prod_{r=1}^p P_{0r}} where
#' \deqn{P_{0r}(d \mu_r,d \sigma_r^2) = N(d \mu_r; m_{0r}, \sigma^2_r / k_{0r}) \times IGa(d \sigma^2_r; a_{0r}, b_{0r}).}{%
#' P_{0r}(d \mu_r, d \sigma_r^2) = N(d \mu_r; m_{0r}, \sigma^2_r / k_{0r}) IGa(d \sigma^2_r; a_{0r}, b_{0r}).}
#' Optional hyperpriors can be added, and, for each component, correspond to the set of hyperpriors considered
#' for the univariate location-scale mixture model.
#'
#' \strong{Posterior simulation methods}
#'
#' This generic function implements three types of MCMC algorithms for posterior simulation.
#' The default method is the importance conditional sampler \code{'ICS'} (Canale et al. 2019). Other options are
#' the marginal sampler \code{'MAR'} (Neal, 2000) and the slice sampler \code{'SLI'} (Kalli et al. 2011).
#' The importance conditional sampler performs an importance sampling step when updating the values of
#' individual parameters \eqn{\theta}, which requires sampling \code{m_imp} values from a suitable
#' proposal. Large values of \code{m_imp} are known to improve the mixing of the chain
#' at the cost of increased running time (Canale et al. 2019). Two options are available for the slice sampler,
#' namely the dependent slice-efficient sampler (\code{slice_type = 'DEP'}), which is set as default, and the
#' independent slice-efficient sampler (\code{slice_type = 'INDEP'}) (Kalli et al. 2011). See Corradin et al. (2021)
#' for more details.
#'
#'
#' @return A \code{BNPdens} class object containing the estimated density and
#' the cluster allocations for each iteration. If \code{out_param = TRUE} the output
#' contains also the kernel-specific parameters for each iteration. If \code{mcmc_dens = TRUE} the output
#' contains also a realization from the posterior density for each iteration. If \code{mean_dens = TRUE}
#' the output contains just the mean of the realizations from the posterior density. The output also
#' reports information such as the number of iterations, the number of burn-in iterations, the
#' computational time and the type of estimated model (\code{univariate = TRUE} or \code{FALSE}).
#'
#'
#' @examples
#' data_toy <- cbind(c(rnorm(100, -3, 1), rnorm(100, 3, 1)),
#' c(rnorm(100, -3, 1), rnorm(100, 3, 1)))
#' grid <- expand.grid(seq(-7, 7, length.out = 50),
#' seq(-7, 7, length.out = 50))
#' est_model <- PYdensity(y = data_toy, mcmc = list(niter = 200, nburn = 100),
#' output = list(grid = grid))
#' summary(est_model)
#' plot(est_model)
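#'
#' ## A further univariate sketch on simulated toy data, using the marginal
#' ## sampler; the data and all settings below are illustrative only.
#' y_uni <- c(rnorm(100, -2, 1), rnorm(100, 2, 1))
#' est_uni <- PYdensity(y = y_uni,
#'                      mcmc = list(niter = 200, nburn = 100, method = "MAR"),
#'                      output = list(grid = seq(-5, 5, length.out = 50)))
#' plot(est_uni)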
#'
#' @references
#'
#' Canale, A., Corradin, R., Nipoti, B. (2019), Importance conditional sampling for Bayesian nonparametric mixtures,
#' arXiv preprint, arXiv:1906.08147
#'
#' Corradin, R., Canale, A., Nipoti, B. (2021), BNPmix: An R Package for Bayesian Nonparametric Modeling via Pitman-Yor Mixtures,
#' Journal of Statistical Software, 100, doi:10.18637/jss.v100.i15
#'
#' Kalli, M., Griffin, J. E., and Walker, S. G. (2011), Slice sampling mixture models.
#' Statistics and Computing 21, 93-105, doi:10.1007/s11222-009-9150-y
#'
#' Neal, R. M. (2000), Markov Chain Sampling Methods for Dirichlet Process Mixture Models,
#' Journal of Computational and Graphical Statistics 9, 249-265, doi:10.2307/1390653
#'
PYdensity <- function(y,
mcmc = list(),
prior = list(),
output = list()){
if(!is.list(mcmc)) stop("mcmc must be a list")
if(!is.list(prior)) stop("prior must be a list")
if(!is.list(output)) stop("output must be a list")
if(!is.null(mcmc$niter) && (!is.numeric(mcmc$niter) | (mcmc$niter<1))) stop("mcmc$niter must be a positive integer")
  if(!is.null(mcmc$nburn) && (!is.numeric(mcmc$nburn) | (mcmc$nburn<1) | (mcmc$nburn>mcmc$niter))) stop("mcmc$nburn must be a positive integer less than niter")
if(!is.null(mcmc$nupd) && (!is.numeric(mcmc$nupd) | (mcmc$nupd<1))) stop("mcmc$nupd must be a positive integer")
if(!is.null(mcmc$m_imp) && (!is.numeric(mcmc$m_imp) | (mcmc$m_imp<1))) stop("mcmc$m_imp must be a positive integer")
if(!is.null(mcmc$print_message) & (!is.logical(mcmc$print_message))) stop("mcmc$print_message must be a logical value")
if(!is.null(mcmc$hyper) & !is.logical(mcmc$hyper)) stop("mcmc$hyper must be a logical value")
if(!is.null(output$grid) & !is.vector(output$grid) & !is.matrix(output$grid) & !is.data.frame(output$grid)) stop("wrong grid specification")
if(is.vector(y)){
p = 1
if(!is.null(prior$m0) & !is.numeric(prior$m0)) stop("prior$m0 must be a numerical value")
if(!is.null(prior$k0) && (!is.numeric(prior$k0) | (prior$k0<=0))) stop("prior$k0 must be a numerical positive value")
if(!is.null(prior$a0) && (!is.numeric(prior$a0) | (prior$a0<=0))) stop("prior$a0 must be a numerical positive value")
if(!is.null(prior$b0) && (!is.numeric(prior$b0) | (prior$b0<=0))) stop("prior$b0 must be a numerical positive value")
if(!is.null(prior$s20) && (!is.numeric(prior$s20) | (prior$s20<=0))) stop("prior$s20 must be a numerical positive value")
if(!is.null(prior$m1) & !is.numeric(prior$m1)) stop("prior$m1 must be a numerical value")
if(!is.null(prior$s21) && (!is.numeric(prior$s21) | (prior$s21<=0))) stop("prior$s21 must be a numerical positive value")
if(!is.null(prior$a1) && (!is.numeric(prior$a1) | (prior$a1<=0))) stop("prior$a1 must be a numerical positive value")
if(!is.null(prior$b1) && (!is.numeric(prior$b1) | (prior$b1<=0))) stop("prior$b1 must be a numerical positive value")
if(!is.null(prior$k1) && (!is.numeric(prior$k1) | (prior$k1<=0))) stop("prior$k1 must be a numerical positive value")
if(!is.null(prior$tau1) && (!is.numeric(prior$tau1) | (prior$tau1<=0))) stop("prior$tau1 must be a numerical positive value")
if(!is.null(prior$zeta1) && (!is.numeric(prior$zeta1) | (prior$zeta1<=0))) stop("prior$zeta1 must be a numerical positive value")
# mandatory parameters
if(is.null(mcmc$niter)) stop("Missing number of iterations")
if(is.null(mcmc$nburn)) mcmc$nburn = 0
# if mcmc misses some parts, add default
niter = mcmc$niter
nburn = mcmc$nburn
method = ifelse(is.null(mcmc$method), "ICS", mcmc$method)
model = ifelse(is.null(mcmc$model), "LS", mcmc$model)
nupd = ifelse(is.null(mcmc$nupd), round(niter / 10), mcmc$nupd)
m_imp = ifelse(is.null(mcmc$m_imp), 10, mcmc$m_imp)
print_message = ifelse(is.null(mcmc$print_message), TRUE, mcmc$print_message)
# output
out_param = ifelse(is.null(output$out_param), FALSE, output$out_param)
output$out_type = ifelse(is.null(output$out_type), "FULL", output$out_type)
if(output$out_type == "FULL"){
mcmc_dens = TRUE
if(is.null(output$grid)){
grid_use = seq(from = min(y) - 0.1 * diff(range(y)), to = max(y) + 0.1 * diff(range(y)), length.out = 30)
} else {
if(length(dim(output$grid)) > 1) stop("Wrong grid dimension")
grid_use <- as.vector(output$grid)
}
} else if (output$out_type == "CLUST"){
mcmc_dens = FALSE
grid_use = 0
}
if(!(model == "LS" | model == "L")) stop("Wrong model setting")
slice_type <- mcmc$slice_type
if(is.null(slice_type)){ slice_type <- "DEP"}
if(!(slice_type == "DEP" | slice_type == "INDEP")) stop("Wrong mcmc$slice_type setting")
if(!(method == "ICS" | method == "SLI" | method == "MAR")) stop("Wrong method setting")
if(is.null(mcmc$wei_slice)){
indep_sli = "DEFAULT"
} else {
indep_sli = "CUSTOM"
}
if(length(mcmc$wei_slice) > 2) stop("Wrong mcmc$wei_slice setting")
hyper = ifelse(is.null(mcmc$hyper), TRUE, mcmc$hyper)
# Check for different model parameters
# if null, initialize default parameters
if(model == "LS"){
if(hyper){
if(is.null(prior$a0)){ a0 = 2 } else { a0 = prior$a0 }
if(is.null(prior$m1)){ m1 = mean(y) } else { m1 = prior$m1 }
if(is.null(prior$s21)){ s21 = var(y) } else { s21 = prior$s21 }
if(is.null(prior$tau1)){ tau1 = 1 } else { tau1 = prior$tau1 }
if(is.null(prior$zeta1)){ zeta1 = 1 } else { zeta1 = prior$zeta1 }
if(is.null(prior$a1)){ a1 = var(y) } else { a1 = prior$a1 }
if(is.null(prior$b1)){ b1 = 1 } else { b1 = prior$b1 }
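        # initialize m0, k0 and b0 by drawing from the hyperpriors when not supplied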
if(is.null(prior$m0)){ m0 = rnorm(1, m1, sqrt(s21)) } else { m0 = prior$m0 }
if(is.null(prior$k0)){ k0 = rgamma(1, tau1, zeta1) } else { k0 = prior$k0 }
if(is.null(prior$b0)){ b0 = rgamma(1, a1, b1) } else { b0 = prior$b0 }
} else {
if(is.null(prior$m0)){ m0 = mean(y) } else { m0 = prior$m0 }
if(is.null(prior$k0)){ k0 = 1 } else { k0 = prior$k0 }
if(is.null(prior$a0)){ a0 = 2 } else { a0 = prior$a0 }
if(is.null(prior$b0)){ b0 = var(y) } else { b0 = prior$b0 }
m1 <- s21 <- tau1 <- zeta1 <- a1 <- b1 <- 0
}
} else if(model == "L"){
if(hyper){
if(is.null(prior$a0)){ a0 = 2 } else { a0 = prior$a0 }
if(is.null(prior$b0)){ b0 = var(y) } else { b0 = prior$b0 }
if(is.null(prior$m1)){ m1 = mean(y) } else { m1 = prior$m1 }
if(is.null(prior$k1)){ k1 = 1 } else { k1 = prior$k1 }
if(is.null(prior$a1)){ a1 = 2 } else { a1 = prior$a1 }
if(is.null(prior$b1)){ b1 = var(y) } else { b1 = prior$b1 }
if(is.null(prior$m0)){ m0 = rnorm(1, m1, sqrt(b1 / ((a1 - 1) * k1))) } else { m0 = prior$m0 }
if(is.null(prior$s20)){ s20 = 1 / rgamma(1, a1, 1/b1) } else { s20 = prior$s20 }
} else {
if(is.null(prior$m0)){ m0 = mean(y) } else { m0 = prior$m0 }
if(is.null(prior$s20)){ s20 = var(y) } else { s20 = prior$s20 }
if(is.null(prior$a0)){ a0 = 2 } else { a0 = prior$a0 }
if(is.null(prior$b0)){ b0 = var(y) } else { b0 = prior$b0 }
m1 <- k1 <- a1 <- b1 <- 0
}
}
# process parameters
strength = ifelse(is.null(prior$strength), 1, prior$strength)
discount = ifelse(is.null(prior$discount), 0, prior$discount)
if(strength < - discount) stop("strength must be greater than -discount")
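    # if not user-specified, set the slice sampler weights to the process parameters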
if(is.null(mcmc$wei_slice)){
mcmc$wei_slice <- c(strength, discount)
}
# estimate the model
if(method == "ICS"){
# call the ICS univariate function
if(model == "LS"){
est_model <- cICS(y, grid_use, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, zeta1, a1, b1, strength,
m_imp, nupd, out_param, mcmc_dens, discount, print_message, hyper)
} else if(model == "L"){
est_model <- cICS_L(y, grid_use, niter, nburn, m0, s20, a0, b0, m1, k1, a1, b1, strength,
m_imp, nupd, out_param, mcmc_dens, discount, print_message, hyper)
}
} else if(method == "SLI" & slice_type == "DEP"){
# call the SLI univariate function
if(model == "LS"){
est_model <- cSLI(y, grid_use, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, zeta1, a1, b1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, hyper, FALSE)
} else if(model == "L"){
est_model <- cSLI_L(y, grid_use, niter, nburn, m0, s20, a0, b0, m1, k1, a1, b1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, hyper, FALSE)
}
} else if(method == "SLI" & slice_type == "INDEP"){
if(indep_sli == "DEFAULT"){
# call the SLI univariate function
if(model == "LS"){
est_model <- cSLI(y, grid_use, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, zeta1, a1, b1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, hyper, TRUE)
} else if(model == "L"){
est_model <- cSLI_L(y, grid_use, niter, nburn, m0, s20, a0, b0, m1, k1, a1, b1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, hyper, TRUE)
}
}
if(indep_sli == "CUSTOM"){
if(model == "LS"){
est_model <- cSLI(y, grid_use, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, zeta1, a1, b1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, hyper, TRUE)
} else if(model == "L"){
est_model <- cSLI_L(y, grid_use, niter, nburn, m0, s20, a0, b0, m1, k1, a1, b1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, hyper, TRUE)
}
}
} else if(method == "MAR"){
# call the MAR univariate function
if(model == "LS"){
est_model <- MAR(y, grid_use, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, zeta1, a1, b1,
strength, nupd, out_param, mcmc_dens, discount, print_message, hyper)
} else if(model == "L"){
est_model <- MAR_L(y, grid_use, niter, nburn, m0, s20, a0, b0, m1, k1, a1, b1,
strength, nupd, out_param, mcmc_dens, discount, print_message, hyper)
}
}
# return the results
if(!isTRUE(out_param)){
if(isTRUE(mcmc_dens)){
result <- BNPdens(density = est_model$dens,
data = y,
grideval = grid_use,
clust = est_model$clust,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
univariate = TRUE,
wvals = est_model$bound,
group_log = est_model$tdns)
}else{
result <- BNPdens(data = y,
clust = est_model$clust,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
univariate = TRUE,
wvals = est_model$bound,
group_log = est_model$tdns)
}
} else {
if(isTRUE(mcmc_dens)){
result <- BNPdens(density = est_model$dens,
data = y,
grideval = grid_use,
clust = est_model$clust,
mean = est_model$mu,
sigma2 = est_model$s2,
probs = est_model$probs,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
univariate = TRUE,
wvals = est_model$bound,
group_log = est_model$tdns)
}else{
result <- BNPdens(data = y,
clust = est_model$clust,
mean = est_model$mu,
sigma2 = est_model$s2,
probs = est_model$probs,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
univariate = TRUE,
wvals = est_model$bound,
group_log = est_model$tdns)
}
}
} else if(!is.vector(y)){
p = ncol(y)
if(!is.null(prior$m0) && (!is.numeric(prior$m0) | length(prior$m0)!=p)) stop("prior$m0 must be a numerical vector of proper dimension")
if(!is.null(prior$k0) && (!is.numeric(prior$k0))) stop("prior$k0 must be a numerical vector of proper dimension")
if(!is.null(prior$a0) && (!is.numeric(prior$a0) | length(prior$a0)!=p)) stop("prior$a0 must be a numerical vector of proper dimension")
if(!is.null(prior$b0) && (!is.numeric(prior$b0) | length(prior$b0)!=p)) stop("prior$b0 must be a numerical vector of proper dimension")
if(!is.null(prior$a1) && (!is.numeric(prior$a1) | length(prior$a1)!=p)) stop("prior$a1 must be a numerical vector of proper dimension")
if(!is.null(prior$b1) && (!is.numeric(prior$b1) | length(prior$b1)!=p)) stop("prior$b1 must be a numerical vector of proper dimension")
if(!is.null(prior$k1) && (!is.numeric(prior$k1) | length(prior$k1)!=p)) stop("prior$k1 must be a numerical vector of proper dimension")
if(!is.null(prior$Sigma0) && (!is.matrix(prior$Sigma0) | ncol(prior$Sigma0) != nrow(prior$Sigma0) | ncol(prior$Sigma0) !=p)) stop("prior$Sigma0 must be a square matrix of proper dimension")
    if(!is.null(prior$S20) && (!is.matrix(prior$S20) | ncol(prior$S20) != nrow(prior$S20) | ncol(prior$S20) !=p)) stop("prior$S20 must be a square matrix of proper dimension")
    if(!is.null(prior$n0) && (!is.numeric(prior$n0) | prior$n0<=(p+1) )) stop("prior$n0 must be a positive value greater than ncol(y) + 1")
if(!is.null(prior$m1) && ( !is.vector(prior$m1) | length(prior$m1)!=p)) stop("prior$m1 must be a numerical vector of proper dimension")
if(!is.null(prior$s21) && ( !is.vector(prior$s21) | length(prior$s21)!=p)) stop("prior$s21 must be a numerical vector of proper dimension")
if(!is.null(prior$S1) && ( !is.matrix(prior$S1) )) stop("prior$S1 must be a square matrix of proper dimension")
if(!is.null(prior$Sigma1) && ( !is.matrix(prior$Sigma1) | ncol(prior$Sigma1) != nrow(prior$Sigma1) | ncol(prior$Sigma1) !=p)) stop("prior$Sigma1 must be a square matrix of proper dimension")
if(!is.null(prior$Lambda1) && ( !is.matrix(prior$Lambda1) | ncol(prior$Lambda1) != nrow(prior$Lambda1) | ncol(prior$Lambda1) !=p)) stop("prior$Lambda1 must be a square matrix of proper dimension")
if(!is.null(prior$tau1) & !is.numeric(prior$tau1) & !is.vector(prior$tau1)) stop("prior$tau1 must be a numerical value or a vector")
if(!is.null(prior$zeta1) & !is.numeric(prior$zeta1) & !is.vector(prior$zeta1)) stop("prior$zeta1 must be a numerical value or a vector")
    if(!is.null(prior$n1) && ( !is.numeric(prior$n1) | prior$n1<=(p-1) )) stop("prior$n1 must be a numerical value greater than ncol(y) - 1")
if(!is.null(prior$lambda1) && ( !is.numeric(prior$lambda1))) stop("prior$lambda1 must be a numerical value")
if(!is.null(prior$a1) && ( !is.vector(prior$a1) | length(prior$a1)!=p)) stop("prior$a1 must be a vector of proper dimension")
if(!is.null(prior$b1) && ( !is.vector(prior$b1) | length(prior$b1)!=p)) stop("prior$b1 must be a vector of proper dimension")
# Check the mandatory parameters
if(is.null(mcmc$niter)) stop("Missing number of iterations")
if(is.null(mcmc$nburn)) mcmc$nburn = 0
# if mcmc misses some parts, add default
niter = mcmc$niter
nburn = mcmc$nburn
method = ifelse(is.null(mcmc$method), "ICS", mcmc$method)
model = ifelse(is.null(mcmc$model), "LS", mcmc$model)
nupd = ifelse(is.null(mcmc$nupd), round(niter / 10), mcmc$nupd)
print_message = ifelse(is.null(mcmc$print_message), TRUE, mcmc$print_message)
m_imp = ifelse(is.null(mcmc$m_imp), 10, mcmc$m_imp)
# output
out_param = ifelse(is.null(output$out_param), FALSE, output$out_param)
output$out_type = ifelse(is.null(output$out_type), "FULL", output$out_type)
if(output$out_type == "FULL"){
mean_dens = FALSE
mcmc_dens = TRUE
if(is.null(output$grid)){
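        # default evaluation grids: 30 x 30 points if p = 2, 20^3 points if p = 3, otherwise a dummy grid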
if(p == 2){
grid_use <- as.matrix(expand.grid(
seq(from = min(y[,1]) - 0.1 * diff(range(y[,1])), to = max(y[,1]) + 0.1 * diff(range(y[,1])), length.out = 30),
seq(from = min(y[,2]) - 0.1 * diff(range(y[,2])), to = max(y[,2]) + 0.1 * diff(range(y[,2])), length.out = 30)))
} else if(p == 3){
grid_use <- as.matrix(expand.grid(
seq(from = min(y[,1]) - 0.1 * diff(range(y[,1])), to = max(y[,1]) + 0.1 * diff(range(y[,1])), length.out = 20),
seq(from = min(y[,2]) - 0.1 * diff(range(y[,2])), to = max(y[,2]) + 0.1 * diff(range(y[,2])), length.out = 20),
seq(from = min(y[,3]) - 0.1 * diff(range(y[,3])), to = max(y[,3]) + 0.1 * diff(range(y[,3])), length.out = 20)))
} else {
grid_use = matrix(0, nrow = 1, ncol = p)
}
} else {
# check the grid
        if(ncol(output$grid) != ncol(y)) stop("The dimensions of grid and data do not match")
grid_use <- as.matrix(output$grid)
}
} else if (output$out_type == "MEAN"){
mean_dens = TRUE
mcmc_dens = TRUE
if(is.null(output$grid)){
if(p == 2){
grid_use <- as.matrix(expand.grid(
seq(from = min(y[,1]) - 0.1 * diff(range(y[,1])), to = max(y[,1]) + 0.1 * diff(range(y[,1])), length.out = 30),
seq(from = min(y[,2]) - 0.1 * diff(range(y[,2])), to = max(y[,2]) + 0.1 * diff(range(y[,2])), length.out = 30)))
} else if(p == 3){
grid_use <- as.matrix(expand.grid(
seq(from = min(y[,1]) - 0.1 * diff(range(y[,1])), to = max(y[,1]) + 0.1 * diff(range(y[,1])), length.out = 20),
seq(from = min(y[,2]) - 0.1 * diff(range(y[,2])), to = max(y[,2]) + 0.1 * diff(range(y[,2])), length.out = 20),
seq(from = min(y[,3]) - 0.1 * diff(range(y[,3])), to = max(y[,3]) + 0.1 * diff(range(y[,3])), length.out = 20)))
} else {
grid_use = matrix(0, nrow = 1, ncol = p)
}
} else {
# check the grid
        if(ncol(output$grid) != ncol(y)) stop("The dimensions of grid and data do not match")
grid_use <- as.matrix(output$grid)
}
} else if (output$out_type == "CLUST"){
mean_dens = FALSE
mcmc_dens = FALSE
grid_use = matrix(0, nrow = 1, ncol = p)
}
if(!(model == "LS" | model == "L" | model == "DLS")) stop("Wrong model setting")
slice_type <- mcmc$slice_type
if(is.null(slice_type)){ slice_type <- "DEP"}
if(!(slice_type == "DEP" | slice_type == "INDEP")) stop("Wrong mcmc$slice_type setting")
if(!(method == "ICS" | method == "SLI" | method == "MAR")) stop("Wrong method setting")
if(is.null(mcmc$wei_slice)){
indep_sli = "DEFAULT"
} else {
indep_sli = "CUSTOM"
}
if(length(mcmc$wei_slice) > 2) stop("Wrong mcmc$wei_slice setting")
hyper = ifelse(is.null(mcmc$hyper), TRUE, mcmc$hyper)
# Check for different model parameters
if(model == "LS"){
if(hyper){
if(is.null(prior$m1)){ m1 = colMeans(y) } else { m1 = prior$m1 }
if(is.null(prior$S1)){ S1 = var(y) } else { S1 = prior$S1 }
if(is.null(prior$tau1)){ tau1 = 1 } else { tau1 = prior$tau1 }
if(is.null(prior$zeta1)){ zeta1 = 1 } else { zeta1 = prior$zeta1 }
if(is.null(prior$n1)){ n1 = ncol(y) + 2 } else { n1 = prior$n1 }
if(is.null(prior$Sigma1)){ Sigma1 = var(y) } else { Sigma1 = prior$Sigma1 }
if(is.null(prior$n0)){ n0 = ncol(y) + 2 } else { n0 = prior$n0 }
if(is.null(prior$Sigma0)){ Sigma0 = rWishart(n = 1, Sigma = Sigma1, df = n1)[,,1] } else {Sigma0 = prior$Sigma0}
if(is.null(prior$m0)){ m0 = as.vector(rnorm(ncol(y)) %*% t(chol(S1)) + m1)} else { m0 = prior$m0 }
if(is.null(prior$k0)){ k0 = rgamma(1, tau1, 1 / zeta1)} else { k0 = prior$k0 }
} else {
if(is.null(prior$m0)){ m0 = colMeans(y) } else { m0 = prior$m0 }
if(is.null(prior$k0)){ k0 = 1 } else { k0 = prior$k0 }
if(is.null(prior$n0)){ n0 = ncol(y) + 2 } else { n0 = prior$n0 }
if(is.null(prior$Sigma0)){ Sigma0 = var(y) / n0 } else { Sigma0 = prior$Sigma0 }
m1 <- rep(0, ncol(y))
n1 <- tau1 <- zeta1 <- 0
Sigma1 <- S1 <- diag(0, ncol(y))
}
} else if(model == "L"){
if(hyper){
if(is.null(prior$m1)){ m1 = colMeans(y) } else { m1 = prior$m1 }
if(is.null(prior$k1)){ k1 = 1 } else { k1 = prior$k1 }
if(is.null(prior$lambda1)){ lambda1 = ncol(y) + 2 } else { lambda1 = prior$lambda1 }
if(is.null(prior$Lambda1)){ Lambda1 = var(y) } else { Lambda1 = prior$Lambda1 }
if(is.null(prior$n0)){ n0 = ncol(y) + 2 } else { n0 = prior$n0 }
if(is.null(prior$Sigma0)){ Sigma0 = var(y) } else { Sigma0 = prior$Sigma0 }
if(is.null(prior$S20)){ S20 = solve(rWishart(n = 1, Sigma = solve(Lambda1), df = lambda1)[,,1]) } else {S20 = prior$S20}
if(is.null(prior$m0)){ m0 = as.vector(rnorm(ncol(y)) %*% t(chol(S20)) + m1)} else { m0 = prior$m0 }
} else {
if(is.null(prior$m0)){ m0 = colMeans(y) } else { m0 = prior$m0 }
if(is.null(prior$S20)){ S20 = diag(1, ncol(y)) } else { S20 = prior$S20 }
if(is.null(prior$Sigma0)){ Sigma0 = var(y) } else { Sigma0 = prior$Sigma0 }
if(is.null(prior$n0)){ n0 = ncol(y) + 2 } else { n0 = prior$n0 }
m1 <- rep(0, ncol(y))
k1 <- lambda1 <- 0
Lambda1 <- diag(0, ncol(y))
}
} else if(model == "DLS"){
if(hyper){
if(is.null(prior$a0)){ a0 = rep(2, ncol(y)) } else { a0 = prior$a0 }
if(is.null(prior$m1)){ m1 = colMeans(y) } else { m1 = prior$m1 }
if(is.null(prior$s21)){ s21 = diag(var(y)) } else { s21 = prior$s21 }
if(is.null(prior$tau1)){ tau1 = rep(1, ncol(y)) } else { tau1 = prior$tau1 }
if(is.null(prior$zeta1)){ zeta1 = rep(1, ncol(y)) } else { zeta1 = prior$zeta1 }
if(is.null(prior$a1)){ a1 = diag(var(y)) } else { a1 = prior$a1 }
if(is.null(prior$b1)){ b1 = rep(1, ncol(y)) } else { b1 = prior$b1 }
if(is.null(prior$m0)){ m0 = apply(cbind(m1, s21), 1,
function(x) rnorm(1, x[1], sqrt(x[2]))) } else { m0 = prior$m0 }
if(is.null(prior$k0)){ k0 = apply(cbind(tau1,zeta1), 1, function(x) rgamma(1, x[1], x[2])) } else { k0 = prior$k0 }
if(is.null(prior$b0)){ b0 = apply(cbind(a1,b1), 1, function(x) rgamma(1, x[1], x[2])) } else { b0 = prior$b0 }
} else {
if(is.null(prior$m0)){ m0 = colMeans(y) } else { m0 = prior$m0 }
if(is.null(prior$k0)){ k0 = rep(1, ncol(y)) } else { k0 = prior$k0 }
if(is.null(prior$a0)){ a0 = rep(2, ncol(y)) } else { a0 = prior$a0 }
if(is.null(prior$b0)){ b0 = diag(var(y)) } else { b0 = prior$b0 }
m1 <- s21 <- tau1 <- zeta1 <- a1 <- b1 <- rep(0, ncol(y))
}
}
# process parameters
strength = ifelse(is.null(prior$strength), 1, prior$strength)
discount = ifelse(is.null(prior$discount), 0, prior$discount)
if(strength < - discount) stop("strength must be greater than -discount")
if(is.null(mcmc$wei_slice)){
mcmc$wei_slice <- c(strength, discount)
}
# convert data to matrix
y <- as.matrix(y)
# estimate the model
if(method == "ICS"){
if(model == "LS"){
est_model <- cICS_mv(y, grid_use, niter, nburn, m0, k0, Sigma0, n0, m1, S1, tau1, zeta1, n1, Sigma1,
strength, m_imp, nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper)
} else if(model == "L"){
est_model <- cICS_mv_L(y, grid_use, niter, nburn, m0, S20, Sigma0, n0, m1, k1, lambda1, Lambda1,
strength, m_imp, nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper)
} else if(model == "DLS"){
est_model <- cICS_mv_P(y, grid_use, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, zeta1, a1, b1,
strength, m_imp, nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper)
}
} else if(method == "SLI" & slice_type == "DEP"){
if(model == "LS"){
est_model <- cSLI_mv(y, grid_use, niter, nburn, m0, k0, Sigma0, n0, m1, S1, tau1, zeta1, n1, Sigma1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper, FALSE)
} else if(model == "L"){
est_model <- cSLI_mv_L(y, grid_use, niter, nburn, m0, S20, Sigma0, n0, m1, k1, lambda1, Lambda1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper, FALSE)
} else if(model == "DLS"){
est_model <- cSLI_mv_P(y, grid_use, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, zeta1, a1, b1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper, FALSE)
}
} else if(method == "SLI" & slice_type == "INDEP"){
if(indep_sli == "DEFAULT"){
if(model == "LS"){
est_model <- cSLI_mv(y, grid_use, niter, nburn, m0, k0, Sigma0, n0, m1, S1, tau1, zeta1, n1, Sigma1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper, TRUE)
} else if(model == "L"){
est_model <- cSLI_mv_L(y, grid_use, niter, nburn, m0, S20, Sigma0, n0, m1, k1, lambda1, Lambda1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper, TRUE)
} else if(model == "DLS"){
est_model <- cSLI_mv_P(y, grid_use, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, zeta1, a1, b1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper, TRUE)
}
}
if(indep_sli == "CUSTOM"){
if(model == "LS"){
est_model <- cSLI_mv(y, grid_use, niter, nburn, m0, k0, Sigma0, n0, m1, S1, tau1, zeta1, n1, Sigma1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens,
discount, print_message, mean_dens, hyper, TRUE)
} else if(model == "L"){
est_model <- cSLI_mv_L(y, grid_use, niter, nburn, m0, S20, Sigma0, n0, m1, k1, lambda1, Lambda1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens,
discount, print_message, mean_dens, hyper, TRUE)
} else if(model == "DLS"){
est_model <- cSLI_mv_P(y, grid_use, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, zeta1, a1, b1,
strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param, mcmc_dens,
discount, print_message, mean_dens, hyper, TRUE)
}
}
} else if(method == "MAR"){
if(model == "LS"){
est_model <- MAR_mv(y, grid_use, niter, nburn, m0, k0, Sigma0, n0, m1, S1, tau1, zeta1, n1, Sigma1,
strength, nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper)
} else if(model == "L"){
est_model <- MAR_mv_L(y, grid_use, niter, nburn, m0, S20, Sigma0, n0, m1, k1, lambda1, Lambda1,
strength, nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper)
} else if(model == "DLS"){
est_model <- MAR_mv_P(y, grid_use, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, zeta1, a1, b1,
strength, nupd, out_param, mcmc_dens, discount, print_message, mean_dens, hyper)
}
}
# return the results
if(!isTRUE(out_param)){
if(isTRUE(mcmc_dens)){
result <- BNPdens(density = est_model$dens,
data = y,
grideval = grid_use,
clust = (est_model$clust + 1),
niter = niter,
nburn = nburn,
tot_time = est_model$time,
univariate = FALSE,
wvals = est_model$wvals)
}else{
result <- BNPdens(data = y,
clust = (est_model$clust + 1),
niter = niter,
nburn = nburn,
tot_time = est_model$time,
univariate = FALSE,
wvals = est_model$wvals)
}
} else {
if(isTRUE(mcmc_dens)){
result <- BNPdens(density = est_model$dens,
data = y,
grideval = grid_use,
clust = (est_model$clust + 1),
mean = est_model$mu,
sigma2 = est_model$s2,
probs = est_model$probs,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
univariate = FALSE,
wvals = est_model$wvals)
}else{
result <- BNPdens(data = y,
clust = (est_model$clust + 1),
mean = est_model$mu,
sigma2 = est_model$s2,
probs = est_model$probs,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
univariate = FALSE,
wvals = est_model$wvals)
}
}
}
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPmix/R/PYdensity.R
|
#' @name PYregression
#' @export PYregression
#'
#' @title MCMC for Pitman-Yor mixture of Gaussian regressions
#'
#' @description The \code{PYregression} function generates a posterior sample
#' for mixtures of linear regression models inspired by the ANOVA-DDP model
#' introduced in De Iorio et al. (2004). See details below for model specification.
#'
#'@param y a vector of observations, univariate dependent variable;
#'@param x a matrix of observations, multivariate independent variable;
#'@param output list of posterior summaries:
#'
#'\itemize{
#
#'\item \code{grid_y}, a vector of points at which to evaluate the estimated posterior mean density of
#'\code{y}, conditionally on each value of \code{x} in \code{grid_x};
#'
#'\item \code{grid_x}, a matrix of points at which to evaluate the realizations of the posterior conditional densities of
#'\code{y} given \code{x};
#' \item \code{out_type}, if \code{out_type = "FULL"}, the function returns the estimated partitions and the realizations of the posterior density for each iteration;
#' if \code{out_type = "MEAN"}, it returns the estimated partitions and the mean of the densities sampled at each iteration;
#' if \code{out_type = "CLUST"}, it returns the estimated partitions. Default \code{out_type = "FULL"};
#'\item \code{out_param}, if equal to \code{TRUE}, the function returns the draws of the kernel's
#' parameters for each MCMC iteration, default is \code{FALSE}. See \code{value} for details.
#'
#'}
#'
#'@param mcmc a list of MCMC arguments:
#' \itemize{
#' \item \code{niter} (mandatory), number of iterations.
#'
#' \item \code{nburn} (mandatory), number of iterations to discard as burn-in.
#'
#' \item \code{method}, the MCMC sampling method to be used. Options are \code{'ICS'}, \code{'MAR'} and \code{'SLI'} (default is \code{'ICS'}). See details.
#'
#' \item \code{model}, the type of model to be fitted (default is \code{'LS'}). See details.
#'
#' \item \code{nupd}, argument controlling the number of iterations to be displayed on screen: the function reports
#' on standard output every time \code{nupd} new iterations have been carried out (default is \code{niter/10}).
#'
#' \item \code{print_message}, control option. If equal to \code{TRUE}, the status is printed
#' to standard output every \code{nupd} iterations (default is \code{TRUE}).
#'
#' \item \code{m_imp}, number of generated values for the importance sampling step of \code{method = 'ICS'} (default is 10). See details.
#'
#' \item \code{slice_type}, when \code{method = 'SLI'} it specifies the type of slice sampler. Options are \code{'DEP'} for dependent slice-efficient, and \code{'INDEP'} for independent slice-efficient (default is \code{'DEP'}). See details.
#'
#' \item \code{m_marginal}, number of generated values for the augmentation step needed, if \code{method = 'MAR'}, to implement Algorithm 8 of Neal (2000) (default is 100). See details.
#'
#' \item \code{hyper}, if equal to \code{TRUE}, hyperprior distributions on the base measure's
#' parameters are added, as specified in \code{prior} and explained in \code{details} (default is \code{TRUE}).
#' }
#'
#' @param prior a list giving the prior information. The list includes
#' \code{strength} and \code{discount}, the strength and discount parameters of the Pitman-Yor process
#' (default are \code{strength = 1} and \code{discount = 0}, the latter leading to the Dirichlet process).
#' The remaining parameters specify the base measure: \code{m0} and \code{S0} are
#' the mean and covariance of the normal base measure on the regression coefficients (default are a vector of zeroes, except for the first element equal
#' to \code{mean(y)}, and a diagonal matrix with each element equal to 100);
#' \code{a0} and \code{b0} are the shape and scale parameters of the inverse gamma base measure on the scale component
#' (default are 2 and var(y)).
#' If \code{hyper = TRUE}, optional hyperpriors on the base measure's parameters are added:
#' specifically, \code{m1} and \code{k1} are the mean parameter and scale factor defining the
#' normal hyperprior on \code{m0} (default are a vector of zeroes, except for the first element equal
#' to the sample mean of the observed dependent variable, and 1);
#' \code{tau1} and \code{zeta1} are the shape and rate parameters of the gamma hyperprior on
#' \code{b0} (default are the sample variance of \code{y} and 1);
#' \code{n1} and \code{S1} are the parameters (degrees of freedom and scale) of the inverse Wishart prior for \code{S0}
#' (default are the dimension of the design vector plus 2, and a diagonal matrix with each element equal to 100). See details.
#'
#' @details
#' This function fits a Pitman-Yor process mixture of Gaussian linear regression models, i.e
#' \deqn{\tilde f(y) = \int \phi(y; x^T \beta, \sigma^2) \tilde p (d \beta, d \sigma^2)}{%
#' \tilde f(y) = \int \phi(y; x^T \beta, \sigma^2) \tilde p (d \beta, d \sigma^2),}
#' where \eqn{x} is the design vector, containing the independent variables in \code{x} together with
#' a leading 1 for the intercept term.
#' The mixing measure \eqn{\tilde p} has a Pitman-Yor process prior with strength parameter \eqn{\vartheta}
#' and discount parameter \eqn{\alpha}. The location model assumes a base measure \eqn{P_0} specified as
#' \deqn{P_0(d \beta) = N(d \beta; m_0, S_0),}{%
#' P0(d \beta) = N(d \beta; m0, S0),}
#' while the location-scale model assumes a base measure \eqn{P_0} specified as
#' \deqn{P_0(d \beta, d \sigma^2) = N(d \beta; m_0, S_0) \times IGa(d \sigma^2; a_0, b_0).}{%
#' P0(d \beta, d \sigma^2) = N(d \beta; m0, S0) IG(d \sigma^2; a0, b0).}
#' Optional hyperpriors complete the model specification:
#' \deqn{m_0 \sim N(m_1, S_0 / k_1 ),\quad S_0 \sim IW(\nu_1, S_1),\quad b_0 \sim Ga(\tau_1, \zeta_1).}{%
#' m0 ~ N(m1, S0/k1), S0 ~ IW(\nu1, S1), b0 ~ Ga(\tau1, \zeta1).}
#'
#' \strong{Posterior simulation methods}
#'
#' This generic function implements three types of MCMC algorithms for posterior simulation.
#' The default method is the importance conditional sampler \code{'ICS'} (Canale et al. 2019). Other options are
#' the marginal sampler \code{'MAR'} (algorithm 8 of Neal, 2000) and the slice sampler \code{'SLI'} (Kalli et al. 2011).
#' The importance conditional sampler performs an importance sampling step when updating the values of
#' individual parameters \eqn{\theta}, which requires sampling \code{m_imp} values from a suitable
#' proposal. Large values of \code{m_imp} are known to improve the mixing of the chain
#' at the cost of increased running time (Canale et al. 2019). When updating the individual parameter
#' \eqn{\theta}, Algorithm 8 of Neal (2000) requires sampling \code{m_marginal} values from the base
#' measure. \code{m_marginal} can be chosen arbitrarily. Two options are available for the slice sampler,
#' namely the dependent slice-efficient sampler (\code{slice_type = 'DEP'}), which is set as default, and the
#' independent slice-efficient sampler (\code{slice_type = 'INDEP'}) (Kalli et al. 2011). See Corradin et al. (2021)
#' for more details.
#'
#'
#' @return A \code{BNPdens} class object containing the estimated density and
#' the cluster allocations for each iteration. The output contains also the data and
#' the grids. If \code{out_param = TRUE} the output
#' contains also the kernel-specific parameters for each iteration. If \code{mcmc_dens = TRUE}, the
#' function returns also a realization from the posterior density for each iteration.
#' If \code{mean_dens = TRUE}, the output contains just the mean of the densities sampled at each iteration.
#' The output also reports the number of iterations,
#' the number of burn-in iterations, the computational time and the type of model.
#'
#'
#' @references
#'
#' Canale, A., Corradin, R., Nipoti, B. (2019), Importance conditional sampling for Bayesian nonparametric mixtures,
#' arXiv preprint, arXiv:1906.08147
#'
#' Corradin, R., Canale, A., Nipoti, B. (2021), BNPmix: An R Package for Bayesian Nonparametric Modeling via Pitman-Yor Mixtures,
#' Journal of Statistical Software, 100, doi:10.18637/jss.v100.i15
#'
#' De Iorio, M., Mueller, P., Rosner, G.L., and MacEachern, S. (2004), An ANOVA Model for Dependent Random Measures,
#' Journal of the American Statistical Association 99, 205-215, doi:10.1198/016214504000000205
#'
#' Kalli, M., Griffin, J. E., and Walker, S. G. (2011), Slice sampling mixture models.
#' Statistics and Computing 21, 93-105, doi:10.1007/s11222-009-9150-y
#'
#' Neal, R. M. (2000), Markov Chain Sampling Methods for Dirichlet Process Mixture Models,
#' Journal of Computational and Graphical Statistics 9, 249-265, doi:10.2307/1390653
#'
#'
#' @examples
#' x_toy <- c(rnorm(100, 3, 1), rnorm(100, 3, 1))
#' y_toy <- c(x_toy[1:100] * 2 + 1, x_toy[101:200] * 6 + 1) + rnorm(200, 0, 1)
#' grid_x <- c(0, 1, 2, 3, 4, 5)
#' grid_y <- seq(0, 35, length.out = 50)
#' est_model <- PYregression(y = y_toy, x = x_toy,
#' mcmc = list(niter = 200, nburn = 100),
#' output = list(grid_x = grid_x, grid_y = grid_y))
#' summary(est_model)
#' plot(est_model)
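#'
#' ## A further sketch on the same toy data, using the marginal sampler with
#' ## m_marginal auxiliary values; all settings below are illustrative only.
#' est_mar <- PYregression(y = y_toy, x = x_toy,
#'                         mcmc = list(niter = 200, nburn = 100,
#'                                     method = "MAR", m_marginal = 50),
#'                         output = list(grid_x = grid_x, grid_y = grid_y))
#' summary(est_mar)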
#'
PYregression <- function(y, x,
mcmc = list(),
prior = list(),
output = list()){
  if(!is.vector(y)) stop("Wrong dimensions: y needs to be a vector")
  if(!(is.data.frame(x) | is.matrix(x) | is.vector(x))) stop("Wrong dimensions: x needs to be a vector or a matrix")
if(!is.list(mcmc)) stop("mcmc must be a list")
if(!is.list(prior)) stop("prior must be a list")
if(!is.list(output)) stop("output must be a list")
if(!is.null(mcmc$niter) && (!is.numeric(mcmc$niter) | (mcmc$niter<1))) stop("mcmc$niter must be a positive integer")
  if(!is.null(mcmc$nburn) && (!is.numeric(mcmc$nburn) | (mcmc$nburn<1) | (mcmc$nburn>mcmc$niter))) stop("mcmc$nburn must be a positive integer less than niter")
if(!is.null(mcmc$nupd) && (!is.numeric(mcmc$nupd) | (mcmc$nupd<1))) stop("mcmc$nupd must be a positive integer")
if(!is.null(mcmc$m_imp) && (!is.numeric(mcmc$m_imp) | (mcmc$m_imp<1))) stop("mcmc$m_imp must be a positive integer")
if(!is.null(mcmc$print_message) & (!is.logical(mcmc$print_message))) stop("mcmc$print_message must be a logical value")
if(!is.null(mcmc$hyper) & !is.logical(mcmc$hyper)) stop("mcmc$hyper must be a logical value")
if(!is.null(mcmc$m_marginal) & !is.numeric(mcmc$m_marginal)) stop("mcmc$m_marginal must be a numerical value")
  if(!is.null(prior$m0) & !is.vector(prior$m0)) stop("prior$m0 must be a vector of proper dimension")
  if(!is.null(prior$S0) && (!is.matrix(prior$S0) | ncol(prior$S0) != nrow(prior$S0)) ) stop("prior$S0 must be a square matrix of proper dimension")
if(!is.null(prior$a0) && (!is.numeric(prior$a0) | (prior$a0<1))) stop("prior$a0 must be a positive value")
  if(!is.null(prior$b0) && (!is.numeric(prior$b0) | (prior$b0<1))) stop("prior$b0 must be a positive value")
if(!is.null(prior$m1) & !is.vector(prior$m1)) stop("prior$m1 must be a vector")
if(!is.null(prior$S1) & !is.matrix(prior$S1)) stop("prior$S1 must be a matrix")
if(!is.null(prior$k1) & !is.numeric(prior$k1)) stop("prior$k1 must be a numerical value")
if(!is.null(prior$tau1) && (!is.numeric(prior$tau1) | (prior$tau1<1))) stop("prior$tau1 must be a positive value")
if(!is.null(prior$zeta1) && (!is.numeric(prior$zeta1) | (prior$zeta1<1))) stop("prior$zeta1 must be a positive value")
if(!is.null(prior$n1) & !is.numeric(prior$n1)) stop("prior$n1 must be a numerical value")
if(!is.null(prior$strength) & !is.numeric(prior$strength)) stop("prior$strength must be a numerical value")
if(!is.null(prior$discount) & !is.numeric(prior$discount)) stop("prior$discount must be a numerical value")
# mandatory parameters
if(is.null(mcmc$niter)) stop("Missing number of iterations")
if(is.null(mcmc$nburn)) mcmc$nburn = 0
# add variable for the intercept
if(length(dim(x)) <= 1){
x <- as.matrix(cbind(rep(1, length(x)), x))
} else {
x <- as.matrix(cbind(rep(1, nrow(x)), x))
}
d <- ncol(x)
# if mcmc misses some parts, add default
niter = mcmc$niter
nburn = mcmc$nburn
method = ifelse(is.null(mcmc$method), "ICS", mcmc$method)
model = ifelse(is.null(mcmc$model), "LS", mcmc$model)
nupd = ifelse(is.null(mcmc$nupd), round(niter / 10), mcmc$nupd)
print_message = ifelse(is.null(mcmc$print_message), TRUE, mcmc$print_message)
m_imp = ifelse(is.null(mcmc$m_imp), 10, mcmc$m_imp)
m_marginal = ifelse(is.null(mcmc$m_marginal), 100, mcmc$m_marginal)
# output
out_param = ifelse(is.null(output$out_param), FALSE, output$out_param)
  output$out_type = ifelse(is.null(output$out_type), "FULL", output$out_type)
  if(output$out_type == "FULL"){
mean_dens = FALSE
mcmc_dens = TRUE
if(is.null(output$grid_y) & is.null(output$grid_x)){
grid_y = seq(from = min(y) - 0.1 * diff(range(y)), to = max(y) + 0.1 * diff(range(y)), length.out = 30)
grid_x = as.matrix(cbind(rep(1,4), apply(x, 2, function(z) seq(from = min(z) - 0.1 * diff(range(z)), to = max(z) + 0.1 * diff(range(z)), length.out = 4))[,-1]))
} else if(is.null(output$grid_y) & !is.null(output$grid_x)){
grid_y = seq(from = min(y) - 0.1 * diff(range(y)), to = max(y) + 0.1 * diff(range(y)), length.out = 30)
if(ncol(x) == 2){
grid_x = as.matrix(cbind(rep(1, length(output$grid_x)), output$grid_x))
} else{
grid_x = as.matrix(cbind(rep(1, nrow(output$grid_x)), output$grid_x))
}
} else if(!is.null(output$grid_y) & is.null(output$grid_x)){
grid_y = output$grid_y
grid_x = as.matrix(cbind(rep(1,4), apply(x, 2, function(z) seq(from = min(z) - 0.1 * diff(range(z)), to = max(z) + 0.1 * diff(range(z)), length.out = 4))[,-1]))
} else {
if(ncol(x) == 2){
grid_x = as.matrix(cbind(rep(1, length(output$grid_x)), output$grid_x))
} else{
grid_x = as.matrix(cbind(rep(1, nrow(output$grid_x)), output$grid_x))
}
grid_y = output$grid_y
}
  } else if (output$out_type == "MEAN"){
mean_dens = TRUE
mcmc_dens = TRUE
if(is.null(output$grid_y) & is.null(output$grid_x)){
grid_y = seq(from = min(y) - 0.1 * diff(range(y)), to = max(y) + 0.1 * diff(range(y)), length.out = 30)
grid_x = as.matrix(cbind(rep(1,4), apply(x, 2, function(z) seq(from = min(z) - 0.1 * diff(range(z)), to = max(z) + 0.1 * diff(range(z)), length.out = 4))[,-1]))
} else if(is.null(output$grid_y) & !is.null(output$grid_x)){
grid_y = seq(from = min(y) - 0.1 * diff(range(y)), to = max(y) + 0.1 * diff(range(y)), length.out = 30)
if(ncol(x) == 2){
grid_x = as.matrix(cbind(rep(1, length(output$grid_x)), output$grid_x))
} else{
grid_x = as.matrix(cbind(rep(1, nrow(output$grid_x)), output$grid_x))
}
} else if(!is.null(output$grid_y) & is.null(output$grid_x)){
grid_y = output$grid_y
grid_x = as.matrix(cbind(rep(1,4), apply(x, 2, function(z) seq(from = min(z) - 0.1 * diff(range(z)), to = max(z) + 0.1 * diff(range(z)), length.out = 4))[,-1]))
} else {
if(ncol(x) == 2){
grid_x = as.matrix(cbind(rep(1, length(output$grid_x)), output$grid_x))
} else{
grid_x = as.matrix(cbind(rep(1, nrow(output$grid_x)), output$grid_x))
}
grid_y = output$grid_y
}
  } else if (output$out_type == "CLUST"){
mean_dens = FALSE
mcmc_dens = FALSE
grid_y = seq(from = min(y) - 0.1 * diff(range(y)), to = max(y) + 0.1 * diff(range(y)), length.out = 30)
grid_x = as.matrix(cbind(rep(1,4), apply(x, 2, function(z) seq(from = min(z) - 0.1 * diff(range(z)), to = max(z) + 0.1 * diff(range(z)), length.out = 4))[,-1]))
}
slice_type <- mcmc$slice_type
if(is.null(slice_type)){ slice_type <- "DEP"}
if(!(slice_type == "DEP" | slice_type == "INDEP")) stop("Wrong mcmc$slice_type setting")
if(!(method == "ICS" | method == "SLI" | method == "MAR")) stop("Wrong method setting")
if(!(model == "LS" | model == "L")) stop("Wrong model setting")
hyper = ifelse(is.null(mcmc$hyper), TRUE, mcmc$hyper)
if(is.null(mcmc$wei_slice)){
indep_sli = "DEFAULT"
} else {
indep_sli = "CUSTOM"
}
if(length(mcmc$wei_slice) > 2) stop("Wrong mcmc$wei_slice setting")
# if null, initialize default parameters
if(hyper){
if(is.null(prior$a0)){ a0 = 2 } else { a0 = prior$a0 }
if(is.null(prior$m1)){ m1 = c(mean(y), rep(0, d - 1)) } else { m1 = prior$m1 }
if(is.null(prior$k1)){ k1 = 1 } else { k1 = prior$k1 }
if(is.null(prior$tau1)){ tau1 = var(y) } else { tau1 = prior$tau1 }
if(is.null(prior$zeta1)){ zeta1 = 1 } else { zeta1 = prior$zeta1 }
if(is.null(prior$n1)){ n1 = d + 2 } else { n1 = prior$n1 }
if(is.null(prior$S1)){ S1 = diag(100, d) } else { S1 = prior$S1 }
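    # initialize S0, m0 and b0 by drawing from the hyperpriors when not supplied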
if(is.null(prior$S0)){ S0 = solve(rWishart(n = 1, Sigma = solve(S1), df = n1)[,,1]) } else { S0 = prior$S0 }
if(is.null(prior$m0)){ m0 = as.vector(rnorm(d) %*% (t(chol(S0)) / k1) + m1) } else { m0 = prior$m0 }
if(is.null(prior$b0)){ b0 = rgamma(1, tau1, zeta1) } else { b0 = prior$b0 }
} else {
if(is.null(prior$m0)){ m0 = c(mean(y), rep(0, d - 1)) } else { m0 = prior$m0 }
if(is.null(prior$S0)){ S0 = diag(100, d) } else { S0 = prior$S0 }
if(is.null(prior$a0)){ a0 = 2 } else { a0 = prior$a0 }
if(is.null(prior$b0)){ b0 = var(y) } else { b0 = prior$b0 }
m1 <- rep(0, d)
k1 <- n1 <- 1
tau1 <- zeta1 <- 0
S1 <- diag(1, d)
}
# process parameters
strength = ifelse(is.null(prior$strength), 1, prior$strength)
discount = ifelse(is.null(prior$discount), 0, prior$discount)
if(strength < - discount) stop("strength must be greater than -discount")
if(is.null(mcmc$wei_slice)){
mcmc$wei_slice <- c(strength, discount)
}
# estimate the model
if(method == "ICS"){
if(model == "LS"){
est_model <- cICS_mv_MKR(y, x, grid_y, grid_x, niter, nburn, m0, S0, a0, b0,
m1, k1, n1, S1, tau1, zeta1, strength, m_imp, nupd, out_param,
mcmc_dens, discount, print_message, mean_dens, hyper)
} else if(model == "L"){
est_model <- cICS_mv_MKR_L(y, x, grid_y, grid_x, niter, nburn, m0, S0, a0, b0,
m1, k1, n1, S1, strength, m_imp, nupd, out_param,
mcmc_dens, discount, print_message, mean_dens, hyper)
}
} else if(method == "SLI" & slice_type == "INDEP"){
if(model == "LS"){
est_model <- cSLI_mv_MKR(y, x, grid_y, grid_x, niter, nburn, m0, S0, a0, b0,
m1, k1, n1, S1, tau1, zeta1, strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param,
mcmc_dens, discount, print_message, mean_dens, hyper, TRUE)
} else if(model == "L"){
est_model <- cSLI_mv_MKR_L(y, x, grid_y, grid_x, niter, nburn, m0, S0, a0, b0,
m1, k1, n1, S1, strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param,
mcmc_dens, discount, print_message, mean_dens, hyper, TRUE)
}
} else if(method == "SLI" & slice_type == "DEP"){
if(model == "LS"){
est_model <- cSLI_mv_MKR(y, x, grid_y, grid_x, niter, nburn, m0, S0, a0, b0,
m1, k1, n1, S1, tau1, zeta1, strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param,
mcmc_dens, discount, print_message, mean_dens, hyper, FALSE)
} else if(model == "L"){
est_model <- cSLI_mv_MKR_L(y, x, grid_y, grid_x, niter, nburn, m0, S0, a0, b0,
m1, k1, n1, S1, strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param,
mcmc_dens, discount, print_message, mean_dens, hyper, FALSE)
}
} else if(indep_sli == "CUSTOM"){
if(model == "LS"){
est_model <- cSLI_mv_MKR(y, x, grid_y, grid_x, niter, nburn, m0, S0, a0, b0,
m1, k1, n1, S1, tau1, zeta1, strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param,
mcmc_dens, discount, print_message, mean_dens, hyper, FALSE)
} else if(model == "L"){
est_model <- cSLI_mv_MKR_L(y, x, grid_y, grid_x, niter, nburn, m0, S0, a0, b0,
m1, k1, n1, S1, strength, mcmc$wei_slice[1], mcmc$wei_slice[2], nupd, out_param,
mcmc_dens, discount, print_message, mean_dens, hyper, FALSE)
}
} else if(method == "MAR"){
if(model == "LS"){
est_model <- MAR_mv_MKR(y, x, grid_y, grid_x, niter, nburn, m0, S0, a0, b0,
m1, k1, n1, S1, tau1, zeta1, strength, m_marginal, nupd, out_param,
mcmc_dens, discount, print_message, mean_dens, hyper)
} else if(model == "L"){
est_model <- MAR_mv_MKR_L(y, x, grid_y, grid_x, niter, nburn, m0, S0, a0, b0,
m1, k1, n1, S1, strength, m_marginal, nupd, out_param,
mcmc_dens, discount, print_message, mean_dens, hyper)
}
}
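  # Each (method, model) pair above dispatches to the corresponding C++ routine
  # exported in RcppExports.R: cICS_mv_MKR(_L) for the importance conditional
  # sampler, cSLI_mv_MKR(_L) for the slice samplers, and MAR_mv_MKR(_L) for the
  # marginal sampler; the "_L" variants fit the location-only (model = "L") mixture.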
# return the results
if(!isTRUE(out_param)){
if(isTRUE(mcmc_dens)){
result <- BNPdens(density = est_model$dens,
data = cbind(y, x[,-1]),
grid_y = grid_y,
grid_x = grid_x,
clust = est_model$clust,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
regression = TRUE)
}else{
result <- BNPdens(clust = est_model$clust,
data = cbind(y, x[,-1]),
niter = niter,
nburn = nburn,
tot_time = est_model$time,
regression = TRUE)
}
} else {
if(isTRUE(mcmc_dens)){
result <- BNPdens(density = est_model$dens,
data = cbind(y, x[,-1]),
grid_y = grid_y,
grid_x = grid_x,
clust = est_model$clust,
beta = est_model$beta,
sigma2 = est_model$sigma2,
probs = est_model$probs,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
regression = TRUE)
}else{
result <- BNPdens(clust = est_model$clust,
data = cbind(y, x[,-1]),
beta = est_model$beta,
sigma2 = est_model$sigma2,
probs = est_model$probs,
niter = niter,
nburn = nburn,
tot_time = est_model$time,
regression = TRUE)
}
}
return(result)
}
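# Minimal usage sketch (illustrative only; the call form mirrors the example in
# the plot.BNPdens documentation below):
# x_toy <- c(rnorm(100, 3, 1), rnorm(100, 3, 1))
# y_toy <- c(x_toy[1:100] * 2 + 1, x_toy[101:200] * 6 + 1) + rnorm(200, 0, 1)
# fit <- PYregression(y = y_toy, x = x_toy,
#                     mcmc = list(niter = 200, nburn = 100),
#                     output = list(grid_x = c(0, 1, 2, 3, 4, 5),
#                                   grid_y = seq(0, 35, length.out = 50)))
# summary(fit); plot(fit)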
# ---- file boundary: end of BNPmix/R/PYregression.R ----
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @export
#' @name MAR_L
#' @title C++ function to estimate Pitman-Yor univariate mixtures via marginal sampler - LOCATION
#' @keywords internal
#'
#' @param data a vector of observations
#' @param grid vector to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param s20 variance of location component
#' @param a0 parameter of scale component
#' @param b0 parameter of scale component
#' @param m1 hyperparameter, mean of distribution of m0
#' @param k1 hyperparameter, scale factor of distribution of m0
#' @param a1 hyperparameter, shape of distribution of s20
#' @param b1 hyperparameter, scale of distribution of s20
#' @param mass mass parameter
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param process if 0 DP, if 1 PY
#' @param sigma_PY discount parameter
#' @param print_message print the status
#' @param hyper if TRUE, use hyperpriors, default TRUE
#'
NULL
#' @export
#' @name MAR
#' @title C++ function to estimate Pitman-Yor univariate mixtures via marginal sampler - LOCATION SCALE
#' @keywords internal
#'
#' @param data a vector of observations
#' @param grid vector to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 tuning parameter of variance of location component
#' @param a0 parameter of scale component
#' @param b0 parameter of scale component
#' @param m1 mean of hyperdistribution of m0
#' @param s21 variance of hyperdistribution of m0
#' @param tau1 shape parameter of hyperdistribution of k0
#' @param tau2 rate parameter of hyperdistribution of k0
#' @param a1 shape parameter of hyperdistribution of b0
#' @param b1 rate parameter of hyperdistribution of b0
#' @param mass mass parameter
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param process if 0 DP, if 1 PY
#' @param sigma_PY discount parameter
#' @param print_message print the status
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
#' @export
#' @name MAR_mv_L
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via marginal sampler - LOCATION
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param S20 variance of location component
#' @param S0 parameter of scale component
#' @param n0 parameter of scale component
#' @param m1 mean of hyperdistribution of m0
#' @param k1 scale factor of hyperdistribution of m0
#' @param theta1 df of hyperdistribution of S20
#' @param Theta1 matrix of hyperdistribution of S20
#' @param mass mass parameter
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param sigma_PY second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
#' @export
#' @name MAR_mv
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via marginal sampler - LOCATION SCALE
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 tuning parameter of variance of location component
#' @param S0 parameter of scale component
#' @param n0 parameter of scale component
#' @param m1 mean of hyperprior distribution of m0
#' @param S1 covariance of hyperprior distribution of m0
#' @param tau1 shape parameter of hyperprior distribution of k0
#' @param tau2 rate parameter of hyperprior distribution of k0
#' @param theta1 df of hyperprior distribution of S0
#' @param Theta1 matrix of hyperprior distribution of S0
#' @param mass mass parameter
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param sigma_PY second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
#' @export
#' @name MAR_mv_P
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via marginal sampler - PRODUCT KERNEL
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 vector, scale parameters for the location component
#' @param a0 vector, parameters of scale component
#' @param b0 vector, parameters of scale component
#' @param m1 means of hyperdistribution of m0
#' @param s21 variances of hyperdistribution of m0
#' @param tau1 shape parameters of hyperdistribution of k0
#' @param tau2 rate parameters of hyperdistribution of k0
#' @param a1 shape parameters of hyperdistribution of b0
#' @param b1 rate parameters of hyperdistribution of b0
#' @param mass mass parameter
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param sigma_PY second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
#' @export
#' @name MAR_mv_MKR
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via marginal sampler - PRODUCT KERNEL
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 vector, scale parameters for the location component
#' @param a0 vector, parameters of scale component
#' @param b0 vector, parameters of scale component
#' @param m1 means of hyperdistribution of m0
#' @param s21 variances of hyperdistribution of m0
#' @param tau1 shape parameters of hyperdistribution of k0
#' @param tau2 rate parameters of hyperdistribution of k0
#' @param a1 shape parameters of hyperdistribution of b0
#' @param b1 rate parameters of hyperdistribution of b0
#' @param strength strength parameter
#' @param napprox number of approximating values
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param discount second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
#' @export
#' @name MAR_mv_MKR_L
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via marginal sampler - PRODUCT KERNEL
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 vector, scale parameters for the location component
#' @param a0 vector, parameters of scale component
#' @param b0 vector, parameters of scale component
#' @param m1 means of hyperdistribution of m0
#' @param s21 variances of hyperdistribution of m0
#' @param a1 shape parameters of hyperdistribution of b0
#' @param b1 rate parameters of hyperdistribution of b0
#' @param strength strength parameter
#' @param napprox number of approximating values
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param discount second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
MAR_L <- function(data, grid, niter, nburn, m0, s20, a0, b0, m1, k1, a1, b1, mass, nupd = 0L, out_param = 0L, out_dens = 1L, sigma_PY = 0, print_message = 1L, hyper = 1L) {
.Call('_BNPmix_MAR_L', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, s20, a0, b0, m1, k1, a1, b1, mass, nupd, out_param, out_dens, sigma_PY, print_message, hyper)
}
MAR <- function(data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, mass, nupd = 0L, out_param = 0L, out_dens = 1L, sigma_PY = 0, print_message = 1L, hyper = 1L) {
.Call('_BNPmix_MAR', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, mass, nupd, out_param, out_dens, sigma_PY, print_message, hyper)
}
MAR_mv_L <- function(data, grid, niter, nburn, m0, S20, S0, n0, m1, k1, theta1, Theta1, mass, nupd = 0L, out_param = 0L, out_dens = 1L, sigma_PY = 0, print_message = 1L, light_dens = 1L, hyper = 1L) {
.Call('_BNPmix_MAR_mv_L', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, S20, S0, n0, m1, k1, theta1, Theta1, mass, nupd, out_param, out_dens, sigma_PY, print_message, light_dens, hyper)
}
MAR_mv <- function(data, grid, niter, nburn, m0, k0, S0, n0, m1, S1, tau1, tau2, theta1, Theta1, mass, nupd = 0L, out_param = 0L, out_dens = 1L, sigma_PY = 0, print_message = 1L, light_dens = 1L, hyper = 1L) {
.Call('_BNPmix_MAR_mv', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, k0, S0, n0, m1, S1, tau1, tau2, theta1, Theta1, mass, nupd, out_param, out_dens, sigma_PY, print_message, light_dens, hyper)
}
MAR_mv_P <- function(data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, mass, nupd = 0L, out_param = 0L, out_dens = 1L, sigma_PY = 0, print_message = 1L, light_dens = 1L, hyper = 1L) {
.Call('_BNPmix_MAR_mv_P', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, mass, nupd, out_param, out_dens, sigma_PY, print_message, light_dens, hyper)
}
MAR_mv_MKR <- function(y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, tau1, tau2, strength, napprox, nupd = 0L, out_param = 0L, out_dens = 1L, discount = 0, print_message = 1L, light_dens = 1L, hyper = 1L) {
.Call('_BNPmix_MAR_mv_MKR', PACKAGE = 'BNPmix', y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, tau1, tau2, strength, napprox, nupd, out_param, out_dens, discount, print_message, light_dens, hyper)
}
MAR_mv_MKR_L <- function(y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, strength, napprox, nupd = 0L, out_param = 0L, out_dens = 1L, discount = 0, print_message = 1L, light_dens = 1L, hyper = 1L) {
.Call('_BNPmix_MAR_mv_MKR_L', PACKAGE = 'BNPmix', y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, strength, napprox, nupd, out_param, out_dens, discount, print_message, light_dens, hyper)
}
#' @export
#' @name cDDP
#' @title C++ function to estimate DDP models with 1 grouping variables
#' @keywords internal
#'
#' @param data a vector of observations.
#' @param group group allocation of the data.
#' @param ngr number of groups.
#' @param grid vector to evaluate the density.
#' @param niter number of iterations.
#' @param nburn number of burn-in iterations.
#' @param m0 expectation of location component.
#' @param k0 tuning parameter of variance of location component.
#' @param a0 parameter of scale component.
#' @param b0 parameter of scale component.
#' @param mass mass of Dirichlet process.
#' @param wei prior weight of the specific processes.
#' @param napprox number of approximating values.
#' @param var_MH_step tuning variance of the Metropolis-Hastings step for the weights updating.
#' @param nupd number of iterations to show current updating.
#' @param out_dens if TRUE, return also the estimated density (default TRUE).
#' @param print_message print the status.
#' @param light_dens if TRUE return only the posterior mean of the density
#'
NULL
cDDP <- function(data, group, ngr, grid, niter, nburn, m0, k0, a0, b0, mass, wei, napprox, var_MH_step, nupd = 0L, out_dens = 1L, print_message = 1L, light_dens = 1L) {
.Call('_BNPmix_cDDP', PACKAGE = 'BNPmix', data, group, ngr, grid, niter, nburn, m0, k0, a0, b0, mass, wei, napprox, var_MH_step, nupd, out_dens, print_message, light_dens)
}
#' @export
#' @name cICS_L
#' @title C++ function to estimate Pitman-Yor univariate mixtures via importance conditional sampler - LOCATION
#' @keywords internal
#'
#' @param data a vector of observations
#' @param grid vector to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param s20 variance of location component
#' @param a0 parameter of scale component
#' @param b0 parameter of scale component
#' @param m1 hyperparameter, mean of distribution of m0
#' @param k1 hyperparameter, scale factor of distribution of m0
#' @param a1 hyperparameter, shape of distribution of s20
#' @param b1 hyperparameter, scale of distribution of s20
#' @param strength strength parameter
#' @param napprox number of approximating values
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param discount discount parameter
#' @param print_message print the status
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
#' @export
#' @name cICS
#' @title C++ function to estimate Pitman-Yor univariate mixtures via importance conditional sampler - LOCATION SCALE
#' @keywords internal
#'
#' @param data a vector of observations
#' @param grid vector to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 tuning parameter of variance of location component
#' @param a0 parameter of scale component
#' @param b0 parameter of scale component
#' @param m1 mean of hyperdistribution of m0
#' @param s21 variance of hyperdistribution of m0
#' @param tau1 shape parameter of hyperdistribution of k0
#' @param tau2 rate parameter of hyperdistribution of k0
#' @param a1 shape parameter of hyperdistribution of b0
#' @param b1 rate parameter of hyperdistribution of b0
#' @param strength strength parameter
#' @param napprox number of approximating values
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param discount discount parameter
#' @param print_message print the status
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
#' @export
#' @name cICS_mv_L
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via importance conditional sampler - LOCATION
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param S20 variance of location component
#' @param S0 parameter of scale component
#' @param n0 parameter of scale component
#' @param m1 mean of hyperdistribution of m0
#' @param k1 scale factor of hyperdistribution of m0
#' @param theta1 df of hyperdistribution of S20
#' @param Theta1 matrix of hyperdistribution of S20
#' @param strength strength parameter
#' @param napprox number of approximating values
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param discount second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
#' @export
#' @name cICS_mv
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via importance conditional sampler - LOCATION SCALE
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 tuning parameter of variance of location component
#' @param S0 parameter of scale component
#' @param n0 parameter of scale component
#' @param m1 mean of hyperprior distribution of m0
#' @param S1 covariance of hyperprior distribution of m0
#' @param tau1 shape parameter of hyperprior distribution of k0
#' @param tau2 rate parameter of hyperprior distribution of k0
#' @param theta1 df of hyperprior distribution of S0
#' @param Theta1 matrix of hyperprior distribution of S0
#' @param strength strength parameter
#' @param napprox number of approximating values
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param discount second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
#' @export
#' @name cICS_mv_P
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via importance conditional sampler - PRODUCT KERNEL
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 vector, scale parameters for the location component
#' @param a0 vector, parameters of scale component
#' @param b0 vector, parameters of scale component
#' @param m1 means of hyperdistribution of m0
#' @param s21 variances of hyperdistribution of m0
#' @param tau1 shape parameters of hyperdistribution of k0
#' @param tau2 rate parameters of hyperdistribution of k0
#' @param a1 shape parameters of hyperdistribution of b0
#' @param b1 rate parameters of hyperdistribution of b0
#' @param strength strength parameter
#' @param napprox number of approximating values
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param discount second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
#' @export
#' @name cICS_mv_MKR
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via importance conditional sampler - PRODUCT KERNEL
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 vector, scale parameters for the location component
#' @param a0 vector, parameters of scale component
#' @param b0 vector, parameters of scale component
#' @param m1 means of hyperdistribution of m0
#' @param s21 variances of hyperdistribution of m0
#' @param tau1 shape parameters of hyperdistribution of k0
#' @param tau2 rate parameters of hyperdistribution of k0
#' @param a1 shape parameters of hyperdistribution of b0
#' @param b1 rate parameters of hyperdistribution of b0
#' @param strength strength parameter
#' @param napprox number of approximating values
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param discount second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
#' @export
#' @name cICS_mv_MKR_L
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via importance conditional sampler - PRODUCT KERNEL
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 vector, scale parameters for the location component
#' @param a0 vector, parameters of scale component
#' @param b0 vector, parameters of scale component
#' @param m1 means of hyperdistribution of m0
#' @param s21 variances of hyperdistribution of m0
#' @param a1 shape parameters of hyperdistribution of b0
#' @param b1 rate parameters of hyperdistribution of b0
#' @param strength strength parameter
#' @param napprox number of approximating values
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param discount second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
NULL
cICS_L <- function(data, grid, niter, nburn, m0, s20, a0, b0, m1, k1, a1, b1, strength, napprox, nupd = 0L, out_param = 0L, out_dens = 1L, discount = 0, print_message = 1L, hyper = 1L) {
.Call('_BNPmix_cICS_L', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, s20, a0, b0, m1, k1, a1, b1, strength, napprox, nupd, out_param, out_dens, discount, print_message, hyper)
}
cICS <- function(data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, strength, napprox, nupd = 0L, out_param = 0L, out_dens = 1L, discount = 0, print_message = 1L, hyper = 1L) {
.Call('_BNPmix_cICS', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, strength, napprox, nupd, out_param, out_dens, discount, print_message, hyper)
}
cICS_mv_L <- function(data, grid, niter, nburn, m0, S20, S0, n0, m1, k1, theta1, Theta1, strength, napprox, nupd = 0L, out_param = 0L, out_dens = 1L, discount = 0, print_message = 1L, light_dens = 1L, hyper = 1L) {
.Call('_BNPmix_cICS_mv_L', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, S20, S0, n0, m1, k1, theta1, Theta1, strength, napprox, nupd, out_param, out_dens, discount, print_message, light_dens, hyper)
}
cICS_mv <- function(data, grid, niter, nburn, m0, k0, S0, n0, m1, S1, tau1, tau2, theta1, Theta1, strength, napprox, nupd = 0L, out_param = 0L, out_dens = 1L, discount = 0, print_message = 1L, light_dens = 1L, hyper = 1L) {
.Call('_BNPmix_cICS_mv', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, k0, S0, n0, m1, S1, tau1, tau2, theta1, Theta1, strength, napprox, nupd, out_param, out_dens, discount, print_message, light_dens, hyper)
}
cICS_mv_P <- function(data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, strength, napprox, nupd = 0L, out_param = 0L, out_dens = 1L, discount = 0, print_message = 1L, light_dens = 1L, hyper = 1L) {
.Call('_BNPmix_cICS_mv_P', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, strength, napprox, nupd, out_param, out_dens, discount, print_message, light_dens, hyper)
}
cICS_mv_MKR <- function(y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, tau1, tau2, strength, napprox, nupd = 0L, out_param = 0L, out_dens = 1L, discount = 0, print_message = 1L, light_dens = 1L, hyper = 1L) {
.Call('_BNPmix_cICS_mv_MKR', PACKAGE = 'BNPmix', y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, tau1, tau2, strength, napprox, nupd, out_param, out_dens, discount, print_message, light_dens, hyper)
}
cICS_mv_MKR_L <- function(y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, strength, napprox, nupd = 0L, out_param = 0L, out_dens = 1L, discount = 0, print_message = 1L, light_dens = 1L, hyper = 1L) {
.Call('_BNPmix_cICS_mv_MKR_L', PACKAGE = 'BNPmix', y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, strength, napprox, nupd, out_param, out_dens, discount, print_message, light_dens, hyper)
}
#' @export
#' @name cSLI_L
#' @title C++ function to estimate Pitman-Yor univariate mixtures via slice sampler - LOCATION
#' @keywords internal
#'
#' @param data a vector of observations
#' @param grid vector to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param s20 variance of location component
#' @param a0 parameter of scale component
#' @param b0 parameter of scale component
#' @param m1 hyperparameter, mean of distribution of m0
#' @param k1 hyperparameter, scale factor of distribution of m0
#' @param a1 hyperparameter, shape of distribution of s20
#' @param b1 hyperparameter, rate of distribution of s20
#' @param mass mass parameter
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param sigma_PY second parameter of PY
#' @param print_message print the status
#' @param hyper if TRUE, use hyperpriors, default TRUE
#' @param indep if TRUE, use the independent slice efficient sampler
NULL
#' @export
#' @name cSLI
#' @title C++ function to estimate Pitman-Yor univariate mixtures via slice sampler - LOCATION SCALE
#' @keywords internal
#'
#' @param data a vector of observations
#' @param grid vector to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 tuning parameter of variance of location component
#' @param a0 parameter of scale component
#' @param b0 parameter of scale component
#' @param m1 mean of hyperdistribution of m0
#' @param s21 variance of hyperdistribution of m0
#' @param tau1 shape parameter of hyperdistribution of k0
#' @param tau2 rate parameter of hyperdistribution of k0
#' @param a1 shape parameter of hyperdistribution of b0
#' @param b1 rate parameter of hyperdistribution of b0
#' @param mass mass parameter
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param sigma_PY second parameter of PY
#' @param print_message print the status
#' @param hyper if TRUE, use hyperpriors, default TRUE
#' @param indep if TRUE, use the independent slice efficient sampler
NULL
#' @export
#' @name cSLI_mv_L
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via slice sampler - LOCATION
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param S20 variance of location component
#' @param S0 parameter of scale component
#' @param n0 parameter of scale component
#' @param m1 mean of hyperdistribution of m0
#' @param k1 scale factor of hyperdistribution of m0
#' @param theta1 df of hyperdistribution of S20
#' @param Theta1 matrix of hyperdistribution of S20
#' @param mass mass parameter
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param sigma_PY second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
#' @param indep if TRUE use independent slice efficient
NULL
#' @export
#' @name cSLI_mv
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via slice sampler - LOCATION SCALE
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 tuning parameter of variance of location component
#' @param S0 parameter of scale component
#' @param n0 parameter of scale component
#' @param m1 mean of hyperprior distribution of m0
#' @param S1 covariance of hyperprior distribution of m0
#' @param tau1 shape parameter of hyperprior distribution of k0
#' @param tau2 rate parameter of hyperprior distribution of k0
#' @param theta1 df of hyperprior distribution of S0
#' @param Theta1 matrix of hyperprior distribution of S0
#' @param mass mass parameter
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param sigma_PY second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
#' @param indep if TRUE use the independent slice efficient
NULL
#' @export
#' @name cSLI_mv_P
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via slice sampler - PRODUCT KERNEL
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 vector, scale parameters for the location component
#' @param a0 vector, parameters of scale component
#' @param b0 vector, parameters of scale component
#' @param m1 means of hyperdistribution of m0
#' @param s21 variances of hyperdistribution of m0
#' @param tau1 shape parameters of hyperdistribution of k0
#' @param tau2 rate parameters of hyperdistribution of k0
#' @param a1 shape parameters of hyperdistribution of b0
#' @param b1 rate parameters of hyperdistribution of b0
#' @param mass mass parameter
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param sigma_PY second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
#' @param indep if TRUE use the independent slice efficient
NULL
#' @export
#' @name cSLI_mv_MKR
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via slice sampler - PRODUCT KERNEL
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 vector, scale parameters for the location component
#' @param a0 vector, parameters of scale component
#' @param b0 vector, parameters of scale component
#' @param m1 means of hyperdistribution of m0
#' @param s21 variances of hyperdistribution of m0
#' @param tau1 shape parameters of hyperdistribution of k0
#' @param tau2 rate parameters of hyperdistribution of k0
#' @param a1 shape parameters of hyperdistribution of b0
#' @param b1 rate parameters of hyperdistribution of b0
#' @param strength strength parameter
#' @param napprox number of approximating values
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param discount second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
#' @param indep if TRUE, use the independent slice efficient sampler
NULL
#' @export
#' @name cSLI_mv_MKR_L
#' @title C++ function to estimate Pitman-Yor multivariate mixtures via slice sampler - PRODUCT KERNEL
#' @keywords internal
#'
#' @param data a matrix of observations
#' @param grid matrix of points to evaluate the density
#' @param niter number of iterations
#' @param nburn number of burn-in iterations
#' @param m0 expectation of location component
#' @param k0 vector, scale parameters for the location component
#' @param a0 vector, parameters of scale component
#' @param b0 vector, parameters of scale component
#' @param m1 means of hyperdistribution of m0
#' @param s21 variances of hyperdistribution of m0
#' @param a1 shape parameters of hyperdistribution of b0
#' @param b1 rate parameters of hyperdistribution of b0
#' @param strength strength parameter
#' @param napprox number of approximating values
#' @param nupd number of iterations to show current updating
#' @param out_param if TRUE, return also the location and scale parameters lists
#' @param out_dens if TRUE, return also the estimated density (default TRUE)
#' @param discount second parameter of PY
#' @param print_message print the status
#' @param light_dens if TRUE return only the posterior mean of the density
#' @param hyper if TRUE, use hyperpriors, default TRUE
#' @param indep if TRUE, use the independent slice efficient sampler
NULL
cSLI_L <- function(data, grid, niter, nburn, m0, s20, a0, b0, m1, k1, a1, b1, mass, param_seq_one, param_seq_two, nupd = 0L, out_param = 0L, out_dens = 1L, sigma_PY = 0, print_message = 1L, hyper = TRUE, indep = FALSE) {
.Call('_BNPmix_cSLI_L', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, s20, a0, b0, m1, k1, a1, b1, mass, param_seq_one, param_seq_two, nupd, out_param, out_dens, sigma_PY, print_message, hyper, indep)
}
cSLI <- function(data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, mass, param_seq_one, param_seq_two, nupd = 0L, out_param = 0L, out_dens = 1L, sigma_PY = 0, print_message = 1L, hyper = 1L, indep = TRUE) {
.Call('_BNPmix_cSLI', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, mass, param_seq_one, param_seq_two, nupd, out_param, out_dens, sigma_PY, print_message, hyper, indep)
}
cSLI_mv_L <- function(data, grid, niter, nburn, m0, S20, S0, n0, m1, k1, theta1, Theta1, mass, param_seq_one, param_seq_two, nupd = 0L, out_param = 0L, out_dens = 1L, sigma_PY = 0, print_message = 1L, light_dens = 1L, hyper = 1L, indep = TRUE) {
.Call('_BNPmix_cSLI_mv_L', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, S20, S0, n0, m1, k1, theta1, Theta1, mass, param_seq_one, param_seq_two, nupd, out_param, out_dens, sigma_PY, print_message, light_dens, hyper, indep)
}
cSLI_mv <- function(data, grid, niter, nburn, m0, k0, S0, n0, m1, S1, tau1, tau2, theta1, Theta1, mass, param_seq_one, param_seq_two, nupd = 0L, out_param = 0L, out_dens = 1L, sigma_PY = 0, print_message = 1L, light_dens = 1L, hyper = 1L, indep = TRUE) {
.Call('_BNPmix_cSLI_mv', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, k0, S0, n0, m1, S1, tau1, tau2, theta1, Theta1, mass, param_seq_one, param_seq_two, nupd, out_param, out_dens, sigma_PY, print_message, light_dens, hyper, indep)
}
cSLI_mv_P <- function(data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, mass, param_seq_one, param_seq_two, nupd = 0L, out_param = 0L, out_dens = 1L, sigma_PY = 0, print_message = 1L, light_dens = 1L, hyper = 1L, indep = TRUE) {
.Call('_BNPmix_cSLI_mv_P', PACKAGE = 'BNPmix', data, grid, niter, nburn, m0, k0, a0, b0, m1, s21, tau1, tau2, a1, b1, mass, param_seq_one, param_seq_two, nupd, out_param, out_dens, sigma_PY, print_message, light_dens, hyper, indep)
}
cSLI_mv_MKR <- function(y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, tau1, tau2, strength, param_seq_one, param_seq_two, nupd = 0L, out_param = 0L, out_dens = 1L, discount = 0, print_message = 1L, light_dens = 1L, hyper = 1L, indep = TRUE) {
.Call('_BNPmix_cSLI_mv_MKR', PACKAGE = 'BNPmix', y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, tau1, tau2, strength, param_seq_one, param_seq_two, nupd, out_param, out_dens, discount, print_message, light_dens, hyper, indep)
}
cSLI_mv_MKR_L <- function(y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, strength, param_seq_one, param_seq_two, nupd = 0L, out_param = 0L, out_dens = 1L, discount = 0, print_message = 1L, light_dens = 1L, hyper = 1L, indep = TRUE) {
.Call('_BNPmix_cSLI_mv_MKR_L', PACKAGE = 'BNPmix', y, covs, grid_response, grid_covs, niter, nburn, beta0, Sb0, a0, b0, beta1, k1, sb1, Sb1, strength, param_seq_one, param_seq_two, nupd, out_param, out_dens, discount, print_message, light_dens, hyper, indep)
}
#' @export BNPmix_psm
#' @name BNPmix_psm
#' @title C++ function - compute the posterior similarity matrix
#' @keywords internal
#'
#' @param M a matrix (r x n), r number of replications, n number of observations
#'
#' @examples{
#' M <- matrix(c(1,1,1,2,1,1,2,2,1,1,2,1,1,2,1,1), ncol = 4)
#' BNPmix_psm(M)
#' }
#'
NULL
#' @export clean_partition
#' @name clean_partition
#' @title C++ function - clean the partition matrix
#' @keywords internal
#'
#' @param M a matrix (r x n), r number of replications, n number of observations
#'
#' @examples{
#' M <- matrix(c(1,1,1,3,1,1,4,4,1,1,3,1,1,3,1,1), ncol = 4)
#' clean_partition(M)
#' }
#'
NULL
#' @export BNPmix_VI_LB
#' @name BNPmix_VI_LB
#' @title C++ function - compute the VI lower bound
#' @keywords internal
#'
#' @param M a matrix (r x n), r number of replications, n number of observations
#' @param psm_mat a posterior similarity matrix
#'
#' @examples{
#' M <- matrix(c(1,1,1,2,1,1,2,2,1,1,2,1,1,1,1,2), ncol = 4)
#' psmM <- BNPmix_psm(M)
#' BNPmix_VI_LB(M, psmM)
#' }
#'
NULL
#' @export BNPmix_BIN
#' @name BNPmix_BIN
#' @title C++ function - compute the Binder distances
#' @keywords internal
#'
#' @param M a matrix (r x n), r number of replications, n number of observations
#' @param psm_mat a posterior similarity matrix
#'
#' @examples{
#' M <- matrix(c(1,1,1,2,1,1,2,2,1,1,2,1,1,1,1,2), ncol = 4)
#' psmM <- BNPmix_psm(M)
#' BNPmix_BIN(M, psmM)
#' }
#'
NULL
BNPmix_psm <- function(M) {
.Call('_BNPmix_BNPmix_psm', PACKAGE = 'BNPmix', M)
}
clean_partition <- function(M) {
.Call('_BNPmix_clean_partition', PACKAGE = 'BNPmix', M)
}
BNPmix_VI_LB <- function(C_mat, psm_mat) {
.Call('_BNPmix_BNPmix_VI_LB', PACKAGE = 'BNPmix', C_mat, psm_mat)
}
BNPmix_BIN <- function(C_mat, psm_mat) {
.Call('_BNPmix_BNPmix_BIN', PACKAGE = 'BNPmix', C_mat, psm_mat)
}
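# Illustrative pipeline for the partition utilities above (toy matrix taken
# from the roxygen examples): the posterior similarity matrix feeds both
# loss-based point estimates of the partition.
# M <- matrix(c(1,1,1,2,1,1,2,2,1,1,2,1,1,1,1,2), ncol = 4)
# psmM <- BNPmix_psm(M)         # pairwise co-clustering frequencies
# vi <- BNPmix_VI_LB(M, psmM)   # variation of information lower bound, one per row
# bin <- BNPmix_BIN(M, psmM)    # Binder distance, one per row
# M[which.min(vi), ]            # partition minimizing the VI lower bound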
# ---- file boundary: end of BNPmix/R/RcppExports.R ----
#' BNPdens class constructor
#'
#' @description A constructor for the \code{BNPdens} class. The class \code{BNPdens} is a named list containing
#' the output generated by a specified Bayesian nonparametric mixture model implemented by means of
#' a specified MCMC strategy, as in \code{PYdensity}, \code{DDPdensity}, and \code{PYregression}.
#'
#' @param density a matrix containing the values taken by the density at the grid points;
#' @param data a dataset;
#' @param grideval a set of values where to evaluate the density;
#' @param grid_x regression grid, independent variable;
#' @param grid_y regression grid, dependent variable;
#' @param clust a (\code{niter - nburn}) \eqn{\times}{x} \code{nrow(data)}-dimensional matrix containing
#' the cluster labels for each observation (cols) and MCMC iteration (rows);
#' @param mean values for the location parameters;
#' @param beta coefficients for regression model (only for \code{PYregression});
#' @param sigma2 values of the scale parameters;
#' @param probs values for the mixture weights;
#' @param niter number of MCMC iterations;
#' @param nburn number of MCMC iterations to discard as burn-in;
#' @param tot_time total execution time;
#' @param univariate logical, \code{TRUE} if the model is univariate;
#' @param regression logical, \code{TRUE} for the output of \code{PYregression};
#' @param dep logical, \code{TRUE} for the output of \code{DDPdensity};
#' @param group_log group allocation for each iteration (only for \code{DDPdensity});
#' @param group vector, allocation of observations to strata (only for \code{DDPdensity});
#' @param wvals values of the processes weights (only for \code{DDPdensity}).
#'
#' @examples
#' data_toy <- c(rnorm(100, -3, 1), rnorm(100, 3, 1))
#' grid <- seq(-7, 7, length.out = 50)
#' est_model <- PYdensity(y = data_toy, mcmc = list(niter = 100,
#' nburn = 10, nupd = 100), output = list(grid = grid))
#' str(est_model)
#' class(est_model)
#' @export
#'
BNPdens <- function(
density = NULL,
data = NULL,
grideval = NULL,
grid_x = NULL,
grid_y = NULL,
clust = NULL,
mean = NULL,
beta = NULL,
sigma2 = NULL,
probs = NULL,
niter = NULL,
nburn = NULL,
tot_time = NULL,
univariate = TRUE,
regression = FALSE,
dep = FALSE,
group_log = NULL,
group = NULL,
wvals = NULL
){
value <- list(density = density,
data = data,
grideval = grideval,
grid_x = grid_x,
grid_y = grid_y,
clust = clust,
mean = mean,
beta = beta,
sigma2 = sigma2,
probs = probs,
niter = niter,
nburn = nburn,
tot_time = tot_time,
univariate = univariate,
regression = regression,
dep = dep,
group_log = group_log,
group = group,
wvals = wvals)
attr(value, "class") <- "BNPdens"
value
}
#' BNPpart class constructor
#'
#' @description A constructor for the \code{BNPpart} class. The class \code{BNPpart} is a named list containing
#' the output of partition estimation methods.
#'
#' @param partitions a matrix, each row is a visited partition;
#' @param scores a vector, each value is the score of a visited partition;
#' @param psm a matrix, posterior similarity matrix.
#'
#' @examples
#' data_toy <- c(rnorm(100, -3, 1), rnorm(100, 3, 1))
#' grid <- seq(-7, 7, length.out = 50)
#' est_model <- PYdensity(y = data_toy, mcmc = list(niter = 100,
#' nburn = 10, nupd = 100), output = list(grid = grid))
#' part <- partition(est_model)
#' class(part)
#'
#' @export
#'
BNPpart <- function(
partitions = NULL,
scores = NULL,
psm = NULL){
value <- list(partitions = partitions,
scores = scores,
psm = psm)
attr(value, "class") <- "BNPpart"
value
}
# -----------------------------------------------------------------------
# SUMMARY
# -----------------------------------------------------------------------
#' BNPdens summary method
#'
#' @description The \code{summary.BNPdens} method provides summary information on \code{BNPdens} objects.
#' @param object an object of class \code{BNPdens};
#' @param ... additional arguments
#'
#' @rdname summary.BNPdens
#' @examples
#' data_toy <- c(rnorm(100, -3, 1), rnorm(100, 3, 1))
#' grid <- seq(-7, 7, length.out = 50)
#' est_model <- PYdensity(y = data_toy, mcmc = list(niter = 100,
#' nburn = 10, napprox = 10), output = list(grid = grid))
#' class(est_model)
#' summary(est_model)
#' @export
summary.BNPdens <- function(object, ...) {
if(!object$dep){
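    # number of distinct clusters at each MCMC iteration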
nuniq <- apply(object$clust, 1, function(x) length(unique(x)))
    # the univariate and multivariate cases print the same summary
    cat("PYdensity function call:\n",
        object$nburn, "\tburn-in iterations\n",
        object$niter, "\titerations\n",
        "Global estimation time:", round(object$tot_time, digits = 2), "seconds\n",
        "Average number of groups: ", mean(nuniq), "\n",
        "Min number of groups: ", min(nuniq), "; Max number of groups: ", max(nuniq), "\n")
} else {
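    # per-group number of distinct clusters at each MCMC iteration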
nuniq <- sapply(unique(object$group), function(x) apply(object$clust, 1, function(y) length(unique(y[object$group == x]))))
cat("DDPdensity function call:\n",
length(table(object$group)), "\tdifferent groups\n",
object$nburn, "\tburn-in iterations\n",
        object$niter, "\titerations\n",
"Global estimation time:", round(object$tot_time, digits = 2), "seconds\n","Average number of groups: ", paste(colMeans(nuniq), collapse = " - "), "\n",
"Min number of groups: ", paste(apply(nuniq, 2, min), collapse = " - "), "; Max number of groups: ", paste(apply(nuniq, 2, max), collapse = " - "), "\n")
}
}
# -----------------------------------------------------------------------
# PRINT
# -----------------------------------------------------------------------
#' BNPdens print method
#'
#' @description The \code{print.BNPdens} method prints the type of a \code{BNPdens} object.
#' @param x an object of class \code{BNPdens};
#' @param ... additional arguments.
#' @rdname print.BNPdens
#' @examples
#' data_toy <- c(rnorm(100, -3, 1), rnorm(100, 3, 1))
#' grid <- seq(-7, 7, length.out = 50)
#' est_model <- PYdensity(y = data_toy, mcmc = list(niter = 100,
#' nburn = 10, napprox = 10), output = list(grid = grid))
#' class(est_model)
#' print(est_model)
#' @export
print.BNPdens <- function(x, ...) {
cat("BNPdens object\n")
if(!x$dep){
if(x$regression){
cat("Type: regression model")
} else if(x$univariate){
cat("Type: univariate density")
} else {
cat("Type: multivariate density")
}
} else {
cat("Type: dependent Dirichlet process model")
}
}
# -----------------------------------------------------------------------
# PLOT
# -----------------------------------------------------------------------
#' Density plot for BNPdens class
#'
#' @description Extension of the \code{plot} method to the \code{BNPdens} class. The method \code{plot.BNPdens} returns suitable plots for a \code{BNPdens}
#' object. See details.
#'
#' @details If the \code{BNPdens} object is generated by \code{PYdensity}, the function returns
#' the univariate or bivariate estimated density plot.
#' If the \code{BNPdens} object is generated by \code{PYregression}, the function returns
#' the scatterplot of the response variable jointly with the covariates (up to four),
#' colored according to the estimated partition.
#' If \code{x} is a \code{BNPdens} object generated by \code{DDPdensity}, the function returns
#' a wrapped plot with one density per group.
#' The plots can be enhanced in several ways: for univariate densities, if \code{show_hist = TRUE},
#' the plot shows also the histogram of the data; if \code{show_points = TRUE},
#' the plot shows also the observed points along the
#' x-axis; if \code{show_points = TRUE} and \code{show_clust = TRUE}, the points are colored
#' according to the partition estimated with the \code{partition} function.
#' For multivariate densities: if \code{show_points = TRUE},
#' the plot shows also the scatterplot of the data;
#' if \code{show_points = TRUE} and \code{show_clust = TRUE},
#' the points are colored according to the estimated partition.
#'
#' @param x an object of class \code{BNPdens};
#' @param dimension if \code{x} is the output of a model fitted to multivariate data,
#' \code{dimension} specifies the two dimensions for the bivariate contour plot
#' (if they are equal, a marginal univariate plot is returned);
#' @param col the color of the lines;
#' @param show_points if \code{TRUE}, the function plots also the observations, default \code{FALSE};
#' @param show_hist if \code{TRUE}, and the model is univariate, the function plots also the histogram of the data, default \code{FALSE};
#' @param show_clust if \code{TRUE} the function plots also the points colored with respect to the estimated partition, default \code{FALSE};
#' @param bin_size if \code{show_hist = TRUE}, it corresponds to the size of each bin,
#' default \code{diff(range(data)) / 30};
#' @param wrap_dim bivariate vector, if \code{x} is the output of \code{DDPdensity}, it corresponds to the number of rows and columns in the plot. Default \code{c(ngroup, 1)};
#' @param xlab label of the horizontal axis;
#' @param ylab label of the vertical axis;
#' @param band if \code{TRUE} and \code{x} is the output of a univariate model or of \code{DDPdensity}, the plot method displays quantile-based posterior credible bands along with estimated densities;
#' @param conf_level bivariate vector, order of the quantiles for the posterior credible bands. Default \code{c(0.025, 0.975)};
#' @param ... additional arguments to be passed.
#'
#' @rdname plot.BNPdens
#'
#' @return A \code{ggplot2} object.
#'
#' @examples
#' # PYdensity example
#' data_toy <- c(rnorm(100, -3, 1), rnorm(100, 3, 1))
#' grid <- seq(-7, 7, length.out = 50)
#' est_model <- PYdensity(y = data_toy,
#' mcmc = list(niter = 200, nburn = 100, nupd = 100),
#' output = list(grid = grid))
#' class(est_model)
#' plot(est_model)
#'
#' # PYregression example
#' x_toy <- c(rnorm(100, 3, 1), rnorm(100, 3, 1))
#' y_toy <- c(x_toy[1:100] * 2 + 1, x_toy[101:200] * 6 + 1) + rnorm(200, 0, 1)
#' grid_x <- c(0, 1, 2, 3, 4, 5)
#' grid_y <- seq(0, 35, length.out = 50)
#' est_model <- PYregression(y = y_toy, x = x_toy,
#' mcmc = list(niter = 200, nburn = 100),
#' output = list(grid_x = grid_x, grid_y = grid_y))
#' summary(est_model)
#' plot(est_model)
#'
#' # DDPdensity example
#' data_toy <- c(rnorm(50, -4, 1), rnorm(100, 0, 1), rnorm(50, 4, 1))
#' group_toy <- c(rep(1,100), rep(2,100))
#' grid <- seq(-7, 7, length.out = 50)
#' est_model <- DDPdensity(y = data_toy, group = group_toy,
#' mcmc = list(niter = 200, nburn = 100, napprox_unif = 50),
#' output = list(grid = grid))
#' summary(est_model)
#' plot(est_model)
#'
#'
#' @export
plot.BNPdens <- function(x, dimension = c(1,2), col = "#0037c4", show_points = FALSE,
                         show_hist = FALSE, show_clust = FALSE, bin_size = NULL, wrap_dim = NULL,
                         xlab = "", ylab = "", band = TRUE, conf_level = c(0.025, 0.975), ...) {
ggplot2::theme_set(ggplot2::theme_bw())
if(is.null(x$density)){
with(x, {
xlab <- ifelse(xlab == "", "group", xlab)
ylab <- ifelse(ylab == "", "count", ylab)
part_temp <- partition(x)
temp_plot <- ggplot2::ggplot(data.frame(data = as.factor(part_temp$partitions[1,]))) +
        ggplot2::geom_bar(mapping = ggplot2::aes(x = data, color = data, fill = data), alpha = 0.3) +
ggplot2::labs(x = xlab, y = ylab) +
ggplot2::theme(legend.position = "none")
temp_plot
})
} else {
if(isTRUE(show_clust)){show_points <- TRUE}
if(!x$regression){
if(!x$dep){
if(x$univariate){
with(x,{
if(is.null(bin_size)) bin_size <- (range(x$data)[2] - range(x$data)[1]) / 30
data_plot = data.frame(V1 = x$data, V2 = partition(x)$partitions[3,])
if(!is.null(x$density)){
if(length(dim(x$density)) == 2){
plot_df <- data.frame(grid = x$grideval, val = colMeans(x$density))
} else {
plot_df <- data.frame(grid = x$grideval, val = x$density)
}
if(isTRUE(band) & length(dim(x$density)) == 2){
plot_df <- data.frame(grid = x$grideval,
val = colMeans(x$density),
band_low = apply(x$density, 2, function(z) quantile(z, conf_level[1])),
band_up = apply(x$density, 2, function(z) quantile(z, conf_level[2])))
}
}
temp_plot <- ggplot2::ggplot(plot_df, mapping = ggplot2::aes(x = grid, y = val))
if(show_hist){
temp_plot <- temp_plot + ggplot2::geom_histogram(data = data_plot, ggplot2::aes(x = V1, y = ..density..),
fill = "#EFEFEF", col = "#969696", binwidth = bin_size)
}
if(show_clust){
            temp_plot <- temp_plot + ggplot2::geom_point(data = data_plot, ggplot2::aes(x = V1, y = 0, col = as.factor(V2)))
}
if(isTRUE(show_points) & !isTRUE(show_clust)){
            temp_plot <- temp_plot + ggplot2::geom_point(data = data_plot, ggplot2::aes(x = V1, y = 0), color = "#646464")
}
if(isTRUE(band)){
temp_plot <- temp_plot + ggplot2::geom_ribbon(data = plot_df,
mapping = ggplot2::aes(x = grid, ymin = band_low, ymax = band_up),
fill = col, alpha = 0.3)
}
temp_plot <- temp_plot +
            ggplot2::geom_line(mapping = ggplot2::aes(x = grid, y = val), size = 1, color = col) +
ggplot2::theme(legend.position = "none") +
ggplot2::labs(x = xlab, y = ylab)
temp_plot
})
} else {
with(x, {
data_plot <- data.frame(V1 = x$data[,dimension[1]], V2 = x$data[,dimension[2]], V3 = partition(x)$partitions[3,])
if(dim(x$density)[2] > 1){
plot_df <- as.data.frame(cbind(x$grideval, colMeans(x$density)))
} else {
plot_df <- as.data.frame(cbind(x$grideval, x$density))
}
names(plot_df) = c(paste("GR", 1:ncol(x$grideval), sep = ''), "V1")
if(dimension[1] == dimension[2]){
plot_df_use <- aggregate(plot_df, by = list(plot_df[[dimension[1]]]), FUN = sum)
temp_plot <- ggplot2::ggplot(data = plot_df_use, mapping = ggplot2::aes(x = Group.1, y = V1)) +
ggplot2::geom_line(mapping = ggplot2::aes(x = Group.1, y = V1), size= 1, color = col) +
ggplot2::labs(x = xlab, y = ylab)
} else {
plot_df_use <- aggregate(plot_df, by = list(plot_df[[dimension[1]]],plot_df[[dimension[2]]]), FUN = sum)
temp_plot <- ggplot2::ggplot(data = plot_df_use, mapping = ggplot2::aes(x = Group.1, y = Group.2, z = V1))
if(isTRUE(show_points) & !isTRUE(show_clust)){
temp_plot <- temp_plot + ggplot2::geom_point(data = data_plot, ggplot2::aes(x = V1, y = V2), col = "#646464")
}
if(isTRUE(show_clust)){
temp_plot <- temp_plot + ggplot2::geom_point(data = data_plot, ggplot2::aes(x = V1, y = V2, col = as.factor(V3)))
}
temp_plot <- temp_plot +
ggplot2::stat_contour(data = plot_df_use, mapping = ggplot2::aes(x = Group.1, y = Group.2, z = V1), bins = 10, col = col) +
ggplot2::theme(legend.position = "none") +
ggplot2::labs(x = xlab, y = ylab)
temp_plot
}
})
}
} else {
if(is.null(wrap_dim)) wrap_dim = c(length(unique(x$group)), 1)
with(x,{
ngr <- length(unique(x$group))
if(isTRUE(band)){
plot_df <- data.frame(grid = rep(x$grideval, ngr),
val = as.vector(apply(x$density, c(1,2), mean)),
group = factor(rep(levels(factor(x$group)), each = length(x$grideval)),
levels = levels(factor(x$group))),
band_low = as.vector(apply(x$density, c(1,2), function(z) quantile(z, conf_level[1]))),
band_up = as.vector(apply(x$density, c(1,2), function(z) quantile(z, conf_level[2]))))
} else {
plot_df <- data.frame(grid = rep(x$grideval, ngr),
val = as.vector(apply(x$density, c(1,2), mean)),
group = factor(rep(levels(factor(x$group)), each = length(x$grideval)),
levels = levels(factor(x$group))))
}
temp_plot <- ggplot2::ggplot(plot_df, mapping = ggplot2::aes(x = grid, y = val)) +
ggplot2::labs(x = xlab, y = ylab) +
ggplot2::facet_wrap(~ factor(group), nrow = wrap_dim[1], ncol = wrap_dim[2])
if(isTRUE(band)){
temp_plot <- temp_plot + ggplot2::geom_ribbon(data = plot_df,
mapping = ggplot2::aes(x = grid, ymin = band_low, ymax = band_up), fill = col,
colour = NA, alpha = 0.3)
}
temp_plot <- temp_plot +
ggplot2::geom_line(color = col) +
ggplot2::guides(fill=FALSE, color=FALSE)
temp_plot
})
}
} else {
with(x,{
plot_df <- as.data.frame(x$data)
colnames(plot_df) = paste0("V", 1:ncol(x$data))
part <- partition(x)$partitions[3,]
if(ncol(x$data) == 2){
        temp_plot <- ggplot2::ggplot(plot_df, mapping = ggplot2::aes(x = V2, y = V1)) +
ggplot2::geom_point(ggplot2::aes(x = V2, y = V1, col = as.factor(part))) +
ggplot2::guides(fill=FALSE, color=FALSE) +
ggplot2::labs(x = xlab, y = ylab)
temp_plot
} else {
temp_pl <- list()
tcn <- colnames(plot_df)
for(pl in 1:(min(4, ncol(x$data) - 1))){
colnames(plot_df)[pl + 1] = "temp"
          temp_pl[[pl]] <- ggplot2::ggplot(plot_df, mapping = ggplot2::aes(x = temp, y = V1)) +
ggplot2::geom_point(ggplot2::aes(x = temp, y = V1, col = as.factor(part))) +
ggplot2::guides(fill=FALSE, color=FALSE) +
ggplot2::labs(x = paste0("X", pl), y = "Y")
colnames(plot_df)[pl + 1] = tcn[pl + 1]
}
ggpubr::ggarrange(plotlist = temp_pl, ncol = 2)
}
})
}
}
}
# -----------------------------------------------------------------------
# EXPORT TO CODA
# -----------------------------------------------------------------------
#' set generic
#' @name BNPdens2coda
#' @keywords internal
#' @export
BNPdens2coda <- function (object, dens) {
UseMethod("BNPdens2coda", object = object)
}
#'
#' Export to coda interface
#'
#' @description The method \code{BNPdens2coda} converts a \code{BNPdens} object into a \code{coda} mcmc object.
#'
#' @param object a BNPdens object;
#' @param dens logical; it can be \code{TRUE} only for models estimated with \code{PYdensity}.
#' If \code{TRUE}, the estimated density is also converted to \code{coda} format. Default \code{FALSE}.
#'
#' @rdname BNPdens2coda.BNPdens
#'
#' @return
#' an mcmc object
#'
#' @examples
#' data_toy <- cbind(c(rnorm(100, -3, 1), rnorm(100, 3, 1)),
#' c(rnorm(100, -3, 1), rnorm(100, 3, 1)))
#' grid <- expand.grid(seq(-7, 7, length.out = 50),
#' seq(-7, 7, length.out = 50))
#' est_model <- PYdensity(y = data_toy, mcmc = list(niter = 200, nburn = 100),
#' output = list(grid = grid))
#' coda_mcmc <- BNPdens2coda(est_model)
#' class(coda_mcmc)
#'
#' @export
#'
BNPdens2coda.BNPdens <- function(object, dens = FALSE){
if(length(dim(object$density)) < 2) dens = FALSE
if(object$dep) dens = FALSE
if(object$regression) dens = FALSE
if(!dens){
if(!object$dep){
coda::as.mcmc(apply(object$clust, 1, function(x) length(unique(x))))
} else {
temp <- cbind(sapply(unique(object$group),
function(x) apply(object$clust, 1, function(y)
length(unique(y[object$group == x])))),
object$wvals)[,as.vector(t(cbind(1:(length(unique(object$group))), (length(unique(object$group)) + 1):(2 * (length(unique(object$group)))))))]
      colnames(temp) <- as.vector(sapply(1:ncol(object$wvals), function(x) paste(c("clusters in group", "weight for group"), x)))
coda::as.mcmc(temp)
}
} else {
temp <- apply(object$clust, 1, function(x) length(unique(x)))
temp <- rbind(temp, t(object$density))
coda::as.mcmc(temp)
}
}
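# The converted chain plugs directly into coda's diagnostics. A minimal sketch
# (illustrative only; assumes coda is installed and `est_model` is a fitted
# BNPdens object as in the example above):
if (FALSE) {
  coda_mcmc <- BNPdens2coda(est_model)
  coda::traceplot(coda_mcmc)       # trace of the number of clusters per iteration
  coda::effectiveSize(coda_mcmc)   # effective sample size of the chain
}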
# -----------------------------------------------------------------------
# EVALUATE THE DENSITY
# -----------------------------------------------------------------------
#' set generic
#' @name dBNPdens
#' @keywords internal
#' @export
dBNPdens <- function (object, x) {
UseMethod("dBNPdens", object = object)
}
#'
#' Evaluate estimated univariate densities at a given point
#'
#' @description The method \code{dBNPdens} provides an approximated evaluation of estimated univariate densities at a given point, for a \code{BNPdens} class object.
#'
#' @param object a \code{BNPdens} object (only if univariate);
#' @param x the point where to evaluate the density.
#' @rdname dBNPdens.BNPdens
#'
#' @return
#' a numeric value
#'
#' @examples
#' data_toy <- c(rnorm(100, -3, 1), rnorm(100, 3, 1))
#' grid <- seq(-7, 7, length.out = 50)
#' est_model <- PYdensity(y = data_toy, mcmc = list(niter = 200, nburn = 100),
#' output = list(grid = grid))
#' x <- 1.4
#' dBNPdens(est_model, x)
#'
#' @export
#'
dBNPdens.BNPdens <- function(object, x){
if(!isTRUE(object$univariate)) stop("The model must be univariate")
if( x < min(object$grideval) | x > max(object$grideval) ) stop("x must be between the min and the max of the grid")
if(!is.null(object$density)){
if(length(dim(object$density)) == 2){
y <- colMeans(object$density)
} else {
y <- object$density
}
approx(object$grideval, y, xout = x)$y
} else {
stop("An estimated density is required")
}
}
# ---- source file: /scratch/gouwar.j/cran-all/cranData/BNPmix/R/class_generic.R ----
# -----------------------------------------------------------------------
# PARTITIONS
# -----------------------------------------------------------------------
#' set generic
#' @name partition
#' @keywords internal
#' @export
partition <- function (object, ...) {
UseMethod("partition")
}
#' Estimate the partition of the data
#'
#' @description The \code{partition} method estimates the partition of the data based on the output generated by a Bayesian nonparametric mixture
#' model, according to a specified criterion, for a \code{BNPdens} class object.
#'
#' @param object an object of class \code{BNPdens};
#' @param dist a loss function defined on the space of partitions;
#' it can be variation of information (\code{"VI"}) or \code{"Binder"}, default \code{"VI"}. See details;
#' @param max_k maximum number of clusters passed to the \code{cutree} function. See value below;
#' @param ... additional arguments to be passed.
#'
#' @rdname partition.BNPdens
#'
#' @details
#' This method returns point estimates for the clustering of the data induced by a nonparametric mixture model.
#' This result is achieved by exploiting two different loss functions on the space of partitions: variation of information
#' (\code{dist = 'VI'}) and Binder's loss (\code{dist = 'Binder'}). The function is based on the \code{mcclust.ext}
#' code by Sara Wade (Wade and Ghahramani, 2018).
#'
#' @return
#' The method returns a list containing a matrix with \code{nrow(data)} columns and 3 rows. Each row reports
#' the cluster labels for each observation according to three different approaches, one per row. The first and second rows
#' are the output of an agglomerative clustering procedure obtained by applying the function \code{hclust}
#' to the dissimilarity matrix, and by using the complete or average linkage,
#' respectively. The number of clusters is between 1 and \code{max_k} and is chosen according to a lower bound
#' on the expected loss, as described in Wade and Ghahramani (2018).
#' The third row reports the partition visited by the MCMC that attains the minimum lower bound on the expected loss \code{dist}, computed with respect to the posterior similarity matrix.
#'
#' In addition, the list reports a vector with three scores representing the lower bound on the expected loss
#' for the three partitions.
#'
#'
#' @references
#' Wade, S., Ghahramani, Z. (2018). Bayesian cluster analysis: Point estimation and credible balls.
#' Bayesian Analysis, 13, 559-626.
#'
#' @examples
#' data_toy <- c(rnorm(10, -3, 1), rnorm(10, 3, 1))
#' grid <- seq(-7, 7, length.out = 50)
#' fit <- PYdensity(y = data_toy, mcmc = list(niter = 100,
#' nburn = 10, nupd = 100), output = list(grid = grid))
#' class(fit)
#' partition(fit)
#'
#' @export
#'
# based on Wade's mcclust.ext package minVI function
partition.BNPdens <- function(object, dist = "VI", max_k = NULL, ...) {
# clean the clusters and compute PSM
clean_cl <- clean_partition(object$clust) + 1
psm_mat <- BNPmix_psm(clean_cl)
if(dist == "VI"){
if(is.null(max_k)) max_k <- ceiling(dim(psm_mat)[1]/8)
# complete linkage VI
hclust_comp <- hclust(as.dist(1 - psm_mat), method="complete")
cls_comp <- t(apply(matrix(1:max_k), 1, function(x) cutree(hclust_comp, k=x)))
VI_comp <- BNPmix_VI_LB(cls_comp, psm_mat)
val_comp <- min(VI_comp)
complete <- cls_comp[which.min(VI_comp),]
# avg linkage VI
hclust_avg <- hclust(d = as.dist(1 - psm_mat), method = "average")
cls_avg <- t(apply(matrix(1:max_k),1,function(x) cutree(hclust_avg,k=x)))
VI_avg <- BNPmix_VI_LB(cls_avg,psm_mat)
val_avg <- min(VI_avg)
average <- cls_avg[which.min(VI_avg),]
# draw VI
VI_draws <- BNPmix_VI_LB(clean_cl, psm_mat)
val_draws <- min(VI_draws)
draw <- clean_cl[which.min(VI_draws),]
output <- BNPpart(partitions = rbind(as.numeric(as.factor(complete)),
as.numeric(as.factor(average)),
as.numeric(as.factor(draw))),
scores = c(val_comp, val_avg, val_draws),
psm = psm_mat)
return(output)
} else if(dist == "Binder"){
if(is.null(max_k)) max_k <- ceiling(dim(psm_mat)[1]/8)
# complete linkage BINDER
hclust_comp <- hclust(as.dist(1 - psm_mat), method="complete")
cls_comp <- t(apply(matrix(1:max_k), 1, function(x) cutree(hclust_comp, k=x)))
BIN_comp <- BNPmix_BIN(cls_comp, psm_mat)
val_comp <- min(BIN_comp)
complete <- cls_comp[which.min(BIN_comp),]
    # avg linkage BINDER
hclust_avg <- hclust(d = as.dist(1 - psm_mat), method = "average")
cls_avg <- t(apply(matrix(1:max_k),1,function(x) cutree(hclust_avg,k=x)))
BIN_avg <- BNPmix_BIN(cls_avg,psm_mat)
val_avg <- min(BIN_avg)
average <- cls_avg[which.min(BIN_avg),]
# draw BINDER
BIN_draws <- BNPmix_BIN(clean_cl, psm_mat)
val_draws <- min(BIN_draws)
draw <- clean_cl[which.min(BIN_draws),]
output <- BNPpart(partitions = rbind(as.numeric(as.factor(complete)),
as.numeric(as.factor(average)),
as.numeric(as.factor(draw))),
scores = c(val_comp, val_avg, val_draws),
psm = psm_mat)
return(output)
}
}
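# Note on the posterior similarity matrix (PSM) used above: entry (i, j) is the
# fraction of MCMC draws in which observations i and j share a cluster.
# BNPmix_psm computes it in C++; a base-R sketch of the same quantity, assuming
# `clust` is an iterations-by-observations matrix of cluster labels:
if (FALSE) {
  psm_base <- function(clust) {
    n <- ncol(clust)
    psm <- matrix(0, n, n)
    for (s in seq_len(nrow(clust))) {
      psm <- psm + outer(clust[s, ], clust[s, ], "==")  # 1 if same label
    }
    psm / nrow(clust)
  }
}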
# -----------------------------------------------------------------------
# CALIBRATION
# -----------------------------------------------------------------------
#'
#' Pitman-Yor prior elicitation
#'
#' @description The function \code{PYcalibrate} elicits the strength parameter of the Pitman-Yor
#' process, given the discount parameter and the prior expected number of clusters.
#'
#' @param Ek prior expected number of clusters;
#' @param n sample size;
#' @param discount discount parameter; default is set equal to 0, corresponding to a Dirichlet process prior.
#'
#' @rdname PYcalibrate
#'
#' @return
#' A named list containing the values of the strength and discount parameters.
#'
#' @examples
#' PYcalibrate(5, 100)
#'
#' PYcalibrate(5, 100, 0.5)
#'
#' @export
#'
PYcalibrate <- function(Ek, n, discount = 0){
if(!is.numeric(Ek)) stop("Ek must be numeric")
if(!is.numeric(n)) stop("n must be numeric")
if(!is.numeric(discount)) stop("discount must be numeric")
rfa <- function(theta, discount, n) prod( 1 + discount/(theta + 0:(n-1)))
if(discount == 0){
result <- try(uniroot(function(x) sum(x/(x+(1:n)-1)) - Ek, interval=c(0.0001,100))$root)
} else {
result <- try(uniroot(function(x) x/discount*(rfa(x,discount,n)-1) - Ek, interval=c(-discount+0.000001,100))$root)
}
return(list(strength = result, discount = discount))
}
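# For the Dirichlet process (discount = 0) the prior expected number of
# clusters is E[K_n] = sum_{i=1}^{n} theta / (theta + i - 1), which is exactly
# the identity that PYcalibrate inverts with uniroot. A quick sanity check
# (illustrative sketch):
if (FALSE) {
  cal <- PYcalibrate(Ek = 5, n = 100)
  sum(cal$strength / (cal$strength + (1:100) - 1))  # approximately 5
}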
# ---- source file: /scratch/gouwar.j/cran-all/cranData/BNPmix/R/utilities.R ----
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
aster_cpp <- function(matrix, tree_width = 0L, proc = 1L, s = 0, n = 0L, ss = 0L) {
.Call('BNSL_aster_cpp', PACKAGE = 'BNSL', matrix, tree_width, proc, s, n, ss)
}
aster_cpp_p <- function(matrix, psl, tree_width = 0L, proc = 1L, s = 0, n = 0L, ss = 0L) {
.Call('BNSL_aster_cpp_p', PACKAGE = 'BNSL', matrix, psl, tree_width, proc, s, n, ss)
}
gc <- function(n, a) {
.Call('BNSL_gc', PACKAGE = 'BNSL', n, a)
}
gc_all <- function(cc, a) {
.Call('BNSL_gc_all', PACKAGE = 'BNSL', cc, a)
}
kruskal <- function(W) {
.Call('BNSL_kruskal', PACKAGE = 'BNSL', W)
}
empirical_mi <- function(x, y) {
.Call('BNSL_empirical_mi', PACKAGE = 'BNSL', x, y)
}
mi <- function(x, y, proc = 0L) {
.Call('BNSL_mi', PACKAGE = 'BNSL', x, y, proc)
}
MDL_mi <- function(x, y, m_x = 0L, m_y = 0L) {
.Call('BNSL_MDL_mi', PACKAGE = 'BNSL', x, y, m_x, m_y)
}
Jeffreys_mi <- function(x, y, m_x = 0L, m_y = 0L) {
.Call('BNSL_Jeffreys_mi', PACKAGE = 'BNSL', x, y, m_x, m_y)
}
BDeu_mi <- function(x, y, m_x = 0L, m_y = 0L, d = 1) {
.Call('BNSL_BDeu_mi', PACKAGE = 'BNSL', x, y, m_x, m_y, d)
}
empirical_cmi <- function(x, y, z) {
.Call('BNSL_empirical_cmi', PACKAGE = 'BNSL', x, y, z)
}
cmi <- function(x, y, z, proc = 0L) {
.Call('BNSL_cmi', PACKAGE = 'BNSL', x, y, z, proc)
}
MDL_cmi <- function(x, y, z, m_x = 0L, m_y = 0L, m_z = 0L) {
.Call('BNSL_MDL_cmi', PACKAGE = 'BNSL', x, y, z, m_x, m_y, m_z)
}
Jeffreys_cmi <- function(x, y, z, m_x = 0L, m_y = 0L, m_z = 0L) {
.Call('BNSL_Jeffreys_cmi', PACKAGE = 'BNSL', x, y, z, m_x, m_y, m_z)
}
BDeu_cmi <- function(x, y, z, m_x = 0L, m_y = 0L, m_z = 0L, d = 1) {
.Call('BNSL_BDeu_cmi', PACKAGE = 'BNSL', x, y, z, m_x, m_y, m_z, d)
}
mi_matrix <- function(df, proc = 0L) {
.Call('BNSL_mi_matrix', PACKAGE = 'BNSL', df, proc)
}
cont_mi <- function(x, y) {
.Call('BNSL_cont_mi', PACKAGE = 'BNSL', x, y)
}
intervals <- function(level, array) {
.Call('BNSL_intervals', PACKAGE = 'BNSL', level, array)
}
binary_search <- function(array, pattern) {
.Call('BNSL_binary_search', PACKAGE = 'BNSL', array, pattern)
}
parent <- function(df0, h, tw = 0L, proc = 0L) {
.Call('BNSL_parent', PACKAGE = 'BNSL', df0, h, tw, proc)
}
fftable <- function(df, w) {
.Call('BNSL_fftable', PACKAGE = 'BNSL', df, w)
}
Bayes_score <- function(T, m, proc = 0L, s = 0, n = 0L, ss = 0L) {
.Call('BNSL_Bayes_score', PACKAGE = 'BNSL', T, m, proc, s, n, ss)
}
Jeffreys_score <- function(T, m) {
.Call('BNSL_Jeffreys_score', PACKAGE = 'BNSL', T, m)
}
MDL_score <- function(T, m, s, n) {
.Call('BNSL_MDL_score', PACKAGE = 'BNSL', T, m, s, n)
}
BDeu_score <- function(T, m, ss) {
.Call('BNSL_BDeu_score', PACKAGE = 'BNSL', T, m, ss)
}
bound <- function(T, m, proc = 0L, n = 0L, ss = 1L) {
.Call('BNSL_bound', PACKAGE = 'BNSL', T, m, proc, n, ss)
}
Jeffreys_bound <- function(T, m) {
.Call('BNSL_Jeffreys_bound', PACKAGE = 'BNSL', T, m)
}
quotient_Jeffreys_bound <- function(T, m, n, ss) {
.Call('BNSL_quotient_Jeffreys_bound', PACKAGE = 'BNSL', T, m, n, ss)
}
MDL_bound <- function(T, m, n, ss) {
.Call('BNSL_MDL_bound', PACKAGE = 'BNSL', T, m, n, ss)
}
BDeu_bound <- function(T, m) {
.Call('BNSL_BDeu_bound', PACKAGE = 'BNSL', T, m)
}
# ---- source file: /scratch/gouwar.j/cran-all/cranData/BNSL/R/RcppExports.R ----
# coerce a data frame to a numeric matrix element-wise
# (factor columns become their underlying integer codes)
makematrix = function(df) {
nrow = dim(df)[1]
ncol = dim(df)[2]
m <- matrix(0, nrow = nrow, ncol = ncol)
for (i in 1:nrow) {
for (j in 1:ncol) {
m[i, j] = as.numeric(df[i, j])
}
}
return(m)
}
bnsl = function(df, tw = 0, proc=1, s = 0, n = 0, ss = 1) {
m = data.matrix(df)
v = aster_cpp(m, tw, proc, s, n, ss)
g = empty.graph(names(df))
i = 1
while (i <= length(v)/2) {
g = set.arc(g, names(df)[v[2 * i - 1] + 1], names(df)[v[2 * i] + 1])
i = i + 1
}
return(g)
}
bnsl_p = function(df, psl, tw = 0, proc=1, s = 0, n = 0, ss = 1) {
m = data.matrix(df)
v = aster_cpp_p(m, psl, tw, proc, s, n, ss)
g = empty.graph(names(df))
i = 1
while (i <= length(v)/2) {
g = set.arc(g, names(df)[v[2 * i - 1] + 1], names(df)[v[2 * i] + 1])
i = i + 1
}
return(g)
}
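# Minimal usage sketch for bnsl() (illustrative only; assumes the bnlearn
# package, which provides empty.graph(), set.arc() and the `asia` data set,
# is attached; tw = 2 is an arbitrary tree-width bound chosen for the example):
if (FALSE) {
  library(bnlearn)
  data(asia)
  g <- bnsl(asia, tw = 2)
  arcs(g)  # inspect the learned arcs
}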
# ---- source file: /scratch/gouwar.j/cran-all/cranData/BNSL/R/aster.R ----
parent.set=function(df,h,tw=0,proc=1){
N=ncol(df);
for(i in 1:N)df[[i]]=as.integer(df[[i]]);
df=as.matrix(df);
df=parent(df,h,tw,proc);
if(tw==0)tw=N-1;
S=1; for(j in 1:tw)S=S+choose(N-1,j)
df[1:S,]
}
FFtable=function(df){
N=ncol(df);
for(i in 1:N)df[[i]]=as.integer(df[[i]]);
df=as.matrix(df);
tb=table(df[,ncol(df)])
m=length(tb);
fftable(df,m)
}
# ---- source file: /scratch/gouwar.j/cran-all/cranData/BNSL/R/parent_set.R ----
# s, te, ti, sm, sinusoid, DM, match.call.defaults, quiet
s<-function(...,data,knots=NULL,absorb.cons=FALSE, scale.penalty=TRUE,n=nrow(data),dataX=NULL,
null.space.penalty=FALSE,sparse.cons=0,diagonal.penalty=FALSE,apply.by=TRUE,modCon=0,
k=-1,fx=FALSE,bs="tp",m=NA,by=NA,xt=NULL,id=NULL,sp=NULL,pc=NULL){
a<-mgcv::s(...,k=k,fx=fx,bs=bs,m=m,by=by,xt=xt,id=id,sp=sp,pc=pc)
a$by<-deparse(substitute(by), backtick = TRUE, width.cutoff = 500)
b<- mgcv::smoothCon(a,data=data,knots=knots,absorb.cons=absorb.cons,
scale.penalty=scale.penalty,n=n,dataX=dataX,
null.space.penalty=null.space.penalty,sparse.cons=sparse.cons,
diagonal.penalty=diagonal.penalty,apply.by=apply.by,modCon=modCon)
return(b)
}
te<-function(...,data,knots=NULL,absorb.cons=FALSE, scale.penalty=TRUE,n=nrow(data),dataX=NULL,
null.space.penalty=FALSE,sparse.cons=0,diagonal.penalty=FALSE,apply.by=TRUE,modCon=0,
k=NA,bs="cr",m=NA,d=NA,by=NA,fx=FALSE,np=TRUE,xt=NULL,id=NULL,sp=NULL,pc=NULL){
a<-mgcv::te(...,k=k,bs=bs,m=m,d=d,by=by,fx=fx,np=np,xt=xt,id=id,sp=sp,pc=pc)
a$by<-deparse(substitute(by), backtick = TRUE, width.cutoff = 500)
b<- mgcv::smoothCon(a,data=data,knots=knots,absorb.cons=absorb.cons,
scale.penalty=scale.penalty,n=n,dataX=dataX,
null.space.penalty=null.space.penalty,sparse.cons=sparse.cons,
diagonal.penalty=diagonal.penalty,apply.by=apply.by,modCon=modCon)
return(b)
}
ti<-function(...,data,knots=NULL,absorb.cons=FALSE, scale.penalty=TRUE,n=nrow(data),dataX=NULL,
null.space.penalty=FALSE,sparse.cons=0,diagonal.penalty=FALSE,apply.by=TRUE,modCon=0,
k=NA,bs="cr",m=NA,d=NA,by=NA,fx=FALSE,np=TRUE,xt=NULL,id=NULL,sp=NULL,mc=NULL,pc=NULL){
a<-mgcv::ti(...,k=k,bs=bs,m=m,d=d,by=by,fx=fx,np=np,xt=xt,id=id,sp=sp,mc=mc,pc=pc)
a$by<-deparse(substitute(by), backtick = TRUE, width.cutoff = 500)
b<- mgcv::smoothCon(a,data=data,knots=knots,absorb.cons=absorb.cons,
scale.penalty=scale.penalty,n=n,dataX=dataX,
null.space.penalty=null.space.penalty,sparse.cons=sparse.cons,
diagonal.penalty=diagonal.penalty,apply.by=apply.by,modCon=modCon)
return(b)
}
sm<-function(...,k=10,knots=NULL,bs="rd"){
pf <- parent.frame()
vars<-as.list(substitute(list(...)))[-1]
d<-length(vars)
if (d > 2) stop("Up to bivariate covariates supported; other arguments are k, knots and bs")
term<-NULL
for (i in 1:d){
term[i]<-deparse(vars[[i]],backtick=TRUE,width.cutoff=500)
term[i] <- attr(terms(reformulate(term[i])),"term.labels")
}
if (length(unique(term))!=d) stop("Repeated variables are not permitted")
nknots<-k
RNK <- round(nknots)
if (RNK!=nknots) warning("Number of knots should be integer and has been rounded")
nknots <- RNK
label<-paste("sm(",term[1],sep="")
if (d>1) for (i in 2:d) label<-paste(label,",",term[i],sep="")
label<-paste(label,")",sep="")
is.D<-rep(0,d)
x2<-NULL
type<-1
for (i in 1:d){
mm<-model.matrix(~eval(vars[[i]],pf))
lvs<-levels(eval(vars[[i]],pf))
if (length(lvs)==0) {colnames(mm)[2]<-c(term[i]); is.D[i]<-0}
if (length(lvs)>=2) {colnames(mm)<-paste(term[i],lvs,sep=""); is.D[i]<-1}
x2<-cbind(x2,mm[,-1,drop=FALSE])
}
X<-x2
if (d==1 & nknots>0){
if (is.null(knots)){
      # equally spaced probabilities; knots are then placed at the corresponding sample quantiles
      knots<-seq(from = 0, to = 1, length = nknots)
knots<-unique(round(quantile(x2,knots,type=type),5))
knots<-data.frame(knots=knots)
}
if (!is.null(knots)){
knots<-data.frame(knots=knots)
}
nknots<-dim(knots)[1]
if (bs=="pl"){
      for (i in 1:nknots) X<-cbind(X,pmax(x2-knots[i,],rep(0,length(x2))))
}
else if (bs=="rd"){
for (i in 1:nknots){
D<-(x2-knots[i,])^2
D2<-D*log(D)
D2[which(D==0)]<-0
X<-cbind(X,D2)
}
}
else stop("chosen bs not supported")
}
else if (d==2 & nknots>0){
if (!is.null(knots)) nknots<-c(dim(knots))
if (is.null(knots) && length(nknots)==1) nknots<-rep(nknots,2)
if (is.null(knots)){
      # equally spaced probabilities for each margin; knots are placed at sample quantiles
      knots1<-seq(from = 0, to = 1, length = nknots[1])
      knots2<-seq(from = 0, to = 1, length = nknots[2])
if (dim(x2)[2] == 2){
knots1<-unique(round(quantile(x2[,1],knots1,type=type),5))
knots2<-unique(round(quantile(x2[,2],knots2,type=type),5))
knots<-as.matrix(expand.grid(knots1,knots2))
}else if (is.D[1] == 1){
knots1<-unique(x2[,-dim(x2)[2]])
knots2<-unique(round(quantile(x2[,dim(x2)[2],drop=FALSE],knots2,type=type),5))
knots <- cbind(knots1[rep(1:nrow(knots1), length(knots2)), ], rep(knots2, each = nrow(knots1)))
}else if (is.D[2] == 1){
knots1<-unique(round(quantile(x2[,1],knots1,type=type),5))
knots2<-unique(x2[,-1])
knots <- cbind(rep(knots1, nrow(knots2)), knots2[rep(1:nrow(knots2), length(knots1)), ])
}
colnames(knots)<-colnames(x2)
}
if (bs=="rd"){
for (i in 1:NROW(knots)){
D<-apply((x2-matrix(as.numeric(knots[i,]),nrow=NROW(x2),ncol=NCOL(x2),byrow=TRUE))^2,1,sum)
D2<-D*log(D)
D2[which(D==0)]<-0
X<-cbind(X,D2)
}
} else stop("for bivariate smoothers only rd supported")
}
X<-data.frame(X)
colnames(X)[1:NCOL(x2)]<-colnames(x2)
colnames(X)[-c(1:NCOL(x2))]<-paste(label,1:(NCOL(X)-NCOL(x2)),sep=".")
XK<-list(X=X,knots=knots,count=d,vars=unlist(term),is.D=is.D,label=label)
return(XK)
}
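# The "rd" basis built above is the thin-plate radial basis: for each knot the
# regressor is r^2 * log(r^2), with r the distance of the covariate from the
# knot (defined as 0 at r = 0). A self-contained univariate sketch, with
# hypothetical inputs `x` and `kappa`:
if (FALSE) {
  rd_basis <- function(x, kappa) {
    sapply(kappa, function(k) {
      D <- (x - k)^2
      D2 <- D * log(D)
      D2[D == 0] <- 0  # limit of r^2 log(r^2) as r -> 0
      D2
    })
  }
  rd_basis(seq(0, 1, length.out = 11), kappa = c(0.25, 0.5, 0.75))  # 11 x 3 block
}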
sinusoid<-function(..., harmonics = 1, amplitude = 1, period = 0, periodRange = NULL, breaks = NULL, knots = NULL){
pf <- parent.frame()
vars<-as.list(substitute(list(...)))[-1]
d<-length(vars)
if (d > 1) stop("univariate effects supported; other arguments are harmonics, amplitude, period, breaks")
if (amplitude > 1 && !is.null(breaks)) stop("cannot do varying amplitude with breaks")
#if (period == 0 && !is.null(breaks)) stop("cannot do unknown period with breaks")
term<-NULL
for (i in 1:d){
term[i]<-deparse(vars[[i]],backtick=TRUE,width.cutoff=500)
term[i] <- attr(terms(reformulate(term[i])),"term.labels")
}
if (length(unique(term))!=d) stop("repeated variables are not permitted")
if (harmonics <= 0) stop("harmonics should be a positive integer")
  if (harmonics!=round(harmonics)) {
    warning("number of harmonics should be a positive integer; it has been rounded")
    harmonics <- round(harmonics)
  }
periodUnknown <- 0
if (period <= 0) periodUnknown <- 1
if (periodUnknown == 1){
if (is.null(periodRange)) stop("in function sinusoid argument periodRange is required")
if (!sum(periodRange > 0)==2) stop("argument periodRange is a vector of two positive values")
#time <- eval(vars[[1]],pf)
#pdf(file = NULL)
#ssp <- spectrum(get('y', envir = parent.frame()))
#dev.off()
#period <- 1/ssp$freq[which.max(ssp$spec)]
#period <- time[min(period,length(t))] - time[1]
period <- mean(periodRange)
}
label<-paste("sinusoid(",term[1],sep="")
if (d>1) for (i in 2:d) label<-paste(label,",",term[i],sep="")
label<-paste(label,")",sep="")
is.D<-rep(0,d)
X<-NULL
Xnames<-NULL
for (k in 1:harmonics){
X<-cbind(X,sin(2 * k * pi * eval(vars[[1]],pf)/period))
X<-cbind(X,cos(2 * k * pi * eval(vars[[1]],pf)/period))
Xnames<-c(Xnames,paste(paste("sin(",2*k,sep=""),"pi", vars, "/ p)",sep=" "),paste(paste("cos(",2*k,sep=""),"pi", vars, "/ p)",sep=" "))
}
X<-data.frame(X)
colnames(X)[1:NCOL(X)]<-Xnames
Dynamic <- NULL
if (amplitude > 1){
XK<-sm(eval(parse(text=vars),pf),k=amplitude,knots=knots)
Design.Xamp<-XK$X
knots<-XK$knots
Dynamic<-sweep(Design.Xamp, MARGIN=1, X[,1], `*`)
interMat2<-Dynamic/sqrt(2)
for(j in 2:NCOL(X)){
interMat<-sweep(Design.Xamp, MARGIN=1, X[,j], `*`)
interMat2<-interMat2+interMat/sqrt(2)
Dynamic<-cbind(Dynamic,interMat)
}
Dynamic <- Design.Xamp #1
X<-interMat2
colnames(X)[1:NCOL(X)] <- paste("sm(amplitude)",seq(1:(amplitude+1)),sep=".")
}
XK<-list(X=X,knots=knots,count=d,vars=unlist(term),is.D=is.D,label=label,
amplitude=amplitude,harmonics=harmonics,Dynamic=Dynamic,breaks=breaks,
period=period,periodUnknown=periodUnknown,periodRange=periodRange)
return(XK)
}
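# sinusoid() pairs sin(2 k pi t / p) and cos(2 k pi t / p) columns for
# k = 1, ..., harmonics. A stand-alone sketch of the same design block
# (hypothetical inputs t and period):
if (FALSE) {
  harmonic_design <- function(t, period, harmonics = 1) {
    X <- NULL
    for (k in seq_len(harmonics)) {
      X <- cbind(X, sin(2 * k * pi * t / period), cos(2 * k * pi * t / period))
    }
    X
  }
  harmonic_design(t = 1:24, period = 12, harmonics = 2)  # 24 x 4 matrix
}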
DM<-function(formula,data,n,knots=NULL,predInd=FALSE,meanVector,indicator,mvrmObj,centre=centre){
is.M <- NULL
count <- NULL
k <- 0
Rknots <- list()
vars <- list()
is.D <- list()
which.Spec<-list()
specials <- c('sm','s','te','ti','sinusoid')
trms<-terms.formula(formula,specials=specials)
attr(trms,"intercept") <- 1
nFactors <- dim(attr(trms,"factors"))[2]
if (attr(trms,"response")){
y <- with(data,eval(attr(trms,"variables")[[2]]))
n <- length(y)
}else{
y <- NULL
n <- n
}
labels <- colnames(attr(trms,"factors"))
formula.terms <- attr(trms,"term.labels")
Design.X <- matrix(1,ncol=1,nrow=n)
colnames(Design.X) <- "(Intercept)"
assign <- 0
assign2 <- 0 # for 1-hot encoding
assign3 <- 0 # for breaking up couples of sinusoidal terms
repsX <- NULL # repeats for fixing differences between NG abd NG2
isSin <- 0
amplitude <- 0
harmonics <- 0
startSin <- 0
Dynamic <- NULL
breaks <- NULL
period <- 0
periodUnknown <- 0
periodRange <- 0
if (!is.null(nFactors)) isSin<-rep(0,nFactors)
if (!is.null(nFactors)){
for (i in 1:nFactors){
trms2<-drop.terms(trms, dropx = -i)
is.spec<-sum(unlist(lapply(attr(trms2,"specials"),sum))) > 0
is.sm<-!is.null(attr(trms2,"specials")$sm)
is.sin<-!is.null(attr(trms2,"specials")$sinusoid)
is.s<-!is.null(attr(trms2,"specials")$s) || !is.null(attr(trms2,"specials")$te) ||!is.null(attr(trms2,"specials")$ti)
is.M.i<-0
if (!is.spec && (length(with(data,eval(attr(trms2,"variables"))[[1]]))/n > 1)) is.M.i <-1
is.M <- c(is.M, is.M.i)
repsX <- c(repsX,i)
if (!is.spec){
Design.Xt<-model.matrix(trms2,data=data)
remove.cols<-which(c(apply(Design.Xt,2,sd)==0))
if (length(remove.cols)>0 && !predInd) Design.Xt<-Design.Xt[,-remove.cols,drop=FALSE]
count<-c(count,1)
vars[[i]]<-as.character(attr(trms2,"variables")[[2]])
LF<-which(vars[[i]]=="as.factor")
if (length(LF)>0) {vars[[i]]<-vars[[i]][-which(vars[[i]]=="as.factor")];labels[i]<-vars[[i]]}
is.D.i<-0
lvs<-levels(with(data,eval(attr(trms2,"variables")))[[1]])
if (length(lvs)>=2) is.D.i<-1
if (length(c(unique(with(data,eval(attr(trms2,"variables")))[[1]])))==2) is.D.i<-1
is.D[[i]]<-is.D.i
which.Spec[[i]]<--99
}
if (is.sm){
XK<-with(data,eval(trms2[1][[2]]))
Design.Xt<-XK$X
count<-c(count,XK$count)
vars[[i]]<-XK$vars
is.D[[i]]<-XK$is.D
labels[i]<-XK$label
Rknots[[k<-k+1]]<- XK$knots
which.Spec[[i]]<-k
}
if (is.sin){
XK<-with(data,eval(trms2[1][[2]]))
Design.Xt<-XK$X
count<-c(count,XK$count)
vars[[i]]<-XK$vars
is.D[[i]]<-XK$is.D
labels[i]<-XK$label
amplitude<-XK$amplitude
harmonics<-XK$harmonics
breaks<-XK$breaks
period<-XK$period
periodUnknown<-XK$periodUnknown
periodRange<-XK$periodRange
repsX<-c(repsX,rep(i, harmonics - 1))
isSin[i]<-1 #is it a sinusoidal term?
startSin<-NCOL(Design.X)
which.Spec[[i]]<--99
Dynamic<-XK$Dynamic
if (amplitude > 1){
Rknots[[k<-k+1]]<- XK$knots
which.Spec[[i]]<-k
}
if (amplitude == 1) is.M[i] <- 1 #convention to avoid centering sinusoidals with fixed amplitude
}
if (is.s){
dataPr<-match(c("data"), names(trms2[1][[2]]), 0L)
trmWD<-trms2[1][[2]]
if (!predInd){
if (!dataPr){
trmWD<-deparse(trms2[1][[2]], backtick = TRUE, width.cutoff = 500)
trmWD<-substr(trmWD,1,nchar(trmWD)-1)
trmWD<-paste(trmWD,", data=data)")
}
evalS<-eval(parse(text=trmWD))
NL<-1
if (!evalS[[1]]$by=="NA") NL<-max(1,length(levels(with(data,eval(as.name(evalS[[1]]$by))))))
Design.Xt<-NULL
for (j in 1:NL) Design.Xt<-cbind(Design.Xt,evalS[[j]]$X)
vars[[i]]<-evalS[[1]]$term
Rknots[[k<-k+1]]<-data.frame(evalS[[1]]$xp)
try(colnames(Rknots[[k]])<-vars[[i]],silent=TRUE)
count<-c(count,evalS[[1]]$dim)
is.D.i<-rep(0,evalS[[1]]$dim)
labels[i]<-evalS[[1]]$label
if (!evalS[[1]]$by=="NA"){
count[length(count)]<-count[length(count)]+1
vars[[i]]<-c(evalS[[1]]$term,evalS[[1]]$by)
is.D.i<-c(is.D.i,1)
labels[i]<-substr(labels[i],1,nchar(labels[i])-1)
}
is.D[[i]]<-is.D.i
which.Spec[[i]]<-k
is.M[i] <- 1 #convention to avoid centering objects from mgcv
}
if (predInd){
if (!dataPr){
trmWD<-deparse(trms2[1][[2]], backtick = TRUE, width.cutoff = 500)
trmWD<-substr(trmWD,1,nchar(trmWD)-1)
trmWD<-paste(trmWD,", data=mvrmObj$data)")
}
evalS<-eval(parse(text=trmWD))
NL<-1
if (!evalS[[1]]$by=="NA") NL<-max(1,length(levels(with(data,eval(as.name(evalS[[1]]$by))))))
Design.Xt<-NULL
for (j in 1:NL) Design.Xt<-cbind(Design.Xt,mgcv::PredictMat(evalS[[j]],data=data.frame(data)))
}
#remove.cols<-which(c(apply(Design.Xt,2,sd)==0))
colnames(Design.Xt)<-paste(labels[i],1:NCOL(Design.Xt),sep=".")
}
Design.X<-cbind(Design.X,Design.Xt)
assign<-c(assign,rep(i,NCOL(Design.Xt)))
ifelse(is.spec,assign2<-c(assign2,rep(max(assign2)+1,NCOL(Design.Xt))),
assign2<-c(assign2,max(assign2)+c(1:NCOL(Design.Xt))))
ifelse(is.sin && amplitude==1,
assign3<-c(assign3,max(assign3)+rep(seq(from=1,to=NCOL(Design.Xt)/2),each=2)),
assign3<-c(assign3,rep(max(assign3)+1,NCOL(Design.Xt))))
}
    if (sum(isSin) > 1) stop("only one sinusoid term is allowed")
}
remove.cols <- which(colnames(Design.X) == "(Intercept)")
if (length(remove.cols) > 1) Design.X<-Design.X[,-remove.cols[-1],drop=FALSE]
if (missing(meanVector)) meanVector<-apply(as.matrix(Design.X),2,mean)
if (missing(indicator)){
unique.values<-unlist(lapply(apply(Design.X,2,unique),length))
indicator<-c(unique.values<=2)
}
if (sum(is.M)){
mat.arg<-which(is.M==1)
for (i in 1:length(mat.arg))
indicator[which(assign==mat.arg[i])]<-TRUE
}
if (centre) Design.X[,!indicator]<-Design.X[,!indicator]-matrix(1,nrow=n)%*%matrix(meanVector[!indicator],nrow=1)
if (!centre) meanVector <- 0*meanVector
rDyn <- NULL
if (!is.null(Dynamic)) rDyn <- as.matrix(Dynamic)
return(list(y=y,X=Design.X,assign=assign,Rknots=Rknots,meanVector=meanVector,
indicator=indicator,labels=labels,count=count,vars=vars,is.D=is.D,
which.Spec=which.Spec,formula.terms=formula.terms,assign2=assign2,
assign3=assign3,repsX=repsX,isSin=isSin,Dynamic=rDyn,
DSP=c(amplitude,startSin,harmonics),breaks=breaks,
period=period,periodUnknown=periodUnknown,periodRange=periodRange))
}
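# DM() is the internal design-matrix builder used by the fitting functions,
# which always pass `centre` explicitly. A minimal direct call (illustrative
# sketch only):
if (FALSE) {
  d <- data.frame(x = rnorm(20))
  dm <- DM(formula = ~ x, data = d, n = 20, centre = TRUE)
  head(dm$X)  # intercept plus centred x column
  dm$labels   # term labels
}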
match.call.defaults <- function(...) {
call <- evalq(match.call(expand.dots = FALSE), parent.frame(1))
formals <- evalq(formals(), parent.frame(1))
for(k in setdiff(names(formals), names(call)))
call[k] <- list( formals[[k]] )
match.call(sys.function(sys.parent()), call)
}
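# match.call.defaults() behaves like match.call() but also records arguments
# left at their defaults. Illustrative sketch with a hypothetical function f():
if (FALSE) {
  f <- function(a, b = 2) match.call.defaults()
  f(1)  # f(a = 1, b = 2)
}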
quiet <- function(x) {
sink(tempfile())
on.exit(sink())
invisible(force(x))
}
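# quiet() evaluates an expression while discarding anything printed to the
# console (output is sunk to a temporary file), e.g.:
if (FALSE) {
  quiet(print("this is not shown"))
}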
# ---- source file: /scratch/gouwar.j/cran-all/cranData/BNSP/R/basics.R ----
# lmrm, postSigma, postSigma2, plot3.bcmg
lmrm <- function(formula,
data=list(),centre=TRUE,
id,
time,
sweeps,burn=0,thin=1,seed,StorageDir,
c.betaPrior="IG(0.5,0.5*n*p)",
pi.muPrior="Beta(1,1)",
c.alphaPrior="IG(1.1,1.1)",
pi.phiPrior="Beta(1,1)",
c.psiPrior="HN(2)",
sigmaPrior="HN(2)",
pi.sigmaPrior="Beta(1,1)",
corr.Model=c("common",nClust=1),
DP.concPrior="Gamma(5,2)",
c.etaPrior="IG(0.5,0.5*samp)",
pi.nuPrior="Beta(1,1)",
pi.fiPrior="Beta(1,1)",
c.omegaPrior="IG(1.1,1.1)",
sigmaCorPrior="HN(2)",
tuneCalpha,
tuneSigma2,
tuneCbeta,
tuneAlpha,
tuneSigma2R,
tuneR,
tuneCpsi,
tuneCbCor,
tuneOmega,
tuneComega,
tau,FT=1,...){
#Samples etc
if (thin <= 0) thin <- 1
thin <- as.integer(thin)
sweeps <- as.integer(sweeps)
burn <- as.integer(burn)
if (missing(sweeps)) stop("provide sweeps argument")
nSamples<-0
LASTsw<-0
if (sweeps > 0 && (sweeps-burn) > 0){
nSamples <- length(seq(1,(sweeps-burn),by=thin))
LASTsw<-seq(burn,by=thin,length.out=nSamples)[nSamples]
}
LASTWB<-1
# Match call
call <- match.call(expand.dots = FALSE)
call2 <- match.call.defaults()
#Data
if (!is.list(data) && !is.data.frame(data))
data <- as.data.frame(data)
#Formula & data dimensions
p <- length(as.Formula(formula))[1]
if (length(as.Formula(formula))[2] != 5) stop("ambiguous definition of regression model")
formula.m<-formula(as.Formula(formula),lhs=0,rhs=1)
formula.v<-formula(as.Formula(formula),lhs=0,rhs=2)
formula.d<-formula(as.Formula(formula),lhs=0,rhs=3)
formula.corm<-formula(as.Formula(formula),lhs=0,rhs=4)
formula.corv<-formula(as.Formula(formula),lhs=0,rhs=5)
# Responses, design matrices, indicators
Y<-NULL
varsY<-list()
for (i in 1:p){
trms<-terms.formula(formula(as.Formula(formula,~1),lhs=i,rhs=0))
Y<-cbind(Y,with(data,eval(attr(trms,"variables")[[2]])))
varsY[[i]] <- as.character(attr(trms,"variables")[[2]])
}
# Null Deviance
nullDeviance <- 0
for (i in 1:p) nullDeviance <- nullDeviance - 2 * logLik(lm(Y[,i] ~ 1))
# Response etc
if (missing(id)) stop("id needed")
id<-eval(substitute(id), data)
if (is.null(id)) stop("unrecognised id")
n<-length(unique(id))
#print("n");print(n)
niVec<-table(id)[1:n]
niMax<-max(niVec)
N<-sum(niVec)
cusumniVec<-c(0,cumsum(niVec))
cusumC<-c(0,cumsum(niVec*(niVec-1)/2))
# Times
varTime<-as.character(substitute(list(time))[-1])
time2<-eval(substitute(time), data)
LUT<-length(unique(time2))
SUT<-sort(unique(time2))
FUT<-table(time2)
    intime <- match(time2, SUT) - 1
intime2 <- rep(intime,each=p)
    #Design matrix C (dependence)
dataNew<-as.data.frame(cbind(data,lag=rnorm(dim(data)[1])))
bb<-DM(formula=formula.d,data=dataNew,n=dim(data)[1],centre=centre)
vars <- unlist(bb$vars)
if ("lag" %in% vars)
vars <- vars[-which(vars=="lag")]
C<-NULL
for (i in 1:n)
if (niVec[i] > 1) C<-rbind(C,cbind(cusumniVec[i]+rep(seq(2,niVec[i],1),seq(1,niVec[i]-1,1)),
cusumniVec[i]+unlist(sapply(1:(niVec[i]-1), function(i) seq(1,i,1)))
))
lag<-time2[C[,1]]-time2[C[,2]]
lag<-data.frame(lag=lag)
if (length(vars)>0){
for (k in 1:length(vars)){
V<-data[,vars[k]]
lag<-data.frame(lag,V[C[,1]])
colnames(lag)[k+1] <- vars[[k]]
}
}
if (niMax>1){
XYK<-DM(formula=formula.d,data=lag,n=dim(lag)[1],centre=centre)
C<-as.matrix(XYK$X)
}else{
XYK<-NULL
C<-NULL
}
Cknots<-XYK$Rknots
storeMeanVectorC<-XYK$meanVector
storeIndicatorC<-XYK$indicator
LK<-NCOL(C)
if (!is.null(C)) C<-t(C)
vecLK<-table(XYK$assign)
NK<-length(vecLK)
cusumVecLK<-c(0,cumsum(vecLK))
assignC<-XYK$assign
labelsC<-XYK$labels
countC<-XYK$count
varsC<-XYK$vars
is.Dc<-XYK$is.D
which.SpecC<-XYK$which.Spec
formula.termsC<-XYK$formula.terms
    #Design matrix X (mean)
XYK<-DM(formula=formula.m,data=data,n=N,centre=centre)
X<-as.matrix(XYK$X)
Xknots<-XYK$Rknots
storeMeanVectorX<-XYK$meanVector
storeIndicatorX<-XYK$indicator
LG<-NCOL(X)-1
vecLG<-table(XYK$assign)[-1]
NG<-length(vecLG)
cusumVecLG<-c(0,cumsum(vecLG))
assignX<-XYK$assign
labelsX<-XYK$labels
countX<-XYK$count
varsX<-XYK$vars
is.Dx<-XYK$is.D
which.SpecX<-XYK$which.Spec
formula.termsX<-XYK$formula.terms
#print("2")
    #Design matrix Z (variances)
ZK<-DM(formula=formula.v,data=data,n=N,centre=centre)
Z<-as.matrix(ZK$X)
Zknots<-ZK$Rknots
storeMeanVectorZ<-ZK$meanVector
storeIndicatorZ<-ZK$indicator
LD<-NCOL(Z)-1
labelsZ<-ZK$labels
countZ<-ZK$count
varsZ<-ZK$vars
which.SpecZ<-ZK$which.Spec
formula.termsZ<-ZK$formula.terms
is.Dz<-ZK$is.D
assignZ<-ZK$assign
#vecLD<-table(assignZ)[-1]
#cusumVecLD<-c(0,cumsum(vecLD))
ND<-max(assignZ)#length(vecLD)
repeats<-rep(1,ND)
repeats[which(is.Dz==1)]<-table(assignZ)[which(is.Dz==1)+1]
oneHE<-1; if (ND > 0) oneHE<-rep(c(1:ND),repeats)
assignZ2<-ZK$assign2
vecLD<-table(assignZ2)[-1]
cusumVecLD<-c(0,cumsum(vecLD))
ND2<-max(assignZ2)#length(vecLD)
isDz2<-as.numeric(is.Dz)[oneHE]
    if (length(isDz2) == 1 && is.na(isDz2)) isDz2 <- 1
MVLD <- 1
if (LD > 0 || LK > 0) MVLD<-max(vecLD,vecLK)
#Design matrix Xc (mean of correlations)
d<-p*(p-1)/2
t <- rep(SUT,each=d)
dataCor<-data.frame(t)
if (p > 1){
lab<-DM(formula=formula.corm,data=data,n=1,centre=centre)
if (length(lab$vars) > 0) colnames(dataCor)<-lab$vars[[1]]
XYK<-DM(formula=formula.corm,data=dataCor,n=(LUT*d),centre=centre)
Xc<-as.matrix(XYK$X)
}else{
XYK<-NULL
Xc<-NULL
}
Xcknots<-XYK$Rknots
storeMeanVectorXc<-XYK$meanVector
storeIndicatorXc<-XYK$indicator
LGc<-NCOL(Xc)-1
vecLGc<-table(XYK$assign)[-1]
NGc<-length(vecLGc)
cusumVecLGc<-c(0,cumsum(vecLGc))
assignXc<-XYK$assign
labelsXc<-XYK$labels
countXc<-XYK$count
varsXc<-XYK$vars
is.Dxc<-XYK$is.D
which.SpecXc<-XYK$which.Spec
formula.termsXc<-XYK$formula.terms
#Design matrix Zc (variance of correlations)
if (p > 1){
XYK<-DM(formula=formula.corv,data=dataCor,n=(LUT*d),centre=centre)
Zc<-as.matrix(XYK$X)
}else{
XYK<-NULL
Zc<-NULL
}
Zcknots<-XYK$Rknots
storeMeanVectorZc<-XYK$meanVector
storeIndicatorZc<-XYK$indicator
LDc<-NCOL(Zc)-1
vecLDc<-table(XYK$assign)[-1]
MVLD<-max(vecLDc,MVLD)
NDc<-length(vecLDc)
cusumVecLDc<-c(0,cumsum(vecLDc))
assignZc<-XYK$assign
labelsZc<-XYK$labels
countZc<-XYK$count
varsZc<-XYK$vars
is.Dzc<-XYK$is.D
which.SpecZc<-XYK$which.Spec
formula.termsZc<-XYK$formula.terms
#Initialize covariance & correlation matrix
#print("3")
LASTR<-LASTD<-LASTE<-rep(1,LUT)
if (p>1){
LASTR<-LASTD<-LASTE<-NULL
for (t in SUT){
Res<-NULL
for (i in 1:p){
RM<-grep("(",colnames(X),fixed=TRUE)[-1]
Xinit<-X[time2==t,]
if (length(RM) > 0) Xinit<-Xinit[,-RM]
lm1<-lm(Y[time2==t,i] ~ Xinit)
Res<-cbind(Res,residuals(lm1))
}
CR<-0.9*cov(Res)+0.1*diag(mean(eigen(cov(Res))$values),p)
LASTR<-c(LASTR,c(cov2cor(CR)))
D<-matrix(0,p,p)
diag(D)<-sqrt(diag(CR))
LASTD<-c(LASTD,c(D))
LASTE<-c(LASTE,c(CR))
#LASTmuR<-mean(LASTR[upper.tri(LASTR)])
#LASTsigma2R<-1
#if (p > 2) LASTsigma2R<-var(LASTR[upper.tri(LASTR)])
}
}
LASTAll<-c(LASTR,LASTD,LASTE)
#print("4")
#print(LASTAll)
#print("2")
#Prior for pi.mu
if (!length(pi.muPrior)==1 && !length(pi.muPrior)==NG && !length(pi.muPrior)==(p*NG))
stop("pi.muPrior has incorrect dimension")
pimu<-NULL
for (k in 1:length(pi.muPrior)){
sp<-strsplit(pi.muPrior[k],"Beta\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
pimu<-c(pimu,as.numeric(sp[[1]]))
}
if (length(pi.muPrior)==1) pimu<-rep(pimu,p*NG)
if (length(pi.muPrior)==NG) pimu<-rep(pimu,p)
#Prior for pi.phi
if (!length(pi.phiPrior)==1 && !length(pi.phiPrior)==NK && !length(pi.phiPrior)==(p*p*NK))
stop("pi.phiPrior has incorrect dimension")
piphi<-NULL
for (k in 1:length(pi.phiPrior)){
sp<-strsplit(pi.phiPrior[k],"Beta\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
piphi<-c(piphi,as.numeric(sp[[1]]))
}
if (length(pi.phiPrior)==1) piphi<-rep(piphi,p*p*NK)
if (length(pi.phiPrior)==NK) piphi<-rep(piphi,p*p)
#Prior for c.beta
if (!length(c.betaPrior)==1)
stop("c.betaPrior has incorrect dimension")
#c.betaPrior<-sub("samp","p*n",c.betaPrior)
sp<-strsplit(c.betaPrior,"IG\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
cetaParams<-c(as.numeric(sp[[1]][1]),eval(parse(text=sp[[1]][2])))
#Prior for c.eta
if (!length(c.etaPrior)==1)
stop("c.etaPrior has incorrect dimension")
c.etaPrior<-sub("samp","LUT*d",c.etaPrior)
sp<-strsplit(c.etaPrior,"IG\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
cetaCorParams<-c(as.numeric(sp[[1]][1]),eval(parse(text=sp[[1]][2])))
#Prior for pi.sigma
if (ND > 0){
if (!length(pi.sigmaPrior)==1 && !length(pi.sigmaPrior)==ND && !length(pi.sigmaPrior)==(p*ND))
stop("pi.sigmaPrior has incorrect dimension")
if (length(pi.sigmaPrior)==(p*ND))
pi.sigmaPrior<-pi.sigmaPrior[oneHE+rep(seq(from=0,to=p*ND-1,by=ND),each=length(oneHE))]
if (length(pi.sigmaPrior)==ND) pi.sigmaPrior<-pi.sigmaPrior[oneHE]
pisigma<-NULL
for (k in 1:length(pi.sigmaPrior)){
sp<-strsplit(pi.sigmaPrior[k],"Beta\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
pisigma<-c(pisigma,as.numeric(sp[[1]]))
}
if (length(pi.sigmaPrior)==1) pisigma<-rep(pisigma,p*ND2)
if (length(pi.sigmaPrior)==ND2) pisigma<-rep(pisigma,p)
}else{pisigma<-1}
#Prior for c.alpha
if (!length(c.alphaPrior)==1 && !length(c.alphaPrior)==p)
stop("c.alphaPrior has incorrect dimension")
specials<-c("HN","IG")
calphaParams<-NULL
HNca<-vector()
for (k in 1:length(c.alphaPrior)){
sp<-strsplit(c.alphaPrior[k],"\\(")
if (sp[[1]][1] %in% specials){
if (match(sp[[1]][1],specials)==1) HNca[k]<-1
if (match(sp[[1]][1],specials)==2) HNca[k]<-0
} else stop("unrecognised prior for c.alpha")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
calphaParams<-c(calphaParams,as.numeric(sp[[1]]))
}
if (length(c.alphaPrior)==1){
calphaParams<-rep(calphaParams,p)
HNca<-rep(HNca,p)
}
#Prior for cPsi
if (!length(c.psiPrior)==1 && !length(c.psiPrior)==(p*p))
stop("c.psiPrior has incorrect dimension")
specials<-c("HN","IG")
cpsiParams<-NULL
HNcpsi<-vector()
for (k in 1:length(c.psiPrior)){
sp<-strsplit(c.psiPrior[k],"\\(")
if (sp[[1]][1] %in% specials){
if (match(sp[[1]][1],specials)==1) HNcpsi[k]<-1
if (match(sp[[1]][1],specials)==2) HNcpsi[k]<-0
} else stop("unrecognised prior for c.psi")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
cpsiParams<-c(cpsiParams,as.numeric(sp[[1]]))
}
if (length(c.psiPrior)==1){
cpsiParams<-rep(cpsiParams,(p*p))
HNcpsi<-rep(HNcpsi,(p*p))
}
#Prior for sigma2_{zero k}
if (!length(sigmaPrior)==1 && !length(sigmaPrior)==p)
stop("sigmaPrior has incorrect dimension")
specials<-c("HN","IG")
sigmaParams<-NULL
HNsg<-vector()
for (k in 1:length(sigmaPrior)){
sp<-strsplit(sigmaPrior[k],"\\(")
if (sp[[1]][1] %in% specials){
if (match(sp[[1]][1],specials)==1) HNsg[k]<-1
if (match(sp[[1]][1],specials)==2) HNsg[k]<-0
} else stop("unrecognised prior for sigma^2")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
sigmaParams<-c(sigmaParams,as.numeric(sp[[1]]))
}
if (length(sigmaPrior)==1){
sigmaParams<-rep(sigmaParams,p)
HNsg<-rep(HNsg,p)
}
#Prior for pi.nu
if (!length(pi.nuPrior)==NGc) if (!length(pi.nuPrior)==1)
stop("pi.nuPrior has incorrect dimension")
pinu<-NULL
for (k in 1:length(pi.nuPrior)){
sp<-strsplit(pi.nuPrior[k],"Beta\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
pinu<-c(pinu,as.numeric(sp[[1]]))
}
if (length(pi.nuPrior)==1) pinu<-rep(pinu,NGc)
#Prior for pi.fi
if (!length(pi.fiPrior)==NDc) if (!length(pi.fiPrior)==1)
stop("pi.fiPrior has incorrect dimension")
pifi<-NULL
for (k in 1:length(pi.fiPrior)){
sp<-strsplit(pi.fiPrior[k],"Beta\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
pifi<-c(pifi,as.numeric(sp[[1]]))
}
if (length(pi.fiPrior)==1) pifi<-rep(pifi,ND)
#Prior for c.omega
if (!length(c.omegaPrior)==1)
stop("c.omegaPrior has incorrect dimension")
specials<-c("HN","IG")
sp<-strsplit(c.omegaPrior,"\\(")
if (sp[[1]][1] %in% specials){
if (match(sp[[1]][1],specials)==1) HNco<-1
if (match(sp[[1]][1],specials)==2) HNco<-0
} else stop("unrecognised prior for c.omega")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
comegaParams<-as.numeric(sp[[1]])
#Prior for sigma2Cor
if (!length(sigmaCorPrior)==1)
stop("sigmaCorPrior has incorrect dimension")
HNscor<-0
specials<-c("HN","IG")
sp<-strsplit(sigmaCorPrior,"\\(")
if (sp[[1]][1] %in% specials){
if (match(sp[[1]][1],specials)==1 && p > 1) HNscor<-1
if (match(sp[[1]][1],specials)==2) HNscor<-0
} else stop("unrecognised prior for sigma2Cor")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
sigmaCorParams<-as.numeric(sp[[1]])
#Cor model
corModels<-c("common","groupC","groupV")
mcm<-4+match(corr.Model[1],corModels)
if (p==1 || p==2) mcm <- 5
if (is.na(mcm)) stop("unrecognised correlation model")
H <- G <- 1
if (mcm==6){
H<-as.numeric(corr.Model[2])
if (H==1) {mcm<-5; warning("Common correlations model specified with nClust = 1")}
        if (is.na(H) || (!H%%1==0) || H==0) {H <- p*(p-1)/2; warning(paste("misspecified number of clusters; nClust set to", H))}
}
if (mcm==7){
G<-as.numeric(corr.Model[2])
if (G==1) {mcm<-5; warning("Common correlations model specified with nClust = 1")}
        if (is.na(G) || (!G%%1==0) || G==0) {G <- p; warning(paste("misspecified number of clusters; nClust set to", G))}
H<-G*(G-1)/2+G #min(d,G*(G-1)/2+G) #min(G,abs(p-G))
}
#Prior for alpha DP
if (mcm == 6 || mcm==7){
sp<-strsplit(DP.concPrior,"Gamma\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
DPparams<-as.numeric(sp[[1]])
DPparams <- c(DPparams,0.01)
}
#Seed
if (missing(seed)) seed<-as.integer((as.double(Sys.time())*1000+Sys.getpid()) %% 2^31)
# Storage directory & files
if (!missing(StorageDir)){
StorageDir <- path.expand(StorageDir)
ncharwd <- nchar(StorageDir)}
if (!missing(StorageDir)) if (!(substr(StorageDir,ncharwd,ncharwd)=="/")) StorageDir <- paste(StorageDir,"/",sep="")
if (!missing(StorageDir)) if (!dir.exists(StorageDir)) dir.create(StorageDir, recursive = TRUE)
if (missing(StorageDir)) stop("provide a storage directory via argument StorageDir")
FL <- c("gamma", "cbeta", "delta", "alpha", "sigma2", "calpha", "beta", "psi", "ksi", "cpsi", "R", "nu", "fi", "omega",
"sigma2R","ceta","comega","eta", "deviance",
"compAlloc", "nmembers", "DPconc",
"compAllocV","nmembersV",
"DE", "test","nu.ls","eta.ls","nmembers.ls","clusters","probs","tune",
"muR", "phi2")
for (i in 1:length(FL)){
oneFile <- paste(StorageDir, paste("BNSP",FL[i], "txt",sep="."),sep="/")
if (file.exists(oneFile)) file.remove(oneFile)
}
#Tuning Parameters
if (missing(tuneCalpha)) tuneCalpha<-rep(1,p)
if (!length(tuneCalpha)==p) tuneCalpha<-rep(mean(tuneCalpha),p)
if (missing(tuneSigma2)) tuneSigma2<-rep(1,p)
if (!length(tuneSigma2)==p) tuneSigma2<-rep(mean(tuneSigma2),p)
if (missing(tuneCbeta)) tuneCbeta<-100
if (ND>0){
if (missing(tuneAlpha) || !length(tuneAlpha)==ND || !length(tuneAlpha)==(ND*p)){
tuneAlpha<-rep(5,ND)
tuneAlpha[which(is.Dz==1)]<-1
}
if (length(tuneAlpha)==(ND*p))
tuneAlpha<-tuneAlpha[oneHE+rep(seq(from=0,to=p*ND-1,by=ND),each=length(oneHE))]
if (length(tuneAlpha)==ND) tuneAlpha<-tuneAlpha[oneHE]
if (length(tuneAlpha)==ND2) tuneAlpha<-rep(tuneAlpha,p)
}else{tuneAlpha<-1}
if (missing(tuneSigma2R)) tuneSigma2R<-1
if (missing(tuneR)) tuneR<-rep(40*(p+2)^3,LUT)
tuneR[which(tuneR<p+2)]<-p+2
if (!length(tuneR)==LUT) tuneR<-rep(mean(tuneR),LUT)
if (missing(tuneCpsi)) tuneCpsi<-rep(5,p*p)
if (!length(tuneCpsi)==(p*p)) tuneCpsi<-rep(mean(tuneCpsi),p*p)
if (missing(tuneCbCor)) tuneCbCor<-10
if (missing(tuneOmega)) tuneOmega<-rep(5,NDc)
if (!length(tuneOmega)==NDc) tuneOmega<-rep(mean(tuneOmega),NDc)
if (missing(tuneComega)) tuneComega<-1
if (missing(tau)) tau = 0.01
#Block size selection
#if (missing(blockSizeProbG)){
blockSizeProbG <- rep(0,LG)
blockSizeProbG[1:5]<-c(10,25,30,25,10)
#}
#if (missing(blockSizeProbD)){
blockSizeProbD <- rep(0,LD)
blockSizeProbD[1:5]<-c(10,25,30,25,10)
#}
#if (missing(blockSizeProbK)){
blockSizeProbK <- rep(0,LK)
blockSizeProbK[1:5]<-c(10,25,30,25,10)
#}
#if (missing(blockSizeProbGc)){
blockSizeProbGc <- rep(0,LGc)
blockSizeProbGc[1:5]<-c(10,25,30,25,10)
#}
#if (missing(blockSizeProbD)){
blockSizeProbDc <- rep(0,LDc)
blockSizeProbDc[1:5]<-c(10,25,30,25,10)
#}
maxBSG <- max(which(blockSizeProbG>0))
maxBSD <- max(which(blockSizeProbD>0))
maxBSK <- max(which(blockSizeProbK>0))
maxBSGc <- max(which(blockSizeProbGc>0))
maxBSDc <- max(which(blockSizeProbDc>0))
#Deviance
deviance <- c(0,0)
#Cont
cont <- 0
#tol
tol <- sqrt(.Machine$double.eps)
#Call C
if (mcm==5){
out<-.C("longmult",
as.integer(seed), as.character(StorageDir),
as.integer(c(sweeps,burn,thin,n,p,N,MVLD,mcm)),
as.integer(niVec), as.integer(cusumniVec), as.integer(intime2), as.integer(intime),
as.integer(c(niMax,LUT)), as.integer(FUT),
as.integer(cusumC), as.double(C), as.double(c(t(Y))), as.double(t(X)), as.double(Z),
as.double(Xc),as.double(Zc),
as.integer(c(LG,LD,LK,LGc,LDc)), as.integer(c(NG,ND2,NK,NGc,NDc)),
as.integer(vecLG), as.integer(vecLD), as.integer(vecLK), as.integer(vecLGc),as.integer(vecLDc),
as.integer(cusumVecLG), as.integer(cusumVecLD), as.integer(cusumVecLK), as.integer(cusumVecLGc),
as.integer(cusumVecLDc),
as.double(c(blockSizeProbG, blockSizeProbD, blockSizeProbK, blockSizeProbGc, blockSizeProbDc)),
as.integer(c(maxBSG,maxBSD,maxBSK,maxBSGc,maxBSDc)),
as.double(tuneCalpha), as.double(tuneSigma2), as.double(tuneCbeta), as.double(tuneAlpha),
as.double(tuneSigma2R),
as.double(tuneR), as.double(tuneCpsi), as.double(tuneCbCor), as.double(tuneOmega),
as.double(tuneComega),
as.double(pimu), as.double(cetaParams), as.double(pisigma), as.integer(HNca),
as.double(calphaParams),
as.integer(HNsg), as.double(sigmaParams), as.double(piphi), as.integer(HNcpsi),
as.double(cpsiParams),
as.double(cetaCorParams),as.integer(HNco),as.double(comegaParams),as.double(c(pinu,pifi)),
as.integer(HNscor),as.double(sigmaCorParams),
as.double(c(tau,tol)), as.integer(FT), as.double(deviance),as.integer(isDz2),
as.integer(c(cont,LASTsw,LASTWB)),as.double(LASTAll))}
if (mcm==6){
out<-.C("longmultg",
as.integer(seed), as.character(StorageDir),
as.integer(c(sweeps,burn,thin,n,p,N,MVLD,mcm)),
as.integer(niVec), as.integer(cusumniVec), as.integer(intime2), as.integer(intime),
as.integer(c(niMax,LUT)), as.integer(FUT),
as.integer(cusumC), as.double(C), as.double(c(t(Y))), as.double(t(X)), as.double(Z),
as.double(Xc),as.double(Zc),
as.integer(c(LG,LD,LK,LGc,LDc)), as.integer(c(NG,ND2,NK,NGc,NDc)),
as.integer(vecLG), as.integer(vecLD), as.integer(vecLK), as.integer(vecLGc),as.integer(vecLDc),
as.integer(cusumVecLG), as.integer(cusumVecLD), as.integer(cusumVecLK), as.integer(cusumVecLGc),
as.integer(cusumVecLDc),
as.double(c(blockSizeProbG, blockSizeProbD, blockSizeProbK, blockSizeProbGc, blockSizeProbDc)),
as.integer(c(maxBSG,maxBSD,maxBSK,maxBSGc,maxBSDc)),
as.double(tuneCalpha), as.double(tuneSigma2), as.double(tuneCbeta), as.double(tuneAlpha),
as.double(tuneSigma2R),
as.double(tuneR), as.double(tuneCpsi), as.double(tuneCbCor), as.double(tuneOmega),
as.double(tuneComega),
as.double(pimu), as.double(cetaParams), as.double(pisigma), as.integer(HNca),
as.double(calphaParams),
as.integer(HNsg), as.double(sigmaParams), as.double(piphi), as.integer(HNcpsi),
as.double(cpsiParams),
as.double(cetaCorParams),as.integer(HNco),as.double(comegaParams),as.double(c(pinu,pifi)),
as.integer(HNscor),as.double(sigmaCorParams),
as.double(tau), as.integer(FT), as.double(deviance),as.integer(isDz2),
as.integer(c(cont,LASTsw,LASTWB)),as.double(LASTAll),
as.integer(H), as.double(DPparams))}
if (mcm==7){
out<-.C("longmultgv",
as.integer(seed), as.character(StorageDir),
as.integer(c(sweeps,burn,thin,n,p,N,MVLD,mcm)),
as.integer(niVec), as.integer(cusumniVec), as.integer(intime2), as.integer(intime),
as.integer(c(niMax,LUT)), as.integer(FUT),
as.integer(cusumC), as.double(C), as.double(c(t(Y))), as.double(t(X)), as.double(Z),
as.double(Xc),as.double(Zc),
as.integer(c(LG,LD,LK,LGc,LDc)), as.integer(c(NG,ND2,NK,NGc,NDc)),
as.integer(vecLG), as.integer(vecLD), as.integer(vecLK), as.integer(vecLGc),as.integer(vecLDc),
as.integer(cusumVecLG), as.integer(cusumVecLD), as.integer(cusumVecLK), as.integer(cusumVecLGc),as.integer(cusumVecLDc),
as.double(c(blockSizeProbG, blockSizeProbD, blockSizeProbK, blockSizeProbGc, blockSizeProbDc)),
as.integer(c(maxBSG,maxBSD,maxBSK,maxBSGc,maxBSDc)),
as.double(tuneCalpha), as.double(tuneSigma2), as.double(tuneCbeta), as.double(tuneAlpha), as.double(tuneSigma2R),
as.double(tuneR), as.double(tuneCpsi), as.double(tuneCbCor), as.double(tuneOmega), as.double(tuneComega),
as.double(pimu), as.double(cetaParams), as.double(pisigma), as.integer(HNca), as.double(calphaParams),
as.integer(HNsg), as.double(sigmaParams), as.double(piphi), as.integer(HNcpsi), as.double(cpsiParams),
as.double(cetaCorParams),as.integer(HNco),as.double(comegaParams),as.double(c(pinu,pifi)),
as.integer(HNscor),as.double(sigmaCorParams),
as.double(tau), as.integer(FT), as.double(deviance),as.integer(isDz2),
as.integer(c(cont,LASTsw,LASTWB)), as.double(LASTAll),
as.integer(G), as.double(DPparams))}
#Output
if (!is.null(C)) C<-t(C)
loc1<-31
loc2<-60
fit <- list(call=call,call2=call2,formula=formula,seed=seed,p=p,d=p*(p-1)/2,
data=data,lag=lag,Y=Y,
X=X,Xknots=Xknots,LG=LG,NG=NG,
Z=Z,Zknots=Zknots,LD=LD,ND=ND,ND2=ND2,
storeMeanVectorX=storeMeanVectorX,
storeMeanVectorZ=storeMeanVectorZ,
storeIndicatorX=storeIndicatorX,
storeIndicatorZ=storeIndicatorZ,
assignX=assignX,
assignZ=assignZ,
labelsX=labelsX,
labelsZ=labelsZ,
countX=countX,
countZ=countZ,
varsY=varsY,
varsX=varsX,
varsZ=varsZ,
is.Dx=is.Dx,
is.Dz=is.Dz,
which.SpecX=which.SpecX,
which.SpecZ=which.SpecZ,
formula.termsX=formula.termsX,
formula.termsZ=formula.termsZ,
nSamples=nSamples,
totalSweeps=sweeps,
mcpar=c(as.integer(burn+1),as.integer(seq(from=burn+1,by=thin,length.out=nSamples)[nSamples]),as.integer(thin)),
mcm=mcm,H=H,G=G,
tuneCalpha=c(tuneCalpha,out[[loc1+0]][1:p]),
tuneSigma2=c(tuneSigma2,out[[loc1+1]][1:p]),
tuneCbeta=c(tuneCbeta,out[[loc1+2]][1]),
tuneAlpha=c(tuneAlpha,out[[loc1+3]][1:(p*ND2)]),
tuneSigma2R=c(tuneSigma2R,out[[loc1+4]][1]),
tuneR=c(tuneR,out[[loc1+5]][1:LUT]),
tuneCpsi=c(tuneCpsi,out[[loc1+6]][1:(p*p)]),
tuneCbCor=c(tuneCbCor,out[[loc1+7]][1]),
tuneOmega=c(tuneOmega,out[[loc1+8]][1:NDc]),
tuneComega=c(tuneComega,out[[loc1+9]][1]),
deviance=c(out[[loc2]][1:2]),
nullDeviance=nullDeviance,
DIR=StorageDir,
out=out,
LUT=LUT,
SUT=SUT,
C=C,Cknots=Cknots,LK=LK,NK=NK,
Xc=Xc,Xcknots=Xcknots,LGc=LGc,NGc=NGc,
Zc=Zc,Zcknots=Zcknots,LDc=LDc,NDc=NDc,
storeMeanVectorC=storeMeanVectorC,
storeMeanVectorXc=storeMeanVectorXc,
storeMeanVectorZc=storeMeanVectorZc,
storeIndicatorC=storeIndicatorC,
storeIndicatorXc=storeIndicatorXc,
storeIndicatorZc=storeIndicatorZc,
assignC=assignC,
assignXc=assignXc,
assignZc=assignZc,
labelsC=labelsC,
labelsXc=labelsXc,
labelsZc=labelsZc,
countC=countC,
countXc=countXc,
countZc=countZc,
varsC=varsC,
varsXc=varsXc,
varsZc=varsZc,
varTime=varTime,
is.Dc=is.Dc,
is.Dxc=is.Dxc,
is.Dzc=is.Dzc,
which.SpecC=which.SpecC,
which.SpecXc=which.SpecXc,
which.SpecZc=which.SpecZc,
formula.termsC=formula.termsC,
formula.termsXc=formula.termsXc,
formula.termsZc=formula.termsZc,
HNca=HNca,
HNcpsi=HNcpsi,
HNsg=HNsg,
HNco=HNco,
HNscor=HNscor,
niVec=niVec,
intime=intime,
NC=0,
FT=FT,
qCont=0)
class(fit) <- 'mvrm'
return(fit)
}
#psiAll <- mvrm2mcmc(x,"psi")
#alphaAll <- mvrm2mcmc(x,"alpha")
#sigma2All <- mvrm2mcmc(x,"sigma2")
#RAll <- mvrm2mcmc(x,"R")
postSigma<-function(x, time, psi, alpha, sigma2, R, samples, ...){
p<-x$p
  if (missing(time)) t <- x$SUT else t <- time
T<-length(t)
postS<-matrix(0,nrow=T*p,ncol=T*p)
# Vars
varsd<-as.list(substitute(list(...)))[-1]
if (x$varTime %in% names(varsd))
varsd <-varsd[-which(names(varsd)==x$varTime)]
if ("lag" %in% names(varsd))
varsd <-varsd[-which(names(varsd)=="lag")]
comp.des.mats<-1
if (comp.des.mats==1){
comp.des.mats<-0
# Zmat
vars<-x$varsZ
wv<-NULL
newdata<-t
for (v in 1:length(vars)){
if (vars[[v]] %in% names(varsd)){
min1<-eval(varsd[[which(vars[[v]]==names(varsd))]])
newdata<-cbind(newdata,rep(min1,T))
wv<-c(wv,v)
}
}
if (! (length(wv)+1) == length(vars)) stop("insufficient input on covariates")
newdata <- as.data.frame(newdata)
colnames(newdata)<-c(x$varTime,vars[wv])
terms.reform<-NULL
k<-0
for (i in 1:length(x$formula.termsZ)){
term<-x$formula.termsZ[i]
if (!i %in% which(unlist(x$which.SpecZ)==-99)){
k<-k+1
if (!grepl("knots",term)){
term<-substr(term,1,nchar(term)-1)
term<-paste(term,",knots= knots[[",k,"]])")
}
}
terms.reform<-c(terms.reform,term)
}
formula2<-reformulate(terms.reform)
if (length(x$data)>0){
nd<-x$data[0,match(colnames(newdata),colnames(x$data)),drop=FALSE]
for (j in 1:dim(nd)[2])
nd[,j]<-drop(nd[,j])
nd[1:NROW(newdata),] <- newdata
}else{nd<-newdata}
Zmat<-DM(formula=formula2,data=nd,n=NROW(nd),knots=x$Zknots,meanVector=x$storeMeanVectorZ,indicator=x$storeIndicatorZ,centre=TRUE)$X
Zmat<-as.matrix(Zmat)[,-1]
# Cmat
vars<-x$varsC
wv<-NULL
C<-cbind(rep(seq(2,T,1),seq(1,T-1,1)), unlist(sapply(1:(T-1), function(i) seq(1,i,1))))
lag<-t[C[,1]]-t[C[,2]]
newdata<-lag
for (v in 1:length(vars)){
if (vars[[v]] %in% names(varsd)){
min1<-eval(varsd[[which(vars[[v]]==names(varsd))]])
newdata<-cbind(newdata,rep(min1,T))
wv<-c(wv,v)
}
}
if (! (length(wv)+1) == length(vars)) stop("insufficient input on covariates")
newdata<-as.data.frame(newdata)
colnames(newdata)<-c("lag",vars[wv])
newdata <- as.data.frame(newdata)
terms.reform<-NULL
k<-0
for (i in 1:length(x$formula.termsC)){
term<-x$formula.termsC[i]
if (!i %in% which(unlist(x$which.SpecC)==-99)){
k<-k+1
if (!grepl("knots",term)){
term<-substr(term,1,nchar(term)-1)
term<-paste(term,",knots= knots[[",k,"]])")
}
}
terms.reform<-c(terms.reform,term)
}
formula2<-reformulate(terms.reform)
if (length(x$data)>0){
dataNew<-as.data.frame(cbind(x$data,lag=rnorm(dim(x$data)[1])))
nd<-dataNew[0,match(colnames(newdata),colnames(dataNew)),drop=FALSE]
for (j in 1:dim(nd)[2])
nd[,j]<-drop(nd[,j])
nd[1:NROW(newdata),] <- newdata
}else{nd<-newdata}
Cmat<-DM(formula=formula2,data=nd,n=NROW(newdata),knots=x$Cknots,meanVector=x$storeMeanVectorC,indicator=x$storeIndicatorC,centre=TRUE)$X
Cmat<-as.matrix(Cmat)
}
# #
Sij<-matrix(0,p,p)
RA<-diag(rep(1,p))
Li<-diag(rep(1,p*T))
Di<-matrix(0,p*T,p*T)
if (missing(samples)) samples <- 1:x$nSamples
for (sw in samples){
buildPhi<-NULL
for (r1 in 1:p){
for (r2 in 1:p){
Pair<-(r1-1)*p + r2
psiA<-psi[sw,(1+((Pair-1)*x$LK)):(Pair*x$LK)]
buildPhi<-cbind(buildPhi,Cmat%*%matrix(psiA))
}
}
move<-1
if (T > 1){
for (j in 2:T){
for (k in 1:(j-1)){
Li[(j*p-p+1):(j*p),(k*p-p+1):(k*p)] <- -matrix(buildPhi[move,],p,p,byrow=TRUE)
move <- move + 1
}
}
}
Sr<-NULL
for (r in 1:p){
s2<-sigma2[sw,r]
alphaA<-alpha[sw,(1+((r-1)*x$LD)):(r*x$LD)]
Sr<-cbind(Sr,sqrt(s2*exp(Zmat%*%matrix(alphaA))))
}
for (j in 1:T){
diag(Sij) <- Sr[j,]
pick.time<-which(t[j]==x$SUT) # position of time t[j] among the sorted unique times
RA[lower.tri(RA)] <- R[sw,(1+(pick.time-1)*p*(p-1)/2):(pick.time*p*(p-1)/2)]
RA[upper.tri(RA)] <- t(RA)[upper.tri(RA)]
Di[((j-1)*p+1):(j*p),((j-1)*p+1):(j*p)] <- Sij %*% RA %*% Sij
}
SigmaSW <- solve(Li)%*%Di%*%solve(t(Li))
postS <- postS + SigmaSW
}
return(postS/length(samples))
}
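# A minimal usage sketch for postSigma (not part of the fitting code): read the
# posterior samples back in with mvrm2mcmc(), as in the comment block above,
# then average the implied covariance matrix over the retained samples.
#   psiAll <- mvrm2mcmc(x, "psi")
#   alphaAll <- mvrm2mcmc(x, "alpha")
#   sigma2All <- mvrm2mcmc(x, "sigma2")
#   RAll <- mvrm2mcmc(x, "R")
#   S <- postSigma(x, psi = psiAll, alpha = alphaAll, sigma2 = sigma2All, R = RAll)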
postSigma2<-function(x, subject, psi, alpha, sigma2, R, samples, ...){
niVec<-x$niVec
p<-x$p
T<-niVec[subject]
postS<-matrix(0,nrow=T*p,ncol=T*p)
cusumniVec<-c(0,cumsum(niVec))
cusumC<-c(0,cumsum(niVec*(niVec-1)/2))
Cmat<-x$C[(cusumC[subject]+1):cusumC[subject+1],]
Zmat<-x$Z[(cusumniVec[subject]+1):cusumniVec[subject+1],-1]
Sij<-matrix(0,p,p)
RA<-diag(rep(1,p))
Li<-diag(rep(1,p*T))
Di<-matrix(0,p*T,p*T)
if (missing(samples)) samples <- 1:x$nSamples
for (sw in samples){
buildPhi<-NULL
for (r1 in 1:p){
for (r2 in 1:p){
Pair<-(r1-1)*p + r2
psiA<-psi[sw,(1+((Pair-1)*x$LK)):(Pair*x$LK)]
buildPhi<-cbind(buildPhi,Cmat%*%matrix(psiA))
}
}
move<-1
if (T > 1){
for (j in 2:T){
for (k in 1:(j-1)){
Li[(j*p-p+1):(j*p),(k*p-p+1):(k*p)] <- -matrix(buildPhi[move,],p,p,byrow=TRUE)
move <- move + 1
}
}
}
Sr<-NULL
for (r in 1:p){
s2<-sigma2[sw,r]
alphaA<-alpha[sw,(1+((r-1)*x$LD)):(r*x$LD)]
Sr<-cbind(Sr,sqrt(s2*exp(Zmat%*%matrix(alphaA))))
}
for (j in 1:T){
diag(Sij) <- Sr[j,]
pick.time<-x$intime[cusumniVec[subject]+j]+1
RA[lower.tri(RA)] <- R[sw,(1+(pick.time-1)*p*(p-1)/2):(pick.time*p*(p-1)/2)]
RA[upper.tri(RA)] <- t(RA)[upper.tri(RA)]
Di[((j-1)*p+1):(j*p),((j-1)*p+1):(j*p)] <- Sij %*% RA %*% Sij
}
SigmaSW <- solve(Li)%*%Di%*%solve(t(Li))
postS <- postS + SigmaSW
}
return(postS/length(samples))
}
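# postSigma2 computes the same posterior mean covariance matrix but for a single
# subject, using that subject's stored design matrices; a sketch, reusing the
# samples read in above:
#   S1 <- postSigma2(x, subject = 1, psi = psiAll, alpha = alphaAll,
#                    sigma2 = sigma2All, R = RAll)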
plot3.bcmg<-function(x, model="mean", centre=mean, quantiles=c(0.10,0.90), plotEmptyCluster=FALSE, plotOptions=list(), ...){
t1<-x$SUT
if (model=="mean"){
meanReg<-array(0,dim=c(x$nSamples,x$H,x$LUT))
uXc<-unique(x$Xc)
file1 <- paste(x$DIR,"BNSP.eta.ls.txt",sep="")
file2 <- paste(x$DIR,"BNSP.clusters.txt",sep="")
if (!file.exists(file1) || !file.exists(file2)) quiet(ls.mvrm(x))
eta<-array(unlist(read.table(file1)),dim=c(x$nSamples,x$H,(x$LGc+1)))
tabs<-table(array(unlist(read.table(file2))))
labs<-array(0,x$H)
labs[as.numeric(names(tabs))]<-tabs
for (i in 1:x$nSamples)
meanReg[i,,] <- eta[i,,]%*%t(uXc)
if (x$FT==1) meanReg <- tanh(meanReg)
centreM<- apply(meanReg,c(2,3),centre)
QM<-NULL; SQM<-matrix(NA,ncol=2,nrow=NROW(centreM))
if (!is.null(quantiles)){
QM<-apply(meanReg,c(2,3),quantile,probs=quantiles,na.rm=TRUE)
SQM<-cbind(QLM=c(t(QM[1,,])),QUM=c(t(QM[2,,])))
}
dataM<-data.frame(group=factor(rep(1:x$H,each=x$LUT)),size=rep(labs,each=x$LUT),t=rep(t1,x$H),centre=c(t(centreM)),QLM=SQM[,1],QUM=SQM[,2])
dataM<-dataM[with(dataM, order(-size, t)),]
labs2<-unique(dataM$size)
if (!plotEmptyCluster) dataM<-dataM[dataM$size > 0,]
plotElM<-geom_line(aes_string(x="t",y="centre", group="group",col="group"),alpha=1.0,cex=1.3)
if (!is.null(quantiles))
plotElM<-c(geom_ribbon(aes_string(x="t",ymin="QLM",ymax="QUM",group="group",fill="group",col="group"),alpha =0.2),plotElM)
ggM<-ggplot(data=dataM)
plotM <- ggM + plotElM + ylab(expression(mu[t])) + guides(fill="none") +
scale_color_discrete(name = "group [size]", labels = paste("[",labs2,"]",sep="")) + plotOptions
}
if (model=="stdev"){
fitV<-matrix(0,nrow=x$nSamples,ncol=length(t1))
uZ<-unique(x$Z)
alphaFN <- file.path(paste(x$DIR,"BNSP.alpha.txt",sep=""))
aFile<-file(alphaFN,open="r")
sigma2FN <- file.path(paste(x$DIR,"BNSP.sigma2.txt",sep=""))
s2File<-file(sigma2FN,open="r")
for (i in 1:x$nSamples){
alpha<-scan(aFile,what=numeric(),n=x$LD,quiet=T)
s2<-scan(s2File,what=numeric(),n=1,quiet=T)
fitV[i,]<-sqrt(s2*exp(uZ[,-1]%*%matrix(alpha)))
}
close(aFile)
close(s2File)
centreM<-apply(fitV,2,centre)
QM<-NULL; SQM<-matrix(NA,ncol=2,nrow=NROW(centreM))
if (!is.null(quantiles)){
QM<-apply(fitV,2,quantile,probs=quantiles,na.rm=TRUE)
SQM<-cbind(QLM=QM[1,],QUM=QM[2,])
}
dataM<-data.frame(t=t1,centre=centreM,QLM=SQM[,1],QUM=SQM[,2])
plotElM<-geom_line(aes_string(x="t",y="centre"),alpha=1.0,cex=1.3)
if (!is.null(quantiles))
plotElM<-c(geom_ribbon(aes_string(x="t",ymin="QLM",ymax="QUM"),alpha =0.2),plotElM)
ggM<-ggplot(data=dataM)
plotM <- ggM + plotElM + ylab(expression(sigma[t])) + plotOptions
}
return(plotM)
}
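# A minimal usage sketch for plot3.bcmg (assuming `x` is a fitted model with
# clustered mean curves): plot the cluster-specific mean curves with 80%
# credible bands, or the fitted standard deviation function.
#   plot3.bcmg(x, model = "mean", quantiles = c(0.10, 0.90))
#   plot3.bcmg(x, model = "stdev")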
|
/scratch/gouwar.j/cran-all/cranData/BNSP/R/bnpLongMulti.R
|
dpmj <- function(formula,Fcdf,data,offset,sampler="truncated",Xpred,offsetPred,
StorageDir,ncomp,sweeps,burn,thin=1,seed,
H,Hdf,d,D,Alpha.xi,Beta.xi,Alpha.alpha,Beta.alpha,Trunc.alpha,...){
# Match call
call <- match.call()
# Seed
if (missing(seed)) seed<-as.integer((as.double(Sys.time())*1000+Sys.getpid()) %% 2^31)
# Family
Fcdf.indicator <- match(c(Fcdf),c("poisson","binomial","negative binomial","beta binomial","generalized poisson"))
if (is.na(Fcdf.indicator)){
stop('Fcdf must be character, and one of "poisson", "binomial", "negative binomial", "beta binomial", "generalized poisson"')
}
# Design matrix
if (missing(data))
data <- environment(formula)
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "offset"), names(mf), 0L)
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- quote(stats::model.frame)
mf <- eval(mf, parent.frame())
X<-as.matrix(model.matrix(formula,data=data)[,-1])
# Dimensions
n<-NROW(X)
p<-NCOL(X)
totran<-p+1
# Binary covariates
binary <- numeric(p)
for (i in 1:p) binary[i] <- length(unique(X[,i]))
binary <- as.numeric(binary == 2)
NBC <- sum(binary) #number of binary covariates
NDV <- NBC + 1 #number of discrete variables
NCC <- p - NBC #number of continuous covariates
# Rearrange design matrix
X <- as.matrix(X[,c(which(binary==1),which(binary==0))],ncol=p)
# Prior parameters
# Concentration parameter
if (missing(Alpha.alpha)) Alpha.alpha<-2
if (missing(Beta.alpha)) Beta.alpha<-5
if (missing(Trunc.alpha)) Trunc.alpha<-0.25
# mu_h ~ N(d,D)
if (missing(d)){
d<-apply(as.matrix(X),2,mean)
if (NBC > 0) d[1:NBC] <- -qnorm(1-d[1:NBC])
}
if (missing(D)){
elements<-((apply(as.matrix(X),2,max)-apply(as.matrix(X),2,min))^2)/8
if (NBC > 0) elements[1:NBC]<-5
D<-diag(elements,p)
}
# Wishart(h,H)
if (missing(Hdf)) Hdf<-totran+2
if (missing(H)){
elements<-((apply(as.matrix(X),2,max)-apply(as.matrix(X),2,min))^2)/8
if (NBC > 0) elements[1:NBC]<-1
H<-diag(c(1,elements),totran)
}
# Family specific prior parameters
if (Fcdf.indicator==1){
if (!missing(Alpha.xi)) if (!length(Alpha.xi)==1) stop(paste("For Poisson mixtures, argument Alpha.xi must be of length 1"))
if (!missing(Alpha.xi)) if (Alpha.xi < 0) stop(paste("For Poisson mixtures, argument Alpha.xi must be positive"))
if (missing(Alpha.xi)) Alpha.xi<-1.0
if (!missing(Beta.xi)) if (!length(Beta.xi)==1) stop(paste("For Poisson mixtures, argument Beta.xi must be of length 1"))
if (!missing(Beta.xi)) if (Beta.xi < 0) stop(paste("For Poisson mixtures, argument Beta.xi must be positive"))
if (missing(Beta.xi)) Beta.xi<-0.1
} else if (Fcdf.indicator==2){
if (!missing(Alpha.xi)) if (!length(Alpha.xi)==1) stop(paste("For Binomial mixtures, argument Alpha.xi must be of length 1"))
if (!missing(Alpha.xi)) if (Alpha.xi < 0) stop(paste("For Binomial mixtures, argument Alpha.xi must be positive"))
if (missing(Alpha.xi)) Alpha.xi<-1.0
if (!missing(Beta.xi)) if (!length(Beta.xi)==1) stop(paste("For Binomial mixtures, argument Beta.xi must be of length 1"))
if (!missing(Beta.xi)) if (Beta.xi < 0) stop(paste("For Binomial mixtures, argument Beta.xi must be positive"))
if (missing(Beta.xi)) Beta.xi<-1.0
} else if (Fcdf.indicator==3){
if (!missing(Alpha.xi)) if (!length(Alpha.xi)==2) stop(paste("For Negative Binomial mixtures, argument Alpha.xi must be of length 2"))
if (!missing(Alpha.xi)) if (sum(Alpha.xi < 0) >0) stop(paste("For Negative Binomial mixtures, vector Alpha.xi must have positive elements"))
if (missing(Alpha.xi)) Alpha.xi<-c(1.0,1.0)
if (!missing(Beta.xi)) if (!length(Beta.xi)==2) stop(paste("For Negative Binomial mixtures, argument Beta.xi must be of length 2"))
if (!missing(Beta.xi)) if (sum(Beta.xi < 0) >0) stop(paste("For Negative Binomial mixtures, vector Beta.xi must have positive elements"))
if (missing(Beta.xi)) Beta.xi<-c(0.1,0.1)
} else if (Fcdf.indicator==4){
if (!missing(Alpha.xi)) if (!length(Alpha.xi)==2) stop(paste("For Beta Binomial mixtures, argument Alpha.xi must be of length 2"))
if (!missing(Alpha.xi)) if (sum(Alpha.xi < 0) >0) stop(paste("For Beta Binomial mixtures, vector Alpha.xi must have positive elements"))
if (missing(Alpha.xi)) Alpha.xi<-c(1.0,1.0)
if (!missing(Beta.xi)) if (!length(Beta.xi)==2) stop(paste("For Beta Binomial mixtures, argument Beta.xi must be of length 2"))
if (!missing(Beta.xi)) if (sum(Beta.xi < 0) >0) stop(paste("For Beta Binomial mixtures, vector Beta.xi must have positive elements"))
if (missing(Beta.xi)) Beta.xi<-c(0.1,0.1)
} else if (Fcdf.indicator==5){
if (!missing(Alpha.xi)) if (!length(Alpha.xi)==2) stop(paste("For Generalized Poisson mixtures, argument Alpha.xi must be of length 2"))
if (!missing(Alpha.xi)) if (sum(Alpha.xi < 0) >0) stop(paste("For Generalized Poisson mixtures, vector Alpha.xi must have positive elements"))
if (missing(Alpha.xi)) Alpha.xi<-c(1.0,1.0)
if (!missing(Beta.xi)) if (!length(Beta.xi)==2) stop(paste("For Generalized Poisson mixtures, argument Beta.xi must be of length 2"))
if (!missing(Beta.xi)) if (sum(Beta.xi < 0) >0) stop(paste("For Generalized Poisson mixtures, vector Beta.xi must have positive elements"))
if (missing(Beta.xi)) Beta.xi<-c(0.1,1.0)
}
# Sample mean and sd
xbar<-apply(as.matrix(X),2,mean)
xsd<-apply(as.matrix(X),2,sd)
# Family specific responses and offset terms
if (Fcdf.indicator==1 | Fcdf.indicator==3 | Fcdf.indicator==5){
Y <- model.response(mf, "any")
offset <- as.vector(model.offset(mf))
if (!is.null(offset)){
if (length(offset) != NROW(Y))
stop(gettextf("number of offsets is %d should equal %d (number of observations)",
length(offset), NROW(Y)), domain = NA)
}
if (is.null(offset)) offset<-rep(1,n)
} else if (Fcdf.indicator==2 | Fcdf.indicator==4){
Y1 <- model.response(mf, "any")
if (NCOL(Y1)==1){
if (any(Y1 < 0 | Y1 > 1)) stop("y values must be 0 <= y <= 1")
offset <- array(1,n)
Y<-Y1
} else if (NCOL(Y1) == 2){
offset <- Y1[, 1] + Y1[, 2]
Y<-Y1[,1]
} else
stop(paste("For the binomial Fcdf, y must be",
"a vector of 0 and 1's or a 2 column",
"matrix where col 1 is no. successes",
"and col 2 is no. failures"))
}
# Predictions
if (missing(Xpred)){
npred <- 0
Xpred <- 1
} else{
npred <- NROW(Xpred)
}
if (npred > 0){
if (NCOL(Xpred) != p)
stop(gettextf("Xpred matrix includes %d covariates, but it should include %d: the number covariates in the model",
NCOL(Xpred), p), domain = NA)
}
if ((!missing(offsetPred)) & (npred > 0)){
if (length(offsetPred) != 1)
stop(gettextf("the length of offsetPred %d, but it should be 1",
length(offsetPred)), domain = NA)
}
if (missing(offsetPred) & (npred > 0)) offsetPred <- round(mean(offset))
if (missing(offsetPred) & (npred == 0)) offsetPred <- 1.0
c1 <- cbind(rep(1,npred),!is.na(Xpred))
Xpred[is.na(Xpred)]<-0
meanReg <- array(0,npred)
medianReg <- array(0,npred)
q1Reg <- array(0,npred)
q3Reg <- array(0,npred)
modeReg <- array(0,npred)
# Family specific predictions
if (Fcdf.indicator==1 | Fcdf.indicator==3 | Fcdf.indicator==5){
maxy <- max(Y)+1000 # maxy <- max(y)+max(10,floor(0.1*max(y)))
} else if (Fcdf.indicator==2 | Fcdf.indicator==4){
maxy <- max(offsetPred)+1
}
denReg <- array(0,npred*maxy)
denVar <- array(0,npred*maxy)
#Sampler
sampler.indicator <- match(sampler,c("slice","truncated"))
if (is.na(sampler.indicator)){
stop(paste0("sampler '", sampler, "' not recognized"))
}
# Storage directory & files
WF <- 1
if (!missing(StorageDir)){
StorageDir <- path.expand(StorageDir)
ncharwd <- nchar(StorageDir)}
if (!missing(StorageDir)) if (!(substr(StorageDir,ncharwd,ncharwd)=="/")) StorageDir <- paste(StorageDir,"/",sep="")
if (!missing(StorageDir)) if (!dir.exists(StorageDir)) DIRC<-dir.create(StorageDir,recursive = TRUE)
#if (!DIRC) stop("selected directory does not exist")
if (missing(StorageDir)) {WF <- 0; StorageDir <- paste(getwd(),"/",sep="")}
on.exit(if (WF==0) file.remove(paste(StorageDir,"BNSP.Sigmah.txt",sep=""),
paste(StorageDir,"BNSP.muh.txt",sep=""), paste(StorageDir,"BNSP.xih.txt",sep=""),
paste(StorageDir,"BNSP.alpha.txt",sep=""), paste(StorageDir,"BNSP.compAlloc.txt",sep=""),
paste(StorageDir,"BNSP.nmembers.txt",sep=""), paste(StorageDir,"BNSP.Updated.txt",sep=""),
paste(StorageDir,"BNSP.MeanReg.txt",sep=""), paste(StorageDir,"BNSP.MedianReg.txt",sep=""),
paste(StorageDir,"BNSP.Q05Reg.txt",sep=""), paste(StorageDir,"BNSP.Q10Reg.txt",sep=""),
paste(StorageDir,"BNSP.Q15Reg.txt",sep=""), paste(StorageDir,"BNSP.Q20Reg.txt",sep=""),
paste(StorageDir,"BNSP.Q25Reg.txt",sep=""), paste(StorageDir,"BNSP.Q75Reg.txt",sep=""),
paste(StorageDir,"BNSP.Q80Reg.txt",sep=""), paste(StorageDir,"BNSP.Q85Reg.txt",sep=""),
paste(StorageDir,"BNSP.Q90Reg.txt",sep=""), paste(StorageDir,"BNSP.Q95Reg.txt",sep="")))
#Call C
out<-.C("OneResLtnt",as.integer(seed),as.double(unlist(c(X))),as.integer(Y),as.double(offset),
as.integer(sweeps), as.integer(burn), as.integer(thin), as.integer(ncomp),
as.integer(n), as.integer(p), as.integer(NBC),
as.double(H), as.double(Hdf),
as.double(d), as.double(D),
as.double(Alpha.xi),as.double(Beta.xi),
as.double(Alpha.alpha),as.double(Beta.alpha),as.double(Trunc.alpha),
as.double(xbar), as.double(xsd), as.double(sum(Y)/sum(offset)),
as.integer(Fcdf.indicator), as.integer(sampler.indicator),
as.integer(npred),as.double(Xpred),as.double(offsetPred),as.integer(maxy), as.integer(c1),
as.double(meanReg),as.double(medianReg),as.double(q1Reg),as.double(q3Reg),as.double(modeReg),
as.double(denReg),as.double(denVar),
as.character(StorageDir),as.integer(WF))
#Output
location<-31
meanReg <- out[[location+0]][1:npred]
medianReg <- out[[location+1]][1:npred]
q1Reg <- out[[location+2]][1:npred]
q3Reg <- out[[location+3]][1:npred]
modeReg <- out[[location+4]][1:npred]
denReg <- matrix(out[[location+5]][1:(maxy*npred)],nrow=npred,ncol=maxy,byrow=TRUE)
denVar <- matrix(out[[location+6]][1:(maxy*npred)],nrow=npred,ncol=maxy,byrow=TRUE)
denVar <- denVar - denReg^2
fit <- list(call=call,seed=seed,meanReg=meanReg,medianReg=medianReg,q1Reg=q1Reg,q3Reg=q3Reg,modeReg=modeReg,
denReg=denReg,denVar=denVar)
class(fit) <- 'bnp'
return(fit)
}
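# A minimal usage sketch for dpmj (simulated data, assumed argument values):
# Poisson counts with one continuous covariate; when StorageDir is missing,
# posterior files are written to the working directory and removed on exit.
#   set.seed(1)
#   n <- 200
#   x1 <- rnorm(n)
#   y <- rpois(n, exp(0.5 + 0.7 * x1))
#   dat <- data.frame(y = y, x1 = x1)
#   fit <- dpmj(y ~ x1, Fcdf = "poisson", data = dat, ncomp = 10,
#               sweeps = 200, burn = 100, Xpred = matrix(c(-1, 0, 1)))
#   fit$meanReg  # posterior mean of E(y|x1) at the three prediction points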
print.bnp<-function(x,digits=max(3,getOption('digits')-3), ...){
#Print formula
cat('\nCall: ',deparse(x$call),'\n\n')
}
|
/scratch/gouwar.j/cran-all/cranData/BNSP/R/dpmj.R
|
chol <- function(x, mod = TRUE, p = 1, ...){
S<-base::chol(x)
list <- list(S)
if (mod){
list<-list()
nblocks <- dim(x)[2] / p
if (!nblocks == trunc(nblocks)) stop("matrix dimension is not a multiple of p")
S<-t(S)
T<-solve(S %*% diag(1/diag(S)))
D<-T %*% x %*% t(T)
if (p > 1){
for (l in (0:(p-2))){
E<-diag(1,dim(x)[2])
indices <- unlist(lapply(seq(0,(nblocks-1)),function(k) ((2+l) : p) + k * (dim(x)[2] + 1) * p + l * dim(x)[2]))
E[indices] <- -T[indices]
T <- E %*% T
#D <- E %*% D %*% t(E)
D <- T %*% x %*% t(T)
}
}
list<-list(L=T,D=D)
}
return(list)
}
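# A small check of the modified Cholesky decomposition above (a sketch, p = 1):
# the returned unit lower triangular L and diagonal D satisfy L x L' = D,
# equivalently x = L^{-1} D L^{-T}.
#   m <- crossprod(matrix(rnorm(16), 4, 4)) + diag(4)        # positive definite
#   dec <- chol(m, mod = TRUE, p = 1)
#   max(abs(dec$L %*% m %*% t(dec$L) - dec$D))                # ~ 0
#   max(abs(solve(dec$L) %*% dec$D %*% solve(t(dec$L)) - m))  # recovers m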
|
/scratch/gouwar.j/cran-all/cranData/BNSP/R/matalg.R
|
# mvrm, continue, mvrm2mcmc, plotCorr, histCorr, predict.mvrm, print.mvrm, summary.mvrm, clustering
mvrm <- function(formula,distribution="normal",
data=list(),centre=TRUE,
sweeps,burn=0,thin=1,seed,StorageDir,
c.betaPrior="IG(0.5, 0.5 * n * p)",
pi.muPrior="Beta(1, 1)",
c.alphaPrior="IG(1.1, 1.1)",
sigmaPrior="HN(2)",
pi.sigmaPrior="Beta(1, 1)",
c.psiPrior = "HN(1)",
phiPrior="HN(2)",
pi.omegaPrior="Beta(1, 1)",
mu.RPrior="N(0, 1)",
sigma.RPrior="HN(1)",
corr.Model=c("common",nClust=1),
DP.concPrior="Gamma(5, 2)",
breaksPrior="SBeta(1, 2)",
tuneCbeta,
tuneCalpha,
tuneAlpha,
tuneSigma2,
tuneCpsi,
tunePsi,
tunePhi,
tuneR,
tuneSigma2R,
tuneHar,
tuneBreaks,
tunePeriod,
tau,FT=1,
compDeviance=FALSE,...){
#Samples etc
if (missing(sweeps)) stop("provide sweeps argument")
if (thin <= 0) thin <- 1
thin <- as.integer(thin)
sweeps <- as.integer(sweeps)
burn <- as.integer(burn)
nSamples<-0
LASTsw<-0
if (sweeps > 0 && (sweeps-burn) > 0){
nSamples <- length(seq(1,(sweeps-burn),by=thin))
LASTsw<-seq(burn,by=thin,length.out=nSamples)[nSamples]
}
if (nSamples<=0) stop("problem with sweeps & burn arguments")
LASTWB<-1
#Distribution
distributions <- c('normal','t')
fam<-match(distribution,distributions)
if (is.na(fam)) stop("unrecognised distribution; should be one of: normal or t")
# Match call
call <- match.call(expand.dots = FALSE)
call2 <- match.call.defaults()
#Data
if (!is.list(data) && !is.data.frame(data))
data <- as.data.frame(data)
#Formula & data dimensions
p <- length(as.Formula(formula))[1]
if (fam==1 && length(as.Formula(formula))[2] > 2) stop("more than two regression models provided")
if (fam==1 && length(as.Formula(formula))[2] == 1) formula <- as.Formula(formula, ~1)
if (fam==2 && length(as.Formula(formula))[2] > 3) stop("more than three regression models provided")
if (fam==2 && length(as.Formula(formula))[2] == 1) formula <- as.Formula(formula, ~1, ~1)
if (fam==2 && length(as.Formula(formula))[2] == 2) stop("ambiguous definition of regression model")
formula.m<-formula(as.Formula(formula),lhs=1,rhs=1)
formula.v<-formula(as.Formula(formula),lhs=0,rhs=2)
if (fam==2) formula.s<-formula(as.Formula(formula),lhs=0,rhs=3)
# Responses, design matrices, indicators
Y<-NULL
varsY<-list()
for (i in 1:p){
trms<-terms.formula(formula(as.Formula(formula,~1),lhs=i,rhs=0))
Y<-cbind(Y,with(data,eval(attr(trms,"variables")[[2]])))
varsY[[i]] <- as.character(attr(trms,"variables")[[2]])
}
# Null Deviance
nullDeviance <- 0
for (i in 1:p)
nullDeviance <- nullDeviance - 2 * logLik(lm(Y[,i] ~ 1))
# Sample size
n<-length(c(Y))/p
#X
XYK<-DM(formula=formula.m,data=data,n=n,centre=centre)
X<-as.matrix(XYK$X)
Xknots<-XYK$Rknots
storeMeanVectorX<-XYK$meanVector
storeIndicatorX<-XYK$indicator
LG<-NCOL(X)-1
labelsX<-XYK$labels
countX<-XYK$count
varsX<-XYK$vars
which.SpecX<-XYK$which.Spec
formula.termsX<-XYK$formula.terms
is.Dx<-XYK$is.D
assignX<-XYK$assign
NG<-max(assignX)
assignX3<-XYK$assign3
vecLG<-table(assignX3)[-1]
cusumVecLG<-c(0,cumsum(vecLG))
NG2<-max(assignX3)
repsX <- XYK$repsX
DynamicSinPar <-XYK$DSP
amplitude <- DynamicSinPar[1]
nHar <- DynamicSinPar[3]
Dynamic <- XYK$Dynamic
isSin <- XYK$isSin
varSin <- sinXvar <- NULL
if (sum(isSin)) varSin <- varsX[[min(which(isSin==1))]]
#print(c("varSin",varSin,is.null(varSin),sum(isSin)))
if (amplitude==1) isSin <- isSin[repsX]
breaks<-XYK$breaks
nBreaks<-length(breaks)
period<-XYK$period
periodUnknown<-XYK$periodUnknown
periodRange<-XYK$periodRange
if (!is.null(varSin)) sinXvar <- with(data,get(varSin))
if (nBreaks > 0) if (min(breaks) < min(sinXvar) || max(breaks) > max(sinXvar)) stop("breaks outside the range of data")
#Z
ZK<-DM(formula=formula.v,data=data,n=n,centre=centre)
Z<-as.matrix(ZK$X)
Zknots<-ZK$Rknots
storeMeanVectorZ<-ZK$meanVector
storeIndicatorZ<-ZK$indicator
LD<-NCOL(Z)-1
labelsZ<-ZK$labels
countZ<-ZK$count
varsZ<-ZK$vars
which.SpecZ<-ZK$which.Spec
formula.termsZ<-ZK$formula.terms
is.Dz<-ZK$is.D
assignZ<-ZK$assign
#vecLD<-table(assignZ)[-1]
#cusumVecLD<-c(0,cumsum(vecLD))
ND<-max(assignZ)#length(vecLD)
repeats<-rep(1,ND)
repeats[which(is.Dz==1)]<-table(assignZ)[which(is.Dz==1)+1]
oneHE<-1; if (ND > 0) oneHE<-rep(c(1:ND),repeats)
assignZ2<-ZK$assign2
vecLD<-table(assignZ2)[-1]
cusumVecLD<-c(0,cumsum(vecLD))
ND2<-max(assignZ2)#length(vecLD)
isDz2<-as.numeric(is.Dz)[oneHE]
if (is.na(isDz2[1])) isDz2<-1
isSinVar <- ZK$isSin
if (sum(isSinVar) > 0) stop("sinusoids in variance model not allowed")
#W
W<-Wknots<-storeMeanVectorW<-storeIndicatorW<-assignW<-labelsW<-countW<-varsW<-is.Dw<-which.SpecW<-formula.termsW<-NULL
LC<-vecLC<-NC<-cusumVecLC<-0
if (fam==2){
WK<-DM(formula=formula.s,data=data,n=n,centre=centre)
W<-as.matrix(WK$X)
Wknots<-WK$Rknots
storeMeanVectorW<-WK$meanVector
storeIndicatorW<-WK$indicator
LC<-NCOL(W)-1
vecLC<-table(WK$assign)[-1]
NC<-length(vecLC)
cusumVecLC<-c(0,cumsum(vecLC))
assignW<-WK$assign
labelsW<-WK$labels
countW<-WK$count
varsW<-WK$vars
is.Dw<-WK$is.D
which.SpecW<-WK$which.Spec
formula.termsW<-WK$formula.terms
isSinW <- WK$isSin
if (sum(isSinW) > 0) stop("sinusoids in scale model not allowed")
}
MVLD <- 1
if (LD > 0 || LC > 0) MVLD<-max(vecLD,vecLC)
#Initialize covariance & correlation matrix
LASTDE <- LASTR <- LASTmuR <- LASTsigma2R <- 1
Res<-NULL
find.sm <- grep("sm(",colnames(X),fixed=TRUE)
Xmain <- X
if (length(find.sm) > 0) Xmain<-X[,-find.sm]
for (i in 1:p)
Res<-cbind(Res,residuals(lm(Y[,i] ~ Xmain - 1)))
CR<-0.9*cov(Res)+0.1*diag(mean(eigen(cov(Res))$values),p)
D<-matrix(0,p,p)
diag(D)<-sqrt(diag(CR))
LASTsigma2zk<-diag(CR)
LASTDE<-c(c(D),c(CR))
LASTR<-cov2cor(CR)
if (p > 1) LASTmuR<-mean(LASTR[upper.tri(LASTR)])
if (p > 2) LASTsigma2R<-var(LASTR[upper.tri(LASTR)])
#Prior for pi.mu
if (!length(pi.muPrior)==1 && !length(pi.muPrior)==NG && !length(pi.muPrior)==(p*NG))
stop("pi.muPrior has incorrect dimension")
if (length(pi.muPrior)==NG && NG2!=NG) pi.muPrior <- pi.muPrior[repsX]
if (length(pi.muPrior)==(p*NG) && NG2!=NG){
for (i in 1:(p-1)) repsX<-c(repsX,repsX+max(repsX))
pi.muPrior <- pi.muPrior[repsX]
}
pimu<-NULL
for (k in 1:length(pi.muPrior)){
sp<-strsplit(pi.muPrior[k],"Beta\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
pimu<-c(pimu,as.numeric(sp[[1]]))
}
if (length(pi.muPrior)==1) pimu<-rep(pimu,p*NG2)
if (length(pi.muPrior)==NG2) pimu<-rep(pimu,p)
piHar<-1 #pi of the harmonics for forming the dynamic matrix
if (sum(isSin) && amplitude > 1){
loc <- 1+2*(which(isSin==1)-1)
loc <-c(loc,loc+1)
piHar <- pimu[loc]
}
#print(piHar)
#Prior for c.beta
sp<-strsplit(c.betaPrior,"IG\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
cetaParams<-c(as.numeric(sp[[1]][1]),eval(parse(text=sp[[1]][2])))
#Prior for pi.sigma
if (ND > 0){
if (!length(pi.sigmaPrior)==1 && !length(pi.sigmaPrior)==ND && !length(pi.sigmaPrior)==(p*ND))
stop("pi.sigmaPrior has incorrect dimension")
if (length(pi.sigmaPrior)==(p*ND))
pi.sigmaPrior<-pi.sigmaPrior[oneHE+rep(seq(from=0,to=p*ND-1,by=ND),each=length(oneHE))]
if (length(pi.sigmaPrior)==ND) pi.sigmaPrior<-pi.sigmaPrior[oneHE]
pisigma<-NULL
for (k in 1:length(pi.sigmaPrior)){
sp<-strsplit(pi.sigmaPrior[k],"Beta\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
pisigma<-c(pisigma,as.numeric(sp[[1]]))
}
if (length(pi.sigmaPrior)==1) pisigma<-rep(pisigma,p*ND2)
if (length(pi.sigmaPrior)==ND2) pisigma<-rep(pisigma,p)
}else{pisigma<-1}
#Prior for c.alpha
if (!length(c.alphaPrior)==1 && !length(c.alphaPrior)==p)
stop("c.alphaPrior has incorrect dimension")
specials<-c("HN","IG")
calphaParams<-NULL
HNca<-vector()
for (k in 1:length(c.alphaPrior)){
sp<-strsplit(c.alphaPrior[k],"\\(")
if (sp[[1]][1] %in% specials){
if (match(sp[[1]][1],specials)==1) HNca[k]<-1
if (match(sp[[1]][1],specials)==2) HNca[k]<-0
} else stop("unrecognised prior for c.alpha")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
calphaParams<-c(calphaParams,as.numeric(sp[[1]]))
}
if (length(c.alphaPrior)==1){
calphaParams<-rep(calphaParams,p)
HNca<-rep(HNca,p)
}
#Prior for sigma2_{zero k}
if (!length(sigmaPrior)==1 && !length(sigmaPrior)==p)
stop("sigmaPrior has incorrect dimension")
specials<-c("HN","IG")
sigmaParams<-NULL
HNsg<-vector()
for (k in 1:length(sigmaPrior)){
sp<-strsplit(sigmaPrior[k],"\\(")
if (sp[[1]][1] %in% specials){
if (match(sp[[1]][1],specials)==1) HNsg[k]<-1
if (match(sp[[1]][1],specials)==2) HNsg[k]<-0
} else stop("unrecognised prior for sigma^2")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
sigmaParams<-c(sigmaParams,as.numeric(sp[[1]]))
}
if (length(sigmaPrior)==1){
sigmaParams<-rep(sigmaParams,p)
HNsg<-rep(HNsg,p)
}
#Prior for pi.omega
if (!length(pi.omegaPrior)==1 && !length(pi.omegaPrior)==NC && !length(pi.omegaPrior)==(p*NC))
stop("pi.omegaPrior has incorrect dimension")
piomega<-NULL
for (k in 1:length(pi.omegaPrior)){
sp<-strsplit(pi.omegaPrior[k],"Beta\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
piomega<-c(piomega,as.numeric(sp[[1]]))
}
if (length(pi.omegaPrior)==1) piomega<-rep(piomega,p*NC)
if (length(pi.omegaPrior)==NC) piomega<-rep(piomega,p)
#Prior for c.psi
if (!length(c.psiPrior)==1 && !length(c.psiPrior)==p)
stop("c.psiPrior has incorrect dimension")
specials<-c("HN","IG")
cpsiParams<-NULL
HNcp<-vector()
for (k in 1:length(c.psiPrior)){
sp<-strsplit(c.psiPrior[k],"\\(")
if (sp[[1]][1] %in% specials){
if (match(sp[[1]][1],specials)==1) HNcp[k]<-1
if (match(sp[[1]][1],specials)==2) HNcp[k]<-0
} else stop("unrecognised prior for c.psi")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
cpsiParams<-c(cpsiParams,as.numeric(sp[[1]]))
}
if (length(c.psiPrior)==1){
cpsiParams<-rep(cpsiParams,p)
HNcp<-rep(HNcp,p)
}
#Prior for phi2_k
if (!length(phiPrior)==1 && !length(phiPrior)==p)
stop("phiPrior has incorrect dimension")
specials<-c("HN","IG")
phiParams<-NULL
HNphi<-vector()
for (k in 1:length(phiPrior)){
sp<-strsplit(phiPrior[k],"\\(")
if (sp[[1]][1] %in% specials){
if (match(sp[[1]][1],specials)==1) HNphi[k]<-1
if (match(sp[[1]][1],specials)==2) HNphi[k]<-0
} else stop("unrecognised prior for phi")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
phiParams<-c(phiParams,as.numeric(sp[[1]]))
}
if (length(phiPrior)==1){
phiParams<-rep(phiParams,p)
HNphi<-rep(HNphi,p)
}
#Prior for muR
sp<-strsplit(mu.RPrior,"N\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
Rparams<-c(as.numeric(unlist(sp)))
#Prior for sigmaR
sp<-strsplit(sigma.RPrior,"HN\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
Rparams<-c(Rparams,as.numeric(unlist(sp)))
#Cor model
corModels<-c("common","groupC","groupV","uni")
mcm<-match(corr.Model[1],corModels)
if (p==1 && fam==1) mcm=4
if (p==1 && fam==2) mcm=1
if (p==2) mcm=1
if (is.na(mcm)) stop("unrecognised correlation model")
H <- G <- 1
if (mcm==2){
H<-as.numeric(corr.Model[2])
if (H==1) {mcm<-1; warning("Common correlations model specified with nClust = 1")}
if (is.na(H) || (!H%%1==0) || H==0) {H <- p*(p-1)/2; warning(paste("misspecified number of clusters; nClust set to", H))}
}
if (mcm==3){
G<-as.numeric(corr.Model[2])
if (G==1) {mcm<-1; warning("Common correlations model specified with nClust = 1")}
if (is.na(G) || (!G%%1==0) || G==0) {G <- p; warning(paste("misspecified number of clusters; nClust set to", G))}
H<-G*(G-1)/2+G #min(d,G*(G-1)/2+G) #min(G,abs(p-G))
}
if (fam==2) mcm <- mcm + 7
#Prior for alpha DP
sp<-strsplit(DP.concPrior,"Gamma\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
DPparams<-as.numeric(sp[[1]])
DPparams <- c(DPparams,0.01)
#Prior for breaks
sp<-strsplit(breaksPrior,"SBeta\\(")
sp<-strsplit(sp[[1]][2],"\\)")
sp<-strsplit(sp[[1]][1],",")
breaksPparams<-as.numeric(sp[[1]])
#print(breaksPparams)
if (sum(breaksPparams <= 0)) stop("SBeta prior should have positive parameters")
#Seed
if (missing(seed)) seed<-as.integer((as.double(Sys.time())*1000+Sys.getpid()) %% 2^31)
# Storage directory & files
if (!missing(StorageDir)){
StorageDir <- path.expand(StorageDir)
ncharwd <- nchar(StorageDir)}
if (!missing(StorageDir)) if (!(substr(StorageDir,ncharwd,ncharwd)=="/")) StorageDir <- paste(StorageDir,"/",sep="")
if (!missing(StorageDir)) if (!dir.exists(StorageDir)) dir.create(StorageDir, recursive = TRUE)
if (missing(StorageDir)) stop("provide a storage directory via argument StorageDir")
FL <- c("gamma", "cbeta", "delta", "alpha", "R", "muR", "sigma2R", "calpha", "sigma2", "beta",
"Hbeta", "Hgamma",
"ksi", "psi", "cpsi", "phi2",
"compAlloc", "nmembers", "deviance", "DPconc",
"compAllocV", "nmembersV",
"DE",
"nu", "fi", "omega", "ceta","comega","eta", "test", "nu.ls", "eta.ls", "nmembers.ls", "clusters",
"probs", "tune","breaks", "period")
for (i in 1:length(FL)){
oneFile <- paste(StorageDir, paste("BNSP",FL[i], "txt",sep="."),sep="/")
if (file.exists(oneFile)) file.remove(oneFile)
}
#Tuning Parameters
if (missing(tuneCbeta)) tuneCbeta<-20
if (missing(tuneCalpha)) tuneCalpha<-rep(1,p)
if (!length(tuneCalpha)==p) tuneCalpha<-rep(mean(tuneCalpha),p)
if (ND > 0){
if (missing(tuneAlpha) || (!length(tuneAlpha)==ND && !length(tuneAlpha)==(ND*p))){
tuneAlpha<-rep(5,ND)
tuneAlpha[which(is.Dz==1)]<-1
}
if (length(tuneAlpha)==(ND*p))
tuneAlpha<-tuneAlpha[oneHE+rep(seq(from=0,to=p*ND-1,by=ND),each=length(oneHE))]
if (length(tuneAlpha)==ND) tuneAlpha<-tuneAlpha[oneHE]
if (length(tuneAlpha)==ND2) tuneAlpha<-rep(tuneAlpha,p)
}else{tuneAlpha<-1}
if (missing(tuneSigma2)) tuneSigma2<-rep(0.5,p)
if (!length(tuneSigma2)==p) tuneSigma2<-rep(mean(tuneSigma2),p)
if (missing(tuneCpsi)) tuneCpsi<-rep(1,p)
if (!length(tuneCpsi)==p) tuneCpsi<-rep(mean(tuneCpsi),p)
if (missing(tunePsi)) tunePsi<-rep(5,NC*p)
if (!length(tunePsi)==(NC*p)) tunePsi<-rep(mean(tunePsi),NC*p)
if (missing(tunePhi)) tunePhi<-rep(0.5,p)
if (!length(tunePhi)==p) tunePhi<-rep(mean(tunePhi),p)
if (missing(tuneR)) tuneR<-40*(p+2)^3
tuneR[which(tuneR<p+2)]<-p+2
if (missing(tuneSigma2R)) tuneSigma2R<-0.25
if (nHar > 0){
if (missing(tuneHar)) tuneHar<-rep(100,nHar) else tuneHar<-rep(mean(tuneHar),nHar)
}else{tuneHar<-1}
if (missing(tuneBreaks)) tuneBreaks<-rep(0.0001*period,nBreaks)
if (!missing(tuneBreaks) && !length(tuneBreaks)==nBreaks) tuneBreaks<-rep(mean(tuneBreaks),nBreaks)
if (missing(tunePeriod)) tunePeriod<-0.0001
if (missing(tau)) tau = 0.01
#Block size selection
#if (missing(blockSizeProbG)){
blockSizeProbG <- rep(0,LG)
blockSizeProbG[1:5]<-c(10,25,30,25,10)
#}
#if (missing(blockSizeProbD)){
blockSizeProbD <- rep(0,LD)
blockSizeProbD[1:5]<-c(10,25,30,25,10)
#}
#if (missing(blockSizeProbC)){
blockSizeProbC <- rep(0,LC)
blockSizeProbC[1:5]<-c(10,25,30,25,10)
#}
maxBSG <- max(which(blockSizeProbG>0))
maxBSD <- max(which(blockSizeProbD>0))
maxBSC <- max(which(blockSizeProbC>0))
maxBSGDC <- c(maxBSG,maxBSD,maxBSC)
#Deviance
deviance <- c(0,0)
#Cont
cont <- 0
#Call C
if (mcm==4) {out<-.C("mvrmC",
as.integer(seed),as.character(StorageDir),
as.integer(sweeps),as.integer(burn),as.integer(thin),
as.double(c(t(Y))),as.double(X),as.double(Z),as.integer(n),as.integer(LG),as.integer(LD),
as.double(blockSizeProbG),as.integer(maxBSG),as.double(blockSizeProbD),as.integer(maxBSD),
as.double(tuneCalpha),as.double(tuneSigma2),as.double(tuneCbeta),as.double(tuneAlpha),
as.integer(NG2),as.integer(ND2),as.integer(vecLG),as.integer(vecLD),
as.integer(cusumVecLG),as.integer(cusumVecLD),as.integer(MVLD),
as.double(cetaParams),as.integer(HNca),as.double(calphaParams),as.double(pimu),as.double(pisigma),
as.integer(HNsg),as.double(sigmaParams),as.double(deviance),as.integer(isDz2),
as.integer(c(cont)),as.integer(c(0)),as.integer(c(0)),as.double(c(0)),as.double(LASTsigma2zk),
as.double(c(0)),as.double(c(0)),as.integer(LASTWB),
as.integer(isSin),as.double(Dynamic),as.integer(DynamicSinPar),as.double(tuneHar),
as.double(piHar),as.integer(nBreaks),as.double(c(period,sort(breaks),sinXvar,breaksPparams)),
as.double(tuneBreaks),as.integer(c(0)),as.double(c(0)),as.double(c(0)),as.double(tunePeriod),
as.integer(periodUnknown),as.double(sort(periodRange)))}
if (mcm==1) {out<-.C("mult",
as.integer(seed),as.character(StorageDir),
as.integer(sweeps),as.integer(burn),as.integer(thin),
as.integer(n),as.integer(c(p,mcm)),
as.double(c(t(Y))),as.double(t(X)),as.double(Z),
as.integer(LG),as.integer(LD),
as.double(blockSizeProbG),as.integer(maxBSG),as.double(blockSizeProbD),as.integer(maxBSD),
as.integer(NG2),as.integer(ND2),as.integer(vecLG),as.integer(vecLD),
as.integer(cusumVecLG),as.integer(cusumVecLD),as.integer(MVLD),
as.double(tuneCalpha),as.double(tuneSigma2),as.double(tuneCbeta),as.double(tuneAlpha),
as.double(tuneSigma2R),as.double(tuneR),
as.double(pimu),as.double(cetaParams),as.double(pisigma),
as.integer(HNca),as.double(calphaParams),as.double(Rparams),
as.integer(HNsg),as.double(sigmaParams),as.double(tau),as.integer(FT),as.double(deviance),
as.integer(isDz2),
as.integer(c(cont)),as.integer(c(0)),as.integer(c(0)),as.double(c(0)),as.double(LASTsigma2zk),
as.double(c(LASTR)),
as.double(c(LASTmuR)),as.double(c(LASTsigma2R)),as.double(c(0)),as.double(c(0)),
as.integer(c(LASTsw)),as.double(c(LASTDE)),as.integer(LASTWB),as.integer(isSin))}
if (mcm==2) {out<-.C("multg",
as.integer(seed),as.character(StorageDir),
as.integer(sweeps),as.integer(burn),as.integer(thin),
as.integer(n),as.integer(c(p,mcm)),
as.double(c(t(Y))),as.double(t(X)),as.double(Z),
as.integer(LG),as.integer(LD),
as.double(blockSizeProbG),as.integer(maxBSG),as.double(blockSizeProbD),as.integer(maxBSD),
as.integer(NG2),as.integer(ND2),as.integer(vecLG),as.integer(vecLD),
as.integer(cusumVecLG),as.integer(cusumVecLD),as.integer(MVLD),
as.double(tuneCalpha),as.double(tuneSigma2),as.double(tuneCbeta),as.double(tuneAlpha),
as.double(tuneSigma2R),as.double(tuneR), as.double(pimu),as.double(cetaParams),as.double(pisigma),
as.integer(HNca),as.double(calphaParams),as.double(Rparams),
as.integer(HNsg),as.double(sigmaParams),as.double(tau),as.integer(FT),as.double(deviance),as.integer(H),
as.double(DPparams), as.integer(isDz2),
as.integer(c(cont)),as.integer(c(0)),as.integer(c(0)),as.double(c(0)),as.double(LASTsigma2zk),
as.double(LASTR),as.double((rnorm(n=H,mean=LASTmuR,sd=0.01))),as.double(LASTsigma2R/H),
as.double(c(0)), as.double(c(0)),as.integer(c(0)),as.double(c(0)),
as.integer(c(LASTsw)),as.double(c(LASTDE)),as.integer(LASTWB),as.integer(isSin))}
if (mcm==3) {out<-.C("multgv",
as.integer(seed),as.character(StorageDir),
as.integer(sweeps),as.integer(burn),as.integer(thin),
as.integer(n),as.integer(c(p,mcm)),
as.double(c(t(Y))),as.double(t(X)),as.double(Z),
as.integer(LG),as.integer(LD),
as.double(blockSizeProbG),as.integer(maxBSG),as.double(blockSizeProbD),as.integer(maxBSD),
as.integer(NG2),as.integer(ND2),as.integer(vecLG),as.integer(vecLD),
as.integer(cusumVecLG),as.integer(cusumVecLD),as.integer(MVLD),
as.double(tuneCalpha),as.double(tuneSigma2),as.double(tuneCbeta),as.double(tuneAlpha),
as.double(tuneSigma2R),as.double(tuneR), as.double(pimu),as.double(cetaParams),as.double(pisigma),
as.integer(HNca),as.double(calphaParams),as.double(Rparams),
as.integer(HNsg),as.double(sigmaParams),as.double(tau),as.integer(FT),as.double(deviance),as.integer(G),
as.double(DPparams), as.integer(isDz2),
as.integer(c(cont)),as.integer(c(0)),as.integer(c(0)),as.double(c(0)),as.double(LASTsigma2zk),
as.double(LASTR),as.double((rnorm(n=H,mean=LASTmuR,sd=0.01))),as.double(LASTsigma2R/H),
as.double(c(0)),
as.double(c(0)),as.integer(c(0)),as.double(c(0)),
as.integer(c(LASTsw)),as.double(c(LASTDE)),as.integer(LASTWB),as.integer(isSin))}
if (mcm==8) {out<-.C("multT",
as.integer(seed),as.character(StorageDir),
as.integer(c(sweeps,burn,thin,n,p,mcm,MVLD,compDeviance)),
as.double(c(t(Y))),as.double(t(X)),as.double(Z),as.double(W),
as.integer(c(LG,LD,LC)),
as.double(blockSizeProbG),as.double(blockSizeProbD),as.double(blockSizeProbC),
as.integer(maxBSGDC),as.integer(c(NG2,ND2,NC)),as.integer(vecLG),as.integer(vecLD),as.integer(vecLC),
as.integer(cusumVecLG),as.integer(cusumVecLD),as.integer(cusumVecLC),
as.double(tuneCalpha),as.double(tuneSigma2),as.double(tuneCbeta),as.double(tuneAlpha),
as.double(tuneSigma2R),as.double(tuneR),as.double(tunePsi),as.double(tunePhi),as.double(tuneCpsi),
as.double(pimu),as.double(cetaParams),as.double(pisigma),
as.integer(HNca),as.double(calphaParams),as.integer(HNcp),as.double(cpsiParams),
as.integer(HNphi),as.double(phiParams),as.double(Rparams),
as.integer(HNsg),as.double(sigmaParams),as.double(piomega),
as.double(tau),as.integer(FT),as.double(deviance),as.double(Res),
as.integer(isDz2),
as.integer(c(cont)),as.integer(c(0)),as.integer(c(0)),as.double(c(0)),
as.double(c(LASTsigma2zk)),as.double(c(LASTR)),as.double(c(LASTDE)),
as.double(c(LASTmuR)),as.double(c(LASTsigma2R)),
as.integer(c(LASTsw)),as.integer(LASTWB),
as.double(c(0)),as.double(c(0)),
as.integer(c(0)),as.double(c(0,0,0)),as.integer(isSin))}
if (mcm==9) {out<-.C("multgT",
as.integer(seed),as.character(StorageDir),
as.integer(c(sweeps,burn,thin,n,p,mcm,MVLD,compDeviance)),
as.double(c(t(Y))),as.double(t(X)),as.double(Z),as.double(W),
as.integer(c(LG,LD,LC)),
as.double(blockSizeProbG),as.double(blockSizeProbD),as.double(blockSizeProbC),
as.integer(maxBSGDC),as.integer(c(NG,ND2,NC)),as.integer(vecLG),as.integer(vecLD),as.integer(vecLC),
as.integer(cusumVecLG),as.integer(cusumVecLD),as.integer(cusumVecLC),
as.double(tuneCalpha),as.double(tuneSigma2),as.double(tuneCbeta),as.double(tuneAlpha),
as.double(tuneSigma2R),as.double(tuneR),as.double(tunePsi),as.double(tunePhi), as.double(tuneCpsi),
as.double(pimu),as.double(cetaParams),as.double(pisigma),
as.integer(HNca),as.double(calphaParams),as.integer(HNcp),as.double(cpsiParams),
as.integer(HNphi),as.double(phiParams),as.double(Rparams),
as.integer(HNsg),as.double(sigmaParams),as.double(piomega),
as.double(tau),as.integer(FT),as.double(deviance),as.double(Res),
as.integer(H),as.double(DPparams),
as.integer(isDz2),
as.integer(c(cont)),as.integer(c(0)),as.integer(c(0)),as.double(c(0)),
as.double(c(LASTsigma2zk)),as.double(c(LASTR)),as.double(c(LASTDE)),
as.double(c(rnorm(n=H,mean=LASTmuR,sd=0.01),LASTsigma2R/H)),
as.integer(c(LASTsw)),as.integer(LASTWB),
as.double(c(0)),as.double(c(0)),
as.integer(c(0)),as.double(c(0)),
as.integer(c(0)),as.double(c(0,0,0)),as.integer(isSin))}
if (mcm==10) {out<-.C("multgvT",
as.integer(seed),as.character(StorageDir),
as.integer(c(sweeps,burn,thin,n,p,mcm,MVLD,compDeviance)),
as.double(c(t(Y))),as.double(t(X)),as.double(Z),as.double(W),
as.integer(c(LG,LD,LC)),
as.double(blockSizeProbG),as.double(blockSizeProbD),as.double(blockSizeProbC),
as.integer(maxBSGDC),as.integer(c(NG,ND2,NC)),as.integer(vecLG),as.integer(vecLD),as.integer(vecLC),
as.integer(cusumVecLG),as.integer(cusumVecLD),as.integer(cusumVecLC),
as.double(tuneCalpha),as.double(tuneSigma2),as.double(tuneCbeta),as.double(tuneAlpha),
as.double(tuneSigma2R),as.double(tuneR),as.double(tunePsi),as.double(tunePhi), as.double(tuneCpsi),
as.double(pimu),as.double(cetaParams),as.double(pisigma),as.integer(HNca),as.double(calphaParams),
as.integer(HNcp),as.double(cpsiParams),as.integer(HNphi),as.double(phiParams),
as.double(Rparams),as.integer(HNsg),as.double(sigmaParams),as.double(piomega),
as.double(tau),as.integer(FT),as.double(deviance),as.double(Res),
as.integer(G), as.double(DPparams),
as.integer(isDz2),
as.integer(c(cont)),as.integer(c(0)),as.integer(c(0)),as.double(c(0)),
as.double(c(LASTsigma2zk)),as.double(c(LASTR)),as.double(c(LASTDE)),
as.double(c(rnorm(n=H,mean=LASTmuR,sd=0.01),LASTsigma2R/H)),
as.integer(c(LASTsw)),as.integer(LASTWB),
as.double(c(0)),as.double(c(0)),
as.integer(c(0)),as.double(c(0)),
as.integer(c(0)),as.double(c(0,0,0)),as.integer(isSin))}
#Output
if (mcm<4){
loc1<-24
loc2<-40
tuneSigma2Ra<-out[[loc1+4]][1]
tuneRa<-out[[loc1+5]][1]
tunePsia<-tunePsi
tunePhia<-tunePhi
tuneCpsia<-tuneCpsi
DevCalcs<-2
}
if (mcm==4){
loc1<-16
loc2<-34
tuneSigma2Ra<-tuneSigma2R
tuneRa<-tuneR
tunePsia<-tunePsi
tunePhia<-tunePhi
tuneCpsia<-tuneCpsi
DevCalcs<-2
}
if (mcm>7){
loc1<-20
loc2<-44
tuneSigma2Ra<-out[[loc1+4]][1]
tuneRa<-out[[loc1+5]][1]
tunePsia<-out[[loc1+6]][1:(p*NC)]
tunePhia<-out[[loc1+7]][1:p]
tuneCpsia<-out[[loc1+8]][1:p]
DevCalcs<-1
}
fit <- list(call=call,call2=call2,formula=formula,seed=seed,p=p,d=p*(p-1)/2,
data=data,Y=Y,
X=X,Xknots=Xknots,LG=LG,NG=NG,NG2=NG2,
Z=Z,Zknots=Zknots,LD=LD,ND=ND,ND2=ND2,
W=W,Wknots=Wknots,LC=LC,NC=NC,
storeMeanVectorX=storeMeanVectorX,
storeMeanVectorZ=storeMeanVectorZ,
storeMeanVectorW=storeMeanVectorW,
storeIndicatorX=storeIndicatorX,
storeIndicatorZ=storeIndicatorZ,
storeIndicatorW=storeIndicatorW,
assignX=assignX,
assignZ=assignZ,
assignW=assignW,
labelsX=labelsX,
labelsZ=labelsZ,
labelsW=labelsW,
countX=countX,
countZ=countZ,
countW=countW,
varsY=varsY,
varsX=varsX,
varsZ=varsZ,
varsW=varsW,
is.Dx=is.Dx,
is.Dz=is.Dz,
is.Dw=is.Dw,
which.SpecX=which.SpecX,
which.SpecZ=which.SpecZ,
which.SpecW=which.SpecW,
formula.termsX=formula.termsX,
formula.termsZ=formula.termsZ,
formula.termsW=formula.termsW,
nSamples=nSamples,
totalSweeps=sweeps,
mcpar=c(as.integer(burn+1),as.integer(seq(from=burn+1,by=thin,length.out=nSamples)[nSamples]),as.integer(thin)),
mcm=mcm,H=H,G=G,
tuneCalpha=c(tuneCalpha,out[[loc1+0]][1:p]),
tuneSigma2=c(tuneSigma2,out[[loc1+1]][1:p]),
tuneCbeta=c(tuneCbeta,out[[loc1+2]][1]),
tuneAlpha=c(tuneAlpha,out[[loc1+3]][1:(p*ND2)]),
tuneSigma2R=c(tuneSigma2R,tuneSigma2Ra),
tuneR=c(tuneR,tuneRa),
tunePsi=c(tunePsi,tunePsia),
tunePhi=c(tunePhi,tunePhia),
tuneCpsi=c(tuneCpsi,tuneCpsia),
tuneHar=c(tuneHar,out[[47]][1:nHar]),
tuneBreaks=c(tuneBreaks,out[[51]][1:nBreaks]),
tunePeriod=c(tunePeriod,out[[55]][1]),
deviance=c(out[[loc2]][1:DevCalcs]),
nullDeviance=nullDeviance,
DIR=StorageDir,
out=out,
LUT=1, SUT=1, LGc=0, LDc=0, NGc=0, NDc=0, NK=0, FT=FT,
qCont=0,
HNca=HNca,HNsg=HNsg,HNcp=HNcp,HNphi=HNphi,nHar=nHar,
varSin=varSin,nBreaks=nBreaks,amplitude=amplitude,period=period,periodUnknown=periodUnknown)
class(fit) <- 'mvrm'
return(fit)
}
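# A minimal usage sketch for mvrm (simulated data, assumed argument values):
# the two sides of "|" specify the mean and variance models, each with an
# sm() smooth term in u.
#   set.seed(1)
#   n <- 100
#   u <- sort(runif(n))
#   y <- rnorm(n, mean = sin(2 * pi * u), sd = exp(-1 + 0.5 * u))
#   dat <- data.frame(y = y, u = u)
#   fit <- mvrm(y ~ sm(u, k = 10) | sm(u, k = 10), data = dat,
#               sweeps = 1000, burn = 500, thin = 2, seed = 1,
#               StorageDir = tempdir())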
continue <- function(object,sweeps,burn=0,thin,discard=FALSE,...){
if (object$qCont==1) stop("current object has been continued; there is a more recent one")
eval.parent(substitute(object$qCont<-1))
cont<-1
#Sweeps
if (missing(sweeps)) stop("provide sweeps argument")
sweeps <- as.integer(sweeps)
burn <- as.integer(burn)
if (missing(thin) || !discard) thin <- object$mcpar[3]
if (thin <= 0) thin <- 1
thin <- as.integer(thin)
nSamples <- 0
LASTsw<-0
if (sweeps > 0 && (sweeps-burn) > 0){
nSamples <- length(seq(1,(sweeps-burn),by=thin))
LASTsw<-seq(burn,by=thin,length.out=nSamples)[nSamples]
}
if (nSamples<=0) stop("problem with sweeps & burn arguments")
LASTWB <- floor(object$totalSweeps/50)+1
#Files
FL <- c("gamma", "cbeta", "delta", "alpha", "R", "muR", "sigma2R", "calpha", "sigma2", "beta", #10
"compAlloc", "nmembers", "deviance", "DPconc", #4
"compAllocV","nmembersV",
"DE", #mcm 1,2,3,4
"psi", "ksi", "cpsi", "nu", "fi", "omega", "ceta", "comega", "eta","tune","probs", #mcm 5,6,7
"phi2", #mcm 8,9,10
"test", "Hgamma", "Hbeta", "breaks", "period")
gamma <- paste(object$DIR, paste("BNSP",FL[1], "txt",sep="."),sep="/")
gamma <- scan(gamma,what=numeric(),n=object$p*object$LG,quiet=TRUE,skip=object$nSamples-1)
cbeta <- paste(object$DIR, paste("BNSP",FL[2], "txt",sep="."),sep="/")
cbeta <- scan(cbeta,what=numeric(),n=1,quiet=TRUE,skip=object$nSamples-1)
delta <- paste(object$DIR, paste("BNSP",FL[3], "txt",sep="."),sep="/")
delta <- scan(delta,what=numeric(),n=object$p*object$LD,quiet=TRUE,skip=object$nSamples-1)
alpha <- paste(object$DIR, paste("BNSP",FL[4], "txt",sep="."),sep="/")
alpha <- scan(alpha,what=numeric(),n=object$p*object$LD,quiet=TRUE,skip=object$nSamples-1)
R <- paste(object$DIR, paste("BNSP",FL[5], "txt",sep="."),sep="/")
if (file.exists(R)) R <- scan(R,what=numeric(),n=object$p^2*object$LUT,quiet=TRUE,skip=object$nSamples-1)
muR <- paste(object$DIR, paste("BNSP",FL[6], "txt",sep="."),sep="/")
if (file.exists(muR)) muR <- scan(muR,what=numeric(),n=object$H,quiet=TRUE,skip=object$nSamples-1)
sigma2R <- paste(object$DIR, paste("BNSP",FL[7], "txt",sep="."),sep="/")
if (file.exists(sigma2R)) sigma2R <- scan(sigma2R,what=numeric(),n=1,quiet=TRUE,skip=object$nSamples-1)
calpha <- paste(object$DIR, paste("BNSP",FL[8], "txt",sep="."),sep="/")
calpha <- scan(calpha,what=numeric(),n=object$p,quiet=TRUE,skip=object$nSamples-1)
sigma2 <- paste(object$DIR, paste("BNSP",FL[9], "txt",sep="."),sep="/")
sigma2 <- scan(sigma2,what=numeric(),n=object$p,quiet=TRUE,skip=object$nSamples-1)
compAlloc <- paste(object$DIR, paste("BNSP",FL[11], "txt",sep="."),sep="/")
if (file.exists(compAlloc)) compAlloc <- scan(compAlloc,what=numeric(),n=object$d,quiet=TRUE,skip=object$nSamples-1)
DPconc <- paste(object$DIR, paste("BNSP",FL[14], "txt",sep="."),sep="/")
if (file.exists(DPconc)) DPconc <- scan(DPconc,what=numeric(),n=1,quiet=TRUE,skip=object$nSamples-1)
compAllocV <- paste(object$DIR, paste("BNSP",FL[15], "txt",sep="."),sep="/")
if (file.exists(compAllocV)) compAllocV <- scan(compAllocV,what=numeric(),n=object$p,quiet=TRUE,skip=object$nSamples-1)
DE <- paste(object$DIR, paste("BNSP",FL[17], "txt",sep="."),sep="/")
if (file.exists(DE)) DE <- scan(DE,what=numeric(),n=2*object$p^2*object$LUT,quiet=TRUE,skip=0)
psi <- paste(object$DIR, paste("BNSP",FL[18], "txt",sep="."),sep="/")
if (file.exists(psi)) psi <- scan(psi,what=numeric(),n=object$p*object$p*object$LK,quiet=TRUE,skip=object$nSamples-1)
ksi <- paste(object$DIR, paste("BNSP",FL[19], "txt",sep="."),sep="/")
if (file.exists(ksi)) ksi <- scan(ksi,what=numeric(),n=object$p*object$p*object$LK,quiet=TRUE,skip=object$nSamples-1)
cpsi <- paste(object$DIR, paste("BNSP",FL[20], "txt",sep="."),sep="/")
if (file.exists(cpsi)) cpsi <- scan(cpsi,what=numeric(),n=object$p^2,quiet=TRUE,skip=object$nSamples-1)
gammaCor <- paste(object$DIR, paste("BNSP",FL[21], "txt",sep="."),sep="/")
gammaCor <- if (file.exists(gammaCor)) scan(gammaCor,what=numeric(),n=object$LGc*object$H,quiet=TRUE,skip=object$nSamples-1)
deltaCor <- paste(object$DIR, paste("BNSP",FL[22], "txt",sep="."),sep="/")
deltaCor <- if (file.exists(deltaCor)) scan(deltaCor,what=numeric(),n=object$LDc,quiet=TRUE,skip=object$nSamples-1)
omega <- paste(object$DIR, paste("BNSP",FL[23], "txt",sep="."),sep="/")
omega <- if (file.exists(omega)) scan(omega,what=numeric(),n=object$LDc,quiet=TRUE,skip=object$nSamples-1)
ceta <- paste(object$DIR, paste("BNSP",FL[24], "txt",sep="."),sep="/")
ceta <- if (file.exists(ceta)) scan(ceta,what=numeric(),n=1,quiet=TRUE,skip=object$nSamples-1)
comega <- paste(object$DIR, paste("BNSP",FL[25], "txt",sep="."),sep="/")
comega <- if (file.exists(comega)) scan(comega,what=numeric(),n=1,quiet=TRUE,skip=object$nSamples-1)
gammaHar <- paste(object$DIR, paste("BNSP",FL[31], "txt",sep="."),sep="/")
gammaHar <- if (file.exists(gammaHar)) scan(gammaHar,what=numeric(),n=object$nHar*2,quiet=TRUE,skip=object$nSamples-1)
betaHar <- paste(object$DIR, paste("BNSP",FL[32], "txt",sep="."),sep="/")
betaHar <- if (file.exists(betaHar)) scan(betaHar,what=numeric(),n=object$nHar*2,quiet=TRUE,skip=object$nSamples-1)
shifts <- paste(object$DIR, paste("BNSP",FL[33], "txt",sep="."),sep="/")
shifts <- if (file.exists(shifts)) scan(shifts,what=numeric(),n=object$nBreaks,quiet=TRUE,skip=object$nSamples-1)
period <- paste(object$DIR, paste("BNSP",FL[34], "txt",sep="."),sep="/")
period <- if (file.exists(period)) scan(period,what=numeric(),n=1,quiet=TRUE,skip=object$nSamples-1)
LASTAll<-c(R,DE,gamma,delta,alpha,sigma2,ksi,psi,cbeta,calpha,cpsi,gammaCor)
if (object$mcm==6) LASTAll<-c(LASTAll,compAlloc,DPconc)
if (object$mcm==7) LASTAll<-c(LASTAll,compAllocV,DPconc)
LASTAll<-c(LASTAll,deltaCor,comega,sigma2R,omega,ceta)
#Discard
if (discard==TRUE){
for (i in 1:length(FL)){
oneFile <- paste(object$DIR, paste("BNSP",FL[i], "txt",sep="."),sep="/")
if (file.exists(oneFile)) file.remove(oneFile)
}
}
#Deviance
deviance<-c(0,0)
if (discard==FALSE) deviance<-as.double(object$deviance)
#Tuning
tuneCalpha<-object$tuneCalpha[(object$p+1):(2*object$p)]
tuneSigma2<-object$tuneSigma2[(object$p+1):(2*object$p)]
tuneCbeta<-object$tuneCbeta[2]
tuneAlpha<-object$tuneAlpha[(object$p*object$ND2+1):(2*object$p*object$ND2)]
tuneSigma2R<-object$tuneSigma2R[2]
tuneR<-object$tuneR[2]
tunePsi<-NULL
if (object$NC > 0){
tunePsi<-object$tunePsi[(object$p*object$NC+1):(2*object$p*object$NC)]
}
tunePhi<-object$tunePhi[(object$p+1):(2*object$p)]
tuneCpsi<-object$tuneCpsi[(object$p+1):(2*object$p)]
#Call C
if (object$mcm==4) out<-.C("mvrmC",
as.integer(object$out[[1]]),as.character(object$out[[2]]),
as.integer(sweeps),as.integer(burn),as.integer(thin),
as.double(object$out[[6]]),as.double(object$out[[7]]),as.double(object$out[[8]]),
as.integer(object$out[[9]]),as.integer(object$out[[10]]),as.integer(object$out[[11]]),
as.double(object$out[[12]]),as.integer(object$out[[13]]),as.double(object$out[[14]]),
as.integer(object$out[[15]]),as.double(object$out[[16]]),as.double(object$out[[17]]),
as.double(object$out[[18]]),as.double(object$out[[19]]),as.integer(object$out[[20]]),
as.integer(object$out[[21]]),as.integer(object$out[[22]]),as.integer(object$out[[23]]),
as.integer(object$out[[24]]),as.integer(object$out[[25]]),as.integer(object$out[[26]]),
as.double(object$out[[27]]),as.integer(object$out[[28]]),as.double(object$out[[29]]),
as.double(object$out[[30]]),as.double(object$out[[31]]),as.integer(object$out[[32]]),
as.double(object$out[[33]]),as.double(deviance),as.integer(object$out[[35]]),
as.integer(cont),as.integer(gamma),as.integer(delta),as.double(alpha),as.double(sigma2),
as.double(cbeta),as.double(calpha),as.integer(LASTWB),
as.integer(object$out[[44]]),as.double(object$out[[45]]),as.integer(object$out[[46]]),
as.double(object$out[[47]]),as.double(object$out[[48]]),as.integer(object$out[[49]]),
as.double(c(period,object$out[[50]][-1])),as.double(object$out[[51]]),as.integer(gammaHar),
as.double(betaHar),as.double(shifts),as.double(object$out[[55]]),
as.integer(object$out[[56]]),as.double(object$out[[57]]))
if (object$mcm==1) out<-.C("mult",
as.integer(object$out[[1]]),as.character(object$out[[2]]),
as.integer(sweeps),as.integer(burn),as.integer(thin),
as.integer(object$out[[6]]),as.integer(object$out[[7]]),as.double(object$out[[8]]),
as.double(object$out[[9]]),as.double(object$out[[10]]),
as.integer(object$out[[11]]),as.integer(object$out[[12]]),
as.double(object$out[[13]]),as.integer(object$out[[14]]),as.double(object$out[[15]]),
as.integer(object$out[[16]]),
as.integer(object$out[[17]]),as.integer(object$out[[18]]),as.integer(object$out[[19]]),
as.integer(object$out[[20]]),
as.integer(object$out[[21]]),as.integer(object$out[[22]]),as.integer(object$out[[23]]),
as.double(object$out[[24]]),as.double(object$out[[25]]),as.double(object$out[[26]]),
as.double(object$out[[27]]),
as.double(object$out[[28]]),as.double(object$out[[29]]),
as.double(object$out[[30]]),as.double(object$out[[31]]),as.double(object$out[[32]]),
as.integer(object$out[[33]]),as.double(object$out[[34]]),as.double(object$out[[35]]),
as.integer(object$out[[36]]),as.double(object$out[[37]]),as.double(object$out[[38]]),
as.integer(object$out[[39]]),as.double(deviance),as.integer(object$out[[41]]),
as.integer(cont),as.integer(gamma),as.integer(delta),as.double(alpha),as.double(sigma2),
as.double(R),
as.double(muR),as.double(sigma2R),as.double(cbeta),as.double(calpha),
as.integer(c(LASTsw)),as.double(c(DE)),as.integer(LASTWB),as.integer(object$out[[55]]))
if (object$mcm==2) out<-.C("multg",
as.integer(object$out[[1]]),as.character(object$out[[2]]),
as.integer(sweeps),as.integer(burn),as.integer(thin),
as.integer(object$out[[6]]),as.integer(object$out[[7]]),as.double(object$out[[8]]),
as.double(object$out[[9]]),as.double(object$out[[10]]),
as.integer(object$out[[11]]),as.integer(object$out[[12]]),
as.double(object$out[[13]]),as.integer(object$out[[14]]),as.double(object$out[[15]]),
as.integer(object$out[[16]]),
as.integer(object$out[[17]]),as.integer(object$out[[18]]),as.integer(object$out[[19]]),
as.integer(object$out[[20]]),
as.integer(object$out[[21]]),as.integer(object$out[[22]]),as.integer(object$out[[23]]),
as.double(object$out[[24]]),as.double(object$out[[25]]),as.double(object$out[[26]]),
as.double(object$out[[27]]),
as.double(object$out[[28]]),as.double(object$out[[29]]),
as.double(object$out[[30]]),as.double(object$out[[31]]),as.double(object$out[[32]]),
as.integer(object$out[[33]]),as.double(object$out[[34]]),as.double(object$out[[35]]),
as.integer(object$out[[36]]),as.double(object$out[[37]]),as.double(object$out[[38]]),
as.integer(object$out[[39]]),as.double(deviance),as.integer(object$out[[41]]),
as.double(object$out[[42]]),as.integer(object$out[[43]]),
as.integer(cont),as.integer(gamma),as.integer(delta),as.double(alpha),as.double(sigma2),
as.double(R),
as.double(muR),as.double(sigma2R),as.double(cbeta),as.double(calpha),
as.integer(compAlloc),as.double(DPconc),as.integer(c(LASTsw)),as.double(c(DE)),
as.integer(LASTWB),as.integer(object$out[[59]]))
if (object$mcm==3) out<-.C("multgv",
as.integer(object$out[[1]]), as.character(object$out[[2]]), as.integer(sweeps),
as.integer(burn), as.integer(thin), as.integer(object$out[[6]]), as.integer(object$out[[7]]),
as.double(object$out[[8]]), as.double(object$out[[9]]), as.double(as.matrix(object$out[[10]])),
as.integer(object$out[[11]]), as.integer(object$out[[12]]), as.double(object$out[[13]]),
as.integer(object$out[[14]]),
as.double(object$out[[15]]), as.integer(object$out[[16]]), as.integer(object$out[[17]]),
as.integer(object$out[[18]]),
as.integer(object$out[[19]]), as.integer(object$out[[20]]), as.integer(object$out[[21]]),
as.integer(object$out[[22]]),
as.integer(object$out[[23]]), as.double(object$out[[24]]), as.double(object$out[[25]]),
as.double(object$out[[26]]),
as.double(object$out[[27]]), as.double(object$out[[28]]), as.double(object$out[[29]]),
as.double(object$out[[30]]),
as.double(object$out[[31]]), as.double(object$out[[32]]), as.integer(object$out[[33]]),
as.double(object$out[[34]]),
as.double(object$out[[35]]), as.integer(object$out[[36]]), as.double(object$out[[37]]),
as.double(object$out[[38]]),
as.integer(object$out[[39]]), as.double(deviance), as.integer(object$out[[41]]),
as.double(object$out[[42]]),as.integer(object$out[[43]]),
as.integer(cont), as.integer(gamma), as.integer(delta), as.double(alpha), as.double(sigma2),
as.double(R),
as.double(muR), as.double(sigma2R), as.double(cbeta), as.double(calpha), as.integer(compAllocV),
as.double(DPconc),
as.integer(c(LASTsw)), as.double(c(DE)), as.integer(LASTWB),as.integer(object$out[[59]]))
if (object$mcm==5)
out<-.C("longmult",
as.integer(object$out[[1]]), as.character(object$out[[2]]),
as.integer(c(sweeps,burn,thin,object$out[[3]][4:8])), as.integer(object$out[[4]]), as.integer(object$out[[5]]),
as.integer(object$out[[6]]), as.integer(object$out[[7]]), as.integer(object$out[[8]]), as.integer(object$out[[9]]),
as.integer(object$out[[10]]), as.double(object$out[[11]]), as.double(object$out[[12]]), as.double(object$out[[13]]),
as.double(object$out[[14]]), as.double(object$out[[15]]),as.double(object$out[[16]]), as.integer(object$out[[17]]),
as.integer(object$out[[18]]), as.integer(object$out[[19]]), as.integer(object$out[[20]]), as.integer(object$out[[21]]),
as.integer(object$out[[22]]), as.integer(object$out[[23]]), as.integer(object$out[[24]]), as.integer(object$out[[25]]),
as.integer(object$out[[26]]), as.integer(object$out[[27]]), as.integer(object$out[[28]]), as.double(object$out[[29]]),
as.integer(object$out[[30]]), as.double(object$out[[31]]), as.double(object$out[[32]]), as.double(object$out[[33]]),
as.double(object$out[[34]]), as.double(object$out[[35]]), as.double(object$out[[36]]), as.double(object$out[[37]]),
as.double(object$out[[38]]), as.double(object$out[[39]]), as.double(object$out[[40]]), as.double(object$out[[41]]),
as.double(object$out[[42]]), as.double(object$out[[43]]), as.integer(object$out[[44]]), as.double(object$out[[45]]),
as.integer(object$out[[46]]), as.double(object$out[[47]]), as.double(object$out[[48]]), as.integer(object$out[[49]]),
as.double(object$out[[50]]), as.double(object$out[[51]]), as.integer(object$out[[52]]), as.double(object$out[[53]]),
as.double(object$out[[54]]), as.integer(object$out[[55]]), as.double(object$out[[56]]),
as.double(object$out[[57]]), as.integer(object$out[[58]]), as.double(deviance), as.integer(object$out[[60]]),
as.integer(c(cont,LASTsw,LASTWB)), as.double(LASTAll))
if (object$mcm==6)
out<-.C("longmultg",
as.integer(object$out[[1]]), as.character(object$out[[2]]),
as.integer(c(sweeps,burn,thin,object$out[[3]][4:8])), as.integer(object$out[[4]]), as.integer(object$out[[5]]),
as.integer(object$out[[6]]), as.integer(object$out[[7]]), as.integer(object$out[[8]]), as.integer(object$out[[9]]),
as.integer(object$out[[10]]), as.double(object$out[[11]]), as.double(object$out[[12]]), as.double(object$out[[13]]),
as.double(object$out[[14]]), as.double(object$out[[15]]), as.double(as.matrix(object$out[[16]])), as.integer(object$out[[17]]),
as.integer(object$out[[18]]), as.integer(object$out[[19]]), as.integer(object$out[[20]]), as.integer(object$out[[21]]),
as.integer(object$out[[22]]), as.integer(object$out[[23]]), as.integer(object$out[[24]]), as.integer(object$out[[25]]),
as.integer(object$out[[26]]), as.integer(object$out[[27]]), as.integer(object$out[[28]]), as.double(object$out[[29]]),
as.integer(object$out[[30]]), as.double(object$out[[31]]), as.double(object$out[[32]]), as.double(object$out[[33]]),
as.double(object$out[[34]]), as.double(object$out[[35]]), as.double(object$out[[36]]), as.double(object$out[[37]]),
as.double(object$out[[38]]), as.double(object$out[[39]]), as.double(object$out[[40]]), as.double(object$out[[41]]),
as.double(object$out[[42]]), as.double(object$out[[43]]), as.integer(object$out[[44]]), as.double(object$out[[45]]),
as.integer(object$out[[46]]), as.double(object$out[[47]]), as.double(object$out[[48]]), as.integer(object$out[[49]]),
as.double(object$out[[50]]), as.double(object$out[[51]]), as.integer(object$out[[52]]), as.double(object$out[[53]]),
as.double(object$out[[54]]), as.integer(object$out[[55]]), as.double(object$out[[56]]),
as.double(object$out[[57]]), as.integer(object$out[[58]]), as.double(deviance),as.integer(object$out[[60]]),
as.integer(c(cont,LASTsw,LASTWB)),
as.double(LASTAll), as.integer(object$out[[63]]), as.double(object$out[[64]]))
if (object$mcm==7)
out<-.C("longmultgv",
as.integer(object$out[[1]]), as.character(object$out[[2]]),
as.integer(c(sweeps,burn,thin,object$out[[3]][4:8])), as.integer(object$out[[4]]), as.integer(object$out[[5]]),
as.integer(object$out[[6]]), as.integer(object$out[[7]]), as.integer(object$out[[8]]), as.integer(object$out[[9]]),
as.integer(object$out[[10]]), as.double(object$out[[11]]), as.double(object$out[[12]]), as.double(object$out[[13]]),
as.double(object$out[[14]]), as.double(object$out[[15]]), as.double(as.matrix(object$out[[16]])), as.integer(object$out[[17]]),
as.integer(object$out[[18]]), as.integer(object$out[[19]]), as.integer(object$out[[20]]), as.integer(object$out[[21]]),
as.integer(object$out[[22]]), as.integer(object$out[[23]]), as.integer(object$out[[24]]), as.integer(object$out[[25]]),
as.integer(object$out[[26]]), as.integer(object$out[[27]]), as.integer(object$out[[28]]), as.double(object$out[[29]]),
as.integer(object$out[[30]]), as.double(object$out[[31]]), as.double(object$out[[32]]), as.double(object$out[[33]]),
as.double(object$out[[34]]), as.double(object$out[[35]]), as.double(object$out[[36]]), as.double(object$out[[37]]),
as.double(object$out[[38]]), as.double(object$out[[39]]), as.double(object$out[[40]]), as.double(object$out[[41]]),
as.double(object$out[[42]]), as.double(object$out[[43]]), as.integer(object$out[[44]]), as.double(object$out[[45]]),
as.integer(object$out[[46]]), as.double(object$out[[47]]), as.double(object$out[[48]]), as.integer(object$out[[49]]),
as.double(object$out[[50]]), as.double(object$out[[51]]), as.integer(object$out[[52]]), as.double(object$out[[53]]),
as.double(object$out[[54]]), as.integer(object$out[[55]]), as.double(object$out[[56]]),
as.double(object$out[[57]]), as.integer(object$out[[58]]), as.double(deviance),as.integer(object$out[[60]]),
as.integer(c(cont,LASTsw,LASTWB)),
as.double(LASTAll), as.integer(object$out[[63]]), as.double(object$out[[64]]))
#Output: positions of the tuning parameters (loc1) and of the deviance (loc2) in the .C output list
if (object$mcm<4){
loc1<-25
loc2<-41
tuneSigma2Ra<-out[[loc1+4]][1]
tuneRa<-out[[loc1+5]][1]
tunePsia<-tunePsi
tunePhia<-tunePhi
tuneCpsia<-tuneCpsi
}
if (object$mcm==4){
loc1<-16
loc2<-34
tuneSigma2Ra<-tuneSigma2R
tuneRa<-tuneR
tunePsia<-tunePsi
tunePhia<-tunePhi
tuneCpsia<-tuneCpsi
DevCalcs<-2
}
if (object$mcm %in% c(5,6,7)){
loc1<-31
loc2<-60
tuneSigma2Ra<-out[[loc1+4]][1]
tuneR<-object$tuneR[(1+object$LUT):(2*object$LUT)]
tuneRa<-out[[loc1+5]][1:object$LUT]
tuneCpsi<-object$tuneCpsi[(object$p*object$p+1):(2*object$p*object$p)]
tuneCpsia<-out[[loc1+6]][1:(object$p*object$p)] # first p*p elements; unparenthesised 1:p*p picks the wrong indices
DevCalcs<-2
}
#nSamples & mcpar
totalSweeps <- object$totalSweeps + sweeps
if (discard==TRUE) object$nSamples <- 0
nSamples <- nSamples + object$nSamples
totalBurn <- object$mcpar[1] + burn
if (discard==TRUE) totalBurn <- object$totalSweeps + burn
#Call
call <- object$call
call2 <- object$call2
sweeps.pos <- which.max(pmatch(names(call), "sweeps"))
call[[sweeps.pos]] <- as.integer(totalSweeps)
burn.pos <- which.max(pmatch(names(call), "burn"))
call[[burn.pos]] <- totalBurn
thin.pos <- which.max(pmatch(names(call), "thin"))
call[[thin.pos]] <- thin
sweeps.pos <- which.max(pmatch(names(call2), "sweeps"))
call2[[sweeps.pos]] <- totalSweeps
burn.pos <- which.max(pmatch(names(call2), "burn"))
call2[[burn.pos]] <- totalBurn
thin.pos <- which.max(pmatch(names(call2), "thin"))
call2[[thin.pos]] <- thin
#fit
fit <- list(call=call,call2=call2,formula=object$formula,seed=object$seed,p=object$p,d=object$d,
data=object$data,Y=object$Y,
X=object$X,Xknots=object$Xknots,LG=object$LG,NG=object$NG,NG2=object$NG2,
Z=object$Z,Zknots=object$Zknots,LD=object$LD,ND=object$ND,ND2=object$ND2,
W=object$W,Wknots=object$Wknots,LC=object$LC,NC=object$NC,
storeMeanVectorX=object$storeMeanVectorX,
storeMeanVectorZ=object$storeMeanVectorZ,
storeMeanVectorW=object$storeMeanVectorW,
storeIndicatorX=object$storeIndicatorX,
storeIndicatorZ=object$storeIndicatorZ,
storeIndicatorW=object$storeIndicatorW,
assignX=object$assignX,
assignZ=object$assignZ,
assignW=object$assignW,
labelsX=object$labelsX,
labelsZ=object$labelsZ,
labelsW=object$labelsW,
countX=object$countX,
countZ=object$countZ,
countW=object$countW,
varsY=object$varsY,
varsX=object$varsX,
varsZ=object$varsZ,
varsW=object$varsW,
is.Dx=object$is.Dx,
is.Dz=object$is.Dz,
is.Dw=object$is.Dw,
which.SpecX=object$which.SpecX,
which.SpecZ=object$which.SpecZ,
which.SpecW=object$which.SpecW,
formula.termsX=object$formula.termsX,
formula.termsZ=object$formula.termsZ,
formula.termsW=object$formula.termsW,
nSamples=nSamples,
totalSweeps=totalSweeps,
mcpar=c(totalBurn,as.integer(seq(from=totalBurn,by=thin,length.out=nSamples)[nSamples]),thin),
mcm=object$mcm,H=object$H,G=object$G,
tuneCalpha=c(tuneCalpha,out[[loc1+0]][1:object$p]),
tuneSigma2=c(tuneSigma2,out[[loc1+1]][1:object$p]),
tuneCbeta=c(tuneCbeta,out[[loc1+2]][1]),
tuneAlpha=c(tuneAlpha,out[[loc1+3]][1:(object$p*object$ND2)]),
tuneSigma2R=c(tuneSigma2R,tuneSigma2Ra),
tuneR=c(tuneR,tuneRa),
tunePsi=c(tunePsi,tunePsia),
tunePhi=c(tunePhi,tunePhia),
tuneCpsi=c(tuneCpsi,tuneCpsia),
deviance=c(out[[loc2]][1:DevCalcs]),
nullDeviance=object$nullDeviance,
DIR=object$DIR,
out=out,
LUT=object$LUT,
SUT=object$SUT,
LGc=object$LGc,
LDc=object$LDc,
NGc=object$NGc,
NDc=object$NDc,
NK=object$NK,
FT=object$FT,
qCont=0,
HNca=object$HNca,HNsg=object$HNsg,HNcp=object$HNcp,HNphi=object$HNphi)
if (object$mcm %in% c(5,6,7)){
tuneCbCor<-object$tuneCbCor[2]
tuneOmega<-object$tuneOmega[(object$NDc+1):(object$NDc*2)]
tuneComega<-object$tuneComega[2]
fit2 <- list(
C=object$C,Cknots=object$Cknots,LK=object$LK,
Xc=object$Xc,Xcknots=object$Xcknots,
Zc=object$Zc,Zcknots=object$Zcknots,
storeMeanVectorC=object$storeMeanVectorC,
storeMeanVectorXc=object$storeMeanVectorXc,
storeMeanVectorZc=object$storeMeanVectorZc,
storeIndicatorC=object$storeIndicatorC,
storeIndicatorXc=object$storeIndicatorXc,
storeIndicatorZc=object$storeIndicatorZc,
assignC=object$assignC,
assignXc=object$assignXc,
assignZc=object$assignZc,
labelsC=object$labelsC,
labelsXc=object$labelsXc,
labelsZc=object$labelsZc,
countC=object$countC,
countXc=object$countXc,
countZc=object$countZc,
varsC=object$varsC,
varsXc=object$varsXc,
varsZc=object$varsZc,
is.Dc=object$is.Dc,
is.Dxc=object$is.Dxc,
is.Dzc=object$is.Dzc,
which.SpecC=object$which.SpecC,
which.SpecXc=object$which.SpecXc,
which.SpecZc=object$which.SpecZc,
formula.termsC=object$formula.termsC,
formula.termsXc=object$formula.termsXc,
formula.termsZc=object$formula.termsZc,
tuneCbCor=c(tuneCbCor,out[[loc1+7]][1]),
tuneOmega=c(tuneOmega,out[[loc1+8]][1:object$NDc]),
tuneComega=c(tuneComega,out[[loc1+9]][1]),
HNcpsi=object$HNcpsi,HNscor=object$HNscor,HNco=object$HNco,
lag=object$lag,varTime=object$varTime,niVec=object$niVec,intime=object$intime)
fit<-c(fit,fit2)
}
class(fit) <- 'mvrm'
return(fit)
}
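# mvrm2mcmc: reads posterior samples of the requested parameters ("labels")
# from the BNSP.*.txt files in mvrmObj$DIR and returns them as a single
# matrix of class "mcmc". E.g., assuming `m` is a fitted mvrm object:
#   betaSamples <- mvrm2mcmc(m, "beta")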
mvrm2mcmc <- function(mvrmObj,labels){
labels1 <- c("alpha","calpha","cbeta","delta","beta","gamma","sigma2") #7
labels2 <- c("muR","sigma2R","R") #3
labels3 <- c("compAlloc","nmembers","DPconc") #3
labels4 <- c("compAllocV","nmembersV","deviance")#3
labels5 <- c("psi","ksi","cpsi","nu","fi","omega","comega","eta","ceta")#9
labels6 <- c("probs","tune")#2
labels7 <- c("phi2")#1
labels8 <- c("Hbeta", "Hgamma", "breaks", "period")#4
all.labels<-c(labels1,labels2,labels3,labels4,labels5,labels6,labels7,labels8)
if (missing(labels)) labels <- all.labels
mtch<-match(labels,all.labels)
p<-mvrmObj$p
R<-NULL
if (any(mtch==1) && mvrmObj$LD > 0){
file <- paste(mvrmObj$DIR,"BNSP.alpha.txt",sep="")
if (file.exists(file)){
if (p > 1) names1<-paste("alpha",rep(colnames(mvrmObj$Z)[-1],p),rep(mvrmObj$varsY,each=mvrmObj$LD),sep=".")
if (p == 1) names1<-paste("alpha",colnames(mvrmObj$Z)[-1],sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$LD*p,dimnames=list(c(),names1)))
}
}
if (any(mtch==2)){
file <- paste(mvrmObj$DIR,"BNSP.calpha.txt",sep="")
if (file.exists(file)){
if (p > 1) names1<-paste(rep("c_alpha",p),mvrmObj$varsY,sep=".")
if (p == 1) names1<-"c_alpha"
R<-cbind(R,matrix(unlist(read.table(file)),ncol=p,dimnames=list(c(),names1)))
}
}
if (any(mtch==3)){
file <- paste(mvrmObj$DIR,"BNSP.cbeta.txt",sep="")
if (file.exists(file))
R<-cbind(R,matrix(unlist(read.table(file)),ncol=1,dimnames=list(c(),c("c_beta"))))
}
if (any(mtch==4) && mvrmObj$LD > 0){
file <- paste(mvrmObj$DIR,"BNSP.delta.txt",sep="")
if (file.exists(file)){
if (p > 1) names1<-paste("delta",rep(colnames(mvrmObj$Z)[-1],p),rep(mvrmObj$varsY,each=mvrmObj$LD),sep=".")
if (p == 1) names1<-paste("delta",colnames(mvrmObj$Z)[-1],sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$LD*p,dimnames=list(c(),names1)))
}
}
if (any(mtch==5)){
file <- paste(mvrmObj$DIR,"BNSP.beta.txt",sep="")
if (file.exists(file)){
if (p > 1) names1<-paste("beta",rep(colnames(mvrmObj$X),p),rep(mvrmObj$varsY,each=mvrmObj$LG+1),sep=".")
if (p == 1) names1<-paste("beta",colnames(mvrmObj$X),sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=p*(mvrmObj$LG+1),dimnames=list(c(),names1)))
}
}
if (any(mtch==6) && mvrmObj$LG > 0){
file <- paste(mvrmObj$DIR,"BNSP.gamma.txt",sep="")
if (file.exists(file)){
if (p > 1) names1<-paste("gamma",rep(colnames(mvrmObj$X)[-1],p),rep(mvrmObj$varsY,each=mvrmObj$LG),sep=".")
if (p == 1) names1<-paste("gamma",colnames(mvrmObj$X)[-1],sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=p*mvrmObj$LG,dimnames=list(c(),names1)))
}
}
if (any(mtch==7)){
file <- paste(mvrmObj$DIR,"BNSP.sigma2.txt",sep="")
if (file.exists(file)){
if (p > 1) names1<-paste(rep("sigma2",p),mvrmObj$varsY,sep=".")
if (p == 1) names1<-"sigma2"
R<-cbind(R,matrix(unlist(read.table(file)),ncol=p,dimnames=list(c(),names1)))
}
}
if (any(mtch==8) && p > 1 && mvrmObj$LUT==1){
names <- paste("muR_clust",seq(1,mvrmObj$H),sep="_")
if (mvrmObj$H==1) names <- "muR"
file <- paste(mvrmObj$DIR,"BNSP.muR.txt",sep="")
if (file.exists(file)) R<-cbind(R,matrix(unlist(read.table(file)),nrow=mvrmObj$nSamples,
dimnames=list(c(),names)))
}
if (any(mtch==9) && p > 1){
file <- paste(mvrmObj$DIR,"BNSP.sigma2R.txt",sep="")
if (file.exists(file)) R<-cbind(R,matrix(unlist(read.table(file)),ncol=1,dimnames=list(c(),c("sigma2R"))))
}
subset <- rep(seq(1,p),each=p) < rep(seq(1,p),p)
cor.names <- paste(rep(seq(1,p),each=p),rep(seq(1,p),p),sep="")
if (any(mtch==10) && p > 1){
file <- paste(mvrmObj$DIR,"BNSP.R.txt",sep="")
if (file.exists(file)) R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$LUT*p*p,
dimnames=list(c(),paste("cor",rep(cor.names,mvrmObj$LUT),sep=".")))[,subset,drop=FALSE])
}
if (any(mtch==11)){
file <- paste(mvrmObj$DIR,"BNSP.compAlloc.txt",sep="")
if (file.exists(file)) R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$d,
dimnames=list(c(),paste("clustering of cor",cor.names[subset],sep="."))))
}
if (any(mtch==12)){
file <- paste(mvrmObj$DIR,"BNSP.nmembers.txt",sep="")
if (file.exists(file)) R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$H,dimnames=list(c(),paste("cor cluster ",seq(1,mvrmObj$H)))))
}
if (any(mtch==13)){
file <- paste(mvrmObj$DIR,"BNSP.DPconc.txt",sep="")
if (file.exists(file)) R<-cbind(R,matrix(unlist(read.table(file)),ncol=1,dimnames=list(c(),c("DPconc"))))
}
if (any(mtch==14)){
file <- paste(mvrmObj$DIR,"BNSP.compAllocV.txt",sep="")
if (file.exists(file)) R<-cbind(R,matrix(unlist(read.table(file)),ncol=p,dimnames=list(c(),paste("clustering of ",mvrmObj$varsY))))
}
if (any(mtch==15)){
file <- paste(mvrmObj$DIR,"BNSP.nmembersV.txt",sep="")
if (file.exists(file)) R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$G,dimnames=list(c(),paste("var cluster ",seq(1,mvrmObj$G)))))
}
if (any(mtch==16)){
file <- paste(mvrmObj$DIR,"BNSP.deviance.txt",sep="")
if (file.exists(file)) R<-cbind(R,matrix(unlist(read.table(file)),ncol=2,dimnames=list(c(),c("marginal deviance","full deviance"))))
}
if (any(mtch==17) && mvrmObj$mcm < 8){
file <- paste(mvrmObj$DIR,"BNSP.psi.txt",sep="")
if (file.exists(file)){
if (p > 1){
e <- cbind(rep(unlist(mvrmObj$varsY),each=p),rep(unlist(mvrmObj$varsY),p))
e <- paste(e[,1],e[,2],sep=".")
names1<-paste("psi",rep(colnames(mvrmObj$C),p*p),rep(e,each=mvrmObj$LK),sep=".")
}
if (p == 1) names1<-paste("psi",colnames(mvrmObj$C),sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=p*p*mvrmObj$LK,dimnames=list(c(),names1)))
}
}
if (any(mtch==17) && mvrmObj$LC > 0 && mvrmObj$mcm > 7){
file <- paste(mvrmObj$DIR,"BNSP.psi.txt",sep="")
if (file.exists(file)){
if (p > 1) names1<-paste("psi",rep(colnames(mvrmObj$W)[-1],p),rep(mvrmObj$varsY,each=mvrmObj$LC),sep=".")
if (p == 1) names1<-paste("psi",colnames(mvrmObj$W)[-1],sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$LC*p,dimnames=list(c(),names1)))
}
}
if (any(mtch==18) && mvrmObj$mcm < 8){
file <- paste(mvrmObj$DIR,"BNSP.ksi.txt",sep="")
if (file.exists(file)){
if (p > 1){
e <- cbind(rep(unlist(mvrmObj$varsY),each=p),rep(unlist(mvrmObj$varsY),p))
e <- paste(e[,1],e[,2],sep=".")
names1<-paste("ksi",rep(colnames(mvrmObj$C),p*p),rep(e,each=mvrmObj$LK),sep=".")
}
if (p == 1) names1<-paste("ksi",colnames(mvrmObj$C),sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=p*p*mvrmObj$LK,dimnames=list(c(),names1)))
}
}
if (any(mtch==18) && mvrmObj$LC > 0 && mvrmObj$mcm > 7){
file <- paste(mvrmObj$DIR,"BNSP.ksi.txt",sep="")
if (file.exists(file)){
if (p > 1) names1<-paste("ksi",rep(colnames(mvrmObj$W)[-1],p),rep(mvrmObj$varsY,each=mvrmObj$LC),sep=".")
if (p == 1) names1<-paste("ksi",colnames(mvrmObj$W)[-1],sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$LC*p,dimnames=list(c(),names1)))
}
}
if (any(mtch==19) && mvrmObj$mcm < 8){
file <- paste(mvrmObj$DIR,"BNSP.cpsi.txt",sep="")
if (file.exists(file)){
if (p > 1){
e <- cbind(rep(unlist(mvrmObj$varsY),each=p),rep(unlist(mvrmObj$varsY),p))
e <- paste(e[,1],e[,2],sep=".")
names1<-paste("cpsi",e,sep=".")
}
if (p == 1) names1<-paste("cpsi")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=p*p,dimnames=list(c(),names1)))
}
}
if (any(mtch==19) && mvrmObj$mcm > 7){
file <- paste(mvrmObj$DIR,"BNSP.cpsi.txt",sep="")
if (file.exists(file)){
if (p > 1) names1<-paste(rep("c_psi",p),mvrmObj$varsY,sep=".")
if (p == 1) names1<-"c_psi"
R<-cbind(R,matrix(unlist(read.table(file)),ncol=p,dimnames=list(c(),names1)))
}
}
if (any(mtch==20) && mvrmObj$LGc > 0){
file <- paste(mvrmObj$DIR,"BNSP.nu.txt",sep="")
if (file.exists(file)){
if (mvrmObj$H==1) names1<-paste("nu",colnames(mvrmObj$Xc)[-1],sep=".")
if (mvrmObj$H>1) names1<-paste(paste("nu",rep(1:mvrmObj$H,each=mvrmObj$LGc),sep="."),rep(seq(1:mvrmObj$LGc),mvrmObj$H),sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$LGc*mvrmObj$H,dimnames=list(c(),names1)))
}
}
if (any(mtch==21) && mvrmObj$LDc > 0){
file <- paste(mvrmObj$DIR,"BNSP.fi.txt",sep="")
if (file.exists(file)){
names1<-paste("fi",colnames(mvrmObj$Zc)[-1],sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$LDc,dimnames=list(c(),names1)))
}
}
if (any(mtch==22) && mvrmObj$LDc > 0){
file <- paste(mvrmObj$DIR,"BNSP.omega.txt",sep="")
if (file.exists(file)){
names1<-paste("omega",colnames(mvrmObj$Zc)[-1],sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$LDc,dimnames=list(c(),names1)))
}
}
if (any(mtch==23) && p > 1){
file <- paste(mvrmObj$DIR,"BNSP.comega.txt",sep="")
if (file.exists(file)){
names1<-"c_omega"
R<-cbind(R,matrix(unlist(read.table(file)),ncol=1,dimnames=list(c(),names1)))
}
}
if (any(mtch==24) && p > 1){
file <- paste(mvrmObj$DIR,"BNSP.eta.txt",sep="")
if (file.exists(file)){
if (mvrmObj$H==1) names1<-paste("eta",colnames(mvrmObj$Xc),sep=".")
if (mvrmObj$H>1) names1<-paste(paste("eta",rep(1:mvrmObj$H,each=(mvrmObj$LGc+1)),sep="."),rep(seq(1:(mvrmObj$LGc+1)),mvrmObj$H),sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=((mvrmObj$LGc+1)*mvrmObj$H),dimnames=list(c(),names1)))
}
}
if (any(mtch==25) && p > 1){
file <- paste(mvrmObj$DIR,"BNSP.ceta.txt",sep="")
if (file.exists(file))
R<-cbind(R,matrix(unlist(read.table(file)),ncol=1,dimnames=list(c(),c("c_eta"))))
}
if (any(mtch==26)){
file <- paste(mvrmObj$DIR,"BNSP.probs.txt",sep="")
if (file.exists(file))
R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$H,dimnames=list(c(),seq(1,mvrmObj$H))))
}
if (any(mtch==27)){
file <- paste(mvrmObj$DIR,"BNSP.tune.txt",sep="")
if (file.exists(file)){
if (p > 1) names1<-paste("alpha",rep(mvrmObj$varsY,each=mvrmObj$ND),rep(mvrmObj$varsZ,p),sep=".")
if (p == 1) names1<-paste("alpha",mvrmObj$varsZ,sep=".")
if (p > 1) names2<-paste(rep("sigma2",p),mvrmObj$varsY,sep=".")
if (p == 1) names2<-"sigma2"
if (p > 1) names3<-paste(rep("c_alpha",p),mvrmObj$varsY,sep=".")
if (p == 1) names3<-"c_alpha"
if (p > 1){
e <- cbind(rep(unlist(mvrmObj$varsY),each=p),rep(unlist(mvrmObj$varsY),p))
e <- paste(e[,1],e[,2],sep=".")
names4<-paste("cpsi",e,sep=".")
}
if (p == 1) names4<-paste("cpsi")
names5<-NULL
if (p > 1) names5<-paste("R",seq(1,mvrmObj$LUT),sep="_")
names6<-NULL
if (p > 1 && mvrmObj$NDc > 0) {names6<-"omega"}
names7<-NULL
if (p > 1) {names7<-"c_eta"}
tune.names<-c("c_beta",names1,names2,names3[mvrmObj$HNca==1],names4[mvrmObj$HNcpsi==1],names5,names6,
"sigms2R"[mvrmObj$HNscor==1],names7,"c_omega"[mvrmObj$HNco==1])
R<-cbind(R,matrix(unlist(read.table(file)),ncol=length(tune.names),dimnames=list(c(),tune.names)))
}
}
if (any(mtch==28)){
file <- paste(mvrmObj$DIR,"BNSP.phi2.txt",sep="")
if (file.exists(file)){
if (p > 1) names1<-paste(rep("phi2",p),mvrmObj$varsY,sep=".")
if (p == 1) names1<-"phi2"
R<-cbind(R,matrix(unlist(read.table(file)),ncol=p,dimnames=list(c(),names1)))
}
}
if (any(mtch==29) && mvrmObj$nHar > 0 && mvrmObj$amplitude > 1){
file <- paste(mvrmObj$DIR,"BNSP.Hbeta.txt",sep="")
if (file.exists(file)){
names1 <- NULL
for (k in 1:mvrmObj$nHar)
names1<-c(names1,
paste("beta",paste(paste("sin(",2*k,sep=""),"pi", mvrmObj$varSin, "/ p)",sep=" "),sep="."),
paste("beta",paste(paste("cos(",2*k,sep=""),"pi", mvrmObj$varSin, "/ p)",sep=" "),sep="."))
if (p > 1) names1<-paste(rep(names1,p),rep(mvrmObj$varsY,2*mvrmObj$nHar),sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=2*mvrmObj$nHar*p,dimnames=list(c(),names1)))
}
}
if (any(mtch==30) && mvrmObj$nHar > 1 && mvrmObj$amplitude > 1){
file <- paste(mvrmObj$DIR,"BNSP.Hgamma.txt",sep="")
if (file.exists(file)){
names1 <- NULL
for (k in 1:mvrmObj$nHar)
names1<-c(names1,
paste("gamma",paste(paste("sin(",2*k,sep=""),"pi", mvrmObj$varSin, "/ p)",sep=" "),sep="."),
paste("gamma",paste(paste("cos(",2*k,sep=""),"pi", mvrmObj$varSin, "/ p)",sep=" "),sep="."))
if (p > 1) names1<-paste(rep(names1,p),rep(mvrmObj$varsY,2*mvrmObj$nHar),sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=2*mvrmObj$nHar*p,dimnames=list(c(),names1)))
}
}
if (any(mtch==31) && mvrmObj$nBreaks > 0){
file <- paste(mvrmObj$DIR,"BNSP.breaks.txt",sep="")
if (file.exists(file)){
names1<-paste("break",seq(1:mvrmObj$nBreaks),sep=".")
R<-cbind(R,matrix(unlist(read.table(file)),ncol=mvrmObj$nBreaks,dimnames=list(c(),names1)))
}
}
if (any(mtch==32)){
file <- paste(mvrmObj$DIR,"BNSP.period.txt",sep="")
if (file.exists(file)){
names1<-"period"
R<-cbind(R,matrix(unlist(read.table(file)),ncol=p,dimnames=list(c(),names1)))
}
}
if (!is.null(R) && !any(mtch==27)){
attr(R, "mcpar") <- mvrmObj$mcpar
attr(R, "class") <- "mcmc"
}
if (!is.null(R) && any(mtch==27)){
attr(R, "mcpar") <- c(1,mvrmObj$mcpar[2],50)
attr(R, "class") <- "mcmc"
}
return(R)
}
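# plotCorr: posterior summaries of the correlations (term "R") or of the
# cluster means muR; plots the centre (e.g. mean) with corrplot.mixed and,
# if quantiles are requested, a second panel with the posterior quantiles.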
plotCorr <- function(x, term="R", centre=mean, quantiles=c(0.1, 0.9), ...){
mvrmObj<-x
if (mvrmObj$p == 1) stop("doesn't apply for univariate response")
if (length(quantiles)==1) quantiles <- c(quantiles,1-quantiles)
if (length(quantiles) > 2) stop("up to two quantiles")
if (!is.null(quantiles)) quantiles <- unique(sort(quantiles))
if (!is.null(quantiles)) {oldpar <- par(mfrow=c(1,2)); on.exit(par(oldpar))}
mt<-match(term,c("R","muR"))
if (is.na(mt)) stop("unrecognised term");
if (mt==1) R<-mvrm2mcmc(mvrmObj,"R")
if (mt==2){
muR<-mvrm2mcmc(mvrmObj,"muR")
if (mvrmObj$mcm == 1) return(plot(tanh(muR)))
compAlloc <- matrix(0,nrow=mvrmObj$nSamples,ncol=mvrmObj$d)
compAllocFile <- paste(mvrmObj$DIR, paste("BNSP","compAlloc","txt",sep="."),sep="/")
if (file.exists(compAllocFile)) compAlloc<-mvrm2mcmc(mvrmObj,"compAlloc")
R <- sapply(1:mvrmObj$d,
function(i) muR[cbind(1:mvrmObj$nSamples,compAlloc[,i]+1)])
if (x$FT==1) R <- tanh(R)
}
ec <- diag(rep(1,x$p))
ec[lower.tri(ec)] <- apply(R,2,centre)
ec[upper.tri(ec)]<-t(ec)[upper.tri(ec)]
colnames(ec) <- x$varsY
corrplot.mixed(ec,lower.col="black")
if (!is.null(quantiles)){
q<-apply(R,2,quantile,probs=quantiles)
ci<-diag(1,mvrmObj$p)
ci[lower.tri(ci)] <- q[2,]
ci[upper.tri(ci)]<-t(ci)[upper.tri(ci)]
ci[lower.tri(ci)] <- q[1,]
colnames(ci) <- x$varsY
rownames(ci) <- x$varsY
corrplot(ci,col="black",method="number")
}
}
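# histCorr: histograms of the posterior samples of the correlations,
# one ggplot2 facet per pair of responses.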
histCorr <- function(x, term="R", plotOptions=list(), ...){
mvrmObj<-x
if (mvrmObj$p == 1) stop("doesn't apply for univariate response")
mt<-match(term,c("R","muR"))
if (is.na(mt)) stop("unrecognised term");
if (mt==1) R<-mvrm2mcmc(mvrmObj,"R")
if (mt==2){
muR<-mvrm2mcmc(mvrmObj,"muR")
if (mvrmObj$mcm == 1) return(plot(tanh(muR)))
compAlloc <- matrix(0,nrow=mvrmObj$nSamples,ncol=mvrmObj$d)
compAllocFile <- paste(mvrmObj$DIR, paste("BNSP","compAlloc","txt",sep="."),sep="/")
if (file.exists(compAllocFile)) compAlloc<-mvrm2mcmc(mvrmObj,"compAlloc")
R <- sapply(1:mvrmObj$d,
function(i) muR[cbind(1:mvrmObj$nSamples,compAlloc[,i]+1)])
if (x$FT==1) R <- tanh(R)
}
r<-rep(rep(seq(1,mvrmObj$p-1),times=seq(mvrmObj$p-1,1)),each=mvrmObj$nSamples)
c<-rep(unlist(mapply(seq,seq(2,mvrmObj$p),mvrmObj$p)),each=mvrmObj$nSamples)
df<-data.frame(cor=c(R),r=r,c=c)
pp<-ggplot(df) + geom_histogram(aes(x=cor),binwidth=0.01) + facet_wrap(r~c) + plotOptions #facet_grid(r~c)
return(pp)
}
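# predict.mvrm: posterior mean predictions at `newdata`, optionally with
# credible intervals (quantiles of the fitted means) or prediction
# intervals (normal intervals using the fitted standard deviations).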
predict.mvrm <- function(object,newdata,interval=c("none","credible","prediction"),level=0.95,ind.preds=FALSE,...){
if (missing(newdata) || is.null(newdata)) newdata<-object$data
if (length(newdata)==0) stop("no data found")
interval <- match.arg(interval)
newdata <- as.data.frame(newdata)
npred<-NROW(newdata)
fitM<-matrix(0,nrow=object$nSamples,ncol=npred*object$p)
terms.reform<-NULL
k<-0
period0<-0
for (i in 1:length(object$formula.termsX)){
term<-object$formula.termsX[i]
if (!i %in% which(unlist(object$which.SpecX)==-99)){
k<-k+1
if (!grepl("knots",term)){
term<-substr(term,1,nchar(term)-1)
term<-paste(term,",knots= knots[[",k,"]])")
}
}
terms.reform<-c(terms.reform,term)
if (grepl("period = 0",term)) period0<-1
}
formula2<-reformulate(terms.reform)
if (period0==0){
X<-DM(formula=formula2,data=newdata,n=npred,knots=object$Xknots,predInd=TRUE,meanVector=object$storeMeanVectorX,indicator=object$storeIndicatorX,mvrmObj=object,centre=TRUE)$X
etaFN <- file.path(paste(object$DIR,"BNSP.beta.txt",sep=""))
eFile<-file(etaFN,open="r")
for (i in 1:object$nSamples){
eta<-scan(eFile,what=numeric(),n=object$p*(object$LG+1),quiet=TRUE)
for (k in 1:object$p)
fitM[i,(1+(k-1)*npred):(k*npred)]<-c(as.matrix(X)%*%matrix(c(eta[(1+(k-1)*(object$LG+1)):(k*(object$LG+1))])))
}
close(eFile)
predictions<-fit<-apply(fitM,2,mean)
}
if (period0==1){
periodFN <- file.path(paste(object$DIR,"BNSP.period.txt",sep=""))
periodFile<-file(periodFN,open="r")
etaFN <- file.path(paste(object$DIR,"BNSP.beta.txt",sep=""))
eFile<-file(etaFN,open="r")
per.val <- 0
for (i in 1:object$nSamples){
per.val.r <- per.val
per.val<-scan(periodFile,what=numeric(),n=1,quiet=TRUE)
formula2<-substr(formula2,1,nchar(formula2)-1)[2]
formula2<-sub(paste(", period = ", per.val.r,sep=""), "", formula2)
formula2<-paste(formula2,", period = ", per.val,")",sep="")
formula2<-reformulate(formula2)
X<-DM(formula=formula2,data=newdata,n=npred,knots=object$Xknots,predInd=TRUE,meanVector=object$storeMeanVectorX,indicator=object$storeIndicatorX,mvrmObj=object,centre=TRUE)$X
eta<-scan(eFile,what=numeric(),n=object$p*(object$LG+1),quiet=TRUE)
for (k in 1:object$p)
fitM[i,(1+(k-1)*npred):(k*npred)]<-c(as.matrix(X)%*%matrix(c(eta[(1+(k-1)*(object$LG+1)):(k*(object$LG+1))])))
}
close(periodFile)
close(eFile)
predictions<-fit<-apply(fitM,2,mean)
}
if (interval=="credible"){
QMb<-apply(fitM,2,quantile,probs=c((1-level)/2,1-(1-level)/2),na.rm=TRUE)
predictions<-cbind(fit,t(QMb))
colnames(predictions) <- c("fit", "lwr", "upr")
}
if (interval=="prediction"){
terms.reform<-NULL
k<-0
if (length(object$formula.termsZ)>0){
for (i in 1:length(object$formula.termsZ)){
term<-object$formula.termsZ[i]
if (!i %in% which(unlist(object$which.SpecZ)==-99)){
k<-k+1
if (!grepl("knots",term)){
term<-substr(term,1,nchar(term)-1)
term<-paste(term,",knots= knots[[",k,"]])")
}
}
terms.reform<-c(terms.reform,term)
}
}
ifelse(is.null(terms.reform), formula2<-~1, formula2<-reformulate(terms.reform))
Z<-DM(formula=formula2,data=newdata,n=npred,knots=object$Zknots,predInd=TRUE,meanVector=object$storeMeanVectorZ,indicator=object$storeIndicatorZ,mvrmObj=object,centre=TRUE)$X
Z<-Z[,-1]
fitV<-matrix(0,nrow=object$nSamples,ncol=npred*object$p)
alphaFN <- file.path(paste(object$DIR,"BNSP.alpha.txt",sep=""))
aFile<-file(alphaFN,open="r")
sigma2FN <- file.path(paste(object$DIR,"BNSP.sigma2.txt",sep=""))
s2File<-file(sigma2FN,open="r")
for (i in 1:object$nSamples){
alpha<-scan(aFile,what=numeric(),n=object$p*object$LD,quiet=TRUE)
s2<-scan(s2File,what=numeric(),n=object$p,quiet=TRUE)
for (k in 1:object$p)
fitV[i,(1+(k-1)*npred):(k*npred)]<-sqrt(s2[k]*exp(as.matrix(Z)%*%matrix(c(alpha[(1+(k-1)*object$LD):(k*object$LD)]))))
}
close(aFile)
close(s2File)
fitSD<-apply(fitV,2,mean)
predictions<-cbind(fit,fit-qnorm(1-(1-level)/2)*fitSD,fit+qnorm(1-(1-level)/2)*fitSD,fitSD)
colnames(predictions) <- c("fit", "lwr", "upr", "fit.sd")
}
returns <- data.frame(predictions)
if (ind.preds) returns <- list(returns,fitM)
return(returns)
}
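# print.mvrm: prints the call and the marginal posterior inclusion
# probabilities of the mean (gamma) and variance (delta) model terms.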
print.mvrm <- function(x, digits = 5, ...) {
cat("\nCall:\n")
print(x$call)
cat("\n")
cat(x$nSamples,"posterior samples\n")
G<-as.data.frame(mvrm2mcmc(x,"gamma"))
D<-as.data.frame(mvrm2mcmc(x,"delta"))
colnames(G)<- rep(colnames(x$X)[-1],x$p)
colnames(D)<- rep(colnames(x$Z)[-1],x$p)
if (x$LG > 0){
cat("\nMean model - marginal inclusion probabilities\n")
cat("\n")
for (k in 1:x$p){
cat(x$varsY[[k]])
cat("\n")
print(apply(G[(1+(k-1)*x$LG):(k*x$LG)],2,mean), digits=digits)
cat("\n")
}
}
if (x$LD > 0){
cat("\nVariance model - marginal inclusion probabilities\n")
cat("\n")
for (k in 1:x$p){
cat(x$varsY[[k]])
cat("\n")
print(apply(D[(1+(k-1)*x$LD):(k*x$LD)],2,mean), digits=digits)
cat("\n")
}
}
}
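# summary.mvrm: prints the model formula, the specified priors, deviance
# summaries, the most frequently visited mean/variance models and,
# optionally, the start/end values of the MCMC tuning parameters.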
summary.mvrm <- function(object, nModels = 5, digits = 5, printTuning = FALSE, ...) {
cat("\nSpecified model for the mean and variance:\n")
print(object$formula, showEnv = FALSE)
cat("\nSpecified priors:\n")
upTo<-13
if (object$p > 1 && object$mcm==1) upTo<-16
if (object$p > 1 && object$mcm>1) upTo<-17
prior<-NULL
for (k in 9:upTo) prior<-c(prior,noquote(paste(c(names(as.list(object$call2))[k],object$call2[[k]]),collapse="=")))
print(noquote(sub("Prior","",prior)))
cat("\nTotal posterior samples:",object$nSamples,"; burn-in:",object$mcpar[1]-1,"; thinning:",object$mcpar[3],"\n")
cat("\nFiles stored in",object$DIR,"\n")
deviance <- matrix(c(object$nullDeviance,object$deviance[1:2]/object$nSamples),ncol=1)
rownames(deviance) <- c("Null deviance:", "Mean posterior deviance (marginal):", "Mean posterior deviance:")
colnames(deviance) <- c("")
print(deviance, digits = digits)
if ((nModels > 0) && ((object$LG+object$LD) > 0)){
cat("\nJoint mean/variance model posterior probabilities\n\n")
G<-as.data.frame(mvrm2mcmc(object,"gamma"))
D<-as.data.frame(mvrm2mcmc(object,"delta"))
if (object$LG > 0) colnames(G)<-paste(rep(sub(" ",".",paste("mean",colnames(object$X)[-1])),object$p))#,rep(seq(1,p),each=object$LG),sep=".")
if (object$LD > 0) colnames(D)<- paste(rep(sub(" ",".",paste("var",colnames(object$Z)[-1])),object$p))#,rep(seq(1,p),each=object$LD),sep=".")
for (k in 1:object$p){
if (object$LG > 0 && object$LD > 0) Joint<-cbind(G[(1+(k-1)*object$LG):(k*object$LG)],D[(1+(k-1)*object$LD):(k*object$LD)])
if (object$LG > 0 && object$LD == 0) Joint<-G
if (object$LG == 0 && object$LD > 0) Joint<-D
g<-count(Joint)
g<-g[order(g$freq,decreasing=TRUE),]
rownames(g)<-seq(1,NROW(g))
g$prob<-100*g$freq/sum(g$freq)
g$cumulative<-cumsum(g$prob)
TrueDim<-min(nModels,dim(g)[1])
cat(object$varsY[[k]]) #cat(paste(paste("Response model",k),": ",sep=""))
cat("\n")
print(g[1:TrueDim,])
cat("Displaying", TrueDim, "models of the",NROW(g),"visited\n")
cat(TrueDim,"models account for",sub(" ","",paste(g$cumulative[TrueDim],"%")),"of the posterior mass\n\n")
}
}
if (printTuning){
cat("\nTuning parameters: start and end values\n\n")
dm<-c("start","end")
names(object$tuneCbeta)<-dm
sigma2<-c(t(matrix(object$tuneSigma2,ncol=2)))
names(sigma2)<-rep(dm,object$p)
names(object$tuneSigma2R)<-dm
R<-c(t(matrix(object$tuneR,ncol=2)))
names(R)<-rep(dm,object$LUT)
#rearrange<-lapply(seq(1:object$p),function(x)seq(x,2*object$p,by=object$p))
#sigma2<-lapply(rearrange,function(x){object$tuneSigma2[x]})
#for (k in 1:object$p) names(sigma2[[k]])<-dm
#c.alpha<-lapply(rearrange,function(x){object$tuneCa[x]})
#for (k in 1:object$p) names(c.alpha[[k]])<-dm
c.alpha<-c(t(matrix(object$tuneCalpha,ncol=2)))
names(c.alpha)<-rep(dm,object$p)
if (object$ND > 0){
tot<-object$p*object$ND
#rearrange<-lapply(seq(1:tot),function(x)seq(x,2*tot,by=tot))
#tuneAlpha<-lapply(rearrange,function(x){object$tuneAlpha[x]})
#for (k in 1:tot) names(tuneAlpha[[k]])<-dm
tuneAlpha<-c(t(matrix(object$tuneAlpha,ncol=2)))
names(tuneAlpha)<-rep(dm,tot)
}
pT1<-list(c.beta=object$tuneCbeta,sigma2=sigma2)
if (object$ND > 0) {pT2<-list(Alpha=tuneAlpha, c.alpha = c.alpha); pT1<-c(pT1,pT2)}
if (object$p > 1) {pT2<-list(sigma2R = object$tuneSigma2R, R = R); pT1<-c(pT1,pT2)}
print(pT1)
}
}
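# clustering: posterior similarity matrices, i.e. the relative frequency
# with which pairs of correlations (and, for models with variable
# clustering, pairs of responses) fall in the same component.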
clustering <- function(object, ...){
R <- list()
if (object$mcm==1 || object$mcm==5 || object$mcm==8) stop("common correlations model has no clustering")
if (object$mcm %in% c(2,3,6,7,9,10)){
simMatC<-matrix(0,object$d,object$d)
compAllocFP <- file.path(paste(object$DIR,"BNSP.compAlloc.txt",sep=""))
compAlloc<-file(compAllocFP,open="r")
for (j in 1:object$nSamples){
ca<-scan(compAlloc,what=numeric(),n=object$d,quiet=TRUE)
for (i in 1:object$d) {
temp<-which(ca==ca[i])
simMatC[i,temp]<-simMatC[i,temp]+1
}
}
close(compAlloc)
subset <- rep(seq(1,object$p),each=object$p) < rep(seq(1,object$p),object$p)
cor.names <- paste("cor",paste(rep(seq(1,object$p),each=object$p),rep(seq(1,object$p),object$p),sep=""),sep=".")[subset]
colnames(simMatC) <- cor.names
rownames(simMatC) <- cor.names
R[[1]]<-simMatC/object$nSamples
}
if (object$mcm %in%c(3,7,10)){
simMatV<-matrix(0,object$p,object$p)
compAllocVFP <- file.path(paste(object$DIR,"BNSP.compAllocV.txt",sep=""))
compAllocV<-file(compAllocVFP,open="r")
for (j in 1:object$nSamples){
ca<-scan(compAllocV,what=numeric(),n=object$p,quiet=TRUE)
for (i in 1:object$p) {
temp<-which(ca==ca[i])
simMatV[i,temp]<-simMatV[i,temp]+1
}
}
close(compAllocV)
colnames(simMatV) <- object$varsY
rownames(simMatV) <- object$varsY
R[[2]]<-simMatV/object$nSamples
}
return(R)
}
|
/scratch/gouwar.j/cran-all/cranData/BNSP/R/mvrm.R
|
# includes functions: compAllocVtoCompAlloc, ls.mvrm, label.count, plotGeneric, plot.mvrm
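# compAllocVtoCompAlloc: maps component allocations of the p response
# variables to component allocations of the p*(p-1)/2 pairwise correlations.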
compAllocVtoCompAlloc <- function(G,p,compAllocV){
compAlloc<-array(-19,p*(p-1)/2)
move <- 0
for (k in 0:(G-1)){
temp <- 0
for (i in 1:(p-1)){
for (j in (i+1):p){
if (compAllocV[i] == compAllocV[j] && compAllocV[i] == k){
compAlloc[(i-1)*p+j-(i)*(i+1)/2] <- move
temp <- temp+1
}
}
}
if (temp > 0) move<-move+1
}
move2<-0
for (k in 0:(G-2)){
for (l in (k+1):(G-1)){
temp <- 0
for (i in 1:(p-1)){
for (j in (i+1):p){
if (((compAllocV[i] == k && compAllocV[j] == l) || (compAllocV[i] == l && compAllocV[j] == k))){
compAlloc[(i-1)*p+j-(i)*(i+1)/2] <- move+move2
temp<-temp+1
}
}
}
if (temp > 0) move2<-move2+1
}
}
return(compAlloc)
}
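# ls.mvrm: undoes label switching in the cluster-specific parameters using
# Stephens' method (label.switching package) and writes the relabelled
# samples to BNSP.nu.ls.txt, BNSP.eta.ls.txt and BNSP.nmembers.ls.txt, and
# the modal clustering to BNSP.clusters.txt.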
ls.mvrm <- function(x){
file1 <- paste(x$DIR,"BNSP.nu.ls.txt",sep="")
file2 <- paste(x$DIR,"BNSP.eta.ls.txt",sep="")
file3 <- paste(x$DIR,"BNSP.nmembers.ls.txt",sep="")
file4 <- paste(x$DIR,"BNSP.clusters.txt",sep="")
nu<-mvrm2mcmc(x,"nu")
eta<-mvrm2mcmc(x,"eta")
nmembers<-mvrm2mcmc(x,"nmembers")
nparams<-x$LGc+(x$LGc+1)+1 #nu + eta + nmembers
nu <- nu[,c(mapply(seq,seq(1,x$LGc),length.out=x$H,by=x$LGc))]
eta <- eta[,c(mapply(seq,seq(1,x$LGc+1),length.out=x$H,by=x$LGc+1))]
mat<-cbind(nu,eta,nmembers)
mcmc.pars <- array(data = mat, dim = c(x$nSamples, x$H, nparams))
compAlloc<-mvrm2mcmc(x,"compAlloc")
compAllocV<-mvrm2mcmc(x,"compAllocV")
ifelse(x$G == 1, z<-compAlloc+1, z<-compAllocV+1)
probs<-mvrm2mcmc(x,"probs")
ifelse(x$G ==1, DIM<-x$H, DIM<-x$G)
pr <- array(data = probs, dim = c(x$d, x$nSamples, DIM))
#pr <- array(data = probs, dim = c(x$p, x$nSamples, x$H))
p <- aperm(pr, c(2,1,3))
if (x$G == 1){
ls <- label.switching(method = "STEPHENS", p = p, mcmc = mcmc.pars, z = z, K = x$H)
mcmc.pars.per<-permute.mcmc(mcmc.pars, ls$permutations$STEPHENS)$output
}
if (x$G > 1){
ls <- label.switching(method = "STEPHENS", p = p, z = z, K = x$G)
#Permute component allocations for the variables
compAllocVPer<-compAllocV
for (sw in 1:x$nSamples)
for (k in 0:(x$G-1))
compAllocVPer[sw,compAllocV[sw,]==ls$permutations[[1]][sw,k+1]-1]<-k
#Permute component allocations for the correlations
compAllocPer<-matrix(0,ncol=x$d,nrow=x$nSamples)
for (sw in 1:x$nSamples){
if (sum(compAllocVPer[sw,]==compAllocV[sw,])==x$p) compAllocPer[sw,]<-compAlloc[sw,]
else compAllocPer[sw,]<-compAllocVtoCompAlloc(x$G,x$p,compAllocVPer[sw,])
}
#Find clustering
tab <- table(apply(compAllocPer+1, 1, paste, collapse=", "))
ls$clusters <- as.numeric(unlist(strsplit(names(which.max(tab)), ', ')))
#From permutation matrix of the variables to permutation matrix of the correlations
fpm<-matrix(-99,nrow=x$nSamples,ncol=x$H)
for (sw in 1:x$nSamples){
b<-compAlloc[sw,]
a<-compAllocPer[sw,]
fpv<-array(-99,x$H)
if ((max(a,b)+2)<=x$H) fpv[(max(a,b)+2):x$H]<-c((max(a,b)+2):x$H)
for (k in 0:max(a,b))
fpv[k+1]<-a[min(which(b==k))]+1
fpm[sw,] <- fpv
}
#With fpm, permute parameters
mcmc.pars.per<-permute.mcmc(mcmc.pars, fpm)$output
}
nu<-NULL
if (x$LGc > 0){
nu<-mcmc.pars.per[,,1:x$LGc]
nu <-matrix(nu,nrow=x$nSamples)
nu <- nu[,c(mapply(seq,seq(1,x$H),length.out=x$LGc,by=x$H))]
}
eta<-mcmc.pars.per[,,(1+x$LGc):(nparams-1)]
eta <-matrix(eta,nrow=x$nSamples)
eta <- eta[,c(mapply(seq,seq(1,x$H),length.out=(x$LGc+1),by=x$H))]
nmembers<-mcmc.pars.per[,,nparams]
write.table(nu,file=file1,row.names = FALSE,col.names = FALSE)
write.table(eta,file=file2,row.names = FALSE,col.names = FALSE)
write.table(nmembers,file=file3,row.names = FALSE,col.names = FALSE)
write.table(ls$clusters,file=file4,row.names = FALSE,col.names = FALSE, quote=FALSE)
}
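# label.count: matches a user-supplied term (character label or integer
# index) to the model term labels; returns the integer index and the
# number of variables in the term.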
label.count <- function(term,labels,counts,model){
if (is.character(term)){
int.label1<-int.label2<-grep(term,labels,fixed=TRUE)
k<-1
while (!length(int.label1)==1 && (nchar(term)>k)){
int.label1<-grep(substr(term,1,nchar(term)-k),labels,fixed=TRUE)
k<-k+1
}
k<-1
while (!length(int.label2)==1 && (nchar(term)>k)){
int.label2<-grep(substr(term,k,nchar(term)),labels,fixed=TRUE)
k<-k+1
}
if ((!length(int.label1) == 1) || (!length(int.label2) == 1) || !(int.label1==int.label2))
stop(cat(cat("`term` in", model, "model should be an integer between 1 and", length(labels),"or one of: "), cat(labels,sep=", ",fill=TRUE)),call. = FALSE)
int.label<-int.label1
}
if (is.numeric(term)) int.label<-term
if ((!length(int.label) == 1) || (int.label > length(labels)))
stop(cat(cat("`term` in", model, "model should be an integer between 1 and", length(labels),"or one of: "), cat(labels,sep=", ",fill=TRUE)),call. = FALSE)
count<-counts[int.label]
return(c(int.label,count))
}
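# plotGeneric: workhorse behind plot.mvrm. Reconstructs a fitted term over
# a grid from the posterior samples and returns 1-d (ggplot2) or 2-d
# (contour/ribbon3D/scatterplot3js) plots of mean, st.dev., correlation,
# autoregressive or scale terms.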
plotGeneric <- function(mvrmObj, MEAN, STDEV, CORM, CORS, DEP, SCALE,
response, intercept, grid,
centre, quantiles, static, contour, centreEffects, plotOptions,
int.label, count, vars, label, is.D, formula.term, which.Spec, assign,
knots, storeMeanVector, storeIndicator, data, plotEmptyCluster){
p<-mvrmObj$p
period0<-0
for (i in 1:length(formula.term)){
if (!grepl("knots",formula.term[i]) && which.Spec[i] > 0){
formula.term[i]<-substr(formula.term[i],1,nchar(formula.term[i])-1)
formula.term[i]<-paste(formula.term[i],", knots=knots[[",which.Spec[i],"]])",sep="")
}
if (grepl("period = 0",formula.term[i])){
period0<-1
calc.p <- mean(mvrm2mcmc(mvrmObj,"period"))
formula.term[i]<-substr(formula.term[i],1,nchar(formula.term[i])-1)
formula.term[i]<-sub(", period = 0", "", formula.term[i])
formula.term[i]<-paste(formula.term[i],", period = ", calc.p,")",sep="")
}
}
V<-NULL
Dynamic<-NULL
for (k in 1:length(int.label)) V<-c(V,which(assign %in% int.label[k]))
if (STDEV) V <- V - 1
if ((intercept || sum(is.D)) && MEAN) V<-c(1,V)
indeces<-V
if (MEAN && !(1 %in% indeces)) indeces<-c(1,indeces)
if (STDEV) indeces<-c(1,indeces+1)
if (count==1){
if (!is.D){
max.shift <- 0
if (mvrmObj$nBreaks > 0) max.shift <- max(mvrm2mcmc(mvrmObj,"breaks"))
min1<-min(with(data,eval(as.name(vars[1]))))
max1<-max(with(data,eval(as.name(vars[1])))) + max.shift
newR1<-seq(min1,max1,length.out=grid)
newData<-data.frame(newR1)
colnames(newData)<-vars
if (sum(!is.na(formula.term))){
dm<-DM(formula=reformulate(formula.term),data=newData,n=NROW(newData),knots=knots,
meanVector=storeMeanVector[indeces],indicator=storeIndicator[indeces],centre=TRUE)
Ds<-dm$X
Dynamic<-dm$Dynamic
amplitude<-dm$DSP[1]
harmonics<-dm$DSP[3]
startSin<-dm$DSP[2]
ifelse(intercept, startSin <- startSin, startSin <- startSin - 1)
}
if (!sum(!is.na(formula.term))) Ds<-matrix(1,1,1)
if (((!1%in%V)||STDEV) && sum(!is.na(formula.term))) Ds<-Ds[,-1]
}
if (is.D){
newData<-data.frame(sort(unique(with(data,eval(as.name(vars[1]))))))
colnames(newData)<-vars
Ds<-DM(formula=reformulate(formula.term),data=newData,n=NROW(newData),knots=NULL,
meanVector=storeMeanVector[indeces],indicator=storeIndicator[indeces],centre=TRUE)$X
if (STDEV) Ds<-Ds[,-1]
}
}
if (count==2){
if (sum(is.D)){
dv<-which(is.D==1)
cv<-which(is.D==0)
min1<-min(with(data,eval(as.name(vars[cv]))))
max1<-max(with(data,eval(as.name(vars[cv]))))
newR1<-seq(min1,max1,length.out=grid)
newR2<-unique(with(data,eval(as.name(vars[dv]))))
newData<-expand.grid(newR1,newR2)
colnames(newData)<-vars[c(cv,dv)]
#which.Spec <- which.Spec[which.Spec > 0]
#whichKnots <- which.Spec
#Dstar<-knots[[whichKnots]]
#Ds<-DM(formula=reformulate(formula.term),data=newData,n=NROW(newData),knots=Dstar,
# meanVector=storeMeanVector[indeces],indicator=storeIndicator[indeces],centre=TRUE)$X
Ds<-DM(formula=reformulate(formula.term),data=newData,n=NROW(newData),knots=knots,
meanVector=storeMeanVector[indeces],indicator=storeIndicator[indeces],centre=TRUE)$X
if (STDEV) Ds<-Ds[,-1]
}
if (sum(is.D)==0){
min1<-min(with(data,eval(as.name(vars[1]))))
min2<-min(with(data,eval(as.name(vars[2]))))
max1<-max(with(data,eval(as.name(vars[1]))))
max2<-max(with(data,eval(as.name(vars[2]))))
newR1<-seq(min1,max1,length.out=grid)
newR2<-seq(min2,max2,length.out=grid)
newData<-expand.grid(newR1,newR2)
colnames(newData)<-vars
#whichKnots <- which.Spec
#Dstar<-knots[[whichKnots]]
#Ds<-DM(formula=reformulate(formula.term),data=newData,n=NROW(newData),knots=Dstar,
# meanVector=storeMeanVector[indeces],indicator=storeIndicator[indeces],centre=TRUE)$X
Ds<-DM(formula=reformulate(formula.term),data=newData,n=NROW(newData),knots=knots,
meanVector=storeMeanVector[indeces],indicator=storeIndicator[indeces],centre=TRUE)$X
if (STDEV) Ds<-Ds[,-1]
}
}
if (MEAN){
dim2<-1
if (CORM) dim2<-mvrmObj$H
#print("jbf")
#print(dim(Ds))
fit<-array(0,dim=c(mvrmObj$nSamples,dim2,NROW(Ds)))
#fit<-matrix(0,nrow=mvrmObj$nSamples,ncol=NROW(Ds))
#meanReg<-array(0,dim=c(x$nSamples,x$H,x$LUT))
if (CORM==0 && DEP==0){
etaFN <- file.path(paste(mvrmObj$DIR,"BNSP.beta.txt",sep=""))
how.many <- p*(mvrmObj$LG+1)
multiplier <- mvrmObj$LG+1
if (!is.null(Dynamic)){
harmonicsFN <- file.path(paste(mvrmObj$DIR,"BNSP.Hbeta.txt",sep=""))
how.many2 <- 2*harmonics
}
if (period0==1){
periodFN <- file.path(paste(mvrmObj$DIR,"BNSP.period.txt",sep=""))
periodFile<-file(periodFN,open="r")
per.val <- calc.p
}
}else if (CORM==1){
if (mvrmObj$H==1) etaFN <- file.path(paste(mvrmObj$DIR,"BNSP.eta.txt",sep=""))
if (mvrmObj$H > 1){
etaFN <- file.path(paste(mvrmObj$DIR,"BNSP.eta.ls.txt",sep=""))
clusFN <- paste(mvrmObj$DIR,"BNSP.clusters.txt",sep="")
if (!file.exists(etaFN) || !file.exists(clusFN))
quiet(ls.mvrm(mvrmObj))
clus<-array(unlist(read.table(clusFN)))
tabs<-table(clus)
labs<-array(0,mvrmObj$H)
labs[as.numeric(names(tabs))]<-tabs
V <- rep(V,mvrmObj$H) + rep(seq(0,max(V)*(mvrmObj$H-1),by=max(V)),each=length(V))
}
how.many <- (mvrmObj$LGc+1)*mvrmObj$H
multiplier<-0
}else{
etaFN <- file.path(paste(mvrmObj$DIR,"BNSP.psi.txt",sep=""))
how.many <- p*p*mvrmObj$LK
multiplier <- mvrmObj$LK
}
eFile<-file(etaFN,open="r")
if (!is.null(Dynamic))
eFile2<-file(harmonicsFN,open="r")
for (i in 1:mvrmObj$nSamples){
eta<-scan(eFile,what=numeric(),n=how.many,quiet=TRUE)
if (!is.null(Dynamic)){
etaHar<-scan(eFile2,what=numeric(),n=how.many2,quiet=TRUE)
ifelse(period0==1,
per.val<-scan(periodFile,what=numeric(),n=1,quiet=TRUE),
per.val<-mvrmObj$period)
sinWphs <- 0 * newR1
for (l in 1:harmonics)
sinWphs <- sinWphs +
etaHar[2*l-1] * sin(2 * l * pi * newR1 / per.val) +
etaHar[2*l] * cos(2 * l * pi * newR1 / per.val)
Ds[,c((startSin+1):(startSin+amplitude+1))] <- sweep(Dynamic, MARGIN=1, sinWphs, `*`)
}
if (period0==1 && is.null(Dynamic)){
per.val.r <- per.val
per.val<-scan(periodFile,what=numeric(),n=1,quiet=TRUE)
#for (k in 1:length(formula.term)){
# formula.term[k]<-substr(formula.term[k],1,nchar(formula.term[k])-1)
# formula.term[k]<-sub(paste(", period = ", per.val.r,sep=""), "", formula.term[k])
# formula.term[k]<-paste(formula.term[k],", period = ", per.val,")",sep="")
#}
#dm<-DM(formula=reformulate(formula.term),data=newData,n=NROW(newData),knots=knots,
# meanVector=storeMeanVector[indeces],indicator=storeIndicator[indeces],centre=TRUE)
#Ds<-dm$X
for (l in 1:harmonics){
Ds[,startSin+2*l-1] <- sin(2 * l * pi * newR1 / per.val)
Ds[,startSin+2*l] <- cos(2 * l * pi * newR1 / per.val)
}
}
fit[i,,] <- matrix(c(eta[V+(response-1)*multiplier]),byrow=TRUE,ncol=dim(as.matrix(Ds))[2]) %*% t(as.matrix(Ds))
if (CORM && mvrmObj$FT==1) fit[i,,] <- tanh(fit[i,,])
if (centreEffects) fit[i,,]<-fit[i,,]-mean(fit[i,,])
}
close(eFile)
if (!is.null(Dynamic)) close(eFile2)
if (period0==1) close(periodFile)
}
if (STDEV){
fit<-array(0,dim=c(mvrmObj$nSamples,1,NROW(Ds)))
if (CORS==0 && SCALE==0){
alphaFN <- file.path(paste(mvrmObj$DIR,"BNSP.alpha.txt",sep=""))
sigma2FN <- file.path(paste(mvrmObj$DIR,"BNSP.sigma2.txt",sep=""))
how.many1 <- p*mvrmObj$LD
how.many2 <- p
}
if (CORS==1){
alphaFN <- file.path(paste(mvrmObj$DIR,"BNSP.omega.txt",sep=""))
sigma2FN <- file.path(paste(mvrmObj$DIR,"BNSP.sigma2R.txt",sep=""))
how.many1 <- mvrmObj$LDc
how.many2 <- 1
}
if (SCALE==1){
alphaFN <- file.path(paste(mvrmObj$DIR,"BNSP.psi.txt",sep=""))
sigma2FN <- file.path(paste(mvrmObj$DIR,"BNSP.phi2.txt",sep=""))
how.many1 <- p*mvrmObj$LC
how.many2 <- p
}
aFile<-file(alphaFN,open="r")
s2File<-file(sigma2FN,open="r")
for (i in 1:mvrmObj$nSamples){
alpha<-scan(aFile,what=numeric(),n=how.many1,quiet=TRUE)
#ifelse(intercept || !is.na(formula.term),
# s2<-scan(s2File,what=numeric(),n=how.many2,quiet=TRUE)[response], s2<-1)
#if (!is.na(formula.term) && intercept)
# fit[i,,]<-s2*exp(as.matrix(Ds)%*%matrix(c(alpha[V+(response-1)*(how.many1/p)])))
#if (!is.na(formula.term) && !intercept)
# fit[i,,]<-exp(as.matrix(Ds)%*%matrix(c(alpha[V+(response-1)*(how.many1/p)])))
#if (is.na(formula.term)) fit[i,,]<-s2
ifelse(intercept, s2<-scan(s2File,what=numeric(),n=how.many2,quiet=TRUE)[response], s2<-1)
fit[i,,]<-s2*exp(as.matrix(Ds)%*%matrix(c(alpha[V+(response-1)*(how.many1/p)])))
if (!SCALE) fit[i,,]<-sqrt(fit[i,,])
if (centreEffects) fit[i,,]<-fit[i,,]/mean(fit[i,,])
}
close(aFile)
close(s2File)
}
if (count==1 && !is.D){
newX<-unlist(newData)
centreM<-drop(apply(fit,c(2,3),centre))
if (sum(!is.na(formula.term)))
dataM<-data.frame(rep(newX,dim(fit)[2]),c(t(centreM)))
if (!sum(!is.na(formula.term)))
dataM<-data.frame(expand.grid(newX,centreM))
nms <- c(vars,"centreM")
if (!is.null(quantiles)){
nms<-c(nms,"QLM","QUM")
QM<-apply(fit,c(2,3),quantile,probs=quantiles,na.rm=TRUE)
if (sum(!is.na(formula.term)))
dataM<-data.frame(dataM,c(t(QM[1,,])),c(t(QM[2,,])))
if (!sum(!is.na(formula.term)))
dataM<-data.frame(dataM,rep(QM[1,,],each=length(newX)),rep(QM[2,,],each=length(newX)))
}
colnames(dataM) <- nms
if (CORM==1 && mvrmObj$H > 1){
dataM<-data.frame(dataM,group=factor(rep(1:mvrmObj$H,each=grid)),size=rep(labs,each=grid))
dataM<-dataM[order(-dataM$size, dataM[,1]),]
labs2<-labs
if (!plotEmptyCluster) {
dataM<-dataM[dataM$size > 0,]
labs2<-labs[labs>0]
}
}
#dataRug<-data.frame(b=with(data,eval(as.name(vars))))
plotElM<-c(geom_line(aes_string(x=as.name(vars),y=centreM),col=4,alpha=0.5),
#geom_rug(mapping=aes(x=dataRug$b),data=dataRug,alpha=0.3),
list(ylab(label)))
if (!is.null(quantiles))
plotElM<-c(geom_ribbon(data=dataM,aes_string(x=vars, ymin="QLM", ymax="QUM"),alpha=0.2),plotElM)
if (CORM==1 && mvrmObj$H > 1){
plotElM<-c(geom_line(aes_string(x=as.name(vars),y="centreM",group="group",linetype="group"),col=4,alpha=0.5),
#geom_rug(mapping=aes(x=dataRug$b),data=dataRug,alpha=0.3),
list(ylab(label)))
if (!is.null(quantiles))
plotElM<-c(geom_ribbon(data=dataM,aes_string(x=vars,ymin="QLM",ymax="QUM",group="group"),alpha=0.2),plotElM)
}
ggM<-ggplot(data=dataM)
plotM<-ggM + plotElM + plotOptions
if (CORM==1 && mvrmObj$H > 1){
cns<-noquote(unlist(lapply(seq(1,p-1),function (k) paste(rep(k,p-k),seq(k+1,p),sep=""))))
cgsa<-noquote(paste(lapply(1:length(labs2), function(k)cns[clus==k])))
plotM <- ggM + plotElM + guides(fill=FALSE) +
scale_linetype_manual(name = "correlation groups: ",
labels = cgsa,
values=seq(1:length(labs2))) +
theme(legend.position = "bottom") +
plotOptions
}
}
if (count==1 && is.D){
lvs<-levels(with(data,eval(as.name(vars[1]))))
if (is.null(lvs)) lvs<-sort(unique(with(data,eval(as.name(vars[1])))))
#sort(unique(with(data,eval(as.name(vars[1])))))
df<-data.frame(x=rep(lvs,each=mvrmObj$nSamples),y=c(fit))
plotElM<-c(geom_boxplot(),list(xlab(label),ylab("")))
#ggM<-ggplot(data=df,aes(x=factor(df$x),y=df$y))
ggM<-ggplot(df,aes(factor(df[,1]),df[,2]))
# ggM<-ggplot(data=dataM,aes_string(x=vars[1],y=vars[2],z=centreM))
plotM<-ggM + plotElM + plotOptions
}
if (count==2 && sum(is.D)==1){
centreM<-drop(apply(fit,c(2,3),centre))
disc.var<-vars[which(is.D==1)]
cont.var<-vars[which(is.D==0)]
dataM<-data.frame(newData,centreM)
nms <- c(colnames(newData),"centreM")
if (!is.null(quantiles)){
nms<-c(nms,"QLM","QUM")
QM<-apply(fit,c(2,3),quantile,probs=quantiles,na.rm=TRUE)
dataM<-data.frame(dataM,c(t(QM[1,,])),c(t(QM[2,,])))
}
colnames(dataM) <- nms
#DG<-data.frame(b=with(data,eval(as.name(cont.var))),c=with(data,eval(as.name(disc.var))))
#plotElM<-c(geom_line(aes_string(x=as.name(vars),y="centreM",group="group",linetype="group"),col=4,alpha=0.5),
# geom_rug(mapping=aes(x=dataRug$b),data=dataRug,alpha=0.3),
# list(ylab(label)))
#if (!is.null(quantiles))
# plotElM<-c(geom_ribbon(data=dataM,aes_string(x=vars,ymin="QLM",ymax="QUM",group="group"),alpha=0.2),plotElM
plotElM<-c(geom_line(aes_string(x=cont.var,y=centreM,
group=disc.var,linetype=disc.var,col=disc.var),alpha=0.5),
#geom_rug(mapping=aes(x=DG$b,group=c,linetype=c),data=DG,alpha=0.3),
list(ylab(label[which(is.D==0)])))
if (!is.null(quantiles))
plotElM<-c(geom_ribbon(data=dataM,aes_string(x=cont.var,ymin="QLM",ymax="QUM",
group=disc.var,col=disc.var),alpha=0.2),plotElM)
ggM<-ggplot(data=dataM)
plotM<-ggM + plotElM + plotOptions
}
if (count==2 && sum(is.D)==0){
centreM<-drop(apply(fit,c(2,3),centre))
if (contour==1){
dataM<-data.frame(newData,centreM)
nms <- c(colnames(newData),"centreM")
colnames(dataM) <- nms
ggM<-ggplot(data=dataM,aes_string(x=vars[1],y=vars[2],z=centreM))
level<-1
plotM<-ggM + geom_contour(data=dataM,aes(colour = stat(level))) + plotOptions
}
if (static && contour==0){
defaultList<-list(x=as.numeric(newR1),y=as.numeric(newR2),z=matrix(centreM,length(newR1),length(newR2)),colvar=matrix(centreM,length(newR1),length(newR2)))
along="xy";
space=0.6;
if (MEAN) optionalList<-list(xlab=vars[1],ylab=vars[2],zlab=label,along=along,space=space,add=FALSE,bty="g",main=paste("mean of", mvrmObj$varsY[[response]]))
if (STDEV) optionalList<-list(xlab=vars[1],ylab=vars[2],zlab=label,along=along,space=space,add=FALSE,bty="g",main=paste("stdev of", mvrmObj$varsY[[response]]))
allOptions<-c(defaultList,plotOptions,optionalList)
do.call("ribbon3D",allOptions[!duplicated(names(allOptions))])
}
if (!static && contour==0){
a<-as.matrix(cbind(newData,centreM))
if (is.null(plotOptions$col)) col=rainbow(16,2/3)
else col<-plotOptions$col
plotCentreM <- centreM
if (min(centreM)<=0) plotCentreM <- centreM + abs(min(centreM)) + 1
ra<-ceiling(length(col)*plotCentreM/max(plotCentreM))
defaultList<-list(x=a,col=col[ra])
if (MEAN) optionalList<-list(size=0.4,bg=1,axisLabels=c(vars[1],label,vars[2]),main=paste("mean of", mvrmObj$varsY[[response]]))
if (STDEV) optionalList<-list(size=0.4,bg=1,axisLabels=c(vars[1],label,vars[2]),main=paste("stdev of", mvrmObj$varsY[[response]]))
allOptions<-c(defaultList,plotOptions,optionalList)
do.call("scatterplot3js",allOptions[!duplicated(names(allOptions))])
}
}
if (contour==1)
return(plotM)
}
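# plot.mvrm: plot method for mvrm objects; loops over the requested model
# components (mean, stdev, corm, cors, dep, scale), terms and responses,
# calling plotGeneric for each.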
plot.mvrm <- function(x, model, term, response, response2,
intercept = TRUE, grid = 30, centre = mean,
quantiles = c(0.1, 0.9), contour = TRUE, static = TRUE,
centreEffects = FALSE, plotOptions = list(), nrow, ask = FALSE,
plotEmptyCluster = FALSE, combine = FALSE, ...)
{
oldpar <- NULL
on.exit(par(oldpar))
#
if (!is.function(centre)) stop("centre must be a function, usually mean or median")
if (length(quantiles)==1) quantiles <- c(quantiles,1-quantiles)
if (length(quantiles) > 2) stop("up to two quantiles")
if (!is.null(quantiles)) quantiles <- unique(sort(quantiles))
grid<-round(grid)
if (missing(response)) response <- c(1:x$p)
if (missing(response2)) response2 <- c(1:x$p)
if (max(response) > x$p) stop("argument response exceeds the number of responses")
#
if (missing(model)) model<-"all"
MEAN <- 0; STDEV <- 0; CORM <- 0; CORS <- 0; DEP <- 0; SCALE <- 0
if ((model=="all" || model=="mean") && (x$NG > 0)) MEAN <- 1
if ((model=="all" || model=="stdev") && (x$ND > 0)) STDEV <- 1
if ((model=="all" || model=="corm") && x$p > 1 && x$LUT > 1) CORM <- 1
if ((model=="all" || model=="cors") && x$p > 1 && x$LUT > 1) CORS <- 1
if ((model=="all" || model=="dep") && (x$NK > 0)) DEP <- 1
if ((model=="all" || model=="scale") && (x$NC > 0)) SCALE <- 1
#
if (MEAN==0 && STDEV==0 && CORM==0 && CORS==0 && DEP==0 && SCALE==0) stop("no terms to plot");
termM <- NULL; termSD <- NULL; termMC<-NULL; termSDC<-NULL; termD<-NULL; termSC<-NULL
countM <- NULL; countSD <- NULL; countMC <- NULL; countSDC <- NULL; countD <- NULL; countSC <- NULL
int.labelM <- NULL; int.labelSD <- NULL; int.labelD <- NULL; int.labelSC <- NULL
if (missing(term)){
if (MEAN) {termM<-1:x$NG; countM<-x$countX; int.labelM <- 1:x$NG}
if (STDEV) {termSD<-1:x$ND; countSD<-x$countZ; int.labelSD <- 1:x$ND}
if (DEP) {termD<-1:(x$NK-1); countD<-x$countC[termD]; int.labelD <- termD}
if (SCALE) {termSC<-1:x$NC; countSC<-x$countW; int.labelSC <- 1:x$NC}
}else{
if (MEAN){
termM<-term
for (i in termM){
#print(i)
lcX <- label.count(i,x$labelsX,x$countX,"mean")
int.labelM[i]<-lcX[1]
countM[i]<-lcX[2]
}
}
if (STDEV){
termSD<-term
for (i in termSD){
lcZ <- label.count(i,x$labelsZ,x$countZ,"stdev")
int.labelSD[i]<-lcZ[1]
countSD[i]<-lcZ[2]
}
}
if (DEP){
termD<-term
for (i in termD){
lcC <- label.count(i,x$labelsC,x$countC,"autoregressive")
int.labelD[i]<-lcC[1]
countD[i]<-lcC[2]
}
}
if (SCALE){
termSC<-term
for (i in termSC){
lcW <- label.count(i,x$labelsW,x$countW,"scale")
int.labelSC[i]<-lcW[1]
countSC[i]<-lcW[2]
}
}
}
if (CORM) {termMC<-1; countMC<-x$countXc[termMC]}
if (CORS) {termSDC<-1; countSDC<-x$countZc[termSDC]}
#
if ((contour == 0) &&
(sum(c(countM,countSD,countMC,countSDC,countD,countSC)>1) > 0) &&
(length(c(countM,countSD,countMC,countSDC,countD,countSC)) > 1)){
warning("for 3-d plots, specify one model, one term and one response at a time")
contour <- 1
}
#
if (combine && length(term) == 1) combine <- FALSE
if (combine) termM <- termSD <- DEP <- SCALE <- 1
#
my_plots <- list()
count <- 1
for (r in response){
if (MEAN)
for (i in termM){
#lcX <- label.count(i,x$labelsX,x$countX,"mean")
#int.labelX <- lcX[1]
#countX <- lcX[2]
if (!combine){
int.labelX <- int.labelM[i]
countX <- countM[i]
varsArg <- x$varsX[[int.labelX]]
is.DArg <- x$is.Dx[[int.labelX]]
which.SpecArg <- x$which.SpecX[[int.labelX]]
}else{
int.labelX <- term
varsArg <- unique(unlist(x$varsX[int.labelX]))
countX <- length(varsArg)
is.DArg <- unlist(x$is.Dx[int.labelX])
if (!sum(is.DArg)) is.DArg <- 0
which.SpecArg <- unlist(x$which.SpecX[int.labelX])
if ((length(unique(varsArg)) > 1) && (sum(!is.DArg) > 1)) stop("can't combine more than 2 continuous terms; consider interaction terms")
}
if (countX==1) contour <- 1
plotOptions2 <- list(ggtitle(paste("mean of", x$varsY[r])),plotOptions)
my_plots[[count]] <- plotGeneric(mvrmObj=x, MEAN=1, STDEV=0, CORM=0, CORS=0, DEP=0, SCALE=0,
response=r, intercept=intercept, grid=grid,
centre=centre, quantiles=quantiles, static=static,
contour=contour,
centreEffects=centreEffects, plotOptions=plotOptions2,
int.label=int.labelX, count=countX, vars=varsArg,
label=x$labelsX[int.labelX], is.D=is.DArg,
formula.term=x$formula.termsX[int.labelX],
which.Spec=which.SpecArg, assign=x$assignX,
knots=x$Xknots, storeMeanVector=x$storeMeanVectorX,
storeIndicator=x$storeIndicatorX, data=x$data,
plotEmptyCluster=plotEmptyCluster)
if ((ask==TRUE) && (length(my_plots)>0)) print(my_plots[[count]])
if (count==1)
oldpar <- c(oldpar, par(ask=ask))
count <- count + 1
}
if (STDEV)
for (i in termSD){
#lcZ <- label.count(i,x$labelsZ,x$countZ,"stdev")
#int.labelZ <- lcZ[1]
#countZ <- lcZ[2]
if (!combine){
int.labelZ <- int.labelSD[i]
countZ <- countSD[i]
varsArg <- x$varsZ[[int.labelZ]]
is.DArg <- x$is.Dz[[int.labelZ]]
which.SpecArg <- x$which.SpecZ[[int.labelZ]]
}else{
int.labelZ <- term
countZ <- 2
varsArg <- unlist(x$varsZ[int.labelZ])
is.DArg <- unlist(x$is.Dz[int.labelZ])
which.SpecArg <- unlist(x$which.SpecZ[int.labelZ])
}
if (combine && (!sum(is.DArg)==1)) stop("can only combine a continuous with a discrete term")
if (countZ==1) contour <- 1
plotOptions2 <- list(ggtitle(paste("stdev of", x$varsY[[r]])),plotOptions)
if (x$LUT > 1) plotOptions2 <- list(ggtitle(paste("innov stdev of", x$varsY[[r]])),plotOptions)
my_plots[[count]] <- plotGeneric(mvrmObj=x, MEAN=0, STDEV=1, CORM=0, CORS=0, DEP=0, SCALE=0,
response=r, intercept=intercept, grid=grid,
centre=centre, quantiles=quantiles, static=static,
contour=contour,
centreEffects=centreEffects, plotOptions=plotOptions2,
int.label=int.labelZ, count=countZ, vars=varsArg,
label=x$labelsZ[int.labelZ], is.D=is.DArg,
formula.term=x$formula.termsZ[int.labelZ],
which.Spec=which.SpecArg, assign=x$assignZ,
knots=x$Zknots, storeMeanVector=x$storeMeanVectorZ,
storeIndicator=x$storeIndicatorZ, data=x$data,
plotEmptyCluster=plotEmptyCluster)
if ((ask==TRUE) && (length(my_plots)>0)) print(my_plots[[count]])
if (count==1)
oldpar <- c(oldpar, par(ask=ask))
count <- count + 1
}
if (SCALE)
for (i in termSC){
#lcW <- label.count(i,x$labelsW,x$countW,"scale")
#int.labelW <- lcW[1]
#countW <- lcW[2]
if (!combine){
int.labelW <- int.labelSC[i]
countW <- countSC[i]
varsArg <- x$varsW[[int.labelW]]
is.DArg <- x$is.Dw[[int.labelW]]
which.SpecArg <- x$which.SpecW[[int.labelW]]
}else{
int.labelW <- term
countW <- 2
varsArg <- unlist(x$varsW[int.labelW])
is.DArg <- unlist(x$is.Dw[int.labelW])
which.SpecArg <- unlist(x$which.SpecW[int.labelW])
}
if (combine && (!sum(is.DArg)==1)) stop("can only combine a continuous with a discrete term")
if (countW==1) contour <- 1
plotOptions2 <- list(ggtitle(paste("scale of", x$varsY[[r]])),plotOptions)
my_plots[[count]] <- plotGeneric(mvrmObj=x, MEAN=0, STDEV=1, CORM=0, CORS=0, DEP=0, SCALE = 1,
response=r, intercept=intercept, grid=grid,
centre=centre, quantiles=quantiles, static=static,
contour=contour,
centreEffects=centreEffects, plotOptions=plotOptions2,
int.label=int.labelW, count=countW, vars=varsArg,
label=x$labelsW[int.labelW], is.D=is.DArg,
formula.term=x$formula.termsW[int.labelW],
which.Spec=which.SpecArg, assign=x$assignW,
knots=x$Wknots, storeMeanVector=x$storeMeanVectorW,
storeIndicator=x$storeIndicatorW, data=x$data,
plotEmptyCluster=plotEmptyCluster)
if ((ask==TRUE) && (length(my_plots)>0)) print(my_plots[[count]])
if (count==1)
oldpar <- c(oldpar, par(ask=ask))
count <- count + 1
}
}
if (DEP)
for (r1 in response){
for (r2 in response2){
Pair<-(r1-1)*x$p + r2
for (i in termD){
#lcC <- label.count(i,x$labelsC,x$countC,"autoregressive")
#int.labelC <- lcC[1]
#countC <- lcC[2]
if (!combine){
int.labelC <- int.labelD[i]
countC <- countD[i]
varsArg <- x$varsC[[int.labelC]]
is.DArg <- x$is.Dc[[int.labelC]]
which.SpecArg <- x$which.SpecC[[int.labelC]]
}else{
int.labelC <- term
countC <- 2
varsArg <- unlist(x$varsC[int.labelC])
is.DArg <- unlist(x$is.Dc[int.labelC])
which.SpecArg <- unlist(x$which.SpecC[int.labelC])
}
if (combine && (!sum(is.DArg)==1)) stop("can only combine a continuous with a discrete term")
if (countC==1) contour <- 1
plotOptions2 <- list(ggtitle(paste("autoreg", x$varsY[r1], x$varsY[r2])), plotOptions)
my_plots[[count]] <- plotGeneric(mvrmObj=x, MEAN=1, STDEV=0, CORM=0, CORS=0, DEP=1, SCALE=0,
response=Pair, intercept=intercept, grid=grid,
centre=centre, quantiles=quantiles, static=static,
contour=contour,
centreEffects=centreEffects, plotOptions=plotOptions2,
int.label=int.labelC, count=countC, vars=varsArg,
label=x$labelsC[int.labelC], is.D=is.DArg,
formula.term=x$formula.termsC[int.labelC],
which.Spec=which.SpecArg, assign=x$assignC,
knots=x$Cknots, storeMeanVector=x$storeMeanVectorC,
storeIndicator=x$storeIndicatorC, data=x$lag,
plotEmptyCluster=plotEmptyCluster)
if ((ask==TRUE) && (length(my_plots)>0)) print(my_plots[[count]])
if (count==1)
oldpar <- c(oldpar, par(ask=ask))
count <- count + 1
}
}
}
if (CORM){
for (i in termMC){
contour <- 1
plotOptions2 <- list(ggtitle(paste("mean cor")),plotOptions)
which.Spec <- 0
if (length(x$which.SpecXc)) which.Spec <- x$which.SpecXc[[1]]
is.D <- 0
if (length(x$is.Dxc)) is.D <- x$is.Dxc[[1]]
vars <- x$varTime
if (length(x$varsXc)) vars <- x$varsXc[[1]]
my_plots[[count]] <- plotGeneric(mvrmObj=x, MEAN=1, STDEV=0, CORM=1, CORS=0, DEP=0, SCALE=0,
response=1, intercept=intercept,
grid=grid, centre=centre, quantiles=quantiles, static=static, contour=contour,
centreEffects=centreEffects, plotOptions=plotOptions2,
int.label=1, count=1, vars=vars,
label=x$labelsXc[1], is.D=is.D,
formula.term=x$formula.termsXc[1],
which.Spec=which.Spec, assign=x$assignXc,
knots=x$Xcknots, storeMeanVector=x$storeMeanVectorXc,
storeIndicator=x$storeIndicatorXc, data=x$data,
plotEmptyCluster=plotEmptyCluster)
if ((ask==TRUE) && (length(my_plots)>0)) print(my_plots[[count]])
if (count==1)
oldpar <- c(oldpar, par(ask=ask))
count <- count + 1
}
}
if (CORS){
for (i in termSDC){
contour <- 1
plotOptions2 <- list(ggtitle(paste("stdev cor")),plotOptions)
which.Spec <- 0
if (length(x$which.SpecZc)) which.Spec <- x$which.SpecZc[[1]]
is.D <- 0
if (length(x$is.Dzc)) is.D <- x$is.Dzc[[1]]
vars <- x$varTime
            if (length(x$varsZc)) vars <- x$varsZc[[1]]
my_plots[[count]] <- plotGeneric(mvrmObj=x, MEAN=0, STDEV=1, CORM=0, CORS=1, DEP=0, SCALE=0,
response=1, intercept=intercept,
grid=grid, centre=centre, quantiles=quantiles, static=static, contour=contour,
centreEffects=centreEffects, plotOptions=plotOptions2,
int.label=1, count=1, vars=vars,
label=x$labelsZc[1], is.D=is.D,
formula.term=x$formula.termsZc[1],
which.Spec=which.Spec, assign=x$assignZc,
knots=x$Zcknots, storeMeanVector=x$storeMeanVectorZc,
storeIndicator=x$storeIndicatorZc, data=x$data,
plotEmptyCluster=plotEmptyCluster)
if ((ask==TRUE) && (length(my_plots)>0)) print(my_plots[[count]])
if (count==1)
oldpar <- c(oldpar, par(ask=ask))
count <- count + 1
}
}
if (missing(nrow)){
if (CORM==0 && CORS==0 && DEP==0){nrow <- min(x$p,length(my_plots))}else{nrow <- ceiling(sqrt(count-1))}
}
if ((ask==FALSE) && (length(my_plots)>0)) quiet(print(grid.arrange(grobs = my_plots, nrow = nrow)))
}
|
/scratch/gouwar.j/cran-all/cranData/BNSP/R/plot.R
|
#' @title Download data file
#' @description Download the data file needed to start BNrich
#' @param destfile A file path on the user's computer where the preprocessed data file is saved
#' @param verbose A logical argument to show verbose results
#' @importFrom utils download.file
#' @return Downloads an .rda file to destfile; once loaded it provides mapkG, a list of the 187 imported preprocessed signaling pathways; PathName_final, a data.frame with the names and IDs of all 187 pathways; and pathway.id, a character vector of pathway IDs
#'
#' @export
#'
#' @examples
#' \donttest{
#' destfile = tempfile("files", fileext = ".rda")
#' files <- fetch_data_file(destfile)
#' load(destfile)
#' }
fetch_data_file <- function(destfile, verbose = FALSE){
fileURL <-
"https://github.com/Samaneh-Bioinformatics/RData/raw/master/BNrich-start.rda"
if (!file.exists(destfile)) {
if (verbose) {
print("please be patient, the files are downloading...")
}
files <- c(fileURL)
for (file in files) {
tryCatch(utils::download.file(file, destfile, method="libcurl"),
error = function(e) print(paste(file, 'did not work out')))
}}}
#' @title Simplification of networks -- unifying nodes
#' @description Unify nodes based on the imported signaling pathways and gene expression data
#' @param dataH A data frame containing (healthy) control objects data
#' @param dataD A data frame containing disease objects data
#' @param MapkG A list containing the 187 imported signaling pathways
#' @param Pathway.id A vector containing the 187 KEGG pathway IDs
#' @importFrom graph nodes removeNode edgeMatrix
#' @return A list containing data_h, data_d, mapkG1 and pathway.id1
#'
#' @export
#'
#' @examples
#' #All the 187 preprocessed signaling pathways can be entered in analysis by fetch_data_file().
#' #But here you enter a subset of those pathways to see how this package works.
#' files <- system.file("extdata", "test_files_to_start.RData", package = "BNrich", mustWork = TRUE)
#' load(files)
#' Data <- system.file("extdata", "Test_DATA.RData", package = "BNrich", mustWork = TRUE)
#' load(Data)
#' uni_Result <- unify_path(dataH, dataD, MapkG = sub_mapkG, Pathway.id = path.id)
unify_path <- function(dataH, dataD, MapkG, Pathway.id){
NOD <- lapply(MapkG, graph::nodes)
NOD <- lapply(NOD, as.vector)
data_h <- list()
data_d <- list()
Diff <- list()
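  # For each pathway: restrict the expression matrices to genes present in the
  # pathway, then drop pathway nodes that have no expression measurements.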
for (i in seq_along(MapkG)) {
data_h[[i]] <- matrix()
data_h[[i]] <- subset(dataH, rownames(dataH) %in% NOD[[i]])
data_h[[i]] <- as.data.frame(t(data_h[[i]]))
rownames(data_h[[i]]) <- NULL
Diff[[i]] <- setdiff(NOD[[i]], colnames(data_h[[i]]))
MapkG[[i]]=removeNode(Diff[[i]], MapkG[[i]])
data_d[[i]] <- matrix()
data_d[[i]] <- subset(dataD, rownames(dataD) %in% NOD[[i]])
data_d[[i]] <- as.data.frame(t(data_d[[i]]))
rownames(data_d[[i]]) <- NULL
}
mapkG1 <- MapkG
pathway.id1 <- Pathway.id
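  # Discard pathways left with fewer than 5 edges after unification; iterate
  # backwards so removing an element does not shift the indices still to visit.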
for (i in length(mapkG1):1) {
if(ncol(edgeMatrix(mapkG1[[i]]))<5){
mapkG1 <- mapkG1[-i]
data_h <- data_h[-i]
data_d <- data_d[-i]
pathway.id1 <- pathway.id1[-i]
}}
for (i in seq_along(mapkG1)) {
data_h[[i]] <- data_h[[i]][, order(names(data_h[[i]]))]
data_d[[i]] <- data_d[[i]][, order(names(data_d[[i]]))]
}
unify_results <- list("data_h"= data_h,"data_d"=data_d,"mapkG1"= mapkG1,"pathway.id1" =pathway.id1)
return(unify_results)
}
#' @title Construct Bayesian networks structures
#' @description Construct BNs structures using unified signaling pathways
#' @param mapkG1 A list contains unified signaling pathways
#' @importFrom bnlearn as.bn
#' @return A list contains Bayesian networks structures
#'
#' @export
#'
#' @examples
#' #All the 187 preprocessed signaling pathways can be entered in analysis by fetch_data_file().
#' #But here you enter a subset of those pathways to see how this package works.
#' files <- system.file("extdata", "test_files_to_start.RData", package = "BNrich", mustWork = TRUE)
#' load(files)
#' Data <- system.file("extdata", "Test_DATA.RData", package = "BNrich", mustWork = TRUE)
#' load(Data)
#' uni_Result <- unify_path(dataH, dataD, MapkG = sub_mapkG, Pathway.id = path.id)
#' M1 <- uni_Result$mapkG1
#' BN <- BN_struct(M1)
BN_struct <- function(mapkG1){
BN <- list()
BN <- lapply(mapkG1, bnlearn::as.bn)
return(BN)
}
#' @title LASSO regression
#' @description LASSO regression -- the second step in simplifying the BN structures
#' @param BN A list of Bayesian networks obtained from the BN_struct function
#' @param data_h A list containing data frames related to control objects
#' @param data_d A list containing data frames related to disease objects
#' @importFrom bnlearn drop.arc
#' @importFrom glmnet cv.glmnet
#' @importFrom stats coef
#' @return A list containing two lists, BN_h and BN_d, the simplified BNs
#'
#' @export
#'
#' @examples
#' #All the 187 preprocessed signaling pathways can be entered in analysis by fetch_data_file().
#' #But here you enter a subset of those pathways to see how this package works.
#' files <- system.file("extdata", "test_files_to_start.RData", package = "BNrich", mustWork = TRUE)
#' load(files)
#' Data <- system.file("extdata", "Test_DATA.RData", package = "BNrich", mustWork = TRUE)
#' load(Data)
#' uni_Result <- unify_path(dataH, dataD, MapkG = sub_mapkG, Pathway.id = path.id)
#' M1 <- uni_Result$mapkG1
#' BN <- BN_struct(M1)
#' data_h1 <- uni_Result$data_h
#' data_d1 <- uni_Result$data_d
#' LASSO_Result <- LASSO_BN(BN = BN , data_h = data_h1 , data_d = data_d1)
LASSO_BN <- function(BN, data_h, data_d){
BN_h <- BN
BN_d <- BN
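  # For each node with more than one parent, fit cross-validated LASSO
  # regressions in both conditions and drop an incoming arc only when its
  # coefficient shrinks to zero at lambda.min in BOTH the control and disease fits.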
for(k in seq_along(BN)){
for(i in seq_along(BN[[k]]$nodes)) {
if(length((BN_h[[k]]$nodes)[[i]][3]$parents) > 1) {
Parents_h <- as.matrix(data_h[[k]][BN_h[[k]]$nodes[[i]][3]$parents])
respons_h <- as.matrix(data_h[[k]][bnlearn::nodes(BN_h[[k]])[i]])
Parents_d <- as.matrix(data_d[[k]][BN_d[[k]]$nodes[[i]][3]$parents])
respons_d <- as.matrix(data_d[[k]][bnlearn::nodes(BN_d[[k]])[i]])
cvfit_h = cv.glmnet(Parents_h, respons_h, grouped = FALSE)
cvfit_d = cv.glmnet(Parents_d, respons_d, grouped = FALSE)
for(j in nrow(coef(cvfit_h,s=cvfit_h$lambda.min)):1){
if((coef(cvfit_h, s=cvfit_h$lambda.min)[j] == 0)&& (coef(cvfit_d, s=cvfit_d$lambda.min)[j] == 0)){
BN_h[[k]] <- drop.arc(BN_h[[k]],from = row.names(coef(cvfit_h, s=cvfit_h$lambda.min))[j],to = bnlearn::nodes(BN_h[[k]])[i])
BN_d[[k]] <- drop.arc(BN_d[[k]],from = row.names(coef(cvfit_d, s=cvfit_d$lambda.min))[j],to = bnlearn::nodes(BN_d[[k]])[i])
}}}}}
LASSO_results <- list("BN_h" = BN_h, "BN_d" = BN_d)
return(LASSO_results)}
#' @title Estimate parameters of BNs in control and disease states
#' @description Estimate parameters of BNs in control and disease states
#' @param BN_H A list containing simplified BN structures for control objects
#' @param BN_D A list containing simplified BN structures for disease objects
#' @param data_h A list containing data frames related to control objects for each BN
#' @param data_d A list containing data frames related to disease objects for each BN
#' @importFrom bnlearn bn.fit
#' @importFrom stats coef
#' @return A list containing four lists: BNs_h, BNs_d, coef_h and coef_d
#'
#' @export
#'
#' @examples
#' #All the 187 preprocessed signaling pathways can be entered in analysis by fetch_data_file().
#' #But here you enter a subset of those pathways to see how this package works.
#' files <- system.file("extdata", "test_files_to_start.RData", package = "BNrich", mustWork = TRUE)
#' load(files)
#' Data <- system.file("extdata", "Test_DATA.RData", package = "BNrich", mustWork = TRUE)
#' load(Data)
#' uni_Result <- unify_path(dataH, dataD, MapkG = sub_mapkG, Pathway.id = path.id)
#' M1 <- uni_Result$mapkG1
#' BN <- BN_struct(M1)
#' data_h1 <- uni_Result$data_h
#' data_d1 <- uni_Result$data_d
#' LASSO_Result <- LASSO_BN(BN = BN , data_h = data_h1 , data_d = data_d1)
#' BN_h1 <- LASSO_Result$BN_h
#' BN_d1 <- LASSO_Result$BN_d
#' esti_result <- esti_par(BN_H = BN_h1, BN_D = BN_d1, data_h = data_h1, data_d = data_d1)
esti_par <- function(BN_H, BN_D, data_h, data_d){
BNs_h <- list()
BNs_d <- list()
coef_h <- list()
coef_d <- list()
for (i in seq_along(BN_H)) {
BNs_h[[i]] <- bn.fit(BN_H[[i]], data_h[[i]])
BNs_d[[i]] <- bn.fit(BN_D[[i]], data_d[[i]])
coef_h[[i]] <- coef(BNs_h[[i]])
coef_d[[i]] <- coef(BNs_d[[i]])
}
esti_results <- list("BNs_h"=BNs_h,"BNs_d"=BNs_d,"coef_h"=coef_h,"coef_d"=coef_d)
return(esti_results)
}
#' @title Estimate variance-covariance matrixes for any parameters of BNs
#' @description Estimate variance-covariance matrices for the parameters of the BNs
#' @param Data_h A list containing data frames related to control objects for each BN
#' @param coef_H A list of parameters of BN_h
#' @param BNs_H A list of BNs learned from the control objects data
#' @param Data_d A list containing data frames related to disease objects for each BN
#' @param coef_D A list of parameters of BN_d
#' @param BNs_D A list of BNs learned from the disease objects data
#' @importFrom corpcor pseudoinverse
#' @return A list containing two lists, var_mat_Bh and var_mat_Bd
#'
#' @export
#'
#' @examples
#' #All the 187 preprocessed signaling pathways can be entered in analysis by fetch_data_file().
#' #But here you enter a subset of those pathways to see how this package works.
#' files <- system.file("extdata", "test_files_to_start.RData", package = "BNrich", mustWork = TRUE)
#' load(files)
#' Data <- system.file("extdata", "Test_DATA.RData", package = "BNrich", mustWork = TRUE)
#' load(Data)
#' uni_Result <- unify_path(dataH, dataD, MapkG = sub_mapkG, Pathway.id = path.id)
#' M1 <- uni_Result$mapkG1
#' BN <- BN_struct(M1)
#' data_h1 <- uni_Result$data_h
#' data_d1 <- uni_Result$data_d
#' LASSO_Result <- LASSO_BN(BN = BN , data_h = data_h1 , data_d = data_d1)
#' BN_h1 <- LASSO_Result$BN_h
#' BN_d1 <- LASSO_Result$BN_d
#' esti_result <- esti_par(BN_H = BN_h1, BN_D = BN_d1, data_h = data_h1, data_d = data_d1)
#' BNs_H <- esti_result$BNs_h
#' BNs_D <- esti_result$BNs_d
#' coef_h <- esti_result$coef_h
#' coef_d <- esti_result$coef_d
#' var_result <- var_mat(data_h1, coef_h, BNs_H, data_d1, coef_d, BNs_D)
var_mat <- function(Data_h, coef_H, BNs_H, Data_d, coef_D, BNs_D){
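  # For each node, build the design matrix X = [1, parents]; the coefficient
  # covariance is then sigma^2 * (X'X)^+, computed with the Moore-Penrose
  # pseudoinverse in case X'X is singular.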
X_h <- list()
for (k in seq_along(Data_h)) {
X_h[[k]] <- list()
for(i in seq_along(colnames(Data_h[[k]]))){
X_h[[k]][[i]] <- matrix(nrow = nrow(Data_h[[k]]),ncol = length(coef_H[[k]][[i]]))
X_h[[k]][[i]][,1] <- as.vector(rep(1,nrow(Data_h[[k]])))
if(1<length(coef_H[[k]][[i]])){
for (j in 2:length(coef_H[[k]][[i]])){
X_h[[k]][[i]][,j] <- Data_h[[k]][,names(coef_H[[k]][[i]][j])]}
}}}
var_mat_Bh <- list()
for (k in seq_along(Data_h)) {
var_mat_Bh[[k]] <- list()
for(i in seq_along(colnames(Data_h[[k]]))){
var_mat_Bh[[k]][[i]] <- (BNs_H[[k]][[i]]$sd)^2*pseudoinverse((t(X_h[[k]][[i]]))%*%(X_h[[k]][[i]]))}}
X_d <- list()
for (k in seq_along(Data_h)) {
X_d[[k]] <- list()
for(i in seq_along(colnames(Data_h[[k]]))){
X_d[[k]][[i]] <- matrix(nrow = nrow(Data_h[[k]]),ncol = length(coef_D[[k]][[i]]))
X_d[[k]][[i]][,1] <- as.vector(rep(1,nrow(Data_h[[k]])))
if(1<length(coef_D[[k]][[i]])){
for (j in 2:length(coef_D[[k]][[i]])){
X_d[[k]][[i]][,j] <- Data_h[[k]][,names(coef_D[[k]][[i]][j])]}
}}}
var_mat_Bd <- list()
for (k in seq_along(Data_h)) {
var_mat_Bd[[k]] <- list()
for(i in seq_along(colnames(Data_h[[k]]))){
var_mat_Bd[[k]][[i]] <- (BNs_D[[k]][[i]]$sd)^2*pseudoinverse((t(X_d[[k]][[i]]))%*%(X_d[[k]][[i]]))}}
var_mat_results<- list("var_mat_Bh"=var_mat_Bh,"var_mat_Bd"=var_mat_Bd)
return(var_mat_results)
}
#' @title Testing the equality of regression coefficients
#' @description t-test for equality of the corresponding parameters in each BN
#' @param Data_h A list containing data frames related to control objects for each BN
#' @param coef_H A list containing parameters of BN_h
#' @param BNs_H A list containing BNs learned from the control objects data
#' @param Data_d A list containing data frames related to disease objects for each BN
#' @param coef_D A list containing parameters of BN_d
#' @param BNs_D A list containing BNs learned from the disease objects data
#' @param Var_mat_Bh A list containing covariance matrices for each node of BN_h
#' @param Var_mat_Bd A list containing covariance matrices for each node of BN_d
#' @param Pathway.id1 A vector containing the modified KEGG pathway IDs
#' @importFrom stats pt p.adjust complete.cases
#' @return A data frame containing t-test results for all parameters in the final BNs
#'
#' @export
#'
#' @examples
#' #All the 187 preprocessed signaling pathways can be entered in analysis by fetch_data_file().
#' #But here you enter a subset of those pathways to see how this package works.
#' files <- system.file("extdata", "test_files_to_start.RData", package = "BNrich", mustWork = TRUE)
#' load(files)
#' Data <- system.file("extdata", "Test_DATA.RData", package = "BNrich", mustWork = TRUE)
#' load(Data)
#' uni_Result <- unify_path(dataH, dataD, MapkG = sub_mapkG, Pathway.id = path.id)
#' M1 <- uni_Result$mapkG1
#' BN <- BN_struct(M1)
#' data_h1 <- uni_Result$data_h
#' data_d1 <- uni_Result$data_d
#' LASSO_Result <- LASSO_BN(BN = BN , data_h = data_h1 , data_d = data_d1)
#' BN_h1 <- LASSO_Result$BN_h
#' BN_d1 <- LASSO_Result$BN_d
#' esti_result <- esti_par(BN_H = BN_h1, BN_D = BN_d1, data_h = data_h1, data_d = data_d1)
#' BNs_H <- esti_result$BNs_h
#' BNs_D <- esti_result$BNs_d
#' coef_h <- esti_result$coef_h
#' coef_d <- esti_result$coef_d
#' var_result <- var_mat(data_h1, coef_h, BNs_H, data_d1, coef_d, BNs_D)
#' Var_H = var_result$var_mat_Bh
#' Var_D = var_result$var_mat_Bd
#' path.id1 <- uni_Result$pathway.id1
#' Ttest_result <- parm_Ttest(data_h1, coef_h, BNs_H, data_d1, coef_d, BNs_D, Var_H, Var_D, path.id1)
parm_Ttest <- function(Data_h, coef_H, BNs_H, Data_d, coef_D, BNs_D, Var_mat_Bh , Var_mat_Bd, Pathway.id1){
t.test2 <- function(m1,m2,s1,s2,n1,n2){
se <- sqrt((s1^2/(n1)) + (s2^2/(n2)))
# welch-satterthwaite df
df <- se^4/((((s1^2/n1)^2)/(n1-1))+(((s2^2/n2)^2)/(n2-1)))
t <- (m1-m2)/se
dat <- c(m1-m2, se, t, 2*pt(-abs(t),df))
names(dat) <- c("Difference of means", "Std Error", "t", "p-value")
return(dat)
}
Ttest_results <- data.frame()
options(stringsAsFactors = FALSE)
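  # Loop over pathways, nodes, and coefficients: compare each disease
  # coefficient (m1) with its control counterpart (m2), taking the standard
  # errors from the diagonals of the variance-covariance matrices.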
for (k in seq_along(Data_h)) {
n1= nrow(Data_d[[k]])
n2= nrow(Data_h[[k]])
for(i in seq_along(colnames(Data_h[[k]]))){
To <- names(BNs_H[[k]][i])
for (j in seq_along(coef_H[[k]][[i]])){
m1=coef_D[[k]][[i]][j]
m2=coef_H[[k]][[i]][j]
s1= sqrt(Var_mat_Bd[[k]][[i]][j,j])
s2= sqrt(Var_mat_Bh[[k]][[i]][j,j])
if(j>1){
From <- names(m2)
}
else {
From <- "intercept"}
T <- t.test2(m1,m2,s1,s2,n1,n2)
Pval <- T["p-value"]
Ttest_results <- rbind(Ttest_results,c(From, To,k,Pathway.id1[k],Pval,m1,m2))
}}}
colnames(Ttest_results) <- c("From","To","pathway.number","pathwayID","Pval","coefficient in disease","coefficient in control")
Ttest_results$fdr <- p.adjust(Ttest_results$Pval,method = "fdr")
Ttest_results$pathway.number <-as.numeric(Ttest_results$pathway.number)
Ttest_results <- Ttest_results[complete.cases(Ttest_results),]
return(Ttest_results)
}
#' @title Analysis of significant final BNs
#' @description Fisher's exact test applied to pathway enrichment analysis (PEA) on the final BNs
#' @param Ttest_Results A data frame containing t-test results for all parameters
#' @param Pathway.id1 A vector containing the modified KEGG pathway IDs
#' @param PathName_Final A data frame containing the IDs and names of the KEGG pathways
#' @param fdr.value A numeric threshold to determine significant parameters
#' @importFrom stats fisher.test p.adjust
#' @return A data frame containing Fisher's exact test results for each final pathway
#'
#' @export
#'
#' @examples
#' #All the 187 preprocessed signaling pathways can be entered in analysis by fetch_data_file().
#' #But here you enter a subset of those pathways to see how this package works.
#' files <- system.file("extdata", "test_files_to_start.RData", package = "BNrich", mustWork = TRUE)
#' load(files)
#' Data <- system.file("extdata", "Test_DATA.RData", package = "BNrich", mustWork = TRUE)
#' load(Data)
#' uni_Result <- unify_path(dataH, dataD, MapkG = sub_mapkG, Pathway.id = path.id)
#' M1 <- uni_Result$mapkG1
#' BN <- BN_struct(M1)
#' data_h1 <- uni_Result$data_h
#' data_d1 <- uni_Result$data_d
#' LASSO_Result <- LASSO_BN(BN = BN , data_h = data_h1 , data_d = data_d1)
#' BN_h1 <- LASSO_Result$BN_h
#' BN_d1 <- LASSO_Result$BN_d
#' esti_result <- esti_par(BN_H = BN_h1, BN_D = BN_d1, data_h = data_h1, data_d = data_d1)
#' BNs_H <- esti_result$BNs_h
#' BNs_D <- esti_result$BNs_d
#' coef_h <- esti_result$coef_h
#' coef_d <- esti_result$coef_d
#' var_result <- var_mat(data_h1, coef_h, BNs_H, data_d1, coef_d, BNs_D)
#' Var_H = var_result$var_mat_Bh
#' Var_D = var_result$var_mat_Bd
#' path.id1 <- uni_Result$pathway.id1
#' Ttest_result <- parm_Ttest(data_h1, coef_h, BNs_H, data_d1, coef_d, BNs_D, Var_H, Var_D, path.id1)
#' BNrich_result <- BNrich(Ttest_result, path.id1, Path.Name)
BNrich <- function(Ttest_Results, Pathway.id1, PathName_Final, fdr.value = 0.05){
Ttest_results <- Ttest_Results[order(Ttest_Results$pathway.number),]
BNrich_results=data.frame()
b=1
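  # Single pass over the pathway-sorted t-test table: for pathway d,
  # f-1 counts all tested parameters (nk) and a counts the significant ones (mk).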
for (d in seq_len(max(Ttest_results$pathway.number))) {
a=0
f=1
while(b<=nrow(Ttest_results) & Ttest_results$pathway.number[b]==d)
{
if(Ttest_results$fdr[b]<fdr.value) {a=a+1}
f=f+1
b=b+1
}
BNrich_results[d,1]=f-1
BNrich_results[d,2]=a
}
N=sum(BNrich_results[,1])
M=sum(BNrich_results[,2])
for (d in seq_len(max(Ttest_results$pathway.number))){
df <- matrix(c(BNrich_results[d,2],(M-BNrich_results[d,2]),(BNrich_results[d,1]-BNrich_results[d,2])
,(N-M-BNrich_results[d,1]+BNrich_results[d,2])) ,nrow = 2)
fisher <- fisher.test(df)
BNrich_results[d,3]=fisher$p.value
BNrich_results[d,4]=Pathway.id1[d]
}
colnames(BNrich_results) <- c("nk","mk","p.value","ID")
BNrich_results$fdr <- p.adjust(BNrich_results$p.value, method = "fdr")
BNrich_results <- BNrich_results[,c("ID","p.value","fdr")]
BNrich_results <- merge(BNrich_results, PathName_Final, by = "ID")
colnames(BNrich_results) <- c("pathwayID","p.value","fdr","pathway.number","Name")
BNrich_results <- BNrich_results[order(BNrich_results$fdr),]
return(BNrich_results)
}
|
/scratch/gouwar.j/cran-all/cranData/BNrich/R/BNrich.R
|
## ----install_BNrich, eval=FALSE-----------------------------------------------
# install.packages("BNrich_0.1.0.tar.gz", type="source", repos=NULL)
# library("BNrich")
## ----start files, eval=FALSE--------------------------------------------------
# destfile = tempfile("files", fileext = ".rda")
# files <- fetch_data_file(destfile)
# load(destfile)
## ----example_of_dataset, eval=FALSE-------------------------------------------
# Data <- system.file("extdata", "Test_DATA.RData", package = "BNrich", mustWork = TRUE)
# load(Data)
# head(dataH)
## ----example_of_dataset_D, eval=FALSE-----------------------------------------
# head(dataD)
## ----unify_path, eval=FALSE---------------------------------------------------
# unify_results <- unify_path(dataH, dataD, mapkG, pathway.id)
## ----the_number_of_original_pathways, eval=FALSE------------------------------
# length(mapkG)
## ----the_number_of_unified_pathways, eval=FALSE-------------------------------
# mapkG1 <- unify_results$mapkG1
# length(mapkG1)
## ----the_first_original_pathwayID, eval=FALSE---------------------------------
# pathway.id[1]
## ----the_first_original_pathway_information, eval=FALSE-----------------------
# mapkG[[1]]
## ----the_first_unified_pathwayID, eval=FALSE----------------------------------
# pathway.id1 <- unify_results$pathway.id1
# pathway.id1[1]
## ----the_first_unified_pathway_information, eval=FALSE------------------------
# mapkG1[[1]]
## ----construct_BN, eval=FALSE-------------------------------------------------
# BN <- BN_struct(unify_results$mapkG1)
## ----LASSO_function, eval=FALSE-----------------------------------------------
# data_h <- unify_results$data_h
# data_d <- unify_results$data_d
# LASSO_results <- LASSO_BN(BN, data_h, data_d)
## ----the_number_of_edge_in_first_initial_BN, eval=FALSE-----------------------
# nrow(arcs(BN[[1]]))
## ----the_number_of_edge_in_first_control_BN, eval=FALSE-----------------------
# nrow(arcs(LASSO_results$BN_h[[1]]))
## ----the_number_of_edge_in_first_disease_BN, eval=FALSE-----------------------
# nrow(arcs(LASSO_results$BN_d[[1]]))
## ----estimate_parameters, eval=FALSE------------------------------------------
# BN_H <- LASSO_results$BN_h
# BN_D <- LASSO_results$BN_d
# esti_results <- esti_par(BN_H, BN_D, data_h, data_d)
## ----parameters_information_in_BN_h, eval=FALSE-------------------------------
# esti_results$BNs_h[[1]]$`hsa:1978`
## ----parameters_information_in_BN_d, eval=FALSE-------------------------------
# esti_results$BNs_d[[1]]$`hsa:1978`
## ----variance_matrix_function, eval=FALSE-------------------------------------
# BN_h <- esti_results$BNs_h
# BN_d <- esti_results$BNs_d
# coef_h <- esti_results$coef_h
# coef_d <- esti_results$coef_d
# var_mat_results <- var_mat(data_h, coef_h, BN_h, data_d, coef_d, BN_d)
## ----variance-covariance_matrixe_for_the_fifth_node_in_first_BNh, eval=FALSE----
# (var_mat_results$var_mat_Bh[[1]])[5]
## ----variance-covariance_matrixe_for_the_fifth_node_in_first_BNd, eval=FALSE----
# (var_mat_results$var_mat_Bd[[1]])[5]
## ----parm_Ttest_function, eval=FALSE------------------------------------------
# var_mat_Bh <- var_mat_results$var_mat_Bh
# var_mat_Bd <- var_mat_results$var_mat_Bd
# Ttest_results <- parm_Ttest(data_h, coef_h, BN_h, data_d, coef_d, BN_d, var_mat_Bh, var_mat_Bd, pathway.id1)
# head(Ttest_results)
## ----BNrich_function, eval=FALSE----------------------------------------------
# BNrich_results <- BNrich(Ttest_results, pathway.id1, PathName_final, fdr.value = 0.05)
# head(BNrich_results)
|
/scratch/gouwar.j/cran-all/cranData/BNrich/inst/doc/BNrich.R
|
---
title: 'BNrich: A Novel Pathway Enrichment Analysis Based on Bayesian Network'
author: Samaneh Maleknia*(1),Mohsen Namazi(1),Kaveh Kavousi(1),Ali Sharifi-Zarchi(2),Vahid
Rezaei Tabar(3)
output:
rmarkdown::html_vignette: default
rmarkdown::pdf: default
vignette: |
%\VignetteIndexEntry{BNrich_vignette}
%\usepackage[UTF-8]{inputenc}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::knitr}
---
(1) Department of Bioinformatics, Institute of Biochemistry and Biophysics, University of Tehran, Tehran, Iran
(2) Department of Computer Engineering, Sharif University of Technology, Tehran, Iran
(3) Department of Statistics, Allameh Tabataba'i University, Tehran, Iran
"[email protected]"
date: "`r Sys.Date()`"
## Abstract:
This package provides a tool for performing a novel pathway enrichment analysis based on Bayesian networks (BNrich) that investigates the topological features of pathways. The algorithm is biologically intuitive: it analyzes the structural information in signaling pathways, such as causal relationships between genes, using the properties of Bayesian networks, and it infers the final networks more conveniently by simplifying them in the early stages with the Least Absolute Shrinkage and Selection Operator (LASSO). Impacted pathways are ultimately prioritized by Fisher's Exact Test on the significant parameters. Here, we provide example code that applies BNrich to all of the steps described above.
<!---
- Compile from command-line
Rscript -e "rmarkdown::render('sample.Rmd', c('html_document'), clean=FALSE)"
-->
# Introduction
This document offers an introductory overview of how to use the package. The BNrich tool uses Bayesian networks (BNs) in a new topology-based pathway analysis (TPA) method. The BN has been demonstrated to be a beneficial technique for integrating and modeling biological data into causal relationships (1–4). The proposed method utilizes BNs to model variation in downstream components (children) as a consequence of change in upstream components (parents). For this purpose, the method employs 187 KEGG human non-metabolic pathways (5–7), whose cycles were eliminated manually based on biological intuition, as BN structures, and gene expression data to estimate their parameters (8,9). The cycles of the inferred networks were eliminated on the basis of biologically intuitive rules instead of computational algorithms (10). The inferred networks are simplified in two steps: unifying genes and LASSO. Similarly, the originally continuous gene expression data are used for BN parameter learning, rather than discretized data (8). The algorithm estimates regression coefficients from continuous data based on parameter learning techniques for BNs (11,12). The final impacted pathways are obtained by Fisher's exact test. This method can identify effective genes and biological relations in impacted pathways at a given significance level.
# Quick Start
## Install BNrich
```{r install_BNrich, eval=FALSE}
install.packages("BNrich_0.1.0.tar.gz", type="source", repos=NULL)
library("BNrich")
```
## Prepare essential data
First, we load all 187 preprocessed KEGG pathways, whose cycles have been removed, together with a data frame of information about the pathways and a vector of pathway IDs.
```{r start files, eval=FALSE}
destfile = tempfile("files", fileext = ".rda")
# files <- fetch_data_file(destfile)
load(destfile)
```
Note that it is better to use, for example, `destfile = "./R/BNrich-start.rda"` to save the essential files permanently.
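For instance, a minimal sketch of this permanent-download variant (the path below is only an illustrative choice, not one mandated by the package):
```{r permanent_download, eval=FALSE}
# Hedged sketch: cache the preprocessed pathways at a fixed, user-chosen path
# so later sessions can load() them without downloading again.
destfile <- "./BNrich-start.rda"
fetch_data_file(destfile)
load(destfile)
```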
The input data should be two data frames, one for the disease state and one for the (healthy) control state. The row names of each data frame are KEGG gene IDs, and the number of subjects in each should not be less than 20; otherwise the user may encounter an error in the `LASSO` step. First, we load the example dataset.
The example data are extracted from part of the `GSE47756` dataset, gene expression data from a colorectal cancer study (13).
```{r example_of_dataset, eval=FALSE}
Data <- system.file("extdata", "Test_DATA.RData", package = "BNrich", mustWork = TRUE)
load(Data)
head(dataH)
```
| | H1 | H2 | H3 | H4 | H5 | H6 | H7 | H8 |
|-----------------|---------------|---------------|---------------|---------------|---------------|---------------|---------------|---------------|
| hsa:1 | 3.37954 | 3.3469 | 3.78383 | 3.35186 | 3.2091 | 3.40245 | 4.06329 | 3.43424 |
| hsa:100 | 3.1147 | 3.15981 | 3.37842 | 2.69868 | 3.43759 | 3.38588 | 2.95406 | 3.09631 |
| hsa:10000 | 3.21876 | 2.93611 | 2.62708 | 3.13507 | 2.62864 | 2.61367 | 2.7336 | 2.70867 |
| hsa:1001 | 3.4549 | 3.18683 | 3.34896 | 3.36903 | 3.49353 | 3.35175 | 3.27893 | 3.63678 |
| hsa:10010 | 2.17522 | 2.59843 | 2.56868 | 2.95009 | 2.52181 | 2.24635 | 2.05092 | 2.10438 |
| hsa:10013 | 2.992 | 2.94325 | 3.22677 | 2.87371 | 3.063 | 2.97679 | 3.07247 | 3.08168 |
```{r example_of_dataset_D, eval=FALSE}
head(dataD)
```
| | D1 | D2 | D3 | D4 | D5 | D6 | D7 | D8 |
|-----------------|---------------|---------------|---------------|---------------|---------------|---------------|---------------|---------------|
| hsa:1 | 3.29082 | 3.15924 | 3.45716 | 3.15391 | 3.29514 | 3.36502 | 3.63823 | 3.22192 |
| hsa:100 | 3.069 | 2.97546 | 2.99117 | 2.88929 | 3.00292 | 2.94948 | 2.93906 | 3.36357 |
| hsa:10000 | 2.68424 | 3.24284 | 3.57435 | 2.46992 | 4.57649 | 3.87179 | 2.94405 | 3.54207 |
| hsa:1001 | 3.27815 | 2.91081 | 3.53487 | 2.95122 | 2.67742 | 2.72358 | 3.10172 | 3.07123 |
| hsa:10010 | 2.68051 | 3.22719 | 3.58798 | 2.61269 | 3.72397 | 3.29004 | 2.5843 | 2.95756 |
| hsa:10013 | 3.05107 | 2.86273 | 3.06863 | 3.05318 | 3.04536 | 2.92021 | 3.12596 | 3.0468 |
# Unify data, the first step of simplification
First, we need to unify the gene products of the 187 imported signaling pathways (the `mapkG` list) with the data in the two states, disease (`dataD`) and control (`dataH`). This is the first simplification step: unifying the nodes of the signaling pathways with the genes that exist in the gene expression data.
```{r unify_path, eval=FALSE}
unify_results <- unify_path(dataH, dataD, mapkG, pathway.id)
```
The `unify_path` function performs the following processes:
• Splits the datasets by KEGG pathway
• Deletes all gene expression data for genes that are not in the pathways
• Removes all gene products in the pathways that are absent from the dataset platform
• Removes any pathway whose number of edges is less than 5
This function returns a list containing `data_h`, `data_d`, `mapkG1` and `pathway.id1`. `data_h` and `data_d` are lists of data frames for the control and disease objects, unified for each signaling pathway. `mapkG1` is a list of the unified signaling pathways, and `pathway.id1` is the new vector of pathway IDs for the remaining pathways.
In the example dataset, one pathway ends up with fewer than 5 edges and is removed:
```{r the_number_of_original_pathways, eval=FALSE}
length(mapkG)
```
```
187
```
```{r the_number_of_unified_pathways, eval=FALSE}
mapkG1 <- unify_results$mapkG1
length(mapkG1)
```
```
186
```
Likewise, the number of edges is reduced in the remaining pathways. In the first pathway, `hsa:01521`, the number of edges is reduced from 230 to 204:
```{r the_first_original_pathwayID, eval=FALSE}
pathway.id[1]
```
```
"hsa:01521"
```
```{r the_first_original_pathway_information, eval=FALSE}
mapkG[[1]]
```
```
A graphNEL graph with directed edges
Number of Nodes = 79
Number of Edges = 230
```
```{r the_first_unified_pathwayID, eval=FALSE}
pathway.id1 <- unify_results$pathway.id1
pathway.id1[1]
```
```
"hsa:01521"
```
```{r the_first_unified_pathway_information, eval=FALSE}
mapkG1[[1]]
```
```
A graphNEL graph with directed edges
Number of Nodes = 71
Number of Edges = 204
```
# BN: construct structures and estimate parameters
## construct BN structures
Now we can construct the BN structures based on the unified signaling pathways, and consequently we need the results of the `unify_path` function.
```{r construct_BN, eval=FALSE}
BN <- BN_struct(unify_results$mapkG1)
```
The `BN_struct` function returns a list containing the BN structures reconstructed from all elements of `mapkG1`.
## The LASSO regression, the second step of simplification
Given that the data used are continuous, each node is modeled as a regression on its parents (11,14). In some of these regressions the number of independent variables is high, so to avoid collinearity problems we use LASSO regression (15,16).
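Concretely, for a node $X_i$ with parent set $\mathrm{pa}(i)$, the Gaussian BN models

$$X_i = \beta_{i0} + \sum_{j \in \mathrm{pa}(i)} \beta_{ij} X_j + \varepsilon_i, \qquad \varepsilon_i \sim N(0, \sigma_i^2),$$

and an arc is dropped from the structure when the LASSO shrinks its coefficient to zero at `lambda.min` in both the control and the disease fits.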
We apply this step to every node with more than one parent, in all BNs produced by the `BN_struct` function, using the control and disease data obtained from `unify_path`.
```{r LASSO_function, eval=FALSE}
data_h <- unify_results$data_h
data_d <- unify_results$data_d
LASSO_results <- LASSO_BN(BN, data_h, data_d)
```
The `LASSO_BN` function returns a list containing two lists, `BN_h` and `BN_d`, the BN structures simplified by LASSO regression for the healthy and disease objects. This function also reduces the number of edges:
```{r the_number_of_edge_in_first_initial_BN, eval=FALSE}
nrow(arcs(BN[[1]]))
```
```
204
```
```{r the_number_of_edge_in_first_control_BN, eval=FALSE}
nrow(arcs(LASSO_results$BN_h[[1]]))
```
```
116
```
```{r the_number_of_edge_in_first_disease_BN, eval=FALSE}
nrow(arcs(LASSO_results$BN_d[[1]]))
```
```
116
```
## Estimate the BN parameters
Now we can estimate (learn) the parameters of each BN from the healthy and disease data lists.
```{r estimate_parameters, eval=FALSE}
BN_H <- LASSO_results$BN_h
BN_D <- LASSO_results$BN_d
esti_results <- esti_par(BN_H, BN_D, data_h, data_d)
```
The `esti_par` function returns a list containing four lists. `BNs_h` and `BNs_d` are lists of BNs whose parameters were learned from the control and disease objects data. `coef_h` and `coef_d` are lists of the parameters of `BNs_h` and `BNs_d`.
As shown below, node `hsa:1978` in the first BN has one parent. The coefficient in the control (healthy) data is `0.6958609` and in the disease data is `1.1870730`.
```{r parameters_information_in_BN_h, eval=FALSE}
esti_results$BNs_h[[1]]$`hsa:1978`
```
```
Parameters of node hsa:1978 (Gaussian distribution)
Conditional density: hsa:1978 | hsa:2475
Coefficients:
(Intercept) hsa:2475
2.8841264 0.6958609
Standard deviation of the residuals: 0.3489612
```
```{r parameters_information_in_BN_d, eval=FALSE}
esti_results$BNs_d[[1]]$`hsa:1978`
```
```
Parameters of node hsa:1978 (Gaussian distribution)
Conditional density: hsa:1978 | hsa:2475
Coefficients:
(Intercept) hsa:2475
0.9046357 1.1870730
Standard deviation of the residuals: 0.2713789
```
# Testing the equality of BN parameters
## Variance of BN parameters
We require the variances of the BN parameters to perform the t-test between the corresponding parameters.
```{r variance_matrix_function, eval=FALSE}
BN_h <- esti_results$BNs_h
BN_d <- esti_results$BNs_d
coef_h <- esti_results$coef_h
coef_d <- esti_results$coef_d
var_mat_results <- var_mat(data_h, coef_h, BN_h, data_d, coef_d, BN_d)
```
The `var_mat` function returns a list containing two lists, `var_mat_Bh` and `var_mat_Bd`, which are the variance-covariance matrices for the parameters of `BN_h` and `BN_d`. The variance-covariance matrices for the fifth node, `hsa:1978`, of the first BN in the control and disease states are as follows:
```{r variance-covariance_matrixe_for_the_fifth_node_in_first_BNh, eval=FALSE}
(var_mat_results$var_mat_Bh[[1]])[5]
```
```
[,1] [,2]
[1,] 10.177073 -3.630152
[2,] -3.630152 1.296990
```
```{r variance-covariance_matrixe_for_the_fifth_node_in_first_BNd, eval=FALSE}
(var_mat_results$var_mat_Bd[[1]])[5]
```
```
[,1] [,2]
[1,] 3.549338 -1.0392040
[2,] -1.039204 0.3053785
```
## Testing the equality of corresponding parameters
A t-test is performed between each pair of corresponding parameters of the learned BNs, `BN_h` and `BN_d`, in the disease and control states. The assumptions are unequal sample sizes and unequal variances for all samples.
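Mirroring the internal helper `t.test2` in `parm_Ttest`, for estimates $m_1, m_2$ with dispersions $s_1, s_2$ and sample sizes $n_1, n_2$, the statistic and the Welch-Satterthwaite degrees of freedom are

$$se = \sqrt{\frac{s_1^2}{n_1} + \frac{s_2^2}{n_2}}, \qquad t = \frac{m_1 - m_2}{se}, \qquad \nu = \frac{se^4}{\frac{(s_1^2/n_1)^2}{n_1 - 1} + \frac{(s_2^2/n_2)^2}{n_2 - 1}},$$

and the two-sided p-value is $2\,P(T_\nu \le -\lvert t \rvert)$.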
```{r parm_Ttest_function, eval=FALSE}
var_mat_Bh <- var_mat_results$var_mat_Bh
var_mat_Bd <- var_mat_results$var_mat_Bd
Ttest_results <- parm_Ttest(data_h, coef_h, BN_h, data_d, coef_d, BN_d, var_mat_Bh, var_mat_Bd, pathway.id1)
head(Ttest_results)
```
| From | To | pathway.number | pathwayID | Pval | coefficient in disease | coefficient in control | fdr |
|-----------------|----------------|----------------------|-----------------|----------------|--------------------------------|------------------------------|----------------|
| intercept | hsa:2065 | 1 | hsa:01521 | 0.605294 | 4.893503 | 5.535163 | 6.72E-01 |
| hsa:7039 | hsa:2065 | 1 | hsa:01521 | 2.04E-05 | 1.072296 | -0.21107 | 6.95E-05 |
| hsa:1950 | hsa:2065 | 1 | hsa:01521 | 0.154223 | 0.125977 | -0.21675 | 2.11E-01 |
| hsa:4233 | hsa:2065 | 1 | hsa:01521 | 0.083296 | -0.63254 | -0.33154 | 1.23E-01 |
| hsa:3084 | hsa:2065 | 1 | hsa:01521 | 0.135981 | -0.55586 | -0.18792 | 1.89E-01 |
| hsa:9542 | hsa:2065 | 1 | hsa:01521 | 0.373051 | -0.39859 | -0.11334 | 4.49E-01 |
This function returns a data frame containing t-test results for all parameters in all final BNs. A row with `intercept` in the `From` variable shows the significance level for the gene product given in the `To` variable. The remaining rows show the significance level for each edge of the networks.
# Identification of enriched pathways
In the last step we can determine the enriched pathways using our own threshold on `p-value` or `fdr`. To this end, we run Fisher's exact test for each final pathway. As stated above, `Ttest_results` is the data frame of t-test results for all parameters in the final BNs obtained from the `parm_Ttest` function, and `fdr.value` is a numeric threshold used to determine significant parameters (default 0.05).
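The per-pathway contingency table follows the `BNrich` source: with $m_k$ significant out of $n_k$ tested parameters in pathway $k$, and overall totals $M = \sum_k m_k$ and $N = \sum_k n_k$, a minimal sketch of one pathway's test is

```{r fisher_sketch, eval=FALSE}
# Hedged sketch of the 2x2 table built inside BNrich() for a single pathway;
# mk/nk are its significant/total parameter counts, M/N the totals over all
# pathways (these four counts are assumed to exist in the workspace).
tab <- matrix(c(mk, M - mk, nk - mk, N - M - nk + mk), nrow = 2)
fisher.test(tab)$p.value
```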
```{r BNrich_function, eval=FALSE}
BNrich_results <- BNrich(Ttest_results, pathway.id1, PathName_final, fdr.value = 0.05)
head(BNrich_results)
```
| pathwayID | p.value | fdr | pathway.number | Name |
|-----------------|----------------|----------------|----------------------|-----------------------------------------------|
| hsa:05016 | 2.66E-17 | 2.47E-15 | 123 | Huntington disease |
| hsa:05202 | 1.64E-17 | 2.47E-15 | 156 | Transcriptional misregulation in cancer |
| hsa:05012 | 2.92E-16 | 1.81E-14 | 121 | Parkinson disease |
| hsa:05010 | 1.55E-11 | 7.19E-10 | 120 | Alzheimer disease |
| hsa:04144 | 3.25E-08 | 1.21E-06 | 22 | Endocytosis |
| hsa:04714 | 2.99E-07 | 9.26E-06 | 72 | Thermogenesis |
# Session Info
The following packages and versions were used in the production of this vignette.
```
R version 3.6.1 (2019-07-05)
Platform: x86_64-w64-mingw32/x64 (64-bit)
Running under: Windows 7 x64 (build 7601) Service Pack 1
Matrix products: default
locale:
[1] LC_COLLATE=English_United Kingdom.1252 LC_CTYPE=English_United Kingdom.1252
[3] LC_MONETARY=English_United Kingdom.1252 LC_NUMERIC=C
[5] LC_TIME=English_United Kingdom.1252
attached base packages:
[1] stats graphics grDevices utils datasets methods base
other attached packages:
[1] BNrich_0.1.0
loaded via a namespace (and not attached):
[1] Rcpp_1.0.2 codetools_0.2-16 lattice_0.20-38 corpcor_1.6.9
[5] foreach_1.4.7 glmnet_2.0-18 digest_0.6.20 grid_3.6.1
[9] stats4_3.6.1 evaluate_0.14 graph_1.63.0 Matrix_1.2-17
[13] rmarkdown_1.14 bnlearn_4.5 iterators_1.0.12 tools_3.6.1
[17] parallel_3.6.1 xfun_0.8 yaml_2.2.0 rsconnect_0.8.15
[21] compiler_3.6.1 BiocGenerics_0.31.5 htmltools_0.3.6 knitr_1.24
```
# References:
1. Yu J, Smith VA, Wang PP, Hartemink AJ, Jarvis ED. Advances to Bayesian network inference for generating causal networks from observational biological data. Bioinformatics. 2004 Dec 12;20(18):3594–603.
2. Gendelman R, Xing H, Mirzoeva OK, Sarde P, Curtis C, Feiler HS, et al. Bayesian network inference modeling identifies TRIB1 as a novel regulator of cell-cycle progression and survival in cancer cells. Cancer Res. 2017;77(7):1575–85.
3. Luo Y, El Naqa I, McShan DL, Ray D, Lohse I, Matuszak MM, et al. Unraveling biophysical interactions of radiation pneumonitis in non-small-cell lung cancer via Bayesian network analysis. Radiother Oncol. 2017 Apr 1;123(1):85–92.
4. Agrahari R, Foroushani A, Docking TR, Chang L, Duns G, Hudoba M, et al. Applications of Bayesian network models in predicting types of hematological malignancies. Sci Rep. 2018 Dec 3;8(1):6951.
5. Zhi-wei J, Zhen-lei Y, Cai-xiu Z, Li-ying W, Jun L, Hong-li W, et al. Comparison of the Network Structural Characteristics of Calcium Signaling Pathway in Cerebral Ischemia after Intervention by Different Components of Chinese Medicine. J Tradit Chinese Med. 2011;31(3):251–5.
6. Lou S, Ren L, Xiao J, Ding Q, Zhang W. Expression profiling based graph-clustering approach to determine renal carcinoma related pathway in response to kidney cancer. Eur Rev Med Pharmacol Sci. 2012;16(6):775–80.
7. Fu C, Deng S, Jin G, Wang X, Yu ZG. Bayesian network model for identification of pathways by integrating protein interaction with genetic interaction data. BMC Syst Biol. 2017;11.
8. Isci S, Ozturk C, Jones J, Otu HH. Pathway analysis of high-throughput biological data within a Bayesian network framework. 2011;27(12):1667–74.
9. Korucuoglu M, Isci S, Ozgur A, Otu HH. Bayesian pathway analysis of cancer microarray data. PLoS One. 2014;9(7):1–8.
10. Spirtes P, Richardson T. Directed Cyclic Graphical Representations of Feedback Models. Proc Elev Conf Uncertain Artif Intell. 1995;1–37.
11. Neapolitan RE. Learning Bayesian networks. first. Chicago: Pearson Prentice Hall; 2004. 291–425 p.
12. Scutari M. Learning Bayesian Networks with the bnlearn R Package. J Stat Softw. 2010;35(3):1–22.
13. Hamm A, Prenen H, Van Delm W, Di Matteo M, Wenes M, Delamarre E, et al. Tumour-educated circulating monocytes are powerful candidate biomarkers for diagnosis and disease follow-up of colorectal cancer. Gut. 2016;65(6):990–1000.
14. Nagarajan R, Scutari M, Lèbre S. Bayesian Networks in R. New York, NY: Springer New York; 2013 [cited 2018 Apr 17].
15. Tibshirani R. The lasso method for variable selection in the cox model. Stat Med. 1997;16(4):385–95.
16. Buhlmann P, Geer S van de. Statistics for high-dimensional data: Methods, Theory and Applications. Springer Series in Statistics. 2011. 7–34 p.
|
/scratch/gouwar.j/cran-all/cranData/BNrich/inst/doc/BNrich.Rmd
|
---
title: 'BNrich: A Novel Pathway Enrichment Analysis Based on Bayesian Network'
author: Samaneh Maleknia*(1),Mohsen Namazi(1),Kaveh Kavousi(1),Ali Sharifi-Zarchi(2),Vahid
Rezaei Tabar(3)
output:
rmarkdown::html_vignette: default
rmarkdown::pdf: default
vignette: |
%\VignetteIndexEntry{BNrich_vignette}
%\usepackage[UTF-8]{inputenc}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::knitr}
---
(1) Department of Bioinformatics, Institute of Biochemistry and Biophysics, University of Tehran, Tehran, Iran
(2) Department of Computer Engineering, Sharif University of Technology, Tehran, Iran
(3) Department of Statis-tics, Allameh Tabataba'i University, Tehran, Iran
"[email protected]"
date: "`r Sys.Date()`"
## Abstract:
This package has developed a tool for performing a novel pathway enrichment analysis based on Bayesian network (BNrich) to investigate the topology features of the pathways. This algorithm as a biologically intuitive, method, analyzes of most structural data acquired from signaling pathways such as causal relationships between genes using the property of Bayesian networks and also infer finalized networks more conveniently by simplifying networks in the early stages and using Least Absolute Shrinkage Selector Operator (LASSO). impacted pathways are ultimately prioritized the by Fisher’s Exact Test on significant parameters. Here, we provide an instance code that applies BNrich in all of the fields described above.
<!---
- Compile from command-line
Rscript -e "rmarkdown::render('sample.Rmd', c('html_document'), clean=FALSE)"
-->
# Introduction
This document offers an introductory overview of how to use the package. The BNrich tool uses Bayesian Network (BN) in a new topology-based pathway analysis (TPA) method. The BN has been demonstrated as a beneficial technique for integrating and modeling biological data into causal relationships (1–4). The proposed method utilizes BN to model variations in downstream components (children) as a consequence of the change in upstream components (parents). For this purpose, The method employs 187 KEGG human non-metabolic pathways (5–7) which their cycles were eliminated manually by a biological intuitive, as BN structures and gene expression data to estimate its parameters (8,9). The cycles of inferred networks were eliminated on the basis of biologically intuitive rules instead of using computing algorithms (10). The inferred networks are simplified in two steps; unifying genes and LASSO. Similarly, the originally continuous gene expression data is used to BN parameters learning, rather than discretized data (8). The algorithm estimates regression coefficients by continuous data based on the parameter learning techniques in the BN (11,12). The final impacted pathways are gained by Fisher’s exact test. This method can represent effective genes and biological relations in impacted pathways based on a significant level.
# Quick Start
## Install BNrich
```{r install_BNrich, eval=FALSE}
install.packages("BNrich_0.1.0.tar.gz", type="source", repos=NULL)
library("BNrich")
```
## prepare essential data
At first, we can load all the 187 preprocessed KEGG pathways which their cycles were removed, the data frame includes information about the pathways and vector of pathway ID.
```{r start files, eval=FALSE}
destfile = tempfile("files", fileext = ".rda")
files <- fetch_data_file()
load(destfile)
```
Note that it's better to use (for example:) `destfile = "./R/BNrich-start.rda"` to save essential files permanently.
The input data should be as two data frames in states disease and (healthy) control. The row names of any data frame are KEGG geneID and the number of subjects in any of them should not be less than 20, otherwise the user may encounters error in `LASSO` step. Initially, we can load dataset example.
The example data extracted from a part of `GSE47756` dataset, the gene expression data from colorectal cancer study (13).
```{r example_of_dataset, eval=FALSE}
Data <- system.file("extdata", "Test_DATA.RData", package = "BNrich", mustWork = TRUE)
load(Data)
head(dataH)
```
| | H1 | H2 | H3 | H4 | H5 | H6 | H7 | H8 |
|-----------------|---------------|---------------|---------------|---------------|---------------|---------------|---------------|---------------|
| hsa:1 | 3.37954 | 3.3469 | 3.78383 | 3.35186 | 3.2091 | 3.40245 | 4.06329 | 3.43424 |
| hsa:100 | 3.1147 | 3.15981 | 3.37842 | 2.69868 | 3.43759 | 3.38588 | 2.95406 | 3.09631 |
| hsa:10000 | 3.21876 | 2.93611 | 2.62708 | 3.13507 | 2.62864 | 2.61367 | 2.7336 | 2.70867 |
| hsa:1001 | 3.4549 | 3.18683 | 3.34896 | 3.36903 | 3.49353 | 3.35175 | 3.27893 | 3.63678 |
| hsa:10010 | 2.17522 | 2.59843 | 2.56868 | 2.95009 | 2.52181 | 2.24635 | 2.05092 | 2.10438 |
| hsa:10013 | 2.992 | 2.94325 | 3.22677 | 2.87371 | 3.063 | 2.97679 | 3.07247 | 3.08168 |
```{r example of dataset, eval=FALSE}
head(dataD)
```
| | D1 | D2 | D3 | D4 | D5 | D6 | D7 | D8 |
|-----------------|---------------|---------------|---------------|---------------|---------------|---------------|---------------|---------------|
| hsa:1 | 3.29082 | 3.15924 | 3.45716 | 3.15391 | 3.29514 | 3.36502 | 3.63823 | 3.22192 |
| hsa:100 | 3.069 | 2.97546 | 2.99117 | 2.88929 | 3.00292 | 2.94948 | 2.93906 | 3.36357 |
| hsa:10000 | 2.68424 | 3.24284 | 3.57435 | 2.46992 | 4.57649 | 3.87179 | 2.94405 | 3.54207 |
| hsa:1001 | 3.27815 | 2.91081 | 3.53487 | 2.95122 | 2.67742 | 2.72358 | 3.10172 | 3.07123 |
| hsa:10010 | 2.68051 | 3.22719 | 3.58798 | 2.61269 | 3.72397 | 3.29004 | 2.5843 | 2.95756 |
| hsa:10013 | 3.05107 | 2.86273 | 3.06863 | 3.05318 | 3.04536 | 2.92021 | 3.12596 | 3.0468 |
# Unify data, the first step of simplification
Initially, we need to unify gene products based on 187 imported signaling pathways (`mapkG` list) in two states disease (dataD) and control (dataH). This is the first simplification step, unifying nodes in signaling pathways with genes those exist in gene expression data.
```{r unify_path, eval=FALSE}
unify_results <- unify_path(dataH, dataD, mapkG, pathway.id)
```
The `unify_path` function performs the following processes:
• Split datasets into KEGG pathways
• Delete all gene expression data are not in pathways
• Removes all gene products in pathways are not in dataset platforms
• Remove any pathways with the number of edges is less than 5
This function returns a list contain `data_h`,`data_d`,`mapkG1` and `pathway.id1`. `data_h` and `data_d` are lists contain data frames related to control and disease objects unified for any signaling pathways. The `mapkG1` is a list contains unified signaling pathways and `pathway.id1` is new pathway ID vector based on remained pathways.
In the example dataset, the number of edges in the one pathway becomes less than 5 and are removed:
```{r the_number_of_original_pathways, eval=FALSE}
length(mapkG)
```
```
187
```
```{r the_number_of_unified_pathways, eval=FALSE}
mapkG1 <- unify_results$mapkG1
length(mapkG1)
```
```
186
```
As well, the number of edges reduces in the remaining pathways. In first pathway `hsa:01521` the number of edges from 230 reduces to 204:
```{r the_first_original_pathwayID, eval=FALSE}
pathway.id[1]
```
```
"hsa:01521"
```
```{r the_first_original_pathway_information, eval=FALSE}
mapkG[[1]]
```
```
A graphNEL graph with directed edges
Number of Nodes = 79
Number of Edges = 230
```
```{r the_first_unified_pathwayID, eval=FALSE}
pathway.id1 <- unify_results$pathway.id1
pathway.id1[1]
```
```
"hsa:01521"
```
```{r the_first_unified_pathway_information, eval=FALSE}
mapkG1[[1]]
```
```
A graphNEL graph with directed edges
Number of Nodes = 71
Number of Edges = 204
```
# BN: construct structures and estimate parameters
## construct BN structures
Now we can construct BN structures based on unified signaling pathways and consequently need the results of `unify_path` function.
```{r construct_BN, eval=FALSE}
BN <- BN_struct(unify_results$mapkG1)
```
The `BN_struct` function returns a list contains BNs structures reconstructed from all `mapkG1`.
## The LASSO regression, the second step of simplification
Given that the data used is continuous, each node is modeled as a regression line on its parents (11,14). Thus, on some of these regression lines, the number of these independent variables is high, so in order to avoid the collinearity problem, we need to use the Lasso regression (15,16).
We perform this function for any node with more than one parent, in all BNs achieved by `BN_struct` function, based on control and disease data obtained by `unify_results` function.
```{r LASSO_function, eval=FALSE}
data_h <- unify_results$data_h
data_d <- unify_results$data_d
LASSO_results <- LASSO_BN(BN, data_h, data_d)
```
The LASSO_BN function returns a list contains two lists BN_H and BN_D are simplified BNs structures based on LASSO regression related to healthy and disease objects. This function lead to reduce number of edges too:
```{r the_number_of_edge_in_first_initial_BN, eval=FALSE}
nrow(arcs(BN[[1]]))
```
```
204
```
```{r the_number_of_edge_in_first_control_BN, eval=FALSE}
nrow(arcs(LASSO_results$BN_H[[1]]))
```
```
116
```
```{r the_number_of_edge_in_first_disease_BN, eval=FALSE}
nrow(arcs(LASSO_results$BN_D[[1]]))
```
```
116
```
## Estimate the BN parameters
Now we can estimate (learn) parameters for any BNs based on healthy and disease data lists.
```{r estimate_parameters, eval=FALSE}
BN_H <- LASSO_results$BN_H
BN_D <- LASSO_results$BN_D
esti_results <- esti_par(BN_H, BN_D, data_h, data_d)
```
The `esti_par` function returns a list contains four lists. The `BN_h`, `BN_d`, are lists of BNs which their parameters learned by control and disease objects data. The `coef_h` and `coef_d` are lists of parameters of `BN_h` and `BN_d`.
As you can see in below, node `hsa:1978` in the first BN has one parent. The coefficient in control (healthy) data is `0.6958609` and in disease data is `1.1870730`.
```{r parameters_information_in_BN_h, eval=FALSE}
esti_results$BNs_h[[1]]$` hsa:1978`
```
```
Parameters of node hsa:1978 (Gaussian distribution)
Conditional density: hsa:1978 | hsa:2475
Coefficients:
(Intercept) hsa:2475
2.8841264 0.6958609
Standard deviation of the residuals: 0.3489612
```
```{r parameters_information_in_BN_d, eval=FALSE}
esti_results$BNs_d[[1]]$`hsa:1978`
```
```
Parameters of node hsa:1978 (Gaussian distribution)
Conditional density: hsa:1978 | hsa:2475
Coefficients:
(Intercept) hsa:2475
0.9046357 1.1870730
Standard deviation of the residuals: 0.2713789
```
# Testing the equality BNs parameters
## Variance of BNs parameters
We require the variance of the BNs parameters to perform the T-test between the corresponding parameters.
```{r variance_matrix_function, eval=FALSE}
BN_h <- esti_results$BNs_h
BN_d <- esti_results$BNs_d
coef_h <- esti_results$coef_h
coef_d <- esti_results$coef_d
var_mat_results<- var_mat (data_h, coef_h, BN_h, data_d, coef_d, BN_d)
```
The `var_mat` function returns a list contains two lists `var_mat_Bh` and `var_mat_Bd` which are the variance-covariance matrixes for any parameters of `BN_h` and `BN_d`. The variance-covariance matrixes for the fifth node,`hsa:1978`, in first BN in two states control and disease is as follow:
```{r variance-covariance_matrixe_for_the_fifth_node_in_first_BNh, eval=FALSE}
(var_mat_results$var_mat_Bh[[1]])[5]
```
```
[,1] [,2]
[1,] 10.177073 -3.630152
[2,] -3.630152 1.296990
```
```{r variance-covariance_matrixe_for_the_fifth_node_in_first_BNd, eval=FALSE}
(var_mat_results$var_mat_Bd[[1]])[5]
```
```
[,1] [,2]
[1,] 3.549338 -1.0392040
[2,] -1.039204 0.3053785
```
## Testing the equality BNs parameters
T-test perfoms between any corresponding parameters between each pair of learned BNs, `BN_h` and `BN_d`, in disease and control states. Assumptions are unequal sample sizes and unequal variances for all samples.
```{r parm_Ttest_function, eval=FALSE}
var_mat_Bh <- var_mat_results $var_mat_Bh
var_mat_Bd <- var_mat_results $var_mat_Bd
Ttest_results <- parm_Ttest(data_h, coef_h, BN_h, data_d, coef_d, BN_d, var_mat_Bh, var_mat_Bd, pathway.id1)
head(Ttest_results)
```
| From | To | pathway.number | pathwayID | Pval | coefficient in disease | coefficient in control | fdr |
|-----------------|----------------|----------------------|-----------------|----------------|--------------------------------|------------------------------|----------------|
| intercept | hsa:2065 | 1 | hsa:01521 | 0.605294 | 4.893503 | 5.535163 | 6.72E-01 |
| hsa:7039 | hsa:2065 | 1 | hsa:01521 | 2.04E-05 | 1.072296 | -0.21107 | 6.95E-05 |
| hsa:1950 | hsa:2065 | 1 | hsa:01521 | 0.154223 | 0.125977 | -0.21675 | 2.11E-01 |
| hsa:4233 | hsa:2065 | 1 | hsa:01521 | 0.083296 | -0.63254 | -0.33154 | 1.23E-01 |
| hsa:3084 | hsa:2065 | 1 | hsa:01521 | 0.135981 | -0.55586 | -0.18792 | 1.89E-01 |
| hsa:9542 | hsa:2065 | 1 | hsa:01521 | 0.373051 | -0.39859 | -0.11334 | 4.49E-01 |
This function returns a data frame containing the T-test results for all parameters in all final BNs. Rows with `intercept` in the `From` column report the significance level for the gene product named in the `To` column; the remaining rows report the significance level for each network edge.
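For example, to extract only the significant parameters, we can filter `Ttest_results` by its `fdr` column (a minimal sketch, assuming the column names shown in the table above):
```{r filter_significant_parameters, eval=FALSE}
## keep parameters whose FDR-adjusted p-value falls below 0.05
sig_params <- Ttest_results[Ttest_results$fdr < 0.05, ]
head(sig_params)
```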
# Identification of enriched pathways
In the last step we can determine the enriched pathways by applying our own threshold on the `p-value` or `fdr`; to this end, Fisher's exact test is run for each final pathway (a conceptual sketch of this test is given after the results table below). As stated above, `Ttest_results` is the data frame of T-test results for all parameters in the final BNs produced by the `parm_Ttest` function, and `fdr.value` is a numeric threshold used to determine significant parameters (default is 0.05).
```{r BNrich_function, eval=FALSE}
BNrich_results <- BNrich(Ttest_results, pathway.id1, PathName_final, fdr.value = 0.05)
head(BNrich_results)
```
| pathwayID | p.value | fdr | pathway.number | Name |
|-----------------|----------------|----------------|----------------------|-----------------------------------------------|
| hsa:05016 | 2.66E-17 | 2.47E-15 | 123 | Huntington disease |
| hsa:05202 | 1.64E-17 | 2.47E-15 | 156 | Transcriptional misregulation in cancer |
| hsa:05012 | 2.92E-16 | 1.81E-14 | 121 | Parkinson disease |
| hsa:05010 | 1.55E-11 | 7.19E-10 | 120 | Alzheimer disease |
| hsa:04144 | 3.25E-08 | 1.21E-06 | 22 | Endocytosis |
| hsa:04714 | 2.99E-07 | 9.26E-06 | 72 | Thermogenesis |
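Conceptually, the enrichment test for a given pathway amounts to a Fisher's exact test on the 2x2 table of significant versus non-significant parameters inside versus outside that pathway. The chunk below is a minimal sketch of that idea only, not the internal code of `BNrich`, and it assumes the `pathwayID` and `fdr` columns of `Ttest_results` shown earlier:
```{r fisher_enrichment_sketch, eval=FALSE}
## hypothetical illustration of the enrichment idea for a single pathway
in_pathway <- Ttest_results$pathwayID == "hsa:05016"
significant <- Ttest_results$fdr < 0.05
fisher.test(table(in_pathway, significant), alternative = "greater")
```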
# Session Info
The following packages and versions were used in the production of this vignette.
```
R version 3.6.1 (2019-07-05)
Platform: x86_64-w64-mingw32/x64 (64-bit)
Running under: Windows 7 x64 (build 7601) Service Pack 1
Matrix products: default
locale:
[1] LC_COLLATE=English_United Kingdom.1252 LC_CTYPE=English_United Kingdom.1252
[3] LC_MONETARY=English_United Kingdom.1252 LC_NUMERIC=C
[5] LC_TIME=English_United Kingdom.1252
attached base packages:
[1] stats graphics grDevices utils datasets methods base
other attached packages:
[1] BNrich_0.1.0
loaded via a namespace (and not attached):
[1] Rcpp_1.0.2 codetools_0.2-16 lattice_0.20-38 corpcor_1.6.9
[5] foreach_1.4.7 glmnet_2.0-18 digest_0.6.20 grid_3.6.1
[9] stats4_3.6.1 evaluate_0.14 graph_1.63.0 Matrix_1.2-17
[13] rmarkdown_1.14 bnlearn_4.5 iterators_1.0.12 tools_3.6.1
[17] parallel_3.6.1 xfun_0.8 yaml_2.2.0 rsconnect_0.8.15
[21] compiler_3.6.1 BiocGenerics_0.31.5 htmltools_0.3.6 knitr_1.24
```
|
/scratch/gouwar.j/cran-all/cranData/BNrich/vignettes/BNrich.Rmd
|
#'
#' Generate the optimal dose escalation and deescalation boundaries for conducting the trial.
#'
#' Use this function to generate the optimal dose escalation and deescalation boundaries for conducting the trial.
#'
#'
#' @param target the target DLT rate
#' @param ncohort the total number of cohorts
#' @param cohortsize the cohort size
#' @param n.earlystop the early stopping parameter. If the number of patients treated at
#' the current dose reaches \code{n.earlystop}, stop the trial
#' and select the MTD based on the observed data. The default
#' value \code{n.earlystop=100} essentially turns off the type
#' of early stopping.
#' @param p.saf the highest toxicity probability that is deemed subtherapeutic
#' (i.e., below the MTD) such that dose escalation should be made.
#' The default value is \code{p.saf = 0.6 * target}.
#' @param p.tox the lowest toxicity probability that is deemed overly toxic such
#' that deescalation is required. The default value is
#' \code{p.tox=1.4*target}.
#' @param cutoff.eli the cutoff to eliminate an overly toxic dose for safety.
#' We recommend the default value (\code{cutoff.eli=0.95}) for general use.
#' @param extrasafe set \code{extrasafe=TRUE} to impose a more strict stopping rule for extra safety,
#' expressed as the stopping boundary value in the result.
#' @param offset a small positive number (between 0 and 0.5) to control how strict
#' the stopping rule is when \code{extrasafe=TRUE}. A larger value leads
#' to a more strict stopping rule. The default value
#' (\code{offset=0.05}) generally works well.
#'
#' @details The dose escalation and deescalation boundaries are all we need to run a
#' phase I trial when using the BOIN design. The decision of which dose to
#' administer to the next cohort of patients does not require complicated
#' computations, but only a simple comparison of the observed DLT rate
#' at the current dose with the dose escalation and deescalation boundaries.
#' If the observed DLT rate at the current dose is smaller than or equal
#' to the escalation boundary, we escalate the dose; if the observed toxicity
#' rate at the current dose is greater than or equal to the deescalation boundary,
#' we deescalate the dose; otherwise, we retain the current dose. The dose
#' escalation and deescalation boundaries are chosen to minimize the probability
#' of assigning patients to subtherapeutic or overly toxic doses, thereby
#' optimizing patient ethics. \code{get.boundary()} also outputs the elimination
#' boundary, which is used to avoid treating patients at overly toxic doses based
#' on the following Bayesian safety rule: if \eqn{Pr(p_j > \phi | m_j , n_j ) > 0.95} and
#' \eqn{n_j \ge 3}, dose levels \eqn{j} and higher are eliminated from the trial, where \eqn{p_j} is
#' the toxicity probability of dose level \eqn{j}, \eqn{\phi} is the target DLT rate,
#' and \eqn{m_j} and \eqn{n_j} are the number of toxicities and patients treated at dose level \eqn{j}.
#' The trial is terminated if the lowest dose is eliminated.
#'
#'
#' The BOIN design has two built-in stopping rules: (1) stop the trial if the lowest dose is eliminated
#' due to toxicity, and no dose should be selected as the MTD; and (2) stop the trial
#' and select the MTD if the number of patients treated at the current dose reaches
#' \code{n.earlystop}. The first stopping rule is a safety rule to protect patients
#' from the case in which all doses are overly toxic. The rationale for the second
#' stopping rule is that when there is a large number (i.e., \code{n.earlystop})
#' of patients assigned to a dose, it means that the dose-finding algorithm has
#' approximately converged. Thus, we can stop the trial early and select the MTD
#' to save the sample size and reduce the trial duration. For some applications,
#' investigators may prefer a more strict safety stopping rule than rule (1) for
#' extra safety when the lowest dose is overly toxic. This can be achieved by
#' setting \code{extrasafe=TRUE}, which imposes the following more strict safety
#' stopping rule: stop the trial if (i) the number of patients treated at the
#' lowest dose >=3, and (ii) \eqn{Pr(toxicity\ rate\ of\ the\ lowest\ dose > \code{target} | data)
#' > \code{cutoff.eli}-\code{offset}}. As a tradeoff, the strong stopping rule will decrease the
#' MTD selection percentage when the lowest dose actually is the MTD.
#'
#' @return \code{get.boundary()} returns a list object, including the dose escalation and de-escalation
#' boundaries \code{$lambda_e} and \code{$lambda_d} and the corresponding decision tables
#' \code{$boundary_tab} and \code{$full_boundary_tab}. If \code{extrasafe=TRUE}, the function also returns
#' a (more strict) safety stopping boundary \code{$stop_boundary}.
#'
#'
#' @note We should avoid setting the values of \code{p.saf} and \code{p.tox} very close to the
#' \code{target}. This is because the small sample sizes of typical phase I trials prevent us from
#' differentiating the target DLT rate from the rates close to it. In addition,
#' in most clinical applications, the target DLT rate is often a rough guess,
#' and finding a dose level with a DLT rate reasonably close to the target rate
#' will still be of interest to the investigator. The default values provided by
#' \code{get.boundary()} are generally reasonable for most clinical applications.
#'
#' @references Liu S. and Yuan, Y. (2015). Bayesian Optimal Interval Designs for Phase I
#' Clinical Trials, \emph{Journal of the Royal Statistical Society: Series C}, 64, 507-523.
#'
#' Yan, F., Zhang, L., Zhou, Y., Pan, H., Liu, S. and Yuan, Y. (2020). BOIN: An R Package
#' for Designing Single-Agent and Drug-Combination Dose-Finding Trials Using Bayesian Optimal
#' Interval Designs. \emph{Journal of Statistical Software}, 94(13),1-32.<doi:10.18637/jss.v094.i13>.
#'
#' Yuan Y., Hess K.R., Hilsenbeck S.G. and Gilbert M.R. (2016). Bayesian Optimal Interval Design: A
#' Simple and Well-performing Design for Phase I Oncology Trials, \emph{Clinical Cancer Research}, 22, 4291-4301.
#'
#' @seealso Tutorial: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/BOIN2.6_tutorial.pdf}
#'
#' Paper: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/paper.pdf}
#'
#' @author Suyu Liu and Ying Yuan
#'
#' @examples
#'
#' ## get the dose escalation and deescalation boundaries for BOIN design with
#' ## the target DLT rate of 0.3, maximum sample size of 30, and cohort size of 3
#' bound <- get.boundary(target=0.3, ncohort=10, cohortsize=3)
#' summary(bound) # get the descriptive summary of the boundary
#' plot(bound) # plot the flowchart of the design with boundaries
#'
#' @import stats
#' @export
get.boundary <- function (target, ncohort, cohortsize, n.earlystop = 100, p.saf = 0.6 *
target, p.tox = 1.4 * target, cutoff.eli = 0.95, extrasafe = FALSE,
offset = 0.05)
{
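    # helper functions giving binomial tail probabilities, of the form used in
    # deriving the optimal interval boundaries; note that they are not actually
    # called anywhere in the body below.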
density1 <- function(p, n, m1, m2) {
pbinom(m1, n, p) + 1 - pbinom(m2 - 1, n, p)
}
density2 <- function(p, n, m1) {
1 - pbinom(m1, n, p)
}
density3 <- function(p, n, m2) {
pbinom(m2 - 1, n, p)
}
if (target < 0.05) {
stop("the target is too low! ")
}
if (target > 0.6) {
stop("the target is too high!")
}
if ((target - p.saf) < (0.1 * target)) {
stop("the probability deemed safe cannot be higher than or too close to the target!")
}
if ((p.tox - target) < (0.1 * target)) {
stop("the probability deemed toxic cannot be lower than or too close to the target!")
}
if (offset >= 0.5) {
stop("the offset is too large!")
}
if (n.earlystop <= 6) {
warning("the value of n.earlystop is too low to ensure good operating characteristics. Recommend n.earlystop = 9 to 18.")
}
npts = ncohort * cohortsize
ntrt = NULL
b.e = NULL
b.d = NULL
elim = NULL
tol<-1e-12
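    # lambda1 (escalation) and lambda2 (deescalation) are the closed-form
    # optimal interval boundaries of Liu & Yuan (2015), computed from the
    # target DLT rate and the subtherapeutic/overly toxic rates p.saf and
    # p.tox. For n treated patients: escalate if #DLT <= floor(lambda1 * n),
    # and deescalate if #DLT > lambda2 * n; 'tol' guards against floating-point
    # error when lambda2 * n is (numerically) an integer, in which case plain
    # ceiling() would already deescalate at #DLT == lambda2 * n.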
for (n in 1:npts) {
lambda1 = log((1 - p.saf)/(1 - target))/log(target *
(1 - p.saf)/(p.saf * (1 - target)))
lambda2 = log((1 - target)/(1 - p.tox))/log(p.tox * (1 -
target)/(target * (1 - p.tox)))
cutoff1 = floor(lambda1 * n)
cutoff2 = ifelse(abs(round(lambda2 * n) - lambda2 * n) < tol, round(lambda2 * n)+1, ceiling(lambda2 * n))
ntrt = c(ntrt, n)
b.e = c(b.e, cutoff1)
b.d = c(b.d, cutoff2)
elimineed = 0
if (n < 3) {
elim = c(elim, NA)
}
else {
for (ntox in 1:n) {
if (1 - pbeta(target, ntox + 1, n - ntox + 1) >
cutoff.eli) {
elimineed = 1
break
}
}
if (elimineed == 1) {
elim = c(elim, ntox)
}
else {
elim = c(elim, NA)
}
}
}
for (i in 1:length(b.d)) {
if (!is.na(elim[i]) && (b.d[i] > elim[i]))
b.d[i] = elim[i]
}
boundaries0 = rbind(ntrt, b.e, b.d, elim)[, 1:min(npts, n.earlystop)]
rownames(boundaries0) = c("Number of patients treated", "Escalate if # of DLT <=",
"Deescalate if # of DLT >=", "Eliminate if # of DLT >=")
colnames(boundaries0) = rep("", min(npts, n.earlystop))
out = list()
if (cohortsize > 1) {
out = list(lambda_e = lambda1, lambda_d = lambda2,
boundary_tab = boundaries0[,(1:floor(min(npts, n.earlystop)/cohortsize)) * cohortsize],
full_boundary_tab = boundaries0)
}
else out = list(lambda_e = lambda1, lambda_d = lambda2, boundary_tab = boundaries0[,
(1:floor(min(npts, n.earlystop)/cohortsize)) * cohortsize])
if (extrasafe) {
stopbd = NULL
ntrt = NULL
for (n in 1:npts) {
ntrt = c(ntrt, n)
if (n < 3) {
stopbd = c(stopbd, NA)
}
else {
for (ntox in 1:n) {
if (1 - pbeta(target, ntox + 1, n - ntox +
1) > cutoff.eli - offset) {
stopneed = 1
break
}
}
if (stopneed == 1) {
stopbd = c(stopbd, ntox)
}
else {
stopbd = c(stopbd, NA)
}
}
}
stopboundary = rbind(ntrt, stopbd)[, 1:min(npts, n.earlystop)]
rownames(stopboundary) = c("The number of patients treated at the lowest dose ",
"Stop the trial if # of DLT >= ")
colnames(stopboundary) = rep("", min(npts, n.earlystop))
out = c(out, list(target = target, cutoff = cutoff.eli - offset, stop_boundary = stopboundary))
}
class(out)<-"boin"
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BOIN/R/get.boundary.R
|
#'
#' Generate operating characteristics for single agent trials
#'
#' Obtain the operating characteristics of the BOIN design for single agent trials by simulating trials.
#'
#' @usage get.oc(target, p.true, ncohort, cohortsize, n.earlystop=100,
#' startdose=1, titration=FALSE, p.saf=0.6*target, p.tox=1.4*target,
#' cutoff.eli=0.95,extrasafe=FALSE, offset=0.05, boundMTD=FALSE,
#' ntrial=1000, seed=6)
#'
#' @param target the target DLT rate
#' @param p.true a vector containing the true toxicity probabilities of the
#' investigational dose levels.
#' @param ncohort the total number of cohorts
#' @param cohortsize the cohort size
#' @param n.earlystop the early stopping parameter. If the number of patients
#' treated at the current dose reaches \code{n.earlystop},
#' stop the trial and select the MTD based on the observed data.
#' The default value \code{n.earlystop=100} essentially turns
#' off this type of early stopping.
#' @param startdose the starting dose level for the trial
#' @param titration set \code{titration=TRUE} to perform dose escalation with cohort size = 1 to accelerate dose escalation at the beginning of the trial.
#' @param p.saf the highest toxicity probability that is deemed subtherapeutic
#' (i.e. below the MTD) such that dose escalation should be undertaken.
#' The default value is \code{p.saf=0.6*target}.
#' @param p.tox the lowest toxicity probability that is deemed overly toxic such
#' that deescalation is required. The default value is
#' \code{p.tox=1.4*target}.
#' @param cutoff.eli the cutoff to eliminate an overly toxic dose for safety.
#' We recommend the default value (\code{cutoff.eli=0.95}) for general use.
#' @param extrasafe set \code{extrasafe=TRUE} to impose a more stringent stopping rule
#' @param offset a small positive number (between \code{0} and \code{0.5}) to control how strict the
#' stopping rule is when \code{extrasafe=TRUE}. A larger value leads to a more
#' strict stopping rule. The default value \code{offset=0.05} generally works well.
#' @param boundMTD set \code{boundMTD=TRUE} to impose the condition: the isotonic estimate of toxicity probability
#' for the selected MTD must be less than de-escalation boundary.
#' @param ntrial the total number of trials to be simulated
#' @param seed the random seed for simulation
#'
#' @details The operating characteristics of the BOIN design are generated by simulating trials
#' under the prespecified true toxicity probabilities of the investigational doses. If
#' \code{titration=TRUE}, we perform dose escalation with cohort size = 1 at the beginning of the trial:
#' starting from \code{startdose}, if no toxicity is observed, we escalate the dose;
#' otherwise, the titration is completed and we switch to cohort size = \code{cohortsize}.
#' Titration accelerates the dose escalation and is useful when low doses are believed to be safe.
#'
#'
#' The BOIN design has two built-in stopping rules: (1) stop the trial if the lowest
#' dose is eliminated due to toxicity, and no dose should be selected as the MTD; and
#' (2) stop the trial and select the MTD if the number of patients treated at the current
#' dose reaches \code{n.earlystop}. The first stopping rule is a safety rule to protect patients
#' from the case in which all doses are overly toxic. The rationale for the second stopping
#' rule is that when there is a large number (i.e., \code{n.earlystop}) of patients
#' assigned to a dose, it means that the dose-finding algorithm has approximately converged.
#' Thus, we can stop the trial early and select the MTD to save sample size and reduce the
#' trial duration. For some applications, investigators may prefer a more strict safety
#' stopping rule than rule (1) for extra safety when the lowest dose is overly toxic.
#' This can be achieved by setting \code{extrasafe=TRUE}, which imposes the following more
#' strict safety stopping rule: stop the trial if (i) the number of patients treated at the
#' lowest dose \code{>=3}, and (ii) \eqn{Pr(toxicity\ rate\ of\ the\ lowest\ dose > \code{target} | data)
#' > \code{cutoff.eli}-\code{offset}}. As a tradeoff, the strong stopping rule will decrease the MTD
#' selection percentage when the lowest dose actually is the MTD.
#'
#' @return \code{get.oc()} returns the operating characteristics of the BOIN design as a list,
#' including:
#' (1) selection percentage at each dose level (\code{$selpercent}),
#' (2) the number of patients treated at each dose level (\code{$npatients}),
#' (3) the number of toxicities observed at each dose level (\code{$ntox}),
#' (4) the average number of toxicities (\code{$totaltox}),
#' (5) the average number of patients (\code{$totaln}),
#' (6) the percentage of early stopping without selecting the MTD (\code{$percentstop}),
#' (7) risk of overdosing 60\% or more of patients (\code{$overdose60}),
#' (8) risk of overdosing 80\% or more of patients (\code{$overdose80}),
#' (9) data.frame (\code{$simu.setup}) containing simulation parameters, such as target, p.true, etc.
#'
#' @note We should avoid setting the values of \code{p.saf} and \code{p.tox} very close to the
#' \code{target}. This is because the small sample sizes of typical phase I trials prevent us from
#' differentiating the target DLT rate from the rates close to it. The default values provided by
#' \code{get.oc()} are strongly recommended, and generally yield excellent operating characteristics.
#'
#' @author Suyu Liu, Yanhong Zhou, and Ying Yuan
#'
#' @references Liu S. and Yuan, Y. (2015). Bayesian Optimal Interval Designs for Phase I
#' Clinical Trials, \emph{Journal of the Royal Statistical Society: Series C}, 64, 507-523.
#'
#' Yan, F., Zhang, L., Zhou, Y., Pan, H., Liu, S. and Yuan, Y. (2020). BOIN: An R Package
#' for Designing Single-Agent and Drug-Combination Dose-Finding Trials Using Bayesian Optimal
#' Interval Designs. \emph{Journal of Statistical Software}, 94(13),1-32.<doi:10.18637/jss.v094.i13>.
#'
#'
#'
#' Yuan Y., Hess K.R., Hilsenbeck S.G. and Gilbert M.R. (2016). Bayesian Optimal Interval Design: A
#' Simple and Well-performing Design for Phase I Oncology Trials, \emph{Clinical Cancer Research}, 22, 4291-4301.
#'
#'
#' @seealso Tutorial: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/BOIN2.6_tutorial.pdf}
#'
#' Paper: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/paper.pdf}
#'
#' @examples
#'
#' ## get the operating characteristics for BOIN single agent trial
#' oc <- get.oc(target=0.3, p.true=c(0.05, 0.15, 0.3, 0.45, 0.6),
#' ncohort=20, cohortsize=3, ntrial=1000)
#'
#' summary(oc) # summarize design operating characteristics
#' plot(oc) # plot flowchart of the BOIN design and design operating characteristics, including
#' # selection percentage, number of patients, and observed toxicities at each dose
#'
#'
#' ## perform titration at the begining of the trial to accelerate dose escalation
#' oc <- get.oc(target=0.3, p.true=c(0.05, 0.15, 0.3, 0.45, 0.6),
#' titration=TRUE, ncohort=20, cohortsize=3, ntrial=1000)
#'
#' summary(oc) # summarize design operating characteristics
#' plot(oc) # plot flowchart of the BOIN design and design operating characteristics
#' @export
get.oc <- function (target, p.true, ncohort, cohortsize, n.earlystop = 100,
startdose = 1, titration = FALSE, p.saf = 0.6 * target, p.tox = 1.4 *
target, cutoff.eli = 0.95, extrasafe = FALSE, offset = 0.05,boundMTD=FALSE,
ntrial = 1000, seed = 6)
{
if (target < 0.05) {
stop("the target is too low!")
}
if (target > 0.6) {
stop("the target is too high!")
}
if ((target - p.saf) < (0.1 * target)) {
stop("the probability deemed safe cannot be higher than or too close to the target!")
}
if ((p.tox - target) < (0.1 * target)) {
stop("the probability deemed toxic cannot be lower than or too close to the target!")
}
if (offset >= 0.5) {
stop("the offset is too large!")
}
if (n.earlystop <= 6) {
warning("the value of n.earlystop is too low to ensure good operating characteristics. Recommend n.earlystop = 9 to 18.")
}
set.seed(seed)
if (cohortsize == 1)
titration = FALSE
lambda_e = log((1 - p.saf)/(1 - target))/log(target * (1 -
p.saf)/(p.saf * (1 - target)))
lambda_d = log((1 - target)/(1 - p.tox))/log(p.tox * (1 -
target)/(target * (1 - p.tox)))
ndose = length(p.true)
npts = ncohort * cohortsize
Y = matrix(rep(0, ndose * ntrial), ncol = ndose)
N = matrix(rep(0, ndose * ntrial), ncol = ndose)
dselect = rep(0, ntrial)
if (cohortsize > 1) {
temp = get.boundary(target, ncohort, cohortsize, n.earlystop=ncohort*cohortsize,
p.saf, p.tox, cutoff.eli, extrasafe)$full_boundary_tab
}
else {
temp = get.boundary(target, ncohort, cohortsize, n.earlystop=ncohort*cohortsize,
p.saf, p.tox, cutoff.eli, extrasafe)$boundary_tab
}
b.e = temp[2, ]
b.d = temp[3, ]
b.elim = temp[4, ]
for (trial in 1:ntrial) {
y <- rep(0, ndose)
n <- rep(0, ndose)
earlystop = 0
d = startdose
elimi = rep(0, ndose)
ft=TRUE #flag used to determine whether or not to add cohortsize-1 patients to a dose for the first time when titration is triggered.
if (titration) {
z <- (runif(ndose) < p.true)
if (sum(z) == 0) {
d = ndose
n[1:ndose] = 1
}
else {
d = which(z == 1)[1]
n[1:d] = 1
y[d] = 1
}
}
for (i in 1:ncohort) {
if (titration && n[d] < cohortsize && ft){
ft=FALSE
y[d] = y[d] + sum(runif(cohortsize - 1) < p.true[d])
n[d] = n[d] + cohortsize - 1
}
else {
newcohort = runif(cohortsize)<p.true[d];
if((sum(n)+cohortsize) >= npts){
nremain = npts - sum(n);
y[d] = y[d] + sum(newcohort[1:nremain]);
n[d] = n[d] + nremain;
break;
}
else{
y[d] = y[d] + sum(newcohort);
n[d] = n[d] + cohortsize;
}
}
if (!is.na(b.elim[n[d]])) {
if (y[d] >= b.elim[n[d]]) {
elimi[d:ndose] = 1
if (d == 1) {
earlystop = 1
break
}
}
if (extrasafe) {
if (d == 1 && n[1] >= 3) {
if (1 - pbeta(target, y[1] + 1, n[1] - y[1] +
1) > cutoff.eli - offset) {
earlystop = 1
break
}
}
}
}
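        # early stopping: once n.earlystop patients have been treated at the
        # current dose, stop only when the design would otherwise stay there:
        # the DLT count lies strictly between the escalation and deescalation
        # boundaries, or the current dose is the lowest and deescalation is
        # indicated, or it is the highest admissible dose and escalation is
        # indicated.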
if(n[d]>=n.earlystop &&
(
(y[d]>b.e[n[d]] && y[d]<b.d[n[d]])||
(d==1 && y[d]>=b.d[n[d]]) ||
((d==ndose||elimi[d+1]==1) && y[d]<=b.e[n[d]])
)
) break;
if (y[d] <= b.e[n[d]] && d != ndose) {
if (elimi[d + 1] == 0)
d = d + 1
}
else if (y[d] >= b.d[n[d]] && d != 1) {
d = d - 1
}
else {
d = d
}
}
Y[trial, ] = y
N[trial, ] = n
if (earlystop == 1) {
dselect[trial] = 99
}
else {
dselect[trial] = select.mtd(target, n, y, cutoff.eli,
extrasafe, offset, boundMTD = boundMTD, p.tox=p.tox)$MTD
}
}
selpercent = rep(0, ndose)
nptsdose = apply(N, 2, mean)
ntoxdose = apply(Y, 2, mean)
for (i in 1:ndose) {
selpercent[i] = sum(dselect == i)/ntrial * 100
}
if (length(which(p.true == target)) > 0) {
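        # overdose60/overdose80: percentage of simulated trials in which more
        # than 60% (resp. 80%) of the planned sample size was treated at doses
        # whose true DLT rate exceeds the target; the first branch covers the
        # case where only the highest dose is above the target, so
        # N[, p.true > target] is a vector rather than a matrix.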
if (which(p.true == target) == ndose - 1) {
overdosing60 = mean(N[, p.true > target] > 0.6 *
npts) * 100
overdosing80 = mean(N[, p.true > target] > 0.8 *
npts) * 100
}
else {
overdosing60 = mean(rowSums(N[, p.true > target]) >
0.6 * npts) * 100
overdosing80 = mean(rowSums(N[, p.true > target]) >
0.8 * npts) * 100
}
out = list(selpercent = selpercent, npatients = nptsdose,
ntox = ntoxdose, totaltox = sum(Y)/ntrial, totaln = sum(N)/ntrial,
percentstop = sum(dselect == 99)/ntrial * 100, overdose60 = overdosing60,
overdose80 = overdosing80, simu.setup = data.frame(target = target,
p.true = p.true, ncohort = ncohort, cohortsize = cohortsize,
startdose = startdose, p.saf = p.saf, p.tox = p.tox,
cutoff.eli = cutoff.eli, extrasafe = extrasafe,
offset = offset, ntrial = ntrial, dose = 1:ndose),
flowchart = TRUE, lambda_e = lambda_e, lambda_d = lambda_d)
}
else {
out = list(selpercent = selpercent, npatients = nptsdose,
ntox = ntoxdose, totaltox = sum(Y)/ntrial, totaln = sum(N)/ntrial,
percentstop = sum(dselect == 99)/ntrial * 100, simu.setup = data.frame(target = target,
p.true = p.true, ncohort = ncohort, cohortsize = cohortsize,
startdose = startdose, p.saf = p.saf, p.tox = p.tox,
cutoff.eli = cutoff.eli, extrasafe = extrasafe,
offset = offset, ntrial = ntrial, dose = 1:ndose),
flowchart = TRUE, lambda_e = lambda_e, lambda_d = lambda_d)
}
class(out)<-"boin"
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BOIN/R/get.oc.R
|
#'
#' Generate operating characteristics for drug combination trials
#'
#' Obtain the operating characteristics of the BOIN design or waterfall design for drug combination
#' trials. The BOIN design is to find a MTD, and the waterfall design is to find the MTD contour
#' (i.e., multple MTDs in the dose matrix)
#'
#' @usage get.oc.comb(target, p.true, ncohort, cohortsize, n.earlystop=NULL, startdose=c(1, 1),
#' titration=FALSE,p.saf=0.6*target, p.tox=1.4*target, cutoff.eli=0.95,
#' extrasafe=FALSE,offset=0.05, ntrial=1000, mtd.contour=FALSE,
#' boundMTD=FALSE, seed=6)
#'
#' @param target the target DLT rate
#' @param p.true a \code{J*K} matrix \code{(J<=K)} containing the true toxicity probabilities of
#' combinations with \code{J} dose levels of agent A and \code{K} dose levels of agent B
#' @param ncohort a \code{1*J} vector specifying the number of cohorts for each of \code{J} subtrials
#' if \code{mtd.contour=TRUE}; Otherwise, a scalar specifying the total number of cohorts for
#' the trial.
#' @param cohortsize the cohort size
#' @param n.earlystop the early stopping parameter. If the number of patients treated at the current
#' dose reaches \code{n.earlystop}, stop the trial or subtrial and select the MTD based on
#' the observed data. When the waterfall design is used to find the MTD contour,
#' \code{n.earlystop=12} by default.
#' @param startdose the starting dose combination level for drug combination trial
#' @param titration set \code{titration=TRUE} to perform dose escalation with cohort size = 1 to accelerate dose escalation at the beginning of the trial.
#' @param p.saf the highest toxicity probability that is deemed subtherapeutic (i.e. below the MTD)
#' such that dose escalation should be undertaken.
#' The default value is \code{p.saf=0.6*target}.
#' @param p.tox the lowest toxicity probability that is deemed overly toxic such that deescalation
#' is required. The default value is \code{p.tox=1.4*target}.
#' @param cutoff.eli the cutoff to eliminate an overly toxic dose for safety. We recommend the
#' default value (\code{cutoff.eli=0.95}) for general use.
#' @param extrasafe set \code{extrasafe=TRUE} to impose a more stringent stopping rule
#' @param offset a small positive number (between 0 and 0.5) to control how strict the stopping
#' rule is when \code{extrasafe=TRUE}. A larger value leads to a more strict stopping
#' rule. The default value \code{offset=0.05} generally works well.
#' @param ntrial the total number of trials to be simulated
#' @param mtd.contour set \code{mtd.contour=TRUE} to select the MTD contour (claiming multiple MTDs).
#' Otherwise, BOIN design is used to search for a single MTD.
#' @param boundMTD set \code{boundMTD=TRUE} to impose the condition: the isotonic estimate of toxicity
#' probability for the selected MTD must be less than de-escalation boundary.
#' @param seed the random seed for simulation
#'
#' @details The operating characteristics of the BOIN design or waterfall design are generated by
#' simulating trials under the prespecified true toxicity probabilities of the investigational dose
#' combinations. If \code{titration=TRUE}, we perform dose escalation with cohort size = 1 at the beginning of the trial:
#' starting from \code{startdose}, if no toxicity is observed, we escalate the dose;
#' otherwise, the titration is completed and we switch to cohort size = \code{cohortsize}.
#' Titration accelerates the dose escalation and is useful when low doses are believed to be safe.
#'
#'
#' The BOIN and waterfall designs have two built-in stopping rules:
#' (1) stop the trial/subtrial if the lowest dose is eliminated due to toxicity, and no dose should
#' be selected as the MTD; and (2) stop the trial/subtrial and select the MTD if the number of
#' patients treated at the current dose reaches \code{n.earlystop}. The first stopping rule is a safety
#' rule to protect patients from the case in which all doses are overly toxic. The rationale for
#' the second stopping rule is that when there is a large number (i.e., \code{n.earlystop}) of
#' patients assigned to a dose, it means that the dose-finding algorithm has approximately converged.
#' Thus, we can stop the trial/subtrial early and select the MTD to save sample size and reduce the
#' trial duration.
#'
#'
#' For some applications, investigators may prefer a more strict safety stopping rule than rule
#' (1) for extra safety when the lowest dose is overly toxic.
#' This can be achieved by setting \code{extrasafe=TRUE},
#' which imposes the following more strict safety stopping rule:
#' stop the trial if (i) the number of patients treated at the lowest dose \code{>=3},
#' and (ii) \eqn{Pr(toxicity\ rate\ of\ the\ lowest\ dose > \code{target} | data) > \code{cutoff.eli}-\code{offset}}.
#' As a tradeoff, the strong stopping rule will decrease the MTD selection percentage
#' when the lowest dose actually is the MTD.
#'
#' @return \code{get.oc.comb()} returns the operating characteristics of the BOIN combination or
#' waterfall design as a list. For the BOIN combination design, including:
#' (1) true toxicity probability at each dose level (\code{$p.true}),
#' (2) selection percentage at each dose level (\code{$selpercent}),
#' (3) the number of patients treated at each dose level (\code{$npatients}),
#' (4) the number of toxicities observed at each dose level (\code{$ntox}),
#' (5) the total number of patients in the trial (\code{$totaln}),
#' (6) the total number of toxicities observed for the trial (\code{$totaltox}),
#' (7) the percentage of correct selection (\code{$pcs}),
#' (8) the total percentage of patients treated at the MTD (\code{$npercent}),
#' (9) the percentage of early stopping without selecting the MTD (\code{$percentstop}).
#' For the waterfall design, including:
#' (1) true toxicity probability at each dose level (\code{$p.true}),
#' (2) selection percentage of dose combinations (\code{$selpercent}),
#' (3) the number of patients treated at each dose combination (\code{$npatients}),
#' (4) the number of toxicities observed at each dose combination (\code{$ntox}),
#' (5) the total number of patients in the trial (\code{$totaln}),
#' (6) the total number of toxicities observed for the trial (\code{$totaltox}),
#' (7) the total percentage of correct selection at the MTD contour (\code{$pcs.contour}),
#' (8) the total percentage of patients treated at the MTD contour
#' (\code{$npercent.contour}),
#' (9) the total percentage of patients treated above the MTD contour
#' (\code{$npercent.above.contour}), and
#' (10) the total percentage of patients treated below the MTD contour
#' (\code{$npercent.below.contour}).
#'
#'
#' @note We should avoid setting the values of \code{p.saf} and \code{p.tox} very close to the
#' \code{target}. This is because the small sample sizes of typical phase I trials prevent us from
#' differentiating the target DLT rate from the rates close to it. The default values provided by
#' \code{get.oc.comb()} are strongly recommended, and generally yield excellent operating characteristics.
#'
#'
#' @author Suyu Liu, Liangcai Zhang, Yanhong Zhou, and Ying Yuan
#'
#' @references Liu S. and Yuan, Y. (2015). Bayesian Optimal Interval Designs for Phase I Clinical
#' Trials, \emph{Journal of the Royal Statistical Society: Series C}, 64, 507-523.
#'
#' Lin R. and Yin, G. (2017). Bayesian Optimal Interval Designs for Dose Finding in
#' Drug-combination Trials, \emph{Statistical Methods in Medical Research}, 26, 2155-2167.
#'
#' Yan, F., Zhang, L., Zhou, Y., Pan, H., Liu, S. and Yuan, Y. (2020). BOIN: An R Package
#' for Designing Single-Agent and Drug-Combination Dose-Finding Trials Using Bayesian Optimal
#' Interval Designs. \emph{Journal of Statistical Software}, 94(13),1-32.<doi:10.18637/jss.v094.i13>.
#'
#'
#' Zhang L. and Yuan, Y. (2016). A Simple Bayesian Design to Identify the Maximum
#' Tolerated Dose Contour for Drug Combination Trials, \emph{Statistics in Medicine}, 35, 4924-4936.
#'
#' @seealso Tutorial: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/BOIN2.6_tutorial.pdf}
#'
#' Paper: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/paper.pdf}
#'
#' @examples
#'
#' ###### drug-combination trial ######
#'
#' ##### combination trial to find a single MTD ######
#'
#' ## get the operating characteristics for BOIN design
#' p.true <- matrix(c(0.01,0.03,0.10,0.20,0.30,
#' 0.03,0.05,0.15,0.30,0.60,
#' 0.08,0.10,0.30,0.60,0.75), byrow=TRUE, ncol=5)
#'
#' oc.comb <- get.oc.comb(target=0.3, p.true, ncohort=20, cohortsize=3,
#' n.earlystop=12, startdose=c(1,1), ntrial=100)
#'
#' summary(oc.comb)
#' plot(oc.comb)
#'
#'
#' ## get the operating characteristics with titration for BOIN design
#' oc.comb <- get.oc.comb(target=0.3, p.true, ncohort=20, cohortsize=3,
#' n.earlystop=12, startdose=c(1,1), titration=TRUE, ntrial=100)
#' summary(oc.comb)
#' plot(oc.comb)
#'
#'
#' ##### combination trial to find the MTD contour ######
#'
#' ## find the MTD contour using waterfall design
#' oc.comb <- get.oc.comb(target=0.3, p.true, ncohort=c(10,5,5), cohortsize=3,
#' n.earlystop=12, startdose=c(1,1), ntrial=100, mtd.contour=TRUE)
#'
#' summary(oc.comb)
#' plot(oc.comb)
#'
#' @export
get.oc.comb <- function (target, p.true, ncohort, cohortsize, n.earlystop = NULL,
startdose = c(1, 1), titration = FALSE, p.saf = 0.6 * target,
p.tox = 1.4 * target, cutoff.eli = 0.95, extrasafe = FALSE,
offset = 0.05, ntrial = 1000, mtd.contour = FALSE, boundMTD=FALSE,seed = 6)
{
get.oc.comb.boin <- function(target, p.true, ncohort, cohortsize,
n.earlystop = 100, startdose = c(1, 1), titration = FALSE,
p.saf = 0.6 * target, p.tox = 1.4 * target, cutoff.eli = 0.95,
extrasafe = FALSE, offset = 0.05, boundMTD=FALSE,ntrial = 1000) {
JJ = nrow(p.true)
KK = ncol(p.true)
if (JJ > KK)
stop("p.true should be arranged in a way (i.e., rotated) such that\n the number of rows is less than or equal to the number of columns.")
if (JJ > KK)
p.true = t(p.true)
if (target < 0.05) {
stop("the target is too low!")
}
if (target > 0.6) {
stop("the target is too high!")
}
if ((target - p.saf) < (0.1 * target)) {
stop("the probability deemed safe cannot be higher than or too close to the target!")
}
if ((p.tox - target) < (0.1 * target)) {
stop("the probability deemed toxic cannot be lower than or too close to the target!")
}
if (offset >= 0.5) {
stop("the offset is too large!")
}
if (n.earlystop <= 6) {
warning("the value of n.earlystop is too low to ensure good operating characteristics. Recommend n.earlystop = 9 to 18")
}
ndose = length(p.true)
npts = ncohort * cohortsize
Y <- array(matrix(rep(0, length(p.true) * ntrial), dim(p.true)[1]),
dim = c(dim(p.true), ntrial))
N <- array(matrix(rep(0, length(p.true) * ntrial), dim(p.true)[1]),
dim = c(dim(p.true), ntrial))
dselect = matrix(rep(0, 2 * ntrial), ncol = 2)
if (cohortsize > 1) {
temp = get.boundary(target, ncohort, cohortsize,
n.earlystop=100, p.saf, p.tox, cutoff.eli, extrasafe)$full_boundary_tab
}else {
temp = get.boundary(target, ncohort, cohortsize,
n.earlystop=100, p.saf, p.tox, cutoff.eli, extrasafe)$boundary_tab
}
b.e = temp[2, ]
b.d = temp[3, ]
b.elim = temp[4, ]
lambda1 = log((1 - p.saf)/(1 - target))/log(target *
(1 - p.saf)/(p.saf * (1 - target)))
lambda2 = log((1 - target)/(1 - p.tox))/log(p.tox * (1 -
target)/(target * (1 - p.tox)))
if (cohortsize == 1)
titration = FALSE
for (trial in 1:ntrial) {
y <- matrix(rep(0, ndose), dim(p.true)[1], dim(p.true)[2])
n <- matrix(rep(0, ndose), dim(p.true)[1], dim(p.true)[2])
earlystop = 0
d = startdose
elimi = matrix(rep(0, ndose), dim(p.true)[1], dim(p.true)[2])
ft=TRUE #flag used to determine whether or not to add cohortsize-1 patients to a dose for the first time when titration is triggered.
if (titration) {
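                # titration: treat patients one at a time, moving up a randomly
                # chosen staircase path through the dose matrix until the first
                # DLT occurs (or the highest combination is reached), then hand
                # over to the regular cohort-based BOIN rules.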
tmpa = d[1]
tmpb = d[2]
y[tmpa, tmpb] <- (runif(1) < p.true[tmpa, tmpb])
n[tmpa, tmpb] <- 1
while (tmpa <= dim(p.true)[1] & tmpb <= dim(p.true)[2]) {
if (tmpa == dim(p.true)[1] & tmpb == dim(p.true)[2]) {
break
}
if (sum(y) == 1) {
y[tmpa, tmpb] = 1
break
}
if (tmpa < dim(p.true)[1] & tmpb < dim(p.true)[2]) {
tmp.candidate = rbind(c(tmpa + 1, tmpb),
c(tmpa, tmpb + 1))
tmp.sel = rbinom(1, 1, prob = c(0.5, 0.5)) +
1
tmpa = tmp.candidate[tmp.sel, 1]
tmpb = tmp.candidate[tmp.sel, 2]
}
else if (tmpa == dim(p.true)[1]) {
tmpb = tmpb + 1
}
else {
tmpa = tmpa + 1
}
y[tmpa, tmpb] <- (runif(1) < p.true[tmpa, tmpb])
n[tmpa, tmpb] <- 1
}
if (sum(y) == 0) {
d = c(dim(p.true)[1], dim(p.true)[2])
}
else {
d = c(tmpa, tmpb)
}
}
for (pp in 1:ncohort) {
if (titration & n[d[1], d[2]] < cohortsize & ft) {
ft=FALSE
y[d[1], d[2]] = y[d[1], d[2]] + sum(runif(cohortsize -
1) < p.true[d[1], d[2]])
n[d[1], d[2]] = n[d[1], d[2]] + cohortsize -1
}
else {
y[d[1], d[2]] = y[d[1], d[2]] + sum(runif(cohortsize) <
p.true[d[1], d[2]])
n[d[1], d[2]] = n[d[1], d[2]] + cohortsize
}
nc = n[d[1], d[2]]
if (!is.na(b.elim[nc])) {
if (y[d[1], d[2]] >= b.elim[nc]) {
for (i in min(d[1], dim(p.true)[1]):dim(p.true)[1]) {
for (j in min(d[2], dim(p.true)[2]):dim(p.true)[2]) {
elimi[i, j] = 1
}
}
if (d[1] == 1 && d[2] == 1) {
d = c(99, 99)
earlystop = 1
break
}
}
if (extrasafe) {
if (d[1] == 1 && d[2] == 1 && n[1, 1] >=
3) {
if (1 - pbeta(target, y[1, 1] + 1, n[1,
1] - y[1, 1] + 1) > cutoff.eli - offset) {
d = c(99, 99)
earlystop = 1
break
}
}
}
}
if (n[d[1],d[2]] >= n.earlystop && (y[d[1],d[2]]>b.e[n[d[1],d[2]]] ||
(d[1]==dim(p.true)[1] && d[2]==dim(p.true)[2]) ||
( d[1]==dim(p.true)[1] && d[2]<dim(p.true)[2] && elimi[d[1],d[2]+1]==1 ) ||
( d[1]<dim(p.true)[1] && d[2]==dim(p.true)[2] && elimi[d[1]+1,d[2]]==1 ) ||
( d[1]<dim(p.true)[1] && d[2]<dim(p.true)[2] && elimi[d[1]+1,d[2]]==1 && elimi[d[1],d[2]+1]==1 ) ) &&
(y[d[1],d[2]]<b.d[n[d[1],d[2]]] || (d[1]==1 && d[2]==1) ) ) break;
if (y[d[1], d[2]] <= b.e[nc]) {
elevel = matrix(c(1, 0, 0, 1), 2)
pr_H0 = rep(0, length(elevel)/2)
nn = pr_H0
for (i in seq(1, length(elevel)/2, by = 1)) {
if (d[1] + elevel[1, i] <= dim(p.true)[1] &&
d[2] + elevel[2, i] <= dim(p.true)[2]) {
if (elimi[d[1] + elevel[1, i], d[2] + elevel[2,
i]] == 0) {
yn = y[d[1] + elevel[1, i], d[2] + elevel[2,
i]]
nn[i] = n[d[1] + elevel[1, i], d[2] +
elevel[2, i]]
pr_H0[i] <- pbeta(lambda2, yn + 0.5,
nn[i] - yn + 0.5) - pbeta(lambda1,
yn + 0.5, nn[i] - yn + 0.5)
}
}
}
pr_H0 = pr_H0 + nn * 5e-04
if (max(pr_H0) == 0) {
d = d
}
else {
k = which(pr_H0 == max(pr_H0))[as.integer(runif(1) *
length(which(pr_H0 == max(pr_H0))) + 1)]
d = d + c(elevel[1, k], elevel[2, k])
}
}
else if (y[d[1], d[2]] >= b.d[nc]) {
delevel = matrix(c(-1, 0, 0, -1), 2)
pr_H0 = rep(0, length(delevel)/2)
nn = pr_H0
for (i in seq(1, length(delevel)/2, by = 1)) {
if (d[1] + delevel[1, i] > 0 && d[2] + delevel[2,
i] > 0) {
yn = y[d[1] + delevel[1, i], d[2] + delevel[2,
i]]
nn[i] = n[d[1] + delevel[1, i], d[2] +
delevel[2, i]]
pr_H0[i] = pbeta(lambda2, yn + 0.5, nn[i] -
yn + 0.5) - pbeta(lambda1, yn + 0.5,
nn[i] - yn + 0.5)
}
}
pr_H0 = pr_H0 + nn * 5e-04
if (max(pr_H0) == 0) {
d = d
}
else {
k = which(pr_H0 == max(pr_H0))[as.integer(runif(1) *
length(which(pr_H0 == max(pr_H0))) + 1)]
d = d + c(delevel[1, k], delevel[2, k])
}
}
else {
d = d
}
}
Y[, , trial] = y
N[, , trial] = n
if (earlystop == 1) {
dselect[trial, ] = c(99, 99)
}else {
selcomb = select.mtd.comb.boin(target, n, y,
cutoff.eli, extrasafe, offset,
boundMTD=boundMTD,p.tox=p.tox,mtd.contour = FALSE)$MTD
dselect[trial, 1] = selcomb[1]
dselect[trial, 2] = selcomb[2]
}
}
selpercent = matrix(rep(0, ndose), dim(p.true)[1], dim(p.true)[2])
        nptsdose = apply(N, c(1, 2), mean)
        ntoxdose = apply(Y, c(1, 2), mean)
for (i in 1:dim(p.true)[1]) for (j in 1:dim(p.true)[2]) {
{
selpercent[i, j] = sum(dselect[, 1] == i & dselect[,
2] == j)/ntrial * 100
}
}
if (JJ <= KK) {
rownames(p.true) = paste("DoseA", 1:dim(p.true)[1],
sep = "")
colnames(p.true) = paste("DoseB", 1:dim(p.true)[2],
sep = "")
rownames(selpercent) = paste("DoseA", 1:dim(p.true)[1],
sep = "")
colnames(selpercent) = paste("DoseB", 1:dim(p.true)[2],
sep = "")
out = list(p.true = round(p.true, 2), selpercent = round(selpercent,2),
npatients = round(apply(N, c(1, 2), mean),2), ntox = round(apply(Y, c(1, 2), mean), 2),
totaltox = round(sum(Y)/ntrial, 1), totaln = round(sum(N)/ntrial,1),
pcs = paste(round(sum(selpercent[which(abs(p.true -target) == min(abs(p.true - target)), arr.ind = TRUE)]),1), "%", sep = ""),
npercent = paste(round(sum(nptsdose[which(abs(p.true -target) == min(abs(p.true - target)), arr.ind = TRUE)])/sum(nptsdose) *100, 1), "%", sep = ""),
percentstop=100-sum(round(selpercent,2)),flowchart = FALSE)
rownames(out$npatients) = paste("DoseA", 1:dim(p.true)[1],
sep = "")
colnames(out$npatients) = paste("DoseB", 1:dim(p.true)[2],
sep = "")
rownames(out$ntox) = paste("DoseA", 1:dim(p.true)[1],
sep = "")
colnames(out$ntox) = paste("DoseB", 1:dim(p.true)[2],
sep = "")
return(out)
}
else {
colnames(p.true) = paste("DoseB", 1:dim(t(p.true))[1],
sep = "")
rownames(p.true) = paste("DoseA", 1:dim(t(p.true))[2],
sep = "")
colnames(selpercent) = paste("DoseB", 1:dim(t(p.true))[1],
sep = "")
rownames(selpercent) = paste("DoseA", 1:dim(t(p.true))[2],
sep = "")
            colnames(nptsdose) = paste("DoseB", 1:dim(t(p.true))[1],
                sep = "")
            rownames(nptsdose) = paste("DoseA", 1:dim(t(p.true))[2],
                sep = "")
            colnames(ntoxdose) = paste("DoseB", 1:dim(t(p.true))[1],
                sep = "")
            rownames(ntoxdose) = paste("DoseA", 1:dim(t(p.true))[2],
                sep = "")
out = list(p.true = round(t(p.true), 2), selpercent = round(t(selpercent),2),
npatients = round(t(apply(N, c(1, 2), mean)), 2), ntox = round(t(apply(Y, c(1, 2), mean)),2),
totaltox = round(sum(Y)/ntrial, 1), totaln = round(sum(N)/ntrial,1), pcs = paste(round(sum(selpercent[which(abs(p.true -target) == min(abs(p.true - target)), arr.ind = TRUE)]),
1), "%"), npercent = paste(round(sum(nptsdose[which(abs(p.true -target) == min(abs(p.true - target)), arr.ind = TRUE)])/sum(nptsdose) *100, 1), "%"),
percentstop=100-sum(round(selpercent,2)),
flowchart = FALSE)
return(out)
}
}
select.mtd.comb.boin <- function(target, npts, ntox, cutoff.eli = 0.95,
extrasafe = FALSE, offset = 0.05,
boundMTD=FALSE,p.tox=1.4*target,
mtd.contour = FALSE) {
lambda_d = log((1 - target)/(1 - p.tox))/log(p.tox * (1 -target)/(target * (1 - p.tox)))
y = ntox
n = npts
if (nrow(n) > ncol(n) | nrow(y) > ncol(y)) {
stop("npts and ntox should be arranged in a way (i.e., rotated) such that for each of them, the number of rows is less than or equal to the number of columns.")
}
elimi = matrix(0, dim(n)[1], dim(n)[2])
if (extrasafe) {
if (n[1, 1] >= 3) {
if (1 - pbeta(target, y[1, 1] + 1, n[1, 1] -
y[1, 1] + 1) > cutoff.eli - offset) {
elimi[, ] = 1
}
}
}
for (i in 1:dim(n)[1]) {
for (j in 1:dim(n)[2]) {
if (n[i, j] >= 3) {
if (1 - pbeta(target, y[i, j] + 1, n[i, j] -
y[i, j] + 1) > cutoff.eli) {
elimi[i:dim(n)[1], j] = 1
elimi[i, j:dim(n)[2]] = 1
break
}
}
}
}
selectdose=NULL
if (elimi[1] == 1) {
selectdose = c(99, 99)
selectdoses = matrix(selectdose, nrow = 1)
}else {
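            # posterior mean DLT rates under a vague Beta(0.05, 0.05) prior,
            # made monotone in both dose directions via bivariate isotonic
            # regression (Iso::biviso); eliminated doses are then pushed to 1.1
            # and tiny index-based increments break ties between doses.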
phat = (y + 0.05)/(n + 0.1)
phat = Iso::biviso(phat, n + 0.1, warn = TRUE)[,
]
phat.out = phat
phat.out[n == 0] = NA
phat[elimi == 1] = 1.1
phat = phat * (n != 0) + (1e-05) * (matrix(rep(1:dim(n)[1],
each = dim(n)[2], len = length(n)), dim(n)[1],
byrow = T) + matrix(rep(1:dim(n)[2], each = dim(n)[1],
len = length(n)), dim(n)[1]))
if(boundMTD){
if(all(phat[n!=0]>=lambda_d)){
selectdose = c(99, 99)
selectdoses = matrix(selectdose, nrow = 1)
}else{
phat[phat>=lambda_d]=10}}
if(is.null(selectdose)){
phat[n == 0] = 10
selectdose = which(abs(phat - target) == min(abs(phat -
target)), arr.ind = TRUE)
if (length(selectdose) > 2)
selectdose = selectdose[1, ]
aa = function(x) as.numeric(as.character(x))
if (mtd.contour == TRUE) {
selectdoses = cbind(row = 1:dim(n)[1], col = rep(99,
dim(n)[1]))
for (k in dim(n)[1]:1) {
kn = n[k, ]
ky = y[k, ]
kelimi = elimi[k, ]
kphat = phat[k, ]
if (kelimi[1] == 1 || sum(n[kelimi == 0]) ==
0) {
kseldose = 99
}
else {
adm.set = (kn != 0) & (kelimi == 0)
adm.index = which(adm.set == T)
y.adm = ky[adm.set]
n.adm = kn[adm.set]
selectd = sort(abs(kphat[adm.set] - target),
index.return = T)$ix[1]
kseldose = adm.index[selectd]
}
selectdoses[k, 2] = ifelse(is.na(kseldose),
99, kseldose)
if (k < dim(n)[1])
if (selectdoses[k + 1, 2] == dim(n)[2])
selectdoses[k, 2] = dim(n)[2]
if (k < dim(n)[1])
if (aa(selectdoses[k + 1, 2]) == dim(n)[2] &
aa(selectdoses[k + 1, 2]) == aa(selectdoses[k,
2]))
selectdoses[k, 2] = 99
}
}
else {
selectdoses = matrix(99, nrow = 1, ncol = 2)
selectdoses[1, ] = matrix(selectdose, nrow = 1)
}
selectdoses = matrix(selectdoses[selectdoses[, 2] !=
99, ], ncol = 2)
}
colnames(selectdoses) = c("DoseA", "DoseB")
}
if (mtd.contour == FALSE) {
if (selectdoses[1, 1] == 99 && selectdoses[1, 2] ==
99) {
out=list(target = target, MTD = 99, p_est = matrix(NA,
nrow = dim(npts)[1], ncol = dim(npts)[2]))
return(out)
}
else {
out=list(target = target, MTD = selectdoses,
p_est = round(phat.out, 2))
return(out)
}
}
else {
if (length(selectdoses) == 0) {
out=list(target = target, MTD = 99, p_est = matrix(NA,
nrow = dim(npts)[1], ncol = dim(npts)[2]))
return(out)
}
else {
out=list(target = target, MTD = selectdoses,
p_est = round(phat.out, 2))
return(out)
}
}
}
waterfall.subtrial.mtd <- function(target,npts, ntox, cutoff.eli = 0.95,
extrasafe = FALSE, offset = 0.05, boundMTD=FALSE, p.tox, temp) {
lambda_d = log((1 - target)/(1 - p.tox))/log(p.tox * (1 -target)/(target * (1 - p.tox)))
b.e = temp[2, ]
pava <- function(x, wt = rep(1, length(x))) {
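            # pool-adjacent-violators algorithm: weighted isotonic regression
            # that repeatedly replaces adjacent decreasing values by their
            # weighted average until the fitted sequence is nondecreasing.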
n <- length(x)
if (n <= 1)
return(x)
if (any(is.na(x)) || any(is.na(wt))) {
stop("Missing values in 'x' or 'wt' not allowed")
}
lvlsets <- (1:n)
repeat {
viol <- (as.vector(diff(x)) < 0)
if (!(any(viol)))
break
i <- min((1:(n - 1))[viol])
lvl1 <- lvlsets[i]
lvl2 <- lvlsets[i + 1]
ilvl <- (lvlsets == lvl1 | lvlsets == lvl2)
x[ilvl] <- sum(x[ilvl] * wt[ilvl])/sum(wt[ilvl])
lvlsets[ilvl] <- lvl1
}
x
}
y = ntox
n = npts
ndose = length(n)
elimi = rep(0, ndose)
is.escalation = 0
for (i in 1:ndose) {
if (n[i] >= 3) {
if (1 - pbeta(target, y[i] + 1, n[i] - y[i] +
1) > cutoff.eli) {
elimi[i:ndose] = 1
break
}
}
}
if (extrasafe) {
if (n[1] >= 3) {
if (1 - pbeta(target, y[1] + 1, n[1] - y[1] +
1) > cutoff.eli - offset) {
elimi[1:ndose] = 1
}
}
}
if (elimi[1] == 1 || sum(n[elimi == 0]) == 0) {
selectdose = 99
}
else {
adm.set = (n != 0) & (elimi == 0)
adm.index = which(adm.set == T)
y.adm = y[adm.set]
n.adm = n[adm.set]
phat = (y.adm + 0.05)/(n.adm + 0.1)
phat.var = (y.adm + 0.05) * (n.adm - y.adm + 0.05)/((n.adm +
0.1)^2 * (n.adm + 0.1 + 1))
phat = pava(phat, wt = 1/phat.var)
phat = phat + (1:length(phat)) * 1e-10
if(boundMTD){
if(all(phat>=lambda_d)){selectdose=99}else{
phat=phat[phat<lambda_d]
selectd = sort(abs(phat - target), index.return = T)$ix[1]
selectdose = adm.index[selectd]
}
}else{
selectd = sort(abs(phat - target), index.return = T)$ix[1]
selectdose = adm.index[selectd]
}
if(selectdose!=99){
if (y[selectdose] <= b.e[n[selectdose]]) {
is.escalation = 1
}
}
}
list(selectdose = selectdose, is.escalation = is.escalation)
}
waterfall.subtrial <- function(target, p.true, dosespace,
npts, ntox, elimi, ncohort, cohortsize, n.earlystop = 20,
startdose = 1, p.saf = 0.6 * target, p.tox = 1.4 * target,
cutoff.eli = 0.95, extrasafe = FALSE, offset = 0.05,
totaln, titration.first.trial = FALSE, temp,boundMTD=FALSE) {
ndoses1 = nrow(p.true)
ndoses2 = ncol(p.true)
p.truee = p.true[dosespace]
npts = npts
ntox = ntox
elimi = elimi
if (target < 0.05) {
stop(" the target is too low! ")
}
if (target > 0.6) {
stop("the target is too high!")
}
if ((target - p.saf) < (0.1 * target)) {
stop("the probability deemed safe cannot be higher than or too close to the target! ")
}
if ((p.tox - target) < (0.1 * target)) {
stop("the probability deemed toxic cannot be lower than or too close to the target!")
}
if (offset >= 0.5) {
stop("the offset is too large! ")
}
ndose = length(p.truee)
selectdose = 0
is.escalation = 0
b.e = temp[2, ]
b.d = temp[3, ]
b.elim = temp[4, ]
lambda1 = log((1 - p.saf)/(1 - target))/log(target *
(1 - p.saf)/(p.saf * (1 - target)))
lambda2 = log((1 - target)/(1 - p.tox))/log(p.tox * (1 -
target)/(target * (1 - p.tox)))
y <- rep(0, ndose)
n <- rep(0, ndose)
earlystop = 0
d = startdose
elm = rep(0, ndose)
if (titration.first.trial) {
z <- (runif(ndose) < p.truee)
if (sum(z) == 0) {
d = ndose
n[1:ndose] = 1
}
else {
d = which(z == 1)[1]
n[1:d] = 1
y[d] = 1
}
}
ft=TRUE #flag used to determine whether or not to add cohortsize-1 patients to a dose for the first time when titration is triggered.
for (icohort in 1:ncohort) {
if (titration.first.trial & n[d] < cohortsize & ft) {
ft=FALSE
y[d] = y[d] + sum(runif(cohortsize - 1) < p.truee[d])
n[d] = n[d] + cohortsize - 1
}else {
y[d] = y[d] + sum(runif(cohortsize) < p.truee[d])
n[d] = n[d] + cohortsize
}
if (!is.na(b.elim[n[d]])) {
if (y[d] >= b.elim[n[d]]) {
elm[d:ndose] = 1
if (d == 1) {
earlystop = 1
break
}
}
if (extrasafe) {
if (d == 1 && n[1] >= 3) {
if (1 - pbeta(target, y[1] + 0.05, n[1] -
y[1] + 0.1) > cutoff.eli - offset) {
earlystop = 1
break
}
}
}
}
if (y[d] <= b.e[n[d]] && d != ndose) {
if (elm[d + 1] == 0)
d = d + 1
}
else if (y[d] >= b.d[n[d]] && d != 1) {
d = d - 1
}
else {
d = d
}
if (n[d] >= n.earlystop)
break
if (sum(n) >= (ncohort * cohortsize))
break
}
if (earlystop == 1) {
selectdose = 99
elm = rep(1, ndose)
}
else {
wsmtd = waterfall.subtrial.mtd(target, n, y, cutoff.eli,
extrasafe, offset, boundMTD=boundMTD, p.tox,temp)
selectdose = wsmtd$selectdose
is.escalation = wsmtd$is.escalation
}
npts[dosespace] = n
ntox[dosespace] = y
elimi[dosespace] = elm
list(ncohort = icohort, ntotal = icohort * cohortsize,
startdose = startdose, npts = npts, ntox = ntox,
totaltox = sum(ntox), totaln = sum(npts), pctearlystop = sum(selectdose ==
99) * 100, selectdose = selectdose, is.escalation = is.escalation,
elimi = elimi)
}
get.oc.comb.waterfall <- function(p.true, target, ncohort,
cohortsize, n.earlystop = 12, cutoff.eli = 0.95, p.saf = 0.6 *
target, p.tox = 1.4 * target, titration = FALSE,
extrasafe = FALSE, offset = 0.05, boundMTD=FALSE,ntrial = 1000) {
temp = get.boundary(target, ncohort = 150, cohortsize = 1,
cutoff.eli = cutoff.eli, extrasafe = extrasafe)$boundary_tab
JJ = nrow(p.true)
KK = ncol(p.true)
if (JJ > KK)
p.true = t(p.true)
true.mtd.position = cbind(1:JJ, apply(p.true, 1, function(x) {
flaga = rep(0, length(x))
tmp = which.min(abs(x - target))
flaga[tmp] = 1
flagb = (x <= target + 0.05)
ifelse(sum(flaga & flagb) > 0, which(flaga & flagb),
99)
}))
true.mtd.pos = apply(true.mtd.position, 1, function(x) (x[2] -
1) * JJ + x[1])
true.mtd.pos.new = true.mtd.pos[true.mtd.pos <= JJ *
KK]
nMTDs = paste(sort(true.mtd.pos.new), collapse = ",")
greater.than.contour = rep(1, length(JJ * KK))
less.than.contour = 1 - greater.than.contour
if (sum(true.mtd.position[, 2] <= KK) > 0) {
tmp = true.mtd.position[true.mtd.position[, 2] <=
JJ * KK, ]
if (length(tmp) == 2)
tmp = matrix(tmp, ncol = 2)
ONES = matrix(1, nrow = JJ, ncol = KK)
for (kkk in 1:nrow(tmp)) {
tmpa = tmp[kkk, 1]
tmpb = tmp[kkk, 2]
ONES[1:tmpa, 1:tmpb] = 0
}
ONES[true.mtd.pos.new] = 1
less.than.contour = which(ONES == 0)
ONES = matrix(1, nrow = JJ, ncol = KK)
for (kkk in 1:nrow(tmp)) {
tmpa = tmp[kkk, 1]
tmpb = tmp[kkk, 2]
ONES[tmpa:JJ, tmpb:KK] = 0
}
ONES[true.mtd.pos.new] = 1
greater.than.contour = which(ONES == 0)
}
at.contour = c(1:(JJ * KK))[-c(greater.than.contour,
less.than.contour)]
aa = function(x) as.numeric(as.character(x))
if (target < 0.05) {
stop(" the target is too low! ")
}
if (target > 0.6) {
stop("the target is too high!")
}
if ((target - p.saf) < (0.1 * target)) {
stop("the probability deemed safe cannot be higher than or too close to the target! ")
}
if ((p.tox - target) < (0.1 * target)) {
stop("the probability deemed toxic cannot be lower than or too close to the target!")
}
ndoses1 <- nrow(p.true)
ndoses2 <- ncol(p.true)
ntrial.phase1 = NULL
ntrial.mtd = NULL
ntrial.nt = NULL
ntrial.yt = NULL
ntrial.ne = NULL
ntrial.ye = NULL
if (cohortsize == 1)
titration = FALSE
titration.first.trial = FALSE
for (trial in 1:ntrial) {
trial.result = NULL
ntox = matrix(rep(0, (ndoses2) * ndoses1), ncol = ndoses2)
colnames(ntox) = paste("ntoxDoseB", 1:ndoses2, sep = "")
npts = matrix(rep(0, (ndoses2) * ndoses1), ncol = ndoses2)
colnames(npts) = paste("nptsDoseB", 1:ndoses2, sep = "")
elimi = matrix(0, nrow = ndoses1, ncol = ndoses2)
colnames(elimi) = paste("elimiDoseB", 1:ndoses2,
sep = "")
mtd = cbind(selectdoseA = 1:ndoses1, selectdoseB = rep(NA,
ndoses1))
trial.result = data.frame(cbind(trial = rep(trial,
ndoses1), mtd, npts, ntox, elimi))
totaln = 0
startdose = 1
dosespace = c(1:(ndoses1 - 1), (1:ndoses2) * ndoses1)
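            # first waterfall subtrial: column-major linear indices covering
            # doses (1,1), ..., (J-1,1) up agent A at the lowest dose of agent
            # B, then (J,1), ..., (J,K) across agent B at the highest dose of
            # agent A, where J = ndoses1 and K = ndoses2.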
subtriali = 1
while (totaln < sum(ncohort) * cohortsize) {
if (titration & subtriali == 1) {
titration.first.trial = TRUE
}
else {
titration.first.trial = FALSE
}
subtrial = waterfall.subtrial(target, p.true = p.true,
dosespace = dosespace, npts = npts, ntox = ntox,
elimi = elimi, ncohort = ncohort[subtriali],
cohortsize = cohortsize, n.earlystop = n.earlystop,
startdose = startdose, p.saf = p.saf, p.tox = p.tox,
cutoff.eli = cutoff.eli, extrasafe = extrasafe,
offset = offset, totaln = totaln,
titration.first.trial = titration.first.trial,
temp = temp,boundMTD=boundMTD)
selectdose = ifelse(subtrial$selectdose == 99,
99, dosespace[subtrial$selectdose])
if (selectdose == 99)
break
dj = ifelse(selectdose%%ndoses1 == 0, selectdose%/%ndoses1,
selectdose%/%ndoses1 + 1)
di = selectdose - (dj - 1) * ndoses1
totaln = aa(subtrial$totaln)
npts = subtrial$npts
ntox = subtrial$ntox
elimi = subtrial$elimi
if ((subtriali == 1) & (selectdose < ndoses1)) {
for (a in (di + 1):ndoses1) for (b in 1:ndoses2) elimi[a,
b] = 1
if (subtrial$is.escalation == 1) {
startdose = 1
dosespace1 = c(di + ((2:ndoses2) - 1) * ndoses1)
subtriali = subtriali + 1
subtrial1 = waterfall.subtrial(target, p.true = p.true,
dosespace = dosespace1, npts = npts, ntox = ntox,
elimi = elimi, ncohort = ncohort[subtriali],
cohortsize = cohortsize, n.earlystop = n.earlystop,
startdose = startdose, p.saf = p.saf, p.tox = p.tox,
cutoff.eli, extrasafe, offset, totaln = totaln,
temp = temp,boundMTD=boundMTD)
selectdose1 = ifelse(subtrial1$selectdose ==
99, selectdose, dosespace1[subtrial1$selectdose])
if (selectdose1 == 99)
break
dj = ifelse(selectdose1%%ndoses1 == 0, selectdose1%/%ndoses1,
selectdose1%/%ndoses1 + 1)
di = selectdose1 - (dj - 1) * ndoses1
totaln = aa(subtrial1$totaln)
npts = subtrial1$npts
ntox = subtrial1$ntox
elimi = subtrial1$elimi
}
}
if (di - 1 == 0)
break
subtriali = subtriali + 1
if (dj < ndoses2)
elimi[di, (dj + 1):ndoses2] = 1
startdose = dj
dosespace = di - 1 + ((2:ndoses2) - 1) * ndoses1
if (dj == ndoses2)
startdose = dj - 1
}
npts = t(apply(npts, 1, aa))
ntox = t(apply(ntox, 1, aa))
elimi = t(apply(elimi, 1, aa))
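        # shrunken DLT-rate estimates smoothed by bivariate isotonic regression
        # (Iso::biviso); eliminated doses are pushed above 1 and the small
        # row/column offsets break ties deterministically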
phat = (ntox + 0.05)/(npts + 0.1)
phat = t(apply(phat, 1, aa))
colnames(phat) = paste("phat", 1:ndoses2, sep = "")
phat[elimi == 1] = 1.1
phat = Iso::biviso(phat, npts + 0.1, warn = TRUE)[,
]
phat = phat + (1e-05) * (matrix(rep(1:dim(npts)[1],
each = dim(npts)[2], len = length(npts)), dim(npts)[1],
byrow = T) + matrix(rep(1:dim(npts)[2], each = dim(npts)[1],
len = length(npts)), dim(npts)[1]))
colnames(phat) = paste("phat", 1:ndoses2, sep = "")
for (k in ndoses1:1) {
kn = npts[k, ]
ky = ntox[k, ]
kelimi = elimi[k, ]
kphat = phat[k, ]
if (kelimi[1] == 1 || sum(npts[kelimi == 0]) ==
0) {
kseldose = 99
}
else {
adm.set = (kn != 0) & (kelimi == 0)
adm.index = which(adm.set == T)
y.adm = ky[adm.set]
n.adm = kn[adm.set]
selectd = sort(abs(kphat[adm.set] - target),
index.return = T)$ix[1]
kseldose = adm.index[selectd]
}
mtd[k, 2] = kseldose
if (k < ndoses1)
if (mtd[k, 2] - mtd[k + 1, 2] <= 0 & mtd[k +
1, 2] != 99)
mtd[k, 2] = mtd[k + 1, 2]
}
trial.result[1:ndoses1, grep("nptsDoseB", colnames(trial.result))] = npts
trial.result[1:ndoses1, grep("ntoxDoseB", colnames(trial.result))] = ntox
trial.result[1:ndoses1, grep("selectdose", colnames(trial.result))] = mtd
trial.result[1:ndoses1, grep("elimiDoseB", colnames(trial.result))] = elimi
ntrial.mtd = rbind(ntrial.mtd, cbind(trial = rep(trial,
nrow(mtd)), mtd))
ntrial.nt = rbind(ntrial.nt, cbind(trial = rep(trial,
nrow(npts)), npts))
ntrial.yt = rbind(ntrial.yt, cbind(trial = rep(trial,
nrow(ntox)), ntox))
trial.result = cbind(trial.result, phat)
ntrial.phase1 = rbind(ntrial.phase1, trial.result)
}
ntrial.mtd = data.frame(ntrial.mtd)
colnames(ntrial.mtd) = c("trial", "doseA", "doseB")
    alltoxpercent = round(sum(sapply(1:ntrial, function(x) ifelse(sum(ntrial.mtd$doseB[ntrial.mtd$trial ==
        x] == 99) == ndoses1, 1, 0)), na.rm = T) * 100/ntrial,
        3)
    stoppercent = round(sum(sapply(1:ntrial, function(x) sum(ntrial.mtd$doseB[ntrial.mtd$trial ==
        x] == 99))) * 100/ntrial, 1)
selpercent = matrix(0, nrow = ndoses1, ncol = ndoses2)
nselpercent = 0
selpercent1 = 0
selpercent2 = 0
mtdtable = NULL
mtdlist = list()
for (triali in 1:ntrial) {
mtddata = unique(as.matrix(ntrial.mtd[ntrial.mtd$trial ==
triali, 2:3]))
mtddata = mtddata[!is.na(mtddata[, 2]) & mtddata[,
2] <= KK, ]
if (length(mtddata) > 0) {
if (length(mtddata) == 2)
mtddata = matrix(mtddata, ncol = 2)
mtdlevel = aa(t(apply(mtddata, 1, function(x) (x[2] -
1) * ndoses1 + x[1])))
mtdlevel[mtdlevel > ndoses1 * ndoses2] = 99
if (sum(mtdlevel <= ndoses1 * ndoses2) > 0) {
selpercent[mtdlevel[mtdlevel <= ndoses1 * ndoses2]] = selpercent[mtdlevel[mtdlevel <=
ndoses1 * ndoses2]] + 1
mtdlist[[triali]] = mtdlevel[mtdlevel <= ndoses1 *
ndoses2]
mtdtable = c(mtdtable, paste(sort(aa(mtdlevel[mtdlevel <=
ndoses1 * ndoses2])), collapse = ","))
if (paste(sort(aa(mtdlevel[mtdlevel <= ndoses1 *
ndoses2])), collapse = ",") == nMTDs)
nselpercent = nselpercent + 1
if (sum(mtdlevel <= ndoses1 * ndoses2) == 1 &
sum(is.element(mtdlevel[mtdlevel <= ndoses1 *
ndoses2], which(p.true == target)) == F) ==
0)
selpercent1 = selpercent1 + 1
if (sum(mtdlevel <= ndoses1 * ndoses2) == 2 &
sum(is.element(mtdlevel[mtdlevel <= ndoses1 *
ndoses2], which(p.true == target)) == F) ==
0)
selpercent2 = selpercent2 + 1
}
else {
mtdlist[[triali]] = 99
mtdtable = c(mtdtable, 99)
}
}
}
selpercent = round(selpercent * 100/ntrial, 2)
if (JJ > KK) {
nptsdose = matrix(0, nrow = ndoses1, ncol = ndoses2)
for (i in seq(1, nrow(ntrial.nt), by = ndoses1)) nptsdose = nptsdose +
ntrial.nt[i + 0:(ndoses1 - 1), -1]
ntoxdose = matrix(0, nrow = ndoses1, ncol = ndoses2)
for (i in seq(1, nrow(ntrial.yt), by = ndoses1)) ntoxdose = ntoxdose +
ntrial.yt[i + 0:(ndoses1 - 1), -1]
colnames(p.true) = paste("DoseB", 1:dim(t(p.true))[1],
sep = "")
rownames(p.true) = paste("DoseA", 1:dim(t(p.true))[2],
sep = "")
colnames(selpercent) = paste("DoseB", 1:dim(t(p.true))[1],
sep = "")
rownames(selpercent) = paste("DoseA", 1:dim(t(p.true))[2],
sep = "")
colnames(nptsdose) = paste("DoseB", 1:dim(t(p.true))[1],
sep = "")
rownames(nptsdose) = paste("DoseA", 1:dim(t(p.true))[2],
sep = "")
colnames(ntoxdose) = paste("DoseB", 1:dim(t(p.true))[1],
sep = "")
rownames(ntoxdose) = paste("DoseA", 1:dim(t(p.true))[2],
sep = "")
out = list(p.true = round(t(p.true), 2), selpercent = round(t(selpercent),
2), npatients = round(t(nptsdose)/ntrial, 2),
ntox = round(t(ntoxdose)/ntrial, 2), totaltox = round(sum(ntoxdose/ntrial),
1), totaln = round(sum(nptsdose/ntrial), 1))
out2 = list()
if (length(at.contour) > 0) {
out2 = list(npercent.contour = paste(round(100 *
sum(nptsdose[at.contour])/sum(nptsdose), 1),
"%", sep = ""), npercent.above.contour = paste(round(100 *
sum(nptsdose[greater.than.contour])/sum(nptsdose),
1), "%", sep = ""), npercent.below.contour = paste(round(100 *
sum(nptsdose[less.than.contour])/sum(nptsdose),
1), "%", sep = ""), pcs.contour = paste(round(100 *
nselpercent/ntrial, 1), "%", sep = ""), flowchart = FALSE)
}
outnew = c(out, out2)
class(outnew)<-"boin"
return(outnew)
}
else {
nptsdose = matrix(0, nrow = ndoses1, ncol = ndoses2)
for (i in seq(1, nrow(ntrial.nt), by = ndoses1)) nptsdose = nptsdose +
ntrial.nt[i + 0:(ndoses1 - 1), -1]
ntoxdose = matrix(0, nrow = ndoses1, ncol = ndoses2)
for (i in seq(1, nrow(ntrial.yt), by = ndoses1)) ntoxdose = ntoxdose +
ntrial.yt[i + 0:(ndoses1 - 1), -1]
out2 = list()
if (length(at.contour) > 0) {
out2 = list(npercent.contour = paste(round(100 *
sum(nptsdose[at.contour])/sum(nptsdose), 1),
"%", sep = ""), npercent.above.contour = paste(round(100 *
sum(nptsdose[greater.than.contour])/sum(nptsdose),
1), "%", sep = ""), npercent.below.contour = paste(round(100 *
sum(nptsdose[less.than.contour])/sum(nptsdose),
1), "%", sep = ""), pcs.contour = paste(round(100 *
nselpercent/ntrial, 1), "%", sep = ""))
}
rownames(p.true) = paste("DoseA", 1:dim(p.true)[1],
sep = "")
colnames(p.true) = paste("DoseB", 1:dim(p.true)[2],
sep = "")
rownames(selpercent) = paste("DoseA", 1:dim(p.true)[1],
sep = "")
colnames(selpercent) = paste("DoseB", 1:dim(p.true)[2],
sep = "")
rownames(nptsdose) = paste("DoseA", 1:dim(p.true)[1],
sep = "")
colnames(nptsdose) = paste("DoseB", 1:dim(p.true)[2],
sep = "")
rownames(ntoxdose) = paste("DoseA", 1:dim(p.true)[1],
sep = "")
colnames(ntoxdose) = paste("DoseB", 1:dim(p.true)[2],
sep = "")
out = list(p.true = apply(formatC(p.true, digits = 2,
format = "f", width = 5), c(1, 2), as.numeric),
selpercent = apply(formatC(selpercent, digits = 2,
format = "f", width = 5), c(1, 2), as.numeric),
npatients = apply(formatC(nptsdose/ntrial, digits = 2,
format = "f", width = 5), c(1, 2), as.numeric),
ntox = apply(formatC(ntoxdose/ntrial, digits = 2,
format = "f", width = 5), c(1, 2), as.numeric),
totaltox = as.numeric(formatC(sum(ntoxdose/ntrial),
digits = 1, format = "f")), totaln = as.numeric(formatC(sum(nptsdose/ntrial),
digits = 1, format = "f")), flowchart = FALSE)
outnew = c(out, out2)
class(outnew)<-"boin"
return(outnew)
}
}
set.seed(seed)
JJ = nrow(p.true)
KK = ncol(p.true)
if (JJ > KK) {
stop("p.true should be arranged in a way (i.e., rotated) such that the number of rows is less than or equal to the number of columns.")
}
if (mtd.contour == FALSE) {
if (is.null(n.earlystop) == TRUE)
n.earlystop = 100
if (n.earlystop <= 6) {
warning("the value of n.earlystop is too low to ensure good operating characteristics. Recommend n.earlystop = 9 to 18 ")
}
if (length(ncohort) > 1) {
warning("ncohort is the total number of cohorts for the trial. Please enter a scalar.")
}
if (((JJ * KK) <= 4) & (sum(ncohort) <= 6)) {
warning("the sample size is too small, which may lead to poor operating characteristics. Suggest to increase the number of cohort.")
}
if (((JJ * KK) > 4) & (sum(ncohort) <= 8)) {
warning("the sample size is too small, which may lead to poor operating characteristics. Suggest to increase the number of cohort.")
}
out=get.oc.comb.boin(target = target, p.true = p.true,
ncohort = sum(ncohort), cohortsize = cohortsize,
n.earlystop = n.earlystop, startdose = startdose,
titration = titration,
p.saf = p.saf, p.tox = p.tox,
cutoff.eli = cutoff.eli, extrasafe = extrasafe,
offset = offset,boundMTD=boundMTD,
ntrial = ntrial)
class(out)<-"boin"
return(out)
}
if (mtd.contour == TRUE) {
if (missing(ncohort) == TRUE) {
constSeq = rep(round(4/cohortsize, 2), 20)
dosespaceSeq = c(JJ + KK - 1, rep(KK - 1, JJ - 1))
ncohort = ceiling(constSeq[1:JJ] * dosespaceSeq)
}
if (length(ncohort) != JJ) {
stop("The vector length of ncohort doesn't match the number of subtrials (the number of dose matrix rows).\n Please enter the number of ncohorts of each subtrial.")
}
if (((JJ * KK) <= 4) & (sum(ncohort) <= 6)) {
warning("the sample size is too small, which may lead to poor operating characteristics. Suggest to increase the number of cohort.")
}
if (((JJ * KK) > 4) & (sum(ncohort) <= 8)) {
warning("the sample size is too small, which may lead to poor operating characteristics. Suggest to increase the number of cohort.")
}
if (is.null(n.earlystop) == TRUE)
n.earlystop = 12
if (n.earlystop <= 6) {
warning("the value of n.earlystop is too low to ensure good operating characteristics. Recommend n.earlystop = 9 to 18 ")
}
out=get.oc.comb.waterfall(p.true = p.true, target = target,
ncohort = ncohort, cohortsize = cohortsize, n.earlystop = n.earlystop,
cutoff.eli = cutoff.eli, p.saf = p.saf, p.tox = p.tox,
titration = titration,
extrasafe = extrasafe, offset = offset,boundMTD=boundMTD,
ntrial = ntrial)
class(out)<-"boin"
return(out)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BOIN/R/get.oc.comb.R
|
#'
#' Determine the dose combination for the next cohort of new patients for drug-combination trials that aim to find a MTD
#'
#' Determine the dose combination for the next cohort of new patients for drug-combination trials that aim to find a MTD
#'
#' @usage next.comb(target, npts, ntox, dose.curr, n.earlystop=100,
#' p.saf=0.6*target, p.tox=1.4*target, cutoff.eli=0.95,
#' extrasafe=FALSE, offset=0.05)
#'
#' @param target the target DLT rate
#' @param npts a \code{J*K} matrix \code{(J<=K)} containing the number of patients treated at each dose combination
#' @param ntox a \code{J*K} matrix \code{(J<=K)} containing the number of patients who experienced
#' dose-limiting toxicity at each dose combination
#' @param dose.curr the current dose combination
#' @param n.earlystop the early stopping parameter. If the number of patients
#' treated at the current dose reaches \code{n.earlystop},
#' stop the trial and select the MTD based on the observed data.
#' The default value \code{n.earlystop=100} essentially turns
#' off this type of early stopping.
#' @param p.saf the highest toxicity probability that is deemed subtherapeutic
#' (i.e. below the MTD) such that dose escalation should be undertaken.
#' The default value is \code{p.saf=0.6*target}.
#' @param p.tox the lowest toxicity probability that is deemed overly toxic such
#' that deescalation is required. The default value is
#' \code{p.tox=1.4*target}.
#' @param cutoff.eli the cutoff to eliminate an overly toxic dose for safety.
#' We recommend the default value of (\code{cutoff.eli=0.95})
#' for general use.
#' @param extrasafe set \code{extrasafe=TRUE} to impose a more stringent stopping rule
#' @param offset a small positive number (between \code{0} and \code{0.5}) to control how strict the
#' stopping rule is when \code{extrasafe=TRUE}. A larger value leads to a more
#' strict stopping rule. The default value \code{offset=0.05} generally works well.
#'
#' @details This function is used to determine the dose combination for conducting combination trials.
#' Given the currently observed data, \code{next.comb()} determines the dose combination for
#' treating the next cohort of new patients. The currently observed data include: the
#' number of patients treated at each dose combination (i.e., \code{npts}),
#' the number of patients who experienced dose-limiting toxicities at each dose
#' combination (i.e., \code{ntox}), and the level of current dose (i.e., \code{dose.curr}).
#'
#' @return the recommended dose combination for treating the next cohort of patients (\code{$next_dc}).
#'
#' @author Suyu Liu and Ying Yuan
#'
#' @references Liu S. and Yuan, Y. (2015). Bayesian Optimal Interval Designs for Phase I Clinical
#' Trials, \emph{Journal of the Royal Statistical Society: Series C}, 64, 507-523.
#'
#' Lin R. and Yin, G. (2017). Bayesian Optimal Interval Designs for Dose Finding in
#' Drug-combination Trials, \emph{Statistical Methods in Medical Research}, 26, 2155-2167.
#'
#' Yan, F., Zhang, L., Zhou, Y., Pan, H., Liu, S. and Yuan, Y. (2020).BOIN: An R Package
#' for Designing Single-Agent and Drug-Combination Dose-Finding Trials Using Bayesian Optimal
#' Interval Designs. \emph{Journal of Statistical Software}, 94(13),1-32.<doi:10.18637/jss.v094.i13>.
#'
#'
#'
#' @seealso Tutorial: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/BOIN2.6_tutorial.pdf}
#'
#' Paper: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/paper.pdf}
#'
#' @examples
#'
#' ## determine the dose combination for the next cohort of new patients
#' n <- matrix(c(3, 0, 0, 0, 0, 7, 6, 0, 0, 0, 0, 0, 0, 0, 0), ncol=5, byrow=TRUE)
#' y <- matrix(c(0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), ncol=5, byrow=TRUE)
#' nxt.comb <- next.comb(target=0.3, npts=n, ntox=y, dose.curr=c(2, 2))
#' summary(nxt.comb)
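#'
#' ## a hypothetical later stage of the same trial (assumed data): after
#' ## escalating to (2, 3) and observing 2 DLTs among 6 patients there
#' n2 <- matrix(c(3, 0, 0, 0, 0, 7, 6, 6, 0, 0, 0, 0, 0, 0, 0), ncol=5, byrow=TRUE)
#' y2 <- matrix(c(0, 0, 0, 0, 0, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0), ncol=5, byrow=TRUE)
#' nxt.comb2 <- next.comb(target=0.3, npts=n2, ntox=y2, dose.curr=c(2, 3))
#' summary(nxt.comb2)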
#'
#' @export
next.comb <- function (target, npts, ntox, dose.curr, n.earlystop = 100, p.saf = 0.6 *
target, p.tox = 1.4 * target, cutoff.eli = 0.95, extrasafe = FALSE,
offset = 0.05)
{
if (npts[dose.curr[1], dose.curr[2]] == 0) {
stop("dose entered is not the current dose")
}
if (target < 0.05) {
stop("the target is too low! ")
}
if (target > 0.6) {
stop("the target is too high! ")
}
if ((target - p.saf) < (0.1 * target)) {
stop("the probability deemed safe cannot be higher than or too close to the target! ")
}
if ((p.tox - target) < (0.1 * target)) {
stop("the probability deemed toxic cannot be lower than or too close to the target! ")
}
if (offset >= 0.5) {
stop("the offset is too large! ")
}
if (n.earlystop <= 6) {
warning("the value of n.earlystop is too low to ensure good operating characteristics. Recommend n.earlystop = 9 to 18. ")
}
temp = get.boundary(target, ncohort = 150, cohortsize = 1,
n.earlystop, p.saf, p.tox, cutoff.eli, extrasafe, offset)$boundary_tab
b.e = temp[2, ]
b.d = temp[3, ]
b.elim = temp[4, ]
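    # lambda1 (escalation) and lambda2 (de-escalation) are the optimal BOIN
    # interval boundaries determined by p.saf, target and p.tox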
lambda1 = log((1 - p.saf)/(1 - target))/log(target * (1 -
p.saf)/(p.saf * (1 - target)))
lambda2 = log((1 - target)/(1 - p.tox))/log(p.tox * (1 -
target)/(target * (1 - p.tox)))
n = npts
y = ntox
earlystop = 0
d = dose.curr
nc = n[d[1], d[2]]
ndose = length(npts)
elimi = matrix(rep(0, ndose), dim(n)[1], dim(n)[2])
if (n[d[1], d[2]] >= n.earlystop) {
cat("Terminate the trial because the number of patients treated at (",
d[1], ", ", d[2], ") has reached ", n.earlystop, ".")
d = c(99, 99)
earlystop = 1
}
if (!is.na(b.elim[nc])) {
if (d[1] == 1 && d[2] == 1 && y[d[1], d[2]] >= b.elim[nc]) {
d = c(99, 99)
earlystop = 1
cat("Terminate the trial because the lowest dose is overly toxic ")
}
if (extrasafe) {
if (d[1] == 1 && d[2] == 1 && n[1, 1] >= 3) {
if (1 - pbeta(target, y[1, 1] + 1, n[1, 1] -
y[1, 1] + 1) > cutoff.eli - offset) {
d = c(99, 99)
earlystop = 1
cat("Terminate the trial because the lowest dose is overly toxic ")
}
}
}
}
for (i in 1:dim(n)[1]) {
for (j in 1:dim(n)[2]) {
if (n[i, j] > 0 && (!is.na(b.elim[n[i, j]]))) {
if (y[i, j] >= b.elim[n[i, j]]) {
elimi[i:dim(n)[1], j:dim(n)[2]] = 1
}
}
}
}
out = list(next_dc = c(NA, NA))
if (earlystop == 0) {
if (y[d[1], d[2]] <= b.e[nc]) {
n.temp=n; n.temp[n.temp==0]=1
phat.mat=y/n.temp #p.hat for doses
elevel = matrix(c(1, 0, 0, 1), 2)
pr_H0 = rep(0, length(elevel)/2)
nn = pr_H0
for (i in seq(1, length(elevel)/2, by = 1)) {
if (d[1] + elevel[1, i] <= dim(n)[1] && d[2] +
elevel[2, i] <= dim(n)[2]) {
if (elimi[d[1] + elevel[1, i], d[2] + elevel[2,i]] == 0) {
if(i==1 & d[1]+1<=dim(n)[1]){
if(any(phat.mat[d[1]+1,1:d[2]]>=lambda2)){
pr_H0[i]=0
}else{
yn = y[d[1] + elevel[1, i], d[2] + elevel[2,i]]
nn[i] = n[d[1] + elevel[1, i], d[2] + elevel[2,i]]
pr_H0[i] <- pbeta(lambda2, yn + 0.5, nn[i] -yn + 0.5) -
pbeta(lambda1, yn + 0.5, nn[i] -yn + 0.5)
}
}
if(i==2 & d[2]+1<=dim(n)[2]){
if(any(phat.mat[1:d[1],d[2]+1]>=lambda2)){
pr_H0[i]=0
}else{
yn = y[d[1] + elevel[1, i], d[2] + elevel[2,i]]
nn[i] = n[d[1] + elevel[1, i], d[2] + elevel[2,i]]
pr_H0[i] <- pbeta(lambda2, yn + 0.5, nn[i] -yn + 0.5) -
pbeta(lambda1, yn + 0.5, nn[i] -yn + 0.5)
}
}
}
}
}
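            # a small bonus proportional to the sample size breaks ties in
            # favor of the dose combination with more observed data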
pr_H0 = pr_H0 + nn * 5e-04
if (max(pr_H0) == 0) {
d = d
}else{
k = which(pr_H0 == max(pr_H0))[as.integer(runif(1) *length(which(pr_H0 == max(pr_H0))) + 1)]
d = d + c(elevel[1, k], elevel[2, k])
}
}else if (y[d[1], d[2]] >= b.d[nc]) {
delevel = matrix(c(-1, 0, 0, -1), 2)
pr_H0 = rep(0, length(delevel)/2)
nn = pr_H0
for (i in seq(1, length(delevel)/2, by = 1)) {
if (d[1] + delevel[1, i] > 0 && d[2] + delevel[2,
i] > 0) {
yn = y[d[1] + delevel[1, i], d[2] + delevel[2,
i]]
nn[i] = n[d[1] + delevel[1, i], d[2] + delevel[2,
i]]
pr_H0[i] = pbeta(lambda2, yn + 0.5, nn[i] -
yn + 0.5) - pbeta(lambda1, yn + 0.5, nn[i] -
yn + 0.5)
}
}
pr_H0 = pr_H0 + nn * 5e-04
if (max(pr_H0) == 0) {
d = d
}
else {
k = which(pr_H0 == max(pr_H0))[as.integer(runif(1) *
length(which(pr_H0 == max(pr_H0))) + 1)]
d = d + c(delevel[1, k], delevel[2, k])
}
}
else {
d = d
}
out = list(next_dc = d)
}
class(out)<-"boin"
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BOIN/R/next.comb.R
|
#'
#' Determine the starting dose and the dose-searching space for the next subtrial in the waterfall design
#'
#' Determine the starting dose and the dose-searching space for the next subtrial after
#' the current subtrial is completed when using the waterfall design
#'
#'
#' @param target the target DLT rate
#' @param npts a \code{J*K} matrix \code{(J<=K)} containing the number of patients treated at each dose combination
#' @param ntox a \code{J*K} matrix \code{(J<=K)} containing the number of patients who experienced dose-limiting
#' toxicities at each dose combination
#' @param p.saf the highest toxicity probability that is deemed subtherapeutic (i.e. below
#' the MTD) such that dose escalation should be undertaken. The default value
#' is \code{p.saf=0.6*target}.
#' @param p.tox the lowest toxicity probability that is deemed overly toxic such that
#' deescalation is required. The default value is \code{p.tox=1.4*target}.
#' @param cutoff.eli the cutoff to eliminate an overly toxic dose for safety. We recommend
#' the default value of (\code{cutoff.eli=0.95}) for general use.
#' @param extrasafe set \code{extrasafe=TRUE} to impose a more stringent stopping rule
#' @param offset a small positive number (between 0 and 0.5) to control how strict the
#' stopping rule is when \code{extrasafe=TRUE}. A larger value leads to a
#' more strict stopping rule. The default value \code{offset=0.05} generally
#' works well.
#'
#' @details For the waterfall design, this function is used to obtain the starting dose and
#' dose-searching space for the next subtrial when the current subtrial is completed.
#' The input data include: the number of patients treated at each dose combination
#' (i.e., \code{npts}), the number of patients who experienced dose-limiting
#' toxicities at each dose combination (i.e., \code{ntox}).
#'
#'
#' @return \code{next.subtrial()} returns a list object, containing the starting dose
#' (\code{$starting_dose}) and the dose-searching space for the next subtrial
#' (\code{$next_subtrial}).
#'
#'
#' @author Suyu Liu, Liangcai Zhang and Ying Yuan
#'
#' @references Liu S. and Yuan, Y. (2015). Bayesian Optimal Interval Designs for Phase I Clinical
#' Trials, \emph{Journal of the Royal Statistical Society: Series C}, 64, 507-523.
#'
#' Lin R. and Yin, G. (2017). Bayesian Optimal Interval Designs for Dose Finding in
#' Drug-combination Trials, \emph{Statistical Methods in Medical Research}, 26, 2155-2167.
#'
#' Yan, F., Zhang, L., Zhou, Y., Pan, H., Liu, S. and Yuan, Y. (2020).BOIN: An R Package
#' for Designing Single-Agent and Drug-Combination Dose-Finding Trials Using Bayesian Optimal
#' Interval Designs. \emph{Journal of Statistical Software}, 94(13),1-32.<doi:10.18637/jss.v094.i13>.
#'
#'
#' Zhang L. and Yuan, Y. (2016). A Simple Bayesian Design to Identify the Maximum
#' Tolerated Dose Contour for Drug Combination Trials, \emph{Statistics in Medicine}, 35, 4924-4936.
#'
#' @seealso Tutorial: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/BOIN2.6_tutorial.pdf}
#'
#' Paper: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/paper.pdf}
#'
#' @examples
#'
#' ## determine the starting dose and dose searching space for next subtrial
#' n <- matrix(c(6, 0, 0, 0,
#' 6, 10, 12, 0,
#' 9, 12, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 0, 0, 0,
#' 1, 1, 4, 0,
#' 2, 3, 0, 0), ncol=4, byrow=TRUE)
#' nxt.trial <- next.subtrial(target=0.3, npts=n, ntox=y)
#' summary(nxt.trial)
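#'
#' ## the same data with the more stringent extra-safety stopping rule
#' ## imposed (a sketch; only the extrasafe argument differs)
#' nxt.trial2 <- next.subtrial(target=0.3, npts=n, ntox=y, extrasafe=TRUE)
#' summary(nxt.trial2)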
#'
#' @export
next.subtrial <- function (target, npts, ntox, p.saf = 0.6 * target, p.tox = 1.4 *
target, cutoff.eli = 0.95, extrasafe = FALSE, offset = 0.05)
{
waterfall.subtrial.mtd <- function(target, npts, ntox, cutoff.eli = 0.95,
extrasafe = FALSE, offset = 0.05) {
temp = get.boundary(target, ncohort = 150, cohortsize = 1,
n.earlystop = 100, p.saf = p.saf, p.tox = p.tox,
cutoff.eli, extrasafe)$boundary_tab
b.e = temp[2, ]
pava <- function(x, wt = rep(1, length(x))) {
n <- length(x)
if (n <= 1)
return(x)
if (any(is.na(x)) || any(is.na(wt))) {
stop("Missing values in 'x' or 'wt' not allowed")
}
lvlsets <- (1:n)
repeat {
viol <- (as.vector(diff(x)) < 0)
if (!(any(viol)))
break
i <- min((1:(n - 1))[viol])
lvl1 <- lvlsets[i]
lvl2 <- lvlsets[i + 1]
ilvl <- (lvlsets == lvl1 | lvlsets == lvl2)
x[ilvl] <- sum(x[ilvl] * wt[ilvl])/sum(wt[ilvl])
lvlsets[ilvl] <- lvl1
}
x
}
y = ntox
n = npts
ndose = length(n)
elimi = rep(0, ndose)
is.escalation = 0
for (i in 1:ndose) {
if (n[i] >= 3) {
if (1 - pbeta(target, y[i] + 1, n[i] - y[i] +
1) > cutoff.eli) {
elimi[i:ndose] = 1
break
}
}
}
if (extrasafe) {
if (n[1] >= 3) {
if (1 - pbeta(target, y[1] + 1, n[1] - y[1] +
1) > cutoff.eli - offset) {
elimi[1:ndose] = 1
}
}
}
if (elimi[1] == 1 || sum(n[elimi == 0]) == 0) {
selectdose = 99
}
else {
adm.set = (n != 0) & (elimi == 0)
adm.index = which(adm.set == T)
y.adm = y[adm.set]
n.adm = n[adm.set]
phat = (y.adm + 0.05)/(n.adm + 0.1)
phat.var = (y.adm + 0.05) * (n.adm - y.adm + 0.05)/((n.adm +
0.1)^2 * (n.adm + 0.1 + 1))
phat = pava(phat, wt = 1/phat.var)
phat = phat + (1:length(phat)) * 1e-10
selectd = sort(abs(phat - target), index.return = T)$ix[1]
selectdose = adm.index[selectd]
if (y[selectdose] <= b.e[n[selectdose]]) {
is.escalation = 1
}
}
list(selectdose = selectdose, is.escalation = is.escalation)
}
out = list(next_subtrial = NA, starting_dose = c(NA, NA))
n = npts
y = ntox
if (sum(y > n) > 0) {
stop("The data entry may be wrong. Please check it. ")
}
if (nrow(n) > ncol(n) | nrow(y) > ncol(y)) {
stop("npts and ntox should be arranged in a way (i.e., rotated) such that for each of them, the number of rows is less than or equal to the number of columns.")
}
subtrial.space = list()
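    # waterfall dose spaces in column-major indices: entry J (the first
    # subtrial) covers column 1 plus the bottom row; entry j < J covers
    # row j over drug B levels 2,...,K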
subtrial.space[[nrow(n)]] = c(1:(dim(n)[1] - 1), (1:dim(n)[2]) *
dim(n)[1])
for (j in (dim(n)[1] - 1):1) subtrial.space[[j]] = (2:ncol(n)) *
nrow(n) - (nrow(n) - j)
cur.subtrial = 0
nxt.subtrial = 0
for (k in 1:dim(n)[1]) if (sum(n[subtrial.space[[k]]]) >
0) {
cur.subtrial = k
break
}
if (cur.subtrial > 1)
nxt.subtrial = cur.subtrial - 1
if (cur.subtrial == 1) {
class(out)<-"boin"
return(out)
}
else {
cur.dosespace = subtrial.space[[cur.subtrial]]
nxt.dosespace = subtrial.space[[nxt.subtrial]]
sds = cur.dosespace[which(n[cur.dosespace] > 0)[1]]
dj = ifelse(sds%%dim(n)[1] == 0, sds%/%dim(n)[1], sds%/%dim(n)[1] +
1)
di = sds - (dj - 1) * dim(n)[1]
dose.curr = c(di, dj)
if (npts[dose.curr[1], dose.curr[2]] == 0) {
stop("dose entered is not the current dose.")
}
if (target < 0.05) {
stop("the target is too low! ")
}
if (target > 0.6) {
stop("the target is too high! ")
}
if ((target - p.saf) < (0.1 * target)) {
stop("the probability deemed safe cannot be higher than or too close to the target! ")
}
if ((p.tox - target) < (0.1 * target)) {
stop("the probability deemed toxic cannot be lower than or too close to the target! ")
}
if (offset >= 0.5) {
stop("the offset is too large! ")
}
temp = get.boundary(target, ncohort = 150, cohortsize = 1,
n.earlystop = 100, p.saf, p.tox, cutoff.eli, extrasafe,
offset)$boundary_tab
b.e = temp[2, ]
b.d = temp[3, ]
b.elim = temp[4, ]
lambda1 = log((1 - p.saf)/(1 - target))/log(target *
(1 - p.saf)/(p.saf * (1 - target)))
lambda2 = log((1 - target)/(1 - p.tox))/log(p.tox * (1 -
target)/(target * (1 - p.tox)))
earlystop = 0
d = dose.curr
nc = n[d[1], d[2]]
ndose = length(npts)
elimi = matrix(rep(0, ndose), dim(n)[1], dim(n)[2])
if (!is.na(b.elim[nc])) {
if (d[1] == 1 && d[2] == 1 && y[d[1], d[2]] >= b.elim[nc]) {
d = c(99, 99)
earlystop = 1
cat("Current subtrial is terminated because the lowest dose is overly toxic \n")
}
if (extrasafe) {
if (d[1] == 1 && d[2] == 1 && n[1, 1] >= 3) {
if (1 - pbeta(target, y[1, 1] + 1, n[1, 1] -
y[1, 1] + 1) > cutoff.eli - offset) {
d = c(99, 99)
earlystop = 1
cat("Current subtrial is terminated because the lowest dose is overly toxic \n")
}
}
}
}
for (i in 1:dim(n)[1]) {
for (j in 1:dim(n)[2]) {
if (n[i, j] > 0 && (!is.na(b.elim[n[i, j]]))) {
if (y[i, j] >= b.elim[n[i, j]]) {
elimi[i:dim(n)[1], j:dim(n)[2]] = 1
}
}
}
}
if (earlystop == 0) {
wsmtd = waterfall.subtrial.mtd(target, n[cur.dosespace],
y[cur.dosespace], cutoff.eli, extrasafe, offset)
seldose = cur.dosespace[wsmtd$selectdose]
if (is.na(seldose) == TRUE) {
cat("Current subtrial is terminated early and no MTD is suggested for current subtrial. \n\n")
}
else if (seldose == 99) {
d = c(99, 99)
cat("Current subtrial is terminated because the lowest dose is overly toxic \n")
}
else {
dj = ifelse(seldose%%dim(n)[1] == 0, seldose%/%dim(n)[1],
seldose%/%dim(n)[1] + 1)
di = seldose - (dj - 1) * dim(n)[1]
d = c(di, dj)
dnext = c(max(1, di - 1), ifelse(dj == dim(n)[2],
dj, dj + 1))
FUNC = function(x) paste("(", dnext[1], ", ",
x, ")", sep = "")
dnextspace = paste(unlist(lapply(2:ncol(n), FUNC)),
collapse = ", ")
out = list(next_subtrial = dnextspace, starting_dose = dnext)
}
}
}
class(out)<-"boin"
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BOIN/R/next.subtrial.R
|
#'
#' Plot the flowchart and simulation results for BOIN designs
#'
#' Plot the objects returned by other functions, including (1) flowchart of BOIN design;
#' (2) operating characteristics of the design, including selesction percentage and the
#' number of patients treated at each dose;
#' (3) the estimate of toxicity probability for each dose and corresponding 95\% credible interval
#'
#'
#' @param x the object returned by other functions
#' @param ... ignored arguments
#' @param name the name of the object to be plotted.
#' User doesn't need to input this parameter.
#'
#' @return \code{plot()} returns a figure or a series of figures depending on the object entered
#'
#' @author Suyu Liu, Liangcai Zhang, Yanhong Zhou, and Ying Yuan
#'
#' @examples
#'
#' ###### single-agent trial ######
#'
#' ## get dose escalation and deescalation boundaries for conducting the trial
#' bound <- get.boundary(target=0.3, ncohort=10, cohortsize=3)
#' plot(bound)
#'
#'
#' ## get the operating characteristics for BOIN single agent trial
#' oc <- get.oc(target=0.3, p.true=c(0.05,0.15,0.3,0.45,0.6),
#' ncohort=10, cohortsize=3, ntrial=1000)
#' summary(oc)
#' plot(oc)
#'
#'
#' ## select the MTD based on the trial data
#' n <- c(3, 3, 15, 9, 0)
#' y <- c(0, 0, 4, 4, 0)
#' selmtd <- select.mtd(target=0.3, npts=n, ntox=y)
#' summary(selmtd)
#' plot(selmtd)
#'
#'
#' ###### drug-combination trial ######
#'
#' ##### combination trial to find a single MTD ######
#'
#' ## get the operating characteristics for BOIN combination trial
#' p.true <- matrix(c(0.01,0.03,0.10,0.20,0.30,
#' 0.03,0.05,0.15,0.30,0.60,
#' 0.08,0.10,0.30,0.60,0.75), byrow=TRUE, ncol=5)
#'
#' oc.comb <- get.oc.comb(target=0.3, p.true, ncohort=20, cohortsize=3, n.earlystop=12,
#' startdose=c(1,1),ntrial=100)
#' summary(oc.comb)
#' plot(oc.comb)
#'
#'
#' ## select a MTD based on the trial data
#' n <- matrix(c(3, 5, 0, 0, 0, 7, 6, 15, 0, 0, 0, 0, 4, 0, 0), ncol=5, byrow=TRUE)
#' y <- matrix(c(0, 1, 0, 0, 0, 1, 1, 4, 0, 0, 0, 0, 2, 0, 0), ncol=5, byrow=TRUE)
#' sel.comb <- select.mtd.comb(target=0.3, npts=n, ntox=y)
#' summary(sel.comb)
#' plot(sel.comb)
#'
#'
#' ##### combination trial to find a MTD contour (e.g., multiple MTDs) #####
#'
#' ## get the operating characteristics for BOIN waterfall design
#' p.true <- matrix(c(0.01, 0.10, 0.20, 0.30,
#' 0.03, 0.15, 0.30, 0.60,
#' 0.08, 0.30, 0.60, 0.75), byrow=TRUE, ncol=4)
#'
#' oc.comb2 <- get.oc.comb(target=0.3, p.true, ncohort=c(8,6,6), cohortsize=3, n.earlystop=12,
#' startdose=c(1,1), ntrial=100, mtd.contour=TRUE)
#' summary(oc.comb2)
#' plot(oc.comb2)
#'
#'
#' ## select the MTD contour based on the trial data
#' n <- matrix(c(6, 9, 24, 0, 6, 24, 9, 0, 12, 18, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 1, 5, 0, 1, 5, 4, 0, 1, 5, 0, 0), ncol=4, byrow=TRUE)
#' sel.comb2 <- select.mtd.comb(target=0.3, npts=n, ntox=y, mtd.contour=TRUE)
#' summary(sel.comb2)
#' plot(sel.comb2)
#'
#' @importFrom grDevices dev.flush dev.hold devAskNewPage
#' @importFrom graphics abline arrows arrows axis barplot legend mtext par plot points rect segments text
#' @export
plot.boin<- function (x,..., name = deparse(substitute(x)))
{
new.obj = unlist(strsplit(name, split = "\\$"))
strpattern = "none"
if (length(new.obj) >= 2) {
strpattern = new.obj[2]
}
assign("objectPlot", get(new.obj[1]))
get.flowchart<-function(){
lambda2 = round(objectPlot$lambda_e, 3)
lambda1 = round(objectPlot$lambda_d, 3)
# if (.Platform$OS.type == "windows") {
# dev.new(height = 7.36, width = 6.27, rescale = "fit")
# }
# else {
# dev.new(height = 7.367442, width = 6.580645)
# }
par(lwd = 1.5, mar = c(1, 1, 1, 1))
plot(0, type = "n", xlim = c(1, 10), ylim = c(-3,
10.2), xaxt = "n", yaxt = "n", bty = "n", pch = "",
ylab = "", xlab = "")
theta = seq(0, 2 * pi, len = 100)
r = 0.56
x = 5 + 2 * r * cos(theta)
y = 10 + r * sin(theta)
points(x, y, type = "l")
arrows(5, 10 - 0.56, 5, 8.5, length = 0.15)
rect(4, 7.5, 6, 8.5)
arrows(5, 7.5, 5, 6.5, length = 0.15)
arrows(10, 8, 6, 8, length = 0.15)
r = 0.5
x = 2 + 2 * r * cos(theta)
y = 5.5 + r * sin(theta)
points(x, y, type = "l")
arrows(4, 5.5, 3, 5.5, length = 0.15)
segments(4, 5.5, 5, 6.5)
segments(4, 5.5, 5, 4.5)
segments(5, 6.5, 6, 5.5)
segments(5, 4.5, 6, 5.5)
arrows(5, 4.5, 5, 3.5, length = 0.15)
segments(5, 3.5, 4, 2.5)
segments(4, 2.5, 5, 1.5)
segments(5, 1.5, 6, 2.5)
segments(6, 2.5, 5, 3.5)
segments(2, 2.5, 4, 2.5)
arrows(2, 2.5, 2, 0.5, length = 0.15)
segments(5, 1.5, 5, 1.3)
arrows(5, 0.9, 5, 0.5, length = 0.15)
segments(6, 2.5, 8, 2.5)
arrows(8, 2.5, 8, 0.5, length = 0.15)
rect(1, -0.5, 3, 0.5)
rect(4, -0.5, 6, 0.5)
rect(7, -0.5, 9, 0.5)
segments(2, -0.5, 2, -1.25)
segments(5, -0.5, 5, -2)
segments(8, -0.5, 8, -1.25)
segments(2, -1.25, 8, -1.25)
segments(5, -2, 10, -2)
segments(10, 8, 10, -2)
text(5, 10, labels = "Start \n at the prespecified \n starting dose",
cex = 0.8)
text(5, 8, labels = "Treat a patient or a \n cohort of patients",
cex = 0.8)
text(2, 5.5, labels = "Stop the trial and \n select the MTD",
cex = 0.8)
text(3.5, 5.8, labels = "Yes", cex = 0.8)
text(5, 5.6, labels = "Reach \n the maximum \n sample size",
cex = 0.8)
text(5.2, 4.2, labels = "No", cex = 0.8)
text(3, 2.8, labels = expression("" <= ""), cex = 0.8)
text(3.4, 2.8, labels = lambda2, cex = 0.8)
text(7, 2.8, labels = expression("" > ""), cex = 0.8)
text(7.4, 2.8, labels = lambda1, cex = 0.8)
text(5, 2.4, labels = "Compute \n the DLT rate* \n at the current \n dose",
cex = 0.8)
text(5, 1.06, labels = paste("Within (", lambda2,
", ", lambda1, "]", sep = ""), cex = 0.8)
text(2, 0, labels = "Escalate the dose", cex = 0.8)
text(5, 0, labels = "Retain the current \n dose",
cex = 0.8)
text(8, 0, labels = "De-escalate the \n dose", cex = 0.8)
text(par("usr")[2]/2, -3, expression(paste("* DLT rate = ",
frac("Total number of patients who experienced DLT at the current dose",
"Total number of evaluable patients treated at the current dose"),
sep = "")), cex = 0.8, adj = c(0.5, NA))
}
if (!is.element(strpattern, c("none", names(objectPlot)))) {
warning("Please double check and specify the variable to be plotted...\n")
}
else {
        # determine whether the flowchart is plotted; the specified variable is ignored when the flowchart is plotted
if (!is.null(objectPlot$boundary_tab) | (!is.null(objectPlot$percentstop) &
strpattern == "flowchart")) {
get.flowchart()
}
else if (!is.null(objectPlot$lambda_e)) { #oc for single-agent trial is entered
get.flowchart()
oask <- devAskNewPage(TRUE)
on.exit(devAskNewPage(oask))
dev.flush()
dev.hold()
par(mar = c(5, 6, 4, 2))
bplot = barplot(objectPlot$selpercent, ylab = "selection percentage (%)",
ylim = c(0, 100), cex.names = 1, xaxt = "n",
cex.lab = 1.3)
mtext("Selection percentage", 3, line = 0, cex = 1.3)
axis(1, at = bplot, labels = seq(1, length(objectPlot$selpercent)))
mtext("Dose level", 1, line = 2, cex = 1)
dev.flush()
dev.hold()
bplot = barplot(objectPlot$npatients, ylab = "Number of patients",
ylim = c(0, sum(objectPlot$npatients)), cex.names = 1,
beside = FALSE, xaxt = "n", cex.lab = 1.3)
axis(1, at = bplot, labels = seq(1, length(objectPlot$npatients)))
mtext("Patient allocation", 3, line = 0, cex = 1.3)
mtext("Dose level", 1, line = 2, cex = 1)
dev.flush()
dev.hold()
bplot = barplot(objectPlot$ntox, ylab = "Number of toxicities",
ylim = c(0, sum(objectPlot$ntox)), cex.names = 1,
beside = FALSE, xaxt = "n", cex.lab = 1.3)
axis(1, at = bplot, labels = seq(1, length(objectPlot$ntox)))
mtext("Observed toxicity", 3, line = 0, cex = 1.3)
mtext("Dose level", 1, line = 2, cex = 1)
}
else if (!is.null(objectPlot$pcs) | !is.null(objectPlot$pcs.contour)) {
            if (is.null(objectPlot$pcs.contour)) { ## diagram for a combination trial aiming to find a single MTD
J = nrow(objectPlot$p.true)
K = ncol(objectPlot$p.true)
xlab = "Drug B"
ylab = "Drug A"
if (J > K) {
S = J
J = K
K = S
xlab = "Drug A"
ylab = "Drug B"
}
xmax = K * 2 - 1
xmin = 1
ymax = 2 * J
ymin = 1
ptcex = 1.5
par(mar = c(5, 5, 2, 2))
plot(1:xmax, xlim = c(xmin, xmax), ylim = c(ymin,
ymax + 0.5), pch = "", axes = F, xlab = xlab,
ylab = ylab, cex.axis = 1, cex.lab = 1)
for (i in seq(1, xmax, by = 2)) for (j in seq(1,
ymax - 1, by = 2)) points(i, j, pch = 1,
cex = ptcex)
if (J > 2 & K > 2) {
arrows(3 + 0.1 * J/K, 3, 3 + 1, 3, col = 3,
length = 0.06, lty = 1, lwd = 2)
arrows(3 - 0.1 * J/K, 3, 3 - 1, 3, col = 2,
length = 0.06, lty = 1, lwd = 2)
arrows(3, 3 - 0.1 * J/K, 3, 3 - 1, col = 2,
length = 0.06, lty = 1, lwd = 2)
arrows(3, 3 + 0.1 * J/K, 3, 3 + 1, col = 3,
length = 0.06, lty = 1, lwd = 2)
points(3, 3, pch = 19, cex = ptcex)
}
if (J == 2 | K == 2) {
arrows(1 + 0.2 * J/K, 1, 1 + 1, 1, col = 3,
length = 0.06, lty = 1, lwd = 2)
arrows(1, 1 + 0.2 * J/K, 1, 1 + 1, col = 3,
length = 0.06, lty = 1, lwd = 2)
points(1, 1, pch = 19, cex = ptcex)
}
xx = quantile(1:xmax, c(1/4, 3/4))
text(x = xx[1] + 0.1 * ymax/(xmax + 1), y = ymax,
labels = "escalation", pos = 4)
arrows(xx[1] - 1, ymax, xx[1], ymax, col = 3,
length = 0.06, lty = 1, lwd = 2)
text(x = xx[2] + 0.1 * ymax/(xmax + 1), y = ymax,
labels = "de-escalation", pos = 4)
arrows(xx[2] - 1, ymax, xx[2], ymax, col = 2,
length = 0.06, lty = 1, lwd = 2)
}
            else { ## waterfall design
J = nrow(objectPlot$p.true)
K = ncol(objectPlot$p.true)
xlab = "Drug B"
ylab = "Drug A"
if (J > K) {
S = J
J = K
K = S
xlab = "Drug A"
ylab = "Drug B"
}
xmax = K * 2
xmin = -0.8
ymax = J * (J + 1) + 2 * J
ymin = 1 - 0.5
ptcex = 1.5
par(mar = c(5, 5, 4, 2))
plot(1:xmax, xlim = c(xmin, xmax), ylim = c(ymin,
ymax + 0.5), pch = "", axes = F, xlab = xlab,
ylab = ylab, cex.axis = 1, cex.lab = 1)
active.rows = NULL
for (j in 1:(J + 1)) active.rows = c(active.rows,
1:J + (J + 2) * (j - 1))
for (i in seq(1, xmax, by = 2)) for (j in 1:ymax) if (is.element(j,
active.rows))
points(i, j, pch = 1, cex = ptcex)
text(-0.5, sort(seq(ymax - (J - 1)/2, (J +
1)/2, len = J + 1), decreasing = FALSE),
paste("(", letters[seq(J + 1, 1)], ")", sep = ""),
cex = 1)
segments(0.5, ymax + 0.5, xmax, ymax + 0.5)
segments(0.5, ymax - (J - 1) - 0.5, 0.5, ymax +
0.5)
segments(0.5, ymax - (J - 1) - 0.5, 1.5, ymax -
(J - 1) - 0.5)
segments(1.5, ymax - (J - 1) - 0.5, 1.5, ymax -
0.5)
segments(1.5, ymax - 0.5, xmax, ymax - 0.5)
segments(xmax, ymax - 0.5, xmax, ymax + 0.5)
mtds = NULL
crows = NULL
arrows(1, ymax - J + 1 + 0.15 * J/K, 1, ymax -
J + 1 + 0.6, col = 1, length = 0.06, lty = 1,
lwd = 2)
tmpx = sort(sample(seq(1, xmax - 2, 2), J -
1), decreasing = FALSE)
for (j in 1:(J - 1)) {
crow = ymax - J * j - 2 * j - j
crows = c(crows, crow)
rect(2, crow - 0.5, xmax, crow + 0.5)
points(tmpx[j], crow + 1, pch = 8, cex = ptcex)
mtds = rbind(mtds, c(tmpx[j], J - j + 1))
if (tmpx[j] + 2 < xmax - 1) {
arrows(tmpx[j] + 2 + 0.2, crow, tmpx[j] +
2 + 0.5, crow, col = 1, length = 0.08,
lty = 1, lwd = 2)
}
else {
arrows(tmpx[j] + 2 - 0.2, crow, tmpx[j] +
2 - 0.5, crow, col = 1, length = 0.08,
lty = 1, lwd = 2)
}
}
mtds = rbind(mtds, c(xmax - 1, 1))
for (j in 1:J) {
x = mtds[j, 1]
y = mtds[j, 2]
points(rep(x, y), y + (J + 2) * 0:(y - 1),
pch = 8, cex = ptcex)
}
}
oask <- devAskNewPage(TRUE)
on.exit(devAskNewPage(oask))
dev.flush()
dev.hold()
par(mar = c(5, 6, 4, 2))
sel.comb=objectPlot$selpercent
rownames(sel.comb)=1:dim(sel.comb)[1]
colnames(sel.comb)=1:dim(sel.comb)[2]
barplot(sel.comb,beside=TRUE,ylab="Selection percentage (%)",
xlab="Drug B",ylim = c(0, 100),legend.text=rownames(sel.comb),
args.legend=list(title="Drug A",horiz=TRUE,x="top"))
mtext("Selection percentage", 3, line = 1, cex = 1.3)
dev.flush()
dev.hold()
npts.comb=objectPlot$npatients
rownames(npts.comb)=1:dim(npts.comb)[1]
colnames(npts.comb)=1:dim(npts.comb)[2]
barplot(npts.comb,beside=TRUE,ylab="Number of patients",
xlab="Drug B",ylim=c(0,sum(npts.comb,na.rm=TRUE)),
legend.text=rownames(npts.comb),
args.legend=list(title="Drug A",horiz=TRUE,x="top"))
mtext("Patient allocation", 3, line = 1, cex = 1.3)
dev.flush()
dev.hold()
ntox.comb=objectPlot$ntox
rownames(ntox.comb)=1:dim(ntox.comb)[1]
colnames(ntox.comb)=1:dim(ntox.comb)[2]
barplot(ntox.comb,beside=TRUE,ylab="Number of toxicities",
xlab="Drug B", ylim=c(0,sum(ntox.comb,na.rm=TRUE)),
legend.text=rownames(ntox.comb),
args.legend=list(title="Drug A",horiz=TRUE,x="top"))
mtext("Observed toxicity", 3, line = 1, cex = 1.3)
}
        else if (!is.null(objectPlot$MTD)) { ## select MTD
if (objectPlot$MTD[1] == 99) {
warning("All tested doses are overly toxic. No MTD is selected!\n")
}
else {
if (!is.null(objectPlot$p_est)) {
par(mfrow = c(1, 1), mar = c(5, 5, 4, 2))
if (length(objectPlot$MTD) >= 2) {
p_est.comb=objectPlot$p_est
rownames(p_est.comb)=1:dim(p_est.comb)[1]
colnames(p_est.comb)=1:dim(p_est.comb)[2]
barplot(p_est.comb,beside=TRUE,ylab="DLT rate",
ylim=c(0,round(max(p_est.comb,na.rm=TRUE)*1.5,1)),xlab="Drug B",legend.text=rownames(p_est.comb),
args.legend=list(title="Drug A",horiz=TRUE,x="top"))
}
else {
p_est = objectPlot$p_est
p_hat = p_est[, 2]
ci = p_est[, 3]
ci = gsub("[\\(\\)]", "", ci)
conf.intv = matrix(unlist(strsplit(ci, ",")),
byrow = TRUE, ncol = 2)
if (p_est[1, 2] == "----") {
warning("The trial is stopped since the lowest dose is too toxic.\n")
}
else {
numbs = ifelse(sum(p_hat == "----") ==
0, length(p_hat), min(which(p_hat ==
"----")) - 1)
numbs2 = length(p_hat)
phatx = as.numeric(as.character(p_hat[1:numbs]))
lwr = as.numeric(as.character(conf.intv[1:numbs,
1]))
upr = as.numeric(as.character(conf.intv[1:numbs,
2]))
par(mar = c(5, 5, 4, 2))
plot(1:numbs2, ylim = c(0, 1), xlab = "Dose level",
ylab = "DLT rate", pch = "", xaxt = "n",
cex.lab = 1.3)
axis(1, at = 1:numbs2, labels = 1:numbs2)
abline(h = objectPlot$target, lty = 2,
col = 2)
points(1:numbs, phatx, pch = 19)
arrows(x0 = 1:numbs, x1 = 1:numbs, y0 = lwr,
y1 = upr, code = 3, angle = 90, length = 0.1)
if (numbs < numbs2) {
points((numbs + 1):numbs2, seq(min(1,
max(phatx, na.rm = T) + 0.05), min(max(phatx,
na.rm = T) + 0.2, 1), length = numbs2 -
numbs), pch = "*", cex = 1.5)
legend("topleft", "* no patient treated")
}
}
}
}
else {
warning("Please set verbose=TRUE to get more details of the results.\n")
}
}
}
else {
warning("Please double check and specify the variable to be plotted...\n")
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BOIN/R/plot.boin.R
|
#'
#' Generate descriptive summary for objects returned by other functions
#'
#' Generate descriptive summary for objects returned by other functions.
#'
#' @param x the object returned by other functions
#' @param ... ignored arguments
#'
#'
#' @details \code{print()} prints the objects returned by other functions.
#'
#' @return \code{print()} prints the objects returned by other functions.
#'
#' @author Suyu Liu, Liangcai Zhang, Yanhong Zhou, and Ying Yuan
#'
#' @examples
#'
#' ###### single-agent trial ######
#'
#' ## print the object returned by get.boundary()
#' bound <- get.boundary(target=0.3, ncohort=10, cohortsize=3)
#' print(bound)
#'
#'
#' ## print the object returned by get.oc()
#' oc.single <- get.oc(target=0.3, p.true=c(0.05, 0.15, 0.3, 0.45, 0.6), ncohort=10,
#' cohortsize=3, ntrial=1000)
#' print(oc.single)
#'
#'
#' ## print the object returned by select.mtd()
#' n <- c(3, 3, 15, 9, 0)
#' y <- c(0, 0, 4, 4, 0)
#' selmtd <- select.mtd(target=0.3, npts=n, ntox=y)
#' print(selmtd)
#'
#'
#' ###### drug-combination trial ######
#'
#' ###### drug-combination trial to find a single MTD ######
#'
#' ## print the object returned by next.comb()
#' n <- matrix(c(3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' nxt.comb <- next.comb(target=0.25, npts=n, ntox=y, dose.curr=c(1, 1))
#' print(nxt.comb)
#'
#'
#' ## print the object returned by next.comb()
#' n <- matrix(c(3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' nxt.comb <- next.comb(target=0.25, npts=n, ntox=y, dose.curr=c(1, 2))
#' print(nxt.comb)
#'
#'
#' ## print the object returned by get.oc.comb() when mtd.contour=FALSE
#' p.true <- matrix(c(0.02,0.04,0.08,0.14,
#' 0.08,0.25,0.42,0.48,
#' 0.25,0.45,0.50,0.60), byrow=TRUE, ncol=4)
#'
#' oc.comb <- get.oc.comb(target=0.25, p.true=p.true, ncohort=16, cohortsize=3,
#' ntrial=100)
#' print(oc.comb)
#'
#'
#' ## print the object returned by select.mtd.comb()
#' n <- matrix(c(6, 3, 0, 0, 6, 24, 9, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 0, 0, 0, 1, 5, 4, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' sel.comb <- select.mtd.comb(target=0.25, npts=n, ntox=y)
#' print(sel.comb)
#'
#'
#'
#' ###### drug-combination trial to find the MTD contour ######
#'
#' ## print the object returned by next.subtrial()
#' n <- matrix(c(6, 0, 0, 0,
#' 6, 0, 0, 0,
#' 9, 12, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 0, 0, 0,
#' 1, 0, 0, 0,
#' 2, 3, 0, 0), ncol=4, byrow=TRUE)
#' nxt.trial <- next.subtrial(target=0.3, npts=n, ntox=y)
#' print(nxt.trial)
#'
#'
#' ## print the object returned by get.oc.comb() when mtd.contour=TRUE.
#' p.true <- matrix(c(0.01,0.03,0.10,0.20,0.30,
#' 0.03,0.05,0.15,0.30,0.60,
#' 0.08,0.10,0.30,0.60,0.75), byrow=TRUE, ncol=5)
#'
#' oc.comb <- get.oc.comb(target=0.3, p.true, ncohort=c(10,5,5), cohortsize=3,
#' n.earlystop=12, startdose=c(1,1),ntrial=100, mtd.contour=TRUE)
#' print(oc.comb)
#'
#'
#' ## print the object returned by select.mtd.comb()
#' n <- matrix(c(6, 9, 24, 0,
#' 6, 24, 9, 0,
#' 12, 18, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 1, 5, 0,
#' 1, 5, 4, 0,
#' 1, 5, 0, 0), ncol=4, byrow=TRUE)
#' sel.comb2 <- select.mtd.comb(target=0.3, npts=n, ntox=y, mtd.contour=TRUE)
#' print(sel.comb2)
#'
#'
#'
#' @export
print.boin<-function(x,...){
print.default(x)
}
|
/scratch/gouwar.j/cran-all/cranData/BOIN/R/print.boin.R
|
#'
#' Select the maximum tolerated dose (MTD) for single agent trials
#'
#' Select the maximum tolerated dose (MTD) when the single-agent trial is completed
#'
#' @usage select.mtd(target, npts, ntox, cutoff.eli=0.95, extrasafe=FALSE, offset=0.05,
#' boundMTD=FALSE,p.tox=1.4*target)
#'
#' @param target the target DLT rate
#' @param npts a vector containing the number of patients treated at each dose level
#' @param ntox a vector containing the number of patients who experienced dose-limiting
#' toxicity at each dose level
#' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend
#' the default value of (\code{cutoff.eli=0.95}) for general use.
#' @param extrasafe set \code{extrasafe=TRUE} to impose a more strict stopping rule for
#' extra safety
#' @param offset a small positive number (between \code{0} and \code{0.5}) to control how strict the
#' stopping rule is when \code{extrasafe=TRUE}. A larger value leads to
#' a more strict stopping rule. The default value \code{offset=0.05}
#' generally works well.
#' @param boundMTD set \code{boundMTD=TRUE} to impose the condition: the isotonic estimate of toxicity
#' probability for the selected MTD must be less than de-escalation boundary.
#' @param p.tox the lowest toxicity probability that is deemed overly toxic such
#' that deescalation is required. The default value is
#' \code{p.tox=1.4*target}.
#'
#' @details \code{select.mtd()} selects the MTD based on isotonic estimates of toxicity
#' probabilities. \code{select.mtd()} selects as the MTD dose \eqn{j^*}, for which the
#' isotonic estimate of the DLT rate is closest to the target. If there
#' are ties, we select from the ties the highest dose level when the estimate
#' of the DLT rate is smaller than the target, or the lowest dose level
#' when the estimate of the DLT rate is greater than the target. The
#' isotonic estimates are obtained by the pooled-adjacent-violators algorithm
#' (PAVA) (Barlow, 1972).
#'
#' @return \code{select.mtd()} returns (1) target toxicity probability (\code{$target}), (2) selected MTD (\code{$MTD}),
#' (3) isotonic estimate of the DLT probability at each dose and associated \eqn{95\%} credible interval (\code{$p_est}),
#' and (4) the probability of overdosing defined as \eqn{Pr(toxicity>\code{target}|data)} (\code{$p_overdose})
#'
#'
#' @note The MTD selection and dose escalation/deescalation rule are two independent
#' components of the trial design. When appropriate, another dose selection
#' procedure (e.g., based on a fitted logistic model) can be used to select
#' the MTD after the completion of the trial using the BOIN design.
#'
#'
#' @author Suyu Liu, Yanhong Zhou, and Ying Yuan
#'
#' @references Liu S. and Yuan, Y. (2015). Bayesian Optimal Interval Designs for
#' Phase I Clinical Trials, \emph{Journal of the Royal Statistical Society:
#' Series C}, 64, 507-523.
#'
#' Yan, F., Zhang, L., Zhou, Y., Pan, H., Liu, S. and Yuan, Y. (2020).BOIN: An R Package
#' for Designing Single-Agent and Drug-Combination Dose-Finding Trials Using Bayesian Optimal
#' Interval Designs. \emph{Journal of Statistical Software}, 94(13),1-32.<doi:10.18637/jss.v094.i13>.
#'
#'
#' Yuan Y., Hess K.R., Hilsenbeck S.G. and Gilbert M.R. (2016). Bayesian Optimal Interval Design: A
#' Simple and Well-performing Design for Phase I Oncology Trials, \emph{Clinical Cancer Research}, 22, 4291-4301.
#'
#'
#' @seealso Tutorial: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/BOIN2.6_tutorial.pdf}
#'
#' Paper: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/paper.pdf}
#'
#' @examples
#'
#' ### select the MTD for BOIN single agent trial
#' n <- c(3, 3, 15, 9, 0)
#' y <- c(0, 0, 4, 4, 0)
#' selmtd <- select.mtd(target=0.3, npts=n, ntox=y)
#' summary(selmtd)
#' plot(selmtd)
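#'
#' ## a sketch requiring the selected MTD's isotonic estimate to fall below
#' ## the de-escalation boundary (only boundMTD differs from the call above)
#' selmtd2 <- select.mtd(target=0.3, npts=n, ntox=y, boundMTD=TRUE)
#' summary(selmtd2)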
#'
#' @export
select.mtd <- function (target, npts, ntox, cutoff.eli = 0.95, extrasafe = FALSE,
offset = 0.05, boundMTD = FALSE, p.tox=1.4*target)
{
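    # pooled-adjacent-violators algorithm (PAVA; Barlow, 1972): weighted
    # isotonic regression yielding monotone non-decreasing estimates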
pava <- function(x, wt = rep(1, length(x))) {
n <- length(x)
if (n <= 1)
return(x)
if (any(is.na(x)) || any(is.na(wt))) {
stop("Missing values in 'x' or 'wt' not allowed")
}
lvlsets <- (1:n)
repeat {
viol <- (as.vector(diff(x)) < 0)
if (!(any(viol)))
break
i <- min((1:(n - 1))[viol])
lvl1 <- lvlsets[i]
lvl2 <- lvlsets[i + 1]
ilvl <- (lvlsets == lvl1 | lvlsets == lvl2)
x[ilvl] <- sum(x[ilvl] * wt[ilvl])/sum(wt[ilvl])
lvlsets[ilvl] <- lvl1
}
x
}
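    # de-escalation boundary lambda_d of the BOIN design, determined by target and p.tox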
lambda_d = log((1 - target)/(1 - p.tox))/log(p.tox * (1 -target)/(target * (1 - p.tox)))
y = ntox
n = npts
ndose = length(n)
elimi = rep(0, ndose)
for (i in 1:ndose) {
if (n[i] >= 3) {
if (1 - pbeta(target, y[i] + 1, n[i] - y[i] + 1) >
cutoff.eli) {
elimi[i:ndose] = 1
break
}
}
}
if (extrasafe) {
if (n[1] >= 3) {
if (1 - pbeta(target, y[1] + 1, n[1] - y[1] + 1) >
cutoff.eli - offset) {
elimi[1:ndose] = 1
}
}
}
if (elimi[1] == 1 || sum(n[elimi == 0]) == 0) {
selectdose = 99
}
else {
adm.set = (n != 0) & (elimi == 0)
adm.index = which(adm.set == T)
y.adm = y[adm.set]
n.adm = n[adm.set]
phat = (y.adm + 0.05)/(n.adm + 0.1)
phat.var = (y.adm + 0.05) * (n.adm - y.adm + 0.05)/((n.adm +
0.1)^2 * (n.adm + 0.1 + 1))
phat = pava(phat, wt = 1/phat.var)
phat = phat + (1:length(phat)) * 1e-10
if(boundMTD){
if(all(phat>lambda_d)){selectdose=99}else{
phat=phat[phat<=lambda_d]
selectd = sort(abs(phat - target), index.return = T)$ix[1]
selectdose = adm.index[selectd]
}
}else{
selectd = sort(abs(phat - target), index.return = T)$ix[1]
selectdose = adm.index[selectd]
}
}
trtd = (n != 0)
poverdose = pava(1 - pbeta(target, y[trtd] + 0.05, n[trtd] -
y[trtd] + 0.05))
phat.all = pava((y[trtd] + 0.05)/(n[trtd] + 0.1), wt = 1/((y[trtd] +
0.05) * (n[trtd] - y[trtd] + 0.05)/((n[trtd] + 0.1)^2 *
(n[trtd] + 0.1 + 1))))
lowerCIs=pava(qbeta(0.025, y[trtd] + 0.05,n[trtd] - y[trtd] + 0.05),wt = 1/((y[trtd] +
0.05) * (n[trtd] - y[trtd] + 0.05)/((n[trtd] + 0.1)^2 *
(n[trtd] + 0.1 + 1))))
upperCIs=pava(qbeta(0.975, y[trtd] + 0.05,n[trtd] - y[trtd] + 0.05),wt = 1/((y[trtd] +
0.05) * (n[trtd] - y[trtd] + 0.05)/((n[trtd] + 0.1)^2 *
(n[trtd] + 0.1 + 1))))
A1 = A2 = A3 = A4 = NULL
k = 1
for (i in 1:ndose) {
if (n[i] > 0) {
A1 = append(A1, formatC(phat.all[k], digits = 2,
format = "f"))
            A2 = append(A2, formatC(lowerCIs[k], digits = 2, format = "f"))
            A3 = append(A3, formatC(upperCIs[k], digits = 2, format = "f"))
A4 = append(A4, formatC(poverdose[k], digits = 2,
format = "f"))
k = k + 1
}
else {
A1 = append(A1, "----")
A2 = append(A2, "----")
A3 = append(A3, "----")
A4 = append(A4, "----")
}
}
p_est = data.frame(cbind(dose = 1:length(npts), phat = A1,
CI = paste("(", A2, ",", A3, ")", sep = "")))
out = list(target = target, MTD = selectdose, p_est = p_est,
p_overdose = A4)
class(out)<-"boin"
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BOIN/R/select.mtd.R
|
#'
#' Select the maximum tolerated dose (MTD) or MTD contour for drug combination trials
#'
#' Select the maximum tolerated dose (MTD) or MTD contour after the drug combination trial is
#' completed using the BOIN design or waterfall design
#'
#'
#' @param target the target DLT rate
#' @param npts a \code{J*K} matrix \code{(J<=K)} containing the number of patients treated at each dose combination
#' @param ntox a \code{J*K} matrix \code{(J<=K)} containing the number of patients who experienced
#' dose-limiting toxicity at each dose combination
#' @param cutoff.eli the cutoff to eliminate an overly toxic dose for safety.
#' We recommend the default value of (\code{cutoff.eli=0.95})
#' for general use.
#' @param extrasafe set \code{extrasafe=TRUE} to impose a more strict stopping
#' rule for extra safety
#' @param offset a small positive number (between \code{0} and \code{0.5}) to control how
#' strict the stopping rule is when \code{extrasafe=TRUE}. A
#' larger value leads to a more strict stopping rule. The
#' default value \code{offset=0.05} generally works well.
#' @param boundMTD set \code{boundMTD=TRUE} to impose the condition: the isotonic estimate of toxicity
#' probability for the selected MTD must be less than de-escalation boundary.
#' @param p.tox the lowest toxicity probability that is deemed overly toxic such
#' that deescalation is required. The default value is
#' \code{p.tox=1.4*target}.
#' @param mtd.contour set \code{mtd.contour=TRUE} to select the MTD contour,
#' otherwise select a single MTD. The value of \code{mtd.contour}
#' should be consistent with that in \code{get.oc.comb()}.
#'
#'
#' @return \code{select.mtd.comb()} returns (1) target toxicity probability (\code{$target}),
#' (2) selected MTD or MTD contour (\code{$MTD}),
#' (3) isotonic estimate of the DLT probability at each dose (\code{$p_est}).
#'
#'
#' @details \code{select.mtd.comb()} selects a MTD or the MTD contour based
#'          on matrix isotonic estimates of toxicity probabilities, depending on
#'          whether \code{mtd.contour} is set to \code{TRUE} or \code{FALSE}. The (matrix)
#' isotonic estimates are obtained by the R package (Iso::biviso).
#'
#' @note The MTD selection and dose escalation/deescalation rule are two independent
#' components of the trial design. When appropriate, another dose selection
#' procedure (e.g., based on a fitted logistic model) can be used to select
#' the MTD after the completion of the trial using the BOIN or waterfall design.
#'
#' @author Suyu Liu, Liangcai Zhang, Yanhong Zhou, and Ying Yuan
#'
#' @references Liu S. and Yuan, Y. (2015). Bayesian Optimal Interval Designs for Phase I Clinical
#' Trials, \emph{Journal of the Royal Statistical Society: Series C}, 64, 507-523.
#'
#' Lin R. and Yin, G. (2017). Bayesian Optimal Interval Designs for Dose Finding in
#' Drug-combination Trials, \emph{Statistical Methods in Medical Research}, 26, 2155-2167.
#'
#' Yan, F., Zhang, L., Zhou, Y., Pan, H., Liu, S. and Yuan, Y. (2020).BOIN: An R Package
#' for Designing Single-Agent and Drug-Combination Dose-Finding Trials Using Bayesian Optimal
#' Interval Designs. \emph{Journal of Statistical Software}, 94(13),1-32.<doi:10.18637/jss.v094.i13>.
#'
#'
#' Zhang L. and Yuan, Y. (2016). A Simple Bayesian Design to Identify the Maximum
#' Tolerated Dose Contour for Drug Combination Trials, \emph{Statistics in Medicine}, 35, 4924-4936.
#'
#' @seealso Tutorial: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/BOIN2.6_tutorial.pdf}
#'
#' Paper: \url{http://odin.mdacc.tmc.edu/~yyuan/Software/BOIN/paper.pdf}
#'
#' @examples
#'
#' ### drug-combination trial to find a single MTD
#'
#' ## Select the MTD based on the data from a 3x5 combination trial
#' ## matrix n contains the number of patients treated at each dose combination
#' ## matrix y contains the number of patients experienced toxicity at each dose combination
#' n <- matrix(c(3, 5, 0, 0, 0, 7, 6, 15, 0, 0, 0, 0, 4, 0, 0), ncol=5, byrow=TRUE)
#' y <- matrix(c(0, 1, 0, 0, 0, 1, 1, 4, 0, 0, 0, 0, 2, 0, 0), ncol=5, byrow=TRUE)
#' sel.comb <- select.mtd.comb(target=0.3, npts=n, ntox=y)
#' summary(sel.comb)
#' plot(sel.comb)
#'
#'
#' ### drug-combination trial to find the MTD contour
#'
#' ## Select the MTD contour based on the data from a 3x4 combination trial
#' ## matrix n contains the number of patients treated at each dose combination
#' ## matrix y contains the number of patients experienced toxicity at each dose combination
#' n <- matrix(c(6, 9, 24, 0, 6, 24, 9, 0, 12, 18, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 1, 5, 0, 1, 5, 4, 0, 1, 5, 0, 0), ncol=4, byrow=TRUE)
#' sel.comb2 <- select.mtd.comb(target=0.3, npts=n, ntox=y, mtd.contour=TRUE)
#' summary(sel.comb2)
#' plot(sel.comb2)
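#'
#' ## the same MTD-contour selection with the extra-safety rule imposed (a sketch)
#' sel.comb3 <- select.mtd.comb(target=0.3, npts=n, ntox=y, extrasafe=TRUE,
#'                              mtd.contour=TRUE)
#' summary(sel.comb3)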
#'
#' @export
select.mtd.comb <- function (target, npts, ntox, cutoff.eli = 0.95, extrasafe = FALSE,
offset = 0.05, boundMTD=FALSE, p.tox=1.4*target,mtd.contour = FALSE)
{
lambda_d = log((1 - target)/(1 - p.tox))/log(p.tox * (1 -target)/(target * (1 - p.tox)))
y = ntox
n = npts
if (nrow(n) > ncol(n) | nrow(y) > ncol(y)) {
stop("npts and ntox should be arranged in a way (i.e., rotated) such that for each of them, the number of rows is less than or equal to the number of columns.")
}
elimi = matrix(0, dim(n)[1], dim(n)[2])
if (extrasafe) {
if (n[1, 1] >= 3) {
if (1 - pbeta(target, y[1, 1] + 1, n[1, 1] - y[1,
1] + 1) > cutoff.eli - offset) {
elimi[, ] = 1
}
}
}
for (i in 1:dim(n)[1]) {
for (j in 1:dim(n)[2]) {
if (n[i, j] >= 3) {
if (1 - pbeta(target, y[i, j] + 1, n[i, j] -
y[i, j] + 1) > cutoff.eli) {
elimi[i:dim(n)[1], j] = 1
elimi[i, j:dim(n)[2]] = 1
break
}
}
}
}
selectdose=NULL
    if (elimi[1] == 1) {
        selectdose = c(99, 99)
        selectdoses = matrix(selectdose, nrow = 1)
        # ensure the estimate matrices referenced in the output exist when the
        # trial is stopped for safety before any isotonic estimates are computed
        phat.out = phat.out.noCI = matrix(NA, dim(n)[1], dim(n)[2])
}else {
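        # Isotonic DLT estimates: posterior means under a vague Beta(0.05, 0.05)
        # prior, smoothed by bivariate isotonic regression (Iso::biviso); the 95%
        # credible bounds are isotonized the same way for reporting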
phat = (y + 0.05)/(n + 0.1)
phat = round(Iso::biviso(phat, n + 0.1, warn = TRUE)[, ],2)
# phat.out = phat
lower.mat=qbeta(0.025,y+0.05,n-y+0.05)
lower.mat=round(Iso::biviso(lower.mat),2)
upper.mat=qbeta(0.975,y+0.05,n-y+0.05)
upper.mat=round(Iso::biviso(upper.mat),2)
phat.out<-matrix(paste0(format(phat,digits=1),"(",lower.mat,", ",upper.mat,")"),byrow=FALSE,nrow=dim(phat)[1])
colnames(phat.out)=paste0("B",1:dim(n)[2])
rownames(phat.out)=paste0("A",1:dim(n)[1])
phat.out.noCI=round(phat,2)
phat.out[n == 0] = "NA"
phat[elimi == 1] = 1.1
phat = phat * (n != 0) + (1e-05) * (matrix(rep(1:dim(n)[1],
each = dim(n)[2], len = length(n)), dim(n)[1], byrow = T) +
matrix(rep(1:dim(n)[2], each = dim(n)[1], len = length(n)),
dim(n)[1]))
if(boundMTD){
if(all(phat[n!=0]>lambda_d)){
selectdose = c(99, 99)
selectdoses = matrix(selectdose, nrow = 1)
}else{
phat[phat>lambda_d]=10}}
if(is.null(selectdose)){
phat[n == 0] = 10
selectdose = which(abs(phat - target) == min(abs(phat -
target)), arr.ind = TRUE)
if (length(selectdose) > 2)
selectdose = selectdose[1, ]
aa = function(x) as.numeric(as.character(x))
if (mtd.contour == TRUE) {
selectdoses = cbind(row = 1:dim(n)[1], col = rep(99,
dim(n)[1]))
for (k in dim(n)[1]:1) {
kn = n[k, ]
ky = y[k, ]
kelimi = elimi[k, ]
kphat = phat[k, ]
if (kelimi[1] == 1 || sum(n[kelimi == 0]) ==
0) {
kseldose = 99
}else {
adm.set = (kn != 0) & (kelimi == 0)
adm.index = which(adm.set == T)
y.adm = ky[adm.set]
n.adm = kn[adm.set]
selectd = sort(abs(kphat[adm.set] - target),
index.return = T)$ix[1]
kseldose = adm.index[selectd]
}
selectdoses[k, 2] = ifelse(is.na(kseldose), 99,
kseldose)
if (k < dim(n)[1])
if (selectdoses[k + 1, 2] == dim(n)[2])
selectdoses[k, 2] = dim(n)[2]
if (k < dim(n)[1])
if (aa(selectdoses[k + 1, 2]) == dim(n)[2] &
aa(selectdoses[k + 1, 2]) == aa(selectdoses[k,
2]))
selectdoses[k, 2] = 99
}
}else {
selectdoses = matrix(99, nrow = 1, ncol = 2)
selectdoses[1, ] = matrix(selectdose, nrow = 1)
}
selectdoses = matrix(selectdoses[selectdoses[, 2] !=
99, ], ncol = 2)
}
colnames(selectdoses) = c("DoseA", "DoseB")
}
if (mtd.contour == FALSE) {
if (selectdoses[1, 1] == 99 && selectdoses[1, 2] == 99) {
cat("All tested doses are overly toxic. No MTD is selected! \n")}
# out=list(target = target, MTD = 99, p_est = matrix(NA,nrow = dim(npts)[1], ncol = dim(npts)[2]))
# }
# else {
out=list(target = target, MTD = selectdoses, p_est=phat.out.noCI,p_est_CI = phat.out)
# }
class(out)<-"boin"
return(out)
}
else {
if (length(selectdoses) == 0) {
cat("All tested doses are overly toxic. No MTD is selected! \n")
out=list(target = target, MTD = 99, p_est = matrix(NA,nrow = dim(npts)[1], ncol = dim(npts)[2]))
}
else {
out=list(target = target, MTD = selectdoses, p_est=phat.out.noCI,p_est_CI = phat.out)
}
class(out)<-"boin"
return(out)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BOIN/R/select.mtd.comb.R
|
#'
#' Generate descriptive summary for objects returned by other functions
#'
#' Generate descriptive summary for objects returned by other functions.
#'
#' @param object the object returned by other functions.
#' @param ... ignored arguments
#'
#'
#' @details \code{summary()} prints the objects returned by other functions.
#'
#' @return \code{summary()} prints the objects returned by other functions.
#'
#' @author Suyu Liu, Liangcai Zhang and Ying Yuan
#'
#' @examples
#'
#' ###### single-agent trial ######
#'
#' ## summarize the object returned by get.boundary()
#' bound <- get.boundary(target=0.3, ncohort=10, cohortsize=3)
#' summary(bound)
#'
#'
#' ## summarize the object returned by get.oc()
#' oc.single <- get.oc(target=0.3, p.true=c(0.05, 0.15, 0.3, 0.45, 0.6), ncohort=10,
#' cohortsize=3, ntrial=1000)
#' summary(oc.single)
#'
#'
#' ## summarize the object returned by select.mtd()
#' n <- c(3, 3, 15, 9, 0)
#' y <- c(0, 0, 4, 4, 0)
#' selmtd <- select.mtd(target=0.3, npts=n, ntox=y)
#' summary(selmtd)
#'
#'
#' ###### drug-combination trial######
#'
#' ###### drug-combination trial to find a single MTD ######
#'
#' ## summarize the object returned by next.comb()
#' n <- matrix(c(3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' nxt.comb <- next.comb(target=0.25, npts=n, ntox=y, dose.curr=c(1, 1))
#' summary(nxt.comb)
#'
#'
#' ## summarize the object returned by next.comb()
#' n <- matrix(c(3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' nxt.comb <- next.comb(target=0.25, npts=n, ntox=y, dose.curr=c(1, 2))
#' summary(nxt.comb)
#'
#'
#' ## summarize the object returned by get.oc.comb() when mtd.contour=FALSE
#' p.true <- matrix(c(0.02,0.04,0.08,0.14,
#' 0.08,0.25,0.42,0.48,
#' 0.25,0.45,0.50,0.60), byrow=TRUE, ncol=4)
#'
#' oc.comb <- get.oc.comb(target=0.25, p.true=p.true, ncohort=16, cohortsize=3,
#' ntrial=100)
#' summary(oc.comb)
#'
#'
#' ## summarize the object returned by select.mtd.comb()
#' n <- matrix(c(6, 3, 0, 0, 6, 24, 9, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 0, 0, 0, 1, 5, 4, 0, 0, 0, 0, 0), ncol=4, byrow=TRUE)
#' sel.comb <- select.mtd.comb(target=0.25, npts=n, ntox=y)
#' summary(sel.comb)
#'
#'
#'
#' ###### drug-combination trial to find the MTD contour ######
#'
#' ## summarize the object returned by next.subtrial()
#' n <- matrix(c(6, 0, 0, 0,
#' 6, 0, 0, 0,
#' 9, 12, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 0, 0, 0,
#' 1, 0, 0, 0,
#' 2, 3, 0, 0), ncol=4, byrow=TRUE)
#' nxt.trial <- next.subtrial(target=0.3, npts=n, ntox=y)
#' summary(nxt.trial)
#'
#'
#' ## summarize the object returned by get.oc.comb() when mtd.contour=TRUE.
#' p.true <- matrix(c(0.01,0.03,0.10,0.20,0.30,
#' 0.03,0.05,0.15,0.30,0.60,
#' 0.08,0.10,0.30,0.60,0.75), byrow=TRUE, ncol=5)
#'
#' oc.comb <- get.oc.comb(target=0.3, p.true, ncohort=c(10,5,5), cohortsize=3,
#' n.earlystop=12, startdose=c(1,1),ntrial=100, mtd.contour=TRUE)
#' summary(oc.comb)
#'
#'
#' ## summarize the object returned by select.mtd.comb()
#' n <- matrix(c(6, 9, 24, 0,
#' 6, 24, 9, 0,
#' 12, 18, 0, 0), ncol=4, byrow=TRUE)
#' y <- matrix(c(0, 1, 5, 0,
#' 1, 5, 4, 0,
#' 1, 5, 0, 0), ncol=4, byrow=TRUE)
#' sel.comb2 <- select.mtd.comb(target=0.3, npts=n, ntox=y, mtd.contour=TRUE)
#' summary(sel.comb2)
#'
#'
#'
#' @export
summary.boin<- function (object, ...)
{
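    # Dispatch on the components present in the object: boundary tables come from
    # get.boundary(); next subtrial / next dose recommendations from next.subtrial()
    # and next.comb(); selected MTDs from select.mtd() and select.mtd.comb(); and
    # simulation operating characteristics from get.oc() and get.oc.comb()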
if (!is.null(object$boundary_tab)) {
if (!is.na(object$lambda_e))
cat("Escalate dose if the observed DLT rate at the current dose <= ",
object$lambda_e, "\n")
if (!is.na(object$lambda_d))
cat("Deescalate dose if the observed DLT rate at the current dose > ",
object$lambda_d, "\n\n")
if (!is.null(object$boundary_tab)) {
cat("This is equivalent to the following decision boundaries\n")
print(object$boundary_tab)
}
if (!is.null(object$full_boundary_tab)) {
cat("\n")
cat("A more completed version of the decision boundaries is given by\n")
print(object$full_boundary_tab)
}
if (!is.null(object$stop_boundary)) {
cat("\n")
cat("In addition to the default stopping rule (i.e., stop the trial if the lowest dose is eliminated), \n")
cat("the following more strict stopping safety rule will be used for extra safety: \n")
cat(" stop the trial if (1) the number of patients treated at the lowest dose >= 3 AND",
"\n", "(2) Pr(the DLT rate of the lowest dose >",
object$target, "| data) > ", object$cutoff, ",\n",
"which corresponds to the following stopping boundaries:\n")
print(object$stop_boundary)
}
else {
cat("\n")
cat("Default stopping rule: stop the trial if the lowest dose is eliminated.\n")
}
}
if (!is.null(object$next_subtrial)) {
if (is.na(object$next_subtrial) == TRUE) {
cat("No additional next subtrials are needed!!\n")
}
else {
cat("Next subtrial includes doses: ", "\n")
cat("\t\t", object$next_subtrial, "\n\n")
cat("The starting dose for this subtrial is:\n",
"\t\t", paste("(", object$starting_dose[1], ", ",
object$starting_dose[2], ")", sep = ""), "\n")
}
}
if (!is.null(object$next_dc)) {
if (is.na(object$next_dc[1]) == TRUE) {
cat("The trial experienced an early stopping.")
}
else {
cat("The recommended dose combination for the next cohort of patients is (",
object$next_dc[1], ", ", object$next_dc[2], ").",
"\n")
}
}
if (!is.null(object$MTD)) {
if (length(object$MTD) == 1) {
if (object$MTD == 99) {
cat("All tested doses are overly toxic. No MTD should be selected! \n\n")
}
else {
cat("The MTD is dose level ", object$MTD, "\n\n")
}
cat("Dose Posterior DLT 95% \n",
sep = "")
cat("Level Estimate Credible Interval Pr(toxicity>",
object$target, "|data)\n", sep = "")
for (i in 1:nrow(object$p_est)) {
cat(" ", i, " ", as.character(object$p_est[i,
2]), " ", as.character(object$p_est[i,
3]), " ", as.character(object$p_overdose[i]),
"\n")
}
cat("NOTE: no estimate is provided for the doses at which no patient was treated.\n")
}
if (length(object$MTD) >= 2) {
if (length(object$MTD) == 2) {
if (object$MTD[1, 1] == 99 && object$MTD[1, 2] ==
99) {
cat("All tested doses are overly toxic. No MTD is selected! \n")
}
else cat("The MTD is dose combination (", object$MTD[1,
1], ", ", object$MTD[1, 2], ") \n\n")
}
else {
if (length(object$MTD) == 0) {
cat("All tested doses are overly toxic. No MTD is selected! \n")
}
else {
cat("The MTD contour includes dose combinations ",
paste("(", object$MTD[, 1], ", ", object$MTD[,
2], ")", sep = ""), "\n\n")
}
}
cat("Isotonic estimates of toxicity probabilities and 95% confidence intervals for combinations are \n")
# for (i in 1:dim(object$p_est_CI)[1]) {
# cat(formatC(object$p_est_CI[i, ], digits = 2, format = "f",
# width = 5), sep = " ", "\n")
# }
print(noquote(object$p_est_CI))
cat("\n")
cat("NOTE: no estimate is provided for the doses at which no patient was treated.\n\n")
}
}
if (!is.null(object$percentstop)) {
if (!is.null(object$overdose60)) {
cat("selection percentage at each dose level (%):\n")
cat(formatC(object$selpercent, digits = 1, format = "f"),
sep = " ", "\n")
cat("average number of patients treated at each dose level:\n")
cat(formatC(object$npatients, digits = 1, format = "f"),
sep = " ", "\n")
cat("average number of toxicity observed at each dose level:\n")
cat(formatC(object$ntox, digits = 1, format = "f"),
sep = " ", "\n")
cat("average number of toxicities:", formatC(object$totaltox,
digits = 1, format = "f"), "\n")
cat("average number of patients:", formatC(object$totaln,
digits = 1, format = "f"), "\n")
cat("percentage of early stopping due to toxicity:",
formatC(object$percentstop, digits = 1, format = "f"),
"% \n")
cat("risk of overdosing (>60% of patients treated above the MTD):",
formatC(object$overdose60, digits = 1, format = "f"),
"% \n")
cat("risk of overdosing (>80% of patients treated above the MTD):",
formatC(object$overdose80, digits = 1, format = "f"),
"% \n")
}
else {
cat("selection percentage at each dose level (%):\n")
cat(formatC(object$selpercent, digits = 1, format = "f"),
sep = " ", "\n")
cat("average number of patients treated at each dose level:\n")
cat(formatC(object$npatients, digits = 1, format = "f"),
sep = " ", "\n")
cat("average number of toxicity observed at each dose level:\n")
cat(formatC(object$ntox, digits = 1, format = "f"),
sep = " ", "\n")
cat("average number of toxicities:", formatC(object$totaltox,
digits = 1, format = "f"), "\n")
cat("average number of patients:", formatC(object$totaln,
digits = 1, format = "f"), "\n")
cat("percentage of early stopping due to toxicity:",
formatC(object$percentstop, digits = 1, format = "f"),
"% \n")
}
}
if (!is.null(object$npercent) | !is.null(object$npercent.contour)) {
if (!is.null(object$npercent.contour)) {
cat("true DLT rate of dose combinations:\n")
for (i in 1:dim(object$p.true)[1]) cat(formatC(object$p.true[i,
], digits = 2, format = "f", width = 5), sep = " ",
"\n")
cat("\n")
cat("selection percentage at each dose combination (%):\n")
for (i in 1:dim(object$p.true)[1]) cat(formatC(object$selpercent[i,
], digits = 2, format = "f", width = 5), sep = " ",
"\n")
cat("\n")
cat("average number of patients treated at each dose combination:\n")
for (i in 1:dim(object$p.true)[1]) cat(formatC(object$npatients[i,
], digits = 2, format = "f", width = 5), sep = " ",
"\n")
cat("\n")
cat("average number of toxicity observed at each dose combination:\n")
for (i in 1:dim(object$p.true)[1]) cat(formatC(object$ntox[i,
], digits = 2, format = "f", width = 5), sep = " ",
"\n")
cat("\n")
cat("average number of toxicities:", formatC(object$totaltox,
digits = 1, format = "f"), "\n")
cat("average number of patients:", formatC(object$totaln,
digits = 1, format = "f"), "\n")
cat("percentage of patients treated at MTD contour:",
object$npercent.contour, "\n")
cat("percentage of patients treated above MTD contour:",
formatC(object$npercent.above.contour, digits = 1,
format = "f"), "\n")
cat("percentage of patients treated below MTD contour:",
formatC(object$npercent.below.contour, digits = 1,
format = "f"), "\n")
cat("percentage of correct selection of the MTD contour:",
formatC(object$pcs.contour, digits = 1, format = "f"),
"\n")
}
else {
cat("true DLT rate of dose combinations:\n")
for (i in 1:dim(object$p.true)[1]) {
cat(formatC(object$p.true[i, ], digits = 2, format = "f",
width = 5), sep = " ", "\n")
}
cat("\n")
cat("selection percentage at each dose combination (%):\n")
for (i in 1:dim(object$p.true)[1]) {
cat(formatC(object$selpercent[i, ], digits = 2,
format = "f", width = 5), sep = " ", "\n")
}
cat("\n")
cat("average number of patients treated at each dose combination:\n")
for (i in 1:dim(object$p.true)[1]) {
cat(formatC(object$npatients[i, ], digits = 2,
format = "f", width = 5), sep = " ", "\n")
}
cat("\n")
cat("average number of toxicity observed at each dose combination:\n")
for (i in 1:dim(object$p.true)[1]) {
cat(formatC(object$ntox[i, ], digits = 2, format = "f",
width = 5), sep = " ", "\n")
}
cat("\n")
cat("average number of toxicities:", formatC(object$totaltox,
digits = 1, format = "f"), "\n")
cat("average number of patients:", formatC(object$totaln,
digits = 1, format = "f"), "\n")
# cat("selection percentage of MTD:", formatC(object$pcs,
# digits = 1, format = "f"), "\n")
# cat("percentage of patients treated at MTD:", formatC(object$npercent,
# digits = 1, format = "f"), "\n")
cat("percentage of early stopping due to toxicity:",
formatC(object$percentstop, digits = 1, format = "f"),
"% \n")
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BOIN/R/summary.boin.R
|
if (getRversion() >= "2.15.1") utils::globalVariables(c("obs_value"))
.clean_names <- function(x) {
x <- make.unique(tolower(trimws(gsub("[[:space:]]", "_", x))))
return(x)
}
.pivot_longer_boj <- function(df) {
excl_cols <- names(df)[is.element(names(df),
c("code", "desc", "struc", "unit"))]
df <- tidyr::pivot_longer(data = df, cols = -tidyselect::all_of(excl_cols),
names_to = "date", values_to = "obs_value")
df <- dplyr::mutate(df, obs_value = as.numeric(obs_value))
return(df)
}
#' Download and parse a list of available BOJ data sets
#'
#' @param url URL of the BOJ's Time-Series Data portal flat files page
#' (optional).
#'
#' @return A tibble data frame
#' @export
#'
#' @examples
#' \dontrun{
#' datasets <- get_boj_datasets()
#' }
get_boj_datasets <- function(
url = "https://www.stat-search.boj.or.jp/info/dload_en.html") {
tbl <- tryCatch({
# Download webpage
page <- xml2::read_html(url)
nodes <- rvest::html_nodes(page, xpath = "//a[contains(@href, 'zip')]")
# Get file name and path to file
item_urls <- paste0("https://www.stat-search.boj.or.jp/info/",
(rvest::html_attr(nodes, "href")))
item_names <- sub(".*\\/(.*?) *\\.zip*", "\\1", item_urls)
item_descs <- gsub(".zip", "", rvest::html_text(nodes))
# Return tibble
tbl <- dplyr::tibble(desc = item_descs,
name = item_names,
url = item_urls)
if (nrow(tbl) == 0) {
message(paste("Unable to download and parse BOJ homepage:", url))
message("The resource is unavailable or has changed.")
}
tbl
},
error = function(x) {
message(paste("Unable to download and parse BOJ homepage:", url))
message("The resource is unavailable or has changed.")
message("Original error message:")
message(x)
return(NA)
},
warning = function(x) {
message(paste("Unable to download and parse BOJ homepage:", url))
message("The resource is unavailable or has changed.")
message("Original warning message:")
message(x)
return(NA)
}
)
return(tbl)
}
#' Download and parse a BOJ data set
#'
#' @param url URL of the data set to be imported (usually obtained through
#' \code{get_boj_datasets()})
#' @param ... Arguments passed to \code{download.file()} (e.g.
#' \code{quiet = TRUE})
#'
#' @return A tibble data frame
#' @export
#'
#' @examples
#' \dontrun{
#' datasets <- get_boj_datasets()
#' df <- get_boj(datasets$url[(datasets$name == "sppi_q_en")])
#' }
get_boj <- function(url, ...) {
# Get file name
file_name <- sub(".*\\/(.*?) *\\.zip*", "\\1", url)
# Download data
tmp_dir <- tempdir()
tmp_file <- tempfile(fileext = ".zip")
tryCatch({
# Download file
utils::download.file(url, tmp_file, mode = "wb", ...)
# Unpack zip file
filename <- utils::unzip(tmp_file, list = TRUE)
utils::unzip(tmp_file, exdir = tmp_dir)
path <- file.path(tmp_dir, filename$Name)
# Read data into a list of tibble data frames
df <- list()
i <- 0
# One tibble data frame per file
while (i < length(path)) {
i <- i + 1
df[[i]] <- readr::read_csv(path[[i]], col_names = FALSE,
show_col_types = FALSE,
na = c("", "NA", "ND"))
# Distinguish between wide and long data sets
if (!is.element(file_name, c("fof", "co", "colease"))) {
# Wide data (horizontal)
nms <- as.character(df[[i]][1, ])
if (!is.element(file_name, c("bp_m_en", "regbp_q_en", "qiip_q_en",
"iip_cy_en"))) {
# Three columns
if (is.element(file_name, c("bis1-1_q_en", "bis1-2_q_en",
"bis2-1_q_en", "bis2-2_q_en"))) {
nms[1:3] <- c("code", "struc", "unit")
} else {
nms[1:3] <- c("code", "desc", "struc")
}
} else {
# Four columns
nms[1:4] <- c("code", "desc", "struc", "unit")
}
names(df[[i]]) <- .clean_names(nms)
df[[i]] <- df[[i]][-1, ]
df[[i]] <- .pivot_longer_boj(df[[i]])
} else {
# Long data (vertical)
names(df[[i]]) <- c("code", "freq", "date", "obs_value")
}
if (nrow(df[[i]]) == 0) {
message(paste("Unable to download and parse BOJ homepage:", url))
message("The resource is unavailable or has changed.")
}
}
# If there is only one tibble data frame, return as single object
if (length(df) < 2) {
df <- df[[1]]
}
df
},
error = function(x) {
message(paste("Unable to download and parse file:", url))
message("The resource is unavailable or has changed.")
message("Original error message:")
message(x)
return(NA)
},
warning = function(x) {
message(paste("Unable to download and parse file:", url))
message("The resource is unavailable or has changed.")
message("Original warning message:")
message(x)
return(NA)
}
)
return(df)
}
|
/scratch/gouwar.j/cran-all/cranData/BOJ/R/BOJ.R
|
## ----setup, echo = FALSE------------------------------------------------------
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
collapse = TRUE,
fig.width = 6,
fig.height = 4
)
## ----loading------------------------------------------------------------------
library("BOJ")
## ----datasets, message=FALSE, warning=FALSE-----------------------------------
datasets <- get_boj_datasets()
datasets
## ----sppi, message=FALSE, warning=FALSE---------------------------------------
sppi <- get_boj(datasets$url[(datasets$name == "sppi_m_en")])
sppi
## ----plot, message=FALSE, warning=FALSE---------------------------------------
library("dplyr")
library("ggplot2")
library("zoo")
sppi_plot <- subset(sppi, code %in% c("PRCS15_5200000000", "PRCS15_5200010001",
"PRCS15_5200010002", "PRCS15_5200010003",
"PRCS15_5200010004", "PRCS15_5200010005",
"PRCS15_5200010006", "PRCS15_5200010007"))
sppi_plot <- mutate(sppi_plot, date = as.Date(as.yearmon(date, format = "%Y%m")))
sppi_plot <- mutate(sppi_plot, struc = gsub("^Major group/ ", "", struc))
sppi_plot <- subset(sppi_plot, !is.na(obs_value))
ggplot(sppi_plot, aes(x = date, y = obs_value)) +
geom_line(aes(colour = struc)) +
labs(x = "Date", y = "Services Producer Price Index (2015 base)") +
theme(legend.title = element_blank())
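## ----timeformats, message=FALSE, warning=FALSE, eval=FALSE--------------------
# # Hypothetical date strings for illustration; adjust the format to the data set
# as.Date(zoo::as.yearmon("202001", format = "%Y%m"))
# as.Date(zoo::as.yearqtr("2020Q1", format = "%YQ%q"))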
|
/scratch/gouwar.j/cran-all/cranData/BOJ/inst/doc/BOJ.R
|
---
title: "BOJ"
author: "Stefan Angrick"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{BOJ}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r setup, echo = FALSE}
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
collapse = TRUE,
fig.width = 6,
fig.height = 4
)
```
The `BOJ` package provides an `R` interface to [Bank of Japan](https://www.boj.or.jp/) statistics, specifically the [flat files](https://www.stat-search.boj.or.jp/info/dload_en.html) available on the [BOJ Time-Series Data](https://www.stat-search.boj.or.jp/) portal.
## Import data
To import data, first load the package:
```{r loading}
library("BOJ")
```
Next, run the `get_boj_datasets()` function to obtain a list of available data sets:
```{r datasets, message=FALSE, warning=FALSE}
datasets <- get_boj_datasets()
datasets
```
The function returns a [tibble](https://tibble.tidyverse.org/) data frame listing the available data sets. The column `url` can be used as input for the function `get_boj()`, which downloads, parses and imports the corresponding data.
To import monthly-frequency data on Japan's [Services Producer Price Index](https://www.boj.or.jp/en/statistics/pi/sppi_2015/index.htm/), run:
```{r sppi, message=FALSE, warning=FALSE}
sppi <- get_boj(datasets$url[(datasets$name == "sppi_m_en")])
sppi
```
To plot the data using [ggplot2](https://ggplot2.tidyverse.org), run the following:
```{r plot, message=FALSE, warning=FALSE}
library("dplyr")
library("ggplot2")
library("zoo")
sppi_plot <- subset(sppi, code %in% c("PRCS15_5200000000", "PRCS15_5200010001",
"PRCS15_5200010002", "PRCS15_5200010003",
"PRCS15_5200010004", "PRCS15_5200010005",
"PRCS15_5200010006", "PRCS15_5200010007"))
sppi_plot <- mutate(sppi_plot, date = as.Date(as.yearmon(date, format = "%Y%m")))
sppi_plot <- mutate(sppi_plot, struc = gsub("^Major group/ ", "", struc))
sppi_plot <- subset(sppi_plot, !is.na(obs_value))
ggplot(sppi_plot, aes(x = date, y = obs_value)) +
geom_line(aes(colour = struc)) +
labs(x = "Date", y = "Services Producer Price Index (2015 base)") +
theme(legend.title = element_blank())
```
Note that BOJ data sets come with a number of different time formats. The [zoo](https://cran.r-project.org/package=zoo) package (e.g. `as.yearmon()`) should be able to parse most formats.
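For instance, assuming a hypothetical monthly code `"202001"` and a quarterly code `"2020Q1"` (actual formats vary by data set), the conversion might look as follows:
```{r timeformats, message=FALSE, warning=FALSE, eval=FALSE}
# Hypothetical date strings for illustration; adjust the format to the data set
as.Date(zoo::as.yearmon("202001", format = "%Y%m"))
as.Date(zoo::as.yearqtr("2020Q1", format = "%YQ%q"))
```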
## Note
This package is in no way officially related to or endorsed by the [Bank of Japan](https://www.boj.or.jp/). It was inspired by the [BIS R package](https://github.com/expersso/BIS). Please don't abuse the BOJ's servers with unnecessary calls.
|
/scratch/gouwar.j/cran-all/cranData/BOJ/vignettes/BOJ.Rmd
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
guideQR <- function(x, y, maxstep, full) {
.Call('_BOSSreg_guideQR', PACKAGE = 'BOSSreg', x, y, maxstep, full)
}
|
/scratch/gouwar.j/cran-all/cranData/BOSSreg/R/RcppExports.R
|
#' Best Orthogonalized Subset Selection (BOSS).
#'
#'\itemize{
#' \item Compute the solution path of BOSS and forward stepwise selection (FS).
#' \item Compute various information criteria based on a heuristic degrees of freedom (hdf)
#' that can serve as the selection rule to choose the subset given by BOSS.
#'}
#' @param x A matrix of predictors, with \code{nrow(x)=length(y)=n} observations and
#' \code{ncol(x)=p} predictors. Intercept shall NOT be included.
#' @param y A vector of response variable, with \code{length(y)=n}.
#' @param maxstep Maximum number of steps performed. Default is \code{min(n-1,p)} if \code{intercept=FALSE},
#' and it is \code{min(n-2, p)} otherwise.
#' @param intercept Logical, whether to include an intercept term. Default is TRUE.
#' @param hdf.ic.boss Logical, whether to calculate the heuristic degrees of freedom (hdf)
#' and information criteria (IC) for BOSS. IC includes AIC, BIC, AICc, BICc, GCV,
#' Cp. Default is TRUE.
#' @param mu True mean vector, used in the calculation of hdf. Default is NULL, and is estimated via
#' least-squares (LS) regression of y upon x for n>p, and 10-fold CV cross-validated lasso estimate for n<=p.
#' @param sigma True standard deviation of the error, used in the calculation of hdf. Default is NULL,
#' and is estimated via least-squares (LS) regression of y upon x for n>p, and 10-fold cross-validated lasso
#' for n<=p.
#' @param ... Extra parameters to allow flexibility. Currently none is allowed or required;
#' the argument exists for the convenience of calls from parent functions like \code{cv.boss}.
#'
#' @return
#' \itemize{
#' \item beta_fs: A matrix of regression coefficients for all the subsets given by FS,
#' from a null model until stop, with \code{nrow=p} and \code{ncol=min(n,p)+1}, where \code{min(n,p)} is
#' the maximum number of steps performed.
#' \item beta_boss: A matrix of regression coefficients for all the subsets given by
#' BOSS, with \code{nrow=p} and \code{ncol=min(n,p)+1}. Note that unlike beta_fs and due to the nature of BOSS,
#' the number of non-zero components in columns of beta_boss may not be unique, i.e.
#' there may be multiple columns corresponding to the same subset size.
#' \item steps_x: A vector of numbers representing which predictor joins at each step,
#' with \code{length(steps)=min(n,p)}. The ordering is determined by the partial correlation between a predictor \eqn{x_j}
#' and the response \code{y}.
#' \item steps_q: A vector of numbers representing which predictor joins at each step in the orthogonal basis,
#' with \code{length(steps)=min(n,p)}. BOSS takes the ordered predictors (ordering given in \code{steps_x}) and performs best
#' subset regression upon their orthogonal basis, which is essentially ordering the orthogonalized predictors by their
#' marginal correlations with the response \code{y}. For example, \code{steps_q=c(2,1)} indicates that the orthogonal basis of
#' \code{x_2} joins first.
#' \item hdf_boss: A vector of heuristic degrees of freedom (hdf) for BOSS, with
#' \code{length(hdf_boss)=p+1}. Note that \code{hdf_boss=NULL} if n<=p or \code{hdf.ic.boss=FALSE}.
#' \item IC_boss: A list of information criteria (IC) for BOSS, where each element
#' in the list is a vector representing values of a given IC for each candidate subset
#' of BOSS (or each column in beta_boss). The output IC includes AIC, BIC, AICc, BICc,
#' GCV and Mallows' Cp. Note that each IC is calculated by plugging in hdf_boss.
#' \item sigma: estimated error standard deviation. It is only returned when hdf is calculated, i.e. \code{hdf.ic.boss=TRUE}.
#'
#' }
#'
#' @details This function computes the full solution path given by BOSS and FS on a given
#' dataset (x,y) with n observations and p predictors. It also calculates
#' the heuristic degrees of freedom for BOSS, and various information criteria, which can further
#' be used to select the subset from the candidates. Please refer to the Vignette
#' for implementation details and Tian et al. (2021) for methodology details (links are given below).
#'
#' @author Sen Tian
#' @references
#' \itemize{
#' \item Tian, S., Hurvich, C. and Simonoff, J. (2021), On the Use of Information Criteria
#' for Subset Selection in Least Squares Regression. https://arxiv.org/abs/1911.10191
#' \item Reid, S., Tibshirani, R. and Friedman, J. (2016), A Study of Error Variance Estimation in Lasso Regression. Statistica Sinica,
#' P35-67, JSTOR.
#' \item BOSSreg Vignette https://github.com/sentian/BOSSreg/blob/master/r-package/vignettes/BOSSreg.pdf
#' }
#' @seealso \code{predict} and \code{coef} methods for "boss" object, and the \code{cv.boss} function
#' @example R/example/eg.boss.R
#' @useDynLib BOSSreg
#' @importFrom Rcpp sourceCpp
#' @export
boss <- function(x, y, maxstep=min(nrow(x)-intercept-1, ncol(x)), intercept=TRUE, hdf.ic.boss=TRUE, mu=NULL, sigma=NULL, ...){
n = dim(x)[1]
p = dim(x)[2]
if(maxstep > min(nrow(x)-intercept-1, ncol(x))){
warning('Specified maximum number of steps is larger than expected.')
maxstep = min(nrow(x)-intercept-1, ncol(x))
}
if(!is.null(dim(y))){
if(dim(y)[2] == 1){
y = as.numeric(y)
}else{
stop('Multiple dependent variables are not supported.')
}
}
# standardize x (mean 0 and norm 1) and y (mean 0)
std_result = std(x, y, intercept)
x = std_result$x_std
y = std_result$y_std
mean_x = std_result$mean_x
mean_y = std_result$mean_y
  sd_demeanedx = std_result$sd_demeanedx
# if stops early, still calculate the full QR decomposition (for steps>maxstep, just use predictors in their physical orders)
# for the calculation of hdf
if(hdf.ic.boss & maxstep < p){
guideQR_result = guideQR(x, y, maxstep, TRUE)
Q = guideQR_result$Q[, 1:maxstep]
R = guideQR_result$R[1:maxstep, 1:maxstep]
}else{
guideQR_result = guideQR(x, y, maxstep, FALSE)
Q = guideQR_result$Q
R = guideQR_result$R
}
steps_x = as.numeric(guideQR_result$steps)
# coefficients
z = t(Q) %*% y
# transform coefficients in Q space back to X space, and re-order them
trans.q.to.x <- function(beta.q){
beta.x = Matrix::Matrix(0, nrow=p, ncol=maxstep, sparse = TRUE)
    beta.x[steps_x, ] = diag(1/sd_demeanedx[steps_x]) %*% backsolve(R, beta.q)
beta.x = cbind(0, beta.x)
if(intercept){
beta.x = rbind(Matrix::Matrix(mean_y - mean_x %*% beta.x, sparse=TRUE), beta.x)
}
return(beta.x)
}
# fs
beta_q = matrix(rep(z, maxstep), nrow=maxstep, byrow=FALSE)
beta_q = beta_q * upper.tri(beta_q, diag=TRUE)
beta_fs = trans.q.to.x(beta_q)
# boss
order_q = order(-z^2)
steps_q = steps_x[order_q]
row_i = rep(order_q, times=seq(maxstep,1))
col_j = unlist(lapply(1:maxstep, function(xx){seq(xx,maxstep)}))
beta_q = Matrix::sparseMatrix(row_i, col_j, x=z[row_i], dims=c(maxstep, maxstep))
beta_boss = trans.q.to.x(beta_q)
# hdf and IC
if(!hdf.ic.boss){
hdf = IC_result = NULL
}else{
if(n > p){
hdf_result = calc.hdf(guideQR_result$Q, y, sigma, mu, x=NULL)
}else{
hdf_result = calc.hdf(guideQR_result$Q, y, sigma, mu, x)
}
hdf = hdf_result$hdf[1:(maxstep+1)]
sigma = hdf_result$sigma
if(intercept){
hdf = hdf + 1
}
IC_result = calc.ic.all(cbind(0,beta_q), Q, y, hdf, sigma)
}
# take care the variable names
varnames = colnames(x)
if(is.null(varnames)){
varnames = paste0('X', seq(1,p))
}
if(intercept){
rownames(beta_fs) = rownames(beta_boss) = c('intercept', varnames)
}else{
rownames(beta_fs) = rownames(beta_boss) = varnames
}
names(steps_x) = varnames[steps_x]
names(steps_q) = varnames[steps_q]
# output
out = list(beta_fs=beta_fs,
beta_boss=beta_boss,
steps_x=steps_x,
steps_q=steps_q,
hdf_boss=hdf,
IC_boss=IC_result,
sigma=sigma,
call=list(intercept=intercept))
class(out) = 'boss'
invisible(out)
}
#' Select coefficient vector(s) for BOSS.
#'
#' This function returns the optimal coefficient vector of BOSS selected by AICc
#' (by default) or other types of information criterion.
#'
#' @param object The boss object, returned from calling the \code{boss} function.
#' @param ic Which information criterion is used to select the optimal coefficient vector for BOSS.
#' The default is AICc-hdf.
#' @param select.boss The index (or indices) of columns in the coefficient matrix
#' that one wants to select. By default (NULL) the column is selected by the
#' information criterion specified in 'ic'.
#' @param ... Extra arguments (unused for now)
#'
#' @return The chosen coefficient vector(s) for BOSS.
#'
#' @details If \code{select.boss} is specified, the function returns
#' corresponding column(s) in the coefficient matrix.
#'
#' If \code{select.boss} is unspecified, the function returns the optimal coefficient
#' vector selected by AICc-hdf (other choice of IC can be specified in the argument \code{ic}).
#'
#' @example R/example/eg.boss.R
#' @importFrom stats coef
#' @export
coef.boss <- function(object, ic=c('aicc','bicc','aic','bic','gcv','cp'), select.boss=NULL, ...){
# for boss, the default is to return coef selected by AICc
if(is.null(select.boss)){
if(is.null(object$IC_boss)){
# this is where hdf.ic.boss is flagged FALSE
warning("boss was called with argument 'hdf.ic.boss=FALSE', the full coef matrix is returned here")
select.boss = 1:ncol(object$beta_boss)
}else{
ic = match.arg(ic)
select.boss = which.min(object$IC_boss[[ic]])
}
}else if(select.boss == 0){
    select.boss = 1:ncol(object$beta_boss)
}
select.boss[select.boss > ncol(object$beta_boss)] = ncol(object$beta_boss)
beta_boss_opt = object$beta_boss[, select.boss, drop=FALSE]
return(beta_boss_opt)
}
#' Prediction given new data entries.
#'
#' This function returns the prediction(s) given new observation(s), for BOSS,
#' where the optimal coefficient vector is chosen via certain selection rule.
#'
#' @param object The boss object, returned from calling 'boss' function.
#' @param newx A new data entry or several entries. It can be a vector, or a matrix with
#' \code{nrow(newx)} being the number of new entries and \code{ncol(newx)=p} being the
#' number of predictors. The function takes care of the intercept, NO need to add \code{1}
#' to \code{newx}.
#' @param ... Extra arguments to be plugged into \code{coef}, such as \code{select.boss},
#' see the description of \code{coef.boss} for more details.
#'
#' @return The prediction(s) for BOSS.
#'
#' @details The function basically calculates \eqn{x * coef}, where \code{coef}
#' is a coefficient vector chosen by a selection rule. See more details about the default
#' and available choices of the selection rule in the description of \code{coef.boss}.
#'
#' @example R/example/eg.boss.R
#' @importFrom stats predict
#' @export
predict.boss <- function(object, newx, ...){
# coefficients
# coef_result = coef(object, ...)
# beta_fs_opt = coef_result$fs
# beta_boss_opt = coef_result$boss
beta_boss_opt = coef(object, ...)
# make newx a matrix
# if newx is an array or a column vector, make it a row vector
if(is.null(dim(newx))){
newx = matrix(newx, nrow=1)
}else if(dim(newx)[2] == 1){
newx = t(newx)
}
# if intercept, add 1 to newx
if(object$call$intercept){
newx = cbind(rep(1,nrow(newx)), newx)
}
# check the dimension
# if(ncol(newx) != nrow(beta_fs_opt)){
# stop('Mismatch dimension of newx and coef for FS. Note do NOT add 1 to newx when intercept=TRUE')
# }else{
# mu_fs_opt = newx %*% beta_fs_opt
# }
if(is.null(beta_boss_opt)){
mu_boss_opt = NULL
}else{
if(ncol(newx) != nrow(beta_boss_opt)){
stop('Mismatch dimension of newx and coef for BOSS. Note do NOT add 1 to newx when intercept=TRUE')
}else{
mu_boss_opt = newx %*% beta_boss_opt
}
}
# return(list(fs=mu_fs_opt, boss=mu_boss_opt))
return(mu_boss_opt)
}
|
/scratch/gouwar.j/cran-all/cranData/BOSSreg/R/boss.R
|
#' Cross-validation for Best Orthogonalized Subset Selection (BOSS) and Forward Stepwise Selection (FS).
#'
#' @param x A matrix of predictors, see \code{boss}.
#' @param y A vector of response variable, see \code{boss}.
#' @param maxstep Maximum number of steps performed. Default is \code{min(n-1,p)} if \code{intercept=FALSE},
#' and it is \code{min(n-2, p)} otherwise.
#' @param intercept Logical, whether to fit an intercept term. Default is TRUE.
#' @param n.folds The number of cross validation folds. Default is 10.
#' @param n.rep The number of replications of cross validation. Default is 1.
#' @param show.warning Whether to display a warning if CV is only performed for a subset of candidates.
#' e.g. when n<p and 10-fold. Default is TRUE.
#' @param ... Arguments to \code{boss}, such as \code{hdf.ic.boss}.
#'
#' @return
#' \itemize{
#' \item boss: An object \code{boss} that fits on the full dataset.
#' \item n.folds: The number of cross validation folds.
#' \item cvm.fs: Mean OOS deviance for each candidate given by FS.
#'   \item cvm.boss: Mean OOS deviance for each candidate given by BOSS.
#' \item i.min.fs: The index of minimum cvm.fs.
#' \item i.min.boss: The index of minimum cvm.boss.
#' }
#'
#' @details This function fits BOSS and FS (\code{boss}) on the full dataset, and performs \code{n.folds}
#' cross-validation. The cross-validation process can be repeated \code{n.rep} times to evaluate the
#' out-of-sample (OOS) performance for the candidate subsets given by both methods.
#'
#' @author Sen Tian
#' @references
#' \itemize{
#' \item Tian, S., Hurvich, C. and Simonoff, J. (2021), On the Use of Information Criteria
#' for Subset Selection in Least Squares Regression. https://arxiv.org/abs/1911.10191
#' \item BOSSreg Vignette https://github.com/sentian/BOSSreg/blob/master/r-package/vignettes/BOSSreg.pdf
#' }
#' @seealso \code{predict} and \code{coef} methods for \code{cv.boss} object, and the \code{boss} function
#' @example R/example/eg.cv.boss.R
#' @export
cv.boss <- function(x, y, maxstep=min(nrow(x)-intercept-1, ncol(x)), intercept=TRUE, n.folds=10, n.rep=1, show.warning=TRUE, ...){
# # arguments
argu = list(...)
# argu_boss = c('intercept', 'hdf.ic.boss') # arguments that boss accepts
# # arguments that user specify but unused
# argu_unused = setdiff(names(argu), argu_boss)
# if(length(argu_unused) > 0){
# warning(paste(argu_unused, ' are not valid arguments for boss, check spelling maybe?', sep=''))
# }
# overide hdf.ic.boss option in '...', to be used in CV
# boss.nohdf <- function(x, y, intercept, hdf.ic.boss) boss(x, y, intercept, hdf.ic.boss=FALSE)
# start the CV process
n = dim(x)[1]
p = dim(x)[2]
maxstep_tmp = maxstep
maxstep = trunc(min(n - n/n.folds - 1, maxstep_tmp))
if(maxstep < maxstep_tmp & show.warning){
warning(paste0('Subsets up to size ', maxstep,
' are evaluated by CV. Increase the number of folds to incorporate larger subsets.'))
}
# matrix to store the CV error
cv_rep_boss = cv_rep_fs = matrix(NA, nrow=n.rep, ncol=maxstep+1)
for(replication in 1:n.rep){
fold.index = sample(rep(1:n.folds, length.out=n)) # randomly assign a fold to each observation
cv_tmp_boss = cv_tmp_fs = matrix(NA, nrow=n.folds, ncol=maxstep+1)
for(fold in 1:n.folds){
# split the training and testing sets
test.index = which(fold.index==fold)
x.test = x[test.index, , drop=FALSE]
y.test = y[test.index]
x.train = x[-test.index, , drop=FALSE]
y.train = y[-test.index]
boss_result = boss(x.train, y.train, maxstep, intercept, hdf.ic.boss=FALSE)
beta_fs = boss_result$beta_fs
beta_boss = boss_result$beta_boss
# if intercept
if(intercept){
x.test = cbind(rep(1,nrow(x.test)), x.test)
}
cv_tmp_fs[fold, ] = Matrix::colMeans(sweep(x.test%*%beta_fs, 1, y.test, '-')^2)
cv_tmp_boss[fold, ] = Matrix::colMeans(sweep(x.test%*%beta_boss, 1, y.test, '-')^2)
}
cv_rep_fs[replication, ] = Matrix::colMeans(cv_tmp_fs)
cv_rep_boss[replication, ] = Matrix::colMeans(cv_tmp_boss)
}
cv_fs = Matrix::colMeans(cv_rep_fs)
cv_boss = Matrix::colMeans(cv_rep_boss)
# fit on the full sample
boss_result = boss(x, y, maxstep_tmp, intercept, ...)
# output
out = list(boss=boss_result,
n.folds=n.folds,
cvm.fs=cv_fs,
cvm.boss=cv_boss,
i.min.fs=which.min(cv_fs),
i.min.boss=which.min(cv_boss),
call=list(intercept=intercept))
class(out) = 'cv.boss'
invisible(out)
}
#' Select coefficient vector based on cross-validation for BOSS or FS.
#'
#' This function returns the coefficient vector that minimizes the out-of-sample (OOS)
#' cross-validation score.
#'
#' @param object The cv.boss object, returned from calling \code{cv.boss} function.
#' @param method It can either be 'fs' or 'boss'. The default is 'boss'.
#' @param ... Extra arguments (unused for now).
#'
#' @return The chosen coefficient vector for BOSS or FS.
#'
#' @example R/example/eg.cv.boss.R
#' @importFrom stats coef
#' @export
coef.cv.boss <- function(object, method=c('boss', 'fs'), ...){
# coef_result = coef(object$boss, select.fs=object$i.min.fs, select.boss=object$i.min.boss)
# beta_fs_opt = coef_result$fs
# beta_boss_opt = coef_result$boss
# return(list(fs=beta_fs_opt, boss=beta_boss_opt))
if(match.arg(method) == 'fs'){
beta_fs_opt = object$boss$beta_fs[, object$i.min.fs, drop=FALSE]
return(beta_fs_opt)
}else{
beta_boss_opt = coef(object$boss, select.boss=object$i.min.boss)
return(beta_boss_opt)
}
}
#' Prediction given new data entries.
#'
#' This function returns the prediction(s) given new observation(s) for BOSS or FS,
#' where the optimal coefficient vector is chosen via cross-validation.
#'
#' @param object The cv.boss object, returned from calling \code{cv.boss} function.
#' @param newx A new data entry or several entries. It can be a vector, or a matrix with
#' \code{nrow(newx)} being the number of new entries and \code{ncol(newx)=p} being the
#' number of predictors. The function takes care of the intercept, NO need to add \code{1}
#' to \code{newx}.
#' @param ... Extra arguments to be plugged into \code{coef}, such as \code{method},
#' see the description of \code{coef.cv.boss} for more details.
#'
#' @return The prediction for BOSS or FS.
#'
#' @example R/example/eg.cv.boss.R
#' @importFrom stats predict
#' @export
predict.cv.boss <- function(object, newx, ...){
# predict_result = predict(object$boss, newx, select.fs=object$i.min.fs, select.boss=object$i.min.boss)
# mu_fs_opt = predict_result$fs
# mu_boss_opt = predict_result$boss
# return(list(fs=mu_fs_opt, boss=mu_boss_opt))
# make newx a matrix
# if newx is an array or a column vector, make it a row vector
if(is.null(dim(newx))){
newx = matrix(newx, nrow=1)
}else if(dim(newx)[2] == 1){
newx = t(newx)
}
# if intercept, add 1 to newx
if(object$call$intercept){
newx = cbind(rep(1,nrow(newx)), newx)
}
beta_opt = coef(object, ...)
# check the dimension
if(ncol(newx) != nrow(beta_opt)){
stop('Mismatch dimension of newx and coef. Note do NOT add 1 to newx when intercept=TRUE')
}else{
mu_opt = newx %*% beta_opt
}
return(mu_opt)
}
|
/scratch/gouwar.j/cran-all/cranData/BOSSreg/R/cv.boss.R
|
## Generate a trivial dataset, X has mean 0 and norm 1, y has mean 0
set.seed(11)
n = 20
p = 5
x = matrix(rnorm(n*p), nrow=n, ncol=p)
x = scale(x, center = colMeans(x))
x = scale(x, scale = sqrt(colSums(x^2)))
beta = c(1, 1, 0, 0, 0)
y = x%*%beta + scale(rnorm(n, sd=0.01), center = TRUE, scale = FALSE)
## Fit the model
boss_result = boss(x, y)
## Get the coefficient vector selected by AICc-hdf (S3 method for boss)
beta_boss_aicc = coef(boss_result)
# the above is equivalent to the following
beta_boss_aicc = boss_result$beta_boss[, which.min(boss_result$IC_boss$aicc), drop=FALSE]
## Get the fitted values of BOSS-AICc-hdf (S3 method for boss)
mu_boss_aicc = predict(boss_result, newx=x)
# the above is equivalent to the following
mu_boss_aicc = cbind(1,x) %*% beta_boss_aicc
## Repeat the above process, but using Cp-hdf instead of AICc-hdf
## coefficient vector
beta_boss_cp = coef(boss_result, ic='cp')
beta_boss_cp = boss_result$beta_boss[, which.min(boss_result$IC_boss$cp), drop=FALSE]
## fitted values of BOSS-Cp-hdf
mu_boss_cp = predict(boss_result, newx=x, ic='cp')
mu_boss_cp = cbind(1,x) %*% beta_boss_cp
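## A brief added sketch: compare where each information criterion attains its
## minimum (IC_boss is a list with components aicc, bicc, aic, bic, gcv, cp)
sapply(boss_result$IC_boss, which.min)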
|
/scratch/gouwar.j/cran-all/cranData/BOSSreg/R/example/eg.boss.R
|
## Generate a trivial dataset, X has mean 0 and norm 1, y has mean 0
set.seed(11)
n = 20
p = 5
x = matrix(rnorm(n*p), nrow=n, ncol=p)
x = scale(x, center = colMeans(x))
x = scale(x, scale = sqrt(colSums(x^2)))
beta = c(1, 1, 0, 0, 0)
y = x%*%beta + scale(rnorm(20, sd=0.01), center = TRUE, scale = FALSE)
## Perform 10-fold CV without replication
boss_cv_result = cv.boss(x, y)
## Get the coefficient vector of BOSS that gives minimum CV OOS score (S3 method for cv.boss)
beta_boss_cv = coef(boss_cv_result)
# the above is equivalent to
boss_result = boss_cv_result$boss
beta_boss_cv = boss_result$beta_boss[, boss_cv_result$i.min.boss, drop=FALSE]
## Get the fitted values of BOSS-CV (S3 method for cv.boss)
mu_boss_cv = predict(boss_cv_result, newx=x)
# the above is equivalent to
mu_boss_cv = cbind(1,x) %*% beta_boss_cv
## Get the coefficient vector of FS that gives minimum CV OOS score (S3 method for cv.boss)
beta_fs_cv = coef(boss_cv_result, method='fs')
## Get the fitted values of FS-CV (S3 method for cv.boss)
mu_fs_cv = predict(boss_cv_result, newx=x, method='fs')
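## A brief added sketch: the CV setup can be tuned, e.g. repeat 5-fold CV
## three times to stabilize the out-of-sample error curve
boss_cv_rep = cv.boss(x, y, n.folds=5, n.rep=3)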
|
/scratch/gouwar.j/cran-all/cranData/BOSSreg/R/example/eg.cv.boss.R
|
## Generate a trivial dataset, X has mean 0 and norm 1, y has mean 0
set.seed(11)
n = 20
p = 5
x = matrix(rnorm(n*p), nrow=n, ncol=p)
x = scale(x, center = colMeans(x))
x = scale(x, scale = sqrt(colSums(x^2)))
beta = c(1, 1, 0, 0, 0)
y = x%*%beta + scale(rnorm(20, sd=0.01), center = TRUE, scale = FALSE)
## Fit the model
boss_result = boss(x, y)
## Print the values of AICc-hdf for all subsets given by BOSS
print(boss_result$IC_boss$aicc)
## calculate them manually using the calc.ic function
y_hat = cbind(rep(1,n),x)%*%boss_result$beta_boss
print(calc.ic(y_hat, y, df=boss_result$hdf_boss))
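## A brief added sketch: the same fits under other IC choices; sigma is needed
## only for Mallows' Cp (boss returns its estimate as $sigma)
print(calc.ic(y_hat, y, ic='bic', df=boss_result$hdf_boss))
print(calc.ic(y_hat, y, ic='cp', df=boss_result$hdf_boss, sigma=boss_result$sigma))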
|
/scratch/gouwar.j/cran-all/cranData/BOSSreg/R/example/eg.ic.R
|
#' Calculate an information criterion.
#'
#' Calculate a specified information criterion (IC) for an estimate or a group of estimates.
#' The choices of IC include AIC, BIC, AICc, BICc, GCV and Mallows' Cp.
#'
#' @param y_hat A vector of fitted values with \code{length(y_hat)=length(y)=n}, or
#' a matrix, with \code{nrow(y_hat)=length(y)=n} and \code{ncol(y_hat)=m}, containing m different fits.
#' @param y A vector of response variable, with \code{length(y)=n}.
#' @param ic A specified IC to calculate. Default is AICc ('aicc'). Other choices include AIC ('aic'),
#' BIC ('bic'), BICc ('bicc'), GCV ('gcv') and Mallows' Cp ('cp').
#' @param df A number if y_hat is a vector, or a vector with \code{length(df)=ncol(y_hat)=m} if y_hat is
#' a matrix. df represents the degrees of freedom for each fit.
#' @param sigma Standard deviation of the error term. It only needs to be specified if the argument \code{ic='cp'}.
#'
#' @return The value(s) of the specified IC for each fit.
#'
#' @details This function enables the computation of various common IC for model fits, which can
#' further be used to choose the optimal fit. This allows the user to compare the effects of different IC.
#' In order to calculate an IC, the degrees of freedom (df) need to be specified. To be more specific,
#' here are the formulas used to calculate each IC:
#'
#' \deqn{AIC = \log(\frac{RSS}{n}) + 2\frac{df}{n}}{AIC = log(RSS/n) + 2*df/n}
#' \deqn{BIC = \log(\frac{RSS}{n}) + \log(n)\frac{df}{n}}{BIC = log(RSS/n) + log(n)*df/n}
#' \deqn{AICc = \log(\frac{RSS}{n}) + 2\frac{df+1}{n-df-2}}{AICc = log(RSS/n) + 2*(df+1)/(n-df-2)}
#' \deqn{BICc = \log(\frac{RSS}{n}) + \log(n)\frac{df+1}{n-df-2}}{BICc = log(RSS/n) + log(n)*(df+1)/(n-df-2)}
#' \deqn{GCV = \frac{RSS}{(n-df)^2}}{GCV = RSS/(n-df)^2}
#' \deqn{Mallows' Cp = RSS + 2\times \sigma^2 \times df}{Cp = RSS + 2*sigma^2*df}
#'
#' @author Sen Tian
#' @example R/example/eg.ic.R
#' @export
calc.ic <- function(y_hat, y, ic=c('aicc','bicc','aic','bic','gcv','cp'), df, sigma=NULL){
# match the argument
ic = match.arg(ic)
# unify dimensions
y = matrix(y, ncol=1)
df = matrix(df, nrow=1)
if(is.null(dim(y_hat))){
y_hat = matrix(y_hat, ncol=1)
}else if(dim(y_hat)[1]==1){
y_hat = matrix(y_hat, ncol=1)
}
# sanity check
if(ncol(y_hat) != ncol(df)){
stop('the number of fits does not match the number of df')
}
if(ic=='cp' & is.null(sigma)){
stop("need to specify sigma for Mallow's Cp")
}
n = nrow(y)
nfit = ncol(y_hat)
  # for AICc and BICc, df >= n-2 makes the penalty term undefined or negative; cap df at n-3
if(ic=='aicc' | ic=='bicc'){
df[which(df>=n-2)]=n-3
}
rss = Matrix::colSums(sweep(y_hat, 1, y, '-')^2)
if(ic=='aic'){return(log(rss/n) + 2*df/n)}
else if(ic=='bic'){return(log(rss/n) + log(n)*df/n)}
else if(ic=='aicc'){return(log(rss/n) + 2*(df+1)/(n-df-2))}
else if(ic=='bicc'){return(log(rss/n) + log(n)*(df+1)/(n-df-2))}
# else if(ic=='bicc'){return(log(rss/n) + df*(log(n)+2*log(p))/n )}
else if(ic=='gcv'){return(rss / (n-df)^2)}
else if(ic=='cp'){return(rss + 2*sigma^2*df)}
}
|
/scratch/gouwar.j/cran-all/cranData/BOSSreg/R/ic.R
|
### functions that are called by the main functions, but invisible to users unless using namespace ':::'
## standardize the data ------------------------------------------------------------
# standardize x to be mean 0 and norm 1
# standardize y to be mean 0
std <- function(x, y, intercept){
n = dim(x)[1]
p = dim(x)[2]
if(intercept){
mean_x = Matrix::colMeans(x)
mean_y = mean(y)
x = scale(x, center = mean_x, scale = FALSE)
y = scale(y, center = mean_y, scale = FALSE)
}else{
mean_x = rep(0, p)
mean_y = 0
}
sd_demeanedx = sqrt(Matrix::colSums(x^2))
x = scale(x, center = FALSE, scale = sd_demeanedx)
return(list(x_std = x, y_std = y, mean_x=mean_x, sd_demeanedx=sd_demeanedx, mean_y=mean_y))
}
## calculate various information criteria: AIC, BIC, AICc, BICc, GCV, Cp -----------
calc.ic.all <- function(coef, x, y, df, sigma=NULL){
# unify dimensions
y = matrix(y, ncol=1)
df = matrix(df, nrow=1)
# sanity check
if(ncol(coef) != ncol(df)){
stop('the number of coef vectors does not match the number of df')
}
if(is.null(sigma)){
stop("need to specify sigma for Mallow's Cp")
}
n = nrow(y)
nfit = ncol(coef)
fit = x %*% coef
rss = Matrix::colSums(sweep(fit, 1, y, '-')^2)
ic = list()
ic$aic = log(rss/n) + 2*df/n
ic$bic = log(rss/n) + log(n)*df/n
ic$gcv = rss / (n-df)^2
ic$cp = rss + 2*sigma^2*df
  # for AICc and BICc, df >= n-2 makes the penalty term undefined or negative; cap df at n-3
df[which(df>=n-2)] = n-3
ic$aicc = log(rss/n) + 2*(df+1)/(n-df-2)
ic$bicc = log(rss/n) + log(n)*(df+1)/(n-df-2)
# ic$bicc = log(rss/n) + df*(log(n)+2*log(p))/n
return(ic)
}
## Heuristic df for BOSS ------------------------------------------------------------
# @param Q An orthogonal matrix, with \code{nrow(Q)=length(y)=n} and
# \code{ncol(Q)=p}. For BOSS, Q is obtained by QR decomposition upon
# an ordered design matrix.
# @param y A vector of response variable, with \code{length(y)=n}.
# @param sigma,mu The standard deviation and mean vector of the true model.
# In practice, if not specified, they are calculated via full multiple
# regression of y upon Q.
# @param x is required for the case where n<=p and sigma is estimated via lasso estimate
calc.hdf <- function(Q, y, sigma=NULL, mu=NULL, x=NULL){
n = dim(Q)[1]
p = dim(Q)[2]
# if(p>=n){
# stop('hdf is undefined when p>=n')
# }
if(n <= p & is.null(x)){
stop("This is the n<p scenario. Need the design matrix X for the lasso estimated sigmahat.")
}
# mu is estimated via multiple LS regression
beta_hat = t(Q) %*% y # the multiple regression coef
if(is.null(mu)){
xtmu = beta_hat
}else{
xtmu = t(Q)%*%mu
}
xtmu_matrix = matrix(rep(xtmu,each=p-1), ncol=p-1, byrow=TRUE)
if(is.null(sigma)){
# For n>p, the estimated sigma is based on multiple LS regression
# For n<=p, the estimated sigma is based on lasso with 10-fold CV
if(n > p){
resid = y - Q %*% beta_hat
sigma = sqrt(sum(resid^2)/(n-p))
}else{
sigma = est.sigma.lasso(x, y)
}
}
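  # hdf recipe (Tian et al., 2021): for each subset size k = 1, ..., p-1,
  # numerically invert the expected-size function E(size(lambda)) = k to obtain
  # sqrt(2*lambda_k), then plug each lambda_k into the df(lambda) expression below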
tryCatch({
# calculate the inverse function of E(k(lambda))=k, where k=1,...p-1
inverse = function(f, lower, upper) {
function(y) stats::uniroot(function(x){f(x) - y}, lower=lower, upper=upper)[1]
}
exp_size <- function(x){
c = stats::pnorm((x-xtmu) / sigma)
d = stats::pnorm((-x-xtmu) / sigma)
return( sum(1 - c + d) )
}
inverse_exp_size = inverse(exp_size, 0, 100*max(abs(xtmu)))
sqrt_2lambda = unlist(lapply(1:(p-1), inverse_exp_size))
sqrt_2lambda_matrix = matrix(rep(sqrt_2lambda,each=p), nrow=p, byrow=F)
# plug the sequence of lambda into the expression of df(lambda)
a = stats::dnorm((sqrt_2lambda_matrix-xtmu_matrix) / sigma)
b = stats::dnorm((-sqrt_2lambda_matrix-xtmu_matrix) / sigma)
size = 1:(p-1)
sdf = (sqrt_2lambda/sigma) * Matrix::colSums(a + b)
df = size + sdf
names(df) = NULL
return(list(hdf=c(0, df, p), sigma=sigma))
}, error=function(e){
warning('returns the df for a null model')
return(list(hdf=c(0, 1:p + 2*p*stats::qnorm(1-1:p/(2*p))*stats::dnorm(stats::qnorm(1-1:p/(2*p)))), sigma=sigma))
})
}
## Estimate sigma based on lasso-cv
est.sigma.lasso <- function(x, y){
n = dim(x)[1]
lasso_cv = glmnet::cv.glmnet(x, y, intercept=FALSE)
betahat = glmnet::coef.glmnet(lasso_cv, s='lambda.min')[-1,]
df = sum(betahat != 0)
y_hat = x %*% betahat
sigma_hat = sqrt( sum((y-y_hat)^2) / (n-df-1) )
return(sigma_hat)
}
|
/scratch/gouwar.j/cran-all/cranData/BOSSreg/R/utils.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- eval=FALSE--------------------------------------------------------------
# library(devtools)
# install_github(repo="sentian/BOSSreg", subdir="r-package")
## ---- eval=FALSE--------------------------------------------------------------
# install.packages(repo="BOSSreg", repos = "http://cran.us.r-project.org")
## -----------------------------------------------------------------------------
n = 200 # Number of observations
p = 14 # Number of predictors
p0 = 6 # Number of active predictors (beta_j != 0)
rho = 0.9 # Correlation between predictors
nrep = 1000 # Number of replications of y to be generated
SNR = 7 # Signal-to-noise ratio
seed = 65 # The seed for reproducibility
## -----------------------------------------------------------------------------
library(MASS)
# Function to generate the data
# Columns of X have mean 0 and norm 1, y has mean 0
simu.data <- function(n, p, p0, rho, nrep, SNR, seed){
# True beta
beta = rep(0,p)
beta = c(rep(c(1,-1),p0/2), rep(0,p-p0))
names(beta) = paste0('X', seq(1,p))
# Covariance matrix
covmatrix = matrix(0,nrow=p,ncol=p)
diag(covmatrix) = 1
for(i in 1:(p0/2)){
covmatrix[2*i-1,2*i] = covmatrix[2*i,2*i-1] = rho
}
# Generate the predictors given the correlation structure
set.seed(seed)
x = mvrnorm(n,mu=rep(0,p),Sigma=covmatrix)
x = scale(x,center=TRUE,scale=FALSE)
colnorm = apply(x,2,function(m){sqrt(sum(m^2))})
x = scale(x,center=FALSE,scale=colnorm) # standardization
# Sigma calculated based on SNR
sd = sqrt(t(beta/colnorm)%*%covmatrix%*%(beta/colnorm) / SNR)
mu = x%*%beta
# Generate replications of y by fixing X
y = matrix(rep(mu,each=nrep),ncol=nrep,byrow=TRUE) +
scale(matrix(rnorm(n*nrep,mean=0,sd=sd),nrow=n,ncol=nrep),center=TRUE,scale=FALSE)
return(list(x=x, y=y, beta=beta, sigma=sd))
}
dataset = simu.data(n, p, p0, rho, nrep, SNR, seed)
x = dataset$x
y = dataset$y
beta = dataset$beta
mu = x%*%beta
sigma = dataset$sigma
## -----------------------------------------------------------------------------
print(beta)
## -----------------------------------------------------------------------------
library(BOSSreg)
# Choose a single replication as illustration
rep = seed
# Fit the model
boss_model = boss(x, y[,rep], intercept = FALSE)
## -----------------------------------------------------------------------------
betahat_boss = boss_model$beta_boss
betahat_fs = boss_model$beta_fs
print(dim(betahat_boss))
## ---- fig.width=3, fig.height=3, fig.show='hold'------------------------------
# The heuristic degrees of freedom
plot(0:p, boss_model$hdf, main='hdf', ylab='', xlab='subset size', type='b')
abline(0, 1, lty=2)
# AICc-hdf (scaled by 1/n, and up to a constant)
plot(0:p, boss_model$IC_boss$aicc, main='AICc-hdf', ylab='', xlab='subset size', type='b')
## -----------------------------------------------------------------------------
# The default is chosen by AICc
betahat_aicc = coef(boss_model)
muhat_aicc = predict(boss_model, newx=x)
# Use Cp rather than AICc
betahat_cp = coef(boss_model, ic='cp')
muhat_cp = predict(boss_model, newx=x, ic='cp')
## -----------------------------------------------------------------------------
# The default is 10-fold CV with 1 replication
set.seed(seed)
boss_cv_model = cv.boss(x, y[,rep], intercept=FALSE)
# Coefficient vector selected by minimizing CV error
betahat_cv = coef(boss_cv_model)
# Fitted values
muhat_cv = predict(boss_cv_model, newx=x)
## -----------------------------------------------------------------------------
# Coefficient vector for FS selected by CV
betahat_fs_cv = coef(boss_cv_model, method='fs')
# Fitted values
muhat_fs_cv = predict(boss_cv_model, newx=x, method='fs')
## -----------------------------------------------------------------------------
tmp = cbind(betahat_aicc, betahat_cp, betahat_cv, betahat_fs_cv)
dimnames(tmp) = list(dimnames(tmp)[[1]], c('BOSS AICc', 'BOSS Cp', 'BOSS CV', 'FS CV'))
print(tmp)
## -----------------------------------------------------------------------------
# X9 joins first
print(boss_model$steps_x)
## ---- eval=FALSE--------------------------------------------------------------
# # Function to calculate RMSE
# calc.rmse <- function(muhat){
# sqrt( Matrix::colSums(sweep(muhat, 1, mu)^2) / n )
# }
# rmse_solutionpath = list(BOSS=list(), FS=list())
# for(rep in 1:nrep){
# boss_model = boss(x, y[,rep], intercept=FALSE)
# # RMSE along the solution path
# rmse_solutionpath[['BOSS']][[rep]] = calc.rmse(x %*% boss_model$beta_boss)
# rmse_solutionpath[['FS']][[rep]] = calc.rmse(x %*% boss_model$beta_fs)
# }
# # saveRDS(rmse_solutionpath, 'vignettes/rmse_solutionpath.rds')
## ---- include=FALSE-----------------------------------------------------------
rmse_solutionpath = readRDS(gzcon(url('https://raw.githubusercontent.com/sentian/BOSSreg/master/r-package/vignettes/rmse_solutionpath.rds')))
## ---- fig.width=5, fig.height=4-----------------------------------------------
# Average RMSE over replications
rmse_avg = lapply(rmse_solutionpath, function(xx){colMeans(do.call(rbind, xx))})
plot(0:p, rmse_avg$FS, col='blue', pch=1, main='Average RMSE along the solution path',
ylab='RMSE', xlab='Subset size')
points(0:p, rmse_avg$BOSS, col='red', pch=3)
legend('topright', legend = c('FS', 'BOSS'), col=c('blue', 'red'), pch=c(1,3))
## ---- eval=FALSE--------------------------------------------------------------
# rmse = nvar = list(BOSS=c(), FS=c())
# set.seed(seed)
# for(rep in 1:nrep){
# boss_cv_model = cv.boss(x, y[,rep], intercept=FALSE)
# # RMSE for the optimal subset selected via a selection rule
# rmse[['BOSS']][rep] = calc.rmse(predict(boss_cv_model$boss, newx = x)) # AICc
# rmse[['FS']][rep] = calc.rmse(predict(boss_cv_model, newx = x, method = 'fs')) # CV
# # Number of variables
# nvar[['BOSS']][rep] = sum(coef(boss_cv_model$boss)!=0)
# nvar[['FS']][rep] = sum(coef(boss_cv_model, method='fs')!=0)
# }
# # saveRDS(list(rmse=rmse, nvar=nvar), '/vignettes/boss_fs.rds')
## ---- include=FALSE-----------------------------------------------------------
tmp = readRDS(gzcon(url('https://raw.githubusercontent.com/sentian/BOSSreg/master/r-package/vignettes/boss_fs.rds')))
rmse = tmp$rmse
nvar = tmp$nvar
## ---- fig.width=3, fig.height=3, fig.show='hold'------------------------------
# Make the plots
boxplot(rmse, outline=FALSE, main='RMSE')
boxplot(nvar, outline=FALSE, main='Number of predictors')
## ---- include=FALSE-----------------------------------------------------------
library(ISLR)
dataset = list()
# Boston Housing data
tmp = Boston
tmp = na.omit(tmp)
tmp$chas = as.factor(tmp$chas)
dataset$boston$x = data.matrix(tmp[,!names(tmp) %in% 'medv'])
dataset$boston$y = tmp$medv
# MLB hitters salary
tmp = Hitters
tmp = na.omit(tmp)
tmp[,c('League', 'Division', 'NewLeague')] =
lapply(tmp[,c('League', 'Division', 'NewLeague')], as.factor)
dataset$hitters$x = data.matrix(tmp[,!(names(tmp) %in% c('Salary'))])
dataset$hitters$y = tmp$Salary
# College data
tmp = College
tmp$Private = as.factor(tmp$Private)
dataset$college$x = data.matrix(tmp[,!(names(tmp) %in% c('Outstate'))])
dataset$college$y = tmp$Outstate
# Auto data
tmp = Auto
dataset$auto$x = data.matrix(tmp[,!(names(tmp) %in% c('mpg','name','origin'))])
dataset$auto$y = tmp$mpg
## ---- echo=FALSE--------------------------------------------------------------
library(knitr)
library(kableExtra)
# read the results
result = readRDS(gzcon(url('https://raw.githubusercontent.com/sentian/BOSSreg/master/r-package/vignettes/realdata.rds')))
# function to extract the results
tmp_function <- function(method){
unlist(lapply(result, function(xx){
unlist(lapply(xx, function(yy){
round(mean(yy[[method]]), 3)
}))
}))
}
tmp = data.frame(Dataset = rep(names(result), each=3),
n_p = rep(unlist(lapply(dataset, function(xx){paste(dim(xx$x), collapse = ', ')})) , each=3),
Metrics = rep(c('RMSE', '# predictors', 'running time (s)'), length(result)),
BOSS = tmp_function('boss'),
FS = tmp_function('fs'),
LASSO = tmp_function('lasso'),
SparseNet = tmp_function('sparsenet'))
rownames(tmp) = NULL
colnames(tmp)[2] = 'n, p'
kable(tmp, align = "c") %>%
kable_styling(full_width = F) %>%
column_spec(1, bold = T) %>%
collapse_rows(columns = 1:2, valign = "middle")
## ---- eval=FALSE--------------------------------------------------------------
# library(ISLR)
# dataset = list()
# # Boston Housing data
# tmp = Boston
# tmp = na.omit(tmp)
# tmp$chas = as.factor(tmp$chas)
# dataset$boston$x = data.matrix(tmp[,!names(tmp) %in% 'medv'])
# dataset$boston$y = tmp$medv
#
# # MLB hitters salary
# tmp = Hitters
# tmp = na.omit(tmp)
# tmp[,c('League', 'Division', 'NewLeague')] =
# lapply(tmp[,c('League', 'Division', 'NewLeague')], as.factor)
# dataset$hitters$x = data.matrix(tmp[,!(names(tmp) %in% c('Salary'))])
# dataset$hitters$y = tmp$Salary
#
# # College data
# tmp = College
# tmp$Private = as.factor(tmp$Private)
# dataset$college$x = data.matrix(tmp[,!(names(tmp) %in% c('Outstate'))])
# dataset$college$y = tmp$Outstate
#
# # Auto data
# tmp = Auto
# dataset$auto$x = data.matrix(tmp[,!(names(tmp) %in% c('mpg','name','origin'))])
# dataset$auto$y = tmp$mpg
## ---- eval=FALSE--------------------------------------------------------------
# library(glmnet)
# library(sparsenet)
# rmse <- function(y_hat, y){
# sqrt(sum( (y_hat - y)^2 / length(y)) )
# }
# rdresult <- function(x, y, nrep, seed){
# p = dim(x)[2]
#
# allmethods = c('lasso','sparsenet','boss','fs')
# error = numvar = time = replicate(length(allmethods), rep(NA,nrep), simplify=F)
# names(error) = names(numvar) = names(time) = allmethods
#
# set.seed(seed)
# for(i in 1:nrep){
# index = 1:nrow(x)
# index = index[-i]
#
# x.train = x[index, , drop=FALSE]
# y.train = y[index]
# x.test = x[-index, , drop=FALSE]
# x.test.withint = cbind(rep(1,nrow(x.test)), x.test)
# y.test = y[-index]
#
# # BOSS
# ptm = proc.time()
# boss_model = boss(x.train, y.train, intercept = TRUE)
# time_tmp = proc.time() - ptm
# boss_pred = as.numeric( predict(boss_model, newx=x.test) )
# error$boss[i] = rmse(boss_pred, y.test)
# numvar$boss[i] = sum(coef(boss_model)!=0)
# time$boss[i] = time_tmp[3]
#
# # FS
# ptm = proc.time()
# boss_cv_model = cv.boss(x.train, y.train)
# time_tmp = proc.time() - ptm
# fs_pred = as.numeric( predict(boss_cv_model, newx=x.test, method='fs') )
# error$fs[i] = rmse(fs_pred, y.test)
# numvar$fs[i] = sum(coef(boss_cv_model, method='fs')!=0)
# time$fs[i] = time_tmp[3]
#
# # LASSO
# ptm = proc.time()
# lasso_model = glmnet(x.train, y.train, intercept=TRUE)
# lasso_aicc = as.numeric(calc.ic(predict(lasso_model, newx=x.train), y.train,
# ic='aicc', df=lasso_model$df+1))
# lasso_pred = predict(lasso_model, newx=x.test, s=lasso_model$lambda[which.min(lasso_aicc)])
# time_tmp = proc.time() - ptm
# error$lasso[i] = rmse(lasso_pred, y.test)
# numvar$lasso[i] = sum(coef(lasso_model, s=lasso_model$lambda[which.min(lasso_aicc)])!=0)
# time$lasso[i] = time_tmp[3]
#
# # SparseNet
# ptm = proc.time()
# sparsenet_cv_model = cv.sparsenet(x.train, y.train)
# time_tmp = proc.time() - ptm
# sparsenet_pred = predict(sparsenet_cv_model, newx=x.test, which='parms.min')
# error$sparsenet[i] = rmse(sparsenet_pred, y.test)
# numvar$sparsenet[i] = sum(coef(sparsenet_cv_model, which='parms.min')!=0)
# time$sparsenet[i] = time_tmp[3]
# }
# return(list(error=error, numvar=numvar, time=time))
# }
# result = lapply(dataset, function(xx){rdresult(xx$x, xx$y, nrow(xx$x), seed)})
# # saveRDS(result, '/vignettes/realdata.rds')
## ---- eval=FALSE--------------------------------------------------------------
# library(knitr)
# library(kableExtra)
# # Function to extract the results
# tmp_function <- function(method){
# unlist(lapply(result, function(xx){
# unlist(lapply(xx, function(yy){
# round(mean(yy[[method]]), 3)
# }))
# }))
# }
# tmp = data.frame(Dataset = rep(names(result), each=3),
# n_p = rep(unlist(lapply(dataset, function(xx){paste(dim(xx$x), collapse = ', ')})) , each=3),
# Metrics = rep(c('RMSE', '# predictors', 'running time (s)'), length(result)),
# BOSS = tmp_function('boss'),
# FS = tmp_function('fs'),
# LASSO = tmp_function('lasso'),
# SparseNet = tmp_function('sparsenet'))
# rownames(tmp) = NULL
# colnames(tmp)[2] = 'n, p'
# kable(tmp, align = "c") %>%
# kable_styling(full_width = F) %>%
# column_spec(1, bold = T) %>%
# collapse_rows(columns = 1:2, valign = "middle")
|
/scratch/gouwar.j/cran-all/cranData/BOSSreg/inst/doc/BOSSreg.R
|
---
title: "Best Orthogonalized Subset Selection (BOSS)"
author: "Sen Tian"
date: "`r Sys.Date()`"
#output: rmarkdown::html_vignette
output: pdf_document
vignette: >
%\VignetteIndexEntry{Best Orthogonalized Subset Selection (BOSS)}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
references:
- id: Tian2021
title: On the Use of Information Criteria for Subset Selection in Least Squares Regression
author:
- family: Tian
given: Sen
- family: Hurvich
given: Clifford M.
- family: Simonoff
given: Jeffrey S.
URL: 'https://arxiv.org/abs/1911.10191'
issued:
year: 2021
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Installation
We maintain a GitHub page for the package and keep the most up-to-date version there.
To install it, simply run the following commands in the console:
```{r, eval=FALSE}
library(devtools)
install_github(repo="sentian/BOSSreg", subdir="r-package")
```
A stable version can be installed from CRAN using
```{r, eval=FALSE}
install.packages("BOSSreg", repos = "http://cran.us.r-project.org")
```
## Introduction
BOSS is a least squares-based subset selection method. It takes the following steps (a conceptual sketch follows the list):
* order the predictors based on their partial correlations with the response;
* perform best subset regression upon the orthogonal basis of the ordered predictors;
* transform the coefficients back to the original space;
* choose the optimal solution using the selection rule AICc-hdf.
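The following minimal sketch illustrates these steps conceptually, assuming `x_ord` holds the predictors already ordered by their partial correlations with `y` (the `boss()` function implements all of this internally and far more efficiently):
```{r, eval=FALSE}
qrx <- qr(x_ord)
Q <- qr.Q(qrx); R <- qr.R(qrx)          # orthogonal basis of the ordered predictors
gam <- drop(crossprod(Q, y))            # coefficients on the orthogonal basis
k <- 5                                  # one subset size
gam_k <- gam * (rank(-abs(gam)) <= k)   # best subset of size k keeps the k largest |gam|
beta_k <- backsolve(R, gam_k)           # transform back to the original space
```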
The hdf is a heuristic degrees of freedom for BOSS that can be plugged into an information criterion
such as AICc. The resulting selection rule, AICc-hdf, is defined as
\begin{equation*}
\text{AICc-hdf} = n \log\left(\frac{\text{RSS}}{n}\right) + n \frac{n+\text{hdf}}{n-\text{hdf}-2}.
\end{equation*}
More details can be found in @Tian2021.
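As a minimal sketch (the package computes this internally), AICc-hdf along the solution path can be evaluated from the RSS and hdf at each subset size:
```{r, eval=FALSE}
# rss and hdf are assumed to be vectors over the subset sizes 0,...,p
aicc_hdf <- function(rss, hdf, n) {
  n * log(rss / n) + n * (n + hdf) / (n - hdf - 2)
}
```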
This vignette is structured as follows. We start by simulating a dataset. We then introduce the components,
functionalities and basic usage of the package. This is followed by a discussion contrasting BOSS and forward stepwise regression (FS).
Finally, we study real data examples and compare BOSS with some popular regularization methods. Note that this vignette is based on **R-3.6.1**. Slightly different results may be obtained using pre-3.6.0 versions of **R** since the default underlying random number generator has been changed in version 3.6.0.
## Simulated datasets
The model generating mechanism is $y=X\beta+\epsilon$. We consider a sparse model where only a few predictors matter,
with a high signal-to-noise ratio. The detailed parameters are given as follows:
```{r}
n = 200 # Number of observations
p = 14 # Number of predictors
p0 = 6 # Number of active predictors (beta_j != 0)
rho = 0.9 # Correlation between predictors
nrep = 1000 # Number of replications of y to be generated
SNR = 7 # Signal-to-noise ratio
seed = 65 # The seed for reproducibility
```
We make the predictors with $\beta_j \ne 0$ pairwise correlated with opposite effects.
We generate $1000$ replicated datasets where the response $y$ is generated with fixed $X$. The columns of $X$ and $y$
are constructed to have zero mean, so we can exclude the intercept term from model fitting.
```{r}
library(MASS)
# Function to generate the data
# Columns of X have mean 0 and norm 1, y has mean 0
simu.data <- function(n, p, p0, rho, nrep, SNR, seed){
# True beta
beta = rep(0,p)
beta = c(rep(c(1,-1),p0/2), rep(0,p-p0))
names(beta) = paste0('X', seq(1,p))
# Covariance matrix
covmatrix = matrix(0,nrow=p,ncol=p)
diag(covmatrix) = 1
for(i in 1:(p0/2)){
covmatrix[2*i-1,2*i] = covmatrix[2*i,2*i-1] = rho
}
# Generate the predictors given the correlation structure
set.seed(seed)
x = mvrnorm(n,mu=rep(0,p),Sigma=covmatrix)
x = scale(x,center=TRUE,scale=FALSE)
colnorm = apply(x,2,function(m){sqrt(sum(m^2))})
x = scale(x,center=FALSE,scale=colnorm) # standardization
# Sigma calculated based on SNR
sd = sqrt(t(beta/colnorm)%*%covmatrix%*%(beta/colnorm) / SNR)
mu = x%*%beta
# Generate replications of y by fixing X
y = matrix(rep(mu,each=nrep),ncol=nrep,byrow=TRUE) +
scale(matrix(rnorm(n*nrep,mean=0,sd=sd),nrow=n,ncol=nrep),center=TRUE,scale=FALSE)
return(list(x=x, y=y, beta=beta, sigma=sd))
}
dataset = simu.data(n, p, p0, rho, nrep, SNR, seed)
x = dataset$x
y = dataset$y
beta = dataset$beta
mu = x%*%beta
sigma = dataset$sigma
```
The first $p_0=6$ predictors are active with $\beta_j \ne 0$.
```{r}
print(beta)
```
## An illustration of the package
Fitting the model is simple.
```{r}
library(BOSSreg)
# Choose a single replication as illustration
rep = seed
# Fit the model
boss_model = boss(x, y[,rep], intercept = FALSE)
```
The 'boss' object contains estimated coefficient vectors for the entire solution paths of both BOSS and FS.
```{r}
betahat_boss = boss_model$beta_boss
betahat_fs = boss_model$beta_fs
print(dim(betahat_boss))
```
By default, it also provides the hdf for BOSS and multiple information criteria.
```{r, fig.width=3, fig.height=3, fig.show='hold'}
# The heuristic degrees of freedom
plot(0:p, boss_model$hdf, main='hdf', ylab='', xlab='subset size', type='b')
abline(0, 1, lty=2)
# AICc-hdf (scaled by 1/n, and up to a constant)
plot(0:p, boss_model$IC_boss$aicc, main='AICc-hdf', ylab='', xlab='subset size', type='b')
```
The optimal estimated coefficient vector and fitted mean vector can be obtained as follows.
```{r}
# The default is chosen by AICc
betahat_aicc = coef(boss_model)
muhat_aicc = predict(boss_model, newx=x)
# Use Cp rather than AICc
betahat_cp = coef(boss_model, ic='cp')
muhat_cp = predict(boss_model, newx=x, ic='cp')
```
In addition to information criteria, K-fold cross-validation (CV) with multiple replications can be used as a selection rule,
with 10-fold CV with one replication the default choice.
```{r}
# The default is 10-fold CV with 1 replication
set.seed(seed)
boss_cv_model = cv.boss(x, y[,rep], intercept=FALSE)
# Coefficient vector selected by minimizing CV error
betahat_cv = coef(boss_cv_model)
# Fitted values
muhat_cv = predict(boss_cv_model, newx=x)
```
Calling 'cv.boss' runs CV for FS as well.
```{r}
# Coefficient vector for FS selected by CV
betahat_fs_cv = coef(boss_cv_model, method='fs')
# Fitted values
muhat_fs_cv = predict(boss_cv_model, newx=x, method='fs')
```
Here is a comparison of the coefficient vectors selected using different selection rules. The first
three columns are for BOSS while the last column is for FS.
```{r}
tmp = cbind(betahat_aicc, betahat_cp, betahat_cv, betahat_fs_cv)
dimnames(tmp) = list(dimnames(tmp)[[1]], c('BOSS AICc', 'BOSS Cp', 'BOSS CV', 'FS CV'))
print(tmp)
```
## Comparing the solutions of BOSS and FS at every subset size
We see that FS gives a denser solution than BOSS in this case. Under the specific design of the true model,
the true active predictors ($X_1,\cdots, X_6$) are pairwise correlated with opposite effects. Pairs of predictors (e.g. $(X_1,X_2)$) together
lead to a high $R^2$, but each of them individually contributes little.
As a result, FS can have trouble bringing the true active predictors in during the early stages. For example,
as indicated below, the inactive predictor $X_9$ joins in the first step. In contrast, BOSS takes the same ordering of predictors as FS,
but performs best subset regression on their orthogonal basis, which provides the chance to re-evaluate (or re-order) the predictors.
```{r}
# X9 joins first
print(boss_model$steps_x)
```
Let's set aside the selection rule for now, and compare the solutions of the two methods at every subset size.
The subset size is the number of predictors $k$ for FS, and the number of orthogonal basis vectors with nonzero coefficients for BOSS.
We calculate the average RMSE at each subset size based on $1000$ replications. The RMSE is defined as
\begin{equation*}
\text{RMSE} = \sqrt{\frac{1}{n} \lVert \hat{\mu} - X\beta \rVert_2^2}.
\end{equation*}
```{r, eval=FALSE}
# Function to calculate RMSE
calc.rmse <- function(muhat){
sqrt( Matrix::colSums(sweep(muhat, 1, mu)^2) / n )
}
rmse_solutionpath = list(BOSS=list(), FS=list())
for(rep in 1:nrep){
boss_model = boss(x, y[,rep], intercept=FALSE)
# RMSE along the solution path
rmse_solutionpath[['BOSS']][[rep]] = calc.rmse(x %*% boss_model$beta_boss)
rmse_solutionpath[['FS']][[rep]] = calc.rmse(x %*% boss_model$beta_fs)
}
# saveRDS(rmse_solutionpath, 'vignettes/rmse_solutionpath.rds')
```
```{r, include=FALSE}
rmse_solutionpath = readRDS(gzcon(url('https://raw.githubusercontent.com/sentian/BOSSreg/master/r-package/vignettes/rmse_solutionpath.rds')))
```
BOSS clearly provides a better solution path than FS for subset sizes smaller than $8$.
```{r, fig.width=5, fig.height=4}
# Average RMSE over replications
rmse_avg = lapply(rmse_solutionpath, function(xx){colMeans(do.call(rbind, xx))})
plot(0:p, rmse_avg$FS, col='blue', pch=1, main='Average RMSE along the solution path',
ylab='RMSE', xlab='Subset size')
points(0:p, rmse_avg$BOSS, col='red', pch=3)
legend('topright', legend = c('FS', 'BOSS'), col=c('blue', 'red'), pch=c(1,3))
```
Next, we bring back the selection rules and compare their performances. The selection rule is
AICc-hdf for BOSS and 10-fold CV for FS. BOSS shows better predictive performance, and it provides
sparser solutions than FS does.
```{r, eval=FALSE}
rmse = nvar = list(BOSS=c(), FS=c())
set.seed(seed)
for(rep in 1:nrep){
boss_cv_model = cv.boss(x, y[,rep], intercept=FALSE)
# RMSE for the optimal subset selected via a selection rule
rmse[['BOSS']][rep] = calc.rmse(predict(boss_cv_model$boss, newx = x)) # AICc
rmse[['FS']][rep] = calc.rmse(predict(boss_cv_model, newx = x, method = 'fs')) # CV
# Number of variables
nvar[['BOSS']][rep] = sum(coef(boss_cv_model$boss)!=0)
nvar[['FS']][rep] = sum(coef(boss_cv_model, method='fs')!=0)
}
# saveRDS(list(rmse=rmse, nvar=nvar), '/vignettes/boss_fs.rds')
```
```{r, include=FALSE}
tmp = readRDS(gzcon(url('https://raw.githubusercontent.com/sentian/BOSSreg/master/r-package/vignettes/boss_fs.rds')))
rmse = tmp$rmse
nvar = tmp$nvar
```
```{r, fig.width=3, fig.height=3, fig.show='hold'}
# Make the plots
boxplot(rmse, outline=FALSE, main='RMSE')
boxplot(nvar, outline=FALSE, main='Number of predictors')
```
## Real data examples
We compare the performance of BOSS with FS and some popular regularization methods on several real datasets.
We consider four datasets from the [StatLib library](http://lib.stat.cmu.edu/datasets/), 'boston housing', 'hitters', 'auto' and 'college'. An intercept term is included in all of the procedures. We present the results in this section and provide the code in the Appendix at the end of this document.
The selection rule is AICc for BOSS and LASSO, and 10-fold CV for FS and SparseNet. We use the **R** packages *glmnet* and *sparsenet* to fit
LASSO and SparseNet, respectively. We see that BOSS has the minimum RMSE for the 'hitters' and 'auto' datasets, while LASSO has the minimum RMSE for the 'boston housing' and 'college' datasets. Due to an efficient implementation of cyclic coordinate descent, the *glmnet* algorithm provides an extremely fast LASSO solution. BOSS is also relatively computationally efficient, and is
much faster than the remaining methods.
```{r, include=FALSE}
library(ISLR)
dataset = list()
# Boston Housing data
tmp = Boston
tmp = na.omit(tmp)
tmp$chas = as.factor(tmp$chas)
dataset$boston$x = data.matrix(tmp[,!names(tmp) %in% 'medv'])
dataset$boston$y = tmp$medv
# MLB hitters salary
tmp = Hitters
tmp = na.omit(tmp)
tmp[,c('League', 'Division', 'NewLeague')] =
lapply(tmp[,c('League', 'Division', 'NewLeague')], as.factor)
dataset$hitters$x = data.matrix(tmp[,!(names(tmp) %in% c('Salary'))])
dataset$hitters$y = tmp$Salary
# College data
tmp = College
tmp$Private = as.factor(tmp$Private)
dataset$college$x = data.matrix(tmp[,!(names(tmp) %in% c('Outstate'))])
dataset$college$y = tmp$Outstate
# Auto data
tmp = Auto
dataset$auto$x = data.matrix(tmp[,!(names(tmp) %in% c('mpg','name','origin'))])
dataset$auto$y = tmp$mpg
```
```{r, echo=FALSE}
library(knitr)
library(kableExtra)
# read the results
result = readRDS(gzcon(url('https://raw.githubusercontent.com/sentian/BOSSreg/master/r-package/vignettes/realdata.rds')))
# function to extract the results
tmp_function <- function(method){
unlist(lapply(result, function(xx){
unlist(lapply(xx, function(yy){
round(mean(yy[[method]]), 3)
}))
}))
}
tmp = data.frame(Dataset = rep(names(result), each=3),
n_p = rep(unlist(lapply(dataset, function(xx){paste(dim(xx$x), collapse = ', ')})) , each=3),
Metrics = rep(c('RMSE', '# predictors', 'running time (s)'), length(result)),
BOSS = tmp_function('boss'),
FS = tmp_function('fs'),
LASSO = tmp_function('lasso'),
SparseNet = tmp_function('sparsenet'))
rownames(tmp) = NULL
colnames(tmp)[2] = 'n, p'
kable(tmp, align = "c") %>%
kable_styling(full_width = F) %>%
column_spec(1, bold = T) %>%
collapse_rows(columns = 1:2, valign = "middle")
```
## References
<div id="refs"></div>
\newpage
## Appendix: Code for the real data examples
The following code is used to pre-process the datasets. We remove all of the entries with 'NA' values. We recast binary categorical variables
into $\{0,1\}$ and remove categorical variables with more than two categories.
```{r, eval=FALSE}
library(ISLR)
dataset = list()
# Boston Housing data
tmp = Boston
tmp = na.omit(tmp)
tmp$chas = as.factor(tmp$chas)
dataset$boston$x = data.matrix(tmp[,!names(tmp) %in% 'medv'])
dataset$boston$y = tmp$medv
# MLB hitters salary
tmp = Hitters
tmp = na.omit(tmp)
tmp[,c('League', 'Division', 'NewLeague')] =
lapply(tmp[,c('League', 'Division', 'NewLeague')], as.factor)
dataset$hitters$x = data.matrix(tmp[,!(names(tmp) %in% c('Salary'))])
dataset$hitters$y = tmp$Salary
# College data
tmp = College
tmp$Private = as.factor(tmp$Private)
dataset$college$x = data.matrix(tmp[,!(names(tmp) %in% c('Outstate'))])
dataset$college$y = tmp$Outstate
# Auto data
tmp = Auto
dataset$auto$x = data.matrix(tmp[,!(names(tmp) %in% c('mpg','name','origin'))])
dataset$auto$y = tmp$mpg
```
Code to calculate leave-one-out error, number of predictors and timing for each fitting procedure.
Note that the following code took roughly 20 minutes to run on a single core of a local machine with
a 2.7 GHz i7 processor and 16 GB RAM.
```{r, eval=FALSE}
library(glmnet)
library(sparsenet)
rmse <- function(y_hat, y){
sqrt(sum( (y_hat - y)^2 / length(y)) )
}
rdresult <- function(x, y, nrep, seed){
p = dim(x)[2]
allmethods = c('lasso','sparsenet','boss','fs')
error = numvar = time = replicate(length(allmethods), rep(NA,nrep), simplify=F)
names(error) = names(numvar) = names(time) = allmethods
set.seed(seed)
for(i in 1:nrep){
index = 1:nrow(x)
index = index[-i]
x.train = x[index, , drop=FALSE]
y.train = y[index]
x.test = x[-index, , drop=FALSE]
x.test.withint = cbind(rep(1,nrow(x.test)), x.test)
y.test = y[-index]
# BOSS
ptm = proc.time()
boss_model = boss(x.train, y.train, intercept = TRUE)
time_tmp = proc.time() - ptm
boss_pred = as.numeric( predict(boss_model, newx=x.test) )
error$boss[i] = rmse(boss_pred, y.test)
numvar$boss[i] = sum(coef(boss_model)!=0)
time$boss[i] = time_tmp[3]
# FS
ptm = proc.time()
boss_cv_model = cv.boss(x.train, y.train)
time_tmp = proc.time() - ptm
fs_pred = as.numeric( predict(boss_cv_model, newx=x.test, method='fs') )
error$fs[i] = rmse(fs_pred, y.test)
numvar$fs[i] = sum(coef(boss_cv_model, method='fs')!=0)
time$fs[i] = time_tmp[3]
# LASSO
ptm = proc.time()
lasso_model = glmnet(x.train, y.train, intercept=TRUE)
lasso_aicc = as.numeric(calc.ic(predict(lasso_model, newx=x.train), y.train,
ic='aicc', df=lasso_model$df+1))
lasso_pred = predict(lasso_model, newx=x.test, s=lasso_model$lambda[which.min(lasso_aicc)])
time_tmp = proc.time() - ptm
error$lasso[i] = rmse(lasso_pred, y.test)
numvar$lasso[i] = sum(coef(lasso_model, s=lasso_model$lambda[which.min(lasso_aicc)])!=0)
time$lasso[i] = time_tmp[3]
# SparseNet
ptm = proc.time()
sparsenet_cv_model = cv.sparsenet(x.train, y.train)
time_tmp = proc.time() - ptm
sparsenet_pred = predict(sparsenet_cv_model, newx=x.test, which='parms.min')
error$sparsenet[i] = rmse(sparsenet_pred, y.test)
numvar$sparsenet[i] = sum(coef(sparsenet_cv_model, which='parms.min')!=0)
time$sparsenet[i] = time_tmp[3]
}
return(list(error=error, numvar=numvar, time=time))
}
result = lapply(dataset, function(xx){rdresult(xx$x, xx$y, nrow(xx$x), seed)})
# saveRDS(result, '/vignettes/realdata.rds')
```
This is the code to construct the table on page 6.
```{r, eval=FALSE}
library(knitr)
library(kableExtra)
# Function to extract the results
tmp_function <- function(method){
unlist(lapply(result, function(xx){
unlist(lapply(xx, function(yy){
round(mean(yy[[method]]), 3)
}))
}))
}
tmp = data.frame(Dataset = rep(names(result), each=3),
n_p = rep(unlist(lapply(dataset, function(xx){paste(dim(xx$x), collapse = ', ')})) , each=3),
Metrics = rep(c('RMSE', '# predictors', 'running time (s)'), length(result)),
BOSS = tmp_function('boss'),
FS = tmp_function('fs'),
LASSO = tmp_function('lasso'),
SparseNet = tmp_function('sparsenet'))
rownames(tmp) = NULL
colnames(tmp)[2] = 'n, p'
kable(tmp, align = "c") %>%
kable_styling(full_width = F) %>%
column_spec(1, bold = T) %>%
collapse_rows(columns = 1:2, valign = "middle")
```
|
/scratch/gouwar.j/cran-all/cranData/BOSSreg/inst/doc/BOSSreg.Rmd
|
#' Get TOPK=500 DMCs and non-DMCs using moderated-t test
#' @param betaValue A matrix from TCGA array data
#' @param TOPK An integer number, default 500. Number of DMCs/non-DMCs.
#' @param tumorNum A positive number. The first tumorNum columns in betaValue
#'                 are tumor samples. If tumorNum is NULL, the first half of the
#'                 columns are considered tumor samples.
#' @param filterProbes Logical. Default is FALSE, i.e. all probes in betaValue are used.
#'                     If TRUE,
#'                     the default good probes provided with the package are used;
#'                     alternatively, you can supply your own good probes in userProbes.
#' @param userProbes A numeric vector of row numbers in betaValue.
#'                   These rows are considered good probes.
#' @return A TOPK x 2 matrix: gene names of the TOPK DMCs (column 1) and of the TOPK non-DMCs (column 2).
#' @note Users can provide the indexes (row numbers) of good probes
#'       to filter the probes.
#'       A global variable goodProbes is used in this function.
#'       goodProbes: probes with SNPs at the CpG or single-base
#'       extension sites, and cross-reactive probes, are removed.
#'       See the reference paper for more details.
#' @export
#'
ApiGetDMCs <-function(betaValue,TOPK=500,tumorNum=NULL,
filterProbes=FALSE,userProbes=NULL){
  ### get the TOPK=500 DMCs using a moderated-t test
anrow<-dim(betaValue)[1] #row number
ancol<-dim(betaValue)[2] #col number
#goodProbes<-NULL
#data(goodProbes,)
if (filterProbes){
### filter probes
if (is.null(userProbes)){
#using good probes same as paper
tmpidx<-goodProbes<=anrow
      #only keep good probes whose row number is within betaValue (logical index)
xbetaSub<-betaValue[goodProbes[tmpidx],] #
#xannoSub<-annot[goodProbes[tmpidx],]
xannoSub<-annotGeneNames[goodProbes[tmpidx]]
}else{
#using probes provided by user
tmpidx<-userProbes
xbetaSub<-betaValue[tmpidx,]
#xannoSub<-annot[tmpidx,]
xannoSub<-annotGeneNames[tmpidx]
}
}else{
### not filter probes
xbetaSub=betaValue
#xannoSub=annot
xannoSub=annotGeneNames
}
### remove NA
myidx<-which(!is.na(rowSums(xbetaSub)))
betaSub<-xbetaSub[myidx,] ## remove 'NA'
#annoSub<-xannoSub[myidx,] ## remove id contain 'NA'
annoSub<-xannoSub[myidx]
  ### moderated-t test of logit-transformed beta values to find DMCs and non-DMCs
Lbeta=.logit(betaSub)
if (is.null(tumorNum)){
tumorNum=ancol/2 ## T_1-T_n tumor/cancer; N_1-N_n normal
}
#n=ncol(Lbeta)/2
#
mixedT=Lbeta[,1:tumorNum] ## tumor / cancer
pureN=Lbeta[,(tumorNum+1):ancol] ## normal
controlNum=ancol-tumorNum
#n<-tumorNum
#n<-controlNum
mv =cbind(pureN,mixedT) ## Lbeta #cbind(pureN, mixedT)
### mv=cbind(mixedT,pureN)
#pd = c(paste("N",1:n, sep = ""), paste("C",1:n, sep = "")) #normal cancer
#colnames(mv) = pd
design <- stats::model.matrix(~0 + factor(c(rep("Control",controlNum), rep("Tumor",tumorNum)))) #control /tumor
colnames(design) <- c("Control", "Tumor") # control, tumor
### moderated t-test functions in limma packages
#library(limma)
contrast.matrix <- limma::makeContrasts("Tumor-Control", levels=design)
fit0 <- limma::lmFit(mv, design)
fit1 <- limma::contrasts.fit(fit0, contrast.matrix)
fit2 <- limma::eBayes(fit1)
pv = fit2$p.value
#
ior = order(pv) ## increasing p-value
ior2 = order(pv, decreasing = T) ## decreasing p-values
# # # top 500
#print(length(ior))
  if(length(ior)<TOPK){
    stop("TOPK is larger than the number of available probes")
  }
else{
#DMCs=annoSub[ior[1:TOPK],]$`Composite Element REF`#Chromosome
#nonDMCs=annoSub[ior2[1:TOPK],]$`Composite Element REF`
#DMCs=ior[1:TOPK]
#nonDMCs=ior2[1:TOPK]
DMCs=annoSub[ior[1:TOPK]]
nonDMCs=annoSub[ior2[1:TOPK]]
myDMCs=cbind(DMCs,nonDMCs)
#print(nonDMCs)
#print(DMCs)
}
return(myDMCs)
}
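## Usage sketch (assumes the simulated data set `simUCEC` shipped with the
## package, whose first 2 columns are tumor samples):
# dmcs <- ApiGetDMCs(simUCEC, TOPK = 20, tumorNum = 2)
# dmcs[, 1]  # gene names of the TOPK DMCs
# dmcs[, 2]  # gene names of the TOPK non-DMCs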
##if(FALSE){
### ------ logit
.logit <-function (a) {
#logit transform
x<-a
return(log2(x/(1-x)))
}
#}
|
/scratch/gouwar.j/cran-all/cranData/BPM/R/ApiGetDMCs.R
|
#' BPM software package
#'
#' Bayesian model for purity estimation using DNA
#' methylation data
#'
#' The main function is \code{\link{BayPM}}
#' @docType package
#' @name BPM
#' @rdname BPM-package
#' @references Jianzhao Gao, Linghao Shen, and Xiaodan Fan,
#' Bayesian model for purity estimation using DNA methylation data.(submitted)
#'
#' @author Jianzhao Gao(gaojz@@nankai.edu.cn), Linghao Shen Xiaodan Fan (xfan@@cuhk.edu.hk)
#'
#'
#' @examples
#' ### need to install package "limma"
#' ### source("https://bioconductor.org/biocLite.R");biocLite("limma");
#' library(BPM);
#' BayPM(simUCEC,20,2);
NULL
if (getRversion()>="2.15.1") utils::globalVariables(c("goodProbes","annotGeneNames"))
|
/scratch/gouwar.j/cran-all/cranData/BPM/R/BPM-package.R
|
#' Bayesian Purity Model (BPM) Main functions.
#'
#' @param betaValue A matrix, TCGA methylation array data. Each row is a locus;
#'                   columns are Tumor1,Tumor2,...,Normal1,Normal2,...
#'
#' @param TOPK A number. Number of DMCs/nonDMCs selected
#' @param tumorNum The number of tumor samples.
#'        If NULL, the default is half of the number of columns of the dataset.
#' @param filterProbes Logical. Default is FALSE, i.e. all probes in betaValue are used.
#'        If TRUE,
#'        the default good probes provided with the package are used;
#'        alternatively, you can supply your own good probes in userProbes.
#' @param userProbes A numeric vector of row numbers in betaValue.
#'        These rows are considered good probes.
#' @return Tumor purity estimates for the tumor samples.
#' @export
#' @examples
#' ### need to install package "limma"
#' ### source("https://bioconductor.org/biocLite.R");biocLite("limma");
#' BayPM(simUCEC,20,2);
#'
BayPM <-function(betaValue,TOPK=500,tumorNum=NULL, filterProbes=FALSE,userProbes=NULL){
  ###dmcf: DMC sites file (differentially methylated CpG sites)
### filter the dataset
#anrow<-dim(betaValue)[1] #row number
ancol<-dim(betaValue)[2] #col number
#############============================
#cat("Find DMCs (in ApiGetDMCs.R).. \n")
### get DMCs from ApiGetDMCs.R
DMCs_nonDMCs=ApiGetDMCs(betaValue,TOPK,tumorNum,filterProbes,userProbes)
#write.csv(DMCs_nonDMCs, file=dmcfn) ## DMCs-non-DMCs
cat("DMCs and non-DMCs were identified by moderated-t statistics.. \n")
if(is.null(DMCs_nonDMCs)){
stop("not find DMCs or non-DMCs.")
}else{
DMCs<-DMCs_nonDMCs[,1] # DMCs
nonDMCs<-DMCs_nonDMCs[,2] # non-DMCs
}
###find DMCs's index in annot
#DMCsidx=match(DMCs,annot$`Composite Element REF`)
#find nonDMCs's index in annot
#nonDMCsidx=match(nonDMCs,annot$`Composite Element REF`)
#annotGeneNames<-NULL
#data(annotGeneNames,envir=enviroment())
DMCsidx=match(DMCs,annotGeneNames)
nonDMCsidx=match(nonDMCs,annotGeneNames)
#print(dim(betaValue))
#print("=======")
#print(DMCsidx)
#print("==========")
DMCsBeta=betaValue[DMCsidx,] # beta value of DMCs
nonDMCsBeta=betaValue[nonDMCsidx,] # beta value of non-DMCs
##ancol=ncol(betaValue) #column number of Lbeta
if (is.null(tumorNum)){
tumorNum=ancol/2 # cancer,normal.
print("TumorNum not provided. First half columns used as tumor samples.")
}
### rt=50 #repeat 50 times
### top500 DMCs, top500 non-DMCs
m=TOPK #
#n=tumorNum #sample number
alp_est=rep(NA,tumorNum) #matrix(NA,n,rt) #store alpha:tumor purity
xbar_est=rep(NA,m) #matrix(NA,m,rt)
#store xi: mode of beta-value of each row of tumor samples x
#z=nonDMCsBeta[,1:sampleN] #mix cancer loci*sampleN
#y=nonDMCsBeta[,(sampleN+1):(sampleN+sampleN)] #normal
# ===============Estimate phi
# Step1: estimate phi using mean/var of y (normal)
cat("Step1 estimate phi ...\n")
y_all=betaValue[,(tumorNum+1):(ancol)] #normal samples
# mean/var of y: m1/v1
m1 = rowMeans(y_all, na.rm = T)
v1 = apply(y_all, 1, stats::var)
# estimate phi: mode of beta-value of each row of control samples y
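  # Method-of-moments Beta mode: with mean m and variance v, a+b = m*(1-m)/v - 1,
  # and the mode (a-1)/(a+b-2) simplifies to the expression below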
phi = m1 + (2*m1 - 1)*v1 / (m1*(1-m1) - 3*v1)
# phi=y_bar if phi outside (0,1)
# phi[phi >= 1 | phi <= 0] = m1[phi >= 1 | phi <= 0]
phi[phi>=1 &!is.na(phi)]=m1[phi>=1 & !is.na(phi)]
phi[phi<=0 &!is.na(phi)]=m1[phi<=0 & !is.na(phi)]
# ================Estmate v_z noise intensity
# Step2/3: estimate v_z using non-DMCs by maximum likelihood estimation
cat("Step2/3 estimate v_z ...\n")
nonDMCs_z<-nonDMCsBeta[,1:tumorNum] # z is tumor
#print(c(dim(phi),length(phi),length(nonDMCsidx)))
nonDMCs_phi<-phi[nonDMCsidx]
nu_z <- estimateNu(nonDMCs_z,nonDMCs_phi)
# ===============Sampleing xi and alpha(tumor purity)
# STep4: sampel xi alpha form DMCs
cat("Step4 sample alpha and xi ...\n")
DMCs_z=DMCsBeta[,1:tumorNum] # tumor/cancer DMCs
DMCs_y=DMCsBeta[,(tumorNum+1):ancol] #normal in DMCs
nm = rowMeans(DMCs_y, na.rm = T) #nomral mean
tm = rowMeans(DMCs_z, na.rm = T) #tumor mean
# hypermethylation 1; hypomethylation 2;
mstates = rep(NA, m)
mstates[which(tm > nm)] = 1 #hyper
mstates[which(tm < nm)] = 2 #hypo
set.seed(0)
# fullSampler from purityGS_mode.R;
# output: x_bar,x_last,x_sample,xpar,nab(nv),alp(alpha)
res = fullSampler(DMCs_y, DMCs_z, mstates, matrix(c(0.5,0.5,0.5,0.5),2), maxit = 500, burnin = 3000, n_ab0 = nu_z)
  eryn = 10 # thinning interval
  alp_est = colMeans(res$alp[(1:(length(res$nab)/eryn))*eryn,]) # posterior mean of alpha over every 10th draw
#xbar_est= res$x_bar
return(alp_est)
}
|
/scratch/gouwar.j/cran-all/cranData/BPM/R/BayPM.R
|
#' Gene names of probes in the 450K array data
#'
#'
#'
#' @docType data
#' @keywords datasets
#' @format A vector with length 480457
#' @name annotGeneNames
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/BPM/R/annotGeneNames.R
|
#' Estimate noise intensity (nu) for non-DMCs,
#' using maximum likelihood estimation.
#'
#' @param z A matrix. Observed mixed tumor samples.
#' @param phi Mode of the beta-values of each row in the pure normal samples y.
#' @param maxit A positive integer. The maximum number of bisection iterations.
#' @param beginP A number, the starting point of the bisection search for the root.
#'
#' @return The estimated nu (noise intensity).
#'
#'
estimateNu<-function(z, phi, maxit = 50,beginP=20) {
  m = nrow(z) # row number (loci)
  n = ncol(z) # column number (samples)
c1 <- 0
for (i in 1 : m) {
c1 <- c1 + sum(phi[i] * log(z[i,]/(1-z[i,])),na.rm = TRUE)
}
c1 <- c1 + sum(log(1-z),na.rm=TRUE)
c1 <- c1 / n
  ### solve c1 = .logLikFun(phi, nu) for nu by bisection: expand the bracket
  ### geometrically until it straddles the root, then halve it
  nu <- beginP # starting point
  nu_high <- nu
  nu_low <- nu
  for (tt in 1:maxit) {
    temp <- .logLikFun(phi, nu)
    if (temp == c1) {
      break
    } else if (temp < c1) {
      if (nu_high == nu) { # still expanding the upper bracket
        nu_low <- nu
        nu <- nu * 2
        nu_high <- nu
      } else {
        nu_low <- nu
        nu <- (nu + nu_high) / 2
      }
    } else {
      if (nu_low == nu) { # still shrinking the lower bracket
        nu_high <- nu
        nu <- nu / 2
        nu_low <- nu
      } else {
        nu_high <- nu
        nu <- (nu + nu_low) / 2
      }
    }
  }
  nu
}
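## Usage sketch (as called from BayPM()): `nonDMCs_z` holds the tumor
## beta-values of the non-DMCs and `nonDMCs_phi` the corresponding modes
## estimated from the normal samples
# nu_z <- estimateNu(nonDMCs_z, nonDMCs_phi)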
### ---- logLikFun: equation (3.13) in the paper / formula (6) in supplementary S2.
.logLikFun <-function (phi, nu) {
K <- length(phi)
ll <- sum(phi * digamma(phi*nu+1),na.rm=TRUE)
ll <- ll + sum((1-phi) * digamma((1-phi)*nu+1),na.rm=TRUE)
ll <- ll - K * digamma(nu + 2)
ll
}
|
/scratch/gouwar.j/cran-all/cranData/BPM/R/estimateNu.R
|
#' Sampling xi and alpha (tumor purity)
#' @param y A matrix, observed pure normal samples
#' @param z A matrix, observed mixed tumor samples
#' @param mstates A vector of methylation states (1 = hyper, 2 = hypo)
#' @param xprior A 2x2 matrix of Beta prior parameters for the hyper/hypo methylation modes
#' @param maxit A number, maximum number of sampling iterations
#' @param burnin A number, number of "burn-in" iterations
#' @param xpar Logical, whether to update the xi prior parameters during sampling; default is FALSE
#' @param n_ab0 Initial value of the noise intensity n_ab
#' @param alp0 Initial value of alpha
#' @param xbar0 Initial value of xbar
#' @param trace Logical, whether to record sampling traces; default is FALSE
#' @param verbose Logical, whether to print progress messages; default is FALSE
#' @return A list with components: x_bar (estimated mode of xi), x_last (the last
#'         sampled xi), x_sample (recorded xi samples), xpar (sampled xi prior
#'         parameters), nab (sampled noise intensities n_ab), alp (sampled tumor purities alpha)
#'@export
#'
#'
fullSampler <-function (y, z, mstates, xprior = NULL,
maxit = 1000, burnin = maxit,
xpar = FALSE, n_ab0 = NULL,
alp0 = NULL, xbar0 = NULL,
trace = FALSE, verbose = FALSE) {
#---- sample xi alpha from Top-K DMCs
# y normal; z mixed/cancer; mstats hyper/hypo;
m <- nrow(y) #loci
n <- ncol(y) #sample Number
if (is.null(xprior)) {
#xprior <- matrix(c(10,1,1,10),2)
xprior <- matrix(c(0.5,0.5,0.5,0.5),2)
}
hyper <- (mstates == 1)
hypo <- (mstates == 2)
# Initialization
if (!is.null(n_ab0)) {
n_ab <- n_ab0
}else{
n_ab <- 50
}
if (!is.null(alp0)) {
alp <- alp0
}else{
alp <- stats::rbeta(n, 5, 5)
}
x_bar <- rep(NA, m)
m1 <- rowMeans(y, na.rm = T)
v1 <- apply(y, 1, stats::var, na.rm = T)
y_bar <- m1 + (2*m1 - 1)*v1 / (m1*(1-m1) - 3*v1)
#print(v1)
#print(y_bar)
y_bar[y_bar >= 1 | y_bar <= 0] <- m1[y_bar >= 1 | y_bar <= 0]
if (is.null(xbar0)) {
x_bar[hyper] <- .sampleXbar(y_bar[hyper], z[hyper,], alp, n_ab, xprior[1,])
x_bar[hypo] <- .sampleXbar(y_bar[hypo], z[hypo,], alp, n_ab, xprior[2,])
} else {
x_bar <- xbar0
}
if (xpar) {
xprior[1,] <- .sampleXprior(x_bar[hyper])
xprior[2,] <- .sampleXprior(x_bar[hypo])
}
# Burn-in
track <- list()
if (trace) {
if (xpar) {
track$xprior <- array(0, dim = c(2,2,burnin))
}
track$nab <- rep(0, burnin)
}
for (it in 1 : burnin) {
if (it %% 10 == 1 & verbose) {
cat("Burn-in at iteration: ", it, "\n")
if (xpar) {
print(xprior)
}
}
n_ab <- .sampleNab(x_bar, y_bar, z, alp, n_ab)
for (k in 1 : n) {
alp[k] <- .sampleAlp(x_bar, y_bar, z[,k], alp[k], n_ab)
}
x_bar[hyper] <- .sampleXbar(y_bar[hyper], z[hyper,], alp, n_ab, xprior[1,], x_bar[hyper])
x_bar[hypo] <- .sampleXbar(y_bar[hypo], z[hypo,], alp, n_ab, xprior[2,], x_bar[hypo])
if (xpar) {
xprior[1,] <- .sampleXprior(x_bar[hyper], xprior[1,])
xprior[2,] <- .sampleXprior(x_bar[hypo], xprior[2,])
}
if (trace) {
if (xpar) {
track$xprior[,,it] <- xprior
}
track$nab[it] <- n_ab
}
}
it <- 1
n_ab2 <- rep(0, maxit)
alp2 <- matrix(0, maxit, n)
x2 <- x_bar
xprior2 <- array(0, dim = c(2,2,maxit))
x_mean <- matrix(0, length(x_bar), maxit/10)
x_mean[,1] <- x2
nrec <- 100
x_sample <- matrix(0, maxit/10, nrec)
x_sample[1,] <- c(x_bar[1:nrec])
n_ab2[it] <- .sampleNab(x_bar, y_bar, z, alp, n_ab)
for (k in 1 : n) {
alp2[it,k] <- .sampleAlp(x_bar, y_bar, z[,k], alp[k], n_ab2[it])
}
x2[hyper] <- .sampleXbar(y_bar[hyper], z[hyper,], alp2[it,], n_ab2[it], xprior[1,], x2[hyper])
x2[hypo] <- .sampleXbar(y_bar[hypo], z[hypo,], alp2[it,], n_ab2[it], xprior[2,], x2[hypo])
if (xpar) {
xprior2[1,,it] <- .sampleXprior(x_bar[hyper], xprior[1,])
xprior2[2,,it] <- .sampleXprior(x_bar[hypo], xprior[2,])
}
for (it in 2 : maxit) {
if (it %% 10 == 1) {
if (verbose)
cat("Sampling at iteration: ", it, "\n")
if (xpar) {
print(xprior2[,,it-1])
}
x_mean[,(it+9)/10] <- x2
x_sample[(it+9)/10,] <- c(x2[1:nrec])
}
n_ab2[it] <- .sampleNab(x2, y_bar, z, alp2[it-1,], n_ab2[it-1])
for (k in 1 : n) {
alp2[it,k] <- .sampleAlp(x2, y_bar, z[,k], alp2[it-1,k], n_ab2[it])
}
if (xpar) {
x2[hyper] <- .sampleXbar(y_bar[hyper], z[hyper,], alp2[it,], n_ab2[it], xprior2[1,,it-1], x2[hyper])
x2[hypo] <- .sampleXbar(y_bar[hypo], z[hypo,], alp2[it,], n_ab2[it], xprior2[2,,it-1], x2[hypo])
xprior2[1,,it] <- .sampleXprior(x2[hyper], xprior2[1,,it-1])
xprior2[2,,it] <- .sampleXprior(x2[hypo], xprior2[2,,it-1])
} else {
x2[hyper] <- .sampleXbar(y_bar[hyper], z[hyper,], alp2[it,], n_ab2[it], xprior[1,], x2[hyper])
x2[hypo] <- .sampleXbar(y_bar[hypo], z[hypo,], alp2[it,], n_ab2[it], xprior[2,], x2[hypo])
}
}
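  # summarize the thinned draws with the same method-of-moments Beta mode
  # estimate used for y_bar above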
x_mode <- rep(NA, m)
m1 <- rowMeans(x_mean, na.rm = T)
v1 <- apply(x_mean, 1, stats::var, na.rm = T)
x_mode <- m1 + (2*m1 - 1)*v1 / (m1*(1-m1) - 3*v1)
x_mode[x_mode >= 1 | x_mode <= 0] <- m1[x_mode >= 1 | x_mode <= 0]
if (trace) {
return(list(x_bar = x_mode, x_last = x2, x_sample = x_sample, xpar = xprior2, nab = n_ab2, alp = alp2, track = track))
} else {
return(list(x_bar = x_mode, x_last = x2, x_sample = x_sample, xpar = xprior2, nab = n_ab2, alp = alp2))
}
}
####============== functions used in fullSampler.R
###-------- sampleAlp
.sampleAlp <-function(x_bar, y_bar, z, alp0, n_ab) {
## sampling alpha using MH algorithm
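  # propose from Normal(alp0, 0.005), truncated to (0, 1) by rejection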
alp_new <- stats::rnorm(1, alp0, 0.005)
while (alp_new < 0 | alp_new > 1) {
alp_new <- stats::rnorm(1, alp0, 0.005)
}
a_new <- (x_bar*alp_new + y_bar*(1-alp_new))*n_ab
b_new <- n_ab - a_new
L_new <- sum(stats::dbeta(z, a_new+1, b_new+1, log = T), na.rm = T)
a0 <- (x_bar*alp0 + y_bar*(1-alp0))*n_ab
b0 <- n_ab - a0
L0 <- sum(stats::dbeta(z, a0+1, b0+1, log = T), na.rm = T)
acc <- exp(L_new - L0) #accept
if (is.nan(acc)) {
acc = 0.5
}
if (acc >= 1) {
return(alp_new)
} else {
if (stats::runif(1) > acc) {
return(alp0)
} else {
return(alp_new)
}
}
}
### ---------- sampleNab
.sampleNab <-function(x_bar, y_bar, z_all, alp_all, n_ab0) {
  # *_all means using the whole dataset
  # Assume uniform prior, log-normal proposal
  # MH method
  # NOTE: the update is short-circuited here and always returns a fixed
  # concentration (n_ab0 itself when NULL, otherwise 30); the MH code
  # below is unreachable as written.
  if (is.null(n_ab0)) {
    return(n_ab0)
  } else {
    return(30)
  }
n <- length(alp_all)
m <- length(x_bar)
propVar <- 0.01
n_ab_new <- exp(stats::rnorm(1, log(n_ab0), propVar))
L0 <- 0
L_new <- 0
a0 <- (x_bar %*% t(alp_all) + y_bar %*% t(1-alp_all))*n_ab0
b0 <- n_ab0 - a0
a_new <- (x_bar %*% t(alp_all) + y_bar %*% t(1-alp_all))*n_ab_new
b_new <- n_ab_new - a_new
L0 <- L0 + sum(stats::dbeta(z_all, a0+1, b0+1, log = T), na.rm = T)
L_new <- L_new + sum(stats::dbeta(z_all, a_new+1, b_new+1, log = T), na.rm = T)
acc <- exp(L_new - L0)
if (is.nan(acc)) {
acc = 0.5
}
if (acc >= 1) {
return(n_ab_new)
} else {
if (stats::runif(1) > acc) {
return(n_ab0)
} else {
return(n_ab_new)
}
}
}
### ---------sampleXbar
.sampleXbar <-function(y_bar, z, alp_all, n_ab, xprior = NULL, xbar0 = NULL) {
# sample xi (mode of beta value of each row in tumor)
# using MH algorithm
m <- length(y_bar)
n <- length(alp_all)
if (is.null(xprior)) {
xprior <- c(1,1)
}
if (is.null(xbar0)) {
return(stats::rbeta(m, xprior[1], xprior[2]))
}
x_bar <- stats::rnorm(m, xbar0, 0.005)
temp <- which(x_bar < 0 | x_bar > 1)
while(length(temp) > 0) {
x_bar[temp] <- stats::rnorm(length(temp), xbar0[temp], 0.005)
temp <- which(x_bar < 0 | x_bar > 1)
}
a0 <- (xbar0 %*% t(alp_all) + y_bar %*% t(1-alp_all))*n_ab
b0 <- n_ab - a0
a <- (x_bar %*% t(alp_all) + y_bar %*% t(1-alp_all))*n_ab
b <- n_ab - a
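  # vectorized MH step: per-locus log-likelihood ratio (summed over samples)
  # plus the log prior ratio; each locus is accepted or rejected independently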
acc <- rowSums(stats::dbeta(z, a+1, b+1, log = T) - stats::dbeta(z, a0+1, b0+1, log = T), na.rm = T)
acc <- exp(acc + stats::dbeta(x_bar, xprior[1], xprior[2], log = T) - stats::dbeta(xbar0, xprior[1], xprior[2], log = T))
acc[is.nan(acc)] <- 0.5
u <- stats::runif(m)
x_bar[u > acc] <- xbar0[u > acc]
return(x_bar)
}
### ---------- mv2ab
.mv2ab <-function(m1, v1) {
### matrix form
a1 <- (m1*(1 - m1)/v1 - 1) * m1
b1 <- (m1*(1 - m1)/v1 - 1) * (1 - m1)
return(c(a1,b1))
}
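### Hedged check (illustration only): .mv2ab inverts the Beta mean/variance
### relations m = a/(a+b) and v = a*b/((a+b)^2 * (a+b+1)).
# a <- 2; b <- 5
# m1 <- a / (a + b)
# v1 <- a * b / ((a + b)^2 * (a + b + 1))
# .mv2ab(m1, v1)  # ~ c(2, 5)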
###----------- sampleXprior using mv2ab
.sampleXprior <-function (x_bar, xprior0 = NULL) {
if (is.null(xprior0)) {
m1 <- mean(x_bar, na.rm = T)
v1 <- stats::var(as.vector(x_bar), na.rm = T)
xprior <- .mv2ab(m1, v1)
return(xprior)
}
  # A Beta(5, 5) prior is placed on the ratio xprior[1]/(xprior[1]+xprior[2])
xprior <- stats::rnorm(2, xprior0, 0.002)
L_new <- sum(stats::dbeta(x_bar, xprior[1], xprior[2], log = T) +
stats::dbeta(xprior[1]/sum(xprior), 5, 5, log = T), na.rm = T)
L0 <- sum(stats::dbeta(x_bar, xprior0[1], xprior0[2], log = T) +
stats::dbeta(xprior0[1]/sum(xprior0), 5, 5, log = T), na.rm = T)
acc <- exp(L_new - L0)
if (is.nan(acc)) {
acc = 0.5
}
if (acc >= 1) {
return(xprior)
} else {
if (stats::runif(1) > acc) {
return(xprior0)
} else {
return(xprior)
}
}
}
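### Hedged usage sketch (illustration only, not part of the package; the
### data-generating setup, including the fixed concentration n_ab = 30,
### is an assumption):
# set.seed(1)
# y <- matrix(stats::rbeta(100 * 3, 2, 8), 100, 3)  # pure normal samples
# x_true <- stats::rbeta(100, 2, 2)                 # tumor beta-value modes
# alp_true <- stats::rbeta(4, 5, 5)                 # true purities
# z <- sapply(alp_true, function(a) {
#   mix <- a * x_true + (1 - a) * rowMeans(y)
#   stats::rbeta(100, mix * 30 + 1, (1 - mix) * 30 + 1)
# })
# mstates <- ifelse(x_true > rowMeans(y), 1, 2)     # 1 = hyper, 2 = hypo
# res <- fullSampler(y, z, mstates, maxit = 200, burnin = 100)
# colMeans(res$alp)                                 # compare to alp_true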
|
/scratch/gouwar.j/cran-all/cranData/BPM/R/fullSampler.R
|
#' Good probes in the package
#'
#' Good probes, with probes on the Y chromosome removed.
#'
#'
#' @docType data
#' @keywords datasets
#' @format A vector with length 425698
#' @name goodProbes
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/BPM/R/goodProbes.R
|
#' Simulated data to illustrate the datasets in the package
#'
#' A dataset containing 100 genes and 4 samples;
#' the first two columns are tumor1 and tumor2,
#' the last two columns are normal1 and normal2.
#' \itemize{
#' \item x. the genes
#' \item y. two tumor samples; two normal samples;
#' }
#'
#'
#' @docType data
#' @keywords datasets
#' @format A matrix with 100 rows and 4 columns
#' @name simUCEC
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/BPM/R/simUCEC.R
|
#' The 'BPrinStratTTE' package.
#'
#' @description Bayesian models to estimate causal effects of biological
#' treatments on time-to-event endpoints in clinical trials with principal
#' strata defined by the occurrence of antidrug antibodies.
#' The methodology is based on Frangakis and Rubin (2002)
#' <doi:10.1111/j.0006-341x.2002.00021.x> and Imbens and Rubin (1997)
#' <doi:10.1214/aos/1034276631>, and intended to be applied to a
#' specific time-to-event setting.
#' @docType package
#' @name BPrinStratTTE-package
#' @aliases BPrinStratTTE
#' @useDynLib BPrinStratTTE, .registration = TRUE
#' @import methods
#' @import Rcpp
#' @importFrom rstan sampling
#'
#' @references
#' Stan Development Team (2022). RStan: the R interface to Stan. R package version 2.21.5. https://mc-stan.org
#'
NULL
## usethis namespace: start
#' @importFrom stats pexp
#' @importFrom stats rexp
#' @importFrom stats runif
#' @importFrom dplyr select
#' @importFrom dplyr filter
#' @importFrom dplyr mutate
#' @importFrom purrr map
#' @importFrom furrr future_map
#' @importFrom magrittr %>%
#' @importFrom tibble as_tibble
#' @importFrom tibble tibble
#' @importFrom stringr str_pad
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/BPrinStratTTE-package.R
|
#' Fit multiple models to data from two-arm trials with an exponentially distributed time-to-event endpoint and one predictor of the intercurrent event
#'
#' @param dat_mult_trials List generated by `sim_dat_mult_trials_exp_covar`.
#' @param params List of model parameters as supplied to `fit_single_exp_covar`.
#' @param seed Numeric value, seed for reproducibility.
#'
#' @return A list of objects generated by `fit_single_exp_covar`.
#' @export
#'
#' @seealso [sim_dat_mult_trials_exp_covar()], [fit_single_exp_covar()], [fit_mult_exp_nocovar()]
#'
#' @examples
#' d_params_covar <- list(
#' n = 1000,
#' nt = 500,
#' prob_X1 = 0.4,
#' prob_ice_X1 = 0.5,
#' prob_ice_X0 = 0.2,
#' fu_max = 48*7,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' dat_mult_trials <- sim_dat_mult_trials_exp_covar(
#' n_iter = 2,
#' params = d_params_covar
#' )
#' m_params_covar <- list(
#' tg = 48,
#' p = 2,
#' prior_delta = matrix(
#' c(0, 5, 0, 5),
#' nrow = 2, byrow = TRUE),
#' prior_0N = c(1.5, 5),
#' prior_1N = c(1.5, 5),
#' prior_0T = c(1.5, 5),
#' prior_1T = c(1.5, 5),
#' t_grid = seq(7, 7 * 48, 7) / 30,
#' chains = 2,
#' n_iter = 3000,
#' warmup = 1500,
#' cores = 2,
#' open_progress = FALSE,
#' show_messages = TRUE
#' )
#' \donttest{
#' fit_multiple <- fit_mult_exp_covar(
#' dat_mult_trials = dat_mult_trials,
#' params = m_params_covar,
#' seed = 12
#' )
#' lapply(fit_multiple, dim)
#' head(fit_multiple[[1]])
#' }
#'
fit_mult_exp_covar <- function(
dat_mult_trials,
params,
seed = 23) {
furrr::future_map(
.x = dat_mult_trials,
.f = fit_single_exp_covar,
params = params,
.options = furrr::furrr_options(seed = seed)
)
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/fit_mult_exp_covar.R
|
#' Fit multiple models to data from two-arm trials with an exponentially distributed time-to-event endpoint and no predictor of the intercurrent event
#'
#' @param dat_mult_trials List generated by `sim_dat_mult_trials_exp_nocovar`.
#' @param params List of model parameters as supplied to `fit_single_exp_nocovar`.
#' @param seed Numeric value, seed for reproducibility.
#'
#' @return A list of objects generated by `fit_single_exp_nocovar`.
#' @export
#'
#' @seealso [sim_dat_mult_trials_exp_nocovar()], [fit_single_exp_nocovar()], [fit_mult_exp_covar()]
#'
#' @examples
#' d_params_nocovar <- list(
#' n = 500L,
#' nt = 250L,
#' prob_ice = 0.5,
#' fu_max = 336L,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' dat_mult_trials <- sim_dat_mult_trials_exp_nocovar(
#' n_iter = 2,
#' params = d_params_nocovar
#' )
#' m_params_nocovar <- list(
#' tg = 48L,
#' prior_piT = c(0.5, 0.5),
#' prior_0N = c(1.5, 5),
#' prior_1N = c(1.5, 5),
#' prior_0T = c(1.5, 5),
#' prior_1T = c(1.5, 5),
#' t_grid = seq(7, 7 * 48, 7) / 30,
#' chains = 2L,
#' n_iter = 3000L,
#' warmup = 1500L,
#' cores = 2L,
#' open_progress = FALSE,
#' show_messages = TRUE
#' )
#' \donttest{
#' fit_multiple <- fit_mult_exp_nocovar(
#' dat_mult_trials = dat_mult_trials,
#' params = m_params_nocovar,
#' seed = 12
#' )
#' lapply(fit_multiple, dim)
#' head(fit_multiple[[1]])
#' }
fit_mult_exp_nocovar <- function(
dat_mult_trials,
params,
seed = 23) {
furrr::future_map(
.x = dat_mult_trials,
.f = fit_single_exp_nocovar,
params = params,
.options = furrr::furrr_options(seed = seed)
)
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/fit_mult_exp_nocovar.R
|
#' Fit single model to data from a two-arm trial with an exponentially distributed time-to-event endpoint and one predictor of the intercurrent event
#'
#' @param data Data frame of a structure as generated by `sim_dat_one_trial_exp_covar()`.
#' @param params List, containing model parameters:
#' * `tg` Positive integer value, number of intervals to calculate restricted mean survival time using the trapezoidal rule.
#' * `p` Positive integer value, number of predictors of the intercurrent event of interest (i.e. the event that determines the principal stratum membership).
#' * `prior_delta` `p`x2 matrix of positive numerical values, containing normal priors (mean and standard deviation) of the model parameter delta.
#' * `prior_0N` Numeric vector of length 2, containing parameters (alpha, beta) of the gamma prior on lambda_0N.
#' * `prior_1N` Numeric vector of length 2, containing parameters (alpha, beta) of the gamma prior on lambda_1N.
#' * `prior_0T` Numeric vector of length 2, containing parameters (alpha, beta) of the gamma prior on lambda_0T.
#' * `prior_1T` Numeric vector of length 2, containing parameters (alpha, beta) of the gamma prior on lambda_1T.
#' * `t_grid` Numeric vector of length `tg`, containing time points defining the time grid (in months) to calculate restricted mean survival time using the trapezoidal rule.
#' * `chains` Positive integer value, specifying the number of Markov chains.
#' * `n_iter` Positive integer value, specifying the number of iterations for each chain (including warmup).
#' * `warmup` Positive integer value, specifying the number of warmup (aka burnin) iterations per chain.
#' * `cores` Positive integer value, specifying the number of cores to use when executing the chains in parallel.
#' * `open_progress` Logical value, indicating whether the progress of the chains will be redirected to a file that is automatically opened for inspection.
#' * `show_messages` Logical value, indicating whether to print the summary of informational messages.
#' @param summarize_fit Logical, if `TRUE` (default), the output is restricted to a summary of results on key parameters over all chains, if `FALSE`, the complete `stanfit` object is returned.
#'
#' @return `tibble()` containing a summary of results on key parameters, or a `stanfit` object (S4 class), depending on `summarize_fit`.
#' @export
#'
#' @details
#' The data supplied as `params` are used either as priors (`prior_delta`, `prior_0N`, `prior_1N`, `prior_0T`, `prior_1T`), to inform the model setup (`tg`, `p`, `t_grid`), or as parameters to `rstan::sampling()` which is invoked internally (`chains`, `n_iter`, `warmup`, `cores`, `open_progress`, `show_messages`).
#'
#' @seealso [fit_single_exp_nocovar()] and [rstan::sampling()]
#'
#' @examples
#' d_params_covar <- list(
#' n = 1000,
#' nt = 500,
#' prob_X1 = 0.4,
#' prob_ice_X1 = 0.5,
#' prob_ice_X0 = 0.2,
#' fu_max = 48*7,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' dat_single_trial <- sim_dat_one_trial_exp_covar(
#' n = d_params_covar[["n"]],
#' nt = d_params_covar[["nt"]],
#' prob_X1 = d_params_covar[["prob_X1"]],
#' prob_ice_X1 = d_params_covar[["prob_ice_X1"]],
#' prob_ice_X0 = d_params_covar[["prob_ice_X0"]],
#' fu_max = d_params_covar[["fu_max"]],
#' T0T_rate = d_params_covar[["T0T_rate"]],
#' T0N_rate = d_params_covar[["T0N_rate"]],
#' T1T_rate = d_params_covar[["T1T_rate"]],
#' T1N_rate = d_params_covar[["T1N_rate"]]
#' )
#' m_params_covar <- list(
#' tg = 48,
#' p = 2,
#' prior_delta = matrix(
#' c(0, 5, 0, 5),
#' nrow = 2, byrow = TRUE),
#' prior_0N = c(1.5, 5),
#' prior_1N = c(1.5, 5),
#' prior_0T = c(1.5, 5),
#' prior_1T = c(1.5, 5),
#' t_grid = seq(7, 7 * 48, 7) / 30,
#' chains = 2,
#' n_iter = 3000,
#' warmup = 1500,
#' cores = 2,
#' open_progress = FALSE,
#' show_messages = FALSE
#' )
#' \donttest{
#' fit_single <- fit_single_exp_covar(
#' data = dat_single_trial,
#' params = m_params_covar,
#' summarize_fit = FALSE
#' )
#' print(fit_single)
#' }
fit_single_exp_covar <- function(data, params, summarize_fit = TRUE) {
# input data for model
data_stan <- list(
n = nrow(data),
nt = sum(data$Z==1),
p = params[["p"]],
tg = params[["tg"]],
Z = data$Z,
S = data$S,
S_trt = data$S[data$Z==1],
TIME = data$TIME/30,
EVENT = data$EVENT,
X = matrix(c(rep(1,nrow(data)), data$X), nrow = nrow(data)),
X_trt = matrix(c(rep(1,sum(data$Z==1)), data$X[data$Z==1]), nrow = sum(data$Z==1)),
prior_delta = params[["prior_delta"]],
prior_0N = params[["prior_0N"]],
prior_1N = params[["prior_1N"]],
prior_0T = params[["prior_0T"]],
prior_1T = params[["prior_1T"]],
t_grid = params[["t_grid"]]
)
# fit model
fit_stan <- rstan::sampling(
object = stanmodels$m_exp_covar,
data = data_stan,
iter = params[["n_iter"]],
warmup = params[["warmup"]],
chains = params[["chains"]],
cores = params[["cores"]],
open_progress = params[["open_progress"]],
show_messages = params[["show_messages"]]
)
# for use with .stan files:
# fit_stan <- rstan::stan(
# file = model,
# data = data_stan,
# iter = params[["n_iter"]],
# warmup = params[["warmup"]],
# chains = params[["chains"]],
# cores = params[["cores"]]
# )
if(isTRUE(summarize_fit)) {
fit_stan <- fit_stan %>% rstan::summary() %>% magrittr::extract2("summary")
patterns <- c("S_", "lp", "n_eff")
fit_stan <- tibble::as_tibble(fit_stan, rownames="var") %>%
dplyr::filter(!grepl(paste(patterns, collapse="|"), var)) %>%
dplyr::select(!c("se_mean", "sd", "25%", "75%"))
}
return(fit_stan)
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/fit_single_exp_covar.R
|
#' Fit single model to data from a two-arm trial with an exponentially distributed time-to-event endpoint and no predictor of the intercurrent event
#'
#' @param data Data frame of a structure as generated by `sim_dat_one_trial_exp_nocovar()`.
#' @param params List, containing model parameters:
#' * `tg` Positive integer value, number of intervals to calculate restricted mean survival time using the trapezoidal rule.
#' * `prior_piT` Numeric vector of length 2, containing parameters (alpha, beta) of the beta prior on pi, indicating the probability of belonging to the stratum of subjects developing the intercurrent event if given treatment.
#' * `prior_0N` Numeric vector of length 2, containing parameters (alpha, beta) of the gamma prior on lambda_0N.
#' * `prior_1N` Numeric vector of length 2, containing parameters (alpha, beta) of the gamma prior on lambda_1N.
#' * `prior_0T` Numeric vector of length 2, containing parameters (alpha, beta) of the gamma prior on lambda_0T.
#' * `prior_1T` Numeric vector of length 2, containing parameters (alpha, beta) of the gamma prior on lambda_1T.
#' * `t_grid` Numeric vector of length `tg`, containing time points defining the time grid (in months) to calculate restricted mean survival time using the trapezoidal rule.
#' * `chains` Positive integer value, specifying the number of Markov chains.
#' * `n_iter` Positive integer value, specifying the number of iterations for each chain (including warmup).
#' * `warmup` Positive integer value, specifying the number of warmup (aka burnin) iterations per chain.
#' * `cores` Positive integer value, specifying the number of cores to use when executing the chains in parallel.
#' * `open_progress` Logical value, indicating whether the progress of the chains will be redirected to a file that is automatically opened for inspection.
#' * `show_messages` Logical value, indicating whether to print the summary of informational messages.
#' @param summarize_fit Logical, if `TRUE` (default), the output is restricted to a summary of results on key parameters over all chains, if `FALSE`, the complete `stanfit` object is returned.
#'
#' @return `tibble()` containing a summary of results on key parameters, or a `stanfit` object, depending on `summarize_fit`.
#' @export
#'
#' @details
#' The data supplied as `params` are used either as priors (`prior_piT`, `prior_0N`, `prior_1N`, `prior_0T`, `prior_1T`), to inform the model setup (`tg`, `t_grid`), or as parameters to `rstan::sampling()` which is invoked internally (`chains`, `n_iter`, `warmup`, `cores`, `open_progress`, `show_messages`).
#'
#' @seealso [fit_single_exp_covar()] and [rstan::sampling()]
#'
#' @examples
#' d_params_nocovar <- list(
#' n = 500L,
#' nt = 250L,
#' prob_ice = 0.5,
#' fu_max = 336L,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' dat_single_trial <- sim_dat_one_trial_exp_nocovar(
#' n = d_params_nocovar[["n"]],
#' nt = d_params_nocovar[["nt"]],
#' prob_ice = d_params_nocovar[["prob_ice"]],
#' fu_max = d_params_nocovar[["fu_max"]],
#' T0T_rate = d_params_nocovar[["T0T_rate"]],
#' T0N_rate = d_params_nocovar[["T0N_rate"]],
#' T1T_rate = d_params_nocovar[["T1T_rate"]],
#' T1N_rate = d_params_nocovar[["T1N_rate"]]
#' )
#' m_params_nocovar <- list(
#' tg = 48L,
#' prior_piT = c(0.5, 0.5),
#' prior_0N = c(1.5, 5),
#' prior_1N = c(1.5, 5),
#' prior_0T = c(1.5, 5),
#' prior_1T = c(1.5, 5),
#' t_grid = seq(7, 7 * 48, 7) / 30,
#' chains = 2L,
#' n_iter = 3000L,
#' warmup = 1500L,
#' cores = 2L,
#' open_progress = FALSE,
#' show_messages = TRUE
#' )
#' \donttest{
#' fit_single <- fit_single_exp_nocovar(
#' data = dat_single_trial,
#' params = m_params_nocovar,
#' summarize_fit = TRUE
#' )
#' print(fit_single)
#' }
fit_single_exp_nocovar <- function(data, params, summarize_fit = TRUE) {
# input data for model
data_stan <- list(
n = nrow(data),
tg = params[["tg"]],
Z = data$Z,
S = data$S,
TIME = data$TIME/30,
EVENT = data$EVENT,
prior_piT = params[["prior_piT"]],
prior_0N = params[["prior_0N"]],
prior_1N = params[["prior_1N"]],
prior_0T = params[["prior_0T"]],
prior_1T = params[["prior_1T"]],
t_grid = params[["t_grid"]]
)
# fit model
fit_stan <- rstan::sampling(
object = stanmodels$m_exp_nocovar,
data = data_stan,
iter = params[["n_iter"]],
warmup = params[["warmup"]],
chains = params[["chains"]],
cores = params[["cores"]],
open_progress = params[["open_progress"]],
show_messages = params[["show_messages"]]
)
# for use with .stan files:
# fit_stan <- rstan::stan(
# file = model,
# data = data_stan,
# iter = params[["n_iter"]],
# warmup = params[["warmup"]],
# chains = params[["chains"]],
# cores = params[["cores"]]
#)
if(isTRUE(summarize_fit)) {
fit_stan <- fit_stan %>% rstan::summary() %>% magrittr::extract2("summary")
patterns <- c("S_", "lp", "n_eff")
fit_stan <- tibble::as_tibble(fit_stan, rownames="var") %>%
dplyr::filter(!grepl(paste(patterns, collapse="|"), var)) %>%
dplyr::select(!c("se_mean", "sd", "25%", "75%"))
}
return(fit_stan)
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/fit_single_exp_nocovar.R
|
utils::globalVariables(c("2.5%", "50%", "97.5%", "Rhat", "true_val", "var"))
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/globals.R
|
#' Inverse logit function
#'
#' @param x Numeric value (usually a logarithm of odds).
#'
#' @return Numeric value on the interval \eqn{(0,1)}, result of `exp(x)/(1+exp(x))`.
#' @export
#'
#' @details
#' The inverse logit function is also known as logistic function.
#'
#' @seealso [logit()]
#'
#' @examples
#' # probabilities
#' prob_ICE_base <- 0.3
#' prob_ICE_risk <- 0.6
#' # model coefficients
#' (beta1 <- logit(prob_ICE_base))
#' (beta2 <- logit(prob_ICE_risk) - logit(prob_ICE_base))
#' # linear predictor
#' logit(prob_ICE_base); (lin_pred1 <- beta1 + beta2*0)
#' logit(prob_ICE_risk); (lin_pred2 <- beta1 + beta2*1)
#' # inverse logit of linear predictor
#' (inv_logit(lin_pred1)) # prob for X1 = 0
#' (inv_logit(lin_pred2)) # prob for X1 = 1
inv_logit <- function(x) {
exp(x)/(1+exp(x))
}
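# Note (illustrative, not part of the exported API): for large positive x the
# exp(x)/(1 + exp(x)) form overflows to Inf/Inf = NaN; stats::plogis(x)
# computes the same quantity in a numerically stable way, e.g.
# all.equal(inv_logit(3), stats::plogis(3))  # TRUE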
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/inv_logit.R
|
#' Logit function
#'
#' @param pi Numeric value on the interval \eqn{[0,1]} (usually a probability).
#'
#' @return Numeric value, result of `log(pi/(1-pi))`.
#' @export
#'
#' @seealso [inv_logit()]
#'
#' @examples
#' # probabilities
#' prob_ICE_base <- 0.3
#' prob_ICE_risk <- 0.6
#' # model coefficients
#' (beta1 <- logit(prob_ICE_base))
#' (beta2 <- logit(prob_ICE_risk) - logit(prob_ICE_base))
#' # linear predictor
#' logit(prob_ICE_base); (lin_pred1 <- beta1 + beta2*0)
#' logit(prob_ICE_risk); (lin_pred2 <- beta1 + beta2*1)
#' # inverse logit of linear predictor
#' (inv_logit(lin_pred1)) # prob for X1 = 0
#' (inv_logit(lin_pred2)) # prob for X1 = 1
logit <- function(pi) {
log(pi/(1-pi))
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/logit.R
|
#' Determine operating characteristics of fits from two-arm trials with an exponentially distributed time-to-event endpoint and one predictor of the intercurrent event
#'
#' @param multiple_fits List of model fits from `fit_mult_exp_covar`.
#' @param d_params List of data parameters as used in `sim_dat_one_trial_exp_covar`.
#' @param m_params List of model parameters as used in `fit_single_exp_covar`.
#'
#' @return A list of length 3, containing objects called `ocs`, `d_params`, `m_params`, where `ocs` is a `tibble` containing averaged parameter estimates and operating characteristics, and `d_params` and `m_params` are the objects supplied to the function.
#' @export
#'
#' @details
#' This function is used in `run_sim_exp_covar()`, the output of the two functions is the same.
#'
#' @seealso [ocs_exp_nocovar()] and [run_sim_exp_covar()].
#'
#' @examples
#' d_params_covar <- list(
#' n = 1000,
#' nt = 500,
#' prob_X1 = 0.4,
#' prob_ice_X1 = 0.5,
#' prob_ice_X0 = 0.2,
#' fu_max = 48*7,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' dat_mult_trials <- sim_dat_mult_trials_exp_covar(
#' n_iter = 2,
#' params = d_params_covar
#' )
#' m_params_covar <- list(
#' tg = 48,
#' p = 2,
#' prior_delta = matrix(
#' c(0, 5, 0, 5),
#' nrow = 2, byrow = TRUE),
#' prior_0N = c(1.5, 5),
#' prior_1N = c(1.5, 5),
#' prior_0T = c(1.5, 5),
#' prior_1T = c(1.5, 5),
#' t_grid = seq(7, 7 * 48, 7) / 30,
#' chains = 2,
#' n_iter = 3000,
#' warmup = 1500,
#' cores = 2,
#' open_progress = FALSE,
#' show_messages = TRUE
#' )
#' \donttest{
#' fit_multiple <- fit_mult_exp_covar(
#' dat_mult_trials = dat_mult_trials,
#' params = m_params_covar,
#' seed = 12
#' )
#' list_ocs <- ocs_exp_covar(
#' multiple_fits = fit_multiple,
#' d_params = d_params_covar,
#' m_params = m_params_covar
#' )
#' print(list_ocs)
#' }
#'
ocs_exp_covar <- function(multiple_fits, d_params, m_params) {
# obtain names of parameters to evaluate
var <- multiple_fits[[1]] %>% select(var)
# aggregate (numeric variables)
ocs <- purrr::map(
.x = multiple_fits,
.f = ~ true_vals_exp_covar(
x = .x,
d_params = d_params,
m_params = m_params
)
) %>%
purrr::map(~ dplyr::select(.x, -var)) %>%
purrr::map(as.matrix) %>%
simplify2array() %>%
apply(c(1, 2), mean)
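  # the apply() above averages each (parameter, statistic) cell element-wise
  # across the simulated trials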
# add names of parameters
ocs <- tibble::as_tibble(cbind(var = var, ocs))
# return ocs and data/model parameters
return(list(
"ocs" = ocs,
"d_params" = unlist(d_params),
"m_params" = unlist(m_params)
))
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/ocs_exp_covar.R
|
#' Determine operating characteristics of fits from two-arm trials with an exponentially distributed time-to-event endpoint and no predictor of the intercurrent event
#'
#' @param multiple_fits List of model fits from `fit_mult_exp_nocovar`.
#' @param d_params List of data parameters as used in `sim_dat_one_trial_exp_nocovar`.
#' @param m_params List of model parameters as used in `fit_single_exp_nocovar`.
#'
#' @return A list of length 3, containing objects called `ocs`, `d_params`, `m_params`, where `ocs` is a `tibble` containing averaged parameter estimates and operating characteristics, and `d_params` and `m_params` are the objects supplied to the function.
#' @export
#'
#' @details
#' This function is used in `run_sim_exp_nocovar()`, the output of the two functions is the same.
#'
#' @seealso [ocs_exp_covar()] and [run_sim_exp_nocovar()].
#'
#' @examples
#' d_params_nocovar <- list(
#' n = 500L,
#' nt = 250L,
#' prob_ice = 0.5,
#' fu_max = 336L,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' dat_mult_trials <- sim_dat_mult_trials_exp_nocovar(
#' n_iter = 2,
#' params = d_params_nocovar
#' )
#' m_params_nocovar <- list(
#' tg = 48L,
#' prior_piT = c(0.5, 0.5),
#' prior_0N = c(1.5, 5),
#' prior_1N = c(1.5, 5),
#' prior_0T = c(1.5, 5),
#' prior_1T = c(1.5, 5),
#' t_grid = seq(7, 7 * 48, 7) / 30,
#' chains = 2L,
#' n_iter = 3000L,
#' warmup = 1500L,
#' cores = 2L,
#' open_progress = FALSE,
#' show_messages = TRUE
#' )
#' \donttest{
#' fit_multiple <- fit_mult_exp_nocovar(
#' dat_mult_trials = dat_mult_trials,
#' params = m_params_nocovar,
#' seed = 12
#' )
#' list_ocs <- ocs_exp_nocovar(
#' multiple_fits = fit_multiple,
#' d_params = d_params_nocovar,
#' m_params = m_params_nocovar
#' )
#' print(list_ocs)
#' }
ocs_exp_nocovar <- function(multiple_fits, d_params, m_params) {
# obtain names of parameters to evaluate
var <- multiple_fits[[1]] %>% select(var)
# aggregate (numeric variables)
ocs <- purrr::map(
.x = multiple_fits,
.f = ~ true_vals_exp_nocovar(
x = .x,
d_params = d_params,
m_params = m_params
)
) %>%
purrr::map(~ dplyr::select(.x, -var)) %>%
purrr::map(as.matrix) %>%
simplify2array() %>%
apply(c(1, 2), mean)
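  # the apply() above averages each (parameter, statistic) cell element-wise
  # across the simulated trials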
# add names of parameters
ocs <- tibble::as_tibble(cbind(var = var, ocs))
# return ocs and data/model parameters
return(list(
"ocs" = ocs,
"d_params" = unlist(d_params),
"m_params" = unlist(m_params)
))
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/ocs_exp_nocovar.R
|
#' Run simulation of two-arm trials with an exponentially distributed time-to-event endpoint and one predictor of the intercurrent event
#'
#' @param n_iter Positive integer value, number of trials to be simulated.
#' @param d_params List of data parameters as used in `sim_dat_one_trial_exp_covar`.
#' @param m_params List of model parameters as used in `fit_single_exp_covar`.
#' @param seed Numeric value, seed for reproducibility.
#'
#' @return A list of length 3, containing objects called `ocs`, `d_params`, `m_params`, where `ocs` is a `tibble` containing averaged parameter estimates and operating characteristics, and `d_params` and `m_params` are the objects supplied to the function.
#' @export
#'
#' @seealso [run_sim_exp_nocovar()]
#'
#' @examples
#' d_params_covar <- list(
#' n = 1000,
#' nt = 500,
#' prob_X1 = 0.4,
#' prob_ice_X1 = 0.5,
#' prob_ice_X0 = 0.2,
#' fu_max = 48*7,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' m_params_covar <- list(
#' tg = 48,
#' p = 2,
#' prior_delta = matrix(
#' c(0, 5, 0, 5),
#' nrow = 2, byrow = TRUE),
#' prior_0N = c(1.5, 5),
#' prior_1N = c(1.5, 5),
#' prior_0T = c(1.5, 5),
#' prior_1T = c(1.5, 5),
#' t_grid = seq(7, 7 * 48, 7) / 30,
#' chains = 2,
#' n_iter = 3000,
#' warmup = 1500,
#' cores = 2,
#' open_progress = FALSE,
#' show_messages = TRUE
#' )
#' \donttest{
#' dat_ocs <- run_sim_exp_covar(
#' n_iter = 3,
#' d_params = d_params_covar,
#' m_params = m_params_covar,
#' seed = 12
#' )
#' print(dat_ocs)
#' }
#'
run_sim_exp_covar <- function(
n_iter, d_params, m_params, seed
) {
# Simulate data
d_mult <- sim_dat_mult_trials_exp_covar(
n_iter = n_iter,
params = d_params
)
# Run models
multiple_fits <- fit_mult_exp_covar(
dat_mult_trials = d_mult,
params = m_params,
seed = seed
)
# Obtain operating characteristics
sim <- ocs_exp_covar(
multiple_fits = multiple_fits,
d_params = d_params,
m_params = m_params
)
return(sim)
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/run_sim_exp_covar.R
|
#' Run simulation of two-arm trials with an exponentially distributed time-to-event endpoint and no predictor of the intercurrent event
#'
#' @param n_iter Positive integer value, number of trials to be simulated.
#' @param d_params List of data parameters as used in `sim_dat_one_trial_exp_nocovar`.
#' @param m_params List of model parameters as used in `fit_single_exp_nocovar`.
#' @param seed Numeric value, seed for reproducibility.
#'
#' @return A list of length 3, containing objects called `ocs`, `d_params`, `m_params`, where `ocs` is a `tibble` containing averaged parameter estimates and operating characteristics, and `d_params` and `m_params` are the objects supplied to the function.
#' @export
#'
#' @seealso [run_sim_exp_covar()]
#'
#' @examples
#' d_params_nocovar <- list(
#' n = 500L,
#' nt = 250L,
#' prob_ice = 0.5,
#' fu_max = 336L,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' m_params_nocovar <- list(
#' tg = 48L,
#' prior_piT = c(0.5, 0.5),
#' prior_0N = c(1.5, 5),
#' prior_1N = c(1.5, 5),
#' prior_0T = c(1.5, 5),
#' prior_1T = c(1.5, 5),
#' t_grid = seq(7, 7 * 48, 7) / 30,
#' chains = 2L,
#' n_iter = 3000L,
#' warmup = 1500L,
#' cores = 2L,
#' open_progress = FALSE,
#' show_messages = TRUE
#' )
#' \donttest{
#' dat_ocs <- run_sim_exp_nocovar(
#' n_iter = 3,
#' d_params = d_params_nocovar,
#' m_params = m_params_nocovar,
#' seed = 12
#' )
#' print(dat_ocs)
#' }
run_sim_exp_nocovar <- function(
n_iter, d_params, m_params, seed
) {
# Simulate data
d_mult <- sim_dat_mult_trials_exp_nocovar(
n_iter = n_iter,
params = d_params
)
# Run models
multiple_fits <- fit_mult_exp_nocovar(
dat_mult_trials = d_mult,
params = m_params,
seed = seed
)
# Obtain operating characteristics
sim <- ocs_exp_nocovar(
multiple_fits = multiple_fits,
d_params = d_params,
m_params = m_params
)
return(sim)
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/run_sim_exp_nocovar.R
|
#' Simulate data from multiple two-arm trials with an exponentially distributed time-to-event endpoint and one predictor of the intercurrent event
#'
#' @param n_iter Positive integer value, number of trials to be simulated.
#' @param params List of data parameters as used in `sim_dat_one_trial_exp_covar`.
#'
#' @return A list of length `n_iter`, containing objects of class `tibble()`, each containing one simulated trial dataset.
#' @export
#'
#' @seealso [sim_dat_mult_trials_exp_nocovar()]
#'
#' @examples
#' d_params_covar <- list(
#' n = 1000,
#' nt = 500,
#' prob_X1 = 0.4,
#' prob_ice_X1 = 0.5,
#' prob_ice_X0 = 0.2,
#' fu_max = 48*7,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' dat_mult_trials <- sim_dat_mult_trials_exp_covar(
#' n_iter = 3,
#' params = d_params_covar
#' )
#' lapply(dat_mult_trials, dim)
#' head(dat_mult_trials[[1]])
#'
sim_dat_mult_trials_exp_covar <- function(n_iter, params) {
replicate(
n_iter,
sim_dat_one_trial_exp_covar(
n = params[["n"]],
nt = params[["nt"]],
prob_X1 = params[["prob_X1"]],
prob_ice_X1 = params[["prob_ice_X1"]],
prob_ice_X0 = params[["prob_ice_X0"]],
fu_max = params[["fu_max"]],
T0T_rate = params[["T0T_rate"]],
T0N_rate = params[["T0N_rate"]],
T1T_rate = params[["T1T_rate"]],
T1N_rate = params[["T1N_rate"]]
),
simplify = F
) %>%
lapply(FUN = function(x) x[!(names(x) %in% c("PAT_ID","T0N","T0T","T1N","T1T"))])
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/sim_dat_mult_trials_exp_covar.R
|
#' Simulate data from multiple two-arm trials with an exponentially distributed time-to-event endpoint and no predictor of the intercurrent event
#'
#' @param n_iter Positive integer value, number of trials to be simulated.
#' @param params List of data parameters as used in `sim_dat_one_trial_exp_nocovar`.
#'
#' @return A list of length `n_iter`, containing objects of class `tibble()`, each containing one simulated trial dataset.
#' @export
#'
#' @seealso [sim_dat_mult_trials_exp_covar()]
#'
#' @examples
#' d_params_nocovar <- list(
#' n = 500L,
#' nt = 250L,
#' prob_ice = 0.5,
#' fu_max = 336L,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' dat_mult_trials <- sim_dat_mult_trials_exp_nocovar(
#' n_iter = 3,
#' params = d_params_nocovar
#' )
#' lapply(dat_mult_trials, dim)
#' head(dat_mult_trials[[1]])
#'
sim_dat_mult_trials_exp_nocovar <- function(n_iter, params) {
replicate(
n_iter,
sim_dat_one_trial_exp_nocovar(
n = params[["n"]],
nt = params[["nt"]],
prob_ice = params[["prob_ice"]],
fu_max = params[["fu_max"]],
T0T_rate = params[["T0T_rate"]],
T0N_rate = params[["T0N_rate"]],
T1T_rate = params[["T1T_rate"]],
T1N_rate = params[["T1N_rate"]]
),
simplify = F
) %>%
lapply(FUN = function(x) x[!(names(x) %in% c("PAT_ID","T0N","T0T","T1N","T1T"))])
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/sim_dat_mult_trials_exp_nocovar.R
|
#' Simulate data from a single two-arm trial with an exponentially distributed time-to-event endpoint and one predictor of the intercurrent event
#'
#' @param n Positive integer value, number of subjects in the trial.
#' @param nt Positive integer value, number of treated subjects.
#' @param prob_X1 Numeric value on the interval \eqn{(0,1)}, probability of being at high risk of experiencing the intercurrent event of interest when treated (i.e. the event that determines the principal stratum membership).
#' @param prob_ice_X1 Numeric value on the interval \eqn{(0,1)}, probability of the intercurrent event of interest if treated and at high risk of the intercurrent event.
#' @param prob_ice_X0 Numeric value on the interval \eqn{(0,1)}, probability of the intercurrent event of interest if treated and not at high risk of the intercurrent event.
#' @param fu_max Positive integer value, maximum follow-up time in days (administrative censoring assumed afterwards).
#' @param T0T_rate Positive numeric value, monthly event rate in control subjects that would develop the intercurrent event if treated.
#' @param T0N_rate Positive numeric value, monthly event rate in control subjects that never develop the intercurrent event.
#' @param T1T_rate Positive numeric value, monthly event rate in treated subjects that develop the intercurrent event.
#' @param T1N_rate Positive numeric value, monthly event rate in treated subjects that never develop the intercurrent event.
#'
#' @return A `tibble()` containing the trial data for analysis.
#' @export
#'
#' @seealso [sim_dat_one_trial_exp_nocovar()]
#'
#' @examples
#' d_params_covar <- list(
#' n = 1000,
#' nt = 500,
#' prob_X1 = 0.4,
#' prob_ice_X1 = 0.5,
#' prob_ice_X0 = 0.2,
#' fu_max = 48*7,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' dat_single_trial <- sim_dat_one_trial_exp_covar(
#' n = d_params_covar[["n"]],
#' nt = d_params_covar[["nt"]],
#' prob_X1 = d_params_covar[["prob_X1"]],
#' prob_ice_X1 = d_params_covar[["prob_ice_X1"]],
#' prob_ice_X0 = d_params_covar[["prob_ice_X0"]],
#' fu_max = d_params_covar[["fu_max"]],
#' T0T_rate = d_params_covar[["T0T_rate"]],
#' T0N_rate = d_params_covar[["T0N_rate"]],
#' T1T_rate = d_params_covar[["T1T_rate"]],
#' T1N_rate = d_params_covar[["T1N_rate"]]
#' )
#' dim(dat_single_trial)
#' head(dat_single_trial)
#'
sim_dat_one_trial_exp_covar <- function(
n, # number of patients
nt, # number of treated patients
prob_X1, # probability of X=1
prob_ice_X1, # probability of intercurrent event for X=1
prob_ice_X0, # probability of intercurrent event for X=0
fu_max, # maximum follow-up (days)
T0T_rate, # monthly event rate in controls TD
T0N_rate, # monthly event rate in controls ND
T1T_rate, # monthly event rate in treated TD
T1N_rate # monthly event rate in treated ND
) {
# baseline data
Z <- sample(c(rep(0L, n - nt), rep(1L, nt)))
# binary covariate
X <- sample(c(0, 1), size = n,
prob = c(1 - prob_X1, prob_X1), replace = T)
n_X1 <- sum(X==1)
# intercurrent event data
G <- rep(NA, n)
G[X==1] <- sample(c(0,1), size = n_X1,
prob = c(1 - prob_ice_X1, prob_ice_X1), replace = T)
G[X==0] <- sample(c(0,1), size = n - n_X1,
prob = c(1 - prob_ice_X0, prob_ice_X0), replace = T)
S <- G
S[Z==0L] <- 0L
# time to event endpoint data by principal stratum
cens <- runif(n = n, min = 1, max = fu_max)
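  # potential event times per principal stratum and arm: monthly exponential
  # rates, converted to days by scaling the draws by 30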
T0T <- rexp(n = n, rate = T0T_rate) * 30
T0N <- rexp(n = n, rate = T0N_rate) * 30
T1T <- rexp(n = n, rate = T1T_rate) * 30
T1N <- rexp(n = n, rate = T1N_rate) * 30
E0T <- as.integer(T0T <= cens)
E0N <- as.integer(T0N <= cens)
E1T <- as.integer(T1T <= cens)
E1N <- as.integer(T1N <= cens)
EVENT <- NULL
EVENT[Z==0 & G==0] <- E0N[Z==0 & G==0]
EVENT[Z==0 & G==1] <- E0T[Z==0 & G==1]
EVENT[Z==1 & G==0] <- E1N[Z==1 & G==0]
EVENT[Z==1 & G==1] <- E1T[Z==1 & G==1]
TIME <- NULL
TIME[EVENT==0] <- round(cens[EVENT==0])
TIME[EVENT==1 & Z==0 & G==0] <- round(T0N[EVENT==1 & Z==0 & G==0])
TIME[EVENT==1 & Z==0 & G==1] <- round(T0T[EVENT==1 & Z==0 & G==1])
TIME[EVENT==1 & Z==1 & G==0] <- round(T1N[EVENT==1 & Z==1 & G==0])
TIME[EVENT==1 & Z==1 & G==1] <- round(T1T[EVENT==1 & Z==1 & G==1])
TIME <- as.integer(TIME)
return(
tibble(
PAT_ID = stringr::str_pad(1:n, nchar(n), pad = "0"),
Z = Z,
X = X,
G = G,
S = S,
TIME = TIME,
EVENT = EVENT,
T0N = T0N,
T0T = T0T,
T1N = T1N,
T1T = T1T
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/sim_dat_one_trial_exp_covar.R
|
#' Simulate data from a single two-arm trial with an exponentially distributed time-to-event endpoint and no predictor of the intercurrent event
#'
#' @param n Positive integer value, number of subjects in the trial.
#' @param nt Positive integer value, number of treated subjects.
#' @param prob_ice Numeric value on the interval \eqn{(0,1)}, probability of the intercurrent event of interest (i.e. the event that determines the principal stratum membership).
#' @param fu_max Positive integer value, maximum follow-up time in days (administrative censoring assumed afterwards).
#' @param T0T_rate Positive numeric value, monthly event rate in control subjects that would develop the intercurrent event if treated.
#' @param T0N_rate Positive numeric value, monthly event rate in control subjects that never develop the intercurrent event.
#' @param T1T_rate Positive numeric value, monthly event rate in treated subjects that develop the intercurrent event.
#' @param T1N_rate Positive numeric value, monthly event rate in treated subjects that never develop the intercurrent event.
#'
#' @return A `tibble()` containing the trial data for analysis.
#' @export
#'
#' @seealso [sim_dat_one_trial_exp_covar()]
#'
#' @examples
#' d_params_nocovar <- list(
#' n = 500L,
#' nt = 250L,
#' prob_ice = 0.5,
#' fu_max = 336L,
#' T0T_rate = 0.2,
#' T0N_rate = 0.2,
#' T1T_rate = 0.15,
#' T1N_rate = 0.1
#' )
#' dat_single_trial <- sim_dat_one_trial_exp_nocovar(
#' n = d_params_nocovar[["n"]],
#' nt = d_params_nocovar[["nt"]],
#' prob_ice = d_params_nocovar[["prob_ice"]],
#' fu_max = d_params_nocovar[["fu_max"]],
#' T0T_rate = d_params_nocovar[["T0T_rate"]],
#' T0N_rate = d_params_nocovar[["T0N_rate"]],
#' T1T_rate = d_params_nocovar[["T1T_rate"]],
#' T1N_rate = d_params_nocovar[["T1N_rate"]]
#' )
#' dim(dat_single_trial)
#' head(dat_single_trial)
#'
sim_dat_one_trial_exp_nocovar <- function(
n, # number of patients
nt, # number of treated patients
prob_ice, # prob ICE
fu_max, # maximum follow-up (days)
T0T_rate, # monthly event rate in controls TD
T0N_rate, # monthly event rate in controls ND
T1T_rate, # monthly event rate in treated TD
T1N_rate # monthly event rate in treated ND
) {
# baseline data
Z <- sample(c(rep(0L, n - nt), rep(1L, nt)))
# intercurrent event data
G <- sample(c(0L, 1L), size = n, prob = c(1 - prob_ice, prob_ice), replace = T)
S <- G
S[Z==0L] <- 0L
# time to event endpoint data by principal stratum
cens <- runif(n = n, min = 1, max = fu_max)
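  # potential event times per principal stratum and arm: monthly exponential
  # rates, converted to days by scaling the draws by 30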
T0T <- rexp(n = n, rate = T0T_rate) * 30
T0N <- rexp(n = n, rate = T0N_rate) * 30
T1T <- rexp(n = n, rate = T1T_rate) * 30
T1N <- rexp(n = n, rate = T1N_rate) * 30
E0T <- as.integer(T0T <= cens)
E0N <- as.integer(T0N <= cens)
E1T <- as.integer(T1T <= cens)
E1N <- as.integer(T1N <= cens)
EVENT <- NULL
EVENT[Z==0 & G==0] <- E0N[Z==0 & G==0]
EVENT[Z==0 & G==1] <- E0T[Z==0 & G==1]
EVENT[Z==1 & G==0] <- E1N[Z==1 & G==0]
EVENT[Z==1 & G==1] <- E1T[Z==1 & G==1]
TIME <- NULL
TIME[EVENT==0] <- round(cens[EVENT==0])
TIME[EVENT==1 & Z==0 & G==0] <- round(T0N[EVENT==1 & Z==0 & G==0])
TIME[EVENT==1 & Z==0 & G==1] <- round(T0T[EVENT==1 & Z==0 & G==1])
TIME[EVENT==1 & Z==1 & G==0] <- round(T1N[EVENT==1 & Z==1 & G==0])
TIME[EVENT==1 & Z==1 & G==1] <- round(T1T[EVENT==1 & Z==1 & G==1])
TIME <- as.integer(TIME)
return(
tibble::tibble(
PAT_ID = stringr::str_pad(1:n, nchar(n), pad = "0"),
Z = Z,
G = G,
S = S,
TIME = TIME,
EVENT = EVENT,
T0N = T0N,
T0T = T0T,
T1N = T1N,
T1T = T1T
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/sim_dat_one_trial_exp_nocovar.R
|
# Generated by rstantools. Do not edit by hand.
# names of stan models
stanmodels <- c("m_exp_covar", "m_exp_nocovar")
# load each stan module
Rcpp::loadModule("stan_fit4m_exp_covar_mod", what = TRUE)
Rcpp::loadModule("stan_fit4m_exp_nocovar_mod", what = TRUE)
# instantiate each stanmodel object
stanmodels <- sapply(stanmodels, function(model_name) {
# create C++ code for stan model
stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan")
stan_file <- file.path(stan_file, paste0(model_name, ".stan"))
stanfit <- rstan::stanc_builder(stan_file,
allow_undefined = TRUE,
obfuscate_model_name = FALSE)
stanfit$model_cpp <- list(model_cppname = stanfit$model_name,
model_cppcode = stanfit$cppcode)
# create stanmodel object
methods::new(Class = "stanmodel",
model_name = stanfit$model_name,
model_code = stanfit$model_code,
model_cpp = stanfit$model_cpp,
mk_cppmodule = function(x) get(paste0("rstantools_model_", model_name)))
})
|
/scratch/gouwar.j/cran-all/cranData/BPrinStratTTE/R/stanmodels.R
|