#' Bootstrap standard errors
#'
#' Calculates the standard errors of a given statistic using the bootstrap
#'
#' @section References:
#' - A.C. Davison, D.V. Hinkley:
#' Bootstrap methods and their application. Cambridge University Press (1997)
#' - F. Campelo, F. Takahashi:
#' Sample size estimation for power and accuracy in the experimental
#' comparison of algorithms. Journal of Heuristics 25(2):305-338, 2019.
#'
#' @inheritParams calc_se
#' @param ... other parameters (used only for compatibility with calls to
#' [se_param()], unused in this function)
#'
#' @return Data frame containing, for each pair of interest, the estimated
#' difference (column "Phi") and the sample standard error (column "SE")
#'
#' @author Felipe Campelo (\email{fcampelo@@ufmg.br},
#' \email{f.campelo@@aston.ac.uk})
#'
#' @export
#'
#' @examples
#' # three vectors of normally distributed observations
#' set.seed(1234)
#' Xk <- list(rnorm(10, 5, 1),   # mean = 5, sd = 1
#'            rnorm(20, 10, 2),  # mean = 10, sd = 2
#'            rnorm(20, 15, 5))  # mean = 15, sd = 5
#'
#' se_boot(Xk, dif = "simple", comparisons = "all.vs.all")
#' se_boot(Xk, dif = "perc", comparisons = "all.vs.first")
#' se_boot(Xk, dif = "perc", comparisons = "all.vs.all")
# TESTED: OK
se_boot <- function(Xk, # list of observation vectors
dif = "simple", # type of difference
comparisons = "all.vs.all", # standard errors to calculate
boot.R = 999, # number of bootstrap resamples
...)
{
# ========== Error catching ========== #
assertthat::assert_that(
is.list(Xk),
all(sapply(Xk, is.numeric)),
all(sapply(Xk, function(x){length(x) >= 2})),
dif %in% c('simple', 'perc'),
comparisons %in% c("all.vs.all", "all.vs.first"),
assertthat::is.count(boot.R), boot.R > 1)
# ==================================== #
nalgs <- length(Xk)
Nk <- sapply(Xk, length)
# Get pairs for calculation
algo.pairs <- t(utils::combn(1:length(Xk), 2))
if (comparisons == "all.vs.first") algo.pairs <- algo.pairs[1:(nalgs - 1), , drop = FALSE]
# Calculate point estimates and standard errors for all required pairs
Phik <- numeric(nrow(algo.pairs))
SEk <- numeric(nrow(algo.pairs))
Roptk <- numeric(nrow(algo.pairs))
for (k in 1:nrow(algo.pairs)){
ind <- as.numeric(algo.pairs[k, ])
phi.hat <- numeric(boot.R)
ropt.hat <- numeric(boot.R)
for(i in 1:boot.R){
# Resample everyone with replacement
Xk.b <- mapply(FUN = sample,
Xk, lapply(Xk, length),
MoreArgs = list(replace = TRUE),
SIMPLIFY = FALSE)
# Calculate relevant statistics for this bootstrap replicate
Vark <- sapply(Xk.b, stats::var)
Xbark <- sapply(Xk.b, mean)
Xbar.all <- mean(Xbark)
if (dif == "simple") {
# mu1 - mu2
phi.hat[i] <- Xbark[ind[1]] - Xbark[ind[2]]
# s1 / s2
ropt.hat[i] <- sqrt(Vark[ind[1]] / Vark[ind[2]])
#
} else if (dif == "perc"){
if (comparisons == "all.vs.all"){
# (mu1 - mu2) / mu
phi.hat[i] <- (Xbark[ind[1]] - Xbark[ind[2]]) / Xbar.all
# r = s1 / s2
ropt.hat[i] <- sqrt(Vark[ind[1]] / Vark[ind[2]])
#
} else if (comparisons == "all.vs.first"){
# (mu1 - mu2) / mu1
phi.hat[i] <- 1 - Xbark[ind[2]] / Xbark[ind[1]]
# r = (s1 / s2) * (mu2 / mu1)
ropt.hat[i] <- sqrt(Vark[ind[1]] / Vark[ind[2]]) * (Xbark[ind[2]] / Xbark[ind[1]])
#
} else stop("comparisons option *", comparisons, "* not recognized.")
#
} else stop("dif option *", dif, "* not recognized.")
}
# Estimate quantities of interest
Phik[k] <- mean(phi.hat)
SEk[k] <- stats::sd(phi.hat)
Roptk[k] <- mean(ropt.hat)
}
# Assemble data frame with results
output <- data.frame(Alg1 = algo.pairs[, 1],
Alg2 = algo.pairs[, 2],
N1 = Nk[algo.pairs[, 1]],
N2 = Nk[algo.pairs[, 2]],
Phi = Phik,
SE = SEk,
r = Nk[algo.pairs[, 1]] / Nk[algo.pairs[, 2]],
ropt = Roptk)
return(output)
}
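# Quick sanity check (illustration only, not part of the package): for a
# simple difference of means, the bootstrap SE from se_boot() should
# approach the parametric value sqrt(s1^2/n1 + s2^2/n2) as boot.R grows.
set.seed(42)
Xk <- list(rnorm(20, mean = 5, sd = 1),
           rnorm(20, mean = 10, sd = 2))
se_boot(Xk, dif = "simple", comparisons = "all.vs.all", boot.R = 1999)$SE
sqrt(var(Xk[[1]]) / 20 + var(Xk[[2]]) / 20)  # parametric reference value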
/scratch/gouwar.j/cran-all/cranData/CAISEr/R/se_boot.R
#' Parametric standard errors
#'
#' Calculates the standard errors of a given statistic using parametric formulas
#'
#' @section References:
#' - E.C. Fieller:
#' Some problems in interval estimation. Journal of the Royal Statistical
#' Society. Series B (Methodological) 16(2), 175–185 (1954)
#' - V. Franz:
#' Ratios: A short guide to confidence limits and proper use (2007).
#' https://arxiv.org/pdf/0710.2024v1.pdf
#' - D.C. Montgomery, C.G. Runger:
#' Applied Statistics and Probability for Engineers, 6th ed. Wiley (2013)
#' - F. Campelo, F. Takahashi:
#' Sample size estimation for power and accuracy in the experimental
#' comparison of algorithms. Journal of Heuristics 25(2):305-338, 2019.
#'
#' @inheritParams calc_se
#' @param ... other parameters (used only for compatibility with calls to
#' [se_boot()], unused in this function)
#'
#' @return Data frame containing, for each pair of interest, the estimated
#' difference (column "Phi") and the sample standard error (column "SE")
#'
#' @author Felipe Campelo (\email{fcampelo@@ufmg.br},
#' \email{f.campelo@@aston.ac.uk})
#'
#' @export
#'
#' @examples
#' # three vectors of normally distributed observations
#' set.seed(1234)
#' Xk <- list(rnorm(10, 5, 1),   # mean = 5, sd = 1
#'            rnorm(20, 10, 2),  # mean = 10, sd = 2
#'            rnorm(20, 15, 5))  # mean = 15, sd = 5
#'
#' se_param(Xk, dif = "simple", comparisons = "all.vs.all")
#' se_param(Xk, dif = "perc", comparisons = "all.vs.first")
#' se_param(Xk, dif = "perc", comparisons = "all.vs.all")
# TESTED: OK
se_param <- function(Xk, # list of observation vectors
dif = "simple", # type of difference
comparisons = "all.vs.all", # standard errors to calculate
...)
{
# ========== Error catching ========== #
assertthat::assert_that(
is.list(Xk),
all(sapply(Xk, is.numeric)),
all(sapply(Xk, function(x){length(x) >= 2})),
dif %in% c('simple', 'perc'),
comparisons %in% c("all.vs.all", "all.vs.first"))
# ==================================== #
# Estimates
nalgs <- length(Xk)
Vark <- sapply(Xk, stats::var)
Xbark <- sapply(Xk, mean)
Nk <- sapply(Xk, length)
Xbar.all <- mean(Xbark)
# Get pairs for comparison
algo.pairs <- t(utils::combn(1:length(Xk), 2))
if (comparisons == "all.vs.first") algo.pairs <- algo.pairs[1:(nalgs - 1), , drop = FALSE]
# Calculate point estimates and standard errors for all required pairs
Phik <- numeric(nrow(algo.pairs))
SEk <- numeric(nrow(algo.pairs))
Roptk <- numeric(nrow(algo.pairs))
for (i in 1:nrow(algo.pairs)){
ind <- as.numeric(algo.pairs[i, ])
if (dif == "simple") {
# mu1 - mu2
Phik[i] <- Xbark[ind[1]] - Xbark[ind[2]]
# se = sqrt(s1^2/n1 + s2^2/n2)
SEk[i] <- sqrt(sum(Vark[ind] / Nk[ind]))
# r = s1 / s2
Roptk[i] <- sqrt(Vark[ind[1]] / Vark[ind[2]])
#
} else if (dif == "perc"){
if (comparisons == "all.vs.all"){
# (mu1 - mu2) / mu
Phik[i] <- (Xbark[ind[1]] - Xbark[ind[2]]) / Xbar.all
# c1 = 1/mu^2 + (mu1 - mu2)^2 / (A * mu^2)^2
# = (1 + phi^2 / A^2) / mu^2
C1 <- (1 + Phik[i] ^ 2 / nalgs ^ 2) / Xbar.all ^ 2
# c2 = sum_{k != a,b}(s_k^2 / n_k) * phi^2 / (A^2 * mu^2)
C2 <- sum(Vark[-ind] / Nk[-ind]) * Phik[i] ^ 2 / (nalgs ^ 2 * Xbar.all ^ 2)
# se = sqrt(c1 (s1^2/n1 + s2^2/n2) + c2)
SEk[i] <- sqrt(C1 * (sum(Vark[ind] / Nk[ind])) + C2)
# r = s1 / s2
Roptk[i] <- sqrt(Vark[ind[1]] / Vark[ind[2]])
#
} else if (comparisons == "all.vs.first"){
# 1 - mu2/mu1
Phik[i] <- 1 - Xbark[ind[2]] / Xbark[ind[1]]
# c1 = s1^2 * (mu_2 / mu_1^2)^2
C1 <- Vark[ind[1]] * (Xbark[ind[2]] / (Xbark[ind[1]] ^ 2)) ^2
# c2 = s2^2 / mu_1^2
C2 <- Vark[ind[2]] / (Xbark[ind[1]] ^ 2)
# se = sqrt(c1 / n1 + c2 / n2)
SEk[i] <- sqrt(C1 / Nk[ind[1]] + C2 / Nk[ind[2]])
# r* = s1/s2 * mu2/mu1
Roptk[i] <- sqrt(Vark[ind[1]] / Vark[ind[2]]) * (Xbark[ind[2]] / Xbark[ind[1]])
#
} else stop("comparisons option *", comparisons, "* not recognized.")
} else stop("dif option *", dif, "* not recognized.")
}
# Assemble data frame with results
output <- data.frame(Alg1 = algo.pairs[, 1],
Alg2 = algo.pairs[, 2],
N1 = Nk[algo.pairs[, 1]],
N2 = Nk[algo.pairs[, 2]],
Phi = Phik,
SE = SEk,
r = Nk[algo.pairs[, 1]] / Nk[algo.pairs[, 2]],
ropt = Roptk)
return(output)
}
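# Manual check (illustration only, not part of the package): for
# dif = "simple", the SE computed by se_param() is the usual two-sample
# formula sqrt(s1^2/n1 + s2^2/n2).
set.seed(123)
X1 <- rnorm(15, mean = 10, sd = 2)
X2 <- rnorm(25, mean = 12, sd = 4)
out <- se_param(list(X1, X2), dif = "simple", comparisons = "all.vs.all")
all.equal(out$SE, sqrt(var(X1) / 15 + var(X2) / 25))  # TRUE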
/scratch/gouwar.j/cran-all/cranData/CAISEr/R/se_param.R
#' summary.CAISEr
#'
#' S3 method for summarizing _CAISEr_ objects output by [run_experiment()].
#' Input parameters `test`, `alternative` and `sig.level` can be used to
#' override the ones used in the call to [run_experiment()].
#'
#' @inheritParams calc_nreps
#' @inheritParams calc_instances
#' @inheritParams run_experiment
#' @param object list object of class _CAISEr_
#' (generated by [run_experiment()])
#' @param ... other parameters to be passed down to specific
#' summary functions (currently unused)
#'
#' @return A list object is returned invisibly, containing the details of all
#' tests performed as well as information on the total number of runs
#' dedicated to each algorithm.
#' @examples
#' # Example using four dummy algorithms and 100 dummy instances.
#' # See [dummyalgo()] and [dummyinstance()] for details.
#' # Generating 4 dummy algorithms here, with means 15, 10, 30, 15 and standard
#' # deviations 2, 4, 6, 8.
#' algorithms <- mapply(FUN = function(i, m, s){
#' list(FUN = "dummyalgo",
#' alias = paste0("algo", i),
#' distribution.fun = "rnorm",
#' distribution.pars = list(mean = m, sd = s))},
#' i = c(alg1 = 1, alg2 = 2, alg3 = 3, alg4 = 4),
#' m = c(15, 10, 30, 15),
#' s = c(2, 4, 6, 8),
#' SIMPLIFY = FALSE)
#'
#' # Generate 100 dummy instances with centered exponential distributions
#' instances <- lapply(1:100,
#' function(i) {rate <- runif(1, 1, 10)
#' list(FUN = "dummyinstance",
#' alias = paste0("Inst.", i),
#' distr = "rexp", rate = rate,
#' bias = -1 / rate)})
#'
#' my.results <- run_experiment(instances, algorithms,
#' d = 1, se.max = .1,
#' power = .9, sig.level = .05,
#' power.target = "mean",
#' dif = "perc", comparisons = "all.vs.all",
#' seed = 1234, ncpus = 1)
#' summary(my.results)
#'
#' # You can override some defaults if you want:
#' summary(my.results, test = "wilcoxon")
#'
#' @method summary CAISEr
#'
#' @export
#'
summary.CAISEr <- function(object, test = NULL,
alternative = NULL,
sig.level = NULL,
...)
{
# Standard value assignment and error checking
if (is.null(test)) test <- object$Configuration$test
if (is.null(alternative)) alternative <- object$Configuration$alternative
if (is.null(sig.level)) sig.level <- object$Configuration$sig.level
assertthat::assert_that("CAISEr" %in% class(object),
is.character(test), length(test) == 1,
test %in% c("t.test", "wilcoxon", "binomial"),
is.character(alternative), length(alternative) == 1,
alternative %in% c("less", "greater", "two.sided"),
is.numeric(sig.level), length(sig.level) == 1,
sig.level > 0, sig.level < 1)
# ===========================================================================
algonames <- as.character(unique(object$data.raw$Algorithm))
algoruns <- as.numeric(table(object$data.raw$Algorithm))
algopairs <- paste(object$data.summary$Alg1,
object$data.summary$Alg2,
sep = " x ")
# perform initial tests just to calculate p-values
# (ignoring significance correction)
my.tests <- vector(mode = "list", length = length(unique(algopairs)))
for (i in seq_along(unique(algopairs))){
tmp <- object$data.summary[algopairs == unique(algopairs)[i], ]
my.tests[[i]]$comparison <- unique(algopairs)[i]
my.tests[[i]]$data <- tmp
my.tests[[i]]$d <- mean(tmp$Phi) / stats::sd(tmp$Phi)
if (test == "t.test"){
my.tests[[i]]$test <- stats::t.test(tmp$Phi,
conf.level = 1 - sig.level,
alternative = alternative)
} else if (test == "wilcoxon"){
my.tests[[i]]$test <- stats::wilcox.test(tmp$Phi,
conf.level = 1 - sig.level,
alternative = alternative)
} else if (test == "binomial"){
x <- tmp$Phi[tmp$Phi != 0]
n <- length(x)
x <- sum(x > 0)
my.tests[[i]]$test <- stats::binom.test(x, n,
conf.level = 1 - sig.level,
alternative = alternative)
} else stop("Test", test, "not recognised in function summary.CAISEr")
my.tests[[i]]$pval <- my.tests[[i]]$test$p.value
}
# Reorder the tests in increasing order of p-values
my.tests <- my.tests[order(sapply(my.tests, function(x) x$pval))]
# Re-evaluate tests based on corrected significance values
alpha <- object$samplesize.calc$sig.level
for (i in seq_along(my.tests)){
if (test == "t.test"){
my.tests[[i]]$test <- stats::t.test(my.tests[[i]]$data$Phi,
conf.level = 1 - alpha[i],
alternative = alternative)
} else if (test == "wilcoxon"){
my.tests[[i]]$test <- stats::wilcox.test(my.tests[[i]]$data$Phi,
conf.level = 1 - alpha[i],
alternative = alternative,
conf.int = TRUE)
} else if (test == "binomial"){
x <- my.tests[[i]]$data$Phi[my.tests[[i]]$data$Phi != 0]
n <- length(x)
x <- sum(x > 0)
my.tests[[i]]$test <- stats::binom.test(x, n,
conf.level = 1 - alpha[i],
alternative = alternative)
}
my.tests[[i]]$pval <- my.tests[[i]]$test$p.value
}
# Print summary
cat("#====================================")
cat("\n CAISEr object:")
cat("\n Number of instances sampled:", object$N)
cat("\n Number of instances required:", object$N.star)
cat("\n Adequate power:", !object$Underpowered)
for (i in seq_along(algonames)){
cat("\n Total runs of", algonames[i], ":", algoruns[i])
}
cat("\n#====================================")
cat("\n Pairwise comparisons of interest:")
cat("\n Test:", test)
cat("\n H1:", alternative)
cat("\n Comparisons:", object$Configuration$comparisons)
cat("\n Alpha (FWER):", sig.level)
cat("\n Power target:", object$Configuration$power.target)
cat("\n Desired power:", object$Configuration$power)
cat("\n#====================================")
cat("\nTests using Holm's step-down procedure:")
stflag <- FALSE
for (i in seq_along(my.tests)){
if (!stflag && (my.tests[[i]]$pval > alpha[i])){
cat("\n\n ----- Stop rejecting H0 at this point -----\n")
stflag <- TRUE
} else cat("\n")
cat("\n Test", i, ":", my.tests[[i]]$comparison)
cat("\n H0:", switch(test,
t.test = "mean = 0",
wilcoxon = "median = 0",
binomial = "prob = 0.5"))
cat("\n alpha\t\t=", signif(alpha[i], 4),
"\n p-value\t=", signif(my.tests[[i]]$pval, 4))
cat(paste0("\n Est. ", switch(test,
t.test = "mean",
wilcoxon = "median",
binomial = "prob"),
"\t="), signif(my.tests[[i]]$test$estimate, 4))
cat("\n CI{1-alpha}\t= [", signif(my.tests[[i]]$test$conf.int, 4), "]")
cat("\n d\t\t=", my.tests[[i]]$d)
}
cat("\n#====================================")
# Return invisibly
invisible(list(test.info = my.tests,
algoruns = algoruns,
algonames = algonames,
algopairs = algopairs))
}
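# Note (illustration only, not part of the package): the step-down
# re-evaluation above mirrors Holm's procedure, with the corrected levels
# alpha[i] taken from object$samplesize.calc$sig.level. The same
# accept/reject pattern can be obtained with stats::p.adjust():
pvals <- c(0.001, 0.012, 0.030, 0.047)       # ordered p-values (toy data)
stats::p.adjust(pvals, method = "holm") <= 0.05
# [1]  TRUE  TRUE FALSE FALSE -- rejection stops at the third test, which
# is where the "Stop rejecting H0 at this point" marker would be printed.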
/scratch/gouwar.j/cran-all/cranData/CAISEr/R/summary_caiser.R
#' summary.nreps
#'
#' S3 method for summarizing _nreps_ objects output by [calc_nreps()].
#'
#' @param object list object of class _nreps_
#' (generated by [calc_nreps()])
#' @param ... other parameters to be passed down to specific
#' summary functions (currently unused)
#'
#'
#' @method summary nreps
#'
#' @export
#'
summary.nreps <- function(object, ...)
{
# Print summary
cat("#====================================")
cat("\nInstance:", object$instance)
cat("\nNumber of algorithms:", length(object$Nk))
for (i in seq_along(object$Nk)){
cat(paste0("\n", names(object$Nk)[i], ": ", object$Nk[i], " runs"))
}
cat("\n --------------------")
cat("\nTotal runs:", sum(object$Nk))
cat("\nComparisons:", object$comparisons)
cat("\n#====================================\n\n")
print(signif(object$Diffk, 3))
cat("\n#====================================")
}
/scratch/gouwar.j/cran-all/cranData/CAISEr/R/summary_nreps.R
#' Determine sample sizes for a set of algorithms on a single problem instance
#'
#' Iteratively calculates the required sample sizes for K algorithms
#' on a given problem instance, so that the standard errors of the estimates of
#' the pairwise differences in performance are controlled at a predefined level.
#'
#' @section Instance:
#' Parameter `instance` must be a named list containing all relevant parameters
#' that define the problem instance. This list must contain at least the field
#' `instance$FUN`, with the name of the function implementing the problem
#' instance, that is, a routine that calculates y = f(x). If the instance
#' requires additional parameters, these must also be provided as named fields.
#'
#' @section Algorithms:
#' Object `algorithms` is a list in which each component is a named
#' list containing all relevant parameters that define an algorithm to be
#' applied for solving the problem instance. In what follows `algorithm[[k]]`
#' refers to any algorithm specified in the `algorithms` list.
#'
#' `algorithm[[k]]` must contain an `algorithm[[k]]$FUN` field, which is a
#' character object with the name of the function that calls the algorithm; as
#' well as any other elements/parameters that `algorithm[[k]]$FUN` requires
#' (e.g., stop criteria, operator names and parameters, etc.).
#'
#' The function defined by the routine `algorithm[[k]]$FUN` must have the
#' following structure: supposing that the list in `algorithm[[k]]` has
#' fields `algorithm[[k]]$FUN = "myalgo"`, `algorithm[[k]]$par1 = "a"` and
#' `algorithm[[k]]$par2 = 5`, then:
#'
#' \preformatted{
#' myalgo <- function(par1, par2, instance, ...){
#' # do stuff
#' # ...
#' return(results)
#' }
#' }
#'
#' That is, it must be able to run if called as:
#'
#' \preformatted{
#' # remove '$FUN' and '$alias' field from list of arguments
#' # and include the problem definition as field 'instance'
#' myargs <- algorithm[names(algorithm) != "FUN"]
#' myargs <- myargs[names(myargs) != "alias"]
#' myargs$instance <- instance
#'
#' # call function
#' do.call(algorithm$FUN,
#' args = myargs)
#' }
#'
#' The `algorithm$FUN` routine must return a list containing (at
#' least) the performance value of the final solution obtained, in a field named
#' `value` (e.g., `result$value`) after a given run.
#'
#' @section Initial Number of Observations:
#' In the **general case** the initial number of observations per algorithm
#' (`nstart`) should be relatively high. For the parametric case
#' we recommend between 10 and 20 if outliers are not expected, or between 30
#' and 50 if that assumption cannot be made. For the bootstrap approach we
#' recommend using at least 20. However, if some distributional assumptions can
#' be made (particularly low skewness of the population of algorithm results on
#' the test instances), then `nstart` can in principle be as small as 5 (if the
#' output of the algorithms were known to be normal, it could be 1).
#'
#' In general, higher sample sizes are the price to pay for abandoning
#' distributional assumptions. Use lower values of `nstart` with caution.
#'
#' @section Pairwise Differences:
#' Parameter `dif` informs the type of difference in performance to be used
#' for the estimation (\eqn{\mu_a} and \eqn{\mu_b} represent the mean
#' performance of any two algorithms on the test instance, and \eqn{\mu}
#' represents the grand mean of all algorithms given in `algorithms`):
#'
#' - If `dif == "perc"` and `comparisons == "all.vs.first"`, the estimated quantity is
#' \eqn{\phi_{1b} = (\mu_1 - \mu_b) / \mu_1 = 1 - (\mu_b / \mu_1)}.
#'
#' - If `dif == "perc"` and `comparisons == "all.vs.all"`, the estimated quantity is
#' \eqn{\phi_{ab} = (\mu_a - \mu_b) / \mu}.
#'
#' - If `dif == "simple"` it estimates \eqn{\mu_a - \mu_b}.
#'
#' @param instance a list object containing the definitions of the problem
#' instance.
#' See Section `Instance` for details.
#' @param algorithms a list object containing the definitions of all algorithms.
#' See Section `Algorithms` for details.
#' @param se.max desired upper limit for the standard error of the estimated
#' difference between pairs of algorithms. See Section
#' `Pairwise Differences` for details.
#' @param dif type of difference to be used. Accepts "perc" (for percent
#' differences) or "simple" (for simple differences)
#' @param comparisons type of comparisons being performed. Accepts "all.vs.first"
#' (in which cases the first object in `algorithms` is considered to be
#' the reference algorithm) or "all.vs.all" (if there is no reference
#' and all pairwise comparisons are desired).
#' @param method method to use for estimating the standard errors. Accepts
#' "param" (for parametric) or "boot" (for bootstrap)
#' @param nstart initial number of algorithm runs for each algorithm.
#' See Section `Initial Number of Observations` for details.
#' @param nmax maximum **total** allowed sample size.
#' @param seed seed for the random number generator
#' @param boot.R number of bootstrap resamples to use (if `method == "boot"`)
#' @param ncpus number of cores to use
#' @param force.balanced logical flag to force the use of balanced sampling for
#' the algorithms on each instance
#' @param save.to.file logical flag: should the results be saved to a file?
#' @param load.from.file logical flag: should the results be loaded from a file?
#' @param folder directory to save/load files
#'
#'
#' @return a list object containing the following items:
#' \itemize{
#' \item \code{instance} - alias for the problem instance considered
#' \item \code{Xk} - list of observed performance values for all `algorithms`
#' \item \code{Nk} - vector of sample sizes generated for each algorithm
#' \item \code{Diffk} - data frame with point estimates, standard errors and
#' other information for all algorithm pairs of interest
#' \item \code{seed} - seed used for the PRNG
#' \item \code{dif} - type of difference used
#' \item \code{method} - method used ("param" / "boot")
#' \item \code{comparisons} - type of pairings ("all.vs.all" / "all.vs.first")
#' }
#'
#' @author Felipe Campelo (\email{fcampelo@@gmail.com})
#'
#' @references
#' - F. Campelo, F. Takahashi:
#' Sample size estimation for power and accuracy in the experimental
#' comparison of algorithms. Journal of Heuristics 25(2):305-338, 2019.
#' - P. Mathews.
#' Sample size calculations: Practical methods for engineers and scientists.
#' Mathews Malnar and Bailey, 2010.
#' - A.C. Davison, D.V. Hinkley:
#' Bootstrap methods and their application. Cambridge University Press (1997)
#' - E.C. Fieller:
#' Some problems in interval estimation. Journal of the Royal Statistical
#' Society. Series B (Methodological) 16(2), 175–185 (1954)
#' - V. Franz:
#' Ratios: A short guide to confidence limits and proper use (2007).
#' https://arxiv.org/pdf/0710.2024v1.pdf
#' - D.C. Montgomery, C.G. Runger:
#' Applied Statistics and Probability for Engineers, 6th ed. Wiley (2013)
#'
#' @export
#'
#' @examples
#' # Example using dummy algorithms and instances. See ?dummyalgo for details.
#' # We generate 4 dummy algorithms, with true means 15, 10, 30, 15; and true
#' # standard deviations 2, 4, 6, 8.
#' algorithms <- mapply(FUN = function(i, m, s){
#' list(FUN = "dummyalgo",
#' alias = paste0("algo", i),
#' distribution.fun = "rnorm",
#' distribution.pars = list(mean = m, sd = s))},
#' i = c(alg1 = 1, alg2 = 2, alg3 = 3, alg4 = 4),
#' m = c(15, 10, 30, 15),
#' s = c(2, 4, 6, 8),
#' SIMPLIFY = FALSE)
#'
#' # Make a dummy instance with a centered (zero-mean) exponential distribution:
#' instance = list(FUN = "dummyinstance", distr = "rexp", rate = 5, bias = -1/5)
#'
#' se.max = 0.05
#' dif = "perc"
#' comparisons = "all.vs.all"
#' method = "param"
#' seed = 1234
#' nstart = 20
#' nmax = 1000
#' ncpus = 1
#'
#' myreps <- calc_nreps(instance = instance, algorithms = algorithms,
#' se.max = se.max, dif = dif,
#' comparisons = comparisons, method = method,
#' nstart = nstart, nmax = nmax, seed = seed)
#' myreps$Diffk
# TESTED: OK
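# Illustration (not package code) of the "Pairwise Differences" section
# above: the three estimands for a pair of algorithms (a, b), computed
# from sample means only.
Xbark  <- c(10, 12, 15)   # sample means of three algorithms
mu.all <- mean(Xbark)     # grand mean over all algorithms
a <- 1; b <- 2
Xbark[a] - Xbark[b]             # dif = "simple":             mu_a - mu_b
(Xbark[a] - Xbark[b]) / mu.all  # dif = "perc", all.vs.all:   (mu_a - mu_b) / mu
1 - Xbark[b] / Xbark[a]         # dif = "perc", all.vs.first: 1 - mu_b / mu_1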
# calc_nreps_old <- function(instance, # instance parameters
# algorithms, # algorithm parameters
# se.max, # desired (max) standard error
# dif = "simple", # type of difference
# comparisons = "all.vs.all", # differences to consider
# method = "param", # method ("param", "boot")
# nstart = 20, # initial number of samples
# nmax = 200, # maximum allowed sample size
# seed = NULL, # seed for PRNG
# boot.R = 499, # number of bootstrap resamples
# ncpus = 1, # number of cores to use
# force.balanced = FALSE, # force balanced sampling?
# save.to.file = FALSE, # save results to tmp file?
# load.from.file = FALSE, # load results from file?
# folder = "./nreps_files") # directory to save tmp file
# {
#
# # ========== Error catching ========== #
# assertthat::assert_that(
# is.list(instance),
# assertthat::has_name(instance, "FUN"),
# is.list(algorithms),
# all(sapply(X = algorithms, FUN = is.list)),
# all(sapply(X = algorithms,
# FUN = function(x){assertthat::has_name(x, "FUN")})),
# is.numeric(se.max) && length(se.max) == 1,
# dif %in% c("simple", "perc"),
# comparisons %in% c("all.vs.all", "all.vs.first"),
# method %in% c("param", "boot"),
# assertthat::is.count(nstart),
# is.infinite(nmax) || assertthat::is.count(nmax),
# nmax >= length(algorithms) * nstart,
# is.null(seed) || seed == seed %/% 1,
# assertthat::is.count(boot.R), boot.R > 1,
# is.logical(force.balanced), length(force.balanced) == 1,
# is.logical(save.to.file), length(save.to.file) == 1,
# is.logical(load.from.file), length(load.from.file) == 1)
# # ==================================== #
#
# # set PRNG seed
# if (is.null(seed)) {
# if (!exists(".Random.seed")) stats::runif(1)
# seed <- .Random.seed #i.e., do not change anything
# } else{
# set.seed(seed)
# }
#
# # Get/set instance alias
# if (!("alias" %in% names(instance))) {
# instance$alias <- instance$FUN
# }
#
# if (load.from.file){
# # Get the filename
# filename <- paste0(folder, "/",
# instance$alias,
# ".rds")
#
# if (file.exists(filename)){
# output <- readRDS(filename)
# cat("\nSampling of instance", instance$alias, "loaded from file.")
# return(output)
# } else
# cat("\n**NOTE: Instance file", filename, "not found.**")
# }
#
# # Echo some information for the user
# cat("\nSampling algorithms on instance", instance$alias, ": ")
#
# # generate initial samples
# Nk <- rep(nstart, length(algorithms))
# Xk <- parallel::mcmapply(FUN = get_observations,
# algo = algorithms,
# n = Nk,
# MoreArgs = list(instance = instance),
# mc.cores = ncpus,
# SIMPLIFY = FALSE)
#
# # Calculate point estimates, SEs, and sample size ratios (current x optimal)
# Diffk <- calc_se(Xk = Xk,
# dif = dif,
# comparisons = comparisons,
# method = method,
# boot.R = boot.R)
#
# while(any(Diffk$SE > se.max) & (sum(Nk) < nmax)){
# # Echo something for the user
# if (!(sum(Nk) %% nstart)) cat(".")
#
# if (force.balanced) {
# # Generate a single new observation for each algorithm
# newX <- parallel::mcmapply(FUN = get_observations,
# algo = algorithms,
# n = 1,
# MoreArgs = list(instance = instance),
# mc.cores = ncpus,
# SIMPLIFY = FALSE)
#
# # Append new observation to each algo list and update sample size counters
# Xk <- mapply(FUN = c, Xk, newX,
# SIMPLIFY = FALSE)
# Nk <- Nk + 1
# } else {
# # Get pair that has the worst SE
# worst.se <- Diffk[which.max(Diffk$SE), ]
#
# # Determine algorithm that should receive a new observation
# if (worst.se$r <= worst.se$ropt){
# ind <- worst.se[1, 1]
# } else {
# ind <- worst.se[1, 2]
# }
# # Generate new observation and update Nk counter
# Xk[[ind]] <- c(Xk[[ind]],
# get_observations(algo = algorithms[[ind]],
# instance = instance,
# n = 1))
# Nk[ind] <- Nk[ind] + 1
#
# # Recalculate point estimates, SEs, and sample size ratios
# Diffk <- calc_se(Xk = Xk,
# dif = dif,
# comparisons = comparisons,
# method = method,
# boot.R = boot.R)
# }
# }
#
# # Assemble output list
# names(Nk) <- lapply(algorithms, function(x)x$alias)
# output <- list(instance = instance$alias,
# Xk = Xk,
# Nk = Nk,
# Diffk = Diffk,
# dif = dif,
# method = method,
# comparisons = comparisons,
# seed = seed)
#
# # Save to file if required
# if (save.to.file){
# # Get folder
# if(!dir.exists(folder)) dir.create(folder)
#
# # Get a unique filename
# filename <- paste0(folder, "/",
# instance$alias,
# ".rds")
#
# # save output to file
# saveRDS(output, file = filename)
# }
#
# # Return output
# return(output)
# }
#' Run a full experiment for comparing multiple algorithms using multiple
#' instances
#'
#' Design and run a full experiment - calculate the required number of
#' instances, run the algorithms on each problem instance using the iterative
#' approach based on optimal sample size ratios, and return the results of the
#' experiment. This routine builds upon [calc_instances()] and [calc_nreps()],
#' so refer to the documentation of these two functions for details.
#'
#' @section Instance List:
#' Parameter `instances` must contain a list of instance objects, where
#' each field is itself a list, as defined in the documentation of function
#' [calc_nreps()]. In short, each element of `instances` is an `instance`, i.e.,
#' a named list containing all relevant parameters that define the problem
#' instance. This list must contain at least the field `instance$FUN`, with the
#' name of the problem instance function, that is, a routine that calculates
#' y = f(x). If the instance requires additional parameters, these must also be
#' provided as named fields.
#' An additional field, "instance$alias", can be used to provide the instance
#' with a unique identifier (e.g., when using an instance generator).
#'
#' @section Algorithm List:
#' Object `algorithms` is a list in which each component is a named
#' list containing all relevant parameters that define an algorithm to be
#' applied for solving the problem instance. In what follows `algorithms[[k]]`
#' refers to any algorithm specified in the `algorithms` list.
#'
#' `algorithms[[k]]` must contain an `algorithms[[k]]$FUN` field, which is a
#' character object with the name of the function that calls the algorithm; as
#' well as any other elements/parameters that `algorithms[[k]]$FUN` requires
#' (e.g., stop criteria, operator names and parameters, etc.).
#'
#' The function defined by the routine `algorithms[[k]]$FUN` must have the
#' following structure: supposing that the list in `algorithms[[k]]` has
#' fields `algorithm[[k]]$FUN = "myalgo"`, `algorithms[[k]]$par1 = "a"` and
#' `algorithms[[k]]$par2 = 5`, then:
#'
#' \preformatted{
#' myalgo <- function(par1, par2, instance, ...){
#' #
#' # <do stuff>
#' #
#' return(results)
#' }
#' }
#'
#' That is, it must be able to run if called as:
#'
#' \preformatted{
#' # remove '$FUN' and '$alias' field from list of arguments
#' # and include the problem definition as field 'instance'
#' myargs <- algorithm[names(algorithm) != "FUN"]
#' myargs <- myargs[names(myargs) != "alias"]
#' myargs$instance <- instance
#'
#' # call function
#' do.call(algorithm$FUN,
#' args = myargs)
#' }
#'
#' The `algorithm$FUN` routine must return a list containing (at
#' least) the performance value of the final solution obtained, in a field named
#' `value` (e.g., `result$value`) after a given run. In general it is easier to
#' write a small wrapper function around existing implementations.
#'
#' @section Initial Number of Observations:
#' In the _general case_ the initial number of observations / algorithm /
#' instance (`nstart`) should be relatively high. For the parametric case
#' we recommend 10~15 if outliers are not expected, and 30~40 (at least) if that
#' assumption cannot be made. For the bootstrap approach we recommend using at
#' least 15 or 20. However, if some distributional assumptions can be
#' made (particularly low skewness of the population of algorithm results on
#' the test instances), then `nstart` can in principle be as small as 5 (if the
#' output of the algorithm were known to be normal, it could be 1).
#'
#' In general, higher sample sizes are the price to pay for abandoning
#' distributional assumptions. Use lower values of `nstart` with caution.
#'
#' @section Pairwise Differences:
#' Parameter `dif` informs the type of difference in performance to be used
#' for the estimation (\eqn{\mu_a} and \eqn{\mu_b} represent the mean
#' performance of any two algorithms on the test instance, and \eqn{\mu}
#' represents the grand mean of all algorithms given in `algorithms`):
#'
#' - If `dif == "perc"` and `comparisons == "all.vs.first"`, the estimated
#' quantity is:
#' \eqn{\phi_{1b} = (\mu_1 - \mu_b) / \mu_1 = 1 - (\mu_b / \mu_1)}.
#'
#' - If `dif == "perc"` and `comparisons == "all.vs.all"`, the estimated
#' quantity is:
#' \eqn{\phi_{ab} = (\mu_a - \mu_b) / \mu}.
#'
#' - If `dif == "simple"` it estimates \eqn{\mu_a - \mu_b}.
#'
#' @section Sample Sizes for Nonparametric Methods:
#' If the parameter `test` is set to either `Wilcoxon` or `Binomial`, this
#' routine approximates the number of instances using the ARE of these tests
#' in relation to the paired t.test:
#' - `n.wilcox = n.ttest / 0.86 = 1.163 * n.ttest`
#' - `n.binom = n.ttest / 0.637 = 1.570 * n.ttest`
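#'
#' For example, if the t-test would require `n.ttest = 25` instances, the
#' corrected numbers (rounded up) are:
#'
#' \preformatted{
#' n.ttest <- 25
#' ceiling(n.ttest / 0.86)   # Wilcoxon: 30 instances
#' ceiling(n.ttest / 0.637)  # Binomial: 40 instances
#' }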
#'
#' @inheritParams calc_nreps
#' @inheritParams calc_instances
#' @param instances list object containing the definitions of the
#' _available_ instances. This list may (or may not) be exhausted in the
#' experiment. To estimate the number of required instances,
#' see [calc_instances()]. For more details, see Section `Instance List`.
#' @param power (desired) test power. See [calc_instances()] for details.
#' Any value equal to or greater than one will force the method to use all
#' available instances in `instances`.
#' @param d minimally relevant effect size (MRES), expressed as a standardized
#' effect size, i.e., "deviation from H0" / "standard deviation".
#' See [calc_instances()] for details.
#' @param sig.level family-wise significance level (alpha) for the experiment.
#' See [calc_instances()] for details.
#' @param alternative type of alternative hypothesis ("two.sided" or
#' "one.sided"). See [calc_instances()] for details.
#' @param save.partial.results logical, should partial results be saved to file?
#' @param load.partial.results logical, should previously saved partial results
#' be reloaded as part of the experiment?
#'
#' @return a list object containing the following fields:
#' \itemize{
#' \item \code{Configuration} - the full input configuration (for reproducibility)
#' \item \code{data.raw} - data frame containing all observations generated
#' \item \code{data.summary} - data frame summarizing the experiment.
#' \item \code{N} - number of instances sampled
#' \item \code{N.star} - number of instances required
#' \item \code{total.runs} - total number of algorithm runs performed
#' \item \code{instances.sampled} - names of the instances sampled
#' \item \code{Underpowered} - flag: TRUE if N < N.star
#' }
#'
#' @author Felipe Campelo (\email{fcampelo@@ufmg.br},
#' \email{f.campelo@@aston.ac.uk})
#'
#' @references
#' - F. Campelo, F. Takahashi:
#' Sample size estimation for power and accuracy in the experimental
#' comparison of algorithms. Journal of Heuristics 25(2):305-338, 2019.
#' - P. Mathews.
#' Sample size calculations: Practical methods for engineers and scientists.
#' Mathews Malnar and Bailey, 2010.
#' - A.C. Davison, D.V. Hinkley:
#' Bootstrap methods and their application. Cambridge University Press (1997)
#' - E.C. Fieller:
#' Some problems in interval estimation. Journal of the Royal Statistical
#' Society. Series B (Methodological) 16(2), 175–185 (1954)
#' - V. Franz:
#' Ratios: A short guide to confidence limits and proper use (2007).
#' https://arxiv.org/pdf/0710.2024v1.pdf
#' - D.C. Montgomery, C.G. Runger:
#' Applied Statistics and Probability for Engineers, 6th ed. Wiley (2013)
#' - D.J. Sheskin:
#' Handbook of Parametric and Nonparametric Statistical Procedures,
#' 4th ed., Chapman & Hall/CRC, 1996.
#'
#'
#' @examples
#' \dontrun{
#' # Example using four dummy algorithms and 100 dummy instances.
#' # See [dummyalgo()] and [dummyinstance()] for details.
#' # Generating 4 dummy algorithms here, with means 15, 10, 30, 15 and standard
#' # deviations 2, 4, 6, 8.
#' algorithms <- mapply(FUN = function(i, m, s){
#' list(FUN = "dummyalgo",
#' alias = paste0("algo", i),
#' distribution.fun = "rnorm",
#' distribution.pars = list(mean = m, sd = s))},
#' i = c(alg1 = 1, alg2 = 2, alg3 = 3, alg4 = 4),
#' m = c(15, 10, 30, 15),
#' s = c(2, 4, 6, 8),
#' SIMPLIFY = FALSE)
#'
#' # Generate 100 dummy instances with centered exponential distributions
#' instances <- lapply(1:100,
#' function(i) {rate <- runif(1, 1, 10)
#' list(FUN = "dummyinstance",
#' alias = paste0("Inst.", i),
#' distr = "rexp", rate = rate,
#' bias = -1 / rate)})
#'
#' my.results <- run_experiment(instances, algorithms,
#' d = .5, se.max = .1,
#' power = .9, sig.level = .05,
#' power.target = "mean",
#' dif = "perc", comparisons = "all.vs.all",
#' seed = 1234)
#'
#' # Take a look at the results
#' summary(my.results)
#' print(my.results)
#'}
#'
# run_experiment_old <- function(instances, algorithms, d, se.max,
# power = 0.8, sig.level = 0.05,
# power.target = "mean",
# dif = "simple", comparisons = "all.vs.all",
# alternative = "two.sided", test = "t.test",
# method = "param",
# nstart = 20, nmax = 100 * length(algorithms),
# force.balanced = FALSE,
# ncpus = 2, boot.R = 499, seed = NULL,
# save.partial.results = FALSE,
# load.partial.results = FALSE,
# folder = "./nreps_files")
# {
#
# # TODO:
# # save/load.partial.results can be either a folder, a vector of
# # file names, or NULL
# # If it is a folder, then filenames are generated based on instance aliases
# #
# # The call to calc_nreps will need to be changed from lapply to mapply
#
# # ======== Most error catching to be performed by specific routines ======== #
# assertthat::assert_that(assertthat::is.count(ncpus),
# is.null(seed) || seed == seed %/% 1)
# if (alternative == "one.sided"){
# assertthat::assert_that(comparisons == "all.vs.first")
# }
#
# # Fix a common mistake
# if (tolower(dif) == "percent") dif <- "perc"
#
# # Capture input parameters
# var.input.pars <- as.list(environment())
#
# # set PRNG seed
# if (is.null(seed)) {
# if (!exists(".Random.seed")) stats::runif(1)
# seed <- .Random.seed #i.e., do not change anything
# } else {
# set.seed(seed)
# }
#
#
# # Set up parallel processing
# if ((.Platform$OS.type == "windows") & (ncpus > 1)){
# cat("\nAttention: multicore not currently available for Windows.\n
# Forcing ncpus = 1.")
# ncpus <- 1
# } else {
# available.cores <- parallel::detectCores()
# if (ncpus >= available.cores){
# cat("\nAttention: ncpus too large, we only have ", available.cores,
# " cores.\nUsing ", available.cores - 1,
# " cores for run_experiment().")
# ncpus <- available.cores - 1
# }
# }
#
# # Fill up algorithm and instance aliases if needed
# for (i in 1:length(instances)){
# if (!("alias" %in% names(instances[[i]]))) {
# instances[[i]]$alias <- instances[[i]]$FUN
# }
# }
# for (i in 1:length(algorithms)){
# if (!("alias" %in% names(algorithms[[i]]))) {
# algorithms[[i]]$alias <- algorithms[[i]]$FUN
# }
# }
#
# # Calculate N*
# n.available <- length(instances)
# n.algs <- length(algorithms)
# n.comparisons <- switch(comparisons,
# all.vs.all = n.algs * (n.algs - 1) / 2,
# all.vs.first = n.algs - 1)
#
# ss.calc <- calc_instances(ncomparisons = n.comparisons,
# d = d,
# power = power,
# sig.level = sig.level,
# alternative = alternative,
# test = test,
# power.target = power.target)
# if (power >= 1) {
# N.star <- n.available
# } else {
# N.star <- ss.calc$ninstances
# if (N.star < n.available){
# # Randomize order of presentation for available instances
# instances <- instances[sample.int(n.available)]
# }
# }
#
# # Echo some information for the user
# cat("CAISEr running")
# cat("\n-----------------------------")
# cat("\nRequired number of instances:", N.star)
# cat("\nAvailable number of instances:", n.available)
# cat("\nUsing", ncpus, "cores.")
# cat("\n-----------------------------")
#
# # Sample instances
# if(ncpus > 1){
# my.results <- pbmcapply::pbmclapply(X = instances[1:min(N.star, n.available)],
# FUN = calc_nreps,
# # Arguments for calc_nreps:
# algorithms = algorithms,
# se.max = se.max,
# dif = dif,
# comparisons = comparisons,
# method = method,
# nstart = nstart,
# nmax = nmax,
# boot.R = boot.R,
# force.balanced = force.balanced,
#                                       save.to.file = save.partial.results,
#                                       load.from.file = load.partial.results,
#                                       folder = folder,
# # other pbmclapply arguments:
# mc.cores = ncpus)
# } else {
# my.results <- lapply(X = instances[1:min(N.star, n.available)],
# FUN = calc_nreps,
# # Arguments for calc_nreps:
# algorithms = algorithms,
# se.max = se.max,
# dif = dif,
# comparisons = comparisons,
# method = method,
# nstart = nstart,
# nmax = nmax,
# boot.R = boot.R,
# force.balanced = force.balanced,
# save.to.file = save.partial.results,
# load.from.file = load.partial.results,
# folder = folder)
# }
# # Consolidate raw data
# data.raw <- lapply(X = my.results,
# FUN = function(x){
# inst <- x$instance
# nj <- sum(x$Nk)
# data.frame(Algorithm = do.call(what = c,
# mapply(rep,
# names(x$Nk),
# x$Nk,
# SIMPLIFY = FALSE)),
# Instance = rep(inst, nj),
# Observation = do.call(c, x$Xk))})
#
# data.raw <- do.call(rbind, data.raw)
# rownames(data.raw) <- NULL
#
# # Consolidate summary data
# data.summary <- lapply(X = my.results,
# FUN = function(x){
# cbind(Instance = rep(x$instance, nrow(x$Diffk)),
# x$Diffk)})
#
# data.summary <- do.call(rbind, data.summary)
# algonames <- sapply(algorithms, function(x) x$alias)
# rownames(data.summary) <- NULL
# data.summary$Alg1 <- as.factor(algonames[data.summary$Alg1])
# data.summary$Alg2 <- as.factor(algonames[data.summary$Alg2])
#
#
# # Assemble output
# output <- list(Configuration = var.input.pars,
# data.raw = data.raw,
# data.summary = data.summary,
# N = min(N.star, n.available),
# N.star = N.star,
# total.runs = nrow(data.raw),
# instances.sampled = unique(data.raw$Instance),
# Underpowered = (N.star > n.available),
# samplesize.calc = ss.calc)
#
# class(output) <- c("CAISEr", "list")
#
# return(output)
# }
/scratch/gouwar.j/cran-all/cranData/CAISEr/R/zzzz_oldfuns.R
## ---- echo=FALSE--------------------------------------------------------------
suppressPackageStartupMessages(library(smoof))
suppressPackageStartupMessages(library(CAISEr))
## ---- eval=FALSE--------------------------------------------------------------
# # Install if needed
# # devtools::install_github("fcampelo/MOEADr")
#
# suppressPackageStartupMessages(library(smoof))
# suppressPackageStartupMessages(library(MOEADr))
# suppressPackageStartupMessages(library(CAISEr))
#
# ### Build function names (instances: UF1 - UF7, dimensions 10 - 40)
# fname <- paste0("UF_", 1:7)
# dims <- c(10:40)
# allfuns <- expand.grid(fname, dims, stringsAsFactors = FALSE)
#
# # Assemble instances list
# instances <- vector(nrow(allfuns), mode = "list")
# for (i in 1:length(instances)){
# instances[[i]]$FUN <- paste0(allfuns[i,1], "_", allfuns[i,2])
# }
#
# ### Build the functions listed above (so that they can be properly used)
# for (i in 1:nrow(allfuns)){
# assign(x = instances[[i]]$FUN,
# value = MOEADr::make_vectorized_smoof(prob.name = "UF",
# dimensions = allfuns[i, 2],
# id = as.numeric(strsplit(allfuns[i, 1], "_")[[1]][2])))
# }
## ---- eval=FALSE--------------------------------------------------------------
# # Prepare algorithm function to be used in run_experiment():
# myalgo <- function(type, instance){
# # Input parameters:
# # - type (variant to use: "original", "original2", "moead.de" or "moead.de2")
# # - instance (instance to be solved, e.g., instance = instances[[i]])
# # All other parameters are set internally
#
# ## Extract instance information to build the MOEADr problem format
# fdef <- unlist(strsplit(instance$FUN, split = "_"))
# uffun <- smoof::makeUFFunction(dimensions = as.numeric(fdef[3]),
# id = as.numeric(fdef[2]))
# fattr <- attr(uffun, "par.set")
# prob.dim <- fattr$pars$x$len
#
# ## Build MOEADr problem list
# problem <- list(name = instance$FUN,
# xmin = fattr$pars$x$lower,
# xmax = fattr$pars$x$upper,
# m = attr(uffun, "n.objectives"))
#
# ## Load presets for the algorithm provided in input 'type' and
# ## modify whatever is needed for this particular experiment
# de2 <- FALSE
# if (type == "moead.de2"){
# de2 <- TRUE
# type <- "moead.de"
# }
# algo.preset <- MOEADr::preset_moead(type)
# algo.preset$decomp$H <- 99 # <-- set population size
# algo.preset$stopcrit[[1]]$name <- "maxeval" # <-- type of stop criterion
# algo.preset$stopcrit[[1]]$maxeval <- 2000 * prob.dim # stop crit.
# poly.ind <- which(sapply(algo.preset$variation,
# function(x){x$name == "polymut"}))
# algo.preset$variation[[poly.ind]]$pm <- 1 / prob.dim # <--- pm = 1/d
# if (de2){
# algo.preset$aggfun$name <- "pbi"
# algo.preset$aggfun$theta <- 5
# algo.preset$neighbors$name = "x"
# }
#
# ## Run algorithm on "instance"
# out <- MOEADr::moead(preset = algo.preset, problem = problem,
# showpars = list(show.iters = "none"))
#
# ## Read reference data to calculate the IGD
# Yref <- as.matrix(read.table(paste0("./inst/extdata/pf_data/",
# fdef[1], fdef[2], ".dat")))
# IGD = MOEADr::calcIGD(Y = out$Y, Yref = Yref)
#
# ## Return IGD as field "value" in the output list
# return(list(value = IGD))
# }
## ---- eval=FALSE--------------------------------------------------------------
# # Assemble Algorithm.list. Notice that we need to provide an alias for each
# # method, since both algorithms have the same '$FUN' argument.
# algorithms <- list(list(FUN = "myalgo",
# alias = "Original 1",
# type = "original"),
# list(FUN = "myalgo",
# alias = "Original 2",
# type = "original2"),
# list(FUN = "myalgo",
# alias = "MOEAD-DE",
# type = "moead.de"),
# list(FUN = "myalgo",
# alias = "MOEAD-DE2",
# type = "moead.de2"))
## ---- eval=FALSE--------------------------------------------------------------
# my.results <- run_experiment(instances = instances,
# algorithms = algorithms,
# power = 0.8, # Desired power: 80%
# power.target = "mean", # on average,
# d = 0.5, # to detect differences greater
# # than 0.5 standard deviations
# sig.level = 0.05, # at a 95% confidence level.
# se.max = 0.05, # Measurement error: 5%
# dif = "perc", # on the paired percent
# # differences of means,
# method = "param", # calculated using parametric
# # formula.
# comparisons = "all.vs.all", # Compare all algorithms
# # vs all others,
# nstart = 15, # start with 15 runs/algo/inst
# nmax = 200, # and do no more than 200 runs/inst.
# seed = 1234, # PRNG seed (for reproducibility)
# #
# # NOTICE: Using all but 1 cores. Change if needed
# ncpus = parallel::detectCores() - 1)
## ---- echo=FALSE--------------------------------------------------------------
load("../inst/extdata/vignette_results.RData")
## ---- fig.align="center", fig.width=8, fig.height=8---------------------------
plot(my.results)
## ---- fig.align="center", fig.width=6, fig.height=10--------------------------
suppressPackageStartupMessages(library(car))
algopairs <- paste(my.results$data.summary$Alg1,
my.results$data.summary$Alg2,
sep = " - ")
par(mfrow = c(3, 2))
for (i in seq_along(unique(algopairs))){
tmp <- my.results$data.summary[algopairs == unique(algopairs)[i], ]
car::qqPlot(tmp$Phi,
pch = 16, las = 1, main = unique(algopairs)[i],
ylab = "observed", xlab = "theoretical quantiles")
}
par(mfrow = c(1, 1))
## ---- fig.align="center", fig.width=6, fig.height=8---------------------------
par(mfrow = c(3, 2))
for (i in seq_along(unique(algopairs))){
tmp <- my.results$data.summary[algopairs == unique(algopairs)[i], ]
boot.means <- CAISEr::boot_sdm(tmp$Phi, boot.R = 999)
hist(boot.means, breaks = 30, main = unique(algopairs)[i], las = 1)
}
par(mfrow = c(1, 1))
## ---- fig.align="center", fig.width=6, fig.height=4---------------------------
df <- cbind(Comparison = algopairs, my.results$data.summary)
suppressPackageStartupMessages(library(ggplot2))
mp <- ggplot(df, aes(x = Comparison, y = Phi, fill = Comparison))
mp +
geom_violin(alpha = 0.6, show.legend = FALSE, scale = "width") +
geom_boxplot(alpha = 0, show.legend = FALSE,
outlier.shape = NA, width = .15) +
geom_point(shape = 16, col = "black", fill = "black", alpha = 0.6,
position = position_jitter(width = .15)) +
geom_abline(slope = 0, intercept = 0, col = "red", lty = 2) +
ylab("Percent difference in IGD") + xlab("") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
## ---- fig.align="center", fig.width=6, fig.height=8---------------------------
ggplot(df, aes(x = Instance, y = Phi, colour = Comparison,
ymin = Phi - SE, ymax = Phi + SE)) +
geom_pointrange(show.legend = FALSE) +
geom_abline(slope = 0, intercept = 0, col = 1, lty = 2) +
facet_grid(Comparison ~ .) +
theme(axis.text.x = element_text(angle = 60, hjust = 1)) +
xlab("")
## ---- fig.align="center", fig.width=6, fig.height=8---------------------------
summary(my.results, test = "wilcoxon")
/scratch/gouwar.j/cran-all/cranData/CAISEr/inst/doc/Adapting_Algorithm_for_CAISEr.R
---
title: "Adapting Algorithms for CAISEr"
author: "Felipe Campelo"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Adapting Algorithms for CAISEr}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
This is a short guide to adapting existing algorithms and problem instances for
running an experiment using CAISEr. In this document, we cover:
- Definition of instance lists
- Adaptation of existing algorithms
- Some stuff you can do with the results
A general description of the CAISEr methodology is available in our papers:
- F. Campelo, F. Takahashi, _Sample size estimation for power and accuracy in the experimental comparison of algorithms_. Journal of Heuristics 25(2):305-338,
2019.
- F. Campelo, E. Wanner, _Sample size calculations for the experimental comparison of multiple algorithms on multiple problem instances_. Submitted, Journal of
Heuristics, 2019.
## Assembling an instance list
As stated in the documentation of both `run_experiment()` and `calc_nreps()`, each
instance must be a named list containing all relevant parameters that
define the problem instance. This list must contain at least the field
`instance$FUN`, with the name of the problem instance function, that is, a
routine that calculates $y = f(x)$. If the instance requires additional
parameters, these must also be provided as named fields. Each instance can
also have an `alias`, a unique name to distinguish it from other instances. If
no alias is provided, the name of the function (`instance$FUN`) is used as the
instance ID.
The `instances` parameter for `run_experiment()` is simply a list or vector of
these instance lists.
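For illustration, a minimal instance list could look like the sketch below (the function name `myfun` is hypothetical, used only to show the structure):
```{r, eval=FALSE}
# A single instance: a named list with (at least) the field FUN.
# "myfun" is a hypothetical routine that computes y = f(x).
instance1 <- list(FUN = "myfun", alias = "Inst.1")

# run_experiment() and calc_nreps() expect a list of such lists:
instances <- list(instance1)
```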
To illustrate how to adapt existing implementations to this structure, we
assume that we are interested in comparing four multi-objective optimisation
algorithms for a (hypothetical) problem class represented by
problems UF1 - UF7 (in dimensions between 10 and 40) from package
[smoof](https://CRAN.R-project.org/package=smoof). For this implementation to
work with the `moead()` routine from the `MOEADr` package (see next section) some
manipulation is necessary, but the instance list in this case is simply a list
with each element containing the name of the routine as field `$FUN` (since
all function names are different, no need for aliases).
```{r, echo=FALSE}
suppressPackageStartupMessages(library(smoof))
suppressPackageStartupMessages(library(CAISEr))
```
```{r, eval=FALSE}
# Install if needed
# devtools::install_github("fcampelo/MOEADr")
suppressPackageStartupMessages(library(smoof))
suppressPackageStartupMessages(library(MOEADr))
suppressPackageStartupMessages(library(CAISEr))
### Build function names (instances: UF1 - UF7, dimensions 10 - 40)
fname <- paste0("UF_", 1:7)
dims <- c(10:40)
allfuns <- expand.grid(fname, dims, stringsAsFactors = FALSE)
# Assemble instances list
instances <- vector(nrow(allfuns), mode = "list")
for (i in 1:length(instances)){
instances[[i]]$FUN <- paste0(allfuns[i,1], "_", allfuns[i,2])
}
### Build the functions listed above (so that they can be properly used)
for (i in 1:nrow(allfuns)){
assign(x = instances[[i]]$FUN,
value = MOEADr::make_vectorized_smoof(prob.name = "UF",
dimensions = allfuns[i, 2],
id = as.numeric(strsplit(allfuns[i, 1], "_")[[1]][2])))
}
```
## Adaptation of an existing algorithm implementation
We will use the MOEA/D implementation available in the [MOEADr](https://github.com/fcampelo/MOEADr) package as our base
algorithm, and assume that we are interested in comparing four versions of this
algorithm as solvers of the hypothetical problem class represented by the
available test instances: the two versions described in the [original MOEA/D paper](https://ieeexplore.ieee.org/document/4358754), the one described in the [MOEA/D-DE paper](https://ieeexplore.ieee.org/document/4633340), and a variation of the MOEA/D-DE configuration that uses PBI as the aggregation function and calculates neighborhoods based on the space of parameters instead of on the scalarisation weight vectors (see the documentation of [MOEADr](https://github.com/fcampelo/MOEADr) and
references therein for details of these methods). The performance of each
algorithm on each instance will be measured by an indicator known as the
_Inverted Generational Distance_ (IGD - details [here](https://ieeexplore.ieee.org/document/1197687/)), for which
_smaller = better_.
As described in the documentation of both `run_experiment()` and
`calc_nreps()`, an `algorithm` must contain an `algorithm$FUN` field (the name
of the function that calls the algorithm) and any other elements/parameters
that `algorithm$FUN` requires (e.g., stop criteria, operator names and
parameters, etc.). An additional field, `algorithm$alias`, can be used to
provide the algorithm with a unique identifier.
Supposing that the list in `algorithm` has fields `algorithm$FUN = "myalgo"`, `algorithm$par1 = "a"` and `algorithm$par2 = 5`, then the function in
`algorithm$FUN` must have the following structure:
```
myalgo <- function(par1, par2, instance, ...){
# do stuff
return(results)
}
```
That is, it must be able to run if called as:
```
# remove '$FUN' and '$alias' field from list of arguments
# and include the problem definition as field 'instance'
myargs <- algorithm[names(algorithm) != "FUN"]
myargs <- myargs[names(myargs) != "alias"]
myargs$instance <- instance
# call 'algorithm$FUN' with the arguments in 'myargs'
do.call(algorithm$FUN, args = myargs)
```
Finally, the `algorithm$FUN` routine must return a list object containing (at
least) the performance value of the final solution obtained after a given
run, in a field named `value` (e.g., `result$value`).
To build the algorithm functions to be used in `run_experiment()`, we
encapsulate (almost) all algorithm parameters within a `myalgo()` function,
which receives only two inputs: the instance to be solved (i.e., one element
from `instances`) and the specification of which version of the algorithm
is to be run.
```{r, eval=FALSE}
# Prepare algorithm function to be used in run_experiment():
myalgo <- function(type, instance){
# Input parameters:
# - type (variant to use: "original", "original2", "moead.de" or "moead.de2")
# - instance (instance to be solved, e.g., instance = instances[[i]])
# All other parameters are set internally
## Extract instance information to build the MOEADr problem format
fdef <- unlist(strsplit(instance$FUN, split = "_"))
uffun <- smoof::makeUFFunction(dimensions = as.numeric(fdef[3]),
id = as.numeric(fdef[2]))
fattr <- attr(uffun, "par.set")
prob.dim <- fattr$pars$x$len
## Build MOEADr problem list
problem <- list(name = instance$FUN,
xmin = fattr$pars$x$lower,
xmax = fattr$pars$x$upper,
m = attr(uffun, "n.objectives"))
## Load presets for the algorithm provided in input 'type' and
## modify whatever is needed for this particular experiment
de2 <- FALSE
if (type == "moead.de2"){
de2 <- TRUE
type <- "moead.de"
}
algo.preset <- MOEADr::preset_moead(type)
algo.preset$decomp$H <- 99 # <-- set population size
algo.preset$stopcrit[[1]]$name <- "maxeval" # <-- type of stop criterion
algo.preset$stopcrit[[1]]$maxeval <- 2000 * prob.dim # stop crit.
poly.ind <- which(sapply(algo.preset$variation,
function(x){x$name == "polymut"}))
algo.preset$variation[[poly.ind]]$pm <- 1 / prob.dim # <--- pm = 1/d
if (de2){
algo.preset$aggfun$name <- "pbi"
algo.preset$aggfun$theta <- 5
algo.preset$neighbors$name = "x"
}
## Run algorithm on "instance"
out <- MOEADr::moead(preset = algo.preset, problem = problem,
showpars = list(show.iters = "none"))
## Read reference data to calculate the IGD
Yref <- as.matrix(read.table(paste0("./inst/extdata/pf_data/",
fdef[1], fdef[2], ".dat")))
IGD = MOEADr::calcIGD(Y = out$Y, Yref = Yref)
## Return IGD as field "value" in the output list
return(list(value = IGD))
}
```
Finally, the `algorithms` parameter must be assembled as a list of
algorithm objects (each containing fields `$FUN`, `$alias` and, in
this case, `$type`).
```{r, eval=FALSE}
# Assemble Algorithm.list. Notice that we need to provide an alias for each
# method, since both algorithms have the same '$FUN' argument.
algorithms <- list(list(FUN = "myalgo",
alias = "Original 1",
type = "original"),
list(FUN = "myalgo",
alias = "Original 2",
type = "original2"),
list(FUN = "myalgo",
alias = "MOEAD-DE",
type = "moead.de"),
list(FUN = "myalgo",
alias = "MOEAD-DE2",
type = "moead.de2"))
```
## Running an experiment using CAISEr
With the definitions above it is possible now to run an experiment
using the iterative sample size determination implemented in CAISEr. For
that, all we have to do is define the desired experimental parameters and
use `run_experiment()`.
**IMPORTANT NOTICE:** running this experiment takes a
while (about 4 hours on a 3.6 GHz Intel Core i7 iMac with 16 GB RAM, using 7 cores).
If you want a (much) faster example, check the documentation of `run_experiment()`.
```{r, eval=FALSE}
my.results <- run_experiment(instances = instances,
algorithms = algorithms,
power = 0.8, # Desired power: 80%
power.target = "mean", # on average,
d = 0.5, # to detect differences greater
# than 0.5 standard deviations
sig.level = 0.05, # at a 95% confidence level.
se.max = 0.05, # Measurement error: 5%
dif = "perc", # on the paired percent
# differences of means,
method = "param", # calculated using parametric
# formula.
comparisons = "all.vs.all", # Compare all algorithms
# vs all others,
nstart = 15, # start with 15 runs/algo/inst
nmax = 200, # and do no more than 200 runs/inst.
seed = 1234, # PRNG seed (for reproducibility)
#
# NOTICE: Using all but 1 cores. Change if needed
ncpus = parallel::detectCores() - 1)
```
```{r, echo=FALSE}
load("../inst/extdata/vignette_results.RData")
```
After that we can interrogate the results and perform inference, if we are so
inclined. But first, CAISEr provides a useful plot function for the output of `run_experiment()`. Let's try it (type `?plot.CAISEr` for details):
```{r, fig.align="center", fig.width=8, fig.height=8}
plot(my.results)
```
We can also check if all paired differences in performance
are (at least approximately) Normal, so that we can assume a Normal sampling
distribution of the means and use t-tests without much worry:
```{r, fig.align="center", fig.width=6, fig.height=10}
suppressPackageStartupMessages(library(car))
algopairs <- paste(my.results$data.summary$Alg1,
my.results$data.summary$Alg2,
sep = " - ")
par(mfrow = c(3, 2))
for (i in seq_along(unique(algopairs))){
tmp <- my.results$data.summary[algopairs == unique(algopairs)[i], ]
car::qqPlot(tmp$Phi,
pch = 16, las = 1, main = unique(algopairs)[i],
ylab = "observed", xlab = "theoretical quantiles")
}
par(mfrow = c(1, 1))
```
The normal QQ plots indicate some deviation from normality, with the `MOEA/D-DE` vs. `MOEA/D-DE2` comparison having a particularly long left tail. We can visually check the effect of this on the sampling distribution of the means:
```{r, fig.align="center", fig.width=6, fig.height=8}
par(mfrow = c(3, 2))
for (i in seq_along(unique(algopairs))){
tmp <- my.results$data.summary[algopairs == unique(algopairs)[i], ]
boot.means <- CAISEr::boot_sdm(tmp$Phi, boot.R = 999)
hist(boot.means, breaks = 30, main = unique(algopairs)[i], las = 1)
}
par(mfrow = c(1, 1))
```
The deviations do not seem to be too severe, but they are enough to justify using a nonparametric alternative (e.g., Wilcoxon's signed-rank test) instead of the t-test. This will sacrifice a little power in borderline cases (i.e., comparisons where the actual effect size is close to the value of $d^*$ used in the design of the experiment), but gives us more confidence in the comparisons that end up being statistically significant.
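As a sketch of this kind of test (one-sample Wilcoxon signed-rank tests of the per-comparison differences against zero, here with Holm's adjustment for multiplicity; the exact procedure used internally by CAISEr's `summary` method, shown later, may differ):
```{r, eval=FALSE}
# One-sample Wilcoxon tests of H0: median difference = 0, per comparison
pvals <- sapply(unique(algopairs),
                function(pair){
                  tmp <- my.results$data.summary[algopairs == pair, ]
                  stats::wilcox.test(tmp$Phi, mu = 0)$p.value
                })
stats::p.adjust(pvals, method = "holm")
```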
Some other graphical analyses can be quite informative:
```{r, fig.align="center", fig.width=6, fig.height=4}
df <- cbind(Comparison = algopairs, my.results$data.summary)
suppressPackageStartupMessages(library(ggplot2))
mp <- ggplot(df, aes(x = Comparison, y = Phi, fill = Comparison))
mp +
geom_violin(alpha = 0.6, show.legend = FALSE, scale = "width") +
geom_boxplot(alpha = 0, show.legend = FALSE,
outlier.shape = NA, width = .15) +
geom_point(shape = 16, col = "black", fill = "black", alpha = 0.6,
position = position_jitter(width = .15)) +
geom_abline(slope = 0, intercept = 0, col = "red", lty = 2) +
ylab("Percent difference in IGD") + xlab("") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
```
Recall first that `smaller = better` for the quality indicator used in this experiment. The plot above shows that the differences between all pairs of algorithms were quite large - e.g., up to 200\% in the case of `Original 2` versus both MOEA/D-DE versions. In general, both `MOEA/D-DE` versions seem to generate better (lower) IGD values than the `Original` versions, and `MOEA/D-DE` seems generally better than `MOEA/D-DE2` (first comparison on the left). Also, the variability seems reasonably high for all comparisons.
We can also visualise the individual estimates (with their standard errors):
```{r, fig.align="center", fig.width=6, fig.height=8}
ggplot(df, aes(x = Instance, y = Phi, colour = Comparison,
ymin = Phi - SE, ymax = Phi + SE)) +
geom_pointrange(show.legend = FALSE) +
geom_abline(slope = 0, intercept = 0, col = 1, lty = 2) +
facet_grid(Comparison ~ .) +
theme(axis.text.x = element_text(angle = 60, hjust = 1)) +
xlab("")
```
Notice that the standard errors of the estimates are in most cases smaller than the marker used for the point estimate. Also notice that, for each comparison, the observations fall mainly on one side of the zero line, which strongly suggests that the comparisons will turn out statistically significant.
Finally, we can examine our hypothesis tests. The `summary` method of class `CAISEr` already outputs the desired information:
```{r, fig.align="center", fig.width=6, fig.height=8}
summary(my.results, test = "wilcoxon")
```
All differences were found to be statistically significant at the joint $95\%$ confidence level.
Finally, the full data of the experiment is contained in other fields of
the output list `my.results`, and the user is encouraged to explore these.
It is possible, for instance, to generate box plots and confidence intervals on the mean performance of each algorithm on each sampled instance, which can inspire new questions for the researcher.
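As a sketch (assuming the raw observations are stored in `my.results$data.raw` with columns `Algorithm`, `Instance` and `Observation` - check `str(my.results)` to confirm the exact names):
```{r, eval=FALSE}
# Box plots of raw performance per algorithm on one sampled instance
# (column names of data.raw are assumptions - confirm with str(my.results))
one.inst <- subset(my.results$data.raw,
                   Instance == unique(my.results$data.raw$Instance)[1])
ggplot(one.inst, aes(x = Algorithm, y = Observation, fill = Algorithm)) +
  geom_boxplot(show.legend = FALSE) +
  ylab("IGD") + xlab("") +
  ggtitle(unique(one.inst$Instance))
```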
|
/scratch/gouwar.j/cran-all/cranData/CAISEr/inst/doc/Adapting_Algorithm_for_CAISEr.Rmd
|
# Generates the phyletic vector of each genome in a group, which stores the
# count of occurrences of each ontology term in that genome.
#
# @param defs an enriched CALANGO-type list object containing at least the
# fields named in parameters `anno` and `genome.names`, as well as
# a field named `ontology` (containing the name of the ontology to
# use). Depending on the ontology other fields may be needed.
# @param anno name of the field in `defs` containing a list with annotation
# from the original input data
# @param genome.names name of the field in `defs` containing a char vector with
# the names of the genomes to select.
# @param someGV data.frame containing a previously processed set of genome
# vectors.
#
# @return list with the phyletic vector of each genome of the input group.
#
AddGenomeVectors <- function(defs, anno, genome.names,
someGV = NULL) {
# ================== Sanity checks ==================
assertthat::assert_that(all(c("list", "CALANGO") %in% class(defs)),
is.character(anno), length(anno) == 1,
is.character(genome.names), length(genome.names) == 1,
all(c(anno, genome.names, "ontology") %in% names(defs)),
is.null(someGV) || is.data.frame(someGV),
msg = "input error in CALANGO::AddGenomeVectors()")
# To generate genome vectors, we need some information about the ontology.
# Since it may be GO, KO or an arbitrary one, the information may come
# from different variables, hence the following wrapper.
ontologyInfo <- list(ontology = tolower(defs$ontology))
if (ontologyInfo$ontology %in% c("go", "gene ontology")) {
# TODO: add these to the function documentation
assertthat::assert_that(all(c("allAncestor",
"allObsolete",
"allSynonym") %in% names(defs)))
ontologyInfo$allAncestor <- defs$allAncestor
ontologyInfo$allObsolete <- defs$allObsolete
ontologyInfo$allSynonym <- defs$allSynonym
ontologyInfo$name <- names(defs$allAncestor)
} else if (ontologyInfo$ontology == "kegg") {
assertthat::assert_that("allKOs" %in% names(defs))
ontologyInfo$name <- names(defs$allKOs)
} else if (ontologyInfo$ontology == "other") {
assertthat::assert_that("dictionary" %in% names(defs))
ontologyInfo$name <- names(defs$dictionary)
} else{
stop("'", ontologyInfo$ontology, "' is not a valid ontology type.")
}
# Select genomes from someAnno that are in genome.names and not in someGV,
# so we don't add genome vectors that already exist.
genome.names <- setdiff(defs[[genome.names]], rownames(someGV))
someAnno <- defs[[anno]][genome.names]
# Add the genome vectors and fit them into a data.frame format.
message("Generating Genome Vectors")
if (.Platform$OS.type == "windows"){
genomeVectors <- parallel::parLapply(cl = defs$cl,
X = someAnno,
fun = GenerateGenomeVector,
ontologyInfo = ontologyInfo,
column = defs$column)
} else {
genomeVectors <- pbmcapply::pbmclapply(X = someAnno,
FUN = GenerateGenomeVector,
ontologyInfo = ontologyInfo,
column = defs$column,
mc.preschedule = FALSE,
mc.cores = defs$cores)
}
genomeVectors <- as.data.frame(do.call(rbind, genomeVectors),
optional = TRUE)
# Merge with the existing genome vectors, if given.
if (!is.null(someGV)) {
genomeVectors <- rbind(someGV, genomeVectors)
}
return(genomeVectors)
}
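#
# Usage sketch (illustrative only; the field names "y.anno" and "y.name"
# below are assumptions - in practice 'defs' is assembled by CALANGO's
# internal pipeline and may use different field names):
#
#   gv <- AddGenomeVectors(defs, anno = "y.anno", genome.names = "y.name")
#   dim(gv)  # one row per genome, one column per ontology term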
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/AddGenomeVectors.R
|
# Adds the field "Term" of each GO/KO ID to the results.
#
# Args:
# defs: (list) a CALANGO-type list object (generated internally by the
# CALANGO functions)
# results: (list) MHT-corrected p-values of all GO/KO IDs
# ontology: (char) which ontology is used: "go"/"gene ontology", "kegg"
# or "other".
# Returns:
# annotation: (list) translation of the ontology's terms.
AnnotateResults <- function(defs, results, ontology) {
x <- switch(tolower(ontology),
"go" = as.list(AnnotationDbi::Term(GO.db::GOTERM[names(results)])),
"gene ontology" = as.list(AnnotationDbi::Term(GO.db::GOTERM[names(results)])),
"kegg" = as.list(defs$allKOs[names(results)]),
"other" = defs$dictionary[names(results)])
return(x)
}
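#
# Usage sketch (assumes 'results' is a named list/vector of corrected
# p-values keyed by term IDs, as produced upstream in the pipeline):
#
#   terms <- AnnotateResults(defs, results, ontology = "GO")
#   # e.g., terms[["GO:0008150"]] == "biological_process"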
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/AnnotateResults.R
|
#' @importFrom grDevices rgb
CALANGO_brewer_pal <- function (n, name)
{
namelist <- c("BrBG", "PiYG", "PRGn", "PuOr", "RdBu",
"RdGy", "RdYlBu", "RdYlGn", "Spectral",
"Accent", "Dark2", "Paired", "Pastel1",
"Pastel2", "Set1", "Set2", "Set3", "Blues",
"BuGn", "BuPu", "GnBu", "Greens", "Greys",
"Oranges", "OrRd", "PuBu", "PuBuGn", "PuRd",
"Purples", "RdPu", "Reds", "YlGn", "YlGnBu",
"YlOrBr", "YlOrRd")
maxcolors <- c(11, 11, 11, 11, 11, 11, 11, 11, 11,
8, 8, 12, 9, 8, 9, 8, 12, 9, 9, 9,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9)
names(namelist) <- namelist
names(maxcolors) <- namelist
if (!(name %in% namelist)) {
stop(paste(name, "is not a valid palette name for brewer.pal\n"))
}
if (n < 3) {
# warning("
# Minimal value for n is 3.
# Returning requested palette with the ", n, " first components.
# Behaviour may be unexpected.\n")
return(CALANGO_brewer_pal(3, name)[1:n])
}
if (n > maxcolors[which(name == namelist)]) {
warning(paste("n too large, allowed maximum for palette",
name, "is", maxcolors[which(name == namelist)]),
"\nReturning the palette you asked for with that many colors\n")
return(CALANGO_brewer_pal(maxcolors[which(name == namelist)],
name))
}
switch(name,
Accent = switch(n-2,
rgb(c(127,190,253),
c(201,174,192),
c(127,212,134),maxColorValue=255),
rgb(c(127,190,253,255),
c(201,174,192,255),
c(127,212,134,153),maxColorValue=255),
rgb(c(127,190,253,255,56),
c(201,174,192,255,108),
c(127,212,134,153,176),maxColorValue=255),
rgb(c(127,190,253,255,56,240),
c(201,174,192,255,108,2),
c(127,212,134,153,176,127),maxColorValue=255),
rgb(c(127,190,253,255,56,240,191),
c(201,174,192,255,108,2,91),
c(127,212,134,153,176,127,23),maxColorValue=255),
rgb(c(127,190,253,255,56,240,191,102),
c(201,174,192,255,108,2,91,102),
c(127,212,134,153,176,127,23,102),maxColorValue=255)
),
Blues = switch(n-2,
rgb(c(222,158,49),
c(235,202,130),
c(247,225,189),maxColorValue=255),
rgb(c(239,189,107,33),
c(243,215,174,113),
c(255,231,214,181),maxColorValue=255),
rgb(c(239,189,107,49,8),
c(243,215,174,130,81),
c(255,231,214,189,156),maxColorValue=255),
rgb(c(239,198,158,107,49,8),
c(243,219,202,174,130,81),
c(255,239,225,214,189,156),maxColorValue=255),
rgb(c(239,198,158,107,66,33,8),
c(243,219,202,174,146,113,69),
c(255,239,225,214,198,181,148),maxColorValue=255),
rgb(c(247,222,198,158,107,66,33,8),
c(251,235,219,202,174,146,113,69),
c(255,247,239,225,214,198,181,148),maxColorValue=255),
rgb(c(247,222,198,158,107,66,33,8,8),
c(251,235,219,202,174,146,113,81,48),
c(255,247,239,225,214,198,181,156,107),maxColorValue=255)
),
BrBG = switch(n-2,
rgb(c(216,245,90),
c(179,245,180),
c(101,245,172),maxColorValue=255),
rgb(c(166,223,128,1),
c(97,194,205,133),
c(26,125,193,113),maxColorValue=255),
rgb(c(166,223,245,128,1),
c(97,194,245,205,133),
c(26,125,245,193,113),maxColorValue=255),
rgb(c(140,216,246,199,90,1),
c(81,179,232,234,180,102),
c(10,101,195,229,172,94),maxColorValue=255),
rgb(c(140,216,246,245,199,90,1),
c(81,179,232,245,234,180,102),
c(10,101,195,245,229,172,94),maxColorValue=255),
rgb(c(140,191,223,246,199,128,53,1),
c(81,129,194,232,234,205,151,102),
c(10,45,125,195,229,193,143,94),maxColorValue=255),
rgb(c(140,191,223,246,245,199,128,53,1),
c(81,129,194,232,245,234,205,151,102),
c(10,45,125,195,245,229,193,143,94),maxColorValue=255),
rgb(c(84,140,191,223,246,199,128,53,1,0),
c(48,81,129,194,232,234,205,151,102,60),
c(5,10,45,125,195,229,193,143,94,48),maxColorValue=255),
rgb(c(84,140,191,223,246,245,199,128,53,1,0),
c(48,81,129,194,232,245,234,205,151,102,60),
c(5,10,45,125,195,245,229,193,143,94,48),maxColorValue=255)
),
BuGn = switch(n-2,
rgb(c(229,153,44),
c(245,216,162),
c(249,201,95),maxColorValue=255),
rgb(c(237,178,102,35),
c(248,226,194,139),
c(251,226,164,69),maxColorValue=255),
rgb(c(237,178,102,44,0),
c(248,226,194,162,109),
c(251,226,164,95,44),maxColorValue=255),
rgb(c(237,204,153,102,44,0),
c(248,236,216,194,162,109),
c(251,230,201,164,95,44),maxColorValue=255),
rgb(c(237,204,153,102,65,35,0),
c(248,236,216,194,174,139,88),
c(251,230,201,164,118,69,36),maxColorValue=255),
rgb(c(247,229,204,153,102,65,35,0),
c(252,245,236,216,194,174,139,88),
c(253,249,230,201,164,118,69,36),maxColorValue=255),
rgb(c(247,229,204,153,102,65,35,0,0),
c(252,245,236,216,194,174,139,109,68),
c(253,249,230,201,164,118,69,44,27),maxColorValue=255)
),
BuPu = switch(n-2,
rgb(c(224,158,136),
c(236,188,86),
c(244,218,167),maxColorValue=255),
rgb(c(237,179,140,136),
c(248,205,150,65),
c(251,227,198,157),maxColorValue=255),
rgb(c(237,179,140,136,129),
c(248,205,150,86,15),
c(251,227,198,167,124),maxColorValue=255),
rgb(c(237,191,158,140,136,129),
c(248,211,188,150,86,15),
c(251,230,218,198,167,124),maxColorValue=255),
rgb(c(237,191,158,140,140,136,110),
c(248,211,188,150,107,65,1),
c(251,230,218,198,177,157,107),maxColorValue=255),
rgb(c(247,224,191,158,140,140,136,110),
c(252,236,211,188,150,107,65,1),
c(253,244,230,218,198,177,157,107),maxColorValue=255),
rgb(c(247,224,191,158,140,140,136,129,77),
c(252,236,211,188,150,107,65,15,0),
c(253,244,230,218,198,177,157,124,75),maxColorValue=255)
),
Dark2 = switch(n-2,
rgb(c(27,217,117),
c(158,95,112),
c(119,2,179),maxColorValue=255),
rgb(c(27,217,117,231),
c(158,95,112,41),
c(119,2,179,138),maxColorValue=255),
rgb(c(27,217,117,231,102),
c(158,95,112,41,166),
c(119,2,179,138,30),maxColorValue=255),
rgb(c(27,217,117,231,102,230),
c(158,95,112,41,166,171),
c(119,2,179,138,30,2),maxColorValue=255),
rgb(c(27,217,117,231,102,230,166),
c(158,95,112,41,166,171,118),
c(119,2,179,138,30,2,29),maxColorValue=255),
rgb(c(27,217,117,231,102,230,166,102),
c(158,95,112,41,166,171,118,102),
c(119,2,179,138,30,2,29,102),maxColorValue=255)
),
GnBu = switch(n-2,
rgb(c(224,168,67),
c(243,221,162),
c(219,181,202),maxColorValue=255),
rgb(c(240,186,123,43),
c(249,228,204,140),
c(232,188,196,190),maxColorValue=255),
rgb(c(240,186,123,67,8),
c(249,228,204,162,104),
c(232,188,196,202,172),maxColorValue=255),
rgb(c(240,204,168,123,67,8),
c(249,235,221,204,162,104),
c(232,197,181,196,202,172),maxColorValue=255),
rgb(c(240,204,168,123,78,43,8),
c(249,235,221,204,179,140,88),
c(232,197,181,196,211,190,158),maxColorValue=255),
rgb(c(247,224,204,168,123,78,43,8),
c(252,243,235,221,204,179,140,88),
c(240,219,197,181,196,211,190,158),maxColorValue=255),
rgb(c(247,224,204,168,123,78,43,8,8),
c(252,243,235,221,204,179,140,104,64),
c(240,219,197,181,196,211,190,172,129),maxColorValue=255)
),
Greens = switch(n-2,
rgb(c(229,161,49),c(245,217,163),c(224,155,84),maxColorValue=255),
rgb(c(237,186,116,35),
c(248,228,196,139),
c(233,179,118,69),maxColorValue=255),
rgb(c(237,186,116,49,0),
c(248,228,196,163,109),
c(233,179,118,84,44),maxColorValue=255),
rgb(c(237,199,161,116,49,0),
c(248,233,217,196,163,109),
c(233,192,155,118,84,44),maxColorValue=255),
rgb(c(237,199,161,116,65,35,0),
c(248,233,217,196,171,139,90),
c(233,192,155,118,93,69,50),maxColorValue=255),
rgb(c(247,229,199,161,116,65,35,0),
c(252,245,233,217,196,171,139,90),
c(245,224,192,155,118,93,69,50),maxColorValue=255),
rgb(c(247,229,199,161,116,65,35,0,0),
c(252,245,233,217,196,171,139,109,68),
c(245,224,192,155,118,93,69,44,27),maxColorValue=255)
),
Greys = switch(n-2,
rgb(c(240,189,99),
c(240,189,99),
c(240,189,99),maxColorValue=255),
rgb(c(247,204,150,82),
c(247,204,150,82),
c(247,204,150,82),maxColorValue=255),
rgb(c(247,204,150,99,37),
c(247,204,150,99,37),
c(247,204,150,99,37),maxColorValue=255),
rgb(c(247,217,189,150,99,37),
c(247,217,189,150,99,37),
c(247,217,189,150,99,37),maxColorValue=255),
rgb(c(247,217,189,150,115,82,37),
c(247,217,189,150,115,82,37),
c(247,217,189,150,115,82,37),maxColorValue=255),
rgb(c(255,240,217,189,150,115,82,37),
c(255,240,217,189,150,115,82,37),
c(255,240,217,189,150,115,82,37),maxColorValue=255),
rgb(c(255,240,217,189,150,115,82,37,0),
c(255,240,217,189,150,115,82,37,0),
c(255,240,217,189,150,115,82,37,0),maxColorValue=255)
),
Oranges = switch(n-2,
rgb(c(254,253,230),
c(230,174,85),
c(206,107,13),maxColorValue=255),
rgb(c(254,253,253,217),
c(237,190,141,71),
c(222,133,60,1),maxColorValue=255),
rgb(c(254,253,253,230,166),
c(237,190,141,85,54),
c(222,133,60,13,3),maxColorValue=255),
rgb(c(254,253,253,253,230,166),
c(237,208,174,141,85,54),
c(222,162,107,60,13,3),maxColorValue=255),
rgb(c(254,253,253,253,241,217,140),
c(237,208,174,141,105,72,45),
c(222,162,107,60,19,1,4),maxColorValue=255),
rgb(c(255,254,253,253,253,241,217,140),
c(245,230,208,174,141,105,72,45),
c(235,206,162,107,60,19,1,4),maxColorValue=255),
rgb(c(255,254,253,253,253,241,217,166,127),
c(245,230,208,174,141,105,72,54,39),
c(235,206,162,107,60,19,1,3,4),maxColorValue=255)
),
OrRd = switch(n-2,
rgb(c(254,253,227),
c(232,187,74),
c(200,132,51),maxColorValue=255),
rgb(c(254,253,252,215),
c(240,204,141,48),
c(217,138,89,31),maxColorValue=255),
rgb(c(254,253,252,227,179),
c(240,204,141,74,0),
c(217,138,89,51,0),maxColorValue=255),
rgb(c(254,253,253,252,227,179),
c(240,212,187,141,74,0),
c(217,158,132,89,51,0),maxColorValue=255),
rgb(c(254,253,253,252,239,215,153),
c(240,212,187,141,101,48,0),
c(217,158,132,89,72,31,0),maxColorValue=255),
rgb(c(255,254,253,253,252,239,215,153),
c(247,232,212,187,141,101,48,0),
c(236,200,158,132,89,72,31,0),maxColorValue=255),
rgb(c(255,254,253,253,252,239,215,179,127),
c(247,232,212,187,141,101,48,0,0),
c(236,200,158,132,89,72,31,0,0),maxColorValue=255)
),
Paired = switch(n-2,
rgb(c(166,31,178),
c(206,120,223),
c(227,180,138),maxColorValue=255),
rgb(c(166,31,178,51),
c(206,120,223,160),
c(227,180,138,44),maxColorValue=255),
rgb(c(166,31,178,51,251),
c(206,120,223,160,154),
c(227,180,138,44,153),maxColorValue=255),
rgb(c(166,31,178,51,251,227),
c(206,120,223,160,154,26),
c(227,180,138,44,153,28),maxColorValue=255),
rgb(c(166,31,178,51,251,227,253),
c(206,120,223,160,154,26,191),
c(227,180,138,44,153,28,111),maxColorValue=255),
rgb(c(166,31,178,51,251,227,253,255),
c(206,120,223,160,154,26,191,127),
c(227,180,138,44,153,28,111,0),maxColorValue=255),
rgb(c(166,31,178,51,251,227,253,255,202),
c(206,120,223,160,154,26,191,127,178),
c(227,180,138,44,153,28,111,0,214),maxColorValue=255),
rgb(c(166,31,178,51,251,227,253,255,202,106),
c(206,120,223,160,154,26,191,127,178,61),
c(227,180,138,44,153,28,111,0,214,154),maxColorValue=255),
rgb(c(166,31,178,51,251,227,253,255,202,106,255),
c(206,120,223,160,154,26,191,127,178,61,255),
c(227,180,138,44,153,28,111,0,214,154,153),maxColorValue=255),
rgb(c(166,31,178,51,251,227,253,255,202,106,255,177),
c(206,120,223,160,154,26,191,127,178,61,255,89),
c(227,180,138,44,153,28,111,0,214,154,153,40),maxColorValue=255)
),
Pastel1 = switch(n-2,
rgb(c(251,179,204),
c(180,205,235),
c(174,227,197),maxColorValue=255),
rgb(c(251,179,204,222),
c(180,205,235,203),
c(174,227,197,228),maxColorValue=255),
rgb(c(251,179,204,222,254),
c(180,205,235,203,217),
c(174,227,197,228,166),maxColorValue=255),
rgb(c(251,179,204,222,254,255),
c(180,205,235,203,217,255),
c(174,227,197,228,166,204),maxColorValue=255),
rgb(c(251,179,204,222,254,255,229),
c(180,205,235,203,217,255,216),
c(174,227,197,228,166,204,189),maxColorValue=255),
rgb(c(251,179,204,222,254,255,229,253),
c(180,205,235,203,217,255,216,218),
c(174,227,197,228,166,204,189,236),maxColorValue=255),
rgb(c(251,179,204,222,254,255,229,253,242),
c(180,205,235,203,217,255,216,218,242),
c(174,227,197,228,166,204,189,236,242),maxColorValue=255)
),
Pastel2 = switch(n-2,
rgb(c(179,253,203),
c(226,205,213),
c(205,172,232),maxColorValue=255),
rgb(c(179,253,203,244),
c(226,205,213,202),
c(205,172,232,228),maxColorValue=255),
rgb(c(179,253,203,244,230),
c(226,205,213,202,245),
c(205,172,232,228,201),maxColorValue=255),
rgb(c(179,253,203,244,230,255),
c(226,205,213,202,245,242),
c(205,172,232,228,201,174),maxColorValue=255),
rgb(c(179,253,203,244,230,255,241),
c(226,205,213,202,245,242,226),
c(205,172,232,228,201,174,204),maxColorValue=255),
rgb(c(179,253,203,244,230,255,241,204),
c(226,205,213,202,245,242,226,204),
c(205,172,232,228,201,174,204,204),maxColorValue=255)
),
PiYG = switch(n-2,
rgb(c(233,247,161),
c(163,247,215),
c(201,247,106),maxColorValue=255),
rgb(c(208,241,184,77),
c(28,182,225,172),
c(139,218,134,38),maxColorValue=255),
rgb(c(208,241,247,184,77),
c(28,182,247,225,172),
c(139,218,247,134,38),maxColorValue=255),
rgb(c(197,233,253,230,161,77),
c(27,163,224,245,215,146),
c(125,201,239,208,106,33),maxColorValue=255),
rgb(c(197,233,253,247,230,161,77),
c(27,163,224,247,245,215,146),
c(125,201,239,247,208,106,33),maxColorValue=255),
rgb(c(197,222,241,253,230,184,127,77),
c(27,119,182,224,245,225,188,146),
c(125,174,218,239,208,134,65,33),maxColorValue=255),
rgb(c(197,222,241,253,247,230,184,127,77),
c(27,119,182,224,247,245,225,188,146),
c(125,174,218,239,247,208,134,65,33),maxColorValue=255),
rgb(c(142,197,222,241,253,230,184,127,77,39),
c(1,27,119,182,224,245,225,188,146,100),
c(82,125,174,218,239,208,134,65,33,25),maxColorValue=255),
rgb(c(142,197,222,241,253,247,230,184,127,77,39),
c(1,27,119,182,224,247,245,225,188,146,100),
c(82,125,174,218,239,247,208,134,65,33,25),maxColorValue=255)
),
PRGn = switch(n-2,
rgb(c(175,247,127),
c(141,247,191),
c(195,247,123),maxColorValue=255),
rgb(c(123,194,166,0),
c(50,165,219,136),
c(148,207,160,55),maxColorValue=255),
rgb(c(123,194,247,166,0),
c(50,165,247,219,136),
c(148,207,247,160,55),maxColorValue=255),
rgb(c(118,175,231,217,127,27),
c(42,141,212,240,191,120),
c(131,195,232,211,123,55),maxColorValue=255),
rgb(c(118,175,231,247,217,127,27),
c(42,141,212,247,240,191,120),
c(131,195,232,247,211,123,55),maxColorValue=255),
rgb(c(118,153,194,231,217,166,90,27),
c(42,112,165,212,240,219,174,120),
c(131,171,207,232,211,160,97,55),maxColorValue=255),
rgb(c(118,153,194,231,247,217,166,90,27),
c(42,112,165,212,247,240,219,174,120),
c(131,171,207,232,247,211,160,97,55),maxColorValue=255),
rgb(c(64,118,153,194,231,217,166,90,27,0),
c(0,42,112,165,212,240,219,174,120,68),
c(75,131,171,207,232,211,160,97,55,27),maxColorValue=255),
rgb(c(64,118,153,194,231,247,217,166,90,27,0),
c(0,42,112,165,212,247,240,219,174,120,68),
c(75,131,171,207,232,247,211,160,97,55,27),maxColorValue=255)
),
PuBu = switch(n-2,
rgb(c(236,166,43),
c(231,189,140),
c(242,219,190),maxColorValue=255),
rgb(c(241,189,116,5),
c(238,201,169,112),
c(246,225,207,176),maxColorValue=255),
rgb(c(241,189,116,43,4),
c(238,201,169,140,90),
c(246,225,207,190,141),maxColorValue=255),
rgb(c(241,208,166,116,43,4),
c(238,209,189,169,140,90),
c(246,230,219,207,190,141),maxColorValue=255),
rgb(c(241,208,166,116,54,5,3),
c(238,209,189,169,144,112,78),
c(246,230,219,207,192,176,123),maxColorValue=255),
rgb(c(255,236,208,166,116,54,5,3),
c(247,231,209,189,169,144,112,78),
c(251,242,230,219,207,192,176,123),maxColorValue=255),
rgb(c(255,236,208,166,116,54,5,4,2),
c(247,231,209,189,169,144,112,90,56),
c(251,242,230,219,207,192,176,141,88),maxColorValue=255)
),
PuBuGn = switch(n-2,
rgb(c(236,166,28),
c(226,189,144),
c(240,219,153),maxColorValue=255),
rgb(c(246,189,103,2),
c(239,201,169,129),
c(247,225,207,138),maxColorValue=255),
rgb(c(246,189,103,28,1),
c(239,201,169,144,108),
c(247,225,207,153,89),maxColorValue=255),
rgb(c(246,208,166,103,28,1),
c(239,209,189,169,144,108),
c(247,230,219,207,153,89),maxColorValue=255),
rgb(c(246,208,166,103,54,2,1),
c(239,209,189,169,144,129,100),
c(247,230,219,207,192,138,80),maxColorValue=255),
rgb(c(255,236,208,166,103,54,2,1),
c(247,226,209,189,169,144,129,100),
c(251,240,230,219,207,192,138,80),maxColorValue=255),
rgb(c(255,236,208,166,103,54,2,1,1),
c(247,226,209,189,169,144,129,108,70),
c(251,240,230,219,207,192,138,89,54),maxColorValue=255)
),
PuOr = switch(n-2,
rgb(c(241,247,153),
c(163,247,142),
c(64,247,195),maxColorValue=255),
rgb(c(230,253,178,94),
c(97,184,171,60),
c(1,99,210,153),maxColorValue=255),
rgb(c(230,253,247,178,94),
c(97,184,247,171,60),
c(1,99,247,210,153),maxColorValue=255),
rgb(c(179,241,254,216,153,84),
c(88,163,224,218,142,39),
c(6,64,182,235,195,136),maxColorValue=255),
rgb(c(179,241,254,247,216,153,84),
c(88,163,224,247,218,142,39),
c(6,64,182,247,235,195,136),maxColorValue=255),
rgb(c(179,224,253,254,216,178,128,84),
c(88,130,184,224,218,171,115,39),
c(6,20,99,182,235,210,172,136),maxColorValue=255),
rgb(c(179,224,253,254,247,216,178,128,84),
c(88,130,184,224,247,218,171,115,39),
c(6,20,99,182,247,235,210,172,136),maxColorValue=255),
rgb(c(127,179,224,253,254,216,178,128,84,45),
c(59,88,130,184,224,218,171,115,39,0),
c(8,6,20,99,182,235,210,172,136,75),maxColorValue=255),
rgb(c(127,179,224,253,254,247,216,178,128,84,45),
c(59,88,130,184,224,247,218,171,115,39,0),
c(8,6,20,99,182,247,235,210,172,136,75),maxColorValue=255)
),
PuRd = switch(n-2,
rgb(c(231,201,221),
c(225,148,28),
c(239,199,119),maxColorValue=255),
rgb(c(241,215,223,206),
c(238,181,101,18),
c(246,216,176,86),maxColorValue=255),
rgb(c(241,215,223,221,152),
c(238,181,101,28,0),
c(246,216,176,119,67),maxColorValue=255),
rgb(c(241,212,201,223,221,152),
c(238,185,148,101,28,0),
c(246,218,199,176,119,67),maxColorValue=255),
rgb(c(241,212,201,223,231,206,145),
c(238,185,148,101,41,18,0),
c(246,218,199,176,138,86,63),maxColorValue=255),
rgb(c(247,231,212,201,223,231,206,145),
c(244,225,185,148,101,41,18,0),
c(249,239,218,199,176,138,86,63),maxColorValue=255),
rgb(c(247,231,212,201,223,231,206,152,103),
c(244,225,185,148,101,41,18,0,0),
c(249,239,218,199,176,138,86,67,31),maxColorValue=255)
),
Purples = switch(n-2,
rgb(c(239,188,117),
c(237,189,107),
c(245,220,177),maxColorValue=255),
rgb(c(242,203,158,106),
c(240,201,154,81),
c(247,226,200,163),maxColorValue=255),
rgb(c(242,203,158,117,84),
c(240,201,154,107,39),
c(247,226,200,177,143),maxColorValue=255),
rgb(c(242,218,188,158,117,84),
c(240,218,189,154,107,39),
c(247,235,220,200,177,143),maxColorValue=255),
rgb(c(242,218,188,158,128,106,74),
c(240,218,189,154,125,81,20),
c(247,235,220,200,186,163,134),maxColorValue=255),
rgb(c(252,239,218,188,158,128,106,74),
c(251,237,218,189,154,125,81,20),
c(253,245,235,220,200,186,163,134),maxColorValue=255),
rgb(c(252,239,218,188,158,128,106,84,63),
c(251,237,218,189,154,125,81,39,0),
c(253,245,235,220,200,186,163,143,125),maxColorValue=255)
),
RdBu = switch(n-2,
rgb(c(239,247,103),
c(138,247,169),
c(98,247,207),maxColorValue=255),
rgb(c(202,244,146,5),
c(0,165,197,113),
c(32,130,222,176),maxColorValue=255),
rgb(c(202,244,247,146,5),
c(0,165,247,197,113),
c(32,130,247,222,176),maxColorValue=255),
rgb(c(178,239,253,209,103,33),
c(24,138,219,229,169,102),
c(43,98,199,240,207,172),maxColorValue=255),
rgb(c(178,239,253,247,209,103,33),
c(24,138,219,247,229,169,102),
c(43,98,199,247,240,207,172),maxColorValue=255),
rgb(c(178,214,244,253,209,146,67,33),
c(24,96,165,219,229,197,147,102),
c(43,77,130,199,240,222,195,172),maxColorValue=255),
rgb(c(178,214,244,253,247,209,146,67,33),
c(24,96,165,219,247,229,197,147,102),
c(43,77,130,199,247,240,222,195,172),maxColorValue=255),
rgb(c(103,178,214,244,253,209,146,67,33,5),
c(0,24,96,165,219,229,197,147,102,48),
c(31,43,77,130,199,240,222,195,172,97),maxColorValue=255),
rgb(c(103,178,214,244,253,247,209,146,67,33,5),
c(0,24,96,165,219,247,229,197,147,102,48),
c(31,43,77,130,199,247,240,222,195,172,97),maxColorValue=255)
),
RdGy = switch(n-2,
rgb(c(239,255,153),
c(138,255,153),
c(98,255,153),maxColorValue=255),
rgb(c(202,244,186,64),
c(0,165,186,64),
c(32,130,186,64),maxColorValue=255),
rgb(c(202,244,255,186,64),
c(0,165,255,186,64),
c(32,130,255,186,64),maxColorValue=255),
rgb(c(178,239,253,224,153,77),
c(24,138,219,224,153,77),
c(43,98,199,224,153,77),maxColorValue=255),
rgb(c(178,239,253,255,224,153,77),
c(24,138,219,255,224,153,77),
c(43,98,199,255,224,153,77),maxColorValue=255),
rgb(c(178,214,244,253,224,186,135,77),
c(24,96,165,219,224,186,135,77),
c(43,77,130,199,224,186,135,77),maxColorValue=255),
rgb(c(178,214,244,253,255,224,186,135,77),
c(24,96,165,219,255,224,186,135,77),
c(43,77,130,199,255,224,186,135,77),maxColorValue=255),
rgb(c(103,178,214,244,253,224,186,135,77,26),
c(0,24,96,165,219,224,186,135,77,26),
c(31,43,77,130,199,224,186,135,77,26),maxColorValue=255),
rgb(c(103,178,214,244,253,255,224,186,135,77,26),
c(0,24,96,165,219,255,224,186,135,77,26),
c(31,43,77,130,199,255,224,186,135,77,26),maxColorValue=255)
),
RdPu = switch(n-2,
rgb(c(253,250,197),
c(224,159,27),
c(221,181,138),maxColorValue=255),
rgb(c(254,251,247,174),
c(235,180,104,1),
c(226,185,161,126),maxColorValue=255),
rgb(c(254,251,247,197,122),
c(235,180,104,27,1),
c(226,185,161,138,119),maxColorValue=255),
rgb(c(254,252,250,247,197,122),
c(235,197,159,104,27,1),
c(226,192,181,161,138,119),maxColorValue=255),
rgb(c(254,252,250,247,221,174,122),
c(235,197,159,104,52,1,1),
c(226,192,181,161,151,126,119),maxColorValue=255),
rgb(c(255,253,252,250,247,221,174,122),
c(247,224,197,159,104,52,1,1),
c(243,221,192,181,161,151,126,119),maxColorValue=255),
rgb(c(255,253,252,250,247,221,174,122,73),
c(247,224,197,159,104,52,1,1,0),
c(243,221,192,181,161,151,126,119,106),maxColorValue=255)
),
Reds = switch(n-2,
rgb(c(254,252,222),
c(224,146,45),
c(210,114,38),maxColorValue=255),
rgb(c(254,252,251,203),
c(229,174,106,24),
c(217,145,74,29),maxColorValue=255),
rgb(c(254,252,251,222,165),
c(229,174,106,45,15),
c(217,145,74,38,21),maxColorValue=255),
rgb(c(254,252,252,251,222,165),
c(229,187,146,106,45,15),
c(217,161,114,74,38,21),maxColorValue=255),
rgb(c(254,252,252,251,239,203,153),
c(229,187,146,106,59,24,0),
c(217,161,114,74,44,29,13),maxColorValue=255),
rgb(c(255,254,252,252,251,239,203,153),
c(245,224,187,146,106,59,24,0),
c(240,210,161,114,74,44,29,13),maxColorValue=255),
rgb(c(255,254,252,252,251,239,203,165,103),
c(245,224,187,146,106,59,24,15,0),
c(240,210,161,114,74,44,29,21,13),maxColorValue=255)
),
RdYlBu = switch(n-2,
rgb(c(252,255,145),
c(141,255,191),
c(89,191,219),maxColorValue=255),
rgb(c(215,253,171,44),
c(25,174,217,123),
c(28,97,233,182),maxColorValue=255),
rgb(c(215,253,255,171,44),
c(25,174,255,217,123),
c(28,97,191,233,182),maxColorValue=255),
rgb(c(215,252,254,224,145,69),
c(48,141,224,243,191,117),
c(39,89,144,248,219,180),maxColorValue=255),
rgb(c(215,252,254,255,224,145,69),
c(48,141,224,255,243,191,117),
c(39,89,144,191,248,219,180),maxColorValue=255),
rgb(c(215,244,253,254,224,171,116,69),
c(48,109,174,224,243,217,173,117),
c(39,67,97,144,248,233,209,180),maxColorValue=255),
rgb(c(215,244,253,254,255,224,171,116,69),
c(48,109,174,224,255,243,217,173,117),
c(39,67,97,144,191,248,233,209,180),maxColorValue=255),
rgb(c(165,215,244,253,254,224,171,116,69,49),
c(0,48,109,174,224,243,217,173,117,54),
c(38,39,67,97,144,248,233,209,180,149),maxColorValue=255),
rgb(c(165,215,244,253,254,255,224,171,116,69,49),
c(0,48,109,174,224,255,243,217,173,117,54),
c(38,39,67,97,144,191,248,233,209,180,149),maxColorValue=255)
),
RdYlGn = switch(n-2,
rgb(c(252,255,145),
c(141,255,207),
c(89,191,96),maxColorValue=255),
rgb(c(215,253,166,26),
c(25,174,217,150),
c(28,97,106,65),maxColorValue=255),
rgb(c(215,253,255,166,26),
c(25,174,255,217,150),
c(28,97,191,106,65),maxColorValue=255),
rgb(c(215,252,254,217,145,26),
c(48,141,224,239,207,152),
c(39,89,139,139,96,80),maxColorValue=255),
rgb(c(215,252,254,255,217,145,26),
c(48,141,224,255,239,207,152),
c(39,89,139,191,139,96,80),maxColorValue=255),
rgb(c(215,244,253,254,217,166,102,26),
c(48,109,174,224,239,217,189,152),
c(39,67,97,139,139,106,99,80),maxColorValue=255),
rgb(c(215,244,253,254,255,217,166,102,26),
c(48,109,174,224,255,239,217,189,152),
c(39,67,97,139,191,139,106,99,80),maxColorValue=255),
rgb(c(165,215,244,253,254,217,166,102,26,0),
c(0,48,109,174,224,239,217,189,152,104),
c(38,39,67,97,139,139,106,99,80,55),maxColorValue=255),
rgb(c(165,215,244,253,254,255,217,166,102,26,0),
c(0,48,109,174,224,255,239,217,189,152,104),
c(38,39,67,97,139,191,139,106,99,80,55),maxColorValue=255)
),
Set1 = switch(n-2,
rgb(c(228,55,77),
c(26,126,175),
c(28,184,74),maxColorValue=255),
rgb(c(228,55,77,152),
c(26,126,175,78),
c(28,184,74,163),maxColorValue=255),
rgb(c(228,55,77,152,255),
c(26,126,175,78,127),
c(28,184,74,163,0),maxColorValue=255),
rgb(c(228,55,77,152,255,255),
c(26,126,175,78,127,255),
c(28,184,74,163,0,51),maxColorValue=255),
rgb(c(228,55,77,152,255,255,166),
c(26,126,175,78,127,255,86),
c(28,184,74,163,0,51,40),maxColorValue=255),
rgb(c(228,55,77,152,255,255,166,247),
c(26,126,175,78,127,255,86,129),
c(28,184,74,163,0,51,40,191),maxColorValue=255),
rgb(c(228,55,77,152,255,255,166,247,153),
c(26,126,175,78,127,255,86,129,153),
c(28,184,74,163,0,51,40,191,153),maxColorValue=255)
),
Set2 = switch(n-2,
rgb(c(102,252,141),
c(194,141,160),
c(165,98,203),maxColorValue=255),
rgb(c(102,252,141,231),
c(194,141,160,138),
c(165,98,203,195),maxColorValue=255),
rgb(c(102,252,141,231,166),
c(194,141,160,138,216),
c(165,98,203,195,84),maxColorValue=255),
rgb(c(102,252,141,231,166,255),
c(194,141,160,138,216,217),
c(165,98,203,195,84,47),maxColorValue=255),
rgb(c(102,252,141,231,166,255,229),
c(194,141,160,138,216,217,196),
c(165,98,203,195,84,47,148),maxColorValue=255),
rgb(c(102,252,141,231,166,255,229,179),
c(194,141,160,138,216,217,196,179),
c(165,98,203,195,84,47,148,179),maxColorValue=255)
),
Set3 = switch(n-2,
rgb(c(141,255,190),
c(211,255,186),
c(199,179,218),maxColorValue=255),
rgb(c(141,255,190,251),
c(211,255,186,128),
c(199,179,218,114),maxColorValue=255),
rgb(c(141,255,190,251,128),
c(211,255,186,128,177),
c(199,179,218,114,211),maxColorValue=255),
rgb(c(141,255,190,251,128,253),
c(211,255,186,128,177,180),
c(199,179,218,114,211,98),maxColorValue=255),
rgb(c(141,255,190,251,128,253,179),
c(211,255,186,128,177,180,222),
c(199,179,218,114,211,98,105),maxColorValue=255),
rgb(c(141,255,190,251,128,253,179,252),
c(211,255,186,128,177,180,222,205),
c(199,179,218,114,211,98,105,229),maxColorValue=255),
rgb(c(141,255,190,251,128,253,179,252,217),
c(211,255,186,128,177,180,222,205,217),
c(199,179,218,114,211,98,105,229,217),maxColorValue=255),
rgb(c(141,255,190,251,128,253,179,252,217,188),
c(211,255,186,128,177,180,222,205,217,128),
c(199,179,218,114,211,98,105,229,217,189),maxColorValue=255),
rgb(c(141,255,190,251,128,253,179,252,217,188,204),
c(211,255,186,128,177,180,222,205,217,128,235),
c(199,179,218,114,211,98,105,229,217,189,197),maxColorValue=255),
rgb(c(141,255,190,251,128,253,179,252,217,188,204,255),
c(211,255,186,128,177,180,222,205,217,128,235,237),
c(199,179,218,114,211,98,105,229,217,189,197,111),maxColorValue=255)
),
Spectral = switch(n-2,
rgb(c(252,255,153),
c(141,255,213),
c(89,191,148),maxColorValue=255),
rgb(c(215,253,171,43),
c(25,174,221,131),
c(28,97,164,186),maxColorValue=255),
rgb(c(215,253,255,171,43),
c(25,174,255,221,131),
c(28,97,191,164,186),maxColorValue=255),
rgb(c(213,252,254,230,153,50),
c(62,141,224,245,213,136),
c(79,89,139,152,148,189),maxColorValue=255),
rgb(c(213,252,254,255,230,153,50),
c(62,141,224,255,245,213,136),
c(79,89,139,191,152,148,189),maxColorValue=255),
rgb(c(213,244,253,254,230,171,102,50),
c(62,109,174,224,245,221,194,136),
c(79,67,97,139,152,164,165,189),maxColorValue=255),
rgb(c(213,244,253,254,255,230,171,102,50),
c(62,109,174,224,255,245,221,194,136),
c(79,67,97,139,191,152,164,165,189),maxColorValue=255),
rgb(c(158,213,244,253,254,230,171,102,50,94),
c(1,62,109,174,224,245,221,194,136,79),
c(66,79,67,97,139,152,164,165,189,162),maxColorValue=255),
rgb(c(158,213,244,253,254,255,230,171,102,50,94),
c(1,62,109,174,224,255,245,221,194,136,79),
c(66,79,67,97,139,191,152,164,165,189,162),maxColorValue=255)
),
YlGn = switch(n-2,
rgb(c(247,173,49),
c(252,221,163),
c(185,142,84),maxColorValue=255),
rgb(c(255,194,120,35),
c(255,230,198,132),
c(204,153,121,67),maxColorValue=255),
rgb(c(255,194,120,49,0),
c(255,230,198,163,104),
c(204,153,121,84,55),maxColorValue=255),
rgb(c(255,217,173,120,49,0),
c(255,240,221,198,163,104),
c(204,163,142,121,84,55),maxColorValue=255),
rgb(c(255,217,173,120,65,35,0),
c(255,240,221,198,171,132,90),
c(204,163,142,121,93,67,50),maxColorValue=255),
rgb(c(255,247,217,173,120,65,35,0),
c(255,252,240,221,198,171,132,90),
c(229,185,163,142,121,93,67,50),maxColorValue=255),
rgb(c(255,247,217,173,120,65,35,0,0),
c(255,252,240,221,198,171,132,104,69),
c(229,185,163,142,121,93,67,55,41),maxColorValue=255)
),
YlGnBu = switch(n-2,
rgb(c(237,127,44),
c(248,205,127),
c(177,187,184),maxColorValue=255),
rgb(c(255,161,65,34),
c(255,218,182,94),
c(204,180,196,168),maxColorValue=255),
rgb(c(255,161,65,44,37),
c(255,218,182,127,52),
c(204,180,196,184,148),maxColorValue=255),
rgb(c(255,199,127,65,44,37),
c(255,233,205,182,127,52),
c(204,180,187,196,184,148),maxColorValue=255),
rgb(c(255,199,127,65,29,34,12),
c(255,233,205,182,145,94,44),
c(204,180,187,196,192,168,132),maxColorValue=255),
rgb(c(255,237,199,127,65,29,34,12),
c(255,248,233,205,182,145,94,44),
c(217,177,180,187,196,192,168,132),maxColorValue=255),
rgb(c(255,237,199,127,65,29,34,37,8),
c(255,248,233,205,182,145,94,52,29),
c(217,177,180,187,196,192,168,148,88),maxColorValue=255)
),
YlOrBr = switch(n-2,
rgb(c(255,254,217),
c(247,196,95),
c(188,79,14),maxColorValue=255),
rgb(c(255,254,254,204),
c(255,217,153,76),
c(212,142,41,2),maxColorValue=255),
rgb(c(255,254,254,217,153),
c(255,217,153,95,52),
c(212,142,41,14,4),maxColorValue=255),
rgb(c(255,254,254,254,217,153),
c(255,227,196,153,95,52),
c(212,145,79,41,14,4),maxColorValue=255),
rgb(c(255,254,254,254,236,204,140),
c(255,227,196,153,112,76,45),
c(212,145,79,41,20,2,4),maxColorValue=255),
rgb(c(255,255,254,254,254,236,204,140),
c(255,247,227,196,153,112,76,45),
c(229,188,145,79,41,20,2,4),maxColorValue=255),
rgb(c(255,255,254,254,254,236,204,153,102),
c(255,247,227,196,153,112,76,52,37),
c(229,188,145,79,41,20,2,4,6),maxColorValue=255)
),
YlOrRd = switch(n-2,
rgb(c(255,254,240),
c(237,178,59),
c(160,76,32),maxColorValue=255),
rgb(c(255,254,253,227),
c(255,204,141,26),
c(178,92,60,28),maxColorValue=255),
rgb(c(255,254,253,240,189),
c(255,204,141,59,0),
c(178,92,60,32,38),maxColorValue=255),
rgb(c(255,254,254,253,240,189),
c(255,217,178,141,59,0),
c(178,118,76,60,32,38),maxColorValue=255),
rgb(c(255,254,254,253,252,227,177),
c(255,217,178,141,78,26,0),
c(178,118,76,60,42,28,38),maxColorValue=255),
rgb(c(255,255,254,254,253,252,227,177),
c(255,237,217,178,141,78,26,0),
c(204,160,118,76,60,42,28,38),maxColorValue=255),
rgb(c(255,255,254,254,253,252,227,189,128),
c(255,237,217,178,141,78,26,0,0),
c(204,160,118,76,60,42,28,38,38),maxColorValue=255)
)
)
}
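#
# Usage sketch:
#   CALANGO_brewer_pal(5, "Blues")  # five blues, light to dark
#   CALANGO_brewer_pal(2, "Set1")   # n < 3: first 2 colors of the
#                                   # 3-color version of the palette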
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/CALANGO_brewer_pal.R
|
# Obtains the annotation from a file about a specific genome.
#
# Input:
# file: char, path to the file with the annotation.
# column: char label or integer index of column with the desired annotation.
# Returns list with the annotation of that genome.
FileAddition <- function(file, column) {
# ===== Sanity check =====
assertthat::assert_that(is.character(file),
length(file) == 1,
file.exists(file),
is.character(column) || is.numeric(column),
length(column) == 1)
anno <- utils::read.table(file,
sep = "\t", header = TRUE,
colClasses = "character",
strip.white = TRUE, comment.char = "",
row.names = 1, check.names = FALSE)
# ===== Sanity check =====
  if (is.numeric(column)){
    assertthat::assert_that(column %% 1 == 0,
                            column >= 1, column <= ncol(anno))
} else {
assertthat::assert_that(column %in% names(anno))
}
  # Parsing the tab format from Uniprot (multiple annotations per entry
  # are separated by ';')
  genomeAnno <- strsplit(anno[, column], split = " *; *")
  names(genomeAnno) <- rownames(anno)
  return(genomeAnno)
}
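#
# Usage sketch (hypothetical file: a tab-separated table with gene IDs in
# the first column and ';'-separated annotations in a column named "GO"):
#
#   anno <- FileAddition("genome1.tab", column = "GO")
#   anno[["some_gene_id"]]  # character vector of GO IDs for that gene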
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/FileAddition.R
|
# -=-=-=- Phylogenetically Independent Contrast analysis -=-=-=-
# Produces a vector with the correlation of each ontology term with the
# attribute in question after correcting for phylogenetic bias (see
# Felsenstein 1985 and APE package for details).
#
# Args:
# x: (vector) variable with the counting of an attribute of
# interest, like G+C, gene count or longevity.
# y: (data.frame) table counting the presence of annotations
# of an ontology in each genome.
#   tree: (phylo) phylogenetic tree whose tip labels match the row names
#         of x and y.
#   method: (char) method to use: either "pic" (phylogenetically
#         independent contrasts) or "gls" (generalized least squares).
#   denominator: (numeric) parameter for normalization of the y variable.
#   cores, cl: number of cores / parallel cluster to use (cl is used on
#         Windows, cores otherwise).
#
# Returns:
# correlations: (vector) correlation of all listed ontology terms for the
# attribute in question.
FindContrasts <- function(x, y, tree, method = "gls", denominator = 1,
cores = 1, cl = NULL) {
# ================== Sanity checks ==================
assertthat::assert_that(is.data.frame(x),
is.data.frame(y),
class(tree) %in% c("phylo", "multiPhylo"),
method %in% c("pic", "gls"),
is.null(denominator) | is.numeric(denominator),
is.null(denominator) | all(denominator > 0))
tmp_x <- x[, 1]
names(tmp_x) <- rownames(x)
# Phylogenetically Independent Contrasts
if (method == "pic") {
contrast_x <- ape::pic(x = tmp_x, phy = tree)
# Normalizing
if (!is.null(denominator)) y <- sweep(y, MARGIN = 1, denominator, `/`) # y <- y / denominator
message("Computing contrasts")
if (.Platform$OS.type == "windows"){
models <- parallel::parLapply(cl = cl,
X = y,
fun = function(tmpy, tree, cx, nx){
names(tmpy) <- nx
cy <- ape::pic(tmpy, phy = tree)
mod <- stats::lm(cy ~ cx + 0)
return(summary(mod)$coefficients[1, 4])},
tree = tree,
cx = contrast_x,
nx = rownames(x))
} else {
models <- pbmcapply::pbmclapply(y,
function(tmpy, tree, cx, nx){
names(tmpy) <- nx
cy <- ape::pic(tmpy, phy = tree)
mod <- stats::lm(cy ~ cx - 1)
return(summary(mod)$coefficients[1, 4])},
tree = tree,
cx = contrast_x,
nx = rownames(x),
mc.preschedule = TRUE,
mc.cores = cores)
}
} else if (method == "gls") {
# TODO: method "gls" throws errors both in the original version and the
# reimplemented version below. Please check.
# Normalizing
if (!is.null(denominator)) {
# Getting counts per million to avoid false convergence (8) error
# from gls function for small values,
# see http://r.789695.n4.nabble.com/quot-False-convergence-quot-in-LME-td860675.html
# y <- (y / denominator) * 10^6
y <- sweep(y, MARGIN = 1, denominator, `/`) * 10^6
}
tmpfun <- function(tmpy, tmpx, nx, tree){
if(any(tmpy == 0)){
        model <- nlme::gls(tmpy ~ tmpx,
data = as.data.frame(cbind(tmpx, tmpy)),
correlation = ape::corPagel(value = 1,
phy = tree),
control = list(singular.ok = TRUE))
return(as.numeric(summary(model)$coefficients[2]))
} else {
return(1)
}}
if (.Platform$OS.type == "windows"){
models <- parallel::parLapply(cl = cl,
X = y,
fun = tmpfun,
tmpx = tmp_x,
nx = rownames(x),
tree = tree)
} else {
models <- pbmcapply::pbmclapply(X = y,
FUN = tmpfun,
tmpx = tmp_x,
nx = rownames(x),
tree = tree,
mc.preschedule = TRUE,
mc.cores = cores)
}
} else{
stop("'", method,"' is not a recognized method in CALANGO::FindContrasts()")
}
models <- unlist(models)
return(sort(models, decreasing = FALSE))
}
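# Minimal sketch of the "pic" branch for a single annotation term, using a
# random tree and simulated data (assumes the 'ape' package; illustrative
# only, not part of the package API):
if (FALSE) {
  set.seed(42)
  tree <- ape::rtree(10)
  x <- stats::setNames(rnorm(10), tree$tip.label)    # phenotype values
  y <- stats::setNames(rpois(10, 5), tree$tip.label) # counts of one term
  cx <- ape::pic(x, phy = tree)
  cy <- ape::pic(y, phy = tree)
  # regression through the origin, as in FindContrasts(); the value kept is
  # the p-value of the slope
  summary(stats::lm(cy ~ cx - 1))$coefficients[1, 4]
}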
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/FindContrasts.R
|
# -=-=-=- Correlation analysis -=-=-=-
# Produces a vector with the correlation of each ontology term with the
# attribute in question.
#
# Args:
# x: (vector) variable with the counting of an attribute of
# interest, like G+C, gene count or longevity.
# y: (data.frame) table counting the presence of annotations
# of an ontology in each genome.
# method: (char) method to use, allows "pearson", "spearman" and "kendall".
# denominator: (numeric) parameter for normalization of the y variable.
# Returns:
# correlations: (vector) correlation of all listed ontology terms for the
# attribute in question.
# Consider adding differentiation for x and y denominator column.
FindCorrelations <- function(x, y, method = "pearson", denominator = 1,
cores = 1, cl = NULL) {
# ================== Sanity checks ==================
assertthat::assert_that(is.data.frame(x),
is.data.frame(y),
is.null(denominator) | is.numeric(denominator),
is.null(denominator) | all(denominator > 0))
  # Normalize: divide each row of y by the corresponding denominator value
  if (!is.null(denominator)) y <- sweep(y, MARGIN = 1, denominator, `/`)
  message("Calculating correlations: ", method)
tmpfun <- function(tmpy, tmpx, method, ny){
mycor <- stats::cor(tmpx[ny, 1], tmpy,
method = method)
mypv <- stats::cor.test(tmpx[ny, 1], tmpy,
method = method)$p.value
return(list(mycor = mycor, mypv = mypv))}
if (.Platform$OS.type == "windows"){
tmp <- parallel::parLapply(cl = cl,
X = y,
fun = tmpfun,
tmpx = x,
method = method,
ny = rownames(y))
} else {
tmp <- pbmcapply::pbmclapply(X = y,
FUN = tmpfun,
tmpx = x,
method = method,
ny = rownames(y),
mc.preschedule = TRUE,
mc.cores = cores)
}
correlations <- sapply(tmp, function(tmpx) tmpx$mycor)
correlations.pvalue <- sapply(tmp, function(tmpx) tmpx$mypv)
names(correlations) <- colnames(y)
names(correlations.pvalue) <- colnames(y)
correlations <- sort(correlations,
decreasing = TRUE)
correlations.pvalue <- sort(correlations.pvalue,
decreasing = TRUE)
return(list(cor = correlations,
cor.pvalues = correlations.pvalue))
}
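# Usage sketch (simulated inputs; illustrative only). x is a single-column
# data frame of phenotype values and y a data frame of term counts with
# matching row names. On Windows a 'parallel' cluster must be passed via cl:
if (FALSE) {
  x <- data.frame(trait = rnorm(20), row.names = paste0("g", 1:20))
  y <- data.frame(TERM1 = rpois(20, 5), TERM2 = rpois(20, 2),
                  row.names = paste0("g", 1:20))
  res <- FindCorrelations(x, y, method = "spearman", denominator = NULL)
  res$cor          # named vector of correlations, sorted
  res$cor.pvalues  # matching (unadjusted) p-values
}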
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/FindCorrelations.R
|
# Generates a phyletic vector for a specific genome.
# Input:
# genomeAnno: list, annotation of the genome.
# ontologyInfo: list, wrapper for the ontology. Since it may be GO, KO, or an
# arbitrary one, the information may come in different
# variables, hence the wrapper.
# Returns a named numeric vector with the frequency count of each term in
# that genome.
GenerateGenomeVector <- function(genomeAnno, ontologyInfo, column) {
# ===== Sanity check =====
assertthat::assert_that(is.list(genomeAnno) || is.character(genomeAnno),
is.list(ontologyInfo),
"name" %in% names(ontologyInfo))
# Check input format (file or annotation list), adapt if possible
if (is.character(genomeAnno)) {
if (utils::file_test("-f", genomeAnno)) {
genomeAnno <- FileAddition(genomeAnno, column)
} else {
# TODO: Should it throw an error in this case?
return(NULL)
}
}
if (!is.list(genomeAnno)) {
# TODO: Should it throw an error in this case?
return(NULL)
}
genomeVector <- rep.int(0, times = length(ontologyInfo$name))
names(genomeVector) <- ontologyInfo$name
if (is.element(ontologyInfo$ontology, c("go", "gene ontology"))) {
genomeAnno <- lapply(genomeAnno, RemoveObsoleteAndAlternative,
ontologyInfo$allObsolete, ontologyInfo$allSynonym)
genomeAnno <- lapply(genomeAnno, ObtainGeneGOancestors,
ontologyInfo$allAncestor)
}
# If the genome has any annotation, count terms; just return otherwise
# 'countIDs' has the names of ontologic terms found in a genome and the
# number of genes/proteins/elements in which they appeared
# countIDs[, 1] = ontologic terms
# countIDs[, 2] = occurrences of the term
genomeAnno <- unlist(genomeAnno)
if (length(genomeAnno) > 0) {
countIDs <- as.data.frame(table(genomeAnno), stringsAsFactors = FALSE)
countIDs <- countIDs[is.element(countIDs[, 1], ontologyInfo$name), ]
genomeVector[countIDs[, 1]] <- genomeVector[countIDs[, 1]] +
countIDs[, 2]
}
return(genomeVector)
}
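# Usage sketch with an "other"-type ontology (hypothetical terms; not run):
if (FALSE) {
  genomeAnno   <- list(gene_001 = c("T1", "T2"), gene_002 = "T2")
  ontologyInfo <- list(name = c("T1", "T2", "T3"), ontology = "other")
  GenerateGenomeVector(genomeAnno, ontologyInfo, column = 1)
  # T1 T2 T3
  #  1  2  0
}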
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/GenerateGenomeVector.R
|
# Function to generate and plot a taxonomic tree
GenerateTree <- function(taxonIds, db = "ncbi") {
taxize_class <- taxize::classification(taxonIds, db = db)
taxize_tree <- taxize::class2tree(taxize_class, check = TRUE)
# taxize::plot.classtree(taxize_tree)
invisible(taxize_tree)
}
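# Usage sketch (requires internet access; NCBI queries via taxize may also
# require an ENTREZ API key). The IDs below are human, mouse and chicken:
if (FALSE) {
  tr <- GenerateTree(c(9606, 10090, 9031), db = "ncbi")
  plot(tr)  # taxize provides a plot method for the returned 'classtree'
}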
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/GenerateTree.R
|
# Count number of genes in each genome of a group.
#
# Input:
# someAnno: list of genomes, each with a data frame that maps
# each gene to its annotations (GO, KO).
# genome.names: char vector of names of the genomes to count elements.
# May be used to restrict which genomes to count in this
# function.
# mode: char, defines whether CALANGO must consider all elements in all
# genomes (default), or treat each element independently of
# others (experiment). The latter is an experiment for
# alignments.
# Returns an integer vector with the number of elements in each genome
# of someAnno.
GroupElementCount <- function(someAnno, genome.names = NULL, mode = "default") {
  if (is.null(genome.names) || length(genome.names) == 0) {
# TODO: should this return zero or throw an error?
return(0)
}
if (mode == "default") {
elementCount <- sapply(X = someAnno[genome.names],
FUN = length)
} else if (mode == "experiment") {
elementCount <- rep(1, length(someAnno[genome.names]))
names(elementCount) <- names(someAnno[genome.names])
} else {
stop("'", mode, "' is not a recognized mode in CALANGO::GroupElementCount()")
}
return(elementCount)
}
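# Usage sketch (hypothetical annotation lists; not run):
if (FALSE) {
  someAnno <- list(gA = list(g1 = "T1", g2 = "T2"), gB = list(g1 = "T1"))
  GroupElementCount(someAnno, genome.names = c("gA", "gB"))
  # gA gB
  #  2  1
}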
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/GroupElementCount.R
|
activate_CRAN_dependencies_from_Rmd_reports <- function(){
# IMPORTANT: any new CRAN package dependency that's used only in the .Rmd
# files must be called once below, to prevent CRAN warnings (CRAN will
# check if all packages in `Depends` are used at least once within the
# functions in the `R` folder.)
if (FALSE){ # <---------------- code in here is never to be called, of course.
dendextend::fac2num(factor(3:5))
heatmaply::BrBG(5) # for heatmaply
ggplot2::aes() # for ggplot2
plotly::api() # for plotly
DT::JS() # for DT
htmltools::a() # for htmltools
htmlwidgets::JS() # for htmlwidgets
pkgdown::as_pkgdown() # for pkgdown
knitr::all_labels() # for knitr
}
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/activate_CRAN_dependencies_from_Rmd_reports.R
|
check_bioc_dependencies <- function(...) {
# Check if bioconductor dependencies are installed
pkgs <- c("AnnotationDbi", "KEGGREST", "GO.db")
x <- rownames(utils::installed.packages())
idx <- which(!pkgs %in% x)
if (length(idx) > 0){
msg <- paste0("\nCALANGO: ",
"Comparative AnaLysis with ANnotation-based Genomic cOmponentes")
for (i in seq_along(idx)){
msg <- paste0(msg, "\n* NOTE: Missing BioConductor dependency: ", pkgs[idx[i]])
}
msg <- paste0(msg,
"\nPlease run install_bioc_dependencies() before using run_CALANGO()")
message(msg)
return(FALSE)
} else {
return(TRUE)
}
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/check_bioc_dependencies.R
|
# Checks that the input list for a correlation test has the necessary
# fields. (Not exported to the namespace)
check_inputs_correlation <- function(defs){
return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/check_inputs_correlation.R
|
# Checks that the input list for a significance test has the necessary
# fields. (Not exported to the namespace)
check_inputs_significance <- function(defs){
return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/check_inputs_significance.R
|
# Clean data for the CALANGO workflow
#
# This script implements the second step of the LCFD workflow of CALANGO.
# It is responsible for dealing with data inconsistencies, including
# missing values, outliers and undesired characters, as well as data
# merging. It also preprocesses data to allow for more flexible inputs from
# the user, such as automatically converting common annotation output to a
# single standard format.
#
# The script expects enriched `CALANGO`-type lists, which are generated by
# [load_data()].
#
#
# @param defs an enriched CALANGO-type list object (see Details).
#
# @return updated \code{defs} list containing information from parsed
# genome maps (e.g., for test and back genomes if `type == "significance"`)
#
#
clean_data <- function(defs){
# Perform data preprocessing
message("Preliminary data cleaning:")
defs <- switch(tolower(defs$type),
significance = clean_data_significance(defs),
correlation = clean_data_correlation(defs))
if (defs$ontology == "other") {
assertthat::assert_that(!is.null(defs$dictionary))
# Convert to a named list
defs$dictionary <- unique(defs$dictionary)
defs$temp <- as.list(defs$dictionary[, 2])
names(defs$temp) <- defs$dictionary[, 1]
defs$dictionary <- defs$temp
defs$temp <- NULL
}
return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/clean_data.R
|
# Specific function to clean data if type == "correlation" in the CALANGO
# definition list. (Not exported to to the namespace)
clean_data_correlation <- function(defs){
# Set names of x and y within defs
defs$y.name <- basename(defs$y.name)
names(defs$y) <- defs$y.name
names(defs$x) <- defs$y.name
if (length(defs$denominator) <= 1) {
defs$denominator <- NULL
} else {
names(defs$denominator) <- defs$y.name
}
# Safety check, returns error if any value is missing
if (is.null(defs$x) || any(is.na(defs$x))) {
stop("Missing values in the (x variable), check your dataset info file.")
}
defs$x <- as.data.frame(defs$x)
# Parse Genome Maps
if (.Platform$OS.type == "windows"){
defs$y.anno <- parallel::parLapply(cl = defs$cl,
X = defs$y,
fun = parse_GenomeMap,
column = defs$column)
} else {
defs$y.anno <- pbmcapply::pbmclapply(X = defs$y,
FUN = parse_GenomeMap,
column = defs$column,
mc.preschedule = FALSE,
mc.cores = defs$cores)
}
# Remove y field from defs
defs$y <- NULL
return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/clean_data_correlation.R
|
# Specific function to clean data if type == "significance" in the CALANGO
# definition list. (Not exported to to the namespace)
clean_data_significance <- function(defs){
stop("Type 'significance' not yet available.")
# # Set the names of defs$test and defs$back
# defs$test.name <- basename(defs$test.name)
# defs$back.name <- basename(defs$back.name)
# names(defs$test) <- defs$test.name
# names(defs$back) <- defs$back.name
#
# # Safety check, removes the counts if any value is missing
# if (!is.null(defs$testElementCount)) {
# names(defs$testElementCount) <- defs$test.name
# if (any(is.na(defs$testElementCount))) {
# defs$testElementCount <- NULL
# }
# }
# if (!is.null(defs$backElementCount)) {
# names(defs$backElementCount) <- defs$back.name
# if (any(is.na(defs$backElementCount))) {
# defs$backElementCount <- NULL
# }
# }
#
# # Parse Genome Maps
# defs$test.anno <- pbmcapply::pbmclapply(X = defs$test,
# FUN = parse_GenomeMap,
# column = defs$column,
# mc.preschedule = FALSE,
# mc.cores = defs$cores)
#
# defs$back.anno <- pbmcapply::pbmclapply(X = defs$back,
# FUN = parse_GenomeMap,
# column = defs$column,
# mc.preschedule = FALSE,
# mc.cores = defs$cores)
#
# # Remove test and back fields from defs
# defs$test <- NULL
# defs$back <- NULL
#
# return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/clean_data_significance.R
|
# Perform analysis for the CALANGO workflow
#
# This script implements the third step of the LCFD workflow of CALANGO.
# It is responsible for performing the actual analysis and generating
# all the processed data and tables.
#
# The script expects enriched `CALANGO`-type lists generated after running
# [load_data()] -> [clean_data()].
#
#
# @param defs an enriched CALANGO-type list object (see Details).
#
do_analysis <- function(defs){
# ================== Sanity checks ==================
assert_that("tree.type" %in% names(defs),
"tree.path" %in% names(defs))
if("MHT.method" %in% names(defs)){
assertthat::assert_that(defs$MHT.method %in% stats::p.adjust.methods)
} else {
defs$MHT.method <- "BH"
}
# Perform analysis
#cat("\nMain Analysis:\n")
  defs <- switch(tolower(defs$type),
                 significance = do_analysis_significance(defs),
                 correlation = do_analysis_correlation(defs))
  return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/do_analysis.R
|
# Specific function to perform analysis if type == "correlation" in the CALANGO
# definition list. (Not exported to to the namespace)
do_analysis_correlation <- function(defs){
# ================== Sanity checks ==================
defs$ontology <- tolower(defs$ontology)
defs$tree.type <- tolower(defs$tree.type)
assertthat::assert_that(defs$tree.type %in% c("nexus", "newick"),
msg = "tree.type must be either 'nexus' or 'newick'")
assertthat::assert_that(defs$ontology %in% c("go", "gene ontology", "kegg", "other"),
msg = 'ontology must be "go", "gene ontology", "kegg" or "other"')
# Read tree file
defs$tree <- switch(defs$tree.type,
nexus = ape::read.nexus(defs$tree.path),
newick = ape::read.tree(defs$tree.path))
  # Create a fully dichotomous tree, as required by the pic() function
defs$tree <- ape::multi2di(defs$tree, equiprob = FALSE)
# Create data structure for dictionary
if (defs$ontology %in% c("go", "gene ontology")) {
defs$allAncestor <- ListAncestors()
defs$allObsolete <- ListObsoletes()
defs$allSynonym <- ListSynonyms()
} else if (defs$ontology == "kegg") {
# TODO: KEGG is not working anymore, must check
defs$allKOs <- ListKOs()
} else if (defs$ontology == "other" & defs$dict.path == "") {
defs$dictionary <- CreateDictionary(defs$y.anno)
}
# if (is.null(defs$yElementCount)) {
# # TODO: Check this: the output will *always* be zero under this condition.
# # See GroupElementCount() in utils_genome_vector.R
# defs$yElementCount <- GroupElementCount(defs$y.anno)
# }
defs$y <- AddGenomeVectors(defs,
anno = "y.anno",
genome.names = "y.name")
# Calculate the denominator for GO analysis
if (defs$ontology %in% c("go", "gene ontology") &
length(defs$denominator) < 2) {
defs$denominator <- rowSums(defs$y)
}
# Compute basic statistics of annotation elements and sum
defs$sum <- sapply(defs$y, sum)
defs$sd <- sapply(defs$y, stats::sd)
defs$mean <- sapply(defs$y, mean)
defs$cv <- mapply(FUN = function(x,y){ifelse(y == 0, 0, x/y)},
defs$sd, defs$mean,
SIMPLIFY = TRUE)
# Compute coverage
# (defined as: proportion of samples with annotation term count > 0)
defs$greaterthanzero <- sapply(defs$y, function(v){sum(v > 0) / length(v)})
  # Compute sample mode and sample heterogeneity
  # (defined as: proportion of samples distinct from the sample mode)
  message("Computing sample mode and sample heterogeneity")
if (.Platform$OS.type == "windows"){
tmp <- parallel::parLapply(cl = defs$cl,
X = defs$y,
fun = function(v){
tmp <- sort(table(v), decreasing = TRUE)
return(list(m = as.numeric(names(tmp)[1]),
h = sum(tmp[-1]) / length(v)))
})
} else {
tmp <- pbmcapply::pbmclapply(defs$y,
function(v){
tmp <- sort(table(v), decreasing = TRUE)
return(list(m = as.numeric(names(tmp)[1]),
h = sum(tmp[-1]) / length(v)))
},
mc.cores = defs$cores)
}
defs$heterogeneity <- sapply(tmp, function(x) x$h)
defs$mode <- sapply(tmp, function(x) x$m)
# Compute contrasts
defs$contrasts <- FindContrasts(x = defs$x,
y = defs$y,
tree = defs$tree,
method = "pic",
denominator = defs$denominator,
cores = defs$cores,
cl = defs$cl)
defs$contrasts.corrected <- stats::p.adjust(p = defs$contrasts,
method = defs$MHT.method)
# Compute correlations, correlation p-values and
# MHT-corrected correlation p-values
cortypes <- c("pearson", "spearman", "kendall")
for (i in seq_along(cortypes)){
tmp <- FindCorrelations(x = defs$x,
y = defs$y,
method = cortypes[i],
denominator = defs$denominator,
cores = defs$cores,
cl = defs$cl)
tmp$mhtpv <- stats::p.adjust(p = tmp$cor.pvalues,
method = defs$MHT.method)
defs[[paste0("correlations.", cortypes[i])]] <- tmp$cor
defs[[paste0("correlations.pvalue.", cortypes[i])]] <- tmp$cor.pvalue
defs[[paste0("results.correlations.pvalue.", cortypes[i])]] <- tmp$mhtpv
}
# Annotate results
fieldnames <- c("correlations.pearson", "contrasts", "sum", "cv", "sd")
outnames <- paste0("annotation.", c("cor", "contrasts", "sum", "cv", "sd"))
for (i in seq_along(fieldnames)){
defs[[outnames[i]]] <- AnnotateResults(defs = defs,
results = defs[[fieldnames[i]]],
ontology = defs$ontology)
}
return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/do_analysis_correlation.R
|
do_analysis_significance <- function(defs){
stop("Type 'significance' not yet available.")
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/do_analysis_significance.R
|
#' Install Bioconductor dependencies
#'
#' This function installs the latest versions of all Bioconductor packages
#' required for the report generation, namely:
#' \itemize{
#' \item AnnotationDbi
#' \item KEGGREST
#' \item GO.db
#' }
#'
#' It is essential that
#' these Bioconductor packages be installed for CALANGO to work properly.
#' It uses `BiocManager::install()` for installing Bioconductor
#' packages. Further arguments to this function are passed as a list.
#'
#' @param bioc.args list containing further arguments to be passed
#' down to `BiocManager::install()`.
#' @param force logical: reinstall already-installed packages?
#'
#' @export
#'
#' @examples
#' \dontrun{
#' install_bioc_dependencies()
#' }
#'
#' @return No return value, called for side effects.
install_bioc_dependencies <- function(bioc.args = list(),
force = FALSE){
# ================== Sanity checks ==================
assertthat::assert_that(is.list(bioc.args),
is.logical(force), length(force) == 1)
pkgs <- c("AnnotationDbi", "KEGGREST", "GO.db")
makeInst <- FALSE
if (force){
makeInst <- TRUE
bioc.args$force <- TRUE
} else {
x <- rownames(utils::installed.packages())
idx <- which(!pkgs %in% x)
if (length(idx) > 0){
pkgs <- pkgs[idx]
makeInst <- TRUE
}
}
if (makeInst){
message(paste0("\nInstalling package(s): ",
paste(pkgs, collapse = ", "),
"\nfrom BioConductor version ",
BiocManager::version()))
bioc.args$pkgs <- pkgs
bioc.args$ask <- TRUE
do.call(BiocManager::install, bioc.args)
}
invisible(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/install_bioc_dependencies.R
|
# Load and verify all required data for the CALANGO workflow
#
# This script represents the first step of the LCFD workflow of CALANGO.
# It separates the data loading, which can be the longest step of a workflow,
# from the analysis itself, which is faster and can be redone multiple times.
#
# The script expects a `CALANGO`-type list, which is a list object containing
# at least the following fields:
# \itemize{
# \item \code{test.path} (char string): path to the folder containing
# annotation files of the test group
# \item \code{back.path} (char string): path to the folder containing
# annotation files of the background group
# \item \code{x.path} (char string): path to the file containing
# the genomes' attributes (for correlation test)
# \item \code{y.path} (char string): path to the folder containing
#    the genomes and their annotations (for correlation test)
# \item \code{ontology} (char string): which ontology to use. Currently
# accepts "GO" or "Gene Ontology", "KEGG" and "other".
# \item \code{dict.path} (char string): file with the dictionary (terms and
# their meaning) of the ontology, if `ontology` is set as "other".
# \item \code{type} (char string): comparison module to use. Accepts
# "significance" (compares two groups of genomes within an ontology) or
# "correlation" (establishes how much avariable explains the variations
# seen in the genomes).
# }
#
# The input definitions can also be passed as a file path. If that is the
# case the file must be in a `field = value` format. Blank lines and lines
# starting with `#` are ignored. Required fields are the same as those
# described for the `CALANGO` list above.
#
# @param defs either a CALANGO-type list object (see Details) or
# a path to a text file containing the required definitions.
# @param cores positive integer, how many CPU cores to use (multicore
# acceleration does not work in Windows systems). Notice that setting
#   this parameter will override any `cores` field from `defs`.
#
# @return updated \code{defs} list containing the information loaded
# from the files.
#
load_data <- function(defs, cores = NULL){
# ================== Sanity checks ==================
# assertthat::assert_that(assertthat::has_name(defs, "type"),
# is.character(defs$type),
# length(defs$type) == 1,
# defs$type %in% c("correlation"),
# msg = "Invalid defs$type")
# ================== Load files and prepare list ==================
  type <- tolower(defs$type)
  defs <- switch(type,
                 significance = load_data_significance(defs),
                 correlation = load_data_correlation(defs))
  if (is.null(defs)) stop("'", type,
                          "' is not a recognized type argument for CALANGO.")
# Load ontology dictionary if ontology is "other"
if (defs$ontology == "other") {
assertthat::assert_that(file.exists(defs$dict.path),
msg = "defs$dict.path does not point to a file.")
defs$dictionary <- utils::read.csv(defs$dict.path,
sep = "\t",
quote = "",
colClasses = "character",
strip.white = TRUE,
comment.char = "#",
stringsAsFactors = FALSE)
}
class(defs) <- c("CALANGO", "list")
return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/load_data.R
|
# Specific function to load data if type == "correlation" in the CALANGO
# definition list. (Not exported to to the namespace)
load_data_correlation <- function(defs){
# ================== Sanity checks ==================
# TODO: write check function
# defs <- check_inputs_correlation(defs)
  # ================== Process dataset.info ==================
defs$y.name <- utils::read.csv(defs$dataset.info,
header = FALSE,
strip.white = TRUE,
comment.char = "",
check.names = FALSE,
sep = "\t",
stringsAsFactors = FALSE)
defs$x <- defs$y.name[, defs$x.column]
if (defs$denominator.column == "") {
defs$denominator <- ""
} else {
defs$denominator <- defs$y.name[, defs$denominator.column]
}
if (defs$short.name.column == "") {
defs$short.name <- ""
} else {
defs$short.name <- defs$y.name[, defs$short.name.column]
}
if (defs$group.column == "") {
defs$groups <- ""
} else {
defs$groups <- defs$y.name[, defs$group.column]
}
#q-value cutoffs for Pearson, Spearman, Kendall and phylogeny-aware linear models
if (defs$spearman.qvalue.cutoff == "") {
defs$spearman.qvalue.cutoff <- 1
}
if (defs$pearson.qvalue.cutoff == "") {
defs$pearson.qvalue.cutoff <- 1
}
if (defs$kendall.qvalue.cutoff == "") {
defs$kendall.qvalue.cutoff <- 1
}
if (defs$linear_model.qvalue.cutoff == "") {
defs$linear_model.qvalue.cutoff <- 1
}
#correlation cutoffs, used to select only highly correlated annotation terms
if (defs$spearman.cor.upper.cutoff == "") {
defs$spearman.cor.upper.cutoff <- -1
}
if (defs$spearman.cor.lower.cutoff == "") {
defs$spearman.cor.lower.cutoff <- 1
}
if (defs$pearson.cor.upper.cutoff == "") {
defs$pearson.cor.upper.cutoff <- -1
}
if (defs$pearson.cor.lower.cutoff == "") {
defs$pearson.cor.lower.cutoff <- 1
}
if (defs$kendall.cor.upper.cutoff == "") {
defs$kendall.cor.upper.cutoff <- -1
}
if (defs$kendall.cor.lower.cutoff == "") {
defs$kendall.cor.lower.cutoff <- 1
}
  # standard deviation and coefficient of variation filters, used to remove
  # low-variability terms. Only terms with values greater than the cutoff
  # are analyzed.
if (defs$sd.cutoff == "") {
defs$sd.cutoff <- 0
}
if (defs$cv.cutoff == "") {
defs$cv.cutoff <- 0
}
#sum of annotation terms, used to remove low-count terms if needed. Only terms with counts greater than cutoff are further evaluated.
if (defs$annotation_size.cutoff == "") {
defs$annotation_size.cutoff <- 0
}
if (defs$prevalence.cutoff == "") {
defs$prevalence.cutoff <- 0
}
if (defs$heterogeneity.cutoff == "") {
defs$heterogeneity.cutoff <- 0
}
#to remove terms where standard deviation of counts equals zero
if (defs$raw_data_sd_filter %in% c("", "TRUE")) {
defs$raw_data_sd_filter <- TRUE
} else {
defs$raw_data_sd_filter <- FALSE
}
defs$y.name <- paste0(defs$annotation.files.dir, "/", defs$y.name[, 1])
defs$y.name <- gsub(pattern = "//", replacement = "/", x = defs$y.name,
fixed = TRUE)
message("Loading data:")
if (.Platform$OS.type == "windows"){
cat("...")
defs$y <- parallel::parLapply(cl = defs$cl,
X = defs$y.name,
fun = utils::read.csv,
sep = "\t",
header = TRUE,
colClasses = "character",
strip.white = TRUE,
comment.char = "",
check.names = FALSE)
cat(" done!\n")
} else {
defs$y <- pbmcapply::pbmclapply(X = defs$y.name,
FUN = utils::read.csv,
sep = "\t",
header = TRUE,
colClasses = "character",
strip.white = TRUE,
comment.char = "",
check.names = FALSE,
mc.preschedule = FALSE,
mc.cores = defs$cores)
}
return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/load_data_correlation.R
|
# Specific function to load data if type == "significance" in the CALANGO
# definition list. (Not exported to to the namespace)
load_data_significance <- function(defs){
stop("Type 'significance' not yet available.")
# ================== Sanity checks ==================
# defs <- check_inputs_significance(defs)
# ================== Process test.path ==================
#
# # If path points to a file containing the paths to the genome files
# if (utils::file_test("-f", defs$test.path)) {
# defs$test.name <- utils::read.table(file = defs$test.path,
# sep = "\t",
# strip.white = TRUE,
# comment.char = "",
# check.names = FALSE,
# header = FALSE,
# stringsAsFactors = FALSE)
#
# # May have the total number of elements in that genome in the second
# # column, in case the files themselves have missing elements
# if (ncol(defs$test.name) == 2) {
# defs$testElementCount <- defs$test.name[, 2]
# }
# defs$test.name <- defs$test.name[, 1]
#
#
# # If path points to a folder containing the genome files themselves
# } else if (utils::file_test("-d", defs$test.path)){
# defs$test.name <- list.files(path = defs$test.path,
# all.files = FALSE,
# full.names = TRUE,
# recursive = FALSE)
#
# } else stop("test.path is neither a valid file nor a valid folder")
#
# # ================== Process back.path ==================
#
# # If path points to a file containing the paths to the genome files
# if (utils::file_test("-f", defs$back.path)) {
# defs$back.name <- utils::read.table(file = defs$back.path,
# sep = "\t",
# strip.white = TRUE,
# comment.char = "",
# check.names = FALSE,
# header = FALSE,
# stringsAsFactors = FALSE)
#
# # May have the total number of elements in that genome in the second
# # column, in case the files themselves have missing elements
# if (ncol(defs$back.name) == 2) {
# defs$backElementCount <- defs$back.name[, 2]
# }
# defs$back.name <- defs$back.name[, 1]
#
#
# # If path points to a folder containing the genome files themselves
# } else if (utils::file_test("-d", defs$back.path)) {
# defs$back.name <- list.files(path = defs$back.path,
# all.files = FALSE,
# full.names = TRUE,
# recursive = FALSE)
#
# } else stop("back.path is neither a valid file nor a valid folder")
#
#
# # Read genome files
# cat("\nLoading data:\n")
# defs$test <- pbmcapply::pbmclapply(X = defs$test.name,
# FUN = utils::read.table,
# sep = "\t",
# header = TRUE,
# colClasses = "character",
# strip.white = TRUE,
# comment.char = "",
# row.names = 1,
# check.names = FALSE,
# mc.preschedule = FALSE,
# mc.cores = defs$cores)
#
# defs$back <- pbmcapply::pbmclapply(X = defs$back.name,
# FUN = utils::read.table,
# sep = "\t",
# header = TRUE,
# colClasses = "character",
# strip.white = TRUE,
# comment.char = "",
# row.names = 1,
# check.names = FALSE,
# mc.preschedule = FALSE,
# mc.cores = defs$cores)
#
# return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/load_data_significance.R
|
make_correlation_report <- function(defs){
# ================== Prepare report ==================
sumY <- sapply(defs$y, sum) # faster than apply() or colSums()!
# Ignore these, they're only here to initialize certain packages
# NOTE: these are *important*, for some weird reason the report generation
# breaks if you don't initialise (at least) the dendextend package.
# tmp <- dendextend::fac2num(factor(3:5))
# tmp <- plotly::hobbs
# tmp <- heatmaply::BrBG(5)
  # filter out terms with no observations (sum equals zero)
  idx <- names(sumY)[sumY != 0]
  idx <- intersect(idx, names(defs$correlations.pearson))
  Y <- defs$y[, idx]
  sumY <- sumY[idx]
  defs$greaterthanzero <- defs$greaterthanzero[idx]
  defs$heterogeneity <- defs$heterogeneity[idx]
  defs$contrasts <- defs$contrasts[idx]
  defs$contrasts.corrected <- defs$contrasts.corrected[idx]
  defs$results.correlations.pvalue.pearson <- defs$results.correlations.pvalue.pearson[idx]
  defs$results.correlations.pvalue.spearman <- defs$results.correlations.pvalue.spearman[idx]
  defs$results.correlations.pvalue.kendall <- defs$results.correlations.pvalue.kendall[idx]
  defs$correlations.pearson <- defs$correlations.pearson[idx]
  defs$correlations.spearman <- defs$correlations.spearman[idx]
  defs$correlations.kendall <- defs$correlations.kendall[idx]
  defs$sd <- defs$sd[idx]
  defs$cv <- defs$cv[idx]
# Prepare plotframe
plotframe <- rbind(defs$contrasts.corrected[order(names(defs$contrasts.corrected))],
defs$results.correlations.pvalue.pearson[order(names(defs$results.correlations.pvalue.pearson))],
defs$results.correlations.pvalue.spearman[order(names(defs$results.correlations.pvalue.spearman))],
defs$results.correlations.pvalue.kendall[order(names(defs$results.correlations.pvalue.kendall))],
defs$correlations.pearson[order(names(defs$correlations.pearson))],
defs$correlations.spearman[order(names(defs$correlations.spearman))],
defs$correlations.kendall[order(names(defs$correlations.kendall))],
sumY[order(names(sumY))],
defs$sd[order(names(defs$sd))],
defs$cv[order(names(defs$cv))],
defs$greaterthanzero[order(names(defs$greaterthanzero))],
# defs$prevalence_per_group[order(names(defs$prevalence_per_group))],
defs$heterogeneity[order(names(defs$heterogeneity))])
plotframe <- rbind(plotframe,
Y[, order(colnames(Y))])
rownames(plotframe)[1:12] <- c("corrected_contrasts",
"Pearson_qvalue",
"Spearman_qvalue",
"Kendall_qvalue",
"Pearson_cor",
"Spearman_cor",
"Kendall_cor",
"size",
"sd",
"cv",
"prevalence",
# "prevalence per group",
"heterogeneity")
plotframe <- as.data.frame(t(plotframe))
description <- unlist(defs$annotation.cor)
plotframe$description <- description[order(names(description))]
plotframe$name <- rownames(plotframe)
idx <- which(
# q-value cutoffs
(plotframe$corrected_contrasts < defs$linear_model.qvalue.cutoff) &
(plotframe$Spearman_qvalue < defs$spearman.qvalue.cutoff) &
(plotframe$Pearson_qvalue < defs$pearson.qvalue.cutoff) &
(plotframe$Kendall_qvalue < defs$kendall.qvalue.cutoff) &
# correlation cutoffs
((plotframe$Pearson_cor > defs$pearson.cor.upper.cutoff) | (plotframe$Pearson_cor < defs$pearson.cor.lower.cutoff)) &
((plotframe$Spearman_cor > defs$spearman.cor.upper.cutoff) | (plotframe$Spearman_cor < defs$spearman.cor.lower.cutoff)) &
((plotframe$Kendall_cor > defs$kendall.cor.upper.cutoff) | (plotframe$Kendall_cor < defs$kendall.cor.lower.cutoff)) &
# basic statistics cutoffs
(plotframe$size > defs$annotation_size.cutoff) &
(plotframe$prevalence > defs$prevalence.cutoff) &
(plotframe$heterogeneity > defs$heterogeneity.cutoff) &
(plotframe$sd > defs$sd.cutoff) &
(plotframe$cv > defs$cv.cutoff))
  if (length(idx) > 0) {
    df_cutoff <- plotframe[idx, ]
  } else {
    warning("No event remaining after application of cut-off values.\nPlease review and re-run.\nmake_correlation_report() prematurely interrupted.\nReport not generated.")
    return(invisible(FALSE))
  }
if (isTRUE(defs$raw_data_sd_filter)) {
# remove trivial cases, constant values.
if(any(df_cutoff$sd != 0)) {
df_cutoff <- df_cutoff[df_cutoff$sd != 0, ]
} else {
warning("No event remaining after application of cutoffs and raw_data_sd_filter.\nmake_correlation_report() prematurely interrupted.\nReport not generated.")
      return(invisible(FALSE))
}
}
defs$sig_IDs <- rownames(df_cutoff)
defs$print.msg <- paste0("\ncorrected_contrasts < ", sprintf("%.2f", defs$linear_model.qvalue.cutoff),
ifelse(defs$linear_model.qvalue.cutoff >= 1, "\t\t(no filtering)", ""),
"\nSpearman_qvalue < ", sprintf("%.2f", defs$spearman.qvalue.cutoff),
ifelse(defs$spearman.qvalue.cutoff >= 1, "\t\t(no filtering)", ""),
"\nPearson_qvalue < ", sprintf("%.2f", defs$pearson.qvalue.cutoff),
ifelse(defs$pearson.qvalue.cutoff >= 1, "\t\t(no filtering)", ""),
"\nKendall_qvalue < ", sprintf("%.2f", defs$kendall.qvalue.cutoff),
ifelse(defs$kendall.qvalue.cutoff >= 1, "\t\t(no filtering)", ""),
"\nPearson_cor < ", sprintf("%.2f", defs$pearson.cor.lower.cutoff),
" [OR] > ", sprintf("%.2f", defs$pearson.cor.upper.cutoff),
ifelse(defs$pearson.cor.lower.cutoff > defs$pearson.cor.upper.cutoff, "\t(no filtering)", ""),
"\nSpearman_cor < ", sprintf("%.2f", defs$spearman.cor.lower.cutoff),
" [OR] > ", sprintf("%.2f", defs$spearman.cor.upper.cutoff),
ifelse(defs$spearman.cor.lower.cutoff > defs$spearman.cor.upper.cutoff, "\t(no filtering)", ""),
"\nKendall_cor < ", sprintf("%.2f", defs$kendall.cor.lower.cutoff),
" [OR] > ", sprintf("%.2f", defs$kendall.cor.upper.cutoff),
ifelse(defs$kendall.cor.lower.cutoff > defs$kendall.cor.upper.cutoff, "\t(no filtering)", ""),
"\nsize > ", sprintf("%02d", defs$annotation_size.cutoff),
ifelse(defs$annotation_size.cutoff <= 0, "\t\t(no filtering)", ""),
"\nprevalence > ", sprintf("%.2f", defs$prevalence.cutoff),
ifelse(defs$prevalence.cutoff <= 0, "\t\t(no filtering)", ""),
"\nheterogeneity > ", sprintf("%.2f", defs$heterogeneity.cutoff),
ifelse(defs$heterogeneity.cutoff <= 0, "\t\t(no filtering)", ""),
"\nsd > ", sprintf("%.2f", defs$sd.cutoff),
ifelse(defs$sd.cutoff <= 0, "\t\t(no filtering)", ""),
"\ncv > ", sprintf("%.2f", defs$cv.cutoff),
ifelse(defs$cv.cutoff <= 0, "\t\t(no filtering)", ""),
"\nraw_data_sd_filter = ", defs$raw_data_sd_filter,
"\n-----------------------------------",
"\nThis may take a while...")
message(paste0("Generating HTML5 report for results",
"\n-----------------------------------",
"\nUsing filters:\n",
defs$print.msg))
# Prepare output folder
  # NOTE: full (normalized) paths are used here; the resulting links may not
  # work when the output folder is moved or served remotely.
  od <- normalizePath(defs$output.dir)
cpd <- gsub("//", "/", paste0(od, "/correlation_Plots/"),
fixed = TRUE)
if(!dir.exists(cpd)) dir.create(cpd, recursive = TRUE)
# Copy report template into output dir
files_to_copy <- dir(system.file("extdata/report_files", package = "CALANGO"))
fp <- files_to_copy
for (i in seq_along(files_to_copy)){
fp[i] <- gsub("//", "/", paste0(defs$output.dir, "/", files_to_copy[i]),
fixed = TRUE)
file.copy(system.file("extdata/report_files", files_to_copy[i], package = "CALANGO"),
to = fp[i], overwrite = TRUE)
}
suppressWarnings(rmarkdown::render_site(input = defs$output.dir,
quiet = TRUE))
file.remove(fp)
# Invoke browser and open results
myURL <- gsub("//", "/", paste0(defs$output.dir, "/index.html"), fixed = TRUE)
myURL <- paste0("file:/",
normalizePath(gsub("./", "", myURL, fixed = TRUE)))
utils::browseURL(myURL)
message("And we're done!")
invisible(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/make_correlation_report.R
|
#' Prepare and render the HTML5 report
#'
#' This script generates the HTML5 report based on an enriched
#' `CALANGO`-type list output by `run_CALANGO()`.
#'
#' @param defs an enriched `CALANGO`-type list generated by `run_CALANGO()`
#' @param render.report logical: should the HTML5 report be generated (for
#' internal use only)
#'
#' @return Updated `defs` list, containing:
#' \itemize{
#' \item All input parameters originally passed (see [run_CALANGO()] for
#' details).
#' \item Derived fields calculated for displaying the results, including
#' several statistical summaries of the data (including correlations,
#' contrasts, covariances, p-values).
#' }
#'
#' This function is mainly called for its side effect, namely the generation of
#' an HTML5 report of the analysis, which is saved to the folder indicated in
#' `defs$output.dir`.
#'
#' @export
make_report <- function(defs, render.report = TRUE){
# ================== Sanity checks ==================
assertthat::assert_that(is.logical(render.report),
length(render.report) == 1,
all(c("CALANGO", "list") %in% class(defs)))
if(render.report){
defs <- switch(tolower(defs$type),
significance = make_significance_report(defs),
correlation = make_correlation_report(defs))
# Open in browser
utils::browseURL(gsub("//", "/", paste0(normalizePath(defs$output.dir),
"/CALANGO_report.html"),
fixed = TRUE))
}
invisible(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/make_report.R
|
# Prepare and render a significance report
#
# This script generates a significance-type report based on an enriched
# `CALANGO`-type list output by `run_CALANGO()`.
#
# @param defs an enriched `CALANGO`-type list generated by `run_CALANGO()`
# @param ... other parameters (unused for standalone calls, used internally
# by other functions in the package)
#
make_significance_report <- function(defs, ...){
stop("Type 'significance' not yet available.")
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/make_significance_report.R
|
# Function to parse the tab format from Uniprot
parse_GenomeMap <- function(genome, column, split = " *; *") {
genomeMap <- strsplit(x = genome[, column], split = split)
names(genomeMap) <- rownames(genome)
return(genomeMap)
}
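# Usage sketch on a minimal Uniprot-style table (hypothetical IDs; not run):
if (FALSE) {
  genome <- data.frame(GO = c("GO:0008150; GO:0003674", "GO:0005575"),
                       row.names = c("P12345", "P67890"))
  parse_GenomeMap(genome, column = "GO")
  # $P12345: "GO:0008150" "GO:0003674"
  # $P67890: "GO:0005575"
}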
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/parse_GenomeMap.R
|
print_results_correlations <- function(correlations, annotation,
outputName, type) {
# Generates a .tsv file with the results of the analysis.
#
# Args:
# correlations: (vector) value of the correlations between ontology's term
# and variable.
# annotation: (list) translation of the ontology's terms.
# outputName: (char) name of the output file.
# type: (char) type of file being produced.
# Returns:
# none.
outputName <- gsub("//", "/", outputName, fixed = TRUE)
  output <- annotation[names(correlations)]
  for (term in names(correlations)) {
    output[[term]] <- c(term, correlations[[term]], annotation[[term]])
  }
  df.output <- data.frame(matrix(unlist(output), nrow = length(output),
                                 byrow = TRUE,
                                 dimnames = list(names(output))))
  type_col <- switch(EXPR = tolower(type),
                     "correlation" = "CC",
                     "sum" = "sum",
                     "sd" = "sd",
                     "cv" = "cv",
                     "q_value" = "q_val")
  if (is.null(type_col)) type_col <- "generic"
output.columns <- c("ann_term", type_col, "annotation")
utils::write.table(df.output, file = outputName, quote = FALSE,
row.names = FALSE, col.names = output.columns,
sep = "\t")
return(NULL)
}
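# Usage sketch (hypothetical terms; writes a small .tsv to a temporary file):
if (FALSE) {
  cors <- c(T1 = 0.9, T2 = -0.4)
  anno <- list(T1 = "first term", T2 = "second term")
  out  <- tempfile(fileext = ".tsv")
  print_results_correlations(cors, anno, out, type = "correlation")
  readLines(out)
}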
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/print_results_correlations.R
|
# Read a CALANGO list from a key-value file.
read_calango_file <- function(file.path){
df <- utils::read.table(file = file.path, header = FALSE,
strip.white = TRUE,
comment.char = "#", sep = '=' ,
col.names = c('Key', 'Value'),
stringsAsFactors = FALSE)
outlist <- as.list(df$Value)
# Coerce numeric values to "numeric"
outlist <- lapply(outlist,
function(v){
if(!is.na(suppressWarnings(as.numeric(v)))) {
return(as.numeric(v))
} else return(v)
})
names(outlist) <- as.character(df$Key)
class(outlist) <- c("CALANGO", "list")
return(outlist)
}
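# Usage sketch: definition files are plain text in "key = value" format
# (hypothetical content; lines starting with '#' are ignored):
if (FALSE) {
  cfg <- tempfile(fileext = ".txt")
  writeLines(c("# example CALANGO definitions",
               "type = correlation",
               "ontology = GO",
               "x.column = 2",
               "cores = 2"), cfg)
  defs <- read_calango_file(cfg)
  str(defs)  # note: numeric values (x.column, cores) are auto-coerced
}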
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/read_calango_file.R
|
restore_relative_paths <- function(defs){
if(nrow(defs$paths) > 0){
for (i in 1:nrow(defs$paths)){
defs[[defs$paths$field[i]]] <- defs$paths$rel[i]
}
}
return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/restore_relative_paths.R
|
#' Retrieve calanguize_genomes script from the Github repository
#'
#' This script downloads the *calanguize_genomes.pl* Perl script from the
#' repository, together with associated README instructions for using the
#' script, managing dependencies, etc. It will extract the data into a folder
#' containing everything that is needed for preparing data for using
#' CALANGO.
#'
#' If the `target.dir` provided does not exist it is created
#' (recursively) by the function.
#'
#' @param target.dir path to the folder where the files will be saved (
#' accepts relative and absolute paths)
#' @param method Method to be used for downloading files. Current download
#' methods are "internal", "wininet" (Windows only) "libcurl", "wget" and
#' "curl", and there is a value "auto": see _Details_ and _Note_ in the
#' documentation of \code{utils::download.file()}.
#' @param unzip The unzip method to be used. See the documentation of
#' \code{utils::unzip()} for details.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' CALANGO::retrieve_calanguize_genomes(target.dir = "./data")
#' }
#'
#' @return No return value, called for side effects (see Description).
retrieve_calanguize_genomes <- function(target.dir,
method = "auto",
unzip = getOption("unzip")){
# ================== Sanity checks ==================
  assertthat::assert_that(is.character(target.dir),
                          length(target.dir) == 1)
if(!dir.exists(target.dir)){
dir.create(target.dir, recursive = TRUE)
} else {
filelist <- dir(target.dir, full.names = TRUE)
unlink(filelist, recursive = TRUE, force = TRUE)
}
url <- "https://github.com/fcampelo/CALANGO/raw/master/inst/extdata/calanguize_genomes.zip"
res1 <- utils::download.file(url,
quiet = TRUE,
destfile = paste0(target.dir, "/tmpdata.zip"),
cacheOK = FALSE,
method = method)
if(res1 != 0) stop("Error downloading file \n", url)
utils::unzip(paste0(target.dir, "/tmpdata.zip"),
unzip = unzip,
exdir = target.dir)
unlink(paste0(target.dir, "/__MACOSX"), recursive = TRUE, force = TRUE)
file.remove(paste0(target.dir, "/tmpdata.zip"))
invisible(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/retrieve_calanguize_genomes.R
|
#' Retrieve data files from the Github repository
#'
#' This script downloads relevant data files from the CALANGO project
#' repository. It will extract the data into a folder containing
#' directories related to dictionary files, Gene Ontology annotation
#' files, tree files, etc. Note: you may need to edit the file paths in the
#' example scripts contained under the `parameters` subfolder of `target.dir`,
#' or pass an appropriate base path using parameter `basedir` in [run_CALANGO()].
#'
#' If the `target.dir` provided does not exist it is created
#' (recursively) by the function.
#'
#' @param target.dir path to the folder where the files will be saved (
#' accepts relative and absolute paths)
#' @param method Method to be used for downloading files. Current download
#' methods are "internal", "wininet" (Windows only) "libcurl", "wget" and
#' "curl", and there is a value "auto": see _Details_ and _Note_ in the
#' documentation of \code{utils::download.file()}.
#' @param unzip The unzip method to be used. See the documentation of
#' \code{utils::unzip()} for details.
#' @param ... additional attributes (currently ignored)
#'
#' @export
#'
#' @examples
#' \dontrun{
#' CALANGO::retrieve_data_files(target.dir = "./data")
#' }
#'
#' @return No return value, called for side effects (see Description).
retrieve_data_files <- function(target.dir,
method = "auto",
unzip = getOption("unzip"),
...){
# ================== Sanity checks ==================
assertthat::assert_that(is.character(target.dir))
if(!dir.exists(target.dir)){
dir.create(target.dir, recursive = TRUE)
} else {
filelist <- dir(target.dir, full.names = TRUE)
unlink(filelist, recursive = TRUE, force = TRUE)
}
url <- "https://github.com/fcampelo/CALANGO/raw/master/inst/extdata/Examples.zip"
res1 <- utils::download.file(url,
quiet = TRUE,
destfile = paste0(target.dir, "/tmpdata.zip"),
cacheOK = FALSE,
method = method)
if(res1 != 0) stop("Error downloading file \n", url)
utils::unzip(paste0(target.dir, "/tmpdata.zip"),
unzip = unzip,
exdir = target.dir)
unlink(paste0(target.dir, "/__MACOSX"), recursive = TRUE, force = TRUE)
file.remove(paste0(target.dir, "/tmpdata.zip"))
invisible(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/retrieve_data_files.R
|
#' Run the CALANGO pipeline
#'
#' This function runs the complete workflow of CALANGO and generates the
#' HTML5 output pages and export files.
#'
#' The script expects a `CALANGO`-type list, passed either as an actual list
#' object or as a file path. In the latter case, notice that
#' the file must be a text file with a `field = value` format.
#' Blank lines and lines starting with `#` are ignored. The function expects the
#' input list to have the following fields:
#' \itemize{
#' \item\code{annotation.files.dir} (required, string) - Folder where
#' annotation files are located.
#' \item\code{output.dir} (required, string) - output folder for results
#' \item\code{dataset.info} (required, string) - genome metadata file, it
#' should contain at least:
#' \itemize{
#' \item File names. Please notice this information should be the first
#' column in metadata file;
#' \item Phenotype data (numeric, this is the value CALANGO uses to rank
#' species when searching for associations)
#' \item Normalization data (numeric, this is the value CALANGO uses as a
#' denominator to compute annotation term frequencies to remove potential
#' biases caused by, for instance, over annotation of model organisms or
#' large differences in the counts of genomic elements). Please notice that
#' CALANGO does not require normalization data for GO, as it computes the
#' total number of GO terms per species and uses it as a normalizing factor.
#' }
#' \item\code{x.column} (required, numeric) - which column in "dataset.info"
#' contains the phenotype data?
#' \item\code{ontology} (required, string) - which dictionary data type to
#' use? Possible values are "GO" and "other". For GO, CALANGO can compute
#' normalization data.
#' \item\code{dict.path} (required, string) - file for dictionary file
#' (two-column file containing annotation IDs and their descriptions. Not
#' needed for GO.
#' \item\code{column} (required, string) - which column in annotation files
#' should be used (column name)
#' \item\code{denominator.column} (optional, numeric) - which column contains
#' normalization data (column number)
#' \item\code{tree.path} (required, string) - path for tree file in either
#' newick or nexus format
#' \item\code{tree.type} (required, string) - tree file type (either "nexus"
#' or "newick")
#' \item\code{cores} (optional, numeric) - how many cores to use? If not
#' provided the function defaults to 1.
#' \item\code{linear.model.cutoff} (required, numeric) - parameter that
#' regulates how much graphical output is produced. We configure it to
#' generate plots only for annotation terms with corrected q-values for
#' phylogenetically independent contrasts (standard: smaller than 0.5).
#' \item\code{MHT.method} (optional, string) - type of multiple hypothesis
#' correction to be used. Accepts all methods listed by
#' `stats::p.adjust.methods()`. If not provided the function defaults to
#' "BH".
#' }
#'
#'
#' @param defs either a CALANGO-type list object or
#' a path to a text file containing the required definitions (see Details).
#' @param type type of analysis to perform. Currently only "correlation" is
#' supported.
#' @param cores positive integer, how many CPU cores to use (multicore
#' acceleration does not work in Windows systems). Setting
#' this parameter overrides any `cores` field from `defs`. Multicore support is
#' currently implemented using the `parallel` package, which uses forking
#' (which means that multicore support is not available under Windows)
#' @param render.report logical: should a HTML5 report be generated?
#' @param basedir path to base folder to which all relative paths in `defs`
#' refer to.
#'
#' @importFrom assertthat assert_that is.count has_name
#' @importFrom ggplot2 "%+%"
#'
#' @return Updated `defs` list, containing:
#' \itemize{
#' \item All input parameters originally passed or read from a `defs` file
#' (see **Details**).
#' \item Derived fields loaded and preprocessed from the files indicated in
#' `defs`.
#' \item Several statistical summaries of the data (used to render the
#' report), including correlations, contrasts, covariances, p-values and
#' other summary statistics.
#' }
#'
#' Results are also saved to files under `defs$output.dir`.
#'
#' @export
#'
#' @examples
#' \dontrun{
#'
#' ## Install any missing BioConductor packages for report generation
#' ## (only needs to be done once)
#' # CALANGO::install_bioc_dependencies()
#'
#' # Retrieve example files
#' basedir <- tempdir()
#' retrieve_data_files(target.dir = paste0(basedir, "/data"))
#' defs <- paste0(basedir, "/data/parameters/parameters_domain2GO_count_less_phages.txt")
#'
#' # Run CALANGO
#' res <- run_CALANGO(defs, cores = 2)
#' }
#'
run_CALANGO <- function(defs, type = "correlation",
cores = NULL, render.report = TRUE,
basedir = ""){
# ================== Sanity checks ==================
isOK <- check_bioc_dependencies()
if (!isOK) {
warning("run_CALANGO() stopped prematurely.")
return(NULL)
}
assert_that(is.list(defs) || file.exists(defs),
is.null(cores) || is.count(cores),
is.null(type) || is.character(type),
is.null(type) || length(type) == 1,
is.logical(render.report), length(render.report) == 1,
msg = "input error(s) in CALANGO::run_CALANGO()")
# If defs is a file path, read it into list
if(!is.list(defs)) {
defs <- read_calango_file(defs)
}
defs$basedir <- basedir
defs <- set_absolute_paths(defs)
if(is.null(defs$type)) defs$type <- type
# ================== Set up parallel processing ==================
if(!is.null(cores)) {
defs$cores <- cores
} else if(!has_name(defs, "cores")) {
defs$cores <- 1
}
available.cores <- parallel::detectCores()
if (defs$cores >= available.cores){
defs$cores <- max(1, available.cores - 1)
warning("Input argument 'cores' too large, we only have ", available.cores,
" cores.\nUsing ", defs$cores,
" cores for run_CALANGO().")
}
if (.Platform$OS.type == "windows"){
defs$cl <- parallel::makeCluster(defs$cores, setup_strategy = "sequential")
}
defs <- load_data(defs) # Load required data
defs <- clean_data(defs) # Preliminary data cleaning
defs <- do_analysis(defs) # perform the analysis
defs <- save_tsv_files(defs) # Save results to .tsv files
defs <- make_report(defs, render.report = render.report) # generate HTML page
if (.Platform$OS.type == "windows"){
## Stop the cluster
parallel::stopCluster(defs$cl)
}
  if (!is.null(defs$output.dir)) saveRDS(defs,
                                         file = paste0(defs$output.dir,
                                                       "/results.rds"))
defs <- restore_relative_paths(defs)
invisible(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/run_CALANGO.R
|
save_tsv_files <- function(defs){
message("Saving results to files (this may take a while)")
tsvdir <- gsub("//", "/", paste0(defs$output.dir, "/tsv_files"), fixed = TRUE)
if (!dir.exists(tsvdir)) dir.create(tsvdir,
recursive = TRUE,
showWarnings = FALSE)
print_results_correlations(correlations = defs$contrasts.corrected,
annotation = defs$annotation.contrasts,
outputName = paste0(tsvdir,
"/contrasts_corrected.tsv"),
type = "q_value")
print_results_correlations(correlations = defs$contrasts,
annotation = defs$annotation.contrasts,
outputName = paste0(tsvdir,
"/contrasts_raw.tsv"),
type = "correlation")
print_results_correlations(correlations = defs$sum,
annotation = defs$annotation.sum,
outputName = paste0(tsvdir, "/sum.tsv"),
type = "sum")
print_results_correlations(correlations = defs$sd,
annotation = defs$annotation.sd,
outputName = paste0(tsvdir, "/sd.tsv"),
type = "sd")
print_results_correlations(correlations = defs$cv,
annotation = defs$annotation.cv,
outputName = paste0(tsvdir, "/cv.tsv"),
type = "cv")
fieldnames <- c(paste0("correlations.",
c("pearson", "spearman", "kendall")),
paste0("results.correlations.pvalue.",
c("pearson", "spearman", "kendall")))
filenames <- paste0(tsvdir, "/",
c(paste0(c("p", "s", "k"), "_corr_results.tsv"),
paste0(c("p", "s", "k"), "_corr_qvalues_results.tsv")))
types <- c(rep("correlation", 3), rep("q_value", 3))
for (i in seq_along(fieldnames)){
print_results_correlations(correlations = defs[[fieldnames[i]]],
annotation = defs$annotation.cor,
outputName = filenames[[i]],
type = types[i])
}
invisible(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/save_tsv_files.R
|
set_absolute_paths <- function(defs){
# Potential fields that require normalisation:
fields <- c("annotation.files.dir",
"output.dir",
"dataset.info",
"dict.path",
"tree.path")
if(defs$basedir == c("")){
defs$basedir <- "./"
}
defs$basedir <- normalizePath(defs$basedir)
defs$paths <- data.frame(field = character(),
rel = character(),
abs = character())
for (i in seq_along(fields)){
if(!is.null(defs[[fields[i]]]) && defs[[fields[i]]] != ""){
defs$paths <- rbind(defs$paths,
data.frame(field = fields[i],
rel = defs[[fields[i]]],
abs = paste0(defs$basedir, "/",
defs[[fields[i]]])))
defs[[fields[i]]] <- paste0(defs$basedir, "/", defs[[fields[i]]])
}
}
return(defs)
}
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/set_absolute_paths.R
|
# Small auxiliary functions for ontology manipulation
# (not exported to the package namespace)
# Prepares a listing of ancestors for each GO ID.
# Used to vectorize ObtainGeneGOancestors().
# Returns a list object with all GOXXANCESTOR lists combined.
ListAncestors <- function() {
return(c(as.list(GO.db::GOBPANCESTOR),
as.list(GO.db::GOCCANCESTOR),
as.list(GO.db::GOMFANCESTOR)))
}
# Prepares a listing of synonyms for GO IDs with alternative IDs.
# Used to vectorize RemoveObsoleteAndAlternative().
# Returns char vector containing GOSYNONYM's mapping in a single vector.
ListSynonyms <- function() {
return(as.character(GO.db::GOSYNONYM))
}
# Prepares a listing of obsolete GO IDs.
# Used to vectorize RemoveObsoleteAndAlternative().
# Returns a vector with GOOBSOLETE's mapping.
ListObsoletes <- function() {
allObsolete <- as.character(GO.db::GOOBSOLETE)
names(allObsolete) <- NULL
return(allObsolete)
}
# Prepares a listing of KO and their annotation.
# Returns a char vector of KOs and their annotation.
ListKOs <- function() {
allKOs <- KEGGREST::keggList("ko")
names(allKOs) <- gsub(pattern = "ko:", replacement = "",
x = names(allKOs), fixed = TRUE)
return(allKOs)
}
# Creates a dictionary of valid terms from the data;
# does not contain an annotation description.
# Input:
# test.anno: (list) genomes in the test group, each with a data frame that
# maps each genomic element to its annotations.
# back.anno: (list) genomes in the background group, each with a data frame
# that maps each genomic element to its annotations.
#
# Returns char vector containing a dictionary of terms.
CreateDictionary <- function(test.anno, back.anno = NULL) {
# Parse the tab format from Uniprot
dict <- unique(unlist(sapply(test.anno, unlist)))
if (!is.null(back.anno)){
dict <- unique(c(dict, unlist(sapply(back.anno, unlist))))
}
  # Each element is named with its own value so the dictionary can be indexed
  # by term (the term then doubles as its own description when no external
  # dictionary file is provided).
  names(dict) <- dict
return(dict)
}
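# Illustrative usage sketch (hypothetical annotation data):
#   test.anno <- list(g1 = data.frame(GO = c("GO:0008150", "GO:0003674")))
#   dict <- CreateDictionary(test.anno)
#   # dict is a named character vector of the unique annotation terms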
# Removes obsolete GO IDs and replaces alternative IDs with their main IDs.
# Input:
# geneIDs: char vector of factors mapping gene IDs to GO IDs.
# allObsolete: char vector with GOOBSOLETE's mapping,
# generated by ListObsoletes()
# allSynonym: char vector with GOSYNONYM's mapping,
# generated by ListSynonyms()
#
# Returns a char vector of factors mapping gene IDs to GO IDs,
# without obsolete and alternative GO IDs.
RemoveObsoleteAndAlternative <- function(geneIDs, allObsolete, allSynonym) {
geneIDs <- setdiff(geneIDs, allObsolete)
alternativeIDs <- intersect(geneIDs, names(allSynonym))
geneIDs <- setdiff(geneIDs, alternativeIDs)
newIDs <- allSynonym[alternativeIDs]
return(unique(c(geneIDs, newIDs)))
}
# Finds all GO ID ancestors for a given gene.
# Input:
# geneIDs: char vector of factors mapping gene IDs to GO IDs.
# allAncestor: list containing all GOXXANCESTOR combined,
# generated by ListAncestors()
#
# Returns a char vector with all GO IDs found for the gene.
ObtainGeneGOancestors <- function(geneIDs, allAncestor) {
geneAncestors <- unlist(allAncestor[geneIDs], use.names = FALSE)
geneAncestors <- geneAncestors[geneAncestors != "all"]
geneIDs <- as.character(unique(c(geneIDs, geneAncestors)))
return(geneIDs)
}
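# Illustrative usage sketch (requires GO.db; the GO ID is an example only):
#   allAncestor <- ListAncestors()
#   ObtainGeneGOancestors(c("GO:0006915"), allAncestor)
#   # returns the input IDs plus all ancestor IDs, with the root "all" removed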
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/R/utils_ontology_manipulation.R
|
---
title: "CALANGO Parameters"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{CALANGO Parameters}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
<img src="https://github.com/fcampelo/CALANGO/raw/master/inst/images/CALANGO_LOGO.svg" height="150" alt="CALANGO logo. Drawn by Brazilian artist Berze - https://www.facebook.com/berzearte">
This document lists the input parameters expected / accepted in the CALANGO
definition files (or, alternatively, in the `defs` list).
***
# General parameters
### annotation.files.dir
**Type**: character string
**Description**: path to the directory where annotation files are located
**Required**: YES
**Default**: none
### output.dir
**Type**: character string
**Description**: path to the output directory where results should be saved
**Required**: YES
**Default**: none
### dataset.info
**Type**: character string
**Description**: path to a file containing the genome
metadata. It should contain _at least_, for each genome:
(1) path for annotation data; (2) phenotype data (numeric);
(3) normalization data (numeric)
It must be a _tab-separated value_ file with no column headers.
**Required**: YES
**Default**: none
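A minimal illustrative example of such a file (hypothetical file names and
values; the actual column order is yours to define and is referenced by the
index parameters described below):

```
genome1_annotation.tsv	Homo_sapiens	mammals	210	19000
genome2_annotation.tsv	Mus_musculus	mammals	180	22000
```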
### x.column
**Type**: integer/numeric
**Description**: index of the column from the file specified in `dataset.info` containing the phenotype data, which will be used to sort the genomes and
find annotation terms associated with that phenotype.
**Required**: YES
**Default**: none
### short.name.column
**Type**: integer/numeric
**Description**: index of the column from the file specified in `dataset.info` containing the short names for species/lineages to be used when plotting data.
**Required**: YES
**Default**: none
### group.column
**Type**: integer/numeric
**Description**: index of the column from the file specified in `dataset.info` containing the group to be used for coloring the heatmaps
**Required**: YES
**Default**: none
### ontology
**Type**: character string.
**Description**: the dictionary data type to use. Accepts _"GO"_ or
_"other"_
**Required**: YES
**Default**: none
### dict.path
**Type**: character string
**Description**: path to dictionary file (a two-column _tab-separated value_
file containing annotation IDs and their descriptions). Not needed if
`ontology = "GO"`.
**Required**: NO
**Default**: none
### column
**Type**: character string
**Description**: the _name_ of the column in the annotation file that should be used.
**Required**: YES
**Default**: none
### denominator.column
**Type**: integer/numeric
**Description**: index of the column from the file specified in `dataset.info` containing the normalization data.
**Required**: NO
**Default**: none
### tree.path
**Type**: character string
**Description**: path to the tree file.
**Required**: YES
**Default**: none
### tree.type
**Type**: character string
**Description**: tree file type. Accepts _"nexus"_ or _"newick"_.
Case-sensitive.
**Required**: YES
**Default**: none
### type
**Type**: character string
**Description**: type of analysis to perform. Currently accepts only
_"correlation"_
**Required**: YES
**Default**: none
### MHT.method
**Type**: character string
**Description**: type of multiple hypothesis testing correction to apply.
Accepts all methods listed in `stats::p.adjust.methods`.
**Required**: NO
**Default**: _"BH"_
### cores
**Type**: integer/numeric
**Description**: Number of cores to use. Must be a positive integer.
**Required**: NO
**Default**: 1
***
# Cutoff values
Cutoffs are used to regulate how much graphical output is produced by CALANGO. The _tab-separated value_ files that are generated at the end of the analysis
(and saved in the _output.dir_) will always contain the full, unfiltered results.
**q-value cutoffs** are used for correlation and phylogeny-aware linear models. Only entries with q-values _smaller_ than these cutoffs will be shown.
### spearman.qvalue.cutoff
**Type**: numeric between 0 and 1
**Required**: NO
**Default**: 1
### pearson.qvalue.cutoff
**Type**: numeric between 0 and 1
**Required**: NO
**Default**: 1
### kendall.qvalue.cutoff
**Type**: numeric between 0 and 1
**Required**: NO
**Default**: 1
### linear_model.qvalue.cutoff
**Type**: numeric between 0 and 1
**Required**: NO
**Default**: 1
***
**correlation cutoffs** are used to establish thresholds of positive/negative correlation values for the graphical output. **Important**: these parameters are a bit counter-intuitive. Please check the examples below for clarity.
### spearman.cor.lower.cutoff / spearman.cor.upper.cutoff
**Type**: numeric values between -1 and 1
**Description**: Thresholds for Spearman correlation values. The selection criterion is:
(Spearman correlation < lower.cutoff) OR (Spearman correlation > upper.cutoff)
**Required**: NO
**Defaults**: `spearman.cor.upper.cutoff = -1`;
`spearman.cor.lower.cutoff = 1` (i.e., no filtering)
**Example 1**: If you set `spearman.cor.upper.cutoff = 0.8` and
`spearman.cor.lower.cutoff = -0.8`, only pairs with Spearman correlation values smaller than `-0.8` OR greater than `0.8` will be shown.
**Example 2**: If you set `spearman.cor.upper.cutoff = 0` and
`spearman.cor.lower.cutoff = -1`, pairs with Spearman correlation values smaller than `-1` OR greater than `0` will be shown. Since the Spearman correlation cannot be smaller than `-1`, this means that only positively correlated pairs will be shown.
**Example 3**: If you set any values such that `spearman.cor.upper.cutoff < spearman.cor.lower.cutoff`, all pairs are shown (no filtering is performed).
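In R terms, the selection rule above can be sketched as follows (an
illustrative snippet, not the package's internal code; `r` stands for a
vector of correlation values and `lower`/`upper` for the configured cutoffs):

```r
# a term is kept for the graphical output when its correlation falls
# below the lower cutoff OR above the upper cutoff
keep <- (r < lower) | (r > upper)
```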
### pearson.cor.lower.cutoff / pearson.cor.upper.cutoff
**Type**: numeric values between -1 and 1
**Description**: Thresholds for Pearson correlation values. The selection criterion is:
(Pearson correlation < lower.cutoff) OR (Pearson correlation > upper.cutoff)
**Required**: NO
**Defaults**: `pearson.cor.upper.cutoff = -1`;
`pearson.cor.lower.cutoff = 1` (i.e., no filtering)
### kendall.cor.lower.cutoff / kendall.cor.upper.cutoff
**Type**: numeric values between -1 and 1
**Description**: Thresholds for Kendall correlation values. The selection criterion is:
(Kendall correlation < lower.cutoff) OR (Kendall correlation > upper.cutoff)
**Required**: NO
**Defaults**: `kendall.cor.upper.cutoff = -1`;
`kendall.cor.lower.cutoff = 1` (i.e., no filtering)
**standard deviation and coefficient of variation cutoffs** (only values greater than the cutoff will be shown)
### sd.cutoff
**Type**: non-negative numeric value
**Required**: NO
**Default**: 0
### cv.cutoff
**Type**: non-negative numeric value
**Required**: NO
**Default**: 0
**sum of annotation terms cutoff** (only values greater than the cutoff will be shown)
### annotation_size.cutoff
**Type**: non-negative integer/numeric value
**Required**: NO
**Default**: 0
**prevalence and heterogeneity cutoffs** (only values greater than the cutoff will be shown). **Prevalence** is defined as the percentage of lineages in which an annotation term was observed at least once. **Heterogeneity** is defined as the percentage of lineages in which the annotation term count differs from the median count.
### prevalence.cutoff
**Type**: numeric value between 0 and 1
**Required**: NO
**Default**: 0
### heterogeneity.cutoff
**Type**: numeric value between 0 and 1
**Required**: NO
**Default**: 0
***
# Advanced configurations
### raw_data_sd_filter
**Type**: character string. Accepts _"TRUE"_ or _"FALSE"_
**Description**: If _"TRUE"_ all annotation terms where standard deviation for annotation raw values before normalization is zero are removed. This filter is used to remove the (quite common) bias when QPAL (phenotype) and normalizing factors are strongly associated by chance.
**Required**: YES
**Default**: "TRUE"
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/inst/doc/CALANGO_Parameters.Rmd
|
---
title: "About: CALANGO"
output: html_document
---
<img src="https://github.com/fcampelo/CALANGO/raw/master/inst/images/CALANGO_LOGO.svg" alt="CALANGO logo. Drawn by Brazilian artist Berze - https://www.facebook.com/berzearte" height="300"/>
## DESCRIPTION
CALANGO is a first-principles, phylogeny-aware comparative genomics tool that searches for annotation terms (e.g. Pfam IDs, GO terms or superfamilies), formally described in a dictionary-like structure and used to annotate genomic components, associated with a quantitative/rank variable (e.g. number of cell types, genome size or density of specific genomic elements).
Our software has been freely inspired by (and explicitly modelled to take into account information from) ideas and tools as diverse as comparative phylogenetics methods, genome annotation, gene enrichment analysis, and data visualization and interactivity.
## MAIN DEVELOPERS
- Jorge Augusto Hongo ([[email protected]](mailto:[email protected]))
- Giovanni Marques de Castro ([[email protected]](mailto:[email protected]))
- Felipe Campelo ([[email protected]](mailto:[email protected]), [[email protected]](mailto:[email protected]))
- Francisco Pereira Lobo ([[email protected]](mailto:[email protected]), [[email protected]](mailto:[email protected]))
## CODE REPOSITORY
The main code repository for the CALANGO package can be found at [https://github.com/fcampelo/CALANGO](https://github.com/fcampelo/CALANGO) and [https://github.com/franciscolobo/CALANGO](https://github.com/franciscolobo/CALANGO). Please use these for bug reports or to suggest improvements.
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/inst/extdata/report_files/about.Rmd
|
---
title: "Heatmap - species clustering by phylogeny"
output: html_document
---
```{r Heatmaps, echo = FALSE, warning = FALSE, message = FALSE}
###Prepare data.frame to plot
##getting data frame linking genome IDs to groups and short names
mynames <- data.frame(genomeID = defs$y.name,
group = defs$groups,
shortName = defs$short.name)
##DEBUG
#y.name
#groups
#short.name
#names
##getting relevant annotation IDs to plot only them
ids <- as.vector(defs$sig_IDs)
##DEBUG
#ids
#getting annotation frequencies for annotation IDs
tmp <- defs$y[as.vector(ids)]
#tmp <- data.frame(sapply(defs$y[as.vector(ids)], function(x) ifelse(x > 0, 1, 0)))
#mynorm <- tmp
mynorm <- tmp / defs$denominator
rm(tmp)
#replacing genome IDs by user-defined names
rownames(mynorm) <- mynames$shortName[match(rownames(mynorm), mynames$genomeID)]
##DEBUG
#mynorm
#getting a heatmaply-compatible tree
tree <- defs$tree
#replacing genome IDs by short names
tree$tip.label <- mynames$shortName[match(tree$tip.label, mynames$genomeID)]
#2char
tree$tip.label <- sapply(tree$tip.label, function(x) as.character(x))
#final tree must be a dendrogram and a dendextend object
tree2 <- stats::as.dendrogram(ape::as.hclust.phylo(tree))#stats::as.dendrogram(ape::as.phylo(tree))
tree_final <- dendextend::set(tree2, what = "branches_lwd", value = 0.4)
##DEBUG
#plot(tree2)
#colnames(mynorm) <- paste0(ids, " - ", output$annotation.cor[ids])
mat <- as.matrix(mynorm)
ord.mat <- mat[tree$tip.label, ]
rm(mynorm)
#to create custom hover text by adding annotation definition
onmouseover <- as.data.frame(ord.mat)
onmouseover[] <- lapply(colnames(onmouseover),
function(colname) {
paste0("definition: ",
defs$annotation.cor[colname], "\n")})
##DEBUG
#ord.mat
#color palette for heatmap
my_palette <- grDevices::colorRampPalette(c("white", "blue"))(n = 51)
#establishing colors for groups
jColors <- data.frame(LABEL = levels(as.factor(mynames$group)),
COLOR = I(CALANGO_brewer_pal(nlevels(as.factor(mynames$group)),
name = 'Set1')))
##DEBUG
#jColors
#coloring species by group color
tmp <- data.frame(species = rownames(ord.mat), stringsAsFactors = FALSE)
tmp$group <- mynames$group[match(tmp$species, mynames$shortName)]
tmp$COLOR <- jColors$COLOR[match(tmp$group, jColors$LABEL)]
species2color <- base::unique(tmp[, -2])
species2group <- base::unique(tmp[, -3])
#rm(tmp)
##DEBUG
#species2color
#quantile normalization
#tmp <- normalize.quantiles(as.matrix(ord.mat))
#colnames(tmp) <- colnames(ord.mat)
#rownames(tmp) <- rownames(ord.mat)
#ord.norm.mat <- tmp
#rm(tmp)
#computing distance for row clustering
distance2 <- stats::dist(as.matrix(t(heatmaply::normalize(as.data.frame(ord.mat)))),
method = "euclidean")
cluster2 <- stats::hclust(distance2, method = "average")
#cluster2_final = dendextend::set(as.dendrogram(hclust(distance2, method=c("average"))), "branches_lwd", 0.4)
##DEBUG
#plot(cluster2_final)
#plot(tree_final)
#Plot
#mynorm
mp <- heatmaply::heatmaply(
t(heatmaply::normalize(as.data.frame(ord.mat))),
# row_dend_left = TRUE,
# cexCol = 0.8,
# subplot_heights=c(0.05, 0.01, 0.94),
Rowv = cluster2,
# col_side_colors = NULL,
# row_side_colors = species2group$group,
Colv = tree2,
col = my_palette,
plot_method= "ggplot",
custom_hovertext = t(onmouseover),
subplot_widths=c(0.9, 0.1),
subplot_heights=c(0.08, 0.02, 0.90),
col_side_colors = data.frame("Groups" = species2group$group,
check.names=FALSE))
suppressWarnings(
plotly::layout(mp,
width = 25 * nrow(ord.mat),
height = 20 * ncol(ord.mat))
)
# code used to generate pdf plots for article
# Colv = tree_final,
# file="heatmap_norm.pdf",
# Rowv = cluster2,
# branches_lwd = 0.4,
# col = my_palette,
# row_dend_left = TRUE,
# plot_method= "ggplot",
# cexRow = 0.6,
# width = 1000,
# height = 1000,
# custom_hovertext = t(onmouseover),
# subplot_widths=c(0.9, 0.1),
# subplot_heights=c(0.08, 0.02, 0.90),
# k_row = NA,
# showticklabels = FALSE,
# subplot_heights=c(0.05, 0.01, 0.94),
# col_side_colors = NULL,
# row_side_colors = species2group$group,
# col_side_colors = data.frame("Groups" = species2group$group, check.names=FALSE)
# ) %>% plotly::layout(width=25*(nrow(ord.mat)), height=20*(ncol(ord.mat)))
```
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/inst/extdata/report_files/heatmap_phylo_norm.Rmd
|
---
title: "Heatmap - species clustering by phylogeny"
output: html_document
---
```{r Heatmaps, echo = FALSE, warning = FALSE, message = FALSE}
###Prepare data.frame to plot
# getting data frame linking genome IDs to groups and short names
mynames <- data.frame(genomeID = defs$y.name,
group = defs$groups,
shortName = defs$short.name)
##DEBUG
#y.name
#groups
#short.name
#names
##getting relevant annotation IDs to plot only them
ids <- as.vector(df_cutoff$name)
##DEBUG
#ids
#getting annotation frequencies for annotation IDs
tmp <- defs$y[as.vector(ids)]
mynorm <- tmp / defs$denominator
#rm(tmp)
#replacing genome IDs by user-defined names
rownames(mynorm) <- mynames$shortName[match(rownames(mynorm), mynames$genomeID)]
##DEBUG
#mynorm
#getting a heatmaply-compatible tree
tree <- defs$tree
#replacing genome IDs by short names
tree$tip.label <- mynames$shortName[match(tree$tip.label, mynames$genomeID)]
#2char
tree$tip.label <- sapply(tree$tip.label, function(x) as.character(x))
#final tree must be a dendrogram and a dendextend object
tree2 <- stats::as.dendrogram(ape::as.hclust.phylo(tree))#stats::as.dendrogram(ape::as.phylo(tree))
tree_final <- dendextend::set(tree2, what = "branches_lwd", value = 1)
##DEBUG
#plot(tree2)
#colnames(mynorm) <- paste0(ids, " - ", output$annotation.cor[ids])
mat <- as.matrix(mynorm)
ord.mat <- mat[tree$tip.label,]
rm(mynorm)
#to create custom hover text by adding annotation definition
onmouseover <- as.data.frame(ord.mat)
onmouseover[] <- lapply(colnames(onmouseover), function(colname) {
paste0("definition: ", defs$annotation.cor[colname], "\n")
})
##DEBUG
#ord.mat
#color palette for heatmap
my_palette <- grDevices::colorRampPalette(c("white", "blue"))(n = 51)
#establishing colors for groups
jColors <- data.frame(LABEL = levels(as.factor(mynames$group)),
COLOR = I(CALANGO_brewer_pal(nlevels(as.factor(mynames$group)),
name = 'Set1')))
##DEBUG
#jColors
#coloring species by group color
tmp <- data.frame(species = rownames(ord.mat), stringsAsFactors = FALSE)
tmp$group <- mynames$group[match(tmp$species, mynames$shortName)]
tmp$COLOR <- jColors$COLOR[match(tmp$group, jColors$LABEL)]
species2color <- base::unique(tmp[, -2])
species2group <- base::unique(tmp[, -3])
#rm(tmp)
##DEBUG
#species2color
#quantile normalization
#tmp <- normalize.quantiles(as.matrix(ord.mat))
#colnames(tmp) <- colnames(ord.mat)
#rownames(tmp) <- rownames(ord.mat)
#ord.norm.mat <- tmp
#rm(tmp)
#computing distance for column clustering
distance2 <- stats::dist(as.matrix(t(ord.mat)), method = "euclidean")
cluster2 <- stats::hclust(distance2, method = "average")
#cluster2_final = dendextend::set(as.dendrogram(hclust(distance2, method=c("average"))), "branches_lwd", 1)
##DEBUG
#plot(cluster2_final)
#plot(tree_final)
#Plot
#mynorm
mp <- heatmaply::heatmaply(
t(heatmaply::percentize(as.data.frame(ord.mat))),
# file="heatmap_norm.pdf",
Rowv = cluster2,
# row_dend_left = TRUE,
# width = 1000,
# height = 1800,
# subplot_heights=c(0.05, 0.01, 0.94),
# col_side_colors = NULL,
# row_side_colors = species2group$group,
Colv = tree2,
col = my_palette,
plot_method= "ggplot",
cexRow = 0.8,
custom_hovertext = t(onmouseover),
subplot_widths=c(0.9, 0.1),
subplot_heights=c(0.08, 0.02, 0.90),
col_side_colors = data.frame("Groups" = species2group$group,
check.names=FALSE))
suppressWarnings(
plotly::layout(mp,
width = 25 * nrow(ord.mat),
height = 20 * ncol(ord.mat))
)
```
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/inst/extdata/report_files/heatmap_phylo_perc.Rmd
|
---
title: "Heatmap - species clustering by phylogeny"
output: html_document
---
```{r Heatmaps, echo = FALSE, warning = FALSE, message = FALSE}
###Prepare data.frame to plot
##getting data frame linking genome IDs to groups and short names
mynames <- data.frame(genomeID = defs$y.name,
group = defs$groups,
shortName = defs$short.name)
##DEBUG
#y.name
#groups
#short.name
#names
##getting relevant annotation IDs to plot only them
ids <- as.vector(df_cutoff$name)
##DEBUG
#ids
#getting annotation frequencies for annotation IDs
tmp <- defs$y[as.vector(ids)]
mynorm <- tmp / defs$denominator
rm(tmp)
#replacing genome IDs by user-defined names
rownames(mynorm) <- mynames$shortName[match(rownames(mynorm), mynames$genomeID)]
##DEBUG
#mynorm
#getting a heatmaply-compatible tree
tree <- defs$tree
#replacing genome IDs by short names
tree$tip.label <- mynames$shortName[match(tree$tip.label, mynames$genomeID)]
#2char
tree$tip.label <- sapply(tree$tip.label, function(x) as.character(x))
#final tree must be a dendrogram and a dendextend object
tree2 <- stats::as.dendrogram(ape::as.hclust.phylo(tree))#stats::as.dendrogram(ape::as.phylo(tree))
tree_final <- dendextend::set(tree2, what = "branches_lwd", value = 1)
##DEBUG
#plot(tree2)
#colnames(mynorm) <- paste0(ids, " - ", output$annotation.cor[ids])
mat <- as.matrix(mynorm)
ord.mat <- mat[tree$tip.label,]
rm(mynorm)
#to create custom hover text by adding annotation definition
onmouseover <- as.data.frame(ord.mat)
onmouseover[] <- lapply(colnames(onmouseover),
function(colname) {
paste0("definition: ",
defs$annotation.cor[colname], "\n")})
##DEBUG
#ord.mat
#color palette for heatmap
my_palette <- grDevices::colorRampPalette(c("white", "blue"))(n = 51)
#establishing colors for groups
jColors <- data.frame(LABEL = levels(as.factor(mynames$group)),
COLOR = I(CALANGO_brewer_pal(nlevels(as.factor(mynames$group)),
name = 'Set1')))
##DEBUG
#jColors
#coloring species by group color
tmp <- data.frame(species = rownames(ord.mat), stringsAsFactors = FALSE)
tmp$group <- mynames$group[match(tmp$species, mynames$shortName)]
tmp$COLOR <- jColors$COLOR[match(tmp$group, jColors$LABEL)]
species2color <- base::unique(tmp[, -2])
species2group <- base::unique(tmp[, -3])
#rm(tmp)
##DEBUG
#species2color
#quantile normalization
#tmp <- normalize.quantiles(as.matrix(ord.mat))
#colnames(tmp) <- colnames(ord.mat)
#rownames(tmp) <- rownames(ord.mat)
#ord.norm.mat <- tmp
#rm(tmp)
#computing distance for row clustering
distance2 <- stats::dist(as.matrix(t(heatmaply::normalize(as.data.frame(ord.mat)))),
method = "euclidean")
cluster2 <- stats::hclust(distance2, method = "average")
#cluster2_final = dendextend::set(as.dendrogram(hclust(distance2, method=c("average"))), "branches_lwd", 1)
##DEBUG
#plot(cluster2_final)
#plot(tree_final)
#Plot
#mynorm
mp <- heatmaply::heatmaply(
t(as.data.frame(ord.mat)),
# file="heatmap_p.html",
Rowv = cluster2,
# row_dend_left = TRUE,
# subplot_heights=c(0.05, 0.01, 0.94),
# col_side_colors = NULL,
# row_side_colors = species2group$group,
Colv = tree2,
col = my_palette,
plot_method= "ggplot",
cexCol = 0.8,
custom_hovertext = t(onmouseover),
subplot_widths=c(0.9, 0.1),
subplot_heights=c(0.08, 0.02, 0.90),
col_side_colors = data.frame("Groups" = species2group$group,
check.names=FALSE))
suppressWarnings(
plotly::layout(mp,
width = 25 * nrow(ord.mat),
height = 20 * ncol(ord.mat))
)
```
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/inst/extdata/report_files/heatmap_phylo_raw.Rmd
|
---
title: "CALANGO Report"
---
<img src="https://raw.githubusercontent.com/fcampelo/CALANGO/8e2c9efd7abb739f91a2ff36ffbce2b30f98f87b/inst/images/CALANGO_LOGO.svg" alt="CALANGO logo. Drawn by Brazilian artist Berze - https://www.facebook.com/berzearte" height="300"/>
*****
CALANGO is an annotation-based comparative genomics tool that searches for annotation terms, as defined in controlled dictionaries, associated with a quantitative phenotype across lineages.
CALANGO provides three main outputs as interactive HTML5 files:
- Heatmaps
- Scatterplots of q-values of annotation terms
- Table of annotation terms and their statistics
To access these interactive visualizations, please click on the `Results` button in the top bar.
***
## Input configuration used
The current report was generated based on the input parameters listed below.
Job finished at: `r Sys.time()`
```{r, echo = FALSE}
defs2 <- restore_relative_paths(defs)
confs <- defs2[c("annotation.files.dir",
"output.dir",
"dataset.info",
"x.column",
"short.name.column",
"group.column",
"ontology",
"dict.path",
"column",
"denominator.column",
"tree.path",
"tree.type",
"type",
"MHT.method",
"cores",
"spearman.qvalue.cutoff",
"pearson.qvalue.cutoff",
"kendall.qvalue.cutoff",
"linear_model.qvalue.cutoff",
"spearman.cor.upper.cutoff",
"spearman.cor.lower.cutoff",
"pearson.cor.upper.cutoff",
"pearson.cor.lower.cutoff",
"kendall.cor.upper.cutoff",
"kendall.cor.lower.cutoff",
"sd.cutoff",
"cv.cutoff",
"annotation_size.cutoff",
"prevalence.cutoff",
"heterogeneity.cutoff",
"raw_data_sd_filter")]
confs <- data.frame(Parameter = names(confs),
Value = as.character(confs),
stringsAsFactors = FALSE)
DT::datatable(confs,
options = list(pageLength = 50,
searching = FALSE,
info = FALSE,
ordering = FALSE,
dom = "t"))
```
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/inst/extdata/report_files/index.Rmd
|
---
title: "Annotation terms: q-values, size and coefficient of variation"
output: html_document
---
```{r Correlation Plots, echo = FALSE, warnings = FALSE, message = FALSE}
#Prepare data.frame to plot
#Plot
p <- ggplot2::ggplot(data = df_cutoff,
mapping = ggplot2::aes(x = -log10(corrected_contrasts),
y = -log10(Spearman_qvalue),
label = name,
description = description,
cv = 1 / cv)) +
ggplot2::geom_point(ggplot2::aes(size = size, alpha = cv)) +
ggplot2::theme_bw() +
ggplot2::ggtitle("Spearman") +
ggplot2::labs(x = "-log10(Corrected contrasts)",
y = '-log10(Spearman correlation q-value)')
pl1 <- plotly::ggplotly(p,
tooltip = c('size','name','description','x','y'))
p <- ggplot2::ggplot(data = df_cutoff,
mapping = ggplot2::aes(x = -log10(corrected_contrasts),
y = -log10(Kendall_qvalue),
label = name,
description = description,
cv = 1 / cv)) +
ggplot2::geom_point(ggplot2::aes(size = size, alpha = cv)) +
ggplot2::theme_bw() +
ggplot2::ggtitle("Kendall") +
ggplot2::labs(x = "-log10(Corrected contrasts)",
y = '-log10(Kendall correlation q-value)')
pl2 <- plotly::ggplotly(p,
tooltip = c('x','y','size','name','description'))
p <- ggplot2::ggplot(data = df_cutoff,
mapping = ggplot2::aes(x = -log10(corrected_contrasts),
y = -log10(Pearson_qvalue),
label = name,
description = description,
cv = 1 / cv)) +
ggplot2::geom_point(ggplot2::aes(size = size, alpha = cv)) +
ggplot2::theme_bw() +
ggplot2::ggtitle("Pearson") +
ggplot2::labs(x = "-log10(Corrected contrasts)",
y = '-log10(Pearson correlation q-value)')
pl3 <- plotly::ggplotly(p,
tooltip = c('size','name','description','x','y'))
htmltools::tagList(pl1, pl2, pl3)
```
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/inst/extdata/report_files/q_value_scatter.Rmd
|
---
title: "Table"
output: html_document
---
Some columns are not visible by default.
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
```{r Multiplot, echo = FALSE, warning = FALSE}
# multiple scatterplots, with confidence intervals, in a single html page
#creating plot titles and axes labels
f <- list(size = 18, color = "black") # family = "Verdana",
title_p1 <- list(text = "Raw data",
font = f,
xref = "Phenotype",
yref = "Annotation term frequency",
yanchor = "bottom",
xanchor = "center",
align = "center",
x = 0.5,
y = 1,
showarrow = FALSE)
title_p2 <- title_p1
title_p2$text <- "Rank data"
title_p2$xref <- "Phenotype rank"
title_p2$yref <- "Annotation term frequency rank"
title_p3 <- title_p1
title_p3$text <- "Phylogeny-aware linear model data"
title_p3$xref <- "Contrasts of phenotypes"
title_p3$yref <- "Contrasts of annotation term frequencies"
# TODO: multicore-parallelize this for loop.
for (i in seq_along(df_cutoff$name)){ # plot every term that passed the cutoffs
fname <- df_cutoff$name[i]
#output dir
file <- paste0(cpd, gsub(":", "_", fname, fixed = TRUE), ".html")
# setting up data
Xdf <- defs$x
names(Xdf) <- "X_var"
Xdf$feature <- defs$y[, fname] / defs$denominator
ID <- row.names(Xdf)
group <- defs$groups
p1 <- ggplot2::ggplot(data = Xdf,
mapping = ggplot2::aes(x = X_var,
y = feature,
label = ID
)) +
ggplot2::geom_point() +
# expand_limits(x = 0, y = 0) +
ggplot2::geom_smooth(method = "lm", formula = y ~ x, se = TRUE) +
ggplot2::theme_bw() +
ggplot2::labs(x = "Phenotype value",
y = "Annotation frequency")
Xdf <- as.data.frame(rank(defs$x[, 1]))
rownames(Xdf) <- rownames(defs$x)
names(Xdf) <- "X_var"
Xdf$feature <- rank(defs$y[, fname] / defs$denominator)
p2 <- ggplot2::ggplot(data = Xdf,
mapping = ggplot2::aes(x = X_var,
y = feature,
label = ID)) +
ggplot2::geom_point() +
ggplot2::geom_smooth(method = "loess", formula = y ~ x, se = TRUE) +
ggplot2::theme_bw() +
ggplot2::labs(x = "Phenotype rank",
y = "Annotation frequency (rank)")
# TODO: plots only consider "pic". If we implement "gls", maybe we'll need to
# change something here too.
tmp_x <- defs$x[, 1]
names(tmp_x) <- rownames(defs$x)
contrast_x <- ape::pic(x = tmp_x, phy = defs$tree)
tmp_y <- defs$y[, fname] / defs$denominator
names(tmp_y) <- rownames(defs$x)
contrast_y <- ape::pic(x = tmp_y, phy = defs$tree)
Xdf <- data.frame(contrast_x, contrast_y)
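# phylogenetically independent contrasts are regressed through the origin,
# hence the no-intercept ("+ 0") model below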
model <- stats::lm(contrast_y ~ contrast_x + 0)
p3 <- ggplot2::ggplot(data = Xdf,
mapping = ggplot2::aes(x = contrast_x,
y = contrast_y,
label = rownames(Xdf))) +
ggplot2::geom_point() +
ggplot2::geom_abline(slope = model$coefficients[1],
intercept = 0,
color = c("#3366FFFF"),
size = 1) +
ggplot2::theme_bw() +
ggplot2::labs(x = "contrasts for phenotype",
y = "Contrasts for annotation frequency")
# ggplot2::geom_smooth(method = "lm", se = FALSE) +
# ggplot2::geom_line(data = fortify(model), aes(x = contrast_x, y = .fitted))
# ggtitle(paste0("phylogeny-aware linear model data for "), i),
# ggtitle("Representation of raw data Representation of rank data Representation of phylogenetic-aware linear model data")
p4 <- plotly::subplot(p1, p2, p3, shareX = TRUE, shareY = FALSE, titleY = TRUE)
p1ly <- plotly::ggplotly(p4,
tooltip = c('label','X_var','Normalized feature'))
htmlwidgets::saveWidget(p1ly, file = file,
libdir = 'lib',
selfcontained = FALSE)
}
```
```{r Boxplot, echo = FALSE, warning = FALSE}
for (i in seq_along(df_cutoff$name)){
fname <- df_cutoff$name[i]
#output dir
file <- paste0(cpd, gsub(":", "_", fname, fixed = TRUE), '_boxplot.html')
# file_name <- paste0(fname, "_boxplot_per_group.pdf")
# file_name <- gsub(":", "_", file_name)
tmp <- data.frame(defs$x, defs$short.name, defs$groups, defs$y[fname], (defs$y[fname]/defs$denominator))
colnames(tmp) <- c("cell_type", "short", "group", "count", "norm")
my_palette <- data.frame(group = unique(tmp$group),
colour = I(CALANGO_brewer_pal(length(unique(tmp$group)),
name = 'Set1')))
tmp$fill <- my_palette$colour[match(tmp$group, my_palette$group)]
tmp$group <- stats::reorder(tmp$group, tmp$count, FUN = median)
p1 <- ggplot2::ggplot(tmp, ggplot2::aes(x = group,y = count)) +
ggplot2::geom_violin(ggplot2::aes(fill = I(fill)),
scale = "width", alpha = 0.25) +
ggplot2::geom_boxplot(ggplot2::aes(fill = I(fill)),
alpha = 0.3) +
ggplot2::geom_jitter(width = 0.05, height = 0, size = .5) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45, hjust = 1),
axis.title.x = ggplot2::element_blank())
tmp$group <- stats::reorder(tmp$group, tmp$norm, FUN = median)
p2 <- p1 %+% tmp +
ggplot2::aes(y = norm) +
ggplot2::annotate("text", y = 1.02 * max(tmp$norm), x = 1, label = "Norm")
p1 <- p1 +
ggplot2::annotate("text", y = 1.02 * max(tmp$count), x = 1, label = "Count") +
ggplot2::ggtitle(paste0(fname, " - ", defs$annotation.cor[fname]))
prow <- plotly::subplot(plotly::style(p1, showlegend = FALSE, width = .1),
plotly::style(p2, showlegend = FALSE, width = .1),
shareY = FALSE, titleY = FALSE)
p3 <- plotly::ggplotly(prow, tooltip = c("x", "y"))
htmlwidgets::saveWidget(p3,
file = file,
libdir = 'lib',
selfcontained = FALSE
)
}
```
```{r Table, echo = F}
# CHECK HERE - NAMES, INDICES, URL ETC.
dtable <- df_cutoff
#build links to scatterplots
dtable$corr_plots <- paste0('<a target=_blank href="',
paste0('correlation_Plots/',
gsub(":", "_", df_cutoff$name, fixed = TRUE),
'.html'),
'">', df_cutoff$name,'</a>')
#build links to boxplots
dtable$boxplots <- paste0('<a target=_blank href="',
paste0('correlation_Plots/',
gsub(":", "_", df_cutoff$name, fixed = TRUE),
'_boxplot.html'),
'">', df_cutoff$name,'</a>')
dtable <- DT::datatable(dtable,
escape = FALSE,
filter = 'bottom',
extensions = 'Buttons',
options = list(dom = 'Bfrtip',
buttons = c('colvis','csv'),
columnDefs = list(list(visible = FALSE,
targets = 4:(length(df_cutoff) - 2))))) # prepare table
dtable
```
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/inst/extdata/report_files/table.Rmd
|
---
title: "CALANGO Parameters"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{CALANGO Parameters}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
<img src="https://github.com/fcampelo/CALANGO/raw/master/inst/images/CALANGO_LOGO.svg" height="150" alt="CALANGO logo. Drawn by Brazilian artist Berze - https://www.facebook.com/berzearte">
This document lists the input parameters expected / accepted in the CALANGO
definition files (or, alternatively, in the `defs` list).
***
# General parameters
### annotation.files.dir
**Type**: character string
**Description**: path to the directory where annotation files are located
**Required**: YES
**Default**: none
### output.dir
**Type**: character string
**Description**: path to the output directory where results should be saved
**Required**: YES
**Default**: none
### dataset.info
**Type**: character string
**Description**: path to a file containing the genome
metadata. It should contain _at least_, for each genome:
(1) path for annotation data; (2) phenotype data (numeric);
(3) normalization data (numeric)
It must be a _tab-separated value_ file with no column headers.
**Required**: YES
**Default**: none
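A minimal illustrative example of such a file (hypothetical file names and
values; the actual column order is yours to define and is referenced by the
index parameters described below):

```
genome1_annotation.tsv	Homo_sapiens	mammals	210	19000
genome2_annotation.tsv	Mus_musculus	mammals	180	22000
```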
### x.column
**Type**: integer/numeric
**Description**: index of the column from the file specified in `dataset.info` containing the phenotype data, which will be used to sort the genomes and
find annotation terms associated with that phenotype.
**Required**: YES
**Default**: none
### short.name.column
**Type**: integer/numeric
**Description**: index of the column from the file specified in `dataset.info` containing the short names for species/lineages to be used when plotting data.
**Required**: YES
**Default**: none
### group.column
**Type**: integer/numeric
**Description**: index of the column from the file specified in `dataset.info` containing the group to be used for coloring the heatmaps
**Required**: YES
**Default**: none
### ontology
**Type**: character string.
**Description**: the dictionary data type to use. Accepts _"GO"_ or
_"other"_
**Required**: YES
**Default**: none
### dict.path
**Type**: character string
**Description**: path to dictionary file (a two-column _tab-separated value_
file containing annotation IDs and their descriptions). Not needed if
`ontology = "GO"`.
**Required**: NO
**Default**: none
### column
**Type**: character string
**Description**: the _name_ of the column in the annotation file that should be used.
**Required**: YES
**Default**: none
### denominator.column
**Type**: integer/numeric
**Description**: index of the column from the file specified in `dataset.info` containing the normalization data.
**Required**: NO
**Default**: none
### tree.path
**Type**: character string
**Description**: path to the tree file.
**Required**: YES
**Default**: none
### tree.type
**Type**: character string
**Description**: tree file type. Accepts _"nexus"_ or _"newick"_.
Case-sensitive.
**Required**: YES
**Default**: none
### type
**Type**: character string
**Description**: type of analysis to perform. Currently accepts only
_"correlation"_
**Required**: YES
**Default**: none
### MHT.method
**Type**: character string
**Description**: type of multiple hypothesis testing correction to apply.
Accepts all methods listed in `stats::p.adjust.methods`.
**Required**: NO
**Default**: _"BH"_
### cores
**Type**: integer/numeric
**Description**: Number of cores to use. Must be a positive integer.
**Required**: NO
**Default**: 1
***
# Cutoff values
Cutoffs are used to regulate how much graphical output is produced by CALANGO. The _tab-separated value_ files that are generated at the end of the analysis
(and saved in the _output.dir_) will always contain the full, unfiltered results.
**q-value cutoffs** are used for correlation and phylogeny-aware linear models. Only entries with q-values _smaller_ than these cutoffs will be shown.
### spearman.qvalue.cutoff
**Type**: numeric between 0 and 1
**Required**: NO
**Default**: 1
### pearson.qvalue.cutoff
**Type**: numeric between 0 and 1
**Required**: NO
**Default**: 1
### kendall.qvalue.cutoff
**Type**: numeric between 0 and 1
**Required**: NO
**Default**: 1
### linear_model.qvalue.cutoff
**Type**: numeric between 0 and 1
**Required**: NO
**Default**: 1
***
**correlation cutoffs** are used to establish thresholds of positive/negative correlation values for the graphical output. **Important**: these parameters are a bit counter-intuitive. Please check the examples below for clarity.
### spearman.cor.lower.cutoff / spearman.cor.upper.cutoff
**Type**: numeric values between -1 and 1
**Description**: Thresholds for Spearman correlation values. The selection criterion is:
(Spearman correlation < lower.cutoff) OR (Spearman correlation > upper.cutoff)
**Required**: NO
**Defaults**: `spearman.cor.upper.cutoff = -1`;
`spearman.cor.lower.cutoff = 1` (i.e., no filtering)
**Example 1**: If you set `spearman.cor.upper.cutoff = 0.8` and
`spearman.cor.lower.cutoff = -0.8`, only pairs with Spearman correlation values smaller than `-0.8` OR greater than `0.8` will be shown.
**Example 2**: If you set `spearman.cor.upper.cutoff = 0` and
`spearman.cor.lower.cutoff = -1`, pairs with Spearman correlation values smaller than `-1` OR greater than `0` will be shown. Since the Spearman correlation cannot be smaller than `-1`, this means that only positively correlated pairs will be shown.
**Example 3**: If you set any values such that `spearman.cor.upper.cutoff < spearman.cor.lower.cutoff`, all pairs are shown (no filtering is performed).
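In R terms, the selection rule above can be sketched as follows (an
illustrative snippet, not the package's internal code; `r` stands for a
vector of correlation values and `lower`/`upper` for the configured cutoffs):

```r
# a term is kept for the graphical output when its correlation falls
# below the lower cutoff OR above the upper cutoff
keep <- (r < lower) | (r > upper)
```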
### pearson.cor.lower.cutoff / pearson.cor.upper.cutoff
**Type**: numeric values between -1 and 1
**Description**: Thresholds for Pearson correlation values. The selection criterion is:
(Pearson correlation < lower.cutoff) OR (Pearson correlation > upper.cutoff)
**Required**: NO
**Defaults**: `pearson.cor.upper.cutoff = -1`;
`pearson.cor.lower.cutoff = 1` (i.e., no filtering)
### kendall.cor.lower.cutoff / kendall.cor.upper.cutoff
**Type**: numeric values between -1 and 1
**Description**: Thresholds for Kendall correlation values. The selection criterion is:
(Kendall correlation < lower.cutoff) OR (Kendall correlation > upper.cutoff)
**Required**: NO
**Defaults**: `kendall.cor.upper.cutoff = -1`;
`kendall.cor.lower.cutoff = 1` (i.e., no filtering)
**standard deviation and coefficient of variation cutoffs** (only values greater than the cutoff will be shown)
### sd.cutoff
**Type**: non-negative numeric value
**Required**: NO
**Default**: 0
### cv.cutoff
**Type**: non-negative numeric value
**Required**: NO
**Default**: 0
**sum of annotation terms cutoff** (only values greater than the cutoff will be shown)
### annotation_size.cutoff
**Type**: non-negative integer/numeric value
**Required**: NO
**Default**: 0
**prevalence and heterogeneity cutoffs** (only values greater than the cutoff will be shown). **Prevalence** is defined as the percentage of lineages in which an annotation term was observed at least once. **Heterogeneity** is defined as the percentage of lineages in which the annotation term count differs from the median count.
### prevalence.cutoff
**Type**: numeric value between 0 and 1
**Required**: NO
**Default**: 0
### heterogeneity.cutoff
**Type**: numeric value between 0 and 1
**Required**: NO
**Default**: 0
***
# Advanced configurations
### raw_data_sd_filter
**Type**: character string. Accepts _"TRUE"_ or _"FALSE"_
**Description**: If _"TRUE"_ all annotation terms where standard deviation for annotation raw values before normalization is zero are removed. This filter is used to remove the (quite common) bias when QPAL (phenotype) and normalizing factors are strongly associated by chance.
**Required**: YES
**Default**: "TRUE"
|
/scratch/gouwar.j/cran-all/cranData/CALANGO/vignettes/CALANGO_Parameters.Rmd
|
##
#' Example data containing case and control data
#'
#' This dataset contains 136 marker variables for 68 individuals, distinguished as case/control.
#'
#' @docType data
#' @keywords datasets
#' @name CaseControl
#' @usage data(CaseControl)
#' @format A data frame with 136 marker variables and 68 individuals.
"CaseControl"
|
/scratch/gouwar.j/cran-all/cranData/CALF/R/CALF-data.R
|
#' @name CALF-package
#' @aliases CALF-package
#' @title Coarse Approximation Linear Function
#' @description Forward selection linear regression greedy algorithm.
#' @encoding UTF-8
#' @author { Stephanie Lane [aut, cre],\cr
#' John Ford [aut],\cr
#' Clark Jeffries [aut],\cr
#' Diana Perkins [aut]
#' }
#' Maintainer: John Ford \email{JoRuFo@@gmail.com}
#' @importFrom stats t.test cor
#' @importFrom utils write.table
#' @import ggplot2
#' @keywords calf
#' @details The Coarse Approximation Linear Function (CALF) algorithm is a type of forward selection
#' linear regression greedy algorithm. Nonzero weights are restricted to the values +1 and -1 and
#' their number limited by an input parameter. CALF operates similarly on two different types of samples,
#' binary and nonbinary, with some notable distinctions between the two.
#' All sample data is provided to CALF as a data matrix. A binary sample must contain a distinguished first
#' column with at least one 0 entry (e.g. controls) and at least one 1 entry (e.g. cases); at least one
#' other column contains predictor values of some type. A nonbinary sample is similar but must contain a
#' first column with real dependent (target) values. Columns containing values other that 0 or 1 must be
#' normalized, e.g. as z-scores.
#' As its score of differentiation, CALF uses either the Welch t-statistic p-value or AUC for binary samples
#' and the Pearson correlation for non-binary samples, selected by input parameter. When initiated, CALF
#' selects from all predictors (markers) the one that yields the best score (the first, in the case of a tie).
#' CALF then checks if the number of selected markers is equal to the limit provided and terminates if so.
#' Otherwise, CALF seeks a second marker, if any, that best improves the score of the sum function generated
#' by adding the newly selected marker to the previous markers with weight +1 or weight -1.
#' The process continues until the limit is reached or until no additional marker can be included in the sum
#' to improve the score.
#' By default, for binary samples, CALF assumes control data is designated with a 0 and case data with a 1.
#' It is allowable to use the opposite convention, however the weights in the final sum may need to be reversed.
NULL
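# Illustrative sketch (hypothetical marker names): with nMarkers = 3, a fitted
# CALF model is simply a sum function of the form
#   f(sample) = marker2 - marker5 + marker9
# i.e. the selected markers combined with weights restricted to +1 / -1.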
|
/scratch/gouwar.j/cran-all/cranData/CALF/R/CALF-package.R
|
calf_internal <- function(data,
nMarkers,
randomize = FALSE,
proportion = NULL,
times,
targetVector = "binary",
optimize = "pval",
verbose = FALSE){
# getting rid of global variable warning -------------------------- #
x = NULL
y = NULL
refx = NULL
refy = NULL
if (targetVector == "nonbinary")
optimize <- NULL
# setting up some initial values -----------------------------------#
if (any(apply(data, 2, is.numeric) == FALSE)) {
stop("CALF ERROR: Data are not numeric. Please check that data were read in correctly.")
}
nVars <- ncol(data) - 1
dNeg <- data[ ,2:ncol(data)]
dNeg <- dNeg * - 1
data <- data.frame(data, dNeg, check.names = FALSE)
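# Appending negated copies of every marker lets the greedy search handle
# weight -1 implicitly: selecting column (nVars + i) is equivalent to
# selecting marker i with weight -1 (see indexNegPos at the end).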
if (nMarkers > nVars){
stop(paste0("CALF ERROR: Requested number of markers is larger than the number of markers in data set. ",
"Please revise this value or make sure your data were read in properly."))
}
if (randomize == TRUE) data[ ,1] <- sample(data[ ,1])
if (!is.null(proportion)) {
if (targetVector == "binary") {
ctrlRows <- which(data[ ,1] == 0)
caseRows <- which(data[ ,1] == 1)
# calculate number of case and control to keep
if(length(proportion) == 2) {
nCtrlKeep <- round(length(ctrlRows)*proportion[1], digits = 0)
nCaseKeep <- round(length(caseRows)*proportion[2], digits = 0)
} else {
nCtrlKeep <- round(length(ctrlRows)*proportion, digits = 0)
nCaseKeep <- round(length(caseRows)*proportion, digits = 0)
}
# sample randomly rows of case and control to keep, record rows to keep
keepRows <- c(sample(ctrlRows)[1:nCtrlKeep], sample(caseRows)[1:nCaseKeep])
# subset original data to keep these rows
data <- data[keepRows, ]
} else {
nDataKeep <- round(nrow(data)*proportion, digits = 0)
keepRows <- sample(1:nrow(data))[1:nDataKeep]
data <- data[keepRows, ]
}
}
real <- data[ ,1]
realMarkers <- data[ , 2:ncol(data)]
ctrl <- data[data[ ,1] == 0, 2:ncol(data)]
case <- data[data[ ,1] == 1, 2:ncol(data)]
indexNegPos <- rep(0, (nVars*2))
# end of setting up some initial values ----------------------------#
# initial loop to establish first optimal marker -------------------#
allCrit <- numeric()
for (i in 1:(nVars*2)){
if (targetVector == "binary"){
caseVar <- case[ ,i]
ctrlVar <- ctrl[ ,i]
if (optimize == "pval"){
crit <- t.test(caseVar, ctrlVar, var.equal = FALSE)$p.value
} else if (optimize == "auc"){
crit <- compute.auc(caseVar, ctrlVar)
crit <- 1/crit
}
} else {
realVar <- realMarkers[ ,i]
crit <- suppressWarnings(cor(real, realVar, use = "complete.obs"))
crit <- 1/crit
}
allCrit[i] <- crit
}
allCrit[allCrit < 0] <- NA
# end of initial loop ----------------------------------------------#
keepMarkers <- names(realMarkers)[which.min(allCrit)]
bestCrit <- min(allCrit, na.rm = TRUE)
keepIndex <- which.min(allCrit)
if (verbose == TRUE) {
if(targetVector == "binary") {
if (optimize == "pval"){
cat("Selected:", keepMarkers,
paste0("p value = ", round(bestCrit, digits = 15), "\n"))
} else if (optimize == "auc"){
cat("Selected:", keepMarkers,
paste0("AUC = ", round((1/bestCrit), digits = 15), "\n"))
}
} else if (targetVector == "nonbinary") {
cat("Selected:", keepMarkers,
paste0("Correlation = ", round((1/bestCrit), digits = 15), "\n"))
}
}
# second loop to add another marker --------------------------------#
if (nMarkers != 1){
allCrit <- numeric()
realPrev <- realMarkers[ ,keepIndex]
casePrev <- case[ ,keepIndex]
ctrlPrev <- ctrl[ ,keepIndex]
for (i in 1:(nVars*2)){
# Check the indices and their complements for the positive values, else the negative ones
if( i >= 1 && i <= nVars ) {
# ensure an index or its complement is not being used if already chosen
if (i != keepIndex && (nVars+i) != keepIndex){
caseVar <- casePrev + case[ ,i]
ctrlVar <- ctrlPrev + ctrl[ ,i]
realVar <- realPrev + realMarkers[ ,i]
if (targetVector == "binary"){
if (optimize == "pval"){
crit <- t.test(caseVar, ctrlVar, var.equal = FALSE)$p.value
} else if (optimize == "auc"){
crit <- compute.auc(caseVar, ctrlVar)
crit <- 1/crit
}
} else {
crit <- suppressWarnings(cor(real, realVar, use = "complete.obs"))
crit <- 1/crit
}
} else {
crit <- NA
}
} else if( i >= (nVars+1) && i <= 2*nVars) {
# ensure an index or its complement is not being used if already chosen
if (i != keepIndex && (i-nVars) != keepIndex){
caseVar <- casePrev + case[ ,i]
ctrlVar <- ctrlPrev + ctrl[ ,i]
realVar <- realPrev + realMarkers[ ,i]
if (targetVector == "binary"){
if (optimize == "pval"){
crit <- t.test(caseVar, ctrlVar, var.equal = FALSE)$p.value
} else if (optimize == "auc"){
crit <- compute.auc(caseVar, ctrlVar)
crit <- 1/crit
}
} else {
crit <- suppressWarnings(cor(real, realVar, use = "complete.obs"))
crit <- 1/crit
}
} else {
crit <- NA
}
} else {
crit <- NA # Should really never get here
}
allCrit[i] <- crit
}
# end of second loop ----------------------------------------------#
allCrit[allCrit < 0] <- NA
# check if the latest p is lower than the previous p #
continue <- ifelse(bestCrit[length(bestCrit)] > min(allCrit, na.rm = TRUE), TRUE, FALSE)
if (continue == TRUE){
keepMarkers <- append(keepMarkers, names(realMarkers)[which.min(allCrit)])
bestCrit <- append(bestCrit, min(allCrit, na.rm = TRUE))
keepIndex <- append(keepIndex, which.min(allCrit))
if (length(keepMarkers) == nMarkers) continue <- FALSE
}
if (verbose == TRUE) {
if(targetVector == "binary") {
if (optimize == "pval"){
cat("Selected:", keepMarkers[length(keepMarkers)],
paste0("p value = ", round(bestCrit[length(bestCrit)], digits = 15), "\n"))
} else if (optimize == "auc"){
cat("Selected:", keepMarkers[length(keepMarkers)],
paste0("AUC = ", round((1/bestCrit[length(bestCrit)]), digits = 15), "\n"))
}
} else if (targetVector == "nonbinary") {
cat("Selected:", keepMarkers[length(keepMarkers)],
paste0("Correlation = ", round((1/bestCrit[length(bestCrit)]), digits = 15), "\n"))
}
}
# loop for third through nMarker ----------------------------------#
while (continue == TRUE){
allCrit <- numeric()
casePrev <- rowSums(case[ ,keepIndex], na.rm = TRUE)
ctrlPrev <- rowSums(ctrl[ ,keepIndex], na.rm = TRUE)
realPrev <- rowSums(realMarkers[ ,keepIndex], na.rm = TRUE)
for (i in 1:(nVars*2)){
if( i >= 1 && i <= nVars ) {
if ( !(i %in% keepIndex) && !((nVars+i) %in% keepIndex) ){
caseVar <- casePrev + case[ ,i]
ctrlVar <- ctrlPrev + ctrl[ ,i]
realVar <- realPrev + realMarkers[ ,i]
if (targetVector == "binary"){
if (optimize == "pval"){
crit <- t.test(caseVar, ctrlVar, var.equal = FALSE)$p.value
} else if (optimize == "auc"){
crit <- compute.auc(caseVar, ctrlVar)
crit <- 1/crit
}
} else {
crit <- suppressWarnings(cor(real, realVar, use = "complete.obs"))
crit <- 1/crit
}
} else {
crit <- NA
}
} else if( i >= (nVars+1) && i <= 2*nVars) {
if ( !(i %in% keepIndex) && !((i-nVars) %in% keepIndex) ){
caseVar <- casePrev + case[ ,i]
ctrlVar <- ctrlPrev + ctrl[ ,i]
realVar <- realPrev + realMarkers[ ,i]
if (targetVector == "binary"){
if (optimize == "pval"){
crit <- t.test(caseVar, ctrlVar, var.equal = FALSE)$p.value
} else if (optimize == "auc"){
crit <- compute.auc(caseVar, ctrlVar)
crit <- 1/crit
}
} else {
crit <- suppressWarnings(cor(real, realVar, use = "complete.obs"))
crit <- 1/crit
}
} else {
crit <- NA
}
} else {
crit <- NA # Should really never get here
}
allCrit[i] <- crit
}
allCrit[allCrit < 0] <- NA
continue <- ifelse(bestCrit[length(bestCrit)] > min(allCrit, na.rm = TRUE),
TRUE, FALSE)
if (continue == TRUE){
keepMarkers <- append(keepMarkers, names(realMarkers)[which.min(allCrit)])
bestCrit <- append(bestCrit, min(allCrit, na.rm = TRUE))
keepIndex <- append(keepIndex, which.min(allCrit))
continue <- bestCrit[length(bestCrit)] < bestCrit[length(bestCrit)-1]
if (verbose == TRUE) {
if(targetVector == "binary") {
if (optimize == "pval"){
cat("Selected:", keepMarkers[length(keepMarkers)],
paste0("p value = ", round(bestCrit[length(bestCrit)], digits = 15), "\n"))
} else if (optimize == "auc"){
cat("Selected:", keepMarkers[length(keepMarkers)],
paste0("AUC = ", round((1/bestCrit[length(bestCrit)]), digits = 15), "\n"))
}
} else if (targetVector == "nonbinary") {
cat("Selected:", keepMarkers[length(keepMarkers)],
paste0("Correlation = ", round((1/bestCrit[length(bestCrit)]), digits = 15), "\n"))
}
}
}
if (length(keepMarkers) == nMarkers) continue <- FALSE
}
}
if (verbose == TRUE) cat("\n")
indexNegPos[keepIndex] <- ifelse(keepIndex > nVars, -1, 1)
finalIndex <- ifelse(keepIndex <= nVars, keepIndex, keepIndex - nVars)
finalMarkers <- data.frame(names(case)[finalIndex], indexNegPos[keepIndex], check.names = FALSE)
names(finalMarkers) <- c("Marker","Weight")
if (targetVector == "nonbinary" || optimize == "auc") {
finalBestCrit <- 1 / bestCrit[length(bestCrit)]
} else {
finalBestCrit <- bestCrit[length(bestCrit)]
}
## AUC -------------------------------------------------------------#
# create function value for each individual
if (targetVector == "binary"){
if (nMarkers != 1 & length(keepIndex) != 1){
funcValue <- c(rowSums(case[,c(keepIndex)]), rowSums(ctrl[,c(keepIndex)]))
} else {
funcValue <- c(case[,c(keepIndex)], ctrl[,c(keepIndex)])
}
funcValue <- round(funcValue, digits = 8)
# rank individual function values
ranks <- rank(funcValue, ties.method = "average")
seqCaseCtrl <- c(rep(1, nrow(case)), rep(0, nrow(ctrl)))
# set up plot -----------------------------------------------------#
all <- data.frame(funcValue,
seqCaseCtrl,
ranks)
all <- all[order(all$ranks),]
all$refx <- seq(0,1,1/(nrow(all)-1))
all$refy <- seq(0,1,1/(nrow(all)-1))
initVal <- all$seqCaseCtrl[1]
moveRight <- ifelse(initVal == 0, nrow(case), nrow(ctrl))
moveUp <- ifelse(initVal == 0, nrow(ctrl), nrow(case))
# walk the ROC curve: step up for one class, step right for the other
for (i in 2:nrow(all)){
all$x[1] <- 0
all$y[1] <- 0
if (all$seqCaseCtrl[i] == initVal){
all$x[i] = all$x[i-1]
all$y[i] = all$y[i-1] + 1/(moveUp-1)
} else {
all$x[i] = all$x[i-1] + 1/(moveRight)
all$y[i] = all$y[i-1]
}
}
# if the plot prints upside-down, switch values for
# x and y
n <- round(length(all$refy)/2, digits = 0)
if (all$refy[n] > all$y[n]){
all$a <- all$x
all$b <- all$y
all$x <- all$b
all$y <- all$a
}
rocPlot <- ggplot(all, aes(x = x, y = y)) +
geom_line(size = 1) +
geom_line(aes(x = refx, y = refy, colour = "red"), size = 1.5) +
scale_x_continuous(limits = c(0,1)) +
theme_bw() +
theme(legend.position = "none") +
ylab("True Positive Rate (Sensitivity)") +
xlab("False Positive Rate (1 - Specificity)")
# set up plot -----------------------------------------------------#
# compute arguments for AUC
caseFunc <- sum(ranks[1:nrow(case)]) - nrow(case)*(nrow(case)+1)/2
ctrlFunc <- sum(ranks[(nrow(case)+1):length(ranks)]) - nrow(ctrl)*(nrow(ctrl)+1)/2
# compute AUC
auc <- round(max(ctrlFunc, caseFunc)/(caseFunc + ctrlFunc), digits = 4)
} else {
auc <- NULL
rocPlot <- NULL
}
est <- list(selection = finalMarkers,
auc = auc,
randomize = randomize,
proportion = proportion,
targetVec = targetVector,
rocPlot = rocPlot,
finalBest = finalBestCrit,
optimize = optimize)
class(est) <- "calf"
return(est)
}
compute.auc <- function(caseVar, ctrlVar){
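# Rank-based AUC (Mann-Whitney): sum(ranks) - n*(n+1)/2 for a group is its
# U statistic, and caseFunc + ctrlFunc = n_case * n_ctrl, so the ratio below
# is the AUC; taking max() keeps the result >= 0.5 regardless of orientation.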
funcValue <- c(caseVar, ctrlVar)
funcValue <- round(funcValue, digits = 8)
ranks <- rank(funcValue, ties.method = "average")
caseFunc <- sum(ranks[1:length(caseVar)]) - length(caseVar)*(length(caseVar)+1)/2
ctrlFunc <- sum(ranks[(length(caseVar)+1):length(ranks)]) - length(ctrlVar)*(length(ctrlVar)+1)/2
auc <- round(max(ctrlFunc, caseFunc)/(caseFunc + ctrlFunc), digits = 4)
return(auc)
}
|
/scratch/gouwar.j/cran-all/cranData/CALF/R/calf_internal.R
|
#'@import data.table
#'@import ggplot2
#'@title calf
#'@description Coarse Approximation Linear Function
#'@param data Matrix or data frame. First column must contain case/control dummy coded variable (if targetVector = "binary"). Otherwise, first column must contain real number vector corresponding to selection variable (if targetVector = "nonbinary"). All other columns contain relevant markers.
#'@param nMarkers Maximum number of markers to include in creation of sum.
#'@param targetVector Indicate "binary" for target vector with two options (e.g., case/control). Indicate "nonbinary" for target vector with real numbers.
#'@param optimize Criterion to optimize, "pval" or "auc" (if targetVector = "binary") or "corr" (if targetVector = "nonbinary"). Defaults to "pval".
#'@param verbose Logical. Indicate TRUE to print activity at each iteration to console. Defaults to FALSE.
#'@return A data frame containing the chosen markers and their assigned weight (-1 or 1)
#'@return The optimal AUC, pval, or correlation for the classification.
#'@return If targetVector is binary, rocPlot. A plot object from ggplot2 for the receiver operating curve.
#'@examples
#'calf(data = CaseControl, nMarkers = 6, targetVector = "binary", optimize = "pval")
#'@export
calf <- function(data,
nMarkers,
targetVector,
optimize = "pval",
verbose = FALSE){
calf_internal(data,
nMarkers,
proportion = NULL,
randomize = FALSE,
targetVector = targetVector,
times = 1,
optimize = optimize,
verbose = verbose)
}
#'@title calf_fractional
#'@description Randomly selects from binary input provided to data parameter while ensuring the requested proportions of case and control variables are used and runs Coarse Approximation Linear Function.
#'@param data Matrix or data frame. Must be binary data whose first column contains the case/control dummy coded variable, as the function is only appropriate for binary data.
#'@param nMarkers Maximum number of markers to include in creation of sum.
#'@param controlProportion Proportion of control samples to use, default is .8.
#'@param caseProportion Proportion of case samples to use, default is .8.
#'@param optimize Criterion to optimize, "pval" or "auc". Defaults to "pval".
#'@param verbose Logical. Indicate TRUE to print activity at each iteration to console. Defaults to FALSE.
#'@return A data frame containing the chosen markers and their assigned weight (-1 or 1)
#'@return The optimal AUC or pval for the classification.
#'@return rocPlot. A plot object from ggplot2 for the receiver operating curve.
#'@examples
#'calf_fractional(data = CaseControl, nMarkers = 6, controlProportion = .8, caseProportion = .4)
#'@export
calf_fractional <- function(data,
nMarkers,
controlProportion = .8,
caseProportion = .8,
optimize = "pval",
verbose = FALSE){
calf_internal(data,
nMarkers,
proportion = c(controlProportion,caseProportion),
randomize = FALSE,
targetVector = "binary",
times = 1,
optimize = optimize,
verbose = verbose)
}
#'@title calf_randomize
#'@description Randomly selects from binary input provided to data parameter and runs Coarse Approximation Linear Function.
#'@param data Matrix or data frame. Must be binary data whose first column contains the case/control dummy coded variable, as the function is only appropriate for binary data.
#'@param nMarkers Maximum number of markers to include in creation of sum.
#'@param targetVector Indicate "binary" for target vector with two options (e.g., case/control). Indicate "nonbinary" for target vector with real numbers.
#'@param times Numeric. Indicates the number of replications to run with randomization.
#'@param optimize Criterion to optimize if targetVector = "binary". Indicate "pval" to optimize the p-value corresponding to the t-test distinguishing case and control. Indicate "auc" to optimize the AUC.
#'@param verbose Logical. Indicate TRUE to print activity at each iteration to console. Defaults to FALSE.
#'@return A data frame containing the chosen markers and their assigned weight (-1 or 1)
#'@return The optimal AUC, pval, or correlation for the classification.
#'@return aucHist A histogram of the AUCs across replications, if applicable.
#'@examples
#'calf_randomize(data = CaseControl, nMarkers = 6, targetVector = "binary", times = 5)
#'@export
calf_randomize <- function(data,
nMarkers,
targetVector,
times = 1,
optimize = "pval",
verbose = FALSE){
auc <- numeric()
finalBest <- numeric()
allMarkers <- character()
count <- 1
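  # AUC is bound to NULL up front, most likely to avoid R CMD check
  # "no visible binding" notes caused by ggplot2's non-standard
  # evaluation of aes(AUC) below.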
AUC = NULL
randomize = TRUE
repeat {
out <- calf_internal(data,
nMarkers,
proportion = NULL,
randomize = randomize,
targetVector = targetVector,
times,
optimize = optimize,
verbose = verbose)
if(!is.null(out$auc))
auc[count] <- out$auc
selection <- out$selection
markers <- as.character(out$selection[,1])
finalBest <- append(finalBest, out$finalBest)
allMarkers <- as.character((append(allMarkers, markers)))
if (count == times) break
count <- count + 1
}
if (times > 1) {
summaryMarkers <- as.data.frame(table(allMarkers), check.names = FALSE)
colnames(summaryMarkers) <- c("Marker", "Frequency")
summaryMarkers <- summaryMarkers[order(-summaryMarkers$Frequency),]
if (targetVector == "binary"){
auc <- as.data.frame(auc)
colnames(auc) <- "AUC"
aucHist <- ggplot(auc, aes(AUC)) +
geom_histogram() +
ylab("Count") +
xlab("AUC") +
scale_x_continuous() +
theme_bw()
} else aucHist = NULL
} else {
summaryMarkers = NULL
aucHist = NULL
}
if (times == 1 & targetVector == "binary") {
rocPlot <- out$rocPlot
} else {
rocPlot <- NULL
}
est <- list(selection = selection,
multiple = summaryMarkers,
auc = auc,
randomize = randomize,
targetVec = targetVector,
aucHist = aucHist,
times = times,
finalBest = finalBest,
rocPlot = rocPlot,
optimize = optimize,
verbose = verbose)
class(est) <- "calf_randomize"
return(est)
}
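# Illustrative sketch (not run): with times > 1 the returned object
# aggregates across replications, so the marker frequency table and the
# AUC histogram become the outputs of interest (CaseControl is the
# package example data).
if (FALSE) {
  rfit <- calf_randomize(data = CaseControl, nMarkers = 6,
                         targetVector = "binary", times = 50)
  rfit$multiple  # how often each marker was selected across replications
  rfit$aucHist   # histogram of the 50 optimized AUCs
}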
#'@title calf_subset
#'@description Runs Coarse Approximation Linear Function on a random subset of the data provided, resulting in the same proportion applied to case and control, when applicable.
#'@param data Matrix or data frame. First column must contain case/control dummy coded variable (if targetVector = "binary"). Otherwise, first column must contain real number vector corresponding to selection variable (if targetVector = "nonbinary"). All other columns contain relevant markers.
#'@param nMarkers Maximum number of markers to include in creation of sum.
#'@param proportion Numeric. A value between 0 and 1 indicating the proportion of cases and controls to use in analysis (if targetVector = "binary"). If targetVector = "nonbinary", this is just a proportion of the full sample. Used to evaluate robustness of solution. Defaults to 0.8.
#'@param targetVector Indicate "binary" for target vector with two options (e.g., case/control). Indicate "nonbinary" for target vector with real numbers.
#'@param times Numeric. Indicates the number of replications to run with randomization.
#'@param optimize Criteria to optimize if targetVector = "binary". Indicate "pval" to optimize the p-value from the t-test distinguishing case and control, or "auc" to optimize the AUC.
#'@param verbose Logical. Indicate TRUE to print activity at each iteration to console. Defaults to FALSE.
#'@return A data frame containing the chosen markers and their assigned weight (-1 or 1)
#'@return The optimal AUC, pval, or correlation for the classification. If multiple replications are requested, a data.frame containing all optimized values across all replications is returned.
#'@return aucHist A histogram of the AUCs across replications, if applicable.
#'@examples
#'calf_subset(data = CaseControl, nMarkers = 6, targetVector = "binary", times = 5)
#'@export
calf_subset <- function(data,
nMarkers,
proportion = .8,
targetVector,
times = 1,
optimize = "pval",
verbose = FALSE){
auc <- numeric()
allMarkers <- character()
finalBest <- numeric()
count <- 1
AUC = NULL
repeat {
out <- calf_internal(data,
nMarkers,
proportion = proportion,
randomize = FALSE,
targetVector = targetVector,
times,
optimize = optimize,
verbose = verbose)
if(!is.null(out$auc))
auc[count] <- out$auc
selection <- out$selection
finalBest <- append(finalBest, out$finalBest)
markers <- as.character(out$selection[,1])
allMarkers <- as.character((append(allMarkers, markers)))
if (count == times) break
count <- count + 1
}
if (times > 1){
summaryMarkers <- as.data.frame(table(allMarkers), check.names = FALSE)
colnames(summaryMarkers) <- c("Marker", "Frequency")
summaryMarkers <- summaryMarkers[order(-summaryMarkers$Frequency),]
if (targetVector == "binary"){
auc <- as.data.frame(auc)
colnames(auc) <- "AUC"
aucHist <- ggplot(auc, aes(AUC)) +
geom_histogram() +
ylab("Count") +
xlab("AUC") +
scale_x_continuous() +
theme_bw()
} else aucHist = NULL
} else {
summaryMarkers = NULL
aucHist = NULL
}
if (times == 1 & targetVector == "binary") {
rocPlot <- out$rocPlot
} else {
rocPlot <- NULL
}
est <- list(selection = selection,
multiple = summaryMarkers,
auc = auc,
proportion = proportion,
targetVec = targetVector,
aucHist = aucHist,
times = times,
finalBest = finalBest,
rocPlot = rocPlot,
optimize = optimize)
class(est) <- "calf_subset"
return(est)
}
#'@title calf_exact_binary_subset
#'@description Runs Coarse Approximation Linear Function on a random subset of binary data provided, with the ability to precisely control the number of case and control data used.
#'@param data Matrix or data frame. First column must contain case/control dummy coded variable.
#'@param nMarkers Maximum number of markers to include in creation of sum.
#'@param nCase Numeric. A value indicating the number of case data to use.
#'@param nControl Numeric. A value indicating the number of control data to use.
#'@param times Numeric. Indicates the number of replications to run with randomization.
#'@param optimize Criteria to optimize. Indicate "pval" to optimize the p-value corresponding to the t-test distinguishing case and control. Indicate "auc" to optimize the AUC.
#'@param verbose Logical. Indicate TRUE to print activity at each iteration to console. Defaults to FALSE.
#'@return A data frame containing the chosen markers and their assigned weight (-1 or 1)
#'@return The optimal AUC or pval for the classification. If multiple replications are requested, a data.frame containing all optimized values across all replications is returned.
#'@return aucHist A histogram of the AUCs across replications, if applicable.
#'@examples
#'calf_exact_binary_subset(data = CaseControl, nMarkers = 6, nCase = 5, nControl = 8, times = 5)
#'@export
calf_exact_binary_subset <- function(data,
nMarkers,
nCase,
nControl,
times = 1,
optimize = "pval",
verbose = FALSE){
targetVector = "binary"
proportion = 1
#Determine which is case and which is control
ctrlRows <- which(data[ ,1] == 0)
caseRows <- which(data[ ,1] == 1)
auc <- numeric()
allMarkers <- character()
finalBest <- numeric()
count <- 1
AUC = NULL
repeat {
#Resample the binary data, thus controlling the randomization here.
keepRows <- c(sample(ctrlRows)[1:nControl], sample(caseRows)[1:nCase])
resampledData <- data[keepRows, ]
out <- calf_internal(resampledData,
nMarkers,
proportion = proportion,
randomize = FALSE,
targetVector = targetVector,
times,
optimize = optimize,
verbose = verbose)
auc[count] <- out$auc
selection <- out$selection
finalBest <- append(finalBest, out$finalBest)
markers <- as.character(out$selection[,1])
allMarkers <- as.character((append(allMarkers, markers)))
if (count == times) break
count <- count + 1
}
if (times > 1){
summaryMarkers <- as.data.frame(table(allMarkers), check.names = FALSE)
colnames(summaryMarkers) <- c("Marker", "Frequency")
summaryMarkers <- summaryMarkers[order(-summaryMarkers$Frequency),]
auc <- as.data.frame(auc)
colnames(auc) <- "AUC"
aucHist <- ggplot(auc, aes(AUC)) +
geom_histogram() +
ylab("Count") +
xlab("AUC") +
scale_x_continuous() +
theme_bw()
} else {
summaryMarkers = NULL
aucHist = NULL
}
if (times == 1) {
rocPlot <- out$rocPlot
} else {
rocPlot <- NULL
}
est <- list(selection = selection,
multiple = summaryMarkers,
auc = auc,
proportion = proportion,
targetVec = targetVector,
aucHist = aucHist,
times = times,
finalBest = finalBest,
rocPlot = rocPlot,
optimize = optimize)
class(est) <- "calf_exact_binary_subset"
return(est)
}
#'@title cv.calf
#'@description Performs cross-validation using CALF data input
#'@param data Matrix or data frame. First column must contain case/control dummy coded variable (if targetVector = "binary"). Otherwise, first column must contain real number vector corresponding to selection variable (if targetVector = "nonbinary"). All other columns contain relevant markers.
#'@param limit Maximum number of markers to include in creation of sum.
#'@param proportion Numeric. A value between 0 and 1 indicating the proportion of cases and controls to use in analysis (if targetVector = "binary") or proportion of the full sample (if targetVector = "nonbinary"). Defaults to 0.8.
#'@param times Numeric. Indicates the number of replications to run with randomization.
#'@param targetVector Indicate "binary" for target vector with two options (e.g., case/control). Indicate "nonbinary" for target vector with real numbers.
#'@param optimize Criteria to optimize if targetVector = "binary". Indicate "pval" to optimize the p-value from the t-test distinguishing case and control, or "auc" to optimize the AUC. Defaults to "pval".
#'@param outputPath The path where output files are to be written; the default is NULL, meaning no files will be written. When targetVector is "binary", a file binary.csv showing the results is written to the provided path; when targetVector is "nonbinary", the file is nonbinary.csv. In the same path, the kept and unkept data from the last iteration are written, prefixed with the targetVector type ("binary" or "nonbinary"), followed by Kept or Unkept, and suffixed with .csv. Two files containing the per-run optimizer values have "List" in their filenames and are suffixed with .txt.
#'@return A data frame containing "times" rows of CALF runs where each row represents a run of CALF on a randomized "proportion" of "data". Colunns start with the numer selected for the run, followed by AUC or pval and then all markers from "data". An entry in a marker column signifys a chosen marker for a particular run (a row) and their assigned coarse weight (-1, 0, or 1).
#'@examples
#'\dontrun{
#'cv.calf(data = CaseControl, limit = 5, times = 100, targetVector = 'binary')
#'}
#'@export
cv.calf <- function(data, limit, proportion = .8, times, targetVector, optimize = "pval", outputPath=NULL) {
if (targetVector != "binary" && targetVector != "nonbinary") {
cat('CALF ERROR: Invalid targetVector argument. Only "binary" or "nonbinary" is allowed.')
} else if (targetVector == "binary" && optimize=="corr") {
cat('CALF ERROR: Optimizing by "corr" is only applicable to nonbinary data.')
} else if (targetVector == "nonbinary" && optimize=="pval") {
cat('CALF ERROR: Optimizing by "pval" is only applicable to binary data.')
} else if (targetVector == "nonbinary" && optimize=="auc") {
cat('CALF ERROR: Optimizing by "auc" is only applicable to binary data.')
} else {
#Get the rows of interest first as there is no reason to repeat this
if (targetVector == "binary") {
ctrlRows <- which(data[ ,1] == 0)
caseRows <- which(data[ ,1] == 1)
# calculate number of case and control to keep
nCtrlKeep <- round(length(ctrlRows)*proportion, digits = 0)
nCaseKeep <- round(length(caseRows)*proportion, digits = 0)
} else if(targetVector == "nonbinary"){
nDataKeep <- round(nrow(data)*proportion, digits = 0)
}
#Build the header row for the table that will be output
if (targetVector == "binary") {
if (optimize == "pval") {
header <- c("Number Selected", "AUC", "pval", colnames(data)[-1])
} else if (optimize == "auc"){
header <- c("Number Selected", "AUC", colnames(data)[-1])
}
} else if (targetVector == "nonbinary"){
header <- c("Number Selected", "corr", colnames(data)[-1])
}
results <- matrix(0, times, length(header))
colnames(results)<-header
#Now run the CALF calculation "times" times
rowCount = 1
optimizedKeptList <- vector()
optimizedUnkeptList <- vector()
correlationList <- vector()
repeat {
if (targetVector == "binary") {
#Resample the binary data, keeping track of what was included and what was not.
keepCtrlRows <- sample(ctrlRows)[1:nCtrlKeep]
unkeptCtrlRows <- setdiff(union(ctrlRows,keepCtrlRows), intersect(ctrlRows,keepCtrlRows))
keepCaseRows <- sample(caseRows)[1:nCaseKeep]
unkeptCaseRows <- setdiff(union(caseRows,keepCaseRows), intersect(caseRows,keepCaseRows))
keepRows <- c(keepCtrlRows, keepCaseRows)
unkeptRows <- c(unkeptCtrlRows, unkeptCaseRows)
unkeptCaseData <- data[unkeptCaseRows, ]
unkeptCtrlData <- data[unkeptCtrlRows, ]
resampledData <- data[keepRows, ]
unkeptData <- data[unkeptRows, ]
if(!is.null(outputPath)) {
outputFile <- paste(outputPath, "binaryKept.csv")
fwrite(resampledData, outputFile)
outputFile <- paste(outputPath, "binaryUnkept.csv")
fwrite(unkeptData, outputFile)
}
} else if(targetVector == "nonbinary"){
#Resample the nonbinary data
keepRows <- sample(1:nrow(data))[1:nDataKeep]
        unkeptRows <- setdiff(seq_len(nrow(data)), keepRows)
resampledData <- data[keepRows, ]
unkeptData <- data[unkeptRows, ]
if(!is.null(outputPath)) {
outputFile <- paste(outputPath, "nonbinaryKept.csv")
fwrite(resampledData, outputFile)
outputFile <- paste(outputPath, "nonbinaryUnkept.csv")
fwrite(unkeptData, outputFile)
}
}
answer = calf_internal(data=resampledData,
nMarkers = limit,
randomize = FALSE,
                              proportion = NULL,
times = 1,
targetVector = targetVector,
optimize = optimize,
verbose = FALSE)
#Keep track of the optimizer values returned for each run
if(optimize == "auc") {
results[rowCount, "AUC"] = answer$auc
optimizedKeptList <- append(optimizedKeptList, answer$auc)
} else if(optimize == "pval") {
results[rowCount, "AUC"] = answer$auc
results[rowCount, "pval"] = answer$finalBest
optimizedKeptList <- append(optimizedKeptList, answer$finalBest)
} else if(optimize == "corr") {
results[rowCount, "corr"] = answer$finalBest
optimizedKeptList <- append(optimizedKeptList, answer$finalBest)
}
#Keep a tally of the results per calf run
markerCount = 1
markerList = as.character(answer$selection$Marker)
lenMarkerList = length(markerList)
results[rowCount, "Number Selected"] = lenMarkerList
repeat {
results[rowCount, markerList[markerCount]] = answer$selection$Weight[markerCount]
markerCount <- markerCount + 1
if (markerCount > lenMarkerList)
break
}
#Perform the cross-validation
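      #The coarse weights fitted on the kept rows are applied to the
      #held-out rows: each unkept sample gets a score equal to its
      #weighted marker sum, and the AUC of those scores against the
      #true labels (or their correlation with the target, for
      #nonbinary data) estimates out-of-sample performance.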
if (targetVector == "binary") {
if (optimize == "pval") {
header <- c("Number Selected", "AUC", "pval", colnames(data)[-1])
weightsTimesUnkept<-as.matrix(unkeptData[-1]) %*% t(as.matrix(results[rowCount,-1:-3]))
resultCtrlData = weightsTimesUnkept[1:length(unkeptCtrlData[,1])]
resultCaseData = weightsTimesUnkept[length(unkeptCtrlData[,1])+1:length(unkeptCaseData[,1])]
#optimizedUnkeptList<-append(optimizedUnkeptList, t.test(resultCaseData, resultCtrlData, var.equal = FALSE)$p.value)
optimizedUnkeptList<-append(optimizedUnkeptList, compute.auc(resultCaseData, resultCtrlData))
} else if (optimize == "auc"){
          weightsTimesUnkept <- as.matrix(unkeptData[-1]) %*% results[rowCount, -1:-2]
          resultCtrlData <- weightsTimesUnkept[1:nrow(unkeptCtrlData)]
          resultCaseData <- weightsTimesUnkept[nrow(unkeptCtrlData) + (1:nrow(unkeptCaseData))]
optimizedUnkeptList<-append(optimizedUnkeptList, compute.auc(resultCaseData, resultCtrlData))
}
} else if (targetVector == "nonbinary"){
        weightsTimesUnkept <- as.matrix(unkeptData[-1]) %*% results[rowCount, -1:-2]
corrResult <- cor(weightsTimesUnkept,unkeptData[1])
correlationList <- append(correlationList,corrResult )
}
rowCount <- rowCount + 1
if (rowCount > times)
break
}
}
#If an outputPath was provided, then output the extra data generated by the CV
if(!is.null(outputPath)) {
#Write the results
if (targetVector == "binary") {
outputFile <- paste(outputPath, "binary.csv")
fwrite(results, outputFile)
outputFile <- paste(outputPath, paste(optimize,"KeptList.txt", sep=""))
write(optimizedKeptList, outputFile, sep="\n")
outputFile <- paste(outputPath, "AUCUnkeptList.txt")
write(optimizedUnkeptList, outputFile, sep="\n")
} else if(targetVector == "nonbinary"){
outputFile <- paste(outputPath, "nonbinary.csv")
fwrite(results, outputFile)
outputFile <- paste(outputPath, "corrUnkeptList.txt")
write(correlationList, outputFile, sep="\n")
}
}
return(results)
}
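# Illustrative sketch (not run): cross-validate on the CaseControl
# example data and summarise the returned matrix. With optimize = "pval"
# the first three columns are "Number Selected", "AUC" and "pval"; the
# remaining columns hold the coarse marker weights.
if (FALSE) {
  cv <- cv.calf(data = CaseControl, limit = 5, times = 100,
                targetVector = "binary", optimize = "pval")
  summary(cv[, "AUC"])  # kept-data AUC across the 100 runs
  # markers most frequently assigned a nonzero weight:
  sort(colSums(cv[, -(1:3)] != 0), decreasing = TRUE)[1:5]
}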
#'@title perm_target_cv.calf
#'@description Performs cross-validation using CALF data input and randomizes the target column with each iteration of the loop, controlled by 'times'.
#'@param data Matrix or data frame. First column must contain case/control dummy coded variable (if targetVector = "binary"). Otherwise, first column must contain real number vector corresponding to selection variable (if targetVector = "nonbinary"). All other columns contain relevant markers.
#'@param limit Maximum number of markers to include in creation of sum.
#'@param proportion Numeric. A value between 0 and 1 indicating the proportion of cases and controls to use in analysis (if targetVector = "binary") or proportion of the full sample (if targetVector = "nonbinary"). Defaults to 0.8.
#'@param times Numeric. Indicates the number of replications to run with randomization.
#'@param targetVector Indicate "binary" for target vector with two options (e.g., case/control). Indicate "nonbinary" for target vector with real numbers.
#'@param optimize Criteria to optimize if targetVector = "binary". Indicate "pval" to optimize the p-value from the t-test distinguishing case and control, or "auc" to optimize the AUC. Defaults to "pval".
#'@param outputPath The path where output files are to be written; the default is NULL, meaning no files will be written. When targetVector is "binary", a file binary.csv showing the results is written to the provided path; when targetVector is "nonbinary", the file is nonbinary.csv. In the same path, the kept and unkept data from the last iteration are written, prefixed with the targetVector type ("binary" or "nonbinary"), followed by Kept or Unkept, and suffixed with .csv. Two files containing the per-run optimizer values have "List" in their filenames and are suffixed with .txt.
#'@return A data frame containing "times" rows of CALF runs where each row represents a run of CALF on a randomized "proportion" of "data". Colunns start with the numer selected for the run, followed by AUC or pval and then all markers from "data". An entry in a marker column signifys a chosen marker for a particular run (a row) and their assigned coarse weight (-1, 0, or 1).
#'@examples
#'\dontrun{
#'perm_target_cv.calf(data = CaseControl, limit = 5, times = 100, targetVector = 'binary')
#'}
#'@export
perm_target_cv.calf <- function(data, limit, proportion = .8, times, targetVector, optimize = "pval", outputPath=NULL) {
if (targetVector != "binary" && targetVector != "nonbinary") {
cat('CALF ERROR: Invalid targetVector argument. Only "binary" or "nonbinary" is allowed.')
} else if (targetVector == "binary" && optimize=="corr") {
cat('CALF ERROR: Optimizing by "corr" is only applicable to nonbinary data.')
} else if (targetVector == "nonbinary" && optimize=="pval") {
cat('CALF ERROR: Optimizing by "pval" is only applicable to binary data.')
} else if (targetVector == "nonbinary" && optimize=="auc") {
cat('CALF ERROR: Optimizing by "auc" is only applicable to binary data.')
} else {
#Get the rows of interest first as there is no reason to repeat this
if (targetVector == "binary") {
ctrlRows <- which(data[ ,1] == 0)
caseRows <- which(data[ ,1] == 1)
# calculate number of case and control to keep
nCtrlKeep <- round(length(ctrlRows)*proportion, digits = 0)
nCaseKeep <- round(length(caseRows)*proportion, digits = 0)
} else if(targetVector == "nonbinary"){
nDataKeep <- round(nrow(data)*proportion, digits = 0)
}
#Build the header row for the table that will be output
if (targetVector == "binary") {
if (optimize == "pval") {
header <- c("Number Selected", "AUC", "pval", colnames(data)[-1])
} else if (optimize == "auc"){
header <- c("Number Selected", "AUC", colnames(data)[-1])
}
} else if (targetVector == "nonbinary"){
header <- c("Number Selected", "corr", colnames(data)[-1])
}
results <- matrix(0, times, length(header))
colnames(results)<-header
#Now run the CALF calculation "times" times
rowCount = 1
optimizedKeptList <- vector()
optimizedUnkeptList <- vector()
correlationList <- vector()
repeat {
print(paste("Iteration: ", rowCount))
if (targetVector == "binary") {
#Resample the binary data, keeping track of what was included and what was not.
shuffledCtrl = ctrlRows
shuffledCtrl[,1] = sample(shuffledCtrl[,1])
keepCtrlRows <- sample(shuffledCtrl)[1:nCtrlKeep]
unkeptCtrlRows <- setdiff(union(shuffledCtrl,keepCtrlRows), intersect(shuffledCtrl,keepCtrlRows))
shuffledCase = caseRows
shuffledCase[,1] = sample(shuffledCase[,1])
keepCaseRows <- sample(shuffledCase)[1:nCaseKeep]
unkeptCaseRows <- setdiff(union(shuffledCase,keepCaseRows), intersect(shuffledCase,keepCaseRows))
keepRows <- c(keepCtrlRows, keepCaseRows)
unkeptRows <- c(unkeptCtrlRows, unkeptCaseRows)
unkeptCaseData <- data[unkeptCaseRows, ]
unkeptCtrlData <- data[unkeptCtrlRows, ]
resampledData <- data[keepRows, ]
unkeptData <- data[unkeptRows, ]
if(!is.null(outputPath)) {
outputFile <- paste(outputPath, "binaryKept.csv")
fwrite(resampledData, outputFile)
outputFile <- paste(outputPath, "binaryUnkept.csv")
fwrite(unkeptData, outputFile)
}
} else if(targetVector == "nonbinary"){
shuffledData = data
shuffledData[,1] = sample(shuffledData[,1])
keepRows <- sample(1:nrow(shuffledData))[1:nDataKeep]
        unkeptRows <- setdiff(seq_len(nrow(shuffledData)), keepRows)
resampledData <- shuffledData[keepRows, ]
unkeptData <- shuffledData[unkeptRows, ]
if(!is.null(outputPath)) {
outputFile <- paste(outputPath, "nonbinaryKept.csv")
fwrite(resampledData, outputFile)
outputFile <- paste(outputPath, "nonbinaryUnkept.csv")
fwrite(unkeptData, outputFile)
}
}
answer = calf_internal(data=resampledData,
nMarkers = limit,
randomize = FALSE,
                              proportion = NULL,
times = 1,
targetVector = targetVector,
optimize = optimize,
verbose = FALSE)
#Keep track of the optimizer values returned for each run
if(optimize == "auc") {
results[rowCount, "AUC"] = answer$auc
optimizedKeptList <- append(optimizedKeptList, answer$auc)
} else if(optimize == "pval") {
results[rowCount, "AUC"] = answer$auc
results[rowCount, "pval"] = answer$finalBest
optimizedKeptList <- append(optimizedKeptList, answer$finalBest)
} else if(optimize == "corr") {
results[rowCount, "corr"] = answer$finalBest
optimizedKeptList <- append(optimizedKeptList, answer$finalBest)
}
#Keep a tally of the results per calf run
markerCount = 1
markerList = as.character(answer$selection$Marker)
lenMarkerList = length(markerList)
results[rowCount, "Number Selected"] = lenMarkerList
repeat {
results[rowCount, markerList[markerCount]] = answer$selection$Weight[markerCount]
markerCount <- markerCount + 1
if (markerCount > lenMarkerList)
break
}
#Perform the cross-validation
if (targetVector == "binary") {
if (optimize == "pval") {
header <- c("Number Selected", "AUC", "pval", colnames(data)[-1])
weightsTimesUnkept<-as.matrix(unkeptData[-1]) %*% t(as.matrix(results[rowCount,-1:-3]))
resultCtrlData = weightsTimesUnkept[1:length(unkeptCtrlData[,1])]
resultCaseData = weightsTimesUnkept[length(unkeptCtrlData[,1])+1:length(unkeptCaseData[,1])]
#optimizedUnkeptList<-append(optimizedUnkeptList, t.test(resultCaseData, resultCtrlData, var.equal = FALSE)$p.value)
optimizedUnkeptList<-append(optimizedUnkeptList, compute.auc(resultCaseData, resultCtrlData))
} else if (optimize == "auc"){
          weightsTimesUnkept <- as.matrix(unkeptData[-1]) %*% results[rowCount, -1:-2]
          resultCtrlData <- weightsTimesUnkept[1:nrow(unkeptCtrlData)]
          resultCaseData <- weightsTimesUnkept[nrow(unkeptCtrlData) + (1:nrow(unkeptCaseData))]
optimizedUnkeptList<-append(optimizedUnkeptList, compute.auc(resultCaseData, resultCtrlData))
}
} else if (targetVector == "nonbinary"){
        weightsTimesUnkept <- as.matrix(unkeptData[-1]) %*% results[rowCount, -1:-2]
corrResult <- cor(weightsTimesUnkept,unkeptData[1])
correlationList <- append(correlationList,corrResult )
}
rowCount <- rowCount + 1
if (rowCount > times)
break
}
}
#If an outputPath was provided, then output the extra data generated by the CV
if(!is.null(outputPath)) {
#Write the results
if (targetVector == "binary") {
outputFile <- paste(outputPath, "binary.csv")
fwrite(results, outputFile)
outputFile <- paste(outputPath, paste(optimize,"KeptList.txt", sep=""))
write(optimizedKeptList, outputFile, sep="\n")
outputFile <- paste(outputPath, "AUCUnkeptList.txt")
write(optimizedUnkeptList, outputFile, sep="\n")
} else if(targetVector == "nonbinary"){
outputFile <- paste(outputPath, "nonbinary.csv")
fwrite(results, outputFile)
outputFile <- paste(outputPath, "corrUnkeptList.txt")
write(correlationList, outputFile, sep="\n")
}
}
return(results)
}
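# Illustrative sketch (not run): the permuted-target runs provide a null
# reference for the cross-validated statistic, so comparing them with
# ordinary cv.calf runs gives a rough permutation check that CALF is
# finding real signal rather than noise.
if (FALSE) {
  real <- cv.calf(CaseControl, limit = 5, times = 100,
                  targetVector = "binary", optimize = "auc")
  null <- perm_target_cv.calf(CaseControl, limit = 5, times = 100,
                              targetVector = "binary", optimize = "auc")
  # approximate empirical p-value for the mean kept-data AUC
  mean(null[, "AUC"] >= mean(real[, "AUC"]))
}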
#'@title write.calf
#'@description Writes the output of a CALF run to a comma-separated file.
#'@param x A CALF object, as returned by calf().
#'@param filename The output filename
#'@export
write.calf <- function(x, filename){
write.table(x$selection,
file = filename,
sep = ",",
row.names = FALSE)
if(x$targetVec == "binary" && x$optimize=="auc") {
write( paste("\n","AUC ,",x$finalBest),
file = filename,
append = TRUE)
} else if(x$targetVec == "binary" && x$optimize=="pval") {
write( paste("\n","pval ,",x$finalBest),
file = filename,
append = TRUE)
} else if(x$targetVec == "nonbinary") {
write( paste("\n","corr,", x$finalBest),
file = filename,
append = TRUE)
}
}
#'@title write.calf_randomize
#'@description Writes the output of a CALF randomize run to a comma-separated file.
#'@param x A calf_randomize object, as returned by calf_randomize().
#'@param filename The output filename
#'@export
write.calf_randomize <- function(x, filename){
  oldwarn <- options(warn = -1)
  on.exit(options(oldwarn))
write.table(x$selection,
file = filename,
sep = ",",
row.names = FALSE)
write("\n",
file = filename,
append = TRUE)
write.table(x$multiple,
file = filename,
sep = ",",
row.names = FALSE,
append = TRUE)
write("\n",
file = filename,
append = TRUE)
if(x$targetVec == "binary" && x$optimize=="auc") {
finalBest = as.data.frame(x$finalBest)
colnames(finalBest) <- c("AUC")
write.table( finalBest,
file = filename,
sep = ",",
append = TRUE)
} else if(x$targetVec == "binary" && x$optimize=="pval") {
finalBest = as.data.frame(x$finalBest)
colnames(finalBest) <- c("pval")
write.table( finalBest,
file = filename,
sep = ",",
append = TRUE)
} else if(x$targetVec == "nonbinary") {
finalBest = as.data.frame(x$finalBest)
colnames(finalBest) <- c("corr")
write.table( finalBest,
file = filename,
sep = ",",
append = TRUE)
}
}
#'@title write.calf_subset
#'@description Writes the output of a CALF subset run to a comma-separated file.
#'@param x A calf_subset object, as returned by calf_subset().
#'@param filename The output filename
#'@export
write.calf_subset <- function(x, filename){
  oldwarn <- options(warn = -1)
  on.exit(options(oldwarn))
write.table(x$selection,
file = filename,
sep = ",",
row.names = FALSE)
write("\n",
file = filename,
append = TRUE)
write.table(x$multiple,
file = filename,
sep = ",",
row.names = FALSE,
append = TRUE)
write("\n",
file = filename,
append = TRUE)
if(x$targetVec == "binary" && (x$optimize=="auc")) {
finalBest = as.data.frame(x$finalBest)
colnames(finalBest) <- c("AUC")
write.table( finalBest,
file = filename,
sep = ",",
append = TRUE)
} else if(x$targetVec == "binary" && x$optimize=="pval") {
finalBest = as.data.frame(x$finalBest)
colnames(finalBest) <- c("pval")
write.table( finalBest,
file = filename,
sep = ",",
append = TRUE)
} else if(x$targetVec == "nonbinary") {
finalBest = as.data.frame(x$finalBest)
colnames(finalBest) <- c("corr")
write.table( finalBest,
file = filename,
sep = ",",
append = TRUE)
}
}
|
/scratch/gouwar.j/cran-all/cranData/CALF/R/calf_wrappers.R
|
#'@method print calf
#'@export
print.calf <- function(x, ...){
if (x$randomize == TRUE) cat("Randomized Output:", "\n", "\n")
if (!is.null(x$proportion)) cat("Proportion of Data:", x$proportion, "\n", "\n")
print.data.frame(x$selection, row.names = FALSE, check.names = FALSE)
if (x$targetVec == "binary") {
cat("\nAUC:", x$auc)
if (x$optimize == "pval") cat("\nFinal p-value:", x$finalBest)
} else {
cat("\nFinal Correlation:", x$finalBest)
}
}
#'@method print calf_randomize
#'@export
print.calf_randomize <- function(x, ...){
if (x$times == 1) {
cat("Randomized Output Across", x$times, "Replication:", "\n", "\n")
print.data.frame(x$selection, row.names = FALSE, check.names = FALSE)
if (x$targetVec == "binary") {
cat("\nAUC:", x$auc)
if (x$optimize == "pval") cat("\nFinal p-value:", x$finalBest)
} else {
cat("\nFinal Correlation:", x$finalBest)
}
} else {
cat("Randomized Output Across", x$times, "Replications:", "\n", "\n")
print.data.frame(x$multiple, row.names = FALSE, check.names = FALSE)
if (x$targetVec == "binary"){
cat("\n", "\n")
      print.data.frame(x$auc, row.names = FALSE)
} else {
cat("\nFinal Correlations:", "\n",
round(x$finalBest, digits = 4))
}
}
}
#'@method print calf_subset
#'@export
print.calf_subset <- function(x, ...){
if (x$times == 1) {
cat("Proportion =", x$proportion, "Output Across", x$times, "Replication:", "\n", "\n")
print.data.frame(x$selection, row.names = FALSE, check.names = FALSE)
if (x$targetVec == "binary") {
cat("\nAUC:", x$auc)
cat("\nFinal p-value:", x$finalBest)
} else {
cat("\nFinal Correlation:", x$finalBest)
}
} else {
cat("Proportion =", x$proportion, "Output Across", x$times, "Replications:", "\n", "\n")
print.data.frame(x$multiple, row.names = FALSE)
if (x$targetVec == "binary"){
cat("\n", "\n")
      print.data.frame(x$auc, row.names = FALSE)
}
}
}
#'@method print calf_exact_binary_subset
#'@export
print.calf_exact_binary_subset <- function(x, ...){
if (x$times == 1) {
cat("Proportion =", x$proportion, "Output Across", x$times, "Replication:", "\n", "\n")
print.data.frame(x$selection, row.names = FALSE, check.names = FALSE)
cat("\nAUC:", x$auc)
cat("\nFinal p-value:", x$finalBest)
} else {
cat("Proportion =", x$proportion, "Output Across", x$times, "Replications:", "\n", "\n")
print.data.frame(x$multiple, row.names = FALSE)
cat("\n", "\n")
    print.data.frame(x$auc, row.names = FALSE)
}
}
|
/scratch/gouwar.j/cran-all/cranData/CALF/R/print.calf.R
|
makemar <-
function(simdata, prop=0.2){
# generates a probability of missingness for x1 and x2 which is
# based on the logistic of y + x3 (i.e. it is dependent on outcome and
# fully observed)
predictions <- function(lp, n){
# uses the vector of linear predictions (lp) from a logistic model
# and the expected number of positive responses (n) to generate
# a set of predictions by modifying the baseline
# n does not have to be an integer
logistic <- function(x){
exp(x)/(1+exp(x))
}
trialn <- function(lptrial){
sum(logistic(lptrial))
}
stepsize <- 32
lptrial <- lp
while(abs(trialn(lptrial) - n) > 1){
if (trialn(lptrial) > n){
# trialn bigger than required
lptrial <- lptrial - stepsize
} else {
lptrial <- lptrial + stepsize
}
stepsize <- stepsize / 2
}
# Generate predictions from binomial distribution
    pred <- as.logical(rbinom(length(lp), 1, logistic(lptrial)))
list(offset=(lptrial-lp)[1], pred=pred, n=sum(pred))
}
simdata[predictions(simdata[,'y'] + simdata[,'x3'],
prop*nrow(simdata))$pred, 'x1'] <- NA
simdata[predictions(simdata[,'y'] + simdata[,'x3'],
prop*nrow(simdata))$pred, 'x2'] <- NA
return(simdata)
}
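# Illustrative sketch (not run; simdata() is defined elsewhere in this
# package and needs the mvtnorm package): generate a complete dataset,
# impose roughly 20% MAR missingness on x1 and x2, and check the
# realised missingness proportions.
if (FALSE) {
  set.seed(1)
  complete <- simdata(n = 2000)
  withmiss <- makemar(complete, prop = 0.2)
  colMeans(is.na(withmiss))  # x1 and x2 should each be close to 0.2
}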
|
/scratch/gouwar.j/cran-all/cranData/CALIBERrfimpute/R/makemar.R
|
mice.impute.rfcat <- function(y, ry, x,
ntree_cat = NULL, nodesize_cat = NULL,
maxnodes_cat = NULL, ntree = NULL, ...){
# y is the vector of y (observed and unobserved)
# ry is a vector of indicators as to whether y is observed
# x is the matrix of predictors
# If y is logical, convert to factor
if (is.logical(y)){
convertlogical <- TRUE
y <- as.factor(y)
} else {
convertlogical <- FALSE
}
# Select a bootstrap sample
x <- as.matrix(x)
bootsample <- sample(sum(ry), replace = TRUE)
yobs <- y[ry][bootsample]
xobs <- x[ry, , drop = FALSE][bootsample, , drop = FALSE]
xmiss <- x[!ry, , drop = FALSE]
# Use ntree to pass the number of trees (consistent with
# mice.impute.rf in the mice package)
if (is.null(ntree_cat) & !is.null(ntree)){
ntree_cat <- ntree
}
if (is.null(ntree_cat)){
if (is.null(getOption('CALIBERrfimpute_ntree_cat'))){
ntree_cat <- 10
} else {
ntree_cat <- getOption('CALIBERrfimpute_ntree_cat')
}
}
if (is.null(nodesize_cat)){
if (is.null(getOption('CALIBERrfimpute_nodesize_cat'))){
nodesize_cat <- 1
} else {
nodesize_cat <- getOption('CALIBERrfimpute_nodesize_cat')
}
}
if (is.null(maxnodes_cat)){
# default is NULL
maxnodes_cat <- getOption('CALIBERrfimpute_maxnodes_cat')
}
# Check there are no empty factor categories in outcome.
# If there are, re-order the factor
missinglevels <- (table(yobs) == 0)
newlevels <- rep(NA_integer_, length(levels(y)))
newlevels[!missinglevels] <- 1:sum(!missinglevels)
labels <- levels(y)
oldlevels <- 1:length(levels(y))
changes <- !identical(newlevels, 1:length(levels(y)))
if (changes){
temp <- data.frame(id_yobs = 1:length(yobs),
fac = as.integer(yobs))
lookup <- data.frame(fac = oldlevels, new = factor(newlevels))
temp <- merge(temp, lookup, all.x = TRUE)
yobs <- temp[order(temp$id_yobs), 'new']
}
# Build a set of trees
  trees <- lapply(1:ntree_cat, function(i){
    if (length(unique(yobs)) == 1){
      # all observed outcomes fall in a single category, so no tree is
      # needed; every imputation is that single value
      yobs[1]
} else {
randomForest(xobs, yobs, ntree = 1, nodesize = nodesize_cat,
maxnodes = maxnodes_cat)
}
})
# Choose a random tree and predict the outcome for each observation
yimp <- apply(xmiss, 1, function(x) {
thetree <- trees[[sample(ntree_cat, 1)]]
if ('randomForest' %in% class(thetree)){
predict(thetree, x)
} else {
# a single value
thetree
}
})
# Restore original factor categories if necessary
if (changes){
temp <- data.frame(id_yimp = 1:length(yimp),
fac = as.integer(yimp))
lookup <- data.frame(fac = newlevels,
old = factor(oldlevels))
levels(lookup$old) <- labels
temp <- merge(temp, lookup, all.x = TRUE)
yimp <- temp[order(temp$id_yimp), 'old']
}
# Convert from factor back to logical
if (convertlogical){
yimp <- as.logical(yimp == 'TRUE')
}
return(yimp)
}
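# Illustrative sketch (not run): because this function follows the
# mice.impute.<name> convention it can be selected per column in mice().
# 'mydata' and the factor column 'mycatvar' are hypothetical; extra
# arguments such as ntree_cat are passed through mice()'s '...'.
if (FALSE) {
  library(mice)
  meth <- make.method(mydata)
  meth['mycatvar'] <- 'rfcat'
  imp <- mice(mydata, method = meth, m = 5, ntree_cat = 100)
}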
|
/scratch/gouwar.j/cran-all/cranData/CALIBERrfimpute/R/mice.impute.rfcat.R
|
mice.impute.rfcont <- function(y, ry, x,
ntree_cont = NULL, nodesize_cont = NULL,
maxnodes_cont = NULL, ntree = NULL, ...){
# y is the vector of y (observed and unobserved)
# ry is a vector of indicators as to whether y is observed
# x is the matrix of predictors
x <- as.matrix(x)
bootsample <- sample(sum(ry), replace = TRUE)
# Use ntree to pass the number of trees (consistent with
# mice.impute.rf in the mice package)
if (is.null(ntree_cont) & !is.null(ntree)){
ntree_cont <- ntree
}
if (is.null(ntree_cont)){
if (is.null(getOption('CALIBERrfimpute_ntree_cont'))){
ntree_cont <- 10
} else {
ntree_cont <- getOption('CALIBERrfimpute_ntree_cont')
}
}
if (is.null(nodesize_cont)){
if (is.null(getOption('CALIBERrfimpute_nodesize_cont'))){
nodesize_cont <- 5
} else {
nodesize_cont <- getOption('CALIBERrfimpute_nodesize_cont')
}
}
if (is.null(maxnodes_cont)){
# default is NULL
maxnodes_cont <- getOption('CALIBERrfimpute_maxnodes_cont')
}
# Only bootstrap if more than one tree, because Random Forest
# fits to a bootstrap sample. Use drop = FALSE to ensure that the
# predictor matrix remains a matrix
if (ntree_cont > 1){
yobs <- y[ry][bootsample]
xobs <- x[ry, , drop = FALSE][bootsample, , drop = FALSE]
} else {
yobs <- y[ry]
xobs <- x[ry, , drop = FALSE]
}
xmiss <- x[!ry, , drop = FALSE]
# Build a random forest
rf <- randomForest(xobs, yobs, ntree = ntree_cont,
nodesize = nodesize_cont, maxnodes = maxnodes_cont, ...)
yhat <- predict(rf, xmiss)
# Draw imputed values from normal distributions
# centred on the means predicted by Random Forest
yimp <- rnorm(length(yhat), mean = yhat, sd = sqrt(rf$mse[ntree_cont]))
return(yimp)
}
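# Illustrative sketch (not run): calling the routine directly on a toy
# problem shows that imputed values are random draws centred on the
# forest predictions, not the predictions themselves (requires the
# randomForest package; all names below are made up for the demo).
if (FALSE) {
  library(randomForest)
  set.seed(2)
  x <- matrix(rnorm(200), ncol = 2)
  y <- x[, 1] + rnorm(100, sd = 0.5)
  ry <- runif(100) > 0.3  # TRUE = observed; about 30% missing
  mice.impute.rfcont(y, ry, x, ntree_cont = 10)  # draws for the !ry cases
}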
|
/scratch/gouwar.j/cran-all/cranData/CALIBERrfimpute/R/mice.impute.rfcont.R
|
setRFoptions <- function(ntree_cat = NULL, ntree_cont = NULL,
nodesize_cat = NULL, nodesize_cont = NULL,
maxnodes_cat = NULL, maxnodes_cont = NULL){
# Records these global options for use by CALIBERrfimpute
for (opname in c('ntree_cat', 'ntree_cont',
'nodesize_cat', 'nodesize_cont',
'maxnodes_cat', 'maxnodes_cont')){
fullopname <- paste('CALIBERrfimpute', opname, sep='_')
if (!is.null(get(opname))){
message(paste(c('Setting option ', fullopname, ' = ',
get(opname)), collapse=''))
eval(parse(text=paste('options(', fullopname, '=', get(opname), ')')))
}
}
}
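# Illustrative sketch (not run): options set here are the ones read back
# via getOption() by mice.impute.rfcat() and mice.impute.rfcont() above.
if (FALSE) {
  setRFoptions(ntree_cat = 100, nodesize_cont = 10)
  getOption('CALIBERrfimpute_ntree_cat')      # 100
  getOption('CALIBERrfimpute_nodesize_cont')  # 10
}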
|
/scratch/gouwar.j/cran-all/cranData/CALIBERrfimpute/R/setRFoptions.R
|
simdata <-
function(n=2000, mymean=rep(0,4), mysigma=matrix(
c(1 , 0.2, 0.1,-0.7,
0.2, 1 , 0.3, 0.1,
0.1, 0.3, 1 , 0.2,
-0.7, 0.1, 0.2, 1), byrow=TRUE, nrow=4, ncol=4),
residsd=1, x2binary=FALSE){
# Returns a simulated dataset. Predictors are drawn from a multivariate
# normal distribution with mean and covariance sigma, with residual
# variance residsd
# Covariance matrix: 1 0.2 0.1 -0.7
# 0.2 1 0.3 0.1
# 0.1 0.3 1 0.2
# -0.7 0.1 0.2 1
# output is calculated based on the predictors
# two of interest, two auxiliary
out <- rmvnorm(n, mymean, mysigma)
if (x2binary==TRUE){
# Convert x2 to a random draw from the logistic of x2
out[,2] <- rbinom(n, 1, exp(out[,2])/(1+exp(out[,2])))
}
# y is the sum of the first 3 x variables (i.e. true coefficients are 1)
# add a random error to the output
out <- cbind(rnorm(n, out[,1] + out[,2] + out[,3], residsd), out)
dimnames(out)[[2]] <- c('y', paste('x', 1:4, sep=''))
out <- data.frame(out)
if (x2binary==TRUE){
out$x2 <- as.factor(out$x2 + 1)
}
return(out)
}
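# Illustrative sketch (not run; requires the mvtnorm package): y is
# generated as x1 + x2 + x3 plus noise, so a large-sample regression
# should recover coefficients near (1, 1, 1) for x1..x3 and near 0 for
# the auxiliary variable x4.
if (FALSE) {
  library(mvtnorm)
  set.seed(3)
  d <- simdata(n = 100000)
  coef(lm(y ~ x1 + x2 + x3 + x4, data = d))
}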
|
/scratch/gouwar.j/cran-all/cranData/CALIBERrfimpute/R/simdata.R
|
### R code from vignette source 'simstudy_survival.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: simstudy_survival.Rnw:13-43
###################################################
# Chunk 1
library(CALIBERrfimpute)
library(missForest)
library(survival)
library(xtable)
library(rpart)
library(mice)
library(ranger)
kPmiss <- 0.2 # probability of missingness
kLogHR <- 0.5 # true log hazard ratio
# To analyse samples of more than 200 patients, (recommend about 2000,
# but this will slow down the program), set NPATS before running
# this vignette.
if (!exists('NPATS')){
kSampleSize <- 200 # number of patients in simulated datasets
} else {
kSampleSize <- NPATS
}
# e.g.
# NPATS <- 2000
# To analyse more than 3 samples, set N to a number greater than 3
# e.g.
# N <- 1000
# To use more than 4 imputations, set NIMPS to a number greater than 4
# e.g.
# NIMPS <- 10
###################################################
### code chunk number 2: simstudy_survival.Rnw:86-280
###################################################
# Chunk 2
#### DATA GENERATING FUNCTIONS ####
makeSurv <- function(n = 2000, loghr = kLogHR){
# Creates a survival cohort of n patients. Assumes that censoring is
# independent of all other variables
# x1 and x2 are random normal variables
data <- data.frame(x1 = rnorm(n), x2 = rnorm(n))
# Create the x3 variable
data$x3 <- 0.5 * (data$x1 + data$x2 - data$x1 * data$x2) + rnorm(n)
# Underlying log hazard ratio for all variables is the same
data$y <- with(data, loghr * (x1 + x2 + x3))
data$survtime <- rexp(n, exp(data$y))
# Censoring - assume uniform distribution of observation times
# up to a maximum
obstime <- runif(nrow(data), min = 0,
max = quantile(data$survtime, 0.5))
data$event <- as.integer(data$survtime <= obstime)
# Generate integer survival times
data$time <- ceiling(100 * pmin(data$survtime, obstime))
# Observed marginal cumulative hazard for imputation models
data$cumhaz <- nelsonaalen(data, time, event)
# True log hazard and survival time are not seen in the data
# so remove them
data$y <- NULL
data$survtime <- NULL
return(data)
}
makeMarSurv <- function(data, pmissing = kPmiss){
# Introduces missing data dependent on event indicator
# and cumulative hazard and x1 and x2
logistic <- function(x){
exp(x) / (1 + exp(x))
}
predictions <- function(lp, n){
# uses the vector of linear predictions (lp) from a logistic model
# and the expected number of positive responses (n) to generate
# a set of predictions by modifying the baseline
trialn <- function(lptrial){
sum(logistic(lptrial))
}
stepsize <- 32
lptrial <- lp
# To avoid errors due to missing linear predictors (ideally
# there should not be any missing), replace with the mean
if (any(is.na(lptrial))){
      lptrial[is.na(lptrial)] <- mean(lptrial, na.rm = TRUE)
}
while(abs(trialn(lptrial) - n) > 1){
if (trialn(lptrial) > n){
# trialn bigger than required
lptrial <- lptrial - stepsize
} else {
lptrial <- lptrial + stepsize
}
stepsize <- stepsize / 2
}
# Generate predictions from binomial distribution
as.logical(rbinom(logical(length(lp)), 1, logistic(lptrial)))
}
data$x3[predictions(0.1 * data$x1 + 0.1 * data$x2 +
0.1 * data$cumhaz + 0.1 * data$event, nrow(data) * pmissing)] <- NA
return(data)
}
#### IMPUTATION FUNCTIONS FROM DOOVE AND VAN BUUREN ####
mice.impute.rfdoove10 <- function(y, ry, x, ...){
  mice::mice.impute.rf(y = y, ry = ry, x = x, ntree = 10)
}
mice.impute.rfdoove100 <- function(y, ry, x, ...){
  mice::mice.impute.rf(y = y, ry = ry, x = x, ntree = 100)
}
#### OUR MICE RANDOM FOREST FUNCTIONS ####
mice.impute.rfcont5 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 5)
}
mice.impute.rfcont10 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 10)
}
mice.impute.rfcont20 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 20)
}
mice.impute.rfcont50 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 50)
}
mice.impute.rfcont100 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 100)
}
#### FUNCTIONS TO DO THE ANALYSIS ####
coxfull <- function(data){
# Full data analysis
coefs <- as.data.frame(summary(coxph(myformula, data = data))$coef)
# return a data.frame of coefficients (est), upper and lower 95% limits
out <- data.frame(est = coefs[, 'coef'],
lo95 = coefs[, 'coef'] + qnorm(0.025) * coefs[, 'se(coef)'],
hi95 = coefs[, 'coef'] + qnorm(0.975) * coefs[, 'se(coef)'],
row.names = row.names(coefs))
out$cover <- kLogHR >= out$lo95 & kLogHR <= out$hi95
out
}
coximpute <- function(imputed_datasets){
# Analyses a list of imputed datasets
docoxmodel <- function(data){
coxph(myformula, data = data)
}
mirafits <- as.mira(lapply(imputed_datasets, docoxmodel))
coefs <- as.data.frame(summary(pool(mirafits)))
if ('term' %in% colnames(coefs)){
row.names(coefs) <- as.character(coefs$term)
}
if (!('lo 95' %in% colnames(coefs))){
# newer version of mice
# use normal approximation for now, as assume large sample
# and large degrees of freedom for t distribution
out <- data.frame(est = coefs$estimate,
lo95 = coefs$estimate + qnorm(0.025) * coefs$std.error,
hi95 = coefs$estimate + qnorm(0.975) * coefs$std.error,
row.names = row.names(coefs))
} else if ('lo 95' %in% colnames(coefs)){
# older version of mice
out <- data.frame(est = coefs$est,
lo95 = coefs[, 'lo 95'], hi95 = coefs[, 'hi 95'],
row.names = row.names(coefs))
} else {
stop('Unable to handle format of summary.mipo object')
}
# Whether this confidence interval contains the true hazard ratio
out$cover <- kLogHR >= out$lo95 & kLogHR <= out$hi95
out
}
domissf <- function(missdata, reps = NIMPS){
# Imputation by missForest
out <- list()
for (i in 1:reps){
invisible(capture.output(
out[[i]] <- missForest(missdata)$ximp))
}
out
}
domice <- function(missdata, functions, reps = NIMPS){
mids <- mice(missdata, defaultMethod = functions,
m = reps, visitSequence = 'monotone',
printFlag = FALSE, maxit = 10)
lapply(1:reps, function(x) complete(mids, x))
}
doanalysis <- function(x){
# Creates a dataset, analyses it using different methods, and outputs
# the result as a matrix of coefficients / SE and coverage
data <- makeSurv(kSampleSize)
missdata <- makeMarSurv(data)
out <- list()
out$full <- coxfull(data)
out$missf <- coximpute(domissf(missdata))
out$rf5 <- coximpute(domice(missdata, 'rfcont5'))
out$rf10 <- coximpute(domice(missdata, 'rfcont10'))
out$rf20 <- coximpute(domice(missdata, 'rfcont20'))
out$rf100 <- coximpute(domice(missdata, 'rfcont100'))
out$rfdoove10 <- coximpute(domice(missdata, 'rfdoove10'))
out$rfdoove100 <- coximpute(domice(missdata, 'rfdoove100'))
out$cart <- coximpute(domice(missdata, 'cart'))
out$mice <- coximpute(domice(missdata, 'norm'))
out
}
###################################################
### code chunk number 3: simstudy_survival.Rnw:284-290
###################################################
# Chunk 3
mydata <- makeSurv(200)
plot(mydata[, c('x1', 'x2', 'x3')],
main = "Associations between predictor variables in a sample dataset")
mydata <- makeSurv(20000)
###################################################
### code chunk number 4: simstudy_survival.Rnw:295-298
###################################################
# Chunk 4
summary(lm(x3 ~ x1*x2, data = mydata))
###################################################
### code chunk number 5: simstudy_survival.Rnw:301-314
###################################################
# Chunk 5
mydata <- makeSurv(2000)
mydata2 <- makeMarSurv(mydata)
# Plot non-missing data
plot(mydata$x1[!is.na(mydata2$x3)], mydata$x3[!is.na(mydata2$x3)],
pch = 19, xlab = 'x1', ylab = 'x3')
# Plot missing data
points(mydata$x1[is.na(mydata2$x3)], mydata$x3[is.na(mydata2$x3)],
col = 'red', pch = 19)
legend('bottomright', legend = c('x3 observed', 'x3 missing'),
col = c('black', 'red'), pch = 19)
title('Association of predictor variables x1 and x3')
###################################################
### code chunk number 6: simstudy_survival.Rnw:319-345
###################################################
# Chunk 6
# Cox proportional hazards analysis
myformula <- as.formula(Surv(time, event) ~ x1 + x2 + x3)
# Analysis with 10,000 simulated patients (or more
# if the variable REFERENCE_SAMPLESIZE exists)
if (!exists('REFERENCE_SAMPLESIZE')){
REFERENCE_SAMPLESIZE <- 10000
}
# Use parallel processing, if available, to create
# datasets more quickly.
if ('parallel' %in% loadedNamespaces() &&
!is.null(getOption('mc.cores')) &&
.Platform$OS.type == 'unix'){
REFERENCE_SAMPLESIZE <- REFERENCE_SAMPLESIZE %/%
getOption('mc.cores')
simdata <- parallel::mclapply(1:getOption('mc.cores'),
function(x) makeSurv(REFERENCE_SAMPLESIZE))
simdata <- do.call('rbind', simdata)
} else {
simdata <- makeSurv(REFERENCE_SAMPLESIZE)
}
summary(coxph(myformula, data = simdata))
###################################################
### code chunk number 7: simstudy_survival.Rnw:367-387
###################################################
# Chunk 7
# Setting analysis parameters: To analyse more than 3 samples,
# set N to the desired number before running this program
if (!exists('N')){
N <- 3
}
# Number of imputations (set to at least 10 when
# running an actual simulation)
if (!exists('NIMPS')){
NIMPS <- 4
}
# Use parallel processing if the 'parallel' package is loaded
if ('parallel' %in% loadedNamespaces() &&
.Platform$OS.type == 'unix'){
cat('Using parallel processing\n')
results <- parallel::mclapply(1:N, doanalysis)
} else {
results <- lapply(1:N, doanalysis)
}
###################################################
### code chunk number 8: simstudy_survival.Rnw:416-455
###################################################
# Chunk 8
getParams <- function(coef, method){
estimates <- sapply(results, function(x){
x[[method]][coef, 'est']
})
bias <- mean(estimates) - kLogHR
se_bias <- sd(estimates) / sqrt(length(estimates))
mse <- mean((estimates - kLogHR) ^ 2)
ci_len <- mean(sapply(results, function(x){
x[[method]][coef, 'hi95'] - x[[method]][coef, 'lo95']
}))
ci_cov <- mean(sapply(results, function(x){
x[[method]][coef, 'cover']
}))
out <- c(bias, se_bias, mse, sd(estimates), ci_len, ci_cov)
names(out) <- c('bias', 'se_bias', 'mse', 'sd', 'ci_len', 'ci_cov')
out
}
showTable <- function(coef){
methods <- c('full', 'missf', 'cart', 'rfdoove10',
'rfdoove100', 'rf5', 'rf10', 'rf20', 'rf100', 'mice')
methodnames <- c('Full data', 'missForest', 'CART MICE',
'RF Doove MICE 10', 'RF Doove MICE 100',
paste('RFcont MICE', c(5, 10, 20, 100)),
'Parametric MICE')
out <- t(sapply(methods, function(x){
getParams(coef, x)
}))
out <- formatC(out, digits = 3, format = 'fg')
out <- rbind(c('', 'Standard', 'Mean', 'SD of', 'Mean 95%',
'95% CI'), c('Bias', 'error of bias', 'square error', 'estimate',
'CI length', 'coverage'), out)
out <- cbind(c('', '', methodnames), out)
rownames(out) <- NULL
print(xtable(out), floating = FALSE, include.rownames = FALSE,
include.colnames = FALSE, hline.after = c(0, 2, nrow(out)))
}
###################################################
### code chunk number 9: simstudy_survival.Rnw:468-471
###################################################
# Chunk 9
showTable('x1')
###################################################
### code chunk number 10: simstudy_survival.Rnw:480-483
###################################################
# Chunk 10
showTable('x2')
###################################################
### code chunk number 11: simstudy_survival.Rnw:493-496
###################################################
# Chunk 11
showTable('x3')
###################################################
### code chunk number 12: simstudy_survival.Rnw:506-530
###################################################
# Chunk 12
numtrees <- c(5, 10, 20, 100)
bias <- sapply(numtrees, function(x){
getParams('x3', paste('rf', x, sep=''))['bias']
})
se_bias <- sapply(numtrees, function(x){
getParams('x3', paste('rf', x, sep=''))['se_bias']
})
lower_bias <- bias - 1.96*se_bias
upper_bias <- bias + 1.96*se_bias
# Blank plot
plot(-100, 0, type = 'p', pch = 15, cex = 1.3, ylab = 'Bias',
xlab = 'Number of trees', xlim = c(0,100),
ylim = c(min(lower_bias), max(upper_bias)))
# Zero bias line
lines(c(0,100), c(0,0), lty = 2, col = 'gray')
# Confidence interval lines
for (i in seq_along(numtrees)){lines(rep(numtrees[i], 2),
c(lower_bias[i], upper_bias[i]))}
# Points
points(numtrees, bias, pch = 15, cex = 1.3)
title('Bias in estimate of x3 coefficient after\nmultiple imputation using RFcont MICE')
###################################################
### code chunk number 13: simstudy_survival.Rnw:535-690
###################################################
# Chunk 13
# Comparing confidence interval coverage and bias between:
# RF MICE 100 trees
# RF MICE 10 trees
# Parametric MICE
# Names of the variables in the comparison
variables <- c('x1', 'x2', 'x3')
pstar <- function(x){
if (!is.na(x)){
if (x < 0.001){
'***'
} else if (x < 0.01){
'**'
} else if (x < 0.05){
'*'
} else {
''
}
} else {
''
}
}
compareBias <- function(method1, method2){
# Generates a table comparing bias
# Comparison statistic is the difference in absolute bias
# (negative means first method is better)
compareBiasVar <- function(varname){
# All coefficients should be kLogHR
bias1 <- sapply(results, function(x){
x[[method1]][varname, 'est']
}) - kLogHR
bias2 <- sapply(results, function(x){
x[[method2]][varname, 'est']
}) - kLogHR
if (sign(mean(bias1)) == -1){
bias1 <- -bias1
}
if (sign(mean(bias2)) == -1){
bias2 <- -bias2
}
paste(formatC(mean(bias1) - mean(bias2), format = 'fg', digits = 3),
pstar(t.test(bias1 - bias2)$p.value))
}
sapply(variables, compareBiasVar)
}
compareVariance <- function(method1, method2){
# Generates a table comparing precision between two methods
# Comparison statistic is ratio of variance
# (smaller means first method is better)
compareVarianceVar <- function(varname){
e1 <- sapply(results, function(x){
x[[method1]][varname, 'est']
})
e2 <- sapply(results, function(x){
x[[method2]][varname, 'est']
})
paste(formatC(var(e1) / var(e2), format = 'fg', digits = 3),
pstar(var.test(e1, e2)$p.value))
}
sapply(variables, compareVarianceVar)
}
compareCIlength <- function(method1, method2){
# Generates a table comparing coverage percentage between two methods
# Comparison statistic is the ratio of confidence interval lengths
# (less than 1 = first better)
compareCIlengthVar <- function(varname){
# Paired t test for bias (difference in estimate)
len1 <- sapply(results, function(x){
x[[method1]][varname, 'hi95'] -
x[[method1]][varname, 'lo95']
})
len2 <- sapply(results, function(x){
x[[method2]][varname, 'hi95'] -
x[[method2]][varname, 'lo95']
})
paste(formatC(mean(len1) / mean(len2),
format = 'fg', digits = 4),
pstar(t.test(len1 - len2)$p.value))
}
sapply(variables, compareCIlengthVar)
}
compareCoverage <- function(method1, method2){
# Generates a table comparing coverage percentage between two methods
# Comparison statistic is the difference in coverage
# (positive = first better)
compareCoverageVar <- function(varname){
# Paired t test for bias (difference in estimate)
cov1 <- sapply(results, function(x){
x[[method1]][varname, 'cover']
})
cov2 <- sapply(results, function(x){
x[[method2]][varname, 'cover']
})
paste(formatC(100 * (mean(cov1) - mean(cov2)), format = 'f',
digits = 1),
pstar(binom.test(c(sum(cov1 == TRUE & cov2 == FALSE),
sum(cov1 == FALSE & cov2 == TRUE)))$p.value))
}
sapply(variables, compareCoverageVar)
}
maketable <- function(comparison){
# comparison is a function such as compareCoverage, compareBias
compare <- cbind(comparison('rf10', 'mice'),
comparison('rf100', 'mice'),
comparison('rf10', 'rf100'))
compare <- cbind(rownames(compare), compare)
compare <- rbind(
c('', 'RFcont MICE 10 vs', 'RFcont MICE 100 vs',
'RFcont MICE 10 vs'),
c('Coefficient', 'parametric MICE',
'parametric MICE', 'RFcont MICE 100'),
compare)
rownames(compare) <- NULL
print(xtable(compare), include.rownames = FALSE,
include.colnames = FALSE, floating = FALSE,
hline.after = c(0, 2, nrow(compare)))
cat('\n\\vspace{1em}\n')
compare <- cbind(comparison('rfdoove10', 'rf10'),
comparison('rfdoove10', 'cart'),
comparison('rfdoove10', 'rfdoove100'))
compare <- cbind(rownames(compare), compare)
compare <- rbind(
c('', 'RF Doove MICE 10 vs', 'RF Doove MICE 10 vs',
'RF Doove MICE 10 vs'),
c('Coefficient', 'RFcont MICE 10',
'CART MICE', 'RF Doove MICE 100'),
compare)
rownames(compare) <- NULL
print(xtable(compare), include.rownames = FALSE,
include.colnames = FALSE, floating = FALSE,
hline.after = c(0, 2, nrow(compare)))
}
###################################################
### code chunk number 14: simstudy_survival.Rnw:699-702
###################################################
# Chunk 14
maketable(compareBias)
###################################################
### code chunk number 15: simstudy_survival.Rnw:711-714
###################################################
# Chunk 15
maketable(compareVariance)
###################################################
### code chunk number 16: simstudy_survival.Rnw:724-727
###################################################
# Chunk 16
maketable(compareCIlength)
###################################################
### code chunk number 17: simstudy_survival.Rnw:736-739
###################################################
# Chunk 17
maketable(compareCoverage)
###################################################
### code chunk number 18: simstudy_survival.Rnw:773-784
###################################################
# Chunk 18
showfunction <- function(functionname){
cat(paste(functionname, '<-',
paste(capture.output(print(get(functionname))),
collapse = '\n')))
cat('\n')
invisible(NULL)
}
showfunction('makeSurv')
showfunction('makeMarSurv')
###################################################
### code chunk number 19: simstudy_survival.Rnw:789-803
###################################################
# Chunk 19
showfunction('coxfull')
showfunction('coximpute')
showfunction('domissf')
showfunction('mice.impute.cart')
showfunction('mice.impute.rfdoove10')
showfunction('mice.impute.rfdoove100')
showfunction('mice.impute.rfcont5')
showfunction('mice.impute.rfcont10')
showfunction('mice.impute.rfcont20')
showfunction('mice.impute.rfcont100')
showfunction('domice')
showfunction('doanalysis')
###################################################
### code chunk number 20: simstudy_survival.Rnw:808-815
###################################################
# Chunk 20
showfunction('pstar')
showfunction('compareBias')
showfunction('compareVariance')
showfunction('compareCIlength')
showfunction('compareCoverage')
###################################################
### code chunk number 21: simstudy_survival.Rnw:820-825
###################################################
# Chunk 21
showfunction('getParams')
showfunction('showTable')
showfunction('maketable')
|
/scratch/gouwar.j/cran-all/cranData/CALIBERrfimpute/inst/doc/simstudy_survival.R
|
mixalg = function(obs, weights=NULL, family="gaussian", data=NULL,
pop.at.risk=NULL, var.lnOR=NULL, limit=0.01,
acc=10^(-7), numiter=5000, startk=50){
# Performs CAMAN (computer-assisted analysis of mixtures)
if (family == "gaussian") dens_i = 0
else if (family == "poisson") dens_i = 1
else if (family == "binomial") dens_i = 2
else return("Please enter a valid family of distribution (normal, poisson, binomial)")
#check data
if (is.null(data)) data <- data.frame() # no data was given
if (!((obs %in% colnames(data))||is.numeric(obs))) stop("obs must be a colname of 'data' or a numeric vector")
if (!((weights %in% colnames(data))||is.numeric(weights) || is.null(weights))) stop("weights must be a colname of 'data', a numeric vector or 'NULL'")
if (!((var.lnOR %in% colnames(data))||is.numeric(var.lnOR) || is.null(var.lnOR))) stop("variances must be a colname of 'data', a numeric vector or 'NULL'")
if (is.null(var.lnOR) ) is_metaAnalysis <- 0 #variances not specified --> no meta-analysis; estimate the variances instead
else is_metaAnalysis <- 1 #variances are given --> meta analysis --> no estimation
n= max(nrow(data), length(obs) )
datmat <- matrix(1,ncol=4, nrow=n)
if (is.numeric(obs) && length(obs) > 0) datmat[,1] <- obs
else datmat[,1] <- data[,obs]
#build matrix 'datmat' by reading out the command
tmpdat <- list(obs, weights, pop.at.risk, var.lnOR)
for (i in 1:4){
if (is.character(tmpdat[[i]]) ) datmat[,i] <- data[,tmpdat[[i]]] #colname was given
else if (is.null(tmpdat[[i]]) ) datmat[,i] <- rep(1,n) #NULL was given
else if (is.numeric(tmpdat[[i]]) ) datmat[,i] <- tmpdat[[i]] #a numeric vector was given
else stop("Data initialization failed...")
}
rm(tmpdat)
#estimate variances
if ((sum(rep(1,n) == datmat[,4])== n) && family=="gaussian" && is.null(var.lnOR)) datmat[,4] <- rep(var(datmat[,1]),n)
res1 <- .C("caman_C", as.vector(as.double(datmat[,1])), as.vector(as.double(datmat[,2])), as.vector(as.double(datmat[,3])),
as.vector(as.double(datmat[,4])), as.integer(n), as.integer(startk), as.integer(dens_i),
as.integer(999), as.double(999), rep(as.double(999), 150), rep(as.double(999), 150),
as.double(limit), as.double(acc), as.integer(numiter), as.double(c(-999)),
rep(as.double(-999), (2*startk + 2)),rep(as.double(-999), 2) ,
as.integer(is_metaAnalysis) ,PACKAGE = "CAMAN")
numObs <- sum(datmat[,2])
k <- res1[[8]]
p=res1[[10]][1:k]
bic <- -2 * res1[[9]] + (2*k - 1) * log(numObs)
VEM_tmp <- res1[[16]]
EM_tmp <- res1[[17]]
finalacc <- c(VEM_tmp[2], EM_tmp[1]) # VEM, EM
vem_res <- matrix(VEM_tmp[3: (2*VEM_tmp[1] + 2)], ncol=2)
totalsteps <- c(res1[[14]][1], EM_tmp[2]) #VEM, EM
res <- new("CAMAN.object",dat=datmat, family=family, LL=res1[[9]],
num.k=k, p=p, t=res1[[11]][1:k], num.obs = numObs, steps=totalsteps,
otherParams = c(limit, numiter, acc, startk), BIC = bic, VEM_result =
vem_res, finalacc= finalacc, is_metaAnalysis=is_metaAnalysis)
if (dens_i == 0) [email protected]=res1[[15]]
#compute posterior prob
probs <- mix.densDistr(res)
res@prob <- probs
res@classification <- apply(probs, 1, which.max)
if (res@steps[1] >= res@otherParams[2]) {
warning("Solution does not satisfy the convergence criterion:\n The last VEM iteration had an accuracy of ",
res@finalacc[1], ". You asked for (acc=) ", acc,
"\n Please increase numiter or decrease acc")}
if (res@steps[2] >= res@otherParams[2]) {
warning("Solution does not satisfy the convergence criterion:\n The last EM iteration had an accuracy of ",
res@finalacc[2], ". You asked for (acc=) ", acc,
"\n Please increase numiter or decrease acc")}
return(res)
}
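## Illustrative usage sketch (added for exposition; not part of the original
## CAMAN sources). Assumes the compiled CAMAN package is loaded so that the
## C routine "caman_C" called above is available.
if (FALSE) {
  set.seed(42)
  x <- c(rnorm(150, mean = 0, sd = 1), rnorm(150, mean = 5, sd = 1))
  m <- mixalg(obs = x, family = "gaussian")
  m                        # show() prints mixing weights p, means t, LL and BIC
  table(m@classification)  # most likely component for each observation
}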
anova.CAMAN.object <- function(object, object1, nboot=2500, limit=0.01,
acc=10^(-7), numiter=5000,
giveBootstrapData=FALSE, giveLikelihood=FALSE, ...){
mix0 <- object
mix1 <- object1
#compute LL-ratio:
# simulate data from mix0 and compare LL of mix0 and mix1 based on this data
cl <- match.call()
if (nboot<1) return("Please enter a valid number for nboot")
if (mix0@family == "gaussian") dens_i0 = 0
else if (mix0@family == "poisson") dens_i0 = 1
else if (mix0@family == "binomial") dens_i0 = 2
if (mix1@family == "gaussian") dens_i1 = 0
else if (mix1@family == "poisson") dens_i1 = 1
else if (mix1@family == "binomial") dens_i1 = 2
datUnpack <- rep(mix0@dat[,1], mix0@dat[,2])
len <- length(datUnpack)
obs <- sapply(1:nboot, function(x){simDat(mix0)}) #returns a matrix with bootstrapped observations of m1
#do we need to unpack the data...
if (sum(mix0@dat[,2]) == nrow(mix0@dat) ){ #no weights --> datUnpacked == mix@dat[,1]
dataUnpacked=FALSE
}
else { #else: datUnpacked != mix@dat[,1] --> there were weights
dataUnpacked=TRUE
#if they got unpacked before, we now pack them together again
tmp_obs <- apply(obs,2, function(x){as.numeric(names(table(x)))})
tmp_weights <- apply(obs,2, function(x){as.numeric(table(x))})
SimulatedObs <- tmp_obs
SimulatedWeights <- tmp_weights
}
LL0 <- NULL
LL1 <- NULL
for (i in 1:nboot){
#perform EM for each bootstrap sample
#perform EM for mix0
if (dataUnpacked){ #if the data was packed, we
#need to handle it in another way because there might be less rows in our datamatrix...
obs_i <- SimulatedObs[[i]]
obs_weights <- SimulatedWeights [[i]]
tmplen <- length(obs_weights)
col3 <- rep(1, tmplen) #packing--> use ones as parameter
col4 <- rep(1, tmplen) #packing--> use ones as parameter
}
else { #no packing--> use original parameters for var.lnOR and pop.at.risk
obs_i <- obs[,i]
obs_weights <- mix0@dat[,2] #=rep(1, len)
col3 <- mix0@dat[,3]
col4 <- mix0@dat[,4]
tmplen <- len
}
res0 <- .C("mixalg_sub", as.double(obs_i), as.double(obs_weights), as.double(col3),
as.double(col4), as.integer(tmplen), as.integer([email protected]), as.integer(dens_i0),
as.integer([email protected]), as.double(999), as.double(mix0@p), as.double(mix0@t), as.double(limit), as.double(acc),
as.integer(numiter), as.double(c(-999)), as.integer(1), as.integer(mix0@is_metaAnalysis) , PACKAGE = "CAMAN")
res1 <- .C("mixalg_sub", as.double(obs_i), as.double(obs_weights), as.double(col3),
as.double(col4), as.integer(tmplen), as.integer([email protected]), as.integer(dens_i1),
as.integer([email protected]), as.double(999), as.double(mix1@p), as.double(mix1@t), as.double(limit), as.double(acc),
as.integer(numiter), as.double(c(-999)), as.integer(1) , as.integer(mix1@is_metaAnalysis), PACKAGE = "CAMAN")
LL0[i] <- res0[[9]]
LL1[i] <- res1[[9]]
}
res <- list()
LL_ratios <- sort(-2*(LL0 - LL1)) #90, 95, 97.5 and 99 percent quantiles
LL_ratio_quantiles <- LL_ratios[floor(c(.9, .95, .975, .99)*nboot)]
names(LL_ratio_quantiles) <- c(.9, .95, .975, .99)
res$overview <- data.frame(c(as.character(cl$object), as.character(cl$object1)),
c([email protected], [email protected]), c(mix0@BIC, mix1@BIC),
c(mix0@LL, mix1@LL), c(NA, -2*(mix0@LL - mix1@LL)))
names(res$overview) = c("mixture model","k","BIC","LL", "LL-ratio")
res$"LL ratios in bootstrap-data" <- LL_ratio_quartils
res$"simulated p-value" <- sum(LL_ratios > (-2*(mix0@LL - mix1@LL)) )/nboot
if (giveBootstrapData) {
if (dataUnpacked) res$BootStrapData <- SimulatedObs
else res$BootStrapData <- obs
}
if (giveLikelihood) res$LL <- rbind(LL0,LL1)
return(res)
}
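## Illustrative usage sketch (added; not part of the original sources):
## a parametric-bootstrap likelihood-ratio test of a 1- vs a 2-component
## model, both fitted with mixalg.EM() defined below. nboot is kept small
## here purely for speed.
if (FALSE) {
  set.seed(42)
  x <- c(rnorm(150, 0, 1), rnorm(150, 5, 1))
  m0 <- mixalg.EM(p = 1, t = mean(x), obs = x, family = "gaussian")
  m1 <- mixalg.EM(p = c(0.5, 0.5), t = c(0, 5), obs = x, family = "gaussian")
  lrt <- anova.CAMAN.object(m0, m1, nboot = 100)
  lrt$overview              # k, BIC, LL and the observed LL-ratio
  lrt$"simulated p-value"   # bootstrap p-value of the LL-ratio
}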
mixalg.paraBoot <- function(mix.estim, nboot=50, limit=0.01, acc=10^(-7), numiter=5000, startk=50, giveBootstrapData=FALSE){
# Performs a parametric bootstrap on data.
# Returns the standard deviations of the parameters t and p of the given mixture model!
if (nboot < 1) stop("Please enter a valid number for nboot")
if (mix.estim@family == "gaussian") dens_i = 0
else if (mix.estim@family == "poisson") dens_i = 1
else if (mix.estim@family == "binomial") dens_i = 2
datUnpack <- rep(mix.estim@dat[,1], mix.estim@dat[,2])
len <- length(datUnpack)
obs <- sapply(1:nboot, function(x){simDat(mix.estim)}) #returns a matrix with bootstrapped observations
#other parameters for the EM-algorithm
if (sum(mix.estim@dat[,2]) == nrow(mix.estim@dat) ){ #no weights --> datUnpacked == mix@dat[,1] ==> use original parameters for var.lnOR and pop.at.risk
c2 <- mix.estim@dat[,2]
c3 <- mix.estim@dat[,3]
c4 <- mix.estim@dat[,4]
}
else { #else: there were weights --> datUnpacked != mix@dat[,1] ==> use ones as add. parameters!
c2 <- rep(1, len)
c3 <- c2
c4 <- c2
}
p_mat <- matrix(0, [email protected], nrow=nboot)
t_mat <- matrix(0, [email protected], nrow=nboot)
cat("\nProgress:\n0.........50.......100% of Bootstraps done.\n")
for (i in 1:nboot){
#perform EM for each bootstrap sample
res1 <- .C("mixalg_sub", as.double(obs[,i]), as.double(c2), as.double(c3),
as.double(c4), as.integer(len), as.integer([email protected]), as.integer(dens_i),
as.integer([email protected]), as.double(999), as.double(mix.estim@p), as.double(mix.estim@t), as.double(limit), as.double(acc),
as.integer(numiter), as.double(c(-999)), as.integer(1) , as.integer(mix.estim@is_metaAnalysis), PACKAGE = "CAMAN")
if (i %in% seq(0,nboot,max(floor(nboot/20),1)) ) cat ("|")
p_mat[i, ] <- res1[[10]][1:[email protected]]
t_mat[i, ] <- res1[[11]][1:[email protected]]
}
cat ("\n")
sd.p <- apply(p_mat, 2, sd)
sd.t <- apply(t_mat, 2, sd)
if (giveBootstrapData) return(list(sd.p = sd.p, sd.t = sd.t, BootstrapData = obs))
return(list(sd.p = sd.p, sd.t = sd.t))
}
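## Illustrative usage sketch (added): parametric-bootstrap standard errors of
## the mixture parameters; `m` is the two-component fit from the mixalg()
## sketch above.
if (FALSE) {
  pb <- mixalg.paraBoot(m, nboot = 25)
  pb$sd.p   # bootstrap standard deviations of the mixing weights
  pb$sd.t   # bootstrap standard deviations of the component parameters
}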
mixalg.boot <- function(mix, nboot=500, limit=0.01, acc=10^(-5), numiter=5000, startk=50, returnBootstrapRep= FALSE){
# Performs a nonparametric bootstrap on data.
# Returns the computed optimal number of component for each bootstrap replication
if (nboot<1) return("Please enter a valid number for nboot")
if (mix@family == "gaussian") dens_i = 0
else if (mix@family == "poisson") dens_i = 1
else if (mix@family == "binomial") dens_i = 2
datUnpack <- rep(mix@dat[,1], mix@dat[,2]) # If there are weights --> unpack data and do not use any weights/ -- freq = 1
#generate bootstrap samples:
x.sample <- matrix( rep(datUnpack,nboot), ncol=nboot, byrow=FALSE) #initialization
xlen <- nrow(x.sample)
x.sample <- apply(x.sample, 2, function(yy){sample(yy, xlen, replace=TRUE)}) #sample (bootstrap)
#pack data
tmp_obs <- apply(x.sample,2, function(x){as.numeric(names(table(x)))})
tmp_weights <- apply(x.sample,2, function(x){as.numeric(table(x))})
vec_n <- sapply(tmp_obs, length)
bootSamples <- unlist(tmp_obs)
bootWeights <- unlist(tmp_weights)
tmpn <- nrow(mix@dat)
bootVar <- rep(1, length(bootSamples))
if (sum(mix@dat[,4] == rep(1,tmpn)) != tmpn)
{ #variances were given --> extract them
tmpvar <- mix@dat[,4]
names(tmpvar) <- mix@dat[,1]
tmpBootVar <- sapply(tmp_obs, function(x){tmpvar[as.character(x)]})
bootVar <- as.numeric(unlist(tmpBootVar))
}
bootPopAtRisk <- rep(1, length(bootSamples))
if (sum(mix@dat[,3] == rep(1,tmpn)) != tmpn)
{ #popAtRisk were given --> extract them
tmppop <- mix@dat[,3]
names(tmppop) <- mix@dat[,1]
tmpBootPop <- sapply(tmp_obs, function(x){tmppop[as.character(x)]})
bootPopAtRisk <- as.numeric(unlist(tmpBootPop))
}
#permutation step; write observations in a vector (columnwise)
res1 <- .C("caman_boot", as.double(bootSamples), as.double(bootWeights), as.double(bootPopAtRisk),
as.double(bootVar), as.vector(as.integer(vec_n)), as.integer(startk), as.integer(dens_i),
as.integer(999), rep(as.double(999.99), nboot), rep(as.double(999), 150), rep(as.double(999), 150),
as.double(limit), as.double(acc), as.integer(numiter), as.double(c(-999)), as.integer(nboot), rep(as.integer(999),nboot), rep(as.double(999.99), nboot), as.integer(mix@is_metaAnalysis),PACKAGE = "CAMAN")
if (returnBootstrapRep) res <- list(dat.bootstrap=x.sample, LL=res1[[9]], numk.boot=res1[[17]], LL_k1 = res1[[18]]) #unpacked bootstrap data is returned!
else res <- list(LL=res1[[9]], numk.boot=res1[[17]], LL_k1 = res1[[18]])
return(res)
}
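## Illustrative usage sketch (added): nonparametric bootstrap of the number
## of mixture components; `m` is the fit from the mixalg() sketch above.
if (FALSE) {
  bt <- mixalg.boot(m, nboot = 50)
  table(bt$numk.boot)   # distribution of the optimal k over the bootstrap samples
}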
mixalg.EM <- function(mix = NULL, p, t, obs=NULL, weights=NULL, family="gaussian",
data=NULL, pop.at.risk=NULL, var.lnOR=NULL, limit=0.01,
acc=10^(-7), numiter=5000){
# computes the second (EM) part of the CAMAN algorithm:
# uses manually defined values for p, t & k and
# --> refines the solution with the EM algorithm
# returns updated estimates for p, t, acc & number of iterations (numiter)
if (length(p) == length(t) ) num.k = length(t)
else stop("Please enter valid data for p and t")
if (!is.null(mix)){
family = mix@family
datmat = mix@dat
is_metaAnalysis = mix@is_metaAnalysis
}
else{
#check data
if (is.null(data)) data <- data.frame() # no data was given
if (!((obs %in% colnames(data))||is.numeric(obs))) stop("obs must be a colname of 'data' or a numeric vector")
if (!((weights %in% colnames(data))||is.numeric(weights) || is.null(weights))) stop("weights must be a colname of 'data', a numeric vector or 'NULL'")
if (!((var.lnOR %in% colnames(data))||is.numeric(var.lnOR) || is.null(var.lnOR))) stop("variances must be a colname of 'data', a numeric vector or 'NULL'")
if (is.null(var.lnOR) ) is_metaAnalysis <- 0 #variances not specified --> no meta-analysis; estimate the variances instead
else is_metaAnalysis <- 1 #variances are given --> meta analysis --> no estimation
n= max(nrow(data), length(obs) )
datmat <- matrix(1,ncol=4, nrow=n)
if (is.numeric(obs) && length(obs) > 0) datmat[,1] <- obs
else datmat[,1] <- data[,obs]
#build matrix 'datmat' by reading out the command
tmpdat <- list(obs, weights, pop.at.risk, var.lnOR)
for (i in 1:4){
if (is.character(tmpdat[[i]]) ) datmat[,i] <- data[,tmpdat[[i]]] #colname was given
else if (is.null(tmpdat[[i]]) ) datmat[,i] <- rep(1,n) #NULL was given
else if (is.numeric(tmpdat[[i]]) ) datmat[,i] <- tmpdat[[i]] #a numeric vector was given
else stop("Data initialization failed...")
}
#estimate variances
if ((sum(rep(1,n) == datmat[,4])== n) && family=="gaussian" && is.null(var.lnOR)) datmat[,4] <- rep(var(datmat[,1]),n)
rm(tmpdat)
}
if (family == "gaussian") dens_i = 0
else if (family == "poisson") dens_i = 1
else if (family == "binomial") dens_i = 2
else stop("Please enter a valid density distribution (gaussian, poisson, binomial)")
res1 <- .C("mixalg_sub", as.double(datmat[,1]), as.double(datmat[,2]), as.double(datmat[,3]),
as.double(datmat[,4]), as.integer(nrow(datmat)), as.integer(num.k), as.integer(dens_i),
as.integer(num.k), as.double(999), as.double(p), as.double(t), as.double(limit), as.double(acc),
as.integer(numiter), as.double(c(-999)), as.integer(1), as.integer(is_metaAnalysis) ,PACKAGE = "CAMAN")
numObs <- sum(datmat[,2])
bic <- -2 * res1[[9]] + (2*num.k - 1) * log(numObs)
totalsteps <- c(NA, res1[[14]]) #VEM, EM
finalacc <- c(NA, res1[[13]])
res <- new("CAMAN.object",dat=datmat, family=family, LL=res1[[9]], num.k=num.k, p=res1[[10]], t=res1[[11]],
num.obs = numObs, steps=totalsteps, otherParams = c(limit, numiter, acc, startk=num.k), BIC = bic,
VEM_result = matrix(), finalacc= finalacc, is_metaAnalysis = is_metaAnalysis)
if (dens_i == 0) {
if (num.k>1) [email protected]=res1[[ 15 ]]
else [email protected] = var(res@dat[,1])
}
#compute posterior probabilities
probs <- mix.densDistr(res)
res@prob <- probs
res@classification <- apply(probs, 1, which.max)
return(res)
}
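## Illustrative usage sketch (added): refine user-supplied starting values
## (p, t) with the EM step alone.
if (FALSE) {
  set.seed(42)
  x <- c(rnorm(150, 0, 1), rnorm(150, 5, 1))
  em <- mixalg.EM(p = c(0.5, 0.5), t = c(-1, 6), obs = x, family = "gaussian")
  em@p   # updated mixing weights
  em@t   # updated component means
}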
mixalg.VEM <- function(mix = NULL, obs=NULL, weights=NULL, data=NULL, pop.at.risk=NULL, var.lnOR=NULL, family="gaussian", limit=0.01, acc=10^(-7), numiter=5000, startk=50){
# computes the first part of the CAMAN algorithm:
# 1. construct a grid of potential subpopulation means
# 2. calculation of the mixing kernel density
# 3. VEM algorithm
# returns estimates for the parameters t & weights p
if (!is.null(mix)){
family = mix@family
datmat = mix@dat
is_metaAnalysis = mix@is_metaAnalysis
}
else{
#check data
if (is.null(data)) data <- data.frame() # no data was given
if (!((obs %in% colnames(data))||is.numeric(obs))) stop("obs must be a colname of 'data' or a numeric vector")
if (!((weights %in% colnames(data))||is.numeric(weights) || is.null(weights))) stop("weights must be a colname of 'data', a numeric vector or 'NULL'")
if (!((var.lnOR %in% colnames(data))||is.numeric(var.lnOR) || is.null(var.lnOR))) stop("variances must be a colname of 'data', a numeric vector or 'NULL'")
if (is.null(var.lnOR) ) {
is_metaAnalysis <- 0 #variances not specified --> no meta-analysis; estimate the variances instead
}
else {
is_metaAnalysis <- 1 #variances are given --> meta analysis --> no estimation
}
n = max(nrow(data), length(obs) )
datmat <- matrix(1,ncol=4, nrow=n)
if (is.numeric(obs) && length(obs) > 0) datmat[,1] <- obs
else datmat[,1] <- data[,obs]
#build matrix 'datmat' by reading out the command
tmpdat <- list(obs, weights, pop.at.risk, var.lnOR)
for (i in 1:4){
if (is.character(tmpdat[[i]]) ) datmat[,i] <- data[,tmpdat[[i]]] #colname was given
else if (is.null(tmpdat[[i]]) ) datmat[,i] <- rep(1,n) #NULL was given
else if (is.numeric(tmpdat[[i]]) ) datmat[,i] <- tmpdat[[i]] #a numeric vector was given
else stop("Data initialization failed...")
}
#estimate variances
if ((sum(rep(1,n) == datmat[,4])== n) && family=="gaussian" && is.null(var.lnOR)) datmat[,4] <- rep(var(datmat[,1]),n)
rm(tmpdat)
}
if (family == "gaussian") dens_i = 0
else if (family == "poisson") dens_i = 1
else if (family == "binomial") dens_i = 2
else stop("Please enter a valid density distribution (normal, poisson, binomial)")
num.k <- startk #min(sum(datmat[,2]),startk)
p <- rep(999, num.k)
t <- rep(999, num.k)
#perform the EM algorithm for the given (simulated) data
res1 <- .C("mixalg_sub", as.double(datmat[,1]), as.double(datmat[,2]), as.double(datmat[,3]),
as.double(datmat[,4]), as.integer(nrow(datmat)), as.integer(num.k), as.integer(dens_i),
as.integer(num.k), as.double(999), as.double(p), as.double(t), as.double(limit), as.double(acc),
as.integer(numiter), as.double(c(-999)), as.integer(0) , as.integer(is_metaAnalysis),
as.double(rep(-999.9,num.k)),PACKAGE = "CAMAN")
LL <- res1[[9]]
numObs <- sum(datmat[,2])
bic <- -2 * res1[[9]] + (2*num.k - 1) * log(numObs)
grid <- data.frame(p=res1[[10]], t=res1[[11]])
grid <- grid[grid$p>0,]
rownames(grid) <- as.character(1:nrow(grid))
finalacc <- res1[[13]][1]
totalsteps <- res1[[14]][1]
totalgrid <- data.frame(p=res1[[10]], t=res1[[11]], gradient=res1[[18]])
res <- new("CAMAN.VEM.object",dat=datmat, family=family, LL=LL,
num.k=num.k, num.obs = numObs, steps=totalsteps, otherParams = c(limit, numiter, acc, startk),
BIC = bic, finalacc= finalacc, startk=startk, grid=grid, totalgrid=totalgrid)
return(res)
}
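## Illustrative usage sketch (added): run only the VEM step on a grid of
## startk potential means and inspect the grid points with positive support.
if (FALSE) {
  set.seed(42)
  x <- c(rnorm(150, 0, 1), rnorm(150, 5, 1))
  v <- mixalg.VEM(obs = x, family = "gaussian", startk = 25)
  v@grid   # data frame of grid points t with positive weight p
}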
mix.densDistr <- function(mix){
# computes the probability for each observation (1..n - row of mix@dat) belonging to each component (1..k)
# returns a matrix of dimension n x k
dat <- mix@dat[,1]
res <- matrix([email protected], nrow=length(dat))
p <- mix@p
if (mix@family == "gaussian") {
mu <- mix@t
mix.sd <- sqrt([email protected])
for (i in 1:[email protected]) res[,i] <- sapply(dat,
function(x){p[i]*dnorm(x, mu[i], mix.sd ) / sum(p*dnorm(x, mu,
mix.sd ))})
}
if (mix@family == "binomial") {
prob <- mix@t
popAtRisk <- mix@dat[,3]
for (i in 1:[email protected]) res[,i] <- apply(cbind(dat, popAtRisk), 1,
function(x){p[i]*dbinom(x[1], x[2], prob[i]) / sum(p*dbinom(x[1],
x[2], prob))})
}
if (mix@family == "poisson") {
lambda <- mix@t
popAtRisk <- mix@dat[,3]
for (i in 1:[email protected]) res[,i] <- apply(cbind(dat, popAtRisk), 1,
function(x){p[i]*dpois(x[1], x[2]* lambda[i]) /
sum(p*dpois(x[1], x[2] * lambda))})
}
return(res)
}
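## Illustrative usage sketch (added): posterior membership probabilities of
## a fitted model; `m` is the fit from the mixalg() sketch above.
if (FALSE) {
  post <- mix.densDistr(m)   # n x num.k matrix of posterior probabilities
  head(post)
  rowSums(head(post))        # each row sums to one
}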
getFDR <- function(dat, threshold=.7, idxNotDiff=1 ){
#computes False Discovery Rate, etc.
tau0 <- dat@prob[,idxNotDiff] #p(not differential genes)
tau1 <- 1 - dat@prob[,idxNotDiff] #p(differential genes)
n <- nrow(dat@prob)
fdr.hat <- sum(tau0 * (tau0 <= threshold)) / sum((tau0 <= threshold) )
fndr.hat <- sum( (tau1) * (tau0 >= threshold)) / (n - sum(tau0 <= threshold) )
fpr.hat <- sum(tau0 * (tau0 <= threshold)) / sum(tau0)
fnr.hat <- sum(tau1 * (tau0> threshold) ) / sum(tau1)
return(list(FDR = fdr.hat, FNDR=fndr.hat, FPR = fpr.hat, FNR=fnr.hat) )
}
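## Illustrative usage sketch (added): error-rate estimates when component
## idxNotDiff of a fitted model `m` (see the mixalg() sketch above) plays
## the role of the "null" (not differential) observations.
if (FALSE) {
  rates <- getFDR(m, threshold = 0.7, idxNotDiff = 1)
  rates$FDR   # estimated false discovery rate at this threshold
}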
simDat <- function(mix){
#simulate data for parametric bootstrap & compareMixModels
#if weights are != 1, the other parameters need to be == 1!!! (--> otherwise, the data cannot be unpacked reasonably!)
n= [email protected] #number of observations
k= [email protected]
myunif <- runif(n)
z <- matrix(0, ncol=k, nrow=n)
for (i in 1:k)
if (i==1) z[,i] <- as.integer(sum(mix@p[1]) > myunif )
else z[,i] <- as.integer(sum(mix@p[1:(i-1)]) < myunif & (sum(mix@p[1:i]) >= myunif) )
if (k==1) z <-matrix(1, ncol=k, nrow=n)
#print (z)
# z is a matrix that randomly assigns each observation (row) to a component (column)
# --> each row consists of 1x1 and (k-1)x0
x = matrix(0,[email protected], nrow=n)
if (mix@family == "gaussian"){
if (sum(mix@dat[,4] == rep(1, n) ) == n) #dat[,4] only consists of ones
tmpvar <- mix@component.var
else tmpvar <- mix@dat[,4]
for (i in 1:k)
x[,i] <- z[,i] * rnorm(n, mean=mix@t[i], sd= sqrt(tmpvar)) #dat[,4] holds variances --> take sqrt exactly once
res = apply(x,1,sum) #a row consists of one number != 0 and (k-1) numbers == 0
}
else if (mix@family == "poisson"){
Ei <- mix@dat[,3]
for (i in 1:k)
x[,i] <- z[,i] * rpois(n, mix@t[i]*Ei)
res = apply(x,1,sum) #a row consists of one number != 0 and (k-1) numbers == 0
}
if (mix@family == "binomial"){
tmpsize <- mix@dat[,3]
for (i in 1:k)
x[,i] <- z[,i] * rbinom(n, tmpsize, mix@t[i])
res = apply(x,1,sum) #a row consists of one number != 0 and (k-1) numbers == 0
}
return(res)
}
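## Illustrative usage sketch (added): draw one synthetic data set from a
## fitted model `m`, exactly as the bootstrap routines above do internally.
if (FALSE) {
  xsim <- simDat(m)   # numeric vector of length m@num.obs
  hist(xsim, breaks = 30)
}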
summary.CAMAN.object <- function(object, ...){
cl <- match.call()
cat("Summary of a Computer Assisted Mixture Analysis: \n \n")
cat("Data consists of", [email protected], "observations (rows). \n")
cat("The Mixture Analysis identified", [email protected], "component")
if (length([email protected]) >0) cat("s")
cat(" of a", object@family, "distribution: \n \n")
details <- matrix(0, [email protected], ncol=2)
descr_var <- ""
if (object@family == "gaussian") {
tmp <- "mean"
if (object@is_metaAnalysis == 0) descr_var <- paste("component variance:", [email protected], "\n")
}
else if (object@family == "poisson") tmp <- "lambda"
else if (object@family == "binomial") tmp <- "prob"
colnames(details) = c("p", tmp)
rownames(details) = 1:[email protected]
details[,1] <- object@p
details[,2] <- object@t
cat("DETAILS:\n")
print(details)
cat(descr_var)
cat("\n \n")
cat("Classification:\n")
if ([email protected] > 20 || [email protected]>8) cat("The classification matrix is too big to visualize here. \n type ",as.character(cl[2]),"@prob to watch the probability matrix \n or type ",as.character(cl[2]),"@classification to watch the \n class labeling (each row was assigned to its most likely component!)\n", sep="")
else {
cat("Classification matrix:\n")
print(object@prob)
cat("Class labeling:\n")
print(object@classification)
}
cat("\n \nnumber of VEM-iterations done:",object@steps[1],"\n")
cat("alteration within the final VEM-iteration step:",object@finalacc[1],"\n")
cat("number of EM-iterations done:",object@steps[2],"\n")
cat("alteration within the final EM-iteration step:",object@finalacc[2],"\n \n")
cat("Log-Likelihood:",object@LL," ")
cat("BIC:",object@BIC,"\n \n")
# @otherParams = c(limit, numiter, acc, startk)
cat("User-defined parameters:\n")
cat(" max number of iterations:",object@otherParams[2],"\n")
cat(" limit for combining components:",object@otherParams[1],"\n")
cat(" threshold for converging:",object@otherParams[3],"\n")
cat(" number of grid points (startk):",object@otherParams[4],"\n")
}
#some abbreviated commands
#mixboot <- mixalg.boot
#mixalg.Boot <- mixalg.boot
#mixpboot <- mixalg.paraBoot
#mix.anova <- anova.CAMAN.object
##########################################################################
#
#
# New bivariate functions
##########################################################################
#
bivariate.EM<-function(obs1,obs2,type,data = NULL, var1, var2, corr, lambda1, lambda2,p,numiter=5000,acc=1.e-7,class){
## avoid attach/detach but keep data argument optional
if(!is.null(data)){
cl <- match.call()
varname <- function(x){sub("\\(\\)", "", deparse(x))}
obs1_name <- varname(cl[2])
obs2_name <- varname(cl[3])
stopifnot(c(obs1_name, obs2_name) %in% names(data))
obs1 <- getElement(data, obs1_name)
obs2 <- getElement(data, obs2_name)
var1_name <- varname(cl["var1"])
var2_name <- varname(cl["var2"])
if(!var1_name == "NULL" | !var2_name == "NULL"){
stopifnot(c(var1_name, var2_name) %in% names(data))
var1 <- getElement(data, var1_name)
var2 <- getElement(data, var2_name)
}
corr_name <- varname(cl["corr"])
if(!corr_name == "NULL"){
stopifnot(corr_name %in% names(data))
corr <- getElement(data, corr_name)
}
}
if(type=="bi" & all(p > 0) & all(lambda1 != 0) & all(lambda2 != 0)){
##cat("### EM-algorithm for bivariate normally distributed data")
z1 <- function(a,n,l1,l2,pro, numiter,acc){.Call("ema_versh_st", as.vector(a), as.vector(n),as.vector(l1),as.vector(l2),as.vector(pro), as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er <- z1(obs1,obs2,lambda1,lambda2,p,numiter,acc)
len<-length(er)
l<-len/7
lam1<-er[1:l]
lam2<-er[(l+1):(2*l)]
prob<-er[((2*l)+1):(3*l)]
var1<-er[((3*l)+1):(4*l)]
var2<-er[((4*l)+1):(5*l)]
corr<-er[((5*l)+1):(6*l)]
ll<-er[((6*l)+1)]
bic <- -2 * ll[1] + (3*length(prob)- 1) * log(length(obs1))
ERG<-matrix(data=c(lam1,lam2,prob,var1,var2,corr),nrow=l,ncol=6)
colnames(ERG) <- c("Lambda1","Lambda_2","Prob","Var1","Var2","Corr")
z2<-function(a,n,l1,l2,pro, numiter,acc){.Call("ema_ind_st", as.vector(a), as.vector(n),as.vector(l1),as.vector(l2),as.vector(pro), as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er2<-z2(obs1,obs2,lambda1,lambda2,p, numiter,acc)
a<-sqrt(qchisq(0.95,2))
t<-seq(0,6.28,0.01)  # 629 angles parametrizing each 95% confidence ellipse
x<-array(rep(0,629*1*l),c(629,1,l))
y<-array(rep(0,629*1*l),c(629,1,l))
z<-array(rep(0,629*2*l),c(629,2,l))
for (i in 1:l){
x[, , i]<-lam1[i]+sqrt(var1[i])*a*cos(t)
y[,,i]<-lam2[i]+sqrt(var2[i])*a*cos(t+acos(corr[i]))
}
for (i in 1:l){
z[, , i]<-c(x[,,i],y[,,i])
}
id<-er2
id<-id+1
a<-0
nn<-length(obs1)
mat<-matrix(data=c(obs1,obs2,id),nrow=nn,ncol=3)
if (class=="TRUE"){
res<-new("CAMAN.BIEM.object", RESULT=ERG ,BIC=bic,LL=ll[1],Mat=mat, Z=z,cl=er2)}
if (class=="FALSE"){
res<-new("CAMAN.BIEM.object", RESULT=ERG ,BIC=bic,LL=ll[1],Mat=mat, Z=z)}
return(res)
}
if(type=="meta" & all(p > 0) & all(lambda1 != 0) & all(lambda2 != 0)){
z3 <- function(a,n,v1,v2,l1,l2,pro, numiter,acc){
.Call("ema_meta_st", as.vector(a), as.vector(n),as.vector(v1),as.vector(v2),
as.vector(l1),as.vector(l2),as.vector(p),as.integer(numiter),as.double(acc),
PACKAGE = "CAMAN")}
er<-z3(obs1,obs2,var1,var2,lambda1,lambda2,p,numiter,acc)
len<-length(er)
l<-len/5
lam1<-er[1:l]
lam2<-er[(l+1):(2*l)]
prob<-er[((2*l)+1):(3*l)]
ll<-er[((3*l)+1)]
max_grad<-er[(4*l+1):len]
bic <- -2 * ll[1] + (3*length(prob)- 1) * log(length(obs1))
ERG1<-matrix(data=c(lam1,lam2,prob),nrow=l,ncol=3)
colnames(ERG1) <- c("lambda1","lambda2","p")
#colnames(mat) <- c("Lambda_1","Lambda_2","Prob","LL","max_grad")
z4<-function(a,n,v1,v2,l1,l2,p, numiter,acc){.Call("ema_ind_meta_st", as.vector(a), as.vector(n),as.vector(v1),as.vector(v2),as.vector(l1),as.vector(l2),as.vector(p),as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er<-z4(obs1,obs2,var1,var2,lambda1,lambda2,p, numiter,acc)
if (class=="TRUE"){
res<-new("CAMAN.BIEM.object", RESULT=ERG1 ,BIC=bic,LL=ll[1], cl=er)}
if (class=="FALSE"){
res<-new("CAMAN.BIEM.object", RESULT=ERG1 ,BIC=bic,LL=ll[1])}
return(res)
}
}
bivariate.VEM <-function(obs1,obs2,type,data = NULL, var1, var2, lambda1, lambda2,p, startk, numiter=5000,acc=1.e-7){
## avoid attach/detach but keep data argument optional
if(!is.null(data)){
cl <- match.call()
varname <- function(x){sub("\\(\\)", "", deparse(x))}
obs1_name <- varname(cl[2])
obs2_name <- varname(cl[3])
stopifnot(c(obs1_name, obs2_name) %in% names(data))
obs1 <- getElement(data, obs1_name)
obs2 <- getElement(data, obs2_name)
var1_name <- varname(cl["var1"])
var2_name <- varname(cl["var2"])
if(!var1_name == "NULL" | !var2_name == "NULL"){
stopifnot(c(var1_name, var2_name) %in% names(data))
var1 <- getElement(data, var1_name)
var2 <- getElement(data, var2_name)
}
}
if(type=="uni"){
z5<-function(a, startk, numiter,acc){.Call("vem_uni", as.vector(a),as.integer(startk),as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er<-z5(obs1, startk,numiter,acc)
len<-length(er)
l<-len/3
lam<-er[1:l]
prob<-er[(l+1):(2*l)]
ll<-er[((2*l)+1)]
bic <- -2 * ll[1] + (3*2- 1) * log(length(obs1))
mat<-matrix(data=c(lam,prob),nrow=l,ncol=2)
colnames(mat) <- c("lambda","mixing Prob")
res<-new("CAMAN.BIVEM.object", RESULT_uni=mat ,BIC=bic,LL=ll[1])
return(res)
}
if(type=="bi"){
z6<-function(a,n, startk, numiter,acc){.Call("vem_bi_sh", as.vector(a), as.vector(n),as.integer(startk), as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er<-z6(obs1,obs2, startk,numiter,acc)
len<-length(er)
l<-len/4
lam1<-er[1:l]
lam2<-er[(l+1):(2*l)]
prob<-er[((2*l)+1):(3*l)]
ll<-er[((3*l)+1)]
bic <- -2 * ll[1] + (3*length(prob)- 1) * log(length(obs1))
mat<-matrix(data=c(lam1,lam2,prob),nrow=l,ncol=3)
#colnames(mat) <- c("Lambda_1","Lambda_2","Prob")
res<-new("CAMAN.BIVEM.object", RESULT=mat ,BIC=bic,LL=ll[1])
return(res)
#res<-list("Vem for bivariate data","Lambda_1"=lam1, #"Lambda_2"=lam2,"Prob"=prob)
#res<-list("VEM algorithm for bivariate data",mat)
#print(res)
#cat("BIC : ", bic,"\n")
#cat("Log-Likelihood: ", ll[1],"\n")
}
if(type=="meta"){
z7<-function(a,n,v1,v2, startk,numiter,acc){.Call("vem_versh_meta_sh", as.vector(a), as.vector(n),as.vector(v1),as.vector(v2),as.integer(startk), as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er<-z7(obs1,obs2,var1,var2, startk,numiter,acc)
len<-length(er)
l<-len/4
lam1<-er[1:l]
lam2<-er[(l+1):(2*l)]
prob<-er[((2*l)+1):(3*l)]
ll<-er[((3*l)+1) :(4*l)]
bic <- -2 * ll[1] + (3*3- 1) * log(length(obs1))
Mat<-matrix(data=c(prob,lam1,lam2),nrow=l,ncol=3)
#colnames(mat) <- c("p","lambda1","lambda1")
#res<-list("VEM algorithm for diagnostic meta analysis", mat)
res<-new("CAMAN.BIVEM.object", RESULT_meta=Mat,BIC=bic,LL=ll[1])
return(res)
#res<-list("VEM algorithm for diagnostic meta analysis", "lambda_1"=lam1, "lambda_2"=lam2,"p"=prob)
#print(res)
#cat("BIC : ", bic,"\n")
#cat("Log-Likelihood: ", ll[1],"\n")
}
}
vem_grad<-function(obs1,obs2,type,data = NULL,var1, var2,lambda1, lambda2,p, startk,numiter=5000,acc=1.e-7){
## avoid attach/detach but keep data argument optional
if(!is.null(data)){
cl <- match.call()
varname <- function(x){sub("\\(\\)", "", deparse(x))}
obs1_name <- varname(cl[2])
obs2_name <- varname(cl[3])
stopifnot(c(obs1_name, obs2_name) %in% names(data))
obs1 <- getElement(data, obs1_name)
obs2 <- getElement(data, obs2_name)
var1_name <- varname(cl["var1"])
var2_name <- varname(cl["var2"])
if(!var1_name == "NULL" | !var2_name == "NULL"){
stopifnot(c(var1_name, var2_name) %in% names(data))
var1 <- getElement(data, var1_name)
var2 <- getElement(data, var2_name)
}
}
z14<-function(a,n, startk, numiter,acc){.Call("vem_bi_grad", as.vector(a), as.vector(n),as.integer(startk), as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er<-z14(obs1,obs2, startk,numiter,acc)
len<-length(er)
l<-len/4
lam1<-er[1:l]
lam2<-er[(l+1):(2*l)]
prob<-er[((2*l)+1):(3*l)]
grad<-er[((3*l)+1):(4*l)]
#bic <- -2 * ll[1] + (3*5- 1) * log(length(obs1))
mat<-matrix(data=c(lam1,lam2,prob,grad),nrow=l,ncol=4)
colnames(mat) <- c("Lambda_1","Lambda_2","Prob","Grad")
#res<-list("Vem for bivariate data","Lambda_1"=lam1, "Lambda_2"=lam2,"Prob"=prob, "Grad"=grad)
res<-list("VEM algorithm for bivariate data",mat)
print(res)
}
bivariate.mixalg<-function(obs1,obs2,type,data = NULL,var1, var2, corr, lambda1, lambda2,p,startk, numiter=5000,acc=1.e-7,class){
## avoid attach/detach but keep data argument optional
if(!is.null(data)){
cl <- match.call()
varname <- function(x){sub("\\(\\)", "", deparse(x))}
obs1_name <- varname(cl[2])
obs2_name <- varname(cl[3])
stopifnot(c(obs1_name, obs2_name) %in% names(data))
obs1 <- getElement(data, obs1_name)
obs2 <- getElement(data, obs2_name)
var1_name <- varname(cl["var1"])
var2_name <- varname(cl["var2"])
if(!var1_name == "NULL" | !var2_name == "NULL"){
stopifnot(c(var1_name, var2_name) %in% names(data))
var1 <- getElement(data, var1_name)
var2 <- getElement(data, var2_name)
}
corr_name <- varname(cl["corr"])
if(!corr_name == "NULL"){
stopifnot(corr_name %in% names(data))
corr <- getElement(data, corr_name)
}
}
if(type=="uni"){
z8<-function(a, startk, numiter,acc){.Call("ema_uni", as.double(a),as.integer(startk),as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er<-z8(obs1, startk, numiter,acc)
len<-length(er)
l<-len/3
lam<-er[1:l]
prob<-er[(l+1):(2*l)]
var<-er[(2*l+1):len]
matu<-matrix(data=c(lam,prob,var),nrow=l,ncol=3)
#colnames(mat) <- c("Lambda","Prob","Var")
z9<-function(a,startk, numiter,acc){.Call("ema_ind_uni", as.double(a),as.integer(startk),as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er<-z9(obs1,startk, numiter,acc)
if (class=="TRUE"){
res<-new("CAMAN.BIMIXALG.object", RESULT_uni=matu ,BIC=bic,LL=ll[1],cl=er)}
if (class=="FALSE"){
res<-new("CAMAN.BIMIXALG.object", RESULT_uni=matu ,BIC=bic,LL=ll[1])}
return(res)
}
if(type=="bi"&& lambda1==0 && lambda2 ==0 && p==0){
z10<-function(a,n,startk, numiter,acc){.Call("ema_versh_sh", as.double(a), as.double(n),as.integer(startk),as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er<-z10(obs1,obs2,startk, numiter,acc)
len<-length(er)
l<-len/7
lambda1<-er[1:l]
lambda2<-er[(l+1):(2*l)]
prob<-er[((2*l)+1):(3*l)]
var1<-er[((3*l)+1):(4*l)]
var2<-er[((4*l)+1):(5*l)]
corr<-er[((5*l)+1):(6*l)]
ll<-er[((6*l)+1)]
bic <- -2 * ll[1] + (3*length(prob)- 1) * log(length(obs1))
ERG<-matrix(data=c(lambda1,lambda2,prob,var1,var2,corr),nrow=l,ncol=6)
#colnames(ERG) <- c("Lambda1","Lambda_2","Prob","Var1","Var2","Corr")
z11<-function(a,n,startk, numiter,acc){.Call("ema_ind_sh", as.double(a), as.double(n),as.integer(startk),as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er2<-z11(obs1,obs2,startk, numiter,acc)
#par(mfrow=c(2,1))
#plot(obs1,obs2, xlab = "x1", ylab = "x2",pch=19,col="blue",cex=0.4,main=" rs12363681");
a<-sqrt(qchisq(0.95,2))
t<-seq(0,6.28,0.01)  # 629 angles parametrizing each 95% confidence ellipse
x<-array(rep(0,629*1*l),c(629,1,l))
y<-array(rep(0,629*1*l),c(629,1,l))
z<-array(rep(0,629*2*l),c(629,2,l))
for (i in 1:l){
x[, , i]<-lambda1[i]+sqrt(var1[i])*a*cos(t)
y[,,i]<-lambda2[i]+sqrt(var2[i])*a*cos(t+acos(corr[i]))
}
for (i in 1:l){
z[, , i]<-c(x[,,i],y[,,i])
}
id<-er2
id<-id+1
a<-0
nn<-length(obs1)
mat<-matrix(data=c(obs1,obs2,id),nrow=nn,ncol=3)
if (class=="TRUE"){
res<-new("CAMAN.BIMIXALG.object", RESULT=ERG ,BIC=bic,LL=ll[1],Mat=mat, Z=z,cl=er2)}
if (class=="FALSE"){
res<-new("CAMAN.BIMIXALG.object", RESULT=ERG ,BIC=bic,LL=ll[1],Mat=mat, Z=z)}
return(res)
}
if(type=="meta" && lambda1==0 && lambda2==0 && p==0){
z12<-function(a,n,v1,v2, startk, numiter,acc){.Call("ema_meta_sh", as.double(a), as.double(n),as.vector(v1),as.vector(v2),as.integer(startk),as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er<-z12(obs1,obs2,var1,var2, startk, numiter,acc)
len<-length(er)
l<-len/5
lambda1<-er[1:l]
lambda2<-er[(l+1):(2*l)]
prob<-er[((2*l)+1):(3*l)]
ll<-er[(3*l+1):(4*l)]
max_grad<-er[(4*l+1):len]
bic <- -2 * ll[1] + (3*length(prob)- 1) * log(length(obs1))
ERG1<-matrix(data=c(lambda1,lambda2,prob),nrow=l,ncol=3)
#colnames(ERG1) <- c("Lambda_1","Lambda_2","Prob")
z13<-function(a,n,v1,v2,startk, numiter,acc){.Call("ema_ind_meta_sh", as.double(a), as.double(n),as.vector(v1),as.vector(v2),as.integer(startk),as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
er<-z13(obs1,obs2,var1,var2,startk, numiter,acc)
if (class=="TRUE"){
res<-new("CAMAN.BIMIXALG.object", RESULT_meta=ERG1 ,BIC=bic,LL=ll[1],cl=er)}
if (class=="FALSE"){
res<-new("CAMAN.BIMIXALG.object", RESULT_meta=ERG1 ,BIC=bic,LL=ll[1],)}
return(res)
}
}
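## Illustrative usage sketch (added): combined VEM/EM fit for bivariate
## normal data; lambda1 = lambda2 = p = 0 requests automatic starting values,
## and class = "FALSE" suppresses the classification vector.
if (FALSE) {
  set.seed(42)
  o1 <- c(rnorm(80, 0, 1), rnorm(80, 4, 1))
  o2 <- c(rnorm(80, 0, 1), rnorm(80, 4, 1))
  bm <- bivariate.mixalg(obs1 = o1, obs2 = o2, type = "bi",
                         lambda1 = 0, lambda2 = 0, p = 0,
                         startk = 20, class = "FALSE")
  bm   # show() prints component means, weights, variances and correlation
}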
CAMANboot<-function(obs1,obs2,var1,var2,lambda11,lambda12,prob1,lambda21,lambda22,prob2,rep,data,numiter=10000,acc=1.e-7){
cl <- match.call()
varname <- function(x){sub("\\(\\)", "", deparse(x))}
obs1_name <- varname(cl[2])
obs2_name <- varname(cl[3])
stopifnot(c(obs1_name, obs2_name) %in% names(data))
obs1 <- getElement(data, obs1_name)
obs2 <- getElement(data, obs2_name)
var1_name <- varname(cl["var1"])
var2_name <- varname(cl["var2"])
if(!var1_name == "NULL" | !var2_name == "NULL"){
stopifnot(c(var1_name, var2_name) %in% names(data))
var1 <- getElement(data, var1_name)
var2 <- getElement(data, var2_name)
}
a<-matrix(nrow=(length(data[,1])),ncol=2)
k_1<-matrix(nrow=rep,ncol=(length(lambda11)*3+2))
k_2<-matrix(nrow=rep,ncol=(length(lambda21)*3+2))
#print("###Bootstrap fuer Metadaten mit Startwerten")
fun<-function(a,n,v1,v2,l1,l2,pro,numiter,acc){.Call("ema_meta_st", as.vector(a), as.vector(n),as.vector(v1),as.vector(v2),as.vector(l1),as.vector(l2),as.vector(pro),as.integer(numiter), as.double(acc), PACKAGE = "CAMAN")}
j<-0
repeat{
j<-j+1
for(i in 1:(length(data[,1]))){
z<-runif(1)
size<-length(prob1)
if (size>1){
cdf<-rep(1:(size-1))
for(m in 1:(size-1)){
cdf[1]<-prob1[1]
cdf[m+1]<-(cdf[m]+prob1[m+1])}
if (z<cdf[1]){
a[i,]<-rmvnorm(n = 1, mean=c(lambda11[1],lambda12[1]), sigma = matrix(c(var1[i],0,0,var2[i]), ncol=2))
}
for(ll in 2:size){
if (z>cdf[ll-1] && z<cdf[ll]){
a[i,]<-rmvnorm(n = 1, mean=c(lambda11[ll],lambda12[ll]), sigma = matrix(c(var1[i],0,0,var2[i]), ncol=2))
break  # the intervals are disjoint, so at most one component can match
}
}
if (z>=cdf[size-1]){
a[i,]<-rmvnorm(n = 1, mean=c(lambda11[size],lambda12[size]), sigma = matrix(c(var1[i],0,0,var2[i]), ncol=2))
}
}
if (size==1){
a[i,]<-rmvnorm(n = 1, mean=c(lambda11[1],lambda12[1]), sigma = matrix(c(var1[i],0,0,var2[i]), ncol=2))
}
}
er1<-fun(a[,1],a[,2],var1,var2,lambda11,lambda12,prob1,numiter,acc)
er2<-fun(a[,1],a[,2],var1,var2,lambda21,lambda22,prob2,numiter,acc)
len1<-length(er1)
l1<-len1/5
lambda1_1<-er1[1:l1]
lambda1_2<-er1[(l1+1):(2*l1)]
prob_1<-er1[((2*l1)+1):(3*l1)]
ll_1<-er1[(3*l1+1)]
max_grad_1<-er1[(4*l1+1)]
len2<-length(er2)
l2<-len2/5
lambda2_1<-er2[1:l2]
lambda2_2<-er2[(l2+1):(2*l2)]
prob_2<-er2[((2*l2)+1):(3*l2)]
ll_2<-er2[(3*l2+1)]
max_grad_2<-er2[(4*l2+1)]
#k_1<-matrix(nrow=3,ncol=((l*4)-1))
k_1[j,]<-matrix(data=c(lambda1_1,lambda1_2,prob_1,ll_1,max_grad_1),nrow=1,ncol=((length(lambda1_2)*3)+2))
k_2[j,]<-matrix(data=c(lambda2_1,lambda2_2,prob_2,ll_2,max_grad_2),nrow=1,ncol=((length(lambda2_2)*3)+2))
k_1<-round(x=k_1,digits=3)
k_2<-round(x=k_2,digits=3)
m1<-length(lambda1_2)*3+1
m2<-length(lambda2_2)*3+1
ii<-1:length(lambda1_1)
aa<-paste("lam1",ii,sep="")
bb<-paste("lam2",ii,sep="")
cc<-paste("prob",ii,sep="")
colnames(k_1)<-c(aa,bb,cc,"LL_1","max_grad")
i<-1:length(lambda2_1)
aaa<-paste("lam1",i,sep="")
bbb<-paste("lam2",i,sep="")
ccc<-paste("prob",i,sep="")
colnames(k_2)<-c(aaa,bbb,ccc,"LL_2","max_grad")
#colnames(k_1)<-c("lambda_1","lambda_2","prob","ll")
if(j==rep) break
}
var_1<-numeric(2*l1)
var_2<-numeric(2*l2)
corr_1<-numeric(l1)
covv_1<-numeric(l1)
corr_2<-numeric(l2)
covv_2<-numeric(l2)
S1 <- array(rep(0, 2 * 2 * l1), c(2, 2, l1))
S2 <- array(rep(0, 2 * 2 * l2), c(2, 2, l2))
for(i in 1:(l1*2)){
var_1[i]<-var(k_1[,i], use = "complete")
}
m<-1
for(i in 1:l1 ){
corr_1[i]<-cor(k_1[,m],k_1[,(m+1)], use = "complete")
covv_1[i]<-(sqrt(var_1[m])*sqrt(var_1[(m+1)]))*corr_1[i]
m=m+2}
m<-1
for(i in 1:l1 ){
S1[, , i]<-rbind(var_1[m],covv_1[i],covv_1[i],var_1[(m+1)])
m=m+2
}
m<-1
for(i in 1:(l2*2)){
var_2[i]<-var(k_2[,i], use = "complete")}
for(i in 1:l2){
corr_2[i]<-cor(k_2[,m],k_2[,(m+1)], use = "complete")
covv_2[i]<-(sqrt(var_2[m])*sqrt(var_2[m+1]))*corr_2[i]
m=m+2}
m<-1
for(i in 1:l2 ){
S2[, , i]<-rbind(var_2[m],covv_2[i],covv_2[i],var_2[(m+1)])
m=m+2
}
differenz<-(k_1[,m1]-k_2[,m2])  # per-replication difference of the log-likelihoods
llh<-(-2)*differenz
s_ll<-sort(llh)
res<-new("CAMAN.BOOT.object", H0=k_1, S1=S1, H1=k_2,S2=S2, LL=s_ll, Q95=quantile(s_ll,0.95), Q975=quantile(s_ll,0.975), Q99=quantile(s_ll,0.99))
return(res)
}
#some abbreviated commands
mixboot <- mixalg.boot
mixalg.Boot <- mixalg.boot
mixpboot <- mixalg.paraBoot
mix.anova <- anova.CAMAN.object
|
/scratch/gouwar.j/cran-all/cranData/CAMAN/R/CAMAN.R
|
hist.CAMAN.object <- function(x, nbreaks=NULL, mixdens=TRUE, mixdens.col="red",
return.mixdens=FALSE, data.plot=NULL, singleDistr=TRUE,
main="", xlab="", plotlegend=TRUE, ...){
object <- x
#plots an histogram and the determined distribution of the mixture model
if (is.null (nbreaks)) {nbreaks = min(60,max(30,[email protected]))}
manualData <- FALSE
if (is.null(data.plot)){
if (object@family == "poisson") data.plot <- object@dat[,1]/object@dat[,2]
else if (object@family == "binomial") data.plot <- object@dat[,1]/object@dat[,3]
else if (object@family == "gaussian") data.plot <- object@dat[,1]
}
else {manualData <- TRUE}
tmp = hist(data.plot, breaks=nbreaks, freq=FALSE, main=main, xlab=xlab,...)
idx = tmp$breaks
if(object@family != "gaussian") idx = round(idx)
x = rep(0,length(idx))
for(i in 1:[email protected]){
if (object@family == "gaussian" && class(object)[[1]] == "CAMAN.object") {
x = x + object@p[i] * dnorm(idx, mean=object@t[i], sd=sqrt([email protected]))
if(singleDistr) lines(idx, object@p[i] * dnorm(idx, mean=object@t[i], sd=sqrt([email protected])), lwd=1, lty=1)
}
else if (object@family == "poisson"){
x = x + object@p[i] * dpois(idx,object@t[i])
if(singleDistr) lines(idx, object@p[i] * dpois(idx,object@t[i]), lwd=1, lty=2)
}
else if (object@family == "gaussian" && class(object)[[1]] == "CAMAN.glm.object"){
x = x + object@p[i] * dnorm(idx, mean=object@t[i], sd=sqrt(object@residVar))
if(singleDistr) lines(idx, object@p[i] * dnorm(idx, mean=object@t[i], sd=sqrt(object@residVar)), lwd=1, lty=2)
}
}
lines(idx, x, col=mixdens.col, lwd=2)
if (return.mixdens || manualData) return(x)
if (plotlegend){
if(singleDistr) legend("topright",c("mixture density", "single components"),lty=c(1,1),col=c(mixdens.col,"black"), lwd=c(2,1))
else legend("topright",c("mixture density"),lty=c(1),col=c(mixdens.col), lwd=c(2))
}
}
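## Illustrative usage sketch (added): histogram of the data overlaid with the
## fitted mixture density; `m` is a CAMAN.object as returned by mixalg()
## (see the sketch in CAMAN.R above).
if (FALSE) {
  hist(m, nbreaks = 40, mixdens = TRUE, singleDistr = TRUE)
}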
plot.CAMAN.BIEM.object <- function(x, ellipse = TRUE, ...){
object<-x
if (!ellipse){
plot(object@Mat[,1] ,object@Mat[,2] , xlab = "x1", ylab = "x2",pch=19,col="blue",cex=0.4,main="scatter plot");}
if (ellipse){
colors <- c("red","green", "blue","brown","yellow","black")
plot(object@Mat, xlab = "x1", ylab = "x2", type = "n",main="scatter plot with ellipse")
for (a in 1:length(object@RESULT[,1])){
points(object@Mat[object@Mat[,3] == a, ], col = colors[a], pch = 19, cex = 0.4)
points(object@Z[,,a],col = colors[a],cex=0.4, pch=19)}
}
return(invisible(NULL))
}
plot.CAMAN.BIMIXALG.object <- function(x, ellipse = TRUE, ...){
object<-x
if (!ellipse){
plot(object@Mat[,1] ,object@Mat[,2] , xlab = "x1", ylab = "x2",pch=19,col="blue",cex=0.4,main="scatter plot");
}
if (ellipse){
colors <- c("red","green", "blue","brown","yellow","black")
plot(object@Mat, xlab = "x1", ylab = "x2", type = "n",main="scatter plot with ellipse")
for (a in 1:length(object@RESULT[,1])){
points(object@Mat[object@Mat[,3] == a, ], col = colors[a], pch = 19, cex = 0.4)
points(object@Z[,,a],col = colors[a],cex=0.4, pch=19)}
}
}
|
/scratch/gouwar.j/cran-all/cranData/CAMAN/R/CAMAN_plots.R
|
setClass("CAMAN.object", representation(dat="matrix", family="character",
LL="numeric", num.k="numeric", p="numeric", t="numeric", component.var = "numeric",
prob="matrix", classification = "numeric", num.obs="numeric", steps = "numeric",
otherParams = "numeric", BIC = "numeric", VEM_result = "matrix", finalacc = "numeric", cl = "call", is_metaAnalysis = "numeric"))
setClass("CAMAN.VEM.object", representation(dat="matrix", family="character",
LL="numeric", num.k="numeric", startk="numeric", p="numeric", t="numeric",
num.obs="numeric", steps = "numeric", BIC = "numeric", finalacc = "numeric",
otherParams = "numeric", cl = "call", is_metaAnalysis = "numeric", grid="data.frame", totalgrid="data.frame"))
setClass("CAMAN.glm.object", representation(dat="data.frame", family="character",
LL="numeric", num.k="numeric", p="numeric", t="numeric", hetvar = "numeric", prob="matrix",
classification = "numeric", num.obs="numeric", steps = "numeric", otherParams = "numeric",
BIC = "numeric", coefMatrix = "data.frame", commonEffect = "numeric", cl = "call", fittedObs="numeric",
numPara = "numeric", depVar = "character", fixedVar = "character", random = "character",
form="formula", glmModel = "glm", mode="character", residVar= "numeric", idxControl="list", inputData = "data.frame"))
setClass("CAMAN.BIVEM.object", representation(RESULT="matrix", RESULT_uni="matrix",RESULT_meta="matrix",
BIC = "numeric", LL = "numeric"))
setClass("CAMAN.BIEM.object", representation( RESULT="matrix", Mat="matrix", Z="array",
BIC = "numeric", LL = "numeric",cl="numeric"))
setClass("CAMAN.BIMIXALG.object", representation(RESULT="matrix", RESULT_uni="matrix",RESULT_meta="matrix", Z="array",
BIC = "numeric", LL = "numeric",cl="numeric",Mat="matrix"
))
setClass("CAMAN.BOOT.object", representation( H0="matrix", S1="array",H1="matrix", S2="array",
LL= "numeric", Q95="numeric",Q975="numeric",Q99="numeric"))
setMethod("show", "CAMAN.object", function(object){
cat("Computer Assisted Mixture Analysis: \n \n")
cat("Data consists of", [email protected], "observations (rows). \n")
cat("The Mixture Analysis identified", [email protected], "component")
if (length([email protected]) >0) cat("s")
cat(" of a", object@family, "distribution: \n \n")
n <- [email protected]
details <- matrix(0, [email protected], ncol=2)
descr_var =""
if (object@family == "gaussian") {
tmp <- "mean"
if (object@is_metaAnalysis == 0) descr_var <- paste("component variance:", [email protected], "\n")
}
else if (object@family == "poisson") tmp <- "lambda"
else if (object@family == "binomial") tmp <- "prob"
colnames(details) = c("p", tmp)
rownames(details) = 1:[email protected]
details[,1] <- object@p
details[,2] <- object@t
cat("DETAILS:\n")
print(details)
cat(descr_var,"\n")
cat("Log-Likelihood:",object@LL," ")
cat("BIC:",object@BIC,"\n")
})
setMethod("show", "CAMAN.VEM.object", function(object){
cat("Computer Assisted Mixture Analysis (VEM): \n \n")
cat("Data consists of", [email protected], "observations (rows). \n")
cat("The VEM-algorithm identified", nrow(object@grid), "grid point")
if (length([email protected]) >0) cat("s")
cat(" with positive support \n \n")
n <- [email protected]
print(object@grid)
cat("Log-Likelihood:",object@LL," ")
cat("BIC:",object@BIC,"\n")
})
setMethod("show", "CAMAN.glm.object", function(object){
cat("Computer Assisted Mixture Analysis with covariates: \n \n")
cat("Data consists of", [email protected], "observations (rows). \n")
cat("The Mixture Analysis identified", [email protected], "component")
if (length([email protected]) >0) cat("s")
cat(" of a", object@family, "distribution: \n \n")
n <- [email protected]
cat("mixing weights:\n")
p_tmp <- object@p
names(p_tmp) = paste("comp.", 1:[email protected])
print(p_tmp)
cat("\n Coefficients :\n")
coefPrint <- round(object@coefMatrix[,1],3)
names(coefPrint) <- rownames(object@coefMatrix)
print(coefPrint)
if (object@family=="gaussian") cat("residual variance:", object@residVar)
cat("\n")
cat("Log-Likelihood:",object@LL," ")
cat("BIC:",object@BIC,"\n")
})
setMethod("show","CAMAN.BIVEM.object", function(object){
cat("Computer Assisted Mixture Analysis (BIVEM): \n \n")
if(length(object@RESULT_meta) > 0){
cat("VEM algorithm for diagnostic meta analysis: \n \n ")
colnames(object@RESULT_meta)<- c("Lambda1","Lambda2","Prob")
cat("RESULT_meta: \n \n" )
print(object@RESULT_meta[,])}
else if(length(object@RESULT) > 0){
cat("Vem for bivariate data: \n \n")
colnames(object@RESULT)<- c("Lambda_1","Lambda_2","Prob")
cat("RESULT: \n \n" )
print(object@RESULT[,])
}
else if(length(object@RESULT_uni) > 0){
cat("Vem for univariate data: \n \n")
colnames(object@RESULT_uni)<- c("Lambda_1","Prob")
cat("RESULT_uni: \n \n" )
print(object@RESULT_uni[,])
}
cat("\n","Log-Likelihood:",object@LL,"\n")
cat("\n","BIC:",object@BIC,"\n")
}
)
setMethod("show","CAMAN.BIEM.object", function(object){
cat("Computer Assisted Mixture Analysis (BIEM): \n \n")
if(length(object@Mat) > 0){
cat("EM-algorithm for bivariate normally distributed data: \n \n")
colnames(object@RESULT) <- c("Lambda1","Lambda2","Prob","Var1","Var2","Corr")
cat("RESULT: \n \n" )
print(object@RESULT[,])
if (length(object@cl) > 0) cat("\n","cl:","Classification of bivariate data with starting values","\n",object@cl)
cat("\n","LL:",object@LL,"\n")
cat("\n","BIC:",object@BIC,"\n")
}
else {
cat("EM-algorithm for bivariate diagnostic meta analysis: \n \n")
colnames(object@RESULT) <- c("Lambda1","Lambda2","Prob")
cat("RESULT: \n \n" )
print(object@RESULT[,])
if (length(object@cl) > 0) cat("\n","cl:","Classification of meta data with starting values","\n",object@cl)
}
cat("\n","LL:",object@LL,"\n")
cat("\n","BIC:",object@BIC,"\n")
})
setMethod("show","CAMAN.BIMIXALG.object", function(object){
cat("Computer Assisted Mixture Analysis (BIMIXALG): \n \n")
if(length(object@RESULT) > 0){
cat("Combination of VEM- and EM-algorithm for bivariate normally distributed data: \n \n")
colnames(object@RESULT) <- c("Lambda1","Lambda_2","Prob","Var1","Var2","Corr")
cat("RESULT: \n \n" )
print(object@RESULT[,])
if (length(object@cl) > 0)
cat("\n","cl:","Classification of bivariate data ","\n",object@cl,"\n")
}
else if (length(object@RESULT_meta) > 0){
cat("Combination of VEM- and EM-algorithm for bivariate diagnostic meta analysis: \n \n")
colnames(object@RESULT_meta) <- c("Lambda1","Lambda2","Prob")
cat("RESULT_meta: \n \n" )
print(object@RESULT_meta[,])
if (length(object@cl) > 0) cat("\n","cl:","Classification of meta data",object@cl,"\n")}
else if(length(object@RESULT_uni) > 0){
cat("Combination of VEM- and EM-algorithm for univariate data : \n \n")
colnames(object@RESULT_uni)<- c("Lambda_1","Prob","Var")
cat("RESULT_uni: \n \n" )
print(object@RESULT_uni[,])
if (length(object@cl) > 0) cat("\n","Classification of univariate data ",object@cl,"\n")
}
cat("\n","LL:",object@LL,"\n")
cat("\n","BIC:",object@BIC,"\n")
})
setMethod("show","CAMAN.BOOT.object", function(object){
cat("Computer Assisted Mixture Analysis (BOOT): \n \n")
cat("Parametric Bootstrap: \n \n")
#colnames(object@H0) <- c("Lambda1","Lambda_2","Prob","Var1","Var2","Corr")
print(object@H0[,])
cat("\n","Covariance matrix:","\n")
print(object@S1)
print(object@H1)
cat("\n","Covariance matrix:","\n")
print(object@S2)
cat("\n","Log-Likelihood:",object@LL,"\n")
cat("\n","Quantile 0.95 :",object@Q95,"\n")
cat("\n","Quantile 0.975 :",object@Q975,"\n")
cat("\n","Quantile 0.99 :",object@Q99,"\n")
}
)
|
/scratch/gouwar.j/cran-all/cranData/CAMAN/R/classes.R
|
#
#`!`<- function(x)
# if (inherits(x, "character") == FALSE)
# .Primitive("!")(x) else invisible(x)
mixcov <- function(dep,fixed,random="",data,k,weight=NULL, pop.at.risk=NULL,
var.lnOR=NULL, family="gaussian", maxiter=50,
acc=10^-7, returnHomogeneousModel = FALSE)
{
stopifnot(family %in% c("gaussian", "poisson")) # disable binomial for now as it is not working as intended
cl <- match.call()
nn <- nrow(data)
if (!is.null(pop.at.risk) && family=="binomial"){
binomDat <- cbind(observed=data[[which(colnames(data)==dep)]],
popRisk=data[[which(colnames(data)==pop.at.risk)]]-data[[which(colnames(data)==dep)]])
dep="binomDat"
}
if (is.null(pop.at.risk)) pop.at.risk <- rep(1, nn)
else {
if (is.character(pop.at.risk) ) pop.at.risk = data[,which(colnames(data)==pop.at.risk)]
}
if (is.null(var.lnOR)) var.lnOR <- rep(1, nn)
else {
if (is.character(var.lnOR) ) var.lnOR = data[,which(colnames(data)==var.lnOR)]
}
#if (is.null(weight)) pop.at.risk <- rep(1, nn)
# Usual homogeneous model
form<-as.formula(paste(paste(dep,"~"),paste(fixed,collapse="+"))) #dependencies for linear model
m<-glm(form,family=family,weights=weight,data=data,x=TRUE,na.action=na.omit, offset=log(pop.at.risk)) #compute linear model
#y0 <- as.vector(model.extract(model.frame(formula(m), data = data), response))
ixDep <- which(names(data) == dep)
ixFixed <- which(names(data) %in% fixed)
ixRandom <- which(names(data) %in% random)
y <- data[,ixDep]
y0 <- y
ixColSort <- c(ixDep, ixFixed, ixRandom )
idxControl_dep <- 1; names(idxControl_dep) = dep;
idxControl_fixed <- integer(0)
idxControl_Random <- integer(0)
if(length(ixFixed)>0) {idxControl_fixed <- 2:(2+length(ixFixed)-1); names(idxControl_fixed) = names(data)[ixFixed];}
if(length(ixRandom)>0) {idxControl_Random <- 2:(2+length(ixRandom)-1); names(idxControl_Random) = names(data)[ixRandom];}
idxControl <- list(ixDep = idxControl_dep, ixFixed=idxControl_fixed, ixRandom=idxControl_Random)
colnmes = names(data)[ixColSort]
data <- as.data.frame(data[,ixColSort]) #rearrange data so that the dependent variable is in the first column
#idxControl has the indices of the rearranged data!!
names(data) = colnmes
mix_data <- data.frame(data[,1], rep(1,nn), pop.at.risk, var.lnOR)
#no weights
if(is.null(weight) && family=="gaussian")
{
dfg1 <- m$df.residual
wt0<- deviance(m)/dfg1
}
else wt0<-1./weight
pre<-fitted(m) #generate data with homogeneous model m
#compute Log-Likelihood of the homogeneous model
logl0=0
if (family=="gaussian")
logl0<-sum(log(dnorm(y0,pre,sqrt(wt0))))
else if (family =="poisson")
logl0<-sum(log(dpois(y0,pre)))
# cat("\n", "Fit of the usual model:", "\n")
# print(summary(m))
# cat("\n", "resid.variance:", round(wt0, 5))
BIC_homo <- -2*logl0+log(nn)*length(coef(m))
LL_homo <- logl0
#print(logl0)
if(k==1)
{
return(list(m, BIC=BIC_homo, LL=LL_homo)) #only one component
}
x0<-m$x
#form<-as.formula(paste(paste(dep,"~"),paste("Z")))
form <- paste(paste(dep,"~",sep=""),paste("Z+",collapse="+"))
form <- paste(form,paste(fixed,collapse="+"))
### form <- paste(fixed[fixed!="1"], collapse="+")
if((random[1] !="") || (length(random)>1)){
random <- random[random!="1"] #cut out the intercept
form_rand<-paste("Z/",random,sep="",collapse="+")
form<-paste(form,form_rand,sep="+")
}
form<-as.formula(paste(form,"-1"))
# starting values
p <- rep(1/k,k) #components are equal distributed
ixCols_fixedEffects = which(colnames(data) %in% fixed);
ixCols_randomEffects = which(colnames(data) %in% random);
if ((length(fixed)==1)&&(fixed =="1")) numPara_fixed<-0 #no fixed effects
else if (sum(sapply(data[,ixCols_fixedEffects], is.factor))== 0) numPara_fixed<-length(fixed) ##?? numPara_fixed<-length(fixed)-1 or sum(fixed!="1")
else{
if (length(ixCols_fixedEffects)==1) ixFactorsFixed = which(lapply(data, is.factor)[[ixCols_fixedEffects]])
else ixFactorsFixed = which(unlist(lapply(data, is.factor)[ixCols_fixedEffects]))
#the if clause is just needed to separate between [[single_idx]] and [several_idx]
#number_of_levels_Fixed = unlist(lapply(data, function(xx){length(levels(xx))}))[ixCols_randomEffects[ixFactorsRandom]]
number_of_levels_Fixed = unlist(lapply(data, function(xx){length(levels(xx))}))[ixCols_fixedEffects[ixFactorsFixed]]
numPara_fixed <- (length(fixed)-length(ixFactorsFixed)) + sum(number_of_levels_Fixed -1)
}
if((length(random) == 1) && (random == "")) numPara_random <- 0 #no random effects
else if (sum(sapply(data[,ixCols_randomEffects], is.factor))== 0) numPara_random<-k*length(random)
#--> there were just numeric (no factorized) data, so, numPara_random = k*nRandom
else{ if (length(ixCols_randomEffects)==1) ixFactorsRandom = which(lapply(data, is.factor)[[ixCols_randomEffects]])
else ixFactorsRandom = which(unlist(lapply(data, is.factor)[ixCols_randomEffects]))
#the if clause is just needed to separate between [[single_idx]] and [several_idx]
# cat("ixFactorsRandom", ixFactorsRandom)
number_of_levels_Random = unlist(lapply(data, function(xx){length(levels(xx))}))[ixCols_randomEffects[ixFactorsRandom]]
numPara_random <- k*(length(random) - length(ixFactorsRandom)) #no factors ... just k parameter for a factor
numPara_random <- numPara_random + sum((number_of_levels_Random-1)*k)
# -1 needed because in the resulting Model, the last factor is considered to be the intercept
}
numPara<-numPara_fixed+k+numPara_random #total no. of parameters
b <- seq(min(y0), max(y0), length.out=k) #parameter initialization: spread starting intercepts over the range of y
## b[1:k] -> estimated intercepts of the components (starting values)
if (numPara>k) b[(k+1):numPara] = 0
## b[(k+1):numPara] -> starting values for the other parameters
# obtain solution of EM-algorithm
mem<-mix.perform_glm(form,data,k,p,y,b,var=wt0,family=family,
maxiter=maxiter, acc=acc, expected = pop.at.risk)
logem<-mem$logl
p <- mem$p
#wp <- p[iii]
x <- mem$x
xf <- mem$xf
m1 <- mem$m1
steps <- mem$n_iter
# cat("\n", " Fit of the ", round(k, 2), "-component mixture model:", "\n")
# cat("\n", "coefficients:", "\n")
coefMatrix <- coefficients(summary(mem$m1))
coefMatrix[, 2] <- coefMatrix[, 2] * sqrt(2)
coefMatrix[, 3] <- coefMatrix[, 1]/coefMatrix[, 2]
#extract information out of the coefMatrix. We make use of the property
#that the first rows are the intercepts, then the fixed effects are
if (nrow(coefMatrix)!= numPara)
warning("NUMBER OF PARAMETERS SEEMS TO BE INCONSISTENT!!")
is.intercept = rep(NA, nrow(coefMatrix)); is.fixed = rep(NA, nrow(coefMatrix));
is.random = rep(NA, nrow(coefMatrix));
#if the value is unequal to NA, the value indicates the membership to a component
is.intercept[1:k] = 1:k
if (numPara_fixed>0) is.fixed[(k+1):(k+numPara_fixed)] = TRUE # member of all components! (--> fixed)
if (numPara_random >0 )
is.random[(k+numPara_fixed+1):(k+numPara_fixed+numPara_random)] = rep(1:k, length.out=numPara_random)
coefMatrix <- data.frame(coefMatrix, is.intercept=is.intercept, is.fixed=is.fixed, is.random=is.random)
coef<-coef(mem$m1)
meancoef <- 0
for (i in 1:k)
    meancoef <- suppressWarnings(meancoef + p[i] * (coefMatrix[which(coefMatrix$is.intercept==i),1] +
      sum(coefMatrix[which(coefMatrix$is.random==i),1] * as.numeric(mean(data[idxControl$ixRandom])), na.rm=TRUE) +
      sum(coefMatrix[which(coefMatrix$is.fixed),1] * as.numeric(mean(data[idxControl$ixFixed])), na.rm=TRUE)))
  #warnings need to be suppressed because of factor covariates: mean(factor) is NA,
  #so factor covariates have no influence on the common effect
hetvar <- 0
for (i in 1:k)
    hetvar = hetvar + p[i]*((meancoef - (coefMatrix[which(coefMatrix$is.intercept==i),1] +
      mean(coefMatrix[which(coefMatrix$is.random==i),1]) + mean(coefMatrix[which(coefMatrix$"is.fixed"),1])))^2)
  hetvar <- sqrt(mean(rep(p,nn) * (as.vector(fitted(m1)) - meancoef)^2)) # TODO: check whether hetvar is correct; the sqrt looks questionable
  #meancoef <- mean(as.vector(fitted(m1))[classification_tmp]) # TODO: check whether meancoef is correct
residVar <- as.numeric(NA)
if (family=="gaussian"){
residVar <- mem$residVar
}
#hetvar = sum(p * (meancoef - coef)^2)
pPosteriori<-matrix(mem$pPosteriori,nrow=nn,ncol=k)
resultObj <- new("CAMAN.glm.object",dat=mix_data, family=family, LL=mem$logl,
num.k=k, p=p, t=coef[1:k], num.obs = nn, steps=steps,
otherParams = c(maxiter, acc), BIC=-2*logem+log(nn)*(length(b)+k-1),
commonEffect = meancoef, hetvar = hetvar, coefMatrix=coefMatrix,
numPara=numPara, cl= cl, depVar=dep, fixedVar = fixed, random = random,
form=form, glmModel=m1, residVar = residVar, idxControl=idxControl, inputData=data)
posterior_matrix <- mix.densDistr(resultObj)
  resultObj@classification = as.numeric(apply(posterior_matrix, 1, which.max))
  resultObj@prob <- posterior_matrix
resultObj@fittedObs <- fitted(m1)[resultObj@classification + ((0:(nn-1))*k)]
# cat("fittedObs", resultObj@fittedObs)
#resultObj@hetvar = (0:(nn-1))*k + resultObj@classification
# cat("sumS1",sum(mem$s1))
if (returnHomogeneousModel)
return(list(mixModel = resultObj, homoModel = list(lm=m, LL=LL_homo, BIC=BIC_homo) ) )
return(resultObj)
}
########################################
mix.perform_glm <- function(form,data,k,p=NULL,y,b=NULL,
expected=NULL, var=NULL,weight=NULL,family="gaussian",
shuffle=FALSE, maxiter=30, acc=10^-7)
## PD: argument shuffle is redundant right now, but doesn't hurt
{
nn<-length(data[,1]) #length of !!non-expanded!! data
# data augmentation
  ii<-rep(1:nn,each=k)  #e.g.: k = 3 --> ii = 111222333...
  #y<-y0[ii]
  iii<-rep(1:k,nn)      #e.g.: k = 3 --> iii = 123123123...
# model
dataExpanded <- data.frame(data[ii,]) #expand the data with factor k
names(dataExpanded) <- names(data)
expected = expected[ii]
  dataExpanded$Z<-as.factor(rep(1:k,nn)) #add a categorical variable for group assignment
y<-y[ii] #expand the outcome
#set weights
#if(is.null(weight)) wt<-rep(1,nn)
#else wt<-residVar[ii]
#set initial weights
if(is.null(weight)){
residVar<-var
}
else residVar<-1/weight
ii<-rep(1:nn,each=k) #get the indices 111222333
  wt<-rep(1,nn) #set weights
# create starting values
grad<-rep(10,k)
  iii<-rep(1:k,nn)      #e.g.: k = 3 --> iii = 123123123...
  pPosteriori <- p[iii] #expand component weights
pre <- b[iii]
delta_acc <- 10
continue_iterate <- TRUE
n_shuffled <- 0
#while (delta_acc > 10^-6){
while (continue_iterate){
n_shuffled <- n_shuffled +1
#compute matrix of densities of the corresponding distribution
xf <- computeDensities(family, y, pre, residVar, k, nn)
#calculate mixture density
    s1<- apply(xf*pPosteriori, 2,sum) #mixture density
# calculate new weights
#pPosteriori <- as.vector(pPosteriori*xf/s1[ii]) #posteriori
pPosteriori <- as.vector(exp(log(pPosteriori)+log(xf)-log(s1[ii]))) #posteriori
    #wt --> study-specific weights that are passed in with the data (argument weight)
dataExpanded$wgt <- pPosteriori/wt
dataExpanded$expect <- log(expected)
wgt <- pPosteriori/wt ## avoids CRAN policy problems with "no visible binding for global variable"
    expect <- log(expected) ## ditto
# model
if(family=="poisson") b[1:k]=log(b[1:k])
diffLL<- 10 #starting value...
n_iter <- 0
    while (diffLL > acc)
{
n_iter <- n_iter + 1
# cat ("diffLL=", diffLL, "\n")
# new coefficients
m1<-glm(form,family=family,weights=wgt,
start=b,x=TRUE,
data=dataExpanded, offset=expect)
b<-coef(m1)
if(is.null(weight) && family=="gaussian"){
        residVar <- deviance(m1)/nn #residual variance
}
pre<-fitted(m1)
#compute matrix of densities of the corresponding distribution
xf <- computeDensities(family, y, pre, residVar, k, nn)
# new mixing weights
fitW <- glm(pPosteriori~Z, family = gaussian, data = dataExpanded,
na.action = na.omit)
wp <- predict(fitW)
#calculate mixture density
s1 <- apply(xf*wp, 2,sum)
p <- wp[1:k]
# calculate new weights
pPosteriori <- as.vector(wp*xf/s1[ii])
for(i in 1:k) grad[i] <- sum(xf[i,]/s1)/nn
dataExpanded$wgt <- pPosteriori/wt
logl <- sum(log(s1))
diffLL <- abs(max(grad)-1)
# cat("diffLL_after =", diffLL)
logl0 <- logl
}
x <- m1$x
# emgb
## PD: I removed the complete shuffle block since it isn't working.
# if (shuffle){
# if(is.null(weight))wt<-residVar
#
# s1<-apply(wp*xf,2,sum)
# tmax<-my_grad(y0,s1,wt,x0,kk=40,family)
# if(family=="gaussian")
# temp<-dnorm(y0,tmax,sqrt(wt))
# else if (family=="poisson")
# temp<-dpois(y0,tmax)
# # cat("\n","SA max", tmax,"\n")
#
# for(i in 1:k){
# llnu[i]=sum(log(s1-p[i]*xf[i,]+p[i]*temp))
# }
# ix<-which.max(llnu)
# coef[ix]<-tmax
# b<-coef(m1)
# pre<-x%*%b
# xf <- computeDensities(family, y, pre, residVar, k, nn)
# s1<- apply(xf*wp, 2,sum)
# tt<-exp(log(wp)+log(xf))
# logs1<-apply(tt,2,lse)
#
# wt<-residVar ###???
# residVar<-wt
# delta_acc <- abs(oldlog - logl)
#        if ((delta_acc < 10^-6) || (n_shuffled >= maxiter)) continue_iterate = FALSE  #convergence criterion fulfilled
#        else continue_iterate = TRUE #convergence criterion not fulfilled --> continue!
# oldlog<-log1
# }
#    else continue_iterate = FALSE #there was no shuffling, so just make one iteration
continue_iterate = FALSE ## PD: need to drop this line if shuffling is reimplemented!
}
#cat("residualvarianz: ", residVar)
suppressWarnings(return(list(m1 = m1,p=p,pPosteriori = pPosteriori,xf = xf,x=x,logl = logl,
residVar = residVar,n_iter = n_iter, s1 =s1)))
}
my_grad <- function(y,s1,wt,x,kk=20,family)
{
nn<-length(y)
min<-min(y)
max<-max(y)
if(family=="poisson")
{
max<-log(max)
min<-log(min)
}
  #step<-(max-min)/(kk-1)
  #t<-seq(min,max,by=step)
grad<-matrix(0,kk,kk)
t<-matrix(0,kk,kk)
count<-0
co<-rep(0,2)
a<-seq(min,max,len=kk)
b<-seq(0.05,0.1,len=kk)
maxi<--1000
for(j in 1:kk)
{
for(i in 1:kk)
{
co[1]<-a[i]
co[2]<-b[j]
eta<-x%*%co
if(family=="gaussian")
xf<-dnorm(y,eta,sqrt(wt))
else if(family=="poisson")
xf<-dpois(y,exp(eta))
grad[j,i]<-sum(xf/s1)/nn
if(grad[j,i] > maxi)
{
maxi<-grad[j,i]
imax<-i
jmax<-j
}
}
}
op <- par(bg = "white")
persp(a,b,grad,theta=30,phi=40,col="lightblue")
co[1]<-a[imax]
co[2]<-b[jmax]
return(co)
}
computeDensities <- function(family, y, pre, residVar, k, nn){
if(family=="gaussian")
xf <- matrix(dnorm(y, pre, sqrt(residVar)), nrow = k, ncol = nn) #TODO sqrt(residVar)/k ???
if(family=="poisson")
xf <- matrix(dpois(y, pre), nrow = k, ncol = nn)
return(xf)
}
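# A minimal sketch of the layout computeDensities produces (hypothetical
# values with k = 2 components and nn = 3 observations; kept as comments
# so the file stays side-effect free):
# y   <- rep(c(1.0, 2.0, 3.0), each = 2)  # expanded outcome, length k*nn
# pre <- rep(c(0.5, 2.5), 3)              # fitted value of each component per observation
# xf  <- computeDensities("gaussian", y, pre, residVar = 1, k = 2, nn = 3)
# # xf is a k x nn matrix: row i holds the densities of all nn observations
# # under component i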
# why is the second-to-last iteration returned?
# objectShow <- function(){
# cat("\n", "Mixing weights: ", round(p, 4), "\n")
# cat("\n", "Coefficients: ", round(p, 4), "\n")
# cat("\n", "Common effect: ", meancoef, "\n")
# cat("\n", "Heterogeneity variance: ", hetvar)
# cat("\n", "Heterogeneity STD: ", sqrt(hetvar))
# cat("\n", "log-likelihood at iterate:",logem,"\n")
# cat("\n", "BIC ",-2*logem+log(nn)*(length(b)+k-1),"\n")
# }
mix.effectsOfCovariate <- function(obj, effectnme){ #for fixed effects
ixdat <- obj@idxControl$ixFixed[which(names(obj@idxControl$ixFixed) == effectnme)]
  if (is.numeric(obj@inputData[,ixdat])){ #numeric covariate: scale it by its coefficient
    ParaOfFixedEffects <- obj@coefMatrix[which(rownames(obj@coefMatrix)==effectnme),1]
    res <- ParaOfFixedEffects * obj@inputData[,ixdat]
    return(res)
  }
else{
ixCoef <- which(substr(rownames(obj@coefMatrix),1,nchar(effectnme))== effectnme)
    res <- rep(0, obj@num.obs)
for (i in ixCoef){
tmp_factor <- substr(rownames(obj@coefMatrix)[i], nchar(effectnme)+1, nchar(rownames(obj@coefMatrix)[i]))
ixWithFactor <- which(obj@inputData[,ixdat] == tmp_factor)
res[ixWithFactor] <- obj@coefMatrix[i,1]
}
return(res)
}
}
mix.densDistr <- function(mix){
# computes the probability for each observation (1..n - row of mix@dat) belonging to each component (1..k)
# returns a matrix of dimension n x k
dat <- mix@dat[,1]
  res <- matrix(ncol=mix@num.k, nrow=length(dat))
p <- mix@p
if (class(mix)[1] == "CAMAN.object"){
if (mix@family == "gaussian") {
mu <- mix@t
      mix.sd <- sqrt(mix@component.var)
      for (i in 1:mix@num.k) res[,i] <- sapply(dat,
function(x){p[i]*dnorm(x, mu[i], mix.sd ) / sum(p*dnorm(x, mu,
mix.sd ))})
}
if (mix@family == "binomial") {
prob <- mix@t
popAtRisk <- mix@dat[,3]
      for (i in 1:mix@num.k) res[,i] <- apply(cbind(dat, popAtRisk), 1,
function(x){p[i]*dbinom(x[1], x[2], prob[i]) / sum(p*dbinom(x[1],
x[2], prob))})
}
if (mix@family == "poisson") {
lambda <- mix@t
popAtRisk <- mix@dat[,3]
      for (i in 1:mix@num.k) res[,i] <- apply(cbind(dat, popAtRisk), 1,
function(x){p[i]*dpois(x[1], x[2]* lambda[i]) /
sum(p*dpois(x[1], x[2] * lambda))})
}
}
else if (class(mix)[1]== "CAMAN.glm.object"){
    obs.hat <- matrix(as.vector(fitted(mix@glmModel)), ncol=mix@num.k, byrow=TRUE)
if (mix@family == "gaussian") {
      mu <- mix@coefMatrix[1:mix@num.k,1]
mix.sd <- sqrt(mix@residVar)
      for (j in 1:mix@num.k) res[,j] <- apply(cbind(dat,obs.hat), 1,
function(x){p[j]*dnorm(x[1], x[j+1], mix.sd ) / sum(p*dnorm(x[1], x[-1],mix.sd ))})
}
if (mix@family == "poisson") {
# TODO: poisson mix.densdistr
      lambda <- mix@coefMatrix[1:mix@num.k,1]
popAtRisk <- mix@dat[,3]
      for (j in 1:mix@num.k) res[,j] <- apply(cbind(dat, popAtRisk, obs.hat), 1,
function(x){p[j]*dpois(x[1], x[2]* x[j+2]) /
sum(p*dpois(x[1], x[2] * x[-c(1:2)]))})
}
if (mix@family == "binomial") {
prob <- mix@t
popAtRisk <- mix@dat[,3]
      for (i in 1:mix@num.k) res[,i] <- apply(cbind(dat, popAtRisk), 1,
function(x){p[i]*dbinom(x[1], x[2], prob[i]) / sum(p*dbinom(x[1],
x[2], prob))})
}
}
return(res)
}
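# A hedged, standalone sketch of the posterior computation mix.densDistr
# performs for a univariate Gaussian mixture (illustrative values, no
# CAMAN objects required):
# dat <- c(4.8, 9.9, 5.2)
# p <- c(0.5, 0.5); mu <- c(5, 10); mix.sd <- 1
# post <- t(sapply(dat, function(x){
#   num <- p * dnorm(x, mu, mix.sd)  # component-wise p_j * f_j(x)
#   num / sum(num)                   # normalise to posterior probabilities
# }))
# # each row of post sums to 1 and gives P(component j | observation)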
summary.CAMAN.glm.object <- function(object, ...){
cat("Summary of a Computer Assisted Mixture Analysis with covariates: \n \n")
cat("Data consists of", [email protected], "observations (rows). \n")
cat("The Mixture Analysis identified", [email protected], "component")
if (length([email protected]) >0) cat("s")
cat(" of a", object@family, "distribution: \n \n")
n <- [email protected]
cat("mixing weights:\n")
p_tmp <- object@p
names(p_tmp) = paste("comp.", 1:[email protected])
print(p_tmp)
cat("\n Coefficients :\n")
coefPrint <- object@coefMatrix[,1:4]
print(coefPrint)
if (object@family=="gaussian") cat("residual variance:", object@residVar)
cat("\n")
cat("\n Log-Likelihood:",object@LL," ")
cat("BIC:",object@BIC,"\n")
cat("regression formular: ")
print(object@form)
cat("iteration steps:", object@steps,"\n")
cat("heterogeneity variance:", object@hetvar,"\n")
cat("common effect:", object@commonEffect,"\n \n")
cat("overview over the variables of the model:\n")
cat("dependent variable:", object@depVar,"\n")
cat("fixed effects:", object@fixedVar,"\n")
cat("random effects:", object@random,"\n")
}
|
/scratch/gouwar.j/cran-all/cranData/CAMAN/R/covariate_CAMAN.r
|
.onLoad <- function(libname, pkgname)
{
# Load dll:
library.dynam("CAMAN", pkgname, libname)
}
|
/scratch/gouwar.j/cran-all/cranData/CAMAN/R/zzz.R
|
#
# BuildGeneSets.R
#
# @author [email protected]
#
#
# Takes in a Seurat Object or expression matrix and the labels for each cell, as well as a logFC cut-off and a species designation.
# The function then uses edgeR differential expression analysis and Bioconductor's annotation data packages to build gene sets for the provided labels.
# Gene weights will also be provided and can be customized by user input.
#
#
# Inputs:
#
# -exp.data: Either the expression matrix or Seurat Object of the reference data, already normalized.
# -labels: A vector of cell type labels for the expression matrix.
# The default is the Idents of the Seurat Object.
# -cutoff.type: Should be the same length as "cutoff" and should designate which value the cutoff(s) is for. Can include "logfc","fc", and "-logp".
# "logfc" will use the log2 fold change as a cutoff
# "fc" will use the fold change as a cutoff
# "-logp" will use the -log10(p-value) as a cutoff
# -cutoff: The desired cut-off(s) for genes to include in the gene sets.
#   -species: Either "Hs", "Mm", or "Dr" for human, mouse, or zebrafish respectively.
# This is used to convert the gene symbols to ensembl IDs.
# -weight.type: Either "logfc", "fc", or "-logp". Dictates what value will be returned for gene weights.
# "logfc" will return the log2 fold change
# "fc" will return the fold change
# "-logp" will return the negative log10 of the p-value
#
# Output:
#
# A data frame of the gene set information
#
#
BuildGeneSets <- function(exp.data, labels = as.character(Idents(exp.data)), cutoff.type = "logfc", cutoff = 2, species = "Hs", weight.type = "logfc") {
#access human reference data
if (! is.matrix(exp.data)){
if (is(exp.data, "Seurat")){
colcount <- exp.data@assays$RNA@counts
}
else{
stop("The expression data provided is neither a matrix nor a Seurat Object.")
}
}
else {
colcount <- exp.data
}
if (length(labels) != ncol(colcount)){
stop("Error! There should be a label for every cell in the expression matrix.")
}
if (!all(cutoff.type %in% c("fc","logfc","-logp"))){
stop("Error! Cut-off type should be one of the following: \"fc\",\"logfc\",\"-logp\"")
}
if (length(cutoff) != length(cutoff.type)){
stop("There must be a corresponding cut-off value for each cut-off type.")
}
if ("logfc" %in% cutoff.type){
if (!is.numeric(cutoff[cutoff.type == "logfc"])|cutoff[cutoff.type == "logfc"] <= 0){
stop("Error! The logfc cut-off should be a number greater than 0.")
}
}
if ("fc" %in% cutoff.type){
if (!is.numeric(cutoff[cutoff.type == "fc"])|cutoff[cutoff.type == "fc"] <= 1){
stop("Error! The fc cut-off should be a number greater than 1.")
}
}
if ("-logp" %in% cutoff.type){
if (!is.numeric(cutoff[cutoff.type == "-logp"])|cutoff[cutoff.type == "-logp"] <= 1){
stop("Error! The -log10(p-value) cut-off should be a number greater than 1.")
}
}
if (! species %in% c("Hs","Mm","Dr")){
stop("Species provided is not valid. Please give either \"Hs\" for human, \"Mm\" for mouse, or \"Dr\" for zebrafish.")
}
if (! weight.type %in% c("logfc","fc","-logp")){
stop("Weight types need to be one of the following: \"logfc\",\"fc\", or \"-log10p\"")
}
#set labels
labs <- as.character(unique(labels))
labs <- sort(labs)
#edgeR DE analysis pipeline
v <- data.frame()
for (i in 1:length(labs)){
d <- DGEList(counts=(colcount), group = ifelse(labels == labs[i],1,0))
d <- calcNormFactors(d)
d1 <- estimateCommonDisp(d, verbose=T)
d1 <- estimateTagwiseDisp(d1)
et12 <- exactTest(d1, pair = c(1,2))
gp <- et12$table
gp <- gp[order(gp[,1], decreasing = T),]
    #apply the cut-off(s) to filter genes
if ("fc" %in% cutoff.type){
ind <- which(cutoff.type == "fc")
gp <- gp[(2^(gp[,1]))>cutoff[ind],]
}
if ("logfc" %in% cutoff.type){
ind <- which(cutoff.type == "logfc")
gp <- gp[(gp[,1])>cutoff[ind],]
}
if ("-logp" %in% cutoff.type){
ind <- which(cutoff.type == "-logp")
gp <- gp[(-log10(gp[,3]))>cutoff[ind],]
gp <- gp[(gp[,1])>0,]
}
r <- rownames(gp)
if (weight.type == "logfc"){
gw <- gp[,1]
}
if (weight.type == "fc"){
gw <- gp[,1]
gw <- 2^gw
}
if (weight.type == "-logp"){
gw <- gp[,3]
gw <- -log10(gw)
}
print(paste(labs[i],":",length(r),"genes."))
v <- rbind(v,cbind(rep(labs[i], length(r)), r,gw))
}
df <- data.frame(v)
if (species == "Hs"){
symbol <- org.Hs.egSYMBOL2EG
ensem <- org.Hs.egENSEMBL
}
if (species == "Mm"){
symbol <- org.Mm.egSYMBOL2EG
ensem <- org.Mm.egENSEMBL
}
if (species == "Dr"){
symbol <- org.Dr.egSYMBOL2EG
ensem <- org.Dr.egENSEMBL
}
symbol2entrez = mappedkeys(symbol)
# Convert to a list
symbol2entrez = as.list(symbol[symbol2entrez])
# Convert Gene Symbols to Entrez IDs
gene.symbols = (df$r)
num.ids = length(gene.symbols)
entrez.ids = rep(NA, num.ids)
for (i in 1:num.ids) {
entrez.id = gene.symbols[i]
id.index = (which(names(symbol2entrez) == entrez.id))
    if (length(id.index) > 0) {
      # only use the first mapped entrez id
      entrez.ids[i] =(symbol2entrez[[id.index]][1])
}
}
# Get the entrez gene IDs that are mapped to an Ensembl ID
entrez2ensembl = mappedkeys(ensem)
# Convert to a list
entrez2ensembl = as.list(ensem[entrez2ensembl])
num.ids = length(entrez.ids)
ensembl.ids = rep(NA, num.ids)
for (i in 1:num.ids) {
entrez.id = entrez.ids[i]
id.index = (which(names(entrez2ensembl) == entrez.id))
    if (length(id.index) > 0) {
# only use the first mapped ensembl id
ensembl.ids[i] =(entrez2ensembl[[id.index]][1])
}
}
df$ensembl_gene = ensembl.ids
colnames(df)[colnames(df) == "r"] <- "gene.symbol"
colnames(df)[colnames(df) == "V1"] <- "cell.type"
colnames(df)[colnames(df) == "gw"] <- "gene.weight"
colnames(df)[colnames(df) == "ensembl_gene"] <- "ensembl.id"
df <- df[order(df$cell.type),]
return(df)
}
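#
# Hedged usage sketch (assumes a normalized Seurat object `seu` with cell
# type labels in its Idents; the object and cut-offs are illustrative):
#
# gene.set.df <- BuildGeneSets(exp.data = seu,
#                              cutoff.type = c("logfc", "-logp"),
#                              cutoff = c(2, 1.3),  # log2FC > 2 and p < 0.05
#                              species = "Hs",
#                              weight.type = "logfc")
# head(gene.set.df)  # columns: cell.type, gene.symbol, gene.weight, ensembl.id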
|
/scratch/gouwar.j/cran-all/cranData/CAMML/R/BuildGeneSets.R
|
#
# CAMML.R
#
# @author [email protected]
#
#
# Takes in a Seurat Object of single-cell RNA-seq data and a data frame for the gene set or sets that represent genes and their gene weights.
# CAMML will then perform weighted VAM to calculate a score for each cell type in each cell.
#
# Inputs:
#
# -seurat: The single-cell RNA-seq Seurat Object, post-filtering, normalization, and scaling.
# -gene.set.df: A data frame containing ensembl ID, gene weight, and gene set membership information for each gene.
# The data frame should have specific column labels:
# "cell.type" for which gene set a gene belongs to
# "ensembl.id" for each gene's ensembl ID
# optional:
# "gene.weight" for each gene's gene weight
# "gene.symbol" for each gene's colloquial ID
#
# Output:
#
# Updated Seurat object with weighted VAM CDFs.
#
#
CAMML <- function(seurat, gene.set.df){
  if (missing(seurat)) {
    stop("Missing Seurat Object.")
  }
  len <- nrow(seurat@assays$RNA)
if (missing(gene.set.df)) {
stop("Missing gene set data frame.")
}
if(!is.data.frame(gene.set.df)){
stop("Gene set data frame is not a data frame.")
}
if (is.null(gene.set.df$cell.type)) {
stop("Missing cell types in gene set data frame. Please check your column names.")
}
if (is.null(gene.set.df$ensembl.id)) {
stop("Missing ensembl ids in gene set data frame. Please check your column names.")
}
df <- gene.set.df[!duplicated(gene.set.df),]
labs <- sort(unique(df$cell.type))
#set number of gene sets
num.sets = length(labs)
#set gene set collection to the Ensembl IDs present in each gene set
gene.set.collection = list()
for (i in 1:num.sets) {
gene.set.name = labs[i]
gene.set.rows = which(df$cell.type == gene.set.name)
gene.set.ensembl.ids = df$ensembl.id[gene.set.rows]
gene.set.collection[[i]] = gene.set.ensembl.ids
}
#set the names
names(gene.set.collection) = labs
if (grepl("^ENSG",df$ensembl.id[1])){
symbol <- org.Hs.egSYMBOL2EG
ensem <- org.Hs.egENSEMBL
cut <- 15
}
if (grepl("^ENSMUSG",df$ensembl.id[1])){
symbol <- org.Mm.egSYMBOL2EG
ensem <- org.Mm.egENSEMBL
cut <- 18
}
if (grepl("^ENSDARG",df$ensembl.id[1])){
symbol <- org.Dr.egSYMBOL2EG
ensem <- org.Dr.egENSEMBL
cut <- 18
}
symbol2entrez = mappedkeys(symbol)
# Convert to a list
symbol2entrez = as.list(symbol[symbol2entrez])
# Convert Gene Symbols to Entrez IDs
gene.symbols = rownames(seurat)
num.ids = length(gene.symbols)
entrez.ids = rep(NA, num.ids)
for (i in 1:num.ids) {
entrez.id = gene.symbols[i]
id.index = (which(names(symbol2entrez) == entrez.id))
    if (length(id.index) > 0) {
      # only use the first mapped entrez id
      entrez.ids[i] =(symbol2entrez[[id.index]][1])
}
}
# Get the entrez gene IDs that are mapped to an Ensembl ID
entrez2ensembl = mappedkeys(ensem)
# Convert to a list
entrez2ensembl = as.list(ensem[entrez2ensembl])
num.ids = length(entrez.ids)
ensembl.ids = rep(NA, num.ids)
for (i in 1:num.ids) {
entrez.id = entrez.ids[i]
id.index = (which(names(entrez2ensembl) == entrez.id))
    if (length(id.index) > 0) {
# only use the first mapped ensembl id
ensembl.ids[i] =(entrez2ensembl[[id.index]][1])
}
}
gene.set.collection = createGeneSetCollection(gene.ids=ensembl.ids,
gene.set.collection=gene.set.collection)
for (i in 1:length(gene.set.collection)){
if (!is.null(nrow(gene.set.collection[[i]]))){
gene.set.collection[[i]] <- gene.set.collection[[i]][1,]
}
names(gene.set.collection[[i]]) <- substr(names(gene.set.collection[[i]]),1,cut)
gene.set.collection[[i]] <- gene.set.collection[[i]][!duplicated(names(gene.set.collection[[i]]))]
}
#remove NA genes
df <- df[!is.na(df$ensembl.id),]
if (is.null(df$gene.weight)) {
message("gene.weights not provided, defaulting all weights to 1")
gene.w = rep(1, len)
}
else{
#build list
gene.w <- list()
#save gene weights that have a non-NA gene ID
for (j in 1:length(gene.set.collection)){
gwi <- c()
for (i in 1:length(names(gene.set.collection[[j]]))){
gwi <- c(gwi, which(df$ensembl.id==names(gene.set.collection[[j]][i])))
}
gwi <- intersect(gwi, which(df$cell.type == names(gene.set.collection)[j]))
gene.w[[j]] <- (as.double(df$gene.weight[gwi]))
}
}
#run VAM with ensembl gene set
seurat = vamForSeurat(seurat.data=seurat, gene.weights = gene.w,
gene.set.collection=gene.set.collection, center=FALSE, gamma=TRUE, sample.cov=F, return.dist=T)
seurat <- RenameAssays(seurat, VAMcdf="CAMML")
DefaultAssay(object = seurat) = "CAMML"
return(seurat)
}
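#
# Hedged usage sketch (assumes `seu` is a filtered, normalized and scaled
# Seurat object and `gene.set.df` came from BuildGeneSets or GetGeneSets;
# both names are illustrative):
#
# seu <- CAMML(seurat = seu, gene.set.df = gene.set.df)
# seu@assays$CAMML@data[1:3, 1:3]  # cell-type-by-cell score matrix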
|
/scratch/gouwar.j/cran-all/cranData/CAMML/R/CAMML.R
|
#
# ChIMP.R
#
# @author [email protected]
#
#
# Takes in a Seurat Object that has previously been scored with CAMML and weights
# the cell type scores by discretized CITE-seq markers for each cell type.
#
#
# Inputs:
#
# -seurat: a Seurat Object that has previously been run on CAMML
# -citelist: a list of all the surface markers for each cell type, named by their cell type.
# -method: "k" for k means or "q" for quantile
# -cutoff: cutoff value for quantile
# -anyMP: A vector of booleans the length of the number of cell types being evaluated regarding whether the CITE-seq weighting will take any positive marker score (TRUE) or requires all positive marker scores (FALSE)
# -greater: A vector of booleans the length of the number of CITE-seq markers being evaluated designating whether the CITE-seq weighting should support cell typing for the presence or absence of a marker.
#
# Output:
#
# A Seurat Object with a ChIMP assay of scores for each cell type in each cell.
#
#
ChIMP <- function(seurat, citelist, method = "k", cutoff = .5, anyMP = rep(T, length(rownames(seurat))), greater = rep(T, length(unlist(citelist)))){
if (missing(seurat)) {
stop("Missing Seurat Object.")
}
if (!method %in% c("k","q")){
stop("Method needs to be either \"k\" or \"q\"")
}
if (cutoff > 1 | cutoff < 0){
stop("Quantile cut-off needs to be between 0 and 1.")
}
  if(length(anyMP) != length(rownames(seurat))){
    stop("The anyMP option needs one entry per cell type.")
  }
  if (any(! anyMP %in% c(T,F))){
    stop("The anyMP entries need to be TRUE or FALSE.")
  }
if (length(citelist) != length(rownames(seurat))){
stop("CITE list length needs to equal the number of cell types")
}
if (length(unlist(citelist)) != length(greater)){
stop("Greater or less than designations are needed for each CITE-seq marker")
}
DefaultAssay(object = seurat) = "CAMML"
ChIMP <- matrix(nrow = length(rownames(seurat)), ncol = length(colnames(seurat)))
k <- 0
for (i in 1:length(rownames(seurat))){
correct <- matrix(nrow = length(colnames(seurat)))
for (j in 1:length(citelist[[i]])){
k <- k+1
ind <- which(rownames(seurat@assays$ADT) == citelist[[i]][j])
if (length(ind) < 1){
stop("Marker names not found in Seurat Object")
}
if (method == "k"){
cd8 <- kmeans(seurat@assays$ADT@data[ind,], centers = 2)
if (greater[k]){
if (median(seurat@assays$ADT@data[ind,][cd8$cluster==1]) > median(seurat@assays$ADT@data[ind,][cd8$cluster==2])){
cd8$cluster[cd8$cluster == 2] <- 0
} else{cd8$cluster <- cd8$cluster-1}
}
else{
if (median(seurat@assays$ADT@data[ind,][cd8$cluster==1]) < median(seurat@assays$ADT@data[ind,][cd8$cluster==2])){
cd8$cluster[cd8$cluster == 2] <- 0
} else{cd8$cluster <- cd8$cluster-1}
}
correct <- cbind(correct, cd8$cluster)
}
if (method == "q"){
if (greater[k]){
correct <- cbind(correct, ifelse(seurat@assays$ADT@data[ind,] > quantile(seurat@assays$ADT@data[ind,], cutoff),1,0))
}
else{
correct <- cbind(correct, ifelse(seurat@assays$ADT@data[ind,] < quantile(seurat@assays$ADT@data[ind,], cutoff),1,0))
}
}
}
correct <- correct[,-1]
if (anyMP[i]){
if (length(citelist[[i]])>1){
mcorrect <- apply(correct, 1, max)
}
else{
mcorrect <- correct
}
}
else{
if (length(citelist[[i]])>1){
mcorrect <- apply(correct, 1, min)
}
else{
mcorrect <- correct
}
}
ChIMP[i,] <- seurat@assays$CAMML@data[i,]*mcorrect
}
rownames(ChIMP) <- rownames(seurat)
colnames(ChIMP) <- colnames(seurat)
assay <- CreateAssayObject(counts = ChIMP)
seurat[["ChIMP"]] <- assay
return(seurat)
}
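#
# Hedged usage sketch (assumes `seu` carries a CAMML assay and an ADT assay
# holding the named CITE-seq markers; the marker names are illustrative):
#
# citelist <- list(c("CD3-TotalSeqB"), c("CD19-TotalSeqB"))
# names(citelist) <- rownames(seu)  # one entry per CAMML cell type
# seu <- ChIMP(seu, citelist, method = "k",
#              anyMP = rep(TRUE, length(rownames(seu))),
#              greater = rep(TRUE, length(unlist(citelist))))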
|
/scratch/gouwar.j/cran-all/cranData/CAMML/R/ChIMP.R
|
#
# GetCAMMLLabels.R
#
# @author [email protected]
#
#
# Takes in a Seurat Object of single-cell RNA-seq data that has already been scored with CAMML
# and returns cell type labels for each cell according to the chosen labelling scheme.
#
# Inputs:
#
# -seurat: The single-cell RNA-seq Seurat Object, post-filtering, normalization, and scaling, with CAMML assay.
# -labels: One of the following: "top1", "top2","top10p", or "top2xmean"
# "top1" will return the highest scoring cell type for each cell
# "top2" will return the top two highest scoring cell types for each cell
# "top10p" will return the highest scoring cell type and any other cell types with scores within 10\% of the highest score for each cell
# "top2xmean" will return all cell types with scores at least twice the mean of all cell type scores for each cell
#
# Output:
#
# Labels for each cell based on user choice
#
#
GetCAMMLLabels <- function(seurat, labels = "top1"){
if(! labels %in% c("top1", "top2","top10p","top2xmean")){
stop("Label option needs to be one of the following: \"top1\", \"top2\",\"top10p\",\"top2xmean\"")
}
if (missing(seurat)) {
stop("Missing Seurat Object.")
}
  #initialize all label options
le <- length(seurat$orig.ident)
celz <- list(c(1:le))
celztop2 <- list(c(1:le))
celzfold <- list(c(1:le))
celz2 <- list(c(1:le))
celz3 <- list(c(1:le))
celz10p <- list(c(1:le))
#find labels
for (i in 1:le){
#take data and label
dif <- data.frame(seurat@assays$CAMML@data[,i])
dif <- data.frame(dif[order(-dif[,1]), , drop = FALSE])
#for cells with no scores, skip
if (max(dif) == 0){
celztop2[[i]]<- NA
celzfold[[i]]<- NA
celz10p[[i]] <- NA
celz[[i]] <- "none"
next
}
if (length(rownames(dif)) < 2){
celz[i]<- rownames(dif)[1]
celztop2[[i]] <- data.frame(dif[1,], row.names=rownames(dif)[1])
me <- mean(dif[,1])
fold <- which(dif[,1] > me*2)
celzfold[[i]] <- data.frame(dif[fold,],row.names=rownames(dif)[fold])
inde <- which(dif > max(dif*.9))
celz10p[[i]] <- data.frame(dif[inde,],row.names=rownames(dif)[inde])
next
}
#top cells
celz[i]<- rownames(dif)[1]
dif2 <- data.frame(dif[-1,], row.names=rownames(dif)[-1])
celz2[i] <- rownames(dif2)[1]
celz3[i] <- rownames(dif2)[2]
#top2
celztop2[[i]] <- data.frame(dif[c(1:2),], row.names=rownames(dif)[c(1:2)])
#fold change
me <- mean(dif[,1])
fold <- which(dif[,1] > me*2)
celzfold[[i]] <- data.frame(dif[fold,],row.names=rownames(dif)[fold])
#percent dif
inde <- which(dif > max(dif*.9))
celz10p[[i]] <- data.frame(dif[inde,],row.names=rownames(dif)[inde])
}
if (labels == "top1"){
output <- celz
}
if (labels == "top2"){
output <- celztop2
}
if (labels == "top10p"){
output <- celz10p
}
if (labels == "top2xmean"){
output <- celzfold
}
return(output)
}
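#
# Hedged usage sketch (assumes `seu` has already been scored by CAMML):
#
# top1 <- GetCAMMLLabels(seu, labels = "top1")  # single best label per cell
# top2 <- GetCAMMLLabels(seu, labels = "top2")  # two best labels per cell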
|
/scratch/gouwar.j/cran-all/cranData/CAMML/R/GetCAMMLLabels.R
|
#
# GetGeneSets.R
#
# @author [email protected]
#
#
# Loads the gene set data frame for a previously developed gene set collection.
#
#
# Inputs:
#
# -data: One of the following: "immune.cells","skin.immune.cells","T.subset.cells", or "mouse.cells"
#
# Output:
#
# A data frame of the gene set information (cell.type, gene.symbol, gene.weight, ensembl.id)
#
#
GetGeneSets <- function(data = "immune.cells"){
if (!data %in% c("immune.cells","skin.immune.cells","T.subset.cells","mouse.cells")){
stop("Error! The dataset called doesn't exist.")
}
else{
if (data == "immune.cells"){
gene.set.df <- read.csv(paste(system.file(package = "CAMML"),"/extdata/zheng.csv", sep = ""))[,-1]
species <- "Hs"
}
if (data == "skin.immune.cells"){
gene.set.df <- read.csv(paste(system.file(package = "CAMML"),"/extdata/melanoma.csv", sep = ""))[,-1]
species <- "Hs"
}
if (data == "T.subset.cells"){
gene.set.df <- read.csv(paste(system.file(package = "CAMML"),"/extdata/tspec.csv", sep = ""))[,-1]
species <- "Hs"
}
if (data == "mouse.cells"){
gene.set.df <- read.csv(paste(system.file(package = "CAMML"),"/extdata/mouse.csv", sep = ""))[,-1]
species <- "Mm"
}
}
df <- data.frame(gene.set.df)
labs <- sort(unique(names(table(df$V1))))
if (species == "Hs"){
symbol <- org.Hs.egSYMBOL2EG
ensem <- org.Hs.egENSEMBL
}
if (species == "Mm"){
symbol <- org.Mm.egSYMBOL2EG
ensem <- org.Mm.egENSEMBL
}
symbol2entrez = mappedkeys(symbol)
# Convert to a list
symbol2entrez = as.list(symbol[symbol2entrez])
# Convert Gene Symbols to Entrez IDs
gene.symbols = (df$r)
num.ids = length(gene.symbols)
entrez.ids = rep(NA, num.ids)
for (i in 1:num.ids) {
entrez.id = gene.symbols[i]
id.index = (which(names(symbol2entrez) == entrez.id))
    if (length(id.index) > 0) {
      # only use the first mapped entrez id
entrez.ids[i] =(symbol2entrez[[id.index]][1])
}
}
# Get the entrez gene IDs that are mapped to an Ensembl ID
entrez2ensembl = mappedkeys(ensem)
# Convert to a list
entrez2ensembl = as.list(ensem[entrez2ensembl])
num.ids = length(entrez.ids)
ensembl.ids = rep(NA, num.ids)
for (i in 1:num.ids) {
entrez.id = entrez.ids[i]
id.index = (which(names(entrez2ensembl) == entrez.id))
    if (length(id.index) > 0) {
# only use the first mapped ensembl id
ensembl.ids[i] =(entrez2ensembl[[id.index]][1])
}
}
df$ensembl_gene = ensembl.ids
colnames(df)[colnames(df) == "r"] <- "gene.symbol"
colnames(df)[colnames(df) == "V1"] <- "cell.type"
colnames(df)[colnames(df) == "gw"] <- "gene.weight"
colnames(df)[colnames(df) == "ensembl_gene"] <- "ensembl.id"
df <- df[order(df$cell.type),]
return(df)
}
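#
# Hedged usage sketch (the bundled set names are the four listed above):
#
# gene.set.df <- GetGeneSets(data = "immune.cells")
# head(gene.set.df)  # same column layout as the BuildGeneSets output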
|
/scratch/gouwar.j/cran-all/cranData/CAMML/R/GetGeneSets.R
|
createStatCanVariables <- function(df){
VectorPosition <- match(c("VECTOR", "VECTEUR"), table = names(df))
indexPosition <- which(!is.na(VectorPosition)) # locate the first non-NA (i.e. where VECTOR|VECTEUR was found)
VectorPosition <- VectorPosition[indexPosition]
#Only create new variable if there is more than one column from StatCan
  #Concatenates variable names across columns to create a single column (StatCanVariable)
if(VectorPosition > 5) df$StatCanVariable <- apply(df[,c(3:(VectorPosition-1))], 1, function(x) paste(x, collapse = "; "))
else df$StatCanVariable <- df[,3]
return(df)
}
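# Minimal sketch of the concatenation step (illustrative column names; with
# VECTOR in position 6, columns 3:5 hold the StatCan descriptors):
# df <- data.frame(REF_DATE = "2020-01", GEO = "Canada",
#                  A = "Food", B = "Units", C = "Retail",
#                  VECTOR = "v123", VALUE = 1)
# apply(df[, 3:5], 1, paste, collapse = "; ")  # -> "Food; Units; Retail"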
|
/scratch/gouwar.j/cran-all/cranData/CANSIM2R/R/createStatCanVariables.r
|
downloadCANSIM <- function(cansimTableNumber, raw = FALSE, lang){
# validation of the lang parameter
# thanks to Professor Jean-Herman Guay (Université de Sherbrooke) for suggesting the inclusion of French data labels
separator = ','
if(lang == "eng") lang = "-eng"
else if(lang == "fra" || lang == "fr"){
lang = "-fra"
separator = ';'
}
else {
print("Only English (eng) and French (fra) are accepted values for lang. Defaulting to English.")
lang = "-eng"
}
temp <- tempfile() # create a temporary file to store the downloaded data
# create the url to download the CANSIM data according to the user's needs
url <- "https://www150.statcan.gc.ca/n1/en/tbl/csv/"
cansimTableNumber <- gsub('-', '', cansimTableNumber)
  cansimTableNumberString <- sprintf("%08d", as.numeric(cansimTableNumber)) #Put the correct amount of leading zeroes; paste0 uses as.character which truncates leading zeroes from integers (special thanks to Soheil Mahmoodzadeh for reporting the bug)
filename <- paste0(cansimTableNumberString, lang)
csv_filename <- paste0(cansimTableNumberString, ".csv")
url <- paste0(url, filename, ".zip")
tryCatch(
{
      download(url, temp, quiet = TRUE, mode = "wb") # from the downloader package, easily handles cross-platform https requests, wrapper for download.file
},
error=function(err){ return(-1) },
warning=function(warn){ return(-1) }
)
temp_filesize <- file.info(temp)$size
if(is.na(temp_filesize) || temp_filesize == 0) return(NA) # file is non-existent, exit prematurely
data <- read.csv(unz(temp, csv_filename), stringsAsFactors = FALSE, sep = separator, encoding = "UTF-8")
unlink(temp)
if(raw == TRUE) return(data) #if raw equals TRUE, then the raw download is returned; functionality suggested by Soheil Mahmoodzadeh
names(data) <- iconv(names(data), to='ASCII//TRANSLIT') # remove accents from variable names
data$DGUID <- NULL
data$IDENTIFICATEUR.D.UNITE.DE.MESURE <- NULL
data$UOM_ID <- NULL
data$SCALAR_FACTOR <- NULL
data$FACTEUR.SCALAIRE <- NULL
data$SCALAR_ID <- NULL
data$IDENTIFICATEUR.SCALAIRE <- NULL
data <- createStatCanVariables(data)
data$VECTOR <- NULL
data$VECTEUR <- NULL
data$COORDINATES <- NULL
data$COORDONEES <- NULL
if(lang == '-fra') suppressWarnings(data$VALEUR <- as.numeric(data$VALEUR))
else suppressWarnings(data$VALUE <- as.numeric(data$VALUE))
return(data)
}
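# Sketch of the table-number normalisation used above: dashes are removed,
# then the number is zero-padded to eight digits before building the URL.
# gsub('-', '', "12-10-0005")              # "12100005"
# sprintf("%08d", as.numeric("2020501"))   # "02020501"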
|
/scratch/gouwar.j/cran-all/cranData/CANSIM2R/R/downloadCANSIM.r
|
#' getCANSIM
#'
#' Extracts a complete CANSIM (Statistics Canada) data table
#' and converts it into a readily usable panel (wide) format.
#'
#' Geographic variables are renamed i, time variables are renamed t,
#' and all the other variables are renamed with a generic V1, V2, ..., Vn.
#' The generic variables keep the full Statistics Canada description by using a label.
#' @import reshape2 Hmisc utils
#'
#' @param cansimTableNumber - the table number we wish to retrieve from CANSIM.
#' @param showLabels - show the Statistics Canada labels after finishing extracting and converting the table, TRUE by default.
#' @param raw - download the CANSIM table as-is, skipping all processing, FALSE by default.
#' @param lang - the language in which to download the data: "eng" (default) for English or "fra" for French.
#' @return data frame containing CANSIM table.
#' @examples
#' getCANSIM("12-10-0005")
#' getCANSIM("12-10-0005", lang = 'fra')
#' @export
getCANSIM <- function(cansimTableNumber='', showLabels = TRUE, raw = FALSE, lang = 'eng'){
df <- downloadCANSIM(cansimTableNumber, raw, lang)
# temporary fix, several local and remote tests confirm that download works but CRAN server fails to connect
# to StatCan website. This fix needs to be reviewed shortly
if(typeof(df) == 'logical'){
print("Please check that you can connect to the Statistics Canada website. (e.g. https://www150.statcan.gc.ca/n1/tbl/csv/23100238-eng.zip) and or that the table number is valid (please only use the first 8 digits) and try again. ")
return(NULL)
}
if(raw == TRUE) return(df)
if(lang == 'eng') df2 <- dcast(df, df[,1] + df[,2] ~ StatCanVariable, value.var = "VALUE") #function from reshape2 package
else df2 <- dcast(df, df[,1] + df[,2] ~ StatCanVariable, value.var = "VALEUR")
df2 <- df2[order(df2[,2]),]
colnames(df2)[1] <- "t"
colnames(df2)[2] <- "i"
df3 <- labelCANSIM(df2)
if(showLabels == TRUE) print( label(df3) )
return(df3)
}
|
/scratch/gouwar.j/cran-all/cranData/CANSIM2R/R/getCANSIM.r
|
labelCANSIM <- function(df){
for(i in 1:ncol(df)){
label(df[[i]]) <- names(df[i])
if(i > 2) names(df)[i] <- paste0("V", i-2)
label(df[[1]]) <- "period"
label(df[[2]]) <- "id"
}
return(df)
}
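# Sketch of the Hmisc labelling scheme (requires Hmisc; the data frame is
# illustrative): the long StatCan description survives the generic rename.
# df <- data.frame(t = "2020-01", i = "Canada", V1 = 1)
# label(df$V1) <- "Retail trade; Unadjusted"
# label(df$V1)  # "Retail trade; Unadjusted"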
|
/scratch/gouwar.j/cran-all/cranData/CANSIM2R/R/labelCANSIM.r
|
#' mergeCANSIM
#'
#' Extracts more than one CANSIM table and produces a merged table (by ID and period).
#'
#' @import reshape2 Hmisc
#'
#' @param cansimTableVector - vector containing all the CANSIM tables to extract and merge.
#' @param keepUnmatched - if true, keeps all values, even if no match was found. FALSE by default.
#' @param showLabels - show the Statistics Canada labels after finishing extracting and converting the table, TRUE by default.
#' @param lang - the language in which to download the data: "eng" (default) for English or "fra" for French.
#' @return data frame containing CANSIM tables.
#' @examples
#' mergeCANSIM( c(2020501, 3260021) )
#' @export
mergeCANSIM <- function(cansimTableVector, keepUnmatched = FALSE, showLabels = TRUE, lang = 'eng'){
df <- getCANSIM(cansimTableVector[1], showLabels = FALSE, lang = lang)
if(typeof(df) == 'NULL'){
print("Please check that you can connect to the Statistics Canada website. (e.g. https://www150.statcan.gc.ca/n1/tbl/csv/23100238-eng.zip) and or that the table number is valid (please only use the first 8 digits) and try again. ")
return(NULL)
}
if( length(cansimTableVector) > 1){
for(i in 2:length(cansimTableVector) ){
      df2 <- getCANSIM(cansimTableVector[i], showLabels = FALSE, lang = lang)
if(typeof(df2) == 'NULL'){
print("Please check that you can connect to the Statistics Canada website. (e.g. https://www150.statcan.gc.ca/n1/tbl/csv/23100238-eng.zip) and or that the table number is valid (please only use the first 8 digits) and try again. ")
return(NULL)
}
df2 <- renameSecondCANSIM(df2, ncol(df))
df <- merge(df, df2, by = c("t", "i"), all = keepUnmatched )
}
}
df <- df[order(df$i),]
if(showLabels == TRUE) print( label(df) )
return(df)
}
|
/scratch/gouwar.j/cran-all/cranData/CANSIM2R/R/mergeCANSIM.r
|
renameSecondCANSIM <- function(df, nColumnsInFirstCansim){
for(i in 1:ncol(df)){
if(i > 2) names(df)[i] <- paste0("V", nColumnsInFirstCansim+i-2-2)
}
return(df)
}
|
/scratch/gouwar.j/cran-all/cranData/CANSIM2R/R/renameSecondCANSIM.r
|
#' searchLabels
#'
#' Helper function to search through the Hmisc labels in order to obtain the variable name mapping to their respective labels.
#' It can be particularly useful for bigger datasets for which manual exploration and visualization can be prohibitive.
#'
#' Search is case insensitive.
#'
#' @import Hmisc
#'
#' @param df - data.frame obtained using the getCANSIM function.
#' @param pattern - the string to search for in the variable labels.
#' @examples
#' df <- getCANSIM("12-10-0005")
#' searchLabels(df, "imports")
#' @export
searchLabels <- function(df, pattern = ''){
if(is.null(df)){
print("Please feed a dataframe created with the getCANSIM function.")
return()
}
for(i in 1:ncol(df)){
if(grepl(pattern, label(df[[i]]), ignore.case = TRUE)){
resultString = paste(colnames(df)[i], '-', label(df[[i]]))
print(resultString)
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/CANSIM2R/R/searchLabels.r
|
MVS.CARleroux <- function(formula, family, data=NULL, trials=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho=NULL, MALA=TRUE, verbose=TRUE)
{
#### This is a wrapper function that calls one of
## binomial.MVlerouxCAR
## poisson.MVlerouxCAR
## multinomial.MVlerouxCAR
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
    #### Run the appropriate model according to the family argument
if(family=="binomial")
{
        if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
model <- binomial.MVlerouxCAR(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, rho=rho, MALA=MALA, verbose=verbose)
}else if(family=="poisson")
{
        if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- poisson.MVlerouxCAR(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, rho=rho, MALA=MALA, verbose=verbose)
}else if(family=="gaussian")
{
        if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- gaussian.MVlerouxCAR(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, rho=rho, verbose=verbose)
}else if(family=="multinomial")
{
        if(is.null(trials)) stop("a multinomial model was specified but the trials argument was not specified", call.=FALSE)
model <- multinomial.MVlerouxCAR(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, rho=rho, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial', `Gaussian', `multinomial' or `poisson'.", call.=FALSE)
}
return(model)
}
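# Hedged usage sketch (illustrative objects: Y1, Y2 and x are equal-length
# vectors over the areal units and W is a binary neighbourhood matrix):
# model <- MVS.CARleroux(formula = cbind(Y1, Y2) ~ x, family = "poisson",
#                        W = W, burnin = 10000, n.sample = 50000, thin = 10)
# model$summary.results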
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/MVS.CARleroux.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
linpredcompute <- function(X, nsites, p, beta, offset) {
.Call(`_CARBayes_linpredcompute`, X, nsites, p, beta, offset)
}
quadform <- function(Wtriplet, Wtripletsum, n_triplet, nsites, phi, theta, rho) {
.Call(`_CARBayes_quadform`, Wtriplet, Wtripletsum, n_triplet, nsites, phi, theta, rho)
}
binomialindepupdateRW <- function(nsites, theta, sigma2, y, failures, theta_tune, offset) {
.Call(`_CARBayes_binomialindepupdateRW`, nsites, theta, sigma2, y, failures, theta_tune, offset)
}
binomialcarupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, y, failures, phi_tune, rho, offset) {
.Call(`_CARBayes_binomialcarupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, y, failures, phi_tune, rho, offset)
}
binomialbetaupdateRW <- function(X, nsites, p, beta, offset, y, failures, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list) {
.Call(`_CARBayes_binomialbetaupdateRW`, X, nsites, p, beta, offset, y, failures, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list)
}
binomialbetaupdateMALA <- function(X, nsites, p, beta, offset, y, failures, trials, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list) {
.Call(`_CARBayes_binomialbetaupdateMALA`, X, nsites, p, beta, offset, y, failures, trials, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list)
}
poissonindepupdateRW <- function(nsites, theta, sigma2, y, theta_tune, offset) {
.Call(`_CARBayes_poissonindepupdateRW`, nsites, theta, sigma2, y, theta_tune, offset)
}
poissoncarupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, y, phi_tune, rho, offset) {
.Call(`_CARBayes_poissoncarupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, y, phi_tune, rho, offset)
}
poissonbetaupdateMALA <- function(X, nsites, p, beta, offset, y, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list) {
.Call(`_CARBayes_poissonbetaupdateMALA`, X, nsites, p, beta, offset, y, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list)
}
poissonbetaupdateRW <- function(X, nsites, p, beta, offset, y, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list) {
.Call(`_CARBayes_poissonbetaupdateRW`, X, nsites, p, beta, offset, y, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list)
}
zipcarupdateRW <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, y, phi_tune, rho, offset, poiind) {
.Call(`_CARBayes_zipcarupdateRW`, Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, y, phi_tune, rho, offset, poiind)
}
zipindepupdateRW <- function(nsites, theta, sigma2, y, theta_tune, offset, poiind) {
.Call(`_CARBayes_zipindepupdateRW`, nsites, theta, sigma2, y, theta_tune, offset, poiind)
}
gaussiancarupdate <- function(Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, rho, nu2, offset) {
.Call(`_CARBayes_gaussiancarupdate`, Wtriplet, Wbegfin, Wtripletsum, nsites, phi, tau2, rho, nu2, offset)
}
binomialmcarupdateRW <- function(Wtriplet, Wbegfin, nsites, nvar, phi, Y, failures, phioffset, denoffset, Sigmainv, rho, phi_tune, innovations) {
.Call(`_CARBayes_binomialmcarupdateRW`, Wtriplet, Wbegfin, nsites, nvar, phi, Y, failures, phioffset, denoffset, Sigmainv, rho, phi_tune, innovations)
}
poissonmcarupdateRW <- function(Wtriplet, Wbegfin, nsites, nvar, phi, Y, phioffset, denoffset, Sigmainv, rho, phi_tune, innovations) {
.Call(`_CARBayes_poissonmcarupdateRW`, Wtriplet, Wbegfin, nsites, nvar, phi, Y, phioffset, denoffset, Sigmainv, rho, phi_tune, innovations)
}
gaussianmcarupdateRW <- function(Wtriplet, Wbegfin, nsites, nvar, phi, phioffset, denoffset, Sigmainv, rho, nu2, phi_tune, innovations) {
.Call(`_CARBayes_gaussianmcarupdateRW`, Wtriplet, Wbegfin, nsites, nvar, phi, phioffset, denoffset, Sigmainv, rho, nu2, phi_tune, innovations)
}
multinomialbetaupdateRW <- function(X, nsites, J, p, col, beta, offset, y, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list, zeros) {
.Call(`_CARBayes_multinomialbetaupdateRW`, X, nsites, J, p, col, beta, offset, y, prior_meanbeta, prior_varbeta, nblock, beta_tune, block_list, zeros)
}
multinomialmcarupdateRW <- function(Wtriplet, Wbegfin, nsites, nvar, phi, Y, phioffset, denoffset, Sigmainv, rho, phi_tune, innovations) {
.Call(`_CARBayes_multinomialmcarupdateRW`, Wtriplet, Wbegfin, nsites, nvar, phi, Y, phioffset, denoffset, Sigmainv, rho, phi_tune, innovations)
}
gaussiancarmultilevelupdate <- function(Wtriplet, Wbegfin, Wtripletsum, n_individual, nsites, phi, tau2, rho, nu2, offset) {
.Call(`_CARBayes_gaussiancarmultilevelupdate`, Wtriplet, Wbegfin, Wtripletsum, n_individual, nsites, phi, tau2, rho, nu2, offset)
}
binomialcarmultilevelupdate <- function(Wtriplet, Wbegfin, Wtripletsum, ind_area_list, n_individual, nsites, phi, tau2, y, failures, phi_tune, rho, offset) {
.Call(`_CARBayes_binomialcarmultilevelupdate`, Wtriplet, Wbegfin, Wtripletsum, ind_area_list, n_individual, nsites, phi, tau2, y, failures, phi_tune, rho, offset)
}
poissoncarmultilevelupdate <- function(Wtriplet, Wbegfin, Wtripletsum, ind_area_list, n_individual, nsites, phi, tau2, y, phi_tune, rho, offset) {
.Call(`_CARBayes_poissoncarmultilevelupdate`, Wtriplet, Wbegfin, Wtripletsum, ind_area_list, n_individual, nsites, phi, tau2, y, phi_tune, rho, offset)
}
basiscomputeinverse <- function(D, nrows, ncols, Z, startcol) {
.Call(`_CARBayes_basiscomputeinverse`, D, nrows, ncols, Z, startcol)
}
basiscomputelinear <- function(D, nrows, ncols, Z, startcol) {
.Call(`_CARBayes_basiscomputelinear`, D, nrows, ncols, Z, startcol)
}
basiscomputeexponential <- function(D, nrows, ncols, Z, startcol) {
.Call(`_CARBayes_basiscomputeexponential`, D, nrows, ncols, Z, startcol)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/RcppExports.R
|
S.CARbym <- function(formula, formula.omega=NULL, family, data=NULL, trials=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, prior.sigma2=NULL, prior.mean.delta=NULL, prior.var.delta=NULL, MALA=TRUE, verbose=TRUE)
{
#### This is a wrapper function that calls one of
## binomial.bymCAR
## poisson.bymCAR
## zip.bymCAR
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
    #### Run the appropriate model according to the family argument
if(family=="binomial")
{
        if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
if(!is.null(formula.omega)) stop("you do not need a formula.omega argument as the zip model was not specified", call.=FALSE)
model <- binomial.bymCAR(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, MALA=MALA, verbose=verbose)
}else if(family=="poisson")
{
        if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
if(!is.null(formula.omega)) stop("you do not need a formula.omega argument as the zip model was not specified", call.=FALSE)
model <- poisson.bymCAR(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, MALA=MALA, verbose=verbose)
}else if(family=="zip")
{
        if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
if(is.null(formula.omega)) stop("a zip model was specified but the formula.omega argument was not specified", call.=FALSE)
model <- zip.bymCAR(formula=formula, formula.omega=formula.omega, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, MALA=MALA, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial', `poisson', or `zip'.", call.=FALSE)
}
return(model)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/S.CARbym.R
|
S.CARdissimilarity <- function(formula, family, data=NULL, trials=NULL, W, Z, W.binary=TRUE, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.tau2=NULL, MALA=TRUE, verbose=TRUE)
{
#### This is a wrapper function that calls one of
## binomial.properCAR
## gaussian.properCAR
## poisson.properCAR
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
    #### Run the appropriate model according to the family argument
if(family=="binomial")
{
        if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
model <- binomial.dissimilarityCAR(formula=formula, data=data, trials=trials, W=W, Z=Z, W.binary=W.binary, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, MALA=MALA, verbose=verbose)
}else if(family=="gaussian")
{
        if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- gaussian.dissimilarityCAR(formula=formula, data=data, W=W, Z=Z, W.binary=W.binary, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, verbose=verbose)
}else if(family=="poisson")
{
        if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- poisson.dissimilarityCAR(formula=formula, data=data, W=W, Z=Z, W.binary=W.binary, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, MALA=MALA, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial', `gaussian' or `poisson'.", call.=FALSE)
}
return(model)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/S.CARdissimilarity.R
|
S.CARleroux <- function(formula, formula.omega=NULL, family, data=NULL, trials=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.tau2=NULL, prior.mean.delta=NULL, prior.var.delta=NULL, rho=NULL, MALA=TRUE, verbose=TRUE)
{
#### This is a wrapper function that calls one of
## binomial.lerouxCAR
## gaussian.lerouxCAR
## poisson.lerouxCAR
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
    #### Run the appropriate model according to the family argument
if(family=="binomial")
{
        if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
if(!is.null(formula.omega)) stop("you do not need a formula.omega argument as the zip model was not specified", call.=FALSE)
model <- binomial.lerouxCAR(formula=formula, data=data, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho=rho, MALA=MALA, verbose=verbose)
}else if(family=="gaussian")
{
        if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
if(!is.null(formula.omega)) stop("you do not need a formula.omega argument as the zip model was not specified", call.=FALSE)
model <- gaussian.lerouxCAR(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, rho=rho, verbose=verbose)
}else if(family=="poisson")
{
        if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
if(!is.null(formula.omega)) stop("you do not need a formula.omega argument as the zip model was not specified", call.=FALSE)
model <- poisson.lerouxCAR(formula=formula, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho=rho, MALA=MALA, verbose=verbose)
}else if(family=="zip")
{
        if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
if(is.null(formula.omega)) stop("a zip model was specified but the formula.omega argument was not specified", call.=FALSE)
model <- zip.lerouxCAR(formula=formula, formula.omega=formula.omega, data=data, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, rho=rho, MALA=MALA, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial', `gaussian', `poisson' or `zip'.", call.=FALSE)
}
return(model)
}
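# Hedged usage sketch (illustrative objects; leaving rho = NULL estimates
# the spatial dependence parameter, while fixing rho = 1 gives the
# intrinsic CAR model):
# model <- S.CARleroux(formula = y ~ x, family = "gaussian", W = W,
#                      burnin = 10000, n.sample = 50000)
# model$summary.results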
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/S.CARleroux.R
|
S.CARlocalised <- function(formula, family, data=NULL, G, trials=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, prior.delta=NULL, MALA=TRUE, verbose=TRUE)
{
#### This is a wrapper function that calls one of
## binomial.localisedCAR
## poisson.localisedCAR
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
    #### Run the appropriate model according to the family argument
if(family=="binomial")
{
        if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
model <- binomial.localisedCAR(formula=formula, data=data, G=G, trials=trials, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.delta=prior.delta, MALA=MALA, verbose=verbose)
}else if(family=="poisson")
{
        if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- poisson.localisedCAR(formula=formula, data=data, G=G, W=W, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.delta=prior.delta, MALA=MALA, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial' or `poisson'.", call.=FALSE)
}
return(model)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/S.CARlocalised.R
|
S.CARmultilevel <- function(formula, family, data=NULL, trials=NULL, W, ind.area, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.tau2=NULL, rho=NULL, MALA=TRUE, verbose=TRUE)
{
#### This is a wrapper function that calls one of
## binomial.multilevelCAR
## gaussian.multilevelCAR
## poisson.multilevelCAR
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
    #### Run the appropriate model according to the family argument
if(family=="binomial")
{
        if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
model <- binomial.multilevelCAR(formula=formula, data=data, trials=trials, W=W, ind.area=ind.area, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho=rho, MALA=MALA, verbose=verbose)
}else if(family=="gaussian")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- gaussian.multilevelCAR(formula=formula, data=data, W=W, ind.area=ind.area, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, prior.tau2=prior.tau2, rho=rho, verbose=verbose)
}else if(family=="poisson")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- poisson.multilevelCAR(formula=formula, data=data, W=W, ind.area=ind.area, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, rho=rho, MALA=MALA, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial', `gaussian' or `poisson'.", call.=FALSE)
}
return(model)
}
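## A minimal usage sketch (illustrative objects, not from the package): the
## multilevel model takes individual-level data plus ind.area, which gives the
## row of W (the area) that each individual belongs to.
# K <- 50; n.ind <- 500
# W <- matrix(0, K, K); W[cbind(1:(K-1), 2:K)] <- 1; W <- W + t(W)
# dat <- data.frame(y = rbinom(n.ind, 1, 0.3), x = rnorm(n.ind))
# ind.area <- sample(1:K, n.ind, replace = TRUE)
# mod <- S.CARmultilevel(formula = y ~ x, family = "binomial", trials = rep(1, n.ind),
#                        data = dat, W = W, ind.area = ind.area,
#                        burnin = 2000, n.sample = 10000)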
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/S.CARmultilevel.R
|
S.RAB <- function(formula, family, data=NULL, trials=NULL, W, V, nlambda=100, verbose=TRUE)
{
#### This is a wrapper function that calls one of
## binomial.RAB
## gaussian.RAB
## poisson.RAB
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
#### Run the appropriate model according to the family argument
if(family=="binomial")
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
model <- binomial.RAB(formula=formula, data=data, trials=trials, W=W, V=V, nlambda=nlambda, verbose=verbose)
}else if(family=="gaussian")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- gaussian.RAB(formula=formula, data=data, W=W, V=V, nlambda=nlambda, verbose=verbose)
}else if(family=="poisson")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
model <- poisson.RAB(formula=formula, data=data, W=W, V=V, nlambda=nlambda, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial', `gaussian', or `poisson'.", call.=FALSE)
}
return(model)
}
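## A minimal usage sketch (illustrative objects, not from the package): S.RAB
## fits a penalised (ridge) regression with spatial basis functions rather than
## running MCMC, so there are no burnin / n.sample arguments; V is a length-K
## numeric vector of ancillary data used to build the anisotropic bases.
# K <- 50
# W <- matrix(0, K, K); W[cbind(1:(K-1), 2:K)] <- 1; W <- W + t(W)
# dat <- data.frame(observed = rpois(K, 20), expected = rep(20, K))
# V <- rnorm(K)
# mod <- S.RAB(formula = observed ~ offset(log(expected)), family = "poisson",
#              data = dat, W = W, V = V, nlambda = 100)
# mod$I   # residual Moran's I at the selected shrinkage level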
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/S.RAB.R
|
S.glm <- function(formula, formula.omega=NULL, family, data=NULL, trials=NULL, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.nu2=NULL, prior.mean.delta=NULL, prior.var.delta=NULL, MALA=TRUE, verbose=TRUE)
{
#### This is a wrapper function that calls one of
## binomial.glm
## gaussian.glm
## multinomial.glm
## poisson.glm
## zip.glm
if(is.null(family)) stop("the family argument is missing", call.=FALSE)
#### Run the appropriate model according to the family argument
if(family=="binomial")
{
if(is.null(trials)) stop("a binomial model was specified but the trials argument was not specified", call.=FALSE)
if(!is.null(formula.omega)) stop("you do not need a formula.omega argument as the zip model was not specified", call.=FALSE)
model <- binomial.glm(formula=formula, data=data, trials=trials, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, MALA=MALA, verbose=verbose)
}else if(family=="gaussian")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
if(!is.null(formula.omega)) stop("you do not need a formula.omega argument as the zip model was not specified", call.=FALSE)
model <- gaussian.glm(formula=formula, data=data, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.nu2=prior.nu2, verbose=verbose)
}else if(family=="multinomial")
{
if(is.null(trials)) stop("a multinomial model was specified but the trials arugment was not specified", call.=FALSE)
if(!is.null(formula.omega)) stop("you do not need a formula.omega argument as the zip model was not specified", call.=FALSE)
model <- multinomial.glm(formula=formula, data=data, trials=trials, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, verbose=verbose)
}else if(family=="poisson")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
if(!is.null(formula.omega)) stop("you do not need a formula.omega argument as the zip model was not specified", call.=FALSE)
model <- poisson.glm(formula=formula, data=data, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, MALA=MALA, verbose=verbose)
}else if(family=="zip")
{
if(!is.null(trials)) stop("you do not need a trials argument as a binomial model was not specified", call.=FALSE)
if(is.null(formula.omega)) stop("a zip model was specified but the formula.omega argument was not specified", call.=FALSE)
model <- zip.glm(formula=formula, formula.omega=formula.omega, data=data, burnin=burnin, n.sample=n.sample, thin=thin, n.chains=n.chains, n.cores=n.cores, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.delta=prior.mean.delta, prior.var.delta=prior.var.delta, MALA=MALA, verbose=verbose)
}else
{
stop("the family arugment is not one of `binomial', `gaussian', `multinomial', `poisson' or `zip'.", call.=FALSE)
}
return(model)
}
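## A minimal usage sketch (illustrative objects, not from the package): for
## family = "zip" a second formula, formula.omega, specifies the covariates for
## the zero-inflation probability (an intercept-only "~ 1" is assumed to be
## acceptable here).
# dat <- data.frame(y = rpois(100, 2) * rbinom(100, 1, 0.8), x = rnorm(100))
# mod <- S.glm(formula = y ~ x, formula.omega = ~ 1, family = "zip",
#              data = dat, burnin = 2000, n.sample = 10000)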
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/S.glm.R
|
binomial.MVlerouxCAR <- function(formula, data=NULL, trials, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "binomial")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
J <- ncol(Y)
N.all <- K * J
#### Create a missing list
if(n.miss>0)
{
miss.locator <- array(NA, c(n.miss, 2))
colnames(miss.locator) <- c("row", "column")
locations <- which(t(which.miss)==0)
miss.locator[ ,1] <- ceiling(locations/J)
miss.locator[ ,2] <- locations - (miss.locator[ ,1]-1) * J
}else
{}
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check and format the trials argument
if(ncol(trials)!=J) stop("trials has the wrong number of columns.", call.=FALSE)
if(nrow(trials)!=K) stop("trials has the wrong number of rows.", call.=FALSE)
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
int.check <- N.all-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials have non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
failures <- trials - Y
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### W matrix
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
if(ceiling(N.all/K)!= floor(N.all/K)) stop("The number of data points divided by the number of rows in W is not a whole number.", call.=FALSE)
#### rho
if(is.null(rho))
{
rho <- runif(1)
fix.rho <- FALSE
}else
{
fix.rho <- TRUE
}
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.Sigma.df)) prior.Sigma.df <- 2
if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- rep(100000, J)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
if(!is.numeric(prior.Sigma.scale)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE)
if(sum(is.na(prior.Sigma.scale))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
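## Note on the "-1" above: because ":" binds more tightly than subtraction, each
## entry is (beta.beg[r]:beta.fin[r]) - 1, i.e. the block indices shifted to
## 0-based form, presumably because list.block is passed to the compiled C++
## beta-updating functions.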
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.MVlerouxCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, miss.locator=miss.locator, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, MALA=MALA, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.MVlerouxCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, miss.locator=miss.locator, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, MALA=MALA, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.MVlerouxCARMCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, J=J, N.all=N.all, which.miss=which.miss, n.miss=n.miss, miss.locator=miss.locator, burnin=burnin, n.sample=n.sample, thin=thin, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, MALA=MALA, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * sum(results$accept.beta[1:J]) / sum(results$accept.beta[(J+1):(2*J)])
accept.phi <- 100 * results$accept[1] / results$accept[2]
accept.Sigma <- 100
if(!fix.rho)
{
accept.rho <- 100 * results$accept[3] / results$accept[4]
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.Sigma)
names(accept.final) <- c("beta", "phi", "rho", "Sigma")
## Compute the model fit criterion
mean.beta <- matrix(apply(results$samples.beta, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=J, byrow=T)
mean.logit <- X.standardised %*% mean.beta + mean.phi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=as.numeric(t(Y)), size=as.numeric(t(trials)), prob=as.numeric(t(mean.prob)), log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- matrix(apply(results$samples.fitted, 2, mean), nrow=K, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- results$samples.beta
for(r in 1:J)
{
samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(results$samples.beta[ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
}
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), Sigma=results$samples.Sigma, rho=mcmc(results$samples.rho), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, J*p), rep(accept.beta,J*p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z)
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c((J+1) ,7))
summary.hyper[1:J, 1] <- diag(apply(samples$Sigma, c(2,3), mean))
summary.hyper[1:J, 2] <- diag(apply(samples$Sigma, c(2,3), quantile, c(0.025)))
summary.hyper[1:J, 3] <- diag(apply(samples$Sigma, c(2,3), quantile, c(0.975)))
summary.hyper[1:J, 4] <- n.keep
summary.hyper[1:J, 5] <- accept.Sigma
summary.hyper[1:J, 6] <- diag(apply(samples$Sigma, c(2,3), effectiveSize))
for(r in 1:J)
{
summary.hyper[r, 7] <- geweke.diag(samples$Sigma[ ,r,r])$z
}
if(!fix.rho)
{
summary.hyper[(J+1), 1:3] <- c(mean(samples$rho), quantile(samples$rho, c(0.025, 0.975)))
summary.hyper[(J+1), 4:7] <- c(n.keep, accept.rho, effectiveSize(samples$rho), geweke.diag(samples$rho)$z)
}else
{
summary.hyper[(J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(J+1), 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept.beta"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1:J]) / sum(accept.temp2[ ,(J+1):(2*J)])
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.phi <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.Sigma <- 100
if(!fix.rho)
{
accept.rho <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.Sigma)
names(accept.final) <- c("beta", "phi", "rho", "Sigma")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.Sigma.list <- lapply(results, function(l) l[["samples.Sigma"]])
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- matrix(apply(samples.beta.matrix, 2, mean), nrow=p, ncol=J, byrow=F)
mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=J, byrow=T)
mean.logit <- X.standardised %*% mean.beta + mean.phi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=as.numeric(t(Y)), size=as.numeric(t(trials)), prob=as.numeric(t(mean.prob)), log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- matrix(apply(samples.fitted.matrix, 2, mean), nrow=K, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- list(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
for(r in 1:J)
{
samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)], X.indicator, X.mean, X.sd, p, FALSE)
}
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
rho.temp <- samples.rho.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
rho.temp[[j]] <- mcmc(samples.rho.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
rho.mcmc <- as.mcmc.list(rho.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rho.mcmc, Sigma=samples.Sigma.list, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, J*p), rep(accept.beta,J*p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
col.name <- rep(NA, p*J)
if(is.null(colnames(Y)))
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste("Category ", r, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:J)
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c((J+1) ,7))
summary.hyper[1:J, 4] <- rep(n.keep, J)
summary.hyper[1:J, 5] <- rep(accept.Sigma, J)
for(r in 1:J)
{
test.vec <- samples.Sigma.list[[1]][ , r, r]
test.list <- as.list(rep(NA, n.chains))
test.list[[1]] <- mcmc(samples.Sigma.list[[1]][ , r, r])
for(i in 2:n.chains)
{
test.vec <- c(test.vec, samples.Sigma.list[[i]][ , r, r])
test.list[[i]] <- mcmc(samples.Sigma.list[[i]][ , r, r])
}
test.mcmc <- as.mcmc.list(test.list)
summary.hyper[r,1] <- mean(test.vec)
summary.hyper[r,2:3] <- quantile(test.vec, c(0.025, 0.975))
summary.hyper[r,6] <- effectiveSize(test.mcmc)
summary.hyper[r,7] <- gelman.diag(test.mcmc)$psrf[ ,2]
}
if(!fix.rho)
{
summary.hyper[(J+1), 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[(J+1), 4:7] <- c(n.keep, accept.rho, effectiveSize(rho.mcmc), gelman.diag(rho.mcmc)$psrf[ ,2])
}else
{
summary.hyper[(J+1), 1:3] <- c(rho, rho, rho)
summary.hyper[(J+1), 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Binomial (logit link function)", "\nRandom effects model - Leroux MCAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.MVlerouxCAR.R
|
binomial.MVlerouxCARMCMC <- function(Y, failures, trials, offset, X.standardised, W, rho, fix.rho, K, p, J, N.all, which.miss, n.miss, miss.locator, burnin, n.sample, thin, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.Sigma.df, prior.Sigma.scale, MALA, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
# library(MCMCpack)
##########################################
#### Generate the initial parameter values
##########################################
beta <- array(NA, c(p, J))
for(i in 1:J)
{
mod.glm <- glm(cbind(Y[ ,i], failures[ ,i])~X.standardised-1, offset=offset[ ,i], family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta[ ,i] <- rnorm(n=p, mean=beta.mean, sd=beta.sd)
}
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi.vec <- rnorm(n=N.all, mean=0, sd=res.sd)
phi <- matrix(phi.vec, nrow=K, byrow=TRUE)
Sigma <- cov(phi)
Sigma.inv <- solve(Sigma)
Sigma.a <- rep(1, J)
####################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
regression <- X.standardised %*% beta
lp <- regression + phi + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
Y.DA <- Y
failures.DA <- trials - Y.DA
###############################
#### Set up the MCMC quantities
###############################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, J*p))
samples.phi <- array(NA, c(n.keep, N.all))
samples.Sigma <- array(NA, c(n.keep, J, J))
samples.Sigma.a <- array(NA, c(n.keep, J))
if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, N.all))
samples.fitted <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,4)
accept.beta <- rep(0,2*J)
proposal.sd.beta <- rep(0.01, J)
proposal.sd.phi <- 0.1
proposal.sd.rho <- 0.02
Sigma.post.df <- prior.Sigma.df + K + J - 1
Sigma.a.post.shape <- (prior.Sigma.df + J) / 2
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
Wstar <- diag(apply(W,1,sum)) - W
Q <- rho * Wstar + diag(rep(1-rho,K))
#### Create the determinant
if(!fix.rho)
{
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
det.Q <- sum(log((rho * Wstar.val + (1-rho))))
}else
{}
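## Descriptive note: with Q(rho) = rho*Wstar + (1-rho)*I and Wstar.val the
## eigenvalues of Wstar, log|Q(rho)| = sum(log(rho*Wstar.val + 1 - rho)), so the
## eigendecomposition is computed once and the log-determinant can be updated
## cheaply at each rho proposal.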
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
islands.all <- rep(islands,J)
n.islands <- max(W.islands$nc)
if(rho==1) Sigma.post.df <- prior.Sigma.df + K + J - 1 - n.islands
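## Descriptive note: when rho = 1 the intrinsic CAR precision is rank-deficient,
## with rank K minus the number of connected components of W, hence the
## degrees-of-freedom adjustment to Sigma.post.df above.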
#### Specify vector variants
Y.vec <- as.numeric(t(Y))
trials.vec <- as.numeric(t(trials))
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[miss.locator] <- rbinom(n=n.miss, size=trials[miss.locator], prob=prob[miss.locator])
failures.DA <- trials - Y.DA
}else
{}
###################
## Sample from beta
###################
offset.temp <- phi + offset
for(r in 1:J)
{
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, K, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], failures.DA[ ,r], trials[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, K, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], failures.DA[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
}
beta[ ,r] <- temp[[1]]
accept.beta[r] <- accept.beta[r] + temp[[2]]
accept.beta[(r+J)] <- accept.beta[(r+J)] + n.beta.block
}
regression <- X.standardised %*% beta
##################
## Sample from phi
##################
#### Create the inputs to the function
den.offset <- rho * W.triplet.sum + 1 - rho
phi.offset <- regression + offset
Chol.Sigma <- t(chol(proposal.sd.phi*Sigma))
z.mat <- matrix(rnorm(n=N.all, mean=0, sd=1), nrow=J, ncol=K)
innovations <- t(Chol.Sigma %*% z.mat)
temp1 <- binomialmcarupdateRW(W.triplet, W.begfin, K, J, phi, Y.DA, failures.DA, phi.offset, den.offset, Sigma.inv, rho, proposal.sd.phi, innovations)
phi <- temp1[[1]]
for(r in 1:J)
{
phi[ ,r] <- phi[ ,r] - mean(phi[ ,r])
}
accept[1] <- accept[1] + temp1[[2]]
accept[2] <- accept[2] + K
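## Descriptive notes on the phi update (inferred from the code): the proposal
## innovations are built as t(Chol.Sigma %*% z.mat) with Chol.Sigma =
## t(chol(proposal.sd.phi * Sigma)), so each area's J-vector random-walk proposal
## is correlated across outcomes via Sigma; each column of phi is then
## mean-centred because the intercepts in beta otherwise make the levels of the
## random effects unidentifiable.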
####################
## Sample from Sigma
####################
Sigma.post.scale <- 2 * prior.Sigma.df * diag(1 / Sigma.a) + t(phi) %*% Q %*% phi
Sigma <- riwish(Sigma.post.df, Sigma.post.scale)
Sigma.inv <- solve(Sigma)
######################
## Sample from Sigma.a
######################
Sigma.a.posterior.scale <- prior.Sigma.df * diag(Sigma.inv) + 1 / prior.Sigma.scale^2
Sigma.a <- 1 / rgamma(J, Sigma.a.post.shape, scale=(1/Sigma.a.posterior.scale))
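## Descriptive note: this Sigma / Sigma.a pair appears to implement a
## hierarchical inverse-Wishart construction (in the style of Huang and Wand,
## 2013), where the auxiliary inverse-gamma variables Sigma.a induce half-t-type
## marginal priors on the standard deviations in Sigma; riwish() is the
## inverse-Wishart sampler from MCMCpack.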
##################
## Sample from rho
##################
if(!fix.rho)
{
## Propose a new value
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
Q.prop <- proposal.rho * Wstar + diag(rep(1-proposal.rho), K)
det.Q.prop <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
## Compute the acceptance rate
logprob.current <- 0.5 * J * det.Q - 0.5 * sum(diag(t(phi) %*% Q %*% phi %*% Sigma.inv))
logprob.proposal <- 0.5 * J * det.Q.prop - 0.5 * sum(diag(t(phi) %*% Q.prop %*% phi %*% Sigma.inv))
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
if(prob > runif(1))
{
rho <- proposal.rho
det.Q <- det.Q.prop
Q <- Q.prop
accept[3] <- accept[3] + 1
}else
{
}
accept[4] <- accept[4] + 1
}else
{}
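## Descriptive notes on the rho update: the proposal is a truncated normal on
## [0, 1], so the Hastings term above corrects for its asymmetry, and the target
## is the matrix normal prior for phi, whose log density contributes
## 0.5 * J * log|Q(rho)| - 0.5 * tr(phi' Q(rho) phi Sigma^{-1}).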
#########################
## Calculate the deviance
#########################
lp <- regression + phi + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=as.numeric(t(Y)), size=as.numeric(t(trials)), prob=as.numeric(t(prob)), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- as.numeric(beta)
samples.phi[ele, ] <- as.numeric(t(phi))
samples.Sigma[ele, , ] <- Sigma
samples.Sigma.a[ele, ] <- Sigma.a
if(!fix.rho) samples.rho[ele, ] <- rho
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- as.numeric(t(fitted))
if(n.miss>0) samples.Y[ele, ] <- Y.DA[miss.locator]
}else
{}
########################################
## Self tune the acceptance probabilities
########################################
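## (Descriptive note: proposal standard deviations are adapted every 100
## iterations, during burn-in only, nudging acceptance rates towards roughly
## 40-50% -- 30-40% for beta when p <= 2 -- via the common.accceptrates functions.)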
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
for(r in 1:J)
{
if(p>2)
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 40, 50)
}else
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 30, 40)
}
}
proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
if(!fix.rho)
{
proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5)
}
accept <- c(0,0,0,0)
accept.beta <- rep(0,2*J)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y = NA
if(fix.rho) samples.rho=NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.Sigma=samples.Sigma, samples.Sigma.a=samples.Sigma.a, samples.rho=samples.rho, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept, accept.beta=accept.beta)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.MVlerouxCARMCMC.R
|
binomial.RAB <- function(formula, data=NULL, trials, W, V, nlambda, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
which.present <- which(!is.na(Y))
n.miss <- frame.results$n.miss
if(p==0) stop("The model (via the formula object) must at least have an intercept term.", call.=FALSE)
#### Trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
int.check <- K-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials have non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
failures <- trials - Y
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### Ancillary data
if(!is.numeric(V)) stop("The ancillary data V is not a vector.", call.=FALSE)
if(length(V) != K) stop("The ancillary data V is not the same length as the remaining data.", call.=FALSE)
if(sum(is.na(V))>0) stop("The ancillary data V has missing 'NA' values.", call.=FALSE)
if(!is.numeric(V)) stop("The ancillary data V has non-numeric values.", call.=FALSE)
#### Neighbourhood matrix W
if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
if(ncol(W)!= nrow(W)) stop("W is not a square matrix.", call.=FALSE)
if(sum(is.na(W))>0) stop("W has missing 'NA' values.", call.=FALSE)
if(!is.numeric(W)) stop("W has non-numeric values.", call.=FALSE)
if(min(W)<0) stop("W has negative elements.", call.=FALSE)
if(sum(W!=t(W))>0) stop("W is not symmetric.", call.=FALSE)
if(min(apply(W, 1, sum))==0) stop("W has some areas with no neighbours (one of the row sums equals zero).", call.=FALSE)
#### Create the shortest path matrix
graph.W <- graph.adjacency(W, mode="undirected")
graph.dist <- shortest.paths(graph.W)
#####################################################
#### Create the basis functions and the data elements
#####################################################
#### Create the three sets of basis functions
B.anisotropic.exp <- basiscomputeexponential(D=graph.dist, nrows=K, ncols=K, Z=V, startcol=1)
B.anisotropic.inv <- basiscomputeinverse(D=graph.dist, nrows=K, ncols=K, Z=V, startcol=1)
B.anisotropic.linear <- basiscomputelinear(D=graph.dist, nrows=K, ncols=K, Z=V, startcol=1)
#### Combine with the covariate matrix if needed
X.anisotropic.exp <- cbind(X, B.anisotropic.exp)
X.anisotropic.inv <- cbind(X, B.anisotropic.inv)
X.anisotropic.linear <- cbind(X, B.anisotropic.linear)
#### Remove an intercept term if it is present
if(var(X.anisotropic.exp[ ,1])==0)
{
X.anisotropic.exp <- X.anisotropic.exp[ ,-1]
X.anisotropic.inv <- X.anisotropic.inv[ ,-1]
X.anisotropic.linear <- X.anisotropic.linear[ ,-1]
p <- p-1
}else
{}
#### Remove rows with missing values for model fitting
Y.train <- Y[which.present]
failures.train <- failures[which.present]
trials.train <- trials[which.present]
response.train <- cbind(failures.train, Y.train)
offset.train <- offset[which.present]
K.train <- length(Y.train)
X.anisotropic.exp.train <- X.anisotropic.exp[which.present, ]
X.anisotropic.inv.train <- X.anisotropic.inv[which.present, ]
X.anisotropic.linear.train <- X.anisotropic.linear[which.present, ]
W.train <- W[which.present, which.present]
W.list.train <- mat2listw(W.train, style="B")
########################################
#### Fit the models and make predictions
########################################
#### Update the user on the functions progress
if(verbose) cat("Fitting the model.")
#### Fit the models with the 3 different types of basis functions
penfac <- c(rep(0, p), rep(1,K))
mod.ridge.exp <- glmnet(x=X.anisotropic.exp.train, y=response.train, offset=offset.train, alpha=0, nlambda=nlambda, penalty.factor = penfac, family = "binomial", intercept=TRUE, standardize=FALSE)
mod.ridge.inv <- glmnet(x=X.anisotropic.inv.train, y=response.train, offset=offset.train, alpha=0, nlambda=nlambda, penalty.factor = penfac, family = "binomial", intercept=TRUE, standardize=FALSE)
mod.ridge.linear <- glmnet(x=X.anisotropic.linear.train, y=response.train, offset=offset.train, alpha=0, nlambda=nlambda, penalty.factor = penfac, family = "binomial", intercept=TRUE, standardize=FALSE)
#### Compute the level of residual spatial autocorrelation for each model and lambda value
## Remove 0s and 1s from proportions for computing residual Moran's I
prop <- Y.train / trials.train
prop[prop==0] <- 0.01
prop[prop==1] <- 0.99
## Exponential model
fits.lp.exp <- predict(object=mod.ridge.exp, newx=X.anisotropic.exp.train, newoffset=offset.train)
m <- ncol(fits.lp.exp)
results.exp <- data.frame(lambda=mod.ridge.exp$lambda, I=rep(NA, m))
for(j in 1:m)
{
resids <- log(prop / (1-prop)) - fits.lp.exp[ ,j]
results.exp$I[j] <- moran.mc(x=resids, listw=W.list.train, zero.policy = TRUE, nsim=1)$statistic
}
row.exp <- which(abs(results.exp$I)==min(abs(results.exp$I)))
moran.exp <- results.exp$I[row.exp]
## Inverse model
fits.lp.inv <- predict(object=mod.ridge.inv, newx=X.anisotropic.inv.train, newoffset=offset.train)
m <- ncol(fits.lp.inv)
results.inv <- data.frame(lambda=mod.ridge.inv$lambda, I=rep(NA, m))
for(j in 1:m)
{
resids <- log(prop / (1-prop)) - fits.lp.inv[ ,j]
results.inv$I[j] <- moran.mc(x=resids, listw=W.list.train, zero.policy = TRUE, nsim=1)$statistic
}
row.inv <- which(abs(results.inv$I)==min(abs(results.inv$I)))
moran.inv <- results.inv$I[row.inv]
## Linear model
fits.lp.linear <- predict(object=mod.ridge.linear, newx=X.anisotropic.linear.train, newoffset=offset.train)
m <- ncol(fits.lp.linear)
results.linear <- data.frame(lambda=mod.ridge.linear$lambda, I=rep(NA, m))
for(j in 1:m)
{
resids <- log(prop / (1-prop)) - fits.lp.linear[ ,j]
results.linear$I[j] <- moran.mc(x=resids, listw=W.list.train, zero.policy = TRUE, nsim=1)$statistic
}
row.linear <- which(abs(results.linear$I)==min(abs(results.linear$I)))
moran.linear <- results.linear$I[row.linear]
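## Descriptive note: for each basis type the shrinkage level lambda is chosen to
## minimise the absolute residual Moran's I on the logit scale; moran.mc() is
## called with nsim = 1 only because the observed statistic is needed here, not
## a permutation p-value.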
#### Choose the final model
moran.all <- abs(c(moran.exp, moran.inv, moran.linear))
model <- which(moran.all == min(moran.all))[1]
if(model==1)
{
model.string <- c("Likelihood model - Binomial (logit link function)", "Spatial structure model - Anistropic exponential distance-decay basis functions")
model <- mod.ridge.exp
row <- row.exp
X.final <- X.anisotropic.exp
X.final.train <- X.final[which.present, ]
lambda.hat <- results.exp$lambda[row] * K.train
I <- results.exp$I[row]
}else if(model==2)
{
model.string <- c("Likelihood model - Binomial (logit link function)", "Spatial structure model - Anistropic inverse distance-decay basis functions")
model <- mod.ridge.inv
row <- row.inv
X.final <- X.anisotropic.inv
X.final.train <- X.final[which.present, ]
lambda.hat <- results.inv$lambda[row] * K.train
I <- results.inv$I[row]
}else if(model==3)
{
model.string <- c("Likelihood model - Binomial (logit link function)", "Spatial structure model - Anistropic linear distance-decay basis functions")
model <- mod.ridge.linear
row <- row.linear
X.final <- X.anisotropic.linear
X.final.train <- X.final[which.present, ]
lambda.hat <- results.linear$lambda[row] * K.train
I <- results.linear$I[row]
}else{}
#### Compute the parameter estimate for beta
beta.hat <- c(model$a0[row], model$beta[ ,row])
#####################################
#### Summarise and return the results
#####################################
#### Update the user on the progress
if(verbose) cat("\nSummarising results.\n")
#### Compute the final fitted / predicted values and residuals
lp.all <- beta.hat[1] + X.final %*% beta.hat[-1] + offset
theta.all <- as.numeric(exp(lp.all) / (1 + exp(lp.all)))
fitted.values <- trials * theta.all
response.residuals <- Y - fitted.values
pearson.residuals <- response.residuals / sqrt(fitted.values * (1 - theta.all))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
#### Format the final X matrix returned
X.extend <- cbind(rep(1, K), X.final)
colnames(X.extend)[1] <- "(Intercept)"
colnames(X.extend)[(p+2):(p+K+1)] <- paste("Basis function", 1:K, sep=" ")
#######################
#### Return the results
#######################
results <- list(beta.hat=beta.hat, sigma2.hat=NA, lambda.hat=lambda.hat, I=I, fitted.values=fitted.values, residuals=residuals, formula=formula, model.string=model.string, X=X.extend, model=model)
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.RAB.R
|
binomial.bymCAR <- function(formula, data=NULL, trials, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, prior.sigma2=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "binomial")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check and format the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
int.check <- K-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials have non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
failures <- trials - Y
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.sigma2)) prior.sigma2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
common.prior.var.check(prior.sigma2)
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.bymCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.bymCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.bymCARMCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, prior.sigma2=prior.sigma2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.phi <- 100 * results$accept[3] / results$accept[4]
accept.theta <- 100 * results$accept[5] / results$accept[6]
accept.tau2 <- 100
accept.sigma2 <- 100
accept.final <- c(accept.beta, accept.phi, accept.theta, accept.tau2, accept.sigma2)
names(accept.final) <- c("beta", "phi", "theta", "tau2", "sigma2")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.psi <- apply(results$samples.psi, 2, mean)
mean.logit <- as.numeric(X.standardised %*% mean.beta) + mean.psi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), psi=mcmc(results$samples.psi), tau2=mcmc(results$samples.tau2), sigma2=mcmc(results$samples.sigma2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.hyper[2, 1:3] <- c(mean(samples$sigma2), quantile(samples$sigma2, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.sigma2, effectiveSize(samples$sigma2), geweke.diag(samples$sigma2)$z)
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "sigma2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.phi <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.theta <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
accept.tau2 <- 100
accept.sigma2 <- 100
accept.final <- c(accept.beta, accept.phi, accept.theta, accept.tau2, accept.sigma2)
names(accept.final) <- c("beta", "phi", "theta", "tau2", "sigma2")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.psi.list <- lapply(results, function(l) l[["samples.psi"]])
samples.sigma2.list <- lapply(results, function(l) l[["samples.sigma2"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.psi.matrix <- do.call(what=rbind, args=samples.psi.list)
samples.sigma2.matrix <- do.call(what=rbind, args=samples.sigma2.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.psi <- apply(samples.psi.matrix, 2, mean)
mean.logit <- as.numeric(X.standardised %*% mean.beta) + mean.psi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
psi.temp <- samples.psi.list
sigma2.temp <- samples.sigma2.list
tau2.temp <- samples.tau2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
psi.temp[[j]] <- mcmc(samples.psi.list[[j]])
sigma2.temp[[j]] <- mcmc(samples.sigma2.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
psi.mcmc <- as.mcmc.list(psi.temp)
sigma2.mcmc <- as.mcmc.list(sigma2.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, psi=psi.mcmc, tau2=tau2.mcmc, sigma2=sigma2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples.tau2.matrix), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.hyper[2, 1:3] <- c(mean(samples.sigma2.matrix), quantile(samples.sigma2.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.sigma2, effectiveSize(samples.sigma2.matrix), gelman.diag(sigma2.mcmc)$psrf[ ,2])
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "sigma2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Binomial (logit link function)", "\nRandom effects model - BYM CAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.bymCAR.R
|
binomial.bymCARMCMC <- function(Y, failures, trials, offset, X.standardised, W, K, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, prior.sigma2, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
#
#
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
dat <- cbind(Y, failures)
mod.glm <- glm(dat~X.standardised-1, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
tau2 <- var(phi) / 10
theta <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
sigma2 <- var(theta) / 10
####################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
lp <- as.numeric(X.standardised %*% beta) + phi + theta + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
Y.DA <- Y
failures.DA <- trials - Y.DA
#########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.re <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.sigma2 <- array(NA, c(n.keep, 1))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
## Metropolis quantities
accept <- rep(0,6)
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
proposal.sd.theta <- 0.1
sigma2.posterior.shape <- prior.sigma2[1] + 0.5 * K
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Check for islands
W.list<- mat2listw(W, style = "B")
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
tau2.posterior.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rbinom(n=n.miss, size=trials[which.miss==0], prob=prob[which.miss==0])
failures.DA <- trials - Y.DA
}else
{}
####################
## Sample from beta
####################
offset.temp <- phi + offset + theta
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, K, p, beta, offset.temp, Y.DA, failures.DA, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, K, p, beta, offset.temp, Y.DA, failures.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
####################
## Sample from phi
####################
beta.offset <- X.standardised %*% beta + theta + offset
temp1 <- binomialcarupdateRW(Wtriplet=W.triplet, Wbegfin=W.begfin, Wtripletsum=W.triplet.sum, nsites=K, phi=phi, tau2=tau2, y=Y.DA, failures=failures.DA, phi_tune=proposal.sd.phi, rho=1, offset=beta.offset)
phi <- temp1[[1]]
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K
####################
## Sample from theta
####################
beta.offset <- as.numeric(X.standardised %*% beta) + phi + offset
temp2 <- binomialindepupdateRW(nsites=K, theta=theta, sigma2=sigma2, y=Y.DA, failures=failures.DA, theta_tune=proposal.sd.theta, offset=beta.offset)
theta <- temp2[[1]]
theta <- theta - mean(theta)
accept[5] <- accept[5] + temp2[[2]]
accept[6] <- accept[6] + K
###################
## Sample from tau2
###################
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, 1)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
#####################
## Sample from sigma2
#####################
sigma2.posterior.scale <- prior.sigma2[2] + 0.5*sum(theta^2)
sigma2 <- 1 / rgamma(1, sigma2.posterior.shape, scale=(1/sigma2.posterior.scale))
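## Full conditional (a standard conjugate update): with an Inverse-Gamma(a, b)
## prior, sigma2 | theta ~ Inverse-Gamma(a + K/2, b + sum(theta^2)/2); drawing
## 1/rgamma(1, shape, scale = 1/scale.param) is one way to sample that variate.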
#########################
## Calculate the deviance
#########################
lp <- as.numeric(X.standardised %*% beta) + phi + theta + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.re[ele, ] <- phi + theta
samples.tau2[ele, ] <- tau2
samples.sigma2[ele, ] <- sigma2
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{
}
########################################
## Self-tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
proposal.sd.theta <- common.accceptrates1(accept[5:6], proposal.sd.theta, 40, 50)
accept <- c(0,0,0,0,0,0)
}else
{
}
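## Proposal standard deviations are re-tuned every 100th iteration, but only
## while j < burnin; the two numeric arguments to common.accceptrates1 (the
## package's own spelling) appear to define the target acceptance window in
## percent (30-40 for beta when p <= 2, otherwise 40-50). Freezing the tuning
## after burn-in keeps the sampling-phase chain time-homogeneous.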
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
chain.results <- list(samples.beta=samples.beta, samples.psi=samples.re, samples.tau2=samples.tau2, samples.sigma2=samples.sigma2, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.bymCARMCMC.R
|
binomial.dissimilarityCAR <- function(formula, data=NULL, trials, W, Z, W.binary=TRUE, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "binomial")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check and format the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
int.check <- K-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials have non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
failures <- trials - Y
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### Dissimilarity metric matrix
if(!is.list(Z)) stop("Z is not a list object.", call.=FALSE)
if(sum(is.na(as.numeric(lapply(Z, sum, na.rm=FALSE))))>0) stop("Z contains missing 'NA' values.", call.=FALSE)
q <- length(Z)
if(sum(as.numeric(lapply(Z,nrow))==K) <q) stop("Z contains matrices of the wrong size.", call.=FALSE)
if(sum(as.numeric(lapply(Z,ncol))==K) <q) stop("Z contains matrices of the wrong size.", call.=FALSE)
if(min(as.numeric(lapply(Z,min)))<0) stop("Z contains negative values.", call.=FALSE)
if(!is.logical(W.binary)) stop("W.binary is not TRUE or FALSE.", call.=FALSE)
if(length(W.binary)!=1) stop("W.binary has the wrong length.", call.=FALSE)
if(W.binary)
{
alpha.max <- rep(NA,q)
alpha.threshold <- rep(NA,q)
for(k in 1:q)
{
Z.crit <- quantile(as.numeric(Z[[k]])[as.numeric(Z[[k]])!=0], 0.5)
alpha.max[k] <- -log(0.5) / Z.crit
alpha.threshold[k] <- -log(0.5) / max(Z[[k]])
}
}else
{
alpha.max <- rep(50, q)
}
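## Worked example: with W.binary=TRUE, alpha.max is the value of alpha at which
## the median non-zero dissimilarity gives a borderline weight of 0.5, since
## exp(-alpha.max * Z.crit) = 0.5. For instance, Z.crit = 2 gives
## alpha.max = -log(0.5)/2 = 0.3466 (to 4 d.p.).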
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
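## Note that a:b-1 parses as (a:b) - 1 in R, so the first n.beta.block elements
## of list.block hold zero-based column indices for each beta block (suitable for
## the zero-indexed Rcpp updating functions), followed by the block lengths.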
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.dissimilarityCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, Z=Z, W.binary=W.binary, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, alpha.max=alpha.max, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.dissimilarityCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, Z=Z, W.binary=W.binary, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, alpha.max=alpha.max, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.dissimilarityCARMCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, Z=Z, W.binary=W.binary, W=W, K=K, p=p, q=q, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, alpha.max=alpha.max, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.phi <- 100 * results$accept[3] / results$accept[4]
accept.tau2 <- 100
accept.alpha <- 100 * results$accept[5] / results$accept[6]
accept.final <- c(accept.beta, accept.phi, accept.tau2, accept.alpha)
names(accept.final) <- c("beta", "phi", "tau2", "alpha")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.phi <- apply(results$samples.phi, 2, mean)
mean.logit <- as.numeric(X.standardised %*% mean.beta) + mean.phi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), alpha=mcmc(results$samples.alpha), tau2=mcmc(results$samples.tau2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.alpha <- t(rbind(apply(samples$alpha, 2, mean), apply(samples$alpha, 2, quantile, c(0.025, 0.975))))
summary.alpha <- cbind(summary.alpha, rep(n.keep, q), rep(accept.alpha,q), effectiveSize(samples$alpha), geweke.diag(samples$alpha)$z)
if(!is.null(names(Z)))
{
rownames(summary.alpha) <- names(Z)
}else
{
names.Z <- rep(NA,q)
for(j in 1:q)
{
names.Z[j] <- paste("Z[[",j, "]]", sep="")
}
rownames(summary.alpha) <- names.Z
}
summary.hyper <- array(NA, c(1 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
summary.results <- rbind(summary.beta, summary.hyper, summary.alpha)
if(W.binary)
{
alpha.min <- c(rep(NA, (p+1)), alpha.threshold)
summary.results <- cbind(summary.results, alpha.min)
}else
{}
rownames(summary.results)[(p+1)] <- c("tau2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
if(W.binary) summary.results[ , 8] <- round(summary.results[ , 8], 4)
#### Create the posterior medians for the neighbourhood matrix W
W.posterior <- array(NA, c(K,K))
if(W.binary)
{
W.border.prob <- array(NA, c(K,K))
}else
{
W.border.prob <- NA
}
for(i in 1:K)
{
for(j in 1:K)
{
if(W[i,j]==1)
{
z.temp <- NA
for(k in 1:q)
{
z.temp <- c(z.temp, Z[[k]][i,j])
}
z.temp <- z.temp[-1]
w.temp <- exp(-samples$alpha %*% z.temp)
if(W.binary)
{
w.posterior <- as.numeric(w.temp>=0.5)
W.posterior[i,j] <- ceiling(median(w.posterior))
W.border.prob[i,j] <- (1 - sum(w.posterior) / length(w.posterior))
}else
{
W.posterior[i,j] <- median(w.temp)
}
}else
{
}
}
}
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.phi <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.tau2 <- 100
accept.alpha <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
accept.final <- c(accept.beta, accept.phi, accept.tau2, accept.alpha)
names(accept.final) <- c("beta", "phi", "tau2", "alpha")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.logit <- as.numeric(X.standardised %*% mean.beta) + mean.phi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
alpha.temp <- samples.alpha.list
tau2.temp <- samples.tau2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
alpha.temp[[j]] <- mcmc(samples.alpha.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
alpha.mcmc <- as.mcmc.list(alpha.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, alpha=alpha.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.alpha <- t(rbind(apply(samples.alpha.matrix, 2, mean), apply(samples.alpha.matrix, 2, quantile, c(0.025, 0.975))))
summary.alpha <- cbind(summary.alpha, rep(n.keep, q), rep(accept.alpha,q), effectiveSize(alpha.mcmc), gelman.diag(alpha.mcmc)$psrf[ ,2])
if(!is.null(names(Z)))
{
rownames(summary.alpha) <- names(Z)
}else
{
names.Z <- rep(NA,q)
for(j in 1:q)
{
names.Z[j] <- paste("Z[[",j, "]]", sep="")
}
rownames(summary.alpha) <- names.Z
}
summary.hyper <- array(NA, c(1 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
summary.results <- rbind(summary.beta, summary.hyper, summary.alpha)
if(W.binary)
{
alpha.min <- c(rep(NA, (p+1)), alpha.threshold)
summary.results <- cbind(summary.results, alpha.min)
}else
{}
rownames(summary.results)[(p+1)] <- c("tau2")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
if(W.binary) summary.results[ , 8] <- round(summary.results[ , 8], 4)
#### Create the posterior medians for the neighbourhood matrix W
W.posterior <- array(NA, c(K,K))
if(W.binary)
{
W.border.prob <- array(NA, c(K,K))
}else
{
W.border.prob <- NA
}
for(i in 1:K)
{
for(j in 1:K)
{
if(W[i,j]==1)
{
z.temp <- NA
for(k in 1:q)
{
z.temp <- c(z.temp, Z[[k]][i,j])
}
z.temp <- z.temp[-1]
w.temp <- exp(-samples.alpha.matrix %*% z.temp)
if(W.binary)
{
w.posterior <- as.numeric(w.temp>=0.5)
W.posterior[i,j] <- ceiling(median(w.posterior))
W.border.prob[i,j] <- (1 - sum(w.posterior) / length(w.posterior))
}else
{
W.posterior[i,j] <- median(w.temp)
}
}else
{
}
}
}
}
###################################
#### Compile and return the results
###################################
## Generate the dissimilarity equation
if(q==1)
{
dis.eq <- rownames(summary.results)[nrow(summary.results)]
}else
{
dis.eq <- paste(rownames(summary.alpha), "+")
len <- length(dis.eq)
dis.eq[len] <- substr(dis.eq[len], 1, nchar(dis.eq[len])-1)
}
if(W.binary)
{
model.string <- c("Likelihood model - Binomial (logit link function)", "\nRandom effects model - Binary dissimilarity CAR", "\nDissimilarity metrics - ", dis.eq, "\n")
}else
{
model.string <- c("Likelihood model - Binomial (logit link function)", "\nRandom effects model - Non-binary dissimilarity CAR", "\nDissimilarity metrics - ", dis.eq, "\n")
}
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=list(W.posterior=W.posterior, W.border.prob=W.border.prob), formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
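#### Illustrative call (not run). The objects dat, n.trials, W.adj and Z1 below
#### are hypothetical: dat is a data.frame containing Y and x, n.trials a vector
#### of trial counts, W.adj a K x K binary adjacency matrix and Z1 a K x K
#### non-negative dissimilarity matrix.
# mod <- binomial.dissimilarityCAR(formula=Y~x, data=dat, trials=n.trials,
#                                  W=W.adj, Z=list(Z1), W.binary=TRUE,
#                                  burnin=10000, n.sample=30000, thin=2)
# mod$summary.results
# mod$localised.structure$W.posterior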
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.dissimilarityCAR.R
|
binomial.dissimilarityCARMCMC <- function(Y, failures, trials, offset, X.standardised, Z, W.binary, W, K, p, q, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, alpha.max, verbose, chain)
{
# Rcpp::sourceCpp("src/CARBayes.cpp")
# source("R/common.functions.R")
# library(spdep)
# library(truncnorm)
# library(spam)
#
#
##########################################
#### Generate the initial parameter values
##########################################
#### Generate initial values for each chain
dat <- cbind(Y, failures)
mod.glm <- glm(dat~X.standardised-1, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
theta.hat <- Y / trials
theta.hat[theta.hat==0] <- 0.01
theta.hat[theta.hat==1] <- 0.99
res.temp <- log(theta.hat / (1 - theta.hat)) - X.standardised %*% beta.mean - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=rep(0,K), sd=res.sd)
tau2 <- var(phi) / 10
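## The clamping of theta.hat above keeps the empirical logits finite, and
## dividing the residual sd by 5 gives deliberately small initial random effects;
## tau2 then starts an order of magnitude below the empirical variance of phi.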
alpha <- runif(n=q, min=rep(0,q), max=alpha.max/(2+q))
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
lp <- as.numeric(X.standardised %*% beta) + phi + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
Y.DA <- Y
failures.DA <- trials - Y.DA
########################################
#### Set up the MCMC model run quantities
#########################################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.phi <- array(NA, c(n.keep, K))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.alpha <- array(NA, c(n.keep, q))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
## Metropolis quantities
accept <- rep(0,6)
proposal.sd.alpha <- 0.02 * alpha.max
proposal.sd.beta <- 0.01
proposal.sd.phi <- 0.1
tau2.posterior.shape <- prior.tau2[1] + 0.5 * K
##################################
#### Set up the spatial quantities
##################################
#### CAR quantities
W.quants <- common.Wcheckformat.disimilarity(W)
W <- W.quants$W
W.triplet <- W.quants$W.triplet
n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
spam.W <- W.quants$spam.W
#### Create the Z triplet form
Z.triplet <- array(NA, c(n.triplet, q))
for(i in 1:n.triplet)
{
row <- W.triplet[i,1]
col <- W.triplet[i,2]
for(j in 1:q)
{
Z.triplet[i,j] <- Z[[j]][row, col]
}
}
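## Z.triplet[i, ] stores the q dissimilarity values for the (row, col) pair of
## the i-th non-zero entry of W, so the candidate edge weights
## exp(-Z.triplet %*% alpha) can be recomputed in a single vectorised step each
## time alpha is updated.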
if(W.binary)
{
W.triplet[ ,3] <- as.numeric(exp(-Z.triplet %*% alpha)>=0.5)
}else
{
W.triplet[ ,3] <- as.numeric(exp(-Z.triplet %*% alpha))
}
W.triplet.sum <- tapply(W.triplet[ ,3], W.triplet[ ,1], sum)
spam.W@entries <- W.triplet[ ,3]
spam.Wprop <- spam.W
W.tripletprop <- W.triplet
#### Create the matrix form of Q
rho <- 0.99
Q <- -rho * spam.W
diag(Q) <- rho * rowSums(spam.W) + 1-rho
det.Q <- sum(log(diag(chol.spam(Q))))
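## For a symmetric positive definite Q with upper Cholesky factor R (Q = R'R),
## log|Q| = 2 * sum(log(diag(R))), so det.Q holds half the log-determinant; the
## same convention is applied to the proposal below, so the two terms are
## directly comparable in the acceptance ratio.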
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rbinom(n=n.miss, size=trials[which.miss==0], prob=prob[which.miss==0])
failures.DA <- trials - Y.DA
}else
{}
####################
## Sample from beta
####################
offset.temp <- phi + offset
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, K, p, beta, offset.temp, Y.DA, failures.DA, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, K, p, beta, offset.temp, Y.DA, failures.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
####################
## Sample from phi
####################
beta.offset <- X.standardised %*% beta + offset
temp1 <- binomialcarupdateRW(Wtriplet=W.triplet, Wbegfin=W.begfin, Wtripletsum=W.triplet.sum, nsites=K, phi=phi, tau2=tau2, y=Y.DA, failures=failures.DA, phi_tune=proposal.sd.phi, rho=rho, offset=beta.offset)
phi <- temp1[[1]]
phi <- phi - mean(phi)
accept[3] <- accept[3] + temp1[[2]]
accept[4] <- accept[4] + K
##################
## Sample from tau2
##################
temp2 <- quadform(W.triplet, W.triplet.sum, n.triplet, K, phi, phi, rho)
tau2.posterior.scale <- temp2 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
######################
#### Sample from alpha
######################
## Propose a value
proposal.alpha <- alpha
for(r in 1:q)
{
proposal.alpha[r] <- rtruncnorm(n=1, a=0, b=alpha.max[r], mean=alpha[r], sd=proposal.sd.alpha[r])
}
## Create the proposal values for W and Q
if(W.binary)
{
W.tripletprop[ ,3] <- as.numeric(exp(-Z.triplet %*% proposal.alpha)>=0.5)
}else
{
W.tripletprop[ ,3] <- as.numeric(exp(-Z.triplet %*% proposal.alpha))
}
W.triplet.sum.prop <- tapply(W.tripletprop[ ,3], W.tripletprop[ ,1], sum)
spam.Wprop@entries <- W.tripletprop[ ,3]
Qprop <- -rho * spam.Wprop
diag(Qprop) <- rho * rowSums(spam.Wprop) + 1-rho
det.Qprop <- sum(log(diag(chol.spam(Qprop))))
temp3 <- quadform(W.tripletprop, W.triplet.sum.prop, n.triplet, K, phi, phi, rho)
#### Calculate the acceptance probability
logprob.current <- det.Q - temp2 / tau2
logprob.proposal <- det.Qprop - temp3 / tau2
hastings <- sum(log(dtruncnorm(x=alpha, a=rep(0,q), b=alpha.max, mean=proposal.alpha, sd=proposal.sd.alpha)) - log(dtruncnorm(x=proposal.alpha, a=rep(0,q), b=alpha.max, mean=alpha, sd=proposal.sd.alpha)))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposed value
if(prob > runif(1))
{
alpha <- proposal.alpha
det.Q <- det.Qprop
W.triplet[ ,3] <- W.tripletprop[ ,3]
W.triplet.sum <- W.triplet.sum.prop
accept[5] <- accept[5] + 1
}else
{}
accept[6] <- accept[6] + 1
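## The truncated-normal proposal on [0, alpha.max] is not symmetric, so the
## Hastings correction above compares the forward and reverse proposal densities;
## prob > runif(1) then performs the usual Metropolis-Hastings accept/reject step.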
#########################
## Calculate the deviance
#########################
lp <- as.numeric(X.standardised %*% beta) + phi + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.tau2[ele, ] <- tau2
samples.alpha[ele, ] <- alpha
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self-tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
proposal.sd.alpha <- common.accceptrates2(accept[5:6], proposal.sd.alpha, 40, 50, alpha.max/4)
accept <- c(0,0,0,0,0,0)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2, samples.alpha=samples.alpha, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.dissimilarityCARMCMC.R
|
binomial.glm <- function(formula, data=NULL, trials, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "binomial")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check and format the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
int.check <- K-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials have non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
failures <- trials - Y
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.glmMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.glmMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.glmMCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.final <- c(accept.beta)
names(accept.final) <- c("beta")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.logit <- as.numeric(X.standardised %*% mean.beta) + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.results <- summary.beta
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.final <- c(accept.beta)
names(accept.final) <- c("beta")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.logit <- as.numeric(X.standardised %*% mean.beta) + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.results <- summary.beta
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Binomial (logit link function)", "\nRandom effects model - None\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
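#### Illustrative call (not run). dat and n.trials are hypothetical objects, with
#### dat a data.frame containing Y and x and n.trials a vector of trial counts:
# mod <- binomial.glm(formula=Y~x, data=dat, trials=n.trials,
#                     burnin=5000, n.sample=20000)
# mod$summary.results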
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.glm.R
|
binomial.glmMCMC <- function(Y, failures, trials, offset, X.standardised, K, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, verbose, chain)
{
#library(Rcpp)
#Rcpp::sourceCpp("src/CARBayes.cpp")
#source("R/common.functions.R")
##########################################
#### Generate the initial parameter values
##########################################
dat <- cbind(Y, failures)
mod.glm <- glm(dat~X.standardised-1, offset=offset, family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
###################################################################
#### Compute the fitted values based on the current parameter values
####################################################################
lp <- as.numeric(X.standardised %*% beta) + offset
prob <- exp(lp) / (1 + exp(lp))
Y.DA <- Y
failures.DA <- trials - Y.DA
###############################
#### Set up the MCMC quantities
###############################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.loglike <- array(NA, c(n.keep, K))
samples.fitted <- array(NA, c(n.keep, K))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept <- rep(0,2)
proposal.sd.beta <- 0.01
#### Start timer
if(verbose)
{
cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
######################
#### Run an MCMC chain
######################
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rbinom(n=n.miss, size=trials[which.miss==0], prob=prob[which.miss==0])
failures.DA <- trials - Y.DA
}else
{}
####################
## Sample from beta
####################
offset.temp <- offset
if(MALA)
{
temp <- binomialbetaupdateMALA(X.standardised, K, p, beta, offset.temp, Y.DA, failures.DA, trials, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- binomialbetaupdateRW(X.standardised, K, p, beta, offset.temp, Y.DA, failures.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}
beta <- temp[[1]]
accept[1] <- accept[1] + temp[[2]]
accept[2] <- accept[2] + n.beta.block
#########################
## Calculate the deviance
#########################
lp <- as.numeric(X.standardised %*% beta) + offset
prob <- exp(lp) / (1 + exp(lp))
fitted <- trials * prob
loglike <- dbinom(x=Y, size=trials, prob=prob, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- fitted
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self-tune the acceptance probabilities
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
}
accept <- rep(0,2)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### Close the progress bar if used
if(verbose)
{
close(progressBar)
}else
{}
############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
chain.results <- list(samples.beta=samples.beta, samples.loglike=samples.loglike, samples.fitted=samples.fitted,
samples.Y=samples.Y, accept=accept)
#### Return the results
return(chain.results)
}
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.glmMCMC.R
|
binomial.lerouxCAR <- function(formula, data=NULL, trials, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "binomial")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Check and format the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials have missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials have non-numeric values.", call.=FALSE)
int.check <- K-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials have non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials have zero or negative values.", call.=FALSE)
failures <- trials - Y
if(sum(Y>trials, na.rm=TRUE)>0) stop("the response variable has larger values than the numbers of trials.", call.=FALSE)
#### rho
if(is.null(rho))
{
rho <- runif(1)
fix.rho <- FALSE
}else
{
fix.rho <- TRUE
}
if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)
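## If rho is supplied it is held fixed throughout the run (fix.rho=TRUE) and its
## acceptance rate is reported as NA; otherwise it receives a random starting
## value on (0, 1) and is updated within the MCMC algorithm.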
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
common.prior.var.check(prior.tau2)
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
#### Only 1 chain
results <- binomial.lerouxCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
#### Multiple chains in series
results <- as.list(rep(NA, n.chains))
for(i in 1:n.chains)
{
results[[i]] <- binomial.lerouxCARMCMC(Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
}
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
#### Multiple chains in parallel
results <- as.list(rep(NA, n.chains))
if(verbose)
{
compclust <- makeCluster(n.cores, outfile="CARBayesprogress.txt")
cat("The current progress of the model fitting algorithm has been output to CARBayesprogress.txt in the working directory")
}else
{
compclust <- makeCluster(n.cores)
}
results <- clusterCall(compclust, fun=binomial.lerouxCARMCMC, Y=Y, failures=failures, trials=trials, offset=offset, X.standardised=X.standardised, W=W, rho=rho, fix.rho=fix.rho, K=K, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
stopCluster(compclust)
}else
{
stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}
#### end timer
if(verbose)
{
cat("\nSummarising results.\n")
}else
{}
###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
## Compute the acceptance rates
accept.beta <- 100 * results$accept[1] / results$accept[2]
accept.phi <- 100 * results$accept[3] / results$accept[4]
accept.tau2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * results$accept[5] / results$accept[6]
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "tau2")
## Compute the model fit criterion
mean.beta <- apply(results$samples.beta, 2, mean)
mean.phi <- apply(results$samples.phi, 2, mean)
mean.logit <- as.numeric(X.standardised %*% mean.beta) + mean.phi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(results$samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Create MCMC objects and back transform the regression parameters
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), rho=mcmc(results$samples.rho), tau2=mcmc(results$samples.tau2), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y))
#### Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples$tau2), quantile(samples$tau2, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z)
if(!fix.rho)
{
summary.hyper[2, 1:3] <- c(mean(samples$rho), quantile(samples$rho, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.rho, effectiveSize(samples$rho), geweke.diag(samples$rho)$z)
}else
{
summary.hyper[2, 1:3] <- c(rho, rho, rho)
summary.hyper[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}else
{
## Compute the acceptance rates
accept.temp <- lapply(results, function(l) l[["accept"]])
accept.temp2 <- do.call(what=rbind, args=accept.temp)
accept.beta <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2])
accept.phi <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4])
accept.tau2 <- 100
if(!fix.rho)
{
accept.rho <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6])
}else
{
accept.rho <- NA
}
accept.final <- c(accept.beta, accept.phi, accept.rho, accept.tau2)
names(accept.final) <- c("beta", "phi", "rho", "tau2")
## Extract the samples into separate lists
samples.beta.list <- lapply(results, function(l) l[["samples.beta"]])
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]])
samples.rho.list <- lapply(results, function(l) l[["samples.rho"]])
samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]])
samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]])
samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]])
samples.Y.list <- lapply(results, function(l) l[["samples.Y"]])
## Convert the samples into separate matrix objects
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list)
samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list)
samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list)
samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list)
samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list)
## Compute the model fit criteria
mean.beta <- apply(samples.beta.matrix, 2, mean)
mean.phi <- apply(samples.phi.matrix, 2, mean)
mean.logit <- as.numeric(X.standardised %*% mean.beta) + mean.phi + offset
mean.prob <- exp(mean.logit) / (1 + exp(mean.logit))
fitted.mean <- trials * mean.prob
deviance.fitted <- -2 * sum(dbinom(x=Y, size=trials, prob=mean.prob, log=TRUE), na.rm=TRUE)
modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted)
## Create the Fitted values and residuals
fitted.values <- apply(samples.fitted.matrix, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values * (1 - mean.prob))
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
## Backtransform the regression parameters
for(j in 1:n.chains)
{
samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE)
}
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list)
## Create MCMC objects
beta.temp <- samples.beta.list
phi.temp <- samples.phi.list
rho.temp <- samples.rho.list
tau2.temp <- samples.tau2.list
loglike.temp <- samples.loglike.list
fitted.temp <- samples.fitted.list
Y.temp <- samples.Y.list
for(j in 1:n.chains)
{
beta.temp[[j]] <- mcmc(samples.beta.list[[j]])
phi.temp[[j]] <- mcmc(samples.phi.list[[j]])
rho.temp[[j]] <- mcmc(samples.rho.list[[j]])
tau2.temp[[j]] <- mcmc(samples.tau2.list[[j]])
loglike.temp[[j]] <- mcmc(samples.loglike.list[[j]])
fitted.temp[[j]] <- mcmc(samples.fitted.list[[j]])
Y.temp[[j]] <- mcmc(samples.Y.list[[j]])
}
beta.mcmc <- as.mcmc.list(beta.temp)
phi.mcmc <- as.mcmc.list(phi.temp)
rho.mcmc <- as.mcmc.list(rho.temp)
tau2.mcmc <- as.mcmc.list(tau2.temp)
fitted.mcmc <- as.mcmc.list(fitted.temp)
Y.mcmc <- as.mcmc.list(Y.temp)
samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rho.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc)
## Create a summary object
n.keep <- floor((n.sample - burnin)/thin)
summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975))))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2])
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)")
summary.hyper <- array(NA, c(2 ,7))
summary.hyper[1, 1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)))
summary.hyper[1, 4:7] <- c(n.keep, accept.tau2, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2])
if(!fix.rho)
{
summary.hyper[2, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975)))
summary.hyper[2, 4:7] <- c(n.keep, accept.rho, effectiveSize(rho.mcmc), gelman.diag(rho.mcmc)$psrf[ ,2])
}else
{
summary.hyper[2, 1:3] <- c(rho, rho, rho)
summary.hyper[2, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
rownames(summary.results)[(nrow(summary.results)-1):nrow(summary.results)] <- c("tau2", "rho")
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
}
###################################
#### Compile and return the results
###################################
model.string <- c("Likelihood model - Binomial (logit link function)", "\nRandom effects model - Leroux CAR\n")
n.total <- floor((n.sample - burnin) / thin) * n.chains
mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains)
names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X)
class(results) <- "CARBayes"
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
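#### Illustrative call (not run). dat, n.trials and W.adj are hypothetical; under
#### the Leroux CAR prior, rho=0 corresponds to independent random effects and
#### rho=1 to the intrinsic CAR model, while rho=NULL (the default) treats rho
#### as unknown:
# mod <- binomial.lerouxCAR(formula=Y~x, data=dat, trials=n.trials, W=W.adj,
#                           burnin=10000, n.sample=30000, rho=NULL)
# mod$summary.results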
|
/scratch/gouwar.j/cran-all/cranData/CARBayes/R/binomial.lerouxCAR.R
|