#' Required Sample Size
#'
#' Function to determine required sample size to be within a given margin of
#' error.
#'
#' The answer is based on a normal approximation when using type \code{"pi"}.
#'
#' @param b the desired bound.
#' @param sigma population standard deviation. Not required if using type
#' \code{"pi"}.
#' @param p estimate for the population proportion of successes. Not required
#' if using type \code{"mu"}.
#' @param conf.level confidence level for the problem, restricted to lie
#' between zero and one.
#' @param type character string, one of \code{"mu"} or \code{"pi"}, or just the
#' initial letter of each, indicating the appropriate parameter. Default value
#' is \code{"mu"}.
#' @return Returns required sample size.
#' @author Alan T. Arnholt
#' @keywords univar
#' @examples
#'
#' nsize(b=.03, p=708/1200, conf.level=.90, type="pi")
#' # Returns the required sample size (n) to estimate the population
#' # proportion of successes with a 0.9 confidence interval
#' # so that the margin of error is no more than 0.03 when the
#' # estimate of the population proportion of successes is 708/1200.
#' # This is problem 5.38 on page 257 of Kitchens' BSDA.
#'
#' nsize(b=.15, sigma=.31, conf.level=.90, type="mu")
#' # Returns the required sample size (n) to estimate the population
#' # mean with a 0.9 confidence interval so that the margin
#' # of error is no more than 0.15. This is Example 5.17 on page
#' # 261 of Kitchens' BSDA.
#'
#' @export nsize
nsize <-
function(b, sigma = NULL, p = 0.5, conf.level = 0.95, type = "mu")
{
choices <- c("mu", "pi")
alt <- pmatch(type, choices)
type <- choices[alt]
if(length(type) > 1 || is.na(type))
stop("type must be one of \"mu\", \"pi\"")
if(missing(b) || length(b) != 1 || is.na(b))
stop("b must be a single number")
if(type == "pi" && b > 1)
stop("b must be less than 1")
if(type == "mu") {
z <- qnorm(1 - (1 - conf.level)/2)
n <- ((z * sigma)/b)^2
n <- ceiling(n)
cat("\n")
cat("The required sample size (n) to estimate the population",
"\n")
cat("mean with a", conf.level,
"confidence interval so that the margin", "\n")
cat("of error is no more than", b, "is", n, ".", "\n")
cat("\n\n")
}
else if(type == "pi") {
z <- qnorm(1 - (1 - conf.level)/2)
n <- p * (1 - p) * (z/b)^2
n <- ceiling(n)
cat("\n")
cat("The required sample size (n) to estimate the population",
"\n")
cat("proportion of successes with a", conf.level,
"confidence interval", "\n")
cat("so that the margin of error is no more than", b, "is",
n, ".", "\n")
cat("\n\n")
}
}
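# Illustrative check (not part of BSDA): recompute the two documented examples
# by hand with the same normal-approximation formulas nsize() implements,
#   n = ceiling((z*sigma/b)^2)  for type = "mu",
#   n = ceiling(p*(1-p)*(z/b)^2)  for type = "pi".
z <- qnorm(1 - (1 - 0.90)/2)
ceiling((708/1200) * (1 - 708/1200) * (z/0.03)^2)  # should agree with the "pi" example above
ceiling((z * 0.31/0.15)^2)                         # should agree with the "mu" example above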
# /scratch/gouwar.j/cran-all/cranData/BSDA/R/nsize.R
#' Normality Tester
#'
#' Q-Q plots of randomly generated normal data of the same size as the tested
#' data are plotted on the perimeter of the graph while a Q-Q plot of the
#' actual data is depicted in the center of the graph.
#'
#' Q-Q plots of randomly generated normal data of the same size as the tested
#' data are plotted on the perimeter of the graph sheet while a Q-Q plot of
#' the actual data is depicted in the center of the graph. The p-values are
#' calculated from the Shapiro-Wilk W-statistic. The function only works on
#' numeric vectors containing at most 5000 observations.
#'
#' @param actual.data a numeric vector. Missing and infinite values are
#' allowed, but are ignored in the calculation. The length of
#' \code{actual.data} must be at most 5000 after dropping nonfinite values.
#' @author Alan T. Arnholt
#' @references Shapiro, S.S. and Wilk, M.B. (1965). An analysis of variance
#' test for normality (complete samples). Biometrika \bold{52} : 591-611.
#' @keywords distribution
#' @examples
#'
#' ntester(rexp(50,1))
#' # Q-Q plot of random exponential data in center plot
#' # surrounded by 8 Q-Q plots of randomly generated
#' # standard normal data of size 50.
#'
#' @export ntester
ntester <-
function(actual.data)
{
Ared <- "#C00000"
Ablue <- "#0080FF"
par(mfrow = c(3, 3))
par(oma = c(1, 0, 2, 0))
par(mar = c(0, 0, 2, 0))
par(pty = "s")
for(i in 1:4) {
SimData <- rnorm(length(actual.data))
s <- shapiro.test(SimData)
qqnorm(SimData, xlab = "", ylab = "", axes = FALSE, col = Ablue, main=paste("SimNorm p-val = ", round(s$p.value, 3)),col.main=Ablue)
box()
qqline(SimData, col = Ared)
}
sx <- shapiro.test(actual.data)
qqnorm(actual.data, xlab = "", ylab = "", axes =FALSE, col = Ared, main = paste("YourData p-val = ", round(sx$p.value, 3)),col.main=Ared)
box()
qqline(actual.data, col = Ablue)
for(i in 6:9) {
SimData <- rnorm(length(actual.data))
s <- shapiro.test(SimData)
qqnorm(SimData, xlab = "", ylab = "", axes = FALSE, col = Ablue, main= paste("SimNorm p-val = ", round(s$p.value, 3)),col.main=Ablue)
box()
qqline(SimData, col = Ared)
}
mtext("Simulated Normal Data on Perimeter - Actual Data in Center", side = 3, outer = TRUE, cex = 1.5, col = Ared)
par(oma = c(0, 0, 0, 0))
par(mfrow = c(1, 1))
par(mar = c(5.1, 4.1, 4.1, 2.1))
par(pty = "m")
}
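# Illustrative sketch (not part of BSDA): what a single panel of ntester()
# does, assuming 'dat' is any numeric vector with 3 to 5000 finite values
# (the range shapiro.test() accepts).
dat <- rexp(50, 1)
sw <- shapiro.test(dat)                        # Shapiro-Wilk p-value shown in the panel title
qqnorm(dat, main = paste("p-val =", round(sw$p.value, 3)))
qqline(dat)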
# /scratch/gouwar.j/cran-all/cranData/BSDA/R/ntester.R
#' Summarized t-test
#'
#' Performs a one-sample, two-sample, or a Welch modified two-sample t-test
#' based on user supplied summary information. Output is identical to that
#' produced with \code{t.test}.
#'
#' If \code{y} is \code{NULL}, a one-sample t-test is carried out with
#' \code{x}. If y is not \code{NULL}, either a standard or Welch modified
#' two-sample t-test is performed, depending on whether \code{var.equal} is
#' \code{TRUE} or \code{FALSE}.
#'
#' @param mean.x a single number representing the sample mean of \code{x}
#' @param s.x a single number representing the sample standard deviation for
#' \code{x}
#' @param n.x a single number representing the sample size for \code{x}
#' @param mean.y a single number representing the sample mean of \code{y}
#' @param s.y a single number representing the sample standard deviation for
#' \code{y}
#' @param n.y a single number representing the sample size for \code{y}
#' @param alternative is a character string, one of \code{"greater"},
#' \code{"less"} or \code{"two.sided"}, or just the initial letter of each,
#' indicating the specification of the alternative hypothesis. For the
#' one-sample t-test, \code{alternative} refers to the true mean of the parent
#' population in relation to the hypothesized value \code{mu}. For the
#' standard and Welch modified two-sample t-tests, \code{alternative} refers
#' to the difference between the true population mean for \code{x} and that
#' for \code{y}, in relation to \code{mu}.
#' @param mu is a single number representing the value of the mean or
#' difference in means specified by the null hypothesis.
#' @param var.equal logical flag: if \code{TRUE}, the variances of the parent
#' populations of \code{x} and \code{y} are assumed equal. Argument
#' \code{var.equal} should be supplied only for the two-sample tests.
#' @param conf.level is the confidence level for the returned confidence
#' interval; it must lie between zero and one.
#' @return A list of class \code{htest}, containing the following components:
#' \item{statistic}{the t-statistic, with names attribute \code{"t"}}
#' \item{parameters}{is the degrees of freedom of the t-distribution associated
#' with statistic. Component \code{parameters} has names attribute
#' \code{"df"}.} \item{p.value}{the p-value for the test.} \item{conf.int }{is
#' a confidence interval (vector of length 2) for the true mean or difference
#' in means. The confidence level is recorded in the attribute
#' \code{conf.level}. When alternative is not \code{"two.sided"}, the
#' confidence interval will be half-infinite, to reflect the interpretation of
#' a confidence interval as the set of all values \code{k} for which one would
#' not reject the null hypothesis that the true mean or difference in means is
#' \code{k} . Here infinity will be represented by \code{Inf}.}
#' \item{estimate}{vector of length 1 or 2, giving the sample mean(s) or mean
#' of differences; these estimate the corresponding population parameters.
#' Component \code{estimate} has a names attribute describing its elements.}
#' \item{null.value}{the value of the mean or difference in means specified by
#' the null hypothesis. This equals the input argument \code{mu}. Component
#' \code{null.value} has a names attribute describing its elements.}
#' \item{alternative}{records the value of the input argument alternative:
#' \code{"greater"} , \code{"less"} or \code{"two.sided"}.} \item{data.name}{a
#' character string (vector of length 1) containing the names x and y for the
#' two summarized samples.}
#' @section Null Hypothesis: For the one-sample t-test, the null hypothesis is
#' that the mean of the population from which \code{x} is drawn is \code{mu}.
#' For the standard and Welch modified two-sample t-tests, the null hypothesis
#' is that the population mean for \code{x} less that for \code{y} is
#' \code{mu}.
#'
#' The alternative hypothesis in each case indicates the direction of
#' divergence of the population mean for \code{x} (or difference of means for
#' \code{x} and \code{y}) from \code{mu} (i.e., \code{"greater"},
#' \code{"less"}, or \code{"two.sided"}).
#' @author Alan T. Arnholt
#' @seealso \code{\link{z.test}}, \code{\link{zsum.test}}
#' @references Kitchens, L.J. (2003). \emph{Basic Statistics and Data
#' Analysis}. Duxbury.
#'
#' Hogg, R. V. and Craig, A. T. (1970). \emph{Introduction to Mathematical
#' Statistics, 3rd ed}. Toronto, Canada: Macmillan.
#'
#' Mood, A. M., Graybill, F. A. and Boes, D. C. (1974). \emph{Introduction to
#' the Theory of Statistics, 3rd ed}. New York: McGraw-Hill.
#'
#' Snedecor, G. W. and Cochran, W. G. (1980). \emph{Statistical Methods, 7th
#' ed}. Ames, Iowa: Iowa State University Press.
#' @keywords htest
#' @examples
#'
#' tsum.test(mean.x=5.6, s.x=2.1, n.x=16, mu=4.9, alternative="greater")
#' # Problem 6.31 on page 324 of BSDA states: The chamber of commerce
#' # of a particular city claims that the mean carbon dioxide
#' # level of air pollution is no greater than 4.9 ppm. A random
#' # sample of 16 readings resulted in a sample mean of 5.6 ppm,
#' # and s=2.1 ppm. One-sided one-sample t-test. The null
#' # hypothesis is that the population mean for 'x' is 4.9.
#' # The alternative hypothesis states that it is greater than 4.9.
#'
#' x <- rnorm(12)
#' tsum.test(mean(x), sd(x), n.x=12)
#' # Two-sided one-sample t-test. The null hypothesis is that
#' # the population mean for 'x' is zero. The alternative
#' # hypothesis states that it is either greater or less
#' # than zero. A confidence interval for the population mean
#' # will be computed. Note: above returns same answer as:
#' t.test(x)
#'
#' x <- c(7.8, 6.6, 6.5, 7.4, 7.3, 7.0, 6.4, 7.1, 6.7, 7.6, 6.8)
#' y <- c(4.5, 5.4, 6.1, 6.1, 5.4, 5.0, 4.1, 5.5)
#' tsum.test(mean(x), s.x=sd(x), n.x=11, mean(y), s.y=sd(y), n.y=8, mu=2)
#' # Two-sided standard two-sample t-test. The null hypothesis
#' # is that the population mean for 'x' less that for 'y' is 2.
#' # The alternative hypothesis is that this difference is not 2.
#' # A confidence interval for the true difference will be computed.
#' # Note: above returns same answer as:
#' t.test(x, y, mu=2)
#'
#' tsum.test(mean(x), s.x=sd(x), n.x=11, mean(y), s.y=sd(y), n.y=8, conf.level=0.90)
#' # Two-sided standard two-sample t-test. The null hypothesis
#' # is that the population mean for 'x' less that for 'y' is zero.
#' # The alternative hypothesis is that this difference is not
#' # zero. A 90% confidence interval for the true difference will
#' # be computed. Note: above returns same answer as:
#' t.test(x, y, conf.level=0.90)
#'
#'
#' @export tsum.test
tsum.test <-
function(mean.x, s.x = NULL, n.x = NULL, mean.y = NULL, s.y = NULL, n.y = NULL,
alternative = "two.sided", mu = 0, var.equal = FALSE, conf.level = 0.95)
{
alt.expanded <- if(!missing(alternative)) char.expand(alternative,
c("two.sided", "greater", "less"), stop(
"argument 'alternative' must match one of \"greater\", \"less\", \"two.sided\"."
)) else alternative
if(!missing(mu))
if((length(mu) != 1) || !is.finite(mu))
stop("argument 'mu' must be a single finite numeric value."
)
if(!missing(conf.level))
if((length(conf.level) != 1) || !is.finite(conf.level) || (
conf.level <= 0) || (conf.level >= 1))
stop("argument 'conf.level' must be a single number greater than zero and less than one.")
if(!is.null(mean.x) && is.null(mean.y) && is.null(n.x) && is.null(
s.x))
stop("You must enter the value for both s.x and n.x")
if(is.null(n.x) && !is.null(mean.x) && !is.null(s.x) && is.null(mean.y
))
stop("You must enter the value for n.x")
if(is.null(s.x) && !is.null(mean.x) && !is.null(n.x) && is.null(mean.y
))
stop("You must enter the value for s.x")
if(is.null(n.y) && !is.null(mean.x) && !is.null(mean.y) && !is.null(
s.y) && !is.null(s.x) && !is.null(n.x))
stop("You must enter the value for n.y")
if(is.null(n.y) && is.null(n.x) && !is.null(mean.x) && !is.null(mean.y
) && !is.null(s.y) && !is.null(s.x))
stop("You must enter the value for both n.x and n.y")
if(is.null(s.x) && is.null(s.y) && !is.null(mean.x) && !is.null(mean.y
) && !is.null(n.x) && !is.null(n.y))
stop("You must enter the value for both s.x and s.y")
if(!is.null(s.x) && is.null(s.y) && !is.null(mean.x) && !is.null(
mean.y) && !is.null(n.x) && !is.null(n.y))
stop("You must enter the value for s.y")
if(is.null(n.y) && is.null(s.y) && !is.null(mean.x) && !is.null(mean.y
) && !is.null(s.x) && !is.null(n.x))
stop("You must enter the value for both s.y and n.y")
alpha <- 1 - conf.level
if(is.null(mean.y)) {
# one-sample t-test.
if(!missing(var.equal)) warning(
"argument 'var.equal' ignored for one-sample test."
)
conf.int.xbar <- mean.x
conf.int.s <- sqrt(s.x^2/n.x)
ret.val <- list(statistic = (conf.int.xbar - mu)/conf.int.s,
parameters = n.x - 1, estimate = conf.int.xbar,
null.value = mu, alternative = alt.expanded, method =
"One-sample t-Test", data.name = c("Summarized x"))
names(ret.val$estimate) <- "mean of x"
names(ret.val$null.value) <- "mean"
}
else {
# a two-sample test; the two assignments below simply force
# evaluation of the summarized means
mean.x <- mean.x
mean.y <- mean.y
conf.int.xbar <- mean.x - mean.y
var.x <- s.x^2
var.y <- s.y^2
conf.int.s <- if(var.equal) sqrt((((n.x - 1) * var.x + (n.y -
1) * var.y) * (1/n.x + 1/n.y))/(n.x + n.y -
2)) else sqrt((var.x/n.x) + (var.y/n.y))
ret.val <- c(if(var.equal) list(method =
"Standard Two-Sample t-Test", parameters = n.x +
n.y - 2) else list(method =
"Welch Modified Two-Sample t-Test", parameters
= {
const <- 1/(1 + (n.x * var.y)/(n.y * var.x))
1/((const^2)/(n.x - 1) + ((1 - const)^2)/
(n.y - 1))
}
), list(statistic = (conf.int.xbar - mu)/conf.int.s,
estimate = c(mean.x, mean.y), null.value = mu,
alternative = alt.expanded,
data.name = paste("Summarized ", deparse(substitute(x)), " and ", deparse(substitute(y)), sep = "")))
names(ret.val$estimate) <- c("mean of x", "mean of y")
names(ret.val$null.value) <- "difference in means"
}
ret.val <- c(ret.val, switch(alt.expanded,
two.sided = {
conf.int.hw <- qt((1 - alpha/2), ret.val$parameters) *
conf.int.s
list(p.value = 2 * pt( - abs(ret.val$statistic),
ret.val$parameters), conf.int = c(
conf.int.xbar - conf.int.hw, conf.int.xbar +
conf.int.hw))
}
,
greater = {
list(p.value = 1 - pt(ret.val$statistic, ret.val$
parameters), conf.int = c(conf.int.xbar - qt(
(1 - alpha), ret.val$parameters) * conf.int.s,
NA))
}
,
less = {
list(p.value = pt(ret.val$statistic, ret.val$
parameters), conf.int = c(NA, conf.int.xbar +
qt((1 - alpha), ret.val$parameters) *
conf.int.s))
}
))
names(ret.val$statistic) <- "t"
names(ret.val$parameters) <- "df"
attr(ret.val$conf.int, "conf.level") <- conf.level
ret.val <- ret.val[c("statistic", "parameters", "p.value", "conf.int",
"estimate", "null.value", "alternative", "method", "data.name"
)]
oldClass(ret.val) <- "htest"
return(ret.val)
}
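# Illustrative check (not part of BSDA): the Welch degrees of freedom computed
# inside tsum.test() are algebraically the familiar Satterthwaite expression;
# the helper below is a sketch and should match the df that
# tsum.test(..., var.equal = FALSE) reports for the same summaries.
welch_df <- function(s.x, n.x, s.y, n.y) {
    vx <- s.x^2/n.x
    vy <- s.y^2/n.y
    (vx + vy)^2/(vx^2/(n.x - 1) + vy^2/(n.y - 1))
}
welch_df(s.x = 2.1, n.x = 16, s.y = 1.8, n.y = 12)  # illustrative summary values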
# /scratch/gouwar.j/cran-all/cranData/BSDA/R/tsum.test.R
#' Z-test
#'
#' This function is based on the standard normal distribution and creates
#' confidence intervals and tests hypotheses for both one and two sample
#' problems.
#'
#' If \code{y} is \code{NULL}, a one-sample z-test is carried out with
#' \code{x}. If y is not \code{NULL}, a standard two-sample z-test is
#' performed.
#'
#' @param x numeric vector; \code{NA}s and \code{Inf}s are allowed but will be
#' removed.
#' @param y numeric vector; \code{NA}s and \code{Inf}s are allowed but will be
#' removed.
#' @param alternative character string, one of \code{"greater"}, \code{"less"}
#' or \code{"two.sided"}, or the initial letter of each, indicating the
#' specification of the alternative hypothesis. For one-sample tests,
#' \code{alternative} refers to the true mean of the parent population in
#' relation to the hypothesized value \code{mu}. For the standard two-sample
#' tests, \code{alternative} refers to the difference between the true
#' population mean for \code{x} and that for \code{y}, in relation to
#' \code{mu}.
#' @param mu a single number representing the value of the mean or difference
#' in means specified by the null hypothesis
#' @param sigma.x a single number representing the population standard
#' deviation for \code{x}
#' @param sigma.y a single number representing the population standard
#' deviation for \code{y}
#' @param conf.level confidence level for the returned confidence interval,
#' restricted to lie between zero and one
#' @return A list of class \code{htest}, containing the following components:
#' \item{statistic}{the z-statistic, with names attribute \code{"z"}}
#' \item{p.value}{the p-value for the test} \item{conf.int}{is a confidence
#' interval (vector of length 2) for the true mean or difference in means. The
#' confidence level is recorded in the attribute \code{conf.level}. When
#' alternative is not \code{"two.sided"}, the confidence interval will be
#' half-infinite, to reflect the interpretation of a confidence interval as the
#' set of all values \code{k} for which one would not reject the null
#' hypothesis that the true mean or difference in means is \code{k} . Here
#' infinity will be represented by \code{Inf}.} \item{estimate}{vector of
#' length 1 or 2, giving the sample mean(s) or mean of differences; these
#' estimate the corresponding population parameters. Component \code{estimate}
#' has a names attribute describing its elements.} \item{null.value}{is the
#' value of the mean or difference in means specified by the null hypothesis.
#' This equals the input argument \code{mu}. Component \code{null.value} has a
#' names attribute describing its elements.} \item{alternative}{records the
#' value of the input argument alternative: \code{"greater"}, \code{"less"} or
#' \code{"two.sided"}.} \item{data.name}{a character string (vector of length
#' 1) containing the actual names of the input vectors \code{x} and \code{y}}
#' @section Null Hypothesis: For the one-sample z-test, the null hypothesis is
#' that the mean of the population from which \code{x} is drawn is \code{mu}.
#' For the standard two-sample z-tests, the null hypothesis is that the
#' population mean for \code{x} less that for \code{y} is \code{mu}.
#'
#' The alternative hypothesis in each case indicates the direction of
#' divergence of the population mean for \code{x} (or difference of means for
#' \code{x} and \code{y}) from \code{mu} (i.e., \code{"greater"},
#' \code{"less"}, \code{"two.sided"}).
#' @author Alan T. Arnholt
#' @seealso \code{\link{zsum.test}}, \code{\link{tsum.test}}
#' @references Kitchens, L.J. (2003). \emph{Basic Statistics and Data
#' Analysis}. Duxbury.
#'
#' Hogg, R. V. and Craig, A. T. (1970). \emph{Introduction to Mathematical
#' Statistics, 3rd ed}. Toronto, Canada: Macmillan.
#'
#' Mood, A. M., Graybill, F. A. and Boes, D. C. (1974). \emph{Introduction to
#' the Theory of Statistics, 3rd ed}. New York: McGraw-Hill.
#'
#' Snedecor, G. W. and Cochran, W. G. (1980). \emph{Statistical Methods, 7th
#' ed}. Ames, Iowa: Iowa State University Press.
#' @keywords htest
#' @examples
#'
#' x <- rnorm(12)
#' z.test(x,sigma.x=1)
#' # Two-sided one-sample z-test where the assumed value for
#' # sigma.x is one. The null hypothesis is that the population
#' # mean for 'x' is zero. The alternative hypothesis states
#' # that it is either greater or less than zero. A confidence
#' # interval for the population mean will be computed.
#'
#' x <- c(7.8, 6.6, 6.5, 7.4, 7.3, 7.0, 6.4, 7.1, 6.7, 7.6, 6.8)
#' y <- c(4.5, 5.4, 6.1, 6.1, 5.4, 5.0, 4.1, 5.5)
#' z.test(x, sigma.x=0.5, y, sigma.y=0.5, mu=2)
#' # Two-sided standard two-sample z-test where sigma.x
#' # and sigma.y are both assumed to equal 0.5. The null hypothesis
#' # is that the population mean for 'x' less that for 'y' is 2.
#' # The alternative hypothesis is that this difference is not 2.
#' # A confidence interval for the true difference will be computed.
#'
#' z.test(x, sigma.x=0.5, y, sigma.y=0.5, conf.level=0.90)
#' # Two-sided standard two-sample z-test where sigma.x and
#' # sigma.y are both assumed to equal 0.5. The null hypothesis
#' # is that the population mean for 'x' less that for 'y' is zero.
#' # The alternative hypothesis is that this difference is not
#' # zero. A 90% confidence interval for the true difference will
#' # be computed.
#' rm(x, y)
#'
#' @export z.test
z.test <-
function(x, y = NULL, alternative = "two.sided", mu = 0, sigma.x = NULL,
sigma.y = NULL, conf.level = 0.95)
{
choices <- c("two.sided", "greater", "less")
alt <- pmatch(alternative, choices)
alternative <- choices[alt]
if(length(alternative) > 1 || is.na(alternative))
stop("alternative must be one of \"greater\", \"less\", \"two.sided\"")
if(!missing(mu))
if(length(mu) != 1 || is.na(mu))
stop("mu must be a single number")
if(is.null(sigma.x) && !is.null(x) && is.null(y))
stop("You must enter the value for sigma.x")
if(!is.null(y) && (is.null(sigma.y) || is.null(sigma.x)))
stop("You must enter values for both sigma.x and sigma.y")
if(!missing(conf.level))
if(length(conf.level) != 1 || is.na(conf.level) || conf.level <
0 || conf.level > 1)
stop("conf.level must be a number between 0 and 1")
if(!is.null(y)) {
dname <- paste(deparse(substitute(x)), "and", paste(deparse(
substitute(y))))
}
else {
dname <- deparse(substitute(x))
}
# remove NAs 8/07/17
xok <- !is.na(x)
x <- x[xok]
nx <- length(x)
if(nx <= 2)
stop("not enough x observations")
mx <- mean(x)
estimate <- mx
if(is.null(y)) {
stderr <- sigma.x/sqrt(nx)
zobs <- (mx - mu)/stderr
method <- c("One-sample z-Test")
names(estimate) <- c("mean of x")
}
else {
yok <- !is.na(y)
y <- y[yok]
ny <- length(y)
if(ny <= 2)
stop("not enough y observations")
my <- mean(y)
method <- c("Two-sample z-Test")
estimate <- c(mx, my)
names(estimate) <- c("mean of x", "mean of y")
stderr <- sqrt(((sigma.x^2)/nx) + ((sigma.y^2)/ny))
zobs <- (mx - my - mu)/stderr
}
if(alternative == "less") {
pval <- pnorm(zobs)
cint <- c(NA, zobs * stderr + qnorm(conf.level) * stderr)
}
else if(alternative == "greater") {
pval <- 1 - pnorm(zobs)
cint <- c(zobs * stderr - qnorm(conf.level) * stderr, NA)
}
else {
pval <- 2 * pnorm( - abs(zobs))
alpha <- 1 - conf.level
cint <- c(zobs * stderr - qnorm((1 - alpha/2)) * stderr, zobs *
stderr + qnorm((1 - alpha/2)) * stderr)
}
cint <- cint + mu
names(zobs) <- "z"
if(!is.null(y))
names(mu) <- "difference in means"
else names(mu) <- "mean"
attr(cint, "conf.level") <- conf.level
rval <- list(statistic = zobs, p.value = pval, conf.int = cint,
estimate = estimate, null.value = mu, alternative =
alternative, method = method, data.name = dname)
attr(rval, "class") <- "htest"
return(rval)
}
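# Illustrative check (not part of BSDA): the two-sided one-sample case of
# z.test() by hand; the statistic, p-value, and confidence limits below
# should agree with z.test(x, sigma.x = 1).
x <- rnorm(12)
se <- 1/sqrt(length(x))                  # sigma.x/sqrt(n) with sigma.x = 1
zobs <- mean(x)/se                       # mu = 0 under the null
c(z = zobs, p.value = 2 * pnorm(-abs(zobs)),
  lower = mean(x) - qnorm(0.975) * se, upper = mean(x) + qnorm(0.975) * se)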
# /scratch/gouwar.j/cran-all/cranData/BSDA/R/z.test.R
#' Summarized z-test
#'
#' This function is based on the standard normal distribution and creates
#' confidence intervals and tests hypotheses for both one and two sample
#' problems based on summarized information the user passes to the function.
#' Output is identical to that produced with \code{z.test}.
#'
#' If \code{y} is \code{NULL} , a one-sample z-test is carried out with
#' \code{x} . If y is not \code{NULL}, a standard two-sample z-test is
#' performed.
#'
#' @param mean.x a single number representing the sample mean of \code{x}
#' @param sigma.x a single number representing the population standard
#' deviation for \code{x}
#' @param n.x a single number representing the sample size for \code{x}
#' @param mean.y a single number representing the sample mean of \code{y}
#' @param sigma.y a single number representing the population standard
#' deviation for \code{y}
#' @param n.y a single number representing the sample size for \code{y}
#' @param alternative is a character string, one of \code{"greater"},
#' \code{"less"} or \code{"two.sided"}, or the initial letter of each,
#' indicating the specification of the alternative hypothesis. For one-sample
#' tests, \code{alternative} refers to the true mean of the parent population
#' in relation to the hypothesized value \code{mu}. For the standard two-sample
#' tests, \code{alternative} refers to the difference between the true
#' population mean for \code{x} and that for \code{y}, in relation to
#' \code{mu}.
#' @param mu a single number representing the value of the mean or difference
#' in means specified by the null hypothesis
#' @param conf.level confidence level for the returned confidence interval,
#' restricted to lie between zero and one
#' @return A list of class \code{htest}, containing the following components:
#' \item{statistic}{the z-statistic, with names attribute \code{z}.}
#' \item{p.value}{the p-value for the test} \item{conf.int }{is a confidence
#' interval (vector of length 2) for the true mean or difference in means. The
#' confidence level is recorded in the attribute \code{conf.level}. When
#' alternative is not \code{"two.sided"}, the confidence interval will be
#' half-infinite, to reflect the interpretation of a confidence interval as the
#' set of all values \code{k} for which one would not reject the null
#' hypothesis that the true mean or difference in means is \code{k}. Here,
#' infinity will be represented by \code{Inf}.} \item{estimate}{vector of
#' length 1 or 2, giving the sample mean(s) or mean of differences; these
#' estimate the corresponding population parameters. Component \code{estimate}
#' has a names attribute describing its elements.} \item{null.value}{the value
#' of the mean or difference in means specified by the null hypothesis. This
#' equals the input argument \code{mu}. Component \code{null.value} has a names
#' attribute describing its elements.} \item{alternative}{records the value of
#' the input argument alternative: \code{"greater"} , \code{"less"} or
#' \code{"two.sided"}. } \item{data.name}{a character string (vector of length
#' 1) containing the names \code{x} and \code{y} for the two summarized
#' samples}
#' @section Null Hypothesis: For the one-sample z-test, the null hypothesis is
#' that the mean of the population from which \code{x} is drawn is \code{mu}.
#' For the standard two-sample z-tests, the null hypothesis is that the
#' population mean for \code{x} less that for \code{y} is \code{mu}.
#'
#' The alternative hypothesis in each case indicates the direction of
#' divergence of the population mean for \code{x} (or difference of means of
#' \code{x} and \code{y}) from \code{mu} (i.e., \code{"greater"} ,
#' \code{"less"}, \code{"two.sided"} ).
#' @author Alan T. Arnholt
#' @seealso \code{\link{z.test}}, \code{\link{tsum.test}}
#' @references
#'
#' Kitchens, L. J. (2003). \emph{Basic Statistics and Data Analysis}. Duxbury.
#'
#' Hogg, R. V. and Craig, A. T. (1970). \emph{Introduction to Mathematical
#' Statistics, 3rd ed}. Toronto, Canada: Macmillan.
#'
#' Mood, A. M., Graybill, F. A. and Boes, D. C. (1974). \emph{Introduction to
#' the Theory of Statistics, 3rd ed}. New York: McGraw-Hill.
#'
#' Snedecor, G. W. and Cochran, W. G. (1980). \emph{Statistical Methods, 7th
#' ed}. Ames, Iowa: Iowa State University Press.
#' @keywords htest
#' @examples
#'
#' zsum.test(mean.x=56/30,sigma.x=2, n.x=30, alternative="greater", mu=1.8)
#' # Example 9.7 part a. from PASWR.
#' x <- rnorm(12)
#' zsum.test(mean(x),sigma.x=1,n.x=12)
#' # Two-sided one-sample z-test where the assumed value for
#' # sigma.x is one. The null hypothesis is that the population
#' # mean for 'x' is zero. The alternative hypothesis states
#' # that it is either greater or less than zero. A confidence
#' # interval for the population mean will be computed.
#' # Note: returns same answer as:
#' z.test(x,sigma.x=1)
#' #
#' x <- c(7.8, 6.6, 6.5, 7.4, 7.3, 7.0, 6.4, 7.1, 6.7, 7.6, 6.8)
#' y <- c(4.5, 5.4, 6.1, 6.1, 5.4, 5.0, 4.1, 5.5)
#' zsum.test(mean(x), sigma.x=0.5, n.x=11, mean(y), sigma.y=0.5, n.y=8, mu=2)
#' # Two-sided standard two-sample z-test where sigma.x
#' # and sigma.y are both assumed to equal 0.5. The null hypothesis
#' # is that the population mean for 'x' less that for 'y' is 2.
#' # The alternative hypothesis is that this difference is not 2.
#' # A confidence interval for the true difference will be computed.
#' # Note: returns same answer as:
#' z.test(x, sigma.x=0.5, y, sigma.y=0.5)
#' #
#' zsum.test(mean(x), sigma.x=0.5, n.x=11, mean(y), sigma.y=0.5, n.y=8,
#' conf.level=0.90)
#' # Two-sided standard two-sample z-test where sigma.x and
#' # sigma.y are both assumed to equal 0.5. The null hypothesis
#' # is that the population mean for 'x' less that for 'y' is zero.
#' # The alternative hypothesis is that this difference is not
#' # zero. A 90% confidence interval for the true difference will
#' # be computed. Note: returns same answer as:
#' z.test(x, sigma.x=0.5, y, sigma.y=0.5, conf.level=0.90)
#' rm(x, y)
#'
#' @export zsum.test
zsum.test <-
function(mean.x, sigma.x = NULL, n.x = NULL, mean.y = NULL, sigma.y = NULL,
n.y = NULL, alternative = "two.sided", mu = 0, conf.level = 0.95)
{
choices <- c("two.sided", "greater", "less")
alt <- pmatch(alternative, choices)
alternative <- choices[alt]
if(length(alternative) > 1 || is.na(alternative))
stop("alternative must be one of \"greater\", \"less\", \"two.sided\"")
if(!missing(mu))
if(length(mu) != 1 || is.na(mu))
stop("mu must be a single number")
if(!is.null(mean.x) && is.null(mean.y) && is.null(n.x) && is.null(
sigma.x))
stop("You must enter the value for both sigma.x and n.x")
if(is.null(n.x) && !is.null(mean.x) && !is.null(sigma.x) && is.null(
mean.y))
stop("You must enter the value for n.x")
if(is.null(sigma.x) && !is.null(mean.x) && !is.null(n.x) && is.null(
mean.y))
stop("You must enter the value for sigma.x")
if(is.null(n.y) && !is.null(mean.x) && !is.null(mean.y) && !is.null(
sigma.y) && !is.null(sigma.x) && !is.null(n.x))
stop("You must enter the value for n.y")
if(is.null(n.y) && is.null(n.x) && !is.null(mean.x) && !is.null(mean.y
) && !is.null(sigma.y) && !is.null(sigma.x))
stop("You must enter the value for both n.x and n.y")
if(is.null(sigma.x) && is.null(sigma.y) && !is.null(mean.x) && !
is.null(mean.y) && !is.null(n.x) && !is.null(n.y))
stop("You must enter the value for both sigma.x and sigma.y")
if(!is.null(sigma.x) && is.null(sigma.y) && !is.null(mean.x) && !
is.null(mean.y) && !is.null(n.x) && !is.null(n.y))
stop("You must enter the value for sigma.y")
if(is.null(n.y) && is.null(sigma.y) && !is.null(mean.x) && !is.null(
mean.y) && !is.null(sigma.x) && !is.null(n.x))
stop("You must enter the value for both sigma.y and n.y")
if(!missing(conf.level))
if(length(conf.level) != 1 || is.na(conf.level) || conf.level <
0 || conf.level > 1)
stop("conf.level must be a number between 0 and 1")
if(!is.null(mean.y)) {
dname <- c("Summarized x and y")
}
else {
dname <- c("Summarized x")
}
if(n.x <= 1)
stop("not enough x observations")
estimate <- mean.x
if(is.null(mean.y)) {
stderr <- sigma.x/sqrt(n.x)
zobs <- (mean.x - mu)/stderr
method <- c("One-sample z-Test")
names(estimate) <- c("mean of x")
}
else {
if(n.y <= 1)
stop("not enough y observations")
method <- c("Two-sample z-Test")
estimate <- c(mean.x, mean.y)
names(estimate) <- c("mean of x", "mean of y")
stderr <- sqrt(((sigma.x^2)/n.x) + ((sigma.y^2)/n.y))
zobs <- (mean.x - mean.y - mu)/stderr
}
if(alternative == "less") {
pval <- pnorm(zobs)
cint <- c(NA, zobs * stderr + qnorm(conf.level) * stderr)
}
else if(alternative == "greater") {
pval <- 1 - pnorm(zobs)
cint <- c(zobs * stderr - qnorm(conf.level) * stderr, NA)
}
else {
pval <- 2 * pnorm( - abs(zobs))
alpha <- 1 - conf.level
cint <- c(zobs * stderr - qnorm((1 - alpha/2)) * stderr, zobs *
stderr + qnorm((1 - alpha/2)) * stderr)
}
cint <- cint + mu
names(zobs) <- "z"
if(!is.null(mean.y))
names(mu) <- "difference in means"
else names(mu) <- "mean"
attr(cint, "conf.level") <- conf.level
rval <- list(statistic = zobs, p.value = pval, conf.int = cint,
estimate = estimate, null.value = mu, alternative =
alternative, method = method, data.name = dname)
attr(rval, "class") <- "htest"
return(rval)
}
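# Illustrative check (not part of BSDA): the half-infinite interval that
# zsum.test() reports for alternative = "greater" has lower limit
# xbar - qnorm(conf.level)*SE (the upper end is unbounded); this sketch
# recomputes that limit for the Example 9.7 summaries shown above.
xbar <- 56/30; sigma <- 2; n <- 30
xbar - qnorm(0.95) * (sigma/sqrt(n))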
# /scratch/gouwar.j/cran-all/cranData/BSDA/R/zsum.test.R
# estimate
# inside control: lambda, lambdas, alpha.min, alpha.max, beta.max, iter, sd.thresh, scalex
bsgw <- function(formula, data, formulas=formula, weights, subset, na.action=na.fail
, init = "survreg"
, ordweib=FALSE, scale=0, control=bsgw.control(), print.level=2) {
# TODO: implement weights, subset, na.action, scale
# TODO: add robustness to init: 1) add bsgw objects, 2) allow failover when survreg doesn't work
mycall <- match.call()
if (!missing(weights)) warning("weights argument not supported yet, this argument will be ignored")
if (!missing(subset)) warning("subset argument not supported yet, this argument will be ignored")
if (!identical(na.action,na.fail)) stop("na.action argument not supported yet; only na.fail is currently accepted")
if (scale>0) stop("scale argument not supported yet")
if (ordweib) {
formulas <- bsgw.strip.formula(formulas)
}
# TODO: need to make sure number of rows in X and Xs ends up being equal (watch out for removal of missing rows)
mf <- model.frame(formula, data, drop.unused.levels=TRUE, na.action = na.fail) # incorporate na.action argument
mt <- attr(mf, "terms")
X <- model.matrix(mt, mf)
y <- model.response(mf, "numeric")
censoring.type <- attr(y, "type")
if (! (censoring.type %in% c("left", "right"))) stop("invalid censoring type; only left and right are currently supported")
right.censoring <- 1 * (censoring.type == "right")
#if (right.censoring == 0) cat("using left censoring...\n")
colnamesX <- colnames(X)
if (colnamesX[1]!="(Intercept)") stop("intercept term must be included in formula")
mfs <- model.frame(formulas, data, drop.unused.levels=TRUE, na.action = na.fail) # incorporate na.action argument
mts <- attr(mfs, "terms")
Xs <- model.matrix(mts, mfs)
colnamesXs <- colnames(Xs)
if (colnamesXs[1]!="(Intercept)") stop("intercept term must be included in formulas")
if (init[1]=="survreg") { # using survreg to initialize the coefficients
wreg <- tryCatch(survreg(formula, data)
, error = function(e) e, warning = function(w) w) # TODO: make sure all relevant parameters are passed to this call, e.g. na.action
if (is(wreg, "error") || is(wreg, "warning")) {
warning("survreg deemed unreliable for initialization; using naive method")
init <- list(beta=rep(0,ncol(X)), betas=rep(0,ncol(Xs)))
survreg.scale.ref <- NULL
wreg <- NULL
} else {
alpha.ow <- 1/wreg$scale
betas.0 <- if (ordweib) log(alpha.ow) else log((alpha.ow-control$alpha.min)/(control$alpha.max-alpha.ow))
beta <- unname(-wreg$coefficients/wreg$scale)
# make sure initialized coefficients are within boundaries defined in bsgw.control
betas.0 <- max(min(betas.0, abs(control$betas.max)), -abs(control$betas.max))
beta <- pmax(pmin(beta, abs(control$beta.max)), -abs(control$beta.max))
init <- list(beta=beta, betas=c(betas.0, rep(0, ncol(Xs)-1)))
survreg.scale.ref <- wreg$scale
}
} else if (inherits(init, "survreg")) { # using a previous survreg estimation object
wreg <- init
alpha.ow <- 1/wreg$scale
betas.0 <- if (ordweib) log(alpha.ow) else log((alpha.ow-control$alpha.min)/(control$alpha.max-alpha.ow))
beta <- unname(-wreg$coefficients/wreg$scale)
# make sure initialized coefficients are within boundaries defined in bsgw.control
betas.0 <- max(min(betas.0, abs(control$betas.max)), -abs(control$betas.max))
beta <- pmax(pmin(beta, abs(control$beta.max)), -abs(control$beta.max))
init <- list(beta=beta, betas=c(betas.0, rep(0, ncol(Xs)-1)))
survreg.scale.ref <- wreg$scale
} else {
warning("init argument not recognized, initializing all coefficients to zero") # TODO: is this desired behavior?
init <- list(beta=rep(0,ncol(X)), betas=rep(0,ncol(Xs)))
survreg.scale.ref <- NULL
wreg <- NULL
}
if (is.list(control$scalex)) {
X <- bsgw.scale(X, apply.sc=control$scalex$apply.scale.X, center=control$scalex$centerVec.X, scale=control$scalex$scaleVec.X)
Xs <- bsgw.scale(Xs, apply.sc=control$scalex$apply.scale.Xs, center=control$scalex$centerVec.Xs, scale=control$scalex$scaleVec.Xs)
control$scalex <- TRUE
} else if (control$scalex) {
X <- bsgw.scale(X)
Xs <- bsgw.scale(Xs)
}
if (ordweib) formulas <- bsgw.strip.formula(formulas)
ret <- list(call=mycall, formula=formula, formulas=formulas, weights=rep(1,nrow(X)), subset=1:nrow(X)
, na.action=na.action, init=init, ordweib=ordweib, survreg.scale.ref=survreg.scale.ref, ordreg=wreg, scale=scale, control=control
, X=X, Xs=Xs, y=y
, contrasts=attr(X, "contrasts"), contrastss=attr(Xs, "contrasts")
, xlevels=.getXlevels(mt, mf), xlevelss=.getXlevels(mts, mfs)
, terms=mt, termss=mts
, colnamesX=colnamesX, colnamesXs=colnamesXs
)
if (is.list(control$scalex) || control$scalex) {
ret <- c(ret, list(apply.scale.X=attr(X, "apply.scale"), apply.scale.Xs=attr(Xs, "apply.scale")
, centerVec.X=attr(X, "centerVec"), scaleVec.X=attr(X, "scaleVec")
, centerVec.Xs=attr(Xs, "centerVec"), scaleVec.Xs=attr(Xs, "scaleVec")))
}
#browser()
mcmc <- bsgw.mcmc(X, Xs, y[,1], y[,2], control$lambda, control$lambdas, iter=control$iter, sd.thresh=control$sd.thresh
, init=init, ordweib=ordweib, alpha.min=control$alpha.min, alpha.max=control$alpha.max, beta.max=control$beta.max
, betas.max = control$betas.max
, print.level=print.level, nskip=control$nskip, right.censoring = right.censoring)
sel <- (control$burnin+1):control$iter
median <- list(beta=apply(mcmc$beta[sel,,drop=F], 2, median), betas=apply(mcmc$betas[sel,,drop=F], 2, median)
, survreg.scale=apply(mcmc$scale[sel,,drop=F], 2, median))
km.fit <- survfit(bsgw.strip.formula(formula), data)
ret <- c(ret, list(idx=mcmc$idx, idxs=mcmc$idxs, median=median
, smp=list(beta=mcmc$beta, betas=mcmc$betas, survreg.scale=mcmc$scale, lp=mcmc$lp, loglike=mcmc$loglike, logpost=mcmc$logpost)
, km.fit=km.fit, tmax=max(y[,1])))
class(ret) <- "bsgw"
return (ret)
}
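# Hedged usage sketch (not from the package documentation): fit the model to
# the veteran data from the survival package, which this code already depends
# on; the covariates and iteration count are illustrative assumptions, and
# bsgw.control() is assumed to accept the fields listed in the comment at the
# top of this file (iter, lambda, lambdas, ...).
library(survival)
fit <- bsgw(Surv(time, status) ~ age + karno, data = veteran,
            control = bsgw.control(iter = 1000))
print(fit)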
print.bsgw <- function(x, ...) {
cat("Call:\n")
print(x$call)
cat("Scale formula:\n")
print(x$formula)
cat("Shape formula:\n")
print(x$formulas)
cat("lambda (shrinkage for scale coefficients):", x$control$lambda, "\n")
cat("lambdas (shrinkage for shape coefficients):", x$control$lambdas, "\n")
cat("ordinary (constant-shape) Weibull:", x$ordweib, "\n")
if (!x$ordweib) {
cat("Lower bound on shape parameter:", x$control$alpha.min, "\n")
cat("Upper bound on shape parameter:", x$control$alpha.max, "\n")
}
cat("Number of MCMC iterations:", x$control$iter, "\n")
cat("Number of burn-in iterations (used in calculating medians):", x$control$burnin, "\n")
cat("Threshold on standard deviation of covariates:", x$control$sd.thresh, "\n")
cat("Model matrix is scaled:", x$control$scalex, "\n")
cat("Scale Coefficients:\n")
beta <- x$median$beta; names(beta) <- x$colnamesX
print(beta)
cat("Shape Coefficients:\n")
betas <- x$median$betas; names(betas) <- x$colnamesXs
print(betas)
cat("mean survreg-style scale parameter:", mean(x$median$survreg.scale), "\n")
if (!is.null(x$survreg.scale.ref)) cat("\tscale parameter from non-Bayesian Weibull regression:", x$survreg.scale.ref, "\n")
cat("number of observations:", nrow(x$X), "\n")
}
plot.bsgw <- function(x, pval=0.05, burnin=round(x$control$iter/2), nrow=2, ncol=3, ...) {
iter <- x$control$iter
sel <- (burnin+1):x$control$iter
nsel <- length(sel)
CI_prob <- c(pval/2, 0.5, 1-pval/2)
nplot_per_page <- nrow*ncol
# determine number of beta coefficients
nbeta <- ncol(x$smp$beta)
nbetas <- ncol(x$smp$betas)
npage_beta <- ceiling(nbeta/nplot_per_page)
npage_betas <- ceiling(nbetas/nplot_per_page)
## loglike and logpost
# TODO: adjust ylim to make sure ordreg loglike is visible
oldpar <- par(mfrow=c(2,2))
plot(x$smp$loglike, type="l", xlab="Iteration", ylab="Log-likelihood", main="Log-likelihood, All")
if (!is.null(x$ordreg)) abline(h=x$ordreg$loglik[2], lty=2, col="red")
plot(x$smp$logpost, type="l", xlab="Iteration", ylab="Log-posterior", main="Log-posterior, All")
if (!is.null(x$ordreg)) abline(h=x$ordreg$loglik[2], lty=2, col="red")
plot(x$smp$loglike[sel], type="l", xlab="Iteration", ylab="Log-likelihood", main="Log-likelihood, Post-Burnin")
if (!is.null(x$ordreg)) abline(h=x$ordreg$loglik[2], lty=2, col="red")
plot(x$smp$logpost[sel], type="l", xlab="Iteration", ylab="Log-posterior", main="Log-posterior, Post-Burnin")
if (!is.null(x$ordreg)) abline(h=x$ordreg$loglik[2], lty=2, col="red")
# histogram of scale parameters for training set
if (!x$ordweib) {
par(mfrow=c(1,1))
hist(x$median$survreg.scale, main="Survreg-Style Scale Parameter - Training Set", xlab="Survreg Scale Parameter")
}
## traceplots
# beta
beta_q <- apply(x$smp$beta[sel,,drop=F], 2, quantile, probs=CI_prob)
beta_lower <- beta_q[1,]
beta_median <- beta_q[2,]
beta_upper <- beta_q[3,]
for (n in 1:npage_beta) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbeta) {
beta_ylim <- range(x$smp$beta[,offset+i], 0.0)
plot(x$smp$beta[,offset+i], type="l", xlab="Iteration", ylab="Sample Value", ylim=beta_ylim
, main = paste("beta[", x$colnamesX[offset+i], "]", sep=""))
abline(h = 0)
lines(sel, rep(beta_lower[offset+i], nsel), lty=2, col="red")
lines(sel, rep(beta_median[offset+i], nsel), lty=2, col="red")
lines(sel, rep(beta_upper[offset+i], nsel), lty=2, col="red")
}
}
}
if (x$ordweib) {
if (!is.null(x$survreg.scale.ref)) yref <- x$survreg.scale.ref
else yref <- 0.0
survreg.scale_q <- quantile(x$smp$survreg.scale[sel,1], probs=CI_prob)
survreg.scale_lower <- survreg.scale_q[1]
survreg.scale_median <- survreg.scale_q[2]
survreg.scale_upper <- survreg.scale_q[3]
par(mfrow=c(1,1))
survreg.scale_ylim <- range(x$smp$survreg.scale[,1], yref)
plot(x$smp$survreg.scale[,1], type="l", xlab="Iteration", ylab="Sample Value", ylim=survreg.scale_ylim
, main = paste("Survreg Scale", sep=""))
abline(h = yref)
lines(sel, rep(survreg.scale_lower, nsel), lty=2, col="red")
lines(sel, rep(survreg.scale_median, nsel), lty=2, col="red")
lines(sel, rep(survreg.scale_upper, nsel), lty=2, col="red")
} else {
# betas
betas_q <- apply(x$smp$betas[sel,,drop=F], 2, quantile, probs=CI_prob)  # drop=F guards the single-column case
betas_lower <- betas_q[1,]
betas_median <- betas_q[2,]
betas_upper <- betas_q[3,]
for (n in 1:npage_betas) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbetas) {
betas_ylim <- range(x$smp$betas[,offset+i], 0.0)
plot(x$smp$betas[,offset+i], type="l", xlab="Iteration", ylab="Sample Value", ylim=betas_ylim
, main = paste("betas[", x$colnamesXs[offset+i], "]", sep=""))
abline(h = 0)
lines(sel, rep(betas_lower[offset+i], nsel), lty=2, col="red")
lines(sel, rep(betas_median[offset+i], nsel), lty=2, col="red")
lines(sel, rep(betas_upper[offset+i], nsel), lty=2, col="red")
}
}
}
}
## autocorrelation plots
# beta
for (n in 1:npage_beta) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbeta) {
if ((offset+i) %in% x$idx) {
acf(x$smp$beta[sel,offset+i], main=paste("beta[", x$colnamesX[offset+i], "]", sep=""))
} else {
bsgw.empty.plot(main=paste("beta[", x$colnamesX[offset+i], "]", sep=""))
}
}
}
}
if (x$ordweib) {
par(mfrow=c(1,1))
acf(x$smp$survreg.scale[sel], main=paste("Survreg Scale", sep=""))
} else {
# betas
for (n in 1:npage_betas) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbetas) {
if ((offset+i) %in% x$idxs) {
acf(x$smp$betas[sel,offset+i], main=paste("betas[", x$colnamesXs[offset+i], "]", sep=""))
} else {
bsgw.empty.plot(main=paste("betas[", x$colnamesXs[offset+i], "]", sep=""))
}
}
}
}
}
## histograms
# beta
for (n in 1:npage_beta) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbeta) {
if ((offset+i) %in% x$idx) {
hist(x$smp$beta[sel,offset+i], xlab="Sample Value"
, main=paste("beta[", x$colnamesX[offset+i], "]", sep=""))
abline(v = 0)
abline(v = beta_median[offset+i], lty=2)
abline(v = beta_lower[offset+i], lty=3)
abline(v = beta_upper[offset+i], lty=3)
} else {
bsgw.empty.plot(main=paste("beta[", x$colnamesX[offset+i], "]", sep=""))
}
}
}
}
# TODO: create branch for ordweib
if (x$ordweib) {
par(mfrow=c(1,1))
hist(x$smp$survreg.scale[sel,1], xlab="Sample Value"
, main=paste("Survreg Scale", sep=""))
abline(v = 0)
abline(v = survreg.scale_median, lty=2)
abline(v = survreg.scale_lower, lty=3)
abline(v = survreg.scale_upper, lty=3)
} else {
# betas
for (n in 1:npage_betas) {
par(mfrow=c(nrow,ncol))
offset <- (n-1)*nplot_per_page
for (i in 1:nplot_per_page) {
if (offset+i<=nbetas) {
if ((offset+i) %in% x$idxs) {
hist(x$smp$betas[sel,offset+i], xlab="Sample Value"
, main=paste("betas[", x$colnamesXs[offset+i], "]", sep=""))
abline(v = 0)
abline(v = betas_median[offset+i], lty=2)
abline(v = betas_lower[offset+i], lty=3)
abline(v = betas_upper[offset+i], lty=3)
} else {
bsgw.empty.plot(main=paste("betas[", x$colnamesXs[offset+i], "]", sep=""))
}
}
}
}
}
par(oldpar)
}
summary.bsgw <- function(object, pval=0.05, burnin=object$control$burnin, ...) {
iter <- object$control$iter
CI_prob <- c(pval/2, 0.5, 1-pval/2)
sel <- (burnin+1):iter
# beta
beta_q <- apply(object$smp$beta[sel,,drop=F], 2, quantile, probs=CI_prob)
beta_lb <- beta_q[1,]
beta_med <- beta_q[2,]
beta_ub <- beta_q[3,]
beta_pval <- apply(object$smp$beta[sel,,drop=F], 2, bsgw.calc.pval, ref=0.0)
coefficients_beta <- as.matrix(cbind(beta_med, beta_lb, beta_ub, beta_pval))
dimnames(coefficients_beta) <- list(object$colnamesX, c("Estimate", "Lower Bound", "Upper Bound", "P-val"))
# betas
betas_q <- apply(object$smp$betas[sel,,drop=F], 2, quantile, probs=CI_prob)
betas_lb <- betas_q[1,]
betas_med <- betas_q[2,]
betas_ub <- betas_q[3,]
betas_pval <- apply(object$smp$betas[sel,,drop=F], 2, bsgw.calc.pval, ref=0.0)
coefficients_betas <- as.matrix(cbind(betas_med, betas_lb, betas_ub, betas_pval))
dimnames(coefficients_betas) <- list(object$colnamesXs, c("Estimate", "Lower Bound", "Upper Bound", "P-val"))
# alpha (for training set)
survreg.scale_q <- apply(object$smp$survreg.scale[sel,,drop=F], 2, quantile, probs=CI_prob)
survreg.scale_lb <- survreg.scale_q[1,]
survreg.scale_med <- survreg.scale_q[2,]
survreg.scale_ub <- survreg.scale_q[3,]
ret <- list(call=object$call, pval=pval, burnin=burnin
, coefficients=list(beta=coefficients_beta, betas=coefficients_betas)
, survreg.scale=list(lower=survreg.scale_lb, median=survreg.scale_med, upper=survreg.scale_ub)
)
class(ret) <- "summary.bsgw"
return (ret)
}
print.summary.bsgw <- function(x, ...) {
cat("Call:\n")
print(x$call)
cat("number of burn-in iterations discarded:", x$burnin, "\n")
cat("confidence interval:", x$pval, "\n")
cat("mean of median survreg-scale:", mean(x$survreg.scale$median), "\n")
cat("## coefficients ##\n")
cat("scale parameter:\n")
print(x$coefficients$beta)
cat("\nshape parameter:\n")
print(x$coefficients$betas)
}
predict.bsgw <- function(object, newdata=NULL, tvec=NULL, burnin=object$control$burnin, ncores=1, ...) {
iter <- object$control$iter
alpha.min <- object$control$alpha.min
alpha.max <- object$control$alpha.max
tt <- object$terms
tts <- object$termss
Terms <- delete.response(tt)
Termss <- delete.response(tts)
if (is.null(newdata)) {
nobs <- nrow(object$X)
X <- object$X
Xs <- object$Xs
km.fit <- object$km.fit
} else {
newdata <- droplevels(newdata)
mf <- model.frame(Terms, newdata, xlev = object$xlevels)
mfs <- model.frame(Termss, newdata, xlev = object$xlevelss)
X <- model.matrix(Terms, mf, contrasts.arg = object$contrasts)
Xs <- model.matrix(Termss, mfs, contrasts.arg = object$contrastss)
if (object$control$scalex) {
X <- bsgw.scale(X, apply.sc=object$apply.scale.X, center=object$centerVec.X, scale=object$scaleVec.X)
Xs <- bsgw.scale(Xs, apply.sc=object$apply.scale.Xs, center=object$centerVec.Xs, scale=object$scaleVec.Xs)
}
nobs <- nrow(newdata)
}
if (!is.null(tvec)) {
# TODO: we need an upper bound on length of tvec to avoid memory blow-up
if (length(tvec)==1) tvec <- seq(from=0.0, to=object$tmax, length.out=tvec) # tvec is interpreted as number of time points
nt <- length(tvec)
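# memory estimate: three iter-by-nt-by-nobs arrays of 8-byte doubles
# (h, H, and S = exp(-H)) are eventually held in memory, hence the
# factor 3*8 below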
mem.gb.per.iter <- 3*8*nt*nobs/(1024*1024*1024)
required.mem.gb <- 3*8*iter*nt*nobs/(1024*1024*1024)
tvec <- as.matrix(tvec)
t_mat <- tvec[,rep(1,nobs)]
# making sure returned prediction objects are not too big
if (required.mem.gb>object$control$memlim.gb)
stop("required memory exceeds the specified limit\nconsider increasing the limit via the memlim.gb parameter or making predictions for a subset of the data")
if (ncores==1) {
xbeta.all <- X%*%t(object$smp$beta)
xbetas.all <- Xs%*%t(object$smp$betas)
ret <- lapply(1:iter, FUN=function(i) {
xbeta <- xbeta.all[,i,drop=F]
xbetas <- xbetas.all[,i,drop=F]
if (object$ordweib) {
alpha <- exp(xbetas)
} else {
alpha <- alpha.min + (alpha.max-alpha.min)/(1+exp(-xbetas))
}
survreg.scale <- 1/alpha
exbeta <- as.matrix(exp(xbeta))
alpha_mat <- t(alpha[,rep(1,nt)])
exbeta_mat <- t(exbeta[,rep(1,nt)])
Htmp <- (t_mat^alpha_mat)*exbeta_mat
htmp <- alpha_mat*(t_mat^(alpha_mat-1))*exbeta_mat
return (list(h=htmp, H=Htmp, survreg.scale=survreg.scale))
})
} else {
registerDoParallel(ncores)
ret <- foreach (i=1:iter, .options.multicore=list(preschedule=TRUE)) %dopar% {
xbeta <- X%*%object$smp$beta[i,]
xbetas <- Xs%*%object$smp$betas[i,]
if (object$ordweib) {
alpha <- exp(xbetas)
} else {
alpha <- alpha.min + (alpha.max-alpha.min)/(1+exp(-xbetas))
}
survreg.scale <- 1/alpha
exbeta <- as.matrix(exp(xbeta))
alpha_mat <- t(alpha[,rep(1,nt)])
exbeta_mat <- t(exbeta[,rep(1,nt)])
Htmp <- (t_mat^alpha_mat)*exbeta_mat
htmp <- alpha_mat*(t_mat^(alpha_mat-1))*exbeta_mat
return (list(h=htmp, H=Htmp, survreg.scale=survreg.scale))
}
}
h <- array(NA, dim=c(iter, nt, nobs))
H <- array(NA, dim=c(iter, nt, nobs))
survreg.scale <- array(NA, dim=c(iter, nobs))
for (i in 1:iter) {
h[i,,] <- ret[[i]]$h
H[i,,] <- ret[[i]]$H
survreg.scale[i,] <- ret[[i]]$survreg.scale
}
S <- exp(-H)
survreg.scale_median <- apply(survreg.scale[(burnin+1):iter,], 2, median)
} else {
h <- NA
H <- NA
S <- NA
survreg.scale <- NA
survreg.scale_median <- NA
}
if (is.null(newdata)) {
y <- object$y
do_loglike <- T
} else {
Rterms <- drop.terms(tt)
if (all(all.vars(Rterms)[1:2] %in% colnames(newdata))) {
mfy <- model.frame(Rterms, newdata, xlev = object$xlevels) # TODO: add check to make sure response variable is available for newdata
y <- model.response(mfy, "numeric")
do_loglike <- T
km.fit <- survfit(bsgw.strip.formula(object$formula), newdata)
} else {
do_loglike <- F
km.fit <- NULL
}
}
if (do_loglike) { # TODO: include logpost
loglike <- sapply(1:iter, FUN=function(i) {
bsgw.loglike(c(object$smp$beta[i,], object$smp$betas[i,]), X, Xs, y[,1], y[,2], object$ordweib, alpha.min, alpha.max)
})
loglike_median <- median(loglike[(burnin+1):iter])
} else {
loglike <- NA
loglike_median <- NA
}
ret <- list(tvec=as.vector(tvec), burnin=burnin, median=list(loglike=loglike_median, survreg.scale=survreg.scale_median)
, smp=list(h=h, H=H, S=S, survreg.scale=survreg.scale, loglike=loglike), km.fit=km.fit)
class(ret) <- "predict.bsgw"
return (ret)
}
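# Hedged usage sketch (not from the package documentation), continuing the
# hypothetical 'fit' object above: predict on a 100-point time grid (a tvec of
# length 1 is interpreted as the number of grid points) and summarize the
# first two observations, which triggers the pairwise hazard-ratio plots.
pred <- predict(fit, tvec = 100)
psum <- summary(pred, idx = c(1, 2))
str(psum$median$S)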
summary.predict.bsgw <- function(object, idx=1:length(object$median$survreg.scale), burnin=object$burnin
, pval=0.05, popmean=identical(idx,1:length(object$median$survreg.scale))
, make.plot=TRUE, ...) { # TODO: verify this function, it was accidentally modified during editing of mixture model
if (!all(idx %in% 1:length(object$median$survreg.scale))) {
stop("invalid idx argument")
}
if (is.null(object$tvec)) {
cat("prediction summary must be applied to time-dependent prediction entities\n")
return (NULL) # TODO: can't we still return something useful?!
}
CI_prob <- c(pval/2, 0.5, 1-pval/2)
iter <- dim(object$smp$h)[1]
sel <- (burnin+1):iter
tvec <- object$tvec
# first, calculate summary statistics of h,H,S for each point
# h
h.q <- apply(object$smp$h[sel,,idx], c(2,3), quantile, probs=CI_prob)
h.lower <- h.q[1,,]
h.median <- h.q[2,,]
h.upper <- h.q[3,,]
# H
H.q <- apply(object$smp$H[sel,,idx], c(2,3), quantile, probs=CI_prob)
H.lower <- H.q[1,,]
H.median <- H.q[2,,]
H.upper <- H.q[3,,]
# S
S.q <- apply(object$smp$S[sel,,idx], c(2,3), quantile, probs=CI_prob)
S.lower <- S.q[1,,]
S.median <- S.q[2,,]
S.upper <- S.q[3,,]
if (popmean) {
S.popmean <- apply(object$smp$S[,,idx], c(1,2), mean)
S.popmean.q <- apply(S.popmean[sel,], 2, quantile, probs=CI_prob)
S.popmean.lower <- S.popmean.q[1,]
S.popmean.median <- S.popmean.q[2,]
S.popmean.upper <- S.popmean.q[3,]
if (make.plot) {
S.popmean.ylim <- range(S.popmean.lower, S.popmean.median, S.popmean.upper)
plot(tvec, S.popmean.median, type="l", xlab="Time", ylab="Population Survival Probability", ylim=S.popmean.ylim)
lines(tvec, S.popmean.lower, lty=2)
lines(tvec, S.popmean.upper, lty=2)
lines(object$km.fit, col="red")
legend("topright", legend = c("bsgw model", "Kaplan-Meier"), col=c("black","red"), lty = c(1,1))
}
} else {
S.popmean.lower <- NA
S.popmean.median <- NA
S.popmean.upper <- NA
}
# pair-wise comparisons
if (length(idx)==2) {
if (tvec[1]==0) {
tindex <- 2:length(tvec)
} else {
tindex <- 1:length(tvec)
}
idx1 <- idx[1]
idx2 <- idx[2]
# hazard ratio
hr <- object$smp$h[,,idx2]/object$smp$h[,,idx1]
hr.q <- apply(hr[sel,tindex], 2, quantile, probs=CI_prob)
hr.lower <- hr.q[1,]
hr.median <- hr.q[2,]
hr.upper <- hr.q[3,]
# survival diff
S.diff <- object$smp$S[sel,,idx2]-object$smp$S[sel,,idx1]
S.diff.q <- apply(S.diff, 2, quantile, probs=CI_prob)
S.diff.lower <- S.diff.q[1,]
S.diff.median <- S.diff.q[2,]
S.diff.upper <- S.diff.q[3,]
if (make.plot) {
hr.range <- range(hr.lower, hr.median, hr.upper, 1.0)
plot(tvec[tindex], hr.median, type="l", xlab="Time", ylab="Hazard Ratio"
, ylim=hr.range, main=paste0("pval=", pval, ", idx1=", idx1, ", idx2=", idx2))
lines(tvec[tindex], hr.lower, lty=2)
lines(tvec[tindex], hr.upper, lty=2)
abline(h=1.0, lty=2, col="red")
S.range <- range(S.lower, S.median, S.upper)
plot(tvec, S.median[,1], type="l", xlab="Time", ylab="Survival Probability", ylim=S.range, col="green"
, main=paste0("pval=", pval))
lines(tvec, S.lower[,1], lty=2, col="green")
lines(tvec, S.upper[,1], lty=2, col="green")
lines(tvec, S.median[,2], col="red")
lines(tvec, S.lower[,2], lty=2, col="red")
lines(tvec, S.upper[,2], lty=2, col="red")
legend("topright", legend=c(paste0("idx1=",idx1),paste0("idx2=",idx2)), col=c("green","red"), lty=c(1,1))
S.diff.range <- range(S.diff.lower, S.diff.median, S.diff.upper, 0.0)
plot(tvec, S.diff.median, type="l", xlab="Time", ylab="Survival Probability Difference"
, ylim=S.diff.range, main=paste0("pval=", pval, ", idx1=", idx1, ", idx2=", idx2))
lines(tvec, S.diff.lower, lty=2)
lines(tvec, S.diff.upper, lty=2)
abline(h=0.0, lty=2, col="red")
}
} else {
hr.lower <- NA
hr.median <- NA
hr.upper <- NA
S.diff.lower <- NA
S.diff.median <- NA
S.diff.upper <- NA
}
return (list(lower=list(h=h.lower, H=H.lower, S=S.lower, hr=hr.lower, S.diff=S.diff.lower)
, median=list(h=h.median, H=H.median, S=S.median, hr=hr.median, S.diff=S.diff.median)
, upper=list(h=h.upper, H=H.upper, S=S.upper, hr=hr.upper, S.diff=S.diff.upper)
, popmean=list(lower=list(S=S.popmean.lower), median=list(S=S.popmean.median), upper=list(S=S.popmean.upper))
, km.fit=object$km.fit))
}
bsgw.crossval <- function(data, folds, all=FALSE, print.level=1, control=bsgw.control(), ncores=1, ...) {
nfolds <- max(folds) # TODO: add more checks for validity of folds
if (all) {
if (ncores==1) {
ret <- lapply (1:nfolds, function(n) {
if (print.level>=1) cat("processing fold", n, "of", nfolds, "\n")
flush.console()
est <- bsgw(data=data[which(folds!=n),], control=control, print.level=print.level, ...)
pred <- predict(est, newdata=data[which(folds==n),], burnin=control$burnin)
ret <- max(pred$smp$loglike)
attr(ret, "estobj") <- est
return (ret)
})
} else { # parallel code; TODO: set upper bound on ncores based on maximum processor cores
registerDoParallel(ncores)
ret <- foreach (n=1:nfolds, .options.multicore=list(preschedule=FALSE)) %dopar% {
if (print.level>=1) cat("processing fold", n, "of", nfolds, "\n")
flush.console()
est <- bsgw(data=data[which(folds!=n),], control=control, print.level=print.level, ...)
pred <- predict(est, newdata=data[which(folds==n),], burnin=control$burnin)
ret <- max(pred$smp$loglike)
attr(ret, "estobj") <- est
return (ret)
}
}
fret <- sum(unlist(ret))
estobjs <- list()
for (n in 1:nfolds) estobjs[[n]] <- attr(ret[[n]], "estobj")
attr(fret, "estobjs") <- estobjs
return (fret)
} else {
if (ncores==1) {
loglike <- sapply (1:nfolds, function(n) {
if (print.level>=1) cat("processing fold", n, "of", nfolds, "\n")
est <- bsgw(data=data[which(folds!=n),], control=control, print.level=print.level, ...)
pred <- predict(est, newdata=data[which(folds==n),], burnin=control$burnin)
return (max(pred$smp$loglike))
})
return (sum(loglike))
} else {
registerDoParallel(ncores)
loglike <- foreach (n=1:nfolds, .options.multicore=list(preschedule=FALSE), .combine=c) %dopar% {
if (print.level>=1) cat("processing fold", n, "of", nfolds, "\n")
est <- bsgw(data=data[which(folds!=n),], control=control, print.level=print.level, ...)
pred <- predict(est, newdata=data[which(folds==n),], burnin=control$burnin)
return (max(pred$smp$loglike))
}
return (sum(loglike))
}
}
}
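# Inert usage sketch (hypothetical data frame "mydata"; assumes the model formula
# and any other fitting arguments are forwarded through "..." to bsgw(), as in the
# calls inside bsgw.crossval above):
#   folds <- bsgw.generate.folds.eventbalanced(Surv(time, status) ~ 1, mydata, nfold = 5)
#   cv.ll <- bsgw.crossval(data = mydata, folds = folds,
#                          control = bsgw.control(iter = 500),
#                          formula = Surv(time, status) ~ age + sex)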
# TODO: under construction
bsgw.crossval.wrapper <- function(data, folds, all=FALSE, print.level=1, control=bsgw.control(), ncores=1
, lambda.vec=exp(seq(from=log(0.01), to=log(100), length.out = 10))
, lambdas.vec=NULL
, lambda2=if (is.null(lambdas.vec)) cbind(lambda=lambda.vec, lambdas=lambda.vec)
else as.matrix(expand.grid(lambda=lambda.vec, lambdas=lambdas.vec)), plot=TRUE, ...) {
# TODO: need to impose upper bound on ncores by physical number of cores on system
nfold <- length(unique(folds))
# determine whether to parallelize the inner loop (over folds) or the outer loop (over lambdas);
# the inner loop is preferred for its better load balancing, unless we have more cores than folds
# TODO: this logic can be improved; e.g., with ncores=2 and nfold=3 the inner-loop parallelization gain is only 3/2=1.5,
# but if there are many lambda combinations in the outer loop then, despite potentially uneven task times,
# dynamic scheduling over the outer loop can yield better overall performance
if (ncores<=nfold && nfold%%ncores==0) {
if (print.level>=1) cat("using inner parallelization\n")
ncores.inner <- ncores
ncores.outer <- 1
} else {
if (print.level>=1) cat("using outer parallelization\n")
ncores.inner <- 1
ncores.outer <- ncores
}
nlambda <- nrow(lambda2)
loglike <- rep(NA, nlambda)
estobjs <- list()
if (print.level>=1) cat("number of lambda combinations to test:", nlambda, "\n")
if (ncores.outer==1) {
for (i in 1:nlambda) {
if (print.level>=1) cat("processing lambda combo", i, "of", nlambda, "\n")
flush.console()
control$lambda <- lambda2[i,"lambda"]
control$lambdas <- lambda2[i,"lambdas"]
ret <- bsgw.crossval(data=data, folds=folds, all=all, print.level=print.level, control=control, ncores=ncores.inner, ...)
loglike[i] <- ret
if (all) estobjs[[i]] <- attr(ret, "estobjs")
}
} else {
registerDoParallel(ncores.outer)
ret.all <- foreach (i=1:nlambda, .options.multicore=list(preschedule=FALSE)) %dopar% {
if (print.level>=1) cat("processing lambda combo", i, "of", nlambda, "\n")
flush.console()
control$lambda <- lambda2[i,"lambda"]
control$lambdas <- lambda2[i,"lambdas"]
ret <- bsgw.crossval(data=data, folds=folds, all=all, print.level=print.level, control=control, ncores=ncores.inner, ...)
return (list(loglike=ret, estobj=attr(ret, "estobjs")))
}
for (i in 1:nlambda) {
loglike[i] <- ret.all[[i]]$loglike
estobjs[[i]] <- ret.all[[i]]$estobj
}
}
opt.index <- which(loglike==max(loglike))[1]
lambda.opt <- lambda2[opt.index,"lambda"]
lambdas.opt <- lambda2[opt.index,"lambdas"]
ret <- list(lambda=lambda.opt, lambdas=lambdas.opt)
attr(ret, "loglike.vec") <- loglike
attr(ret, "loglike.opt") <- max(loglike)
attr(ret, "lambda2") <- lambda2
if (all) attr(ret, "estobjs") <- estobjs
if (print.level>=1) cat("selected lambda's: ", c(lambda.opt, lambdas.opt), "\n")
if (plot) {
# we only plot when lambda and lambdas are the same, otherwise we need a 2D plot
if (identical(attr(ret,"lambda2")[,"lambda"], attr(ret, "lambda2")[,"lambdas"])) {
plot(lambda2[,"lambda"], loglike, type="l", log="x", xlab="Shrinkage", ylab="Log-likelihood")
}
}
return (ret)
}
# ---- end of BSGW/R/BSGW.R ----
# evaluate h, H, S, as well as scale = 1/alpha (the survreg scale parameter) and lp (the linear predictor)
bsgw.eval <- function(coeff, X, Xs, t, ordweib=FALSE, alpha.min=0.1, alpha.max=10.0) {
K <- ncol(X)
Ks <- ncol(Xs)
beta <- coeff[1:K] # scale coefficients, corresponds to ordinary Weibull coefficients
betas <- coeff[K+1:Ks] # shape coefficients
lp <- X%*%beta
if (ordweib) {
alpha <- exp(betas[1])
} else {
alpha <- alpha.min + (alpha.max-alpha.min)/(1+exp(-Xs%*%betas))
}
expterm <- exp(lp)
H <- t^(alpha) * expterm
h <- alpha * t^(alpha-1) * expterm
S <- exp(-H)
return (list(h=h, H=H, S=S, scale=1/alpha, lp=lp))
}
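# The generalized Weibull quantities computed above follow
#   H(t) = t^alpha * exp(x'beta),  h(t) = alpha * t^(alpha-1) * exp(x'beta),  S(t) = exp(-H(t)),
# with alpha mapped into (alpha.min, alpha.max) through a scaled logistic link.
# Inert usage sketch with toy inputs (assumed shapes only; not from the package docs):
#   set.seed(1)
#   X <- cbind(1, rnorm(5)); Xs <- cbind(1, rnorm(5)); tvec <- runif(5, 0.1, 2)
#   out <- bsgw.eval(coeff = c(0.5, -0.2, 0.1, 0.3), X = X, Xs = Xs, t = tvec)
#   stopifnot(all.equal(out$S, exp(-out$H)))  # survival/cumulative-hazard identity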
bsgw.loglike <- function(coeff, X, Xs, t, s, right.censoring, ...) {
ret <- bsgw.eval(coeff, X, Xs, t, ...)
if (right.censoring) return (sum(s*log(ret$h) - ret$H))
else {
index.death <- which(s == 1)
sum.death <- sum(log(ret$h[index.death]) - ret$H[index.death])
sum.censoring <- sum(log(1 - ret$S[-index.death]))
return (sum.death + sum.censoring)
}
#else return (sum(s*log(ret$h) + log(1 - ret$S)))
}
bsgw.logpost <- function(coeff, X, Xs, t, s, lambda, lambdas, right.censoring, ...) {
return (bsgw.loglike(coeff, X, Xs, t, s, right.censoring, ...) - lambda*sum(abs(coeff[1:ncol(X)])) - lambdas*sum(abs(coeff[ncol(X)+1:ncol(Xs)])))
}
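# The L1 penalties in bsgw.logpost correspond (up to an additive constant) to
# independent, zero-mean Laplace priors on the coefficients, with rates lambda
# (scale model) and lambdas (shape model); lambda = lambdas = 0 recovers flat priors.
# Inert sketch with toy inputs as in the bsgw.eval example above:
#   set.seed(1)
#   X <- cbind(1, rnorm(5)); Xs <- cbind(1, rnorm(5)); tvec <- runif(5, 0.1, 2)
#   ll <- bsgw.loglike(c(0.5, -0.2, 0.1, 0.3), X, Xs, tvec, s = rep(1, 5), right.censoring = TRUE)
#   lp <- bsgw.logpost(c(0.5, -0.2, 0.1, 0.3), X, Xs, tvec, s = rep(1, 5),
#                      lambda = 1, lambdas = 1, right.censoring = TRUE)
#   stopifnot(lp <= ll)  # a nonnegative penalty can only lower the log-posterior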
bsgw.mcmc <- function(X, Xs, t, s, lambda, lambdas, iter=1000, sd.thresh=1e-06
, init=NULL, ordweib=FALSE, alpha.min=0.1, alpha.max=10.0, beta.max=log(20), betas.max=5.0
, print.level=2, nskip=10, right.censoring = 1) {
#browser()
mbeta <- 100 # TODO: convert from magic number to parameter that can be controlled
w <- 1.0 # TODO: same as above
K <- ncol(X)
Ks <- ncol(Xs)
beta_smp <- array(NA, dim=c(iter,K))
betas_smp <- array(NA, dim=c(iter,Ks))
nobs <- nrow(X)
scale_smp <- array(NA, dim=c(iter, nobs))
lp_smp <- array(NA, dim=c(iter, nobs))
loglike_smp <- rep(NA, iter)
logpost_smp <- rep(NA, iter)
# excluding zero-variance variables from sampling process
idx <- c(1, setdiff(1:K, which(apply(X, 2, function(x) sd(x)<sd.thresh))))
idxs <- c(1, setdiff(1:Ks, which(apply(Xs, 2, function(x) sd(x)<sd.thresh))))
if (ordweib) {
idxs <- 1
}
N <- length(idx)
Ns <- length(idxs)
if (is.null(init)) {
beta <- rep(0, K)
betas <- rep(0, Ks)
} else {
beta <- init$beta; beta[-idx] <- 0
betas <- init$betas; betas[-idxs] <- 0
}
for (n in 1:iter) {
#bsgw.logpost <- function(coeff, X, Xs, t, s, lambda, lambdas, ...)
coeffs <- MfU.Sample(c(beta[idx], betas[idxs]), f=bsgw.logpost, uni.sampler="slice"
, X=X[,idx,drop=FALSE], Xs=Xs[,idxs,drop=FALSE], t=t, s=s, lambda=lambda, lambdas=lambdas
, right.censoring = right.censoring
, ordweib=ordweib, alpha.min=alpha.min, alpha.max=alpha.max
, control=MfU.Control(n=N+Ns, slice.w=w, slice.m=mbeta
, slice.lower=c(c(-Inf,rep(-beta.max,N-1)),c(-Inf,rep(-betas.max,Ns-1)))
, slice.upper=c(c(+Inf,rep(+beta.max,N-1)),c(+Inf,rep(+betas.max,Ns-1)))
)
)
#coeffs <- bsgw.multislice.from.unislice(c(beta[idx], betas[idxs]), bsgw.logpost, X[,idx,drop=F], Xs[,idxs,drop=F]
# , t, s, lambda, lambdas
# , ordweib=ordweib, alpha.min=alpha.min, alpha.max=alpha.max, w=w, m=mbeta
# , lower=c(c(-Inf,rep(-beta.max,N-1)),c(-Inf,rep(-betas.max,Ns-1)))
# , upper=c(c(+Inf,rep(+beta.max,N-1)),c(+Inf,rep(+betas.max,Ns-1)))
# )
beta[idx] <- coeffs[1:N]
betas[idxs] <- coeffs[N+1:Ns]
beta_smp[n,] <- beta
betas_smp[n,] <- betas
# evaluate at the full coefficient vectors (excluded entries stay zero) so dimensions match X and Xs
ret <- bsgw.eval(c(beta, betas), X, Xs, t, ordweib, alpha.min, alpha.max)
scale_smp[n,] <- ret$scale
lp_smp[n,] <- ret$lp
loglike_smp[n] <- bsgw.loglike(coeffs, X[,idx,drop=FALSE], Xs[,idxs,drop=FALSE], t, s, right.censoring
, ordweib=ordweib, alpha.min=alpha.min, alpha.max=alpha.max)
logpost_smp[n] <- bsgw.logpost(coeffs, X[,idx,drop=FALSE], Xs[,idxs,drop=FALSE], t, s, lambda, lambdas, right.censoring
, ordweib=ordweib, alpha.min=alpha.min, alpha.max=alpha.max)
if (n%%nskip==0 && print.level>=2) cat("finished sample", n, "of", iter, "\n")
flush.console()
}
return (list(beta=beta_smp, betas=betas_smp, scale=scale_smp, lp=lp_smp, loglike=loglike_smp, logpost=logpost_smp, idx=idx, idxs=idxs))
}
# ---- end of BSGW/R/Sample.R ----
bsgw.control <- function(scalex=TRUE, iter=1000, burnin=round(iter/2), sd.thresh=1e-4, lambda=0.0, lambdas=lambda, nskip=round(iter/10), alpha.min=0.1, alpha.max=10.0
, beta.max=log(20), betas.max=5.0, memlim.gb=8) {
return (list(scalex=scalex, iter=iter, burnin=burnin, sd.thresh=sd.thresh, lambda=lambda, lambdas=lambdas, nskip=nskip
, alpha.min=alpha.min, alpha.max=alpha.max, beta.max=beta.max, betas.max=betas.max, memlim.gb=memlim.gb))
}
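# Inert sketch of the control defaults (values follow from the defaults above):
#   ctrl <- bsgw.control(iter = 2000)
#   ctrl$burnin   # 1000, i.e. half of iter
#   ctrl$nskip    # 200, i.e. iter/10
#   ctrl$lambdas  # 0.0, inherits lambda unless set explicitly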
bsgw.strip.formula <- function(survformula) {
allvars <- all.vars(survformula)
return (formula(paste("Surv(", allvars[1], ",", allvars[2], ")~1", sep="")))
}
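# Inert sketch: bsgw.strip.formula() drops all covariates, keeping only the
# survival response (used, e.g., for Kaplan-Meier fits on the same data):
#   bsgw.strip.formula(Surv(futime, fustat) ~ age + ecog.ps)
#   # Surv(futime, fustat) ~ 1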
bsgw.empty.plot <- function(...) {
plot(0, 0, type="l", xlab="", ylab="", ...)
}
bsgw.calc.pval <- function(x, ref=0.0, na.rm = FALSE) { # add flag for one-sided vs. two-sided
if (na.rm) x <- x[!is.na(x)]
bigger <- median(x)>ref
if (sd(x)<.Machine$double.eps) {
ret <- NA
} else {
ret <- 2*length(which(if (bigger) x<ref else x>ref))/length(x)
}
attr(ret, "bigger") <- bigger
return (ret)
}
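# Inert sketch: two-sided empirical "p-value" of MCMC draws against a reference
# value (the tail fraction on the far side of ref, doubled):
#   set.seed(1)
#   smp <- rnorm(1000, mean = 2)
#   pv <- bsgw.calc.pval(smp, ref = 0.0)
#   attr(pv, "bigger")  # TRUE: the posterior median exceeds ref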
bsgw.scale <- function(X, apply.sc, ...) {
if (missing(apply.sc)) apply.sc <- which(sapply(1:ncol(X), function(n) length(unique(X[,n]))>2))
ret <- scale(X[,apply.sc], ...)
X[,apply.sc] <- ret
attr(X, "centerVec") <- attr(ret, "scaled:center")
attr(X, "scaleVec") <- attr(ret, "scaled:scale")
attr(X, "apply.scale") <- apply.sc
return (X)
}
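# Inert sketch: only columns with more than two unique values are centred and
# scaled by default, so binary dummies pass through untouched:
#   Xm <- cbind(cont = rnorm(10), bin = rep(0:1, 5))
#   Xsc <- bsgw.scale(Xm)
#   attr(Xsc, "apply.scale")  # 1 -> only the continuous column was scaled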
bsgw.generate.folds <- function(ntot, nfold=5) {
# determine size of each fold
foldsize <- rep(round(ntot/nfold), nfold-1)
foldsize <- c(foldsize, ntot-sum(foldsize))
remain <- 1:ntot
folds <- rep(NA, ntot)
for (n in 1:(nfold-1)) {
idxtmp <- sample(remain, size=foldsize[n])
folds[idxtmp] <- n
remain <- setdiff(remain, idxtmp)
}
folds[remain] <- nfold
return (folds)
}
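# Inert sketch: random fold assignment with near-equal fold sizes:
#   set.seed(1)
#   folds <- bsgw.generate.folds(103, nfold = 5)
#   table(folds)  # four folds of 21 observations and one of 19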
bsgw.generate.folds.eventbalanced <- function(formula, data, nfold=5) {
statusCol <- all.vars(formula)[2]
index_with_event <- which(data[,statusCol]==1); nwith <- length(index_with_event)
index_without_event <- which(data[,statusCol]==0); nwithout <- length(index_without_event)
ret_with_event <- bsgw.generate.folds(nwith, nfold)
ret_without_event <- bsgw.generate.folds(nwithout, nfold)
ret_all <- list()
ret_flat <- rep(NA, nrow(data))
for (n in 1:nfold) {
ret_all[[n]] <- c(index_with_event[which(ret_with_event==n)], index_without_event[which(ret_without_event==n)])
ret_flat[ret_all[[n]]] <- n
}
return (ret_flat)
}
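# Inert sketch: folds that preserve the event/censoring mix (the status column
# is taken from the second variable of the survival formula):
#   set.seed(1)
#   toy <- data.frame(time = rexp(60), status = rbinom(60, 1, 0.3))
#   folds <- bsgw.generate.folds.eventbalanced(Surv(time, status) ~ 1, toy, nfold = 3)
#   table(folds, toy$status)  # events spread nearly evenly across folds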
# ---- end of BSGW/R/utils.R ----
.onAttach <- function(libname, pkgname) {
RFver <- read.dcf(file=system.file("DESCRIPTION", package=pkgname),
fields="Version")
packageStartupMessage(paste0("Package: ", pkgname, ", Version: ", RFver))
packageStartupMessage("Dynamic Survival Model using Bayesian Generalized Weibull Regression.")
packageStartupMessage("Scientific Computing Group, Sentrana Inc. &\nHeart and Lung Institute, Imperial College London")
}
# ---- end of BSGW/R/zzz.R ----
#'Bayesian synthetic likelihood
#'
#'@description Bayesian synthetic likelihood (BSL,
#' \insertCite{Price2018;textual}{BSL}) is an alternative to standard,
#' non-parametric approximate Bayesian computation (ABC). BSL assumes a
#' multivariate normal distribution for the summary statistic likelihood and it
#' is suitable when the distribution of the model summary statistics is
#' sufficiently regular.
#'
#' In this package, a Metropolis Hastings Markov chain Monte Carlo (MH-MCMC)
#' implementation of BSL is available. We also include implementations of four
#' methods (BSL, uBSL, semiBSL and BSLmisspec) and two shrinkage estimators
#' (graphical lasso and Warton's estimator).
#'
#' Methods: (1) BSL \insertCite{Price2018}{BSL}, which is the standard form of
#' Bayesian synthetic likelihood, assumes the summary statistic is roughly
#' multivariate normal; (2) uBSL \insertCite{Price2018}{BSL}, which uses an
#' unbiased estimator to the normal density; (3) semiBSL
#' \insertCite{An2018}{BSL}, which relaxes the normality assumption to an
#' extent and maintains the computational advantages of BSL without any tuning;
#' and (4) BSLmisspec \insertCite{Frazier2019}{BSL}, which estimates the
#' Gaussian synthetic likelihood whilst acknowledging that there may be
#' incompatibility between the model and the observed summary statistic.
#'
#' Shrinkage estimators are designed particularly to reduce the number of
#' simulations if method is BSL or semiBSL: (1) graphical lasso
#' \insertCite{Friedman2008}{BSL} finds a sparse precision matrix with an
#' L1-regularised log-likelihood. \insertCite{An2019;textual}{BSL} use
#' graphical lasso within BSL to bring down the number of simulations
#' significantly when the dimension of the summary statistic is high; and (2)
#' Warton's estimator \insertCite{Warton2008}{BSL} penalises the correlation
#' matrix and is straightforward to compute. When using Warton's shrinkage
#' estimator, it is also possible to utilise the Whitening transformation
#' \insertCite{Kessy2018}{BSL} to help decorrelate the summary statistics, thus
#' encouraging sparsity of the synthetic likelihood covariance matrix.
#'
#' Parallel computing is supported through the \code{foreach} package and users
#' can specify their own parallel backend by using packages like
#' \code{doParallel} or \code{doMC}. The \code{n} model simulations required to
#' estimate the synthetic likelihood at each iteration of MCMC will be
#' distributed across multiple cores. Alternatively a vectorised simulation
#' function that simultaneously generates \code{n} model simulations is also
#' supported.
#'
#' The main functionality is available through:
#'
#' \itemize{ \item \code{\link{bsl}}: The general function to perform BSL,
#' uBSL, or semiBSL (with or without parallel computing). \item
#' \code{\link{selectPenalty}}: A function to select the penalty when using
#' shrinkage estimation within BSL or semiBSL. }
#'
#' Several examples have also been included. These examples can be used to
#' reproduce the results of An et al. (2019), and can help practitioners learn
#' how to use the package.
#'
#' \itemize{
#'
#' \item \code{\link{ma2}}: The MA(2) example from
#' \insertCite{An2019;textual}{BSL}.
#'
#' \item \code{\link{mgnk}}: The multivariate G&K example from
#' \insertCite{An2019;textual}{BSL}.
#'
#' \item \code{\link{cell}}: The cell biology example from
#' \insertCite{Price2018;textual}{BSL} and \insertCite{An2019;textual}{BSL}.
#'
#' \item \code{\link{toad}}: The toad example from
#' \insertCite{Marchand2017;textual}{BSL}, and also considered in
#' \insertCite{An2018;textual}{BSL}.
#'
#' }
#'
#' Extensions to this package are planned. For a journal article describing how
#' to use this package, including full descriptions of the MA(2) and toad examples,
#' see \insertCite{An2022;textual}{BSL}.
#'
#'@references
#'
#'\insertAllCited{}
#'
#'@author Ziwen An, Leah F. South and Christopher Drovandi
"_PACKAGE"
#> [1] "_PACKAGE"
# ---- end of BSL/R/BSL-package.R ----
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Simulation function of the cell biology example
#'
#' @description Simulation function of the cell biology example.
#' @param x The initial matrix of cell presences of size \code{rows}
#' \eqn{\times} \code{cols}.
#' @param Pm Parameter \eqn{P_m},
#' the probability of cell movement.
#' @param Pp Parameter \eqn{P_p},
#' the probability of cell proliferation.
#' @inheritParams cell
#' @return A \code{rows} \eqn{\times} \code{cols}
#' \eqn{\times} \code{num_obs} array
#' of the cell presences at times \code{1:num_obs} (not time 0).
#' @export
simulate_cell <- function(x, rows, cols, Pm, Pp, sim_iters, num_obs) {
.Call(`_BSL_simulate_cell`, x, rows, cols, Pm, Pp, sim_iters, num_obs)
}
#' Generate a random sample from the zero-centered stable distribution
#'
#' @description Draw a sample from a symmetric, zero-centered stable distribution with
#' given scale and stability (alpha) parameters, using the CMS algorithm.
#' @param scale The scale parameter.
#' @param alpha The stability parameter.
#' @return A random sample from the zero-centered stable distribution.
#' @keywords internal
rstable <- function(scale, alpha) {
.Call(`_BSL_rstable`, scale, alpha)
}
#' The simulation function for the toad example
#'
#' @description The simulation function for the toad example.
#' @param params A vector of proposed model parameters, \eqn{\alpha},
#' \eqn{\gamma} and \eqn{p_0}.
#' @param ntoad The number of toads to simulate in the observation.
#' @param nday The number of days lasted of the observation.
#' @param model Which model to be used. 1 for the random return model, 2 for the nearest return model,
#' and 3 for the distance-based return probability model.
#' @param d0 Characteristic distance for model 3. Only used if model is 3.
#' @return A data matrix.
#' @examples sim_toad(c(1.7,36,0.6), 10, 8, 1)
#' @export
sim_toad <- function(params, ntoad, nday, model = 1L, d0 = 100) {
.Call(`_BSL_sim_toad`, params, ntoad, nday, model, d0)
}
#' Convert an observation matrix to a vector of n-day displacements
#'
#' @description Convert an observation matrix to a vector of n-day
#' displacements. This is a function for the toad example.
#' @param X The observation matrix to be converted.
#' @param lag Integer, the number of day lags to compute the displacement.
#' @return A vector of displacements.
#' @export
obsMat2deltax <- function(X, lag) {
.Call(`_BSL_obsMat2deltax`, X, lag)
}
# ---- end of BSL/R/RcppExports.R ----
#' @include s4-BSL.R
NULL
#' Performing BSL, uBSL, semiBSL and BSLmisspec
#'
#' @description This is the main function for performing MCMC BSL (with a
#' standard or non-standard likelihood estimator) to sample from the
#' approximate posterior distribution. A couple of extentions to the standard
#' approach are available by changing the following arguments, \code{method},
#' \code{shrinkage}, \code{whitening}, \code{misspecType}. Parallel computing
#' is supported with the R package \code{foreach}.
#'
#' @param y The observed data. Note this should be the raw dataset NOT the
#' set of summary statistics.
#' @param n The number of simulations from the model per MCMC iteration for
#' estimating the synthetic likelihood.
#' @param M The number of MCMC iterations.
#' @param model A ``MODEL'' object generated with function
#' \code{newModel}. See \code{\link{newModel}}.
#' @param covRandWalk The covariance matrix of a multivariate normal random walk
#' proposal distribution used in the MCMC.
#' @param method A string argument indicating the method to be used. The
#' default, ``BSL'', runs standard BSL. ``uBSL'' uses the unbiased estimator
#' of a normal density of \insertCite{Ghurye1969;textual}{BSL}. ``semiBSL''
#' runs the semi-parametric BSL algorithm and is more robust to non-normal
#' summary statistics. ``BSLmisspec'' estimates the Gaussian synthetic
#' likelihood whilst acknowledging that there may be incompatibility between
#' the model and the observed summary statistic \insertCite{Frazier2019}{BSL}.
#' @param shrinkage A string argument indicating which shrinkage method to
#' be used. The default is \code{NULL}, which means no shrinkage is used.
#' Shrinkage estimation is only available for methods ``BSL'' and ``semiBSL''.
#' Current options are ``glasso'' for the graphical lasso method of
#' \insertCite{Friedman2008;textual}{BSL} and ``Warton'' for the ridge
#' regularisation method of \insertCite{Warton2008;textual}{BSL}.
#' @param penalty The penalty value to be used for the specified shrinkage
#' method. Must be between zero and one if the shrinkage method is ``Warton''.
#'
#' @param logitTransformBound A \eqn{p} by \eqn{2} numeric matrix indicating the
#' upper and lower bounds of parameters if a logit transformation is used on
#' the parameter space, where \eqn{p} is the number of parameters. The default
#' is \code{NULL}, which means no logit transformation is used. It is also
#' possible to define other transformations within the simulation and prior
#' function from \code{model}. The first column contains the lower bound of
#' each parameter and the second column contains the upper bound. Infinite
#' lower or upper bounds are also supported, eg.
#' \code{matrix(c(1,Inf,0,10,-Inf,0.5),3,2,byrow=TRUE)}.
#' @param standardise A logical argument that determines whether to standardise
#' the summary statistics before applying the graphical lasso. This is only
#' valid if method is ``BSL'', shrinkage is ``glasso'' and penalty is not
#' \code{NULL}. The diagonal elements will not be penalised if the shrinkage
#' method is ``glasso''. The default is \code{FALSE}.
#' @param GRC A logical argument indicating whether the Gaussian rank
#' correlation matrix \insertCite{Boudt2012}{BSL} should be used to estimate
#' the covariance matrix in ``BSL'' method. The default is \code{FALSE}, which
#' uses the sample covariance by default.
#' @param whitening This argument determines whether Whitening transformation
#' should be used in ``BSL'' method with Warton's shrinkage. Whitening
#' transformation helps decorrelate the summary statistics, thus encouraging
#' sparsity of the synthetic likelihood covariance matrix. This might allow
#' heavier shrinkage to be applied without losing much accuracy, hence
#' allowing the number of simulations to be reduced. By default, \code{NULL}
#' represents no Whitening transformation. Otherwise this is enabled if a
#' Whitening matrix is provided. See \code{\link{estimateWhiteningMatrix}} for
#' the function to estimate the Whitening matrix.
#' @param misspecType A string argument indicating which type of model
#' misspecification to be used. The two options are "mean" and "variance".
#' Only used when method is ``BSLmisspec''. The default, \code{NULL}, means no
#' model misspecification is considered.
#' @param tau A numeric argument, parameter of the prior distribution
#' for "BSLmisspec" method. For mean adjustment, \code{tau} is the scale of
#' the Laplace distribution. For variance inflation, \code{tau} is the mean of
#' the exponential distribution. Only used when method is ``BSLmisspec''.
#' @param parallel A logical value indicating whether parallel computing should
#' be used for simulation and summary statistic evaluation. The default is
#' \code{FALSE}. When model simulation is fast, it may be preferable to
#' perform serial or vectorised computations to avoid significant
#' communication overhead between workers. Parallel computation can only be
#' used if not using a vectorised simulation function, see \code{\link{MODEL}}
#' for options of vectorised simulation function.
#' @param parallelArgs A list of additional arguments to pass into the
#' \code{foreach} function. Only used when parallel computing is enabled,
#' default is \code{NULL}.
#' @param plotOnTheFly A logical or numeric argument defining whether or by how
#' many iterations a posterior figure will be plotted during running. If
#' \code{TRUE}, a plot of approximate univariate posteriors based on the
#' current accepted samples will be shown every one thousand iterations.
#' The default is \code{FALSE}.
#' @param verbose An integer indicating the verbose style. 0L
#' means no verbose messages will be printed. 1L uses a custom progress bar to
#' track the progress. 2L prints the iteration numbers (\code{1:M}) to track
#' the progress. The default is 1L.
#'
#' @param theta0 Deprecated, will be removed in the future, use \code{model}
#' instead. Initial guess of the parameter value, which is used as the
#' starting value for MCMC.
#' @param fnSim Deprecated, will be removed in the future, use
#' \code{model} instead. A function that simulates data for a given parameter
#' value. The first argument should be the parameters. Other necessary
#' arguments (optional) can be specified with \code{simArgs}.
#' @param fnSum Deprecated, will be removed in the future, use
#' \code{model} instead. A function for computing summary statistics of data.
#' The first argument should be the observed or simulated dataset. Other
#' necessary arguments (optional) can be specified with \code{sumArgs}.
#' @param fnPrior Deprecated, will be removed in the future, use
#' \code{model} instead. A function that computes the log prior density for a
#' parameter. The default is \code{NULL}, which uses an improper flat prior
#' over the real line for each parameter. The function must have a single
#' input: a vector of parameter values.
#' @param simArgs Deprecated, will be removed in the future, use
#' \code{model} instead. A list of additional arguments to pass into the
#' simulation function. Only use when the input \code{fnSim} requires
#' additional arguments. The default is \code{NULL}.
#' @param sumArgs Deprecated, will be removed in the future, use
#' \code{model} instead. A list of additional arguments to pass into the
#' summary statistics function. Only use when the input \code{fnSum} requires
#' additional arguments. The default is \code{NULL}.
#' @param thetaNames Deprecated, will be removed in the future, use \code{model}
#' instead. A string vector of parameter names, which must have the same
#' length as the parameter vector. The default is \code{NULL}.
#'
#' @return An object of class \code{bsl} is returned, see \code{\link{BSL}}
#' for more information of the S4 class.
#'
#' @references
#'
#' \insertAllCited{}
#'
#' \insertRef{Price2018}{BSL}
#'
#' \insertRef{An2019}{BSL}
#'
#' \insertRef{An2018}{BSL}
#'
#' @examples
#' \dontshow{
#' toy_sim <- function(n, theta) matrix(rnorm(n, theta), nrow = n)
#' toy_sum <- function(x) x
#'
#' model <- newModel(fnSimVec = toy_sim, fnSum = toy_sum, theta0 = 0)
#'
#' result_toy <- bsl(y = 1, n = 10, M = 100, model = model, covRandWalk = matrix(1),
#' method = "BSL", verbose = FALSE)
#' summary(result_toy)
#' plot(result_toy)
#' }
#' \dontrun{
#' # This is just a minimal test run, please see package built-in examples for more
#' # comprehensive usages of the function
#' toy_sim <- function(n, theta) matrix(rnorm(n, theta), nrow = n)
#' toy_sum <- function(x) x
#' model <- newModel(fnSimVec = toy_sim, fnSum = toy_sum, theta0 = 0)
#'
#' result_toy <- bsl(y = 1, n = 100, M = 1e4, model = model, covRandWalk = matrix(1),
#' method = "BSL", plotOnTheFly = TRUE)
#' summary(result_toy)
#' plot(result_toy)
#' }
#'
#' @author Ziwen An, Leah F. South and Christopher Drovandi
#' @seealso \code{\link{ma2}}, \code{\link{cell}}, \code{\link{mgnk}} and
#' \code{\link{toad}} for examples. \code{\link{selectPenalty}} for a function
#' to tune the BSLasso tuning parameter and \code{\link{plot}} for functions
#' related to visualisation.
#' @export
bsl <- function(y, n, M, model, covRandWalk, theta0, fnSim, fnSum, method = c("BSL", "uBSL",
"semiBSL", "BSLmisspec"), shrinkage = NULL, penalty = NULL, fnPrior = NULL, simArgs = NULL,
sumArgs = NULL, logitTransformBound = NULL, standardise = FALSE, GRC = FALSE, whitening = NULL,
misspecType = NULL, tau = 1, parallel = FALSE, parallelArgs = NULL,
thetaNames = NULL, plotOnTheFly = FALSE, verbose = 1L) {
method <- match.arg(method)
if (is.null(misspecType)) {
flagType <- FALSE
} else {
flagType <- TRUE
misspecType <- match.arg(misspecType, c("mean", "variance"))
}
if (method != "BSLmisspec" && flagType) {
warning("\"misspecType\" will be ignored because method is not \"BSLmisspec\"")
}
if (method == "BSLmisspec" && !flagType) {
stop("\"misspecType\" must be provided to enable \"BSLmisspec\" method")
}
if (!parallel & !is.null(parallelArgs)) {
warning("\"parallelArgs\" is omitted in serial computing")
}
if (!is.null(shrinkage)) {
flagShrinkage <- TRUE
shrinkage <- match.arg(shrinkage, c("glasso", "Warton"))
} else {
flagShrinkage <- FALSE
}
if (!flagShrinkage && !is.null(penalty)) {
warning("\"penalty\" will be ignored because no shrinkage method is specified")
}
if (flagShrinkage && is.null(penalty)) {
stop("a penalty value must be specified to enable shrinkage estimation")
}
if (!flagShrinkage && standardise) {
warning("\"standardise\" will be ignored because shrinkage method is not \"glasso\"")
}
# deprecated arguments
if (!missing(theta0)) {
warning("theta0 will be deprecated in the future, use model instead, see \"?model\"")
}
if (!missing(fnSim)) {
warning("fnSim will be deprecated in the future, use model instead, see \"?model\"")
}
if (!missing(fnSum)) {
warning("fnSum will be deprecated in the future, use model instead, see \"?model\"")
}
if (!is.null(simArgs)) {
warning("simArgs will be deprecated in the future, use model instead, see \"?model\"")
}
if (!is.null(sumArgs)) {
warning("sumArgs will be deprecated in the future, use model instead, see \"?model\"")
}
if (!is.null(fnPrior)) {
warning("fnPrior will be deprecated in the future, use model instead, see \"?model\"")
}
if (!is.null(thetaNames)) {
warning("thetaNames will be deprecated in the future, use model instead, see \"?model\"")
}
if (missing(model)) {
if (is.null(fnPrior)) {
fnLogPrior <- NULL
} else {
fnLogPrior <- function(...) log(fnPrior(...))
}
model <- newModel(fnSim = fnSim, fnSum = fnSum, simArgs = simArgs, sumArgs = sumArgs,
fnLogPrior = fnLogPrior, theta0 = theta0, thetaNames = thetaNames)
} else {
stopifnot(inherits(model, "MODEL"))
}
ns <- model@ns
if (method == "semiBSL" && ns < 2) {
stop("The dimension of summary statistic must be at least 2 to use method \"semiBSL\"")
}
if (is.null(whitening)) {
flagWhitening <- FALSE
ssyTilde <- NULL
} else if (is.atomic(whitening) & is.matrix(whitening)) {
if (all(dim(whitening) == c(ns, ns))) {
flagWhitening <- TRUE
} else {
stop(paste("The Whitening matrix must be of dimension", ns, "by", ns))
}
} else if (is.atomic(whitening) & length(whitening) == 1) {
flagWhitening <- as.logical(whitening)
if (flagWhitening) {
if (verbose) cat("estimating the Whitening matrix ... ")
whitening <- estimateWhiteningMatrix(n = 1e3, model = model)
if (verbose) cat("finish\n")
} else {
whitening <- ssyTilde <- NULL
}
} else {
stop("invalid argument \"whitening\"")
}
if (!flagShrinkage && flagWhitening) {
warning("\"whitening\" will be ignored because shrinkage method is not \"Warton\"")
}
if (flagShrinkage) {
if (shrinkage != "glasso" && standardise) {
warning("standardisation is only supported if shrinkage is \"glasso\"")
}
if (shrinkage != "Warton" && flagWhitening) {
warning("Whitening is only supported if shrinkage is \"Warton\"")
}
}
if (verbose) {
if (flagType) typeText <- switch(misspecType,
"mean" = "mean-adjusted",
"variance" = "variance-inflated")
methodText <- switch(method,
"BSL" = "standard BSL",
"uBSL" = "unbiased BSL",
"semiBSL" = "semi-BSL",
"BSLmisspec" = paste("BSL with", typeText, "model misspecification")
)
shrinkageText <- paste("shrinkage:", ifelse(flagShrinkage, shrinkage, "disabled"))
whiteningText <- paste("whitening:", ifelse(flagWhitening, "enabled", "disabled"))
cat("*** running ", methodText, ", ", shrinkageText, ", ", whiteningText, " *** \n", sep = "")
# cat(shrinkageText, ", ", whiteningText, "\n", sep = "")
}
p <- length(model@theta0)
fnLogPrior <- model@fnLogPrior
logitTransform <- !is.null(logitTransformBound)
if (logitTransform) {
if (any(dim(logitTransformBound) != c(p, 2))) {
stop("\"logitTransformBound\" must be a p by 2 matrix, where p is the length of parameter")
}
}
cl <- match.call()
startTime <- Sys.time()
# initialise parameters
ssy <- do.call(model@fnSum, c(list(y), model@sumArgs))
stopifnot(length(ssy) == ns)
thetaCurr <- model@theta0
loglikeCurr <- Inf
if (logitTransform) {
thetaTildeCurr <- paraLogitTransform(thetaCurr, logitTransformBound)
}
theta <- array(0, c(M, p), dimnames = list(NULL, thetaNames))
loglike <- numeric(M) # ignore if method is not "BSLmisspec"
gamma <- array(0, c(M, ns))
countAcc <- countEar <- countErr <- 0
if (flagWhitening) {
ssyTilde <- c(tcrossprod(ssy, whitening))
}
if (method == "BSLmisspec") {
gammaCurr <- switch(misspecType,
mean = numeric(ns),
variance = rep(tau, ns))
}
# map the simulation function
if (parallel) {
myFnSimSum <- function(n, theta) fn(model)$fnPar(n, theta, parallelArgs)
} else {
myFnSimSum <- fn(model)$fn
}
# plot-on-the-fly
if (plotOnTheFly) {
if (plotOnTheFly == 1) {
plotOnTheFly <- 1000
}
oldpar <- par(no.readonly = TRUE) # get current user par settings
on.exit(par(oldpar)) # reset current user par settings at the end of the function
a <- floor(sqrt(p))
b <- ceiling(p/a)
par(mfrow = c(a, b))
}
# if (verbose) cat("initialising parameters ... ")
while (is.infinite(loglikeCurr)) {
# simulate with thetaProp and calculate the summary statistics
ssx <- myFnSimSum(n, thetaCurr)
if (any(is.infinite(ssx))) {
stop("Inf detected in the summary statistics vector, this will cause an error in likelihood evaluation")
}
# compute the loglikelihood
loglikeCurr <- switch(method,
BSL = gaussianSynLike(ssy, ssx, shrinkage, penalty, standardise, GRC, whitening, ssyTilde, log = TRUE, verbose = verbose),
uBSL = gaussianSynLikeGhuryeOlkin(ssy, ssx, log = TRUE, verbose = verbose),
semiBSL = semiparaKernelEstimate(ssy, ssx, shrinkage = shrinkage, penalty = penalty),
BSLmisspec = synLikeMisspec(ssy, ssx, type = misspecType, gamma = gammaCurr, log = TRUE, verbose = verbose)
)
if (method == "BSLmisspec") {
ssxCurr <- ssx
stdCurr <- attr(loglikeCurr, "std")
}
}
# if (verbose) cat("finish\n")
if (verbose == 1L) timeStart <- Sys.time()
for (i in 1:M) {
flush.console()
if (verbose == 2L) {
cat("i =", i, "\n")
}
if (method == "BSLmisspec") {
gammaCurr <- switch(misspecType,
mean = sliceGammaMean(ssy, ssxCurr, loglikeCurr, gammaCurr, tau, std = stdCurr),
variance = sliceGammaVariance(ssy, ssxCurr, loglikeCurr, gammaCurr, tau, std = stdCurr)
)
loglikeCurr <- attr(gammaCurr, "loglike")
}
# multivariate normal random walk to the proposed value of theta
if (!logitTransform) {
thetaProp <- c(mvtnorm::rmvnorm(1, mean = thetaCurr, sigma = covRandWalk))
logp2 <- 0
} else {
thetaTildeCurr <- paraLogitTransform(thetaCurr, logitTransformBound)
# thetaTildeProp <- mvrnorm(1, thetaTildeCurr, covRandWalk)
thetaTildeProp <- mvtnorm::rmvnorm(1, mean = thetaTildeCurr, sigma = covRandWalk)
thetaProp <- paraLogitBackTransform(thetaTildeProp, logitTransformBound)
logp2 <- jacobianLogitTransform(thetaTildeProp, logitTransformBound, TRUE) -
jacobianLogitTransform(thetaTildeCurr, logitTransformBound, TRUE)
}
# early rejection if the proposed theta falls outside of prior coverage
# / feasible region
if (!is.null(fnLogPrior)) {
logp1 <- fnLogPrior(thetaProp) - fnLogPrior(thetaCurr)
if (logp1 == -Inf) {
if (verbose == 2L) {
cat("*** early rejection ***\n")
} else if (verbose == 1L){
timeElapsed <- difftime(Sys.time(), timeStart, units = "secs")
timeLeft <- timeElapsed / i * (M - i)
elapsed <- myTimeStr(timeElapsed)
left <- myTimeStr(timeLeft)
acc <- paste0(formatC(100 * countAcc / i, format = "f", digits = 2), "%")
myMiniProgressBar(i / M, txt1 = paste(sprintf("%-2.1f%% finished,", 100 * i / M), "i =", i, "*** early rejection ***"),
txt2 = paste0("acceptance rate = ", acc, ", elapsed = ", elapsed, ", remaining = ", left),
style = 3, label = c("=", ".", ""))
flush.console()
}
theta[i, ] <- thetaCurr
loglike[i] <- loglikeCurr
if (method == "BSLmisspec") {
gamma[i, ] <- gammaCurr
}
countEar <- countEar + 1
next
}
} else {
logp1 <- 0
}
log_prob <- logp1 + logp2
# simulate with thetaProp and calculate the summary statistics
ssx <- myFnSimSum(n, thetaProp)
# reject if inifite value is detected in ssx
if (any(is.infinite(ssx))) {
if (verbose == 2L) {
cat("*** reject (infinite ssx) ***\n")
} else if (verbose == 1L){
timeElapsed <- difftime(Sys.time(), timeStart, units = "secs")
timeLeft <- timeElapsed / i * (M - i)
elapsed <- myTimeStr(timeElapsed)
left <- myTimeStr(timeLeft)
acc <- paste0(formatC(100 * countAcc / i, format = "f", digits = 2), "%")
myMiniProgressBar(i / M, txt1 = paste(sprintf("%-2.1f%% finished,", 100 * i / M), "i =", i, "*** reject (infinite ssx) ***"),
txt2 = paste0("acceptance rate = ", acc, ", elapsed = ", elapsed, ", remaining = ", left),
style = 3, label = c("=", ".", ""))
flush.console()
}
theta[i, ] <- thetaCurr
loglike[i] <- loglikeCurr
if (method == "BSLmisspec") {
gamma[i, ] <- gammaCurr
}
countErr <- countErr + 1
next
}
# compute the loglikelihood
loglikeProp <- switch(method,
BSL = gaussianSynLike(ssy, ssx, shrinkage, penalty, standardise, GRC, whitening, ssyTilde, log = TRUE, verbose = verbose),
uBSL = gaussianSynLikeGhuryeOlkin(ssy, ssx, log = TRUE, verbose = verbose),
semiBSL = semiparaKernelEstimate(ssy, ssx, shrinkage = shrinkage, penalty = penalty),
BSLmisspec = synLikeMisspec(ssy, ssx, type = misspecType, gamma = gammaCurr, log = TRUE, verbose = verbose)
)
if (loglikeProp == Inf) {
if (verbose == 2L) {
cat("*** reject (positive infinite loglike) ***\n")
} else if (verbose == 1L){
timeElapsed <- difftime(Sys.time(), timeStart, units = "secs")
timeLeft <- timeElapsed / i * (M - i)
elapsed <- myTimeStr(timeElapsed)
left <- myTimeStr(timeLeft)
acc <- paste0(formatC(100 * countAcc / i, format = "f", digits = 2), "%")
myMiniProgressBar(i / M, txt1 = paste(sprintf("%-2.1f%% finished,", 100 * i / M), "i =", i, "*** reject (positive infinite loglike) ***"),
txt2 = paste0("acceptance rate = ", acc, ", elapsed = ", elapsed, ", remaining = ", left),
style = 3, label = c("=", ".", ""))
flush.console()
}
theta[i, ] <- thetaCurr
loglike[i] <- loglikeCurr
if (method == "BSLmisspec") {
gamma[i, ] <- gammaCurr
}
countErr <- countErr + 1
next
}
log_rloglike <- loglikeProp - loglikeCurr
# Metropolis-Hastings accept/reject: log_prob carries the prior ratio and any
# logit-transform Jacobian; log_rloglike is the synthetic log-likelihood ratio
if (runif(1) < exp(log_prob + log_rloglike)) {
if (verbose == 2L) {
cat("*** accept ***\n")
} else if (verbose == 1L){
timeElapsed <- difftime(Sys.time(), timeStart, units = "secs")
timeLeft <- timeElapsed / i * (M - i)
elapsed <- myTimeStr(timeElapsed)
left <- myTimeStr(timeLeft)
acc <- paste0(formatC(100 * countAcc / i, format = "f", digits = 2), "%")
myMiniProgressBar(i / M, txt1 = paste(sprintf("%-2.1f%% finished,", 100 * i / M), "i =", i, "*** accept ***"),
txt2 = paste0("acceptance rate = ", acc, ", elapsed = ", elapsed, ", remaining = ", left),
style = 3, label = c("=", ".", ""))
flush.console()
}
thetaCurr <- thetaProp
loglikeCurr <- loglikeProp
if (method == "BSLmisspec") {
ssxCurr <- ssx
stdCurr <- attr(loglikeProp, "std")
}
countAcc <- countAcc + 1
} else {
if (verbose == 1L){
timeElapsed <- difftime(Sys.time(), timeStart, units = "secs")
timeLeft <- timeElapsed / i * (M - i)
elapsed <- myTimeStr(timeElapsed)
left <- myTimeStr(timeLeft)
acc <- paste0(formatC(100 * countAcc / i, format = "f", digits = 2), "%")
myMiniProgressBar(i / M, txt1 = paste(sprintf("%-2.1f%% finished,", 100 * i / M), "i =", i),
txt2 = paste0("acceptance rate = ", acc, ", elapsed = ", elapsed, ", remaining = ", left),
style = 3, label = c("=", ".", ""))
flush.console()
}
}
theta[i, ] <- thetaCurr
loglike[i] <- loglikeCurr
if (method == "BSLmisspec") {
gamma[i, ] <- gammaCurr
}
if (plotOnTheFly) {
if (i %% plotOnTheFly == 0) {
for (k in 1:p) {
plot(density(theta[1:i, k]), main = NA, xlab = thetaNames[k],
col = 1, lty = 1)
}
}
}
}
if (verbose == 1L) cat('\n')
accRate <- countAcc/M
earRate <- countEar/M
errRate <- countErr/M
time <- difftime(Sys.time(), startTime)
result <- new("BSL", theta = theta, loglike = loglike, call = cl, model = model,
acceptanceRate = accRate, earlyRejectionRate = earRate, errorRate = errRate,
y = y, n = n, M = M, covRandWalk = covRandWalk, method = method,
shrinkage = shrinkage, penalty = penalty, standardise = standardise, GRC = GRC,
logitTransform = logitTransform, logitTransformBound = logitTransformBound,
parallel = parallel, parallelArgs = parallelArgs, time = time,
gamma = gamma, misspecType = misspecType, tau = tau, whitening = whitening)
return(result)
}
# ---- end of BSL/R/bsl.R ----
#' Cell biology example
#'
#' @description This example estimates the probabilities of cell motility and
#' cell proliferation for a discrete-time stochastic model of cell spreading.
#' We provide the data and tuning parameters required to reproduce the results
#' in \insertCite{An2019;textual}{BSL}.
#'
#' @param theta A vector of proposed model parameters,
#' \eqn{P_m} and \eqn{P_p}.
#' @param Y A \code{rows} \eqn{\times}
#' \code{cols} \eqn{\times} \code{num_obs} array
#' of the cell presences at times \code{1:num_obs} (not time 0).
#' @param Yinit The initial matrix of cell presences of size \code{rows}
#' \eqn{\times} \code{cols}.
#' @param rows The number of rows in the lattice (rows in the cell location
#' matrix).
#' @param cols The number of columns in the lattice (columns in the cell
#' location matrix).
#' @param sim_iters The number of discretisation steps to get to when an
#' observation is actually taken. For example, if observations are taken every
#' 5 minutes but the discretisation level is 2.5 minutes, then
#' \code{sim_iters} would be 2. Larger values of \code{sim_iters} lead to more
#' ``accurate'' simulations from the model, but they also increase the
#' simulation time.
#' @param num_obs The total number of images taken after initialisation.
#'
#' @details Cell motility (movement) and proliferation (reproduction) cause
#' tumors to spread and wounds to heal. If we can measure cell proliferation
#' and cell motility under different situations, then we may be able to use
#' this information to determine the efficacy of different medical treatments.
#'
#' A common method for measuring in vitro cell movement and proliferation is
#' the scratch assay. Cells form a layer on an assay and, once they are
#' completely covering the assay, a scratch is made to separate the cells.
#' Images of the cells are taken until the scratch has closed up and the cells
#' are in contact again. Each image can be converted to a binary matrix by
#' forming a lattice and recording the binary matrix (of size \code{rows}
#' \eqn{\times} \code{cols}) of cell presences.
#'
#' The model that we consider is a random walk model with parameters for the
#' probability of cell movement
#' (\eqn{P_m}) and the probability
#' of cell proliferation
#' (\eqn{P_p}) and it has no
#' tractable likelihood function. We use the vague priors
#' \eqn{P_m \sim U(0,1)}
#' and \eqn{P_p \sim U(0,1)}.
#'
#' We have a total of 145 summary statistics, which are made up of the Hamming
#' distances between the binary matrices for each time point and the total
#' number of cells at the final time.
#'
#' Details about the types of cells that this model is suitable for and other
#' information can be found in \insertCite{Price2018;textual}{BSL} and
#' \insertCite{An2019;textual}{BSL}. \insertCite{Johnston2014;textual}{BSL}
#' use a different ABC method and different summary statistics for a similar
#' example.
#'
#' @section A simulated dataset:
#'
#' An example ``observed'' dataset and the tuning parameters relevant to that
#' example can be obtained using \code{data(cell)}. This ``observed'' data is
#' a simulated dataset with \eqn{P_m = 0.35} and
#' \eqn{P_p = 0.001}. The lattice has 27 \code{rows} and 36
#' \code{cols} and there are \code{num_obs = 144} observations after time 0
#' (to mimic images being taken every 5 minutes for 12 hours). The simulation
#' is based on there initially being 110 cells in the assay.
#'
#' Further information about the specific choices of tuning parameters used in
#' BSL and BSLasso can be found in An et al. (2019).
#'
#' \itemize{
#'
#' \item \code{data}: The \code{rows}
#' \eqn{\times} \code{cols}
#' \eqn{\times} \code{num_obs} array of the cell
#' presences at times 1:144.
#'
#' \item \code{sim_args}: Values of \code{sim_args} relevant to this example.
#'
#' \item \code{sum_args}: Values of \code{sum_args} relevant to this example,
#' i.e. just the value of \code{Yinit}.
#'
#' \item \code{start}: A vector of suitable initial values of the parameters
#' for MCMC.
#'
#' \item \code{cov}: The covariance matrix of a multivariate normal random
#' walk proposal distribution used in the MCMC, in the form of a 2
#' \eqn{\times} 2 matrix.
#'
#' }
#'
#' @examples
#' \dontrun{
#' require(doParallel) # You can use a different package to set up the parallel backend
#'
#' # Loading the data for this example
#' data(cell)
#' model <- newModel(fnSim = cell_sim, fnSum = cell_sum, simArgs = cell$sim_args,
#' sumArgs = cell$sum_args, theta0 = cell$start, fnLogPrior = cell_prior,
#' thetaNames = expression(P[m], P[p]))
#' thetaExact <- c(0.35, 0.001)
#'
#' # Performing BSL (reduce the number of iterations M if desired)
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' resultCellBSL <- bsl(cell$data, n = 5000, M = 10000, model = model, covRandWalk = cell$cov,
#' parallel = TRUE, verbose = 1L)
#' stopCluster(cl)
#' registerDoSEQ()
#' show(resultCellBSL)
#' summary(resultCellBSL)
#' plot(resultCellBSL, thetaTrue = thetaExact, thin = 20)
#'
#' # Performing uBSL (reduce the number of iterations M if desired)
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' resultCelluBSL <- bsl(cell$data, n = 5000, M = 10000, model = model, covRandWalk = cell$cov,
#' method = "uBSL", parallel = TRUE, verbose = 1L)
#' stopCluster(cl)
#' registerDoSEQ()
#' show(resultCelluBSL)
#' summary(resultCelluBSL)
#' plot(resultCelluBSL, thetaTrue = thetaExact, thin = 20)
#'
#' # Performing tuning for BSLasso
#' ssy <- cell_sum(cell$data, cell$sum_args$Yinit)
#' lambda_all <- list(exp(seq(0.5,2.5,length.out=20)), exp(seq(0,2,length.out=20)),
#' exp(seq(-1,1,length.out=20)), exp(seq(-1,1,length.out=20)))
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' set.seed(100)
#' sp_cell <- selectPenalty(ssy, n = c(500, 1000, 1500, 2000), lambda_all, theta = thetaExact,
#' M = 100, sigma = 1.5, model = model, method = "BSL", shrinkage = "glasso",
#' parallelSim = TRUE, parallelMain = FALSE)
#' stopCluster(cl)
#' registerDoSEQ()
#' sp_cell
#' plot(sp_cell)
#'
#' # Performing BSLasso with a fixed penalty (reduce the number of iterations M if desired)
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' resultCellBSLasso <- bsl(cell$data, n = 1500, M = 10000, model = model, covRandWalk = cell$cov,
#' shrinkage = "glasso", penalty = 1.3, parallel = TRUE, verbose = 1L)
#' stopCluster(cl)
#' registerDoSEQ()
#' show(resultCellBSLasso)
#' summary(resultCellBSLasso)
#' plot(resultCellBSLasso, thetaTrue = thetaExact, thin = 20)
#'
#' # Performing semiBSL (reduce the number of iterations M if desired)
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' resultCellSemiBSL <- bsl(cell$data, n = 5000, M = 10000, model = model, covRandWalk = cell$cov,
#' method = "semiBSL", parallel = TRUE, verbose = 1L)
#' stopCluster(cl)
#' registerDoSEQ()
#' show(resultCellSemiBSL)
#' summary(resultCellSemiBSL)
#' plot(resultCellSemiBSL, thetaTrue = thetaExact, thin = 20)
#'
#' # Plotting the results together for comparison
#' # plot using the R default plot function
#' oldpar <- par()
#' par(mar = c(5, 4, 1, 2), oma = c(0, 1, 2, 0))
#' combinePlotsBSL(list(resultCellBSL, resultCelluBSL, resultCellBSLasso, resultCellSemiBSL),
#' which = 1, thetaTrue = thetaExact, thin = 20, label = c("bsl", "ubsl", "bslasso", "semiBSL"),
#' col = 1:4, lty = 1:4, lwd = 1)
#' mtext("Approximate Univariate Posteriors", outer = TRUE, cex = 1.5)
#' par(mar = oldpar$mar, oma = oldpar$oma)
#'
#' }
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @author Ziwen An, Leah F. South and Christopher Drovandi
#' @name cell
#' @usage data(cell)
NULL
#' @describeIn cell The function \code{cell_sim(theta, Yinit, rows, cols,
#' sim_iters, num_obs)} simulates data from the model, using C++ in the
#' backend.
#' @export
cell_sim <-function(theta, Yinit, rows, cols, sim_iters, num_obs) {
Pm <- theta[1]
Pp <- theta[2]
Y <- simulate_cell(Yinit, rows, cols, Pm, Pp, sim_iters, num_obs)
return(Y)
}
#' @describeIn cell The function \code{cell_sum(Y,sum_options)} calculates the
#' summary statistics for this example.
#' @export
cell_sum <- function(Y, Yinit) {
num_obs = dim(Y)[3]
summ_stat = numeric(num_obs+1)
# Hamming distances between cell locations across time
summ_stat[1] = sum(abs(Yinit-Y[, , 1]))
for (i in 2:num_obs) {
summ_stat[i] = sum(abs(Y[, , i-1]-Y[, , i]))
}
# Total number of cells in the final time period
summ_stat[num_obs + 1] = sum(Y[, , num_obs])
return(summ_stat)
}
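# Inert sketch of the summary statistic layout (toy binary lattices, not the
# packaged dataset): num_obs Hamming distances plus the final cell count.
#   set.seed(1)
#   Yinit <- matrix(rbinom(27 * 36, 1, 0.3), 27, 36)
#   Y <- array(rbinom(27 * 36 * 4, 1, 0.3), dim = c(27, 36, 4))
#   ss <- cell_sum(Y, Yinit)
#   length(ss)  # 5 = num_obs + 1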
#' @describeIn cell The function \code{cell_prior(theta)} evaluates the log
#' prior density at the parameter value
#' \eqn{\theta}.
#' @export
cell_prior <- function(theta) {
log(theta[1] > 0 & theta[1] < 1 & theta[2] > 0 & theta[2] < 1)
}
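# Inert sketch: the prior is uniform on the open unit square, so the log density
# is 0 inside and -Inf outside (the latter triggers early rejection in bsl()):
#   cell_prior(c(0.35, 0.001))  # 0
#   cell_prior(c(1.20, 0.001))  # -Inf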
# ---- end of BSL/R/cell.R ----
#' Plot the densities of multiple ``bsl'' class objects.
#'
#' @description The function \code{combinePlotsBSL} can be used to plot multiple
#' BSL densities together, optionally with the true values for the parameters.
#' @param objectList A list of ``bsl'' class objects.
#' @param label A string vector indicating the labels to be shown in
#' the plot legend. The default is \code{NULL}, which uses the names from
#' \code{objectList}.
#' @param legendPosition One of the three string arguments, ``auto'', ``right''
#' or ``bottom'', indicating the legend position. The default is ``auto'',
#' which automatically choose from ``right'' and ``bottom''. Only used when
#' \code{which} is \code{1L}.
#' @param legendNcol An integer argument indicating the number of columns of
#' the legend. The default, \code{NULL}, put all legends in the same row or
#' column depending on \code{legendPosition}. Only used when \code{which} is
#' \code{1L}.
#' @param col A vector argument containing the plotting color for
#' each density curve. Each element of the vector will be passed into
#' \code{lines}. Only used when \code{which} is \code{1L}.
#' @param lty A vector argument containing the line type for each
#' density curve. Each element of the vector will be passed into \code{lines}.
#' Only used when \code{which} is \code{1L}.
#' @param lwd A vector argument containing the line width for each
#' density curve. Each element of the vector will be passed into \code{lines}.
#' Only used when \code{which} is \code{1L}.
#' @param cex.lab The magnification to be used for x and y labels
#' relative to the current setting of cex. To be passed into \code{plot}. Only
#' used when \code{which} is \code{1L}.
#' @param cex.axis The magnification to be used for axis annotation
#' relative to the current setting of cex. To be passed into \code{plot}. Only
#' used when \code{which} is \code{1L}.
#' @param cex.legend The magnification to be used for legend annotation
#' relative to the current setting of cex. Only used when \code{which} is
#' \code{1L}.
#' @param top A string argument of the combined plot title. Only used
#' when \code{which} is \code{2L}.
#' @param options.color A list of additional arguments to pass into function
#' \code{ggplot2::scale_color_manual}. Only used when \code{which} is
#' \code{2L}.
#' @param options.linetype A list of additional arguments to pass into function
#' \code{ggplot2::scale_linetype_manual}. Only used when \code{which} is
#' \code{2L}.
#' @param options.size A list of additional arguments to pass into function
#' \code{ggplot2::scale_size_manual}. Only used when \code{which} is
#' \code{2L}.
#' @inheritParams BSL-class
#'
#' @return No return value, called for the plots produced.
#'
#' @examples
#' \dontshow{
#' toy_sim <- function(n, theta) matrix(rnorm(2*n, theta), nrow = n)
#' toy_sum <- ma2_sum
#'
#' model <- newModel(fnSimVec = toy_sim, fnSum = toy_sum, sumArgs = list(epsilon = 2), theta0 = 0)
#'
#' result1 <- bsl(y = 1:2, n = 50, M = 10, model = model, covRandWalk = matrix(1),
#' method = "BSL")
#' result2 <- bsl(y = 1:2, n = 50, M = 10, model = model, covRandWalk = matrix(1),
#' method = "uBSL")
#' result3 <- bsl(y = 1:2, n = 50, M = 10, model = model, covRandWalk = matrix(1),
#' method = "semiBSL")
#' combinePlotsBSL(list(result1, result2, result3), label = c("BSL","uBSL","semiBSL"), thin = 2)
#' }
#' \dontrun{
#' toy_sim <- function(n, theta) matrix(rnorm(2*n, theta), nrow = n)
#' toy_sum <- ma2_sum
#'
#' model <- newModel(fnSimVec = toy_sim, fnSum = toy_sum, sumArgs = list(epsilon = 2), theta0 = 0)
#'
#' result1 <- bsl(y = 1:2, n = 100, M = 5e3, model = model, covRandWalk = matrix(1),
#' method = "BSL", plotOnTheFly = TRUE)
#' result2 <- bsl(y = 1:2, n = 100, M = 5e3, model = model, covRandWalk = matrix(1),
#' method = "uBSL", plotOnTheFly = TRUE)
#' result3 <- bsl(y = 1:2, n = 100, M = 5e3, model = model, covRandWalk = matrix(1),
#' method = "semiBSL", plotOnTheFly = TRUE)
#' combinePlotsBSL(list(result1, result2, result3), label = c("BSL","uBSL","semiBSL"), thin = 20)
#' }
#'
#' @seealso \code{\link{ma2}}, \code{\link{cell}}, \code{\link{mgnk}} and
#' \code{\link{toad}} for examples.
#' @export
combinePlotsBSL <- function(objectList, which = 1L, thin = 1, burnin = 0, thetaTrue = NULL, label = NULL,
legendPosition = c('auto','right','bottom')[1],
legendNcol = NULL, col = NULL, lty = NULL, lwd = NULL, cex.lab = 1, cex.axis = 1, cex.legend = 0.75,
top = 'Approximate Marginal Posteriors', options.color = list(), options.linetype = list(),
options.size = list(), options.theme = list()) {
if (which == 1L) {
if (length(options.color) != 0 || length(options.linetype) != 0 || length(options.size) != 0 || length(options.theme) != 0) {
warning('"options.color", "options.linetype", "options.size" and "options.theme" are ignored when which = 1')
}
multiPlotDefault(objectList, thin, burnin, thetaTrue, label, legendPosition, legendNcol, col, lty, lwd, cex.lab, cex.axis, cex.legend)
} else if (which == 2L) {
multiPlotGgplot(objectList, thin, burnin, thetaTrue, label, top, options.color, options.linetype, options.size, options.theme)
} else {
stop('Indicate a supported plot number, 1 for R default density plot or 2 for ggplot density plot')
}
}
multiPlotDefault <- function(objectList, thin = 1, burnin = 0, thetaTrue = NULL, label = NULL, legendPosition = c('auto','right','bottom')[1],
legendNcol = NULL, col = NULL, lty = NULL, lwd = NULL, cex.lab = 1, cex.axis = 1, cex.legend = 0.75) {
nList <- length(objectList)
p <- ncol(objectList[[1]]@theta)
a <- floor(sqrt(p))
b <- ceiling(p / a)
if (is.null(col)) {
col <- 1 : nList
} else {
if (length(col) != nList) {
stop ('length of "col" must match "objectList"')
}
}
if (is.null(lty)) {
lty <- 1 : nList
} else {
if (length(lty) != nList) {
stop ('length of "lty" must match "objectList"')
}
}
if (is.null(lwd)) {
lwd <- rep(1, nList)
} else {
if (length(lwd) != nList) {
stop ('length of "lwd" must match "objectList"')
}
}
thetaNames <- objectList[[1]]@model@thetaNames
if (length(thin) == 1L) {
thin <- rep(thin, nList)
}
if (length(burnin) == 1L) {
burnin <- rep(burnin, nList)
}
if (legendPosition == 'auto') {
if (a*b == p) { # legend at bottom
layoutM <- matrix(c(1:p,rep(p+1,b)), nrow = a+1, ncol = b, byrow = TRUE)
layout(mat = layoutM, heights = c(rep(0.85/a,a),0.15))
legendx <- 'center'
legendHorz <- TRUE
} else { # legend at corner
layoutM <- matrix(c(1:p,rep(p+1,a*b-p)),nrow = a, ncol = b, byrow = TRUE)
layout(mat = layoutM, heights = rep.int(1,a))
legendx <- 'center'
legendHorz <- FALSE
}
} else if (legendPosition == 'right') {
if (a*b == p) {
layoutM <- cbind(matrix(c(1:p), nrow = a, ncol = b, byrow = TRUE), rep(p+1,a))
layout(mat = layoutM, widths = c(rep(0.85/b,b),0.15))
legendx <- 'center'
legendHorz <- FALSE
mfg <- c(a, b+1)
} else {
layoutM <- cbind(matrix(c(1:p,rep(p+2,a*b-p)),nrow = a, ncol = b, byrow = TRUE), rep(p+1,a))
layout(mat = layoutM, widths = c(rep(0.85/b,b),0.15))
legendx <- 'center'
legendHorz <- FALSE
}
} else if (legendPosition == 'bottom') {
if (a*b == p) {
layoutM <- matrix(c(1:p,rep(p+1,b)), nrow = a+1, ncol = b, byrow = TRUE)
layout(mat = layoutM, heights = c(rep(0.85/a,a),0.15))
legendx <- 'center'
legendHorz <- TRUE
} else {
layoutM <- rbind(matrix(c(1:p,rep(p+2,a*b-p)),nrow = a, ncol = b, byrow = TRUE), rep(p+1,b))
layout(mat = layoutM, heights = rep.int(1,a))
legendx <- 'center'
legendHorz <- TRUE
}
} else {
stop('"legendPosition" must be "auto" or "right" or "bottom"')
}
if (is.null(legendNcol)) {
legendNcol <- ifelse(legendHorz, nList, 1)
}
idx <- lapply(1:nList, FUN = function(i) seq((burnin[i] + 1), objectList[[i]]@M, thin[i]))
theta <- d <- xRange <- yRange <- vector('list', p)
for (k in 1:p) {
theta[[k]] <- lapply(1:nList, FUN = function(i) objectList[[i]]@theta[idx[[i]], k])
d[[k]] <- lapply(theta[[k]], FUN = density)
xRange[[k]] <- range(sapply(1:nList, FUN = function(i) range(d[[k]][[i]]$x)))
yRange[[k]] <- c(0, max(sapply(1:nList, FUN = function(i) max(d[[k]][[i]]$y))))
}
for (k in 1:p) {
# par(mar = c(5.1,5.1,2,2))
plot(0, type = 'n', main = NA, xlab = thetaNames[k], ylab = 'density', cex.lab = cex.lab,
xlim = xRange[[k]], ylim = yRange[[k]], cex.axis = cex.axis)
for (i in 1 : nList) {
lines(d[[k]][[i]], col = col[i], lty = lty[i], lwd = lwd[i])
}
if (!is.null(thetaTrue)) {
abline(v = thetaTrue[k], col = 'forestgreen', lty = 3)
}
}
oldpar <- par(no.readonly = TRUE) # get current user par settings
on.exit(par(oldpar)) # reset current user par settings
par(mar=c(0,0,0,0), cex = cex.legend)
plot(0, type = "n", axes = FALSE, xlab = "", ylab = "")
if (is.null(label)) {
if (is.null(names(objectList))) {
label <- paste0('result', 1 : nList)
} else {
label <- names(objectList)
}
}
legend(x = legendx, legend = label, col = col, lty = lty, lwd = lwd, ncol = legendNcol)
par(mfrow = c(1,1), mar = c(5, 4, 4, 2) + 0.1)
}
multiPlotGgplot <- function(objectList, thin = 1, burnin = 0, thetaTrue = NULL, label = NULL, top = 'Approximate Marginal Posteriors',
options.color = list(), options.linetype = list(), options.size = list(), options.theme = list()) {
nList <- length(objectList)
p <- ncol(objectList[[1]]@theta)
a <- floor(sqrt(p))
b <- ceiling(p / a)
if (!is.null(thetaTrue) && length(thetaTrue) != p) {
stop('Length of thetaTrue does not match the number of parameters.')
}
thetaNames <- objectList[[1]]@model@thetaNames
if (is.null(label)) {
if (is.null(names(objectList))) {
label <- names(objectList) <- paste0('result', 1 : nList)
} else {
label <- names(objectList)
}
}
samples <- array(list(), nList)
for (i in 1 : nList) {
theta <- getTheta(objectList[[i]], burnin = burnin, thin = thin)
samples[[i]] <- data.frame(theta, label = label[i])
}
samples <- do.call('rbind', samples)
plist <- array(list(), p)
for (i in 1 : p) {
plist[[i]] <- ggplot(samples, aes_string(x = colnames(samples)[i])) +
geom_density(aes(color = label, linetype = label, size = label)) + {
if (length(options.color) != 0) {
do.call(scale_color_manual, options.color)
}
} + {
if (length(options.linetype) != 0) {
do.call(scale_linetype_manual, options.linetype)
}
} + {
if (!'values' %in% names(options.size)) {
options.size$values <- rep(1, nList)
}
do.call(scale_size_manual, options.size)
} +
geom_hline(yintercept = 0, colour = "grey", size = 0.75) + {
if (!is.null(thetaTrue)) {
geom_vline(xintercept = thetaTrue[i], color = 'forestgreen', linetype = 'dashed', size = 0.5)
}
} +
labs(x = thetaNames[i], y = 'density') + {
if (!'plot.margin' %in% names(options.theme)) {
options.theme$plot.margin <- unit(rep(0.08,4), "npc")
}
if (!'legend.title' %in% names(options.theme)) {
options.theme$legend.title <- element_blank()
}
options.theme$legend.position <- 'none'
do.call(theme, options.theme)
}
}
g <- ggplotGrob(plist[[1]] + theme(legend.position = 'bottom'))$grobs
legend <- g[[which(sapply(g, function(x) x$name) == "guide-box")]]
lheight <- sum(legend$height)
combined <- arrangeGrob(do.call(arrangeGrob, c(plist, nrow = a, ncol = b, top = top)), legend, nrow = 2, heights = unit.c(unit(1, "npc") - lheight, lheight))
grid.newpage()
grid.draw(combined)
}
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/combinePlotsBSL.R
|
covWarton <- function(S, gamma) {
if (gamma < 0 || gamma > 1) {
stop("\"gamma\" must be between 0 and 1")
}
ns <- ncol(S)
D1 <- diag(diag(S)^-0.5)
D2 <- diag(diag(S)^0.5)
R <- gamma * D1 %*% S %*% D1 + (1 - gamma) * diag(ns)
Sigma <- D2 %*% R %*% D2
return (Sigma)
}
corrWarton <- function(R, gamma) {
ns <- ncol(R)
gamma * R + (1 - gamma) * diag(ns)
}
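# Comment-only illustration of Warton's shrinkage: off-diagonal covariances
# are scaled by gamma while the variances are preserved (the toy matrix below
# is an assumption for demonstration, not part of the package):
# S <- matrix(c(2, 0.8, 0.8, 1), 2, 2)
# covWarton(S, gamma = 0.5) # covariance 0.8 shrinks to 0.4; diagonal stays (2, 1)
# corrWarton(cov2cor(S), gamma = 0.5) # the same shrinkage applied to a correlation matrix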
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/covWarton.R
|
#' Estimate the synthetic likelihood
#'
#' @description This function computes the estimated synthetic (log) likelihood
#' using one of the four methods (``BSL'', ``uBSL'', ``semiBSL'' and
#' ``BSLmisspec''). See the links in the See Also section below for more
#' details.
#'
#' @inheritParams gaussianSynLike
#' @inheritParams bsl
#' @param ... Arguments to be passed to methods.
#'
#' \itemize{
#'
#' \item \code{shrinkage} Available for methods ``BSL'' and ``semiBSL''. A
#' string argument indicating which shrinkage method to be used. The default
#' is \code{NULL}, which means no shrinkage is used. Shrinkage estimation is
#' only available for methods ``BSL'' and ``semiBSL''. Current options are
#' ``glasso'' for the graphical lasso method of
#' \insertCite{Friedman2008;textual}{BSL} and ``Warton'' for the ridge
#' regularisation method of \insertCite{Warton2008;textual}{BSL}.
#'
#' \item \code{penalty} Available for methods ``BSL'' and ``semiBSL''. The
#' penalty value to be used for the specified shrinkage method. Must be
#' between zero and one if the shrinkage method is ``Warton''.
#'
#' \item \code{standardise} Available for method ``BSL''. A logical argument
#' that determines whether to standardise the summary statistics before
#' applying the graphical lasso. This is only valid if method is ``BSL'',
#' shrinkage is ``glasso'' and penalty is not \code{NULL}. The diagonal
#' elements will not be penalised if the shrinkage method is ``glasso''. The
#' default is \code{FALSE}.
#'
#' \item \code{GRC} Available for method ``BSL''. A logical argument
#' indicating whether the Gaussian rank correlation matrix
#' \insertCite{Boudt2012}{BSL} should be used to estimate the covariance
#' matrix in ``BSL'' method. The default is \code{FALSE}, which uses the
#' sample covariance by default.
#'
#' \item \code{whitening} Available for method ``BSL''. This argument determines
#' whether Whitening transformation should be used in ``BSL'' method with
#' Warton's shrinkage. Whitening transformation helps decorrelate the summary
#' statistics, thus encourages sparsity of the synthetic likelihood covariance
#' matrix. This might allow heavier shrinkage to be applied without losing
#' much accuracy, hence allowing the number of simulations to be reduced. By
#' default, \code{NULL} represents no Whitening transformation. Otherwise this
#' is enabled if a Whitening matrix is provided. See
#' \code{\link{estimateWhiteningMatrix}} for the function to estimate the
#' Whitening matrix.
#'
#' \item \code{ssyTilde} Available for method ``BSL''. The whitened observed
#' summary statistic. If this is not \code{NULL}, it will be used to save
#' computation effort. Only used if Whitening is enabled.
#'
#' \item \code{kernel} Available for method ``semiBSL''. A string argument
#' indicating the smoothing kernel to pass into \code{density} for estimating
#' the marginal distribution of each summary statistic. Only ``gaussian" and
#' ``epanechnikov" are available. The default is ``gaussian".
#'
#' \item \code{type} Available for method ``BSLmisspec''. A string argument
#' indicating which method is used to account for and detect potential
#' incompatibility. The two options are "mean" and "variance".
#'
#' \item \code{gamma} Available for method ``BSLmisspec''. The additional
#' latent parameter to account for possible incompatability between the model
#' and observed summary statistic. In ``BSLmisspec'' method, this is updated
#' with a slice sampler \insertCite{Neal2003}{BSL}.
#'
#' }
#'
#' @return The estimated synthetic (log) likelihood value.
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @examples
#' data(ma2)
#' ssy <- ma2_sum(ma2$data)
#' m <- newModel(fnSim = ma2_sim, fnSum = ma2_sum, simArgs = ma2$sim_args,
#' theta0 = ma2$start)
#' ssx <- simulation(m, n = 300, theta = c(0.6, 0.2), seed = 10)$ssx
#' estimateLoglike(ssy, ssx, method = "BSL")
#' estimateLoglike(ssy, ssx, method = "uBSL")
#' estimateLoglike(ssy, ssx, method = "semiBSL")
#' estimateLoglike(ssy, ssx, method = "BSLmisspec", type = "mean", gamma = rep(0.1, 50))
#'
#' @seealso \code{\link{gaussianSynLike}},
#' \code{\link{gaussianSynLikeGhuryeOlkin}},
#' \code{\link{semiparaKernelEstimate}} and \code{\link{synLikeMisspec}}.
#' @export
estimateLoglike <- function(ssy, ssx, method = c("BSL", "uBSL", "semiBSL", "BSLmisspec"), log = TRUE, verbose = FALSE, ...) {
method <- match.arg(method)
switch(method,
"BSL" = gaussianSynLike(ssy, ssx, log = log, verbose = verbose, ...),
"uBSL" = gaussianSynLikeGhuryeOlkin(ssy, ssx, log = log, verbose = verbose, ...),
"semiBSL" = semiparaKernelEstimate(ssy, ssx, log = log, ...),
"BSLmisspec" = synLikeMisspec(ssy, ssx, log = log, verbose = verbose, ...)
)
}
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/estimateLoglike.R
|
#' Estimate the Whitening matrix to be used in the ``wBSL'' method of
#' \insertCite{Priddle2019;textual}{BSL}
#'
#' @description This function estimates the Whitening matrix to be used in BSL
#' with Warton's shrinkage and Whitening (``wBSL'' method of
#' \insertCite{Priddle2019;textual}{BSL}). The Whitening transformation and
#' decorrelation methods are detailed in \insertCite{Kessy2018;textual}{BSL}.
#'
#' @param n The number of model simulations to estimate the Whitening matrix.
#' @param method The type of Whitening method to be used. The default is
#' ``PCA''.
#' @param thetaPoint A point estimate of the parameter value with non-negligible
#' posterior support.
#' @inheritParams bsl
#' @return The estimated Whitening matrix.
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @examples
#' \dontshow{
#' data(ma2)
#' model <- newModel(fnSim = ma2_sim, fnSum = ma2_sum, simArgs = ma2$sim_args, theta0 = ma2$start)
#' W <- estimateWhiteningMatrix(500, model, method = "PCA", thetaPoint = c(0.6, 0.2))
#' }
#' \dontrun{
#' data(ma2)
#' model <- newModel(fnSim = ma2_sim, fnSum = ma2_sum, simArgs = ma2$sim_args, theta0 = ma2$start)
#' W <- estimateWhiteningMatrix(20000, model, method = "PCA", thetaPoint = c(0.6, 0.2))
#' }
#'
#' @export
estimateWhiteningMatrix <- function(n, model, method = c("PCA", "ZCA", "Cholesky", "ZCA-cor", "PCA-cor"),
thetaPoint = NULL, parallel = FALSE, parallelArgs = NULL) {
method <- match.arg(method)
if (parallel) {
myFnSimSum <- function(n, theta) fn(model)$fnPar(n, theta, parallelArgs)
} else {
myFnSimSum <- fn(model)$fn
}
if (is.null(thetaPoint)) {
thetaPoint <- model@theta0
}
ssx <- myFnSimSum(n, thetaPoint)
S <- cov(ssx)
W <- whitening::whiteningMatrix(S, method = method)
return(W)
}
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/estimateWhiteningMatrix.R
|
#' Gaussian rank correlation
#'
#' @description This function computes the Gaussian rank correlation of
#' \insertCite{Boudt2012;textual}{BSL}.
#' @param x A numeric matrix representing data where the number of rows
#' is the number of independent data points and the number of columns is the
#' number of variables in the dataset.
#' @param vec A logical argument indicating if the vector of correlations
#' should be returned instead of a matrix.
#' @return Gaussian rank correlation matrix (default) or a vector of
#' pair correlations.
#' @references
#'
#' \insertAllCited{}
#'
#' @rdname gaussianRankCorr
#' @examples
#' data(ma2)
#' model <- newModel(fnSimVec = ma2_sim_vec, fnSum = ma2_sum, simArgs = list(TT = 10),
#' theta0 = ma2$start, fnLogPrior = ma2_prior)
#' set.seed(100)
#'
#' # generate 1000 simulations from the ma2 model
#' x <- simulation(model, n = 1000, theta = c(0.6, 0.2))$x
#'
#' corr1 <- cor(x) # traditional correlation matrix
#' corr2 <- gaussianRankCorr(x) # Gaussian rank correlation matrix
#' oldpar <- par()
#' par(mfrow = c(1, 2))
#' image(corr1, main = 'traditional correlation matrix')
#' image(corr2, main = 'Gaussian rank correlation matrix')
#' par(mfrow = oldpar$mfrow)
#'
#' std <- apply(x, MARGIN = 2, FUN = sd) # standard deviations
#' cor2cov(gaussianRankCorr(x), std) # convert to covariance matrix
#'
#' @seealso \code{\link{cor2cov}} for conversion from correlation matrix
#' to covariance matrix.
#' @export
gaussianRankCorr <- function(x, vec = FALSE) {
n <- nrow(x)
p <- ncol(x)
stopifnot(p >= 2)
r <- apply(x, FUN = rank, MARGIN = 2, ties.method = "average")
rqnorm <- qnorm(r / (n + 1))
den <- sum((qnorm(1 : n / (n + 1))) ^ 2)
res <- unlist(sapply(1:(p-1), FUN = function(i) c(rqnorm[, i] %*% rqnorm[, (i+1):p]))) / den
if (!vec) {
res <- p2P(res)
}
return (res)
}
#' Convert a correlation matrix to a covariance matrix
#' @description This function converts a correlation matrix to a covariance matrix
#' @param corr The correlation matrix to be converted. This must be symmetric.
#' @param std A vector that contains the standard deviations of the variables in the correlation matrix.
#' @return The covariance matrix.
#' @export
cor2cov <- function(corr, std) {
outer(std, std) * corr
}
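# Comment-only illustration:
# corr <- matrix(c(1, 0.5, 0.5, 1), 2, 2)
# std <- c(2, 3)
# cor2cov(corr, std) # covariance matrix with variances 4 and 9 and covariance 3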
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/gaussianRankCorr.R
|
#' Estimate the Gaussian synthetic (log) likelihood
#'
#' @description This function estimates the Gaussian synthetic log-likelihood
#' \insertCite{@see @Wood2010 and @Price2018}{BSL}. Several extensions are
#' provided in this function: \code{shrinkage} enables shrinkage estimation of
#' the covariance matrix and is helpful to bring down the number of model
#' simulations (see \insertCite{An2019;textual}{BSL} for an example of BSL
#' with glasso \insertCite{Friedman2008}{BSL} shrinkage estimation);
#' \code{GRC} uses Gaussian rank correlation \insertCite{Boudt2012}{BSL} to
#' find a more robust correlation matrix; \code{whitening}
#' \insertCite{Kessy2018}{BSL} could further reduce the number of model
#' simulations upon Warton's shrinkage \insertCite{Warton2008}{BSL} by
#' decorrelating the summary statistics.
#'
#' @param ssy The observed summary statistic.
#' @param ssx A matrix of the simulated summary statistics. The number
#' of rows is the same as the number of simulations per iteration.
#' @param ssyTilde The whitened observed summary statistic. If this is not
#' \code{NULL}, it will be used to save computation effort. Only used if
#' Whitening is enabled.
#' @param log A logical argument indicating if the log of likelihood is
#' given as the result. The default is \code{TRUE}.
#' @param verbose A logical argument indicating whether an error message
#' should be printed if the function fails to compute a likelihood. The
#' default is \code{FALSE}.
#' @inheritParams bsl
#'
#' @return The estimated synthetic (log) likelihood value.
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @examples
#' data(ma2)
#' ssy <- ma2_sum(ma2$data)
#' m <- newModel(fnSim = ma2_sim, fnSum = ma2_sum, simArgs = ma2$sim_args,
#' theta0 = ma2$start)
#' ssx <- simulation(m, n = 300, theta = c(0.6, 0.2), seed = 10)$ssx
#'
#' # the standard Gaussian synthetic likelihood (the likelihood estimator used in BSL)
#' gaussianSynLike(ssy, ssx)
#' # the Gaussian synthetic likelihood with glasso shrinkage estimation
#' # (the likelihood estimator used in BSLasso)
#' gaussianSynLike(ssy, ssx, shrinkage = 'glasso', penalty = 0.1)
#' # the Gaussian synthetic likelihood with Warton's shrinkage estimation
#' gaussianSynLike(ssy, ssx, shrinkage = 'Warton', penalty = 0.9)
#' # the Gaussian synthetic likelihood with Warton's shrinkage estimation and Whitening transformation
#' W <- estimateWhiteningMatrix(20000, m)
#' gaussianSynLike(ssy, ssx, shrinkage = 'Warton', penalty = 0.9, whitening = W)
#'
#' @seealso Other available synthetic likelihood estimators:
#' \code{\link{gaussianSynLikeGhuryeOlkin}} for the unbiased synthetic
#' likelihood estimator, \code{\link{semiparaKernelEstimate}} for the
#' semi-parametric likelihood estimator, \code{\link{synLikeMisspec}} for the
#' Gaussian synthetic likelihood estimator for model misspecification.
#'
#' @export
gaussianSynLike <- function(ssy, ssx, shrinkage = NULL, penalty = NULL, standardise = FALSE, GRC = FALSE,
whitening = NULL, ssyTilde = NULL, log = TRUE, verbose = FALSE) {
if (!is.null(shrinkage)) {
flagShrinkage <- TRUE
shrinkage <- match.arg(shrinkage, c("glasso", "Warton"))
} else {
flagShrinkage <- FALSE
}
if (is.null(whitening)) {
flagWhitening <- FALSE
ssyTilde <- NULL
} else if (is.atomic(whitening) && is.matrix(whitening)) {
ns <- length(ssy)
if (all(dim(whitening) == c(ns, ns))) {
flagWhitening <- TRUE
} else {
stop(paste("The Whitening matrix must be of dimension", ns, "by", ns))
}
} else {
stop("invalid argument \"whitening\"")
}
if (!flagShrinkage && !is.null(penalty)) {
warning('"penalty" will be ignored because no shrinkage method is specified')
}
if (flagShrinkage && is.null(penalty)) {
stop('a penalty value must be specified to enable shrinkage estimation')
}
if (!flagShrinkage && standardise) {
warning('"standardise" will be ignored because no shrinkage method is specified')
}
if (!flagShrinkage && flagWhitening) {
warning('"whitening" will be ignored because no shrinkage method is specified')
}
if (flagShrinkage) {
if (shrinkage != 'glasso' && standardise) {
warning("standardisation is only supported if shrinkage is \"glasso\"")
}
if (shrinkage != 'Warton' && flagWhitening) {
warning("Whitening is only supported if shrinkage is \"Warton\"")
}
}
if (!flagShrinkage) { # BSL if no shrinkage
mu <- colMeans(ssx)
if (GRC) {
std <- apply(ssx, MARGIN = 2, FUN = sd)
corr <- gaussianRankCorr(ssx)
Sigma <- cor2cov(corr, std)
} else {
Sigma <- cov(ssx)
}
} else { # BSL with shrinkage (glasso or Warton)
mu <- colMeans(ssx)
if (shrinkage == 'glasso') {
if (!standardise) { # use graphical lasso without standardisation
if (GRC) {
std <- apply(ssx, MARGIN = 2, FUN = sd)
corr <- gaussianRankCorr(ssx)
S <- cor2cov(corr, std)
} else {
S <- cov(ssx)
}
gl <- glasso(S, rho = penalty)
Sigma <- gl$w
} else { # standardise the summary statistics before passing into the graphical lasso function
n <- nrow(ssx)
ns <- ncol(ssx)
std <- apply(ssx, MARGIN = 2, FUN = sd)
ssx_std <- (ssx - matrix(mu, n, ns, byrow = TRUE)) / matrix(std, n, ns, byrow = TRUE)
if (GRC) {
corr <- gaussianRankCorr(ssx_std)
S <- cor2cov(corr, apply(ssx_std, MARGIN = 2, FUN = sd))
} else {
S <- cov(ssx_std)
}
gl <- glasso(S, rho = penalty, penalize.diagonal = FALSE) # do not penalise the diagonal entries since we want the correlation matrix
corr <- gl$w
Sigma <- outer(std, std) * corr
}
}
if (shrinkage == 'Warton') {
if (flagWhitening) { # Whitening transformation
if (is.null(ssyTilde)) {
ssy <- c(tcrossprod(ssy, whitening))
} else { # inherit ssy to save computation
ssy <- ssyTilde
}
ssx <- tcrossprod(ssx, whitening)
mu <- c(tcrossprod(mu, whitening))
}
if (GRC) {
std <- apply(ssx, MARGIN = 2, FUN = sd)
corr <- gaussianRankCorr(ssx)
S <- cor2cov(corr, std)
} else {
S <- cov(ssx)
}
Sigma <- covWarton(S, penalty)
}
}
loglike <- try(mvtnorm::dmvnorm(ssy, mean = mu, sigma = Sigma, log = log))
if (inherits(loglike, 'try-error')) {
if (verbose) {
cat('*** reject (probably singular covariance matrix) ***\n')
}
return (-Inf)
}
return (loglike)
}
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/gaussianSynLike.R
|
#' Estimate the Gaussian synthetic (log) likelihood with an unbiased estimator
#'
#' @description This function computes an unbiased, nonnegative estimate of a
#' normal density function from simulations assumed to be drawn from it. See
#' \insertCite{Price2018;textual}{BSL} and
#' \insertCite{Ghurye1969;textual}{BSL}.
#'
#' @inheritParams gaussianSynLike
#'
#' @return The estimated synthetic (log) likelihood value.
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @examples
#' data(ma2)
#' ssy <- ma2_sum(ma2$data)
#' m <- newModel(fnSim = ma2_sim, fnSum = ma2_sum, simArgs = ma2$sim_args,
#' theta0 = ma2$start)
#' ssx <- simulation(m, n = 300, theta = c(0.6, 0.2), seed = 10)$ssx
#'
#' # unbiased estimate of the Gaussian synthetic likelihood
#' # (the likelihood estimator used in uBSL)
#' gaussianSynLikeGhuryeOlkin(ssy, ssx)
#'
#' @seealso Other available synthetic likelihood estimators:
#' \code{\link{gaussianSynLike}} for the standard synthetic likelihood
#' estimator, \code{\link{semiparaKernelEstimate}} for the semi-parametric
#' likelihood estimator, \code{\link{synLikeMisspec}} for the Gaussian
#' synthetic likelihood estimator for model misspecification.
#'
#' @export
gaussianSynLikeGhuryeOlkin <- function(ssy, ssx, log = TRUE, verbose = FALSE) {
d <- length(ssy)
n <- nrow(ssx)
mu <- colMeans(ssx)
Sigma <- cov(ssx)
psi <- (n-1) * Sigma - (ssy-mu) %*% t(ssy-mu) / (1-1/n)
temp <- try(chol(psi))
if (inherits(temp, 'try-error')) {
if (verbose) {
cat('*** reject (cov(ssx) is not positive definite) ***\n')
}
loglike <- -Inf
} else {
A <- wcon(d, n-2) - wcon(d, n-1) - 0.5*d*log(1-1/n)
B <- -0.5 * (n-d-2) * (log(n-1) + logdet(Sigma))
C <- 0.5 * (n-d-3) * logdet(psi)
loglike <- -0.5*d*log(2*pi) + A + B + C
}
if (!log) {
loglike <- exp(loglike)
}
return (loglike)
}
# log of c(k,nu) from Ghurye & Olkin (1969)
wcon <- function(k, nu) {
cc <- -k*nu/2*log(2) - k*(k-1)/4*log(pi) - sum(lgamma(0.5*(nu-(1:k)+1)))
cc
}
# calculating the log of the determinant
logdet <- function(A) {
L <- chol(A)
y <- 2 * sum(log(diag(L)))
y
}
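# Comment-only check: for a positive definite matrix, the Cholesky-based
# log-determinant agrees with log(det(A)) while being numerically more stable.
# A <- matrix(c(2, 0.5, 0.5, 1), 2, 2)
# logdet(A) # 0.5596158, the same as log(det(A)) = log(1.75)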
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/gaussianSynLikeGhuryeOlkin.R
|
.onUnload <- function (libpath) {
library.dynam.unload("BSL", libpath)
}
#' @importFrom stats runif cov sd pnorm rnorm qnorm density approx dexp rexp median quantile
#' @importFrom graphics par lines abline layout legend
#' @importFrom utils flush.console capture.output head tail
#' @importFrom MASS mvrnorm
#' @importFrom mvtnorm rmvnorm
#' @importFrom glasso glasso
#' @importFrom coda effectiveSize
#' @import foreach
#' @importFrom ggplot2 ggplot aes_string geom_density geom_hline geom_vline theme labs element_blank ggplotGrob
#' @importFrom ggplot2 scale_color_manual scale_linetype_manual scale_size_manual
#' @importFrom ggplot2 aes geom_line facet_wrap label_both geom_label element_text
#' @importFrom grid unit unit.c grid.newpage grid.draw
#' @importFrom gridExtra grid.arrange arrangeGrob
#' @importFrom copula normalCopula p2P P2p dCopula
#' @importFrom whitening whiteningMatrix
#' @importFrom Rdpack reprompt
#' @importFrom doRNG registerDoRNG
#' @import methods
NULL
#' @useDynLib BSL, .registration = TRUE
#' @importFrom Rcpp sourceCpp
NULL
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/imports.R
|
kernelCDF <- function(x, kernel) {
switch(kernel,
'gaussian' = pnorm(x),
'epanechnikov' = epanechnikovCDF(x))
}
epanechnikovCDF <- function(x) {
y <- ifelse(x > 1, 1, 0)
idx <- which(abs(x) <= 1)
y[idx] <- -0.25 * x[idx] ^ 3 + 0.75 * x[idx] + 0.5
return (y)
}
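# Comment-only sanity check: the Epanechnikov CDF equals 0 at -1, 0.5 at 0 and
# 1 at 1.
# epanechnikovCDF(c(-2, -1, 0, 1, 2)) # c(0, 0, 0.5, 1, 1)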
# rectangularCDF <- function(x) {
# y <- ifelse(x > 1, 1, 0)
# idx <- which(abs(x) <= 1)
# y[idx] <- 0.5 * x[idx] + 0.5
# return (y)
# }
#
# triangularCDF <- function(x) {
# y <- ifelse(x > 1, 1, 0)
# idx <- which(x >= -1 & x <= 0)
# y[idx] <- 0.5 * x[idx] ^ 2 + x[idx] + 0.5
# idx <- which(x > 0 & x <= 1)
# y[idx] <- -0.5 * x[idx] ^ 2 + x[idx] + 0.5
# return (y)
# }
#
# biweightCDF <- function(x) {
# y <- ifelse(x > 1, 1, 0)
# idx <- which(abs(x) <= 1)
# y[idx] <- 3/16*x[idx]^5 - 5/8*x[idx]^3 + 15/16*x[idx] + 0.5
# return (y)
# }
#
# optcosineCDF <- function(x) {
# y <- ifelse(x > 1, 1, 0)
# idx <- which(abs(x) <= 1)
# y[idx] <- 0.5*sin(0.5*pi*x[idx]) + 0.5
# return (y)
# }
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/kernelCDF.R
|
paraLogitTransform <- function(theta, bound) {
p <- length(theta)
thetaTilde <- numeric(p)
type <- as.character(is.infinite(bound) %*% c(1,2))
for (i in 1:p) {
a <- bound[i, 1]
b <- bound[i, 2]
x <- theta[i]
thetaTilde[i] <- switch(type[i],
'0' = log((x-a)/(b-x)),
'1' = log(1/(b-x)),
'2' = log(x-a),
'3' = x
)
}
return(thetaTilde)
}
paraLogitBackTransform <- function(thetaTilde, bound) {
p <- length(thetaTilde)
theta <- numeric(p)
type <- as.character(is.infinite(bound) %*% c(1,2))
for (i in 1:p) {
a <- bound[i, 1]
b <- bound[i, 2]
y <- thetaTilde[i]
ey <- exp(y)
theta[i] <- switch(type[i],
'0' = a/(1+ey) + b/(1+1/ey),
'1' = b-1/ey,
'2' = a+ey,
'3' = y
)
}
return(theta)
}
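# Comment-only round-trip check, assuming one parameter bounded on (0, 1) and
# one unbounded parameter:
# bound <- rbind(c(0, 1), c(-Inf, Inf))
# tt <- paraLogitTransform(c(0.3, 1.5), bound) # c(log(0.3/0.7), 1.5)
# paraLogitBackTransform(tt, bound) # recovers c(0.3, 1.5)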
# log-Jacobian of the back-transform, i.e. log |d theta / d thetaTilde|
jacobianLogitTransform <- function(thetaTilde, bound, log = TRUE) {
p <- length(thetaTilde)
type <- as.character(is.infinite(bound) %*% c(1,2))
logJ <- numeric(p)
for (i in 1:p) {
y <- thetaTilde[i]
if (type[i] == '0') {
a <- bound[i, 1]
b <- bound[i, 2]
ey <- exp(y)
}
logJ[i] <- switch(type[i],
'0' = log(b-a) - log(1/ey+2+ey),
'1' = -y, # theta = b - exp(-y), so d theta / d y = exp(-y)
'2' = y, # theta = a + exp(y), so d theta / d y = exp(y)
'3' = 0
)
}
J <- sum(logJ)
if (!log) {
J <- exp(J)
}
return(J)
}
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/logitTransform.R
|
#' An MA(2) model
#'
#' @description In this example we wish to estimate the parameters of a simple
#' MA(2) time series model. We provide the data and tuning parameters required
#' to reproduce the results in \insertCite{An2019;textual}{BSL}.
#' The journal article \insertCite{An2022;textual}{BSL} provides a full
#' description of how to use this package for the toad example.
#'
#' @param theta A vector of proposed model parameters,
#' \eqn{\theta_1} and
#' \eqn{\theta_2}.
#' @param n The number of simulations to run with the vectorised
#' simulation function.
#' @param x Observed or simulated data in the format of a vector of length
#' \eqn{TT}.
#' @param TT The number of observations.
#' @param epsilon The skewness parameter in the sinh-arcsinh transformation.
#' @param delta The kurtosis parameter in the sinh-arcsinh transformation.
#'
#' @details This example is based on estimating the parameters of a basic MA(2)
#' time series model of the form
#'
#' \deqn{y_t = z_t + \theta_1 z_{t-1} + \theta_2 z_{t-2},}
#'
#' where \eqn{t=1,\ldots,TT} and \eqn{z_t \sim N(0,1)}
#' for \eqn{t=-1,0,\ldots,TT}. A uniform
#' prior is used for this example, subject to the restrictions that
#' \eqn{-2<\theta_1<2},
#' \eqn{\theta_1+\theta_2>-1}
#' and
#' \eqn{\theta_1-\theta_2<1}
#' so that invertibility of the time series is satisfied. The summary
#' statistics are simply the full data.
#'
#' @section A simulated dataset:
#'
#' An example ``observed'' dataset and the tuning parameters relevant to that
#' example can be obtained using \code{data(ma2)}. This ``observed'' data is a
#' simulated dataset with
#' \eqn{\theta_1 = 0.6},
#' \eqn{\theta_2=0.2} and
#' \eqn{TT=50}. Further information about this model and the specific choices
#' of tuning parameters used in BSL and BSLasso can be found in An et al.
#' (2019).
#'
#' \itemize{
#'
#' \item \code{data}: A time series dataset, in the form of a vector of length
#' \eqn{TT}
#'
#' \item \code{sim_args}: A list containing \eqn{TT=50}
#'
#'
#' \item \code{start}: A vector of suitable initial values of the parameters
#' for MCMC
#'
#' \item \code{cov}: The covariance matrix of a multivariate normal random
#' walk proposal distribution used in the MCMC, in the form of a 2
#' \eqn{\times} 2 matrix }
#'
#' @examples
#' \dontrun{
#' # Load the data for this example and set up the model object
#' data(ma2)
#' model <- newModel(fnSimVec = ma2_sim_vec, fnSum = ma2_sum, simArgs = ma2$sim_args,
#' theta0 = ma2$start, fnLogPrior = ma2_prior)
#' thetaExact <- c(0.6, 0.2)
#'
#' # reduce the number of iterations M if desired for all methods below
#' # Method 1: standard BSL
#' resultMa2BSL <- bsl(y = ma2$data, n = 500, M = 300000, model = model, covRandWalk = ma2$cov,
#' method = "BSL", verbose = 1L)
#' show(resultMa2BSL)
#' summary(resultMa2BSL)
#' plot(resultMa2BSL, thetaTrue = thetaExact, thin = 20)
#'
#' # Method 2: unbiased BSL
#' resultMa2uBSL <- bsl(y = ma2$data, n = 500, M = 300000, model = model, covRandWalk=ma2$cov,
#' method = "uBSL", verbose = 1L)
#' show(resultMa2uBSL)
#' summary(resultMa2uBSL)
#' plot(resultMa2uBSL, thetaTrue = thetaExact, thin = 20)
#'
#' # Method 3: BSLasso (BSL with glasso shrinkage estimation)
#' # tune the penalty parameter first
#' ssy <- ma2_sum(ma2$data)
#' lambdaAll <- list(exp(seq(-5.5,-1.5,length.out=20)))
#' set.seed(100)
#' penaltyGlasso <- selectPenalty(ssy = ssy, n = 300, lambdaAll, theta = thetaExact,
#' M = 100, sigma = 1.5, model = model, method = "BSL", shrinkage = "glasso")
#' penaltyGlasso
#' plot(penaltyGlasso)
#'
#' resultMa2BSLasso <- bsl(y = ma2$data, n = 300, M = 250000, model = model, covRandWalk=ma2$cov,
#' method = "BSL", shrinkage = "glasso", penalty = 0.027, verbose = 1L)
#' show(resultMa2BSLasso)
#' summary(resultMa2BSLasso)
#' plot(resultMa2BSLasso, thetaTrue = thetaExact, thin = 20)
#'
#' # Method 4: BSL with Warton's shrinkage and Whitening
#' # estimate the Whitening matrix and tune the penalty parameter first
#' W <- estimateWhiteningMatrix(20000, model, method = "PCA", thetaPoint = ma2$start)
#' gammaAll <- list(seq(0.3, 0.8, 0.02))
#' set.seed(100)
#' penaltyWarton <- selectPenalty(ssy = ssy, n = 300, gammaAll, theta = thetaExact,
#' M = 100, sigma = 1.2, model = model, method = "BSL", shrinkage = "Warton",
#' whitening = W)
#' penaltyWarton
#' plot(penaltyWarton, logscale = FALSE)
#'
#' resultMa2Whitening <- bsl(y = ma2$data, n = 300, M = 250000, model = model, covRandWalk=ma2$cov,
#' method = "BSL", shrinkage = "Warton", whitening = W,
#' penalty = 0.52, verbose = 1L)
#' show(resultMa2Whitening)
#' summary(resultMa2Whitening)
#' plot(resultMa2Whitening, thetaTrue = thetaExact, thin = 20)
#'
#' # Method 5: semiBSL, the summary statistics function is different from previous methods
#' model2 <- newModel(fnSimVec = ma2_sim_vec, fnSum = ma2_sum, simArgs = ma2$sim_args,
#' sumArgs = list(epsilon = 2), theta0 = ma2$start, fnLogPrior = ma2_prior)
#' sim <- simulation(model, n = 1e4, theta = ma2$start, seed = 1) # run a short simulation
#' plot(density(sim$ssx[, 1])) # the first marginal summary statistic is right-skewed
#' resultMa2SemiBSL <- bsl(y = ma2$data, n = 500, M = 200000, model = model2, covRandWalk=ma2$cov,
#' method = "semiBSL", verbose = 1L)
#' show(resultMa2SemiBSL)
#' summary(resultMa2SemiBSL)
#' plot(resultMa2SemiBSL, thetaTrue = thetaExact, thin = 20)
#'
#' # Method 6: BSL with consideration of model misspecification (mean adjustment)
#' resultMa2Mean <- bsl(y = ma2$data, n = 500, M = 200000, model = model, covRandWalk=ma2$cov,
#' method = "BSLmisspec", misspecType = "mean", verbose = 1L)
#' show(resultMa2Mean)
#' summary(resultMa2Mean)
#' plot(resultMa2Mean, thetaTrue = thetaExact, thin = 20)
#'
#' # Method 7: BSL with consideration of model misspecification (variance inflation)
#' resultMa2Variance <- bsl(y = ma2$data, n = 500, M = 200000, model = model, covRandWalk=ma2$cov,
#' method = "BSLmisspec", misspecType = "variance", verbose = 1L)
#' show(resultMa2Variance)
#' summary(resultMa2Variance)
#' plot(resultMa2Variance, thetaTrue = thetaExact, thin = 20)
#'
#' # Plotting the results together for comparison
#' # plot using the R default plot function
#' oldpar <- par()
#' par(mar = c(5, 4, 1, 2), oma = c(0, 1, 2, 0))
#' combinePlotsBSL(list(resultMa2BSL, resultMa2uBSL, resultMa2BSLasso, resultMa2SemiBSL), which = 1,
#' thetaTrue = thetaExact, thin = 20, label = c("bsl", "uBSL", "bslasso", "semiBSL"),
#' col = c("black", "red", "blue", "green"), lty = 1:4, lwd = 1)
#' mtext("Approximate Univariate Posteriors", outer = TRUE, cex = 1.5)
#'
#' # plot using the ggplot2 package
#' combinePlotsBSL(list(resultMa2BSL, resultMa2uBSL, resultMa2BSLasso, resultMa2SemiBSL), which = 2,
#' thetaTrue = thetaExact, thin = 20, label = c("bsl", "ubsl", "bslasso", "semiBSL"),
#' options.color = list(values=c("black", "red", "blue", "green")),
#' options.linetype = list(values = 1:4), options.size = list(values = rep(1, 4)),
#' options.theme = list(plot.margin = grid::unit(rep(0.03,4), "npc"),
#' axis.title = ggplot2::element_text(size=12), axis.text = ggplot2::element_text(size = 8),
#' legend.text = ggplot2::element_text(size = 12)))
#' par(mar = oldpar$mar, oma = oldpar$oma)
#' }
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @author Ziwen An, Leah F. South and Christopher Drovandi
#'
#' @name ma2
#' @usage data(ma2)
NULL
#' @describeIn ma2 Simulates an MA(2) time series.
#' @export
ma2_sim <- function(theta, TT) {
rand <- rnorm(TT + 2)
y <- rand[3 : (TT+2)] + theta[1] * rand[2 : (TT+1)] + theta[2] * rand[1 : TT]
return(y)
}
#' @describeIn ma2 Simulates n MA(2) time series with a vectorised simulation
#' function.
#' @export
ma2_sim_vec <- function(n, theta, TT) {
rand <- matrix(rnorm(n * (TT + 2)), n, TT + 2)
# the "+" must end the line so that the theta[2] term belongs to the same
# expression; a leading "+" on the next line would be parsed as a separate
# statement and silently discarded
y <- rand[, 3 : (TT + 2)] + theta[1] * rand[, 2 : (TT + 1)] +
theta[2] * rand[, 1 : TT]
return(y)
}
#' @describeIn ma2 Returns the summary statistics for a given data set. The
#' skewness and kurtosis of the summary statistics can be controlled via the
#' \eqn{\epsilon} and
#' \eqn{\delta} parameters. This is the
#' sinh-arcsinh transformation of \insertCite{Jones2009;textual}{BSL}. By default,
#' the summary statistics function simply returns the raw data. Otherwise, the
#' transformation is introduced to motivate the ``semiBSL'' method.
#' @export
ma2_sum <- function(x, epsilon = 0, delta = 1) {
ssx <- sinh((asinh(x) + epsilon) / delta)
return(ssx)
}
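# Comment-only note: with the defaults epsilon = 0 and delta = 1 the
# sinh-arcsinh transformation reduces to the identity, so ma2_sum returns the
# raw data unchanged.
# x <- rnorm(5)
# all.equal(ma2_sum(x), x) # TRUE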
#' @describeIn ma2 Evaluates the (unnormalised) log prior, which is uniform
#' subject to several restrictions related to invertibility of the time
#' series.
#' @export
ma2_prior <- function(theta) {
log(theta[2] < 1 & sum(theta) > -1 & diff(theta) > -1)
}
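# Comment-only illustration: the log prior is 0 inside the invertibility
# region and -Inf outside it.
# ma2_prior(c(0.6, 0.2)) # 0
# ma2_prior(c(2.5, 0)) # -Inf, since theta1 - theta2 < 1 is violated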
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/ma2.R
|
#' The multivariate G&K example
#'
#' @description Here we provide the data and tuning parameters required to reproduce
#' the results from the multivariate G & K \insertCite{Drovandi2011}{BSL} example from \insertCite{An2019;textual}{BSL}.
#'
#' @param theta_tilde A vector with 15 elements for the proposed model parameters.
#' @param TT The number of observations in the data.
#' @param J The number of variables in the data.
#' @param bound A matrix of boundaries for the uniform prior.
#' @param y A \code{TT} \eqn{\times} \code{J} matrix of data.
#'
#' @details
#' It is not practical to give a reasonable explanation of this example through R documentation
#' given the number of equations involved. We refer the reader to the BSLasso paper \insertCite{An2019}{BSL}
#' at <doi:10.1080/10618600.2018.1537928> for information on the model and summary statistic used in this example.
#'
#' @section An example dataset:
#'
#' We use the foreign currency exchange data available from \url{https://www.rba.gov.au/statistics/historical-data.html}
#' as in \insertCite{An2019;textual}{BSL}.
#'
#' \itemize{
#' \item \code{data}: A \code{1651} \eqn{\times} \code{3} matrix of data.
#' \item \code{sim_args}: Values of \code{sim_args} relevant to this example.
#' \item \code{start}: A vector of suitable initial values of the parameters for MCMC.
#' \item \code{cov}: The covariance matrix of a multivariate normal random walk proposal distribution used in the MCMC, in the form of a 15 by 15 matrix
#' }
#'
#' @examples
#' \dontrun{
#' require(doParallel) # You can use a different package to set up the parallel backend
#' require(MASS)
#' require(elliplot)
#'
#' # Loading the data for this example
#' data(mgnk)
#' model <- newModel(fnSim = mgnk_sim, fnSum = mgnk_sum, simArgs = mgnk$sim_args, theta0 = mgnk$start,
#' thetaNames = expression(a[1],b[1],g[1],k[1],a[2],b[2],g[2],k[2],
#' a[3],b[3],g[3],k[3],delta[12],delta[13],delta[23]))
#'
#' # Performing BSL (reduce the number of iterations M if desired)
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' resultMgnkBSL <- bsl(mgnk$data, n = 60, M = 80000, model = model, covRandWalk = mgnk$cov,
#' method = "BSL", parallel = FALSE, verbose = 1L, plotOnTheFly = TRUE)
#' stopCluster(cl)
#' registerDoSEQ()
#' show(resultMgnkBSL)
#' summary(resultMgnkBSL)
#' plot(resultMgnkBSL, which = 2, thin = 20)
#'
#' # Performing uBSL (reduce the number of iterations M if desired)
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' resultMgnkuBSL <- bsl(mgnk$data, n = 60, M = 80000, model = model, covRandWalk = mgnk$cov,
#' method = "uBSL", parallel = FALSE, verbose = 1L)
#' stopCluster(cl)
#' registerDoSEQ()
#' show(resultMgnkuBSL)
#' summary(resultMgnkuBSL)
#' plot(resultMgnkuBSL, which = 2, thin = 20)
#'
#'
#' # Performing tuning for BSLasso
#' ssy <- mgnk_sum(mgnk$data)
#' lambda_all <- list(exp(seq(-2.5,0.5,length.out=20)), exp(seq(-2.5,0.5,length.out=20)),
#' exp(seq(-4,-0.5,length.out=20)), exp(seq(-5,-2,length.out=20)))
#'
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' set.seed(100)
#' sp_mgnk <- selectPenalty(ssy, n = c(15, 20, 30, 50), lambda = lambda_all, theta = mgnk$start,
#' M = 100, sigma = 1.5, model = model, method = "BSL", shrinkage = "glasso", standardise = TRUE,
#' parallelSim = TRUE, parallelSimArgs = list(.packages = "MASS", .export = "ninenum"),
#' parallelMain = TRUE)
#' stopCluster(cl)
#' registerDoSEQ()
#' sp_mgnk
#' plot(sp_mgnk)
#'
#' # Performing BSLasso with a fixed penalty (reduce the number of iterations M if desired)
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' resultMgnkBSLasso <- bsl(mgnk$data, n = 20, M = 80000, model = model, covRandWalk = mgnk$cov,
#' method = "BSL", shrinkage = "glasso", penalty = 0.3, standardise = TRUE, parallel = FALSE,
#' verbose = 1L)
#' stopCluster(cl)
#' registerDoSEQ()
#' show(resultMgnkBSLasso)
#' summary(resultMgnkBSLasso)
#' plot(resultMgnkBSLasso, which = 2, thin = 20)
#'
#'
#' # Performing semiBSL (reduce the number of iterations M if desired)
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' resultMgnkSemiBSL <- bsl(mgnk$data, n = 60, M = 80000, model = model, covRandWalk = mgnk$cov,
#' method = "semiBSL", parallel = FALSE, verbose = 1L)
#' stopCluster(cl)
#' registerDoSEQ()
#' show(resultMgnkSemiBSL)
#' summary(resultMgnkSemiBSL)
#' plot(resultMgnkSemiBSL, which = 2, thin = 20)
#'
#' # Plotting the results together for comparison
#' # plot using the R default plot function
#' oldpar <- par()
#' par(mar = c(4, 4, 1, 1), oma = c(0, 1, 2, 0))
#' combinePlotsBSL(list(resultMgnkBSL, resultMgnkuBSL, resultMgnkBSLasso, resultMgnkSemiBSL),
#' which = 1, thin = 20, label = c("bsl", "ubsl", "bslasso", "semiBSL"),
#' col = c("red", "yellow", "blue", "green"), lty = 2:5, lwd = 1)
#' mtext("Approximate Univariate Posteriors", outer = TRUE, line = 0.75, cex = 1.2)
#'
#' # plot using the ggplot2 package
#' combinePlotsBSL(list(resultMgnkBSL, resultMgnkuBSL, resultMgnkBSLasso, resultMgnkSemiBSL),
#' which = 2, thin = 20, label=c("bsl","ubsl","bslasso","semiBSL"),
#' options.color=list(values=c("red","yellow","blue","green")),
#' options.linetype = list(values = 2:5), options.size = list(values = rep(1, 4)),
#' options.theme = list(plot.margin = grid::unit(rep(0.03,4),"npc"),
#' axis.title = ggplot2::element_text(size=12), axis.text = ggplot2::element_text(size = 8),
#' legend.text = ggplot2::element_text(size = 12)))
#' par(mar = oldpar$mar, oma = oldpar$oma)
#' }
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @author Ziwen An, Leah F. South and Christopher Drovandi
#'
#' @name mgnk
#' @usage data(mgnk)
NULL
# quantile function of a g-and-k distribution
qgnk <- function(z, a, b, g, k) {
e <- exp(- g * z)
a + b * (1 + 0.8 * (1 - e) / (1 + e)) * (1 + z^2) ^ k * z
}
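# Comment-only property check: at z = 0 the g-and-k quantile function returns
# a, so a is the median of the distribution.
# qgnk(0, a = 3, b = 1, g = 2, k = 0.5) # 3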
logTransform <- function(x, bound) {
x_tilde <- numeric(4)
for (i in 1 : 4) {
x_tilde[i] <- log((x[i] - bound[i, 1]) / (bound[i, 2] - x[i]))
}
return(x_tilde)
}
backLogTransform <- function(x_tilde, bound) {
x <- numeric(4)
for (i in 1 : 4) {
x[i] <- (bound[i, 1] + bound[i, 2] * exp(x_tilde[i])) / (1 + exp(x_tilde[i]))
}
return(x)
}
reparaCorr <- function(theta_corr, J) {
Sigma <- diag(J)
count <- 1
for (i in 1 : (J-1)) {
for (j in (i+1) : J) {
Sigma[i, j] <- Sigma[j, i] <- theta_corr[count]
count <- count + 1
}
}
L <- t(chol(Sigma))
gamma <- matrix(0, J, J)
w <- numeric(choose(J, 2))
count <- 1
for (i in 2 : J) {
gamma[i, 1] <- acos(L[i, 1])
}
if (J >= 3) { # guard: for J = 2 the sequence 2:(J-1) counts down and would index out of bounds
for (j in 2 : (J-1)) {
for (i in (j+1): J) {
gamma[i, j] <- acos((L[i, j]) / (prod(sin(gamma[i, 1:(j-1)]))))
}
}
}
for (i in 2 : J) {
for (j in 1: (i-1)) {
w[count] <- log(gamma[i, j] / (pi - gamma[i, j]))
count <- count + 1
}
}
return(list(w = w, Sigma = Sigma))
}
backReparaCorr <- function(w, J) {
G <- array(0, c(J, J))
count <- 1
for (i in 2 : J) {
for (j in 1 : (i-1)) {
G[i, j] <- pi / (1 + exp(-w[count]))
count <- count + 1
}
}
L <- array(0, c(J, J))
L[1, 1] <- 1
for (i in 2 : J) {
L[i, 1] <- cos(G[i, 1])
L[i, i] <- prod(sin(G[i, 1 : (i-1)]))
}
if (J >= 3) { # guard: for J = 2 the sequence 3:J counts down and would index out of bounds
for (i in 3 : J) {
for (j in 2 : (i-1)) {
L[i, j] <- prod(sin(G[i, 1 : (j-1)])) * cos(G[i, j])
}
}
}
Sigma <- L %*% t(L)
theta_corr <- numeric(choose(J, 2))
count <- 1
for (i in 1 : (J - 1)) {
for (j in (i + 1) : J) {
theta_corr[count] <- Sigma[i, j]
count <- count + 1
}
}
return(theta_corr)
}
paraTransformGnk <- function(theta, J, bound) {
if (J == 1L) {
theta_tilde <- logTransform(theta, bound)
} else {
theta_tilde <- numeric(length(theta))
for (i in 1:J) {
theta_tilde[(4*i-3) : (4*i)] <- logTransform(theta[(4*i-3) : (4*i)], bound)
}
theta_tilde[(4*J + 1) : length(theta_tilde)] <- reparaCorr(tail(theta, -4*J), J)$w
}
return(theta_tilde)
}
paraBackTransformGnk <- function(theta_tilde, J, bound) {
if (J == 1L) {
theta <- backLogTransform(theta_tilde, bound)
} else {
theta <- numeric(length(theta_tilde))
for (i in 1:J) {
theta[(4*i-3) : (4*i)] <- backLogTransform(theta_tilde[(4*i-3) : (4*i)], bound)
}
theta[(4*J + 1) : length(theta)] <- backReparaCorr(tail(theta_tilde, -4*J), J)
}
return(theta)
}
#' The function \code{mgnk_sim} simulates from the multivariate G & K model.
#' @rdname mgnk
#' @export
mgnk_sim <- function(theta_tilde, TT, J, bound) {
theta <- paraBackTransformGnk(theta_tilde, J, bound)
if (J == 1) {
theta_gnk <- theta
Sigma <- 1
} else {
theta_gnk <- head(theta, 4*J)
theta_corr <- tail(theta, -4*J)
if (length(theta_corr) != choose(J, 2)) {
stop('wrong parameter length or dimension')
}
Sigma <- reparaCorr(theta_corr, J)$Sigma
}
y <- array(0, c(TT, J))
zu <- mvrnorm(n = TT, mu = numeric(J), Sigma = Sigma)
for (i in 1 : J) {
y[, i] <- qgnk(zu[, i], theta[4*(i-1) + 1], theta[4*(i-1) + 2], theta[4*(i-1) + 3], theta[4*(i-1) + 4])
}
return(y)
}
summStatRobust <- function(x) {
TT <- length(x)
ssx <- numeric(4)
octile <- elliplot::ninenum(x)[2:8]
ssx[1] <- octile[4]
ssx[2] <- octile[6] - octile[2]
ssx[3] <- (octile[7] - octile[5] + octile[3] - octile[1]) / ssx[2]
ssx[4] <- (octile[6] + octile[2] - 2*octile[4]) / ssx[2]
return(ssx)
}
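# The four statistics above are octile-based robust measures of location
# (ssx[1]), scale (ssx[2]), kurtosis (ssx[3]) and skewness (ssx[4]), as used
# for g-and-k models in Drovandi and Pettitt (2011). Comment-only
# illustration: for a large standard normal sample, expect approximately
# c(0, 1.35, 1.23, 0) up to Monte Carlo error.
# summStatRobust(rnorm(1e5))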
normScore <- function(x, y) {
n <- length(x)
r0 <- 1 : n
z1 <- qnorm(rank(x) / (n + 1))
z2 <- qnorm(rank(y) / (n + 1))
c <- qnorm(r0 / (n + 1))
r <- sum(z1 * z2) / sum(c ^ 2)
norm_score <- 0.5 * log((1 + r) / (1 - r))
return(norm_score)
}
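# normScore computes the Fisher z-transform of the normal-scores correlation
# between x and y. Comment-only illustration (the data below are an assumption
# for demonstration):
# set.seed(1); x <- rnorm(500); y <- x + rnorm(500)
# tanh(normScore(x, y)) # roughly 1/sqrt(2), the true correlation of x and y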
#' The function \code{mgnk_sum(y)} calculates the summary statistics for the multivariate G & K example.
#' @rdname mgnk
#' @export
mgnk_sum <- function(y) {
J <- ncol(y)
ssxRobust <- c(apply(y, MARGIN = 2, FUN = summStatRobust))
if (J == 1L) {
return(ssx = ssxRobust)
} else {
ssxNormScore <- numeric(choose(J, 2))
count <- 1
for (i in 1 : (J - 1)) {
for (j in (i + 1) : J) {
ssxNormScore[count] <- normScore(y[, i], y[, j])
count <- count + 1
}
}
return(ssx = c(ssxRobust, ssxNormScore))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/mgnk.R
|
#' Progress Bar
#' @description Print a customisable progress bar in the console.
#' @param p Numeric, percentage of finished progress, between 0 and 1.
#' @param txt1 String to put before the progress bar
#' @param txt2 String to put after the progress bar
#' @param style The display style. 1 is single-lined; 2 is double-lined; 3 displays the progress in a 5-lined block.
#' @param label Character labels for the "finished", "unfinished" and "side bar" segments.
#' @return No return value, called for side effects.
#' @keywords internal
myMiniProgressBar <- function(p, txt1 = '', txt2 = '', style = 1, label = c('=', '-', '|')) {
stopifnot(style %in% c(1, 2, 3))
label <- as.character(label)
stopifnot(length(label) == 3)
stopifnot(all(nchar(label) <= 1))
width <- options('width')[[1]] + 3
txt1 <- as.character(txt1)
txt2 <- as.character(txt2)
finished <- label[1]
unfinished <- label[2]
bar <- label[3]
n1 <- nchar(txt1)
n2 <- nchar(txt2)
if (style == 1) {
n3 <- ((n1 + 1) %/% width + 1) * width - n1 - n2 - 3
} else if (style == 2) {
txt2 <- stringr::str_pad(txt2, ((n2 + 1) %/% width + 1) * width - 2, "right")
n3 <- ((n1 + 1) %/% width + 1) * width - n1 - 2
} else { # style == 3
txt1 <- stringr::str_pad(txt1, ((n1 + 1) %/% width + 1) * width - 2, "right")
txt2 <- stringr::str_pad(txt2, ((n2 + 1) %/% width + 1) * width - 2, "right")
n3 <- width * 5
}
if (n3 <= 4) {
stop('not enough line width')
}
if (p < 0) p <- 0
if (p > 1) p <- 1
done <- round((n3 - 2 * nchar(bar)) * p)
progress <- paste0(bar, strrep(finished, done),
strrep(unfinished, n3 - 2 * nchar(bar) - done), bar)
cat('\r', txt1, progress, txt2)
invisible(NULL)
}
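# Comment-only usage sketch (internal function, so not exported):
# for (i in 1:100) {
# myMiniProgressBar(i / 100, txt1 = 'running', txt2 = paste('iteration', i))
# Sys.sleep(0.01)
# }
# cat('\n')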
# x in second
myTimeStr <- function(x) {
x <- round(as.numeric(x))
y <- c(x %/% 3600, (x %% 3600) %/% 60, (x %% 3600) %% 60)
if (y[1] >= 1) {
paste0(y[1:2], c('h', 'm'), collapse = ' ')
} else if (y[2] >= 1) {
paste0(y[2:3], c('m', 's'), collapse = ' ')
} else {
paste0(y[3], 's')
}
}
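# Comment-only examples of the formatting rules above:
# myTimeStr(45) # "45s"
# myTimeStr(75) # "1m 15s"
# myTimeStr(3661) # "1h 1m"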
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/myMiniProgressBar.R
|
#' @include s4-MODEL.R
NULL
setOldClass("difftime")
setClassUnion("numericOrNULL", c("numeric", "NULL"))
setClassUnion("matrixOrNULL", c("matrix", "NULL"))
setClassUnion("characterOrNULL", c("character", "NULL"))
setClassUnion("logicalOrMatrixOrNULL", c("logical", "matrix", "NULL"))
# setClassUnion("listOrNULL", c("list", "NULL")) # defined in s4-MODEL.R
# setClassUnion("functionOrNULL", c("function", "NULL")) # defined in s4-MODEL.R
#' S4 class ``BSL''.
#' @description The S4 class ``BSL'' is produced by running function
#' \code{\link{bsl}} and contains the result of a BSL run. Basic S4 methods
#' \code{show}, \code{summary} and \code{plot} are provided. \code{theta} and
#' \code{loglike} returns the MCMC samples of parameter values and estimated
#' log-likelihoods.
#' @rawRd \Rdversion{1.1}
#' @slot theta Object of class ``matrix''. MCMC samples from the joint
#' approximate posterior distribution of the parameters.
#' @slot loglike Object of class ``numeric''. Accepted MCMC samples of the
#' estimated log-likelihood values.
#' @slot call Object of class ``call''. The original code that was used to call
#' the method.
#' @slot model Object of class ``MODEL''.
#' @slot acceptanceRate Object of class ``numeric''. The acceptance rate of the
#' MCMC algorithm.
#' @slot earlyRejectionRate Object of class ``numeric''. The early rejection
#' rate of the algorithm (early rejection may occur when using bounded prior
#' distributions).
#' @slot errorRate Object of class ``numeric''. The error rate. If any infinite
#' summary statistic or infinite log-likelihood estimate occurs during the
#' process, it is marked as an error and the proposed parameter will be
#' rejected.
#' @slot y Object of class ``ANY''. The observed data.
#' @slot n Object of class ``numeric''. The number of simulations from the model
#' per MCMC iteration to estimate the synthetic likelihood.
#' @slot M Object of class ``numeric''. The number of MCMC iterations.
#' @slot covRandWalk Object of class ``matrix''. The covariance matrix used in
#' multivariate normal random walk proposals.
#' @slot method Object of class ``character''. The character argument indicating
#' the used method.
#' @slot shrinkage Object of class ``characterOrNULL''. The character argument
#' indicating the shrinkage method.
#' @slot penalty Object of class ``numericOrNULL''. The penalty value.
#' @slot GRC Object of class ``logical''. Whether the Gaussian rank correlation
#' matrix is used.
#' @slot logitTransform Object of class ``logical''. The logical argument
#' indicating whether a logit transformation is used in the algorithm.
#' @slot logitTransformBound Object of class ``matrixOrNULL''. The matrix of
#' logitTransformBound.
#' @slot standardise Object of class ``logical''. The logical argument that
#' determines whether to standardise the summary statistics.
#' @slot parallel Object of class ``logical''. The logical value indicating
#' whether parallel computing is used in the process.
#' @slot parallelArgs Object of class ``listOrNULL''. The list of additional
#' arguments to pass into the \code{foreach} function.
#' @slot time Object of class ``difftime''. The running time.
#' @slot gamma Object of class ``numeric''. MCMC samples of gamma parameter
#' values of the mean adjustment or variance inflation for method
#' ``BSLmisspec''.
#' @slot misspecType Object of class ``characterOrNULL''. The character argument
#' indicating whether mean adjustment ("mean") or variance inflation
#' ("variance") to be used in "BSLmisspec" method.
#' @slot tau Object of class ``numeric''. Parameter of the prior distribution
#' for "BSLmisspec" method. For mean adjustment, \code{tau} is the scale of
#' the Laplace distribution. For variance inflation, \code{tau} is the mean of
#' the exponential distribution.
#' @slot whitening Object of class ``logicalOrMatrixOrNULL''. A logical argument
#' that determines whether the Whitening transformation is used in the ``BSL''
#' method with Warton's shrinkage, or the Whitening matrix itself.
#'
#' @examples
#' \dontshow{
#' # a toy example
#' toy_simVec <- function(n, theta) matrix(rnorm(n, theta), nrow = n) # the simulation function
#' toy_sum <- function(x) x # the summary statistic function
#' model <- newModel(fnSimVec = toy_simVec, fnSum = toy_sum, theta0 = 0) # create the model object
#' result_toy <- bsl(y = 1, n = 50, M = 100, model = model, covRandWalk = matrix(1), verbose = 0)
#' summary(result_toy)
#' plot(result_toy)
#' }
#' \dontrun{
#' # a toy example
#' toy_simVec <- function(n, theta) matrix(rnorm(n, theta), nrow = n) # the simulation function
#' toy_sum <- function(x) x # the summary statistic function
#' model <- newModel(fnSimVec = toy_simVec, fnSum = toy_sum, theta0 = 0) # create the model object
#' result_toy <- bsl(y = 1, n = 100, M = 1e4, model = model, covRandWalk = matrix(1))
#' summary(result_toy)
#' plot(result_toy)
#' }
#'
#' @aliases BSLclass
#' @export
setClass("BSL",
slots = c(theta = "matrix",
loglike = "numeric",
model = "MODEL",
acceptanceRate = "numeric",
earlyRejectionRate = "numeric",
errorRate = "numeric",
y = "ANY",
n = "numeric",
M = "numeric",
covRandWalk = "matrix",
method = "character",
shrinkage = "characterOrNULL",
penalty = "numericOrNULL",
standardise = "logical",
GRC = "logical",
gamma = "matrix",
misspecType = "characterOrNULL",
tau = "numeric",
whitening = "logicalOrMatrixOrNULL",
logitTransform = "logical",
logitTransformBound = "matrixOrNULL",
parallel = "logical",
parallelArgs = "listOrNULL",
time = "difftime",
call = "call")
)
setValidity("BSL",
method = function(object) {
if (any(length(object@theta) == 0, length(object@M) == 0)) { # slots that must be included in the BSL class
warning('empty slot "theta" or "M" in the "BSL" object')
} else {
errors <- character()
p <- ncol(object@theta)
M <- nrow(object@theta)
if (M != object@M) {
msg <- paste('The number of rows of theta', M, 'does not match the number of iterations M', object@M)
errors <- c(errors, msg)
}
temp <- length(object@loglike)
if (temp != 0 && temp != M) {
msg <- paste('The number of iterations M', M, 'does not match the length of loglike', temp)
errors <- c(errors, msg)
}
if (!is(object@model, 'MODEL')) {
msg <- paste('model must be a MODEL class object')
errors <- c(errors, msg)
} else {
invisible(capture.output(validObject(object@model)))
}
if (nrow(object@covRandWalk) != p || ncol(object@covRandWalk) != p) {
msg <- paste('covRandWalk must be a', p, 'by', p, 'square matrix')
errors <- c(errors, msg)
}
if (!is.null(object@logitTransformBound)) {
if (nrow(object@logitTransformBound) != p || ncol(object@logitTransformBound) != 2L) {
msg <- paste('logitTransformBound must be a', p, 'by', 2, 'matrix')
errors <- c(errors, msg)
}
}
if (length(errors) == 0) {
return (TRUE)
} else {
return (errors)
}
}
}
)
#' @param object A ``BSL'' class object.
#' @rdname BSL-class
#' @export
setMethod("show",
signature = c(object = "BSL"),
definition = function(object) {
digits = max(3L, getOption("digits") - 3L)
cat("\nCall:\n", paste(deparse(object@call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
if (nrow(object@theta)) {
cat("Summary of theta:\n")
summ <- summary(object@theta)
attr(summ, 'dimnames') = list(NULL, object@model@thetaNames)
print.default(format(summ, digits = digits), print.gap = 2L,
quote = FALSE)
}
else cat("No theta\n")
if (length(object@loglike)) {
cat("Summary of loglikelihood:\n")
summ <- summary(object@loglike)
print.default(format(summ, digits = digits), print.gap = 2L,
quote = FALSE)
}
else cat("No loglikelihood\n")
if (length(object@acceptanceRate)) {
cat("Acceptance Rate:\n")
print.default(format(object@acceptanceRate, digits = digits), print.gap = 2L,
quote = FALSE)
}
else cat("No acceptance rate\n")
if (length(object@earlyRejectionRate)) {
cat("Early Rejection Rate:\n")
print.default(format(object@earlyRejectionRate, digits = digits), print.gap = 2L,
quote = FALSE)
}
else cat("No early rejection rate\n")
cat("\n")
}
)
#' @param object A ``BSL'' class object.
#' @param burnin the number of MCMC burn-in steps to be taken.
#' @param thetaNames Parameter names to be shown in the summary table. If not
#' given, parameter names of the ``BSL'' object will be used by default.
#' @rdname BSL-class
#' @export
setMethod("summary",
signature = c(object = "BSL"),
definition = function(object, burnin = 0, thetaNames = NULL) {
theta <- getTheta(object, burnin = burnin)
#theta <- as.matrix(object@theta[(burnin + 1) : nrow(object@theta), ])
n <- object@n
M <- nrow(theta)
p <- ncol(theta)
if (is.null(thetaNames)) {
if (!is.null(object@model@thetaNames)) {
thetaNames <- object@model@thetaNames
} else {
thetaNames <- vector('expression', p)
for (i in 1:p) {
thetaNames[i] <- as.expression(substitute(theta[j],list(j=i)))
}
}
}
if (length(object@model@thetaNames) != p) {
warning('length of "thetaNames" does not match number of parameters\n')
}
accRate <- round(mean(diff(theta[, 1]) !=0), 2)
ess <- round(effectiveSize(theta), 0)
# ess <- round(effectiveSize(theta) / n / M * 1000000, 0)
summ <- c(n, accRate*100, ess)
names(summ) <- c('n', 'acc. rate (%)', paste('ESS', thetaNames))
return(summ)
}
)
#' @param x A ``BSL'' class object to plot.
#' @param which An integer argument indicating which plot function to be
#' used. The default, \code{1L}, uses the plain \code{plot} to visualise the
#' result. \code{2L} uses ggplot2 to draw the plot.
#' @param thin A numeric argument indicating the gap between samples to
#' be taken when thinning the MCMC draws. The default is \code{1}, which means
#' no thinning is used.
#' @param thetaTrue A set of true parameter values to be included on the plots
#' as a reference line. The default is \code{NULL}.
#' @param options.plot A list of additional arguments to pass into the
#' \code{plot} function. Only use when \code{which} is \code{1L}.
#' @param top A character argument of the combined plot title if
#' \code{which} is \code{2L}.
#' @param options.density A list of additional arguments to pass into the
#' \code{geom_density} function. Only use when \code{which} is \code{2L}.
#' @param options.theme A list of additional arguments to pass into the
#' \code{theme} function. Only use when \code{which} is \code{2L}.
#' @rdname BSL-class
#' @export
setMethod("plot",
signature = c(x = "BSL"),
definition = function(x, which = 1L, thin = 1, burnin = 0, thetaTrue = NULL, options.plot = NULL,
top = 'Approximate Univariate Posteriors', options.density = list(), options.theme = list()) {
if (which == 1L) {
if (length(options.density) != 0 || length(options.theme) != 0) {
warning('"options.density" and "options.theme" are ignored when which = 1')
}
marginalPostDefault(x, thin, burnin, thetaTrue, options.plot)
} else if (which == 2L) {
if (!is.null(options.plot)) {
warning('"options.plot" is ignored when which = 2')
}
marginalPostGgplot(x, thin, burnin, thetaTrue, top, options.density, options.theme)
} else {
stop('Indicate a supported plot number, 1 for R default density plot or 2 for ggplot density plot')
}
}
)
# Plot the univariate marginal posterior plot of a bsl class object using the R
# default plot function.
marginalPostDefault <- function(x, thin = 1, burnin = 0, thetaTrue = NULL, options.plot = NULL) {
theta <- getTheta(x, burnin = burnin, thin = thin)
n <- nrow(theta)
p <- ncol(theta)
a <- floor(sqrt(p))
b <- ceiling(p / a)
if (!is.null(thetaTrue) && length(thetaTrue) != p) {
stop('Length of thetaTrue does not match the number of parameters.')
}
thetaNames <- x@model@thetaNames
oldpar <- par(no.readonly = TRUE) # get current user par settings
on.exit(par(oldpar)) # reset current user par settings at the end of the function
par(mfrow = c(a, b))
for(k in 1:p) {
d <- density(theta[, k])
if ('main' %in% names(options.plot)) {
do.call(plot, c(list(d, xlab = thetaNames[k]), options.plot))
} else {
do.call(plot, c(list(d, xlab = thetaNames[k], main = NA), options.plot))
}
if (!is.null(thetaTrue)) {
abline(v = thetaTrue[k], col = 'forestgreen', lty = 3)
}
}
}
# Plot the univariate marginal posterior plot of a bsl class object using the
# ggplot2 package.
marginalPostGgplot <- function(x, thin = 1, burnin = 0, thetaTrue = NULL, top = 'Approximate Univariate Posteriors', options.density = list(), options.theme = list()) {
theta <- getTheta(x, burnin = burnin, thin = thin)
n <- nrow(theta)
p <- ncol(theta)
a <- floor(sqrt(p))
b <- ceiling(p / a)
  if (!is.null(thetaTrue) && length(thetaTrue) != p) {
stop('Length of thetaTrue does not match the number of parameters.')
}
samples <- data.frame(theta)
thetaNames <- x@model@thetaNames
plist <- list()
for (i in 1 : p) {
plist[[i]] <- ggplot(samples, aes_string(x = colnames(samples)[i])) +
do.call(geom_density, options.density) +
geom_hline(yintercept = 0, colour = "grey", size = 0.75) + {
if (!is.null(thetaTrue)) {
geom_vline(xintercept = thetaTrue[i], color = 'forestgreen', linetype = 'dashed', size = 0.5)
}
} +
labs(x = thetaNames[i], y = 'density') +
do.call(theme, options.theme)
}
do.call('grid.arrange', c(plist, nrow = a, ncol = b, top = top))
}
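# A quick illustration (not part of the package) of the near-square panel
# grid computed in both plotting helpers above: a rows by b columns.
local({
  p <- 7                                  # number of parameters
  a <- floor(sqrt(p)); b <- ceiling(p / a)
  c(rows = a, cols = b)                   # 2 rows, 4 columns
})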
#' Obtain the samples from a "BSL" object
#' @description see \code{\link{BSLclass}}
#' @param object A ``BSL'' class object.
#' @param ... Other arguments.
#' @return The matrix of samples, after removing burn-in and thinning.
setGeneric("getTheta", function(object, ...) standardGeneric("getTheta"))
#' @rdname BSL-class
#' @export
setMethod("getTheta",
signature = c(object = "BSL"),
definition = function(object, burnin = 0, thin = 1) {
as.matrix(object@theta[seq((burnin + 1), nrow(object@theta), by = thin), ])
}
)
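# A standalone sketch (not part of the package) of the burn-in/thinning
# index used by getTheta above.
local({
  M <- 10; burnin <- 4; thin <- 2
  seq(burnin + 1, M, by = thin)           # rows kept: 5, 7, 9
})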
#' Obtain the log-likelihoods from a "BSL" object
#' @description see \code{\link{BSLclass}}
#' @param object A ``BSL'' class object.
#' @param ... Other arguments.
#' @return The vector of log likelihood evaluations, after removing burn-in and thinning.
setGeneric("getLoglike", function(object, ...) standardGeneric("getLoglike"))
#' @rdname BSL-class
#' @export
setMethod("getLoglike",
signature = c(object = "BSL"),
definition = function(object, burnin = 0, thin = 1) {
object@loglike[seq((burnin + 1), length(object@loglike), by = thin)]
}
)
#' Obtain the gamma samples (the latent parameters for BSLmisspec method) from a
#' "BSL" object
#' @description see \code{\link{BSLclass}}
#' @param object A ``BSL'' class object.
#' @param ... Other arguments.
#' @return The matrix of gamma samples (the latent parameters for BSLmisspec
#' method), after removing burn-in and thinning.
setGeneric("getGamma", function(object, ...) standardGeneric("getGamma"))
#' @rdname BSL-class
#' @export
setMethod("getGamma",
signature = c(object = "BSL"),
definition = function(object, burnin = 0, thin = 1) {
as.matrix(object@gamma[seq((burnin + 1), nrow(object@gamma), by = thin), ])
}
)
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/s4-BSL.R
|
setClassUnion("functionOrNULL", c("function", "NULL"))
setClassUnion("listOrNULL", c("list", "NULL"))
#' S4 class ``MODEL''
#' @description The S4 class contains the simulation and summary statistics
#' function and other necessary arguments for a model to run in the main
#' \code{bsl} function.
#' @rawRd \Rdversion{1.1}
#' @slot fnSim A function that simulates data for a given parameter value. The
#' first argument should be the parameters. Other necessary arguments
#' (optional) can be specified with \code{simArgs}.
#' @slot fnSimVec A vectorised function that simulates a number of datasets
#' simultaneously for a given parameter value. If this is not \code{NULL},
#' vectorised simulation function will be used instead of \code{fnSim}. The
#' first two arguments should be the number of simulations to run and
#' parameters, respectively. Other necessary arguments (optional) can be
#' specified with \code{simArgs}. The output must be a list of each simulation
#' result.
#' @slot fnSum A function for computing summary statistics of data. The first
#' argument should be the observed or simulated dataset. Other necessary
#' arguments (optional) can be specified with \code{sumArgs}. The user should
#' code this function carefully so that the output has a fixed length and
#' never contains any \code{Inf} values.
#' @slot fnLogPrior A function that computes the log of prior density for a
#' parameter. The default is \code{NULL}, which uses an improper flat prior
#' over the real line for each parameter. The function must have a single
#' input: a vector of parameter values.
#' @slot simArgs A list of additional arguments to pass into the simulation
#' function. Only use when the input \code{fnSim} or \code{fnSimVec} requires
#' additional arguments. The default is \code{NULL}.
#' @slot sumArgs A list of additional arguments to pass into the summary
#' statistics function. Only use when the input \code{fnSum} requires
#' additional arguments. The default is \code{NULL}.
#' @slot theta0 Initial guess of the parameter value, which is used as the
#' starting value for MCMC.
#' @slot thetaNames Expression, parameter names.
#' @slot ns The number of summary statistics of a single observation. Note this
#' will be generated automatically, thus is not required for initialisation.
#' @slot test Logical, whether a short simulation test will be run upon
#' initialisation.
#' @slot verbose Logical, whether to print verbose messages when initialising a
#' ``MODEL'' object.
#' @export
MODEL <- setClass("MODEL",
slots = c(fnSim = "functionOrNULL",
fnSimVec = "functionOrNULL",
fnSum = "functionOrNULL",
fnLogPrior = "functionOrNULL",
simArgs = "listOrNULL",
sumArgs = "listOrNULL",
theta0 = "numeric",
thetaNames = "expression",
ns = "integer",
test = "logical",
verbose = "logical")
)
#' Constructor for class ``MODEL''
#' @description \code{newModel} is the constructor function for a \code{MODEL}
#' object.
#' @param fnSim A function that simulates data for a given parameter
#' value. The first argument should be the parameters. Other necessary
#' arguments (optional) can be specified with \code{simArgs}.
#' @param fnSimVec A vectorised function that simulates a number of
#' datasets simultaneously for a given parameter value. The first two
#' arguments should be the number of simulations to run and parameters,
#' respectively. Other necessary arguments (optional) can be specified with
#' \code{simArgs}. The output must be a list of each simulation result or a
#' matrix with each row corresponding to a simulation.
#' @param simArgs A list of additional arguments to pass into the simulation
#' function. Only use when the input \code{fnSim} requires additional
#' arguments.
#' @param fnSum A function for computing summary statistics of data. The
#' first argument should be the observed or simulated dataset. Other necessary
#' arguments (optional) can be specified with \code{sumArgs}.
#' @param sumArgs A list of additional arguments to pass into the summary
#' statistics function. Only use when the input \code{fnSum} requires
#' additional arguments.
#' @param fnLogPrior A function that computes the log of prior density for a
#' parameter. If this is missing, the prior by default is an improper flat
#' prior over the real line for each parameter. The function must have a
#' single input: a vector of parameter values.
#' @param theta0 Initial guess of the parameter value.
#' @param thetaNames A string vector of parameter names, which must have the
#' same length as the parameter vector.
#' @param test Logical, whether a short simulation test will be run
#' upon initialisation.
#' @param verbose Logical, whether to print verbose messages when
#' initialising a ``MODEL'' object.
#' @examples
#' # set up the model for the ma2 example
#' data(ma2)
#' m <- newModel(fnSim = ma2_sim, fnSum = ma2_sum, simArgs = ma2$sim_args,
#' theta0 = ma2$start, fnLogPrior = ma2_prior, verbose = FALSE)
#' validObject(m)
#'
#' # benchmark the serial and vectorised simulation function (require the rbenchmark package)
#' m1 <- newModel(fnSim = ma2_sim, fnSum = ma2_sum, simArgs = ma2$sim_args,
#' theta0 = ma2$start, fnLogPrior = ma2_prior)
#' m2 <- newModel(fnSimVec = ma2_sim_vec, fnSum = ma2_sum, simArgs = ma2$sim_args,
#' theta0 = ma2$start, fnLogPrior = ma2_prior)
#' require("rbenchmark")
#' \dontshow{
#' benchmark(serial = simulation(m1, n = 50, theta = c(0.6, 0.2)),
#' vectorised = simulation(m2, n = 50, theta = c(0.6, 0.2)))
#' }
#' \dontrun{
#' benchmark(serial = simulation(m1, n = 1000, theta = c(0.6, 0.2)),
#' vectorised = simulation(m2, n = 1000, theta = c(0.6, 0.2)))
#' }
#'
#' @rdname MODEL-class
#' @export
newModel <- function(fnSim, fnSimVec, fnSum, fnLogPrior, simArgs, sumArgs, theta0, thetaNames, test = TRUE, verbose = TRUE) {
new(Class = "MODEL", fnSim = fnSim, fnSimVec = fnSimVec, fnSum = fnSum, fnLogPrior = fnLogPrior,
simArgs = simArgs, sumArgs = sumArgs, theta0 = theta0, thetaNames = thetaNames, test = test, verbose = verbose)
}
setMethod("initialize",
signature = "MODEL",
definition = function(.Object, fnSim, fnSimVec, simArgs, fnSum, sumArgs, fnLogPrior, theta0, thetaNames,
test = TRUE, verbose = TRUE) {
if (verbose) cat("*** initialize \"MODEL\" ***\n")
has.fnSim <- !missing(fnSim) || !missing(fnSimVec)
if (verbose) cat(paste("has simulation function:", has.fnSim, "\n"))
has.fnSum <- !missing(fnSum)
if (verbose) cat(paste("has summary statistics function:", has.fnSum, "\n"))
has.theta0 <- !missing(theta0)
if (verbose) cat(paste("has initial guess / point estimate of the parameter:", has.theta0, "\n"))
if (has.fnSim && has.fnSum && has.theta0) {
if (!missing(fnSim)) .Object@fnSim <- fnSim
if (!missing(fnSimVec)) .Object@fnSimVec <- fnSimVec
if (!missing(simArgs)) .Object@simArgs <- simArgs
.Object@fnSum <- fnSum
if (!missing(sumArgs)) .Object@sumArgs <- sumArgs
if (!missing(fnLogPrior)) .Object@fnLogPrior <- fnLogPrior
if (is.null(.Object@fnLogPrior)) {
.Object@fnLogPrior <- function(theta) 0
if (verbose) cat("No prior has been defined in the model, use the default improper flat prior\n")
}
.Object@theta0 <- theta0
.Object@test <- test
.Object@verbose <- verbose
validObject(.Object)
.Object@ns <- getns(.Object)
if (missing(thetaNames)) { # missing
if (!is.null(names(theta0))) { # use the name of theta0
.Object@thetaNames <- as.expression(names(theta0))
} else { # use default theta names
thetaNames <- vector("expression", length(theta0))
for (i in 1 : length(theta0)) {
thetaNames[i] <- as.expression(substitute(theta[j], list(j = i)))
}
.Object@thetaNames <- thetaNames
}
} else { # !missing
if (is.null(thetaNames)) { # use default theta names
thetaNames <- vector("expression", length(theta0))
for (i in 1 : length(theta0)) {
thetaNames[i] <- as.expression(substitute(theta[j], list(j = i)))
}
.Object@thetaNames <- thetaNames
} else { # has thetaNames
if (length(thetaNames) != length(theta0)) {
if (verbose) cat(paste("The length of thetaNames does not match the length of theta0,", length(theta0), "\n"))
} else {
.Object@thetaNames <- as.expression(thetaNames)
}
}
}
} else {
if (verbose) cat("an empty (invalid) MODEL object has been created due to one or more missing slots\n")
}
if (verbose) cat("*** end initialize ***\n")
return (.Object)
}
)
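# A standalone sketch (not part of the package) of the default parameter-name
# construction used in the initializer above: one plotmath expression
# theta[i] per parameter.
local({
  p <- 3
  nm <- vector("expression", p)
  for (i in 1:p) nm[i] <- as.expression(substitute(theta[j], list(j = i)))
  nm                                      # one plotmath label per parameter
})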
setValidity("MODEL",
method = function(object) {
if (is.null(object@fnSim) && is.null(object@fnSimVec)) {
return("No available simulation function is provided")
}
if (object@test) {
if (object@verbose) cat("running a short simulation test ... ")
# test the simulation function
if (!is.null(object@fnSimVec)) {
x <- try(do.call(object@fnSimVec, c(list(10, object@theta0), object@simArgs)))
if (inherits(x, "try-error")) {
return("Fail to run simulations with the given vectorised simulation function")
}
if (!(is.matrix(x) && nrow(x) == 10) && !(is.list(x) && length(x) == 10)) {
return("Output from the vectorised simulation function must be either a matrix
(each row corresponds to a simulation) or a list")
}
} else {
x <- list()
for (i in 1 : 10) {
temp <- try(do.call(object@fnSim, c(list(object@theta0), object@simArgs)))
if (inherits(temp, "try-error")) {
return("Fail to run simulations with the given simulation function")
}
x[[i]] <- temp
}
}
# test the summary statistics function
if (is.matrix(x)) {
ssx <- try(do.call(object@fnSum, c(list(x[1, ]), object@sumArgs)))
} else {
ssx <- try(do.call(object@fnSum, c(list(x[[1]]), object@sumArgs)))
}
if (inherits(ssx, "try-error")) {
return("Fail to get summary statistics with the given summary statistics function")
}
if (!is.numeric(ssx)) {
return("The output of the summary statistics function must be numeric")
}
if (object@verbose) cat("success\n")
}
# prior
if (object@fnLogPrior(object@theta0) == -Inf) {
return("The given parameter value theta0 has no prior support\n")
}
# pass all checks
TRUE
}
)
#' Run simulations with a given "MODEL" object
#' @description see \code{\link{MODEL}}
#' @param model A ``MODEL'' object.
#' @param ... Other arguments.
setGeneric("simulation", function(model, ...) standardGeneric("simulation"))
#' @description \code{simulation} runs a number of simulations and computes the
#' corresponding summary statistics with the provided model.
#' @param model A ``MODEL'' class object.
#' @param n The number of simulations to run.
#' @param theta The parameter value.
#' @param summStat Logical indicator of whether the corresponding summary statistics
#' should be returned or not. The default is \code{TRUE}.
#' @param seed A seed number to pass to the \code{set.seed} function. The
#' default is \code{NULL}, in which case no seed number is set. Please note
#' \code{parallel} also affects the result even with the same seed.
#' @inheritParams BSL-class
#' @inheritParams bsl
#' @return A list of simulation results using the given parameter. \code{x}
#' contains the raw simulated datasets. \code{ssx} contains the summary
#' statistics.
#' @rdname MODEL-class
#' @export
setMethod("simulation",
signature(model = "MODEL"),
definition = function(model, n = 1, theta = model@theta0, summStat = TRUE, parallel = FALSE, parallelArgs = NULL, seed = NULL) {
if (!is.null(seed)) {
if (parallel) doRNG::registerDoRNG()
set.seed(seed)
}
flagVec <- !is.null(model@fnSimVec)
if (flagVec && parallel) {
parallel <- FALSE
warning("Parallel computation is disabled for vecotised simulations")
}
if (!parallel & !is.null(parallelArgs)) {
warning("\"parallelArgs\" is omitted in serial computing")
}
if (model@fnLogPrior(theta) == -Inf) {
warning("The given parameter has no prior support")
}
if (n == 1 && parallel) {
parallel <- FALSE
warning("Parallel computation is disabled for n = 1")
}
ssx <- NULL
if (parallel) { # parallel
parallelArgs$.export <- c(parallelArgs$.export, "model")
suppressWarnings(
x <- do.call(foreach, c(list(j = 1:n), parallelArgs)) %dopar% {
do.call(model@fnSim, c(list(theta), model@simArgs))
}
)
if (summStat) {
suppressWarnings(
ssx <- do.call(foreach, c(list(j = 1:n, .combine = rbind), parallelArgs)) %dopar% {
do.call(model@fnSum, c(x[j], model@sumArgs))
}
)
}
if (is.atomic(x[[1]]) && is.vector(x[[1]])) { # reduce to matrix
if (length(unique(sapply(x, FUN = length))) == 1) {
x <- matrix(unlist(x), ncol = length(x[[1]]), byrow = TRUE)
}
}
} else { # not parallel
if (flagVec) { # vectorised
if (n == 1) {
x <- do.call(model@fnSimVec, c(list(1, theta), model@simArgs))
if (is.matrix(x)) {
x <- as.vector(x)
if (summStat) ssx <- do.call(model@fnSum, c(list(x), model@sumArgs))
} else {
if (summStat) ssx <- do.call(model@fnSum, c(x, model@sumArgs))
}
} else {
x <- do.call(model@fnSimVec, c(list(n, theta), model@simArgs))
if (summStat) {
ns <- ifelse(length(model@ns) == 0, getns(model), model@ns)
if (is.matrix(x)) {
temp <- apply(x, FUN = function(y) do.call(model@fnSum, c(list(y), model@sumArgs)), MARGIN = 1)
ssx <- matrix(temp, nrow = n, ncol = ns, byrow = TRUE)
} else {
temp <- sapply(x, FUN = function(y) do.call(model@fnSum, c(list(y), model@sumArgs)))
ssx <- matrix(temp, nrow = n, ncol = ns, byrow = TRUE)
}
# if (!is.vector(ssx)) {
# ssx <- t(ssx)
# }
}
}
} else { # serial
if (n == 1) {
x <- do.call(model@fnSim, c(list(theta), model@simArgs))
if (summStat) ssx <- do.call(model@fnSum, c(list(x), model@sumArgs))
} else {
x <- vector("list", n)
ns <- ifelse(length(model@ns) == 0, getns(model), model@ns)
if (summStat) ssx <- array(0, c(n, ns))
for (j in 1 : n) {
x[[j]] <- do.call(model@fnSim, c(list(theta), model@simArgs))
if (summStat) {
ssx[j, ] <- do.call(model@fnSum, c(x[j], model@sumArgs))
}
}
if (is.atomic(x[[1]]) && is.vector(x[[1]])) { # reduce to matrix
if (length(unique(sapply(x, FUN = length))) == 1) {
x <- matrix(unlist(x), ncol = length(x[[1]]), byrow = TRUE)
}
}
}
}
}
return (list(x = x, ssx = ssx))
}
)
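# A minimal usage sketch for the simulation method above with a toy normal
# model (assumes the package is loaded; the model below is illustrative only).
local({
  m <- newModel(fnSim = function(theta) rnorm(30, theta),
                fnSum = function(x) c(mean(x), var(x)),
                theta0 = 0, verbose = FALSE)
  out <- simulation(m, n = 5, theta = 0, seed = 1)
  dim(out$ssx)                            # 5 simulations by 2 summary statistics
})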
#' Compute the summary statistics with the given data
#' @description see \code{\link{MODEL}}
#' @param x The data to pass to the summary statistics function.
#' @param model A ``MODEL'' object.
setGeneric("summStat", function(x, model) standardGeneric("summStat"))
#' @description \code{summStat} computes the summary statistics with the given data and model object.
#' The summary statistics function and relevant arguments are obtained from the model.
#' @param x The data to pass to the summary statistics function.
#' @inheritParams BSL-class
#' @inheritParams bsl
#' @return A vector of the summary statistics.
#' @rdname MODEL-class
#' @export
setMethod("summStat",
signature(model = "MODEL"),
definition = function(x, model) {
stopifnot(!is.null(x))
do.call(model@fnSum, c(list(x), model@sumArgs))
}
)
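# A standalone sketch (not part of the package) of the do.call pattern used
# throughout: the data is prepended to the stored argument list before the
# user-supplied function is called (hypothetical summary function below).
local({
  fnSum <- function(x, delta) c(mean(x), quantile(x, delta, names = FALSE))
  sumArgs <- list(delta = 0.5)
  set.seed(1)
  do.call(fnSum, c(list(rnorm(100)), sumArgs))  # mean and median of the data
})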
setGeneric("fn", function(.Object) standardGeneric("fn"))
setMethod("fn",
signature = c(.Object = "MODEL"),
definition = function(.Object) {
if (!is.null(.Object@fnSimVec)) { # use vectorised simulation function
fn <- function(n, theta) {
x <- do.call(.Object@fnSimVec, c(list(n, theta), .Object@simArgs))
if (is.matrix(x)) {
ssx <- apply(x, FUN = function(y) do.call(.Object@fnSum, c(list(y), .Object@sumArgs)), MARGIN = 1)
} else {
ssx <- sapply(x, FUN = function(y) do.call(.Object@fnSum, c(list(y), .Object@sumArgs)))
}
if (is.vector(ssx)) {
return(as.matrix(ssx))
}
return(t(ssx))
}
fnPar <- NULL
} else { # non-vectorised simulation function
fnPar <- function(n, theta, parallelArgs = list()) {
j <- NULL
parallelArgs$.export <- c(parallelArgs$.export, ".Object")
do.call(foreach, c(list(j = 1:n, .combine = rbind), parallelArgs)) %dopar% {
x <- do.call(.Object@fnSim, c(list(theta), .Object@simArgs))
do.call(.Object@fnSum, c(list(x), .Object@sumArgs))
}
}
fn <- function(n, theta) {
ns <- ifelse(length(.Object@ns) == 0, getns(.Object), .Object@ns)
ssx <- array(0, c(n, ns))
for (j in 1:n) {
x <- do.call(.Object@fnSim, c(list(theta), .Object@simArgs))
ssx[j, ] <- do.call(.Object@fnSum, c(list(x), .Object@sumArgs))
}
ssx
}
}
return(list(fn = fn, fnPar = fnPar))
}
)
setGeneric("getns", valueClass = "integer", function(model) standardGeneric("getns"))
setMethod("getns",
signature = c(model = "MODEL"),
definition = function(model) {
if (!is.null(model@fnSimVec)) {
x <- do.call(model@fnSimVec, c(list(2, model@theta0), model@simArgs))
} else {
x <- list()
x[[1]] <- do.call(model@fnSim, c(list(model@theta0), model@simArgs))
}
if (is.matrix(x)) {
y <- x[1, ]
} else {
y <- x[[1]]
}
ns <- length(do.call(model@fnSum, c(list(y), model@sumArgs)))
return(as.integer(ns))
}
)
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/s4-MODEL.R
|
#' @include s4-MODEL.R
NULL
#' S4 class ``PENALTY''
#' @description This S4 class contains the penalty selection result from
#' function \code{\link{selectPenalty}}. \code{show} display the penalty
#' selection result. \code{plot} plot the penalty selection result using
#' ggplot2.
#' @rawRd \Rdversion{1.1}
#' @slot loglike A list of the log-likelihood values. The list contains multiple
#'   matrices (each corresponding to the result for a specific \code{n} value). The
#'   number of rows of each matrix equals the number of repeats \code{M}. The
#'   columns of each matrix correspond to different penalty values.
#' @slot n A vector of \code{n}, the number of simulations from the model per
#' MCMC iteration for estimating the synthetic likelihood.
#' @slot lambda A list, with each entry containing the vector of penalty values
#' for the corresponding choice of \code{n}.
#' @slot M The number of repeats used in estimating the standard deviation of
#' the estimated log synthetic likelihood.
#' @slot sigma The standard deviation of the log synthetic likelihood estimator
#' to aim for, usually a value between 1 and 2. This reflects the mixing of a
#' Markov chain.
#' @slot model A ``MODEL'' object generated with function \code{newModel}.
#' See \code{\link{newModel}}.
#' @slot stdLoglike A list containing the estimated standard deviations of the
#'   log-likelihoods.
#' @slot penalty The vector of selected penalty values for each given
#'   \code{n}, chosen from the candidate \code{lambda} list. The selected
#'   values produce the standard deviations \code{stdLoglike} closest to the
#'   target \code{sigma}.
#' @slot result The result data frame.
#' @slot call The original code used to run \code{\link{selectPenalty}}.
#' @seealso \code{\link{selectPenalty}} for the function that selects the
#' penalty parameter.
#' @aliases PENALTYclass
#' @export
PENALTY <- setClass("PENALTY",
slots = c(loglike = "list",
n = "numeric",
lambda = "list",
M = "integer",
sigma = "numeric",
model = "MODEL",
stdLoglike = "list",
penalty = "numeric",
result = "data.frame",
call = "call")
)
setMethod("initialize",
signature = "PENALTY",
definition = function(.Object, loglike, n, lambda, sigma, model, call) {
.Object@loglike <- loglike
.Object@M <- nrow(loglike[[1]])
if (missing(sigma)) {
.Object@sigma <- 1.5
cat("Set the target sigma value as 1.5 by default\n")
} else {
.Object@sigma <- sigma
}
.Object@n <- n
.Object@lambda <- lambda
if (!missing(model)) .Object@model <- model
if (!missing(call)) .Object@call <- call
validObject(.Object)
.Object <- computePenaltyResult(.Object)
return(.Object)
}
)
setValidity("PENALTY",
method = function(object) {
if (!is.list(object@loglike)) {
return("No loglike list found")
}
if (length(object@n) != length(object@lambda)) {
return("Length of n mismatch the length of lambda")
}
if (length(object@loglike) != length(object@n)) {
return("Length of the loglike list mismatch the length of n")
}
if (any(sapply(object@loglike, FUN = nrow) != object@M)) {
return("M mismatch the loglike list")
}
if (any(sapply(object@loglike, FUN = ncol) != sapply(object@lambda, FUN = length))) {
return("lambda mismatch the loglike list")
}
if (object@sigma <= 0) {
return("The target sigma value must be positive, a value between 0 and 1 is recommended")
}
TRUE
}
)
# #' @description Find the closest penalty value to the target sigma and format
# #' the result into a data frame. Notice that the closest value is not
# #' necessarily a local minimum or maximum.
setGeneric("computePenaltyResult", function(object) standardGeneric("computePenaltyResult"))
setMethod("computePenaltyResult",
signature = c("PENALTY"),
definition = function(object) {
N <- length(object@n)
stdLoglike <- vector("list", N)
idxClosest <- integer(N)
for (i in 1 : N) {
temp <- apply(object@loglike[[i]], FUN = sd, MARGIN = 2)
stdLoglike[[i]] <- temp
idxClosest[i] <- which.min(abs(temp - object@sigma))[1]
}
object@stdLoglike <- stdLoglike
              object@penalty <- mapply(function(l, idx) l[idx], object@lambda, idxClosest)
result <- vector("list", N)
for (i in 1 : N) {
isClosest <- logical(length(object@stdLoglike[[i]]))
isClosest[idxClosest[i]] <- TRUE
result[[i]] <- data.frame(n = object@n[i], penalty = object@lambda[[i]], logPenalty = log(object@lambda[[i]]),
stdLoglike = object@stdLoglike[[i]], isClosest = isClosest)
}
result <- Reduce(rbind, result)
object@result <- result
return(object)
}
)
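# A standalone sketch (not part of the package) of the selection rule above:
# pick the penalty whose estimated log-likelihood standard deviation is
# closest to the target sigma (dummy log-likelihood draws for illustration).
local({
  set.seed(1)
  lambda <- exp(seq(-3, 0, length.out = 10))
  loglike <- sapply(lambda, function(l) rnorm(100, sd = 0.5 + 2 * l))  # M = 100 repeats
  stdLoglike <- apply(loglike, 2, sd)
  sigma <- 1.5
  lambda[which.min(abs(stdLoglike - sigma))]   # the selected penalty
})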
#' @param object The S4 object of class ``PENALTY'' to show.
#' @rdname PENALTY-class
#' @export
setMethod("show",
signature = c(object = "PENALTY"),
definition = function(object) {
digits = max(3L, getOption("digits") - 4L)
cat("\nCall:\n", paste(deparse(object@call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
if (length(object@result)) {
r1 <- object@result[object@result$isClosest, c("n", "penalty", "stdLoglike")]
cat("Penalty selected based on the standard deviation of the loglikelihood:\n")
print(format(r1, digits = digits))
            } else {
              r1 <- NULL
              cat("No result to show\n")
            }
return(invisible(r1))
}
)
#' @param x The S4 object of class ``PENALTY'' to plot.
#' @param logscale A logical argument whether the x-axis (penalty) should be log transformed. The
#' default is \code{TRUE}.
#' @rdname PENALTY-class
setMethod("plot",
signature = c(x = "PENALTY"),
definition = function(x, logscale = TRUE) {
penalty <- logPenalty <- stdLoglike <- isClosest <- NULL
result <- x@result
n <- x@n
a <- floor(sqrt(length(n)))
b <- ceiling(length(n) / a)
nRepeats <- sapply(x@lambda, length)
yPosSigma <- sapply(n, FUN = function(xx) mean(range(result[result$n == xx, "stdLoglike"])))
textYSigma <- c(unlist(mapply(yPosSigma, nRepeats, FUN = rep)))
result$isClosest[!result$isClosest] <- NA
if (logscale) {
ggplot(data = result, aes(x = logPenalty, y = stdLoglike)) +
geom_line(color = "darkblue", linetype = "dashed", size = 1) +
facet_wrap( ~ n, scales = "free", nrow = a, ncol = b, labeller = label_both) +
geom_vline(aes(xintercept = logPenalty * isClosest), na.rm = TRUE, color = "forestgreen", linetype = 4) +
geom_label(aes(x = logPenalty * isClosest, y = textYSigma, label = paste("penalty == ", round(penalty, 3))),
hjust = 0.5, vjust = "inward", parse = TRUE, color = "white", fill = "#FE66A9", size = 2.7,
alpha = 0.8, na.rm = TRUE) +
labs(x = "log penalty", y = "standard deviation of log-likelihood", title = "Penalty Selection") +
theme(plot.title = element_text(size = 14, hjust = 0.5)) +
theme(strip.text.x = element_text(size = 12, face = "bold"), axis.title = element_text(size = 12))
} else {
ggplot(data = result, aes(x = penalty, y = stdLoglike)) +
geom_line(color = "darkblue", linetype = "dashed", size = 1) +
facet_wrap( ~ n, scales = "free", nrow = a, ncol = b, labeller = label_both) +
geom_vline(aes(xintercept = penalty * isClosest), na.rm = TRUE, color = "forestgreen", linetype = 4) +
geom_label(aes(x = penalty * isClosest, y = textYSigma, label = paste("penalty == ", round(penalty, 3))),
hjust = 0.5, vjust = "inward", parse = TRUE, color = "white", fill = "#FE66A9", size = 2.7,
alpha = 0.8, na.rm = TRUE) +
labs(x = "penalty", y = "standard deviation of log-likelihood", title = "Penalty Selection") +
theme(plot.title = element_text(size = 14, hjust = 0.5)) +
theme(strip.text.x = element_text(size = 12, face = "bold"), axis.title = element_text(size = 12))
}
}
)
#' Obtain the selected penalty values from a "PENALTY" object
#' @description see \code{\link{PENALTYclass}}
#' @param object A ``PENALTY'' class object.
#' @param ... Other arguments.
#' @return The selected penalty values.
setGeneric("getPenalty", function(object, ...) standardGeneric("getPenalty"))
#' @rdname PENALTY-class
#' @export
setMethod("getPenalty",
signature = c(object = "BSL"),
definition = function(object) {
object@penalty
}
)
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/s4-PENALTY.R
|
#' Selecting the Penalty Parameter
#'
#' @description This is the main function for selecting the shrinkage (graphical
#' lasso or Warton's estimator) penalty parameter for method BSL or semiBSL
#' based on a point estimate of the parameters. Parallel computing is
#' supported with the R package \code{foreach}. The penalty selection method
#' is outlined in \insertCite{An2019;textual}{BSL}.
#'
#' @param ssy A summary statistic vector for the observed
#' data.
#' @param n A vector of possible values of \code{n}, the
#' number of simulations from the model per MCMC iteration for estimating the
#' synthetic likelihood.
#' @param lambda A list, with each entry containing the vector of
#' penalty values to test for the corresponding choice of \code{n}.
#' @param theta A point estimate of the parameter value which
#' all of the simulations will be based on. By default, if \code{theta} is
#' \code{NULL}, it will be replaced by \code{theta0} from the given
#' \code{model}.
#' @param M The number of repeats to use in estimating the
#' standard deviation of the estimated log synthetic likelihood.
#' @param sigma The standard deviation of the log synthetic
#' likelihood estimator to aim for, usually a value between 1 and 2. This
#' parameter helps to control the mixing of a Markov chain.
#' @param method A string argument indicating the method to be
#' used. If the method is ``BSL'', the shrinkage is applied to the Gaussian
#' covariance matrix. Otherwise if the method is ``semiBSL'', the shrinkage is
#' applied to the correlation matrix of the Gaussian copula.
#' @param shrinkage A string argument indicating which shrinkage method to
#' be used. Current options are ``glasso'' for the graphical lasso method of
#' \insertCite{Friedman2008;textual}{BSL} and ``Warton'' for the ridge
#' regularisation method of \insertCite{Warton2008;textual}{BSL}.
#' @param parallelSim A logical value indicating whether parallel
#' computing should be used for simulation and summary statistic evaluation.
#' Default is \code{FALSE}.
#' @param parallelSimArgs A list of additional arguments to pass into the
#' \code{foreach} function. Only used when \code{parallelSim} is \code{TRUE},
#' default is \code{NULL}.
#' @param parallelMain A logical value indicating whether parallel
#' computing should be used for computing the graphical lasso function. Notice
#' that this should only be turned on when there are a lot of candidate values
#' in \code{lambda}. Default is \code{FALSE}.
#' @param ... Other arguments to pass to \code{\link{gaussianSynLike}} (``BSL''
#' method) or \code{\link{semiparaKernelEstimate}} (``semiBSL'' method).
#' @inheritParams bsl
#'
#' @return An S4 object \code{PENALTY} of the penalty selection results. The
#' \code{show} and \code{plot} methods are provided with the S4 class.
#'
#' @examples
#' \dontrun{
#' data(ma2)
#' model <- newModel(fnSimVec = ma2_sim_vec, fnSum = ma2_sum, simArgs = ma2$sim_args,
#' theta0 = ma2$start, fnLogPrior = ma2_prior)
#' theta <- c(0.6,0.2)
#'
#' # Performing tuning for BSLasso (BSL with glasso shrinkage estimation)
#' ssy <- ma2_sum(ma2$data)
#' lambda_all <- list(exp(seq(-3,0.5,length.out=20)), exp(seq(-4,-0.5,length.out=20)),
#' exp(seq(-5.5,-1.5,length.out=20)), exp(seq(-7,-2,length.out=20)))
#' set.seed(100)
#' sp_ma2 <- selectPenalty(ssy = ssy, n = c(50, 150, 300, 500), lambda_all, theta = theta,
#' M = 100, sigma = 1.5, model = model, method = 'BSL', shrinkage = 'glasso')
#' sp_ma2
#' plot(sp_ma2)
#' }
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @author Ziwen An, Leah F. South and Christopher Drovandi
#' @seealso \code{PENALTY} for the usage of the S4 class. \code{\link{ma2}},
#' \code{\link{cell}} and \code{\link{mgnk}} for examples. \code{\link{bsl}}
#' for the main function to run BSL.
#' @export
selectPenalty <- function(ssy, n, lambda, M, sigma = 1.5, model, theta = NULL,
method = c("BSL", "semiBSL"), shrinkage = c("glasso", "Warton"),
parallelSim = FALSE, parallelSimArgs = NULL, parallelMain = FALSE, verbose = 1L, ...) {
method <- match.arg(method)
shrinkage <- match.arg(shrinkage)
if (!verbose %in% c(0, 1, 2)) {
stop("verbose must be 0 or 1 or 2")
}
if (!parallelSim & !is.null(parallelSimArgs)) {
warning("\"parallelSimArgs\" is omitted in serial computing")
}
stopifnot(inherits(model, "MODEL"))
if (is.null(theta)) {
theta <- model@theta0
}
n <- as.vector(n)
N <- length(n)
if (is.atomic(lambda) && is.vector(lambda)) {
lambda <- rep(list(lambda), N)
}
if (length(lambda) != N) {
stop("lambda must be a list with the same length as n")
}
ns <- length(ssy)
call <- match.call()
if (verbose) {
cat("*** selecting penalty with", method, "likelihood and", shrinkage, "shrinkage estimator ***\n")
}
nMax <- max(n)
loglike <- vector("list", N)
for (i in 1 : N) loglike[[i]] <- array(NA, c(M, length(lambda[[i]])))
# map the simulation function
if (parallelSim) {
myFnSimSum <- function(n, theta) fn(model)$fnPar(n, theta, parallelSimArgs)
} else {
myFnSimSum <- fn(model)$fn
}
if (verbose == 1L) timeStart <- Sys.time()
for (m in 1 : M) {
flush.console()
if (verbose == 2L) {
cat("m =", m, "\n")
} else if (verbose == 1L){
timeElapsed <- difftime(Sys.time(), timeStart, units = "secs")
timeLeft <- timeElapsed / m * (M - m)
elapsed <- myTimeStr(timeElapsed)
left <- myTimeStr(timeLeft)
myMiniProgressBar(m / M, txt1 = paste("m =", m),
txt2 = paste0("elapsed = ", elapsed, ", remaining = ", left),
style = 2, label = c("=", ".", "|"))
flush.console()
}
# simulate with theta_prop and calculate summaries
ssx <- myFnSimSum(nMax, theta)
for (i in 1 : N) {
nCurr <- n[i]
nLambda <- length(lambda[[i]])
ssxCurr <- ssx[sample(nMax, nCurr), ]
if (!parallelMain) {
for (k in 1 : nLambda) {
lambdaCurr = lambda[[i]][k]
loglike[[i]][m, k] <- switch(method,
"BSL" = gaussianSynLike(ssy, ssxCurr, shrinkage = shrinkage, penalty = lambdaCurr, log = TRUE, ...),
"semiBSL" = semiparaKernelEstimate(ssy, ssxCurr, shrinkage = shrinkage, penalty = lambdaCurr, log = TRUE, ...))
}
} else {
loglike[[i]][m, ] <- foreach(k = 1 : nLambda, .combine = c, .packages = "glasso",
.export = c("gaussianSynLike", "semiparaKernelEstimate")) %dopar% {
lambdaCurr = lambda[[i]][k]
switch(method,
"BSL" = gaussianSynLike(ssy, ssxCurr, shrinkage = shrinkage, penalty = lambdaCurr, log = TRUE, ...),
"semiBSL" = semiparaKernelEstimate(ssy, ssxCurr, shrinkage = shrinkage, penalty = lambdaCurr, log = TRUE, ...))
}
}
}
}
if (verbose == 1L) cat("\n")
ret <- PENALTY(loglike = loglike, n = n, lambda = lambda, sigma = sigma, model = model, call = call)
return(ret)
}
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/selectPenalty.R
|
#' Estimate the semi-parametric synthetic (log) likelihood
#'
#' @description This function computes the semi-parametric synthetic likelihood
#' estimator of \insertCite{An2018}{BSL}. The advantage of this
#' semi-parametric estimator over the standard synthetic likelihood estimator
#' is that the semi-parametric one is more robust to non-normal summary
#' statistics. Kernel density estimation is used for modelling each univariate
#' marginal distribution, and the dependence structure between summaries are
#' captured using a Gaussian copula. Shrinkage on the correlation matrix
#' parameter of the Gaussian copula is helpful in decreasing the number of
#' simulations.
#'
#' @param kernel A string argument indicating the smoothing kernel to pass
#' into \code{density} for estimating the marginal distribution of each
#' summary statistic. Only ``gaussian" and ``epanechnikov" are available. The
#' default is ``gaussian".
#' @param shrinkage A string argument indicating which shrinkage method to
#' be used. The default is \code{NULL}, which means no shrinkage is used.
#' Current options are ``glasso'' for the graphical lasso method of
#' \insertCite{Friedman2008;textual}{BSL} and ``Warton'' for the ridge
#' regularisation method of \insertCite{Warton2008;textual}{BSL}.
#' @inheritParams bsl
#' @inheritParams gaussianSynLike
#'
#' @return The estimated synthetic (log) likelihood value.
#'
#' @references
#'
#' \insertAllCited{}
#'
#' \insertRef{Friedman2008}{BSL}
#'
#' \insertRef{Warton2008}{BSL}
#'
#' \insertRef{Boudt2012}{BSL}
#'
#' @examples
#' data(ma2)
#' ssy <- ma2_sum(ma2$data)
#' m <- newModel(fnSim = ma2_sim, fnSum = ma2_sum, simArgs = ma2$sim_args,
#' theta0 = ma2$start, sumArgs = list(delta = 0.5))
#' ssx <- simulation(m, n = 300, theta = c(0.6, 0.2), seed = 10)$ssx
#'
#' # check the distribution of the first summary statistic: highly non-normal
#' plot(density(ssx[, 1]))
#'
#' # the standard synthetic likelihood estimator over-estimates the likelihood here
#' gaussianSynLike(ssy, ssx)
#' # the semi-parametric synthetic likelihood estimator is more robust to non-normality
#' semiparaKernelEstimate(ssy, ssx)
#' # using shrinkage on the correlation matrix of the Gaussian copula is also possible
#' semiparaKernelEstimate(ssy, ssx, shrinkage = "Warton", penalty = 0.8)
#'
#' @seealso Other available synthetic likelihood estimators:
#' \code{\link{gaussianSynLike}} for the standard synthetic likelihood
#' estimator, \code{\link{gaussianSynLikeGhuryeOlkin}} for the unbiased
#' synthetic likelihood estimator, \code{\link{synLikeMisspec}} for the
#' Gaussian synthetic likelihood estimator for model misspecification.
#'
#' @export
semiparaKernelEstimate <- function (ssy, ssx, kernel = "gaussian", shrinkage = NULL,
penalty = NULL, log = TRUE) {
if (!is.null(shrinkage)) {
flagShrinkage <- TRUE
shrinkage <- match.arg(shrinkage, c("glasso", "Warton"))
} else {
flagShrinkage <- FALSE
}
if (!flagShrinkage && !is.null(penalty)) {
warning("\"penalty\" will be ignored since no shrinkage method is specified")
}
if (flagShrinkage && is.null(penalty)) {
stop("a penalty value must be specified to enable shrinkage estimation")
}
n <- nrow(ssx)
ns <- ncol(ssx)
stopifnot(ns >= 2)
stopifnot(length(ssy) == ns)
pdfy <- yu <- numeric(ns)
for (j in 1 : ns) {
f <- density(ssx[, j], kernel = kernel, n = 512, from = min(ssx[,j],ssy[j]),
to = max(ssx[,j],ssy[j]))
approxy <- approx(f$x, f$y, ssy[j])
pdfy[j] <- approxy$y
yu[j] <- mean(kernelCDF((ssy[j] - ssx[, j]) / f$bw, kernel))
}
# Gaussian rank correlation
rhohat <- gaussianRankCorr(ssx, TRUE)
if (!is.null(shrinkage)) {
RHOHAT <- p2P(rhohat)
Sigma <- switch(shrinkage,
"glasso" = glasso(RHOHAT, rho = penalty, penalize.diagonal = FALSE)$w,
"Warton" = corrWarton(RHOHAT, penalty))
rhohat <- P2p(Sigma)
}
# density
copula <- normalCopula(rhohat, dim = ns, dispstr = "un")
if (log) {
f <- dCopula(yu, copula, log = TRUE) + sum(log(pdfy))
} else {
f <- dCopula(yu, copula, log = FALSE) * prod(pdfy)
}
return(f)
}
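# A standalone sketch (not part of the package) of the estimator above for a
# bivariate toy example, assuming the 'copula' package is available. The
# Gaussian rank correlation is computed directly from normal scores here,
# rather than via the internal gaussianRankCorr/kernelCDF helpers.
local({
  library(copula)
  set.seed(1)
  ssx <- cbind(rexp(200), rnorm(200))          # simulated summaries, first one skewed
  ssy <- c(1, 0)                               # observed summaries
  pdfy <- yu <- numeric(2)
  for (j in 1:2) {
    f <- density(ssx[, j], kernel = "gaussian")
    pdfy[j] <- approx(f$x, f$y, ssy[j])$y      # KDE marginal density at ssy
    yu[j] <- mean(pnorm((ssy[j] - ssx[, j]) / f$bw))  # Gaussian kernel CDF at ssy
  }
  z <- qnorm(apply(ssx, 2, rank) / (nrow(ssx) + 1))   # normal scores
  rho <- cor(z)[1, 2]                          # Gaussian rank correlation
  dCopula(yu, normalCopula(rho, dim = 2), log = TRUE) + sum(log(pdfy))
})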
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/semiparaKernelEstimate.R
|
#' Generate a random sample of gamma for the R-BSL-M method of
#' \insertCite{Frazier2019;textual}{BSL} using slice sampling
#'
#' @description This function updates the gamma of the R-BSL-M method of
#' \insertCite{Frazier2019;textual}{BSL} with a slice sampler
#' \insertCite{Neal2003}{BSL}. Note this function is mainly designed for
#' internal usage.
#'
#' @param loglike The current log synthetic likelihood. This is computed with
#' function \code{\link{synLikeMisspec}} with the current gamma value.
#' @param tau Scale (or inverse rate) parameter of the Laplace prior
#' distribution for gamma.
#' @param w Step size for the stepping out in the slice sampler. The default
#' step size is 1.
#' @param std Standard deviation of the columns of ssx. If this is not
#' \code{NULL}, it will be used to save computation effort.
#' @param maxit The maximum number of iteration of the stepping out and shrink
#' steps of slice sampler. The default is 1e3.
#' @inheritParams gaussianSynLike
#' @inheritParams synLikeMisspec
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @seealso \code{\link{sliceGammaVariance}} for the slice sampler of the
#' variance inflated target distribution.
#'
#' @keywords internal
sliceGammaMean <- function(ssy, ssx, loglike, gamma = numeric(length(ssy)), tau = 1, w = 1,
std = NULL, maxit = 1e3) {
logGammaPrior <- function(x) dLaplace(x, rate = 1 / tau)
mu <- colMeans(ssx)
Sigma <- cov(ssx)
if (is.null(std) || length(std) != length(gamma)) {
std <- apply(ssx, FUN = sd, MARGIN = 2)
}
gammaCurr <- gamma
for (i in 1 : length(gamma)) {
A <- loglike
B <- logGammaPrior(gammaCurr)
target <- A + B - rexp(1)
curr <- gammaCurr[i]
lower <- curr - w
upper <- curr + w
# step out for lower limit
iter <- 1
while (iter <= maxit) {
gammaLower <- gammaCurr
gammaLower[i] <- lower
muLower <- mu + std * gammaLower
A <- mvtnorm::dmvnorm(ssy, mean = muLower, sigma = Sigma, log = TRUE)
B <- logGammaPrior(gammaLower)
targetLower <- A + B
if (targetLower < target) {
break
}
lower <- lower - w
iter <- iter + 1
}
# step out for upper limit
iter <- 1
while (iter <= maxit) {
gammaUpper <- gammaCurr
gammaUpper[i] <- upper
muUpper <- mu + std * gammaUpper
A <- mvtnorm::dmvnorm(ssy, mean = muUpper, sigma = Sigma, log = TRUE)
B <- logGammaPrior(gammaUpper)
targetUpper <- A + B
if (targetUpper < target) {
break
}
upper <- upper + w
iter <- iter + 1
}
# shrink
iter <- 1
while (iter <= maxit) {
prop <- runif(1, lower, upper)
gammaProp <- gammaCurr
gammaProp[i] <- prop
muProp <- mu + std * gammaProp
A <- mvtnorm::dmvnorm(ssy, mean = muProp, sigma = Sigma, log = TRUE)
B <- logGammaPrior(gammaProp)
targetProp <- A + B
if (targetProp > target) {
gammaCurr <- gammaProp
loglike <- A
break
}
if (prop < curr) {
lower <- prop
} else {
upper <- prop
}
iter <- iter + 1
}
}
ret <- gammaCurr
attr(ret, 'loglike') <- loglike
return (ret)
}
dLaplace <- function(x, rate = 1) {
n <- length(x)
n * log(rate / 2) - rate * sum(abs(x))
}
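# A standalone sketch (not part of the package) of the stepping-out/shrinkage
# slice sampler of Neal (2003) mirrored above, here for a univariate standard
# normal target (illustration only).
local({
  slice1d <- function(x0, logTarget, w = 1) {
    height <- logTarget(x0) - rexp(1)          # log of a uniform height under the curve
    lower <- x0 - w; upper <- x0 + w
    while (logTarget(lower) > height) lower <- lower - w   # step out to the left
    while (logTarget(upper) > height) upper <- upper + w   # step out to the right
    repeat {                                   # shrink until a point lands in the slice
      prop <- runif(1, lower, upper)
      if (logTarget(prop) > height) return(prop)
      if (prop < x0) lower <- prop else upper <- prop
    }
  }
  set.seed(1)
  draws <- numeric(2000); x <- 0
  for (i in seq_along(draws)) draws[i] <- x <- slice1d(x, function(z) dnorm(z, log = TRUE))
  round(c(mean(draws), sd(draws)), 2)          # approximately 0 and 1
})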
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/sliceGammaMean.R
|
#' Generate a random sample of gamma for the R-BSL-V method of
#' \insertCite{Frazier2019;textual}{BSL} using slice sampling
#'
#' @description This function updates the gamma parameter with a slice sampler
#' \insertCite{Neal2003}{BSL}. The target distribution is the variance
#' inflated approximate posterior of BSL with model misspecification. See
#' \insertCite{Frazier2019;textual}{BSL}. Note this function is mainly
#' designed for internal usage.
#'
#' @param tau Numeric. Scale (or inverse rate) parameter of the exponential
#' prior distribution.
#' @inheritParams gaussianSynLike
#' @inheritParams synLikeMisspec
#' @inheritParams sliceGammaMean
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @seealso \code{\link{sliceGammaMean}} for the slice sampler of the mean
#' adjusted target distribution.
#'
#' @keywords internal
sliceGammaVariance <- function(ssy, ssx, loglike, gamma = numeric(length(ssy)), tau = 1, w = 1,
std = NULL, maxit = 1e3) {
logGammaPrior <- function(x) sum(dexp(x, rate = 1 / tau, log = TRUE))
mu <- colMeans(ssx)
Sigma <- cov(ssx)
if (is.null(std) || length(std) != length(gamma)) {
std <- apply(ssx, FUN = sd, MARGIN = 2)
}
gammaCurr <- gamma
for (i in 1 : length(gamma)) {
A <- loglike
B <- logGammaPrior(gammaCurr)
target <- A + B - rexp(1)
curr <- gammaCurr[i]
lower <- 0
upper <- curr + w
# step out for upper limit
    iter <- 1
while (iter <= maxit) {
gammaUpper <- gammaCurr
gammaUpper[i] <- upper
SigmaUpper <- Sigma + diag((std * gammaUpper) ^ 2)
A <- mvtnorm::dmvnorm(ssy, mean = mu, sigma = SigmaUpper, log = TRUE)
B <- logGammaPrior(gammaUpper)
targetUpper <- A + B
if (targetUpper < target) {
break
}
upper <- upper + w
iter <- iter + 1
}
# shrink
iter <- 1
while (iter <= maxit) {
prop <- runif(1, lower, upper)
gammaProp <- gammaCurr
gammaProp[i] <- prop
SigmaProp <- Sigma + diag((std * gammaProp) ^ 2)
A <- mvtnorm::dmvnorm(ssy, mean = mu, sigma = SigmaProp, log = TRUE)
B <- logGammaPrior(gammaProp)
targetProp <- A + B
if (targetProp > target) {
gammaCurr <- gammaProp
loglike <- A
break
}
if (prop < curr) {
lower <- prop
} else {
upper <- prop
}
iter <- iter + 1
}
}
ret <- gammaCurr
attr(ret, 'loglike') <- loglike
return (ret)
}
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/sliceGammaVariance.R
|
#' Estimate the Gaussian synthetic (log) likelihood whilst acknowledging model
#' incompatibility
#'
#' @description This function estimates the Gaussian synthetic likelihood whilst
#' acknowledging that there may be incompatibility between the model and the
#' observed summary statistic. The method has two different ways to
#' account for and detect incompatibility (mean adjustment and variance
#' inflation). An additional free parameter \code{gamma} is employed to account for the
#' model misspecification. See the R-BSL methods of
#' \insertCite{Frazier2019;textual}{BSL} for more details. Note this function
#'   is mainly designed for internal use, as the latent variable \code{gamma}
#'   needs to be chosen by the user otherwise. Alternatively, \code{gamma} is updated with a slice
#' sampler \insertCite{Neal2003}{BSL}, which is the method of
#' \insertCite{Frazier2019;textual}{BSL}.
#'
#' @param type A string argument indicating which method is used to account for
#' and detect potential incompatibility. The two options are "mean" and
#' "variance" for mean adjustment and variance inflation, respectively.
#' @param gamma The additional latent parameter to account for possible
#' incompatability between the model and observed summary statistic. In
#' ``BSLmisspec'' method, this is updated with a slice sampler
#' \insertCite{Neal2003}{BSL}. The default gamma implies no model misspecification
#' and is equivalent to the standard \code{\link{gaussianSynLike}} estimator.
#' @inheritParams gaussianSynLike
#'
#' @return The estimated synthetic (log) likelihood value.
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @examples
#' # a toy model (for details see section 4.1 from Frazier et al 2019)
#' # the true underlying model is a normal distribution with standard deviation equals to 0.2
#' # whilst the data generation process has the standard deviation fixed to 1
#' set.seed(1)
#' y <- rnorm(50, 1, sd = 0.2)
#' ssy <- c(mean(y), var(y))
#' m <- newModel(fnSim = function(theta) rnorm(50, theta), fnSum = function(x) c(mean(x), var(x)),
#' theta0 = 1, fnLogPrior = function(x) log(dnorm(x, sd = sqrt(10))))
#' ssx <- simulation(m, n = 300, theta = 1, seed = 10)$ssx
#'
#' # fix gamma to a nonzero value for illustration (within BSLmisspec, gamma is updated with a slice sampler)
#' gamma <- rep(0.1, length(ssy))
#' synLikeMisspec(ssy, ssx, type = "variance", gamma = gamma)
#'
#' @seealso Other available synthetic likelihood estimators:
#'   \code{\link{gaussianSynLike}} for the standard synthetic likelihood
#'   estimator, \code{\link{gaussianSynLikeGhuryeOlkin}} for the unbiased
#'   synthetic likelihood estimator, and \code{\link{semiparaKernelEstimate}} for
#'   the semi-parametric likelihood estimator. Slice samplers to sample gamma:
#'   \code{\link{sliceGammaMean}} and
#'   \code{\link{sliceGammaVariance}} (internal functions).
#'
#' @export
synLikeMisspec <- function(ssy, ssx, type = c("mean", "variance"), gamma = numeric(length(ssy)),
log = TRUE, verbose = FALSE) {
stopifnot(length(gamma) == length(ssy))
type <- match.arg(type)
n <- nrow(ssx)
d <- ncol(ssx)
mu <- colMeans(ssx)
Sigma <- cov(ssx)
std <- apply(ssx, MARGIN = 2, FUN = sd)
if (type == "mean") {
mu <- mu + std * gamma
}
if (type == "variance") {
Sigma <- Sigma + diag((std * gamma) ^ 2)
}
loglike <- try(mvtnorm::dmvnorm(ssy, mean = mu, sigma = Sigma, log = log))
if (inherits(loglike, 'try-error')) {
if (verbose) {
cat('*** reject (probably singular cov(ssx) matrix) ***\n')
}
return (-Inf)
}
attr(loglike, 'std') <- std
return (loglike)
}
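# A standalone sketch (not part of the package) of the variance-inflation
# adjustment above, assuming the 'mvtnorm' package is available. Inflating the
# variance of the incompatible summary makes the observed statistic far less
# surprising under the synthetic likelihood.
local({
  set.seed(1)
  ssx <- matrix(rnorm(600), 300, 2)            # simulated summaries
  ssy <- c(4, 0)                               # first observed summary is incompatible
  mu <- colMeans(ssx); Sigma <- cov(ssx); std <- apply(ssx, 2, sd)
  gamma <- c(3, 0)                             # inflate the first variance only
  c(standard = mvtnorm::dmvnorm(ssy, mu, Sigma, log = TRUE),
    inflated = mvtnorm::dmvnorm(ssy, mu, Sigma + diag((std * gamma)^2), log = TRUE))
})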
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/synLikeMisspec.R
|
#' Toad example
#'
#' @description This example estimates the parameter for the toad example. The
#' model simulates the movement of an amphibian called Fowler's toad. The
#' model is proposed by \insertCite{Marchand2017;textual}{BSL}. This example
#' includes both simulated and real data. The real data is obtained from
#' the supplementary material of \insertCite{Marchand2017;textual}{BSL}.
#' The journal article \insertCite{An2022;textual}{BSL} provides a full
#' description of how to use this package for the toad example.
#'
#' @param theta A vector of proposed model parameters,
#' \eqn{\alpha}, \eqn{\gamma} and \eqn{p_0}.
#' @param ntoads The number of toads to simulate in the observation.
#' @param ndays The number of days observed.
#' @param model Which model to be used: 1 for the random return model, 2 for the
#' nearest return model, and 3 for the distance-based return probability
#' model. The default is 1.
#' @param d0 Characteristic distance for model 3. Only used if \code{model} is
#' 3.
#' @param na Logical. This is the index matrix for missing observations. By
#' default, \code{matrix(FALSE, ndays, ntoads)} indicates there is no
#' missingness in the observation matrix.
#' @param X The data matrix.
#' @param p The numeric vector of probabilities to compute the quantiles.
#' @param lag The lag of days to compute the summary statistics, default as 1,
#' 2, 4 and 8.
#'
#' @details The example includes the three different returning models of
#' \insertCite{Marchand2017;textual}{BSL}. Please see
#' \insertCite{Marchand2017;textual}{BSL} for a full description of the toad
#' model, and also \insertCite{An2018;textual}{BSL} for Bayesian inference
#' with the semi-BSL method.
#'
#' @usage data(toad)
#'
#' @section datasets (simulated and real):
#'
#' A simulated dataset and a real dataset are provided in this example. Both
#' datasets contain observations from 66 toads for 63 days. The simulated
#' dataset is simulated with parameter
#' \eqn{\theta = (1.7, 35,
#' 0.6)}. This is the data used in \insertCite{An2018;textual}{BSL}. The real
#' dataset is obtained from the supplementary data of
#' \insertCite{Marchand2017;textual}{BSL}.
#'
#' \itemize{
#'
#' \item \code{data_simulated}: A 63
#' \eqn{\times} 66 matrix of the observed
#' toad locations (simulated data).
#'
#' \item \code{data_real}: A 63
#' \eqn{\times} 66 matrix of the observed
#' toad locations (real data).
#'
#' \item \code{cov}: The covariance matrix of a multivariate normal random
#' walk proposal distribution used in the MCMC, in the form of a 3
#' \eqn{\times} 3 matrix.
#'
#' \item \code{theta0}: A vector of suitable initial values of the parameters
#' for MCMC.
#'
#' \item \code{sim_args_simulated} and \code{sim_args_real}: A list of the
#' arguments to pass into the simulation function.
#'
#' \itemize{
#'
#' \item \code{ndays}: The number of days observed.
#'
#' \item \code{ntoads}: The total number of toads being observed.
#'
#' \item \code{model}: Indicator of which model to be used.
#'
#' \item \code{na}: Indicator matrix for missingness.
#'
#' }
#'
#' }
#'
#' @examples
#' \dontrun{
#' require(doParallel) # You can use a different package to set up the parallel backend
#'
#' data(toad)
#'
#' ## run standard BSL for the simulated dataset
#' model1 <- newModel(fnSim = toad_sim, fnSum = toad_sum, theta0 = toad$theta0,
#' fnLogPrior = toad_prior, simArgs = toad$sim_args_simulated,
#' thetaNames = expression(alpha,gamma,p[0]))
#' paraBound <- matrix(c(1,2,0,100,0,0.9), 3, 2, byrow = TRUE)
#'
#' # Performing BSL (reduce the number of iterations M if desired)
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' resultToadSimulated <- bsl(toad$data_simulated, n = 1000, M = 10000, model = model1,
#' covRandWalk = toad$cov, logitTransformBound = paraBound,
#' parallel = TRUE, verbose = 1L, plotOnTheFly = 100)
#' stopCluster(cl)
#' registerDoSEQ()
#' show(resultToadSimulated)
#' summary(resultToadSimulated)
#' plot(resultToadSimulated, thetaTrue = toad$theta0, thin = 20)
#'
#' ## run standard BSL for the real dataset
#' model2 <- newModel(fnSim = toad_sim, fnSum = toad_sum, theta0 = toad$theta0,
#' fnLogPrior = toad_prior, simArgs = toad$sim_args_real,
#' thetaNames = expression(alpha,gamma,p[0]))
#' paraBound <- matrix(c(1,2,0,100,0,0.9), 3, 2, byrow = TRUE)
#'
#' # Performing BSL (reduce the number of iterations M if desired)
#' # Opening up the parallel pools using doParallel
#' cl <- makeCluster(min(detectCores() - 1,2))
#' registerDoParallel(cl)
#' resultToadReal <- bsl(toad$data_real, n = 1000, M = 10000, model = model2,
#' covRandWalk = toad$cov, logitTransformBound = paraBound,
#' parallel = TRUE, verbose = 1L, plotOnTheFly = 100)
#' stopCluster(cl)
#' registerDoSEQ()
#' show(resultToadReal)
#' summary(resultToadReal)
#' plot(resultToadReal, thetaTrue = toad$theta0, thin = 20)
#' }
#'
#' @references
#'
#' \insertAllCited{}
#'
#' @author Ziwen An, Leah F. South and
#' Christopher Drovandi
#' @name toad
NULL
#' @describeIn toad Simulates data from the model, using C++ in the backend.
#' @export
toad_sim <- function (theta, ntoads, ndays, model = 1, d0 = 100, na = matrix(FALSE, ndays, ntoads)) {
stopifnot(model %in% 1:3)
X <- sim_toad(theta, ntoads, ndays, model, d0)
X[na] <- NA
return (X)
}
#' @describeIn toad Computes the summary statistics for this example. The summary
#' statistics are the log differences between adjacent quantiles and also the median.
#' @export
toad_sum <- function(X, lag = c(1,2,4,8), p = seq(0,1,0.1)) {
nlag <- length(lag)
ssx <- c()
for (k in 1 : nlag) {
disp <- obsMat2deltax(X, lag[k])
indret <- disp < 10
noret <- disp[!indret]
logdiff <- log(diff(quantile(noret, probs = p, names = FALSE)))
ssx <- c(ssx, mean(indret), median(noret), logdiff)
}
return (ssx)
}
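# A standalone sketch (not part of the package) of the per-lag summary used in
# toad_sum above, with hypothetical displacements standing in for the output
# of the C++ internal obsMat2deltax.
local({
  set.seed(1)
  disp <- abs(rnorm(500, 0, 40))               # absolute displacements for one lag
  indret <- disp < 10                          # "returns": moves of less than 10 m
  noret <- disp[!indret]                       # non-return displacements
  logdiff <- log(diff(quantile(noret, probs = seq(0, 1, 0.1), names = FALSE)))
  c(mean(indret), median(noret), logdiff)      # 1 + 1 + 10 = 12 statistics per lag
})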
#' @describeIn toad Evaluates the log prior at the chosen parameters.
#' @export
toad_prior <- function(theta) {
log(theta[1] > 1 & theta[1] < 2 & theta[2] > 0 & theta[2] < 80 & theta[3] > 0 & theta[3] < 1)
}
|
/scratch/gouwar.j/cran-all/cranData/BSL/R/toad.R
|
# Fitting heteroscedastic General model
hetero_general=function(formulamean,formulavar,data,W1,W2=NULL,nsim,burn,step,prior,initial,kernel="normal",mateq=TRUE,seed=0,impacts=TRUE){
## Mean model ##
y_n_mean <- as.character(formulamean[[2]])
X0_mean <- as.character(formulamean[[3]])[-1]
X1_mean <- as.character(do.call("c",sapply(X0_mean, function(x){strsplit(x,"\\+")})))
X_n_mean <- gsub(" ","",X1_mean)
y_mean <- data[,which(names(data)==y_n_mean)]
X_mean <- as.matrix(data[,which(names(data)%in%X_n_mean)])
## Variance model ##
X0_var <- as.character(formulavar)[[2]]
X1_var <- as.character(do.call("c",sapply(X0_var, function(x){strsplit(x,"\\+")})))
X_n_var <- gsub(" ","",X1_var)
X_var <- as.matrix(data[,which(names(data)%in%X_n_var)])
## Prior information ##
b_pri <- prior$b_pri
B_pri <- prior$B_pri
g_pri <- prior$g_pri
G_pri <- prior$G_pri
## Initial values ##
beta_0 <- initial$beta_0
gammas_0 <- initial$gamma_0
rho_0 <- initial$rho_0
lambda_0 <- initial$lambda_0
  output <- hetero_general_int(y_mean,X_mean,X_var,W1,W2,nsim,burn,step,b_pri,B_pri,g_pri,G_pri,beta_0,gammas_0,rho_0,lambda_0,kernel=kernel,seed=seed,mateq=mateq,impacts=impacts)
return(output)
}
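# A standalone sketch (not part of the package) of the formula parsing
# performed above: extract the bare variable names from the right-hand side
# (hypothetical formula for illustration).
local({
  fm <- y ~ x1 + x2 + x3
  parts <- as.character(fm[[3]])[-1]             # c("x1 + x2", "x3")
  gsub(" ", "", unlist(strsplit(parts, "\\+")))  # "x1" "x2" "x3"
})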
#' Internal fitting routine for the heteroscedastic general spatial model
#'
#' @keywords internal
#'
hetero_general_int=function(y,X,Z,W1,W2=NULL,nsim,burn,step,b_pri,B_pri,g_pri,G_pri,beta_0,gammas_0,rho_0,lambda_0,kernel="normal",seed=0,mateq=TRUE,impacts=TRUE)
{
set.seed(seed)
  ######## Read the input data
y=as.matrix(y)
if (is.null(X) | is.null(y) ){
stop("No data")
}
if(burn>nsim | burn<0){
stop("Burn must be between 0 and nsim")
}
if(nsim<=0){
stop("There must be more than 0 simulations")
}
if(step<0 | step > nsim){
stop("Jump length must not be lesser than 0 or greater than nsim")
}
  rowst=function(x){
    x/sum(x)
  }
if(mateq==TRUE){
if(class(W1)=="nb"){
matstand=nb2mat(W1)
mat0=nb2listw(W1,style="B")
mat=listw2mat(mat0)
matstand2=matstand
mat2=mat
}
else{
if(class(W1)=="listw"){
mat=listw2mat(W1)
matstand=apply(mat,2,rowst)
matstand=t(matstand)
matstand2=matstand
mat2=mat
}
else{
if(sum(rowSums(W1))==nrow(X))
{
matstand=W1
mat=matrix(nrow=nrow(X),ncol=nrow(X))
for(i in 1:nrow(mat)){
for(j in 1:ncol(mat)){
if(matstand[i,j]==0){mat[i,j]=0}
else{mat[i,j]=1/matstand[i,j]}
}
}
}
else{
mat=W1
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
matstand2=matstand
}
}
}
else{
if(class(W1)=="nb"){
matstand=nb2mat(W1)
mat0=nb2listw(W1,style="B")
mat=listw2mat(mat0)
}
else{
if(class(W1)=="listw"){
mat=listw2mat(W1)
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
else{
if(sum(rowSums(W1))==nrow(X))
{
matstand=W1
mat=matrix(nrow=nrow(X),ncol=nrow(X))
for(i in 1:nrow(mat)){
for(j in 1:ncol(mat)){
if(matstand[i,j]==0){mat[i,j]=0}
else{mat[i,j]=1}
}
}
}
else{
mat=W1
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
}
}
if(class(W2)=="nb"){
matstand2=nb2mat(W2)
mat02=nb2listw(W2,style="B")
mat2=listw2mat(mat02)
}
else{
if(class(W2)=="listw"){
mat2=listw2mat(W2)
matstand2=apply(mat2,2,rowst)
matstand2=t(matstand2)
}
else{
if(sum(rowSums(W2))==nrow(X))
{
matstand2=W2
mat2=matrix(nrow=nrow(X),ncol=nrow(X))
for(i in 1:nrow(mat2)){
for(j in 1:ncol(mat2)){
if(matstand2[i,j]==0){mat2[i,j]=0}
else{mat2[i,j]=1}
}
}
}
else{
mat2=W2
matstand2=apply(mat2,2,rowst)
matstand2=t(matstand2)
}
}
}
}
dpost <- function(betas,gammas,rho,lambda) {
A=diag(nrow(X))-rho*matstand
B=diag(nrow(X))-lambda*matstand
Sigma=diag(c(exp((Z)%*%gammas)))
solvSigma=diag(c(1/diag(Sigma)))
logdetSigma = sum(Z%*%gammas)
k=t(B%*%(A%*%y-X%*%(betas)))%*%solvSigma%*%(B%*%(A%*%y-X%*%(betas)))
fc.y=k
fc.beta=t(betas - b_pri)%*%solve(B_pri)%*%(betas-b_pri)
fc.gamma=t(gammas - g_pri)%*%solve(G_pri)%*%(gammas-g_pri)
# dp <- (det(Sigma)^(-1/2))*det(A)*det(B)*exp(-0.5*(fc.y + fc.beta+fc.gamma))
# dp
logdp <- (-1/2)*logdetSigma + log(det(A)) + log(det(B)) -0.5*fc.y - 0.5*fc.beta - 0.5*fc.gamma
return(logdp)
}
  # Generate draws from the proposal distributions
r.proposal_gamma=function(Gammas){
a.now=Z%*%Gammas
A=diag(nrow(X))-Rho*matstand
B=diag(nrow(X))-Lambda*matstand
b.now=B%*%(A%*%y-X%*%betas.now)
y.now=a.now+(b.now^2/exp(a.now))-1
G_pos=solve(solve(G_pri)+0.5*t(Z)%*%Z)
g_pos=G_pos%*%(solve(G_pri)%*%g_pri+0.5*(t(Z)%*%y.now))
gammas.pro=rmvnorm(1,g_pos,G_pos)
gammas.pro
}
dproposal_gamma<-function(gammas.now, gammas.old){
a.now=Z%*%gammas.old
A=diag(nrow(X))-Rho*matstand
    B=diag(nrow(X))-Lambda*matstand2
b.now=B%*%(A%*%y-X%*%betas.now)
y.now=a.now+(b.now^2/exp(a.now))-1
G_pos=solve(solve(G_pri)+0.5*t(Z)%*%Z)
g_pos=G_pos%*%(solve(G_pri)%*%g_pri+0.5*(t(Z)%*%y.now))
dmvnorm(as.vector(gammas.now),as.vector(g_pos),G_pos,log = TRUE)
}
dproposal_lambda<-function(lambda){
A=diag(nrow(X))-Rho*matstand
Sigma=diag(c(exp(Z%*%Gammas)))
solvSigma=diag(1/c(exp(Z%*%Gammas)))
    a=t(A%*%y-X%*%betas.now)%*%t(matstand2)%*%solvSigma%*%matstand2%*%(A%*%y-X%*%betas.now)
    b=t(A%*%y-X%*%betas.now)%*%t(matstand2)%*%solvSigma%*%(A%*%y-X%*%betas.now)
dnorm(lambda,b/a,1/sqrt(a),log = TRUE)
}
dproposal_rho<-function(rho){
    B=diag(nrow(X))-Lambda*matstand2
Sigma=diag(c(exp(Z%*%Gammas)))
solvSigma=diag(1/c(exp(Z%*%Gammas)))
a=t(y)%*%t(matstand)%*%t(B)%*%solvSigma%*%B%*%matstand%*%y
b=t(y)%*%t(matstand)%*%t(B)%*%solvSigma%*%B%*%(y-X%*%betas.now)
dnorm(rho,b/a,1/sqrt(a),log = TRUE)
}
  # Metropolis-Hastings algorithm
beta.mcmc=matrix(NA,nrow=nsim,ncol(X))
gamma.mcmc=matrix(NA,nrow=nsim,ncol(Z))
rho.mcmc=c()
lambda.mcmc=c()
ind1=rep(0,nsim)
ind2=rep(0,nsim)
ind3=rep(0,nsim)
logV_DIC=c()
pb <- txtProgressBar(min = 0, max = nsim, style = 3)
if(kernel=="uniform"){
for(i in 1:nsim){
      # Set up the conditional posterior values
if(i==1){
Gammas=gammas_0
Sigma_0=diag(c(exp((Z)%*%Gammas)))
Sigma=Sigma_0
Rho=rho_0
Lambda=lambda_0
}
else{
Sigma=diag(c(exp(Z%*%Gammas)))
}
A=diag(nrow(X))-Rho*matstand
      B=diag(nrow(X))-Lambda*matstand2
B_pos=solve(t(X)%*%t(B)%*%solve(Sigma)%*%B%*%X+solve(B_pri))
b_pos=B_pos%*%(t(X)%*%t(B)%*%solve(Sigma)%*%B%*%A%*%y+solve(B_pri)%*%b_pri)
betas.now=c(rmvnorm(1,b_pos,B_pos))
      ### Proposal for gamma
gammas.now=c(r.proposal_gamma(Gammas))
q1.1=dproposal_gamma(gammas.now,Gammas)
q2.1=dproposal_gamma(Gammas,gammas.now)
p1.1=dpost(betas.now,gammas.now,Rho,Lambda)
p2.1=dpost(betas.now,Gammas,Rho,Lambda)
      # dpost/dproposal return log-densities; form the MH ratio on the log scale
      T.val=min(1,exp((p1.1-p2.1)+(q2.1-q1.1)))
      u<-runif(1)
if (u <=T.val) {
Gammas= gammas.now
ind1[i] = 1
}
      ### Proposal for rho
rho.now=runif(1,1/abs(min(eigen(mat)$values)),1)
p1.2=dpost(betas.now,Gammas,rho.now,Lambda)
p2.2=dpost(betas.now,Gammas,Rho,Lambda)
      T.val2=min(1,exp(p1.2-p2.2))
      u<-runif(1)
if (u <=T.val2) {
Rho <- rho.now
ind2[i] = 1
}
      ### Proposal for lambda
lambda.now=runif(1,1/abs(min(eigen(mat)$values)),1)
p1.3=dpost(betas.now,Gammas,Rho,lambda.now)
p2.3=dpost(betas.now,Gammas,Rho,Lambda)
      T.val3=min(1,exp(p1.3-p2.3))
      u<-runif(1)
if (u <=T.val3) {
Lambda=lambda.now
ind3[i]=1
}
beta.mcmc[i,]<-betas.now
      # record the current (accepted) state of each chain
      gamma.mcmc[i,]<-Gammas
      rho.mcmc[i]<-Rho
      lambda.mcmc[i]<-Lambda
      logdetSigma=sum(Z%*%gamma.mcmc[i,])
      detB=det(diag(nrow(X))-lambda.mcmc[i]*matstand2)
      detA=det(diag(nrow(X))-rho.mcmc[i]*matstand)
      A=(diag(nrow(X))-rho.mcmc[i]*matstand)
      Yg=(diag(nrow(X))-lambda.mcmc[i]*matstand2)%*%(A%*%y-X%*%beta.mcmc[i,])
      logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detA)+log(detB)-0.5*logdetSigma-0.5*t(Yg)%*%diag(1/c(exp(Z%*%gamma.mcmc[i,])))%*%Yg
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
}
}
if(kernel=="normal"){
for(i in 1:nsim){
      # Set up the conditional posterior values
if(i==1){
Gammas=gammas_0
Sigma_0=diag(c(exp((Z)%*%Gammas)))
Sigma=Sigma_0
Rho=rho_0
Lambda=lambda_0
}
else{
Sigma=diag(c(exp(Z%*%Gammas)))
}
A=diag(nrow(X))-Rho*matstand
      B=diag(nrow(X))-Lambda*matstand2
solvSigma=diag(1/c(exp(Z%*%Gammas)))
B_pos=solve(t(X)%*%t(B)%*%solvSigma%*%B%*%X+solve(B_pri))
b_pos=B_pos%*%(t(X)%*%t(B)%*%solvSigma%*%B%*%A%*%y+solve(B_pri)%*%b_pri)
betas.now=c(rmvnorm(1,b_pos,B_pos))
      ### Proposal for gamma
gammas.now=c(r.proposal_gamma(Gammas))
q1.1=dproposal_gamma(gammas.now,Gammas)
q2.1=dproposal_gamma(Gammas,gammas.now)
p1.1=dpost(betas.now,gammas.now,Rho,Lambda)
p2.1=dpost(betas.now,Gammas,Rho,Lambda)
      met.a1 <- p1.1 - p2.1   # log-posterior difference (dpost returns logs)
      met.b1 <- q2.1 - q1.1   # proposal correction: log q(old|new) - log q(new|old)
      T.val1=min(0,met.a1+met.b1)
u<-runif(1)
# if(p2.1==0){T.val=0}
# if(q2.1==0){T.val=0}
if (u <=exp(T.val1)) {
Gammas= gammas.now
ind1[i] = 1
}
      ### Proposal for rho
Sigma=diag(c(exp(Z%*%Gammas)))
solvSigma=diag(1/c(exp(Z%*%Gammas)))
B=diag(nrow(X))-Lambda*matstand2
eigenvals <- eigen(matstand)$values
lowlim <- -1/(max(abs(eigenvals[eigenvals<0])))
a=t(y)%*%t(matstand)%*%t(B)%*%solvSigma%*%B%*%matstand%*%y
b=t(y)%*%t(matstand)%*%t(B)%*%solvSigma%*%B%*%(y-X%*%betas.now)
rho.now=rnorm(1,b/a,1/sqrt(a))
#while(det(diag(nrow(X))-rho.now*matstand)<=0){# || rho.now>1 || rho.now< lowlim){
while(rho.now>1 || rho.now< lowlim){
rho.now <- rnorm(1,b/a,1/sqrt(a))
}
p1.2=dpost(betas.now,Gammas,rho.now,Lambda)
p2.2=dpost(betas.now,Gammas,Rho,Lambda)
q1.2=dproposal_rho(rho.now)
q2.2=dproposal_rho(Rho)
      met.a2 <- p1.2 - p2.2   # log-posterior difference
      met.b2 <- q2.2 - q1.2   # log proposal correction
      T.val2=min(0,met.a2+met.b2)
#T.val2=min(1,(p1.2/p2.2)*(q1.2/q2.2))
# if(p2.2==0){T.val2=0}
# if(q2.2==0){T.val2=0}
u<-runif(1)
if (u <=exp(T.val2)) {
Rho <- rho.now
ind2[i] = 1
}
      ### Proposal for lambda
Sigma=diag(c(exp(Z%*%Gammas)))
solvSigma=diag(1/c(exp(Z%*%Gammas)))
A=diag(nrow(X))-Rho*matstand
eigenvals <- eigen(matstand2)$values
lowlim <- -1/(max(abs(eigenvals[eigenvals<0])))
      a=t(A%*%y-X%*%betas.now)%*%t(matstand2)%*%solvSigma%*%matstand2%*%(A%*%y-X%*%betas.now)
      b=t(A%*%y-X%*%betas.now)%*%t(matstand2)%*%solvSigma%*%(A%*%y-X%*%betas.now)
lambda.now=rnorm(1,b/a,1/sqrt(a))
#while(det(diag(nrow(X))-lambda.now*matstand)<=0){# || lambda.now>1 || lambda.now< lowlim){
while(lambda.now>1 || lambda.now< lowlim){
lambda.now <- rnorm(1,b/a,1/sqrt(a))
}
p1.3=dpost(betas.now,Gammas,Rho,lambda.now)
p2.3=dpost(betas.now,Gammas,Rho,Lambda)
q1.3=dproposal_lambda(lambda.now)
q2.3=dproposal_lambda(Lambda)
      met.a3 <- p1.3 - p2.3   # log-posterior difference
      met.b3 <- q2.3 - q1.3   # log proposal correction
      T.val3=min(0,met.a3+met.b3)
#T.val3=min(1,(p1.3/p2.3)*(q1.3/q2.3))
#if(p2.3==0){T.val3=0}
#if(q2.3==0){T.val3=0}
u<-runif(1)
if (u <=exp(T.val3)) {
Lambda <- lambda.now
ind3[i] = 1
}
beta.mcmc[i,]<-betas.now
      # record the current (accepted) state of each chain
      gamma.mcmc[i,]<-Gammas
      rho.mcmc[i]<-Rho
      lambda.mcmc[i]<-Lambda
Sigma=diag(c(exp(Z%*%gamma.mcmc[i,])))
solvSigma <- diag(1/c(exp(Z%*%gamma.mcmc[i,])))
detS=det(Sigma)
logdetSigma = sum(Z%*%gamma.mcmc[i,])
detB=det(diag(nrow(X))-lambda.mcmc[i]*matstand2)
detA=det(diag(nrow(X))-rho.mcmc[i]*matstand)
A=(diag(nrow(X))-rho.mcmc[i]*matstand)
Yg=(diag(nrow(X))-lambda.mcmc[i]*matstand2)%*%(A%*%y-X%*%beta.mcmc[i,])
logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detA)+log(detB)-0.5*logdetSigma-0.5*t(Yg)%*%solvSigma%*%Yg
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
# print(i)
# print(c(rho.mcmc[i],lambda.mcmc[i]))
}
}
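  # Post-processing: drop the first `burn` draws and keep every `step`-th draw
  # of the remainder before computing posterior summaries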
beta.mcmc_1=beta.mcmc[(burn+1):nsim,]
gamma.mcmc_1=gamma.mcmc[(burn+1):nsim,]
rho.mcmc_1=rho.mcmc[(burn+1):nsim]
lambda.mcmc_1=lambda.mcmc[(burn+1):nsim]
  beta.mcmc_2=matrix(NA,nrow=(nsim-burn)/step,ncol(X))
  gamma.mcmc_2=matrix(NA,nrow=(nsim-burn)/step,ncol(Z))
  rho.mcmc_2=c()
  lambda.mcmc_2=c()
  for (i in 1:(nsim-burn))
{
if(i%%step==0)
{
beta.mcmc_2[i/step,]=beta.mcmc_1[i,]
gamma.mcmc_2[i/step,]=gamma.mcmc_1[i,]
rho.mcmc_2[i/step]=rho.mcmc_1[i]
lambda.mcmc_2[i/step]=lambda.mcmc_1[i]
}
}
Bestimado = colMeans(beta.mcmc_2)
Gammaest = colMeans(gamma.mcmc_2)
rho.mcmc_3=rho.mcmc_2[rho.mcmc_2<=1]
lambda.mcmc_3=lambda.mcmc_2[lambda.mcmc_2<=1]
Rhoest=mean(rho.mcmc_3)
Lambdaest=mean(lambda.mcmc_3)
Betaquant <- t(apply(beta.mcmc_2,2,function(x){quantile(x,c(0.025,0.5,0.975))}))
Gammaquant <- t(apply(gamma.mcmc_2,2,function(x){quantile(x,c(0.025,0.5,0.975))}))
Rhoquant <- quantile(rho.mcmc_3,c(0.025,0.5,0.975))
Lambdaquant <- quantile(lambda.mcmc_3,c(0.025,0.5,0.975))
DesvBeta <- apply(beta.mcmc_2,2,sd)
DesvGamma <- apply(gamma.mcmc_2,2,sd)
DesvRho<-sd(rho.mcmc_3)
DesvLambda<-sd(lambda.mcmc_3)
AccRate1<-sum(ind1)/nsim
AccRate2<-sum(ind2)/nsim
AccRate3<-sum(ind3)/nsim
  Sigma1=diag(c(exp(Z%*%Gammaest)))
  solvSigma1=diag(1/c(exp(Z%*%Gammaest)))
  logdetSigma = sum(Z%*%Gammaest)
  detA=det(diag(nrow(X))-Rhoest*matstand)
  detB=det(diag(nrow(X))-Lambdaest*matstand2)
  Yg=(diag(nrow(X))-Lambdaest*matstand2)%*%((diag(nrow(X))-Rhoest*matstand)%*%y-X%*%Bestimado)
  logV1=(-(nrow(X)/2)*log(2*pi))+log(detA)+log(detB)-0.5*logdetSigma-0.5*t(Yg)%*%solvSigma1%*%Yg
  p=ncol(X)+ncol(Z)+2
  BIC=-2*logV1+p*log(nrow(X))
  logV_DIC=logV_DIC[is.nan(logV_DIC)==FALSE]
  Dbar=mean(-2*logV_DIC)
  logV1_DIC=logV1
  Dev=-2*logV1_DIC
  DIC=2*Dbar+Dev
summary = data.frame( mean=c(Bestimado,Gammaest,Rhoest,Lambdaest),
sd = c(DesvBeta,DesvGamma,DesvRho,DesvLambda),
q0.025=c(Betaquant[,1],Gammaquant[,1],Rhoquant[1],Lambdaquant[1]),
q0.5=c(Betaquant[,2],Gammaquant[,2],Rhoquant[2],Lambdaquant[2]),
q0.975=c(Betaquant[,3],Gammaquant[,3],Rhoquant[3],Lambdaquant[3]))
  rownames(summary) = c(paste0("x",0:(ncol(X)-1)),paste0("z",0:(ncol(Z)-1)),"rho","lambda")
if(impacts){
n <- nrow(X)
V <- pblapply(1:(nsim-burn),function(x){solve(diag(n)-rho.mcmc_1[x]*matstand)})
S <- lapply(1:ncol(X),function(y){
pblapply(1:(nsim-burn), function(x){
beta.mcmc_1[x,y]*V[[x]]},cl=2)})
retain <- rep(c(rep(0,step-1),1),(nsim-burn)/step)
S <- lapply(S,function(x){x[which(retain>0)]})
impact.df <- lapply(S, function(x){
lapply(x, function(y){
impact.direct <- sum(diag(y))/n
impact.total<- sum((y))/n
impact.indirect <- impact.total - impact.direct
return(data.frame(Direct=impact.direct,Total=impact.total,Indirect=impact.indirect))
})})
impacts <- t(sapply(impact.df, function(x){tmp <- do.call("rbind",x)
colMeans(tmp)
}))
rownames(impacts) <- rownames(summary)[1:ncol(X)]
impacts <- impacts[-1,]
}
return(list(summary=summary,Acceptance_Rates=list(Gamma_AccRate=AccRate1,Rho_AccRate=AccRate2,Lambda_AccRate=AccRate3),Criteria=list(BIC=BIC,DIC=DIC),chains=mcmc(data.frame(beta_chain=beta.mcmc,gamma_chain=gamma.mcmc,rho_chain=rho.mcmc,lambda_chain=lambda.mcmc),thin = 1),
impacts=impacts))
}
|
/scratch/gouwar.j/cran-all/cranData/BSPADATA/R/hetero_general.R
|
# Fitting heteroscedastic SAR models
#
hetero_sar=function(formulamean,formulavar,data,W,nsim,burn,step,prior,initial,kernel="normal",seed=0,impacts=TRUE){
## Mean model ##
y_n_mean <- as.character(formulamean[[2]])
X0_mean <- as.character(formulamean[[3]])[-1]
X1_mean <- as.character(do.call("c",sapply(X0_mean, function(x){strsplit(x,"\\+")})))
X_n_mean <- gsub(" ","",X1_mean)
y_mean <- data[,which(names(data)==y_n_mean)]
X_mean <- as.matrix(data[,which(names(data)%in%X_n_mean)])
## Variance model ##
X0_var <- as.character(formulavar)[[2]]
X1_var <- as.character(do.call("c",sapply(X0_var, function(x){strsplit(x,"\\+")})))
X_n_var <- gsub(" ","",X1_var)
X_var <- as.matrix(data[,which(names(data)%in%X_n_var)])
## Prior information ##
b_pri <- prior$b_pri
B_pri <- prior$B_pri
g_pri <- prior$g_pri
G_pri <- prior$G_pri
## Initial values ##
beta_0 <- initial$beta_0
gammas_0 <- initial$gamma_0
rho_0 <- initial$rho_0
  output <- hetero_sar_int(y_mean,X_mean,X_var,W,nsim,burn,step,b_pri,B_pri,g_pri,G_pri,beta_0,gammas_0,rho_0,kernel=kernel,seed=seed,impacts=impacts)
return(output)
}
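# A minimal usage sketch (not run); `fakedata` (with intercept columns x0/z0
# of ones) and the neighbourhood object `W_queen` are hypothetical.
# fit <- hetero_sar(y ~ x0 + x1 + x2, ~ z0 + z1 + z2, data = fakedata,
#                   W = W_queen, nsim = 2000, burn = 500, step = 5,
#                   prior = list(b_pri = rep(0, 3), B_pri = diag(1000, 3),
#                                g_pri = rep(0, 3), G_pri = diag(1000, 3)),
#                   initial = list(beta_0 = rep(0, 3), gamma_0 = rep(0, 3),
#                                  rho_0 = 0.5),
#                   kernel = "normal", seed = 0, impacts = TRUE)
# fit$summary; fit$Criteria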
#' Internal MCMC engine for the Bayesian heteroscedastic SAR model
#'
#' @keywords internal
#'
hetero_sar_int=function(y,X,Z,W,nsim,burn,step,b_pri,B_pri,g_pri,G_pri,beta_0,gammas_0,rho_0,kernel="normal",seed=0,impacts=TRUE)
{
set.seed(seed)
  ######## Read the input data
  rowst=function(x){
    x/sum(x)
  }
y=as.matrix(y)
if (is.null(X) | is.null(y) ){
stop("No data")
}
if(burn>nsim | burn<0){
stop("Burn must be between 0 and nsim")
}
if(nsim<=0){
stop("There must be more than 0 simulations")
}
  if(step<=0 | step > nsim){
    stop("Thinning step must be between 1 and nsim")
  }
  if(inherits(W,"nb")){
matstand=nb2mat(W)
mat0=nb2listw(W,style="B")
mat=listw2mat(mat0)
}
else{
    if(inherits(W,"listw")){
mat=listw2mat(W)
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
else{
if(sum(rowSums(W))==nrow(X))
{
matstand=W
mat=matrix(nrow=nrow(X),ncol=nrow(X))
for(i in 1:nrow(mat)){
for(j in 1:ncol(mat)){
if(matstand[i,j]==0){mat[i,j]=0}
else{mat[i,j]=1/matstand[i,j]}
}
}
}
else{
mat=W
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
}
}
dpost <- function(betas,gammas,rho) {
A=diag(nrow(X))-rho*matstand
Sigma=diag(c(exp(Z%*%gammas)))
solvSigma=diag(1/c(exp(Z%*%gammas)))
logdetSigma = sum(Z%*%gammas)
k=t(A%*%y-X%*%(betas))%*%solvSigma%*%(A%*%y-X%*%(betas))
fc.y=k
fc.beta=t(betas - b_pri)%*%solve(B_pri)%*%(betas-b_pri)
fc.gamma=t(gammas - g_pri)%*%solve(G_pri)%*%(gammas-g_pri)
# dp <- (det(Sigma)^(-1/2))*det(A)*exp(-0.5*fc.y)*exp(-0.5*fc.beta)*exp(-0.5*fc.gamma)
# dp
logdp <- (-1/2)*logdetSigma + log(det(A)) -0.5*fc.y - 0.5*fc.beta - 0.5*fc.gamma
return(logdp)
}
  # Draw values from the proposal distribution
r.proposal=function(Gammas){
a.now=Z%*%Gammas
A=diag(nrow(X))-Rho*matstand
b.now=A%*%y-X%*%betas.now
y.now=a.now+(b.now^2/exp(a.now))-1
G_pos=solve(solve(G_pri)+0.5*t(Z)%*%Z)
g_pos=G_pos%*%(solve(G_pri)%*%g_pri+0.5*(t(Z)%*%y.now))
gammas.pro=rmvnorm(1,g_pos,G_pos)
gammas.pro
}
dproposal<-function(gammas.now, gammas.old){
a.now=Z%*%gammas.old
A=diag(nrow(X))-Rho*matstand
b.now=A%*%y-X%*%betas.now
y.now=a.now+(b.now^2/exp(a.now))-1
G_pos=solve(solve(G_pri)+0.5*t(Z)%*%Z)
g_pos=G_pos%*%(solve(G_pri)%*%g_pri+0.5*(t(Z)%*%y.now))
    dmvnorm(as.vector(gammas.now),as.vector(g_pos),G_pos,log = TRUE)
}
dproposal_rho<-function(rho){
Sigma=diag(c(exp(Z%*%Gammas)))
solvSigma=diag(1/c(exp(Z%*%Gammas)))
a=t(y)%*%t(matstand)%*%solvSigma%*%matstand%*%y
b=t(y)%*%t(matstand)%*%solvSigma%*%(y-X%*%betas.now)
dnorm(rho,b/a,1/sqrt(a),log = TRUE)
}
  # Metropolis-Hastings algorithm
beta.mcmc=matrix(NA,nrow=nsim,ncol(X))
gamma.mcmc=matrix(NA,nrow=nsim,ncol(Z))
rho.mcmc=c()
ind1=rep(0,nsim)
ind2=rep(0,nsim)
logV_DIC=c()
pb <- txtProgressBar(min = 0, max = nsim, style = 3)
if(kernel=="uniform"){
for(i in 1:nsim){
      # Set up the conditional posterior values
if(i==1){
Gammas=gammas_0
Sigma=diag(c(exp(Z%*%Gammas)))
Rho=rho_0
}
else{
Sigma=diag(c(exp(Z%*%Gammas)))
}
solvSigma=diag(1/c(exp(Z%*%Gammas)))
B_pos=solve(solve(B_pri)+t(X)%*%solvSigma%*%X)
b_pos=B_pos%*%(solve(B_pri)%*%b_pri+t(X)%*%solvSigma%*%y-Rho*t(X)%*%solvSigma%*%matstand%*%y)
      # Draw beta from its full conditional
betas.now=c(rmvnorm(1,b_pos,B_pos))
      # Metropolis step for the variance coefficients gamma
gammas.now=c(r.proposal(Gammas))
q1.1=dproposal(gammas.now,Gammas)
q2.1=dproposal(Gammas,gammas.now)
p1.1=dpost(betas.now,gammas.now,Rho)
p2.1=dpost(betas.now,Gammas,Rho)
      # dpost/dproposal return log-densities; form the MH ratio on the log scale
      T.val=min(1,exp((p1.1-p2.1)+(q2.1-q1.1)))
      u<-runif(1)
if (u <=T.val) {
Gammas= gammas.now
ind1[i] = 1
}
      # Metropolis step for rho
rho.now=runif(1,1/abs(min(eigen(mat)$values)),1)
p1.2=dpost(betas.now,Gammas,rho.now)
p2.2=dpost(betas.now,Gammas,Rho)
      T.val2=min(1,exp(p1.2-p2.2))
      u<-runif(1)
if (u <=T.val2) {
Rho <- rho.now
ind2[i] = 1
}
beta.mcmc[i,]<-betas.now
      # record the current (accepted) state of each chain
      gamma.mcmc[i,]<-Gammas
      rho.mcmc[i]<-Rho
      logdetSigma=sum(Z%*%gamma.mcmc[i,])
      detB=det(diag(nrow(X))-rho.mcmc[i]*matstand)
      Yg=(diag(nrow(X))-rho.mcmc[i]*matstand)%*%y-X%*%beta.mcmc[i,]
      logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detB)-0.5*logdetSigma-0.5*t(Yg)%*%diag(1/c(exp(Z%*%gamma.mcmc[i,])))%*%Yg
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
}
}
if(kernel=="normal")
{
for(i in 1:nsim){
      # Set up the conditional posterior values
if(i==1){
Gammas=gammas_0
Sigma=diag(c(exp(Z%*%Gammas)))
Rho=rho_0
}
else{
Sigma=diag(c(exp(Z%*%Gammas)))
}
solvSigma=diag(1/c(exp(Z%*%Gammas)))
B_pos=solve(solve(B_pri)+t(X)%*%solvSigma%*%X)
b_pos=B_pos%*%(solve(B_pri)%*%b_pri+t(X)%*%solvSigma%*%y-Rho*t(X)%*%solvSigma%*%matstand%*%y)
      # Draw beta from its full conditional
betas.now=c(rmvnorm(1,b_pos,B_pos))
      # Metropolis step for the variance coefficients gamma
gammas.now=c(r.proposal(Gammas))
q1.1=dproposal(gammas.now,Gammas)
q2.1=dproposal(Gammas,gammas.now)
p1.1=dpost(betas.now,gammas.now,Rho)
p2.1=dpost(betas.now,Gammas,Rho)
      met.a1 <- p1.1 - p2.1   # log-posterior difference (dpost returns logs)
      met.b1 <- q2.1 - q1.1   # proposal correction: log q(old|new) - log q(new|old)
      T.val1=min(0,met.a1+met.b1)
u<-runif(1)
# if(p2.1==0){T.val=0}
# if(q2.1==0){T.val=0}
if (u <=exp(T.val1)) {
Gammas= gammas.now
ind1[i] = 1
}
      ### Proposal for rho
Sigma=diag(c(exp(Z%*%Gammas)))
solvSigma=diag(1/c(exp(Z%*%Gammas)))
a=t(y)%*%t(matstand)%*%solvSigma%*%matstand%*%y
b=t(y)%*%t(matstand)%*%solvSigma%*%(y-X%*%betas.now)
rho.now=rnorm(1,b/a,1/sqrt(a))
while(det(diag(nrow(X))-rho.now*matstand)<=0){
rho.now <- rnorm(1,b/a,1/sqrt(a))
}
p1.2=dpost(betas.now,Gammas,rho.now)
p2.2=dpost(betas.now,Gammas,Rho)
q1.2=dproposal_rho(rho.now)
q2.2=dproposal_rho(Rho)
      met.a2 <- p1.2 - p2.2   # log-posterior difference
      met.b2 <- q2.2 - q1.2   # log proposal correction
      T.val2=min(0,met.a2+met.b2)
u<-runif(1)
#if(p2.2==0){T.val2=0}
if (u <=exp(T.val2)) {
Rho <- rho.now
ind2[i] = 1
}
beta.mcmc[i,]<-betas.now
      gamma.mcmc[i,]<-Gammas   # record the accepted state, not the raw proposal
      rho.mcmc[i]<-Rho
Sigma=diag(c(exp(Z%*%gamma.mcmc[i,])))
solvSigma=diag(1/c(exp(Z%*%gamma.mcmc[i,])))
detS=det(Sigma)
logdetSigma = sum(Z%*%gamma.mcmc[i,])
detB=det(diag(nrow(X))-rho.mcmc[i]*matstand)
Yg=(diag(nrow(X))-rho.mcmc[i]*matstand)%*%y-X%*%beta.mcmc[i,]
logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detB)-0.5*logdetSigma-0.5*t(Yg)%*%solvSigma%*%Yg
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
}
}
beta.mcmc_1=beta.mcmc[(burn+1):nsim,]
gamma.mcmc_1=gamma.mcmc[(burn+1):nsim,]
rho.mcmc_1=rho.mcmc[(burn+1):nsim]
  beta.mcmc_2=matrix(NA,nrow=(nsim-burn)/step,ncol(X))
  gamma.mcmc_2=matrix(NA,nrow=(nsim-burn)/step,ncol(Z))
  rho.mcmc_2=c()
  for (i in 1:(nsim-burn))
{
if(i%%step==0)
{
beta.mcmc_2[i/step,]=beta.mcmc_1[i,]
gamma.mcmc_2[i/step,]=gamma.mcmc_1[i,]
rho.mcmc_2[i/step]=rho.mcmc_1[i]
}
}
rho.mcmc_3=rho.mcmc_2[rho.mcmc_2<=1]
Bestimado = colMeans(beta.mcmc_2)
Gammaest = colMeans(gamma.mcmc_2)
Rhoest=mean(rho.mcmc_3)
DesvBeta <- apply(beta.mcmc_2,2,sd)
DesvGamma <- apply(gamma.mcmc_2,2,sd)
DesvRho<-sd(rho.mcmc_3)
Betaquant <- t(apply(beta.mcmc_2,2,function(x){quantile(x,c(0.025,0.5,0.975))}))
Gammaquant <- t(apply(gamma.mcmc_2,2,function(x){quantile(x,c(0.025,0.5,0.975))}))
Rhoquant <- quantile(rho.mcmc_3,c(0.025,0.5,0.975))
AccRate1<-sum(ind1)/nsim
AccRate2<-sum(ind2)/nsim
  Sigma1=diag(c(exp(Z%*%Gammaest)))
  solvSigma1=diag(1/c(exp(Z%*%Gammaest)))
  logdetSigma = sum(Z%*%Gammaest)
  detA=det(diag(nrow(X))-Rhoest*matstand)
  Yg=(diag(nrow(X))-Rhoest*matstand)%*%y-X%*%Bestimado
  logV1=-(nrow(X)/2)*log(2*pi)+log(detA)-0.5*logdetSigma-0.5*t(Yg)%*%solvSigma1%*%Yg
  p=ncol(X)+ncol(Z)+1
  BIC=-2*logV1+p*log(nrow(X))
  logV_DIC=logV_DIC[is.nan(logV_DIC)==FALSE]
  Dbar=mean(-2*logV_DIC)
  logV1_DIC=logV1
  Dev=-2*logV1_DIC
  DIC=2*Dbar+Dev
summary = data.frame( mean=c(Bestimado,Gammaest,Rhoest),
sd = c(DesvBeta,DesvGamma,DesvRho),
q0.025=c(Betaquant[,1],Gammaquant[,1],Rhoquant[1]),
q0.5=c(Betaquant[,2],Gammaquant[,2],Rhoquant[2]),
q0.975=c(Betaquant[,3],Gammaquant[,3],Rhoquant[3]))
  rownames(summary) = c(paste0("x",0:(ncol(X)-1)),paste0("z",0:(ncol(Z)-1)),"rho")
if(impacts){
n <- nrow(X)
V <- pblapply(1:(nsim-burn),function(x){solve(diag(n)-rho.mcmc_1[x]*matstand)})
S <- lapply(1:ncol(X),function(y){
pblapply(1:(nsim-burn), function(x){
beta.mcmc_1[x,y]*V[[x]]},cl=2)})
retain <- rep(c(rep(0,step-1),1),(nsim-burn)/step)
S <- lapply(S,function(x){x[which(retain>0)]})
impact.df <- lapply(S, function(x){
lapply(x, function(y){
impact.direct <- sum(diag(y))/n
impact.total<- sum((y))/n
impact.indirect <- impact.total - impact.direct
return(data.frame(Direct=impact.direct,Total=impact.total,Indirect=impact.indirect))
})})
impacts <- t(sapply(impact.df, function(x){tmp <- do.call("rbind",x)
colMeans(tmp)
}))
rownames(impacts) <- rownames(summary)[1:ncol(X)]
impacts <- impacts[-1,]
}
return(list(summary=summary,Acceptance_Rates=list(Gamma_AccRate=AccRate1,Rho_AccRate=AccRate2),Criteria=list(BIC=BIC,DIC=DIC),chains=mcmc(data.frame(beta_chain=beta.mcmc,gamma_chain=gamma.mcmc,rho_chain=rho.mcmc),thin = 1),
impacts=impacts))
}
|
/scratch/gouwar.j/cran-all/cranData/BSPADATA/R/hetero_sar.R
|
# Fitting heteroscedastic SEM models
#
hetero_sem=function(formulamean,formulavar,data,W,nsim,burn,step,prior,initial,kernel="normal",seed=0){
## Mean model ##
y_n_mean <- as.character(formulamean[[2]])
X0_mean <- as.character(formulamean[[3]])[-1]
X1_mean <- as.character(do.call("c",sapply(X0_mean, function(x){strsplit(x,"\\+")})))
X_n_mean <- gsub(" ","",X1_mean)
y_mean <- data[,which(names(data)==y_n_mean)]
X_mean <- as.matrix(data[,which(names(data)%in%X_n_mean)])
## Variance model ##
X0_var <- as.character(formulavar)[[2]]
X1_var <- as.character(do.call("c",sapply(X0_var, function(x){strsplit(x,"\\+")})))
X_n_var <- gsub(" ","",X1_var)
X_var <- as.matrix(data[,which(names(data)%in%X_n_var)])
## Prior information ##
b_pri <- prior$b_pri
B_pri <- prior$B_pri
g_pri <- prior$g_pri
G_pri <- prior$G_pri
## Initial values ##
beta_0 <- initial$beta_0
gammas_0 <- initial$gamma_0
lambda_0 <- initial$lambda_0
  output <- hetero_sem_int(y_mean,X_mean,X_var,W,nsim,burn,step,b_pri,B_pri,g_pri,G_pri,beta_0,gammas_0,lambda_0,kernel=kernel,seed=seed)
return(output)
}
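# A minimal usage sketch (not run); `fakedata` (with intercept columns x0/z0
# of ones) and the neighbourhood object `W_queen` are hypothetical.
# fit <- hetero_sem(y ~ x0 + x1 + x2, ~ z0 + z1 + z2, data = fakedata,
#                   W = W_queen, nsim = 2000, burn = 500, step = 5,
#                   prior = list(b_pri = rep(0, 3), B_pri = diag(1000, 3),
#                                g_pri = rep(0, 3), G_pri = diag(1000, 3)),
#                   initial = list(beta_0 = rep(0, 3), gamma_0 = rep(0, 3),
#                                  lambda_0 = 0.5),
#                   kernel = "normal", seed = 0)
# fit$summary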
#' Internal MCMC engine for the Bayesian heteroscedastic SEM model
#'
#' @keywords internal
#'
hetero_sem_int=function(y,X,Z,W,nsim,burn,step,b_pri,B_pri,g_pri,G_pri,beta_0,gammas_0,lambda_0,kernel="normal",seed=0)
{
set.seed(seed)
  ######## Read the input data
  rowst=function(x){
    x/sum(x)
  }
y=as.matrix(y)
if (is.null(X) | is.null(y) ){
stop("No data")
}
if(burn>nsim | burn<0){
stop("Burn must be between 0 and nsim")
}
if(nsim<=0){
stop("There must be more than 0 simulations")
}
  if(step<=0 | step > nsim){
    stop("Thinning step must be between 1 and nsim")
  }
  if(inherits(W,"nb")){
matstand=nb2mat(W)
mat0=nb2listw(W,style="B")
mat=listw2mat(mat0)
}
else{
    if(inherits(W,"listw")){
mat=listw2mat(W)
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
else{
if(sum(rowSums(W))==nrow(X))
{
matstand=W
mat=matrix(nrow=nrow(X),ncol=nrow(X))
for(i in 1:nrow(mat)){
for(j in 1:ncol(mat)){
if(matstand[i,j]==0){mat[i,j]=0}
else{mat[i,j]=1/matstand[i,j]}
}
}
}
else{
mat=W
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
}
}
dpost <- function(betas,gammas,lambda) {
A=diag(nrow(X))-lambda*matstand
Sigma=diag(c(exp((Z)%*%gammas)))
solvSigma=diag(1/c(exp(Z%*%gammas)))
k=t(A%*%(y-X%*%(betas)))%*%solvSigma%*%(A%*%(y-X%*%(betas)))
fc.y=k
fc.beta=t(betas - b_pri)%*%solve(B_pri)%*%(betas-b_pri)
fc.gamma=t(gammas - g_pri)%*%solve(G_pri)%*%(gammas-g_pri)
# dp <- det(Sigma)^(-1/2)*det(A)*exp(-0.5*(fc.y + fc.beta+fc.gamma))
# dp
    logdp <- (-1/2)*sum(Z%*%gammas) + log(det(A)) -0.5*fc.y - 0.5*fc.beta - 0.5*fc.gamma
return(logdp)
}
  # Draw values from the proposal distributions
r.proposal_gamma=function(Gammas){
a.now=Z%*%Gammas
A=diag(nrow(X))-Lambda*matstand
b.now=A%*%(y-X%*%betas.now)
y.now=a.now+(b.now^2/exp(a.now))-1
G_pos=solve(solve(G_pri)+0.5*t(Z)%*%Z)
g_pos=G_pos%*%(solve(G_pri)%*%g_pri+0.5*(t(Z)%*%y.now))
gammas.pro=rmvnorm(1,g_pos,G_pos)
gammas.pro
}
dproposal_gamma<-function(gammas.now, gammas.old){
a.now=Z%*%gammas.old
A=diag(nrow(X))-Lambda*matstand
b.now=A%*%(y-X%*%betas.now)
y.now=a.now+(b.now^2/exp(a.now))-1
G_pos=solve(solve(G_pri)+0.5*t(Z)%*%Z)
g_pos=G_pos%*%(solve(G_pri)%*%g_pri+0.5*(t(Z)%*%y.now))
    dmvnorm(as.vector(gammas.now),as.vector(g_pos),G_pos,log = TRUE)
}
dproposal_lambda<-function(lambda){
Sigma=diag(c(exp(Z%*%Gammas)))
solvSigma=diag(1/c(exp(Z%*%Gammas)))
a=t(y-X%*%betas.now)%*%t(matstand)%*%solvSigma%*%matstand%*%(y-X%*%betas.now)
b=t(y-X%*%betas.now)%*%t(matstand)%*%solvSigma%*%(y-X%*%betas.now)
dnorm(lambda,b/a,1/sqrt(a),log = TRUE)
}
  # Metropolis-Hastings algorithm
beta.mcmc=matrix(NA,nrow=nsim,ncol(X))
gamma.mcmc=matrix(NA,nrow=nsim,ncol(Z))
lambda.mcmc=c()
ind1=rep(0,nsim)
ind2=rep(0,nsim)
logV_DIC=c()
pb <- txtProgressBar(min = 0, max = nsim, style = 3)
if(kernel=="uniform"){
for(i in 1:nsim){
      # Set up the conditional posterior values
if(i==1){
Lambda=lambda_0
Gammas=gammas_0
Sigma=diag(c(exp(Z%*%Gammas)))
}
else{
Sigma=diag(c(exp(Z%*%Gammas)))
}
A=diag(nrow(X))-Lambda*matstand
solvSigma=diag(1/c(exp(Z%*%Gammas)))
B_pos=solve(solve(B_pri)+t(A%*%X)%*%solvSigma%*%A%*%X)
b_pos=B_pos%*%(solve(B_pri)%*%b_pri+t(A%*%X)%*%solvSigma%*%A%*%y)
      # Draw beta from its full conditional
betas.now=c(rmvnorm(1,b_pos,B_pos))
      # Metropolis step for the variance coefficients gamma
gammas.now=c(r.proposal_gamma(Gammas))
q1.1=dproposal_gamma(gammas.now,Gammas)
q2.1=dproposal_gamma(Gammas,gammas.now)
p1.1=dpost(betas.now,gammas.now,Lambda)
p2.1=dpost(betas.now,Gammas,Lambda)
      # dpost/dproposal return log-densities; form the MH ratio on the log scale
      T.val=min(1,exp((p1.1-p2.1)+(q2.1-q1.1)))
      u<-runif(1)
if (u <=T.val) {
Gammas= gammas.now
ind1[i] = 1
}
      # Metropolis step for lambda
lambda.now=runif(1,1/abs(min(eigen(mat)$values)),1)
p1.2=dpost(betas.now,Gammas,lambda.now)
p2.2=dpost(betas.now,Gammas,Lambda)
      T.val2=min(1,exp(p1.2-p2.2))
      u<-runif(1)
if (u <=T.val2) {
Lambda <- lambda.now
ind2[i] = 1
}
beta.mcmc[i,]<-betas.now
      # record the current (accepted) state of each chain
      gamma.mcmc[i,]<-Gammas
      lambda.mcmc[i]<-Lambda
      logdetSigma=sum(Z%*%gamma.mcmc[i,])
      detB=det(diag(nrow(X))-lambda.mcmc[i]*matstand)
      Yg=(diag(nrow(X))-lambda.mcmc[i]*matstand)%*%(y-X%*%beta.mcmc[i,])
      logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detB)-0.5*logdetSigma-0.5*t(Yg)%*%diag(1/c(exp(Z%*%gamma.mcmc[i,])))%*%Yg
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
}
}
if(kernel=="normal"){
for(i in 1:nsim){
      # Set up the conditional posterior values
if(i==1){
Gammas=gammas_0
Sigma=diag(c(exp(Z%*%Gammas)))
Lambda=lambda_0
}
else{
Sigma=diag(c(exp(Z%*%Gammas)))
}
A=diag(nrow(X))-Lambda*matstand
solvSigma=diag(1/c(exp(Z%*%Gammas)))
B_pos=solve(solve(B_pri)+t(A%*%X)%*%solvSigma%*%A%*%X)
b_pos=B_pos%*%(solve(B_pri)%*%b_pri+t(A%*%X)%*%solvSigma%*%A%*%y)
betas.now=c(rmvnorm(1,b_pos,B_pos))
      ### Proposal for gamma
gammas.now=c(r.proposal_gamma(Gammas))
q1.1=dproposal_gamma(gammas.now,Gammas)
q2.1=dproposal_gamma(Gammas,gammas.now)
p1.1=dpost(betas.now,gammas.now,Lambda)
p2.1=dpost(betas.now,Gammas,Lambda)
      met.a1 <- p1.1 - p2.1   # log-posterior difference (dpost returns logs)
      met.b1 <- q2.1 - q1.1   # proposal correction: log q(old|new) - log q(new|old)
      T.val1=min(0,met.a1+met.b1)
#T.val=min(1,(p1.1/p2.1)*(q1.1/q2.1))
u<-runif(1)
# if(p2.1==0){T.val=0}
# if(q2.1==0){T.val=0}
if (u <=exp(T.val1)) {
Gammas= gammas.now
ind1[i] = 1
}
      ### Proposal for lambda
Sigma=diag(c(exp(Z%*%Gammas)))
solvSigma=diag(1/c(exp(Z%*%Gammas)))
a=t(y-X%*%betas.now)%*%t(matstand)%*%solvSigma%*%matstand%*%(y-X%*%betas.now)
b=t(y-X%*%betas.now)%*%t(matstand)%*%solvSigma%*%(y-X%*%betas.now)
eigenvals <- eigen(matstand)$values
lowlim <- -1/(max(abs(eigenvals[eigenvals<0])))
lambda.now=rnorm(1,b/a,1/sqrt(a))
while(lambda.now>1 || lambda.now< lowlim){
lambda.now <- rnorm(1,b/a,1/sqrt(a))
}
p1.2=dpost(betas.now,Gammas,lambda.now)
p2.2=dpost(betas.now,Gammas,Lambda)
q1.2=dproposal_lambda(lambda.now)
q2.2=dproposal_lambda(Lambda)
      met.a2 <- p1.2 - p2.2   # log-posterior difference
      met.b2 <- q2.2 - q1.2   # log proposal correction
      T.val2=min(0,met.a2+met.b2)
#T.val2=min(1,p1.2/p2.2)
u<-runif(1)
#if(p2.2==0){T.val2=0}
if (u <=exp(T.val2)) {
Lambda <- lambda.now
ind2[i] = 1
}
beta.mcmc[i,]<-betas.now
      # record the current (accepted) state of each chain
      gamma.mcmc[i,]<-Gammas
      lambda.mcmc[i]<-Lambda
      logdetSigma=sum(Z%*%gamma.mcmc[i,])
      detB=det(diag(nrow(X))-lambda.mcmc[i]*matstand)
      Yg=(diag(nrow(X))-lambda.mcmc[i]*matstand)%*%(y-X%*%beta.mcmc[i,])
      logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detB)-0.5*logdetSigma-0.5*t(Yg)%*%diag(1/c(exp(Z%*%gamma.mcmc[i,])))%*%Yg
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
}
}
beta.mcmc_1=beta.mcmc[(burn+1):nsim,]
gamma.mcmc_1=gamma.mcmc[(burn+1):nsim,]
lambda.mcmc_1=lambda.mcmc[(burn+1):nsim]
  beta.mcmc_2=matrix(NA,nrow=(nsim-burn)/step,ncol(X))
  gamma.mcmc_2=matrix(NA,nrow=(nsim-burn)/step,ncol(Z))
  lambda.mcmc_2=c()
  for (i in 1:(nsim-burn))
{
if(i%%step==0)
{
beta.mcmc_2[i/step,]=beta.mcmc_1[i,]
gamma.mcmc_2[i/step,]=gamma.mcmc_1[i,]
lambda.mcmc_2[i/step]=lambda.mcmc_1[i]
}
}
Bestimado = colMeans(beta.mcmc_2)
Gammaest = colMeans(gamma.mcmc_2)
lambda.mcmc_3=lambda.mcmc_2[lambda.mcmc_2<=1]
Lambdaest=mean(lambda.mcmc_3)
DesvBeta <- apply(beta.mcmc_2,2,sd)
DesvGamma <- apply(gamma.mcmc_2,2,sd)
DesvLambda<-sd(lambda.mcmc_3)
Betaquant <- t(apply(beta.mcmc_2,2,function(x){quantile(x,c(0.025,0.5,0.975))}))
Gammaquant <- t(apply(gamma.mcmc_2,2,function(x){quantile(x,c(0.025,0.5,0.975))}))
Lambdaquant <- quantile(lambda.mcmc_3,c(0.025,0.5,0.975))
AccRate1<-sum(ind1)/nsim
AccRate2<-sum(ind2)/nsim
  Sigma1=diag(c(exp(Z%*%Gammaest)))
  solvSigma1=diag(1/c(exp(Z%*%Gammaest)))
  logdetSigma=sum(Z%*%Gammaest)
  detA=det(diag(nrow(X))-Lambdaest*matstand)
  Yg=(diag(nrow(X))-Lambdaest*matstand)%*%(y-X%*%Bestimado)
  logV1=(-(nrow(X)/2)*log(2*pi))+log(detA)-0.5*logdetSigma-0.5*t(Yg)%*%solvSigma1%*%Yg
  p=ncol(X)+ncol(Z)+1
  BIC=-2*logV1+p*log(nrow(X))
  logV_DIC=logV_DIC[is.nan(logV_DIC)==FALSE]
  Dbar=mean(-2*logV_DIC)
  logV1_DIC=logV1
  Dev=-2*logV1_DIC
  DIC=2*Dbar+Dev
summary = data.frame( mean=c(Bestimado,Gammaest,Lambdaest),
sd = c(DesvBeta,DesvGamma,DesvLambda),
q0.025=c(Betaquant[,1],Gammaquant[,1],Lambdaquant[1]),
q0.5=c(Betaquant[,2],Gammaquant[,2],Lambdaquant[2]),
q0.975=c(Betaquant[,3],Gammaquant[,3],Lambdaquant[3]))
  rownames(summary) = c(paste0("x",0:(ncol(X)-1)),paste0("z",0:(ncol(Z)-1)),"lambda")
return(list(summary=summary,Acceptance_Rates=list(Gamma_AccRate=AccRate1,Lambda_AccRate=AccRate2),Criteria=list(BIC=BIC,DIC=DIC),chains=mcmc(data.frame(beta_chain=beta.mcmc,gamma_chain=gamma.mcmc,lambda_chain=lambda.mcmc),thin = 1)))
}
|
/scratch/gouwar.j/cran-all/cranData/BSPADATA/R/hetero_sem.R
|
# Fitting homoscedastic General model
hom_general=function(formula,data,W1,W2=NULL,nsim,burn,step,prior,initial,kernel="normal",mateq=TRUE,impacts=TRUE,seed=0){
y_n <- as.character(formula[[2]])
X0 <- as.character(formula[[3]])[-1]
X1 <- as.character(do.call("c",sapply(X0, function(x){strsplit(x,"\\+")})))
X_n <- gsub(" ","",X1)
y <- data[,which(names(data)==y_n)]
X <- as.matrix(data[,which(names(data)%in%X_n)])
b_pri <- prior$b_pri
B_pri <- prior$B_pri
r_pri <- prior$r_pri
rho_pri <- prior$rho_pri
lambda_pri <- prior$lambda_pri
beta_0 <- initial$beta_0
sigma2_0 <- initial$sigma2_0
rho_0 <- initial$rho_0
lambda_0 <- initial$lambda_0
  output <- hom_general_int(y,X,W1,W2,nsim,burn,step,b_pri,B_pri,r_pri,lambda_pri,beta_0,sigma2_0,rho_0,lambda_0,kernel=kernel,mateq=mateq,seed=seed,impacts=impacts)
return(output)
}
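# A minimal usage sketch (not run); `fakedata` (with an explicit intercept
# column x0 of ones) and the neighbourhood object `W_queen` are hypothetical.
# fit <- hom_general(y ~ x0 + x1 + x2, data = fakedata, W1 = W_queen,
#                    W2 = W_queen, nsim = 2000, burn = 500, step = 5,
#                    prior = list(b_pri = rep(0, 3), B_pri = diag(1000, 3),
#                                 r_pri = 0.01, lambda_pri = 0.01),
#                    initial = list(beta_0 = rep(0, 3), sigma2_0 = 1,
#                                   rho_0 = 0.5, lambda_0 = 0.5),
#                    kernel = "normal", mateq = TRUE, impacts = TRUE, seed = 0)
# fit$summary; fit$Criteria$DIC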
#' Internal MCMC engine for the Bayesian homoscedastic general spatial model
#'
#' @keywords internal
#'
hom_general_int=function(y,X,W1,W2=NULL,nsim,burn,step,b_pri,B_pri,r_pri,lambda_pri,beta_0,sigma2_0,rho_0,lambda_0,kernel="normal",seed=0,mateq=TRUE,impacts=TRUE)
{
set.seed(seed)
  rowst=function(x){
    x/sum(x)
  }
  ######## Read the input data
y=as.matrix(y)
if (is.null(X) | is.null(y) ){
stop("No data")
}
  if(burn>nsim | burn<0){
    stop("Burn must be between 0 and nsim")
  }
if(nsim<=0){
stop("There must be more than 0 simulations")
}
  if(step<=0 | step > nsim){
    stop("Thinning step must be between 1 and nsim")
  }
if(mateq==TRUE){
    if(inherits(W1,"nb")){
matstand=nb2mat(W1)
mat0=nb2listw(W1,style="B")
mat=listw2mat(mat0)
W2=W1
matstand2=matstand
mat2=mat
}
else{
      if(inherits(W1,"listw")){
mat=listw2mat(W1)
matstand=apply(mat,2,rowst)
matstand=t(matstand)
W2=W1
matstand2=matstand
mat2=mat
}
else{
if(sum(rowSums(W1))==nrow(X))
{
matstand=W1
mat=matrix(nrow=nrow(X),ncol=nrow(X))
for(i in 1:nrow(mat)){
for(j in 1:ncol(mat)){
if(matstand[i,j]==0){mat[i,j]=0}
else{mat[i,j]=1/matstand[i,j]}
}
}
}
else{
mat=W1
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
W2=W1
matstand2=matstand
}
}
}
else{
    if(inherits(W1,"nb")){
matstand=nb2mat(W1)
mat0=nb2listw(W1,style="B")
mat=listw2mat(mat0)
}
else{
      if(inherits(W1,"listw")){
mat=listw2mat(W1)
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
else{
if(sum(rowSums(W1))==nrow(X))
{
matstand=W1
mat=matrix(nrow=nrow(X),ncol=nrow(X))
for(i in 1:nrow(mat)){
for(j in 1:ncol(mat)){
if(matstand[i,j]==0){mat[i,j]=0}
else{mat[i,j]=1}
}
}
}
else{
mat=W1
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
}
}
    if(inherits(W2,"nb")){
matstand2=nb2mat(W2)
mat02=nb2listw(W2,style="B")
mat2=listw2mat(mat02)
}
else{
      if(inherits(W2,"listw")){
mat2=listw2mat(W2)
matstand2=apply(mat2,2,rowst)
matstand2=t(matstand2)
}
else{
if(sum(rowSums(W2))==nrow(X))
{
matstand2=W2
mat2=matrix(nrow=nrow(X),ncol=nrow(X))
for(i in 1:nrow(mat2)){
for(j in 1:ncol(mat2)){
if(matstand2[i,j]==0){mat2[i,j]=0}
else{mat2[i,j]=1}
}
}
}
else{
mat2=W2
matstand2=apply(mat2,2,rowst)
matstand2=t(matstand2)
}
}
}
}
dpost <- function(betas,sigma2,rho,lambda) {
A=diag(nrow(X))-rho*matstand
B=diag(nrow(X))-lambda*matstand2
k=t(B%*%(A%*%y-X%*%betas))%*%(B%*%(A%*%y-X%*%betas))
fc.y=k
fc.beta=t(betas - b_pri)%*%solve(B_pri)%*%(betas-b_pri)
    fc.sigma2=r_pri*log(lambda_pri)-(r_pri+1)*log(sigma2)-lambda_pri/sigma2-lgamma(r_pri) # log inverse-gamma prior
    logdp <- (-nrow(X)/2)*log(sigma2) + log(det(A)) + log(det(B)) -0.5*fc.y/sigma2 - 0.5*fc.beta + fc.sigma2
return(logdp)
}
dproposalrho<-function(rho){
B=diag(nrow(X))-Lambda*matstand2
a=(1/sigma2.now)*t(B%*%matstand%*%y)%*%(B%*%matstand%*%y)
b=(1/sigma2.now)*t(B%*%(y-X%*%betas.now))%*%(B%*%matstand%*%y)
dnorm(rho,b/a,1/sqrt(a),log = TRUE)
}
dproposallambda<-function(lambda){
A=diag(nrow(X))-Rho*matstand
    a=(1/sigma2.now)*t(matstand2%*%(A%*%y-X%*%betas.now))%*%(matstand2%*%(A%*%y-X%*%betas.now))
    b=(1/sigma2.now)*t(A%*%y-X%*%betas.now)%*%matstand2%*%(A%*%y-X%*%betas.now)
dnorm(lambda,b/a,1/sqrt(a),log = TRUE)
}
ind=matrix(0,nsim,2)
beta.mcmc=matrix(NA,nrow=nsim,ncol=ncol(X))
sigma2.mcmc=c()
rho.mcmc=c()
lambda.mcmc=c()
logV_DIC=c()
Sigma_0=(sigma2_0)*diag(nrow(X))
pb <- txtProgressBar(min = 0, max = nsim, style = 3)
if(kernel=="uniform"){
for(i in 1:nsim){
if(i==1){
Sigma=Sigma_0
Rho=rho_0
Lambda=lambda_0
}
else{
Sigma=sigma2.now*diag(nrow(X))
}
A=diag(nrow(X))-Rho*matstand
B=diag(nrow(X))-Lambda*matstand2
B_pos=solve(solve(B_pri)+t(X)%*%t(B)%*%solve(Sigma)%*%B%*%X)
b_pos=B_pos%*%(t(X)%*%t(B)%*%solve(Sigma)%*%B%*%A%*%y+solve(B_pri)%*%b_pri)
      # Draw beta from its full conditional
betas.now=c(rmvnorm(1,b_pos,B_pos))
      # Full conditional for sigma2
r_pos=nrow(X)/2+r_pri
k=t(B%*%(A%*%y-X%*%betas.now))%*%(B%*%(A%*%y-X%*%betas.now))
lambda_pos=(k+2*lambda_pri)/2
sigma2.now=rigamma(1,r_pos, lambda_pos)
      # Metropolis step for rho
rho.now=runif(1,1/abs(min(eigen(mat)$values)),1)
p1=dpost(betas.now,sigma2.now,rho.now,Lambda)
p2=dpost(betas.now,sigma2.now,Rho,Lambda)
      T.val=min(1,exp(p1-p2))   # dpost returns log-densities
u<-runif(1)
if (u <=T.val) {
Rho <- rho.now
ind[i,1] = 1
}
      # Metropolis step for lambda
lambda.now=runif(1,1/abs(min(eigen(mat2)$values)),1)
p1=dpost(betas.now,sigma2.now,Rho,lambda.now)
p2=dpost(betas.now,sigma2.now,Rho,Lambda)
      T.val=min(1,exp(p1-p2))   # dpost returns log-densities
u<-runif(1)
if (u <=T.val) {
Lambda <- lambda.now
ind[i,2] = 1
}
beta.mcmc[i,]<-betas.now
sigma2.mcmc[i]<-sigma2.now
      rho.mcmc[i]<-Rho      # record the accepted state, not the raw proposal
      lambda.mcmc[i]<-Lambda
      logdetS=nrow(X)*log(sigma2.mcmc[i])
      detB=det(diag(nrow(X))-lambda.mcmc[i]*matstand2)
      detA=det(diag(nrow(X))-rho.mcmc[i]*matstand)
      A=(diag(nrow(X))-rho.mcmc[i]*matstand)
      Yg=(diag(nrow(X))-lambda.mcmc[i]*matstand2)%*%(A%*%y-X%*%beta.mcmc[i,])
      logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detA)+log(detB)-0.5*logdetS-0.5*t(Yg)%*%Yg/sigma2.mcmc[i]
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
}
}
if(kernel=="normal")
{
for(i in 1:nsim)
{
if(i==1){
Sigma=Sigma_0
Rho=rho_0
Lambda=lambda_0
}
else{
Sigma=sigma2.now*diag(nrow(X))
}
A=diag(nrow(X))-Rho*matstand
B=diag(nrow(X))-Lambda*matstand2
B_pos=solve(solve(B_pri)+t(X)%*%t(B)%*%diag(1/diag(Sigma))%*%B%*%X)
b_pos=B_pos%*%(t(X)%*%t(B)%*%diag(1/diag(Sigma))%*%B%*%A%*%y+solve(B_pri)%*%b_pri)
      # Draw beta from its full conditional
betas.now=c(rmvnorm(1,b_pos,B_pos))
      # Full conditional for sigma2
r_pos=nrow(X)/2+r_pri
k=t(B%*%(A%*%y-X%*%betas.now))%*%(B%*%(A%*%y-X%*%betas.now))
lambda_pos=(k+2*lambda_pri)/2
sigma2.now=rigamma(1,r_pos, lambda_pos)
      # Metropolis step for rho
a_rho=(1/sigma2.now)*t(B%*%matstand%*%y)%*%(B%*%matstand%*%y)
b_rho=(1/sigma2.now)*t(B%*%(y-X%*%betas.now))%*%(B%*%matstand%*%y)
rho.now=rnorm(1,b_rho/a_rho,1/sqrt(a_rho))
while(det(diag(nrow(X))-rho.now*matstand)<=0 || rho.now <= -1 || rho.now>=1){
rho.now <- rnorm(1,b_rho/a_rho,1/sqrt(a_rho))
}
# while(rho.now>1){
# rho.now=rnorm(1,b/a,1/sqrt(a))
# }
p1.1=dpost(betas.now,sigma2.now,rho.now,Lambda)
p2.1=dpost(betas.now,sigma2.now,Rho,Lambda)
q1.1=dproposalrho(rho.now)
q2.1=dproposalrho(Rho)
      met.a1 <- p1.1 - p2.1   # log-posterior difference (dpost returns logs)
      met.b1 <- q2.1 - q1.1   # log proposal correction
      T.val1=min(0,met.a1+met.b1)
u<-runif(1)
if (u <=exp(T.val1)) {
Rho <- rho.now
ind[i,1] = 1
}
      # Metropolis step for lambda
A=diag(nrow(X))-Rho*matstand
a_lambda=(1/sigma2.now)*t(matstand2%*%(A%*%y-X%*%betas.now))%*%(matstand2%*%(A%*%y-X%*%betas.now))
b_lambda=(1/sigma2.now)*t(matstand2%*%(A%*%y-X%*%betas.now))%*%(A%*%y-X%*%betas.now)
lambda.now=rnorm(1,b_lambda/a_lambda,1/sqrt(a_lambda))
      while(det(diag(nrow(X))-lambda.now*matstand2)<=0){
lambda.now <- rnorm(1,b_lambda/a_lambda,1/sqrt(a_lambda))
}
# while(lambda.now>1){
# lambda.now=rnorm(1,b/a,1/sqrt(a))
# }
p1.2=dpost(betas.now,sigma2.now,Rho,lambda.now)
p2.2=dpost(betas.now,sigma2.now,Rho,Lambda)
q1.2=dproposallambda(lambda.now)
q2.2=dproposallambda(Lambda)
      met.a2 <- p1.2 - p2.2   # log-posterior difference
      met.b2 <- q2.2 - q1.2   # log proposal correction
      T.val2=min(0,met.a2+met.b2)
u<-runif(1)
if (u <=exp(T.val2)) {
Lambda <- lambda.now
ind[i,2] = 1
}
beta.mcmc[i,]<-betas.now
sigma2.mcmc[i]<-sigma2.now
      rho.mcmc[i]<-Rho      # record the accepted state, not the raw proposal
      lambda.mcmc[i]<-Lambda
      logdetS=nrow(X)*log(sigma2.mcmc[i])
      detB=det(diag(nrow(X))-lambda.mcmc[i]*matstand2)
      detA=det(diag(nrow(X))-rho.mcmc[i]*matstand)
      A=(diag(nrow(X))-rho.mcmc[i]*matstand)
      Yg=(diag(nrow(X))-lambda.mcmc[i]*matstand2)%*%(A%*%y-X%*%beta.mcmc[i,])
      logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detA)+log(detB)-0.5*logdetS-0.5*t(Yg)%*%Yg/sigma2.mcmc[i]
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
}
}
beta.mcmc_1=beta.mcmc[(burn+1):nsim,]
sigma2.mcmc_1=sigma2.mcmc[(burn+1):nsim]
rho.mcmc_1=rho.mcmc[(burn+1):nsim]
lambda.mcmc_1=lambda.mcmc[(burn+1):nsim]
  beta.mcmc_2=matrix(NA,nrow=(nsim-burn)/step,ncol(X))
  sigma2.mcmc_2=c()
  rho.mcmc_2=c()
  lambda.mcmc_2=c()
  for (i in 1:(nsim-burn))
{
if(i%%step==0)
{
beta.mcmc_2[i/step,]=beta.mcmc_1[i,]
sigma2.mcmc_2[i/step]=sigma2.mcmc_1[i]
rho.mcmc_2[i/step]=rho.mcmc_1[i]
lambda.mcmc_2[i/step]=lambda.mcmc_1[i]
}
}
Bestimado=colMeans(beta.mcmc_2)
Sigma2est=mean(sigma2.mcmc_2)
rho.mcmc_3=rho.mcmc_2[rho.mcmc_2<=1]
Rhoest=mean(rho.mcmc_3)
lambda.mcmc_3=lambda.mcmc_2[lambda.mcmc_2<=1]
Lambdaest=mean(lambda.mcmc_3)
Betaquant <- t(apply(beta.mcmc_2,2,function(x){quantile(x,c(0.025,0.5,0.975))}))
Sigma2quant <- quantile(sigma2.mcmc_2,c(0.025,0.5,0.975))
Rhoquant <- quantile(rho.mcmc_3,c(0.025,0.5,0.975))
Lambdaquant <- quantile(lambda.mcmc_3,c(0.025,0.5,0.975))
DesvBeta <- apply(beta.mcmc_2,2,sd)
DesvSigma2 <- sd(sigma2.mcmc_2)
DesvRho<-sd(rho.mcmc_3)
DesvLambda<-sd(lambda.mcmc_3)
AccRate1<-sum(ind[,1])/nsim
AccRate2<-sum(ind[,2])/nsim
  logdetS=nrow(X)*log(Sigma2est)
  detA=det(diag(nrow(X))-Rhoest*matstand)
  A=(diag(nrow(X))-Rhoest*matstand)
  detB=det(diag(nrow(X))-Lambdaest*matstand2)
  Yg=(diag(nrow(X))-Lambdaest*matstand2)%*%(A%*%y-X%*%Bestimado)
  logV1=-(nrow(X)/2)*log(2*pi)+log(detA)+log(detB)-0.5*logdetS-0.5*(1/Sigma2est)*t(Yg)%*%Yg
  p=ncol(X)+3
  BIC=-2*logV1+p*log(nrow(X))
  logV_DIC=logV_DIC[is.nan(logV_DIC)==FALSE]
  Dbar=mean(-2*logV_DIC)
  logV1_DIC=logV1
  Dev=-2*logV1_DIC
  DIC=2*Dbar+Dev
# yestimado=solve(diag(nrow(X))-Rhoest*matstand)%*%X%*%Bestimado
# residuals=(y)-yestimado
summary = data.frame( mean=c(Bestimado,Sigma2est,Rhoest,Lambdaest),
sd = c(DesvBeta,DesvSigma2,DesvRho,DesvLambda),
q0.025=c(Betaquant[,1],Sigma2quant[1],Rhoquant[1],Lambdaquant[1]),
q0.5=c(Betaquant[,2],Sigma2quant[2],Rhoquant[2],Lambdaquant[2]),
q0.975=c(Betaquant[,3],Sigma2quant[3],Rhoquant[3],Lambdaquant[3]))
  rownames(summary) = c(paste0("x",0:(ncol(X)-1)),"sigma2","rho","lambda")
if(impacts){
n <- nrow(X)
V <- pblapply(1:(nsim-burn),function(x){solve(diag(n)-rho.mcmc_1[x]*matstand)})
S <- lapply(1:ncol(X),function(y){
pblapply(1:(nsim-burn), function(x){
beta.mcmc_1[x,y]*V[[x]]},cl=2)})
retain <- rep(c(rep(0,step-1),1),(nsim-burn)/step)
S <- lapply(S,function(x){x[which(retain>0)]})
impact.df <- lapply(S, function(x){
lapply(x, function(y){
impact.direct <- sum(diag(y))/n
impact.total<- sum((y))/n
impact.indirect <- impact.total - impact.direct
return(data.frame(Direct=impact.direct,Total=impact.total,Indirect=impact.indirect))
})})
impacts <- t(sapply(impact.df, function(x){tmp <- do.call("rbind",x)
colMeans(tmp)
}))
rownames(impacts) <- rownames(summary)[1:ncol(X)]
impacts <- impacts[-1,]
}
return(list(summary=summary,Acceptance_Rates=list(Rho_AccRate=AccRate1,Lambda_AccRate=AccRate2),Criteria=list(BIC=BIC,DIC=DIC),chains=mcmc(data.frame(beta_chain=beta.mcmc,sigma2_chain=sigma2.mcmc,rho_chain=rho.mcmc,lambda_chain=lambda.mcmc),thin = 1),
impacts=impacts))
}
|
/scratch/gouwar.j/cran-all/cranData/BSPADATA/R/hom_general.R
|
# Fitting homoscedastic SAR models
#
hom_sar=function(formula,data,W,nsim,burn,step,prior,initial,kernel="normal",impacts=TRUE,seed=0){
y_n <- as.character(formula[[2]])
X0 <- as.character(formula[[3]])[-1]
X1 <- as.character(do.call("c",sapply(X0, function(x){strsplit(x,"\\+")})))
X_n <- gsub(" ","",X1)
y <- data[,which(names(data)==y_n)]
X <- as.matrix(data[,which(names(data)%in%X_n)])
b_pri <- prior$b_pri
B_pri <- prior$B_pri
r_pri <- prior$r_pri
lambda_pri <- prior$lambda_pri
beta_0 <- initial$beta_0
sigma2_0 <- initial$sigma2_0
rho_0 <- initial$rho_0
  output <- hom_sar_int(y,X,W,nsim,burn,step,b_pri,B_pri,r_pri,lambda_pri,beta_0,sigma2_0,rho_0,kernel=kernel,seed=seed,impacts=impacts)
return(output)
}
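# A minimal usage sketch (not run); `fakedata` (with an explicit intercept
# column x0 of ones) and the neighbourhood object `W_queen` are hypothetical.
# fit <- hom_sar(y ~ x0 + x1 + x2, data = fakedata, W = W_queen,
#                nsim = 2000, burn = 500, step = 5,
#                prior = list(b_pri = rep(0, 3), B_pri = diag(1000, 3),
#                             r_pri = 0.01, lambda_pri = 0.01),
#                initial = list(beta_0 = rep(0, 3), sigma2_0 = 1, rho_0 = 0.5),
#                kernel = "normal", impacts = TRUE, seed = 0)
# fit$summary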
#' Internal MCMC engine for the Bayesian homoscedastic SAR model
#'
#' @keywords internal
#'
hom_sar_int=function(y,X,W,nsim,burn,step,b_pri,B_pri,r_pri,lambda_pri,beta_0,sigma2_0,rho_0,kernel="normal",seed=0,impacts=TRUE)
{
set.seed(seed)
  rowst=function(x){
    x/sum(x)
  }
  ######## Read the input data
y=as.matrix(y)
if (is.null(X) | is.null(y) ){
stop("No data")
}
if(burn>nsim | burn<0){
stop("Burn must be between 0 and nsim")
}
if(nsim<=0){
stop("There must be more than 0 simulations")
}
  if(step<=0 | step > nsim){
    stop("Thinning step must be between 1 and nsim")
  }
if(class(W)[1]=="nb"){
matstand=nb2mat(W)
mat0=nb2listw(W,style="B")
mat=listw2mat(mat0)
}
else{
if(class(W)[1]=="listw"){
mat=listw2mat(W)
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
else{
if(sum(rowSums(W))==nrow(X))
{
matstand=W
mat=matrix(nrow=nrow(X),ncol=nrow(X))
for(i in 1:nrow(mat)){
for(j in 1:ncol(mat)){
if(matstand[i,j]==0){mat[i,j]=0}
else{mat[i,j]=1/matstand[i,j]}
}
}
}
else{
mat=W
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
}
}
dpost <- function(betas,sigma2,rho) {
A=diag(nrow(X))-rho*matstand
k=t(A%*%y-X%*%(betas))%*%(A%*%y-X%*%(betas))
fc.y=k
fc.beta=t(b_pri - betas)%*%solve(B_pri)%*%(b_pri-betas)
    fc.sigma2=r_pri*log(lambda_pri)-(r_pri+1)*log(sigma2)-lambda_pri/sigma2-lgamma(r_pri) # log inverse-gamma prior
    logdp <- (-nrow(X)/2)*log(sigma2) + log(det(A)) -0.5*fc.y/sigma2 - 0.5*fc.beta + fc.sigma2
return(logdp)
}
dproposal <- function(rho) {
    a=(1/sigma2.now)*t(y)%*%t(matstand)%*%matstand%*%y
    b=(1/sigma2.now)*t(y)%*%t(matstand)%*%(y-X%*%betas.now)
    dnorm(rho,b/a,1/sqrt(a),log = TRUE)
}
ind=rep(0,nsim)
beta.mcmc=matrix(NA,nrow=nsim,ncol=ncol(X))
sigma2.mcmc=c()
rho.mcmc=c()
logV_DIC=c()
Sigma_0=(sigma2_0)*diag(nrow(X))
pb <- txtProgressBar(min = 0, max = nsim, style = 3)
if(kernel=="uniform"){
for(i in 1:nsim){
if(i==1){
Sigma=Sigma_0
Rho=rho_0
}
else{
Sigma=sigma2.now*diag(nrow(X))
}
B_pos=solve(solve(B_pri)+t(X)%*%solve(Sigma)%*%(X))
b_pos=B_pos%*%(solve(B_pri)%*%b_pri+t(X)%*%solve(Sigma)%*%y-Rho*t(X)%*%solve(Sigma)%*%matstand%*%y)
      # Draw beta from its full conditional
betas.now=c(rmvnorm(1,b_pos,B_pos))
      # Full conditional for sigma2
      r_pos=nrow(X)/2+r_pri
A=diag(nrow(X))-Rho*matstand
k=t(A%*%y-X%*%(betas.now))%*%(A%*%y-X%*%(betas.now))
lambda_pos=(k+2*lambda_pri)/2
sigma2.now=rigamma(1,r_pos,lambda_pos)
      # Metropolis step for rho
rho.now=runif(1,1/abs(min(eigen(mat)$values)),1)
p1=dpost(betas.now,sigma2.now,rho.now)
p2=dpost(betas.now,sigma2.now,Rho)
      T.val=min(1,exp(p1-p2))   # dpost returns log-densities
u<-runif(1)
if (u <=T.val) {
Rho= rho.now
ind[i] = 1
}
beta.mcmc[i,]<-betas.now
sigma2.mcmc[i]<-sigma2.now
      rho.mcmc[i]<-Rho      # record the accepted state, not the raw proposal
      logdetS=nrow(X)*log(sigma2.mcmc[i])
      detB=det(diag(nrow(X))-rho.mcmc[i]*matstand)
      Yg=(diag(nrow(X))-rho.mcmc[i]*matstand)%*%y-X%*%beta.mcmc[i,]
      logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detB)-0.5*logdetS-0.5*t(Yg)%*%Yg/sigma2.mcmc[i]
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
}
}
if(kernel=="normal"){
for(i in 1:nsim){
      # Full conditional for beta
if(i==1){
Sigma=Sigma_0
Rho=rho_0
}
else{
Sigma=sigma2.now*diag(nrow(X))
}
B_pos=solve(solve(B_pri)+t(X)%*%diag(1/diag(Sigma))%*%X)
b_pos=B_pos%*%(solve(B_pri)%*%b_pri+t(X)%*%diag(1/diag(Sigma))%*%y-Rho*t(X)%*%diag(1/diag(Sigma))%*%matstand%*%y)
betas.now=c(rmvnorm(1,b_pos,B_pos))
r_pos=nrow(X)/2+r_pri
A=diag(nrow(X))-Rho*matstand
k=t(A%*%y-X%*%(betas.now))%*%(A%*%y-X%*%(betas.now))
      lambda_pos=(k+2*lambda_pri)/2
sigma2.now=rigamma(1,r_pos,lambda_pos)
a=(1/sigma2.now)*t(y)%*%t(matstand)%*%matstand%*%y
b=(1/sigma2.now)*t(y)%*%t(matstand)%*%(y-X%*%betas.now)
rho.now=rnorm(1,b/a,1/sqrt(a))
while(det(diag(nrow(X))-rho.now*matstand)<=0){
rho.now <- rnorm(1,b/a,1/sqrt(a))
}
p1=dpost(betas.now,sigma2.now,rho.now)
p2=dpost(betas.now,sigma2.now,Rho)
q1=dproposal(rho.now)
q2=dproposal(Rho)
#T.val=min(1,(p1*q1)/(p2*q2))
      met.a <- p1 - p2   # log-posterior difference (dpost returns logs)
      met.b <- q2 - q1   # log proposal correction
      T.val=min(0,met.a+met.b)
u<-runif(1)
if (u <=exp(T.val)) {
Rho= rho.now
ind[i] = 1
}
beta.mcmc[i,]<-betas.now
sigma2.mcmc[i]<-sigma2.now
      rho.mcmc[i]<-Rho      # record the accepted state, not the raw proposal
      logdetS=nrow(X)*log(sigma2.mcmc[i])
      detB=det(diag(nrow(X))-rho.mcmc[i]*matstand)
      Yg=(diag(nrow(X))-rho.mcmc[i]*matstand)%*%y-X%*%beta.mcmc[i,]
      logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detB)-0.5*logdetS-0.5*t(Yg)%*%Yg/sigma2.mcmc[i]
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
}
}
beta.mcmc_1=beta.mcmc[(burn+1):nsim,]
sigma2.mcmc_1=sigma2.mcmc[(burn+1):nsim]
rho.mcmc_1=rho.mcmc[(burn+1):nsim]
  beta.mcmc_2=matrix(NA,nrow=(nsim-burn)/step,ncol(X))
  sigma2.mcmc_2=c()
  rho.mcmc_2=c()
  for (i in 1:(nsim-burn))
{
if(i%%step==0)
{
beta.mcmc_2[i/step,]=beta.mcmc_1[i,]
sigma2.mcmc_2[i/step]=sigma2.mcmc_1[i]
rho.mcmc_2[i/step]=rho.mcmc_1[i]
}
}
Bestimado = colMeans(beta.mcmc_2)
Sigma2est = mean(sigma2.mcmc_2)
rho.mcmc_3=rho.mcmc_2[rho.mcmc_2<=1]
Rhoest=mean(rho.mcmc_3)
DesvBeta <- apply(beta.mcmc_2,2,sd)
DesvSigma2 <- sd(sigma2.mcmc_2)
DesvRho<-sd(rho.mcmc_3)
Betaquant <- t(apply(beta.mcmc_2,2,function(x){quantile(x,c(0.025,0.5,0.975))}))
Sigma2quant <- quantile(sigma2.mcmc_2,c(0.025,0.5,0.975))
Rhoquant <- quantile(rho.mcmc_3,c(0.025,0.5,0.975))
AccRate<-sum(ind)/nsim
  logdetS=nrow(X)*log(Sigma2est)
  detB=det(diag(nrow(X))-Rhoest*matstand)
  Yg=((diag(nrow(X))-Rhoest*matstand)%*%y)-X%*%Bestimado
  logV1=-(nrow(X)/2)*log(2*pi)+log(detB)-0.5*logdetS-0.5*(1/Sigma2est)*t(Yg)%*%Yg
  p=ncol(X)+2
  BIC=-2*logV1+p*log(nrow(X))
  logV_DIC=logV_DIC[is.nan(logV_DIC)==FALSE]
  Dbar=mean(-2*logV_DIC)
  logV1_DIC=logV1
  Dev=-2*logV1_DIC
  DIC=2*Dbar+Dev
summary = data.frame( mean=c(Bestimado,Sigma2est,Rhoest),
sd = c(DesvBeta,DesvSigma2,DesvRho),
q0.025=c(Betaquant[,1],Sigma2quant[1],Rhoquant[1]),
q0.5=c(Betaquant[,2],Sigma2quant[2],Rhoquant[2]),
q0.975=c(Betaquant[,3],Sigma2quant[3],Rhoquant[3]))
  rownames(summary) = c(paste0("x",0:(ncol(X)-1)),"sigma2","rho")
if(impacts){
n <- nrow(X)
V <- pblapply(1:(nsim-burn),function(x){solve(diag(n)-rho.mcmc_1[x]*matstand)})
S <- lapply(1:ncol(X),function(y){
pblapply(1:(nsim-burn), function(x){
beta.mcmc_1[x,y]*V[[x]]},cl=2)})
retain <- rep(c(rep(0,step-1),1),(nsim-burn)/step)
S <- lapply(S,function(x){x[which(retain>0)]})
impact.df <- lapply(S, function(x){
lapply(x, function(y){
impact.direct <- sum(diag(y))/n
impact.total<- sum((y))/n
impact.indirect <- impact.total - impact.direct
return(data.frame(Direct=impact.direct,Total=impact.total,Indirect=impact.indirect))
})})
impacts <- t(sapply(impact.df, function(x){tmp <- do.call("rbind",x)
colMeans(tmp)
}))
rownames(impacts) <- rownames(summary)[1:ncol(X)]
impacts <- impacts[-1,]
}
out<- list(summary=summary, Acceptance_Rate=AccRate,Criteria=list(BIC=BIC,DIC=DIC),
chains=mcmc(data.frame(beta_chain=beta.mcmc,sigma2_chain=sigma2.mcmc,rho_chain=rho.mcmc)),
impacts=impacts)
class(out) <- "out"
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSPADATA/R/hom_sar.R
|
# Fitting homoscedastic SEM models
hom_sem=function(formula,data,W,nsim,burn,step,prior,initial,kernel="normal",seed=0){
y_n <- as.character(formula[[2]])
X0 <- as.character(formula[[3]])[-1]
X1 <- as.character(do.call("c",sapply(X0, function(x){strsplit(x,"\\+")})))
X_n <- gsub(" ","",X1)
y <- data[,which(names(data)==y_n)]
X <- as.matrix(data[,which(names(data)%in%X_n)])
b_pri <- prior$b_pri
B_pri <- prior$B_pri
r_pri <- prior$r_pri
lambda_pri <- prior$lambda_pri
beta_0 <- initial$beta_0
sigma2_0 <- initial$sigma2_0
lambda_0 <- initial$lambda_0
  output <- hom_sem_int(y,X,W,nsim,burn,step,b_pri,B_pri,r_pri,lambda_pri,beta_0,sigma2_0,lambda_0,kernel=kernel,seed=seed)
return(output)
}
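# A minimal usage sketch (not run); `fakedata` (with an explicit intercept
# column x0 of ones) and the neighbourhood object `W_queen` are hypothetical.
# fit <- hom_sem(y ~ x0 + x1 + x2, data = fakedata, W = W_queen,
#                nsim = 2000, burn = 500, step = 5,
#                prior = list(b_pri = rep(0, 3), B_pri = diag(1000, 3),
#                             r_pri = 0.01, lambda_pri = 0.01),
#                initial = list(beta_0 = rep(0, 3), sigma2_0 = 1,
#                               lambda_0 = 0.5),
#                kernel = "normal", seed = 0)
# fit$summary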
#' Internal MCMC engine for the Bayesian homoscedastic SEM model
#'
#' @keywords internal
#'
hom_sem_int=function(y,X,W,nsim,burn,step,b_pri,B_pri,r_pri,lambda_pri,beta_0,sigma2_0,lambda_0,kernel="normal",seed=0)
{
set.seed(seed)
  rowst=function(x){
    x/sum(x)
  }
  ######## Read the input data
y=as.matrix(y)
if (is.null(X) | is.null(y) ){
stop("No data")
}
if(burn>nsim | burn<0){
stop("Burn must be between 0 and nsim")
}
if(nsim<=0){
stop("There must be more than 0 simulations")
}
  if(step<=0 | step > nsim){
    stop("Thinning step must be between 1 and nsim")
  }
  if(inherits(W,"nb")){
matstand=nb2mat(W)
mat0=nb2listw(W,style="B")
mat=listw2mat(mat0)
}
else{
    if(inherits(W,"listw")){
mat=listw2mat(W)
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
else{
if(sum(rowSums(W))==nrow(X))
{
matstand=W
mat=matrix(nrow=nrow(X),ncol=nrow(X))
for(i in 1:nrow(mat)){
for(j in 1:ncol(mat)){
if(matstand[i,j]==0){mat[i,j]=0}
else{mat[i,j]=1/matstand[i,j]}
}
}
}
else{
mat=W
matstand=apply(mat,2,rowst)
matstand=t(matstand)
}
}
}
dpost <- function(betas,sigma2,lambda) {
A=diag(nrow(X))-lambda*matstand
k=t(A%*%(y-X%*%betas))%*%(A%*%(y-X%*%betas))
fc.y=k
fc.beta=t(b_pri - betas)%*%solve(B_pri)%*%(b_pri-betas)
    fc.sigma2=r_pri*log(lambda_pri)-(r_pri+1)*log(sigma2)-lambda_pri/sigma2-lgamma(r_pri) # log inverse-gamma prior
    logdp <- (-nrow(X)/2)*log(sigma2) + log(det(A)) -0.5*fc.y/sigma2 - 0.5*fc.beta + fc.sigma2
return(logdp)
}
dproposal <- function(lambda) {
a=(1/sigma2.now)*t(y-X%*%betas.now)%*%t(matstand)%*%matstand%*%(y-X%*%betas.now)
b=(1/sigma2.now)*t(y-X%*%betas.now)%*%(matstand)%*%(y-X%*%betas.now)
    dnorm(lambda,b/a,1/sqrt(a),log = TRUE)
}
ind=rep(0,nsim)
beta.mcmc=matrix(NA,nrow=nsim,ncol=ncol(X))
sigma2.mcmc=c()
lambda.mcmc=c()
logV_DIC=c()
Sigma_0=(sigma2_0)*diag(nrow(X))
pb <- txtProgressBar(min = 0, max = nsim, style = 3)
if(kernel=="uniform"){
for(i in 1:nsim){
if(i==1){
Sigma=Sigma_0
Lambda=lambda_0
}
else{
Sigma=sigma2.now*diag(nrow(X))
}
A=diag(nrow(X))-Lambda*matstand
B_pos=solve(solve(B_pri)+t(X)%*%t(A)%*%solve(Sigma)%*%A%*%X)
b_pos=B_pos%*%(solve(B_pri)%*%b_pri+t(X)%*%t(A)%*%solve(Sigma)%*%A%*%y)
      # Draw beta from its full conditional
betas.now=c(rmvnorm(1,b_pos,B_pos))
      # Full conditional for sigma2
r_pos=nrow(X)/2+r_pri
A=diag(nrow(X))-Lambda*matstand
k=t(A%*%(y-X%*%(betas.now)))%*%(A%*%(y-X%*%(betas.now)))
lambda_pos=(k+2*lambda_pri)/2
sigma2.now=rigamma(1,r_pos, lambda_pos)
      # Metropolis step for lambda
lambda.now=runif(1,1/abs(min(eigen(mat)$values)),1)
p1=dpost(betas.now,sigma2.now,lambda.now)
p2=dpost(betas.now,sigma2.now,Lambda)
      T.val=min(1,exp(p1-p2))   # dpost returns log-densities
u<-runif(1)
if (u <=T.val) {
Lambda <- lambda.now
ind[i] = 1
}
beta.mcmc[i,]<-betas.now
sigma2.mcmc[i]<-sigma2.now
      lambda.mcmc[i]<-Lambda   # record the accepted state, not the raw proposal
      logdetS=nrow(X)*log(sigma2.mcmc[i])
      detB=det(diag(nrow(X))-lambda.mcmc[i]*matstand)
      Yg=(diag(nrow(X))-lambda.mcmc[i]*matstand)%*%(y-X%*%beta.mcmc[i,])
      logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detB)-0.5*logdetS-0.5*t(Yg)%*%Yg/sigma2.mcmc[i]
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
}
}
if(kernel=="normal"){
for(i in 1:nsim){
      # Full conditional for beta
if(i==1){
Sigma=Sigma_0
Lambda=lambda_0
}
else{
Sigma=sigma2.now*diag(nrow(X))
}
A=diag(nrow(X))-Lambda*matstand
B_pos=solve(solve(B_pri)+t(X)%*%t(A)%*%diag(1/diag(Sigma))%*%A%*%X)
b_pos=B_pos%*%(solve(B_pri)%*%b_pri+t(X)%*%t(A)%*%diag(1/diag(Sigma))%*%A%*%y)
#Beta a posteriori condicional
betas.now=c(rmvnorm(1,b_pos,B_pos))
      # Full conditional for sigma2
r_pos=nrow(X)/2+r_pri
A=diag(nrow(X))-Lambda*matstand
k=t(A%*%(y-X%*%(betas.now)))%*%(A%*%(y-X%*%(betas.now)))
lambda_pos=(k+2*lambda_pri)/2
sigma2.now=rigamma(1,r_pos, lambda_pos)
eigenvals <- eigen(matstand)$values
lowlim <- -1/(max(abs(eigenvals[eigenvals<0])))
a=(1/sigma2.now)*t(y-X%*%betas.now)%*%t(matstand)%*%matstand%*%(y-X%*%betas.now)
b=(1/sigma2.now)*t(y-X%*%betas.now)%*%(matstand)%*%(y-X%*%betas.now)
lambda.now=rnorm(1,b/a,1/sqrt(a))
while(lambda.now>1 || lambda.now< lowlim){
lambda.now <- rnorm(1,b/a,1/sqrt(a))
}
p1=dpost(betas.now,sigma2.now,lambda.now)
p2=dpost(betas.now,sigma2.now,Lambda)
q1=dproposal(lambda.now)
q2=dproposal(Lambda)
      met.a <- p1 - p2   # log-posterior difference (dpost returns logs)
      met.b <- q2 - q1   # log proposal correction
      T.val=min(0,met.a+met.b)
u<-runif(1)
if (u <=exp(T.val)) {
Lambda <- lambda.now
ind[i] = 1
}
beta.mcmc[i,]<-betas.now
sigma2.mcmc[i]<-sigma2.now
      lambda.mcmc[i]<-Lambda   # record the accepted state, not the raw proposal
      logdetS=nrow(X)*log(sigma2.mcmc[i])
      detB=det(diag(nrow(X))-lambda.mcmc[i]*matstand)
      Yg=(diag(nrow(X))-lambda.mcmc[i]*matstand)%*%(y-X%*%beta.mcmc[i,])
      logV_DIC[i]=(-(nrow(X)/2)*log(2*pi))+log(detB)-0.5*logdetS-0.5*t(Yg)%*%Yg/sigma2.mcmc[i]
Sys.sleep(0.000000001)
# update progress bar
setTxtProgressBar(pb, i)
}
}
beta.mcmc_1=beta.mcmc[(burn+1):nsim,]
sigma2.mcmc_1=sigma2.mcmc[(burn+1):nsim]
lambda.mcmc_1=lambda.mcmc[(burn+1):nsim]
  beta.mcmc_2=matrix(NA,nrow=(nsim-burn)/step,ncol(X))
  sigma2.mcmc_2=c()
  lambda.mcmc_2=c()
  for (i in 1:(nsim-burn))
{
if(i%%step==0)
{
beta.mcmc_2[i/step,]=beta.mcmc_1[i,]
sigma2.mcmc_2[i/step]=sigma2.mcmc_1[i]
lambda.mcmc_2[i/step]=lambda.mcmc_1[i]
}
}
Bestimado = colMeans(beta.mcmc_2)
Sigma2est = mean(sigma2.mcmc_2)
lambda.mcmc_3=lambda.mcmc_2[lambda.mcmc_2<=1]
Lambdaest=mean(lambda.mcmc_3)
DesvBeta <- apply(beta.mcmc_2,2,sd)
DesvSigma2 <- sd(sigma2.mcmc_2)
DesvLambda<-sd(lambda.mcmc_3)
Betaquant <- t(apply(beta.mcmc_2,2,function(x){quantile(x,c(0.025,0.5,0.975))}))
Sigma2quant <- quantile(sigma2.mcmc_2,c(0.025,0.5,0.975))
Lambdaquant <- quantile(lambda.mcmc_3,c(0.025,0.5,0.975))
AccRate<-sum(ind)/nsim
  logdetS=nrow(X)*log(Sigma2est)
  detB=det(diag(nrow(X))-Lambdaest*matstand)
  Yg=(diag(nrow(X))-Lambdaest*matstand)%*%(y-X%*%Bestimado)
  logV=(-(nrow(X)/2)*log(2*pi))+log(detB)-0.5*logdetS-0.5*(1/Sigma2est)*t(Yg)%*%Yg
  p=ncol(X)+2
  BIC=-2*logV+p*log(nrow(X))
  logV_DIC=logV_DIC[is.nan(logV_DIC)==FALSE]
  Dbar=mean(-2*logV_DIC)
  logV1_DIC=logV
  Dev=-2*logV1_DIC
  DIC=2*Dbar+Dev
summary = data.frame( mean=c(Bestimado,Sigma2est,Lambdaest),
sd = c(DesvBeta,DesvSigma2,DesvLambda),
q0.025=c(Betaquant[,1],Sigma2quant[1],Lambdaquant[1]),
q0.5=c(Betaquant[,2],Sigma2quant[2],Lambdaquant[2]),
q0.975=c(Betaquant[,3],Sigma2quant[3],Lambdaquant[3]))
  rownames(summary) = c(paste0("x",0:(ncol(X)-1)),"sigma2","lambda")
return(list(summary=summary,Acceptance_Rate=AccRate,Criteria=list(BIC=BIC,DIC=DIC),chains=mcmc(data.frame(beta_chain=beta.mcmc,sigma2_chain=sigma2.mcmc,lambda_chain=lambda.mcmc),thin = 1)))
}
|
/scratch/gouwar.j/cran-all/cranData/BSPADATA/R/hom_sem.R
|
#' @importFrom ica icaimax
ICA_imax = function(X,q){
  # Run Infomax ICA on the transposed data, then rescale the decomposition so
  # that each column of the mixing matrix A has unit Euclidean norm (the
  # scale factors are absorbed into the source matrix S)
ica0 = icaimax(t(X),nc=q,center=FALSE)
IC_initial = ica0$S
A_mt = ica0$M
D = diag(apply(A_mt,2,'norm_vec'))
A_0 = A_mt%*%solve(D)
S_0 = t(IC_initial%*%t(D))
out = list()
out$S = S_0
out$A = A_0
return(out)
}
norm_vec = function(v){
return(sqrt(sum(v^2)))
}
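# Minimal usage sketch (illustrative, kept unevaluated): ICA_imax takes the
# n x p data matrix used elsewhere in this package and returns sources S (q x p)
# and a column-normalised mixing matrix A (n x q); the toy sizes are assumptions.
if (FALSE) {
  set.seed(1)
  X <- matrix(rnorm(5 * 200), nrow = 5, ncol = 200)  # 5 samples, 200 voxels
  fit <- ICA_imax(X, q = 2)
  dim(fit$A)  # 5 x 2
  dim(fit$S)  # 2 x 200
}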
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/R/ICA_imax.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
cal_sumb <- function(b, psi) {
.Call('_BSPBSS_cal_sumb', PACKAGE = 'BSPBSS', b, psi)
}
cal_S <- function(sumb, zeta) {
.Call('_BSPBSS_cal_S', PACKAGE = 'BSPBSS', sumb, zeta)
}
cal_core <- function(X, A, S) {
.Call('_BSPBSS_cal_core', PACKAGE = 'BSPBSS', X, A, S)
}
dL_b_sub <- function(b, X, A, lambda, psi, epsilon, zeta, sigma, sizep, sizen) {
.Call('_BSPBSS_dL_b_sub', PACKAGE = 'BSPBSS', b, X, A, lambda, psi, epsilon, zeta, sigma, sizep, sizen)
}
GP_update_b_SGHMC <- function(b, X, A, S, sigma, lambda, psi, epsilon, sumb, zeta, X_core, eta, alpha, sizep, sizen, m, itr, nu) {
invisible(.Call('_BSPBSS_GP_update_b_SGHMC', PACKAGE = 'BSPBSS', b, X, A, S, sigma, lambda, psi, epsilon, sumb, zeta, X_core, eta, alpha, sizep, sizen, m, itr, nu))
}
GP_update_A <- function(A, prior, X_core, X, S, sigma) {
invisible(.Call('_BSPBSS_GP_update_A', PACKAGE = 'BSPBSS', A, prior, X_core, X, S, sigma))
}
GP_update_sigma <- function(sigma, X_core, prior_sigma) {
invisible(.Call('_BSPBSS_GP_update_sigma', PACKAGE = 'BSPBSS', sigma, X_core, prior_sigma))
}
log_p_zeta_Gaussian <- function(zeta, X_core, sigma) {
.Call('_BSPBSS_log_p_zeta_Gaussian', PACKAGE = 'BSPBSS', zeta, X_core, sigma)
}
GP_update_zeta <- function(zeta, sumb, X_core, sigma, X, A, S, stepsize, prior, count) {
invisible(.Call('_BSPBSS_GP_update_zeta', PACKAGE = 'BSPBSS', zeta, sumb, X_core, sigma, X, A, S, stepsize, prior, count))
}
loglk <- function(X, A, S, sigma) {
.Call('_BSPBSS_loglk', PACKAGE = 'BSPBSS', X, A, S, sigma)
}
mcmc_bspbss_c <- function(X, A, b, sigma, zeta, stepsize_zeta, subsample_n, subsample_p, prior, psi, lambda, epsilon, lr, decay, num_leapfrog, MClength, burn_in, thin, show_step) {
.Call('_BSPBSS_mcmc_bspbss_c', PACKAGE = 'BSPBSS', X, A, b, sigma, zeta, stepsize_zeta, subsample_n, subsample_p, prior, psi, lambda, epsilon, lr, decay, num_leapfrog, MClength, burn_in, thin, show_step)
}
smoos <- function(S, xgrid, smooth) {
.Call('_BSPBSS_smoos', PACKAGE = 'BSPBSS', S, xgrid, smooth)
}
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/R/RcppExports.R
|
create.circle.in.2D.image = function(voxels,center = c(0.5,0.5),radius = 0.2){
return((voxels[,1]-center[1])^2+(voxels[,2]-center[2])^2<radius^2)
}
create.triangle.in.2D.image = function(voxels,x0,y0, x1,y1, x2,y2){
return(points.in.triangle(voxels[,1],voxels[,2],x0,y0, x1,y1, x2,y2))
}
create.square.in.2D.image = function(voxels,x0,y0,x1,y1){
return(points.in.rectangle(voxels[,1],voxels[,2],x0,y0,x1,y1))
}
points.in.rectangle = function(x,y,x0,y0,x1,y1){
return((x>x0 & x<x1) & (y>y0 & y<y1))
}
points.in.triangle = function(x,y, x0,y0, x1,y1, x2,y2) {
s = y0 * x2 - x0 * y2 + (y2 - y0) * x + (x0 - x2) * y;
t = x0 * y1 - y0 * x1 + (y0 - y1) * x + (x1 - x0) * y;
res = rep(FALSE,length=length(x))
false_idx = which((s < 0) != (t < 0))
if(length(false_idx)<length(x)){
A = -y1 * x2 + y0 * (x2 - x1) + x0 * (y1 - y2) + x1 * y2;
if (A < 0.0) {
s = -s;
t = -t;
A = -A;
}
res = (s > 0 & t > 0 & (s + t) <= A)
}
res[false_idx] = FALSE
return(res)
}
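# Minimal usage sketch (illustrative, kept unevaluated): these helpers flag the
# grid points lying inside a shape, mirroring how sim_2Dimage builds its three
# latent components; the grid and shape parameters below are toy choices.
if (FALSE) {
  voxels <- as.matrix(expand.grid(seq(0, 1, length.out = 20),
                                  seq(0, 1, length.out = 20)))
  in_circle   <- create.circle.in.2D.image(voxels, center = c(0.5, 0.5), radius = 0.2)
  in_square   <- create.square.in.2D.image(voxels, 0.1, 0.1, 0.5, 0.5)
  in_triangle <- create.triangle.in.2D.image(voxels, 0.2, 0.8, 0.2, 0.2, 0.8, 0.5)
  c(sum(in_circle), sum(in_square), sum(in_triangle))  # points inside each shape
}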
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/R/create.2D.image.R
|
#' @title Initial values
#' @description Generate initial values, set up priors and perform kernel decomposition
#' for the MCMC algorithm.
#'
#' @param X Data matrix with n rows (sample) and p columns (voxel).
#' @param coords Coordinate matrix with p rows (voxel) and d columns (dimension).
#' @param rescale If TRUE, rows of X are rescaled to have unit variance.
#' @param center If TRUE, rows of X are mean-centered.
#' @param q Number of latent sources.
#' @param dens The initial density level (between 0 and 1) of the latent sources.
#' @param ker_par 2-dimensional vector (a,b) with a>0, b>0, specifying the parameters in the modified exponential squared kernel.
#' @param num_eigen Number of eigenfunctions.
#' @param noise Gaussian noise added to the initial latent sources, with mean 0 and standard deviation being noise * sd(S0),
#' where sd(S0) is the standard deviation of the initial latent sources.
#'
#' @return List containing initial values, priors and eigenfunctions/eigenvalues of the kernel of the Gaussian process.
#' @export
#'
#' @importFrom Rcpp sourceCpp
#' @importFrom stats sd
#' @importFrom glmnet glmnet
#' @importFrom svd propack.svd
#' @import movMF
#' @import gridExtra
#' @import gtools
#' @importFrom stats quantile
#' @importFrom BayesGPfit GP.std.grids
#' @importFrom BayesGPfit GP.eigen.value
#' @importFrom BayesGPfit GP.eigen.funcs.fast
#' @importFrom stats kmeans
#' @importFrom stats optim
#'
#'
#' @useDynLib BSPBSS
#'
#' @examples
#'
#' sim = sim_2Dimage(length = 30, sigma = 5e-4, n = 30, smooth = 6)
#' ini = init_bspbss(sim$X, sim$coords, q = 3, ker_par = c(0.1,50), num_eigen = 50)
#'
init_bspbss= function(X, coords, rescale = TRUE, center = FALSE, q = 2, dens = 0.5, ker_par = c(0.05, 20), num_eigen = 500, noise = 0.0 ){
dim = ncol(coords)
n = nrow(X)
p = ncol(X)
if(center){
X = X - apply(X, 1, mean)
}
if(rescale){
for(i in 1:n){
sdx = sd(X[i,])
if(sdx!=0){
X[i,] = (X[i,])/sdx
}
}
}
coords0 = GP.std.grids(coords,center=apply(coords,2,mean),scale=NULL,max_range=1)
lambda0 = GP.eigen.value(1000,ker_par[1],ker_par[2],dim)
  if(num_eigen < length(lambda0)){
    lambda_tmp = lambda0[1:num_eigen]
  } else {
    lambda_tmp = lambda0
  }
L = length(lambda_tmp)
k = 0
tag = 0
while(k<=L){
tag = tag + 1
k = choose(tag+dim,dim)
}
Psi0 = t( GP.eigen.funcs.fast(coords0,tag,ker_par[1],ker_par[2] ) )
Psi_tmp = Psi0[1:L,]
ica_tmp = ICA_imax(X,q)
A0 = ica_tmp$A * sqrt(n)
S00 = ica_tmp$S * sqrt(1/n)
sdS00 = sd(S00)
S0 = S00 + matrix( rnorm( q*p, mean = 0, sd = noise*sdS00 ) , nrow = q, ncol =p )
b0 = matrix(0, nrow = q, ncol = num_eigen)
for(j in 1:q){
tmp = glmnet(x = t(Psi_tmp), y = S0[j,], family = "gaussian", alpha = 0.5, nlambda = 2,intercept = FALSE)
b0[j,] = tmp$beta[,length(tmp$lambda)]
}
sumb0 = cal_sumb(b0,Psi_tmp)
zeta0 = quantile(abs(sumb0),1-dens)
S1 = cal_S(sumb0,zeta0 - (1e-5) )
init = list()
init$A = A0
init$ICA_S = S0
init$S = S1
init$zeta = zeta0+1e-10
init$stepsize_zeta = (zeta0+1e-10) * 0.1
init$b = b0
init$sigma = apply(X - A0%*%S1,2,var)
kernel = list()
kernel$psi = Psi_tmp
kernel$lambda = lambda_tmp
prior = list()
prior$sigma = c(1,1e-10)
prior$A = c(1e-10, rep(1/sqrt(n),n) )
prior$zeta = c(0,1)
out = list()
out$init = init
out$prior = prior
out$kernel = kernel
out$coords = coords0
out$X = X
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/R/init_bspbss.R
|
#' @title levelplot for 2D images.
#' @description The function plots 2D images for a data matrix.
#'
#' @param S Data matrix with q rows (sample) and p columns (pixel).
#' @param lim 2-dimensional numeric vector, specifying the limits for the data.
#' @param xlim 2-dimensional numeric vector, specifying the lower and upper limits of \code{x}.
#' @param ylim 2-dimensional numeric vector, specifying the lower and upper limits of \code{y}.
#' @param coords Coordinates matrix with p rows (pixel) and 2 columns (dimension), specifying the coordinates of the data points.
#' @param layout 2-dimensional numeric vector, specifying the number of rows and number of columns for the layout of components.
#' @param color Colorbar.
#' @param file Name of the file to be saved.
#'
#' @return No return value.
#'
#' @import gplots
#' @import ggplot2
#'
#' @export
#'
#' @examples
#' sim = sim_2Dimage(length = 30, sigma = 5e-4, n = 30, smooth = 6)
#' levelplot2D(sim$S,lim = c(-0.04,0.04), sim$coords)
#'
levelplot2D = function(S, coords, lim = c(min(S),max(S)), xlim=c(0,max(coords[,1])), ylim=c(0,max(coords[,2])),
color = bluered(100),layout = c(1,nrow(S)),file=NULL){
q = nrow(S)
p = ncol(S)
coordx = NULL
coordy = NULL
value = NULL
comp = NULL
for(j in 1:q){
coordx = c(coordx,coords[,1])
coordy = c(coordy,coords[,2])
value = c(value,S[j,])
comp = c(comp,rep(j,p))
#data0 = rbind(data0, data.frame(x = coords[,1], y = coords[,2], value = S[j,], comp = j) )
}
data0 = data.frame(coordx = coordx, coordy = coordy, value = value, comp = comp)
plot = ggplot(data0,aes(x=coordx, y=coordy,fill=value)) +
geom_tile( ) + xlim(xlim) + ylim(ylim) +
facet_wrap(~comp, nrow=layout[1],ncol=layout[2]) +
labs(x = NULL, y = NULL) +
scale_fill_gradientn(name = NULL,lim = c(lim[1],lim[2]),na.value = "black",colors=color) +
theme_dark() +
coord_equal()
print(plot)
if(!is.null(file)){
ggsave(file,plot)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/R/levelplot2D.R
|
#' @title MCMC algorithm for Bayesian spatial blind source separation
#' with the thresholded Gaussian Process prior.
#' @description Performs an MCMC algorithm to draw samples from a Bayesian spatial blind source separation
#' model.
#'
#' @param X Data matrix with n rows (sample) and p columns (voxel).
#' @param init List of initial values, see \code{init_bspbss}.
#' @param prior List of priors, see \code{init_bspbss}.
#' @param kernel List including eigenvalues and eigenfunctions of the kernel, see \code{init_bspbss}.
#' @param n.iter Total iterations in MCMC.
#' @param n.burn_in Number of burn-in.
#' @param thin Thinning interval.
#' @param show_step Frequency for printing the current number of iterations.
#' @param ep Approximation parameter.
#' @param lr Per-batch learning rate in SGHMC.
#' @param decay Decay parameter in SGHMC.
#' @param num_leapfrog Number of leapfrog steps in SGHMC.
#' @param subsample_n Mini-batch size of samples.
#' @param subsample_p Mini-batch size of voxels.
#'
#' @return List containing MCMC samples of: A, b, sigma, and zeta.
#' @export
#'
#' @examples
#'
#' sim = sim_2Dimage(length = 30,
#' sigma = 5e-4,
#' n = 30,
#' smooth = 6)
#' ini = init_bspbss(sim$X, sim$coords,
#' q = 3,
#' ker_par = c(0.1,50),
#' num_eigen = 50)
#' res = mcmc_bspbss(ini$X,ini$init,
#' ini$prior,ini$kernel,
#' n.iter=200,n.burn_in=100,
#' thin=10,show_step=50)
#'
mcmc_bspbss = function(X,init,prior,kernel,n.iter,n.burn_in,thin=1,show_step,
ep = 0.01,lr = 0.01, decay = 0.01, num_leapfrog = 5,
subsample_n = 0.5,subsample_p=0.5){
sigma = init$sigma * 1
A = init$A * 1
b = init$b * 1
zeta = init$zeta * 1
stepsize_zeta = init$stepsize_zeta
if(prior$zeta[1] < 0){
prior$zeta[1] = 0
}
if(prior$zeta[2] > 1){
prior$zeta[2] = 1
}
lr0 = lr / nrow(X) / ncol(X)
out = mcmc_bspbss_c(X, A, b, sigma, zeta, stepsize_zeta, subsample_n, subsample_p, prior, kernel$psi, kernel$lambda,
ep,lr0, decay, num_leapfrog, n.iter,n.burn_in,thin, show_step)
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/R/mcmc_bspbss.R
|
#' @title Write a NIfTI file.
#' @description This function saves a data matrix into a NIfTI file.
#'
#' @param X Data matrix with n rows (sample) and p columns (pixel).
#' @param nii a reference NIfTI-class object, representing an image with p voxels.
#' @param xgrid Coordinate matrix with p rows (voxel) and d columns (dimension).
#' @param file The name of the file to be saved.
#' @param std If TRUE, standardize each row of X.
#' @param thres Quantile to threshold each row of X.
#'
#' @return NIfTI-class object.
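#'
#' @examples
#' \dontrun{
#' ## Illustrative only: "ref_img" is a placeholder reference NIfTI file, and
#' ## res_sum/dat stand for outputs of sum_mcmc_bspbss and pre_nii.
#' nii <- oro.nifti::readNIfTI("ref_img")
#' out <- output_nii(res_sum$S, nii, dat$coords, file = "components")
#' }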
#' @export
#'
#' @importFrom oro.nifti writeNIfTI
#' @importFrom neurobase copyNIfTIHeader
#'
output_nii = function(X,nii,xgrid, file=NULL, std=TRUE, thres = 0){
if(std){
X0 = matrix(0, ncol = ncol(X), nrow = nrow(X))
for(i in 1:nrow(X)){
if(sd(X[i,])>0){
X0[i,] = ( X[i,] )/max(abs(X[i,]) )
}
}
}
else{
X0 = X
}
if(thres>0){
for(i in 1:nrow(X)){
if(sd(X[i,])>0){
ind = ( X0[i,] > quantile(X0[i,],thres))
ind1 = ( X0[i,] < quantile(X0[i,],1-thres))
X0[i,] = X0[i,] * ( (ind +ind1)>0 )
}
}
}
out_S = array(0, dim = c(dim(nii)[1], dim(nii)[2], dim(nii)[3], nrow(X)))
tag = 1
for(i in 1:nrow(xgrid)){
out_S[xgrid[i,1],xgrid[i,2],xgrid[i,3],] = X0[,i]
}
copyNIfTIHeader(img = nii, arr = out_S, drop = FALSE)
if(!is.null(file)){
writeNIfTI(out_S,file = file)
}
return(out_S)
}
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/R/output_nii.R
|
#' @title Transforms NIfTI to matrix
#'
#' @description This function transforms a NIfTI-class object into a matrix.
#'
#' @param nii 4D NIfTI-class object with dimensions x,y,z and t. Can be read from NIfTI file with \code{readNIfTI} function from the package \code{oro.nifti}.
#' @param mask Mask variable, also in NIfTI format.
#'
#' @importFrom oro.nifti readNIfTI
#'
#' @return List containing the data matrix with t rows and x*y*z columns (voxels), and the coordinates of the voxels.
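#'
#' @examples
#' \dontrun{
#' ## Illustrative only: the file names below are placeholders, not files
#' ## shipped with the package.
#' nii <- oro.nifti::readNIfTI("fmri_4d")
#' mask <- oro.nifti::readNIfTI("brain_mask")
#' dat <- pre_nii(nii, mask)
#' dim(dat$data)  # t x (number of voxels inside the mask)
#' }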
#' @export
pre_nii = function(nii,mask){
dim_nii = dim(mask)
xgrid = as.matrix(expand.grid(1:dim_nii[1],1:dim_nii[2],1:dim_nii[3]))
mask_vec = as.numeric(mask)
data0 = NULL
for(i in 1:dim(nii)[4]){
data0 = cbind(data0,as.numeric(nii[,,,i])[mask_vec!=0])
}
out = list()
out$data = t(data0)
out$coords = xgrid[mask_vec!=0,]
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/R/pre_nii.R
|
#' @title Simulate image data using ICA
#' @description The function simulates image data using a probabilistic ICA model
#' whose latent components have specific spatial patterns.
#'
#' @param length The length of the image.
#' @param n sample size.
#' @param sigma variance of the noise.
#' @param smooth smoothness of the latent components.
#'
#' @details
#' The observations are generated using probabilistic ICA:
#' \deqn{ X_i(v) = \sum_{j=1}^q A_{i,j} S_j(v) + \epsilon_i(v) , }
#' where \eqn{S_j, j=1,...,q} are the latent components, \eqn{A_{i,j}} is
#' the mixing coefficient and \eqn{\epsilon_i} is the noise term.
#' Specifically, the number of components in this function is \eqn{q = 3},
#' with each of them being a specific geometric shape. The mixing coefficient matrix
#' is generated with a von Mises-Fisher distribution with the concentration parameter
#' being zero, which means it is uniformly distributed on the sphere. \eqn{\epsilon_i}
#' is an i.i.d. Gaussian noise term with 0 mean and user-specified variance.
#'
#' @return List that contains the following terms:
#' \describe{
#' \item{X}{Data matrix with n rows (sample) and p columns (pixel).}
#' \item{coords}{Coordinate matrix with p rows (pixel) and d columns (dimension).}
#' \item{S}{Latent components.}
#' \item{A}{Mixing coefficient matrix.}
#' \item{snr}{Signal-to-noise ratio.}
#' }
#'
#' @export
#'
#' @importFrom stats cov rnorm var
#' @importFrom rstiefel rmf.vector
#'
#' @examples
#' sim = sim_2Dimage(length = 30, sigma = 5e-4, n = 30, smooth = 6)
#'
sim_2Dimage = function(length = 20, n = 50, sigma = 2e-3, smooth = 6){
grid = 1:length
xgrid = as.matrix(expand.grid(grid,grid))
voxel = xgrid / max(xgrid)
p = nrow(xgrid)
q = 3
S0 = matrix(0,nrow = q, ncol = p)
S0[1,] = create.square.in.2D.image(voxel, 0.1,0.1,0.5,0.5)
S0[2,] = create.circle.in.2D.image(voxel, center = c(0.7,0.7))
S0[3,] = create.triangle.in.2D.image(voxel, 0.2,0.8,0.2,0.2,0.8,0.5)
if(smooth>0){
S = smoos(S0,xgrid,smooth) /sqrt(p)
}
else{
S = S0/sqrt(p)
}
A = matrix(0,nrow = n, ncol = q)
for(j in 1:q){
A[,j] = rmf.vector(rep(0,n)) * sqrt(n)
}
sigma2 = rep(sigma,p)
ep = matrix(0,nrow = n, ncol = p)
for(i in 1:p){
ep[,i] = rnorm(n)
ep[,i] = ep[,i] * sqrt(sigma2[i])
}
AS = A%*%S
X = AS + ep
data0 = array(0,dim = c( grid[length(grid)], grid[length(grid)], nrow(X)) )
tag = 1
for(i in 1:nrow(data0)){
for(j in 1:ncol(data0)){
data0[i,j,] = X[,tag]
tag = tag + 1
}
}
tmpp = 0
for(i in 1:n){
tmpp = tmpp + var(AS[i,])/( var(X[i,]) - var(AS[i,]) )
}
snr = tmpp/n
out = list()
out$X = X
out$coords = xgrid[,2:1]
out$S = S
out$A = A
out$snr = snr
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/R/sim_2Dimage.R
|
#' @title Summarization of the MCMC result.
#' @description The function summarizes the MCMC results obtained from \code{mcmc_bspbss}.
#'
#' @param res List including MCMC samples, which can be obtained from function \code{mcmc_bspbss}
#' @param X Original data matrix.
#' @param kernel List including eigenvalues and eigenfunctions of the kernel, see \code{init_bspbss}.
#' @param start Start point of the iterations being summarized.
#' @param end End point of the iterations being summarized.
#' @param select_prob Lower bound of the posterior inclusion probability required when summarizing
#' the samples of latent sources.
#'
#' @return List that contains the following terms:
#' \describe{
#' \item{S}{Estimated latent sources.}
#' \item{pip}{Voxel-wise posterior inclusion probability for the latent sources.}
#' \item{A}{Estimated mixing coefficient matrix.}
#' \item{zeta}{Estimated zeta.}
#' \item{sigma}{Estimated sigma.}
#' \item{logLik}{Trace of log-likelihood.}
#' \item{Slist}{MCMC samples of S.}
#' }
#' @export
#'
#' @examples
#'
#' sim = sim_2Dimage(length = 30, sigma = 5e-4, n = 30, smooth = 6)
#' ini = init_bspbss(sim$X, sim$coords, q = 3, ker_par = c(0.1,50), num_eigen = 50)
#' res = mcmc_bspbss(ini$X,ini$init,ini$prior,ini$kernel,n.iter=200,n.burn_in=100,thin=10,show_step=50)
#' res_sum = sum_mcmc_bspbss(res, ini$X, ini$kernel, start = 11, end = 20, select_prob = 0.5)
#'
sum_mcmc_bspbss = function(res, X, kernel, start = 1, end = 100, select_prob = 0.8){
out = list()
tmp = mcmc_sum_avgS(res, X, kernel, start, end, select_prob)
out$S = tmp$S
out$pip = tmp$pip
out$A = apply(res$A,c(1,2),mean)
out$zeta = mean(res$zeta)
out$sigma = apply(res$sigma,1,mean)
out$loglik = tmp$loglik
out$Slist = tmp$Slist
return(out)
}
mcmc_sum_avgS = function(res, data, kernel, start = 1, end = 1, select_prob = 0.8){
n = end - start + 1
q = dim(res$A)[2]
p = ncol(kernel$psi)
loglik = rep(0,n)
Sl = matrix(0,nrow=q,ncol=p)
spMat = matrix(0,nrow=q,ncol=p)
Slist=list()
for(i in start:end){
sumb = cal_sumb(res$b[,,i],kernel$psi)
S = cal_S(sumb,res$zeta[i])
Slist[[i-start + 1]] = S
loglik[i-start + 1] = loglk(data,res$A[,,i],S,res$sigma[,i])
Sl = Sl + S
spMat = spMat + (S!=0)
}
Sl = Sl * ( (spMat / n) > select_prob)
Sl[spMat > 0] = Sl[spMat > 0]/spMat[spMat > 0]
out = list()
out$S = Sl
out$pip = spMat/n
out$loglik = loglik
out$Slist =Slist
return( out )
}
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/R/sum_mcmc_bspbss.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup, message=FALSE-----------------------------------------------------
library(BSPBSS)
## ---- message=FALSE-----------------------------------------------------------
library(BSPBSS)
set.seed(612)
sim = sim_2Dimage(length = 30, sigma = 5e-4, n = 30, smooth = 6)
## -----------------------------------------------------------------------------
levelplot2D(sim$S,lim = c(-0.04,0.04), sim$coords)
## -----------------------------------------------------------------------------
levelplot2D(sim$X[1:3,], lim = c(-0.12,0.12), sim$coords)
## -----------------------------------------------------------------------------
ini = init_bspbss(sim$X, sim$coords, q = 3, ker_par = c(0.1,50), num_eigen = 50)
## ---- message = TRUE----------------------------------------------------------
res = mcmc_bspbss(ini$X,ini$init,ini$prior,ini$kernel,n.iter=2000,n.burn_in=1000,thin=10,show_step=100)
## -----------------------------------------------------------------------------
res_sum = sum_mcmc_bspbss(res, ini$X, ini$kernel, start = 101, end = 200, select_prob = 0.5)
## -----------------------------------------------------------------------------
levelplot2D(res_sum$S, lim = c(-1.3,1.3), sim$coords)
## -----------------------------------------------------------------------------
levelplot2D(ini$init$ICA_S, lim = c(-1.7,1.7), sim$coords)
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/inst/doc/BSPBSS-vignette.R
|
---
title: "BSPBSS-vignette"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{BSPBSS-vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup, message=FALSE}
library(BSPBSS)
```
## A toy example
This is a basic example which shows you how to solve a common problem.
First we load the package and generate simulated images with a probabilistic ICA model:
```{r, message=FALSE}
library(BSPBSS)
set.seed(612)
sim = sim_2Dimage(length = 30, sigma = 5e-4, n = 30, smooth = 6)
```
The true source signals are three 2D geometric patterns (set `smooth=0` to generate patterns with sharp edges).
```{r}
levelplot2D(sim$S,lim = c(-0.04,0.04), sim$coords)
```
which generate observed images such as
```{r}
levelplot2D(sim$X[1:3,], lim = c(-0.12,0.12), sim$coords)
```
Then we generate initial values for the MCMC algorithm,
```{r}
ini = init_bspbss(sim$X, sim$coords, q = 3, ker_par = c(0.1,50), num_eigen = 50)
```
and run!
```{r, message = TRUE}
res = mcmc_bspbss(ini$X,ini$init,ini$prior,ini$kernel,n.iter=2000,n.burn_in=1000,thin=10,show_step=100)
```
Then the results can be summarized by
```{r}
res_sum = sum_mcmc_bspbss(res, ini$X, ini$kernel, start = 101, end = 200, select_prob = 0.5)
```
and shown by
```{r}
levelplot2D(res_sum$S, lim = c(-1.3,1.3), sim$coords)
```
For comparison, we show the estimated sources provided by Infomax ICA here.
```{r}
levelplot2D(ini$init$ICA_S, lim = c(-1.7,1.7), sim$coords)
```
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/inst/doc/BSPBSS-vignette.Rmd
|
---
title: "BSPBSS-vignette"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{BSPBSS-vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup, message=FALSE}
library(BSPBSS)
```
## A toy example
This is a basic example which shows you how to solve a common problem.
First we load the package and generate simulated images with a probabilistic ICA model:
```{r, message=FALSE}
library(BSPBSS)
set.seed(612)
sim = sim_2Dimage(length = 30, sigma = 5e-4, n = 30, smooth = 6)
```
The true source signals are three 2D geometric patterns (set `smooth=0` to generate patterns with sharp edges).
```{r}
levelplot2D(sim$S,lim = c(-0.04,0.04), sim$coords)
```
which generate observed images such as
```{r}
levelplot2D(sim$X[1:3,], lim = c(-0.12,0.12), sim$coords)
```
Then we generate initial values for the MCMC algorithm,
```{r}
ini = init_bspbss(sim$X, sim$coords, q = 3, ker_par = c(0.1,50), num_eigen = 50)
```
and run!
```{r, message = TRUE}
res = mcmc_bspbss(ini$X,ini$init,ini$prior,ini$kernel,n.iter=2000,n.burn_in=1000,thin=10,show_step=100)
```
Then the results can be summarized by
```{r}
res_sum = sum_mcmc_bspbss(res, ini$X, ini$kernel, start = 101, end = 200, select_prob = 0.5)
```
and shown by
```{r}
levelplot2D(res_sum$S, lim = c(-1.3,1.3), sim$coords)
```
For comparison, we show the estimated sources provided by Infomax ICA here.
```{r}
levelplot2D(ini$init$ICA_S, lim = c(-1.7,1.7), sim$coords)
```
|
/scratch/gouwar.j/cran-all/cranData/BSPBSS/vignettes/BSPBSS-vignette.Rmd
|
#' Estimate accumulated volatility processes
#'
#' \code{estimateAccumulatedVolatility} estimates the pth power accumulated volatility process for a Brownian
#' semistationary process, using either parametric methods of model fitting first, or using a non-parametric
#' estimator for the scale factor.
#'
#'
#' @param Y a vector of observations of a BSS process.
#' @param n positive integer indicating the number of observations per unit of time.
#' @param p the power to evaluate the accumulated power volatility process for. Defaults to 2, in order to
#' estimate the accumulated squared volatility process.
#' @param method text string representing the method used to estimate the accumulated volatility. Options are \code{'acf'}, \code{'cof'}
#' or \code{'nonparametric'}. If \code{'acf'} is selected, model parameters are fit to the data \code{Y} using
#' least squares on the autocorrelation function and these parameters are used to estimate the scale factor.
#' If \code{'cof'} is selected, only the smoothness parameter \code{alpha} is estimated using the change of frequency
#' method, and then put into an asymptotic expression for the scale factor in the calculation. If \code{'nonparametric'}
#' is selected then the non-parametric estimator for the scale factor will be used in the calculation. Defaults to \code{'nonparametric'}.
#' @param kernel text string representing the choice of kernel when fitting the model to estimate
#' the scale factor parametrically. Options are \code{'gamma'} and \code{'power'}. Defaults to \code{'gamma'}.
#'
#' @return The function returns a vector of the same length as \code{Y} which is the estimate for the
#' accumulated volatility process, observed from time 0 to T, at intervals of T/n. Note that the values have been
#' divided by m_p in the output, so that the estimation is of the integral alone. If the non-parametric
#' estimator for tau_n is used then the values will be scaled by the expectation of the squared volatility, as
#' per the theory.
#'
#' @examples
#'
#' N <- 10000
#' n <- 100
#' T <- 1.0
#' theta <- 0.5
#' beta <- 0.125
#'
#' kappa <- 3
#' alpha <- -0.2
#' lambda <- 1.0
#'
#'
#' vol <- exponentiatedOrnsteinUhlenbeck(N, n, T, theta, beta)
#' bss_simulation <- gammaKernelBSS(N, n, T, kappa, alpha, lambda, sigma = vol)
#' y <- bss_simulation$bss
#' estimateAccumulatedVolatility(y, n, p = 2, method = 'nonparametric', kernel = 'gamma')
#'
#' @export
#'
estimateAccumulatedVolatility <- function(Y, n, p = 2, method = 'nonparametric', kernel = 'gamma') {
m_p <- 2^(p/2) * gamma((p + 1)/2) / sqrt(pi)
if (method == 'cof'){
alpha <- bssAlphaFit(Y)
if (kernel == 'gamma') {
return(cumsum(abs(diff(Y))^p) / n / gammaKernelTauAsymptotic(n, alpha)^p / m_p)
}
} else if (method == 'acf') {
if (kernel == 'gamma') {
theta <- gammaKernelBSSFit(Y, n)
alpha <- theta[[1]]
lambda <- theta[[2]]
return(cumsum(abs(diff(Y))^p) / n / gammaKernelTau(n, alpha, lambda)^p / m_p)
} else if (kernel == 'power') {
theta <- powerKernelBSSFit(Y, n)
alpha <- theta[[1]]
beta <- theta[[2]]
return(cumsum(abs(diff(Y))^p) / n / powerKernelTau(n, alpha, beta)^p / m_p)
}
} else {
return(cumsum(abs(diff(Y))^p) / n / tauNonParametricEstimate(Y)^p / m_p)
}
}
#' Estimate confidence interval for the accumulated volatility processes
#'
#' \code{estimateAccumulatedVolatilityCI} estimates a confidence interval for the pth power accumulated volatility process for a Brownian
#' semistationary process, using either parametric methods of model fitting first, or using a non-parametric
#' estimator for the scale factor.
#'
#'
#' @param Y a vector of observations of a BSS process.
#' @param n positive integer indicating the number of observations per unit of time.
#' @param p the power to evaluate the accumulated power volatility process for. Defaults to 2, in order to
#' estimate the accumulated squared volatility process.
#' @param method text string representing the method used to estimate the accumulated volatility. Options are \code{'acf'}, \code{'cof'}
#' or \code{'nonparametric'}. If \code{'acf'} is selected, model parameters are fit to the data \code{Y} using
#' least squares on the autocorrelation function and these parameters are used to estimate the scale factor.
#' If \code{'cof'} is selected, only the smoothness parameter \code{alpha} is estimated using the change of frequency
#' method, and then put into an asymptotic expression for the scale factor in the calculation. If \code{'nonparametric'}
#' is selected then the non-parametric estimator for the scale factor will be used in the calculation. Defaults to \code{'nonparametric'}.
#' @param kernel text string representing the choice of kernel when fitting the model to estimate
#' the scale factor parametrically. Options are \code{'gamma'} and \code{'power'}. Defaults to \code{'gamma'}.
#' @param confidence_level the required level for the confidence interval, as a probability between 0 and 1.
#' @return The function returns a list of two vectors of the same length as \code{Y} which are the estimates for the
#' lower and upper values for the confidence interval. Note that the values have been
#' divided by m_p in the output, so that the estimation is of the integral alone. If the non-parametric
#' estimator for tau_n is used then the values will be scaled by the expectation of the squared volatility, as
#' per the theory.
#'
#' @examples
#'
#' N <- 10000
#' n <- 100
#' T <- 1.0
#' theta <- 0.5
#' beta <- 0.125
#'
#' kappa <- 3
#' alpha <- -0.2
#' lambda <- 1.0
#'
#'
#' vol <- exponentiatedOrnsteinUhlenbeck(N, n, T, theta, beta)
#' bss_simulation <- gammaKernelBSS(N, n, T, kappa, alpha, lambda, sigma = vol)
#' y <- bss_simulation$bss
#' estimateAccumulatedVolatilityCI(y, n, p = 2, method = 'nonparametric',
#'                                 kernel = 'gamma', confidence_level = 0.95)
#'
#' @export
#'
estimateAccumulatedVolatilityCI <- function(Y, n, p, method = "nonparametric", kernel = "gamma", confidence_level) {
p_val = 0.5 + 0.5 * confidence_level
z_a = qnorm(p_val)
K_p <- estimateK(Y, p)
mean <- estimateAccumulatedVolatility(Y, n, p, method = method, kernel = kernel)
var_term <- estimateAccumulatedVolatility(Y, n, 2*p, method = method, kernel = kernel)
var <- z_a * K_p * sqrt(var_term)
list(lower = (mean - var), upper = (mean + var))
}
|
/scratch/gouwar.j/cran-all/cranData/BSS/R/bss_accumulated_volatility.R
|
#' Fitting gamma kernel Brownian semistationary processes
#'
#' \code{gammaKernelBSSFit} uses a method of moments to fit the parameters of a gamma kernel Brownian semistationary process
#' to a vector of observations. A least squares estimate of the parameters is obtained
#' by minimising the mean square error between the true gamma kernel autocorrelation function and the
#' empirical ACF of the data, using lags 0,...,H. The number of lags \code{num_lags} used can be adjusted.
#' The volatility process does not need to be specified.
#'
#' @param Y a vector of observations of a BSS process at frequency \code{n}.
#' @param n positive integer indicating the number of observations per unit of time.
#' @param num_lags the number of lags to be used in the regression. The default is to use the first 10 lags.
#'
#' @return The function returns a list containing the parameters \code{alpha} and \code{lambda}, and also the mean square
#' error \code{mse} of the least squares fit. This can be used to compare model fit when trying different kernels.
#'
#' @examples
#'
#' N <- 10000
#' n <- 100
#' T <- 1.0
#' theta <- 0.5
#' beta <- 0.125
#'
#' kappa <- 3
#' alpha <- -0.2
#' lambda <- 1.0
#'
#'
#' vol <- exponentiatedOrnsteinUhlenbeck(N, n, T, theta, beta)
#' bss_simulation <- gammaKernelBSS(N, n, T, kappa, alpha, lambda, sigma = vol)
#' y <- bss_simulation$bss
#'
#' gammaKernelBSSFit(y, n, num_lags = 10)
#'
#'
#' @export
#'
gammaKernelBSSFit <- function(Y, n, num_lags = 10) {
gammaKernelCorr <- function(theta, h) {
alpha <- theta[1]
lambda <- theta[2]
2^(-alpha + 1/2) / gamma( alpha + 1/2) * (lambda * h)^(alpha + 1/2) * besselK(lambda*h, nu = alpha + 1/2)
}
## loss function which calculates mean of squares of differences between true and observed acf
Loss <- function(theta, Y) {
rho_hat <- acf(Y, lag.max = num_lags, demean = FALSE, plot = FALSE)$acf
h <- (0:num_lags)/n
true_vals <- sapply(h, gammaKernelCorr, theta = theta)
true_vals[1] <- 1
mean( (true_vals - rho_hat)^2)
}
optimum_value <- optim(par = c(0,1), fn = Loss, Y = Y)
theta <- optimum_value$par
mse <- optimum_value$value
list(alpha = theta[1], lambda = theta[2], mse = mse)
}
#' Fitting power law kernel Brownian semistationary processes
#'
#' \code{powerKernelBSSFit} uses a method of moments to fit the parameters of a power law kernel Brownian semistationary process
#' to a vector of observations. A least squares estimate of the parameters is obtained
#' by minimising the mean square error between the true power law kernel autocorrelation function (found by numerical integration)
#' and the empirical ACF of the data, using lags 0,...,H. The number of lags \code{num_lags} used can be adjusted.
#' The volatility process does not need to be specified.
#'
#' @param Y a vector of observations of a BSS process at frequency \code{n}.
#' @param n positive integer indicating the number of observations per unit of time.
#' @param num_lags the number of lags to be used in the regression. The default is to use the first 10 lags.
#'
#' @return The function returns a list containing the parameters \code{alpha} and \code{beta}, and also the mean square
#' error \code{mse} of the least squares fit. This can be used to compare model fit when trying different kernels.
#'
#' @examples
#'
#' N <- 10000
#' n <- 100
#' T <- 1.0
#' theta <- 0.5
#' beta_vol <- 0.125
#'
#' kappa <- 3
#' alpha <- -0.2
#' beta_pwr <- -1.0
#'
#'
#' vol <- exponentiatedOrnsteinUhlenbeck(N, n, T, theta, beta_vol)
#' bss_simulation <- powerKernelBSS(N, n, T, kappa, alpha, beta_pwr, sigma = vol)
#' y <- bss_simulation$bss
#'
#' powerKernelBSSFit(y, n, num_lags = 10)
#'
#'
#' @export
#'
powerKernelBSSFit <- function(Y, n, num_lags = 10) {
powerKernelCorr <- function(theta, h) {
alpha <- theta[1]
beta <- theta[2]
cov_integrand <- function(x) x^alpha * (1 + x)^(beta - alpha) * (x + h)^alpha * (1 + x + h)^(beta - alpha)
(integrate(cov_integrand, 0, 1)$val + integrate(cov_integrand, 1, Inf)$val)/ beta(1 + 2*alpha, -1 - 2*beta)
}
## loss function which calculates mean of squares of differences between true and observed acf
Loss <- function(theta, Y) {
rho_hat <- acf(Y, lag.max = num_lags, demean = FALSE, plot = FALSE)$acf
h <- (0:num_lags)/n
true_vals <- sapply(h, powerKernelCorr, theta = theta)
true_vals[1] <- 1
mean( (true_vals - rho_hat)^2)
}
optimum_value <- optim(par = c(-0.1,-2), fn = Loss, Y = Y, method = "L-BFGS-B", upper = c(0.5, -0.5))
theta <- optimum_value$par
mse <- optimum_value$value
list(alpha = theta[1], beta = theta[2], mse = mse)
}
#' Estimating the smoothness parameter of a Brownian semistationary process
#'
#' \code{bssAlphaFit} uses the 'Change of Frequency' method to estimate the smoothness parameter, \code{alpha},
#' of a BSS process. The COF method needs only minimal assumptions on the parametric form of the kernel,
#' therefore the estimate can be used in any kernel.
#'
#' @param Y a vector of observations of a BSS process at any frequency.
#' @param p the power to be used in the change of frequency method. The default value is p = 2.
#'
#' @return The function returns a single value - an estimate for the smoothness parameter alpha.
#'
#' @examples
#'
#' N <- 10000
#' n <- 100
#' T <- 1.0
#' theta <- 0.5
#' beta <- 0.125
#'
#' kappa <- 3
#' alpha <- -0.2
#' lambda <- 1.0
#'
#'
#' vol <- exponentiatedOrnsteinUhlenbeck(N, n, T, theta, beta)
#' bss_simulation <- gammaKernelBSS(N, n, T, kappa, alpha, lambda, sigma = vol)
#' y <- bss_simulation$bss
#'
#' bssAlphaFit(y, p = 2)
#'
#' @export
#'
bssAlphaFit <- function(Y, p = 2) {
n <- length(Y)
# split the series into odd- and even-indexed subsamples (half frequency)
filter1 <- 0:((n-1)/2)*2 + 1
filter2 <- 1:(n/2)*2
Y1 <- Y[filter1]
Y2 <- Y[filter2]
# ratio of second-order variations at half frequency versus full frequency
V <- sum((diff(diff(Y1)))^p) + sum((diff(diff(Y2)))^p)
cof <- V / sum((diff(diff(Y)))^p)
log2(cof) / p - 1/2
}
|
/scratch/gouwar.j/cran-all/cranData/BSS/R/bss_fit.R
|
#' Realised power variation
#'
#' \code{realisedPowerVariation} calculates the realised power variation for a BSS process, which is then
#' used as a component of estimating the accumulated volatility process.
#'
#' @param Y a vector of observations of a BSS process.
#' @param p the power variation to be calculated.
#'
#' @return The function returns the sum of the pth powers of the absolute first differences of the BSS process.
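#'
#' @examples
#' ## Illustrative toy input: any numeric sample path works here.
#' y <- cumsum(rnorm(1000, sd = 0.01))
#' realisedPowerVariation(y, p = 2)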
#' @export
#'
realisedPowerVariation <- function(Y, p) {
sum(abs(diff(Y))^p)
}
#' Autocorrelation function for the gamma kernel
#'
#' \code{gammaKernelCorrelation} calculates the value of the gamma kernel autocorrelation function
#' directly using the analytic expression.
#'
#' @param alpha the smoothness parameter, alpha, for the gamma kernel.
#' @param lambda the exponent parameter, lambda, for the gamma kernel.
#' @param h the lag to calculate the autocorrelation at.
#'
#' @return The function returns the autocorrelation for the gamma kernel with
#' parameters \code{alpha} and \code{lambda} at lag \code{h}.
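#'
#' @examples
#' ## Illustrative parameter values.
#' gammaKernelCorrelation(alpha = -0.2, lambda = 1, h = 0.1)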
#' @export
#'
gammaKernelCorrelation <- function(alpha, lambda, h) {
ifelse(h == 0, 1,
2^(-alpha + 1/2) / gamma( alpha + 1/2) * (lambda * h)^(alpha + 1/2) * besselK(lambda*h, nu = alpha + 1/2))
}
#' Autocorrelation function for the power law kernel
#'
#' \code{powerKernelCorrelation} calculates the value of the power law kernel autocorrelation function
#' directly using numerical integration for the numerator (the covariance term) and the analytic expression
#' for the denominator (variance term).
#'
#' @param alpha the smoothness parameter, alpha, for the power law kernel.
#' @param beta the exponent parameter, beta, for the power law kernel.
#' @param h the lag to calculate the autocorrelation at.
#'
#' @return The function returns the autocorrelation for the power law kernel with
#' parameters \code{alpha} and \code{beta} at lag \code{h}.
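#'
#' @examples
#' ## Illustrative parameter values.
#' powerKernelCorrelation(alpha = -0.2, beta = -1, h = 0.1)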
#' @export
#'
powerKernelCorrelation <- function(alpha, beta, h) {
cov_integrand <- function(x) x^alpha * (1 + x)^(beta - alpha) * (x + h)^alpha * (1 + x + h)^(beta - alpha)
# cap at 1: the ratio equals 1 at h = 0 and can only undershoot (or numerically overshoot) it
min(integrate(cov_integrand, 0, Inf)$val / beta(1 + 2*alpha, -1 - 2*beta), 1)
}
#' Scale factor for the gamma kernel
#'
#' \code{gammaKernelTau} evaluates the scale factor tau_n for the gamma kernel using the
#' exact expression derived from the covariance function.
#'
#' @param n a positive integer indicating the number of observations per unit of time.
#' @param alpha the smoothness parameter, alpha, for the gamma kernel.
#' @param lambda the exponent parameter, lambda, for the gamma kernel.
#'
#' @return The function returns the scale factor (tau_n) for the gamma kernel with
#' parameters \code{alpha} and \code{lambda}, observed at frequency \code{n} per unit of time.
#' @export
#'
gammaKernelTau <- function(n, alpha, lambda) {
lambda^(-alpha - 1)*sqrt( gamma(alpha + 1)/gamma(1/2) * (gamma(alpha + 1/2) - 2^(-alpha + 1/2) * (lambda/n)^(alpha + 1/2) * besselK(lambda/n , alpha + 1/2)) )
}
#' Scale factor for the power law kernel
#'
#' \code{powerKernelTau} evaluates the scale factor tau_n for the power law kernel using
#' numerical integration for the covariance term, and exact evaluation for the variance term.
#'
#' @param n a positive integer indicating the number of observations per unit of time.
#' @param alpha the smoothness parameter, alpha, for the power law kernel.
#' @param beta the exponent parameter, beta, for the power law kernel.
#'
#' @return The function returns the scale factor (tau_n) for the power law kernel with
#' parameters \code{alpha} and \code{beta}, observed at frequency \code{n} per unit of time.
#' @export
#'
powerKernelTau <- function(n, alpha, beta) {
cov_integrand <- function(x) x^alpha * (1 + x)^(beta - alpha) * (x + 1/n)^alpha * (1 + x + 1/n)^(beta - alpha)
cov_delta <- integrate(cov_integrand, 0, Inf)$val
sqrt(2*beta(1+2*alpha, -1 -2*beta) - 2*cov_delta)
}
#' Asymptotic scale factor for the gamma kernel
#'
#' @param n a positive integer indicating the number of observations per unit of time.
#' @param alpha the smoothness parameter, alpha, for the gamma kernel.
#'
#' @return The function returns an approximation for the scale factor (tau_n) for the gamma kernel with
#' smoothness parameter \code{alpha}, observed at frequency \code{n} per unit of time, using the asymptotic
#' expression for the scale factor.
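#'
#' @examples
#' ## Illustrative check against the exact expression (lambda = 1 assumed).
#' gammaKernelTauAsymptotic(n = 100, alpha = -0.2)
#' gammaKernelTau(n = 100, alpha = -0.2, lambda = 1)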
#' @export
#'
gammaKernelTauAsymptotic <- function(n, alpha) {
sqrt(2^(-4*alpha - 1) * gamma(2*alpha + 1) * gamma(1/2 - alpha) / gamma(alpha + 3/2) /n^(2*alpha + 1))
}
#' Non-parametric estimate of the scale factor
#'
#' @param Y a vector of observations of a BSS process.
#'
#' @return The function returns the non-parametric estimate for the scale factor.
#' Note that this will be scaled by the expectation of the square of the volatitity.
#' @export
#'
tauNonParametricEstimate <- function(Y) {
sqrt(mean(diff(Y)^2))
}
|
/scratch/gouwar.j/cran-all/cranData/BSS/R/bss_functions.R
|
#' Hybrid scheme covariance matrix
#'
#' Generates the covariance matrix used in simulating Brownian semistationary processes by
#' the hybrid scheme.
#'
#' @param kappa number of terms needed for the lower sum in the hybrid scheme.
#' @param n number of observations per unit of time, n = 1/delta.
#' @param alpha smoothness parameter used in the BSS simulation.
#'
#' @return Returns the covariance matrix for the lower sum in the hybrid scheme calculations.
#' The dimensions of the covariance matrix will be (kappa + 1) by (kappa + 1).
#'
#' @examples
#'
#' kappa <- 3
#' n <- 100
#' alpha <- -0.2
#'
#' hybridSchemeCovarianceMatrix(kappa, n, alpha)
#'
#'
#' @export
hybridSchemeCovarianceMatrix <- function(kappa, n, alpha) {
# create empty matrix
Sigma <- matrix(0, nrow = kappa + 1, ncol = kappa + 1)
# fill in top corner = Var(W_i)
Sigma[1,1] <- 1/n
# loop over other columns
for (j in 2:(kappa + 1)) {
# fill in according to given expressions
Sigma[1,j] <- ((j-1)^(alpha+1) - (j-2)^(alpha+1))/(alpha+1)/n^(alpha+1)
Sigma[j,j] <- ((j-1)^(2*alpha+1) - (j-2)^(2*alpha+1))/(2*alpha+1)/n^(2*alpha+1)
# fill in remaining rows
if (j < kappa + 1) {
for (k in (j+1):(kappa+1)) {
Sigma[j,k] <- 1/(alpha + 1)/n^(2*alpha + 1) *
((j - 1)^(alpha + 1) * (k - 1)^alpha *
hypergeo::hypergeo(-alpha, 1, alpha + 2, (j - 1)/(k - 1) ) -
(j - 2)^(alpha + 1) * (k - 2)^alpha *
hypergeo::hypergeo(-alpha, 1, alpha + 2, (j - 2)/(k - 2) ))
}
}
}
# loop has given an upper triangular (possibly complex) matrix
# Imaginary part = 0 but display shows complex values (r + 0i) so take real part
# fill in lower triangle so that S[i,j] = S[j,i]
Re(Sigma + t(upper.tri(Sigma) * Sigma))
}
#' Simulate an exponentiated OU volatility process
#'
#' \code{exponentiatedOrnsteinUhlenbeck} simulates an exponentiated Ornstein-Uhlenbeck process of the correct length to
#' be used as the volatility process within the hybrid scheme.
#'
#' @param N positive integer determining the number of terms in the Riemann sum element of the
#' hybrid scheme calculation. Should be of order at least \code{n}.
#' @param n positive integer indicating the number of observations per unit of time. It represents the fineness or frequency of observations.
#' @param T the time interval to simulate the BSS process over.
#' @param theta positive number giving the mean reversion rate of the OU process.
#' @param beta the factor in the exponential.
#' @return The function returns a vector of length \code{N + n*T + 1}
#'
#' @examples
#'
#' N <- 10000
#' n <- 100
#' T <- 1.0
#' theta <- 0.5
#' beta <- 0.125
#'
#' vol <- exponentiatedOrnsteinUhlenbeck(N, n, T, theta, beta)
#'
#' @export
exponentiatedOrnsteinUhlenbeck <- function(N, n, T, theta, beta) {
# initialise values
v <- numeric(N + n*T + 1)
# to start in stationary distribution:
v[1] <- rnorm(1, 0, sqrt(1/(2*theta)))
# otherwise start from the mean:
# v[1] <- 0
for (i in 2:(N + n*T + 1)) {
v[i] <- v[i-1] - theta * v[i-1] * 1/n + sqrt(1/n) * rnorm(1, 0, 1)
}
exp(beta * v)
}
#' Simulation of gamma kernel Brownian semistationary processes
#'
#' \code{gammaKernelBSS} uses the Hybrid scheme to simulate a Brownian semistationary process from the
#' gamma kernel. It simulates a path where the volatility process is independent of the driving Brownian motion of the
#' BSS process.
#'
#' @param N positive integer determining the number of terms in the Riemann sum element of the
#' hybrid scheme calculation. Should be of order at least \code{n}.
#' @param n positive integer indicating the number of observations per unit of time. It represents the fineness or frequency of observations.
#' @param T the time interval to simulate the BSS process over.
#' @param kappa positive integer giving the number of terms to use in the 'lower' sum of the hybrid scheme. Default set to 3.
#' @param alpha the smoothness parameter of the BSS process to simulate.
#' @param lambda the exponent parameter of the BSS process to simulate.
#' @param sigma the volatility process used in the BSS simulation. This should be a vector of length \code{N + n*T + 1}
#' representing the sample path of sigma from -N to nT. By default this is set to by a vector of 1s so that the
#' Gaussian core is simulated.
#'
#' @return The function returns a list of three objects, \code{core} gives the Gaussian core of the process
#' between 0 and T, at intervals of 1/n. \code{bss} gives the BSS sample path between 0 and T, at intervals of 1/n,
#' and \code{vol} gives the volatility process over the same time period.
#'
#' @examples
#'
#' N <- 10000
#' n <- 100
#' T <- 1.0
#' theta <- 0.5
#' beta <- 0.125
#'
#' kappa <- 3
#' alpha <- -0.2
#' lambda <- 1.0
#'
#'
#' vol <- exponentiatedOrnsteinUhlenbeck(N, n, T, theta, beta)
#' bss_simulation <- gammaKernelBSS(N, n, T, kappa, alpha, lambda, sigma = vol)
#'
#'
#' @export
#'
gammaKernelBSS <- function(N, n, T, kappa = 3, alpha, lambda, sigma = rep(1, N + n*T + 1)) {
## initialise kernel and discretization parameters:
# create empty vectors for the 'lower' part of the hybrid scheme sums
X_lower <- numeric(n*T + 1)
Y_lower <- numeric(n*T + 1)
# define gamma kernel
g <- function(x) x^alpha * exp(-lambda*x)
# split indices into lower and upper sums
k_lower <- 1:kappa
k_upper <- (kappa + 1):N
# function to calculate the optimal discretization
b_star <- function(k) ((k^(alpha + 1) - (k-1)^(alpha + 1))/(alpha + 1))^(1/alpha)
# vector of the L_g(k/n)
L_g <- exp(-lambda*k_lower/n)
# vector of g(b*/n) for hybrid scheme
g_b_star <- c(rep(0, kappa), g(b_star(k_upper)/n))
## generate the Brownian increments according to hybrid scheme
# create the required covariance matrix
Sigma_W <- hybridSchemeCovarianceMatrix(kappa, n, alpha)
# sample N + n*T random variables from this multivariate Gaussian
W <- MASS::mvrnorm(N + n*T, mu = rep(0,kappa + 1), Sigma = Sigma_W)
## create the sample hybrid scheme and Riemann sum sample paths
# split into cases as when kappa = 1, we are dealing with a scalar not a matrix in the first sum
if (kappa == 1) {
# loop over each time i/n with i = 0, ..., n*T
for (i in 1:(n*T + 1)) {
# calculate X[i] = X(i-1/n) from hybrid scheme
# sum first kappa terms and remaining N - kappa terms separately
# generate the Gaussian core element
X_lower[i] <- sum( L_g * W[(i + N - kappa):(i+N-1), 2])
# generate the BSS sample path element
Y_lower[i] <- sum( L_g * sigma[(i + N - kappa):(i+N-1)] * W[(i + N - kappa):(i+N-1), 2])
}
# add the 'upper' term using the convolution
# Gaussian core, convolve on with Brownian increments
X <- X_lower + convolve( g_b_star, rev(W[,1]), type = 'open')[N:(N+n*T)]
# BSS sample path, convolve with volatility process * Brownian increments
Y <- Y_lower + convolve( g_b_star, rev(head(sigma,-1) * W[,1]), type = 'open')[N:(N+n*T)]
} else { # if kappa > 1
# loop over each time i/n with i = 0, ..., n*T
for (i in 1:(n*T + 1)) {
# sum first kappa terms and remaining N - kappa terms separately
# for the Gaussian core
X_lower[i] <- sum( L_g * diag(W[(i + N - kappa):(i+N-1), 2:(kappa + 1)][kappa:1, 1:kappa]))
# for the BSS sample path
Y_lower[i] <- sum( L_g * sigma[(i + N - kappa):(i+N-1)] * diag(W[(i + N - kappa):(i+N-1), 2:(kappa + 1)][kappa:1, 1:kappa]))
}
# Gaussian core, convolve on with Brownian increments
X <- X_lower + convolve( g_b_star, rev(W[,1]), type = 'open')[N:(N+n*T)]
# BSS sample path, convolve with volatility process * Brownian increments
Y <- Y_lower + convolve( g_b_star, rev(head(sigma,-1) * W[,1]), type = 'open')[N:(N+n*T)]
}
# return Gaussian core, BSS sample path and volatility process for [0,T]
list(core = X, bss = Y, vol = tail(sigma, n*T + 1))
}
#' Simulation of power law kernel Brownian semistationary processes
#'
#' \code{powerKernelBSS} uses the Hybrid scheme to simulate a Brownian semistationary process from the
#' power law kernel. It simulates a path where the volatility process is independent of the driving Brownian motion of the
#' BSS process.
#'
#' @param N positive integer determining the number of terms in the Riemann sum element of the
#' hybrid scheme calculation. Should be of order at least \code{n}.
#' @param n positive integer indicating the number of observations per unit of time. It represents the fineness or frequency of observations.
#' @param T the time interval to simulate the BSS process over.
#' @param kappa positive integer giving the number of terms to use in the 'lower' sum of the hybrid scheme. Default set to 3.
#' @param alpha the smoothness parameter of the BSS process to simulate.
#' @param beta the exponent parameter of the BSS process to simulate.
#' @param sigma the volatility process used in the BSS simulation. This should be a vector of length \code{N + n*T + 1}
#' representing the sample path of sigma from -N to nT. By default this is set to by a vector of 1s so that the
#' Gaussian core is simulated.
#'
#' @return The function returns a list of three objects, \code{core} gives the Gaussian core of the process
#' between 0 and T, at intervals of 1/n. \code{bss} gives the BSS sample path between 0 and T, at intervals of 1/n,
#' and \code{vol} gives the volatility process over the same time period.
#'
#' @examples
#'
#' N <- 10000
#' n <- 100
#' T <- 1.0
#' theta <- 0.5
#' beta_vol <- 0.125
#'
#' kappa <- 3
#' alpha <- -0.2
#' beta_pwr <- -1.0
#'
#'
#' vol <- exponentiatedOrnsteinUhlenbeck(N, n, T, theta, beta_vol)
#' bss_simulation <- powerKernelBSS(N, n, T, kappa, alpha, beta_pwr, sigma = vol)
#'
#'
#'
#' @export
#'
powerKernelBSS <- function(N, n, T, kappa, alpha, beta, sigma = rep(1, N + n*T + 1)) {
## initialise kernel and discretization parameters:
# create empty vectors for the 'lower' part of the hybrid scheme sums
X_lower <- numeric(n*T + 1)
Y_lower <- numeric(n*T + 1)
# define power law kernel
g <- function(x) x^alpha * (1 + x)^(beta - alpha)
# split indices into lower and upper sums
k_lower <- 1:kappa
k_upper <- (kappa + 1):N
# function to calculate the optimal discretization
b_star <- function(k) ((k^(alpha + 1) - (k-1)^(alpha + 1))/(alpha + 1))^(1/alpha)
# vector of the L_g(k/n)
L_g <- (1 + k_lower/n)^(beta - alpha)
# vector of g(b*/n) for hybrid scheme
g_b_star <- c(rep(0, kappa), g(b_star(k_upper)/n))
## generate the Brownian increments according to hybrid scheme
# create the required covariance matrix
Sigma_W <- hybridSchemeCovarianceMatrix(kappa, n, alpha)
# sample N + n*T random variables from this multivariate Gaussian
W <- MASS::mvrnorm(N + n*T, mu = rep(0,kappa + 1), Sigma = Sigma_W)
## create the sample hybrid scheme and Riemann sum sample paths
# split into cases as when kappa = 1, we are dealing with a scalar not a matrix in the first sum
if (kappa == 1) {
# loop over each time i/n with i = 0, ..., n*T
for (i in 1:(n*T + 1)) {
# calculate X[i] = X(i-1/n) from hybrid scheme
# sum first kappa terms and remaining N - kappa terms separately
# generate the Gaussian core element
X_lower[i] <- sum( L_g * W[(i + N - kappa):(i+N-1), 2])
# generate the BSS sample path element
Y_lower[i] <- sum( L_g * sigma[(i + N - kappa):(i+N-1)] * W[(i + N - kappa):(i+N-1), 2])
}
# add the 'upper' term using the convolution
# Gaussian core, convolve on with Brownian increments
X <- X_lower + convolve( g_b_star, rev(W[,1]), type = 'open')[N:(N+n*T)]
# BSS sample path, convolve with volatility process * Brownian increments
Y <- Y_lower + convolve( g_b_star, rev(head(sigma,-1) * W[,1]), type = 'open')[N:(N+n*T)]
} else { # if kappa > 1
# loop over each time i/n with i = 0, ..., n*T
for (i in 1:(n*T + 1)) {
# sum first kappa terms and remaining N - kappa terms separately
# for the Gaussian core
X_lower[i] <- sum( L_g * diag(W[(i + N - kappa):(i+N-1), 2:(kappa + 1)][kappa:1, 1:kappa]))
# for the BSS sample path
Y_lower[i] <- sum( L_g * sigma[(i + N - kappa):(i+N-1)] * diag(W[(i + N - kappa):(i+N-1), 2:(kappa + 1)][kappa:1, 1:kappa]))
}
# Gaussian core, convolve on with Brownian increments
X <- X_lower + convolve( g_b_star, rev(W[,1]), type = 'open')[N:(N+n*T)]
# BSS sample path, convolve with volatility process * Brownian increments
Y <- Y_lower + convolve( g_b_star, rev(head(sigma,-1) * W[,1]), type = 'open')[N:(N+n*T)]
}
# return Gaussian core, BSS sample path and volatility process for [0,T]
list(core = X, bss = Y, vol = tail(sigma, n*T + 1))
}
|
/scratch/gouwar.j/cran-all/cranData/BSS/R/hybrid_scheme.R
|
#' Calculate the coefficients a_1 in the expression for K_1
#' @param n an integer
#' @keywords internal
a1Coefficients <- function(n) {
if (n %% 2 == 0) {
if (n ==2){
return (1 / sqrt(2*pi))
} else {
return( 2 / sqrt(2*pi) * phangorn::dfactorial(n-3) / factorial(n))
}
} else {
return(0)
}
}
#' Calculate the coefficients a_3 in the expression for K_3
#' @param n an integer
#' @keywords internal
a3Coefficients <- function(n) {
  if (n %% 2 == 0) {
    if (n == 2){
      return (6 / sqrt(2*pi))
    } else if (n == 4){
      return (1 / 2 / sqrt(2*pi))
    } else {
      return( 12 / sqrt(2*pi) * phangorn::dfactorial(n-5) / factorial(n))
    }
  } else {
    return(0)
  }
}
#' Calculate the autocorrelation of fractional Gaussian noise - needed in the calculation of K
#' @param j an integer
#' @param alpha a float, the smoothness parameter of the BSS process
#' @keywords internal
rhoFractionGaussian <- function(j, alpha) 1/2 * ( (j+1)^(2*alpha + 1) - 2*j^(2*alpha + 1) + (j-1)^(2*alpha + 1) )
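# Minimal illustration (kept unevaluated): for alpha = -0.2 these fractional
# Gaussian noise autocorrelations decay quickly, which is why truncating the
# series in the helpers below at 1e6 lags is adequate.
if (FALSE) {
  rhoFractionGaussian(1:5, alpha = -0.2)
}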
#' Calculate K_1 for a BSS process
#' @param alpha a float, the smoothness parameter of the BSS process
#' @keywords internal
calculateK1 <- function(alpha) {
rho_vals <- rhoFractionGaussian(1:1e6, alpha)
summation <- 0
for (n in 2*(1:50)){
summation <- summation + factorial(n) * a1Coefficients(n)^2 *(1 + 2*sum(rho_vals^n))
}
sqrt(summation)
}
#' Calculate K_2 for a BSS process
#' @param alpha a float, the smoothness parameter of the BSS process
#' @keywords internal
calculateK2 <- function(alpha) {
sqrt(2*(1 + 2*sum(rhoFractionGaussian(1:1e6, alpha)^2)))
}
#' Calculate K_3 for a BSS process
#' @param alpha a float, the smoothness parameter of the BSS process
#' @keywords internal
calculateK3 <- function(alpha) {
rho_vals <- rhoFractionGaussian(1:1e6, alpha)
summation <- 0
for (n in 2*(1:50)){
summation <- summation + factorial(n) * a3Coefficients(n)^2 *(1 + 2*sum(rho_vals^n))
}
sqrt(summation)
}
#' Calculate K_4 for a BSS process
#' @param alpha a float, the smoothness parameter of the BSS process
#' @keywords internal
calculateK4 <- function(alpha) {
sqrt(2*6^2 * (1 + 2*sum(rhoFractionGaussian(1:1e6, alpha)^2)) + 24 * (1 + 2*sum(rhoFractionGaussian(1:1e6, alpha)^4)))
}
#' Calculate K_p for a BSS process for a given value of p
#' @param p an integer - the power to use for K_p
#' @param alpha a float, the smoothness parameter of the BSS process
#' @keywords internal
calculateK <- function(p, alpha) {
helper_functions <- list(calculateK1, calculateK2, calculateK3, calculateK4)
helper_functions[[p]](alpha)
}
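# Minimal illustration (kept unevaluated): K_p feeds the half-width of the
# confidence interval in estimateAccumulatedVolatilityCI; alpha is a toy value.
if (FALSE) {
  calculateK(p = 2, alpha = -0.2)
}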
#' Estimate K_p for a BSS process, for a given power p
#' @param p an integer - the power to use for K_p
#' @param Y a vector of observations of the BSS process
#' @keywords internal
estimateK <- function(Y, p) {
alpha <- bssAlphaFit(Y, p = 2)
calculateK(p, alpha)
}
|
/scratch/gouwar.j/cran-all/cranData/BSS/R/utils.R
|
CRB <- function(sdf, supp=NULL, A=NULL, eps=1e-04,...)
{
p <- length(sdf)
dsdf <- function(x,j){ (sdf[[j]](x+eps)-sdf[[j]](x-eps))/(2*eps)}
if(is.null(supp)) supp <- matrix(c(rep(-8,p),rep(8,p)),ncol=2)
if(is.null(A)) A <- diag(p)
kap <- NULL
lambda <- NULL
for(j in 1:p){
kap[j] <- integrate(Vectorize(function(x){dsdf(x,j)^2/sdf[[j]](x)}),supp[j,1],supp[j,2],...)$value
lambda[j] <- integrate(Vectorize(function(x){dsdf(x,j)^2*x^2/sdf[[j]](x)}),supp[j,1],supp[j,2],...)$value-1
}
crlb <- matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
if(i!=j){
crlb[i,j] <- kap[j]/(kap[i]*kap[j]-1)
}else{
crlb[i,i] <- 1/(lambda[i])
}
}
}
FIM <- matrix(0,p^2,p^2)
FIM1 <- matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
if(i==j){
FIM1 <- tcrossprod(A[,j],A[,i])*lambda[i]
for(l in 1:p){
if(l!=i){
FIM1 <- FIM1+tcrossprod(A[,l],A[,l])*kap[i]
}
}
FIM[((i-1)*p+1):(i*p),((j-1)*p+1):(j*p)]<-FIM1
}else{
FIM[((i-1)*p+1):(i*p),((j-1)*p+1):(j*p)]<-tcrossprod(A[,j],A[,i])
}
}
}
# sum of the off-diagonal CRLB entries; the recycled diag() product zeroes the diagonal
EMD <- sum(crlb-diag(crlb)*as.vector(diag(p)))
list(CRLB=crlb, FIM=FIM, EMD=EMD)
}
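# Minimal usage sketch (illustrative, kept unevaluated): source densities are
# supplied as a list of unit-variance density functions; the unit-variance
# logistic below is a toy choice, as the Gaussian case is degenerate for ICA.
if (FALSE) {
  f <- function(x) dlogis(x, location = 0, scale = sqrt(3)/pi)  # variance 1
  res <- CRB(sdf = list(f, f))
  res$CRLB
  res$EMD
}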
|
/scratch/gouwar.j/cran-all/cranData/BSSasymp/R/CRB.R
|
ASCOV_FOBI <- function(sdf, supp=NULL, A=NULL, ...)
{
p <- length(sdf)
moment3 <- NULL
moment4 <- NULL
moment6 <- NULL
if(is.null(supp)) supp <- matrix(c(rep(-Inf,p),rep(Inf,p)),ncol=2)
if(is.null(A)) A <- diag(p)
for(j in 1:p){
moment3[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*x^3}),supp[j,1],supp[j,2],...)$value
moment4[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*x^4}),supp[j,1],supp[j,2],...)$value
moment6[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*x^6}),supp[j,1],supp[j,2],...)$value
}
P <- matrix(0,p,p)
ord <- order(moment4,decreasing=TRUE)
for(j in 1:p){
P[j,ord[j]] <- 1
}
moment3 <- moment3[ord]
moment4 <- moment4[ord]
moment6 <- moment6[ord]
kurt <- moment4-3
ASCOV <- matrix(0,p^2,p^2)
for(i in 1:p){
for(j in 1:p){
if(i!=j){
ASVij <- moment6[i]+moment6[j]-moment3[i]^2-moment3[j]^2+sum(kurt)-7*kurt[i]-7*kurt[j]-kurt[i]^2+2*p-22
ASVij <- ASVij/(moment4[i]-moment4[j])^2
ASCOVij <- -moment6[i]-moment6[j]+moment3[i]^2+moment3[j]^2+kurt[i]^2+kurt[j]^2-kurt[i]*kurt[j]+7*(kurt[i]+kurt[j])-p*(p-1)+40-sum(kurt)
ASCOVij <- ASCOVij/(moment4[i]-moment4[j])^2
ASCOV <- ASCOV+ASCOVij*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASVij*kronecker(tcrossprod(diag(p)[,j],diag(p)[,j]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
if(i==j) ASCOV <- ASCOV+0.25*(moment4[i]-1)*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
}
EMD <- sum(diag(ASCOV)-diag(ASCOV)*as.vector(diag(p)))
W <- crossprod(t(P),solve(A))
W <- crossprod(diag(sign(rowMeans(W))),W)
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A, EMD=EMD)
}
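# Hedged usage sketch (assumed densities, for illustration): ASCOV_FOBI()
# takes the same kind of sdf list as CRB() and returns the limiting
# covariances of the FOBI estimates plus the EMD criterion; the sources
# must have distinct kurtosis values and finite sixth moments, e.g. a
# unit-variance logistic and a unit-variance uniform:
#   sdf <- list(function(x) dlogis(x, 0, sqrt(3)/pi),
#               function(x) dunif(x, -sqrt(3), sqrt(3)))
#   supp <- matrix(c(-20, -sqrt(3), 20, sqrt(3)), ncol = 2)
#   ASCOV_FOBI(sdf, supp = supp)$EMD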
ASCOV_FOBI_est <- function(X,mixed=TRUE)
{
n <- dim(X)[1]
p <- dim(X)[2]
if(mixed){
W <- FOBI(X)$W
}else W <- diag(p)
X <- tcrossprod(sweep(X,2,colMeans(X)),W)
moment3 <- NULL
moment4 <- NULL
moment6 <- NULL
for(j in 1:p){
moment3[j] <- mean(X[,j]^3)
moment4[j] <- mean(X[,j]^4)
moment6[j] <- mean(X[,j]^6)
}
P <- matrix(0,p,p)
ord <- order(moment4,decreasing=TRUE)
for(j in 1:p){
P[j,ord[j]] <- 1
}
moment3 <- moment3[ord]
moment4 <- moment4[ord]
moment6 <- moment6[ord]
kurt <- moment4-3
W <- crossprod(t(P),W)
ASCOV <- matrix(0,p^2,p^2)
for(i in 1:p){
for(j in 1:p){
if(i!=j){
ASVij <- moment6[i]+moment6[j]-moment3[i]^2-moment3[j]^2+sum(kurt)-7*kurt[i]-7*kurt[j]-kurt[i]^2+2*p-22
ASVij <- ASVij/(kurt[i]-kurt[j])^2
ASCOVij <- -moment6[i]-moment6[j]+moment3[i]^2+moment3[j]^2+kurt[i]^2+kurt[j]^2- kurt[i]*kurt[j]+7*(kurt[i]+kurt[j])-p*(p-1)+40-sum(kurt)
ASCOVij <- ASCOVij/(moment4[i]-moment4[j])^2
ASCOV <- ASCOV+ASCOVij*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASVij*kronecker(tcrossprod(diag(p)[,j],diag(p)[,j]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
if(i==j) ASCOV <- ASCOV+0.25*(moment4[i]-1)*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
}
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))/n
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))/n
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A)
}
|
/scratch/gouwar.j/cran-all/cranData/BSSasymp/R/FOBI_ascov.R
|
ASCOV_FastICAdefl <- function(sdf, gs, dgs, Gs=NULL, method="adapt", name=NULL, supp=NULL, A=NULL, ...)
{
if(method=="adapt"){
ng <- length(gs)
}else ng <- 1
if(length(name)!=length(gs)){
name <- NULL
for(i in 1:ng){
name[i] <- paste("g",i)
}
}
p <- length(sdf)
if(is.null(supp)) supp <- matrix(c(rep(-Inf,p),rep(Inf,p)),ncol=2)
if(is.null(A)) A <- diag(p)
alpha <- matrix(0,ng,p)
var_diag <- NULL
for(j in 1:p){
Ex4 <- integrate(Vectorize(function(x){sdf[[j]](x)*x^4}),supp[j,1],supp[j,2])$value
var_diag[j] <- (Ex4-1)/4
for(i in 1:ng){
Eg2 <- integrate(Vectorize(function(x){sdf[[j]](x)*gs[[i]](x)^2}),supp[j,1],supp[j,2],...)$value
Eg <- integrate(Vectorize(function(x){sdf[[j]](x)*gs[[i]](x)}),supp[j,1],supp[j,2],...)$value
Egx <- integrate(Vectorize(function(x){sdf[[j]](x)*gs[[i]](x)*x}),supp[j,1],supp[j,2],...)$value
Edg <- integrate(Vectorize(function(x){sdf[[j]](x)*dgs[[i]](x)}),supp[j,1],supp[j,2],...)$value
alpha[i,j] <- ifelse(abs(Egx-Edg)>1e-06,(Eg2-Eg^2-Egx^2)/(Egx-Edg)^2,Inf)
}
}
if(method=="adapt"){
alph <- ifelse(alpha>0,alpha,Inf)
usedg <- NULL
for(i in 1:(p-1)){
mina <- which.min(alph)
comp <- ceiling(mina/ng)
gc <- mina-(comp-1)*ng
usedg[i] <- name[[gc]]
alph[,comp] <- Inf
}
ba <- NULL
for(j in 1:p){
ba[j] <- min(alpha[,j])
}
ord <- order(ba)
}else if(method=="G"){
Gn <- integrate(Vectorize(function(x){Gs[[1]](x)*dnorm(x)}),-10,10,...)$value
EG <- NULL
for(j in 1:p){
EG[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*Gs[[1]](x)}),max(-20,supp[j,1]),min(20,supp[j,2]),...)$value
}
ord <- order(abs(EG-Gn),decreasing=TRUE)
usedg <- rep(name[[1]],p-1)
ba <- alpha[1,]
}else{
ord <- 1:p
usedg <- rep(name[[1]],p-1)
ba <- alpha[1,]
}
P <- diag(p)[ord,]
bas <- ba[ord]
var_diags <- var_diag[ord]
ASV <- matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
if(i<j){
ASV[i,j] <- bas[i]
}else if(i==j){
ASV[i,j] <- var_diags[j]
}else ASV[i,j] <- bas[j]+1
}
}
ASCOV <- diag(as.vector(ASV))
for(i in 1:p){
for(j in 1:p){
if(i<j){
ASCOV[(i-1)*p+j,(j-1)*p+i] <- -bas[i]
ASCOV[(j-1)*p+i,(i-1)*p+j] <- -bas[i]
}else if(i>j){
ASCOV[(i-1)*p+j,(j-1)*p+i] <- -bas[j]
ASCOV[(j-1)*p+i,(i-1)*p+j] <- -bas[j]
}
}
}
EMD <- sum(diag(ASCOV)-diag(ASCOV)*as.vector(diag(p)))
W <- crossprod(t(P),solve(A))
W <- crossprod(diag(sign(rowMeans(W))),W)
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A, EMD=EMD, used_gs=usedg)
}
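# Note on method = "adapt" (summarizing the selection loop above): at each
# of the p-1 deflation steps the (nonlinearity, component) pair with the
# smallest positive alpha is picked, and the chosen component's column of
# alph is set to Inf so it cannot be selected again. Hedged usage sketch
# with the classic cube nonlinearity (an assumed choice, not a packaged
# default):
#   gs  <- list(function(x) x^3)
#   dgs <- list(function(x) 3 * x^2)
#   sdf <- list(function(x) dlogis(x, 0, sqrt(3)/pi),
#               function(x) dunif(x, -sqrt(3), sqrt(3)))
#   supp <- matrix(c(-20, -sqrt(3), 20, sqrt(3)), ncol = 2)
#   ASCOV_FastICAdefl(sdf, gs, dgs, supp = supp)$used_gs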
alphas <- function(sdf,gs,dgs,name=NULL,supp=NULL,...)
{
ng<-length(gs)
if(length(name)!=length(gs)){
name <- NULL
for(i in 1:ng){
name[i] <- paste("g",i)
}
}
p <- length(sdf)
cnam<-NULL
for(j in 1:p){
cnam[j] <- paste("IC",j)
}
alpha <- matrix(0,ng,p)
colnames(alpha) <- cnam
rownames(alpha) <- name
if(is.null(supp)) supp <- matrix(c(rep(-Inf,p),rep(Inf,p)),ncol=2)
for(j in 1:p){
for(i in 1:ng){
Eg2 <- integrate(Vectorize(function(x){sdf[[j]](x)*gs[[i]](x)^2}),supp[j,1],supp[j,2],...)$value
Eg <- integrate(Vectorize(function(x){sdf[[j]](x)*gs[[i]](x)}),supp[j,1],supp[j,2],...)$value
Egx <- integrate(Vectorize(function(x){sdf[[j]](x)*gs[[i]](x)*x}),supp[j,1],supp[j,2],...)$value
Edg <- integrate(Vectorize(function(x){sdf[[j]](x)*dgs[[i]](x)}),supp[j,1],supp[j,2],...)$value
alpha[i,j] <- ifelse(abs(Egx-Edg)>1e-06,(Eg2-Eg^2-Egx^2)/(Egx-Edg)^2,Inf)
}
}
alpha
}
ASCOV_FastICAdefl_est <- function(X, gs, dgs, Gs=NULL, method="adapt",
name=NULL, mixed=TRUE)
{
if(method=="adapt"){
ng <- length(gs)
}else ng <- 1
if(length(name)!=length(gs)){
name <- NULL
for(i in 1:ng){
name[i] <- paste("g",i)
}
}
n <- dim(X)[1]
p <- dim(X)[2]
if(mixed){
fI <- adapt_fICA(X,gs,dgs)
W <- fI$W
alpha <- fI$alphas
usedg <- fI$used_gs
}else{
alpha <- alphas_data(X,gs,dgs)
alph <- ifelse(alpha>0,alpha,Inf)
usedg <- NULL
for(i in 1:(p-1)){
mina <- which.min(alph)
comp <- ceiling(mina/ng)
gc <- mina-(comp-1)*ng
usedg[i] <- name[[gc]]
alph[,comp] <- Inf
}
W <- diag(p)
}
X <- tcrossprod(sweep(X,2,colMeans(X)),W)
var_diag <- NULL
for(j in 1:p){
var_diag[j] <- (mean(X[,j]^4)-1)/4
}
if(method=="adapt"){
ba <- NULL
for(j in 1:p){
ba[j] <- min(alpha[,j])
}
ord <- order(ba)
}else{
Gn <- integrate(Vectorize(function(x){Gs[[1]](x)*dnorm(x)}),-10,10)$value
EG <- NULL
for(j in 1:p){
EG[j] <- mean(Gs[[1]](X[,j]))
}
ord <- order(abs(EG-Gn),decreasing=TRUE)
usedg <- rep(name[[1]],p-1)
ba <- alpha[1,]
}
P <- diag(p)[ord,]
bas <- ba[ord]
var_diags <- var_diag[ord]
W <- P%*%W
ASV <- matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
if(i<j){
ASV[i,j] <- bas[i]
}else if(i==j){
ASV[i,j] <- var_diags[j]
}else ASV[i,j] <- bas[j]+1
}
}
ASCOV <- diag(as.vector(ASV))
for(i in 1:p){
for(j in 1:p){
if(i<j){
ASCOV[(i-1)*p+j,(j-1)*p+i] <- -bas[i]
ASCOV[(j-1)*p+i,(i-1)*p+j] <- -bas[i]
}else if(i>j){
ASCOV[(i-1)*p+j,(j-1)*p+i] <- -bas[j]
ASCOV[(j-1)*p+i,(i-1)*p+j] <- -bas[j]
}
}
}
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))/n
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))/n
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A, used_gs=usedg)
}
alphas_data <- function(Z, gs, dgs)
{
ng <- length(gs)
p <- ncol(Z)
alpha <- matrix(0,ng,p)
for(j in 1:p){
for(i in 1:ng){
Eg <- mean(gs[[i]](Z[,j]))
Eg2 <- mean(gs[[i]](Z[,j])^2)
Egx <- mean(gs[[i]](Z[,j])*Z[,j])
Edg <- mean(dgs[[i]](Z[,j]))
alpha[i,j] <- ifelse(abs(Egx-Edg)>1e-06,(Eg2-Eg^2-Egx^2)/(Egx-Edg)^2,Inf)
}
}
alpha
}
ASCOV_FastICAsym <- function(sdf, G, g, dg, supp=NULL, A=NULL, ...)
{
p <- length(sdf)
if(is.null(supp)) supp <- matrix(c(rep(-Inf,p),rep(Inf,p)),ncol=2)
if(is.null(A)) A <- diag(p)
var_diag <- NULL
Ex4 <- Eg2 <- Eg <- Egx <- Edg <- EG <- sEGEgx_Edg <- NULL
EGn <- integrate(Vectorize(function(x){G(x)*dnorm(x)}),-10,10)$value
for(j in 1:p){
Ex4[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*x^4}),supp[j,1],supp[j,2])$value
var_diag[j] <- (Ex4[j]-1)/4
Eg2[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*g(x)^2}),supp[j,1],supp[j,2],...)$value
Eg[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*g(x)}),supp[j,1],supp[j,2],...)$value
Egx[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*g(x)*x}),supp[j,1],supp[j,2],...)$value
Edg[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*dg(x)}),supp[j,1],supp[j,2],...)$value
EG[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*G(x)}),max(-20,supp[j,1]),min(20,supp[j,2]),...)$value-EGn
sEGEgx_Edg[j] <- sign(EG[j]*(Egx[j]-Edg[j]))
}
if(sum(sEGEgx_Edg)<sum(abs(sEGEgx_Edg))) stop("at least one bad component")
ASV <- matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
if(i!=j){
ASV[i,j] <- (Eg2[i]-Eg[i]^2+Eg2[j]-Eg[j]^2-Egx[i]^2+Edg[j]*(Edg[j]-2*Egx[j]))/((sign(EG[i])*(Egx[i]-Edg[i])+sign(EG[j])*(Egx[j]-Edg[j]))^2)
}else ASV[i,j] <- var_diag[j]
}
}
ASCOV <- diag(as.vector(ASV))
for(i in 1:(p-1)){
for(j in (i+1):p){
ASCOV[(i-1)*p+j,(j-1)*p+i] <- (-(Eg2[i]-Eg[i]^2)-(Eg2[j]-Eg[j]^2)+Egx[i]^2+
Egx[j]^2+sign(EG[i])*sign(EG[j])*(Egx[i]-Edg[i])*(Egx[j]-Edg[j]))/
((sign(EG[i])*(Egx[i]-Edg[i])+sign(EG[j])*(Egx[j]-Edg[j]))^2)
ASCOV[(j-1)*p+i,(i-1)*p+j] <- ASCOV[(i-1)*p+j,(j-1)*p+i]
}
}
EMD <- sum(diag(ASCOV)-diag(ASCOV)*as.vector(diag(p)))
W <- solve(A)
W <- crossprod(diag(sign(rowMeans(W))),W)
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A, EMD=EMD)
}
ASCOV_FastICAsym_est <- function(X, G, g, dg, mixed=TRUE)
{
n <- dim(X)[1]
p <- dim(X)[2]
var_diag <- NULL
if(mixed){
fI <- fICA(X,g,dg,method="sym")
W <- fI$W
}else W <- diag(p)
X <- tcrossprod(sweep(X,2,colMeans(X)),W)
Ex4 <- Eg2 <- Eg <- Egx <- Edg <- EG <- NULL
EGn <- integrate(Vectorize(function(x){G(x)*dnorm(x)}),-10,10)$value
for(j in 1:p){
Ex4[j] <- mean(X[,j]^4)
var_diag[j] <- (Ex4[j]-1)/4
Eg[j] <- mean(g(X[,j]))
Eg2[j] <- mean(g(X[,j])^2)
Egx[j] <- mean(g(X[,j])*X[,j])
Edg[j] <- mean(dg(X[,j]))
EG[j] <- mean(G(X[,j]))-EGn
}
ASV <- matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
if(i!=j){
ASV[i,j] <- (Eg2[i]-Eg[i]^2+Eg2[j]-Eg[j]^2-Egx[i]^2+Edg[j]*(Edg[j]-2*Egx[j]))/((sign(EG[i])*(Egx[i]-Edg[i])+sign(EG[j])*(Egx[j]-Edg[j]))^2)
}else ASV[i,j] <- var_diag[j]
}
}
ASCOV <- diag(as.vector(ASV))
for(i in 1:(p-1)){
for(j in (i+1):p){
ASCOV[(i-1)*p+j,(j-1)*p+i] <- (-(Eg2[i]-Eg[i]^2)-(Eg2[j]-Eg[j]^2)+Egx[i]^2+
Egx[j]^2+sign(EG[i])*sign(EG[j])*(Egx[i]-Edg[i])*(Egx[j]-Edg[j]))/
((sign(EG[i])*(Egx[i]-Edg[i])+sign(EG[j])*(Egx[j]-Edg[j]))^2)
ASCOV[(j-1)*p+i,(i-1)*p+j] <- ASCOV[(i-1)*p+j,(j-1)*p+i]
}
}
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))/n
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))/n
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A)
}
ASCOV_FastICAsym2 <- function(sdf, G, g, dg, supp=NULL, A=NULL, ...)
{
p <- length(sdf)
if(is.null(supp)) supp <- matrix(c(rep(-Inf,p),rep(Inf,p)), ncol=2)
if(is.null(A)) A <- diag(p)
var_diag <- NULL
Ex4 <- Eg2 <- Eg <- Egx <- Edg <- EG <- sEGEgx_Edg <- NULL
EGn <- integrate(Vectorize(function(x){G(x)*dnorm(x)}),-10,10)$value
for(j in 1:p){
Ex4[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*x^4}),supp[j,1],supp[j,2])$value
var_diag[j] <- (Ex4[j]-1)/4
Eg2[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*g(x)^2}),supp[j,1],supp[j,2],...)$value
Eg[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*g(x)}),supp[j,1],supp[j,2],...)$value
Egx[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*g(x)*x}),supp[j,1],supp[j,2],...)$value
Edg[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*dg(x)}),supp[j,1],supp[j,2],...)$value
EG[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*G(x)}),max(-20,supp[j,1]),min(20,supp[j,2]),...)$value-EGn
sEGEgx_Edg[j] <- sign(EG[j]*(Egx[j]-Edg[j]))
}
if(sum(sEGEgx_Edg)<sum(abs(sEGEgx_Edg))) stop("at least one bad component")
ASV <- matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
if(i!=j){
ASV[i,j] <- (EG[i]^2*(Eg2[i]-Eg[i]^2-Egx[i]^2)+EG[j]^2*(Eg2[j]-Eg[j]^2-
2*Egx[j]*Edg[j]+Edg[j]^2))/((EG[i]*(Egx[i]-Edg[i])+EG[j]*(Egx[j]-Edg[j]))^2)
}else ASV[i,j] <- var_diag[j]
}
}
ASCOV <- diag(as.vector(ASV))
for(i in 1:(p-1)){
for(j in (i+1):p){
ASCOV[(i-1)*p+j,(j-1)*p+i] <- (-EG[i]^2*(Eg2[i]-Eg[i]^2-Egx[i]^2)-EG[j]^2*(Eg2[j]-
Eg[j]^2-Egx[j]^2)+EG[i]*EG[j]*(Egx[i]-Edg[i])*(Egx[j]-Edg[j]))/((EG[i]*(Egx[i]-Edg[i])+EG[j]*(Egx[j]-Edg[j]))^2)
ASCOV[(j-1)*p+i,(i-1)*p+j] <- ASCOV[(i-1)*p+j,(j-1)*p+i]
}
}
EMD <- sum(diag(ASCOV)-diag(ASCOV)*as.vector(diag(p)))
W <- solve(A)
W <- crossprod(diag(sign(rowMeans(W))),W)
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A, EMD=EMD)
}
ASCOV_FastICAsym2_est <- function(X, G, g, dg, mixed=TRUE)
{
n <- dim(X)[1]
p <- dim(X)[2]
var_diag <- NULL
if(mixed){
fI <- fICA(X,g,dg,G,method="sym2")
W <- fI$W
}else W <- diag(p)
X <- tcrossprod(sweep(X,2,colMeans(X)),W)
Ex4 <- Eg2 <- Eg <- Egx <- Edg <- EG <- NULL
EGn <- integrate(Vectorize(function(x){G(x)*dnorm(x)}),-10,10)$value
for(j in 1:p){
Ex4[j] <- mean(X[,j]^4)
var_diag[j] <- (Ex4[j]-1)/4
Eg[j] <- mean(g(X[,j]))
Eg2[j] <- mean(g(X[,j])^2)
Egx[j] <- mean(g(X[,j])*X[,j])
Edg[j] <- mean(dg(X[,j]))
EG[j] <- mean(G(X[,j]))-EGn
}
ASV <- matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
if(i!=j){
ASV[i,j] <- (EG[i]^2*(Eg2[i]-Eg[i]^2-Egx[i]^2)+EG[j]^2*(Eg2[j]-Eg[j]^2-2*Egx[j]*Edg[j]+Edg[j]^2))/((EG[i]*(Egx[i]-Edg[i])+EG[j]*(Egx[j]-Edg[j]))^2)
}else ASV[i,j] <- var_diag[j]
}
}
ASCOV <- diag(as.vector(ASV))
for(i in 1:(p-1)){
for(j in (i+1):p){
ASCOV[(i-1)*p+j,(j-1)*p+i] <- (-EG[i]^2*(Eg2[i]-Eg[i]^2-Egx[i]^2)-EG[j]^2*(Eg2[j]-
Eg[j]^2-Egx[j]^2)+EG[i]*EG[j]*(Egx[i]-Edg[i])*(Egx[j]-Edg[j]))/
((EG[i]*(Egx[i]-Edg[i])+EG[j]*(Egx[j]-Edg[j]))^2)
ASCOV[(j-1)*p+i,(i-1)*p+j] <- ASCOV[(i-1)*p+j,(j-1)*p+i]
}
}
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))/n
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))/n
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A)
}
|
/scratch/gouwar.j/cran-all/cranData/BSSasymp/R/FastICA_ascov.R
|
ASCOV_JADE <- function(sdf, supp=NULL, A=NULL, ...)
{
p <- length(sdf)
if(is.null(supp)) supp <- matrix(c(rep(-Inf,p),rep(Inf,p)),ncol=2)
if(is.null(A)) A <- diag(p)
moment3 <- NULL
moment4 <- NULL
moment6 <- NULL
for(j in 1:p){
moment3[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*x^3}),supp[j,1],supp[j,2],...)$value
moment4[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*x^4}),supp[j,1],supp[j,2],...)$value
moment6[j] <- integrate(Vectorize(function(x){sdf[[j]](x)*x^6}),supp[j,1],supp[j,2],...)$value
}
P <- matrix(0,p,p)
ord <- order(moment4,decreasing=TRUE)
for(j in 1:p){
P[j,ord[j]] <- 1
}
moment3 <- moment3[ord]
moment4 <- moment4[ord]
moment6 <- moment6[ord]
kurt <- moment4-3
ASCOV <- matrix(0,p^2,p^2)
for(i in 1:p){
for(j in 1:p){
if(i!=j){
ASVij <- kurt[i]^2*(moment6[i]-moment3[i]^2-kurt[i]^2-6*kurt[i]-9)+kurt[j]^2*(moment6[j]-moment3[j]^2-6*kurt[j]-9)
ASVij <- ASVij/((kurt[i]^2+kurt[j]^2)^2)
ASCOVij <- -kurt[i]^2*(moment6[i]-moment3[i]^2-kurt[i]^2-6*kurt[i]-9)-kurt[j]^2*(moment6[j]-moment3[j]^2-kurt[j]^2-6*kurt[j]-9-kurt[i]^2)
ASCOVij <- ASCOVij/((kurt[i]^2+kurt[j]^2)^2)
ASCOV <- ASCOV+ASCOVij*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASVij*kronecker(tcrossprod(diag(p)[,j],diag(p)[,j]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
if(i==j) ASCOV <- ASCOV+0.25*(moment4[j]-1)*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
}
EMD <- sum(diag(ASCOV)-diag(ASCOV)*as.vector(diag(p)))
W <- crossprod(t(P),solve(A))
W <- crossprod(diag(sign(rowMeans(W))),W)
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A, EMD=EMD)
}
ASCOV_JADE_est <- function(X, mixed=TRUE)
{
n <- dim(X)[1]
p <- dim(X)[2]
if(mixed){
W <- JADE(X)$W
}else W <- diag(p)
X <- tcrossprod(sweep(X,2,colMeans(X)),W)
moment3 <- NULL
moment4 <- NULL
moment6 <- NULL
for(j in 1:p){
moment3[j] <- mean(X[,j]^3)
moment4[j] <- mean(X[,j]^4)
moment6[j] <- mean(X[,j]^6)
}
P <- matrix(0,p,p)
ord <- order(moment4,decreasing=TRUE)
for(j in 1:p){
P[j,ord[j]] <- 1
}
moment3 <- moment3[ord]
moment4 <- moment4[ord]
moment6 <- moment6[ord]
kurt <- moment4-3
W <- crossprod(t(P),W)
ASCOV <- matrix(0,p^2,p^2)
for(i in 1:p){
for(j in 1:p){
if(i!=j){
ASVij <- kurt[i]^2*(moment6[i]-moment3[i]^2-kurt[i]^2-6*kurt[i]-9)+kurt[j]^2*(moment6[j]-moment3[j]^2-6*kurt[j]-9)
ASVij <- ASVij/((kurt[i]^2+kurt[j]^2)^2)
ASCOVij <- -kurt[i]^2*(moment6[i]-moment3[i]^2-kurt[i]^2-6*kurt[i]-9)-kurt[j]^2*(moment6[j]-moment3[j]^2-kurt[j]^2-6*kurt[j]-9-kurt[i]^2)
ASCOVij <- ASCOVij/((kurt[i]^2+kurt[j]^2)^2)
ASCOV <- ASCOV+ASCOVij*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASVij*kronecker(tcrossprod(diag(p)[,j],diag(p)[,j]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
if(i==j) ASCOV <- ASCOV+0.25*(moment4[j]-1)*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
}
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))/n
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))/n
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A)
}
|
/scratch/gouwar.j/cran-all/cranData/BSSasymp/R/JADE_ascov.R
|
D_lm <- function(F, l, m, Beta)
{
p <- dim(F)[1]
lm <- max(l,m)
q <- dim(F)[3]-1
D <- matrix(0,p,p)
for(i in 1:(p-1)){
for(j in (i+1):p){
for(k in (-q+lm):(q-lm)){
D[i,j] <- D[i,j]+F[i,i,abs(k+l)+1]*F[j,j,abs(k+m)+1]+
F[i,i,abs(k+l)+1]*F[j,j,abs(k-m)+1]
}
D[i,j] <- 0.5*D[i,j]+0.25*(Beta[i,j]-1)*((F[,,l+1]+
t(F[,,l+1]))[i,j]*(F[,,m+1]+t(F[,,m+1]))[i,j])
}
}
D <- D+t(D)
for(i in 1:p){
for(k in (-q+lm):(q-lm)){
D[i,i] <- D[i,i]+F[i,i,abs(k+l)+1]*F[i,i,abs(k+m)+1]+
F[i,i,abs(k+l)+1]*F[i,i,abs(k-m)+1]
}
D[i,i] <- D[i,i]+(Beta[i,i]-3)*F[i,i,l+1]*F[i,i,m+1]
}
D
}
ASCOV_SOBI <- function(psi, taus, a=2, Beta=NULL, A=NULL)
{
p <- dim(psi)[2]
q <- dim(psi)[1]
K <- length(taus)
if(is.null(A)) A <- diag(p)
if(q<(3*K)) psi <- rbind(psi,matrix(0,3*K-q,p))
q <- dim(psi)[1]
Psi <- matrix(0,q+1,p)
for(i in 1:p){
Psi[,i] <- c(1,psi[,i])
Psi[,i] <- Psi[,i]/sqrt(sum(Psi[,i]^2))
}
PSI <- array(0,c(p,p,q+1))
for(i in 1:p){
for(j in 1:(q+1)){
PSI[i,i,j] <- Psi[j,i]
}
}
F_tau <- array(0,c(p,p,q+1))
for(i in 0:q){
for(j in 1:(q+1-i)){
F_tau[,,i+1] <- F_tau[,,i+1]+tcrossprod(diag(PSI[,,j]),diag(PSI[,,j+i]))
}
}
if(is.null(Beta)) Beta <- 2*diag(p)+matrix(1,p,p)
Lambda <- array(0,c(p,p,K))
for(k in 1:K){
for(j in 1:p){
Lambda[j,j,k] <- F_tau[j,j,taus[k]+1]
}
}
Sum_lam <- rep(0,p)
for(j in 1:p){
for(k in 1:K){
Sum_lam[j] <- Sum_lam[j]+Lambda[j,j,k]^2
}
}
P <- matrix(0,p,p)
ord <- order(Sum_lam,decreasing=TRUE)
for(j in 1:p){
P[j,ord[j]] <- 1
}
for(k in 1:K){
Lambda[,,k] <- tcrossprod(crossprod(t(P),Lambda[,,k]),P)
}
for(i in 1:(q+1)){
F_tau[,,i] <- tcrossprod(crossprod(t(P),F_tau[,,i]),P)
}
ASCOV<-matrix(0,p^2,p^2)
for(j in 1:p){
for(i in 1:p){
if(i!=j){
ASV <- .C("ascov", as.double(as.vector(F_tau)),as.double(as.vector(Lambda)), as.double(taus),as.integer(c(i-1,j-1,p,q,K)),as.double(as.vector(Beta)),as.double(a),res=double(2), PACKAGE="BSSasymp")$res
ASCOV <- ASCOV+ASV[2]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASV[1]*kronecker(tcrossprod(diag(p)[,j],diag(p)[,j]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
if(i==j) ASCOV <- ASCOV+0.25*D_lm(F_tau,0,0,Beta)[i,i]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
}
EMD <- sum(diag(ASCOV)-diag(ASCOV)*as.vector(diag(p)))
W <- crossprod(t(P),solve(A))
W <- crossprod(diag(sign(rowMeans(W))),W)
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A, EMD=EMD)
}
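# Hedged usage sketch (the MA coefficients are assumptions): the columns of
# psi hold the MA(infinity) coefficients psi_1, psi_2, ... of each source;
# a leading coefficient of 1 is prepended and zero rows are padded
# internally so that at least 3*length(taus) lags are available.
#   psi <- cbind(c(0.6, 0.3), c(-0.4, 0.2))  # two short MA-type sources
#   ASCOV_SOBI(psi, taus = 1:3)$EMD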
# M is the number of autocovariances used in estimation.
ASCOV_SOBI_estN <- function(X, taus, mixed=TRUE, M=100, a=2)
{
p <- dim(X)[2]
T <- dim(X)[1]
K <- length(taus)
if(mixed){
W <- SOBI(X,taus)$W
}else W <- diag(p)
X <- tcrossprod(sweep(X,2,colMeans(X)),W)
F_tau <- array(0,c(p,p,M+1))
for(m in 0:M){
F_tau[,,m+1] <- tcrossprod(t(X[1:(T-m),]),t(X[(m+1):T,]))/(T-m)
}
Beta <- 2*diag(p)+matrix(1,p,p)
Lambda <- array(0,c(p,p,K))
for(k in 1:K){
for(j in 1:p){
Lambda[j,j,k] <- F_tau[j,j,taus[k]+1]
}
}
Sum_lam <- rep(0,p)
for(j in 1:p){
for(k in 1:K){
Sum_lam[j] <- Sum_lam[j]+Lambda[j,j,k]^2
}
}
P <- matrix(0,p,p)
ord <- order(Sum_lam,decreasing=TRUE)
for(j in 1:p){
P[j,ord[j]] <- 1
}
for(k in 1:K){
Lambda[,,k] <- tcrossprod(crossprod(t(P),Lambda[,,k]),P)
}
for(i in 1:(M+1)){
F_tau[,,i] <- tcrossprod(crossprod(t(P),F_tau[,,i]),P)
}
W <- crossprod(t(P),W)
ASCOV <- matrix(0,p^2,p^2)
for(j in 1:p){
for(i in 1:p){
if(i!=j){
ASV <- .C("ascov", as.double(as.vector(F_tau)),as.double(as.vector(Lambda)), as.double(taus),as.integer(c(i-1,j-1,p,M,K)),as.double(as.vector(Beta)),as.double(a),res=double(2), PACKAGE="BSSasymp")$res
ASCOV <- ASCOV+ASV[2]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASV[1]*kronecker(tcrossprod(diag(p)[,j],diag(p)[,j]), tcrossprod(diag(p)[,i],diag(p)[,i]))
}
if(i==j) ASCOV <- ASCOV+0.25*D_lm(F_tau,0,0,Beta)[i,i]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
}
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))/T
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))/T
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A)
}
ASCOV_SOBI_est <- function(X, taus, arp=NULL, maq=NULL, mixed=TRUE, M=100, a=2, ...)
{
p <- dim(X)[2]
T <- dim(X)[1]
K <- length(taus)
if(mixed){
W <- SOBI(X,taus)$W
}else W <- diag(p)
X <- tcrossprod(sweep(X,2,colMeans(X)),W)
if(is.null(arp)) arp <- rep(1,p)
if(is.null(maq)) maq <- rep(1,p)
Psi <- matrix(0,M+1,p)
for(i in 1:p){
arma <- arima(X[,i],c(arp[i],0,maq[i]),...)
psi0 <- ARMAtoMA(ar=arma$coef[min(1,arp[i]):arp[i]],
ma=arma$coef[(arp[i]+1):(arp[i]+maq[i])],M)
Psi[,i] <- c(1,psi0)
Psi[,i] <- Psi[,i]/sqrt(sum(Psi[,i]^2))
}
Beta <- matrix(0,p,p)
for(i in 1:p){
Ex4 <- mean(X[,i]^4)
SumPsi4 <- sum(Psi[,i]^4)
Beta[i,i] <- (Ex4-3)/SumPsi4+3
}
for(i in 1:(p-1)){
for(j in (i+1):p){
Exi2xj2 <- mean(X[,i]^2*X[,j]^2)
SumPsii2j2 <- sum(Psi[,i]^2*Psi[,j]^2)
Beta[i,j] <- (Exi2xj2-1)/SumPsii2j2+1
Beta[j,i] <- Beta[i,j]
}
}
PSI <- array(0,c(p,p,M+1))
for(i in 1:p){
for(j in 1:(M+1)){
PSI[i,i,j] <- Psi[j,i]
}
}
F_tau <- array(0,c(p,p,M+1))
for(i in 0:M){
for(j in 1:(M+1-i)){
F_tau[,,i+1] <- F_tau[,,i+1]+tcrossprod(diag(PSI[,,j]),diag(PSI[,,j+i]))
}
}
for(m in 0:M){
diag(F_tau[,,m+1]) <- diag(tcrossprod(t(X[1:(T-m),]),t(X[(m+1):T,]))/(T-m))
}
Lambda <- array(0,c(p,p,K))
for(k in 1:K){
for(j in 1:p){
Lambda[j,j,k] <- F_tau[j,j,taus[k]+1]
}
}
Sum_lam <- rep(0,p)
for(j in 1:p){
for(k in 1:K){
Sum_lam[j] <- Sum_lam[j]+Lambda[j,j,k]^2
}
}
P <- matrix(0,p,p)
ord <- order(Sum_lam,decreasing=TRUE)
for(j in 1:p){
P[j,ord[j]] <- 1
}
for(k in 1:K){
Lambda[,,k] <- tcrossprod(crossprod(t(P),Lambda[,,k]),P)
}
for(i in 1:(M+1)){
F_tau[,,i] <- tcrossprod(crossprod(t(P),F_tau[,,i]),P)
}
W <- crossprod(t(P),W)
ASCOV <- matrix(0,p^2,p^2)
for(j in 1:p){
for(i in 1:p){
if(i!=j){
ASV <- .C("ascov", as.double(as.vector(F_tau)),as.double(as.vector(Lambda)), as.double(taus),as.integer(c(i-1,j-1,p,M,K)),as.double(as.vector(Beta)),as.double(a),res=double(2), PACKAGE="BSSasymp")$res
ASCOV <- ASCOV+ASV[2]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASV[1]*kronecker(tcrossprod(diag(p)[,j],diag(p)[,j]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
if(i==j) ASCOV <- ASCOV+0.25*D_lm(F_tau,0,0,Beta)[i,i]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
}
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))/T
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))/T
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A)
}
|
/scratch/gouwar.j/cran-all/cranData/BSSasymp/R/SOBI_ascov.R
|
ASCOV_SOBIdefl <- function(psi, taus, Beta=NULL, A=NULL)
{
p <- dim(psi)[2]
q <- dim(psi)[1]
K <- length(taus)
if(is.null(A)) A <- diag(p)
if(q<(3*K)) psi <- rbind(psi,matrix(0,3*K-q,p))
q <- dim(psi)[1]
Psi <- matrix(0,q+1,p)
for(i in 1:p){
Psi[,i] <- c(1,psi[,i])
Psi[,i] <- Psi[,i]/sqrt(sum(Psi[,i]^2))
}
PSI <- array(0,c(p,p,q+1))
for(i in 1:p){
for(j in 1:(q+1)){
PSI[i,i,j] <- Psi[j,i]
}
}
F_tau <- array(0,c(p,p,q+1))
for(i in 0:q){
for(j in 1:(q+1-i)){
F_tau[,,i+1] <- F_tau[,,i+1]+tcrossprod(diag(PSI[,,j]),diag(PSI[,,j+i]))
}
}
if(is.null(Beta)) Beta <- 2*diag(p)+matrix(1,p,p)
Lambda <- array(0,c(p,p,K))
for(k in 1:K){
for(j in 1:p){
Lambda[j,j,k] <- F_tau[j,j,taus[k]+1]
}
}
Sum_lam <- rep(0,p)
for(j in 1:p){
for(k in 1:K){
Sum_lam[j] <- Sum_lam[j]+Lambda[j,j,k]^2
}
}
P <- matrix(0,p,p)
ord <- order(Sum_lam,decreasing=TRUE)
for(j in 1:p){
P[j,ord[j]] <- 1
}
for(k in 1:K){
Lambda[,,k] <- tcrossprod(crossprod(t(P),Lambda[,,k]),P)
}
for(i in 1:(q+1)){
F_tau[,,i] <- tcrossprod(crossprod(t(P),F_tau[,,i]),P)
}
ASCOV <- matrix(0,p^2,p^2)
for(j in 1:p){
for(i in 1:p){
if(j<i){
ASV <- .C("ascov_deflji", as.double(as.vector(F_tau)),as.double(as.vector(Lambda)), as.double(taus),as.integer(c(i-1,j-1,p,q,K)),as.double(as.vector(Beta)),res=double (2), PACKAGE="BSSasymp")$res
ASCOV <- ASCOV+ASV[2]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASV[1]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,j],diag(p)[,j]))
}
if(i==j){
ASCOV <- ASCOV+0.25*D_lm(F_tau,0,0,Beta)[i,i]*kronecker(tcrossprod(diag(p)[,i], diag(p)[,i]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
if(i<j){
ASV <- .C("ascov_deflij", as.double(as.vector(F_tau)),as.double(as.vector(Lambda)),as.double(taus),as.integer(c(i-1,j-1,p,q,K)),as.double(as.vector(Beta)), res=double (2),PACKAGE="BSSasymp")$res
ASCOV <- ASCOV+ASV[2]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASV[1]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]), tcrossprod(diag(p)[,j],diag(p)[,j]))
}
}
}
EMD <- sum(diag(ASCOV)-diag(ASCOV)*as.vector(diag(p)))
W <- crossprod(t(P),solve(A))
W <- crossprod(diag(sign(rowMeans(W))),W)
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A, EMD=EMD)
}
ASCOV_SOBIdefl_estN <- function(X, taus, mixed=TRUE, M=100)
{
p <- dim(X)[2]
T <- dim(X)[1]
if(length(taus)==1) taus <- 1:taus
K <- length(taus)
if(mixed){
W <- SOBI(X,taus,method="djd")$W
}else W <- diag(p)
X <- tcrossprod(sweep(X,2,colMeans(X)),W)
F_tau <- array(0,c(p,p,M+1))
for(m in 0:M){
F_tau[,,m+1] <- tcrossprod(t(X[1:(T-m),]),t(X[(m+1):T,]))/(T-m)
}
Beta <- 2*diag(p)+matrix(1,p,p)
Lambda <- array(0,c(p,p,K))
for(k in 1:K){
for(j in 1:p){
Lambda[j,j,k] <- F_tau[j,j,taus[k]+1]
}
}
Sum_lam <- rep(0,p)
for(j in 1:p){
for(k in 1:K){
Sum_lam[j] <- Sum_lam[j]+Lambda[j,j,k]^2
}
}
P <- matrix(0,p,p)
ord <- order(Sum_lam,decreasing=TRUE)
for(j in 1:p){
P[j,ord[j]] <- 1
}
for(k in 1:K){
Lambda[,,k] <- tcrossprod(crossprod(t(P),Lambda[,,k]),P)
}
for(i in 1:(M+1)){
F_tau[,,i] <- tcrossprod(crossprod(t(P),F_tau[,,i]),P)
}
W <- crossprod(t(P),W)
ASCOV <- matrix(0,p^2,p^2)
for(j in 1:p){
for(i in 1:p){
if(j<i){
ASV <- .C("ascov_deflji", as.double(as.vector(F_tau)),as.double(as.vector(Lambda)),as.double(taus),as.integer(c(i-1,j-1,p,M,K)),as.double(as.vector(Beta)), res=double(2),PACKAGE="BSSasymp")$res
ASCOV <- ASCOV+ASV[2]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASV[1]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,j],diag(p)[,j]))
}
if(i==j){
ASCOV <- ASCOV+0.25*D_lm(F_tau,0,0,Beta)[i,i]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
if(i<j){
ASV <- .C("ascov_deflij", as.double(as.vector(F_tau)),as.double(as.vector(Lambda)),as.double(taus),as.integer(c(i-1,j-1,p,M,K)),as.double(as.vector(Beta)), res=double (2),PACKAGE="BSSasymp")$res
ASCOV <- ASCOV+ASV[2]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASV[1]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,j],diag(p)[,j]))
}
}
}
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))/T
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))/T
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A)
}
ASCOV_SOBIdefl_est <- function(X, taus, arp=NULL, maq=NULL, mixed=TRUE, M=100, ...)
{
p <- dim(X)[2]
T <- dim(X)[1]
K <- length(taus)
if(mixed){
W <- SOBI(X,taus,method="djd")$W
}else W <- diag(p)
X <- tcrossprod(sweep(X,2,colMeans(X)),W)
if(is.null(arp)) arp <- rep(3,p)
if(is.null(maq)) maq <- rep(4,p)
Psi <- matrix(0,M+1,p)
for(i in 1:p){
arma <- arima(X[,i],c(arp[i],0,maq[i]),...)
psi0 <- ARMAtoMA(ar=arma$coef[min(1,arp[i]):arp[i]],
ma=arma$coef[(arp[i]+1):(arp[i]+maq[i])],M)
Psi[,i] <- c(1,psi0)
Psi[,i] <- Psi[,i]/sqrt(sum(Psi[,i]^2))
}
Beta <- matrix(0,p,p)
for(i in 1:p){
Ex4 <- mean(X[,i]^4)
SumPsi4 <- sum(Psi[,i]^4)
Beta[i,i] <- (Ex4-3)/SumPsi4+3
}
for(i in 1:(p-1)){
for(j in (i+1):p){
Exi2xj2 <- mean(X[,i]^2*X[,j]^2)
SumPsii2j2 <- sum(Psi[,i]^2*Psi[,j]^2)
Beta[i,j] <- (Exi2xj2-1)/SumPsii2j2+1
Beta[j,i] <- Beta[i,j]
}
}
PSI <- array(0,c(p,p,M+1))
for(i in 1:p){
for(j in 1:(M+1)){
PSI[i,i,j] <- Psi[j,i]
}
}
F_tau <- array(0,c(p,p,M+1))
for(i in 0:M){
for(j in 1:(M+1-i)){
F_tau[,,i+1] <- F_tau[,,i+1]+tcrossprod(diag(PSI[,,j]),diag(PSI[,,j+i]))
}
}
for(m in 0:M){
diag(F_tau[,,m+1]) <- diag(tcrossprod(t(X[1:(T-m),]),t(X[(m+1):T,]))/(T-m))
}
Lambda <- array(0,c(p,p,K))
for(k in 1:K){
for(j in 1:p){
Lambda[j,j,k] <- F_tau[j,j,taus[k]+1]
}
}
Sum_lam <- rep(0,p)
for(j in 1:p){
for(k in 1:K){
Sum_lam[j] <- Sum_lam[j]+Lambda[j,j,k]^2
}
}
P <- matrix(0,p,p)
ord <- order(Sum_lam,decreasing=TRUE)
for(j in 1:p){
P[j,ord[j]] <- 1
}
for(k in 1:K){
Lambda[,,k] <- tcrossprod(crossprod(t(P),Lambda[,,k]),P)
}
for(i in 1:(M+1)){
F_tau[,,i] <- tcrossprod(crossprod(t(P),F_tau[,,i]),P)
}
W <- crossprod(t(P),W)
ASCOV <- matrix(0,p^2,p^2)
for(j in 1:p){
for(i in 1:p){
if(j<i){
ASV <- .C("ascov_deflji", as.double(as.vector(F_tau)),as.double(as.vector(Lambda)),as.double(taus),as.integer(c(i-1,j-1,p,M,K)),as.double(as.vector(Beta)), res=double (2),PACKAGE="BSSasymp")$res
ASCOV <- ASCOV+ASV[2]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASV[1]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]),tcrossprod(diag(p)[,j],diag(p)[,j]))
}
if(i==j){
ASCOV <- ASCOV+0.25*D_lm(F_tau,0,0,Beta)[i,i]*kronecker(tcrossprod(diag(p)[,i], diag(p)[,i]),tcrossprod(diag(p)[,i],diag(p)[,i]))
}
if(i<j){
ASV <- .C("ascov_deflij", as.double(as.vector(F_tau)),as.double(as.vector(Lambda)),as.double(taus),as.integer(c(i-1,j-1,p,M,K)),as.double(as.vector(Beta)), res=double (2),PACKAGE="BSSasymp")$res
ASCOV <- ASCOV+ASV[2]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,j]),tcrossprod(diag(p)[,j],diag(p)[,i]))+ASV[1]*kronecker(tcrossprod(diag(p)[,i],diag(p)[,i]), tcrossprod(diag(p)[,j],diag(p)[,j]))
}
}
}
A <- solve(W)
COV_A <- crossprod(t(tcrossprod(kronecker(diag(p),A),ASCOV)),kronecker(diag(p),t(A)))/T
COV_W <- crossprod(t(tcrossprod(kronecker(t(W),diag(p)),ASCOV)),kronecker(W,diag(p)))/T
list(W=W, COV_W=COV_W, A=A, COV_A=COV_A)
}
|
/scratch/gouwar.j/cran-all/cranData/BSSasymp/R/SOBIdefl_ascov.R
|
aSOBI <- function(X, k=12, a=4, eps = 1e-06, maxiter = 1000)
{
if (length(k)==1) k <- 1:k
n <- dim(X)[1]
p <- dim(X)[2]
K <- length(k)
sobi <- SOBI(X,k=k,eps=eps,maxiter=maxiter)
W0 <- sobi$W
Z <- sobi$S
CM <- array(0,dim=c(p,p,K))
for(i in 1:K){
S <- crossprod(Z[1:(n-k[i]),],Z[(k[i]+1):n,])/(n-k[i])
CM[,,i] <- (1/2)*(S+t(S))
}
V <- diag(p)
V0 <- V
W <- matrix(0,p,p)
iter <- 0
while (TRUE){
iter <- iter+1
W <- matrix(0,p,p)
for(mi in 1:K){
W <- W+2*tcrossprod(V,CM[,,mi])*matrix(rep(diag(g(tcrossprod(tcrossprod(V,CM[,,mi]), V),a)),p),p,p)
}
V <- crossprod(solve(mat.sqrt(tcrossprod(W,W))),W)
if(mat.norm(V-V0)<eps) break
if(iter==maxiter) stop("maxiter reached without convergence")
V0<-V
}
W <- V%*%W0
S <- tcrossprod(Z,V)
acs <- acf(S, lag.max=max(k), plot=FALSE)$acf
ssq_ac <- NULL
for(j in 1:p){
ssq_ac[j]<-sum(abs(acs[k+1,j,j])^a)
}
ord <- order(ssq_ac, decreasing=TRUE)
P <- matrix(0,p,p)
for(j in 1:p){
P[j,ord[j]]<-1
}
W <- P%*%W
W <- diag(sign(rowMeans(W)))%*%W
S <- tcrossprod(sweep(X,2,colMeans(X),"-"),W)
S <- ts(S, names=paste("Series",1:p))
res <- list(W=W, S=S, k=k, a=a)
class(res) <- "bss"
res
}
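# Note (summarizing the ordering step above): after the rotation the
# components are sorted by the criterion sum_k |acf(S_j, k)|^a, so the
# series with the strongest autocorrelations over the chosen lags comes
# first. Hedged usage sketch on an assumed multivariate time series X:
#   res <- aSOBI(X, k = 12, a = 4)
#   res$S  # estimated sources, strongest autocorrelation first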
|
/scratch/gouwar.j/cran-all/cranData/BSSasymp/R/aSOBI.R
|
eSOBI <- function(X, taus=taus_def, M=200, fast=TRUE, eps=1e-06, maxiter=1000)
{
n <- dim(X)[1]
p <- dim(X)[2]
K <- length(taus)
Ws <- array(0,c(p,p,K))
sum_var <- rep(Inf,K)
if(fast){
Z<-SOBI(X,taus[[1]],eps=eps,maxiter=maxiter)$S
F_tau<-array(0,c(p,p,M+1))
for(m in 0:M){
F_tau[,,m+1]<-tcrossprod(t(Z[1:(n-m),]),t(Z[(m+1):n,]))/(n-m)
}
Beta<-2*diag(p)+matrix(1,p,p)
sasv<-NULL
for(i in 1:length(taus)){
Ki <- length(taus[[i]])
Lambda<-array(0,c(p,p,Ki))
for(k in 1:Ki){
for(j in 1:p){
Lambda[j,j,k]<-F_tau[j,j,taus[[i]][k]+1]
}
}
sum_var[i]<- sum(.C("ascov_all", as.double(as.vector(F_tau)),as.double(as.vector(Lambda)),as.double(taus[[i]]),as.integer(c(p,M,Ki)),as.double(as.vector(Beta)),res=double(p*(p-1)), PACKAGE="BSSasymp")$res)
}
b<-which.min(sum_var)
W<-SOBI(X,taus[[b]],eps=eps,maxiter=maxiter)$W
}else{
for(i in 1:K){
sobi <- tryCatch(SOBI(X, taus[[i]], eps=eps, maxiter=maxiter), error=function(e) 0)
if(is.list(sobi)){
Ws[,,i] <- sobi$W
ascov <- ASCOV_SOBI_estN(sobi$S,taus[[i]], mixed=FALSE, M=M)$COV_W*n
sum_var[i] <- sum(diag(ascov))
}
}
b <- which.min(sum_var)
W <- Ws[,,b]
}
S <- tcrossprod(X,W)
S <- sweep(S,2,colMeans(S),"-")
res <- list(W=W, S=S, taus_used=taus[[b]], sum_var=sum_var)
class(res) <- "bss"
res
}
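# Hedged usage sketch (X is an assumed multivariate time series): eSOBI()
# runs SOBI for each candidate lag set in taus (taus_def below by default),
# estimates the sum of the asymptotic variances of the off-diagonal
# unmixing entries for each set, and keeps the minimizer.
#   res <- eSOBI(X)
#   res$taus_used  # the selected lag set
#   res$sum_var    # criterion value for every candidate lag set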
|
/scratch/gouwar.j/cran-all/cranData/BSSasymp/R/eSOBI.R
|
taus_def <- list(1:12,1,1:2,1:3,1:5,1:8,1:20,1:50,c(1:10,(6:10)*2),
c(5:10,(6:10)*2,(5:10)*5),(1:10)*2,c(1,2,3,5,7,11,13,17,19))
|
/scratch/gouwar.j/cran-all/cranData/BSSasymp/R/eSOBI_lags.R
|
g <- function(x,a)
{
if(a==1){
res<-1
}else res<-sign(x)*a*abs(x)^(a-1)
res
}
mat.sqrt<-function(A)
{
eig<-eigen(A, symmetric=TRUE)
eig$vectors%*%(diag(eig$values^(1/2)))%*%t(eig$vectors)
}
mat.norm<-function(A)
{
sqrt(sum(A^2))
}
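# Quick sanity sketch for the helpers above (illustration only):
#   A <- crossprod(matrix(rnorm(9), 3, 3))  # symmetric positive definite
#   range(mat.sqrt(A) %*% mat.sqrt(A) - A)  # ~ 0 up to rounding error
#   mat.norm(diag(3))                       # Frobenius norm, sqrt(3)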
|
/scratch/gouwar.j/cran-all/cranData/BSSasymp/R/internals.R
|
#' @title Blind Source Separation Over Space
#' @description BSSS estimates the mixing matrix of the blind source separation model for multivariate spatial data.
#' @param x A numeric matrix of dimension c(n, p), where the p columns correspond to the entries of the random field and the n rows are the observations.
#' @param coord A numeric matrix of dimension c(n,2) where each row represents the coordinates of a point in the spatial domain. Only needed if the argument kernel_list is NULL.
#' @param kernel_type A string indicating which kernel function to use. Either 'ring', 'ball' or 'gauss'.
#' @param kernel_parameter A numeric vector that gives the parameters for the kernel function: at least of length one for 'ball' and 'gauss', or of length two for the 'ring' kernel.
#' @param kernel_list List of spatial kernel matrices with dimension c(n,n). Can be computed by the function \code{\link[SpatialBSS]{spatial_kernel_matrix}}.
#' @details BSSS estimates the mixing matrix by combining the information from all local covariance matrices and conducting an eigenanalysis.
#' @return BSSS returns a list containing the estimate of the mixing matrix, the estimated latent field, and the eigenvalues of the matrix W for validating the estimation. Larger gaps among the first few eigenvalues of W strengthen the validity of the estimation. See Zhang, Hao and Yao (2022) <arXiv:2201.02023> for details.
#' @import SpatialBSS expm
#' @export
#' @examples
#'
#' \donttest{
#' sample_size <- 500
#' coords <- runif(sample_size * 2) * 50
#' dim(coords) <- c(sample_size, 2)
#' dim <- 5 # specify the dimensionality of random variable
#' nu <- runif(dim, 0, 6) # parameter for matern covariance function
#' kappa <- runif(dim, 0, 2) # parameter for matern covariance function
#' zs <- gen_matern_gaussian_rf(coords=coords, dim=dim, nu=nu, kappa=kappa)
#' mix_mat <- diag(dim) # create a diagonal matrix as the mixing matrix
#' xs <- t(mix_mat %*% t(zs))
#' example <- BSSS(xs, coords, 'ring', c(0,0.5,0.5,1,1,8))
#' d_score(example$mix_mat_est, mix_mat)
#' }
BSSS<- function(x, coord, kernel_type, kernel_parameter, kernel_list = NULL){
x <- sweep(x, 2, colMeans(x))  # center each column; subtracting only the grand mean would leave per-column means in place
if (!missing(coord) &&
!missing(kernel_parameter) && is.vector(kernel_parameter)){
kernel_list <- SpatialBSS::spatial_kernel_matrix(coord, kernel_type = kernel_type,
kernel_parameters = kernel_parameter)
}else{
coord <- NULL
}
dim <- ncol(x)
size <- nrow(x)
B <- matrix(0, dim, dim)
for (l in 1:size) {
B <- B + tcrossprod(x[l,])
}
B <- B/size
eig_b <- eigen(B)  # eigendecompose B once
lambda_hat_b <- diag(eig_b$values)
omega_hat_b <- eig_b$vectors
ys <- t(solve(expm::sqrtm(lambda_hat_b)) %*% solve(omega_hat_b) %*% t(x))
w_hat <- Reduce("+", lapply(SpatialBSS::local_covariance_matrix(x = ys, kernel_list = kernel_list, center = FALSE),
function(lcov) tcrossprod(lcov)))/length(kernel_list)
omega_est <- eigen(w_hat, symmetric=TRUE)
omega_hat <- omega_est$vectors
lambda_hat <- omega_est$values
mix_mat_est <- omega_hat_b %*% expm::sqrtm(lambda_hat_b) %*% omega_hat
latent_field_est <- tcrossprod(x, solve(mix_mat_est))
return(list(mix_mat_est=mix_mat_est, latent_field_est=latent_field_est, w_eigenvalue=lambda_hat))
}
|
/scratch/gouwar.j/cran-all/cranData/BSSoverSpace/R/BSSS.R
|
#' @title d score
#' @description d score measures the similarity of two square matrices of the same dimension. d_score equals 0 if the estimator is a column permutation of the true value.
#' @param estimator A square matrix, usually an estimator of the \code{true_value} matrix.
#' @param true_value A square matrix, which the estimator is compared to.
#' @return A numeric value in [0,1].
#' @export
#' @examples d_score(diag(3), diag(3))
d_score <- function(estimator, true_value){
d <- solve(estimator) %*% true_value
q <- ncol(d)
d_coeff <- 1/(2*q*(sqrt(q)-1))
d_sum <- 0
for(j in 1:q){
max_ij <- max(abs(d[,j]))/sqrt(sum(d[,j]^2))
max_ji <- max(abs(d[j,]))/sqrt(sum(d[j,]^2))
temp <- 1/max_ij +1/max_ji-2
d_sum <- d_sum + temp
}
d_score <- d_coeff*d_sum
return(d_score)
}
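# Worked check (follows directly from the definition above): if the
# estimator is a column permutation of the true value, then
# d <- solve(estimator) %*% true_value is a permutation matrix, every
# max_ij and max_ji equals 1, and the score is exactly 0:
#   d_score(diag(3)[, c(2, 1, 3)], diag(3))  # 0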
|
/scratch/gouwar.j/cran-all/cranData/BSSoverSpace/R/d_score.R
|
#' @title Generating Gaussian random fields with Matern covariance function
#' @description Generate Gaussian random fields with Matern covariance function
#' @param coords coordinates of the target random field to be generated
#' @param dim dimension of the target random field to be generated
#' @param nu parameter of the Matern covariance function
#' @param kappa parameter of the Matern covariance function
#' @return A data matrix with the same number of rows as `coords` and `dim` columns.
#' @import rSPDE
#' @importFrom stats dist rnorm
#' @export
#'
gen_matern_gaussian_rf <- function(coords, dim, nu, kappa){
n <- nrow(coords)
field <- matrix(0, n, dim)
dist_coords <- dist(coords, method="maximum")
corr_list <- vector("list", dim)
for (i in 1:dim) {
ri <- unlist(lapply(dist_coords, function(x) matern.covariance(x, nu = nu[i], kappa = kappa[i], sigma = 1)))
r_matrix <- matrix(0, n, n)
r_matrix[lower.tri(r_matrix)] <- ri
r_matrix[upper.tri(r_matrix)] <- t(r_matrix)[upper.tri(r_matrix)]
diag(r_matrix) <- 1
r_svd <- svd(r_matrix)
r_dsq <- diag(sqrt(r_svd$d))
r_sqrt <- r_svd$u %*% r_dsq %*% t(r_svd$u)
corr_list[[i]] <- r_sqrt
}
for (i in 1:dim) {
field_rnorm <- rnorm(n)
field[,i] <- corr_list[[i]] %*% field_rnorm
}
return(field)
}
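# Hedged usage sketch (mirrors the simulation set-up in the BSSS example):
#   coords <- matrix(runif(200) * 50, ncol = 2)
#   zs <- gen_matern_gaussian_rf(coords = coords, dim = 3,
#                                nu = runif(3, 0, 6), kappa = runif(3, 0, 2))
#   dim(zs)  # 100 x 3: one column per latent component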
|
/scratch/gouwar.j/cran-all/cranData/BSSoverSpace/R/gen_matern_gaussian_rf.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(BSSoverSpace)
set.seed(16)
## ----sim coords---------------------------------------------------------------
sample_size <- 500
coords <- runif(sample_size * 2) * 50
dim(coords) <- c(sample_size, 2)
## ----sim zs-------------------------------------------------------------------
library('BSSoverSpace')
dim <- 5 # specify the dimensionality of random variable
nu <- runif(dim, 0, 6) # parameter for matern covariance function
kappa <- runif(dim, 0, 2) # parameter for matern covariance function
zs <- gen_matern_gaussian_rf(coords=coords, dim=dim, nu=nu, kappa=kappa)
## ----sim xs-------------------------------------------------------------------
mix_mat <- diag(dim) # create a diagonal matrix as the mixing matrix
xs <- t(mix_mat %*% t(zs))
## ----BSSS---------------------------------------------------------------------
example<-BSSS(xs, coord = coords, kernel_type = 'ring', kernel_parameter = c(0, 0.5, 0.5, 1, 1, 8))
## ----d_score------------------------------------------------------------------
d_score(example$mix_mat_est, mix_mat)
## -----------------------------------------------------------------------------
plot(example$w_eigenvalue)
|
/scratch/gouwar.j/cran-all/cranData/BSSoverSpace/inst/doc/my_vignette.R
|
---
title: "Introduction to BSSoverSpace"
output: rmarkdown::html_vignette
author: "Sixing Hao"
bibliography: reference.bib
vignette: >
%\VignetteIndexEntry{Introduction to BSSoverSpace}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(BSSoverSpace)
set.seed(16)
```
# Blind Source Separation Over Space
Blind Source Separation Over Space (BSSS) is a tool for analyzing spatial multivariate data. The Blind Source Separation method assumes that the observed variables are formed by a linear combination of underlying independent latent variables, which cannot be observed directly. The goal is to estimate the latent variables, which also involves estimating the mixing matrix. The package `BSSoverSpace` is an implementation of the work of @zhang_blind_2022. This vignette provides an introduction and brief instructions on how to use the functions within.
# The main function `BSSS`
In this package `BSSoverSpace`, the main function is `BSSS`. It implements the method of @zhang_blind_2022 for estimating the latent field $Z(s)$ and the unmixing matrix. This function takes 5 inputs: `x` is the data matrix of the observed random field $X(s)$. `coord` holds the coordinates of the observed random field $X(s)$. `kernel_type` and `kernel_parameter` specify the spatial kernels. For each `kernel_type`, the specification of `kernel_parameter` differs slightly. If `kernel_type` equals `'ring'`, there must be an even number of parameters in `kernel_parameter`. See `spatial_kernel_matrix` from the package `SpatialBSS` for more details.
Here, we generate a random field and use it to demonstrate the usage of this function. First we generate 500 2-dimensional coordinates:
```{r sim coords}
sample_size <- 500
coords <- runif(sample_size * 2) * 50
dim(coords) <- c(sample_size, 2)
```
Next, we generate a 5-variate latent Gaussian random field $Z(s)$ with a Matern covariance function in the following way:
```{r sim zs}
library('BSSoverSpace')
dim <- 5 # specify the dimensionality of random variable
nu <- runif(dim, 0, 6) # parameter for matern covariance function
kappa <- runif(dim, 0, 2) # parameter for matern covariance function
zs <- gen_matern_gaussian_rf(coords=coords, dim=dim, nu=nu, kappa=kappa)
```
Then, we create a mixing matrix $\Omega$, and mix our latent field to get the observed random field $X(s)$:
```{r sim xs}
mix_mat <- diag(dim) # create a diagonal matrix as the mixing matrix
xs <- t(mix_mat %*% t(zs))
```
Now the observed random field $X(s)$ is created, and we need to choose kernels. Here we choose 3 ring kernels, with parameters $(0, 0.5)$, $(0.5, 1)$ and $(1, 8)$.
```{r BSSS}
example<-BSSS(xs, coord = coords, kernel_type = 'ring', kernel_parameter = c(0, 0.5, 0.5, 1, 1, 8))
```
The function `BSSS` returns both the estimated mixing matrix $\hat{\Omega}$ `mix_mat_est` and the estimated latent field $\widehat{Z(s)}$. To see how good $\hat{\Omega}$ is, we can use the function `d_score`, which gives a numeric value between 0 and 1, with 0 meaning that the estimator is a column permutation of the true value:
```{r d_score}
d_score(example$mix_mat_est, mix_mat)
```
We can further explore the validity of our estimation by looking at the eigenvalues of $\hat{W}$. A larger gap between the first few eigenvalues strengthens the validity of our estimation. The details of $\hat{W}$ are given in @zhang_blind_2022.
```{r}
plot(example$w_eigenvalue)
```
## References
|
/scratch/gouwar.j/cran-all/cranData/BSSoverSpace/inst/doc/my_vignette.Rmd
|
# a wrapper for BSSprep
BSSprep <- function(X) {
.Call("PREPBSS", X, PACKAGE = "BSSprep") #calling the function PREPBSS
}
|
/scratch/gouwar.j/cran-all/cranData/BSSprep/R/BSSprep.R
|
#' @title S4 Class \code{"bsw"}
#' @slot call An object of class \code{"call"}.
#' @slot formula An object of class \code{"formula"}.
#' @slot coefficients A numeric vector containing the estimated model parameters.
#' @slot iter A positive integer indicating the number of iterations.
#' @slot converged A logical constant that indicates whether the model has converged.
#' @slot y A numeric vector containing the dependent variable of the model.
#' @slot x The model matrix.
#' @slot data A data frame containing the variables in the model.
#' @author Adam Bekhit, Jakob Schöpe
#' @import Matrix matrixStats quadprog
#' @export
setClass(Class = "bsw", slots = c(call = "language", formula = "formula", coefficients = "numeric", iter = "numeric", converged = "logical", y = "numeric", x = "matrix", data = "data.frame"))
|
/scratch/gouwar.j/cran-all/cranData/BSW/R/bsw-class.R
|
#' @title Fitting a log-binomial model using the Bekhit-Schöpe-Wagenpfeil (BSW) algorithm
#' @description \code{bsw()} fits a log-binomial model using a modified Newton-type algorithm (BSW algorithm) for solving the maximum likelihood estimation problem under linear inequality constraints.
#' @usage bsw(formula, data, maxit = 200L)
#' @param formula An object of class \code{"formula"} (or one that can be coerced to that class): a symbolic description of the model to be fitted.
#' @param data A data frame containing the variables in the model.
#' @param maxit A positive integer giving the maximum number of iterations.
#' @return An object of S4 class \code{"bsw"} containing the following slots:
#' \item{call}{An object of class \code{"call"}.}
#' \item{formula}{An object of class \code{"formula"}.}
#' \item{coefficients}{A numeric vector containing the estimated model parameters.}
#' \item{iter}{A positive integer indicating the number of iterations.}
#' \item{converged}{A logical constant that indicates whether the model has converged.}
#' \item{y}{A numerical vector containing the dependent variable of the model.}
#' \item{x}{The model matrix.}
#' \item{data}{A data frame containing the variables in the model.}
#' @references Wagenpfeil S (1996) Dynamische Modelle zur Ereignisanalyse. Herbert Utz Verlag Wissenschaft, Munich, Germany
#'
#' Wagenpfeil S (1991) Implementierung eines SQP-Verfahrens mit dem Algorithmus von Ritter und Best. Diplomarbeit, TUM, Munich, Germany
#' @author Adam Bekhit, Jakob Schöpe
#' @examples
#' set.seed(123)
#' x <- rnorm(100, 50, 10)
#' y <- rbinom(100, 1, exp(-4 + x * 0.04))
#' fit <- bsw(formula = y ~ x, data = data.frame(y = y, x = x))
#' summary(fit)
#' @export
bsw <- function(formula, data, maxit = 200L) {
call <- match.call()
if (!inherits(x = formula, what = "formula")) {
stop("\"formula\" must be of class \"formula\"")
}
else if (!is.integer(maxit)) {
stop("\"maxit\" must be a positive integer")
}
else if (length(maxit) != 1L) {
stop("single positive integer for \"maxit\" expected")
}
else {
data <- stats::model.frame(formula = formula, data = data)
y <- unname(stats::model.matrix(stats::as.formula(paste("~", all.vars(formula)[1])), data = data)[,-1])
x <- stats::model.matrix(object = formula, data = data)
theta <- c(log(mean(y)), rep(0, times = ncol(x) - 1))
Amat <- constr(x)
bvec <- rep(0, times = nrow(Amat))
converged <- FALSE
iter <- 0
while(isFALSE(converged) & iter < maxit) {
iter <- iter + 1
Dmat <- Matrix::nearPD(hess(theta, y, x))$mat
dvec <- gradF(theta, y, x) + t(theta) %*% Dmat
fit <- quadprog::solve.QP(Dmat = Dmat, dvec = dvec, Amat = t(Amat), bvec = bvec)
converged <- all(abs(fit$solution - theta) < 1e-4)
theta <- fit$solution
names(theta) <- colnames(x)
}
if (iter == maxit && isFALSE(converged)) {
stop("Maximum number of iterations reached without convergence")
}
return(methods::new(Class = "bsw", call = call, formula = formula, coefficients = theta, iter = iter, converged = converged, y = y, x = x, data = data))
}
}
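# Note (a sketch of the iteration above, not additional package API): each
# pass solves the quadratic program
#   min_theta 1/2 * t(theta) %*% D %*% theta - t(dvec) %*% theta
#   subject to constr(x) %*% theta >= 0,
# where D is the Hessian of the negative log likelihood projected onto the
# nearest positive definite matrix and dvec is the gradient plus D times
# the current iterate. Without the constraints this is exactly a Newton
# step; the loop stops once every coordinate of the parameter change falls
# below 1e-4.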
|
/scratch/gouwar.j/cran-all/cranData/BSW/R/bsw.R
|
#' @title Extracting the estimated model parameters of \code{bsw()}
#' @description For objects of class \code{"bsw"}, \code{coef()} extracts the estimated model parameters of \code{bsw()}.
#' @param object An object of class \code{"bsw"}.
#' @return A numeric vector containing the estimated model parameters.
#' @aliases coef,bsw-method
#' @author Adam Bekhit, Jakob Schöpe
#' @export
setMethod(f = "coef",
signature = "bsw",
definition = function(object) {
return(object@coefficients)
}
)
|
/scratch/gouwar.j/cran-all/cranData/BSW/R/coef-method.R
|
#' @title Estimating confidence intervals of the estimated model parameters of \code{bsw()}
#' @description For objects of class \code{"bsw"}, \code{confint()} estimates confidence intervals of the estimated model parameters of \code{bsw()}.
#' @param object An object of class \code{"bsw"}.
#' @param parm A specification of which model parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all model parameters are considered.
#' @param level A numeric value that indicates the level of confidence.
#' @param method A character giving the estimation method of the confidence intervals (\code{"bca"} or \code{"wald"}).
#' @param R A positive integer giving the number of bootstrap replicates.
#' @details \code{confint} provides Wald (default) and bias-corrected accelerated bootstrap confidence intervals of the estimated model parameters of \code{bsw()}.
#' @return A matrix with columns giving the lower and upper confidence limits of each estimated model parameter.
#' @aliases confint,bsw-method
#' @author Adam Bekhit, Jakob Schöpe
#' @export
setMethod(f = "confint",
signature = "bsw",
definition = function(object, parm, level = .95, method = "wald", R = 1000L) {
if (!is.numeric(level)) {
stop("\"level\" must be a numeric value")
}
else if (length(level) != 1L) {
stop("single numeric value for \"level\" expected")
}
else if (!is.character(method)) {
stop("\"method\" must be a character string")
}
else if (length(method) != 1L) {
stop("single character string for \"method\" expected")
}
else if (!(method %in% c("bca", "wald"))) {
stop("\"method\" is misspecified. Currently available confidence interval estimation procedures are: \"bca\" and \"wald\"")
}
else if (!is.integer(R)) {
stop("\"R\" must be a positive integer")
}
else if (length(R) != 1L) {
stop("single positive integer for \"R\" expected")
}
else if (R < 1000L) {
stop("\"R\" must be a positive integer equal to or greater than 1000")
}
else {
cf <- coef(object)
pnames <- names(cf)
if (missing(parm)) {
parm <- pnames
}
else if (is.numeric(parm)) {
parm <- pnames[parm]
}
alpha <- (1 - level) / 2
p <- c(alpha, 1 - alpha)
ci <- array(data = NA, dim = c(length(parm), 2L), dimnames = list(parm, paste(x = format(x = 100 * p, digits = 3, scientific = FALSE, trim = TRUE), "%", sep = "")))
if (method == "bca") {
f <- function(formula, data, parm, indices) {
dat <- data[indices,]
fit <- bsw(formula = formula, data = dat)
return(coef(fit)[parm])
}
b <- boot::boot(data = object@data, statistic = f, R = R, formula = object@formula, parm = parm)
ci[] <- matrix(unlist(lapply(1:ncol(b$t), function(i) {boot::boot.ci(b, conf = level, type = "bca", index = i)$bca[4:5]})), ncol = 2, byrow = TRUE)
}
if (method == "wald") {
se <- sqrt(diag(solve(hess(cf, object@y, object@x))))[parm]
ci[] <- cf[parm] + se %o% stats::qnorm(p)
}
return(ci)
}
}
)
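# Hedged usage sketch (reusing the simulated data from the bsw() example):
#   fit <- bsw(formula = y ~ x, data = data.frame(y = y, x = x))
#   confint(fit)                             # Wald intervals (default)
#   confint(fit, method = "bca", R = 1000L)  # BCa bootstrap intervals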
|
/scratch/gouwar.j/cran-all/cranData/BSW/R/confint-method.R
|
#' @title Setting the linear inequality constraints for \code{bsw()}
#' @description \code{constr()} sets the linear inequality constraints for \code{bsw()}.
#' @usage constr(x)
#' @param x A model matrix.
#' @return A matrix containing the linear inequality constraints for \code{bsw()}.
#' @author Adam Bekhit, Jakob Schöpe
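#' @examples
#' ## Illustrative sketch: the constraint matrix for an intercept plus one
#' ## binary covariate (assumes the matrixStats package is installed).
#' x <- cbind("(Intercept)" = 1, x = c(0, 1, 0, 1))
#' constr(x)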
#' @export
constr <- function(x) {
colMax <- matrixStats::colMaxs(x)
colMin <- matrixStats::colMins(x)
const <- expand.grid(lapply(2:length(colMax), function(i) {c(colMin[i], colMax[i])}))
Amat <- unname(as.matrix(cbind(rep(1, times = nrow(const)), const)))
return(-Amat)
}
|
/scratch/gouwar.j/cran-all/cranData/BSW/R/constr.R
|
#' @title Deriving the first derivatives of the log likelihood function of the log-binomial model in \code{bsw()}
#' @description \code{gradF()} derives the first derivatives of the log likelihood function of the log-binomial model.
#' @usage gradF(theta, y, x)
#' @param theta A numeric vector containing the initial values of the model parameters.
#' @param y A numeric vector containing the dependent variable of the model.
#' @param x The model matrix.
#' @return A numeric vector containing the first derivatives of the log likelihood function of the log-binomial model.
#' @author Adam Bekhit, Jakob Schöpe
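#' @examples
#' ## A minimal sketch: the score vector at a starting value, evaluated on
#' ## toy data (not taken from the package itself).
#' x <- cbind(1, c(0, 1, 0, 1))
#' y <- c(0, 1, 0, 1)
#' gradF(theta = c(-0.5, 0.1), y = y, x = x)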
#' @export
gradF <- function(theta, y, x) {
p <- exp(x %*% theta)
p[p >= 1] <- 1 - 1e-5
s <- (y - p) / (1 - p)
deriv1 <- as.vector(t(x) %*% s)
return(deriv1)
}
|
/scratch/gouwar.j/cran-all/cranData/BSW/R/gradF.R
|
#' @title Deriving the second partial derivatives of the log likelihood function of the log-binomial model in \code{bsw()} (Hessian matrix)
#' @description \code{hess()} derives the second partial derivatives of the log likelihood function of the log-binomial model.
#' @usage hess(theta, y, x)
#' @param theta A numeric vector containing the initial values of the model parameters.
#' @param y A numeric vector containing the dependent variable of the model.
#' @param x The model matrix.
#' @return A numeric matrix containing the negative of the second partial derivatives of the log likelihood function of the log-binomial model (the negative Hessian, i.e. the observed information matrix).
#' @author Adam Bekhit, Jakob Schöpe
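#' @examples
#' ## A minimal sketch on toy data; summary() inverts the returned matrix
#' ## to obtain the standard errors of the model parameters.
#' x <- cbind(1, c(0, 1, 0, 1))
#' y <- c(0, 1, 0, 1)
#' hess(theta = c(-0.5, 0.1), y = y, x = x)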
#' @export
hess <- function(theta, y, x) {
p <- exp(x %*% theta)
p[p >= 1] <- 1 - 1e-5
s <- p * (y - 1) / (1 - p)^2
im <- 0
for(i in 1:nrow(x)){
im <- im + x[i,] %*% t(x[i,]) * s[i]
}
colnames(im) <- names(theta)
rownames(im) <- names(theta)
return(-im)
}
|
/scratch/gouwar.j/cran-all/cranData/BSW/R/hess.R
|
#' @title Summarizing the estimated model parameters of \code{bsw()}
#' @description For objects of class \code{"bsw"}, \code{summary()} summarizes the estimated model parameters of \code{bsw()}.
#' @param object An object of class \code{"bsw"}.
#' @return A list containing the following elements:
#' \item{coefficients}{A numeric vector containing the estimated model parameters.}
#' \item{std.err}{A numeric vector containing the estimated standard errors of the model parameters.}
#' \item{z.value}{A numeric vector containing the estimated z test statistic of the model parameters.}
#' \item{p.value}{A numeric vector containing the estimated p values of the model parameters.}
#' @aliases summary,bsw-method
#' @author Adam Bekhit, Jakob Schöpe
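#' @examples
#' ## A hedged sketch using the toy cohort data from the package vignette:
#' df <- data.frame(y = rep(c(0, 1), each = 250),
#'                  x = rep(c(0, 1, 0, 1), times = c(200, 50, 50, 200)))
#' fit <- bsw(y ~ x, data = df)
#' summary(fit)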
#' @export
setMethod(f = "summary",
signature = "bsw",
definition = function(object) {
cf <- coef(object)
ci <- confint(object)
se <- sqrt(diag(solve(hess(cf, object@y, object@x))))
z <- cf / se
p <- 2 * stats::pnorm(abs(z), lower.tail = FALSE)
coef.table <- cbind(as.matrix(cf), as.matrix(se), as.matrix(z), as.matrix(p), as.matrix(exp(cf)), exp(ci))
colnames(coef.table) <- c("Estimate", "Std. Error", "z value", "Pr(>|z|)", "RR", colnames(ci))
cat("Call:\n")
print(object@call)
cat("\nConvergence:", object@converged)
cat("\nCoefficients:\n")
print(coef.table)
cat("\nIterations:", object@iter, "\n")
return(invisible(list(coefficients = cf, std.err = se, z.value = z, p.value = p)))
}
)
|
/scratch/gouwar.j/cran-all/cranData/BSW/R/summary-method.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- echo=TRUE, results='markup', comment=""---------------------------------
library(testthat)
library(BSW)
df <- data.frame(y = rep(c(0, 1), each = 250),
x = rep(c(0, 1, 0, 1), times = c(200, 50, 50, 200))
)
RR <- (200 * 250) / (50 * 250)
SE <- sqrt((1/200 + 1/50) - (1/250 + 1/250))
fit <- bsw(y ~ x, df)
out <- summary(fit)
## ---- echo=TRUE, results='markup', comment=""---------------------------------
test_that(desc = "Estimated relative risk is equal to 4",
code = {
expect_equal(object = unname(exp(coef(fit)[2])),
expected = RR)
}
)
## ---- echo=TRUE, results='markup', comment=""---------------------------------
test_that(desc = "Estimated standard error is equal to 0.1303840",
code = {
expect_equal(object = unname(out$std.err[2]),
expected = SE)
}
)
## ---- echo=TRUE, results='markup', comment=""---------------------------------
test_that(desc = "Estimated z-value is equal to 10.63239",
code = {
expect_equal(object = unname(out$z.value[2]),
expected = log(RR) / SE)
}
)
## ---- echo=TRUE, results='markup', comment=""---------------------------------
test_that(desc = "Estimated 95% confidence interval limits are equal to 3.097968 and 5.164676",
code = {
expect_equal(object = unname(exp(confint(fit)[2,])),
expected = exp(log(RR) + SE * qnorm(c(0.025, 0.975))))
}
)
|
/scratch/gouwar.j/cran-all/cranData/BSW/inst/doc/Validation.R
|
---
title: "Validation"
author: "Jakob Schöpe"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Validation}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Test Suite
The following fictitious example of a prospective cohort study is used to validate the estimates produced by the BSW package in R.
||Exposed|Non-Exposed|
|:--:|:--:|:--:|
|Cases|200|50|
|Non-Cases|50|200|
```{r, echo=TRUE, results='markup', comment=""}
library(testthat)
library(BSW)
df <- data.frame(y = rep(c(0, 1), each = 250),
x = rep(c(0, 1, 0, 1), times = c(200, 50, 50, 200))
)
RR <- (200 * 250) / (50 * 250)
SE <- sqrt((1/200 + 1/50) - (1/250 + 1/250))
fit <- bsw(y ~ x, df)
out <- summary(fit)
```
The relative risk for exposed individuals compared to non-exposed individuals can be calculated from
<center>$RR = \displaystyle\frac{200 * 250}{50*250} = 4$.</center>
```{r, echo=TRUE, results='markup', comment=""}
test_that(desc = "Estimated relative risk is equal to 4",
code = {
expect_equal(object = unname(exp(coef(fit)[2])),
expected = RR)
}
)
```
The standard error of the natural logarithm of the relative risk can be calculated from
<center>$SE(ln(RR)) = \displaystyle\sqrt{\Big(\frac{1}{200} + \frac{1}{50}\Big) - \Big(\frac{1}{250}+\frac{1}{250}\Big)} = 0.130384$.</center>
```{r, echo=TRUE, results='markup', comment=""}
test_that(desc = "Estimated standard error is equal to 0.1303840",
code = {
expect_equal(object = unname(out$std.err[2]),
expected = SE)
}
)
```
The z-value can be calculated from
<center>$z = \displaystyle\frac{1.386294}{0.130384} = 10.63239$.</center>
```{r, echo=TRUE, results='markup', comment=""}
test_that(desc = "Estimated z-value is equal to 10.63239",
code = {
expect_equal(object = unname(out$z.value[2]),
expected = log(RR) / SE)
}
)
```
The 95% confidence interval limits can be calculated from
<center>$exp(1.386294 \pm 1.959964 * 0.1303840) = [3.097968; 5.164676]$.</center>
```{r, echo=TRUE, results='markup', comment=""}
test_that(desc = "Estimated 95% confidence interval limits are equal to 3.097968 and 5.164676",
code = {
expect_equal(object = unname(exp(confint(fit)[2,])),
expected = exp(log(RR) + SE * qnorm(c(0.025, 0.975))))
}
)
```
|
/scratch/gouwar.j/cran-all/cranData/BSW/inst/doc/Validation.Rmd
|
`BOOTSimpsonD` <-
function(X, f, type="Dunnett",
cmat=NULL, conf.level=0.95, alternative=c("two.sided", "less", "greater"), madj=TRUE, ...)
{
args<-list(...)
alternative<-match.arg(alternative)
BSimpson<-function(X, i, f)
{
XNEW<-as.data.frame(X[i,])
est<-estSimpsonf(X=XNEW, f=f)
return(est$estimate)
}
bargs<-args
bargs$data<-as.data.frame(X)
bargs$statistic=BSimpson
bargs$strata=f
bargs$f<-f
if(is.null(bargs$R)){bargs$R<-999}
if(is.null(bargs$sim)){bargs$sim<-"ordinary"}
if(is.null(bargs$stype)){bargs$stype<-"i"}
bootout<-do.call("boot", bargs)
diffchains<-CCDiff.boot(x=bootout, cmat=cmat, type=type)
if(madj)
{
confint<-SCSnp.default(x=diffchains$chains,
conf.level=conf.level,
alternative=alternative)
}else{
confint<-CInp.default(x=diffchains$chains,
conf.level=conf.level,
alternative=alternative)
}
return(confint)
}
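## Hedged usage sketch ('counts' and 'groups' are hypothetical objects: a
## data.frame of counts and a grouping factor, as expected by estSimpsonf):
# ci <- BOOTSimpsonD(X = counts, f = groups, type = "Dunnett", R = 999)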
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/BOOTSimpsonD.R
|
`BOOTSimpsonR` <-
function(X, f, type="Dunnett",
cmat=NULL, conf.level=0.95, alternative=c("two.sided", "less", "greater"), madj=TRUE, ...)
{
args<-list(...)
alternative<-match.arg(alternative)
BSimpson<-function(X, i, f)
{
XNEW<-as.data.frame(X[i,])
est<-estSimpsonf(X=XNEW, f=f)
return(est$estimate)
}
bargs<-args
bargs$data<-as.data.frame(X)
bargs$statistic=BSimpson
bargs$strata=f
bargs$f<-f
if(is.null(bargs$R)){bargs$R<-999}
if(is.null(bargs$sim)){bargs$sim<-"ordinary"}
if(is.null(bargs$stype)){bargs$stype<-"i"}
bootout<-do.call("boot", bargs)
ratiochains<-CCRatio.boot(x=bootout, cmat=cmat, type=type)
if(madj)
{
confint<-SCSnp.default(x=ratiochains$chains,
conf.level=conf.level,
alternative=alternative)
}else{
confint<-CInp.default(x=ratiochains$chains,
conf.level=conf.level,
alternative=alternative)
}
return(confint)
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/BOOTSimpsonR.R
|
`CCDiff` <-
function(bugs, dat, cmat=NULL,
type=c("Dunnett", "Tukey", "Sequen", "Williams", "Changepoint"))
{
type<-match.arg(type)
if(!inherits(bugs, "bugs"))
{stop("argument bugs must be an object of class 'bugs'")}
if(!inherits(dat, "R2Bugsdat1w"))
{stop("argument dat must be an object of class 'R2Bugsdat1w'")}
if(dat$Intercept==TRUE)
{stop("dat$Intercept must be FALSE")}
ngroup<-dat$names$ni
chains<-bugs$sims.list$muvec
if(is.null(cmat))
{
cmat<-contrMat(n=ngroup,type=type)
}
else{
if(!is.matrix(cmat))
{stop("'cmat' must be a matrix, specifying the contrast coefficients")}
if(ngroup!=ncol(cmat))
{stop("ncol(cmat) must be the same as the number of means in muvec")}
cs<-apply(cmat,1,sum)
if(any(cs!=0))
{warning("Rows of cmat do not sum up to zero. Are the contrasts appropriately defined?")}
}
nchains<-apply(X=chains, MARGIN=1, FUN=function(x){cmat %*% x})
if(nrow(cmat)==1)
{nchains<-matrix(nchains, nrow=1)}
rownames(nchains)<-rownames(cmat)
out<-list(
chains=t(nchains),
bugs=bugs,
dat=dat,
cmat=cmat
)
class(out)<-"CCDiff"
return(out)
}
`CCDiff.default` <-
function(x, cmat)
{
if(!is.matrix(x) & !is.data.frame(x))
{stop("Argument 'x'must be a matrix or data.frame!")}
ngroup<-ncol(x)
Nsim<-nrow(x)
chains<-x
if(!is.matrix(cmat))
{stop("'cmat' must be a matrix, specifying the contrast coefficients")}
if(ngroup!=ncol(cmat))
{stop("ncol(cmat) must be the same as the number of means in muvec")}
cs<-apply(cmat,1,sum)
if(any(cs!=0))
{warning("Rows of cmat do not sum up to zero. Are the contrasts appropriately defined?")}
nchains<-apply(X=chains, MARGIN=1, FUN=function(x){cmat %*% x})
if(nrow(cmat)==1)
{nchains<-matrix(nchains, nrow=1)}
rownames(nchains)<-rownames(cmat)
out<-list(
chains=t(nchains),
x=x,
cmat=cmat
)
class(out)<-"CCDiff"
return(out)
}
`CCDiff.boot` <-
function(x, cmat=NULL,
type=c("Dunnett","Tukey","Sequen","Williams","Changepoint","McDermott","GrandMean","Marcus"))
{
type<-match.arg(type)
if(type %in% c("Williams","Changepoint","McDermott","Marcus","GrandMean"))
{warning("This is a test version. Choosing contrasts types differing from 'Dunnett','Tukey' or 'Sequen' might make no sense in case of unbalanced designs!")}
ngroup<-ncol(x$t)
f<-x$strata
ni<-unlist(lapply(split(f,f=f),length))
gnames<-names(x$t0)
names(ni)<-gnames
if(any(ni<5))
{warning("For sample sizes les than 5 this function hardly makes sense!")}
if(is.null(cmat))
{
cmat<-contrMat(n=ni,type=type)
}
chains <- x$t
out<-CCDiff.default(x=chains, cmat=cmat)
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/CCDiff.R
|
`CCRatio` <-
function(bugs, dat, cmat=NULL,
type=c("Dunnett", "Tukey", "Sequen", "Williams", "Changepoint"))
{
type<-match.arg(type)
if(!inherits(bugs, "bugs"))
{stop("argument bugs must be an object of class 'bugs'")}
if(!inherits(dat, "R2Bugsdat1w"))
{stop("argument dat must be an object of class 'R2Bugsdat1w'")}
if(dat$Intercept==TRUE)
{stop("dat$Intercept must be FALSE")}
ngroup<-dat$names$ni
chains<-bugs$sims.list$muvec
if(is.null(cmat))
{
cmat<-contrMatRatio(n=ngroup, type=type)
}
else
{
if(!is.list(cmat))
{stop("cmat must be a list")}
if(is.null(cmat$numC)|is.null(cmat$denC))
{stop("cmat must be a list with elements $numC and $denC, specifying the numerator and denominator contrast coefficients")}
if(!is.matrix(cmat$numC)|!is.matrix(cmat$denC))
{stop("elements $numC and $denC of 'cmat' must be matrices, specifying the numerator and denominator contrast coefficients")}
if(ngroup!=ncol(cmat$numC))
{stop("ncol(cmat$numC) must be the same as the number of means in muvec")}
if(ngroup!=ncol(cmat$denC))
{stop("ncol(cmat$denC) must be the same as the number of means in muvec")}
}
nchains<-apply(X=chains, MARGIN=1, FUN=function(x){(cmat$numC%*%x) / (cmat$denC%*%x)})
if(nrow(cmat$numC)==1)
{nchains<-matrix(nchains, nrow=1)}
rownames(nchains)<-rownames(cmat$numC)
out<-list(
chains=t(nchains),
bugs=bugs,
dat=dat,
cmat=cmat
)
class(out)<-"CCRatio"
return(out)
}
`CCRatio.default` <-
function(x, cmat)
{
ngroup<-ncol(x)
chains<-x
if(!is.list(cmat))
{stop("cmat must be a list")}
if(is.null(cmat$numC)|is.null(cmat$denC))
{stop("cmat must be a list with elements $numC and $denC, specifying the numerator and denominator contrast coefficients")}
if(!is.matrix(cmat$numC)|!is.matrix(cmat$denC))
{stop("elements $numC and $denC of 'cmat' must be matrices, specifying the numerator and denominator contrast coefficients")}
if(ngroup!=ncol(cmat$numC))
{stop("ncol(cmat$numC) must be the same as the number of means in muvec")}
if(ngroup!=ncol(cmat$denC))
{stop("ncol(cmat$denC) must be the same as the number of means in muvec")}
nchains<-apply(X=chains, MARGIN=1, FUN=function(x){(cmat$numC%*%x) / (cmat$denC%*%x)})
if(nrow(cmat$numC)==1)
{nchains<-matrix(nchains, nrow=1)}
rownames(nchains)<-rownames(cmat$numC)
out<-list(
chains=t(nchains),
x=x,
cmat=cmat
)
class(out)<-"CCRatio"
return(out)
}
`CCRatio.boot` <-
function(x, cmat=NULL,
type=c("Dunnett","Tukey","Sequen","Williams","Changepoint","McDermott","GrandMean","Marcus"))
{
type<-match.arg(type)
if(type %in% c("Williams","Changepoint","McDermott","Marcus","GrandMean"))
{warning("This is a test version. Choosing contrasts types differing from 'Dunnett','Tukey' or 'Sequen' might make no sense in case of unbalanced designs!")}
ngroup<-ncol(x$t)
f<-x$strata
ni<-unlist(lapply(split(f,f=f),length))
gnames<-names(x$t0)
names(ni)<-gnames
if(any(ni<5))
{warning("For sample sizes les than 5 this function hardly makes sense!")}
if(is.null(cmat))
{
cmat<-contrMatRatio(n=ni, type=type)
}
chains <- x$t
out<-CCRatio.default(x=chains, cmat=cmat)
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/CCRatio.R
|
CIGLM<-function(x, conf.level=0.95, method=c("Raw", "Adj", "Bonf"))
{
method<-match.arg(method)
switch(method,
"Raw"={
CI<-confint(x,level=conf.level, calpha=univariate_calpha())
adjnam<-"Unadjusted"
},
"Adj"={
CI<-confint(x,level=conf.level, calpha=adjusted_calpha())
adjnam<-"Adjusted"
},
"Bonf"={
ncomp<-nrow(x$linfct)
CI<-confint(x,level=1-(1-conf.level)/ncomp, calpha=univariate_calpha())
adjnam<-"Bonferroni-adjusted"
})
return(CI)
}
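## Hedged usage sketch ('gfit' is a hypothetical multcomp::glht object);
## univariate_calpha() and adjusted_calpha() are taken from multcomp.
# CIGLM(gfit, conf.level = 0.95, method = "Bonf")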
minus2slash<-function(x)
{
sl <- strsplit(x,"-")
out <- unlist(lapply(X=sl,FUN=function(x){paste(x,collapse="/")}))
return(out)
}
UnlogCI<-function(x){UseMethod("UnlogCI")}
# # # #
UnlogCI.glht<-function(x)
{
if(!inherits(x, "glht"))
{stop("x must be of class glht")}
# # # fit by glm
if(class(x$model)[1] %in% c("glm", "geeglm"))
{
fam<-x$model$family$family
if( !fam %in% c("poisson", "quasipoisson", "binomial", "quasibinomial") )
{stop("Family of the fitted glm should be one of poisson, quasipoisson, binomial or quasibinomial")}
link<-x$model$family$link
if(!link %in% c("log", "logit"))
{stop("The link function of the fitted glm should be log or logit")}
if( fam %in% c("poisson", "quasipoisson") )
{
CI<-x$confint
CIout<-exp(CI)
cnam <- dimnames(CI)[[1]]
dimnames(CIout)[[1]]<-minus2slash(cnam)
para<-"Confidence intervals for the ratios of abundance"
out<-x
out$conf.int<-CIout
out$parameter <- para
}
if(fam %in% c("binomial", "quasibinomial"))
{
CI<-x$confint
CIout<-exp(CI)
cnam <- dimnames(CI)[[1]]
dimnames(CIout)[[1]]<-minus2slash(cnam)
sf<-dimnames(x$model$model[[1]])[[2]]
para<-"Confidence intervals for odds ratios"
if(!is.null(sf[1]))
{
ORdef<-paste(c("with the odds defined as p(",sf[1],")/(1-p(",sf[1],"))"), collapse="" )
}
else
{ORdef<-""}
out<-x
out$conf.int<-CIout
out$parameter <- paste(para, ORdef, sep="\n")
}
}
# # # fit by glm.nb
if(class(x$model)[1]=="negbin")
{
link<-x$model$family$link
if(!link=="log")
{stop("The link function of the fitted glm should be log")}
CI<-x$confint
CIout<-exp(CI)
cnam <- dimnames(CI)[[1]]
dimnames(CIout)[[1]]<-minus2slash(cnam)
para<-"Confidence intervals for the ratios of abundance"
out<-x
out$conf.int<-CIout
out$parameter <- para
}
# # # fit by lme4 (older versions)
if(class(x$model)[1]=="glmer")
{
fam<-x$model@family$family
if( !fam %in% c("poisson", "quasipoisson", "binomial", "quasibinomial") )
{stop("Family of the fitted glmer should be one of poisson, quasipoisson, binomial or quasibinomial")}
link<-x$model@family$link
if(!link %in% c("log", "logit"))
{stop("The link function of the fitted glm should be log or logit")}
if( fam %in% c("poisson", "quasipoisson") )
{
CI<-x$confint
CIout<-exp(CI)
cnam <- dimnames(CI)[[1]]
dimnames(CIout)[[1]]<-minus2slash(cnam)
para<-"Confidence intervals for the ratios of abundance"
out<-x
out$conf.int<-CIout
out$parameter <- para
}
if(fam %in% c("binomial", "quasibinomial"))
{
CI<-x$confint
CIout<-exp(CI)
cnam <- dimnames(CI)[[1]]
dimnames(CIout)[[1]]<-minus2slash(cnam)
sf<-dimnames(x@frame[[1]])[[2]]
para<-"Confidence intervals for odds ratios"
if(!is.null(sf[1]))
{
ORdef<-paste(c("with the odds defined as p(",sf[1],")/(1-p(",sf[1],"))"), collapse="" )
}
else
{ORdef<-""}
out<-x
out$conf.int<-CIout
out$parameter <- paste(para, ORdef, sep="\n")
}
}
# # # fit by lmer, later versions
if(class(x$model)[1]=="mer")
{
callchar<-paste(deparse(x$model@call), collapse="")
bin <- grep("binomial", callchar, value=TRUE)
pois <- grep("poisson", callchar, value=TRUE)
if( length(bin)==0 & length(pois)==0 )
{stop("Family of the fitted model should be one of poisson, quasipoisson, binomial or quasibinomial")}
link<-grep("link", callchar, value=TRUE)
if(length(link)!=0)
{warning("Note: The application of this function most probably makes sense only when applied to models with log or logit link!")}
CI<-x$confint
CIout<-exp(CI)
cnam <- dimnames(CI)[[1]]
dimnames(CIout)[[1]]<-minus2slash(cnam)
para<-NULL
out<-x
out$conf.int<-CIout
out$parameter <- para
}
# # # fit by gamlss
if(class(x$model)[1]=="gamlss")
{
fam<-x$model$family[1]
paras<-x$model$parameters
link<-x$model$mu.link
if( fam=="NBI" & link=="log")
{
CI<-x$confint
CIout<-exp(CI)
cnam <- dimnames(CI)[[1]]
dimnames(CIout)[[1]]<-minus2slash(cnam)
para<-"Confidence intervals for the ratios of abundance"
out<-x
out$conf.int<-CIout
out$parameter <- para
}
else{
if( fam=="BB" & link=="logit")
{
CI<-x$confint
CIout<-exp(CI)
cnam <- dimnames(CI)[[1]]
dimnames(CIout)[[1]]<-minus2slash(cnam)
sf<-dimnames(x$model$model[[1]])[[2]]
para<-"Confidence intervals for odds ratios"
out<-x
out$conf.int<-CIout
out$parameter <- paste(para, "\n")
}
else{
stop("No methods except for families 'NBI' and 'BB' implemented for objects of class 'gamlss'")
}
}
}
class(out)<-c("UnlogCI", class(out))
return(out)
}
print.UnlogCI<-function(x,...)
{
args<-list(...)
if(is.null(args$digits))
{args$digits<-3}
call<-x$model$call
print(call)
out<-as.matrix(x$conf.int)
attr(out,"error")<-NULL
attr(out,"calpha")<-NULL
attr(out,"conf.level")<-NULL
args$x<-as.table(out)
cat(x$parameter,"\n")
do.call("print", args)
cat("Estimated quantile = ", round(attr(x$confint, which="calpha"),4), "\n")
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/CIglm.R
|
`CInp` <-
function(x, ...){UseMethod("CInp")}
`CInp.default` <-
function(x, conf.level=0.95, alternative="two.sided", ...)
{
alternative <- match.arg(alternative, choices=c("two.sided","less","greater"))
args<-list(...)
DataMatrix <- x
N <- nrow(DataMatrix)
k <- round(conf.level*N,0)
switch(alternative,
"two.sided"={
probs<-c((1-conf.level)/2, 1-(1-conf.level)/2)
CIs <- t( apply( X=DataMatrix, MARGIN=2,
FUN=function(x){quantile(x=x, probs=probs)} ))
},
"less"={
probs<-c(conf.level)
upper <- t( apply( X=DataMatrix, MARGIN=2,
FUN=function(x){quantile(x=x, probs=probs)} ))
CIs<-cbind(-Inf, upper)
},
"greater"={
probs<-c(1-conf.level)
lower <- t( apply( X=DataMatrix, MARGIN=2,
FUN=function(x){quantile(x=x, probs=probs)} ))
CIs<-cbind(lower,Inf)
}
)
# end of switch
estimate <- apply(X=DataMatrix, MARGIN=2, median)
colnames(CIs)<-c("lower","upper")
out<-list(
conf.int=CIs,
estimate=estimate,
x=x,
k=k,
N=N,
conf.level=conf.level,
alternative=alternative)
class(out)<-"CInp"
return(out)
}
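## Hedged usage sketch: unadjusted percentile intervals from a toy matrix
## of simulated draws, one column per parameter.
# draws <- cbind(a = rnorm(1000), b = rnorm(1000, mean = 2))
# CInp(draws, conf.level = 0.9)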
`CInp.CCRatio` <-
function(x,...)
{
args<-list(...)
args$x<-x$chains
out<-do.call("CInp.default", args)
out$x<-x
return(out)
}
`CInp.CCDiff` <-
function(x,...)
{
args<-list(...)
args$x<-x$chains
out<-do.call("CInp.default", args)
out$x<-x
return(out)
}
`CInp.bugs` <-
function(x, conf.level=0.95, alternative="two.sided", whichp=NULL, ...)
{
args<-list(...)
sl<-x$sims.list
if(is.null(whichp))
{
mat<-x$sims.matrix
}
else{
namsl<-names(sl)
if(!whichp %in% namsl)
{stop("whichp could not be found in the parameter list of the openbugs object")}
if(length(whichp)==1)
{
mat<-sl[[whichp]]
}
if(length(whichp)>1)
{
mat<-matrix(nrow=x$n.sims)
for (i in seq(along.with=whichp))
{
mat<-cbind(mat,x$sims.list[[whichp[i]]])
}
}
}
args$x<-mat
args$conf.level<-conf.level
args$alternative<-alternative
out<-do.call("CInp.default", args)
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/CInp.R
|
##############################
c2compnames<-function(cmat, ntype="aggr")
{
if(is.null(colnames(cmat)))
{colnames(cmat)<-1:ncol(cmat)}
if(!is.matrix(cmat))
{stop("cmat must be a matrix")}
if(!is.numeric(cmat))
{stop("cmat must be a numeric matric")}
if(length(ntype)!=1)
{stop("ntype must be a single character string!")}
if(!ntype %in% c("aggr", "sequ"))
{stop("ntype must be one of 'aggr', 'sequ'")}
if(ntype=="aggr")
{
cnames<-colnames(cmat)
rnames<-character(length=nrow(cmat))
for(i in 1:nrow(cmat))
{
si<-sign(cmat[i,])
wsip<-si==1
wsin<-si==(-1)
rnames[i]<-paste( "(", paste(cnames[wsip], collapse="+"), ")-(", paste(cnames[wsin], collapse="+"), ")", collapse="", sep="" )
}
}
if(ntype=="sequ")
{
cnames<-colnames(cmat)
rnames<-character(length=nrow(cmat))
for(i in 1:nrow(cmat))
{
si<-sign(cmat[i,])
wsin0<-si!=0
wsip<-si[wsin0]==1
wsin<-si[wsin0]==(-1)
nam<-cnames[wsin0]
sic<-character(length=length(nam))
sic[wsip]<-"+"
sic[wsin]<-"-"
rnames[i]<-paste(paste(sic, nam, sep=""), collapse="")
}
}
rownames(cmat)<-rnames
return(cmat)
}
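## Hedged usage sketch with a hand-built contrast matrix:
# cm <- rbind(c(1, -1, 0), c(1, 0, -1))
# colnames(cm) <- c("A", "B", "C")
# c2compnames(cm, ntype = "aggr")  # row names become "(A)-(B)" and "(A)-(C)"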
###########################
IAcontrasts<-function(type, k)
{
if(!all(type %in% c("Dunnett", "Tukey", "Sequence", "Identity")))
{stop("all elements of type must be one of 'Dunnett','Tukey' or 'Sequence'")}
if ( any(c(length(k),length(type))!=2))
{stop("k and type must be vectors of length 2")}
if(!is.numeric(k) & !is.integer(k))
{stop("k must be an integer vector")}
n1<-rep(3,k[1])
names(n1)<-as.character(1:k[1])
n2<-rep(3,k[2])
names(n2)<-as.character(1:k[2])
if(type[1]!="Identity"){CM1 <- contrMat(n=n1, type=type[1])}
else{CM1<-diag(rep(1,k[1]))}
if(type[2]!="Identity"){CM2 <- contrMat(n=n2, type=type[2])}
else{CM2 <-diag(rep(1,k[2]))}
out <- kronecker(CM1, CM2)
cnames <- paste( rep(colnames(CM1), each=length(colnames(CM2))),
rep(colnames(CM2), times=length(colnames(CM1))), sep="")
colnames(out)<-cnames
return(out)
}
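## Hedged usage sketch (contrMat is assumed to come from multcomp):
## Dunnett-type comparisons on the first factor crossed with Tukey-type
## comparisons on the second, each factor having 3 levels.
# IAcontrasts(type = c("Dunnett", "Tukey"), k = c(3, 3))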
############################
IAcontrastsCMAT<-function(CMAT1, CMAT2)
{
out <- kronecker(CMAT1, CMAT2)
cnames <- paste( rep(colnames(CMAT1), each=length(colnames(CMAT2))),
rep(colnames(CMAT2), times=length(colnames(CMAT1))), sep="")
colnames(out)<-cnames
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/IAcontrasts.R
|
`R2Bugsdat1w` <-
function(formula, data)
{
if(!inherits(data, "data.frame"))
{stop("argument 'data' must be of class data.frame")}
if(length(formula[[2]])!=1)
{stop("The left hand side of 'formula' must be a single variable name")}
if(length(formula[[3]])!=1)
{stop("The right hand side of 'formula' must be a single variable name")}
mf<-model.frame(formula=formula, data=data)
resp<-mf[,1]
if(!class(resp) %in% c("numeric", "integer"))
{stop("Ther response variable in data must be integer or numeric")}
# create the bugsdat, appropriate for the model
mm<-as.data.frame(model.matrix(object=formula, data=mf))
Y<-resp
pnames<-colnames(mm)
ngroups<-length(pnames)
lnames<-paste("X", 1:ngroups, sep="")
bugsdat<-as.list(mm)
names(bugsdat)<-lnames
bugsdat$Y<-resp
bugsdat$P<-ngroups
bugsdat$N<-length(resp)
ini<-list(
beta=rep(1,ngroups),
r=1)
splitlist<-split(mf[,1],f=mf[,2])
ni<-unlist(lapply(splitlist, length))
out<-list(bugsdat=bugsdat,
parameters="muvec",
inits=ini,
data=data,
names=list(pnames=pnames, ni=ni),
Intercept=TRUE
)
class(out)<-"R2Bugsdat1w"
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/R2Bugsdat1w.R
|
`R2Bugsdat1w.data.frame` <-
function(data, response, treatment, Intercept=FALSE)
{
if(!inherits(data, "data.frame"))
{stop("argument 'data' must be of class data.frame")}
if(!is.character(response) | !is.character(treatment))
{stop("Arguments 'response' and 'treatment' must be character strings")}
if(length(response)!=1 | length(treatment)!=1)
{stop("Arguments 'response' and 'treatment' must be single character strings")}
dnames<-names(data)
if(!response %in% dnames)
{stop("response could not be found in data")}
if(!treatment %in% dnames)
{stop("treatment could not be found in data")}
# formula without intercept
if(!Intercept)
{form<-as.formula(paste(response, paste(0, treatment, sep="+"), sep="~"))}
else
{form<-as.formula(paste(response, treatment, sep="~"))}
mf<-model.frame(formula=form, data=data)
resp<-mf[,1]
if(!class(resp) %in% c("numeric", "integer"))
{stop("Ther response variable in data must be integer or numeric")}
# create the bugsdat, appropriate for the model
mm<-as.data.frame(model.matrix(object=form, data=mf))
pnames<-colnames(mm)
ngroups<-length(pnames)
lnames<-paste("X", 1:ngroups, sep="")
bugsdat<-as.list(mm)
names(bugsdat)<-lnames
bugsdat$Y<-resp
bugsdat$P<-ngroups
bugsdat$N<-length(resp)
ini<-list(
beta=rep(0,ngroups),
r=1)
splitlist<-split(mf[,1],f=mf[,2])
ni<-unlist(lapply(splitlist, length))
out<-list(bugsdat=bugsdat,
parameters="muvec",
inits=ini,
data=data,
names=list(pnames=pnames, ni=ni),
Intercept=Intercept
)
class(out)<-"R2Bugsdat1w"
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/R2Bugsdat1w.data.frame.R
|
`SCSnp` <-
function(x, ...){UseMethod("SCSnp")}
`SCSnp.default` <-
function(x, conf.level=0.95, alternative="two.sided", ...)
{
alternative <- match.arg(alternative, choices=c("two.sided","less","greater"))
DataMatrix <- x
N <- nrow(DataMatrix)
k <- round(conf.level*N,0)
RankDat <- apply(DataMatrix,2,rank)
switch(alternative,
"two.sided"={
W1 <- apply(RankDat,1,max)
W2 <- N + 1 - apply(RankDat,1,min)
Wmat <- cbind(W1,W2)
w <- apply(Wmat,1,max)
tstar <- round(sort(w)[k],0)
SCI <- function(x)
{
sortx <- sort(x)
cbind(sortx[N+1-tstar],sortx[tstar])
}
SCS <- t(apply(DataMatrix,2,SCI))
},
"less"={
W1 <- apply(RankDat,1,max)
tstar <- round(sort(W1)[k],0)
SCI <- function(x)
{
sortx <- sort(x)
cbind(-Inf, sortx[tstar])
}
SCS<-t(apply(DataMatrix,2,SCI))
},
"greater"={
W2 <- N + 1 - apply(RankDat,1,min)
tstar <- round(sort(W2)[k],0)
SCI <- function(x)
{
sortx <- sort(x)
cbind(sortx[N+1-tstar], Inf)
}
SCS<-t(apply(DataMatrix,2,SCI))
}
)
# end of switch
estimate<-apply(DataMatrix,2, median)
colnames(SCS)<-c("lower","upper")
out<-list(
conf.int=SCS,
estimate=estimate,
x=x,
k=k,
N=N,
conf.level=conf.level,
alternative=alternative)
class(out)<-"SCSnp"
return(out)
}
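## Hedged usage sketch: simultaneous nonparametric intervals from a toy
## matrix of simulated draws, one column per parameter.
# draws <- cbind(a = rnorm(1000), b = rnorm(1000, mean = 2))
# SCSnp(draws, conf.level = 0.9)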
`SCSnp.CCRatio` <-
function(x,...)
{
args<-list(...)
args$x<-x$chains
out<-do.call("SCSnp.default", args)
out$x<-x
return(out)
}
`SCSnp.CCDiff` <-
function(x,...)
{
args<-list(...)
args$x<-x$chains
out<-do.call("SCSnp.default", args)
out$x<-x
return(out)
}
`SCSnp.bugs` <-
function(x, conf.level=0.95, alternative="two.sided", whichp=NULL, ...)
{
args<-list(...)
sl<-x$sims.list
if(is.null(whichp))
{
mat<-x$sims.matrix
}
else{
namsl<-names(sl)
if(!whichp %in% namsl)
{stop("whichp could not be found in the parameter list of the openbugs object")}
if(length(whichp)==1)
{
mat<-sl[[whichp]]
}
if(length(whichp)>1)
{
mat<-matrix(nrow=x$n.sims)
for (i in seq(along.with=whichp))
{
mat<-cbind(mat,x$sims.list[[whichp[i]]])
}
}
}
args$x<-mat
args$conf.level<-conf.level
args$alternative<-alternative
out<-do.call("SCSnp.default", args)
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/SCSnp.R
|
`allignment` <-
function(response, block, type=c("mean","median"),...)
{
args<-list(...)
if(length(response)!=length(block))
{stop("'response' and 'block' must be vectors of the same length")}
if(!is.numeric(response))
{stop("'response' must be a numeric vector")}
type <- match.arg(type)
f <- as.factor(block)
splitdat <- split(response, f=f)
out1<-lapply(X=splitdat,
FUN=function(x,...)
{
args$x<-x
m<-do.call(type, args)
return(x-m)
}
)
out<-unsplit(value=out1, f=f)
return(out)
}
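## Hedged usage sketch: center a response within blocks by block medians.
# allignment(response = c(1, 2, 3, 10, 20, 30),
#            block = c(1, 1, 1, 2, 2, 2), type = "median")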
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/allignment.R
|
"vcov.gamlss" <- function(object,...)
{
Qr <- object$mu.qr
p <- object$mu.df
p1 <- 1:(p - object$mu.nl.df)
chol2inv(Qr$qr[p1, p1, drop = FALSE])
}
"modelparm.gamlss" <- function(model, coef.=coef, vcov.=vcov, df = NULL, ...){
### extract coefficients and their covariance matrix
beta <- coef.(object=model, what="mu")
sigma <- vcov.(object=model)
sigma <- as.matrix(sigma)
if (any(length(beta) != dim(sigma))) { stop("dimensions of coefficients and covariance matrix don't match")}
### try to identify non-estimable parameters
estimable <- rep(TRUE, length(beta))
if (any(is.na(beta))) {
estimable[is.na(beta)] <- FALSE
beta <- beta[estimable]
}
if (length(beta) != ncol(sigma) || nrow(sigma) != sum(estimable))
stop("could not extract coefficients and covariance matrix from ",
sQuote("model"))
if(is.null(df)){df<-model$df.residual}else{if(df<0){stop("df is not positive")}}
RET <- list(coef = beta, vcov = sigma, df = df, estimable = estimable)
class(RET) <- "modelparm"
RET
}
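## With vcov.gamlss() and modelparm.gamlss() defined, multcomp::glht() can
## be applied to gamlss fits; 'gfit' is a hypothetical gamlss object with a
## factor 'treatment'.
# library(multcomp)
# glht(gfit, linfct = mcp(treatment = "Dunnett"))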
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/gamlss_glht.R
|
"vcov.geeglm" <- function(object,...){
sobj<-summary(object)
return(sobj$cov.scaled)
}
"modelparm.geeglm" <- function(model, coef.=coef, vcov.=vcov, df = NULL, ...){
### extract coefficients and their covariance matrix
beta <- coef.(object=model)
sigma <- vcov.(object=model)
sigma <- as.matrix(sigma)
if (any(length(beta) != dim(sigma))) { stop("dimensions of coefficients and covariance matrix don't match")}
### try to identify non-estimable parameters
estimable <- rep(TRUE, length(beta))
if (any(is.na(beta))) {
estimable[is.na(beta)] <- FALSE
beta <- beta[estimable]
}
if (length(beta) != ncol(sigma) || nrow(sigma) != sum(estimable))
stop("could not extract coefficients and covariance matrix from ",
sQuote("model"))
if(is.null(df)){df<-model$df.residual}else{if(df<0){stop("df is not positive")}}
RET <- list(coef = beta, vcov = sigma, df = df, estimable = estimable)
class(RET) <- "modelparm"
RET
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/geeglm_glht.R
|